blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c77b47f97824aff18a52fa35c02f547758d749b9
|
9f0d75ede9b67b5286d1e1ec61792d5737ba1d1f
|
/R/perform.scanpy.normalisation.R
|
d2d7fa500d5c8e50ada4cf10f6e81119a87aceef
|
[] |
no_license
|
jcogan1/IBRAP
|
34fcf3bbea82a22880443bd83db40929d1f8aae0
|
8202b52f38bb3cb9af48524c38cfd6c4b39e2457
|
refs/heads/main
| 2023-08-03T15:32:52.737201
| 2021-09-14T15:55:35
| 2021-09-14T15:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,349
|
r
|
perform.scanpy.normalisation.R
|
#' @name perform.scanpy
#' @aliases perform.scanpy
#'
#' @title Performs Scanpy normalisation, hvg selection, scaling and variance stabilisation and regression.
#'
#' @description A new method-assay is produced. Raw counts are normalised and HVGs identified using Scanpy
#'
#' @param object IBRAP S4 class object
#' @param assay Character. String indicating which assay to use
#' @param slot Character. String indicating which slot within the assay should be sourced
#' @param new.assay.name Character. What should the new assay be called. Default = 'SCANPY'
#' @param target_sum Numerical. What should the data be scaled to. Default = 1e6
#' @param exclude_highly_expressed Boolean. Should highly expressed genes be excluded. Default = FALSE
#' @param max_fraction Numerical. If exclude_highly_expressed=TRUE, consider cells as highly expressed that have more counts than max_fraction of the original total counts in at least one cell. Default = 0.05
#' @param key_added Character. What should the column name be that contains cell scaling factors. Default = 'scanpy_norm_factor'
#' @param log1 Boolean. If TRUE, apply scanpy's natural-log log1p transformation; if FALSE, apply log2(x + 1) instead. Default = TRUE
#' @param n_top_genes Numerical. How many HVGs should be identified. Default = 1500
#' @param max_mean Numerical. If n_top_genes is NULL, this is the maximum mean to determine HVGs. Default = 6
#' @param min_mean Numerical. If n_top_genes is NULL, this is the minimum mean to determine HVGs. Default = 0.0125
#' @param min_disp Numerical. If n_top_genes is NULL, the minimum dispersion that should be presented in a gene for it to be considered highly variable. Default = 0.5
#' @param span Numerical. The fraction of cells that should be subset for the LOESS fit model. Default = 0.3
#' @param n_bins Numerical. Number of bins to produce when determining HVGs. Default = 20
#' @param flavor Character. Choosing which HVG selection method to use, options: 'seurat', 'cell_ranger', 'seurat_v3'. Default = 'seurat'
#' @param batch_key Character. Which column in the metadata identifies the batches of the cells. Default = NULL
#' @param do.scale Boolean. Whether the gene expression should be scaled. Default = TRUE
#' @param vars.to.regress Character. A single or multiple columns of information in the metadata that should be regressed from the dataset. Default = NULL
#' @param do.centre Boolean. Whether the gene expression should be centred. Default = TRUE
#'
#' @return Produces a new 'methods' assay containing normalised, scaled and HVGs.
#'
#' @examples
#'
#' object <- perform.scanpy(object = object,
#'                          vars.to.regress = 'RAW_total.counts', do.scale = T)
#'
#' @export
perform.scanpy <- function(object,
                           assay='RAW',
                           slot='counts',
                           new.assay.name='SCANPY',
                           target_sum = 1e6,
                           exclude_highly_expressed = FALSE,
                           max_fraction = 0.05,
                           key_added = 'scanpy_norm_factor',
                           log1 = TRUE,
                           n_top_genes = 1500,
                           max_mean = 6,
                           min_mean = 0.0125,
                           min_disp = 0.5,
                           span = 0.3,
                           n_bins = 20,
                           flavor = 'seurat',
                           batch_key = NULL,
                           do.scale=TRUE,
                           vars.to.regress=NULL,
                           do.centre=TRUE
                           ) {
  # ---- input validation ----------------------------------------------------
  if(!is(object = object, class2 = 'IBRAP')) {
    stop('Object must be of class IBRAP\n')
  }
  if(!is.character(assay)) {
    stop('Assay must be a character string\n')
  }
  if(!assay %in% names(object@methods)) {
    stop('assay does not exist\n')
  }
  if(!is.character(slot)) {
    stop('Slot must be a character string\n')
  }
  if(!slot %in% c('counts', 'normalised', 'norm.scaled')) {
    stop('slot does not exist\n')
  }
  if(!is.character(new.assay.name)) {
    stop('new.assay.name must be character string \n')
  }
  if(!is.numeric(target_sum)) {
    stop('target_sum must be numerical \n')
  }
  if(!is.logical(exclude_highly_expressed)) {
    stop('exclude_highly_expressed must be logical\n')
  }
  if(!is.numeric(max_fraction)) {
    stop('max_fraction must be numerical \n')
  }
  if(!is.character(key_added)) {
    stop('key_added must be character string \n')
  }
  if(!is.logical(log1)) {
    stop('log1 must be logical: TRUE/FALSE\n')
  }
  if(!is.numeric(n_top_genes)) {
    stop('n_top_genes must be numerical \n')
  }
  if(!is.numeric(max_mean)) {
    stop('max_mean must be numerical \n')
  }
  if(!is.numeric(min_mean)) {
    stop('min_mean must be numerical \n')
  }
  if(!is.numeric(min_disp)) {
    stop('min_disp must be numerical \n')
  }
  if(!is.numeric(span)) {
    stop('span must be numerical \n')
  }
  if(!is.numeric(n_bins)) {
    stop('n_bins must be numerical \n')
  }
  if(!is.character(flavor)) {
    stop('flavor must be character string \n')
  }
  # BUGFIX: the original checked batch_key twice, the second time demanding
  # is.numeric() while the error message demanded a character string; a single
  # character check is the intended contract.
  if(!is.null(batch_key)) {
    if(!is.character(batch_key)) {
      stop('batch_key must be character string\n')
    }
  }
  if(!is.logical(do.scale)) {
    stop('do.scale must be logical: TRUE/FALSE\n')
  }
  if(!is.null(vars.to.regress)) {
    if(!is.character(vars.to.regress)) {
      stop('vars.to.regress must be character string\n')
    }
  }
  if(!is.logical(do.centre)) {
    stop('do.centre must be logical: TRUE/FALSE\n')
  }
  # ---- build the AnnData object from the sourced matrix --------------------
  sc <- reticulate::import('scanpy')
  source.mat <- object@methods[[assay]][[slot]]
  scobj <- sc$AnnData(X = t(as.matrix(source.mat)))
  scobj$obs_names <- as.factor(colnames(source.mat))
  scobj$var_names <- as.factor(rownames(source.mat))
  if(length(names(object@sample_metadata)) >= 1) {
    scobj$obs <- object@sample_metadata
  }
  # ---- normalisation -------------------------------------------------------
  cat(crayon::cyan(paste0(Sys.time(), ': normalising counts\n')))
  # BUGFIX: max_fraction is a fraction; the original as.integer() truncated the
  # 0.05 default to 0. Fractional parameters are now passed via as.numeric().
  if(!is.null(target_sum) && !is.null(key_added)) {
    sc$pp$normalize_total(adata = scobj, target_sum = as.numeric(target_sum),
                          exclude_highly_expressed = as.logical(exclude_highly_expressed),
                          max_fraction = as.numeric(max_fraction), key_added = as.character(key_added))
  } else if (!is.null(target_sum)) {
    sc$pp$normalize_total(adata = scobj, target_sum = as.numeric(target_sum),
                          exclude_highly_expressed = as.logical(exclude_highly_expressed),
                          max_fraction = as.numeric(max_fraction))
  } else if (!is.null(key_added)) {
    # BUGFIX: this branch previously omitted key_added entirely, so the scaling
    # factors were never stored when target_sum was NULL.
    sc$pp$normalize_total(adata = scobj,
                          exclude_highly_expressed = as.logical(exclude_highly_expressed),
                          max_fraction = as.numeric(max_fraction),
                          key_added = as.character(key_added))
  } else {
    sc$pp$normalize_total(adata = scobj)
  }
  .counts <- t(scobj$X)
  # Dimnames are taken from the matrix actually sourced (assay/slot) rather
  # than assuming the RAW assay, keeping the output consistent with the input.
  rownames(.counts) <- rownames(source.mat)
  colnames(.counts) <- colnames(source.mat)
  feat.metadata <- feature_metadata(assay = .counts, col.prefix = new.assay.name)
  # ---- log transformation --------------------------------------------------
  cat(crayon::cyan(paste0(Sys.time(), ': log transforming data\n')))
  if(isTRUE(log1)) {
    sc$pp$log1p(scobj)
  } else if(isFALSE(log1)) {
    scobj$X <- log2(scobj$X+1)
  }
  .normalised <- t(scobj$X)
  rownames(.normalised) <- rownames(source.mat)
  colnames(.normalised) <- colnames(source.mat)
  # ---- highly variable gene selection --------------------------------------
  cat(crayon::cyan(paste0(Sys.time(), ': computing highly variable genes\n')))
  # Assemble the argument list once instead of four near-identical branches.
  # BUGFIX: the original 'batch_key only' branch dropped batch_key, and
  # min_mean/min_disp/span were truncated to 0 by as.integer().
  hvg.args <- list(adata = scobj,
                   min_mean = as.numeric(min_mean),
                   max_mean = as.numeric(max_mean),
                   min_disp = as.numeric(min_disp),
                   span = as.numeric(span),
                   n_bins = as.integer(n_bins),
                   flavor = as.character(flavor))
  if (!is.null(n_top_genes)) {
    hvg.args$n_top_genes <- as.integer(n_top_genes)
  }
  if (!is.null(batch_key)) {
    hvg.args$batch_key <- as.character(batch_key)
  }
  do.call(sc$pp$highly_variable_genes, hvg.args)
  .highly.variable.genes <- rownames(source.mat)[scobj$var[['highly_variable']]]
  # ---- scaling / regression via Seurat -------------------------------------
  seuobj <- suppressWarnings(Seurat::CreateSeuratObject(counts = .normalised[.highly.variable.genes,]))
  seuobj@meta.data <- object@sample_metadata
  # BUGFIX: Seurat::ScaleData's argument is spelled do.center; the original
  # 'do.centre = ...' fell into '...' and was silently ignored.
  seuobj <- Seurat::ScaleData(object = seuobj,
                              do.scale = do.scale,
                              vars.to.regress = vars.to.regress,
                              do.center = do.centre)
  .norm.scaled <- seuobj@assays$RNA@scale.data
  # (The original also built a second, unused AnnData object here; removed.)
  # ---- assemble the new methods assay --------------------------------------
  object@sample_metadata <- cbind(object@sample_metadata, cell_metadata(assay = as.matrix(.normalised), col.prefix = new.assay.name))
  object@methods[[new.assay.name]] <- new(Class = 'methods',
                                          counts = as(.counts, 'dgCMatrix'),
                                          normalised = as(.normalised, 'dgCMatrix'),
                                          norm.scaled = as.matrix(.norm.scaled),
                                          highly.variable.genes = .highly.variable.genes,
                                          feature_metadata = feat.metadata)
  cat(crayon::cyan(paste0(Sys.time(), ': Scanpy normalisation completed \n')))
  return(object)
}
|
29467d4ba726271511252437e92ad5d3e9e54a40
|
a6f03b97b6b594867737bf238affc382be17a786
|
/Rprog4.R
|
7e670fa7e24b98c1b3010a766d32c0f2ae2f9916
|
[] |
no_license
|
eldersodre/ExData_Plotting1
|
db24a8cbfd402a0aa668cc701ccaf88ae412c490
|
0d0795666be6c1cbb724f4d321dcab70da6138d1
|
refs/heads/master
| 2021-01-14T13:47:38.727445
| 2014-10-11T22:07:19
| 2014-10-11T22:07:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,389
|
r
|
Rprog4.R
|
###### Exploratory Data Analysis ######
########### Assignment 1 #############
############### Plot 4 ###############
# Reads the UCI household power data, subsets 2007-02-01/02, and draws a
# 2x2 panel of time-series plots into plot4.png.
dataset<-read.table("household_power_consumption.txt",sep=";",header=T)
data<-dataset[66637:69516,] #Only two days
# Convert the measurement columns to numeric via as.character(): this works
# whether read.table produced factors (R < 4.0 default) or character columns
# (R >= 4.0). The original as.numeric(levels(x)[x]) only worked on factors.
# BUGFIX: column 9 (Sub_metering_3) is plotted below but was never converted,
# so on factor input its factor codes were drawn instead of its values.
num.cols <- c(3, 4, 5, 7, 8, 9)
data[num.cols] <- lapply(data[num.cols], function(x) as.numeric(as.character(x)))
# Build POSIX timestamps from the Date and Time columns. paste() is
# vectorised, so the original element-by-element loop is unnecessary.
datetime <- paste(data[, 1], data[, 2], sep = ",")
times<-strptime(datetime,"%d/%m/%Y,%H:%M:%S")
png("plot4.png") #Opening graphics device
par(mfrow=c(2,2))
#Topleft plot
plot(times,data[,3],type="l",xlab="",ylab="Global Active Power")
#Topright plot
plot(times,data[,5],type="l",xlab="datetime",ylab="Voltage")
#Bottomleft plot
plot(times,data[,7],type="n",ylab="Energy sub metering",xlab="")
lines(times,data[,7],type="l")
lines(times,data[,8],type="l",col="red")
lines(times,data[,9],type="l",col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lwd=1,bty="n")
#Bottomright plot
plot(times,data[,4],type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off() #Closing graphics device
|
301047779d25695b3b380dc1693e6295ea6d6387
|
8b72f83fe27a18d50540c5d3c69e67c3ac4a338d
|
/app.R
|
1d0040975ee8c42f58939af97410d838053f8d06
|
[] |
no_license
|
datasketch/app-dummy-data
|
bc0213de3ae7a0c5444cb99367e9afd06431ec62
|
17491a6b57b1f642e59d7cdd3e235ae88652c74a
|
refs/heads/master
| 2022-03-12T20:21:56.645352
| 2019-10-24T19:46:57
| 2019-10-24T19:46:57
| 217,369,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 833
|
r
|
app.R
|
# Shiny app: generate fake personal data with the charlatan package.
# genTable() is defined in utils.R -- presumably it maps each selected
# variable name to the matching charlatan generator; verify there.
library(shiny)
library(charlatan)
source('utils.R')
# Display label -> charlatan generator name, used for the checkbox choices.
variables <- c(
'Name' = 'name',
'Job Title' = 'job',
'Phone Number' = 'phone_number',
'Email' = 'email',
'City' = 'city'
)
# UI: sidebar with variable checkboxes and a row-count input; the generated
# table fills the main panel.
ui <- fluidPage(
titlePanel('fakeR: Generate fake data'),
sidebarLayout(
sidebarPanel(
checkboxGroupInput(
inputId = 'variables',
label = 'Select the variables you want',
choices = variables,
selected = 'name'
),
numericInput(
inputId = 'rows',
label = 'Number of rows',
min = 1,
value = 10
)
),
mainPanel(
tableOutput('fakedata')
)
)
)
# Server: re-render the fake-data table whenever the selection or row count
# changes.
server <- function(input, output) {
output$fakedata <- renderTable({
genTable(input$variables, rows = input$rows)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
d5f8d116f98e92589f62fcdcc2759abda864e6b5
|
59780f063cd6e6b735210cd7de0590dfb06c8798
|
/tech_growth_impact_real_estate/ui.R
|
bc9717c435fd899987a7d72550cf0bee44186d52
|
[] |
no_license
|
alexetalbott/Shiny-App-USA-Real-Estate-Tech-Jobs-Exploration
|
98f98f9de8f95a051634b726900dc1334f8bedff
|
70493cb11dd910857161f4e6d7d837413c1ae072
|
refs/heads/master
| 2020-04-14T19:16:04.600844
| 2019-01-25T02:16:27
| 2019-01-25T02:16:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,451
|
r
|
ui.R
|
# Shiny UI: explores US home values (Zillow Zestimate) against tech-worker
# growth. Two top-level tabs: a leaflet map with a ranked table, and a set of
# chart tabs. Several earlier chart tabs are kept below as commented-out code.
# NOTE(review): this file calls shinytheme(), leafletOutput(), box(),
# pickerInput() and plotlyOutput() without visible library() calls -- they are
# presumably loaded in global.R; verify before running standalone.
shinyUI(
fluidPage(
titlePanel("Home Value and Tech Worker Growth"),
theme = shinytheme("sandstone"),
tabsetPanel(
# ---- Tab 1: choropleth-style map plus the underlying ranked table ----
tabPanel("Map",
sidebarLayout(
sidebarPanel(
# Rank direction, quantity, year and metric for the map query.
selectInput(inputId = "top_or_bottom", label = "Select:",
choices=list("Top" = "top", "Bottom" = "bottom"),
selected="Top"),
selectInput(inputId = "n_rows_map", label = "(Choose a quantity)",
choices=list("5", "10", "20", "50", "100"),
selected="10"),
sliderInput(inputId = "year",
label = "from the year",
min = 2000,
max = 2016,
value = 2007,
sep = ""
),
selectInput(inputId = "dfcolumn", label = "by",
choices=list("Median Zestimate"= "median_metro_value", "Total Programming Jobs" = "math_and_programming_jobs"),
selected="Median Zestimate"),
actionButton("recalculate","Recalculate")
),
mainPanel(
fluidRow(
box(
leafletOutput("mymap",width=950)
)
),
fluidRow(
box(
tableOutput(outputId = "maptable")
)
)
)
)
),
# ---- Tab 2: nested chart tabs ----
tabPanel("Charts",
tabsetPanel(
# Scatterplot of states for a chosen (animatable) year.
tabPanel("State Scatterplot",
fluidRow(column(4,
# choices = NULL: the state list is populated server-side.
pickerInput(inputId = "state_scatter", label = "Select State(s)",
choices= NULL, options = list(`actions-box` = TRUE),multiple = T
)),column(8,
sliderInput(inputId = "year_scatter",
label = "Select Year",
min = 2000,
max = 2016,
value = 2000,
sep = "",
animate=TRUE
)
)),
checkboxInput("checkbox", label = "scale x axis?", value = FALSE),
fluidRow(
plotlyOutput(outputId = "scatterplot_year")
)
),
# tabPanel("City Comparison",
#          fluidRow(
#            mainPanel(
#              fluidRow(
#                selectInput(inputId = "cities", label = "Select City",
#                            choices= NULL)
#              ),
#              fluidRow(
#                plotOutput(outputId = "value_plot")
#              ),
#              fluidRow(
#                selectInput(inputId = "cities2", label = "Select City",
#                            choices= NULL)
#              ),
#              fluidRow(
#                plotOutput(outputId = "value_plot2")
#              )
#            )
#          )
# ), ## end of tabItem 2
# Side-by-side Zestimate time series for two selectable cities.
tabPanel("City Zestimate Over Time",
fluidRow(
mainPanel(
fluidRow(
selectInput(inputId = "cityLine_city", label = "Select City",
choices= NULL)
),
fluidRow(
plotOutput(outputId = "cityLine_plot")
),
fluidRow(
selectInput(inputId = "cityLine_city2", label = "Select City",
choices= NULL)
),
fluidRow(
plotOutput(outputId = "cityLine_plot2")
)
)
)
) ## end of cityZestimate tab
# tabPanel("City Zestimate Over Time",
#          fluidRow(
#            mainPanel(
#              fluidRow(
#                selectInput(inputId = "blsLine_city", label = "Select City",
#                            choices= NULL)
#              ),
#              fluidRow(
#                plotOutput(outputId = "blsLine_plot")
#              ),
#              fluidRow(
#                selectInput(inputId = "blsLine_city2", label = "Select City",
#                            choices= NULL)
#              ),
#              fluidRow(
#                plotOutput(outputId = "blsLine_plot2")
#              )
#            )
#          )
# ) ## end of BLS tab
) ## end of secondary tabs
) ## end of Chart tabPanel
) ## of main TabsetPanel
) ## end of fluidPage
) ## end of Shiny UI
|
af7a0bdf2aefbf73b198d7d32d5b9ee22e0b84d4
|
f80cddcbee78d55418a5d80dac8221792c8315cc
|
/belajar.R
|
6bb60ead565533a4d3b7e300d9b6a88606062edf
|
[] |
no_license
|
kiko0217/belajar-r
|
df27f36a6b61a1ac2ecb4af1f7ac7655312f22bc
|
7740fdb4a7e8c7335b99568194d0bfb76834f285
|
refs/heads/master
| 2023-02-23T15:49:27.938299
| 2021-01-28T10:25:49
| 2021-01-28T10:25:49
| 333,723,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
belajar.R
|
# Beginner R scratch script: arithmetic, assignment, vectors, and workspace
# management.
2+5
# demo comment
print("hello world")
x <- 2
x <- 3          # reassignment: x now holds 3
x
y <-c(1,2,3,4,5)
y <-1:10        # overwrite y with the sequence 1..10
x <- y <-1:10   # chained assignment: both x and y get 1..10
x+y             # element-wise addition
z <- x+y
z2 <- x*y       # element-wise multiplication
X <- 10         # R is case-sensitive: X is a different object from x
ls()            # list objects in the workspace
rm(X)           # remove a single object
remove(z2)      # remove() is the long form of rm()
rm(list = ls()) # clear the entire workspace (destructive; use with care)
|
49d2063bf8efc3b7f305d5716b874e66f73d85b1
|
61f693b7a5560c87972d9008c668156f993acee0
|
/man/recode_dat_intercept.Rd
|
6457b17722fe6d5ed7c647b4ad0dd6d55d74a716
|
[
"MIT"
] |
permissive
|
explodecomputer/simulateGP
|
7e88fb90496502f86c379d22799d4977744e0506
|
7eeba0323df54dace3146c0941c7be699dc34433
|
refs/heads/master
| 2023-02-08T08:08:37.564481
| 2023-01-26T13:48:22
| 2023-01-26T13:48:22
| 91,170,950
| 10
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 439
|
rd
|
recode_dat_intercept.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_mr.r
\name{recode_dat_intercept}
\alias{recode_dat_intercept}
\title{Intercept recoding to have every effect on x positive}
\usage{
recode_dat_intercept(dat)
}
\arguments{
\item{dat}{Output from get_effs}
}
\value{
Data frame
}
\description{
Tries to avoid the issue of recoding by finding the intercept and pivoting negative g-x associations around it
}
|
b0d558ebc209c8dec4ceb9b9f47a35423b9ead61
|
46891316c185d2a7deda1a9971e8caab0b7d6147
|
/R/marfissci.get.data.R
|
d53ef7f559290aa8ba6f30987632595334763b3f
|
[
"MIT"
] |
permissive
|
jae0/aegis.mpa
|
900843423af3a9bd2490d5f1c1c95560a4a47f99
|
bcd3d08007474d910042a34edcefe11105fbb3fb
|
refs/heads/master
| 2023-06-08T21:53:46.714251
| 2023-05-27T23:29:31
| 2023-05-27T23:29:31
| 190,056,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,193
|
r
|
marfissci.get.data.R
|
# Extract MARFIS commercial catch records from the PTRAN Oracle database,
# optionally filtered by species, gear and year, and optionally including
# non-landed removals (bait, discards, dead discards). Returns a data frame;
# can also write a descriptively-named CSV.
#
# Args:
#   spp              integer vector of SPECIES_CODEs, or NULL for all
#   gear             integer vector of GEAR_CODEs, or NULL for all
#   years            vector of 4-digit years, or NULL for all (MARFIS >= 2002)
#   get.nonlandings  include catch whose CATCH_USAGE_CODE is not 'LANDED'
#   save.csv         also write the result to a CSV under the aegis data dir
#
# NOTE(review): relies on oracle.personal.username / oracle.personal.password
# and project.datadirectory() being defined in the calling environment
# (project convention) -- confirm before running standalone.
marfissci.get.data <- function(spp = NULL, gear=NULL, years = NULL, get.nonlandings=T, save.csv=T) {
channel <- ROracle::dbConnect( DBI::dbDriver("Oracle"), dbname="PTRAN", username = oracle.personal.username, password = oracle.personal.password)
#'MMM March 31, 2016
#'This is a marfissci extraction that can limit results by species, gear(s)
#'and year(s). By default, it gets all removed biomass, which includes catch
#'that is defined seperately from typical "landings" (i.e. bait, discards, and
#'dead discards). This can be disabled by setting get.nonlandings to F. Also
#'by default, this saves the output to a csv. If this is not desired, you can
#'change save.csv to F.
#'
#'Marfis is valid from 2002 forwards.
#'
#'If the "entered" coordinate is not available, then the "determined" coordinate
#'is used as the position for the data. If no value for latitude is
#'available, if becomes 0, and if no value for longitude is available, if
#'becomes 0.
# Each filter becomes an extra "AND ... IN (...)" clause, or an empty string.
if (!is.null(spp)) {
spp.tweak = paste0("AND SPECIES_CODE IN (",paste(spp, collapse = ","),")")
}else{
spp.tweak = ""
}
if (!is.null(gear)) {
gear.tweak = paste0("AND GEAR_CODE IN (",paste(gear, collapse = ","),")")
}else{
gear.tweak = ""
}
if (!is.null(years)) {
years.tweak = paste0("AND to_char(DATE_FISHED,'YYYY') IN (",paste(years, collapse =","),")")
}else{
years.tweak = ""
}
# Optional UNION arm adding the non-landed removals.
# NOTE(review): the '#lbs->kg' and '#metric tons->kg' annotations embedded in
# this SQL string are not valid Oracle comment syntax ('--' is); confirm the
# non-landings arm of this query actually parses against PTRAN.
if (get.nonlandings==T) {
catch.usage.tweak = "UNION
/*Get all catch where CATCH_USAGE_CODE <> 'LANDED'*/
SELECT
S1.LOG_EFRT_STD_INFO_ID,
E1.FV_FISHED_DATETIME DATE_FISHED,
E1.FV_GEAR_CODE GEAR_CODE,
S1.SSF_SPECIES_CODE AS SPECIES_CODE,
S1.SSF_LANDED_FORM_CODE AS LANDED_FORM_CODE,
S1.SSF_SPECIES_SIZE_CODE AS SPEC_SIZE_CODE,
S1.CATCH_USAGE_CODE,
E1.FV_DURATION_IN_HOURS,
E1.FV_NUM_OF_GEAR_UNITS,
S1.UNIT_OF_MEASURE_ID,
ROUND(
CASE
WHEN S1.UNIT_OF_MEASURE_ID = 10
THEN S1.WEIGHT
WHEN S1.UNIT_OF_MEASURE_ID = 20
THEN S1.WEIGHT * 0.453592 #lbs->kg
WHEN S1.UNIT_OF_MEASURE_ID = 30
THEN S1.WEIGHT / 1000 #metric tons->kg
END, 2) WEIGHT_KG,
-999 RPT_WEIGHT_KG,
/*try to position data*/
ROUND(
CASE WHEN E1.ENT_LATITUDE IS NOT NULL
THEN SUBSTR(E1.ENT_LATITUDE, 1, 2) + SUBSTR(E1.ENT_LATITUDE, 3, 2) / 60 + SUBSTR(E1.ENT_LATITUDE, 5, 2) / 3600
WHEN E1.DET_LATITUDE IS NOT NULL
THEN SUBSTR(E1.DET_LATITUDE, 1, 2) + SUBSTR(E1.DET_LATITUDE, 3, 2) / 60 + SUBSTR(E1.DET_LATITUDE, 5, 2) / 3600
ELSE 0
END, 4) LAT,
ROUND(
CASE WHEN E1.ENT_LONGITUDE IS NOT NULL
THEN (-1 * (SUBSTR(E1.ENT_LONGITUDE, 1, 2) + SUBSTR(E1.ENT_LONGITUDE, 3, 2) / 60 + SUBSTR(E1.ENT_LONGITUDE, 5, 2) / 3600))
WHEN E1.DET_LONGITUDE IS NOT NULL
THEN (-1 * (SUBSTR(E1.DET_LONGITUDE, 1, 2) + SUBSTR(E1.DET_LONGITUDE, 3, 2) / 60 + SUBSTR(E1.DET_LONGITUDE, 5, 2) / 3600))
--WHEN E1.DET_NAFO_UNIT_AREA_ID IS NOT NULL
--THEN c1.LON
ELSE 0
END, 4) LON
FROM MARFISSCI.LOG_SPC_STD_INFO S1
INNER JOIN MARFISSCI.LOG_EFRT_STD_INFO E1
ON S1.LOG_EFRT_STD_INFO_ID = E1.LOG_EFRT_STD_INFO_ID
-- not 'landed' or 'live discard'
WHERE S1.CATCH_USAGE_CODE NOT IN (10,50)"
}else{
catch.usage.tweak = ""
}
# Main query: landed catch (CATCH_USAGE_CODE = 10), with lat/lon decoded from
# DDMMSS strings, preferring entered over determined over pro_spc positions.
query.raw = paste0(
"
SELECT * FROM
(
/*Get all catch where CATCH_USAGE_CODE= 'LANDED' */
SELECT
P.LOG_EFRT_STD_INFO_ID,
P.DATE_FISHED,
P.GEAR_CODE,
P.SPECIES_CODE,
P.LANDED_FORM_CODE,
P.SPECIES_SIZE_CODE,
P.CATCH_USAGE_CODE,
E.FV_DURATION_IN_HOURS,
E.FV_NUM_OF_GEAR_UNITS,
-999 UNIT_OF_MEASURE_ID, --allkg
P.RND_WEIGHT_KGS,
P.RPT_WEIGHT_KGS,
/*try to position data*/
ROUND(
CASE WHEN E.ENT_LATITUDE IS NOT NULL
THEN SUBSTR(E.ENT_LATITUDE, 1, 2) + SUBSTR(E.ENT_LATITUDE, 3, 2) / 60 + SUBSTR(E.ENT_LATITUDE, 5, 2) / 3600
WHEN E.DET_LATITUDE IS NOT NULL
THEN SUBSTR(E.DET_LATITUDE, 1, 2) + SUBSTR(E.DET_LATITUDE, 3, 2) / 60 + SUBSTR(E.DET_LATITUDE, 5, 2) / 3600
WHEN P.LATITUDE IS NOT NULL
THEN SUBSTR(P.LATITUDE, 1, 2) + SUBSTR(P.LATITUDE, 3, 2) / 60 + SUBSTR(P.LATITUDE, 5, 2) / 3600
ELSE 0
END, 4) LAT,
ROUND(
CASE WHEN E.ENT_LONGITUDE IS NOT NULL
THEN (-1 * (SUBSTR(E.ENT_LONGITUDE, 1, 2) + SUBSTR(E.ENT_LONGITUDE, 3, 2) / 60 + SUBSTR(E.ENT_LONGITUDE, 5, 2) / 3600))
WHEN E.DET_LONGITUDE IS NOT NULL
THEN (-1 * (SUBSTR(E.DET_LONGITUDE, 1, 2) + SUBSTR(E.DET_LONGITUDE, 3, 2) / 60 + SUBSTR(E.DET_LONGITUDE, 5, 2) / 3600))
WHEN P.LONGITUDE IS NOT NULL
THEN (-1 * (SUBSTR(P.LONGITUDE, 1, 2) + SUBSTR(P.LONGITUDE, 3, 2) / 60 + SUBSTR(P.LONGITUDE, 5, 2) / 3600))
ELSE 0
END, 4) LON
FROM MARFISSCI.pro_spc_info P
INNER JOIN MARFISSCI.LOG_EFRT_STD_INFO E
ON P.LOG_EFRT_STD_INFO_ID = E.LOG_EFRT_STD_INFO_ID
WHERE P.CATCH_USAGE_CODE = 10
", catch.usage.tweak , "
)
WHERE 1 = 1
", spp.tweak,"
", gear.tweak,"
", years.tweak,"
"
)
data.raw = ROracle::dbGetQuery(channel,query.raw)
if (save.csv==T){
#make a descriptive name so we know what we've got
# Each filter contributes either "" or "_<single value>" or "_<min>_<max>".
if (is.null(spp)){
spp.file = ""
}else if(range(spp)[1] == range(spp)[2]) {
spp.file = paste0("_",range(spp)[1])
}else{
spp.file = paste0("_",paste(range(spp),collapse = "_"))
}
if (is.null(gear)){
gear.file = ""
}else if (range(gear)[1] == range(gear)[2]) {
gear.file = paste0("_",range(gear)[1])
}else{
gear.file = paste0("_",paste(range(gear),collapse = "_"))
}
if (is.null(years)){
years.file = ""
}else if (range(years)[1] == range(years)[2]) {
years.file = paste0("_",range(years)[1])
}else{
years.file = paste0("_",paste(range(years),collapse = "_"))
}
file.output = paste0(project.datadirectory("aegis", "landings", "raw_data" ), years.file, gear.file, spp.file, ".csv")
write.csv(data.raw, file.output, row.names = F)
print(paste0("CSV written to ",file.output))
}
ROracle::dbDisconnect(channel)
return(data.raw)
}
#e.g. marfissci.get.data(spp=NULL, gear=51, years=2008, get.nonlandings=T, save.csv=T)
# for (i in 2002:2016){
#   marfissci.get.data(years=i)
# }
|
d04ed2c5192519ea66960536c8be854e1c6d572e
|
92a0b69e95169c89ec0af530ed43a05af7134d45
|
/R/plan_orchard.R
|
afb9d3f9eb39ca54f1455e9f2e77c7bc0e072f08
|
[] |
no_license
|
gelfondjal/IT2
|
55185017b1b34849ac1010ea26afb6987471e62b
|
ee05e227403913e11bf16651658319c70c509481
|
refs/heads/master
| 2021-01-10T18:46:17.062432
| 2016-01-20T17:51:29
| 2016-01-20T17:51:29
| 21,449,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
r
|
plan_orchard.R
|
#' Create project hub files in root directory
#' @return logical for successful creation or not
#' @export
#'
plant.orchard <- function(){
  # Registry CSV mapping project ids to their directories, kept under
  # ~/ProjectPaths (path.expand.2 is the project's path helper).
  orchard.dir <- file.path(path.expand.2("~"), "ProjectPaths")
  orchard.site <- file.path(orchard.dir, "projectid_2_directory.csv")
  if(!file.exists(orchard.site)){
    # BUGFIX: only create the directory when it is missing -- dir.create()
    # warns when it already exists (e.g. the CSV was deleted but the directory
    # kept). recursive = TRUE also tolerates a missing parent.
    if(!dir.exists(orchard.dir)){
      dir.create(orchard.dir, recursive = TRUE)
    }
    # Zero-row template with the three registry columns.
    empty.orchard <- data.frame(project.id="",project.path="",swap.directory="")[-1,]
    write.csv(empty.orchard,orchard.site,row.names=FALSE)
    return(TRUE)
  }
  return(FALSE)
}
|
1d4caab112fcad0ea0d0377e4e4965a3fdfedbed
|
bbd1cf229fc2dee5faf4111312486676faabd956
|
/ETE_2019/cours_3/script_3.R
|
ab97960754c4fc25d8f2ea26aadae63d09e587b4
|
[] |
no_license
|
nmeraihi/ACT3035
|
c28cbc4ef9daba9daea604eec793a11cb505538e
|
b0eaa56825c72c5c65185243d30b500ec4dcc02d
|
refs/heads/master
| 2022-12-21T21:11:34.641905
| 2022-12-07T22:53:13
| 2022-12-07T22:53:13
| 133,547,375
| 1
| 4
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,341
|
r
|
script_3.R
|
# Course script (ACT3035, lecture 3): logicals, control flow, loops and
# timing. Printed strings are intentionally in French.
vect <- c(T, F, T, F, F, F, T)
as.numeric(vect)   # logicals coerce to 0/1
vec <- 1:10
vec[5]<1
# if (condition met or not) {
#   action to take
# }
for(i in 1:10){
print("Bonjour")
}
rep("Bonjour", 10)
# next: skip the current iteration
for(i in 1:5){
if(i==2){
next
}
print(paste("Bonjour le chiffre ", i ))
}
# break: leave the loop entirely
for(i in 1:5){
if(i==2){
break
}
print(paste("Bonjour le chiffre ", i ))
}
i=0
while(i<5){
i <- i+1
print(paste("Bonjour le chiffre ", i ))
}
# while loop with a 3-second wall-clock timeout
i=1
t_0 <- Sys.time()
while(i<2){
if(Sys.time()-t_0>3){
break
}
print(paste(i, Sys.time()))
}
# repeat must be exited with an explicit break
n=1
repeat{
n=n+1
print(mean(rnorm(100)))
if(n>50){break}
}
# Timing demo: growing an object with rbind() inside a loop...
moyennes <- NULL
temps_1 <- system.time(
for(i in 1:100000){
mu <- mean(rnorm(100))
moyennes <- rbind(moyennes, mu)
}
)
temps_1
# ...versus preallocating the result vector (much faster).
n <- 100000
moyennes <- numeric(length = n)
temps_2 <- system.time(
for(i in 1:n){
mu <- mean(rnorm(100))
moyennes[i] <- mu
}
)
temps_2
# ?append
# example(append)
# append(1:5, 0:1)
# Writing functions: each redefinition below refines the previous version
# (argument checking, list return values, default arguments, documentation).
maFonctionSomme <- function(x,y){
return(x+y)
}
maFonctionSomme(13,15)
maFonctionSomme(T,"Bonjour")   # errors: "+" is not defined for character input
# Version 2: validate that both arguments are numeric first.
maFonctionSomme <- function(x,y){
if(is.numeric(x) & is.numeric(y)){
return(x+y)
}else
{print("Assurez-vous que vos
arguments sont numériques")}
}
# Return several results at once as a named list.
maFonctionOperation <- function(x,y){
if(is.numeric(x) & is.numeric(y)){
return(list("Somme"=x+y,
"Difference"=x-y,
"Multiplication"=x*y,
"Division"=x/y))
}else
{print("Assurez-vous que vos
arguments sont numériques")}
}
# Same function with default argument values x = 1, y = 1.
maFonctionOperation <- function(x=1,y=1){
if(is.numeric(x) & is.numeric(y)){
return(list("Somme"=x+y,
"Difference"=x-y,
"Multiplication"=x*y,
"Division"=x/y))
}else
{print("Assurez-vous que vos
arguments sont numériques")}
}
maFonctionOperation()   # runs on the defaults
maFonctionOperation <- function(x=1,y=1){
# This function performs 4 computations:
# sum, difference, multiplication and division.
# Args:
#   x: the value of the first element
#   y: the value of the second element
# Returns: sum, difference, multiplication and division
if(is.numeric(x) & is.numeric(y)){
return(list("Somme"=x+y,
"Difference"=x-y,
"Multiplication"=x*y,
"Division"=x/y))
}else
{print("Assurez-vous que vos
arguments sont numériques")}
}
maFonctionOperation           # printing a function shows its source
formals(maFonctionOperation)  # inspect its formal arguments
# Saving/loading objects, then first steps with ggplot2.
mamat <- matrix(rnorm(100), 10)
save(mamat, file="mamat.RData")   # serialize to an .RData file
rm(mamat)
load("mamat.RData")               # restore the object into the workspace
write.csv(mamat,"mamat.csv")
# install.packages("leaflet")
install.packages("tidyverse")
library(tidyverse)
view(mpg)
# Scatterplot of engine displacement vs highway mileage.
ggplot(data = mpg)+
geom_point(mapping = aes(x=displ, y=hwy))
session <- c()
# Two simulated groups with shifted means, stacked into one data frame.
A <- rnorm(200)
B <- rnorm(200, .8)
dat <- data.frame(type=factor(rep(c("A", "B"),each=200)),
taux=c(rnorm(200), rnorm(200, .8)))
View(dat)
library(ggplot2)
ggplot(data = dat, aes(x=taux))+
geom_histogram(binwidth = .5)
# Overlaid density-scaled histograms plus density curves, one colour per group.
graph <- ggplot(data = dat, aes(x=taux, fill=type))
graph+geom_histogram(aes(y=..density..), binwidth = .5,
position = "identity",
alpha=.5)+
geom_density(aes(colour=type))
|
28a67bce2244e623a841e3cb8e377c4bdc6d7dfc
|
9326d857c238ff56f993437fb44a5c90961d0753
|
/man/banner.Rd
|
0d7e5f6244802be1b02bb8b540cf0dec2ab4dd92
|
[] |
no_license
|
moj-analytical-services/shinyGovstyle
|
e1e9b4062710b229f269f9b0bb58c1398383f7e1
|
a033342e971b9f090c06b6e17b82b20d27dce50c
|
refs/heads/master
| 2023-07-11T05:45:21.430131
| 2022-02-22T10:36:38
| 2022-02-22T10:36:38
| 192,864,104
| 34
| 4
| null | 2022-02-07T12:41:32
| 2019-06-20T06:41:58
|
CSS
|
UTF-8
|
R
| false
| true
| 901
|
rd
|
banner.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/banner.R
\name{banner}
\alias{banner}
\title{Banner Function}
\usage{
banner(inputId, type, label)
}
\arguments{
\item{inputId}{The input slot that will be used to access the value.}
\item{type}{Main type of label e.g. alpha or beta. Can be any word.}
\item{label}{Text to display in the banner.}
}
\value{
a banner html shiny object
}
\description{
This function creates a banner component, typically used to mark a service
as being in an alpha or beta phase.
}
\examples{
if (interactive()) {
ui <- fluidPage(
shinyGovstyle::header(
main_text = "Example",
secondary_text = "User Examples",
logo="shinyGovstyle/images/moj_logo.png"),
shinyGovstyle::banner(
inputId = "banner", type = "beta", 'This is a new service')
)
server <- function(input, output, session) {}
shinyApp(ui = ui, server = server)
}
}
\keyword{banner}
|
6d20fa7ae319a4180df29ae3d21daee158e573a8
|
be9d5be42158084f14b81c4ef4a32f89948a34e0
|
/runAnalysis.R
|
560cbd1a150221bb6effd24cfd5d9ba5fd7812dc
|
[] |
no_license
|
xzw0005/GettingAndCleaningData
|
b2885fc281604dcb8a05d1e5d6f6b62e348a5fe8
|
000cba4d77ef59e827ab7323aa3d93ef902559e9
|
refs/heads/master
| 2021-01-09T21:52:19.478761
| 2015-05-24T22:31:12
| 2015-05-24T22:31:12
| 36,117,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,549
|
r
|
runAnalysis.R
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test sets,
# keep the mean/std measurements, label activities, and write a tidy table of
# per-subject, per-activity averages.
# NOTE(review): hard-coded working directory -- adjust to your local checkout.
setwd("F:\\Coursera\\GettingAndCleaningData\\getdata-project")
# Activity codes 1-6 -> descriptive names (see activity_labels.txt).
activity.names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
train = read.table("./UCI HAR Dataset/train/X_train.txt")
dim(train)
print(object.size(train), unit = "MB")
test = read.table("./UCI HAR Dataset/test/X_test.txt")
dim(test)
print(object.size(test), unit = "MB")
head(test)
# Feature names become the column names of both measurement tables.
features = read.table("./UCI HAR Dataset/features.txt")
dim(features)
names(train) = features[, 2]
names(test) = features[, 2]
head(train)
train$tag = "train"
test$tag = "test"
# Attach the numeric activity labels and the subject ids.
trainLabel = read.table("./UCI HAR Dataset/train/y_train.txt")
testLabel = read.table("./UCI HAR Dataset/test/y_test.txt")
dim(trainLabel)
train$activity = trainLabel[, 1]
test$activity = testLabel[, 1]
head(test)
trainSubject = read.table("./UCI HAR Dataset/train/subject_train.txt")
testSubject = read.table("./UCI HAR Dataset/test/subject_test.txt")
dim(trainSubject)
train$subject = trainSubject[, 1]
test$subject = testSubject[, 1]
dim(train)
dim(test)
mergedData = rbind(train, test)
dim(mergedData)
head(mergedData)
# Select the mean/std measurement columns plus activity and subject.
# NOTE: "()" is an (empty) regex group here, so "mean()" matches any name
# containing "mean", including the meanFreq columns -- kept as in the
# original; use fixed = TRUE to restrict to literal "mean()" names.
meanVars = grep("mean()", names(mergedData))
meanVars
stdVars = grep("std()", names(mergedData))
stdVars
actVar = grep("activity", names(mergedData))
actVar
subVar = grep("subject", names(mergedData))
myData = mergedData[, c(meanVars, stdVars, actVar, subVar)]
dim(myData)
# BUGFIX: the original recoded myData$activity in a scalar loop here and then
# immediately rebuilt myData from mergedData, discarding the recode. The dead
# loop is removed; activity is recoded once, after aggregation, below.
table(myData$activity)
table(myData$subject)
# Average every selected column per (subject, activity) pair.
tidyData <- aggregate(myData,
                      by=list(myData$subject, myData$activity),FUN=mean)
tidyData = tidyData[, c(-1, -2)]   # drop the duplicate Group.1/Group.2 columns
names(tidyData)
dim(tidyData)
head(tidyData)
# Vectorised recode of the (still numeric, group-constant) activity codes,
# replacing the original element-by-element if/else chain.
tidyData$activity <- activity.names[tidyData$activity]
write.table(tidyData, "./tidyData.txt", row.names = FALSE)
|
8aea8dca4dbbc8a2c1d1c4217050b9217066dce8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RgoogleMaps/examples/GetMap.Rd.R
|
de49f1b92eb0863f14e9c943e29b327d099131fe
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,766
|
r
|
GetMap.Rd.R
|
# Extracted example for RgoogleMaps::GetMap (static Google map download).
# Requires network access and the RgoogleMaps package.
library(RgoogleMaps)
### Name: GetMap
### Title: download a static map from the Google server
### Aliases: GetMap
### ** Examples
# Three landmark coordinates in lower Manhattan; centre the map on their mean.
lat = c(40.702147,40.718217,40.711614);
lon = c(-74.012318,-74.015794,-73.998284);
center = c(mean(lat), mean(lon));
# Smallest zoom level that still fits both the latitude and longitude ranges.
zoom <- min(MaxZoom(range(lat), range(lon)));
#this overhead is taken care of implicitly by GetMap.bbox();
# Three coloured, labelled markers encoded in Static Maps URL syntax.
markers = paste0("&markers=color:blue|label:S|40.702147,-74.015794&markers=color:",
"green|label:G|40.711614,-74.012318&markers=color:red|color:red|",
"label:C|40.718217,-73.998284")
myMap <- GetMap(center=center, zoom=zoom,markers=markers);
#Note that in the presence of markers one often needs to add some extra padding to the
#latitude range to accomodate the extent of the top most marker
if (0){#takes too long to run for CRAN check
#add a path, i.e. polyline:
myMap <- GetMap(center=center, zoom=zoom,
path = paste0("&path=color:0x0000ff|weight:5|40.737102,-73.990318|",
"40.749825,-73.987963|40.752946,-73.987384|40.755823,-73.986397"));
#use implicit geo coding
BrooklynMap <- GetMap(center="Brooklyn", zoom=13)
PlotOnStaticMap(BrooklynMap)
#use implicit geo coding and display labels in Korean:
BrooklynMap <- GetMap(center="Brooklyn", zoom=13, hl="ko")
PlotOnStaticMap(BrooklynMap)
#no highways
ManHatMap <- GetMap(center="Lower Manhattan", zoom=14,
extraURL="&style=feature:road.highway|visibility:off",
destfile = "LowerManhattan.png")
PlotOnStaticMap(ManHatMap)
#reload the map without a new download:
ManHatMap <- GetMap(destfile = "LowerManhattan.png",NEWMAP=FALSE)
PlotOnStaticMap(ManHatMap)
#The example below defines a polygonal area within Manhattan, passed a series of
#intersections as locations:
#myMap <- GetMap(path = paste0("&path=color:0x00000000|weight:5|fillcolor:0xFFFF0033|",
# "8th+Avenue+%26+34th+St,New+York,NY|8th+Avenue+%26+42nd+St,New+York,NY|",
# "Park+Ave+%26+42nd+St,New+York,NY,NY|Park+Ave+%26+34th+St,New+York,NY,NY"),
# destfile = "MyTile3a.png");
#note that since the path string is just appended to the URL you can "abuse" the path
#argument to pass anything to the query, e.g. the style parameter:
#The following example displays a map of Brooklyn where local roads have been changed
#to bright green and the residential areas have been changed to black:
# myMap <- GetMap(center="Brooklyn", zoom=12, maptype = "roadmap",
#path = paste0("&style=feature:road.local|element:geometry|hue:0x00ff00|",
# "saturation:100&style=feature:landscape|element:geometry|lightness:-100"),
# sensor='false', destfile = "MyTile4.png", RETURNIMAGE = FALSE);
#In the last example we set RETURNIMAGE to FALSE which is a useful feature in general
#if png is not installed. In that cases, the images can still be fetched
#and saved but not read into R.
#In the following example we let the Static Maps API determine the correct center and
#zoom level implicitly, based on evaluation of the position of the markers.
#However, to be of use within R we do need to know the values for zoom and
#center explicitly, so it is better practice to compute them ourselves and
#pass them as arguments, in which case meta information on the map tile can be saved as well.
#myMap <- GetMap(markers = paste0("&markers=color:blue|label:S|40.702147,-74.015794&",
# "markers=color:green|label:G|40.711614,-74.012318&markers=color:red|",
# "color:red|label:C|40.718217,-73.998284"),
# destfile = "MyTile1.png", RETURNIMAGE = FALSE);
}
|
5ebaaa682d31c8f95ad4e82aafca081980629484
|
91134c9c434ee7ce2529efa478faa820dce61e0a
|
/Programs/shinypopfit/ui.R
|
1ce8332cb1337a73c43ece63bf15d891f387ffd0
|
[] |
no_license
|
Adam-Brand/Pooled_Testing_HIV
|
9855603f21eac6f479b035cadf446907d2a550e8
|
c82c184da1a028fdc4170ba4b105e6e212d09e6f
|
refs/heads/master
| 2023-06-19T00:59:02.218768
| 2021-07-19T11:48:31
| 2021-07-19T11:48:31
| 266,970,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,802
|
r
|
ui.R
|
#==============================================================================
# FILENAME: ui.R
# PROJECT: Pooled testing in HIV
# PURPOSE: ui.R file for the shiny program shinypopfit
#
#
# AUTHOR: Adam Brand
# INPUT datasets: none
# OUTPUT: none, this is a user interface for the a shiny app
# R VERSION: 3.6.1
#==============================================================================
#Notes: this needs to be in the same folder as the serverR file for this app, but does not need to be run
# =============================================================================
# This code also needs to be in a separate file, but called ui.R
library(shiny)
# shinysource.R must sit in the same directory as this ui.R.
source("shinysource.R")
# UI definition: a sidebar of model/population parameters and a main panel
# that renders the combined population-vs-parametric-fit plot ("plot2",
# produced by the matching server.R).
shinyUI(fluidPage(
titlePanel("Parametric Population Fit"),
sidebarLayout(
sidebarPanel(
# RNG seed and simulated population size.
numericInput("seed", label = "Seed", value=12, min=1),
numericInput("popsize", label = "Pop. Size", value=1000, min=1000),
# Regression coefficients B0-B3 and residual SD of the simulated model.
numericInput("b0", label = "B0", value=1, min=-50),
numericInput("b1", label = "B1", value=.05, min=-50),
numericInput("b2", label = "B2", value=1, min=-50),
numericInput("b3", label = "B3", value=.05, min=-50),
numericInput("sd", label = "Standard Deviation", value=.5, min=0.01),
# Failure threshold and prevalence settings.
sliderInput("cutoff", label = "Failure Cutoff", min=200, max=2500, value=1000, step=50),
sliderInput("prev", label = "Prevalence Over Cutoff", min=.01, max=1, value=0.1),
sliderInput("pf", label = "Prior Failure Prevalence", min=.01, max=1, value=0.25),
# Shape parameters of the Beta distribution used by the server.
numericInput("shape1", label = "Beta Shape 1", min=0.1, value=5),
numericInput("shape2", label = "Beta Shape 2", min=0.1, value=0.5)
),
mainPanel(
h1("Population vs Parametric Fit: Combined curves"),
plotOutput("plot2")
)
)
))
|
085e9f19cdf1374a07bb01958b32026effa47e93
|
eb98c8ee3611c8cff81b1c73e1db6b5971faa16d
|
/man/endowport.Rd
|
67088f442735446a3912151894e359929e8f8eb2
|
[] |
no_license
|
nathanesau/stocins
|
565fbd2250d390af22e1cbfc8c4df16c63277f42
|
047d7b88b96c888fbbdcd87bb9389d4ef55fae8a
|
refs/heads/master
| 2021-01-23T05:24:12.831326
| 2017-04-07T17:29:32
| 2017-04-07T17:29:32
| 86,301,821
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
endowport.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insuranceModels.R
\name{endowport}
\alias{endowport}
\alias{z.moment.iport.endowport}
\title{Endowment insurance portfolio (identical policies)}
\usage{
\method{z.moment}{iport.endowport}(moment, ins, mort, irm)
}
\description{
A portfolio of \code{c} identical endowment policies. See the
\link{insurance} class for details and examples on how to use this class.
}
\references{
Parker, Gary. An application of stochastic interest rate models in
life assurance. Diss. Heriot-Watt University, 1992.
}
|
1df07601d85b11f7e1103745c26c8e85ee014145
|
f51b84af824432b03c5d2379483171a6fec4b9e3
|
/R_code/data_extraction.R
|
790ddd815196e927b06af8c5bb850134a964dac4
|
[] |
no_license
|
ZiyingFeng/Exploratory-Data-Analysis---Household-Power-Consumption
|
587a2a8e671c8d6935f6cdd54d92665c21f9b64d
|
803e20d8ab3165eb43855fa4765c74bfc4f2e679
|
refs/heads/master
| 2021-06-17T15:15:28.410524
| 2017-06-06T17:00:57
| 2017-06-06T17:00:57
| 93,230,599
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
data_extraction.R
|
# lubridate is required for ymd()/hms() below; the original script called them
# without ever loading the package, so the final step errored at runtime.
library(lubridate)
# Read the raw data.
# NOTE(review): the UCI household_power_consumption.txt file is normally
# semicolon-separated with "?" as the NA marker -- confirm that read.table()
# without sep= parses it as intended here.
data <- read.table("./household_power_consumption.txt", header = TRUE, stringsAsFactors = FALSE)
# Convert the first column (day/month/year strings) to Date.
data[,1] <- as.Date(data[,1], format = "%d/%m/%Y")
# Extract data from the dates 2007-02-01 and 2007-02-02.
data1 <- subset(data, data[,1]>="2007-02-01" & data[,1]<="2007-02-02")
# Or using data1 <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Remove rows whose second column still contains the "?" missing marker.
data2 <- data1[!grepl("\\?", data1[,2]),]
# Combine the date and time columns into one DateTime column.
data3 <- data2
data3$DateTime <- ymd(data3[,1])+hms(data3[,2])
# Or using datetime <- strptime(paste(data1$Date, data1$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
|
f024e47571a1a9923cd28b47e07379e5ae650fac
|
864ee1d2fc91865f46666b7e810bcfbc9da6a007
|
/R/initial_centroids.R
|
6f1d10948396ac975eea86f9354f2557836daccd
|
[
"MIT"
] |
permissive
|
gabiborges1/kfactr
|
f4f5d927777057d367da47eefaf9ab70717fb1fc
|
6150e2ad8df65e751ec1228d0afd1719228435b1
|
refs/heads/master
| 2020-04-28T01:57:59.871497
| 2019-03-10T22:26:15
| 2019-03-10T22:26:15
| 174,880,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,030
|
r
|
initial_centroids.R
|
# Centroid initialization functions.
#
# This script contains the functions used to
# initialize the centroids.
#
# Author: Gabriela Borges
# Initial date: 10-03-2019
# Last modified: 10-03-2019
#' Initializes the clusters using the traditional approach.
#'
#' Draws the starting centroids by sampling \code{k} rows at random
#' from the input data.
#'
#' @param data A dataframe.
#' @param k A integer number.
#' @return A dataframe containing \code{k} rows randomly selected from \code{data}.
#' @examples
#' data(mtcars)
#' k <- 3
#' initial_traditional(mtcars, k)
initial_traditional <- function(data, k) {
  # sample_n() returns k randomly chosen rows; no intermediate needed.
  dplyr::sample_n(data, k)
}
#' Selects, using weighted sampling, a category from the table.
#'
#' Draws a single category name, with each category weighted by its
#' proportion in the table.
#'
#' @param table A prop.table.
#' @return A vector of unit size containing the selected randomly weighted category.
#' @examples
#' data(mtcars)
#' table <- prop.table(table(mtcars$carb))
#' sample_weighted_category(table)
sample_weighted_category <- function(table) {
  # One weighted draw from the category names; the table entries are the weights.
  sample(size = 1, x = names(table), prob = table)
}
|
2db7fbb1c8e4a6b8931cac8c733547c62a6f76ed
|
3abf5d69da2fb9b7ffdecc54285ac563801b3479
|
/glmnet_cv_penalty/summary_glmnet.R
|
c64b233c8b8c165474bbc5e0e003b08c020bcba3
|
[
"Apache-2.0"
] |
permissive
|
eitail/machine-learning-summary
|
eb209a1beb4808886d3a588aba12dba7318c9e38
|
f3de55cfc30433b16f005e24264f9869ce0f0cd0
|
refs/heads/master
| 2020-04-08T22:15:11.062336
| 2017-06-11T11:04:38
| 2017-06-11T11:04:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,674
|
r
|
summary_glmnet.R
|
# NOTE(review): rm(list=ls()) and setwd() in a script are side effects on the
# caller's session -- consider removing them when this is reused.
rm(list=ls())
#about glmnet
#glmnet(x, y, family=c("gaussian","binomial","poisson","multinomial","cox","mgaussian"),
#		weights, offset=NULL, alpha = 1, nlambda = 100,
#		lambda.min.ratio = ifelse(nobs<nvars,0.01,0.0001), lambda=NULL,
#		standardize = TRUE, intercept=TRUE, thresh = 1e-07, dfmax = nvars + 1,
#		pmax = min(dfmax * 2+20, nvars), exclude, penalty.factor = rep(1, nvars),
#		lower.limits=-Inf, upper.limits=Inf, maxit=100000,
#		type.gaussian=ifelse(nvars<500,"covariance","naive"),
#		type.logistic=c("Newton","modified.Newton"),
#		standardize.response=FALSE, type.multinomial=c("ungrouped","grouped"))
#weights: observation weights, default 1
#offset
#alpha 0-1
#nlambda default is 100
#lambda.min.ratio: smallest value for lambda; 0.0001 when nobs > nvars, 0.01 otherwise
#lambda
#standardize: standardize x before fitting
#thresh: convergence threshold for coordinate descent, default 1E-7
#dfmax: maximum number of variables in the model
#gaussian 1/2 RSS/nobs + λ*penalty,
#other models -loglik/nobs + λ*penalty
#penalty (1-α)/2||β_j||_2^2+α||β_j||_2
library(glmnet)
# Load the package's bundled quick-start example (objects x and y).
setwd("D:\\r\\R-3.2.3\\library\\glmnet\\data")
load("QuickStartExample.RData")
fit = glmnet(x, y)
plot(fit)
# One fitting example per supported glmnet family.
# Gaussian
x=matrix(rnorm(100*20),100,20)
y=rnorm(100)
fit1=glmnet(x,y)
print(fit1)
coef(fit1,s=0.01) # extract coefficients at a single value of lambda
predict(fit1,newx=x[1:10,],s=c(0.01,0.005)) # make predictions
#multivariate gaussian
y=matrix(rnorm(100*3),100,3)
fit1m=glmnet(x,y,family="mgaussian")
plot(fit1m,type.coef="2norm")
#binomial
g2=sample(1:2,100,replace=TRUE)
fit2=glmnet(x,g2,family="binomial")
#multinomial
g4=sample(1:4,100,replace=TRUE)
fit3=glmnet(x,g4,family="multinomial")
fit3a=glmnet(x,g4,family="multinomial",type.multinomial="grouped")
#poisson
N=500; p=20
nzc=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(nzc)
f = x[,seq(nzc)]%*%beta
mu=exp(f)
y=rpois(N,mu)
fit=glmnet(x,y,family="poisson")
plot(fit)
pfit = predict(fit,x,s=0.001,type="response")
plot(pfit,y)
#Cox
set.seed(10101)
N=1000;p=30
nzc=p/3
x=matrix(rnorm(N*p),N,p)
beta=rnorm(nzc)
fx=x[,seq(nzc)]%*%beta/3
hx=exp(fx)
ty=rexp(N,hx)
tcens=rbinom(n=N,prob=.3,size=1)# censoring indicator
y=cbind(time=ty,status=1-tcens) # y=Surv(ty,1-tcens) with library(survival)
fit=glmnet(x,y,family="cox")
plot(fit)
# Sparse
n=10000;p=200
nzc=trunc(p/10)
x=matrix(rnorm(n*p),n,p)
# Zero out 85% of the entries, then store as a sparse Matrix.
iz=sample(1:(n*p),size=n*p*.85,replace=FALSE)
x[iz]=0
sx=Matrix(x,sparse=TRUE)
inherits(sx,"sparseMatrix")#confirm that it is sparse
beta=rnorm(nzc)
fx=x[,seq(nzc)]%*%beta
eps=rnorm(n)
y=fx+eps
px=exp(fx)
px=px/(1+px)
ly=rbinom(n=length(px),prob=px,size=1)
# Timing comparison: sparse vs dense input to glmnet.
system.time(fit1<-glmnet(sx,y))
system.time(fit2n<-glmnet(x,y))
|
d1284b5e76cff85cd580958a513a00c67b8598d7
|
1877590ba1981d9e117bf04359d6799f763fbcec
|
/Untitled.R
|
751a60cc85feb77d3ccf75481dc470685b64a4dc
|
[] |
no_license
|
mastreips/USPTO-Datamining-Scripts
|
61b0ba774046dfa0774ac07ec96cf1ee2f46a480
|
20de1a9d69b0e5c300c95c2ad24959b9fbdaa2ec
|
refs/heads/master
| 2021-01-10T07:15:49.497932
| 2015-09-23T14:30:23
| 2015-09-23T14:30:23
| 43,006,262
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 954
|
r
|
Untitled.R
|
# Scrape the Reed Tech USPTO bulk-data index page, save its links, then split a
# downloaded bulk XML file into individual patent documents and parse them.
library(XML)
library(RCurl)
library(xlsx)
URL <- getURL("http://patents.reedtech.com/parbft.php")
rt <- readHTMLTable(URL, header = TRUE)
rt
url <- "http://patents.reedtech.com/parbft.php"
doc <- htmlParse(url)
# All hyperlink targets on the index page.
links <- xpathSApply(doc, "//a/@href")
free(doc)
links
write(links, file="upto_links.txt")
# wget --no-proxy -i upto_links_p2.txt
# The bulk file concatenates many XML documents; each starts with its own
# XML declaration, so those lines mark the document boundaries.
lines <- readLines("ipa150917.xml")
start <- grep('<?xml version="1.0" encoding="UTF-8"?>',lines,fixed=T)
end <- c(start[-1]-1,length(lines))
# NOTE(review): library(XML) is already loaded above; this second call is redundant.
library(XML)
# Parse the i-th embedded XML document from the line ranges computed above.
get.xml <- function(i) {
txt <- paste(lines[start[i]:end[i]],collapse="\n")
# print(i)
xmlTreeParse(txt,asText=T)
# return(i)
}
docs <- lapply(1:10,get.xml) #first batch of 10
class(docs[[1]])
# [1] "XMLInternalDocument" "XMLAbstractDocument"
#xml <- xmlParse("ipa150917.xml")
#docs <- lapply(1:length(start),get.xml) #parse all docs
# Pull the first <city> value out of each parsed document.
sapply(docs,function(doc) xmlValue(doc["//city"][[1]]))
# NOTE(review): write() on a parsed XML object may not serialize as intended --
# saveXML() is the usual way to write an XMLInternalDocument; verify the output.
write(docs[1], file="test_parse.xml")
docs[1]
|
467a44e21f0a28f9ca173e007a22ed82ac4e7f90
|
e9a2b3624a6117ebc23d5d4131a9c95a0b26c78f
|
/hw3/mimiciv_shiny/global.R
|
9ab7b66832a943833b1e72c1210b9decb4b3d044
|
[] |
no_license
|
Larryzza/biostat-203b-2021-winter
|
cff0c8c81d558eb6d4dc70439bab5dc5dc5fd62c
|
4d09738b28106818c9b1626507a597b59e69b9ed
|
refs/heads/main
| 2023-03-29T11:54:27.248471
| 2021-03-27T04:46:45
| 2021-03-27T04:46:45
| 329,160,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,913
|
r
|
global.R
|
library(DT)
library(shiny)
library(dplyr)
library(plotly)
library(tableone)
library(data.table)
library(tidyverse)
library(shinydashboard)
library(shinyWidgets)
library(shinycssloaders)
library(wesanderson)
# Three colors from the BottleRocket1 palette, used across the app's plots.
cores <- wes_palette("BottleRocket1", 5)[3:5]
# Pre-built ICU cohort data set (must sit next to this global.R).
icu_cohort <- readRDS("icu_cohort.rds")
# NOTE(review): mutate_at() is superseded in current dplyr; across() is the
# modern equivalent, but this form still works.
icu_cohort <- icu_cohort %>% mutate_at(vars(hospital_expire_flag), as.factor)
# Build the HTML for one clickable dashboard tile.
#
# texto: tile label shown to the user.
# cor:   background color (any CSS color value).
# icon:  image file name, resolved under www/img/.
# id:    Shiny input id; the <a> is a shiny "action-button" so clicks are
#        observable as input$<id> on the server side.
# Returns: an HTML() tag string ready to drop into the UI.
tab_zza <- function(texto, cor, icon, id){
HTML(paste0('<a id="', id,'" href="#" class="action-button">
<div class = "voronoys-block" style = "background-color:', cor, ';">
<span class = "name">', texto, '</span>
<div class="img_block">
<div class="img_block_conteiner">
<img src="img/',icon,'">
</div>
</div>
</div></a>'))
}
# Demographic/administrative columns of the cohort, split below into
# categorical (character) and numeric groups for the UI selectors.
demo <- c("first_careunit", "last_careunit", "gender", "anchor_age",
"anchor_year_group", "los", "admission_type", "admission_location",
"discharge_location", "ethnicity", "edregtime",
"insurance", "language", "marital_status", "anchor_year",
"edouttime", "hospital_expire_flag", "age_at_adm")
demo_groups <- icu_cohort[,demo] %>% select_if(is.character) %>% names()
demo_value <- icu_cohort[,demo] %>% select_if(is.numeric) %>% names()
# Laboratory measurement columns.
lab <- c("bicarbonate", "calcium", "chloride",
"creatinine", "glucose", "magnesium",
"potassium", "sodium", "hematocrit", "wbc", "lactate")
# Vital-sign columns.
vital <- c("heart_rate", "non_invasive_blood_pressure_systolic",
"non_invasive_blood_pressure_mean",
"respiratory_rate", "temperature_fahrenheit",
"arterial_blood_pressure_systolic",
"arterial_blood_pressure_mean")
# Source every UI tab definition under tabs/, excluding the server-side files.
tab_files <- list.files(path = "tabs", full.names = T, recursive = T)
tab_files <- tab_files[-grep(x = tab_files, pattern = "server")]
suppressMessages(lapply(tab_files, source))
|
8646acfdbb4084916bb4b158a95ebe8b6aeb2fb0
|
e3705c3a76fb0f4bacdae56518807dd81bf73e66
|
/Lab 4.R
|
05ea952b2e83a4fe2bc4fdb015a400b182257a5e
|
[] |
no_license
|
AnuC01/DataAnalytics2020_ANU_CHANDRASHEKAR
|
6acf6fbf6edbbdc46330c127d301ff418ecb66d3
|
3452c182ca6db2701ccc0a16de15eb6af5a5d486
|
refs/heads/master
| 2023-02-01T22:22:15.344015
| 2020-12-16T06:20:04
| 2020-12-16T06:20:04
| 294,528,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,410
|
r
|
Lab 4.R
|
#LAB 4
#Heatmap(), image(), and hierarchical clustering example
set.seed(12345)
help(par)
par(mar = rep(0.2,4))
# 40 x 10 matrix of standard-normal noise.
data_Matrix <- matrix(rnorm(400), nrow = 40)
# image() draws rows bottom-up, so reverse the rows to match heatmap orientation.
image(1:10, 1:40, t(data_Matrix)[,nrow(data_Matrix):1])
par(mar=rep(0.2,4))
heatmap(data_Matrix)
set.seed(678910)
# Add a mean shift to columns 6-10 of a random half of the rows,
# creating a two-group pattern for the clustering to find.
for (i in 1:40) {
  coin_Flip <- rbinom(1, size = 1, prob = 0.5)
  if (coin_Flip) {
    data_Matrix[i, ] <- data_Matrix[i, ] + rep(c(0,3), each = 5)
  }
}
par (mar=rep(0.2,4))
image(1:10, 1:40, t(data_Matrix)[,nrow(data_Matrix):1])
par(mar=rep(0.2,4))
heatmap(data_Matrix)
# Order rows by hierarchical clustering, then inspect row/column means.
hh <- hclust(dist(data_Matrix))
data_Matrix_Ordered <- data_Matrix[hh$order,]
par(mfrow = c(1,3))
image(t(data_Matrix_Ordered)[,nrow(data_Matrix_Ordered):1])
plot(rowMeans(data_Matrix_Ordered), 40:1, xlab = "The Row Mean", ylab = "Row", pch = 19)
plot(colMeans(data_Matrix_Ordered), xlab = "Column", ylab = "Column Mean", pch = 19)
#Trees for the Titanic
# NOTE(review): data(Titanic) yields a contingency table, not a data frame;
# the tree fits below run on that table as in the original lab handout.
#rpart
data(Titanic)
library(rpart)
library(rpart.plot)
survivedR <- rpart(Survived~.,data = Titanic)
survivedR
plot(survivedR)
text(survivedR)
#ctree
data(Titanic)
# library() (not require()) so a missing package fails loudly.
library(party)
survivedC <- ctree(Survived~., data = Titanic)
plot(survivedC)
#hclust
data(Titanic)
# Fixed: the original call had a trailing empty argument, hclust(dist(Titanic),).
titanicH <- hclust(dist(Titanic))
plot(titanicH)
#randomForest
data(Titanic)
library(randomForest)
survivedRF <- randomForest(Survived~., data = Titanic)
print(survivedRF)
importance(survivedRF)
plot(survivedRF)
|
ac4fdeb4cf0bbd5dde002c900053143c63eaa553
|
7756e8d3711b5cfed11011b2089ca17563734ee9
|
/exercise-2/exercise.R
|
6ad253c5004a6a85857e135abfbfd9168ec990ce
|
[
"MIT"
] |
permissive
|
chiuyt19/m7-functions
|
5827d586cdfb8df66727743caa5baa384766d1f3
|
97587b289ea2f55329daef6bd66608d9422b9ded
|
refs/heads/master
| 2021-01-19T01:18:32.327373
| 2017-04-06T01:07:48
| 2017-04-06T01:07:48
| 87,237,872
| 0
| 0
| null | 2017-04-04T21:43:05
| 2017-04-04T21:43:04
| null |
UTF-8
|
R
| false
| false
| 1,376
|
r
|
exercise.R
|
# Exercise 2: writing and executing functions (II)
# Write a function `CompareLength` that takes in 2 vectors, and returns the sentence:
# "The difference in lengths is N"
# Compare the lengths of two vectors.
#
# a, b: any two vectors.
# Returns the sentence "The difference in lengths is N" where N is the
# absolute difference between the two lengths.
CompareLength <- function(a, b) {
  dif <- abs(length(a) - length(b))
  # paste() already separates its arguments with a single space; the original
  # literal ended in a space too, producing a double space before N.
  paste("The difference in lengths is", dif)
}
# Pass two vectors of different length to your `CompareLength` function
CompareLength(c(1:30), c(1:70))
# Write a function `DescribeDifference` that will return one of the following statements:
# "Your first vector is longer by N elements"
# "Your second vector is longer by N elements"
# Describe which of two vectors is longer.
#
# a, b: any two vectors.
# Returns "Your first/second vector is longer by N elements", or an
# equal-length message. The original returned the *builtin function* `all`
# when the lengths were equal (the local was never assigned), and double
# spaces around N; both are fixed here.
DescribeDifference <- function(a, b) {
  dif <- abs(length(a) - length(b))
  if (length(a) > length(b)) {
    msg <- paste("Your first vector is longer by", dif, "elements")
  } else if (length(b) > length(a)) {
    msg <- paste("Your second vector is longer by", dif, "elements")
  } else {
    msg <- "Your vectors are the same length"
  }
  msg
}
# Pass two vectors to your `DescribeDifference` function
DescribeDifference(c(1:50), c(1:88))
### Bonus ###
# Rewrite your `DescribeDifference` function to tell you the name of the vector which is longer
# Bonus version: report the *name* of the longer vector.
#
# a, b: any two vectors.
# deparse(substitute(x)) recovers the expression the caller passed, so the
# message contains the caller's variable name. Fixes from the original:
# single spacing (paste() already separates with one space) and an explicit
# equal-length branch (the original fell through and returned the builtin
# function `all`).
DescribeDifference <- function(a, b) {
  dif <- abs(length(a) - length(b))
  if (length(a) > length(b)) {
    msg <- paste(deparse(substitute(a)), "is longer by", dif)
  } else if (length(b) > length(a)) {
    msg <- paste(deparse(substitute(b)), "is longer by", dif)
  } else {
    msg <- "Both vectors have the same length"
  }
  msg
}
v1 <- c(1:50)
v2 <- c(1:88)
DescribeDifference(v1, v2)
|
fdf4e20804573392f681498e56e9b07291d1bcf7
|
2eaac83849c8f97d02c613a84aba44d7f73ab5fb
|
/02-07-19 media.R
|
59cd2bb1ef46fad31f746ecd68983fb9a57fb7d8
|
[] |
no_license
|
whipson/tidytuesday
|
ef47b117c7d77cff809d2f091a94884a2a2440fd
|
73d1aa89c73258687133f0c18ed9c03ad35476e5
|
refs/heads/master
| 2021-07-11T16:29:42.368183
| 2020-07-15T13:15:02
| 2020-07-15T13:15:02
| 179,088,594
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,628
|
r
|
02-07-19 media.R
|
library(tidyverse)
library(ggalluvial)
library(extrafont)
# TidyTuesday 2019-07-02: media franchise revenue data.
media <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-07-02/media_franchises.csv")
# Collapse rare original media into "Other" and shorten the revenue labels.
media_clean <- media %>%
mutate(original_media_lumped = fct_lump(original_media, 8),
revenue_category = case_when(revenue_category == "Book sales" ~ "Books",
revenue_category == "Box Office" ~ "Box Office",
revenue_category == "Comic or Manga" ~ "Comics",
revenue_category == "Home Video/Entertainment" ~ "Home Video",
revenue_category == "Merchandise, Licensing & Retail" ~ "Merchandise",
revenue_category == "Music" ~ "Music",
revenue_category == "TV" ~ "TV",
revenue_category == "Video Games/Games" ~ "Video Games"))
# Count franchises and average revenue per (original medium, revenue stream).
media_grouped <- media_clean %>%
group_by(original_media_lumped, revenue_category) %>%
summarize(total = n(),
average_revenue = mean(revenue))
# Reshape to lodes form for the alluvial plot (axes: medium -> revenue stream).
media_lodes <- to_lodes_form(media_grouped, axes = 1:2)
library(RColorBrewer)
# Expand the 7-color Set1 palette to cover all 17 strata.
number_colors <- 17
my_colors <- colorRampPalette(brewer.pal(7, "Set1"))(number_colors)
media_lodes %>%
ggplot(aes(x = x, y = total, stratum = stratum, alluvium = alluvium,
fill = stratum, label = stratum)) +
scale_x_discrete(expand = c(.05, .05), labels = c("Original Format", "Revenue Streams")) +
scale_fill_manual(values = my_colors) +
geom_flow(width = 1/6) +
geom_stratum(alpha = .5, width = 2/12, size = 1, fill = NA, color = "grey") +
geom_text(stat = "stratum", size = 4, family = "Rockwell") +
labs(title = "The Evolution of Popular Media",
subtitle = "This plot combines data from over 300 popular media franchises. On the left are the original media,
on the left are extant revenue streams. Box size and line width is proportional to total franchises in that category.",
caption = "Source: Wikipedia") +
theme_minimal() +
theme(legend.position = "none",
axis.line = element_blank(),
panel.background = element_blank(),
panel.grid = element_blank(),
axis.text.x = element_text(vjust = 5, size = 14),
axis.title.x = element_blank(),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
text = element_text(family = "Rockwell"),
plot.title = element_text(size = 32),
plot.subtitle = element_text(size = 12))
ggsave("popular_media.png", width = 9.97)
|
2b77ee04cd90090b53af718d34ff87e231109c99
|
65b5014564a796bc2e438367f4456607ecc33fa9
|
/man/as_url.Rd
|
2c82819c512fc6834eff3624b44d04866dacdf55
|
[] |
no_license
|
liao961120/pttR
|
73a0cfc904cd7aeec66016e54a4dbb14b45f9e2d
|
c439aa0c1334a1f23a0e59dd59539a3f478a6ef4
|
refs/heads/master
| 2021-06-05T09:57:29.527674
| 2019-12-12T08:57:29
| 2019-12-12T08:57:29
| 144,726,983
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 629
|
rd
|
as_url.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ptt-handy.R
\name{as_url}
\alias{as_url}
\title{Turn PTT board name to URL}
\usage{
as_url(x, pre = "https://www.ptt.cc/bbs/")
}
\arguments{
\item{x}{Character. A board name or a partial URL (ending in
\code{.html}) with base URL removed.}
\item{pre}{Character. A base URL. Defaults to the URL of
\href{https://www.ptt.cc/bbs/}{PTT Web}.}
}
\description{
A wrapper of \code{\link[base]{paste0}} to turn board names
or partial post URLs (with the base URL removed) back into the original URLs.
}
\examples{
as_url("gossiping")
as_url("Gossiping/M.1534490816.A.A3A.html")
}
|
aaaaeb4388baf6bdab2719814e76e547fbbd3e29
|
84e7c052fae39843d3f67be78049e175ea8c441c
|
/R/ALAdistributions.R
|
ecc7ab741c5bbc2b59d4f57574c37e62ea9f0fba
|
[] |
no_license
|
AngeVar/GLAHD
|
935b7d346dc9fb4cf8a8b552dda1400fe7100fda
|
80ff898cd15b2e670ea0ed7c31db83a69b657faf
|
refs/heads/master
| 2020-05-22T07:57:11.913885
| 2017-07-04T06:01:13
| 2017-07-04T06:01:13
| 36,908,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,141
|
r
|
ALAdistributions.R
|
setwd("W:/WorkingData/GHS39/GLAHD/Varhammar_A/")
#load some libraries
library(raster)
library(rgdal)
library(stringr)
library(scales)
#load file
# Seed provenance locations for the GLAHD taxa.
seed_atsc<- read.csv("GLAHDseed.csv")
#load known distribution
# ALA occurrence records; keep only those with <5 km coordinate uncertainty.
d <- read.csv("distributions_bioclimv2.csv")
dist <- subset(d,d$Coordinate.Uncertainty.in.Metres...parsed<5000)
#plot final
# Per-taxon provenance subsets and matching ALA distribution subsets.
cam<- subset(seed_atsc, Taxa == "camaldulensis")
camdist <- subset(dist, Species...matched == "Eucalyptus camaldulensis")
ter<- subset(seed_atsc, Taxa == "tereticornis")
terdist <- subset(dist, Species...matched == "Eucalyptus tereticornis")
bra<- subset(seed_atsc, Taxa == "brassiana")
bradist <- subset(dist, Species...matched == "Eucalyptus brassiana")
pel<- subset(seed_atsc, Taxa == "pellita")
peldist <- subset(dist, Species...matched == "Eucalyptus pellita")
# NOTE(review): the leading space in " platyphylla" matches no rows unless the
# CSV really stores the name with that space -- verify against GLAHDseed.csv.
pla<- subset(seed_atsc, Taxa == " platyphylla")
pladist <- subset(dist, Species...matched == "Eucalyptus platyphylla")
bot<- subset(seed_atsc, Taxa == "botryoides")
botdist <- subset(dist, Species...matched == "Eucalyptus botryoides")
lon<- subset(seed_atsc, Taxa == "longifolia")
londist <- subset(dist, Species...matched == "Eucalyptus longifolia")
smi<- subset(seed_atsc, Taxa == "smithii")
smidist <- subset(dist, Species...matched == "Eucalyptus smithii")
#load data
# WorldClim bioclimatic layers at 2.5 arc-minute resolution (downloaded/cached
# under the given path). Only BIO5, BIO10 and BIO18 are used below; the other
# subset lines are kept commented out.
biodat <- getData("worldclim", var="bio", res=2.5, path="//ad.uws.edu.au/dfshare/HomesHWK$/30034792/My Documents/Projects/ARC/Seed/T data")
#subset data
#biodat1 <- subset(biodat,1)
#biodat4 <- subset(biodat,4)
biodat5 <- subset(biodat,5)
#biodat6 <- subset(biodat,6)
#biodat8 <- subset(biodat,8)
#biodat9 <- subset(biodat,9)
biodat10 <- subset(biodat,10)
#biodat12 <- subset(biodat,12)
#biodat13 <- subset(biodat,13)
#biodat14 <- subset(biodat,14)
#biodat15 <- subset(biodat,15)
biodat18 <- subset(biodat,18)
#biodat19 <- subset(biodat,19)
#clip data to E Australia
# Bounding box covering eastern Australia.
YbrevRange <- extent(141.00, 154.00, -44, -10.0)
#biodat.oz1 <- crop(biodat1,YbrevRange)
#biodat.oz4 <- crop(biodat4,YbrevRange)
biodat.oz5 <- crop(biodat5,YbrevRange)
#biodat.oz6 <- crop(biodat6,YbrevRange)
#biodat.oz8 <- crop(biodat8,YbrevRange)
#biodat.oz9 <- crop(biodat9,YbrevRange)
biodat.oz10 <- crop(biodat10,YbrevRange)
#biodat.oz12 <- crop(biodat12,YbrevRange)
#biodat.oz13 <- crop(biodat13,YbrevRange)
#biodat.oz14 <- crop(biodat14,YbrevRange)
#biodat.oz15 <- crop(biodat15,YbrevRange)
biodat.oz18 <- crop(biodat18,YbrevRange)
#biodat.oz19 <- crop(biodat19,YbrevRange)
#plot provenances on map
# Background raster is BIO10 (stored as degrees C x 10, hence the /10).
windows(9,12)
plot(biodat.oz10/10,main="Seed Provenances",xlim=c(144.1,144.3),ylim=c(-14.4,-14))
# One point layer per taxon; fill colors match the legend below.
points(x=bra$lon,y=bra$lat,col="black", bg="yellow",cex=1.75,pch=21)
points(x=pel$lon,y=pel$lat,col="black", bg="orange",cex=1.75,pch=21)
# E. platyphylla provenance is hard-coded rather than taken from `pla`.
points(x=145.15,y=-16.58,col="black", bg="red",cex=1.75,pch=21)
points(x=bot$lon,y=bot$lat,col="black", bg="dodgerblue",cex=1.75,pch=21)
points(x=lon$lon,y=lon$lat,col="black", bg="cyan",cex=1.75,pch=21)
points(x=smi$lon,y=smi$lat,col="black", bg="purple",cex=1.75,pch=21)
points(x=cam$lon,y=cam$lat,col="black", bg="black",cex=1.5,pch=21)
points(x=ter$lon,y=ter$lat,col="black", bg="white",cex=1.5,pch=21)
legend("topright",legend=c("E. camaldulensis","E. tereticornis", NA, "E. brassiana","E. pellita","E. platyphylla",NA,"E. botryoides","E. longifolia", "E. smithii")
,col=c("black","black",NA,"black","black","black",NA,"black","black","black"),
pt.bg=c("black","white",NA,"yellow","orange","red",NA,"dodgerblue","cyan","purple"),pch=21,cex=0.75, pt.cex=1.2, bg="white")
dev.copy2pdf(file="Seed Provenances.pdf")
#Plot individual species
# 3x3 panel: overview map plus one ALA-distribution panel per taxon.
#Remove E. tereticornis ssp. mediana
terdist<- subset(terdist, Latitude...processed >-37.6333)
windows(20,33)
par(mfrow=c(3,3), mar=c(2,2,2,1), oma=c(2,1,1,6))
# Panel 1: all provenances on the BIO10 background (degrees C x 10, hence /10).
plot(biodat.oz10/10,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=bra$lon,y=bra$lat,col="black", bg="yellow",cex=1.75,pch=21)
points(x=pel$lon,y=pel$lat,col="black", bg="orange",cex=1.75,pch=21)
points(x=145.15,y=-16.58,col="black", bg="red",cex=1.75,pch=21)
points(x=bot$lon,y=bot$lat,col="black", bg="dodgerblue",cex=1.75,pch=21)
points(x=lon$lon,y=lon$lat,col="black", bg="cyan",cex=1.75,pch=21)
points(x=smi$lon,y=smi$lat,col="black", bg="purple",cex=1.75,pch=21)
points(x=cam$lon,y=cam$lat,col="black", bg="black",cex=1.5,pch=21)
points(x=ter$lon,y=ter$lat,col="black", bg="white",cex=1.5,pch=21)
# Per-species panels: translucent ALA occurrences plus the provenance point.
plot(biodat.oz10/10,main="E. camaldulensis",cex.main=1.5,font.main=3, xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=camdist$Longitude...processed,y=camdist$Latitude...processed,col=alpha("grey85",0.3), bg=alpha("grey85",0.3),cex=1,pch=21)
points(x=cam$lon,y=cam$lat,col="black", bg="black",cex=1.5,pch=21)
plot(biodat.oz10/10,main="E. tereticornis",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=terdist$Longitude...processed,y=terdist$Latitude...processed,col=alpha("grey85",0.3), bg=alpha("grey85",0.3),cex=1,pch=21)
points(x=ter$lon,y=ter$lat,col="black", bg="white",cex=1.5,pch=21)
plot(biodat.oz10/10,main="E. platyphylla",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=pladist$Longitude...processed,y=pladist$Latitude...processed,col=alpha("red",0.3), bg=alpha("red",0.3),cex=1,pch=21)
points(x=145.15,y=-16.58,col="black", bg="red",cex=1.75,pch=21)
plot(biodat.oz10/10,main="E. brassiana",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=bradist$Longitude...processed,y=bradist$Latitude...processed,col=alpha("yellow",0.3), bg=alpha("yellow",0.3),cex=1,pch=21)
points(x=bra$lon,y=bra$lat,col="black", bg="yellow",cex=1.75,pch=21)
plot(biodat.oz10/10,main="E. pellita",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
# Fixed: this panel previously plotted bradist (copy-paste from the brassiana
# panel); peldist was built above but never used.
points(x=peldist$Longitude...processed,y=peldist$Latitude...processed,col=alpha("orange",0.3), bg=alpha("orange",0.3),cex=1,pch=21)
points(x=pel$lon,y=pel$lat,col="black", bg="orange",cex=1.75,pch=21)
plot(biodat.oz10/10,main="E. botryoides",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=botdist$Longitude...processed,y=botdist$Latitude...processed,col=alpha("dodgerblue",0.3), bg=alpha("dodgerblue",0.3),cex=1,pch=21)
points(x=bot$lon,y=bot$lat,col="black", bg="dodgerblue",cex=1.75,pch=21)
plot(biodat.oz10/10,main="E. longifolia",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=londist$Longitude...processed,y=londist$Latitude...processed,col=alpha("cyan",0.3), bg=alpha("cyan",0.3),cex=1,pch=21)
points(x=lon$lon,y=lon$lat,col="black", bg="cyan",cex=1.75,pch=21)
plot(biodat.oz10/10,main="E. smithii",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=FALSE)
points(x=smidist$Longitude...processed,y=smidist$Latitude...processed,col=alpha("purple",0.3), bg=alpha("purple",0.3),cex=1,pch=21)
points(x=smi$lon,y=smi$lat,col="black", bg="purple",cex=1.75,pch=21)
# Shared color-scale legend drawn over the panel grid.
par(mfrow=c(1, 2), mar=c(0, 0, 0, 1), oma=c(0, 0, 0, 2),new=FALSE, xpd=TRUE)
plot(biodat.oz10/10, legend.only=TRUE,legend.shrink=0.75,legend.width=1.5,
axis.args=list(at=seq(10,30,5),labels=seq(10, 30, 5), cex.axis=0.8),
legend.args=list(text='Mean Temperature of Warmest Quarter', side=4, font=2, line=2, cex=0.8))
dev.copy2pdf(file="Seed Provenances + distribution2.pdf")
#plot heatwave provenances on map
# Heatwave-experiment subset: southern taxa only, zoomed to SE Australia.
windows(6,8)
plot(biodat.oz10/10,main="Seed Provenances",xlim=c(144.00, 154.00),ylim=c(-44, -32.0))
points(x=bot$lon,y=bot$lat,col="black", bg="dodgerblue",cex=1.75,pch=21)
points(x=smi$lon,y=smi$lat,col="black", bg="purple",cex=1.75,pch=21)
# Hard-coded cam/ter heatwave provenances (not taken from the data frames).
points(x=146.47,y=-36.36,col="black", bg="black",cex=1.75,pch=21)
points(x=150.07,y=-35.4,col="black", bg="white",cex=1.75,pch=21)
legend("bottomright",legend=c("E. camaldulensis","E. tereticornis", NA,"E. botryoides", "E. smithii")
,col=c("black","black",NA,"black","black"),
pt.bg=c("black","white",NA,"dodgerblue","purple"),pch=21,cex=1.2, pt.cex=1.2, bg="white")
dev.copy2pdf(file="Seed Provenances Heatwave.pdf")
#Plot individual species Heatwave
# 2x3 panel: overview plus one distribution panel per heatwave taxon.
windows(18,17)
par(mfrow=c(2,3), mar=c(2,2,2,1), oma=c(2,1,1,6))
plot(biodat.oz10/10,xlim=c(145,155), ylim=c(-44,-32),legend=F)
points(x=146.47,y=-36.36,col="black", bg="black",cex=2,pch=21)
points(x=150.07-0.1,y=-35.4-0.1,col="black", bg="white",cex=2,pch=21)
points(x=bot$lon,y=bot$lat,col="black", bg="dodgerblue",cex=2,pch=21)
points(x=smi$lon,y=smi$lat,col="black", bg="purple",cex=2,pch=21)
plot(biodat.oz10/10,main="E. camaldulensis",cex.main=1.5,font.main=3, xlim=c(145,155), ylim=c(-44,-32),legend=F)
points(x=camdist$Longitude...processed,y=camdist$Latitude...processed,col=alpha("grey25",0.3), bg=alpha("grey",0.3),cex=1,pch=21)
points(x=146.47,y=-36.36,col="black", bg="black",cex=2,pch=21)
plot(biodat.oz10/10,main="E. tereticornis",cex.main=1.5 ,font.main=3,xlim=c(145,155), ylim=c(-44,-32),legend=F)
points(x=terdist$Longitude...processed,y=terdist$Latitude...processed,col=alpha("grey55",0.3), bg=alpha("white",0.3),cex=1,pch=21)
points(x=150.07,y=-35.4,col="black", bg="white",cex=2,pch=21)
plot(biodat.oz10/10,main="E. botryoides",cex.main=1.5 ,font.main=3,xlim=c(145,155), ylim=c(-44,-32),legend=F)
points(x=botdist$Longitude...processed,y=botdist$Latitude...processed,col=alpha("dodgerblue",0.3), bg=alpha("dodgerblue",0.3),cex=1,pch=21)
points(x=bot$lon,y=bot$lat,col="black", bg="dodgerblue",cex=2,pch=21)
plot(biodat.oz10/10,main="E. smithii",cex.main=1.5 ,font.main=3,xlim=c(145,155), ylim=c(-44,-32),legend=F)
points(x=smidist$Longitude...processed,y=smidist$Latitude...processed,col=alpha("purple",0.3), bg=alpha("purple",0.3),cex=1,pch=21)
points(x=smi$lon,y=smi$lat,col="black", bg="purple",cex=2,pch=21)
# Shared color-scale legend.
par(mfrow=c(1, 2), mar=c(0, 0, 0, 1), oma=c(0, 0, 0, 2),new=FALSE, xpd=TRUE)
plot(biodat.oz10/10, legend.only=TRUE,legend.shrink=0.75,legend.width=1.5,
axis.args=list(at=seq(10,30,5),labels=seq(10, 30, 5), cex.axis=0.8),
legend.args=list(text='Mean Temperature of Warmest Quarter', side=4, font=2, line=2, cex=0.8))
dev.copy2pdf(file="Seed Provenances Heatwave + distribution.pdf")
|
87eb1e60324202ae373b26db8384eb0687e894d5
|
8d876c616b3021e9359fefb4f4e7d100156ba144
|
/tests/testthat/test-history.R
|
099ef36ebed3656536beaa15e6c25f4b0034b801
|
[] |
no_license
|
pkq/covrpage
|
4feb664baffe6a2706e3b38f27ef812ee25ff577
|
1265cd29681b1d00a341011d063430aca5344c29
|
refs/heads/master
| 2021-06-23T22:58:49.851707
| 2021-06-22T01:39:06
| 2021-06-22T01:39:06
| 133,855,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,446
|
r
|
test-history.R
|
testthat::context("coverage history")
# Both blocks below build a scratch git repository from the bundled benchmark
# package, run covrpage() so there are two commits of output, and then read
# the history back through the public log functions.
# NOTE(review): setwd(td) is only restored at the end of the block, so a
# failure mid-block leaves the working directory changed — consider
# withr::local_dir(); confirm before changing test behavior.
testthat::describe("fetch md files", {
  testthat::skip_on_ci()
  # Copy the benchmark package into tempdir() and initialise it as a git repo.
  file.copy("../assets/covrpage_benchmark/covrpage", tempdir(), recursive = TRUE)
  td <- file.path(tempdir(), "covrpage")
  wd <- getwd()
  setwd(td)
  repo <- git2r::init()
  git2r::add(repo, path = ".")
  git2r::commit(repo, message = "init commit")
  # Generate covrpage output and commit it so covrpage_log() has history.
  covrpage::covrpage(preview = FALSE, update_badge = FALSE)
  git2r::add(repo, path = ".")
  git2r::commit(repo, message = "new commit")
  mds <- covrpage::covrpage_log()
  it("outer class", {
    testthat::expect_true(inherits(mds, "list"))
  })
  it("inner class", {
    testthat::expect_true(inherits(mds[[1]], "character"))
  })
  # Restore the working directory and remove the scratch repo.
  setwd(wd)
  unlink(td, recursive = TRUE, force = TRUE)
})
testthat::describe("fetch covr history", {
  testthat::skip_on_ci()
  # Same scratch-repo setup as above.
  file.copy("../assets/covrpage_benchmark/covrpage", tempdir(), recursive = TRUE)
  td <- file.path(tempdir(), "covrpage")
  wd <- getwd()
  setwd(td)
  repo <- git2r::init()
  git2r::add(repo, path = ".")
  git2r::commit(repo, message = "init commit")
  covrpage::covrpage(preview = FALSE, update_badge = FALSE)
  git2r::add(repo, path = ".")
  git2r::commit(repo, message = "new commit")
  ret <- covrpage::covr_log()
  it("class", {
    testthat::expect_true(inherits(ret, "data.frame"))
  })
  it("dim", {
    # The coverage log is expected to expose exactly three columns.
    testthat::expect_equal(ncol(ret), 3)
  })
  setwd(wd)
  unlink(td, recursive = TRUE, force = TRUE)
})
|
fda2e276e0c5fc37e33d52fce0a4ee1353c7bd9f
|
9541504f1b8ce81b7627e4a1068baf9f49745cbd
|
/workspace/r-basic/03/data-exploration-2.R
|
9463c5ebd720574089abb8497830a66df2893c19
|
[] |
no_license
|
hsson428/ssac-academy
|
781954f0a0c9ba9b98e797cb49937e23cc258974
|
d332cad5e29732a8e079d29498df5a9e53bf452c
|
refs/heads/master
| 2023-03-10T04:18:07.318823
| 2021-02-22T06:53:19
| 2021-02-22T06:53:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,341
|
r
|
data-exploration-2.R
|
# package import
library(dplyr)
library(ggplot2)
# 1. Rename columns
df_raw <- data.frame(var1 = c(1, 2, 1),
                     var2 = c(2, 3, 2))
df_raw
df_new <- df_raw
# dplyr::rename takes new_name = old_name pairs and returns a copy.
df_new2 <- rename(df_new, v1 = var1, v2 = var2)
df_new2
colnames(df_raw)
# Base-R alternative: assign directly into colnames() (modifies in place).
colnames(df_raw) <- c("vx", 'vy')
df_raw
#####
copied_mpg <- mpg
copied_mpg
renamed_mpg <- rename(copied_mpg, city = cty, highway = hwy)
renamed_mpg
# 2. Add a new column
mpg$mean <- (mpg$cty + mpg$hwy) / 2 # combined fuel economy
mpg
summary(mpg$mean)
hist(mpg$mean)
# Derive a pass/fail flag from the combined economy.
mpg$test <- ifelse(mpg$mean >= 20, "pass", "fail")
mpg
table(mpg$test) # frequency table
qplot(mpg$test) # shorthand version of ggplot
ggplot(mpg, aes(x = test)) + geom_histogram(stat="count")
# Nested ifelse assigns a letter grade from the combined economy.
mpg$grade <- ifelse(mpg$mean >= 30, "A",
                    ifelse(mpg$mean >= 25, "B",
                           ifelse(mpg$mean >= 20, "C", "D")))
mpg
table(mpg$grade)
qplot(mpg$grade)
select(mpg, mean, test, grade)
select(mpg, -(displ:fl))
# Filter rows
exam <- read.csv("data-files/csv_exam.csv")
exam
exam %>% filter(class == 1)
exam %>% filter(class != 1)
exam %>% filter(math > 50)
exam %>% filter(math < 50)
exam %>% filter(english >= 80)
exam %>% filter(english <= 80)
exam %>% filter(class == 1 & math > 50)
exam %>% filter(class == 2 & english >= 80)
exam %>% filter(math >= 90 | english >= 90)
exam %>% filter(class == 1 | class == 2 | class == 3)
# Select columns
exam %>% select(english)
exam %>% select(class, math, english)
exam %>% select(-math)
exam %>% select(-math, -english)
exam %>%
  filter(class == 1) %>%
  select(english)
exam %>%
  select(id, math) %>%
  head(10)
# Sort
exam %>% arrange(class, desc(math))
# Add new columns
exam %>%
  mutate(total = math + english + science) %>%
  head(5)
exam %>%
  mutate(total = math + english + science,
         mean = total / 3) %>%
  head(5)
# Aggregate functions
exam %>%
  group_by(class) %>%
  summarise(mean_math = mean(math))
####
# Top-5 SUV manufacturers by mean combined economy.
mpg %>%
  group_by(manufacturer) %>%
  filter(class == 'suv') %>%
  mutate(tot = (cty + hwy) / 2) %>%
  summarise(mean_tot = mean(tot)) %>%
  arrange(desc(mean_tot)) %>%
  head(5)
# Joins
test1 <- data.frame(id = c(1, 2, 3, 4, 5),
                    midterm = c(60, 80, 70, 90, 85))
test2 <- data.frame(id = c(1, 2, 3, 4, 5),
                    final = c(70, 83, 65, 95, 80))
test1
test2
total <- left_join(test1, test2, by = 'id')
total
name <- data.frame(class = c(1, 2, 3, 4, 5),
                   instructor = c("kim", "lee", "park", "choi", "jung"))
name
exam
exam_new <- left_join(exam, name, by = "class")
exam_new
# Stacking rows with bind_rows (disjoint ids).
group_a <- data.frame(id = c(1, 2, 3, 4, 5),
                      test = c(60, 80, 70, 90, 85))
group_b <- data.frame(id = c(6, 7, 8, 9, 10),
                      test = c(70, 83, 65, 95, 80))
group_a
group_b
group_all <- bind_rows(group_a, group_b)
group_all
#####################################################################
## Practice exercises
?mpg
low_mpg <- mpg %>% filter(displ <= 4)
high_mpg <- mpg %>% filter(displ > 4)
mean(low_mpg$hwy)
mean(high_mpg$hwy)
mpg %>%
  mutate(displ_level = ifelse(displ <= 4, 'low', 'high')) %>%
  group_by(displ_level) %>%
  summarise(hwy_mean = mean(hwy))
mpg %>%
  filter(manufacturer %in% c('audi', 'toyota')) %>%
  group_by(manufacturer) %>%
  summarise(cty_mean = mean(cty))
mpg %>%
  filter(manufacturer %in% c('chevrolet', 'ford', 'honda')) %>%
  summarise(hwy_mean = mean(hwy))
mpg %>%
  select(class, cty, hwy) %>%
  head(10)
mpg %>%
  filter(class %in% c('suv', 'compact')) %>%
  select(class, cty, hwy) %>%
  group_by(class) %>%
  summarise(cty_mean = mean(cty))
mpg %>%
  filter(manufacturer == 'audi') %>%
  arrange(desc(hwy)) %>%
  head(5)
mpg %>%
  mutate(total = cty + hwy,
         mean = total / 2) %>%
  arrange(desc(mean)) %>%
  head(3)
mpg %>%
  group_by(class) %>%
  summarise(cty_mean = mean(cty)) %>%
  arrange(desc(cty_mean))
mpg %>%
  group_by(class) %>%
  summarise(hwy_mean = mean(hwy)) %>%
  arrange(desc(hwy_mean)) %>%
  head(5)
mpg %>%
  filter(class == 'compact') %>%
  group_by(manufacturer) %>%
  summarise(n = n()) %>%
  arrange(desc(n))
# Fuel-price lookup table joined onto mpg by fuel code.
fuel <- data.frame(fl = c('c', 'd', 'e', 'p', 'r'),
                   price_fl = c(2.35, 2.38, 2.11, 2.76, 2.22),
                   stringsAsFactors = FALSE)
fuel
mpg
?left_join
mpg %>%
  inner_join(fuel, by = 'fl') %>%
  select(model, fl, price_fl) %>%
  head
##############################################
# Missing-value handling
df <- data.frame(sex = c("M", "F", NA, "M", "F"),
                 score = c(5, 4, 3, 4, NA))
df
is.na(df)
table(is.na(df))
table(is.na(df$sex))
table(is.na(df$score))
# Keep only complete rows (filter drops NA comparisons).
df_nomiss <- df %>% filter(!is.na(score) & !is.na(sex))
df_nomiss
df_nomiss2 <- na.omit(df)
df_nomiss2
exam <- read.csv("data-files/csv_exam.csv")
exam
# Inject NAs to demonstrate na.rm behaviour.
exam[c(3, 8, 15), "math"] <- NA
exam
exam %>%
  summarise(mean_math = mean(math))
exam %>%
  summarise(mean_math = mean(math, na.rm=T))
# Mean imputation of the injected NAs.
math_mean <- mean(exam$math, na.rm=T)
exam$math <- ifelse(is.na(exam$math), math_mean, exam$math)
table(is.na(exam$math))
this_mpg <- as.data.frame(ggplot2::mpg)
this_mpg
this_mpg[c(65, 124, 131, 153, 212), "hwy"] <- NA
table(is.na(this_mpg$drv))
table(is.na(this_mpg$hwy))
this_mpg %>%
  filter(!is.na(hwy)) %>%
  group_by(drv) %>%
  summarise(hwy_mean = mean(hwy))
# Outlier handling: invalid codes become NA.
outlier <- data.frame(sex = c(1, 2, 1, 3, 2, 1),
                      score = c(5, 4, 3, 4, 2, 6))
outlier
table(outlier$sex)
table(outlier$score)
outlier$sex <- ifelse(outlier$sex %in% c(1, 2), outlier$sex, NA)
outlier
outlier$score <- ifelse(outlier$score %in% 1:5, outlier$score, NA)
outlier
outlier %>%
  filter(!is.na(sex) & !is.na(score)) %>%
  group_by(sex) %>%
  summarise(mean_score = mean(score))
# Outlier handling via boxplot fences.
boxplot(mpg$hwy, horizontal = TRUE)
hwy_range <- boxplot(mpg$hwy)$stats # returns the boxplot statistics
hwy_range
# Values outside the lower/upper whisker become NA.
mpg$hwy <- ifelse(mpg$hwy < hwy_range[1, 1] | mpg$hwy > hwy_range[5, 1],
                  NA, mpg$hwy)
table(is.na(mpg$hwy))
mpg %>%
  group_by(drv) %>%
  summarise(mean_hwy = mean(hwy, na.rm = TRUE))
# Fresh copy with injected bad codes/outliers, then clean both columns.
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(10, 14, 58, 93), "drv"] <- "k";
mpg[c(29, 43, 129, 203), "cty"] <- c(3, 4, 39, 42)
table(mpg$drv)
mpg$drv <- ifelse(mpg$drv %in% c('4', 'f', 'r'), mpg$drv, NA)
table(is.na(mpg$drv))
table(mpg$cty)
boxplot(mpg$cty)$stats
mpg$cty <- ifelse( (mpg$cty < 9) | (mpg$cty > 26), NA, mpg$cty )
table(is.na(mpg$cty))
|
e71be5fe94b28f6c7ae2f3dd76caaa04581e0744
|
572718492ee0e6f58cdaecd298b463ce9410545d
|
/man/TCGA.PAM50_genefu_hg18.Rd
|
2b063e09eddf0e9518567c5a4990450df8905b78
|
[] |
no_license
|
cgpu/bioconductor-Omic-Circos
|
6c8c0e770052b9ca66db3d61bd5ed84f70432613
|
6ba6184d9c641bca2d258309acbe80387b0e53e3
|
refs/heads/main
| 2023-01-06T11:22:03.230979
| 2020-11-03T10:07:27
| 2020-11-03T10:07:27
| 309,644,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 297
|
rd
|
TCGA.PAM50_genefu_hg18.Rd
|
%%
\name{TCGA.PAM50_genefu_hg18}
\alias{TCGA.PAM50_genefu_hg18}
\docType{data}
\title{
BRCA PAM50 gene list (hg18)
}
\description{
Breast cancer PAM 50 gene list (hg18).
}
\author{
%% ~~ possibly secondary sources and usages ~~
Ying Hu <yhu@mail.nih.gov>
Chunhua Yan <yanch@mail.nih.gov>
}
|
59aa80d452c603a16d94c1d036d9712dc8716210
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wavethresh/examples/InvBasis.wp.rd.R
|
5c9724d1a08fd9e477ee61c510f62ef391ae4580
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
InvBasis.wp.rd.R
|
# Auto-extracted example stub for wavethresh::InvBasis.wp; the example body
# is comment-only (it refers the reader to the InvBasis.wst example).
library(wavethresh)
### Name: InvBasis.wp
### Title: Invert a wp library representation with a particular basis spec
### Aliases: InvBasis.wp
### Keywords: smooth
### ** Examples
#
# The example in InvBasis.wst can be used here, but replaced wst by wp
#
|
c8e25bfa420ec887fa02afa06933a51c57e7958e
|
87985172c0206ec527473d1aa4db81c812de013f
|
/tests/testthat/test-presents.R
|
017615e6367a6a77360553d652f46919e3000a53
|
[] |
no_license
|
adamsma/helloRworld
|
c9840513c4967f77864b8f7ef72784f66e8df7f6
|
e4850aa140997e57e4ee0f2670dbcedf2e536e65
|
refs/heads/master
| 2020-03-28T10:23:01.706623
| 2018-09-15T16:53:27
| 2018-09-15T16:53:27
| 148,103,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 395
|
r
|
test-presents.R
|
testthat::context("Testing Giving Presents")
test_that("Presents gives presents",{
  # Presents() should greet, announce the data, and print mtcars-style columns.
  expect_message(Presents(), "^Hello,")
  expect_message(Presents(), "Here's some car data for you:")
  expect_output(Presents(), "mpg|cyl|disp|hp|drat|wt|qsec|vs|am|gear|carb")
})
test_that("Data generated is subset of mtcars", {
  # anti_join returns rows of GenData() with no match in mtcars, so an empty
  # result proves every generated row exists in datasets::mtcars.
  expect_equal(NROW(GenData() %>% dplyr::anti_join(datasets::mtcars)), 0)
})
|
b0a05789d4b7222f25c1600584de79423b17caa1
|
20b4c4ad2f546739e7b3c3b6107094aecf4720ca
|
/jsserver/Ranalysis/DyanmoAnalysis.R
|
5b7f7290d9d672bf7c6d9171e5c0b96cde2dca02
|
[] |
no_license
|
ngopal/VisualEncodingEngine
|
111437109dd8ed00f01bbdf91b3c2697f4cc69a4
|
77b57529e3880047ba8af74225eb0b2b68301a3b
|
refs/heads/master
| 2020-04-06T06:51:31.883311
| 2016-09-06T21:04:15
| 2016-09-06T21:04:15
| 60,572,650
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,576
|
r
|
DyanmoAnalysis.R
|
library(randomForest)
library(RJSONIO)
library(ROCR)
# Research Questions
# 1. What are the ranked importances of node encodings?
# 2. What are the ranked importances of edge encodings?
# 3. How important is network structure to noticeability?
# 3a. Where do participants tend to click?
#Converting HEX color to INT
# Pack an (r, g, b) triple (0-255 each channel) into a single integer,
# i.e. r*65536 + g*256 + b. Vectorised over its arguments.
# Formula from https://www.shodor.org/stella2java/rgbint.html
rgbToInt <- function(red, green, blue) {
  # Horner form of 256*256*red + 256*green + blue.
  (red * 256 + green) * 256 + blue
}
# Perceived brightness (luma) of an RGB colour as a weighted channel sum.
# Vectorised over its arguments; channels are expected on the 0-255 scale.
# Formula from http://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
# Another potential option http://stackoverflow.com/questions/12043187/how-to-check-if-hex-color-is-too-black
calcPerceivedBrightness <- function(red, green, blue) {
  luma <- red * 0.299 + green * 0.587 + blue * 0.114
  luma
}
### Connecting through MONGO ####
## http://stackoverflow.com/questions/30738974/rjava-load-error-in-rstudio-r-after-upgrading-to-osx-yosemite
library(rJava)
library(RMongo)
library(plyr)
pilotdb <- mongoDbConnect('pilot')
dbGetQuery(pilotdb, 'evaldata', '{}')['X_id']
#' Join each participant's click events with their survey answers.
#'
#' For every unique session id in the 'evaldata' Mongo collection, fetch the
#' non-survey (click) documents and the survey document, replicate the six
#' survey answers onto every click row via cbind, and stack all sessions.
#' Sessions with no survey document are skipped.
#'
#' Relies on the globals `pilotdb` (RMongo connection) and RMongo::dbGetQuery.
#' @return data.frame of click rows augmented with question1..question6.
connectSurveyToClickData <- function() {
  uniqueSessions <- unlist(unique(dbGetQuery(pilotdb, 'evaldata', '{}', skip=0, limit=100000)['user']))
  # Accumulate per-session frames in a list; the original grew a data frame
  # with rbind() inside the loop, which is O(n^2) in total rows.
  chunks <- vector("list", length(uniqueSessions))
  for (i in seq_along(uniqueSessions)) {
    u <- uniqueSessions[i]
    cat(u,'\n')
    cldata <- dbGetQuery(pilotdb, 'evaldata', paste('{ user : "',u,'", "page" : { "$ne" : "survey"} }', sep=''))
    sudata <- dbGetQuery(pilotdb, 'evaldata', paste('{ user : "',u,'", "page" : "survey" }', sep=''))
    # Skip sessions that never submitted the survey.
    if (dim(sudata)[1] == 0 || dim(sudata)[2] == 0) {
      next
    }
    sudata <- sudata[c("question1", "question2", "question3", "question4", "question5", "question6")]
    # cbind recycles the single survey row across all click rows.
    chunks[[i]] <- cbind(cldata, sudata)
  }
  # rbind ignores NULL list entries (skipped sessions).
  return( data.frame(do.call(rbind, chunks)) )
}
surveydata <- dbGetQuery(pilotdb, 'evaldata', '{ page: "survey" }')
# Survey Data
survey.df <- data.frame(surveydata[c("question1", "question2", "question3", "question4", "question5", "question6")])
par(mar=c(5.1, 13 ,4.1 ,2.1))
barplot(table(survey.df[,1]), las=2, horiz = T)
barplot(table(survey.df[,2]), las=2, horiz = T)
barplot(table(survey.df[,3]), las=2, horiz = T)
barplot(table(survey.df[,4]), las=2, horiz = T)
barplot(table(survey.df[,5]), las=2, horiz = T)
# Click Data
#collectedData <- dbGetQuery(pilotdb, 'evaldata', '{ "page": {"$ne":"survey"} }')
#collectedData <- dbGetQuery(pilotdb, 'evaldata', '{ "page": {"$ne":"survey"}, "user":"488238d8-99be-e65d-ebb8-ce7c04c92b25" }')
#expd.dat <- data.frame(collectedData[names(head(collectedData))])
expd.dat <- connectSurveyToClickData()
expd.dat$linewidth <- as.numeric(gsub('px','',expd.dat$linewidth))
expd.dat$nodeheight <- as.numeric(gsub('px','',expd.dat$nodeheight))
expd.dat$nodeborderwidth <- as.numeric(gsub('px','',expd.dat$nodeborderwidth))
#replace "cy.js selection blue" with "normal gray"
#expd.dat$nodebackground <- revalue(expd.dat$nodebackground, c("#0169D9"="#999999"))
expd.dat$nodebackground <- revalue(expd.dat$nodebackground, c("#999"="#999999"))
#expd.dat$linecolor <- revalue(expd.dat$linecolor, c("#0169D9"="#999999"))
expd.dat$linecolor <- revalue(expd.dat$linecolor, c("#999"="#999999"))
#rgbtpint
# tt <- makeRGBMat(expd.dat, 5)
# expd.dat[,5] <- as.numeric(rgbToInt(tt[,1], tt[,2], tt[,3]))
# tt2 <- makeRGBMat(expd.dat, 15)
# expd.dat[,15] <- as.numeric(rgbToInt(tt2[,1], tt2[,2], tt2[,3]))
#brightness
# expd.dat <- cbind(expd.dat, as.numeric(calcPerceivedBrightness(tt[,1], tt[,2], tt[,3])), as.numeric(calcPerceivedBrightness(tt2[,1], tt2[,2], tt2[,3])))
# colnames(expd.dat) <- c(colnames(expd.dat)[c(-35,-36)],"nodeBrightness", "lineBrightness")
nodett <- t(rgb2hsv((col2rgb(expd.dat$nodebackground))))
edgett <- t(rgb2hsv((col2rgb(expd.dat$linecolor))))
expd.dat <- cbind(expd.dat,
as.numeric(nodett[,1]), as.numeric(nodett[,2]), as.numeric(nodett[,3]),
as.numeric(edgett[,1]), as.numeric(edgett[,2]), as.numeric(edgett[,3]))
colnames(expd.dat) <- c(colnames(expd.dat)[1:(length(colnames(expd.dat))-6)], "nodeHue", "nodeSaturation", "nodeValue", "edgeHue", "edgeSaturation", "edgeValue")
#' Build a class-balanced sample of the experiment data.
#'
#' For every (user, network) pair, keep every selected element plus ONE
#' randomly drawn unselected element of the same element type.
#'
#' @param ds   Data frame with user, network, xposition and selected columns.
#' @param type "nodes" keeps rows whose xposition is not the string "NA"
#'             (i.e. node rows); anything else keeps rows whose xposition is
#'             not "edge". This mirrors the original sentinel comparison.
#' @return data.frame with, per pair, all selected rows and at most one
#'         unselected row.
sampleBalancedData <- function(ds, type) {
  # Sentinel used to exclude the other element type (see @param type).
  pos <- if (type == "nodes") "NA" else "edge"
  users <- unique(ds$user)
  networks <- unique(ds$network)
  # Accumulate per-pair samples in a list; the original grew `newds` with
  # rbind() inside the loop, which is O(n^2) overall.
  chunks <- list()
  for (u in users) {
    for (n in networks) {
      inPair <- (ds$user == u) & (ds$network == n) & (ds$xposition != pos)
      selected <- ds[which(inPair & (ds$selected == 1)), ]
      notSelIdx <- which(inPair & (ds$selected != 1))
      if (length(notSelIdx) == 0) {
        # Original bug: sample(1:0, 1) draws from c(1, 0) and could index a
        # non-existent row, injecting an all-NA row. Skip the draw instead.
        notSelected <- NULL
      } else if (length(notSelIdx) == 1) {
        # sample(x, 1) with scalar x samples from 1:x — use the index directly.
        notSelected <- ds[notSelIdx, ]
      } else {
        notSelected <- ds[sample(notSelIdx, 1), ]
      }
      chunks[[length(chunks) + 1L]] <- rbind(selected, notSelected)
      cat(selected$selected, '\n')
      cat(notSelected$selected, '\n')
    }
  }
  return( data.frame(do.call(rbind, chunks)) )
}
sampleBalancedData(expd.dat, "nodes")
# Sampling Idea
# I suppose I can have up to 6 nodes without having to use SMOTE
# I will put this on hold because I don't think I need to balance classes yet
#dbGetQuery(pilotdb, 'evaldata', '{ "page": {"$ne":"survey"}, "selected":0, "network":"rn2", "name" : {"$ne" : "NA"} }')
# Click Map
plot(expd.dat$xposition, expd.dat$yposition, xlim=c(0,max(expd.dat$xposition, na.rm=T)), ylim=c(0,max(expd.dat$yposition, na.rm=T)), col="gray" )
points(expd.dat$clickX, expd.dat$clickY, col="red")
expd.nodes <- data.frame(expd.dat[which(!is.na(expd.dat$xposition)),])
expd.nodes <- expd.nodes[which(as.numeric(as.character(expd.nodes$selected)) <= 1),]
expd.edges <- data.frame(expd.dat[which(is.na(expd.dat$xposition)),])
expd.edges <- expd.edges[which(as.numeric(as.character(expd.edges$selected)) <= 1),]
# Node Encodings Only Model / Selection
#expd.nodes.1 <- data.frame(expd.nodes[,c(4,8,9,13,15,32,39:41)])
expd.nodes.1 <- data.frame(expd.nodes[,c(3,5,10,14,19,24,35,42:44)])
expd.nodes.1$nodeshape <- as.factor(expd.nodes.1$nodeshape)
#expd.nodes.1$network <- as.factor(expd.nodes.1$network)
#expd.nodes.1$nodebackground <- as.factor(expd.nodes.1$nodebackground)
expd.nodes.1$selected <- as.factor(expd.nodes.1$selected) # This will make it classification
expd.nodes.1$selected <- as.numeric(as.character(expd.nodes.1$selected)) # This will make it regression
expd.nodes.1$eletype <- as.factor(expd.nodes.1$eletype)
expd.nodes.1$question1 <- as.factor(expd.nodes.1$question1)
expd.nodes.1$question2 <- as.factor(expd.nodes.1$question2)
expd.nodes.1$question3 <- as.factor(expd.nodes.1$question3)
expd.nodes.1$question4 <- as.factor(expd.nodes.1$question4)
expd.nodes.1$question5 <- as.factor(expd.nodes.1$question5)
expd.nodes.1c <- expd.nodes.1
expd.nodes.1c$selected <- as.factor(as.character(expd.nodes.1c$selected))
# Check for multicollinearily
library(rfUtilities)
multi.collinear(expd.nodes.1[,c(-2)])
#multi.collinear(expd.nodes.1[,c(3:4,6,7:9)])
#expd.nodes.1 <- expd.nodes.1[,c(-4,-6)]
# I could consider using "network" as strata below
selectedPrevalence.nodes.1 <- sum(as.numeric(expd.nodes.1$selected))/length(as.numeric(expd.nodes.1$selected))
unselectedPrevalence.nodes.1 <- 100-sum(as.numeric(expd.nodes.1$selected))/length(as.numeric(expd.nodes.1$selected))
tuneRF(x = expd.nodes.1[,c(-4)], y = expd.nodes.1$selected, importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1))
#rf1.nodes.1 <- randomForest(selected ~ ., data=expd.nodes.1[,c(-3, -14)], importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1))
#rf1.nodes.1 <- randomForest(selected ~ ., data=expd.nodes.1[,c(-3, -8:-14)], importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1))
#rf1.nodes.1 <- randomForest(selected ~ ., data=expd.nodes.1[,c(-5, -6, -13)], importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1))
rf1.nodes.1 <- randomForest(selected ~ ., data=expd.nodes.1[,-6], importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1), keep.inbag = TRUE)
#rf1.nodes.1 <- randomForest(selected ~ ., data=expd.nodes.1[,-6], importance=TRUE, proximity=TRUE, keep.inbag = TRUE)
rf1.nodes.1c <- randomForest(selected ~ ., data=expd.nodes.1c[,-6], importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1), keep.inbag = TRUE, ntree=1500)
print(rf1.nodes.1)
rf1.nodes.1$importance
varImpPlot(rf1.nodes.1,type=2)
rf1.nodes.1 <- randomForest(selected ~ ., data=expd.nodes.1[,c(1,3,4,7,14,15,16)], importance=TRUE, proximity=TRUE, classwt = c(selectedPrevalence.nodes.1, unselectedPrevalence.nodes.1))
print(rf1.nodes.1)
rf1.nodes.1$importance
varImpPlot(rf1.nodes.1,type=2)
# http://stats.stackexchange.com/questions/144700/negative-r2-at-random-regression-forest
# http://stats.stackexchange.com/questions/21152/obtaining-knowledge-from-a-random-forest
abline(v = abs(min(rf1.nodes.1$importance[,4])), lty="longdash", lwd=2)
rf1.nodes.1.p <- classCenter(expd.nodes.1[-4], expd.nodes.1[,4], rf1.nodes.1$proximity)
# Node Encodings Only Model / Reaction Time?
# Edge Encodings Only Model / Selection
expd.edges.1 <- data.frame(expd.edges[,c(3, 14, 21, 23, 45:47)])
expd.edges.1$linestyle <- as.factor(expd.edges.1$linestyle)
multi.collinear(expd.edges.1[,c(-4)])
rf1.edges.1 <- randomForest(selected ~ ., data=expd.edges.1, importance=TRUE, proximity=TRUE, keep.inbag = TRUE)
print(rf1.edges.1)
rf1.edges.1$importance
varImpPlot(rf1.edges.1,type=2)
rf1.edges.1.p <- classCenter(expd.edges.1[-2], expd.edges.1[,2], rf1.edges.1$proximity)
# Edge Encodings Only Model / Reaction Time?
# negative value means the mean error is larger than the variance of the response
# y. This could be because the predictor performs really poorly but also
# because of some calibration issue.
# Try to attach demographic information to the DF and see how that affects selection
# cbind(expd.both, surveydata)
# NOTE THAT THE TREE IS UNBALANCED RIGHT NOW, AND MUST BE SAMPLED
# BALANCED BEFORE RESULTS ARE RELIABLE
rf1.perf.nodes = performance( prediction(labels = expd.nodes.1$selected, predictions = rf1.nodes.1$predicted) ,"tpr","fpr")
rf1.perf.edges = performance( prediction(labels = expd.edges.1$selected, predictions = rf1.edges.1$predicted) ,"tpr","fpr")
par(mfrow=c(1,2))
#plot the curve
plot(rf1.perf.nodes,main="ROC Curve for Random Forest (Nodes)",col=2,lwd=2)
lines(unlist(rf1.perf.nodes@x.values),unlist(rf1.perf.nodes@y.values), col=4, lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
plot(rf1.perf.edges,main="ROC Curve for Random Forest (Edges)",col=2,lwd=2)
lines(unlist(rf1.perf.edges@x.values),unlist(rf1.perf.edges@y.values), col=4, lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
#compute area under curve
auc.rf1.nodes <- performance( prediction(labels = expd.nodes.1$selected, predictions = rf1.nodes.1$predicted) ,"auc")
auc.rf1.nodes <- unlist(slot(auc.rf1.nodes, "y.values"))
minauc<-min(round(auc.rf1.nodes, digits = 2))
maxauc<-max(round(auc.rf1.nodes, digits = 2))
minauct <- paste(c("min(AUC) = "),minauc,sep="")
maxauct <- paste(c("max(AUC) = "),maxauc,sep="")
minauct
maxauct
auc.rf1.edges <- performance( prediction(labels = expd.edges.1$selected, predictions = rf1.edges.1$predicted) ,"auc")
auc.rf1.edges <- unlist(slot(auc.rf1.edges, "y.values"))
minauc<-min(round(auc.rf1.edges, digits = 2))
maxauc<-max(round(auc.rf1.edges, digits = 2))
minauct <- paste(c("min(AUC) = "),minauc,sep="")
maxauct <- paste(c("max(AUC) = "),maxauc,sep="")
minauct
maxauct
# Forest Floor
library(forestFloor)
ff = forestFloor(
rf.fit = rf1.nodes.1, # mandatory
X = expd.nodes.1, # mandatory
calc_np = FALSE, # TRUE or FALSE both works, makes no difference
binary_reg = FALSE # takes no effect here when rfo$type="regression"
)
ffe = forestFloor(
rf.fit = rf1.edges.1, # mandatory
X = expd.edges.1, # mandatory
calc_np = FALSE, # TRUE or FALSE both works, makes no difference
binary_reg = FALSE # takes no effect here when rfo$type="regression"
)
#plot partial functions of most important variables first
plot(ff, # forestFloor object
plot_seq = 1:9, # optional sequence of features to plot
orderByImportance=TRUE, # if TRUE index sequence by importance, else by X column
col=ifelse(ff$Y, "red", "gray")
)
plot(ffe, # forestFloor object
plot_seq = 1:9, # optional sequence of features to plot
orderByImportance=TRUE, # if TRUE index sequence by importance, else by X column
col=ifelse(ffe$Y, "red", "gray")
)
par(mfrow=c(3,3))
for(i in 1:9) partialPlot(rf1.nodes.1,expd.nodes.1,x.var=eval(names(expd.nodes.1)[i]))
partialPlot(rf1.nodes.1, expd.nodes.1, nodeshape)
partialPlot(rf1.nodes.1, expd.nodes.1, nodeheight)
partialPlot(rf1.nodes.1, expd.nodes.1, nodeHue)
library(rfPermute)
library(ggRandomForests)
##ggRandomForest
library(ggplot2)
library(RColorBrewer)
library(plot3D)
library(dplyr)
library(reshape)
library(reshape2)
library(randomForestSRC)
library(gridExtra)
theme_set(theme_bw())
event.marks <- c(1,4)
event.labels <- c(FALSE, TRUE)
strCol <- brewer.pal(3, "Set1")[c(2,1,3)]
expd.nodes.1.melted <- melt(expd.nodes.1, id.vars=c("nodeshape", "selected"))
ggplot(expd.nodes.1.melted, aes(x=nodeshape, y=value, color=factor(selected)))+
geom_point(alpha=.4)+
geom_rug(data=expd.nodes.1.melted %>% filter(is.na(value)))+
# labs(y="", x=nodeshape) +
scale_color_brewer(palette="Set2")+
facet_wrap(~variable, scales="free_y", ncol=3)
rfsc_selected <- rfsrc(selected ~ ., data=expd.nodes.1)
#plot OOB against growth of forests
gg_e <- gg_error(rfsc_selected)
plot(gg_e)
# VIMP
plot(gg_vimp(rfsc_selected))
# Minimal Depth
varsel_node <- var.select(rfsc_selected)
gg_md <- gg_minimal_depth(varsel_node)
plot(gg_md)
# Compare VIMP and Minimal depth
plot(gg_minimal_vimp(gg_md))
# Variable Dependence (this can theoretically be generated by plotting the results from RF for each variable)
gg_v <- gg_variable(rfsc_selected)
xvar <- gg_md$topvars
plot(gg_v, xvar=xvar, panel=TRUE,
alpha=.4)
# labs(y=selected, x="")
# Partial Dependence (this is in the randomForest library and works there, so no need for this.)
partial_node <- plot.variable(rfsc_selected, xvar=gg_md$topvars, partial=TRUE, sorted=FALSE, show.plots=FALSE)
gg_p <- gg_partial(partial_node)
plot(gg_p, xvar=xvar, panel=TRUE)
interaction_nodes <- find.interaction(rfsc_selected)
plot(interaction_nodes, xvar=gg_md$topvars, panel=TRUE)
heatmap(interaction_nodes)
## Trying ICEbox
library(ICEbox)
nodes.ice1 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "nodeheight", frac_to_build = .1)
nodes.ice2 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "nodeHue", frac_to_build = .1)
nodes.ice3 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "nodeborderwidth", frac_to_build = .1)
nodes.ice4 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "numEdges", frac_to_build = .1)
#nodes.ice5 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "nodeshape", frac_to_build = .1) #doesn't handle factors...I may be able to make a dummy variable?
nodes.ice6 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "numConnected", frac_to_build = .1)
nodes.ice7 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "nodeValue", frac_to_build = .1)
nodes.ice8 <- ice(object = rf1.nodes.1, X = expd.nodes.1, y = expd.nodes.1$selected, predictor = "nodeSaturation", frac_to_build = .1)
nodes.dice1 <- dice(nodes.ice1)
nodes.dice2 <- dice(nodes.ice2)
nodes.dice3 <- dice(nodes.ice3)
nodes.dice4 <- dice(nodes.ice4)
#nodes.dice5 <- dice(nodes.ice5)
nodes.dice6 <- dice(nodes.ice6)
nodes.dice7 <- dice(nodes.ice7)
nodes.dice8 <- dice(nodes.ice8)
# ICE = individual conditional expectation curves
dev.off()
par(mfrow=c(2,4))
plot(nodes.ice1)
plot(nodes.ice2)
plot(nodes.ice3)
plot(nodes.ice4)
plot(nodes.ice6)
plot(nodes.ice7)
plot(nodes.ice8)
# DICE = Estimates the partial derivative function for each curve in an ice object
dev.off()
par(mfrow=c(2,4))
plot(nodes.dice1)
plot(nodes.dice2)
plot(nodes.dice3)
plot(nodes.dice4)
plot(nodes.dice6)
plot(nodes.dice7)
plot(nodes.dice8)
# Interactions
library(plotmo)
plotmo(rf1.nodes.1c, type="prob")
plotmo(rf1.nodes.1)
plotmo(rf1.edges.1)
# Webers Law
#' Pairwise Weber fractions for a run of rows.
#'
#' For every ordered pair (i, k) with rowIndexStart <= i <= k <= rowIndexEnd
#' (self-pairs included, giving K = 0), compute the Weber fraction of the
#' values in column `c` and print a trace line.
#'
#' @param rowIndexStart First row index (inclusive).
#' @param rowIndexEnd   Last row index (inclusive).
#' @param c             Column index of the stimulus value.
#' @param data          Data frame to read from; defaults to the global
#'                      `expd.dat` for backward compatibility.
#' @return Numeric matrix with columns index1, index2, value1, value2, K.
calcWebersForRows <- function(rowIndexStart, rowIndexEnd, c, data = expd.dat) {
  # Accumulate rows in a list; the original grew the matrix with rbind()
  # inside the double loop (quadratic total cost).
  pairs <- list()
  for (i in rowIndexStart:rowIndexEnd) {
    for (k in i:rowIndexEnd) {
      cat(i, k, data[i, c], data[k, c],
          calcWeber(data[i, c], data[k, c]), '\n')
      pairs[[length(pairs) + 1L]] <-
        c(i, k, data[i, c], data[k, c], calcWeber(data[i, c], data[k, c]))
    }
  }
  nodeCombs <- do.call(rbind, pairs)
  colnames(nodeCombs) <- c("index1", "index2", "value1", "value2", "K")
  return(nodeCombs)
}

#' Weber fraction |a - b| / b for stimulus intensities a and b.
#' Vectorised; returns NaN for b == 0 with a == 0, Inf for b == 0 otherwise.
calcWeber <- function(a, b) {
  return( abs(a - b) / b )
}
calcWebersForRows(1, 4, 35)
calcWebersForRows(8, 11, 42)
for (n in unique(expd.dat[which(expd.dat$eletype == "node"),9]) ) {
numeros <- which(expd.dat[which(expd.dat$eletype == "node"),9] == n)
cat(which(expd.dat[which(expd.dat$eletype == "node"),9] == n),'\n')
}
#' Group the indices of `numeros` into runs of consecutive values.
#'
#' The original implementation was broken: `for (i in length(numeros))`
#' visited only the single value length(numeros), and `numeros[i+1 || i]`
#' indexed with a logical scalar. Rewritten to return one index vector per
#' maximal run in which each value is exactly its predecessor + 1.
#'
#' @param numeros Numeric vector (e.g. sorted row indices).
#' @return List of integer vectors of positions into `numeros`, one per run;
#'         empty list for empty input.
countContiguous <- function(numeros) {
  if (length(numeros) == 0) {
    return(list())
  }
  # A new run starts at position 1 and wherever the step from the previous
  # value is not +1; cumsum turns those starts into run ids.
  runId <- cumsum(c(TRUE, diff(numeros) != 1))
  return(unname(split(seq_along(numeros), runId)))
}
# Count length of line
subNet <- expd.dat[1:7,c(7,8,13,33)]
for (i in which(is.na(subNet$xposition))) {
# subNet[subNet[i,]$elesource,]$xposition, subNet[subNet[i,]$elesource,]$yposition
# subNet[subNet[i,]$eletarget,]$xposition, subNet[subNet[i,]$eletarget,]$yposition
dd <- sqrt((subNet[subNet[i,]$elesource,]$xposition - subNet[subNet[i,]$eletarget,]$xposition)^2 +
(subNet[subNet[i,]$elesource,]$yposition - subNet[subNet[i,]$eletarget,]$yposition)^2)
cat(subNet[i,]$elesource, subNet[i,]$eletarget, dd,'\n')
}
# Attach an "edgeLength" column to expd.dat
edgeLength <- matrix(0, dim(expd.dat)[1])
subNets <- unique(paste(expd.dat$user,'---',expd.dat$network,sep=''))
for (s in subNets) {
li <- strsplit(s, '---')
u <- li[[1]][1]
n <- li[[1]][2]
inds <- which(expd.dat$network == n & expd.dat$user == u)
subNet <- expd.dat[inds,c(7,8,13,33)]
tsn <- c()
for (i in which(is.na(subNet$xposition))) {
dd <- sqrt((subNet[subNet[i,]$elesource,]$xposition - subNet[subNet[i,]$eletarget,]$xposition)^2 +
(subNet[subNet[i,]$elesource,]$yposition - subNet[subNet[i,]$eletarget,]$yposition)^2)
cat(subNet[i,]$elesource, subNet[i,]$eletarget, dd,'\n')
tsn <- append(tsn, dd)
}
edgeLength[inds[is.na(expd.dat[inds,]$xposition)]] <- tsn
}
# Calculate Steven's Power Law
selnodesonly <- expd.dat[which((expd.dat$selected == 1) & (expd.dat$xposition != 0)),] #selected nodes only
selnodesonly <- selnodesonly[which(selnodesonly$nodeEncoding1 == "node border (bin)" | selnodesonly$nodeEncoding1 == "node border (quant)" | selnodesonly$nodeEncoding2 == "node border (bin)" | selnodesonly$nodeEncoding2 == "node border (quant)"),]
log(selnodesonly[,35])
# Cannot be calculated because this experiment does not capture data
# about the perceived intensity of a data point, rather only the actual intensity
# used to visualize the data
# Node combinations
unique(cbind(expd.nodes$nodeEncoding1,expd.nodes$nodeEncoding2))
# Edge combinations
unique(cbind(expd.edges$edgeEncoding1,expd.edges$edgeEncoding2))
# ########################################
# Reference table: the 16 basic HTML/CSS colour keywords and their RGB values.
# (Previously left as bare symbols — e.g. `black #000000 0,0,0` parsed as the
# undefined symbol `black` plus a comment, and the leading `P` was a stray
# symbol — all of which errored when the script was sourced. Kept as comments.)
# col2rgb(htmlcolors)
# black    #000000  0,0,0
# silver   #C0C0C0  192,192,192
# gray     #808080  128,128,128
# white    #FFFFFF  255,255,255
# maroon   #800000  128,0,0
# red      #FF0000  255,0,0
# purple   #800080  128,0,128
# fuchsia  #FF00FF  255,0,255
# green    #008000  0,128,0
# lime     #00FF00  0,255,0
# olive    #808000  128,128,0
# yellow   #FFFF00  255,255,0
# navy     #000080  0,0,128
# blue     #0000FF  0,0,255
# teal     #008080  0,128,128
# aqua     #00FFFF  0,255,255
# https://www.w3.org/TR/css3-color/
#' Map an arbitrary colour to the nearest basic HTML/CSS colour keyword.
#'
#' Computes the Euclidean distance in RGB space from `inhex` to each of the
#' 16 basic keywords of https://www.w3.org/TR/css3-color/ and returns the
#' closest name(s).
#'
#' @param inhex Any colour spec accepted by grDevices::col2rgb, e.g. "#cadef1".
#' @return Character keyword; more than one element if exactly tied
#'         (tie behaviour unchanged from the original).
eucColor <- function(inhex) {
  htmlcolornames <- c("black", "silver", "gray", "white", "maroon", "red", "purple",
                      "fuchsia", "green", "lime", "olive", "yellow", "navy", "blue", "teal",
                      "aqua")
  htmlcolors <- c("#000000", "#C0C0C0", "#808080", "#FFFFFF",
                  "#800000", "#FF0000", "#800080", "#FF00FF",
                  "#008000", "#00FF00", "#808000", "#FFFF00",
                  "#000080", "#0000FF", "#008080", "#00FFFF")
  # Hoisted: the original re-computed col2rgb(htmlcolors) three times.
  ref <- col2rgb(htmlcolors)   # 3 x 16 matrix, rows = r, g, b
  qrgb <- as.vector(col2rgb(inhex))  # length-3 query vector
  # Column-wise recycling subtracts the query from every reference colour.
  vals <- sqrt(colSums((ref - qrgb)^2))
  return( htmlcolornames[which(vals == min(vals))] )
}
# Spot-check a few palette hex codes against the keyword mapper.
# (The next line was previously four bare adjacent string literals, which is
# a parse error in R and made the whole file unsourceable; kept as a vector.)
c("#cadef1", "#dde8f8", "#eff3ff", "#c6dbef")
eucColor("#cadef1")
eucColor("#dde8f8")
eucColor("#eff3ff")
eucColor("#c6dbef")
eucColor("#999999")
# Map every node/edge colour in the experiment data to its nearest keyword.
sapply(expd.dat$nodebackground, FUN=eucColor)
sapply(expd.dat$linecolor, FUN=eucColor)
# RF UTILITIES
library(rfUtilities)
multi.collinear(expd.nodes.1[,2:7])
# This shows that I can remove nodeheight from the model since it mirrors nodewidth
multi.collinear(expd.edges.1[,-3])
# No multicollinearity
multi.collinear(expd.both[,c(-3, -7, -10, -15)])
# In addition to nodeheight, windowheight, windowwidth, and nodeBrightness
# may be removed due to collinearity
# RF UTILITIES CLASS BALANCE
# https://cran.r-project.org/web/packages/rfUtilities/rfUtilities.pdf
rf.nodes.1.balanced <- rf.classBalance(ydata = expd.nodes.1[,4], xdata = expd.nodes.1[,c(2,3,5,6)])
# Future Functions Below
# Scatter the window-normalized (0-1) positions of all UNselected elements in
# gray, then overlay the selected ones as red triangles, to show where
# participants tended to click. Reads the global data frame `expd`
# (columns xpos, ypos, windowWidth, windowHeight, selected); the repeated
# as.numeric(as.character(...)) guards against factor-coded columns.
clickmap <- function() {
  # Earlier variant plotted raw click coordinates instead of element positions.
  #plot(as.numeric(as.character(expd$clickX)) / as.numeric(as.character(expd$windowWidth)), as.numeric(as.character(expd$clickY)) / as.numeric(as.character(expd$windowHeight)))
  plot(as.numeric(as.character(expd$xpos[which(expd$selected == 0)])) / as.numeric(as.character(expd$windowWidth[which(expd$selected == 0)])), as.numeric(as.character(expd$ypos[which(expd$selected == 0)])) / as.numeric(as.character(expd$windowHeight[which(expd$selected == 0)])), col="gray",
       xlab="Normalized X Coordinate Position",
       ylab="Normalized Y Coordinate Position",
       main="Click Map of Selected Nodes Versus Unselected")
  points(as.numeric(as.character(expd$xpos[which(expd$selected == 1)])) / as.numeric(as.character(expd$windowWidth[which(expd$selected == 1)])), as.numeric(as.character(expd$ypos[which(expd$selected == 1)])) / as.numeric(as.character(expd$windowHeight[which(expd$selected == 1)])), col="red", pch=2)
}
# Basic Visualizations
clickmap()
barplot(table(expd[which(expd$selected == 1),3]), horiz = T, las=1)
barplot(table(paste(expd[which(expd$selected == 1),4],expd[which(expd$selected == 1),5])), horiz=T, las=1)
tpch <- rep(1,dim(expd)[1])
tpch[which(expd.mod$selected == 1)] <- 1
ts <- rep(1,dim(expd)[1])
ts[which(expd.mod$selected == 1)] <- 1
pairs(expd.mod[,2:4], col=as.character(expd$nodecolor), pch=tpch, cex=ts)
pairs(expd.mod[,2:4], col=ifelse(expd.mod$selected == 1, as.character(expd$nodecolor), "gray"), pch=tpch, cex=ts)
# Euclidean distance from the network centre: fill column 13 of the global
# `expd` data frame, one input file at a time.
# NOTE(review): the single-encoding variant (column 12) is left disabled;
# confirm column 13 is the intended destination.
for (f in levels(expd[,1])) {
  cat(f,'\n')
  #expd[which(expd[,1] == f),12] <- calcDistanceFromCenterOfNetwork(f)
  expd[which(expd[,1] == f),13] <- calcDistanceFromCenterOfNetworkDoubleEnc(f)
}
# Per-node Euclidean distance from the mean node position ("network centre")
# for one input file, read from the global `expd` data frame
# (column 10 = xpos, column 11 = ypos per the colnames assigned later).
# Fix: the original subtracted mean(col 11) from col 10, summed the same
# term twice, and never took the square root — not a distance at all.
# NOTE(review): assumes columns 10/11 are numeric; elsewhere the script
# coerces them with as.numeric(as.character(...)) — confirm upstream.
calcDistanceFromCenterOfNetworkDoubleEnc <- function(file) {
  rows <- which(expd[, 1] == file)
  xs <- expd[rows, 10]
  ys <- expd[rows, 11]
  return( sqrt((xs - mean(xs))^2 + (ys - mean(ys))^2) )
}
# Add centrality data to data frame
expdwcent <- expd
expdwcent <- data.frame(cbind(expdwcent,
rep(centralization.betweenness(genemania.network.graph)$res, dim(expd)[1]),
rep(centralization.closeness(genemania.network.graph)$res, dim(expd)[1]),
rep(centralization.degree(genemania.network.graph)$res, dim(expd)[1]),
rep(centralization.evcent(genemania.network.graph)$vector, dim(expd)[1])
))
colnames(expdwcent) <- c("file","id", "name", "encoding1", "encoding2", "nodecolor", "nodeshape", "nodeborder", "nodesize", "xpos", "ypos", "selected", "clickX", "clickY", "windowHeight", "windowWidth", "betweenness", "closeness", "degree", "eigenvector")
colnames(expd) <- c("file","id", "name", "encoding1", "encoding2", "nodecolor", "nodeshape", "nodeborder", "nodesize", "xpos", "ypos", "selected","distCent")
expd[,13] <- as.numeric(as.character(expd[,13]))
expd <- as.data.frame(cbind(expd[,1:11],expd[,13],expd[,12]))
colnames(expd) <- c(colnames(expd)[1:11],"distCent","selected")
library(lme4)
expd.model1 <- lmer(as.numeric(selected) ~ as.numeric(nodeborder) + (1|name) + (1|file), data=expd)
expd.model2 <- lmer(as.numeric(selected) ~ as.numeric(nodesize) + (1|name) + (1|file), data=expd)
anova(expd.model1,expd.model2)
summary(expd.model1)
coef(expd.model1)
summary(lm(as.numeric(selected) ~ as.numeric(nodeborder) + log10(distCent) + as.numeric(nodesize) + name, data=expd))
mod.coef <- coef(lmer(as.numeric(selected) ~ as.numeric(nodeborder) + as.numeric(nodesize) + nodecolor + as.numeric(xpos) + as.numeric(ypos) + (1|name) + (1|file) + (1|encoding1) + (1|encoding2), data=expd))
mod.coef$name
heatmap(as.matrix(mod.coef$name), margins = c(10,10))
# randomly sampling an equal number rows of zero and one selection values
rsexpd <- expd[c(c(sample(which(expd[,11] == 0), length(which(expd[,11] == 1)))),c(which(expd[,11] == 1))),]
coef(lmer(as.numeric(selected) ~ as.numeric(nodesize) + (1|name) + (1|file), data=rsexpd))
coef(lmer(as.numeric(selected) ~ as.numeric(nodesize) + (1|name) + (1|encoding), data=rsexpd))
summary(lmer(as.numeric(selected) ~ as.numeric(nodesize) + as.numeric(nodeborder) + log10(distCent) + (1|name) + (1|encoding), data=rsexpd))
coef(lmer(as.numeric(selected) ~ as.numeric(nodesize) + as.numeric(nodeborder) + log10(distCent) + (1|name) + (1|encoding), data=rsexpd))
# Let's use an RF
library(randomForest)
expd.mod <- expd[,c(1,3,6:12)] #until 13 if I want to include distCent
#rf1 <- randomForest(as.numeric(selected) ~ ., data=expd, importance=TRUE, proximity=TRUE)
rf1 <- randomForest(as.numeric(selected) ~ ., data=expd.mod, importance=TRUE, proximity=TRUE)
print(rf1)
rf1$importance
varImpPlot(rf1,type=2)
voodoo <- c()
for (i in 1:dim(expd)[1]) {
if (expd[i,4] == "#999") {
voodoo <- append(voodoo, t(col2rgb("#999999")))
}
else {
voodoo <- append(voodoo, t(col2rgb(expd[i,4])))
}
}
mycolors <- t(matrix(voodoo, 3, length(voodoo)/3))
#r, g, b cols
expd.mod <- data.frame(cbind(expd[,3],mycolors[,1:3],expd[,7:18])) #until 13 if I want distCent
colnames(expd.mod) <- c("name", "R", "G", "B", colnames(expd)[7:18])
rf2 <- randomForest(as.numeric(selected) ~ ., data=expd.mod, importance=TRUE, proximity=TRUE, do.trace = TRUE)
print(rf2)
rf2$importance
varImpPlot(rf2,type=2)
# unsupervised
expd.urf <- randomForest(expd.mod[, -11])
MDSplot(expd.urf, expd$selected)
#regression
predict(rf2, expd.mod[sample(which(expd.mod[,11] == 1), 1),-11])
predict(rf2, expd.mod[sample(which(expd.mod[,11] == 0), 1),-11])
plot(rf2$predicted)
# optimizing mtry
tuneRF(x = expd.mod[,-11], y = expd.mod[,11], plot = T, doBest = T)
#trying to balance classes for RF
expd.mod.bal <- expd.mod[c(c(sample(which(expd.mod[,11] == 0), length(which(expd.mod[,11] == 1)))),c(which(expd.mod[,11] == 1))),]
tuneRF(x = expd.mod.bal[,-11], y = expd.mod.bal[,11], plot = T, doBest = T)
rf3 <- randomForest(as.numeric(selected) ~ ., data=expd.mod.bal, importance=TRUE, proximity=TRUE, do.trace = F, mtry=2)
print(rf3)
rf3$importance
varImpPlot(rf3,type=2)
rf4 <- randomForest(selected ~ ., data=expd.mod, importance=TRUE, proximity=TRUE, do.trace = F, mtry=2, strata = selected, sampsize = sum(expd.mod[,11] == 1))
print(rf4)
rf4$importance
varImpPlot(rf3,type=2)
#compare balanced vs unbalanced
library(ROCR)
rf3.perf = performance( prediction(labels = expd.mod.bal$selected, predictions = rf3$predicted) ,"tpr","fpr")
rf4.perf = performance( prediction(labels = expd.mod$selected, predictions = rf4$predicted) ,"tpr","fpr")
#plot the curve
plot(rf4.perf,main="ROC Curve for Random Forest",col=2,lwd=2)
lines(unlist(rf3.perf@x.values),unlist(rf3.perf@y.values), col=4, lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
#compute area under curve
auc.rf3 <- performance( prediction(labels = expd.mod.bal$selected, predictions = rf3$predicted) ,"auc")
auc.rf4 <- performance( prediction(labels = expd.mod$selected, predictions = rf4$predicted) ,"auc")
auc.rf3 <- unlist(slot(auc.rf3, "y.values"))
auc.rf4 <- unlist(slot(auc.rf4, "y.values"))
minauc<-min(round(auc.rf3, digits = 2))
maxauc<-max(round(auc.rf3, digits = 2))
minauct <- paste(c("min(AUC) = "),minauc,sep="")
maxauct <- paste(c("max(AUC) = "),maxauc,sep="")
minauct
maxauct
minauc<-min(round(auc.rf4, digits = 2))
maxauc<-max(round(auc.rf4, digits = 2))
minauct <- paste(c("min(AUC) = "),minauc,sep="")
maxauct <- paste(c("max(AUC) = "),maxauc,sep="")
minauct
maxauct
# Threshold that Neil provided
# Edge-count threshold: |E| * log(n) / n, where |E| = choose(n, 2) is the
# number of possible undirected edges among n nodes.
# Fix: use choose() instead of dim(combn(1:n, 2))[2] — the original
# materialised the full O(n^2) combination matrix just to count its columns.
gthresh <- function(numNodes) { return( ceiling(choose(numNodes, 2) * (log(numNodes) / numNodes)) ) }
plot(10:300, unlist(lapply(10:300, FUN=gthresh)), type="l")
# More ideas for color analysis
t(rgb2hsv((col2rgb(expd.dat$nodebackground))))
library(scatterplot3d)
tcolor <- rgb2hsv((col2rgb(expd.dat$nodebackground)))
scatterplot3d(tcolor[1,], tcolor[2,], tcolor[3,], color = expd.dat$nodebackground)
# Diagonal panel function for pairs(): draws a histogram of x scaled to
# fit the panel (bar heights normalised to [0, 1]).  Extra arguments in
# `...` are forwarded to rect() (e.g. col).
panel.hist <- function(x, ...)
{
    # Save the user coordinate system and restore it when the panel is done.
    usr <- par("usr"); on.exit(par(usr))
    # Keep the x-range of the panel, but fix the y-range to [0, 1.5] so the
    # tallest bar (height 1) leaves headroom.
    par(usr = c(usr[1:2], 0, 1.5) )
    h <- hist(x, plot = FALSE)
    breaks <- h$breaks; nB <- length(breaks)
    # Normalise counts so the tallest bar has height 1.
    y <- h$counts; y <- y/max(y)
    rect(breaks[-nB], 0, breaks[-1], y, ...)
}
# Upper panel function for pairs(): prints the absolute correlation of
# (x, y), with text size proportional to the correlation's magnitude.
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...)
{
    # Save the coordinate system, restore it on exit, and work in [0,1]^2.
    old_usr <- par("usr"); on.exit(par(old_usr))
    par(usr = c(0, 1, 0, 1))
    corr <- abs(cor(x, y))
    # Formatting alongside 0.123456789 keeps a consistent decimal width.
    label <- paste0(prefix, format(c(corr, 0.123456789), digits = digits)[1])
    if (missing(cex.cor)) cex.cor <- 0.8 / strwidth(label)
    text(0.5, 0.5, label, cex = cex.cor * corr)
}
pairs(t(rgb2hsv((col2rgb(expd.dat$nodebackground)))), col = expd.dat$nodebackground, upper.panel=panel.cor,diag.panel=panel.hist)
pairs(t(rgb2hsv((col2rgb(expd.dat$nodebackground)))), col = expd.dat$nodebackground, upper.panel = NULL,diag.panel=panel.hist)
hist(tcolor[1,], main = "Distribution of Hues")
hist(tcolor[2,], main = "Distribution of Saturation")
hist(tcolor[3,], main = "Distribution of Values")
# library(jsonlite)
# tn <- as.data.frame(fromJSON(gsub("\'", "\"", "[{ 'data': { 'id': '1', 'name' : 'ENSG00000068793', 'dimension' : 'area', 'value' : '4.40646151205377' } },{ 'data': { 'id': '2', 'name' : 'ENSG00000162627', 'dimension' : 'area', 'value' : '5.38202560777306' } },{ 'data': { 'id': '3', 'name' : 'ENSG00000170266', 'dimension' : 'area', 'value' : '1.26156626101008' } },{ 'data': { 'id': '4', 'name' : 'ENSG00000175315', 'dimension' : 'area', 'value' : '4.40646151205377' } },{ 'data': { 'id': '5', 'source': '1', 'target': '2', 'dimension': 'weight', 'value':'0.000085'} },{ 'data': { 'id': '6', 'source': '1', 'target': '3', 'dimension': 'weight', 'value':'0.000037'} },{ 'data': { 'id': '7', 'source': '2', 'target': '3', 'dimension': 'weight', 'value':'0.000086'} },{ 'data': { 'id': '8', 'source': '3', 'target': '4', 'dimension': 'weight', 'value':'0.000099'} }]")))
# nodeRows <- which(!is.na(tn[1:dim(tn)[1],]$name))
# edgeRows <- which(is.na(tn[1:dim(tn)[1],]$name))
#
# edgeData <- cbind(tn[edgeRows,]$source,tn[edgeRows,]$target, tn[edgeRows,]$value)
# colnames(edgeData) <- c("from", "to", "weight")
#
# nodeData <- cbind(tn[nodeRows,]$id, tn[nodeRows,]$value, tn[nodeRows,]$name)
# colnames(nodeData) <- c("id", "area", "name")
#
# igobj <- graph.data.frame(edgeData, directed = F, vertices = nodeData)
# for (v in 1:length(V(igobj))) {
# print(length(neighbors(igobj, v, mode=("in"))))
# }
#
|
857110be0eece3da79614529e14f40de2bf079e4
|
6c9d3d4a6b6d4a5447f4c015f2079e65aaa36c55
|
/R/transform_functions.R
|
7a08cece6ab770422f0a0fc8ce9c477df9312073
|
[] |
no_license
|
cran/omu
|
31f6f8a72b070e9db8a0283b4694f6b6f0dc3102
|
43eee47e4bd12da86e86f4ad1f221c8653625f70
|
refs/heads/master
| 2023-04-16T05:24:11.938933
| 2023-04-06T21:00:03
| 2023-04-06T21:00:03
| 145,907,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,974
|
r
|
transform_functions.R
|
#' transform_samples
#' @description A functional to transform metabolomics data across samples.
#' @param count_data Metabolomics data: a data.frame whose numeric columns
#'   are sample abundances; non-numeric columns are left untouched.
#' @param func a function to transform samples by. can be an anonymous function
#' @return the input data.frame with every numeric column transformed by func
#' @examples
#' data_ln <- transform_samples(count_data = c57_nos2KO_mouse_countDF, log)
#' @export
transform_samples <- function(count_data, func){
  if(is.function(func)==FALSE){
    stop("func must be a function.")
  }
  if(is.data.frame(count_data)==FALSE){
    stop("count_data must be a data.frame")
  }
  # vapply is type-stable, unlike sapply.
  num_cols <- vapply(count_data, is.numeric, logical(1))
  # drop = FALSE keeps a data.frame even when only one column is numeric;
  # the original `apply(count_data[, mask], 2, func)` errored in that case
  # because single-column subsetting collapsed to a vector.
  count_data[, num_cols] <- lapply(count_data[, num_cols, drop = FALSE], func)
  return(count_data)
}
#' transform_metabolites
#' @description A functional to transform metabolomics data across metabolites.
#' @param count_data Metabolomics data: a data.frame with a character
#'   `Metabolite` column and numeric sample columns.
#' @param func a function to transform metabolites by. can be an anonymous function
#' @return a data.frame (with class "cpd" appended) whose numeric columns
#'   have been transformed metabolite-wise by func
#' @examples
#' data_pareto_scaled <- transform_metabolites(count_data = c57_nos2KO_mouse_countDF,
#' function(x) x/sqrt(sd(x)))
#' @export
transform_metabolites <- function(count_data,func){
  if(is.function(func)==FALSE){
    stop("func must be a function.")
  }
  if(is.data.frame(count_data)==FALSE){
    stop("count_data must be a data.frame")
  }
  # Metabolite names become rownames so they survive the transposition below.
  rownames(count_data) <- count_data$Metabolite
  # Split character (annotation) columns from numeric (abundance) columns.
  char_data_cols <- vapply(count_data, function(x) !is.numeric(x), logical(1))
  # drop = FALSE keeps a data.frame even when `Metabolite` is the only
  # character column; the original collapsed to a vector and merge() failed.
  char_data <- count_data[, char_data_cols, drop = FALSE]
  # Transpose so each metabolite is a column, then apply func metabolite-wise.
  metabo_num <- count_data[, !char_data_cols, drop = FALSE]
  metabo_num <- t(metabo_num)
  metabo_num <- apply(metabo_num, 2, func)
  # Transpose back and rejoin with the annotation columns by metabolite.
  metabo_num <- as.data.frame(t(metabo_num))
  metabo_num$Metabolite <- rownames(metabo_num)
  metabo_merge <- merge(char_data, metabo_num, by = "Metabolite")
  class(metabo_merge) <- append(class(metabo_merge), "cpd")
  return(metabo_merge)
}
|
1794ea1f47390e433eb53294fea036122a21c888
|
eeffd0498b95546f503ecda2b6ee9c95bc931190
|
/R/alignment.R
|
ea9e02fdab58e6317b967530ac7c6ccbd51c7532
|
[] |
no_license
|
nijibabulu/clustably
|
8021da139c4a1956d9b2b92915057c96035858ca
|
fbb450e07b63c7d6477372e5a7c08debee7b39e1
|
refs/heads/master
| 2020-06-18T06:39:53.594507
| 2019-07-23T17:21:34
| 2019-07-23T17:21:34
| 196,199,236
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,799
|
r
|
alignment.R
|
#' Perform a global alignment of labelings on a set of objects
#'
#' Greedily group cell labelings and recode them into a single tibble
#'
#' @param labels a list of factors with labelings
#' @param cells a character vector of all cells ids in the experiment
#'
#' @importFrom tibble as_tibble enframe
#' @importFrom dplyr any_vars arrange bind_rows desc filter_all mutate mutate_if right_join select
#' @importFrom purrr map2 map_dfc set_names
#' @importFrom forcats fct_recode
#' @importFrom tidyr crossing spread
#'
#' @return a tibble containing the recoded labelings, one row per labeling
#'   and one column per cell
alignIdents <- function(labels, cells) {
  n <- length(labels)
  # Cross-tabulate all labelings (dimension names 1..n) and sort label
  # combinations by co-occurrence count, most frequent first.
  labelTab <- table(labels, dnn=1:n) %>% as_tibble() %>% arrange(desc(n))
  # Per labeling, keep only the first occurrence of each label (later
  # duplicates become NA), then drop rows that are NA in every labeling.
  encodingTab <- labelTab %>% select(-n) %>% map_dfc(~ifelse(duplicated(.x), NA, .x)) %>% filter_all(any_vars(!is.na(.)))
  # Recode each factor so aligned labels share the same new level (the row
  # index in encodingTab); NA entries are dropped from the recoding spec.
  recodedLabels <- map2(labels, encodingTab, ~fct_recode(.x, !!!set_names(.y, 1:length(.y)) %>% na.omit()))
  # Assemble a labeling-by-cell table; the right_join against every
  # (labeling, cell) pair fills in cells missing from a given labeling.
  alignedTbl <-
    seq(n) %>%
    map(~enframe(recodedLabels[[.x]], name="cell", value="ident") %>%
          mutate_if(is.factor, as.character) %>%
          mutate(n=.x)) %>%
    bind_rows() %>%
    right_join(crossing(n=seq(n), cell=cells)) %>%
    spread(cell,ident) %>%
    select(-n)
  alignedTbl
}
#' Recode labels by an encoding of the labels after alignment
#'
#' @param labels the original labels given to to the cells
#' @param encoding the encoding given by alignLabels
#' @param cells all the cell names in the expeiriment
#'
#' @importFrom purrr map map2
#' @importFrom dplyr bind_rows right_join select
#' @importFrom tidyr spread
#' @importFrom forcats fct_recode
#'
#' @return a tbl with columns of cell names and rows of each of the replicates
#' @export
recodeLabels <- function(labels, encoding, cells) {
  n <- length(labels)
  # Indexing the encoding by "1".."n" aligns its columns with the labels
  # list, then each factor is recoded so matched clusters share a level.
  recodedLabels <- map2(labels,
                        encoding[as.character(1:length(encoding))],
                        ~fct_recode(.x, !!!set_names(.y, 1:length(.y)) %>% na.omit()))
  # Build one (cell, ident) tibble per replicate, stack them, pad with
  # every (replicate, cell) pair, and pivot to a replicate-by-cell table.
  recodedTbl <-
    seq(n) %>%
    map(~enframe(recodedLabels[[.x]], name="cell", value="ident") %>%
          mutate_if(is.factor, as.character) %>%
          mutate(n=.x)) %>%
    bind_rows() %>%
    right_join(crossing(n=seq(n), cell=cells), by=c("cell","n")) %>%
    spread(cell,ident) %>%
    select(-n)
  recodedTbl
}
# Find the top path among the pairs. Walk through each of the top pairs
# and add it if neither replicate has been seen or the one that has been
# seen has the same cluster label (i.e. do not add an incompatible link)
# this is roughly equivalent to a greedy max path through the matrix.
#
# @param info a list containing pairs - a tibble of inter-replicate label pairs sorted by frequencies
# and encoding - the current encoding of the replicates and pairs
# @param n the number of replicates in the data set
#
#' @importFrom purrr map2
#' @importFrom dplyr filter inner_join anti_join full_join group_by_all select summarize bind_rows
#' @importFrom tibble tibble
#' @importFrom tidyr separate
topPath <- function(info, n) {
  pairs <- info$pairs
  # Seed the encoding with the single most frequent inter-replicate pair.
  # NOTE(review): the loop below assumes nrow(pairs) >= 2 — confirm callers.
  encoding = tibble(rep=c(pairs[1,]$x.rep, pairs[1,]$y.rep),
                    clust=c(pairs[1,]$x.clust, pairs[1,]$y.clust))
  for(i in 2:nrow(pairs)) {
    # with() evaluates inside the row's columns, so `<<-` is required to
    # write back to `encoding` in the enclosing function environment.
    with(pairs[i,], {
      seen.x = encoding %>% filter(rep == x.rep)
      seen.y = encoding %>% filter(rep == y.rep)
      # Accept the pair only if it introduces no conflicting assignment:
      # not both replicates already encoded, and any already-encoded side
      # must carry the same cluster label.
      if(!(nrow(seen.x) && nrow(seen.y)) &&
         !(nrow(seen.x) && seen.x$clust != x.clust) &&
         !(nrow(seen.y) && seen.y$clust != y.clust)) {
        encoding <<-
          bind_rows(encoding,
                    tibble(rep=c(x.rep,y.rep),
                           clust=c(x.clust,y.clust))) %>%
          distinct()
      }
    })
    # Stop early once every replicate has an assignment.
    if(nrow(encoding) == n) { break }
  }
  # One row per path: replicates with no assignment get NA via full_join.
  encodingRow <- encoding %>% full_join(tibble(rep=as.character(1:n)), by="rep") %>% spread(rep, clust)
  info$encoding <- bind_rows(info$encoding, encodingRow)
  # Remove every pair that touches a now-consumed (replicate, cluster).
  info$pairs <- info$pairs %>%
    anti_join(encoding, by=c("x.rep"="rep", "x.clust"="clust")) %>%
    anti_join(encoding, by=c("y.rep"="rep", "y.clust"="clust"))
  info
}
#' Perform a global alignment of replicate labels.
#'
#' Greedy search for the most likely cluster relationship between replicates.
#'
#' @param labels a list of character vectors of labels named by their cell ID
#' @param verbose show progress
#'
#' @return a tibble of encodings of the clusters, one row per greedy path
#'
#' @importFrom purrr map2
#' @importFrom dplyr inner_join filter summarize progress_estimated
#' @importFrom tidyr separate
#' @importFrom stringr str_c
#'
#' @export
alignLabels <- function(labels, verbose=FALSE) {
  n <- length(labels)
  # recode the labels into a 2-column tbl with the cellID and the
  # labeling encoded as replicate_value
  labelsTbl <- map2(1:n, labels,
                    ~str_c(as.character(.x), .y, sep="_") %>%
                      set_names(names(.y)) %>%
                      enframe()) %>%
    bind_rows()
  # create a cross-classification tibble which is sorted by the number
  # of overlapping annotations between two cells, in descending order of
  # frequency; the replicate and cluster are then split back apart and
  # same-replicate pairs removed
  pairs <- inner_join(labelsTbl, labelsTbl, by="name") %>%
    filter(value.x != value.y) %>% select(-name) %>% group_by_all() %>%
    summarize(n=n()) %>% arrange(desc(n)) %>% select(-n) %>%
    separate(value.x, c("x.rep", "x.clust"), "_") %>%
    separate(value.y, c("y.rep", "y.clust"), "_") %>%
    filter(x.rep != y.rep)
  if(verbose) { pb <- progress_estimated(nrow(pairs)) }
  info <- list(pairs=pairs, encoding=tibble())
  # Repeatedly extract the best remaining path until no pairs are left;
  # topPath consumes pairs from info$pairs and appends to info$encoding.
  repeat {
    info <- topPath(info, n)
    if(verbose) {
      pb$i <- pb$n-nrow(info$pairs)
      pb$print()
    }
    if(nrow(info$pairs) == 0) { break }
  }
  if(verbose) { pb$stop() }
  info$encoding
}
|
a1d1bcc714ce9bdd2b0505e34efd2721a4cc2d5e
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_query50_1344/query09_query50_1344.R
|
a542842ddfa8571bf8dab831574b69753b8a9f72
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
query09_query50_1344.R
|
522baf8daaf5c12f22c8618452f473ec query09_query50_1344.qdimacs 4807 39775
|
f7eb5d1350f80aa9e0e94e3a5972551883699b77
|
d5967b81f2c0ae9e63f86e3396fbe07ece2ecec6
|
/cachematrix.R
|
db4faf68030a815154df20ef470e022120c7df4d
|
[] |
no_license
|
Akema1/ProgrammingAssignment2
|
3000a3a76d0c7294f39c11089c925a43411ed2ee
|
cf1d062f67364c436e6b10872f2c49d8b03531df
|
refs/heads/master
| 2021-01-17T06:45:11.191020
| 2015-11-22T02:10:30
| 2015-11-22T02:10:30
| 46,634,258
| 0
| 0
| null | 2015-11-21T21:17:34
| 2015-11-21T21:17:33
| null |
UTF-8
|
R
| false
| false
| 869
|
r
|
cachematrix.R
|
# makeCacheMatrix creates a list of 4 functions to store a matrix and a cached value of its inverse
# set the value of the matrix
# get the value of the matrix
# set the value of the inverse
# get the value of the inverse
# Closure-based cache around a matrix: `inv` memoises the inverse so that
# cacheSolve() can reuse it instead of recomputing.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # replacing the matrix invalidates any cached inverse
  }
  get <- function() x
  setInverse <- function(solve) inv <<- solve
  getInverse <- function() inv
  list(set = set, get = get,
       setInverse = setInverse, getInverse = getInverse)
}
# cacheSolve returns the inverse of a "special" matrix created with
# makeCacheMatrix, computing it with solve() only on the first call and
# serving the cached copy on subsequent calls.  `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
e5ab096483cef84fcbabac63477776eba2d2030c
|
8b20d5cd8e94b57d28b2216ea620b152dcfc375a
|
/ui/ui_visualize.R
|
1f87c9f8eed157f8fe9380c0720e75d984fc4f8a
|
[] |
no_license
|
aravindhebbali/explorer
|
6248f690ff0294ff965f65f0277ae1b19a2ed3f7
|
c1f5c63ce8206a04d64eda3572f4e139df1e09d7
|
refs/heads/master
| 2021-03-27T16:21:45.253318
| 2017-06-11T07:21:52
| 2017-06-11T07:21:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
ui_visualize.R
|
# "Visualize" tab of the Shiny UI.  Each panel of the navigation list lives
# in its own ui_*.R file; source() returns a list whose first element
# ([[1]], i.e. $value) is the UI object that file evaluates to.
tabPanel('Visualize', value = 'tab_viz', icon = icon('line-chart'),
	navlistPanel(id = 'navlist_viz',
		well = FALSE,
		widths = c(2, 10),
		source('ui/ui_bar.R', local = TRUE)[[1]],
		source('ui/ui_bar2.R', local = TRUE)[[1]],
		source('ui/ui_box.R', local = TRUE)[[1]],
		source('ui/ui_box2.R', local = TRUE)[[1]],
		source('ui/ui_hist.R', local = TRUE)[[1]],
		source('ui/ui_scatter.R', local = TRUE)[[1]],
		source('ui/ui_pie.R', local = TRUE)[[1]],
		source('ui/ui_pie3d.R', local = TRUE)[[1]],
		source('ui/ui_line.R', local = TRUE)[[1]]
	)
)
|
bffbf2908c247a0f1ec7ca6cb5e5d73e0697ec36
|
f81687e1f90efa76dad7b5dcdb6ae503be6a704d
|
/R/date_utils.R
|
ca415e4029ea5cae766fbc285b7ce9058b6cfe93
|
[] |
no_license
|
fmichonneau/sok-marine-biodiversity
|
f5c4025e39cd0926b996f853d31d0a686fe4091b
|
fbe4338739d883cbf54378d8e8ea85d2c30160bf
|
refs/heads/master
| 2022-03-11T04:02:53.629611
| 2022-02-25T15:02:10
| 2022-02-25T15:02:10
| 68,727,281
| 2
| 2
| null | 2022-02-25T15:02:11
| 2016-09-20T15:41:53
|
R
|
UTF-8
|
R
| false
| false
| 504
|
r
|
date_utils.R
|
# Extract a collection year from the free-form `datecollected` column.
# lubridate::parse_date_time tries several formats in order (year only,
# y-m-d, y-m, full ISO timestamp with zone); years outside 1850-2017 are
# treated as data-entry errors and replaced with NA.
parse_year <- function(recs) {
  recs %>%
    dplyr::mutate(parsed_date = parse_date_time(datecollected, c("Y", "ymd", "ym", "%Y-%m-%d%H:%M:%S%z"))) %>%
    dplyr::mutate(
      year = year(parsed_date),
      # sanity window: anything after 2017 or before 1850 is implausible
      year = replace(year, year > 2017 | year < 1850, NA),
      year = as.integer(year)
    ) %>%
    dplyr::select(-parsed_date)
}
# Coerce the `year` element of every record in a list of records to integer,
# leaving records without a `year` element untouched.
year_as_integer <- function(d) {
  stopifnot(is.list(d))
  coerce_one <- function(rec) {
    if (exists("year", rec)) {
      rec$year <- as.integer(rec$year)
    }
    rec
  }
  lapply(d, coerce_one)
}
|
6993524dad4719cff8151a86d276f2c5e8ee033b
|
cac943a39da206c154f41bc7e4a5c6645b9fe062
|
/runMain.R
|
27445a185fa79d970d2ce7603087effc40353cf1
|
[] |
no_license
|
CIAT-DAPA/usaid_procesos_interfaz
|
57dc6ea42f97ec57c3496eec58fae44463be6d04
|
e769be5c897ecc6073111eed32516e922e0b2cf6
|
refs/heads/main
| 2023-09-03T10:44:13.768665
| 2017-03-08T20:18:48
| 2017-03-08T20:18:48
| 83,475,773
| 2
| 3
| null | 2023-01-26T16:04:13
| 2017-02-28T20:24:50
|
Python
|
UTF-8
|
R
| false
| false
| 6,411
|
r
|
runMain.R
|
# Librerias y prerequisitos:
# . gunzip
# . R librarys
library(funr)
library(lubridate)
library(reshape)
library(stringr)
library(trend)
library(data.table)
library(tidyverse)
library(magrittr)
library(lazyeval)
library(foreach)
## DIRECTORIO PRINCIPAL
# dirCurrent <- paste0(get_script_path(), "/", sep = "", collapse = NULL)
dirCurrent <- "C:/USAID/procesos_interfaz/"
## Variables globales paquete forecast
dirForecast <- paste0(dirCurrent, "prediccionClimatica/", sep = "", collapse = NULL)
dirInputs <- paste0(dirCurrent, "inputs/", sep = "", collapse = NULL)
dirOutputs <- paste0(dirCurrent, "outputs/", sep = "", collapse = NULL)
dirPrediccionInputs <- paste0(dirInputs, "prediccionClimatica/", sep = "", collapse = NULL)
dirPrediccionOutputs <- paste0(dirOutputs, "prediccionClimatica/", sep = "", collapse = NULL)
forecastAppDll <- paste0("dotnet ", dirCurrent, "forecast_app/CIAT.DAPA.USAID.Forecast.ForecastApp.dll ", sep = "", collapse = NULL)
dir_save <- paste0(dirPrediccionInputs, "descarga", sep = "", collapse = NULL)
dir_response <- paste0(dirPrediccionInputs, "estacionesMensuales", sep = "", collapse = NULL)
dir_stations <- paste0(dirPrediccionInputs, "dailyData", sep = "", collapse = NULL)
path_save <- paste0(dirPrediccionOutputs, "probForecast", sep = "", collapse = NULL)
path_output <- paste0(dirPrediccionOutputs, "resampling", sep = "", collapse = NULL)
path_output_sum <- paste0(path_output, "/summary", sep = "", collapse = NULL)
dir_dssat <- 'C:/DSSAT46/' ## its necessary to have the parameters .CUL, .ECO, .SPE Updated for running (calibrated the crop (Maize))
dirCultivosInputs <-paste0(dirInputs, "cultivos/", sep = "", collapse = NULL)
dirCultivosOutputs <-paste0(dirOutputs, "cultivos/", sep = "", collapse = NULL)
## Variables globales modelo Maiz
dirModeloMaiz <- paste0(dirCurrent, "modeloMaiz/", sep = "", collapse = NULL)
dirModeloMaizInputs <- paste0(dirInputs, "cultivos/maiz/", sep = "", collapse = NULL)
dirModeloMaizOutputs <-paste0(dirOutputs, "cultivos/maiz/", sep = "", collapse = NULL)
## Variables globales paquete arroz
dirModeloArroz <- paste0(dirCurrent, "modeloMaiz/", sep = "", collapse = NULL)
dirModeloArrozInputs <- paste0(dirInputs, "cultivos/arroz/", sep = "", collapse = NULL)
dirModeloArrozOutputs <-paste0(dirOutputs, "cultivos/arroz/", sep = "", collapse = NULL)
# Directorio salidas permanentes que se van a almacenar mes a mes
dirResults <- "C:/USAID/procesos_dssat/usaid_procesos_interfaz/results"
if (!file.exists(file.path(dirResults))){
dir.create(file.path(dirResults))
cat (paste0('\n... directorio "',dirResults,'" creado\n\n'))
}
# Remove a directory tree if it already exists, then (re)create it empty,
# logging each step to the console (messages kept in Spanish, as emitted
# by the rest of this pipeline).
pathConstruct <- function(dirConstruct)
{
  target <- file.path(dirConstruct)
  if (file.exists(target)) {
    # Wipe the old tree before recreating it empty.
    unlink(target, recursive = TRUE, force = TRUE)
    cat(paste0('\n... directorio "', dirConstruct, '" eliminado\n'))
    dir.create(target)
    cat(paste0('... directorio "', dirConstruct, '" creado\n\n'))
  } else {
    dir.create(target)
    cat(paste0('\n... directorio "', dirConstruct, '" creado\n\n'))
  }
}
## Construyendo directorios de entrada y salida
pathConstruct(dirInputs)
pathConstruct(dirOutputs)
# predicion climatica
pathConstruct(dirPrediccionInputs)
pathConstruct(dirPrediccionOutputs)
pathConstruct(dir_save)
pathConstruct(path_save)
pathConstruct(path_output)
pathConstruct(path_output_sum)
# directorio de salida para los modelos
pathConstruct(dirCultivosOutputs)
# maiz
pathConstruct(dirModeloMaizOutputs)
# arroz
pathConstruct(dirModeloArrozOutputs)
## Descargando entradas desde la base de datos
CMDdirInputs <- paste0(gsub("/","\\\\",dirPrediccionInputs), "\\\"")
try(system(paste0(forecastAppDll,"-out -s \"prec\" -p \"",CMDdirInputs," -start 1981 -end 2013"), intern = TRUE, ignore.stderr = TRUE))
try(system(paste0(forecastAppDll,"-out -wf -p \"",CMDdirInputs," -name \"daily\""), intern = TRUE, ignore.stderr = TRUE))
CMDdirInputs <- paste0(gsub("/","\\\\",dirInputs), "\\\"")
try(system(paste0(forecastAppDll,"-out -fs -p \"",CMDdirInputs), intern = TRUE, ignore.stderr = TRUE))
# Run the crop models (maize and rice) once per configuration directory.
# `setups` is the output of list.dirs(..., full.names = TRUE); index 1 is
# the parent directory itself, hence the loop starting at 2.
# NOTE(review): 2:length(setups) misbehaves if setups has fewer than 2
# elements — confirm callers always pass a non-empty directory listing.
# Each directory name encodes station_cultivar_soil_dayrange, separated
# by underscores; the sourced call_functions.R script is expected to read
# the local variables set up here (region, name_csv, dir_climate, ...).
runCrop <- function(crop, setups) {
  for(x in 2:length(setups)){
    # Last path component = "<station>_<cultivar>_<soil>_<dayrange>".
    setSplit <- strsplit(setups[x],"/")
    longName <- setSplit[[1]][length(setSplit[[1]])]
    longNameSplit <- strsplit(longName,"_")
    hashStation <- longNameSplit[[1]][1]
    hashCrop <- longNameSplit[[1]][2]
    hashSoil<- longNameSplit[[1]][3]
    hashDayRange <- longNameSplit[[1]][4]
    cat(paste("\n\n Ejecutando modelo ", crop, " para estacion: \"", hashStation, "\" cultivar: \"", hashCrop, "\" suelo: \"", hashSoil, "\" rango de dias: \"", hashDayRange, "\"\n", sep = ""))
    if (crop == 'maiz'){
      # These locals are consumed by the sourced maize script below
      # (sourced in this frame, so it can see them) — presumably; confirm
      # against call_functions.R.
      region <- hashStation
      name_csv <- paste0(longName, ".csv", sep = "", collapse = NULL)
      dir_climate <- paste0(path_output, "/", hashStation, sep = "", collapse = NULL)
      dir_parameters <- paste0(dirModeloMaizInputs, longName, "/", sep = "", collapse = NULL)
      dir_soil <- paste0(dirModeloMaizInputs, longName, "/SOIL.SOL", sep = "", collapse = NULL)
      dir_run <- paste0(dirModeloMaizOutputs, longName, "/run/", sep = "", collapse = NULL)
      # Fresh output directories for this configuration's model run.
      pathConstruct(paste0(dirModeloMaizOutputs, longName, sep = "", collapse = NULL))
      out_dssat <- paste0(dirModeloMaizOutputs, longName, '/out_dssat', sep = "", collapse = NULL)
      pathConstruct(out_dssat)
      pathConstruct(dir_run)
      runModeloMaiz <- source(paste(dirModeloMaiz,'call_functions.R', sep = "", collapse = NULL))
      cat(crop)
    }
    if (crop == 'arroz'){
      # Rice model not wired up yet; the source() call is left disabled.
      #runModeloMaiz <- source(paste(dirModeloArroz,'call_functions.R', sep = "", collapse = NULL))
      cat(crop)
    }
  }
}
# Corrida Prediccion
runPrediccion <- source(paste(dirForecast,'01_prediccion.R', sep = "", collapse = NULL))
# Corrida Remuestreo
runRemuestreo <- source(paste(dirForecast,'02_remuestreo.R', sep = "", collapse = NULL))
## Corrida Modelo maiz
setups = list.dirs(dirModeloMaizInputs,full.names = T)
runCrop('maiz', setups)
## Corrida Modelo arroz
setups = list.dirs(dirModeloArrozInputs,full.names = T)
runCrop('arroz', setups)
# Escribiendo salidas en la base de datos
CMDdirOutputs <- paste0(gsub("/","\\\\",dirOutputs), "\\\"")
try(system(paste0(forecastAppDll,"-in -fs -cf 0.5 -p \"",CMDdirOutputs), intern = TRUE, ignore.stderr = TRUE))
|
f17b780a28821f703429b0139fe5899d8e275ec1
|
6c2029a4a11b86ff9b2d016fdeda82689840633e
|
/man/summary.expl_reg.Rd
|
77ff4038ee2ba400fe67c8bb37f8238d1cb1e7f6
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
multinormal/fhi.informed-health-choices-norway.2019
|
c0c3f3e2ae7745f3564789084e848ad50dc28509
|
e14a1ee1ebf6120a7522416bbdfe98e605964da3
|
refs/heads/master
| 2021-01-05T15:42:33.037842
| 2020-08-19T07:40:03
| 2020-08-19T07:40:03
| 241,065,111
| 0
| 0
|
NOASSERTION
| 2020-08-19T07:40:04
| 2020-02-17T09:17:32
|
R
|
UTF-8
|
R
| false
| true
| 436
|
rd
|
summary.expl_reg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_expl_reg.R
\name{summary.expl_reg}
\alias{summary.expl_reg}
\title{Summarize an instance of \code{expl_reg}}
\usage{
\method{summary}{expl_reg}(object)
}
\arguments{
\item{object}{an instance of \code{expl_reg}.}
}
\value{
a 1-row \code{tibble} that summarizes the result in a readable way.
}
\description{
Summarize an instance of \code{expl_reg}
}
|
c0a9e43d60068f73daff50326c93cdf53cd4c437
|
959b8d01689825ce765ef1f783c579c43831d9a9
|
/R학습파일/200804.R
|
012525ca3a1cf600728d57adc72bcac4afe2cd4d
|
[] |
no_license
|
leeyouhee/R2
|
9f7117e2b99f37ad1ef9bf2e4242c21468196629
|
a7f448247d81ecaea148703b4ffa2be2aaa54ea7
|
refs/heads/master
| 2022-12-10T20:41:48.616158
| 2020-09-01T03:37:10
| 2020-09-01T03:37:10
| 283,909,285
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,339
|
r
|
200804.R
|
#명사 추출
#여행지 조사(사전에 builddictionary 후 다시 추출)
install.packages('multilinguer')
library(multilinguer)
install.packages(c('stringer', 'hash', 'tau',
'Sejong', 'RSQLite','devtools'), type = 'binary')
install.packages('remotes')
remotes::install_github('haven-jeon/KoNLP', upgrade='never',
INSTALL_opts = c('--no-multiarch'))
library(KoNLP)
.libPaths()
library(rJava)
install.packages(c('RColorBrewer','wordcloud'))
library(RColorBrewer)
library(wordcloud)
#파일 불러오기
jeju_data <- readLines(con = './data/jeju.txt')
head(jeju_data,3)
#명사 추출
jdata <- sapply(jeju_data,KoNLP::extractNoun,
USE.NAMES = F)
head(jdata,3)
#단어 분리
jdata2 <- unlist(jdata)
head(jdata2,3)
jdata2
#불용어 제거
jdata3 <- gsub('제주','',jdata2)
jdata3 <-gsub('제주도','',jdata3)
jdata3 <-gsub('[0-9]+','',jdata3)
jdata3 <-gsub('오전','',jdata3)
jdata3 <-gsub('오후','',jdata3)
jdata3 <-gsub('/','',jdata3)
jdata3 <-gsub('\\.','',jdata3)
jdata3 <-gsub('-','',jdata3)
jdata3 <-gsub('?','',jdata3)
jdata3
jdata3 <- gsub('추천','',jdata3)
jdata3 <- gsub('흑돼지','',jdata3)
jdata3 <- gsub('가게','',jdata3)
jdata3 <- gsub('쪽','',jdata3)
jdata3 <- gsub('^ㅇ','',jdata3)
jdata3 <- gsub('것','',jdata3)
jdata3 <- Filter(function(x){nchar(x)>=2},jdata3)
jdata3 <-gsub('숙소','',jdata3)
jdata3 <-gsub('시간','',jdata3)
jdata3 <-gsub('여행','',jdata3)
jdata3 <-gsub('해안','',jdata3)
jdata3 <-gsub('코스','',jdata3)
jdata3 <-gsub('까지','',jdata3)
jdata3 <-gsub('드라이브','',jdata3)
jdata3 <-gsub('출발','',jdata3)
jdata3 <-gsub('예약','',jdata3)
jdata3 <-gsub('경유','',jdata3)
jdata3 <-gsub('관광지','',jdata3)
jdata3 <-gsub('일정','',jdata3)
jdata3 <-gsub('하게','',jdata3)
jdata3 <-gsub('도착','',jdata3)
###############################################################################
install.packages("stringr")
library(stringr)
place_re <- str_replace_all(cdata,'[A-z0-9]','')
place_re <- gsub('제주','',x=place_re)
head(sort(table(place_re),decreasing = T),40) #단어를 확인하여 불필요한 단어 확인
#불용어 목록 만들고, 불러오기
no <- readLines(con='./data/불용어.txt',
encoding = 'UTF-8')
for( i in 1:length(no)){
place_re <- gsub(pattern = no[i],"",x = place_re)
}
#문자길이 2개 이상 단어 추출
place_re <- Filter(function(x){
nchar(x) >= 2 #nchar()는 문자길이 추출
},place_re)
#################################################################################
#데이터 저장
write(jdata3, 'jeju_new.txt')
#데이터 다시 불러오기
jdata4 <- read.table('jeju_new.txt')
jdata4
#빈도확인
wfreq <- head(sort(table(jdata4),decreasing = T),30)
palette <- brewer.pal(9,'Set1') #RcolorBrewer에서 제공해주는 함수
wordcloud(names(wfreq),freq = wfreq,scale = c(4,0.5),
rot.per = 0.25, min.freq=1,
random.order = F,random.color = T,colors = palette)
.libPaths()
#데이터 프레임으로 변경
Jeju <- readLines(con='./data/go.txt',
encoding = 'UTF-8')
Jeju_to <- data.frame(여행지 = Jeju, 형태 = 'ncn')
#user_dic에 추가
+1. user_dic의 의미가 중요!!!!
+2. 내가 고민했던 것은 : 딕셔너리로 만든 결과와 jeju.txt에 있는 형태소를 어떻게 비교할지!
+3. 하지만 아래와 같이 제주명소로 딕셔너리를 만들었고, 이것은 KoNLP 패키지 안에 자동적으로 참조되어 비교해 줌
KoNLP::buildDictionary()
buildDictionary(user_dic = Jeju_to,
replace_usr_dic = F)
#파일 불러오기
jeju_data <- readLines(con = './data/jeju.txt')
head(jeju_data,3)
#명사 추출
Jdata <- sapply(jeju_data,KoNLP::extractNoun,
USE.NAMES = F)
Jdata
#단어 분리
jdata2 <- unlist(Jdata)
head(jdata2,3)
jdata2
#불용어 제거
jdata3 <- gsub('제주','',jdata2)
jdata3 <-gsub('제주도','',jdata3)
jdata3 <-gsub('[0-9]+','',jdata3)
jdata3 <-gsub('오전','',jdata3)
jdata3 <-gsub('오후','',jdata3)
jdata3 <-gsub('/','',jdata3)
jdata3 <-gsub('\\.','',jdata3)
jdata3 <-gsub('-','',jdata3)
jdata3 <-gsub('?','',jdata3)
jdata3
jdata3 <- gsub('추천','',jdata3)
jdata3 <- gsub('흑돼지','',jdata3)
jdata3 <- gsub('가게','',jdata3)
jdata3 <- gsub('쪽','',jdata3)
jdata3 <- gsub('^ㅇ','',jdata3)
jdata3 <- gsub('것','',jdata3)
jdata3 <- Filter(function(x){nchar(x)>=2},jdata3)
jdata3 <-gsub('숙소','',jdata3)
jdata3 <-gsub('시간','',jdata3)
jdata3 <-gsub('여행','',jdata3)
jdata3 <-gsub('해안','',jdata3)
jdata3 <-gsub('코스','',jdata3)
jdata3 <-gsub('까지','',jdata3)
jdata3 <-gsub('드라이브','',jdata3)
jdata3 <-gsub('출발','',jdata3)
jdata3 <-gsub('예약','',jdata3)
jdata3 <-gsub('경유','',jdata3)
jdata3 <-gsub('관광지','',jdata3)
jdata3 <-gsub('일정','',jdata3)
jdata3 <-gsub('하게','',jdata3)
jdata3 <-gsub('도착','',jdata3)
#데이터 저장
write(jdata3, 'jeju_new.txt')
#데이터 다시 불러오기
jdata4 <- read.table('jeju_new.txt')
jdata4
#빈도확인
wfreq <- head(sort(table(jdata4),decreasing = T),30)
?brewer.pal()
palette <- brewer.pal(6,'Dark2') #RcolorBrewer에서 제공해주는 함수
wordcloud(names(wfreq),freq = wfreq,scale = c(3,0.5),
rot.per = 0.4, min.freq=1,
random.order = F,random.color = T,colors = palette)
###################연습문제######################
1. 다음 조건에 맞게 client 데이터프레임을 생성하고 데이터를 처리해보세요
+ name : '유관순','홍길동','이순신','신사임당'
+ gender : 'F','M','M','F'
+ price : 50,65,45,75
+ 조건1 : 3개의 벡터 객체를 이용하여 client 데이터 프레임 생성
+ 조건2 : price 변수의 값이 65만원 이상이면, 문자열 'Best', 65만원 미만이면 'Normal'을 변수 result 컬럼에 추가
+ result 변수를 대상으로 빈도수 구하기
#조건 1
name <- c('유관순','홍길동','이순신','신사임당당')
gender <- c('F','M','M','F')
price <- c(50,65,45,75)
client <- data.frame(이름 = name, 성별 = gender, 가치 = price)
client
#조건 2
for(i in 1:length(price)){
ifelse(price[i]>= 65, client$result[i] <- 'Best', client$result[i] <- 'Normal')
}
client
str(client)
table(client$result)
#########################################################################
2. 다음 벡터 EMP는 '입사년도이름급여' 순으로 사원의 정보가 기록된 데이터이다.
벡터 EMP를 이용하여 다음과 같은 결과가 나타나도록 함수를 정의해보세요
(함수에 변수 EMP를 전달했을 때 출력결과와 같도록 만드시면 됩니다)
EMP <- c('2014홍길동220','2002이순신300','2010유관순260',"2019왕건500","2019동방신기1000")
<출력결과>
전체 급여 평균 : 456
평균이상 급여 수령자
왕건 => 500
동방신기 => 1000
install.packages('stringr')
library(stringr)
# Parse employee records encoded as "<year><name><salary>" strings
# (e.g. "2014홍길동220"), then print the overall mean salary followed by
# every employee whose salary is at or above that mean.
#
# Args:
#   EMP: character vector of records (4-digit year, Hangul name, salary).
# Side effects: prints a report via cat(); no meaningful return value.
cal_sal <- function(EMP){
  # logic
  #year <- str_extract(EMP,'[0-9]{1,}$') #anchor (alternative kept from the original)
  # First 4-digit run in each record = hire year.
  EMP_year <- str_extract(EMP,'[0-9]{4}')
  str_extract_all(EMP,'[가-힣]{1,}')  # result unused; kept from the original transcript
  # Hangul runs = employee names.
  EMP_name <- unlist(str_extract_all(EMP,'[가-힣]{1,}'))
  # "<name><salary>" tail of each record, then its digit run = salary.
  sal <- str_extract(EMP,'[가-힣]{1,}[0-9]{1,}')
  EMP_sal <- unlist(str_extract_all(sal,'[0-9]{1,}'))
  EMP_sal <- as.numeric(EMP_sal)
  employee <- data.frame(name = EMP_name, year = EMP_year, Salary = EMP_sal
                         ,stringsAsFactors = F) # note: keep strings, not factors!
  EMP_mean <- mean(employee$Salary)
  # Output: header, overall mean, then each at-or-above-mean earner.
  cat('<출력결과>\n')
  cat('전체 급여 평균 : ', EMP_mean, '\n')
  cat('평균 이상 급여 수령자 \n')
  for(i in 1:length(employee$name)){
    if(employee$Salary[i]>=EMP_mean){
      cat(employee$name[i], ' => ', employee$Salary[i],'\n')
    }
  }
}
###############################################################################
3. 함수 y = f(x)에서 x의 값이 a에서 b까지 변할 때 △x = b - a를 증분이라 하며,
△y = f(b) - f(a)를 y의 증분으로 표현한다.
평균변화율 : △y/△x = (f(b)- f(a))/(b - a)
조건) 함수 f(x) = x^3 + 4에서 x의 값이 1에서 3까지 변할 때 평균변화율을
구하는 함수를 작성해보세요. (평균변화율 = (f(3)-f(1))/(3-1) = 13)
# Average rate of change of f(x) = x^3 + 4 over the interval [x1, x2]:
# (f(x2) - f(x1)) / (x2 - x1). For the exercise, Chan(1, 3) == 13.
Chan <- function(x1, x2) {
  f_start <- x1^3 + 4
  f_end <- x2^3 + 4
  (f_end - f_start) / (x2 - x1)
}
#########################################################################
4. 실습 : 몬테카를로 시뮬레이션 (runif)
몬테카를로 시뮬레이션은 현실적으로 불가능한 문제의 해답을 얻기 위해서 난수의
확률분포를 이용하여 모의시험으로 근사적 해를 구하는 기법
n번 시행했을 때 동전던지기의 확률을 구하라!
# Monte Carlo coin-toss simulation: estimate the probability of "heads"
# (a uniform draw landing in [0.5, 1)) from n simulated tosses.
#
# Args:
#   n: number of tosses to simulate.
# Returns: the observed proportion of heads, in [0, 1].
mons <- function(n) {
  tosses <- runif(n)
  # Vectorized count of heads; consumes the same RNG stream as n
  # individual runif(1) draws, so results match the loop version.
  mean(tosses >= 0.5)
}
####################################강사님 풀이###############################
1.
name <- c("유관순","홍길동","이순신","신사임당")
gender <- c("F","M","M","F")
price <- c(50,65,45,75)
# 데이터 프레임 생성
client <- data.frame(NAME=name,GENDER=gender,PRICE=price)
client
# 조건2 처리
result <- ifelse(client$PRICE>=65,"Best","Normal")
result
# 조건3
table(result)
# Exercise 2 (instructor's solution).
# NOTE(review): the original line fused the exercise number "2." onto the
# function definition, which is a parse error; it is moved into this comment.
#
# Parse employee records encoded as "<year><name><salary>" strings, then
# print the overall mean salary and every employee earning at or above it.
#
# Args:
#   x: character vector of records (Hangul names, numeric year/salary).
# Side effects: prints a report via cat(); no meaningful return value.
EMP_df <- function(x){
  library(stringr)
  # Keep the "<name><salary>" tail of each record (drops the leading year).
  EMP1 <- str_extract_all(x,"[가-힣]{1,}[0-9]{1,}")
  # Hangul runs -> names (str_extract_all coerces the list to character).
  name <- str_extract_all(EMP1,"[가-힣]{1,}")
  name <- unlist(name)
  # Digit runs in the tail -> salaries (character for now).
  sal <- str_extract_all(EMP1,"[0-9]{1,}")
  sal <- unlist(sal)
  EMP0 <- data.frame(name, sal, stringsAsFactors = FALSE)
  EMP0$sal <- as.numeric(EMP0$sal)
  # Report: overall mean, then each at-or-above-mean earner.
  cat('전체 급여 평균:', mean(EMP0$sal),'\n')
  cat('평균 이상 급여 수령자','\n')
  for(i in seq_along(EMP0$sal)){
    if(EMP0$sal[i] >= mean(EMP0$sal)){
      cat(EMP0$name[i],'=>',EMP0$sal[i],'\n')
    }
  }
}
3.
# Evaluate f(x) = x^3 + 4 (vectorized over x).
#
# FIX: the original body ended in an assignment (`x <- x^3+4`), which
# returns its value invisibly; a bare expression returns the same value
# visibly, so f(2) prints 12 at the console as expected.
f <- function(x){
  x^3 + 4
}
# Average rate of change of f (defined above) over [a, b]; prints the
# result in the exercise's expected format.
mrc <- function(a, b) {
  delta_y <- f(b) - f(a)
  delta_x <- b - a
  result <- delta_y / delta_x
  print(paste('평균변화율=', result))
}
4.
# Instructor's version of the coin-toss simulation: the proportion of n
# uniform draws that land in [0.5, 1).
mon <- function(n) {
  draws <- runif(n)
  heads <- draws >= 0.5   # same classification as ifelse(a < 0.5, 0, 1)
  sum(heads) / n
}
|
972c67ce67ec02ee24aea0bd08bdee5fbd14e5e2
|
de9d448132f90f073d29add688de2fcf72527a89
|
/man/group2list.Rd
|
bdbd024f4118ae6c18236afc2bcf3acdc8cdb5bb
|
[
"MIT"
] |
permissive
|
NMikolajewicz/scMiko
|
33f137e9e3a6318fb0386506ac4666a3822463f0
|
bd00724889db265817fc54d0d50b14647d32438d
|
refs/heads/master
| 2023-06-09T05:51:30.199131
| 2023-06-04T20:23:51
| 2023-06-04T20:23:51
| 249,496,034
| 20
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 765
|
rd
|
group2list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_functions.R
\name{group2list}
\alias{group2list}
\title{Named list of cells grouped by meta feature from Seurat object}
\usage{
group2list(object, group = "seurat_clusters", is.num = F, prefix = "")
}
\arguments{
\item{object}{Seurat object}
\item{group}{grouping feature (must be present in `object` meta data). Default is "seurat_clusters".}
\item{is.num}{whether `group` is a numerical feature.}
\item{prefix}{prefix added to each named entry in list. Default is "".}
}
\value{
Returns named list of cells grouped by meta feature from Seurat object.
}
\description{
Returns named list of cells grouped by meta feature from Seurat object.
}
\author{
Nicholas Mikolajewicz
}
|
476be3fb3194223ec4268f028b97e63dc620571d
|
1fc5725383d5a594a97824c2a2c1eb3224dda916
|
/man/ifelse_pipe.Rd
|
6f54c9c332b449fe2e9f0bb72f145a0544e7aea6
|
[] |
no_license
|
stemangiola/ARMET
|
61fa21aec29a21a3450e778ace702acb68719741
|
dd5ea830c14634b82cde26e101b375bdb94580ab
|
refs/heads/master
| 2022-07-16T16:55:55.381918
| 2022-07-06T08:06:24
| 2022-07-06T08:06:24
| 120,414,626
| 3
| 1
| null | 2022-05-24T01:44:56
| 2018-02-06T06:56:54
|
R
|
UTF-8
|
R
| false
| true
| 685
|
rd
|
ifelse_pipe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{ifelse_pipe}
\alias{ifelse_pipe}
\title{This is a generalisation of ifelse that accepts an object and returns an object}
\usage{
ifelse_pipe(.x, .p, .f1, .f2 = NULL)
ifelse_pipe(.x, .p, .f1, .f2 = NULL)
}
\arguments{
\item{.x}{A tibble}
\item{.p}{A boolean}
\item{.f1}{A function}
\item{.f2}{A function}
\item{input.df}{A tibble}
\item{condition}{A boolean}
}
\value{
A tibble
A tibble
}
\description{
This is a generalisation of ifelse that accepts an object and returns an object
This is a generalisation of ifelse that accepts an object and returns an object
}
\keyword{internal}
|
2de0f62950d55e76a79c4f2bba6fd44e4417bdcd
|
08f3b72fabbab22bfbd90eb6b3984dc85cb971d8
|
/R/offset_latlon_by_meter.R
|
0a11b50d23a439b8fa66cebb0d6443c235076143
|
[
"MIT"
] |
permissive
|
benmack/lucas
|
8408801bbfcdcb7efd95d3fb8fb706b043218641
|
a4c3376455653bf0a307456b48a91831107b7883
|
refs/heads/master
| 2021-01-20T10:32:18.399915
| 2019-04-07T10:01:12
| 2019-04-07T10:01:12
| 66,085,066
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
offset_latlon_by_meter.R
|
#' Offset latitude/longitude coordinates by a distance in meters
#'
#' @param lat latitude(s) of the starting point(s) in decimal degrees
#' @param lon longitude(s) of the starting point(s) in decimal degrees
#' @param de eastward offset (in meter)
#' @param dn northward offset (in meter)
#'
#' @return A two-column matrix (lat, lon) with the shifted coordinates.
#' @export
#' @seealso http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
offset_latlon_by_meter <- function(lat, lon, de=100, dn=100) {
  # Spherical Earth radius in meters.
  earth_radius <- 6378137
  # Angular offsets in radians; the longitude step shrinks with cos(latitude).
  lat_offset_rad <- dn / earth_radius
  lon_offset_rad <- de / (earth_radius * cos(pi * lat / 180))
  # Convert back to decimal degrees and apply to the starting coordinates.
  new_lat <- lat + lat_offset_rad * 180 / pi
  new_lon <- lon + lon_offset_rad * 180 / pi
  cbind(lat = new_lat, lon = new_lon)
}
|
a19152d8ecace07487cd0c66725a23db2bde660e
|
e5a65dbebf3eb475e289040bfd70552785339e34
|
/plot4.R
|
bbb4741ca2a6d8b093ac9c8e52f956f30d76dab1
|
[] |
no_license
|
jallred/ExData_Plotting1
|
df6073455a234035fd0a380bc4ec551b76764724
|
d714e4f6d667ceedb4d1700ba9ca05c04753983c
|
refs/heads/master
| 2021-01-16T19:16:21.782903
| 2015-02-09T02:18:18
| 2015-02-09T02:18:18
| 30,211,197
| 0
| 0
| null | 2015-02-02T22:05:19
| 2015-02-02T22:05:19
| null |
UTF-8
|
R
| false
| false
| 1,127
|
r
|
plot4.R
|
# plot4.R — draw a 2x2 panel of household power-consumption plots for
# 2007-02-01/02 and save it as plot4.png.
library(datasets)
# Read only the two target days; the header row is skipped along with the
# preceding data, so column names are assigned manually below.
t <- read.table("household_power_consumption.txt", skip = 66637, nrows = 2880, sep = ";")
colnames(t) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Combine date and time into a single POSIX timestamp stored in Date.
t$Date <- strptime(paste(t$Date, t$Time), "%d/%m/%Y %H:%M:%S")
# Open the PNG device.
png(file = "plot4.png")
# 2x2 layout, panels filled column-by-column.
par(mfcol = c(2, 2))
# Top left: global active power over time.
plot(t$Date, t$Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")
# Bottom left: the three sub-metering series plus a legend.
plot(t$Date, t$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(t$Date, t$Sub_metering_2, type = "l", col = "red")
lines(t$Date, t$Sub_metering_3, type = "l", col = "blue")
# BUG FIX: the legend previously used `names(data[7:9])`, but `data` is an
# undefined object here (the base function), so subsetting it errors at
# runtime. The sub-metering column names live in `t`.
legend("topright", box.lwd = 0, legend = names(t)[7:9], lty = 1,
       col = c("black", "red", "blue"), cex = 0.7)
# Top right: voltage.
plot(t$Date, t$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
# Bottom right: global reactive power.
plot(t$Date, t$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
# Close the device.
dev.off()
|
ceb435962916e9c943c71c8814ee5457992d907a
|
8d274be5f5624f442cff46a76ea160fb87e7d8b2
|
/src/draw_strain_pair_hgt_network.R
|
dd4017bcb176f9f206a5f0640ea1f825d5fc397a
|
[
"MIT"
] |
permissive
|
tauqeer9/RecentHGT
|
aa2a372a291b044936f4d8585f4782af7475fc9a
|
87ef638050a15d6df399f8987cea132be33c7591
|
refs/heads/master
| 2021-04-18T04:25:39.111881
| 2018-11-13T13:16:25
| 2018-11-13T13:16:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 893
|
r
|
draw_strain_pair_hgt_network.R
|
# Build and plot a circular network of horizontal-gene-transfer (HGT) events
# between strain pairs, with edge width proportional to event count.
library(igraph)
# Edge list (from, to, count column) and node attributes from text files.
links <- read.csv("trimmed_strain_pair_hgts.txt", header=T, as.is=T)
nodes <- read.csv("strains_network_nodes.txt", header = T, as.is=T)
# Sum the third column over duplicate (from, to) pairs, then sort edges.
links <- aggregate(links[,3], links[,-3], sum)
links <- links[order(links$from, links$to),]
colnames(links)[4] <- "weight"
rownames(links) <- NULL
# Undirected graph; drop self-loops but keep parallel edges.
net <- graph_from_data_frame(d=links, vertices=nodes, directed=F)
net <- simplify(net, remove.multiple = F, remove.loops = T)
# Color vertices by strain.location (indexes into this two-color palette;
# presumably 1 = source, 2 = native — see the legend below).
colrs <- c("gray50", "tomato")
V(net)$color <- colrs[V(net)$strain.location]
# Edge width scaled down so heavy edges stay readable.
E(net)$width <- E(net)$weight/60
par(mar=c(0,0,0,0))
# Circular layout with colored labels instead of vertex shapes.
plot(net, vertex.shape="none", vertex.label=V(net)$strain.name,
     vertex.label.font=2, vertex.label.color=V(net)$color, vertex.label.cex=1,
     edge.color="gray80", layout=layout.circle)
legend(x=1.0, y=1.0, c("Source strains","Native strains"), pch=22,
       col="#777777", pt.bg=colrs, pt.cex=1.5, cex=0.9, bty="n", ncol=1)
|
26e726e7b2b86d01dbff044914a1b91ca042f8a2
|
01dc5196de85da11065f1bac96cbf798e436221d
|
/R/read_nifti_batch.R
|
6a42242c6e261ba200f3acac7ebc0dad94d15311
|
[] |
no_license
|
neuroimaginador/utils4ni
|
841f6c95657c71a059223e6f526cb9f3856e0eae
|
7aab7022345e9d33d8570b591251a6a788d40d73
|
refs/heads/master
| 2020-04-04T19:16:08.263669
| 2018-12-03T19:16:46
| 2018-12-03T19:16:46
| 156,198,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 315
|
r
|
read_nifti_batch.R
|
#' @title Read Multiple Files
#'
#' @description Reads a batch of NIfTI files, one call per path.
#'
#' @param file_list (list) Paths of the files to import.
#'
#' @return A list with one entry per file, in the same order as
#'   \code{file_list}.
#'
#' @export
#'
read_nifti_batch <- function(file_list) {
  lapply(file_list, function(path) read_nifti_to_array(path))
}
|
f5959b52f099f0db11025af2b920e890ca5c071e
|
8b8b10d0d42d1200664b3a96e999e3d92d249434
|
/man/MaxentVariableSelection-package.Rd
|
9ae3097b520cf76a9d6b13862ba913374b53b870
|
[] |
no_license
|
tim-salabim/MaxentVariableSelection
|
fd69776dc46d1e4ac3d0e00c2713349fe55e4c10
|
cb2fc28332d4e36c301b590b2b84c6bcf93a72b4
|
refs/heads/master
| 2021-01-16T20:40:18.095055
| 2016-06-20T19:00:43
| 2016-06-20T19:00:43
| 61,561,450
| 0
| 0
| null | 2016-06-20T16:07:01
| 2016-06-20T16:07:00
|
R
|
UTF-8
|
R
| false
| false
| 3,055
|
rd
|
MaxentVariableSelection-package.Rd
|
\name{MaxentVariableSelection-package}
\alias{MaxentVariableSelection}
\docType{package}
\title{Selecting the Best Set of Relevant Environmental Variables along with the
Optimal Regularization Multiplier for Maxent Niche Modeling}
\description{Complex niche models show low performance in identifying
the most important range-limiting environmental variables and in
transferring habitat suitability to novel environmental conditions
(Warren and Seifert, 2011; Warren et al., 2014). This package helps to
identify the most important set of uncorrelated variables and to
fine-tune Maxent's regularization multiplier. In combination, this
allows to constrain complexity and increase perforance of Maxent niche
models (assessed by information criteria, such as AICc (Akaike, 1974) ,
and by the area under the receiver operating characteristic (AUC)
(Fielding and Bell, 1997). Users of this package should be familiar with
Maxent niche modelling.}
\details{
\tabular{ll}{
Package: \tab MaxentVariableSelection\cr
Type: \tab Package\cr
Version: \tab 1.NA.NA\cr
Date: \tab 2016-06-20\cr
Depends: \tab R (>= 3.1.2)\cr
Imports: \tab ggplot2, raster\cr
Suggests: \tab knitr, rmarkdown\cr
VignetteBuilder: \tab knitr\cr
License: \tab GPL (>= 2)\cr
Literature: \tab Akaike H (1974)\cr
\tab A new look at the statistical model identification\cr
\tab \emph{IEEE Transactions on Automatic Control} \bold{19}:6 716--723.\cr
\tab \cr
\tab Fielding AH and Bell JF (1997)\cr
\tab A review of methods for the assessment of prediction\cr
\tab errors in conservation presence/absence models\cr
\tab \emph{Environmental Conservation} \bold{24}:1 38--49.\cr
\tab \cr
\tab Jimenez-Valverde A (2012)\cr
\tab Insights into the area under the receiver operating characteristic curve\cr
\tab (AUC) as a discrimination measure in species distribution modelling\cr
\tab \emph{Global Ecology and Biogeography} \bold{21}:4 498--507.\cr
\tab \cr
\tab Tyberghein L, Verbruggen H, Pauly K, Troupin C, Mineur F and {De Clerck}, O (2012)\cr
\tab Bio-ORACLE: a global environmental dataset for marine species distribution modelling\cr
\tab \emph{Global Ecology and Biogeography} \bold{21}:2 272--281.\cr
\tab \cr
\tab Warren DL, Glor RE, and Turelli M (2010)\cr
\tab ENMTools: a toolbox for comparative studies of environmental niche\cr
\tab models\cr
\tab \emph{Ecography} \bold{33}:3 607--611.\cr
\tab \cr
\tab Warren DL and Seifert SN (2011)\cr
\tab Ecological niche modeling in Maxent: the importance of model\cr
\tab complexity and the performance of model selection criteria\cr
\tab \emph{Ecological Applications} \bold{21}:2 335--342.\cr
}
}
\author{
Alexander Jueterbock
Maintainer: Alexander Jueterbock, <Alexander-Jueterbock@web.de> }
\section{Citation}{To cite the package 'MaxentVariableSelection' in publications use:\cr\cr
Jueterbock A, Smolina I, Coyer JA and Hoarau, G (2016)\cr
The fate of the Arctic seaweed \emph{Fucus distichus} under climate change:\cr
an ecological niche modelling approach\cr
\emph{Ecology and Evolution} \bold{6}(6), 1712-1724}
\keyword{package}
|
159b91ef1592e109a58aacc405d21575705dfc72
|
4fa1c9b43411b719d9051732af48d9c910dcd3ac
|
/R/utils.R
|
3f8e009b7ea6742b740d75b9c9ec781028b432fe
|
[] |
no_license
|
lian0090/BGData
|
df317c64830231b0e7843644642436c9303a8350
|
1e6ea1f73d46bf718a50bff5397d6b24a628f35c
|
refs/heads/master
| 2020-12-11T08:00:34.991075
| 2015-05-14T17:20:02
| 2015-05-14T17:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,121
|
r
|
utils.R
|
#' Computes a genomic relationship matrix G=xx'.
#'
#' Offers options for centering and scaling the columns of x before computing
#' xx'. If \code{centerCol=FALSE}, \code{scaleCol=FALSE} and
#' \code{scaleG=FALSE}, \code{getG} produces the same outcome as
#' \code{tcrossprod}.
#'
#' @param x matrix, ff_matrix, rmmMatrix or cmmMatrix
#' @param nChunks The number of column chunks that are processed one at a time
#'   (keeps memory bounded for file-backed matrices).
#' @param scaleCol TRUE/FALSE whether columns must be scaled before computing
#'   xx'.
#' @param scaleG TRUE/FALSE whether G is divided by the mean of its diagonal
#'   after accumulation.
#' @param verbose TRUE/FALSE whether per-chunk progress messages are printed.
#' @param i (integer, boolean or character) Indicates which rows should be used.
#'   By default, all rows are used.
#' @param j (integer, boolean or character) Indicates which columns should be
#'   used. By default, all columns are used.
#' @param minVar Columns with variance below this threshold are dropped before
#'   the cross-product (applies only when \code{scaleCol=TRUE}).
#' @return A positive semi-definite symmetric numeric matrix.
#' @export
getG<-function(x,nChunks=ceiling(ncol(x)/1e3),scaleCol=TRUE,scaleG=TRUE,verbose=TRUE,i=1:nrow(x),j=1:ncol(x),minVar=1e-5){
  nX<-nrow(x); pX<-ncol(x); centerCol=TRUE # if this is made a parameter the imputation of NAs needs to be modified.
  # Convert boolean indices to integer positions (more efficient subsetting
  # than booleans, per the original author's note).
  if(is.logical(i)){ i<-which(i) }
  if(is.logical(j)){ j<-which(j) }
  n<-length(i); p<-length(j)
  # Bounds checks on the requested rows/columns.
  if(n>nX|p>pX){ stop('Index out of bounds')}
  if(is.numeric(i)){ if( (min(i)<1)|(max(i)>nX)){ stop('Index out of bounds') }}
  if(is.numeric(j)){ if( (min(j)<1)|(max(j)>pX)){ stop('Index out of bounds') }}
  # Probe two columns to recover the selected row count and row names for G.
  tmp<-x[i,1:2]
  n<-nrow(tmp)
  G<-matrix(0,nrow=n,ncol=n)
  rownames(G)<-rownames(tmp)
  colnames(G)<-rownames(G)
  end<-0;
  delta<-ceiling(p/nChunks);  # number of columns per chunk
  # Accumulate G as the sum over chunks of X_chunk %*% t(X_chunk).
  for(k in 1:nChunks){
    ini<-end+1;
    if(ini<=p){
      end<-min(p,ini+delta-1)
      if(verbose){
        cat("Chunk: ",k," (markers ", ini,":",end," ~",round(100*end/p,1),"% done)\n",sep="");
        cat(" =>Acquiring genotypes...\n")
      }
      # subset: pull this chunk's columns into memory.
      tmp<-j[ini:end]
      X=x[i,tmp,drop=FALSE];
      if(scaleCol){
        # Drop (near-)constant columns, which would produce NaN when scaled.
        VAR<-apply(X=X,FUN=var,MARGIN=2,na.rm=TRUE)
        tmp<-which(VAR<minVar)
        if(length(tmp)>0){
          X<-X[,-tmp]
          VAR<-VAR[-tmp]
        }
      }
      if(ncol(X)>0){
        if(verbose){ cat(" =>Computing...\n") }
        if(centerCol|scaleCol){
          X<-scale(X,center=centerCol,scale=scaleCol)
        }
        # After centering, setting missing entries to 0 imputes them with
        # the column mean, so they contribute nothing to the cross-product.
        TMP<-is.na(X)
        if(any(TMP)){ X<-ifelse(TMP,0,X) }
        G<-G+tcrossprod(X)
      }
    }
  }
  # Optional normalization so that mean(diag(G)) == 1.
  if(scaleG){
    tmp<-mean(diag(G))
    G<-G/tmp
  }
  return(G)
}
#' Generate and store a simulated plaintext raw PED file (see \code{--recodeA}
#' in PLINK) or PED-like file for testing purposes.
#'
#' @param filename The path where to save the generated file.
#' @param n The number of observations to generate.
#' @param p The number of markers to generate.
#' @param genoChars The alphabet used to generate the genotypes.
#' @param na.string The symbol used to denote missing values.
#' @param propNA The probability of generating NAs.
#' @param returnGenos Whether to return the genotypes from the function.
#' @export
simPED <- function(filename, n, p, genoChars = 0:2, na.string = NA,
                   propNA = .02, returnGenos = FALSE) {
  # Refuse to clobber an existing file.
  if (file.exists(filename)) {
    stop(paste('File', filename, 'already exists. Please move it or pick a different name.'))
  }
  markerNames <- paste0('mrk_', seq_len(p))
  subjectNames <- paste0('id_', seq_len(n))
  if (returnGenos) {
    OUT <- matrix(nrow = n, ncol = p, NA)
    colnames(OUT) <- markerNames
    rownames(OUT) <- subjectNames
  }
  fileOut <- file(filename, open = 'w')
  # FIX: close the connection even if a write fails part-way through;
  # the original leaked the connection on error.
  on.exit(close(fileOut), add = TRUE)
  pedP <- 6 + p
  # Standard six PED phenotype columns, then one column per marker.
  header <- c(c('FID', 'IID', 'PAT', 'MAT', 'SEX', 'PHENOTYPE'), markerNames)
  write(header, ncolumns = pedP, append = TRUE, file = fileOut)
  for (i in seq_len(n)) {
    geno <- sample(genoChars, size = p, replace = TRUE)
    # Knock out genotypes with probability propNA.
    geno[runif(p) < propNA] <- na.string
    pheno <- c(0, subjectNames[i], rep(NA, 4))
    write(c(pheno, geno), ncolumns = pedP, append = TRUE, file = fileOut)
    if (returnGenos) {
      OUT[i, ] <- geno
    }
  }
  if (returnGenos) {
    return(OUT)
  }
}
# Generate a random 5-character alphanumeric identifier (digits plus
# lower- and upper-case letters).
randomString <- function() {
  chars <- sample(c(0:9, letters, LETTERS), size = 5, replace = TRUE)
  paste(chars, collapse = "")
}
# Normalize a "type" specification: if `val` is a non-empty character vector
# naming a valid vector mode (e.g. "integer", "double"), return a
# zero-length vector of that mode; otherwise return `val` unchanged (its
# type already carries the information).
normalizeType <- function(val) {
  type <- typeof(val)
  # Detect strings that may name a type.
  if (type == 'character' && length(val) > 0) {
    # Convert to that mode if the name is valid; vector() errors otherwise.
    convert <- try(vector(mode = val), silent = TRUE)
    # FIX: use inherits() instead of class(x) == 'try-error' (the comparison
    # breaks when an object has multiple classes).
    if (inherits(convert, 'try-error')) {
      # Fall back to character if the string is not a valid mode.
      # (Message typo "could no" fixed.)
      warning('could not convert type, using character instead')
      character()
    } else {
      convert
    }
  } else {
    # Value doesn't contain type information; return as-is.
    val
  }
}
|
437ce835010c57a551e31a5e94f08a747063c774
|
9dfc302b8e5dd1b1298dbc89873e80348ac7f954
|
/data/create_alc.R
|
37407d4231fe9ba0c2f1c600e37043800df2bace
|
[] |
no_license
|
Jonharju/IODS-project
|
14728455a741a6e7442336421e375fb1dfc37f19
|
c97ff8be442114e6af4138e2871b21f028ed25ec
|
refs/heads/master
| 2021-01-13T04:37:14.384031
| 2017-02-24T21:30:53
| 2017-02-24T21:30:53
| 79,477,561
| 0
| 0
| null | 2017-01-19T17:32:40
| 2017-01-19T17:32:39
| null |
UTF-8
|
R
| false
| false
| 1,426
|
r
|
create_alc.R
|
# Jonas Harjunpää 04.02.2017
# This file contains the wrangled data of a study about student alcohol consumption
# from here https://archive.ics.uci.edu/ml/datasets/STUDENT+ALCOHOL+CONSUMPTION
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# consider relative paths or here::here() instead.
setwd("C:/Users/Jonas/Documents/GitHub/IODS-project/data")
getwd()
# read the math class questionaire data into memory
math <- read.csv("student-mat.csv", sep = ";" , header=TRUE)
# read the portuguese class questionaire data into memory
por <- read.csv("student-por.csv", sep = ";", header = TRUE)
# look at the structure and dimensions of both data
str(math)
dim(math)
str(por)
dim(por)
library(dplyr)
# Background variables shared by both questionnaires; students are matched
# on all of these columns.
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
# Keep only students answering both questionnaires; duplicated answer
# columns get ".math"/".por" suffixes.
math_por <- inner_join(math, por, by = join_by, suffix=c(".math",".por"))
str(math_por)
dim(math_por)
# Start the combined data from the shared identifier columns.
alc <- select(math_por, one_of(join_by))
# Columns answered separately in each questionnaire.
notjoined_columns <- colnames(math)[!colnames(math) %in% join_by]
notjoined_columns
# For each duplicated answer column: average the two numeric answers
# (rounded), or take the math-questionnaire answer for non-numeric ones.
for(column_name in notjoined_columns) {
  two_columns <- select(math_por, starts_with(column_name))
  first_column <- select(two_columns, 1)[[1]]
  if(is.numeric(first_column)) {
    alc[column_name] <- round(rowMeans(two_columns))
  } else { # else if it's not numeric...
    alc[column_name] <- first_column
  }
}
# Average weekday (Dalc) and weekend (Walc) alcohol use, and flag
# high consumption (> 2 on the combined scale).
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
alc <- mutate(alc, high_use = alc_use > 2)
glimpse(alc)
write.csv(alc, file = "alc.csv", row.names=FALSE)
|
adf0b9868c15be513cf4a7e9960423306086cfb3
|
86d388a76b1debbdfbec5de7cd7c61d1f248cc04
|
/ShinyApp/ui.R
|
afcd0b3e183d808deb6483c9b017c670338e3bb4
|
[] |
no_license
|
info370/project-teamname-v2
|
f72ce6d2015d116801cc9136e5b3212488468e8b
|
b649da1978894d6f7841832d6757b28e41b04ab8
|
refs/heads/master
| 2021-08-29T16:02:05.840312
| 2017-12-14T07:56:27
| 2017-12-14T07:56:27
| 107,476,645
| 0
| 1
| null | 2017-12-14T05:44:19
| 2017-10-19T00:04:25
|
HTML
|
UTF-8
|
R
| false
| false
| 17,730
|
r
|
ui.R
|
library("shiny")
library(plotly)
library(shinythemes)
library(tidyverse)
require("maps")
library(geosphere)
library(stringr)
library(rgdal)
library(caret)
library(lubridate)
# library(maptools)
if (!require(ggmap)) { install.packages('ggmap'); require(ggmap) }
library(ggmap)
install.packages
here_long <- -122.3095
here_lat <- 47.6560
seattle = get_map(location = c(here_long, here_lat), zoom = 13, maptype = 'roadmap')
ui <- navbarPage("Walk Safely at UW!",
# define widgets
theme = shinytheme("yeti"), # Set theme
tabPanel("About",
mainPanel(
tags$h2('Our Project'),
tags$p(
"Our project set out to study what influences the risk of crime to a pedestrian in Seattle's U-District. As
students, we're often faced with the task of walking home safely, and as anyone around this area will tell you,
that is sometimes easier said than done. Our goal was to analyze years of Seattle City Police Department 911 call
data to attempt to find patterns of crimes against pedestrians in order to lend advice to those wishing to avoid areas with
a history of danger."
),
tags$h2('What we did'),
tags$p(
"In order to study the effects of a variety of conditions on crime in the U-District area, we had to decide what was relevant
to someone attempting to walk from point A to point B in worrisome areas. We reasoned that that the following were important
factors to consider when doing analysis:",
tags$ul(
tags$li('Type of crime'),
tags$li('Time of Day'),
tags$li('Police responsiveness'),
tags$li('Proximity to public services')
),
"To address the first factor, type of crime, we decided only to look at crimes likely to directly put a pedestrian in harm's
way. Such crimes include muggings, assaults, hazards, and robberies. We faced unique challenges in handling the other factors
, the details of which are outlined in the other sections of this app."
),
tags$h2('How to use this app'),
tags$p(
"Our results are seperated into three tabs, which you can see at the top of this page. We encourage you to click on any one
of them to learn more about how we examine the factors of risk we identified."
)
)
),
tabPanel("Time of Day",
titlePanel("Results of Testing by Time of Day"),
sidebarLayout(
sidebarPanel(style = "position:fixed;width:inherit;",
selectInput("time.of.day", "Select Time",
list("Morning", "Mid Day", "Afternoon", "Evening", "Night", "Early Morning")
),
width = 2),
mainPanel(
style = "padding-left: 2cm;",
p("Time is a huge factor when discussing pedestrian safety, or so we're told.
Common wisdom states that night time is more dangerous than day time, but is this even true?
When do crimes get reported, and how does that change where the centers of crime are located?
Here, we look at a year's worth of SPD data in order to gain some insight."),
plotOutput("Time.one"),
p("First of all, we want to make sure we use as much data
as possible. Using reports for all years
possible would be ideal, but that data set is
too large to handle or interpret easily. Instead, we will
use just the past year's worth, from November 1st,
2016 to October 31st, 2017. That gives us
a full year's worth of data to look at, and its far
enough in the past from today that we can ensure most,
if not all, incidences will be closed (and therefore
included in the data set)."),
p("Before we go any further though, it is important we determine whether or not the time of year has any meaningful effect on the number of observations we have to work with. If several months have much higher crime rates than others, it may skew the results of any analysis we attempt. With that in mind, let's take a look at the distribution of crimes for each month in the last year."),
plotOutput("Time.two"),
p("As we can see, there isn't much varience in the frequency of reported crimes in the UW area for the past year. We can use a Kruskal-Wallis to test the independence of Month and Number of Crimes, which should indicate whether there is a relationship between them or not."),
verbatimTextOutput("kruskal"), # textOutput would get rid of formatting, makes it messy
p("Since our null hypothesis, that the count of crimes for each month is consistent, was given a p-value of 0.4433, we can confidently reject it and state that there is no dependence between the month of year and the number of crimes reported during it."),
p("Next, we can look at the distribution of crimes across the categories we've defined. If there's no variation there, we can continue to look at the data set as a whole, but if there's significant variation, we'll need to handle things a bit more carefully. Here we see a histogram of the Event Clearance Descriptions (what the reported crime was classified as in the SPD's computer system)."),
plotOutput("Time.three"),
p("We see here that there are significant differences in the types of crimes that are reported, with Crisis Complaints comprising a large number of them. This could simply be due to a large number of mental health crisises in the U-District. More likely, however, officers are unsure of what to classify a crime as in an incident report and they simply choose a catch all category that comes closest to describing what happened. Regardless, this uneven distribution means that when it comes time to perform clustering, we'll need to be careful to account for the significant weight these crimes will inflict upon the cluster centers."),
p("We can bucket the data into 6 time frames to look at how reported crime changes throughout the day (Use widget to change time)."),
plotOutput("Time.four"),
p("We can see that crimes reported are generally normally distributed around the afternoon. This would suggest that the highest rate of crime is during the day, or that there are less people reporting crimes at night. Which one is true isn't possible to infer from this data."),
plotOutput("Time.five"),
p("Clustering reveals that the average reported crime doesn't stray too far from the Ave. Some time frames have somewhat lower densities (Use widget to change time)."),
plotOutput("Time.six.a"),
plotOutput("Time.six.b"),
p("The fact that Crisis Complaints outnumber every other description and its not very specific is skewing our analysis. Let's remove those crimes and try again."),
p("Crimes within the last year (No Crisis Complaints)"),
plotOutput("Time.seven.a"),
p("Count of crimes without Crisis Complaints"),
plotOutput("Time.seven.b"),
p("Clustering not factoring in Crisis Complaints"),
plotOutput("Time.seven.c"),
p("Clustering density not factoring in Crisis Complaints"),
plotOutput("Time.seven.d"),
p(
"In conclusion, there is variance in reported crime throughout the day. The highest time of activity seems to be around
mid day, which might be contrary to popular belief. It's still yet to be determined what effect less people around during night
time has on rate of crime reporting. Intuition should suggest that if there are less people awake to witness crimes, there's
a good chance less crime will be reported. This could explain why there are more reported crimes during the day, but it
would be interesting to study this in further detail at a later date. For now, we find it advisable to continue being
vigilant during all hours of the day."
),
p(
"From our clustering analysis we can see that the area between 42nd and 50th, Brooklyn to 15th is the hottest spot for
reported pedestrian crimes. While there are outliers that may change throughout the day, most crimes tend to be reported
in this area. Furthermore, it appears that during the afternoon and early morning, most crimes take place on 45th Avenue
specifically. It is advisable to be extra careful on this part of campus during these hours.
"
),
width = 8
)
)
),
tabPanel("Clearance Time",
titlePanel("Results of Testing by Clearance Time"),
mainPanel(
style = "padding-left: 2cm;",
p(
"Now, we will explore the clearance time for these crimes. Clearance time is calculated to be the
difference between the time that that police arrive at the scene and the time at which the crime has
officially been cleared by the Seattle Police Department. Shown below, we have a map of the University District
and the various data points representing crime that has occurred in the past year. The colors represent the
length of the clearance time.
"
),
img(src="Clearance_Map.png"),
p(
"We want to see the general distribution of the clearance times, so we will use a scatterplot to analyze
the distribution of the data."
),
img(src='Clearance_Plot_Skewed.png'),
p(
"By taking a look at the scatterplot, it can be inferred
that a few crimes that take more than 20,000 minutes to clear (almost 2 weeks) are skewing the data.
Let's take a look at this data and see what kind of crimes are associated with such long clearance times,
and, where these crimes are located."
),
img(src="Clearance_Plot.png"),
p(
"By taking a look at the new scatter plot with the filtered data, we can see that crimes with clearance code
350 (crimes under the Event Clearance Group, \"Hazards\") seem to be taking the longest time. We will filter
the crimes under the Event Clearance Group, Hazards, out of the data frame. We can take a look at the new map of
data in University District."
),
img(src="Clearance_Map_v2.png"),
p(
"Now, we can attempt to cluster the data and see if we can make some findings out of the clusters."
),
img(src="Cluster_Map.png"),
p(
"There were only two clusters that came out of the clustering algorithm in terms of
clearance times. The first cluster was close to NE 50th Avenue and represented a clearance time of nearly 40000
minutes, a clearance time of more than 27 days. The second cluster was towards Wallingford and had a clearance
time of about 20000 minutes, about 14 days. Unfortunately, a little amount of information can be inferred using
these clusters. Clearance Time is typically not the best form of data that one can use in terms of this decision
context, but hopefully with more data, we can look more into this form of data and that it can allow for safer
walking."
)
)
),
tabPanel("Proximity",
style = "padding-left: 2cm",
h2("Results of Testing by Proximity to Public Services"),
mainPanel(
p(
"Another one of the factors we investigated was whether or not proximity to public services, such as transport services like bus stops, had an impact on the frequency of crime in an area."
),
p("
To perform this analysis, we cross-referenced data between our filtered down crime dataset and Google Transit data regarding bus stops in Seattle. To begin with, a map showing all bus stops in our considered region is depicted below."
),
img(src='Bus_Stops.png'),
p("
As one would expect, the points on the map are aligned with the city's streets."
),
p("
Next, we defined 'dangerous bus stops' to be any stop with at least one criminal incident reported within 20 meters. Of the 297 stops found within 2600 meters of Red Square, 49 fell under this category. A histogram depicting frequency by proximity to stops may be seen here:
"),
img(src='Bus_Stops_Histogram.png'),
p("
With this, we can see that the majority of stops are likely to be safe - however, there are several outliers that have significantly higher rates of crime around them than the rest of the data set. While a mapping of all 49 stops featuring criminal incidents within 20 meters may look like this:
"),
img(src='Dangerous_Bus_Stops.png'),
p("
A mapping of the stops with the highest frequency of incident reports, including at least one per month in the dataset we have, will look like this:
"),
img(src='Most_Dangerous_Stops.png'),
p("
And these results are in line with the results of the clustering anaylsis we performed, which suggested that the area up and down University Way right next to U.W. campus is the area within our region of interest that is most dangerous to pedestrians.
"),
p("
While this analysis is not necessarily indicative of a relationship between bus stop location and crime ( and moreso just highlights the bus stops that are in the areas with the greatest prevalence of criminal activity ), it may further serve to inform students of where they should, or rather should not, wait to catch the bus if they want to stay safe.
")
)
)
)
shinyUI(ui)
|
7e43c147dc6e208bebb52cda446e2ec4326ff0e7
|
5e1b775edcb7683f1039eb20f2e6f9594a59f8eb
|
/Chapter 9/ch9E.R
|
fae5827ff8fcd03e1412c91f23310bf67df49b44
|
[] |
no_license
|
loufin/Business-Analytics
|
1973a649fca58da8aa4092a212eeb15e1c240098
|
e15f69154df19e7a017ba47ce9dd30f2c39d9b7d
|
refs/heads/main
| 2023-04-28T22:56:29.092261
| 2021-05-12T14:05:35
| 2021-05-12T14:05:35
| 335,809,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,367
|
r
|
ch9E.R
|
# 9.1 -- Classification tree for eBay auction competitiveness ------------
# NOTE(review): the trees below are fit on the full data set; train.df and
# valid.df are constructed but unused here -- presumably per the exercise.
auctions.df <- read.csv("eBayAuctions.csv", header = TRUE)

# 60/40 train/validation partition, keyed by row name
train.index <- sample(rownames(auctions.df), 0.6 * nrow(auctions.df))
valid.index <- setdiff(rownames(auctions.df), train.index)
train.df <- auctions.df[train.index, ]
valid.df <- auctions.df[valid.index, ]

library(rpart)
library(rpart.plot)

# Shared growing constraints for both trees
tree.ctrl <- rpart.control(maxdepth = 7, minbucket = 50)

# Full tree on all predictors
class.tree <- rpart(Competitive. ~ ., data = auctions.df,
                    control = tree.ctrl, method = "class")
prp(class.tree, type = 1, extra = 1, split.font = 1, varlen = -10)

# Tree restricted to selected predictors, then pruned at the complexity
# parameter that minimises the cross-validated error (xerror)
class.tree <- rpart(
  Competitive. ~ ClosePrice + OpenPrice + sellerRating + Duration + endDay,
  data = auctions.df, control = tree.ctrl, method = "class"
)
best.cp <- class.tree$cptable[which.min(class.tree$cptable[, "xerror"]), "CP"]
pruned.ct <- prune(class.tree, cp = best.cp)
length(pruned.ct$frame$var[pruned.ct$frame$var == "<leaf>"])  # leaf count
prp(pruned.ct, type = 1, extra = 1, split.font = 1, varlen = -10)
#9.2
# Classification tree for flight delays. Several predictors are recoded to
# factors / bins before modelling.
flights.df <- read.csv("FlightDelays.csv", header=T)
flights.df$DAY_WEEK <- as.factor(flights.df$DAY_WEEK)
# Bin scheduled departure time (hhmm, 0-2400) into eight 3-hour buckets,
# labelled by the bucket's upper hour.
flights.df$DEP_TIME <- cut(flights.df$DEP_TIME, breaks = seq(0,2400,300), labels = c("3","6","9","12","15","18","21","24"), include.lowest = T, right = T)
# Drop column 11 by position.
# NOTE(review): positional drop is fragile -- confirm which column index 11 is
# in FlightDelays.csv before reusing this script on a different extract.
flights.df <- flights.df[,-11]
flights.df$Weather <- as.factor(flights.df$Weather)
flights.df$FL_NUM <- as.factor(flights.df$FL_NUM)
# Bin flight distance into 10-mile intervals over [165, 235]
flights.df$DISTANCE <- cut(flights.df$DISTANCE, breaks = seq(165,235,10))
# 60/40 train/validation partition by row name.
# NOTE(review): train.df/valid.df are built but the tree below is fit on the
# full flights.df -- presumably intentional per the exercise text at the end.
train.index <- sample(rownames(flights.df), 0.6*dim(flights.df)[1])
valid.index <- setdiff(rownames(flights.df), train.index)
train.df <- flights.df[train.index,]
valid.df <- flights.df[valid.index,]
library(rpart)
library(rpart.plot)
# Grow a depth-8 classification tree, then prune lightly at cp = 0.001
class.tree <- rpart(Flight.Status ~ CARRIER+DEP_TIME+DEST+DISTANCE+ORIGIN+Weather+DAY_WEEK, data = flights.df,
                    control = rpart.control(maxdepth = 8, minbucket = 50), method = "class")
pruned.ct <- prune(class.tree, cp = 0.001)
length(pruned.ct$frame$var[pruned.ct$frame$var == "<leaf>"])  # leaf count
prp(pruned.ct, type = 1, extra = "auto", split.font = 2, varlen = 3)
library(caret)
# Class predictions on the same data the tree was fit on
class.tree.point.pred.flights <- predict(class.tree,flights.df,type = "class")
# generate confusion matrix for training data
confusionMatrix(class.tree.point.pred.flights, as.factor(flights.df$Flight.Status))
### repeat the code for the validation set, and the deeper tree
|
df19f463faa39b86260640d16743845cfdf18c05
|
21f4c8b6fa59bd8970fa4fc5aeef254bf228c603
|
/3-assemble.r
|
973b8c487226a09f8fcea6359bab4a6f08d50fbd
|
[
"MIT"
] |
permissive
|
brentonk/doe-h2o
|
e35dd6693ec9ee8b2372cc22319695d10a9358df
|
0f01c2d6bc58b674196a0b63165200abd7866c3d
|
refs/heads/master
| 2020-06-22T04:04:38.676379
| 2019-07-25T16:17:05
| 2019-07-25T16:17:05
| 197,627,941
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,423
|
r
|
3-assemble.r
|
### Assemble directed dyad predictions into a single data frame, then calculate
### the undirected scores
library("tidyverse")
library("assertr")
library("foreach")
sessionInfo()
## Load up the results from each individual year scoring, validate, and extract
# One pair of CSVs per year: the "-in" and "-out" halves of the same
# prediction set, combined column-wise. Rows must line up one-to-one across
# the two files, hence the explicit row-count check.
doe_dir_dyad <- foreach (yr = 1816:2012, .combine = "rbind") %do% {
  dat_in <- suppressMessages(read_csv(paste0("results/predict/", yr, "-in.csv")))
  dat_out <- suppressMessages(read_csv(paste0("results/predict/", yr, "-out.csv")))
  if (nrow(dat_in) != nrow(dat_out)) {
    stop("Unequal row numbers in year ", yr)
  }
  cbind(dat_in, dat_out) %>%
    as_tibble() %>%
    select(year, ccode_a, ccode_b,
           pr_win_a = VictoryA, pr_stalemate = Stalemate, pr_win_b = VictoryB)
}
## Clean up typing to ensure the written CSVs look how we'd want
# assertr's assert()/verify() abort the pipeline if any value is missing or
# if the three outcome probabilities fail to sum to 1 for any row.
doe_dir_dyad <- doe_dir_dyad %>%
  mutate_at(vars(one_of("year", "ccode_a", "ccode_b")), ~ as.integer(.)) %>%
  assert(not_na, everything()) %>%
  verify(all.equal(pr_stalemate + pr_win_a + pr_win_b, rep(1.0, nrow(.)))) %>%
  arrange(year, ccode_a, ccode_b)
## Create undirected data by averaging the directed scores
# Every directed dyad (a, b) has a mirror (b, a). Re-key each row on
# (min ccode, max ccode) so each pair-year appears exactly twice (verified
# below), then average the two directions' probabilities.
doe_dyad <- doe_dir_dyad %>%
  mutate(ccode_min = pmin(ccode_a, ccode_b),
         ccode_max = pmax(ccode_a, ccode_b),
         pr_win_min = if_else(ccode_a == ccode_min, pr_win_a, pr_win_b),
         pr_win_max = if_else(ccode_a == ccode_max, pr_win_a, pr_win_b)) %>%
  verify(all.equal(pr_win_min + pr_win_max + pr_stalemate, rep(1.0, nrow(.)))) %>%
  select(-matches("_[ab]$")) %>%
  group_by(ccode_min, ccode_max, year) %>%
  mutate(count = n()) %>%
  verify(count == 2) %>%
  summarise_at(vars("pr_stalemate", "pr_win_min", "pr_win_max"),
               ~ mean(.))
# Expand the undirected scores back out to both orderings so the undirected
# frame has exactly the same rows and columns as the directed one.
doe_dyad_a <- rename(doe_dyad,
                     ccode_a = ccode_min,
                     ccode_b = ccode_max,
                     pr_win_a = pr_win_min,
                     pr_win_b = pr_win_max)
doe_dyad_b <- rename(doe_dyad,
                     ccode_a = ccode_max,
                     ccode_b = ccode_min,
                     pr_win_a = pr_win_max,
                     pr_win_b = pr_win_min)
doe_dyad <- rbind(doe_dyad_a, doe_dyad_b) %>%
  select(one_of(!! colnames(doe_dir_dyad))) %>%
  verify(all.equal(pr_stalemate + pr_win_a + pr_win_b, rep(1.0, nrow(.)))) %>%
  verify(!duplicated(paste(year, ccode_a, ccode_b))) %>%
  arrange(year, ccode_a, ccode_b)
## Double check that the directed and undirected datasets have the same
## structure and organization
stopifnot(nrow(doe_dir_dyad) == nrow(doe_dyad))
stopifnot(all(doe_dir_dyad$year == doe_dyad$year))
stopifnot(all(doe_dir_dyad$ccode_a == doe_dyad$ccode_a))
stopifnot(all(doe_dir_dyad$ccode_b == doe_dyad$ccode_b))
stopifnot(all(colnames(doe_dir_dyad) == colnames(doe_dyad)))
## Look at correlations between the directed and undirected versions
cat("\nCorrelation between directed and undirected, pr_win_a:",
    sprintf("%.3f", cor(doe_dir_dyad$pr_win_a, doe_dyad$pr_win_a)),
    "\nCorrelation between directed and undirected, pr_stalemate:",
    sprintf("%.3f", cor(doe_dir_dyad$pr_stalemate, doe_dyad$pr_stalemate)),
    "\nCorrelation between directed and undirected, pr_win_b:",
    sprintf("%.3f", cor(doe_dir_dyad$pr_win_b, doe_dyad$pr_win_b)),
    "\n")
write_csv(doe_dir_dyad, path = "results/doe-dir-dyad-2.0.csv")
write_csv(doe_dyad, path = "results/doe-dyad-2.0.csv")
|
49d0e9069c0159216d9e816b84caa9674eb66eef
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dbparser/examples/parse_drug_enzymes_polypeptides_go_classifiers.Rd.R
|
b20dfd3865ff5d57289a2c2456b404e35726d06a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
parse_drug_enzymes_polypeptides_go_classifiers.Rd.R
|
# Auto-extracted example code from the dbparser Rd page for
# parse_drug_enzymes_polypeptides_go_classifiers(); exercises the function
# with its default, positional, and named-argument call forms.
library(dbparser)
### Name: parse_drug_enzymes_polypeptides_go_classifiers
### Title: Extracts the drug groups element and return data as data frame.
### Aliases: parse_drug_enzymes_polypeptides_go_classifiers
### ** Examples
## No test:
parse_drug_enzymes_polypeptides_go_classifiers()
parse_drug_enzymes_polypeptides_go_classifiers(TRUE)
parse_drug_enzymes_polypeptides_go_classifiers(save_table = FALSE)
## End(No test)
|
63e1998f86c955f4e199e2e871ebdfadad6bc809
|
7b602a40bfebdedc2f9f3c1b8dc31c3207564c40
|
/data_analysis/markdown/json_to_data.R
|
c7ff085c91dde7c4eea1927a308809c8b2f5c929
|
[] |
no_license
|
ejyoon/Polimp
|
f4d4696b0ebb807b70f3f03125ee0f1a4f55a82f
|
d601a2b9019ec27c4799599a0a7472e2830faf17
|
refs/heads/master
| 2021-01-18T21:03:57.898918
| 2017-05-08T04:12:28
| 2017-05-08T04:12:28
| 32,059,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,702
|
r
|
json_to_data.R
|
# NOTE(review): rm(list = ls()) at the top of a script is discouraged (it
# silently wipes the user's workspace); kept to preserve existing behavior.
rm(list = ls())
library(jsonlite)
library(ggplot2)
source("/Users/ericang/Documents/Courses/Psych254/zt12rp/data_analysis/helper/useful.R")

raw.data.path <- "/Users/ericang/Documents/Courses/Psych254/zt12rp/production-results/"
processed.data.path <- "/Users/ericang/Documents/Courses/Psych254/zt12rp/data_analysis/processed_data/"

## LOOP TO READ IN FILES
# Read every MTurk .json result file into one data frame. Per-file frames
# are accumulated in a preallocated list and bound once at the end --
# rbind()-ing inside the loop is O(n^2) in the number of files.
files <- dir(raw.data.path, pattern = "*.json")
per.file <- vector("list", length(files))
for (i in seq_along(files)) {
  file.name <- files[i]
  print(file.name)
  json_file <- readLines(paste0(raw.data.path, file.name))
  json_file_str <- paste(json_file, collapse = "")
  # Strip trailing commas before closing braces (invalid JSON produced by
  # the experiment code) so fromJSON can parse the payload.
  json_file_str <- gsub(",}", "}", json_file_str)
  jso1 <- data.frame(jsonlite::fromJSON(json_file_str))
  jso1$subid <- file.name  # tag every row with its source file
  per.file[[i]] <- jso1
}
# Same result as the original empty-frame-plus-rbind accumulation,
# including the empty-directory case.
all.data <- if (length(per.file) > 0) do.call(rbind, per.file) else data.frame()
### FILTER criteria:
## 1. exclude participants who make errors that are 3 SDs above the mean errors of the majority group
# Per-subject error counts (rows with correct == 0).
jso_sub <- all.data %>%
  group_by(subid, correct) %>%
  summarise(count = n()) %>%
  filter(correct == 0)
# NOTE(review): `mean` and `sd` shadow the base functions of the same name
# (and are assigned with `=`); this works but is fragile -- distinct names
# such as err_mean / err_sd would be safer.
mean = mean(jso_sub$count)
sd = sd(jso_sub$count)
filter(jso_sub, count > mean + 3*sd)  # printed for inspection only, not stored
# need to filter: pilot_B4.json, ztrp87.json
## 2. exclude participants who have average reaction times that are 4 SDs higher than the mean of the remaining participants
# Per-subject mean reaction times.
jso_sub <- all.data %>%
  group_by(subid) %>%
  summarise(mean_rt = mean(rt))
mean = mean(jso_sub$mean_rt)
sd = sd(jso_sub$mean_rt)
filter(jso_sub, mean_rt > mean + 4*sd)  # printed for inspection only
# need to filter: none
## 3. Exclude each participant’s incorrect responses
## 4. Exclude responses that took response time that is greater than 3 SDs above the overall mean response time
# NOTE(review): the rt cutoff below reuses `mean`/`sd` as computed in step 2
# from the distribution of per-subject *mean* RTs, not from the overall
# trial-level response-time distribution that criterion 4 describes --
# confirm which was intended before rerunning.
jso_final <- all.data %>%
  filter(subid != "pilot_B4.json" & subid != "ztrp87.json") %>%
  filter(correct == 1) %>%
  filter(rt < mean + 3*sd) %>%
  mutate(inv_rt = 1/rt)
# histogram -- raw and inverse RT distributions, overall and per condition
qplot(rt, data = jso_final, geom = "histogram")
qplot(rt, data = jso_final, geom = "histogram", facets = ~cond)
qplot(inv_rt, data = jso_final, geom = "histogram")
qplot(inv_rt, data = jso_final, geom = "histogram", facets = ~cond, binwidth = .0001)

# plot of the means
# Condition means (jso_ms) and per-subject condition means (jso_mss).
# Fix: na.rm belongs inside mean(); the original
# `summarise(rt = mean(rt), na.rm = TRUE)` created a literal na.rm column
# instead of dropping NAs from the mean.
jso_ms <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  group_by(cond) %>%
  summarise(rt = mean(rt, na.rm = TRUE))
jso_mss <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  group_by(cond, subid) %>%
  summarise(rt = mean(rt, na.rm = TRUE))

# CI half-widths across per-subject means (ci.high/ci.low come from useful.R)
jso_ms$cih <- aggregate(rt ~ cond, jso_mss, ci.high)$rt
jso_ms$cil <- aggregate(rt ~ cond, jso_mss, ci.low)$rt
jso_ms$cond <- as.factor(jso_ms$cond)
levels(jso_ms$cond) <- c("dominant-dominant", "dominant-submissive", "submissive-submissive")

# Bar plot of condition means with CI error bars
p1 <- ggplot(data = jso_ms,
             aes(x=cond, y=rt, fill=cond)) +
  geom_bar(stat="identity", position=position_dodge()) +
  geom_errorbar(aes(ymin=rt-cil, ymax=rt+cih),
                width=.2,
                position=position_dodge(.9)) +
  # coord_cartesian(ylim=c(.9,6)) +
  # scale_y_continuous(breaks=1:6) +
  xlab("Condition") +
  ylab("Reaction time") +
  ggtitle("Reaction times for identification")
p1

# anova -- repeated-measures: condition effect with subject as error stratum
fit <- aov(rt ~ cond + Error(subid/cond), data = jso_mss)
summary(fit)
# lmer
# Mixed-effects model on per-subject, per-item condition means.
# NOTE(review): `summarise(rt = mean(rt), na.rm = TRUE)` here (and in every
# summarise below) passes na.rm as a new literal column rather than as an
# argument to mean(); mean(rt, na.rm = TRUE) was presumably intended.
# Harmless only if rt contains no NAs at this point -- the stray column is
# explicitly deleted near the end (`jso_ms$na.rm <- NULL`).
jso_mss <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  mutate(item = substr(leftPic, 1, nchar(leftPic)-1)) %>%  # item id = picture name minus last character
  group_by(cond, subid, item) %>%
  summarise(rt = mean(rt), na.rm = TRUE)
jso_mss$cond <- as.factor(jso_mss$cond)
jso_mss$cond <- relevel(jso_mss$cond, "ds")  # make "ds" the reference level
# NOTE(review): assigning the fit to `lmer` shadows the lmer() function
# (presumably from lme4, loaded via useful.R -- confirm); a name like
# fit_lmer would be safer.
lmer <- lmer(rt ~ cond + (1 | subid) + (1 | item), data = jso_mss)
summary(lmer)
## inverse reaction time
# Same condition / per-subject means as above, computed on 1/rt.
jso_ms <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  group_by(cond) %>%
  summarise(inv_rt = mean(inv_rt), na.rm = TRUE)
jso_mss <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  group_by(cond, subid) %>%
  summarise(inv_rt = mean(inv_rt), na.rm = TRUE)
# CI half-widths across per-subject means (ci.high/ci.low from useful.R)
jso_ms$cih <- aggregate(inv_rt ~ cond, jso_mss, ci.high)$inv_rt
jso_ms$cil <- aggregate(inv_rt ~ cond, jso_mss, ci.low)$inv_rt
jso_ms$cond <- as.factor(jso_ms$cond)
levels(jso_ms$cond) <- c("dominant-dominant", "dominant-submissive", "submissive-submissive")
# Bar plot of inverse-RT condition means with CI error bars
p1 <- ggplot(data = jso_ms,
             aes(x=cond, y=inv_rt, fill=cond)) +
  geom_bar(stat="identity", position=position_dodge()) +
  geom_errorbar(aes(ymin=inv_rt-cil, ymax=inv_rt+cih),
                width=.2,
                position=position_dodge(.9)) +
  # coord_cartesian(ylim=c(.9,6)) +
  # scale_y_continuous(breaks=1:6) +
  xlab("Condition") +
  ylab("Average inverse reaction time") +
  ggtitle("Reaction times for identification")
p1
## ANOVA
# Repeated-measures ANOVA on inverse RTs
fit <- aov(inv_rt ~ cond + Error(subid/cond),data=jso_mss)
summary(fit)
## lmer?
# Mixed model on per-subject, per-item inverse-RT means
jso_mss <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  mutate(item = substr(leftPic, 1, nchar(leftPic)-1)) %>%
  group_by(cond, subid, item) %>%
  summarise(inv_rt = mean(inv_rt), na.rm = TRUE)
jso_mss$cond <- as.factor(jso_mss$cond)
jso_mss$cond <- relevel(jso_mss$cond, "ds")
lmer <- lmer(inv_rt ~ cond + (1 | subid) + (1 | item), data = jso_mss)
summary(lmer)
## comparison with original data
# plot of the means
# Recompute the replication's condition means, then stack them against the
# original study's published means for a side-by-side plot.
jso_ms <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  group_by(cond) %>%
  summarise(rt = mean(rt), na.rm = TRUE)
jso_mss <- jso_final %>%
  filter(correct == 1) %>%
  filter(cond != "ap") %>%
  group_by(cond, subid) %>%
  summarise(rt = mean(rt), na.rm = TRUE)
jso_ms$cih <- aggregate(rt ~ cond, jso_mss, ci.high)$rt
jso_ms$cil <- aggregate(rt ~ cond, jso_mss, ci.low)$rt
jso_ms$cond <- as.factor(jso_ms$cond)
levels(jso_ms$cond) <- c("dominant-dominant", "dominant-submissive", "submissive-submissive")
jso_ms$expt <- "rp"
# Drop the stray na.rm column so the columns line up with `orig` for rbind
jso_ms$na.rm <- NULL
# Published means/CIs from the original study, entered inline
orig <- read.table(header=T, text='
cond rt cih cil expt
dominant-dominant 507.19 13.89 13.89 orig
dominant-submissive 459.32 12.82 12.82 orig
submissive-submissive 462.22 13.17 13.17 orig
')
rp <- rbind(jso_ms, orig)
rp$expt <- as.factor(rp$expt)
# Factor levels are alphabetical ("orig", "rp"), so this maps
# orig -> Original and rp -> Replication
levels(rp$expt) <- c("Original", "Replication")
levels(rp$cond) <- c("both dominant","dominant-submissive", "both submissive")
# Faceted bar plot: original vs replication, per condition
p1 <- ggplot(data = rp,
             aes(x=cond, y=rt, fill=cond)) +
  geom_bar(stat="identity", position=position_dodge()) +
  geom_errorbar(aes(ymin=rt-cil, ymax=rt+cih),
                width=.2,
                position=position_dodge(.9)) +
  facet_grid(.~expt) +
  # coord_cartesian(ylim=c(.9,6)) +
  # scale_y_continuous(breaks=1:6) +
  xlab("Condition") +
  ylab("Reaction time") +
  ggtitle("Reaction times for identification")
p1
|
415b0c92e97bfc6529083b9b03743f1ef7b2d717
|
8c3a9fc8db02ccfecb510402f3d4962f982ab79a
|
/dashboard/global.R
|
4d2a5c706e5e94de85879ee61d5c8cfe464fdecb
|
[
"MIT"
] |
permissive
|
rithwik/datakind-egovernments
|
c402a84983d06384f56a6131b7a52de67ac7e6f6
|
12807f7582450c76eb7cb62ac1b7f6b9a7df6925
|
refs/heads/master
| 2021-01-17T18:44:50.864333
| 2016-06-23T06:01:46
| 2016-06-23T06:01:46
| 59,551,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
global.R
|
# global.R for the Shiny dashboard: evaluated once at app start-up; every
# object defined here is visible to both ui.R and server.R.
library(xts)
library(hash)
library(data.table)
library(dplyr)
library(dygraphs)
library(plotly)
# Complaint data; dates arrive as m/d/Y strings and are parsed to Date.
# NOTE(review): unparseable dates become NA, which would make minDate/maxDate
# below NA as well -- assumes the CSV's date columns parse cleanly.
df = fread("../data/coc.csv")
df$Complaint.Date <- as.Date(df$Complaint.Date, format = "%m/%d/%Y")
df$Resolution.Date <- as.Date(df$Resolution.Date, format = "%m/%d/%Y")
df$NumComplaints <- 1  # unit weight per row, so sums give complaint counts
# Aggregation granularities offered in the UI, and their mapping to the
# period names used downstream.
choicesForTime <- c("Daily", "Weekly", "Monthly", "Quarterly")
choicesMapping <- hash("Daily" = "day", "Weekly" = "week",
                       "Monthly" = "month", "Quarterly" = "quarter")
# Date range of the data, for UI date pickers
minDate <- min(df$Complaint.Date)
maxDate <- max(df$Complaint.Date)
# Ten most frequent complaint types, reduced to a character vector.
# NOTE(review): `[1:10, ]` assumes at least 10 distinct types; fewer would
# introduce NA rows.
topComplaintTypes <- data.frame(table(df$Complaint.Type))
topComplaintTypes <- topComplaintTypes[order(-topComplaintTypes$Freq),]
topComplaintTypes <- topComplaintTypes[1:10, ]
topComplaintTypes <- as.character(topComplaintTypes$Var1)
|
cecd5ce88fa00bde0d2cdae4171e4b31ce0ea8d8
|
11529bd6430cdf97087b3148a20f38b5acab2fcd
|
/man/createSpellConfig.Rd
|
b720732b8fadb8356bb2d2a8aad9e8eb9272faeb
|
[] |
no_license
|
omegahat/Aspell
|
288c4fbf626d3c34550134cb330814ffe6661082
|
41cba211ebe7820f6c8e1a201112fa5bee3b8aad
|
refs/heads/master
| 2020-06-05T01:43:38.082431
| 2018-12-30T00:58:45
| 2018-12-30T00:58:45
| 3,999,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,677
|
rd
|
createSpellConfig.Rd
|
\name{createSpellConfig}
\alias{createSpellConfig}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Create a new aspell configuration object.}
\description{
This function creates a new apsell configuration object
and allows the caller to set options within that.
Options can also be set later during the life
of the configuration object.
The configuration object is rarely needed
directly by users who want to spell
words. It is useful if one wants to customize
how the speller functions.
}
\usage{
createSpellConfig(..., .values = NULL, class = "AspellConfig")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{\dots}{the mechanism for specifying name-value options to se in
the new configuration object. This is intended for interactive use.}
\item{.values}{a named list or character vector giving option values for the
new configuration object. See \code{\link{getSpellConfig}}.}
\item{class}{the name of the class that will be instantiated and
returned containing the reference to the C-level configuration
value. This argument allows the caller to easily specify
the type of object that is desired and so this function can
be used for derived classes of \code{\link{AspellConfig-class}}.
}
}
\value{
An object of class \code{class}.
}
\references{ \url{http://aspell.sourceforge.net}}
\author{Duncan Temple Lang <duncan@wald.ucdavis.edu>}
\seealso{
\code{\link{getSpellInfo}}
\code{\link{getSpellConfig}}
}
\examples{
conf = createSpellConfig()
conf = createSpellConfig(lang = "en")
conf = createSpellConfig(lang = "en", mode = "email")
}
\keyword{IO}
|
b2d5817ea54d14185f532147f3634b38130deda3
|
190197a40d6779a986bacbf4ef8d4f293502b853
|
/toyCarLine.r
|
dc1c547f016bb9a876e68f83512de173ae3273ad
|
[] |
no_license
|
fgarzadeleon/SoftwareCarpentry
|
3b008778801318d885003e3c938a3b042b08b44d
|
b3458e767587e090e494d60c81df051b0df3cb18
|
refs/heads/master
| 2021-01-09T20:57:37.535968
| 2016-07-13T11:12:50
| 2016-07-13T11:12:50
| 63,233,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198
|
r
|
toyCarLine.r
|
## toyCarLine.r
## Federico Garza de Leon fgarzadeleon@gmail.com
## plot the cars data as an example
plot(cars)  # scatterplot of the built-in cars data
z <- line(cars)  # Tukey's resistant line fit
abline(coef(z), col = "purple")  # overlay the fitted line on the scatterplot
dev.print(pdf, "toyLinePlot.pdf")  # copy the current plot device to a PDF
|
95e6cf8e7e984d3ab86ae080d6774d0b73cf4015
|
65317ea9159976b3fda084b2321c9afe959f6794
|
/R/varNamesToChar.r
|
247bc8f434ae4c48c039139e157d4d179ac2e3e6
|
[] |
no_license
|
cran/reporttools
|
c5a306a433bad475952b4515c30c557438050c5c
|
fc18cc11152b5ae783ff34376120bc08196a12a9
|
refs/heads/master
| 2021-10-28T08:56:09.300219
| 2021-10-12T15:10:02
| 2021-10-12T15:10:02
| 17,699,149
| 1
| 0
| null | 2014-09-04T01:06:29
| 2014-03-13T06:05:19
|
TeX
|
UTF-8
|
R
| false
| false
| 222
|
r
|
varNamesToChar.r
|
#' Convert a comma-separated name string into a quote-delimited fragment.
#'
#' Splits `varnam` on ", " and appends the literal sequence `", "` after
#' each element, e.g. "a, b" -> 'a", "b", "'. The result is evidently meant
#' to be spliced into generated code by callers.
#'
#' @param varnam A single string of names separated by ", ".
#' @return A single string; "" when `varnam` contains no names.
varNamesToChar <- function(varnam) {
  parts <- strsplit(varnam, ", ")[[1]]
  # The original looped over 1:length(parts), which fails on zero-length
  # input (1:0 counts down and indexes out of bounds); handle the empty
  # case explicitly and vectorise the concatenation.
  if (length(parts) == 0) {
    return("")
  }
  paste0(paste0(parts, "\", \""), collapse = "")
}
|
99a144b350f7e23d60a3ec04e5d2939922d409c7
|
ab70aaa2fd087d4e935a228aeed6f7a341044f4f
|
/tests/testthat/test_bro.R
|
8579da6207e08a5a395c0643e4a85b521d4a43aa
|
[
"MIT"
] |
permissive
|
Ironholds/webreadr
|
f1d04bc6c7e7dbddb3e10a2089e2225ce8b71e9a
|
545932629e3e7082911c91ee41a9ac0474832e1d
|
refs/heads/master
| 2021-07-19T15:53:37.050370
| 2020-10-28T19:52:18
| 2020-10-28T19:52:18
| 32,247,262
| 43
| 15
|
NOASSERTION
| 2021-07-15T14:42:23
| 2015-03-15T05:54:02
|
R
|
UTF-8
|
R
| false
| false
| 1,883
|
r
|
test_bro.R
|
context("Test reading Bro file formats")

# Each Bro log fixture shipped with webreadr, together with the dimensions
# read_bro() is expected to produce for it. Driving all seven formats from
# one table removes the seven near-identical copies of the same test body;
# the timestamp-class expectation is shared by every format.
bro_fixtures <- list(
  list(desc = "Bro app logs can be read",  file = "extdata/app_stats.log", rows = 1,  cols = 6),
  list(desc = "Bro conn logs can be read", file = "extdata/conn.log",      rows = 23, cols = 20),
  list(desc = "Bro DHCP logs can be read", file = "extdata/dhcp.log",      rows = 2,  cols = 10),
  list(desc = "Bro DNS logs can be read",  file = "extdata/dns.log",       rows = 9,  cols = 23),
  list(desc = "Bro FTP logs can be read",  file = "extdata/ftp.log",       rows = 17, cols = 19),
  list(desc = "Bro file logs can be read", file = "extdata/files.log",     rows = 17, cols = 23),
  list(desc = "Bro HTTP logs can be read", file = "extdata/http.log",      rows = 91, cols = 27)
)

for (fixture in bro_fixtures) {
  # test_that() runs eagerly, so using the loop variable directly is safe.
  test_that(fixture$desc, {
    file <- system.file(fixture$file, package = "webreadr")
    data <- read_bro(file)
    expect_equal(nrow(data), fixture$rows)
    expect_equal(ncol(data), fixture$cols)
    expect_equal(class(data$timestamp), c("POSIXct", "POSIXt"))
  })
}
|
760b3fbeb73d199bff9acd3d37e94a35201ab573
|
15895f0c1e41f82796d75cbcb9756f7a27ebaae7
|
/run_analysis.R
|
27515f348f9c5c9eb8e6496db17eaca1628ed161
|
[] |
no_license
|
BobWeerts/ds03Project
|
97ace8fb1d24d3e3d6576546602c80a774aa6cce
|
b91b06911f096563fb20c5667722982ad80c7227
|
refs/heads/master
| 2020-06-03T20:22:10.821283
| 2015-08-23T23:00:21
| 2015-08-23T23:00:21
| 41,269,744
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,056
|
r
|
run_analysis.R
|
run_analysis <- function() {
    # Tidy the UCI HAR smartphone data set (expects the "UCI HAR Dataset"
    # folder in the working directory) and write the result to tidy_data.txt.

    # 1. Merge the training and the test sets (train rows first, matching
    #    the original row order across all three tables).
    subjects <- rbind(
        read.table('./UCI HAR Dataset/train/subject_train.txt'),
        read.table('./UCI HAR Dataset/test/subject_test.txt')
    )
    names(subjects) <- "subject"
    measurements_all <- rbind(
        read.table('./UCI HAR Dataset/train/X_train.txt'),
        read.table('./UCI HAR Dataset/test/X_test.txt')
    )
    activity_ids <- rbind(
        read.table('./UCI HAR Dataset/train/y_train.txt'),
        read.table('./UCI HAR Dataset/test/y_test.txt')
    )

    # 2. Keep only the mean() and std() measurements, named after the
    #    corresponding entries of features.txt.
    features <- read.table('./UCI HAR Dataset/features.txt', header=FALSE, col.names=c('id', 'name'))
    wanted_cols <- grep('mean\\(\\)|std\\(\\)', features$name)
    extracted <- measurements_all[, wanted_cols]
    names(extracted) <- features[features$id %in% wanted_cols, 2]

    # 3. Use descriptive activity names for the activities in the data set.
    activity_labels <- read.table('./UCI HAR Dataset/activity_labels.txt', header=FALSE, col.names=c('id', 'name'))

    # 4. Replace numeric activity codes with their labels and combine.
    activity_ids[, 1] <- activity_labels[activity_ids[, 1], 2]
    names(activity_ids) <- "activity"
    combined <- cbind(subjects, activity_ids, extracted)

    # 5. Average every measurement for each (subject, activity) pair and
    #    write the tidy result.
    value_cols <- combined[, 3:ncol(combined)]
    tidy_data <- aggregate(value_cols, list(combined$subject, combined$activity), mean)
    names(tidy_data)[1:2] <- c('subject', 'activity')
    write.table(tidy_data, "./tidy_data.txt")
}
|
1c5b4771bea807739c6b0766196cc989cc421670
|
1c9ffcb04f94e4306d373c1b769168d971ee2b48
|
/inst/doc/DirichletReg-vig.R
|
bd2a9764c29b606d69f468d81266378dfcd71763
|
[] |
no_license
|
cran/DirichletReg
|
6640d77d0d0e05b3ecf70141dbf327e89d2075b9
|
4f82dc0351c2dc249ebf46f40dc2b09395d84181
|
refs/heads/master
| 2021-07-04T22:55:11.813658
| 2021-05-18T09:30:03
| 2021-05-18T09:30:03
| 17,678,821
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,286
|
r
|
DirichletReg-vig.R
|
## Code extracted (knitr::purl) from the DirichletReg package vignette.
## Each "## ----<label>----" marker below delimits one vignette chunk; the
## chunks fit Dirichlet regression models to the ArcticLake, BloodSamples
## and ReadingSkills data sets and reproduce the vignette's figures.
## NOTE(review): generated file -- prefer regenerating from the vignette
## source over editing this file by hand.
## ----setup, include = FALSE--------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE
  , comment = "##"
  , tidy = TRUE
  , fig.width=7
  , fig.height=7
)
## ---- message = FALSE---------------------------------------------------------
library("DirichletReg")
head(ArcticLake)
AL <- DR_data(ArcticLake[,1:3])
AL[1:6,]
## ---- fig.show='hold', fig.cap = "Figure 1: Arctic lake: Ternary plot and depth vs. composition. (left)"----
plot(AL, cex=.5, a2d=list(colored=FALSE, c.grid=FALSE))
## ---- fig.show='hold', fig.cap = "Figure 1: Arctic lake: Ternary plot and depth vs. composition. (right)"----
plot(rep(ArcticLake$depth,3),as.numeric(AL), pch=21, bg=rep(c("#E495A5", "#86B875", "#7DB0DD"),each=39), xlab="Depth (m)", ylab="Proportion", ylim=0:1)
## ---- tidy=TRUE---------------------------------------------------------------
lake1 <- DirichReg(AL~depth, ArcticLake)
lake1
coef(lake1)
lake2 <- update(lake1, .~.+I(depth^2)|.+I(depth^2)|.+I(depth^2))
anova(lake1,lake2)
summary(lake2)
## ---- fig.show='hold', fig.cap = "Figure 2: Arctic lake: Fitted values of the quadratic model."----
par(mar=c(4, 4, 4, 4)+0.1)
plot(rep(ArcticLake$depth,3),as.numeric(AL),
pch=21, bg=rep(c("#E495A5", "#86B875", "#7DB0DD"),each=39),
xlab="Depth (m)", ylab="Proportion", ylim=0:1, main="Sediment Composition in an Arctic Lake")
Xnew <- data.frame(depth=seq(min(ArcticLake$depth), max(ArcticLake$depth), length.out=100))
for(i in 1:3)lines(cbind(Xnew, predict(lake2, Xnew)[,i]),col=c("#E495A5", "#86B875", "#7DB0DD")[i],lwd=2)
legend("topleft", legend=c("Sand","Silt","Clay"), lwd=2, col=c("#E495A5", "#86B875", "#7DB0DD"), pt.bg=c("#E495A5", "#86B875", "#7DB0DD"), pch=21,bty="n")
par(new=TRUE)
plot(cbind(Xnew, predict(lake2, Xnew, F,F,T)), lty="24", type="l",ylim=c(0,max(predict(lake2, Xnew, F,F,T))),axes=F,ann=F,lwd=2)
axis(4)
mtext(expression(paste("Precision (",phi,")",sep="")), 4, line=3)
legend("top",legend=c(expression(hat(mu[c]==hat(alpha)[c]/hat(alpha)[0])),expression(hat(phi)==hat(alpha)[0])),lty=c(1,2),lwd=c(3,2),bty="n")
## -----------------------------------------------------------------------------
AL <- ArcticLake
AL$AL <- DR_data(ArcticLake[,1:3])
dd <- range(ArcticLake$depth)
X <- data.frame(depth=seq(dd[1], dd[2], length.out=200))
pp <- predict(DirichReg(AL~depth+I(depth^2),AL), X)
## ---- fig.cap = "Figure 3: Arctic lake: OLS (dashed) vs. Dirichlet regression (solid) predictions."----
plot(AL$AL, cex=.1, reset_par=FALSE)
points(toSimplex(AL$AL), pch=16, cex=.5, col=gray(.5))
lines(toSimplex(pp), lwd=3, col=c("#6E1D34", "#004E42")[2])
Dols <- log(cbind(ArcticLake[,2]/ArcticLake[,1],
ArcticLake[,3]/ArcticLake[,1]))
ols <- lm(Dols~depth+I(depth^2), ArcticLake)
p2 <- predict(ols, X)
p2m <- exp(cbind(0,p2[,1],p2[,2]))/rowSums(exp(cbind(0,p2[,1],p2[,2])))
lines(toSimplex(p2m), lwd=3, col=c("#6E1D34", "#004E42")[1], lty="21")
## -----------------------------------------------------------------------------
Bld <- BloodSamples
Bld$Smp <- DR_data(Bld[,1:4])
blood1 <- DirichReg(Smp~Disease|1, Bld, model="alternative", base=3)
blood2 <- DirichReg(Smp~Disease|Disease, Bld, model="alternative", base=3)
anova(blood1, blood2)
summary(blood1)
## ---- fig.cap="Blood samples: Box plots and fitted values (dashed lines indicate the fitted values for each group)."----
par(mfrow=c(1,4), mar=c(4,4,4,2)+.25)
for(i in 1:4){
boxplot(Bld$Smp[,i]~Bld$Disease, ylim=range(Bld$Smp[,1:4]), main=paste(names(Bld)[i]), xlab="Disease Type", ylab="Proportion")
segments(c(-5,1.5),unique(fitted(blood2)[,i]),
c(1.5,5),unique(fitted(blood2)[,i]),lwd=2,lty=2)
}
## -----------------------------------------------------------------------------
alpha <- predict(blood2, data.frame(Disease=factor(c("A","B"))), F, T, F)
L <- sapply(1:2, function(i) ddirichlet(DR_data(Bld[31:36,1:4]), unlist(alpha[i,])))
LP <- L / rowSums(L)
dimnames(LP) <- list(paste("C",1:6), c("A", "B"))
print(data.frame(round(LP * 100, 1),"pred."=as.factor(ifelse(LP[,1]>LP[,2], "==> A", "==> B"))),print.gap=2)
## ---- fig.cap = "Blood samples: Observed values and predictions"--------------
B2 <- DR_data(BloodSamples[,c(1,2,4)])
plot(B2, cex=.001, reset_par=FALSE)
div.col <- colorRampPalette(c("#023FA5", "#c0c0c0", "#8E063B"))(100)
# expected values
temp <- (alpha/rowSums(alpha))[,c(1,2,4)]
points(toSimplex(temp/rowSums(temp)), pch=22, bg=div.col[c(1,100)], cex=2, lwd=.25)
# known values
temp <- B2[1:30,]
points(toSimplex(temp/rowSums(temp)), pch=21, bg=(div.col[c(1,100)])[BloodSamples$Disease[1:30]], cex=.5, lwd=.25)
# unclassified
temp <- B2[31:36,]
points(toSimplex(temp/rowSums(temp)), pch=21, bg=div.col[round(100*LP[,2],0)], cex=1, lwd=.5)
legend("topright", bty="n", legend=c("Disease A","Disease B",NA,"Expected Values"), pch=c(21,21,NA,22), pt.bg=c(div.col[c(1,100)],NA,"white"))
## -----------------------------------------------------------------------------
RS <- ReadingSkills
RS$acc <- DR_data(RS$accuracy)
RS$dyslexia <- C(RS$dyslexia, treatment)
rs1 <- DirichReg(acc ~ dyslexia*iq | dyslexia*iq, RS, model="alternative")
rs2 <- DirichReg(acc ~ dyslexia*iq | dyslexia+iq, RS, model="alternative")
anova(rs1,rs2)
## ---- fig.cap="Reading skills: Predicted values of Dirichlet regression and OLS regression."----
g.ind <- as.numeric(RS$dyslexia)
g1 <- g.ind == 1 # normal
g2 <- g.ind != 1 # dyslexia
par(mar=c(4,4,4,4)+0.25)
plot(accuracy~iq, RS, pch=21, bg=c("#E495A5", "#39BEB1")[3-g.ind],
cex=1.5, main="Dyslexic (Red) vs. Control (Green) Group",
xlab="IQ Score",ylab="Reading Accuracy", xlim=range(ReadingSkills$iq))
x1 <- seq(min(RS$iq[g1]), max(RS$iq[g1]), length.out=200)
x2 <- seq(min(RS$iq[g2]), max(RS$iq[g2]), length.out=200)
n <- length(x1)
X <- data.frame(dyslexia=factor(rep(0:1, each=n), levels=0:1, labels=c("no", "yes")),iq=c(x1,x2))
pv <- predict(rs2, X, TRUE, TRUE, TRUE)
lines(x1, pv$mu[1:n,2], col=c("#E495A5", "#39BEB1")[2],lwd=3)
lines(x2, pv$mu[(n+1):(2*n),2], col=c("#E495A5", "#39BEB1")[1],lwd=3)
a <- RS$accuracy
logRa_a <- log(a/(1-a))
rlr <- lm(logRa_a~dyslexia*iq, RS)
ols <- 1/(1+exp(-predict(rlr, X)))
lines(x1, ols[1:n], col=c("#AD6071", "#00897D")[2],lwd=3,lty=2)
lines(x2, ols[(n+1):(2*n)], col=c("#AD6071", "#00897D")[1],lwd=3,lty=2)
### precision plot
par(new=TRUE)
plot(x1, pv$phi[1:n], col=c("#6E1D34", "#004E42")[2], lty="11", type="l",ylim=c(0,max(pv$phi)),axes=F,ann=F,lwd=2, xlim=range(RS$iq))
lines(x2, pv$phi[(n+1):(2*n)], col=c("#6E1D34", "#004E42")[1], lty="11", type="l",lwd=2)
axis(4)
mtext(expression(paste("Precision (",phi,")",sep="")), 4, line=3)
legend("topleft",legend=c(expression(hat(mu)),expression(hat(phi)),"OLS"),lty=c(1,3,2),lwd=c(3,2,3),bty="n")
## -----------------------------------------------------------------------------
a <- RS$accuracy
logRa_a <- log(a/(1-a))
rlr <- lm(logRa_a~dyslexia*iq, RS)
summary(rlr)
summary(rs2)
confint(rs2)
confint(rs2, exp=TRUE)
## ---- fig.height=7/2*3, fig.cap="Reading skills: residual plots of OLS and Dirichlet regression models."----
gcol <- c("#E495A5", "#39BEB1")[3-as.numeric(RS$dyslexia)]
tmt <- c(-3,3)
par(mfrow=c(3,2), cex=.8)
qqnorm(residuals(rlr,"pearson"), ylim=tmt, xlim=tmt, pch=21, bg=gcol, main="Normal Q-Q-Plot: OLS Residuals",cex=.75,lwd=.5)
abline(0,1, lwd=2)
qqline(residuals(rlr,"pearson"), lty=2)
qqnorm(residuals(rs2,"standardized")[,2], ylim=tmt, xlim=tmt, pch=21, bg=gcol, main="Normal Q-Q-Plot: DirichReg Residuals",cex=.75,lwd=.5)
abline(0,1, lwd=2)
qqline(residuals(rs2,"standardized")[,2], lty=2)
plot(ReadingSkills$iq, residuals(rlr,"pearson"), pch=21, bg=gcol, ylim=c(-3,3),main="OLS Residuals",xlab="IQ",ylab="Pearson Residuals",cex=.75,lwd=.5)
abline(h=0,lty=2)
plot(ReadingSkills$iq, residuals(rs2,"standardized")[,2], pch=21, bg=gcol ,ylim=c(-3,3),main="DirichReg Residuals",xlab="IQ",ylab="Standardized Residuals",cex=.75,lwd=.5)
abline(h=0,lty=2)
plot(fitted(rlr), residuals(rlr,"pearson"), pch=21, bg=gcol ,ylim=c(-3,3),main="OLS Residuals",xlab="Fitted",ylab="Pearson Residuals",cex=.75,lwd=.5)
abline(h=0,lty=2)
plot(fitted(rs2)[,2], residuals(rs2,"standardized")[,2], pch=21, bg=gcol ,ylim=c(-3,3),main="DirichReg Residuals",xlab="Fitted",ylab="Standardized Residuals",cex=.75,lwd=.5)
abline(h=0,lty=2)
|
9d7316a77a8015c1cbd3ebea92d0c141d667f4d4
|
1026dbe5504954e22052548ef823fcd42c651770
|
/R/3dDigitize.curve.r
|
eee9df1a11a7743638abc57c47ccf9e6f68988b5
|
[] |
no_license
|
alutterb/tkogl2
|
b7081c26b30439276853a5227fa32c8eaeff0e5f
|
799d432bdafb6d3f52d0024fd5a9027cd07e8067
|
refs/heads/master
| 2021-03-12T14:41:24.479925
| 2020-04-03T16:00:34
| 2020-04-03T16:00:34
| 246,627,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,864
|
r
|
3dDigitize.curve.r
|
################# main data structure ##############################
#dgtDataList
#dgtDataList[imgId][[1]]: specimen dir
#dgtDataList[imgId][[2]]: font
#dgtDataList[imgId][[3]]: number of landmark
#dgtDataList[1][[4]]: curves
#dgtDataList[imgId][[5]]: template
#dgtDataList[imgId][[6]]: rotation
#dgtDataList[imgId][[7]]: zoom
#dgtDataList[imgId][[8]]: surface file
#Resets all curve-digitizing state stored on the shared environment `e`:
#no dots collected yet, no curve under construction, no slider landmarks.
init.curve <- function(e) {
  e$curveDots <- c()
  e$curveLine <- c()
  e$sliders <- c()
  e$curveDotNum <- 0
}
#Builds the control panel for the curve component: a ttk frame containing a
#"Fit" button that calls onFit() on the shared state environment `e`.
#Returns the frame so the caller can place it inside `parent`.
ui.curve <- function(e, parent) {
  curveCtlFrame <- ttkframe(parent)
  fitBtn <- ttkbutton(curveCtlFrame, text = "Fit",command = function() onFit(e))
  # Blank label acts as a vertical spacer above the button.
  tkpack(ttklabel(curveCtlFrame, text = " "), pady = 6)
  tkpack(fitBtn)
  return (curveCtlFrame)
}
#Installs the mouse bindings used while in curve mode:
# - left press records the drag start position on `e`
# - right press is bound to a no-op (clearing any previous binding)
# - double left-click forwards to onSelectCurve() to pick a curve dot
bind.curve <-function(e) {
  #print("bind.curve")
  tkbind(e$canvasFrame, "<ButtonPress-1>", function(x, y) {
    # Tk delivers coordinates as strings; keep integer pixel positions.
    e$dragX <- as.integer(x)
    e$dragY <- as.integer(y)
  })
  tkbind(e$canvasFrame, "<ButtonPress-3>", function(x, y) {})
  tkbind(e$canvasFrame, "<Double-Button-1>", function(x, y) {onSelectCurve(e, x, y)})
}
#Loads curve data from the lines of a .dgt file.
#
#`content` is a character vector of file lines. The section starts at the
#line "Curve=<n>" and is followed by n lines of three space-separated
#landmark ids. Returns an n x 3 numeric matrix, or NULL when the file
#declares zero curves or has no "Curve=" marker at all (the old code
#crashed in that case: grep() returned integer(0) and the subsequent
#`if` saw a zero-length condition).
read.curve <- function(content) {
  #print("read.curve")
  startLine <- grep("Curve=", content, ignore.case = TRUE)
  if (length(startLine) == 0) {
    return (NULL)
  }
  # Defensively use only the first marker if several are present.
  startLine <- startLine[1]
  num <- as.numeric(sub("Curve=", "", content[startLine], ignore.case = TRUE))
  if (num == 0) {
    return (NULL)
  }
  curveLines <- content[(startLine + 1):(startLine + num)]
  curves <- matrix(as.numeric(unlist(strsplit(curveLines, " "))), ncol=3, byrow=TRUE)
  return (curves)
}
#Appends the curve section to a .dgt file: a "Curve=<n>" header followed by
#one space-separated row per curve and a blank separator line; an empty or
#NULL `curves` writes just "Curve=0". The two duplicate
#`length(curves) > 0` checks of the original are merged into one branch.
write.curve <- function(fileName, curves) {
  if(length(curves) > 0) {
    write(paste0("Curve=", nrow(curves)), fileName, append = TRUE)
    write.table(curves, fileName, sep = " ", col.names = FALSE, row.names = FALSE, append = TRUE)
    # Blank line terminates the curve section.
    write("", fileName, append = TRUE)
  } else {
    write("Curve=0", fileName, append = TRUE)
  }
}
#Display curves to the GUI: adds each row of the n x 3 curve matrix via the
#external add() binding.
draw.curves <- function(curves) {
  print("Add curves ... ...")
  # seq_len() handles the zero-row case safely; the previous
  # `1:nrow(curves)` would iterate over c(1, 0) for an empty matrix.
  for (j in seq_len(nrow(curves))) {
    add("curve", curves[j,1], curves[j,2], curves[j,3])
  }
}
#UI layout dynamic update callback for the curve component.
#Intentionally empty: the curve panel currently has no widgets whose state
#needs refreshing, but the callback must exist for the component interface.
updateWidgets.curve <- function(e) {
}
#Recolours the three dots of the curve just completed. e$curveDots stores
#them as flat (x, y, id) triplets, so dot i occupies offsets
#(i-1)*3 + 1 .. (i-1)*3 + 3. Each dot is re-selected through the external
#set("dot", "selected", ...) binding and then coloured blue if its id was
#recorded as a slider (middle point), otherwise given (-1, -1, -1) --
#presumably the external binding's "restore default colour" sentinel
#(convention of the C/Tcl side; confirm there).
changeDotColor<-function(e) {
  print("changeDotColor")
  for(i in 1:3){
    # Unpack the i-th (x, y, id) triplet from the flat vector.
    x <- e$curveDots[[(i - 1) * 3 + 1]]
    y <- e$curveDots[[(i - 1) * 3 + 2]]
    id <- e$curveDots[[(i - 1) * 3 + 3]]
    if (set("dot", "selected", x, y)) {
      if (id %in% e$sliders) {
        # Slider (middle) point: pure blue.
        set("dot", "color", 0.0, 0.0, 1.0)
      } else {
        set("dot", "color", -1.0, -1.0, -1.0)
      }
    }
  }
}
#Double-click handler while in curve mode: collects landmarks into a
#three-point curve. The first and third clicks are end points; the second
#is recorded as the "slider" (middle) point. After the third click the
#curve is appended to e$activeDataList[[1]][[4]] (the curves matrix of the
#active specimen), drawn via add(), and the in-progress state is reset.
onSelectCurve <- function(e, x, y) {
  #print("onSelectCurve")
  if (set("dot", "selected", x, y)) {
    id <- tclvalue(shows("landmark", "id"))
    # The same landmark may not appear twice in one curve.
    if (id %in% e$curveLine) {
      tkmessageBox(title = "Information", message = "Duplicate dot in one curve is not allowed", icon = "info", type = "ok")
      return ()
    }
    # Record the landmark both as a curve member and as a raw (x, y, id)
    # triplet (consumed later by changeDotColor()).
    e$curveLine <- c(e$curveLine, as.numeric(id))
    e$curveDots <- c(e$curveDots, c(x, y, id))
    e$curveDotNum <- e$curveDotNum + 1
    # Highlight the newly picked dot (RGB given as fractions of 255).
    set("dot", "color", as.double(1/255), as.double(164/255), as.double(191/255))
    if (e$curveDotNum == 2) {
      # Second click: remember this landmark as the curve's slider point.
      e$sliders<-c(e$sliders, id)
      print(e$sliders)
    } else if (e$curveDotNum == 3) {
      # Third click: curve complete. Recolour its dots (in digitize mode),
      # store the new 1x3 row in the active data list, draw it, and reset.
      set("window", "mode", "digitize")
      changeDotColor(e)
      set("window", "mode", "curve")
      curves <- e$activeDataList[[1]][[4]]
      newCurve<-matrix(e$curveLine,nrow=1,ncol=3)
      curves <- rbind(curves, newCurve)
      e$activeDataList[[1]][[4]] <- curves
      add("curve", e$curveLine[1], e$curveLine[2], e$curveLine[3])
      e$curveDots <- c()
      e$curveDotNum <- 0
      e$curveLine <- c()
    }
  }
}
|
9a39a342dfc8eaabc26c661f72e404137d1ecd07
|
3d55639c3f79aa24cee4852e0dd3448d6c2d48e0
|
/plot6.R
|
c821938f66572d227c3b29972e079196cb7ea650
|
[] |
no_license
|
jlgzb/ExData_project2
|
2b701032ecdaa81cee58ce4f6eb3bf4ade02f36e
|
ef756092229bbb9fd0be08783ac278b6950b5c8f
|
refs/heads/master
| 2020-03-25T21:11:53.353575
| 2018-08-10T00:22:14
| 2018-08-10T00:22:14
| 144,164,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
plot6.R
|
# week 4
# Plot 6: compare PM2.5 emissions from motor vehicle sources in
# Baltimore City against Los Angeles County over 1999-2008.

# Load the NEI summary data and the source classification table.
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")

library(dplyr)
library(ggplot2)

NEI$year <- as.factor(NEI$year)

# Baltimore City records (fips == "24510").
baltimore <- subset(NEI, NEI$fips == "24510")
# Los Angeles County records (fips == "06037").
los_angeles <- subset(NEI, NEI$fips == "06037")

# SCC codes whose level-two description mentions a vehicle source.
vehicle_sources <- SCC[grepl("Vehicle", SCC$SCC.Level.Two),]

# Restrict each city to motor vehicle sources only.
baltimore_vehicle <- subset(baltimore, baltimore$SCC %in% vehicle_sources$SCC)
la_vehicle <- subset(los_angeles, los_angeles$SCC %in% vehicle_sources$SCC)

# Total emissions per year for each city.
baltimore_totals <- tapply(baltimore_vehicle$Emissions, baltimore_vehicle$year, sum)
la_totals <- tapply(la_vehicle$Emissions, la_vehicle$year, sum)

png("plot6.png", width = 480, height = 480)
par(mfrow = c(1, 2), mar = c(5, 4, 1, 2), oma = c(0, 0, 3, 0))
barplot(baltimore_totals,
        xlab = "Baltimore Trend",
        ylab = "Total emissions (tons)")
barplot(la_totals,
        xlab = "Los Angeles Trend",
        ylab = "Total emissions (tons)")
title("Emissions change from motor vehicle sources \n in Baltimore City and Los Angeles during 1999-2008", outer = TRUE)
dev.off()
|
a35da4d061c0f88e428c19c8ea81134add19fad0
|
22f3f3f959b0af491de1cc2cdd2d887343c93969
|
/CLASS-CDA-ToDo/syntax/JTN Code/Water1-jtn.R
|
4b0e981c090d6612fbaa3d45eafa5b581ec0ca8d
|
[] |
no_license
|
EccRiley/CLASS-CDA
|
ffef431e2c32579c1b2e2d6d067308609e00cfdf
|
5d74ca152e7553987d2ede3d6d9c9eed186e47bc
|
refs/heads/master
| 2021-05-01T01:24:39.675352
| 2017-05-01T16:46:23
| 2017-05-01T16:46:23
| 72,780,877
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 723
|
r
|
Water1-jtn.R
|
### Logistic regression on grouped binomial data
# r: success counts, n: group sizes, sex: group indicator (1 / 0)
r=c(32,38)
n=c(107,59)
sex=c(1,0)
# Two-column response matrix (successes, failures) per group.
counts=cbind(r,n-r)
model=glm(counts~sex,family=binomial("logit"))
result=summary(model,corr=TRUE)
result$coefficients
result$corr
### Predicted probability of sex=0
# result$coefficients is a 2x4 matrix; with column-major linear indexing
# [1] = intercept estimate, [2] = sex estimate, [3] = intercept std. error.
# 1/(exp(-eta)+1) is the inverse logit.
phat0=1/(exp(-result$coefficients[1])+1)
# 95% Wald interval for the intercept, transformed to the probability scale
# (note: -SE gives the upper limit because the inverse logit is increasing
# and the sign is applied inside exp(-...)).
upper=1/(exp(-result$coefficients[1]-qnorm(0.975)*result$coefficients[3])+1)
lower=1/(exp(-result$coefficients[1]+qnorm(0.975)*result$coefficients[3])+1)
pred=c(phat0,lower,upper)
###Predicted probability of sex=1
phat1=1/(exp(-result$coefficients[1]-result$coefficients[2])+1)
### estimated odds ratio
est_odds=exp(result$coefficients[2])
### Likelihood Ratio Test: the deviance change
LRT=drop1(model,test="Chisq")
|
ad702b606f6fea0cb19b3022bc1ca6db6a3fdd8a
|
c7a3ae8699dd590519951150fa170f2667e070ef
|
/R/vector_to_sqlfilter.R
|
045a5bf699e7fd7c9ddfca4f755435b483bceadf
|
[] |
no_license
|
leonardommarques/reliabilitytools
|
4e4192d9a9d770083c76319aba996262929373f5
|
18c76ec38f5fd94ee149ec4aeb3fa392cfa1ad19
|
refs/heads/master
| 2020-03-20T18:46:49.045916
| 2019-07-30T00:19:38
| 2019-07-30T00:19:38
| 137,603,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,059
|
r
|
vector_to_sqlfilter.R
|
#' AND SQL filter
#' Makes an 'AND' filter statement for a SQL WHERE clause.
#'
#'
#' @param field_name The name of the field in the data base.
#' @param values The values to be filtered
#' @return A \code{character} containing the SQL statement.
#' @details Returns an empty string when there is nothing to filter on
#'   (all values are \code{NA}, or a single blank value). A single value
#'   may contain several entries separated by \code{";"}.
#' @export
#' @examples
#' vector_to_sqlfilter(field_name = 'country',
#'                     values = c('BRA', 'GER', 'JPN'))
#'
vector_to_sqlfilter <- function(field_name = ''
                                , values = c('') ){

  # Empty-string and NA treatment.
  # Bug fix: the old test `sum(!is.na(values)==0)` parsed as
  # `sum(is.na(values))`, so the presence of ANY NA silently disabled the
  # whole filter; now only an all-NA vector (or a single blank value,
  # after stripping spaces) yields the empty filter. `||` replaces the
  # vectorized `|` for the scalar condition.
  cleaned <- gsub(' +', '', values)
  if (identical(cleaned, '') || all(is.na(values))) {
    return('')
  }

  # A single value may pack several entries separated by ";".
  if (length(values) == 1) {
    entries <- strsplit(as.character(values), split = "[;]")[[1]]
  } else {
    entries <- values
  }

  paste0('\nand ', field_name, ' in (', '"'
         , paste(entries, collapse = '", "')
         , '") ')
}
|
cedffca867baa62fd85a5046923bab46d4147e99
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/dils/R/GetSampleFromDb.R
|
be62ea1025d59dbd6032199d4e568f453225e44a
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 901
|
r
|
GetSampleFromDb.R
|
#' Sample from the rows of a (possibly large) database table (NOT IMPLEMENTED)
#'
#' Access a database table directly. Return a data.frame whose rows are the sample.
#'
#' @param n numeric, size of sample to be taken.
#' @param db connection, connection to the database table containing the data.
#' @return data.frame, size n random subset of the rows of filename
#' @seealso \code{\link{ScalablePCA}}, \code{\link{GetSampleFromDataFrame}}, \code{\link{GetSampleFromFile}}
#' @references
#' \url{https://github.com/shaptonstahl/}
#' @author Stephen R. Haptonstahl \email{srh@@haptonstahl.org}
#' @examples
#' \dontrun{x <- dils:::GetSampleFromDb(10, my.db)}
GetSampleFromDb <- function(n, db) {
  # NOT IMPLEMENTED: the comments below sketch the intended algorithm;
  # until it is written, the function always returns NULL.
  # Guardians
  # determine the number of rows in the table
  # determine the rows to be sampled
  # query rows to be sampled
  # format for return
  return(NULL)
}
|
faf51b97270c054c2328eca6969a33910efd78be
|
fbea037a28a30155c133f0b872219e40746295dd
|
/Data2020.R
|
fc5496f61485a1423ece8ab1ad67eafb545e7a9a
|
[] |
no_license
|
UMDDataChallenge200047/UMDDataChallenge2020Presentation
|
def863c0c67671ff48522e5a8b382588c5e2df28
|
a662a0676e099b78200702eb3184721689845b84
|
refs/heads/master
| 2021-02-04T12:33:23.596737
| 2020-02-29T12:50:59
| 2020-02-29T12:50:59
| 243,666,584
| 0
| 0
| null | 2020-02-28T03:11:20
| 2020-02-28T03:03:31
| null |
UTF-8
|
R
| false
| false
| 1,433
|
r
|
Data2020.R
|
# Classification-tree analysis of HUD program participation
# (UMD Data Challenge 2020). Machine-specific paths: adjust before running.
setwd("/Users/richa/Desktop/Random Crap/School Stuff/")
library(readxl)
HUDData<-read_xlsx("/Users/richa/Desktop/Random Crap/School Stuff/Data_Level2_HUD_HUDPrograms_Fulldataset.xlsx")
# Columns to exclude from the analysis (identifiers, age-band counts,
# neighbourhood percentage variables).
drops <- c("Year", "HEAD_ID","CHLDRN_AGE_0_3_CNT", "CHLDRN_AGE_4_5_CNT","CHLDRN_AGE_6_12_CNT", "CHLDRN_AGE_13_17_CNT",
           "ADLT_AGE_18_21_CNT", "ADLT_AGE_22_25_CNT", "ADLT_AGE_26_35_CNT", "ADLT_AGE_35_49_CNT",
           "ADLT_AGE_50_61_CNT", "ADLT_AGE_62_85_CNT", "ADLT_AGE_ABOVE85_CNT", "PVRTY_PRCNT", "MNRTY_PRCNT", "BLACK_PRCNT",
           "HISPANIC_PRCNT", "WHITE_PRCNT")
# NOTE(review): the result of this subset is discarded -- it was probably
# meant to be assigned back (HUDData <- HUDData[, ...]). Confirm intent.
HUDData[, !names(HUDData) %in% drops]
#as.character(HUDData$HEAD_RACE_CD)
#as.character(HUDData$pgm_type_edited)
#replace.value(HUDData, HUDData$HEAD_RACE_CD, c("1", "2", "3", "4", "5", "6"), c("White", "Black", "Native American", "Asian", "Hawaiian or Pacific Islander", "More than one race"))
#Tree Analysis
#something<-read.table("Data_Level2_HUD_HUDPrograms_Fulldataset.xlsx")
something <- as.data.frame(HUDData)
library("rpart")
# Fit a classification tree of program type on household-head demographics.
# cp = -1 disables the complexity-based stopping rule; the large
# minsplit/minbucket values keep only well-populated leaves.
treeAnalysis<-rpart(HUDData$pgm_type_edited~HUDData$HEAD_RACE_CD + HUDData$HEAD_ETHNCY_CD + HUDData$TOTAL_DPNDNT_CNT + HUDData$HEAD_GNDR_CD + HUDData$CHLDRN_MBR_CNT + HUDData$HEAD_DSBLTY_INDR, data=something, cp = -1, minsplit = 2500, minbucket = 2500)
#+HUDData$TOTAL_DPNDNT_CNT + HUDData$HEAD_GNDR_CD
library("rpart.plot")
# Avoid scientific notation in the printed node labels.
options("scipen" = 100, "digits" = 4)
rpart.plot(treeAnalysis, extra = 1)
|
fa386b31460c03e4893d19f13cc208b1a0f0e2b2
|
c26a15db12227206fe363d3807ca2b192f4df2bc
|
/man/sav_gol.Rd
|
0cb65d4f6870bb6db086c57bdf13cf16656aa773
|
[] |
no_license
|
cran/RTisean
|
52f3e67f0b18f8ed9a141841b70170fa43cf4e50
|
f819f6b88aa814cdaa5d1d2f1411cee105c978d2
|
refs/heads/master
| 2021-01-01T05:49:40.303232
| 2011-12-29T00:00:00
| 2011-12-29T00:00:00
| 17,692,991
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,669
|
rd
|
sav_gol.Rd
|
\name{sav_gol}
\alias{sav_gol}
\title{Savitzky-Golay filter}
\description{
A Savitzky-Golay filter to either clean the data from high frequency noise or to get
a better estimate of its derivative of a chosen order.}
\usage{
sav_gol(series, l, x = 0, c, m, n = "2,2", p = 2, D = 0)
}
\arguments{
\item{series}{a vector or a matrix.}
\item{l}{number of data to use.}
\item{x}{number of lines to be ignored.}
\item{c}{column to be read.}
\item{m}{number of components to be read (dimension).}
\item{n}{a string containing the two lengths of the averaging windows back and forward in time, separated by comma (see example)}
\item{p}{order of the fitted polynomial.}
\item{D}{order of the derivative to be estimated.}
}
\value{
A matrix containing the filtered data, arranged in \code{l} lines, each of which has \code{m} columns.
The first length of the averaging window back in time and the last length of the
averaging window forward in time lines are special. They contain the raw data
in the case that \code{D} was set to 0 and zeroes in the case that \code{D} was larger than zero.
}
\references{
W. H. Press, B. P. Flannery, S. A. Teukolsky, and W. T. Vetterling, ``Numerical Recipes'', 2nd edn., Cambridge University Press, Cambridge (1992).
}
\examples{
\dontrun{
numdata <- 500
dat <- cos(1:numdata/25)+rnorm(numdata,0,0.1)
windowlength <- 15
plot(dat[windowlength:(numdata-windowlength)],xlab="Time",t="l",ylab="Cos Data",ylim=c(-1.5,1.2))
filteredata <- sav_gol(dat,n="15,15")
lines(filteredata[windowlength:(numdata-windowlength)],col=2,lwd=2)
legend(300,-1.2, c("Noisy Data","Filtered Data"),fill=c(1,2), bty="n")
}
}
\keyword{ts}
|
62b73ddbc988a858c2fd51ae2f35a87461348197
|
95bf609fc05d2a5278449fcd969fbc4c558a8f1e
|
/Plot1.R
|
2322a0f3e61c63f8b5856b29b7d52095a8fe0902
|
[] |
no_license
|
alialiyar/EDACourseraAssignment
|
071769ce4f63e17dbd418d3ddcdb021bc9be29e0
|
bd8577cec1a706f343d21469f937cd3bdfad85b7
|
refs/heads/master
| 2022-11-10T22:30:25.702965
| 2020-06-29T14:46:52
| 2020-06-29T14:46:52
| 275,674,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,053
|
r
|
Plot1.R
|
# Making Plot 1 - Histogram of the Global active power
# Loads and adds the package "data.table"
library("data.table")
# Set the working directory to the folder holding the data file
# (machine-specific path -- adjust before running elsewhere).
setwd("C:/Users/Pabricio Marcos/Desktop/Coursera/curso")
# Reads the data GAP (Global active power) from file then subsets data for specified dates
GAP <- data.table::fread(input = "household_power_consumption.txt"
                        , na.strings="?"
)
# Coerce Global_active_power to numeric ("?" entries became NA above)
GAP[, Global_active_power := lapply(.SD, as.numeric), .SDcols = c("Global_active_power")]
# Change Date Column to Date Type
GAP[, Date := lapply(.SD, as.Date, "%d/%m/%Y"), .SDcols = c("Date")]
# Filter Dates for 2007-02-01 and 2007-02-02
GAP <- GAP[(Date >= "2007-02-01") & (Date <= "2007-02-02")]
# Open a 480x480 PNG device for the figure
png("plot1.png", width=480, height=480)
## Plot 1
hist(GAP[, Global_active_power], main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off()
|
9cebc967f9804ee447c3fdce5b21a2324270bb6c
|
d7fe0a47e83e3ccec46f2dbb30ba4e809bd81f61
|
/anova.R
|
9bc6829b31a28a46ac99ae7f69a588c8f86a0826
|
[] |
no_license
|
xenofonte35/Graphs
|
6fc217a3c8f5f95e7a307a6b7d75f67f6e5b4349
|
0ed6351ddab97e82e798267ea962deff6f96f51e
|
refs/heads/master
| 2023-04-28T18:53:09.413645
| 2023-04-13T15:22:12
| 2023-04-13T15:22:12
| 211,964,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
anova.R
|
# One-way ANOVA comparing the MEX, TUR and IND series from MIST2,
# followed by Tukey HSD and Bonferroni-adjusted pairwise t-tests.
# (Mex/Tur/Ind names are kept: they become the column names and hence
# the factor labels after stack().)
Mex <- c(MIST2$MEX)
Tur <- c(MIST2$TUR)
Ind <- c(MIST2$IND)

# Wide layout (one column per country), then stacked to long form with a
# `values` column and an `ind` grouping factor.
groups_wide <- data.frame(cbind(Mex, Tur, Ind))
groups_wide
groups_long <- stack(groups_wide)
groups_long

anova_fit <- aov(values ~ ind, data = groups_long)
summary(anova_fit)
TukeyHSD(anova_fit)
pairwise.t.test(groups_long$values, groups_long$ind, p.adj = "bonferroni")
plot(TukeyHSD(anova_fit), main = "Mexico, Indonesia and Turkey")
|
d2d02fafd3561ca79554d98697d6fa81c620a3a0
|
cce85d168debacecc97c225c340fda2891772e1b
|
/ex3Jussi/ex32Jussi.r
|
0ed5e59a7596515c72841a2dca286f83943b1d25
|
[] |
no_license
|
sisu/uml
|
e58de0c009e42750f52deba36a712d6e9b452a43
|
61023ce8ec1100be43d68559e92e51f33f8c4922
|
refs/heads/master
| 2021-01-25T05:23:13.236569
| 2013-03-24T12:02:37
| 2013-03-24T12:02:37
| 7,877,643
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,741
|
r
|
ex32Jussi.r
|
## Exercise set 3
# Exercise 1
# Scatter plot of (x1, x2) with each point coloured by its value in U1,
# binned into 50 rainbow colours over the range of U1.
plot_colour <- function(x1,x2,U1) {
  # x1, x2 data vectors
  # U1 contains the values of the projection (which we use for the colouring of
  # the data in x1 and x2)
  ncol = 50 # number of colours used
  myCol = rainbow(ncol, start = 0, end = 5/6)
  ra <-range(U1)
  d <-(ra[2]-ra[1])/ncol # space the range of U1 into equally wide intervals
  # make a vector containing the color corresponding to the value in U1
  U1col <- round((U1-ra[1])/d+1)
  # fold the maximum of U1 back into the last colour bin
  U1col[U1col==(ncol+1)] <- ncol
  U1col <- myCol[U1col]
  # plot
  plot(x1,x2,col=U1col, pch = 16)
}
# Load the projection exercise data: one data point per column.
data <- as.matrix(read.table('data_proj.txt'))
N <- length(data[1,])     # number of data points (columns)
ndim <- length(data[,1])  # dimensionality (rows)
# linear MDS: eigendecompose the N x N Gram matrix and take the
# eigenvector belonging to the largest eigenvalue.
D <- (t(data) %*% data)/N
eig <- eigen(D)
index <- eig$values == sort(eig$values, decreasing = T)[1]
MDS <- (eig$vectors)[,index]
# plotting (colour each point by the magnitude of its MDS coordinate)
plot_colour(data[1,], data[2,], abs(MDS))
## Exercise 2
# PCA on the same data (prcomp expects observations in rows, hence t());
# project every point onto the first principal axis.
PCA <- prcomp(t(data))
projection <- apply(data,2, '%*%', PCA$rotation[,1])
plot_colour(data[1,], data[2,], abs(projection))
## Exercise 3
# Euclidean distance from each column of y to the matching column of x
# (x may also be a single vector that recycles over y's columns).
eDist <- function(x, y) {
  delta <- x - y
  sqrt(colSums(delta^2))
}
# Indices of the k smallest values in x (distinct positions, ties kept).
# Bug fix: the old `match(sort(x)[1:k], x)` returned the FIRST position of
# each value, so tied minima produced the same index repeatedly; order()
# gives every tied element its own (stable) position.
which.kmin <- function(x, k){
  order(x)[seq_len(k)]
}
# One-dimensional self-organizing map on a ring topology.
#
# data:   ndim x N matrix, one data point per column
# nmodel: number of model (prototype) vectors on the ring
# niter:  number of batch update iterations
#
# Returns a list with the trained model vectors, the index of the closest
# model vector per data point, and each model's neighbour index triple.
SOM <- function(data, nmodel, niter){
  ndim <- length(data[,1])
  # Wrap-around index so that models 1 and nmodel are neighbours:
  # neighbours[[i]] becomes c(i-1, i, i+1) on the ring.
  index <- c(nmodel, 1: nmodel, 1)
  neighbours = vector('list', nmodel)
  for(iii in 1:nmodel){
    neighbours[[iii]] <- index[iii:(iii+2)]
  }
  # Random initialisation of the model vectors.
  model <- vector('list', nmodel)
  for(iii in 1:nmodel) model[[iii]] <- rnorm(dim(data)[1])
  for(j in 1 : niter){
    # distances of points to model vectors
    d <- lapply(model, eDist, y = data)
    #the closest model vector for each point:
    closest.model <- max.col(-t(Reduce(rbind, d)))
    # update the model vectors by mean of the data closest to them and their neighbours:
    for(iii in 1:nmodel){
      if(iii %in% closest.model){
        # NOTE(review): when the neighbourhood selects exactly one column,
        # data[, mask] drops to a plain vector, for which rowMeans() errors;
        # the mean() fallback only triggers for an empty selection. Confirm
        # whether the single-column case can occur / is handled upstream.
        if(length(data[, sapply(closest.model, '%in%', neighbours[[iii]])]) != 0){
          model[[iii]] <- rowMeans(data[, sapply(closest.model, '%in%', neighbours[[iii]])])
        } else {
          model[[iii]] <- mean(data[, sapply(closest.model, '%in%', neighbours[[iii]])])
        }
      }
    }
  }
  list('model' = model, 'closest.model' = closest.model, 'neighbours' = neighbours)
}
# Fit a 20-prototype SOM and colour the data by winning prototype.
nmodel = 20
som <- SOM(data,nmodel,100)
# plot
plot_colour(data[1,], data[2,], som$closest.model)
# Overlay the fitted prototype positions as filled points.
for(j in 1:nmodel){
  points(x = som$model[[j]][1], y = som$model[[j]][2], pch = 16)
}
## Exercise 4
# Read the six image matrices I1.txt .. I6.txt into a list.
I <- vector('list', 6)
for(iii in 1:6){
  file <- paste(paste('I', iii, sep = ''), '.txt', sep = '')
  I[[iii]] <- as.matrix(read.table(file))
}
# Cut an image matrix into non-overlapping 10x10 patches.
# Each patch is flattened column-wise into a length-100 vector; the
# patches become the columns of the returned data frame (row blocks in
# the outer order, column blocks in the inner order).
genPatches <- function(img) {
  n_row_blocks <- floor(nrow(img) / 10)
  n_col_blocks <- floor(ncol(img) / 10)
  patches_for_row_block <- function(row_block) {
    sapply(0:(n_col_blocks - 1), function(col_block) {
      img[10 * row_block + (1:10), 10 * col_block + (1:10)]
    })
  }
  data.frame(lapply(0:(n_row_blocks - 1), patches_for_row_block))
}
# Collect the 10x10 patches from all six images (one patch per column).
patches <- data.frame(lapply(I, genPatches))
## Exercise 5
# preprocessing: remove the mean image, then scale every patch (row-wise
# after the transpose gymnastics) to unit variance.
patches <- apply(patches, 2, '-', rowMeans(patches))
patches <- t(apply(patches, 1, function(x) x/sqrt(var(x))))
# som-algorithm application: fit SOMs with 10, 20 and 30 prototypes.
som <- vector('list', 3)
for(iii in 1:3){
  som[[iii]] <- SOM(patches, iii*10, 10)
}
# visualization: visual() is defined in visual.r (provided externally).
source('visual.r')
visual(Reduce(rbind,som[[3]]$model))
## Exercise 6
# Repeat with each preprocessing step ablated to see its effect.
patches <- as.matrix(data.frame(lapply(I, genPatches)))
# no preprocessing:
som <- SOM(patches, 20, 10)
visual(Reduce(rbind,som$model))
# no normalization to unit variance:
som <- SOM(apply(patches, 2, '-', rowMeans(patches)), 20, 10)
visual(Reduce(rbind,som$model))
# no subtraction of mean:
som <- SOM(t(apply(patches, 1, function(x) x/sqrt(var(x)))), 20, 10)
visual(Reduce(rbind,som$model))
# with preprocessing:
patches <- apply(patches, 2, '-', rowMeans(patches))
patches <- t(apply(patches, 1, function(x) x/sqrt(var(x))))
som <- SOM(patches, 20,10)
visual(Reduce(rbind,som$model))
|
94c53fd4e7f1a507be44c7a0ca6a84925a85291b
|
9d0396c164725f3b5dab9e1eec8828c30f2f9cd9
|
/RawData/Script/DownloadData.R
|
16a2a11e64c3e26b5e0373ace87bac23d67572f2
|
[] |
no_license
|
steven-tom/Stat-133-Grade-Distribution
|
4c5de7820ab1a7532c4fb61585d1f16abf34a87e
|
e9c53b0581346461a8312b62ab7d4997989b49dd
|
refs/heads/master
| 2021-01-01T05:07:33.569303
| 2016-04-22T04:20:24
| 2016-04-22T04:20:24
| 56,825,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 211
|
r
|
DownloadData.R
|
########################################
#
# Download the files into "RawData"
#
# NOTE(review): download.file() is called with no arguments; it requires
# at least `url` and `destfile` and will error as written. The source
# URL is not recorded in this script -- fill it in before running.
# Also note that setwd() changes the session working directory as a
# side effect.
#
########################################
setwd("./RawData")
download.file()
########################################
|
1446e8c0c56411bf34f336ac448f96ecf6988bc5
|
47e52cf4f01a8139d89ba40128d6195173e49249
|
/juice/man/end.TScanonicalPrediction.Rd
|
9da0de8e8e8fd5ddf0bfffa070c37c1c5553d971
|
[] |
no_license
|
cran/dseplus
|
b63510aba28db0c03c824a3c71f4699dc2460d27
|
cfde581aa373baf801b98603c65340599e232afb
|
refs/heads/master
| 2016-09-05T19:55:26.678322
| 2006-10-04T00:00:00
| 2006-10-04T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 769
|
rd
|
end.TScanonicalPrediction.Rd
|
\name{end.TScanonicalPrediction}
\alias{end.TScanonicalPrediction}
\alias{start.TScanonicalPrediction}
\alias{frequency.TScanonicalPrediction}
\alias{periods.TScanonicalPrediction}
\title{Specific Methods for TScanonicalPrediction}
\description{See the generic function description.}
\usage{
\method{end}{TScanonicalPrediction}(x, ...)
\method{start}{TScanonicalPrediction}(x, ...)
\method{periods}{TScanonicalPrediction}(x)
\method{frequency}{TScanonicalPrediction}(x, ...)
}
\arguments{
\item{x}{An object containing TSdata.}
\item{...}{(further arguments, currently disregarded).}
}
\value{Depends.}
\seealso{
\code{\link{end}}
\code{\link{start}}
\code{\link[tframe]{periods}}
\code{\link{frequency}}
}
\concept{DSE}
\keyword{ts}
|
b17490b5145ccecd4642494eb053ddae5cad30d7
|
9267c5a23c403c2fc92f76c2ed27ac527f2877d0
|
/summary_plots/chrom_summary.old/repeats.R
|
33697ff18e78087254d18459822b12901f710f32
|
[] |
no_license
|
stajichlab/coprinopsis_PNAS_2010
|
039526b23609e9674071e654719e22cfefe4b854
|
d9fda3a02a62fa2a769e2d3b43f5277f26115045
|
refs/heads/master
| 2021-01-10T03:39:40.735664
| 2018-05-07T13:45:23
| 2018-05-07T13:45:23
| 46,472,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 246
|
r
|
repeats.R
|
# Compare repeat density between chromosome arms and centers: read the
# per-region totals and draw a side-by-side boxplot into repeats.pdf.
repeatsarms <- read.table("repeats_arms.dat", header = TRUE)
repeatsctr <- read.table("repeats_center.dat", header = TRUE)
pdf("repeats.pdf")
boxplot(repeatsarms$TOTAL, repeatsctr$TOTAL, main = "repeats Density BoxPlot",
        outline = FALSE, names = c("Arms", "Center"))
# Close the graphics device so the PDF is finalized; the original script
# left the device open, which leaves the file incomplete in interactive
# sessions.
dev.off()
|
36de06c18802428d9764a91f7487b2ea8f984946
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/netmeta/R/setref.R
|
e1d07fecc76957a9ac52f503a7615b95c015c2bb
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
setref.R
|
# Resolve the reference treatment group. 'reference.group' may be a
# single position (numeric) or a single label (character, matched
# case-insensitively when lower-casing does not merge distinct labels);
# the matching element of 'levs' is returned.
#
# Fix: the numeric bounds check used `1:length(levs)`, which for empty
# `levs` evaluates to c(1, 0) and therefore accepted index 1, silently
# returning NA instead of the intended error; seq_along() is safe.
setref <- function(reference.group, levs){
  if (length(reference.group) != 1)
    stop("Argument 'reference.group' must be a numeric or a character string.", call.=FALSE)
  ##
  if (is.numeric(reference.group)) {
    if (is.na(reference.group))
      stop("Missing value not allowed in argument 'reference.group'.", call.=FALSE)
    if (!(reference.group %in% seq_along(levs)))
      stop(paste("Argument 'reference.group' must be any of the integers from 1 to ",
                 length(levs), ".", sep=""), call.=FALSE)
    res <- levs[reference.group]
  }
  else if (is.character(reference.group)) {
    # Case-insensitive matching is only safe when lower-casing the labels
    # keeps them distinct; otherwise match exactly.
    if (length(unique(levs)) == length(unique(tolower(levs))))
      pos <- charmatch(tolower(reference.group), tolower(levs), nomatch=NA)
    else
      pos <- charmatch(reference.group, levs, nomatch=NA)
    if (any(is.na(pos)) || any(pos == 0))
      stop(paste("Argument 'reference.group' must be any of following values:\n  ",
                 paste(paste("'", levs, "'", sep=""),
                       collapse=" - "), sep=""), call.=FALSE)
    res <- levs[pos]
  }
  # For unsupported input types this intentionally fails with
  # "object 'res' not found", as in the original.
  res
}
|
e82f2d41fed2086d7cd0b265b16a42f71901aa80
|
69d799536643c4fb29a24dfa098ec6b19e76acd4
|
/R/config.R
|
e51c6f0cb25ed63498a3c04a5fc7e010e359320f
|
[
"MIT"
] |
permissive
|
PawanRamaMali/jinjar
|
6234d9ed53c987fb659cd178ffaa78d5bb94dd8e
|
658c7094b196bcc4810a58a119c517220087ccaf
|
refs/heads/master
| 2023-08-29T10:31:48.280009
| 2021-10-26T04:16:03
| 2021-10-26T04:16:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,017
|
r
|
config.R
|
#' Configure the templating engine
#'
#' Create an object to configure the templating engine behavior (e.g. customize
#' the syntax). The default values have been chosen to match the Jinja defaults.
#'
#' @note The equivalent Jinja class is `Environment`, but this term has special
#' significance in R (see [environment()]).
#'
#' @param loader How the engine discovers templates. Choices:
#' * `NULL` (default), disables search for templates.
#' * Path to template directory.
#' * A [`loader`] object.
#' @param block_open,block_close The opening and closing delimiters
#' for control blocks. Default: \verb{"\{\%"} and \verb{"\%\}"}.
#' @param variable_open,variable_close The opening and closing delimiters
#' for print statements. Default: `"{{"` and `"}}"`.
#' @param comment_open,comment_close The opening and closing delimiters
#' for comments. Default: `"{#"` and `"#}"`.
#' @param line_statement The prefix for an inline statement. If `NULL` (the
#' default), inline statements are disabled.
#' @param trim_blocks Remove first newline after a block. Default: `FALSE`.
#' @param lstrip_blocks Remove inline whitespace before a block. Default: `FALSE`.
#' @param ignore_missing_files Ignore `include` or `extends` statements when
#' the auxiliary template cannot be found. If `FALSE` (default), then an error
#' is raised.
#' @return A `"jinjar_config"` object.
#'
#' @examples
#' jinjar_config()
#' @export
jinjar_config <- function(loader = NULL,
block_open = "{%",
block_close = "%}",
variable_open = "{{",
variable_close = "}}",
comment_open = "{#",
comment_close = "#}",
line_statement = NULL,
trim_blocks = FALSE,
lstrip_blocks = FALSE,
ignore_missing_files = FALSE) {
checkmate::assert(
checkmate::check_null(loader),
checkmate::check_directory_exists(loader),
checkmate::check_class(loader, "jinjar_loader")
)
if (is.character(loader)) {
loader <- path_loader(loader)
}
checkmate::assert_string(block_open, min.chars = 1)
checkmate::assert_string(block_close, min.chars = 1)
checkmate::assert_string(variable_open, min.chars = 1)
checkmate::assert_string(variable_close, min.chars = 1)
checkmate::assert_string(comment_open, min.chars = 1)
checkmate::assert_string(comment_close, min.chars = 1)
checkmate::assert_string(line_statement, min.chars = 1, null.ok = TRUE)
checkmate::assert_flag(trim_blocks)
checkmate::assert_flag(lstrip_blocks)
checkmate::assert_flag(ignore_missing_files)
delimiters <- c(
variable_open = variable_open,
variable_close = variable_close,
block_open = block_open,
block_close = block_close,
line_statement = line_statement %||% "",
comment_open = comment_open,
comment_close = comment_close
)
if (anyDuplicated(delimiters)) {
conflicts <- delimiters[duplicated(delimiters) | duplicated(delimiters, fromLast = TRUE)]
stop(
paste("Conflicting delimiters:", paste(names(conflicts), collapse = ", ")),
call. = FALSE
)
}
structure(c(as.list(delimiters), list(
loader = loader,
trim_blocks = trim_blocks,
lstrip_blocks = lstrip_blocks,
ignore_missing_files = ignore_missing_files
)), class = "jinjar_config")
}
#' @export
# Print method for "jinjar_config": reports the loader status and the
# configured delimiter syntax, then returns the object invisibly.
print.jinjar_config <- function(x, ...) {
  if (is.null(x$loader)) cat("Loader: disabled\n") else print(x$loader)
  syntax_parts <- c(
    "Syntax:",
    x$block_open, "block", x$block_close,
    x$variable_open, "variable", x$variable_close,
    x$comment_open, "comment", x$comment_close
  )
  cat(syntax_parts)
  invisible(x)
}
#' @rdname jinjar_config
#' @export
# Lazily build and memoise the package default configuration in
# options("jinjar.default_config"), so jinjar_config() runs at most once.
default_config <- function() {
  cached <- getOption("jinjar.default_config")
  if (!is.null(cached)) {
    return(cached)
  }
  cached <- jinjar_config()
  options("jinjar.default_config" = cached)
  cached
}
|
6c935e44416055721d78917277a40813628e0880
|
916a2456f7e29af6403de6ae8dbf31c62f49e923
|
/proj_part2.R
|
a0dc745aaa313cc205175b0352c9e388981c1639
|
[] |
no_license
|
AndreyDrv/stat_cour
|
83ae4030a4b206ce79718abfcc908636ef13ee1a
|
d42d7a42aa1f488e89552bbdfd1f244928448b2a
|
refs/heads/master
| 2021-01-10T18:40:59.567382
| 2015-05-22T09:42:23
| 2015-05-22T09:42:23
| 35,663,154
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,325
|
r
|
proj_part2.R
|
###########################################################################
# The Effect of Vitamin C on Tooth Growth in Guinea Pigs
###########################################################################
#
#The response is the length of odontoblasts (teeth) in each of 10 guinea
#pigs at each of three dose levels of Vitamin C (0.5, 1, and 2 mg) with each
#of two delivery methods (orange juice or ascorbic acid).
#
#A data frame with 60 observations on 3 variables.
#[,1] len numeric Tooth length
#[,2] supp factor Supplement type (VC or OJ).
#[,3] dose numeric Dose in milligrams.
# Exploratory analysis of the ToothGrowth data set: summarises the data,
# plots the tooth-length distribution, and computes normal-approximation
# 95% confidence intervals by supplement type.
#
# Fixes over the original version:
#  * data(ToothGrowth) returns the *name* of the data set, not the data,
#    so summary()/indexing on its return value operated on a string; the
#    data frame itself is now used.
#  * the pooled confidence interval referenced an undefined variable `x`.
#  * ggplot objects are not auto-printed inside a function, so the plots
#    never appeared; they are now print()ed explicitly.
#
# NOTE(review): this function assumes ggplot2 is attached by the caller;
# no library(ggplot2) call is visible in this file.
#
# Returns a list with the per-supplement means and the VC / OJ / pooled
# confidence intervals.
analyze <- function(){
  data(ToothGrowth)
  d <- ToothGrowth
  ###############################################
  # 1. Summary of the data
  print(summary(d))
  # Histogram of tooth length with the mean (solid line) and
  # mean +/- 1 sd (dashed lines).
  print(
    ggplot(d, aes(x = len)) +
      geom_histogram(binwidth = .5, colour = "black", fill = "white") +
      geom_vline(aes(xintercept = mean(d$len)),
                 color = "blue", linetype = "solid", size = 1) +
      geom_vline(aes(xintercept = c(-1, 1) * sd(d$len) + mean(d$len)),
                 color = "blue", linetype = "dashed", size = .5)
  )
  # Density overlay on the histogram.
  print(
    ggplot(d, aes(x = len)) +
      geom_histogram(aes(y = ..density..), binwidth = .5,
                     colour = "black", fill = "white") +
      geom_density(alpha = .2, fill = "#FF6666")
  )
  # Density of tooth length split by supplement type (OJ vs VC).
  print(ggplot(d, aes(x = len, fill = supp)) + geom_density(alpha = .3))
  oj_mean <- mean(d$len[d$supp == "OJ"])
  vc_mean <- mean(d$len[d$supp == "VC"])
  ###############################################
  # 2. Normal-approximation 95% confidence intervals for the mean tooth
  # length, by supplement type and pooled.
  x1 <- d$len[d$supp == "VC"]
  x2 <- d$len[d$supp == "OJ"]
  ci_vc <- mean(x1) + c(-1, 1) * qnorm(.975) * sd(x1) / sqrt(length(x1))
  ci_oj <- mean(x2) + c(-1, 1) * qnorm(.975) * sd(x2) / sqrt(length(x2))
  x <- c(x1, x2)  # pooled sample (the original referenced an undefined `x`)
  ci_all <- mean(x) + c(-1, 1) * qnorm(.975) * sd(x) / sqrt(length(x))
  ###############################################
  # 3. Hypotheses to investigate (kept from the original notes):
  #    #1: OJ has a stronger influence on tooth growth than VC.
  #    #2: The influence depends on the dose.
  #    #3: Tooth growth varies jointly with supplement and dose.
  list(oj_mean = oj_mean, vc_mean = vc_mean,
       ci_vc = ci_vc, ci_oj = ci_oj, ci_all = ci_all)
}
|
e04b1a6db9578eafe6bc71e0c034a6403c26a8e2
|
3f858f84495ae252181b9a32ef4807634c8afc93
|
/rabbitGUI_code/ui.R
|
064b59b1cd2d203204f722e8c191cc53e4041f0f
|
[] |
no_license
|
anabrandusa/rabbitGUI
|
a8cb73edea9fbc0856034cf8831969a7c879adaa
|
f9947bf0b67270c6fccc7930f5f11c47af99c12c
|
refs/heads/master
| 2021-01-12T12:06:25.545735
| 2017-03-02T18:42:40
| 2017-03-02T18:42:40
| 72,299,526
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,161
|
r
|
ui.R
|
# 10/29/2016. Author: Ana Brandusa Pavel
library(markdown)
# Pipeline step identifiers (internal names), keyed by their UI labels.
step.names=c("gene filter" , "feature selection" , "biomarker size" , "classification" )
step.labels=c("Feature filter" , "Feature ranking" , "Size selection" , "Classification" )
names(step.names) = step.labels
# Colours available for per-step boxplots, keyed by step name.
box.col = c("blue", "magenta", "orange", "brown", "green", "pink", "purple", "cyan", "yellow", "indigo")
names(box.col)= step.names
# NOTE(review): auc.means and source.route must already exist in the
# session (presumably created by a launcher script before this ui.R is
# sourced) -- confirm before reuse.
number.of.models = nrow(auc.means)
#setwd("C:/Users/Ana/Documents/Visual Studio 2015/Projects/pipeline/pipeline")
#setwd(data.route)
#all.data.table = read.csv("alldata.csv", header = T, stringsAsFactors = F)
#message("Data table read")
#possible.iterations = unique(all.data.table$Iteration)
# Load the helper scripts (side effect: changes the working directory).
setwd(source.route)
source("ModelSelection.R")
setwd(source.route)
source("meanAUC.R")
source("calculateModelHeaders.R")
ui = navbarPage("rabbitGUI",
tabPanel("Model selection",
sidebarLayout(
sidebarPanel(
radioButtons("step", "Visualize the best option for each step",
step.labels)),
mainPanel(
#h3("Mean AUC"),
plotOutput("medianAuc"),
tags$a(h4("Boxplot summary (click to display)"), id = "boxplotSummaryHeader"),
shinyjs::hidden(
div(DT::dataTableOutput("boxplotParameters"), id = "boxplotSummary")),
h4("TukeyHSD comparison"),
div(DT::dataTableOutput("tukeyValues"), id = "tukeyValues"),
h4("Best combination of models"),
DT::dataTableOutput("displayBestModels")))),
tabPanel("Random mean AUC",
sidebarLayout(
sidebarPanel(
radioButtons("stepComparison", "Select the best option for each step",
step.labels), width = 2),
mainPanel(
h3("Real mean AUC"),
plotOutput("medianAucComparison"), width = 10
#,
#h3("Random mean AUC"),
#plotOutput("medianAucRandom")
))),
tabPanel("Prediction Scores",
fluidRow(splitLayout(cellWidths = c("50%", "50%"),
#sliderInput("selected.model.number", "Models sorted by the the highest AUC",
#min = 1, max = number.of.models, step = 1, value = 1),
selectInput("selected.model.header", label = h5("Model description"),
choices = as.list(header.descriptions.body),
width = 700,
selected = 1))
)
,
h3("Real score"),
fluidRow(splitLayout(cellWidths = c("70%", "30%"),
plotOutput("plotModelHistogram"),
plotOutput("ROCCurve"))),
h3("Random score"),
fluidRow(splitLayout(cellWidths = c("70%", "30%"),
plotOutput("plotModelHistogramRandom"),
plotOutput("ROCCurveRandom")))),
tabPanel("View biomarker: training",
sidebarLayout(
sidebarPanel(
fileInput("phenoFull", "Expression Phenotype File",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")),
fileInput("sampleClassFull", "Sample Class File (0 - green, 1 - orange)",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")), fileInput("featureListFull", "Feature List",
accept = c(
"text/txt",
".txt"))), mainPanel(
plotOutput("modelHeatmapFull", height = "600px")))),
tabPanel("View biomarker: test",
sidebarLayout(
sidebarPanel(
fileInput("pheno", "Expression Phenotype File",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")),
fileInput("sampleClass", "Sample Class File (0 - green, 1 - orange)",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")),
fileInput("featureList", "Feature List",
accept = c(
"text/txt",
".txt")),
fileInput("classifScores", "Classification Scores",
accept = c(
"text/txt",
".txt"))), mainPanel(
plotOutput("modelHeatmap", height = "600px"),
fluidRow(splitLayout(cellWidths = c("20%", "40%", "20%"),
plotOutput("Padding"),
plotOutput("modelROCCurve"),
plotOutput("Padding2")
)))
)),
#tabPanel("Biomarker Heatmap",
#sidebarLayout(
#sidebarPanel(
#fileInput("heatmapFile", "Choose heatmap File",
#accept = c(
#"text/csv",
#"text/comma-separated-values,text/plain",
#".csv"))),
#mainPanel(
#h3("Biormarker Heatmap")
##,plotOutput("biomarkerHeatmap")
#))),
#tabPanel("Model ranking",
#sidebarLayout(
#sidebarPanel(
#radioButtons("step", "Select the best option for each step",
#step.labels)),
#mainPanel(
#h2("AUC Comparison"),
#h3("Real AUC"),
#plotOutput("medianAuc"),
#h3("Random AUC"),
#plotOutput("medianAucRandom"),
#h2("Boxplot summary for real AUC"),
#DT::dataTableOutput("boxplotParameters"),
#h4("TukeyHSD comparison"),
#DT::dataTableOutput("tukeyValues")))),
#tabPanel("Summary",
#verbatimTextOutput("summary")
#),
#tabPanel("Table",
#DT::dataTableOutput("table")
#),
#navbarMenu("Help",
#tabPanel("Package dependencies",
#fluidRow(
#column(3,
#tags$b(
#"Dependencies for", a(href = "http://topepo.github.io/caret/index.html", "caret", target="_blank"), "package"
#),
#tags$ul(
#tags$li("pbkrtest (R >= 3.2.3)"),
#tags$li("car (R >= 3.2.0)"),
#tags$li("nlme (R >= 3.0.2)")
#),
#tags$b(
#"Dependencies for", a(href = "https://github.com/jperezrogers/rabbit", "rabbit", target="_blank"), "package"
#),
#tags$ul(
#tags$li("devtools"),
#tags$li("multtest"),
#tags$li("impute"),
#tags$li("samr"),
#tags$li("e1071"),
#tags$li("randomForest"),
#tags$li("klaR"),
#tags$li("kernlab"),
#tags$li("pROC"),
#tags$li("glmnet"),
#tags$li("limma"),
#tags$li("genefilter")
#),
#tags$b(
#"Dependencies for ", a(href = "https://github.com/anabrandusa/rabbitGUI", "rabbitGUI", target="_blank")
#),
#tags$ul(
#tags$li("shiny"),
#tags$li("DT")
#)
#)
#)
#),
tabPanel("About",
fluidRow(
column(3,
tags$b(
a(href = "https://github.com/jperezrogers/rabbit", "rabbit package", target = "_blank"),
renderText(paste("", "", sep = "\n")),
a(href = "https://github.com/anabrandusa/rabbitGUI/", "rabbitGUI v1.00", target = "_blank")),
img(class = "img-polaroid",
src = "https://avatars1.githubusercontent.com/u/5145014?v=3&s=400")
#tags$small(
# a(href = "https://github.com/jperezrogers/rabbit", "rabbit", target="_blank")
#)
# )
)
)
), shinyjs::useShinyjs()
)
|
98a2ed0a11d708e67ba3a8898b15a7efa816c3e2
|
18f14d3e86a84aee6342b693d533882730ff19b8
|
/man/aftbino.mbe.Rd
|
cabca816f74227199a14a0368f3617d52ec36563
|
[] |
no_license
|
leandroroser/Ares_1.2-4
|
f645311542922659bde8dac27a4227b69d66b078
|
797c3960f46d5153354bcf6008aecc738f3365c9
|
refs/heads/master
| 2021-01-01T03:56:13.149249
| 2016-05-02T04:11:48
| 2016-05-02T04:11:48
| 56,180,677
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 282
|
rd
|
aftbino.mbe.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/aftbino.mbe.R
\name{aftbino.mbe}
\alias{aftbino.mbe}
\title{aftbino.mbe}
\usage{
aftbino.mbe(count, estimated.richness = NULL, conf.level = 0.95)
}
\description{
aftbino.mbe
}
\keyword{internal}
|
641e273d8b641040de5e3bc1f378aab7ef91b194
|
24515ce15e7d005c952ad6f337722f2fd85af67a
|
/cachematrix.R
|
b0edef391c32d979585ae921a273adad5a7fca8b
|
[] |
no_license
|
Avijit0616/DataScienceCoursera
|
bf9155ab8cee3e4cb64ff6b5ab3d784d4e6bfd0f
|
9f0f7b4597227def94284aab9d5dac266da5160f
|
refs/heads/master
| 2022-05-26T02:22:39.669088
| 2020-05-02T00:39:41
| 2020-05-02T00:39:41
| 256,111,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
cachematrix.R
|
## Cache-aware matrix wrapper: a list of closures sharing the matrix and
## its (lazily computed) inverse through their enclosing environment.
##
## The returned list exposes:
##   set(y)      - replace the matrix and invalidate the cached inverse
##   get()       - return the current matrix
##   setinv(inv) - store a computed inverse in the cache
##   getinv()    - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix => old inverse is stale
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## The inverse is computed at most once: a cached value is reused (with
## a message), and a freshly computed inverse is stored in the cache.
## Assumes the wrapped matrix is invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, remember it, return it.
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
    inverse
  } else {
    message("getting cached data")
    cached
  }
}
## Below is an example to illustrate the above two functions
mat<-matrix(c(1:8,8),3,3,byrow = T)
k<-makeCacheMatrix(mat)
k$get()      # the wrapped matrix
k$getinv()   # NULL: no inverse cached yet
cacheSolve(k)  # first call computes the inverse and caches it
k$get()
k$getinv()   # now returns the cached inverse
cacheSolve(k)  # second call prints "getting cached data" and reuses it
|
0492c5ccb3ca2263e6c25393920a3de92839994d
|
389899d13b1465958f48e85dba154418e7341429
|
/cachematrix.R
|
ff02dac820423519b10caddb3be8462f85b6c760
|
[] |
no_license
|
what2do/ProgrammingAssignment2
|
8cbab3bf4a8d8d310e285a78452873d1e8d7b215
|
4a2df3478794b28af0d88a20b3d3f8e2c5e9aaf9
|
refs/heads/master
| 2021-01-20T21:56:46.679791
| 2014-09-17T17:37:21
| 2014-09-17T17:37:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,471
|
r
|
cachematrix.R
|
## Matrix inversion can be costly, so this wrapper caches the inverse of
## a matrix alongside the matrix itself. All four accessor closures
## close over the same environment, so a value written through one is
## visible to the others.
##
##   set(y)             - replace the matrix, invalidating the cache
##   get()              - return the current matrix
##   setinverse(inv)    - store a computed inverse
##   getinverse()       - return the cached inverse (NULL if absent)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache when the matrix changes
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached (and the matrix has
## not changed, which would have cleared the cache), the cached value is
## returned with a message; otherwise it is computed via solve() and
## stored back through setinverse(). Assumes the matrix is invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  known <- x$getinverse()
  if (is.null(known)) {
    known <- solve(x$get(), ...)
    x$setinverse(known)
  } else {
    message("getting cached data")
  }
  known
}
## Test run output
## > source("cachematrix.R")
## > amatrix = makeCacheMatrix(matrix(c(1,2,3,4), nrow=2, ncol=2))
## > amatrix$get()
## [,1] [,2]
## [1,] 1 3
## [2,] 2 4
## > cacheSolve(amatrix)
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## > amatrix$getinverse()
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## > cacheSolve(amatrix)
## getting cached data
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## > amatrix$set(matrix(c(0,5,99,66), nrow=2, ncol=2))
## > cacheSolve(amatrix)
## [,1] [,2]
## [1,] -0.13333333 0.2
## [2,] 0.01010101 0.0
## > amatrix$get()
## [,1] [,2]
## [1,] 0 99
## [2,] 5 66
## > amatrix$getinverse()
## [,1] [,2]
## [1,] -0.13333333 0.2
## [2,] 0.01010101 0.0
## > cacheSolve(amatrix)
## getting cached data
## [,1] [,2]
## [1,] -0.13333333 0.2
## [2,] 0.01010101 0.0
|
e12dd4c3a85ca8cc032db4568ef1a7a045960078
|
257b39265a6b796d54e0e861825984e7e205bbd8
|
/man/z_HY.Rd
|
82a22a3e61495eaff8b569389beeeb0cbdf52e5a
|
[] |
no_license
|
yaoguodong/zFactor-1
|
230c8576f004efb6bde669c60e249fd36134ca4f
|
66d6f0732e35c8e84bcd98d28251a0badc7fe423
|
refs/heads/master
| 2020-04-20T04:26:18.046950
| 2017-10-23T06:22:46
| 2017-10-23T06:22:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 331
|
rd
|
z_HY.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{z_HY}
\alias{z_HY}
\title{Hall-Yarborough tidy dataset}
\format{An object of class \code{data.frame} with 28 rows and 5 columns.}
\usage{
z_HY
}
\description{
28 observations of 5 variables
}
\keyword{datasets}
|
7e95bc9fe2dec66c743c1b14eca9cfb772dae00e
|
f517f53080a1a833848b9fd3ff8cc2830a8d523c
|
/R/plot_qvalue.R
|
b1549a8d1ccaa2ad8edc2ab545939ffd609b3bf5
|
[
"BSD-2-Clause"
] |
permissive
|
PNNL-Comp-Mass-Spec/Rodin
|
a2b3ddadd312dde9a00e9f03c8deb65a42293579
|
8f93bc5f9e007744d19e3d60c76973aa3e8a115e
|
refs/heads/master
| 2022-02-25T01:34:19.019930
| 2022-02-16T22:19:38
| 2022-02-16T22:19:38
| 144,644,879
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,247
|
r
|
plot_qvalue.R
|
#' @title Plotting function for q-value object
#' @description
#' Graphical display of the q-value object
#'
#' @param x A q-value object.
#' @param rng Range of q-values to show. Optional
#' @param \ldots Additional arguments. Currently unused.
#'
#' @details
#' The function plot allows one to view several plots:
#' \enumerate{
#' \item The estimated \eqn{\pi_0}{pi_0} versus the tuning parameter
#' \eqn{\lambda}{lambda}.
#' \item The q-values versus the p-values.
#' \item The number of significant tests versus each q-value cutoff.
#' \item The number of expected false positives versus the number of
#' significant tests.
#' }
#'
#' This function makes four plots. The first is a plot of the
#' estimate of \eqn{\pi_0}{pi_0} versus its tuning parameter
#' \eqn{\lambda}{lambda}. In most cases, as \eqn{\lambda}{lambda}
#' gets larger, the bias of the estimate decreases, yet the variance
#' increases. Various methods exist for balancing this bias-variance
#' trade-off (Storey 2002, Storey & Tibshirani 2003, Storey, Taylor
#' & Siegmund 2004). Comparing your estimate of \eqn{\pi_0}{pi_0} to this
#' plot allows one to guage its quality. The remaining three plots
#' show how many tests are called significant and how many false
#' positives to expect for each q-value cut-off. A thorough discussion of
#' these plots can be found in Storey & Tibshirani (2003).
#'
#' @return
#' Nothing of interest.
#'
#' @references
#' Storey JD. (2002) A direct approach to false discovery rates. Journal
#' of the Royal Statistical Society, Series B, 64: 479-498. \cr
#' \url{http://onlinelibrary.wiley.com/doi/10.1111/1467-9868.00346/abstract}
#'
#' Storey JD and Tibshirani R. (2003) Statistical significance for
#' genome-wide experiments. Proceedings of the National Academy of Sciences,
#' 100: 9440-9445. \cr
#' \url{http://www.pnas.org/content/100/16/9440.full}
#'
#' Storey JD. (2003) The positive false discovery rate: A Bayesian
#' interpretation and the q-value. Annals of Statistics, 31: 2013-2035. \cr
#' \url{http://projecteuclid.org/DPubS/Repository/1.0/Disseminate?view=body&id=pdf_1&handle=euclid.aos/1074290335}
#'
#' Storey JD, Taylor JE, and Siegmund D. (2004) Strong control,
#' conservative point estimation, and simultaneous conservative
#' consistency of false discovery rates: A unified approach. Journal of
#' the Royal Statistical Society, Series B, 66: 187-205. \cr
#' \url{http://onlinelibrary.wiley.com/doi/10.1111/j.1467-9868.2004.00439.x/abstract}
#'
#' Storey JD. (2011) False discovery rates. In \emph{International Encyclopedia of Statistical Science}. \cr
#' \url{http://genomine.org/papers/Storey_FDR_2011.pdf} \cr
#' \url{http://www.springer.com/statistics/book/978-3-642-04897-5}
#'
#' @author John D. Storey, Andrew J. Bass
#' @seealso \code{\link{qvalue}}, \code{\link{write.qvalue}}, \code{\link{summary.qvalue}}
#' @keywords plot
#' @aliases plot, plot.qvalue
#' @export
plot.qvalue <- function(x, rng = c(0.0, 0.1), ...) {
  # Plotting function for q-object.
  #
  # Args:
  #   x: A q-value object returned by the qvalue function.
  #   rng: The range of q-values to be plotted (optional).
  #
  # Returns
  #   Four plots-
  #     Upper-left: pi0.hat(lambda) versus lambda
  #     Upper-right: q-values versus p-values
  #     Lower-left: number of significant tests per each q-value cut-off
  #     Lower-right: number of expected false positives versus number of
  #                  significant tests
  #
  # NOTE(review): requires ggplot2 to be attached, and calls multiplot(),
  # which is defined elsewhere in this package.
  # Initializations: drop missing p-values and order q-values by p-value.
  plot.call <- match.call()
  rm_na <- !is.na(x$pvalues)
  pvalues <- x$pvalues[rm_na]
  qvalues <- x$qvalues[rm_na]
  q.ord <- qvalues[order(pvalues)]
  # Widen the requested range when no q-value falls inside it.
  if (min(q.ord) > rng[2]) {
    rng <- c(min(q.ord), quantile(q.ord, 0.1))
  }
  p.ord <- pvalues[order(pvalues)]
  lambda <- x$lambda
  pi0Smooth <- x$pi0.smooth
  # A single lambda is expanded to a grid so the pi0(lambda) plot is useful.
  if (length(lambda) == 1) {
    lambda <- sort(unique(c(lambda, seq(0, max(0.90, lambda), 0.05))))
  }
  pi0 <- x$pi0.lambda
  pi00 <- round(x$pi0, 3)
  pi0.df <- data.frame(lambda = lambda, pi0 = pi0)
  # Spline fit- pi0Smooth NULL implies bootstrap
  if (is.null(pi0Smooth)) {
    p1.smooth <- NULL
  } else {
    spi0.df <- data.frame(lambda = lambda, pi0 = pi0Smooth)
    p1.smooth <- geom_line(data = spi0.df, aes_string(x = 'lambda', y = 'pi0'),
                           colour="red")
  }
  # Subplots
  # p1: pi0 estimates vs lambda, with the final pi0 as a dashed reference
  # line and its value annotated on the panel.
  p1 <- ggplot(pi0.df, aes_string(x = 'lambda', y = 'pi0')) +
    geom_point() +
    p1.smooth +
    geom_abline(intercept = pi00,
                slope = 0,
                lty = 2,
                colour = "red",
                size = .6) +
    xlab(expression(lambda)) +
    ylab(expression(hat(pi)[0](lambda))) +
    xlim(min(lambda) - .05, max(lambda) + 0.05) +
    annotate("text", label = paste("hat(pi)[0] ==", pi00),
             x = min(lambda, 1) + (max(lambda) - min(lambda))/20,
             y = x$pi0 - (max(pi0) - min(pi0))/20,
             parse = TRUE, size = 3) + theme_bw() + scale_color_brewer(palette = "Set1")
  # p2: q-value as a function of p-value, restricted to the requested range.
  p2 <- ggplot(data.frame(pvalue = p.ord[q.ord >= rng[1] & q.ord <= rng[2]],
                          qvalue = q.ord[q.ord >= rng[1] & q.ord <= rng[2]]),
               aes_string(x = 'pvalue', y = 'qvalue')) +
    xlab("p-value") +
    ylab("q-value") +
    geom_line()+ theme_bw()
  # p3: number of significant tests at each q-value cut-off.
  p3 <- ggplot(data.frame(qCuttOff = q.ord[q.ord >= rng[1] & q.ord <= rng[2]],
                          sig=(1 + sum(q.ord < rng[1])):sum(q.ord <= rng[2])),
               aes_string(x = 'qCuttOff', y = 'sig')) +
    xlab("q-value cut-off") +
    ylab("significant tests") +
    geom_line()+ theme_bw()
  # p4: expected false positives (q-value * #significant) vs #significant.
  p4 <- ggplot(data.frame(sig = (1 + sum(q.ord < rng[1])):sum(q.ord <= rng[2]),
                          expFP = q.ord[q.ord >= rng[1] & q.ord <= rng[2]] *
                            (1 + sum(q.ord < rng[1])):sum(q.ord <= rng[2])),
               aes_string(x = 'sig', y = 'expFP')) +
    xlab("significant tests") +
    ylab("expected false positives") +
    geom_line()+ theme_bw()
  multiplot(p1, p2, p3, p4, cols = 2)
}
|
7ca8c9b26e80c69ef026535117b496e7bfbedb20
|
758d3d0b7ed4efe17f41e656bfa4c551ff4e5f63
|
/TS.R
|
9f3b5436dde9eb5972d5e360ca916405bd6556a0
|
[] |
no_license
|
szsongyj/time-series-analysis
|
a844b48ec5b8f5054f6bef194fcc4aea6f0d495a
|
e1c280c2b39b0eeadfaa331373367f235fe372d6
|
refs/heads/master
| 2020-06-18T14:17:12.989296
| 2019-07-11T06:12:31
| 2019-07-11T06:12:31
| 196,329,812
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 13,775
|
r
|
TS.R
|
#-------------------------------------------------------------------------------
# Description: Time Series Analysis Forecast for AIoT
# Project Name:
# Name: Multi-Season Time Series Analysis
# Author: Song yongjun
# DateTime: 2019/7/5 14:49
#-------------------------------------------------------------------------------
################################################################
library(data.table)
library(TSA)
library(Metrics)
library(tseries)
library(forecast)
library(fpp2)
library(GGally)
library(gridExtra)
library(seasonal)
library(urca)
library(hts)
library(tidyverse)
library(imputeTS)
#######################数据读入和预处理开始######################
####################### Data read-in and preprocessing #######################
# Raw sensor readings from the HM airport devices; header row, comma separated.
hmAirport=read.csv('hm.csv',header = T,sep=',')
# Lookup table mapping each mete_id to its Chinese metric name and an English
# abbreviation: smoke, network status, water leak, video signal, temperature,
# smart-meter current / voltage / power, humidity.
mete=data.frame(as.vector(unique(hmAirport$mete_id)),c('烟雾','网络状态','水浸','视屏信号','温度','智能电表电流','智能电表电压','智能电表功率','湿度'),c('smoke','net','water','video','temp','curr','volt','power','hum'))
colnames(mete)=c('mete_id','mete_name','mete_name_abbr')
# Keep the name columns as plain character (not factor) for later filtering.
mete<-within(mete,{
mete_name<-as.character(mete_name)
mete_name_abbr<-as.character((mete_name_abbr))
})
str(mete)
hmAirport=merge(hmAirport,mete,by='mete_id')
# Parse the measurement timestamp (yyyymmddHHMM).
hmAirport<-within(hmAirport,{
data_time<-strptime(data_time,'%Y%m%d%H%M')
})
colnames(hmAirport)[3]<-'data_datetime'
hmAirport$create_time<-strptime(as.character(hmAirport$create_time),'%Y-%m-%d %H:%M')
# Add two new columns: the date part and the time part of data_datetime.
hmAirport<-within(hmAirport,{
data_date<-format(data_datetime,'%Y-%m-%d')
data_time<-format(data_datetime,'%H:%M:%S')
})
# Convert the factor-typed measurement columns to numeric.
hmAirport$min_value<-as.numeric(as.character(hmAirport$min_value))
hmAirport$max_value<-as.numeric(as.character(hmAirport$max_value))
# Check min_value / max_value for missing values.
# Result: the video-signal metric has 6 NA measurements (saved in x below);
# data_datetime has no missing values.
sum(is.na(hmAirport$data_datetime))
sum(is.na(hmAirport$min_value))
sum(is.na(hmAirport$max_value))
x=hmAirport[which(is.na(hmAirport$min_value)),]
str(hmAirport)
# Reorder columns, then sort records by id.
hmAirport<-hmAirport[,c(2,4,1,10,11,3,13,12,6,5,9)]
hmAirport<-hmAirport[order(hmAirport$id),]
##################################################################
# Per-device analysis: number of records for each device_id.
# vapply() avoids growing `count` inside a loop and avoids re-computing
# unique(hmAirport$device_id) on every iteration (the original rescanned
# the whole table once per device, twice per pass).
device_ids <- unique(as.character(hmAirport$device_id))
count <- vapply(device_ids,
                function(id) sum(hmAirport$device_id == id),
                integer(1),
                USE.NAMES = FALSE)
sum <- sum(count)  # total record count; variable name `sum` kept for script compatibility
sum
count
as.data.frame(cbind(Device_id = device_ids,
                    Count = count))
# hm_0220: all records for device_id 422c05550220.
hm_0220<-subset(hmAirport,hmAirport$device_id=='422c05550220')
nrow(hm_0220)
unique(hm_0220$mete_name)# hm_0220 covers all 9 monitored metrics
nrow(subset(hm_0220,hm_0220$mete_name=='湿度'))# humidity: 12940
nrow(subset(hm_0220,hm_0220$mete_name=='温度'))# temperature: 12942
nrow(subset(hm_0220,hm_0220$mete_name=='智能电表电压'))# meter voltage: 12942
nrow(subset(hm_0220,hm_0220$mete_name=='智能电表电流'))# meter current: 12942
nrow(subset(hm_0220,hm_0220$mete_name=='智能电表功率'))# meter power: 12942
nrow(subset(hm_0220,hm_0220$mete_name=='网络状态'))# network status: 14093
nrow(subset(hm_0220,hm_0220$mete_name=='水浸'))# water leak: 14093
nrow(subset(hm_0220,hm_0220$mete_name=='烟雾'))# smoke: 5
nrow(subset(hm_0220,hm_0220$mete_name=='视频信号'))# video signal: 0
# Keep only the four continuously-sampled metrics:
# meter current, meter voltage, meter power, temperature.
hm_0220<-subset(hm_0220,is.element(hm_0220$mete_name,c('智能电表电流','智能电表电压','智能电表功率','温度')))
# Convert the timestamp columns to POSIXct so records can be sorted by time.
hm_0220<-within(hm_0220,{
data_datetime<-as.POSIXct(data_datetime)
create_time<-as.POSIXct(create_time)
})
# Drop the days that have very few measurements.
difftime(strptime('2019-05-09 19:10:00','%Y-%m-%d %H:%M:%S'),strptime('2019-02-27 19:10:00','%Y-%m-%d %H:%M:%S'),units = 'days')# 71 days
# Window '2019-02-27 19:10:00' -- '2019-05-09 19:10:00': 71 days of data for
# the 4 metrics (temperature, meter current, meter voltage, meter power).
hm_0220<-subset(hm_0220,hm_0220$data_datetime>=strptime('2019-02-27 19:10:00','%Y-%m-%d %H:%M:%S') & hm_0220$data_datetime<=strptime('2019-05-09 19:10:00','%Y-%m-%d %H:%M:%S'))
# Reshape long -> wide: hm0220 becomes a near-time-series data frame with one
# (min, max) column pair per metric, joined on the measurement timestamp.
df_temp=subset(hm_0220,hm_0220$mete_name=='温度')[,c('data_datetime','min_value','max_value')]
df_curr=subset(hm_0220,hm_0220$mete_name=='智能电表电流')[,c('data_datetime','min_value','max_value')]
df_volt=subset(hm_0220,hm_0220$mete_name=='智能电表电压')[,c('data_datetime','min_value','max_value')]
df_power=subset(hm_0220,hm_0220$mete_name=='智能电表功率')[,c('data_datetime','min_value','max_value')]
colnames(df_temp)[2]<-c('temp_min')
colnames(df_temp)[3]<-c('temp_max')
colnames(df_curr)[2]<-c('curr_min')
colnames(df_curr)[3]<-c('curr_max')
colnames(df_volt)[2]<-c('volt_min')
colnames(df_volt)[3]<-c('volt_max')
colnames(df_power)[2]<-c('power_min')
colnames(df_power)[3]<-c('power_max')
hm0220<-list(df_temp,df_curr,df_volt,df_power)%>%
reduce(left_join,by='data_datetime')
# Restrict to a stretch where consecutive readings are 5 (occasionally 10)
# minutes apart so the data can be treated as a regular time series.
# `a` holds the row indices where the gap to the previous reading is >= 15
# (difftime default units -- presumably minutes here; TODO confirm).
a=which(difftime(hm0220$data_datetime,lag(hm0220$data_datetime,1))>=15)
b=lag(a,1)
c=a-b
# x1/x2 bracket the longest run between two large gaps. Note x1 > x2, so the
# slice below is taken in reverse order; it is re-sorted by time afterwards.
x1=a[which(c==max(c,na.rm = T),arr.ind = T)]-1;x1
x2=a[which(c==max(c,na.rm = T),arr.ind = T)-1];x2
# hm0220 is now a continuous-time series: adjacent readings are mostly
# 5 minutes apart, with a few 10-minute gaps.
hm0220=hm0220[x1:x2,]
nrow(hm0220)# 4593 records
# Sort by timestamp.
hm0220<-hm0220[order(hm0220$data_datetime),]
# From the continuous series keep a whole number of daily cycles and verify
# the periodicity: 16 days, one reading every 5 minutes => 287 rows per day.
days.hm0220=floor(nrow(hm0220)/287);days.hm0220# 16
# Verify the daily periodicity: these timestamps should be one day apart.
hm0220[1+287*0:days.hm0220,c('data_datetime')]
end=1+287*days.hm0220
##### Finally: ts.hm0220, a whole-period continuous time-series object #####
hm0220=hm0220[1:end,]
ts.hm0220=ts(hm0220[,2:9],start = 1,frequency = 287 )
# Detecting seasonality: for each series, draw the periodogram and report the
# two periods (in 5-minute observations) carrying the most spectral power.
p_temp_min=periodogram(hm0220$temp_min,plot = T)
data.table(period=1/p_temp_min$freq, spec=p_temp_min$spec)[order(-spec)][1:2]
p_temp_max=periodogram(hm0220$temp_max,plot = T)
data.table(period=1/p_temp_max$freq, spec=p_temp_max$spec)[order(-spec)][1:2]
p_curr_min=periodogram(hm0220$curr_min,plot = T)
data.table(period=1/p_curr_min$freq, spec=p_curr_min$spec)[order(-spec)][1:2]
p_curr_max=periodogram(hm0220$curr_max,plot = T)
data.table(period=1/p_curr_max$freq, spec=p_curr_max$spec)[order(-spec)][1:2]
p_volt_min=periodogram(hm0220$volt_min,plot = T)
data.table(period=1/p_volt_min$freq, spec=p_volt_min$spec)[order(-spec)][1:2]
p_volt_max=periodogram(hm0220$volt_max,plot = T)
data.table(period=1/p_volt_max$freq, spec=p_volt_max$spec)[order(-spec)][1:2]
p_power_min=periodogram(hm0220$power_min,plot = T)
data.table(period=1/p_power_min$freq, spec=p_power_min$spec)[order(-spec)][1:2]
p_power_max=periodogram(hm0220$power_max,plot = T)
data.table(period=1/p_power_max$freq, spec=p_power_max$spec)[order(-spec)][1:2]
# Same check on the first 3 days only (3 * 288 = 864 observations).
p_temp_min_s=periodogram(hm0220$temp_min[1:864],plot = T)
data.table(period=1/p_temp_min_s$freq, spec=p_temp_min_s$spec)[order(-spec)][1:2]
# Multi-seasonal time series: half-day (144) and full-day (288) periods.
help(msts)
msts.hm0220=msts(hm0220[,2:9],seasonal.periods = c(144,288),start=1)
str(msts.hm0220)
# Train on the first 3 days; test on day 4.
train_msts.hm0220=window(msts.hm0220,end=4)
test_msts.hm0220=window(msts.hm0220,start=4,end=5)
tsp(test_msts.hm0220)
###############################################
###############################################
# All series in a single time plot.
autoplot(ts.hm0220)+
scale_x_continuous(breaks=seq(1,17,by=1))
glimpse(ts.hm0220)
summary(ts.hm0220)
# One time plot per series.
par(mfrow=c(4,2))
plot(ts.hm0220[,'temp_min'],type='l',xlab='time of day(5 minutes interval)',ylab='temp_min')
plot(ts.hm0220[,'temp_max'],type='l',xlab='time of day(5 minutes interval)',ylab='temp_max')
plot(ts.hm0220[,'curr_min'],type='l',xlab='time of day(5 minutes interval)',ylab='curr_min')
plot(ts.hm0220[,'curr_max'],type='l',xlab='time of day(5 minutes interval)',ylab='curr_max')
plot(ts.hm0220[,'volt_min'],type='l',xlab='time of day(5 minutes interval)',ylab='volt_min')
plot(ts.hm0220[,'volt_max'],type='l',xlab='time of day(5 minutes interval)',ylab='volt_max')
plot(ts.hm0220[,'power_min'],type='l',xlab='time of day(5 minutes interval)',ylab='power_min')
plot(ts.hm0220[,'power_max'],type='l',xlab='time of day(5 minutes interval)',ylab='power_max')
####### Analysis of temp_min starts here ########
# Suggested differencing order.
ndiffs(ts.hm0220[,'temp_min'])
ndiffs(hm0220$temp_min)
# Time plots of temp_min: the full 16 days plus a 3-day zoom, stacked so the
# 24-hour pattern is clearly visible.
ap_temp_min<-autoplot(ts.hm0220[,'temp_min'])+
xlab('time of day(by hourly break)')+ylab('temp_min')+
scale_x_continuous(breaks = seq(1,17,by=1))+
ggtitle('time plot of temp_min(16 days)')
ap_temp_min_minor<-autoplot(window(ts.hm0220[,'temp_min'],end=4))+
xlab('time of day(by hourly break)')+ylab('temp_min')+
scale_x_continuous(minor_breaks = seq(1,4,by=1/24))+
ggtitle('time plot of temp_min(3 days)')
gridExtra::grid.arrange(ap_temp_min,ap_temp_min_minor)
# Visualise the seasonal pattern of temp_min.
# seasonal plot (one line per daily cycle)
ggseasonplot(ts.hm0220[,'temp_min'],col=rainbow(12))+
ylab('seasonal index of temp_min')+
ggtitle('seasonal plot of temp_min(16 days)')
ggseasonplot(ts.hm0220[,'temp_min'],polar = T,col=rainbow(12))+
ylab('seasonal index of temp_min')+
ggtitle('seasonal plot of temp_min(16 days)')
ggsubseriesplot(ts.hm0220[,'temp_min'],year.labels=TRUE, year.labels.left=TRUE)+
ylab('seasonal index of temp_min')+
ggtitle('seasonal plot of temp_min(16 days)')
# Reveal the relationship between the min and max temperature series.
autoplot(ts.hm0220[,c('temp_min','temp_max')],facets=T)+
xlab('time of day')+ylab('temp_min vs temp_max')+
ggtitle('temp_min and temp_max')
qplot(hm0220$temp_min,hm0220$temp_max)+
ylab('temp_max')+
xlab('temp_min')
# linear relationship
ggpairs(hm0220[,c('temp_min','temp_max')])
# Forecasting with long seasonal periods: dynamic harmonic regression
# (ARIMA errors + Fourier terms) for the multi-seasonal series.
# Grid-search the number of Fourier pairs K = c(i, j) for the two seasonal
# periods (144, 288) and keep the model with the lowest AICc.
bestK <- c(0, 0)
bestfit.temp_min <- list(aicc = Inf)  # sentinel so the first fit always wins
for (i in seq_len(5)) {
  for (j in seq_len(5)) {
    fit <- auto.arima(train_msts.hm0220[, 'temp_min'],
                      xreg = fourier(train_msts.hm0220[, 'temp_min'], K = c(i, j)),
                      seasonal = FALSE)  # seasonality is captured by the Fourier terms
    if (fit[['aicc']] < bestfit.temp_min[['aicc']]) {
      bestfit.temp_min <- fit
      bestK <- c(i, j)
    }
  }
}
bestfit.temp_min; bestK
# Forecast one day ahead (h = 288 five-minute steps).
# ARIMA + Fourier (dynamic harmonic regression) forecast.
fc.fourier.temp_min <- forecast(bestfit.temp_min,
xreg=fourier(test_msts.hm0220[,'temp_min'], K=bestK, h=288))
# Training data, fitted values and forecast in one plot.
autoplot(fc.fourier.temp_min,series='forecast by Arima fourier')+
autolayer(window(msts.hm0220[,'temp_min'],start=1,end=5),series='original')+
autolayer(fitted((fc.fourier.temp_min)),series = 'fitted ')+
scale_x_continuous(minor_breaks =seq(1,5,by=1/24))
autoplot(train_msts.hm0220[,'temp_min'],series='original')+
autolayer(test_msts.hm0220[,'temp_min'],series='original')+
autolayer(fc.fourier.temp_min,series='forecast by Arima+Fourier model',PI=F)+
autolayer(fitted((fc.fourier.temp_min)),series = 'fitted ')+
xlab('time of day(5 mins interval,4 days in total)')+
ylab('temp_min')+
scale_x_continuous(minor_breaks =seq(1,5,by=1/24))+
ggtitle('forecast of temp_min by Arima+Fourier model ')
summary(fc.fourier.temp_min)
# Arima+Fourier model accuracy evaluated on training and test set
accuracy(fc.fourier.temp_min,test_msts.hm0220[,'temp_min'])
# TBATS model forecast (handles multiple seasonal periods natively)
fc.tbats.temp_min<-train_msts.hm0220[,'temp_min']%>%
tbats(seasonal.periods = c(144,288))%>%
forecast(h=288)
autoplot(train_msts.hm0220[,'temp_min'],series='original')+
autolayer(test_msts.hm0220[,'temp_min'],series='original')+
autolayer(fc.tbats.temp_min,series='forecast of temp_min by TBATS model',PI=F)+
autolayer(fitted(fc.tbats.temp_min),series='fitted')+
xlab('time of day(5 mins interval,4 days in total)')+
ylab('temp_min')+
scale_x_continuous(minor_breaks =seq(1,5,by=1/24))+
ggtitle('forecast of temp_min by TBATS model')
# TBATS model accuracy evaluated on training and test set
accuracy(fc.tbats.temp_min,test_msts.hm0220[,'temp_min'])
# mstl(): decompose the series into trend + seasonal components + remainder.
msts.hm0220[,'temp_min']%>%mstl()%>%
autoplot()+
xlab('time of day(16 days)')+ylab('temp_min')+
scale_x_continuous(breaks = seq(1,17,by=1))+
ggtitle('decomposition(Trend+Season+Residual) of temp_min by STL model')
# STLF model forecast (STL decomposition + ETS on seasonally-adjusted series)
fc.stlf.temp_min<-train_msts.hm0220[,'temp_min']%>%stlf(h=288)
autoplot(train_msts.hm0220[,'temp_min'],series='original')+
autolayer(test_msts.hm0220[,'temp_min'],series='original')+
autolayer(fc.stlf.temp_min,series='forecast of temp_min by STLF(STL+ETS(A,Ad,N)) model',PI=F)+
autolayer(fitted(fc.stlf.temp_min),series='fitted')+
xlab('time of day(5 mins interval,4 days in total)')+
ylab('temp_min')+
scale_x_continuous(minor_breaks =seq(1,5,by=1/24))+
ggtitle('forecast of temp_min by STLF(STL+ETS(A,Ad,N)) model')
# STL+ETS(A,Ad,N) model accuracy evaluated on training and test set
accuracy(fc.stlf.temp_min,test_msts.hm0220[,'temp_min'])
|
8aaa73d76fa46ac502a7cfd3c0733617aee5fbb4
|
710663bd84adf670030680db9358e29011eac5b4
|
/app.R
|
57f39c6e0781af726fdd5c3e41bdb8f774a90154
|
[] |
no_license
|
lanhama/Fama_French
|
67632d9783f2d3e06761663b02449c3eb660fcdb
|
034a75e82755e0f525f9039a9e5bb4f6cf57ae55
|
refs/heads/master
| 2020-06-05T01:54:24.270073
| 2019-09-09T00:30:46
| 2019-09-09T00:30:46
| 192,272,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,350
|
r
|
app.R
|
library(shiny)
library(shinythemes)
library(jsonlite)
library(ggplot2)
library(DT)
library(pracma)
library(stringr)
# Read in the Fama & French factor returns (annual / monthly / daily) once at
# app start-up, before any request processing.
annual_market_returns <- as.data.frame(read.csv("annual_FF.CSV"))
monthly_market_returns <- read.csv("monthly_FF.CSV")
daily_market_returns <- as.data.frame(read.csv("daily_FF.CSV"))
# Build proper Date columns from the integer yyyymmdd / yyyymm / yyyy keys
# (monthly and annual rows are anchored to the first day of the period).
daily_market_returns$date <- as.Date(as.character(daily_market_returns$day), format="%Y%m%d")
monthly_market_returns$date <- paste(as.character(monthly_market_returns$month), "01", sep = "")
monthly_market_returns$date <- as.Date(monthly_market_returns$date, format="%Y%m%d")
annual_market_returns$date <- as.Date(paste(as.character(annual_market_returns$annual), "0101", sep=""), format="%Y%m%d")
# Define the UI: a sidebar with ticker / year-range / interval inputs, plus a
# main panel with tabs for return visuals, the raw data table, and links.
ui <- fluidPage(
  titlePanel(tags$p("Fama French Visualization Tool", style="color:black; font-size:130%; padding-bottom:0vw; padding-top:.5vw")),
  titlePanel(tags$p("By: Andrew Lanham", style="color:black; font-size: 35%; padding:top:0vw; margin-top:0vw")),
  column(3, wellPanel(
    textInput('ticker', "Ticker Symbol", value = "", width = '22vw',
              placeholder = "AMZN"), #selectInput("ticker_sym", "Company", state.name, multiple = FALSE, selectize = TRUE), textOutput("error_message"),
    sliderInput("years", label = "Years", value = c(1980, 2019), min = 1920, max = 2019, sep = "", width = '22vw'),
    radioButtons("chooser", "Time Interval", choices = c("Monthly" = "monthly", "Daily" = "daily"), selected = "monthly", inline = TRUE),
    actionButton("gobutton", "Go!", width = '10vw')
  )),
  column(9, wellPanel(
    tabsetPanel(id = "tabPanel",
                tabPanel("Return Visuals",htmlOutput("outputter"), plotOutput("distPlot")
                ),
                tabPanel("Data", DT::dataTableOutput("table")),
                tabPanel("Other", htmlOutput("summary"))
    )
  ),
  tags$section(style="background:#D5D9C7;", htmlOutput("stats"))
  )
)
# Server logic: on "Go!", download price data for the requested ticker and
# date range, plot the return distribution, and regress the stock's returns
# against the Fama & French factors.
server <- function(input, output) {
  # Static content for the "Other" tab.
  output$summary <- renderUI({
    tags$div(
      tags$p("Take a look at the code on Github",
             tags$a(href="https://github.com/lanhama/Fama_French", "here")),
      tags$p("Or checkout an explanation of Fama & French",
             tags$a(href="https://andrewlanham.me/ff_data_app", "here")))
  })
  observeEvent(input$gobutton, {
    sym <- input$ticker
    # Error checking: user didn't input a ticker symbol.
    if (sym == "") {
      output$outputter <- renderUI({tags$h1("Please input valid ticker", style="color:red; font-size: 120%")})
      return()
    }
    output$outputter <- renderText("")
    # Daily vs monthly sampling interval.
    if (identical(input$chooser, "monthly")) {
      func <- "monthly"
    } else {
      func <- "daily"
    }
    # Build "YYYY-01-01" date strings with no embedded spaces.
    begin <- paste(input$years[1], "-01-01")
    begin <- str_replace_all(string=begin, pattern=" ", repl="")
    end <- paste(input$years[2], "-01-01")
    end <- str_replace_all(string=end, pattern=" ", repl="")
    df <- try(quantmod::getSymbols(toupper(sym), src = "yahoo", from=begin, to=end, env = NULL, periodicity = func), silent = TRUE)
    # inherits() is the correct test: on success df is an xts/zoo object, so
    # class(df) == "try-error" would produce a length-2 condition inside if().
    if (inherits(df, "try-error")) {
      output$outputter <- renderUI({tags$h1("ticker or date failure; try again", style="color:red; font-size: 120%")})
      return()
    }
    names(df) <- c("open", "high", "low", "close", "volume", "adjusted")
    df <- as.data.frame(df)
    df$adjusted <- NULL
    # Per-period absolute and percent change (open -> close).
    df$daily_change <- df$close - df$open
    df$percent_change <- df$daily_change / df$open
    df$percent_change <- df$percent_change * 100
    df[,] <- round(df[,], 2)
    pct_sdev <- sd(df$percent_change)
    expected <- mean(df$percent_change)
    # Histogram of per-period returns.
    output$distPlot <- renderPlot({
      ggplot2::ggplot(df, aes(df$percent_change)) + ggplot2::geom_bar(stat="count") + stat_bin(bins = 30, color = "lightblue") + ggplot2::xlab("% Change") + ggplot2::ylab("frequency") + ggplot2::ggtitle(paste("Frequency of (", input$chooser, ") Returns", sep = "")) + ggplot2::theme(plot.title = element_text(family = "Helvetica", face = "bold", size = (15)))
    })
    output$residualplot <- renderPlot({
      hist(c(3, 4, 54))
    })
    output$ticker_name <- ({
      renderText(input$ticker)
    })
    output$start_date <- ({
      renderText(input$dateRange[1])
    })
    output$end_date <- ({
      renderText(input$dateRange[2])
    })
    # Make the rownames (dates) a real column and drop incomplete rows.
    df_tester <- as.data.frame(df)
    df_tester <- tibble::rownames_to_column(df_tester, "date")
    df_tester$date <- as.Date(df_tester$date)
    df_tester <- na.omit(df_tester)
    # Merge the Fama & French factor returns with the stock data.
    if (identical(input$chooser, "monthly")) {
      df2 <- merge(monthly_market_returns, df_tester, by = "date")
      df2$month <- NULL
    } else {
      df2 <- merge(daily_market_returns, df_tester, by = "date")
      df2$day <- NULL
    }
    df2$volume <- NULL
    print(nrow(df2))
    # Percent change relative to the previous period's close; the first row
    # has no predecessor so it stays NA (vectorized version of the original
    # while loop).
    df2$change <- NA
    if (nrow(df2) > 1) {
      df2$change[-1] <- 100 * diff(df2$close) / df2$close[-nrow(df2)]
    }
    # Remove first data point: only used as a reference for the first change.
    df2 <- df2[-c(1), ]
    # Single-factor betas and simple regressions against each FF factor.
    market_beta <- cov(df2$Mkt.RF, df2$change) / var(df2$Mkt.RF)
    smb_beta <- cov(df2$change, df2$SMB) / var(df2$SMB)
    hml_beta <- cov(df2$HML, df2$change) / var(df2$HML)
    mb_fit <- lm(df2$change ~ df2$Mkt.RF)
    smb_fit <- lm(df2$change ~ df2$SMB)
    hml_fit <- lm(df2$change ~ df2$HML)
    # Stock data in table format.
    output$table <- DT::renderDataTable({
      DT::datatable(df, options = list(lengthMenu = c(5, 30, 50), pageLength = 10))
    })
    # Summary-statistics and factor-regression tables.
    output$stats <- renderUI({
      tags$div(
        strong(h3("Measures of Central Tendency")),
        tags$br(),
        tags$table(style="border: 1px solid black",
                   tags$thead(
                     tags$tr(style="border: 1px solid black;",
                             tags$td("", style="border: 1px solid black;"),
                             tags$td(HTML(paste(strong(toupper(input$ticker)), " (", input$chooser, ")", sep="")), style="padding: 8px"))
                   ),
                   tags$tbody(style="border: 1px solid black",
                              tags$tr(style="border: 1px solid black;",
                                      tags$th("Mean", style="border: 1px solid black; padding: 8px"),
                                      tags$td(paste(round(mean(df$daily_change), 4), "%", sep=""), style="padding:8px"), autowidth = TRUE),
                              tags$tr(style="border: 1px solid black",
                                      tags$th("Median", style="border: 1px solid black; padding: 8px"),
                                      tags$td(paste(round(median(df$daily_change), 4), "%", sep=""), style="padding:8px"), autowidth = TRUE),
                              tags$tr(style="border: 1px solid black",
                                      tags$th("Std dev", style="border: 1px solid black; padding: 8px"),
                                      tags$td(paste(round(sd(df$daily_change), 4), "%", sep=""), style="padding: 8px"), autowidth = TRUE)
                   ) # end body
        ), # end table
        tags$br(),
        strong(h3(paste("Fama French", "(",toupper(input$ticker), input$chooser), ")")),
        tags$table(
          tags$thead(
            tags$tr(style="border: 1px solid black;",
                    tags$td("", style="border: 1px solid black;"),
                    tags$td("Beta", style="border: 1px solid black; padding: 8px;"),
                    tags$td("p-value", style="border: 1px solid black; padding: 8px;"),
                    tags$td("R-Squared", style="border: 1px solid black; padding: 8px;"),
                    tags$td("Adj R-Squared", style="border: 1px solid black; padding: 8px;"))
          ),
          tags$tbody(style="border: 1px solid black",
                     tags$tr(style="border: 1px solid black;",
                             tags$th("Mkt-RF Beta", style="border: 1px solid black; padding: 8px"),
                             tags$td(round(market_beta, 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(mb_fit)$coefficients[2, 4], 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(mb_fit)$r.squared, 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(mb_fit)$adj.r.squared, 4), style="border: 1px solid black; padding:8px"),
                             autowidth = TRUE), # end row
                     tags$tr(style="border: 1px solid black",
                             tags$th("SMB Beta", style="border: 1px solid black; padding: 8px"),
                             tags$td(round(smb_beta, 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(smb_fit)$coefficients[2, 4], 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(smb_fit)$r.squared, 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(smb_fit)$adj.r.squared, 4), style="border: 1px solid black; padding:8px"),
                             autowidth = TRUE), # end row
                     tags$tr(style="border: 1px solid black;",
                             tags$th("HML Beta", style="border: 1px solid black; padding: 8px"),
                             tags$td(round(hml_beta, 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(hml_fit)$coefficients[2, 4], 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(hml_fit)$r.squared, 4), style="border: 1px solid black; padding:8px"),
                             tags$td(round(summary(hml_fit)$adj.r.squared, 4), style="border: 1px solid black; padding:8px"),
                             autowidth = TRUE) # end row
          ) # end body
        )
      ) # end div
    }) # end renderUI
  }) # end observeEvent
}
shinyApp(ui = ui, server = server)
|
f8b0cd8467b290a24aac8319f0c434f061a255ad
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/stringfish/man/sf_collapse.Rd
|
4f0ccc894f8334340a12c7fccacb6d4dd8d8adf6
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 736
|
rd
|
sf_collapse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zz_help_files.R
\name{sf_collapse}
\alias{sf_collapse}
\title{sf_collapse}
\usage{
sf_collapse(x, collapse)
}
\arguments{
\item{x}{A character vector}
\item{collapse}{A single string}
}
\value{
A single string with all values in `x` pasted together, separated by `collapse`.
}
\description{
Pastes a series of strings together separated by the `collapse` parameter
}
\details{
This works the same way as `paste0(x, collapse=collapse)`
}
\examples{
x <- c("hello", "\\\\xe4\\\\xb8\\\\x96\\\\xe7\\\\x95\\\\x8c")
Encoding(x) <- "UTF-8"
sf_collapse(x, " ") # "hello 世界" -- i.e. "hello world", with "world" in Japanese
sf_collapse(letters, "") # returns the alphabet
}
\seealso{
paste0, paste
}
|
fb98a0faa34fac74ae0197a9ecec904546653cf5
|
dc11c41d4d7eaceb81b269a0c57ceba7d2a2d674
|
/StaticalTesting_assignment3.R
|
77e91364e64b713c97bde99b71a8f516787dfa2a
|
[] |
no_license
|
Munish0123/spotify_data_analysis
|
cd1b04cab636d17db213a00da7da4bcd96d37890
|
c35f1383b7fe08a8f901c4466b17dd4ee8e11712
|
refs/heads/main
| 2023-04-03T07:53:35.439476
| 2021-04-10T12:52:25
| 2021-04-10T12:52:25
| 356,309,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,804
|
r
|
StaticalTesting_assignment3.R
|
# Statistical testing on the tidy product-review data (assignment 3).
tidyD <- read.csv(file= "/home/Documents/spotify/TidyData_assignment2.csv")
# Hypothesis: Rating and Liked (positive feedback) of products are interrelated.
table(tidyD$Rating)
table(tidyD$Liked)
table(tidyD$Rating, tidyD$Liked)
# Chi-squared test of statistical independence between Rating and Liked
# (appropriate since both variables are categorical).
summary(table(tidyD$Rating, tidyD$Liked))
# NOTE(review): summary() reports a chi-squared test of independence; a SMALL
# p-value rejects independence (i.e. suggests Rating and Liked ARE related) --
# the original comment had this interpretation inverted.
mean(tidyD$Rating)  # sample mean of ratings
sd(tidyD$Rating)    # sample SD of ratings
# NORMALIZING DATA (z-scores)
scale(tidyD$Rating)
str(tidyD)
# Simulated sample: 100 draws from N(mean = 4, sd = 1).
# NOTE(review): no set.seed(), so the results below are not reproducible.
x <- rnorm(100, mean = 4, sd = 1)
x
t.test(x, mu = 4, conf.level = 0.99)  # large p-value: fail to reject mu = 4
# Confidence interval for the median (Wilcoxon signed-rank, default mu = 0).
wilcox.test(x, conf.int = TRUE)  # rejects location 0, as expected
# Per the hypothesis, test mean rating = 4 at 80% confidence.
t.test(tidyD$Rating, mu = 4, conf.level = 0.80)  # small p-value: reject mu = 4
# Confidence interval for the median rating.
wilcox.test(tidyD$Rating, conf.int = TRUE)
# Testing for NORMALITY of the simulated sample.
shapiro.test(x)  # normally distributed
# Correlation between Rating and product ID, using the PEARSON method since
# the data are treated as (approximately) normally distributed.
cor(tidyD$Rating, tidyD$ID)
cor.test(tidyD$Rating, tidyD$ID)  # no significant correlation
# i.e. a product with the same ID received different ratings from different
# customers of different age groups.
# Correlation between customer Age and product ID is likewise absent,
# suggesting no one age group favours particular products.
cor(tidyD$Age, tidyD$ID)
cor.test(tidyD$Age, tidyD$ID)
|
da4225f4252b36287f49ea7537f9332d585a9f28
|
9a430b05c1e8cd124be0d0323b796d5527bc605c
|
/wsim.io/R/logging.R
|
5bdc762ba6ffa7703c761238199b0148093aab4e
|
[
"Apache-2.0"
] |
permissive
|
isciences/wsim
|
20bd8c83c588624f5ebd8f61ee5d9d8b5c1261e6
|
a690138d84872dcd853d2248aebe5c05987487c2
|
refs/heads/master
| 2023-08-22T15:56:46.936967
| 2023-06-07T16:35:16
| 2023-06-07T16:35:16
| 135,628,518
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,566
|
r
|
logging.R
|
# Copyright (c) 2018 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Strip trailing newline characters from text
#'
#' Removes any run of carriage-return / line-feed characters at the very end
#' of each element of `text`; interior newlines are left untouched.
#'
#' @param text character vector to strip
strip <- function(text) {
  sub("[\r\n]+$", "", text)
}
#' Generate a log message at the \code{ERROR} level
#'
#' Each argument is converted with \code{toString()}, the pieces are joined
#' with single spaces, and trailing newlines are stripped before logging.
#'
#' @param ... Any number of string-convertible objects
#'
#' @export
error <- function(...) {
  parts <- vapply(list(...), toString, character(1))
  futile.logger::flog.error(strip(paste(parts, collapse = " ")))
}
#' Format and generate a log message at the \code{ERROR} level
#'
#' Thin wrapper over \code{futile.logger::flog.error}, which performs
#' sprintf-style substitution of \code{...} into \code{msg}.
#'
#' @param msg A message to log. May be a format string.
#' @param ... Arguments to fill placeholders in format string
#'
#' @export
errorf <- function(msg, ...) {
  futile.logger::flog.error(msg, ...)
}
#' Generate a log message at the \code{FATAL} level
#'
#' Each argument is converted with \code{toString()}, the pieces are joined
#' with single spaces, and trailing newlines are stripped before logging.
#'
#' @param ... Any number of string-convertible objects
#'
#' @export
fatal <- function(...) {
  parts <- vapply(list(...), toString, character(1))
  futile.logger::flog.fatal(strip(paste(parts, collapse = " ")))
}
#' Format and generate a log message at the \code{FATAL} level
#'
#' Thin wrapper over \code{futile.logger::flog.fatal}, which performs
#' sprintf-style substitution of \code{...} into \code{msg}.
#'
#' @param msg A message to log. May be a format string.
#' @param ... Arguments to fill placeholders in format string
#'
#' @export
fatalf <- function(msg, ...) {
  futile.logger::flog.fatal(msg, ...)
}
#' Generate a log message at the \code{INFO} level
#'
#' Each argument is converted with \code{toString()}, the pieces are joined
#' with single spaces, and trailing newlines are stripped before logging.
#'
#' @param ... Any number of string-convertible objects
#'
#' @export
info <- function(...) {
  parts <- vapply(list(...), toString, character(1))
  futile.logger::flog.info(strip(paste(parts, collapse = " ")))
}
#' Format and generate a log message at the \code{INFO} level
#'
#' Thin wrapper over \code{futile.logger::flog.info}, which performs
#' sprintf-style substitution of \code{...} into \code{msg}.
#'
#' @param msg A message to log. May be a format string.
#' @param ... Arguments to fill placeholders in format string
#'
#' @export
infof <- function(msg, ...) {
  futile.logger::flog.info(msg, ...)
}
#' Generate a log message at the \code{WARN} level
#'
#' Each argument is converted with \code{toString()}, the pieces are joined
#' with single spaces, and trailing newlines are stripped before logging.
#'
#' @param ... Any number of string-convertible objects
#'
#' @export
warn <- function(...) {
  parts <- vapply(list(...), toString, character(1))
  futile.logger::flog.warn(strip(paste(parts, collapse = " ")))
}
#' Format and generate a log message at the \code{WARN} level
#'
#' Thin wrapper over \code{futile.logger::flog.warn}, which performs
#' sprintf-style substitution of \code{...} into \code{msg}.
#'
#' @param msg A message to log. May be a format string.
#' @param ... Arguments to fill placeholders in format string
#'
#' @export
warnf <- function(msg, ...) {
  futile.logger::flog.warn(msg, ...)
}
#' Initialize logging functionality
#'
#' Initializes logging at the level set in the environment
#' variable \code{WSIM_LOGGING}: one of \code{ERROR}, \code{WARN},
#' \code{FATAL}, \code{INFO} (the default); any other value, including
#' \code{DEBUG}, selects the most verbose level. Calling this function
#' should be the first thing any WSIM program does.
#'
#' @param tool_name Name of the program, to be used in
#'   formatting log messages
#'
#' @export
logging_init <- function(tool_name) {
  level <- tolower(Sys.getenv('WSIM_LOGGING', 'INFO'))
  flevel <- switch(level,
                   error = futile.logger::ERROR,
                   warn  = futile.logger::WARN,
                   fatal = futile.logger::FATAL,
                   info  = futile.logger::INFO,
                   futile.logger::DEBUG)  # default: most verbose
  # invisible() suppresses the setters' return values (previously captured
  # in an unused `ok` variable for the same purpose).
  invisible(futile.logger::flog.threshold(flevel))
  invisible(futile.logger::flog.layout(
    futile.logger::layout.format(paste0(tool_name, ' [~l]: ~t ~m'))))
}
|
e96b45b4380316193a446ec31db6658309bc3481
|
d0589a18766ddf5e3a10daf28b05a91bcf727959
|
/scripts/relatedness_plots.R
|
d996c01e9068042d2cd0aeb3eff24a957065f866
|
[] |
no_license
|
devonorourke/wnspopgen
|
3b2676ef63e764434dc8f071f82e50144c1bc33e
|
0e3663b44bda4505bfc3000b7b5ced57df776579
|
refs/heads/master
| 2021-02-03T23:17:51.429013
| 2021-01-21T16:47:32
| 2021-01-21T16:47:32
| 243,570,308
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,397
|
r
|
relatedness_plots.R
|
## Script used to generate plots of kinship and relatedness using PLINK outputs
## kinship from 'plink --make-king-table'
## relatedness from 'plink --pca'
## written 12 Jan 2021 by Devon O'Rourke
library(tidyverse)
library(ggpubr)
library(scico)
library(ggrepel)
library(scales)
###############################################################################
########## 1) kinship analysis
###############################################################################
## data import
## KING `--make-king-table` output: one row per sample pair with the number of
## SNPs compared (NSNP), proportion of zero-IBS sites (IBS0), and the
## estimated kinship coefficient (KINSHIP).
kinship_df <- read_delim(file="https://github.com/devonorourke/wnspopgen/raw/master/data/PLINK/LUking.kin0.gz",
                         delim="\t", comment = "#",
                         col_names=c("FID1", "ID1", "FID2", "ID2", "NSNP", "HETHET", "IBS0", "KINSHIP"))
## every sample appearing in either column of the pairwise table
kinship_names <- unique(c(unique(kinship_df$ID1), unique(kinship_df$ID2)))
sample_metadata <- read_csv(file="https://raw.githubusercontent.com/devonorourke/wnspopgen/master/data/metadata/resolved_sample_metadata.csv") %>%
  filter(libPrepAlias %in% kinship_names)
## look at distribution of kinship values, in 0.01 (1%) increments
kinship_df %>%
  mutate(KINSHIP=round(KINSHIP,2)) %>%
  ggplot(aes(KINSHIP)) +
  geom_histogram(fill="gray50", alpha=0.5, color="black") +
  labs(x="kinship value", y="number of sample pairs") +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14))
## identify pairs by KING kinship degree:
##   > 0.354           duplicate sample or MZ twin
##   0.177  - 0.354    1st degree
##   0.0884 - 0.177    2nd degree ... these would absolutely be expected
##   0.0442 - 0.0884   3rd degree ... these are likely the majority of our scores
kinship_d0 <- kinship_df %>% filter(KINSHIP > 0.354)
## just one pair: tcap084 and res28... both postWNS group
## tcap084: KG17MA798, Pepperell MA, female Juvenile, MADFW16997, recaptured 8/3/17
## res28: KG17MA452, Pepperell MA, female Adult, MADFW15797, recaptured 6/13/17
## likely a mother/daughter (can't be same animal because both captured in same year, the earlier one being the adult, and the later in year being the juvenile!)
kinship_d1 <- kinship_df %>% filter(KINSHIP <= 0.354) %>% filter(KINSHIP > 0.177)
nrow(kinship_d1) / nrow(kinship_df) * 100 ## so 0.05% of all data are first degree pairs
## 9 pairs that are also likely siblings:
## tcap189 & tcap176 (HailesCave, Pepperell - original MA capture in 2016)
## tcap190 & tcap141 (HailesCave, Princeton MA - original MA capture in 2013)
## sus26 & sus23 (both Stockbridge, VT)
## tcap015 & tcap189 (Chester, Hailes cave) **** pre/post mix
## tcap072 & tcap009 (Aeolus, Chester)
## tcap073 & tcap159 (Aeolus, Pepperell) *** pre/post mix
## tcap075 & tcap183 (Aeolus, Hailes)
## tcap075 & tcap056 (Aeolus, Aeolus)
## BUG FIX: the 2nd-degree upper bound is 0.177 (was mistyped as 0.117, which
## silently excluded pairs with kinship in (0.117, 0.177] from every bin).
kinship_d2 <- kinship_df %>% filter(KINSHIP <= 0.177) %>% filter(KINSHIP > 0.0884)
nrow(kinship_d2) / nrow(kinship_df) * 100 ## so ~ 1% of all pairwise comps are cousins
kinship_d3 <- kinship_df %>% filter(KINSHIP <= 0.0884) %>% filter(KINSHIP > 0.0442)
nrow(kinship_d3) / nrow(kinship_df) * 100 ## so ~ 9% of all data are second cousins
kinship_d4toNonNeg <- kinship_df %>% filter(KINSHIP <= 0.0442) %>% filter(KINSHIP >= 0)
nrow(kinship_d4toNonNeg) / nrow(kinship_df) * 100 ## so ~ 87% of all data are third or less
### what about negative values? any extreme negatives indicating significant structure?
kinship_neg <- kinship_df %>% filter(KINSHIP < 0)
kinship_neg %>% mutate(KINSHIP = round(KINSHIP, 2)) %>% pull(KINSHIP) %>% table()
### nearly all negative values are close to zero, and are likely driven by population structure inherent in the data
### scatterplot of kinship vs proportion of zero-IBS states
ggplot(kinship_df,
       aes(x=IBS0, y=KINSHIP)) +
  geom_point(alpha=0.5) +
  labs(x="proportion of zero IBS", y="estimated kinship coefficient") +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14))
ggsave(filename = "~/github/wnspopgen/figures_tables/king_kinship.png", height=15, width=15, units="cm", dpi=150)
ggsave(filename = "~/github/wnspopgen/figures_tables/king_kinship.pdf", height=15, width=15, units="cm", dpi=300)
###############################################################################
########## 2) population stratification
###############################################################################
## visualizing loadings from 'plink --pca'
## import eigenvalues per PC (one value per row; PC index = row number)
eigval_df <- read_tsv(col_names = 'eigenval',
                      file="https://github.com/devonorourke/wnspopgen/raw/master/data/PLINK/LUpca.eigenval")
eigval_df <- eigval_df %>%
  mutate(PC = as.numeric(row.names(.)))
## scree plot of the eigenvalues (fraction of variance explained per PC)
p_scree <- ggplot(eigval_df, aes(x=PC, y=eigenval/100)) +
  geom_line() +
  scale_x_continuous(breaks = seq(1,10)) +
  scale_y_continuous(breaks = c(0, 0.02, 0.04), limits = c(0, 0.04)) +
  labs(y="fraction variance explained") +
  theme_bw() +
  theme(axis.text=element_text(size=12),
        axis.title=element_text(size=14))
p_scree
ggsave(file="~/github/wnspopgen/figures_tables/plink_pca_screeplot.png", height=10, width=10, units="cm", dpi=150)
ggsave(file="~/github/wnspopgen/figures_tables/plink_pca_screeplot.pdf", height=10, width=10, units="cm", dpi=300)
## import per-sample PC loadings
loadings_df <- read_tsv(file="https://github.com/devonorourke/wnspopgen/raw/master/data/PLINK/LUpca.eigenvec")
colnames(loadings_df)[1] <- "FID"
## add sample metadata (WNS group and capture location) for plotting
sample_metadata <- read_csv(file="https://raw.githubusercontent.com/devonorourke/wnspopgen/master/data/metadata/resolved_sample_metadata.csv") %>%
  filter(libPrepAlias %in% loadings_df$IID) %>%
  select(libPrepAlias, WNSgroup, Location) %>%
  mutate(WNSgroup = ifelse(WNSgroup=="pre", "SUS", "RES"))
loadings_df <- merge(loadings_df, sample_metadata, by.x='IID', by.y='libPrepAlias')
## plot three figures: PC1:2, PC1:3, PC2:3 (outliers on PC2 get labels)
p_pc12 <- ggplot() +
  geom_point(data=loadings_df, aes(x=PC1, y=PC2, color=Location, shape=WNSgroup)) +
  theme_bw() +
  coord_fixed() +
  geom_label_repel(data=loadings_df %>% filter(PC2 > 0.6), aes(x=PC1, y=PC2, label=IID), size=3)
p_pc13 <- ggplot() +
geom_point(data=loadings_df, aes(x=PC1, y=PC3, color=Location, shape=WNSgroup)) +
theme_bw() +
coord_fixed() +
geom_label_repel(data=loadings_df %>% filter(PC3 > 0.4), aes(x=PC1, y=PC3, label=IID), size=3) +
geom_label_repel(data=loadings_df %>% filter(PC3 < -0.1), aes(x=PC1, y=PC3, label=IID), size=3) +
geom_label_repel(data=loadings_df %>% filter(PC1 > 0.25), aes(x=PC1, y=PC3, label=IID), size=3)
p_pc23 <- ggplot() +
geom_point(data=loadings_df, aes(x=PC2, y=PC3, color=Location, shape=WNSgroup)) +
theme_bw() +
coord_fixed() +
geom_label_repel(data=loadings_df %>% filter(PC3 > 0.4), aes(x=PC2, y=PC3, label=IID), size=3) +
geom_label_repel(data=loadings_df %>% filter(PC3 < -0.1), aes(x=PC2, y=PC3, label=IID), size=3) +
geom_label_repel(data=loadings_df %>% filter(PC2 > 0.4), aes(x=PC2, y=PC3, label=IID), size=3)
## stitch together:
p_allPC <- ggarrange(p_pc12, p_pc13, p_pc23, nrow=1, ncol=3,
common.legend = TRUE, labels = c("A", "B", "C", "D"))
p_allPC
ggsave("~/github/wnspopgen/figures_tables/plink_pca_threeOrdinations_wAliases.png",
height=17, width=25, units="cm", dpi=150)
ggsave("~/github/wnspopgen/figures_tables/plink_pca_threeOrdinations_wAliases.pdf",
height=17, width=25, units="cm", dpi=300)
### also replot these so that we don't have any label names:
## plot three figures: PC1:2, PC1:3, PC2:3
p_pc12_nl <- ggplot() +
geom_point(data=loadings_df, aes(x=PC1, y=PC2, color=Location, shape=WNSgroup), size=2.5) +
theme_bw() +
coord_fixed()
p_pc13_nl <- ggplot() +
geom_point(data=loadings_df, aes(x=PC1, y=PC3, color=Location, shape=WNSgroup), size=2.5) +
theme_bw() +
coord_fixed()
p_pc23_nl <- ggplot() +
geom_point(data=loadings_df, aes(x=PC2, y=PC3, color=Location, shape=WNSgroup), size=2.5) +
theme_bw() +
coord_fixed()
ggarrange(p_pc12_nl, p_pc13_nl, p_pc23_nl, nrow=1, ncol=3,
common.legend = TRUE, labels = c("A", "B", "C", "D"))
ggsave("~/github/wnspopgen/figures_tables/plink_pca_threeOrdinations_noLabels.png",
height=17, width=25, units="cm", dpi=150)
ggsave("~/github/wnspopgen/figures_tables/plink_pca_threeOrdinations_noLabels.pdf",
height=17, width=25, units="cm", dpi=300)
## will edit to pdf to align x axes in all 3 panels along bottom and move legend into space created in top right
###############################################################################
########## 3) Fst sliding windows
###############################################################################
## generated from 'vcftools --fst' command of RES/SUS populations
fstwindows_df <- read_delim(file="https://github.com/devonorourke/wnspopgen/raw/master/data/vcftools/RES_SUS_fst_100kbWindow_25kbStep.windowed.weir.fst.gz",
delim="\t", col_names=TRUE)
## calculate the median weighted FST per chromosome, ...
### rather than sticking with just a single global value when plotting the horizontal line
redlineMedianFst_df <- fstwindows_df %>% group_by(CHROM) %>% summarise(medianFst = median(WEIGHTED_FST))
## merge together into single dataframe for plotting
fstwindows_df <- merge(fstwindows_df, redlineMedianFst_df)
## order the plot facets properly
scaffOrder <- paste0("scaffold", (c(seq(1,3), seq(5,22))))
fstwindows_df$CHROM <- factor(fstwindows_df$CHROM,
levels = scaffOrder)
## plot
ggplot() +
geom_point(data = fstwindows_df,
aes(x=(BIN_START/1000000), y=MEAN_FST),# scale x axis in millins of bp
alpha=0.5) +
geom_hline(data = fstwindows_df,
aes(yintercept = medianFst),
color="red", alpha=0.5) +
labs(x="position (Mb)", y=expression(F[ST])) +
facet_wrap(~CHROM, ncol=3, scales = "free_x") +
theme_bw()
ggsave("~/github/wnspopgen/figures_tables/fst_100kbWindow_25kbStep.png",
height = 28, width = 22, units = "cm", dpi=150)
ggsave("~/github/wnspopgen/figures_tables/fst_100kbWindow_25kbStep.pdf",
height = 28, width = 22, units = "cm", dpi=300)
### unused:
# if so desired, could also grab the topN windows with outliers as follows:
# top10fst_perChrom <- fstwindows_df %>%
# group_by(CHROM) %>%
# slice_max(order_by = WEIGHTED_FST, n = 10)
|
bd45d19cf72d58c60bdc83d4498f76f9c3e68a41
|
e20ae46b59c09099a3b49e1ea018dfc855c11541
|
/dev/dev_ancestry.R
|
2150c4cbe3ec2d39e42276433960d7b3056ca683
|
[] |
no_license
|
Daenecompass/eliter
|
a081eb7f89ab34e9f2e34c3937e791c60a776873
|
884885a2e747e6cbb9560d4b11851ddc4e7c640b
|
refs/heads/master
| 2023-03-17T18:19:04.883599
| 2020-11-16T21:25:50
| 2020-11-16T21:25:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
dev_ancestry.R
|
# Ancestry ----
# An ancestry graph is a graph which only contains directed edges between CEOs and the chairmen who elected them.
library(eliter)
library(readr)
# Raw CVR (Danish business register) extract; schema not visible from here —
# confirm column names against den_cvr.csv before building the graph.
den.db <- read_delim(file = "~/Dropbox/GNA/Til DST/den_cvr.csv", delim = ",")
|
6be42bc566941a0095f36e076186e6bfd03fc06f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NightDay/examples/plot.NightDay.Rd.R
|
e2ad1cb24f9a8503a7fffae6c816e43bced9d86f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
plot.NightDay.Rd.R
|
## Auto-extracted runnable example for plot.NightDay (NightDay package).
library(NightDay)
### Name: plot.NighDay   # (sic: typo carried over from the package's Rd \name field)
### Title: Night and Day Boundary Plot Function
### Aliases: plot.NightDay
### ** Examples
Time <- Sys.time()   # current system time used as the reference instant
timezone <- 1        # hour offset forwarded to NightDay()
plot(NightDay(Time, timezone))
|
4a795a1959d6d5ac101108815518c97abeb53d20
|
9468392003ae0b1c050b3717ea004427c364c4f3
|
/plot-results.R
|
0a3dca9c60874dc5003c9107d2fd3537a1e74034
|
[
"Apache-2.0"
] |
permissive
|
belugadb/druid-benchmark
|
9193b8878a436a4e658f850b3f36ff106cef538e
|
9e64579663bb3b398c93691a07c2590ed773da6f
|
refs/heads/master
| 2021-06-20T17:30:58.304249
| 2017-07-13T20:27:57
| 2017-07-13T20:27:57
| 97,158,776
| 0
| 0
| null | 2017-07-13T20:27:58
| 2017-07-13T19:40:07
|
R
|
UTF-8
|
R
| false
| false
| 3,948
|
r
|
plot-results.R
|
library(plyr)
library(ggplot2)
library(reshape2)
benchmarks = list(
`druid` = "druid-m3-2xlarge.tsv",
`mysql` = "mysql-m3-2xlarge-ssd-myisam.tsv",
`druid-100-x1` = "100gb-druid-m3-2xlarge-1x.tsv",
`mysql-100` = "100gb-mysql-m3-2xlarge-ssd-myisam.tsv",
`druid-100-x6` = "100gb-druid-m3-2xlarge-6x.tsv"
)
results <- NULL
for(x in names(benchmarks)) {
filename <- file.path("results", benchmarks[[x]])
if(file.exists(filename)) {
r <- read.table(filename)
names(r) <- c("query", "time")
r$engine <- x
results <- rbind(results, r)
}
}
results$engine <- factor(results$engine, levels=c("druid", "mysql", "druid-100-x1", "mysql-100", "druid-100-x6"))
results$datasize <- "1GB"
results$datasize[grep("100", results$engine)] <- "100GB"
rowcounts <- NULL
for(datasource in c("tpch_lineitem_small", "tpch_lineitem")) {
r <- read.table(file.path("results", paste(datasource, "-rowcounts.tsv", sep="")))
names(r) <- c("query", "rows")
if(grepl("small", datasource)) r$datasize <- "1GB"
else r$datasize <- "100GB"
rowcounts <- rbind(rowcounts, r)
}
results <- join(results, rowcounts, by=c("query", "datasize"))
results_summary <- ddply(results, .(engine, query, datasize), summarise, time = median(time), rps=median(rows/time), count=length(query))
results_summary$type <- "aggregation"
results_summary$type[grep("top", results_summary$query)] <- "top-n"
baseline <- subset(results_summary, engine == c("druid-100-x1"), select=c("query", "time"))
baseline <- rename(baseline, c("time" = "baseline"))
results_summary <- join(results_summary, baseline, by=c("query"))
# table-1gb
dcast(subset(results_summary, datasize == "1GB", select=c("engine", "query", "time")), query ~ engine)
# table-100gb
dcast(subset(results_summary, datasize == "100GB", select=c("engine", "query", "time")), query ~ engine)
# druid-benchmark-1gb-median
ggplot(subset(results_summary, datasize == "1GB"),
aes(x=query, y=time, fill=engine)) +
geom_bar(position="dodge", stat="identity") +
scale_fill_discrete(breaks=c("druid", "mysql"), labels=c("Druid", "MySQL")) +
ylab("Time (seconds)") +
xlab("Query") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ggtitle(label="Median query time (100 runs) — 1GB data — single node")
# druid-benchmark-100gb-median
ggplot(subset(results_summary, datasize == "100GB" & engine != "druid-100-x6"),
aes(x=query, y=time, fill=engine, order=engine)) +
geom_bar(position="dodge", stat="identity") +
scale_fill_discrete(breaks=c("druid-100-x1", "mysql-100"), labels=c("Druid", "MySQL")) +
ylab("Time (seconds)") +
xlab("Query") +
facet_wrap(~ type, scales="free") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ggtitle(label="Median Query Time (3+ runs) — 100GB data — single node")
# druid-benchmark-scaling
ggplot(subset(results_summary, datasize == "100GB" & !(engine %in% c("mysql", "mysql-100"))),
aes(x=query, y=time, fill=engine, order=engine)) +
geom_bar(position="dodge", stat="identity") +
scale_fill_discrete(breaks=c("druid-100-x1", "druid-100-x6"), labels=c("8 (1 node)", "48 (6 nodes)")) +
ylab("Time (seconds)") +
xlab("Query") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ggtitle(label="Druid Scaling — 100GB") +
guides(fill=guide_legend(title="Cores"))
# druid-benchmark-scaling-factor
ggplot(subset(results_summary, datasize == "100GB" & !(engine %in% c("mysql", "mysql-100"))),
aes(x=query, y=baseline/time, fill=engine, order=engine)) +
geom_bar(position="dodge", stat="identity") +
scale_fill_discrete(breaks=c("druid-100-x1", "druid-100-x6"), labels=c("8 (1 node)", "48 (6 nodes)")) +
scale_y_continuous(breaks=c(1:7)) +
ylab("Speedup Factor") +
xlab("Query") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ggtitle(label="Druid Scaling — 100GB") +
guides(fill=guide_legend(title="Cores"))
|
ba49b7918ccc0e00a3ccf5c8ba13d9ed5cf98677
|
ff6198c86808f03b83d0476750f2ae79de2d9c85
|
/abcstats/man/dnn_predict_slow.Rd
|
cab396ebd0e3e4d2df82ed7412713c6f7086ec11
|
[] |
no_license
|
snarles/abc
|
5b2c727fd308591be2d08461add2ae4e35c7a645
|
fefa42cf178fd40adca88966c187d0cd41d36dcb
|
refs/heads/master
| 2020-12-24T17:35:48.864649
| 2015-07-23T06:07:14
| 2015-07-23T06:07:14
| 39,470,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
rd
|
dnn_predict_slow.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/slow.R
\name{dnn_predict_slow}
\alias{dnn_predict_slow}
\title{Predict given DNN and tanh transfer function}
\usage{
dnn_predict_slow(Ws, bs, x)
}
\arguments{
\item{Ws}{Weight matrices of DNN}
\item{bs}{Bias terms of DNN}
\item{x}{New points, dimension n x p}
}
\value{
An n x ? matrix of predictions
}
\description{
Predict given DNN and tanh transfer function
}
|
78336c1802d827b853581c598fa92ede10c38389
|
beb91d0e06e5b260011ea5c55da32ab21bece500
|
/R/assoc.R
|
936496684d0354c0cf5655a45da943d5c0d2a64d
|
[] |
no_license
|
cran/vcd
|
7169e004f662d4d33305a3b7d1246bba7058b924
|
86cb80436f2a1d4733905710f52c5f630a348cef
|
refs/heads/master
| 2023-02-02T18:55:54.519633
| 2023-02-01T12:22:08
| 2023-02-01T12:22:08
| 17,700,741
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,900
|
r
|
assoc.R
|
#################################################################333
## assocplot
## S3 generic for association plots; dispatches on the class of `x`
## (see the assoc.formula and assoc.default methods in this file).
assoc <- function(x, ...)
  UseMethod("assoc")
## Formula interface: builds a structable from `formula` + `data` and
## forwards it to assoc.default.
## `main`/`sub` accept TRUE as a shorthand meaning "use the name of the
## data object as the title/subtitle" (captured via substitute() before
## `data` is evaluated).
assoc.formula <-
  function(formula, data = NULL, ..., subset = NULL, na.action = NULL,
           main = NULL, sub = NULL)
{
  if (is.logical(main) && main)
    main <- deparse(substitute(data))
  else if (is.logical(sub) && sub)
    sub <- deparse(substitute(data))
  assoc.default(structable(formula, data, subset = subset,
                           na.action = na.action),
                main = main, sub = sub, ...)
}
## Default method: draws a Cohen-Friendly association plot via the vcd
## strucplot framework, with struc_assoc() as the core panel function.
##
## x                table-like object; coerced to a structable if needed
## row_vars, col_vars  accepted for interface compatibility; not referenced
##                  in this function body
## compress         per-row/column axis limits instead of global ones
## xlim, ylim       optional axis limits forwarded to the core function
## spacing          spacing function or grapcon generator for gaps between cells
## split_vertical   logical vector of split directions; defaults to the
##                  structable's own "split_vertical" attribute
## xscale, yspace   tile width scaling / extra vertical space between strata
## residuals_type   only "Pearson" is supported (warning otherwise)
## gp_axis          gpar for the dotted baseline in each cell
assoc.default <- function(x,
                          row_vars = NULL, col_vars = NULL,
                          compress = TRUE, xlim = NULL, ylim = NULL,
                          spacing = spacing_conditional(sp = 0),
                          spacing_args = list(),
                          split_vertical = NULL,
                          keep_aspect_ratio = FALSE,
                          xscale = 0.9, yspace = unit(0.5, "lines"),
                          main = NULL,
                          sub = NULL,
                          ...,
                          residuals_type = "Pearson",
                          gp_axis = gpar(lty = 3)
                          ) {
  # TRUE means "use the deparsed argument as title/subtitle"
  if (is.logical(main) && main)
    main <- deparse(substitute(x))
  else if (is.logical(sub) && sub)
    sub <- deparse(substitute(x))

  if (!inherits(x, "ftable"))
    x <- structable(x)
  tab <- as.table(x)
  dl <- length(dim(tab))

  ## spacing: the innermost row and column variables are treated as
  ## non-conditioning (FALSE) when building the spacing object
  cond <- rep(TRUE, dl)
  cond[length(attr(x, "row.vars")) + c(0, length(attr(x, "col.vars")))] <- FALSE
  if (inherits(spacing, "grapcon_generator"))
    spacing <- do.call("spacing", spacing_args)
  spacing <- spacing(dim(tab), condvars = which(cond))

  ## splitting arguments: fall back to the structable's layout
  if (is.null(split_vertical))
    split_vertical <- attr(x, "split_vertical")

  if(match.arg(tolower(residuals_type), "pearson") != "pearson")
    warning("Only Pearson residuals can be visualized with association plots.")

  strucplot(tab,
            spacing = spacing,
            split_vertical = split_vertical,
            core = struc_assoc(compress = compress, xlim = xlim, ylim = ylim,
                               yspace = yspace, xscale = xscale, gp_axis = gp_axis),
            keep_aspect_ratio = keep_aspect_ratio,
            residuals_type = "Pearson",
            main = main,
            sub = sub,
            ...)
}
## old code: more elegant conceptually, but less performant
##
## struc_assoc2 <- function(compress = TRUE, xlim = NULL, ylim = NULL,
## yspace = unit(0.5, "lines"), xscale = 0.9,
## gp_axis = gpar(lty = 3))
## function(residuals, observed = NULL, expected, spacing, gp, split_vertical, prefix = "") {
## dn <- dimnames(expected)
## dnn <- names(dn)
## dx <- dim(expected)
## dl <- length(dx)
## ## axis limits
## resid <- structable(residuals, split_vertical = split_vertical)
## sexpected <- structable(sqrt(expected), split_vertical = split_vertical)
## rfunc <- function(x) c(min(x, 0), max(x, 0))
## if (is.null(ylim))
## ylim <- if (compress)
## matrix(apply(as.matrix(resid), 1, rfunc), nrow = 2)
## else
## rfunc(as.matrix(resid))
## if (!is.matrix(ylim))
## ylim <- matrix(as.matrix(ylim), nrow = 2, ncol = nrow(as.matrix(resid)))
## attr(ylim, "split_vertical") <- rep(TRUE, sum(!split_vertical))
## attr(ylim, "dnames") <- dn[!split_vertical]
## class(ylim) <- "structable"
## if(is.null(xlim))
## xlim <- if (compress)
## matrix(c(-0.5, 0.5) %o% apply(as.matrix(sexpected), 2, max), nrow = 2)
## else
## c(-0.5, 0.5) * max(sexpected)
## if (!is.matrix(xlim))
## xlim <- matrix(as.matrix(xlim), nrow = 2, ncol = ncol(as.matrix(resid)))
## attr(xlim, "split_vertical") <- rep(TRUE, sum(split_vertical))
## attr(xlim, "dnames") <- dn[split_vertical]
## class(xlim) <- "structable"
## ## split workhorse
## split <- function(res, sexp, i, name, row, col) {
## v <- split_vertical[i]
## splitbase <- if (v) sexp else res
## splittab <- lapply(seq(dx[i]), function(j) splitbase[[j]])
## len <- sapply(splittab, function(x) sum(unclass(x)[1,] - unclass(x)[2,]))
## d <- dx[i]
## ## compute total cols/rows and build split layout
## dist <- unit.c(unit(len, "null"), spacing[[i]] + (1 * !v) * yspace)
## idx <- matrix(1:(2 * d), nrow = 2, byrow = TRUE)[-2 * d]
## layout <- if (v)
## grid.layout(ncol = 2 * d - 1, widths = dist[idx])
## else
## grid.layout(nrow = 2 * d - 1, heights = dist[idx])
## vproot <- viewport(layout.pos.col = col, layout.pos.row = row,
## layout = layout, name = remove_trailing_comma(name))
## ## next level: either create further splits, or final viewports
## name <- paste(name, dnn[i], "=", dn[[i]], ",", sep = "")
## rows <- cols <- rep.int(1, d)
## if (v) cols <- 2 * 1:d - 1 else rows <- 2 * 1:d - 1
## f <- if (i < dl) {
## if (v)
## function(m) split(res, splittab[[m]], i + 1, name[m], rows[m], cols[m])
## else
## function(m) split(splittab[[m]], sexp, i + 1, name[m], rows[m], cols[m])
## } else {
## if (v)
## function(m) viewport(layout.pos.col = cols[m], layout.pos.row = rows[m],
## name = remove_trailing_comma(name[m]),
## yscale = unclass(res)[,1],
## xscale = unclass(sexp)[,m], default.units = "null")
## else
## function(m) viewport(layout.pos.col = cols[m], layout.pos.row = rows[m],
## name = remove_trailing_comma(name[m]),
## yscale = unclass(res)[,m],
## xscale = unclass(sexp)[,1], default.units = "null")
## }
## vpleaves <- structure(lapply(1:d, f), class = c("vpList", "viewport"))
## vpTree(vproot, vpleaves)
## }
## ## start spltting on top, creates viewport-tree
## pushViewport(split(ylim, xlim, i = 1, name = paste(prefix, "cell:", sep = ""),
## row = 1, col = 1))
## ## draw tiles
## mnames <- paste(apply(expand.grid(dn), 1,
## function(i) paste(dnn, i, collapse = ",", sep = "=")
## )
## )
## for (i in seq_along(mnames)) {
## seekViewport(paste(prefix, "cell:", mnames[i], sep = ""))
## grid.lines(y = unit(0, "native"), gp = gp_axis)
## grid.rect(y = 0, x = 0,
## height = residuals[i],
## width = xscale * unit(sqrt(expected[i]), "native"),
## default.units = "native",
## gp = structure(lapply(gp, function(x) x[i]), class = "gpar"),
## just = c("center", "bottom"),
## name = paste(prefix, "rect:", mnames[i], sep = "")
## )
## }
## }
## class(struc_assoc2) <- "grapcon_generator"
## Grapcon generator for the association-plot core: returns the panel
## function consumed by strucplot(). The returned function recursively
## splits the plotting region along the table's dimensions (grid
## viewports) and, at the deepest level, draws one bar per cell whose
## height is the Pearson residual and whose width is proportional to
## sqrt(expected).
struc_assoc <- function(compress = TRUE, xlim = NULL, ylim = NULL,
                        yspace = unit(0.5, "lines"), xscale = 0.9,
                        gp_axis = gpar(lty = 3))
  function(residuals, observed = NULL, expected, spacing,
           gp, split_vertical, prefix = "") {
    if(is.null(expected)) stop("Need expected values.")
    dn <- dimnames(expected)
    dnn <- names(dn)
    dx <- dim(expected)
    dl <- length(dx)

    ## axis limits: residuals drive the y scales, sqrt(expected) the x scales
    resid <- structable(residuals, split_vertical = split_vertical)
    sexpected <- structable(sqrt(expected), split_vertical = split_vertical)
    # range of each slice, always including 0 so bars have a baseline
    rfunc <- function(x) c(min(x, 0), max(x, 0))
    if (is.null(ylim))
      ylim <- if (compress)
        matrix(apply(as.matrix(resid), 1, rfunc), nrow = 2)
      else
        rfunc(as.matrix(resid))
    if (!is.matrix(ylim))
      ylim <- matrix(as.matrix(ylim), nrow = 2, ncol = nrow(as.matrix(resid)))
    # avoid degenerate zero-height scales when min == max
    ylim[2,] <- ylim[2,] + .Machine$double.eps
    attr(ylim, "split_vertical") <- rep(TRUE, sum(!split_vertical))
    attr(ylim, "dnames") <- dn[!split_vertical]
    class(ylim) <- "structable"

    if(is.null(xlim))
      xlim <- if (compress)
        matrix(c(-0.5, 0.5) %o% apply(as.matrix(sexpected), 2, max), nrow = 2)
      else
        c(-0.5, 0.5) * max(sexpected)
    if (!is.matrix(xlim))
      xlim <- matrix(as.matrix(xlim), nrow = 2, ncol = ncol(as.matrix(resid)))
    attr(xlim, "split_vertical") <- rep(TRUE, sum(split_vertical))
    attr(xlim, "dnames") <- dn[split_vertical]
    class(xlim) <- "structable"

    ## split workhorse: recursive descent over dimensions; `index` collects
    ## the cell coordinates chosen so far (one column per resolved dimension)
    split <- function(res, sexp, i, name, row, col, index) {
      v <- split_vertical[i]
      d <- dx[i]
      # split the limits structable along the current dimension
      splitbase <- if (v) sexp else res
      splittab <- lapply(seq(d), function(j) splitbase[[j]])
      # relative extent of each child panel
      len <- abs(sapply(splittab, function(x) sum(unclass(x)[1,] - unclass(x)[2,])))

      ## compute total cols/rows and build split layout (panels interleaved
      ## with spacing units; horizontal splits get extra yspace)
      dist <- if (d > 1)
        unit.c(unit(len, "null"), spacing[[i]] + (1 * !v) * yspace)
      else
        unit(len, "null")
      idx <- matrix(1:(2 * d), nrow = 2, byrow = TRUE)[-2 * d]
      layout <- if (v)
        grid.layout(ncol = 2 * d - 1, widths = dist[idx])
      else
        grid.layout(nrow = 2 * d - 1, heights = dist[idx])
      pushViewport(viewport(layout.pos.col = col, layout.pos.row = row,
                            layout = layout, name = paste(prefix, "cell:",
                                               remove_trailing_comma(name), sep = "")))

      ## next level: either create further splits, or final viewports
      rows <- cols <- rep.int(1, d)
      if (v) cols <- 2 * 1:d - 1 else rows <- 2 * 1:d - 1
      for (m in 1:d) {
        # viewport names encode the conditioning path, e.g. "A=a1,B=b2"
        nametmp <- paste(name, dnn[i], "=", dn[[i]][m], ",", sep = "")
        if (i < dl) {
          if (v) sexp <- splittab[[m]] else res <- splittab[[m]]
          split(res, sexp, i + 1, nametmp, rows[m], cols[m], cbind(index, m))
        } else {
          # leaf: native scales come from the precomputed limit structables
          pushViewport(viewport(layout.pos.col = cols[m], layout.pos.row = rows[m],
                                name = paste(prefix, "cell:",
                                  remove_trailing_comma(nametmp), sep = ""),
                                yscale = unclass(res)[,if (v) 1 else m],
                                xscale = unclass(sexp)[,if (v) m else 1],
                                default.units = "npc")
                       )
          ## draw tiles: dotted baseline plus one residual bar
          grid.lines(y = unit(0, "native"), gp = gp_axis)
          grid.rect(y = 0, x = 0,
                    height = residuals[cbind(index, m)],
                    width = xscale * unit(sqrt(expected[cbind(index, m)]), "native"),
                    default.units = "native",
                    gp = structure(lapply(gp, function(x) x[cbind(index,m)]),
                      class = "gpar"),
                    just = c("center", "bottom"),
                    name = paste(prefix, "rect:", remove_trailing_comma(nametmp), sep = "")
                    )
        }
        upViewport(1)
      }
    }
    ## start splitting at the first dimension, creating the viewport tree
    split(ylim, xlim, i = 1, name = "", row = 1, col = 1, index = cbind())
  }
# mark the factory so assoc.default knows it yields a core panel function
class(struc_assoc) <- "grapcon_generator"
|
71ff53a012716ccf2d8d04f4d03fe13326905893
|
bd25f293d9bf61bd605946ea348e72c6f0cf6600
|
/R/jh1511_1.R
|
c2071a51f1c60e6b462cca84c52556eb568f7110
|
[] |
no_license
|
toyofuku/StatisticalLearning
|
afbe5a453980bd485b75ba02df2a9a85f467d962
|
d1204d99ad3c1019c284fca3b1e6dcdc0ab865dd
|
refs/heads/master
| 2020-03-28T15:49:05.402057
| 2018-09-18T08:21:35
| 2018-09-18T08:21:35
| 148,628,929
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,063
|
r
|
jh1511_1.R
|
### Variational Bayes of Normal Mixture ############################
rm(list=ls())
##close all
#############################################################################
K0 <- 3 ### True clusters
STDTRUE <- 0.3 ### True Standard deviation of each clusters
K <- 3 ### Components of learning clusters
STD <- 0.3 ### 0.1 0.2 0.3 0.4 0.5 ### Standard deviation in learning machine
########################################################################
NNN <- 100 ### Number of samples
KURIKAESHI <- 100 ### Number of recursive process
PRIORSIG <- 0.01 ### 1/PRIORSIG = Variance of Prior
PHI0 <- 0.5 ### Hyperparameter of mixture ratio : 3/2 Kazuho's critical point
####################### True mixture ratios ###############################
KP1 <- 0.2
KP2 <- 0.3
KP3 <- 1-KP1-KP2
############################### make samples ###########################
truecase <- 1
if(truecase==1){
X0 <- matrix(c(0, 0, 1, 0, 1, 1),nrow=2,byrow=T)
}
if(truecase==2){
X0 <- matrix(c(0, 0.0, 0.5, 0, 0.5, 0.0),nrow=2,byrow=T)
}
if(truecase==3){
X0 <- matrix(c(0.5, 0.5, 0.5, 0.5, 0.5, 0.5),nrow=2,byrow=T)
}
YP <- runif(NNN)
Y0 <- matrix(0,1,NNN)
for(i in 1:NNN){
if(YP[i]>KP1+KP2){
Y0[i] <- 3
} else {
if(YP[i]>=KP1)
Y0[i] <- 2
else
Y0[i] <- 1
}
}
XX <- STDTRUE * matrix(rnorm(2*NNN),2,NNN)
for(i in 1:NNN){
XX[,i] <- XX[,i]+X0[,Y0[i]]
}
######################## make data end ##############
#####################################################
# digamma <- function(x){return((log(x) - 0.5/x - 1/(12*x*x)))}
###################################################
########## Initialize VB
PHI <- NNN/K*matrix(1,1,K)
ETA0 <- NNN/K*matrix(1,1,K)
ETA1 <- NNN/K*(mean(XX[1,])+0.1*rnorm(K))
ETA2 <- NNN/K*(mean(XX[2,])+0.1*rnorm(K))
YYY <- matrix(0,K,NNN)
MR <- matrix(0,1,K)
########## Recursive VB Start
for(kuri in 1:KURIKAESHI){
for(i in 1:NNN){
DD1 <- ETA1/ETA0-XX[1,i]
DD2 <- ETA2/ETA0-XX[2,i]
DDD <- digamma(PHI)-digamma(NNN+3*PHI0)-1/ETA0-(DD1*DD1+DD2*DD2)/(2*STD*STD)
YYY[,i] <- exp(DDD-max(DDD))/sum(exp(DDD-max(DDD)))
}
for(k in 1:K){
PHI[k] <- PHI0+sum(YYY[k,])
ETA0[k] <- PRIORSIG+sum(YYY[k,])
ETA1[k] <- sum(YYY[k,] * XX[1,])
ETA2[k] <- sum(YYY[k,] * XX[2,])
}
}
#################Free Energy
library(logOfGamma)
FF1 <- -sum(gammaln(PHI))
FF2 <- sum(log(ETA0)-(ETA1*ETA1+ETA2*ETA2)/(2*STD*STD*ETA0))
FF3 <- sum((XX[1,]*XX[1,]+XX[2,]*XX[2,])/(2*STD*STD)+log(STD*STD))
SSS <- -sum(sum(YYY*log(YYY)))
FreeEnergy <- FF1+FF2+FF3+SSS
#################
cat(sprintf('Free Energy=%.2f, Mixture Ratio=(',FreeEnergy))
Y01 <- rep(0,K)
Y02 <- rep(0,K)
for(j in 1:K){
MR[j] <- ETA0[j]/(NNN+PRIORSIG*K)
Y01[j] <- ETA1[j]/ETA0[j]
Y02[j] <- ETA2[j]/ETA0[j]
cat(sprintf('%.2f ',MR[j]))
}
cat(')\n')
#######################################################
## Evaluate exp(-((x-a)^2 + (y-b)^2) / VA2) / sqrt(2*pi*VA2) on the full
## grid formed by vectors x and y; returns a length(x) x length(y) matrix.
probgauss <- function(x, y, a, b, VA2) {
  dens_at <- function(px, py) {
    sq_dist <- (px - a)^2 + (py - b)^2
    exp(-sq_dist / VA2) / sqrt(2 * pi * VA2)
  }
  outer(x, y, dens_at)
}
####################### plot samples ##################################
par(mfrow=c(2,2))
plot(XX[1,],XX[2,],col='blue',pch=1,main='Samples',xlim=c(-1,2),ylim=c(-1,2))
#################### plot true and estimated #############
plot(X0[1,],X0[2,],col='red',pch=0,main='True:Red Squares, Estimated:Brue +', xlim=c(-1,2), ylim=c(-1,2))
points(Y01,Y02,col='blue',pch=3, xlim=c(-1,2), ylim=c(-1,2))
########################################
va2 <- 2*STDTRUE*STDTRUE
x1 <- seq(-1,2,0.1)
y1 <- seq(-1,2,0.1)
zzz <- KP1*probgauss(x1,y1,X0[1,1],X0[2,1],va2) +
KP2*probgauss(x1,y1,X0[1,2],X0[2,2],va2) +
KP3*probgauss(x1,y1,X0[1,3],X0[2,3],va2)
persp(x1,y1,zzz)
title('True Probability Density Function')
###############################################################
va2 <- 2*STD*STD
zzz <- 0*x1
for(kk in 1:K){
zzz <- zzz + MR[kk]*probgauss(x1,y1,Y01[kk],Y02[kk],va2)
}
persp(x1,y1,zzz)
title('Estimated Probability Density Function')
###############################################################
|
af7fa055115e9b882b9b98b94a52b5f85656de34
|
4da1047966dc6e20ef0c3d967ffde604b0d7061a
|
/cachematrix.R
|
fee59d20d3bee8c4edebb1fa2da95fcc61f2c4ff
|
[] |
no_license
|
djwf/ProgrammingAssignment2
|
0650265ed8d57b2710dead49a0fbf2298be86da1
|
12cee591c3ecc6cf9ee3401fe32bb0bcde679be8
|
refs/heads/master
| 2021-01-15T00:56:45.118170
| 2015-12-28T02:51:48
| 2015-12-28T02:51:48
| 48,467,062
| 0
| 0
| null | 2015-12-23T03:32:29
| 2015-12-23T03:32:28
| null |
UTF-8
|
R
| false
| false
| 859
|
r
|
cachematrix.R
|
## Create method of storing matrices and calculating their inversions that only
## calculates the inversion for a given matrix once.
## Cached matrix data type (functions to store/retrieve matrix/inversion).
## Constructor for a cached-matrix object: wraps a matrix together with a
## memo slot for its inverse. Returns a list of four accessor closures
## (set / get / setinverse / getinverse) sharing the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # memoised inverse; invalidated whenever the matrix changes
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## Calculate inversion of matrix stored in cached matrix data type if needed.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x' (calculate if necessary).
i <- x$getinverse()
if(!is.null(i)) {
message('getting cached data')
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
af33eaedf958b93b8cbca92997f2b2754332f3c3
|
492e3f5140509da0b6a150d7225eee33ed173c2b
|
/ref_sqlvis_raster.R
|
a7b1117283f4c335418c16246be52208a546d0b2
|
[] |
no_license
|
iamchuan/NYC-TaxiVis
|
82c389791080b0cd7debc3eeceff5ca0e139b227
|
efa087695268bc3036b92cf0c2b12628510fffff
|
refs/heads/master
| 2021-06-20T04:55:53.592592
| 2017-07-27T04:52:32
| 2017-07-27T04:52:32
| 84,584,327
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,998
|
r
|
ref_sqlvis_raster.R
|
### Big data tile plot
# data <- tbl(sc, "trips_model_data")
# x_field <- "pickup_longitude"
# y_field <- "pickup_latitude"
# resolution <- 50
## Bin two columns of a (possibly remote) tbl onto a resolution x resolution
## grid and return per-cell counts; aggregation happens backend-side, only
## the counts and the one-row limits summary are collect()ed locally.
##
## data        tbl holding the raw points (e.g. a Spark/database table)
## x_field     column name (character) mapped to the x axis
## y_field     column name (character) mapped to the y axis
## resolution  number of bins along each axis
##
## Returns list(counts = tibble(res_x, res_y, n),
##              limits = one-row frame with axis min/max/range + resolution,
##              vnames = c(x_field, y_field)) for the matching ggplot helper.
sqlvis_compute_raster <- function(data, x_field, y_field, resolution = 300){
  data_prep <- data %>%
    # all_of() replaces the deprecated select_() standard-evaluation verb;
    # naming the selections renames the columns to x/y for the pipeline.
    select(x = all_of(x_field), y = all_of(y_field)) %>%
    filter(!is.na(x), !is.na(y))
  s <- data_prep %>%
    summarise(max_x = max(x),
              max_y = max(y),
              min_x = min(x),
              min_y = min(y)) %>%
    mutate(rng_x = max_x - min_x,
           rng_y = max_y - min_y,
           resolution = resolution) %>%
    collect()
  counts <- data_prep %>%
    # map each point to an integer grid coordinate, then count per cell
    mutate(res_x = round((x - s$min_x) / s$rng_x * resolution, 0),
           res_y = round((y - s$min_y) / s$rng_y * resolution, 0)) %>%
    count(res_x, res_y) %>%
    collect()
  list(counts = counts,
       limits = s,
       vnames = c(x_field, y_field)
  )
}
## Render the output of sqlvis_compute_raster() as a log-scaled heat map.
## `...` is forwarded to labs() (e.g. title, subtitle).
sqlvis_ggplot_raster <- function(data, ...) {
  counts <- data$counts
  lims <- data$limits
  axis_names <- data$vnames
  # six tick positions in grid units, labelled with the original data values
  x_breaks <- setNames(seq(1, lims$resolution, len = 6),
                       round(seq(lims$min_x, lims$max_x, len = 6), 2))
  y_breaks <- setNames(seq(1, lims$resolution, len = 6),
                       round(seq(lims$min_y, lims$max_y, len = 6), 2))
  ggplot(counts, aes(res_x, res_y)) +
    geom_raster(aes(fill = n)) +
    coord_fixed() +
    scale_fill_distiller(palette = "Spectral", trans = "log", name = "Frequency") +
    scale_x_continuous(breaks = x_breaks, labels = names(x_breaks)) +
    scale_y_continuous(breaks = y_breaks, labels = names(y_breaks)) +
    labs(x = axis_names[1], y = axis_names[2], ...)
}
### Facets
## Grouped variant of sqlvis_compute_raster(): bins x/y per level of
## g_field so the result can be facetted by group in the ggplot helper.
## Axis limits are computed globally (across all groups), as before.
##
## Returns list(counts = tibble(g, res_x, res_y, n), limits, vnames).
sqlvis_compute_raster_g <- function(data, x_field, y_field, g_field, resolution = 300){
  data_prep <- data %>%
    # all_of() replaces the deprecated mutate_()/select_() SE verbs; the
    # grouping column is selected and renamed to g directly rather than
    # being copied into a temporary "group" column first.
    select(g = all_of(g_field), x = all_of(x_field), y = all_of(y_field)) %>%
    filter(!is.na(x), !is.na(y))
  s <- data_prep %>%
    summarise(max_x = max(x),
              max_y = max(y),
              min_x = min(x),
              min_y = min(y)) %>%
    mutate(rng_x = max_x - min_x,
           rng_y = max_y - min_y,
           resolution = resolution) %>%
    collect()
  counts <- data_prep %>%
    mutate(res_x = round((x - s$min_x) / s$rng_x * resolution, 0),
           res_y = round((y - s$min_y) / s$rng_y * resolution, 0)) %>%
    count(g, res_x, res_y) %>%
    collect()
  list(counts = counts,
       limits = s,
       vnames = c(x_field, y_field)
  )
}
## Facetted companion to sqlvis_ggplot_raster(): one heat-map panel per
## group level, `ncol` panels per row. `...` is forwarded to labs().
sqlvis_ggplot_raster_g <- function(data, ncol = 4, ...) {
  lims <- data$limits
  counts <- data$counts
  axis_names <- data$vnames
  # three tick positions per axis (coarser than the single-panel version)
  x_breaks <- setNames(seq(1, lims$resolution, len = 3),
                       round(seq(lims$min_x, lims$max_x, len = 3), 1))
  y_breaks <- setNames(seq(1, lims$resolution, len = 3),
                       round(seq(lims$min_y, lims$max_y, len = 3), 1))
  ggplot(counts, aes(res_x, res_y)) +
    geom_raster(aes(fill = n)) +
    coord_fixed() +
    facet_wrap(~ g, ncol = ncol) +
    scale_fill_distiller(palette = "Spectral", trans = "log", name = "Frequency") +
    scale_x_continuous(breaks = x_breaks, labels = names(x_breaks)) +
    scale_y_continuous(breaks = y_breaks, labels = names(y_breaks)) +
    labs(x = axis_names[1], y = axis_names[2], ...)
}
|
b53b1043ea4722d77414c9ddaa8dc4f91eed24da
|
683adeb90ce6051572ff073e5d0937f0743630d0
|
/R_create_Visuals/dep_by_model.R
|
d9e86e4c0980cb99b69ac91a114c0f242dbe03eb
|
[] |
no_license
|
lin1234227/EDA-Luxury-Sedan-Residual-Values
|
c007b848a97bb83f4ebf551b3e5c8267ac6e6b24
|
1dbca8486184ecc297b257b305d4e00ae5ae06ec
|
refs/heads/master
| 2020-04-23T19:06:31.408324
| 2017-08-06T16:41:33
| 2017-08-06T16:41:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
dep_by_model.R
|
## Plot residual-value (depreciation) curves by model segment.
## Input CSV: rows indexed by the first column, with model_level, year and
## dep_rate columns (schema inferred from usage below — confirm against file).
cars <- read.csv("~/Document/data_science/Visual/R_create_Visuals/dataset/dep_by_model.csv", header=TRUE, stringsAsFactors=FALSE, row.names = 1)
# convert model year to vehicle age; 2017 is presumably the data snapshot year — confirm
cars['year'] = 2017-cars['year']
png(filename="~/Document/data_science/Visual/R_create_Visuals/png/dep_by_model.png")
# split by segment code; model_level 0 (cars4) is extracted but not plotted below
cars1<-subset(cars, model_level==1)
cars2<-subset(cars, model_level==2)
cars3<-subset(cars, model_level==3)
cars4<-subset(cars, model_level==0)
#cars5<-subset(cars, capa_range==5)
# draw an empty canvas first, then overlay one line per segment
plot(x=0, y=0, type="l", cex.main=1.5, xlim=c(0, 20), ylim=c(0.1, 1), main="Model Segment", ylab="Residual Ratio", xlab="Years", las=1, lwd=2, bty="n", cex.lab=1.5, cex.axis=1.5)
lines(x=cars1$year, y=cars1$dep_rate, col="blue", type='l', lwd=2.5)
lines(x=cars2$year, y=cars2$dep_rate, col="red", type='l', lwd=2.5)
lines(x=cars3$year, y=cars3$dep_rate, col="black", type='l', lwd=2.5)
#lines(x=cars4$year, y=cars4$dep_rate, col="yellow", type='l', lwd=2.5)
legend(8.5, 1, inset=.05, c("D-segment","E-segment","F-segment"), bty='n', fill=c('blue','red', 'black'), cex=1.5, horiz=FALSE)
dev.off()
|
959f47fd208497d7c88c8c30d4d43c6a21265e9a
|
ced24eb85a914e223dbee72e3da09a2ec946bee1
|
/plot2.R
|
279f64151f44797b45aef96ba3e0a6b6a2de5064
|
[] |
no_license
|
Megan-Gee/ExData_Plotting1
|
f29998b8663c8ad3c583a43d67877cf5f8a0d9d0
|
2a417bdb307fc3d291c6f53b7446595e5f2dbcb6
|
refs/heads/master
| 2022-11-14T00:35:20.604303
| 2020-07-07T22:40:02
| 2020-07-07T22:40:02
| 277,936,122
| 0
| 0
| null | 2020-07-07T22:36:41
| 2020-07-07T22:36:40
| null |
UTF-8
|
R
| false
| false
| 692
|
r
|
plot2.R
|
## Read the household power consumption data and subset to 2007-02-01/02.
## na.strings = "?" because missing measurements are encoded as "?" in the file.
full_data <- read.table("household_power_consumption.txt", sep = ";",
                        header = TRUE, na.strings = "?")
## Combine Date + Time into a single datetime column. Coerce the strptime()
## result (POSIXlt, a list-like type unsafe as a data.frame column) to POSIXct.
full_data$DT <- paste(full_data$Date, full_data$Time)
full_data$DT <- as.POSIXct(strptime(full_data$DT, format = "%d/%m/%Y %H:%M:%S"))
full_data$Date <- as.Date(full_data$Date, format = "%d/%m/%Y")
data <- subset(full_data, Date == "2007-02-01" | Date == "2007-02-02")
## Create plot 2: Global Active Power over time, as a 480x480 PNG.
png(filename = "plot2.png", width = 480, height = 480)
plot(data$DT, data$Global_active_power, type = "l", col = "black", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
8027310b4ab43ac56b5fbd1b5716201e6f04b8e1
|
04a98a7e184fd449985628ac7b8a92f19c1785a4
|
/man/crs-package.Rd
|
b62283f3f970057df374940f7382ab4bb0657617
|
[] |
no_license
|
JeffreyRacine/R-Package-crs
|
3548a0002f136e9e7c1d5c808f6a3867b20b417e
|
6112a3914e65f60a45c8bcfc4076e9b7ea1f8e7a
|
refs/heads/master
| 2023-01-09T18:23:59.615927
| 2023-01-03T16:20:22
| 2023-01-03T16:20:22
| 1,941,853
| 12
| 6
| null | 2023-01-03T16:20:23
| 2011-06-23T14:11:06
|
C++
|
UTF-8
|
R
| false
| false
| 3,677
|
rd
|
crs-package.Rd
|
\name{crs-package}
\alias{crs-package}
\docType{package}
\title{Nonparametric Regression Splines with Continuous and Categorical Predictors}
\description{
This package provides a method for nonparametric regression that
combines the (global) approximation power of regression splines for
continuous predictors (\sQuote{\code{x}}) with the (local) power of
kernel methods for categorical predictors (\sQuote{\code{z}}). The
user also has the option of instead using indicator bases for the
categorical predictors. When the predictors contain both continuous
and categorical (discrete) data types, both approaches offer more
efficient estimation than the traditional sample-splitting
(i.e. \sQuote{frequency}) approach where the data is first broken into
subsets governed by the categorical \code{z}.
To cite the \pkg{crs} package type: \sQuote{\code{citation("crs")}}
(without the single quotes).
For a listing of all routines in the \pkg{crs} package type:
\sQuote{\code{library(help="crs")}}.
For a listing of all demos in the \pkg{crs} package type:
\sQuote{\code{demo(package="crs")}}.
For a \sQuote{\code{\link{vignette}}} that presents an overview of the
\pkg{crs} package type: \sQuote{\code{vignette("crs")}}.
}
\details{
For the continuous predictors the regression spline model employs the
B-spline basis matrix using the B-spline routines in the GNU
Scientific Library (\url{https://www.gnu.org/software/gsl/}).
The \code{\link{tensor.prod.model.matrix}} function is used to
construct multivariate tensor spline bases when \code{basis="tensor"}
and uses additive B-splines otherwise (i.e. when
\code{basis="additive"}).
For the discrete predictors the product kernel function is of the
\sQuote{Li-Racine} type (see Li and Racine (2007) for details) which is
formed by constructing products of one of the following univariate
kernels:
\describe{
\item{(\eqn{z} is discrete/nominal)}{
\eqn{l(z_i,z,\lambda) = 1 }{l(z[i],z,lambda) =
1} if \eqn{z_i=z}{z[i] = z}, and
\eqn{\lambda}{lambda} if \eqn{z_i \neq z}{z[i] != z}. Note that
\eqn{\lambda}{lambda} must lie between \eqn{0} and \eqn{1}.
}
\item{(\eqn{z} is discrete/ordinal)}{
\eqn{l(z_i,z,\lambda) = 1}{l(z[i],z,lambda) = 1} if
\eqn{|z_i-z|=0}{|z[i] - z| = 0}, and
\eqn{\lambda^{|z_i-z|}}{lambda^|z_i-z|} if \eqn{|z_i -
z|\ge1}{|z[i] - z|>=1}. Note that \eqn{\lambda}{lambda} must lie
between \eqn{0} and \eqn{1}.
}
}
Alternatively, for the ordinal/nominal predictors the regression
spline model will use indicator basis functions.
}
\author{
Jeffrey S. Racine \email{racinej@mcmaster.ca} and Zhenghua Nie \email{niez@mcmaster.ca}
Maintainer: Jeffrey S. Racine \email{racinej@mcmaster.ca}
I would like to gratefully acknowledge support from the Natural
Sciences and Engineering Research Council of Canada
(\url{https://www.nserc-crsng.gc.ca}), the Social Sciences and Humanities
Research Council of Canada (\url{https://www.sshrc-crsh.gc.ca}), and the Shared
Hierarchical Academic Research Computing Network
(\url{https://www.sharcnet.ca}).
}
\references{
Li, Q. and J.S. Racine (2007), \emph{Nonparametric Econometrics:
Theory and Practice,} Princeton University Press.
Ma, S. and J.S. Racine and L. Yang (2015), \dQuote{Spline
Regression in the Presence of Categorical Predictors,} Journal of
Applied Econometrics, Volume 30, 705-717.
Ma, S. and J.S. Racine (2013), \dQuote{Additive Regression
Splines with Irrelevant Categorical and Continuous Regressors,}
Statistica Sinica, Volume 23, 515-541.
}
\keyword{package}
|
f56a633949cade63db4a4d111a0f041e2f6d4a2a
|
2b13c58d7b92b9299216cf3373d2aa074af21fd5
|
/Unit 3 - Assignment/Predicting the Baseball World Series Champion (OPTIONAL) _ Assignment 3 _ 15.R
|
8d9dec9bd78f4bda12895ad5d5dd06c0f008f350
|
[] |
no_license
|
florintoth/The-Analytics-Edge
|
7408f520c675465d562220d776ac969ed9ccb696
|
3402da7f95517d4e6d231894e2a40052bcef034a
|
refs/heads/master
| 2020-05-18T14:14:32.730173
| 2015-05-18T10:02:45
| 2015-05-18T10:02:45
| 40,883,090
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,397
|
r
|
Predicting the Baseball World Series Champion (OPTIONAL) _ Assignment 3 _ 15.R
|
# PREDICTING THE BASEBALL WORLD SERIES CHAMPION (OPTIONAL)

# Problem 1 — load data and restrict to playoff teams.
baseball <- read.csv("baseball.csv")
str(baseball)
length(table(baseball$Year))
baseball <- subset(baseball, Playoffs == 1)
nrow(baseball)
table(table(baseball$Year))

# Problem 2 — number of playoff competitors per year, looked up by year.
PlayoffTable <- table(baseball$Year)
PlayoffTable
names(PlayoffTable)
PlayoffTable[c("1990", "2001")]
# Index the table by year-as-character to attach the competitor count per row.
baseball$NumCompetitors <- PlayoffTable[as.character(baseball$Year)]
table(baseball$NumCompetitors)

# Problem 3 — outcome: 1 if the team won the World Series (RankPlayoffs == 1).
baseball$WorldSeries <- as.numeric(baseball$RankPlayoffs == 1)
table(baseball$WorldSeries)
summary(glm(WorldSeries ~ Year, data = baseball, family = binomial))

# Problem 4 — full model, collinearity check, then all two-variable models.
LogModel <- glm(WorldSeries ~ Year + RA + RankSeason + NumCompetitors,
                data = baseball, family = binomial)
summary(LogModel)
cor(baseball[c("Year", "RA", "RankSeason", "NumCompetitors")])

# Fit every pairwise combination of the four predictors in one pass instead
# of six hand-written, near-identical glm() calls.
pair_formulas <- list(
  Model1 = WorldSeries ~ Year + RA,
  Model2 = WorldSeries ~ Year + RankSeason,
  Model3 = WorldSeries ~ Year + NumCompetitors,
  Model4 = WorldSeries ~ RA + RankSeason,
  Model5 = WorldSeries ~ RA + NumCompetitors,
  Model6 = WorldSeries ~ RankSeason + NumCompetitors
)
pair_models <- lapply(pair_formulas, glm, data = baseball, family = binomial)
# Print the summary of each two-variable model (auto-printed at top level).
lapply(pair_models, summary)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.