blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57b2fdbfc88844b0d3cae64688c1e6c84d66489b
|
34ac695a2de980764e2e95b3e6157dd436318872
|
/inst/application/ui/ui_dist_home.R
|
9485981315f1269286552ec8214bf72496ab43d5
|
[
"MIT"
] |
permissive
|
rsquaredacademy/nse2r
|
f560eb6e347c4705de21fb298570827d4900a02b
|
15a41b20c7919649f151b0e6a241519e11352272
|
refs/heads/master
| 2023-08-30T17:02:03.385691
| 2022-11-10T15:01:16
| 2022-11-10T15:01:16
| 219,264,577
| 26
| 9
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,584
|
r
|
ui_dist_home.R
|
tabPanel('Market Data', value = 'tab_dist_home',
  fluidPage(theme = shinytheme('cerulean'),
    includeCSS("mystyle.css"),
    fluidRow(
      column(12),
      br(),
      column(12, align = 'center',
        h5('Fetch Data from NSE')
      ),
      # One row per NSE dataset: a label column plus a "Click Here" action
      # button. Button ids are button_dist_home_1 .. button_dist_home_5 in
      # the order of the labels below (the server side observes these ids).
      # htmltools flattens the list returned by lapply(), so the rendered
      # DOM is identical to writing the five rows out by hand.
      lapply(seq_len(5), function(i) {
        row_label <- c('Pre Open Session Data',
                       'Advances & Declines',
                       'Indices',
                       'Stock',
                       'Futures & Options')[i]
        tagList(
          br(),
          br(),
          br(),
          column(3),
          column(4, align = 'left',
            h5(row_label)
          ),
          column(2, align = 'left',
            actionButton(
              inputId = paste0('button_dist_home_', i),
              label = 'Click Here',
              width = '120px'
            )
          ),
          column(3)
        )
      })
    )
  )
)
|
c42949d490b3b501c944672283d4810df1a1f9a2
|
2c2e81584ab81a45ebc1ce5b42e5ad3988fe889f
|
/plot1.R
|
9dbd0fdf0ef8dc80277ad1f357641f42cbcbba21
|
[] |
no_license
|
sumeshv/ExData_Plotting1
|
f2730b6b973c15eec04f91d416c0ee6c7649808a
|
5909bde60a86bec603f027e66a1278af3360cd97
|
refs/heads/master
| 2021-01-16T22:00:37.864972
| 2015-03-07T16:50:10
| 2015-03-07T16:50:10
| 30,421,525
| 0
| 0
| null | 2015-02-06T16:29:18
| 2015-02-06T16:29:17
| null |
UTF-8
|
R
| false
| false
| 990
|
r
|
plot1.R
|
#
# plot1() creates plot1.png (480 x 480 px) in the working directory using
# hist(). It takes one argument, the directory where the data file
# household_power_consumption.txt is kept.
# usage eg: plot1("D:/R/data/exdata-data-household_power_consumption") if
# the txt file is kept under D:/R/data/exdata-data-household_power_consumption
#
plot1 <- function(dir) {
  # data_plot.R provides get_data(), shared by all plots in this project.
  source("data_plot.R")
  ds <- get_data(dir)
  png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "white")
  # Guarantee the device is closed even if hist() errors; the original
  # called dev.off() unconditionally and could leak an open device.
  on.exit(dev.off(), add = TRUE)
  hist(ds$Global_active_power,
       col = "red",
       xlab = "Global Active Power (kilowatts)",
       main = "Global Active Power")
  # Note: the original ended with rm(list = ls()), which inside a function
  # only clears the function's own locals — dropped as a no-op.
  print("plot 1 done")
}
|
343a24e503b4092480c6a5354918ef4c29c1ab44
|
a7d4b236f9b33180d6325c4f235dfde5d21229c5
|
/start_code.R
|
1ba2d833323a148e738f04a9f241ee9061dafc4e
|
[] |
no_license
|
UrszulaCzerwinska/AppliedPredictiveModeling
|
9d4bbd9c6a0a7467cc1e8e779e78c48542609821
|
48cefb1086c2a4bd5608c3c7d772de689a5a2779
|
refs/heads/master
| 2020-05-29T17:25:49.826878
| 2017-03-13T09:31:15
| 2017-03-13T09:31:15
| 82,602,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
start_code.R
|
# Install the companion package only when it is missing; an unconditional
# install.packages() re-downloads it on every run of the script.
if (!requireNamespace("AppliedPredictiveModeling", quietly = TRUE)) {
  install.packages("AppliedPredictiveModeling")
}
library(AppliedPredictiveModeling)
help(package = AppliedPredictiveModeling)
# scriptLocation() is exported by the package — presumably it prints where
# the book's chapter scripts live (confirm in the package docs).
scriptLocation()
|
aa1a47f63bc9d3fffbca75d46ba0f7f55c66dc87
|
633cc5b6a3d3b29a95f41268cf9db7f751ceee0f
|
/quiz1.R
|
5321a88cf59be20ba0f6de503d44e668406a4d5a
|
[] |
no_license
|
FarhadHHridoy/datasciencecoursera
|
de222b78696727285734d09102c1cb411da90118
|
6617194af4fc6f2435ac000aad4f1c7974abe25f
|
refs/heads/master
| 2022-09-29T06:13:29.098867
| 2020-05-30T03:15:13
| 2020-05-30T03:15:13
| 266,480,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 804
|
r
|
quiz1.R
|
# Quiz 1 answers on hw1_data.csv (the airquality data).
x <- read.csv("hw1_data.csv")
head(x)

# What is the mean of the Ozone column? Exclude missing values (coded as NA).
ozone <- x$Ozone
# How many missing values are in the Ozone column of this data frame?
sum(is.na(ozone))
mean(ozone, na.rm = TRUE)

# Extract the subset of rows where Ozone > 31 and Temp > 90.
# What is the mean of Solar.R in this subset?
# (Rows where Ozone is NA index as NA and are dropped by na.rm below,
# matching the original's explicit !is.na() filtering.)
hot <- x$Ozone > 31 & x$Temp > 90
mean(x$Solar.R[hot], na.rm = TRUE)

# What is the mean of "Temp" when "Month" is equal to 6?
mean(x$Temp[x$Month == 6])

# What was the maximum ozone value in the month of May (Month == 5)?
max(x$Ozone[x$Month == 5], na.rm = TRUE)
|
8168a0f4624d994909712a575f59ace4f5c212f7
|
7d0a2b8df9f3def8c1e4b57cf8b7a9fe4581c660
|
/man/dm_searchM2_logit.Rd
|
46887765fa17a143e5ca235bdc8de01b4e485c1f
|
[
"MIT"
] |
permissive
|
xiaobo199405/dualmarker
|
64e1152d71eaca26d5bc4918c20ccca52dcd636c
|
5bda3c156b890ff106b777593eade8625ccd6b08
|
refs/heads/master
| 2023-02-01T13:35:17.798302
| 2020-12-18T13:26:45
| 2020-12-18T13:26:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,780
|
rd
|
dm_searchM2_logit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dm_searchM2.R
\name{dm_searchM2_logit}
\alias{dm_searchM2_logit}
\title{search marker2 using logistic regression}
\usage{
dm_searchM2_logit(
data,
response,
response.pos,
response.neg = NULL,
marker1,
m2.candidates,
covariates = NULL,
m1.binarize = F,
m2.binarize = F,
m1.num.cut = "median",
m1.cat.pos = NULL,
m1.cat.neg = NULL,
m2.num.cut = "median",
m2.cat.pos = NULL,
m2.cat.neg = NULL,
auc = T,
p.adjust.method = "BH"
)
}
\arguments{
\item{data}{data frame}
\item{response}{response variable}
\item{response.pos}{positive value(s) for response variable}
\item{response.neg}{negative value(s) for response variable}
\item{marker1}{marker1}
\item{m2.candidates}{candidates marker2}
\item{covariates}{confounding factor}
\item{m1.binarize}{binarize marker1, default FALSE}
\item{m2.binarize}{binarize marker2, default FALSE}
\item{m1.num.cut}{cut method/values for numeric marker1
if m1.binarize is TRUE and marker1 is numeric}
\item{m1.cat.pos}{positive value for categorical marker1
if m1.binarize is TRUE and marker1 is categorical}
\item{m1.cat.neg}{negative value for categorical marker1
if m1.binarize is TRUE and marker1 is categorical}
\item{m2.num.cut}{cut method/values for numeric marker2
if m2.binarize is TRUE and marker2 is numeric}
\item{m2.cat.pos}{positive value for categorical marker2
if m2.binarize is TRUE and marker2 is categorical}
\item{m2.cat.neg}{negative value for categorical marker2
if m2.binarize is TRUE and marker2 is categorical}
\item{auc}{report AUC, default TRUE}
\item{p.adjust.method}{see also p.adjust.methods}
}
\description{
search marker2 to combine with marker1
}
\seealso{
\code{\link[stats]{p.adjust}}
}
|
072073515f274c8e5a7a9dd75f275f0a9e72bdf3
|
ea178de2d1926451fd0549b3d7c95c17804f8230
|
/cs573/code/bootstrap.R
|
a3767ab24bbac76aa436eaaf34fd3db2bb412624
|
[] |
no_license
|
jdavis/college-spring-2014
|
fe40aebef73b832edf49d426d1938673a170112d
|
20d8234b160e0640aadb299dd30ffe3db85fcac8
|
refs/heads/master
| 2016-09-05T23:10:44.832167
| 2014-05-11T03:22:35
| 2014-05-11T03:22:35
| 15,485,826
| 8
| 13
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,921
|
r
|
bootstrap.R
|
# Bootstrapping the mean
theta <- 1
x <- runif(100, 0, theta)
mean.boot <- replicate(1e5, mean(sample(x, replace = TRUE)))
sd(mean.boot)
# Compare with the theoretical standard error of the mean
sd(x) / sqrt(length(x))
# Distribution of the standardized bootstrap means vs. N(0, 1)
hist((mean.boot - mean(x)) / sd(mean.boot), breaks = "Scott", freq = FALSE, ylim = c(0, 0.4))
xs <- seq(-4, 4, length.out = 200)
lines(xs, dnorm(xs, 0, 1), col = "red", lwd = 2)
##################
#   Example 1    #
##################
lawstat <- read.table("lawstat.dat")
s <- c(4, 6, 13, 15, 31, 35, 36, 45, 47, 50, 52, 53, 70, 79, 82)
# Given data of 15 observations
d <- lawstat[s, ]
cor(d$LSAT, d$GPA)
# Standard error of the correlation? Use the bootstrap.
B <- 3200
cor.boot <- rep(0, B)
for (i in seq_len(B)) {
  # BUG FIX: round(15 * runif(15, 0, 1)) produces indices 0..15 where 0
  # silently drops a row and 0/15 have half the correct probability.
  # sample() draws uniformly from 1..15 with replacement.
  ind <- sample(15, 15, replace = TRUE)
  cor.boot[i] <- cor(d[ind, ])[2]
}
sd(cor.boot)
# We have the full data = population: resample 15 rows from all 82
corr <- rep(0, B)
for (i in seq_len(B)) {
  ind <- sample(82, 15, replace = TRUE)
  corr[i] <- cor(lawstat[ind, ])[2]
}
sd(corr)
# Display histograms
layout(matrix(c(1, 2), 2, 1))
hist(cor.boot)
hist(corr)
layout(matrix(c(1), 1, 1))
boxplot(cor.boot, corr)
# Plot bootstrap ecdf and the "true" one
plot(ecdf(cor.boot), verticals = TRUE, do.points = FALSE)
lines(ecdf(corr), verticals = TRUE, do.points = FALSE, col = "red")
# Textbook formula for sd(corr); only valid if the population is bivariate normal
(1 - cor(d)[2]^2) / sqrt(15 - 3)
##############################
#   Example 2: Regression    #
##############################
library(boot)
library(ISLR)
data(Auto)
# Statistic for boot(): OLS coefficients on the resampled rows
boot.fn <- function(data, index) {
  coef(lm(mpg ~ horsepower, data = data, subset = index))
}
# Use the bootstrap
boot(Auto, boot.fn, 1000)
# Compare with the usual OLS standard errors
summary(lm(mpg ~ horsepower, data = Auto))$coef
# Second order model
boot.fn <- function(data, index) {
  coef(lm(mpg ~ horsepower + I(horsepower^2), data = data, subset = index))
}
boot(Auto, boot.fn, 1000)
# Compare
summary(lm(mpg ~ horsepower + I(horsepower^2), data = Auto))$coef
|
392036cacb44a27e7238d877a5d1a682a5f95803
|
31325bac0088dbc4725f01794ff72413d5856c64
|
/R/simulate_synlik.R
|
7082d1e529957b6b08976c1429bce6da4ee943d0
|
[] |
no_license
|
mfasiolo/synlik_dev
|
6b2bae4f357cbe028050917f5a264e1698fcfe76
|
2508506cffe09e752c6fa221e6431997f739168e
|
refs/heads/master
| 2021-03-27T10:56:50.668629
| 2018-05-30T10:37:00
| 2018-05-30T10:37:00
| 17,673,353
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,921
|
r
|
simulate_synlik.R
|
# Simulate data (or summary statistics) from a "synlik" object, optionally in
# parallel over a snow cluster. Backing implementation for the S4 "simulate"
# method registered below in this file.
.simulate.synlik <- function(object,
                             nsim,
                             seed = NULL,
                             param = object@param,
                             stats = FALSE,
                             clean = TRUE,
                             multicore = !is.null(cluster),
                             cluster = NULL,
                             ncores = detectCores() - 1,
                             verbose = TRUE,
                             ...)
{
  if (!is(object, "synlik")) stop("object has to be of class \"synlik\" ")
  # Reduce subclasses (e.g. "synMaxlik") to plain "synlik" so we avoid moving
  # around all the additional slots of the subclass.
  # BUG FIX: the original `!class(object)[[1]] != "synlik"` parses (by
  # precedence) as `!(class(...) != "synlik")`, i.e. the test was inverted and
  # the conversion was skipped exactly for subclasses.
  if (class(object)[[1]] != "synlik") object <- as(object, "synlik")
  if (!is.null(seed)) set.seed(seed)
  # Copy these so they can be traced (mtrace()) and get exported to workers.
  simulator <- object@simulator
  summaries <- object@summaries
  extraArgs <- object@extraArgs
  if (multicore) {
    # Force evaluation of everything in the environment, so it will be
    # available to the workers on the cluster.
    .forceEval(ALL = TRUE)
    tmp <- .clusterSetUp(cluster = cluster, ncores = ncores, libraries = "synlik", exportALL = TRUE)
    cluster <- tmp$cluster
    ncores <- tmp$ncores
    clusterCreated <- tmp$clusterCreated
    registerDoSNOW(cluster)
  }
  # Divide the nsim simulations between nodes; the remainder goes to the last.
  coresSchedule <- if (multicore) c(rep(floor(nsim / ncores), ncores - 1), floor(nsim / ncores) + nsim %% ncores) else nsim
  # Launch simulations
  withCallingHandlers({
    tmp <- alply(.data = coresSchedule,
                 .margins = 1,
                 .fun = function(input, ...){
                   # Simulate data
                   simul <- simulator(param = param, nsim = input, extraArgs = extraArgs, ...)
                   # Transform into summary statistics
                   if (stats == TRUE) {
                     if (!is.null(summaries)) simul <- summaries(x = simul, extraArgs = extraArgs, ...)
                   }
                   return(simul)
                 },
                 .parallel = multicore,
                 ...
    )
  }, warning = function(w) {
    # There is a bug in plyr concerning a useless warning about "..."
    if (length(grep("... may be used in an incorrect context", conditionMessage(w))))
      invokeRestart("muffleWarning")
  })
  # Close the cluster if it was opened inside this function
  if (multicore && clusterCreated) stopCluster(cluster)
  # We can't call rbind if we are simulating raw data, as we don't know its
  # form (matrix, list, etc.)
  if (length(coresSchedule) == 1)
  {
    simul <- tmp[[1]]
  } else {
    # NOTE(review): when multicore is TRUE and stats is FALSE, `simul` is
    # never assigned on this branch — looks like a latent bug; confirm
    # intended behavior with raw multicore simulations.
    if (stats) simul <- do.call("rbind", tmp)
  }
  # Clean the statistics from NaNs / non-finite rows
  if (clean) simul <- .clean(X = simul, verbose = verbose)$cleanX
  return(simul)
}
##########
#' Simulate data or statistics from an object of class \code{synlik}.
#'
#' @param object An object of class \code{synlik}.
#' @param nsim Number of simulations from the model.
#' @param seed Random seed to be used. It is not passed to the simulator, but simply passed to \code{set.seed()} from within
#' \code{simulate.synlik}.
#' @param param Vector of parameters passed to \code{object@@simulator}.
#' @param stats If \code{TRUE} the function transforms the simulated data into statistics using \code{object@@summaries}.
#' @param clean If \code{TRUE} the function tries to clean the statistics from NaNs or non-finite values.
#' Given that \code{object@@summaries} has to return a numeric vector or
#' a matrix where each row is a simulation, rows containing non-finite values will be discarded.
#' @param verbose If \code{TRUE} the function will complain if, for instance, the simulations contain lots of non-finite values.
#' @param ... additional arguments to be passed to \code{object@@simulator} and \code{object@@summaries}.
#' In general it is preferable to avoid using it, and to put everything those functions need into \code{object@@extraArgs}.
#' @return If \code{stats == FALSE} the output will be that of \code{object@@simulator}, which depends on the simulator used by the user.
#' If \code{stats == TRUE} the output will be a matrix where each row is a vector of simulated summary statistics.
#' @author Matteo Fasiolo <matteo.fasiolo@@gmail.com>
#' @aliases simulate,synlik-method
#' @method simulate synlik
#' @seealso \code{\link{synlik-class}}, \code{\link{simulate}}.
#' @rdname simulate-synlik
#' @examples
#' data(ricker_sl)
#'
#' # Simulate data
#' simulate(ricker_sl, nsim = 2)
#'
#' # Simulate statistics
#' simulate(ricker_sl, nsim = 2, stats = TRUE)
setMethod("simulate",
signature = signature(object = "synlik"),
definition = .simulate.synlik)
|
5d6a17ebe9be9388f78affd70072c389eedd735a
|
b9e00c7839b512ddf81e24a477a5ba806a49a746
|
/server.R
|
76e434a1a1759984c17012b5a5548e6e66fbdd65
|
[] |
no_license
|
mystatgit/Interest-Calculation-APP
|
81fe59e374033e0b8104d1f6876ecc28ae1ded97
|
10d04cca118c85ff8c34d7c562c671636cfcc643
|
refs/heads/master
| 2020-06-03T16:46:55.730557
| 2015-02-19T17:41:59
| 2015-02-19T17:41:59
| 31,027,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 745
|
r
|
server.R
|
library(shiny)

# Interest calculator server: computes simple and compound interest from the
# principal, rate of interest (percent per year, input$roi) and term in years
# entered in the UI, and renders each result as a one-row data frame.
shinyServer(function(input, output) {

  # Simple interest: SI = P * R * T / 100
  output$sitable <- renderPrint({
    si <- (input$principal * input$roi * input$years) / 100
    amount <- input$principal + si
    data.frame(Principal = input$principal,
               ROI = input$roi,
               years = input$years,
               SI = si,
               repayable = amount)
  })

  # Compound interest: A = P * (1 + r / (n * 100))^(n * t), where n is the
  # number of compounding periods per year chosen via input$compoundterm.
  output$citable <- renderPrint({
    compterm <- as.numeric(input$compoundterm)
    amount <- input$principal * ((1 + (input$roi / (compterm * 100)))^(compterm * input$years))
    ci <- amount - input$principal
    data.frame(Principal = input$principal,
               ROI = input$roi,
               years = input$years,
               CI = ci,
               repayable = amount)
  })
})
|
62348fc26e31d9f6a3b63484bd3d82cb39414dd7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HaploSim/examples/haploListClass.Rd.R
|
55c538d20c8e55ca71098177a117cd6f03e08265
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
haploListClass.Rd.R
|
# Usage example extracted from the HaploSim package's Rd documentation for
# the S4 class "haploList"; showClass() prints the class representation.
library(HaploSim)
### Name: haploList-class
### Title: Class "haploList"
### Aliases: haploList-class [,haploList,ANY,missing-method
### c,haploList-method print,haploList-method show,haploList-method
### Keywords: datagen
### ** Examples
showClass("haploList")
|
e7bc89cdf1c946653c2995e2e40c705dab5a6b04
|
30cc5a35d3576a6207f3fa165d1fbc2f1a6f4dd7
|
/man/weather_norms_fields.Rd
|
52dcf9700dc89e697a2bcf937732da0201b9e772
|
[
"MIT"
] |
permissive
|
modernresearchconsulting/aWhere-R-Library
|
0a783d3de181bf30046dd475600d78b6881efe68
|
62f06f94a0360b8223620609c6c349b9e45ef6cd
|
refs/heads/master
| 2021-08-19T16:33:11.744449
| 2017-04-05T16:58:48
| 2017-04-05T16:58:48
| 112,121,244
| 0
| 0
| null | 2017-11-26T22:19:28
| 2017-11-26T22:19:27
| null |
UTF-8
|
R
| false
| true
| 2,495
|
rd
|
weather_norms_fields.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weather-norms.R
\name{weather_norms_fields}
\alias{weather_norms_fields}
\title{weather_norms_fields}
\usage{
weather_norms_fields(field_id, monthday_start, monthday_end, year_start,
year_end, exclude_years = c())
}
\arguments{
\item{-}{field_id: the field_id associated with the location for which you want to pull data.
Field IDs are created using the create_field function. (string)}
\item{-}{monthday_start: character string of the first month and day for which you want to retrieve data,
in the form: MM-DD. This is the start of your date range. e.g. '07-01' (July 1) (required)}
\item{-}{monthday_end: character string of the last month and day for which you want to retrieve data,
in the form: MM-DD. This is the end of your date range. e.g. '07-01' (July 1) (required)}
\item{-}{year_start: character string of the starting year (inclusive) of the range of years for which
you're calculating norms, in the form YYYY. e.g., 2008 (required)}
\item{-}{year_end: character string of the last year (inclusive) of the range of years for which
you're calculating norms, in the form YYYY. e.g., 2015 (required)}
\item{-}{exclude_years: character string of a year or years which you'd like to exclude from
your range of years on which to calculate norms. To exclude
multiple years, provide a vector of years. You must include
at least three years of data with which to calculate the norms. (optional)}
}
\value{
dataframe of requested data for dates requested
}
\description{
\code{weather_norms_fields} pulls long term norm weather data from aWhere's API based on field id
}
\details{
This function allows you to calculate the averages for weather attributes
across any range of years for which data are available. The data pulled includes
meanTemp, maxTemp, minTemp, precipitation average, solar radiation average,
minHumidity, maxHumidity, maxWind and averageWind, along with the standard deviations
for these variables. The data pulled is for the field id identified.
The data returned in this function
allow you to compare this year or previous years to the long-term normals, calculated as
the average of those weather conditions on that day in that location over the years specified.
}
\examples{
\dontrun{weather_norms_fields("aWhere", monthday_start = "06-01", monthday_end = "09-01",
year_start = 2006, year_end = 2015)}
}
\references{
http://developer.awhere.com/api/reference/weather/norms
}
|
444fb7996861b2aae2c0fbbe7581fae838491b61
|
e10b530b0f32b9df6e1b168cf8c309abd1f17b93
|
/inst/doc/FAQ.R
|
5506f2a9287ec80b101d4cef51cf0f7bd1c36010
|
[] |
no_license
|
scchess/fitdistrplus
|
ff7163842c65b545247f0272b5656fb8099ac761
|
be968ad860b5f431d8f86b820cf8b47041ab8975
|
refs/heads/master
| 2021-06-12T23:35:45.883389
| 2017-03-24T13:25:38
| 2017-03-24T13:25:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,080
|
r
|
FAQ.R
|
## ----setup, echo=FALSE, message=FALSE, warning=FALSE---------------------
require(fitdistrplus)
set.seed(1234)
options(digits = 3)
## ---- eval=FALSE---------------------------------------------------------
# dgumbel <- function(x, a, b) 1/b*exp((a-x)/b)*exp(-exp((a-x)/b))
# pgumbel <- function(q, a, b) exp(-exp((a-q)/b))
# qgumbel <- function(p, a, b) a-b*log(-log(p))
# data(groundbeef)
# fitgumbel <- fitdist(groundbeef$serving, "gumbel", start=list(a=10, b=10))
## ---- eval=FALSE---------------------------------------------------------
# dzmgeom <- function(x, p1, p2) p1 * (x == 0) + (1-p1)*dgeom(x-1, p2)
# pzmgeom <- function(q, p1, p2) p1 * (q >= 0) + (1-p1)*pgeom(q-1, p2)
# rzmgeom <- function(n, p1, p2)
# {
# u <- rbinom(n, 1, 1-p1) #prob to get zero is p1
# u[u != 0] <- rgeom(sum(u != 0), p2)+1
# u
# }
# x2 <- rzmgeom(1000, 1/2, 1/10)
# fitdist(x2, "zmgeom", start=list(p1=1/2, p2=1/2))
## ---- message=FALSE------------------------------------------------------
data("endosulfan")
library("actuar")
fendo.B <- fitdist(endosulfan$ATV, "burr", start = list(shape1 = 0.3, shape2 = 1, rate = 1))
summary(fendo.B)
## ---- fig.height=3, fig.width=6------------------------------------------
x3 <- rlnorm(1000)
f1 <- fitdist(x3, "lnorm", method="mle")
f2 <- fitdist(x3, "lnorm", method="mme")
par(mfrow=1:2)
cdfcomp(list(f1, f2), do.points=FALSE, xlogscale = TRUE, main = "CDF plot")
denscomp(list(f1, f2), demp=TRUE, main = "Density plot")
## ------------------------------------------------------------------------
c("E(X) by MME"=as.numeric(exp(f2$estimate["meanlog"]+f2$estimate["sdlog"]^2/2)),
"E(X) by MLE"=as.numeric(exp(f1$estimate["meanlog"]+f1$estimate["sdlog"]^2/2)),
"empirical"=mean(x3))
c("Var(X) by MME"=as.numeric(exp(2*f2$estimate["meanlog"]+f2$estimate["sdlog"]^2)*(exp(f2$estimate["sdlog"]^2)-1)),
"Var(X) by MLE"=as.numeric(exp(2*f1$estimate["meanlog"]+f1$estimate["sdlog"]^2)*(exp(f1$estimate["sdlog"]^2)-1)),
"empirical"=var(x3))
## ------------------------------------------------------------------------
set.seed(1234)
x <- rnorm(100, mean = 1, sd = 0.5)
(try(fitdist(x, "exp")))
## ------------------------------------------------------------------------
fitdist(x[x >= 0], "exp")
fitdist(x - min(x), "exp")
## ------------------------------------------------------------------------
set.seed(1234)
x <- rnorm(100, mean = 0.5, sd = 0.25)
(try(fitdist(x, "beta")))
## ------------------------------------------------------------------------
fitdist(x[x > 0 & x < 1], "beta")
fitdist((x - min(x)*1.01) / (max(x) * 1.01 - min(x) * 1.01), "beta")
## ---- message=FALSE, fig.height=4, fig.width=8---------------------------
# Density of an exponential distribution truncated to [low, upp]:
# the Exp(rate) density renormalized over the interval, zero outside it.
dtexp <- function(x, rate, low, upp) {
  interval_mass <- pexp(upp, rate = rate) - pexp(low, rate = rate)
  inside <- (x >= low) & (x <= upp)
  dexp(x, rate) / interval_mass * inside
}
# CDF of the truncated exponential: rescaled on [low, upp], 0 below, 1 above.
ptexp <- function(q, rate, low, upp) {
  p_low <- pexp(low, rate = rate)
  interval_mass <- pexp(upp, rate = rate) - p_low
  inside <- (q >= low) & (q <= upp)
  (pexp(q, rate) - p_low) / interval_mass * inside + as.numeric(q > upp)
}
n <- 200
x <- rexp(n); x <- x[x > .5 & x < 3]
f1 <- fitdist(x, "texp", method="mle", start=list(rate=3), fix.arg=list(low=min(x), upp=max(x)))
f2 <- fitdist(x, "texp", method="mle", start=list(rate=3), fix.arg=list(low=.5, upp=3))
gofstat(list(f1, f2))
cdfcomp(list(f1, f2), do.points = FALSE, xlim=c(0, 3.5))
## ---- message=FALSE, fig.height=4, fig.width=8---------------------------
# Density of an exponential censored at low and upp: the continuous Exp(rate)
# density on [low, upp] plus point masses at the two bounds (all tail mass
# below low piles up at low, all mass above upp piles up at upp).
dtiexp <- function(x, rate, low, upp) {
  mass_low <- pexp(low, rate = rate)
  mass_upp <- pexp(upp, rate = rate, lower.tail = FALSE)
  within <- (x >= low) * (x <= upp)
  dexp(x, rate) * within + mass_low * (x == low) + mass_upp * (x == upp)
}
# CDF of the censored exponential: the Exp(rate) CDF on [low, upp], 1 above.
ptiexp <- function(q, rate, low, upp) {
  pexp(q, rate) * (q >= low) * (q <= upp) + as.numeric(q > upp)
}
n <- 100; x <- pmax(pmin(rexp(n), 3), .5)
# the loglikelihood has a discontinous point at the solution
par(mar=c(4,4,2,1), mfrow=1:2)
llcurve(x, "tiexp", plot.arg="low", fix.arg = list(rate=2, upp=5), min.arg=0, max.arg=.5, lseq=200)
llcurve(x, "tiexp", plot.arg="upp", fix.arg = list(rate=2, low=0), min.arg=3, max.arg=4, lseq=200)
## ---- fig.height=4, fig.width=6------------------------------------------
(f1 <- fitdist(x, "tiexp", method="mle", start=list(rate=3, low=0, upp=20)))
(f2 <- fitdist(x, "tiexp", method="mle", start=list(rate=3), fix.arg=list(low=min(x), upp=max(x))))
gofstat(list(f1, f2))
cdfcomp(list(f1, f2), do.points = FALSE, addlegend=FALSE, xlim=c(0, 3.5))
curve(ptiexp(x, 1, .5, 3), add=TRUE, col="blue", lty=3)
legend("bottomright", lty=1:3, col=c("red", "green", "blue", "black"),
leg=c("full MLE", "MLE fixed arg", "true CDF", "emp. CDF"))
## ---- fig.height=3, fig.width=6------------------------------------------
set.seed(1234)
x <- rgamma(n = 100, shape = 2, scale = 1)
# fit of the good distribution
fgamma <- fitdist(x, "gamma")
# fit of a bad distribution
fexp <- fitdist(x, "exp")
g <- gofstat(list(fgamma, fexp), fitnames = c("gamma", "exp"))
denscomp(list(fgamma, fexp), legendtext = c("gamma", "exp"))
# results of the tests
## chi square test (with corresponding table with theoretical and observed counts)
g$chisqpvalue
g$chisqtable
## Anderson-Darling test
g$adtest
## Cramer von Mises test
g$cvmtest
## Kolmogorov-Smirnov test
g$kstest
## ---- fig.height=3, fig.width=6------------------------------------------
set.seed(1234)
x1 <- rpois(n = 100, lambda = 100)
f1 <- fitdist(x1, "norm")
g1 <- gofstat(f1)
g1$kstest
x2 <- rpois(n = 10000, lambda = 100)
f2 <- fitdist(x2, "norm")
g2 <- gofstat(f2)
g2$kstest
par(mfrow=1:2)
denscomp(f1, demp = TRUE, addlegend = FALSE, main = "small sample")
denscomp(f2, demp = TRUE, addlegend = FALSE, main = "big sample")
## ---- fig.height=3, fig.width=6------------------------------------------
set.seed(1234)
x3 <- rpois(n = 500, lambda = 1)
f3 <- fitdist(x3, "norm")
g3 <- gofstat(f3)
g3$kstest
x4 <- rpois(n = 50, lambda = 1)
f4 <- fitdist(x4, "norm")
g4 <- gofstat(f4)
g4$kstest
par(mfrow=1:2)
denscomp(f3, addlegend = FALSE, main = "big sample")
denscomp(f4, addlegend = FALSE, main = "small sample")
## ------------------------------------------------------------------------
g3$chisqtable
g3$chisqpvalue
g4$chisqtable
g4$chisqpvalue
## ------------------------------------------------------------------------
set.seed(1234)
g <- rgamma(100, shape = 2, rate = 1)
(f <- fitdist(g, "gamma"))
(f0 <- fitdist(g, "exp"))
L <- logLik(f)
k <- length(f$estimate) # number of parameters of the complete distribution
L0 <- logLik(f0)
k0 <- length(f0$estimate) # number of parameters of the simplified distribution
(stat <- 2*L - 2*L0)
(critical_value <- qchisq(0.95, df = k - k0))
(rejected <- stat > critical_value)
## ------------------------------------------------------------------------
# Shifted lognormal: Y = X - shift with X ~ LN(mean, sigma).
# d/p/q wrappers delegate to the stats lognormal functions on x + shift.
dshiftlnorm <- function(x, mean, sigma, shift, log = FALSE) {
  dlnorm(x + shift, mean, sigma, log = log)
}
pshiftlnorm <- function(q, mean, sigma, shift, log.p = FALSE) {
  plnorm(q + shift, mean, sigma, log.p = log.p)
}
qshiftlnorm <- function(p, mean, sigma, shift, log.p = FALSE) {
  qlnorm(p, mean, sigma, log.p = log.p) - shift
}
# Variants without the log/log.p argument, used in the FAQ to illustrate
# loss of numerical accuracy when the log-density is not available.
dshiftlnorm_no <- function(x, mean, sigma, shift) {
  dshiftlnorm(x, mean, sigma, shift)
}
pshiftlnorm_no <- function(q, mean, sigma, shift) {
  pshiftlnorm(q, mean, sigma, shift)
}
## ------------------------------------------------------------------------
data(dataFAQlog1)
y <- dataFAQlog1
D <- 1-min(y)
f0 <- fitdist(y+D, "lnorm")
start <- list(mean=as.numeric(f0$estimate["meanlog"]),
sigma=as.numeric(f0$estimate["sdlog"]), shift=D)
# works with BFGS, but not Nelder-Mead
f <- fitdist(y, "shiftlnorm", start=start, optim.method="BFGS")
summary(f)
## ---- error=FALSE--------------------------------------------------------
f2 <- try(fitdist(y, "shiftlnorm_no", start=start, optim.method="BFGS"))
print(attr(f2, "condition"))
## ------------------------------------------------------------------------
sum(log(dshiftlnorm_no(y, 0.16383978, 0.01679231, 1.17586600 )))
log(prod(dshiftlnorm_no(y, 0.16383978, 0.01679231, 1.17586600 )))
sum(dshiftlnorm(y, 0.16383978, 0.01679231, 1.17586600, TRUE ))
## ---- eval=FALSE, echo=TRUE----------------------------------------------
# double dlnorm(double x, double meanlog, double sdlog, int give_log)
# {
# double y;
#
# #ifdef IEEE_754
# if (ISNAN(x) || ISNAN(meanlog) || ISNAN(sdlog))
# return x + meanlog + sdlog;
# #endif
# if(sdlog <= 0) {
# if(sdlog < 0) ML_ERR_return_NAN;
# // sdlog == 0 :
# return (log(x) == meanlog) ? ML_POSINF : R_D__0;
# }
# if(x <= 0) return R_D__0;
#
# y = (log(x) - meanlog) / sdlog;
# return (give_log ?
# -(M_LN_SQRT_2PI + 0.5 * y * y + log(x * sdlog)) :
# M_1_SQRT_2PI * exp(-0.5 * y * y) / (x * sdlog));
# /* M_1_SQRT_2PI = 1 / sqrt(2 * pi) */
#
# }
## ---- eval=FALSE, echo=TRUE----------------------------------------------
# -(M_LN_SQRT_2PI + 0.5 * y * y + log(x * sdlog))
## ---- eval=FALSE, echo=TRUE----------------------------------------------
# M_1_SQRT_2PI * exp(-0.5 * y * y) / (x * sdlog))
## ------------------------------------------------------------------------
f2 <- fitdist(y, "shiftlnorm", start=start, lower=c(-Inf, 0, -min(y)), optim.method="Nelder-Mead")
summary(f2)
print(cbind(BFGS=f$estimate, NelderMead=f2$estimate))
## ------------------------------------------------------------------------
data(dataFAQscale1)
head(dataFAQscale1)
summary(dataFAQscale1)
## ------------------------------------------------------------------------
for(i in 6:0)
cat(10^i, try(mledist(dataFAQscale1*10^i, "cauchy")$estimate), "\n")
## ------------------------------------------------------------------------
data(dataFAQscale2)
head(dataFAQscale2)
summary(dataFAQscale2)
## ------------------------------------------------------------------------
for(i in 0:5)
cat(10^(-2*i), try(mledist(dataFAQscale2*10^(-2*i), "cauchy")$estimate), "\n")
## ----scalenormal, echo=TRUE, warning=FALSE-------------------------------
set.seed(1234)
x <- rnorm(1000, 1, 2)
fitdist(x, "norm", lower=c(-Inf, 0))
## ----shapeburr, echo=TRUE, warning=FALSE---------------------------------
x <- rburr(1000, 1, 2, 3)
fitdist(x, "burr", lower=c(0, 0, 0), start=list(shape1 = 1, shape2 = 1,
rate = 1))
## ----probgeom, echo=TRUE, warning=FALSE----------------------------------
x <- rgeom(1000, 1/4)
fitdist(x, "geom", lower=0, upper=1)
## ----shiftexp, echo=TRUE, warning=FALSE----------------------------------
# Shifted exponential: X = E + shift where E ~ Exp(rate).
dsexp <- function(x, rate, shift) {
  dexp(x - shift, rate = rate)
}
psexp <- function(x, rate, shift) {
  pexp(x - shift, rate = rate)
}
rsexp <- function(n, rate, shift) {
  shift + rexp(n, rate = rate)
}
## Code chunks extracted (knitr "## ----" separators) from a fitdistrplus
## vignette: quantile-matching estimation (QME) on discrete distributions,
## bootstrap confidence intervals, and goodness-of-fit plots.
## Fit the shifted exponential by MLE with box constraints on (rate, shift).
x <- rsexp(1000, 1/4, 1)
fitdist(x, "sexp", start=list(rate=1, shift=0), lower= c(0, -min(x)))
## ---- fig.height=3, fig.width=6------------------------------------------
## Geometric c.d.f. and quantile function are step functions.
pgeom(0:3, prob=1/2)
qgeom(c(0.3, 0.6, 0.9), prob=1/2)
par(mar=c(4,4,2,1), mfrow=1:2)
curve(pgeom(x, prob=1/2), 0, 10, n=301, main="c.d.f.")
curve(qgeom(x, prob=1/2), 0, 1, n=301, main="q.f.")
## ------------------------------------------------------------------------
## Sample median for even vs odd sample sizes.
x <- c(0, 0, 0, 0, 1, 1, 3, 2, 1, 0, 0)
median(x[-1]) #sample size 10
median(x) #sample size 11
## ---- fig.height=4, fig.width=6------------------------------------------
## QME objective: squared gap between the model median and the sample median.
## Because qgeom() is a step function, L2 is piecewise constant in p.
x <- rgeom(100, 1/3)
L2 <- function(p)
  (qgeom(1/2, p) - median(x))^2
L2(1/3) #theoretical value
curve(L2(x), 0.10, 0.95, xlab=expression(p), ylab=expression(L2(p)), main="squared differences", n=301)
## ------------------------------------------------------------------------
fitdist(x, "geom", method="qme", probs=1/2, start=list(prob=1/2), control=list(trace=1, REPORT=1))
fitdist(x, "geom", method="qme", probs=1/2, start=list(prob=1/20), control=list(trace=1, REPORT=1))
## ------------------------------------------------------------------------
## SANN (simulated annealing) does not use gradients, so it can move across
## the flat steps of the objective.
fitdist(x, "geom", method="qme", probs=1/2, optim.method="SANN", start=list(prob=1/20))
fitdist(x, "geom", method="qme", probs=1/2, optim.method="SANN", start=list(prob=1/2))
## ---- fig.height=4, fig.width=6------------------------------------------
## Same experiment with a Poisson sample.
par(mar=c(4,4,2,1))
x <- rpois(100, lambda=7.5)
L2 <- function(lam)
  (qpois(1/2, lambda = lam) - median(x))^2
curve(L2(x), 6, 9, xlab=expression(lambda), ylab=expression(L2(lambda)), main="squared differences", n=201)
## ------------------------------------------------------------------------
fitdist(x, "pois", method="qme", probs=1/2, start=list(lambda=2))
fitdist(x, "pois", method="qme", probs=1/2, optim.method="SANN", start=list(lambda=2))
## ---- fig.height=4, fig.width=4, warning = FALSE-------------------------
## Bootstrap CIs (bn$CI) vs asymptotic normal CIs (estimate +/- 1.96*sd).
set.seed(1234)
n <- rnorm(30, mean = 10, sd = 2)
fn <- fitdist(n, "norm")
bn <- bootdist(fn)
bn$CI
fn$estimate + cbind("estimate"= 0, "2.5%"= -1.96*fn$sd, "97.5%"= 1.96*fn$sd)
llplot(fn, back.col = FALSE)
## ---- fig.height=4, fig.width=4, warning = FALSE-------------------------
## Same comparison on a gamma sample.
set.seed(1234)
g <- rgamma(30, shape = 0.1, rate = 10)
fg <- fitdist(g, "gamma")
bg <- bootdist(fg)
bg$CI
fg$estimate + cbind("estimate"= 0, "2.5%"= -1.96*fg$sd, "97.5%"= 1.96*fg$sd)
llplot(fg, back.col = FALSE)
## ---- fig.height=3, fig.width=4, warning = FALSE-------------------------
## Censored fit on the salinity data (log10 of LC50 values).
data(salinity)
log10LC50 <-log10(salinity)
fit <- fitdistcens(log10LC50, "norm")
# Bootstrap
bootsample <- bootdistcens(fit, niter = 101)
#### We used only 101 iterations in that example to limit the calculation time but
#### in practice you should take at least 1001 bootstrap iterations
# Calculation of the quantile of interest (here the 5 percent hazard concentration)
(HC5 <- quantile(bootsample, probs = 0.05))
# visualizing pointwise confidence intervals on other quantiles
CIcdfplot(bootsample, CI.output = "quantile", CI.fill = "pink", xlim = c(0.5,2), main = "")
## ------------------------------------------------------------------------
## Potentially affected fraction (PAF) at a fixed exposure, with a bootstrap CI.
exposure <- 1.2
# Bootstrap sample of the PAF at this exposure
PAF <- pnorm(exposure, mean = bootsample$estim$mean, sd = bootsample$estim$sd)
# confidence interval from 2.5 and 97.5 percentiles
quantile(PAF, probs = c(0.025, 0.975))
## ---- fig.height=6, fig.width=6, warning = FALSE-------------------------
## Four classical goodness-of-fit views of the gamma fit (density, Q-Q,
## c.d.f., P-P) on the groundbeef serving sizes.
data(groundbeef)
serving <- groundbeef$serving
fit <- fitdist(serving, "gamma")
par(mfrow = c(2,2), mar = c(4, 4, 1, 1))
denscomp(fit, addlegend = FALSE, main = "", xlab = "serving sizes (g)", fitcol = "orange")
qqcomp(fit, addlegend = FALSE, main = "", fitpch = 16, fitcol = "grey", line01lty = 2)
cdfcomp(fit, addlegend = FALSE, main = "", xlab = "serving sizes (g)", fitcol = "orange", lines01 = TRUE)
ppcomp(fit, addlegend = FALSE, main = "", fitpch = 16, fitcol = "grey", line01lty = 2)
## ---- fig.height= 4, fig.width= 6-----------------------------------------
library(ggplot2)
## Compare three candidate distributions on one ggplot-style density plot.
fitW <- fitdist(serving, "weibull")
fitln <- fitdist(serving, "lnorm")
fitg <- fitdist(serving, "gamma")
dcomp <- denscomp(list(fitW, fitln, fitg), legendtext = c("Weibull", "lognormal", "gamma"),
                  xlab = "serving sizes (g)", xlim = c(0, 250),
                  fitcol = c("red", "green", "orange"), fitlty = 1,
                  xlegend = "topright", plotstyle = "ggplot", addlegend = FALSE)
dcomp + ggplot2::theme_minimal() + ggplot2::ggtitle("Ground beef fits")
|
07586a3e703fea49fc11396a5cb3cb3100bbb3dd
|
d89fbe824fbe31a1be50131ed4abbbc3ad7b9c3d
|
/m2/m2_exercise.R
|
e2e8e44a98310d3ff3fd21f532fe40419c2e61d1
|
[] |
no_license
|
lbl1985/RLearning
|
a2391241eda89f44d6d6b3b40b608152a89c2286
|
0f000b4248941ed06e9f5ad60eb2d13ab7a4d1ae
|
refs/heads/master
| 2020-04-19T04:53:29.932878
| 2016-10-19T06:12:51
| 2016-10-19T06:12:51
| 67,836,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,861
|
r
|
m2_exercise.R
|
# Data-cleaning exercise on a tab-separated movie dataset: fix a column name,
# drop rows with missing values, and strip units from the Runtime column.
# NOTE(review): setwd() with an absolute path makes the script machine-specific.
setwd("C:/Experiment/R/RLearning/m2")
movies <- read.table(
  file = "Movies.txt",
  sep = "\t",
  header = TRUE,
  quote = "\""
)
#peek at data
head(movies)
# Look at column names
names(movies)
#Problem #1: column name is incorrect
names(movies[5])
#Rename variable (i.e. column) 5 to the intended name
names(movies)[5] <- "Critic.Score"
names(movies)
# Problem #2: Missing values
# Count missing values
sum(is.na(movies))
#inspect rows with missing values
tail(movies)
# Exclude observations with missing values
movies <- na.omit(movies)
# Problem 2 Resolved
sum(is.na(movies))
# Problem #3: units in runtime column (values like "120 min")
# Peek at the movie runtime data
head(movies$Runtime)
#Note: This next line will throw an error
# mean(movies$Runtime)
# Determine the data type
class(movies$Runtime)
# Cast from factor to character string
runtimes <- as.character(movies$Runtime)
head(runtimes)
class(runtimes)
#eliminate the " min" unit of measure
runtimes <- sub(" min", "", runtimes)
head(runtimes)
# Cast the character string to integer
movies$Runtime <- as.integer(runtimes)
mean(movies$Runtime)
#problem 4: Box office uses three units of measure ($, k, M)
head(movies$Box.Office)
# Convert a box-office revenue string to a numeric value in millions of
# dollars. Handles the three formats present in the data:
#   "$1.2M"    -> 1.2      (already millions)
#   "$500k"    -> 0.5      (thousands -> millions)
#   "$750000"  -> 0.75     (raw dollars -> millions)
# Returns NA (with an as.numeric warning) for unparseable input.
# Intended to be applied per element, e.g. via sapply().
convertBoxOffice <- function(boxOffice)
{
  stringBoxOffice <- as.character(boxOffice)
  # Strip the currency symbol and unit suffix before parsing the number.
  # BUG FIX: the original class "[$|k|M]" also matched literal "|" characters
  # ("|" is not an alternation operator inside a character class).
  replacedBoxOffice <- gsub("[$kM]", "", stringBoxOffice)
  numericBoxOffice <- as.numeric(replacedBoxOffice)
  if (grepl("M", boxOffice)) {
    numericBoxOffice               # already expressed in millions
  } else if (grepl("k", boxOffice)) {
    numericBoxOffice * 0.001       # thousands -> millions
  } else {
    numericBoxOffice * 0.000001    # raw dollars -> millions
  }
}
# Convert box office to a single unit of measure (millions).
# sapply() is appropriate here: convertBoxOffice returns one numeric per
# element, so the result simplifies to a numeric vector.
movies$Box.Office <- sapply(movies$Box.Office, convertBoxOffice)
# Problem 4 is solved
head(movies$Box.Office)
class(movies$Box.Office)
mean(movies$Box.Office)
#Save the cleaned data to a CSV file in the working directory
write.csv(movies, "Movies_self.csv")
|
bb7317ddc44cb8e6eedeab37eea98942c043d84d
|
d9f41bcafe2bc5385bd11f845d15dd6c0b9eabae
|
/Plot1.R
|
0868c22c93f7f64161c1d25d45fbe0ff4c6c1a60
|
[] |
no_license
|
Harrykoch/ExData_Plotting1
|
2e2c929936575595383d420c9929f4da0ac3fbd9
|
f1e13d515b06a5aca9e3084045f803bf678eb1b6
|
refs/heads/master
| 2021-01-17T14:09:56.318433
| 2014-07-12T14:09:55
| 2014-07-12T14:09:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 741
|
r
|
Plot1.R
|
# Exploratory Data Analysis course, Plot 1: histogram of household global
# active power for 1-2 Feb 2007, saved as plot1.png.
# NOTE(review): setwd() with a user-specific path makes this non-portable.
setwd("~/Desktop/Coursera - Exploratory data analysis")
# Plot 1
#================
#
# 1. Read the dataset into a dataframe ("?" marks missing values)
# 2. Determine which rows of the frame are of interest and create a new variable
# hhpwc (household power consumption) that will be used for the analysis
# 3. Clean up the intermediate variables to release memory
# 4. Create the histogram
# 5. Save the plot
hh_power <- read.csv("household_power_consumption.txt", sep=";",na.strings=c("NA","?"))
# Keep only the two days of interest (Date is stored as d/m/yyyy text)
x<-hh_power$Date=="1/2/2007" | hh_power$Date=="2/2/2007"
hhpwc<-hh_power[x,]
rm(hh_power)
rm(x)
hist(hhpwc$Global_active_power, xlab="Global Active Power (kilowatts)", main="Global Active Power", col="red", ylim=c(0,1200))
# Copy the on-screen plot to a PNG file, then close the PNG device
dev.copy(png, "plot1.png")
dev.off()
|
1ca771a345853892d0217e63f982e2a0a66df606
|
c712c674aaa9aeeb406177911ca5ec204d8f46f7
|
/man/scoRched.Rd
|
792edfc8b9863fdaa2fe687c81951ca390b049e3
|
[] |
no_license
|
trinker/tsgames
|
736c91dc7731cfc92c5298f5ff9f0da51509f03a
|
8914dae55b39630e788b475215647b7ef4c61e82
|
refs/heads/master
| 2020-12-25T10:42:08.290819
| 2013-06-26T01:17:10
| 2013-06-26T01:17:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
rd
|
scoRched.Rd
|
\name{scoRched}
\alias{scoRched}
\title{Play scoRched}
\usage{
scoRched(fps = 45, mass = 1)
}
\arguments{
\item{fps}{Numeric. Frames per second to update}
\item{mass}{Numeric. The mass of something}
}
\description{
Play scoRched. Then try to not die
}
\author{
Marco Visser
}
|
a2c193e85340f4bddfb509376d53c4c0d30670c6
|
709c16710d7cae612de6c779cafb7199813e0f24
|
/AhasHfBkleAmputation/R/PriorOutcomeCovariateBuilder.R
|
1d2340fa6d1642b4f21a09b9782a476c5997c7de
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/StudyProtocols
|
87a17fc3c00488b350f9416c584a1d0334d8dfcb
|
8de0454c6be4c120ba97d7376907d651327573a4
|
refs/heads/master
| 2023-04-27T18:59:35.785026
| 2020-02-16T00:32:52
| 2020-02-16T00:32:52
| 27,415,586
| 37
| 41
| null | 2023-04-25T19:55:45
| 2014-12-02T04:49:53
|
R
|
UTF-8
|
R
| false
| false
| 6,175
|
r
|
PriorOutcomeCovariateBuilder.R
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of AhasHfBkleAmputation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Create settings for adding prior outcomes as covariates
#'
#' Builds a \code{covariateSettings} object recording where the outcome
#' cohorts live and which time window (relative to the index date) should be
#' scanned for prior occurrences of those outcomes.
#'
#' @param outcomeDatabaseSchema The name of the database schema that is the location
#'                              where the data used to define the outcome cohorts is
#'                              available.
#' @param outcomeTable          The tablename that contains the outcome cohorts.
#' @param outcomeIds            A vector of cohort_definition_ids used to define outcomes.
#' @param outcomeNames          A vector of names of the outcomes, to be used to create
#'                              covariate names.
#' @param windowStart           Start day of the window where covariates are captured,
#'                              relative to the index date (0 = index date).
#' @param windowEnd             End day of the window where covariates are captured,
#'                              relative to the index date (0 = index date).
#'
#' @return
#' A covariateSettings object.
#'
#' @export
createPriorOutcomesCovariateSettings <- function(outcomeDatabaseSchema = "unknown",
                                                 outcomeTable = "unknown",
                                                 outcomeIds,
                                                 outcomeNames,
                                                 windowStart = -365,
                                                 windowEnd = -1) {
  # structure() attaches the "fun" attribute (the builder function that the
  # covariate-extraction machinery will call) and the S3 class in one step.
  structure(
    list(outcomeDatabaseSchema = outcomeDatabaseSchema,
         outcomeTable = outcomeTable,
         outcomeIds = outcomeIds,
         outcomeNames = outcomeNames,
         windowStart = windowStart,
         windowEnd = windowEnd),
    fun = "AhasHfBkleAmputation::getDbPriorOutcomesCovariateData",
    class = "covariateSettings"
  )
}
#' Fetch prior-outcome covariates from the database
#'
#' Covariate builder referenced by the \code{"fun"} attribute of the settings
#' created by \code{createPriorOutcomesCovariateSettings()}. Renders and runs
#' \code{getPriorOutcomeCovariates.sql} against the cohort temp table, then
#' packages the result as an ffdf-backed \code{covariateData} object
#' (covariates + covariateRef + analysisRef + metaData).
#'
#' @param connection        An open DatabaseConnector connection.
#' @param oracleTempSchema  Temp schema to use on Oracle, if any.
#' @param cdmDatabaseSchema CDM schema name. NOTE(review): not passed to the
#'                          SQL render call below — confirm it is unused.
#' @param cohortTable       Temp table holding the cohort of interest.
#' @param cohortId          Cohort definition id to restrict to (-1 = all).
#' @param cdmVersion        CDM version string. NOTE(review): also unused here.
#' @param rowIdField        Field in the cohort table used as the row id.
#' @param covariateSettings Settings object from createPriorOutcomesCovariateSettings().
#' @param aggregated        Must be FALSE; aggregation is not supported.
#' @export
getDbPriorOutcomesCovariateData <- function(connection,
                                            oracleTempSchema = NULL,
                                            cdmDatabaseSchema,
                                            cohortTable = "#cohort_person",
                                            cohortId = -1,
                                            cdmVersion = "5",
                                            rowIdField = "subject_id",
                                            covariateSettings,
                                            aggregated = FALSE) {
  if (aggregated)
    stop("Aggregation not supported")
  writeLines("Creating covariates based on prior outcomes")
  # Render + translate the parameterized SQL for this connection's DBMS.
  sql <- SqlRender::loadRenderTranslateSql("getPriorOutcomeCovariates.sql",
                                           packageName = "AhasHfBkleAmputation",
                                           dbms = attr(connection, "dbms"),
                                           oracleTempSchema = oracleTempSchema,
                                           window_start = covariateSettings$windowStart,
                                           window_end = covariateSettings$windowEnd,
                                           row_id_field = rowIdField,
                                           cohort_temp_table = cohortTable,
                                           cohort_id = cohortId,
                                           outcome_database_schema = covariateSettings$outcomeDatabaseSchema,
                                           outcome_table = covariateSettings$outcomeTable,
                                           outcome_ids = covariateSettings$outcomeIds)
  # querySql.ffdf keeps the (potentially large) result on disk via ff.
  covariates <- DatabaseConnector::querySql.ffdf(connection, sql)
  colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
  # Covariate id scheme: <outcomeId> * 1000 + 999 (analysis id suffix 999).
  covariateRef <- data.frame(covariateId = covariateSettings$outcomeIds * 1000 + 999,
                             covariateName = paste("Prior outcome:", covariateSettings$outcomeNames),
                             analysisId = 999,
                             conceptId = 0)
  covariateRef <- ff::as.ffdf(covariateRef)
  # Construct analysis reference:
  # NOTE(review): analysisId here is 1 while covariateRef uses 999 — these
  # usually join on analysisId; confirm whether this mismatch is intended.
  analysisRef <- data.frame(analysisId = as.numeric(1),
                            analysisName = "Prior outcome",
                            domainId = "Cohort",
                            startDay = as.numeric(covariateSettings$windowStart),
                            endDay = as.numeric(covariateSettings$windowEnd),
                            isBinary = "Y",
                            missingMeansZero = "Y")
  analysisRef <- ff::as.ffdf(analysisRef)
  # Keep the rendered SQL and the call for provenance.
  metaData <- list(sql = sql, call = match.call())
  result <- list(covariates = covariates,
                 covariateRef = covariateRef,
                 analysisRef = analysisRef,
                 metaData = metaData)
  class(result) <- "covariateData"
  return(result)
}
#' Recursively repoint covariate settings at a new outcome schema/table
#'
#' Walks \code{settings} — a single \code{covariateSettings} object or an
#' arbitrarily nested list of them — and overwrites the
#' \code{outcomeDatabaseSchema} and \code{outcomeTable} fields of every
#' covariateSettings object that carries an \code{outcomeDatabaseSchema}
#' entry. Everything else is returned unchanged.
#'
#' @param settings              A covariateSettings object or (nested) list of them.
#' @param outcomeDatabaseSchema New schema name to write into the settings.
#' @param outcomeTable          New table name to write into the settings.
#' @return The settings with updated schema/table fields.
#' @export
setOutcomeDatabaseSchemaAndTable <- function(settings, outcomeDatabaseSchema, outcomeTable) {
  # inherits() instead of class(x) == "...": robust when an object carries
  # more than one class.
  if (inherits(settings, "covariateSettings")) {
    if (!is.null(settings$outcomeDatabaseSchema)) {
      settings$outcomeDatabaseSchema <- outcomeDatabaseSchema
      settings$outcomeTable <- outcomeTable
    }
  } else if (is.list(settings)) {
    # seq_along() is safe for empty lists (1:length(x) would yield c(1, 0)).
    for (i in seq_along(settings)) {
      if (is.list(settings[[i]])) {
        settings[[i]] <- setOutcomeDatabaseSchemaAndTable(settings[[i]],
                                                          outcomeDatabaseSchema,
                                                          outcomeTable)
      }
    }
  }
  return(settings)
}
|
750c4b9e8fb2dc532bb8895b142b7e4c416b5175
|
871b9d64f1630f6e2c64dd5f7e238cf8eadf5000
|
/LLB_twitter_stream.R
|
a1cfe2a9c5ee39c7ccca9657a7fa31d8722a5095
|
[] |
no_license
|
akmalirham96/Highway-Traffic-Status-Using-R
|
1e0566b1ddaa9b6a5faf55034e5026794261be98
|
a6582e777ad2276d28fec455daf655b3d5f2ae37
|
refs/heads/master
| 2020-04-07T15:50:11.901939
| 2018-11-21T14:26:07
| 2018-11-21T14:26:07
| 158,503,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,274
|
r
|
LLB_twitter_stream.R
|
# Highway traffic status from collected tweets: load a CSV of tweets, clean
# the text with tm, then check whether the cleaned tweets mention "simpang"
# to decide whether traffic is busy.
#Read file (interactive file picker)
highway <- read.csv(file.choose(), header = TRUE)
str(highway)
#Build Corpus
library(tm)
library(stopwords)
corpus <- iconv(highway$text, to = 'UTF-8', sub = "byte")
corpus <- Corpus(VectorSource(corpus))
#Cleaning text: lower-case, remove punctuation, remove numbers
corpus <- tm_map(corpus, tolower)
inspect(corpus)
corpus <- tm_map(corpus, removePunctuation)
inspect(corpus)
corpus <- tm_map(corpus, removeNumbers)
inspect(corpus)
#Exception words: Malay words the analysis should keep
exceptions <- c('dari','dan','di','ke','masa','maklumat')
my_stopwords <- setdiff(stopwords("ms", source = "stopwords-iso"), exceptions)
# Remove Malay then English stopwords.
# BUG FIX: the second call previously restarted from `corpus`, silently
# discarding the Malay stopword removal; it must chain off `cleanset`.
cleanset <- tm_map(corpus, removeWords, my_stopwords)
cleanset <- tm_map(cleanset, removeWords, stopwords('english'))
inspect(cleanset)
#Remove urls
removeURL <- function(x) gsub('http[[:alnum:]]*', '', x)
cleanset <- tm_map(cleanset, content_transformer(removeURL))
inspect(cleanset)  # was inspect(corpus): show the cleaned set, not the raw corpus
#remove leftover tokens, then collapse whitespace
cleanset <- tm_map(cleanset, removeWords, c('am','pm','amp'))
cleanset <- tm_map(cleanset, stripWhitespace)
inspect(cleanset)
#insert filtered tweets into data
data <- cleanset
# Look for the keyword in the first few documents. regexpr() returns one
# match position per document (-1 when absent).
simpang <- regexpr("simpang", data[1:4])
substr(data, 139, 139 + 7 - 1)  # NOTE(review): peeks at chars 139-145 — purpose unclear, confirm
# BUG FIX: `if (simpang > 1)` fed a length-4 vector to if(), which is an
# error in R >= 4.2; any() makes the "found in any tweet" intent explicit
# (the original > 1 threshold is preserved).
if (any(simpang > 1)) {
  print("Traffic Busy")
} else {
  print("Traffic Clear")
}
|
615a921f8e045f42846d7df98b9d4d055bb52264
|
94dcbff4ef2072f5a5ecbb95af1f259f31ad3b20
|
/R/misc.utils.R
|
9406d71a4718cb6a8a4af23915658a8d796f8a0f
|
[] |
no_license
|
DistanceDevelopment/WiSP
|
bf51406076ded020098f4973003eafc05a45d437
|
e0e2665d6b3b49ba634944b4bb7303be41620e5a
|
refs/heads/master
| 2021-06-05T02:54:08.957306
| 2020-09-14T20:03:59
| 2020-09-14T20:03:59
| 9,773,511
| 0
| 1
| null | 2020-09-14T09:30:06
| 2013-04-30T15:05:50
|
R
|
WINDOWS-1252
|
R
| false
| false
| 6,098
|
r
|
misc.utils.R
|
#' Compute outer plot margins that preserve the region's aspect ratio.
#'
#' Given a plotting area of size area.x by area.y, reserve a fixed 0.5-unit
#' frame on every side, then pad either the x or the y direction so that the
#' remaining space has the same aspect ratio as the population region
#' (reg.x : reg.y). The result is that the region is drawn with correctly
#' scaled borders.
#'
#' @param reg.x  x-length of the population region
#' @param reg.y  y-length of the population region
#' @param area.x x-length of the available plot area
#' @param area.y y-length of the available plot area
#' @return list with components \code{x} and \code{y}: the margin widths
#'
#' author: M. Erdelmeier
calculate.plot.margin <- function(reg.x, reg.y, area.x, area.y)
{
  frame <- 0.5                    # minimum border width on every side
  usable.x <- area.x - 2 * frame  # area left once the frame is removed
  usable.y <- area.y - 2 * frame
  # Compare aspect ratios to decide which direction constrains the plot.
  if ((usable.y / usable.x) < (reg.y / reg.x)) {
    # Height is the constraint: absorb the surplus width into the x margin.
    margin.x <- frame + 0.5 * (usable.x - usable.y * (reg.x / reg.y))
    margin.y <- frame
  } else {
    # Width is the constraint: absorb the surplus height into the y margin.
    margin.x <- frame
    margin.y <- frame + 0.5 * (usable.y - usable.x * (reg.y / reg.x))
  }
  list(x = margin.x, y = margin.y)
}
#-------------------------------------------------------
# Obscure: to remove unobserved data from sample objects
#-------------------------------------------------------
# S3 generic: dispatches to obscure.<class>(x, ...) methods defined elsewhere.
obscure<-function(x,...) {UseMethod("obscure")}
#------------------------------------------------------------------------
# Display a text message in an otherwise empty plot window — used when
# there's no appropriate thing to plot.
#
# x   character: the message to show (error if not character)
# col colour of the text
# cex character expansion (text size)
#------------------------------------------------------------------------
plot.text <- function(x, col = "black", cex = 1)
{
  if (!is.character(x)) {
    stop("Argument <x> must be a character variable.\n")
  }
  # Blank 0..100 coordinate system: no box, axes, ticks or labels.
  plot(c(0, 100), c(0, 100), type = "n", bty = "n", ann = FALSE,
       xaxt = "n", yaxt = "n")
  text(50, 50, label = x, col = col, cex = cex)
}
# Miscellaneous helper functions:
#------------------------------------------------------------------------
# Number of histogram intervals for x according to Sturges' rule:
# round(1 + log2(n)), where n is the number of observations.
#------------------------------------------------------------------------
n.sturges <- function(x)
{
  n.obs <- length(x)
  round(log(n.obs, base = 2) + 1)
}
#===========================================================
# equal (generic function)
#===========================================================
#' Generic equality comparison.
#'
#' Dispatches to the 'equal' method matching the class of <obj1>: if <obj1>
#' is of class 'region', equal.region(obj1, obj2) is applied and its result
#' returned. Works only if an 'equal' method is defined for <obj1>'s class.
#'
#' @param obj1 first object for comparison (dispatch is on this argument)
#' @param obj2 second object for comparison
#' @return whatever the dispatched method returns
#'
#' author: M. Erdelmeier
equal <- function(obj1, obj2)
{
  # BUG FIX: UseMethod() accepts only (generic, object); the original call
  # UseMethod("equal", obj1, obj2) supplied an extra argument, which errors
  # in modern R. Dispatch on obj1 — obj2 is forwarded to the chosen method
  # automatically from the call.
  UseMethod("equal", obj1)
}
#-------------------------------------------------------
# Obscure: to remove unobserved data from sample objects
#-------------------------------------------------------
# NOTE(review): exact duplicate of the obscure() generic defined earlier in
# this file; redefining it is harmless but redundant — consider removing one.
obscure<-function(x,...) {UseMethod("obscure")}
|
a662b32d6afae460069aa9faf5021711e27a06ab
|
87092bd3c5d1e8c864502f851085ec80bda39705
|
/olmar.R
|
1a267fe2551fb7159bb955e55fc1717610e4153f
|
[] |
no_license
|
ngokchaoho/robust-median-mean-reversion
|
0c66c964883ecf2ec9f4736f1e267a07dd8feeee
|
5cf3fa4e28f1dbd36217441b71254cf7456ed8c0
|
refs/heads/master
| 2020-04-10T10:40:56.094305
| 2018-11-20T05:57:48
| 2018-11-20T05:57:48
| 160,973,104
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,112
|
r
|
olmar.R
|
# Run an on-line portfolio-selection backtest over a matrix of daily price
# relatives. NOTE(review): the structure (epsilon threshold, moving-average
# prediction phi, simplex projection of the weight update) matches the OLMAR
# algorithm — confirm against the reference implementation.
#
# Args:
#   fid         output file id. NOTE(review): currently unused in the body.
#   data_matrix n x m matrix; rows are multiplied against the weight vector,
#               so rows = days and columns = assets.
# Returns:
#   list(cum_ret, cumpro_ret, daily_ret): final cumulative return, the
#   cumulative-return path, and the per-day portfolio returns.
# Depends on simplex_projection(), defined elsewhere in this project.
olmar_run <- function(fid,data_matrix)
{
  datamatrix1=data_matrix
  n = nrow(datamatrix1)
  m = ncol(datamatrix1)
  cum_ret = 1
  cumpro_ret = NULL
  daily_ret = NULL
  epsilon=10                     # reversion threshold for the weight update
  alpha=0.5                      # moving-average decay for the prediction phi
  tc=0                           # transaction-cost rate (0 = frictionless)
  sumreturn=1                    # NOTE(review): never used afterwards
  day_weight = as.matrix(rep(1/m,m))      # start from the uniform portfolio
  day_weight_o = as.matrix(rep(0,m))      # previous day's post-trade weights
  daily_portfolio = as.vector(rep(NULL,m))
  phi=t(as.matrix(rep(1,m)))              # predicted price relatives
  for(i in seq(from=1, to=n))
  {data<-t(as.matrix(datamatrix1[i,]))    # today's price relatives as a row
    if(i>=2){
      # Moving-average prediction of the next price relatives
      phi=alpha+(1-alpha)*phi/datamatrix1[i-1,]
      # How far below the epsilon target the predicted return falls
      ell=max(0,epsilon-phi%*%day_weight)
      xbar=mean(phi)
      denominator=(phi-xbar)%*%t(phi-xbar)
      if(denominator!=0){
        lambda=ell/denominator
      }else{
        lambda=0                 # degenerate prediction: no update step
      }
      # Step towards the prediction, then project back onto the simplex
      day_weight<-day_weight+as.numeric(lambda)*(t(phi)-xbar)
      day_weight<-simplex_projection(day_weight,1)
    }
    day_weight<-day_weight/sum(day_weight)  # renormalize to sum to 1
    if(i==1)
    {
      daily_portfolio=day_weight
    }else{
      daily_portfolio=cbind(daily_portfolio,day_weight)
    }
    # Day return net of (half-spread) transaction costs on turnover
    daily_ret=cbind(daily_ret,(data%*%day_weight)*(1-tc/2*sum(abs(day_weight-day_weight_o))))
    cum_ret=cum_ret*daily_ret[i]
    cumpro_ret=cbind(cumpro_ret,cum_ret)
    # Post-trade weights drift with today's price relatives
    day_weight_o = day_weight*t(data)/daily_ret[i]
  }
  return(list(cum_ret,cumpro_ret,daily_ret))
}
# Driver script: load the S&P 500 price-relative matrix from a .mat file,
# run the backtest, save the result, and hand it to the analysis routine.
#install.packages('R.matlab')
library("R.matlab")
#install.packages("readxl")
#install.packages("stats")
#library(stats)
#library(readxl)
path <- ('Data')
#input: Matlab data file under Data/
pathname <- file.path(path,'sp500.mat')
data_1 <- as.vector(readMat(pathname))
#data_matrix <- read_excel(pathname, sheet = "P4", skip=4, col_names = FALSE)
#data_matrix <- data.matrix(data_matrix[,2:ncol(data_matrix)])
#data_matrix <- data_matrix[complete.cases(data_matrix),]
#data_matrix <- read.csv(pathname,sep=',',stringsAsFactors = FALSE,skip=3,header=TRUE)
#class(data_1)
#print(data_1)
data_matrix <- as.matrix(as.data.frame(data_1))
#class(data_matrix)
fid = "olmar.txt"
#implementation: run the backtest and persist the raw result
result = olmar_run(fid,data_matrix)
write.csv(file = "olmar.csv",result)
source("ra_result_analyze.R")
# NOTE(review): paste(pathname, "olmar.csv", sep = '_') yields
# "Data/sp500.mat_olmar.csv" — confirm this is the intended analysis label.
ra_result_analyze(paste(pathname,"olmar.csv",sep = '_'),data_matrix,as.numeric(result[[1]]),as.numeric(result[[2]]),as.numeric(result[[3]]))
|
f24cc4698c4ac64e2857e391b308b9e13a80f0c1
|
d1e3e2e431d078fdfe92cc01014a0f05fb06a683
|
/R Code/HOMER_GO_sorting_and_graphs.R
|
ed1cc5c6024d6d0197027112cdde48802fb1aaee
|
[] |
no_license
|
jchap14/R
|
a6a9eba819f1b4517b6edaa1a42628fecc770f38
|
f7c0c8aa0c0ba003a52ee73bca1cd169eecced73
|
refs/heads/master
| 2021-04-03T10:17:28.136527
| 2018-03-11T06:16:49
| 2018-03-11T06:16:49
| 124,725,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,466
|
r
|
HOMER_GO_sorting_and_graphs.R
|
###########################################################################################
########################## Take HOMER GO output and condense and graph it #################
## Pipeline: read HOMER functional-enrichment output, filter GO biological-
## process terms through the Revigo web tool, then bar-plot -log10 p-values
## for GO terms and for pathway databases, exporting each plot to PowerPoint.
## set Title to the "enriched terms" file in directory
## NOTE(review): list.files(pattern=) takes a regex, not a glob — the leading
## "*" has nothing to repeat; "\\.enriched_terms\\.txt$" would be cleaner.
enriched_terms <- list.files(pattern="*.enriched_terms.txt$")
Title <- gsub("\\.enriched_terms.txt", "", enriched_terms, perl=T)
##### Read in the functional_enrichment files from HOMER w/ header junk removed (made manually from HTML) ####
Terms.df <- read.delim(enriched_terms, quote="\"", header=T)
## all functional terms here
GO_Tree_terms <- as.vector(unique(Terms.df$GO.Tree))
## subset by 1 functional term at a time into a new df
GO_bp.df <- subset(Terms.df, Terms.df$GO.Tree == "biological process")
GO_bp.df <- GO_bp.df[,c("GO.ID","P.value","Term","ln.P.","GO.Tree","X..of.Genes.in.Term",
                        "X..of.Target.Genes.in.Term","X..of.Total.Genes",
                        "X..of.Target.Genes","Common.Genes")]
## write out df for use w Revigo online tool
write.table(GO_bp.df, paste(Title, ".RevigoIDs", sep=''), col.names= T, row.names=F, sep='\t')
##### Run Revigo online (column 1+2, "Small" option), save output ####
##### Read in Revigo output & filter ####
ReviGO <- read.delim("REVIGO.csv", quote="\"", header=T, sep=',')
## remove NULL (redundant) terms
ReviGO <- subset(ReviGO, plot_X != "null")
## merge w genes in GO terms
ReviGO.df <- merge(ReviGO, GO_bp.df, by.x= "term_ID" , by.y= "GO.ID")
##### remove terms who target >20% of tested terms
## NOTE(review): unique() can return more than one value here, making the
## threshold a vector; confirm X..of.Target.Genes is constant per file.
number_tested_terms <- unique(ReviGO.df$X..of.Target.Genes) * 0.2
##or specify a number based on inspection
# number_tested_terms <- 71
ReviGO.df <- subset(ReviGO.df,X..of.Target.Genes.in.Term < number_tested_terms &
                      X..of.Target.Genes.in.Term > 2)
## subset interesting columns
ReviGO.df2 <- ReviGO.df[,c("term_ID","description","log10.p.value",
                           "GO.Tree","X..of.Genes.in.Term","X..of.Target.Genes.in.Term",
                           "Common.Genes")]
## sort by p-value
require("dplyr")
ReviGO.df2 <- arrange(ReviGO.df2, log10.p.value)
write.table(ReviGO.df2, paste(Title, ".ReviGO.GO_BP", sep=''), col.names= T, row.names=F, sep='\t')
##### Graph the results (plot more terms if going to remove manually) ####
require("ggplot2")
termNum <- nrow(ReviGO.df2) #"20"
df <- ReviGO.df2
size <- element_text(size= 14) #font size on plot
## make p-val positive, sort by p-value
df$log10.p.value <- df$log10.p.value * -1
df <- arrange(df, desc(log10.p.value))
##### Graph results (plot more terms if going to remove manually)
df$description <- factor(df$description, levels= df$description) #set X as factor preventing ABC order
a <- ggplot() + geom_bar(aes(y= log10.p.value, x= description), data= df, stat="identity") +
  coord_flip() + ggtitle("GO BP") + theme(plot.title= element_text(size= 14, face= "bold"),
                                          axis.text= size, legend.text= size,
                                          legend.title= size, axis.title= size) +
  geom_text(data=df, aes(x=description, y=log10.p.value, label=as.factor(X..of.Target.Genes.in.Term)),hjust=-0.5)
plot(a)
## export to powerpoint
require("export")
graph2ppt(file=paste(Title,".GO_and_Pathways.ppt",sep=''), width=10, height=9, append=T)
##### Subset & make graph for Reactome, KEGG, WikiPathways, BIOCYC, Pathway Interaction DB ####
Pathways.df <- subset(Terms.df, GO.Tree== "REACTOME pathways" | GO.Tree== "KEGG pathways" |
                        GO.Tree== "WikiPathways" | GO.Tree== "Pathway Interaction DB"| GO.Tree== "BIOCYC pathways")
df <- Pathways.df[,c("GO.ID","P.value","Term", "ln.P.","GO.Tree","X..of.Genes.in.Term",
                     "X..of.Target.Genes.in.Term","X..of.Total.Genes","X..of.Target.Genes",
                     "Common.Genes")]
## remove non-unique terms here by keeping version w/ most genes
df <- arrange(df, desc(X..of.Target.Genes.in.Term), Term)
df <- subset(df, !duplicated(Term))
## remove terms who target >20% of tested terms
number_tested_terms <- max(unique(df$X..of.Target.Genes)) * 0.2
df <- subset(df, X..of.Target.Genes.in.Term < number_tested_terms &
               X..of.Target.Genes.in.Term > 2) #specify min here
## calculate -log10pVal & subset/reorg columns
df$log10pVal <- log10(df$P.value) * -1
df <- df[,c("Term", "log10pVal", "Common.Genes", "GO.Tree",
            "X..of.Target.Genes.in.Term", "X..of.Genes.in.Term",
            "GO.ID", "X..of.Total.Genes", "X..of.Target.Genes")]
## sort by # of Target genes in Term
df <- arrange(df, desc(X..of.Target.Genes.in.Term))
write.table(df, paste(Title, ".pathways", sep=''), col.names= T, row.names=F, sep='\t')
##### Plot Pathways results ####
## NOTE(review): hard-coded top-44 cutoff; fails silently (NA rows) if fewer terms remain.
df <- df[1:44,] #plot more terms if going to remove manually
df <- arrange(df, desc(log10pVal)) ## sort by p-value
df$Term <- factor(df$Term, levels= df$Term) #set X as factor preventing ABC order
a <- ggplot() + geom_bar(aes(y= log10pVal, x= Term), data= df, stat="identity") +
  coord_flip() + ggtitle("Pathways") + theme(plot.title= element_text(size= 14, face= "bold"),
                                             axis.text= size, legend.text= size,
                                             legend.title= size, axis.title= size) +
  geom_text(data=df, aes(x=Term, y=log10pVal, label=as.factor(X..of.Target.Genes.in.Term)),hjust=-0.5)
plot(a)
graph2ppt(file=paste(Title,".GO_and_Pathways.ppt",sep=''), width=10, height=9, append=T)
|
1c7c2e8c3790c549cf8f62676d4d49ca19eaceb5
|
b2ce380eab3c91688b7a04b65d2c5afb880947f3
|
/cachematrix.R
|
b139bcfd03b961c3004b111d4b0695c1819735bb
|
[] |
no_license
|
nowayz44/ProgrammingAssignment2
|
18babc83b1da62cac468a7030f0670727ea30ab0
|
da03e32b6353546df9b8d5d5fd2b5de89e7e7735
|
refs/heads/master
| 2020-12-24T06:13:48.648051
| 2016-11-08T08:37:21
| 2016-11-08T08:37:21
| 73,161,572
| 0
| 0
| null | 2016-11-08T07:43:22
| 2016-11-08T07:43:22
| null |
UTF-8
|
R
| false
| false
| 1,827
|
r
|
cachematrix.R
|
# Caching the inverse of a matrix: computing an inverse repeatedly is
# wasteful, so we pair the matrix with a cache for its inverse.
#
# makeCacheMatrix() wraps a matrix in a closure-based object exposing four
# accessors:
#   set / get                         -- replace / read the matrix
#   setMatrixInverse / getMatrixInverse -- store / read the cached inverse
# Re-setting the matrix invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # the old inverse is stale now
  }
  get <- function() {
    x
  }
  setMatrixInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getMatrixInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setMatrixInverse = setMatrixInverse,
       getMatrixInverse = getMatrixInverse)
}
# cacheSolve() returns the inverse of the special matrix object produced by
# makeCacheMatrix(). If the inverse was already computed it is fetched
# straight from the cache (with a message) and the computation is skipped;
# otherwise solve() computes it and the result is stored in the cache.
# Hypothesis: the matrix is always invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getMatrixInverse()
  if (is.null(cached)) {
    inverse <- solve(x$get(), ...)
    x$setMatrixInverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
## Let's do a sample test!
##> test <- diag(2,6)
##> CachedMarix <- makeCacheMatrix(test)
##> cacheSolve(CachedMarix)
## [,1] [,2] [,3] [,4] [,5] [,6]
##[1,] 0.5 0.0 0.0 0.0 0.0 0.0
##[2,] 0.0 0.5 0.0 0.0 0.0 0.0
##[3,] 0.0 0.0 0.5 0.0 0.0 0.0
##[4,] 0.0 0.0 0.0 0.5 0.0 0.0
##[5,] 0.0 0.0 0.0 0.0 0.5 0.0
##[6,] 0.0 0.0 0.0 0.0 0.0 0.5
|
100c8f746606111fec2d47bab1f2b208542863c2
|
2682467f8cf36d7427a63d947ff0d945e828170c
|
/charter2/2장.R
|
d34c4ed7f8544fe06d19cce47e8a730ba6a3bbf7
|
[] |
no_license
|
byeongmu-jo/R-Statistics
|
ae317b4df2e285892fca02a4689c168a4e05742c
|
afa5affeee5255d2283004196e31e144f4acf35f
|
refs/heads/master
| 2020-06-05T15:06:20.029661
| 2019-07-19T08:49:52
| 2019-07-19T08:49:52
| 192,467,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,073
|
r
|
2장.R
|
# Descriptive-statistics exercise on cafe sales data: coerce the Coffees
# column to numeric, inspect it, and explore mean/median behavior.
# NOTE(review): T is used for TRUE throughout; TRUE is safer (T is reassignable).
data <- read.csv("cafedata.csv", stringsAsFactors = F)
class(data)
str(data)
head(data)
dim(data)
# Coerce the coffee counts to numeric (non-numeric entries become NA)
data$Coffees <- as.numeric(data$Coffees)
class(data$Coffees)
sort(data$Coffees, decreasing = T)
sort(data$Coffees)
min(data$Coffees, na.rm = T)
max(data$Coffees, na.rm = T)
stem(data$Coffees)
rc <- data$Coffees
# Weighted sum with equal weights 1/(n-1), compared against mean()
weight=1/(length(rc)-1)
sum(rc*weight,na.rm=T)
mean(rc,na.rm=T)
# Replace the maximum with an outlier to show the mean's sensitivity
rc[rc == max(rc, na.rm = T)] <- 480
mean(rc,na.rm=T)
# Middle position for an odd-length vector; compare with median()
median.idx <- (1 + length(rc)-1) /2
sort(rc[median.idx])
median(rc, na.rm = T)
(which.max(rc))
library(ggplot2)
library(dplyr)
# Hand-computed population variance and standard deviation for a small
# height sample, then compared against R's built-in var()/sd(), which use
# the sample (n - 1) denominator.
height <- c(164, 166, 168, 170, 172, 174, 176)
height.m <- mean(height)           # sample mean
h.dev <- height - height.m         # deviations from the mean
h.dev2 <- h.dev * h.dev            # squared deviations
sum(h.dev2)                        # sum of squares
variance <- mean(h.dev2)           # population variance: SS divided by n
standard_deviation <- sqrt(variance)
mean(height)
var(height)                        # note: var()/sd() divide by n - 1
sd(height)
# Quartiles and a quick boxplot of the coffee counts (rc carries NAs)
qt <- quantile(rc, na.rm = T)
boxplot(rc, axes=F)
# Tukey outlier fences for the built-in cars data: values above
# Q3 + 1.5*IQR or below Q1 - 1.5*IQR are flagged as outliers.
boxplot(cars, axes = FALSE)
qs <- quantile(cars$dist)   # qs[2] = Q1, qs[4] = Q3
qs
iqr <- qs[4] - qs[2]
upperLimit <- qs[4] + 1.5 * iqr
# BUG FIX: the lower fence is Q1 - 1.5*IQR; the original used qs[4] (Q3),
# which placed the lower limit at Q3 - 1.5*IQR instead.
lowerLimit <- qs[2] - 1.5 * iqr
cars$dist[cars$dist > upperLimit]   # high outliers
cars$dist[cars$dist < lowerLimit]   # low outliers
|
2f2b1b32950d4f9bab6ac24c114f96682beef010
|
aeb58f5f6f35bfe2eae165cd6400b512510e9859
|
/Plot 3.R
|
2fb046a413eeed91c0e5cde0735a85859b24e030
|
[] |
no_license
|
jrmilks74/Exploratory_Data_Analysis_project_2
|
99bd5cfda9211357dbae14189771bedcd5936133
|
063686cf4f6783ec10412e5868ffb02c4c9b33e0
|
refs/heads/main
| 2023-02-03T20:08:20.116143
| 2020-12-21T00:21:21
| 2020-12-21T00:21:21
| 323,128,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 646
|
r
|
Plot 3.R
|
# Exploratory Data Analysis course, Plot 3: Baltimore City PM2.5 emissions
# 1999-2008, one line per emission source type.
# NOTE(review): setwd() with a user-specific path makes this non-portable.
setwd("~/Desktop/Data_science/Exploratory Data Analysis Week 4 project")
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is loaded but never used in this plot — confirm intent.
SCC <- readRDS("Source_Classification_Code.rds")
head(NEI)
# fips 24510 = Baltimore City
BC <- subset(NEI, fips == "24510")
BC$type = as.factor(BC$type)
BC$year = as.factor(BC$year)
# Total emissions per source type per year
BC_type <- aggregate(Emissions~type + year, BC, sum)
ggplot(BC_type, aes(x = year, y = Emissions, color = type, group = type)) +
  theme_bw() +
  geom_point() +
  geom_line() +
  labs(title = "Baltimore City",
       subtitle = "2.5 PM Emissions 1999 - 2008 by Source",
       y = "2.5 PM Emissions (tons)",
       x = "Year")
|
b979e6d624c28a59a655b8578f192d50ce5333b9
|
ba8fa8ddf56a7a7ba582c06dc72779fac49f2c64
|
/man/os_overlay_data.Rd
|
71e02e3b3daaf8821562236606d45b12bc7b503e
|
[
"MIT"
] |
permissive
|
chenpo3725/opensportml
|
addc85c7bf2aaa9df5cd1f22965c7eb3fa3196fe
|
495d107c539360b1a363dce03f6245b09f039392
|
refs/heads/master
| 2023-03-20T21:30:18.030671
| 2021-01-21T08:18:42
| 2021-01-21T08:18:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,061
|
rd
|
os_overlay_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{os_overlay_data}
\alias{os_overlay_data}
\title{Generate data suitable for creating a court overlay plot}
\usage{
os_overlay_data(court = "tennis", space = "court", court_ref, crop = TRUE)
}
\arguments{
\item{court}{string: court to plot, currently only "tennis"}
\item{space}{string: if "court", the data will be in court coordinates. If "image", the data will be transformed to image coordinates via \code{\link[ovideo:ov_transform_points]{ovideo::ov_transform_points()}}}
\item{court_ref}{data.frame: as returned by \code{\link[=os_shiny_court_ref]{os_shiny_court_ref()}}. Only required if \code{space} is "image"}
\item{crop}{logical: if \code{space} is "image", and \code{crop} is TRUE, the data will be cropped to the c(0, 1, 0, 1) bounding box (i.e. the limits of the image, in normalized coordinates). Requires that the \code{sf} package be installed}
}
\value{
A list of data.frames
}
\description{
Generate data suitable for creating a court overlay plot
}
|
c43f419ea117f07bdb477ed9ccf03945f8a76365
|
489c839925b3b192fced36810e02f6c00e921a90
|
/tests/testthat/test-toJSON-zerovec.R
|
b55ffcbf8a75cf9affdfac043ff535565d895ceb
|
[
"MIT"
] |
permissive
|
jeroen/jsonlite
|
0dd77b1f7c8d9b2674ea4f12e9623e8c0ddffc27
|
2d527d9e916b0a8020a1dfcd37d19fb647060292
|
refs/heads/master
| 2023-08-02T06:54:55.429046
| 2023-07-24T20:52:25
| 2023-07-24T20:52:25
| 13,305,534
| 248
| 82
|
NOASSERTION
| 2023-07-24T20:52:27
| 2013-10-03T18:05:25
|
C
|
UTF-8
|
R
| false
| false
| 1,148
|
r
|
test-toJSON-zerovec.R
|
# Pin down jsonlite's handling of zero-length input: every empty
# vector/list/factor/matrix/data.frame/date-time serializes to the empty
# JSON array "[]" (and an empty complex vector in "list" mode to an
# object with empty real/imaginary arrays), never to null.
# Fixed: the description previously said "Encoding Factor Objects",
# which was a copy-paste error -- this file tests zero-length vectors.
test_that("Encoding zero-length vectors", {
  expect_identical(toJSON(character()), "[]")
  expect_identical(toJSON(logical()), "[]")
  expect_identical(toJSON(complex()), "[]")
  expect_identical(toJSON(complex(), complex="list"), "{\"real\":[],\"imaginary\":[]}")
  expect_identical(toJSON(double()), "[]")
  expect_identical(toJSON(integer()), "[]")
  expect_identical(toJSON(list()), "[]")
  expect_identical(toJSON(factor()), "[]")
  expect_identical(toJSON(factor(levels=c("foo", "bar"))), "[]")
  expect_identical(toJSON(matrix(nrow=0, ncol=0)), "[]")
  expect_identical(toJSON(as.matrix(numeric())), "[]")
  expect_identical(toJSON(data.frame()), "[]")
  expect_identical(toJSON(data.frame(foo=vector())), "[]")
  expect_identical(toJSON(data.frame(foo=vector(), bar=logical())), "[]")
  expect_identical(toJSON(Sys.time()[0], POSIXt="string"), "[]")
  expect_identical(toJSON(Sys.time()[0], POSIXt="epoch"), "[]")
  expect_identical(toJSON(Sys.time()[0], POSIXt="mongo"), "[]")
  expect_identical(toJSON(Sys.time()[0], POSIXt="ISO8601"), "[]")
  expect_identical(toJSON(as.Date(Sys.time())[0], POSIXt="ISO8601"), "[]")
})
|
8f399b2c48048f991777894bbf756486fe3399f5
|
d859174ad3cb31ab87088437cd1f0411a9d7449b
|
/autonomics.integrate/man/write_correlations.Rd
|
a03f75447da26876dafb47f828fb5d78df433d45
|
[] |
no_license
|
bhagwataditya/autonomics0
|
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
|
c7ca7b69161e5181409c6b1ebcbeede4afde9974
|
refs/heads/master
| 2023-02-24T21:33:02.717621
| 2021-01-29T16:30:54
| 2021-01-29T16:30:54
| 133,491,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 868
|
rd
|
write_correlations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_correlations.R
\name{write_correlations}
\alias{write_correlations}
\title{Write correlations to file}
\usage{
write_correlations(
cor.dt,
eset1,
eset2,
file,
fvars1 = character(0),
fvars2 = character(0)
)
}
\arguments{
\item{cor.dt}{correlation data table}
\item{eset1}{eset}
\item{eset2}{eset}
\item{file}{file}
\item{fvars1}{eset1 fvars}
\item{fvars2}{eset2 fvars}
}
\description{
Write correlations to file
}
\details{
Note that fvars are included in the file, but feature ids are not
(except when fvars are empty).
}
\examples{
if (require(subramanian.2016)){
cor.dt <- subramanian.2016::top.cor.exiqon.metabolon
eset1 <- subramanian.2016::exiqon
eset2 <- subramanian.2016::metabolon
fvars1 <- character(0)
fvars2 <- c('BIOCHEMICAL', 'SUB_PATHWAY')
}
}
|
986559e9b2bc5a262660a26cbd99812e9a40dd00
|
ad23c9d39337f7c94764d7447dd5c26a2152ad64
|
/src/main/resources/plotting_scripts/segmented_exp.R
|
4fca7c2f779c5cd825e9ee5095c4e89eab9c3a00
|
[] |
no_license
|
junseonghwan/sgmwsmc
|
ee11454b6cfd820023a5d7fd2dff859df0a36ee9
|
a3f304e0e6858e5d97afd760fd820371e92d4f7a
|
refs/heads/master
| 2021-01-21T10:09:28.624569
| 2020-06-04T23:05:24
| 2020-06-04T23:05:24
| 83,387,887
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,307
|
r
|
segmented_exp.R
|
# Plots of knot-matching prediction accuracy for the segmented-board
# experiments: per-board accuracy and Jaccard index for simulated and
# real data, plus a combined figure comparing the two training regimes.
# NOTE(review): all input/output paths are hard-coded to the author's
# machine, so this script only runs as-is in that environment.
library(dplyr)
library(ggplot2)
# Simulated-data results: one row per board with prediction counts.
d<-read.csv("~/Google Drive/Research/papers/probabilistic-matching/sgmwsmc/output/knot-matching/segmented_simulated_data_exp.csv", header=T)
d$accuracy<-d$prediction/d$total            # fraction of edges predicted correctly
d$jaccard_accuracy<-d$jaccard/d$num_nodes   # Jaccard score normalized per node
#p <- ggplot(d, aes(x=as.factor(board), y=accuracy, fill=I("black"))) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- ggplot(d, aes(x=idx, y=accuracy, fill=I("black"))) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- p + theme_bw()
p
ggsave("~/Google Drive/Research/papers/probabilistic-matching/paper/figures/sim_data_segmented_exp.pdf", p)
# Per-board Jaccard index for the simulated data.
p <- ggplot(d, aes(x=as.factor(board), y=jaccard_accuracy, fill=I("black"))) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- p + theme_bw()
p
ggsave("~/Google Drive/Research/papers/probabilistic-matching/paper/figures/sim_data_jaccard.pdf", p)
### Below is the code used to generate the figure for the paper
# Validation run on simulated data (30 boards).
d_simulated<-read.csv("~/Google Drive/Research/papers/probabilistic-matching/sgmwsmc/output/knot-matching/segmented_sim_data_validation_exp.csv", header=T)
d_simulated$idx<-1:30
d_simulated$accuracy<-d_simulated$prediction/d_simulated$total
d_simulated$jaccard_accuracy<-d_simulated$jaccard/d_simulated$num_nodes
sum(d_simulated$prediction)/sum(d_simulated$total)  # overall accuracy across boards
p <- ggplot(d_simulated, aes(x=as.factor(idx), y=accuracy, fill=I("black"))) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- p + theme_bw() + xlab("Board")
p
#ggsave("~/Google Drive/Research/papers/probabilistic-matching/paper/figures/sim_data_segmented_exp.pdf", p)
#d_real<-read.csv("~/Google Drive/Research/papers/probabilistic-matching/sgmwsmc/output/knot-matching/segmented_real_data_exp.csv", header=T)
# Real boards scored with leave-one-out cross-validated EM training.
d_real<-read.csv("~/Google Drive/Research/papers/probabilistic-matching/sgmwsmc/output/knot-matching/segmented_real_boards_em_training.csv", header=T)
d_real$idx<-1:dim(d_real)[1]
d_real$accuracy<-d_real$prediction/d_real$total
sum(d_real$prediction)/sum(d_real$total)  # overall accuracy on real boards
d_real$jaccard_accuracy<-d_real$jaccard/d_real$num_nodes
p <- ggplot(d_real, aes(x=as.factor(idx), y=accuracy, fill=I("black"))) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- p + theme_bw()
p
#ggsave("~/Google Drive/Research/papers/probabilistic-matching/paper/figures/real_data_segmented_exp.pdf", p)
# combine d_simulated and d_real
# Stack both result sets; column 7 of d_simulated is dropped before
# rbind -- presumably an extra column absent from d_real, so the column
# sets line up. TODO confirm against the CSV headers.
d_simulated$TrainingType<-"SIMULATED_DATA"
d_real$TrainingType<-"LOO_CV"
dim(d_real)
dim(d_simulated)
names(d_simulated)
names(d_real)
dd<-rbind(d_simulated[,-7], d_real)
dd$accuracy<-dd$prediction/dd$total
dd$jaccard_accuracy<-dd$jaccard/dd$num_nodes
# Side-by-side accuracy bars per board, colored by training regime.
p <- ggplot(dd, aes(x=as.factor(idx), y=accuracy, fill=TrainingType)) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- p + theme_bw() + xlab("Board") + ylab("Prediction Accuracy") + theme(legend.position="none")
p
ggsave("~/Google Drive/Research/papers/probabilistic-matching/paper/figures/real_data_prediction_accuracy.pdf", p)
p <- ggplot(dd, aes(x=as.factor(idx), y=jaccard_accuracy, fill=TrainingType)) + geom_bar(stat = "identity", position=position_dodge(width=.9))
p <- p + theme_bw() + xlab("Board") + ylab("Jaccard Index")
p
ggsave("~/Google Drive/Research/papers/probabilistic-matching/paper/figures/real_data_jaccard.pdf", p)
|
9e3fca53f99ee6a8fc53cf5dfaedf2db94100080
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/hermiter/man/update_batch.Rd
|
77cef50b92f7b550259dc21226a94fa1602310f1
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,140
|
rd
|
update_batch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hermite_estimator.R
\name{update_batch}
\alias{update_batch}
\title{Updates the Hermite series based estimator with a batch of data}
\usage{
update_batch(this, x)
}
\arguments{
\item{this}{A hermite_estimator_univar or hermite_estimator_bivar object.}
\item{x}{A numeric vector or a numeric matrix. Note that for univariate
estimators, x is a numeric vector of observations to be incorporated. For
bivariate estimators, x is a numeric matrix with n rows for n observations
and 2 columns.}
}
\value{
An object of class hermite_estimator_univar or
hermite_estimator_bivar.
}
\description{
This method can be applied in one-pass batch estimation settings. This
method cannot be used with an exponentially weighted estimator.
}
\examples{
hermite_est <- hermite_estimator(N = 10, standardize = TRUE,
est_type="univariate")
hermite_est <- update_batch(hermite_est, x = c(1, 2))
hermite_est <- hermite_estimator(N = 10, standardize = TRUE,
est_type="bivariate")
hermite_est <- update_batch(hermite_est, x = matrix(c(1,1,2,2,3,3),
nrow=3, ncol=2,byrow=TRUE))
}
|
b51788057aa067075ade21fc8b3ffb6965428a8e
|
b2820746858cf4d36ab88aa4fa94c0c8201ef514
|
/02-R/monty_hall/kevin_monty.R
|
9d2302a246d4bd9afcb44170e05611a0f53c7d72
|
[] |
no_license
|
ml-ai-nlp-ir/gadsdc1
|
fa5c1b3b5c7e073ab78c691da02c34daf46e5338
|
a41731f1e276f52e9b9fe6d73a5a049d7c7df479
|
refs/heads/master
| 2021-01-15T14:01:41.147157
| 2015-03-28T04:42:39
| 2015-03-28T04:42:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,353
|
r
|
kevin_monty.R
|
### function: monty()
### argument: numtries = number of simulations to run
### example: monty(50000)
###
### Simulates the Monty Hall problem twice -- once always keeping the
### original guess, once always switching -- and prints the empirical
### win probability of each strategy. Invisibly returns a named numeric
### vector c(stay = ..., switch = ...) so callers can reuse the values
### (printing behavior is unchanged).
monty <- function(numtries) {
  stopifnot(length(numtries) == 1, numtries >= 1)

  ## APPROACH 1: NOT SWITCHING
  # Preallocated logical vector of win/lose outcomes.
  dontswitch <- vector(mode = "logical", length = numtries)
  for (i in seq_len(numtries)) {
    car <- sample(1:3, 1)    # door hiding the car
    guess <- sample(1:3, 1)  # contestant's guess, kept to the end
    dontswitch[i] <- car == guess
  }

  ## APPROACH 2: SWITCHING
  doswitch <- vector(mode = "logical", length = numtries)
  for (i in seq_len(numtries)) {
    car <- sample(1:3, 1)
    guess <- sample(1:3, 1)
    # The host opens a door that is neither the car nor the guess.
    # When guess != car that door is forced (door labels sum to 6, so
    # the remaining door is 6 - car - guess); when guess == car the host
    # picks one of the two goat doors at random. The contestant then
    # switches to the door that is neither the guess nor the opened one.
    # (Switching wins exactly when the first guess was wrong, so this
    # branching is strictly for illustration.)
    if (car != guess) {
      theyopen <- 6 - car - guess
    } else {
      theyopen <- sample(setdiff(1:3, car), 1)
    }
    newguess <- 6 - theyopen - guess
    doswitch[i] <- car == newguess
  }

  # Empirical win probability for each strategy.
  dontswitch.won <- sum(dontswitch) / numtries
  doswitch.won <- sum(doswitch) / numtries
  cat("Win probability if not switching:\n")
  print(dontswitch.won)
  cat("Win probability if switching:\n")
  print(doswitch.won)
  invisible(c(stay = dontswitch.won, switch = doswitch.won))
}
|
3b04a52da8865d19e1d1504bd603cd035bd9365e
|
ccd6d2d69e7c1e9680b954b96e96c3bb52c865ec
|
/R/var_names.R
|
92218099e70ab85bdd571d6568494ac62aa67c70
|
[] |
no_license
|
jmgirard/sjstats
|
3578aa71921d94f9173b93ca108279e353f2a965
|
d951c125e8d35cc2ff339472a47b1e56aeb72b9a
|
refs/heads/master
| 2020-05-24T04:39:15.173761
| 2019-04-29T14:17:09
| 2019-04-29T14:17:09
| 187,097,228
| 1
| 0
| null | 2019-05-16T20:30:57
| 2019-05-16T20:30:56
| null |
UTF-8
|
R
| false
| false
| 1,512
|
r
|
var_names.R
|
#' @rdname pred_vars
#' @importFrom purrr map_chr
#' @export
var_names <- function(x) {
  # Deprecated shim: character input is cleaned directly, anything else
  # is treated as a model object whose frame supplies the names.
  .Deprecated("insight::clean_names()")
  if (is.character(x)) {
    return(get_vn_helper(x))
  }
  colnames(model_frame(x))
}
#' @importFrom sjmisc is_empty trim
#' @importFrom purrr map_chr
# Strip function-call wrappers from formula term labels, returning the
# bare variable names. E.g. "log(x)" -> "x", "s(age)" -> "age",
# "strata(sex)" -> "sex".
get_vn_helper <- function(x) {
  # Return early for empty input; the iteration below assumes at least
  # one element.
  if (sjmisc::is_empty(x)) return("")

  # Wrappers that may surround a variable name in a model formula:
  # gam/loess smoothers (s, lo, te, ti, ...), survival's strata(),
  # transformations (log, poly, scale, ...). "log-log" is a
  # pseudo-pattern handled specially for nested log(log(x)) terms.
  pattern <- c(
    "as.factor", "factor", "offset", "log-log", "log", "lag", "diff", "lo", "bs", "ns",
    "t2", "te", "ti", "tt", "mi", "mo", "gp", "pspline", "poly", "strata", "scale",
    "interaction", "s", "I"
  )

  # For each element, strip every wrapper in turn, keeping the capture
  # region that holds the "cleaned" variable name.
  # (seq_along replaces the unsafe 1:length(x) idiom.)
  purrr::map_chr(seq_along(x), function(i) {
    for (p in pattern) {
      if (p == "offset") {
        x[i] <- sjmisc::trim(unique(sub("^offset\\(([^-+ )]*).*", "\\1", x[i])))
      } else if (p == "I") {
        x[i] <- sjmisc::trim(unique(sub("I\\((\\w*).*", "\\1", x[i])))
      } else if (p == "log-log") {
        x[i] <- sjmisc::trim(unique(sub("^log\\(log\\(([^,)]*)).*", "\\1", x[i])))
      } else {
        x[i] <- unique(sub(paste0("^", p, "\\(([^,)]*).*"), "\\1", x[i]))
      }
    }
    # For coxme models, drop the random-effect part before the "|".
    sjmisc::trim(sub("^(.*)\\|(.*)", "\\2", x[i]))
  })
}
|
30729bb59a88da18ee7df24fef20545348bff590
|
9b381ed9641d81fc0853a44473d80d93db05422f
|
/Assignment1_RDD/code/r_code/reference/hansen-replication_BriceGreen.r
|
2969f69b36e02dbbe4d551b38d27ef875e26378f
|
[] |
no_license
|
nfra/CausalInferenceCourseHomework
|
f56889f41455d71a575e3ef8b0af4a02e53569d5
|
b9660468ca08dc30a404b66834a24c4c23fccdbe
|
refs/heads/master
| 2022-09-01T13:08:28.590473
| 2020-05-19T19:51:04
| 2020-05-19T19:51:04
| 242,606,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,994
|
r
|
hansen-replication_BriceGreen.r
|
###################
# Author: Brice Green
# Last Edited: 2/23/2020
# Summary: Reproduce Chris Hansen's 2013
# AER paper examining drunk driving
###################
pkgs <- c("data.table", # best data package of ALL TIME
          "rdrobust",   # robust kernel regression for rdd
          "rdd",        # regression discontinuity design
          "magrittr",   # pipes for functional programming
          "haven",      # read .dta files in R
          "ggplot2",    # plots
          "ggthemes",   # plot themes
          "stargazer")  # tables

# Install any missing packages, then attach everything.
# Fixed: the previous version installed a missing package but never
# attached it (the library call only ran in the already-installed
# branch). lapply replaces sapply for a stable return type, and the
# result is wrapped in invisible() to suppress console noise.
invisible(lapply(pkgs, function(x) {
  if (!requireNamespace(x, quietly = TRUE)) {
    install.packages(x)
  } else {
    message(paste0(x, " already installed, attaching library now."))
  }
  library(x, character.only = TRUE)
}))
# Load the DWI administrative records (Stata format) as a data.table.
hansen_data <- as.data.table(read_dta("data/hansen_dwi.dta"))
# construct cutoff variable
# Treatment indicator: 1 when measured BAC is at/above the 0.08 legal
# limit, 0 below.
hansen_data[bac1 < 0.08, aboveBACThreshold := 0]
hansen_data[bac1 >= 0.08, aboveBACThreshold := 1]
# create variable centered at the threshold
hansen_data[,bac1MinusThresh := bac1 - 0.08]
# Running variable interacted with treatment (slope change above cutoff).
hansen_data[,bac1LevelOverThreshold := bac1MinusThresh*aboveBACThreshold]
# generate histogram used as "Figure 1" in Hansen's paper
# (manipulation check: look for bunching of BAC readings at the cutoff)
bac_hist <- ggplot(hansen_data, aes(x = bac1)) +
  geom_histogram(binwidth = 0.001) +
  geom_vline(xintercept = 0.08) +
  theme_minimal() +
  ggtitle("Frequency of Measurements of Blood Alcohol Levels",
          subtitle = "Bin width of 0.001, corresponding to instrument's measurement process") +
  xlab("BAC") +
  ylab("Frequency")
# save in Figures/ directory
ggsave(plot = bac_hist,
       file = "Figures/hansen-figure1-bac-hist.png",
       dpi = 300,
       width = 9,
       height = 6)
# run all of the regressions!
# Covariate balance: regress each predetermined covariate on the
# treatment dummy, the centered running variable, and their interaction.
fits <-
  list("male",
       "white",
       "aged",
       "acc") %>%
  lapply(function(x, DT) {
    lm(as.formula(paste0(x, " ~ aboveBACThreshold*bac1MinusThresh")), data = DT)
  }, DT = hansen_data)
# Heteroskedasticity-robust ("asymptotic") standard errors for a fitted
# lm object: square roots of the diagonal of the HC1 robust
# variance-covariance matrix, suitable for stargazer's `se` argument.
get_robust_ses <- function(fit) {
  robust_vcov <- sandwich::vcovHC(fit, type = "HC1")
  sqrt(diag(robust_vcov))
}
# print out a nice table
# Covariate-balance table: one column per covariate regression, showing
# only the DUI (threshold) coefficient with robust standard errors.
# capture.output() keeps stargazer quiet while still writing the .tex.
tbl <- capture.output(stargazer(fits, header = F, style = "aer",
          title = "Covariate Balance Tests",
          column.labels = c("Male", "White","Age", "Accidents"),
          covariate.labels = c("DUI"),
          omit = c("Constant", "bac1MinusThresh",
                   "aboveBACThreshold:bac1MinusThresh"),
          se = lapply(fits, get_robust_ses),
          dep.var.caption = "",
          dep.var.labels.include = F,
          out = "Tables/covariate-balance-tests.tex"))
# generate binned data for the plot
# using the binwidth 0.001
# Bin BAC into 0.001-wide buckets, attach each bucket's left endpoint
# as `level`, reshape covariates to long form, and flag bins at/above
# the 0.08 legal threshold so the two sides can be fit separately.
plot_data <-
  hansen_data[,.(bac1, bin = findInterval(bac1, seq(0, 1,
                                                    by = 0.001),
                                          all.inside = T),
                 aged, male, white, acc)] %>%
  merge(data.table(bin = 1:1001, level = seq(0, 1, by = 0.001)),
        by = "bin") %>%
  melt(c("bin","bac1","level"),
       variable.name = "Covariate",
       value.name = "Value") %>%
  .[,overThreshold := fifelse(level >= 0.08, 1, 0)]
# make panel names pretty
# Map a covariate column name to a human-readable facet/panel title.
# Unknown names fall back to simple title case.
label_panel <- function(cov) {
  cov <- as.character(cov)
  # switch() with an unnamed final element as the default replaces the
  # original if/else if chain; behavior is identical.
  switch(cov,
    aged  = "Age",
    white = "White",
    acc   = "Accident at Scene",
    male  = "Male",
    stringr::str_to_title(cov)
  )
}
# Vectorized labeller for ggplot2 facets: ggplot2 hands the labeller a
# data frame of label variables, so apply the single-value formatter to
# every value of every variable (column).
label_panels <- function(cov) {
  lapply(cov, function(labels) sapply(labels, label_panel))
}
# replicate figure 2
# Covariate balance around the 0.08 cutoff: binned means of each
# covariate against BAC, with separate linear fits on each side of the
# threshold. Window restricted to BAC < 0.2.
lin_cov_balance_plots <- ggplot(
  plot_data[level < 0.2],
  aes(x = level, y = Value, group = overThreshold)
  ) +
  stat_summary(fun.y = "mean", geom = "point") +
  geom_smooth(method = 'lm') +
  facet_wrap(~Covariate, scales = 'free_y',
             labeller = label_panels) +
  theme_fivethirtyeight() +
  geom_vline(xintercept = 0.08) +
  ggtitle("Measuring Covariate Balance at the Threshold",
          subtitle = "Linear model with y ~ x")
# save in Figures/ directory
ggsave(plot = lin_cov_balance_plots,
       file = "Figures/lin_cov_balance_plots.png",
       dpi = 300,
       width = 9,
       height = 9)
# replicate figure 2, quadratic formula
# Same balance plots with a quadratic fit on each side of the cutoff.
quad_cov_balance_plots <- ggplot(plot_data[level < 0.2],
                                 aes(x = level,
                                     y = Value,
                                     group = overThreshold)) +
  stat_summary(fun.y = "mean", geom = "point") +
  geom_smooth(method = 'lm', formula = y ~ x + I(x^2)) +
  facet_wrap(~Covariate, scales = 'free_y',
             labeller = label_panels) +
  theme_fivethirtyeight() +
  geom_vline(xintercept = 0.08) +
  ggtitle("Measuring Covariate Balance at the Threshold",
          subtitle = bquote("Linear model with y ~ x + " ~x^2))
# save in Figures/ directory
ggsave(plot = quad_cov_balance_plots,
       file = "Figures/quad_cov_balance_plots.png",
       dpi = 300,
       width = 9,
       height = 9)
## First bandwidth, bac \in (0.03 0.13)
# Panel A: LATE of crossing the DUI threshold on recidivism, estimated
# within the wide bandwidth [0.03, 0.13] under three nested
# specifications of the running-variable control.
# control for bac1 linearly
lin_control <- lm(recidivism ~ 1 + white +
                    aged + male +
                    bac1 +
                    aboveBACThreshold,
                  data = hansen_data[bac1 >= 0.03 & bac1 <= 0.13])
# add interaction with threshold
# (allows the BAC slope to differ on each side of the cutoff)
lin_plus_interact <- lm(recidivism ~ 1 + white + aged +
                          male +
                          bac1*aboveBACThreshold,
                        data = hansen_data[bac1 >= 0.03 & bac1 <= 0.13])
# add quadratic controls
# (quadratic in BAC, also interacted with the threshold dummy)
quad_plus_interact <- lm(recidivism ~ 1 + white +
                           aged +
                           male + aboveBACThreshold +
                           bac1 + bac1:aboveBACThreshold +
                           I(bac1^2) +
                           I(bac1^2):aboveBACThreshold,
                         data = hansen_data[bac1 >= 0.03 &
                                              bac1 <= 0.13])
rd_panel_a <-
  list(
    lin_control,
    lin_plus_interact,
    quad_plus_interact
  )
# Write panel A to LaTeX, reporting only the DUI (threshold) coefficient
# with robust standard errors; all other regressors are omitted.
tbl <- capture.output(stargazer(rd_panel_a, header = F, style = "aer",
          title = "LATE Estimates under different specifications for subsample between 0.03 and 0.13 BAC",
          column.labels = c(
            "Linear Control",
            "With Interaction",
            "Quadratic Controls"
          ),covariate.labels = "DUI",
          omit = c(
            setdiff(names(
              quad_plus_interact$coefficients),
              "aboveBACThreshold"
            ),
            "Constant"),
          se = lapply(rd_panel_a, get_robust_ses),
          out = "Tables/rd-panel-a.tex"))
## second bandwidth bac \in (0.055, 0.105)
# Panel B: same three specifications on the narrow bandwidth
# [0.055, 0.105]. (Comment fixed: the lower bound is 0.055, not 0.55.)
# control for bac1 linearly
lin_control_panelb <- lm(recidivism ~ 1 + white +
                           aged + male +
                           bac1 +
                           aboveBACThreshold,
                         data = hansen_data[bac1 >= 0.055 &
                                              bac1 <= 0.105])
# add interaction with threshold
lin_plus_interact_panelb <- lm(recidivism ~ 1 + white + aged +
                                 male +
                                 bac1*aboveBACThreshold,
                               data = hansen_data[bac1 >= 0.055 &
                                                    bac1 <= 0.105])
# add quadratic controls
quad_plus_interact_panelb <- lm(recidivism ~ 1 + white +
                                  aged +
                                  male + aboveBACThreshold +
                                  bac1 + bac1:aboveBACThreshold +
                                  I(bac1^2) +
                                  I(bac1^2):aboveBACThreshold,
                                data = hansen_data[bac1 >= 0.055 &
                                                     bac1 <= 0.105])
rd_panel_b <-
  list(
    lin_control_panelb,
    lin_plus_interact_panelb,
    quad_plus_interact_panelb
  )
# Write panel B to LaTeX, again reporting only the DUI coefficient with
# robust standard errors.
tbl <- capture.output(stargazer(rd_panel_b, header = F, style = "aer",
          title = "LATE Estimates under different specifications for subsample between 0.055 and 0.105 BAC",
          column.labels = c(
            "Linear Control",
            "With Interaction",
            "Quadratic Controls"
          ),covariate.labels = "DUI",
          omit = c(
            setdiff(names(
              quad_plus_interact$coefficients),
              "aboveBACThreshold"
            ),
            "Constant"),
          se = lapply(rd_panel_b, get_robust_ses),
          out = "Tables/rd-panel-b.tex"))
# generate figure 3, RD plots
# Bin recidivism by 0.001-wide BAC buckets for the regression
# discontinuity figure (all offenders).
rd_plot_data <- hansen_data[,.(bac1, recidivism,
                               bin = findInterval(bac1, seq(0, 1,
                                                            by = 0.001),
                                                  all.inside = T))] %>%
  merge(data.table(bin = 1:1001, level = seq(0, 1, by = 0.001)),
        by = "bin") %>%
  .[,overThreshold := fifelse(level >= 0.08, 1, 0)]
# replicate figure 3: linear fit on each side of the cutoff
# (comment fixed: this draws figure 3, not figure 2)
linear_rd_plot <- ggplot(rd_plot_data[level < 0.15], aes(x = level,
                                                         y = recidivism,
                                                         group = overThreshold)) +
  stat_summary(fun.y = "mean", geom = "point") +
  geom_smooth(method = 'lm') +
  theme_fivethirtyeight() +
  geom_vline(xintercept = 0.08) +
  ggtitle("Regression Discontinuity: All Offenders",
          subtitle = "Linear model with y ~ x")
# save in Figures/ directory
ggsave(plot = linear_rd_plot,
       file = "Figures/linear_rd_plot.png",
       dpi = 300,
       width = 9,
       height = 9)
# Quadratic version of the same RD figure.
quad_rd_plot <- ggplot(rd_plot_data[level < 0.15], aes(x = level,
                                                       y = recidivism,
                                                       group = overThreshold)) +
  stat_summary(fun.y = "mean", geom = "point") +
  geom_smooth(method = 'lm', formula = y ~ x + I(x^2)) +
  theme_fivethirtyeight() +
  geom_vline(xintercept = 0.08) +
  ggtitle("Regression Discontinuity: All Offenders",
          subtitle = "Linear model with y ~ x + x^2")
# save in Figures/ directory
ggsave(plot = quad_rd_plot,
       file = "Figures/quad_rd_plot.png",
       dpi = 300,
       width = 9,
       height = 9)
|
83da0f13af0ad6ad123a80f8b065688e1dc65bb9
|
df5b20d6f0958f4e2d817cc2d17c1d7397235cf9
|
/R/eWrapper.data.R
|
c9563eb5a3a998fc5fb60a6eed4d8c4d93d01a25
|
[] |
no_license
|
joshuaulrich/IBrokers
|
95e29522f1f9cd6bd2eb9a615b00c1b29aaa582a
|
ac8f12cff2f884044061fb458d4902372be881c4
|
refs/heads/master
| 2023-07-06T13:40:11.976460
| 2023-06-30T15:09:12
| 2023-06-30T15:09:12
| 32,220,781
| 65
| 61
| null | 2023-04-20T15:18:07
| 2015-03-14T16:23:55
|
R
|
UTF-8
|
R
| false
| false
| 2,883
|
r
|
eWrapper.data.R
|
# eWrapper.data is a event wrapper that
# updates an in memory data base of values
# upon new input from the TWS
#
# This is only implemented for realtimeBars callbacks
# at present, but will be extended in the near future
# to include all events

# Build an eWrapper whose tickPrice/tickSize callbacks maintain, for
# each of n symbols (market-data request ids), a one-row xts snapshot
# of the current market state: bid/ask size and price, last trade
# price/size, and volume. The request id `msg[2]` indexes the snapshot
# list.
eWrapper.data <- function(n) {
  # internally updated data
  # .data. <- character(8)
  #
  # get.data <- function() return(.data.)
  #
  eW <- eWrapper(NULL) # use basic template
  # One 1x7 all-NA xts row per symbol; column names match the TWS tick
  # fields that the callbacks below update.
  eW$assign.Data("data", rep(list(structure(.xts(matrix(rep(NA_real_,7),ncol=7),0),
                            .Dimnames=list(NULL,
                                           c("BidSize","BidPrice",
                                             "AskPrice","AskSize",
                                             "Last","LastSize","Volume")))),n))
  # Price ticks: update the bid/ask/last price columns for the symbol
  # whose request id is msg[2], stamping the row with the current time.
  eW$tickPrice <- function(curMsg, msg, timestamp, file, ...)
  {
    tickType = msg[3]
    msg <- as.numeric(msg)
    id <- msg[2] #as.numeric(msg[2])
    data <- eW$get.Data("data") #[[1]] # list position of symbol (by id == msg[2])
    # Refresh the single row's timestamp in place rather than appending.
    attr(data[[id]],"index") <- as.numeric(Sys.time())
    # data[[1]] <- rbind(data[[1]],.xts(matrix(rep(NA_real_,7),nc=7), Sys.time()))
    nr.data <- NROW(data[[id]])
    #data[[id]][1] <- as.numeric(Sys.time()) #timestamp
    # NOTE(review): BID writes msg[5:4] into (BidSize, BidPrice) while
    # ASK writes msg[4:5] into (AskPrice, AskSize) -- presumably price
    # sits at msg[4] and size at msg[5]; confirm against the TWS API.
    if(tickType == .twsTickType$BID) {
      data[[id]][nr.data,1:2] <- msg[5:4]
    } else
    if(tickType == .twsTickType$ASK) {
      data[[id]][nr.data,3:4] <- msg[4:5]
    } else
    if(tickType == .twsTickType$LAST) {
      data[[id]][nr.data,5] <- msg[4]
    }
    eW$assign.Data("data", data)
    c(curMsg, msg) # echo the raw message back to the caller
  }
  # Size ticks: update bid/ask/last size and the volume column.
  eW$tickSize <- function(curMsg, msg, timestamp, file, ...)
  {
    data <- eW$get.Data("data")
    tickType = msg[3]
    msg <- as.numeric(msg)
    id <- as.numeric(msg[2])
    # data[[1]] <- rbind(data[[1]],.xts(matrix(rep(NA_real_,7),nc=7), Sys.time()))
    attr(data[[id]],"index") <- as.numeric(Sys.time())
    nr.data <- NROW(data[[id]])
    #data[[id]][1] <- as.numeric(Sys.time()) #timestamp
    if(tickType == .twsTickType$BID_SIZE) {
      data[[id]][nr.data,1] <- msg[4]
    } else
    if(tickType == .twsTickType$ASK_SIZE) {
      data[[id]][nr.data,4] <- msg[4]
    } else
    if(tickType == .twsTickType$LAST_SIZE) {
      data[[id]][nr.data,6] <- msg[4]
    } else
    if(tickType == .twsTickType$VOLUME) {
      data[[id]][nr.data,7] <- msg[4]
    }
    eW$assign.Data("data", data)
    c(curMsg, msg)
  }
  return(eW)
}
# Event wrapper whose realtimeBars callback stores the most recent
# real-time bar for each request id: field 3 of the message is written
# to slot 1 and fields 4..10 to slots 2..8 of that id's data entry
# (presumably the bar time followed by OHLC/volume/WAP/count -- confirm
# against the TWS realtimeBars message layout).
eWrapper.RealTimeBars <- function(nbars=1, nsymbols=1) {
  wrapper <- eWrapper(NULL)  # start from the plain template
  wrapper$realtimeBars <- function(curMsg, msg, timestamp, file, ...) {
    req.id <- as.numeric(msg[2])
    store <- wrapper$get.Data("data")
    store[[req.id]][1] <- as.numeric(msg[3])
    store[[req.id]][2:8] <- as.numeric(msg[4:10])
    wrapper$assign.Data("data", store)
    c(curMsg, msg)  # echo the raw message back to the caller
  }
  return(wrapper)
}
|
29095da44c84a6ff5e3dcc49bb5f9b96311195ee
|
02863cfdbd8b8f6cb56b7446368a6aef8b310fdb
|
/tests/testthat.R
|
e78435f691a38ba3a3e962cfc42c8471efe1d9ec
|
[
"Apache-2.0"
] |
permissive
|
gravitytrope/fasstr
|
cbf6ba7c7a09362fba043286891a005ec77a11d8
|
66fa223a463feec227fba5c186044cdd1c3787a0
|
refs/heads/master
| 2020-12-20T16:04:51.917662
| 2020-01-21T00:07:05
| 2020-01-21T00:07:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
testthat.R
|
# Standard testthat entry point: attach the test framework, the package
# under test, and dplyr (used inside the tests), then run the full
# suite under tests/testthat/.
library(testthat)
library(fasstr)
library(dplyr)
test_check("fasstr")
|
612d67471348c3229d1b4307a3f0bc4b105f8d88
|
0c25351947af94ebec9f6f1785a8d9925c1ab36b
|
/getData.R
|
ea17080bacac340122a16e845723c7a46bf4c3a7
|
[] |
no_license
|
amchercashin/EDA_CP1_BaseAndGgplot2
|
2cdde281a6da433b63d7cb8df901a9da3272e555
|
7d89a82181015ba4521c2d0633d72974eb0c555d
|
refs/heads/master
| 2016-09-05T19:14:55.417577
| 2014-12-17T11:32:18
| 2014-12-17T11:32:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
r
|
getData.R
|
#Create dir and download file
# Download the NEI zip archive into ./data exactly once; mode = "wb"
# keeps the binary zip intact on Windows.
if (!file.exists("./data")) {dir.create("./data")}
if (!file.exists("./data/NEIdata.zip")) {download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
                                                       , "./data/NEIdata.zip", mode = "wb")}
#Unzip unless already done
# Each .rds is extracted individually so a partial previous run is
# completed rather than repeated.
if (!file.exists("./data/summarySCC_PM25.rds")) {unzip("./data/NEIdata.zip",
                                                       files = "summarySCC_PM25.rds",
                                                       exdir = "./data")}
if (!file.exists("./data/Source_Classification_Code.rds")) {unzip("./data/NEIdata.zip",
                                                                  files = "Source_Classification_Code.rds",
                                                                  exdir = "./data")}
#Read data
# NEI: PM2.5 emissions records; SCC: source classification lookup table.
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")
|
c513318efc5ae15a8c77095c266b66b5a6f92a34
|
27674239c0da0b7afc6ad9dc2622e084c3f5c004
|
/data/longthin.R
|
827015bdbccf0c4be42245d47d0319eae7052e44
|
[] |
no_license
|
RobinHankin/knotR
|
112248605c8a89a21641be35f2363c19db1c3783
|
0a5a6015a51340faa1ee43066d76be8f39adb499
|
refs/heads/master
| 2023-05-15T03:19:57.311824
| 2023-05-14T09:03:35
| 2023-05-14T09:03:35
| 99,854,849
| 5
| 0
| null | 2017-10-15T04:48:05
| 2017-08-09T21:37:28
|
R
|
UTF-8
|
R
| false
| false
| 4,504
|
r
|
longthin.R
|
`longthin` <-
structure(list(minsymvec = structure(c(-28.5035329158461, -30.6427305021048,
-28.9035709343773, -21.6308687829837, 25.1745791478077, -61.9341949962751,
-75.0995084417725, -23.9080636588711, -28.6448878135769, -28.7626628508933,
-44.1271090946195, -247.416747588595, -268.383683896461, -28.7342881221848,
-771.929754713053, -511.808029168039, -261.122920597492, 4.44558502219131,
361.0469144805, 227.786305535864, 8.66565017958346, -258.500073476994,
-508.697026189732, -839.675864269374, -528.565377459585, -181.243896088559,
-26.3040487237808, -27.101723239265, -29.0455086411953, -27.7612150202702,
-77.2243665811211, -41.2155060527248, -26.3514840378754, -26.0770858783953,
-28.8243698620585, -138.970553825386, -250.222376074292, -211.463525323319,
-711.508693169178, -444.996177414323, -187.772055004132, 66.4438538042949,
374.735281942228, 317.154941904633, 186.893835603822, -53.8862181498383,
-321.479556608727, -581.652497540376, -771.828591644233, -489.292760269357,
32.118926721989, 249.44483988629), .Names = c("handle_A1", "handle_A3",
"handle_A5", "handle_A7", "handle_A10", "handle_A11", "handle_A12",
"handle_A14", "handle_A16", "handle_A18", "handle_A20", "handle_A21",
"handle_A22", "handle_A23", "handle_A27", "handle_A29", "handle_A31",
"handle_A33", "handle_A37", "handle_A38", "handle_A40", "handle_A42",
"handle_A44", "handle_A46", "handle_A47", "handle_A48", "node1",
"node3", "node5", "node7", "node11", "node12", "node14", "node16",
"node18", "node20", "node21", "node22", "node27", "node29", "node31",
"node33", "node36", "node37", "node38", "node40", "node42", "node44",
"node46", "node47", "node48", "node49"), class = "minsymvec"),
overunderobj = structure(c(1, 19, 3, 17, 5, 15, 7, 13, 9,
23, 20, 2, 18, 4, 16, 6, 14, 8, 24, 12), .Dim = c(10L, 2L
)), symobj = structure(list(Mver = structure(c(11, 22, 21,
20, 1, 18, 3, 16, 5, 14, 7, 12, 9, 24, 25, 26, 19, 2, 17,
4, 15, 6, 13, 8), .Dim = c(12L, 2L)), xver = c(10, 23), Mhor = NULL,
xhor = NULL, Mrot = NULL, mcdonalds = FALSE, celtic = FALSE,
indep = structure(c(TRUE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE,
FALSE, FALSE), .Names = c("handle_A1", "handle_A2", "handle_A3",
"handle_A4", "handle_A5", "handle_A6", "handle_A7", "handle_A8",
"handle_A9", "handle_A10", "handle_A11", "handle_A12",
"handle_A13", "handle_A14", "handle_A15", "handle_A16",
"handle_A17", "handle_A18", "handle_A19", "handle_A20",
"handle_A21", "handle_A22", "handle_A23", "handle_A24",
"handle_A25", "handle_A26", "handle_A27", "handle_A28",
"handle_A29", "handle_A30", "handle_A31", "handle_A32",
"handle_A33", "handle_A34", "handle_A35", "handle_A36",
"handle_A37", "handle_A38", "handle_A39", "handle_A40",
"handle_A41", "handle_A42", "handle_A43", "handle_A44",
"handle_A45", "handle_A46", "handle_A47", "handle_A48",
"handle_A49", "handle_A50", "handle_A51", "handle_A52",
"node1", "node2", "node3", "node4", "node5", "node6",
"node7", "node8", "node9", "node10", "node11", "node12",
"node13", "node14", "node15", "node16", "node17", "node18",
"node19", "node20", "node21", "node22", "node23", "node24",
"node25", "node26", "node27", "node28", "node29", "node30",
"node31", "node32", "node33", "node34", "node35", "node36",
"node37", "node38", "node39", "node40", "node41", "node42",
"node43", "node44", "node45", "node46", "node47", "node48",
"node49", "node50", "node51", "node52"))), .Names = c("Mver",
"xver", "Mhor", "xhor", "Mrot", "mcdonalds", "celtic", "indep"
))), .Names = c("minsymvec", "overunderobj", "symobj"), class = "knot")
|
8e464476a50cb5873271f1b4b2e0081a46c3df0b
|
9289d9822353467a0cdfe8faa8cd67abf7dc3a03
|
/data-raw/fsr_data.R
|
674902d6f1e88dcae20a8817d1f9f07f637abd64
|
[] |
no_license
|
Tony-Chen-Melbourne/readrba
|
0e2424a58ff272142e75d31437f29cd2c5477c89
|
1d70c3a3cc43c24179445795a643107e9ac747df
|
refs/heads/master
| 2022-12-16T20:06:21.595378
| 2020-09-19T11:03:32
| 2020-09-19T11:03:32
| 293,800,782
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
fsr_data.R
|
## code to prepare `mydataset` dataset goes here
# Data from here: https://www.rba.gov.au/about-rba/history/governors/
library(dplyr)
library(lubridate)
library(magrittr)
usethis::use_data(governor_tenure, overwrite = TRUE)
|
83e095ba93355391a374ef564b2201d2d9c40599
|
67de61805dd839979d8226e17d1316c821f9b1b4
|
/R/MxFitFunctionAlgebra.R
|
a3b8f4715bb57c67afba0cb3911ab0fcf5960a22
|
[
"Apache-2.0"
] |
permissive
|
falkcarl/OpenMx
|
f22ac3e387f6e024eae77b73341e222d532d0794
|
ee2940012403fd94258de3ec8bfc8718d3312c20
|
refs/heads/master
| 2021-01-14T13:39:31.630260
| 2016-01-17T03:08:46
| 2016-01-17T03:08:46
| 49,652,924
| 1
| 0
| null | 2016-01-14T14:41:06
| 2016-01-14T14:41:05
| null |
UTF-8
|
R
| false
| false
| 5,155
|
r
|
MxFitFunctionAlgebra.R
|
#
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
setClass(Class = "MxFitFunctionAlgebra",
representation = representation(
algebra = "MxCharOrNumber",
units = "character",
numObs = "numeric",
numStats = "numeric",
gradient = "MxCharOrNumber",
hessian = "MxCharOrNumber",
verbose = "integer"),
contains = "MxBaseFitFunction")
setMethod("initialize", "MxFitFunctionAlgebra",
function(.Object, algebra, units, numObs, numStats, gradient, hessian, verbose, name = 'fitfunction') {
.Object@name <- name
.Object@algebra <- algebra
.Object@units <- units
.Object@numObs <- numObs
.Object@numStats <- numStats
.Object@gradient <- gradient
.Object@hessian <- hessian
.Object@verbose <- verbose
return(.Object)
}
)
setMethod("genericFitDependencies", signature("MxFitFunctionAlgebra"),
function(.Object, flatModel, dependencies) {
dependencies <- callNextMethod()
for (sl in c('algebra', 'gradient', 'hessian')) {
thing <- slot(.Object, sl)
if (is.na(thing)) next
dependencies <- imxAddDependency(thing, .Object@name, dependencies)
}
return(dependencies)
})
setMethod("genericFitFunConvert", signature("MxFitFunctionAlgebra"),
function(.Object, flatModel, model, labelsData, dependencies) {
name <- .Object@name
algebra <- .Object@algebra
if (is.na(algebra) && is.na(.Object@gradient) && is.na(.Object@hessian)) {
modelname <- imxReverseIdentifier(model, .Object@name)[[1]]
msg <- paste("The algebra name cannot be NA",
"for the algebra fit function of model", omxQuotes(modelname))
stop(msg, call. = FALSE)
}
modelname <- imxReverseIdentifier(model, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
if (expectName %in% names(flatModel@expectations)) {
expectIndex <- imxLocateIndex(flatModel, expectName, name)
} else {
expectIndex <- as.integer(NA)
}
.Object@expectation <- expectIndex
for (sl in c('algebra', 'gradient', 'hessian')) {
slot(.Object, sl) <- imxLocateIndex(flatModel, slot(.Object, sl), name)
}
return(.Object)
})
setMethod("qualifyNames", signature("MxFitFunctionAlgebra"),
function(.Object, modelname, namespace) {
.Object@name <- imxIdentifier(modelname, .Object@name)
for (sl in c('algebra', 'gradient', 'hessian')) {
slot(.Object, sl) <- imxConvertIdentifier(slot(.Object, sl), modelname, namespace)
}
return(.Object)
})
setMethod("genericFitRename", signature("MxFitFunctionAlgebra"),
function(.Object, oldname, newname) {
for (sl in c('algebra', 'gradient', 'hessian')) {
slot(.Object, sl) <- renameReference(slot(.Object, sl), oldname, newname)
}
return(.Object)
})
setMethod("generateReferenceModels", "MxFitFunctionAlgebra",
function(.Object, model) {
msg <- paste("Don't know how to make reference models for a model with a ",
class(.Object), " fit function.", sep="")
msg <- paste(msg, "\n",
"If you're using this for a mutligroup model, very likely, you can replace your mxFitFunctionAlgebra() call with", "\n",
"mxFitFunctionMultigroup(c('submodelName1', 'submodelName2', ...))", "\n\n",
"See ?mxFitFunctionMultigroup() to learn more.", sep="")
stop(msg)
})
mxFitFunctionAlgebra <- function(algebra, numObs = NA, numStats = NA, ...,
gradient=NA_character_, hessian=NA_character_,
verbose=0L, units="-2lnL")
{
garbageArguments <- list(...)
if (length(garbageArguments) > 0) {
stop("mxFitFunctionAlgebra does not accept values for the '...' argument")
}
if (is.null(algebra)) {
algebra <- NA_character_
} else if (missing(algebra) || typeof(algebra) != "character") {
stop("Algebra argument is not a string (the name of the algebra)")
}
if (single.na(numObs)) {
numObs <- as.numeric(NA)
}
if (single.na(numStats)) {
numStats <- as.numeric(NA)
}
return(new("MxFitFunctionAlgebra", algebra, units, numObs, numStats, gradient, hessian, verbose))
}
displayMxFitFunctionAlgebra <- function(fitfunction) {
cat("MxFitFunctionAlgebra", omxQuotes(fitfunction@name), '\n')
cat("$algebra: ", omxQuotes(fitfunction@algebra), '\n')
cat("$units: ", omxQuotes(fitfunction@units), '\n')
cat("$numObs: ", fitfunction@numObs, '\n')
cat("$numStats: ", fitfunction@numStats, '\n')
if (length(fitfunction@result) == 0) {
cat("$result: (not yet computed) ")
} else {
cat("$result:\n")
}
print(fitfunction@result)
invisible(fitfunction)
}
setMethod("print", "MxFitFunctionAlgebra", function(x,...) {
displayMxFitFunctionAlgebra(x)
})
setMethod("show", "MxFitFunctionAlgebra", function(object) {
displayMxFitFunctionAlgebra(object)
})
|
34cebfc2de45b96b1ff0da9f092824a1efc76be6
|
5db2dac679963587ac50ad850ea3a2ccb508465a
|
/phd-scripts/R/metasim.R
|
61864b9b668296c81c6900cee778b4d6677ef3a6
|
[
"MIT"
] |
permissive
|
softloud/simeta
|
be88fe336eeee9610086823adce839493781c0ef
|
2a7e979077c57812a7d29c3e23e8c00080e1cb03
|
refs/heads/master
| 2023-04-16T23:27:16.936986
| 2023-03-25T11:49:23
| 2023-03-25T11:49:23
| 200,359,586
| 2
| 2
|
NOASSERTION
| 2020-01-28T09:55:16
| 2019-08-03T09:56:12
|
HTML
|
UTF-8
|
R
| false
| false
| 1,112
|
r
|
metasim.R
|
#' one row, one simulations
#'
#' runs on one row, returns coverage probability
#'
#' @param trial_fn the function to repeat
#' @param trials the number of trials per simulation
#' @param ... \code{trial_fn} arguments, i.e., simulation nparameters
#' @inheritParams metatrial
#'
#' @family neet_test_one One neet test has been written
#' @family simulation Functions that contribute to simulation pipeline.
#'
#' @export
metasim <- function(...,
id = "simulation1",
trial_fn = metatrial,
trials = 4) {
neet::assert_neet(id, "character")
neet::assert_neet(trial_fn, "function")
neet::assert_neet(trials, "numint")
all_trials <-
# map_peacefully(1:trials, .f = function(x) {trial_fn(...)})
map_df(1:trials, .f = function(x) {trial_fn(...)})
results <-
all_trials %>%
dplyr::summarise(
tau_sq = mean(tau_sq),
ci_width = mean(ci_ub - ci_lb),
bias = mean(bias),
coverage = sum(covered) / length(covered),
successful_trials = length(covered)
) %>%
mutate(sim_id = id)
return(results)
}
|
e93abed84189e7e3c2246af593e4c1781f674915
|
5f9c843d3784b837e73397540881be572be3172a
|
/R/twitter_termfrequency.R
|
987a2a0363c71c059847792c5a6b072c008499cd
|
[] |
no_license
|
ds10/Personal-Corpus
|
ebbde2c65f7795a8815fbc2b2a4a39b1b7df153b
|
5f009ac29b53a27f69855cdc4b787bf48791b524
|
refs/heads/master
| 2016-09-06T02:23:40.899466
| 2014-10-16T14:06:48
| 2014-10-16T14:06:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
twitter_termfrequency.R
|
userTimeline<-userTimeline(me, 3200)
df <- do.call("rbind", lapply(userTimeline, as.data.frame))
tw.df=do.call("rbind",lapply(userTimeline, as.data.frame))
a <- Corpus(VectorSource(tw.df$text)) # create corpus object
a <- tm_map(a, tolower) # convert all text to lower case
a <- tm_map(a, removePunctuation)
a <- tm_map(a, removeNumbers)
a <- tm_map(a, removeWords, stopwords("english")) # this list needs to be edited and this function repeated a few times to remove high frequency context specific words with no semantic value
mydata.dtm <- TermDocumentMatrix(a)
mydata.dtm2 <- removeSparseTerms(mydata.dtm, sparse=0.9)
mydata.df <- as.data.frame(inspect(mydata.dtm2))
mydata.df.scale <- scale(mydata.df)
d <- dist(mydata.df.scale, method = "euclidean") # distance matrix
fit <- hclust(d, method="ward")
plot(fit) # display dendogram?
mostusedterms <- rownames(mydata.df)
print("your most used terms:")
mostusedterms
n <- readline("Would you like to see where people tweeting about these terms are in relation to you? (Notworking)")
|
15ff94c34b9b811baa9ea214f0af37447f7d3902
|
559713216d4fe05838b1450981d8f6a2bd838135
|
/profiling/8.Luis_data/8.C.4_collect_pp.R
|
1ba3211682e1969f31d7390a25b0aa8abf1a7ee7
|
[] |
no_license
|
yangjl/phasing
|
6ac18f067c86d225d7351dfb427b6ae56713ce1b
|
99a03af55171dac29b51961acb6a044ea237cb3a
|
refs/heads/master
| 2020-04-06T06:58:38.234815
| 2016-06-10T00:40:36
| 2016-06-10T00:40:36
| 38,838,697
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,033
|
r
|
8.C.4_collect_pp.R
|
### Jinliang Yang
### use impute_parent in CJ data
#library(imputeR)
#library(data.table, lib="~/bin/Rlib/")
### updated geno matrix
imp4 <- read.csv("largedata/ip/imp4.csv")
source("lib/get_pp.R")
ppr1 <- get_pp(path="largedata/obs1", pattern=".csv", imp=imp4)
#### to a tab delimited format
newformat <- function(pp67){
plantid <- names(pp67[[1]])[6]
tab <- pp67[[1]][, c("snpid", "chr", "pos", "chunk", "hap1", "hap2" )]
names(tab)[4:6] <- c(paste0(plantid, "_chunk"),paste0(plantid, "_hap1"),paste0(plantid, "_hap2") )
for(i in 2:length(pp67)){
plantid <- names(pp67[[i]])[6]
res <- pp67[[i]][, c("snpid", "chunk", "hap1", "hap2" )]
names(res)[2:4] <- c(paste0(plantid, "_chunk"),paste0(plantid, "_hap1"),paste0(plantid, "_hap2") )
tab <- merge(tab, res, by="snpid", sort = FALSE)
}
return(tab)
}
####
hap <- newformat(pp67=ppr1)
write.table(hap, "largedata/teo_hap_AGPv2_4parents.txt", sep="\t", row.names=FALSE, quote=FALSE)
|
08ad05659bab4aab8141c6fdaa1fbc70e0987e99
|
d479524ac5ac93612f847ce5f83656f254143e41
|
/MSBA/Optimization/queueSimOpt.R
|
f124538a14dda2fea4cbe297f8a35c386d465c08
|
[] |
no_license
|
yannickheard/Yannicks-Portfolio
|
6140cc080a15470e6f5626afe853b90860a30a79
|
6c85ce1cbff4427d5417c73b2a5a1bc8eaa156fd
|
refs/heads/master
| 2023-07-16T08:45:12.182058
| 2021-09-01T00:07:16
| 2021-09-01T00:07:16
| 306,383,359
| 0
| 0
| null | 2021-09-01T00:25:34
| 2020-10-22T15:37:44
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 987
|
r
|
queueSimOpt.R
|
N=1000
#The avg service times from which we need to pick one
avgServiceTimes = seq(3,7,0.1)
#Corresponding Costs
cost = 5*avgServiceTimes^2 - 60*avgServiceTimes +200
AvgWaitTimes = rep(NA,length(avgServiceTimes))
PercentAnnoyed = rep(NA,length(avgServiceTimes))
#Random number generator's seed. Randomly picked and remembered for use in decision.
seed = sample(1000,1)
for (si in 1:length(avgServiceTimes) ){
#reset seed everytime
set.seed(seed)
#tau = sampled from an exponential with lambda=1/8
tau = rexp(N-1,1/8)
#S = sampled from an exponential with mu = 1/avgServieTime
S= rexp(N,1/avgServiceTimes[si])
A = c(0,cumsum(tau));
T = rep(NA,N)
D = rep(NA,N)
W = rep(NA,N)
T[1] = 0
D[1] = S[1]
W[1] = 0
for (i in 2:N){
T[i] = max(D[i-1],A[i])
D[i] = T[i] + S[i]
W[i] = T[i] - A[i]
}
AvgWaitTimes[si] = mean(W)
PercentAnnoyed[si] = mean(W>20)*100
}
obj = PercentAnnoyed+cost
plot(avgServiceTimes,obj,type="l")
|
3f385ffd989583d322d0e44703105312b1606144
|
0142f0c7759198ead6ce25967f377f550e4c7bf4
|
/man/hatvalues.ddhazard.Rd
|
f57e6eea5b6287fdc0dcc5b2025532ab686e8b58
|
[] |
no_license
|
boennecd/dynamichazard
|
e155c92c5ca8fe3733e512c45492a4523952673d
|
3c5964085e3d3c458710f0b5c60355793188b377
|
refs/heads/master
| 2022-10-24T06:01:31.111864
| 2022-10-04T21:14:05
| 2022-10-04T21:14:05
| 66,570,932
| 7
| 3
| null | 2017-06-18T11:12:19
| 2016-08-25T15:35:41
|
R
|
UTF-8
|
R
| false
| true
| 1,035
|
rd
|
hatvalues.ddhazard.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hatvalues.R
\name{hatvalues.ddhazard}
\alias{hatvalues.ddhazard}
\title{Hat Values for ddhazard Object}
\usage{
\method{hatvalues}{ddhazard}(model, ...)
}
\arguments{
\item{model}{a fit from \code{\link{ddhazard}}.}
\item{...}{not used.}
}
\value{
A list of matrices. Each matrix has three columns: the hat values, the row number of the original data point and the id the row belongs to.
}
\description{
Computes hat-"like" values from usual L2 penalized binary regression.
}
\details{
Computes hat-"like" values in each interval for each individual at risk in the interval. See the \code{vignette("ddhazard", "dynamichazard")} vignette for details.
}
\examples{
library(dynamichazard)
fit <- ddhazard(
Surv(time, status == 2) ~ log(bili), pbc, id = pbc$id, max_T = 3000,
Q_0 = diag(1, 2), Q = diag(1e-4, 2), by = 100,
control = ddhazard_control(method = "GMA"))
hvs <- hatvalues(fit)
head(hvs[[1]])
head(hvs[[2]])
}
\seealso{
\code{\link{ddhazard}}
}
|
a69a0a72d2e6f03aeb64459185a8e292efeb7002
|
5a87297f6dbcd7027fa8412018e0dee36a2b42ba
|
/man/stage_national_data.Rd
|
56eca476f7cee9c0ff74a30e3be012bdfee4cd14
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0"
] |
permissive
|
hydroinfo-gis/nhdplusTools
|
fce3b719a52f1c00d1b3eb87c1b4522c8f841627
|
48020b1b7aca68c4e4fc641ff3391d12d032e2c2
|
refs/heads/master
| 2023-02-05T08:38:37.307951
| 2020-12-09T16:43:43
| 2020-12-09T16:43:43
| 321,866,343
| 1
| 0
|
CC0-1.0
| 2020-12-16T04:20:12
| 2020-12-16T04:20:11
| null |
UTF-8
|
R
| false
| true
| 1,548
|
rd
|
stage_national_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subset_nhdplus.R
\name{stage_national_data}
\alias{stage_national_data}
\title{Stage NHDPlus National Data (deprecated)}
\usage{
stage_national_data(
include = c("attribute", "flowline", "catchment"),
output_path = NULL,
nhdplus_data = NULL,
simplified = TRUE
)
}
\arguments{
\item{include}{character vector containing one or more of:
"attributes", "flowline", "catchment".}
\item{output_path}{character path to save the output to defaults
to the directory of the nhdplus_data.}
\item{nhdplus_data}{character path to the .gpkg or .gdb
containing the national seamless dataset. Not required if
\code{\link{nhdplus_path}} has been set.}
\item{simplified}{boolean if TRUE (the default) the CatchmentSP layer
will be included.}
}
\value{
list containing paths to the .rds files.
}
\description{
Breaks down the national geo database into a collection
of quick to access R binary files.
}
\details{
"attributes" will save `NHDFlowline_Network` attributes
as a separate data.frame without the geometry. The others will save
the `NHDFlowline_Network` and `Catchment` or `CatchmentSP`
(per the `simplified` parameter) as sf data.frames with
superfluous Z information dropped.
The returned list of paths is also added to the nhdplusTools_env
as "national_data".
}
\examples{
sample_data <- system.file("extdata/sample_natseamless.gpkg",
package = "nhdplusTools")
stage_national_data(nhdplus_data = sample_data, output_path = tempdir())
}
|
b7959eb814d8ee8762a09e15e946c018be85597a
|
45458f1f3af18ce80dc3df8adcd167388adfed0a
|
/tests/testthat.R
|
514bd9cde2dc2454d354f810c4057539eb19bfff
|
[
"MIT"
] |
permissive
|
adam-m-mcelhinney/helpRFunctions
|
19ad8c6fe1f75eddbaf4df18436d6b222cdb209e
|
9eb16e8c13ad5c0dc108b06b1b1f7bb087870631
|
refs/heads/master
| 2020-04-23T08:58:44.820860
| 2014-12-16T23:07:09
| 2014-12-16T23:07:09
| 25,843,127
| 0
| 1
| null | 2014-12-19T15:41:10
| 2014-10-27T22:58:15
|
R
|
UTF-8
|
R
| false
| false
| 47
|
r
|
testthat.R
|
library(testthat)
test_check("helpRFunctions")
|
e40250c32b7ec089d174b60bb2afd1bb73c26713
|
3d7d2018911ca154da0c12017c968cd100f88256
|
/r_programming/project_1/corr.R
|
960ad8146089ec61eb88b0737f18e29204dca2ec
|
[] |
no_license
|
anhnguyendepocen/data_science_coursera
|
0e56f295b2647d0e3ba3ab7e20d5ec926222c80c
|
4c8e33f278f6c6dcaacd9d7c6d438dff81ed37be
|
refs/heads/master
| 2020-03-18T00:47:00.862327
| 2014-06-06T13:52:35
| 2014-06-06T13:52:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 860
|
r
|
corr.R
|
corr <- function(directory, threshold = 1) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
list_files <- list.files(path = paste(c('./',directory,'/'), collapse = ''))
data <- vector("numeric", length = 0)
for (i in 1:length(list_files)) {
filename = paste(c('./',directory,'/',list_files[i]), collapse = '')
df = read.csv(filename)
if (sum(complete.cases(df)) >= threshold) {
cr <- cor(df$nitrate, df$sulfate, use = "complete.obs")
data <- append(data, cr)
}
}
return(data)
}
|
bbe71b5e34bc6334909e6513eb1f5bea74d9c710
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/timeSeries/unitTests/runit.colCum.R
|
6ce9f96c1f19a3c9f782bbfbae0a4a8453ac08a3
|
[
"MIT",
"GPL-2.0-only"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 1,754
|
r
|
runit.colCum.R
|
# Rmetrics is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# Rmetrics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
################################################################################
test.colCum <-
function()
{
# RUnit Test:
# Signal Series
ts <- dummySeries(format = "counts")
colCumsums(ts)
colCummaxs(ts)
colCummins(ts)
colCumprods(ts)
colCumreturns(ts)
# Time Series:
ts <- dummySeries()
colCumsums(ts)
colCummaxs(ts)
colCummins(ts)
colCumprods(ts)
colCumreturns(ts)
# check that timeSeries with one row still works ...
t <- ts[1,]
checkTrue(is(colCumsums(t), "timeSeries"))
checkTrue(is(colCummaxs(t), "timeSeries"))
checkTrue(is(colCummins(t), "timeSeries"))
checkTrue(is(colCumprods(t), "timeSeries"))
checkTrue(is(colCumreturns(t), "timeSeries"))
checkEquals(nrow(colCumsums(t)), 1)
checkEquals(nrow(colCummaxs(t)), 1)
checkEquals(nrow(colCummins(t)), 1)
checkEquals(nrow(colCumprods(t)), 1)
checkEquals(nrow(colCumreturns(t)), 1)
}
################################################################################
|
e474b1230ebf7957ac792d68da178f8b5c2cf27e
|
64fa472159285426577d615e3c4fdca622efcb85
|
/netAnalysis/scoring/score.R
|
cf5e12408a43d11f70aecbbef7290f2c214a5036
|
[] |
no_license
|
doaa-altarawy/PEAK
|
f05fcf19d19470a2c9728542ab7d2d8c1ea3ee60
|
3c0e722b3549c2315b5b7ec32e7e01ce3ce4d9c0
|
refs/heads/master
| 2021-01-17T16:41:41.822353
| 2018-05-29T20:13:59
| 2018-05-29T20:13:59
| 75,315,636
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
score.R
|
library(PRROC)
A <- read.table("/home/doaa/Dropbox/2_My_Results/EvaluationScripts_Matlab/INPUT/predictions/pred.tsv", sep=",")
B <- read.table("/home/doaa/Dropbox/2_My_Results/EvaluationScripts_Matlab/INPUT/predictions/gold.tsv", sep=",")
pr<-pr.curve(scores.class0 = B[['V2']], scores.class1 = A[['V2']])
---------------
library(ROCR)
A <- read.table("/home/doaa/Dropbox/2_My_Results/EvaluationScripts_Matlab/INPUT/predictions/pred.tsv", sep=",")
B <- read.table("/home/doaa/Dropbox/2_My_Results/EvaluationScripts_Matlab/INPUT/predictions/gold.tsv", sep=",")
test = A[['V2']]
gold = B[['V2']]
pred <- prediction( test, gold )
## precision/recall curve (x-axis: recall, y-axis: precision)
prec <- performance(pred, "prec")
rec <- performance(pred, "rec")
library(caTools)
auc <- trapz(rec, prec)
|
fd4bcf8327fd863fe1de047b984ab29c6c3fafdb
|
988aada9dce03b33f26f8975b34fc0ca1563df7e
|
/pollutantcorr.R
|
8f1ccec451de7ee6adb0c6516868b91f6fb79bb2
|
[] |
no_license
|
lbennett04/AirPollution
|
4b2cff186e88b021bea7b73ab8c23bd36f256164
|
7a8ab2850159c3dfca070ff75c06c83f88d910f6
|
refs/heads/master
| 2021-06-16T12:08:48.944364
| 2017-05-05T15:06:30
| 2017-05-05T15:06:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 673
|
r
|
pollutantcorr.R
|
#the function below takes in a directory of data files and a threshold for complete cases
#and calculates the correlation between sulfate and nitrate for monitor locations
#where numobs>=threshold.
corr <- function(directory='specdata',threshold=0){
source('pollutantscomplete.R')
completepolls <- complete()
id <- completepolls[completepolls$numobs>threshold,'id']
corrs <- c()
for (i in id){
while (nchar(i)<3){
i <- paste(0,i,sep='')
} #end of while loop
file <- paste(directory,'/',i,'.csv',sep='')
monitor <- na.omit(read.csv(file))
corrs <- c(corrs, cor(monitor$sulfate,monitor$nitrate))
} #end of monitor loop
return(corrs)
}
|
d837a5d8b001ae86725e8c0ecd7b16b3fd82e259
|
77d041917ba2a44da3ce27a57dc2ab52c14e4e2f
|
/jonas/C2.02 slope_graph.R
|
69d3835784c9f00d19414065ec7868625d014834
|
[] |
no_license
|
calvin-f/quantitative-hci-assignments
|
4b5c339f0f4a19806f8df9687caf90504858e4e7
|
86d98d7ef4d8c9e6d645e6c6392ec493c60ef611
|
refs/heads/main
| 2023-04-29T10:47:45.917888
| 2021-05-25T13:23:37
| 2021-05-25T13:23:37
| 348,293,361
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 955
|
r
|
C2.02 slope_graph.R
|
# Slope graph
# ===
# Goal:
# Slope graphs are useful to show the trend of the differences between conditions within each participant.
# Add a text to the slopegraph to indicate which participant it is from for each point.
#
# Example output: `goals/C2.02 slope_graph_goal.png`
#
# Functions:
# * mutate()
# * if_else()
# * aes(group = interaction(…)),
# * geom_text(aes(label = ...), nudge_x = ...)
# * geom_point()
# * geom_line()
#
# Relevant R4DS chapter: https://r4ds.had.co.nz/graphics-for-communication.html?q=geom_text#annotations
#===============================================================================
library(tidyverse)
source("R/prepare_data.R")
# Answer:
data_knobology_within %>%
mutate(label=if_else(device=='Touch', "", participant)) %>%
ggplot(aes(x = device, y = time, group=interaction(vision,participant))) +
geom_point() +
geom_line() +
geom_text(aes(label=label), nudge_x=-0.05)
|
c5a662e143348d730c0b513d24b68ab113c47fe8
|
d2625551031207b1dd195abd88dfe92057c0a859
|
/load_and_prepare_data.R
|
983743dd50bf58540ec5256486b3b4e9ce5ee2a3
|
[] |
no_license
|
dionmagnus/ExData_Plotting1
|
6772293c60bf9fd1020ffecb5a01259b265a4215
|
5e4a19bd14f7109c4d9a5aab2e05d5925bb17569
|
refs/heads/master
| 2021-01-18T06:45:05.388062
| 2016-02-07T13:45:47
| 2016-02-07T13:45:47
| 51,240,018
| 0
| 0
| null | 2016-02-07T07:55:44
| 2016-02-07T07:55:44
| null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
load_and_prepare_data.R
|
# Create the local data folder (no-op if it already exists)
dataRootDir <- "data"
if(!dir.exists(dataRootDir)) {dir.create(dataRootDir)}
# Download the zipped dataset only if it is not already cached locally
dataZipFile <- "data/household_data.zip"
if(!file.exists(dataZipFile)) {
# loading data
dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(dataUrl, "data/household_data.zip", method = "curl")
}
# Extract the raw text file from the archive (skipped if already extracted)
dataFile <- "data/household_power_consumption.txt"
if(!file.exists(dataFile)) {
unzip(dataZipFile, exdir = dataRootDir)
}
# Read the data; "?" encodes missing values; column classes fixed up front to
# speed up parsing of the large file
dataColClasses <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
mainDataSet <- read.csv(dataFile, sep = ";", na.strings = "?", colClasses = dataColClasses)
# Combine the Date and Time text columns into a single POSIXlt timestamp
mainDataSet$Datetime <- strptime(paste(mainDataSet$Date, mainDataSet$Time), format = "%d/%m/%Y %H:%M:%S")
# Keep only 2007-02-01 .. 2007-02-02.
# NOTE(review): the strict `>` excludes the 00:00:00 reading of 2007-02-01;
# confirm whether `>=` on the lower bound was intended.
mainDataSubset <- mainDataSet[mainDataSet$Datetime > as.POSIXlt("2007-02-01") & mainDataSet$Datetime < as.POSIXlt("2007-02-03"),]
|
fb7aeea556c953ddd017a11d7a51a929c42af8a4
|
776c032b0d34bccb8735d73daf040b96668762fa
|
/Making_Packages_How-To.r
|
f0be947b325a34a6f6f23e892d89390c3d23eb5e
|
[] |
no_license
|
ekortenhoeven-usgs/R-packages
|
926f6318fc4c4ba21a2b06b31236cd8c3c65d1ed
|
465182250fe1ffcd89c4a7286de1addb7a53948c
|
refs/heads/master
| 2020-12-25T06:24:03.093348
| 2016-06-20T16:45:19
| 2016-06-20T16:45:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,920
|
r
|
Making_Packages_How-To.r
|
##### Create an R package #####

##### Set working directory, create folder, load libraries #####
## Set working directory (machine-specific path -- adjust before running)
setwd('C:/Users/jmuehlbauer/Documents/R/Custom')
## Install (only if missing) and load the packages needed to build a package
if('devtools' %in% rownames(installed.packages())==FALSE){install.packages('devtools')}
if('roxygen2' %in% rownames(installed.packages())==FALSE){install.packages('roxygen2')}
require(devtools)
require(roxygen2)
## Create the skeleton folder for the new package
create('TEST')

##### Write functions #####
## In the folder just created (above), go to the R folder, and add any functions (as files with no filetype) you wish to include in the library. Call the example below "testfx.r"
## Manipulate the header content of each function to include parameters, info, examples, etc. For example (below verbatim, including hashtags):
#' @title A basic function
#' @description This is a test function I wrote.
#' @param test Tests if the function is working. Defaults to TRUE.
#' @examples test()
#' @export
testfx<-function(test=TRUE){
	if(test==TRUE){print('It works!')}
	else{'Hey, it still works!'}
}

## Create documentation from the roxygen headers
setwd('./TEST')
document()

##### Install the package #####
## Install to the local directory and try it!
setwd('..')
install('TEST')
library(TEST)
testfx()

##### Set up the local repository using GitBASH #####
## FIX: the lines below are *shell* commands, not R code. Left un-commented
## (as before) they made this whole file un-parseable by R. They are kept
## here as comments to copy into GitBASH (hint: paste in BASH is Shift+Insert):
# cd "C:/Users/jmuehlbauer/Documents/R/Custom"
# git init
# git add TEST/
# git commit -m "Initial commit"
## Push to a GitHub repository:
# git remote add origin https://github.com/jmuehlbauer-usgs/R-packages.git
# git pull origin master
# git commit -m "Merging with GitHub"
# git push origin master

##### Download and install the package from GitHub #####
## Other users can now install the package from GitHub:
install_github(repo='jmuehlbauer-usgs/R-packages',subdir='TEST')
|
40d55095876017b8e2d59522ef4b5c1490a84f21
|
e08424eb7743323f470775d2cedb893d9d2080db
|
/package/analogues/R/util.R
|
b92f47b53aae9d6ff9ac2bf6cfba567ff2782541
|
[] |
no_license
|
neojavan/ccafs-analogues
|
9ea474537aa5f7e351b163ca27920df9b3240634
|
5faed60c61c3d44d642c76e4ca0f15700175ae22
|
refs/heads/master
| 2020-05-30T14:53:52.302986
| 2012-04-26T12:47:19
| 2012-04-26T12:47:19
| 39,160,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,426
|
r
|
util.R
|
makeRoll <- function(params) {
  # Build a rolling month-index matrix: row i holds the division indices
  # starting at month i and wrapping around the year (the 0 index produced by
  # 0:(i-1) when i == 1 is silently dropped by R's subsetting, so every row
  # has exactly `ndivisions` entries).
  months <- 1:params$ndivisions
  roll.v <- c()
  for (i in 1:length(months)) {
    roll.v <- c(roll.v, months[c(i:length(months), 0:(i - 1))])
  }
  roll <- matrix(data = roll.v, ncol = length(months), byrow = TRUE)
  # Cut the roll down to the actual growing season.
  # drop = FALSE keeps a 1-column matrix when the season is a single month.
  roll <- roll[, params$growing_season, drop = FALSE]
  # If seasons never span a year boundary, only the first start month matters
  if (!params$across_year) roll <- roll[1, , drop = FALSE]
  # BUG FIX: the function previously ended on the `if` statement, so it
  # returned invisible NULL whenever params$across_year was TRUE.
  return(roll)
}
# S3 generic for collapsing raw model results into a single summary object;
# dispatches on the class of `object` (CcafsResults / HalResults methods
# are defined below).
summarizeResults <- function(object, ...) {
  UseMethod("summarizeResults", object)
}
# Method for CcafsResults: collapse per-start-month results into one object.
# `res_all` is either a list (raster layers) or a matrix/vector of values.
# Relies on stack()/stackApply()/nlayers() from the raster package when the
# input is a list -- TODO confirm input types against callers.
summarizeResults.CcafsResults <- function(res_all, params) {
  # Seasons confined to one calendar year: nothing to aggregate.
  if (!params$across_year) {
    if (is.list(res_all)) {
      return(res_all[[1]])
    }
    return(res_all)
  }
  if (is.list(res_all)) {
    # Raster case: keep the full lagged stack, or reduce layers with min().
    lagged_stack <- do.call(stack, res_all)
    if (params$keep_lag) {
      return(lagged_stack)
    }
    return(stackApply(lagged_stack, rep(1, nlayers(lagged_stack)), min))
  }
  # Matrix case: keep all lagged rows, or take the row-wise minimum.
  if (params$keep_lag) {
    return(res_all)
  }
  apply(res_all, 1, min)
}
# Method for HalResults: same structure as the CcafsResults method, but
# aggregates with max() (rasters) / any() (matrix rows) instead of min().
# Relies on stack()/stackApply()/nlayers() from the raster package when the
# input is a list -- TODO confirm input types against callers.
summarizeResults.HalResults <- function(res_all, params) {
  # Seasons confined to one calendar year: nothing to aggregate.
  if (!params$across_year) {
    if (is.list(res_all)) {
      return(res_all[[1]])
    }
    return(res_all)
  }
  if (is.list(res_all)) {
    # Raster case: keep the full lagged stack, or reduce layers with max().
    lagged_stack <- do.call(stack, res_all)
    if (params$keep_lag) {
      return(lagged_stack)
    }
    return(stackApply(lagged_stack, rep(1, nlayers(lagged_stack)), max))
  }
  # Matrix case: keep all lagged rows, or flag rows with any TRUE entry.
  if (params$keep_lag) {
    return(res_all)
  }
  apply(res_all, 1, function(vals) any(vals == TRUE))
}
|
e042b43f60dc96a09628cfb2d62f07a70650df18
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/hdme/inst/doc/hdme.R
|
ddd4789b6e45097ff16887f51f6aa3d3e4764898
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,710
|
r
|
hdme.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.width=6
)
## -----------------------------------------------------------------------------
# Load the hdme package
library(hdme)
## -----------------------------------------------------------------------------
create_example_data <- function(n, p, s = 5, sdX = 1, sdU = 0.5,
                                sdEpsilon = 0.1, family = "gaussian") {
  # Simulate a measurement-error regression problem:
  #   X: n x p true covariates, iid N(0, sdX^2)
  #   W: noisy measurements of X with iid N(0, sdU^2) uncorrelated errors
  #   y: gaussian response with noise sd sdEpsilon, or binomial via a logit
  # Returns list(X, W, y, beta, sigmaUU).
  # NOTE(review): beta always has 5 nonzero entries; `s` only sets how many
  # zeros are appended, so length(beta) equals p only when s == 5.
  X_true <- matrix(rnorm(n * p, sd = sdX), nrow = n, ncol = p)
  beta <- c(-2, -1, 0.5, 1, 2, rep(0, p - s))
  if (family == "gaussian") {
    y <- X_true %*% beta + rnorm(n, sd = sdEpsilon)
  } else if (family == "binomial") {
    # Amplify the signal in the binomial case so classes stay separable
    beta <- 3 * beta
    y <- rbinom(n, size = 1, prob = 1 / (1 + exp(-(X_true %*% beta))))
  }
  # Measurements W = truth + iid noise (uncorrelated measurement errors)
  W_obs <- X_true + matrix(rnorm(n * p, sd = sdU), nrow = n, ncol = p)
  list(X = X_true, W = W_obs, y = y, beta = beta, sigmaUU = diag(p) * sdU)
}
## ---- message=FALSE-----------------------------------------------------------
n <- 100
p <- 500
set.seed(1000)
ll <- create_example_data(n, p)
## ---- message=FALSE-----------------------------------------------------------
library(glmnet)
library(dplyr)
# Lasso with cross-validation on data without measurement error
fit1 <- cv.glmnet(ll$X, ll$y)
# Lasso with cross-validation on data with measurement error
fit2 <- cv.glmnet(ll$W, ll$y)
# Create a data frame with results ([-1] because we drop the intercept)
lassoEstimates <- tibble(
index = rep(1:p, times = 3),
beta = c(ll$beta, as.numeric(coef(fit1)[-1]), coef(fit2)[-1]),
label = c(rep("True values", p), rep("No measurement error", p), rep("Measurement error", p))
)
## -----------------------------------------------------------------------------
library(ggplot2)
ggplot(lassoEstimates, aes(x = index, y = beta, color = label)) +
geom_point() +
xlab("p") +
theme(legend.title=element_blank()) +
ggtitle("Measurement error leading to false positives")
## ---- message=FALSE, warning=FALSE--------------------------------------------
library(tidyr)
estimatesOfNonzero <- lassoEstimates %>%
spread(key = label, value = beta) %>%
filter(`True values` != 0) %>%
gather(key = label, value = beta, -index)
ggplot(estimatesOfNonzero, aes(x = index, y = beta, color = label)) +
geom_point() +
xlab("p") +
theme(legend.title=element_blank()) +
ggtitle("Measurement error leading to attenuation")
## -----------------------------------------------------------------------------
# Number of samples
n <- 1000
# Number of covariates
p <- 50
# Create example data
ll <- create_example_data(n, p, family = "binomial")
## -----------------------------------------------------------------------------
args(gds)
## -----------------------------------------------------------------------------
# Fit the Generalized Dantzig Selector
gds_estimate <- gds(ll$X, ll$y, family = "binomial")
## -----------------------------------------------------------------------------
class(gds_estimate)
## -----------------------------------------------------------------------------
str(gds_estimate)
## -----------------------------------------------------------------------------
set.seed(1000)
# Generate example data
ll <- create_example_data(n, p)
# Fit the corrected lasso
corrected_fit <- corrected_lasso(W = ll$W, y = ll$y, sigmaUU = ll$sigmaUU)
## -----------------------------------------------------------------------------
# Class of the object
class(corrected_fit)
# The coef() method prints the number of nonzero estimates as a function of the radius
coef(corrected_fit)
## -----------------------------------------------------------------------------
args(corrected_lasso)
## -----------------------------------------------------------------------------
plot(corrected_fit)
## -----------------------------------------------------------------------------
plot(corrected_fit, type = "path")
## -----------------------------------------------------------------------------
set.seed(323)
n <- 100
p <- 50
ll <- create_example_data(n, p, sdU = 0.2, family = "binomial")
## -----------------------------------------------------------------------------
corrected_fit <- corrected_lasso(ll$W, ll$y, ll$sigmaUU, family = "binomial")
## -----------------------------------------------------------------------------
plot(corrected_fit)
## -----------------------------------------------------------------------------
plot(corrected_fit, type = "path")
## -----------------------------------------------------------------------------
set.seed(1000)
# Generate example data
ll <- create_example_data(n, p)
# Run lasso with cross-validation
cv_corrected_fit <- cv_corrected_lasso(W = ll$W, y = ll$y, sigmaUU = ll$sigmaUU)
## -----------------------------------------------------------------------------
class(cv_corrected_fit)
## -----------------------------------------------------------------------------
str(cv_corrected_fit)
## -----------------------------------------------------------------------------
plot(cv_corrected_fit)
## -----------------------------------------------------------------------------
corrected_fit <- corrected_lasso(ll$W, ll$y, ll$sigmaUU, radii = cv_corrected_fit$radius_1se)
## -----------------------------------------------------------------------------
str(corrected_fit)
## -----------------------------------------------------------------------------
set.seed(1)
# Number of samples
n <- 1000
# Number of covariates
p <- 50
# Generate data
ll <- create_example_data(n, p, sdU = 0.2)
## -----------------------------------------------------------------------------
mus_fit <- mus(ll$W, ll$y)
class(mus_fit)
## -----------------------------------------------------------------------------
coef(mus_fit)
## -----------------------------------------------------------------------------
plot(mus_fit)
## -----------------------------------------------------------------------------
mus_fit <- mus(ll$W, ll$y, delta = 0.1)
## -----------------------------------------------------------------------------
plot(mus_fit)
## -----------------------------------------------------------------------------
set.seed(323)
n <- 100
p <- 50
ll <- create_example_data(n, p, sdU = 0.2, family = "binomial")
gmus_fit <- gmus(ll$W, ll$y, family = "binomial")
## -----------------------------------------------------------------------------
class(gmus_fit)
str(gmus_fit)
## -----------------------------------------------------------------------------
plot(gmus_fit)
## -----------------------------------------------------------------------------
gmus_fit <- gmus(ll$W, ll$y, delta = 0.1, family = "binomial")
## -----------------------------------------------------------------------------
plot(gmus_fit)
## -----------------------------------------------------------------------------
set.seed(323)
n <- 100
p <- 50
ll <- create_example_data(n, p, sdU = 0.2, family = "binomial")
gmu_lasso_fit <- gmu_lasso(ll$W, ll$y, family = "binomial")
## -----------------------------------------------------------------------------
class(gmu_lasso_fit)
str(gmu_lasso_fit)
## -----------------------------------------------------------------------------
plot(gmu_lasso_fit)
|
1e568eb89caaf43b8f90b34b7c0dfda6c96de281
|
356b4537c1b107f265412c1ef1741ad0ad466ddd
|
/R/tests/testdir_javapredict/runit_DL_javapredict_iris.R
|
c99f9f8305376373b9fe2322cc12b398e5608ff6
|
[
"Apache-2.0"
] |
permissive
|
krishnatray/h2o
|
5445285dd078cd9cd32d8ac9a1f0ce76c1a18551
|
fa150a604dfeaec0580d361b92e795bedeb5d2ac
|
refs/heads/master
| 2021-01-23T23:20:12.316972
| 2014-07-02T22:14:12
| 2014-07-02T22:14:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,578
|
r
|
runit_DL_javapredict_iris.R
|
#----------------------------------------------------------------------
# Purpose: This test exercises the DeepLearning model downloaded as java code
# for the iris data set.
#
# Notes: Assumes unix environment.
# curl, javac, java must be installed.
# java must be at least 1.6.
#----------------------------------------------------------------------
options(echo = FALSE)
TEST_ROOT_DIR <- ".."
# Make all relative paths below resolve from this script's own location
setwd(normalizePath(dirname(R.utils::commandArgs(asValues = TRUE)$"f")))
source(paste(TEST_ROOT_DIR, "findNSourceUtils.R", sep = "/"))

#----------------------------------------------------------------------
# Parameters for the test.
# FIX: use `<-` for assignment and TRUE/FALSE instead of the reassignable
# shorthands T/F throughout.
#----------------------------------------------------------------------
train <- locate("smalldata/iris/iris_train.csv")
test <- locate("smalldata/iris/iris_test.csv")
x <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
y <- "species"
classification <- TRUE

#----------------------------------------------------------------------
# Run the tests: each source() call runs the shared java-predict check
# using the parameter values currently in the global environment.
#----------------------------------------------------------------------
activation <- "Tanh"
balance_classes <- TRUE
source('../Utils/shared_javapredict_DL.R')
balance_classes <- FALSE
source('../Utils/shared_javapredict_DL.R')
activation <- "TanhWithDropout"
source('../Utils/shared_javapredict_DL.R')
activation <- "Rectifier"
source('../Utils/shared_javapredict_DL.R')
activation <- "RectifierWithDropout"
source('../Utils/shared_javapredict_DL.R')

# Regression variant: predict petal width from the remaining measurements
classification <- FALSE
x <- c("sepal_len", "sepal_wid", "petal_len")
y <- c("petal_wid")
source('../Utils/shared_javapredict_DL.R')
|
a3755a8205b2da9d4844367742ef10c2fb903fb9
|
fcc8d6b26723f653148390dbadc5b7747102e85d
|
/Scripts i pdfs-20190114/T1-E05-dcrown.R
|
fc914aac713cc9956e441c747bc303d8cbebfd96
|
[] |
no_license
|
Huguet57/xuleta-pie
|
813b7d009e8a752c18bfc9f0c86bc4371e3af4e1
|
893b4db177a8866154fa560145cf1a97e1d5b4f8
|
refs/heads/master
| 2020-04-16T15:46:02.082429
| 2019-01-16T00:19:00
| 2019-01-16T00:19:00
| 165,713,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,765
|
r
|
T1-E05-dcrown.R
|
# Regression practice: model crown diameter (DCrown) from tree measurements.
# Requires ./Dades/dcrown.csv relative to the hard-coded working directory.
setwd("~/Documents/CURS 2018-2019/PIE2")
library(car)
dd <- read.csv2("./Dades/dcrown.csv")
# RP: ratio of PB to PT -- presumably a crown/trunk proportion, TODO confirm
dd$RP<-dd$PB/dd$PT
scatterplotMatrix(dd,smooth=F,diagonal=F)
# Log-transformed copies of the response and every predictor
dd$LDCrown<-log(dd$DCrown)
dd$LRP<-log(dd$RP)
dd$LPT<-log(dd$PT)
dd$LHT<-log(dd$HT)
dd$LA<-log(dd$A)
write("___________________________________________________________________","")
write("a)","")
# (a) Linear models on the raw scale; modAc computes the ratio inline via I()
summary(modAc<-lm(DCrown~I(PB/PT)+PT+HT+A,dd))
summary(modA<-lm(DCrown~RP+PT+HT+A,dd))
# Residual diagnostics for modA (studentized residuals with +/-2, +/-3 bands)
plot(predict(modA),resid(modA),pch=3)
abline(h=0,lty=2)
plot(modA,ask=F)
plot(rstudent(modA),pch=3)
abline(h=c(-3,-2,0,2,3),lty=2)
write("___________________________________________________________________","")
write("b)","")
# (b) Same models on the log-log scale
summary(modBc<-lm(log(DCrown)~log(PB/PT)+log(PT)+log(HT)+log(A),dd))
summary(modB<-lm(LDCrown~LRP+LPT+LHT+LA,dd))
plot(predict(modB),resid(modB),pch=3)
abline(h=0,lty=2)
plot(modB,ask=F)
plot(rstudent(modB),pch=3)
abline(h=c(-3,-2,0,2,3),lty=2)
# Extra: non-linear version of modB fitted with nls
# starting values: estimated modB parameters (rounded)
summary(modBnl<-nls(DCrown~exp(b0+b1*LRP+b2*LPT+b3*LHT+b4*LA),start=list(b0=1.7,b1=0.3,b2=0.9,b3=0.2,b4=0.06),data=dd))
plot(predict(modBnl),resid(modBnl),pch=3)
abline(h=0,lty=2)
library(nlme)
plot(modBnl,abline=c(-3,-2,0,2,3))
write("___________________________________________________________________","")
write("a)+b) => c)","")
write("___________________________________________________________________","")
write("d)","")
# (d) 95% prediction intervals for two new trees, on both scales
dp0<-data.frame(PT=c(0.4,0.64),PB=c(0.6,0.9),HT=c(2.3,2.8),A=10)
dpb<-data.frame(LPT=log(c(0.4,0.64)),LRP=log(c(0.6,0.9)/c(0.4,0.64)),LHT=log(c(2.3,2.8)),LA=log(10))
# exp() back-transforms the log-scale predictions to the original scale
exp(predict(modB,dpb,interval="prediction",level=0.95))
exp(predict(modBc,dp0,interval="prediction",level=0.95))
predict(modAc,dp0,interval="prediction",level=0.95)
|
9e61cee30113e827c60ef9bae6720b3d760cba00
|
8f16e7d61774f151e7c31aba5d16af8badcae3a0
|
/cachematrix.R
|
916a13002f0b5abfaaf67fb7174fef04b0f72e9b
|
[] |
no_license
|
JennyQu0605/ProgrammingAssignment2
|
dbe5b71209db5e2cade0a681ca64061ab3c6b23a
|
b1eae8ba12b1187dbbd369e8932da68e7f7697f7
|
refs/heads/master
| 2021-01-16T00:22:40.145756
| 2015-09-24T15:01:44
| 2015-09-24T15:01:44
| 43,007,486
| 0
| 0
| null | 2015-09-23T14:50:47
| 2015-09-23T14:50:45
| null |
UTF-8
|
R
| false
| false
| 1,053
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Creates a vector, which containig four functions to:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based holder for a matrix `x` and a cached value of its inverse.
  # Returns a list of four accessor functions: get/set the matrix and
  # get/set the cached inverse.
  inv <- NULL
  set <- function(y) {
    # BUG FIX: was `x <- y` (a local assignment that never updated the stored
    # matrix) and `inv <<- NuLL` (typo for NULL, which errored at runtime).
    x <<- y
    inv <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(get = get, set = set, getinv = getinv, setinv = setinv)
}
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object),
## computing it with solve() on the first call and serving the cached copy
## (with a message) on subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    computed <- solve(x$get())
    x$setinv(computed)  # store for future calls
    return(computed)
  }
  message("getting cached data")
  cached
}
|
9b0da77fe64bcf71308cfbd2b7a48314943ef2c3
|
8af2bb658adb671bcfef6534cdef27eea9f6b564
|
/cancer_evolution/ce2/sim_ccube.r
|
13ddb269b6dc2c2e46ac9c9361a202d20c437b16
|
[] |
no_license
|
KrisJensen/mphil_compbio
|
ffd0282d2d2bb2e249ae4217d1acaa2821615c78
|
a6aa08d25bd66f5fe7972b8e5f053071e9b5f1c0
|
refs/heads/master
| 2021-06-28T21:41:34.557380
| 2020-10-07T15:08:49
| 2020-10-07T15:08:49
| 163,220,424
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,639
|
r
|
sim_ccube.r
|
# Simulate an example SNV dataset for the ccube package: true cancer-cell
# fractions, a copy-number profile, and binomial read counts per variant.
# NOTE(review): GenerateCopyNumberProfile() and cp2ap() come from ccube;
# their semantics are taken from the inline comments below -- confirm
# against the ccube documentation.
require('ccube')
require('dplyr')
numSnv <- 500
ccfSet <- c(1, 0.4, 0.6) # true ccf pool
ccfTrue <- sample(ccfSet, numSnv, c(0.5,0.2,0.3), replace = T) # simulate true clusters
purity <- 0.9
cnPoolMaj <- c(1,2,3,4) # a pool of possible major copy numbers
cnPoolMin <- c(0,1,2) # a pool of possible minor copy numbers
cnPoolMajFractions <- c(0.30, 0.30, 0.2,0.2) # prevalence of possible major copy numbers
cnPoolMinFractions <- c(1/4, 1/2, 1/4) # prevalence of possible minor copy numbers
cnProfile = GenerateCopyNumberProfile(cnPoolMaj, cnPoolMin,
                                      cnPoolMajFractions, cnPoolMinFractions, numSnv)
head(cnProfile) # column 1: minor copy number, column 2: major copy number, column 3: total copy number
baseDepth = 50
mydata <- data.frame(mutation_id = paste0("ss","_", seq_len(numSnv)) ,
                     ccf_true = ccfTrue,
                     minor_cn = cnProfile[,1],
                     major_cn = cnProfile[,2],
                     total_cn = cnProfile[,3],
                     purity = purity,
                     normal_cn = 2)
# rowwise() makes each mutate expression run once per SNV (sample/rpois/rbinom
# draw a single value per row)
mydata <- dplyr::mutate(rowwise(mydata),
                        mult_true = sample(seq(1,if (major_cn ==1) { 1 } else {major_cn}), 1), # simulate multiplicity
                        vaf = cp2ap(ccf_true, purity, normal_cn, total_cn, total_cn, mult_true), # simulate vaf
                        total_counts = rpois(1, total_cn/2 * baseDepth), # simulate total read counts
                        var_counts = rbinom(1, total_counts, vaf), # simulate variant read counts
                        ref_counts = total_counts - var_counts)
head(mydata)
|
dc8c7dbfd019f2808cf735a1c6390b62124956d7
|
7cab203dda4ef3d8ddae82fac710ab591fa83289
|
/analisis_de_regresion/practica_final/TP_Final-Version1.R
|
acb4d8ddd20db6511d46e7ffa66ba9c71a01f879
|
[] |
no_license
|
jprocha84/Maestria_Estadistica_2019
|
c4b2c54dc99c81fd22701bd4ba535dfba1d38628
|
9721eb646812ba79c2d4cb7434dd5369a35deb7f
|
refs/heads/master
| 2020-05-24T10:00:45.001736
| 2019-06-17T22:07:35
| 2019-06-17T22:07:35
| 187,219,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 668
|
r
|
TP_Final-Version1.R
|
library("readxl")

# Exploratory analysis of the TAS (systolic blood pressure) dataset.
# The .xls file path is relative to the current working directory.
tas_data <- read_excel("Datos_TAS-base1.1.xls")
tas_data$id <- as.factor(tas_data$id)
summary(tas_data)
# Means of the four numeric measurement columns (columns 2-5)
colMeans(tas_data[,2:5])

# FIX: pass `data = tas_data` to each plot instead of attach(), which
# pollutes the search path and can silently mask other objects.
plot(tas~edad,data=tas_data,main="TAS vs Edad",cex.main=0.8,ylab="TAS",xlab="Edad",
     cex.lab=0.8,xlim=c(40,70),ylim=c(100,350), cex.axis=0.8,col="red",cex=0.75, pch=19)
plot(tas~peso,data=tas_data,main="TAS vs Peso",cex.main=0.8,ylab="TAS",xlab="Peso",
     cex.lab=0.8,xlim=c(50,110),ylim=c(100,350), cex.axis=0.8,col="red",cex=0.75, pch=19)
plot(tas~colesterol,data=tas_data,main="TAS vs Colesterol",cex.main=0.8,ylab="TAS",xlab="Colesterol",
     cex.lab=0.8,xlim=c(160,295),ylim=c(100,350), cex.axis=0.8,col="red",cex=0.75, pch=19)
|
94deae35136578377f1520d8881ddcadca41e0e5
|
ea805d721a3cdc2db7a75e38a9b212e4e1885778
|
/ribiosArg/R/parseFuncs.R
|
5ef852facd6e912d908051f239699283fbef886e
|
[] |
no_license
|
grst/ribios
|
28c02c1f89180f79f71f21a00ba8ad8c22be3251
|
430056c85f3365e1bcb5e565153a68489c1dc7b3
|
refs/heads/master
| 2023-06-01T04:48:20.792749
| 2017-04-10T14:28:23
| 2017-04-10T14:28:23
| 68,606,477
| 0
| 0
| null | 2016-09-19T13:04:00
| 2016-09-19T13:04:00
| null |
UTF-8
|
R
| false
| false
| 4,046
|
r
|
parseFuncs.R
|
# Parse a character specification such as "2,3" (possibly split over several
# vector elements, possibly containing double quotes) into a numeric vector.
# Returns `failVal` when parsing fails, when the input is NULL, or when the
# parsed length does not match `expLen` (set expLen = NULL to skip the check).
parseNumVec <- function(str, expLen = 2, failVal = c(5, 5), sep = ",") {
  if (is.null(str)) {
    return(failVal)
  }
  # Re-join multi-element input (handles cases like "2, 2" split on blanks),
  # strip any quoting, then split on the separator and drop empty pieces
  joined <- gsub("\"", "", paste(str, collapse = sep))
  pieces <- strsplit(joined, sep)[[1]]
  pieces <- pieces[pieces != ""]
  nums <- suppressWarnings(as.numeric(pieces))
  ok <- all(!is.na(nums))
  if (!is.null(expLen)) {
    ok <- ok && length(pieces) == expLen
  }
  if (ok) nums else failVal
}
# Parse a string of key-value pairs (e.g. "a=1,b=2") into a two-column
# data.frame. `collapse` separates pairs, `sep` separates key from value,
# and `colnames` renames the resulting columns. Depends on parseStrings()
# defined in this file; `...` is forwarded to it.
parsePairs <- function(str, collapse=",", sep="=",
                       colnames=c("key", "value"),
                       trim=TRUE,...) {
  if(is.null(str)) return(NULL)
  strv <- parseStrings(str, collapse=collapse, trim=trim, ...)
  strl <- strsplit(strv, sep)
  # I() keeps the columns as plain character (AsIs), preventing factor
  # coercion; only the first two fields of each pair are kept (a pair with
  # no `sep` yields NA in the value column)
  res <- data.frame(key=I(sapply(strl, "[", 1L)),
                    value=I(sapply(strl, "[", 2L)))
  colnames(res) <- colnames
  return(res)
}
# Split `str` on `collapse` into a character vector, optionally trimming
# whitespace from each piece (the trimmed result is named by the raw pieces,
# matching sapply's naming). Returns NULL for NULL input.
parseStrings <- function(str, collapse=",", trim=TRUE, ...) {
  if (is.null(str)) return(NULL)
  res <- strsplit(str, collapse)[[1]]
  if (trim) {
    # BUG FIX: the original did `sapply(res, trim, ...)`, but `trim` here is
    # the logical flag (it shadows any trimming function), so the default
    # trim=TRUE path always errored ("not a function"). Use base trimws().
    # NOTE(review): if ribiosUtils::trim accepted extra args via `...`,
    # confirm they map onto trimws()'s `which`/`whitespace` arguments.
    res <- sapply(res, trimws, ...)
  }
  return(res)
}
## makeFactor and parseFactor
# Coerce `groups` to a factor with an explicit level order. When `levels` is
# missing/NULL the levels are derived from the data. Errors if any group is
# absent from `levels`. With make.names=TRUE the levels are sanitized into
# unique syntactic R names; verbose=TRUE warns about any renamed level.
makeFactor <- function(groups, levels=NULL, make.names=TRUE, verbose=FALSE) {
  if (missing(levels) || is.null(levels)) {
    levels <- if (is.factor(groups)) levels(groups) else levels(factor(groups))
  }
  unknown <- setdiff(groups, levels)
  if (length(unknown) > 0) {
    stop("Following groups were not in levels:", paste(unknown, collapse=","), "\n")
  }
  groups <- factor(groups, levels=levels)
  if (make.names) {
    before <- levels(groups)
    levels(groups) <- make.unique(make.names(before))
    changed <- before != levels(groups)
    if (any(changed) && verbose) {
      msg <- sprintf("%s->%s", before[changed], levels(groups)[changed])
      warning("The following group names has been changed:\n",
              paste(msg, collapse="\n"))
    }
  }
  return(groups)
}
# Parse a command-line string of group labels (and optionally a string giving
# the level order) into a factor. `rgroups`/`rlevels` are `collapse`-separated
# strings. Depends on parseStrings() and makeFactor() defined in this file.
parseFactor <- function(rgroups, rlevels=NULL, make.names=TRUE, collapse=",") { ## CL=command line
  if(is.null(rgroups))
    stop("raw string of groups cannot be NULL")
  # unname() drops the names that parseStrings attaches when trimming
  groups <- unname(parseStrings(rgroups, collapse=collapse))
  if(!missing(rlevels) && !is.null(rlevels)) {
    grouplevels <- parseStrings(rlevels, collapse=collapse)
  } else {
    # NULL lets makeFactor derive the level order from the data
    grouplevels <- NULL
  }
  makeFactor(groups, grouplevels, make.names=make.names)
}
## parse files from command line option, which can be (1) a string vector of files, (2) a file listing input files (e.g. pointer file), (3) a directory, or (4) a zip/tar/gz file (determined by suffix). In the later two cases, file patterns can be specified
## in case of compressed files, a temp dir will be created: the user should take care of cleaning up!
isDir <- function(str) file.info(str)$isdir
## TODO: parseFiles is not checked yet!
# Resolve a command-line file specification into a vector of file names.
# `str` may be: (1) a directory (searched with `pattern`), (2) a zip/tar/gz
# archive (extracted into a tempdir -- the caller must clean up), (3) a list
# file containing one path per line, or (4) file names concatenated by
# commas. Depends on extname() (ribiosUtils?) and parseStrings() from this
# file -- TODO confirm extname's origin.
parseFiles <- function(str, sep=",", pattern=NULL, recursive=TRUE, ignore.case=TRUE) {
  if(file.exists(str)) { ## a compressed file or a directory
    if(isDir(str)[1]) { ## directory
      selfiles <- dir(str, pattern=pattern, full.names=TRUE,
                      recursive=recursive, ignore.case=ignore.case)
    } else {
      inext <- extname(str, lower.case=TRUE)
      if(!is.na(inext) & inext %in% c("zip", "tar", "gz")) { ## compressed file
        indir <- tempdir()
        if(inext=="zip") {
          unzip(zipfile=str, exdir=indir)
        } else { ## assume that the file is a tar.* file
          untar(tarfile=str, exdir=indir)
        }
        selfiles <- dir(indir, pattern=pattern, full.names=TRUE,
                        recursive=recursive, ignore.case=ignore.case)
      } else { ## list file
        selfiles <- readLines(str)
      }
    }
  } else { ## file names concatenated by commas(,)
    # NOTE(review): `sep` is not forwarded here; parseStrings falls back to
    # its own "," default -- confirm whether sep should be passed through.
    selfiles <- parseStrings(str)
  }
  return(selfiles)
}
|
52bd1d9eee6cb0b0c24cbeffdb8727fc9c04f7be
|
c6ea82baf66f2c14a2a11a3b738cb47fe5d632ac
|
/plot4.R
|
4e10a09ca97b15a2755d0dd3fd08e5ed8ba653fd
|
[] |
no_license
|
Sapbasu15/Exploratory-Data-Analysis
|
a446b63479734c24df47eaa37ec0aac43446d815
|
23a5768bb5c200ec669ec6e1f5d6aaa7133536a6
|
refs/heads/master
| 2021-01-17T10:18:50.150917
| 2015-02-09T12:44:42
| 2015-02-09T12:44:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,009
|
r
|
plot4.R
|
# plot4.R -- 2x2 panel of household power consumption plots for 2007-02-01/02.
# Reads the raw semicolon-separated file through an SQL filter (sqldf), so
# only the two target dates are loaded into memory.
# FIX: library() instead of require() (fails loudly if sqldf is missing) and
# TRUE instead of the reassignable shorthand T.
library(sqldf)
file <- c("household_power_consumption.txt")
data <- read.csv.sql(file, header = TRUE, sep=";", sql = "select * from file where (Date == '1/2/2007' OR Date == '2/2/2007')" )
data <- na.omit(data)
# Combine the Date and Time text columns into POSIXct timestamps
dtm <- as.POSIXct(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
png('plot4.png',width = 480, height = 480, units = "px",bg='transparent')
par(mfrow = c(2,2))
# Panel 1: global active power; Panel 2: voltage
plot(dtm,data$Global_active_power,type='l',col = 'black',xlab='',ylab='Global Active Power')
plot(dtm,data$Voltage,type='l',col = 'black',xlab='datetime',ylab='Voltage')
# Panel 3: the three sub-metering series overlaid with a legend
plot(dtm,data$Sub_metering_1,type='l',col = 'black',xlab='',ylab='Energy Sub Metering')
lines(dtm,data$Sub_metering_2,type='l',col = 'red')
lines(dtm,data$Sub_metering_3,type='l',col = 'blue')
legend("topright",lty=1,lwd = 3, cex = 0.9, bty = 'n', col=c('black','red','blue'), legend = c('Sub_metering_1','Sub_metering_2','Sub_metering_3'))
# Panel 4: global reactive power
plot(dtm,data$Global_reactive_power,type='l',col = 'black',xlab='datetime',ylab='Global Reactive Power')
dev.off()
|
0e901f1a76e264b24bb4013581bfababec5bcd5a
|
d1670ed594fe1ca0d88368a722d721d50f4643ba
|
/app_server.R
|
1be4e208e7fb6f3fa529fd634fd6bcf80b62bde5
|
[
"MIT"
] |
permissive
|
jwc225/airbnb-singapore-visualization
|
3d209915e7d0b380a5380ead5e49ba70d791e0e9
|
911199b323cf7d9c0d01ec98514d9f5ee4bdbaa3
|
refs/heads/main
| 2023-08-26T21:43:44.744937
| 2021-10-24T08:37:52
| 2021-10-24T08:37:52
| 320,154,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,711
|
r
|
app_server.R
|
# Load packages
library("shiny")
library("tidyverse")
library("leaflet")
library("RColorBrewer")
library("batman")
# Read data (set wdir to root)
data_sing <- read.csv("data/singapore_listings.csv")
# Define a server for the application
# Shiny server: reacts to the filter controls and renders the Leaflet map of
# Singapore Airbnb listings. `data_sing` is read at the top of this file;
# column semantics are inferred from usage below.
server <- function(input, output, session) {
  ##### Interactive Page One ##################################################
  # Change max price range when button clicked
  observeEvent(input$button, {
    max <- input$textbox
    updateSliderInput(session, "price_slider", max = max)
    updateTextInput(session, "textbox", value = "") # clear input after click
  })
  # Construct a color palette (scale) based on the `room-type` column
  palette_fn <- colorFactor(palette = "Dark2", domain = data_sing$room_type)
  # Replace price column with a vector of numbers (strip "$" and ",").
  # These assignments modify a server-local copy of data_sing.
  data_sing$price <- as.numeric(gsub("[$,]", "", data_sing$price))
  # Replace superhost column with boolean values
  data_sing$host_is_superhost <- to_logical(data_sing$host_is_superhost)
  # Render leaflet map
  output$m_sing <- renderLeaflet({
    # Set listings with no reviews to 0 (assume default stars is zero)
    data_sing$review_scores_rating <- ifelse(data_sing$number_of_reviews == 0,
                                             0,
                                             data_sing$review_scores_rating)
    # Dynamic user filtering (`id == id` is an all-TRUE no-op used as "keep all")
    plot_data <- data_sing %>%
      filter(review_scores_rating >= input$score_slider[1] &
               review_scores_rating <= input$score_slider[2]) %>%
      filter(if (input$has_reviews == TRUE) number_of_reviews > 0
             else id == id) %>%
      filter(price >= input$price_slider[1] &
               price <= input$price_slider[2]) %>%
      filter(accommodates >= input$accom_slider) %>%
      filter(if (input$is_superhost == TRUE) host_is_superhost == TRUE
             else id == id) %>%
      filter(if (input$select == "All") id == id
             else neighbourhood_cleansed == input$select)
    # Get the count of filtered listings
    filter_count <- nrow(plot_data)
    # Get map pop-up content for listing rating
    popup_rating <- ifelse(plot_data$number_of_reviews > 0,
                           paste0("<b style='color:#FF5A5F;'>★ ",
                                  plot_data$review_scores_rating,
                                  "</b> (",
                                  plot_data$number_of_reviews, ")"),
                           "No Reviews")
    # Get map pop-up content for host status.
    # FIX: was `== T`; T is an ordinary, reassignable variable -- use TRUE.
    popup_superhost <- ifelse(plot_data$host_is_superhost == TRUE,
                              paste0(" · <b style='color:#FF5A5F;'>
                              🎖</b> Superhost"),
                              "")
    # Get map pop-up content for guest capacity (singular/plural)
    popup_guests <- ifelse(plot_data$accommodates > 1,
                           paste0(plot_data$accommodates, " guests"),
                           paste0(plot_data$accommodates, " guest")
    )
    # Compile all content for map pop-up
    popup_content <- paste0(sep = "<br/>",
                            paste0("<h5><span style='color:#767676;'>",
                                   popup_rating, popup_superhost,
                                   " · <u>", plot_data$neighbourhood_cleansed,
                                   ", Singapore</u></span></h5><hr>"),
                            paste0("<center><h4><b>$", plot_data$price,
                                   "</b> / night</h4></center>"),
                            paste0("<center><h6>", popup_guests, "</h6></center>"),
                            paste0("<center><h5><b><a href=", plot_data$listing_url,
                                   ">", plot_data$name, "</a></b></h5></center>"),
                            paste0("<center><img src=", plot_data$picture_url,
                                   " width=300 height=180></center>")
    )
    # Create Leaflet map of user-filtered Singapore listings.
    # NOTE(review): the tile-server access token below is hard-coded in
    # source; consider moving it to an environment variable.
    leaflet(data = plot_data) %>%
      addTiles(
        urlTemplate = paste0("https://tile.jawg.io/ba3f805c-04fb-4fa7-99ef-b9",
        "05aa38b3c8/{z}/{x}/{y}.png?access-token=eIlOZCXWfZIR2t5pqcGt6vcc25pb",
        "scLwwCKzFgtOjISymDP6p3nvlwwLl4mA0qeH"),
      ) %>%
      setView(lng = 103.841959, lat = 1.3521, zoom = 11.5) %>%
      addCircles(
        lat = ~latitude,
        lng = ~longitude,
        stroke = FALSE,
        label = ~paste0("$", price),
        labelOptions = labelOptions(textsize = "20px"),
        popup = ~popup_content,
        color = ~palette_fn(room_type),
        radius = 20,
        fillOpacity = 0.5
      ) %>%
      addLegend(
        position = "bottomright",
        title = paste0(
          "Room Type (", filter_count, " results)"),
        pal = palette_fn,
        values = ~room_type,
        opacity = 1
      )
  })
}
|
277fcde5cb471a516d0776d0671ff3ebf7354656
|
f876a682c9873b115b720c25f886dfc6e9ca4f79
|
/Supervised_unsupervised_classification.R
|
e575dfcc6bd58551c4b036334c2a4a1d35fea13f
|
[] |
no_license
|
priyanka9991/Machine-learning-Deep-learning-and-Reinforcement-Learning
|
894ae21bf9acdbf1c93d27a509f2f5ba1be09c7a
|
ddb1a11aa60ee847f4645b76ff34d254d95b335c
|
refs/heads/main
| 2023-08-11T17:58:21.068314
| 2021-10-01T04:58:06
| 2021-10-01T04:58:06
| 381,575,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,709
|
r
|
Supervised_unsupervised_classification.R
|
###### PRIYANKA VASANTHAKUMARI #####
## Supervised and Unsupervised Classification ##
###### Part 1 - Supervised learning
# Baseline classifiers (logistic regression, LDA, SVM, KNN) on the full
# feature set, each evaluated by 10-fold cross-validation.
library(magrittr)
library(tidyverse)
library(caret)
library(MASS)
library(boot)
library(klaR)
# Loads objects x (features) and y (labels) into the workspace.
load("/Users/priyanka/Documents/Course works/Data Mining /Final Project/class_data.RData")
data <- data.frame(x,y)
##### No feature selection
#Logistic regression
glm.fit=glm(y~.,data=data,family=binomial,maxit=100)
summary(glm.fit)
#Cross validation with LR
set.seed(5)
glmcv<-cv.glm(data = data, glm.fit,K=10)
glmcv$delta #CV error
##A vector of length two. The first component is the raw cross-validation estimate of prediction error.
#The second component is the adjusted cross-validation estimate.
#The adjustment is designed to compensate for the bias introduced by not using leave-one-out cross-validation.
1-glmcv$delta # Accuracy
train_control <- trainControl(method="cv", number=10)
set.seed(5)
model <- caret::train(as.factor(y)~., data=data, trControl=train_control, method="glm")
print(model)
#LDA
require(MASS)
train_control <- trainControl(method="cv", number=10)
set.seed(5)
model <- caret::train(as.factor(y)~., data=data, trControl=train_control, method="lda")
print(model)
#SVM
library(e1071)
# NOTE(review): from here on data$y is a factor; later code that treats y
# as numeric (e.g. subtraction) should be checked against this conversion.
data$y <- as.factor(data$y)
svmfit=svm(data$y~.,data=data, kernel="linear", cost=1)
set.seed(5) #Linear
model <- caret::train(as.factor(y)~., data=data, trControl=train_control, method="svmLinear")
print(model)
set.seed(5) # Radial
model <- caret::train(as.factor(y)~., data=data, trControl=train_control, method="svmRadial")
print(model)
## 3 KNN & Cross-validation
# my.cv.knn(k, x, y, folds) is a project-local helper; presumably it returns
# the cross-validated misclassification rate -- confirm against its source.
source("/Users/priyanka/Documents/Course works/Data Mining /my.cv.knn.R")
knn_x <- x # Feature set
knn_y <- data$y # Labels
k1 <- c( 2, 5, 10, 20, 50, 100, 150, 200, 300) # KNN tuning parameter k
nk=length(k1)
class_error=rep(0,nk) #Misclassification error
# Crossvalidation across all values of k (tuning parameters)
for (i in 1:nk){
k2=as.integer(k1[i])
class_error[i]<- my.cv.knn(k2,knn_x,knn_y,10) # 10-fold cross-validation
}
# Scatter plot
plot(k1,class_error,xlab="k", ylab="Misclassification error")
# Line plot
lines(k1,class_error,xlab="k", ylab="Misclassification error")
## 4 Tuning k - Choosing k correponding to minimum Misclassification error
k_opt = k1[which.min(class_error)]
# Optimum value of k
k_opt
1-min(class_error) # CV Accuracy
#### Random forests on whole data and feature selection with importance
# Fit a large RF to rank features, then refit the baseline classifiers on
# the subset of features that carries most of the importance mass.
library(tree)
library(randomForest)
library(gbm)
set.seed(5)
data.rf <- randomForest(as.factor(y) ~ ., data=data, ntree=1000,
keep.forest=FALSE, importance=TRUE)
varImpPlot(data.rf,main="Importance of variables") # Importance plot
imp <- data.rf$importance
imp_sort <- imp[order(-imp[,2]),] # Sort variables in descending order of importance
flag = rep(0, 500)
# RF error on all the features
train=sample(1:nrow(data),200)
oob.err=double(500)
test.err=double(500)
# Sweep mtry over all 500 candidate values, tracking OOB and test MSE.
for(mtry in 1:500){
fit=randomForest(y~.,data=data,subset=train,mtry=mtry,ntree=400)
oob.err[mtry]=fit$mse[400]#Mean squared error for 400 trees
pred=predict(fit,data[-train,])
# NOTE(review): data$y was converted to a factor earlier, so subtracting
# predictions here relies on implicit coercion -- verify this computes the
# intended error measure.
test.err[mtry]=mean((data[-train,]$y-pred)^2)
}
# 81.44% accuracy in random forests
matplot(1:500,cbind(test.err,oob.err),pch=19,col=c("red","blue"),type="b",ylab="Mean Squared Error")
legend("topright",legend=c("Test", "OOB"),pch=19,col=c("red","blue"))
min(oob.err)
min(test.err)
### Feature Selection - Using RF variable importance & select the percentage of important variables
# flag[i] is TRUE while the cumulative importance of the top-i variables is
# below 90% of the total, so sum(flag) = number of variables kept.
for(i in 1:500){
flag[i] <- sum(imp_sort[1:i,2])<0.9*sum(imp_sort[,2]) # 0.9 corresponds to 90% of total importance
}
imp_var <- rownames(imp_sort[1:sum(flag),])
sel_feature<-data[,imp_var]
sel_feature <- sapply(sel_feature, function(p) as.numeric(unlist(p)))
newdatarf <- data.frame(sel_feature,y)# data frame of selected features from Variable importance
#Logistic regression after RF imp selection
glm.fit=glm(y~.,data=newdatarf,family=binomial,maxit=100)
summary(glm.fit)
#10 fold Cross validation with LR
set.seed(5)
glmcv<-cv.glm(data = newdatarf, glm.fit,K=10)
glmcv$delta #CV error
##A vector of length two. The first component is the raw cross-validation estimate of prediction error.
#The second component is the adjusted cross-validation estimate.
#The adjustment is designed to compensate for the bias introduced by not using leave-one-out cross-validation.
1-glmcv$delta # CV Accuracy
#LDA after RF imp selection
require(MASS)
lda.fit=lda(y~.,data=newdatarf)
plot(lda.fit)
# Crossvalidation
set.seed(5)
train_control <- trainControl(method="cv", number=10)
model <- caret::train(as.factor(y)~., data=newdatarf, trControl=train_control, method="lda")
# summarize results
print(model) #CV Accuracy
##SVM after RF imp selection
library(e1071)
svmfit=svm(as.factor(y)~.,data=newdatarf, kernel="linear", cost=1)
set.seed(5) # Linear
model <- caret::train(as.factor(y)~., data=newdatarf, trControl=train_control, method="svmLinear")
print(model)
set.seed(5) # Radial
train_control <- trainControl(method="cv", number=10)
model <- caret::train(as.factor(y)~., data=newdatarf, trControl=train_control, method="svmRadial")
# summarize results
print(model)
#QDA after RF var sel
require(MASS)
qda.fit=qda(as.factor(y)~.,data=newdatarf)
set.seed(5)
train_control <- trainControl(method="cv", number=10)
model <- caret::train(as.factor(y)~., data=newdatarf, trControl=train_control, method="qda")
print(model)
###### LASSO
# L1-penalized logistic regression for feature selection; classifiers are
# then refit on the variables with non-zero LASSO coefficients.
library(glmnet)
p=model.matrix((y~.),data)[,-1] # take out the first column which are all 1's for intercept
y=data$y
dim(p)
set.seed(5)
glmmod <- glmnet(p, y=as.factor(y), alpha=1, family="binomial")
summary(glmmod)
# Plot variable coefficients vs. shrinkage parameter lambda.
plot(glmmod, xvar="lambda",label=TRUE)
cv.lasso=cv.glmnet(p,y,alpha = 1, family = "binomial")
summary(cv.lasso)
plot(cv.lasso)
lasso.best.lambda=cv.lasso$lambda.min # best lambda value corresponding to min cv.error
cv.lasso$lambda.1se # lambda corresponding to 2nd dashed line - 1 standard error
lasso.best.lambda
# It is to be noted that the coefficients of some of the predictors are zero
predict(glmmod, s=lasso.best.lambda, type="coefficients")
# Refit at the CV-optimal lambda only.
model <- glmnet(p, y, alpha = 1, family = "binomial",
lambda = cv.lasso$lambda.min)
summary(model)
# Select non zero coefficients after Lasso
tmp_coef <- nonzeroCoef(model$beta, bystep = FALSE)
selected_var <- p[,tmp_coef] # Contains only the non-zero coefficients
newdata <- data.frame(selected_var,y) # New dataframe containng the selected variables after LASSO
#Logistic regression after LASSO
glm.fit=glm(as.factor(y)~.,data=newdata,family=binomial)
summary(glm.fit)
#Cross validation with LR
set.seed(5)
glmcv<-cv.glm(data = newdata, glm.fit,K=5)
glmcv$delta #CV error
1-glmcv$delta # CV Accuracy
#LDA after LASSO
require(MASS)
lda.fit=lda(as.factor(y)~.,data=newdata)
plot(lda.fit)
set.seed(5)
train_control <- trainControl(method="cv", number=10)
model <- caret::train(as.factor(y)~., data=newdata, trControl=train_control, method="lda")
print(model)
#QDA after LASSO
require(MASS)
qda.fit=qda(as.factor(y)~.,data=newdata)
set.seed(5)
train_control <- trainControl(method="cv", number=10)
model <- caret::train(as.factor(y)~., data=newdata, trControl=train_control, method="qda")
print(model)
#SVM after LASSO
library(e1071)
svmfit=svm(newdata$y~.,data=newdata, kernel="linear", cost=1)
set.seed(5)
model <- caret::train(as.factor(y)~., data=newdata, trControl=train_control, method="svmLinear")
print(model)#CV Accuracy linear
set.seed(5)
model <- caret::train(as.factor(y)~., data=newdata, trControl=train_control, method="svmRadial")
print(model) #CV Accuracy radial
## Feature selection 3
# Wrapper-based sequential feature selection via mlr (sfs / sffs), then
# final model fit and prediction on xnew (loaded from class_data.RData).
library (FSelector)
trainTask <- makeClassifTask(data = data,target = "y",positive = "1")
trainTask
trainTask <- normalizeFeatures(trainTask,method = "standardize")
#Sequential Forward Search - SVM Radial
library (mlr)
library(dplyr)
ctrl = makeFeatSelControlSequential(method = "sfs", alpha = 0.02)
rdesc = makeResampleDesc("CV", iters = 10)
sfeats = selectFeatures(learner = "classif.svm", task = trainTask, resampling = rdesc, control = ctrl,
show.info = FALSE) # default is svm radial
sel_var_sfs <- data %>% select(one_of(sfeats$x))
set.seed(5)
model <- caret::train(data.frame(sel_var_sfs), as.factor(y), trControl=train_control, method="svmRadial")
print(model) #86.52 %
#Sequential Forward Method-knn
ctrl = makeFeatSelControlSequential(method = "sfs", alpha = 0.02)
rdesc = makeResampleDesc("CV", iters = 10)
sfeats_knn = selectFeatures(learner = "classif.knn", task = trainTask, resampling = rdesc, control = ctrl,
show.info = FALSE)
sel_var_sfs_knn <- data %>% select(one_of(sfeats_knn$x))
set.seed(5)
model <- caret::train(data.frame(sel_var_sfs_knn), as.factor(y), trControl=train_control, method="knn")
print(model)
#Sequential Forward Floating Search - SVM Radial
ctrl = makeFeatSelControlSequential(method = "sffs", alpha = 0.02)
rdesc = makeResampleDesc("CV", iters = 10)
sfeats_sffs = selectFeatures(learner = "classif.svm", task = trainTask, resampling = rdesc, control = ctrl,
show.info = FALSE)
sel_var_sff <- data %>% select(one_of(sfeats_sffs$x))
set.seed(5)
model <- caret::train(data.frame(sel_var_sff), as.factor(y), trControl=train_control, method="svmRadial")
print(model) # 86.77
#Sequential Floating Forward Search - LDA/QDA - Only 66.78 %
#Sequential Floating Forward Method - KNN
ctrl = makeFeatSelControlSequential(method = "sffs", alpha = 0.02)
rdesc = makeResampleDesc("CV", iters = 10)
sfeats_sffs_knn = selectFeatures(learner = "classif.knn", task = trainTask, resampling = rdesc, control = ctrl,
show.info = FALSE)
sel_var_sff_knn <- data %>% select(one_of(sfeats_sffs_knn$x))
set.seed(5)
model <- caret::train(data.frame(sel_var_sff_knn), as.factor(y), trControl=train_control, method="knn")
print(model)
# NOTE(review): test_err is computed from the last (KNN) model while the
# final predictions below use the sffs-SVM feature set -- confirm this is
# the intended error estimate to save.
test_err <- 1 - max(model$results$Accuracy) # Testing error estimate
# Generating y_new
data_final <- data.frame(sel_var_sff,y)
svm_final = svm(as.factor(y)~.,data=data_final, kernel="radial", cost=1)
ynew=predict(svm_final, xnew)
ynew
save(ynew,test_err,file="Sup_results.RData")
#################################################################
##################################################################
##### PART 2- UNSUPERVISED LEARNING ##########
# Dimension reduction (t-SNE, Isomap, Sammon, PCA) on cluster_data, then
# cluster-count selection by elbow / WSS diagnostics.
load("/Users/priyanka/Documents/Course works/Data Mining /Final Project/cluster_data.RData")
dim(y)
# Heirarchial Clustering - to visualise the dendrogram
library(mclust)
hc.complete=hclust(dist(y),method="complete")
plot(hc.complete)
## FEATURE SELECTION ##
#tSNE feature selection
library(Rtsne)
set.seed(1)
tsne <- Rtsne(scale(y), dims = 2, perplexity=30, verbose=TRUE, max_iter = 1000)
plot(tsne$Y[,1],tsne$Y[,2]) # Selected features
#K-means with tsne variables with 5 clusters - Better visualization
tsne_x<-as.matrix(tsne$Y)
set.seed(5)
km.out=kmeans(tsne_x,5,nstart=15)
km.out$cluster
plot(tsne_x,col=km.out$cluster,cex=2,pch=1,lwd=2,xlab='t-SNE feature 1',ylab='t-SNE feature 2', main='k means clustering on t-SNE features')
#Isomap feature selection
library(vegan)
dis <- vegdist(y) # generating dissimiliarities
set.seed(5)
simData_dim2_IM = isomap(dis, dims=10, k=3)
dim(simData_dim2_IM$points ) # Selected features
## Sammon mapping feature selection
library(Rdimtools)
set.seed(5)
sam <- do.sammon(y, ndim = 5, preprocess = c("null", "center", "scale",
"cscale", "decorrelate", "whiten"), initialize = c("random", "pca"))
sam$Y # Selected features
#PCA
set.seed(5)
y.pca <- prcomp(y, center = TRUE,scale. = TRUE)
summary(y.pca)
library(devtools)
library(ggbiplot)
biplot(y.pca,scale =0)
# NOTE(review): $sde resolves to y.pca$sdev only through partial matching
# of `$` -- fragile; spelling it out as y.pca$sdev would be safer.
std_dev <- y.pca$sde
#compute variance
pr_var <- std_dev^2
prop_varex <- pr_var/sum(pr_var)
#scree plot
plot(prop_varex, xlab = "Principal Component",
ylab = "Proportion of Variance Explained",
type = "b")
#cumulative scree plot
plot(cumsum(prop_varex), xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance Explained",
type = "b")
str(y.pca)
y.pca$x # Principle components to be selected
## CLUSTER SELECTION METHODS ##
# Elbow method - inbuilt package
library(factoextra)
set.seed(5)
fviz_nbclust(y.pca$x[,1:10], kmeans, method = "wss") + # Change the feature set depending on the method PCA/tsne/sammon
geom_vline(xintercept = 5, linetype = 2)+
labs(subtitle = "Elbow method - PCA - 10 componets")
#Elbow Method for finding the optimal number of clusters
# Within sum of squares (WSS) is the measure
set.seed(123)
# Compute and plot wss for k = 2 to k = 15.
k.max <- 15
data_elbow <- as.matrix(tsne$Y )
wss <- sapply(1:k.max,
function(k){kmeans(data_elbow, k, nstart=50,iter.max = 15 )$tot.withinss})
wss
plot(1:k.max, wss,
type="b", pch = 19, frame = FALSE,
xlab="Number of clusters K",
ylab="Total within-clusters sum of squares")
kmeansAIC = function(fit){
  # AIC/BIC-style scores for a kmeans fit: the total within-cluster sum of
  # squares penalized by (dimensions x clusters) effective parameters.
  # Returns a one-row data frame with columns AIC and BIC.
  n_dims <- ncol(fit$centers)
  n_obs <- length(fit$cluster)
  n_clusters <- nrow(fit$centers)
  distortion <- fit$tot.withinss
  penalty <- n_dims * n_clusters
  data.frame(AIC = distortion + 2 * penalty,
             BIC = distortion + log(n_obs) * penalty)
}
# Evaluate the AIC/BIC scores over candidate cluster counts and plot BIC.
K_val = c(2,3,4,5,6,7)
AIC <- rep(0, length(K_val))
BIC <- rep(0, length(K_val))
set.seed(1)
for (j in 1:length(K_val)){
# k-means on the first 50 principal components of y.pca (computed above).
fit <- kmeans(x = y.pca$x[,1:50] ,centers = K_val[j])
AIC_BIC<-kmeansAIC(fit)
AIC[j]<-AIC_BIC$AIC
BIC[j]<-AIC_BIC$BIC
}
plot(K_val, BIC,
type="b", pch = 19, frame = FALSE,
xlab="Number of clusters K",
ylab="BIC",main="PCA 50 components")
# Vertical guides at the candidate optima K = 4 and K = 5.
abline(v=c(4,5),col=c("blue","red"))
|
586d5d315c6ee68e9555867e7ebb89773ce0cf60
|
04c5806551ee9fa11c02ab90b2e4062f8330606f
|
/cachematrix.R
|
6985e0a2d8d45d86630997d81b97835247ac7846
|
[] |
no_license
|
OrenMorav/ProgrammingAssignment2
|
0a29e2100dfe70d8826ea3a34a7894a8e458a1ed
|
1e9156a455a0cef02666fa5c221d622bf87c3d03
|
refs/heads/master
| 2021-01-14T08:50:49.665959
| 2015-06-20T20:22:26
| 2015-06-20T20:22:26
| 37,367,910
| 0
| 0
| null | 2015-06-13T10:47:18
| 2015-06-13T10:47:16
| null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
cachematrix.R
|
## Implementation of a Cached matrix
## The implementation allows saving the inversion result to object so that upon repeated calls,
## if the data hadn't changed, the value from cache will be returned, saving repeat computations.
## Create a cache matrix object
## input: an inversible matrix
## output: a list of functions for getting and setting the matrix and getting and setting the inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cached copy of its inverse.
  # Returns a list of accessors (set/get/setInv/getInv); storing a new
  # matrix via set() invalidates the cached inverse.
  cached_inverse <- NULL
  store <- function(value) {
    x <<- value
    cached_inverse <<- NULL
  }
  fetch <- function() x
  store_inverse <- function(inverse) cached_inverse <<- inverse
  fetch_inverse <- function() cached_inverse
  list(set = store, get = fetch,
       setInv = store_inverse,
       getInv = fetch_inverse)
}
## Inverse and cache a matrix
## input: an inversible matrix created using makeCacheMatrix
## output: the inverse of the input matrix
## notes: 1. The input matrix must be inversible. 2. the result is cached for efficiency.
cacheSolve <- function(x, ...) {
  # Return the inverse of the cached-matrix object x (see makeCacheMatrix),
  # computing it with solve() only on the first call and reusing the stored
  # result afterwards. Extra arguments are forwarded to solve().
  cached <- x$getInv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
ba757fc23a5f27359b8a7de00cb97008b9888b3e
|
3fc0f5ceb2d4cac7611f75278db5620047d5ef5f
|
/03-summarize-tables-mssql.R
|
85f047d2b0332938d20696785a07f6127946140a
|
[] |
no_license
|
dbmi-pitt/docker-proteus
|
a511f38ab34995b36bd4091bcec7e8ca9c70238d
|
691fc1946620dcfc9edc655d76f1edca5f8d9c49
|
refs/heads/master
| 2020-12-12T14:13:51.911117
| 2020-04-08T18:00:06
| 2020-04-08T18:00:06
| 234,147,200
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
03-summarize-tables-mssql.R
|
# Load libraries
library(tidyverse)
library(magrittr)
library(dbplyr)
# Source in config and function objects (defines generate_summary())
source('/app/01-functions.R')
# Establish connection to the SQL Server database; credentials come from
# environment variables so the container image carries no secrets.
conn <- DBI::dbConnect(odbc::odbc(),
                       Driver = "ODBC Driver 17 for SQL Server",
                       Server = Sys.getenv("server"),
                       uid = Sys.getenv("user"),
                       pwd = Sys.getenv("pass"),
                       database = Sys.getenv("db"))
cdm_schema <- Sys.getenv("cdm_schema")
cdm_version <- Sys.getenv("cdm_version")
# Declare list of tables to characterize
table_list <- c("CONDITION", "DEATH", "DEATH_CAUSE", "DEMOGRAPHIC", "DIAGNOSIS",
  "DISPENSING", "ENCOUNTER", "ENROLLMENT", "LAB_RESULT_CM", "MED_ADMIN", "OBS_CLIN",
  "OBS_GEN", "PCORNET_TRIAL", "PRESCRIBING", "PROCEDURES", "PRO_CM",
  "PROVIDER", "VITAL", "IMMUNIZATION")
# Create directory structure to store reports
dir.create('/app/summaries/CSV', recursive = TRUE)
dir.create('/app/summaries/HTML')
# Loop through the tables and run the data characterization for each.
# BUG FIX: the original passed `version = version`, but no local `version`
# object exists in this script -- that silently forwarded base R's
# `version` list instead of the CDM version read from the environment.
# NOTE(review): cdm_schema is read above but schema = NULL is still passed;
# confirm whether generate_summary() should receive cdm_schema for mssql.
for (i in table_list) {
  generate_summary(conn, backend = "mssql", version = cdm_version, schema = NULL, table = i)
}
|
a1826e6689002eadb12bf501e7e72bf168570a73
|
d5e9b2f409d41d39dcd662545bca5db5422a3466
|
/hdimDB/man/checkDb.Rd
|
998097a145311a9cf3c1eefe4950c7cecb656e8a
|
[] |
no_license
|
hawaiiDimensions/db
|
09d4c6bd37e065d00e80f0be8e7a78267cbb1ad5
|
0edc09936c69bfd240880cde1ea2b779a6fc7d1f
|
refs/heads/master
| 2021-04-09T17:18:34.491690
| 2017-07-05T19:16:51
| 2017-07-05T19:16:51
| 51,867,693
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 689
|
rd
|
checkDb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkDb.R
\name{checkDb}
\alias{checkDb}
\title{Checks Dimensions Database for errors}
\usage{
checkDb(db, match = "index")
}
\arguments{
\item{db}{The database to be checked}
\item{match}{The autocorrection method to be used with misspelled entries}
}
\value{
Dataframe with HDIM identifier, error type, verbatim entry, and suggested correction.
}
\description{
\code{checkDb} processes the online database and returns a data frame of errors and suggested corrections.
}
\details{
Developed specifically for the Dimensions in Biodiversity Evolab Database.
}
\author{
Edward Greg Huang <edwardgh@berkeley.edu>
}
|
a05d4c7886a6ed5d94954fc009ef94c60dd259ed
|
db626fd1391e1d349d022d54606bf635e858d5c1
|
/Summary/var_imp_graph.R
|
bd535488938ab6b668e55a7a86a63213ea087cff
|
[] |
no_license
|
som-shahlab/HTE_SL_Prediction
|
4aa0db55fa398f023ae9eef0532352ccb5abd8aa
|
600f6d9be5417c536d8e966f6e72da42634eaea9
|
refs/heads/master
| 2023-08-30T02:11:45.697716
| 2021-11-14T00:40:41
| 2021-11-14T00:40:41
| 408,545,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,572
|
r
|
var_imp_graph.R
|
# Compare variable importance across four methods (BART, SF, GBM, Deepsurv)
# for the CVD and SAE outcomes, plotting each method's importance for BART's
# top-15 variables side by side, and save both panels to one PNG.
# BUG FIX: ggplot2 and gridExtra were never loaded even though ggplot(),
# grid.arrange() and arrangeGrob() are called below; in a fresh R session
# the original script stopped with "could not find function" errors.
library(ggplot2)
library(gridExtra)
# NOTE(review): setwd() with an absolute user path makes this script
# non-portable; all file paths below are relative to this directory.
setwd("C:/Users/cryst/Documents/Stanford Postdoc/NHLBI R01 Aim 2/Analyses Stanford Team/Analysis Results/Tuned Results")
# Per-method importance tables; presumably columns are <outcome>_vars and
# <outcome>_imp for each outcome -- confirm against the CSV headers.
bart_imp_vars <- read.csv("IV_imp_bart.csv")
deepsurv_imp_vars <- read.csv("IV_imp_deepsurv.csv")
gbm_imp_vars <- read.csv("IV_imp_gbm.csv")
sf_imp_vars <- read.csv("IV_imp_sf.csv")
# CVD: keep each method's rows for BART's top-15 CVD variables
vars <- bart_imp_vars$cvd_vars[1:15]
bart_imp_vars_sub <- bart_imp_vars[bart_imp_vars$cvd_vars %in% vars,1:2]
deepsurv_imp_vars_sub <- deepsurv_imp_vars[deepsurv_imp_vars$cvd_vars %in% vars,1:2]
gbm_imp_vars_sub <- gbm_imp_vars[gbm_imp_vars$cvd_vars %in% vars,1:2]
sf_imp_vars_sub <- sf_imp_vars[sf_imp_vars$cvd_vars %in% vars,1:2]
cvd_data <- rbind(bart_imp_vars_sub, sf_imp_vars_sub, gbm_imp_vars_sub, deepsurv_imp_vars_sub)
cvd_data$Methods <- c(rep("BART",15),rep("SF",15),rep("GBM",15),rep("Deepsurv",15))
# Fix the x-axis ordering to BART's top-15 order
cvd_data$cvd_vars <- as.character(cvd_data$cvd_vars)
cvd_data$cvd_vars <- factor(cvd_data$cvd_vars, levels = cvd_data$cvd_vars[1:15])
p1 <- ggplot(cvd_data, aes(x = cvd_vars, y = cvd_imp, group = Methods, fill = Methods))+
  scale_fill_manual(values=c("bisque4", "darkgoldenrod1","cadetblue3","darkgoldenrod4"))+
  geom_bar(stat = "identity", width = 0.5, position = "dodge")+
  theme_bw()+
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank()) +
  theme(axis.text.x = element_text(angle = 90))+
  theme(axis.title.x=element_text(size=11, vjust=2)) +
  theme(axis.title.y=element_text(size=11, angle=90,vjust=3)) +
  theme(plot.title=element_text(size=15, vjust=3, hjust=0.5))+
  xlab("Variables")+ylab("Variable Importance for CVD Outcome");print(p1)
# SAE: same construction using BART's top-15 SAE variables
vars <- bart_imp_vars$sae_vars[1:15]
bart_imp_vars_sub <- bart_imp_vars[bart_imp_vars$sae_vars %in% vars,3:4]
deepsurv_imp_vars_sub <- deepsurv_imp_vars[deepsurv_imp_vars$sae_vars %in% vars,3:4]
gbm_imp_vars_sub <- gbm_imp_vars[gbm_imp_vars$sae_vars %in% vars,3:4]
sf_imp_vars_sub <- sf_imp_vars[sf_imp_vars$sae_vars %in% vars,3:4]
sae_data <- rbind(bart_imp_vars_sub, sf_imp_vars_sub, gbm_imp_vars_sub, deepsurv_imp_vars_sub)
sae_data$Methods <- c(rep("BART",15),rep("SF",15),rep("GBM",15),rep("Deepsurv",15))
sae_data$sae_vars <- as.character(sae_data$sae_vars)
sae_data$sae_vars <- factor(sae_data$sae_vars, levels = sae_data$sae_vars[1:15])
p2 <- ggplot(sae_data, aes(x = sae_vars, y = sae_imp, group = Methods, fill = Methods))+
  scale_fill_manual(values=c("bisque4", "darkgoldenrod1","cadetblue3","darkgoldenrod4"))+
  geom_bar(stat = "identity", width = 0.5, position = "dodge")+
  theme_bw()+
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank()) +
  theme(axis.text.x = element_text(angle = 90))+
  theme(axis.title.x=element_text(size=11, vjust=2)) +
  theme(axis.title.y=element_text(size=11, angle=90,vjust=3)) +
  theme(plot.title=element_text(size=15, vjust=3, hjust=0.5))+
  theme(legend.position="bottom")+
  xlab("Variables")+ylab("Variable Importance for Severe Adverse Events");print(p2)
# Stack the two panels (legend only on the bottom panel) and write to PNG.
filename <- paste0("./vars_imp.png")
png(filename, width = 8, height = 10, units = 'in', res = 300)
print(grid.arrange(arrangeGrob(p1 + theme(legend.position="none"), p2, nrow=2, ncol=1), nrow=2, heights=c(10,1)))
dev.off()
|
0179160b580060c3f83e73f0eaaa4f2bed7b0674
|
1169fafd807a7597a4548e75ccdce163cd01f060
|
/code/emas.r
|
28bcfb32529fc9c258a4ed6841de259d616b7b89
|
[] |
no_license
|
ransnit/FinProj
|
8bda11f9e4cb58e7e0ff88728ee9ef7e0bd4a4db
|
01413b601fbe844874a8b01ab86e4e027c1a9bf2
|
refs/heads/master
| 2021-01-15T10:42:57.824523
| 2014-12-29T09:09:34
| 2014-12-29T09:09:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
emas.r
|
source("config.r")
init.ema <- function(l, values)
{
  # Exponential moving average (EMA) of `values` with span `l`.
  # Uses the standard smoothing factor alpha = 2/(l+1); the series is
  # seeded with the first observation.
  # BUG FIX: the original looped over 2:length(values), which evaluates to
  # c(2, 1) when length(values) == 1 -- the loop then ran backwards,
  # extended the result vector and read result[0]. seq_along(values)[-1]
  # is empty for short inputs, so length-0/length-1 inputs are now safe.
  alpha <- 2 / (l+1)
  result <- rep(NA_real_, length(values))
  if (length(values) == 0) return(result)
  result[1] <- values[1]
  for (j in seq_along(values)[-1])
    result[j] <- (1-alpha)*result[j-1] + alpha*values[j]
  return (result)
}
init.emstd <- function(l, values, means)
{
  # Exponentially weighted moving standard deviation of `values`, given the
  # matching EMA series `means` (e.g. from init.ema) and span `l`.
  # The recursion updates the variance as a convex combination of the
  # previous variance and the cross term (v - prev_mean)*(v - curr_mean).
  # BUG FIX: the original looped over 2:length(values), which runs
  # backwards (2, 1) when length(values) == 1 and corrupts the result;
  # seq_along(values)[-1] is empty for short inputs.
  stopifnot(length(values) == length(means))
  alpha <- 2 / (l+1)
  result <- rep(NA_real_, length(values))
  if (length(values) == 0) return(result)
  result[1] <- 0
  for (j in seq_along(values)[-1])
  {
    prev_var <- result[j-1]^2
    prev_mean <- means[j-1]
    curr_mean <- means[j]
    curr_val <- values[j]
    result[j] <- sqrt((1-alpha)*prev_var + alpha*(curr_val - prev_mean)*(curr_val - curr_mean))
  }
  return (result)
}
|
2848155097c504a91f77e85a989cce6be9656cd6
|
362f41415fbc3f859501ec89e143bb02afe8e895
|
/scalar-on-image - paper.R
|
6756d1cd86b14fc15d7ef7cd45c9714280caaa0f
|
[] |
no_license
|
royarkaprava/PING
|
a460d736319cb6e2145af74cb6493c808d852249
|
c1ee6713c463559134d64686af4b9db68af68fc8
|
refs/heads/master
| 2020-04-18T19:16:21.203738
| 2019-01-26T16:14:52
| 2019-01-26T16:14:52
| 167,708,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,114
|
r
|
scalar-on-image - paper.R
|
# Scalar-on-image regression simulation (paper experiments): setup of
# parallel workers and packages used throughout the script.
library(mvtnorm)      # rmvnorm: multivariate normal draws
library(geoR)         # spatial covariance utilities (matern)
library(matrixStats)  # rowProds for the product-of-factors coefficient
library(doParallel)
library(foreach)
library(fields)       # Exponential covariance for simulating X
registerDoParallel(20)  # 20 parallel workers for the foreach replications
# NOTE(review): hard-coded cluster path; adjust before running elsewhere.
setwd("/mnt/home/aroy2")
# Map unconstrained parameters theta to the constrained Matern scale:
# total variance (exp), nugget proportion (inverse logit), range (exp),
# smoothness (exp). With nugget = FALSE the proportion is forced to 1.
theta2mat <- function(theta,nugget=TRUE){
  c(exp(theta[1]),
    # IDIOM FIX: plain if/else on the scalar flag instead of ifelse(),
    # which is vectorized, slower, and strips attributes.
    if (nugget) 1/(1+exp(-theta[2])) else 1,
    exp(theta[3]),
    exp(theta[4]))
}
# Inverse of theta2mat: map constrained Matern parameters back to the
# unconstrained scale (log for variance/range/smoothness, logit for the
# nugget proportion). With nugget = FALSE the nugget slot is +Inf, the
# logit of a proportion of 1.
mat2theta <- function(mat,nugget=TRUE){
  c(log(mat[1]),
    # IDIOM FIX: plain if/else on the scalar flag instead of ifelse().
    if (nugget) log(mat[2]/(1-mat[2])) else Inf,
    log(mat[3]),
    log(mat[4]))
}
simulateMatern <- function(theta, d){
  # Draw one realization of a Gaussian process whose covariance is a
  # Matern kernel plus a nugget. theta is on the unconstrained scale
  # (see theta2mat); d is a distance matrix.
  pars <- theta2mat(theta)
  nugget_var <- pars[1] * (1 - pars[2])  # non-spatial (nugget) variance
  partial_sill <- pars[1] * pars[2]      # spatially correlated variance
  covmat <- partial_sill * matern(d, pars[3], pars[4])
  diag(covmat) <- diag(covmat) + nugget_var
  rmvnorm(1, sigma = covmat)
}
Maternvar <- function(theta, d){
  # Build the full covariance matrix (Matern spatial part plus a nugget on
  # the diagonal) for unconstrained parameters theta and distance matrix d.
  pars <- theta2mat(theta)
  nugget_var <- pars[1] * (1 - pars[2])
  partial_sill <- pars[2] * pars[1]
  covmat <- partial_sill * matern(d, pars[3], pars[4])
  diag(covmat) <- diag(covmat) + nugget_var
  return(covmat)
}
# Gibbs update for the l-th coefficient column of Beta given the data y,
# coefficient-prior precision IVarc, error precision IVare, covariate array
# X and optional weights B. Draws from the conditional multivariate normal.
# NOTE(review): this function reads the globals n (default length of B) and
# m (number of subjects) from the enclosing environment instead of taking
# them as arguments -- confirm they are defined wherever this is called.
# NOTE(review): the hard-coded column index c(2:11)[-l] ties this update to
# a fixed 10-column layout of Beta; verify against the calling code.
updateBeta <- function(l, y, IVarc, Beta, IVare, X, B = NULL){
if(is.null(B)){B <- rep(1, n)}
# Partial fit from all coefficient columns except l (per subject).
mean <- apply(X, 3, function(x){rowSums(Beta[, c(2:11)[-l]]*x[, -l])})
# Accumulate the conditional mean contribution over the m subjects.
Beta.mean <- rowSums(sapply(1:m, function(k){diag((X[, l,k])*(B))%*%IVare%*%(y[, k]-mean[, k])})) #rowSums((y - mean)*X[, l,])*(B)
Beta.ivar <- lapply(1:m, function(k){diag((X[,l,k])*B)%*%IVare%*%diag((X[,l,k])*B)})
Beta.ivar <- Reduce('+', Beta.ivar)+ IVarc
Beta.var <- solve(Beta.ivar)
# Symmetrize to guard against numerical asymmetry before sampling.
Beta.var <- (Beta.var + t(Beta.var))/2
Beta.mean <- Beta.var %*% Beta.mean
gen <- rmvnorm(1, Beta.mean, Beta.var)
return(gen)
}
# Simulation study: for 10 replications x 2 covariate ranges x 2 noise
# levels, generate a scalar-on-image regression dataset and run a 2000-step
# MCMC sampler for the image coefficient, saving the posterior draws.
foreach(repli = 1:10) %dopar% {
foreach(nuind = 1:2) %dopar% {
foreach(vind = 1:2) %dopar% {
q=3
vx <- c(3, 6)
var <- c(.1, 2)
n1 <- 20
n2 <- 20
n <- 100
set.seed(8)
# Grid of pixel coordinates for the n1 x n2 image.
A1 <- rep(1:n2, each = n1)
A2 <- rep(1:n1, n2)
tempA <- cbind(A2, A1)
#Ap <- matrix(rep(array(t(tempA)), n3), ncol=2, byrow = T)
Ap <- tempA #cbind(Ap, rep(1:n3, each=n1*n2))
m <- 20 # Number of subjects
RE <- TRUE # Generate data with random effects?
pri.mn=c(0,0,0,0)
pri.sd=c(10,2,10,1)
L=1
MHY=.01
# Simulate n image covariates with exponential spatial correlation.
X <- matrix(0, n, n1*n2)
loc <- Ap
dis <- as.matrix(dist(loc))
nux <- vx[nuind]
xvar <- Exponential(dis, range = nux)
for(i in 1:n){
X[i, ] <- rmvnorm(1,sigma = xvar)
}
h <- 2 #round(runif(1,0,2)) + 1
u <- matrix(runif(h*2), nrow=2)
# d <- .4*exp(-5*rowSums((Ap-matrix(c(n1*u[1,1], n2*u[1,2]), nrow = nrow(Ap), 2, byrow=T) )^2)/50)
if(h == 2){
d <- 1*exp(-5*rowSums((Ap-matrix(c(n1*u[1,1], n2*u[1,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50) + 1*exp(-5*rowSums((Ap-matrix(c(n1*u[2,1], n2*u[2,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50)
}
# if(h == 3){
#   d <- .4*exp(-5*rowSums((Ap-matrix(c(n1*u[1,1], n2*u[1,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50) +
#     .4*exp(-5*rowSums((Ap-matrix(c(n1*u[2,1], n2*u[2,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50) + .4*exp(-5*rowSums((Ap-matrix(c(n1*u[3,1], n2*u[3,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50)
# }
# True coefficient image: 5 fixed Gaussian bumps, thresholded to zero below
# 0.1 so the truth is sparse. The h/d construction above is overwritten here.
h <- 5 #round(runif(1,0,2)) + 1
u <- matrix(c(.2,.8,.2,.8,.5,.8,.2,.2,.8,.5), nrow=5)
# d <- .4*exp(-5*rowSums((Ap-matrix(c(n1*u[1,1], n2*u[1,2]), nrow = nrow(Ap), 2, byrow=T) )^2)/50)
# if(h == 2){
#   d <- .4*exp(-5*rowSums((Ap-matrix(c(n1*u[1,1], n2*u[1,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50) + .4*exp(-5*rowSums((Ap-matrix(c(n1*u[2,1], n2*u[2,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50)
# }
# if(h == 3){
#   d <- .4*exp(-5*rowSums((Ap-matrix(c(n1*u[1,1], n2*u[1,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50) +
#     .4*exp(-5*rowSums((Ap-matrix(c(n1*u[2,1], n2*u[2,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50) + .4*exp(-5*rowSums((Ap-matrix(c(n1*u[3,1], n2*u[3,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50)
# }
d <- 0
for(i in 1:5){
d <- d + 2*exp(-20*rowSums((Ap-matrix(c(n1*u[i,1], n2*u[i,2]), nrow = nrow(Ap), 2, byrow=T))^2)/50)
}
B0 <- d
B0[which(B0<1e-1)]<- 0
sigma0 <- var[vind]
set.seed(repli)
# Scalar responses: linear in the vectorized image plus Gaussian noise.
Y <- rnorm(n, mean = X%*%B0, sd=sigma0)
init.theta= c(0,2,2,0)#c(0.3216614, 0.1695090, -1.2245358, 1.0982590)#c(0,2,2,0)
theta <- init.theta
sigma = 1
isdmat <- solve(Maternvar(theta, dis))
# Initialize Beta from its conditional normal, then factor it into q
# columns (Betac) whose row-products reproduce Beta.
Beta.mean <- t(X) %*% Y / sigma^2
Beta.ivar <- t(X) %*% X / sigma^2 + isdmat
Beta.var <- solve(Beta.ivar)
Beta.var <- (Beta.var + t(Beta.var))/2
Beta.mean <- Beta.var %*% Beta.mean
temp <- rmvnorm(1, Beta.mean, Beta.var)
temp1 <- sign(temp)*(abs(temp))^(1/q)
Betac <- matrix(rep(temp1, q), ncol = q)
Beta <- array(temp)
Total_itr <- 2000
itr = 0
theta_p <- list()
Beta_p <- list()
sigma_p <- rep(0, Total_itr)
acceptedthetano_p <- rep(0, Total_itr)
tol=0.000001
sdl <- 1e-1
alpha0 <- 0.1
beta0 <- 0.1
# Main MCMC loop: Gibbs updates for sigma and the Betac factors, plus a
# Metropolis-Hastings step for the covariance parameters theta.
while(itr < Total_itr){
itr <- itr + 1
# sigma^2 | rest ~ inverse-gamma.
al <- alpha0 + n / 2
be <- beta0 + sum((Y-X%*%Beta) ^ 2) / 2
sigma <- sqrt(1 / rgamma(1, al, be))
sigma_p[itr] <- sigma
if(q>1){
# Update each factor column of Betac conditionally on the others.
for(k in 1:q){
B <- Betac[, -k]
if(q > 2){
B <- rowProds(Betac[, -k])
}
ivar <- isdmat*(k==1) + isdmat*exp(theta[1])*(k>1)
X1 <- X*matrix(B, n, n1*n2, byrow = T)
Beta.mean <- t(X1) %*% Y / sigma^2
Beta.ivar <- t(X1) %*% X1 / sigma^2 + ivar
Beta.var <- solve(Beta.ivar)
Beta.var <- (Beta.var + t(Beta.var))/2
Beta.mean <- Beta.var %*% Beta.mean
Betac[, k] <- rmvnorm(1, Beta.mean, Beta.var)
}
temp <- array(rowProds(Betac))
Beta <- temp
# MH step for theta: random-walk proposal on theta[-1]; theta[1]
# (log variance) is drawn from its gamma full conditional.
thetaA <- theta
cant <- rep(0, 4)
cant[-1] <- thetaA[-1] + rnorm(3,sd = sdl) #MH[2]*tCthetaA%*%
cansd <- solve(Maternvar(cant, dis))
psd <- isdmat #Maternvar(thetaA, dis)
y <- Betac
bb <- (t(y[, 1])%*%(cansd)%*%(y[, 1]))/2+.1
cant1 <- -log(rgamma(1,(n1*n2)/2+.1,bb))
cant[1] <- cant1
cansd <- cansd/exp(cant1)
BB <- exp(thetaA[1])*(t(y[, 1])%*%(psd)%*%(y[, 1]))/2+.1
term1 <- t(y[, 2])%*%psd%*%y[, 2]*exp(thetaA[1]) / 2
if(q > 2){
term1 <- sum(apply(y[, 2:q], 2, function(x){t(x)%*%psd%*%x*exp(thetaA[1])}))/2
}
term2 <- t(y[, 2])%*%cansd%*%y[, 2]*exp(cant[1]) / 2
if(q > 2){
term2 <- sum(apply(y[, 2:q], 2, function(x){t(x)%*%cansd%*%x*exp(cant[1])}))/2
}
# Log posterior at current and candidate theta, plus proposal densities.
curll <- 0.5*as.numeric(determinant(psd)$modulus) + 0.5*(q-1)*as.numeric(determinant(psd*exp(thetaA[1]))$modulus)-
(t(y[, 1])%*%(psd)%*%(y[, 1]))/2 - term1 +
sum(dnorm(thetaA[-1],pri.mn[-1],pri.sd[-1],log=TRUE))+
dgamma(exp(-thetaA[1]),.1,.1,log=TRUE)
canll <- 0.5*as.numeric(determinant(cansd)$modulus) + 0.5*(q-1)*as.numeric(determinant(cansd*exp(cant1))$modulus)-
(t(y[, 1])%*%(cansd)%*%(y[, 1]))/2 - term2 +
sum(dnorm(cant[-1],pri.mn[-1],pri.sd[-1],log=TRUE))+
dgamma(exp(-cant[1]),.1,.1,log=TRUE)
Q1 <- dgamma(exp(-thetaA[1]),(n1*n2)/2+.1,BB,log=TRUE)
Q2 <- dgamma(exp(-cant[1]),(n1*n2)/2+.1,bb,log=TRUE)
R <- canll-curll+Q1-Q2
if(!is.na(R)){if(log(runif(1))< R){
acceptedthetano_p[itr] <- 1
theta <- cant
isdmat <- cansd
}}
}
if(q==1){
# Single-factor case: Beta drawn directly from its conditional normal.
Beta.mean <- t(X) %*% Y / sigma^2
Beta.ivar <- t(X) %*% X / sigma^2 + isdmat
Beta.var <- solve(Beta.ivar)
Beta.var <- (Beta.var + t(Beta.var))/2
Beta.mean <- Beta.var %*% Beta.mean
temp <- array(rmvnorm(1, Beta.mean, Beta.var))
Beta <- temp
thetaA <- theta
cant <- rep(0, 4)
cant[-1] <- thetaA[-1] + rnorm(3,sd = sdl) #MH[2]*tCthetaA%*%
cansd <- solve(Maternvar(cant, dis))
psd <- isdmat #Maternvar(thetaA, dis)
y <- Beta
bb <- (t(y)%*%(cansd)%*%(y))/2+.1
cant1 <- -log(rgamma(1,(n1*n2)/2+.1,bb))
cant[1] <- cant1
cansd <- cansd/exp(cant1)
BB <- exp(thetaA[1])*(t(y)%*%(psd)%*%(y))/2+.1
curll <- 0.5*as.numeric(determinant(psd)$modulus) - (t(y)%*%(psd)%*%(y))/2 +
sum(dnorm(thetaA[-1],pri.mn[-1],pri.sd[-1],log=TRUE))+
dgamma(exp(-thetaA[1]),.1,.1,log=TRUE)
canll <- 0.5*as.numeric(determinant(cansd)$modulus) - (t(y)%*%(cansd)%*%(y))/2 +
sum(dnorm(cant[-1],pri.mn[-1],pri.sd[-1],log=TRUE))+
dgamma(exp(-cant[1]),.1,.1,log=TRUE)
Q1 <- dgamma(exp(-thetaA[1]),(n1*n2)/2+.1,BB,log=TRUE)
Q2 <- dgamma(exp(-cant[1]),(n1*n2)/2+.1,bb,log=TRUE)
R <- canll-curll+Q1-Q2
if(!is.na(R)){if(log(runif(1))< R){
acceptedthetano_p[itr] <- 1
theta <- cant
isdmat <- cansd
}}
}
Beta_p[[itr]] <- Beta
# Every 100 iterations: adapt the proposal sd toward a 30-45% acceptance
# rate and report progress.
if(itr %% 100 == 0){
if(mean(acceptedthetano_p[1:itr]) > 0.45){sdl <- sdl*1.2}
if(mean(acceptedthetano_p[1:itr]) < 0.3){sdl <- sdl*0.8}
print(itr)
print(mean((Beta-B0)^2))
}
}
# Discard the first 500 draws as burn-in and save the posterior sample.
Beta_post <- Beta_p[501:2000]
save(Beta_post, file = paste("3rdwork3rdsim", q,"g", sigma0,"_",nux,"_", repli,".rda", sep =""))
}
}
}
|
17b1d2adf10742a05c1b903b9611bb720152ecea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/reReg/examples/reReg.Rd.R
|
1cc8f969af2ba0f6e8a59c576088a51b29bda413
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
reReg.Rd.R
|
# Extracted example code for reReg::reReg (recurrent-event regression).
library(reReg)
### Name: reReg
### Title: Fits Semiparametric Regression Models for Recurrent Event Data
### Aliases: reReg
### ** Examples
## readmission data
data(readmission, package = "frailtypack")
set.seed(123)
## Accelerated Mean Model
# Fit on the first 49 subjects; standard errors via resampling (B = 20).
(fit <- reReg(reSurv(t.stop, id, event, death) ~ sex + chemo,
data = subset(readmission, id < 50),
method = "am.XCHWY", se = "resampling", B = 20))
summary(fit)
## Generalized Scale-Change Model
set.seed(123)
(fit <- reReg(reSurv(t.stop, id, event, death) ~ sex + chemo,
data = subset(readmission, id < 50),
method = "sc.XCYH", se = "resampling", B = 20))
summary(fit)
|
cfe9b715066b72ab374dfb2cebd1b9876e08c4ce
|
cfdbca2e80b393dee23647a41810b7e0827c5359
|
/Z Old Tex Files/program R/Conceptos/binomial_convergen_poisson.r
|
6f1836683ba7d2f66c8928f20ffb42a9f6989b11
|
[] |
no_license
|
cualquiercosa327/2ed-team
|
99e1d67f861785ef6ec003901e9869525128178f
|
cd596e8ff5a657a3c0118763d1cd6a7fbc4bfc69
|
refs/heads/master
| 2022-01-06T05:38:44.019913
| 2019-06-25T20:54:11
| 2019-06-25T20:54:11
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,646
|
r
|
binomial_convergen_poisson.r
|
# Visual demonstration of the Poisson approximation to the binomial:
# for each (n, p) pair the exact binomial pmf is drawn as vertical bars
# and a Poisson approximation is overlaid as a red line.
###############################
#### For small p    ###########
###############################
# Three sample sizes crossed with three success probabilities.
n1<-10
n2<-50
n3<-100
p1<-0.2
p2<-0.5
p3<-0.8
# a{i}{j}: binomial pmf over 0..n_i with probability p_j.
a11<-dbinom(c(0:n1),n1,p1)
a12<-dbinom(c(0:n1),n1,p2)
a13<-dbinom(c(0:n1),n1,p3)
a21<-dbinom(c(0:n2),n2,p1)
a22<-dbinom(c(0:n2),n2,p2)
a23<-dbinom(c(0:n2),n2,p3)
a31<-dbinom(c(0:n3),n3,p1)
a32<-dbinom(c(0:n3),n3,p2)
a33<-dbinom(c(0:n3),n3,p3)
# 3x3 grid: rows = n, columns = p. Each overlay uses the classical
# Poisson(lambda = n*p) approximation.
par(mfrow=c(3,3))
plot(a11,type="h",ylab="n=10",xlab="",main="p=0.2")
lines(dpois(c(0:n1),n1*p1),col=2)
plot(a12,type="h",ylab="n=10",xlab="",main="p=0.5")
lines(dpois(c(0:n1),n1*p2),col=2)
plot(a13,type="h",ylab="n=10",xlab="",main="p=0.8")
lines(dpois(c(0:n1),n1*p3),col=2)
plot(a21,type="h",ylab="n=50",xlab="")
lines(dpois(c(0:n2),n2*p1),col=2)
plot(a22,type="h",ylab="n=50",xlab="")
lines(dpois(c(0:n2),n2*p2),col=2)
plot(a23,type="h",ylab="n=50",xlab="")
lines(dpois(c(0:n2),n2*p3),col=2)
plot(a31,type="h",ylab="n=100",xlab="")
lines(dpois(c(0:n3),n3*p1),col=2)
plot(a32,type="h",ylab="n=100",xlab="")
lines(dpois(c(0:n3),n3*p2),col=2)
plot(a33,type="h",ylab="n=100",xlab="")
lines(dpois(c(0:n3),n3*p3),col=2)
##############################
#### For large p   ###########
##############################
# Same n values; note n1/n2/n3 and p1/p2/p3 are reassigned here.
n1<-10
n2<-50
n3<-100
p1<-0.5
p2<-0.7
p3<-0.9
# NOTE(review): the lam* variables below are assigned but never used in
# the plots that follow (the lines() calls recompute n*(1-p) inline).
lam11<-n1*p1
lam12<-n1*p2
lam13<-n1*p3
lam21<-n2*p1
lam22<-n2*p2
lam23<-n2*p3
lam31<-n3*p1
lam32<-n3*p2
lam33<-n3*p3
b11<-dbinom(c(0:n1),n1,p1)
b12<-dbinom(c(0:n1),n1,p2)
b13<-dbinom(c(0:n1),n1,p3)
b21<-dbinom(c(0:n2),n2,p1)
b22<-dbinom(c(0:n2),n2,p2)
b23<-dbinom(c(0:n2),n2,p3)
b31<-dbinom(c(0:n3),n3,p1)
b32<-dbinom(c(0:n3),n3,p2)
b33<-dbinom(c(0:n3),n3,p3)
# Reweighted Poisson pmf used when p is large: the approximation is built
# from lambda = n*(1-p), i.e. the Poisson limit applied to the count of
# *failures*. NOTE(review): the exact algebraic motivation for this
# particular reweighting is not stated in the original -- confirm against
# course notes before reuse.
pois<-function(x,lambda,n){
  dpois(x,lambda)*(factorial(x))*(lambda^(n-2*x))/(factorial(n-x))
}
par(mfrow=c(3,3))
plot(b11,type="h",ylab="n=10",xlab="",main="p=0.5")
lines(pois(c(0:n1),n1*(1-p1),n1),col=2)
plot(b12,type="h",ylab="n=10",xlab="",main="p=0.7")
lines(pois(c(0:n1),n1*(1-p2),n1),col=2)
plot(b13,type="h",ylab="n=10",xlab="",main="p=0.9")
lines(pois(c(0:n1),n1*(1-p3),n1),col=2)
plot(b21,type="h",ylab="n=50",xlab="")
lines(pois(c(0:n2),n2*(1-p1),n2),col=2)
plot(b22,type="h",ylab="n=50",xlab="")
lines(pois(c(0:n2),n2*(1-p2),n2),col=2)
plot(b23,type="h",ylab="n=50",xlab="")
lines(pois(c(0:n2),n2*(1-p3),n2),col=2)
plot(b31,type="h",ylab="n=100",xlab="")
lines(pois(c(0:n3),n3*(1-p1),n3),col=2)
plot(b32,type="h",ylab="n=100",xlab="")
lines(pois(c(0:n3),n3*(1-p2),n3),col=2)
plot(b33,type="h",ylab="n=100",xlab="")
lines(pois(c(0:n3),n3*(1-p3),n3),col=2)
|
aff7dda579dcb240a6b735c852e5374a5db0e77b
|
d3e02276ecbd771fd37379ed487883006fc54cab
|
/Exercise-3/1/analysis.R
|
d1315e0843cd0725f2c5f03be374705ea48ce377
|
[] |
no_license
|
shotii/Big-Data-Analytics
|
08912adfe5ac2f3f6546df763899b078a78edae0
|
a83025692e47d55f80c228b90d08f2c6310b4d20
|
refs/heads/master
| 2021-05-02T09:32:56.796756
| 2017-02-01T21:10:37
| 2017-02-01T21:10:37
| 72,871,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,860
|
r
|
analysis.R
|
# Exploratory analysis of the ggplot2 `diamonds` dataset.
library(ggplot2)
data(diamonds)
summary(diamonds)
# Variables for easier access
# NOTE(review): these are module-level globals read by maxCorrelation()
# and main() below; `=` is used for assignment throughout this file
# (tidyverse style would prefer `<-`).
carat = diamonds$carat
cut = diamonds$cut
color = diamonds$color
clarity = diamonds$clarity
depth = diamonds$depth
price = diamonds$price
# Approximate volume from the three linear dimensions (x * y * z, in mm^3).
volume = diamonds$x * diamonds$y * diamonds$z
# `table` here shadows base::table within expressions using this variable.
table = diamonds$table
# Correlation of `price` with each candidate predictor, sorted so the
# strongest (most positive) correlation comes first.
#
# Reads the globals `price`, `carat`, `depth`, `volume`, `table` defined
# at the top of this script (they are free variables, looked up at call
# time in the enclosing environment).
#
# Returns a named numeric vector of Pearson correlations, sorted in
# decreasing order. Fixes from review: `<-` instead of `=` for assignment,
# and the stray trailing space in the "Price-Depth" label is removed so
# all four names follow the same pattern.
maxCorrelation <- function() {
  correlations <- c(
    "Price-Carat with corr coefficient"  = cor(price, carat),
    "Price-Depth with corr coefficient"  = cor(price, depth),
    "Price-Volume with corr coefficient" = cor(price, volume),
    "Price-Table with corr coefficient"  = cor(price, table)
  )
  sort(correlations, decreasing = TRUE)
}
# Driver for the exploratory analysis: prints the sorted correlations,
# prints cor(carat, volume), and writes one boxplot to mypdf.pdf.
# Most plots are left commented out as a record of the exploration.
# Reads the module-level globals defined above (diamonds columns).
main = function ()
{
  print("Correlations for: ")
  print( maxCorrelation())
  # Density plot for carat since it is the one, for which the price correlates the most
  #qplot(carat, data = diamonds, geom = "density") -- DONE
  # The plot leads to the assumption that price and carat might have a exponential model as relation
  #qplot(log(carat), log(price), data = diamonds) + geom_smooth(method="lm") -- DONE
  # Depth seems to be Normal distributed, but why isn't it reflected in the price?
  # NOTE(review): this qplot result is not printed or assigned, so inside a
  # function it produces no visible output.
  qplot(depth, data = diamonds, geom = "density") #
  # Strong correlation also with price and volume, so plot price/volume scatter plot
  # Clearly, price rises exponentially with volume, but there are quiet a lot of outliers maybe due to carat?
  # Also the variance seems to increase with volume
  #qplot(log(volume), log(price), data=diamonds) + geom_smooth(method = "lm")
  #g = qplot(log(carat), log(price), data=diamonds)
  #g + geom_point() + facet_grid(. ~ cut)
  #g2 = qplot(log(carat), log(price), data=diamonds)
  #g2 + geom_point(color="red") + facet_grid(. ~ color)
  # Shows really nicely the difference that clarity has on the price!
  #p = qplot(log(carat), log(price), colour=clarity, data=diamonds)
  #p
  # Strong correlation between carat and volume obviously
  print(cor(carat,volume))
  # Shows that volume and carat are correlated
  #p = qplot(log(carat), log(price), data=diamonds) + geom_point(aes(size=volume))
  #p
  # Shows no influence of depth on price
  #p = qplot(log(carat), log(price), alpha=depth, data=diamonds)
  #p
  # Carat and volume have a linear relationship altough quiet a lot outliers! How can that be??
  #p = qplot(carat, volume, data=diamonds)
  #p
  # Side effect: opens a PDF device, draws the boxplot, then closes it.
  pdf(file="mypdf.pdf")
  # Shows how price per carat is distributed over colors (see reference link on pc for explanation!)
  p = qplot(color, price/carat, data = diamonds, geom = "boxplot")
  p
  dev.off()
}
main()
|
26a08323c7a92797271b391d61123072be91a249
|
6efddb8a3e3055345d36d0fb44dba7d66ca8f87d
|
/R/utils.R
|
5d601bbd75a5ba6543c24732c85c0aa0007e47e1
|
[
"MIT"
] |
permissive
|
krzjoa/torch
|
9d1a765e76d7d172c5e17d755f9eecaadc5daae4
|
e58f322562504a9d503179ca2a1c318e6431cc32
|
refs/heads/master
| 2021-06-29T15:42:32.360235
| 2020-11-11T21:45:30
| 2020-11-11T21:45:30
| 299,896,062
| 0
| 0
|
NOASSERTION
| 2020-09-30T11:23:25
| 2020-09-30T11:23:24
| null |
UTF-8
|
R
| false
| false
| 320
|
r
|
utils.R
|
# Construct a null external pointer (via the C-level helper cpp_nullptr())
# and tag it with S3 class "nullptr".
nullptr <- function() {
  structure(cpp_nullptr(), class = "nullptr")
}
# https://stackoverflow.com/a/27350487/3297472
# TRUE when `pointer` wraps a NULL address. Attributes (class, names, ...)
# are ignored for the comparison and the caller's object is left untouched.
is_null_external_pointer <- function(pointer) {
  stripped <- pointer
  attributes(stripped) <- NULL
  identical(stripped, methods::new("externalptr"))
}
|
4907f1aba63a9d4ba08d52bdf146ed972370487f
|
05d086f9121da2dc96a00087d7d6d8789aea8500
|
/RushRanch_Preliminary.R
|
f74877b8b45cec8a7487a3678e6f0c7daddcca60
|
[] |
no_license
|
srussell2416/Rush_Ranch
|
f050bc6763ad799f8f54afd16b5183e3d701e4b1
|
ade5ef2599bda5717969e9cbbfd0f275ac56ff8d
|
refs/heads/master
| 2020-08-08T07:24:31.553784
| 2019-10-29T20:52:06
| 2019-10-29T20:52:06
| 213,777,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,486
|
r
|
RushRanch_Preliminary.R
|
#Rush Ranch Exploration
# Exploratory plots of eddy-covariance flux data (US-Srr site):
# cumulative NEE by "water year" (April-March) and GPP/RECO comparisons.
library(tidyverse)
library(lubridate)
library(bigleaf)
# NOTE(review): hard-coded local path -- will only run on the author's machine.
data <- read.csv("~/Documents/MS_Thesis/USSrr.csv")
names(data)
data <- data %>%
  # Drop the pre-existing Year/Hour columns, then rebuild date parts by
  # splitting the fixed-width TIMESTAMP_END string (YYYYMMDDHHMM).
  select(-Year, - Hour) %>%
  separate(TIMESTAMP_END, into = c("Year", "Month", "Day", "Hour", "Minute"), sep = c(4, 6, 8, 10)) %>%
  mutate(datetime = paste0(Year,"-", Month, "-", Day," ", Hour, ":", Minute, ":00"),
    datetime = ymd_hms(datetime),
    Month = as.numeric(Month),
    Year = as.numeric(Year),
    # DOY2: days since April 1st (day 91), wrapping Jan-Mar to the end.
    DOY2 = if_else(DOY >= 91, DOY - 90, DOY + 275)
    #GPP_DT = umolCO2.to.gC(GPP_DT),
    #RECO_DT = umolCO2.to.gC(RECO_DT)
  )
# Relabel Year into April-to-March "water years".
# NOTE(review): Year was just coerced to numeric above; assigning strings
# here silently converts the whole column back to character.
data$Year[data$Month < 04 & data$Year == 2014] <- "2013-2014"
data$Year[data$Month >= 04 & data$Year == 2014] <- "2014-2015"
data$Year[data$Month < 04 & data$Year == 2015] <- "2014-2015"
data$Year[data$Month >= 04 & data$Year == 2015] <- "2015-2016"
data$Year[data$Month < 04 & data$Year == 2016] <- "2015-2016"
data$Year[data$Month >= 04 & data$Year == 2016] <- "2016-2017"
data$Year[data$Month < 04 & data$Year == 2017] <- "2016-2017"
#Comparing NEE to NEE = RECO - GPP
data %>%
  select(datetime, DOY2, GPP_DT, RECO_DT, NEE, Year, Month) %>%
  drop_na() %>%
  group_by(Year) %>%
  mutate(calc_NEE = RECO_DT - GPP_DT,
    cum_NEE = cumsum(calc_NEE),
    cum_NEE2 = cumsum(NEE)) %>%
  select(datetime, DOY2, Year, cum_NEE, cum_NEE2) %>%
  pivot_longer(c(cum_NEE, cum_NEE2), names_to = "NEE") %>%
  ggplot(aes(x = DOY2, y = value, color = NEE, linetype = Year)) +
  geom_line() +
  theme_bw()
#Yearly cumulative NEE
# Same as above but using the night-time (NT) partitioned fluxes.
data %>%
  select(datetime, DOY2, GPP_NT, RECO_NT, NEE, Year, Month) %>%
  drop_na() %>%
  group_by(Year) %>%
  mutate(calc_NEE = RECO_NT - GPP_NT,
    cum_NEE = cumsum(calc_NEE)) %>%
  select(datetime, DOY2, Year, cum_NEE) %>%
  ggplot(aes(x = DOY2, y = cum_NEE, color = Year)) +
  ylab("Cumulative NEE( µmol CO2 m-2 s-1)") +
  xlab("Days since April 1st") +
  geom_line() +
  theme_bw()
#Comparing GPP to NEE
# cum_GPP is negated so uptake plots downward against cumulative respiration.
data %>%
  select(datetime, DOY2, GPP_DT, RECO_DT, Year, Month) %>%
  drop_na() %>%
  group_by(Year) %>%
  mutate(cum_GPP = cumsum(GPP_DT),
    cum_GPP = -cum_GPP,
    cum_RECO = cumsum(RECO_DT))%>%
  pivot_longer(c(cum_GPP, cum_RECO), names_to = "flux") %>%
  ggplot(aes(DOY2, value, color = Year, linetype = flux)) +
  geom_line() +
  ylab("Cumulative CO2 Flux (µmol CO2 m-2 s-1)") +
  xlab("Days since April 1st") +
  theme_bw()
#
# NOTE(review): this overwrites the fully processed `data` with a raw v2
# file -- presumably the start of a follow-up analysis; confirm intent.
data <- read.csv("~/Documents/MS_Thesis/USSrr_v2.csv")
|
8e536c424ade053b990235d7b3c1d19c84b7cf0c
|
6eeffc5b83a920bc7f357af3312970fa0a5a84d3
|
/R/ols-data-hsb.R
|
d5b5a2be9d9d9abd951b73796ebc388e1fd09476
|
[] |
no_license
|
cran/olsrr
|
81fe16ddb7b43e33254a7262283d39e37ce4a533
|
215958dfa67b03943c34a12cf6e3774d628fcda7
|
refs/heads/master
| 2021-06-24T07:12:32.809254
| 2020-02-10T11:00:02
| 2020-02-10T11:00:02
| 90,952,056
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25
|
r
|
ols-data-hsb.R
|
#' Test Data Set
#'
#' Dataset shipped with the package for use in tests and examples.
#' NOTE(review): presumably the "High School and Beyond" (hsb) survey
#' data -- confirm and document columns from the package documentation.
"hsb"
|
13a90e81e45b2531d448ddfb50afc62d2118d601
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2019/nonfatal_code/digest_gastritis/prepnewdata_545_total_gastritis.R
|
7d03286b09a4617f3aacc0b19c481135f658d62a
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,091
|
r
|
prepnewdata_545_total_gastritis.R
|
## Gastritis and duodenitis; processing data new in GBD 2019
# Pipeline: clear the Step-4 bundle, carry Step-3 data forward (tracking
# seq mappings), append new Step-4 clinical data, crosswalk alternate case
# definitions with a previously fitted MR-BRT model, apply a 2-MAD outlier
# rule to prevalence, and upload the resulting crosswalk version.
# NOTE(review): rm(list=ls()) in a sourced script wipes the caller's
# workspace -- discouraged.
rm(list=ls())
## Set up working environment
if (Sys.info()["sysname"] == "Linux") {
  j <- "FILEPATH_J"
  h <- "FILEPATH_H"
  l <- "FILEPATH_L"
} else {
  j <- "FILEPATH_J"
  h <- "FILEPATH_H"
  l <- "FILEPATH_L"
}
my.lib <- paste0(h, "R/")
central_fxn <- paste0(j, "FILEPATH_CENTRAL_FXNS")
# Date string (YYYY_MM_DD) used to stamp output filenames.
date <- gsub("-", "_", Sys.Date())
pacman::p_load(data.table, ggplot2, openxlsx, readxl, readr, RMySQL, stringr, tidyr, plyr, dplyr, mvtnorm, msm)
# NOTE(review): installing a package on every run is wasteful and can fail
# on shared clusters; msm is already requested via p_load above.
install.packages("msm", lib = my.lib)
library("msm", lib.loc = my.lib)
## Source central functions
source(paste0(central_fxn, "get_age_metadata.R"))
# source(paste0(central_fxn, "get_location_metadata.R"))
source(paste0(central_fxn, "save_bundle_version.R"))
source(paste0(central_fxn, "get_bundle_version.R"))
source(paste0(central_fxn, "save_crosswalk_version.R"))
source(paste0(central_fxn, "get_crosswalk_version.R"))
source(paste0(central_fxn, "get_bundle_data.R"))
source(paste0(central_fxn, "upload_bundle_data.R"))
source(paste0(central_fxn, "save_bulk_outlier.R"))
## Source other functions
source(paste0(h, "code/getrawdata.R"))
# source(paste0(h, "code/sexratio.R"))
source(paste0(h, "code/datascatters.R"))
source(paste0(h, "code/samplematching_wageaggregation.R"))
# source(paste0(h, "code/prepmatchesforMRBRT.R"))
source(paste0(j, "FILEPATH/mr_brt_functions.R"))
source(paste0(h, "code/applycrosswalks.R"))
source(paste0(h, "code/outlierbyMAD.R"))
source(paste0(h, "code/update_seq.R"))
## Get metadata
# Build an age table: standard GBD age groups with neonatal groups
# collapsed into a single <1 year group (age_group_id 28).
all_fine_ages <- as.data.table(get_age_metadata(age_group_set_id=12))
not_babies <- all_fine_ages[!age_group_id %in% c(2:4)]
not_babies[, age_end := age_group_years_end-1]
all_fine_babies <- as.data.table(get_age_metadata(age_group_set_id=18))
group_babies <- all_fine_babies[age_group_id %in% c(28)]
age_dt <- rbind(not_babies, group_babies, fill=TRUE)
age_dt[, age_start := age_group_years_start]
age_dt[age_group_id==28, age_end := 0.999]
## Clear Step 4 bundle data
# Uploading a sheet containing only the seq column deletes those rows.
step4_bundle <- get_bundle_data(7001, "step4")
step4_bundle <- step4_bundle[ , "seq"]
write.xlsx(step4_bundle, paste0(j, "FILEPATH/clear_step4_bundle.xlsx"), col.names=TRUE, sheetName = "extraction")
clear <- upload_bundle_data(7001, "step4", paste0(j, "FILEPATH/clear_step4_bundle.xlsx"))
step4_bundle <- get_bundle_data(7001, "step4", export = TRUE)
## Get Step 3 bundle data
step3_bundle <- get_bundle_data(7001, "step3")
# Rename column seq to step3_seq
step3_bundle[ , "step3_seq" := seq]
# Make empty seq column
step3_bundle[ , seq := NA]
# Upload as raw data to Step 4 bundle
write.xlsx(step3_bundle, paste0(j, "FILEPATH/manually_upload_step3_bundle_to_step4.xlsx"), col.names=TRUE, sheetName = "extraction")
upload <- upload_bundle_data(7001, "step4", paste0(j, "FILEPATH/manually_upload_step3_bundle_to_step4.xlsx"))
step4_bundle <- get_bundle_data(7001, "step4", export = TRUE)
## Save a Step 4 bundle version without new clinical data
save_bundle_version(7001, "step4")
# NOTE(review): bundle version id 14828 is hard-coded from the save above;
# it must be updated if the script is rerun.
step4_bv_only_old_data <- get_bundle_version(14828, export = TRUE)
# Drop all columns except step3_seq and seq, rename step3_parent_seq and step4_parent_seq
old_data_paired_seqs_steps3and4 <- step4_bv_only_old_data[ , c("step3_seq", "seq")]
#setnames(bundle_dt, "seq", "step3_seq")
setnames(old_data_paired_seqs_steps3and4, "step3_seq", "step3_parent_seq")
setnames(old_data_paired_seqs_steps3and4, "seq", "step4_parent_seq")
## Get crosswalk version 9563, subset to data carried over from Step3, rename column crosswalk_parent_seq to step3_parent_seq
step4_crosswalk <- get_crosswalk_version(9563)
step4_crosswalk_olddt <- step4_crosswalk[crosswalk_origin_id==2, ]
range(step4_crosswalk_olddt$crosswalk_parent_seq)
setnames(step4_crosswalk_olddt, "crosswalk_parent_seq", "step3_parent_seq")
# Remap old Step-3 parent seqs onto their new Step-4 equivalents.
step4_crosswalk_olddt <- merge(step4_crosswalk_olddt, old_data_paired_seqs_steps3and4, by = "step3_parent_seq")
# Rename column step4_parent_seq as crosswalk_parent_seq
setnames(step4_crosswalk_olddt, "step4_parent_seq", "crosswalk_parent_seq")
step4_crosswalk_olddt[!is.na(crosswalk_parent_seq), seq := NA]
## Get new Step 4 clinical data from old bundle and upload to Step 4 new bundle, get in a bundle version, subset for crosswalking
add_545_step4_new_clininfo <- upload_bundle_data(7001, "step4", paste0(j, "FILEPATH/step4_GetBundleVersion_bundle_545_request_335345.xlsx"))
save_bundle_version(7001, "step4")
# NOTE(review): 14846 is another hard-coded bundle version id (see above).
step4_bundleversion_complete <- get_bundle_version(14846, export = TRUE)
head(step4_bundleversion_complete[is.na(step3_seq), ])
tail(step4_bundleversion_complete[is.na(step3_seq), ])
str(step4_bundleversion_complete[is.na(step3_seq), ]) # data.table...7774 obs of 67 variables
# Subset to just the new clinical data
step4_bundleversion_new_only <- step4_bundleversion_complete[is.na(step3_seq), ]
# Label with cv_* for clinical informatics subsets
step4_bundleversion_new_only <- market_scan_cv_labels(step4_bundleversion_new_only)
# Store bundle columns for later
bundle_columns <- names(step4_bundleversion_new_only)
## Apply crosswalk coefficients and update seq/crosswalk_parent_seq
# Reuse coefficients from the previously fitted MR-BRT crosswalk model.
cv_alts <- c("cv_marketscan_other")
old_model_summary <- paste0(j, "FILEPATH/totalgd_xwmodel_2019_07_01/model_summaries.csv")
out_path <- paste0(j, "FILEPATH")
## Make definitions for plotting and subsetting
cv_drop <- bundle_columns[grepl("^cv_", bundle_columns) & !bundle_columns %in% cv_alts]
step4_bundleversion_new_only <- get_definitions(step4_bundleversion_new_only)
## Plot all new data without adjustments
scatter_bydef(step4_bundleversion_new_only)
## Subset to data to crosswalk and not crosswalk
to_crosswalk_dt <- step4_bundleversion_new_only[definition!="reference", ]
reference_dt <- step4_bundleversion_new_only[definition=="reference", ]
## Fill out cases, sample size, standard error using Epi Uploader formulae, Update parent seq and seq
get_cases_sample_size(to_crosswalk_dt)
get_se(to_crosswalk_dt)
update_seq(to_crosswalk_dt)
## Get predicted coefficients with all sources of uncertainty, the predictions for training data are fine since there are no continuous covariates or multi-dimensional case-definitions
new_predicted_xws <- unique(predict_xw(choice_fit = NULL, "logit_dif", to_crosswalk_dt, old_model_summary), by = cv_alts)
## Transform data
crosswalked_dt <- transform_altdt(to_crosswalk_dt, new_predicted_xws, "logit_dif")
# Drop rows whose adjusted SE exceeds 1 (uploader validation limit).
crosswalked_dt <- crosswalked_dt[standard_error<=1, ]
## Bind reference data and crosswalked data; make scatter-plot
step4_new_for_xwv <- rbind(crosswalked_dt, reference_dt, fill=TRUE)
scatter_bydef(step4_new_for_xwv, raw = FALSE)
## Clean up columns on transformed data
columns_keep <- unique(c(bundle_columns, "crosswalk_parent_seq"))
columns_drop <- c("cv_admin", "cv_marketscan_2000", "cv_marketscan_other")
columns_keep <- setdiff(columns_keep, columns_drop)
step4_new_for_xwv <- step4_new_for_xwv[, ..columns_keep]
## Bind table from last step to the preceding one
step4_crosswalk <- rbind(step4_crosswalk_olddt, step4_new_for_xwv, fill = TRUE)
table(step4_crosswalk$measure)
## Apply outlier criteria (make sure only prevalence rows are taken into account)
step4_crosswalk <- step4_crosswalk[ , is_outlier_old := is_outlier]
step4_crosswalk_prev <- step4_crosswalk[measure=="prevalence", ]
step4_crosswalk_prev_out <- auto_outlier(step4_crosswalk_prev)
scatter_markout(step4_crosswalk_prev_out, upper = 0.8)
step4_crosswalk_outliered <- rbind(step4_crosswalk[measure != "prevalence", ], step4_crosswalk_prev_out, fill = TRUE)
## Save crosswalk version, associated with new Step 4 bundle version with clinical data created above
upload_path <- paste0(j, "FILEPATH/7001_step3and4seqsfixed_2MADonprev_", date, ".xlsx")
write.xlsx(step4_crosswalk_outliered, upload_path, col.names=TRUE, sheetName = "extraction")
##Then add a description and upload
description <- "Step3 best crosswalk with Step4 clinical info xw'd and appended, 2MAD applied to all prev points, seqs and steps fixed from previous"
xwv_upload <- save_crosswalk_version(bundle_version_id=14846, data_filepath=upload_path, description = description)
# xwv 9890
|
eb48d93765061e0732aebe466761c3ed7b4ffa1b
|
0ff20eb80dec8348c77e57a48f4298515e311352
|
/Examples In R/Correl.R
|
c12bcbfb7a521d6549383163cbe3bffa0c71f365
|
[] |
no_license
|
tusharkumar0404/TestRepo
|
bf58f42dd4c247eebf9a9c7d361099a60e7279c2
|
1350e72edd7150a15156063feb4ca13646b65269
|
refs/heads/master
| 2021-01-12T04:00:23.808841
| 2018-08-20T13:05:32
| 2018-08-20T13:05:32
| 77,460,419
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,625
|
r
|
Correl.R
|
##################################################
# Demonstrate plotting graphs of paired variables
# (X1, X2) where cor(X1, X2) varies in a range
#
# Load the library for multivariate
# normal distribution
library("mvtnorm")
# Square plots
# Save current graphics parameters so they can be restored later.
opar <- par(no.readonly=TRUE)
par(pty="s")
par(mfrow=c(2,3))
##################################################
# A function to plot a dataset with two variables
# such that cor(X1, X2) = cor
# Side effect only: draws one scatter panel into the active device.
plotVars <- function(cor) {
  mu <- c(0, 0) # WLOG, let the means be at the origin
  # Given a correlation between X1, X2, we construct
  # a 2 x 2 correlation matrix
  # Recall: The correlation of a variable with itself is 1
  sig <- matrix(c(1,cor,cor,1),
    byrow=TRUE, ncol=2)
  # Simulate a bivariate normal distribution of
  # 200 datapoints with the stated characteristics
  # NOTE(review): no set.seed, so each call draws a fresh sample.
  x <- rmvnorm(n=200,
    mean=mu,
    sigma=sig)
  colnames(x) <- c("X1", "X2")
  plot(x,
    xlab="X1", ylab="X2",
    xlim=c(-3,3), ylim=c(-3,3),
    pch=20, cex=0.7, col="navy",
    main=paste("Dataset with Correlation", cor))
  # Paint the origin red
  points(mu[1], mu[2], pch=7, lwd=2, col="red")
  # Draw the axes
  abline(v=mu[1], lty=3)
  abline(h=mu[2], lty=3)
  # Should you wish to check, uncomment
  # print("***********************************************")
  # print(paste("Case: Correlation = ", cor))
  # print("The SAMPLE means of the variables are")
  # print(round(colMeans(x), 2))
  # print("The SAMPLE covariance matrix is")
  # print(round(var(x), 2))
}
# Obtain plots for pairs of variables with
# a range of correlation values from -1 to 1
plotVars(1)
plotVars(0.75)
plotVars(0.25)
plotVars(0)
plotVars(-0.5)
plotVars(-1)
# Reset to single plot parameters and graph the function
par(opar)
# Here's a technique to plot any function f(X1)
# First construct a sequence with 200 X1-values
# spanning between -3 and 3
X1 <- seq(-3, 3, length=200)
# Next obtain the function values - the syntax is intuitive
# We get X2 as f(all points in the X1 array)
X2 <- sapply(X1, function(x) { x*x})
correl <- round(cor(X1, X2), 2)
# Be surprised at the correlation number!
# (X2 = X1^2 on a symmetric range has correlation ~0 with X1.)
# NOTE(review): the plot title string below misspells "correlation";
# left unchanged here because it is runtime output.
plot(X1, X2,
  main=paste("Variables with corelation", correl),
  cex=0.4,
  col="navy")
# Perfect correlation
# Any exact linear relationship gives correlation +1 or -1.
par(mfrow=c(1,2))
X1 <- seq(-3, 3, 0.05)
plot(X1, 0.5*X1 + 1,
  main="Perfectly correlated variables",
  xlab="X1", ylab="X2",
  pch=20, cex=0.7, col="navy")
abline(h=0, v=0)
plot(X1, -0.5*X1 + 1,
  main="Perfectly correlated variables",
  xlab="X1", ylab="X2",
  pch=20, cex=0.7, col="navy")
abline(h=0, v=0)
|
429630a809905c62549146f3d6011e035dc80235
|
8ff609111a7e80b787c1f000259d83d295c03beb
|
/CV.elastic/divide.data.r
|
111729696400b39c26b2f2578a13f5a1b2450364
|
[] |
no_license
|
SoyeonKimStat/PROMISE
|
71a57eb2558854b22089f5da5505a10c7df5a2f1
|
263f0366aaa31776d3ddd6f44c1d7e6910514052
|
refs/heads/master
| 2021-08-16T05:25:11.718753
| 2019-07-17T13:07:54
| 2019-07-17T13:07:54
| 91,021,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
divide.data.r
|
# Split a dataset into train/test sets by random row sampling.
#
# data          : matrix-like object; the response variable is in the LAST
#                 column, all other columns are predictors. Every column is
#                 coerced to double (via apply), so the result is a numeric
#                 matrix.
# train.portion : fraction of rows (0 < train.portion < 1) used for training;
#                 the training size is floor(n * train.portion).
#
# Returns list(x.train, y.train, x.test, y.test, p) where p is the number
# of predictor columns. Fixes from review: seq_len() instead of seq(n),
# setdiff() instead of the !seq(n) %in% trainid scan, and drop = FALSE so
# a one-row split still yields a matrix instead of collapsing to a vector.
divide.data <- function(data, train.portion) {
  stopifnot(is.numeric(train.portion), length(train.portion) == 1,
            train.portion > 0, train.portion < 1)
  data <- apply(data, 2, as.double)
  stopifnot(ncol(data) >= 2)   # need at least one predictor plus the response
  n <- nrow(data)
  ntr <- floor(n * train.portion)
  trainid <- sample(seq_len(n), size = ntr)
  testid <- setdiff(seq_len(n), trainid)
  x.train <- data[trainid, -ncol(data), drop = FALSE]
  y.train <- data[trainid, ncol(data)]
  x.test <- data[testid, -ncol(data), drop = FALSE]
  y.test <- data[testid, ncol(data)]
  p <- ncol(data) - 1
  list(x.train = x.train, y.train = y.train, x.test = x.test, y.test = y.test, p = p)
}
# x.train=div.data$x.train;y.train= div.data$y.train; x.test= div.data$x.test; y.test= div.data$y.test; p= div.data$p;
|
c20e74c40d5aeb9969a5386ccc5b2998bf97c097
|
a896750e89c4f610b73f59b539b546d6f024af07
|
/DESeq2 Script.R
|
4f586f51b7d89f72e0fac4ebde5a386c1a024107
|
[] |
no_license
|
QIU912/1
|
060a7a983395ab3f4067eac60ad1d42020b245a7
|
c00fe4baba9385f47ab436060bead517ab2b7bb4
|
refs/heads/main
| 2023-04-11T09:57:11.081884
| 2021-04-22T09:41:09
| 2021-04-22T09:41:09
| 298,272,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,045
|
r
|
DESeq2 Script.R
|
# Differential-expression analysis (P7 vs PAO1) with DESeq2, followed by a
# heatmap (pheatmap) and PCA of the normalized/transformed counts.
#install.packages("BiocManager")
#BiocManager::install("DESeq2")
#BiocManager::install("GenomeInfoDb")
#BiocManager::install("latticeExtra")
#library(DESeq2)
getwd()
# NOTE(review): require() is used for loading; library() would fail loudly
# instead of returning FALSE. setwd() with a machine-specific path makes
# the script non-portable.
require(DESeq2)
setwd("F:/临时/class/")
# Count matrix: genes in rows (row.names = 1), samples in columns.
data <- read.table("P7vsPAO1.txt", row.names = 1,header = T, na.strings = c("","NA"), skipNul=TRUE)
dim(data)
colnames(data)
rownames(data)
nrow(data)
colSums(data)
# Two replicates per condition, in the same order as the count columns.
Sample <- c("PAO1", "PAO1", "P7", "P7")
samples <- data.frame(row.names=colnames(data), Group=as.factor(Sample))
DS_Table <- DESeqDataSetFromMatrix(countData = data, colData=samples, design=~Group)
rowSums(counts(DS_Table))
# Pre-filter: keep genes with more than one read across all samples.
DS_Table_sort <- DS_Table[ rowSums(counts(DS_Table)) > 1, ]
dim(DS_Table)
dim(DS_Table_sort)
DS_Table_sort <- estimateSizeFactors(DS_Table_sort)
normalized_counts <- counts(DS_Table_sort, normalized=TRUE)
write.table(normalized_counts, file="PAO1_P7_2samples_normalized.csv")
DS <- DESeq(DS_Table_sort)
# testing two transformation and one non-transformed
rld <- rlogTransformation(DS, blind=TRUE) # transformation
vsd <- varianceStabilizingTransformation(DS, blind=TRUE) # transformation
nt <- normTransform(DS) # non transform
# Differential expression P7 vs PAO1, then filter to padj < 0.05 and
# |log2 fold change| > 1.
comparison11 <- results(DS, contrast=c("Group","P7","PAO1"))# ,alpha=0.05)
summary(comparison11)
comparison11 <- subset(comparison11, padj < 0.05)
comparison11 <- comparison11[abs(comparison11$log2FoldChange) >1,]
comparison11_df <- as.data.frame(comparison11)
head(comparison11_df)
dim(comparison11_df)
write.table(comparison11_df, file="P7 vs PAO1.csv", sep=",")
####
# Heatmap of log-normalized counts; rows clustered, columns not.
# NOTE(review): T/F abbreviations are used in several arguments below --
# TRUE/FALSE would be safer, but they are code and left unchanged here.
library(pheatmap)
pheatmap(assay(nt),
  kmeans_k = NA, breaks = NA, border_color = "white",
  cellwidth = NA, cellheight = NA, scale = "none", cluster_rows = TRUE,
  cluster_cols = F, clustering_distance_rows = "euclidean",
  clustering_distance_cols = "euclidean", clustering_method = "average",
  cutree_rows = NA, cutree_cols = NA,
  legend = TRUE, legend_breaks = NA,
  legend_labels = NA, annotation_row = NA,
  annotation = NA, annotation_colors = NA, annotation_legend = TRUE,
  annotation_names_row = T, annotation_names_col = TRUE,
  drop_levels = TRUE, show_rownames = F, show_colnames = T, main = NA,
  fontsize = 10, fontsize_row = 4.5, fontsize_col = 10, display_numbers = F,
  gaps_row = NULL, gaps_col = NULL, labels_row = NULL,
  labels_col = NULL, filename = NA, width = NA, height = NA,
  silent = FALSE, na_col = "#DDDDDD") #color = colorRampPalette(rev(brewer.pal(n = 7, name ="RdBu")))(256),
dev.off() # Close the device first: the large heatmap can leave the graphics state unusable, making the subsequent PCA plot fail otherwise.
plotPCA((nt),intgroup = 'Group')
# Labelled PCA using ggrepel so sample names do not overlap.
library(ggplot2)
library(ggrepel)
p = plotPCA((nt),intgroup = 'Group')
p <- p + theme(legend.position = 'none') + geom_point(size = 2) +geom_text_repel(aes_string(label = "name"), size = 5)
print(p)
dev.off()
|
e2dd07c8edf4dd257c6f2b96d1c5bf7d2238cb58
|
590653560395302059275ff4bcc9551283a0cd73
|
/Reto/Punto1,1.R
|
ee1893586eec6e1fb39f1c3d39af096f5e46515a
|
[] |
no_license
|
LuisPenaranda/AnalisisNumerico
|
3247251541f992918d90f1e2770a893617ad9b0c
|
29b8cc0e55b5320c9d805ed2a6e1619b2d8a4d42
|
refs/heads/master
| 2020-12-19T05:31:21.748084
| 2020-05-21T13:23:42
| 2020-05-21T13:23:42
| 235,633,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 720
|
r
|
Punto1,1.R
|
# * Luis Peñaranda
# * Diego Gomez
# * Camilo Moreno
# Evaluate a polynomial at x with Horner's rule and report the operation
# count. `coeficientes` holds the coefficients from highest to lowest
# degree (e.g. c(2,0,-3,3,-4) is 2x^4 - 3x^2 + 3x - 4).
# Prints a Spanish summary (result, total operations, multiplications and
# additions) via cat() and returns cat()'s value (NULL).
horner <- function(coeficientes, x){
  y <- coeficientes[1]
  i <- 0
  # coeficientes[-1] replaces the original 2:length(coeficientes): for a
  # single coefficient that range is c(2, 1), which iterated backwards
  # over an NA and the constant; [-1] correctly yields an empty vector.
  for(k in coeficientes[-1]){
    y <- x*y + k     # one multiplication and one addition per step
    i <- i + 2
  }
  return(cat("resultado: ",y ,", El numero de operaciones realizadas fueron de: ", i, " siendo ", i/2 , "el numero de multiplicaciones y ", i/2 , "el numero de sumas realizadas"))
}
# Coefficients of the derivative of a polynomial whose coefficients are
# given highest-to-lowest degree: derivar(c(2,0,-3,3,-4)) == c(8,0,-6,3).
# Returns numeric(0) for a constant polynomial (the original returned
# NULL in that case via c(); numeric(0) behaves identically under c()
# and indexing but has a stable type).
#
# Fixes from review: the original loop index used
# coeficientes[1:length(coeficientes)-1], which only worked because `:`
# binds tighter than `-`, producing 0:(n-1) where index 0 is silently
# dropped. It also grew the result with c() inside the loop. Both are
# replaced by a single vectorized expression.
derivar <- function(coeficientes){
  n <- length(coeficientes)
  if (n <= 1) {
    return(numeric(0))   # derivative of a constant is zero
  }
  # Drop the constant term; multiply each remaining coefficient by its
  # (descending) degree n-1, n-2, ..., 1.
  head(coeficientes, -1) * seq(n - 1, 1)
}
# Demo: differentiate 2x^4 - 3x^2 + 3x - 4 and evaluate the derivative
# at x0 = -2 (printing the result and operation counts).
x0 <- -2
coeficientes <- c(2,0,-3,3,-4)
derivada <- derivar(coeficientes)
horner(derivada,x0)
|
ca8dafba11a1185bba2379393df7ede3e0ec3777
|
0cbcb07d7129ab917d974e59f9cc573dde7cca3d
|
/R/ehg_dbConnect.R
|
fee064287626871dffb76e008b48dfae4653595d
|
[
"MIT"
] |
permissive
|
mmajluta/enhogar
|
9d88cf5c0bf0fe570edadde810414f34588b4a94
|
f32addfd86d7bd28e8c44c0296f1d68bf7310b01
|
refs/heads/main
| 2023-03-08T13:48:48.034981
| 2021-02-24T16:41:54
| 2021-02-24T16:56:10
| 341,257,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
ehg_dbConnect.R
|
#' Connect to the database
#'
#' `r lifecycle::badge("experimental")`
#'
#' Thin wrapper that opens a connection to the "enhogar" database.
#' See \code{Dmisc::\link[Dmisc:dbConnect]{dbConnect}}.
#'
#' @return A database connection.
#' @export
#'
#' @examples
#' \dontrun{
#' conn <- ehg_dbConnect()
#' }
ehg_dbConnect <- function(){
  Dmisc::dbConnect(db_name = "enhogar")
}
|
f722c6b5a3a960f28698cc8a839888227f76051a
|
4273f49c3e1ff875273c10f381e6bd5896da7333
|
/R/lunarBand.R
|
ae3a0cfc448c3860da113f7e59c370dba7b4cd35
|
[] |
no_license
|
ChaddFrasier/planetLearn
|
bb01a07c0400224a7ed6b77f0c624f64e0868c99
|
6ed0c77df311804089b4e0ca1316875be48b2e86
|
refs/heads/master
| 2022-11-12T18:35:56.423863
| 2020-07-01T22:17:11
| 2020-07-01T22:17:11
| 265,106,314
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
lunarBand.R
|
#' Lunar Data from LROC WAC
#'
#' A dataset containing the DN, phase angle, emission angle, incidence angle, local emission angle, local incidence angle, latitude,
#' longitude, Sun azimuthal direction and spacecraft azimuthal direction.
#'
#' NOTE(review): several field descriptions below were left as
#' placeholders in the original; the TODO text should be replaced with
#' definitions confirmed against the LROC WAC product documentation.
#'
#' @format A data frame with 4,183,770 rows and 10 features:
#' \describe{
#'   \item{DN}{surface scattering}
#'   \item{Phase}{Phase angle -- TODO: complete description (truncated in original)}
#'   \item{Emission}{Emission angle -- TODO: confirm units/definition}
#'   \item{Incidence}{Incidence angle -- TODO: confirm units/definition}
#'   \item{LEmission}{Local emission angle -- TODO: confirm units/definition}
#'   \item{LIncidence}{Local incidence angle -- TODO: confirm units/definition}
#'   \item{Lat}{Latitude -- TODO: confirm reference frame}
#'   \item{Long}{Longitude -- TODO: confirm reference frame}
#'   \item{SunAz}{Sun azimuthal direction -- TODO: confirm units/definition}
#'   \item{CraftAz}{Spacecraft azimuthal direction -- TODO: confirm units/definition}
#' }
#' @source \url{http://wms.lroc.asu.edu/lroc}
"lunarData"
|
01618918a8f64a03812714066a9ea290d991a487
|
a447fd4b189cd1098b165c5c160445b0eec4ad11
|
/papers/Osprey/corr_Osprey.R
|
6b7af7caeefe2d2b87592a10707826010c6f1353
|
[
"MIT"
] |
permissive
|
shui5/SpecVis
|
963808de519675945593d2ebeeca9592f63c9a84
|
724f9b68b85948c2bffccb86422bed3aa895ad90
|
refs/heads/master
| 2023-06-16T10:59:53.591095
| 2021-05-08T19:38:37
| 2021-05-08T19:38:37
| 288,817,142
| 0
| 0
| null | 2020-08-19T19:16:02
| 2020-08-19T19:16:02
| null |
UTF-8
|
R
| false
| false
| 3,833
|
r
|
corr_Osprey.R
|
# Compare metabolite quantification (tNAA, tCho, Ins, Glx relative to tCr)
# from three MRS fitting tools (Osprey, LCModel, Tarquin) across three
# vendors (Philips, Siemens, GE): pairwise correlation plots, raincloud and
# box plots, and summary statistics.
# NOTE(review): all input paths are hard-coded to a local volume, so this
# script only runs on the author's machine.
source('functions/dependencies.R')
source('functions/spvs_importResults.R')
source('functions/spvs_Correlation.R')
source('functions/spvs_AddStatsToDataframe.R')
source('functions/spvs_Boxplot.R')
source('functions/spvs_ConcatenateDataFrame.R')
source('functions/spvs_Statistics.R')
# Import Osprey quantification results per vendor.
dfPhOsp <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/Philips/derivativesLCM/QuantifyResults/off_tCr.csv')
dfSiOsp <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/Siemens/derivativesLCM/QuantifyResults/off_tCr.csv')
dfGEOsp <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/GE/derivativesLCM/QuantifyResults/off_tCr.csv')
# LCModel results (first list element holds the data frame).
dataPhLCM <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/Philips/derivativesLCM/LCMBaseline/LCMoutput_015')
dfPhLCM <- dataPhLCM[[1]]
dataSiLCM <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/Siemens/derivativesLCM/LCMBaseline/LCMoutput_015')
dfSiLCM <- dataSiLCM[[1]]
dataGELCM <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/GE/derivativesLCM/LCMBaseline/LCMoutput_015')
dfGELCM <- dataGELCM[[1]]
# Tarquin results.
dataPhTar <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/Philips/derivativesLCM/TarquinBaseline/TarquinAnalysis_Basis_10ms')
dfPhTar <- dataPhTar[[1]]
dataSiTar <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/Siemens/derivativesLCM/TarquinBaseline/TarquinAnalysis_Basis_10ms')
dfSiTar <- dataSiTar[[1]]
dataGETar <- spvs_importResults('/Volumes/Samsung_T5/working/ISMRM/GE/derivativesLCM/TarquinBaseline/TarquinAnalysis_Basis_10ms')
dfGETar <- dataGETar[[1]]
# Attach per-vendor statistics to each data frame.
dfPhTar <- spvs_AddStatsToDataframe(dfPhTar,'/Volumes/Samsung_T5/working/ISMRM/Philips/stat.csv')
dfPhLCM <- spvs_AddStatsToDataframe(dfPhLCM,'/Volumes/Samsung_T5/working/ISMRM/Philips/stat.csv')
dfPhOsp <- spvs_AddStatsToDataframe(dfPhOsp,'/Volumes/Samsung_T5/working/ISMRM/Philips/stat.csv')
dfSiOsp <- spvs_AddStatsToDataframe(dfSiOsp,'/Volumes/Samsung_T5/working/ISMRM/Siemens/stat.csv')
dfSiLCM <- spvs_AddStatsToDataframe(dfSiLCM,'/Volumes/Samsung_T5/working/ISMRM/Siemens/stat.csv')
dfSiTar <- spvs_AddStatsToDataframe(dfSiTar,'/Volumes/Samsung_T5/working/ISMRM/Siemens/stat.csv')
dfGETar <- spvs_AddStatsToDataframe(dfGETar,'/Volumes/Samsung_T5/working/ISMRM/GE/stat.csv')
dfGELCM <- spvs_AddStatsToDataframe(dfGELCM,'/Volumes/Samsung_T5/working/ISMRM/GE/stat.csv')
dfGEOsp <- spvs_AddStatsToDataframe(dfGEOsp,'/Volumes/Samsung_T5/working/ISMRM/GE/stat.csv')
# Shared axis limits for the four metabolites, in tNAA/tCho/Ins/Glx order.
lowerLimit <- c(1.2,0.12,0.4,1.2)
upperLimit <- c(1.75,0.25,1,2.4)
# Pairwise tool-vs-tool correlations on GE rows 32:43.
# NOTE(review): the hard-coded row/column index ranges (32:43, 1:33/34/35)
# encode dataset-specific layout -- confirm before reuse.
p <- spvs_Correlation(list(dfGEOsp[c(32:43),c(1:33)],dfGELCM[c(32:43),c(1:34)])," / [tCr]",c("tNAA","tCho","Ins","Glx"),c('Osprey','LCModel'),c('',''),NULL,lowerLimit,upperLimit, 4)
p2 <- spvs_Correlation(list(dfGEOsp[c(32:43),c(1:33)],dfGETar[c(32:43),c(1:35)])," / [tCr]",c("tNAA","tCho","Ins","Glx"),c('Osprey','Tarquin'),c('',''),NULL,lowerLimit,upperLimit, 4,c(''))
p3 <- spvs_Correlation(list(dfGETar[c(32:43),c(1:35)],dfGELCM[c(32:43),c(1:34)])," / [tCr]",c("tNAA","tCho","Ins","Glx"),c('Tarquin','LCModel'),c('',''),NULL,lowerLimit,upperLimit, 4,c(''))
p4 <- grid.arrange(p, p2, p3, ncol=1, nrow =3)
g <- arrangeGrob(p, p2, p3, ncol=1) #generates g
ggsave(file="CorrelationRevision.pdf", p4, width = 10, height = 10,device=cairo_pdf) #saves g
# Long-format data frame combining the three tools for distribution plots.
dfPaper <- spvs_ConcatenateDataFrame(list(dfGELCM[c(32:43),c(1:34)],dfGEOsp[c(32:43),c(1:33)],dfGETar[c(32:43),c(1:35)]),c('LCModel','Osprey','Tarquin'))
p <- spvs_RainCloud(dfPaper, '/ [tCr]',list('tNAA','tCho','Ins','Glx'),c('Group'),c("Philips S03 KKI"),4)
ggsave(file="RaincloudKKI.pdf", p, width = 12, height = 3,device=cairo_pdf) #saves g
p <- spvs_Boxplot(dfPaper, '/ [tCr]',list('tNAA','tCho','Ins','Glx'),c('Group'),c("Philips S03 KKI"),4)
ggsave(file="BoxplotRevision.pdf", p, width = 12, height = 3,device=cairo_pdf) #saves g
# Summary/normality statistics for the combined data.
Norm <- spvs_Statistics(dfPaper,list('tNAA','tCho','Ins','Glx'))
|
73dac174bed9e61df242f5984c0db01a5e328cdb
|
ed814820a52aa3d29816496eb7c305366d0abe47
|
/code/simulation/simulation_manuscript_run.R
|
3700eb899652cee8e08f6f9e3615eefd7d1df858
|
[] |
no_license
|
DongyueXie/deconference
|
c96221f9b659ca051e567c5560c6686196b84244
|
ae975014687859a7e38ad8fc2d6a11d7da725d5d
|
refs/heads/master
| 2022-03-01T23:05:43.328391
| 2022-02-23T17:28:36
| 2022-02-23T17:28:36
| 253,314,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,530
|
r
|
simulation_manuscript_run.R
|
#
# xin_raw <- readRDS("data/pancreas/xin_raw.rds")
# cell_types = c('alpha', 'beta', 'delta', 'gamma')
# K = length(cell_types)
# rm.indi = c("Non T2D 4","Non T2D 7","Non T2D 10","Non T2D 12")
# rm.indi.idx = which(xin_raw$individual%in%rm.indi)
#
# datax.xin = set_data_decon(Y = xin_raw[,-rm.indi.idx],cell_types = cell_types,
# gene_thresh = 0.05,max_count_quantile_celltype = 0.95,
# max_count_quantile_indi = 0.95,
# w=1)
# design.mat.xin = scRef_multi_proc(datax.xin$Y,datax.xin$cell_type_idx,
# datax.xin$indi_idx,estimator="separate",
# est_sigma2 = TRUE,meta_mode = 'local',smooth.sigma = F)
#
# ref = design.mat.xin$X
# sigma2 = design.mat.xin$Sigma
#
# ref = ref+1/nrow(ref)
# sigma2 = sigma2 + 1/nrow(ref)
#
# saveRDS(list(ref=ref,sigma2=sigma2),file='data/pancreas/xin_ref_sigma9496.rds')
########################################################
########### select marker genes, output rmse 12/09/2021 #############
# Simulation with marker-gene selection (simu_study_marker): loads the Xin
# pancreas reference (gene-by-cell-type means and variances), builds a banded
# gene-gene correlation matrix, and runs null / all-different scenarios.
library(gtools)
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
G = nrow(ref)
K = ncol(ref)
# Banded correlation matrix: corr(i, j) decays linearly to 0 over d = 500 genes.
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
# Mirror the upper band to the lower triangle; subtract diag so it stays 1.
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
# Scenario grid: two group-difference cases, 100 bulk samples, two Dirichlet
# concentration scales (smaller scale = more variable individual proportions).
cases = c("null","all_diff")
nbs = c(100)
dirichlet.scale = c(5,10)
# test
# temp = simu_study(ref[1:100,],sigma2[1:100,],c(0.5,0.3,0.1,0.1),c(0.5,0.3,0.1,0.1),
# n_bulk = 50,dirichlet.scale=5,
# R=A[1:100,1:100],printevery = 1,est_cor=FALSE,nreps = 5)
set.seed(12345)
for(case in cases){
if(case=='null'){
# Identical group proportions: tests type-I error control.
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
# All four cell-type proportions differ between groups: tests power.
p1 = c(0.15,0.2,0.45,0.2)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(aa in dirichlet.scale){
print(paste('Running:',case,'nb=',nb,'aa=',aa))
simu_out = simu_study_marker(ref,sigma2,p1,p2,n_bulk = nb,dirichlet.scale=aa,
R=A,printevery = 1)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_marker_meaerr_phat_',case,'_dirichlet',aa,'no_pd.rds',sep=''))
}
}
}
########################################################
########### use neuron data for simulation #############
# Same simulation design, but the reference comes from neuron data:
# a 12400-gene x 6-cell-type x 97-individual array; per-(gene, cell type)
# means/variances across individuals give ref and sigma2.
source('code/simulation/simulation_manuscript.R')
indis_ref = readRDS('data/neuron/indis_ref_12400by6by97.rds')
ref = apply(indis_ref,c(1,2),mean,na.rm=TRUE)
sigma2 = apply(indis_ref,c(1,2),var,na.rm=TRUE)
G = nrow(ref)
K = ncol(ref)
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
# Single scenario here: null case, only 4 bulk samples, true R used (alpha.cor = 0).
alpha.cors = c(0)
cases = c("null")
nbs = c(4)
dirichlet.scale = c(5)
# test
# temp = simu_study(ref[1:100,],sigma2[1:100,],c(0.5,0.3,0.1,0.1),c(0.5,0.3,0.1,0.1),
# n_bulk = 50,dirichlet.scale=5,
# R=A[1:100,1:100],printevery = 1,est_cor=FALSE,nreps = 5)
set.seed(12345)
for(case in cases){
if(case=='null'){
#p1 = c(0.3,0.2,0.15,0.15,0.1,0.1)
#p2 = c(0.3,0.2,0.15,0.15,0.1,0.1)
p1 = c(0.15,0.15,0.1,0.1,0.2,0.3)
p2 = c(0.15,0.15,0.1,0.1,0.2,0.3)
}else if(case=='all_diff'){
p1 = c(0.15,0.15,0.1,0.1,0.2,0.3)
p2 = c(0.1,0.1,0.2,0.3,0.15,0.15)
}
for(nb in nbs){
for(aa in dirichlet.scale){
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
# alpha.cor == 0: use the true correlation matrix R directly.
est_cor = FALSE
cor.status = 'trueR'
}else{
# Otherwise estimate the correlation with thresholding level alpha.cor.
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status,'aa=',aa))
# NOTE(review): result kept in simu_out2 and the saveRDS below is commented
# out, so this section currently saves nothing — intentional? verify.
simu_out2 = simu_study(ref,sigma2,p1,p2,n_bulk = nb,dirichlet.scale=aa,nreps = 100,
R=A,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor)
#saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_',cor.status,'_',case,'_dirichlet',aa,'no_pd.rds',sep=''))
}
}
}
}
########10/20evening/2021#####################
# do not use real reference and sigma2
# Fully synthetic reference: 500 genes x 4 cell types, |N(0,1)| entries scaled
# so each column sums to G; sigma2 fixed at half the mean.
source('code/simulation/simulation_manuscript.R')
G = 500
K = 4
set.seed(12345)
ref = matrix(rnorm(G*K),nrow=G)
ref = abs(ref)
ref = apply(ref, 2, function(z){z/sum(z)})*G
sigma2 = ref/2
# Narrow correlation band here (d = 25) since there are only 500 genes.
d = 25
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
# Scenario grid: true R plus two correlation-estimation thresholds.
alpha.cors = c(0,0.1,0.5)
cases = c("null","all_diff")
nbs = c(100)
dirichlet.scale = c(10,5)
set.seed(12345)
for(case in cases){
if(case=='null'){
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
p1 = c(0.15,0.2,0.45,0.2)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(aa in dirichlet.scale){
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
# Use the true correlation matrix.
est_cor = FALSE
cor.status = 'trueR'
}else{
# Estimate correlation; encode the threshold in the output file name.
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status,'aa=',aa))
simu_out = simu_study(ref,sigma2,p1,p2,n_bulk = nb,dirichlet.scale=aa,
R=A,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor,nreps = 100)
saveRDS(simu_out,file = paste('output/manuscript/simulation/test/',nb,'bulk_500genecor_',cor.status,'_',case,'_dirichlet',aa,'no_pd.rds',sep=''))
}
}
}
}
#######################################
########10/20/2021#####################
# random p, do not make.pos
# Xin pancreas reference; per-sample proportions drawn inside simu_study
# (dirichlet.scale controls their spread) without forcing positive estimates.
library(gtools)
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
G = nrow(ref)
K = ncol(ref)
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
# Widest scenario grid in this file: four correlation settings, three scales.
alpha.cors = c(0,0.1,0.5,0.8)
cases = c("null","all_diff")
nbs = c(100)
dirichlet.scale = c(5,10,100)
# test
# temp = simu_study(ref[1:100,],sigma2[1:100,],c(0.5,0.3,0.1,0.1),c(0.5,0.3,0.1,0.1),
# n_bulk = 50,dirichlet.scale=5,
# R=A[1:100,1:100],printevery = 1,est_cor=FALSE,nreps = 5)
set.seed(12345)
for(case in cases){
if(case=='null'){
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
p1 = c(0.15,0.2,0.45,0.2)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(aa in dirichlet.scale){
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
est_cor = FALSE
cor.status = 'trueR'
}else{
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status,'aa=',aa))
simu_out = simu_study(ref,sigma2,p1,p2,n_bulk = nb,dirichlet.scale=aa,
R=A,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_',cor.status,'_',case,'_dirichlet',aa,'no_pd.rds',sep=''))
}
}
}
}
#######################################
########10/19/2021#####################
# try to fix p while keep all other things unchanged
# Same design as the other Xin-reference sections, but dirichlet=FALSE:
# every sample in a group gets exactly p1 (or p2) instead of Dirichlet draws.
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
G = nrow(ref)
K = 4
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
alpha.cors = c(0,0.1,0.5)
cases = c("null","all_diff")
nbs = c(100)
# test
# temp = simu_study(ref[1:100,],sigma2[1:100,],c(0.5,0.3,0.1,0.1),c(0.5,0.3,0.1,0.1),
# n_bulk = 50,dirichlet.scale=5,
# R=A[1:100,1:100],printevery = 1,est_cor=FALSE,nreps = 5)
set.seed(12345)
for(case in cases){
if(case=='null'){
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
p1 = c(0.15,0.2,0.45,0.2)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
est_cor = FALSE
cor.status = 'trueR'
}else{
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status))
simu_out = simu_study(ref,sigma2,p1,p2,n_bulk = nb,dirichlet=FALSE,
R=A,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/fixp/simulation_',nb,'bulk_500genecor_',cor.status,'_',case,'_fixp','.rds',sep=''))
}
}
}
##################################
########10/12/2021#####################
# do not use real reference and sigma2
# Fully synthetic reference: 500 genes x 4 cell types, |N(0,1)| entries scaled
# so each column sums to G; sigma2 fixed at half the mean.
# Bug fix: K was previously assigned AFTER being used in matrix(rnorm(G*K), ...),
# which errors in a fresh session (it only "worked" by silently reusing a stale
# K = 4 left over from an earlier section). K is now defined before the draw,
# which leaves the rnorm stream — and hence all results — unchanged.
G = 500
K = 4
set.seed(12345)
ref = matrix(rnorm(G*K),nrow=G)
ref = abs(ref)
ref = apply(ref, 2, function(z){z/sum(z)})*G
sigma2 = ref/2
# Narrow correlation band (d = 25) since there are only 500 genes.
d = 25
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
# Scenario grid: true R only, two bulk-sample counts, two Dirichlet scales.
alpha.cors = c(0)
cases = c("null","all_diff")
nbs = c(50,100)
dirichlet.scale = c(5,10)
set.seed(12345)
for(case in cases){
if(case=='null'){
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
p1 = c(0.15,0.2,0.45,0.2)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(aa in dirichlet.scale){
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
est_cor = FALSE
cor.status = 'trueR'
}else{
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status,'aa=',aa))
simu_out = simu_study(ref,sigma2,p1,p2,n_bulk = nb,dirichlet.scale=aa,
R=A,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor,nreps = 100)
saveRDS(simu_out,file = paste('output/manuscript/simulation/test/',nb,'bulk_500genecor_',cor.status,'_',case,'_dirichlet',aa,'.rds',sep=''))
}
}
}
}
#######################################
########10/06/2021#####################
# Xin pancreas reference; newer simu_study interface (proportions generated
# internally from p1/p2 with dirichlet.scale). Three bulk sizes here.
library(gtools)
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
G = nrow(ref)
K = 4
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
alpha.cors = c(0,0.1,0.5)
cases = c("null","all_diff")
nbs = c(10,50,100)
dirichlet.scale = c(5,10)
# test
# temp = simu_study(ref[1:100,],sigma2[1:100,],c(0.5,0.3,0.1,0.1),c(0.5,0.3,0.1,0.1),
# n_bulk = 50,dirichlet.scale=5,
# R=A[1:100,1:100],printevery = 1,est_cor=FALSE,nreps = 5)
set.seed(12345)
for(case in cases){
if(case=='null'){
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
p1 = c(0.15,0.2,0.45,0.2)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(aa in dirichlet.scale){
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
est_cor = FALSE
cor.status = 'trueR'
}else{
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status,'aa=',aa))
simu_out = simu_study(ref,sigma2,p1,p2,n_bulk = nb,dirichlet.scale=aa,
R=A,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_',cor.status,'_',case,'_dirichlet',aa,'.rds',sep=''))
}
}
}
}
#######################################
########08/18/2021###################
# Oldest simu_study interface: the group-proportion matrix b (K x n_bulk) is
# built explicitly — each group's fixed proportion vector repeated nb/2 times.
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
b1 = c(0.1,0.1,0.3,0.5)
b2 = c(0.1,0.2,0.5,0.2)
nb = 10
b = cbind(b1%*%t(rep(1,nb/2)),b2%*%t(rep(1,nb/2)))
G = nrow(ref)
K = 4
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1)
saveRDS(simu_out,file = 'output/manuscript/simulation_10bulk_500genecor_fdr05.rds')
##############09/28/2021###############
# Re-runs with different group proportions, reusing ref/sigma2/A from above.
# Null case: identical proportions in both groups.
b1 = c(0.5,0.3,0.1,0.1)
b2 = c(0.5,0.3,0.1,0.1)
nb = 50
b = cbind(b1%*%t(rep(1,nb/2)),b2%*%t(rep(1,nb/2)))
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1)
saveRDS(simu_out,file = 'output/manuscript/simulation_50bulk_500genecor_fdr05_null.rds')
# All cell types differ between groups.
# NOTE(review): nb = 30 but the output file name says "50bulk" — likely a
# leftover from a copy/paste; confirm which was intended.
b1 = c(0.5,0.3,0.1,0.1)
b2 = c(0.1,0.1,0.3,0.5)
nb = 30
b = cbind(b1%*%t(rep(1,nb/2)),b2%*%t(rep(1,nb/2)))
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1)
saveRDS(simu_out,file = 'output/manuscript/simulation_50bulk_500genecor_fdr05_all_diff.rds')
# Only one cell type's proportion differs between groups (same nb/name caveat).
b1 = c(0.1,0.1,0.3,0.5)
b2 = c(0.1,0.15,0.4,0.35)
nb = 30
b = cbind(b1%*%t(rep(1,nb/2)),b2%*%t(rep(1,nb/2)))
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1)
saveRDS(simu_out,file = 'output/manuscript/simulation_50bulk_500genecor_fdr05_one_diff.rds')
#######################################
##############10/05/2021###############
# Explicit Dirichlet-drawn proportion matrices b for null / all-diff / one-diff
# cases, using the older simu_study(ref, b, R, sigma2, ...) interface.
library(gtools)
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
G = nrow(ref)
K = 4
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
# Bug fix: this scalar was assigned to `alpha.cors`, but every use below reads
# `alpha.cor` (the simu_study argument and the output file names), which would
# be undefined in a fresh session.
alpha.cor = 0.5
## null case
# Bug fix: p1/p2 were used inside the loop below before being defined anywhere
# in this section; the null proportions below match the null case used by the
# other sections of this file.
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
for(nb in c(50,100)){
set.seed(12345)
b1 = t(rdirichlet(nb/2,p1*10))
b2 = t(rdirichlet(nb/2,p2*10))
b = cbind(b1,b2)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1,alpha.cor = alpha.cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_fdr0',alpha.cor*10,'_null_dirichlet.rds',sep=''))
}
## all diff
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.1,0.1,0.3,0.5)
nb = 100
# generate group proportions using dirichlet distribution
set.seed(12345)
b1 = t(rdirichlet(nb/2,p1*10))
b2 = t(rdirichlet(nb/2,p2*10))
b = cbind(b1,b2)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1,alpha.cor = alpha.cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_fdr0',alpha.cor*10,'_all_diff_dirichlet.rds',sep=''))
nb = 50
# generate group proportions using dirichlet distribution
set.seed(12345)
b1 = t(rdirichlet(nb/2,p1*10))
b2 = t(rdirichlet(nb/2,p2*10))
b = cbind(b1,b2)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1,alpha.cor = alpha.cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_fdr0',alpha.cor*10,'_all_diff_dirichlet.rds',sep=''))
## one diff
p1 = c(0.1,0.1,0.3,0.5)
p2 = c(0.1,0.15,0.4,0.35)
nb = 100
# generate group proportions using dirichlet distribution
set.seed(12345)
b1 = t(rdirichlet(nb/2,p1*10))
b2 = t(rdirichlet(nb/2,p2*10))
b = cbind(b1,b2)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1,alpha.cor = alpha.cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_fdr0',alpha.cor*10,'_one_diff_dirichlet.rds',sep=''))
nb = 50
# generate group proportions using dirichlet distribution
set.seed(12345)
b1 = t(rdirichlet(nb/2,p1*10))
b2 = t(rdirichlet(nb/2,p2*10))
b = cbind(b1,b2)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1,alpha.cor = alpha.cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_fdr0',alpha.cor*10,'_one_diff_dirichlet.rds',sep=''))
#######################################
##############10/06/2021###############
# Loop version of the explicit-b design: Dirichlet-drawn proportions with
# scale aa, over two cases, two bulk sizes and two correlation settings.
library(gtools)
source('code/simulation/simulation_manuscript.R')
xin = readRDS('data/pancreas/xin_ref_sigma9496.rds')
ref = xin$ref
sigma2 = xin$sigma2
G = nrow(ref)
K = 4
# Banded gene-gene correlation matrix (linear decay over d = 500 genes).
d = 500
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
alpha.cors = c(0,0.5)
cases = c("null","all_diff")
nbs = c(50,100)
dirichlet.scale = c(5,10)
set.seed(12345)
for(case in cases){
if(case=='null'){
p1 = c(0.5,0.3,0.1,0.1)
p2 = c(0.5,0.3,0.1,0.1)
}else if(case=='all_diff'){
p1 = c(0.4,0.3,0.2,0.1)
p2 = c(0.1,0.1,0.3,0.5)
}
for(nb in nbs){
for(aa in dirichlet.scale){
# b is K x nb: nb/2 Dirichlet draws around p1, then nb/2 around p2.
b1 = t(rdirichlet(nb/2,p1*aa))
b2 = t(rdirichlet(nb/2,p2*aa))
b = cbind(b1,b2)
for(alpha.cor in alpha.cors){
if(alpha.cor==0){
est_cor = FALSE
cor.status = 'trueR'
}else{
est_cor=TRUE
cor.status = paste('cor0',alpha.cor*10,sep = '')
}
print(paste('Running:',case,'nb=',nb,'cor:',cor.status,'aa=',aa))
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1,alpha.cor = alpha.cor,est_cor=est_cor)
saveRDS(simu_out,file = paste('output/manuscript/simulation/simulation_',nb,'bulk_500genecor_',cor.status,'_',case,'_dirichlet',aa,'.rds',sep=''))
}
}
}
}
#######################################
# Sensitivity runs with a narrower correlation band (d = 300) and with no
# correlation at all (R = NULL).
# NOTE(review): relies on G, ref, sigma2 and b carried over from the sections
# above — will error if run standalone.
d = 300
A = matrix(0,nrow=G,ncol=G)
for(i in 1:G){
for(j in i:min(i+d,G)){
A[i,j] = max(1-abs(i-j)/d,0)
}
}
A = A+t(A) - diag(G)
library(Matrix)
A = Matrix(A,sparse = TRUE)
set.seed(12345)
simu_out = simu_study(ref,b,R=A,sigma2,printevery = 1)
saveRDS(simu_out,file = 'output/manuscript/simulation/simulation_10bulk_300genecor_fdr05.rds')
# Independent-gene baseline: no correlation structure supplied.
set.seed(12345)
simu_out = simu_study(ref,b,R=NULL,sigma2,printevery = 1)
saveRDS(simu_out,file = 'output/manuscript/simulation/simulation_10bulk_0genecor_fdr05.rds')
# for(t in 1:7){
# temp = simu_out[[t]]
# temp_out = array(dim=c(4,10,100))
# for(r in 1:100){
# temp_out[,,r] = matrix(c(temp[,,r]),nrow=4)
# }
# simu_out[[t]] = temp_out
# }
# saveRDS(simu_out,file = 'output/manuscript/simulation1.rds')
#
# diff_hat_se = matrix(nrow=100,ncol=4)
# diff_hat_se_cor = matrix(nrow=100,ncol=4)
# diff_hat_weight_se_cor = matrix(nrow=100,ncol=4)
# for(i in 1:100){
# diff_hat_se[i,] = two_group_test(simu_out$all_fit[[i]]$fit.err,c(1,1,1,1,1,2,2,2,2,2))$diff_se
# diff_hat_se_cor[i,] = two_group_test(simu_out$all_fit[[i]]$fit.err.cor,c(1,1,1,1,1,2,2,2,2,2))$diff_se
# diff_hat_weight_se_cor[i,] = two_group_test(simu_out$all_fit[[i]]$fit.err.cor.weight,c(1,1,1,1,1,2,2,2,2,2))$diff_se
# }
# simu_out$diff_hat_se = diff_hat_se
# simu_out$diff_hat_se_cor = diff_hat_se_cor
# simu_out$diff_hat_weight_se_cor = diff_hat_weight_se_cor
## Look at rmse
rmse = function(x,y){sqrt(mean((x-y)^2))}
# Per-bulk-sample RMSE of estimated cell-type proportions across replicates.
#
# @param p_hat K x nb x nreps array of estimated proportions
#   (cell types x bulk samples x simulation replicates)
# @param b     K x nb matrix of true proportions
# @return named numeric vector of length nb; entry i is
#   sqrt( mean over replicates of sum_k (p_hat[k,i,rep] - b[k,i])^2 )
#
# Cleanup: removed the unused local K, preallocated the result vectors instead
# of growing from c(), and replaced 1:n loops with seq_len() (safe for n = 0).
get_rmse = function(p_hat, b){
nb = dim(p_hat)[2]
nreps = dim(p_hat)[3]
rmses = numeric(nb)
for(i in seq_len(nb)){
err = numeric(nreps)
for(j in seq_len(nreps)){
# Squared error summed over cell types for this bulk sample / replicate.
err[j] = sum((p_hat[,i,j]-b[,i])^2)
}
rmses[i] = sqrt(mean(err))
}
names(rmses) = paste('bulk', seq_len(nb))
rmses
}
# Interactive comparison of per-bulk RMSE for the three estimators
# (OLS, proposed, weighted). Uses simu_out and b from the sections above.
get_rmse(simu_out$p_hat_ols,b)
get_rmse(simu_out$p_hat,b)
get_rmse(simu_out$p_hat_weight,b)
# Look at coverage
# Empirical coverage of nominal 95% intervals for estimated proportions.
#
# @param p_hat    K x nb x nreps array of estimated proportions
# @param p_hat_se K x nb x nreps array of standard errors matching p_hat
# @param b        K x nb matrix of true proportions
# @return K x nb matrix: for each (cell type, bulk sample), the fraction of
#   replicates whose z-score |(p_hat - b)/se| is below 1.96, rounded to 3 digits
#
# Bug fix: the dimensions were previously taken from `z` (K = dim(z)[1], etc.)
# BEFORE z was created, which errors in a fresh session; they now come from
# p_hat, which has the same dimensions z is later given.
get_coverage_p = function(p_hat, p_hat_se, b){
K = dim(p_hat)[1]
nb = dim(p_hat)[2]
nreps = dim(p_hat)[3]
z = array(dim = dim(p_hat))
for(i in seq_len(nreps)){
# z-score of each estimate against the truth for replicate i.
z[,,i] = (p_hat[,,i]-b)/p_hat_se[,,i]
}
# Per-(cell, bulk) share of replicates inside the +/-1.96 interval.
crg = apply(z, c(1,2), function(zi){round(mean(abs(zi) < 1.96, na.rm = TRUE), 3)})
rownames(crg) = paste('cell', seq_len(K))
colnames(crg) = paste('bulk', seq_len(nb))
crg
}
# Interactive coverage check for each estimator / standard-error combination
# (OLS, proposed with plain and correlation-adjusted SEs, weighted).
# Uses simu_out and b from the sections above.
get_coverage_p(simu_out$p_hat_ols,simu_out$p_hat_ols_se,b)
get_coverage_p(simu_out$p_hat,simu_out$p_hat_se,b)
get_coverage_p(simu_out$p_hat,simu_out$p_hat_se_cor,b)
get_coverage_p(simu_out$p_hat_weight,simu_out$p_hat_weight_se_cor,b)
# Empirical rejection rate of the two-group difference test per cell type.
#
# @param diff_hat    nreps x K matrix of estimated group differences
# @param diff_hat_se nreps x K matrix of their standard errors
# @return numeric vector of length K: per-column share of replicates whose
#   |z| = |diff_hat/se| exceeds 1.96 (NAs ignored)
get_power_diff = function(diff_hat, diff_hat_se){
z = diff_hat / diff_hat_se
rejected = abs(z) > 1.96
colMeans(rejected, na.rm = TRUE)
}
# Interactive power / type-I-error check for each estimator's difference test.
# Uses simu_out from the sections above.
get_power_diff(simu_out$diff_hat_ols,simu_out$diff_hat_ols_se)
get_power_diff(simu_out$diff_hat,simu_out$diff_hat_se)
get_power_diff(simu_out$diff_hat,simu_out$diff_hat_se_cor)
get_power_diff(simu_out$diff_hat_weight,simu_out$diff_hat_weight_se_cor)
|
3fbba3da181fefaa56eb158d5d260a4246a16193
|
810a8f9cb3439bacc4885803dbc1e8251f50a51b
|
/functions.R
|
cb3abdee4fc75116f0da770b1937b36b5b62fc3f
|
[] |
no_license
|
janfait/nmf
|
cb5e9adaf0b9f69ed741ba1b42fa85cb598c89cf
|
2be4b0b0df1858e70ff31cb4d8fefa259d406943
|
refs/heads/master
| 2020-05-04T20:41:22.040034
| 2014-11-24T11:02:08
| 2014-11-24T11:02:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,510
|
r
|
functions.R
|
####################################################################################################
# FAILURE DETECTION
####################################################################################################
# function that returns TRUE if your attempted operation results in an error
# @x whichever object you pick that is a result of try(something(x))
fail <- function(x) inherits(x, "try-error")
####################################################################################################
# READ IN SPSS
####################################################################################################
# read in an SPSS file and return a list that has data, variable and value labels nicely prepared for analysis
# @filename = directory+name of the file you want to read in such as "Z:/kvoty/data_01.sav"
# Read an SPSS (.sav) file and return its data with variable and value labels.
#
# @param filename full path to the file, e.g. "Z:/kvoty/data_01.sav"
# @return a list with components:
#   data_object - raw list returned by foreign::read.spss()
#   data        - the same data coerced to a data.frame
#   varlabels   - variable labels ("variable.labels" attribute)
#   vallabels   - value label tables ("label.table" attribute)
# Stops with an error when no path is given or the file does not exist.
# NOTE: requires the 'foreign' package to be attached (read.spss is called
# unqualified), same as the original.
#
# Cleanup: removed the unreachable return(NULL) statements that followed each
# stop(), dropped the no-op paste(filename, sep = ""), spelled out TRUE, and
# flattened the nesting with guard clauses. Behavior is otherwise unchanged.
read_in_spss <- function(filename = NULL){
  if (is.null(filename)) {
    stop("Select a path to the file!")
  }
  if (!file.exists(filename)) {
    stop("File not found or not able to load")
  }
  # Read once with value labels as factors and declared missings honoured.
  data_object <- read.spss(filename, use.value.labels = TRUE, use.missings = TRUE)
  data <- as.data.frame(data_object)
  varlabels <- attr(data_object, "variable.labels")
  vallabels <- attr(data_object, "label.table")
  # Return everything at once, as before.
  list(
    data_object = data_object,
    data = data,
    varlabels = varlabels,
    vallabels = vallabels
  )
}
####################################################################################################
# Wrapper
####################################################################################################
# Breaks a long string into multiple lines
# @ width number of characters after which to wrap
# Collapse a long string into width-wrapped lines joined by newlines.
# @param x   character string to wrap
# @param ... forwarded to strwrap() (typically `width`)
# @return a single string with "\n" between wrapped lines
wrapper <- function(x, ...) {
  wrapped_lines <- strwrap(x, ...)
  paste(wrapped_lines, collapse = "\n")
}
####################################################################################################
# Plot SM
####################################################################################################
# Plotting function
# Parameters
# @ Xdata - data frame with the survey data
# @ Typ - chart type: 1 bar chart, 2 faceted bar chart, 3 pie chart, etc.
# @ Otazka - variable(s) to display, given either simply as "sex" or as c("sex","edu")
# @ Trideni - grouping (cross-tabulation) variable
# @ Labels - show data labels, default = TRUE
# @ Lablim - do not show labels smaller than this value, default = -Inf (shows all)
# @ Decim - round displayed labels to this many decimal places, default = 0
# @ Barva - colour palette selector
# Build a ggplot for one survey question, dispatching on `typ`:
#   1 = bar chart, 2 = faceted bar by a grouping var, 3 = pie chart,
#   4 = stacked bar by group, 5 = stacked bar over a battery of questions,
#   6 = bar of group means, 42 = stacked bar with a second facet var,
#   52 = dot-and-line plot of per-group means over a battery,
#   62 = group-mean bars with a second facet var.
# Percentages are computed from counts via aggregate(); Freqpos positions the
# value labels; labels below `lablim` are blanked out.
# NOTE(review): the default xdata=dataf references a global `dataf`; an
# unrecognised `typ` inside a non-empty xdata leaves `p` undefined — both look
# unintended, verify with callers. Requires ggplot2 and reshape (melt) attached.
plot_sm<- function(xdata=dataf, typ, otazka,trideni,trideni2,id, labels = T, lablim = -Inf, decim = 0,barva = 0,varlabels){
if(!is.null(xdata)){
if(nrow(xdata)>0){
# Copy the plotted variable into a fixed column name for the aggregations.
xdata$otaz<-xdata[,c(otazka)]
# Map the `barva` selector onto an RColorBrewer palette ("Paired" fallback).
paleta = "Paired"
if (barva ==0) {paleta = "Set3"}
if (barva ==1) {paleta = "RdBu"}
if (barva ==2) {paleta = "Pastel1"}
if (barva ==3) {paleta = "Accent"}
# typ 1: simple percentage bar chart of one variable.
if (typ ==1){
grafdata <- aggregate(xdata$otaz ,by = list(xdata$otaz),FUN = length)
colnames(grafdata)<-c("Var","Freq")
grafdata$Freq <- grafdata$Freq * 100 / sum(grafdata$Freq)
# Label position: mid-height of each bar.
grafdata$Freqpos <- grafdata$Freq / 2
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
# Blank out labels below the display threshold.
grafdata$Lab[grafdata$Freq<lablim] <- ""
# Wrap long category names so axis labels stay readable.
grafdata$Var2 <- sapply(grafdata$Var, function(x) wrapper(toString(x), width = 17))
p <- ggplot(grafdata,aes(x=Var2), fill=Var2) + geom_bar(aes(y=Freq, fill = Var2), stat = "identity" )
p <- p + theme(legend.position= "none") + xlab("")+ylab("")
p <- p + theme(axis.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos, label=Lab)) }
}
# typ 2: percentages within each level of `trideni`, one facet per level.
else if (typ ==2){
xdata$trid<-xdata[,c(trideni)]
grafdata <- aggregate(xdata$otaz ,by = list(xdata$trid, xdata$otaz),FUN = length)
# Per-group totals so percentages sum to 100 within each facet.
datasum <- aggregate(xdata$otaz,by = list(xdata$trid),FUN=length)
grafdata <- merge(grafdata,datasum, by="Group.1")
colnames(grafdata) <- c("Cro","Var","Freq","Sum")
grafdata <- grafdata[order(grafdata$Cro,grafdata$Var),]
grafdata$Freq <- grafdata$Freq *100/ grafdata$Sum
grafdata$Freqpos <- grafdata$Freq / 2
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
p <- ggplot(grafdata, aes(Var, fill=Cro)) + geom_bar(aes(y = Freq),stat= "identity") + facet_grid(. ~ Cro) + coord_flip()
p <- p + theme(legend.position= "none")+ xlab("")+ylab("")
p <- p + theme(strip.text = element_text(size = rel(1.5)),axis.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos, label=Lab)) }
}
# typ 3: pie chart (stacked bar in polar coordinates).
else if (typ ==3){
grafdata <- aggregate(xdata$otaz ,by = list(xdata$otaz),FUN = length)
colnames(grafdata)<-c("Var","Freq")
grafdata$Freq <- grafdata$Freq *100/ sum(grafdata$Freq )
# Label position: middle of each pie slice along the cumulative scale.
grafdata$Freqpos <- cumsum(grafdata$Freq) - 0.5 * grafdata$Freq
grafdata$Lab <- round(grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
p <- ggplot(grafdata, aes(x =factor(1),y = Freq, fill = Var) ) + geom_bar(stat="identity",width=1) + coord_polar(theta = "y") + xlab("")+ ylab("")
p <- p + theme(axis.text = element_blank(), axis.ticks = element_blank(), panel.grid = element_blank(),legend.title = element_blank())
p <- p + theme(legend.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos,label=Lab) ) }
}
# typ 4: horizontal 100%-stacked bars, one bar per level of `trideni`.
else if (typ ==4){
xdata$trid<-xdata[,c(trideni)]
grafdata <- aggregate(xdata$otaz ,by = list(xdata$trid, xdata$otaz),FUN = length)
datasum <- aggregate(grafdata$x,by = list(grafdata$Group.1),FUN=sum)
grafdata <- merge(grafdata,datasum, by.x=c("Group.1"), by.y=c("Group.1"))
colnames(grafdata) <- c("Cro","Var","Freq","Sum")
grafdata <- grafdata[order(grafdata$Cro,grafdata$Var),]
grafdata$Freq <- grafdata$Freq *100/ grafdata$Sum
# %% 100 restarts the cumulative label positions for each 100%-wide bar.
grafdata$Freqpos <- (cumsum(grafdata$Freq) - 0.5 * grafdata$Freq) %% 100
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
p <- ggplot(grafdata, aes(x = factor(Cro), fill=factor(Var))) + geom_bar(aes(y = Freq),stat= "identity") + coord_flip()
p <- p + theme(axis.text = element_text(size = rel(1.5)))
p <- p + theme(legend.title = element_blank()) + xlab("")+ylab("")
p <- p + theme(legend.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos,label=Lab) ) }
}
# typ 5: battery of questions (`otazka` is a vector) -> melt to long format,
# one 100%-stacked bar per question, labelled via `varlabels`.
else if (typ ==5){
datamin <- xdata[,c(id,otazka)]
datamelt <- melt(datamin,id=1)
grafdata <- aggregate(datamelt[,1] , by =list(datamelt$variable,datamelt$value), FUN = length)
datasum <- aggregate(grafdata$x, by= list(grafdata$Group.1),FUN = sum)
grafdata <- merge(grafdata,datasum, by="Group.1")
colnames(grafdata) <- c("Cro","Var","Freq","Sum")
grafdata <- grafdata[order(grafdata$Cro,grafdata$Var),]
grafdata$cs <- cumsum(grafdata$Freq)
grafdata$Freq <- grafdata$Freq *100/ grafdata$Sum
grafdata$Freqpos <- (cumsum(grafdata$Freq) - 0.5 * grafdata$Freq) %%100
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
# Human-readable, wrapped question labels; factor levels reversed so the
# first question appears at the top after coord_flip().
grafdata$Labs <- sapply(grafdata$Cro, function(x) wrapper(varlabels[toString(x)], width = 40))
grafdata$Labs2 <- factor(grafdata$Labs, as.character(unique(grafdata$Labs)))
grafdata$Labs2 <- factor(grafdata$Labs2, levels = rev(levels(grafdata$Labs2)))
p <- ggplot(grafdata, aes(x = Labs2, fill=Var)) + geom_bar(aes(y = Freq),stat= "identity") + coord_flip()
p <- p + theme(legend.title = element_blank()) + xlab("")+ylab("")
p <- p + theme(axis.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
p <- p + theme(legend.text = element_text(size = rel(1.5)))
if (labels==T) {p <- p + geom_text(aes(y=Freqpos,label=Lab) ) }
}
# typ 6: per-group means of a numeric variable as horizontal bars.
else if (typ ==6){
xdata$trid<-xdata[,c(trideni)]
grafdata <- aggregate(xdata$otaz[!is.na(xdata$otaz)] ,by = list(xdata$trid[!is.na(xdata$otaz)]),FUN = mean)
colnames(grafdata)<-c("Cro","Freq")
# NOTE(review): bare `grafdata$Freq` below is a no-op — leftover debug line?
grafdata$Freq
grafdata$Freqpos <- grafdata$Freq / 2
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
p <- ggplot(grafdata,aes(x=Cro), fill="grey") + geom_bar(aes(y=Freq, fill = "grey"), stat = "identity" ) + coord_flip()
p <- p + theme(legend.position= "none") + xlab("")+ylab("")
p <- p + theme(axis.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
#p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos, label=Lab)) }
}
# typ 42: like typ 4, additionally faceted by a second grouping var `trideni2`.
else if (typ ==42){
xdata$trid<-xdata[,c(trideni)]
xdata$trid2<-xdata[,c(trideni2)]
grafdata <- aggregate(xdata$otaz ,by = list(xdata$trid,xdata$trid2, xdata$otaz),FUN = length)
datasum <- aggregate(grafdata$x,by = list(grafdata$Group.1,grafdata$Group.2),FUN=sum)
grafdata <- merge(grafdata,datasum, by.x=c("Group.1","Group.2"), by.y=c("Group.1", "Group.2"))
colnames(grafdata) <- c("Cro","Cro2","Var","Freq","Sum")
grafdata <- grafdata[order(grafdata$Cro,grafdata$Cro2,grafdata$Var),]
grafdata$Freq <- grafdata$Freq *100/ grafdata$Sum
grafdata$Freqpos <- (cumsum(grafdata$Freq) - 0.5 * grafdata$Freq) %% 100
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
p <- ggplot(grafdata, aes(x = factor(Cro), fill=factor(Var))) + geom_bar(aes(y = Freq),stat= "identity") + coord_flip()
p <- p + facet_grid(Cro2 ~ .)
p <- p + theme(axis.text = element_text(size = rel(1)))
p <- p + theme(legend.title = element_blank()) + xlab("")+ylab("")
p <- p + theme(legend.text = element_text(size = rel(1.25)))
p <- p + theme(panel.background = element_blank())
p <- p + theme(strip.text = element_text(size = rel(1.25)))
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos,label=Lab) ) }
}
# typ 52: per-group means over a battery of questions, shown as connected dots.
else if (typ ==52){
datamin <- xdata[,c(id,otazka, trideni)]
datamelt <- melt(datamin,id=c(id,trideni))
# Coerce responses to numeric and drop non-numeric / missing rows.
datamelt$val2 <- as.numeric(datamelt$value)
datamelt <- datamelt[!is.na(datamelt$val2),]
grafdata <- aggregate(datamelt$val2 , by =list(datamelt$variable,datamelt[,2]), FUN = mean)
#datasum <- aggregate(grafdata$x, by= list(grafdata$Group.1),FUN = sum)
#grafdata <- merge(grafdata,datasum, by="Group.1")
colnames(grafdata) <- c("Var","Cro","Freq")
grafdata <- grafdata[order(grafdata$Cro,grafdata$Var),]
#grafdata$cs <- cumsum(grafdata$Freq)
#grafdata$Freq <- grafdata$Freq *100/ grafdata$Sum
#grafdata$Freqpos <- (cumsum(grafdata$Freq) - 0.5 * grafdata$Freq) %%100
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
# Wrapped question labels, reversed so the first appears on top after flip.
grafdata$Labs <- sapply(grafdata$Var, function(x) wrapper(varlabels[toString(x)], width = 40))
grafdata$Labs2 <- factor(grafdata$Labs, as.character(unique(grafdata$Labs)))
grafdata$Labs2 <- factor(grafdata$Labs2, levels = rev(levels(grafdata$Labs2)))
p <- ggplot(grafdata, aes(x = Labs2,y = Freq, group=Cro)) + coord_flip()
p <- p + geom_point(aes(colour= factor(Cro), size = 25 ),stat= "identity") + geom_path(aes(colour=factor(Cro)))
p <- p + guides(size=FALSE)
p <- p + theme(legend.title = element_blank()) + xlab("")+ylab("")
p <- p + theme(axis.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
p <- p + scale_fill_brewer( type = "div" , palette = paleta )
p <- p + theme(legend.text = element_text(size = rel(1.5)))
#if (labels==T) {p <- p + geom_text(aes(y=Freq,label=Lab) ) }
}
# typ 62: like typ 6 (group means), additionally faceted by `trideni2`.
else if (typ ==62){
xdata$trid<-xdata[,c(trideni)]
xdata$trid2<-xdata[,c(trideni2)]
grafdata <- aggregate(xdata$otaz[!is.na(xdata$otaz)] ,by = list(xdata$trid[!is.na(xdata$otaz)],xdata$trid2[!is.na(xdata$otaz)]),FUN = mean)
colnames(grafdata)<-c("Cro","Cro2","Freq")
# NOTE(review): bare `grafdata$Freq` below is a no-op — leftover debug line?
grafdata$Freq
grafdata$Freqpos <- grafdata$Freq / 2
grafdata$Lab <- round(x=grafdata$Freq, digits=decim)
grafdata$Lab[grafdata$Freq<lablim] <- ""
p <- ggplot(grafdata,aes(x=Cro), fill="grey") + geom_bar(aes(y=Freq, fill = "grey"), stat = "identity" ) + coord_flip()
p <- p + facet_grid(Cro2 ~ .)
p <- p + theme(legend.position= "none") + xlab("")+ylab("")
p <- p + theme(axis.text = element_text(size = rel(1.5)))
p <- p + theme(panel.background = element_blank())
#p <- p + scale_fill_brewer( type = "div" , palette = paleta )
if (labels==T) {p <- p + geom_text(aes(y=Freqpos, label=Lab)) }
}
# Empty or NULL data: return NULL instead of a plot.
} else { p <- NULL }
} else { p <- NULL }
p
}
####################################################################################################
# Tab SM
####################################################################################################
# Tabulation function
# Parameters
# @ Xdata
# @ Typ - determines the table type
# @ Otazka - displayed variable, given either simply as "sex" or as c("sex","edu")
# @ Trideni - grouping (cross-tabulation) variable
# @ Decim - round the displayed labels to this number of decimals, default = 0
# Build a summary table for one survey question. The layout is selected by
# `typ`; the returned data frame mirrors the corresponding chart produced by
# the sibling plotting function:
#   1 / 3 : simple percentage distribution of `otazka`
#   2 / 4 : percentage distribution cross-tabulated by `trideni`
#   5     : battery of items (`otazka` is a vector of column names),
#           percentages per sub-question
#   6     : mean of `otazka` by `trideni`
#   42    : percentage distribution cross-tabulated by `trideni` and `trideni2`
#   52    : means of a battery of items by `trideni`
#   62    : mean of `otazka` by `trideni` and `trideni2`
#
# Arguments:
#   xdata     survey data frame (default: global `dataf`)
#   typ       table type, see above
#   otazka    displayed variable name(s), e.g. "sex" or c("a1", "a2")
#   trideni   first cross-tabulation variable
#   trideni2  second cross-tabulation variable (types 42/62 only)
#   id        respondent-id column, used to melt batteries (types 5/52)
#   decim     number of decimals for rounded labels (default 0)
#   barva     unused here; kept for interface compatibility with the plot fn
#   varlabels named label vector for battery items (types 5/52); relies on the
#             `wrapper` helper defined elsewhere in this file
#
# Returns a data frame with localized column names, or NULL when `xdata` is
# NULL or empty. An unrecognized `typ` raises "object 'p' not found", as in
# the original implementation.
tab_sm <- function(xdata = dataf, typ, otazka, trideni, trideni2, id,
                   decim = 0, barva = 0, varlabels) {
  if (!is.null(xdata)) {
    if (nrow(xdata) > 0) {
      xdata$otaz <- xdata[, c(otazka)]
      if (typ == 1 | typ == 3) {
        # Simple frequency table converted to percentages.
        # (A stray debug print(grafdata) was removed here.)
        grafdata <- aggregate(xdata$otaz, by = list(xdata$otaz), FUN = length)
        colnames(grafdata) <- c("Var", "Freq")
        grafdata$n <- grafdata$Freq
        grafdata$Freq <- grafdata$Freq * 100 / sum(grafdata$Freq)
        grafdata$Freqpos <- grafdata$Freq / 2
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        p <- grafdata[c("Var", "Freq", "n")]
        colnames(p) <- c("Varianta", "Podíl", "n=")
      }
      else if (typ == 2 | typ == 4) {
        # Percentages within each level of the cross variable.
        xdata$trid <- xdata[, c(trideni)]
        grafdata <- aggregate(xdata$otaz, by = list(xdata$trid, xdata$otaz), FUN = length)
        datasum <- aggregate(xdata$otaz, by = list(xdata$trid), FUN = length)
        grafdata <- merge(grafdata, datasum, by = "Group.1")
        colnames(grafdata) <- c("Cro", "Var", "Freq", "Sum")
        grafdata <- grafdata[order(grafdata$Cro, grafdata$Var), ]
        grafdata$n <- grafdata$Freq
        grafdata$Freq <- grafdata$Freq * 100 / grafdata$Sum
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        p <- grafdata[c("Cro", "Var", "Lab", "n")]
        colnames(p) <- c("Třídění", "Varianta", "Podíl", "n")
      }
      else if (typ == 5) {
        # Battery of items: melt to long format, then percentages per
        # sub-question (uses reshape2::melt, loaded elsewhere).
        datamin <- xdata[, c(id, otazka)]
        datamelt <- melt(datamin, id = 1)
        grafdata <- aggregate(datamelt[, 1], by = list(datamelt$variable, datamelt$value), FUN = length)
        datasum <- aggregate(grafdata$x, by = list(grafdata$Group.1), FUN = sum)
        grafdata <- merge(grafdata, datasum, by = "Group.1")
        colnames(grafdata) <- c("Cro", "Var", "Freq", "Sum")
        grafdata <- grafdata[order(grafdata$Cro, grafdata$Var), ]
        grafdata$cs <- cumsum(grafdata$Freq)
        grafdata$Freq <- grafdata$Freq * 100 / grafdata$Sum
        grafdata$Freqpos <- (cumsum(grafdata$Freq) - 0.5 * grafdata$Freq) %% 100
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        # Human-readable, line-wrapped sub-question labels.
        grafdata$Labs <- sapply(grafdata$Cro, function(x) wrapper(varlabels[toString(x)], width = 40))
        p <- grafdata[c("Labs", "Var", "Lab")]
        colnames(p) <- c("Subotázka", "Varianta", "Podíl")
      }
      else if (typ == 6) {
        # Mean of the question by one cross variable (NA responses dropped).
        xdata$trid <- xdata[, c(trideni)]
        grafdata <- aggregate(xdata$otaz[!is.na(xdata$otaz)], by = list(xdata$trid[!is.na(xdata$otaz)]), FUN = mean)
        colnames(grafdata) <- c("Cro", "Freq")
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        p <- grafdata[c("Cro", "Lab")]
        colnames(p) <- c("Třídění", "Průměr")
      }
      else if (typ == 42) {
        # Percentages within each combination of two cross variables.
        xdata$trid <- xdata[, c(trideni)]
        xdata$trid2 <- xdata[, c(trideni2)]
        grafdata <- aggregate(xdata$otaz, by = list(xdata$trid, xdata$trid2, xdata$otaz), FUN = length)
        datasum <- aggregate(grafdata$x, by = list(grafdata$Group.1, grafdata$Group.2), FUN = sum)
        grafdata <- merge(grafdata, datasum, by.x = c("Group.1", "Group.2"), by.y = c("Group.1", "Group.2"))
        colnames(grafdata) <- c("Cro", "Cro2", "Var", "Freq", "Sum")
        grafdata <- grafdata[order(grafdata$Cro, grafdata$Cro2, grafdata$Var), ]
        grafdata$Freq <- grafdata$Freq * 100 / grafdata$Sum
        grafdata$Freqpos <- (cumsum(grafdata$Freq) - 0.5 * grafdata$Freq) %% 100
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        p <- grafdata[c("Cro", "Cro2", "Var", "Freq")]
        colnames(p) <- c("Třídění 1", "Třídění 2", "Varianta", "Podíl")
      }
      else if (typ == 52) {
        # Means of battery items by one cross variable.
        datamin <- xdata[, c(id, otazka, trideni)]
        datamelt <- melt(datamin, id = c(id, trideni))
        datamelt$val2 <- as.numeric(datamelt$value)
        datamelt <- datamelt[!is.na(datamelt$val2), ]
        grafdata <- aggregate(datamelt$val2, by = list(datamelt$variable, datamelt[, 2]), FUN = mean)
        colnames(grafdata) <- c("Var", "Cro", "Freq")
        grafdata <- grafdata[order(grafdata$Cro, grafdata$Var), ]
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        grafdata$Labs <- sapply(grafdata$Var, function(x) wrapper(varlabels[toString(x)], width = 40))
        # Keep the original item order, reversed for horizontal display.
        grafdata$Labs2 <- factor(grafdata$Labs, as.character(unique(grafdata$Labs)))
        grafdata$Labs2 <- factor(grafdata$Labs2, levels = rev(levels(grafdata$Labs2)))
        p <- grafdata[c("Labs2", "Cro", "Lab")]
        colnames(p) <- c("Proměnná", "Třídění", "Průměr")
      }
      else if (typ == 62) {
        # Mean of the question by two cross variables (NA responses dropped).
        xdata$trid <- xdata[, c(trideni)]
        xdata$trid2 <- xdata[, c(trideni2)]
        grafdata <- aggregate(xdata$otaz[!is.na(xdata$otaz)], by = list(xdata$trid[!is.na(xdata$otaz)], xdata$trid2[!is.na(xdata$otaz)]), FUN = mean)
        colnames(grafdata) <- c("Cro", "Cro2", "Freq")
        grafdata$Freqpos <- grafdata$Freq / 2
        grafdata$Lab <- round(x = grafdata$Freq, digits = decim)
        p <- grafdata[c("Cro", "Cro2", "Freq")]
        colnames(p) <- c("Třídění 1", "Třídění 2", "Průměr")
      }
    } else {
      p <- NULL
    }
  } else {
    p <- NULL
  }
  p
}
####################################################################################################
# Podklad
####################################################################################################
# Function that reads from a CSV which variables will be displayed (variables),
# which are socio-demographics (socio) and
# which will be used for cross-tabulation (cross)
# Parameters
# @filename path to the CSV file
# Read the "podklad" (layout) CSV that declares which variables are shown
# (GROUP column), which are socio-demographics (SOCIO) and which are used for
# cross-tabulation (CROSS).
#
# @param filename path to the semicolon-separated CSV; must contain columns
#   ID, NAME, LABEL, GROUP, SOCIO and CROSS (empty cells become NA).
# @return list with elements variables (first variable of each group),
#   socio, cross, variablesAll (all names, labelled) and variablesG
#   (all names per group).
podklad <- function(filename = NULL)
{
  # Guard against a NULL/missing path: file.exists(NULL) returns logical(0),
  # which would make the if() fail with an obscure "argument is of length
  # zero" error instead of the intended message.
  if (is.null(filename) || !file.exists(filename)) {
    stop("Add Podklad.csv file to the directory")
  }
  podklad <- read.csv(filename, sep = ";", header = TRUE, stringsAsFactors = FALSE)
  # Rows participating in each role; a non-NA value is the 1-based index of
  # the block (group) the variable belongs to.
  vgroup <- podklad[!is.na(podklad$GROUP), c("ID", "NAME", "LABEL", "GROUP")]
  vsocio <- podklad[!is.na(podklad$SOCIO), c("ID", "NAME", "LABEL", "SOCIO")]
  vcross <- podklad[!is.na(podklad$CROSS), c("ID", "NAME", "LABEL", "CROSS")]
  vars1 <- seq_len(max(vgroup$GROUP))
  vars2 <- seq_len(max(vsocio$SOCIO))
  vars3 <- seq_len(max(vcross$CROSS))
  variablesAll <- vgroup$NAME
  names(variablesAll) <- vgroup$LABEL
  # For each block: `variables` keeps only the first member (used as the
  # block's representative), `variablesG` keeps all members; the first
  # member's LABEL names the block.
  variables <- as.list(lapply(vars1, function(x) vgroup$NAME[vgroup$GROUP == x][1]))
  names(variables) <- lapply(vars1, function(x) vgroup$LABEL[vgroup$GROUP == x][1])
  variablesG <- as.list(lapply(vars1, function(x) vgroup$NAME[vgroup$GROUP == x]))
  names(variablesG) <- lapply(vars1, function(x) vgroup$LABEL[vgroup$GROUP == x][1])
  socio <- as.list(lapply(vars2, function(x) vsocio$NAME[vsocio$SOCIO == x]))
  names(socio) <- lapply(vars2, function(x) vsocio$LABEL[vsocio$SOCIO == x][1])
  cross <- as.list(lapply(vars3, function(x) vcross$NAME[vcross$CROSS == x]))
  names(cross) <- lapply(vars3, function(x) vcross$LABEL[vcross$CROSS == x][1])
  vars <- list(variables = variables, socio = socio, cross = cross,
               variablesAll = variablesAll, variablesG = variablesG)
  return(vars)
}
####################################################################################################
# Prom
####################################################################################################
# Builds a list from a label structure
# @ vallabels - table of labels
# @ prom - variable
# Convert one variable's value-label structure into a plain list of character
# strings, keeping each element's names.
# NOTE(review): relies on plyr::laply (loaded elsewhere in this file).
# For a plain atomic vector, `names(x)` inside the per-element function is
# typically NULL -- confirm that elements of `prom` actually carry names
# before depending on them in callers.
val2list<-function(prom){
# Coerce each element to character, re-attach its names, and wrap the
# laply result in a list.
s <- as.list(laply(prom,function(x) {
y<-as.character(x)
names(y)<-names(x)
return(y)
}))
return(s)
}
|
bf2514b9c8bec0e34f61817e75164547616fa657
|
ceb26dde1c145989ad9a3b516fec297951b41b3c
|
/R/choplump.formula.R
|
188956932a4e84bcbe9a6137acaca0a551632d3e
|
[] |
no_license
|
cran/choplump
|
ecbe66a193bb0ddc20c5ffb4795ab07e4f0bbe76
|
7c20d519796bda4cdd1629c649c5d4f5b2503bbf
|
refs/heads/master
| 2022-05-14T22:01:40.865158
| 2022-05-09T14:50:02
| 2022-05-09T14:50:02
| 17,695,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 919
|
r
|
choplump.formula.R
|
# S3 formula method for choplump(): accepts `response ~ group`, builds a
# model frame honouring data/subset/na.action, splits the response by the
# two-level grouping factor, and dispatches to the default method.
`choplump.formula` <-
function(formula, data, subset, na.action, ...){
    ## mostly copied from wilcox.test.formula
    # Formula must be two-sided with exactly one term on the right-hand side.
    if (missing(formula) || (length(formula) != 3) || (length(attr(terms(formula[-2]),
        "term.labels")) != 1))
        stop("'formula' missing or incorrect")
    # Rewrite this call into a model.frame() call so the user's data/subset/
    # na.action arguments are evaluated exactly as supplied.
    m <- match.call(expand.dots = FALSE)
    if (is.matrix(eval(m$data, parent.frame())))
        m$data <- as.data.frame(data)
    m[[1]] <- as.name("model.frame")
    m$... <- NULL
    mf <- eval(m, parent.frame())
    # Record "response by group" for the result's data.name field.
    DNAME <- paste(names(mf), collapse = " by ")
    names(mf) <- NULL
    response <- attr(attr(mf, "terms"), "response")
    # The single non-response column is the grouping factor.
    g <- factor(mf[[-response]])
    if (nlevels(g) != 2)
        stop("grouping factor must have exactly 2 levels")
    # Split the response by group; choplump.default expects args x and y.
    DATA <- split(mf[[response]], g)
    names(DATA) <- c("x", "y")
    y <- do.call("choplump", c(DATA, list(...)))
    y$data.name <- DNAME
    y
}
|
1f776fb8851c67e856621774c07175b444fa83cd
|
303ecdc998923dc101dfc42b8dbf42853ce7a7ec
|
/man/runFAIMS.Rd
|
88b40340ef3c8db9c2e0c644f90b63ec96dcc37d
|
[] |
no_license
|
mattdneal/FAIMSToolkit
|
7e2640eb979110c2fca1cad639beb78fb9b25be4
|
751bfba992587bb7e5edba272a3890b088e19e33
|
refs/heads/master
| 2021-01-11T17:37:52.367113
| 2018-12-01T13:54:33
| 2018-12-01T13:54:33
| 79,808,086
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,332
|
rd
|
runFAIMS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runFAIMS.R
\name{runFAIMS}
\alias{runFAIMS}
\title{Run a standard analysis given a FAIMS object and class labels}
\usage{
runFAIMS(FAIMSObject, targetValues, models = c("rf", "glmnet",
"svmRadial", "svmLinear", "gbm", "nnet", "glm"),
modelSelectFolds = NULL, modelSelectScores = NULL,
bestModelFolds = NULL, bestModelScores = NULL, waveletData = NULL,
SGoF = TRUE, nKeep = TRUE, extraData = NULL)
}
\arguments{
\item{FAIMSObject}{a FAIMS object}
\item{targetValues}{class labels}
\item{models}{a list of \code{\link[caret]{train}} models}
\item{modelSelectFolds}{pre-generated folds for model selection}
\item{modelSelectScores}{pre-generated scores for model selection}
\item{bestModelFolds}{pre-generated folds for best model assessment}
\item{bestModelScores}{pre-generated scores for best model assessment}
\item{waveletData}{pre-computed wavelet data}
\item{SGoF}{Select variables using sequential goodness of fit? (only for PCA
analysis)}
\item{nKeep}{Select variables using keep top N?}
\item{extraData}{Additional data to feed to the classifier}
}
\value{
A list of results (see out$bestModelSummary and
out$modelSelectSummary for a summary of results)
}
\description{
Run a standard analysis given a FAIMS object and class labels
}
|
3956489044efb88797ed99697244a708314d9953
|
e30ed8ebaf7ea390550593027296dcc587f04698
|
/modelTestR/testSmoothModel.R
|
0497b15a1f8368f7f3a57e6aed88e348d767043d
|
[
"MIT"
] |
permissive
|
vdedyukhin/FluMapModel
|
93543537452adfc7fe81af70508c2fbf08305cee
|
8ba432fb70f4178c1c3a07fd231d99a834ed23fb
|
refs/heads/master
| 2020-06-13T08:30:27.953805
| 2019-05-10T22:36:57
| 2019-05-10T22:36:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,497
|
r
|
testSmoothModel.R
|
# testModelTrainR
# script to test incidenceMapR package
#
# Interactive smoke-test script: fits smoothing models via incidenceMapR on
# simulated and production data and plots them. Requires database access
# through dbViewR -- intended to be run manually, not as an automated test.
library(dbViewR)
library(incidenceMapR)
library(modelTestR)
library(dplyr)
shp <- masterSpatialDB() # census-tract shapefiles
# ggplot build eventually will be replaced by function ggplotSmoothSequential
library(ggplot2)
plotSettings <- ggplot() + theme_bw() + theme(panel.border = element_blank()) + xlab('')
###################################
##### smoothing models ############
###################################
# simulated data kiosk catchment map
# Count kiosk encounters per residence census tract, expand to the full
# grid of tracts, then fit the spatial smooth and map it.
queryIn <- list(
SELECT =list(COLUMN=c('site_type','residence_census_tract')),
WHERE =list(COLUMN='site_type', IN = c('kiosk')),
GROUP_BY =list(COLUMN=c('site_type','residence_census_tract')),
SUMMARIZE=list(COLUMN='site_type', IN= c('kiosk'))
)
db <- expandDB( selectFromDB( queryIn ) )
modelDefinition <- smoothModel(db=db, shp=shp)
model <- modelTrainR(modelDefinition)
ggplotSmoothMap(model,shp)
# simulated data at_home catchment map
# Same flow as above, for the at_home site type.
queryIn <- list(
SELECT =list(COLUMN=c('site_type','residence_census_tract')),
WHERE =list(COLUMN='site_type', IN = c('at_home')),
GROUP_BY =list(COLUMN=c('site_type','residence_census_tract')),
SUMMARIZE=list(COLUMN='site_type', IN= c('at_home'))
)
db <- expandDB( selectFromDB( queryIn ) )
modelDefinition <- smoothModel(db=db, shp=shp)
model <- modelTrainR(modelDefinition)
ggplotSmoothMap(model,shp)
# test: real childrensHospital data
# Production data aggregated at the coarser CRA level; the shapefile is
# re-fetched at cra_name resolution so shp matches the query's geography.
queryIn <- list(
SELECT =list(COLUMN=c('site_type','residence_cra_name')),
WHERE =list(COLUMN='site_type', IN = c('childrensHospital')),
GROUP_BY =list(COLUMN=c('site_type','residence_cra_name')),
SUMMARIZE=list(COLUMN='site_type', IN= c('all'))
)
db <- expandDB( selectFromDB( queryIn, source='production', na.rm=TRUE ) )
shp<-masterSpatialDB(shape_level = 'cra_name', source = 'seattle_geojson')
modelDefinition <- smoothModel(db=db, shp=shp)
model <- modelTrainR(modelDefinition)
ggplotSmoothMap(model,shp,'childrensHospital', shape_level = 'residence_cra_name')
######################
########### age ######
######################
# simulated data h1n1pdm age fraction
# Smooth the fraction of h1n1pdm-positive encounters over age bins; the plot
# shows the observed fraction, the modeled mode and the 2.5%-97.5% quantile
# ribbon.
queryIn <- list(
SELECT =list(COLUMN=c('pathogen','age')),
MUTATE =list(COLUMN='age', AS='age_bin'),
GROUP_BY =list(COLUMN=c('age_bin')),
SUMMARIZE=list(COLUMN='pathogen', IN= 'h1n1pdm')
)
db<-selectFromDB( queryIn )
db <- expandDB( db )
modelDefinition <- smoothModel(db=db, shp=shp)
model <- modelTrainR(modelDefinition)
plotDat <- model$modeledData
p1 <- plotSettings + geom_point(data=plotDat,aes(x=age_bin,y=positive/n))
p1 <- p1 + geom_line(data=plotDat,aes(x=age_bin,y=modeled_fraction_mode)) +
geom_ribbon(data=plotDat,aes(x=age_bin,ymin=modeled_fraction_0_025quant,ymax=modeled_fraction_0_975quant),alpha=0.3)
p1 + ggtitle('h1n1pdm fraction')
# simulated data rsva age fraction
# Same flow as above, for the rsva pathogen.
queryIn <- list(
SELECT =list(COLUMN=c('pathogen','age')),
MUTATE =list(COLUMN='age', AS='age_bin'),
GROUP_BY =list(COLUMN=c('age_bin')),
SUMMARIZE=list(COLUMN='pathogen', IN= 'rsva')
)
db <- expandDB( selectFromDB( queryIn ) )
modelDefinition <- smoothModel(db=db, shp=shp)
model <- modelTrainR(modelDefinition)
plotDat <- model$modeledData
p1 <- plotSettings + geom_point(data=plotDat,aes(x=age_bin,y=positive/n))
p1 <- p1 + geom_line(data=plotDat,aes(x=age_bin,y=modeled_fraction_mode)) +
geom_ribbon(data=plotDat,aes(x=age_bin,ymin=modeled_fraction_0_025quant,ymax=modeled_fraction_0_975quant),alpha=0.3)
p1 + ggtitle('rsva fraction')
|
893ece5cbb277feba76bb59ed728daa4228d7dd4
|
29370cadec080cc724c00e372b3381155e233bbc
|
/HW1/walker_jocelyne_hw1.R
|
0947f8b8667d361243222dc24771fc85b59709b8
|
[] |
no_license
|
jocelynewalker/SupplyChainAnalytics1
|
e61d112114507d91e2c20b3be7e9bac3bbe84038
|
16a41eab76471743e19629e211c388cc7ac56b28
|
refs/heads/master
| 2023-02-02T23:16:53.301185
| 2020-12-15T04:47:09
| 2020-12-15T04:47:09
| 321,555,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
walker_jocelyne_hw1.R
|
# 1: build a 3x3 matrix, filled column-wise
A <- matrix(c(1, 7, 9, 3, 2, 8, 5, 3, 0), nrow = 3, ncol = 3)

# 2: two independent standard-normal samples and their moments
x <- rnorm(10)
mean(x)
var(x)
y <- rnorm(10)
mean(y)
var(y)

# 3: re-seeding before each draw makes the two samples identical
set.seed(2567)
x <- rnorm(10)
mean(x)
var(x)
set.seed(2567)
y <- rnorm(10)
mean(y)
var(y)

# 4: plot y = exp(-x/8) * sin(x) for x = 1..20
# Fixed: x must be defined BEFORE y. The original computed y from the
# length-10 sample of step 3 and only then set x <- seq(1, 20), so
# plot(x, y) failed with "'x' and 'y' lengths differ".
x <- seq(1, 20, by = 1)
y <- exp(-x / 8) * sin(x)
plot(x, y)

# 5: extract rows 2-4, columns 2 and 4 of a 4x4 matrix
B <- matrix(10:25, 4, 4)
B[c(2, 3, 4), c(2, 4)]

# 6: requires the Auto data set (e.g. ISLR::Auto) to be loaded first
Auto$name[38]

# 7: scatterplot matrix of Auto columns 1, 4, 5, 6
pairs(Auto[, c(1, 4, 5, 6)])
|
be163b0458be86da667245f9a89a5c13b113f6e3
|
3ba4cd64b22d83c89e567fc0124bc6bc754511f6
|
/code/贵州省县区区划/figure.R
|
6d64de5bdb76bef525485b842f3f52aeeaebf6e1
|
[] |
no_license
|
pfan8/R_Study
|
4f589c887450bd6c497762740776cdc4a9915826
|
d13d7677eb4142ae737c43df52340df7cf60d8be
|
refs/heads/master
| 2020-03-10T12:42:04.902454
| 2018-04-13T10:32:35
| 2018-04-13T10:32:35
| 129,383,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
figure.R
|
# Draw the 2017 county-boundary map of Guizhou province from a shapefile.
# NOTE(review): rgdal and maptools were retired from CRAN in 2023 -- confirm
# they are still installable here, or migrate readOGR() to sf::st_read().
library(RColorBrewer)
library(spData)
library(Matrix)
library(spdep)
library(maptools)
library(rgdal)
# Windows-style relative path to the 2017 county-boundary shapefile.
rob.shp=readOGR(".\\R\\贵州省县区区划\\2017县界.shp")
plot(rob.shp,axes = TRUE)
|
8fecc1b0ab11cddcfcd1ed84b270f3084bb11f72
|
b4c1bc98a83dc8f4ad492911faca4c709c146288
|
/R/ithim_setup_baseline_scenario.R
|
21e95c231a6486b78c7198012e34e2acb8679ae7
|
[] |
no_license
|
danielgils/ITHIM-R
|
facd3e74e3b4cdea279245372e3f2ec3bcb35cc0
|
7e306f0aea3e6ea21521104206a0281e5882af13
|
refs/heads/master
| 2023-09-05T05:33:00.779268
| 2021-11-10T10:07:42
| 2021-11-10T10:07:42
| 277,875,917
| 0
| 0
| null | 2020-07-07T17:04:21
| 2020-07-07T17:04:20
| null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
ithim_setup_baseline_scenario.R
|
#' Set up baseline scenario data frame
#'
#' Create the baseline scenario by adding a trip-distance category column and
#' a scenario = "Baseline" column to the trip set data frame.
#'
#' Relies on two globals set up elsewhere in the package:
#' \code{DIST_LOWER_BOUNDS} (ascending lower bounds of the distance bands)
#' and \code{DIST_CAT} (one label per band).
#'
#' @param trip_set data frame of trips with a numeric \code{trip_distance} column
#'
#' @return trip_set as baseline scenario
#'
#' @export
ithim_setup_baseline_scenario <- function(trip_set){
  ## SET UP TRAVEL DATA
  ## Distance categories are used in scenario generation. They correspond to
  ## e.g. ``long trips'' and ``short trips''.
  # Initialize with 0; assigning the first character label coerces the whole
  # column to character (as in the original implementation).
  trip_set$trip_distance_cat <- 0
  ##!! assuming more than one distance category
  # Label every band except the last: trips with
  # DIST_LOWER_BOUNDS[i] <= d < DIST_LOWER_BOUNDS[i+1] get DIST_CAT[i].
  # The original loop header `2:length(x)-1` parsed as `(2:length(x)) - 1`,
  # i.e. 1:(length(x)-1); seq_len states that intent explicitly and avoids
  # the precedence trap.
  for(i in seq_len(length(DIST_LOWER_BOUNDS) - 1)){
    trip_set$trip_distance_cat[trip_set$trip_distance >= DIST_LOWER_BOUNDS[i] & trip_set$trip_distance < DIST_LOWER_BOUNDS[i+1]] <- DIST_CAT[i]
  }
  # Open-ended top band: everything at or above the last lower bound.
  trip_set$trip_distance_cat[trip_set$trip_distance >= DIST_LOWER_BOUNDS[length(DIST_LOWER_BOUNDS)]] <- DIST_CAT[length(DIST_LOWER_BOUNDS)]
  trip_set$scenario <- "Baseline"
  return(trip_set)
}
|
df0cf3454328ea78dff2986f2780b4ae6db191f5
|
fa1218bd0d9de845695f85c260140545a335a3dc
|
/R/exasol.R
|
283fbad1174de40764de8c60094a48daa5306172
|
[] |
no_license
|
fxcebx/r-exasol
|
421df98fae236b4ade1c3659e785931bc86a1256
|
c25b40d413349e6e737912288f63ccb5996f00e7
|
refs/heads/master
| 2021-01-18T03:26:24.842337
| 2015-12-21T14:41:11
| 2015-12-21T14:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,107
|
r
|
exasol.R
|
#' @docType package
#' @name exasol-package
#' @aliases exasol
#' @useDynLib exasol, .registration = TRUE, .fixes = "C_"
#' @exportPattern ^[[:alpha:]]+
#' @import RODBC
#'
#' @title EXASolution R Package
#'
#' @description The EXASolution R Package offers functionality to interact with
#' the EXASolution database out of R programs. It is developed as a wrapper
#' around ORDBC and extends ORDBC in two main aspects:
#'
#' \enumerate{
#' \item It offers fast data transfer between EXASolution and R, multiple
#' times faster than RODBC. This is achieved by using a proprietary transfer
#' channel which is optimized for batch loading.
#' Please read the R help of \code{exa.readData()} and \code{exa.writeData()} for details.
#'
#' \item It makes it convenient to run parts of your R code in parallel on the
#' EXASolution database, using EXASolution R UDF scripts behind the scenes.
#' For example you can define an R function and execute it in parallel on
#' different groups of data in an EXASolution table.
#' Please read the R help of \code{exa.createScript()} function for details.
#' }
#'
#' The help is available directly in R via:
#' \itemize{
#' \item \code{help(exa.readData)}
#' \item \code{help(exa.writeData)}
#' \item \code{help(exa.createScript)}
#' }
#'
#' @author EXASOL AG <support@@exasol.com>
#'
#' @keywords sql
#' @keywords distributed
#' @keywords in-memory
NULL
#' SET input type of UDF script -- the function is called once per group of rows.
SET <- "SET"
#' SCALAR input type of UDF script -- the function is called once per record.
SCALAR <- "SCALAR"
#' EMITS output type of UDF script -- function emits any number of values.
EMITS <- "EMITS"
#' RETURNS output type of UDF script -- function emits just a single value.
RETURNS <- "RETURNS"
#' All allowed input types of UDF scripts.
ALLOWED_UDF_IN_TYPES <- c(SET, SCALAR)
#' All allowed output types of UDF scripts.
ALLOWED_UDF_OUT_TYPES <- c(EMITS, RETURNS)
# Registered native (C) routines backing the fast async transfer channel.
# NOTE(review): the descriptions below are inferred from the routine names
# only -- confirm against the C sources in src/ before relying on them.
#' Native routine: start an asynchronous ODBC query (inferred from name).
"C_asyncRODBCQueryStart"
#' Native routine: start asynchronous I/O on the transfer channel (inferred from name).
"C_asyncRODBCIOStart"
#' Native routine: poll whether the asynchronous operation has completed (inferred from name).
"C_asyncRODBCIsDone"
#' Native routine: purpose unclear from the name alone ("Max") -- confirm in src/.
"C_asyncRODBCMax"
#' Native routine: host of the proxy used by the transfer channel (inferred from name).
"C_asyncRODBCProxyHost"
#' Native routine: port of the proxy used by the transfer channel (inferred from name).
"C_asyncRODBCProxyPort"
#' Native routine: check the state of the asynchronous query (inferred from name).
"C_asyncRODBCQueryCheck"
#' Native routine: finish/clean up the asynchronous query (inferred from name).
"C_asyncRODBCQueryFinish"
# Package load hook: print a startup banner with the installed version.
.onAttach <- function(libname, pkgname) {
  startup_banner <- paste(
    "EXASOL RODBC",
    utils::packageVersion("exasol"),
    "loaded."
  )
  packageStartupMessage(startup_banner, appendLF = TRUE)
}
# require(RODBC); require(exasol)
# cnx <- odbcDriverConnect("Driver=/var/Executables/bc/install/ok7500-e8/lib/libexaodbc-uo2214.so;UID=sys;PWD=exasol;EXAHOST=cmw72;EXAPORT=8563")
# sqlQuery(cnx, "OPEN SCHEMA TEST")
# require(RODBC); require(exasol); cnx <- odbcDriverConnect("Driver=/var/Executables/bc/install/ok7500-e8/lib/libexaodbc-uo2214.so;UID=sys;PWD=exasol;EXAHOST=cmw67;EXAPORT=8563"); sqlQuery(cnx, "OPEN SCHEMA TEST")
#cnx <- odbcDriverConnect("Driver=/var/Executables/bc/install/ok7500-e8/lib/libexaodbc-uo2214.so;UID=sys;PWD=exasol;EXAHOST=cmw72;EXAPORT=8563")
#testScript <- exa.createScript(cnx, testScript,
#env = list(a = 1, b1 = 2, b2 = 2, b3 = 2, b4 = 2, b5 = 2, b6 = 2, b7 = 2, b8 = 2, b9 = 2, ba = 2, bo = 2, be = 2, bu = 2, bi = 2, bd = 2, bh = 2, bt = 2, bn = 2),
#inArgs = { INT(a) },
#outArgs = { INT(b); INT(c) },
#outputAddress = c('192.168.5.61', 3000),
#initCode = {
# require(RODBC); require(data.table)
# print(paste("initialize", exa$meta$vm_id));
#},
#func = function(data) {
# print("begin group")
# data$next_row(NA);
# data$emit(data$a, data$a + 3);
# print("end group")
#})
#
#
#res <- testScript(1, test)
#res <- exa.readData(cnx, 'select testScript(1) from test')
#exa.writeData(cnx, test)
#
#res <- sqlQuery(cnx, 'select testScript(1) from test')
# print(testScript(int_index, table = enginetable, groupBy = mod(int_index, 4), returnSQL = TRUE))
# print(summary(testScript(int_index, table = enginetable, groupBy = mod(int_index, 4))))
# require(RODBC)
# require(exasol); cnx <- odbcDriverConnect("DSN=EXA"); sqlQuery(cnx, "open schema test"); exa.readData(cnx, "select * from cat")
|
69256e8d48a45b76d6ed2f89a1f939b6fbae848e
|
645ff6a53c2093037c7154cdd87714942385ffd4
|
/R/delete.R
|
cd32ed2cde32e8fe8e61e2e5c966456994d269af
|
[
"MIT"
] |
permissive
|
1havran/solrium
|
04c6754d14509e0e46e50d39f074d17b190eb050
|
a30015c1d1a28fc7293d67854c12d8f3fc99fad0
|
refs/heads/master
| 2021-01-01T06:09:13.350637
| 2017-02-01T19:44:27
| 2017-02-01T19:44:27
| 97,371,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,286
|
r
|
delete.R
|
#' Delete documents by ID or query
#'
#' @name delete
#' @param ids Document IDs, one or more in a vector or list
#' @param name (character) A collection or core name. Required.
#' @param query Query to use to delete documents
#' @param commit (logical) If \code{TRUE}, documents immediately searchable.
#' Deafult: \code{TRUE}
#' @param commit_within (numeric) Milliseconds to commit the change, the document will be added
#' within that time. Default: NULL
#' @param overwrite (logical) Overwrite documents with matching keys. Default: \code{TRUE}
#' @param boost (numeric) Boost factor. Default: NULL
#' @param wt (character) One of json (default) or xml. If json, uses
#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
#' parse
#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by
#' \code{wt} param
#' @param ... curl options passed on to \code{\link[httr]{GET}}
#' @details We use json internally as data interchange format for this function.
#' @examples \dontrun{
#' solr_connect()
#'
#' # add some documents first
#' ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
#' add(ss, name = "gettingstarted")
#'
#' # Now, delete them
#' # Delete by ID
#' # delete_by_id(ids = 9)
#' ## Many IDs
#' # delete_by_id(ids = c(3, 4))
#'
#' # Delete by query
#' # delete_by_query(query = "manu:bank")
#' }
#' @export
#' @name delete
delete_by_id <- function(ids, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
                         boost = NULL, wt = 'json', raw = FALSE, ...) {
  # Delete documents by ID from collection/core `name`, via the JSON update
  # endpoint of the Solr connection held in solr_settings().
  conn <- solr_settings()
  check_conn(conn)
  # NOTE(review): only `commit` and `wt` are forwarded as URL params;
  # commit_within, overwrite and boost are accepted but never sent --
  # confirm whether that is intentional.
  args <- sc(list(commit = asl(commit), wt = wt))
  # Request body shape: {"delete": [{"id": ...}, ...]}
  body <- list(delete = lapply(ids, function(z) list(id = z)))
  obj_proc(file.path(conn$url, sprintf('solr/%s/update/json', name)), body, args, raw, conn$proxy, ...)
}
#' @export
#' @name delete
delete_by_query <- function(query, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
                            boost = NULL, wt = 'json', raw = FALSE, ...) {
  # Delete all documents matching `query` from collection/core `name`, via
  # the JSON update endpoint of the Solr connection held in solr_settings().
  conn <- solr_settings()
  check_conn(conn)
  # NOTE(review): only `commit` and `wt` are forwarded as URL params;
  # commit_within, overwrite and boost are accepted but never sent --
  # confirm whether that is intentional.
  args <- sc(list(commit = asl(commit), wt = wt))
  # Request body shape: {"delete": {"query": ...}}
  body <- list(delete = list(query = query))
  obj_proc(file.path(conn$url, sprintf('solr/%s/update/json', name)), body, args, raw, conn$proxy, ...)
}
|
ac8ea0096836c057aed4c99114f79328550a3ee8
|
ec048a600a5d6693f8925b6552c24d3597b22a01
|
/inst/unitTests/test_DoparParam.R
|
3705222659002836f1b89fc867deb666f5304222
|
[] |
no_license
|
Bioconductor/BiocParallel
|
532510accf92487b0e19040d67e0b46aadbc4948
|
ce0be51cf06c501e203f6ed743aa04821a7beded
|
refs/heads/devel
| 2023-07-26T09:40:20.761793
| 2023-07-07T17:01:32
| 2023-07-07T17:02:06
| 6,694,227
| 56
| 33
| null | 2023-08-16T20:05:09
| 2012-11-14T20:16:37
|
R
|
UTF-8
|
R
| false
| false
| 2,720
|
r
|
test_DoparParam.R
|
message("Testing DoparParam")
# Verify that a worker dying mid-job (quit()) surfaces as the documented
# DoparParam orchestration error message.
test_DoparParam_orchestration_error <- function() {
    test <-
        requireNamespace("foreach", quietly = TRUE) &&
        requireNamespace("doParallel", quietly = TRUE)
    if (!test)
        DEACTIVATED("'foreach' or 'doParallel' not available")
    if (identical(.Platform$OS.type, "windows"))
        DEACTIVATED("'DoparParam' orchestration error test not run on Windows")
    # Create the cluster BEFORE the tryCatch: in the original, `cl` was
    # assigned inside the tryCatch expression, so a makeCluster() failure
    # made the `finally` handler error on the missing `cl` object and
    # masked the real failure. on.exit() guarantees cleanup either way.
    cl <- parallel::makeCluster(1L)
    on.exit(parallel::stopCluster(cl), add = TRUE)
    doParallel::registerDoParallel(cl)
    y <- tryCatch({
        bplapply(1L, function(x) quit("no"), BPPARAM = DoparParam())
    }, error = function(e) {
        conditionMessage(e)
    })
    checkTrue(startsWith(y, "'DoparParam()' foreach() error occurred: "))
}
# bplapply() under DoparParam must match SerialParam exactly.
test_DoparParam_bplapply <- function() {
    deps_ok <-
        requireNamespace("foreach", quietly = TRUE) &&
        requireNamespace("doParallel", quietly = TRUE)
    if (!deps_ok)
        DEACTIVATED("'foreach' or 'doParallel' not available")
    cluster <- parallel::makeCluster(2L)
    on.exit(parallel::stopCluster(cluster))
    doParallel::registerDoParallel(cluster)
    add_one <- function(x) x + 1L
    expected <- bplapply(1:9, add_one, BPPARAM = SerialParam())
    observed <- bplapply(1:9, add_one, BPPARAM = DoparParam())
    checkIdentical(observed, expected)
}
# With the same RNGseed, DoparParam must reproduce SerialParam's random draws.
test_DoparParam_bplapply_rng <- function() {
    deps_ok <-
        requireNamespace("foreach", quietly = TRUE) &&
        requireNamespace("doParallel", quietly = TRUE)
    if (!deps_ok)
        DEACTIVATED("'foreach' or 'doParallel' not available")
    cluster <- parallel::makeCluster(2L)
    on.exit(parallel::stopCluster(cluster))
    doParallel::registerDoParallel(cluster)
    draw_one <- function(x) runif(1)
    expected <- bplapply(1:9, draw_one,
                         BPPARAM = SerialParam(RNGseed = 123))
    observed <- bplapply(1:9, draw_one,
                         BPPARAM = DoparParam(RNGseed = 123))
    checkIdentical(observed, expected)
}
# Verify stop.on.error behaviour of DoparParam: with stop.on.error = FALSE
# every element is attempted; with TRUE, evaluation stops early but results
# from already-dispatched chunks are still returned.
test_DoparParam_stop_on_error <- function() {
    test <-
        requireNamespace("foreach", quietly = TRUE) &&
        requireNamespace("doParallel", quietly = TRUE)
    if (!test)
        DEACTIVATED("'foreach' or 'doParallel' not available")
    cl <- parallel::makeCluster(2L)
    on.exit(parallel::stopCluster(cl))
    doParallel::registerDoParallel(cl)
    fun <- function(x) {
        if (x == 2) stop()
        x
    }
    # Use TRUE/FALSE instead of the reassignable shorthands T/F.
    res1 <- bptry(bplapply(1:4, fun, BPPARAM = DoparParam(stop.on.error = FALSE)))
    checkEquals(res1[c(1, 3, 4)], as.list(c(1, 3, 4)))
    checkTrue(is(res1[[2]], "error"))
    res2 <- bptry(bplapply(1:6, fun, BPPARAM = DoparParam(stop.on.error = TRUE)))
    checkEquals(res2[c(1, 4:6)], as.list(c(1, 4:6)))
    checkTrue(is(res2[[2]], "error"))
    checkTrue(is(res2[[3]], "error"))
}
|
b7ef0a36e2ea4deea0b2de435003f6202e19da26
|
c8b9d55207fc4e8e2e7c39cb4c074455b3e2c5fa
|
/man/scDIFtest-Methods.Rd
|
82c7ff2b0f78b96ff38af79cf6ece3cfc0144504
|
[] |
no_license
|
cran/scDIFtest
|
b0f93999632cc5cc68e15e0ea9123a68e34efd38
|
f5d4bb89b988e04aaf5a890ef160c9e15508a25b
|
refs/heads/master
| 2022-11-19T03:23:38.521332
| 2020-07-02T06:20:03
| 2020-07-02T06:20:03
| 276,706,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,328
|
rd
|
scDIFtest-Methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scDIFtest-Methods.R
\name{scDIFtest-Methods}
\alias{scDIFtest-Methods}
\alias{print.scDIFtest}
\alias{summary.scDIFtest}
\alias{plot.scDIFtest}
\title{Methods for the scDIFtest-class}
\usage{
\method{print}{scDIFtest}(x, item_selection = NULL, ...)
\method{summary}{scDIFtest}(object, method = "fdr", ...)
\method{plot}{scDIFtest}(x, item_selection = NULL, ...)
}
\arguments{
\item{x}{an object of class \code{scDIFtest}}
\item{item_selection}{either \code{NULL} or an integer vector selecting the
item numbers. When \code{items = NULL} (the default), the DIF test
is done for all items.}
\item{...}{other arguments passed to the method.}
\item{object}{an object of class \code{scDIFtest}}
\item{method}{one of the strings in \code{p.adjust.methods}.}
}
\description{
\code{print}, \code{summary}, and \code{plot} methods for objects of the
\code{scDIFtest-class}, as returned by \code{\link{scDIFtest}}. See details
for more information about the methods.
}
\details{
The \code{print} method, when \code{item_selection = NULL}, gives a summary
of all the tests that were executed (i.e., for all items). When specific
items are selected, the \code{print} method is called repeatedly for each
individual \code{sctest} corresponding with the selected items.
The \code{summary} method computes a data frame with a row for each item
that was included in the test. The columns are:
\describe{
\item{item_type}{The estimated IRT model per item}
\item{n_est_pars}{The number of estimated parameters per item}
\item{stat}{The value for the used statistic per item}
\item{p_value}{The p-value per item}
\item{p_fdr}{The corrected p-value controlling the false discovery rate
(Benjamini & Hochberg, 1995). See \code{\link[stats]{p.adjust}} for
details.}
}
The \code{plot} method calls the \code{plot} method repeatedly for the
\code{gefp} object that corresponds with the executed score test for each of
the selected items. When no items are selected, the \code{plot} method results
in an error.
}
\references{
Benjamini, Y., and Hochberg, Y. (1995). Controlling the false
discovery rate: a practical and powerful approach to multiple testing.
\emph{Journal of the Royal Statistical Society Series B, 57,} 289-300.
}
|
3fd4ac10188a7bab75472b2ad67af878e8726d3a
|
867b7fb3466aabe7bc8c5eded17a643ae077e5df
|
/ServerFiles/generate.R
|
54e7e855c99867f4d93957e33413388aa532ed89
|
[] |
no_license
|
kdaust/PortfolioSensitivity
|
7c2b1e059be5d85373d1c11a47a95e7c857b40ec
|
f7c79fea352a0e50c81fd025eeb378c3acb04c58
|
refs/heads/main
| 2023-07-12T06:09:19.936553
| 2021-08-19T21:00:01
| 2021-08-19T21:00:01
| 394,799,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,311
|
r
|
generate.R
|
observeEvent(input$generate_results, priority = 100, {
  # On generate click, take a snapshot of the current points and calculate
  # results. All relevant results are stored in the userdata environment
  # (aliased as uData) for further reuse. The user updates results on
  # demand instead of on every app state change, which reduces load on the
  # app and gives some room in case computation gets more costly in the
  # future. Shared functions are stored in uData as well because they are
  # reused to build the report.
  # Input from the app
  avg <- uData$avg <- as.logical(input$aggregation)
  pts <- uData$pts <- userpoints$dt
  bgc <- uData$bgc <- bgc(pool, pts$Site, avg, all_weight)
  # Second/third args are period weightings passed through to ccissOutput().
  cciss <- uData$cciss <- cciss(bgc, c(0.3,0.35,0.35),c(0.5,0.5))
  cciss_results <- uData$cciss_results <- cciss_results(cciss, pts, avg)
  cciss_summary <- uData$cciss_summary <- cciss_summary(cciss, pts, avg)
  # UI select choices
  siterefs <- uData$siterefs <- sort(unique(bgc$SiteRef))
  # NOTE(review): ss_opts is not used below; uData$sspreds is assumed to be
  # populated elsewhere (e.g. by cciss()/edatopicOverlap) — confirm.
  ss_opts <- sort(unique(uData$sspreds$SS_NoSpace))
  bgc_opts <- unique(uData$bgc$BGC)
  ## prepare tree choices for portfolio selection
  suitTrees <- copy(cciss_summary)
  #print(colnames(suitTrees))
  # Keep only species with a projected feasibility rating of 1-4.
  suitTrees <- suitTrees[NewSuit %in% c(1,2,3,4),.(Spp, BGC = ZoneSubzone)]
  suitTrees <- unique(suitTrees)
  tree_opts <- suitTrees[BGC == bgc_opts[1],Spp]
  updateSelectInput(inputId = "tree_species",
                    choices = tree_opts,selected = tree_opts)
  uData$tree_opts <- suitTrees
  # Site series choices per site reference, labelled "<code> <name>" via
  # the stocking_info lookup.
  ssl <- lapply(siterefs, function(sr) {
    ss <- sort(unique(cciss_results[SiteRef %in% sr]$SS_NoSpace))
    names(ss) <- paste(
      ss,
      stocking_info$SiteSeriesName[match(ss, stocking_info[, paste(ZoneSubzone, SiteSeries, sep = "/")])]
    )
    ss
  })
  names(ssl) <- siterefs
  # Flat list of all site series across site references, same labelling.
  ssa <- sort(unique(cciss_results$SS_NoSpace))
  names(ssa) <- paste(
    ssa,
    stocking_info$SiteSeriesName[match(ssa, stocking_info[, paste(ZoneSubzone, SiteSeries, sep = "/")])]
  )
  siteseries_list <- uData$siteseries_list <- ssl
  siteseries_all <- uData$siteseries_all <- ssa
  if (!isTRUE(avg)) {
    # ordering choices to match order in points table and create a name for each choice
    siterefs <- pts[Site %in% siterefs,
                    {x <- Site; names(x) <- paste(ID, Site, sep = " / "); return(x)}
    ]
    uData$siterefs <- siterefs
  }
  # Dynamic UI select choices that depend on the previous select choice
  siteref <- head(siterefs, 1)
  siteseries <- siteseries_list[[siteref]]
  updateSelectInput(inputId = "siteref_feas", choices = siterefs, selected = siteref)
  updateSelectInput(inputId = "siteref_bgc_fut", choices = siterefs, selected = siteref)
  updateSelectInput(inputId = "ss_bgc_fut", choices = siteseries, selected = siteseries[1])
  updateSelectInput(inputId = "siteref_silv", choices = siterefs, selected = siteref)
  updateSelectInput(inputId = "site_series_feas", choices = siteseries, selected = head(siteseries, 1))
  updateSelectInput(inputId = "site_series_silv", choices = siteseries, selected = head(siteseries, 1))
  # NOTE(review): "select" relies on R partial argument matching to reach
  # "selected" — works, but spell it out if this line is ever touched.
  updateSelectInput(inputId = "port_bgc", choices = bgc_opts, select = bgc_opts[1])
  updateCheckboxGroupInput(inputId = "report_filter",choices = siteseries_all, selected = siteseries_all)
  # Use UI injected javascript to show download buttons and disable the
  # generate button until inputs change again.
  session$sendCustomMessage(type="jsCode", list(
    code= "$('#download_report_span').show()"))
  session$sendCustomMessage(type="jsCode", list(
    code= "$('#download_data_span').show()"))
  session$sendCustomMessage(type="jsCode", list(
    code= "$('#generate_results').prop('disabled', true)"))
  updateActionButton(inputId = "generate_results", label = "Refresh results")
  # Render models info + timings in About
  output$modelsinfo <- function() {
    knitr::kable(models_info, format = "html", table.attr = 'class="table table-hover table-centered"')
  }
  output$timings <- plotly::renderPlotly({
    tocker
  })
})
# Toggle the "Generate results" button: enabled only when at least one
# user point has a complete Long/Lat pair. Newly added points are also
# validated in new_points against the cciss geometry, so only valid
# points reach the calculation.
generateState <- function() {
  usable <- userpoints$dt[!is.na(Long) & !is.na(Lat)]
  js <- if (nrow(usable) > 0) {
    "$('#generate_results').prop('disabled', false)"
  } else {
    "$('#generate_results').prop('disabled', true)"
  }
  session$sendCustomMessage(type="jsCode", list(code= js))
}
# Re-evaluate the Generate button state whenever the points table or the
# aggregation / RCP scenario inputs change.
observeEvent(userpoints$dt, {generateState()})
observeEvent(input$aggregation, {generateState()})
observeEvent(input$rcp_scenario, {generateState()})
# Data processing
# Fetch BGC projections for the given site numbers, silently dropping
# NA site numbers before querying the database.
bgc <- function(con, siteno, avg, modWeights) {
  valid_sites <- siteno[!is.na(siteno)]
  dbGetCCISS(con, valid_sites, avg, modWeights = modWeights)
}
##bgc <- dbGetCCISS(pool,siteno = c(4532735,4546791,4548548),avg = T, all_weight)
# bgc <- sqlTest(pool,siteno = c(6476259,6477778,6691980,6699297),avg = T, scn = "ssp370")
# Overlay edatopic space on the BGC projections, cache the ordered site
# series predictions in uData$eda_out for reuse (e.g. report building),
# then compute the CCISS output with the supplied period weightings.
cciss <- function(bgc, estabWt, midWt) {
  ss_pred <- edatopicOverlap(bgc, Edatope = E1)
  setorder(ss_pred, SiteRef, SS_NoSpace, FuturePeriod, BGC.pred, -SSratio)
  uData$eda_out <- ss_pred
  ccissOutput(
    SSPred = ss_pred, suit = S1, rules = R1, feasFlag = F1,
    histWeights = estabWt, midWeights = midWt
  )
}
#SSPred2 <- SSPred[SS_NoSpace == "ICHmw1/01",]
## function for creating summary table
# Build the printable per-species feasibility summary table (HTML-formatted
# columns) from the CCISS Summary output.
cciss_summary <- function(cciss, pts, avg, SS = ccissdev::stocking_standards, period_map = uData$period_map) {
  withProgress(message = "Processing...", detail = "Feasibility summary", {
    # use a copy to avoid modifying the original object
    summary <- copy(cciss$Summary)
    # Append region; SiteRef is matched on BGC when averaging, on Site otherwise
    region_map <- pts[[{if (avg) {"BGC"} else {"Site"}}]]
    summary$Region <- pts$ForestRegion[match(summary$SiteRef, region_map)]
    summary$ZoneSubzone <- pts$BGC[match(summary$SiteRef, region_map)]
    # Append Chief Forester Recommended Suitability
    # NOTE(review): the trailing comma after `on = c(...)` leaves an empty
    # argument that data.table's `[` tolerates — harmless but accidental.
    summary[
      SS,
      CFSuitability := as.character(i.Suitability),
      on = c(Region = "Region", ZoneSubzone = "ZoneSubzone", SS_NoSpace = "SS_NoSpace", Spp = "Species"),
    ]
    # "X" marks species with no Chief Forester recommendation
    summary[is.na(CFSuitability), CFSuitability := "X"]
    # NOTE(review): `current` only feeds the commented-out Period line below
    current = names(period_map)[match("Current", period_map)]
    # Format for printing (multi-period cells joined with <br />)
    summary[, `:=`(
      Species = T1[Spp, paste(paste0("<b>", TreeCode, "</b>"), EnglishName, sep = ": ")],
      ProjFeas = NewSuit,
      Period = "2021-2040<br />2041-2060<br />2061-2080<br />2081-2100",
      #Period = paste0(period_map[names(period_map) > current], collapse = "<br />"),
      FutProjFeas = paste0(Suit2025, "<br />", Suit2055, "<br />", Suit2085,"<br />", Suit2100),
      FailRisk = paste0(FailRisk2025, "<br />", FailRisk2055, "<br />", FailRisk2085,"<br />", FailRisk2100)
    )]
    # Order
    setorder(summary, SiteRef, ProjFeas, Species)
    return(summary)
  })
}
# This map translates raw period codes into output labels.
#uData$period_map <- c("1975" = "Historic", "2000" = "Current", "2025" = "2010-2040", "2055" = "2040-2070", "2085" = "2070-2100")
uData$period_map <- c("1961" = "Historic", "1991" = "Current", "2021" = "2021-2040", "2041" = "2041-2060", "2061" = "2061-2080","2081" = "2081-2100")
## Inline SVG icons (30x30 px arrows) for the mid-rotation trend column:
## bifurcating (up+down), improving, declining and stable respectively.
swap_up_down <- '<svg xmlns="http://www.w3.org/2000/svg" width="30px" height="30px" viewBox="0 0 512 512"><polyline points="464 208 352 96 240 208" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/><line x1="352" y1="113.13" x2="352" y2="416" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/><polyline points="48 304 160 416 272 304" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/><line x1="160" y1="398" x2="160" y2="96" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/></svg>'
trending_up <- '<svg xmlns="http://www.w3.org/2000/svg" width="30px" height="30px" viewBox="0 0 512 512"><title>ionicons-v5-c</title><polyline points="352 144 464 144 464 256" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/><path d="M48,368,169.37,246.63a32,32,0,0,1,45.26,0l50.74,50.74a32,32,0,0,0,45.26,0L448,160" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/></svg>'
trending_down <- '<svg xmlns="http://www.w3.org/2000/svg" width="30px" height="30px" viewBox="0 0 512 512"><title>ionicons-v5-c</title><polyline points="352 368 464 368 464 256" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/><path d="M48,144,169.37,265.37a32,32,0,0,0,45.26,0l50.74-50.74a32,32,0,0,1,45.26,0L448,352" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:32px"/></svg>'
stable <- '<svg xmlns="http://www.w3.org/2000/svg" width="30px" height="30px" viewBox="0 0 512 512"><line x1="118" y1="304" x2="394" y2="304" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:44px"/><line x1="118" y1="208" x2="394" y2="208" style="fill:none;stroke:#000;stroke-linecap:round;stroke-linejoin:round;stroke-width:44px"/></svg>'
## Build the full per-species results table: one row per species/site series
## with per-period feasibility vote bars (SVG), trend icon and failure risks.
cciss_results <- function(cciss, pts, avg, SS = ccissdev::stocking_standards, period_map = uData$period_map) {
  withProgress(message = "Processing...", detail = "Feasibility results", {
    # use a copy to avoid modifying the original object
    results <- copy(cciss$Raw)
    sumResults <- copy(cciss$Summary)
    # Lookup from mid-rotation trend label to its SVG arrow (constants above)
    midRotID <- data.table(MidRotTrend = c("Strongly Improving","Improving","Stable","Declining","Strongly Declining","Bifurcating",NA_character_),
                           MidRotSVG = c(trending_up,trending_up,stable,trending_down,trending_down,swap_up_down,stable))
    # dcast (pivot): one column per (variable, future period) pair
    results <- dcast(results, SiteRef + SS_NoSpace + Spp + Curr ~ FuturePeriod,
                     value.var = c("NewSuit", "1", "2", "3", "X", "ModAgree", "SuitDiff"))
    # Required columns, set them if not created by dcast (safety)
    reqj <- c(
      "1_1961","2_1961","3_1961","X_1961", "NewSuit_1961",
      "1_1991","2_1991","3_1991","X_1991", "NewSuit_1991",
      "1_2021","2_2021","3_2021","X_2021", "NewSuit_2021",
      "1_2041","2_2041","3_2041","X_2041", "NewSuit_2041",
      "1_2061","2_2061","3_2061","X_2061", "NewSuit_2061",
      "1_2081","2_2081","3_2081","X_2081", "NewSuit_2081"
    )
    set(results, j = reqj[!reqj %in% names(results)], value = NA_real_)
    # Missing model-vote proportions are treated as zero votes
    setnafill(results, fill = 0, cols = c(
      "1_1961","2_1961","3_1961","X_1961",
      "1_1991","2_1991","3_1991","X_1991",
      "1_2021","2_2021","3_2021","X_2021",
      "1_2041","2_2041","3_2041","X_2041",
      "1_2061","2_2061","3_2061","X_2061",
      "1_2081","2_2081","3_2081","X_2081"
    ))
    # Append region; SiteRef is matched on BGC when averaging, on Site otherwise
    region_map <- pts[[{if (avg) {"BGC"} else {"Site"}}]]
    results$Region <- pts$ForestRegion[match(results$SiteRef, region_map)]
    results$ZoneSubzone <- pts$BGC[match(results$SiteRef, region_map)]
    # Append Chief Forester Recommended Suitability
    results[
      SS,
      CFSuitability := as.character(i.Suitability),
      on = c(Region = "Region", ZoneSubzone = "ZoneSubzone", SS_NoSpace = "SS_NoSpace", Spp = "Species")
    ]
    # Append summary vars
    results[
      sumResults,
      `:=`(EstabFeas = i.NewSuit,
           MidRotTrend = i.Trajectory2055,
           Risk60 = i.FailRisk2085,
           Risk80 = i.FailRisk2100),
      on = c("SiteRef","SS_NoSpace","Spp")
    ]
    ## Append SVG for mid rot trend
    results[
      midRotID,
      MidRotSVG := i.MidRotSVG,
      on = "MidRotTrend"
    ]
    # "X" marks species with no Chief Forester recommendation
    results[is.na(CFSuitability), CFSuitability := "X"]
    # Append custom generated feasibility svg bars and Trend + ETL
    # NOTE(review): `current` is unused in the active code below
    current = as.integer(names(period_map)[match("Current", period_map)])
    results[, `:=`(
      Species = T1[Spp, paste(paste0("<b>", TreeCode, "</b>"), EnglishName, sep = ": ")],
      Period = paste0(period_map, collapse = "<br />"),
      ProjFeas = EstabFeas,
      # One stacked vote bar per period, newline-joined to match Period
      PredFeasSVG = paste0(
        feasibility_svg(`1_1961`,`2_1961`,`3_1961`,`X_1961`), "<br />",
        feasibility_svg(`1_1991`,`2_1991`,`3_1991`,`X_1991`), "<br />",
        feasibility_svg(`1_2021`,`2_2021`,`3_2021`,`X_2021`), "<br />",
        feasibility_svg(`1_2041`,`2_2041`,`3_2041`,`X_2041`), "<br />",
        feasibility_svg(`1_2061`,`2_2061`,`3_2061`,`X_2061`), "<br />",
        feasibility_svg(`1_2081`,`2_2081`,`3_2081`,`X_2081`)
      )
    )]
    setorder(results, SiteRef, SS_NoSpace, EstabFeas, MidRotSVG, Risk60, Risk80, na.last = TRUE)
    return(results)
  })
}
#' @param ... four numeric vectors (feasibility classes 1, 2, 3 and X);
#'   each vector gives, per row, the proportion of votes for that class.
#'   This function assumes that x rowSums are all equal to 1 and that
#'   there are no NA values.
#' @param width output width of svg
#' @param height output height of svg
#' @param colors character vector of colors to use for svg, same length as
#' ncol x.
#' @return an svg image of feasibility prediction, one per row in data.frame
feasibility_svg <- function(..., width = 220L, height = 14L, colors = c("limegreen", "deepskyblue", "gold", "grey")) {
  x <- list(...)
  col_x <- length(x)
  x <- matrix(unlist(x), ncol = col_x)
  # NOTE(review): row_x is computed but unused
  row_x <- nrow(x)
  # Cumulative proportions give each segment's right edge
  row_cumsums <- matrixStats::rowCumsums(x)
  # When cumsum is zero at X just output a 100% grey bar
  x[which(row_cumsums[,4L] == 0L), 4L] <- 1L
  # Segment left edges in pixels: shifted cumulative sums scaled to width
  pos_x <- row_cumsums
  pos_x[, 1L] <- 0L
  pos_x[, 2L:4L] <- row_cumsums[, 1L:3L] * width
  width_el <- x * width
  # Labels are centred within each segment
  pos_text <- width_el / 2 + pos_x
  xdt <- data.table("x" = x, "pos_x" = pos_x, "width_el" = width_el, "pos_text" = pos_text)
  # One <svg> per row, concatenating the four coloured segments
  xdt[,paste0(
    '<svg viewBox="0 0 ', width,' ', height,'" x="0px" y="0px" width="', width,'px" height="', height,'px">',
    pfsvg(x.V1, pos_x.V1, width_el.V1, pos_text.V1, height, colors[1L]),
    pfsvg(x.V2, pos_x.V2, width_el.V2, pos_text.V2, height, colors[2L]),
    pfsvg(x.V3, pos_x.V3, width_el.V3, pos_text.V3, height, colors[3L]),
    pfsvg(x.V4, pos_x.V4, width_el.V4, pos_text.V4, height, colors[4L]),
    '</svg>'
  )]
}
# Expose for reuse when building the downloadable report
uData$feasibility_svg <- feasibility_svg
# Render one bar segment per element as an SVG <rect> plus centred <text>
# percentage label. Elements with zero width produce an empty string;
# labels for proportions below 6.5% are suppressed as unreadable.
pfsvg <- function(x, pos_x, width_el, pos_text, height, color) {
  labels <- paste0(round(100 * x), "%")
  labels[x < 0.065] <- ""
  out <- character(length(x))
  visible <- width_el > 0
  out[visible] <- paste0(
    '<rect x="', pos_x[visible], '" y="0" width="', width_el[visible],
    '" height="', height, '" style="fill: ', color,
    '" /><text text-anchor="middle" style="font: 600 ', height / 2 + 2,
    'px Arial" x="', pos_text[visible], '" y="', height * 0.75, '">',
    labels[visible], '</text>'
  )
  out
}
uData$pfsvg <- pfsvg  # expose segment renderer for report generation
# Timing helpers used to build the "donut" chart shown in the About tab
# Record a named timing split and append it to `var`.
#
# If `var` was passed as a bare variable name, the updated vector is also
# assigned back into the caller's frame, so repeated `tic("step", timings)`
# calls accumulate splits in `timings`.
#
# @param split label for this timing split.
# @param var numeric vector of previously recorded splits (epoch seconds).
# @return (invisibly) `var` with the new named timestamp appended.
tic <- function(split = "unnamed block", var = numeric()) {
  name <- substitute(var)
  # as.numeric(Sys.time()) yields epoch seconds as a plain double, replacing
  # the non-API .Internal(Sys.time()) call with the same numeric value.
  var <- c(var, `names<-`(as.numeric(Sys.time()), split))
  if (is.name(name)) {
    name <- as.character(name)
    assign(name, var, parent.frame(), inherits = TRUE)
  }
  return(invisible(var))
}
# Build a "donut" plot of recorded timing splits.
#
# @param var named numeric vector of epoch-second timestamps produced by
#   successive tic() calls; names label each split.
# @return a plotly donut chart with one slice per split (in milliseconds)
#   and the total elapsed time printed in the hole.
toc <- function(var) {
  # timings in milliseconds: difference between consecutive timestamps,
  # with "now" appended so the final split is measured too.
  # as.numeric(Sys.time()) replaces the non-API .Internal(Sys.time()) call.
  timings <- (c(var, as.numeric(Sys.time()))[-1] - var) * 1000L
  df <- data.frame(split = names(var), timings = timings)
  # the donut plot
  plotly::plot_ly(data = df, labels = ~split, values = ~timings,
                  textposition = 'inside',
                  texttemplate = "%{value:.0f} ms",
                  hovertemplate = "<extra></extra>%{label}") %>%
    plotly::add_pie(hole = 0.6) %>%
    plotly::add_annotations(text = paste(round(sum(timings), 0), "ms"),
                            showarrow = FALSE, yanchor = "middle", xanchor = "middle",
                            font = list(size = 40)) %>%
    plotly::layout(title = "", showlegend = FALSE,
                   xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
                   yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
}
|
b480a2b4d66da9a3099a618922466e3c61882e60
|
140fb0b962691c7fbf071587e2b950ce3da121be
|
/Code/integration_analysis/01F_OraScoresSEEP.R
|
0e80fcc01578874b4ca00bd2d92d21f6c8d25f02
|
[] |
no_license
|
davidbmorse/SEEP
|
cb0f8d33ca01a741db50ebf21e0f56ab3c5bb372
|
357645a6beb428e4c091f45f278c5cdb4d4d3745
|
refs/heads/main
| 2023-04-09T10:55:01.267306
| 2023-03-24T02:53:21
| 2023-03-24T02:53:21
| 305,464,980
| 0
| 0
| null | 2020-10-31T16:15:55
| 2020-10-19T17:38:20
|
R
|
UTF-8
|
R
| false
| false
| 1,622
|
r
|
01F_OraScoresSEEP.R
|
# Score ORA pathway signatures on SEEP spatial regions with UCell, then
# package the per-cell scores as a Seurat object for downstream plotting.
# NOTE(review): rm(list=ls()) and an absolute setwd() make this script
# non-portable; consider project-relative paths instead.
rm(list=ls())
setwd("C://PROJECTS/P2022/SEEP_Manuscript")
library(tidyverse); library(dplyr); library(Seurat); library(UCell)
# data
data("SEEP_ORA")
#data("SEEP_Regions")
data("SS2trans_Regions")
iso = isoSEEP_ref
DefaultAssay(iso) = "RNA"
iso$Regions = factor(iso$DA_regions_layer)
Idents(iso) = iso$Regions
# Region colour palette, one colour per region level (d3 palette)
colo2=ggsci::pal_d3()(4)
names(colo2) = levels(Idents(iso))
lpaths = lorasig_main
# Extract per-pathway overlap gene lists, named by pathway
lgenes = lapply(lpaths, function(x) {
  y = data.frame(x)
  ly = y$overlapGenes
  names(ly) = y$pathway
  return(ly)
})
# Keep only signatures with at least 5 overlapping genes
lgenes = lapply(lgenes, function(x){
  x[sapply(x, length) >= 5]
})
#lgenes_pos = lapply(lgenes, function(x) lapply(x, paste0, "+"))
so = iso
# Score each signature set with UCell; tag the new *_UCell score columns
# with the set name so sets remain distinguishable across iterations
for(i in names(lgenes)){
  so = AddModuleScore_UCell(so, features=lgenes[[i]])
  colnames(so@meta.data) =
    ifelse(grepl("_UCell$", colnames(so@meta.data)),
           paste(colnames(so@meta.data), i, sep="."),
           colnames(so@meta.data))
}
# Collect all UCell score columns into a score-by-cell matrix
mat = t(so@meta.data %>% select(contains(c("UCell"))))
# Underscores in score names are replaced so they survive as Seurat features
scores = gsub("_", "-", rownames(mat))
rownames(mat) = scores
# Wrap the score matrix in a Seurat object, carrying over metadata,
# scaling, and the UMAP/PCA embeddings from the original object
sof <- CreateSeuratObject(counts = mat, meta.data = so@meta.data)
sof@assays$RNA@var.features = rownames(sof)
sof <- ScaleData(object = sof, do.center=TRUE, do.scale=TRUE, scale.max = Inf)
coord = Embeddings(iso, "umap")
sof[["umap"]] <- CreateDimReducObject(embeddings = coord, key = "UMAP_", assay = "RNA")
coord = Embeddings(iso, "pca")
sof[["pca"]] <- CreateDimReducObject(embeddings = coord, key = "PC_", assay = "RNA")
Idents(sof) = sof@meta.data$Regions
# Save pathway-score Seurat object, scored cells and raw score matrix
iso2_paths = sof
iso2_scores = so
seep_pathwayScores = mat
save(iso2_paths, iso2_scores, seep_pathwayScores, file='data/SEEP_ORApaths.rda')
|
eae59c8dbb27e54a7bb838eb4c7a3da917bec6b9
|
446373433355171cdb65266ac3b24d03e884bb5d
|
/R/grass7_i_tasscap.R
|
ee94bceed0d19f85f5dd988cbe36a3cb55b8d1b4
|
[
"MIT"
] |
permissive
|
VB6Hobbyst7/r_package_qgis
|
233a49cbdb590ebc5b38d197cd38441888c8a6f3
|
8a5130ad98c4405085a09913b535a94b4a2a4fc3
|
refs/heads/master
| 2023-06-27T11:52:21.538634
| 2021-08-01T01:05:01
| 2021-08-01T01:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,071
|
r
|
grass7_i_tasscap.R
|
##' QGIS Algorithm provided by GRASS i.tasscap (grass7:i.tasscap)
##'
##' @title QGIS algorithm i.tasscap
##'
##' @param input `multilayer` - Input rasters. Landsat4-7: bands 1,2,3,4,5,7; Landsat8: bands 2,3,4,5,6,7; MODIS: bands 1,2,3,4,5,6,7. .
##' @param sensor `enum` of `("landsat4_tm", "landsat5_tm", "landsat7_etm", "landsat8_oli", "modis")` - Satellite sensor. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.
##' @param output `folderDestination` - Output Directory. Path for an existing or new folder.
##' @param GRASS_REGION_PARAMETER `extent` - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..
##' @param GRASS_REGION_CELLSIZE_PARAMETER `number` - GRASS GIS 7 region cellsize (leave 0 for default). A numeric value.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifing if complete out of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * output - outputFolder - Output Directory
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
# Run the GRASS "i.tasscap" QGIS algorithm (Tasseled Cap transformation).
# Returns the full qgis_run_algorithm() result when .complete_output is
# TRUE, otherwise only the "output" element of that result.
grass7_i_tasscap <- function(input = qgisprocess::qgis_default_value(), sensor = qgisprocess::qgis_default_value(), output = qgisprocess::qgis_default_value(), GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(), GRASS_REGION_CELLSIZE_PARAMETER = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  check_algorithm_necessities("grass7:i.tasscap")
  result <- qgisprocess::qgis_run_algorithm(
    "grass7:i.tasscap",
    `input` = input,
    `sensor` = sensor,
    `output` = output,
    `GRASS_REGION_PARAMETER` = GRASS_REGION_PARAMETER,
    `GRASS_REGION_CELLSIZE_PARAMETER` = GRASS_REGION_CELLSIZE_PARAMETER,
    ...
  )
  if (!.complete_output) {
    return(qgisprocess::qgis_output(result, "output"))
  }
  result
}
|
80d7822ffcdd13bbacd443b3d7c4f5ed2afeca2b
|
48f50fb1adc9ef70f6eb46f8cb679f68a58d61a2
|
/shiny_charts_maps/app.R
|
83d3fce8179ba893e1040fe70944490dab85e7de
|
[] |
no_license
|
meb2308/p8105_maternity_leave_nyc
|
6b4e4031aef63ac03560668ba5b89f2dc63e1669
|
ec34f9b20ad7d3d201d57b2ed81a71b795721f70
|
refs/heads/main
| 2023-01-20T13:00:07.728921
| 2020-12-01T19:09:25
| 2020-12-01T19:09:25
| 310,341,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
app.R
|
library(shiny)
library(plotly)
# NOTE(review): `charts_df` is not defined or loaded here; it must already
# exist in the environment before the app starts — confirm where it is built.
dataset <- charts_df
# Skeleton UI/server pair; no inputs or outputs are defined yet.
ui <- fluidPage(
)
server = function(input, output) {
}
shinyApp(ui = ui, server = server)
|
321ffb056740e7ee07dbe82c9c38623a2694bb92
|
34420dcc928bc282159b0680a87a674f493f87ce
|
/R/theme_shrub.R
|
2672a1d73404cb8f785abb76fd3176fa06fa1b23
|
[] |
no_license
|
gndaskalova/TeamShrubR
|
42f7ccbd4a40b94eca7c50139095758cdeb8a2a9
|
1f4f4e806168181c7c8e3cbd4506ba4d105a32dd
|
refs/heads/master
| 2021-01-21T05:20:10.802727
| 2017-03-03T12:08:19
| 2017-03-03T12:08:19
| 83,169,941
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
theme_shrub.R
|
#' A ggplot2 theme
#'
#' This function allows you to quickly use a pre-customised theme, with no
#' angled x axis labels: black axis lines, no panel border or grid, larger
#' axis text, 1 cm plot margins and an untitled legend in the top-right
#' corner of the panel.
#' @return A ggplot2 theme object to add to a plot.
#' @keywords theme
#' @export
#' @examples
#' ggplot() + geom_point(...) + theme_shrub()
theme_shrub <- function(){
  theme_bw() +
    theme(axis.text = element_text(size = 16),
          axis.title = element_text(size = 20),
          axis.line.x = element_line(color="black"), axis.line.y = element_line(color="black"),
          panel.border = element_blank(),
          panel.grid.major.x = element_blank(),
          panel.grid.minor.x = element_blank(),
          panel.grid.minor.y = element_blank(),
          panel.grid.major.y = element_blank(),
          # "cm" is now passed explicitly as `units`; the original
          # `units = , "cm"` only worked through accidental arg matching.
          plot.margin = unit(c(1, 1, 1, 1), units = "cm"),
          plot.title = element_text(size=20, vjust=1, hjust=0.5),
          legend.text = element_text(size=12, face="italic"),
          legend.title = element_blank(),
          legend.position = c(0.9, 0.9),
          legend.key = element_blank(),
          legend.background = element_rect(color = "black", fill = "transparent", size = 2, linetype="blank"))
}
|
b3018ff7ae69f16043b3980a3ba60502340830a6
|
76b42d15696eb2ca18412e99a064798a21c4b277
|
/06_clustering/kmeans.r
|
baf7f2a02f72e460b864ea434db6e2531cd2dfc3
|
[
"MIT"
] |
permissive
|
dipanwita2019/ml_projects
|
5afab1a2f46120a2b7a95378d11053562664e646
|
8791d5e3b79472cdd109feacf106934276b80b15
|
refs/heads/master
| 2023-08-06T18:57:53.861647
| 2021-09-17T04:42:39
| 2021-09-17T04:42:39
| 263,816,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
kmeans.r
|
# K-means clustering of mall customers on annual income and spending score.
# Reading in data (columns 4:5 = the two clustering features)
ds <- read.csv('Mall_Customers.csv')
X <- ds[4:5]
# Finding k with the elbow method: total within-cluster sum of squares for
# k = 1..10. Preallocate instead of growing the vector inside the loop.
wcss <- numeric(10)
for (i in seq_len(10)) {
  wcss[i] <- sum(kmeans(X, i)$withinss)
}
plot(1:10, wcss, type = 'b', main = paste("Elbow method"), xlab = 'number clusters')
# Clustering with k = 5 (chosen from the elbow plot). Use a distinct name
# so base::kmeans is not masked by its own result.
km <- kmeans(X, 5)
y_kmeans <- km$cluster
# Visualising the clusters (full column name; original relied on `$center`
# partially matching `centers`)
plot(X, col = y_kmeans)
points(km$centers, col = 1:2, pch = 8, cex = 1)
|
22cecd56c60734efc8a4675328062c995a4dccae
|
9f3eb8261116f0f6abb142cf89abed8dffb9519e
|
/time.R
|
ff708177f4e357ab1cfe4c06b2a6a5cc8ee36d21
|
[] |
no_license
|
kokatoo/R-examples
|
fb8ff172e68a018491dad3f3ee2fa00622c1ea27
|
9d6ae7fdf3c86f225c7a45d764ea353ddaec0a2f
|
refs/heads/master
| 2021-01-19T10:21:13.735226
| 2013-09-03T11:23:24
| 2013-09-03T11:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 96
|
r
|
time.R
|
##---- Time Interval
# Elapsed time between min.time and max.time, in seconds, as a plain number.
# Fixed: the original line was missing the closing parenthesis of
# as.numeric(), which made the file unparseable.
# NOTE(review): max.time and min.time must be defined upstream — confirm.
time.span <- as.numeric(difftime(max.time, min.time, units="secs"))
#----
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.