blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93f32fba51bcc4cacc1b7d0aa0d5aaf982c99401 | 25c33307cbe7267732a8c120cb8ed49788c19173 | /app/shiny/app_native.R | c3861b541547af9cbb946ccb0719d6c5b9324723 | [
"Apache-2.0"
] | permissive | alfanugraha/shinyLUMENS | 6cb58849b81e99efc080b47fae6c89e72d12e9a4 | 5ab1be19061d8b03569c48472876e6958ce77752 | refs/heads/master | 2021-01-17T06:55:25.087760 | 2016-09-20T09:47:43 | 2016-09-20T09:47:43 | 68,285,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,767 | r | app_native.R | # header
# ---- UI: header --------------------------------------------------------------
header <- dashboardHeader(title = 'LUMENS Dashboard')

# ---- UI: sidebar with the two navigation entries -----------------------------
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem('Dashboard', tabName = 'dashboard', icon = icon('dashboard')),
    menuItem('GDP', tabName = 'GDPWidgets', icon = icon('th'))
  )
)

# ---- UI: body ----------------------------------------------------------------
body <- dashboardBody(
  tags$head(
    # FIX: the HTML attribute is 'rel', not 'ref' -- with 'ref' the browser
    # ignores the tag and style.css is never applied.
    tags$link(rel = 'stylesheet', type = 'text/css', href = 'style.css')
  ),
  tabItems(
    # Landing tab: colored LUMENS word mark and a subtitle.
    tabItem(tabName = 'dashboard',
            withTags({
              h2(b(
                span('L', style = 'color:red'),
                span('U', style = 'color:rgb(146,208,80)'),
                span('M', style = 'color:rgb(0,176,240)'),
                span('E', style = 'color:rgb(194,214,155)'),
                span('N', style = 'color:rgb(0,112,192)'),
                span('S', style = 'color:rgb(79,98,40)')
              ))
            }),
            p(em('Land Use Planning for Multiple Environmental Services'))
    ),
    # GDP tab: four land-use sliders feed the two Delta-GDP summary boxes.
    tabItem(tabName = 'GDPWidgets',
            h2('GDP tab content'),
            box(
              title = 'Penambahan Luasan (%)',
              status = 'primary',
              collapsible = TRUE,
              sliderInput('sliderUndisturbedForest', 'Undisturbed Forest', 0, 100, 0),
              sliderInput('sliderOilPalmMonoculture', 'Oil Palm Monoculture', 0, 100, 0),
              sliderInput('sliderSettlement', 'Settlement', 0, 100, 0),
              sliderInput('sliderWaterbody', 'Waterbody', 0, 100, 0)
            ),
            box(
              # FIX: repaired mojibake 'Δ' -> the intended Greek Delta.
              title = HTML('ΔGDP (%)'),
              status = 'info',
              collapsible = TRUE,
              textOutput('textOutputPrecentageDeltaGDP')
            ),
            box(
              title = HTML('ΔGDP (Million Rupiahs)'),
              status = 'info',
              collapsible = TRUE,
              textOutput('textOutputMillionDeltaGDP')
            )
    )
  )
)

# ---- Assemble the dashboard page ---------------------------------------------
ui <- dashboardPage(
  skin = 'black',
  header,
  sidebar,
  body
)

# ---- Server ------------------------------------------------------------------
server <- function(input, output, session) {
  # Mean of the four slider values, displayed as the Delta-GDP percentage.
  # (Output id keeps the original 'Precentage' spelling to match the UI above.)
  output$textOutputPrecentageDeltaGDP <- renderText({
    mean(c(input$sliderUndisturbedForest, input$sliderOilPalmMonoculture,
           input$sliderSettlement, input$sliderWaterbody))
  })
  # Sum of the slider values scaled by 100, displayed in million Rupiahs.
  output$textOutputMillionDeltaGDP <- renderText({
    paste(
      sum(input$sliderUndisturbedForest, input$sliderOilPalmMonoculture,
          input$sliderSettlement, input$sliderWaterbody) * 100
    )
  })
  # session$onSessionEnded(function(){
  #   stopApp()
  # })
}

shinyApp(ui, server)
|
374e16dd496c39cc9a778000281d7ef75498d9c5 | ee724d06cf31f8d6cd8f28ed06a1127a2490ca17 | /2_data_preprocessing.R | d040c09da66e2a7dd315ecfaf03d6a5fbc0f94ac | [] | no_license | xuefliang/UCI_imbalanced_data | fde95a4327d6b25afc328f2ea2cef2322bd6d908 | da50623a97d2f91d24882cad76647085670dcfa0 | refs/heads/master | 2021-06-14T12:50:11.680171 | 2017-05-02T07:31:05 | 2017-05-02T07:31:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 997 | r | 2_data_preprocessing.R | ## 数据预处理
# Inspect the target variable values; returns "-50000" and "+50000"
unique(train$income_level)
unique(test$income_level)
# Recode the target to 0/1: ifelse(test, yes, no)
train[,income_level:= ifelse(income_level == "-50000",0,1)]
test[,income_level:= ifelse(income_level == "-50000",0,1)]
# Share (%) of each target value -- shows the class imbalance
round(prop.table(table(train$income_level))*100)
# Column indices to be treated as nominal (factor) vs numeric.
# NOTE(review): factcols includes column 41 but numcols is drawn from 1:40 --
# confirm the intended total column count of the data.
factcols <- c(2:5,7,8:16,20:29,31:38,40,41)
numcols <- setdiff(1:40,factcols)
# data.table idiom: apply a conversion over a column subset via .SD / .SDcols
train[,(factcols) := lapply(.SD, factor), .SDcols = factcols]
train[,(numcols) := lapply(.SD, as.numeric), .SDcols = numcols]
test[,(factcols) := lapply(.SD, factor), .SDcols = factcols]
test[,(numcols) := lapply(.SD, as.numeric), .SDcols = numcols]
# Split each set into categorical and numeric sub-tables
cat_train <- train[,factcols, with=FALSE]
cat_test <- test[,factcols,with=FALSE]
num_train <- train[,numcols,with=FALSE]
num_test <- test[,numcols,with=FALSE]
# Drop the originals to save memory
rm(train,test)
|
a64ebcba56a783999142386a7e366702f052a194 | ba855e045647547b6f704c39fe80fece37311340 | /man/remap.q2t.Rd | 814bc60bce24e6674168898f971d2c3a59424a87 | [] | no_license | tobyjohnson/gtx | bb9f6a6ea9ec4ec720e16c988580ffc5fbf22573 | 9afa9597a51d0ff44536bc5c8eddd901ab3e867c | refs/heads/master | 2021-01-17T00:47:26.344059 | 2019-08-29T19:34:54 | 2019-08-29T19:34:54 | 29,961,962 | 20 | 12 | null | 2019-09-11T17:33:40 | 2015-01-28T10:18:52 | R | UTF-8 | R | false | false | 624 | rd | remap.q2t.Rd | \name{remap.q2t}
\alias{remap.q2t}
\title{Remap coordinates from BLAT query sequence to BLAT target sequence.}
\description{
For a single BLAT match of a query sequence against a target sequence,
a vector of nucleotide positions in the query sequence are remapped to
the corresponding nucleotide positions in the target sequence.
}
\usage{
remap.q2t(qpos, blatres)
}
\arguments{
\item{qpos}{a vector of 0-offset positions in the query sequence.}
\item{blatres}{a single match from BLAT.}
}
\value{
A vector of remapped positions in the target sequence.
}
\author{
Toby Johnson \email{Toby.x.Johnson@gsk.com}
}
|
6829feef94bf80c3cb236aa0fcd5324e1b69e8b7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Haplin/examples/genDataPreprocess.Rd.R | 5ad40cbb02727bb7663f5640f96165232b70438c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 833 | r | genDataPreprocess.Rd.R | library(Haplin)
### Name: genDataPreprocess
### Title: Pre-processing of the genetic data
### Aliases: genDataPreprocess
### ** Examples
# The argument 'overwrite' is set to TRUE!
# First, read the data:
examples.dir <- system.file( "extdata", package = "Haplin" )
example.file <- paste0( examples.dir, "/exmpl_data.ped" )
ped.data.read <- genDataRead( example.file, file.out = "exmpl_ped_data",
format = "ped", overwrite = TRUE )
ped.data.read
# Take only part of the data (if needed)
ped.data.part <- genDataGetPart( ped.data.read, design = "triad", markers = 10:12,
file.out = "exmpl_ped_data_part", overwrite = TRUE )
# Preprocess as "triad" data:
ped.data.preproc <- genDataPreprocess( ped.data.part, design = "triad",
file.out = "exmpl_data_preproc", overwrite = TRUE )
ped.data.preproc
|
afa758494f56be35aec4dd76cb7dc7954f501452 | cedf79a328d8a0fbb9869c234b0ce8735d602f95 | /man/randsample.Rd | 6ffefc310bba26d53b6dad41de1aa415cd1e68b7 | [
"BSD-3-Clause"
] | permissive | ChipsXu/MACSr | 930a06145524546816ab0f790db5f8867273b150 | 653d62633240df603c386dcfe358dfc8d3b01b87 | refs/heads/master | 2023-03-07T15:39:39.727168 | 2021-02-10T15:06:57 | 2021-02-10T15:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,846 | rd | randsample.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randsample.R
\name{randsample}
\alias{randsample}
\title{randsample}
\usage{
randsample(
ifile,
outdir = ".",
outputfile = character(),
percentage = numeric(),
number = numeric(),
seed = -1L,
tsize = NULL,
format = c("AUTO", "BAM", "SAM", "BED", "ELAND", "ELANDMULTI", "ELANDEXPORT",
"BOWTIE", "BAMPE", "BEDPE"),
buffer_size = 100000L,
verbose = 2L,
log = TRUE
)
}
\arguments{
\item{ifile}{Alignment file. If multiple files are given as '-t A B
C', then they will all be read and combined. Note that pair-end
data is not supposed to work with this command. REQUIRED.}
\item{outdir}{The output directory.}
\item{outputfile}{Output bedGraph file name. If not specified, will
write to standard output. REQUIRED.}
\item{percentage}{Percentage of tags you want to keep. Input 80.0
for 80\%\%. This option can't be used at the same time with
-n/--num. REQUIRED}
\item{number}{Number of tags you want to keep. Input 8000000 or
8e+6 for 8 million. This option can't be used at the same time
with -p/--percent. Note that the number of tags in output is
approximate as the number specified here. REQUIRED}
\item{seed}{Set the random seed while down sampling data. Must be a
non-negative integer in order to be effective. DEFAULT: not set}
\item{tsize}{Tag size. This will override the auto detected tag
size. DEFAULT: Not set}
\item{format}{Format of tag file, \"AUTO\", \"BED\" or \"ELAND\" or
\"ELANDMULTI\" or \"ELANDEXPORT\" or \"SAM\" or \"BAM\" or
\"BOWTIE\" or \"BAMPE\" or \"BEDPE\". The default AUTO option
will \%(prog)s decide which format the file is. Please check the
definition in README file if you choose
ELAND/ELANDMULTI/ELANDEXPORT/SAM/BAM/BOWTIE or
BAMPE/BEDPE. DEFAULT: \"AUTO\""}
\item{buffer_size}{Buffer size for incrementally increasing
internal array size to store reads alignment information. In
most cases, you don't have to change this parameter. However,
if there are large number of chromosomes/contigs/scaffolds in
your alignment, it's recommended to specify a smaller buffer
size in order to decrease memory usage (but it will take longer
time to read alignment files). Minimum memory requested for
reading an alignment file is about # of CHROMOSOME *
BUFFER_SIZE * 8 Bytes. DEFAULT: 100000}
\item{verbose}{Set verbose level. 0: only show critical message, 1:
show additional warning message, 2: show process information,
3: show debug messages. If you want to know where are the
duplicate reads, use 3. DEFAULT:2}
\item{log}{Whether to capture logs.}
}
\value{
\code{macsList} object.
}
\description{
Randomly sample number/percentage of total reads.
}
\examples{
eh <- ExperimentHub::ExperimentHub()
CHIP <- eh[["EH4558"]]
randsample(CHIP, number = 1000, outdir = tempdir(), outputfile = "randsample.bed")
}
|
ff4bcda3953a2a731d8305a51dfe2c194bba2094 | 454c1254be5ec2d6f7d3a9d864fa603a9fd95a74 | /R/Interactive Update 51517.R | 0456051140befc033fe8ed454de20aa6ab1baeeb | [] | no_license | CRKOMassSpecComputing/HomoSeriesMatcher | aae13d201d34eca3ef8a0491de2e4fdac85e7c46 | 4e89a6615eec300ebd835a808035ea3ad11cde9f | refs/heads/master | 2020-03-18T13:06:40.777168 | 2018-05-24T21:24:44 | 2018-05-24T21:24:44 | 134,761,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,199 | r | Interactive Update 51517.R | # Interactive plot for homol series ######################################################
#
# Christopher Ruybal and Karl Oetjen
# Created 5/17/2016
# Modified 5/17/2016
##########################################################################################
# Uses outputs from Martin Loos' "nontarget" and "nontargetData" packages found on GitHub
plot_interact <-
function (homol)
{
dat1 <- data.frame(mz = homol[[1]][,1],
RT = homol[[1]][,3],
PeakID = homol[[1]][,4],
GroupID = homol[[1]][,5])
I <- which(dat1$GroupID == 0)
dat2 <- data.frame(mz = homol[[1]][I,1],
RT = homol[[1]][I,3], col = 0)
list2 <- matrix(data=NA,nrow=strtoi(max(summary(homol[[5]])[,1])),ncol=length(homol[[5]]))
for (ll in 1:length(homol[[5]])) {
for (mm in 1:length(homol[[5]][[ll]])) {
list2[mm,ll] <- homol[[5]][[ll]][[mm]][1]
}
}
this <- round(homol[[3]][, 3], digits = 2)
that <- levels(as.factor(this))
colo <- rainbow(length(that))
df <- NULL
for (p in 1:length(homol[[5]])) {
temp_df <- data.frame(x=dat1$RT[list2[,p]], y=dat1$mz[list2[,p]], col = p, MZ = this[p],PeakID = paste("PeadkID: " ,dat1$PeakID[list2[,p]]))
df <- rbind(df,temp_df)
}
names(df)[1] = "RT"
names(df)[2] = "mz"
#plot_ly(data = df, x = RT, y = mz, mode = "markers+lines",
# color = factor(col,labels = this))
#plot_ly(data = dat2, x = RT, y = mz, mode = "markers", marker=list(color="grey" , size=5 , opacity=0.5))
f <- list(
family = "sans-serif",
size = 12,
color = "#000"
)
l <- list(
font = f,
bgcolor = NA,
bordercolor = NA,
borderwidth = 2
)
yy <- list(title = "m/z")
idx = !is.na(df$RT)
df2 <- df[idx,]
# Get unique MZ
mzU <- unique(df2$MZ)
cols <- grDevices::rainbow(length(unique(df$MZ)))
p <- plot_ly()
# for each unique MZ
for (ii in 1:length(mzU)){
p <- add_trace(p,data = filter(df2, df2$MZ == mzU[ii]),
type = "scatter",
x = ~RT,
y = ~mz,
mode = "markers",
color = ~factor(MZ,as.character(mzU), mzU),
colors = grDevices::rainbow(length(unique(df$MZ))),
legendgroup = as.character(mzU[ii]),
name = as.character(mzU[ii]),
hoverinfo = "none",
showlegend = TRUE)
p <- add_trace(p,data = filter(df2, MZ == mzU[ii]),
type = "scatter",
x = ~RT,
y = ~mz,
text = df2$PeakID[which(df2$MZ== mzU[ii])],
split = ~col,
color = ~factor(MZ,as.character(mzU), mzU),
colors = grDevices::rainbow(length(unique(df$MZ))),
mode = "lines",
legendgroup = as.character(mzU[ii]),
name = as.character(mzU[ii]),
hoverinfo = "x+y+text",
showlegend = FALSE)
}
p <- add_trace(p,data = dat2,
type = "scatter",
x = ~RT,
y = ~mz,
mode = "markers",
marker=list(color="grey" , size=5 , opacity=0.5),
name="No Series",
hoverinfo = "x+y",
showlegend=TRUE)
p
# Set the font and size for axis labels
f1 <- list(
family = "Arial, sans-serif",
size = 18,
color = "black"
)
# Set the font and size for the tick text
f2 <- list(
family = "Old Standard TT, serif",
size = 14,
color = "black"
)
# Y axis setup
axy <- list(
title = "m/z",
titlefont = f1,
showticklabels = TRUE,
tickangle = 0,
tickfont = f2,
autotick = TRUE,
ticks = "inside",
tick0 = NA,
dtick = NA,
ticklen = 5,
tickwidth = 2,
showline = TRUE,
mirror = "ticks",
gridcolor = toRGB("white"),
gridwidth = 0.5,
linecolor = toRGB("black"),
linewidth = 1
)
# X axis setup
axx <- list(
title = "RT (minutes)",
titlefont = f1,
showticklabels = TRUE,
tickangle = 0,
tickfont = f2,
autotick = TRUE,
ticks = "inside",
tick0 = NA,
dtick = NA,
ticklen = 5,
tickwidth = 2,
showline = TRUE,
mirror = "ticks",
gridcolor = toRGB("white"),
gridwidth = 0.5,
linecolor = toRGB("black"),
linewidth = 1
)
layout(p,xaxis = axx, yaxis = axy)
} |
7fdc985b9a67fbafd3b744c626d5c1aae0223bd4 | ba3411ee93d26459ceb961305169026b01d53575 | /R/.svn/text-base/test1.r.svn-base | 89527175dd559092bfa090cb62381273018de06d | [] | no_license | Will66/testsigou | 72ea6c3395188d28267be65cf52b1a6cdeffd9d3 | 1fc2ea756b1bae84a11c1f1a4ecf740bc268b91e | refs/heads/master | 2021-01-23T12:38:45.278413 | 2011-05-25T11:37:50 | 2011-05-25T11:37:50 | 1,798,529 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,211 | test1.r.svn-base | # plotting a simple ring graph, all default parameters, except the layout
# Commented-out variants showing base, tk, and rgl plotting devices:
# g <- graph.ring(10)
# g$layout <- layout.circle
# plot(g)
# tkplot(g)
# rglplot(g)
# Plot a random (Barabasi-Albert) graph; parameters set in the plot() call
g <- barabasi.game(100)
plot(g, layout=layout.fruchterman.reingold, vertex.size=4,
vertex.label.dist=0.5, vertex.color="red", edge.arrow.size=0.5)
# Plot a random graph, one color per community (edge-betweenness clustering)
g <- erdos.renyi.game(100, 1/100)
comps <- edge.betweenness.community(g)$membership
colbar <- rainbow(max(comps)+1)
V(g)$color <- colbar[comps+1]
plot(g, layout=layout.fruchterman.reingold, vertex.size=5, vertex.label=NA)
#
# NOTE(review): this script uses pre-1.0 igraph names (graph.ring,
# barabasi.game, igraph.par, 0-based vertex ids) -- confirm igraph version.
# # plot communities in a graph (spinglass clustering on three joined cliques)
g <- graph.full(5) %du% graph.full(5) %du% graph.full(5)
g <- add.edges(g, c(0,5, 0,10, 5,10))
com <- spinglass.community(g, spins=5)
V(g)$color <- com$membership+1
g <- set.graph.attribute(g, "layout", layout.kamada.kawai(g))
plot(g, vertex.label.dist=1.5)
# # draw a bunch of trees, fixing the layout for all subsequent plots
igraph.par("plot.layout", layout.reingold.tilford)
plot(graph.tree(20, 2))
# plot(graph.tree(50, 3), vertex.size=3, vertex.label=NA)
# plot(graph.tree(50, 2, mode="undirected"), vertex.size=10, vertex.color="green")
| |
cc96d0b2d73366d67cd55f229da4d4816df105be | 9c5a7859c5d73cbadf6582ca7262e05d91faf145 | /Labs/Lab 1/lab1_R_147_su19.R | 739eeafc86af399f9fbd47360a7d78c13342b520 | [] | no_license | wesleywchang/STAT-147 | 077a961e65151e330dcd7c31789305a8f2c67a0a | 30379aac9fc1bed16ade940127fe1c9866e44b9d | refs/heads/master | 2022-11-30T05:43:39.789606 | 2020-08-15T06:33:30 | 2020-08-15T06:33:30 | 287,691,034 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,909 | r | lab1_R_147_su19.R | # Statistics 147 Lab #1 Summer 2020
# Wesley Chang
#
x = 21.56 # Assigns a value to the variable x
x # Prints the value of x
class(x) # Prints the class of x
is.integer(x) # is x an integer?
x1 <- as.integer(x)
x1 ` # Print the value of x1
class(x1) # Print the class of x1
my_data = c(4,6,2,7,9,5) # read in the data
my_data # Print the data
class(my_data) # Print the class name of the data
my_data_sorted = sort(my_data) # Create new variable containing the sorted data
my_data_sorted # Print the sorted data
my_colors = c("brown", "red", "yellow", "blue", "orange", "green") # enter the data
my_colors # Print the data
class(my_colors) # Print the class name of the data
combined1 = c(my_data,my_colors) # Combine the two data sets
combined1 # Print the new data set
x = c( 1,2,3,4,5) # enter values of x
x # print x
y = c(2,4,6,8,10) # enter values of y
y # print values of y
x1 = 3.5*x # create a new variable x1 = 3.5x
x1 # print x1
# Create new variable sum1 = x + y
sum1 = x + y
sum1 # print sum1
# Create new variable diff1 = x - y
diff1 = x - y
diff1 # print diff1
# Create new variable prod1 = x*y
prod1 = x*y
prod1 # print prod1
# Create new variable div1 = x/y
div1 = x/y
div1 # print div1
# Create a new variable s = 2x+ 3sqrt(y)
s = 2*x + 3*sqrt(y) # fill in your code
s # print the values of s
cartest = read.table(file = "c:/Users/wesle/iCloudDrive/Summer 2020 (UCR)/STAT 147 (Session A)/Labs/Lab 1/cartest1.dat", header = TRUE)
cartest
attach(cartest)
names(cartest)
cartest$Car
cartest$BrandA
cartest$BrandB
sum2 = BrandA + BrandB
sum2
setwd("c:/Users/wesle/iCloudDrive/Summer 2020 (UCR)/STAT 147 (Session A)/Labs/Lab 1")
dir()
gas_data = read.csv("gas1.csv", header = TRUE)
gas_data
names(gas_data)
attach(gas_data)
premium
regular
|
d3e284c1ce8508b0d4405d163eba98faa801ee32 | 32d7ccac04d2b74099f354027e29dcc185cc2c62 | /RU_code/error_test2.R | 29b38c138bb89997ea495bad91484650f62146e2 | [] | no_license | jhhughes256/optinterval | e6f6e343a00614957a026ece77a926c91cc92444 | 1d514b5bce8692a903552cb8a28a61cda14b5d5e | refs/heads/master | 2022-01-21T03:15:11.941841 | 2019-08-01T16:59:40 | 2019-08-01T16:59:40 | 79,285,564 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,255 | r | error_test2.R | # Some notes on achieving constant numerical integration error
time <- seq(0,120,by=10)
# One-compartment PK model with first-order absorption:
#   Cp(t) = (Dose/V) * ka/(ka - kel) * (exp(-kel*t) - exp(-ka*t))
# Vectorized over `time`. The PK constants, previously hard-coded, are now
# defaulted parameters so the same model can be reused; defaults reproduce
# the original behavior exactly. Requires ka != kel (flip-flop singularity).
#
# Args:
#   time: numeric vector of times.
#   Dose: administered dose (default 100).
#   V:    volume of distribution (default 2).
#   ka:   absorption rate constant (default 0.05).
#   kel:  elimination rate constant (default 0.02).
# Returns: numeric vector of plasma concentrations, same length as `time`.
comp1abs <- function(time, Dose = 100, V = 2, ka = 0.05, kel = 0.02)
{
  (Dose/V)*ka/(ka-kel)*(exp(-kel*time)-exp(-ka*time))
}
# Calculate a concentration-time course (standing in for observed data)
testdf <- data.frame(time,Cp=comp1abs(time))
plot(Cp~time, data=testdf)
# First derivative (slope) = delta-y / delta-x; element 1 is 0/0 = NaN from
# the zero padding and is overwritten with 0 on the next line.
testdf$firstd <- c(0,diff(testdf$Cp))/c(0,diff(testdf$time))
testdf$firstd[1] <- 0
# Second derivative (slope of the slope), same padding trick
testdf$secondd <- c(0,diff(testdf$firstd))/c(0,diff(testdf$time))
testdf$secondd[1] <- 0
# Direction of the gradient isn't important, so use abs()
plot(abs(secondd)~time, data=testdf)
# Time step that would give a constant local integration error for tolerance k;
# note deltat is Inf wherever secondd == 0 (e.g. the padded first row).
k <- 0.002
testdf$deltat <- (1/(k*abs(testdf$secondd)))^(1/3)
testdf
# deltat means that for the preceding time-interval and this error
# you could have samples every deltat min?
# needs to be framed as: given x pk samples what interval apart should they be?
# Some sort of iterative process that minimizes the integration error?
# Need spline function to estimate derivatives at times based on observed data?
f24735bc408976f22123a226e488ad8215423471 | 8983307bae1e6ce5be84c70a14ecdea663907612 | /disability_weights.R | 6bbd3bc1b81fae8c98796e90cb41c4deb135c130 | [
"MIT"
] | permissive | info478-s20/dw-analysis | c640840549bc9f9d74786b4057597a613c3018de | bd94cc512072da9a6c2275ee28dbdfd0997b8997 | refs/heads/master | 2022-04-15T01:58:04.032667 | 2020-04-16T16:48:16 | 2020-04-16T16:48:16 | 255,634,883 | 0 | 0 | MIT | 2020-04-14T14:37:42 | 2020-04-14T14:37:41 | null | UTF-8 | R | false | false | 2,743 | r | disability_weights.R | # Load Packages
library(readxl)
library(tidyr)
library(dplyr)
library(ggplot2)
library(ggrepel)
# Load and reshape survey responses: one row per (respondent, health state)
responses <- read_xlsx("responses.xlsx")
responses_long <- responses %>%
  gather(description, severity, -Timestamp)
# Short description factor, ordered by mean severity for plotting
responses_long$short_description <- reorder(
  factor(substr(responses_long$description, 1, 100)),
  responses_long$severity,
  mean
)
# Histograms of the disability weight for each description
ggplot(responses_long) +
  geom_histogram(mapping = aes(x = severity)) +
  facet_wrap(~short_description)
ggsave("charts/hist_cause.png", width=10, height = 10)
# Violin plot showing the distribution of responses (by cause)
ggplot(responses_long, aes(short_description, severity)) +
  geom_violin(draw_quantiles = c(0.5)) +
  labs(x="Description", y="Disability Weight",
       title="Distribution of Disability Weights") +
  coord_flip()
ggsave("charts/violin_cause.png", width=10, height = 10)
# Use the timestamp as a respondent id, ordered by median severity
responses_long$person <- reorder(
  factor(as.character(responses_long$Timestamp)),
  responses_long$severity,
  median
)
# Violin plot showing the distribution of responses (by person)
ggplot(responses_long, aes(person, severity)) +
  geom_violin(draw_quantiles = c(0.5)) +
  labs(x="Person", y="Disability Weight",
       title="Distribution of Disability Weights") +
  coord_flip()
ggsave("charts/violin_person.png", width=10, height = 10)
# Load and format the IHME GBD 2016 reference disability weights
gbd_file <- "IHME_GBD_2016_DISABILITY_WEIGHTS_3/IHME_GBD_2016_DISABILITY_WEIGHTS_Y2017M09D14.CSV"
ihme_weights <- read.csv(gbd_file, stringsAsFactors = FALSE) %>%  # FIX: was F
  rename(description = Health.state.lay.description) %>%
  distinct(description, .keep_all = TRUE) %>%                     # FIX: was T
  mutate(short_description = substr(description, 1, 100))
# Mean in-class score per description
grouped <- responses_long %>% group_by(description) %>%
  summarize(mean_score = mean(severity, na.rm = TRUE))            # FIX: was T
# Join in-class means to the IHME reference weights
joined <- grouped %>%
  left_join(ihme_weights, by="description")
# Labeled scatter: in-class score (rescaled from 0-10 to 0-1) vs IHME weight
ggplot(joined) +
  geom_label_repel(aes(x=mean_score/10, y=disability.weight, label=Health.state.name)) +
  labs(x="In Class", y="IHME", title="In Class v.s. IHME Weights") +
  xlim(0, 1) +
  ylim(0, 1) +
  geom_abline(slope=1, intercept=0, alpha = .3)
ggsave("charts/comparison_labeled.png", width=10, height = 10)
# Unlabeled scatter, same axes
ggplot(joined) +
  geom_point(aes(x=mean_score/10, y=disability.weight)) +
  labs(x="In Class", y="IHME", title="In Class v.s. IHME Weights") +
  xlim(0, 1) +
  ylim(0, 1) +
  geom_abline(slope=1, intercept=0, alpha = .3)
ggsave("charts/comparison.png", width=10, height = 10)
|
cd0bdfd35c108ee77df13a2db668f5ff3418606a | fadbe575bae0403f32a3f96c0b74b1112863ab44 | /list.R | 6c3a6ec106b2620d74588fc677beea6e0e5e70cd | [] | no_license | melavo/r-project-read-write-sort-csv | 6978984bb43d740dce127c5562678c7c620799c9 | f094490d6feebf1082b30e33813ce868b118b910 | refs/heads/master | 2020-07-06T08:44:04.805459 | 2016-11-17T17:52:26 | 2016-11-17T17:52:26 | 74,051,074 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 92 | r | list.R | # List: x2,x3,x4.
# Return the 2nd, 3rd, and 4th powers of `x` as a named list (x2, x3, x4).
# Works elementwise for vector input since `^` is vectorized.
# (Idiom cleanup: dropped the explicit return() and trailing semicolon.)
powerful <- function(x) {
  list(x2 = x^2, x3 = x^3, x4 = x^4)
}
a9318428b23ad7ffa30dc98e27b42379a0c838c2 | d7ff71e8ffb07419aad458fb2114a752c5bf562c | /tests/testthat/indention_operators/not_first_trigger-out.R | c5569c8ce244cd81580a87a592500677cdf38ab7 | [
"MIT"
] | permissive | r-lib/styler | 50dcfe2a0039bae686518959d14fa2d8a3c2a50b | ca400ad869c6bc69aacb2f18ec0ffae8a195f811 | refs/heads/main | 2023-08-24T20:27:37.511727 | 2023-08-22T13:27:51 | 2023-08-22T13:27:51 | 81,366,413 | 634 | 79 | NOASSERTION | 2023-09-11T08:24:43 | 2017-02-08T19:16:37 | R | UTF-8 | R | false | false | 99 | r | not_first_trigger-out.R | 1 + (
3
) %>%
j()
a <- c(
x, y,
z
) %>%
k()
a + (
c
) + (
c(
2
)
) %>%
j()
|
06c727ebe7f225b6b0ddbe0546a8e7ba4b3cec06 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fastcox/examples/FHT.Rd.R | 32077deb4afb1cc3fed6902555378cf7da3a07e3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 159 | r | FHT.Rd.R | library(fastcox)
### Name: FHT
### Title: FHT data introduced in Simon et al. (2011).
### Aliases: FHT
### Keywords: datasets
### ** Examples
data(FHT)
|
db15c8c0b9187476044c4aadfcc0f707a21322e8 | a3a59ebe1a41f1bc23d641e0f26673c684ecf72b | /R/createOmics_JoinPhenoAssay.R | cf7685ecb2b37d238fb7fa0bfcc3f3283f388f01 | [] | no_license | gabrielodom/pathwayPCA | 78c801aaf51c6f16eaac1e2bbbd7c7bb743492c8 | 552e1f378040e6080aa3ac13a7f8a302e579532d | refs/heads/master | 2023-07-08T14:17:13.486479 | 2023-06-28T17:29:22 | 2023-06-28T17:29:22 | 107,602,989 | 12 | 2 | null | 2019-03-28T19:43:40 | 2017-10-19T21:57:30 | R | UTF-8 | R | false | false | 2,686 | r | createOmics_JoinPhenoAssay.R | #' Merge Phenotype and Assay Data by First Column (Sample ID)
#'
#' @description Match the records from the phenotype data to the values in the
#' assay data by sample ID. Return rows from each data frame with matches in
#' both data frames. The sample ID must be the first column in both data
#' frames.
#'
#' @param pheno_df Phenotype data frame with the sample IDs in the first column
#' @param assay_df Assay data frame with the sample IDs in the first column
#'
#' @return A list of three elements:
#' \itemize{
#' \item{\code{assay} : }{A data frame with the rows from \code{assay_df}
#' which are contained in \code{pheno_df}, ordered by their position in
#' \code{pheno_df}.}
#' \item{\code{response} : }{A data frame with the rows from
#' \code{pheno_df} which are contained in \code{assay_df}.}
#' \item{\code{sampleID} : }{A vector of the sample IDs shared by both data
#' frames, ordered by their position in \code{pheno_df}.}
#' }
#'
#' @details Don't use this function. This is simply a wrapper around the
#' \code{\link{merge}} function with extra checks for the class of the ID
#' column. If you want to merge your two data frames by sample ID, you should
#' use the \code{inner_join} function from the \code{dplyr} package instead.
#' It's easier. See \url{https://dplyr.tidyverse.org/reference/join.html}.
#'
#' @keywords internal
#'
#'
#' @examples
#' # DO NOT CALL THIS FUNCTIONS DIRECTLY. USE CreateOmics() INSTEAD.
#'
#' \dontrun{
#' data("colonSurv_df")
#' JoinPhenoAssay(
#' pheno_df = colonSurv_df[, 1:3],
#' assay_df = colonSurv_df[, -(2:3)]
#' )
#' }
#'
JoinPhenoAssay <- function(pheno_df, assay_df){
  # Sample IDs are assumed to sit in the first column of both data frames.
  idsPheno <- pheno_df[, 1, drop = TRUE]
  idsAssay <- assay_df[, 1, drop = TRUE]
  sharedIDs <- intersect(idsAssay, idsPheno)

  if (identical(idsAssay, idsPheno)) {
    # Fast path: rows already align one-to-one, so no merge is required.
    res <- list(
      assay    = assay_df[, -1],
      response = pheno_df[, -1, drop = FALSE],
      sampleID = idsPheno
    )
  } else if (length(sharedIDs) > 0) {
    # Partial overlap: merge on the ID column and keep only shared samples.
    message(
      sprintf("There are %i samples shared by the assay and phenotype data.",
              length(sharedIDs))
    )
    merged_df <- merge(pheno_df, assay_df, 1)
    # Preserve any extra classes carried by either input.
    class(merged_df) <- union(class(pheno_df), class(assay_df))
    nPheno <- ncol(pheno_df)
    res <- list(
      assay    = merged_df[, -seq_len(nPheno)],
      response = merged_df[, 2:nPheno, drop = FALSE],
      sampleID = merged_df[, 1, drop = TRUE]
    )
  } else {
    stop("There are no samples with the same sample IDs in the assay and phenotype data.")
  }
  res
}
|
c8acee21a1dbbf66c71e52aaa2c4f0465a9501eb | c61e57f44b4a74b5aa69331ab04445d10e6454af | /R/aaa.R | 070561f969592e2ab5691f159791794f5137a743 | [] | no_license | MhAmine/gitgadget | 00352b6eb6d44bf2562d1bef4ea929914ed2f3b5 | 5bda8914e92c5c32222444daa7f3220fc18ee4c1 | refs/heads/master | 2020-03-09T03:03:35.942203 | 2018-01-09T17:29:22 | 2018-01-09T17:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 517 | r | aaa.R | # to avoid 'no visible binding for global variable' NOTE
# Declare names used via non-standard evaluation (dplyr pipelines, shiny UI)
# so R CMD check does not raise the "no visible binding" NOTE for them.
globalVariables(c(".", "un", "directory", "base64_enc", "server"))

# Package-level documentation stub: the roxygen block below attaches the
# package imports to this NULL object (standard "package doc" pattern).
#' gitgadget
#'
#' @name gitgadget
#' @docType package
#' @import shiny miniUI curl dplyr
#' @importFrom jsonlite fromJSON
#' @importFrom rstudioapi isAvailable getActiveProject selectFile selectDirectory openProject
#' @importFrom markdown markdownToHTML
#' @importFrom utils read.csv
#' @importFrom stats na.omit
#' @importFrom methods is
#' @importFrom utils packageVersion browseURL
NULL
27242ffa4bf351a8134f85dee00f446433df1771 | 49928aacb833073fe225ae2cff02f32423e4da34 | /R/summary.nomclust.R | ac784dfbfe6237aef7aa33187d93d045b384973a | [] | no_license | cran/nomclust | a30d00e74894e06fe4a063dadf696143a53aac48 | 63d7ad4664c5fe23ebb8bf1b961ae421b0329229 | refs/heads/master | 2023-08-31T05:09:52.306488 | 2023-08-18T10:12:38 | 2023-08-18T11:30:56 | 36,813,240 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 947 | r | summary.nomclust.R | summary.nomclust <- function(object, ...){
if (names(object)[2] == "eval"){ # nomclust object and nomprox with data
cat("\nSizes of the created clusters:\n")
for (i in 1:length(object$mem)) {
cat("\n", i+1," clusters:", sep = "")
print(table(object[[1]][i]))
}
cat("\nOptimal number of clusters based on the evaluation criteria:\n")
print(as.data.frame(object$opt))
cat("\nAgglomerative coefficient:", object$dend$ac, "\n")
}
if (names(object)[2] == "opt"){ # evalclust
cat("\nOptimal number of clusters based on the evaluation criteria:\n")
print(as.data.frame(object$opt))
}
if (names(object)[2] == "dend"){ # nomprox without data
cat("\nSizes of the created clusters:\n")
for (i in 1:length(object$mem)) {
cat("\n", i+1," clusters:", sep = "")
print(table(object[[1]][i]))
}
cat("\nAgglomerative coefficient:", object$dend$ac, "\n")
}
}
|
77a7e5566422622cd57ee99148f34c6f13fe7d11 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/multicon/examples/Profile.ICC.Rd.R | baff4da1ff731038826d9041031b355a51a7c416 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 477 | r | Profile.ICC.Rd.R | library(multicon)
### Name: Profile.ICC
### Title: Profile Intra-class Correlation
### Aliases: Profile.ICC
### Keywords: intraclass correlation profile similarity agreement
### ** Examples
data(acq1)
data(acq2)
#lets look at the Profile ICC between two aquaintance ratings of subjects' personality
names(acq1)
names(acq2)
Profile.ICC(acq1, acq2)
#We can get the descriptives for these using describe() from the 'psych' package
describe(Profile.ICC(acq1, acq2))
|
c9962112dc5d3fd0296bcb30122838f6fd457e4e | 706b9220accbc885c06b2f2448f0941c5fae3978 | /man/sdm_asc_from_raster.Rd | 7576a766a4855162c0c1d9f44f4413030364f149 | [] | no_license | cran/raincpc | 959882e3121bff0bae25779fa3268b67ab442eb2 | 10b06c78ac7485f4fe9fe894e7d6fdc65b9c91aa | refs/heads/master | 2021-05-05T05:15:57.199876 | 2020-01-31T05:10:03 | 2020-01-31T05:10:03 | 17,698,978 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 594 | rd | sdm_asc_from_raster.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sdm_asc_from_raster.R
\name{sdm_asc_from_raster}
\alias{sdm_asc_from_raster}
\title{Raster conversion functions}
\usage{
sdm_asc_from_raster(x)
}
\arguments{
\item{x}{is an object of class 'RasterLayer'}
}
\value{
Returns an object of class requested.
}
\description{
Raster conversion functions
}
\details{
\code{sdm_asc_from_raster} is an adaptation of
\code{asc.from.raster} from SDMTools; extracts data from objects of
class 'RasterLayer' (raster package) into an object of class 'asc'.
}
\author{
Gopi Goteti
}
|
32462b8f9b956852e623e2992887eddbf754e4df | 320d311d89bfa6eb6b60effaa49194337554c097 | /2-communication/1-rscripts/archive/explore_transit_stops.R | f6b9032347c7792b51fa5c7a2fffead27d0bd72d | [
"MIT"
] | permissive | tiernanmartin/home-and-hope | a75c4354abca7554a278317989edac71228c3ef5 | 4c26217ad5366e8b673a536fd2670074b25bbcf9 | refs/heads/master | 2021-01-23T17:39:22.577037 | 2018-09-24T22:44:53 | 2018-09-25T15:27:33 | 102,772,219 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,138 | r | explore_transit_stops.R | library(sf)
library(osmdata)
library(tidyverse)
library(miscgis)
library(mapview)
library(visdat)
library(snakecase)
library(skimr)
q <- opq(bbox = "King County, Washington") %>%
add_osm_feature(key = "public_transport", value = "",value_exact = FALSE)
transit_pts <- q %>%
osmdata_sf() %>%
pluck("osm_points") %>%
rename_all(to_screaming_snake_case)
transit_pts %>%
st_drop_geometry() %>%
vis_dat()
transit_pts %>%
st_drop_geometry() %>%
select(OSM_ID, NAME,BUS, TRAIN, TRAM, TROLLEYBUS, FERRY, PUBLIC_TRANSPORT, SOURCE) %>%
skim
transit_pts %>%
st_drop_geometry() %>%
count(PUBLIC_TRANSPORT, sort = TRUE)
trns_pts <- transmute(transit_pts,
OSM_ID = as.character(OSM_ID))
ready <-
transit_pts %>%
st_drop_geometry() %>%
select(OSM_ID ,NAME,BUS, TRAIN, TRAM, TROLLEYBUS, FERRY, PUBLIC_TRANSPORT, SOURCE) %>%
gather(TRANSIT_TYPE, VALUE, -OSM_ID, -NAME,-PUBLIC_TRANSPORT, -SOURCE) %>%
filter(!is.na(VALUE)) %>%
mutate_if(is.factor, as.character) %>%
left_join(trns_pts, by = "OSM_ID") %>%
st_sf
mapview(ready, zcol = "TRANSIT_TYPE", legend = TRUE)
|
059836ebd6ef8f3e1e45cbfc28924d633f49a622 | 7adbcf6de8013c848f3b39975a945a7de8a556c5 | /R/io.R | cab4f3dfbb243b92a85d40f72e6f087e857b1fdf | [] | no_license | zietzm/argyle | 7401f3c042c3a95c98252786f49498d8d9301359 | e665c54c1edcebe534d9d661163bb7cb413bc5fe | refs/heads/master | 2022-04-17T15:39:48.513880 | 2020-04-15T00:07:30 | 2020-04-15T00:07:30 | 255,756,666 | 0 | 0 | null | 2020-04-14T23:59:56 | 2020-04-14T23:59:55 | null | UTF-8 | R | false | false | 25,888 | r | io.R | ## io.R
## functions for import and export of data from 'genotypes' objects
#' Read genotype calls and hybridization from Illumina BeadStudio output.
#'
#' @param prefix filename prefix, without working directory: the \code{*} in \code{*_FinalReport.zip}
#' @param snps dataframe containing marker map for this array, in PLINK's \code{*.bim} format
#' (chromosome, marker name, cM position, bp position); rownames should be set to marker names,
#' and those names should match those in the BeadStudio output.
#' @param in.path directory in which to search for input files
#' @param keep.intensity should hybridization intensities be kept in addition to genotype calls?
#' @param colmap named character vector mapping column names in \code{*FinalReport} to required columns
#' for \code{argyle} (see Details)
#' @param verify logical; if \code{TRUE}, check that \code{FinalReport} file is of expected size
#' @param checksum logical; if \code{TRUE}, generate an md5 checksum for the result
#' @param ... ignored
#'
#' @return A \code{genotypes} object with genotype calls, marker map, sample metadata and (as requested)
#' intensity data.
#'
#' @details This function initializes a \code{genotypes} object from Illumina BeadStudio output. (For an
#' example of the format, see the files in this package's \code{data/} directory.) The two relevant
#' files are \code{Sample_Map.zip} and \code{*FinalReport.zip}, which contain the sample manifest
#' and genotype/intensity data, respectively. On platforms with \code{unzip} available on the
#' command line, files will be unzipped on the fly. Otherwise \code{FinalReport.zip} (but not
#' \code{Sample_Map.zip}) must be unzipped first. This is due to the use of \code{data.table} to
#' handle the usually very large genotypes file.
#'
#' Use the \code{colmap} vector to assign column names in the \code{*FinalReport} file to the required
#' columns for argyle. The required columns are \code{iid} (individual ID), \code{marker} (SNP/marker name),
#' \code{call1} (allele 1, in the same strand as in the marker map), \code{call2} (allele 2, in the
#' same strand as in the marker map), \code{x} (hybridization x-intensity) and \code{y} (hybridization
#' y-intensity). The default column mapping is:
#' \itemize{
#' \item \code{SNP Name} = \code{marker}
#' \item \code{Sample ID} = \code{iid}
#' \item \code{Allele1 - Forward} = \code{call1}
#' \item \code{Allele2 - Forward} = \code{call2}
#' \item \code{X} = \code{x}
#' \item \code{Y} = \code{y}
#' }
#' Note that \code{colmap} must be a named character vector, with old column headers in the \code{names()}
#' and new column names in the vector itself: eg. write \code{colmap = setNames( new, old )}. An error
#' will be thrown if the column mapping does not provide enough information to read the input properly.
#' Particular attention should be paid to the encoding of the alleles in the \code{snps} object, which
#' will be platform-specific. For users of the Mouse Universal Genotyping Array series from Neogen Inc,
#' alleles \code{A1,A2} in \code{snps} will be on the forward strand, so columns \code{Allele * - Forward}
#' (not \code{Allele * - Top} or \code{Allele * - AB}) are the ones to use.
#'
#' The behavior of this function with respect to missing data in the genotypes versus the contents
#' of \code{snps} is asymmetric. Markers in \code{snps} which are absent in the input files will
#' be present in the output, but with missing calls and intensities. Markers in the input files
#' which are missing from \code{snps} will simply be dropped. If that occurs, check that the marker
#' names in \code{snps} match exactly those in the input file.
#'
#' Provenance of the resulting object can be traced by checking \code{attr(,"source")}. For the paranoid,
#' a timestamp and checksum are provided in \code{attr(,"timestamp")} and \code{attr(,"md5")}.
#'
#' @references
#' Inspiration from Dan Gatti's DOQTL package: <https://github.com/dmgatti/DOQTL/blob/master/R/extract.raw.data.R>
#'
#' @export
read.beadstudio <- function(prefix, snps, in.path = ".", keep.intensity = TRUE, colmap = NULL, verify = TRUE, checksum = TRUE, ...) {

    ## stop here if marker map is not well-formed
    ## (PLINK *.bim layout: columns <chr,marker,cM,pos>, rownames == marker)
    if (!.is.valid.map(snps)) {
        if (!all(rownames(snps) == snps$marker))
            stop(paste("Marker manifest is not well-formed. It should follow the format of a PLINK",
                       "*.bim file: a dataframe with columns <chr,marker,cM,pos> with rownames",
                       "same as 'marker' column. If genetic positions are unknown, set them to zero."))
    }

    ## read files from Illumina BeadStudio:
    ## returns list(samples = <manifest data.frame>, intens = <long data.table>)
    data <- .read.illumina.raw(prefix, in.path, colmap, check.dims = verify)
    ## strip spaces from sample IDs so they match the matrix column names below
    rownames(data$samples) <- gsub(" ","", rownames(data$samples))

    ## convert to matrices using data.table's optimized code
    message("Constructing genotype matrix...")
    calls <- .raw.to.matrix(data$intens, snps, keep.map = TRUE, value.col = "call")
    if (keep.intensity) {
        message("Constructing intensity matrices...")
        x <- .raw.to.matrix(data$intens, snps, value.col = "x", keep.map = FALSE)
        y <- .raw.to.matrix(data$intens, snps, value.col = "y", keep.map = FALSE)
        ## verify that shapes match
        ## NOTE(review): the results of these all() calls are discarded, so a
        ## mismatch is not reported here; the validate.genotypes() call at the
        ## end is what actually warns the user -- confirm this is intentional.
        all(dim(calls) == dim(x), dim(calls) == dim(y))
        ## verify that sample names are in sync
        all(colnames(calls) == colnames(x), colnames(calls) == colnames(y))
        ## verify that marker names are in sync
        all(rownames(calls) == rownames(x), rownames(calls) == rownames(y))
    }
    message(paste("\t", nrow(calls), "sites x", ncol(calls), "samples"))

    ## build a PLINK-style sample table, carrying over sex where recorded
    samples.kept <- colnames(calls)
    fam <- make.fam(samples.kept, sex = data$samples[ samples.kept,"Gender" ])

    ## construct the return 'genotypes' object (intensities attached if kept)
    if (keep.intensity) {
        calls <- genotypes(.copy.matrix.noattr(calls),
                           map = attr(calls, "map"), ped = fam,
                           alleles = "native",
                           intensity = list(x = x, y = y), normalized = FALSE,
                           check = FALSE)
    }
    else {
        calls <- genotypes(.copy.matrix.noattr(calls),
                           map = attr(calls, "map"), ped = fam,
                           alleles = "native",
                           check = FALSE)
    }

    ## make a checksum, then record file source and timestamp (which would mess up checksum comparisons)
    if (checksum)
        attr(calls, "md5") <- digest::digest(calls, algo = "md5")
    attr(calls, "source") <- normalizePath(file.path(in.path))
    attr(calls, "timestamp") <- Sys.time()

    ## check that all pieces of result have matching dimensions, names, ...
    if (!validate.genotypes(calls)) {
        warning("The assembled genotypes object failed validation. See messages for possible reasons.")
    }

    message("Done.")
    return(calls)
}
## process files from BeadStudio into a dataframe (of samples) and data.table (of calls/intensities)
## Returns list(samples = <sample manifest data.frame, rownames uniquified>,
##              intens  = <long-format data.table keyed by marker>).
.read.illumina.raw <- function(prefix, in.path = ".", colmap = NULL, check.dims = FALSE, ...) {

    if (length(paste(prefix, in.path)) > 1)
        stop("Only read one batch at a time. To handle multiple batches, wrap with an lapply().")

    ## Get the sample IDs from the Sample_Map.txt file.
    rawfile <- dir(path = in.path, pattern = "Sample_Map", full.names = TRUE)
    infile <- rawfile[ grep("zip$", rawfile) ]
    as.zip <- TRUE
    ## If not found, then quit.
    if (length(infile) == 0) {
        infile <- rawfile[ grep("txt$", rawfile) ]
        if (length(infile) == 0) {
            stop(paste("No file with 'Sample_Map' in the filename was found in directory",
                       in.path))
        }
        else {
            as.zip <- FALSE
        }
    }

    samplefile <- infile
    if (as.zip)
        samplefile <- unz(infile, "Sample_Map.txt")
    message(paste("Reading sample manifest from <", infile, "> ..."))
    samples.df <- read.delim(samplefile, stringsAsFactors = FALSE)
    ## handle case of duplicated IDs: make.unique() appends .1, .2, ...
    renamer <- make.unique(as.character(samples.df$Name))
    rownames(samples.df) <- renamer
    nsamp <- length(renamer)

    ## Find a file with "FinalReport" in the filename.
    rawfile <- dir(path = in.path, pattern = "FinalReport", full.names = TRUE)
    infile <- rawfile[ grep("zip$", rawfile) ]
    as.zip <- TRUE
    ## If not found, then quit.
    if (length(infile) == 0) {
        infile <- rawfile[ grep("txt$", rawfile) ]
        if (length(infile) == 0) {
            stop(paste("No file with 'FinalReport' in the filename was found in directory",
                       in.path, ". Please make sure that the FinalReport file is",
                       "in the specified directory, and that there is exactly one."))
        }
        else {
            as.zip <- FALSE
        }
    }

    ## If there is more than one FinalReport file, then quit.
    if (length(infile) > 1) {
        stop(paste("There is more than one file with FinalReport in the filename.",
                   "Please place only one data set in each directory. If you have",
                   "unzipped the file, keep the original in a different directory."))
    }

    ## try to unzip on fly (fread needs a file path or shell command, not unz())
    piper <- infile
    if (as.zip) {
        ## check that system supports unzip
        piper <- paste0("unzip -ap '", infile, "'")
        rez <- system(paste(piper, "| head -n 10"), intern = FALSE, ignore.stdout = TRUE, ignore.stderr = TRUE)
        if (rez) {
            ## nope, system can't unzip
            stop(paste("Looks like this system doesn't support on-the-fly decompression: please unzip the",
                       "FinalReport file manually and try again."))
        }
    }

    ## first check header to get # of SNPs
    genofile <- infile
    if (as.zip)
        genofile <- unz(infile, gsub("\\.zip$",".txt", basename(infile)))
    header <- read.delim(genofile, header = FALSE, sep = "\t", nrows = 8, skip = 1,
                         colClasses = "character", stringsAsFactors = FALSE)
    if (!all(ncol(header) >= 2, "NUM SNPS" %in% toupper(header[,1])))
        stop("Header of FinalReport file should be key-value pairs, one of which is 'Total SNPs {x}'")
    header <- setNames( header[,2], toupper(header[,1]) )
    nsnps <- as.integer(header["NUM SNPS"])
    if (!(is.numeric(nsnps) && nsnps > 0))
        stop("Can't understand number of markers to read; header says '", nsnps, "'")

    ## slurp file into a data.table, skipping the 9 header lines
    message(paste("Reading genotypes and intensities for", nsnps, "markers x",
                  nsamp, "samples from <", infile, "> ..."))
    data <- data.table::fread(piper, skip = 9, showProgress = interactive(), stringsAsFactors = FALSE, sep = "\t")

    ## Construct the column-naming map (defaults match Illumina's headers)
    cols.needed <- c("marker","iid","x","y","call1","call2")
    if (is.null(colmap)) {
        colmap <- c("SNP Name" = "marker",
                    "Sample ID" = "iid",
                    "X" = "x","Y" = "y",
                    "Allele1 - Forward" = "call1",
                    "Allele2 - Forward" = "call2")
    }

    ## Check that all required columns are specified in the map
    columns <- match(cols.needed, colmap)
    if (any(is.na(columns))) {
        stop(paste0("The column-name map must specify columns mapping to each of the following:\n",
                    "'iid' (individual ID), 'marker' (SNP/marker name), 'x' (x-intensity), 'y' (y-intensity),\n",
                    "''call1' (allele 1, in same strand as specified in marker map), and 'call2' (allele 2).\n\n",
                    "You are missing the following: ", paste(colmap[is.na(columns)], sep = ", ")))
    }

    ## Check that all mapped columns are present in the input
    columns <- match(names(colmap), names(data))
    if (any(is.na(columns))) {
        stop(paste("All of the required column names were not found in the",
                   "FinalReport file. The missing column(s) are:", paste(
                       names(colmap)[is.na(columns)], collapse = ", ")))
    }

    ## assign new column names using column map
    data.table::setnames(data, names(colmap), colmap)

    ## check that data is of expected size
    if (check.dims) {
        if (nrow(data) != (nsnps*nsamp))
            stop(paste0("Row count of genotype data (", nrow(data), ") doesn't match what was expected (", nsnps, " x ", nsamp, " = ", nsnps*nsamp, ").",
                        " FinalReport file might be corrupt."))
    }
    else {
        # live dangerously
    }

    ## rename samples by index: FinalReport rows come grouped by sample, one
    ## row per marker, so repeat each (uniquified) sample name nsnps times.
    ## BUGFIX: removed the dead assignment `nsamples <- length(samples)` that
    ## used to sit here -- `samples` is not defined in this function (the
    ## manifest is `samples.df`) and the value was never used; also removed a
    ## commented-out debug print.
    newids <- rep(renamer, 1, each = nsnps)
    data.table::set(data, i = NULL, "iid", newids)

    ## convert 2-column allele calls to single column; mark hets, missing, etc.
    data.table::set(data, i = NULL, "call", paste0(data$call1, data$call2))
    data.table::set(data, i = NULL, "is.het", (data$call1 != data$call2))
    data.table::set(data, i = NULL, "is.na", (data$call1 == "-" | data$call2 == "-"))
    data.table::set(data, i = which(data$is.het), "call", "H")
    data.table::set(data, i = which(data$is.na), "call", "N")
    data.table::set(data, i = NULL, "call", substr(data$call, 1, 1))

    ## pre-key by marker (SNP name) for next step
    data.table::setkey(data, marker)

    return( list(samples = samples.df, intens = data) )
}
## convert data.table of calls/intensities to a (sites x samples) matrix,
## sorted by genomic position, optionally with the marker map attached as
## attr(,"map")
.raw.to.matrix <- function(data, snps, keep.map = FALSE, make.names = FALSE, verbose = FALSE,
                           sample.id.col = "iid", value.col = "call", ...) {

    if (!inherits(data, "data.table"))
        stop("Input should be an object of class 'data.table'.")

    ## strip column names which might conflict between input and marker map
    if ("cM" %in% colnames(data))
        data.table::set(data, i = NULL, "cM", NULL)
    if ("chr" %in% colnames(data))
        data.table::set(data, i = NULL, "chr", NULL)
    if ("pos" %in% colnames(data))
        data.table::set(data, i = NULL, "pos", NULL)

    if (make.names)
        data.table::set(data, i = NULL, "marker", make.names(data$marker))

    ## reshape to big matrix: one row per marker, one column per sample
    fm <- paste("marker ~", sample.id.col)
    gty.mat <- data.table::dcast.data.table(data, as.formula(fm), value.var = value.col)
    data.table::setkey(gty.mat, marker)
    before <- unique(gty.mat$marker)
    nsnps.before <- length(unique(gty.mat$marker))
    .map <- data.table::data.table(snps[ ,c("chr","marker","cM","pos") ])
    data.table::setkey(.map, marker)
    if (verbose)
        message(paste("Attaching map: started with", nsnps.before, "markers"))
    ## inner join on marker name: markers absent from the map are dropped here
    gty.mat <- data.table:::merge.data.table(gty.mat, .map)
    nsnps.after <- length(unique(gty.mat$marker))
    if (verbose)
        message(paste("Done attaching map: ended with", nsnps.after, "markers"))
    if (nsnps.before != nsnps.after && verbose) {
        ## BUGFIX: the drop count was previously computed as
        ## (nsnps.after - nsnps.before), which is negative whenever markers
        ## are dropped -- so the full marker list was always printed and the
        ## summary branch would have reported a negative count. Use
        ## before - after instead.
        if (nsnps.before - nsnps.after < 100) {
            message(paste("Dropped the following markers:"))
            message( paste(setdiff(before, gty.mat$marker) , collapse = ",") )
        }
        else {
            message(paste("\tdropped", (nsnps.before-nsnps.after), "markers."))
        }
    }

    ## sort by position and move the map columns to the front
    data.table::setorder(gty.mat, chr, pos, cM, marker)
    cn <- names(gty.mat)
    cols <- c("chr","marker","cM","pos")
    oth <- setdiff(cn, cols)
    data.table::setcolorder(gty.mat, c(cols, oth))

    ## demote back to dataframe, splitting the map columns from the calls
    gty.mat <- as.data.frame(gty.mat)
    newmap <- gty.mat[ ,1:4, drop = FALSE ]
    rownames(newmap) <- as.character(newmap$marker)
    ## carry over any extra map columns (alleles etc.) from the input manifest
    newmap <- data.frame(newmap, snps[ rownames(newmap),!(colnames(snps) %in% c("chr","marker","cM","pos")) ])
    gty.mat <- as.matrix(gty.mat[ ,-(1:4), drop = FALSE ])
    rownames(gty.mat) <- as.character(newmap$marker)
    ## strip spaces from sample names to match the sample manifest
    colnames(gty.mat) <- gsub(" ","", colnames(gty.mat))

    if (keep.map)
        attr(gty.mat, "map") <- newmap
    return(gty.mat)
}
#' Export genotyping result in format suitable for DOQTL
#'
#' @param gty a \code{genotypes} object with intensity data attached
#' @param where name of output file, including path (else it goes in working directory)
#' @param recode if \code{TRUE}, genotype calls will be recoded 0/1/2 with respect to reference alleles
#' before the genotypes matrix is saved
#' @param ... ignored
#'
#' @return Returns \code{TRUE} on completion. The Rdata file at \code{where} contains the following
#' objects:
#' \itemize{
#' \code{G} -- genotype calls matrix
#' \code{x} -- matrix of x-intensities
#' \code{y} -- matrix of y-intensities
#' \code{sex} -- named vector of sample sexes (\code{NA} if missing)
#' \code{snps} -- the marker map attached to the input object
#' }
#' All matrices are sites x samples, following the convention of this package, and have both row and column names.
#'
#' @references
#' DOQTL home: \url{http://cgd.jax.org/apps/doqtl/DOQTL.shtml}
#'
#' Gatti DM et al. (2014) Quantitative trait locus mapping methods for Diversity Outbred mice. G3 4(9): 1623-1633. doi:10.1534/g3.114.013748.
#'
#' Svenson KL et al. (2012) High-resolution genetic mapping using the mouse Diversity Outbred population. Genetics 190(2): 437-447. doi:10.1534/genetics.111.132597.
#'
#' @export
export.doqtl <- function(gty, where = "doqtl.Rdata", recode = FALSE, ...) {

    ## DOQTL needs the genotype matrix plus x/y intensity matrices
    if (!(inherits(gty, "genotypes") && .has.valid.intensity(gty)))
        stop("To export stuff for DOQTL, need (1) genotype matrix; (2) intensity matrices; (3) marker map.")

    message("Preparing objects...")
    where <- file.path(where)
    x <- attr(gty, "intensity")$x
    y <- attr(gty, "intensity")$y
    ## genotype matrix, optionally recoded 0/1/2 against reference alleles
    if (!recode)
        G <- .copy.matrix.noattr(gty)
    else
        G <- .copy.matrix.noattr(recode.genotypes(gty, "01"))
    ## sex vector: default NA, filled from the ped table when available;
    ## PLINK codes (1 = male, 2 = female, 0 = unknown) become M/F/NA
    sex <- setNames( rep(NA, ncol(G)), colnames(G) )
    if (.has.valid.ped(gty) && "sex" %in% colnames(attr(gty, "ped")))
        sex[ rownames(attr(gty, "ped")) ] <- as.character(attr(gty, "ped")$sex)
    sex[ sex == "1" ] <- "M"
    sex[ sex == "2" ] <- "F"
    sex[ sex == "0" ] <- NA
    #print(sex)
    snps <- attr(gty, "map")

    ## side effect: writes x, y, G, sex, snps into an Rdata file at `where`
    message(paste("Saving DOQTL input objects in <", where, ">..."))
    save(x, y, G, sex, snps, file = where)
    message("Done.")
    invisible(TRUE)
}
#' Convert a \code{genotypes} object to an \code{R/qtl} object
#'
#' @param gty a \code{genotypes} object
#' @param type cross type for \code{R/qtl} (only \code{"bc"} [backcross] and \code{"f2"} [F2 intercross] currently supported)
#' @param chroms vector of chromosome names to include in output (in order)
#' @param ... ignored
#'
#' @return an object of class \code{cross}, with the specified cross type
#'
#' @details Karl Broman's \code{R/qtl} is a widely-used package for mapping quantitative traits
#' in experimental crosses of laboratory organisms and crop plants. It expects genotypes to
#' be coded with respect to parental lines: eg. AA, AB, BB for an F2 cross between (true-breeding)
#' lines A and B. Be sure to recode genotypes in that manner before passing them to this function.
#'
#' Marker positions in \code{R/qtl} are expressed in centimorgans, not basepairs, so only markers with
#' non-zero, non-missing genetic positions will be included in the output of this function.
#'
#' @references
#' \code{R/qtl}: \url{http://www.rqtl.org}
#'
#' Broman KW, Wu H, Sen S, Churchill GA. (2003) R/qtl: QTL mapping in experimental crosses.
#' Bioinformatics 19:889-890. doi:10.1093/bioinformatics/btg112.
#'
#' Broman KW, Sen S. (2009) A Guide to QTL Mapping with R/qtl. Springer, New York.
#'
#' @seealso \code{\link[qtl]{read.cross}}, \code{\link{as.genotypes.cross}} (for inverse operation)
#'
#' @export as.rqtl
as.rqtl <- function(gty, type = c("f2","bc"), chroms = paste0("chr", c(1:19,"X")), ...) {

    if (!(inherits(gty, "genotypes") && .has.valid.map(gty) && .has.valid.ped(gty)))
        stop("Please supply an object of class 'genotypes' with valid marker map and sample metadata.")
    if (!(is.numeric(gty) && attr(gty, "alleles") == "parent"))
        warning(paste("For export to R/qtl, genotypes should be encoded numerically, and by reference to",
                      "the parental strains of a cross. See ?recode.to.parent."))
    type <- match.arg(type)

    ## dump intensity data
    gty <- drop.intensity(gty)
    ## drop unfamiliar chromosomes and positionless markers (R/qtl uses cM)
    gty <- subset(gty, chr %in% chroms & !is.na(cM) & cM > 0)
    map <- attr(gty, "map")
    map$chr <- factor(map$chr, levels = chroms)
    map <- droplevels(map)
    message(paste("Exporting genotypes at", nrow(gty), "markers on",
                  length(unique(map$chr)), "chromosomes."))

    ## NB: a dead computation of a male/female factor from the ped sex column
    ## was removed here -- its result was never used; sex reaches R/qtl via
    ## the phenotype table built below.

    ## build one element of the R/qtl 'geno' list: genotypes shifted to 1..3,
    ## transposed to (samples x markers), plus a named cM map; class "X"/"A"
    ## flags the X chromosome for R/qtl
    .make.rqtl <- function(cc) {
        g <- .copy.matrix.noattr(subset(gty, chr == cc))
        g <- matrix(t(g)+1,
                    nrow = ncol(g), ncol = nrow(g),
                    dimnames = list(colnames(g), rownames(g)))
        m <- subset(map, chr == cc)
        this.chr <- list(data = g,
                         map = setNames( as.vector(m$cM), as.character(m$marker) ))
        if (grepl("X", cc))
            class(this.chr) <- "X"
        else
            class(this.chr) <- "A"
        return(this.chr)
    }

    ## loop on chromosomes
    message("Converting genotypes...")
    geno <- lapply(levels(map$chr), .make.rqtl)
    names(geno) <- gsub("^chr","", levels(map$chr))
    rez <- list(geno = geno)

    ## convert PLINK to R/qtl style of phenotype definition:
    ## missing phenotype -9 -> NA; sex 0 (unknown) -> NA, 2 (female) -> 0
    pheno <- attr(gty, "ped")
    pheno$pheno[ pheno$pheno == -9 ] <- NA
    newsex <- pheno$sex
    newsex[ newsex == 0 ] <- NA
    newsex[ newsex == 2 ] <- 0
    pheno$sex <- newsex
    rez$pheno <- pheno[ ,c("pheno","sex",setdiff(colnames(pheno), c("pheno","sex"))) ]

    class(rez) <- c(type,"cross")
    attr(rez, "alleles") <- c("A","B")
    message("Done.")
    return(rez)
}
#' Convert an \code{R/qtl} object to a \code{genotypes} object
#'
#' @param x a \code{qtl::cross} object
#' @param ... ignored
#'
#' @return an object of class \code{genotypes}
#'
#' @details Karl Broman's \code{R/qtl} is a widely-used package for mapping quantitative traits
#' in experimental crosses of laboratory organisms and crop plants. It expects genotypes to
#' be coded with respect to parental lines: eg. AA, AB, BB for an F2 cross between (true-breeding)
#' lines A and B. Be sure to recode genotypes in that manner before passing them to this function.
#'
#' Marker positions in \code{R/qtl} are expressed in centimorgans, not basepairs. On conversion,
#' physical positions are faked by assuming recombination rate 1 cM per 1 Mbp and rounding to
#' the next-lowest integer.
#'
#' Only crosses of type \code{"f2"} (F2 intercross) or \code{"bc"} are supported, and partially-
#' informative genotypes will probably be mangled.
#'
#' @references
#' \code{R/qtl}: \url{http://www.rqtl.org}
#'
#' Broman KW, Wu H, Sen S, Churchill GA. (2003) R/qtl: QTL mapping in experimental crosses.
#' Bioinformatics 19:889-890. doi:10.1093/bioinformatics/btg112.
#'
#' Broman KW, Sen S. (2009) A Guide to QTL Mapping with R/qtl. Springer, New York.
#'
#' @seealso \code{\link[qtl]{read.cross}}, \code{\link{as.rqtl.genotypes}} (for inverse operation)
#'
#' @export as.genotypes
as.genotypes <- function(x, ...) {

    ## BUGFIX: the original condition `!inherits(x, "cross") && any(...)`
    ## almost never triggered (it required x to be simultaneously not a cross
    ## and an f2/bc); reject anything that is not an R/qtl cross of type
    ## "f2" or "bc".
    if (!(inherits(x, "cross") && (inherits(x, "f2") || inherits(x, "bc"))))
        stop("Please supply an object of class 'cross' (from KW Broman's R/qtl package).")

    ## stack per-chromosome genotype matrices into (markers x samples),
    ## shifting R/qtl's 1..3 coding down to 0..2
    geno <- do.call( rbind, lapply(x$geno, function(chr) t(chr$data)) )
    geno <- geno-1

    ## rebuild the marker map; physical positions are faked at 1 cM = 1 Mbp
    map <- do.call( rbind, lapply(names(x$geno), function(chr) data.frame(chr = chr, marker = names(x$geno[[chr]]$map),
                                                                          cM = x$geno[[chr]]$map, pos = floor(x$geno[[chr]]$map*1e6))) )
    map$A1 <- attr(x, "alleles")[1]
    map$A2 <- attr(x, "alleles")[2]
    if (!any(grepl("^chr", map$chr)))
        map$chr <- paste0("chr", map$chr)

    ## sample metadata: carry sex plus any extra phenotype columns across
    fam <- make.fam(rownames(x$pheno))
    if (!is.null(x$pheno$sex))
        fam$sex <- .fix.sex(x$pheno$sex)
    ## BUGFIX: was `setdiff(colnames(x), ...)` -- colnames() of a cross (a
    ## list) is NULL, so extra phenotype columns were silently never copied;
    ## the phenotype dataframe is x$pheno.
    for (col in setdiff(colnames(x$pheno), c("sex","pgm","id","ID")))
        fam[ ,col ] <- x$pheno[ ,col ]
    fam$pheno <- x$pheno[,1]

    colnames(geno) <- as.character(rownames(fam))
    rez <- genotypes(geno, map = map, ped = fam, alleles = "01")
    return(rez)
}
#' Export genotypes in Stanford HGDP format
#'
#' @param gty a \code{genotypes} object
#' @param prefix filename prefix for output; result will be two files, \code{{prefix}.geno} and
#' with genotypes and \code{{prefix}.map} with marker map.
#' @param ... ignored
#'
#' @details Write genotypes to disk in the Stanford HGDP format, which can be read by (among
#' others) the PGDSpider format-conversion suite.
#'
#' @references
#' Lischer HEL and Excoffier L (2012) PGDSpider: An automated data conversion tool for connecting
#' population genetics and genomics programs. Bioinformatics 28: 298-299.
#'
#' @export write.hgdp
write.hgdp <- function(gty, prefix, ...) {

    if (!(inherits(gty, "genotypes") && .has.valid.map(gty)))
        stop("Please supply an object of class 'genotypes' which includes a valid marker map.")

    ## convert genotypes to numeric (0 = A1/A1, 1 = het, 2 = A2/A2)
    gty <- recode.genotypes(gty, "01")

    ## now convert to HGDP style (AA, AB, BB, -)
    message("Converting genotypes to HGDP encoding (AA/AB/BB/-)...")
    out <- matrix("-", nrow = nrow(gty), ncol = ncol(gty),
                  dimnames = dimnames(gty))
    ## per-marker diploid genotype strings depend only on the marker map, so
    ## compute them once up front (previously recomputed inside the loop for
    ## every sample column)
    majr <- with(attr(gty, "map"), paste0(A1, A1))
    hetr <- with(attr(gty, "map"), paste0(A1, A2))
    minr <- with(attr(gty, "map"), paste0(A2, A2))
    for (i in seq_len(ncol(gty))) {
        is.miss <- is.na(gty[ ,i ])
        is.maj <- gty[ ,i ] == 0 & !is.miss
        is.het <- gty[ ,i ] == 1 & !is.miss
        is.min <- gty[ ,i ] == 2 & !is.miss
        out[ is.maj,i ] <- majr[is.maj]
        out[ is.het,i ] <- hetr[is.het]
        out[ is.min,i ] <- minr[is.min]
        out[ is.miss,i ] <- "-"
    }
    ## tab-prefix on the first column name keeps the header aligned with the
    ## rownames column in the tab-delimited output
    colnames(out)[1] <- paste0("\t", colnames(out)[1])
    write.table(out, paste0(prefix, ".geno"), col.names = TRUE, row.names = TRUE,
                quote = FALSE, sep = "\t")

    ## prepare marker map
    message("Writing marker map...")
    write.table(attr(gty, "map")[ ,c("marker","chr","pos") ], paste0(prefix, ".map"),
                col.names = FALSE, row.names = FALSE,
                quote = FALSE, sep = "\t")

    message("Done.")
    invisible(TRUE)
}
#' Convert genotypes to a dataframe
#'
#' @param gty a \code{genotypes} object
#' @param ... ignored
#'
#' @return a \code{data.frame} with marker information in the leftmost columns, followed by a
#' matrix of genotypes with samples in columns
#'
#' @details In general the dataframe will be a less-efficient way to store genotypes, but is a
#' useful intermediate for writing genotypes to disk in a human-readable format.
#'
#' @export
as.data.frame.genotypes <- function(gty, ...) {
    if (!(inherits(gty, "genotypes") && .has.valid.map(gty)))
        stop("Please supply an object of class 'genotypes' which includes a valid marker map.")
    ## marker metadata goes in the leftmost columns, then the bare call matrix
    marker.info <- attr(gty, "map")[ ,c("chr","pos","cM","marker","A1","A2") ]
    calls <- .copy.matrix.noattr(gty)
    calls.df <- as.data.frame(calls)
    colnames(calls.df) <- colnames(calls)
    cbind(marker.info, calls.df)
}
#as.data.frame <- function(x, ...) UseMethod("as.data.frame") |
af185f83d0969a3e84fdea8e443a90ab8e2d6789 | cef5a010d1d31a85c40ad699a74ef6a0deb54abf | /analysis/calculate_co-occurence.R | 3355c7a17d5feac31d3e20ebc9740b5c27a57cfd | [] | no_license | stefanavey/datahack-team6 | 297dce8598d98f9ee98fb29026f1f759fafe82a4 | 1ffc8bb04aa255b1243fece2df66281a9f8ca9db | refs/heads/master | 2021-01-19T13:29:58.587090 | 2017-03-02T01:28:41 | 2017-03-02T01:28:41 | 82,393,778 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 362 | r | calculate_co-occurence.R | ## Calculate co-occurence of officers across complaints
library(tidyverse)
library(lineprof)
source("R/calc_network.R")
complaint <- read_csv("data/toy.complaint_data.csv")
net <- calc_network(complaint, group = "crid", indicator = "officer_id")
colnames(net) <- c("officer_id.1", "officer_id.2", "complaints")
write_csv(net, "co_occurence_complaints.csv")
|
0232591987b2d67fb94cb275a0463ae233f0ae08 | 8585dd8814d82d9a0870804d8a5acf9ad650d0ed | /man/coefbounds.Rd | 773ccb5d44ecd9e2937bedb9a062f21389172576 | [] | no_license | brentonk/coefbounds | 7500d38188c87b41c2b6ebdbef5f1d5f04517dce | 7c7b65a7d34ecec01ac6a6f1062c4eeab24cab08 | refs/heads/master | 2021-01-17T19:17:25.817055 | 2016-06-28T21:33:03 | 2016-06-28T21:33:03 | 59,677,826 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,620 | rd | coefbounds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coefbounds.r
\name{coefbounds}
\alias{coefbounds}
\title{Coefficient bounds for linear models}
\usage{
coefbounds(formula, data, subset, na.action, model = c("linear", "logit"),
boot = 100, cluster_id = NULL, maxit = 10, remove_collinear = TRUE,
return_boot_est = FALSE)
}
\arguments{
\item{formula}{Model formula of the form \code{yl + yu ~ x1 + x2 + ...},
where \code{yl} is the lower bound on the response, \code{yu} is the
upper bound, and \code{x1 + x2 + ...} are the covariates. For
instrumental variables estimation, use a formula like \code{yl + yu ~ x1
+ x2 + ... | z1 + z2 + ...}, as in \code{\link[AER]{ivreg}} in the
\pkg{AER} package. IV estimates not available for logit models.}
\item{data, subset, na.action}{As in \code{\link{lm}}}
\item{model}{\code{"linear"} for linear regression (default), \code{"logit"}
for logistic regression.}
\item{boot}{Number of bootstrap iterations used to estimate the critical
values for inference.}
\item{cluster_id}{Vector of cluster IDs for cluster bootstrap. If
\code{NULL} (the default), an ordinary bootstrap is used.}
\item{maxit}{Maximum number of iterations for the approximation in logistic
regression models. Ignored when \code{model = "linear"}.}
\item{remove_collinear}{How to treat boostrap iterations in which the design
matrix is rank-deficient. If \code{TRUE} (the default), a warning is
issued and the bad iterations are removed. If \code{FALSE}, the
function fails with an error when a rank-deficient design matrix is
encountered.}
\item{return_boot_est}{Whether to include the bootstrap estimates of the
coefficient bounds in the returned object.}
}
\value{
A list of class \code{"coefbounds"} containing: \describe{
\item{\code{coefficients}}{Matrix containing the sample estimates of the
coefficient bounds.}
\item{\code{dist}}{List of matrices containing the
bootstrap Hausdorff distances (undirected and directed) used for
inference.}
\item{\code{boot_est}}{(if requested) List of matrices of
bootstrap estimates of the coefficient bounds.}
\item{\code{nobs}}{Number of observations used in fitting.}
\item{\code{call}}{Original function call.}
\item{\code{model}}{Model used.}
}
}
\description{
Estimates the projections of the identification region along each
coefficient dimension for a linear or logistic regression model with
interval-censored outcomes (Beresteanu and Molinari 2008, Corollary 4.5).
If requested, uses a nonparametric bootstrap to estimate critical values for
hypothesis tests about these projections (Beresteanu and Molinari 2008,
Algorithm 4.2).
}
\details{
In the linear case, implements largely the same functionality as
\code{oneDproj} and \code{CI1D} in Beresteanu et al.'s (2010) Stata
program.
}
\examples{
## Simulate data
set.seed(18)
x1 <- rnorm(50)
x2 <- rnorm(50)
y <- 1 - x1 + x2 + rnorm(50)
yl <- floor(y)
yu <- ceiling(y)
## Fit model without covariates
fit_mean <- coefbounds(yl + yu ~ 1, boot = 0)
all.equal(coef(fit_mean)[1, "lower"], mean(yl))
all.equal(coef(fit_mean)[1, "upper"], mean(yu))
## Fit model with covariates
fit_full <- coefbounds(yl + yu ~ x1 + x2, boot = 10)
coef(fit_full)
}
\author{
Brenton Kenkel
}
\references{
Arie Beresteanu and Francesca Molinari. 2008.
"Asymptotic Properties for a Class of Partially Identified Models."
\emph{Econometrica} 76 (4): 763--814.
Arie Beresteanu, Francesca Molinari and Darcy Steeg Morris. 2010.
"Asymptotics for Partially Identified Models in Stata."
\url{https://molinari.economics.cornell.edu/programs.html}
}
|
f8acd56688c1087e9a75d264063aab2b9cf8371d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nomclust/examples/evalclust.Rd.R | ce53c4bdd03b62fd7d59bc73e0448a298cb45647 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 434 | r | evalclust.Rd.R | library(nomclust)
### Name: evalclust
### Title: Evaluation of the Clustering
### Aliases: evalclust
### ** Examples
# Load the sample data set bundled with the nomclust package.
data(data20)
# Cluster the nominal data with the `iof` measure, keeping solutions with
# up to 7 clusters.
data_clu <- nomclust(data20, iof, clu_high = 7)
# Bind the original data set to the cluster-membership variables.
data_clu2 <- cbind(data20, data_clu$mem)
# Evaluate the created clusters.
# NOTE(review): the meaning of the second argument (5) is not evident from
# this snippet -- confirm against ?evalclust.
evaluation <- evalclust(data_clu2, 5, clu_high = 7)
|
ba67633a184ccacad24f783662115af6fe35400a | 8833c5d697fde59fd2a2536df13495f832ed81eb | /matchData.R | 8c5755bb2d12535a4b3d7058f74a28864d7e8a36 | [] | no_license | XiangBu/ArterialLineBacteremia | f9aa55c48bcb8671c764ea83a761b44bbc36e349 | a8a5cb4925e05578ffb994a864361c7ff3c829e6 | refs/heads/master | 2022-09-10T17:53:06.536389 | 2020-06-02T20:29:12 | 2020-06-02T20:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 562 | r | matchData.R | library(tidyverse)
library(lubridate)
library(magrittr)
library(MatchIt)

# Columns kept for matching and downstream analysis.
analysis_vars <- c(
  "any_arterial_line", "any_central_line", "arterial_line", "central_line",
  "age_at_admission", "admission_type", "sapsii", "sepsis",
  "culture_positive", "duration"
)

# Load the line-level data, keep the analysis columns, drop incomplete rows.
data <- read_rds("data/line_data.rds") %>%
  select(all_of(analysis_vars)) %>%
  drop_na()

# Nearest-neighbour matching of arterial-line exposure on central-line use,
# age, admission type, sepsis and SAPS-II score.
match <- matchit(
  any_arterial_line ~ any_central_line + age_at_admission + admission_type +
    sepsis + sapsii,
  data,
  method = "nearest"
)

# Persist the matched cohort.
matched_data <- get_matches(match, data)
write_rds(matched_data, "data/matched_data.rds")
|
673b9c1f4f57b91312600797370d5b5c27be56a2 | f6b4fa000d301a68b0e7f55be730e71fca449daf | /man/readPGM.Rd | d6dc14c77630b8f2a160e4b0f7bf0fd7e477fd46 | [] | no_license | cran/melody | 816cd001774bc578e9afac2ed16f6e20db11625a | c32a5242613270d3d15e2c6c5c23f94a2fcac6e2 | refs/heads/master | 2021-01-01T15:41:16.113803 | 2013-03-07T00:00:00 | 2013-03-07T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 548 | rd | readPGM.Rd |
\name{readPGM}
\alias{readPGM}
\title{Read a PGM-3 Formatted Image File.}
\description{
Reads a PGM version 3 formatted text image file into R as a two-dimensional matrix.
}
\usage{
readPGM(file)
}
\arguments{
\item{file}{file path to read}
}
\value{
an image matrix of grayscale values
}
\examples{
tarsier.song <- system.file('extdata','calls','Tarsius-spectrum-Duet.female-Nietsch1998-2b2.pgm', package='melody')
spectrogram.matrix <- readPGM(tarsier.song)
dim(spectrogram.matrix)
image(t(spectrogram.matrix))
}
|
53b70c22365523b80d7a1a019a1850b2391a719f | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/spinBayes/man/BVCfit.Rd | 1bd1149197d74ae131bccdf68094a1209a99b6fa | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 5,223 | rd | BVCfit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BVCFit.R
\name{BVCfit}
\alias{BVCfit}
\title{fit a Semi-parametric Bayesian variable selection}
\usage{
BVCfit(X, Y, Z, E = NULL, clin = NULL, iterations = 10000,
burn.in = NULL, sparse = TRUE, structural = TRUE, VC = TRUE,
kn = 2, degree = 2, hyper = NULL, debugging = FALSE)
}
\arguments{
\item{X}{the matrix of predictors (genetic factors) without intercept. Each row should be an observation vector. A column of 1 will be added to the X matrix
as the intercept.}
\item{Y}{the response variable. The current version of BVCfit only supports continuous response.}
\item{Z}{a vector of environmental factor for non-linear G×E interactions.}
\item{E}{a vector of environmental factor for linear G×E interactions.}
\item{clin}{a matrix of clinical variables. Clinical variables are not subject to penalty.}
\item{iterations}{the number of MCMC iterations.}
\item{burn.in}{the number of iterations for burn-in.}
\item{sparse}{logical flag. If TRUE, spike-and-slab priors will be used to shrink coefficients of irrelevant covariates to zero exactly. 'sparse' has effect only when VC=TRUE.}
\item{structural}{logical flag. If TRUE, the coefficient functions with varying effects and constant effects will be penalized separately. 'structural' has effect only when VC=TRUE.}
\item{VC}{logical flag. If TRUE, varying coefficient functions will be used for modeling the interactions between Z and X.
If FALSE, interactions between Z and X will be modeled as linear interactions.}
\item{kn}{the number of interior knots for B-spline.}
\item{degree}{the degree of B spline basis.}
\item{hyper}{a named list of hyperparameters.}
\item{debugging}{logical flag. If TRUE, progress will be output to the console and extra information will be returned.}
}
\value{
an object of class "BVCfit" is returned, which is a list with components:
\item{posterior}{posterior samples from the MCMC}
\item{coefficients}{a list of posterior estimates of coefficients}
\item{burn.in}{the number of iterations for burn-in}
\item{iterations}{the number of MCMC iterations.}
}
\description{
fit a Bayesian semi-parametric model for both linear and non-linear G×E interactions. Users can also specify all the interactions as linear and fit a Bayesian LASSO type of model.
}
\details{
By default, varying coefficient functions are used for modeling the nonlinear interactions between Z and X. Assuming both E and clin are NULL, the model can be expressed as
\deqn{Y = \beta_{0}(Z)+\sum\beta_{j}(Z)X_{j} + \epsilon }
The basis expansion and changing of basis with B splines will be done automatically:
\deqn{\beta_{j}(\cdot)\approx \gamma_{j1} + \sum_{k=2}^{q}{B}_{jk}(\cdot)\gamma_{jk}}
where \eqn{B_{jk}(\cdot)} represents B spline basis. \eqn{\gamma_{j1}} and \eqn{(\gamma_{j2}, \ldots, \gamma_{jq})^\top} correspond to the constant and varying parts of the coefficient functional, respectively.
q=kn+degree+1 is the number of basis functions. By default, kn=degree=2. User can change the values of kn and degree to any other positive integers.
If E is provided, the linear interactions between E and X will be added modeled as pairwise-products:
\deqn{Y = \beta_{0}(Z)+\sum\beta_{j}(Z)X_{j} + \zeta_{0}E + \sum \zeta_{j}EX_{j} + \epsilon}
If clin is provided, clinical variables
will be added to the model.
If VC=FALSE, all interactions are treated as linear and a Bayesian LASSO model will be used. With non-null values of E and clin, the full linear model is:
\deqn{Y \sim Z + ZX + clin + E + EX}
Please check the references for more details about the model.
Users can modify the hyper-parameters by providing a named list of hyper-parameters via the argument 'hyper'.
The list can have the following named components
\itemize{
\item{a.c, a.v, a.e: }{ shape parameters of the Gamma priors on \eqn{\lambda_{c}}, \eqn{\lambda_{v}} and \eqn{\lambda_{e}}, respectively.}
\item{b.c, b.v, b.e: }{ rate parameters of the Gamma priors on \eqn{\lambda_{c}}, \eqn{\lambda_{v}} and \eqn{\lambda_{e}}, respectively.}
\item{r.c, r.v, r.e: }{ shape parameters of the Beta priors (\eqn{\pi^{r-1}(1-\pi)^{w-1}}) on \eqn{\pi_{c}}, \eqn{\pi_{v}} and \eqn{\pi_{e}}, respectively.}
\item{w.c, w.v, w.e: }{ shape parameters of the Beta priors on \eqn{\pi_{c}}, \eqn{\pi_{v}} and \eqn{\pi_{e}}, respectively.}
\item{s: }{ shape parameters of the Inverse-gamma prior on \eqn{\sigma^{2}}.}
\item{h: }{ scale parameters of the Inverse-gamma prior on \eqn{\sigma^{2}}.}
}
Please check the references for more details about the prior distributions.
}
\examples{
data(gExp)
## default method
spbayes=BVCfit(X, Y, Z, E, clin)
spbayes
\donttest{
## non-structural
structural=FALSE
spbayes=BVCfit(X, Y, Z, E, clin, structural=structural)
spbayes
## non-sparse
sparse=FALSE
spbayes=BVCfit(X, Y, Z, E, clin, sparse=sparse)
spbayes
}
}
\references{
Ren, J., Zhou, F., Li, X., Chen, Q., Zhang, H., Ma, S., Jiang, Y., Wu, C. (2019) Semi-parametric Bayesian variable selection for gene-environment interactions.
\url{https://arxiv.org/abs/1906.01057}
}
\keyword{models}
|
5e4abcae30c6b68463795934999f19e029edcc1d | 54766e4ccd23433be9c113c90f1b03d4d2254516 | /plot2.R | fcd1ef399db3f3c042640a24e6ea7625c204768c | [] | no_license | PGarciaPacheco/sortida-bot | 1e31d8e1872ec5e32ab3b84e36e1d19d9df3d5b9 | 82163a74ae01667d7173ed0b093207dfda1d1e7f | refs/heads/main | 2023-05-14T15:24:55.964923 | 2021-06-10T10:53:48 | 2021-06-10T10:53:48 | 372,248,864 | 0 | 0 | null | null | null | null | IBM852 | R | false | false | 379 | r | plot2.R | dat <- read.csv("plantesexcursio.csv", sep=";", stringsAsFactors=TRUE)
# NOTE(review): the accented characters below look mojibake (the source file
# is CP850/IBM852 text rendered in another encoding); `ESP╚CIE` is presumably
# `ESPÈCIE` (species) and `DIST└NCIA.AL.CAM═` is `DISTÀNCIA.AL.CAMÍ`
# (distance to the path) -- confirm against the column names in the CSV.
# Coerce the species column to a factor for grouping.
esp <- as.character(dat$ESP╚CIE)
esp2 <- factor(esp)
# Widen the left margin so the long axis title/labels fit.
par(mar=c(5,10,5,2))
# Distance-to-path by species: a numeric ~ factor formula dispatches to a
# box-and-whisker plot, one box per species, with a distinct colour each.
plot(dat$DIST└NCIA.AL.CAM═ ~ esp2, main= "EspŔcies segons la distÓncia al camÝ", horizontal=TRUE, xlab= "", ylab= "DistÓncia al camÝ", frame.plot= FALSE, las=1, col=c(2,3,4,5,6,7,8,9,10))
# Axis title, pushed out into the widened margin.
mtext("EspŔcie", side=2, line=9)
|
dc73ad2a97767c4ff3d8f943eb571c8de5113063 | ea686221d463bca490b76b76be3ce50ee7b522db | /Skew t Shiny App/ui.R | 20eef243f6126886cbfebe41de2ef6564c6888ba | [] | no_license | joshbrowning2358/Robust-Skew-t | 4214a8340f7d693bf2ee134cf532954eed830574 | ee00854c66fe3cbc914a1c13220139faad72d943 | refs/heads/master | 2023-02-21T15:23:18.361111 | 2016-03-17T13:39:00 | 2016-03-17T13:39:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,391 | r | ui.R | library(shiny)
# UI for the Skew-t distribution explorer application.
shinyUI(pageWithSidebar(

  # Application title.
  headerPanel("Skew-t Distribution"),

  # Sidebar: sliders for the skew-t parameters (xi, omega, alpha, log(nu)),
  # the constant k, and the plotting-grid controls.
  sidebarPanel(
    sliderInput("xi_1", "xi_1", value=0, min=-20, max=20),
    sliderInput("xi_2", "xi_2", value=0, min=-20, max=20),
    sliderInput("omega_11", "omega_11", value=1, min=0, max=20),
    sliderInput("omega_12", "omega_12", value=0, min=-20, max=20),
    sliderInput("omega_22", "omega_22", value=1, min=0, max=20),
    sliderInput("alpha_1", "alpha_1", value=0, min=-20, max=20),
    sliderInput("alpha_2", "alpha_2", value=0, min=-20, max=20),
    sliderInput("logNu", "log(nu)", value=1, min=0, max=10),
    sliderInput("k", "k", value=5, min=1, max=10),
    sliderInput("gridPts", "Number of grid points:", value=50, min=5, max=500),
    sliderInput("xRng", "Range of observation values", value=c(-10,10), min=-50, max=50)
  ),

  # Main panel: currently a single tab showing the influence-function plot.
  mainPanel(
    tabsetPanel(
      tabPanel("Influence Functions", plotOutput("influence", height="600px"))
      # Disabled tabs, kept for reference:
      # ,tabPanel("Confidence Regions", plotOutput("confRegion", height="600px"))
      # ,tabPanel("Change of Variance Functions", plotOutput("cvf", height="600px"))
      # ,tabPanel("Skewed Portion of Likelihood", plotOutput("skewed_ll", height="600px"))
    )
  )
))
3c0a567d48bb81e7dc27f3c235daa48ade4dbcee | b6becaacb1775450bf5770cc25ddde2b26705814 | /functions/startup/params.R | ca62711a1d18ec065821eb6332c5e2de952c0c17 | [] | no_license | pedroj/functions_repo | 8d0180fe6429eb75baa5f81270d7f73315febda7 | 812692c6403a4d12e9f7eee9936fb6f1ad58f48c | refs/heads/master | 2023-04-03T03:59:16.907931 | 2023-04-01T07:58:50 | 2023-04-01T07:58:50 | 185,389,001 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,548 | r | params.R | # Parameters ------------------------------------------------------------
# Function to estimate network parameters.
# library() (rather than require()) so a missing dependency fails loudly.
library(bipartite)

#' Compute a basic set of network-level indices for an interaction matrix.
#'
#' @param mat A bipartite interaction matrix.
#' @return A named numeric vector with six indices: connectance, nestedness,
#'   weighted NODF, interaction strength asymmetry (ISA), linkage density and
#'   Shannon diversity.
params <- function(mat) {
  # Subset of the many indices networklevel() offers; see ?networklevel for
  # the full list (e.g. "web asymmetry", "cluster coefficient", "H2", ...).
  # (The original header comment said "8 indexes", but only these six are
  # actually requested.)
  indices <- c("connectance",        # 1 - C
               "nestedness",         # 2 - N
               "weighted NODF",      # 3 - wNODF
               "ISA",                # 4 - interaction strength asymmetry
               "linkage density",    # 5 - LD
               "Shannon diversity")  # 6 - H
  networklevel(mat,
               index = indices,
               ISAmethod = "Bascompte", SAmethod = "log", CCfun = median,
               normalise = TRUE, empty.web = TRUE, logbase = "e",
               intereven = "sum")
}
#------------------------------------------------------------------------
|
b55615e0b979b3289433dfdaa7ea05bdedb158ab | c71545775d344b30b4741dd16260c42049299a00 | /man/simBatlen.Rd | 0a384870b1c29898dfdc9de4a759a1114ae15cf2 | [] | no_license | mcoshima/moMSE | 524ac9692ca936f6dd150f1b17de66b660180fd7 | 155519b91551f8cbaf4740db700691c9669afe03 | refs/heads/master | 2021-07-03T07:32:59.380753 | 2020-11-24T20:29:35 | 2020-11-24T20:29:35 | 199,483,586 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 556 | rd | simBatlen.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_funs.R
\name{simBatlen}
\alias{simBatlen}
\title{Calculates biomass at length for fishery independent surveys}
\usage{
simBatlen(Nlen, dat.list, year)
}
\arguments{
\item{Nlen}{matrix with numbers at length}
\item{dat.list}{list with the number of fishery independent surveys and a selectivity-at-length matrix}
\item{year}{current year in simulation}
}
\description{
Calculates biomass at age for fishery independent surveys
}
\keyword{biomass-at-length}
|
f9a13b2307c99fca1ed62d5c46755acf7a4febb4 | b0b61cfd9fec47fc94b5da595fd81372cd5ec369 | /Basics5.r | 204192637d73301f421ad2ad8cf2363a48caccc2 | [] | no_license | ArjunAranetaCodes/MoreCodes-Rlang | 4c6246e67cec99ab3961260308a02b333b39dbf3 | 555b37e8ee316a48c586327cfc61069e0ce1e198 | refs/heads/master | 2021-01-01T19:22:15.672176 | 2018-11-25T04:00:00 | 2018-11-11T23:01:07 | 98,572,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 264 | r | Basics5.r | //Basic Operations
# Demonstrate the basic arithmetic operators by printing each expression with
# its result. paste() inserts its default single-space separator, so the
# trailing space inside each label yields two spaces before the value
# (e.g. "1 + 1 =  2").
print(paste("1 + 1 = ", (1 + 1)))
print(paste("1 - 1 = ", (1 - 1)))
print(paste("1 * 1 = ", (1 * 1)))
print(paste("1 / 1 = ", (1 / 1)))
# %% is R's modulo operator; the labels abbreviate it as a single %.
print(paste("4 % 2 = ", (4 %% 2), "- no remainder"))
print(paste("5 % 2 = ", (5 %% 2), "- remainder 1"))
|
b610771a1b337c06a3f9f33201d936a95106324e | 048c50c635294b8d51ce3c820ba94c17dbfc12a5 | /man/filterGeno.Rd | e6fb1709999cbd7ec0c6f9c11b2ccb95cd544205 | [] | no_license | nsantantonio/processGenotypes | 3397945146313f4c8d649483361034984c59d61a | ed25838f85d84e01a4683a30b8bab0fa58787422 | refs/heads/master | 2023-02-03T19:50:56.716382 | 2020-12-14T16:41:27 | 2020-12-14T16:41:27 | 321,408,616 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,102 | rd | filterGeno.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterGeno.R
\name{filterGeno}
\alias{filterGeno}
\title{filterGeno function}
\usage{
filterGeno(M, MARGIN = 2, MAF = 0.01, MISS = 0.5, HET = 0.05,
returnMatrix = TRUE, returnStats = FALSE, returnImputed = FALSE,
rmDupl = FALSE, rmDuplBy = NULL, maxGdiff = 0, maxiter = 10,
checkAllMiss = FALSE, returnBool = FALSE)
}
\arguments{
\item{M}{[value]}
\item{MARGIN}{[value]. Default is 2}
\item{MAF}{[value]. Default is 0.01}
\item{MISS}{[value]. Default is 0.5}
\item{HET}{[value]. Default is 0.05}
\item{returnMatrix}{[value]. Default is TRUE}
\item{returnStats}{[value]. Default is FALSE}
\item{returnImputed}{[value]. Default is FALSE}
\item{rmDupl}{[value]. Default is FALSE}
\item{rmDuplBy}{[value]. Default is NULL}
\item{maxGdiff}{[value]. Default is 0}
\item{maxiter}{[value]. Default is 10}
\item{checkAllMiss}{[value]. Default is FALSE}
\item{returnBool}{[value]. Default is FALSE}
}
\value{
[value]
}
\description{
Filter a genotype matrix \code{M} by minor-allele frequency (\code{MAF}), missingness (\code{MISS}) and heterozygosity (\code{HET}) thresholds
}
\details{
[fill in details here]
}
\examples{
none
}
|
bf05d4f5dc45aaa45d903e85c597fdf079ee5994 | a60ba6f207da941842e8e9a11c2753d4ed606bd4 | /plot4.R | d0c63f0b2fec1df2004f5191534349cc393102b1 | [] | no_license | kaazal/ExData_Plotting1 | ff79b0e957849855b14119376b1dc2efec7f2fe7 | 007d2e46119b0075e33b4d0d74fd9c0c1546988e | refs/heads/master | 2020-04-04T10:50:11.490416 | 2018-11-04T04:03:01 | 2018-11-04T04:03:01 | 155,787,518 | 0 | 0 | null | 2018-11-01T23:18:54 | 2018-11-01T23:18:54 | null | UTF-8 | R | false | false | 1,382 | r | plot4.R | library(dplyr)
# plot4.R: 2x2 panel of household power measurements for 1-2 Feb 2007.
# Read the raw semicolon-separated file; the header row is skipped, so
# column names are assigned manually on the next line.
# NOTE(review): no na.strings is supplied here; non-numeric entries only
# become NA later via the as.numeric(as.character(...)) coercions (with
# warnings) -- confirm this is intended.
power <- read.table("household_power_consumption.txt", skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two days of interest (dates are d/m/Y strings).
spower <- subset(power,power$Date=="1/2/2007" | power$Date=="2/2/2007")
# Add a numeric active-power column and a combined POSIXct date-time column
# for the x axes.
spowerm <- mutate(spower,GlobalPower= as.numeric(as.character(spower$Global_active_power)), DateTime = as.POSIXct(paste(spower$Date, spower$Time, sep=" "), format = "%d/%m/%Y %H:%M:%S", tz = ""))
# Render the four panels to a 480x480 PNG.
png("plot4.png",width=480, height=480)
par(mfrow = c(2,2))
# Panel 1: global active power over time.
plot(spowerm$DateTime, spowerm$GlobalPower, xlab="",ylab="Global Active Power(kilowatts)", type="l")
# Panel 2: voltage over time.
plot(spowerm$DateTime, as.numeric(as.character(spowerm$Voltage)), xlab="datetime",ylab="Voltage", type="l")
# Panel 3: the three sub-metering series overlaid, with a legend.
plot(spowerm$DateTime, as.numeric(as.character(spowerm$Sub_metering_1)), xlab="",ylab="Energy sub metering", type="l")
with(spowerm,lines(spowerm$DateTime,as.numeric(as.character(spowerm$Sub_metering_2)),col="red"))
with(spowerm,lines(spowerm$DateTime,as.numeric(as.character(spowerm$Sub_metering_3)),col="blue"))
legend("topright",lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Panel 4: global reactive power over time.
plot(spowerm$DateTime, as.numeric(as.character(spowerm$Global_reactive_power)), xlab="datetime", ylab="Global_reactive_power", type="l")
dev.off()
|
9e2e4a79b04cb85da52861124c27fccd66c3ac99 | 04f349102910e5052ea34d3e7744e4d79a2fbb4f | /R/pof_future_cables_66_33kv.R | f2bc69113d3669623daeb20d792b302bfdc420c1 | [
"MIT"
] | permissive | scoultersdcoe/CNAIM | f0728b00f0d0628e554975c78d767ee2c472fb3b | 5c77ce4c50ef92fd05b9bb44b33fdca18302d020 | refs/heads/master | 2023-08-23T22:54:59.450292 | 2021-03-12T15:52:54 | 2021-03-12T15:52:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,905 | r | pof_future_cables_66_33kv.R | #' @importFrom magrittr %>%
#' @title Future Probability of Failure for 33-66kV Cables
#' @description This function calculates the future
#' annual probability of failure per kilometre for 33-66kV cables.
#' The function is a cubic curve that is based on
#' the first three terms of the Taylor series for an
#' exponential function. For more information about the
#' probability of failure function see section 6
#' on page 30 in CNAIM (2017).
#' @inheritParams pof_cables_66_33kv
#' @param simulation_end_year Numeric. The last year of simulating probability
#' of failure. Default is 100.
#' @return Data frame. Future probability of failure
#' per annum per kilometre for 33-66kV cables, with one row per simulated
#' year and columns \code{year}, \code{PoF} and \code{age}.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' # Future probability of failure for a 66kV UG Cable (Non Pressurised)
#' pof_66kV_non_pressurised <-
#'   pof_future_cables_66_33kv(cable_type = "66kV UG Cable (Non Pressurised)",
#'                             sub_division = "Aluminium sheath - Aluminium conductor",
#'                             utilisation_pct = 75,
#'                             operating_voltage_pct = 50,
#'                             sheath_test = "Default",
#'                             partial_discharge = "Default",
#'                             fault_hist = "Default",
#'                             leakage = "Default",
#'                             reliability_factor = "Default",
#'                             age = 1,
#'                             simulation_end_year = 100)
#' # Plot
#' plot(pof_66kV_non_pressurised$PoF * 100,
#'      type = "l", ylab = "%", xlab = "years",
#'      main = "PoF per kilometre - 66kV UG Cable (Non Pressurised)")
pof_future_cables_66_33kv <-
  function(cable_type = "66kV UG Cable (Gas)",
           sub_division = "Aluminium sheath - Aluminium conductor",
           utilisation_pct = "Default",
           operating_voltage_pct = "Default",
           sheath_test = "Default",
           partial_discharge = "Default",
           fault_hist = "Default",
           leakage = "Default",
           reliability_factor = "Default",
           age,
           simulation_end_year = 100) {

  # Bind the non-standard-evaluation column names used below to NULL so that
  # R CMD check does not flag them as undefined global variables.
  `Asset Register Category` = `Health Index Asset Category` =
    `Generic Term...1` = `Generic Term...2` = `Functional Failure Category` =
    `K-Value (%)` = `C-Value` = `Sub-division` =
    `Condition Criteria: Sheath Test Result` =
    `Condition Criteria: Partial Discharge Test Result` =
    `Condition Criteria: Leakage Rate` = NULL

  # Ref. table Categorisation of Assets and Generic Terms for Assets --------
  asset_category <- gb_ref$categorisation_of_assets %>%
    dplyr::filter(`Asset Register Category` == cable_type) %>%
    dplyr::select(`Health Index Asset Category`) %>% dplyr::pull()

  # NOTE(review): generic_term_1, generic_term_2 and mmi_type (below) are
  # looked up but never used later in this function; retained for parity
  # with the original code -- confirm whether they can be dropped.
  generic_term_1 <- gb_ref$generic_terms_for_assets %>%
    dplyr::filter(`Health Index Asset Category` == asset_category) %>%
    dplyr::select(`Generic Term...1`) %>% dplyr::pull()

  generic_term_2 <- gb_ref$generic_terms_for_assets %>%
    dplyr::filter(`Health Index Asset Category` == asset_category) %>%
    dplyr::select(`Generic Term...2`) %>% dplyr::pull()

  # Normal expected life ----------------------------------------------------
  normal_expected_life_cable <- gb_ref$normal_expected_life %>%
    dplyr::filter(`Asset Register Category` == cable_type &
                    `Sub-division` == sub_division) %>%
    dplyr::pull()

  # Constants C and K for the PoF function -----------------------------------
  # Non-pressurised cables share one row of curve parameters; every other
  # category is matched on its own name.
  if (asset_category == "EHV UG Cable (Non Pressurised)") {
    type_k_c <- gb_ref$pof_curve_parameters$`Functional Failure Category`[which(
      grepl("Non Pressurised",
            gb_ref$pof_curve_parameters$`Functional Failure Category`,
            fixed = TRUE))]
  } else {
    type_k_c <- gb_ref$pof_curve_parameters$`Functional Failure Category`[which(
      grepl(asset_category,
            gb_ref$pof_curve_parameters$`Functional Failure Category`,
            fixed = TRUE))]
  }

  # K is tabulated in percent; convert to a proportion.
  k <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` == type_k_c) %>%
    dplyr::select(`K-Value (%)`) %>%
    dplyr::pull() / 100

  c <- gb_ref$pof_curve_parameters %>%
    dplyr::filter(`Functional Failure Category` == type_k_c) %>%
    dplyr::select(`C-Value`) %>%
    dplyr::pull()

  # Duty factor ---------------------------------------------------------------
  duty_factor_cable <-
    duty_factor_cables(utilisation_pct,
                       operating_voltage_pct,
                       voltage_level = "EHV")

  # Expected life --------------------------------------------------------------
  expected_life_years <- expected_life(normal_expected_life_cable,
                                       duty_factor_cable,
                                       location_factor = 1)

  # b1 (initial ageing rate) ---------------------------------------------------
  b1 <- beta_1(expected_life_years)

  # Initial health score -------------------------------------------------------
  initial_health_score <- initial_health(b1, age)

  ## NOTE
  # Typically, the Health Score Collar is 0.5 and Health Score Cap is 10,
  # implying no overriding of the Health Score. However, in some instances
  # these parameters are set to other values in the Health Score Modifier
  # calibration tables. These overriding values are shown in Table 34 to
  # Table 195 and Table 200 in Appendix B.

  # Measured condition inputs --------------------------------------------------
  # The measured-condition calibration tables key on the category name
  # without the "UG" token.
  asset_category_mmi <- stringr::str_remove(asset_category, pattern = "UG")
  asset_category_mmi <- stringr::str_squish(asset_category_mmi)

  mcm_mmi_cal_df <- gb_ref$measured_cond_modifier_mmi_cal

  mmi_type <- mcm_mmi_cal_df$`Asset Category`[which(
    grepl(asset_category_mmi,
          mcm_mmi_cal_df$`Asset Category`,
          fixed = TRUE))]

  mcm_mmi_cal_df <-
    mcm_mmi_cal_df[which(mcm_mmi_cal_df$`Asset Category` == asset_category_mmi), ]

  # MMI combination parameters for this asset category.
  factor_divider_1 <-
    as.numeric(mcm_mmi_cal_df$
                 `Parameters for Combination Using MMI Technique - Factor Divider 1`)
  factor_divider_2 <-
    as.numeric(mcm_mmi_cal_df$
                 `Parameters for Combination Using MMI Technique - Factor Divider 2`)
  max_no_combined_factors <-
    as.numeric(mcm_mmi_cal_df$
                 `Parameters for Combination Using MMI Technique - Max. No. of Combined Factors`)

  # Condition input factor/cap/collar, per asset category ----------------------
  if (asset_category == "EHV UG Cable (Non Pressurised)") {
    # Sheath test ---------------------------------------------------------------
    mci_ehv_cbl_non_pr_sheath_test <-
      gb_ref$mci_ehv_cbl_non_pr_sheath_test %>% dplyr::filter(
        `Condition Criteria: Sheath Test Result` == sheath_test
      )

    ci_factor_sheath <- mci_ehv_cbl_non_pr_sheath_test$`Condition Input Factor`
    ci_cap_sheath <- mci_ehv_cbl_non_pr_sheath_test$`Condition Input Cap`
    ci_collar_sheath <- mci_ehv_cbl_non_pr_sheath_test$`Condition Input Collar`

    # Partial discharge ---------------------------------------------------------
    mci_ehv_cbl_non_pr_prtl_disch <-
      gb_ref$mci_ehv_cbl_non_pr_prtl_disch %>% dplyr::filter(
        `Condition Criteria: Partial Discharge Test Result` == partial_discharge
      )

    ci_factor_partial <- mci_ehv_cbl_non_pr_prtl_disch$`Condition Input Factor`
    ci_cap_partial <- mci_ehv_cbl_non_pr_prtl_disch$`Condition Input Cap`
    ci_collar_partial <- mci_ehv_cbl_non_pr_prtl_disch$`Condition Input Collar`

    # Fault history -------------------------------------------------------------
    mci_ehv_cbl_non_pr_fault_hist <- gb_ref$mci_ehv_cbl_non_pr_fault_hist

    if (fault_hist == 'Default' || fault_hist == 'No historic faults recorded') {
      # Categorical entries are matched directly on the "Upper" column.
      no_row <- which(mci_ehv_cbl_non_pr_fault_hist$Upper == fault_hist)
      ci_factor_fault <- mci_ehv_cbl_non_pr_fault_hist$`Condition Input Factor`[no_row]
      ci_cap_fault <- mci_ehv_cbl_non_pr_fault_hist$`Condition Input Cap`[no_row]
      ci_collar_fault <- mci_ehv_cbl_non_pr_fault_hist$`Condition Input Collar`[no_row]
    } else {
      # Numeric fault rates are binned into table rows 2-4 via [Lower, Upper).
      # NOTE(review): a numeric value outside every band leaves the fault
      # inputs undefined, as in the original -- confirm upstream validation.
      for (n in 2:4) {
        if (fault_hist >= as.numeric(mci_ehv_cbl_non_pr_fault_hist$Lower[n]) &
            fault_hist < as.numeric(mci_ehv_cbl_non_pr_fault_hist$Upper[n])) {
          ci_factor_fault <- mci_ehv_cbl_non_pr_fault_hist$`Condition Input Factor`[n]
          ci_cap_fault <- mci_ehv_cbl_non_pr_fault_hist$`Condition Input Cap`[n]
          ci_collar_fault <- mci_ehv_cbl_non_pr_fault_hist$`Condition Input Collar`[n]
          break
        }
      }
    }

    # Combine the three condition inputs with the MMI technique; the overall
    # cap is the smallest individual cap and the overall collar the largest
    # individual collar.
    factors <- c(ci_factor_sheath,
                 ci_factor_partial,
                 ci_factor_fault)
    measured_condition_factor <- mmi(factors,
                                     factor_divider_1,
                                     factor_divider_2,
                                     max_no_combined_factors)
    measured_condition_cap <- min(c(ci_cap_sheath,
                                    ci_cap_partial,
                                    ci_cap_fault))
    measured_condition_collar <- max(c(ci_collar_sheath,
                                       ci_collar_partial,
                                       ci_collar_fault))

  } else if (asset_category == "EHV UG Cable (Oil)") {
    # Oil-filled cables: single condition input, the oil leakage rate.
    mci_ehv_cable_oil_leakage <-
      gb_ref$mci_ehv_cable_oil_leakage %>% dplyr::filter(
        `Condition Criteria: Leakage Rate` == leakage
      )

    measured_condition_factor <- mci_ehv_cable_oil_leakage$`Condition Input Factor`
    measured_condition_cap <- mci_ehv_cable_oil_leakage$`Condition Input Cap`
    measured_condition_collar <- mci_ehv_cable_oil_leakage$`Condition Input Collar`

  } else if (asset_category == "EHV UG Cable (Gas)") {
    # Gas-pressurised cables: single condition input, the gas leakage rate.
    mci_ehv_cbl_gas <-
      gb_ref$mci_ehv_cable_gas_leakage %>% dplyr::filter(
        `Condition Criteria: Leakage Rate` == leakage
      )

    measured_condition_factor <- mci_ehv_cbl_gas$`Condition Input Factor`
    measured_condition_cap <- mci_ehv_cbl_gas$`Condition Input Cap`
    measured_condition_collar <- mci_ehv_cbl_gas$`Condition Input Collar`
  }

  # Measured condition modifier -------------------------------------------------
  measured_condition_modifier <- data.frame(measured_condition_factor,
                                            measured_condition_cap,
                                            measured_condition_collar)

  # Health score modifier (factor / cap / collar) --------------------------------
  health_score_factor <- measured_condition_modifier$measured_condition_factor
  health_score_cap <- measured_condition_modifier$measured_condition_cap
  health_score_collar <- measured_condition_modifier$measured_condition_collar

  health_score_modifier <- data.frame(health_score_factor,
                                      health_score_cap,
                                      health_score_collar)

  # Current health score ---------------------------------------------------------
  current_health_score <-
    current_health(initial_health_score,
                   health_score_modifier$health_score_factor,
                   health_score_modifier$health_score_cap,
                   health_score_modifier$health_score_collar,
                   reliability_factor = reliability_factor)

  # Current (year-0) probability of failure: cubic truncation of k * exp(c * H).
  # NOTE(review): this value is not used in the returned data frame; retained
  # for parity with the original implementation.
  probability_of_failure <- k *
    (1 + (c * current_health_score) +
       (((c * current_health_score)^2) / factorial(2)) +
       (((c * current_health_score)^3) / factorial(3)))

  # Future probability of failure ------------------------------------------------
  # Forward ageing rate b2, capped so it cannot run away from the initial
  # rate b1 (and forced to b1 for a brand-new asset with health score 0.5).
  b2 <- beta_2(current_health_score, age)
  if (b2 > 2 * b1) {
    b2 <- b1
  } else if (current_health_score == 0.5) {
    b2 <- b1
  }

  # Ageing reduction factor: 1 below health score 2, rising linearly to 1.5
  # at health score 5.5, constant thereafter.
  if (current_health_score < 2) {
    ageing_reduction_factor <- 1
  } else if (current_health_score <= 5.5) {
    ageing_reduction_factor <- ((current_health_score - 2) / 7) + 1
  } else {
    ageing_reduction_factor <- 1.5
  }

  # Project the health score forward one year at a time (capped at 15) and
  # convert each projection to a PoF with the same cubic expansion as above.
  future_health_score_limit <- 15
  year <- seq(from = 0, to = simulation_end_year, by = 1)
  pof_year <- numeric(length(year))
  for (y in seq_along(year)) {
    t <- year[y]
    H <- min(current_health_score * exp((b2 / ageing_reduction_factor) * t),
             future_health_score_limit)
    pof_year[y] <- k * (1 + (c * H) +
                          (((c * H)^2) / factorial(2)) +
                          (((c * H)^3) / factorial(3)))
  }

  # The asset's age simply advances with the simulated year, so it can be
  # computed vectorised. (The original filled it with a 2:nrow() loop, which
  # fails when simulation_end_year = 0 because 2:1 counts down.)
  pof_future <- data.frame(year = year, PoF = pof_year, age = age + year)
  return(pof_future)
}
|
affe905bcb726dd0078b54eeec581b29cc79b23a | dcc929fb7e7a26f832eb0e794688492c9a2f69e3 | /man/createBiggModel.Rd | ec19dc8ca68ecc54e2474d77df9198e0866c41ce | [] | no_license | cran/BiGGR | 2afe53320fda7e52bf76d6f1d6c4f13c1e56b4e1 | 6af2f69a95cf70f388759b37560eab2fd19a42c8 | refs/heads/master | 2021-01-01T15:51:15.742647 | 2013-01-31T00:00:00 | 2013-01-31T00:00:00 | 17,717,251 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,339 | rd | createBiggModel.Rd | \name{createBiggModel}
\encoding{latin1}
\Rdversion{1.1}
\alias{createBiggModel}
\title{createBiggModel}
\description{creates a model file to be run for simulations of metabolic fluxes}
\usage{createBiggModel(metabolites_file,reactions_file,maximize,
equation_var,equation_value,constraint,externals)}
\arguments{
\item{metabolites_file}{is a dataframe of metabolites participating in a reaction. This file is exported from database of metabolic reconstructions (BiGG) at
\url{http://bigg.ucsd.edu/bigg/searchMet.pl} }
\item{reactions_file}{is a dataframe of reactions participating in a metabolic pathway. This file can be exported from database of metabolic reconstructions (BiGG) at
\url{http://bigg.ucsd.edu/bigg/main.pl} }
\item{maximize}{
is a character vector consisting of the tag of the reaction(s) to be maximized or minimized
}
\item{equation_var}{
is a character vector specifying the name for the reaction to be maximized or minimized
}
\item{equation_value}{
is a numeric value for the reactions specified in \code{equation_var}
}
\item{constraint}{
is a character vector specifying the minimum and maximum values(boundary) under which the solution for the \code{maximize} reaction should fall
}
\item{externals}{
a character vector of metabolites as provided by the user for specific pathways for which FBA (flux balance analysis) needs to be performed
}
}
\value{A model file with the extension \code{".lim"} is created}
\references{Soetaert K, van Oevelen D (2009). LIM: Linear Inverse Model examples and solution methods.
R package version 1.3}
\author{Anand K. Gavai <anand.gavai@bioinformatics.nl>}
\note{none}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
## The function is currently defined as
\dontrun{
metabolites_file<-"glycolysis_M.csv" # download from the BiGG database
reactions_file<-"glycolysis_R.csv" # download from the BiGG database
maximize<-"R_PYK"
equation_var<-"R_HEX1"
equation_value<-1
constraint<-"[0,1000]"
externals<-c("glcD","pyr")
model.lim<-createBiggModel(metabolites_file,reactions_file,maximize,equation_var,equation_value,constraint,externals)
}
}
\keyword{Linear Inverse Models}
\keyword{Linear optimization model file}
|
14b772966c04fca82496db524d30349921a99974 | 7f77545b408fb7f2b7fd3b9e682a0b8e6d6a0c91 | /knnlog.R | dc3ad52f0168e9034748852325631119a1b071bd | [] | no_license | bojavardhan/ANALYSING-VARIOUS-DATASETS-USING-MACHINE-LEARNING-AND-DATA-MINING-METHODS | f8d4202bb00e26468b9c2add9b2abba8e56e3de6 | 23a0812fa41bea5db562255d02e743efe5dd68d7 | refs/heads/main | 2023-03-01T23:14:12.141762 | 2021-02-10T01:38:47 | 2021-02-10T01:38:47 | 337,585,939 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,267 | r | knnlog.R | #KNN Classifier
library(caTools)
library(class)
library(gmodels)
library(caret)
library(randomForest)
library(car)

# Retrieving the dataset from its location.
# NOTE(review): setwd() with an absolute user path makes this script
# non-portable; consider relative paths or here::here().
setwd("C:/Users/bojav/Downloads/ML")
data <- read.csv("knn.csv", stringsAsFactors = FALSE, header = TRUE, sep = ",")
str(data)

# Count missing values per column.
sapply(data, function(x) sum(is.na(x)))
str(data)

# Mean-impute missing values. Restricted to numeric columns: the original
# applied mean() to every column, which on non-numeric columns was a silent
# no-op that only emitted a warning.
for (i in seq_len(ncol(data))) {
  if (is.numeric(data[[i]])) {
    data[is.na(data[, i]), i] <- mean(data[, i], na.rm = TRUE)
  }
}

# 70/30 train/test split.
set.seed(1234)
ind <- sample(2, nrow(data), replace = TRUE, prob = c(0.7, 0.3))
training <- data[ind == 1, ]
test <- data[ind == 2, ]

# Randomly assigns rows to each fold: 10-fold cross-validation, repeated 3x.
trcontrol <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
# To select the model on ROC instead of accuracy, add class probabilities and
# a two-class summary (note: pass the function, not a call):
# trcontrol <- trainControl(method = "repeatedcv", number = 10, repeats = 3,
#                           classProbs = TRUE, summaryFunction = twoClassSummary)

# By default the selection metric is accuracy. Tune k over 20 candidate
# values, centring and scaling the predictors first.
set.seed(333)
fit <- train(Side ~ ., data = training, tuneLength = 20, method = 'knn',
             trControl = trcontrol, preProc = c('center', 'scale'))
fit
plot(fit)

# Predictions and confusion matrices on the training and test sets.
pred <- predict(fit, newdata = training)
pred1 <- predict(fit, newdata = test)
confusionMatrix(table(pred, training$Side))
confusionMatrix(table(pred1, test$Side))
|
5eba7d595caf6a15104acada0bec39ab7bd07715 | 1fde1d04f9b7745a4680da1d468f1a98832d2e4e | /osf.R | 160a711fc1e365235a0a6d2d7440d2f648ae6885 | [] | no_license | jrosen48/airs-ngss-survey | cc38c7cb3f949e87ce84eb3e7bfdeab6b0b09684 | e8ea363d457797d20e63aaa30af1828fc5bc776e | refs/heads/master | 2023-03-16T19:37:54.298533 | 2021-03-11T11:57:44 | 2021-03-11T11:57:44 | 345,803,130 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 455 | r | osf.R | library(osfr)
library(here)
library(fs)
# Ensure the local data directory exists before any download.
if (!dir_exists("data")) dir_create("data")
# Fetch the NGSS adoption-states workbook from OSF unless it is already
# cached locally; conflicts = "skip" leaves an existing file untouched.
if (!file_exists(here("data", "ngss-adoption-states.xlsx"))) {
  ngss_adoption_states_download <-
    osf_download(osf_retrieve_file("bymvz"), here("data"), conflicts = "skip")
}
# Same for the NGSS adoption-survey workbook.
if (!file_exists(here("data", "ngss-adoption-survey.xlsx"))) {
  ngss_adoption_survey_download <-
    osf_download(osf_retrieve_file("me2qr"), here("data"), conflicts = "skip")
}
|
909616ffb662a1a4df7ad5c749d9fd4db44d27aa | c85471f60e9d5c462de6c60c880d05898ec81411 | /cache/rladies-ames|tidytuesday|data__2019__2019-03-26__r-ladies-ames-soln.R | 4f20c37506d9d30867732e69ee31baad4908eb30 | [
"CC-BY-4.0",
"MIT"
] | permissive | a-rosenberg/github-content-scraper | 2416d644ea58403beacba33349ee127e4eb42afe | ed3340610a20bb3bd569f5e19db56008365e7ffa | refs/heads/master | 2020-09-06T08:34:58.186945 | 2019-11-15T05:14:37 | 2019-11-15T05:14:37 | 220,376,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,276 | r | rladies-ames|tidytuesday|data__2019__2019-03-26__r-ladies-ames-soln.R | ## ----setup, include=FALSE------------------------------------------------
# Purled from an R Markdown analysis of Seattle pet-license data
# (Tidy Tuesday, 2019-03-26); `## ----name` lines are knitr chunk markers.
knitr::opts_chunk$set(echo = TRUE, fig.align="center")
## ----getdat--------------------------------------------------------------
library(tidyverse)
# Seattle pet licenses; expects seattle_pets.csv in the working directory.
pets <- read_csv("seattle_pets.csv")
head(pets)
## ----dates---------------------------------------------------------------
library(lubridate)
# Parse the issue date ("Month DD YYYY") and the ZIP code. parse_* returns NA
# (with a warning) for values that do not match; those NAs are inspected below.
pets %>%
  mutate(date = parse_date(license_issue_date, format = "%B %d %Y"),
         zip = parse_integer(zip_code)) -> pets
# check for missings
pets %>% filter(is.na(date))
pets %>% filter(is.na(zip))
# ZIPs that failed integer parsing but are present in the raw data; the next
# step re-parses just their first five characters.
pets %>% filter(is.na(zip), !is.na(zip_code))
# if zip_code is not NA, only take the first 5 digits
pets <- pets %>%
  mutate(zip = ifelse((!is.na(zip_code) & is.na(zip)), parse_integer(str_sub(zip_code, 1, 5)), zip))
head(pets)
## ----lets----------------------------------------------------------------
# First letter of each pet's name, upper-cased (a missing name yields NA).
pets %>%
  mutate(first_letter = toupper(str_sub(animals_name, 1,1))) -> pets
# Distribution of first letters across all species (stacked bars).
pets %>%
  ggplot() +
  geom_bar(aes(x = first_letter, fill = species))
## ----weird---------------------------------------------------------------
# Named pets whose first character is not an upper-case letter A-Z.
pets %>% filter(!(first_letter %in% LETTERS) , !(is.na(animals_name)))
# only 12 that are non-alpha
## ----lets2---------------------------------------------------------------
# Side-by-side first-letter distributions: cats vs dogs ...
pets %>% filter(first_letter %in% LETTERS, species %in% c("Cat", "Dog")) %>%
  ggplot() +
  geom_bar(aes(x = first_letter, fill = species), position = "dodge")
# ... and goats vs pigs.
pets %>% filter(first_letter %in% LETTERS, species %in% c("Goat", "Pig")) %>%
  ggplot() +
  geom_bar(aes(x = first_letter, fill = species), position = "dodge")
## ----goats---------------------------------------------------------------
# Most common goat names, highest count first
# (count(sort = TRUE) is equivalent to count() followed by arrange(desc(n))).
pets %>%
  filter(species == "Goat") %>%
  select(animals_name) %>%
  count(animals_name, sort = TRUE)
# Most common pig names, highest count first.
pets %>%
  filter(species == "Pig") %>%
  select(animals_name) %>%
  count(animals_name, sort = TRUE)
## ----chisq---------------------------------------------------------------
# Per-species counts of each first letter (cats and dogs only).
pets %>% filter(species %in% c("Cat", "Dog"), first_letter %in% LETTERS) %>%
  group_by(species, first_letter) %>% count() -> test
# ?chisq.test
# FIX: the original call chisq.test(cats, dogs) passed two numeric count
# vectors, which chisq.test treats as a pair of factors — i.e. it
# cross-tabulated the count *values* (table(cats, dogs)) instead of comparing
# the two first-letter distributions. It could also silently misalign the
# vectors whenever a letter occurs for only one species. Build the explicit
# species x letter contingency table and test that instead.
letter_table <- xtabs(n ~ species + first_letter, data = test)
chisq.test(letter_table, correct = FALSE)
|
abcb3521e487ed63bfb513553ca120a034de29bb | 52ac04f7c1918eb52ae4462720f6087c2446e316 | /process_neutrophils_final.R | cd6a08a64cf35dba25bedf2763175ebbc4d25f48 | [] | no_license | UCSF-DSCOLAB/combes_et_al_COVID_2020 | fe448c4ecae4a0ccec6b2f054ff9036c41d56f05 | 47ae7eea41bf8e5fc85be91ce1ebaaea378cc8c5 | refs/heads/master | 2023-06-12T19:35:32.670793 | 2021-07-07T22:43:24 | 2021-07-07T22:43:24 | 323,254,169 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 56,035 | r | process_neutrophils_final.R | ## Author: Tristan Courau
# Call libraries
library(assertthat)
library(ggplot2)
library(cowplot)
library(dittoSeq)
library(dplyr)
library(grid)
library(gridExtra)
library(reshape2)
library(scales)
library(reshape)
library(ggrepel)
library(RColorBrewer)
library(pheatmap)
library(Seurat)
library(beepr)
library(ggpubr)
library(EnhancedVolcano)
# Set working directory
setwd("~/Tristan/Labo MK/Manips/COVID/200428 patient_data_SO/201001 SEURAT WD/")
##################################################################################################
####################################### CLINICAL DATA ############################################
##################################################################################################

# Call clinical scores csv file
COMET_10X_CLINICAL_SCORES <- read.csv("~/Tristan/Labo MK/Manips/COVID/COMET_10X_CLINICAL_SCORES_PAPER.csv", sep=',', header=TRUE)

# Apply values to patients in the neutrophil dataset.
# Every clinical column is matched from the scores table by the same patient
# key (SAMPLE.by.SNPs), so compute the row lookup once and copy each column in
# a loop instead of repeating the identical match() expression 14 times.
clinical_cols <- c(
  "covid_status", "Age", "Day_onset_category", "ICU_vs_FLOOR",
  "Other_infection_type", "Death", "NIH_score", "Sampling_score",
  "Overall_score", "Qualitative_score", "Spike_score",
  "Nucleocapsid_score", "ORF3a_score", "RBD_score"
)
patient_idx <- match(merged_colossal_full_harmony_Neuts@meta.data$SAMPLE.by.SNPs,
                     COMET_10X_CLINICAL_SCORES$SAMPLE.by.SNPs)
for (col in clinical_cols) {
  # df[[col]] <- x is equivalent to df$col <- x for a data.frame column.
  merged_colossal_full_harmony_Neuts@meta.data[[col]] <- COMET_10X_CLINICAL_SCORES[[col]][patient_idx]
}

# Create color scheme for COVID-19 status and disease severity
mycolorsseverity <- setNames(c("grey40", "orange", "orangered2"), c('CTRL', 'MILD', 'SEVERE'))
mycolorsstatus <- setNames(c("grey40", "dodgerblue3", "firebrick3"), c('CTRL', 'NEG', 'POS'))
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
################################################## FINAL NEUTS ########################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#Load the data
load("~/Tristan/Labo MK/Manips/COVID/200428 patient_data_SO/merged/merged_colossal_paper_final/annotated_res_0.6/neuts/merged_colossal_paper_final_res0.6_neuts.RData")
# The .RData file restores a Seurat object named `neuts`; rebind it to the
# working name used throughout this script and drop the original binding.
merged_colossal_full_harmony_Neuts <- neuts
rm(neuts)
#Reset clustering and try resolution 1 (fixed random.seed keeps clustering reproducible)
merged_colossal_full_harmony_Neuts_1 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 1, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_1 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 1")
#Explore resolution 1 (calculate DEG between clusters, rank them, create a top10 list of them and plot it in a dotplot)
# Positive markers only; Poisson DE test with the 10x LIBRARY as a latent
# variable to control for batch effects.
merged_colossal_full_harmony_Neuts_1_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_1, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
# Keep markers with adjusted p < 0.1, sort by decreasing fold change, then
# regroup rows by cluster (the stable sort keeps the fold-change order within
# each cluster).
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers[which(merged_colossal_full_harmony_Neuts_1_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$cluster,decreasing = FALSE),]
# Top 10 markers per cluster by average logFC, visualized as a dotplot.
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_1, features = unique(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
############################################################## 1st CLEANUP ###########################################################
# Each cleanup round follows the same recipe: label clusters from the marker
# dotplot, flag contaminant clusters, subset the clean cells, then recompute
# UMAP / neighbors / clusters / markers on the remaining cells.
# Annotate all cells as Neuts
merged_colossal_full_harmony_Neuts_1@meta.data$All_Neuts_annotations <- ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'), 'NEUTS', '?')
# According to previous dotplot, annotate cells that we KEEP and those that we REMOVE (= RBC/T/B/Platelets contaminants)
merged_colossal_full_harmony_Neuts_1@meta.data$Subsetting_annotations <- ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('0', '1', '2', '3', '4', '5', '6', '10', '11', '12', '13', '15', '16', '17', '18', '19', '21'), 'KEEP',
                                                                        ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('7', '8', '9', '14', '20'), 'REMOVE', '?'))
# Observe annotation to confirm its accuracy
DimPlot(merged_colossal_full_harmony_Neuts_1, reduction='umap', label = F, group.by = "Subsetting_annotations")
# Stacked bar: fraction of KEEP vs REMOVE cells.
ggplot(merged_colossal_full_harmony_Neuts_1@meta.data) + geom_bar(aes(x=All_Neuts_annotations, fill=Subsetting_annotations) , stat="count" , position="fill")
# Subset the cells that we KEEP
Idents(merged_colossal_full_harmony_Neuts_1) <- "Subsetting_annotations"
merged_colossal_full_harmony_Neuts <- subset(merged_colossal_full_harmony_Neuts_1, idents = c('KEEP'), invert = FALSE)
DimPlot(merged_colossal_full_harmony_Neuts , reduction='umap')
# Explore 20 first PCs to see if they are all relevant to use
png("NEUTS_DimHeatmap_20_PCs.png" , width = 14 , height = 10, units = "in", res = 200)
DimHeatmap(merged_colossal_full_harmony_Neuts, dims = 1:20, nfeatures = 5)
dev.off()
# Use the 20 first PCs to recalculate the UMAP space and the clusters neighboring
# (both run on the batch-corrected 'harmony' reduction; seed fixed for reproducibility)
merged_colossal_full_harmony_Neuts <- RunUMAP(merged_colossal_full_harmony_Neuts, dims = 1:20, n.neighbors = 30, min.dist = 0.3, spread = 1, verbose = FALSE, seed.use = 21212, reduction = 'harmony')
merged_colossal_full_harmony_Neuts<- FindNeighbors(merged_colossal_full_harmony_Neuts, dims = 1:20, k.param = 20, verbose = FALSE, reduction = 'harmony')
#Reset clustering, try and explore resolution 1
merged_colossal_full_harmony_Neuts_1 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 1, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_1 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 1")
# Recompute cluster markers on the cleaned object (Poisson test, LIBRARY as batch covariate).
merged_colossal_full_harmony_Neuts_1_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_1, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
beep(2)
# Filter to adjusted p < 0.1, rank by logFC within each cluster.
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers[which(merged_colossal_full_harmony_Neuts_1_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$cluster,decreasing = FALSE),]
# Top 10 markers per cluster shown as a dotplot for the next annotation round.
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_1, features = unique(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
############################################################## 2nd CLEANUP ###########################################################
# Second pass: clusters 10, 12, 18 and 19 flagged for removal from the
# previous dotplot; keep the rest, then re-embed and re-cluster.
merged_colossal_full_harmony_Neuts_1@meta.data$Subsetting_annotations <- ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '11', '13', '14', '15', '16', '17', '20', '21'), 'KEEP',
                                                                        ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('10', '12', '18', '19'), 'REMOVE', '?'))
# Visual check of the KEEP/REMOVE annotation on the UMAP and as proportions.
DimPlot(merged_colossal_full_harmony_Neuts_1, reduction='umap', label = F, group.by = "Subsetting_annotations")
ggplot(merged_colossal_full_harmony_Neuts_1@meta.data) + geom_bar(aes(x=All_Neuts_annotations, fill=Subsetting_annotations) , stat="count" , position="fill")
# Subset the kept cells back into the working object.
Idents(merged_colossal_full_harmony_Neuts_1) <- "Subsetting_annotations"
merged_colossal_full_harmony_Neuts <- subset(merged_colossal_full_harmony_Neuts_1, idents = c('KEEP'), invert = FALSE)
DimPlot(merged_colossal_full_harmony_Neuts , reduction='umap')
# Recompute UMAP and neighbor graph on the harmony reduction (20 PCs, fixed seed).
merged_colossal_full_harmony_Neuts <- RunUMAP(merged_colossal_full_harmony_Neuts, dims = 1:20, n.neighbors = 30, min.dist = 0.3, spread = 1, verbose = FALSE, seed.use = 21212, reduction = 'harmony')
merged_colossal_full_harmony_Neuts <- FindNeighbors(merged_colossal_full_harmony_Neuts, dims = 1:20, k.param = 20, verbose = FALSE, reduction = 'harmony')
# Re-cluster at resolution 1 and inspect.
merged_colossal_full_harmony_Neuts_1 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 1, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_1 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 1")
# Marker pipeline (same parameters as before): DE test, p-adj filter, rank, top-10 dotplot.
merged_colossal_full_harmony_Neuts_1_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_1, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers[which(merged_colossal_full_harmony_Neuts_1_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$cluster,decreasing = FALSE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_1, features = unique(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
############################################################## 3rd CLEANUP ###########################################################
# Third pass: clusters 11, 13 and 15 flagged for removal; after re-clustering,
# the marker table and metadata are written to disk and the object is saved.
merged_colossal_full_harmony_Neuts_1@meta.data$Subsetting_annotations <- ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '12', '14'), 'KEEP',
                                                                        ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('11', '13', '15'), 'REMOVE', '?'))
# Visual check of the KEEP/REMOVE annotation.
DimPlot(merged_colossal_full_harmony_Neuts_1, reduction='umap', label = F, group.by = "Subsetting_annotations")
ggplot(merged_colossal_full_harmony_Neuts_1@meta.data) + geom_bar(aes(x=All_Neuts_annotations, fill=Subsetting_annotations) , stat="count" , position="fill")
# Subset kept cells and rebuild embedding / clustering as in previous rounds.
Idents(merged_colossal_full_harmony_Neuts_1) <- "Subsetting_annotations"
merged_colossal_full_harmony_Neuts <- subset(merged_colossal_full_harmony_Neuts_1, idents = c('KEEP'), invert = FALSE)
DimPlot(merged_colossal_full_harmony_Neuts , reduction='umap')
merged_colossal_full_harmony_Neuts <- RunUMAP(merged_colossal_full_harmony_Neuts, dims = 1:20, n.neighbors = 30, min.dist = 0.3, spread = 1, verbose = FALSE, seed.use = 21212, reduction = 'harmony')
merged_colossal_full_harmony_Neuts <- FindNeighbors(merged_colossal_full_harmony_Neuts, dims = 1:20, k.param = 20, verbose = FALSE, reduction = 'harmony')
merged_colossal_full_harmony_Neuts_1 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 1, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_1 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 1")
# Marker pipeline; the filtered table is also exported as a TSV this time.
merged_colossal_full_harmony_Neuts_1_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_1, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers[which(merged_colossal_full_harmony_Neuts_1_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$cluster,decreasing = FALSE),]
write.table(format(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1, digits=2), file="merged_colossal_full_harmony_Neuts_1_Markers_padj0.1.tsv", row.names=T, col.names=T, quote=F, sep="\t")
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_1, features = unique(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
# Write metadata file and save clean object, to be re-harmonized before final clustering and annotations
write.table(format(merged_colossal_full_harmony_Neuts_1@meta.data, digits=2), file="201001_merged_colossal_full_harmony_Neuts_1_metadata.tsv", row.names=T, col.names=T, quote=F, sep="\t")
save(merged_colossal_full_harmony_Neuts_1, file="201001_merged_colossal_full_harmony_Neuts_1_1.Robj")
############################################################## 4th CLEANUP ###########################################################
# Final pass: restart from the re-harmonized CLEANED object, repeat the
# annotate / subset / re-embed cycle, then cluster at resolutions 1 and 2 and
# derive markers for both resolutions.
load("~/Tristan/Labo MK/Manips/COVID/200428 patient_data_SO/merged/merged_colossal_paper_final/annotated_res_0.6/neuts/CLEANED/merged_colossal_paper_final_res0.6_neuts.RData")
# The .RData restores an object called `neuts`; rebind and drop the original.
merged_colossal_full_harmony_Neuts <- neuts
rm(neuts)
# Cluster at resolution 1 and inspect.
merged_colossal_full_harmony_Neuts_1 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 1, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_1 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 1")
# Marker pipeline (same parameters as every previous round).
merged_colossal_full_harmony_Neuts_1_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_1, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers[which(merged_colossal_full_harmony_Neuts_1_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$cluster,decreasing = FALSE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_1, features = unique(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
# Annotate all clusters as neutrophils and mark contaminant clusters
# (7, 8, 9, 13, 16, 17, 18, 19) for removal based on the dotplot above.
merged_colossal_full_harmony_Neuts_1@meta.data$All_Neuts_annotations <- ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19'), 'NEUTS', '?')
merged_colossal_full_harmony_Neuts_1@meta.data$Subsetting_annotations <- ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('0', '1', '2', '3', '4', '5', '6', '10', '11', '12', '14', '15'), 'KEEP',
                                                                        ifelse(merged_colossal_full_harmony_Neuts_1@meta.data$seurat_clusters %in% c('7', '8', '9', '13', '16', '17', '18', '19'), 'REMOVE', '?'))
# Visual check of the annotation.
DimPlot(merged_colossal_full_harmony_Neuts_1, reduction='umap', label = F, group.by = "Subsetting_annotations")
ggplot(merged_colossal_full_harmony_Neuts_1@meta.data) + geom_bar(aes(x=All_Neuts_annotations, fill=Subsetting_annotations) , stat="count" , position="fill")
# Subset kept cells and rebuild the embedding and neighbor graph.
Idents(merged_colossal_full_harmony_Neuts_1) <- "Subsetting_annotations"
merged_colossal_full_harmony_Neuts <- subset(merged_colossal_full_harmony_Neuts_1, idents = c('KEEP'), invert = FALSE)
DimPlot(merged_colossal_full_harmony_Neuts , reduction='umap')
merged_colossal_full_harmony_Neuts <- RunUMAP(merged_colossal_full_harmony_Neuts, dims = 1:20, n.neighbors = 30, min.dist = 0.3, spread = 1, verbose = FALSE, seed.use = 21212, reduction = 'harmony')
merged_colossal_full_harmony_Neuts <- FindNeighbors(merged_colossal_full_harmony_Neuts, dims = 1:20, k.param = 20, verbose = FALSE, reduction = 'harmony')
# Cluster the cleaned object at two resolutions: 1 (coarse) and 2 (fine).
merged_colossal_full_harmony_Neuts_1 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 1, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_1 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 1")
merged_colossal_full_harmony_Neuts_2 <- FindClusters(merged_colossal_full_harmony_Neuts, verbose = TRUE, algorithm = 1, resolution = 2, method = "igraph", random.seed = 21212)
DimPlot(merged_colossal_full_harmony_Neuts_2 , reduction='umap', label = T, label.size = 6, repel = T) + labs(color = "Resolution 2")
# Marker pipeline for resolution 1.
merged_colossal_full_harmony_Neuts_1_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_1, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers[which(merged_colossal_full_harmony_Neuts_1_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1$cluster,decreasing = FALSE),]
merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_1_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_1, features = unique(merged_colossal_full_harmony_Neuts_1_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
# Marker pipeline for resolution 2 (used for the final fine annotations).
merged_colossal_full_harmony_Neuts_2_Markers <- FindAllMarkers(merged_colossal_full_harmony_Neuts_2, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.40, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
merged_colossal_full_harmony_Neuts_2_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_2_Markers[which(merged_colossal_full_harmony_Neuts_2_Markers$p_val_adj<0.1),]
merged_colossal_full_harmony_Neuts_2_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_2_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_2_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
merged_colossal_full_harmony_Neuts_2_Markers_padj0.1 <- merged_colossal_full_harmony_Neuts_2_Markers_padj0.1[order(merged_colossal_full_harmony_Neuts_2_Markers_padj0.1$cluster,decreasing = FALSE),]
merged_colossal_full_harmony_Neuts_2_Markers_padj0.1_Top10 <- merged_colossal_full_harmony_Neuts_2_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DotPlot(merged_colossal_full_harmony_Neuts_2, features = unique(merged_colossal_full_harmony_Neuts_2_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
######################################################### FINAL ANNOTATIONS RES 2 ###########################################################
#Annotate the objects with different levels of resolutions, going from ALL cells to Coarse subtypes to Fine subtypes
# NOTE(review): only clusters 0-11 are labelled 'NEUTS' here, while the
# resolution-2 annotation below references clusters up to at least 15, so any
# cell in a higher-numbered cluster is labelled '?' — confirm this is intended.
merged_colossal_full_harmony_Neuts_2@meta.data$ALL_Neuts <- ifelse(merged_colossal_full_harmony_Neuts_2@meta.data$seurat_clusters %in% c('0','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'), 'NEUTS', '?')
DimPlot(merged_colossal_full_harmony_Neuts_2, reduction='umap', label = F, group.by = "ALL_Neuts")
# Fine-grained subtype annotation. A named lookup vector replaces the original
# 27-level nested ifelse() chain: identical cluster -> label mapping, but far
# easier to audit and extend. Cluster ids absent from the table get '?'
# (the original else branch; also covers NA cluster values).
# NOTE(review): the labels jump from S100A12_8 to S100A12_10 (no S100A12_9);
# kept verbatim to preserve the original annotation -- confirm intentional.
fine_labels <- c(
  '0'  = 'S100A12_1',  '1'  = 'RIBO_1',     '2'  = 'G0S2_1',
  '3'  = 'ISG_1',      '4'  = 'ISG_2',      '5'  = 'RIBO_2',
  '6'  = 'NEAT1_1',    '7'  = 'SLPI_1',     '8'  = 'S100A12_2',
  '9'  = 'NEAT1_2',    '10' = 'S100A12_3',  '11' = 'S100A12_4',
  '12' = 'S100A12_5',  '13' = 'NEAT1_3',    '14' = 'G0S2_2',
  '15' = 'S100A12_6',  '16' = 'SLPI_2',     '17' = 'ISG_3',
  '18' = 'NEAT1_4',    '19' = 'LCN2_1',     '20' = 'ISG_4',
  '21' = 'S100A12_7',  '22' = 'S100A12_8',  '23' = 'ISG_5',
  '24' = 'LCN2_2',     '25' = 'S100A12_10', '26' = 'S100A12_11'
)
fine_anno <- unname(fine_labels[as.character(merged_colossal_full_harmony_Neuts_2@meta.data$seurat_clusters)])
fine_anno[is.na(fine_anno)] <- '?'
merged_colossal_full_harmony_Neuts_2@meta.data$Fine_Subs_annotations <- fine_anno
DimPlot(merged_colossal_full_harmony_Neuts_2 , reduction='umap', label = T, repel = T, group.by = "Fine_Subs_annotations")
# Coarse subtype annotation: collapse the 27 clusters into 7 families
# (S100A12, RIBO., G0S2, ISG, NEAT1, SLPI, LCN2) via a lookup table that
# replaces the original 27-level nested ifelse() chain. Mapping unchanged.
coarse_labels <- c(
  '0'  = 'S100A12', '1'  = 'RIBO.',   '2'  = 'G0S2',
  '3'  = 'ISG',     '4'  = 'ISG',     '5'  = 'RIBO.',
  '6'  = 'NEAT1',   '7'  = 'SLPI',    '8'  = 'S100A12',
  '9'  = 'NEAT1',   '10' = 'S100A12', '11' = 'S100A12',
  '12' = 'S100A12', '13' = 'NEAT1',   '14' = 'G0S2',
  '15' = 'S100A12', '16' = 'SLPI',    '17' = 'ISG',
  '18' = 'NEAT1',   '19' = 'LCN2',    '20' = 'ISG',
  '21' = 'S100A12', '22' = 'S100A12', '23' = 'ISG',
  '24' = 'LCN2',    '25' = 'S100A12', '26' = 'S100A12'
)
coarse_anno <- unname(coarse_labels[as.character(merged_colossal_full_harmony_Neuts_2@meta.data$seurat_clusters)])
coarse_anno[is.na(coarse_anno)] <- '?'   # original else branch for unmapped clusters
merged_colossal_full_harmony_Neuts_2@meta.data$Coarse_Subs_annotations <- coarse_anno
DimPlot(merged_colossal_full_harmony_Neuts_2 , reduction='umap', label = T, repel = T, group.by = "Coarse_Subs_annotations")
#Organize the annotations for plotting purposes
# Re-level the cluster idents so clusters of the same coarse family
# (S100A12, RIBO, G0S2, ISG, NEAT1, SLPI, LCN2 -- see coarse mapping above)
# appear next to each other on the dot plots.
Idents(merged_colossal_full_harmony_Neuts_2) <- "seurat_clusters"
My_level_seurat<- c('0', '8', '10', '11', '12', '15', '21', '22', '25', '26', '1', '5', '2', '14', '3', '4', '17', '20', '23', '6', '9', '13', '18', '7', '16', '19', '24')
levels(merged_colossal_full_harmony_Neuts_2) <- My_level_seurat
# Data-driven top-10 markers per cluster (RNA assay).
DotPlot(merged_colossal_full_harmony_Neuts_2, features = unique(merged_colossal_full_harmony_Neuts_2_Markers_padj0.1_Top10$gene), cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
# Curated marker panel for the same clusters (note: default assay here).
DotPlot(merged_colossal_full_harmony_Neuts_2, features = c('RPS9', 'PI3', 'SLPI', 'NFKBIA', 'CXCL8', 'G0S2', 'FTH1', 'MALAT1', 'NEAT1', 'FCGR3B', 'IFITM3', 'IFITM1', 'IFIT3', 'IFIT2', 'IFIT1', 'LY6E', 'ISG15', 'MT2A', 'S100A11', 'GCA', 'CST7', 'ACTB', 'S100A4', 'S100A6', 'S100A9', 'MYL6', 'TSPO', 'S100A8', 'S100A12', 'PGLYRP1', 'MMP9', 'CAMP', 'RETN', 'LTF', 'LCN2'), cols = "RdBu") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
# Same curated panel at coarse-subtype resolution, exported for Figure 1C.
Idents(merged_colossal_full_harmony_Neuts_2) <- "Coarse_Subs_annotations"
My_level_coarse<- c('LCN2', 'S100A12', 'ISG', 'NEAT1', 'G0S2', 'SLPI', 'RIBO.')
levels(merged_colossal_full_harmony_Neuts_2) <- My_level_coarse
pdf('1C Neuts DotPlot.pdf', width=5, height=6, useDingbats=F)
DotPlot(merged_colossal_full_harmony_Neuts_2, features = c('RPS9', 'PI3', 'SLPI', 'NFKBIA', 'CXCL8', 'G0S2', 'FTH1', 'MALAT1', 'NEAT1', 'FCGR3B', 'IFITM3', 'IFITM1', 'IFIT3', 'IFIT2', 'IFIT1', 'LY6E', 'ISG15', 'MT2A', 'S100A11', 'GCA', 'CST7', 'ACTB', 'S100A4', 'S100A6', 'S100A9', 'MYL6', 'TSPO', 'S100A8', 'S100A12', 'PGLYRP1', 'MMP9', 'CAMP', 'RETN', 'LTF', 'LCN2'), cols = "RdBu") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14)) + coord_flip()
dev.off()
#Export counts used to create prism graphs
# Cross-tabulate cells by sample (SNP-demultiplexed) and coarse subtype.
merged_colossal_full_harmony_Neuts_2_Counts <- table(merged_colossal_full_harmony_Neuts_2@meta.data[,c('SAMPLE.by.SNPs', 'Coarse_Subs_annotations')])
write.table(merged_colossal_full_harmony_Neuts_2_Counts, file="merged_colossal_full_harmony_Neuts_2_Counts.tsv", row.names=T, col.names=T, sep="\t")
# Write metadata file and save clean object
# format(digits=2) truncates numeric precision in the exported metadata.
write.table(format(merged_colossal_full_harmony_Neuts_2@meta.data, digits=2), file="201001_merged_colossal_full_harmony_Neuts_2_metadata_FINAL.tsv", row.names=T, col.names=T, quote=F, sep="\t")
save(merged_colossal_full_harmony_Neuts_2, file="201001_merged_colossal_full_harmony_Neuts_2_FINAL.Robj")
# Produce UMAPs of Figure 1
# NOTE: mycolorsstatus / mycolorsseverity are palettes defined earlier in the
# full script (outside this chunk).
pdf('1D Neuts UMAP.pdf', width=6, height=5, useDingbats=F)
DimPlot(merged_colossal_full_harmony_Neuts_2 , reduction='umap', label = F, group.by = "Coarse_Subs_annotations", cols = "Paired", order = c('RIBO.', 'SLPI', 'G0S2', 'NEAT1', 'ISG', 'S100A12', 'LCN2'))
dev.off()
pdf('1E Neuts UMAP status.pdf', width=6, height=5, useDingbats=F)
DimPlot(merged_colossal_full_harmony_Neuts_2 , reduction='umap', label = F, group.by = "covid_status", cols = mycolorsstatus)
dev.off()
pdf('1F Neuts UMAP severity.pdf', width=6, height=5, useDingbats=F)
DimPlot(merged_colossal_full_harmony_Neuts_2 , reduction='umap', label = F, group.by = "Qualitative_score", cols = mycolorsseverity)
dev.off()
############################################## ANNOTATIONS FOR PHEMD GRAPHS ########################################################
# PhEMD labels are the coarse families prefixed with 'NEUTS_'. A lookup table
# replaces the original 27-level nested ifelse() chain (identical mapping);
# unmapped or NA cluster ids fall back to '?' as before.
phemd_labels <- c(
  '0'  = 'NEUTS_S100A12', '1'  = 'NEUTS_RIBO',    '2'  = 'NEUTS_G0S2',
  '3'  = 'NEUTS_ISG',     '4'  = 'NEUTS_ISG',     '5'  = 'NEUTS_RIBO',
  '6'  = 'NEUTS_NEAT1',   '7'  = 'NEUTS_SLPI',    '8'  = 'NEUTS_S100A12',
  '9'  = 'NEUTS_NEAT1',   '10' = 'NEUTS_S100A12', '11' = 'NEUTS_S100A12',
  '12' = 'NEUTS_S100A12', '13' = 'NEUTS_NEAT1',   '14' = 'NEUTS_G0S2',
  '15' = 'NEUTS_S100A12', '16' = 'NEUTS_SLPI',    '17' = 'NEUTS_ISG',
  '18' = 'NEUTS_NEAT1',   '19' = 'NEUTS_LCN2',    '20' = 'NEUTS_ISG',
  '21' = 'NEUTS_S100A12', '22' = 'NEUTS_S100A12', '23' = 'NEUTS_ISG',
  '24' = 'NEUTS_LCN2',    '25' = 'NEUTS_S100A12', '26' = 'NEUTS_S100A12'
)
phemd_anno <- unname(phemd_labels[as.character(merged_colossal_full_harmony_Neuts_2@meta.data$seurat_clusters)])
phemd_anno[is.na(phemd_anno)] <- '?'
merged_colossal_full_harmony_Neuts_2@meta.data$harmony_cluster_final <- phemd_anno
DimPlot(merged_colossal_full_harmony_Neuts_2, group.by = "harmony_cluster_final")
# Export the full per-cell metadata (including the new column) for PhEMD.
write.table(merged_colossal_full_harmony_Neuts_2@meta.data, file="201001_NEUTS_METADATA_FOR_WILL.tsv", row.names=T, col.names=T, sep="\t")
##################################################################################################
##################################### NEUTS SIGNATURES ###########################################
##################################################################################################
# Call neutrophil degranulation genes and use them to produce a degranulation score
# NOTE(review): get_genes_s3() and saturate() are project helpers defined
# elsewhere; presumably they extract a gene x cell expression matrix and
# floor each per-cell mean at `sat` -- confirm against their definitions.
Neuts_Degranulation_genes <- scan("~/Tristan/Labo MK/Manips/COVID/NEUTROPHIL_DEGRANULATION.csv", what = "character", sep = NULL)
gene_df <- get_genes_s3(Neuts_Degranulation_genes, merged_colossal_full_harmony_Neuts_2, drop = T)
# Per-cell mean over the signature genes.
Score_Degranulation <- colMeans(gene_df)
merged_colossal_full_harmony_Neuts_2@meta.data$DegranuScore_0 <- saturate(vec=Score_Degranulation, sat=0, binary=FALSE)
# Call ISG genes and use them to produce an ISG score
# Hard-coded interferon-stimulated gene panel.
Sig_IFNNeuts = c('MT2A', 'ISG15', 'LY6E', 'IFIT1', 'IFIT2', 'IFIT3', 'IFITM1', 'IFITM3', 'IFI44L', 'IFI6', 'MX1', 'IFI27')
gene_df <- get_genes_s3(Sig_IFNNeuts, merged_colossal_full_harmony_Neuts_2, drop = T)
Score_ISG <- colMeans(gene_df)
merged_colossal_full_harmony_Neuts_2@meta.data$ISGScore_0 <- saturate(vec=Score_ISG, sat=0, binary=FALSE)
# Subset neutrophils according to status and/or severity to produce graphs and stats
# First split by COVID status (CTRL / NEG / POS)...
Idents(merged_colossal_full_harmony_Neuts_2) <- merged_colossal_full_harmony_Neuts_2@meta.data$covid_status
merged_colossal_full_harmony_Neuts_2_CTRL <- subset(merged_colossal_full_harmony_Neuts_2, idents = c('CTRL'), invert = FALSE)
merged_colossal_full_harmony_Neuts_2_NEG <- subset(merged_colossal_full_harmony_Neuts_2, idents = c('NEG'), invert = FALSE)
merged_colossal_full_harmony_Neuts_2_POS <- subset(merged_colossal_full_harmony_Neuts_2, idents = c('POS'), invert = FALSE)
# ...then by clinical severity (MILD / SEVERE) on the full object...
Idents(merged_colossal_full_harmony_Neuts_2) <- merged_colossal_full_harmony_Neuts_2@meta.data$Qualitative_score
merged_colossal_full_harmony_Neuts_2_MILD <- subset(merged_colossal_full_harmony_Neuts_2, idents = c('MILD'), invert = FALSE)
merged_colossal_full_harmony_Neuts_2_SEVERE <- subset(merged_colossal_full_harmony_Neuts_2, idents = c('SEVERE'), invert = FALSE)
# ...and finally by severity within each status group (2x2 combinations).
Idents(merged_colossal_full_harmony_Neuts_2_NEG) <- merged_colossal_full_harmony_Neuts_2_NEG@meta.data$Qualitative_score
Idents(merged_colossal_full_harmony_Neuts_2_POS) <- merged_colossal_full_harmony_Neuts_2_POS@meta.data$Qualitative_score
merged_colossal_full_harmony_Neuts_2_NEG_MILD <- subset(merged_colossal_full_harmony_Neuts_2_NEG, idents = c('MILD'), invert = FALSE)
merged_colossal_full_harmony_Neuts_2_POS_MILD <- subset(merged_colossal_full_harmony_Neuts_2_POS, idents = c('MILD'), invert = FALSE)
merged_colossal_full_harmony_Neuts_2_NEG_SEVERE <- subset(merged_colossal_full_harmony_Neuts_2_NEG, idents = c('SEVERE'), invert = FALSE)
merged_colossal_full_harmony_Neuts_2_POS_SEVERE <- subset(merged_colossal_full_harmony_Neuts_2_POS, idents = c('SEVERE'), invert = FALSE)
# Produce violin plots of Figure 1 and S1
# ISG and degranulation score violins, variously grouped by status, severity
# or coarse subtype. mycolorsseverity is a palette defined earlier in the
# full script (outside this chunk).
pdf('S1M ISG_VIOLIN.pdf', width=4.5, height=5, useDingbats=F)
VlnPlot(merged_colossal_full_harmony_Neuts_2, features = "ISGScore_0", group.by = "covid_status", pt.size = 0.0, split.by = "Qualitative_score", cols = mycolorsseverity)
dev.off()
pdf('S1N ISG VIOLIN ALL SUBS ALL.pdf', width=6.5, height=5, useDingbats=F)
VlnPlot(merged_colossal_full_harmony_Neuts_2, features = "ISGScore_0", group.by = "Coarse_Subs_annotations", pt.size = 0.0, split.by = "Qualitative_score", cols = mycolorsseverity)
dev.off()
pdf('S1O ISG VIOLIN ALL SUBS NEG.pdf', width=6.5, height=5, useDingbats=F)
VlnPlot(merged_colossal_full_harmony_Neuts_2_NEG, features = "ISGScore_0", group.by = "Coarse_Subs_annotations", pt.size = 0.0, split.by = "Qualitative_score", cols = mycolorsseverity)
dev.off()
pdf('1K ISG VIOLIN ALL SUBS POS.pdf', width=6.5, height=5, useDingbats=F)
VlnPlot(merged_colossal_full_harmony_Neuts_2_POS, features = "ISGScore_0", group.by = "Coarse_Subs_annotations", pt.size = 0.0, split.by = "Qualitative_score", cols = mycolorsseverity)
dev.off()
pdf('S1P DEGRANU_VIOLIN_ALL.pdf', width=3, height=5, useDingbats=F)
VlnPlot(merged_colossal_full_harmony_Neuts_2, features = "DegranuScore_0", group.by = "Qualitative_score", pt.size = 0.0, cols = mycolorsseverity)
dev.off()
pdf('S1Q DEGRANU_VIOLIN_SPLIT.pdf', width=4.5, height=5, useDingbats=F)
VlnPlot(merged_colossal_full_harmony_Neuts_2, features = "DegranuScore_0", group.by = "covid_status", pt.size = 0.0, split.by = "Qualitative_score", cols = mycolorsseverity)
dev.off()
# Wilcoxon tests
# Pull the per-cell ISG scores for every status/severity subset.
# NOTE(review): only NM_ISG/NS_ISG are used in the comparison below;
# presumably the other pairs were tested interactively by editing the
# data.frame construction -- the unused vectors are kept for that purpose.
CTRL_ISG <- merged_colossal_full_harmony_Neuts_2_CTRL@meta.data$ISGScore_0
NEG_ISG <- merged_colossal_full_harmony_Neuts_2_NEG@meta.data$ISGScore_0
POS_ISG <- merged_colossal_full_harmony_Neuts_2_POS@meta.data$ISGScore_0
MILD_ISG <- merged_colossal_full_harmony_Neuts_2_MILD@meta.data$ISGScore_0
SEV_ISG <- merged_colossal_full_harmony_Neuts_2_SEVERE@meta.data$ISGScore_0
PM_ISG <- merged_colossal_full_harmony_Neuts_2_POS_MILD@meta.data$ISGScore_0
PS_ISG <- merged_colossal_full_harmony_Neuts_2_POS_SEVERE@meta.data$ISGScore_0
NM_ISG <- merged_colossal_full_harmony_Neuts_2_NEG_MILD@meta.data$ISGScore_0
NS_ISG <- merged_colossal_full_harmony_Neuts_2_NEG_SEVERE@meta.data$ISGScore_0
# Long-format frame for NEG mild vs NEG severe; boxplot + rank-sum test
# (exact = FALSE because of large n and ties).
my_data <- data.frame(group = c(rep("NEG_MILD", length(NM_ISG)), rep("NEG_SEVERE", length(NS_ISG))),
ISG_score = c(NM_ISG, NS_ISG))
ggboxplot(my_data, x = "group", y = "ISG_score",
color = "group", palette = c("#00AFBB", "#E7B800"),
ylab = "ISG_score", xlab = "Groups")
wilcox.test(ISG_score ~ group, data = my_data, exact = FALSE)
# Same extraction for the degranulation scores.
CTRL_DEGR <- merged_colossal_full_harmony_Neuts_2_CTRL@meta.data$DegranuScore_0
NEG_DEGR <- merged_colossal_full_harmony_Neuts_2_NEG@meta.data$DegranuScore_0
POS_DEGR <- merged_colossal_full_harmony_Neuts_2_POS@meta.data$DegranuScore_0
MILD_DEGR <- merged_colossal_full_harmony_Neuts_2_MILD@meta.data$DegranuScore_0
SEV_DEGR <- merged_colossal_full_harmony_Neuts_2_SEVERE@meta.data$DegranuScore_0
PM_DEGR <- merged_colossal_full_harmony_Neuts_2_POS_MILD@meta.data$DegranuScore_0
PS_DEGR <- merged_colossal_full_harmony_Neuts_2_POS_SEVERE@meta.data$DegranuScore_0
NM_DEGR <- merged_colossal_full_harmony_Neuts_2_NEG_MILD@meta.data$DegranuScore_0
NS_DEGR <- merged_colossal_full_harmony_Neuts_2_NEG_SEVERE@meta.data$DegranuScore_0
# Degranulation: NEG mild vs POS mild.
my_data <- data.frame(group = c(rep("NEG_MILD", length(NM_DEGR)), rep("POS_MILD", length(PM_DEGR))),
DEGR_score = c(NM_DEGR, PM_DEGR))
ggboxplot(my_data, x = "group", y = "DEGR_score",
color = "group", palette = c("#00AFBB", "#E7B800"),
ylab = "DEGR_score", xlab = "Groups")
wilcox.test(DEGR_score ~ group, data = my_data, exact = FALSE)
# Focused analysis of the LCN2 coarse subtype: ISG score in POS mild vs
# POS severe cells of that subtype only.
Idents(merged_colossal_full_harmony_Neuts_2) <- "Coarse_Subs_annotations"
LCN2_Neuts <- subset(merged_colossal_full_harmony_Neuts_2, idents = c('LCN2'), invert = FALSE)
DimPlot(LCN2_Neuts , reduction='umap')
# Keep COVID-positive LCN2 cells, then split them by severity.
Idents(LCN2_Neuts) <- LCN2_Neuts@meta.data$covid_status
LCN2_Neuts_POS <- subset(LCN2_Neuts, idents = c('POS'), invert = FALSE)
Idents(LCN2_Neuts_POS) <- "Qualitative_score"
LCN2_Neuts_POS_MILD <- subset(LCN2_Neuts_POS, idents = c('MILD'), invert = FALSE)
LCN2_Neuts_POS_SEVERE <- subset(LCN2_Neuts_POS, idents = c('SEVERE'), invert = FALSE)
# NOTE: PM_ISG / PS_ISG are reused names; they now refer to LCN2 cells only.
PM_ISG <- LCN2_Neuts_POS_MILD@meta.data$ISGScore_0
PS_ISG <- LCN2_Neuts_POS_SEVERE@meta.data$ISGScore_0
my_data <- data.frame(group = c(rep("POS_MILD", length(PM_ISG)), rep("POS_SEVERE", length(PS_ISG))),
ISG_score = c(PM_ISG, PS_ISG))
ggboxplot(my_data, x = "group", y = "ISG_score",
color = "group", palette = c("#00AFBB", "#E7B800"),
ylab = "ISG_score", xlab = "Groups")
wilcox.test(ISG_score ~ group, data = my_data, exact = FALSE)
##################################################################################################
####################################### DEG + VOLCANO ############################################
##################################################################################################
# VOLCANO PLOT ALL NEUTS POS vs NEG
# Differential expression: POS (ident.1) vs NEG (ident.2), so positive
# avg_logFC means up-regulated in POS cells.
Idents(merged_colossal_full_harmony_Neuts_2) <- merged_colossal_full_harmony_Neuts_2@meta.data$covid_status
merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers <- FindMarkers(merged_colossal_full_harmony_Neuts_2, ident.1="POS", ident.2="NEG", min.pct = 0.25, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
write.table(format(merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers, digits=2), file="merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers_prevolcano.tsv", row.names=T, col.names=T, quote=F, sep="\t")
# Commented-out line below is a reload shortcut for re-running the volcano
# without redoing the (slow) FindMarkers call.
#merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers = read.table("~/Tristan/Labo MK/Manips/COVID/200428 patient_data_SO/200822 SEURAT WD/merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers_prevolcano.tsv", sep='\t', header=T)
# VOLCANO PLOT ALL NEUTS MILD vs SEVERE
# MILD (ident.1) vs SEVERE (ident.2): positive avg_logFC = up in MILD.
Idents(merged_colossal_full_harmony_Neuts_2) <- merged_colossal_full_harmony_Neuts_2@meta.data$Qualitative_score
merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers <- FindMarkers(merged_colossal_full_harmony_Neuts_2, ident.1="MILD", ident.2="SEVERE", min.pct = 0.25, test.use="poisson", latent.vars = "LIBRARY", assay = "RNA")
write.table(format(merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers, digits=2), file="merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers_prevolcano.tsv", row.names=T, col.names=T, quote=F, sep="\t")
#merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers = read.table("~/Tristan/Labo MK/Manips/COVID/200428 patient_data_SO/200822 SEURAT WD/merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers_prevolcano.tsv", sep='\t', header=T)
# Volcano plots of the two DEG tables above.
# NOTE(review): the title says 'ALL NEG VS POS' but FindMarkers used
# ident.1='POS', ident.2='NEG' (positive logFC = up in POS) -- confirm the
# intended ordering. Also, pCutoff = 10e-20 equals 1e-19; if a 1e-20
# threshold was intended, write 1e-20.
pdf('S1I Volcano STATUS.pdf', width=6, height=6, useDingbats=F)
EnhancedVolcano(merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers,
lab = rownames(merged_colossal_full_harmony_Neuts_2_DEG_STATUS_Markers),
x = 'avg_logFC',
y = 'p_val',
xlim = c(-1,0.75),
title = 'ALL NEG VS POS',
pCutoff = 10e-20,
FCcutoff = 0.2,
pointSize = 2.0,
labSize = 2,
col=c('black', 'black', 'black', 'red3'),
colAlpha = 1,
drawConnectors = T,
widthConnectors = 0.25)
dev.off()
# Severity volcano: positive logFC = up in MILD (ident.1).
pdf('S1K Volcano SEVERITY.pdf', width=6, height=6, useDingbats=F)
EnhancedVolcano(merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers,
lab = rownames(merged_colossal_full_harmony_Neuts_2_DEG_SEVERITY_Markers),
x = 'avg_logFC',
y = 'p_val',
xlim = c(-1.5,1),
title = 'ALL MILD VS SEVERE',
pCutoff = 10e-20,
FCcutoff = 0.2,
pointSize = 2.0,
labSize = 2,
col=c('black', 'black', 'black', 'red3'),
colAlpha = 1,
drawConnectors = T,
widthConnectors = 0.25)
dev.off()
# Enjoy |
9897499070a5738841c1f0d13cb27890985641a3 | b587fbd68a3d03639771165aaed0bac6647e1bbe | /plot3.R | 19520bf8375b9e93ab660dd64d8c45d57fc3bf22 | [] | no_license | DonRegan/ExData_Plotting1 | af884a19ad584a6b81f38603d12564d673f86192 | 960cb6d6f21ff1702be54955afb7dcabbe8a598a | refs/heads/master | 2021-01-24T21:08:03.343042 | 2015-11-07T23:29:34 | 2015-11-07T23:29:34 | 45,721,119 | 0 | 0 | null | 2015-11-07T03:34:27 | 2015-11-07T03:34:27 | null | UTF-8 | R | false | false | 486 | r | plot3.R | ##THIRD PLOT GENERATOR
# Plot the three sub-metering time series from the household power data and
# save a PNG copy of the plot.
#
# Originally the function read the global `elec_power` object and wrote to a
# hard-coded path; both are now parameters with defaults that reproduce the
# original behaviour exactly, so existing calls (my_plot3()) are unchanged.
#
# data: data frame with DateTime, Sub_metering_1/2/3 columns
#       (defaults to the global `elec_power`, evaluated lazily).
# file: output PNG path (the ./figure directory must already exist).
my_plot3 <- function(data = elec_power, file = "./figure/plot3.png") {
  plot(data$DateTime, data$Sub_metering_1, type = "l",
       xlab = "", ylab = "Energy sub metering")
  lines(data$DateTime, data$Sub_metering_2, col = "red")
  lines(data$DateTime, data$Sub_metering_3, col = "blue")
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         cex = 0.8)
  # Copy the on-screen plot to a 480x480 PNG, then close the PNG device.
  dev.copy(png, file = file, width = 480, height = 480)
  dev.off()
}
my_plot3()
|
182b9bba1f6e1c3c687b80c93d1727ad5a37e1d2 | 0ff06478c18026955ebf512cd3dcaef7293e1c30 | /man/erccSpikeConcentration.Rd | 455e1400033ec3e69baf4465e402edf6d5171161 | [
"CC0-1.0"
] | permissive | charles-plessy/smallCAGEqc | 83d19b21890eed9455eaca13c87455bd53f45950 | e3642f25b43314779c33388129b5d47a5a1538ec | refs/heads/master | 2021-03-13T01:36:47.956099 | 2018-01-25T04:27:20 | 2018-01-25T04:27:20 | 34,089,765 | 1 | 1 | null | 2017-03-22T05:47:31 | 2015-04-17T01:24:16 | R | UTF-8 | R | false | true | 1,661 | rd | erccSpikeConcentration.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/erccSpikeConcentration.R
\name{erccSpikeConcentration}
\alias{erccSpikeConcentration}
\title{erccSpikeConcentration}
\usage{
erccSpikeConcentration(file = "http://tools.thermofisher.com/content/sfs/manuals/cms_095046.txt")
}
\arguments{
\item{file}{File name or URL where to find the \sQuote{cms_095046.txt} text file.
Defaults to the current URL on the Thermo Fisher website.}
}
\value{
A data frame representing the file \sQuote{cms_095046.txt} from the vendor's
website.
The original column names are \sQuote{Re-sort ID}, \sQuote{ERCC ID}, \sQuote{subgroup},
\sQuote{concentration in Mix 1 (attomoles/ul)},
\sQuote{concentration in Mix 2 (attomoles/ul)}, \sQuote{expected fold-change ratio},
\sQuote{log2(Mix 1/Mix 2)}, but this does not fit well in an R data frame. Therefore,
they are renamed as: \sQuote{sortID}, \sQuote{erccID}, \sQuote{subgroup},
\sQuote{concMix1}, \sQuote{concMix2}, \sQuote{FC}, \sQuote{log2FC}.
}
\description{
Various data related to ERCC spikes
}
\details{
In quantitative transcriptome analysis, we often add synthetic RNA to the reaction for
quality control and normalisation, and the External RNA Controls Consortium (ERCC)
spikes are a popular choice, available commercially from Invitrogen (now Thermo Fisher).
In the commercial product, the spikes have different concentrations, covering six
orders of magnitude. These concentrations are given in a text file on the vendor's
website, and I do not know if the file is freely redistributable, hence this function
to retrieve the data from the Internet or a local file.
}
\seealso{
loadMoiraiStats
}
|
f86ddf5d0206723c7b0be6e7e5cd30dfa23a1b30 | 81174d9ed16aaced1e2db4d36f914d35b4244874 | /plot2.R | 8e429a2c8b33fc891c6b5a1aa11a47dfbce0891c | [] | no_license | jc037/ExData_Plotting1 | 72b93a0c56284aeb2397ecff06c4dcc3ff215117 | eda46e0f41fdc56c29c399b8993e03dea4baf05d | refs/heads/master | 2021-01-16T21:20:55.453037 | 2015-02-09T07:28:25 | 2015-02-09T07:28:25 | 30,504,977 | 0 | 0 | null | 2015-02-08T20:40:10 | 2015-02-08T20:40:10 | null | UTF-8 | R | false | false | 847 | r | plot2.R | ##Read the txt file into R as a table
# header=FALSE with explicit col.names because the file's header row is
# skipped along with everything before the window of interest; "?" encodes
# missing values in this dataset.
# NOTE(review): skip=66637 / nrows=2880 presumably select two full days of
# minute-level data (2007-02-01 and 2007-02-02) -- verify against the raw file.
projdata <- read.table("household_power_consumption.txt", header=FALSE, sep=";",nrows=2880, skip=66637, na.strings="?", col.names=c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
##Strip off the date and time columns and combine them into a single character vector
##for use in strptime
datetime <- paste(projdata$Date,projdata$Time)
##Change the dtclass character vector into a date-time class
dtclass <- strptime(datetime, format="%d/%m/%Y %H:%M:%S")
##Add the date-time class vector back on to the original data
projdataplus <- cbind(projdata, dtclass)
##Plot the data graphically
plot(projdataplus$dtclass,projdataplus$Global_active_power,type="l", ylab = "Global Active Power (kilowatts)", xlab="")
|
adc65aa5e9985e624f96783bc14f70ab76d6046b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GMMBoost/examples/bGLMM.rd.R | 3c674966d96ca0d7421bdef35b0cee196cb5a288 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,410 | r | bGLMM.rd.R | library(GMMBoost)
### Name: bGLMM
### Title: Fit Generalized Mixed-Effects Models
### Aliases: bGLMM
### Keywords: models methods
### ** Examples
# Extracted example code for GMMBoost::bGLMM (likelihood-based boosting for
# generalized mixed models); fits several models on the package's soccer data.
data("soccer")
## linear mixed models
# lm1: random intercept per team, default boosting settings.
lm1 <- bGLMM(points ~ transfer.spendings + I(transfer.spendings^2)
+ ave.unfair.score + transfer.receits + ball.possession
+ tackles + ave.attend + sold.out, rnd = list(team=~1), data = soccer)
# lm2: random intercept + random slope for ave.attend; the control list tunes
# the boosting (steps, lin, method, nue, sel.method -- see ?bGLMM).
lm2 <- bGLMM(points~transfer.spendings + I(transfer.spendings^2)
+ ave.unfair.score + transfer.receits + ball.possession
+ tackles + ave.attend + sold.out, rnd = list(team=~1 + ave.attend),
data = soccer, control = list(steps=10, lin=c("(Intercept)","ave.attend"),
method="REML", nue=1, sel.method="bic"))
## linear mixed models with categorical covariates
lm3 <- bGLMM(points ~ transfer.spendings + I(transfer.spendings^2)
+ as.factor(red.card) + as.factor(yellow.red.card)
+ transfer.receits + ball.possession + tackles + ave.attend
+ sold.out, rnd = list(team=~1), data = soccer, control = list(steps=10))
## generalized linear mixed model
# glm1: Poisson response with log link; `start` supplies initial values
# (intercept 5, remaining 31 coefficients 0).
glm1 <- bGLMM(points~transfer.spendings + I(transfer.spendings^2)
+ ave.unfair.score + transfer.receits + ball.possession
+ tackles + ave.attend + sold.out, rnd = list(team=~1),
family = poisson(link = log), data = soccer,
control = list(start=c(5,rep(0,31))))
|
5060f9e03979d752cb406b8b38f978c96ce10072 | 468b11da444b64157a6e089c73308fda8bd9f760 | /tests/testthat/test-arabia.R | 2f2a7fea8e8b5f96c44b63901398909170a86794 | [] | no_license | Quantum-Bot/arabia | 1c1bb77318cb2ba973be39d19772ac568cc1f2eb | b2c15112cbc634c750fc0c9f612f0d356c428312 | refs/heads/master | 2020-09-03T06:36:40.203639 | 2019-06-05T20:26:02 | 2019-06-05T20:26:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 295 | r | test-arabia.R | context("We can read an example SL2 file")
# Regression test for read_sl2() against the example Lowrance SL2 sonar log
# bundled with the package.
test_that("read_sl2 parses the bundled example SL2 file", {
  xdf <- read_sl2(system.file("exdat", "example.sl2", package="arabia"), verbose = FALSE)
  # Known dimensions and checksum of the example file. expect_equal() gives
  # informative failure messages (actual vs expected), unlike the original
  # expect_true(nrow(xdf) == 1308) which only reports TRUE/FALSE.
  expect_equal(nrow(xdf), 1308)
  expect_equal(ncol(xdf), 22)
  expect_equal(as.integer(sum(xdf$keelDepth)), 429L)
})
|
977ae8fe39c4c03e25cc7efcffd9ed4dfc456324 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mixtools/examples/wkde.Rd.R | abd807901b3bed503d6c1a797e6194194e84b8a2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 982 | r | wkde.Rd.R | library(mixtools)
### Name: wkde
### Title: Weighted Univariate (Normal) Kernel Density Estimate
### Aliases: wkde wkde.symm
### Keywords: file
### ** Examples
# Extracted example for mixtools::wkde: simulate a 2-component multivariate
# normal mixture, fit a nonparametric mixture with npEM(), then plot the
# posterior-weighted KDE of each coordinate block under each component.
# Mixture with mv gaussian model
set.seed(100)
m <- 2 # no. of components
r <- 3 # no. of repeated measures (coordinates)
lambda <- c(0.4, 0.6)
mu <- matrix(c(0, 0, 0, 4, 4, 6), m, r, byrow=TRUE) # means
sigma <- matrix(rep(1, 6), m, r, byrow=TRUE) # stdevs
centers <- matrix(c(0, 0, 0, 4, 4, 4), 2, 3, byrow=TRUE) # initial centers for est
blockid = c(1,1,2) # block structure of coordinates
n = 100
x <- rmvnormmix(n, lambda, mu, sigma) # simulated data
a <- npEM(x, centers, blockid, eps=1e-8, verb=FALSE)
# 2x2 grid: one panel per (component j, coordinate block b) pair.
par(mfrow=c(2,2))
u <- seq(min(x), max(x), len=200) # evaluation grid spanning the data range
for(j in 1:2) {
for(b in 1:2) {
xx <- as.vector(x[,a$blockid==b]) # observations belonging to block b
wts <- rep(a$post[,j], length.out=length(xx)) # posterior weights for component j
bw <- a$bandwidth
title <- paste("j =", j, "and b =", b)
plot(u, wkde(xx, u, wts, bw), type="l", main=title)
}
}
|
43ba8b38ae7124214db1366722d9d827c0ab30d7 | feccb7a55b15dfb76423e841a854e8483859b452 | /scripts/analysis/cmu2_mlm.R | ed39ad3d8397f484894d9c4a6353db1b4271ac63 | [] | no_license | pbeloborodova/EMA | 26bf7447c34a9c7bb01377d0e0075b34d9de25dd | 699585f1d45ed6be252dc6762d5e5382dadf759a | refs/heads/master | 2022-08-14T04:03:13.011528 | 2020-05-12T21:09:53 | 2020-05-12T21:09:53 | 235,207,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,088 | r | cmu2_mlm.R | # Load packages ----------------------------------------
options(scipen=99) # Change scientific notation to decimals
options(stringsAsFactors = FALSE) # Stop conversion of strings to factors
# Mixed-model packages: lme4 for lmer(), lmerTest for p-values/rand(),
# nlme for lme() with explicit covariance structures.
library(lme4)
library(lmerTest)
library(nlme)
# Load data --------------------------------------------
# load() brings `cmu2_clean` into the workspace; it is renamed to `cmu2_mlm`
# and the original removed so the cleaned object stays untouched below.
load("~/R/EMA/data/cleaned/cmu/cmu2_clean.Rda")
cmu2_mlm <- cmu2_clean
rm(cmu2_clean)
# Transform time varaibles -----------------------------
# Add sine and cosine terms for weekly cyclicity
cmu2_mlm$weekcycle_sin <- sin(2*pi*(cmu2_mlm$day + 1)/7)
cmu2_mlm$weekcycle_cos <- cos(2*pi*(cmu2_mlm$day + 1)/7)
# Add continuous time within person variable
cmu2_mlm <- cmu2_mlm %>% group_by(id) %>%
mutate(ema_timecont = date - date[1],
ema_timecont_cent = ema_timecont - mean(ema_timecont)) # Center within person
# Transform mindfulness variable -----------------------
cmu2_mlm <- cmu2_mlm %>% group_by(id) %>%
mutate(mindfulness_cent = mindfulness - mean(mindfulness, na.rm = TRUE)) # Center within person
# MLM models -------------------------------------------
# Intraclass correlation: percentage of total variance attributable to
# differences between groups in a random-intercept model.
#
# Args:
#   model: a fitted lme4::lmer model with a single random intercept (its
#          variance is the first row returned by VarCorr()).
#
# Prints the between/within split and invisibly returns the between-group
# percentage so callers can reuse it. The variance column is addressed by
# its name ("vcov") instead of the fragile positional index [, 4].
icc_between <-
  function(model) {
    var_df <- as.data.frame(VarCorr(model, comp = "Variance"))
    between <- var_df$vcov[1]                         # random-intercept variance
    residual <- var_df$vcov[var_df$grp == "Residual"] # within-group variance
    result <- round(100 * between / (between + residual), 2)
    cat("Variance between groups:", result, "%,",
        "variance within groups:", 100 - result, "%")
    invisible(result)
  }
# Percentage decrease in residual (within-group) variance from model1 to
# model2 -- i.e. the share of within-group variance explained by the extra
# predictors in model2 (a pseudo-R^2 for multilevel models).
#
# Args:
#   model1: baseline lme4::lmer model.
#   model2: nested model extending model1 with additional predictors.
#
# Prints the percentage and invisibly returns it. The variance column is
# addressed by name ("vcov") rather than the positional index [, 4], and the
# stray trailing comma in the original message (a copy-paste artifact from
# icc_between) has been dropped.
decr_var <-
  function(model1, model2) {
    var1_df <- as.data.frame(VarCorr(model1, comp = "Variance"))
    var2_df <- as.data.frame(VarCorr(model2, comp = "Variance"))
    v1 <- var1_df$vcov[var1_df$grp == "Residual"]
    v2 <- var2_df$vcov[var2_df$grp == "Residual"]
    result <- round(100 * (v1 - v2) / v1, 2)
    cat("Decrease in residual variance explained by additional predictors:",
        result, "%")
    invisible(result)
  }
# (1) Loneliness
# (1.1) Unconditional
# Variance between and within participants
# Null (intercept-only) model: baseline for ICC and pseudo-R^2 comparisons.
# rand() (from lmerTest) tests the random intercept.
cmu2_lonely_null <- lme4::lmer(feel_lonely ~ 1 + (1|id),
                               REML = TRUE, data = cmu2_mlm)
summary(cmu2_lonely_null)
rand(cmu2_lonely_null)
icc_between(cmu2_lonely_null)
# Refit the null model in nlme under three residual correlation structures;
# the anova() below compares them by AIC/BIC/likelihood ratio.
# Unstructured
cmu2_lonely_null_uc <- nlme::lme(feel_lonely ~ 1,
                                 random = list(~1|id),
                                 method = "REML",
                                 data = cmu2_mlm,
                                 na.action = na.exclude)
# Compound symmetry
cmu2_lonely_null_cs <- nlme::lme(feel_lonely ~ 1,
                                 random = list(~1|id),
                                 method = "REML",
                                 data = cmu2_mlm,
                                 na.action = na.exclude,
                                 correlation = corCompSymm(form = ~1|id))
# Autoregressive
cmu2_lonely_null_ar <- nlme::lme(feel_lonely ~ 1,
                                 random = list(~1|id),
                                 method = "REML",
                                 data = cmu2_mlm,
                                 na.action = na.exclude,
                                 correlation = corAR1(form = ~1|id))
# Compare models
anova(cmu2_lonely_null_uc, cmu2_lonely_null_cs, cmu2_lonely_null_ar)
# (1.2) Time variables as predictors
cmu2_lonely_full1 <- lme4::lmer(feel_lonely ~ 1 + ema_timecont_cent +
                                  weekcycle_cos + day + time +
                                  (1|id), REML = TRUE, data = cmu2_mlm)
summary(cmu2_lonely_full1)
rand(cmu2_lonely_full1)
decr_var(cmu2_lonely_null, cmu2_lonely_full1)
# NOTE(review): tab_model() is from sjPlot, which is never loaded in this
# script -- confirm it is attached in the session.
tab_model(cmu2_lonely_full1)
# (1.3) Time variables + mindfulness as predictors
cmu2_lonely_full2 <- lme4::lmer(feel_lonely ~ 1 + ema_timecont_cent +
                                  weekcycle_cos + day + time + mindfulness_cent +
                                  (1|id), REML = TRUE, data = cmu2_mlm)
summary(cmu2_lonely_full2)
rand(cmu2_lonely_full2)
decr_var(cmu2_lonely_full1, cmu2_lonely_full2)
tab_model(cmu2_lonely_full2)
# (2) Connection -- mirrors the structure of section (1) for feel_connected.
# (2.1) Unconditional
# Variance between and within participants
cmu2_connected_null <- lme4::lmer(feel_connected ~ 1 + (1|id),
                                  REML = TRUE, data = cmu2_mlm)
summary(cmu2_connected_null)
rand(cmu2_connected_null)
icc_between(cmu2_connected_null)
# Unstructured
cmu2_connected_null_uc <- nlme::lme(feel_connected ~ 1,
                                    random = list(~1|id),
                                    method = "REML",
                                    data = cmu2_mlm,
                                    na.action = na.exclude)
# Compound symmetry
cmu2_connected_null_cs <- nlme::lme(feel_connected ~ 1,
                                    random = list(~1|id),
                                    method = "REML",
                                    data = cmu2_mlm,
                                    na.action = na.exclude,
                                    correlation = corCompSymm(form = ~1|id))
# Autoregressive
cmu2_connected_null_ar <- nlme::lme(feel_connected ~ 1,
                                    random = list(~1|id),
                                    method = "REML",
                                    data = cmu2_mlm,
                                    na.action = na.exclude,
                                    correlation = corAR1(form = ~1|id))
# Compare models
anova(cmu2_connected_null_uc, cmu2_connected_null_cs, cmu2_connected_null_ar)
# (2.2) Time variables as predictors
cmu2_connected_full1 <- lme4::lmer(feel_connected ~ 1 + ema_timecont_cent +
                                     weekcycle_cos + day + time +
                                     (1|id), REML = TRUE, data = cmu2_mlm)
summary(cmu2_connected_full1)
rand(cmu2_connected_full1)
decr_var(cmu2_connected_null, cmu2_connected_full1)
tab_model(cmu2_connected_full1)
# (2.3) Time variables + mindfulness as predictors
cmu2_connected_full2 <- lme4::lmer(feel_connected ~ 1 + ema_timecont_cent +
                                     weekcycle_cos + day + time + mindfulness_cent +
                                     (1|id), REML = TRUE, data = cmu2_mlm)
summary(cmu2_connected_full2)
rand(cmu2_connected_full2)
decr_var(cmu2_connected_full1, cmu2_connected_full2)
tab_model(cmu2_connected_full2)
# (3) Mindfulness (originally mislabelled "(4)"; subsections renumbered too)
# (3.1) Unconditional
# Variance between and within participants
cmu2_mindfulness_null <- lme4::lmer(mindfulness ~ 1 + (1|id),
                                    REML = TRUE, data = cmu2_mlm)
summary(cmu2_mindfulness_null)
rand(cmu2_mindfulness_null)
icc_between(cmu2_mindfulness_null)
# Unstructured
cmu2_mindfulness_null_uc <- nlme::lme(mindfulness ~ 1,
                                      random = list(~1|id),
                                      method = "REML",
                                      data = cmu2_mlm,
                                      na.action = na.exclude)
# Compound symmetry
cmu2_mindfulness_null_cs <- nlme::lme(mindfulness ~ 1,
                                      random = list(~1|id),
                                      method = "REML",
                                      data = cmu2_mlm,
                                      na.action = na.exclude,
                                      correlation = corCompSymm(form = ~1|id))
# Autoregressive
cmu2_mindfulness_null_ar <- nlme::lme(mindfulness ~ 1,
                                      random = list(~1|id),
                                      method = "REML",
                                      data = cmu2_mlm,
                                      na.action = na.exclude,
                                      correlation = corAR1(form = ~1|id))
# Compare models
anova(cmu2_mindfulness_null_uc, cmu2_mindfulness_null_cs, cmu2_mindfulness_null_ar)
# (3.2) Time variables as predictors
cmu2_mindfulness_full <- lme4::lmer(mindfulness ~ 1 + ema_timecont_cent +
                                      weekcycle_cos + day + time +
                                      (1|id), REML = TRUE, data = cmu2_mlm)
summary(cmu2_mindfulness_full)
rand(cmu2_mindfulness_full)
tab_model(cmu2_mindfulness_full)
decr_var(cmu2_mindfulness_null, cmu2_mindfulness_full)
|
1fb893dd4a1f11f3782f2d3abd1611047219e69c | c6dbd68ef1c3eb65256d5c6a32241b5d78d15b62 | /man/ngs_multiqc.Rd | b8d2e9e550017e0e7c2cd537c47f599a4c2059f0 | [] | no_license | amar00k/UBIquitousNGS | c08403ac8c911cb59395e025d65c4dab3092f874 | 17533734f277b1bb8048813a111a5fca3ef3ffb5 | refs/heads/master | 2021-09-13T16:27:16.912989 | 2018-05-02T08:47:14 | 2018-05-02T08:47:14 | 125,870,551 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 336 | rd | ngs_multiqc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ngs_quality_control.R
\name{ngs_multiqc}
\alias{ngs_multiqc}
\title{NGS multiqc}
\usage{
ngs_multiqc(input.dir, output.dir, multiqc.command = "multiqc",
log.basename = NULL, overwrite = FALSE)
}
\arguments{
\item{overwrite}{}
}
\description{
NGS multiqc
}
|
3de2f96b3c80ac4c607f2077784c3009ed297a0a | 17c73a6dc28a704fabef5fdd263c2fda52f32822 | /Oslo2019/Multi-DICE_files/exercise1.R | e595937ed1b44412f920e09ed03931aa9d406e65 | [] | no_license | compphylo/compphylo.github.io | 1f95cc342a6cd692d87878d40a6431a2146cf4c9 | 16d23b565f1992c713c65aa0892dd7ebe698ebbd | refs/heads/master | 2023-01-19T09:48:28.904189 | 2023-01-13T17:05:19 | 2023-01-13T17:05:19 | 173,135,336 | 0 | 4 | null | 2019-07-29T20:02:21 | 2019-02-28T15:18:15 | CSS | UTF-8 | R | false | false | 435 | r | exercise1.R | library(MultiDICE)
# Single MultiDICE run: 501 simulations for one taxon with three partitions,
# 10 haploid samples and 1000 independent sites each, drawing idiosyncratic
# timing (tau), magnitude (epsilon) and population-size (NE) parameters from
# the listed priors; fastsimcoal2 (fsc26) is invoked via fsc2path and output
# is written to the current directory. See ?MultiDICE::dice.sims for the
# exact meaning of each prior argument.
output = dice.sims(num.sims = 501, num.taxa = 1, num.partitions = 3, num.haploid.samples = 10, num.ind.sites = 1000, tau.psi.prior = 0, tau.idio.prior = c(10000:5000000), epsilon.idio.prior = list(c(500:2000)/10000, c(1), 10000/c(500:2000)), NE.idio.prior = list(c(10000:100000),c(1000:100000),c(1000:10000)), output.hyper.draws = F, output.taxa.draws = T, fsc2path = 'fsc26_linux64/fsc26', output.directory = '.')
|
0c0899c78396052eef1282f1b32ce0d9e5cb056b | b019b5d80c3df3605376420cf5ecc3452427635e | /man/session.Rd | f58ca7b7c4637ee0d5448b26c4ff5b36d0cb2239 | [
"MIT"
] | permissive | ropensci/sofa | 081f215da9a2f6f0bb40929e8b913383e1d19741 | 2c6a8aa0240e0566062af923d841ddd57ea0b96c | refs/heads/master | 2023-05-23T23:15:16.634089 | 2023-04-13T07:06:51 | 2023-04-13T07:06:51 | 9,566,868 | 27 | 18 | NOASSERTION | 2022-09-09T12:12:04 | 2013-04-20T16:22:31 | R | UTF-8 | R | false | true | 699 | rd | session.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/session.R
\name{session}
\alias{session}
\title{session}
\usage{
session(cushion, as = "list", ...)
}
\arguments{
\item{cushion}{A \code{\link{Cushion}} object. Required.}
\item{as}{(character) One of list (default) or json}
\item{...}{Curl args passed on to \code{\link[crul]{HttpClient}}}
}
\value{
JSON as a character string or a list (determined by the
\code{as} parameter)
}
\description{
session
}
\examples{
\dontrun{
# Create a CouchDB connection client
user <- Sys.getenv("COUCHDB_TEST_USER")
pwd <- Sys.getenv("COUCHDB_TEST_PWD")
(x <- Cushion$new(user=user, pwd=pwd))
session(x)
session(x, as = 'json')
}
}
|
81dd4e4ef9c10952b47bf1060f1affa2125df49e | d5f2b151ba616d3cbd822bf4257243781bf3eb2f | /app.R | 4716841e2f5f1fb21f92e2dc94a314b7f26e0a02 | [] | no_license | RazzleDaz/CanadaPopulation | 8d8cf9ecb754fd44d9fe43082264785c7b275e18 | a04121541b569d4c51af63df713e1fcba8d57d94 | refs/heads/master | 2020-03-19T05:20:54.991064 | 2018-06-03T16:51:52 | 2018-06-03T16:51:52 | 135,922,135 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,750 | r | app.R | # Packages ----
library(shiny)
library(dplyr)
library(ggplot2)
library(scales)
library(shinythemes)
#Data-------
# Pre-aggregated population counts; expected columns (used below):
# Location, Year, Population, Numeric.Age, Sex.Type.
PopData <- readRDS("PopData.rds")
#Body
# UI: region drop-down plus an animatable year slider driving a population
# pyramid, with the total population shown as text under the controls.
ui = fluidPage(theme = shinytheme("cosmo"),
               # Title
               titlePanel("Canada Population Distribution By Age" ),
               # Sidebar with slider and controls for animation
               sidebarLayout(
                 # sidebar with slider
                 sidebarPanel(
                   #drop down for region
                   selectInput(inputId = "selReg", "Region", choices= unique(PopData$Location), selected = 'Canada'),
                   # Slider with looping; third positional argument is the
                   # initial value (latest available year)
                   sliderInput("selYear", "Year", min = min(PopData$Year), max = max(PopData$Year), max(PopData$Year), step = 1,
                               sep = "", animate=animationOptions(interval=500, loop = FALSE,
                                                                  playButton = NULL, pauseButton = NULL)),
                   textOutput(outputId = "totPop")
                 ),
                 # Show the animated graph
                 mainPanel(
                   plotOutput(outputId="pop_plot", height=800)
                 )
               )
)
# Server: two reactives (the pyramid plot and the total-population text),
# both keyed on the selected region and year.
server <- function(input, output, session) {
  # Reactive expression to create data frame and graph
  aniGraph <- reactive({
    # subset the data
    Sub_PopData <- filter(PopData, Location==input$selReg)
    # Determine the axis limits from the whole region (all years) so the
    # axes stay fixed while the year animation runs; iBar is the tick step,
    # tBar the symmetric axis limit.
    iBar <- ceiling(max(abs(Sub_PopData$Population)/40))*10
    tBar <- iBar*4
    #subset the data for the year for the chart itself
    Sub_PopData <- filter(PopData, Location==input$selReg, Year==input$selYear)
    # create the graph: a population pyramid -- one sex is stored with
    # negative Population values, abs() on the axis labels hides the sign.
    # NOTE(review): plot() on a ggplot object works via its print method;
    # print() would be the conventional call here.
    plot(Sub_PopData %>%
           ggplot(aes(x = Numeric.Age, y = Population, group = Sex.Type, fill = Sex.Type)) +
           geom_bar(stat = "identity", width = .5) +
           coord_flip() +
           scale_y_continuous(limits = c(-tBar,tBar), breaks = seq(-tBar, tBar, iBar),
                              labels = comma(abs(seq(-tBar, tBar, iBar)))) +
           scale_x_continuous(limits= c(0,100), breaks = seq(0,100,10),
                              labels = seq(0,100,10)) +
           geom_hline(color = "black", linetype='dashed', yintercept = seq(-tBar, tBar, iBar)) +
           labs(x = "Age", y = "Total Population") +
           theme(legend.position = "bottom",
                 legend.title = element_blank(),
                 plot.title = element_text(hjust = 0.5),
                 panel.background = element_rect(fill = "white"))
    )
  })
  # Formatted total population for the selected region/year.
  totPopCalc <- reactive({
    temp_PopData <- filter(PopData, Location==input$selReg, Year==input$selYear)
    comma(sum(abs(temp_PopData$Population)))
  })
  # Show Graph
  output$pop_plot <- renderPlot({
    aniGraph()
  })
  #Show total Population
  output$totPop <-
    renderText({paste("Total Population: ", totPopCalc())})
}
shinyApp(ui, server)
|
83c4aacbb2ea987affddf18243fb76a1a97e672a | 35d509cfae96c56db2f8f5964da6e18f9e7fd837 | /man/lkmt_test.Rd | 8450d85324d37d0b3286e15e61fea1373940efc4 | [] | no_license | jmanitz/kangar00 | 420f52303eee3b40491fb1389d90dcb95492c7d6 | 1055f38042e3e244cb76685a2c991ae0a0c2e019 | refs/heads/master | 2022-12-11T12:39:45.489997 | 2022-12-05T20:00:32 | 2022-12-05T20:00:32 | 31,761,573 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,495 | rd | lkmt_test.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lkmt.r
\name{lkmt_test}
\alias{lkmt_test}
\alias{score_test,matrix-method}
\alias{davies_test,matrix-method}
\title{A function to calculate the p-values for kernel matrices.}
\usage{
lkmt_test(formula, kernel, GWASdata, method = c("satt", "davies"), ...)
\S4method{score_test}{matrix}(x1, x2)
\S4method{davies_test}{matrix}(x1, x2)
}
\arguments{
\item{formula}{The formula to be used for the regression nullmodel.}
\item{kernel}{An object of class \code{kernel} including the pathway
representing kernel-matrix based on which the test statistic will be calculated.}
\item{GWASdata}{A \code{GWASdata} object stating the data used in analysis.}
\item{method}{A \code{character} specifying which method will be used for
p-value calculation. Available are \code{'satt'} for the Satterthwaite
approximation and \code{'davies'} for Davies' algorithm. For more details
see the references.}
\item{...}{Further arguments can be given to the function.}
\item{x1}{A \code{\link{matrix}} which is the
similarity matrix calculated for the pathway to be tested.}
\item{x2}{An \code{lm} or \code{glm} object of the nullmodel with fixed
effects covariates included, but no genetic random effects.}
}
\value{
An \code{lkmt} object including the following test results
\itemize{
\item The formula of the regression nullmodel used in the variance component test.
\item An object of class \code{\link{kernel}} including the similarity matrix of the individuals based on which the pathways influence is evaluated.
\item An object of class \code{\link{GWASdata}} stating the data on which the test was conducted.
\item statistic A \code{vector} giving the value of the variance component test statistic.
\item df A \code{vector} giving the number of degrees of freedom.
\item p.value A \code{vector} giving the p-value calculated for the pathway in the variance component test.
}
}
\description{
For parameter \code{'satt'} a pathway's influence on the probability of
being a case is evaluated in the logistic kernel machine test and p-values
are determined using a Satterthwaite approximation as described by Dan Schaid.
For parameter \code{'davies'} a pathway's influence on the probability
of being a case is evaluated using the p-value calculation method described
by Davies. Here the function \code{\link[CompQuadForm]{davies}} from package
\pkg{CompQuadForm} is used.
\examples{
data(hsa04020)
data(gwas)
net_kernel <- calc_kernel(gwas, hsa04020, knots=NULL, type='net', calculation='cpu')
lkmt_test(pheno ~ sex + age, net_kernel, gwas, method='satt')
}
\references{
For details on the variance component test
\itemize{
\item Wu MC, Kraft P, Epstein MP, Taylor DM, Chanock SJ, Hunter DJ, Lin X: Powerful SNP-Set Analysis for Case-Control Genome-Wide Association Studies. Am J Hum Genet 2010, 86:929-42
\item Liu D, Lin X, Ghosh D: Semiparametric regression of multidimensional genetic pathway data: least-squares kernel machines and linear mixed models. Biometrics 2007, 63(4):1079-88.
}
For details on the p-value calculation see
\itemize{
\item Schaid DJ: Genomic Similarity and Kernel Methods I: Advancements by Building on Mathematical and Statistical Foundations. Hum Hered 2010, 70:109-31
}
\itemize{
\item Davies R: Algorithm as 155: the distribution of a linear combination of chi-2 random variables. J R Stat Soc Ser C 1980, 29:323-333.
}
}
\author{
Stefanie Friedrichs, Juliane Manitz
}
|
0f8d2cb21efb79edc8294b669c540aadcb01d3cf | 04976a38cdca4610cd3fd9ef3814a6a3068797ce | /Expression Analysis/patient_check.R | 2461e830b6feb7e05b3f6a367c365cb372d7b6f6 | [] | no_license | aazhur/Biomechanics-Endothelial-Cells | 412f90df84c0ed097a7653180f4086c0bef9eb7b | 54acdfb52b76c4b9c9347ac6dba46f13ae840bcf | refs/heads/main | 2023-02-03T11:44:53.639482 | 2020-12-23T05:17:30 | 2020-12-23T05:17:30 | 323,497,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,539 | r | patient_check.R | library('plyr')
library('ggplot2')
library('plotrix')
library('cowplot')
library('reshape')
library('gplots')
library('gridExtra')
# Expression table: first column Geneid (Ensembl IDs), remaining 14 columns
# are per-patient expression values.
M <- read.delim('patients.txt', header = TRUE, sep = "\t", dec = ".")
#select <- c('ENSG00000105221','ENSG00000173402','ENSG00000111229','ENSG00000143878', 'ENSG00000114251',
# 'ENSG00000001631','ENSG00000136280','ENSG00000114209')
#ccms <- c('ENSG00000173402','ENSG00000072415','ENSG00000164692')
# Restrict to the six genes of interest, then replace the Ensembl IDs with
# symbols. NOTE(review): this renaming assumes the filtered rows come back in
# exactly this Ensembl-ID order -- verify against patients.txt.
ccms <- c('ENSG00000173402','ENSG00000072415','ENSG00000164692','ENSG00000001631','ENSG00000136280','ENSG00000114209')
M <- M[M$Geneid %in% ccms,]
#M$Gene <- c('CCM1','AKT2','ARPC3','CCM3','WNT5A','CCM2','RHOB','DAG1')
#M$Geneid <- c('MPP5','COL1A2','DAG1')
M$Geneid <- c('CCM1','MPP5','CCM2','CCM3','COL1A2','DAG1')
ord_genes <- c('MPP5','COL1A2','DAG1','CCM1','CCM2','CCM3')
M <- M[match(ord_genes, M$Geneid),]
# Expression matrix (genes x patients) for the heatmaps.
x <- as.matrix(M[,c(2:15)])
row.names(x) <- M$Geneid
# Heatmap scaled within rows (per gene), then within columns (per patient).
# NOTE(review): "dendogram" is a misspelling of heatmap.2's "dendrogram"
# argument, so it is silently swallowed by "..." and has no effect.
heatmap.2(x, cellnote=round(x,2), notecol = 'black',
          scale = "row", col = bluered(100), dendogram = 'none', Colv=TRUE, Rowv = FALSE,
          trace = "none", density.info = "none")
heatmap.2(x, cellnote=round(x,2), notecol = 'black',
          scale = "col", col = bluered(100), dendogram = 'none', Colv=TRUE, Rowv = FALSE,
          trace = "none", density.info = "none")
# Long format for per-gene scatter plots.
# NOTE(review): reshape::melt has no "by" argument; it is ignored and Geneid
# is picked as the id variable only because it is the sole non-numeric column.
S <- melt(M, by='Geneid')
names(S) <- c('Gene','Type','Value')
ggplot(data = S, aes(x = Type, y = Value, color = Type)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, lwd = .1) +
  facet_wrap(~Gene, scales = "free_y")
|
879a0946c5b03dc0c7978528725425708443c229 | c28247a524320e540991f57952ab6d19260a3ef7 | /kmeans_from_scratch.R | 5eb4e2baf58dd39770d26e2c97f7f42f58c04238 | [] | no_license | tkanngiesser/kmeans_from_scratch | 1abd372391c95bf8a1275a34e194b908143044b9 | deae0703d6f3b4585806cf724342084dae6b48de | refs/heads/master | 2020-03-10T21:06:13.800045 | 2018-04-15T07:07:05 | 2018-04-15T07:07:05 | 129,585,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,750 | r | kmeans_from_scratch.R | require(MASS)
require(ggplot2)
set.seed(1234)
# Simulate five bivariate-normal clusters of 300 points each with different
# means and covariance structures; mvrnorm() comes from MASS (loaded at the
# top of the file).
set1=mvrnorm(n = 300, c(-4,10), matrix(c(1.5,1,1,1.5),2))
set2=mvrnorm(n = 300, c(5,7), matrix(c(1,2,2,6),2))
set3=mvrnorm(n = 300, c(-1,1), matrix(c(4,0,0,4),2))
set4=mvrnorm(n = 300, c(10,-10), matrix(c(4,0,0,4),2))
set5=mvrnorm(n = 300, c(3,-3), matrix(c(4,0,0,4),2))
# Stack all points and keep the true cluster label for plotting.
DF=data.frame(rbind(set1,set2,set3,set4,set5),cluster=as.factor(c(rep(1:5,each=300))))
ggplot(DF,aes(x=X1,y=X2,color=cluster))+geom_point()
###Iterating over observations
# K-means clustering implemented from scratch (Lloyd's algorithm).
# NOTE: intentionally shadows stats::kmeans, as in the original script.
#
# Args:
#   data: numeric data.frame (observations in rows, features in columns).
#   K: number of clusters.
#   stop_crit: convergence threshold on the mean squared centroid movement.
#
# Returns a list with:
#   data: the input data plus a 'cluster' column of assignments in 1..K.
#   centroids: data.frame of the K final centroids.
#
# Fixes vs. the original:
#   * mean() on a data.frame returns NA (with a warning) in modern R, which
#     made the while() condition fail; the squared centroid movement is now
#     averaged over a matrix.
#   * scalar while() condition uses && semantics (the dead 'converged'/'it'
#     bookkeeping is removed).
#   * an empty cluster keeps its previous centroid instead of producing NaN.
kmeans <- function(data, K = 4, stop_crit = 1e-4) {
  # Initialise centroids as K distinct, randomly chosen observations.
  centroids <- data[sample.int(nrow(data), K), ]
  cluster <- rep(0L, nrow(data))
  current_stop_crit <- Inf

  while (current_stop_crit >= stop_crit) {
    old_centroids <- centroids

    # Assignment step: attach each observation to the centroid with the
    # smallest squared Euclidean distance.
    for (i in seq_len(nrow(data))) {
      dists <- vapply(seq_len(nrow(centroids)),
                      function(k) sum((centroids[k, ] - data[i, ])^2),
                      numeric(1))
      cluster[i] <- which.min(dists)
    }

    # Update step: move each centroid to the mean of its members.
    for (k in seq_len(nrow(centroids))) {
      members <- cluster == k
      if (any(members)) {
        centroids[k, ] <- apply(data[members, , drop = FALSE], 2, mean)
      }
    }

    # Mean squared movement of the centroids in this iteration.
    current_stop_crit <- mean(as.matrix((old_centroids - centroids)^2))
  }

  list(data = data.frame(data, cluster), centroids = centroids)
}
#test of the kmeans function
# Cluster the simulated points (first two columns only) into 5 groups, then
# plot points and centroids together; isCentroid controls size/alpha so the
# centroids stand out. Note the centroid labels 1:5 are arbitrary and need
# not match the true simulated cluster numbers.
res=kmeans(DF[1:2],K=5)
res$centroids$cluster=1:5
res$data$isCentroid=F
res$centroids$isCentroid=T
data_plot=rbind(res$centroids,res$data)
ggplot(data_plot,aes(x=X1,y=X2,color=as.factor(cluster),size=isCentroid,alpha=isCentroid))+geom_point()
|
e0c90beaaddd4884baacb17124c66c53923b56fd | 05a8bdb7671f73bbbc5c4106a1a11b4d3d95accb | /DataAnalysis/voltagePrediction.R | a3c0bbdc393c729a8611d8dd3b3dd302af5f703e | [] | no_license | RunFranks525/IoTBatteryAnalysis | 6834c2c39a6ea32ce5544c37da8ff98b68346a76 | 8f9a0c0056fa0e95383c7e08d296f7b2d0a6398f | refs/heads/master | 2021-01-11T12:16:52.753205 | 2016-12-15T04:32:46 | 2016-12-15T04:32:46 | 76,513,979 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,518 | r | voltagePrediction.R | dtset = maml.mapInputPort(1) #Database Port
# Azure ML Studio "Execute R Script" module: fits polynomial voltage-vs-time
# models to battery data from input port 1 and predicts the voltage at the
# timestamp supplied on input port 2.
dt = data.frame(dtset)
# dateStamp = substr(as.character(dt$DateStamp), 1, 19)
# dateStamp = strptime(dateStamp, format = "%Y-%m-%d %H:%M:%S", tz = "America/Chicago")
# x = as.numeric(dateStamp-dateStamp[1])
# y = dt$BatteryVoltage
# plot(x, y)
x = dt$Time
y = dt$Voltage
# Cubic fit (kept for comparison; m5 below is the model actually used).
m4 = nls(y ~ a*(x^3) + b*(x^2) + c*x + d, start = list(a = 0.01, b = 0.01, c = 0.01, d = 16))
# summary(m4)
# cor(y, predict(m4))
# Degree-5 polynomial fit.
m5 = nls(y ~ a*(x^3) + b*(x^2) + c*x + d + e*(x^4) + f*(x^5), start = list(a = 0.01, b = 0.01, c = 0.01, d = 16, e = 0.01, f = 0.01))
# summary(m5)
# cor(y, predict(m5))
# z = predict(m5)
# You can take x and z to draw the plots.
# The model we're using is m5. y = -1.141e-0 * x^5 + 1.712e-01 * x^4 + -9.920e-01 * x^3 + 2.764e+00 * x^2 + -3.750e+00 * x + 1.406e+01
# Since we don't have time to get data of different temperature, power and other factors, the whole idea is this:
# We will create different models for different life cycle numbers and users can enter corresponding life cycle numbers to get the predicted voltage at chose time point.
timeInput = maml.mapInputPort(2) #time
timeInput = timeInput$DateTime[1]
# str(timeInput)
timeInput = as.character(timeInput)
timeInput = strptime(timeInput, format = "%Y-%m-%d %H:%M:%S", tz = "America/Chicago")
# NOTE(review): x is reused here as the prediction abscissa, overwriting the
# training vector above; also the units of (timeInput - dt$Time[1]) depend on
# the class of dt$Time -- confirm they match the units m5 was trained on.
x = as.numeric(timeInput-dt$Time[1])
prediction = predict(m5, newdata = data.frame(x=x))
prediction = data.frame(prediction)
maml.mapOutputPort("prediction"); # Voltage prediction
73972ab094d66cf6c4840254266e28e870e31114 | a3a83195fdc3100bdf16eb50623f151ad38698fb | /etc/STN/dados-stn.R | eaf220439680427e75fc4619f783207dfae18472 | [
"Apache-2.0"
] | permissive | bcbernardo/alternancia-eleicoes-municipais | 3ed74a286fe8a38dd4b99c2a8a19d1979e2b4281 | f22daf9e4945796f9bd48ed3a8a8e49ddecb9b46 | refs/heads/master | 2023-01-18T14:51:48.947405 | 2020-11-22T21:20:39 | 2020-11-22T21:20:39 | 258,642,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,519 | r | dados-stn.R | #################################### PREPARACAO ###############################
# Install (if needed) and attach data.table.
# NOTE(review): installing packages at run time is fragile; consider
# declaring dependencies instead.
if (!require("data.table")) install.packages("data.table")
library(data.table)
# Install (if needed) and attach ipeadatar (Ipeadata API client).
if (!require("ipeadatar")) install.packages("ipeadatar")
library(ipeadatar)
# Install (if needed) the outliers package, used below via outliers::scores
# for outlier detection (the original comment mentioned GmAMisc by mistake).
if (!require("outliers")) install.packages("outliers")
setDTthreads(8)
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("D:/alternancia-eleicoes-municipais/etc/STN")
##################### DOWNLOAD OF THE DATA VIA IPEADATA #######################
# Ipeadata series of interest: regional series whose name contains
# "municipal".
ipea_financasMun <- search_series(terms = "municipal", fields = c("name"))
setDT(ipea_financasMun)
ipea_financasMun <- ipea_financasMun[theme=="Regional",]
# Load the cached municipal-finance panel if present; otherwise download
# every selected Ipeadata series, stack them, and cache the result.
#
# Fixes vs. the original:
#   * rm() referenced a nonexistent object ("datasetm" instead of "dataset"),
#     which produced a runtime warning.
#   * the per-series rbind() inside the loop grew the table quadratically;
#     the pieces are now collected in a list and bound once with rbindlist().
if (file.exists("./2017-1985_STN_FinancasMunicipais.rds")) {
  financasMun <- readRDS("./2017-1985_STN_FinancasMunicipais.rds")
} else {
  pieces <- lapply(ipea_financasMun$code, function(dataset_code) {
    dataset <- setDT(ipeadata(code = dataset_code, quiet = TRUE))
    dataset[uname == "Municipality", ]
  })
  financasMun <- rbindlist(pieces)
  financasMun[, `:=`(GEOCOD_IBGE = as.integer(tcode),
                     ANO = as.integer(format(date, "%Y")))]
  financasMun[, c("date", "tcode", "uname") := NULL]
  saveRDS(financasMun, "./2017-1985_STN_FinancasMunicipais.rds")
  rm(pieces)
}
####################### APPLY INFLATION (IPCA) ADJUSTMENT ######################
# Listing of the IPCA series available on Ipeadata (kept for reference):
##ipea_IPCA <- search_series(terms = "IPCA", fields = c("name"))
# Download and prepare the annual IPCA inflation series.
ipca <- setDT(ipeadata("PRECOS_IPCAG", quiet = T))
ipca[,`:=`(ANO = as.integer(format(date, "%Y")), IPCA = 1 + value * 0.01)]
ipca[,c("date", "tcode", "uname", "code", "value"):=NULL]
ipca[, Acumulado := numeric()]
# Cumulative inflation factor from each year up to the most recent year.
for (year in ipca$ANO) {
  ipca[ANO==year, Acumulado:=prod(ipca[ANO %in% c(year:max(ipca$ANO)), IPCA])] }
# Bring every monetary value to the most recent year's prices.
financasMun <- financasMun[ipca[, .(ANO, Fator_IPCA = Acumulado)],
                           on = c("ANO"), nomatch = 0]
financasMun[, value := value * Fator_IPCA]
financasMun[, Fator_IPCA:=NULL]
remove(year, ipca)
############################# ADD RANKS ########################################
# Rank municipalities within each series/year (1 = largest value).
financasMun[, Posto:=frank(-value, ties.method="min"), by=c("code", "ANO")]
############################## REMOVE OUTLIERS #################################
financasMun <- financasMun[ANO %in% c(2005:2015),]
# Flag rank values deviating strongly (|MAD z-score| > 3.5) within each
# municipality/series.
financasMun[, outlier := (abs(outliers::scores(Posto, type = "mad")) > 3.5),
            by = c("GEOCOD_IBGE", "code")]
# Anti-join: drop every municipality flagged as an outlier on the DESPORM or
# RECORRM series (budget expenditure / current revenue -- verify codes).
financasMun <- financasMun[!financasMun[outlier == T &
                                          code %in% c("DESPORM", "RECORRM"),],
                           on = "GEOCOD_IBGE"]
################################# PER CAPITA ###################################
# Population per municipality/year (estimates plus census totals).
pop <- setDT(rbind(ipeadata("ESTIMA_PO"), ipeadata("POPTOT")))
pop <- pop[uname=="Municipality",]
pop[,`:=`(GEOCOD_IBGE = tcode, ANO = as.integer(format(date, "%Y")), POP = value)]
financasMun <- financasMun[pop[,.(GEOCOD_IBGE, ANO, POP)],
                           on = c("GEOCOD_IBGE", "ANO")]
financasMun[, value := round(value/POP, 2)]
remove(pop)
####################### BY FIRST TRIENNIUM OF THE TERM #########################
# Label each year with the reference (end) year of the municipal term.
# NOTE(review): the 2013-2016 window spans four years, unlike the others --
# confirm this is intentional.
financasMun[ANO %in% c(2005:2007), Referencia := 2008]
financasMun[ANO %in% c(2009:2011), Referencia := 2012]
financasMun[ANO %in% c(2013:2016), Referencia := 2016]
financasMun <- financasMun[, .(value=sum(value)),
                           by=c("GEOCOD_IBGE", "Referencia", "code")]
####################### COMPARE TRIENNIA #######################################
# Self-join each term with the previous one (Referencia shifted by +4) and
# keep the change relative to the previous term in value_last.
financasMun <- financasMun[
  financasMun[, .(GEOCOD_IBGE, code, value_last=value, Referencia=Referencia+4)],
  on = c("GEOCOD_IBGE", "code", "Referencia"), nomatch = NULL]
financasMun[, value_last := value - value_last]
financasMun <- na.omit(financasMun)
############################### VARIABLES AS COLUMNS ###########################
valores <- dcast(financasMun, GEOCOD_IBGE + Referencia ~ code,
                 value.var = "value")
valores_ult <- dcast(financasMun, GEOCOD_IBGE + Referencia ~ code,
                     value.var = "value_last")
#################### ADD FISCAL-MANAGEMENT INDICATORS ##########################
# Adapted from https://www.firjan.com.br/ifgf/metodologia/
valores[, `:=`(Pessoal = DESPCUPM/RECORRM, Endividamento = DFENCEM/RECORRM,
               Investimento = (DINVESTM + DINVFINAM) / RECORRM)]
valores_ult[, `:=`(Pessoal = DESPCUPM/RECORRM, Endividamento = DFENCEM/RECORRM,
                   Investimento = (DINVESTM + DINVFINAM) / RECORRM)]
##################### CONSISTENCY CHECKS AND FINAL MERGE #######################
financasMun <- merge.data.table(x = valores, y = valores_ult,
                                by = c("GEOCOD_IBGE", "Referencia"),
                                suffixes = c("", "_ult"))
# Drop municipalities with implausible records (expenditure above revenue or
# near-zero revenue/expenditure values).
financasMun <- financasMun[!financasMun[(DESPORM > RECORM) | (RECORM < 1) |
                                          (RECORRM < 1) | (RECTRIBM < 1) |
                                          (DESPORM < 1),],
                           on = c("GEOCOD_IBGE")]
financasMun <- na.omit(financasMun)
saveRDS(financasMun, "./dados-tratados-stn.rds")
remove(valores, valores_ult, financasMun)
setwd("../..")
19cb0649995288d122b8e39e5d1305c4c94f13b7 | b1506deb942c8acaa3f117d3f21c55d4f7c719f9 | /R/matchcontrols.R | e772d5e562c0510ed01e68719b2ed667c419d9a9 | [] | no_license | vcastro/Ri2b2matchcontrols | dd506167bcef47f7b7772b16c4abf6ffe6af0859 | 8e0a9fcec33a8c95c33b803c71608f6b7c71404e | refs/heads/master | 2021-08-06T08:44:56.156120 | 2017-11-04T13:04:16 | 2017-11-04T13:04:16 | 109,324,222 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,579 | r | matchcontrols.R | #' matchcontrols
#'
#' The main match controls function to match controls using cem and generate
#' an html report and a list of matched controls
#'
#' @param d A data frame of patients with matching variables and a two-level
#'   cohort factor (usually 'Case' and 'Control')
#' @param case_patientset_name Name of the case patient set (for report)
#' @param controlpool_patientset_name Name of the control pool patient set (for report)
#' @param controls_to_match Number of controls to match to each case
#' @param match_variables A string vector of variable names to use for matching
#'
#' @return A list with the elements
#' \item{report_text}{String of the html report}
#' \item{html_file}{Full path to html report file in tmp directory}
#' \item{match_data}{The full patient data frame with match strata and which
#' controls were matched}
#' \item{cem_data}{The cem object from the cem function.}
#' \item{strata_summary}{Currently identical to \code{cem_data}; see note in code.}
#' \item{matched_controls}{The final list of matched controls (can be used to
#' create an i2b2 patientset)}
#' @export
#' @import dplyr
#' @import tidyr
#'
#' @examples
#' #Not run
matchcontrols <- function(d, case_patientset_name = "None Given",
                          controlpool_patientset_name = "None Given", controls_to_match = 1,
                          match_variables = c("age", "gender", "race")) {
  # Validate the cohort structure: exactly two levels are required.
  if(length(levels(d$cohort)) != 2)
    stop(paste0("The dataset includes ", length(levels(d$cohort)), " cohorts. Only 2 cohorts are allowed (usually 'Case' and 'Control')"))
  # Sanity check: the control pool must be large enough for the requested
  # matching ratio. NOTE(review): nothing here removes cases that also appear
  # in the control pool, despite the original "remove cases from controls"
  # comment -- confirm whether that step is still needed.
  if(nrow(d[d$cohort == "Control",]) / nrow(d[d$cohort == "Case",]) < controls_to_match)
    stop(paste0("There are insufficient control pool patients to run the match."))
  # run the matching procedure (cem_match is defined elsewhere in the package)
  m <- cem_match(d, match_variables = match_variables, patientid_variable = "patient_num", controls_to_match = controls_to_match)
  # Generate the HTML report from the packaged R Markdown template.
  a <- rmarkdown::render(system.file("rmd/matchcontrols_report.Rmd", package="Ri2b2matchcontrols"),
                         output_file = tempfile(fileext = ".html"),
                         params = list(match_data = m,
                                       case_patientset_name = case_patientset_name,
                                       controlpool_patientset_name = controlpool_patientset_name)
  )
  # Output results (the list below is the function's return value).
  # NOTE(review): strata_summary duplicates cem_data (both m$cem_result) --
  # confirm whether a per-stratum summary was intended instead.
  list(report_text = paste(readLines(a), collapse = "\n"),
       html_file = a,
       match_data = m$match_data,
       cem_data = m$cem_result,
       strata_summary = m$cem_result,
       matched_controls = m$match_data[which(m$match_data$k2k_control==TRUE), "patient_num"])
  ##refactor: warnings
}
|
b32048cc3cd62b43f6f7ab0f2e7ce779bbf312d2 | 818dd3954e873a4dcb8251d8f5f896591942ead7 | /Shijie Lyu/pipeline.R | 0b2a9d2e7475564ab2bdfbe9140c59297d6a389b | [] | no_license | DannyArends/HU-Berlin | 92cefa16dcaa1fe16e58620b92e41805ebef11b5 | 16394f34583e3ef13a460d339c9543cd0e7223b1 | refs/heads/master | 2023-04-28T07:19:38.039132 | 2023-04-27T15:29:29 | 2023-04-27T15:29:29 | 20,514,898 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,143 | r | pipeline.R | # Pipeline for DNA re-seq analysis on chicken
#
# copyright (c) 2015-2020 - Brockmann group - HU Berlin, Danny Arends
# last modified Jan, 2015
# first written Jan, 2015
# First (and only expected) command-line argument: the sample file prefix
# used to derive every input/output path below.
cmdlineargs <- commandArgs(trailingOnly = TRUE)
fileBase <- as.character(cmdlineargs[1])
# Run a shell command, echoing it first, and abort the whole R session on a
# non-zero exit status so later pipeline steps never run on broken
# intermediate files.
#
# Args:
#   x: the shell command line to run.
#   intern: passed to system(); if TRUE the command's stdout is captured and
#           (invisibly) returned, otherwise the exit status is returned.
#
# Fix vs. the original: with intern = TRUE, system() returns a character
# vector of output and signals failure via a "status" attribute, so the old
# numeric test `res[1] >= 1` was a meaningless string comparison there.
execute <- function(x, intern = FALSE){
  cat("----", x, "\n")
  res <- system(x, intern = intern)
  cat(">>>>", res[1], "\n")
  if (intern) {
    status <- attr(res, "status")   # NULL on success
    if (!is.null(status) && status != 0) q("no")
  } else if (res != 0) {
    q("no")
  }
  invisible(res)
}
# ---------------------------------------------------------------------------
# Re-sequencing pipeline body: trimming -> alignment -> sort -> read groups ->
# dedup -> indel realignment -> base recalibration -> SNP calling.
# Each step builds a shell command string and runs it via execute().
# NOTE(review): `reference` points at a mouse Leptin fasta while the known-sites
# VCFs and the commented index commands refer to Gallus gallus (chicken) —
# confirm which genome this run is actually meant to use.
# ---------------------------------------------------------------------------
referenceDir <- "genomes"
reference <- paste0(referenceDir, "/Leptin_Mouse.fasta")
## Do not forget to index the fasta file
#/opt/bwa-0.7.10/bwa index Gallus_gallus.Galgal4.fasta
## Also index the fasta for GATK
#java -jar /opt/picard-tools-1.99/CreateSequenceDictionary.jar R=Gallus_gallus.Galgal4.fasta O=Gallus_gallus.Galgal4.dict
#samtools faidx Gallus_gallus.Galgal4.fasta
########### ANALYSIS ###########
### Trimmomatic: Remove adapters and trim reads based on quality scores (1 to 2 hours) ###
# NOTE(review): `logfile` is assigned but never referenced below in this chunk.
logfile <- paste0(fileBase,"log.txt")
trimmomatic <- "/opt/Trimmomatic-0.32/"
gatk <- "/opt/GenomeAnalysisTK-3.2-2/GenomeAnalysisTK.jar"
picard <- "/opt/picard-tools-1.99/"
# Paired-end inputs and the four Trimmomatic outputs (paired/unpaired x R1/R2).
inputfiles <- c(paste0(fileBase, "_1.fq.gz"), paste0(fileBase, "_2.fq.gz"))
outputfiles <- c(paste0(fileBase, "_1.P_trimmed.fastq.gz"), paste0(fileBase, "_1.U_trimmed.fastq.gz"),
                 paste0(fileBase, "_2.P_trimmed.fastq.gz"), paste0(fileBase, "_2.U_trimmed.fastq.gz"))
cmdBase <- paste0("java -jar ", trimmomatic, "trimmomatic-0.32.jar PE")
params <- paste0("ILLUMINACLIP:", trimmomatic, "adapters/TruSeq3-PE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36")
command <- paste(cmdBase, inputfiles[1], inputfiles[2], outputfiles[1], outputfiles[2], outputfiles[3], outputfiles[4], params)
# Trimming step currently disabled; outputs are assumed to already exist.
#execute(command)
### BWA: Alignment against genome (2 to 8 hours), we pipe the output to the next step ###
# -v 2 : Verbose level 2 only errors and warnings
# -t 6 : Number of threads to use with BWA
# Only the paired trimmed reads (outputs 1 and 3) are aligned.
command <- paste0("/opt/bwa-0.7.10/bwa mem -v 2 -t 6 -A 3 -B 2 -U 4 -O 2 -E 0 -T 10 -a ", reference," ", outputfiles[1], " ", outputfiles[3], " | ")
### Convert SAM to BAM (1 hour), we pipe the output to the next step ###
# -Sb : Input sam, output bam
command <- paste0(command, "samtools view -Sb - | ")
### Sort the BAM (1 hour) ###
# -@ : Number of CPU cores to use
# -m : Memory per CPU core
# NOTE(review): this is the legacy samtools sort syntax (out prefix + shell
# redirect); newer samtools versions expect `sort -o out.bam` — verify the
# installed samtools version accepts this form.
outputSBAM <- paste0(fileBase, "P_trimmed.aligned.sorted.bam")
command <- paste0(command, "samtools sort -@ 4 -m 2G -o - ", fileBase, " > ", outputSBAM)
execute(command)
### Add a read group ###
outputSRGBAM <- paste0(fileBase, "P_trimmed.aligned.sorted.rg.bam")
# Sample ID is taken from a fixed character range of the base name
# (presumably matching the "sample_XXXX" naming scheme — confirm for new inputs).
IDcode <- substr(fileBase, 8, 12)
command <- paste0("java -Xmx4g -jar ", picard, "AddOrReplaceReadGroups.jar INPUT=", outputSBAM, " OUTPUT=", outputSRGBAM, " CREATE_INDEX=false RGID=", IDcode, " RGLB=LIB", IDcode, " RGPL=Illumina RGPU=X RGSM=", IDcode)
execute(command)
### Move the file with the read group over the previous file ###
command <- paste0("mv ", outputSRGBAM, " ", outputSBAM)
execute(command)
### Index the BAM file (10 minutes) ###
outputSBAI <- paste0(fileBase, "P_trimmed.aligned.sorted.bai")
command <- paste0("samtools index ", outputSBAM, " ", outputSBAI)
execute(command)
### Mark duplicates, using the Picard tools (~ 30 minutes) ###
outputSBAID <- paste0(fileBase, "P_trimmed.aligned.sorted.dedup.bam")
outputMetrics <- paste0(fileBase, ".metrics.txt")
command <- paste0("java -jar ", picard, "MarkDuplicates.jar INPUT=", outputSBAM, " OUTPUT=", outputSBAID," METRICS_FILE=", outputMetrics," MAX_FILE_HANDLES_FOR_READ_ENDS_MAP=1000")
execute(command)
### Index the BAM file (10 minutes) ###
outputSBAIDI <- paste0(fileBase, "P_trimmed.aligned.sorted.dedup.bai")
command <- paste0("samtools index ", outputSBAID, " ", outputSBAIDI)
execute(command)
### Get some basic statistics (5 to 10 minutes)
command <- paste0("samtools flagstat ", outputSBAID)
execute(command)
### Indel Realign
outputSNPS <- "output.snp.intervals"
outputSIBAM <- paste0(fileBase, "P_trimmed.aligned.sorted.realigned.bam")
knownindels <- paste0(referenceDir, "/Gallus_gallus.reordered.vcf") # Reference, download from: ftp://ftp.ensembl.org/pub/release-78/variation/vcf/gallus_gallus/
# The interval list is shared across samples, so it is only generated once.
if(!file.exists(outputSNPS)){
  command <- paste0("java -Xmx4g -jar ", gatk, " -nt 4 -T RealignerTargetCreator -R ", reference, " -known ", knownindels, " -o ", outputSNPS, " -U ALLOW_N_CIGAR_READS")
  execute(command) # Call the GATK RealignerTargetCreator, only need to be done because the knownSNPlist does not change
}
command <- paste0("java -Xmx4g -jar ", gatk, " -T IndelRealigner -R ", reference, " -targetIntervals ", outputSNPS, " -I ", outputSBAID, " -o ",outputSIBAM," -known ",knownindels, " --consensusDeterminationModel KNOWNS_ONLY --fix_misencoded_quality_scores")
execute(command) # Call the GATK IndelRealigner
### Base Recalibration
# Two recalibration passes: covariates before (covfile1) and after (covfile2)
# applying BQSR, compared with AnalyzeCovariates into a PDF report.
knownsnps <- paste0(referenceDir, "/Gallus_gallus.reordered.vcf") # Reference, download from: ftp://ftp.ensembl.org/pub/release-78/variation/vcf/gallus_gallus/
covfile1 <- paste0(fileBase, ".1.covariates")
covfile2 <- paste0(fileBase, ".2.covariates")
plotfile <- paste0(fileBase, "recalibration.pdf")
outputSIRBAM <- paste0(fileBase, "P_trimmed.aligned.sorted.realigned.recalibrated.bam")
command <- paste0("java -Xmx4g -jar ", gatk, " -nct 4 -T BaseRecalibrator -R ", reference, " -knownSites ", knownsnps, " -I ", outputSIBAM," -o ", covfile1, " -U ALLOW_N_CIGAR_READS")
execute(command) # Call the GATK BaseRecalibrator
command <- paste0("java -Xmx4g -jar ", gatk, " -nct 4 -T PrintReads -R ", reference," -I ", outputSIBAM," -BQSR ", covfile1, " -U ALLOW_N_CIGAR_READS -o ", outputSIRBAM)
execute(command) # Call the GATK PrintReads
command <- paste0("java -Xmx4g -jar ", gatk, " -nct 4 -T BaseRecalibrator -R ", reference, " -knownSites ", knownsnps, " -I ", outputSIRBAM," -o ", covfile2, " -U ALLOW_N_CIGAR_READS")
execute(command) # Call the GATK BaseRecalibrator
command <- paste0("java -Xmx4g -jar ", gatk, " -T AnalyzeCovariates -R ", reference, " -before ", covfile1, " -after ", covfile2, " -U ALLOW_N_CIGAR_READS -plots ", plotfile)
execute(command) # Call the GATK AnalyzeCovariates
### SNP calling
# Joint calling over three hard-coded recalibrated sample BAMs.
s1 <- "sample_6929P_trimmed.aligned.sorted.realigned.recalibrated.bam"
s2 <- "sample_6954P_trimmed.aligned.sorted.realigned.recalibrated.bam"
s3 <- "sample_8425-2P_trimmed.aligned.sorted.realigned.recalibrated.bam"
command <- paste0("java -Xmx4g -jar ", gatk," -T HaplotypeCaller -R ", reference," -I ", s1," -I ", s2, " -I ", s3, " --dbsnp ",knownsnps," -stand_call_conf 30 -stand_emit_conf 10 -L ", outputSNPS," -o output.raw.snps.indels.vcf")
execute(command)
#q("no")
|
1fcb6b2b45d7c95c73498a6c73d0549f0094ab63 | ce341675a8052132e17cc02b1be0de05959de32a | /analysis/compareRPKMfromBamFlux_ByExon.R | 8f3aaea79818a13dac80593d59298ef2a9de1b7d | [] | no_license | adamwespiser/lncNet | 00c5c8bd7d306b85eeff6ae363bab82ca44a0cdb | 80d85aba7965812387874e1b796b6776dc8638e6 | refs/heads/master | 2021-01-22T08:59:01.438827 | 2015-07-29T10:23:34 | 2015-07-29T10:23:34 | 33,346,941 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 28,594 | r | compareRPKMfromBamFlux_ByExon.R |
# Convenience driver: process the unique-read run (explicit in/out paths),
# then the default multi-read run, and finally regenerate the
# multi-vs-unique comparison plots from the companion analysis script.
bothRuns <- function() {
  uniq_expr_file <- getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-UNIQ-RRPM.tab")
  uniq_report_file <- getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-UNIQ-RRPM-REPORT.tab")
  getDataTotalReadsBtwnReps_rpkmFromBamTopTrans_ByExon(infile = uniq_expr_file,
                                                       reportFile = uniq_report_file)
  getDataTotalReadsBtwnReps_rpkmFromBamTopTrans_ByExon()
  source('~/work/research/researchProjects/coexpr/lncNET/analysis/rsemVsRpkmFromBam.R')
  plotMultiVsUniqReads()
}
# Default-run driver: build the top-transcript-per-gene expression table from
# the by-exon GTFs, then produce the replicate/report tables from it.
# NOTE(review): the suffix arguments here are swapped relative to the defaults
# of processCellsMaxTransExpr_ByExon (suffix=".transFromExon.gtf",
# transSuffix=".transByExon.gtf") — confirm which pairing is intended.
doit <- function(){
  processCellsMaxTransExpr_ByExon(suffix=".transByExon.gtf",transSuffix=".transFromExon.gtf")
  getDataTotalReadsBtwnReps_rpkmFromBamTopTrans_ByExon()
}
# Unique-read driver: fetch the uniquely-mapped-read GTFs from the cluster,
# rebuild the expression and report tables from them, then refresh the
# cytosolic-fraction plots into the uniq-read output directory.
doitUniqReads <- function() {
  exon_suffix <- ".uniq.star_sort.transByExon.gtf"
  trans_suffix <- ".uniq.star_sort.trans.gtf"
  getRpkmFromBamDataForOneCellByExon(suffix = exon_suffix,
                                     transSuffix = trans_suffix,
                                     writeCopyScript = TRUE)
  system("~/sandbox/rpkmFromBamExonFetch")
  uniq_expr_file <- getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-UNIQ-RRPM.tab")
  uniq_report_file <- getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-UNIQ-RRPM-REPORT.tab")
  processCellsMaxTransExpr_ByExon(suffix = exon_suffix,
                                  transSuffix = trans_suffix,
                                  outfile = uniq_expr_file)
  proc_file <- getDataTotalReadsBtwnReps_rpkmFromBamTopTrans_ByExon(infile = uniq_expr_file,
                                                                    reportFile = uniq_report_file)
  plotRatiosTopTrans(infile = getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-UNIQ-RRPM-proc.tab"),
                     outdir = getFullPath("plots/rnaExpr/mappedReads/RPKMfromBamTopTrans/cytFracByExon_uniqRead/"),
                     plotMsg = "unique reads mapped by STAR")
}
# Build the per-sample annotation table for the by-exon rpkmFromBam outputs.
#
# Reads the ENCODE CSHL long-RNA-seq file manifest, keeps nucleus/cytosol
# fastq entries, pairs Rd1/Rd2 mates row-wise, and derives local paths to the
# per-exon GTF (rpkmFromBamFile) and per-transcript (rfbTrans) files.
#
# Args:
#   filesTxtTab     tab-separated ENCODE file manifest.
#   writeCopyScript when TRUE, writes and immediately runs an scp fetch script
#                   that copies the remote GTFs to ~/data/rpkmFromBam/.
#   suffix          filename suffix of the per-exon GTF outputs.
#   transSuffix     filename suffix for the derived per-transcript files.
#                   NOTE(review): no default — callers must always supply it,
#                   unlike `suffix`; confirm this asymmetry is intended.
# Returns: data.frame with columns localization, cell, rnaExtract, replicate,
#   rpkmFromBamFile, bare, readLength, rfbTrans.
#
# NOTE(review): pairing Rd1/Rd2 rows by position (grep index order) assumes the
# manifest lists mates in the same order; depends on global `rnaseqdir`.
getRpkmFromBamDataForOneCellByExon <- function( filesTxtTab="~/data/wgEncodeCshlLongRnaSeqFiles.tab",writeCopyScript=FALSE,suffix=".transByExon.gtf",transSuffix){
  df <- read.csv(file=filesTxtTab, stringsAsFactors=FALSE, sep="\t")
  # readType looks like e.g. "2x76D": take the part after "x", strip the "D".
  df$readLength <- as.numeric(gsub(sapply(strsplit(df$readType, "x"), function(x)x[2]),pattern="D",replacement=""))
  df.fastq <- subset(df,type=="fastq" & (localization == "nucleus" | localization == "cytosol"))
  read1 <- grep(df.fastq$filename,pattern="Rd1")
  read2 <- grep(df.fastq$filename,pattern="Rd2")
  df.comb <- data.frame(read1 = df.fastq[read1,], read2=df.fastq[read2,])
  # "bare" = mate-agnostic sample name (Rd1 marker and .fastq.gz removed).
  df.comb$bare <- gsub(gsub(df.comb$read1.filename,pattern="Rd1",replacement=""),pattern=".fastq.gz",replacement="")
  #/project/umw_zhiping_weng/wespisea/rna-seq//starSpikeIn/
  df.comb$remote <- file.path(rnaseqdir,"starSpikeIn/",paste0(df.comb$bare,suffix))
  if(writeCopyScript){
    o1 <- paste0("scp aw30w@ghpcc06.umassrc.org:",df.comb$remote, " /home/wespisea/data/rpkmFromBam/",paste0(df.comb$bare,suffix))
    write(o1,file="~/sandbox/rpkmFromBamExonFetch")
    system("~/sandbox/rpkmFromBamExonFetch")
  }
  df.comb$rpkmFromBamFile <- paste0("/home/wespisea/data/rpkmFromBam/",df.comb$bare,suffix)
  df.comb <- df.comb[c("read1.localization", "read1.cell", "read1.rnaExtract","read2.replicate" ,"rpkmFromBamFile", "bare","read1.readLength")]
  df.comb$rfbTrans <- paste0("/home/wespisea/data/rpkmFromBam/",df.comb$bare,transSuffix)
  colnames(df.comb) <- c("localization", "cell", "rnaExtract", "replicate",
                         "rpkmFromBamFile", "bare", "readLength","rfbTrans")
  df.comb
}
# Collapse a per-exon GTF into per-transcript read counts and write the result.
#
# Parses the exon GTF with a shell sed/awk pipeline (columns: start, stop,
# reads, gene_id, transcript_id, RPKM), sums reads and exon lengths per
# transcript, and — when ERCC spike-in transcripts are present — fits a
# zero-intercept linear model of known spike-in concentration on readsPerKb
# to predict a concentration (concBySpikeIn) for every transcript.
#
# Args:
#   transFile output path for the per-transcript table (via exportAsTable).
#   exonFile  input per-exon GTF; if missing the function returns 0 and
#             writes nothing.
# Returns: 0 if exonFile does not exist, 1 on success.
#
# NOTE(review): uses the long-deprecated dplyr chain operator `%.%` and shell
# tools (cat/sed/awk) — requires an old dplyr and a Unix environment.
convertExonsToTrans <- function(transFile,exonFile){
  if(!file.exists(exonFile)){
    return(0)
  }
  print(paste(transFile,exonFile,sep="\t"))
  tf <- tempfile()
  # Strip quotes/semicolons from GTF attributes and keep the 6 fields of interest.
  system( paste("cat",exonFile," | sed 's/[;\"]//g' |awk -F' ' '{print $4,$5,$6,$10,$12,$14}' > ",tf))
  exon.df <- read.csv(file=tf, sep=" ", stringsAsFactors=FALSE,header=FALSE)
  file.remove(tf)
  colnames(exon.df) <- c("startPos","stopPos","reads","gene_id", "transcript_id", "RPKM")
  exon.df$RPKM <- NULL
  exon.df$length <- with(exon.df,stopPos - startPos)
  exon.df$stopPos <- NULL
  exon.df$startPos <- NULL
  # Per-transcript totals; gene_id is constant within a transcript, take the first.
  trans.df <- as.data.frame(group_by(exon.df, transcript_id) %.%
                              summarise(reads = sum(reads),
                                        length = sum(length),
                                        gene_id = gene_id[1]))
  trans.df$readsPerLen <- with(trans.df, reads/length)
  trans.df$readsPerKb <- with(trans.df, reads/(length/1000))
  spike.df <- getSpikeInDf()
  df.spike <- trans.df[grep(pattern="ERCC",trans.df$gene_id),]
  if(dim(df.spike)[1] == 0){
    # No spike-ins detected in this sample: concentration cannot be estimated.
    trans.df$concBySpikeIn <- NA
  } else {
    df.spike$readsPerKb <- df.spike$reads / (df.spike$length/1000)
    #df.spike$RPKM <- df.spike$readsPerKb / millionsOfReads
    spike <- merge(df.spike,spike.df, by="gene_id")
    #s.lm <- glm(Pool14nmol.ul ~ reads + length, data=spike,family="poisson")
    # Zero-intercept fit: concentration assumed proportional to readsPerKb.
    s.lm <- glm(Pool14nmol.ul ~ readsPerKb + 0, data=spike)
    trans.df$concBySpikeIn <- predict(s.lm, newdata=trans.df)
  }
  exportAsTable(df=trans.df ,file=transFile)
  1
}
# For each cell type, pick one representative transcript per gene (the one with
# the highest summed readsPerLen across the four cytosol/nucleus x rep1/rep2
# libraries) and compute per-library RPKM and TPM for those transcripts.
# Results for all cells are concatenated and written to `outfile`.
#
# Args:
#   suffix      per-exon GTF suffix passed to getRpkmFromBamDataForOneCellByExon.
#   transSuffix per-transcript file suffix (same pass-through).
#   skipCells   NOTE(review): accepted but never used in the body.
#   outfile     destination for the combined table.
#
# Notes grounded in the body:
# - Only longPolyA samples are kept; H1-hESC is excluded explicitly.
# - Replicates 3/4 are folded onto 1/2 (replicate - 2).
# - Tie-breaking among equally expressed transcripts adds tiny runif noise,
#   so repeated runs can pick different transcripts for tied genes.
# - A cell is processed only if all four library files exist.
# - NOTE(review): the variable names a.cell.nuc1 / a.cell.cyt2 are misleading —
#   a.cell.nuc1 actually reads the cytosol rep-2 file and a.cell.cyt2 the
#   nucleus rep-1 file; the localization/replicate columns they set are
#   correct, so the combined result is unaffected.
processCellsMaxTransExpr_ByExon <- function(suffix=".transFromExon.gtf",
                                            transSuffix=".transByExon.gtf",
                                            skipCells=c("none"),
                                            outfile=getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-RRPM.tab")){
  annot.df <- getRpkmFromBamDataForOneCellByExon(suffix=suffix,transSuffix=transSuffix)
  annot.df <- annot.df[which(annot.df$rnaExtract == "longPolyA"),]
  annot.df <- annot.df[-which(annot.df$cell == "H1-hESC"),]
  trans <- annot.df$rfbTrans
  exons <- annot.df$rpkmFromBamFile
  annot.df$rep <- ifelse(annot.df$replicate >2,annot.df$replicate -2,annot.df$replicate )
 # annot.df$transFullReads<- gsub(x=annot.df$rfbGene,pattern="genes",replacement="transFullReads")
 # transFullReads <- annot.df$transFullReads
  # Regenerate every per-transcript table from its per-exon GTF.
  sapply(seq_along(exons), function(x)convertExonsToTrans(exonFile=exons[x],transFile=trans[x]))
  df.together <- data.frame()
  for ( cell in unique(annot.df$cell)){
    print(cell)
    readLen.vec <- annot.df[which(annot.df$cell == cell),"readLength"]
    # All libraries of a cell must share a read length for the TPM formula below.
    if (FALSE == (sum(readLen.vec[1] == readLen.vec) == length(readLen.vec))){
      print(paste("READ LENGTHS DIFFER :-( ", cell, "problematic"))
    }
    a.cell <- annot.df[which(annot.df$cell == cell),]
    cyt1.file <- a.cell[which(a.cell$rep == 1 & a.cell$localization == "cytosol"),"rfbTrans"]
    cyt2.file <- a.cell[which(a.cell$rep == 2 & a.cell$localization == "cytosol"),"rfbTrans"]
    nuc1.file <- a.cell[which(a.cell$rep == 1 & a.cell$localization == "nucleus"),"rfbTrans"]
    nuc2.file <- a.cell[which(a.cell$rep == 2 & a.cell$localization == "nucleus"),"rfbTrans"]
    if(file.exists(cyt1.file) && file.exists(cyt2.file) && file.exists(nuc1.file) && file.exists(nuc2.file)){
      #cyt rep 1
      a.cell.cyt1 <- read.csv(file=cyt1.file,sep="\t",stringsAsFactors=FALSE)
      a.cell.cyt1$localization <- "cytosol"
      a.cell.cyt1$replicate <- 1
      # cyt rep 2
      a.cell.nuc1 <- read.csv(file=cyt2.file,sep="\t",stringsAsFactors=FALSE)
      a.cell.nuc1$localization <- "cytosol"
      a.cell.nuc1$replicate <- 2
      # nuc rep1
      a.cell.cyt2 <- read.csv(file=nuc1.file,sep="\t",stringsAsFactors=FALSE)
      a.cell.cyt2$localization <- "nucleus"
      a.cell.cyt2$replicate <- 1
      #nuc rep 2
      a.cell.nuc2 <- read.csv(file=nuc2.file,sep="\t",stringsAsFactors=FALSE)
      a.cell.nuc2$localization <- "nucleus"
      a.cell.nuc2$replicate <- 2
      comb <- rbind(a.cell.cyt1,a.cell.cyt2,a.cell.nuc1,a.cell.nuc2)
      # Total readsPerLen per transcript across the four libraries.
      transExpr <- as.data.frame(group_by(comb,gene_id,transcript_id) %.% summarise(readsPerLen=sum(readsPerLen)))
      #colnames(transExpr) <- c("transcript_id", "gene_id", "readsPerLen")
      # Random jitter (< 1e-9) breaks exact ties so each gene keeps one transcript.
      transExpr$readsPerLen_tieBreaker <- transExpr$readsPerLen + runif(seq_along(transExpr$readsPerLen))/(10^9)
      gene.df <- as.data.frame(group_by(transExpr, gene_id) %.% filter(readsPerLen_tieBreaker == max(readsPerLen_tieBreaker)))
      cellTranscripts <- gene.df$transcript_id
      cTrans <- comb[which(comb$transcript_id %in% cellTranscripts),]
      cTrans$cell <- cell
      cTrans$reads <- as.numeric(cTrans$reads)
      cTrans$length <- as.numeric(cTrans$length)
      cTrans$readLength = readLen.vec[1]
      # RPKM/TPM computed per library (localization x replicate).
      cTransRPKM <- as.data.frame(group_by(cTrans,localization,replicate) %.% mutate(millReads = sum(reads,na.rm=TRUE)/(10^6),
                                                                                     RPKM = (reads * 10^9)/(length * sum(reads,na.rm=TRUE)),
                                                                                     TPM = (reads * readLength * 10^6)/(length * (sum(reads * readLength/length))) ))
      df.together <- rbind(df.together,cTransRPKM)
    }
  }
  exportAsTable(file=outfile,df=df.together)
}
# Annotate the top-transcript expression table with gene regions (lnc / mRNA /
# other), write per-library RPKM and TPM summary reports, and build the
# replicate-1 vs replicate-2 comparison tables for cytosol and nucleus.
#
# Args:
#   infile     top-transcript table produced by processCellsMaxTransExpr_ByExon.
#   reportFile destination for the per-library RPKM summary report.
# Returns: the path of the "-proc" merged cytosol/nucleus table that was
#   written (a ".rbind" stacked variant is written alongside it).
#
# Side effects: writes reportFile, the fixed-path TPM report, and the
# "-proc" / "-proc...rbind" tables. Depends on globals pc.v19.list,
# lnc.v19.list and project helpers getFullPath/exportAsTable/apply80norm/
# applyPseudoValByVar2, and on the deprecated dplyr `%.%` operator.
#
# Bug fixed: the TPM report was previously computed from the RPKM columns
# (mean(RPKM), sum(RPKM), sum(RPKM > 0)) and then exported `report.df` (the
# RPKM report) to the TPM report file; it now summarises TPM and exports
# reportTPM.df.
getDataTotalReadsBtwnReps_rpkmFromBamTopTrans_ByExon <- function(infile=getFullPath("data/rpkmFromBam-ExonCounting-TopTransCellType-RRPM.tab"),
                                                                 reportFile =getFullPath("/data/rpkmFromBam-ExonCounting-TopTransCellType-RRPM-REPORT.tab") ){
  df.together <- read.csv(file=infile,sep="\t")
  # Gene-id lists defining the lncRNA and protein-coding regions (GENCODE v19).
  pc <- readLines(pc.v19.list)
  lnc <- readLines(lnc.v19.list)
  df.together$region <- "other"
  df.together[which(df.together$gene_id %in% lnc),"region"] <- "lnc"
  df.together[which(df.together$gene_id %in% pc),"region"] <- "mRNA"
  df.together$gene_type <- df.together$region
  df.together$rnaExtract = "longPolyA"
  df.together$RPKM <- as.numeric(df.together$RPKM)
  # ERCC spike-in rows are flagged so reports can count them.
  df.together$isSpikeIn <- 0
  df.together[grep(pattern="ERCC",df.together$gene_id),"isSpikeIn"] <- 1
  # Per-library RPKM summary report.
  report.df <- as.data.frame(group_by(df.together,cell,localization,replicate) %.%
                               summarise(length(gene_id),
                                         mean(RPKM),
                                         sum(RPKM),
                                         sum(RPKM > 0),
                                         sum(isSpikeIn)))
  report.df$experiment <- paste(ifelse(report.df$localization == "cytosol", "cyt", "nuc"),report.df$replicate,sep=".")
  colnames(report.df) <- c("cell", "localization", "replicate", "genesFound", "meanRPKM",
                           "sumRPKM","genesExpressed","spikeInDetected" ,"experiment")
  exportAsTable(df=report.df, file = reportFile)
  # Per-library TPM summary report (fixed: summarise TPM, not RPKM).
  reportTPM.df <- as.data.frame(group_by(df.together,cell,localization,replicate) %.%
                           summarise(length(gene_id),
                                     mean(TPM),
                                     sum(TPM),
                                     sum(TPM > 0),
                                     sum(isSpikeIn)))
  reportTPM.df$experiment <- paste(ifelse(reportTPM.df$localization == "cytosol", "cyt", "nuc"),reportTPM.df$replicate,sep=".")
  colnames(reportTPM.df) <- c("cell", "localization", "replicate", "genesFound", "meanTPM",
                           "sumTPM","genesExpressed","spikeInDetected" ,"experiment")
  # Fixed: export the TPM report, not the RPKM report, to the TPM file.
  exportAsTable(df=reportTPM.df, file = getFullPath("/data/rpkmFromBam-ExonCounting-TopTransCellType-TPM-REPORT.tab"))
  # Upper-quartile-style normalization per library, scaled to per-million.
  df.together <- as.data.frame(group_by(df.together, cell, localization,rnaExtract,replicate) %.%
                                 mutate(RPKM_80norm = apply80norm(RPKM) * 1000000))
  # group_by(df.together, cell, localization,rnaExtract,replicate) %.% summarise(mean(RPKM_80norm/transTotalRPKM, na.rm=TRUE))
  #exportAsTable(file=getFullPath("/data/rpkmFromBamTopTransAllCells.tab"), df=df.together)
  df.together$gene_type <- df.together$region
  df.abbrev <- df.together[ c("region","replicate", "gene_id","gene_type", "localization","rnaExtract","cell", "isSpikeIn", "RPKM_80norm","RPKM","reads","concBySpikeIn","TPM","readsPerKb")]
  df.rep.1 <- subset(df.abbrev, replicate == 1)
  df.rep.2 <- subset(df.abbrev, replicate == 2)
  # ---- Cytosol: merge rep1 vs rep2 per (measure, gene, cell) and derive
  # replicate fractions/ratios, with pseudo-counted variants to avoid 0/0.
  df.cyt.rep1 <- subset(df.rep.1, localization == "cytosol")
  df.cyt.rep2 <- subset(df.rep.2, localization == "cytosol")
  idVars <- c("gene_id","gene_type", "localization","rnaExtract","cell", "isSpikeIn","replicate","region")
  idVarsNorep <- c("variable","gene_id","gene_type", "localization","rnaExtract","cell", "isSpikeIn","region")
  df.cyt.rep1.melt <- melt(df.cyt.rep1, id.vars = idVars)
  df.cyt.rep2.melt <- melt(df.cyt.rep2, id.vars = idVars)
  df.cyt <- merge(df.cyt.rep1.melt, df.cyt.rep2.melt, by = idVarsNorep,suffixes=c(".rep1", ".rep2"))
  df.cyt$expr <- paste(df.cyt$localization,df.cyt$rnaExtract)
  df.cyt$value.rep1 <- ifelse(is.na(as.numeric(df.cyt$value.rep1)), 0, as.numeric(df.cyt$value.rep1))
  df.cyt$value.rep2 <- ifelse(is.na(as.numeric(df.cyt$value.rep2)), 0, as.numeric(df.cyt$value.rep2))
  df.cyt$value.rep1.pseudo <- applyPseudoValByVar2(value= df.cyt$value.rep1, var=df.cyt$variable)
  df.cyt$value.rep2.pseudo <- applyPseudoValByVar2(value = df.cyt$value.rep2 , var=df.cyt$variable)
  df.cyt$rep1.frac <- df.cyt$value.rep1/(df.cyt$value.rep1 + df.cyt$value.rep2)
  df.cyt$rep1.frac.pseudo <- df.cyt$value.rep1.pseudo/(df.cyt$value.rep1.pseudo + df.cyt$value.rep2.pseudo)
  df.cyt$rep2.frac <- df.cyt$value.rep2/(df.cyt$value.rep1 + df.cyt$value.rep2)
  df.cyt$rep2.frac.pseudo <- df.cyt$value.rep2.pseudo/(df.cyt$value.rep1.pseudo + df.cyt$value.rep2.pseudo)
  df.cyt$rep.ratio <- df.cyt$value.rep1/( df.cyt$value.rep2)
  df.cyt$rep.ratio.pseudo <- df.cyt$value.rep1.pseudo/(df.cyt$value.rep2.pseudo)
  df.cyt$value.ave <- (df.cyt$value.rep1 + df.cyt$value.rep2)/2
  # ---- Nucleus: same construction as the cytosol block above.
  df.nuc.rep1 <- subset(df.rep.1, localization == "nucleus")
  df.nuc.rep2 <- subset(df.rep.2, localization == "nucleus")
  df.nuc.rep1.melt <- melt(df.nuc.rep1, id.vars=idVars)
  df.nuc.rep2.melt <- melt(df.nuc.rep2, id.vars = idVars)
  df.nuc <- merge(df.nuc.rep1.melt, df.nuc.rep2.melt, by = idVarsNorep,suffixes=c(".rep1", ".rep2"))
  df.nuc$expr <- paste(df.nuc$localization,df.nuc$rnaExtract)
  df.nuc$value.rep1 <- ifelse(is.na(as.numeric(df.nuc$value.rep1)), 0, as.numeric(df.nuc$value.rep1))
  df.nuc$value.rep2 <- ifelse(is.na(as.numeric(df.nuc$value.rep2)), 0, as.numeric(df.nuc$value.rep2))
  df.nuc$value.rep1.pseudo <- applyPseudoValByVar2(value= df.nuc$value.rep1, var=df.nuc$variable)
  df.nuc$value.rep2.pseudo <- applyPseudoValByVar2(value = df.nuc$value.rep2 , var=df.nuc$variable)
  df.nuc$rep1.frac <- df.nuc$value.rep1/(df.nuc$value.rep1 + df.nuc$value.rep2)
  df.nuc$rep1.frac.pseudo <- df.nuc$value.rep1.pseudo/(df.nuc$value.rep1.pseudo + df.nuc$value.rep2.pseudo)
  df.nuc$rep2.frac <- df.nuc$value.rep2/(df.nuc$value.rep1 + df.nuc$value.rep2)
  df.nuc$rep2.frac.pseudo <- df.nuc$value.rep2.pseudo/(df.nuc$value.rep1.pseudo + df.nuc$value.rep2.pseudo)
  df.nuc$rep.ratio <- df.nuc$value.rep1/( df.nuc$value.rep2)
  df.nuc$rep.ratio.pseudo <- df.nuc$value.rep1.pseudo/(df.nuc$value.rep2.pseudo)
  df.nuc$value.ave <- (df.nuc$value.rep1 + df.nuc$value.rep2)/2
  #df.cytNuc <- rbind(df.cyt,df.nuc)
  #df.cytNuc[which(df.cytNuc$gene_id %in% pc),"region"] <- "mRNA"
  #df.cytNuc[which(df.cytNuc$gene_id %in% lnc),"region"] <- "lncRNA"
  # Wide (cyt vs nuc merged) and stacked variants of the comparison table.
  df.cytNuc <- merge(df.cyt,df.nuc,by=c("gene_id","cell","variable"),suffixes=c(".cyt",".nuc"))
  df.cytNuc.rbind <- rbind(df.cyt,df.nuc)
  # Derive the output name from infile: "<base>-proc.<ext>".
  # NOTE(review): splitting on every "." assumes infile contains exactly one dot.
  in.vec <- unlist(strsplit(infile,"\\."))
  outfile = paste0(in.vec[1],"-proc.",in.vec[2])
  exportAsTable(file=outfile, df=df.cytNuc)
  exportAsTable(file=paste0(outfile,".rbind"), df=df.cytNuc.rbind)
  outfile
}
# Generate the cytosolic-fraction diagnostic plot set from a "-proc" table
# written by getDataTotalReadsBtwnReps_rpkmFromBamTopTrans_ByExon.
#
# Args:
#   infile  path to the merged cyt/nuc comparison table; the function also
#           reads the sibling "<infile>.rbind" stacked table.
#   outdir  directory that receives all PNG outputs (created if absent).
#   plotMsg free-text string appended to every plot title.
# Returns: the value of the final ggsave call (plots are the real output).
#
# Every plot is written via ggsave; the function is pure side effect.
# Depends on ggplot2/reshape2, old dplyr `%.%`, project helper apply80norm,
# and theme objects thisTheme/thisTheme2 defined elsewhere in the project.
plotRatiosTopTrans <- function(infile,outdir = getFullPath("plots/rnaExpr/mappedReads/RPKMfromBamTopTrans/cytFracByExon/"),
                               plotMsg=" "){
  df.cytNuc <- read.csv(sep="\t",file=infile)
  # Cytosolic fraction = cyt / (cyt + nuc), with and without pseudo-counts.
  df.cytNuc$cytFracPseudo <- with(df.cytNuc, (value.rep1.pseudo.cyt+value.rep2.pseudo.cyt)/(value.rep1.pseudo.cyt + value.rep2.pseudo.cyt + value.rep1.pseudo.nuc + value.rep2.pseudo.nuc))
  df.cytNuc$cytFrac <- with(df.cytNuc, (value.ave.cyt)/(value.ave.cyt + value.ave.nuc))
  # Keep genes expressed in both compartments; split by measure.
  df.cytNuc.pos <- df.cytNuc[which(df.cytNuc$value.ave.cyt != 0 & df.cytNuc$value.ave.nuc != 0),]
  df.lpa.ratio.rpkm <- df.cytNuc.pos[which(df.cytNuc.pos$variable =="RPKM"),]
  df.lpa.ratio.tpm <- df.cytNuc.pos[which(df.cytNuc.pos$variable =="TPM"),]
  df.lpa.ratio.rpkm80 <- df.cytNuc.pos[which(df.cytNuc.pos$variable =="RPKM_80norm"),]
  if(!file.exists(paste0(outdir,"/"))){
    dir.create(outdir)
  }
  # ---- RPKM: density of cytosolic fraction vs total expression, per cell/region.
  ggplot(df.lpa.ratio.rpkm, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFracPseudo,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nRPKM: cytPseudo/(nucPseudo + cytPseudo)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkmPseudo-cells.png"), height=12,width=5)
  ggplot(df.lpa.ratio.rpkm, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFrac,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nRPKM: cyt/(nuc + cyt)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkm-cells.png"), height=12,width=5)
  # ---- Top-7000 genes per cell, re-normalized (apply80norm) after the cutoff.
  rfbTopExpr <- as.data.frame(group_by(df.lpa.ratio.rpkm,cell) %.%
                                mutate(exprRank = rank(1/(value.ave.nuc + value.ave.cyt))))
  rfbTopExpr7000 <- as.data.frame(group_by(rfbTopExpr[which(rfbTopExpr$exprRank < 7000),], cell) %.%
                                    mutate(value.rep1.cyt = apply80norm(value.rep1.cyt) * 1000000,
                                           value.rep2.cyt = apply80norm(value.rep2.cyt) * 1000000,
                                           value.rep1.nuc = apply80norm(value.rep1.nuc) * 1000000,
                                           value.rep2.nuc = apply80norm(value.rep2.nuc) * 1000000,
                                           value.ave.cyt = (value.rep1.cyt + value.rep2.cyt)/2,
                                           value.ave.nuc = (value.rep1.nuc + value.rep2.nuc)/2,
                                           cytFrac = (value.ave.cyt)/(value.ave.cyt + value.ave.nuc),
                                           total.cyt = sum(value.ave.cyt),
                                           total.nuc = sum(value.ave.nuc)))
  ggplot(rfbTopExpr7000, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFrac,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+xlim(0,1)+
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nTop 7000 Genes Per Cell\nRe-normalized after cutoff\nFraction of Cytosolic RNA-seq expr\nRPKM: cyt/(nuc + cyt)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkm-top7000-cells.png"), height=12,width=5)
  # Per-biotype expression share within the top-7000 set.
  exprProp7000 <-as.data.frame(group_by(rfbTopExpr7000,cell,region.cyt) %.%
                                 summarise(cytExprFrac = sum(value.ave.cyt),
                                           nucExprFrac = sum(value.ave.nuc)))
  ggplot(exprProp7000,aes(x=cell,y=cytExprFrac,fill=region.cyt))+
    geom_bar(stat="identity",position="fill")+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nTop 7000 Genes Per Cell\nRe-normalized after cutoff\nProportion of Exression Per Biotype\nCytosol",plotMsg))
  ggsave(paste0(outdir,"/cytProportion-top7000-cells.png"), height=7,width=7)
  ggplot(exprProp7000,aes(x=cell,y=nucExprFrac,fill=region.cyt))+
    geom_bar(stat="identity",position="fill")+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nTop 7000 Genes Per Cell\nRe-normalized after cutoff\nProportion of Exression Per Biotype\nNucleus",plotMsg))
  ggsave(paste0(outdir,"/nucProportion-top7000-cells.png"), height=7,width=7)
  m7000 <- melt(exprProp7000[which(exprProp7000$region.cyt == "lnc"),],id.var=c("cell","region.cyt"))
  ggplot(m7000,aes(x=cell,y=value,fill=variable))+
    geom_bar(stat="identity",position="fill")+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+ ylab("Expression Fraction loc1/(loc1 + loc2)") +
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nTop 7000 Genes Per Cell\nRe-normalized after cutoff\nProportion of Exression Per Biotype\nLncRNA comparison",plotMsg))
  ggsave(paste0(outdir,"/lncRNA-proportionComp-top7000-cells.png"), height=7,width=7)
  # ---- Same proportion plots on all genes (no top-7000 cutoff).
  exprProp.rpkm <-as.data.frame(group_by(df.lpa.ratio.rpkm,cell,region.cyt) %.%
                                  summarise(cytExprFrac = sum(value.ave.cyt),
                                            nucExprFrac = sum(value.ave.nuc)))
  ggplot(exprProp.rpkm,aes(x=cell,y=cytExprFrac,fill=region.cyt))+
    geom_bar(stat="identity",position="fill")+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nAll Genes\nProportion of Exression Per Biotype(RPKM\nCytosol",plotMsg))
  ggsave(paste0(outdir,"/cytProportion-allGenes-cells.png"), height=7,width=7)
  ggplot(exprProp.rpkm,aes(x=cell,y=nucExprFrac,fill=region.cyt))+
    geom_bar(stat="identity",position="fill")+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nAll Genes\nProportion of Exression Per Biotype(RPKM)\nNucleus",plotMsg))
  ggsave(paste0(outdir,"/nucProportion-allGenes-cells.png"), height=7,width=7)
  # Recover the lncRNA region label from the data itself (e.g. "lnc").
  lncRNA <- names(table(df.lpa.ratio.rpkm$region.cyt))[grep(x=names(table(df.lpa.ratio.rpkm$region.cyt)),pattern="lnc")]
  mAll <- melt(exprProp.rpkm[which(exprProp.rpkm$region.cyt == lncRNA),],id.var=c("cell","region.cyt"))
  ggplot(mAll,aes(x=cell,y=value,fill=variable))+
    geom_bar(stat="identity",position="fill")+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+ ylab("Expression Fraction, loc1/(loc1 + loc2)") +
    ggtitle(paste0("RPKMfromBAM Top Trans Count By Exon\nAll Genes\nProportion of Exression Per Biotype(RPKM)\nLncRNA comparison",plotMsg))
  ggsave(paste0(outdir,"/lncRNA-proportionComp-allGenes-cells.png"), height=7,width=7)
  #TPM
  ggplot(df.lpa.ratio.tpm, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFracPseudo,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nTPM: cytPseudo/(nucPseudo + cytPseudo)\n",plotMsg))
  ggsave(paste0(outdir,"/tpmPseudo-cells.png"), height=12,width=5)
  ggplot(df.lpa.ratio.tpm, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFrac,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nTPM: cyt/(nuc + cyt)\n",plotMsg))
  ggsave(paste0(outdir,"/tpm-cells.png"), height=12,width=5)
  # Bar views of the RPKM cytosolic fraction.
  ggplot(df.lpa.ratio.rpkm, aes(x=cytFracPseudo,fill=factor(region.cyt)))+
    geom_bar(position="dodge") + theme_bw() + thisTheme +
    facet_grid(cell~.)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nRPKM: cytPseudo/(nucPseudo + cytPseudo)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkmPseudo-bars-cells.png"), height=12,width=5)
  # NOTE(review): stray double comma in the aes() call below (after cytFrac);
  # R parses it as an empty (missing) argument — confirm ggplot tolerates it.
  ggplot(df.lpa.ratio.rpkm, aes(x=cytFrac,,fill=factor(region.cyt)))+
    geom_bar(position="dodge") + theme_bw() + thisTheme +
    facet_grid(cell~.)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nRPKM: cyt/(nuc + cyt)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkm-bars-cells.png"), height=12,width=5)
  #RPKM80 norm
  ggplot(df.lpa.ratio.rpkm80, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFracPseudo,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nRPKM80: cytPseudo/(nucPseudo + cytPseudo)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkm80Pseudo-cells.png"), height=12,width=5)
  ggplot(df.lpa.ratio.rpkm80, aes(y=log10(value.ave.cyt*2 + value.ave.nuc*2),x=cytFrac,color=factor(region.cyt)))+
    geom_density2d() + theme_bw() + thisTheme +
    facet_grid(cell~region.cyt)+
    ggtitle(paste("RPKMfromBAM Top Trans Count By Exon\nFraction of Cytosolic RNA-seq expr\nRPKM80: cyt/(nuc + cyt)\n",plotMsg))
  ggsave(paste0(outdir,"/rpkm80-cells.png"), height=12,width=5)
  # ---- Replicate-agreement summaries from the stacked (.rbind) table.
  df.cytNuc.rbind <- read.csv(sep="\t",file=paste0(infile,".rbind"))
  m.df <- as.data.frame(group_by(df.cytNuc.rbind , cell, localization,variable,region) %.%
                          summarize(sum.rep1 = sum(value.rep1),
                                    sum.rep2 = sum(value.rep2),
                                    expr.rep1 = sum(value.rep1 > 0 ),
                                    expr.rep2 = sum(value.rep2 > 0 )))
  colnames(m.df) <- c("cell", "localization", "measure", "region", "sum.rep1", "sum.rep2", "expr.rep1", "expr.rep2")
  melt.df <- melt(m.df, id.vars=c("cell", "localization", "measure","region"))
  m.df$frac.rep1 = with(m.df, (sum.rep1)/(sum.rep1 + sum.rep2))
  ggplot(melt.df[which(melt.df$variable %in% c("expr.rep1", "expr.rep2") & melt.df$measure == "RPKM"),], aes(x=variable,y=value,fill=region)) +
    geom_bar(stat="identity") +
    facet_grid(cell ~localization) + xlab("replicates") + ylab("count") +
    ggtitle(paste("RPKMfromBam Top Trans Count by Exon\nnumber of expressed genes\n",plotMsg))+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  ggsave(paste0(outdir,"/expr-cytNuc.png"), height=12,width=5)
  # localization vs. cell
  # "RPKM_80norm","RPKM","reads","concBySpikeIn","spikeIn_norm
  ggplot(m.df, aes(x=measure,y=frac.rep1,color=region)) +
    geom_boxplot() + geom_abline(slope=0,intercept=1/2,color="red") +
    facet_grid(localization~cell) +
    scale_x_discrete(limits=c("reads","RPKM", "RPKM_80norm","concBySpikeIn","TPM"),
                     labels=c("reads","RPKM" ,"RPKM_80","conc.","TPM"))+ ylim(0,1) +
    thisTheme +
    ggtitle(paste("RPKMfromBam Top Trans Count by Exon\nfraction of cytosol & nucleus \nreads/RPKM/RPKM80/conc/TPM\nfrac.rep1=(rep1)/(rep1 + rep2)\n",plotMsg))+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  ggsave(paste0(outdir,"/readCount-cytNuc-region.png"), height=5,width=12)
  # combined
  ggplot(m.df, aes(x=measure,y=frac.rep1,color=region)) +
    geom_boxplot() + geom_abline(slope=0,intercept=1/2,color="red") +
    scale_x_discrete(limits=c("reads","RPKM", "RPKM_80norm","concBySpikeIn","TPM"),
                     labels=c("reads","RPKM" ,"RPKM_80","conc.","TPM"))+ ylim(0,1) +
    thisTheme2 +
    ggtitle(paste("RPKMfromBam Top Trans Count by Exon\nfraction of cytosol & nucleus \nreads/RPKM/RPKM80/RPKMspikeIn/Conc/TPM\n frac.rep1=(rep1)/(rep1 + rep2)\n",plotMsg))+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  ggsave(paste0(outdir,"/readCount-cytNuc-all-combined.png"), height=5,width=10)
  ggplot(m.df, aes(x=measure,y=frac.rep1)) +
    geom_boxplot() + geom_abline(slope=0,intercept=1/2,color="red") +
    scale_x_discrete(limits=c("reads","RPKM", "RPKM_80norm","concBySpikeIn","TPM"),
                     labels=c("reads","RPKM" ,"RPKM_80","conc.","TPM"))+ ylim(0,1) +
    thisTheme2 +
    ggtitle(paste("RPKMfromBam Top Trans Count by Exon\nfraction of cytosol & nucleus\nreads/RPKM/RPKM80/RPKMspikeIn/Conc/TPM\nfrac.rep1=(rep1)/(rep1 + rep2)\n",plotMsg))+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  ggsave(paste0(outdir,"/readCount-cytNuc-all-combined-join.png"), height=5,width=10)
  #seperate by localization only
  ggplot(m.df, aes(x=measure,y=frac.rep1,color=region)) +
    geom_boxplot() + geom_abline(slope=0,intercept=1/2,color="red") +
    facet_grid(~localization) +
    scale_x_discrete(limits=c("reads","RPKM", "RPKM_80norm","concBySpikeIn","TPM"),
                     labels=c("reads","RPKM" ,"RPKM_80","conc.","TPM"))+ ylim(0,1) +
    thisTheme2 + ggtitle(paste("RPKMfromBam Top Trans Count by Exon\nfraction of cytosol & nucleus \nreads/RPKM/RPKM80/RPKMspikeIn/Conc/TPM\nfrac.rep1=(rep1)/(rep1 + rep2)\n",plotMsg))+
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  ggsave(paste0(outdir,"/readCount-cytNuc-combined.png"), height=6,width=10)
}
|
b1747725e1824bdd44c22928980d1fae3a611664 | 2079387e1bedeae40572c6f486168faea1eb4c09 | /R/DiagrammeR/diagrammeR.R | 22b7217f813ce1c1faa3d77a8976716d21f23e01 | [] | no_license | jamesgitting/visualizations | aa312dbcbf62ac32c75eef6d262bdce9bc8a1934 | 5611a22f0f27dab8af5ce3b00434374aac8e8902 | refs/heads/master | 2020-03-21T13:44:06.731512 | 2018-07-13T19:02:05 | 2018-07-13T19:02:05 | 138,622,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 721 | r | diagrammeR.R | # DiagrammeR Example ----------------------------------------------------------------------------
# <Insert Short Description and Use Cases>
# Libraries -----------------------------------------------------------------------------------
library(DiagrammeR)
library(DiagrammeRsvg)
# Configuration -------------------------------------------------------------------------------
output_dir <- file.path("./DiagrammeR/visuals/")
# Samples -------------------------------------------------------------------------------------
simple_gv <- grViz("
digraph boxes_and_circles {
node [shape = circle]
A; B; C; D; E; F
node [shape = triangle]
1; 2; 3; 4; 5; 6; 7; 8
# Edge Statements
A->1; B->2; B->3;
}
") |
f17a1b1c73e55f2e92343920de11b7a9f3b7fb2d | 6c0fe5499cd7c3d00867f51f9e257d2a938b0632 | /preprocessing/distr_plots.R | cc481b429b4c7c4563c3259ffb2865aec2ff686e | [] | no_license | simonfqy/SimBoost | eb2f973c24509671353c439375338217e3c0f250 | d8ee4d928eb2685f37fc512041090a90f81231c8 | refs/heads/master | 2020-03-15T03:12:38.715210 | 2018-06-15T05:28:07 | 2018-06-15T05:28:07 | 131,936,897 | 0 | 0 | null | 2018-05-03T03:27:21 | 2018-05-03T03:27:20 | null | UTF-8 | R | false | false | 652 | r | distr_plots.R | par(mfrow=c(1,3))
load('../data/davis_data.Rda')
hist(davis_triplet[,3], main="Distribution of values in dataset Davis", xlab="binding affinity in pKd", col='grey', border='white', cex.lab = 1.4)
abline(v=7.0, col="red")
load('../data/metz_data.Rda')
hist(metz_triplet[,3], main="Distribution of values in dataset Metz", xlab="binding affinity in pKi", col='grey', border='white', cex.lab = 1.4)
abline(v=7.6, col="red")
load('../data/kiba_data.Rda')
hist(kiba_triplet[,3], main="Distribution of values in dataset KIBA", xlab="binding affinity as KIBA score", col='grey', border='white', cex.lab = 1.4)
abline(v=12.1, col="red")
par(mfrow=c(1,1))
|
4bed51de0f9ac985eac63478fb96c2ce7e9beb0f | cce54237311aab66c2914e4992eb844fe5d260c7 | /Ants/man/sim_pencov_troph.Rd | 5c6048880cb510dda3b6a092047fc94f0bfec536 | [] | no_license | MLBartley/Ant-Research | 765ec5fb04366c289b950ba8614472cb26f5b302 | 921e560d6802bdc930ec622834a51d39a2c12487 | refs/heads/master | 2020-04-05T23:16:03.410628 | 2019-12-05T19:13:54 | 2019-12-05T19:13:54 | 42,834,698 | 0 | 0 | null | 2017-11-03T18:04:38 | 2015-09-21T00:08:07 | HTML | UTF-8 | R | false | true | 1,268 | rd | sim_pencov_troph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation_functions.R
\name{sim_pencov_troph}
\alias{sim_pencov_troph}
\title{Simulation of Ant Trophallaxis Start Times - PenCov Model}
\usage{
sim_pencov_troph(states, time_max, delta_t, start_state = 1, int_rate,
num_locations = 1, covariate, switch_betas)
}
\arguments{
\item{switch_betas}{}
}
\value{
This function will return the following:
\enumerate{
\item inter_persec: (0, ...) 'observed' number of
interactions per 1 second
\item state: (1 = low, 2 = high) unobserved two state process
\item cumu_inter: (0, ...) cumulative count of interactions
\item bin_inter: Trophallaxis interactions binned into smaller
intervals determined by sum over delta_t
\item bin_state: Trophallaxis rate state binned into smaller
intervals, determined by average over delta_t
\item bin_sec: (0, ...) time (in seconds) binned by delta_t
\item start_time: vector of interaction start times
\item location: location of each ant interaction
\item Visuals: 1x1 visual of cumulative counts over time, colored
by state, separated by location if applicable.
}
}
\description{
Simulation of Ant Trophallaxis Start Times - PenCov Model
}
|
22ada324dba7b3ccdc32ba4ac3cfbb4468e3c9c2 | c2b34e631c39b992e928fb758634880f47cc679a | /man/vcovCR.rma.mv.Rd | 81e6e643f56462c4c8615d13e723903ec35e69af | [] | no_license | jepusto/clubSandwich | b74b04f3d8a1fc7aab3b385317436f4fd80b69bb | 9084e82485b74723c4ca39fed8e7e9c062ad2468 | refs/heads/main | 2023-08-19T07:31:47.250073 | 2023-08-04T22:50:22 | 2023-08-04T22:50:22 | 38,020,216 | 47 | 11 | null | 2023-08-04T22:48:21 | 2015-06-25T01:17:20 | HTML | UTF-8 | R | false | true | 3,055 | rd | vcovCR.rma.mv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rma-mv.R
\name{vcovCR.rma.mv}
\alias{vcovCR.rma.mv}
\title{Cluster-robust variance-covariance matrix for an rma.mv object.}
\usage{
\method{vcovCR}{rma.mv}(obj, cluster, type, target, inverse_var, form = "sandwich", ...)
}
\arguments{
\item{obj}{Fitted model for which to calculate the variance-covariance matrix}
\item{cluster}{Optional expression or vector indicating which observations
belong to the same cluster. If not specified, will be set to the factor in
the random-effects structure with the fewest distinct levels. Caveat
emptor: the function does not check that the random effects are nested.}
\item{type}{Character string specifying which small-sample adjustment should
be used, with available options \code{"CR0"}, \code{"CR1"}, \code{"CR1p"},
\code{"CR1S"}, \code{"CR2"}, or \code{"CR3"}. See "Details" section of
\code{\link{vcovCR}} for further information.}
\item{target}{Optional matrix or vector describing the working
variance-covariance model used to calculate the \code{CR2} and \code{CR4}
adjustment matrices. If not specified, the target is taken to be the
estimated variance-covariance structure of the \code{rma.mv} object.}
\item{inverse_var}{Optional logical indicating whether the weights used in
fitting the model are inverse-variance. If not specified, \code{vcovCR}
will attempt to infer a value.}
\item{form}{Controls the form of the returned matrix. The default
\code{"sandwich"} will return the sandwich variance-covariance matrix.
Alternately, setting \code{form = "meat"} will return only the meat of the
sandwich and setting \code{form = B}, where \code{B} is a matrix of
appropriate dimension, will return the sandwich variance-covariance matrix
calculated using \code{B} as the bread. \code{form = "estfun"} will return the
(appropriately scaled) estimating function, the transposed crossproduct of
which is equal to the sandwich variance-covariance matrix.}
\item{...}{Additional arguments available for some classes of objects.}
}
\value{
An object of class \code{c("vcovCR","clubSandwich")}, which consists
of a matrix of the estimated variance of and covariances between the
regression coefficient estimates.
}
\description{
\code{vcovCR} returns a sandwich estimate of the variance-covariance matrix
of a set of regression coefficient estimates from a
\code{\link[metafor]{rma.mv}} object.
}
\examples{
pkgs_available <-
requireNamespace("metafor", quietly = TRUE) &
requireNamespace("metadat", quietly = TRUE)
if (pkgs_available) withAutoprint({
library(metafor)
data(dat.assink2016, package = "metadat")
mfor_fit <- rma.mv(yi ~ year + deltype,
V = vi, random = ~ 1 | study / esid,
data = dat.assink2016)
mfor_fit
mfor_CR2 <- vcovCR(mfor_fit, type = "CR2")
mfor_CR2
coef_test(mfor_fit, vcov = mfor_CR2, test = c("Satterthwaite", "saddlepoint"))
Wald_test(mfor_fit, constraints = constrain_zero(3:4), vcov = mfor_CR2)
})
}
\seealso{
\code{\link{vcovCR}}
}
|
224cd40c45f212cca5e87e481ed488fdec52bdf8 | bacba0d49109344c7f303269a9d991be2840297f | /src/dspg19_final_job_dataset_define.R | b0c7260721afa945e2f7c2f46ab6c75905c3d374 | [] | no_license | uva-bi-sdad/stem_edu | 6dbf0d814fec5d5fcf6234f0533ae5db63322307 | 24d56f420d254379a729fb7c73847f19f21e5930 | refs/heads/master | 2020-05-23T09:22:56.901163 | 2019-08-08T19:10:35 | 2019-08-08T19:10:35 | 186,703,989 | 4 | 1 | null | 2019-09-30T19:14:39 | 2019-05-14T21:36:30 | R | UTF-8 | R | false | false | 4,969 | r | dspg19_final_job_dataset_define.R | ####CREATING FINAL JOB AD DATA FILES FOR ANALYSIS####
library(data.table)
library(tidyverse)
library(lubridate)
###Files needed: all main job ads and job ad skills in Virginia for 2016 and 2017, classification of
###skills as hard and soft, classificatoins of ONET codes as STW/non-STW
###load in all jobs and skills in 2016 and 2017
loc <- file.path("data/stem_edu/working/burning_glass_ad_combine_16_17")
job_main <- fread(file.path(loc, "combine_16_17_job_ad_main.csv"))
job_skill <- fread(file.path(loc, "combine_16_17_job_ad_skill.csv"))
job_main <- job_main[,2:54]
job_skill <- job_skill[,2:10]
#cleaning up -999 as NA in "edu" in main job file
job_main %>% group_by(edu) %>% summarise(count = n())
job_main[job_main$edu == -999, "edu"] <- NA
job_main %>% group_by(edu) %>% summarise(count = n())
###load in list of STW occupations
loc <- file.path("src/ONET_define")
stw_occs <- fread(file.path(loc, "Rothwell_STW_list.csv"))
###get down to just STW jobs
stw_job_main <- job_main[onet %chin% stw_occs$onet]
stw_job_main$bgtjobid <- as.character(stw_job_main$bgtjobid)
#how many of these jobs do not require a bachelor's degree or above?
stw_job_main %>% group_by(edu) %>% summarise(count = n())
nrow(filter(stw_job_main, edu <= 14 | is.na(edu) == TRUE))
##splitting to Richmond and Blacksburg
r_stw_job_main <- filter(stw_job_main, msaname == "Richmond, VA")
b_stw_job_main <- filter(stw_job_main, msaname == "Blacksburg-Christiansburg-Radford, VA")
###determining top 5 jobs and associated skills in each place
# Identify the most-advertised sub-baccalaureate occupations in a set of ads.
#
# Args:
#   adList: ad-level data with (at least) edu, jobhours, internship,
#           onet, onetname, bgtjobid columns.
#   n_top:  number of top occupations to keep (default 5, matching the
#           original behavior).
#
# Prints the top occupations with their ad counts, and returns the
# individual ads belonging to those occupations (bgtjobid as character).
top5job <- function(adList, n_top = 5){
  # Keep ads that do not require a bachelor's degree or above
  # (edu codes <= 14, or missing), excluding part-time jobs and internships.
  # (Simplified `is.na(edu) == TRUE` to the idiomatic `is.na(edu)`.)
  assocBelow <- adList %>% filter(edu <= 14 | is.na(edu), jobhours != "parttime", internship == 0)
  # Rank occupations (ONET code + name) by ad volume.
  jobDesc <- assocBelow %>% group_by(onet, onetname) %>% summarise(count = n()) %>% arrange(desc(count))
  # head() is robust when fewer than n_top occupations exist
  # (the original `jobDesc[1:5,]` would produce NA rows in that case).
  topN <- head(jobDesc, n_top)
  print(topN)
  jobFilter <- filter(assocBelow, onetname %in% topN$onetname)
  jobFilter$bgtjobid <- as.character(jobFilter$bgtjobid)
  jobFilter
}
# Top-5 sub-baccalaureate STW occupations (and their ads) for each metro.
r_top5_main <- top5job(r_stw_job_main)
b_top5_main <- top5job(b_stw_job_main)
### Getting skills data for STW jobs in Richmond and Blacksburg.
## Both hard and soft skills:
loc <- file.path("data/stem_edu/working/hard_soft_skills")
hard_soft_skills <- fread(file.path(loc, "stw_hard-soft-skill.csv"))
# Attach all skills (hard and soft) listed for a set of job ads.
#
# Args:
#   jobList:        ad-level data with bgtjobid, onet, onetname columns.
#   skillList:      long ad-to-skill table (one row per ad/skill pair).
#   skillClassList: skill classification table with skill and hard_soft columns.
#
# Returns the skill rows belonging to the given ads, joined with the ad's
# ONET code/name and each skill's hard/soft classification.
findAllSkill <- function(jobList, skillList, skillClassList){
  # Normalize the join key to character on both sides before matching.
  skillList$bgtjobid <- as.character(skillList$bgtjobid)
  jobList$bgtjobid <- as.character(jobList$bgtjobid)
  skillClass <- select(skillClassList, skill, hard_soft)
  skillFilter <- filter(skillList, bgtjobid %in% jobList$bgtjobid)
  skillFilter <- left_join(skillFilter, jobList[,c("bgtjobid", "onet", "onetname")], by = "bgtjobid")
  # Explicit join key: previously an implicit natural join, which would
  # silently join on any other columns the tables happen to share.
  skillFilter <- inner_join(skillFilter, skillClass, by = "skill")
  skillFilter
}
# All (hard + soft) skills attached to the top-5 occupations' ads.
r_top5_job_all_skill <- findAllSkill(r_top5_main, job_skill, hard_soft_skills)
b_top5_job_all_skill <- findAllSkill(b_top5_main, job_skill, hard_soft_skills)
## Finding just hard skills (data.table filter on the classification column).
hard_skills <- hard_soft_skills[hard_soft == "hard"]
# Attach hard skills only for a set of job ads.
#
# Args:
#   jobList:       ad-level data with bgtjobid, onet, onetname columns.
#   skillList:     long ad-to-skill table (one row per ad/skill pair).
#   hardSkillList: table whose skill column enumerates the hard skills.
#
# Returns the hard-skill rows for the given ads, joined with the ad's
# ONET code/name.
findHardSkill <- function(jobList, skillList, hardSkillList){
  # Coerce the join key to character on both tables before matching.
  skillList$bgtjobid <- as.character(skillList$bgtjobid)
  jobList$bgtjobid <- as.character(jobList$bgtjobid)
  # Keep only skill rows for the requested ads whose skill is classified
  # as "hard", then attach the occupation code/name from the ad table.
  hard_rows <- skillList %>%
    filter(bgtjobid %in% jobList$bgtjobid, skill %in% hardSkillList$skill)
  hard_rows$bgtjobid <- as.character(hard_rows$bgtjobid)
  hard_rows %>%
    left_join(jobList[, c("bgtjobid", "onet", "onetname")], by = "bgtjobid")
}
# Hard skills only, for the same sets of ads.
r_top5_job_hard_skill <- findHardSkill(r_top5_main, job_skill, hard_skills)
b_top5_job_hard_skill <- findHardSkill(b_top5_main, job_skill, hard_skills)
### Sanity checks (results printed interactively, not persisted):
# Should only return the top 5 job types:
r_top5_job_all_skill %>% group_by(onetname) %>% summarise(count = n()) %>% arrange(desc(count))
b_top5_job_all_skill %>% group_by(onetname) %>% summarise(count = n()) %>% arrange(desc(count))
# Should include some soft skills:
r_top5_job_all_skill %>% group_by(skill) %>% summarise(count = n()) %>% arrange(desc(count))
b_top5_job_all_skill %>% group_by(skill) %>% summarise(count = n()) %>% arrange(desc(count))
# Should NOT include soft skills; hard-skill counts should match the
# corresponding rows in the all-skills tables above:
r_top5_job_hard_skill %>% group_by(skill) %>% summarise(count = n()) %>% arrange(desc(count))
b_top5_job_hard_skill %>% group_by(skill) %>% summarise(count = n()) %>% arrange(desc(count))
### Writing out data sets:
loc <- file.path("data/stem_edu/working/burning_glass_ad_combine_16_17")
write.csv(r_top5_main, file.path(loc, "richmond_top5_stw_jobs_main.csv"))
write.csv(b_top5_main, file.path(loc, "blacksburg_top5_stw_jobs_main.csv"))
write.csv(r_top5_job_all_skill, file.path(loc, "richmond_top5_stw_jobs_all_skills.csv"))
write.csv(b_top5_job_all_skill, file.path(loc, "blacksburg_top5_stw_jobs_all_skills.csv"))
write.csv(r_top5_job_hard_skill, file.path(loc, "richmond_top5_stw_jobs_hard_skills.csv"))
write.csv(b_top5_job_hard_skill, file.path(loc, "blacksburg_top5_stw_jobs_hard_skills.csv"))
e3ef3ea0571f6753c078f75a5b4c7a62d06bbf16 | 76d088c82890b83c41d6e86e56b6729e9c9734f3 | /cachematrix.R | d6f537e3c38f4d8a111a8a8f220c3432c553ba80 | [] | no_license | philipdlcruz/ProgrammingAssignment2 | 11f56511d6402a431e2bb861ef9cac5f31873449 | e101f1994230276311fda06eb05b87b5022057bf | refs/heads/master | 2021-01-15T16:10:30.538779 | 2014-08-23T06:04:56 | 2014-08-23T06:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,130 | r | cachematrix.R | ## These are functions that cache the inverse of a matrix. And matrices that
## are supplied as input should always be always invertible.
## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four accessor closures: set/get the matrix and
## set/get the cached inverse (NULL until one has been stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setmatinv <- function(matinv) cached_inverse <<- matinv
  getmatinv <- function() cached_inverse
  list(set = set,
       get = get,
       setmatinv = setmatinv,
       getmatinv = getmatinv)
}
## Compute (and cache) the inverse of the special "matrix" created by
## makeCacheMatrix. On the first call the inverse is computed with solve()
## and stored in the wrapper; subsequent calls (with the matrix unchanged)
## return the stored value without recomputing. Assumes the wrapped
## matrix is invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmatinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setmatinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
459a0056be85a2c24f36de53a7da95b1ed07b1b4 | 253d7e852f0add5f4da947ba5d5888e289c6733f | /circle1.R | f89c2a71d68b28a1da2e13a72c31c140f90574d2 | [] | no_license | robertandrewstevens/R | 6736d2b40bf27d723a7203814d04967e170048b6 | dd1cba9f9eb9abf81c41014f45f6f42cc26888bd | refs/heads/master | 2020-05-21T04:29:27.166127 | 2018-07-16T00:28:14 | 2018-07-16T00:28:14 | 20,109,029 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 514 | r | circle1.R | circle1 <- function(x, y, x0, y0, r, x.lab, y.lab) {
a <- seq(0, 2 * pi, len = 100)
circle.x <- x0 + r * cos(a)
circle.y <- y0 + r * sin(a)
min.x <- min(min(x), min(circle.x))
max.x <- max(max(x), max(circle.x))
min.y <- min(min(y), min(circle.y))
max.y <- max(max(y), max(circle.y))
plot(x, y, xlim = c(min.x, max.x), ylim = c(min.y, max.y), xlab = x.lab, ylab = y.lab)
lines(circle.x, circle.y)
points(mean(x), mean(y), pch = 3, cex = 6)
abline(h = y0, lty = 2)
abline(v = x0, lty = 2)
}
|
41ec12529d3ab8eefcf44a0add091ba2669a659c | c5425a22021e1bc0b3f2778666c8127fa845f65a | /prs/PCoA.R | cfab4bd0fef21cd8240624c15fb410630b497d8d | [] | no_license | the8thday/16s_plot_R | 2ed6c4f2dc63a971070089422d5661da7084e5e3 | 87e55b67049f5f34250a2cf9981ecb19d9779ee8 | refs/heads/master | 2021-03-01T04:35:07.406088 | 2020-04-30T09:02:28 | 2020-04-30T09:02:28 | 245,754,266 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,087 | r | PCoA.R | #PCoA
#Unweighted Unifrac/Bray-curtis
library(vegan)
library(tidyverse)
otu <- read.delim("/Users/congliu/Downloads/16s_PCoA/otu_table.txt", row.names = 1, sep = '\t', stringsAsFactors = F,
check.names = FALSE)
otu <- data.frame(t(otu))
distance <- vegan::vegdist(otu, method = 'bray')
pcoa <- cmdscale(distance, k = (nrow(otu) - 1), eig = TRUE)
ordiplot(scores(pcoa)[ ,c(1, 2)], type = 't')
pcoa$eig
point <- data.frame(pcoa$points)
species <- wascores(pcoa$points[,1:2], otu)#计算物种坐标
#
pcoa_eig <- (pcoa$eig)[1:2] / sum(pcoa$eig)
#提取样本点坐标(前两轴)
sample_site <- data.frame({pcoa$point})[1:2]
sample_site$names <- rownames(sample_site)
names(sample_site)[1:2] <- c('PCoA1', 'PCoA2')
#为样本点坐标添加分组信息
group <- read.delim('/Users/congliu/Downloads/16s_PCoA/group.txt', sep = '\t', stringsAsFactors = FALSE)
sample_site <- merge(sample_site, group, by = 'names', all.x = TRUE)
#可选输出,例如输出为 csv 格式
write.csv(sample_site, 'sample_site.csv', quote = F)
sample_site$site <- factor(sample_site$site, levels = c('A', 'B', 'C', 'D'))
sample_site$deal <- factor(sample_site$deal, levels = c('low', 'high'))
sample_site$time <- factor(sample_site$time, levels = c('1', '2', '3', '4'))
library(plyr)
group_border <- ddply(sample_site, 'site', function(df) df[chull(df[[2]], df[[3]]), ])
#注:group_border 作为下文 geom_polygon() 的做图数据使用
pcoa_plot <- ggplot(sample_site, aes(PCoA1, PCoA2, group = site)) +
theme(panel.grid = element_line(color = 'gray', linetype = 2, size = 0.1), panel.background = element_rect(color = 'black', fill = 'transparent'), legend.key = element_rect(fill = 'transparent')) + #背景框
geom_vline(xintercept = 0, color = 'gray', size = 0.4) +
geom_hline(yintercept = 0, color = 'gray', size = 0.4) +
geom_polygon(data = group_border, aes(fill = site)) + #绘制多边形区域
geom_point(aes(color = time, shape = deal), size = 1.5, alpha = 0.8) + #可在这里修改点的透明度、大小
scale_shape_manual(values = c(17, 16)) + #可在这里修改点的形状
scale_color_manual(values = c('yellow', 'orange', 'red', 'red4')) + #可在这里修改点的颜色
scale_fill_manual(values = c('#C673FF2E', '#73D5FF2E', '#49C35A2E', '#FF985C2E')) + #可在这里修改区块的颜色
guides(fill = guide_legend(order = 1), shape = guide_legend(order = 2), color = guide_legend(order = 3)) + #设置图例展示顺序
labs(x = paste('PCoA axis1: ', round(100 * pcoa_eig[1], 2), '%'), y = paste('PCoA axis2: ', round(100 * pcoa_eig[2], 2), '%')) +
#可通过修改下面四句中的点坐标、大小、颜色等,修改“A、B、C、D”标签
annotate('text', label = 'A', x = -0.31, y = -0.15, size = 5, colour = '#C673FF') +
annotate('text', label = 'B', x = -0.1, y = 0.3, size = 5, colour = '#73D5FF') +
annotate('text', label = 'C', x = 0.1, y = 0.15, size = 5, colour = '#49C35A') +
annotate('text', label = 'D', x = 0.35, y = 0, size = 5, colour = '#FF985C')
ggsave('PCoA.png', pcoa_plot, width = 6, height = 5)
|
00eec65e20a1daf8f266cdbee468fb7f497c03f8 | 4450235f92ae60899df1749dc2fed83101582318 | /ThesisRpackage/man/get_Article2.Rd | 9255e18e4e1b7737fa5051af81c86da9e7a5c1da | [
"MIT"
] | permissive | cayek/Thesis | c2f5048e793d33cc40c8576257d2c9016bc84c96 | 14d7c3fd03aac0ee940e883e37114420aa614b41 | refs/heads/master | 2021-03-27T20:35:08.500966 | 2017-11-18T10:50:58 | 2017-11-18T10:50:58 | 84,567,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 247 | rd | get_Article2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in 2Article/2Article.R
\name{get_Article2}
\alias{get_Article2}
\title{Return Article2 env functions}
\usage{
get_Article2()
}
\description{
Return Article2 env functions
}
|
ffc0e63034144b98d882f643cf64620dd2dba0a8 | afc30a6dee4e857e90878152cc3c1e179023bebe | /UBCF.R | 697d53099162c48a504ee16066c5d7ff46d952b8 | [] | no_license | anandk/recommender | f55fa0a9f58a41b25e97b7f8850a261d860c0c58 | 702261fa822ed6f800027b9b44a823cc14e20045 | refs/heads/master | 2021-01-17T11:30:10.170718 | 2014-04-17T07:36:28 | 2014-04-17T07:36:28 | 18,054,790 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,129 | r | UBCF.R | library("recommenderlab")
setwd("C:/TW-Projects/PS-Projects/AbcamAnalytics/RSandbox/recommender")
df <- read.csv(file="FinalWeightsCombined",sep="\t",
colClasses=c("character","character","numeric"))
colnames(df)<- c("user","abIDs","weight")
userItemMatrix <- as(df,"realRatingMatrix")
rec <- Recommender(userItemMatrix[1:1400], method="UBCF")
finalMatr <- matrix(nrow=1,ncol=11)
recom <- predict(rec,userItemMatrix[1401],n=3)
for(index in 1:dim(userItemMatrix)[1])
{
userID <- names(as(userItemMatrix[index],"list"))
recom <- predict(rec,userItemMatrix[userIndex],n=10)
finalMatr <- rbind(finalMatr, c(userID,unlist(as(recom,"list"))))
if(index%%100 == 0)
{
write.table(finalMatr[-1,], file="./Output.txt", sep=",", append=TRUE, row.names=FALSE,col.names=FALSE)
finalMatr <- matrix(nrow=1,ncol=11)
}
}
write.table(finalMatr[-1,], file="./Output.txt", sep=",", append=TRUE, row.names=FALSE,col.names=FALSE)
#userIndex <- 407983
#as(recom,"list")
userList <- as(userItemMatrix,"list")
dfUser <- as(userList,"data.frame")
userIndex <- which(colnames(dfUser)=="8549280749670823302")
|
2c878a355a4c17f63cea88e5ec61d18091b50933 | b3c30e4683df07096c0aa0ecf6be1443dfa60de9 | /deseq_analysis_col_leaf_vs_col_leaf_bio_reps.R | 3fa4fbedec64e12b4f80ac0b549cd72542a5f578 | [] | no_license | B-Rich/Suecica | cd26cacf90daa546dc74d64e7ff79e696f9278a6 | bbd3b66750b03f3cefe06e10b60370f08a06df2e | refs/heads/master | 2021-01-22T02:12:47.790301 | 2013-06-14T16:27:43 | 2013-06-14T16:27:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,327 | r | deseq_analysis_col_leaf_vs_col_leaf_bio_reps.R | setwd("C:/Dropbox/SuecicaDupSearch/Data/RNA-Seq/AsLeaf_vs_ColLeaf")
myPath = file.path("C:/Dropbox/SuecicaDupSearch/Data/RNA-Seq/AsLeaf_vs_ColLeaf/mRNA_Expression_Technicals_Combined.tsv")
counts = read.table(myPath, header=TRUE, row.names=1)
metadata = data.frame(
row.names = colnames(counts),
condition = c("col_leaf_rep1","col_leaf_rep2, sue_leaf_rep1, sue_leaf_rep2"),
libType = c("single-end", "single-end", "single-end", "single-end") )
singleSamples = metadata$libType == "single-end"
countTable = counts[,singleSamples]
condition = metadata$condition[singleSamples]
library("DESeq")
cds = newCountDataSet(countTable, condition)
cds = estimateSizeFactors(cds)
sizeFactors(cds)
#head( counts( cds, normalized=TRUE) ) # Prints library-normalized read counts
cds = estimateDispersions(cds, method='blind', sharingMode="fit-only")
#plotDispEsts(cds) # Plot dispersion estimates
# Examine differential expression between A. thaliana (Col) leaf tissue for the two biological replicates
res = nbinomTest(cds, "col_leaf_rep1", "col_leaf_rep2")
#head(res)
#plotMA(res)
#hist(res$pval, breaks=100, col="skyblue", border="slateblue", main="") # Prints histogram of p-values
#hist(res$padj, breaks=100, col="skyblue", border="slateblue", main="") # Prints histogram of adjusted p-values
resSig = res[res$pval < 0.1,]
resSig = na.omit(resSig)
resSigUp = resSig[resSig$foldChange > 1,]
resSigDown = resSig[resSig$foldChange < 1,]
head(resSig[order(resSig$pval),]) # Most significantly differentially expressed genes
head(resSigUp[order(-resSigUp$foldChange, -resSigUp$baseMean ), ] ) # Most strongly up-regulated significant genes
head(resSigDown[order(resSigDown$foldChange, -resSigDown$baseMean ), ] ) # Most strongly down-regulated significant genes
write.table(resSig[order(resSig$pval), ], file="Col_to_Sue_SigDifExp.tsv", quote=FALSE, sep="\t", row.names=FALSE )
write.table(resSigUp[order(-resSigUp$foldChange, -resSigUp$baseMean ), ], file="Col_to_Sue_SigMostUpReg.tsv", quote=FALSE, sep="\t", row.names=FALSE )
write.table(resSigDown[order(resSigDown$foldChange, -resSigDown$baseMean ), ], file="Col_to_Sue_SigMostDownReg.tsv", quote=FALSE, sep="\t", row.names=FALSE )
# Examine differential expression between A. thaliana (Col) leaf tissue for the two biological replicates with no p-value cutoff applied
rm(resSig,resSigUp,resSigDown)
resSig = res[res$pval < 1,]
resSig = na.omit(resSig)
resSigUp = resSig[resSig$foldChange > 1,]
resSigDown = resSig[resSig$foldChange < 1,]
head(resSig[order(resSig$pval),]) # Most significantly differentially expressed genes
head(resSigUp[order(-resSigUp$foldChange, -resSigUp$baseMean ), ] ) # Most strongly up-regulated significant genes
head(resSigDown[order(resSigDown$foldChange, -resSigDown$baseMean ), ] ) # Most strongly down-regulated significant genes
write.table(resSig[order(resSig$pval), ], file="ColTime0_to_Time+_SigDifExp_NOCUTOFF.tsv", quote=FALSE, sep="\t", row.names=FALSE )
write.table(resSigUp[order(-resSigUp$foldChange, -resSigUp$baseMean ), ], file="ColTime0_to_Time+_SigMostUpReg_NOCUTOFF.tsv", quote=FALSE, sep="\t", row.names=FALSE )
write.table(resSigDown[order(resSigDown$foldChange, -resSigDown$baseMean ), ], file="ColTime0_to_Time+_SigMostDownReg_NOCUTOFF.tsv", quote=FALSE, sep="\t", row.names=FALSE )
|
1c62fb8d271a84dd008250fd413abec0508a9cce | bb07714dd4b167663a8eb8d54c33264f2d2df9e8 | /functions/Get_AR_Simulations.R | fe3937c94b623b732ed1135bedf6b86b99d38003 | [
"MIT"
] | permissive | sophiaprieto/LDS-Inferences | b35db7a78de33d8afd3365c964443f22f5a700dc | a9658257abd508fb7804769ac3b4b0c8051e5aae | refs/heads/main | 2023-06-20T03:49:13.836183 | 2021-07-20T12:43:14 | 2021-07-20T12:43:14 | 402,172,479 | 1 | 0 | MIT | 2021-09-01T19:00:57 | 2021-09-01T19:00:56 | null | UTF-8 | R | false | false | 2,773 | r | Get_AR_Simulations.R | #______________________________________________________________________________#
###Baseline-Threshold Model###
#Method
#Step 1:- AR on Sites.
#Step 2:- Simulate fitted AR model and to get simulations.
#Input
#1. Data Field
#2. Data Field Name
#3. Number of Simulations
#4. ic - Information Criteria e.g. AIC or BIC.
#Output
#1. Ensemble Simulations.
Get_AR_Simulations <- function(Data_Field, Field_Name, NSims, ic){
  # Fit a univariate AR(I)MA model to each site (column) of the data field
  # with forecast::auto.arima, then draw NSims synthetic realizations of the
  # whole field from the fitted models, restoring each site's original mean
  # and standard deviation.
  #
  # Args:
  #   Data_Field: numeric matrix, rows = time steps, columns = sites.
  #   Field_Name: label for the field (currently unused inside the function).
  #   NSims:      number of ensemble members to simulate.
  #   ic:         information criterion passed to auto.arima ("aic"/"bic"/...).
  #
  # Returns:
  #   A list of NSims matrices, each with the same dimensions as Data_Field.
  library(MASS)      # NOTE(review): MASS does not appear to be used below.
  library(forecast)  # auto.arima(), arimaorder()
  # Standardize each site; means/sds are kept to undo the scaling later.
  site_means <- apply(Data_Field, 2, mean)
  site_sd <- apply(Data_Field,2, sd)
  Data_Field_trans <- scale(Data_Field)
  ### Fit an AR(I)MA model per site (zero mean, no drift; seasonal allowed).
  # NOTE(review): the simulation step below uses arima.sim, which handles
  # only nonseasonal ARMA; if auto.arima selects differencing or seasonal
  # terms they will not be reproduced -- confirm this cannot happen here.
  ar.fit <- list()
  pb = txtProgressBar(min = 1, max = ncol(Data_Field_trans), initial = 1)
  for(i in 1:ncol(Data_Field_trans)){
    setTxtProgressBar(pb,i)
    ar.fit[[i]] <- auto.arima(Data_Field_trans[,i], seasonal = TRUE, allowmean = FALSE,
                              allowdrift = FALSE, start.p = 1, start.q=1, ic = ic)
  }
  ### Diagnostic scatter of the fitted (AR order, MA order) pairs, jittered
  ### so coincident sites remain visible.
  plot(0:5,0:5, type='n', xlab = "AR Model", ylab = "MA Model",
       main = "Site Fitted AR and MA Models")
  for(i in 1:length(ar.fit)){
    ord = arimaorder(ar.fit[[i]])
    points(jitter(ord[1]),jitter(ord[3]),pch=19)
  }
  ### Generate ensemble simulations.
  Ensem_Sims <- list()
  for(j in 1:NSims){
    Sims <- matrix(NA, ncol = ncol(Data_Field), nrow = nrow(Data_Field))
    for(i in 1:ncol(Data_Field_trans)){
      # Get the fitted (p, d, q) order for this site.
      ord = arimaorder(ar.fit[[i]])
      # Branch on which model components are present (MA-only / AR-only /
      # white noise / mixed ARMA), presumably to avoid passing empty
      # coefficient vectors to arima.sim.
      if(ord[1]==0 && ord[3] > 0){
        ma = ar.fit[[i]]$coef
        ts.sim <- arima.sim(list(order = ord, ma = ma),
                            n = nrow(Data_Field),
                            sd = sqrt(ar.fit[[i]]$sigma2))
      } else if(ord[3]==0 && ord[1] > 0){
        ar = ar.fit[[i]]$coef
        ts.sim <- arima.sim(list(order = ord, ar = ar),
                            n = nrow(Data_Field),
                            sd = sqrt(ar.fit[[i]]$sigma2))
      } else if(ord[3]==0 && ord[1]==0){
        ts.sim <- arima.sim(list(order = ord),
                            n = nrow(Data_Field),
                            sd = sqrt(ar.fit[[i]]$sigma2))
      } else{
        # Mixed model: the coefficient vector holds the AR terms first,
        # then the MA terms.
        ar = ar.fit[[i]]$coef[1:ord[1]]
        ma = ar.fit[[i]]$coef[(ord[1]+1):(ord[1]+ord[3])]
        ts.sim <- arima.sim(list(order = ord, ar = ar, ma = ma),
                            n = nrow(Data_Field),
                            sd = sqrt(ar.fit[[i]]$sigma2))
      }
      # Keep only the last nrow(Sims) values (arima.sim can return a
      # longer series when differencing is present).
      Sims[,i] <- tail(ts.sim,nrow(Sims))
      # Restore the site's original mean and standard deviation.
      Sims[,i] <- Sims[,i]*site_sd[i] + site_means[i]
    }
    Ensem_Sims[[j]] <- Sims
  }
  # Return the ensemble of simulations.
  return(Ensem_Sims)
}
e69caf8e1acbeb103f769ed27555c4172d68902f | 83f461519bff4467a1a175ca686ad06a2a7e257b | /R/linear_regression.R | b5c5cc31b8c5645a91352108acb69b38dca7708b | [] | no_license | Yashwants19/RcppMLPACK | 3af64c6b1327e895b99637649591d1671adf53a5 | 2d256c02058aa7a183d182079acff9037a80b662 | refs/heads/master | 2022-12-04T05:06:17.578747 | 2020-07-22T12:45:42 | 2020-07-22T12:45:42 | 252,217,735 | 9 | 0 | null | 2020-08-18T06:15:14 | 2020-04-01T15:41:38 | C++ | UTF-8 | R | false | false | 4,600 | r | linear_regression.R | #' @title Simple Linear Regression and Prediction
#'
#' @description
#' An implementation of simple linear regression and ridge regression using
#' ordinary least squares. Given a dataset and responses, a model can be
#' trained and saved for later use, or a pre-trained model can be used to output
#' regression predictions for a test set.
#'
#' @param input_model Existing LinearRegression model to use (LinearRegression).
#' @param lambda Tikhonov regularization for ridge regression. If 0, the method
#' reduces to linear regression. Default value "0" (numeric).
#' @param test Matrix containing X' (test regressors) (numeric matrix).
#' @param training Matrix containing training set X (regressors) (numeric matrix).
#' @param training_responses Optional vector containing y (responses). If not given,
#' the responses are assumed to be the last row of the input file (numeric
#' row).
#' @param verbose Display informational messages and the full list of parameters and
#' timers at the end of execution. Default value "FALSE" (logical).
#'
#' @return A list with several components:
#' \item{output_model}{Output LinearRegression model (LinearRegression).}
#' \item{output_predictions}{If --test_file is specified, this matrix is where the
#' predicted responses will be saved (numeric row).}
#'
#' @details
#' An implementation of simple linear regression and simple ridge regression
#' using ordinary least squares. This solves the problem
#'
#' y = X * b + e
#'
#' where X (specified by "training") and y (specified either as the last column
#' of the input matrix "training" or via the "training_responses" parameter) are
#' known and b is the desired variable. If the covariance matrix (X'X) is not
#' invertible, or if the solution is overdetermined, then specify a Tikhonov
#' regularization constant (with "lambda") greater than 0, which will regularize
#' the covariance matrix to make it invertible. The calculated b may be saved
#' with the "output_model" output parameter.
#'
#' Optionally, the calculated value of b is used to predict the responses for
#' another matrix X' (specified by the "test" parameter):
#'
#' y' = X' * b
#'
#' and the predicted responses y' may be saved with the "output_predictions"
#' output parameter. This type of regression is related to least-angle
#' regression, which mlpack implements as the 'lars' program.
#'
#' @author
#' mlpack developers
#'
#' @export
#' @examples
#' # For example, to run a linear regression on the dataset "X" with responses
#' # "y", saving the trained model to "lr_model", the following command could be
#' # used:
#'
#' \donttest{
#' output <- linear_regression(training=X, training_responses=y)
#' lr_model <- output$output_model
#' }
#'
#' # Then, to use "lr_model" to predict responses for a test set "X_test",
#' # saving the predictions to "X_test_responses", the following command could
#' # be used:
#'
#' \donttest{
#' output <- linear_regression(input_model=lr_model, test=X_test)
#' X_test_responses <- output$output_predictions
#' }
linear_regression <- function(input_model=NA,
                              lambda=NA,
                              test=NA,
                              training=NA,
                              training_responses=NA,
                              verbose=FALSE) {
  # Restore the IO settings for this mlpack binding before passing parameters.
  IO_RestoreSettings("Simple Linear Regression and Prediction")

  # Helper: forward a parameter to the mlpack IO layer only when the caller
  # actually supplied it (the default sentinel value is NA). `prepare` lets a
  # conversion such as to_matrix() be applied just before forwarding.
  forward_param <- function(value, setter, name, prepare = identity) {
    if (!identical(value, NA)) {
      setter(name, prepare(value))
    }
  }
  forward_param(input_model, IO_SetParamLinearRegressionPtr, "input_model")
  forward_param(lambda, IO_SetParamDouble, "lambda")
  forward_param(test, IO_SetParamMat, "test", to_matrix)
  forward_param(training, IO_SetParamMat, "training", to_matrix)
  forward_param(training_responses, IO_SetParamRow, "training_responses", to_matrix)

  # Toggle verbose output for the underlying C++ implementation.
  if (verbose) {
    IO_EnableVerbose()
  } else {
    IO_DisableVerbose()
  }

  # Both outputs are always requested from the binding.
  IO_SetPassed("output_model")
  IO_SetPassed("output_predictions")

  # Invoke the mlpack routine.
  linear_regression_mlpackMain()

  # Tag the returned model pointer with its type so other bindings accept it.
  fitted_model <- IO_GetParamLinearRegressionPtr("output_model")
  attr(fitted_model, "type") <- "LinearRegression"

  # Collect the outputs, then reset the IO layer for the next call.
  results <- list(
    "output_model" = fitted_model,
    "output_predictions" = IO_GetParamRow("output_predictions")
  )
  IO_ClearSettings()
  results
}
|
f279a6610c3e886eb7374277a87dc2b9c2804ebd | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/GreedySBTM/R/greedy_icl.R | aca29cbce5686961786aadd44fdca50c13eacfb4 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 140 | r | greedy_icl.R |
## Greedy ICL optimisation: thin R wrapper delegating directly to the
## compiled routine cpp_GreedyICL() with the same arguments.
GreedyICL <- function (adj_cube, allocations, max_n_iter = 100, verbose = FALSE) {
  cpp_GreedyICL(adj_cube, allocations, max_n_iter, verbose)
}
|
b3daec1ace16ba194d7c8edffbdb27f5a29a3d95 | 6ff227373b7fce8775b70c65f426c3ba14bfc692 | /man/hcop.Rd | 9d475282d77b4d61dcc17ceb2b229060578230ee | [] | no_license | Shicheng-Guo/hcop | ec865158608ca98fd808666d6fe6e32589687e13 | 0985fddc91a6ef2308f4800958dfd11c25fe6a98 | refs/heads/master | 2023-03-16T18:47:57.233837 | 2018-03-22T13:27:20 | 2018-03-22T13:27:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,471 | rd | hcop.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hcop.R
\docType{data}
\name{anole_lizard}
\alias{anole_lizard}
\alias{c.elegans}
\alias{chicken}
\alias{chimpanzee}
\alias{cow}
\alias{dog}
\alias{fruitfly}
\alias{horse}
\alias{macaque}
\alias{mouse}
\alias{opossum}
\alias{pig}
\alias{platypus}
\alias{rat}
\alias{s.cerevisiae}
\alias{xenopus}
\alias{zebrafish}
\title{HCOP ortholog data}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 88020 rows and 9 columns.}
\source{
\url{https://www.genenames.org/cgi-bin/hcop} via ftp at
\url{ftp://ftp.ebi.ac.uk/pub/databases/genenames/hcop/}.
}
\usage{
anole_lizard
c.elegans
chicken
chimpanzee
cow
dog
fruitfly
horse
macaque
mouse
opossum
pig
platypus
rat
s.cerevisiae
xenopus
zebrafish
}
\description{
Human ortholog data from HGNC Comparison of Orthology Predictions (HCOP). The
HGNC Comparison of Orthology Predictions (HCOP) integrates and displays the
orthology assertions predicted for a specified human gene, or set of human
genes, by eggNOG, Ensembl Compara, HGNC, HomoloGene, Inparanoid, NCBI Gene
Orthology, OMA, OrthoDB, OrthoMCL, Panther, PhylomeDB, TreeFam and ZFIN. An
indication of the reliability of a prediction is provided by the number of
databases which concur. Data collected from HCOP at
<https://www.genenames.org/cgi-bin/hcop> via ftp at
<ftp://ftp.ebi.ac.uk/pub/databases/genenames/hcop/>.
}
\keyword{datasets}
|
7a96169d9e50a8dce009a3905d9859939f029801 | fece583af3cd8985606e793464d3bbb28e03c2cf | /cachematrix.R | 4f75787354aeecada3a3563329936ddf78ec606b | [] | no_license | kketzal/ProgrammingAssignment2 | 7c6c6421c8d73eceb80095e029b31fb1922feefe | d87bb86aa61e2ba15c0047360fd5537d1b7207e0 | refs/heads/master | 2021-01-15T09:27:51.628804 | 2015-04-23T17:15:40 | 2015-04-23T17:15:40 | 34,073,168 | 0 | 0 | null | 2015-04-16T18:28:22 | 2015-04-16T18:28:22 | null | UTF-8 | R | false | false | 7,501 | r | cachematrix.R | ##///////////////////////////////////////////////////////////////////////////
##
## File Name: cachematrix.R (prototype from R. Peng)
## Author: Manuel Martínez Peinado (a.k.a kketzal). Spain. 2015
##
## Content:
## * makeCacheMatrix() function
## * cacheSolve() function
## * Example
##
##///////////////////////////////////////////////////////////////////////////
############################################################################
## makeCacheMatrix(my_matrix = matrix())
##
## Build a "special matrix": a list of closures sharing one enclosing
## environment that hold a matrix together with a cached copy of its
## inverse. The cache starts out NULL and is cleared whenever the stored
## matrix is replaced.
##
## Returned accessors:
##   set_matrix(my_mat)                    store a new matrix (clears cache)
##   get_matrix()                          return the stored matrix
##   set_inverted_matrix(inverted_matrix)  cache a computed inverse
##   get_inverted_matrix()                 cached inverse, or NULL if unset
###########################################################################
makeCacheMatrix <- function(my_matrix = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL

  # Replace the stored matrix; any previously cached inverse is now stale.
  set_matrix <- function(my_mat) {
    my_matrix <<- my_mat
    cached_inverse <<- NULL
  }

  # Return the stored matrix.
  get_matrix <- function() {
    my_matrix
  }

  # Store a computed inverse in the shared environment.
  set_inverted_matrix <- function(inverted_matrix) {
    cached_inverse <<- inverted_matrix
  }

  # Return the cached inverse (NULL when nothing has been cached).
  get_inverted_matrix <- function() {
    cached_inverse
  }

  # Expose the four accessors as a named list.
  list(
    set_matrix = set_matrix,
    get_matrix = get_matrix,
    set_inverted_matrix = set_inverted_matrix,
    get_inverted_matrix = get_inverted_matrix
  )
}
###########################################################################
## cacheSolve(my_special_matrix, ...)
##
## Return the inverse of the matrix held inside a "special matrix" object
## created by makeCacheMatrix(). If the inverse has already been computed
## (and the matrix has not been replaced since), the cached copy is
## returned without recomputation; otherwise the inverse is computed with
## solve(), stored in the cache, and returned.
##
## Arguments:
##   my_special_matrix - accessor list returned by makeCacheMatrix()
##   ...               - further arguments forwarded to solve()
##
## Stops with an error when the stored matrix is not square (a non-square
## matrix has no inverse here).
###########################################################################
cacheSolve <- function(my_special_matrix, ...) {
  ## Reuse the cached inverse when one is available.
  my_inverted_matrix <- my_special_matrix$get_inverted_matrix()
  if(!is.null(my_inverted_matrix)) {
    message("getting cached data...")
    return(my_inverted_matrix)
  }
  ## Cache miss: fetch the matrix and validate that it is square.
  my_matrix <- my_special_matrix$get_matrix()
  if(ncol(my_matrix) != nrow(my_matrix)) {
    stop("Matrix isn't squared, therefore, it's not invertible.")
  }
  ## Compute the inverse, forwarding any extra arguments to solve().
  ## (Fix: the original accepted `...` but never passed it on.)
  message("computting inverted matrix...")
  my_inverted_matrix <- solve(my_matrix, ...)
  ## Store the result so subsequent calls hit the cache.
  message("caching inverted matrix...")
  my_special_matrix$set_inverted_matrix(my_inverted_matrix)
  ## Return the freshly computed inverse.
  my_inverted_matrix
}
##########################################################################
## Example:
##
##### Let's create a dummy 4x4 matrix with random values with "rnorm" function. Then
##### we can create a special matrix with makeCacheMatrix() function.
##
## > my_matrix <- matrix(rnorm(16), nrow=4, ncol=4)
## > my_matrix
## [,1] [,2] [,3] [,4]
## [1,] -0.29246828 2.1374212 1.6247866 -1.5972936
## [2,] 1.02133007 -2.1373991 -1.8863948 -1.3842185
## [3,] -0.04096128 -0.2478461 0.1524899 -0.2129463
## [4,] -0.07191958 -0.2174002 -0.7338735 -0.4507851
##
## > my_special_matrix <- makeCacheMatrix(my_matrix)
##
###### ----- Check if the dummy matrix is inside the object:
##
## > my_special_matrix$get_matrix()
## [,1] [,2] [,3] [,4]
## [1,] -0.29246828 2.1374212 1.6247866 -1.5972936
## [2,] 1.02133007 -2.1373991 -1.8863948 -1.3842185
## [3,] -0.04096128 -0.2478461 0.1524899 -0.2129463
## [4,] -0.07191958 -0.2174002 -0.7338735 -0.4507851
##
###### Getting the inverted matrix:
## > my_special_matrix$get_inverted_matrix()
## NULL
##
###### It's NULL!!!
###### We need to use "cacheSolve" function for get the inverted matrix
##
## > cacheSolve(my_special_matrix)
## computting inverted matrix...
## caching inverted matrix...
## [,1] [,2] [,3] [,4]
## [1,] 0.26270575 0.797193971 -2.8099443 -2.0514024
## [2,] 0.24039258 -0.004145921 -2.0317485 0.1207107
## [3,] 0.07357938 0.009953413 1.3732178 -0.9399759
## [4,] -0.27763345 -0.141391262 -0.8074262 -0.4190091
## >
###### If we call the same function, we get the inverted matrix from cache!
##
## > cacheSolve(my_special_matrix)
## getting cached data...
## [,1] [,2] [,3] [,4]
## [1,] 0.26270575 0.797193971 -2.8099443 -2.0514024
## [2,] 0.24039258 -0.004145921 -2.0317485 0.1207107
## [3,] 0.07357938 0.009953413 1.3732178 -0.9399759
## [4,] -0.27763345 -0.141391262 -0.8074262 -0.4190091
## >
##
###### To get the original Matrix, we can call to get_matrix() function
##
## > my_special_matrix$get_matrix()
## [,1] [,2] [,3] [,4]
## [1,] -0.29246828 2.1374212 1.6247866 -1.5972936
## [2,] 1.02133007 -2.1373991 -1.8863948 -1.3842185
## [3,] -0.04096128 -0.2478461 0.1524899 -0.2129463
## [4,] -0.07191958 -0.2174002 -0.7338735 -0.4507851
## >
##
###### To get the inverted matrix, we can call to get_inverted_matrix()
##
## > my_special_matrix$get_inverted_matrix()
## [,1] [,2] [,3] [,4]
## [1,] 0.26270575 0.797193971 -2.8099443 -2.0514024
## [2,] 0.24039258 -0.004145921 -2.0317485 0.1207107
## [3,] 0.07357938 0.009953413 1.3732178 -0.9399759
## [4,] -0.27763345 -0.141391262 -0.8074262 -0.4190091
## > |
b2c71cf19181d32fd82e33cd76a89d256a098802 | 6ceab1bf9c435b523d2f8e7e9440da39770d741b | /man/f7Padding.Rd | 3ba8d97ddb80756354103820ab4525bc532bcb09 | [] | no_license | RinteRface/shinyMobile | a8109cd39c85e171db893d1b3f72d5f1a04f2c62 | 86d36f43acf701b6aac42d716adc1fae4f8370c6 | refs/heads/master | 2023-07-25T16:28:41.026349 | 2022-11-25T17:04:29 | 2022-11-25T17:04:29 | 139,186,586 | 328 | 92 | null | 2023-03-26T05:58:53 | 2018-06-29T19:13:06 | R | UTF-8 | R | false | true | 1,014 | rd | f7Padding.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/typography.R
\name{f7Padding}
\alias{f7Padding}
\title{Framework7 padding utility}
\usage{
f7Padding(tag, side = NULL)
}
\arguments{
\item{tag}{Tag to apply the padding.}
\item{side}{padding side: "left", "right", "top", "bottom",
"vertical" (top and bottom), "horizontal" (left and right).
Leave NULL to apply on all sides.}
}
\description{
\code{f7Padding} adds padding to the given tag.
}
\examples{
if(interactive()){
library(shiny)
library(shinyMobile)
cardTag <- f7Card(
title = "Card header",
f7Padding(
p("The padding is applied here.")
),
footer = tagList(
f7Button(color = "blue", label = "My button", href = "https://www.google.com"),
f7Badge("Badge", color = "green")
)
)
shinyApp(
ui = f7Page(
title = "Padding",
f7SingleLayout(navbar = f7Navbar(title = "f7Padding"), cardTag)
),
server = function(input, output) {}
)
}
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
7d3be09d014581d700c421a5ccd161ba7d90deb6 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/TreeDist/man/TreeDistance.Rd | 305d96660ec2d3fddf05c45e5272c1b0a59dcbea | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 10,423 | rd | TreeDistance.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_distance_info.R, R/tree_distance_mmsi.R
\encoding{UTF-8}
\name{TreeDistance}
\alias{TreeDistance}
\alias{SharedPhylogeneticInfo}
\alias{DifferentPhylogeneticInfo}
\alias{PhylogeneticInfoDistance}
\alias{ClusteringInfoDistance}
\alias{ClusteringInfoDist}
\alias{ExpectedVariation}
\alias{MutualClusteringInfo}
\alias{MutualClusteringInformation}
\alias{SharedPhylogeneticInfoSplits}
\alias{MutualClusteringInfoSplits}
\alias{MatchingSplitInfo}
\alias{MatchingSplitInfoDistance}
\alias{MatchingSplitInfoSplits}
\title{Information-based generalized Robinson-Foulds distances}
\usage{
TreeDistance(tree1, tree2 = tree1)
SharedPhylogeneticInfo(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE,
diag = TRUE
)
DifferentPhylogeneticInfo(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE
)
PhylogeneticInfoDistance(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE
)
ClusteringInfoDistance(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE
)
ExpectedVariation(tree1, tree2, samples = 10000)
MutualClusteringInfo(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE,
diag = TRUE
)
SharedPhylogeneticInfoSplits(
splits1,
splits2,
nTip = attr(splits1, "nTip"),
reportMatching = FALSE
)
MutualClusteringInfoSplits(
splits1,
splits2,
nTip = attr(splits1, "nTip"),
reportMatching = FALSE
)
MatchingSplitInfo(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE,
diag = TRUE
)
MatchingSplitInfoDistance(
tree1,
tree2 = tree1,
normalize = FALSE,
reportMatching = FALSE
)
MatchingSplitInfoSplits(
splits1,
splits2,
nTip = attr(splits1, "nTip"),
reportMatching = FALSE
)
}
\arguments{
\item{tree1, tree2}{Trees of class \code{phylo}, with leaves labelled identically,
or lists of such trees to undergo pairwise comparison.}
\item{normalize}{If a numeric value is provided, this will be used as a
maximum value against which to rescale results.
If \code{TRUE}, results will be rescaled against a maximum value calculated from
the specified tree sizes and topology, as specified in the 'Normalization'
section below.
If \code{FALSE}, results will not be rescaled.}
\item{reportMatching}{Logical specifying whether to return the clade
matchings as an attribute of the score.}
\item{diag}{Logical specifying whether to return similarities along the
diagonal, i.e. of each tree with itself. Applies only if \code{tree2} is
a list identical to \code{tree1}, or \code{NULL}.}
\item{samples}{Integer specifying how many samplings to obtain;
accuracy of estimate increases with \code{sqrt(samples)}.}
\item{splits1, splits2}{Logical matrices where each row corresponds to a leaf,
either listed in the same order or bearing identical names (in any sequence),
and each column corresponds to a split, such that each leaf is identified as
a member of the ingroup (\code{TRUE}) or outgroup (\code{FALSE}) of the respective
split.}
\item{nTip}{(Optional) Integer specifying the number of leaves in each split.}
}
\value{
If \code{reportMatching = FALSE}, the functions return a numeric
vector specifying the requested similarities or differences.
If \code{reportMatching = TRUE}, the functions additionally return details
of which clades are matched in the optimal matching, which can be viewed
using \code{\link[=VisualizeMatching]{VisualizeMatching()}}.
}
\description{
Calculate tree similarity and distance measures based on the amount of
phylogenetic or clustering information that two trees hold in common, as
proposed in Smith (2020).
}
\details{
\href{https://ms609.github.io/TreeDist/articles/Robinson-Foulds.html#generalized-robinson-foulds-distances}{Generalized Robinson-Foulds distances}
calculate tree similarity by finding an
optimal matching that the similarity between a split on one tree
and its pair on a second, considering all possible ways to pair splits
between trees (including leaving a split unpaired).
The methods implemented here use the concepts of
\href{https://ms609.github.io/TreeDist/articles/information.html}{entropy and information}
(MacKay 2003) to assign a similarity score between each pair of splits.
The returned tree similarity measures state the amount of information,
in bits, that the splits in two trees hold in common
when they are optimally matched, following Smith (2020).
The complementary tree distance measures state how much information is
different in the splits of two trees, under an optimal matching.
}
\section{Concepts of information}{
The phylogenetic (Shannon) information content and entropy of a split are
defined in
\href{https://ms609.github.io/TreeDist/articles/information.html}{a separate vignette}.
Using the mutual (clustering) information (Meil\ifelse{html}{ă}{a}
2007, Vinh \emph{et al.} 2010) of two splits to quantify their similarity gives
rise to the Mutual Clustering Information measure (\code{MutualClusteringInfo()},
\code{MutualClusteringInfoSplits()}); the entropy distance
gives the Clustering Information Distance (\code{ClusteringInfoDistance()}).
This approach is optimal in many regards, and is implemented with
normalization in the convenience function \code{TreeDistance()}.
Using the amount of phylogenetic information common to two splits to measure
their similarity gives rise to the Shared Phylogenetic Information similarity
measure (\code{SharedPhylogeneticInfo()}, \code{SharedPhylogeneticInfoSplits()}).
The amount of information distinct to
each of a pair of splits provides the complementary Different Phylogenetic
Information distance metric (\code{DifferentPhylogeneticInfo()}).
The Matching Split Information measure (\code{MatchingSplitInfo()},
\code{MatchingSplitInfoSplits()}) defines the similarity between a pair of
splits as the phylogenetic information content of the most informative
split that is consistent with both input splits; \code{MatchingSplitInfoDistance()}
is the corresponding measure of tree difference.
\href{https://ms609.github.io/TreeDist/articles/Generalized-RF.html}{(More information here.)}
\subsection{Conversion to distances}{
To convert similarity measures to distances, it is necessary to
subtract the similarity score from a maximum value. In order to generate
distance \emph{metrics}, these functions subtract the similarity twice from the
total information content (SPI, MSI) or entropy (MCI) of all the splits in
both trees (Smith 2020).
}
\subsection{Normalization}{
If \code{normalize = TRUE}, then results will be rescaled such that distance
ranges from zero to (in principle) one.
The maximum \strong{distance} is the sum of the information content or entropy of
each split in each tree; the maximum \strong{similarity} is half this value.
(See Vinh \emph{et al.} (2010, table 3) and Smith (2020) for
alternative normalization possibilities.)
Note that a distance value of one (= similarity of zero) will seldom be
achieved, as even the most different trees exhibit some similarity.
It may thus be helpful to rescale the normalized value such that the
\emph{expected} distance between a random pair of trees equals one. This can
be calculated with \code{ExpectedVariation()}; or see package
'\href{https://ms609.github.io/TreeDistData/reference/randomTreeDistances.html}{TreeDistData}'
for a compilation of expected values under different metrics for trees with
up to 200 leaves.
Alternatively, to scale against the information content or entropy of all
splits in the most or least informative tree, use \verb{normalize = }\code{\link{pmax}} or
\code{\link{pmin}} respectively.
To calculate the relative similarity against a reference tree that is known
to be 'correct', use \verb{normalize = ``SplitwiseInfo(trueTree)} (SPI, MSI) or
\code{ClusteringEntropy(trueTree)} (MCI).
}
}
\examples{
tree1 <- ape::read.tree(text='((((a, b), c), d), (e, (f, (g, h))));')
tree2 <- ape::read.tree(text='(((a, b), (c, d)), ((e, f), (g, h)));')
tree3 <- ape::read.tree(text='((((h, b), c), d), (e, (f, (g, a))));')
# Best possible score is obtained by matching a tree with itself
DifferentPhylogeneticInfo(tree1, tree1) # 0, by definition
SharedPhylogeneticInfo(tree1, tree1)
SplitwiseInfo(tree1) # Maximum shared phylogenetic information
# Best possible score is a function of tree shape; the splits within
# balanced trees are more independent and thus contain less information
SplitwiseInfo(tree2)
# How similar are two trees?
SharedPhylogeneticInfo(tree1, tree2) # Amount of phylogenetic information in common
VisualizeMatching(SharedPhylogeneticInfo, tree1, tree2) # Which clades are matched?
DifferentPhylogeneticInfo(tree1, tree2) # Distance measure
DifferentPhylogeneticInfo(tree2, tree1) # The metric is symmetric
# Are they more similar than two trees of this shape would be by chance?
ExpectedVariation(tree1, tree2, sample=12)['DifferentPhylogeneticInfo', 'Estimate']
# Every split in tree1 conflicts with every split in tree3
# Pairs of conflicting splits contain clustering, but not phylogenetic,
# information
SharedPhylogeneticInfo(tree1, tree3) # = 0
MutualClusteringInfo(tree1, tree3) # > 0
# Converting trees to Splits objects can speed up multiple comparisons
splits1 <- TreeTools::as.Splits(tree1)
splits2 <- TreeTools::as.Splits(tree2)
SharedPhylogeneticInfoSplits(splits1, splits2)
MatchingSplitInfoSplits(splits1, splits2)
MutualClusteringInfoSplits(splits1, splits2)
}
\references{
\itemize{
\item \insertRef{Mackay2003}{TreeDist}
\item \insertRef{Meila2007}{TreeDist}
\item \insertRef{SmithDist}{TreeDist}
\item \insertRef{Vinh2010}{TreeDist}
}
}
\seealso{
Other tree distances:
\code{\link{JaccardRobinsonFoulds}()},
\code{\link{KendallColijn}()},
\code{\link{MASTSize}()},
\code{\link{MatchingSplitDistance}()},
\code{\link{NNIDist}()},
\code{\link{NyeSimilarity}()},
\code{\link{PathDist}()},
\code{\link{Robinson-Foulds}},
\code{\link{SPRDist}()}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{tree distances}
|
ef82747dac833ef7274d0927bebaed94ae3ed3d9 | 0dad68bd3a28894180f18ea147026c8438912a73 | /man/parseRegion.Rd | f61907808f96e88ebc6d1e2df62f2dfe0c98bec8 | [] | no_license | sherrillmix/dnar | 1bcc2fac63d8af059215dea6fd3e5cdc7200e81f | dead343faebda27057a1e7a5789e853b5b73316d | refs/heads/master | 2022-08-12T14:04:05.052121 | 2022-07-13T18:59:18 | 2022-07-13T18:59:18 | 54,718,599 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 615 | rd | parseRegion.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dna.R
\name{parseRegion}
\alias{parseRegion}
\title{Parse a region string into chr, start, end and strand}
\usage{
parseRegion(reg)
}
\arguments{
\item{reg}{vector of region strings in the format "chrX:123545-123324" or "chr12:1234-1236+"}
}
\value{
data.frame with one row per reg string and columns chr, start, end and strand
}
\description{
Parse a region string into chr, start, end and strand
}
\examples{
parseRegion(c('chr1:12345-23456','chrX:2222:3333'))
parseRegion(c('chr1:12345-23456+','chrX:2222:3333-','chrX:2222:3333*'))
}
|
12dbc6747739b713c5b06f1562384b7c149e7e5e | 3e0a507bd74eb3a445c42c2dd410987c7bddd8b6 | /cachematrix.R | 1f0d458345a05281714b92513894ccca55d71bf9 | [] | no_license | thahoos/ProgrammingAssignment2 | 01262787eee82da955cc143b22e89f24b02d4444 | 516a8f68eea3a218afa085dc9c75732f55470dd4 | refs/heads/master | 2020-03-25T09:00:45.904505 | 2018-08-07T07:47:07 | 2018-08-07T07:47:07 | 143,642,911 | 0 | 0 | null | 2018-08-05T19:46:50 | 2018-08-05T19:46:50 | null | UTF-8 | R | false | false | 1,702 | r | cachematrix.R | ## Functions written to fulfill Coursera R-Programming Week 3 programming assignment
## The functions produce the inverse of a matrix while taking advantage of
## caching, thus avoiding repeated computation. The first function creates a list
## containing functions to set a matrix, get a matrix, set the inverse of a matrix,
## and get the inverse of a matrix.
## The second function generates the inverse of the matrix.
## If the inverse has already been calculated and the matrix has not changed, then
## cacheSolve will pull the inverse from the cache rather than recompute it,
## using the special matrix object created by the first function to cache its inverse.
## makeCacheMatrix: create a "special matrix" -- a list of closures that
## share one environment holding a matrix x and a cached inverse inv.
makeCacheMatrix <- function(x = matrix()) { # default argument: an empty matrix
  inv <- NULL ## inv holds the cached inverse of the matrix (NULL until computed)
  set <- function(y) { ## set: store a new matrix value in the enclosing environment
    x <<- y
    inv <<- NULL ## a new matrix invalidates any previously cached inverse
  }
  get <- function() x ## get: return the stored matrix
  setinverse <- function(inverse) inv <<- inverse ## cache the inverse in the enclosing environment
  getinverse <- function() inv ## return the cached inverse (NULL if not yet set)
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the "special matrix" x created by
## makeCacheMatrix(). If the inverse was already computed (and the matrix
## has not changed since), the cached copy is returned instead of being
## recomputed. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    ## Cache hit: reuse the stored inverse without recomputing.
    message("getting cached data")
    return(inv)
  }
  ## Cache miss: compute the inverse with solve() and store it for later calls.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
76118004dcd1bd43e8b4374834ca1a61bd9367c4 | c34fcf2ce85ffc4b45371f9e73ac478ed0ffe876 | /chap7_choi_regression_수정.R | f4d134459dcc3331f5f6c031fcaf3363109d402e | [] | no_license | ChungSeok/2021_graduate | 28f7126a7ad62ae310e34da3d1ece3538d17ba3a | 30f3e352d8547cb55faed6edcc69d256fe344123 | refs/heads/main | 2023-08-24T18:44:07.496915 | 2021-11-01T06:13:34 | 2021-11-01T06:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,595 | r | chap7_choi_regression_수정.R | #---------------------------------------------------------------------------------------------------------
#R Code for Chapter 7 of:
#
#Field, A. P. & Miles, J. N. V. (2012). Discovering Statistics Using R:
#-----------------------------------------------------------------------------------------------------------
###2018 fall, Prof. choi#######
#----Set the working directory------
setwd("D:/RRR/chap7")
#----Install Packages-----
install.packages("QuantPsyc")
install.packages("car")
install.packages("boot")
#------And then load these packages, along with the boot package.-----
library(QuantPsyc)
library(car)
library(boot)
#Jane superbrain box
pubs<-read.delim("pubs.dat", header = TRUE)
pubReg <- lm(mortality ~ pubs, data = pubs)
summary(pubReg)
resid(pubReg)
rstandard(pubReg)
rstudent(pubReg)
PearsonResidual <- (resid(pubReg)-mean(resid(pubReg)))/sd(resid(pubReg))
##################################################
##### 1. Simple Linear Regression ###############
##################################################
#----run the command to access the album1 data-----
album1<-read.delim("Album Sales 1.dat", header = TRUE)
#----run the simple linear regression model---
albumSales.1 <- lm(sales ~ adverts, data = album1, na.action=na.fail) # na.fail(fail with missing value ), na.omit(casewise deletion)
summary(albumSales.1)
sqrt(0.3346) ## pearson correlation coefficient
#how to interprete coeffecients
###################################################
##### 2. Multipe Linear Regression ###############
###################################################
#----access the album2 data----
album2<-read.delim("Album Sales 2.dat", header = TRUE)
#---Run the multiple regression model----
albumSales.2<-lm(sales ~ adverts, data = album2)
albumSales.3<-lm(sales ~ adverts + airplay + attract, data = album2)
summary(albumSales.2)
summary(albumSales.3)
#---We can obtain standardized parameter estimates with the lm.beta() function---
lm.beta(albumSales.3) #how to interprete coeffecients pp.281~283
#---Confidence intervals are obtained with the confint() function----
confint(albumSales.3) # include 0 means ~
#----To compare the R2 in two models, use the ANOVA command---
anova(albumSales.2, albumSales.3)
###################################################
##### 3. Assessing outliers       ###############
###################################################
## Casewise diagnostics: residuals plus influence/leverage measures,
## appended to the original data so flagged cases can be inspected side by side.
#----Obtain casewise diagnostics and add them to the original data file.---
album2$residuals<-resid(albumSales.3)
album2$standardized.residuals <- rstandard(albumSales.3)
album2$studentized.residuals <- rstudent(albumSales.3)
album2$cooks.distance<-cooks.distance(albumSales.3)
album2$dfbeta <- dfbeta(albumSales.3)  # NOTE: dfbeta() returns a matrix (one column per coefficient)
album2$dffit <- dffits(albumSales.3)
album2$leverage <- hatvalues(albumSales.3)
album2$covariance.ratios <- covratio(albumSales.3)
#Save the augmented data for later sessions
write.table(album2, "Album Sales With Diagnostics.dat", sep = "\t", row.names = FALSE)
#look at the data (rounded to 3 decimals for readability)
round(album2, digits = 3)
#residual interpretation pp.268~269
#----Logical vector: which standardized residuals exceed +/-2?--------------
album2$standardized.residuals>2| album2$standardized.residuals < -2
#---Store that flag: large.residual is TRUE when |standardized residual| > 2----------
album2$large.residual <- album2$standardized.residuals > 2 | album2$standardized.residuals < -2
#---Count the large residuals (roughly 5% of cases are expected by chance)-------------
sum(album2$large.residual)
#---Show sales, airplay, attract, adverts and the standardized residual for the flagged cases-------------
album2[album2$large.residual,c("sales", "airplay", "attract", "adverts", "standardized.residuals")]
# interpretation pp.290~291
#-----Cook's distance, leverage and covariance ratio for the flagged cases.---------
album2[album2$large.residual , c("cooks.distance", "leverage", "covariance.ratios")]
###################################################
##### 4. Assessing Assumptions  ###################
###################################################
## Independent errors: for any two observations the residual terms should be
## uncorrelated (lack of autocorrelation).
# FIX: install 'car' only when it is missing -- an unconditional
# install.packages() re-downloads the package on every run and fails in
# non-interactive sessions.
if (!requireNamespace("car", quietly = TRUE)) install.packages("car")
library("car")
#----The Durbin-Watson test (page 292); values near 2 indicate independent errors---
durbinWatsonTest(albumSales.3)
dwt(albumSales.3)  # dwt() is an alias for durbinWatsonTest()
## Multicollinearity testing
#----Variance inflation factors (VIF > 10 is cause for concern)---
vif(albumSales.3)
#----Tolerance is 1/VIF (values below 0.2 are worrying)---
1/vif(albumSales.3)
#----The mean VIF (substantially greater than 1 suggests bias)---
mean(vif(albumSales.3))
##############################################
############5. Creating Dummy Variables#######
##############################################
album100<-read.delim("Album Sales 1.dat", header = TRUE)
album100
###########################
###Dummy coding ###########
###########################
## INDICATOR coding: three mutually exclusive 0/1 dummies for sales bands
album100$sales_d1<-ifelse(album100$sales<100,1,0)
album100$sales_d2<-ifelse(album100$sales>=100&album100$sales<300,1,0)
album100$sales_d3<-ifelse(album100$sales>=300,1,0)
#---Histogram of studentized residuals (two equivalent ways)---
hist(album2$studentized.residuals)
hist(rstudent(albumSales.3))
#--Plot of residuals against fitted (predicted) values, with a flat line at the mean--
plot(albumSales.3$fitted.values,rstandard(albumSales.3))
abline(0, 0)
#same diagnostics via the default lm plot method
plot(albumSales.3)
#Publication quality graphs.
#NOTE(review): 'imageDirectory' must be defined earlier in the script for the
#ggsave() calls below -- verify before running.
album2$fitted <- albumSales.3$fitted.values
# FIX: opts() was removed from ggplot2 (>= 0.9.2); theme() is its replacement.
histogram <- ggplot(album2, aes(studentized.residuals)) +
  theme(legend.position = "none") +
  geom_histogram(aes(y = ..density..), colour = "black", fill = "white") +
  labs(x = "Studentized Residual", y = "Density")
# Overlay a normal curve with the residuals' mean and sd for visual comparison.
histogram + stat_function(fun = dnorm, args = list(mean = mean(album2$studentized.residuals, na.rm = TRUE), sd = sd(album2$studentized.residuals, na.rm = TRUE)), colour = "red", size = 1)
ggsave(file = file.path(imageDirectory, "07 album sales ggplot Hist.png"))
scatter <- ggplot(album2, aes(fitted, studentized.residuals))
scatter + geom_point() + geom_smooth(method = "lm", colour = "Red") + labs(x = "Fitted Values", y = "Studentized Residual")
ggsave(file = file.path(imageDirectory, "07 Album sales ggplot scatter.png"))
# FIX: qplot(..., stat = "qq") is defunct in current ggplot2; build the Q-Q
# plot explicitly with stat_qq() instead.
qqplot.resid <- ggplot(album2, aes(sample = studentized.residuals)) +
  stat_qq() +
  labs(x = "Theoretical Values", y = "Observed Values")
qqplot.resid
ggsave(file = file.path(imageDirectory, "07 Album sales ggplot QQ.png"))
#---Round standardized residuals to 2 decimals for readability.
round(rstandard(albumSales.3), 2)
|
e062b8d7b101438c09782f3747932c7ff7663be6 | 1475495fa928ba1b208194c3a5235f8e82db846b | /style.R | 99bd6119b08df901a38601ef48a428fb39e5dd33 | [] | no_license | geraldgartner/bpw-stichwahlwiederung | 38dc35d4040987b45ab3f6520fc5731f905216b7 | e228d1defb66f9e51faef9b3f981e03c021438e7 | refs/heads/master | 2020-06-13T08:46:23.603763 | 2016-12-06T09:57:40 | 2016-12-06T09:57:40 | 75,432,432 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,196 | r | style.R | #Unser Style
library(ggthemes)  # NOTE(review): loaded but not obviously used below -- confirm it is needed
# Our house ggplot2 theme: light grey background, dotted grid, bold title.
# CAUTION: the object is named 'theme', shadowing ggplot2::theme() as a data
# object. theme(...) calls still resolve to the function (R skips non-function
# objects in call position), but the name is confusing; renaming would require
# updating every dependent script.
# NOTE: plot.background and plot.title are each set in more than one of the
# chained theme() calls below; for a repeated element the LAST setting wins,
# so e.g. plot.background ends up as element_rect(colour = "gray97") only.
theme <- theme(plot.background = element_rect(fill = "gray97"), panel.grid.major = element_line(colour = "gray86", linetype = "dotted"),
               panel.grid.minor = element_line(colour = "gray86", linetype = "dotted")) +
  theme(plot.title = element_text(size = 22, face = "bold"),
        plot.background = element_rect(fill = "gray97", colour = "antiquewhite", size = 10, linetype = "solid")) +
  theme(axis.ticks = element_blank(),
        axis.line = element_blank(),
        axis.title = element_text(vjust = 8),
        panel.background = element_rect(fill = "grey97", linetype = "solid"),
        plot.background = element_rect(colour = "gray97"),
        plot.title = element_text(hjust=0, margin=unit(c(0,1,0.2,1), "cm")),
        plot.margin = unit(c(1,0.5,0.5,0.5), "cm")) +
  theme(axis.text=element_text(size=16))
# Define candidate colours (presidential election candidates)
kandidatenfarben <- c("hofer" = "#7a8fcc","vdb" = "#548750","griss" = "#b398aa","hundstorfer" ="#b34848", "khol" = "#282828", "lugner" = "#bfb58e")
# Map internal candidate keys to display names
kandidaten <- c("hofer" = "Hofer", "khol" = "Khol","hundstorfer" = "Hundstorfer", "griss" = "Griss", "vdb" = "Van der Bellen", "lugner" = "Lugner")
|
48594d7d71df8b15a83fd846aa26a68a1aca823e | d357b64acdad4bdfdb0debd38425f346d0eb5f7a | /tests/test_geoplts.R | e3528fc35a69fc591dc96dd0cb1a632278e593f7 | [] | no_license | Andrew-Clappison/AC.R.Capstone | 36eb93fb819632460e34f5bfd5c378fbc4bd12c6 | 6306d606a7c30503538f08daad4033f9ae56c8c5 | refs/heads/master | 2020-09-02T21:01:35.913838 | 2019-11-03T16:47:16 | 2019-11-03T16:47:16 | 219,303,559 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 819 | r | test_geoplts.R | testthat::test_that("eq_create_label() Tests", {
data <- data.frame(LOCATION_NAME = c("Location 1", "Location 2"),
EQ_PRIMARY = c(NA, 4),
TOTAL_DEATHS = c(10, NA),
OTHER = c("some text", "some more text"),
stringsAsFactors = FALSE)
testthat::expect_that(AC.R.Capstone::eq_create_label(data), testthat::is_a("character"))
})
# eq_map() should return a leaflet htmlwidget annotated with the given column.
testthat::test_that("eq_map() Tests", {
  # Minimal data frame with the columns eq_map() needs to place markers.
  data <- data.frame(LATITUDE = c(123.45, -6.789, 0.0),
                     LONGITUDE = c(-1.23, 45.678, 0.0),
                     EQ_PRIMARY = c(7, 4.5, 5),
                     ANNOT_COL = c("INFO1", "INFO2", "INFO3"),
                     stringsAsFactors = FALSE)
  # FIX: expect_that()/is_a() are deprecated in testthat 3e; assert the S3 class directly.
  testthat::expect_s3_class(AC.R.Capstone::eq_map(data, "ANNOT_COL"), "leaflet")
})
|
8a34af17e3fd0ca452320f5554e9634c04c98270 | a61104488f204a969a825fae8aa292ba53267ceb | /R/synthetic_lethality.R | 3e08ab93500b188f6137a28784e15612533f6eb7 | [
"MIT"
] | permissive | sigven/oncoEnrichR | 2dbfdca6d49d4b40862942d2997611f841d9c80c | 3a42581a7fdf90ff33d955b0b8135f71217412ec | refs/heads/master | 2023-08-17T00:01:36.046133 | 2023-08-16T10:10:05 | 2023-08-16T10:10:05 | 223,118,510 | 45 | 9 | MIT | 2023-08-16T09:49:02 | 2019-11-21T07:53:42 | R | UTF-8 | R | false | false | 5,943 | r | synthetic_lethality.R | annotate_synleth_paralog_pairs <- function(
  qgenes,             # character vector of query gene symbols
  genedb = NULL,      # gene annotation data frame (entrezgene, genename, ...)
  slparalogdb = NULL) {  # predicted synthetic-lethal paralog pairs (symbol_A1/symbol_A2, entrezgene_A1/_A2, prediction_score)
  # Configure console logging with a timestamped layout.
  lgr::lgr$appenders$console$set_layout(
    lgr::LayoutFormat$new(timestamp_fmt = "%Y-%m-%d %T"))
  # Validate inputs early (validate_db_df is a package-internal schema check).
  stopifnot(is.character(qgenes))
  stopifnot(!is.null(genedb))
  stopifnot(is.data.frame(slparalogdb))
  validate_db_df(slparalogdb, dbtype = "slparalog")
  validate_db_df(genedb, dbtype = "genedb")
  lgr::lgr$info(
    paste0("Annotation of membership in predicted synthetic lethal interactions - De Kegel et al., Cell Systems, 2021"))
  # Result: two data frames -- pairs with BOTH members in the query set,
  # and pairs where only ONE member is in the query set.
  paralog_synleth_interactions <- list()
  paralog_synleth_interactions[['both_in_pair']] <- data.frame()
  paralog_synleth_interactions[['single_pair_member']] <- data.frame()
  # Interactions where a query gene matches the FIRST member (symbol_A1) of a
  # pair; gene names for both members are pulled in from genedb.
  targetA_interactions <- as.data.frame(
    data.frame("target" = qgenes, stringsAsFactors = F) |>
      dplyr::inner_join(slparalogdb,
                        by = c("target" = "symbol_A1"), relationship = "many-to-many") |>
      dplyr::rename(gene_A = "target") |>
      dplyr::left_join(
        dplyr::select(genedb, c("entrezgene", "genename")),
        by = c("entrezgene_A1" = "entrezgene"), relationship = "many-to-many"
      ) |>
      dplyr::rename(genename_A = "genename") |>
      dplyr::left_join(
        dplyr::select(genedb, c("entrezgene", "genename")),
        by = c("entrezgene_A2" = "entrezgene"), relationship = "many-to-many"
      ) |>
      dplyr::rename(genename_B = "genename",
                    gene_B = "symbol_A2") |>
      dplyr::select(-c("entrezgene_A1", "entrezgene_A2")) |>
      dplyr::select(c("gene_A",
                      "genename_A",
                      "gene_B",
                      "genename_B"),
                    dplyr::everything()) |>
      dplyr::arrange(dplyr::desc(.data$prediction_score))
  )
  # Mirror image: interactions where a query gene matches the SECOND member
  # (symbol_A2) of a pair.
  targetB_interactions <- as.data.frame(
    data.frame("target" = qgenes, stringsAsFactors = F) |>
      dplyr::inner_join(slparalogdb,
                        by = c("target" = "symbol_A2"), relationship = "many-to-many") |>
      dplyr::rename(gene_B = "target") |>
      dplyr::left_join(
        dplyr::select(genedb, c("entrezgene", "genename")),
        by = c("entrezgene_A2" = "entrezgene"), relationship = "many-to-many"
      ) |>
      dplyr::rename(genename_B = "genename") |>
      dplyr::left_join(
        dplyr::select(genedb, c("entrezgene", "genename")),
        by = c("entrezgene_A1" = "entrezgene"), relationship = "many-to-many"
      ) |>
      dplyr::rename(genename_A = "genename",
                    gene_A = "symbol_A1") |>
      dplyr::select(-c("entrezgene_A1", "entrezgene_A2")) |>
      dplyr::select(c("gene_A",
                      "genename_A",
                      "gene_B",
                      "genename_B"),
                    dplyr::everything()) |>
      dplyr::arrange(dplyr::desc(.data$prediction_score))
  )
  # NOTE(review): scalar && would be the idiomatic operator here (NROW()
  # returns length-1 values, so & happens to work).
  if (NROW(targetB_interactions) > 0 &
      NROW(targetA_interactions) > 0) {
    # Pairs present in both orientations => both members are in the query set.
    paralog_synleth_interactions[['both_in_pair']] <- as.data.frame(
      dplyr::select(targetA_interactions,
                    c("gene_A",
                      "gene_B")) |>
        dplyr::inner_join(targetB_interactions,
                          by = c("gene_A","gene_B"), relationship = "many-to-many")
    )
    if (NROW(paralog_synleth_interactions[['both_in_pair']]) > 0) {
      # Remove the both-in-pair rows, leaving only single-member interactions.
      targetA_interactions <- targetA_interactions |>
        dplyr::anti_join(paralog_synleth_interactions[['both_in_pair']],
                         by = c("gene_A", "gene_B"))
      # For B-side hits, swap the A/B columns so the query gene always appears
      # as gene_A in the combined output.
      targetB_interactions <- targetB_interactions |>
        dplyr::anti_join(paralog_synleth_interactions[['both_in_pair']],
                         by = c("gene_A", "gene_B")) |>
        dplyr::rename(tmp_symbol = "gene_A",
                      tmp_genename = "genename_A") |>
        dplyr::mutate(gene_A = .data$gene_B,
                      genename_A = .data$genename_B,
                      gene_B = .data$tmp_symbol,
                      genename_B = .data$tmp_genename) |>
        dplyr::select(-c("tmp_symbol", "tmp_genename")) |>
        dplyr::select(c("gene_A",
                        "genename_A",
                        "gene_B",
                        "genename_B"),
                      dplyr::everything())
      paralog_synleth_interactions[['single_pair_member']] <-
        targetA_interactions |>
        dplyr::bind_rows(targetB_interactions) |>
        dplyr::arrange(dplyr::desc(.data$prediction_score))
    }
  } else {
    # At most one of the two orientations produced hits; the two branches
    # below are therefore mutually exclusive.
    if (NROW(targetB_interactions) > 0) {
      # Swap columns so the query gene appears as gene_A (see above).
      paralog_synleth_interactions[['single_pair_member']] <-
        targetB_interactions |>
        dplyr::rename(tmp_symbol = "gene_A",
                      tmp_genename = "genename_A") |>
        dplyr::mutate(gene_A = .data$gene_B,
                      genename_A = .data$genename_B,
                      gene_B = .data$tmp_symbol,
                      genename_B = .data$tmp_genename) |>
        dplyr::select(-c("tmp_symbol", "tmp_genename")) |>
        dplyr::select(c("gene_A",
                        "genename_A",
                        "gene_B",
                        "genename_B"),
                      dplyr::everything()) |>
        dplyr::arrange(dplyr::desc(.data$prediction_score))
    }
    if (NROW(targetA_interactions) > 0) {
      paralog_synleth_interactions[['single_pair_member']] <-
        targetA_interactions |>
        dplyr::arrange(dplyr::desc(.data$prediction_score))
    }
  }
  rm(targetA_interactions)
  rm(targetB_interactions)
  lgr::lgr$info(
    paste0("Found n = ", NROW(paralog_synleth_interactions[['both_in_pair']]),
           " predicted interactions of synthetic lethality for which both members are present in the query set"))
  # NOTE(review): "whih" below is a typo ("which") in the log message text;
  # left untouched here because it is a runtime string.
  lgr::lgr$info(
    paste0("Found n = ", NROW(paralog_synleth_interactions[['single_pair_member']]),
           " predicted interactions of synthetic lethality for whih a single member is present in the query set"))
  return(paralog_synleth_interactions)
}
|
41ab3528abb2f1edb6b802d63a106262486cbb9f | 6d68c7a9f8bcc34dbb1b8345f2b00e8ee395af2b | /R/LMM_Results_to_Word.R | 28ae1c9c4ce18507568268f5a15bb7e0e8715d2b | [
"MIT"
] | permissive | mscsep/SAM_sAA_Cortisol | df66fe661ebbca7b9027355e8afb3b39acca882e | 719e4ec9e912993b81caacb6a00091aed9526845 | refs/heads/main | 2023-04-28T07:52:17.747906 | 2021-05-12T07:45:49 | 2021-05-12T07:45:49 | 366,634,181 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,596 | r | LMM_Results_to_Word.R | #'---
#' title: "Source script with functions to format & export LMER Chi2 and PostHoc test tables to word"
#' author: "Milou Sep"
#' date: "8/29/2018"
#' output: html_document
#'---
# Load required packages --------------------------------------------------
library(flextable) # To make publication ready tables for word | info: https://davidgohel.github.io/flextable/articles/overview.html
library(officer) # https://davidgohel.github.io/flextable/articles/layout.html # To export chi tables to word
# Function for Chi2 Tables ------------------------------------------------
Export.Chi2.Table <- function(Chi2Table, TableName, file.export){
# Create Flextable
Chi2_Table <- flextable(Chi2Table)
# Add Layout settings to flextable.
# Make significant value's Bold [Info: https://cran.r-project.org/web/packages/flextable/vignettes/format.html#bold]
Chi2_Table <- bold(Chi2_Table, i= ~pvalue < 0.05 , j="pvalue") # NB is j= is included, only value in not complete row bold
#italic(sAA.PostHoc.Table, i= ~pvalue < 0.05 )#, j="pvalue")
# set digits # https://davidgohel.github.io/flextable/articles/format.html#set_formatter-function
Chi2_Table <- display(Chi2_Table, col_key="df", pattern = "{{df}}", formatters = list(df~sprintf("%.00f",df))) # no decimals for df [for 1 decimal: %.01f etc]
Chi2_Table <- display(Chi2_Table, col_key="deltadf", pattern = "{{deltadf}}", formatters = list(deltadf~sprintf("%.00f",deltadf)))
Chi2_Table <- theme_vanilla(Chi2_Table) # remove thick lines
Chi2_Table <- set_header_labels(Chi2_Table, LogLikelihood="Log Likelihood" ,deltadf= "delta df", pvalue="p-value") # change names
Chi2_Table <- autofit(Chi2_Table) # change table dimensions to content
if (file.export == T){
doc <- read_docx()
doc <- body_add_flextable(doc, value = Chi2_Table, align="center")
print(doc, target = paste0(TableName,".docx"))
}
return(Chi2_Table)
}
# Function for PostHoc Pairwise Comparisons Table -------------------------
Export.PostHoc.Table <- function(PostHocTable, TableName, file.export){
# Create Flextable
PostHoc_Table <- flextable(PostHocTable)
# Add Layout settings to flextable.
PostHoc_Table<- bold(PostHoc_Table, i= ~p.value < 0.05 )
PostHoc_Table<- theme_vanilla(PostHoc_Table) # dikke lijnen weg
PostHoc_Table<- autofit(PostHoc_Table) # afmetingen tabel aanpassen aan content
if (file.export == T){
doc <- read_docx()
doc <- body_add_flextable(doc, value = PostHoc_Table, align="center")
print(doc, target = paste0(TableName,".docx"))
}
return(PostHoc_Table)
}
|
bb07a7f794ab703691cd168b1ee0390624d70c2b | b1c1e9d146157d14c142d24a9e02b95b3a31f584 | /Doutorado/Chapter-1/Analysis/Old/Analysis Cap1.R | 9e9f16516e4377d94bc372be3a220c48a9aa7f33 | [] | no_license | Eduardoqm/Science-Repository | 1ef37904f290cbbea3c060c0a4cf37265f60b699 | d655a12fb833a9dd128672576c93cc6f9303f6ea | refs/heads/master | 2023-07-17T08:24:52.460738 | 2023-07-05T17:22:07 | 2023-07-05T17:22:07 | 200,397,253 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,814 | r | Analysis Cap1.R | #Analysis Cap-1 Hyperion Indexs on Fire
#By: Eduardo Q Marques 04-12-2019
library(tidyverse)
library(reshape2)
library(GGally)
#Load data
# NOTE(review): setwd() with an absolute Windows path makes this script
# non-portable; consider project-relative paths (e.g. the here package).
setwd("C:/Users/Eduardo Q Marques/Documents/My Jobs/Doutorado/Deposito/Banco de Dados Tanguro/Dados para analise cap1")
hy = read.csv("Hyperion_indexs_median by plot_Clean Vs.csv", sep = ",", header = TRUE)  # Hyperion spectral indices per plot
biomass = read.csv("Biomassa_Tang.csv", sep = ",", header = TRUE)                       # biomass by plot/transect/year (wide)
lai = read.csv("LAI_Area1_Tang.csv", sep = ",", header = TRUE)                          # leaf area index measurements
fuel = read.csv("Combustivel_Brown_Tang.csv", sep = ",", header = TRUE)                 # fuel load (Brown transects)
litt = read.csv("Liteira_Area1_Tang.csv", sep = ",", header = TRUE)                     # litterfall measurements
#fire = read.csv("Fire.csv", sep = ",", header = TRUE) #Make the fire intensity!
#Data organization
#Biomass ====================================
#biomass = biomass[,c(1:7)]#Select date
# Reshape biomass from wide (one column per census year) to long format.
colnames(biomass) = c('plot', 'transcto', '2004', '2008', '2010', '2011', '2012', '2014')
biomass = melt(biomass)
colnames(biomass) = c('plot', 'transcto', 'data', 'biomass')
biomass$transcto = as.character(biomass$transcto)
biomass$plot = as.character(biomass$plot)
# Recode experimental plots: A = control, B = burned every 3 yr, C = burned yearly.
biomass$plot[biomass$plot == "A"] <- "controle"
biomass$plot[biomass$plot == "B"] <- "b3yr"
biomass$plot[biomass$plot == "C"] <- "b1yr"
# Classify transects as forest edge ("borda": AA, AB, A-F) or core
# ("nucleo": G-U). Vectorized with %in%, replacing one assignment per code;
# values outside both sets are left unchanged, exactly as before.
biomass$transcto[biomass$transcto %in% c("AA", "AB", LETTERS[1:6])] <- "borda"
biomass$transcto[biomass$transcto %in% LETTERS[7:21]] <- "nucleo"
# Same classification when the column holds edge-distance bins instead:
# only the first 250 m count as edge.
biomass$transcto[biomass$transcto == "(0,250]"] <- "borda"
biomass$transcto[biomass$transcto %in% c("(250,500]", "(500,750]", "(750,1e+03]")] <- "nucleo"
# Total biomass per plot x edge/core class x census year.
biomass = biomass %>%
  group_by(plot, transcto, data) %>%
  summarise(biomass = sum(biomass, na.rm = TRUE))
colnames(biomass) = c('parcela', 'dist', 'data', 'biomass')
#LAI ====================================
# Keep columns: sampling line, transect, LAI value, year.
lai = lai[,c(1,3,11,21)]
lai$transecto = as.character(lai$transecto)
# Recode sampling lines into plots (vectorized, replacing the original
# element-by-element loops): 1-10 control, 11-20 burned every 3 yr,
# 21-31 burned yearly.
lai$linhas[lai$linhas %in% 1:10]  <- "controle"
lai$linhas[lai$linhas %in% 11:20] <- "b3yr"
lai$linhas[lai$linhas %in% 21:31] <- "b1yr"
colnames(lai) = c("parcela", "dist", "lai", "data")
# Classify transects as edge ("borda": AA, AB, A-F) or core ("nucleo": G-U).
lai$dist[lai$dist %in% c("AA", "AB", LETTERS[1:6])] <- "borda"
lai$dist[lai$dist %in% LETTERS[7:21]] <- "nucleo"
# Median LAI per plot x edge/core x year, restricted to 2005-2012.
# BUG FIX: the original filter(data == 2005:2012) recycled the 8-value vector
# element-wise against the rows instead of testing set membership; %in% is
# the intended comparison.
lai = lai %>%
  group_by(parcela, dist, data) %>%
  summarise(lai = median(lai)) %>%
  filter(data %in% 2005:2012)
#Fuel ==================================
# Total fuel load = sum of the three Brown size classes (NI06 + NI25 + NI76).
fuel = fuel %>%
  na.omit() %>%
  mutate(fuel = NI06 + NI25 + NI76)
fuel = fuel[,c(1,2,7)]
# Split the point id ("<line number><transect letters>") into its parts.
fuel$linha <- as.numeric(str_extract(fuel$ponto, "[0-9]+"))
fuel$dist <- as.character(str_extract(fuel$ponto, "[A-Z]+"))
fuel = fuel[,c(-1)]
colnames(fuel) = c("data", "fuel","linha", "dist")
# Recode lines into plots (vectorized, replacing the original loops):
# 1-10 control, 11-20 burned every 3 yr, 21-31 burned yearly.
fuel$linha[fuel$linha %in% 1:10]  <- "controle"
fuel$linha[fuel$linha %in% 11:20] <- "b3yr"
fuel$linha[fuel$linha %in% 21:31] <- "b1yr"
# Classify transects as edge or core; the BORDA and CAPIM points also count
# as edge. Values outside both sets are left unchanged, exactly as before.
fuel$dist[fuel$dist %in% c("BORDA", "CAPIM", "AA", "AB", LETTERS[1:6])] <- "borda"
fuel$dist[fuel$dist %in% LETTERS[7:21]] <- "nucleo"
# Median fuel load per plot x edge/core x year.
colnames(fuel) = c("data", "fuel","parcela", "dist")
fuel = fuel %>%
  na.omit() %>%
  group_by(parcela, dist, data) %>%
  summarise(fuel = median(fuel))
#Litterfall ========================
# Keep columns: plot, transect, year, litterfall value.
litt = litt[,c(1,8,10,13)]
colnames(litt) = c("parcela", "dist", "data", "litt")
litt$dist = as.character(litt$dist)
litt$parcela = as.character(litt$parcela)
# Recode experimental plots: A = control, B = burned every 3 yr, C = burned yearly.
litt$parcela[litt$parcela == "A"] <- "controle"
litt$parcela[litt$parcela == "B"] <- "b3yr"
litt$parcela[litt$parcela == "C"] <- "b1yr"
# Classify transects as edge ("borda": Bo, AA, AB, A-F) or core ("nucleo": G-U),
# vectorized with %in% (replaces one assignment per code).
litt$dist[litt$dist %in% c("Bo", "AA", "AB", LETTERS[1:6])] <- "borda"
litt$dist[litt$dist %in% LETTERS[7:21]] <- "nucleo"
# Mean litterfall per plot x edge/core x year, restricted to 2004-2012.
# BUG FIX: the original filter(data == 2004:2012) recycled the 9-value vector
# element-wise instead of testing set membership; %in% is the intended
# comparison.
litt = litt %>%
  na.omit() %>%
  group_by(parcela, dist, data) %>%
  summarise(litt = mean(litt)) %>%
  filter(data %in% 2004:2012)
#Join Data =========================
#struc = hy[,c(2, 4, 10, 12, 18, 19, 21)]
# Build a shared key "parcela_data_dist" in every table so they can be joined.
hy = hy %>%
  unite(col = "id", c("parcela", "data", "dist"), sep = '_')
#biomass = as.data.frame(biomass)#To work
#biomass = biomass %>%
#  unite(col = "id", c("parcela", "data", "dist"), sep = '_')
lai = lai %>%
  unite(col = "id", c("parcela", "data", "dist"), sep = '_')
fuel = fuel %>%
  unite(col = "id", c("parcela", "data", "dist"), sep = '_')
litt = litt %>%
  unite(col = "id", c("parcela", "data", "dist"), sep = '_')
# Full joins keep every id present in any table (missing metrics become NA).
# NOTE: the biomass join is currently disabled (commented out above and below).
df = hy
#df = full_join(df, biomass, by="id")
df = full_join(df, lai, by="id")
df = full_join(df, fuel, by="id")
df = full_join(df, litt, by="id")
# Split the key back into its component columns.
df = df %>%
  separate(col = "id", c("parcela", "data", "dist"), sep = '_')
#write.table(df, "Area1_data_edge_core.csv", sep = ",")
#Correlation Matrix ======================
# NOTE(review): ggpairs() on the full df includes the character key columns
# (parcela, data, dist); a numeric-only subset may be more informative.
ggpairs(df)
#GLM and Dredge (TODO: model selection step not yet implemented)
|
f591085a998215f83e51cfb0e2684e39abbd0256 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RandomFields/examples/RMcov.Rd.R | f436496d69468f4d9156fbe68acb54ba649c5c92 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,122 | r | RMcov.Rd.R | library(RandomFields)
### Name: RMcov
### Title: Non-stationary covariance model corresponding to a variogram
###   model
### Aliases: RMcov RMcov RMCOV_X
### Keywords: spatial models
### ** Examples
# NOTE(review): this file appears to be auto-generated from the RMcov Rd
# examples of the RandomFields package; manual edits may be overwritten.
## Don't show:
StartExample()
## End(Don't show)
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
# Fractional Brownian motion variogram (alpha = 1) used as the base model.
bm <- RMfbm(alpha=1)
plot(bm)
# Coarse grid when run non-interactively keeps the example fast.
x <- seq(0, 6, if (interactive()) 0.125 else 3)
plot(RFsimulate(bm, x))
## standardizing with the random variable at the origin
z1 <- RFsimulate(RMcov(bm), x)
plot(z1)
z1 <- as.vector(z1)
zero <- which(abs(x) == 0)
# The simulated field must be (numerically) zero at the conditioning point.
stopifnot(abs(z1[zero]) < 1e-13)
## standardizing with the random variable at the center of the interval
z2 <- RFsimulate(RMcov(bm, "center"), x)
plot(z2)
z2 <- as.vector(z2)
stopifnot(abs(z2[(length(z2) + 1) / 2]) < 1e-13)
## standardizing with the random variables at the end points of the interval
z3 <- RFsimulate(RMcov(bm, "extremals"), x)
plot(z3)
z3 <- as.vector(z3)
# With "extremals" the endpoint values must cancel out.
stopifnot(abs(z3[1] + z3[length(z3)]) < 1e-13)
## Don't show:
FinalizeExample()
## End(Don't show)
a89b877f85bbae2617ca0b114a0cdad175bdc5da | 023cc8d9e86ffbb39b80738494dea879d2970273 | /health_clean_up.R | abe2597d1aae62829ed373e60cd942d46c54e335 | [] | no_license | medewitt/health_connector | 4405d2091cc2761d74f221aaa7843ce8bb354254 | 1bd480cd3fd313cab328e82866b2ebea4d328a63 | refs/heads/master | 2020-03-27T06:28:43.500276 | 2018-09-26T01:29:21 | 2018-09-26T01:29:21 | 146,109,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,651 | r | health_clean_up.R | # Import Wake Forest Baptist Financial Statements
# libraries ---------------------------------------------------------------
library(tidyverse)
library(lubridate)
# import data -------------------------------------------------------------
df_1 <- read_csv("https://docs.google.com/spreadsheets/d/e/2PACX-1vQ7WgYmAts46m1xQDLFpE3wLiDXzPi_1PKLosDQfFZPSlho_WHWpjg8oAoM88-Eo6lXjgHS_F2wmpdd/pub?gid=0&single=true&output=csv")
#fix names
df_2 <- df_1 %>%
setNames(make.names(names(.))) %>%
setNames(tolower(names(.))) %>%
setNames(str_replace_all(string = names(.), pattern = "\\.", "_")) %>%
mutate(date = mdy(date))
fix_rolling_agg <- function(df){
df %>%
arrange(quarter) %>%
mutate(diff = rvus - lag(rvus, n = 1, default = 1)) %>%
select(quarter, rvus, diff)
}
# convert to quarterly rather than aggregated
df_3 <- df_2 %>%
group_by(fiscal_year) %>%
mutate(diff_rvus = rvus - lag(rvus, n = 1, default = 1)) %>%
mutate(diff_case_mix_adjusted_equivalent_discharges =
case_mix_adjusted_equivalent_discharges - lag(case_mix_adjusted_equivalent_discharges, n = 1, default = 1)) %>%
mutate(diff_patient_days =
patient_days - lag(patient_days, n = 1, default = 1)) %>%
mutate(diff_inpatient_admissions =
inpatient_admissions - lag(inpatient_admissions, n = 1, default = 1)) %>%
mutate(diff_inpatient_operating_room_cases =
inpatient_operating_room_cases- lag(inpatient_operating_room_cases, n = 1, default = 1)) %>%
mutate(diff_outpatient_operating_room_cases =
outpatient_operating_room_cases- lag(outpatient_operating_room_cases, n = 1, default = 1)) %>%
mutate(diff_total_operating_room_cases =
total_operating_room_cases- lag(total_operating_room_cases, n = 1, default = 1)) %>%
mutate(diff_emergency_department_visits =
emergency_department_visits- lag(emergency_department_visits, n = 1, default = 1))
df_3 %>%
ungroup() %>%
select(date, contains("diff_")) %>%
gather(parameter, value, -date) %>%
mutate(title_param = str_replace_all(string = parameter, pattern = "_", replacement = " ") %>%
str_to_title(.)) %>%
ggplot(aes(date, value))+
geom_line() +
geom_smooth(se = FALSE)+
facet_wrap(~title_param, scales = "free_y")+
theme_minimal()+
labs(
title = "Quarter by Quarter Reporting for WFUBMC",
caption = "Data from https://www.wakehealth.edu/About-Us/Financial-Statement"
) -> quarterly_hospital
ggsave(quarterly_hospital, filename = "2018_08-Hospital_Numbers_Quarter_by_Quarter.pdf",
height = 8, width = 10, units = "in", device = "pdf")
|
a23a7015e6b8a12d30f70195ca45b545b6d85546 | c9134ea0d1873a0a7a6157b30da2699ec4df9478 | /R/adfgRead_DSD.R | 1c4f3316c159de558753492bb0a4d2a9f6b3cc21 | [
"MIT"
] | permissive | wStockhausen/tcsamFisheryDataADFG | 2031a5302e4590dbf0eee10f065ea0610bf56010 | d1c9c72d0a54d32ed62cee1763376fa65f412ae2 | refs/heads/master | 2023-04-13T09:34:49.059459 | 2023-04-01T00:28:47 | 2023-04-01T00:28:47 | 180,407,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,792 | r | adfgRead_DSD.R | #'
#' @title Extract dockside data (DSD) as a tibble from a csv file
#'
#' @description Function to extract dockside data as a tibble from a csv file.
#'
#' @param csv - csv filename with dockside data
#' @param date_format - string ("yyyy-mm-dd" or "mm-dd-yyyy") indicating date format
#'
#' @return a tibble with columns named "fishery","area","year","fishery_code","code_year","adfg","sampdate",
#' "sex","maturity","shell","size","legal", and "count".
#'
#' @details Uses functions \code{readr::read_csv}, \code{stringr::str_sub}.
#'
#' @note The 'year' values are 'crab year' based on the sample date. The 'code_year' is the
#' ADFG fishery year based on the fishery code. Prior to rationalization, there may be differences
#' in these two values.
#'
#' @note 'count' is the number of measured crab associated with the rest of the row categories.
#' Unlike the measure pot data, this may be > 1.
#'
#' @importFrom readr read_csv
#' @importFrom stringr str_sub
#'
#' @export
#'
adfgRead_DSD<-function(csv="TANNER-1990-2018_dockside.csv",
date_format="yyyy-mm-dd"){
#--read dockside data file
dfr <- readr::read_csv(csv);
#column names should be:
expCols<-c("fishery","adfg","sampdate","spcode",
"size","legal","shell","numcrab");
#check column names
if (any(names(dfr)!=expCols)){
idx<-names(dfr)!=expCols;
str<-paste0("--Error! Input column names \n\t", paste(names(dfr)[idx],collapse=", "),
"\nshould match \n\t", paste(expCols[idx], collapse=", "));
stop(str);
}
#unique fishery names:
# CO05 EI90 EI91 EI92
# QO05o QO05r QO06 QO07 QO08 QO09 QO10 QO11 QO12 QO13 QO14 QO15 QO16 QO17 QO18
# QT05 QT06 QT07 QT08 QT13 QT14 QT15 QT17 QT18 QT93 QT94 QT95 QT96
# TR06 TR07 TR08 TR09 TR11 TR12 TR13 TR14 TR15
# TT06 TT07 TT08 TT09 TT13 TT14 TT15
#unique crab areas (character 1): C (CDQ fisheries), E (???), Q (Bering Sea), T (Bristol Bay)
#unique targets (character 2): K (red or blue king crab), O (snow crab), R (red king crab), I (Tanner crab), T (Tanner crab)
#unique area/targets: "CK" (CDQ red or blue king crab), "CO" (CDQ snow crab), "CR" (CDQ red king crab),
# "EI" (?? Tanner crab), "EO" (??? snow crab),
# "QO" (Bering Sea snow crab) "QR" (Bering Sea red king crab) "QT" (Tanner crab West),
# "TR" (BBRKC), "TT" (Tanner crab East)
#assign sex
dfr$sex <- "male";
#assign maturity
dfr$maturity <- "undetermined";
#convert shell condition codes to labels
dfr$shell <- adfgConvert_ShellConditionCodes(dfr$shell);
#--parse 4-character fishery codes
dfr.pf<-adfgConvert_FisheryCodes(dfr$fishery);
#combine columns, add crab year as 'year' and drop some columns
#--code_year will be ADFG fishery year from dfr.pf
dfrp <- cbind(dfr,dfr.pf)
dfrp <- dfrp[,2:ncol(dfrp)];
#--determine crab year corresponding to sample date
if (date_format=="yyyy-mm-dd"){
dfrp$year<-adfgConvert_DateYYYYMMDDtoFisheryYear(dfrp$sampdate);
} else if (date_format=="mm-dd-yyyy"){
dfrp$year<-adfgConvert_DateMMDDYYYYtoFisheryYear(dfrp$sampdate);
} else {
stop("#--ERROR!\n\tUnrecognized date format in adfgRead_DSD(...).\n")
}
#names(dfrp)
# [1] "adfg" "sampdate" "spcode" "size" "legal" "shell" "numcrab" "sex" "maturity" "year" "fishery_code" "fishery" "area"
cols <- c("fishery","area","year","fishery_code","code_year","adfg","sampdate",
"sex","maturity","shell","size","legal","numcrab");
dfrp <- dfrp[,cols];
names(dfrp)[13]<-"count";#--rename 'numcrab' as 'count'
dfrp1 <- dfrp; #change name
#assign area designations "all EBS", "East 166W" and "West 166W"
dfrp1$area <- "all EBS"; #all RKC and snow crab
#--can't split EI Tanner crab based on statarea (i.e., no EWbySA--see adfgRead_MPD)
idx <- (stringr::str_sub(dfrp1$fishery_code,1,2)=="EI");
dfrp1$area[idx] <- "all EBS";#show explicitly
#--can't split QT Tanner crab prior to 2005 based on statarea (i.e., no EWbySA--see adfgRead_MPD)
idx <- (stringr::str_sub(dfrp1$fishery_code,1,2)=="QT")&(dfrp1$year<2005);
dfrp1$area[idx] <- "all EBS";#show explicitly
#--All Tanner crab in QT after 2004 are West 166W
idx <- (stringr::str_sub(dfrp1$fishery_code,1,2)=="QT")&(dfrp1$year>2004);
dfrp1$area[idx] <- "West 166W";
#--All Tanner crab in TT after 2004 are East 166W
idx <- (stringr::str_sub(dfrp1$fishery_code,1,2)=="TT")&(dfrp1$year>2004);
dfrp1$area[idx] <- "East 166W";
#rename fisheries to canonical forms
dfrp1$fishery <- adfgConvert_FisheryNames(dfrp1$fishery);
return(dfrp1);
}
# csv<-file.path("~/StockAssessments-Crab/Data/Fishery.ADFG/2019.07/ObserverDataFiles","TANNER-1990-2018_dockside.csv");
# dfr<-adfgRead_DSD(csv);
|
e04c04914a81b15123830d86649a17e9035323fa | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/3364_0/rinput.R | c1975eb238ed9121393af488d8b7d15eb4cde779 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree from 3364_0.txt and remove its root
# (presumably to feed codeml, judging by the directory layout -- confirm).
testtree <- read.tree("3364_0.txt")
# ape::unroot() converts the rooted tree to an unrooted one.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3364_0_unrooted.txt") |
dbfdf8c3b537bb0f388d9be8aa1fc02502df4336 | 9d85a4873ae8218fe681b0f2f8f11d85b68b33be | /tests/testthat/test-yeet.R | a6a5fb63f4920137620843e64c9af40c8fb2d2c2 | [
"MIT"
] | permissive | EmilHvitfeldt/extrasteps | 27d5cbb0695b45b35bd8ce95c6410e83fc132e59 | 6a7d18335a9c4515dffc12cf10eeda09198964cc | refs/heads/main | 2023-08-22T02:28:21.712990 | 2023-08-14T18:15:39 | 2023-08-14T18:15:39 | 241,030,358 | 4 | 1 | NOASSERTION | 2023-08-05T18:08:12 | 2020-02-17T05:52:30 | R | UTF-8 | R | false | false | 242 | r | test-yeet.R | library(recipes)
# Infrastructure ---------------------------------------------------------------
# Snapshot the printed and prepped forms of a recipe containing step_yeet(),
# so any change to its print/prep output shows up as a snapshot diff.
test_that("printing", {
  # recipe over all mtcars columns with the step under test appended
  rec <- recipe(~., data = mtcars) %>%
    step_yeet()
  expect_snapshot(print(rec))
  expect_snapshot(prep(rec))
})
|
0a9bcdf790e86d16ea0e25d50f1ca0e73d385633 | 28d121adf069bb7ac8578d673236364672e3aade | /man/createMenuItem.Rd | d27d6887663dfd3e0e2d9a5f481c21668276db24 | [] | no_license | cusom/CUSOM.ShinyHelpers | e8d1a3d31925137d1033a63ebd12ec8b2150993f | c562d0762d739bd8fc983c8eb37208105d4e3060 | refs/heads/master | 2023-09-03T08:01:26.224068 | 2023-08-18T14:52:39 | 2023-08-18T14:52:39 | 299,706,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 419 | rd | createMenuItem.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createMenuItem.R
\name{createMenuItem}
\alias{createMenuItem}
\title{Creates Shiny Dashboard Menu Item / Tab}
\usage{
createMenuItem(text, tabName, iconName)
}
\arguments{
\item{text}{string - label text displayed for the menu item}
\item{tabName}{string - name of the dashboard tab this menu item activates}
\item{iconName}{string - name of the icon shown beside the menu item}
}
\value{
shiny dashboard menu item
}
\description{
Creates Shiny Dashboard Menu Item / Tab
}
|
283064c4c88ce4d0b8ba9531205690980badd3c8 | 2b9d0b9fadf292889b2a90563e176a887cadd54a | /analysis/R/data_summarise.R | e4bc399ddb5c5bda962fa061fc8765601b129876 | [
"MIT"
] | permissive | opensafely/post-vax-outcomes-report | 30736f00579fae5cc79c7056467d110a2c4ea13e | 99707f97cf50a022ded4ab561a2dc431759d653d | refs/heads/master | 2023-08-23T10:44:59.747925 | 2023-01-27T10:22:50 | 2023-01-27T10:22:50 | 343,352,666 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,902 | r | data_summarise.R | # Import libraries ----
library('tidyverse')
library('lubridate')
# redacted_summary_* and print_* helpers used below come from this sourced file
source(here::here("lib", "redaction_functions.R"))
# Import processed data ----
data_vaccinated <- read_rds(here::here("output", "data", "data_vaccinated.rds"))
#data_vaccinated$empty_date = date(NA)
# output redacted summaries ----
# create the output directories (no-op if they already exist; recursive=TRUE creates parents)
dir.create(here::here("output", "variable_summary"), showWarnings = FALSE, recursive=TRUE)
dir.create(here::here("output", "variable_summary", "tables"), showWarnings = FALSE, recursive=TRUE)
## categorical ----
# One redacted frequency table per character/logical/factor column, held as a
# two-column tibble: name = variable name, value = that variable's summary table.
sumtabs_cat <- enframe(
  map(
    select(data_vaccinated, where(is.character), where(is.logical), where(is.factor)),
    redacted_summary_cat
  )
)
# Plain-text report: print every variable's summary into categorical.txt.
capture.output(
  walk2(sumtabs_cat$value, sumtabs_cat$name, print_cat),
  file = here::here("output", "variable_summary", "categorical.txt"),
  append=FALSE
)
# Single combined csv holding all categorical summaries.
write_csv(
  unnest(sumtabs_cat, value),
  path=here::here("output", "variable_summary", "categorical.csv"), na=""
)
# One csv per variable under tables/, named categorical_<variable>.csv.
pwalk(
  transmute(
    sumtabs_cat,
    x=value,
    path=paste0(here::here("output", "variable_summary", "tables", paste0("categorical_", name, ".csv"))),
    na=""
  ),
  write_csv
)
## continuous ----
# One redacted numeric summary per numeric (non-logical, non-Date) column,
# held as a two-column tibble: name = variable, value = its summary.
sumtabs_num <-
  data_vaccinated %>%
  select(where(~ {!is.logical(.x) & is.numeric(.x) & !is.Date(.x)})) %>%
  map(redacted_summary_num) %>%
  enframe()
# Plain-text report: print every variable's summary into numeric.txt.
capture.output(
  walk2(sumtabs_num$value, sumtabs_num$name, print_num),
  file = here::here("output", "variable_summary", "numeric.txt"),
  append=FALSE
)
# Combined csv of all continuous summaries.  Reuses sumtabs_num instead of
# recomputing the identical select/map/enframe pipeline a second time, as the
# original code did.
sumtabs_num %>%
  unnest(value) %>%
  write_csv(path=here::here("output", "variable_summary", "continuous.csv"), na="")
## dates ----
# One redacted summary per Date column.
sumtabs_date <-
  data_vaccinated %>%
  select(where(is.Date)) %>%
  map(redacted_summary_date) %>%
  enframe()
# Plain-text report only; see the note below for why no csv is written.
capture.output(
  walk2(sumtabs_date$value, sumtabs_date$name, print_num),
  file = here::here("output", "variable_summary", "date.txt"),
  append=FALSE
)
# this doesn't work because date attribute isn't kept for NA vectors in OPENSAFELY version of ??rlang
#sumtabs_date %>%
#  unnest(value) %>%
#  write_csv(path=here::here("output", "variable_summary", "date.csv"), na="")
# summary stats for report ----
# won't be needed when we can run R via notebook
vars_list <- jsonlite::fromJSON(txt=here::here("lib", "global-variables.json"))
# Extend the global variables with headline counts for the report.
summary_stats <- append (
  vars_list,
  list(
    # run date approximated by the extract log's change time
    # (presumably lubridate::date(), loaded above, drops the time-of-day -- confirm)
    run_date =date(file.info(here::here("metadata","extract_vaccinated.log"))$ctime),
    total_vaccinated = sum(!is.na(data_vaccinated$covid_vax_1_date)),
    # counts by first-dose vaccine type; NA types are excluded via na.rm=TRUE
    total_vaccinated_az = sum(data_vaccinated$covid_vax_1_type=="Ox-AZ", na.rm=TRUE),
    total_vaccinated_pfizer = sum(data_vaccinated$covid_vax_1_type=="P-B", na.rm=TRUE)
  )
)
jsonlite::write_json(summary_stats, path=here::here("output", "summary_stats.json"), auto_unbox = TRUE, pretty=TRUE)
|
593bbe57fedc6eebec8ea74b44f977a328cb6415 | 892033d3c631aba636532cfabe007583c534d298 | /Code/EMR_Deid_Functions.R | 8f79f31e742ec9b052a61745fae795fc2535b4dd | [] | no_license | UCSF-MSLAB/EMR-dePhi_Release | 8382f780b7bbf0f7a5f9562038b10b1929c37345 | fdb21361ebd51dca26df736481d99392c9ad15f9 | refs/heads/master | 2021-01-20T11:09:43.932204 | 2016-09-11T22:14:05 | 2016-09-11T22:14:05 | 59,807,476 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 679 | r | EMR_Deid_Functions.R | #EMR_Deid_Functions.R Info ----
# Functions for the de identification of the free-text present in the folder Files_Stored
# The script operates per subfolder. This piece of code is not intended to be shared over the network.
# The scrip is meant to be launched on the top level.
#
#Copyright Wendy Tang & Antoine Lizee @ UCSF 09/14
#tangwendy92@gmail.com
#Functions for structured fields ----
# Shift a vector of dates by a constant number of days (date de-identification).
#
# Args:
#   dates_col: vector of dates to shift; defaults to the CONTACT_DATE column of
#              the global `notes` data frame (legacy behavior -- prefer passing
#              the column explicitly).
#   date_shift_constant: if TRUE (default), every date is shifted by the same
#              constant; any other value is a not-yet-implemented mode.
#   offset: number of days to shift by.  Defaults to the global `offset_days`,
#              which preserves the original implicit dependency while letting
#              new callers supply an explicit offset.
#
# Returns: the shifted date vector, or NULL when date_shift_constant is not
#   TRUE (per-record shifting is not implemented yet).
shift_field_dates <- function(dates_col = notes$CONTACT_DATE,
                              date_shift_constant = TRUE,
                              offset = offset_days){
  # stopifnot(is.integer(offset))
  # isTRUE() instead of `== T`: T is reassignable, and `== NA` would yield NA
  if(isTRUE(date_shift_constant)){
    return(dates_col + offset)
  } else{
    #will implement per-record (non-constant) shifting if needed
    NULL
  }
}
|
059680d5027ab0ed5557ecbfaf1c8c41685ba22d | 03578016a8d79d9f2b16de9598694a7a76af28c7 | /R/zzz.R | dc689a99e66e6def158980c357870e4195a58596 | [] | no_license | murattasdemir/EVDS | 3ef13d1211bd6c4313bf6b3d19836838c7df6273 | a806c807ccbbbfe4c8c3cdff43b492a04938ee5b | refs/heads/master | 2021-01-03T22:41:41.968843 | 2018-07-11T11:30:04 | 2018-07-11T11:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 277 | r | zzz.R | .onLoad <- function(...){
packageStartupMessage("EVDS requires an API key from CBRT. Get yours from https://evds2.tcmb.gov.tr/index.php?/evds/editProfile \n
All functions require an API key so you should set your key with set_evds_key(\"YOURAPIKEY\") function.")
}
|
34ba83999b8d23efc318452c7c7679e4cad6651f | c6a8c13031488c4927dcd2de9b5fc0a960bb81f6 | /man/sparse_fr_spectrum.Rd | a9732bd2cc6e05e03b9e4e450e8da2a4a5e92178 | [
"MIT"
] | permissive | pmcharrison/hrep | dcfe44f5f9dc5eced687011db59addeecb429836 | 9132b7a937063097ef0b7d91c2c7fcc5c0f83366 | refs/heads/master | 2023-08-31T10:48:06.875846 | 2023-01-12T09:51:52 | 2023-01-12T09:51:52 | 158,516,625 | 8 | 2 | NOASSERTION | 2023-03-11T14:26:31 | 2018-11-21T08:38:18 | R | UTF-8 | R | false | true | 2,439 | rd | sparse_fr_spectrum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparse-fr-spectrum.R
\name{sparse_fr_spectrum}
\alias{sparse_fr_spectrum}
\alias{sparse_fr_spectrum.sparse_fr_spectrum}
\alias{sparse_fr_spectrum.sparse_pi_spectrum}
\alias{sparse_fr_spectrum.pi_chord}
\alias{sparse_fr_spectrum.default}
\alias{sparse_fr_spectrum.list}
\title{Sparse frequency spectrum}
\usage{
sparse_fr_spectrum(x, ...)
\method{sparse_fr_spectrum}{sparse_fr_spectrum}(x, ...)
\method{sparse_fr_spectrum}{sparse_pi_spectrum}(x, ...)
\method{sparse_fr_spectrum}{pi_chord}(x, ...)
\method{sparse_fr_spectrum}{default}(x, ...)
\method{sparse_fr_spectrum}{list}(x, ...)
}
\arguments{
\item{x}{Input sonority.
\itemize{
\item Numeric vectors will be treated as vectors of MIDI note numbers,
and expanded into their implied harmonics.
\item Two-element lists will be treated as finalised spectra.
The first element should be labelled "frequency",
and correspond to a numeric vector of frequencies;
the second element should be labelled "amplitude",
and correspond to a numeric vector of amplitudes.
}}
\item{...}{
Arguments passed on to \code{\link[=expand_harmonics]{expand_harmonics}}
\describe{
\item{\code{num_harmonics}}{(Integerish scalar)
Number of harmonics (including the fundamental) to which
each tone should be expanded.}
\item{\code{roll_off}}{(Numeric scalar) Parametrises the amount of amplitude roll-off
in the harmonics, with greater values corresponding to higher roll-off.}
\item{\code{digits}}{Number of digits to which each partial's MIDI pitch should be rounded.}
\item{\code{label_harmonics}}{If TRUE, then the harmonics in the resulting spectrum are labelled with their harmonic numbers.}
\item{\code{coherent}}{Whether the amplitudes from different spectral components should be combined
assuming coherent summation, where the amplitudes simply add together
(default is \code{FALSE}).
Otherwise incoherent summation is used, where the amplitudes are squared, added, then
square rooted.}
}}
}
\value{
An object of class \code{sparse_fr_spectrum}.
}
\description{
This function represents an input object as a
sparse frequency spectrum.
}
\details{
A sparse frequency spectrum comprises a finite set of spectral components,
each defined by a frequency (in Hz)
and an amplitude (expressed in arbitrary units, but with the
fundamental frequencies of chord pitches typically taking the value 1).
}
|
01c66a22f9011e537801ef8e048d39b177f9e43d | 37d96f6130095abb15131c7984ee2ca83fd46d5c | /Scripts/2samplestests.R | fd0285c2b8a276298f0a10f884a2424f8c647fb5 | [] | no_license | joaovissoci/R-Scripts | 84f40461643ee848d5aded39983dcab21b3a3af0 | 5573a20fe31e4b7dee6ba168bd9cf2b691b6cde3 | refs/heads/master | 2020-05-19T07:49:39.396323 | 2012-06-10T05:53:57 | 2012-06-10T05:53:57 | 2,769,491 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 419 | r | 2samplestests.R | daily.intake <- c(5260,5470,5640,6180,6390,6515,6805,7515,7515,8230,8770)
# One-sample test: daily energy intake (kJ) vs the recommended value of 7725 kJ.
daily.intake
wilcox.test(daily.intake, mu=7725)
# Two-sample tests: energy expenditure by stature (ISwR::energy).
# library() stops immediately if ISwR is missing; require() only returns FALSE
# and would let the script fail later with a confusing error.
library(ISwR)
energy
# Pass data= to the formula interface instead of attach()ing the data frame:
# attach() adds to the search path and can silently mask same-named globals.
var.test(expend~stature, data=energy)              # F test for equality of variances
t.test(expend~stature, data=energy)                # Welch test: variances not equivalent
t.test(expend~stature, data=energy, var.equal=TRUE)
wilcox.test(expend~stature, data=energy)           # nonparametric alternative
# Paired tests: pre- and post-menstrual intake (ISwR::intake).
intake
t.test(intake$pre, intake$post, paired=TRUE)
summary(intake)
wilcox.test(intake$pre, intake$post, paired=TRUE)
890f0d0a1970cff6ca20d1d140c4d6abbbca998a | f6c871a87f64a5ca6d8dcf6931e05e6dc0fb0c1f | /R-Package/man/EstablishDialects.Rd | 90166d59a323ae680b761bdd3738b08128aab955 | [] | no_license | CreanzaLab/SongEvolutionModel | 073342a07eaeca061f92b6798b2ab1ff40567b58 | 449087c85280e11b97924575d2e3c618169a0f8b | refs/heads/master | 2020-06-13T18:23:13.603932 | 2019-08-15T22:57:51 | 2019-08-15T22:57:51 | 194,747,721 | 0 | 1 | null | 2019-08-15T22:57:52 | 2019-07-01T21:52:31 | R | UTF-8 | R | false | true | 567 | rd | EstablishDialects.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Source_BirdCharacteristics.R
\name{EstablishDialects}
\alias{EstablishDialects}
\title{Establish Dialects}
\usage{
EstablishDialects(P, fSongs)
}
\arguments{
\item{P}{a list of parameters}
\item{fSongs}{a matrix of syllable vectors}
}
\description{
Modifies a matrix of syllable vectors to create dialects (regions of syllables that are separated from one another in the syllable space). Regions are defined so that each dialect space is as square as possible.
}
\keyword{song-template}
|
99a3e08307299f40e33a2155bc132fe16f545a87 | 8e56020b1eb876a1769f5db43e2299b679a3eb0a | /man/physiol.Rd | e70e85f1dba7a553cd1df21112af31506e526975 | [] | no_license | bgulbis/icuriskr | 45a80659bccbe810a3854bf5e58b8c54a0952b65 | 0fa19f838b37c28cc34e6e3397db2f1ead5a5d3a | refs/heads/master | 2022-06-21T22:57:05.553670 | 2022-06-14T14:46:41 | 2022-06-14T14:46:41 | 66,739,301 | 3 | 2 | null | 2022-06-14T14:46:42 | 2016-08-27T23:02:01 | R | UTF-8 | R | false | true | 1,041 | rd | physiol.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{physiol}
\alias{as.aa_grad}
\alias{as.admit}
\alias{as.age}
\alias{as.albumin}
\alias{as.bili}
\alias{as.bun}
\alias{as.comorbidity}
\alias{as.gcs}
\alias{as.glucose}
\alias{as.hco3}
\alias{as.hct}
\alias{as.hr}
\alias{as.map}
\alias{as.pao2}
\alias{as.ph}
\alias{as.physiol}
\alias{as.potassium}
\alias{as.rr}
\alias{as.sbp}
\alias{as.scr}
\alias{as.sodium}
\alias{as.temp}
\alias{as.uop}
\alias{as.wbc}
\alias{physiol}
\title{Construct generic data types for use in risk scores}
\usage{
physiol(x)
as.physiol(x)
as.temp(x)
as.sbp(x)
as.map(x)
as.hr(x)
as.rr(x)
as.ph(x)
as.sodium(x)
as.potassium(x)
as.scr(x)
as.bun(x)
as.hct(x)
as.wbc(x)
as.gcs(x)
as.hco3(x)
as.pao2(x)
as.aa_grad(x)
as.bili(x)
as.albumin(x)
as.glucose(x)
as.uop(x)
as.age(x)
as.admit(x)
as.comorbidity(x)
}
\arguments{
\item{x}{object to set class}
}
\description{
Takes an R object and sets class to a specific physiologic type.
}
\keyword{internal}
|
1d6ef9717e8ed041b171bc0159d6a94cf9e8a39c | 9d496563f2e90ff3772f8c9b4fa23929caaa6bb2 | /ui.R | e040c9bab294b8945c8b2b9c393a7c10cd32c77b | [] | no_license | XiusiMa/ShinyProject | b6fe625851eb853aa9860acc500121ebb1067986 | c5566555c9081aa6d3e128003dc74946b3f2c533 | refs/heads/master | 2021-01-19T05:24:48.384418 | 2016-07-15T22:03:53 | 2016-07-15T22:03:53 | 63,452,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,818 | r | ui.R | library(shiny)
library(DT)
# UI definition for the "Energy Outage" dashboard.  Expects `mydata`,
# `msubdata`, and `mset` to exist in the global environment (presumably
# created in global.R -- confirm against the server file).
shinyUI(fluidPage(
  titlePanel("Energy Outage"),
  # Top section: column picker (sidebar) + data table (main panel)
  sidebarLayout(
    sidebarPanel(
      # one checkbox per column of mydata; all columns shown by default
      checkboxGroupInput('show_vars', 'Columns in Table to show:',
                         names(mydata), selected = names(mydata)),
      br(),
      helpText("Click the Columns that you want to choose")
    ),
    mainPanel(
      tabsetPanel(
        id = 'dataset',
        tabPanel('mydata', DT::dataTableOutput('mytable'))
      ))),
  # Middle section: the two plots driven by the controls below
  fluidRow(
    column(6,
           plotOutput('ReasonPlot')
    ),
    column(6,
           plotOutput('CompanyFreqhist')),
    hr()),
  # Bottom section: plot controls
  fluidRow(
    column(3,
           # heading text corrected ("Reason Cause the Outage(..." in the original)
           h4("Reason Causing the Outage (2002 - 2016)"),
           selectInput(inputId = "cause", label = "Cause :",
                       choices=colnames(msubdata)),
           br()
    ),
    column(4, offset = 1,
           # heading typo corrected ("happended" in the original)
           h4("Frequency of Outage happened by Each Company"),
           selectInput(inputId = "company", label = "Company :",
                       choices=names(mset)),
           br(),
           checkboxInput(inputId = "density",
                         label = strong("Show density estimate"),
                         value = FALSE)
    ),
    column(4,
           # Display this only if the density is shown
           conditionalPanel(condition = "input.density == true",
                            sliderInput(inputId = "bw_adjust",
                                        label = "Bandwidth adjustment:",
                                        min = 0.2, max = 2, value = 1, step = 0.2)
           )
    ))
)) |
f481380366144784af234248fb3b003916730eb4 | bd0a0bff1b5b108adefd632fbc8d16c2bad41bc0 | /dataframe.R | 06042374f89dc79f5ce3ea5e2251c1f1d8435465 | [] | no_license | amitkayal/r-tw-training | 43aebc8faf0bb5c15a35d9d9b91f3b02244d814f | 1d2ed6e711461392f6c1d189f75f1adb1d22336c | refs/heads/master | 2021-04-30T01:47:32.291828 | 2018-02-08T06:55:53 | 2018-02-08T06:55:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,904 | r | dataframe.R | # Data manipulation using data frames
#part of Base R
#creating data frames from in-memory vectors
idf<-data.frame(gender=c("M","F"),ht=c(10,20), wt=c(30,40))
idf
names(idf)
#rename all columns at once
colnames(idf)<-c("C1","C2","C3")
idf$C1
names(idf)
#row names (defaults are "1","2",...)
names<- rownames(idf)
names
idf
#from external data
#NOTE(review): hard-coded absolute Windows path -- runs only on that machine
myfile<-read.csv("D:\\niit\\datascience and R\\new content slides\\day5\\titanic.csv")
myfile
names(myfile)
#rename only the 7th column
colnames(myfile)[7]<-"Siblingcount"
names(myfile)
#total value of the ages of all passengers
#na.rm=TRUE drops missing ages from the sum
totalage<-sum(myfile$Age,na.rm = TRUE)
totalage
#new column/variable, computed element-wise from an existing column
myfile$diff<-totalage-myfile$Age
head(myfile)
#for our class we will use hflights data
library(hflights)
#data(hflights)
mydata<-hflights
summary(mydata)
head(mydata)
#check the structure
str(mydata)
#get the column names
names(mydata)
#extract column names into a vector
mycolumns<-names(mydata)
mycolumns
#selected rows and columns (rows by number, columns by name)
mydata[1:10,c("Year","Month","ArrTime","AirTime")]
mycols<-c("Year","Month","ArrTime","AirTime")
#using a column vector
mydata[1:10,mycols]
#by row numbers
mydata[1:100,]
#row and column numbers
mydata[1:10,1:3]
mydata[500:600,5:8]
#filter on Flight number
myfilter<-subset(mydata,FlightNum==428)
nrow(myfilter)
print(myfilter)
#multiple filters (& combines the conditions element-wise)
myfilter<-subset(mydata,FlightNum==428 & ArrDelay==0)
nrow(myfilter)
myfilter
#show select columns
myfilter<-subset(mydata,FlightNum==428 & ArrDelay==0,
                 select=c("FlightNum","Origin","Dest",
                          "UniqueCarrier"))
print(myfilter)
#if you want exclude just one or two columns
#(select=-Dest keeps every column except Dest)
subset(myfilter ,select=-Dest)
#adding rows and columns
#columns first
#create a vector for the column with the required values
myfilter
Dest1<-"India"
Dest1
cbind(myfilter,Dest1)    #adds Dest1 as a 5th column (result printed, not saved)
#now rows
myrow<-c(428,"ABC","WN","USA","India")
#the new row has 5 values, so it must be bound to the 5-column frame;
#the original rbind(myfilter,myrow) failed: myfilter only has 4 columns
rbind(cbind(myfilter,Dest1),myrow)
#one more example of filter
myfilter1<-subset(mydata,AirTime<25)
myfilter1
#How to delete a column
myfilter1$AirTime
myfilter1$AirTime<-NULL    #assigning NULL removes the column
myfilter1$AirTime
names(myfilter1)
#how to update
myfilter2<-cbind(myfilter,Dest1)
#append the row to the 5-column frame just built
#(the original rbind(myfilter,myrow) discarded the cbind and mismatched column counts)
myfilter2<-rbind(myfilter2,myrow)
myfilter2
myfilter2$Dest<-"India"
myfilter2
#specific cell (row 1, column 3)
myfilter2[1,3]<-"AUS"
myfilter2
#create a new column by copying an existing one
myfilter2$neworigin <- myfilter2$Origin
myfilter2
names(myfilter2)
#derived column computed from two existing columns
mydata$totaltime<-mydata$AirTime+mydata$DepDelay
head(mydata)
#ordering the results
#sorted by ascending order by default
mydata[order(mydata$UniqueCarrier),c("UniqueCarrier","DepDelay")]
#this is for descending order
mydata[order(mydata$UniqueCarrier,decreasing = TRUE),c("UniqueCarrier","DepDelay")]
#DF class Exercise
#load the titanic data from your local drive
#display only the following columns
#passengerId, survived, Name, age , Sex
#Rename the column Sex to Gender
#save this data a new csv file on the local storage
#with the name titanic_yourname |
404a4226ed463a8b5b44b4bdabd8cd01412b5631 | 0d2b4e04678fc968058de0efafdc6134a7089a77 | /fixed_price_statistics_sample.r | 20af83d891f4f12f42016aa4e7b08785d3005fdd | [] | no_license | CSISdefense/Fixed-price | 4ebb527547a7f0404639804f3c530505d08ead67 | 09eb0babd2dde587d54d0eefe7eaa6c00eec962f | refs/heads/master | 2022-05-06T16:43:22.057108 | 2016-10-06T19:47:34 | 2016-10-06T19:47:34 | 29,840,689 | 5 | 5 | null | 2015-03-11T14:27:27 | 2015-01-26T02:03:28 | HTML | UTF-8 | R | false | false | 19,769 | r | fixed_price_statistics_sample.r | #*************************************Required Libraries******************************************
require(plyr)
require(grid)
require(reshape2)
require(stringr)
require(ggplot2)
# require(logging)
# debug(VariableNumericalFormat)
#*************************************Options*****************************************************
# NOTE(review): these two options() calls are repeated verbatim after the source()
# calls below; one pair is redundant unless the sourced files change them -- confirm.
options(error=recover)
options(warn=1)
# basicConfig()
# logdebug("not shown, basic is INFO")
# logwarn("shown and timestamped")
# system("defaults write org.R-project.R force.LANG en_US.UTF-8")
# debug("CreateCSV")
# debug(apply_lookups)
# debug(CreateDuration)
#*************************************Lookup Files*****************************************************
# NOTE(review): hard-coded machine-specific Windows paths; the script only runs
# as-is on this workstation.
setwd("K:\\Development\\Fixed-price")
Path<-"K:\\2007-01 PROFESSIONAL SERVICES\\R scripts and data\\"
set.seed(1)    # fixed seed so the weighted sample drawn below is reproducible
# Path<-"~\\FPDS\\R scripts and data\\"
# Path<-"C:\\Users\\Greg Sanders\\SkyDrive\\Documents\\R Scripts and Data SkyDrive\\"
source(paste(Path,"helper.r",sep=""))
source(paste(Path,"lookups.r",sep=""))
source(paste(Path,"statistics_aggregators.r",sep=""))
options(error=recover)
options(warn=1)
#
#Read the per-contract sampling criteria (obligations, dates, closure status).
sample.criteria <-read.csv(
  paste(Path,"data\\defense_contract_SP_ContractSampleCriteriaDetailsCustomer.csv",sep=""),
  header=TRUE, sep=",", dec=".", strip.white=TRUE,
  na.strings=c("NULL","NA"),
  stringsAsFactors=FALSE
  )
sample.size<-100000
#Drop contracts starting before the study period
# NOTE(review): strptime() returns POSIXlt; storing that in a data.frame column is
# fragile (the commented-out as.Date() alternative below looks safer) -- confirm.
sample.criteria$LastCurrentCompletionDate<-strptime(sample.criteria$LastCurrentCompletionDate,"%Y-%m-%d")
# as.Date(sample.criteria$LastCurrentCompletionDate)
#Keep contracts starting in/after FY2007 that either completed by end of FY2013 or are closed.
sample.criteria<-subset(sample.criteria,StartFiscal_Year>=2007 & (LastCurrentCompletionDate<=strptime("2013-09-30","%Y-%m-%d") | IsClosed==1))
# NOTE(review): View() is an interactive leftover (opens a data viewer) -- harmless but noisy.
View(sample.criteria)
#Draw the sample without replacement, weighted by absolute obligated amount.
sample.SumofObligatedAmount<-sample.criteria[sample(nrow(sample.criteria)
                                                    , size=sample.size
                                                    , prob=abs(sample.criteria$SumofObligatedAmount)
),]
#Drop the columns that were only needed to apply the sampling criteria.
sample.SumofObligatedAmount<-subset(sample.SumofObligatedAmount,select=-c(StartFiscal_Year,SumofObligatedAmount,IsClosed))
rm(sample.criteria)
#These are a hold over from doing samples with different criteria.
#(Two alternative samples weighted by SumOfbaseandexercisedoptionsvalue and
# Sumofbaseandalloptionsvalue, plus a commented-out CSIScontractID lookup join,
# previously lived here; recover them from version control if needed.)
#
#--Join the contract-level detail tables onto the sample, one file at a time,
#  all keyed by CSIScontractID.  The original code repeated the same
#  read_and_join() call eight times with inconsistent comma styles; this loop
#  keeps the calls uniform.  Order matters -- columns are appended in the order
#  the files are listed, matching the original sequence.
for (join.file in c("defense_contract_contractdiscretization.csv"
                    ,"defense_contract_SP_ContractInterlinkedUnmodifiedSystemEquipmentPlatform.csv"
                    ,"Office_processedCSIScontractIDtoContractingOfficeID_linked.csv"
                    ,"defense_contract_SP_ContractModificationDeltaCustomer.csv"
                    ,"defense_contract_SP_ContractLocationCustomer.csv"
                    ,"Defense_contract_SP_ContractUnmodifiedandOutcomeDetailsCustomer.csv"
                    ,"defense_contract_SP_ContractBucketPlatformPortfolioCustomer.csv"
                    ,"defense_contract_SP_ContractUnmodifiedCompetitionvehicleCustomer.csv"
)){
  sample.SumofObligatedAmount<-read_and_join(Path
                                             ,join.file
                                             ,sample.SumofObligatedAmount
                                             ,"data\\"
                                             ,by="CSIScontractID"
  )
}
#Drop the customer columns that are superseded by the DefenseSubCustomer join below.
sample.SumofObligatedAmount<-subset(sample.SumofObligatedAmount,select=-c(Customer,UnmodifiedCustomer,SubCustomer,UnmodifiedSubCustomer,ObligatedAmountIsArmy,ObligatedAmountIsNavy,ObligatedAmountIsAirForce,ObligatedAmountIsOtherDoD))
#data\\Defense_Contract_SP_ContractDefenseSubCustomer.csv
#NOTE: a stray, active debug(read_and_join) used to sit here.  Every other debug()
#call in this script is commented out, and leaving this one in drops every
#subsequent read_and_join() call into the interactive debugger.  Removed.
sample.SumofObligatedAmount<-read_and_join(Path
                                           ,"Defense_Contract_SP_ContractDefenseSubCustomer.csv"
                                           ,sample.SumofObligatedAmount
                                           ,"data\\"
                                           ,by="CSIScontractID"
)
#
# Use this to add just a single file: reload the previously written sample csv
# (so the earlier joins don't have to be re-run) and join only the tables below.
# NOTE: running this read.csv unconditionally OVERWRITES the in-memory sample
# with the csv written at the bottom of this section, so that file must exist.
# The file name is now built from sample.size (the original hard-coded "100000"
# here while the write.table below used sprintf on sample.size).
sample.SumofObligatedAmount <-read.csv(
  paste("data\\defense_contract_CSIScontractID_sample_",sprintf("%i",sample.size),"_SumofObligatedAmount.csv",sep=""),
  header=TRUE, sep=",", dec=".", strip.white=TRUE,
  na.strings=c("NULL","NA"),
  stringsAsFactors=FALSE
)
#(A commented-out subset() dropping the sampling-criteria/date columns, and two
# commented-out ways of dropping the RnD1to5 columns, previously lived here;
# recover them from version control if needed.)
#
#--Join the R&D, competition/vehicle, and pricing detail tables, keyed by
#  CSIScontractID (same loop pattern as the main join pipeline above).
for (join.file in c("defense_Contract_SP_ContractDetailsR&DCustomer.csv"
                    ,"defense_contract_SP_ContractCompetitionVehicleCustomer.csv"
                    ,"defense_contract_SP_ContractPricingCustomer.csv"
)){
  sample.SumofObligatedAmount<-read_and_join(Path
                                             ,join.file
                                             ,sample.SumofObligatedAmount
                                             ,"data\\"
                                             ,by="CSIScontractID"
  )
}
#--Write the joined sample back out to the same sample.size-derived file name.
write.table(sample.SumofObligatedAmount
            ,file=paste("data\\defense_contract_CSIScontractID_sample_"
                        ,sprintf("%i",sample.size)
                        ,"_SumofObligatedAmount.csv"
                        ,sep=""
            )
            #             ,header=TRUE
            , sep=","
            , row.names=FALSE
            , append=FALSE
)
#
# combined.MCC<-fixed.price.statistics(Path
# ,sample.SumofObligatedAmount
# ,"MajorCommandID"
# )
#
# write.table(combined.MCC
# ,file=paste("data\\defense_office_MajorCommandID_sample_"
# ,sample.size
# ,"_SumofObligatedAmount.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
# rm(combined.MCC)
#
#
# sample.SumofObligatedAmount.gte.2007<-subset(sample.SumofObligatedAmount,StartFiscal_Year>=2007)
#
#
# write.table(sample.SumofObligatedAmount.gte.2007
# ,file=paste("data\\defense_contract_CSIScontractID_sample_"
# ,sample.size
# ,"_SumofObligatedAmount_gte_2007.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
#
# combined.MCC<-fixed.price.statistics(Path
# ,sample.SumofObligatedAmount.gte.2007
# ,"MajorCommandID"
# )
#
# write.table(combined.MCC
# ,file=paste("data\\defense_office_MajorCommandID_sample_"
# ,sample.size
# ,"_SumofObligatedAmount_gte_2007.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
# rm(combined.MCC)
#
#
# sample.SumofObligatedAmount.IsCompeted<-subset(sample.SumofObligatedAmount,IsSomeCompetition==1)
#
# write.table(sample.SumofObligatedAmount.IsCompeted
# ,file=paste("data\\defense_contract_CSIScontractID_sample_"
# ,sample.size
# ,"_SumofObligatedAmount_IsCompeted.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
#
# sample.SumofObligatedAmount.gte.2007.isCompeted<-subset(sample.SumofObligatedAmount.gte.2007,IsSomeCompetition==1)
#
#
# write.table(sample.SumofObligatedAmount.gte.2007.isCompeted
# ,file=paste("data\\defense_contract_CSIScontractID_sample_"
# ,sample.size
# ,"_SumofObligatedAmount_gte_2007_isCompeted.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
# sample.SumofObligatedAmount <-read.csv(
# paste("data\\defense_contract_CSIScontractID_sample_15000_SumofObligatedAmount.csv",sep=""),
# header=TRUE, sep=",", dec=".", strip.white=TRUE,
# na.strings=c("NULL","NA"),
# stringsAsFactors=FALSE
# )
# debug(fixed.price.statistics)
# debug(apply_lookups)
#
# sample.SumOfbaseandexercisedoptionsvalue.gte.2007<-join(
# sample.SumOfbaseandexercisedoptionsvalue.gte.2007,
# CSIScontractOutcomeID.lookup,
# match="first"
# )
#
# write.table(sample.SumOfbaseandexercisedoptionsvalue.gte.2007
# ,file=paste(Path,"data\\defense_contract_CSIScontractID_sample"
# ,sample.size
# ,"_SumOfbaseandexercisedoptionsvalue.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
#
# sample.Sumofbaseandalloptionsvalue.gte.2007<-join(
# sample.Sumofbaseandalloptionsvalue.gte.2007,
# CSIScontractOutcomeID.lookup,
# match="first"
# )
#
# write.table(sample.Sumofbaseandalloptionsvalue.gte.2007
# ,file=paste(Path,"data\\defense_contract_CSIScontractID_sample"
# ,sample.size
# ,"_sample.Sumofbaseandalloptionsvalue.gte.2007.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# )
#
#
# #"defense_SumofObligatedAmount_SumOfbaseandexercisedoptionsvalue_StartFiscal_Year_exercisedweighted"
#
# SpentvExpected<-qplot(log10(SumofObligatedAmount)
# ,log10(SumOfbaseandexercisedoptionsvalue)
# ,data=sample.SumofObligatedAmount
# ,color=IsIDV
# )+geom_abline(slope=1)+geom_abline(slope=1,intercept=3)+facet_wrap(StartFiscal_Year~IsIDV)
#
# png(
# paste(Path
#
# ,"defense_SumofObligatedAmount_SumOfbaseandexercisedoptionsvalue_StartFiscal_Year_obligationweighted"
# ,".png"
# , sep=""
# )
# , width=6#VAR.width
# , height=6#VAR.height
# , units='in'
# , res=300
# )
#
# print(SpentvExpected)
#
# if (!(dev.cur()[[1]]==1)){
# dev.off()
# }
#
# #"defense_SumofObligatedAmount_Sumofbaseandalloptionsvalue_StartFiscal_Year_obligationweighted"
# SpentvExpected<-qplot(log10(SumofObligatedAmount)
# ,log10(Sumofbaseandalloptionsvalue)
# ,data=sample.SumofObligatedAmount
# ,color=IsIDV
# )+geom_abline(slope=1)+geom_abline(slope=1,intercept=3)+facet_wrap(StartFiscal_Year~IsIDV)
#
#
# png(
# paste(Path
#
# ,"defense_SumofObligatedAmount_Sumofbaseandalloptionsvalue_StartFiscal_Year_obligationweighted"
# ,".png"
# , sep=""
# )
# , width=6#VAR.width
# , height=6#VAR.height
# , units='in'
# , res=300
# )
#
# print(SpentvExpected)
#
# if (!(dev.cur()[[1]]==1)){
# dev.off()
# }
#
#
#
# #"defense_SumofObligatedAmount_SumOfbaseandexercisedoptionsvalue_StartFiscal_Year_exercisedweighted"
# SpentvExpected<-qplot(log10(SumofObligatedAmount)
# ,log10(SumOfbaseandexercisedoptionsvalue)
# ,data=sample.SumOfbaseandexercisedoptionsvalue
# ,color=IsIDV
# )+geom_abline(slope=1)+geom_abline(slope=1,intercept=3)+facet_wrap(StartFiscal_Year~IsIDV)
#
#
# png(
# paste(Path
#
# ,"defense_SumofObligatedAmount_SumOfbaseandexercisedoptionsvalue_StartFiscal_Year_exercisedweighted"
# ,".png"
# , sep=""
# )
# , width=6#VAR.width
# , height=6#VAR.height
# , units='in'
# , res=300
# )
#
# print(SpentvExpected)
#
# if (!(dev.cur()[[1]]==1)){
# dev.off()
# }
#
#
# #"defense_SumofObligatedAmount_Sumofbaseandalloptionsvalue_StartFiscal_Year_allweighted"
# SpentvExpected<-qplot(log10(SumofObligatedAmount)
# ,log10(Sumofbaseandalloptionsvalue)
# ,data=massive.data[sample(nrow(massive.data), size=15000, prob=abs(massive.data$Sumofbaseandalloptionsvalue)),]
# ,color=IsIDV
# )+geom_abline(slope=1)+geom_abline(slope=1,intercept=3)+facet_wrap(StartFiscal_Year~IsIDV)
#
#
# png(
# paste(Path
#
# ,"defense_SumofObligatedAmount_Sumofbaseandalloptionsvalue_StartFiscal_Year_allweighted"
# ,".png"
# , sep=""
# )
# , width=6#VAR.width
# , height=6#VAR.height
# , units='in'
# , res=300
# )
#
# print(SpentvExpected)
#
# if (!(dev.cur()[[1]]==1)){
# dev.off()
# }
#System equipment code
#
#
# systemequipmentlist <-read.csv(
# paste(Path,"Lookups\\Lookup_CSIScontractIDforIdentifiedSystemEquipment.csv",sep=""),
# header=TRUE, sep=",", dec=".", strip.white=TRUE,
# na.strings="NULL",
# stringsAsFactors=FALSE
# )
#
# systemequipmentlist<-read_and_join(Path,"LOOKUP_systemequipmentcode.csv",systemequipmentlist)
#
# systemequipmentlist<-subset(systemequipmentlist,SystemEquipmentInSample=TRUE
#
# )
#
# systemequipmentlist<-subset(systemequipmentlist,select=-c(Unseperated
# ,systemequipmentcodeText
# ,systemequipmentshorttext
# ,SystemEquipmentInSample
# )
# )
#
#
# #defense_contract_contractdiscretization.csv
# systemequipmentlist<-read_and_join(Path
# ,"defense_contract_contractdiscretization.csv"
# ,sample.SumofObligatedAmount
# ,"data\\"
# )
#
#
#
# #defense_contract_SP_ContractModificationDeltaCustomer.csv
# systemequipmentlist<-read_and_join(Path
# ,"defense_contract_SP_ContractModificationDeltaCustomer.csv"
# ,sample.SumofObligatedAmount
# ,"data\\"
# )
#
#
# #lookups\\contract_CSIScontractID.csv"
# systemequipmentlist<-read_and_join(Path
# ,"contract_CSIScontractID.csv"
# ,sample.SumofObligatedAmount
# ,"lookups\\"
# )
#
# #data\\Defense_contract_SP_ContractUnmodifiedandOutcomeDetailsCustomer.csv"
# systemequipmentlist<-read_and_join(Path
# ,"Defense_contract_SP_ContractUnmodifiedandOutcomeDetailsCustomer.csv"
# ,sample.SumofObligatedAmount
# ,"data\\"
# )
#
#
# write.table(systemequipmentlist
# ,file=paste(Path,"data\\defense_contract_CSIScontractID_"
# # ,sample.size
# ,"_systemEquipmentCode.csv"
# ,sep=""
# )
# # ,header=TRUE
# , sep=","
# , row.names=FALSE
# , append=FALSE
# ) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.