blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d14a5b84533c7a5daafcf3274dbb7c42dc6c612e
|
05944c8290bb327657629ddf17c8756e14b7057b
|
/session3.R
|
7d3e853e8c94ccc7ca4ef989364652645316f529
|
[] |
no_license
|
nateapathy/hsR
|
f2bd0a020b60803084db50afa6c90eaf7c944c2b
|
4ee0cefb184c611c7302d78a7da1378b40040bbd
|
refs/heads/master
| 2023-06-23T16:41:45.423148
| 2019-01-11T16:17:07
| 2019-01-11T16:17:07
| 388,572,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,379
|
r
|
session3.R
|
##################################################
############### Session 3 R Script ###############
################## hsR tutorials #################
################## Feb 15, 2018 ##################
################### Nate Apathy ##################
##################################################
## LOAD PACKAGES ##
# In case you need to install these packages, uncomment and run this first line.
# NOTE: install.packages() takes a single character VECTOR of package names;
# passing the names as separate arguments would treat the second one as the
# library path, so wrap them in c(...).
# install.packages(c("dplyr","expss","ggplot2","tidyverse","reshape2","ggthemes","skimr"))
library(dplyr)
library(expss)
library(ggplot2)
library(tidyverse)
library(reshape2)
library(ggthemes)
library(skimr)
# Reminder: *always* include the necessary packages at the top of any script.
# It makes life so much easier, and enables easy replicability.
##################################################
## LOAD DATA FILES
# Load the data files from your working directory
# You should already have the five files downloaded, unzipped, and moved into your project folder
# Five files: hix14.Rdata, hix15.Rdata, hix16.Rdata, hix17.Rdata, hix18.Rdata
# These are the raw, unprocessed flat files simply imported and then saved as .Rdata files for import
# The download link is: https://github.iu.edu/natea/hsR/blob/master/hix_rdata_files.zip
# You will need an IU GitHub account and to be added to the repository in order to download
# Double-check that the files are in your project folder (bottom right panel, "Files" tab in RStudio)
load(file="hix14.Rdata")
load(file="hix15.Rdata")
load(file="hix16.Rdata")
load(file="hix17.Rdata")
load(file="hix18.Rdata")
# All five should now appear as "Data" objects in your "Environment" tab (top right panel)
##################################################
## MERGE THE DATASETS INTO ONE & SUBSET TO WHAT WE WANT
# We use the tidyverse for this, and utilize what are called "pipes"
# These pass updated data step by step and apply things all in a row
# This allows us to stack everything one by one to create a master data set
hix1418 <- hix14 %>% rbind(hix15) %>% rbind(hix16) %>% rbind(hix17) %>% rbind(hix18)
# Subset the data down to non-child-only and non-CSR plans
# These complicate analysis and the HIX Compare documentation recommends dropping them
hix1418 <- hix1418[hix1418$CHILDONLY==0 & hix1418$CSR==0,]
# Now cut out the duplicate plans using the HIX Compare methodology for identifying unique plans
# The following fields are used to match:
# year st carrier metal plantype planmarket networkid
# ab_copayinn_tiers *thru* rh_coinsoutofneta
# sp_copayinn_tiers *thru* tehboutofnetfamilymoopa
# This leaves out skilled nursing columns (there are 14 of them) because they are an EHB that can be changed by a rider
# We're going to use the UNIQUE field (col #1) once we compress the data to hold our unique ID for each plan that remains
# check for duplications T/F
# not actually going to run this duplicated() function
# returns a logical vector that matches with the rows that are duplicated
# duplicated(hix1418[,c(2,5,7,9,10,17,19,20:383,398:503)])
# we can also check to make sure the fields we kept are correct
# not going to run this either; huge output
# colnames(hix1418[1:3,c(2,5:7,9,10,17,19,20:383,398:503)])
# now we can just use the duplicated() function and its arguments as our subset for rows
un_hix1418 <- hix1418[!duplicated(hix1418[,c(2,5:7,9,10,17,19,20:383,398:503)]),]
# this cuts us down to 139,516 unique plans, down from 168,177
# now we can generate our unique identifier in the UNIQUE field
# seq_len(nrow(...)) is the safe idiom here: unlike 1:length(...), it yields
# an empty sequence (not c(1, 0)) if the data frame ever ends up with 0 rows
un_hix1418$UNIQUE <- seq_len(nrow(un_hix1418))
# notice we still have 503 variables. UNIQUE was already a field (the first one)
# so we just overwrote whatever was there (they were all NAs, but this is worth checking)
# save the file for faster loading later
# save(un_hix1418, file="un_hix1418.Rdata")
# Note: in 10 lines of code, we have:
#### 1. loaded 5 data sets
#### 2. merged them all into a longitudinal data set
#### 3. removed observations we aren't concerned with analyzing
#### 4. applied a method for identifying unique observations
#### 5. created a new unique identifier field
##################################################
## YOUR TURN
# Each of you have a section below to do something with the un_hix1418 dataset we've created
# Find your section and write whatever you need in order to get the answer/do the thing.
# Some things that may come in handy:
# - A few of these will require the data dictionary as a reference
# - Keep in mind that subsetting by column number is much easier when fields have unweildy names and there are lots of them
# - You'll only need the first 19 columns for all the steps below
# - colnames() can help identify the number of the column you are trying to find
# - the syntax for subsetting is dataframename[rows,columns]
# - table() can help with cross-tabs/counts of variable pairs
# - length() counts how many elements are in a given object
# - skim(dataframename) can be very useful. try it! no need to create an object, just look at it in the Console output
# - you can subset within other functions without changing the object you are subsetting (if you don't overwrite it)
# - example: length(dataframename[dataframename$column1==1 & dataframename$column2==4,])
# - will count the number of observations that match your criteria (like filtering in excel)
##################################################
## Kevin
# Subset the data frame down to just 2017 California plans (create a new data frame object)
# How many "areas" were there in California in 2017?
# What is the average premium in 2017 for a 27-year-old in area 10?
# How does this premium compare to the average premium for a 27-year-old in the whole state?
##################################################
## Casey
# Subset the data frame down to the 2016 plans from two states of your choice (create a new data frame object)
# How many plans are in each of the metallic tiers for each state?
# Which area (from the state first in alphabetical order) had the most silver plan options in 2016?
# Which state had the higher average premium for a 50-year-old purchasing a silver plan? What was that amount?
##################################################
## Saurabh
# Subset the data frame down to plans in Illinois in 2018 (create a new data frame object)
# How many plans did each insurance carrier offer in 2018 in the state?
# What was the most common plan type in area 3?
# Among HMO plans, what is the average premium for a 27-year-old?
##################################################
## Tim
# Subset the data frame down to plans in Colorado and Wyoming in 2017 (create a new data frame object)
# How many plans are in each of the metallic tiers for each state?
# Are there any areas in either state without a silver plan option? Without a gold option?
# Which state had the higher average premium for a family of four purchasing a silver plan? What was that amount?
##################################################
## Riz
# Subset the data frame down to plans in Indiana in 2015 (create a new data frame object)
# How many "areas" were there in Indiana in 2015?
# How many plans did each insurance carrier offer in 2015 in the state?
# What was the average premium for a family of four among the insurance carrier with the most plans offered?
|
e75ae6278e31eec08078bdb0f9d61f62546f99f1
|
78d7ca4e749d5fba192c2dc3c05035e0ef342b87
|
/script.R
|
36113eaad6a03e2aa8fdb83819dac17c4db92e4d
|
[] |
no_license
|
rougerbaptiste/RPCE.last
|
a6b4f993ff39ddebed8b8f30af83ff7cc5aa3e3a
|
3d889d00f57f670cc222ea417ec9284d16133330
|
refs/heads/master
| 2016-09-13T04:17:59.532556
| 2016-04-18T13:44:20
| 2016-04-18T13:44:20
| 56,410,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,989
|
r
|
script.R
|
# --- 8 DPI dataset: load raw measurements and aggregate per treatment cell ---
# NOTE(review): rm(list=ls()) wipes the entire workspace; risky in shared sessions.
rm(list=ls())
library(ggplot2)
# Semicolon-separated measurements taken 8 days post-inoculation (8 DPI).
dpi8 <- read.table("8dpi.csv", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Treatment variables become factors so levels() can drive the loops below.
dpi8$KNO3 <- as.factor(dpi8$KNO3)
dpi8$NaCl <- as.factor(dpi8$NaCl)
dpi8$souches <- as.factor(dpi8$souches)
# Accumulator: one summary row per (souche, KNO3, NaCl) combination.
# NOTE(review): grown with rbind() inside the loop; fine at this size, but
# preallocating (or lapply + one rbind) would scale better.
data <- data.frame()
for (S in levels(dpi8$souches)) {
for (K in levels(dpi8$KNO3)) {
for (N in levels(dpi8$NaCl)) {
# TRUE when this combination has at least one non-missing app.col value.
print(!is.nan(mean(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"app.col"])))
# NOTE(review): this unbraced if() guards ONLY the print() on the next line;
# all the summary statistics below are computed unconditionally.
if(!is.nan(mean(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"app.col"])))
print(c(S,K,N))
# Mean and SD of hypocotyl size (app.col) for this cell.
moy <- mean(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"app.col"], na.rm=T)
sd <- sd(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"app.col"], na.rm=T)
# A single observation has no spread; force sd to 0 instead of NA.
# NOTE(review): the length check reads the "SR" column — confirm it exists in 8dpi.csv.
if(length(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"SR"])==1){sd <- 0}
# Mean/SD of nodule counts plus mean fix+ / fix- nodule counts for this cell.
nod <- mean(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"nodules"], na.rm=T)
nodsd <- sd(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"nodules"], na.rm=T)
fixp <- mean(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"fixp"], na.rm=T)
fixm <- mean(dpi8[dpi8[,"NaCl"]==N & dpi8[,"KNO3"]==K & dpi8[,"souches"]==S,"fixm"], na.rm=T)
# cbind() on mixed types yields a character matrix, so every column of `data`
# arrives as character/factor; the numeric ones are converted back below.
data <- rbind(data,cbind(S,K,N,moy,sd,nod, nodsd, fixm, fixp))
}
}
}
# Undo the character coercion introduced by the cbind() above.
data$moy <- as.numeric(as.character(data$moy))
data$sd <- as.numeric(as.character(data$sd))
data$nod <- as.numeric(as.character(data$nod))
data$nodsd <- as.numeric(as.character(data$nodsd))
data$fixm <- as.numeric(as.character(data$fixm))
data$fixp <- as.numeric(as.character(data$fixp))
# Keep only combinations with usable mean, sd and nodule statistics.
data <- data[!is.nan(data[,"moy"])&!is.na(data[,"sd"])&!is.nan(data[,"nod"]),]
# --- 8 DPI figures: one PDF per response variable ---
# Bars are grouped by the KNO3.NaCl.souche interaction; colour encodes NaCl
# and fill encodes KNO3. Where axis tick labels are suppressed, group letters
# are drawn with annotate() instead.

# Mean hypocotyl size with +/- 1 SD error bars.
p <- ggplot(data, aes(interaction(K,N,S), moy, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity", position = position_dodge())+
geom_errorbar(aes(ymax=moy + sd, ymin=moy - sd), width = 0.2, size = 1)+
# NOTE(review): hard-coded for 16 retained combinations — confirm nrow(data)
# is 16 after the filtering step above, otherwise annotate() will error.
annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl") +
ylab("Moyenne de la taille des hypocotyles (cm)")+
labs(title="Représentation de la taille des hypocotyles en fonction\nde la souche inoculée, du KNO3 et du NaCl à 8 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="taille8.pdf", width=10)
print(p)
dev.off()
# Mean nodule count with +/- 1 SD error bars.
p2 <- ggplot(data, aes(interaction(K,N,S), nod, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
annotate("text", x = 1:16, y = -0.4, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Nombre moyen de nodules")+
labs(title="Représentation du nombre moyen de nodules en fonction\nde la souche inoculée, du KNO3 et du NaCl à 8 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="nodmean8.pdf", width=10)
print(p2)
dev.off()
# Mean number of fix+ (nitrogen-fixing) nodules; error bars and annotations
# intentionally left commented out.
p3 <- ggplot(data, aes(interaction(K,N,S), fixp, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
# geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Moyenne du nombre de nodules fix+")+
labs(title="Représentation du nombre de nodules fix+ en fonction\nde la souche inoculée, du KNO3 et du NaCl à 8 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="nod+mean8.pdf", width=10)
print(p3)
dev.off()
# Mean number of fix- (non-fixing) nodules; same layout as p3.
p4 <- ggplot(data, aes(interaction(K,N,S), fixm, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
# geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Moyenne du nombre de nodules fix-")+
labs(title="Représentation du nombre de nodules fix- en fonction\nde la souche inoculée, du KNO3 et du NaCl à 8 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="nod-mean8.pdf", width=10)
print(p4)
dev.off()
# Round counts before computing the fix+ percentage; note this MUTATES `data`
# for everything downstream.
data$nod <- round(data$nod)
data$fixp <- round(data$fixp)
print(data$fixp)
print(data$nod)
# Percentage of nodules that are fix+ for each combination.
p5 <- ggplot(data, aes(interaction(K,N,S), (fixp/round(nod,0))*100, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
# geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Pourcentage de nodosités fixatrices")+
labs(title="Représentation du pourcentage de nodules fix+ en fonction\nde la souche inoculée, du KNO3 et du NaCl à 8 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="rapfix+nod8.pdf", width=10)
print(p5)
dev.off()
# --- 14 DPI dataset: same pipeline as 8 DPI, plus a shoot/root (S/R) ratio ---
# Clears everything from the 8 DPI half of the script before starting over.
rm(list=ls())
dpi14 <- read.table("14dpi.csv", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Treatment variables become factors so levels() can drive the loops below.
dpi14$KNO3 <- as.factor(dpi14$KNO3)
dpi14$NaCl <- as.factor(dpi14$NaCl)
dpi14$souches <- as.factor(dpi14$souches)
dpi14$nodo <- as.numeric(dpi14$nodo)
# Shoot-to-root mass ratio: aerial part (app.col) over root part (app.rac).
dpi14$SR <- dpi14$app.col / dpi14$app.rac
# Accumulator: one summary row per (souche, KNO3, NaCl) combination.
data <- data.frame()
for (S in levels(dpi14$souches)) {
for (K in levels(dpi14$KNO3)) {
for (N in levels(dpi14$NaCl)) {
# print(!is.nan(mean(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"app.col"])))
# NOTE(review): this unbraced if() guards ONLY the print() on the next line;
# the summary statistics below are computed unconditionally.
if(!is.nan(mean(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"SR"], na.rm=T)))
print(c(S,K,N))
# Mean and SD of the S/R ratio for this cell (vs. app.col in the 8 DPI half).
moy <- mean(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"SR"], na.rm=T)
sd <- sd(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"SR"], na.rm=T)
# A single observation has no spread; force sd to 0 instead of NA.
if(length(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"SR"])==1){sd <- 0}
# Mean/SD of nodule counts (column "nodo" here, "nodules" in the 8 DPI file).
nod <- mean(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"nodo"], na.rm=T)
nodsd <- sd(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"nodo"], na.rm=T)
# Count of non-missing nodule observations; with exactly one, SD is forced to 0.
somme <- sum(!is.na(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"nodo"]))
if(somme == 1){nodsd <- 0}
fixp <- mean(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"fixp"], na.rm=T)
fixm <- mean(dpi14[dpi14[,"NaCl"]==N & dpi14[,"KNO3"]==K & dpi14[,"souches"]==S,"fixm"], na.rm=T)
# cbind() coerces everything to character; numeric columns are restored below.
data <- rbind(data,cbind(S,K,N,moy,sd,nod, nodsd, fixm, fixp))
}
}
}
# Undo the character coercion introduced by the cbind() above.
data$moy <- as.numeric(as.character(data$moy))
data$sd <- as.numeric(as.character(data$sd))
data$nod <- as.numeric(as.character(data$nod))
data$nodsd <- as.numeric(as.character(data$nodsd))
data$fixm <- as.numeric(as.character(data$fixm))
data$fixp <- as.numeric(as.character(data$fixp))
# Keep only combinations with usable mean, sd and nodule statistics.
data <- data[!is.nan(data[,"moy"])&!is.na(data[,"sd"])&!is.nan(data[,"nod"]),]
# --- 14 DPI figures: one PDF per response variable (mirrors the 8 DPI set) ---

# Mean S/R ratio with +/- 1 SD error bars.
p <- ggplot(data, aes(interaction(K,N,S), moy, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity", position = position_dodge())+
geom_errorbar(aes(ymax=moy + sd, ymin=moy - sd), width = 0.2, size = 1)+
# NOTE(review): hard-coded for 19 retained combinations — confirm nrow(data)
# is 19 after filtering, otherwise annotate() will error.
annotate("text", x = 1:19, y = -0.1, label = c(rep("A",3),rep("B",9),"C", rep("NI",6)))+
xlab("Concentrations de KNO3 et de NaCl") +
ylab("Moyenne du ratio S/R")+
labs(title="Représentation du ratio masse de l'appareil caulinaire sur masse racinaire\nen fonction de la souche inoculée, du KNO3 et du NaCl à 14 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)")) +
theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="SR14.pdf", width=10)
print(p)
dev.off()
# Mean nodule count with +/- 1 SD error bars (annotations left disabled here).
p2 <- ggplot(data, aes(interaction(K,N,S), nod, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Nombre moyen de nodules")+
labs(title="Représentation du nombre moyen de nodules en fonction\nde la souche inoculée, du KNO3 et du NaCl à 14 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="nodmean14.pdf", width=10)
print(p2)
dev.off()
# Mean number of fix+ (nitrogen-fixing) nodules.
p3 <- ggplot(data, aes(interaction(K,N,S), fixp, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
# geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Moyenne du nombre de nodules fix+")+
labs(title="Représentation du nombre de nodules fix+ en fonction\nde la souche innoculée, du KNO3 et du NaCl à 14 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="nod+mean14.pdf", width=10)
print(p3)
dev.off()
# Mean number of fix- (non-fixing) nodules.
p4 <- ggplot(data, aes(interaction(K,N,S), fixm, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
# geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Moyenne du nombre de nodules fix-")+
labs(title="Représentation du nombre de nodules fix- en fonction\nde la souche inoculée, du KNO3 et du NaCl à 14 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="nod-mean14.pdf", width=10)
print(p4)
dev.off()
# Round counts before computing the fix+ percentage; note this MUTATES `data`.
data$nod <- round(data$nod)
data$fixp <- round(data$fixp)
print(data$fixp)
print(data$nod)
# Percentage of nodules that are fix+ for each combination.
p5 <- ggplot(data, aes(interaction(K,N,S), (fixp/round(nod))*100, colour=factor(N), fill=factor(K))) +
geom_bar(stat = "identity",position = position_dodge())+
# geom_errorbar(aes(ymax=nod + nodsd, ymin=nod - nodsd), width = 0.2, size = 1)+
# annotate("text", x = 1:16, y = -0.1, label = c("A",rep("B",9),"C", rep("NI",5)))+
xlab("Concentrations de KNO3 et de NaCl\n(KNO3.NaCl.Souche)") +
ylab("Pourcentage de nodosités fixatrices")+
labs(title="Représentation du pourcentage de nodules fix+ en fonction\nde la souche inoculée, du KNO3 et du NaCl à 14 DPI") +
guides(fill = guide_legend(title="KNO3 (mM)"), color = guide_legend(title="NaCl (mM)"))+
# theme(axis.ticks = element_blank(), axis.text.x = element_blank())+
scale_fill_hue(l=40, c=30)
pdf(file="rapfix+nod14.pdf", width=10)
print(p5)
dev.off()
|
e68a97e94958c208e70ed3037fb3aa795ad4da44
|
5fe207e5b903cae727b8b006b9063599d70bc9cd
|
/man/cClass.Rd
|
d4e185b768aca81591a31d268c1bf3752e3990f8
|
[] |
no_license
|
cran/BIOdry
|
dc36fa1158684e5fc79c19ec83f82252ad5690b5
|
a8c849bb7b577debcabe177afde5d9ed9232f8a4
|
refs/heads/master
| 2022-05-11T09:58:57.665427
| 2022-05-02T18:52:02
| 2022-05-02T18:52:02
| 48,850,280
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,062
|
rd
|
cClass.Rd
|
\name{cClass}
\alias{cClass}
\title{Column-class extraction.}
\description{Column names of multilevel data sets are extracted according to
three classes: \code{numeric} values, \code{integer} sequences,
and \code{factor} levels.}
\usage{cClass(rd, cl = "all")}
\arguments{
\item{rd}{\code{data.frame}. Multilevel data series.}
\item{cl}{\code{character} or \code{NULL}. Character vector
of classes to be considered. These can be
'numeric', 'integer', or 'factor'. If \code{'all'}
then all column names of \code{rd} are extracted.}
}
\value{\code{character} names.}
\author{Wilson Lara <wilarhen@gmail.com>, Felipe Bravo <fbravo@pvs.uva.es>}
\examples{
##Multilevel data frame of tree-ring widths:
data(Prings05,envir = environment())
## Names of variables in Prings05 data containing numeric classes:
cClass(Prings05, 'numeric') # 'x'
## Names of variables containing time units:
cClass(Prings05, 'integer') # 'year'
## Names of variables containing factors:
cClass(Prings05, 'factor') # 'sample', 'tree', 'plot'
}
|
bc495b7f73e28ff699cf5f6f27b42a65fbea3859
|
c8d24378e70933c30a99f4cb0097d9eeccca2b30
|
/code/archive/katjaReplication/table6.R
|
cf6f9b86ebe43b71d57e7bc6dc55adc3165ee0f5
|
[] |
no_license
|
emallickhossain/OnlineShoppingSalesTax
|
83fe86d18b8b3755261f8fac3cf0dcf75b3896c0
|
d7f26f0dc1dd0760c3d1cb42717808fecf8bc953
|
refs/heads/master
| 2021-01-14T01:14:21.098275
| 2020-07-09T19:10:30
| 2020-07-09T19:10:30
| 242,553,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,252
|
r
|
table6.R
|
# Replication of "Table 6": builds a transaction-level panel of comScore
# purchases (2006-2014), merges tax/demographic data, and runs two
# regressions of Amazon purchasing on sales-tax collection.
# Loading data
library(data.table)
library(readxl)   # NOTE(review): no longer used after removing the dead read_excel() call below
library(Quandl)
load('/home/mallick/Desktop/comScore/Transactions.rda')
# Restricting to Amazon categories and transactions between 2006-2014
amazon_categories <- c(1:40, 54:56)
start_count <- nrow(transactions)
transactions <- transactions[year >= 2006 & year <= 2014 & prod_category_id %in% amazon_categories]
end_count <- nrow(transactions)
start_count - end_count # Number of transactions removed
rm(amazon_categories, start_count, end_count)
# Generating Amazon dummy
transactions[, 'amazon' := ifelse(domain_name == 'amazon.com', 1, 0)]
setkey(transactions, machine_id, year)
# Merging with demographics (all transactions can be matched)
load('/home/mallick/Desktop/comScore/Demographics.rda')
setkey(demographics, machine_id, year)
fullData <- merge(transactions, demographics)
setkey(fullData, year, zip_code)
rm(demographics, transactions)
# Dropping households with ZIP codes of 99999 because they are invalid
start_count <- nrow(fullData)
fullData <- fullData[zip_code != 99999]
end_count <- nrow(fullData)
start_count - end_count # Number of transactions removed
# Merging with ZIP data (only 350 transactions cannot be matched)
load('/home/mallick/Desktop/comScore/zip_tax.rda')
setkey(zipTax, year, zip_code)
start_count <- nrow(fullData)
fullData <- merge(fullData, zipTax)
end_count <- nrow(fullData)
start_count - end_count
setkey(fullData, state)
rm(zipTax)
# Adding state names
states <- data.table(state = c(state.abb, 'DC'),
                     stateName = c(state.name, 'District of Columbia'))
setkey(states, state)
fullData <- merge(fullData, states)
setkey(fullData, zip_code)
rm(states)
# # Adding county names (13,623 could not be matched to 2014 county names)
# zip_county <- fread("/home/mallick/Desktop/comScore/zipState2014.csv",
#                     select = c('zcta5', 'county14'))
# zip_county <- zip_county[-1]
# setnames(zip_county, c('zip_code', 'county'))
# zip_county$zip_code <- as.numeric(zip_county$zip_code)
# zip_county$county <- as.numeric(zip_county$county)
# zip_county <- zip_county[zip_code != 99999]
# zip_county <- unique(zip_county, by = 'zip_code')
# setkey(zip_county, zip_code)
# start_count <- nrow(fullData)
# fullData <- merge(fullData, zip_county)
# end_count <- nrow(fullData)
# start_count - end_count
# rm(zip_county)
# setkey(fullData, stateName)
# Adding in sales tax collection dates
# BUG FIX: a read_excel() call on AmazonLaws.xls used to precede this fread();
# its result was immediately overwritten (dead store) and the .xls read could
# fail independently, so it was removed.
taxDates <- fread('/home/mallick/Dropbox/Research/OnlineShopping/AmazonLaws.csv',
                  select = c('State', 'DateCollected'))
taxDates$DateCollected <- as.Date(taxDates$DateCollected, format = '%y/%m/%d')
setnames(taxDates, c('stateName', 'collectDate'))
setkey(taxDates, stateName)
start_count <- nrow(fullData)
fullData <- merge(fullData, taxDates)
end_count <- nrow(fullData)
start_count - end_count
rm(taxDates)
# Setting sales tax indicator: 1 once the state collects Amazon sales tax.
# NOTE(review): comparison uses event_date (presumably the transaction date
# from the comScore data) — confirm against the Transactions schema.
fullData[, 'date' := as.Date(paste(year, month, '01', sep = '-'))]
fullData[, 'collect' := ifelse(event_date >= collectDate, 1, 0)]
fullData$collect <- ifelse(is.na(fullData$collect), 0, fullData$collect)
# Deflating to real prices using CPI (FRED series CPIAUCSL via Quandl)
cpi <- setDT(Quandl('FRED/CPIAUCSL', start_date = '2006-01-01'))
setnames(cpi, c('date', 'cpi'))
setkey(cpi, date)
setkey(fullData, date)
fullData <- merge(fullData, cpi)
fullData[, c('realProdPrice', 'realBasketPrice') :=
           .(prod_totprice / cpi * 100, basket_tot / cpi * 100)]
rm(cpi)
# Adding monthYear fixed effect dummy (months since Jan 2006)
fullData[, 'monthYear' := (year - 2006) * 12 + month]
# Generating tau: effective tax rate faced — Amazon purchases only taxed
# once collection starts; non-Amazon purchases always taxed.
fullData[, 'tau' := amazon * collect * ave_tax + (1 - amazon) * ave_tax]
save(fullData, file = '../Desktop/comScore/katjaTable6.rda', compress = TRUE)
# Doing regression (linear probability)
library(data.table)
load('./NewComScore/Data/katjaTable6.rda')
katjaReg <- lm(amazon ~ log(1 + tau) + factor(state) + factor(prod_category_id) + factor(monthYear), data = fullData)
summary(katjaReg)
# Doing regression (Amazon expenditures)
# NOTE(review): `county` is only created by the commented-out zip_county merge
# above, so this aggregation will fail unless that block is re-enabled — confirm.
reg2_data <- fullData[amazon == 1, .(log_exp = log(sum(prod_totprice))), keyby = .(county, year, collect, state)]
katjaReg2 <- lm(log_exp ~ collect + factor(state) + factor(year), data = reg2_data[log_exp >= 0])
|
f3bfe98f1f06866dfcc3c3392a51d8209f990938
|
0476f2bd245afe4b630aeab628499df2d91517db
|
/R/GetIsotopeDistribution.R
|
ed69538ef28caff383bde49efc3b8c23b33c5039
|
[] |
no_license
|
cran/InterpretMSSpectrum
|
d07f32034e3f68ab719c6827a4b1529f8d7fb503
|
ecf9604cfde5dd22a057b17ad2272cde7351157d
|
refs/heads/master
| 2023-07-24T03:18:25.154905
| 2023-07-07T14:00:02
| 2023-07-07T14:00:02
| 67,487,289
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,339
|
r
|
GetIsotopeDistribution.R
|
#' @title GetIsotopeDistribution.
#'
#' @description \code{GetIsotopeDistribution} will generate an isotopic distribution for a given formula.
#'
#' @details not exported
#'
#' @param fml sum formula.
#' @param res MS resolution. Yet experimental, may fail.
#' @param n Number of isotopes to calculate.
#' @param ele_vec Character vector of elements to consider.
#' @param check.fml The 'fml' needs to be in enviPat style, i.e. not CH4 but C1H4. This will be ensured but can be skipped setting check to FALSE to speed up.
#' @param vdetect.detect Will be deprecated soon. Only for testing this enviPat parameter.
#'
#' @return Isotope distribution formatted similar to Rdisop result but more precise using enviPat.
#'   A 2-row matrix: row 1 = m/z values, row 2 = relative intensities summing to 1.
#'
#' @importFrom enviPat check_chemform isopattern envelope vdetect
#'
#' @examples
#' GetIsotopeDistribution("C12H40O2S2Si3")
#'
#' @keywords internal
#' @noRd
#'
GetIsotopeDistribution <- function(fml=NULL, res=NULL, n=2, ele_vec=c("C","H","N","O","P","S","Si"), check.fml=TRUE, vdetect.detect=c("centroid","intensoid")[1]) {
  # Load enviPat's isotope table into this function's environment only, then
  # restrict it to the requested elements with abundance >= 0.1%.
  utils::data("isotopes", package="enviPat", envir=environment())
  isotopes <- isotopes[as.character(isotopes[,"element"]) %in% ele_vec & isotopes[,"abundance"]>=0.001,]
  # BUG FIX: `check.fml` was documented but never honored — the enviPat-style
  # normalization now runs only when requested (default TRUE keeps old behavior).
  if (check.fml) {
    fml <- enviPat::check_chemform(isotopes, chemforms=fml)$new_formula
  }
  # Calculate and transform the isotopic pattern.
  if (is.null(res)) {
    # No resolution given: compute the stick pattern, group peaks closer than
    # 0.2 m/z, and collapse each group to its intensity-weighted mean mass.
    isopat <- enviPat::isopattern(isotopes = isotopes, chemforms = fml, threshold=0, verbose = FALSE, emass = 0.00054858)[[1]]
    g <- GetGroupFactor(x=isopat[,1], gap=0.2)
    theo <- sapply(levels(g), function(x) { c(round(stats::weighted.mean(x = isopat[g==x,1], w = isopat[g==x,2]),4), sum(isopat[g==x,2]/100)) })
  } else {
    # Resolution given: convolve the pattern into an envelope and pick peaks.
    isopat <- enviPat::isopattern(isotopes = isotopes, chemforms = fml, threshold=0, verbose=FALSE, emass = 0.00054858)
    env <- enviPat::envelope(isopat, resolution=res, verbose = FALSE)
    ipt <- enviPat::vdetect(env, detect=vdetect.detect, plotit=FALSE, verbose = FALSE)
    theo <- t(ipt[[1]])
    # For each expected isotope position (monoisotopic mass + k * ~1.003 Da)
    # keep the first detected peak within 0.1 m/z.
    # NOTE(review): which.max() on an all-FALSE mask returns index 1, silently
    # selecting the first peak when no peak is within tolerance — confirm
    # whether a missing-peak case should instead be dropped or zero-filled.
    theo <- sapply(theo[1,1]+(0:n)*1.003, function(mz){ theo[,which.max(abs(theo[1,]-mz)<0.1)] })
  }
  # Keep at most n+1 isotope columns and renormalize intensities to sum to 1.
  theo <- theo[,1:min(c(ncol(theo),(n+1))),drop=FALSE]
  theo[2,] <- round(theo[2,]/sum(theo[2,]),4)
  return(theo)
}
|
f85c53e341a12674f6f11e814c8c40cae70ea341
|
4d0000760bdcb420f51b23f8f571751db7f265fa
|
/man/theme_set_update_ffsched.Rd
|
0280319527a90bcfe6f4b262027f6196fc19502c
|
[
"MIT"
] |
permissive
|
tanho63/ffsched
|
430a9f03ab04c0c13e8f693b25d95c19bb49bc70
|
dbefa7be279ea08efaa562ebb5648962bb197900
|
refs/heads/master
| 2023-06-05T05:32:26.482019
| 2021-06-27T17:03:04
| 2021-06-27T17:03:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 333
|
rd
|
theme_set_update_ffsched.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme.R
\name{theme_set_update_ffsched}
\alias{theme_set_update_ffsched}
\title{Theme for ggplots}
\usage{
theme_set_update_ffsched(...)
}
\arguments{
\item{...}{Extra arguments to pass to \code{ggplot2::theme_update}}
}
\description{
Theme for ggplots
}
|
0ca71f71cfba224c6297bc4267c964522a368efe
|
b54f3f525c94e1deadd8175d5ce0a43230ba4aa0
|
/R codes/predict_class.R
|
70222f2daa3551db7c50bf0c800c14d46c199502
|
[
"Apache-2.0"
] |
permissive
|
AjitR/Kym-credit-score-master
|
75a6967d667a8a9b97c429d6489d17a8ac3b7e60
|
c8118700c71e54846a174b6cbf6efb2db3a07314
|
refs/heads/master
| 2020-07-17T08:52:17.990251
| 2019-09-03T04:26:55
| 2019-09-03T04:26:55
| 205,988,433
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,807
|
r
|
predict_class.R
|
# Score every user document for the current month and write a credit limit
# (and a zeroed credit balance) back to MongoDB.
library(mongolite)
library("nnet")  # NOTE(review): attached but unused — the predict_class() call that needed it is commented out below
# Mongo handles for the source ("users") and destination ("user_info") collections.
users <- mongo(collection = "users", db = "r_db", url = "mongodb://localhost", verbose = TRUE)
user_info <- mongo(collection = "user_info", db = "r_db", url = "mongodb://localhost", verbose = TRUE)
# Current month/year select which aggregated documents get scored.
currentDate <- Sys.Date()
month <- format(currentDate, "%m")
year <- format(currentDate, "%Y")
#month <- 1
#year <- 2017
user <- users$aggregate(
  paste0( '[{"$match":{"month" : ', as.integer(month) , ',"year" :', year, '}}]' )
)
###################
# BUG FIX: `result` was used below without ever being created, which raises
# "object 'result' not found" on the first iteration; preallocate one slot
# per user row.
result <- character(dim(user)[1])
for (i in seq_len(dim(user)[1])) {
  name <- user[i, "name"]
  balance_score_intermediate <- user[i, "total_bank_balance"]
  income_score_intermediate <- user[i, "totalamount_transactions_credit"]
  social_media_score_intermediate <- user[i, "tweets_sentiment"]
  loan_history_score <- user[i, "loan_history_score"]
  repay_score <- user[i, "repay_score"]
  # Normalize raw rupee amounts to comparable score ranges.
  balance_score <- balance_score_intermediate / 100000
  income_score <- income_score_intermediate / 100000
  # Shift/scale sentiment; assumes it lies in [-5, 5] — TODO confirm upstream range.
  social_media_score <- (social_media_score_intermediate + 5) * 10
  # Weighted total: repayment 35%, social 5%, loan history/income/balance 20% each.
  total_score <- (repay_score * 0.35) + (social_media_score * 0.05) + (loan_history_score * 0.2) + (income_score * 0.2) + (balance_score * 0.2)
  print(total_score)
  #data <- data.frame(social_media_score, loan_history_score, balance_score, income_score, repay_score)
  #print(data)
  #predicted_class = predict_class(data)
  #print(predicted_class)
  # Map score bands to credit limits (stored as strings, matching the Mongo schema).
  if (total_score > 66) {
    result[i] <- '2000'
  } else if (total_score > 50 && total_score <= 66) {
    result[i] <- '1000'
  } else {
    result[i] <- '500'
  }
  print(result[i])
  # Upsert the computed limit and reset the balance for this user.
  user_info$update(query = paste0('{"name":"', name,'"}'), update = paste0('{"$set":{"credit_limit": ', result[i], ', "credit_balance" :', as.integer(0),'}}'), upsert = TRUE)
}
|
a026c2a60957139e71de9ccf3efd99c096bd290b
|
a2d1e6c040cf70f9f4b6c224bf767538efc15c61
|
/czesc1/man/htmls_movie.Rd
|
8c0083ecd6f43fab072d6796dc8009539844c4d5
|
[] |
no_license
|
jjankowiak/Filmy
|
3f054542d23abb7fc5c47539d530a9b7689d70a3
|
801683a300df9d4abb5d0893b184be13c2191454
|
refs/heads/master
| 2021-06-02T21:37:32.279871
| 2016-04-02T15:40:34
| 2016-04-02T15:40:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
rd
|
htmls_movie.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/htmls_movie.R
\name{htmls_movie}
\alias{htmls_movie}
\alias{subtitle}
\title{Parse many HTML pages for actors and director}
\usage{
htmls_movie(www, selector)
}
\arguments{
\item{www}{A link - see full cast from imdb.}
\item{selector}{A named character vector of selectors to analyse.}
}
\value{
list of parsed pages or 'NA' if pages don't exist.
}
\description{
Function \code{htmls_movie} parses html pages
}
|
53c942697a4384d8729fbf4c9f8d4cf7fc4825f2
|
f67f623d4ce2f082b89bfad9edaae6e139df141e
|
/3- Getting and cleaning data/week2/week2 Quiz code.R
|
060d530a918d503df323019bde0186c78fc19e7d
|
[] |
no_license
|
pauldublanche/Coursera-Data-Science-Specialization
|
249d17fd5b881dd9d60a2367be8c786ce17fc148
|
4d7db99f4196dee6117cc7531cd737ad97e559fe
|
refs/heads/main
| 2023-04-02T08:06:34.007256
| 2021-04-14T21:31:34
| 2021-04-14T21:31:34
| 350,350,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,479
|
r
|
week2 Quiz code.R
|
### Question 1
## Query the GitHub API (OAuth) for the creation date of jtleek/datasharing.
library(httr)
oauth_endpoints("github")
# BUG FIX: the original call had a stray quote after the `key` argument and a
# trailing comma before `)`, both of which are parse errors. Substitute your
# own GitHub app credentials for the placeholders.
myapp <- oauth_app("github",
                   key = "My Client ID",
                   secret = "My Client Secret"
)
library(httpuv)
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/users/jtleek/repos", gtoken)
stop_for_status(req)
data_json <- content(req)
library(jsonlite)
data_fr <- jsonlite::fromJSON(jsonlite::toJSON(data_json))
names(data_fr)
data_fr[data_fr$full_name == "jtleek/datasharing", "created_at"]
### Question 2
## Run SQL against the ACS microdata with sqldf.
library(sqldf)
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv",
              destfile= "data_w2.csv")
acs <- read.csv("data_w2.csv", header=TRUE)
# BUG FIX: "AGEP \lt< 50" was garbled markup; the intended predicate is a
# plain less-than comparison.
sqldf("select pwgtp1 from acs where AGEP < 50")
### Question 3
## Which sqldf statement is equivalent to unique(acs$AGEP)?
unique(acs$AGEP) == sqldf("select unique * from acs")
unique(acs$AGEP) == sqldf("select AGEP where unique from acs")
unique(acs$AGEP) == sqldf("select distinct AGEP from acs")
unique(acs$AGEP) == sqldf("select distinct pwgtp1 from acs")
### Question 4
## Character counts of selected lines of an HTML page.
con <- url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlcode <- readLines(con)
close(con)
sapply(htmlcode, nchar)[c(10,20,30,100)]
### Question 5
## Read fixed-width data and sum the fourth column.
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
data <- read.fwf(file=url,widths=c(-1,9,-5,4,4,-5,4,4,-5,4,4,-5,4,4), skip = 4)
sum(data[, 4])
|
d6dc62ed75d3bfef8e7bdb391f8955b4061ec86a
|
d0f7612a8fefc15f5645e99769b590c60f402058
|
/Spring2016_PS2.R
|
56d9f9a9f4475fa81f6f8fb2145cd2fe16825c55
|
[] |
no_license
|
jngod2011/nyu-econometrics
|
fd1b1c75d35bab0932e9a4ce47528c30883ad400
|
ed9f4cd501f89257dc8a181a98fdde258b592bfa
|
refs/heads/master
| 2021-01-11T22:59:20.849238
| 2016-11-03T16:44:59
| 2016-11-03T16:44:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,513
|
r
|
Spring2016_PS2.R
|
### PS2 - ASE - Richard Godfrey Due 03 March
## Two-year panel analysis (1972 and 1978) of a distributed-lag crime model:
## pooled OLS, fixed effects and random effects via the plm package, followed
## by specification tests (DW, F, BP-LM, Hausman).
crime_edit <- read.delim("crime_edit.csv", header=T, sep=",")
# ### Use the data in crime.txt for the years 1972 and 1978 for a two-year panel
# data analysis. The model is a simple distributed lag model: log(crimerateit) =
# θ0 + θ1d78t + β1clrprci,t−1 + β2clrprci,t−2 + ci + uit The variable clrprc is
# the clear-up percentage (the percentage of crimes solved). The data are stored
# for two years, with the needed lags given as variables for each year. 1. First
# estimate this equation using a pooled OLS analysis. Comment on the de- terrent
# effect of the clear-up percentage, including interpreting the size of the
# coefficients. Test for serial correlation in the composite error vit assuming
# strict exogeneity. 2. Estimate the equation by fixed effects, and compare the
# estimates with the pooled OLS estimates. Is there any reason to test for
# serial correlation? Optional: obtain heteroscedasticity - robust standard
# errors for the FE estimates. 3. Estimate the model using random effects.
# Indicate how to test for random effects and show the result of your test. 4.
# Using FE analysis, test the hypothesis H0 : β1 = β2. What do you conclude? If
# the hypothesis is not rejected, what would be a more parsimonious model?
# Estimate this model.
library(plm)
class(crime_edit)
# Model: log crime rate on a 1978 dummy and two lags of the clear-up percentage.
model <- log(crime) ~ d78 + clrprc1 + clrprc2
## Pooled Effects
reg1.pool <- plm(model, data=crime_edit, model="pooling", index=c("district","year"))
summary(reg1.pool)
require(car)
# Comment on the deterrent effect of the clear-up percentage, including interpreting the size of the
# coefficients:
# -> Crimes solved par the growth rate of crime by 2% per year each.
# -> DW test for SC
require(lmtest)
dwtest(model, data=crime_edit) # DW test on (u_t, u_t-1)
linearHypothesis(reg1.pool, c("clrprc1","clrprc2"), test="F")
# tests for the joint significance of lags 1 and 2.
## Fixed Effects
reg1.fe <- plm(model, data=crime_edit, model="within", index= c("district","year"))
summary(reg1.fe)
# F test for individual effects (FE vs pooled OLS)
pFtest(reg1.fe,reg1.pool)
# test for individual FE present in the Pool
plmtest(reg1.pool, effect = "individual")
# no need to run a SC test as the model is correctly specified
## Random
reg1.rand <- plm(model, data=crime_edit, model="random", index= c("district","year"))
summary(reg1.rand)
### B-P LM test to determine RE or Pooled OLS.
# Ho null: variance of unobserved heterogeneity is zero.
# H1 alt: variance_alpha is not zero
# Acceptance of null => more efficient estimates via OLS
plmtest(reg1.pool, type="bp")
### Hausman test to determine FE or RE
# H0: corr[X_it, a_i] = 0
# H1: corr[X_it, a_i] != 0
phtest(reg1.fe,reg1.rand)
### Wald test on hypothesis H0: β1 = β2
# NOTE(review): model2 below is byte-identical to `model`, so reg2.fe re-fits
# the SAME unrestricted FE model and the (commented-out) waldtest/anova
# comparison is vacuous. The restricted model for H0: β1 = β2 would replace
# clrprc1 + clrprc2 with I(clrprc1 + clrprc2) -- confirm intent before use.
model2 <- log(crime) ~ d78 + clrprc1 + clrprc2
reg2.fe <- plm(model2, data=crime_edit, model="within", index= c("district","year"))
#waldtest(reg1.fe, reg2.fe)
#anova(reg1.fe, reg2.fe)
### Qn 3
# ML
# Posterior density
# Compute posterior mean in 3D plot
#install.packages("tcltk")
#install.packages("TeachingDemos")
#library(tcltk)
#library(TeachingDemos)
## E(theta)=x/(x+y)
# x<- y <- seq(1, 10, len=100)
#z <- outer(x,y, FUN=function(x,y) x/(x+y))
#filled.contour(x,y,z, main="3D plot")
#filled.contour(x,y,z, color.palette = heat.colors)
#filled.contour(x,y,z, color.palette = colorRampPalette(c("red","white","blue")) )
#persp(x,y,z, shade=0.75, col="yello")
#rotate.persp(x,y,z)
#view <- persp(x,y,z, shade=0.75, col="red")
|
a73b4b73752dc6e347e0c27f0e1fd9110547e609
|
83773b7e19d021b6e0edfe12f11334dfeb5f2875
|
/LSE_inference.R
|
f31600bbdc80e4ed2b820e0d75059cac0d7603d0
|
[] |
no_license
|
shizelong1985/MSAR_code
|
6f1ed79fa499711b5df98221b0d922f248c1337e
|
fcbef6674e457335da7c61666eb3fa131a3792a0
|
refs/heads/master
| 2023-03-20T19:04:18.220442
| 2021-03-13T08:17:58
| 2021-03-13T08:17:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,524
|
r
|
LSE_inference.R
|
## Build Sig1 = 4 * E{Q(theta) Q(theta)^T}, the first-order variance matrix of
## the LSE estimating equations for a multivariate spatial autoregressive
## (MSAR) model. The top-left p^2 x p^2 block covers the spatial parameters,
## the bottom-right pq x pq block the regression coefficients, and the
## off-diagonal blocks their cross terms.
##
## NOTE(review): the matrix arguments (W, D, Sige, Omee, S, tS, S1, OmeeS,
## tSe, SYX, IX, IXbeta) are pre-computed by the caller and their exact
## construction is not visible here -- verify against the calling code.
## Relies on the Matrix package (Diagonal(), sparse kronecker/%*% methods).
MSAR.Lse.Sig1<-function(vecY, W, ww, N, p, q, D, Sige, Omee,
S, tS, S1, m, OmeeS, tSe, SYX, IX, IXbeta)
{
### define frequently used matrices
In = Diagonal(N, x = 1)
Inp = Diagonal(N*p, x = 1)
### by eigenvalue decomposition, we have \Sigma_e = Q Lambda t(Q)
### here Q = ee$vectors
ee = eigen(Sige)
Sige_half = Matrix(t(sqrt(ee$values)*t(ee$vectors))) # QLambda^{1/2}
# Kronecker expansions of the half-covariance used throughout the loops.
ISige_half = kronecker(Sige_half, In)
tISige_half = kronecker(t(Sige_half), In)
IOmee_half = kronecker(Omee%*%Sige_half, In)
### (I-t(D)\otimes W)^{-1}(X\beta)
SIXbeta = S1%*%IXbeta
### matrices used
mtSe = m*tSe
m2tSe = m^2*tSe
Sem2tSe = OmeeS%*%(m2tSe)
### first order: E(Q_{j1j2}^d Q_{k1k2}^d)
# A1..A4 cache per-(j1,j2) gradient products so the quadruple loop below only
# needs cheap elementwise sums; G and dM accumulate column-wise contributions.
A1 = list()
A2 = list()
A3 = list()
A4 = list()
G = matrix(0, nrow = N*p, ncol = p^2)
dM = matrix(0, nrow = N*p, ncol = p^2)
# mS1 = as.matrix(S1)
# NOTE(review): tmp2 is computed but never used below.
tmp2 = tISige_half %*% tSe
tmp3 = tISige_half %*% OmeeS
#tmp4 = as.matrix(tmp3 %*% m2tSe)
tmp4 = tmp3 %*% m2tSe
right = S1%*%ISige_half
# mS1 = as.matrix(S1)
### first order: E(Q_{j1j2}^d Q_beta)
Sig1dx = matrix(0, p^2, p*q)
SSX = -as.matrix(Sem2tSe) %*% S %*% as.matrix(m2tSe) %*% IX
mOmeeS = as.matrix(OmeeS)
mSem2tS = as.matrix(Sem2tSe)
gc()
#cat("now start computational\n")
# First pass: for each spatial parameter index pair (j1, j2), build the
# gradient matrices and cache the products needed later.
for (j1 in 1:p)
{
for (j2 in 1:p)
{
#cat(j1, j2, "\n")
jj = (j2-1)*p+j1 # jj is the row
# Single-entry indicator matrix E_{j2,j1} and its transpose.
Ij2j1 = Matrix(0, nrow = p, ncol = p)
Ij2j1[j2, j1] = 1
Ij1j2 = t(Ij2j1)
### matrices gradients
Se_g = -kronecker(Omee%*%Ij2j1, W)
S_g = -kronecker(Ij2j1, W)
V_g = kronecker(Ij1j2%*%Omee%*%t(D)+D%*%Omee%*%Ij2j1, ww)
m_g = -m^2*diag(V_g)
### calculate tr(Mj1j2 Mk1k2)
A1[[jj]] = m*m_g*tS + m^2*t(S_g)
A2[[jj]] = as.matrix(OmeeS%*%A1[[jj]])
A3[[jj]] = m2tSe%*%S_g
A4[[jj]] = as.matrix(mSem2tS %*% S_g)
A1[[jj]] = as.matrix(A1[[jj]])
A3[[jj]] = as.matrix(A3[[jj]])
# Column jj of G: the three gradient contributions applied to SYX / vecY.
G1j = tmp3 %*% ((m*m_g)*tSe) %*% SYX
G2j = tmp3 %*% ((m^2)*t(Se_g)) %*% SYX
G3j = tISige_half %*% A4[[jj]] %*% vecY
G[,jj] = G1j[,1] + G2j[,1] + G3j[,1]
Sig1dx[jj,] = as.numeric(t(SIXbeta)%*%t(S_g)%*%SSX)
#Sig1dx[jj,] = as.numeric(t(SSX)%*%S_g%*%vecY)
### calculate diag(Mj1j2)
# rowSums(A * t(B)) computes diag(A %*% B) without forming the product.
left = tmp4 %*% S_g
dM[,jj] = rowSums(left*t(right)) + rowSums((tISige_half%*%A2[[jj]])* t(IOmee_half))
gc()
}
}
# Fourth-moment excess (kurtosis term) of the whitened residual vector.
wdel = as.vector(kronecker((solve(Sige_half)), In)%*%SYX)
del4 = mean(wdel^4) - 3*mean(wdel^2)^2
GG = crossprod(G)
dM2 = crossprod(dM)
#cat("now no computational\n")
### calculate
Sig1d1 = matrix(0, p^2, p^2)
# Sig1d2 = matrix(0, p^2, p^2)
# Sig1d3 = matrix(0, p^2, p^2)
# Sig1d4 = matrix(0, p^2, p^2)
# Second pass: fill the upper triangle of Sig1d1 from the cached A* products
# (the matrix is symmetric, so kk >= jj suffices; mirrored below).
for (j1 in 1:p)
{
for (j2 in 1:p)
{
for (k1 in 1:p)
{
for (k2 in 1:p)
{
#cat(j1,j2,k1,k2,'\n')
jj = (j2-1)*p+j1 # jj is the row
kk = (k2-1)*p+k1 # kk is the column
if (kk>=jj)
{
#cat(kk, jj, "\n")
### E(Q_{j1j2}^dQ_{k1k2}^d)
### the same as Sig1d[jj,kk] = as.numeric(tr(M_g[[jj]]%*%t(M_g[[kk]])) + tr(M_g[[jj]]%*%M_g[[kk]])+ t(U_g[[jj]])%*%U_g[[kk]])
### calculate tr(Mj1j2 Mk1k2)
Sig1d1[jj,kk] = sum(A2[[jj]]*t(A2[[kk]]))+sum(A4[[jj]]*t(A1[[kk]]))+
sum(A4[[kk]]*t(A1[[jj]])) + sum(A3[[jj]]*t(A3[[kk]]))
# Sig1d2[jj,kk] = sum(M_g[[jj]] * (t(M_g[[kk]])))
#
# Sig1d3[jj,kk] = sum(M_g[[jj]] * ( M_g[[kk]])) +
# sum(U_g[[jj]]*U_g[[kk]])
# Sig1d4[jj,kk] = del4*sum(diag(M_g[[jj]])*diag(M_g[[kk]]))
# Sig1d[jj,kk] = sum(M_g[[jj]] * (t(M_g[[kk]]) + M_g[[kk]])) +
# sum(U_g[[jj]]*U_g[[kk]]) +
# del4*sum(diag(M_g[[jj]])*diag(M_g[[kk]]))
}
gc()
}
}
}
}
# Mirror the upper triangle to complete the symmetric matrix.
Sig1d1[lower.tri(Sig1d1)] = t(Sig1d1)[lower.tri(Sig1d1)]
Sig1d = Sig1d1 + GG + dM2*del4
### first order: E(Q_beta Q_beta^\top)
Sig1x = t(IX)%*%Sem2tSe%*%S%*%(m2tSe)%*%IX
### the exact form of Sig1 = E{Q(theta)Q(theta)^\top}
# Assemble the full block matrix: spatial block, cross blocks, beta block.
Sig1 = matrix(0, nrow = p^2+p*q, ncol = p^2+p*q)
Sig1[1:p^2, 1:p^2] = Sig1d
Sig1[1:p^2, (p^2+1):(p^2+p*q)] = Sig1dx
Sig1[(p^2+1):(p^2+p*q), 1:p^2] = t(Sig1dx)
Sig1[(p^2+1):(p^2+p*q),(p^2+1):(p^2+p*q)] = as.matrix(Sig1x)
Sig1 = 4*Sig1
gc()
return(Sig1)
}
|
e3dbe691802542d22d493ec082d0154d01cc3886
|
bc3a58c0f3abd24f4f64f641152c09b79efefe38
|
/man/geno_pca_pooled_addPC2GenoDS.Rd
|
a63e9d50c834024b718e8259088cd4607e6ec0ac
|
[
"MIT"
] |
permissive
|
isglobal-brge/dsOmics
|
96aa2594cbe009f2899d99fdc5be43a96f50d6bf
|
78fee19320cdf360db7ec1aed2fb07ee4c533951
|
refs/heads/master
| 2023-04-07T09:23:17.202083
| 2023-03-15T09:31:40
| 2023-03-15T09:31:40
| 158,839,360
| 1
| 12
|
MIT
| 2021-02-02T10:21:06
| 2018-11-23T13:55:17
|
R
|
UTF-8
|
R
| false
| true
| 484
|
rd
|
geno_pca_pooled_addPC2GenoDS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PCADS.R
\name{geno_pca_pooled_addPC2GenoDS}
\alias{geno_pca_pooled_addPC2GenoDS}
\title{Add PCA results to the phenotype slot}
\usage{
geno_pca_pooled_addPC2GenoDS(geno, pca)
}
\arguments{
\item{geno}{\code{GenotypeData} object}
\item{pca}{\code{data.frame} of the PCA results}
}
\value{
\code{GenotypeData} object
}
\description{
Add the PCA results to be used on an association analysis as covariates
}
|
b3823e3b73d0db2cbd6b37db1920980c5ad94076
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/intrinsicDimension/examples/ide.Rd.R
|
7bfaee91ca73f96c67b9a1261cf4b2ec67948911
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 462
|
r
|
ide.Rd.R
|
## Auto-extracted examples for the intrinsicDimension `ide` help topic:
## estimate intrinsic dimension locally, globally and pointwise.
library(intrinsicDimension)
### Name: ide
### Title: Intrinsic Dimension Estimation
### Aliases: localIntrinsicDimension globalIntrinsicDimension
### pointwiseIntrinsicDimension
### ** Examples
# Generate sample data with hyperBall(), then run the three estimators on it.
# NOTE(review): argument semantics of hyperBall(100, 4, 15, .05) not visible
# here -- see ?hyperBall in the intrinsicDimension package.
data <- hyperBall(100, 4, 15, .05)
localIntrinsicDimension(data, .method='essLocalDimEst', ver = 'a', d = 1)
globalIntrinsicDimension(data, 'dancoDimEst', k = 8, D = 20)
pointwiseIntrinsicDimension(data, .method='maxLikPointwiseDimEst', k = 8, dnoise = NULL)
|
17cb3fc87f4db29bb54a50e3caa87bff480a2ce9
|
118e7bdbe4353670ee1b39a71ec6824177f1260d
|
/beta_comparison_using_r/beta_comparison_over_time.R
|
2efb6ca18cdfe8a374cd429406dc777ee09deb8d
|
[
"MIT"
] |
permissive
|
GabrielReisR/econometrics
|
547a2d23682bd34dab5845ba51715173f768a16f
|
4a0f01c5ca360d9ef055e55f08c49638be2a45c2
|
refs/heads/main
| 2023-04-01T23:48:43.135204
| 2021-04-02T15:14:31
| 2021-04-02T15:14:31
| 353,723,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,922
|
r
|
beta_comparison_over_time.R
|
# Initializing ====
#' This code is based upon the great work of Henrique Martins
#' https://www.linkedin.com/in/henriquecastror/
#' Post that this was based on:
#' https://henriquemartins.net/post/2020-08-18-betas/
#'
#' The originality here refers to the creation of a gif showing the betas over
#' time, with annotations and markers to enhance comprehension

# Reading libraries
library(BatchGetSymbols)
library(dplyr)
library(gganimate)
library(ggplot2)
library(gifski)
library(plotly)
library(roll)
library(tidyquant)
library(tidyr)

theme_set(theme_classic())

# Shared parameters ====
first.date <- "2015-01-01"
last.date <- "2020-08-18"
freq.data <- 'daily'
window <- 230  # rolling window (trading days) for covariance/variance/beta

# Helper: rolling beta against the Ibovespa ====
# REFACTOR: the original script duplicated this entire pipeline once per
# ticker; it is now a single parameterized function.
#
# Downloads daily prices for `ticker` and the ^BVSP index over the shared
# date range, computes daily log returns, and derives the rolling-window
# beta = cov(index, asset) / var(index). Returns a data frame with columns
# return.x (index), return.y (asset), var, cov and beta, with the initial
# NA rows (rolling-window warm-up) removed.
compute_rolling_beta <- function(ticker) {
  index_data <- BatchGetSymbols(tickers = "^BVSP",
                                first.date = first.date,
                                last.date = last.date,
                                thresh.bad.data = 0.5,
                                freq.data = freq.data)
  asset_data <- BatchGetSymbols(tickers = ticker,
                                first.date = first.date,
                                last.date = last.date,
                                thresh.bad.data = 0.5,
                                freq.data = freq.data)

  # Daily log returns from adjusted prices.
  daily_log_return <- function(prices) {
    prices %>%
      tq_transmute(
        select = price.adjusted,
        mutate_fun = periodReturn,
        period = 'daily',
        col_rename = 'return',
        type = 'log'
      )
  }

  # joining data (suffixes: .x = index, .y = asset, as in the original)
  ret <- daily_log_return(index_data$df.tickers) %>%
    left_join(daily_log_return(asset_data$df.tickers), by = "ref.date")

  # rolling variance, covariance and beta
  ret$var <- roll_cov(ret$return.x, ret$return.x, width = window)
  ret$cov <- roll_cov(ret$return.x, ret$return.y, width = window)
  ret$beta <- ret$cov / ret$var

  # excluding missings (original compared beta != "NA", which is equivalent
  # to dropping NA betas; use the explicit is.na test)
  subset(ret, !is.na(ret$beta))
}

# Creating MGLU and VVAR series ====
ret_mglu <- compute_rolling_beta("MGLU3.SA")
ret_vvar <- compute_rolling_beta("VVAR3.SA")

# Joining and pivoting MGLU & VVAR ====
# creating final dataframe: ret_total
ret_total <- ret_mglu %>%
  inner_join(ret_vvar, by = "ref.date", suffix = c("_MGLU3", "_VVAR3"))

head(ret_total)

# pivoting the beta columns into long format (one row per date x stock)
ret_long <- ret_total %>%
  pivot_longer(
    cols = starts_with("beta"),
    names_to = "stock",
    names_pattern = "beta_(.*)",
    values_to = "beta"
  )

ret_long

# Creating final gif plot ====
p <- ret_long %>%
  # Initial aesthetics
  ggplot(aes(x = ref.date, y = beta, colour = stock)) +
  # Creating line geom
  geom_line(size = 0.8) +
  # Theme chosen: theme_minimal()
  theme_minimal() +
  # Labeling axis
  labs( y = "", x="", title = "MGLU3 X VVAR3: Comparing Betas Over Time (2016-2020)") +
  # Choosing to show yintercept '1' (to better compare betas around this line)
  geom_hline(yintercept = 1, color = "black", size = .1) +
  # Choosing the colors of lines using ggplot2::scale_color_brewer
  scale_color_brewer(name = "", palette = "Set2") +
  # Annotations
  annotate(geom = "point", x = as.Date("2020-01-29"), y = 1.22,
           size = 10, shape = 21, fill = "transparent") +
  annotate(geom = "text", x = as.Date("2019-06-29"), y = 0.22,
           label = "In January 29th of 2020, \n VVAR3's Beta surpasses MGLU3's") +
  transition_reveal(ref.date)

# Animating into gif object 'p'
p <- animate(p,
             # end_pause indicates the amount to be paused after ending
             end_pause = 10,
             renderer = gifski_renderer())
p

# Saving gif object 'p'
anim_save("beta_comparison_mglu_vvar.gif", p)
|
7b0e7a2752a69e6212935f00983b151324d43c3e
|
c459dd32d88158cb064c3af2bc2ea8c7ab77c667
|
/recluster/recluster_cell_groups_in_integrated_data/findallmarkers_roc.R
|
f9ca0f6b62a7f4111018fab6fcc225fb6886b843
|
[] |
no_license
|
ding-lab/ccRCC_snRNA_analysis
|
d06b8af60717779671debe3632cad744467a9668
|
ac852b3209d2479a199aa96eed3096db0b5c66f4
|
refs/heads/master
| 2023-06-21T15:57:54.088257
| 2023-06-09T20:41:56
| 2023-06-09T20:41:56
| 203,657,413
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,056
|
r
|
findallmarkers_roc.R
|
#!/usr/bin/env Rscript
## Run Seurat::FindAllMarkers with the ROC test on a clustered Seurat object,
## annotate the resulting marker genes with known cell-type labels, and write
## the annotated table to a TSV file.
##
## Positional arguments:
##   1. output directory
##   2. output filename
##   3. path to the Seurat object (.rds)
##   4. path to the gene -> cell type marker table
## library
packages = c(
  "ggplot2",
  "Seurat",
  "dplyr",
  "plyr",
  "data.table"
)
# NOTE(review): plyr is attached after dplyr; only mapvalues() is used from
# plyr here, so the masking of dplyr verbs does not bite -- confirm if edited.
for (pkg_name_tmp in packages) {
  if (!(pkg_name_tmp %in% installed.packages()[,1])) {
    # BUG FIX: the original only printed a message for a missing package and
    # then called library() anyway, which aborted with an uninformative
    # error. Fail fast with a clear message instead.
    stop(paste0("No ", pkg_name_tmp, " Installed!"))
  } else {
    print(paste0("", pkg_name_tmp, " Installed!"))
  }
  library(package = pkg_name_tmp, character.only = T, quietly = T)
}
cat("Finish loading libraries!\n")
cat("###########################################\n")
## get the command-line arguments
args = commandArgs(trailingOnly=TRUE)
## argument 1: directory to the output
path_output_dir <- args[1]
cat(paste0("Path to the output directory: ", path_output_dir, "\n"))
cat("###########################################\n")
## argument 2: filename for the output file
path_output_filename <- args[2]
cat(paste0("Filename for the output: ", path_output_filename, "\n"))
cat("###########################################\n")
path_output <- paste0(path_output_dir, path_output_filename)
## argument 3: path to seurat object
path_srat <- args[3]
cat(paste0("Path to the seurat object: ", path_srat, "\n"))
cat("###########################################\n")
## argument 4: path to the cell type marker table
path_gene2celltype_df <- args[4]
cat(paste0("Path to the cell type marker table: ", path_gene2celltype_df, "\n"))
cat("###########################################\n")
## input cell type marker table
gene2celltype_df <- fread(input = path_gene2celltype_df, data.table = F)
cat("finish reading the cell type marker table!\n")
cat("###########################################\n")
## input srat
cat(paste0("Start reading the seurat object: ", "\n"))
srat <- readRDS(path_srat)
print("Finish reading the seurat object!\n")
cat("###########################################\n")
## run findallmarkers with the ROC (classification power) test
markers_roc <- FindAllMarkers(object = srat, test.use = "roc", only.pos = T, return.thresh = 0.5)
print("Finish running FindAllMarkers!\n")
cat("###########################################\n")
## keep only markers with positive cluster-distinguishing power
markers_roc <- markers_roc %>%
  filter(power > 0)
## annotate genes to cell types (NA for genes absent from the marker table)
# REFACTOR: the four annotation columns were copy-pasted lines differing only
# in the target column name; build them in a loop instead.
for (celltype_col in c("Cell_Type_Group", "Cell_Type1", "Cell_Type2", "Cell_Type3")) {
  markers_roc[[celltype_col]] <- mapvalues(
    x = ifelse(markers_roc$gene %in% gene2celltype_df$Gene, markers_roc$gene, NA),
    from = gene2celltype_df$Gene,
    to = gene2celltype_df[[celltype_col]])
}
## write output
write.table(markers_roc, file = path_output, quote = F, sep = "\t", row.names = F)
cat("Finished saving the output\n")
cat("###########################################\n")
|
f8a0088d5a38f3d71236f14b1448b995ed5fe5f7
|
d03fa242790f0fae15250021be21c5594ce0529d
|
/man/totlos.fs.Rd
|
46a3a1af025745f12bd780e97657cd649f5b24af
|
[] |
no_license
|
Rumenick/flexsurv-dev
|
31efb020780f97787650070c2da5f52536648f66
|
7d4ed2f8c59b52626ae35ec347c4e58e91dddfb5
|
refs/heads/master
| 2021-01-16T21:13:04.435349
| 2015-02-13T16:33:13
| 2015-02-13T16:33:13
| 30,904,969
| 1
| 0
| null | 2015-02-17T06:09:20
| 2015-02-17T06:09:20
| null |
UTF-8
|
R
| false
| false
| 5,621
|
rd
|
totlos.fs.Rd
|
\name{totlos.fs}
\alias{totlos.fs}
\title{Total length of stay in particular states for a fully-parametric,
time-inhomogeneous Markov multi-state model}
\description{
The matrix whose \eqn{r,s} entry is the expected amount of time
spent in state \eqn{s} for a time-inhomogeneous, continuous-time Markov
multi-state process that starts in state \eqn{r}, up to a maximum time
\eqn{t}. This is defined
as the integral of the corresponding transition probability up to that time.
}
\usage{
totlos.fs(x, trans, t=1, newdata=NULL, ci=FALSE, tvar="trans",
sing.inf=1e+10, B=1000, cl=0.95, ...)
}
\arguments{
\item{x}{A model fitted with \code{\link{flexsurvreg}}. See
\code{\link{msfit.flexsurvreg}}
for the required form of the model and the data.
Additionally, this must be a Markov / clock-forward model,
but can be time-inhomogeneous. See the package vignette for further
explanation.
}
\item{trans}{Matrix indicating allowed transitions. See
\code{\link{msfit.flexsurvreg}}.}
\item{t}{Time or vector of times to predict up to. Must be finite.}
\item{newdata}{A data frame specifying the values of covariates in
the fitted model, other than the transition number. See
\code{\link{msfit.flexsurvreg}}.
}
\item{ci}{Return a confidence interval calculated by simulating from
the asymptotic normal distribution of the maximum likelihood
estimates. Turned off by default, since this is computationally
intensive. If turned on, users should increase
\code{B} until the results reach the desired precision.}
\item{tvar}{Variable in the data representing the transition type.}
\item{sing.inf}{If there is a singularity in the observed hazard,
for example a Weibull distribution with \code{shape < 1} has infinite
hazard at \code{t=0}, then as a workaround, the hazard is assumed to
be a large finite number, \code{sing.inf}, at this time. The
results should not be sensitive to the exact value assumed, but
users should make sure by adjusting this parameter in these cases.
}
\item{B}{Number of simulations from the normal asymptotic distribution
used to calculate variances. Decrease for greater speed at the
expense of accuracy.}
\item{cl}{Width of symmetric confidence intervals, relative to 1.}
\item{...}{Arguments passed to \code{\link{ode}} in \pkg{deSolve}.}
}
\value{
The matrix of lengths of stay \eqn{T(t)}, if \code{t} is of length 1, or a
list of matrices if \code{t} is longer.
If \code{ci=TRUE}, each element has attributes \code{"lower"} and
\code{"upper"} giving matrices of the corresponding confidence limits.
These are formatted for printing but may be extracted using
\code{attr()}.
The result also has an attribute \code{P} giving the transition
probability matrices, since these are unavoidably computed as a side
effect. These are suppressed for printing, but can be extracted with
\code{attr(...,"P")}.
}
\details{
This is computed by solving a second order extension of the Kolmogorov
forward differential equation numerically, using the methods in the
\code{\link{deSolve}} package. The equation is expressed as a linear
system
\deqn{\frac{dT(t)}{dt} = P(t)}
\deqn{\frac{dP(t)}{dt} = P(t) Q(t)}
and solved for \eqn{T(t)} and \eqn{P(t)} simultaneously, where
\eqn{T(t)} is the matrix of total lengths of stay, \eqn{P(t)} is the transition
probability matrix for time \eqn{t}, and \eqn{Q(t)} is the transition
hazard or intensity as a function of \eqn{t}. The initial conditions
are \eqn{T(0) = 0} and \eqn{P(0) = I}.
Note that the package \pkg{msm} has a similar method
\code{totlos.msm}. \code{totlos.fs} should give the same results as
\code{totlos.msm} when both of these conditions hold:
\itemize{
\item the time-to-event distribution is exponential for all
transitions, thus the \code{flexsurvreg} model was fitted with
\code{dist="exp"}, and is time-homogeneous.
\item the \pkg{msm} model was fitted with \code{exacttimes=TRUE},
thus all the event times are known, and there are no time-dependent covariates.
}
\pkg{msm} only allows exponential or piecewise-exponential
time-to-event distributions, while \pkg{flexsurvreg} allows more
flexible models. \pkg{msm} however was designed in particular for
panel data, where the process is observed only at arbitrary times,
thus the times of transition are unknown, which makes flexible models
difficult.
This function is only valid for Markov ("clock-forward") multi-state
models, though no warning or error is currently given if the model is
not Markov. See \code{\link{totlos.simfs}} for the equivalent for
semi-Markov ("clock-reset") models.
}
\seealso{
\code{\link{totlos.simfs}}, \code{\link{pmatrix.fs}}, \code{\link{msfit.flexsurvreg}}.
}
\examples{
# BOS example in vignette, and in msfit.flexsurvreg
bexp <- flexsurvreg(Surv(Tstart, Tstop, status) ~ trans,
data=bosms3, dist="exp")
tmat <- rbind(c(NA,1,2),c(NA,NA,3),c(NA,NA,NA))
# predict 4 years spent without BOS, 3 years with BOS, before death
# As t increases, this should converge
totlos.fs(bexp, t=10, trans=tmat)
totlos.fs(bexp, t=1000, trans=tmat)
totlos.fs(bexp, t=c(5,10), trans=tmat)
# Answers should match results in help(totlos.simfs) up to Monte Carlo
# error there / ODE solving precision here, since with an exponential
# distribution, the "semi-Markov" model there is the same as the Markov
# model here
}
\author{Christopher Jackson \email{chris.jackson@mrc-bsu.cam.ac.uk}.}
\keyword{models,survival}
|
2cdd287fa683bc8bbf8d9e6fdebfd640c577efb1
|
ab6b305b5b85bf7a97adf4f96a2c005d2d55a16f
|
/Bachelor thesis/Podstawowe_statystyki_opisowe_korelacja.R
|
c24ad813eb3cdd992e683e122c12609c8b004e93
|
[] |
no_license
|
Smialku/Statistical-Research
|
7b368fe7b2dd5ee4650683f1d61b4f536c359dfe
|
62b571db792c23e0893f217818330f9914afcdd9
|
refs/heads/master
| 2022-12-24T12:42:47.295131
| 2020-10-06T10:05:33
| 2020-10-06T10:05:33
| 300,281,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
Podstawowe_statystyki_opisowe_korelacja.R
|
## Bachelor thesis: basic descriptive statistics and correlation analysis for
## three sector datasets (manufacturing, agriculture, construction).
# NOTE(review): unconditional install.packages() re-installs on every run;
# consider guarding with requireNamespace() or removing from the script.
install.packages("klaR")
install.packages("devtools")
install.packages("mda")
install.packages("corrplot")
install.packages('car')
install.packages("Hmisc")
library(tidyverse)
library("Hmisc")
library(MASS)
library(klaR)
library(psych)
library(caret)
library(car)
library(corrplot)
theme_set(theme_classic())
# NOTE(review): rm(list=ls()) clears the whole workspace mid-script (after
# loading libraries); generally discouraged -- run in a fresh session instead.
rm(list=ls())
# Input data: one semicolon-separated CSV per sector.
data_manu <- read.csv(file="Dane_lic(nb_b_manufacture_16_year_15).csv", header=TRUE, sep=";")
data_agri <- read.csv(file="Dane_lic(nb_b_agriculture_16_year_13).csv", header=TRUE, sep=";")
data_cons <- read.csv(file="Dane_lic(nb_b_construction_16_year_15).csv", header=TRUE, sep=";")
summary(data_manu)
summary(data_agri)
summary(data_cons)
# Keep columns 2-22 (drops the first column) for the numeric analysis.
data_manu1 <- data_manu[,2:22]
data_agri1 <- data_agri[,2:22]
data_cons1 <- data_cons[,2:22]
# Descriptive statistics per sector, rounded to 2 decimals.
round(psych::describe(data_manu1), 2)
round(psych::describe(data_agri1), 2)
round(psych::describe(data_cons1), 2)
# data_manu1 <- data_manu[,2:22]
# data_agri1 <- data_agri[,2:22]
# data_cons1 <- data_cons[,2:22]
# Correlation matrices and mixed corrplots (numbers above, colors below),
# with variables ordered by hierarchical clustering.
forcorrplot1<-cor(data_manu1)
forcorrplot2<-cor(data_agri1)
forcorrplot3<-cor(data_cons1)
corrplot.mixed(forcorrplot1,upper="number",lower="color",order="hclust")
corrplot.mixed(forcorrplot2,upper="number",lower="color",order="hclust")
corrplot.mixed(forcorrplot3,upper="number",lower="color",order="hclust")
|
0b77801256a6c4a0a8c309063358b5cf2d6cb47b
|
80dcfe7d11a2c4825584fb9470c43aafd28d44ca
|
/man/getdir-methods.Rd
|
99b6a006c943c5d73dc27b325e90062bfef0cf92
|
[] |
no_license
|
kieranrcampbell/SpatialPRo
|
53880ef9925dceb48b3092d0fe6e25c2299ead73
|
248b90ca1e743e3fd7e9520a49af9a61489a0353
|
refs/heads/master
| 2020-04-14T12:41:35.080843
| 2014-09-05T09:04:15
| 2014-09-05T09:04:15
| 20,485,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 382
|
rd
|
getdir-methods.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{methods}
\name{getDir}
\alias{getDir}
\alias{getDir,SPExp-method}
\alias{getDir,SPExp-methods}
\title{Experiment directory}
\usage{
getDir(object)
\S4method{getDir}{SPExp}(object)
}
\arguments{
\item{object}{The instance of SPExp to use}
}
\description{
Returns the directory where the experiment files are located.
}
|
dce61a42344b83f5bbd15787a76a7606408fc86c
|
296ce960a1effec3f575461a86ccaa97f99c6cef
|
/Scripts/02_DDC_HFR_Validations.R
|
cce0e692f5dd4e17d23e37177e15aa1621c0eb7f
|
[
"MIT"
] |
permissive
|
baboyma/hfr-wrangler
|
cd4bad523c869c80a98de60b2ffcfd3fa257c2c9
|
84653f52a9db5def57fac714ab613d5e9d486ef8
|
refs/heads/master
| 2023-03-01T02:33:28.350261
| 2021-01-27T19:27:31
| 2021-01-27T19:27:31
| 298,098,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,663
|
r
|
02_DDC_HFR_Validations.R
|
## PROJECT: DDC/HFR Data Processing
## AUTHOR: B.Kagniniwa | USAID
## LICENSE: MIT
## PURPOSE: Validate Trifacta Outputs
## Date: 2020-01-06
# LIBRARIES ------
library(tidyverse)
library(glamr)
library(janitor)
library(lubridate)
library(aws.s3)
library(glitr)
library(extrafont)
# QUERIES ----
# DSE Tables
dse_tbls <- c("cntry_agg_hfr_prcssd_sbmsn",
"cntry_agg_hfr_sbmsn", #
"cntry_hfr_err_sbmsn",
"cntry_hfr_prcssd_sbmsn", # Processed files + MER Data
"cntry_hfr_sbmsn", # Raw submissions files
"cntry_hfr_sbmsn_status",
"cntry_ou_hierarchy", # Orgunits
"cntry_ou_mechanisms", # Mechs
"cntry_ou_trgts", # MER Targets
"cntry_sbmsn_evnts", # Tracks the submissions of HFR data from ingestion through processing
"cntry_vldtn_chk_evnts",
"ddc_prcssd_sbmsn")
# Test Queries
q_sbm_status <- "Select * from cntry_hfr_sbmsn_status;"
q_sbm_data <- "select * from cntry_hfr_prcssd_sbmsn limit 10;"
# DATA ----
# Global Reference
df_refs <- glamr::s3_objects(
bucket = 'gov-usaid',
prefix = "ddc/uat/processed/hfr/receiving/HFR_FY21_GLOBAL"
) %>%
glamr::s3_unpack_keys(df_objects = .)
# Site targets
df_targets <- df_refs %>%
filter(str_detect(key, "DATIM_targets")) %>%
arrange(desc(last_modified)) %>%
pull(key) %>%
first() %>%
glamr::s3_read_object(
bucket = 'gov-usaid',
object = .
)
df_targets %>% glimpse()
df_targets %>%
clean_agency() %>%
count(operatingunit) %>%
arrange(desc(n)) %>%
prinf()
# Orgs
df_orgs <- df_refs %>%
filter(str_detect(key, "orghierarchy")) %>%
arrange(desc(last_modified)) %>%
pull(key) %>%
first() %>%
glamr::s3_read_object(
bucket = 'gov-usaid',
object = .
)
df_orgs %>% glimpse()
df_orgs <- df_orgs %>%
mutate_at(vars(ends_with("tude")), as.numeric)
# Site list
df_sites <- df_refs %>%
filter(str_detect(key, "sitelist")) %>%
arrange(desc(last_modified)) %>%
pull(key) %>%
first() %>%
glamr::s3_read_object(
bucket = 'gov-usaid',
object = .
)
df_sites %>% glimpse()
df_sites <- df_sites %>%
mutate_at(
vars(ends_with(c("reporting", "original"))),
funs(as.logical)
) %>%
filter(expect_reporting == TRUE) %>%
select(-c(last_col(), last_col(1))) %>%
separate(operatingunit,
into = c("operatingunit", "countryname"),
sep = "/") %>%
mutate(countryname = if_else(
is.na(countryname),
operatingunit,
countryname))
df_sites %>%
distinct(operatingunit, countryname) %>%
pull(operatingunit)
df_sites %>%
filter(operatingunit == 'Namibia') %>%
head()
# Attach site coordinates: join lat/long from the org hierarchy onto the
# site list, keeping only orgunits with both coordinates present.
# BUG FIX: the original filtered on `!is.na(longitude)` twice, so rows with
# a missing latitude slipped through; the second test now checks latitude.
df_sites <- df_sites %>%
  left_join(df_orgs %>%
              select(orgunituid, longitude, latitude) %>%
              filter(!is.na(longitude) & !is.na(latitude)),
            by = "orgunituid")

df_sites %>% glimpse()
# HFR Submissions
df_raws <- s3_objects(
bucket = 'gov-usaid',
prefix = "ddc/uat/raw/hfr/incoming",
n = Inf
) %>%
s3_unpack_keys()
df_raws %>%
filter(nchar(sys_data_object) > 1,
str_detect(str_to_lower(sys_data_object), "war")) %>%
pull(sys_data_object)
# HFR Processed
df_procs <- glamr::s3_objects(
bucket = 'gov-usaid',
prefix = "ddc/uat/processed/hfr/incoming/HFR_FY21"
) %>%
glamr::s3_unpack_keys(df_objects = .)
df_procs %>%
filter(str_detect(key, ".*.xlsx$")) %>%
View()
df_procs %>%
filter(str_detect(key, ".*.csv$")) %>%
View()
# Tableau outputs
df_outputs <- s3_objects(
bucket = 'gov-usaid',
prefix = "ddc/uat/processed/hfr/outgoing/hfr"
) %>%
s3_unpack_keys()
# Tableau outputs - latest files: keep only the most recent CSV per HFR
# period (hfr_pd, e.g. "2021_02") and row-bind their contents into one frame.
# BUG FIX: the original called `map_dfr(.x, .f = ...)`; the pipe already
# supplies the vector of keys as `.x`, so the extra bare `.x` symbol was an
# undefined positional argument and errored when evaluated.
df_hfr <- df_outputs %>%
  filter(str_detect(sys_data_object, "^hfr_2021.*.csv$")) %>%
  mutate(hfr_pd = str_extract(sys_data_object, "\\d{4}_\\d{2}")) %>%
  group_by(hfr_pd) %>%
  arrange(desc(last_modified)) %>%
  slice(1) %>%
  ungroup() %>%
  pull(key) %>%
  map_dfr(.f = ~ s3_read_object(
    bucket = 'gov-usaid',
    object = .x
  ))
# Quick interactive checks on the combined HFR extract
df_hfr %>% glimpse()
df_hfr %>% distinct(hfr_freq)

# Reporting frequency by OU / period / indicator
df_hfr %>%
  distinct(operatingunit, hfr_pd, indicator, hfr_freq) %>%
  view()

# Focus country for the remainder of the script
cntry <- "Zambia"

# Periods / indicators reported for the focus country
df_hfr %>%
  filter(operatingunit == cntry) %>%
  distinct(hfr_pd, indicator, hfr_freq) %>%
  arrange(hfr_pd, indicator) %>%
  prinf()

# Targets (mer_targets) with no reported frequency despite expected reporting
# NOTE(review): `expect_reporting == T` relies on the reassignable `T`;
# prefer TRUE (left unchanged here).
df_hfr %>%
  filter(operatingunit == cntry,
         is.na(hfr_freq),
         expect_reporting == T) %>%
  count(hfr_pd, indicator, hfr_freq, wt = mer_targets) %>%
  spread(hfr_pd, n) %>%
  View(title = "sum")
# Country-level site dataset for the completeness map.
# FIXES vs original:
#  * `!is.na(longitude) & !is.na(longitude)` tested longitude twice (in two
#    places); the second test now checks latitude.
#  * the trailing `mutate(indicator = factor(indicator, labels = ))` was a
#    syntax error (empty `labels =`); replaced with a plain factor()
#    conversion — TODO: supply the intended display labels.
#  * added ungroup() so the grouped structure does not leak downstream.
df_hfr_cntry <- df_hfr %>%
  filter(hfr_pd == "02", expect_reporting == "TRUE") %>%
  select(operatingunit, mech_code, mech_name, indicator, orgunituid, val) %>%
  mutate(mech_name = if_else(str_detect(mech_name, "\\("),
                             glamr::extract_text(mech_name),
                             mech_name)) %>%
  full_join(df_sites %>%
              filter(!is.na(longitude) & !is.na(latitude)) %>%
              distinct(orgunituid, longitude, latitude),
            by = "orgunituid") %>%
  filter(operatingunit == cntry,
         !is.na(longitude) & !is.na(latitude)) %>%
  mutate(val = as.integer(val),
         completeness = !is.na(val))  # TRUE when the site reported a value

# Per mechanism x indicator: share of sites (%) that reported a value
df_hfr_cntry <- df_hfr_cntry %>%
  add_count(operatingunit, mech_name, indicator, wt = completeness) %>%
  group_by(operatingunit, mech_name, indicator) %>%
  mutate(mech_ind_completeness = round(n / n() * 100)) %>%
  ungroup()

df_hfr_cntry %>%
  distinct(mech_ind_completeness)

df_hfr_cntry <- df_hfr_cntry %>%
  mutate(indicator = factor(indicator))

# Placeholder for a custom facet labeller (not yet implemented)
comp_labeller <- function() {
}
# Completeness map: one point per site, faceted by mechanism x indicator;
# point fill marks whether the site reported a value for FY2021 period 02
ggplot() +
  # country outline as the base layer
  geom_sf(data = gisr::get_admin0(cntry), fill = NA) +
  geom_point(data = df_hfr_cntry,
             aes(longitude, latitude, fill = completeness),
             shape = 21, size = 2, color = 'white', alpha = .8) +
  scale_fill_si("burnt_sienna") +
  facet_grid(mech_name ~ indicator) +
  labs(title = "ZAMBIA - HFR Sites Reporting FY2021.02 Data") +
  gisr::si_style_map() +
  theme(strip.text.y = element_text(angle = 90))
|
c208fed8675e21c384b9cfd62c3282def1028a7e
|
dbc2af76893a0b669f2d9a032980c2111bfbc4d5
|
/tests/testthat/test-add-up.R
|
260306d19c8478aaac800df2cc3f732e502fa851
|
[
"MIT"
] |
permissive
|
thomasblanchet/gpinter
|
e974de36c0efd4c8070fb9b8cc0311bb10c356df
|
0ce91dd088f2e066c7021b297f0ec3cecade2072
|
refs/heads/master
| 2022-11-28T11:18:10.537146
| 2022-11-22T16:22:40
| 2022-11-22T16:22:40
| 72,655,645
| 19
| 5
| null | 2017-04-19T08:25:44
| 2016-11-02T15:51:21
|
R
|
UTF-8
|
R
| false
| false
| 1,995
|
r
|
test-add-up.R
|
# Monte-Carlo validation of addup_dist(): simulate two Pareto margins joined
# by a Gumbel copula, tabulate each margin, fit and "add up" the fitted
# distributions, then compare the result against the empirical sum.
# NOTE: statement order matters — every runif/rexp/rgumbel call consumes the
# seeded RNG stream, so the draws must stay in this exact sequence.
test_that("Adding up is consistent with Monte-Carlo", {
  set.seed(19920902)
  n <- 1e6
  # Parameters of the tabulation
  p <- seq(0, 0.9, 0.1)
  k <- length(p)
  # Parameters of the first Pareto distribution
  alpha1 <- runif(1, min=1, max=3)
  mu1 <- 5*rexp(1)
  # Parameters of the second Pareto distribution
  alpha2 <- runif(1, min=1, max=3)
  mu2 <- 5*rexp(1)
  # Parameter of the Gumbel copula
  theta <- runif(1, min=2, max=4)
  # Simulate: dependent uniforms from the Gumbel copula, mapped through the
  # inverse Pareto CDFs so each margin follows its chosen Pareto law
  u <- gumbel::rgumbel(n, theta)
  x1 <- mu1/(1 - u[, 1])^(1/alpha1)
  x2 <- mu2/(1 - u[, 2])^(1/alpha2)
  # the summed variable whose distribution addup_dist() must reproduce
  y <- x1 + x2
  # Generate tabulations (thresholds, top averages, mean) for each margin
  q1 <- quantile(x1, p)
  topavg1 <- sapply(q1, function(q) mean(x1[x1 >= q]))
  average1 <- mean(x1)
  q2 <- quantile(x2, p)
  topavg2 <- sapply(q2, function(q) mean(x2[x2 >= q]))
  average2 <- mean(x2)
  # Fit each tabulation, then combine the two fitted distributions
  dist1 <- tabulation_fit(p, q1, average1, topavg=topavg1)
  dist2 <- tabulation_fit(p, q2, average2, topavg=topavg2)
  dist_addup <- addup_dist(dist1, dist2, theta)
  # Generate test tabulation on a finer grid from the Monte-Carlo sample
  p_test <- seq(0, 0.90, 0.01)
  q_test <- quantile(y, p_test)
  average_test <- mean(y)
  topavg_test <- sapply(q_test, function(q) mean(y[y >= q]))
  topshare_test <- (1 - p_test)*topavg_test/average_test
  # Fitted quantiles should match the empirical quantiles of y
  expect_equal(
    fitted_quantile(dist_addup, p_test),
    q_test,
    tolerance = 1e-3,
    check.attributes = FALSE
  )
  # Fitted CDF should invert those quantiles back to the probabilities
  expect_equal(
    fitted_cdf(dist_addup, q_test),
    p_test,
    tolerance = 1e-3,
    check.attributes = FALSE
  )
  # Top shares by threshold should match the empirical top shares
  expect_equal(
    threshold_share(dist_addup, q_test),
    topshare_test,
    tolerance = 1e-3,
    check.attributes = FALSE
  )
  # Top shares by rank should match as well
  expect_equal(
    top_share(dist_addup, p_test),
    topshare_test,
    tolerance = 1e-3,
    check.attributes = FALSE
  )
  # Gini coefficient of the fit should match the sample Gini
  expect_equal(
    gini(dist_addup),
    reldist::gini(y),
    tolerance = 1e-3,
    check.attributes = FALSE
  )
})
|
9f7b4a3be72e8cf977b9f48492e6da9d96e303e7
|
40789ceef1acaddd0d52c325edf867ccda56fcc6
|
/cachematrix.R
|
9fcc3646246fed307bfaf284d97b49d5486045fa
|
[] |
no_license
|
SSD97/Rprogramming
|
7c8943d613bbc4d45f1076de545749fcdb152bb7
|
0cb8de223e872a4261c4dadfc8791cfaeb0453b9
|
refs/heads/master
| 2022-10-01T22:58:00.914112
| 2020-06-06T19:44:07
| 2020-06-06T19:44:07
| 270,079,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
cachematrix.R
|
## Creates a matrix "object": a list of closures that stores a matrix and
## can cache its inverse, invalidating the cache whenever the matrix changes.
makeCacheMatrix <- function(x = matrix()) {
  # cached inverse; NULL until stored via setInvMatrix()
  inv <- NULL

  # replace the stored matrix and drop any stale cached inverse
  set <- function(value) {
    x <<- value
    inv <<- NULL
  }

  # accessors for the matrix and its cached inverse
  get <- function() x
  setInvMatrix <- function(value) inv <<- value
  getInvMatrix <- function() inv

  list(set = set,
       get = get,
       setInvMatrix = setInvMatrix,
       getInvMatrix = getInvMatrix)
}
## Computes the inverse of the matrix held by a makeCacheMatrix() object.
## If an inverse is already cached, the cached value is returned instead of
## recomputing it; otherwise the inverse is computed, cached, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getInvMatrix()
  # cache hit: reuse the previously computed inverse
  if (!is.null(cached)) {
    message("Accessing Cache")
    return(cached)
  }
  # cache miss: invert the stored matrix and memoize the result
  inv <- solve(x$get(), ...)
  x$setInvMatrix(inv)
  inv
}
|
5ea1667ef4455a5e13abdcf81fbbac0105a7e9d0
|
527a5c6166dce36e26c26ce3a331dc0826f75062
|
/MAHENDRA_NANDI_R.R
|
528700075b23457916b18e53c48c9a4ffa310da0
|
[] |
no_license
|
dal3006/books-review
|
ac8d1ff6b21afd61782148ac7c3282b5c3f73ac2
|
b208343140fbae7c335f10c290f34c07df9d1b00
|
refs/heads/master
| 2023-06-29T01:16:09.120753
| 2021-08-10T19:36:54
| 2021-08-10T19:36:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,395
|
r
|
MAHENDRA_NANDI_R.R
|
# VISUALIZATION PROJECT ON " GOOD_READ_BOOKS "
# UNDER THE GUIDENCE OF : PROF. SUDEEP MALLICK
#Visualizing different factors for having a good review of books
#
#________________________________________________________________________###
#..
library(ggplot2)
library(dplyr)
#Read data from file
books<-read.csv("booksP.csv")
books<-na.omit(books)
#datatype conversion
books$language_code<-as.factor(books$language_code)
books$average_rating<-as.numeric(books$average_rating)
books$num_pages<-as.integer(books$num_pages)
books$ratings_count<-as.integer(books$ratings_count)
books$text_reviews_count<-as.integer(books$text_reviews_count)
#Adding derived column
books<-cbind(books,review_index=books$text_reviews_count/books$ratings_count)
books<-na.omit(books)
#(1) average rating vs. number of books of that rating
ggplot(books, aes(average_rating)) + geom_freqpoly(binwidth=0.01,color = "blue")+
labs( x = "Average Rating", y = "Number of books",title ="Distribution of average rating in the dataset",caption = "Fig. 1" )
#(2) Number of reviews
#(a) ratings_count distribution
ratingcount.df<-data.frame(table(books$ratings_count))
names(ratingcount.df)<-c("ratings_count","cum_freq")
ratingcount.df$cum_freq<-rev(cumsum(rev(ratingcount.df$cum_freq)))
ggplot(ratingcount.df, aes(x=ratings_count, y=cum_freq)) + geom_col()+
labs(x="Numer of Ratings",y="Cummulative Frequency",title="Cummulative frequency(greater than type) plot for Ratings count",caption="Fig. 2.a")+
scale_x_discrete(breaks = levels(ratingcount.df$ratings_count)[c(T,rep(F,999))])
#(b) text reviews distribution
treviewcount.df<-data.frame(table(books$text_reviews_count))
names(treviewcount.df)<-c("text_reviews_count","cum_freq")
treviewcount.df$cum_freq<-rev(cumsum(rev(treviewcount.df$cum_freq)))
ggplot(treviewcount.df, aes(x=text_reviews_count, y=cum_freq)) + geom_col()+
labs(x="Numer of Text Reviews",y="Cummulative Frequency",title="Cummulative frequency(greater than type) plot for Number of Text Reviews",caption = "Fig. 2.b")+
scale_x_discrete(breaks = levels(ratingcount.df$ratings_count)[c(T,rep(F,205))])
#(c) total reviews distribution
reviews.df<-data.frame(table(books$ratings_count+books$text_reviews_count))
names(reviews.df)<-c("reviews_count","cum_freq")
reviews.df$cum_freq<-rev(cumsum(rev(reviews.df$cum_freq)))
ggplot(reviews.df, aes(x=reviews_count, y=cum_freq)) + geom_col()+
labs(x="Total Numer of Reviews",y="Cummulative Frequency",title="Cummulative frequency(greater than type) Total Number of Reviews",caption = "Fig. 2.c")+
scale_x_discrete(breaks = levels(ratingcount.df$ratings_count)[c(T,rep(F,299))])
#(d) review index distribution
ggplot(books,aes(review_index))+
geom_freqpoly(binwidth=0.007,colour="red")+
labs(x="Review Index",y="Frequency",title="Frequency polygon for Review Index",caption = "Fig. 2.d")
#(3) Number of books for different languages
lang<-data.frame(table(books$language_code))
lang<-lang[order(lang$Freq,decreasing=T),]
levels(lang$Var1)<-c(levels(lang$Var1),"others")
lang<-rbind(lang %>% top_n(7,lang$Freq),c("others",sum(lang$Freq[8:31],na.rm=T)))
ggplot(lang, aes(x="", y=as.integer(Freq), fill=Var1))+
geom_bar(width = 1, stat = "identity")+
coord_polar("y", start=0)+
labs(x="Languages",y="Number of books",title="Pie Chart for number of books of different languages",caption = "Fig. 3")
#(4) Number of books of different pages
ggplot(books,aes(num_pages))+
geom_histogram(binwidth = 5)+
labs(x="Number of pages",y="Number of Books",title="Histogram of books of different page numbers",caption = "Fig. 4")+
coord_cartesian(xlim = c(0,2000),ylim = c(0,350))
#(5) number of authers having exactly certain number of books barplot
# creates a dataframe with number of authors having n number of books
author<-data.frame(table(table(unlist(strsplit(books$authors,split = "/")))))
names(author)<-c("no_of_books","no_of_authors")
ggplot(author, aes(no_of_books, no_of_authors)) +
geom_col() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
scale_x_discrete(breaks=levels(author$no_of_books)[c(T,rep(F,4))])+
labs(x="Number of Books",y="Number of authors",title="Number of Authors having n Number of Books",caption = "Fig. 5")+
coord_cartesian(xlim = c(0,25))+
scale_fill_brewer(palette = "Blues")
#(6) Books published in different years (1900 onwards)
# extract the trailing 4-digit year from the publication_date string
pubdate <- substr(books$publication_date, nchar(books$publication_date) - 4 + 1, nchar(books$publication_date))
pubdate <- as.integer(pubdate)
pubdate <- pubdate[pubdate >= 1900]
pubdate <- data.frame(table(pubdate))
# BUG FIX: `levels(pubdate)[T, rep(F, 9)]` took levels() of a data.frame
# (NULL) and indexed a vector with two subscripts (runtime error); label
# every 10th year from the factor column instead.
ggplot(data = pubdate, aes(x = pubdate, y = Freq)) +
  geom_bar(stat = 'identity') +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) +
  scale_x_discrete(breaks = levels(pubdate$pubdate)[c(TRUE, rep(FALSE, 9))]) +
  scale_fill_brewer(palette = "Blues") +
  labs(x = "Publication year", y = "Number of Books", title = "Books published in different year", caption = "Fig.6")
# (7) Language vs average rating
avg.lang<-aggregate(average_rating~language_code, data=books, FUN = mean)
ggplot(avg.lang, aes(language_code, average_rating)) +
geom_col(aes(colour=language_code))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(title="Average rating for different languages",caption="Fig. 7",x="language",y="average rating")
#(8) number of pages vs average ratings
ggplot(books,aes(num_pages,average_rating))+
geom_rug(aes(colour="red"))+geom_density_2d()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(title = "Rug plot: Average Rating vs. Number of Pages",
caption = "Fig. 8",x="total no of pages",y="average rating")
#(9) Reviews vs average rating
#(a) rating count vs average rating
ggplot(books,aes(average_rating,ratings_count))+
geom_jitter(aes(colour=average_rating))+
labs(caption="Fig. 9.a",y="ratings count",x="average rating")+
coord_cartesian(ylim = c(0,2500000))
#(b) text reviews vs average rating
ggplot(books,aes(average_rating,text_reviews_count))+
geom_jitter(aes(colour=average_rating))+
labs(caption="Fig. 9.b",y="text reviews count",x="average rating")+
coord_cartesian(ylim = c(0,50000))
#(c) Total reviews vs average rating
ggplot(books,aes(average_rating,ratings_count+text_reviews_count))+
geom_jitter(aes(colour=average_rating))+
labs(caption="Fig. 9.c",y="text reviews count + ratings count",x="average rating")+
coord_cartesian(ylim = c(0,2500000))
#(d) review_index vs average rating
ggplot(books,aes(review_index,average_rating))+
geom_smooth(aes(colour=review_index))+
labs(caption="Fig. 9.d",x="review index",y="average rating")
#(10) average ratings for different publishers
# [ just to see wheather rating is above 4.5]
avg.pub<-aggregate(average_rating~publisher, data=books[], FUN = mean)
ggplot(avg.pub, aes(publisher,average_rating)) +
geom_col()+
scale_x_discrete(breaks=NULL)+
labs(caption = "Fig.10 ",x="publishers",y="average rating")
#(11) pages per book for different languages
pagesperbook<-aggregate(num_pages~language_code,data=books, FUN=mean)
ggplot(pagesperbook,aes(language_code,num_pages))+
geom_col(aes(colour=language_code))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(caption="Fig. 11",x="language code",y="no of pages")+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
#(12) total reviews for different languages
reviews.lang<-aggregate(ratings_count+text_reviews_count~language_code,data=books, FUN=sum)
names(reviews.lang)<-c("language","reviews")
ggplot(reviews.lang,aes(x=language,y=as.integer(reviews)))+
geom_bar(stat = "identity")+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(y="Number of reviews",caption = "Fig. 12")
#(13) number of pages vs reviews
#(a) pages vs rating count
ggplot(books,aes(num_pages,ratings_count))+
geom_count(aes(colour=ratings_count))+
scale_y_continuous( labels = scales::comma)+
coord_cartesian(ylim = c(0,3000000))+
labs(caption="Fig. 13.a",x="no of pages",y="ratings count")
#(b) pages vs text reviews count
ggplot(books,aes(num_pages,text_reviews_count))+
geom_count(aes(colour=text_reviews_count))+
scale_y_continuous( labels = scales::comma)+
coord_cartesian(ylim = c(0,60000))+
labs(caption="Fig. 13.b",x="no of pages",y="text reviews count")
#(c) total reviews vs number of pages
ggplot(books,aes(num_pages,ratings_count+text_reviews_count))+
geom_count(aes(colour=ratings_count+text_reviews_count))+
scale_y_continuous( labels = scales::comma)+
coord_cartesian(ylim = c(0,3000000))+
labs(caption="Fig. 13.c",x="no of pages",y="ratings count + text reviews count")
#(d) review_index vs number of pages
ggplot(books,aes(num_pages,review_index))+
geom_count(aes(colour=review_index))+
scale_y_continuous( labels = scales::comma)+
labs(caption="Fig. 13.d",x="no of pages",y="review index")
#(14) 3 authors having most number of books
book.author<-data.frame(table(unlist(strsplit(books$authors,split = "/"))))
book.author<-book.author%>% slice_max(Freq,n=3)
book.author.df<-books[grep(as.character(book.author$Var1[1]),books$authors),]
book.author.df$num_pages<-cut_width(book.author.df$num_pages,100,boundary=0)
book.author.df<-aggregate(average_rating~num_pages,data = book.author.df,mean)
author<-rep(book.author$Var1[1],nrow(book.author.df))
book.author.df1<-cbind(book.author.df,author)
book.author.df<-books[grep(as.character(book.author$Var1[2]),books$authors),]
book.author.df$num_pages<-cut_width(book.author.df$num_pages,100,boundary=0)
book.author.df<-aggregate(average_rating~num_pages,data = book.author.df,mean)
author<-rep(book.author$Var1[2],nrow(book.author.df))
book.author.df2<-cbind(book.author.df,author)
book.author.df<-books[grep(as.character(book.author$Var1[3]),books$authors),]
book.author.df$num_pages<-cut_width(book.author.df$num_pages,100,boundary=0)
book.author.df<-aggregate(average_rating~num_pages,data = book.author.df,mean)
author<-rep(book.author$Var1[3],nrow(book.author.df))
book.author.df3<-cbind(book.author.df,author)
book.author.df<-rbind(rbind(book.author.df1,book.author.df2),book.author.df3)
#(a) number of pages vs average ratings
ggplot(book.author.df,aes(num_pages,average_rating))+
geom_line(aes(group = author,colour=author))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(title = "Comparison among the authors having most number of books",
x="no of pages",y="average rating",caption = "Fig. 14.a")
#(b) total reviews vs average rating for top 3 author
# book.author.review<-books[c(grep(as.character(book.author$Var1[1]),books$authors),grep(as.character(book.author$Var1[2]),books$authors),grep(as.character(book.author$Var1[3]),books$authors)),]
book.author.review.df1<-books[grep(as.character(book.author$Var1[1]),books$authors),]
book.author.review.df2<-books[grep(as.character(book.author$Var1[2]),books$authors),]
book.author.review.df3<-books[grep(as.character(book.author$Var1[3]),books$authors),]
author1<-rep(book.author$Var1[1],nrow(book.author.review.df1))
author2<-rep(book.author$Var1[2],nrow(book.author.review.df2))
author3<-rep(book.author$Var1[3],nrow(book.author.review.df3))
book.1<-cbind(book.author.review.df1,author=author1)
book.2<-cbind(book.author.review.df2,author=author2)
book.3<-cbind(book.author.review.df3,author=author3)
book.author.review<-rbind(rbind(book.1,book.2),book.3)
ggplot(book.author.review,aes(ratings_count+text_reviews_count,average_rating))+
geom_line(aes(group = author,colour=author))+ (xlim(0,5000))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(x="Total reviews",y="average rating",caption = "Fig.14.b",title ="Comparison among the authors having most number of books" )
#(c) total reviews vs number of pages
ggplot(book.author.review,aes(num_pages,ratings_count+text_reviews_count))+
geom_line(aes(group = author,colour=author))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(y="Total reviews",caption = "Fig.14.c",title ="Comparison among the authors having most number of books" )
#(15) For top 3 publishers
publishers.top<-data.frame(table(books$publisher))
publishers.top<-publishers.top %>% slice_max(Freq,n=3)
pub.top<-as.character(publishers.top$Var1)
publishers.top.df1<-books[books$publisher==pub.top[1] | books$publisher==pub.top[2] | books$publisher==pub.top[3] ,]
# (a) number of pages vs average ratings
ggplot(publishers.top.df1,aes(num_pages,average_rating))+
geom_line(aes(group = publisher,colour=publisher))+(xlim(400,800))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(caption = "Fig.15.a",x="total no of pages of book",y="average rating",title ="Comparison among the publishers having most number of books" )
# (b) total reviews vs average ratings
ggplot(publishers.top.df1,aes(ratings_count+text_reviews_count,average_rating))+
geom_line(aes(group = publisher,colour=publisher))+(xlim(200,10000))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(x="ratings count + text reviews count",y="average rating",caption = "Fig.15.b",title ="Comparison among the publishers having most number of books" )
#(c) number of pages vs total number of reviews
ggplot(publishers.top.df1,aes(num_pages,ratings_count+text_reviews_count))+
geom_line(aes(group = publisher,colour=publisher))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(y="Total Reviews",x="total no of book",y="ratings count + text reviews count",caption = "Fig.15.c",title ="Comparison among the publishers having most number of books" )
#(16) Languages used by the books of the top 7 publishers (grouped barplot)
# (earlier exploratory attempts kept below for reference)
#pub<-data.frame(table(books$publisher))
#pub<-pub[order(pub$Freq,decreasing=T),]
#pub7 <-pub %>% top_n(7,pub$Freq)
#top7publisher <-as.vector(pub7$Var1)
#lang.pub <-table(books[books$publisher==top7publisher,c(7,12)])
#barplot(lang.pub,1, beside = T,legend.text= rownames(lang.pub),col =blues9,args.legend = list(x=ncol(lang.pub)+350,y=50))
#library(RColorBrewer)
#barplot(lang.pub,beside=T,xlim= c(0,ncol(lang.pub)+300),col=brewer.pal(nrow(lang.pub),"Paired"),ylab="no of books",xlab= "name of top 7 pblishers",legend.text= T,args.legend= list(x=ncol(lang.pub)+370))
pub <- data.frame(table(books$publisher))
pub <- pub[order(pub$Freq, decreasing = TRUE), ]
pub7 <- pub %>% top_n(7, pub$Freq)
top7publisher <- as.vector(pub7$Var1); top7publisher
# BUG FIX: the original indexed an undefined `book` and compared with `==`,
# which silently recycles the 7 publisher names against the whole column;
# `%in%` keeps every row whose publisher is any of the top 7.
lang.pub <- table(books[books$publisher %in% top7publisher, c(7, 12)])
barplot(lang.pub, 1, horiz = FALSE,
        main = "top 7 publishers and language used ",
        beside = TRUE, xlab = "publishers ", angle = 90,
        legend.text = rownames(lang.pub), col = blues9,
        args.legend = list(x = ncol(lang.pub) + 10))
#(17)
library(plot3D)
rating_count<-books$ratings_count
text_reviews_count<-books$text_reviews_count
average_rating<-books$average_rating
scatter3D(rating_count,text_reviews_count,average_rating,xlab="Rating Count",ylab="Text Reviews Count",zlab="Average Rating",pch = 19, bty = "g", type = "h", phi = 0,ticktype = "detailed",cex=0.5)
#(18)
ggplot(books,aes(review_index,average_rating,colour=text_reviews_count))+
geom_jitter()+
facet_grid(vars(language_code))+
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
labs(caption = "Fig.18",x="review index",y="average rating" )+
scale_y_continuous(n.breaks = 2)
#(20)
ggplot(books,aes(ratings_count,average_rating,colour=text_reviews_count))+
geom_point()+
facet_grid(vars(language_code))+
theme(axis.text.x = element_text(angle = 0, vjust = 0.5, hjust=1))+
labs(caption = "Fig.19" ,x="rating count",y="average rating")+
scale_y_continuous(n.breaks = 2)
#########################################################################################################################
##########################################################################################################################
|
24f64ac115a4f431f4a721af938678f991346cfc
|
98fd03ebd9de52038f06cd89200a460432f9cc5c
|
/man/is_url_subpath_of.Rd
|
e59f69b2360e7e69a92cb49ae65c4bb1e5feb001
|
[
"MIT"
] |
permissive
|
pharmaR/riskmetric
|
51d3b067da6db6ad1252f3ba706db1d922b5df64
|
3d1501880edc07cff5cd72129c0df0899db83029
|
refs/heads/master
| 2023-07-26T07:33:56.471690
| 2023-05-31T14:58:21
| 2023-05-31T14:58:21
| 173,354,970
| 148
| 32
|
NOASSERTION
| 2023-09-12T20:41:31
| 2019-03-01T19:11:16
|
R
|
UTF-8
|
R
| false
| true
| 520
|
rd
|
is_url_subpath_of.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is_url_subpath_of}
\alias{is_url_subpath_of}
\title{check if a url originates from a list of repo urls}
\usage{
is_url_subpath_of(url, urls)
}
\arguments{
\item{url}{a url which may stem from one of the provided base urls}
\item{urls}{vector of base urls}
}
\value{
logical vector indicating which base urls have a sub url of
\code{url}
}
\description{
check if a url originates from a list of repo urls
}
\keyword{internal}
|
0f71991cc5524e89677c4df7207cc848eed965c5
|
17cc1e57a778ad66aaebde9c5da53304f396888a
|
/R/tbls.r
|
4892578ae94a0ed944d863759e0614892fe73245
|
[
"MIT"
] |
permissive
|
romainfrancois/valr
|
f50735394380d9d3b80142c6160b56083aeaebae
|
0ea8fe97390157e138798800423bb4858a21cd49
|
refs/heads/master
| 2022-06-17T12:45:19.518621
| 2020-04-23T18:22:12
| 2020-04-23T18:22:12
| 261,138,333
| 0
| 0
|
NOASSERTION
| 2020-05-04T10:02:07
| 2020-05-04T10:02:07
| null |
UTF-8
|
R
| false
| false
| 5,501
|
r
|
tbls.r
|
#' Tibble for intervals.
#'
#' Required column names are `chrom`, `start` and `end`.
#'
#' @param x A `data_frame`
#' @param ... params for [tibble::tibble()]
#' @param .validate check valid column names
#'
#' @rdname tbl_interval
#'
#' @examples
#' x <- tibble::tribble(
#'   ~chrom, ~start, ~end,
#'   'chr1', 1,      50,
#'   'chr1', 10,     75,
#'   'chr1', 100,    120
#' )
#'
#' is.tbl_interval(x)
#'
#' x <- tbl_interval(x)
#' is.tbl_interval(x)
#'
#' @export
tbl_interval <- function(x, ..., .validate = TRUE) {
  # coerce to tibble unless `x` already is one (avoids a needless copy)
  out <- if (tibble::is_tibble(x)) x else tibble::as_tibble(x, ...)
  # optionally enforce the required chrom/start/end columns
  if (.validate) {
    out <- check_interval(out)
  }
  # tag with the interval class so is.tbl_interval() recognizes it
  class(out) <- union("tbl_ivl", class(out))
  out
}
#' Coerce objects to tbl_intervals.
#'
#' This is an S3 generic. valr includes methods to coerce tbl_df and GRanges
#' objects.
#'
#' @param x object to convert to tbl_interval.
#'
#' @return [tbl_interval()]
#'
#' @examples
#' \dontrun{
#' gr <- GenomicRanges::GRanges(
#'   seqnames = S4Vectors::Rle(
#'     c("chr1", "chr2", "chr1", "chr3"),
#'     c(1, 1, 1, 1)),
#'   ranges = IRanges::IRanges(
#'     start = c(1, 10, 50, 100),
#'     end = c(100, 500, 1000, 2000),
#'     names = head(letters, 4)),
#'   strand = S4Vectors::Rle(
#'     c("-", "+"), c(2, 2))
#' )
#'
#' as.tbl_interval(gr)
#'
#' # There are two ways to convert a tbl_interval to GRanges:
#'
#' gr <- GenomicRanges::GRanges(
#'   seqnames = S4Vectors::Rle(x$chrom),
#'   ranges = IRanges::IRanges(
#'     start = x$start + 1,
#'     end = x$end,
#'     names = x$name),
#'   strand = S4Vectors::Rle(x$strand)
#' )
#' # or:
#'
#' gr <- GenomicRanges::makeGRangesFromDataFrame(dplyr::mutate(x, start = start +1))
#'
#' }
#'
#' @export
as.tbl_interval <- function(x) {
  # S3 dispatch on the class of `x`
  UseMethod("as.tbl_interval")
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.tbl_df <- function(x) {
  # tibbles are validated/classed directly
  tbl_interval(x)
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.data.frame <- function(x) {
  # plain data.frames are coerced to tibble inside tbl_interval()
  tbl_interval(x)
}
#' @export
#' @rdname as.tbl_interval
as.tbl_interval.GRanges <- function(x) {
  # https://www.biostars.org/p/89341/
  # GRanges ranges are 1-based and closed; BED-style intervals are 0-based
  # half-open, hence the `start - 1` shift below.
  res <- tibble(
    chrom = as.character(x@seqnames),
    start = x@ranges@start - 1,
    end = x@ranges@start - 1 + x@ranges@width,
    name = rep(".", length(x)),
    score = rep(".", length(x)),
    strand = as.character(x@strand)
  )
  # unstranded ranges ("*") are encoded as "." in BED convention
  res <- mutate(res, strand = ifelse(strand == "*", ".", strand))
  tbl_interval(res)
}
#' Construct a tbl_interval using tribble formatting.
#'
#' @rdname tbl_interval
#'
#' @return [tbl_interval()]
#
#' @export
trbl_interval <- function(...) {
  # build with tribble(), then coerce/validate as an interval tibble
  as.tbl_interval(tibble::tribble(...))
}
#' Test if the object is a tbl_interval.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the [tbl_interval()] class.
#' @export
is.tbl_interval <- function(x) {
  # inherits() is the idiomatic class check; equivalent to the previous
  # `"tbl_ivl" %in% class(x)` but clearer and slightly faster
  inherits(x, "tbl_ivl")
}
#' Tibble for reference sizes.
#'
#' Equivalent to information in UCSC "chromSizes" files. Required column names are:
#' `chrom` and `size`
#'
#' @param x A `data_frame`
#' @param ... params for [tibble::tibble()]
#' @param .validate check valid column names
#'
#' @rdname tbl_genome
#'
#' @examples
#' genome <- tibble::tribble(
#'   ~chrom, ~size,
#'   'chr1', 1e6,
#'   'chr2', 1e7
#' )
#'
#' is.tbl_genome(genome)
#' genome <- tbl_genome(genome)
#' is.tbl_genome(genome)
#'
#' @export
tbl_genome <- function(x, ..., .validate = TRUE) {
  # coerce to tibble, optionally checking for the chrom/size columns
  result <- tibble::as_tibble(x, ...)
  if (.validate) {
    result <- check_genome(result)
  }
  # tag with the genome class so is.tbl_genome() recognizes it
  class(result) <- union("tbl_gnm", class(result))
  result
}
#' Coerce objects to tbl_genome.
#'
#' This is an S3 generic. valr includes methods to coerce tbl_df and data.frame
#' objects.
#'
#' @param x object to convert to tbl_genome.
#'
#' @return [tbl_genome()]
#'
#' @export
as.tbl_genome <- function(x) {
  # S3 dispatch on the class of `x`
  UseMethod("as.tbl_genome")
}
#' @export
#' @rdname as.tbl_genome
as.tbl_genome.tbl_df <- function(x) {
  # tibbles are validated/classed directly
  tbl_genome(x)
}
#' @export
#' @rdname as.tbl_genome
as.tbl_genome.data.frame <- function(x) {
  # plain data.frames are coerced to tibble inside tbl_genome()
  tbl_genome(x)
}
#' Construct a tbl_genome using tribble formatting.
#'
#' @return [tbl_genome()]
#'
#' @rdname tbl_genome
#'
#' @examples
#' trbl_genome(
#'   ~chrom, ~size,
#'   'chr1', 1e6
#' )
#'
#' @export
trbl_genome <- function(...) {
  # build with tribble(), then validate/class as a genome tibble
  tbl_genome(tibble::tribble(...))
}
#' Test if the object is a tbl_genome.
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the [tbl_genome()] class.
#' @export
is.tbl_genome <- function(x) {
  # inherits() is the idiomatic class check; equivalent to the previous
  # `"tbl_gnm" %in% class(x)`
  inherits(x, "tbl_gnm")
}
# Validity checks ---------------------------------------------------

# Ensure `x` carries the mandatory interval columns; returns `x` unchanged.
check_interval <- function(x) {
  check_names(x, c("chrom", "start", "end"))
  x
}
# Validates a genome tibble: required columns present and chroms unique.
check_genome <- function(x) {
  check_names(x, c("chrom", "size"))
  # a genome / chromSizes table must have exactly one row per chromosome
  chrom_col <- x[["chrom"]]
  repeated <- chrom_col[duplicated(chrom_col)]
  if (length(repeated) > 0) {
    stop(sprintf(
      "duplicate chroms in genome: %s",
      paste0(repeated, collapse = ", ")
    ))
  }
  x
}
# Stops with an informative error when any of `expected` names are absent
# from `x`; returns NULL invisibly when all required names are present.
check_names <- function(x, expected) {
  absent <- setdiff(expected, names(x))
  if (length(absent) > 0) {
    stop(sprintf(
      "expected %d required names, missing: %s",
      length(expected),
      paste0(absent, collapse = ", ")
    ))
  }
}
|
e4e0bfee631fab616117a8094b37fef5207fc006
|
0d315ff6485911c05b531ebb60a6262b8b87c1ba
|
/ticket or warning.R
|
4ed316027a816c1f12fdb25d9149f9fc5592aef1
|
[] |
no_license
|
AJTorgesen/Random-Forest
|
3d5cc57ae758dc1b46747c389771dc04e8f6cc9f
|
4edc697f608f925d5d3ecf39e295ada4eed20176
|
refs/heads/master
| 2020-03-18T04:55:37.911948
| 2018-05-21T19:17:22
| 2018-05-21T19:17:22
| 134,314,100
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,091
|
r
|
ticket or warning.R
|
# Tree-based Classification
# Analysis: Ticket or Warning?
# Montgomery County Traffic Stops Data
source('https://grimshawville.byu.edu/TrafficStops2017a.R')

# EDA --------------------------------------------------------------------
# Distribution of the categorical response variable
table(ticket.last$Ticket)
prop.table(table(ticket.last$Ticket))

# Balanced modeling dataset: all "bads" (tickets) plus an equally sized
# SRS without replacement of the "goods" (warnings)
all.bad <- subset(ticket.last, Ticket == "TRUE")
n.bad <- dim(all.bad)[1]

all.good <- subset(ticket.last, Ticket == "FALSE")
n.good <- dim(all.good)[1]
set.seed(12)
rows.good <- sample(n.good, n.bad)
sample.good <- all.good[rows.good, ]
ticket.model <- rbind(all.bad, sample.good)

# Train/test split
# FIX: derive the row count from the data instead of hard-coding 159134,
# which silently breaks (or samples the wrong rows) if the source changes.
n.model <- nrow(ticket.model)
train.rows <- sample(n.model, 125000)
ticket.train <- ticket.model[train.rows, ]
ticket.test <- ticket.model[-train.rows, ]

# Validate similarities between train and test
summary(ticket.train$Ticket)
summary(ticket.test$Ticket)

# Grow a Random Forest ----------------------------------------------------
library(randomForest)
# fit on train, score the held-out test set (column 17 = Ticket response)
out.ticket <- randomForest(x = ticket.train[, -17], y = ticket.train$Ticket,
                           xtest = ticket.test[, -17], ytest = ticket.test$Ticket,
                           replace = TRUE,
                           keep.forest = TRUE,
                           ntree = 100, mtry = 4, nodesize = 25)

# Predict a new observation
ticket.new.obs <- ticket.model[145685, ]
predict(out.ticket, ticket.new.obs)

# Prediction performance (error rates)
out.ticket
# 31.75% - TRAIN
# 31.3%  - TEST

# Model insight (interpretation)
importance(out.ticket)
varImpPlot(out.ticket)
# Color, Hour and Auto Year most "important"

# Research Task: predict whether a ticket is issued given the explanatory variables
# Data Features: tall and wide; random forests work well with this type of data
# Analysis Weakness: a black box — it gives answers, but we don't really know how;
# not perfectly reproducible because of random samples, plus a chance of overfit bias
# Challenge Question: predict driver's gender from the explanatory variables
# found in the Montgomery County traffic data
|
56d10da2e399779183cafa36eb7066e223bb4955
|
c3ad72409aa613e85ab48ff71444fd6731f0dd90
|
/R/my_ggarrange.R
|
d8ec56dc7a0febe59bac447361d4a77ad517c08b
|
[] |
no_license
|
Nmoiroux/MalTransMod
|
69e0a46061d88938be555712386d3b0c55cb847a
|
6f3244aa09791a29d516857cdbd9e9886e1f093e
|
refs/heads/master
| 2022-06-09T14:50:02.634739
| 2020-05-07T08:18:38
| 2020-05-07T08:18:38
| 169,445,777
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,766
|
r
|
my_ggarrange.R
|
# Local override of ggpubr::ggarrange() extended with a `plot_legend`
# argument: when `common.legend = TRUE`, the shared legend is taken from
# the plot at index `plot_legend` in the assembled plot list. All other
# arguments behave as in ggpubr::ggarrange(); relies on ggpubr internals
# (.nbplots_per_page, .check_legend, .update_label_pms, .plot_grid).
ggarrange <- function (..., plotlist = NULL, ncol = NULL, nrow = NULL, labels = NULL,
                       label.x = 0, label.y = 1, hjust = -0.5, vjust = 1.5, font.label = list(size = 14,
                       color = "black", face = "bold", family = NULL), align = c("none", "h", "v", "hv"),
                       widths = 1, heights = 1, legend = NULL, common.legend = FALSE, plot_legend = NULL)
{
  plots <- c(list(...), plotlist)
  align <- match.arg(align)
  nb.plots <- length(plots)
  nb.plots.per.page <- .nbplots_per_page(ncol, nrow)
  if (is.null(legend) && common.legend)
    legend <- "top"
  legend <- .check_legend(legend)
  if (!is.null(legend))
    plots <- purrr::map(plots, function(x) {
      if (!is.null(x))
        x + theme(legend.position = legend)
      else x
    })
  leg <- NULL
  if (common.legend) {
    # FIX: use `&&` so the bounds check is skipped when `plot_legend` is
    # NULL — with `&`, `NULL <= nb.plots` yielded a zero-length value and
    # `if` raised "argument is of length zero".
    # FIX: extract the plot itself with `[[`; `plots[plot_legend]`
    # returned a one-element list rather than a ggplot.
    if (!is.null(plot_legend) && plot_legend <= nb.plots)
      leg <- get_legend(plots[[plot_legend]])
    # Hide per-plot legends; only the shared one (if any) is kept.
    plots <- purrr::map(plots, function(x) {
      if (!is.null(x))
        x + theme(legend.position = "none")
      else x
    })
  }
  # Paginate when more plots were supplied than fit on one page.
  if (nb.plots > nb.plots.per.page) {
    plots <- split(plots, ceiling(seq_along(plots)/nb.plots.per.page))
  }
  else plots <- list(plots)
  .lab <- .update_label_pms(font.label, label.x = label.x,
                            label.y = label.y, hjust = hjust, vjust = vjust)
  res <- purrr::map(plots, .plot_grid, ncol = ncol, nrow = nrow,
                    labels = labels, label_size = .lab$size, label_fontfamily = .lab$family,
                    label_fontface = .lab$face, label_colour = .lab$color,
                    label_x = .lab$label.x, label_y = .lab$label.y, hjust = .lab$hjust,
                    vjust = .lab$vjust, align = align, rel_widths = widths,
                    rel_heights = heights, legend = legend, common.legend.grob = leg)
  if (length(res) == 1)
    res <- res[[1]]
  class(res) <- c(class(res), "ggarrange")
  res
}
|
79defd0f3703df3f7dbcf97477e5b889079c9411
|
c8d9fdea31b02611539f3cd121be88991f085fa7
|
/Tests/Paper/substanceP_masses.R
|
f331e39eeff836a1efae13d3ab2a9d6e43982226
|
[] |
no_license
|
MatteoLacki/MassTodonPy
|
1f593da71540f6a855ebd950ab1db6e6b050e976
|
eaed6678fa6a442c9c346674d40404dc781a2f52
|
refs/heads/master
| 2021-01-19T18:30:59.082013
| 2019-01-18T19:22:07
| 2019-01-18T19:22:07
| 88,361,578
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 620
|
r
|
substanceP_masses.R
|
# Compute charge-state m/z values for substance P from its isotopic
# envelope (IsoSpecR).
library(IsoSpecR)
library(tidyverse)
# Isotopic envelope covering 99% of probability for C63 H97 N17 O14 S1.
# NOTE(review): tbl_df() is deprecated in newer tibble versions — as_tibble()
# is the modern equivalent; T should be TRUE.
ENVELOPE = IsoSpecify(molecule = c(C=63, H=97, N=17, O=14, S=1), .99, showCounts = T)
ENVELOPE = ENVELOPE %>%
  tbl_df() %>%
  mutate(prob = exp(logProb))
# Monoisotopic peak = the most probable configuration.
monoisotopic = ENVELOPE %>% filter(prob == max(prob))
m_mono = monoisotopic$mass
sprintf("%.10f", m_mono)
data(isotopicData)
# NOTE(review): filter() with no condition is a no-op — a predicate was
# presumably intended here; confirm.
isotopicData$IsoSpec %>% filter()
# Mass of a single hydrogen atom (first row/column of its envelope).
m_H = IsoSpecify(molecule = c(H=1), 2.0, showCounts = T)[1,1]
# [M+3H] mass and its m/z at charge 2 and 3.
round((m_mono + 3*m_H), 3)
round((m_mono + 3*m_H)/2, 3)
round((m_mono + 3*m_H)/3, 3)
# Second envelope peak (one 13C substitution).
m_C13_peak = ENVELOPE[2,1]
round((m_C13_peak + 2*m_H)/2, 3)
# NOTE(review): identical to the previous line — possibly /3 (charge 3)
# was intended here; confirm.
round((m_C13_peak + 2*m_H)/2, 3)
|
5579be31125cd165d395ee14f2a13b340e9c0490
|
117bdbc2b2380aeacec87cf6c8b24b18ab8c5bee
|
/man/permutation.wrapper.cat.inter.Y.and.X.Rd
|
204b95f390f60291182f5c5c7fa49e9854cb5192
|
[] |
no_license
|
cran/PIGE
|
1cc3f6aa9bfd47408be850188b1e3b7dfad90557
|
682c43bd19a050c6b5eb055f7184e5849e60cf94
|
refs/heads/master
| 2021-06-11T21:06:21.948691
| 2017-08-30T07:23:09
| 2017-08-30T07:23:09
| 17,681,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
rd
|
permutation.wrapper.cat.inter.Y.and.X.Rd
|
\name{permutation.wrapper.cat.inter.Y.and.X}
\alias{permutation.wrapper.cat.inter.Y.and.X}
\title{Internal function used for the parallel computation on the permutation sample}
\usage{
permutation.wrapper.cat.inter.Y.and.X(x, mat, data,
model, var.inter, Outcome.model)
}
\description{
Internal function used for the parallel computation on
the permutation sample
}
\keyword{internal}
|
87d182e746aab17da04825cd006886a996de6bfe
|
ce4bf7d18053aee8a5b9a85bbfe91ade7b895e6a
|
/Diabetes Risk Stratification/APFE1781393_main.R
|
333750ddc8db688b5db5402020aab128cf501fb1
|
[] |
no_license
|
manohajx/Data-Analysis
|
215547a82383e573cebf0f70c58842acb5f92f42
|
cfaafda0a419e0198753ce666287e61ce20bd18f
|
refs/heads/master
| 2020-04-14T14:39:34.689749
| 2019-01-05T04:08:38
| 2019-01-05T04:08:38
| 163,903,040
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,122
|
r
|
APFE1781393_main.R
|
##################################################################################################################################
#A care management organisation called WeCare wants to identify among its diabetic patients, the ones that are at high risk #
#of getting re-admitted to the hospital. They wish to intervene by providing some incentive to these patients that will #
#help them improve their health identify high-risk diabetic patients through risk stratification. #
#This will help the payer to decide what are the right intervention programs for these patients. #
# #
# #
# #
# #
##################################################################################################################################
options("scipen"=100, "digits"=4)
library(icd)
library(caret)
library(dplyr)
library(scales)
library(gridExtra)
library(caTools)
library(corrplot)
library(MASS)
library(car)
library(ROCR)
library(Metrics)
library(randomForest)
library(stringr)
rm(list=ls())
setwd("C:\\Users\\johnp\\Desktop\\Risk stratification")
set.seed(2018)
#Importing the diabetes data
diabetic_data<- read.csv("diabetic_data.csv",stringsAsFactors = F, na.strings = c("NA","#DIV/0!", "","NaN","?"))
nrow(diabetic_data)
ncol(diabetic_data)
#renaming the columns to lower case
names(diabetic_data)<- tolower(names(diabetic_data))
################################################ Data cleaning ###################################################################
###Viewing the top and bottom 50 rows to identify if the data has been correctly parsed
#View(head(diabetic_data ,50))
#View(tail(diabetic_data ,50))
str(diabetic_data)
#Check for duplicated records
any(duplicated(diabetic_data))
#Describing the unique values to have a picture of data and checking for spelling
lapply(diabetic_data,unique)
#Renaming the African American category for race
diabetic_data$race<-ifelse(diabetic_data$race =="AfricanAmerican","African American",diabetic_data$race)
#Idenifying the dependent variable and classfying it into binary
diabetic_data$readmitted<-ifelse(diabetic_data$readmitted ==">30" | diabetic_data$readmitted =="<30" ,"YES",diabetic_data$readmitted)
#Removing variables that has one unique value or very much unproportinate in the distribution of counts.
lapply(diabetic_data,table)
novariability<-names(diabetic_data)[sapply(diabetic_data, function(x){any(data.frame(table(x)*100/length(x))$Freq >=99)})]
diabetic_data<-diabetic_data[,!(names(diabetic_data) %in% novariability)]
##MISSING VALUES
na<-colSums(is.na(diabetic_data))
perc.na <- data.frame(colname=names(diabetic_data),cnt.na=na,percent.na=colSums(is.na(diabetic_data))/nrow(diabetic_data),stringsAsFactors = F)
row.names(perc.na)<-NULL
perc.na[order(-perc.na$cnt.na),]
na.cols<-perc.na$colname[perc.na$percent.na >=0.30]
na.impute <- perc.na$colname[perc.na$percent.na < 0.30 & perc.na$percent.na >0]
#Removing variable that have more than 30% NA
diabetic_data<-diabetic_data[,!(names(diabetic_data) %in% na.cols)]
na.impute
#Instead of imputing some random value ,since variable with NA are of categorical type , takin a seperate category Unknown
diabetic_data$race[is.na(diabetic_data$race)]<-"unknown"
#Since icd9 codes have more 700 categories(diag_1,diag_2,diag_3) it could be difficut to analyze.Binning them based on icd9_chapter
#into 19 distinct groups.
#https://en.wikipedia.org/wiki/List_of_ICD-9_codes
icd9_chapters[[18]][2]<-"V91"
class_19<-paste("Class",toupper(letters)[1:19] ,sep=" ")
icd_classificaiton<-data.frame(type=names(icd9_chapters),class=class_19,start=sapply(icd9_chapters,'[[',1),end=sapply(icd9_chapters,'[[',2),stringsAsFactors = F)
row.names(icd_classificaiton)<-NULL
icd_classificaiton
#ICD with respect to diabetes is 250.XX create a variable to identify diabetic diagnosis
diabetic_data$diag1_diabetes<-grepl("^250",diabetic_data$diag_1)
diabetic_data$diag2_diabetes<-grepl("^250",diabetic_data$diag_2)
diabetic_data$diag3_diabetes <-grepl("^250",diabetic_data$diag_3)
diabetic_data$diag_diabetes <-ifelse(diabetic_data$diag1_diabetes | diabetic_data$diag2_diabetes | diabetic_data$diag3_diabetes,T,F)
diabetic_data$diag1_diabetes<-NULL
diabetic_data$diag2_diabetes<-NULL
diabetic_data$diag3_diabetes <-NULL
diabetic_data$diag1_class<-"unknown"
diabetic_data$diag2_class<-"unknown"
diabetic_data$diag3_class<-"unknown"
for(i in 1:nrow(icd_classificaiton)){
print(i)
icd<-as.character(icd_short_to_decimal(icd_expand_range(icd_classificaiton$start[i],icd_classificaiton$end[i])))
diabetic_data$diag1_class<-ifelse(str_pad(diabetic_data$diag_1,3,pad="0") %in% icd,icd_classificaiton$class[i],diabetic_data$diag1_class)
diabetic_data$diag2_class<-ifelse(str_pad(diabetic_data$diag_2,3,pad="0") %in% icd,icd_classificaiton$class[i],diabetic_data$diag2_class)
diabetic_data$diag3_class<-ifelse(str_pad(diabetic_data$diag_3,3,pad="0") %in% icd,icd_classificaiton$class[i],diabetic_data$diag3_class)
}
#Validating the count of NA
sum(diabetic_data$diag1_class=="unknown")
sum(diabetic_data$diag2_class=="unknown")
sum(diabetic_data$diag3_class=="unknown")
sum(is.na(diabetic_data$diag_1))
sum(is.na(diabetic_data$diag_2))
sum(is.na(diabetic_data$diag_3))
#Remaoving the icd codes
diabetic_data$diag_1<-NULL
diabetic_data$diag_2<-NULL
diabetic_data$diag_3<-NULL
#Indentifying the circulatory diagnostics[Class G]
diabetic_data$diag1_circulatory<- ifelse(diabetic_data$diag1_class=="Class G",T,F)
diabetic_data$diag2_circulatory<- ifelse(diabetic_data$diag2_class=="Class G",T,F)
diabetic_data$diag3_circulatory <- ifelse(diabetic_data$diag3_class=="Class G",T,F)
#Having any of the circulatory diagnostics
diabetic_data$diag_circulatory <- ifelse(diabetic_data$diag1_circulatory | diabetic_data$diag2_circulatory | diabetic_data$diag3_circulatory ,T,F)
diabetic_data$diag1_circulatory<- NULL
diabetic_data$diag2_circulatory<- NULL
diabetic_data$diag3_circulatory <- NULL
#Derived variable
#Creating a Comorbidity
diabetic_data$diag_comorbidity<- ifelse(diabetic_data$diag_circulatory & diabetic_data$diag_diabetes ,3,
ifelse(diabetic_data$diag_circulatory==F & diabetic_data$diag_diabetes==T ,1,
ifelse(diabetic_data$diag_circulatory==T & diabetic_data$diag_diabetes==F,2,0)))
diabetic_data$diag_circulatory<-NULL
diabetic_data$diag_diabetes<-NULL
#Checking for NA
sum(is.na(diabetic_data))
str(diabetic_data)
###########################################Exploratary data analysis #########################################################
#Based on data dictionay converting admission_type_id , discharge_disposition_id , admission_source_id to char
diabetic_data$admission_type_id<-as.character(diabetic_data$admission_type_id)
diabetic_data$discharge_disposition_id<- as.character(diabetic_data$discharge_disposition_id)
diabetic_data$admission_source_id <- as.character(diabetic_data$admission_source_id )
diabetic_data$diag_comorbidity<- as.character(diabetic_data$diag_comorbidity)
#Binning and outlier treatment after viewing plots Data expoloration
diabetic_data<-diabetic_data[-which(diabetic_data$gender=="Unknown/Invalid"),]
diabetic_data$metformin<-ifelse(diabetic_data$metformin=="No","No","Yes")
diabetic_data$repaglinide<-ifelse(diabetic_data$repaglinide=="No","No","Yes")
diabetic_data$glimepiride<-ifelse(diabetic_data$glimepiride=="No","No","Yes")
diabetic_data$glipizide<-ifelse(diabetic_data$glipizide=="No","No","Yes")
diabetic_data$glyburide<-ifelse(diabetic_data$glyburide=="No","No","Yes")
diabetic_data$pioglitazone<-ifelse(diabetic_data$pioglitazone=="No","No","Yes")
diabetic_data$rosiglitazone<-ifelse(diabetic_data$rosiglitazone=="No","No","Yes")
diabetic_data$admission_type_id<-ifelse( diabetic_data$admission_type_id %in% c("1","2","3"),diabetic_data$admission_type_id ,"0")
diabetic_data$discharge_disposition_id<-ifelse( diabetic_data$discharge_disposition_id %in% c("1","3","6"),diabetic_data$discharge_disposition_id ,"0")
diabetic_data$admission_source_id<-ifelse( diabetic_data$admission_source_id %in% c("1","7","17"),diabetic_data$admission_source_id ,"0")
diabetic_data$age<-ifelse(diabetic_data$age %in% c("[0-10)","[10-20)","[20-30)"),'[0-30)',
ifelse (diabetic_data$age %in% c("[30-40)","[40-50)","[50-60)"),'[30-60)',
ifelse(diabetic_data$age %in% c("[60-70)","[70-80)","[80-90)"),"[60-90)","[90-100)")))
diabetic_data$number_outpatient<-ifelse(diabetic_data$number_outpatient>1,"Yes","No")
diabetic_data$number_emergency<-ifelse(diabetic_data$number_emergency>1,"Yes","No")
diabetic_data$number_inpatient<-ifelse(diabetic_data$number_inpatient>2,">2","<2")
diabetic_data$time_in_hospital<-ifelse(diabetic_data$time_in_hospital>=10,"High",
ifelse(diabetic_data$time_in_hospital>=4,"Medium","Low"))
diabetic_data$num_procedures<-ifelse(diabetic_data$num_procedures>0,"Yes","No")
diabetic_data$num_medications<-ifelse(diabetic_data$num_medications>35,35,diabetic_data$num_medications)
diabetic_data$num_lab_procedures<-ifelse(diabetic_data$num_lab_procedures>96,96,diabetic_data$num_lab_procedures)
diabetic_data$number_diagnoses<-ifelse(diabetic_data$number_diagnoses>=9,"G8","8L")
catagorical_var <- names(diabetic_data)[which(sapply(diabetic_data, is.character))]
measure_var <- names(diabetic_data)[which(sapply(diabetic_data, is.numeric))]
catagorical_var<-catagorical_var[!catagorical_var=="readmitted"]
for(i in catagorical_var) {
readline(prompt="press enter to view plots")
print(i)
plot1<-plot1<-ggplot(diabetic_data,aes(factor(diabetic_data[,i])))+geom_bar(fill="steelblue")+
xlab(i) + ylab("Frequency") +geom_text(stat='count',aes(label=..count..),hjust=0)+coord_flip()
print(diabetic_data %>% group_by(diabetic_data[,i]) %>%
summarise(percent=100*n()/length(diabetic_data[,i])) %>% arrange(desc(percent)))
plot2<-ggplot(diabetic_data,aes(factor(diabetic_data[,i]),fill=factor(diabetic_data[,"readmitted"])))+geom_bar(position = 'fill') +
xlab(i) + ylab("Relative perccentage") +scale_y_continuous(label=percent) + labs(fill="readmitted") + coord_flip()
grid.arrange(plot1,plot2,nrow=2)
}
meas_freq_line<-function(df,measure)
{
plot1<-ggplot(df,aes(df[,measure]))+
geom_histogram(bins=nclass.Sturges(df[,measure]))+
xlab(measure) + ylab("Frequency")
plot2<- ggplot(df,aes(y=df[,measure],x="")) + geom_boxplot(outlier.color = "red") + ylab(measure)
plot3<- ggplot(df,aes(y=df[,measure],x=df[,"readmitted"])) + geom_boxplot(outlier.color = "red") + ylab(measure) + xlab("readmitted")
grid.arrange(plot1,plot2,plot3)
}
#Frequency plot of measure variables
for(i in measure_var) {
readline(prompt="press enter to view plots")
print(i)
print(quantile(diabetic_data[,i], probs = seq(0,1,0.01),na.rm=T))
print(mean(diabetic_data[,i]))
print(meas_freq_line(diabetic_data,i))
cat("\nUpper Limit:",quantile(diabetic_data[,i],0.75)+1.5*IQR(diabetic_data[,i]),"\n")
cat("Lower Limit:",quantile(diabetic_data[,i],0.25)-1.5*IQR(diabetic_data[,i]),"\n")
}
##############################Dummy variable creation for Logistic ############################
# Expand a categorical vector into numeric dummy columns.
#
# Args:
#   vector:   vector to encode (coerced to factor).
#   vec_name: base name for the output column(s).
#
# Returns a data.frame: one 0/1 column per non-reference level, named
# "<vec_name>.<level>", when there are more than two levels; otherwise a
# single 0/1-coded column named <vec_name>.
dummy_conv <- function(vector, vec_name)
{
  vector <- as.factor(vector)
  if (length(unique(vector)) > 2)
  {
    # model.matrix gives an intercept plus one dummy per non-reference
    # level; drop the intercept and rename "vectorX" -> "<vec_name>.X".
    output <- as.data.frame(model.matrix(~vector))[, -1]
    names(output) <- gsub("vector", paste0(vec_name, "."), names(output))
    names(output) <- gsub(" ", "", names(output))
  }
  else {
    # Recode the (at most two) levels as 0, 1. FIX: the original
    # 0:length(unique(vector)) was off by one and attached a spurious,
    # unused extra level to the factor.
    levels(vector) <- seq_len(nlevels(vector)) - 1
    output <- as.data.frame(as.numeric(levels(vector))[vector])
    names(output) <- vec_name
  }
  output
}
diabetic_data_l<-diabetic_data
diabetic_data_l$diag1_class<-NULL
diabetic_data_l$diag2_class<-NULL
diabetic_data_l$diag3_class<-NULL
catagorical_var <- names(diabetic_data_l)[which(sapply(diabetic_data_l, is.character))]
measure_var <- names(diabetic_data_l)[which(sapply(diabetic_data_l, is.numeric))]
for(i in c(catagorical_var,"readmitted"))
{
print(i)
diabetic_data_l<-cbind(diabetic_data_l[,-which(names(diabetic_data_l)==i)] ,dummy_conv(diabetic_data_l[,i],i))
}
#Removing the id columns for model building
patient_nbr<-diabetic_data$patient_nbr
encounter_id<-diabetic_data$encounter_id
diabetic_data$patient_nbr<- NULL
diabetic_data$encounter_id<- NULL
#Seperate df for random forrest
diabetic_data<-data.frame(unclass(diabetic_data))
#splitting of train and test dataset
set.seed(2017)
train_indices <- sample.split(diabetic_data$readmitted,SplitRatio=0.8)
train <- diabetic_data[train_indices,]
test <- diabetic_data[!(train_indices),]
train1 <- diabetic_data_l[train_indices,]
test1 <- diabetic_data_l[!(train_indices),]
##################################################### Random Forrest& Model Evaluation #########################################
set.seed(2017)
bestmtry <- tuneRF(train[,names(train)!="readmitted"],train$readmitted, stepFactor=1.5, improve=1e-5, ntree=2000)
?randomForest
rf_model <- randomForest(readmitted ~ ., data=train,mtry=3, proximity=FALSE,ntree=1800, do.trace=TRUE,na.action=na.omit)
rfpredicted<-predict(rf_model,test,type="prob")[,2]
rpredicted <-factor(ifelse(rfpredicted>=0.7,"YES","NO"))
confusionMatrix(rpredicted,test$readmitted,positive="YES")
#Accuracy : 0.6087
#Sensitivity : 0.6118
#Specificity : 0.6060
levels(rpredicted)<-c(0,1)
rpredicted<-levels(rpredicted)[rpredicted]
act_readmit<-test$readmitted
levels(act_readmit)<-c(0,1)
act_readmit<-levels(act_readmit)[act_readmit]
auc(rpredicted,act_readmit)
#0.6067
var.imp <- data.frame(importance(rf_model,
type=2))
var.imp$variables<-row.names(var.imp)
row.names(var.imp)<-NULL
var.imp[order(var.imp$MeanDecreaseGini,decreasing = T),]
############################################# logistic Regression & Model evaluation ################################################
logistic_1 <- glm(readmitted~.,data=train1,family="binomial")
summary(logistic_1)
vif(logistic_1)
#Warning message:
#glm.fit: fitted probabilities numerically 0 or 1 occurred
logistic_2 <- stepAIC(logistic_1,direction="both")
summary(logistic_2)
vif(logistic_2)
#Removed variable based vif and lower p values
logistic_3 <-glm(formula = readmitted ~ num_medications + race.Asian + race.Hispanic +
race.Other + race.unknown + gender + `age.[30-60)` + admission_type_id.2 +
discharge_disposition_id.1 + discharge_disposition_id.3 +
discharge_disposition_id.6 + admission_source_id.17 +
admission_source_id.7 + time_in_hospital.Low + num_procedures +
number_outpatient + number_emergency + number_inpatient +
number_diagnoses +
`a1cresult.>8` + a1cresult.None + metformin + insulin.No + diabetesmed + diag_comorbidity.1 + diag_comorbidity.2 +
diag_comorbidity.3, family = "binomial", data = train1)
summary(logistic_3)
vif(logistic_3)
# Evaluate the final logistic model on the hold-out set.
l_predic_prob <- predict(logistic_3, test1, type = "response")
# Classify at the chosen probability cutoff (0.446).
l_predic <- ifelse(l_predic_prob >= 0.446, 1, 0)
# FIX: removed a stray empty positional argument (",,") that was silently
# matched to confusionMatrix()'s `dnn` parameter.
confusionMatrix(l_predic, test1$readmitted, positive = "1")
auc(l_predic, test1$readmitted)
#0.59
##################################################### Risk stratification ###################################################
# The random forest model performed slightly better than logistic
# regression, so it is used to score the full population.
diabetic_data_risk <- predict(rf_model, diabetic_data, type = "prob")[, 2]
# Stratify by predicted readmission probability.
diabetic_data$risk_strat <- ifelse(diabetic_data_risk >= 0.7, "High risk",
                                 ifelse(diabetic_data_risk >= 0.3, "Medium risk", "Low risk"))
diabetic_data$risk_strat <- factor(diabetic_data$risk_strat, levels = c("Low risk", "Medium risk", "High risk"))
barplot(table(diabetic_data$readmitted))
barplot(table(diabetic_data$risk_strat))
# Restore the identifier columns removed before modelling.
diabetic_data$patient_nbr <- patient_nbr
diabetic_data$encounter_id <- encounter_id
# Keep only the most recent encounter per patient.
temp <- diabetic_data %>% group_by(patient_nbr) %>% summarise(encounter_id = max(encounter_id))
unique_patients <- merge(diabetic_data, temp, by = c("patient_nbr", "encounter_id"))
length(unique(patient_nbr))
# FIX: the percentage expression was split across two lines, leaving a line
# that started with `*` — a syntax error in R; joined into one expression.
table(unique_patients$risk_strat) * 100 / length(unique_patients$risk_strat)
barplot(table(unique_patients$risk_strat) * 100 / length(unique_patients$risk_strat))
barplot(table(unique_patients$readmitted))
|
1acb557e6c430f8ce72ebad3303e3bab79f3c06d
|
015bf4c3a06f14cc1355b9c64bc73609c12cfafc
|
/labels.R
|
0df426b0e8c997ab43fb5eb48b9e856e141a7f64
|
[] |
no_license
|
PurityNyakundi/testDummy
|
86ca93d858e362aca824c5895477246a44f99634
|
32906583fc9a74272bd4833921e4ef6b07f70028
|
refs/heads/master
| 2020-09-09T10:57:58.825891
| 2019-11-17T20:39:11
| 2019-11-17T20:39:11
| 221,428,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 103
|
r
|
labels.R
|
ggplot(data = mpg, mapping = aes(x = class, y = hwy)) +
labs(caption = "(based on data from...)")
|
2797f6e030e784e3ec1f588a16495290fd13f656
|
0515fd8336ff2e95434aec3266b34edfe22068fa
|
/inst/shiny/v1.3/marker_genes/select_content.R
|
4ff223147dae047f8046f3c788632a431816801f
|
[
"MIT"
] |
permissive
|
romanhaa/cerebroApp
|
d4480945c0d80c49f94d9befe0cd5a8b7c0a624d
|
0de48b67746bf5d5ad8d64f63ead3b599322fb26
|
refs/heads/master
| 2021-11-24T00:53:19.851143
| 2021-11-20T11:48:16
| 2021-11-21T17:09:50
| 178,224,933
| 35
| 17
|
NOASSERTION
| 2021-03-03T21:33:20
| 2019-03-28T14:52:29
|
HTML
|
UTF-8
|
R
| false
| false
| 2,772
|
r
|
select_content.R
|
##----------------------------------------------------------------------------##
## Select method and table (group).
##----------------------------------------------------------------------------##
##----------------------------------------------------------------------------##
## UI element to set layout for selection of method and group, which are split
## because the group depends on which method is selected.
##----------------------------------------------------------------------------##
output[["marker_genes_select_method_and_table_UI"]] <- renderUI({
  # When at least one marker-gene method is available, show the method and
  # table selectors side by side; otherwise show a fallback message box.
  if (
    !is.null(getMethodsForMarkerGenes()) &&
    length(getMethodsForMarkerGenes()) > 0
  ) {
    tagList(
      fluidRow(
        column(
          6,
          uiOutput("marker_genes_selected_method_UI")
        ),
        column(
          6,
          uiOutput("marker_genes_selected_table_UI")
        )
      )
    )
  } else {
    # No methods found: render the "No data available." message instead.
    fluidRow(
      cerebroBox(
        title = boxTitle("Marker genes"),
        textOutput("marker_genes_message_no_method_found")
      )
    )
  }
})
##----------------------------------------------------------------------------##
## UI element to select from which method the results should be shown.
##----------------------------------------------------------------------------##
output[["marker_genes_selected_method_UI"]] <- renderUI({
  tagList(
    div(
      # FIX: the heading was opened with <h3> but closed with </h2>;
      # tags are now matched.
      HTML('<h3 style="text-align: center; margin-top: 0"><strong>Choose a method:</strong></h3>')
    ),
    fluidRow(
      column(2),
      column(8,
        # Dropdown populated with the available marker-gene methods.
        selectInput(
          "marker_genes_selected_method",
          label = NULL,
          choices = getMethodsForMarkerGenes(),
          width = "100%"
        )
      ),
      column(2)
    )
  )
})
##----------------------------------------------------------------------------##
## UI element to select which group should be shown.
##----------------------------------------------------------------------------##
output[["marker_genes_selected_table_UI"]] <- renderUI({
  # Do not render until a method has been picked.
  req(input[["marker_genes_selected_method"]])
  tagList(
    div(
      # FIX: the heading was opened with <h3> but closed with </h2>;
      # tags are now matched.
      HTML('<h3 style="text-align: center; margin-top: 0"><strong>Choose a table:</strong></h3>')
    ),
    fluidRow(
      column(2),
      column(8,
        # Dropdown with the groups available for the selected method.
        selectInput(
          "marker_genes_selected_table",
          label = NULL,
          choices = getGroupsWithMarkerGenes(input[["marker_genes_selected_method"]]),
          width = "100%"
        )
      ),
      column(2)
    )
  )
})
##----------------------------------------------------------------------------##
## Alternative text message if data is missing.
##----------------------------------------------------------------------------##
# Shown in place of the selectors when no marker-gene method is available.
output[["marker_genes_message_no_method_found"]] <- renderText({
  "No data available."
})
|
4efb6e70f26461be2cb0cd2ce687aed2b210f7e2
|
420cac816c739b8f6a3581c1628d706f7d398beb
|
/R/E2vect.R
|
3b234d46386c24c365f7c37c9d0a0d80b39b20ba
|
[] |
no_license
|
cran/RobustAFT
|
d80a89efb8ffcc80b604d5959893210aab0ae31b
|
357b7400ae0a4d0be157b6a46970eb04d8b9ea51
|
refs/heads/master
| 2023-08-31T10:42:53.415730
| 2023-08-21T16:40:02
| 2023-08-21T17:30:23
| 17,693,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
E2vect.R
|
# Assemble the E2 column vector from two numerical integrals of the
# package-level integrands `s2psiphi.w` (over [kl, ku], scaled by xbar)
# and `s2chiphi.w` (over [l, u]); integrand definitions live elsewhere
# in the package.
E2vect <- function(xbar, kl, ku, l, u) {
  psi_part <- integrate(s2psiphi.w, lower = kl, upper = ku)$value
  chi_part <- integrate(s2chiphi.w, lower = l, upper = u)$value
  matrix(c(psi_part * xbar, chi_part), ncol = 1)
}
|
81db3a6d7497393ab59fb3056334b51aa3f26eb1
|
9f59174bd4fe4f6912953446c4e675b30d040688
|
/plot4.R
|
f4393d12a33ea0a3105d0a56d7080adde2a25749
|
[] |
no_license
|
kmajeed/ExData_Plotting1
|
61ac093fa14d35ff0abb2643687038e102c9ed44
|
41c5c4ef49eb6b06fc4e1e76c7e1d64b301c19a6
|
refs/heads/master
| 2020-12-27T08:57:02.901858
| 2014-09-06T15:22:16
| 2014-09-06T15:22:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,086
|
r
|
plot4.R
|
plot4 = function(){
  #Author: Khurram Majeed
  #Date : 09/14
  # Produces ./plots/plot4.png: a 2x2 panel of February household power
  # data (global active power, voltage, sub-metering, reactive power).
  #------------------------------------------------------------------------
  # Source the helper functions (environment setup + February data extract).
  source('setup.R')
  source('get.feb.data.R')
  #------------------------------------------------------------------------
  setup();
  get.feb.data();
  #------------------------------------------------------------------------
  # NOTE(review): `febFile` is assigned but never used — presumably it was
  # intended as the fread() input below; confirm.
  febFile = "./data/febData.txt";
  cat("[plot4.R]", "Reading the extracted data into memory", "\n");
  data <- fread("./data/febData.txt",
                sep=";",
                header=TRUE,
                na.strings="?")
  #------------------------------------------------------------------------
  cat("[plot4.R]", "convert dates", "\n");
  data$Date = as.Date(data$Date, "%d/%m/%Y")
  # Combine date + time into POSIXct timestamps for the x-axis.
  plotData = as.POSIXct(paste(as.character(data$Date), data$Time, sep=" "))
  #------------------------------------------------------------------------
  cat("[plot4.R]", "Opening PNG device for plotting", "\n");
  png("./plots/plot4.png",width = 480,height = 480);
  # 2 rows x 2 columns of panels.
  par(mfrow=c(2,2));
  # Top-left: global active power over time.
  plot(plotData, data$Global_active_power, xlab="", type="l", ylab="Global Active Power");
  # Top-right: voltage over time.
  plot(plotData,data$Voltage, type="l",xlab = "datetime", ylab= "Voltage");
  # Bottom-left: the three sub-metering series plus a legend.
  plot(plotData,data$Sub_metering_1, xlab="", type="l", ylab="Energy sub metering");
  lines(plotData,data$Sub_metering_2,col="red");
  lines(plotData,data$Sub_metering_3,col="blue");
  legend("topright", cex=1, col=c("black", "red", "blue"),lwd=2,bty="n",y.intersp=0.8,legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"));
  # Bottom-right: global reactive power over time.
  plot(plotData, data$Global_reactive_power, type="l",xlab = "datetime", ylab= "Global_reactive_power");
  cat("[plot4.R]", "Closing the device", "\n");
  dev.off();
  cat("[plot4.R]", "plot4.PNG saved...", "\n");
  #__________________________________________________________________
}
|
7421aae86650281ab8a93ca6b0ad226c944d502e
|
0d31d8a8b63ff605ab762dee441d3f45260f44bb
|
/ExPanDaR_examples.R
|
e5ef4cf924b107191dceb03b7bf5c2182ee12aa7
|
[] |
no_license
|
mdelcorvo/ExPanDaR
|
891df423da6d41a85544d4331886fb0c171e21ea
|
b0f3e545046b1bae2e50d2be29e26bf6374ee259
|
refs/heads/master
| 2022-10-15T12:49:29.891071
| 2020-06-13T13:48:53
| 2020-06-13T13:48:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,913
|
r
|
ExPanDaR_examples.R
|
# --- Header -------------------------------------------------------------------
# (C) Joachim Gassen 2020, gassen@wiwi.hu-berlin.de, see LICENSE for license
#
# This file contains some simple use cases for the ExPanDaR package.
# It is not a part of the package itself.
# ------------------------------------------------------------------------------
# Start this with a virgin R session
library(ExPanDaR)
ExPanD(export_nb_option = TRUE)
# --- Use ExPanD with cross-sectional data -------------------------------------
ExPanD(mtcars, export_nb_option = TRUE)
# --- Use ExPanD on a condensed Worldbank data set -----------------------------
library(ExPanDaR)
library(tidyverse)
# Build "name = definition" assignment strings from two parallel
# character vectors, comma-terminated except for the last entry, so
# they can be spliced into a mutate()/transmute() call body.
assign_vars <- function(var_name, definition) {
  stmts <- paste0(var_name, " = ", definition)
  n <- length(stmts)
  if (n > 1) {
    stmts[-n] <- paste0(stmts[-n], ",")
  }
  stmts
}
# Build a condensed data frame by evaluating, for each variable, the dplyr
# expression given as a string in `definition`; rows are ordered by the
# cross-sectional/time-series ids and non-id variables are computed within
# each cross-sectional group. Rows with NA in any variable flagged
# can_be_na != 1 are dropped.
#
# NOTE(review): this assembles a dplyr pipeline as text and runs it with
# eval(parse(...)) — definitions are executed as arbitrary R code, so the
# input must be trusted; a rlang/tidy-eval rewrite would be safer.
calc_variables <- function(df, var_name, definition, type, can_be_na) {
  # Identifier variables: cross-sectional and time-series ids.
  cs_id <- definition[type == "cs_id"]
  ts_id <- definition[type == "ts_id"]
  code <- c("df %>% arrange(",
            paste(c(cs_id, ts_id), collapse=", "),
            ") %>%")
  # First assign the cross-sectional id variables ...
  vars_to_assign <- which(var_name %in% cs_id)
  code <- c(code, "mutate(",
            assign_vars(var_name[vars_to_assign], definition[vars_to_assign]),
            ") %>% ")
  # ... then compute all remaining variables within each cs group.
  code <- c(code,"group_by(",
            paste(cs_id, collapse=", "),
            ") %>%")
  vars_to_assign <- which(!var_name %in% cs_id)
  code <- c(code, "transmute(",
            assign_vars(var_name[vars_to_assign], definition[vars_to_assign]),
            ") %>%")
  # Drop observations with NA in required (can_be_na != 1) variables.
  code <- c(code, "drop_na(",
            paste(var_name[can_be_na != 1], collapse = ","),
            ") -> ret ")
  eval(parse(text = code))
  return(as.data.frame(ret))
}
wb_var_def <- worldbank_var_def %>%
slice(c(1:4,8,16:23))
wb_var_def <- wb_var_def[c(1:5, 13, 6:12),]
wb_var_def$can_be_na[wb_var_def$var_name == "lifeexpectancy"] <- 0
wb <- calc_variables(worldbank,
wb_var_def$var_name,
wb_var_def$var_def,
wb_var_def$type,
wb_var_def$can_be_na)
# write_csv(wb, "wb_condensed.csv")
ExPanD(wb, cs_id = "country", ts_id ="year", export_nb_option = TRUE)
# A niced ExPanD version with variable definitions and
# a short info text to put online.
wb_data_def <- wb_var_def %>%
left_join(worldbank_data_def, by = c("var_def" = "var_name")) %>%
select(-var_def) %>%
rename(var_def = var_def.y,
type = type.x) %>%
select(var_name, var_def, type, can_be_na)
# write_csv(wb_data_def, "wb_data_def.csv")
title <- "Explore the Preston Curve with ExPanDaR"
abstract <- paste(
"The data for this sample has been collected using the",
"<a href=https://data.worldbank.org>World Bank API</a>.",
"See this <a href=https://joachim-gassen.github.io/2018/12/interactive-panel-eda-with-3-lines-of-code>",
"blog post</a> for further information."
)
ExPanD(wb, df_def = wb_data_def,
title = title, abstract = abstract,
export_nb_option = TRUE)
# --- Customize ExPanD to explore EPA fuel economy data-------------------------
# See https://joachim-gassen.github.io/2019/04/customize-your-interactive-eda-explore-the-fuel-economy-of-the-u.s.-car-market/
# for more info
# The following two chuncks borrow
# from the raw data code of the
# fueleconomy package by Hadley Wickham,
# See: https://github.com/hadley/fueleconomy
library(tidyverse)
library(ExPanDaR)
if(!file.exists("vehicles.csv")) {
tmp <- tempfile(fileext = ".zip")
download.file("http://www.fueleconomy.gov/feg/epadata/vehicles.csv.zip",
tmp, quiet = TRUE)
unzip(tmp, exdir = ".")
}
raw <- read.csv("vehicles.csv", stringsAsFactors = FALSE)
countries <- read.csv("https://joachim-gassen.github.io/data/countries.csv",
stringsAsFactors = FALSE)
# Build the cleaned vehicle-level data set:
# - prefer the unrounded MPG fields (*U) when they are positive,
# - attach the maker's country, and
# - collapse fuel / class / drive / transmission into coarse categories.
vehicles <- raw %>%
mutate(car = paste(make, model, trany),
mpg_hwy = ifelse(highway08U > 0, highway08U, highway08),
mpg_city = ifelse(city08U > 0, city08U, city08)) %>%
left_join(countries) %>%
select(car, make, country, trans = trany,
year,
class = VClass, drive = drive, fuel = fuelType,
cyl = cylinders, displ = displ,
mpg_hwy, mpg_city) %>%
filter(drive != "",
year > 1985,
year < 2020) %>%
mutate(fuel = case_when(
fuel == "CNG" ~ "gas",
fuel == "Gasoline or natural gas" ~ "hybrid_gas",
fuel == "Gasoline or propane" ~ "hybrid_gas",
fuel == "Premium and Electricity" ~ "hybrid_electro",
fuel == "Premium Gas or Electricity" ~ "hybrid_electro",
fuel == "Premium Gas and Electricity" ~ "hybrid_electro",
fuel == "Regular Gas or Electricity" ~ "hybrid_electro",
fuel == "Electricity" ~ "electro",
fuel == "Diesel" ~ "diesel",
TRUE ~ "gasoline"
),
class = case_when(
grepl("Midsize", class) ~ "Normal, mid-size",
grepl("Compact", class) ~ "Normal, compact",
grepl("Small Station Wagons", class) ~ "Normal, compact",
grepl("Large Cars", class) ~ "Normal, large",
grepl("Minicompact", class) ~ "Normal, sub-compact",
grepl("Subcompact", class) ~ "Normal, sub-compact",
grepl("Two Seaters", class) ~ "Two Seaters",
grepl("Pickup Trucks", class) ~ "Pickups",
grepl("Sport Utility Vehicle", class) ~ "SUVs",
grepl("Special Purpose Vehicle", class) ~ "SUVs",
grepl("Minivan", class) ~ "(Mini)vans",
grepl("Vans", class) ~ "(Mini)vans"
),
drive = case_when(
grepl("4-Wheel", drive) ~ "4-Wheel Drive",
# NOTE(review): next line repeats the pattern above and is unreachable.
grepl("4-Wheel", drive) ~ "4-Wheel Drive",
grepl("All-Wheel", drive) ~ "4-Wheel Drive",
grepl("Front-Wheel", drive) ~ "Front-Wheel Drive",
grepl("Rear-Wheel", drive) ~ "Rear-Wheel Drive"
),
trans = case_when(
grepl("Automatic", trans) ~ "Automatic",
grepl("Manual", trans) ~ "Manual"
)) %>%
na.omit()
# Variable dictionary passed to ExPanD; row order must match names(vehicles).
# 'cs_id' marks the cross-sectional identifier, 'ts_id' the time identifier.
df_def <- data.frame(
var_name = names(vehicles),
var_def = c("Make, model and transition type indentifying a unique car in the data",
"Make of car",
"Country where car producing firm is loacted",
"Transition type (automatic or manual)",
"Year of data",
"Classification type of car (simplified from orginal data)",
"Drive type of car (Front Wheel, Rear Wheel or 4 Wheel)",
"Fuel type (simplified from orginal data)",
"Number of engine cylinders",
"Engine displacement in liters",
"Highway miles per gallon (MPG). For electric and CNG vehicles this number is MPGe (gasoline equivalent miles per gallon).",
"City miles per gallon (MPG). For electric and CNG vehicles this number is MPGe (gasoline equivalent miles per gallon)."),
type = c("cs_id", rep("factor", 3), "ts_id", rep("factor", 3), rep("numeric", 4))
)
# Narrative HTML snippets interleaved between ExPanD components below;
# the order here must match the html_block entries in `components`.
html_blocks <- c(
paste("<div class='col-sm-12'>",
"By default, this display uses all data from car makes with more",
"than 100 cars in the 'fueleconomy.gov' database.",
"Above, you can limit the analysis to cars from a certain make,",
"class, country, fuel type or other factor present in the data.",
"</div>"),
paste("<div class='col-sm-12'>",
"In the display above, remove the check mark to see the absolute",
"number of cars included in the data each year.",
"Also, change the additional factor to see how the distribution",
"of cars across countries, transition types, etc. changes over time",
"</div>"),
paste("<div class='col-sm-12'>",
"In the two tables above, you can assess the distributions of the",
"four numerical variables of the data set. Which car has the",
"largest engine of all times?",
"</div>"),
paste("<div class='col-sm-12'>",
"Explore the numerical variables across factors. You will see,",
"not surprisingly, that fuel economy varies by car class.",
"Does it also vary by drive type?",
"</div>"),
paste("<div class='col-sm-12'>",
"The above two panels contain good news. Fuel economy has",
"increased over the last ten years. See for yourself:",
"Has the size of engines changed as well?",
"</div>"),
paste("<div class='col-sm-12'>",
"The scatter plot documents a clear link between engine size",
"and fuel economy in term of miles per gallon.",
"Below, you can start testing for associations.",
"</div>"),
paste("<div class='col-sm-12'>",
"Probably, you will want to test for some associations that",
"require you to construct new variables. No problem. Just enter the",
"variable definitions above. Some ideas on what to do:",
"<ul><li>Define country dummies (e.g., country == 'US') to see",
"whether cars from certain countries are less fuel efficient than others.</li>",
"<li>Define a dummy for 4-Wheel drive cars to assess the penalty",
"of 4-Wheel drives on fuel economy.</li>",
"<li>If you are from a metric country, maybe your are mildly annoyed",
"by the uncommon way to assess fuel economy via miles per gallon.",
"Fix this by defining a liter by 100 km measure",
"(hint: 'l100km_hwy := 235.215/mpg_hwy').</li></ul>",
"</div>"),
paste("<div class='col-sm-12'>",
"Above, you can play around with certain regression parameters.",
"See how robust coefficients are across car classes by estimating",
"the models by car class ('subset' option).",
"Try a by year regression to assess the development of fuel economy",
"over time. <br> <br>",
"If you like your analysis, you can download a zipfile containing",
"the data and an R notebook reporting the analysis. Alternatively,",
"you can store the ExPanD configuration and reload it at a later",
"stage.",
"</div>")
)
# Initial ExPanD configuration: pre-selects variables, axes and regression
# settings so the app opens with a meaningful default view.
cl <- list(
ext_obs_period_by = "2019",
bgbg_var = "mpg_hwy",
bgvg_var = "mpg_hwy",
scatter_loess = FALSE,
delvars = NULL,
scatter_size = "cyl",
bar_chart_relative = TRUE,
reg_x = c("cyl", "displ", "trans"),
scatter_x = "displ",
reg_y = "mpg_hwy",
scatter_y = "mpg_hwy",
bgvg_byvar = "class",
quantile_trend_graph_var = "mpg_hwy",
bgtg_var = "mpg_hwy",
bgtg_byvar = "class",
bgbg_byvar = "country",
scatter_color = "country", bar_chart_var2 = "class",
ext_obs_var = "mpg_hwy",
trend_graph_var1 = "mpg_hwy",
trend_graph_var2 = "mpg_city",
sample = "vehicles"
)
abstract <- paste(
"This interactive display features the",
"<a href=https://www.fueleconomy.gov/>",
"fuel economy data provided by the U.S. Environmental Protection Agency.</a>",
"It allows you to explore the fuel economy of cars in the U.S. market",
"across time and other dimensions.",
"<br> <br>",
"It is based on the 'ExPanD' display provided by the",
"<a href=https://joachim-gassen.github.io/ExPanDaR>'ExPanDaR' package</a>.",
"Click <a href=https://jgassen.shinyapps.io/expand>here</a> to explore your",
"own data with 'ExPanD'.",
"<br> <br>",
"Otherwise: Scroll down and start exploring!"
)
# Launch the customized app; each html_block = TRUE entry consumes the next
# element of html_blocks in order.
ExPanD(vehicles, df_def = df_def, config_list = cl,
title = "Explore the Fuel Economy of Cars in the U.S. Market",
abstract = abstract,
components = c(subset_factor = TRUE,
html_block = TRUE,
bar_chart = TRUE,
html_block = TRUE,
descriptive_table = TRUE,
ext_obs = TRUE,
html_block = TRUE,
by_group_bar_graph = TRUE,
by_group_violin_graph = TRUE,
html_block = TRUE,
trend_graph = TRUE,
quantile_trend_graph = TRUE,
by_group_trend_graph = TRUE,
html_block = TRUE,
scatter_plot = TRUE,
html_block = TRUE,
udvars = TRUE,
html_block = TRUE,
regression = TRUE,
html_block = TRUE),
html_blocks = html_blocks,
export_nb_option = TRUE
)
# --- Use ExPanD to explore IMDB data ------------------------------------------
library(tidyverse)
# Pull the raw IMDb dumps (tab-separated, \N encodes missing values).
# NOTE(review): title_akas, title_crew and title_episode are downloaded but
# never used below -- candidates for removal to save bandwidth.
name_basics <- read_tsv("https://datasets.imdbws.com/name.basics.tsv.gz",
na = "\\N", quote = '')
title_basics <- read_tsv("https://datasets.imdbws.com/title.basics.tsv.gz",
na = "\\N", quote = '')
title_ratings <- read_tsv("https://datasets.imdbws.com/title.ratings.tsv.gz",
na = "\\N", quote = '')
title_akas <- read_tsv("https://datasets.imdbws.com/title.akas.tsv.gz",
na = "\\N", quote = '')
title_crew <- read_tsv("https://datasets.imdbws.com/title.crew.tsv.gz",
na = "\\N", quote = '')
title_episode <- read_tsv("https://datasets.imdbws.com/title.episode.tsv.gz",
na = "\\N", quote = '')
title_principals <- read_tsv("https://datasets.imdbws.com/title.principals.tsv.gz",
na = "\\N", quote = '')
# People whose primary profession includes acting / directing.
name_basics %>%
filter(str_detect(primaryProfession, "actor|actress")) %>%
select(nconst, primaryName, birthYear) -> actors
name_basics %>%
filter(str_detect(primaryProfession, "director")) %>%
select(nconst, primaryName, birthYear) -> directors
# For every title, keep the highest-billed (lowest `ordering`) actor/actress.
lead_actor <- title_principals %>%
filter(str_detect(category, "actor|actress")) %>%
select(tconst, ordering, nconst, category) %>%
group_by(tconst) %>%
filter(ordering == min(ordering)) %>%
mutate(lead_actor_gender = ifelse(category == "actor", "male", "female")) %>%
left_join(name_basics) %>%
rename(lead_actor_name = primaryName,
lead_actor_yob = birthYear,
lead_actor_yod = deathYear) %>%
select(tconst, lead_actor_name, lead_actor_gender,
lead_actor_yob, lead_actor_yod)
# Likewise, the first-listed director per title.
director <- title_principals %>%
filter(str_detect(category, "director")) %>%
select(tconst, ordering, nconst, category) %>%
group_by(tconst) %>%
filter(ordering == min(ordering)) %>%
left_join(name_basics) %>%
rename(director_name = primaryName,
director_yob = birthYear,
director_yod = deathYear) %>%
select(tconst, director_name, director_yob, director_yod)
# Final sample: non-adult movies / TV series with at least 10,000 votes,
# with ages derived from start year minus year of birth (NA if non-positive).
imdb <- title_ratings %>%
left_join(title_basics) %>%
left_join(lead_actor) %>%
left_join(director) %>%
filter(titleType == "movie" | titleType == "tvSeries",
numVotes >= 10000,
isAdult == 0) %>%
mutate(year = startYear,
lead_actor_age = ifelse(startYear - lead_actor_yob > 0,
startYear - lead_actor_yob, NA),
director_age = ifelse(startYear - director_yob > 0,
startYear - director_yob, NA),
genre = str_split(genres, ',', simplify = TRUE)[,1],
type = ifelse(titleType == "movie", "Movie", "TV Series")) %>%
rename(avg_rating = averageRating,
num_votes = numVotes,
length_minutes = runtimeMinutes,
title = primaryTitle) %>%
select(tconst, year, type, title, genre,
num_votes, avg_rating, length_minutes,
director_name, director_age,
lead_actor_name, lead_actor_age, lead_actor_gender)
# Restore a previously saved ExPanD configuration and launch the app.
cl <- readRDS("IMDb_ExPanD.RDS")
ExPanD(
imdb, cs_id = c("tconst", "title"), config_list = cl,
components = c(bar_chart = FALSE),
title = "Explore IMDb Data", abstract = paste(
"Data as provided by the fabulous",
"<a href=https://www.imdb.com>Internet Movie Database</a>."
),
export_nb_option = TRUE
)
# ------------------------------------------------------------------------------
|
9e849be578229daaff74a50f2059af9bbc966636
|
02659617733feef0c99257d9db5e8d550cd3036b
|
/data-geocoding-preprocessing/combine_locations.R
|
1ad4a222527118d43bded0a384fc6617c3989b26
|
[] |
no_license
|
anqichen9856/carpark-availability
|
1d69a51912a16a0680a20c61fdc34fa29af7d11c
|
359bc8de1b6608d00a1ea896a0654ec66a936129
|
refs/heads/master
| 2023-06-22T23:55:57.614506
| 2021-07-22T06:14:43
| 2021-07-22T06:14:43
| 313,694,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,502
|
r
|
combine_locations.R
|
# Combine the individually pre-processed Singapore location files into one
# table, tagging every row with its source category, then persist the result.
library(dplyr)
attractions <- read.csv("data/data-processed/attractions.csv") %>% mutate(category="Tourist Attractions")
condominiums <- read.csv("data/data-processed/condominiums.csv") %>% mutate(category="Condominiums")
hawker_centers <- read.csv("data/data-processed/hawker_centers.csv") %>% mutate(category="Hawker Centers")
hdb <- read.csv("data/data-processed/hdb.csv") %>% mutate(category="HDB Flats")
hospitals_clinics <- read.csv("data/data-processed/hospitals_clinics.csv") %>% mutate(category="Hospitals & Clinics")
hotels <- read.csv("data/data-processed/hotels.csv") %>% mutate(category="Hotels")
malls <- read.csv("data/data-processed/malls.csv") %>% mutate(category="Shopping Malls")
mrt_lrt <- read.csv("data/data-processed/mrt_lrt.csv") %>% mutate(category="MRT/LRT Stations")
bus <- read.csv("data/data-processed/bus.csv") %>% mutate(category="Bus Stations")
schools <- read.csv("data/data-processed/schools.csv") %>% mutate(category="Schools")
sport_facilities <- read.csv("data/data-processed/sport_facilities.csv") %>% mutate(category="Sports Facilities")
supermarkets <- read.csv("data/data-processed/supermarkets.csv") %>% mutate(category="Supermarkets")
# rbind requires all inputs to share the same columns; each file above must
# therefore have an identical schema.
locations <- rbind(attractions, condominiums, hawker_centers, hdb,
hospitals_clinics, hotels, malls, mrt_lrt, bus,
schools, sport_facilities, supermarkets)
write.csv(locations, "data/data-processed/locations.csv", row.names = F)
# Re-read the written file as a sanity check of the round trip.
View(read.csv("data/data-processed/locations.csv"))
|
ca91b68272726b595178e072c070cb73133a1284
|
b76d6e98a247b75733f91398705c87680b884928
|
/pipeline/scripts/plotting/hmm_rlefit.R
|
f7d9c9802bf9e8c6c73add116c51cab4f11f0c23
|
[] |
no_license
|
BenjaminPeter/admixfrog
|
f62042abab950db57b01fdc2db5d73831ef9e1b5
|
c05bc5354d32848e14063c89cb4b4025d7f7e3d5
|
refs/heads/master
| 2023-03-15T16:22:27.679771
| 2023-03-03T17:47:02
| 2023-03-03T17:47:02
| 178,419,114
| 7
| 3
| null | 2022-12-06T12:46:05
| 2019-03-29T14:20:02
|
Python
|
UTF-8
|
R
| false
| false
| 1,792
|
r
|
hmm_rlefit.R
|
# Snakemake-driven plotting script: summarizes run-length segments of an
# admixfrog HMM track across individuals and plots pairwise correlations
# plus a per-chromosome coverage track.
source("scripts/plotting/lib.R")
library(corrplot)
library(viridis)
# Parameters and file paths are injected by snakemake.
bin_size = as.integer(snakemake@wildcards$bin_size)
panel = snakemake@wildcards$panel
infile = snakemake@input$bin
snpfile = snakemake@input$snp
names = snakemake@config$panels[[panel]]
cutoff = as.numeric(snakemake@wildcards$cutoff)
# Length thresholds expressed in number of bins.
l_cutoffs = snakemake@params$lengths / bin_size * 1000
TRACK = strsplit(snakemake@wildcards$TRACK, "_")[[1]]
data = load_data(infile, names)
# Positive cutoff: flag bins where the summed track exceeds the cutoff;
# negative cutoff: flag bins below its absolute value.
if(cutoff > 0){
data$TRACK = rowSums(data[,TRACK]) > cutoff
}else{
data$TRACK = rowSums(data[,TRACK]) < (-cutoff)
}
coords <- data %>% select(chrom, bin_pos, bin_id)
# Correlation matrix helper; NA correlations are treated as 0.
mycov = function(...)cov(...) %>% cov2cor %>% replace_na(0)
# One run-length data frame per length cutoff, stacked with the cutoff
# (converted back to kb) as the Length column.
df = lapply(l_cutoffs, get_rundf, data=data)
names(df) = l_cutoffs
df = df %>% bind_rows(.id="Length") %>% mutate(Length=as.integer(Length) * bin_size / 1000)
x = df %>% select(-bin_id) %>% group_by(Length) %>% do(c=mycov(.[,-1]))
# Order individuals by hierarchical clustering of the first correlation matrix.
o = hclust(as.dist(1-x$c[[1]]))$order
png(filename=snakemake@output$pwplot, width=16, height=10, units="in", res=300)
par(mfrow=c(2,3))
for(i in 1:6)
corrplot(x$c[[i]][o,o], diag=F, is.corr=F, main = sprintf("> %s kb",x$Length[i]), mar=c(0,0,2,0))
dev.off()
# Count, per bin and length cutoff, how many individuals carry a run.
X = df %>% gather(sample, run, -1:-2)
Y = X %>% filter(run) %>% group_by(Length, bin_id) %>% summarize(n=n()) %>% arrange(-n)
Z = Y %>% left_join(coords)
Z %>% filter( n>=1, Length > 0) %>%
ungroup %>%
arrange(-n) %>%
ggplot(aes(x=bin_pos, y=n, color=Length)) +
geom_col(position="identity") +
facet_wrap(~chrom, ncol=2, strip.position="left") +
xlab("Position") +
ylab("# individuals") +
scale_color_viridis_c(trans="log")
ggsave(snakemake@output$trackplot, width=20, height=11)
#save.image("pw.rdebug")
|
b2d72e321576a2c8b2499c827d8b5d2c1d41a159
|
713597d4904ba5916f3d41f95bdeb42958eec54f
|
/Dscore_EPIC.R
|
3474472baecca94570393b2b58d441c863504ab6
|
[] |
no_license
|
changwn/DCONVscore
|
12f7c6d793b0f7b44c903cd0ea399fea15927f65
|
4f40bd21773165a0fcd235133054d8f8a2d34d76
|
refs/heads/master
| 2020-03-27T16:32:11.107061
| 2018-10-10T03:08:12
| 2018-10-10T03:08:12
| 146,790,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,264
|
r
|
Dscore_EPIC.R
|
#
#
#
# Deconvolution scoring of EPIC on TCGA COAD bulk RNA-seq: restrict the bulk
# matrix to the genes shared with EPIC's signature, then estimate cell
# fractions with EPIC().
library(EPIC)
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; prefer project-relative paths.
setwd("C:/Users/wnchang/Documents/F/PhD_Research/2018_08_23_deconvolution_score")
#---------------------------------------------------------------------------
# load data, which is ttt
load("C:/Users/wnchang/Documents/F/PhD_Research/2018_05_07_try_TIMER/data/RNAseq/coadRNAseq.RData")
# remove same gene
ttt1 <- ttt[unique(rownames(ttt)),]
bulk <- ttt1
# out <- EPIC(bulk, scaleExprs=F)
out <- EPIC(bulk)
names(out)
# Genes present in both the bulk data and EPIC's reference profiles.
commonGene <- intersect(rownames(bulk),rownames(EPIC::TRef$refProfiles))
length(EPIC::TRef$sigGenes)
length(intersect(commonGene,EPIC::TRef$sigGenes))
commonSiga <- intersect(commonGene, EPIC::TRef$sigGenes)
commonSiga <- sort(commonSiga)
length(commonSiga)
sigGeneEpic <- EPIC::TRef$sigGenes
# the order in new_data_est is mess
sigGeneEpic <- sort(sigGeneEpic)
S <- EPIC::TRef$refProfiles[sigGeneEpic,]
# Bulk matrix restricted to the shared signature genes.
new_data <- bulk[commonSiga,]
bulk1 <- new_data
# out1 <- EPIC(bulk1, scaleExprs=F)
out1 <- EPIC(bulk1)
# First 7 columns = cell-type fractions (samples x cell types).
Prop_EPIC <- out1$cellFraction[,1:7]
# names(out1)
# If scaleExprs is false, we find the results of cellFraction of out and out1 is exactly same
# Use predicted proportion(P matrix) to find how much it can explain from data(X matrix),
getFractions.Abbas <- function(XX,YY,w=NA){
ss.remove=c()
ss.names=colnames(XX)
while(T){
if(length(ss.remove)==0)tmp.XX=XX else{
if(is.null(ncol(tmp.XX)))return(rep(0,ncol(XX)))
tmp.XX=tmp.XX[,-ss.remove]
}
if(length(ss.remove)>0){
ss.names=ss.names[-ss.remove]
if(length(ss.names)==0)return(rep(0,ncol(XX)))
}
if(is.na(w[1]))tmp=lsfit(tmp.XX,YY,intercept=F) else tmp=lsfit(tmp.XX,YY,w,intercept=F)
if(is.null(ncol(tmp.XX)))tmp.beta=tmp$coefficients[1] else tmp.beta=tmp$coefficients[1:(ncol(tmp.XX)+0)]
if(min(tmp.beta>0))break
ss.remove=which.min(tmp.beta)
}
tmp.F=rep(0,ncol(XX))
names(tmp.F)=colnames(XX)
tmp.F[ss.names]=tmp.beta
return(tmp.F)
}
# Regress every gene's expression across samples on the estimated
# proportions, then reconstruct the bulk matrix from the coefficients.
P_2nd <- c()
n_gene <- nrow(bulk1)
n_sample <- ncol(bulk1)
for(i in 1:n_gene){
coeff <- getFractions.Abbas(Prop_EPIC, t(bulk1)[,i]) # first,use complete proportion to get rank of gene
g_tmp <- rownames(bulk1)[i]
P_2nd <- rbind(P_2nd, coeff)
rownames(P_2nd)[i] <- g_tmp
}
bulk_est <- P_2nd %*% t(Prop_EPIC)
# Gene-wise correlations between reconstructed and observed expression;
# their mean is the overall D-score of the full sample.
ccc <- cor(t(bulk_est), t(bulk1)) # want to get rank of gene-wise
#rownames(ccc) <- colnames(ccc)
dd <- diag(ccc)
dd_copy <- dd
Dscore_whole <- mean(dd)
## Compute the deconvolution score (D-score) for a given proportion matrix.
##
## proportion: samples x cell-types matrix of estimated cell fractions.
## data:       genes x samples bulk expression matrix.
##
## For every gene, regress its expression across samples on the cell-type
## proportions (non-negative least squares via getFractions.Abbas), rebuild
## the expression matrix from the fitted coefficients, and return the mean
## gene-wise correlation between observed and reconstructed expression.
cal_Dscore <- function(proportion=prop_base, data=bulk1){
  n_gene <- nrow(data)
  # Preallocate the coefficient matrix instead of growing it with rbind().
  P_2nd <- matrix(0, nrow = n_gene, ncol = ncol(proportion))
  rownames(P_2nd) <- rownames(data)
  for(i in seq_len(n_gene)){
    P_2nd[i, ] <- getFractions.Abbas(proportion, t(data)[, i])
  }
  bulk_est <- P_2nd %*% t(proportion)
  # Gene-wise correlation of observed vs. reconstructed expression.
  # BUG FIX: the original correlated against the global `bulk1` instead of
  # the `data` argument, so the function ignored its own input.
  ccc <- cor(t(data), t(bulk_est))
  dd <- diag(ccc)
  Dscore <- mean(dd)
  return(Dscore)
}
# choice 1:Find top10 genes which has high correlation with the original data
# (disabled: while(F) never executes; kept for reference)
while(F){
topN <- 10
top_gene <- c()
for(i in 1:topN){
gene_tmp <- names(dd[which(dd_copy == max(dd_copy))])
assign(paste("top", i, sep=""), gene_tmp)
dd_copy[gene_tmp] <- 0
top_gene <- c(top_gene, gene_tmp)
}
base <- top_gene
out_base <- EPIC(bulk1, sigGenes = base)
prop_base <- out_base$cellFraction[,1:7]
Dscore_init <- cal_Dscore(prop_base, bulk1)
}
# choice 2: sort the gene based on correlation and then extract top 10 gene
dd_copy_order <- sort(dd_copy, decreasing = T)
base <- names(dd_copy_order[1:10])
out_base <- EPIC(bulk1, sigGenes = base)
prop_base <- out_base$cellFraction[, 1:7]
Dscore_init <- cal_Dscore(prop_base, bulk1)
#remainSet <- setdiff(sigGeneEpic, base)
remainSet <- names(dd_copy_order[11:98])
# Greedy forward selection: add one candidate gene at a time and keep it
# only if the D-score improves.
top_gene_add <- base
Dscore_Yaxis <- c()
increase_gene <- c()
for(i in 1:length(remainSet)){
#for(i in 1 : 5){
top_gene_add <- union(top_gene_add, remainSet[i])
out_ep <- EPIC(bulk1, sigGenes=top_gene_add)
prop_add <- out_ep$cellFraction[,1:7]
Dscore_add <- cal_Dscore(prop_add, bulk1)
Dscore_Yaxis[i] <- Dscore_add
print(Dscore_add)
if(Dscore_add < Dscore_init){
# NOTE(review): 1:length(x)-1 parses as (1:length(x))-1, i.e. 0:(n-1);
# index 0 is silently dropped so this does remove the last element,
# but head(top_gene_add, -1) would state the intent directly.
top_gene_add <- top_gene_add[1:length(top_gene_add)-1]
print("less, delete")
}else{
Dscore_init <- Dscore_add
print("large, change init value")
increase_gene <- c(increase_gene, remainSet[i])
}
}
print(top_gene_add)
# results : the order put gene in the "top_gene_add" will change the final gene list and score.
# should check the rank within the interation
# Plot the D-score trajectory, labeling the genes that improved the score.
x <- c(11:98)
wholeName <- matrix(NA,1,88)
wholeName[match(increase_gene, remainSet)] <- increase_gene
plot(x, Dscore_Yaxis, type = 'l', main = "coad, score")
text(x, Dscore_Yaxis, wholeName, cex=0.8, col = "red", srt = 30)
gene_char <- paste(setdiff(top_gene_add, base), sep=" ", collapse=",")
text(55, 0.26, gene_char, cex=0.6, col = "blue")
base_char <- paste(base, sep="", collapse=",")
text(38, 0.265, base_char, cex=0.6, col="black")
# --------------------------------------------------------------------
# Evaluation on single-cell simulated data (GSE72056 melanoma mixtures):
# compare EPIC proportion estimates against the known cell proportions,
# once with the selected signature (top_gene_add) and once with the full
# default EPIC signature.
load("C:/Users/wnchang/Documents/F/PhD_Research/2018_06_28_singleCellSimulation/GSE72056_tg_data_list.RData")
# Using the selected top_gene_add signature.
storage1_top <- list()
for (i in seq_along(Cell_Prop_GSE72056)) {
  bulk1 <- GSE72056_tg_data_list[[i]][[1]]
  out1 <- EPIC(bulk1, sigGenes = top_gene_add)
  prop_sc <- out1$cellFraction[, 1:7]
  prop_true <- t(Cell_Prop_GSE72056[[i]])
  corr_sc_top <- cor(prop_sc, prop_true)
  storage1_top[[i]] <- corr_sc_top
}
# BUG FIX: the original assigned names to `storage1` (a variable that does
# not exist) and tried to name the still-empty lists before the loops ran,
# which errors at runtime. Name the filled lists after each loop instead.
names(storage1_top) <- names(Cell_Prop_GSE72056)
# Using the full default EPIC signature.
storage1_all <- list()
for (i in seq_along(Cell_Prop_GSE72056)) {
  bulk1 <- GSE72056_tg_data_list[[i]][[1]]
  out1 <- EPIC(bulk1)
  prop_sc <- out1$cellFraction[, 1:7]
  prop_true <- t(Cell_Prop_GSE72056[[i]])
  corr_sc_all <- cor(prop_sc, prop_true)
  storage1_all[[i]] <- corr_sc_all
}
names(storage1_all) <- names(Cell_Prop_GSE72056)
# Difference for the last simulated mixture processed above.
diff <- corr_sc_top - corr_sc_all
# results show few gene signature still produce high correlation,
# excepting T cell(CD4, CD8 T cell)
7a88e34d4cba21f25100ec790af6b082a0a06f4a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/inventorize/examples/MPN_singleperiod.Rd.R
|
365fc1bc0b36a010bfbba827a03f9ee5bfefe5f2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
MPN_singleperiod.Rd.R
|
# Extracted example call from the inventorize package documentation.
library(inventorize)
### Name: MPN_singleperiod
### Title: MPN_singleperiod
### Aliases: MPN_singleperiod
### ** Examples
MPN_singleperiod(mean= 32000,standerddeviation= 11000,p=24,c=10.9,g=7,b=0,na.rm=TRUE)
|
bc53d4a323147b8d45ae70f281ece68e3c3db178
|
d10994a78f1c1f458eea0e02449ac3640bacc48a
|
/Regression/GeneralizedLinearRegression.R
|
76c92cc22f16ff8f9f9215d642395df763c4bc2a
|
[] |
no_license
|
awe153/RDataMinningStudy
|
f2b3dd7bce39909f862d998e1e8abe6e0175d36b
|
07652057745cfdfecc284ea4cf8b2a99106582b7
|
refs/heads/master
| 2023-05-26T09:28:52.926670
| 2014-11-11T12:20:09
| 2014-11-11T12:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
GeneralizedLinearRegression.R
|
#GLM (generalized linear model)
#Generalized linear models generalize linear regression by relating the
#response variable to a linear model through a link function, and by
#allowing the variance of each measurement to be a function of the
#predicted value. They unify several statistical models, including
#linear regression, logistic regression and Poisson regression.
#The glm() function fits a generalized linear model given a symbolic
#description of the linear predictor and a description of the error
#distribution.
#Below we build a generalized linear model on the bodyfat data set.
data("bodyfat",package="mboost")
myFormula<-DEXfat ~ age+waistcirc+hipcirc+elbowbreadth+kneebreadth
bodyfat.glm<-glm(myFormula,family=gaussian("log"),data=bodyfat)
summary(bodyfat.glm)
#Predict; `type` selects the prediction type: the default is the linear
#predictor, "response" is on the scale of the response variable.
pred<-predict(bodyfat.glm,type="response")
plot(bodyfat$DEXfat,pred,xlab="观测值",ylab="预测值")
#With family = gaussian("identity") the result resembles ordinary linear
#regression; binomial("logit") gives logistic regression.
|
79a869ec927b8f1aacb8d150ee0ef49d8884129c
|
15617bea19680089ec4b425b507f6b0b328eb86c
|
/sport-or-not!/scripts/sport_or_not.R
|
3e01d314e150bf718e3c257ae964f8e51167a748
|
[] |
no_license
|
GWarrenn/this-and-that
|
d8e11094dcf04c682362a1b4fe0b7d9143b79b0a
|
18b1c1b7f023bcd938e9ae2ebcc8e7c5160448a7
|
refs/heads/master
| 2021-10-11T09:16:53.348763
| 2021-09-30T21:29:10
| 2021-09-30T21:29:10
| 214,073,327
| 0
| 0
| null | 2020-04-26T23:56:13
| 2019-10-10T02:58:28
|
R
|
UTF-8
|
R
| false
| false
| 28,019
|
r
|
sport_or_not.R
|
## Author: August Warren
## Description: Analysis of Fringe Sports Survey
## Date: 1/13/2020
## Status: Draft
## Specs: R version 3.4.4 (2018-03-15)
library(tidyverse)
library(googledrive)
library(reshape2)
library(scales)
library(viridis)
library(tidytext)
library(tm)
library(ggnewscale)
library(stringr)
#####################################################
##
## download data from Google Sheets/Drive
##
#####################################################
# NOTE(review): setwd() with a machine-specific relative path makes the
# script non-portable; prefer project-relative paths or here::here().
setwd("./GitLab/this-and-that/sport-or-not!/")
drive_find(type = "spreadsheet")
# NOTE(review): sheet_id is left blank here; it must be filled with the
# Google Sheet id for drive_download() below to work.
sheet_id = ""
drive_download(as_id(sheet_id), type = "csv",overwrite = T)
survey_data <- read.csv("Sport or Not! (Responses).csv")
#####################################################
##
## clean data
##
#####################################################
## get rid of question text in column/variable names
columns <- colnames(survey_data)
columns <- sub(x=columns,pattern = "Are.these.things.sports...",
replacement = "")
columns <- gsub(x=columns,pattern = "\\.$",
replacement = "")
colnames(survey_data) <- columns
# Collapse demographic answers into coarser analysis categories.
survey_data$gender_recode <- ifelse(survey_data$To.which.gender.do.you.most.closely.identify == "Male","Male",
ifelse(survey_data$To.which.gender.do.you.most.closely.identify == "Female","Female","Other"))
survey_data$race_recode <- ifelse(survey_data$Which.race.ethnicity.best.describes.you...Please.choose.only.one. == "White/Caucasian","White","POC")
survey_data$income_recode <- factor(survey_data$What.was.your.total.household.income.before.taxes.during.the.past.12.months,
levels = c("Under $50,000","$50,000 to $100,000","Over $100,000","Not sure/Refuse"))
# Sports fan = watches televised sports at least weekly AND self-identifies
# as an avid or casual traditional sports fan.
survey_data$sports_fans <- ifelse((survey_data$How.often.would.you.say.you.watch.televised.sports.or.sports.content.on.channels.like.ESPN == "A few times a week" |
survey_data$How.often.would.you.say.you.watch.televised.sports.or.sports.content.on.channels.like.ESPN == "Daily" |
survey_data$How.often.would.you.say.you.watch.televised.sports.or.sports.content.on.channels.like.ESPN == "Once a week") &
(survey_data$Which.one.of.the.following.best.describes.you == "Avid mainstream/traditional sports fan" |
survey_data$Which.one.of.the.following.best.describes.you == "Casual mainstream/traditional sports fan"),"Sports Fans","Non-Sports Fans")
# P.E. opinion on a 1-5 scale: 1-2 unfavorable, 4-5 favorable, 3 -> NA.
survey_data$pe_recode <- ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school <= 2,"Unfavorable",
ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school >= 4,"Favorable",NA))
# Comma-separated multi-select: count of sports played = commas + 1.
survey_data$num_sports_played <- str_count(survey_data$Which.of.the.following.have.you.played.in.the.past.year,",") + 1
survey_data$mentioned_physical <- ifelse(grepl("physical",
survey_data$In.a.few.words..what.makes.a.sport.a.sport.in.your.opinion,ignore.case = T),1,0)
survey_data$mentioned_physical <- factor(survey_data$mentioned_physical,
levels = c("0","1"),
labels = c("No Physical Mention","Mentioned Physical"))
survey_data <- survey_data %>%
mutate(num_sports_quartiles = ntile(num_sports_played,4))
survey_data$age_recode <- ifelse(survey_data$What.is.your.age == "40-44" | survey_data$What.is.your.age == "45-49" | survey_data$What.is.your.age == "50+","40+",
ifelse(survey_data$What.is.your.age == "18-24" | survey_data$What.is.your.age == "25-29","18-29",
ifelse(survey_data$What.is.your.age == "30-34"| survey_data$What.is.your.age == "35-39","30-39",
as.character(survey_data$What.is.your.age))))
# Number of respondents, used in plot subtitles below.
count <- nrow(survey_data)
## reshape sports data to long for top-level aggregation
sports <- c("Chess","eSports..Videogames.","Ping.Pong..Table.Tennis.","Foosball","Skiing",
"Snowboarding","Cycling","Bowling","Golf","Ultimate.Frisbee","Sailing",
"Rowing..Crew.","Frisbee.Golf","Kickball","Scrabble","Cornhole","Pickleball",
"NASCAR","Crossfit")
clean <- survey_data %>%
select(sports)
clean$id <- seq.int(nrow(clean))
clean_l <- melt(clean,id.vars = "id")
# Collapse the strong/weak answers into a two-level Sport / Not-a-Sport flag.
clean_l$value_recode <- ifelse(clean_l$value == "Not a Sport - Don't Feel Strongly" | clean_l$value == "Not a Sport - Feel Strongly","Not a Sport!",
ifelse(clean_l$value == "Sport - Don't Feel Strongly" | clean_l$value == "Sport - Feel Strongly","Sport!",
clean_l$value))
# Turn the sanitized column names back into display labels.
clean_l$variable <- trimws(gsub(x = clean_l$variable,pattern = "\\.",replacement=" "))
clean_l$variable <- gsub(x = clean_l$variable,pattern = "  ",replacement=" ")
clean_l$variable <- ifelse(clean_l$variable == "eSports Videogames","eSports/Videogames",
ifelse(clean_l$variable == "Ping Pong Table Tennis","Ping Pong/Table Tennis",clean_l$variable))
#####################################################
##
## Plot 1: Overall distributions on average
##
#####################################################
# Share of each answer category across all sports and respondents.
overall_stats <- clean_l %>%
filter(value != "") %>%
group_by(value) %>%
summarise(n=n()) %>%
mutate(freq=n/sum(n))
# Fix the legend/axis order from strongest-sport to strongest-not-a-sport.
overall_stats$value <- factor(overall_stats$value,levels = c("Sport - Feel Strongly","Sport - Don't Feel Strongly","Not a Sport - Don't Feel Strongly","Not a Sport - Feel Strongly","Never heard of/Don't know what this is","Don't Know/Care"))
overall_bar_plot <- ggplot(overall_stats,aes(x=value,y=freq,fill=value)) +
geom_bar(stat= "identity",color="black") +
geom_text(aes(x=value,y=freq,label=percent(round(freq,2))),vjust = -.5) +
scale_fill_manual(values = c("#1a9641","#a6d96a","#fdae61","#d7191c","#D3D3D3","#D3D3D3")) +
scale_x_discrete(labels = function(grouping) str_wrap(grouping, width = 20)) +
scale_y_continuous(labels = scales::percent) +
labs(title = "Average Sports Rankings",
subtitle = paste("among a very non-random sample of people with opinions about sports")) +
guides(fill=F) +
theme(axis.title = element_blank(),
axis.text = element_text(size=12))
ggsave(plot = overall_bar_plot, "images/1.0 Overall Ratings on Average.png", w = 10.67, h = 8,type = "cairo-png")
#####################################################
##
## Plot 2: Overall distributions by Sport
##
#####################################################
# Answer shares per sport (excluding "never heard of" from the heatmap).
stats <- clean_l %>%
filter(value != "") %>%
group_by(variable,value) %>%
summarise(n=n()) %>%
mutate(freq=n/sum(n)) %>%
filter(value != "Never heard of/Don't know what this is")
## add zero percents
# Cross every sport with every answer so missing combinations show as 0%.
sports <- stats %>%
select(variable) %>%
distinct()
responses <- stats %>%
ungroup() %>%
select(value) %>%
distinct()
all_combinations <- merge(sports,responses, by = NULL)
stats <- merge(stats,all_combinations,by = c("variable","value"),all.y = T)
stats$freq <- ifelse(is.na(stats$freq),0,stats$freq)
# Share of "Sport - Feel Strongly" per sport, used to order the y-axis.
stats_a_tier <- stats %>%
ungroup() %>%
filter(value == "Sport - Feel Strongly") %>%
rename(a_freq = freq) %>%
select(a_freq,variable)
stats <- merge(stats,stats_a_tier)
stats$value <- factor(stats$value,levels = c("Sport - Feel Strongly","Sport - Don't Feel Strongly",
"Not a Sport - Don't Feel Strongly","Not a Sport - Feel Strongly"))
sports_heatmap_plot <- ggplot(stats,aes(x=value,y=reorder(variable,a_freq))) +
geom_tile(aes(fill = freq),colour = "white") +
geom_text(aes(x=value,y=reorder(variable,a_freq),label=percent(round(freq,3)),color = as.numeric(freq) > 0.25)) +
scale_color_manual(guide = FALSE, values = c("white", "black")) +
scale_fill_viridis(name="",labels = scales::percent) +
labs(title = "Overall Sports Rankings",
subtitle = paste("among a very non-random sample of",count,"people with opinions about what is & isn't a sport")) +
theme(legend.position = "bottom",
axis.title = element_blank(),
axis.text = element_text(size=12),
legend.key.width = unit(1, "cm")) +
scale_y_discrete(expand = c(0, 0)) +
scale_x_discrete(expand = c(0, 0),labels = function(grouping) str_wrap(grouping, width = 20))
ggsave(plot = sports_heatmap_plot, "images/2.0 Ratings by Sport.png", w = 10.67, h = 8,type = "cairo-png")
#####################################################
##
## Plot 2A: Overall distributions by Sport (alternate)
##
#####################################################
# Per-sport shares of the collapsed Sport! / Not a Sport! categories.
overall_stats <- clean_l %>%
filter(value != "") %>%
group_by(variable,value_recode) %>%
summarise(n=n()) %>%
mutate(freq=n/sum(n))
# Net "ruling" per sport: % Sport! minus % Not a Sport!.
overall_sports_w <- dcast(overall_stats,variable ~ value_recode, value.var = "freq")
overall_sports_w$ruling <- overall_sports_w$`Sport!` - overall_sports_w$`Not a Sport!`
# Net strong-opinion margin (strongly sport minus strongly not), used for
# the fill gradient.
stats_strong <- stats %>%
filter(value == "Sport - Feel Strongly" | value == "Not a Sport - Feel Strongly") %>%
select(variable,value,freq) %>%
rename(strong_freq = freq)
stats_strong <- dcast(stats_strong,variable ~ value, value.var = "strong_freq")
stats_strong$strong_freq <- stats_strong$`Sport - Feel Strongly` - stats_strong$`Not a Sport - Feel Strongly`
overall_sports_w <- merge(overall_sports_w,stats_strong,by="variable")
sports_bar_plot <- ggplot(overall_sports_w,aes(x=reorder(variable,ruling),y=ruling,fill=strong_freq)) +
geom_bar(stat="identity",color="black") +
geom_text(aes(x=variable,y=ruling + .04 * sign(ruling),label=percent(round(ruling,2)))) +
coord_flip() +
scale_fill_distiller(palette = "Spectral",direction = 1,labels=scales::percent) +
labs(title = "Overall Sports Rankings - Difference Between Total Sport & Not Sport",
subtitle = paste("among a very non-random sample of",count,"people with opinions about what is & isn't a sport"),
fill="% Strongly Sport - Not Sport") +
theme(legend.position = "bottom",
axis.title = element_blank(),
axis.text = element_text(size=12),
legend.key.width = unit(1, "cm")) +
scale_y_continuous(labels = scales::percent)
ggsave(plot = sports_bar_plot, "images/2.0A Ratings by Sport.png", w = 10.67, h = 8,type = "cairo-png")
#####################################################
##
## Correlations
##
#####################################################
# Activities whose "sport or not" ratings get correlated with one another.
# Names use the make.names()-mangled forms from read.csv (dots for spaces).
sports <- c("Chess","eSports..Videogames.","Ping.Pong..Table.Tennis.","Foosball","Skiing",
            "Snowboarding","Cycling","Bowling","Golf","Ultimate.Frisbee","Sailing",
            "Rowing..Crew.","Frisbee.Golf","Kickball","Scrabble","Cornhole","Pickleball",
            "NASCAR","Crossfit")
# Analysis frame: raw rating columns plus the recoded demographics used below.
# NOTE(review): passing the external character vector `sports` bare into
# select() triggers an ambiguity warning in newer tidyselect versions;
# `all_of(sports)` is the explicit spelling.
clean <- survey_data %>%
  select(sports,
         gender_recode,
         income_recode,
         age_recode,
         race_recode,
         pe_recode,
         mentioned_physical,
         sports_fans,
         num_sports_quartiles)
#' Append a numeric recode of one sport's rating column.
#'
#' Maps the survey's five response strings onto a 0-1 scale
#' (1 = strongly a sport, 0.75 = sport, 0.5 = never heard of it,
#' 0.25 = not a sport, 0 = strongly not a sport) and stores the result in a
#' new column named `<sport>_recode`. Any other value (including the empty
#' string) becomes NA, matching the original nested-ifelse behavior.
#'
#' @param df data.frame containing the raw rating column named by `sport`.
#' @param sport character scalar; name of the column to recode.
#' @return `df`, coerced with as.data.frame(), with the extra recode column.
recode_sports <- function(df, sport) {
  new_var <- paste0(sport, "_recode")
  # Named lookup replaces the five-deep ifelse ladder; values with no
  # matching name yield NA automatically.
  score_map <- c("Sport - Feel Strongly"                  = 1,
                 "Sport - Don't Feel Strongly"            = 0.75,
                 "Never heard of/Don't know what this is" = 0.5,
                 "Not a Sport - Don't Feel Strongly"      = 0.25,
                 "Not a Sport - Feel Strongly"            = 0)
  # as.character() guards against factor columns, which would otherwise
  # index score_map by integer level codes.
  df[[new_var]] <- unname(score_map[as.character(df[[sport]])])
  as.data.frame(df)
}
# Add a 0-1 numeric recode column for every sport in `sports`.
for (f in sports) {
  clean <- recode_sports(clean,f)
}
# Explicit list of the recode columns produced above.
sports_recode <- c("Chess_recode","eSports..Videogames._recode","Ping.Pong..Table.Tennis._recode","Foosball_recode","Skiing_recode",
                   "Snowboarding_recode","Cycling_recode","Bowling_recode","Golf_recode","Ultimate.Frisbee_recode","Sailing_recode",
                   "Rowing..Crew._recode","Frisbee.Golf_recode","Kickball_recode","Scrabble_recode","Cornhole_recode","Pickleball_recode",
                   "NASCAR_recode","Crossfit_recode")
clean_filtered <- clean %>%
  select(sports_recode)
# Pairwise correlations (cor() default is Pearson) on complete cases only.
correlations <- cor(clean_filtered,use="complete.obs")
# Long format for tiling; then undo the machine-mangled column names.
wide_corr <- melt(correlations)
# The "id" filter is a defensive no-op here (no id column was selected).
wide_corr <- wide_corr %>%
  filter(Var1 != "id" & Var2 != "id") %>%
  mutate(Var1 = gsub(pattern = "_recode",replacement = "",x=Var1),
         Var2 = gsub(pattern = "_recode",replacement = "",x=Var2))
wide_corr$Var1 <- trimws(gsub(x = wide_corr$Var1,pattern = "\\.",replacement=" "))
wide_corr$Var1 <- gsub(x = wide_corr$Var1,pattern = " ",replacement=" ")
wide_corr$Var1 <- ifelse(wide_corr$Var1 == "eSports Videogames","eSports/Videogames",
                         ifelse(wide_corr$Var1 == "Ping Pong Table Tennis","Ping Pong/Table Tennis",wide_corr$Var1))
wide_corr$Var2 <- trimws(gsub(x = wide_corr$Var2,pattern = "\\.",replacement=" "))
wide_corr$Var2 <- gsub(x = wide_corr$Var2,pattern = " ",replacement=" ")
wide_corr$Var2 <- ifelse(wide_corr$Var2 == "eSports Videogames","eSports/Videogames",
                         ifelse(wide_corr$Var2 == "Ping Pong Table Tennis","Ping Pong/Table Tennis",wide_corr$Var2))
# Heatmap of the correlation matrix.
# NOTE(review): the legend says "R-Squared" but `value` is the correlation r
# from cor(), not r^2 -- confirm which was intended.
correlations_matrix <- ggplot(wide_corr, aes(x=Var1, y=Var2, fill=value)) +
  geom_tile(aes(fill = value),colour = "white") +
  geom_text(aes(x=Var1,y=Var2,label=round(value,2))) +
  scale_fill_gradientn(colours = c("red","white","#1a9641"),
                       values = rescale(c(-.3,0,.9)),
                       guide = "colorbar", limits=c(-.3,.9)) +
  labs(title = "Sports Correlation Matrix",
       subtitle = paste("among a very non-random sample of",count,"people with opinions about what is & isn't a sport"),
       fill = "R-Squared") +
  theme(legend.position = "bottom",
        axis.title = element_blank(),
        axis.text = element_text(size=12),
        axis.text.x = element_text(angle = 45, hjust = 1),
        legend.key.width = unit(1, "cm"))
ggsave(plot = correlations_matrix, "images/3.0 Correlation Matrix.png", w = 10.67, h = 8,type = "cairo-png")
#####################################################
##
## Demographics
##
#####################################################
## create generalizable funciton to handle all demographic aggregations and plotting
# Build and save a faceted heatmap of sport ratings split by one demographic.
#
# Relies on globals created earlier in the script: `sports` (rating column
# names) and `count` (sample size for the subtitle).
#
# @param df    data frame containing the rating columns plus `demo`.
# @param demo  character name of the demographic column to split by.
# @param label human-readable label used in the plot title and file name.
# @return nothing useful; called for its ggsave() side effect.
demographic_plots <- function(df,demo,label) {
  #########################################
  ##
  ## Plot Average Sport Scores
  ##
  #########################################
  new_df <- df %>%
    select(sports,demo)
  new_df_l <- melt(new_df,id.vars = demo)
  # Collapse strong/weak responses into two verdicts, keep the rest as-is.
  new_df_l$value_recode <- ifelse(new_df_l$value == "Not a Sport - Don't Feel Strongly" | new_df_l$value == "Not a Sport - Feel Strongly","Not a Sport!",
                                  ifelse(new_df_l$value == "Sport - Don't Feel Strongly" | new_df_l$value == "Sport - Feel Strongly","Sport!",
                                         new_df_l$value))
  # % saying "Sport!" per demographic group x sport. group_by() deparses the
  # expression, so downstream code addresses the column as `new_df_l[, demo]`.
  demos <- new_df_l %>%
    ungroup() %>%
    group_by(new_df_l[,demo],variable,value_recode) %>%
    summarise(n=n()) %>%
    mutate(freq=n/sum(n)) %>%
    filter(value_recode != "Never heard of/Don't know what this is" &
             `new_df_l[, demo]` != "Other" & value_recode != "" &
             `new_df_l[, demo]` != "Not sure/Refuse" &
             value_recode == "Sport!") %>%
    select(`new_df_l[, demo]`,variable,value_recode,freq)
  # Same breakdown, but for the two strong-opinion categories.
  demos_strong <- new_df_l %>%
    ungroup() %>%
    group_by(new_df_l[,demo],variable,value) %>%
    summarise(n=n()) %>%
    mutate(freq=n/sum(n)) %>%
    filter((value == "Not a Sport - Feel Strongly" | value == "Sport - Feel Strongly") &
             `new_df_l[, demo]` != "Other" & value != "" &
             `new_df_l[, demo]` != "Not sure/Refuse") %>%
    rename(value_recode = value) %>%
    select(`new_df_l[, demo]`,variable,value_recode,freq)
  demos <- rbind(demos,demos_strong)
  # Pooled "All Sports (Mean)" rows, mirroring the two blocks above.
  demos_all <- new_df_l %>%
    ungroup() %>%
    group_by(new_df_l[,demo],value_recode) %>%
    summarise(n=n()) %>%
    mutate(freq=n/sum(n)) %>%
    filter(value_recode != "Never heard of/Don't know what this is" &
             `new_df_l[, demo]` != "Other" & value_recode != "" &
             `new_df_l[, demo]` != "Not sure/Refuse" &
             value_recode == "Sport!") %>%
    select(`new_df_l[, demo]`,value_recode,freq) %>%
    mutate(variable = "All Sports (Mean)")
  demos <- rbind(demos_all,demos)
  demos_all_strong <- new_df_l %>%
    ungroup() %>%
    group_by(new_df_l[,demo],value) %>%
    summarise(n=n()) %>%
    mutate(freq=n/sum(n)) %>%
    filter((value == "Not a Sport - Feel Strongly" | value == "Sport - Feel Strongly") &
             `new_df_l[, demo]` != "Other" &
             `new_df_l[, demo]` != "Not sure/Refuse") %>%
    rename(value_recode = value) %>%
    select(`new_df_l[, demo]`,value_recode,freq) %>%
    mutate(variable = "All Sports (Mean)")
  demos <- rbind(demos_all_strong,demos)
  ## zero counts: materialize missing sport x verdict combos as freq = 0
  demos <- demos %>%
    complete(variable,nesting(value_recode))
  demos$freq <- ifelse(is.na(demos$freq),0,demos$freq)
  demos_l <- dcast(demos,variable + value_recode ~ `new_df_l[, demo]`, value.var = c("freq"))
  ## for specific "binary" demos: difference between the two group columns
  if(demo == "gender_recode" | demo == "sports_fans" | demo == "race_recode" | demo == "pe_recode" | demo == "mentioned_physical"){
    demos_l$zdiff <- demos_l[,3] - demos_l[,4]
  }
  demos_l <- demos_l %>% rename(sport = variable)
  ## determine %Sport for sort order (column 3 = first demographic group);
  ## the pooled row is pinned to 1 so it sorts to the top.
  stats_a_tier <- demos_l %>%
    ungroup() %>%
    filter(value_recode == "Sport!") %>%
    rename(sport_freq = 3) %>%
    select(sport_freq,sport) %>%
    mutate(sport_freq = if_else(sport == "All Sports (Mean)",1,sport_freq))
  demos_l <- merge(demos_l,stats_a_tier)
  demos_w <- melt(demos_l,id.vars = c("value_recode","sport","sport_freq"))
  ## clean up mangled sport names for display
  demos_w$sport <- trimws(gsub(x = demos_w$sport,pattern = "\\.",replacement=" "))
  demos_w$sport <- gsub(x = demos_w$sport,pattern = " ",replacement=" ")
  demos_w$value_recode <- factor(demos_w$value_recode,
                                 levels = c("Sport!","Sport - Feel Strongly","Not a Sport - Feel Strongly"))
  ## plotting! viridis fill for the group columns, Spectral for zdiff.
  sports_heatmap_plot <- ggplot(demos_w,aes(x=variable,y=reorder(sport,sport_freq))) +
    geom_tile(data=filter(demos_w,variable != 'zdiff'),aes(fill = value),colour = "white") +
    scale_fill_viridis(name="",labels = scales::percent) +
    facet_wrap(~value_recode) +
    ggnewscale::new_scale_fill() +
    geom_tile(data = filter(demos_w, variable == 'zdiff'),
              aes(fill = value)) +
    scale_fill_distiller(palette ="Spectral",direction = 1,guide = F) +
    geom_text(aes(x=variable,y=sport,label=percent(round(value,3)),color = (as.numeric(value) > 0.25) | demos_w$variable == 'zdiff')) +
    scale_color_manual(guide = FALSE, values = c("white", "black")) +
    # BUG FIX: subtitle previously hard-coded "113"; use the dynamic sample
    # size `count`, consistent with every other plot in this script.
    labs(title = paste0("Overall Sports Rankings by ",label),
         subtitle = paste("among a very non-random sample of",count,"people with opinions about what is & isn't a sport")) +
    theme(legend.position = "bottom",
          axis.title = element_blank(),
          axis.text = element_text(size=12),
          strip.text = element_text(size=12),
          legend.key.width = unit(1, "cm")) +
    scale_y_discrete(expand = c(0, 0)) +
    scale_x_discrete(expand = c(0, 0),labels = function(grouping) str_wrap(grouping, width = 10))
  ggsave(plot = sports_heatmap_plot, paste0("images/4.0 Sport Ratings by ",label,".png"), w = 10.67, h = 8,type = "cairo-png")
}
## Render every demographic cut through the same plotting routine,
## pairing each column name with its display label.
invisible(Map(
  function(demo_col, demo_label) demographic_plots(clean, demo_col, demo_label),
  c("gender_recode", "sports_fans", "income_recode", "race_recode",
    "pe_recode", "num_sports_quartiles", "mentioned_physical", "age_recode"),
  c("Gender", "Sports Fans", "Income", "Race",
    "PE Therm", "Number of Sports Played", "Mentioned Physical", "Age")
))
#####################################################
##
## Let's talk about PE...
##
#####################################################
# PE favorability and sports fandom, split by gender. Melting with only
# gender as the id turns both the PE rating and sports_fans into facets.
new_df <- survey_data %>%
  select(Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school,gender_recode,sports_fans)
new_df_l <- melt(new_df,id.vars = c("gender_recode"))
# NOTE(review): missing ratings are recoded to the string "Neutral", but
# "Neutral" is not among the factor *levels* below ("1".."5"), so these
# rows become NA after factor() -- possibly "3" was intended. Confirm.
new_df_l$value <- ifelse(is.na(new_df_l$value),"Neutral",new_df_l$value)
gender_tabs <- new_df_l %>%
  group_by(gender_recode,variable,value) %>%
  summarise(n=n()) %>%
  mutate(freq=n/sum(n)) %>%
  filter(gender_recode != "Other")
# Facet strip labels keyed by the melted variable names.
labs <- c("PE/Gym Class Favorability","Sports Fans")
names(labs) <- c("Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school", "sports_fans")
gender_tabs$value <- factor(gender_tabs$value,
                            levels = c("1","2","3","4","5","Non-Sports Fans","Sports Fans"),
                            labels = c("Very Unfavorable","Somewhat Unfavorable","Neutral","Somewhat Favorable","Very Favorable","Non-Sports Fans","Sports Fans"))
# Stacked 100% bars, one facet per question.
gender_sports <- ggplot(gender_tabs,aes(x=gender_recode,y=freq,fill=value,label = percent(round(freq,3)))) +
  geom_bar(stat="identity",color="black") +
  geom_text(size = 4, position = position_stack(vjust = 0.5)) +
  facet_grid(~variable,scales="free",labeller = labeller(variable = labs)) +
  scale_fill_manual(values = c("#de2d26","#fee0d2","#D3D3D3","#e5f5e0","#31a354","#deebf7","#3182bd")) +
  labs(title = "Attitudes Towards Sports by Gender",
       subtitle = paste("among a very non-random sample of people with opinions about what is & isn't a sport"),
       fill ="") +
  scale_y_continuous(labels = scales::percent) +
  theme_bw() +
  theme(legend.position = "bottom",
        axis.title = element_blank(),
        axis.text = element_text(size=12),
        legend.key.width = unit(1, "cm"))
ggsave(plot = gender_sports, "images/Sports & Gender.png", w = 10.67, h = 8,type = "cairo-png")
## are views of PE more strongly driven by gender or sports fandom?
# Rescale the 1-5 PE rating onto [0, 1] for a fractional-response model.
survey_data$pe_cont <- ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school == 1,0,
                              ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school == 2,.25,
                                     ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school == 3,.5,
                                            ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school == 4,.75,
                                                   ifelse(survey_data$Please.rate.your.opinion.towards.P.E..Gym.Class.when.you.were.in.school == 5,1,NA)))))
survey_data$male <- ifelse(survey_data$gender_recode == "Male",1,0)
survey_data$sports <- ifelse(survey_data$sports_fans == "Sports Fans",1,0)
# NOTE(review): family = "binomial" with a continuous 0-1 outcome is a
# fractional logit; glm() will warn about non-integer successes
# (quasibinomial would silence this). Estimates are still usable.
model <- glm(data = survey_data,formula = pe_cont ~ male + sports,family = "binomial")
## export model results to table
stargazer(model,
          dep.var.labels=c("Gym Class Favorability"),
          covariate.labels=c("Gender (Men=1)","Sports Fandom (Sports Fan=1)"),
          type = "html",
          out = "images/regression_table.html")
# Tidy the coefficient table and add odds ratios.
model_df <- as.data.frame(summary.glm(model)$coefficients,row.names = F)
model_df$iv <- rownames(as.data.frame(summary.glm(model)$coefficients))
model_df$odds <- exp(model_df$Estimate)
# Quick pairwise correlation check of the three model variables.
df <- survey_data %>% select(male,sports,pe_cont)
cor(df,method = "pearson", use = "complete.obs")
## ...both? But more so driven by sports fandom
## ...both? But more so driven by sports fandom
#####################################################
##
## Regressions
##
#####################################################
clean$male <- ifelse(clean$gender_recode == "Male",1,0)
clean$white <- ifelse(clean$race_recode == "White",1,0)
clean$youth <- ifelse(clean$age_recode == "18-29",1,0)
clean$low_income <- ifelse(clean$income_recode == "Under $50,000",1,0)
clean$sports_fan <- ifelse(clean$sports_fans == "Sports Fans",1,0)
model_results <- data.frame()
for(f in sports) {
dv <- paste0(f,"_recode")
clean_df <- clean %>%
filter(clean[,f] != "")
model <- glm(get(dv) ~ male + white + youth + low_income + sports_fan,
family = "binomial",
data=clean_df)
model_df <- as.data.frame(summary.glm(model)$coefficients,row.names = F)
model_df$iv <- rownames(as.data.frame(summary.glm(model)$coefficients))
model_df$sport <- f
model_df$odds <- exp(model_df$Estimate)
ci <- as.data.frame(confint(model),row.names=F) %>%
filter(!is.na(`2.5 %`))
ci$iv <- rownames(as.data.frame(summary.glm(model)$coefficients))
model_df <- merge(ci,model_df,by="iv")
model_df$sig <- ifelse((model_df$`97.5 %` < 0 & model_df$`2.5 %`< 0) | (model_df$`97.5 %` > 0 & model_df$`2.5 %`> 0),1,0)
model_results <- rbind(model_results,model_df)
}
## plot regression coefs
regression_plot <- ggplot(model_results, aes(iv, Estimate,color=sig))+
facet_wrap(~sport) +
geom_point() +
coord_flip() +
geom_hline(yintercept = 0) +
geom_pointrange(aes(ymin = `2.5 %`, ymax = `97.5 %`)) +
labs(title = "Sport or Not?: Regression Coefficients",
x = "Regression Coefficient") +
theme(axis.title.y = element_blank(),
legend.position = "none")
ggsave(plot = regression_plot, "images/Regression Coefs.png", w = 10, h = 6,type = "cairo-png")
#####################################################
##
## Open-ends: Text Analysis
##
#####################################################
# Top single words from the open-ended "what makes a sport a sport" answers
# (the column is named `bigram` for reuse below even though n = 1 here).
most_common_words <- survey_data %>%
  unnest_tokens(bigram,In.a.few.words..what.makes.a.sport.a.sport.in.your.opinion, token = "ngrams", n = 1) %>%
  count(bigram, sort = TRUE) %>%
  filter(!is.na(bigram) & bigram != "sport" & bigram != "sports") %>%
  filter(!bigram %in% stop_words$word) %>%
  mutate(type = "Most Common Words") %>%
  top_n(10,n)
# Top word pairs, dropping pairs where either word is a stopword.
bigrams <- survey_data %>%
  unnest_tokens(bigram,In.a.few.words..what.makes.a.sport.a.sport.in.your.opinion, token = "ngrams", n = 2) %>%
  count(bigram, sort = TRUE) %>%
  filter(!is.na(bigram)) %>%
  separate(bigram,c("word1","word2"),sep=" ") %>%
  filter(!word1 %in% stop_words$word & !word2 %in% stop_words$word) %>%
  mutate(bigram = paste(word1,word2),
         type = "Most Common Word Pairs") %>%
  top_n(8,n) %>%
  select(bigram,n,type)
# Combine for a single two-facet bar chart.
ngrams <- rbind(bigrams,most_common_words)
bigram_plot <- ggplot(ngrams,aes(x=reorder(bigram,n),y=n,fill="#900C3F")) +
  geom_bar(stat="identity",color="black") +
  facet_wrap(~type,scales = "free") +
  coord_flip() +
  scale_fill_manual(values = c("#900C3F")) +
  geom_text(aes(x=bigram,y=n,label=n,hjust = -.25),size=3) +
  labs(title = "Most Commonly Used Words to Describe/Define Sports",
       subtitle = paste("among a very non-random sample of people with opinions about sports"),
       y="Unique number of times mentioned",
       x="") +
  guides(fill=F) +
  theme(axis.text = element_text(size=8))
ggsave(plot = bigram_plot, "images/N-Grams.png", w = 8, h = 4,type = "cairo-png")
# NOTE(review): the two aggregations below are computed but never plotted
# or exported in this script -- exploratory leftovers, apparently.
bigrams_fandom <- survey_data %>%
  group_by(sports_fans) %>%
  unnest_tokens(bigram,In.a.few.words..what.makes.a.sport.a.sport.in.your.opinion, token = "ngrams", n = 2) %>%
  count(bigram, sort = TRUE) %>%
  filter(!is.na(bigram)) %>%
  separate(bigram,c("word1","word2"),sep=" ") %>%
  filter(!word1 %in% stop_words$word) %>%
  filter(!word2 %in% stop_words$word)
mcw_phys <- survey_data %>%
  group_by(mentioned_physical) %>%
  unnest_tokens(bigram,In.a.few.words..what.makes.a.sport.a.sport.in.your.opinion, token = "ngrams", n = 1) %>%
  count(bigram, sort = TRUE) %>%
  filter(!is.na(bigram)) %>%
  filter(!bigram %in% stop_words$word)
|
6411628897dc672cd070b5283395273568aff895
|
bc264aa3581a22a7da47caeb57d2ae073323b9e1
|
/man/rast.grad.Rd
|
1eadbab28dd2b5e4b7af911d0a85c890051f4a56
|
[] |
no_license
|
cran/ctmcmove
|
41d6a5b48ebc2fd9498190de327e4ecc727d19c4
|
5cbe337b24700cc377687e1aa0d96f0fd25ca326
|
refs/heads/master
| 2020-05-21T04:22:14.594036
| 2018-04-20T12:58:33
| 2018-04-20T12:58:33
| 48,078,653
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,349
|
rd
|
rast.grad.Rd
|
\name{rast.grad}
\alias{rast.grad}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Creates gradient rasters from a raster object.%% ~~function to do ... ~~
}
\description{
This function takes a raster stack or raster object and creates two
matrices for each raster layer, one which contains the x coordinates
of the gradient of the raster layer and one which contains the y
coordinates of the gradient of the raster layer.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
rast.grad(rasterstack)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{rasterstack}{A raster layer or raster stack from package "raster".
%% ~~Describe \code{rasterstack} here~~
}
}
\details{
The gradient is computed using the "terrain" function in raster.
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{xy}{A matrix of x and y coordinates of each cell in the raster
stack or raster layer. The order is the order of the cells in the
raster object.}
\item{grad.x}{a matrix where each column is the x-coordinates of the
gradient for one raster layer}
\item{grad.y}{a matrix where each column is the y-coordinates of the
gradient for one raster layer}
\item{rast.grad.x}{A raster stack where each raster layer is the
x-coordinates of the gradient for one covariate}
\item{rast.grad.y}{A raster stack where each raster layer is the
y-coordinates of the gradient for one covariate}
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Hanks, E. M.; Hooten, M. B. & Alldredge, M. W. Continuous-time
Discrete-space Models for Animal Movement The Annals of Applied
Statistics, 2015, 9, 145-165
%% ~put references to the literature/web site here ~
}
\author{
Ephraim M. Hanks
%% ~~who you are~~
}
%% \note{
%% %% ~~further notes~~
%% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
%% \seealso{
%% %% ~~objects to See Also as \code{\link{help}}, ~~~
%% }
\examples{
## For example code, do
##
## > help(ctmcMove)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
0ad3c5f09acaf52ec527060cc85b93d16bb78ca3
|
c434a9125bace27cfb126ee77457623a518db685
|
/man/RGBM.train.Rd
|
408f3393cdbbbb604d5f02b4b5dee6ef95e64f3b
|
[] |
no_license
|
cran/RGBM
|
9d61162f6efa0e365e2ff0a84916c8a2d2e0a775
|
80c5df56a560c2f044dc968bc733d39a306fea8c
|
refs/heads/master
| 2023-04-29T00:23:42.897347
| 2023-04-14T07:50:14
| 2023-04-14T07:50:14
| 82,651,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
rd
|
RGBM.train.Rd
|
\name{RGBM.train}
\alias{RGBM.train}
\title{
Train RGBM predictor
}
\description{
This function trains a regression model for a given \code{X.train} feature matrix, \code{Y.train} response vector, and working parameters. A model returned by this function can be used to predict response for unseen data with \code{\link{RGBM.test}} function.
}
\usage{
RGBM.train(X.train, Y.train, s_f = 0.3, s_s = 1, lf = 1, M.train = 5000, nu = 0.001)
}
\arguments{
\item{X.train}{
Input S-by-P feature matrix of training samples. Columns correspond to features, rows correspond to samples.
}
\item{Y.train}{
Input S-element response vector of training samples.
}
\item{s_f}{
Sampling rate of features, 0<s_f<=1. Fraction of columns from X.train, which will be sampled without replacement to calculate each extension in the boosting model. By default it's 0.3.
}
\item{s_s}{
Sampling rate of samples, 0<s_s<=1. Fraction of rows from X.train, which will be sampled with replacement to calculate each extension in boosting model. By default it's 1.
}
\item{lf}{
Loss function: 1-> Least Squares and 2 -> Least Absolute Deviation
}
\item{M.train}{
Number of extensions in boosting model, e.g. number of iterations of the main loop of RGBM algorithm. By default it's 5000.
}
\item{nu}{
Shrinkage factor, learning rate, 0<nu<=1. Each extension to boosting model will be multiplied by the learning rate. By default it's 0.001.
}
}
\value{
Regression model is a structure containing all the information needed to predict response for unseen data
}
\author{
Raghvendra Mall <raghvendra5688@gmail.com>
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
083588de59da6304de70845989a7c39dc69a2a88
|
5174953c11e87f54b9804c4b6a87d97088ea9f23
|
/analysis_scripts/cell_cycle_annotation.R
|
69924f595c4f4b0af51cf20961b6bc6021e744c9
|
[] |
no_license
|
bigfacebig/singlecellcd8ibd
|
bbf483c945043a4744d28444e33955b6fe638bb0
|
b536c81d3d075ec999771a4d3a5dfdd2d6141ea2
|
refs/heads/master
| 2022-11-24T23:38:33.729495
| 2020-08-02T20:54:23
| 2020-08-02T20:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
cell_cycle_annotation.R
|
library(scran)
library(stringr)
# Annotate CD8 T cells with cell-cycle phase calls from scran's cyclone().
# Read in the Seurat object.
cd8.seurat <- readRDS("cd8.seurat.RDS")
## Read in the human cell-cycle marker pairs shipped with scran.
cc.pairs <- readRDS(system.file("exdata", "human_cycle_markers.rds", package="scran"))
## Read in the 10x CellRanger features table to map symbols to Ensembl IDs.
genes <- read.table("features.tsv.gz", header=FALSE)
## Subset to keep only mRNA rows and not feature barcoding/antibodies.
genes <- genes[genes$V4 == "Expression", ]
## Named vector: names = gene symbols, values = Ensembl IDs with the
## trailing version suffix (".NN") stripped to match the marker pairs.
gene.names <- genes$V1
names(gene.names) <- genes$V2
gene.names <- str_replace(gene.names, "\\.\\d+", "")
## Run cell cycle predictions (verbose = TRUE, not the reassignable T).
cc <- cyclone( cd8.seurat@assays$RNA@data, pairs=cc.pairs, verbose=TRUE, gene.names=gene.names)
## Store phase calls plus normalized and raw scores on the Seurat object.
## BUG FIX: the original used `$$` (a parse error) on the G2M_score and
## S_score_raw assignments; corrected to single `$`.
cd8.seurat$phases <- cc$phases
cd8.seurat$G1_score <- cc$normalized.scores$G1
cd8.seurat$G2M_score <- cc$normalized.scores$G2M
cd8.seurat$S_score <- cc$normalized.scores$S
cd8.seurat$G1_score_raw <- cc$scores$G1
cd8.seurat$G2M_score_raw <- cc$scores$G2M
cd8.seurat$S_score_raw <- cc$scores$S
saveRDS(cd8.seurat, file="cd8.seurat.RDS")
|
e3ddaa4f48c2a26c1d66d5086a4fa904dd29e4d9
|
dbfe5ce272e204a8e1663ced35c9d48ef4870496
|
/man/count.Rd
|
234489fe7b50bac3b88f34802a1c831e838dace8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hmito/hmRLib
|
fac91a4e2ddfcd899283ec0b63c87c31965fb17f
|
f2cfd54ea491ee79d64f7dd976a94086092b8ef5
|
refs/heads/master
| 2023-08-31T07:21:31.825394
| 2023-08-28T10:02:07
| 2023-08-28T10:02:07
| 41,907,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
count.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithm.R
\name{count}
\alias{count}
\title{Count how many times each element of \code{what} occurs in \code{from}}
\usage{
count(what, from, condition = `==`)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{what}{vector of values whose occurrences are counted}
\item{from}{vector that is searched for occurrences of \code{what}}
\item{condition}{comparison function used for matching. Default is \code{`==`}}
}
\value{
Vector of counts, one per element of \code{what}.
}
\description{
Count what in from
}
\examples{
count(c(1,3,5), c(0,1,2,3,3))
# c(1,2,0)
}
|
d9e22c2508da28719dd3e873ef742ea643c700c1
|
f474da4a51e7b398e12feb53116eae6776780cb6
|
/backups/OneVsPAIRSbroken.R
|
d80e8bf41fa2c6f4b3aa25a4b111e3550d0bb3e7
|
[] |
no_license
|
HyperionRiaz/Matie
|
79c4670015b4ad9134da9d9baaa461fc2ee858ab
|
469bfa28ced1dcbbd46e9086c8fe44ecbae2e01b
|
refs/heads/master
| 2016-09-06T06:50:00.318176
| 2013-06-21T22:09:08
| 2013-06-21T22:09:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,900
|
r
|
OneVsPAIRSbroken.R
|
#THIS POS DOES ONE AGAINST ALL PAIRS. NOT BLOODY HELPFUL!
# Web-service driver: reads an uploaded CSV, computes MAATIE association
# scores of one column vs. all others plus squared Spearman correlations,
# and writes the result CSVs back to the per-upload directory.
# args[1] = upload directory token, args[2] = name of the "one" column.
# NOTE(review): the package is loaded as "maatie" while the error text
# says "MAATIE" -- presumably the lowercase name is correct; confirm.
library("maatie")
args <- commandArgs(trailingOnly = TRUE)
#importedDat <- read.csv(paste("SelectedHealthWHO.csv", sep = ""), header = TRUE)
importedDat <- read.csv(paste("/var/www/files/",args[1],"/upload.csv", sep = ""), header = TRUE)
dims = dim(importedDat)
# Size guard: refuse anything larger than 500 rows x 25 columns.
if(dims[1]<=500&&dims[2]<=25){
#Compute the association matrix
print("Computing the association matrix:")
mat = as.matrix(tap(importedDat,one = args[2]))
# Coefficient of determination via squared pairwise Spearman correlation.
spearmanCOD <- cor(as.matrix(importedDat), use='pairwise.complete.obs',method='spearman')^2
spearmanCODframe <- data.frame(spearmanCOD,row.names = NULL)
print("Done.")
#Call the Agram function (disabled: PDF/PNG rendering left commented out)
#print("Generating AGram...")
#pdf(file=paste("Agram.pdf"), height=16, width=22)
#pdf(file=paste("/var/www/files/",args[1],"/AgramOVR.pdf", sep = ""), height=dim(mat)[1], width=dim(mat)[1]*1.3)
#Agram(importedDat,mat,one=args[2],order=FALSE)
#dev.off()
#png(filename=paste("Agram.png"),height=700, width=1000)
#png(filename=paste("/var/www/files/",args[1],"/AgramOVR.png", sep = ""),height=dim(mat)[1]*40, width=dim(mat)[1]*60)
#Agram(importedDat,mat,one=args[2],order=FALSE)
#dev.off()
#print("Done.")
print("Exporting the data...")
# Re-attach the original column names before export.
aMatFram<-data.frame(mat)
names(aMatFram)<-names(importedDat)
#write.table(aMatFram,file=paste("testoutputclean.csv", sep = ""),sep=",",row.names=F)
write.table(aMatFram,file=paste("/var/www/files/",args[1],"/outputOVR.csv", sep = ""),sep=",",row.names=F)
write.table(spearmanCODframe,file=paste("/var/www/files/",args[1],"/spearmanCODoutputOVR.csv", sep = ""),sep=",",row.names=F)
print("Done.")
}
else
{
print("Our humblest apologies. We only process data files with a maximum of 25 variables, and 500 observations. I'm afraid all the output links will be broken. If you download the R code, you can run MAATIE without constraints.")
}
|
4a718b6a1a73dcdc6fae6a193e2c762aa3a36216
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gofastr/examples/remove_stopwords.Rd.R
|
c9b33cffac7e12653690756bbc187e9df9cce61a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 506
|
r
|
remove_stopwords.Rd.R
|
# Extracted example script for gofastr::remove_stopwords / prep_stopwords.
library(gofastr)
### Name: remove_stopwords
### Title: Remove Stopwords from a TermDocumentMatrix/DocumentTermMatrix
### Aliases: remove_stopwords prep_stopwords
### Keywords: stopwords
### ** Examples
# Build a DocumentTermMatrix keyed by "time_tot", then strip stopwords.
(x <-with(presidential_debates_2012, q_dtm(dialogue, paste(time, tot, sep = "_"))))
remove_stopwords(x)
# Same workflow with a TermDocumentMatrix.
(y <- with(presidential_debates_2012, q_tdm(dialogue, paste(time, tot, sep = "_"))))
remove_stopwords(y)
# Normalize a mixed bag of stopword inputs into one prepared vector.
prep_stopwords("the", "ChIcken", "Hello", tm::stopwords("english"), c("John", "Josh"))
|
6fd7314388a09130af781270bd168ca97d350778
|
e3383aab16738b18137e6cfdc31291ab5a2b0a2c
|
/R/utils.R
|
e2a628e435d4c05f272d57740db4c9b90caab470
|
[
"MIT"
] |
permissive
|
TobCap/demagrittr
|
0d1e89ccf4e413eddc8901851e68c9be26df91ec
|
2f5c7b7a453cdab1f7dedcd8d3156c94800182fb
|
refs/heads/master
| 2020-04-06T12:37:45.088084
| 2017-08-20T04:14:19
| 2017-08-20T04:14:19
| 36,976,974
| 35
| 2
| null | 2017-08-20T04:14:20
| 2015-06-06T11:02:57
|
R
|
UTF-8
|
R
| false
| false
| 9,117
|
r
|
utils.R
|
# Recipe used to generate the package's internal sysdata objects
# (ops, regexp_meta, varname_prefix), kept for reference:
# ops <- c("%>%", "%T>%", "%$%", "%<>%")
# regexp_meta <- c(".", "\\", "|", "(", ")", "[", "]", "{", "}",
# "^", "$", "*", "+", "?")
# varname_prefix <- "#"
# devtools::use_data(ops, regexp_meta, varname_prefix,
# internal=TRUE, overwrite = TRUE)
# Silence R CMD check notes for NSE symbols used in generated code.
utils::globalVariables(c("expr_", "iter_"))
# initial values: package-namespace mutable state, (re)set by init_():
#   pf_    - caller frame consulted when generating temp symbol names
#   var_id - counter used by make_varname() for unique temp names
#   mode   - translation mode recorded for the current run
pf_ <- NULL
var_id <- 0L
mode <- NULL
# Reset package-level state before a translation run: remove any leftover
# generated temp symbols from `pf_`, zero the name counter, and record the
# caller frame and mode in the package namespace.
init_ <- function(pf_, mode) {
  pkg_env <- parent.env(environment()) # getNamespace("demagrittr")
  rm_tmp_symbols_if_exists(pf_)
  assign("var_id", 0L, envir = pkg_env)
  assign("mode", mode, envir = pkg_env)
  assign("pf_", pf_, envir = pkg_env)
  invisible()
}
# Generate a fresh temp-variable symbol (prefix + counter, e.g. `#0`),
# bumping the package-level counter via `<<-`. If the candidate name
# already exists in the caller frame `pf_`, recurse until a free one is
# found.
make_varname <- function(prefix = varname_prefix) {
  new_name <- paste0(prefix, var_id)
  var_id <<- var_id + 1L
  if (exists(new_name, envir = pf_)) {
    Recall(prefix = prefix)
  } else {
    as.symbol(new_name)
  }
}
# Validate and install a new package-level prefix for generated temp
# symbol names (must be a single non-empty character string).
set_varname_prefix <- function(nm) {
  stopifnot(length(nm) == 1, is.character(nm), isTRUE(nchar(nm) > 0))
  pkg_env <- parent.env(environment()) # getNamespace("demagrittr")
  assign("varname_prefix", nm, envir = pkg_env)
}
# Remove any leftover generated temp symbols (prefix followed by digits,
# e.g. `#0`, `#1`) from `env`. Regex metacharacters in the prefix are
# escaped character-by-character before building the ls() pattern.
rm_tmp_symbols_if_exists <- function(env) {
  prefix_mod <- vapply(
    strsplit(varname_prefix, "")[[1]],
    function(x) if (x %in% regexp_meta) paste0("\\", x) else x,
    character(1),
    USE.NAMES = FALSE)
  rm(list = ls(pattern = paste0("^", paste0(prefix_mod, collapse = ""), "\\d+$")
               , envir = env, all.names = TRUE)
     , envir = env)
}
# Wrap a translated pipeline body in a one-argument function
# `function(..) <wrapper(body_)>`, repointing the first stage's rhs at the
# new argument symbol `..`.
# NOTE(review): assumes body_[[1]] is a list-like stage carrying an `rhs`
# field -- confirm against the callers that construct `body_`.
make_lambda <- function(body_, wrapper) {
  arg_ <- as_formals(quote(..))
  body_[[1]]$rhs <- quote(..)
  call("function", arg_, wrapper(body_), NULL)
}
# Build a one-entry formals pairlist for `sym`, defaulting to the missing
# argument (so the generated function's parameter has no default value).
as_formals <- function(sym, default_value = quote(expr=)) {
  arg_list <- list(default_value)
  names(arg_list) <- as.character(sym)
  as.pairlist(arg_list)
}
# Factory for recursive language-object walkers.
#
# `ifs_expr` is an unevaluated `if`/`else if` cascade written in terms of
# `expr_` (the node being visited) and `iter_` (the recursive call). It is
# captured with substitute(), extended with a default final `else` that
# recurses into calls/pairlists and returns leaves unchanged, and compiled
# into a function of one language object, evaluated in `env_`.
construct_lang_manipulation <- function(ifs_expr, env_ = parent.frame()) {
  ifs <- substitute(ifs_expr)
  # The caller's clauses must reference `expr_`, otherwise the generated
  # walker could never inspect the current node.
  if (!"expr_" %in% all.names(ifs)) {
    stop("need to use 'expr_' in ifs clause")
  }
  # Default traversal: leaves returned as-is; pairlists and calls mapped
  # element-wise through the recursive `iter_`.
  body_base <- quote(
    if (length(expr_) <= 1 && !is.recursive(expr_)) {
      expr_
    } else if (is.pairlist(expr_)) {
      as.pairlist(lapply(expr_, iter_))
    } else {
      as.call(lapply(expr_, iter_))
    }
  )
  # Splice `next_` in as the final `else` of the (possibly chained) `if`
  # call `prev_`, recursing down existing `else if` arms.
  add_else <- function(prev_, next_) {
    if (prev_[[1]] != "if") {
      stop("not `if` clause")
    }
    if (length(prev_) == 3) {
      as.call(c(as.list(prev_), next_))
    } else {
      as.call(c(prev_[[1]], prev_[[2]], prev_[[3]], add_else(prev_[[4]], next_)))
    }
  }
  f_body <- add_else(ifs, body_base)
  # Assemble the recursive walker; evaluating in env_ lets the clauses see
  # helpers from the caller's environment.
  q_f <- bquote(
    function (x) {
      iter_ <- function(expr_) {
        .(f_body)
      }
      iter_(x)
    }
  )
  eval(q_f, env_)
}
# Replace every magrittr placeholder `.` inside x with expr_new.
# Formula calls (`~`) have their parts re-walked with dig_ast (dots inside
# formulas are left alone), and nested pipes are rebuilt via
# build_pipe_call so they get translated too.
replace_dot_recursive <- function(x, expr_new) {
  if (!has_dot_sim(x)) {
    # for short-cut purpose: nothing to substitute, just translate pipes
    return(dig_ast(x))
  }
  do_func <- construct_lang_manipulation(
    if (is_dot_sym(expr_)) {
      expr_new
    } else if (is_tilda_call(expr_)) {
      as.call(c(quote(`~`), lapply(as.list(expr_[-1]), dig_ast)))
    } else if (is_magrittr_call(expr_)) {
      build_pipe_call(expr_, expr_new)
    }
  )
  do_func(x)
}
# Replace only the TOP-LEVEL `.` arguments of call x with expr_new;
# dots nested deeper inside arguments are left untouched.
replace_direct_dot <- function(x, expr_new) {
  parts <- lapply(x, function(elem) {
    if (is_dot_sym(elem)) {
      expr_new
    } else {
      elem
    }
  })
  as.call(parts)
}
# Handle a parenthesised RHS such as `x %>% (substitute(f(), list(f = sum)))`:
# evaluate it in pf_ first (magrittr's documented "evaluate before piping"
# semantics), then apply the result to sym_prev.
get_rhs_paren <- function(rhs_, sym_prev) {
  # magrittr can evaluate below language syntax
  # language: `1:10 %>% (substitute(f(), list(f = sum)))`
  # As vignette says in https://cran.r-project.org/web/packages/magrittr/vignettes/magrittr.html
  # `Whenever you want to use a function- or call-generating statement as
  #  right-hand side, parentheses are used to evaluate the right-hand side
  #  before piping takes place.`.
  # closure:
  #  `1 %>% (function(x) x + 1))' runs
  #  '1 %>% (2 %>% (function(x) function(y) x + y))` occurs error in CRAN ver 1.5
  #  '1 %>% (2 %>% (function(x) {force(x); function(y) x + y}))` runs
  rhs_mod <- eval(rhs_, pf_)
  # browser()
  switch(
    typeof(rhs_mod)
    , "language" = {
      if (class(rhs_mod[[1]]) == "function") {
        # N.B. These are different. The first case is handled in this clause.
        # 1:10 %>% (substitute(f(), list(f = sum)) -> as.call(list(sum, 1))
        # 1:10 %>% (substitute(f(), list(f = quote(sum))) -> as.call(list(quote(sum), 1))
        if (is.primitive(rhs_mod[[1]])) {
          # A primitive has no closure body to deparse; recover its name.
          rhs_mod[[1]] <- as.symbol(asNamespace("methods")$.primname(rhs_mod[[1]]))
        } else {
          # FIX-ME: is there another way?
          # Round-trip through deparse/parse to turn the closure object
          # back into language.
          rhs_mod[[1]] <- parse(text = deparse(rhs_mod[[1]], width.cutoff = 500L))[[1]]
        }
      }
      # Re-wrap as a parenthesised pipe call and translate it.
      call("(", build_pipe_call(call("%>%", sym_prev, rhs_mod), NULL))
    }
    # default (e.g. a closure value): apply the translated RHS to sym_prev
    , as.call(c(dig_ast(rhs_), sym_prev))
  )
}
# Translate a single pipe stage: combine the RHS of one pipe operator with
# `lang_prev`, the expression produced by the stages to its left.
#
# rhs_:      unevaluated RHS of the stage
# lang_prev: language object representing the accumulated LHS value
# op_:       the pipe operator symbol for this stage (e.g. %>%, %$%)
#
# FIX: the original `has_direct_dot_arg` branch computed
# `rhs_mod <- replace_direct_dot(rhs_, lang_prev)` and then ignored it,
# passing `rhs_` to replace_dot_recursive() anyway — the dead assignment is
# removed. The trailing `stop("missing pattern...")` was unreachable because
# the last two conditions were exact complements; the final branch is now a
# plain `else`.
transform_rhs <- function(rhs_, lang_prev, op_) {
  if (is_dollar_pipe(op_)) {
    # x %$% f(a) -> with(x, f(a)) with dots resolved against x
    call("with", lang_prev, replace_dot_recursive(rhs_, lang_prev))
  } else if (is.symbol(rhs_)) {
    # bare function name: x %>% f -> f(x)
    as.call(c(rhs_, lang_prev))
  } else if (is_paren_call(rhs_)) {
    # parenthesised RHS: evaluate first, then pipe (see get_rhs_paren)
    get_rhs_paren(rhs_, lang_prev)
  } else if (is_braket_call(rhs_)) {
    replace_dot_recursive(rhs_, lang_prev)
  } else if (has_direct_dot_arg(rhs_)) {
    # RHS already names `.` among its arguments: substitute the dots only
    replace_dot_recursive(rhs_, lang_prev)
  } else {
    # no explicit dot: insert lang_prev as the first argument
    rhs_mod <- add_first_dot_to_rhs(rhs_, lang_prev)
    replace_dot_recursive(rhs_mod, lang_prev)
  }
}
# "lazy" mode folding: collapse the stage list from get_pipe_info() into a
# single nested expression, so e.g. `x %>% f %>% g` becomes `g(f(x))`.
# A tee stage (is_tee_pipe) evaluates its RHS for side effects inside a
# `{`-block while the accumulated value keeps threading through unchanged.
wrap_lazy <- function(lst) {
  iter <- function(l, acc) {
    if (length(l) == 0) {
      return(acc)
    }
    rhs_ <- l[[1]]$rhs
    op_ <- l[[1]]$op
    body_ <- transform_rhs(rhs_, acc, op_)
    if (is_tee_pipe(op_)) {
      # tee: run the stage for side effects, continue with `acc` itself
      call("{", build_pipe_call(call("%>%", acc, rhs_), NULL), iter(l[-1], acc))
    } else {
      iter(l[-1], body_)
    }
  }
  # lst[[1]] is the stream origin; the remaining stages wrap around it.
  iter(lst[-1], lst[[1]]$rhs)
}
# "promise" mode folding: each stage becomes an immediately-applied
# anonymous function, `(function(tmp_n) <stage body>)(<accumulated expr>)`,
# so each intermediate value is bound to a fresh generated argument name.
# Tee stages return the argument itself after evaluating the stage body.
wrap_promise <- function(lst) {
  iter <- function(l, acc) {
    if (length(l) == 0) {
      return(acc)
    }
    rhs_ <- l[[1]]$rhs
    op_ <- l[[1]]$op
    sym_new <- make_varname()
    body_ <- transform_rhs(rhs_, sym_new, op_)
    if (is_tee_pipe(op_)) {
      # The 4th NULL is required for compiler::compile()
      # tee: evaluate body_ for side effects, return the argument unchanged
      body_2 <- call("function", as_formals(sym_new),
                     call("{", body_, sym_new), NULL)
      # "(" is needed to be compatible with R's regular parse. See the next code.
      # > .Internal(inspect(quote((function(x) x)(1))))
      # > .Internal(inspect(
      #     as.call(list(call("function", as.pairlist(alist(x=)), quote(x)), 1))))
      body_3 <- as.call(list(call("(", body_2), acc))
      iter(l[-1], body_3)
    } else {
      body_2 <- call("function", as_formals(sym_new), body_, NULL)
      body_3 <- as.call(list(call("(", body_2), acc))
      iter(l[-1], body_3)
    }
  }
  # lst[[1]] is the stream origin; later stages wrap around it.
  iter(lst[-1], lst[[1]]$rhs)
}
# "eager" mode folding: translate the pipe into a `{`-block of sequential
# assignments to generated temporaries, one per stage, e.g.
#   { tmp0 <- x; tmp1 <- f(tmp0); g(tmp1) }
# The last stage is emitted without an assignment so its value is the
# block's value; a trailing tee stage re-emits the previous temporary.
wrap_eager <- function(lst) {
  iter <- function(l, sym_prev, acc = NULL) {
    if (length(l) == 0) {
      return(acc)
    }
    rhs_ <- l[[1]]$rhs
    op_ <- l[[1]]$op
    body_ <- transform_rhs(rhs_, sym_prev, op_)
    if (is_tee_pipe(op_)) {
      if (length(l) > 1) {
        # tee mid-stream: evaluate for side effects, keep threading sym_prev
        iter(l[-1], sym_prev, c(acc, body_))
      } else {
        # tee as last stage: the block's value is the previous temporary
        iter(l[-1], NULL, c(acc, body_, sym_prev))
      }
    } else {
      if (length(l) > 1) {
        sym_new <- make_varname()
        iter(l[-1], sym_new, c(acc, call("<-", sym_new, body_)))
      } else {
        # last stage: no assignment, its value is the result
        iter(l[-1], NULL, c(acc, body_))
      }
    }
  }
  # Bind the stream origin to the first temporary, then fold the stages.
  first_sym <- make_varname()
  first_assign <- call("<-", first_sym, lst[[1]]$rhs)
  as.call(c(quote(`{`), iter(lst[-1], first_sym, acc = first_assign)))
}
# Substitute `.` in a pipe-stream origin with `replace_sym` (used when the
# stream is nested inside an outer pipe). If the origin contains no dot it
# is returned as-is.
replace_rhs_origin <- function(rhs, replace_sym) {
  if (!has_dot_sim(rhs)) {
    # rhs is already applied by dig_ast()
    return(rhs)
  } else {
    # maybe ok? — non-recursive, top-level-only substitution of `.`
    methods::substituteDirect(rhs, list(. = replace_sym))
  }
}
# Insert `new_call` as the FIRST argument of call `rhs` (magrittr's default
# "pipe into first argument" behavior); the callee itself is re-walked by
# dig_ast so nested pipes in it (e.g. `. %>% round(2)`) are translated.
add_first_dot_to_rhs <- function(rhs, new_call) {
  ## rhs[[1]] should be passed recuresively
  # > demagrittr(1 %>% (. %>% round(2))(), mode = "lazy")
  # (function(..) round(.., 2))(1.2345)  #-> 1.23
  as.call(c(dig_ast(rhs[[1]]), new_call, as.list(rhs)[-1]))
}
# Translate an entire magrittr pipe expression into plain base-R code,
# according to the package-level `mode` ("eager"/"lazy"/"promise").
#
# expr:        the pipe call, e.g. quote(x %>% f %>% g)
# replace_sym: symbol to substitute for a `.` origin when this stream is
#              nested inside another pipe; NULL at top level
# A `. %>% ...` stream becomes a lambda; a compound pipe (%<>%) becomes an
# assignment back to the stream's origin.
build_pipe_call <- function(expr, replace_sym) {
  # `lst` should have more than one element
  lst <- get_pipe_info(expr)
  origin <- lst[[1]]$rhs
  first_op <- lst[[2]]$op
  wrapper <- switch(mode,
                    "eager" = wrap_eager,
                    "lazy" = wrap_lazy,
                    "promise" = wrap_promise,
                    stop("The selected mode was invalid."))
  body_ <-
    if (is_pipe_lambda(origin, first_op)) {
      make_lambda(lst, wrapper)
    } else if (is.null(replace_sym)) {
      wrapper(lst)
    } else {
      # nested stream: rebind its `.` origin to the outer temporary
      lst[[1]]$rhs <- replace_rhs_origin(origin, replace_sym)
      wrapper(lst)
    }
  if (is_compound_pipe(first_op)) {
    # x %<>% f  ->  x <- f(x)
    call("<-", origin, body_)
  } else {
    body_
  }
}
# Flatten a pipe expression into an ordered list of stages, left to right.
# Each element is list(op = <pipe operator or NULL>, rhs = <expression>);
# the first element is the stream's origin (op = NULL), with nested pipes
# inside it already translated by dig_ast().
get_pipe_info <- function(x, acc = NULL) {
  # Walk down the left spine of the pipe call, prepending each stage.
  while (is_magrittr_call(x)) {
    acc <- c(list(list(op = x[[1]], rhs = x[[3]])), acc)
    x <- x[[2]]
  }
  c(list(list(op = NULL, rhs = dig_ast(x))), acc)
}
# Top-level AST walker: rewrites every magrittr pipe call found anywhere in
# an expression via build_pipe_call(); all other nodes fall through to the
# default traversal defined in construct_lang_manipulation().
dig_ast <- construct_lang_manipulation(
  if (is_magrittr_call(expr_)) {
    build_pipe_call(expr_, NULL)
  }
)
|
5b7fce482ff19bf46c59b9b7c6e0e0ce54c87314
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/unfoldr/examples/simPoissonSystem.Rd.R
|
4a876605d25910b0e19eb3e325bcbfd0129f6677
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 639
|
r
|
simPoissonSystem.Rd.R
|
# Extracted Rd example for unfoldr::simPoissonSystem (Poisson germ-grain
# process): simulate log-normally sized oblate spheroids in a 5x5x5 box.
library(unfoldr)
### Name: simPoissonSystem
### Title: Poisson germ-grain process
### Aliases: simPoissonSystem
### ** Examples
# intensity parameter
lam <- 100
# simulation bounding box
box <- list("xrange"=c(0,5),"yrange"=c(0,5),"zrange"=c(0,5))
# log normal size distribution with a constant shape factor and
# concentration parameter (\code{kappa=1}) for the orientation, see reference [1]
theta <- list("size"=list("meanlog"=-2.5,"sdlog"=0.5),
              "shape"=list("s"=0.5),
              "orientation"=list("kappa"=1))
S <- simPoissonSystem(theta,lam,size="rlnorm",box=box,type="oblate",pl=1)
# Number of simulated grains.
length(S)
|
434f22b5b97b049c490badbc3d46ea99f7a50a8c
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/nn_bce_with_logits_loss.Rd
|
e4a44d859f7103fe24653d15b95ff3ddaf1c2f2f
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 3,650
|
rd
|
nn_bce_with_logits_loss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-loss.R
\name{nn_bce_with_logits_loss}
\alias{nn_bce_with_logits_loss}
\title{BCE with logits loss}
\usage{
nn_bce_with_logits_loss(weight = NULL, reduction = "mean", pos_weight = NULL)
}
\arguments{
\item{weight}{(Tensor, optional): a manual rescaling weight given to the loss
of each batch element. If given, has to be a Tensor of size \code{nbatch}.}
\item{reduction}{(string, optional): Specifies the reduction to apply to the output:
\code{'none'} | \code{'mean'} | \code{'sum'}. \code{'none'}: no reduction will be applied,
\code{'mean'}: the sum of the output will be divided by the number of
elements in the output, \code{'sum'}: the output will be summed.}
\item{pos_weight}{(Tensor, optional): a weight of positive examples.
Must be a vector with length equal to the number of classes.}
}
\description{
This loss combines a \code{Sigmoid} layer and the \code{BCELoss} in one single
class. This version is more numerically stable than using a plain \code{Sigmoid}
followed by a \code{BCELoss} as, by combining the operations into one layer,
we take advantage of the log-sum-exp trick for numerical stability.
}
\details{
The unreduced (i.e. with \code{reduction} set to \code{'none'}) loss can be described as:
\deqn{
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log \sigma(x_n)
+ (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],
}
where \eqn{N} is the batch size. If \code{reduction} is not \code{'none'}
(default \code{'mean'}), then
\deqn{
\ell(x, y) = \begin{array}{ll}
\mbox{mean}(L), & \mbox{if reduction} = \mbox{'mean';}\\
\mbox{sum}(L), & \mbox{if reduction} = \mbox{'sum'.}
\end{array}
}
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets \code{t[i]} should be numbers
between 0 and 1.
It's possible to trade off recall and precision by adding weights to positive examples.
In the case of multi-label classification the loss can be described as:
\deqn{
\ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad
l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c})
+ (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],
}
where \eqn{c} is the class number (\eqn{c > 1} for multi-label binary
classification,
\eqn{c = 1} for single-label binary classification),
\eqn{n} is the number of the sample in the batch and
\eqn{p_c} is the weight of the positive answer for the class \eqn{c}.
\eqn{p_c > 1} increases the recall, \eqn{p_c < 1} increases the precision.
For example, if a dataset contains 100 positive and 300 negative examples of a single class,
then \code{pos_weight} for the class should be equal to \eqn{\frac{300}{100}=3}.
The loss would act as if the dataset contains \eqn{3\times 100=300} positive examples.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, *)} where \eqn{*} means, any number of additional dimensions
\item Target: \eqn{(N, *)}, same shape as the input
\item Output: scalar. If \code{reduction} is \code{'none'}, then \eqn{(N, *)}, same
shape as input.
}
}
\examples{
if (torch_is_installed()) {
loss <- nn_bce_with_logits_loss()
input <- torch_randn(3, requires_grad = TRUE)
target <- torch_empty(3)$random_(1, 2)
output <- loss(input, target)
output$backward()
target <- torch_ones(10, 64, dtype = torch_float32()) # 64 classes, batch size = 10
output <- torch_full(c(10, 64), 1.5) # A prediction (logit)
pos_weight <- torch_ones(64) # All weights are equal to 1
criterion <- nn_bce_with_logits_loss(pos_weight = pos_weight)
criterion(output, target) # -log(sigmoid(1.5))
}
}
|
2065edc5f65ed091903dbf65c15b5d7ed5909999
|
fde3e9217c41d1f3c43add920a5486a90a675a5d
|
/R/labResults.R
|
4bc666b226a775ce86b57daab0bb0005261b398c
|
[
"MIT"
] |
permissive
|
hzi-braunschweig/epla-muspad-interactive-report
|
81fa544935059ad5c71af0cc194672f81d101787
|
064d0b01b8396cd2273f9f645ec2805a172ff783
|
refs/heads/master
| 2023-05-28T11:45:17.780830
| 2021-05-27T09:47:21
| 2021-05-27T09:47:21
| 363,172,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,068
|
r
|
labResults.R
|
read_lab_results <- function(labresults_path) {
  # Reads results of lab analyses of blood samples from all CSV exports under
  # `labresults_path` and returns them row-bound as one tibble, annotated
  # with city (`stadt`), analysis date (`datum`) and source file name.
  ## All files following the export naming scheme
  datein <- list.files(labresults_path, pattern = "^cov\\_202.*\\_hzi.*\\.csv$",
                       full.names = TRUE, recursive = TRUE)
  # Add further CSV files manually (exports that do not match the pattern)
  datein <- c(
    datein,
    paste0(
      labresults_path,
      "/Laborergebnisse Original CSV 2020/",
      c(
        "4_Reutlingen 2/Nachtrag Reutlingen 2 20202511.csv",
        "5_Osnabrueck/Nachtrag Osnabrueck 1 20202511.csv",
        "6_Magdeburg/Magdeburg 1 Erste Ergebnisse.csv",
        "7_Freiburg 2/Freiburg 2 Erste Ergebnisse.csv"
      )
    )
  )
  ## Read all files.
  # N.B. the quantitative results have sometimes non-numeric values such as
  # "<3,80", thus all variables are set to character.
  daten <- purrr::map(datein, read.csv2, header = TRUE,
                      colClasses = "character", na.strings = c("", NA))
  # Preallocate the result list instead of growing it inside the loop.
  data_raw <- vector("list", length(daten))
  for (i in seq_along(daten)) {
    # After visual inspection: skip 1 row after header (the option `skip` of
    # `read.csv2` is not used as it removes column names).
    # Then remove empty rows.
    # Quantitative lab results have "," replaced with "." as they are otherwise
    # not correctly exported to Excel, even though it's a string.
    index_stadt <- grep(
      "Laborergebnisse Original CSV 202",
      strsplit(datein[i], "/")[[1]]
    ) + 1
    stadt <- strsplit(datein[i], "/")[[1]][index_stadt]
    stadt <- gsub("^.\\_", "", stadt)  # drop the leading "4_"-style prefix
    # FIX: `[-1, , drop = FALSE]` skips the first data row like the former
    # `2:nrow(x)` indexing, but stays correct when a file has only one row
    # (`2:1` would have duplicated and reordered rows).
    data_raw[[i]] <- daten[[i]][-1, , drop = FALSE] %>%
      as_tibble() %>%
      dplyr::filter_all(any_vars(!is.na(.))) %>%
      dplyr::mutate_all(as.character) %>%
      dplyr::mutate(
        stadt = stadt,
        datum = stringr::str_sub(Analysedatum, 1, 10),
        datum = lubridate::dmy(datum),
        dateinname = basename(datein[i]),
        Ergebnis..quantitativ. =
          stringr::str_replace(Ergebnis..quantitativ., ",", ".")
      )
  }
  labr <- dplyr::bind_rows(data_raw)
  return(labr)
}
|
69955275f3878db0862ded18531ba1bce2367008
|
5812dff35a85efb65e3de3ff4f5cf28220dfe4a5
|
/Course 2 - R programming/Week 4/SimulatingRandomSampling.R
|
e6e2c2ce762ec904516affb3fe5fc5af34a7c077
|
[] |
no_license
|
migduroli/datasciencecoursera
|
4cbebd56ffdbd4412a552bbc5ccc2d61255b89d5
|
0c426f1a3baa87fb3f1d5ec23c30d5ec75628eb0
|
refs/heads/master
| 2021-01-17T11:24:45.237928
| 2017-02-09T23:34:25
| 2017-02-09T23:34:25
| 61,479,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
SimulatingRandomSampling.R
|
# Fix the RNG state so the draws below are reproducible.
set.seed(1)
# sample(x, size): draws without replacement by default
sample(1:10, 4)
sample(letters, 5)
sample(1:10) # This is a permutation (size defaults to length(x))
sample(1:10, replace = TRUE) # with replacement, so duplicates are possible
|
980f1f7dc531e746d03091f05aa5d9bbfca25acf
|
a863b4265df643045e38df741f948e4c72773332
|
/run_analysis.R
|
42ae004ef57e8de4c3f26b02c42e887de9f61d5d
|
[] |
no_license
|
bcdp5/getdataProject
|
dcf9b305bf94a106eafe16d198ca0b3f9cb26a90
|
f9c4b4762e80410c5548bed0ebf3b3d65ff1a4bb
|
refs/heads/master
| 2016-09-05T09:12:49.636868
| 2014-10-22T13:11:22
| 2014-10-22T13:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,533
|
r
|
run_analysis.R
|
################
## GETTING & CLEANING DATA: COURSE PROJECT
## Made by: bcdp5
###############
# Produces tidyData.txt: per-subject, per-activity averages of the mean/std
# measurements in the UCI HAR dataset. Expects "UCI HAR Dataset" in the
# working directory.
# Load library
library(data.table)
# 1.MERGE DATASET (train+test) -----------------------------------------------
#1.1 Handle common features, i.e. variables (second column of features.txt)
variables <- read.table(file=".//UCI HAR Dataset//features.txt", stringsAsFactors = F,header = F, sep = " ")[,2]
# Exploratory checks (printed, not assigned):
head(variables)
class(variables)
unique(variables)
#1.2 Handle the Training & set column names
train <- as.data.table(read.table(file=".//UCI HAR Dataset//train//X_train.txt", stringsAsFactors = F,header = F))
names(train) <- variables
sapply(train,class)
#Retrive training dataset of the activities
activityTrain <- as.data.table(read.table(file=".//UCI HAR Dataset//train//y_train.txt", stringsAsFactors = F,header = F))
unique(activityTrain)
#Retrive training dataset of the subjects
subjectTrain <- as.data.table(read.table(file=".//UCI HAR Dataset//train//subject_train.txt", stringsAsFactors = F,header = F))
unique(subjectTrain)
#Merge all the training sets (subject,activity,train) & set column names
finalTrain <- cbind(subjectTrain,activityTrain,train)
finalTrain <- setNames(object = finalTrain,nm = c("subject","activity",names(train)))
#1.3 Handle the Test set (same steps as 1.2, on the test split)
test <- as.data.table(read.table(file=".//UCI HAR Dataset//test//X_test.txt", stringsAsFactors = F,header = F))
names(test) <- variables
#Retrive test dataset of the activities
activityTest <- as.data.table(read.table(file=".//UCI HAR Dataset//test//y_test.txt", stringsAsFactors = F,header = F))
unique(activityTest)
#Retrive test dataset of the subjects
subjectTest <- as.data.table(read.table(file=".//UCI HAR Dataset//test//subject_test.txt", stringsAsFactors = F,header = F))
unique(subjectTest)
#Merge all the testing set (subject,activity,train)
finalTest <- cbind(subjectTest,activityTest,test)
finalTest <- setNames(object = finalTest,nm = c("subject","activity",names(test)))
#1.4 Final Merge (finalTrain + finalTest)
total.Data <- rbindlist(l = list(finalTest,finalTrain),use.names = T)
#Check the data.table
class(total.Data)
names(total.Data)
# set the keys of the data.table DT for subject and activity
setkeyv(total.Data,c("subject","activity"))
#1.5 Remove unused data.tables from current environment
rm(list = c("train","test","finalTest","finalTrain",
            "subjectTrain","subjectTest","activityTrain","activityTest",
            "variables"))
# 2.EXTRACT MEAN & STD ----------------------------------------------------
#2.1 Extract all the variables which name include "mean"
# NOTE(review): "*Mean" looks like a glob pattern; with R's regex engine it
# effectively behaves as a plain "mean" substring match (case-insensitive)
# — confirm if a stricter pattern was intended.
mean.val <- grep(pattern = "*Mean",x = names(total.Data),ignore.case = T)
# Test
total.Data[,mean.val, with=F]
#2.2 Extract all the variables which name include "std" (i.e. Standard deviation)
std.val <- grep(pattern = "*std",x = names(total.Data),ignore.case = T)
#2.3 Extract only subjects, activity, mean & std dev (columns 1,2 are keys)
total.Data <- total.Data[,c(1,2,mean.val,std.val), with =F]
#2.4 Remove unused objects
rm(list = c("mean.val","std.val"))
# 3.RENAME ACTIVITIES IN total.Data ---------------------------------------
#3.1 Load activities
activities <- as.data.table(read.table(file=".//UCI HAR Dataset//activity_labels.txt", stringsAsFactors = F,header = F))
names(activities) <- c("activity","description")
#3.2 Merge 'activities' and 'total.data'
# Set the keys for the merge operation
setkey(activities,"activity")
setkey(total.Data,"activity")
# Merge the two data.tables in order to have the description of each activity, instead of its code
total.Data <- total.Data[activities,]
# Assign the values to the column 'activity' based on the column 'description'
total.Data[,activity:=description]
# Drop the column 'description'
total.Data[,description := NULL]
#3.3 Remove unused data.table 'activities'
rm("activities")
# 4.LABELS THE DATASET ----------------------------------------------------
# Previously handled during the step 1
# 5.COMPUTE THE AVERAGE FOR EACH SUBJECT AND ACTIVITY ---------------------------------------------------------
#5.1 Re-set the keys of the data.table
setkeyv(total.Data,c("subject","activity"))
#5.2 Create a new tidy data.table with the average of the mean and sd measures (point 2) for each subject and activity
final <- total.Data[,lapply(.SD,mean), by="subject,activity"]
#5.3 Write the 'final' data.table in a text file
write.table(final, file = "tidyData.txt",row.names=F,col.names = T)
|
d5a664214a3484021757b94a9e521c48fcfb7250
|
040db01c15e4e029f426bccdc76aa7c21f94bb35
|
/tests/testthat/testtaskdatauncertain3.R
|
8699721d2dbe753a6853d888db4dd0d83588df32
|
[] |
no_license
|
david-hammond/projmanr
|
6bc4a974f39544ea06ec66294262f8dd0f367a49
|
e4b0e1e301468293b533f5b3910067091a2e0a9d
|
refs/heads/master
| 2023-06-25T09:33:12.024674
| 2023-06-15T05:28:50
| 2023-06-15T05:28:50
| 172,191,640
| 0
| 0
| null | 2019-02-23T08:41:40
| 2019-02-23T08:41:40
| null |
UTF-8
|
R
| false
| false
| 3,398
|
r
|
testtaskdatauncertain3.R
|
# testthat suite for projmanr's critical-path and Monte-Carlo simulation
# functions on the taskdatauncertain3 fixture.
# NOTE(review): "Critican" in the context string looks like a typo for
# "Critical" — left unchanged because it is a runtime string.
context("Critican Path on taskdatauncertain3")
library(projmanr)
library(igraph)
library(reshape2)
library(R6)
library(ggplot2)
# Monte-Carlo duration estimate should land near the deterministic 40.
test_that("Check approximate value of duration mean", {
  res <- simulation(projmanr::taskdatauncertain3, 1000)
  expect_equal(mean(res$durations) > 38 && mean(res$durations) < 42, TRUE)
})
test_that("Check the return size of simulation", {
  res <- simulation(projmanr::taskdatauncertain3, 100)
  expect_equal(length(res), 3)
  expect_equal(length(res$durations), 100)
  expect_equal(nrow(res$critical_indexes), 13)
  # Run the same tests, change the itr parameter to
  # ensure that it's working
  res <- simulation(projmanr::taskdatauncertain3, 1000)
  expect_equal(length(res), 3)
  expect_equal(length(res$durations), 1000)
  expect_equal(nrow(res$critical_indexes), 13)
})
test_that("Make sure the the error check on distribution works", {
  # Corrupt one distribution cell and expect a descriptive error.
  temp <- projmanr::taskdatauncertain3
  temp[12, 5] <- "t"
  expect_error(simulation(temp, 100), paste("Distribution t not supported,",
                                            "please use triangle, pert,",
                                            "uniform, normal or log_normal"))
})
# The following are the same tests from 'testtaskdata3.R'
# ensuring that the introduction of the uncertain columns did
# not cause any issues
test_that("Correct critical path", {
  res <- critical_path(projmanr::taskdatauncertain3)
  expect_equal(length(res$critical_path), 8)
  expect_equal(res$critical_path, c("2", "3", "6", "7",
                                    "9", "10", "11", "13"))
  expect_equal(length(res), 5)
  expect_equal(res$total_duration, 40)
  expect_equal(nrow(res$results), nrow(projmanr::taskdatauncertain3))
})
test_that("Correct critical path is computed with gantt", {
  res <- critical_path(projmanr::taskdatauncertain3, gantt = T)
  expect_equal(length(res$critical_path), 8)
  expect_equal(res$critical_path, c("2", "3", "6", "7",
                                    "9", "10", "11", "13"))
  expect_equal(length(res), 6)
  expect_equal(res$total_duration, 40)
  expect_equal(nrow(res$results), nrow(projmanr::taskdatauncertain3))
})
test_that("Correct critical path is computed with network diagram", {
  res <- critical_path(projmanr::taskdatauncertain3, network = T)
  expect_equal(length(res$critical_path), 8)
  expect_equal(res$critical_path, c("2", "3", "6", "7",
                                    "9", "10", "11", "13"))
  expect_equal(length(res), 6)
  expect_equal(res$total_duration, 40)
  expect_equal(nrow(res$results), nrow(projmanr::taskdatauncertain3))
})
test_that("Correct critical path is computed with both graph", {
  res <- critical_path(projmanr::taskdatauncertain3, gantt = T, network = T)
  expect_equal(length(res$critical_path), 8)
  expect_equal(res$critical_path, c("2", "3", "6", "7",
                                    "9", "10", "11", "13"))
  expect_equal(length(res), 7)
  expect_equal(res$total_duration, 40)
  expect_equal(nrow(res$results), nrow(projmanr::taskdatauncertain3))
})
test_that("Date output is working correctly", {
  res <- critical_path(projmanr::taskdatauncertain3, gantt = T, network = T,
                       start_date = "2017-10-10")
  expect_equal(res$end_date, as.Date("2017-11-19"))
  res <- critical_path(projmanr::taskdatauncertain3, gantt = T, network = T)
  expect_equal(res$end_date, Sys.Date() + 40)
})
|
bf8dfc8623be667c6246f0803b5b005804b5c924
|
03cf1d7d1632d7846e1fb9e3634ad99cdd8bdb31
|
/FinalGrade.R
|
4d1be58ce0e3fb7bad075c7d7ef1982943272438
|
[] |
no_license
|
rahilshaik/SDMDataAnalyticsProject
|
b4ae356548f8352f728cce737b36c6c12e8e2b90
|
736bad0ff51dce9b2e4c3f81d34a713c24cbd2ef
|
refs/heads/master
| 2020-04-16T08:22:59.520258
| 2019-01-12T18:43:53
| 2019-01-12T18:43:53
| 165,423,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,812
|
r
|
FinalGrade.R
|
# FIX: clear the workspace BEFORE loading data. In the original order,
# rm(list = ls()) ran AFTER read.csv() and wiped schoolData, so every
# statement that followed errored on the missing object.
rm(list = ls())
##Reading .csv file into a variable
setwd("E:/USF/ISM6137-SDM/Project/student/Final/Final1");
schoolData=read.csv("SchoolDataFinal.csv")
# Keep only the Portuguese-language records for this analysis.
schoolDataPortu=schoolData[schoolData$Language == 'Portugese',]
nrow(schoolDataPortu)
# Exploratory distribution plots.
hist(log(schoolDataPortu$number.of.school.absences))
boxplot(schoolDataPortu$first.period.grade)
hist(log(schoolDataPortu$Failures))
# NOTE(review): schoolFirstPeriodModel_Math is never defined anywhere in
# this script, so the influence plot below would error; disabled.
# influencePlot(schoolFirstPeriodModel_Math,id.method=identify)
# OLS model of Total.Grade for the Portuguese-language cohort.
# (Header previously read "Maths Model", but the data are schoolDataPortu.)
schoolPeriodModel_Portu=lm(Total.Grade ~ Studytime
                           + as.factor(extra.curricular.activities)
                           + as.factor(Internet.access.at.home)
                           + log(number.of.school.absences+1)
                           + as.factor(School.educational.support_Lag1)
                           #+ as.factor(School.educational.support_Lag1)
                           # + as.factor(School.educational.support_Lag2)
                           # + as.factor(Family.educational.support_Lag1)
                           # + as.factor(Family.educational.support_Lag2)
                           # + as.factor(extra.paid.classes_Lag1)
                           # + as.factor(extra.paid.classes_Lag2)
                           + workday.alcohol.consumption
                           + weekend.alcohol.consumption
                           + current.health.status
                           + quality.of.family.relationships
                           + as.factor(wants.to.take.higher.education)
                           #+ as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                           + as.factor(School)
                           + Failures,data = schoolDataPortu)
summary(schoolPeriodModel_Portu)
AIC(schoolPeriodModel_Portu)
BIC(schoolPeriodModel_Portu)
# Residual diagnostics.
# FIX: these previously referenced schoolFirstPeriodModel_Portu, which is
# never defined in this script; use the model fitted just above instead.
shapiro.test(schoolPeriodModel_Portu$res)
#Normally Distributed
#Homoskedasticity
plot(schoolPeriodModel_Portu)
bartlett.test(list(schoolPeriodModel_Portu$res, schoolPeriodModel_Portu$fit))
#Second Period Model
# OLS of second-period grade; adds first.period.grade and the lagged
# support/paid-class indicators as predictors, plus a three-way interaction
# of the current support variables.
schoolSecondPeriodModel_Portu=lm(second.period.grade ~ Studytime
                                 + as.factor(extra.curricular.activities)
                                 + as.factor(Internet.access.at.home)
                                 + log(number.of.school.absences+1)
                                 + first.period.grade
                                 + as.factor(School.educational.support_Lag1)
                                 + as.factor(School.educational.support_Lag2)
                                 + as.factor(Family.educational.support_Lag1)
                                 + as.factor(Family.educational.support_Lag2)
                                 + as.factor(extra.paid.classes_Lag1)
                                 + as.factor(extra.paid.classes_Lag2)
                                 + workday.alcohol.consumption
                                 + weekend.alcohol.consumption
                                 + current.health.status
                                 + quality.of.family.relationships
                                 + as.factor(wants.to.take.higher.education)
                                 + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                                 + as.factor(School)
                                 + Failures,data = schoolDataPortu)
summary(schoolSecondPeriodModel_Portu)
AIC(schoolSecondPeriodModel_Portu)
BIC(schoolSecondPeriodModel_Portu)
# Residual normality and homoskedasticity checks.
shapiro.test(schoolSecondPeriodModel_Portu$res)
#Normally Distributed
#Homoskedasticity
plot(schoolSecondPeriodModel_Portu)
bartlett.test(list(schoolSecondPeriodModel_Portu$res, schoolSecondPeriodModel_Portu$fit))
#Second Grade
# OLS of the final grade; includes both earlier period grades as predictors.
schoolThirdPeriodModel_Portu=lm(final.grade ~ Studytime
                                + as.factor(extra.curricular.activities)
                                + as.factor(Internet.access.at.home)
                                + log(number.of.school.absences+1)
                                + first.period.grade
                                + second.period.grade
                                + as.factor(School.educational.support_Lag1)
                                + as.factor(School.educational.support_Lag2)
                                + as.factor(Family.educational.support_Lag1)
                                + as.factor(Family.educational.support_Lag2)
                                + as.factor(extra.paid.classes_Lag1)
                                + as.factor(extra.paid.classes_Lag2)
                                + workday.alcohol.consumption
                                + weekend.alcohol.consumption
                                + current.health.status
                                + quality.of.family.relationships
                                + as.factor(wants.to.take.higher.education)
                                + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                                + as.factor(School)
                                + Failures,data = schoolDataPortu)
summary(schoolThirdPeriodModel_Portu)
AIC(schoolThirdPeriodModel_Portu)
BIC(schoolThirdPeriodModel_Portu)
#Multi variate normality Assumptions
hist(schoolThirdPeriodModel_Portu$residuals)
qqnorm(schoolThirdPeriodModel_Portu$residuals)
qqline(schoolThirdPeriodModel_Portu$residuals,col="red")
shapiro.test(schoolThirdPeriodModel_Portu$res)
#Normally Distributed
#Homoskedasticity
plot(schoolThirdPeriodModel_Portu)
bartlett.test(list(schoolThirdPeriodModel_Portu$res, schoolThirdPeriodModel_Portu$fit))
#heteroskedastic
#GLS
# Refit the three models with generalized least squares.
# NOTE(review): gls() is from the nlme package, which is never attached in
# this script — confirm it is loaded elsewhere (e.g. an earlier session).
schoolFirstPeriodModel_Portu_GLS=gls(Total.Grade ~ Studytime
                                     + as.factor(extra.curricular.activities)
                                     + as.factor(Internet.access.at.home)
                                     + log(number.of.school.absences+1)
                                     + as.factor(School.educational.support_Lag1)
                                     + as.factor(School.educational.support_Lag2)
                                     + as.factor(Family.educational.support_Lag1)
                                     + as.factor(Family.educational.support_Lag2)
                                     + as.factor(extra.paid.classes_Lag1)
                                     + as.factor(extra.paid.classes_Lag2)
                                     + workday.alcohol.consumption
                                     + weekend.alcohol.consumption
                                     + current.health.status
                                     #+ first.period.grade
                                     #+ second.period.grade
                                     + quality.of.family.relationships
                                     + as.factor(wants.to.take.higher.education)
                                     + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                                     + as.factor(School)
                                     + Failures,data = schoolDataPortu,na.action=na.exclude)
summary(schoolFirstPeriodModel_Portu_GLS)
AIC(schoolFirstPeriodModel_Portu_GLS)
BIC(schoolFirstPeriodModel_Portu_GLS)
# Two-sample comparison of first-period grade across schools and a
# study-time scatterplot (scatterplot() is from the car package).
t.test(schoolDataPortu$first.period.grade~schoolDataPortu$School)
library(car)
scatterplot(schoolDataPortu$Studytime~schoolDataPortu$Total.Grade, boxplots=FALSE, smooth=TRUE, reg.line=FALSE)
schoolSecondPeriodModel_Portu_GLS=gls(second.period.grade ~ Studytime
                                      + as.factor(extra.curricular.activities)
                                      + as.factor(Internet.access.at.home)
                                      + log(number.of.school.absences+1)
                                      + as.factor(School.educational.support_Lag1)
                                      + as.factor(School.educational.support_Lag2)
                                      + as.factor(Family.educational.support_Lag1)
                                      + as.factor(Family.educational.support_Lag2)
                                      + as.factor(extra.paid.classes_Lag1)
                                      + as.factor(extra.paid.classes_Lag2)
                                      + workday.alcohol.consumption
                                      + weekend.alcohol.consumption
                                      + current.health.status
                                      + first.period.grade
                                      #+ second.period.grade
                                      + quality.of.family.relationships
                                      + as.factor(wants.to.take.higher.education)
                                      + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                                      + as.factor(School)
                                      + Failures,data = schoolDataPortu,na.action=na.exclude)
summary(schoolSecondPeriodModel_Portu_GLS)
AIC(schoolSecondPeriodModel_Portu_GLS)
BIC(schoolSecondPeriodModel_Portu_GLS)
schoolThirdPeriodModel_Portu_GLS=gls(final.grade ~ Studytime
                                     + as.factor(extra.curricular.activities)
                                     + as.factor(Internet.access.at.home)
                                     + log(number.of.school.absences+1)
                                     + as.factor(School.educational.support_Lag1)
                                     + as.factor(School.educational.support_Lag2)
                                     + as.factor(Family.educational.support_Lag1)
                                     + as.factor(Family.educational.support_Lag2)
                                     + as.factor(extra.paid.classes_Lag1)
                                     + as.factor(extra.paid.classes_Lag2)
                                     + workday.alcohol.consumption
                                     + weekend.alcohol.consumption
                                     + current.health.status
                                     + first.period.grade
                                     + second.period.grade
                                     + quality.of.family.relationships
                                     + as.factor(wants.to.take.higher.education)
                                     + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                                     + as.factor(School)
                                     + Failures,data = schoolDataPortu,na.action=na.exclude)
summary(schoolThirdPeriodModel_Portu_GLS)
AIC(schoolThirdPeriodModel_Portu_GLS)
BIC(schoolThirdPeriodModel_Portu_GLS)
# Mixed-effects versions of the three grade models with a random intercept
# per School.
# NOTE(review): lmer() is from the lme4 package, which is never attached in
# this script — confirm it is loaded elsewhere. Also note that the fixed
# effect as.factor(School) and the random intercept (1|School) encode the
# same grouping — verify this is intentional.
First_gradeModel_Portu = lmer(first.period.grade ~ Studytime
                              + as.factor(extra.curricular.activities)
                              + as.factor(Internet.access.at.home)
                              + log(number.of.school.absences+1)
                              + as.factor(School.educational.support_Lag1)
                              + as.factor(School.educational.support_Lag2)
                              + as.factor(Family.educational.support_Lag1)
                              + as.factor(Family.educational.support_Lag2)
                              + as.factor(extra.paid.classes_Lag1)
                              + as.factor(extra.paid.classes_Lag2)
                              + workday.alcohol.consumption
                              + weekend.alcohol.consumption
                              + current.health.status
                              # + first.period.grade
                              # + second.period.grade
                              + quality.of.family.relationships
                              + as.factor(wants.to.take.higher.education)
                              + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                              + as.factor(School)
                              + Failures
                              + (1|School), data=schoolDataPortu )
summary(First_gradeModel_Portu)
AIC(First_gradeModel_Portu)
BIC(First_gradeModel_Portu)
ranef(First_gradeModel_Portu)
# NOTE(review): "Portgu" below looks like a typo for "Portu"; the name is
# used consistently within this section, so it still runs.
Second_gradeModel_Portgu = lmer(second.period.grade ~ Studytime
                                + as.factor(extra.curricular.activities)
                                + as.factor(Internet.access.at.home)
                                + log(number.of.school.absences+1)
                                + as.factor(School.educational.support_Lag1)
                                + as.factor(School.educational.support_Lag2)
                                + as.factor(Family.educational.support_Lag1)
                                + as.factor(Family.educational.support_Lag2)
                                + as.factor(extra.paid.classes_Lag1)
                                + as.factor(extra.paid.classes_Lag2)
                                + workday.alcohol.consumption
                                + weekend.alcohol.consumption
                                + current.health.status
                                + first.period.grade
                                # + second.period.grade
                                + quality.of.family.relationships
                                + as.factor(wants.to.take.higher.education)
                                + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                                + as.factor(School)
                                + Failures
                                + (1|School), data=schoolDataPortu )
summary(Second_gradeModel_Portgu)
AIC(Second_gradeModel_Portgu)
BIC(Second_gradeModel_Portgu)
ranef(Second_gradeModel_Portgu)
Final_gradeModel_Portu = lmer(final.grade ~ Studytime
                              + as.factor(extra.curricular.activities)
                              + as.factor(Internet.access.at.home)
                              + log(number.of.school.absences+1)
                              + as.factor(School.educational.support_Lag1)
                              + as.factor(School.educational.support_Lag2)
                              + as.factor(Family.educational.support_Lag1)
                              + as.factor(Family.educational.support_Lag2)
                              + as.factor(extra.paid.classes_Lag1)
                              + as.factor(extra.paid.classes_Lag2)
                              + workday.alcohol.consumption
                              + weekend.alcohol.consumption
                              + current.health.status
                              + first.period.grade
                              + second.period.grade
                              + quality.of.family.relationships
                              + as.factor(wants.to.take.higher.education)
                              + as.factor(School.educational.support)*as.factor(Family.educational.support)*as.factor(extra.paid.classes)
                              + as.factor(School)
                              + Failures
                              + (1|School), data=schoolDataPortu )
summary(Final_gradeModel_Portu)
AIC(Final_gradeModel_Portu)
BIC(Final_gradeModel_Portu)
ranef(Final_gradeModel_Portu)
|
0a8a3a1bdb6b62bc536c0fe681df8c35140b4448
|
359c010d8b57231385e80e5f863c915504a4dbeb
|
/lab12.r
|
3637cc277135b7d5b738cfcf3570944903dc9f47
|
[] |
no_license
|
paulinak2107/R-study
|
bd38cb3727f7d0b9c5e8fc587aa3bcbc2aa78c9a
|
f1385ee6e20716765412510046b047a5b0f113c8
|
refs/heads/main
| 2023-04-11T19:41:55.382851
| 2021-05-06T14:43:47
| 2021-05-06T14:43:47
| 364,942,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,153
|
r
|
lab12.r
|
# Survival analysis of the `ovarian` data set: Kaplan-Meier curves and
# log-rank p-values for three candidate grouping variables.
library(survival)
library(survminer)
rak <- ovarian  # "rak" = "cancer" (Polish)
rak.surv <- Surv(time = ovarian$futime, event = ovarian$fustat)
rak.surv
#ggsurvplot(rak.fit, data=rak, pval = TRUE)
rak.hist <- hist(rak$age)
rak$age <- ifelse(rak$age >= 55, 1, 0) # split boundary ===> 55 years
# AGE
rak.fit.age <- survfit(rak.surv ~ age, data=rak)
summary(rak.fit.age)
rak.result.age <- survfit(rak.surv ~ age, data = rak)
g1 <- ggsurvplot(rak.result.age, data=rak, pval = TRUE)
g1
# RESID.DS
rak.fit.resid <- survfit(rak.surv ~ resid.ds, data=rak)
summary(rak.fit.resid)
rak.result.resid <- survfit(rak.surv ~ resid.ds, data = rak)
g2 <- ggsurvplot(rak.result.resid, data=rak, pval = TRUE)
g2
# ECOG.PS
rak.fit.ecog <- survfit(rak.surv ~ ecog.ps, data=rak)
# FIX: this previously summarised rak.fit.resid again (copy-paste slip);
# summarise the ECOG fit instead.
summary(rak.fit.ecog)
rak.result.ecog <- survfit(rak.surv ~ ecog.ps, data = rak)
g3 <- ggsurvplot(rak.result.ecog, data=rak, pval = TRUE)
g3
# H0: the survival functions in the different groups do not differ
# statistically from one another.
# AGE:   the S(t) curves differ statistically between groups.
# Resid: the S(t) curves do not differ statistically between groups.
# Ecog:  the S(t) curves do not differ statistically between groups.
|
7bdbf7ef9b962ad27691334a797314778093a7f5
|
f2da63de512183804290bfcabfa60eaca3649e05
|
/exercises/statistics/bayesian/albert/chap03/exercises/exercise-3-9-6/code/albert-exercise-3-9-6.R
|
19f28698687fbf30f35d987f0a623474b1c0db55
|
[] |
no_license
|
paradisepilot/statistics
|
a94bb57ebe453d49c06815c523e8f633423cb68e
|
50daf644baca1f40253edf91083ed42d4c5f9342
|
refs/heads/master
| 2022-07-25T16:19:07.751886
| 2022-06-26T21:18:38
| 2022-06-26T21:18:38
| 5,012,656
| 0
| 2
| null | 2019-04-22T06:52:55
| 2012-07-13T01:11:42
|
HTML
|
UTF-8
|
R
| false
| false
| 911
|
r
|
albert-exercise-3-9-6.R
|
# Albert, "Bayesian Computation with R", exercise 3.9.6.
# Output directory is taken from the first command-line argument.
command.arguments <- commandArgs(trailingOnly = TRUE);
output.directory <- command.arguments[1];
####################################################################################################
setwd(output.directory);
library(LearnBayes);
library(ggplot2);
####################################################################################################
### 3.9.6(a)
# Grid posterior for the unknown mean mu with a flat prior:
# s "successes" and f "failures" relative to the threshold mu0 = 70.
mu0 <- 70;
sigma <- 10;
s <- 1;
f <- 17;
mu <- seq(0, 2 * mu0, 1e-3);   # dense grid over [0, 140]
prior <- 1;                    # flat (improper) prior on the grid
# Likelihood of observing s values below mu0 and f above it,
# for a N(mu, sigma) measurement model.
likelihood <- pnorm(q=mu0,mean=mu,sd=sigma)^s * pnorm(q=mu0,mean=mu,sd=sigma,lower.tail=FALSE)^f;
posterior <- prior * likelihood;
posterior <- posterior / sum(posterior);  # normalize over the grid
png("Fig1_posterior.png");
qplot(data = data.frame(mu = mu, posterior = posterior), x = mu, y = posterior, geom = "line");
dev.off();
### 3.9.6(b)
# Posterior mean of mu via grid summation
mu.posterior.mean <- sum(posterior * mu);
mu.posterior.mean;
### 3.9.6(c)
# Posterior probability that mu exceeds 80
sum(posterior[mu > 80]);
|
c0554ae30bebcf3655bf431e468d84aa2706e108
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/revdbayes/examples/quantile_to_gev.Rd.R
|
911fc4a23af924a8043cd1efda275ba12f04f80b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
quantile_to_gev.Rd.R
|
# Example usage of revdbayes::quantile_to_gev, extracted from the Rd file.
library(revdbayes)
### Name: quantile_to_gev
### Title: Converts quantiles to GEV parameters
### Aliases: quantile_to_gev
### ** Examples
# Three quantiles and their corresponding exceedance probabilities
my_q <- c(15, 20, 22.5)
my_p <- 1-c(0.5, 0.9, 0.5^0.01)
# Recover GEV (location, scale, shape) matching these quantiles
x <- quantile_to_gev(quant = my_q, prob = my_p)
# Check: the fitted GEV should reproduce the original quantiles
qgev(p = 1 - my_p, loc = x[1], scale = x[2], shape = x[3])
|
2c0e3e964e313406aa061cd2462933eca8e8d7fd
|
f445fe1c05a8a343d32787d2e6815bd80546cbfa
|
/R/Statistics_210/midterm2.R
|
997f76fba41dac5018cff0057a9104135e9b8e9d
|
[] |
no_license
|
dillon4287/CodeProjects
|
f6d99986c811c7df7bb27b0fab6196861741bac7
|
b7a9fa9f30cad84c5bdc58c757f051ebcfd4db73
|
refs/heads/master
| 2023-07-20T23:44:39.088933
| 2023-07-18T19:40:46
| 2023-07-18T19:40:46
| 77,807,396
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 241
|
r
|
midterm2.R
|
#midterm2
library(leaps)
library(MASS)
library(permute)
library(corrplot)
wine <- read.csv("/Users/dillonflannery-valadez/Coding/R/Stat210/winequality-red.csv",
sep=";")
corMat <- cor(wine[, 2:12])
corrplot(corMat, method="number")
|
261c799b31e4f4349ea2f61c2f6411a59006a887
|
871d09fd5e25f649636e28148bb4280a4c489e8a
|
/Smile_Lines.R
|
aead70967d26b2d89a90cc91e25027aa0a79aeca
|
[
"MIT"
] |
permissive
|
lynda-nguyen/Research-FALL18
|
06ea33eb7507cb0470833de16966f787c057042d
|
8fd0da8d137c698fb2f4d36172979dfd3279b5c0
|
refs/heads/master
| 2020-03-28T11:16:34.387151
| 2018-10-26T16:40:06
| 2018-10-26T16:40:06
| 148,196,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,589
|
r
|
Smile_Lines.R
|
# R
# 09/18/2018
# data source: https://arxiv.org/pdf/1702.07234.pdf
## Parameters used throughout the paper
# ball minimum circumference = 29.5 in (74.9 cm)
# ball minimum weight = 20 oz (567 g)
# d = distance from free throw line to basket = 4.6m
# H = basket rim to floor height = 3.05 m
# R = radius of basket rim = 0.23 m
# r = radius of basketball = 0.12 m
#making the data table
free.throw.df <- data.frame("d" = c(4.55,4.59,4.22,4.61,4.76,4.24,4.53,
4.12,4.57,4.57,4.55,4.30,4.61,4.20,
4.59,4.55,4.86,4.86,4.34,4.30,4.61,
4.59,4.40,4.22,4.63),
"h_max" = c(4.04,4.12,4.08,4.06,4.08,4.10,
4.10,3.83,4.16,4.14,4.02,3.98,
4.12,3.94,4.08,4.08,4.18,4.10,
4.02,3.85,4.00,4.06,3.85,3.89,4.10),
"t" = c(0.94,0.91,0.92,0.93,0.92,0.92,0.92,
1.02,0.90,0.91,0.94,0.96,0.91,0.97,
0.92,0.92,0.89,0.92,0.94,1.01,0.95,
0.93,1.01,0.99,0.92),
"v_x" = c(4.26,4.18,3.90,4.29,4.40,3.89,4.16,
4.19,4.11,4.14,4.29,4.11,4.20,4.08,
4.24,4.20,4.35,4.46,4.09,4.33,4.38,
4.27,4.43,4.19,4.25),
"v_y"= c(6.25,6.37,6.31,6.28,6.31,
6.34,6.34,5.91,6.43,6.40,6.22,6.16,
6.37,6.10,6.31,6.31,6.46,6.34,6.22,
5.94,6.19,6.28,5.94,6.00,6.34),
"v" = c(7.49,7.61,7.38,7.54,7.65,7.41,7.55,
6.99,7.65,7.62,7.46,7.29,7.62,7.19,
7.56,7.54,7.82,7.72,7.36,7.10,7.47,
7.53,7.15,7.12,7.60),
"theta"=c(55.71,56.69,58.29,55.68,55.10,58.46,
56.74,54.65,57.38,57.09,55.40,56.28,
56.58,56.24,56.10,56.32,56.04,54.86,
56.68,53.89,54.74,55.79,53.28,55.10,56.18),
"score"=c(1,1,0,1,0,0,1,0,1,1,1,0,1,0,1,1,0,0,0,0,
1,1,0,0,1))
free.throw.df
#plot of 25 observed free throws by student (without theoretical lines)
plot(free.throw.df$theta, free.throw.df$v, col = free.throw.df$score+1, main = "Free Throws by Student", ylab = "Release Velocity (m/s)",
xlab = "Release Angle (theta)")
legend("topleft", legend = c("0", "1"), col=c("black", "red"), fill = 1:2)
#################################################################################
# Angle-velocity smile: simulate projectile trajectories over a grid of
# release velocities and angles, flag which combinations pass within the
# rim, and plot the successful (theta, v) pairs.
d = 4.6    # distance from free throw line to basket (m)
R = 0.23   # rim radius (m)
H = 3.05   # rim height (m)
r = 0.12   # ball radius (m)
h = 2      # release height (m)
velocity <-(seq(7,11,0.1))
theta <- (seq(30,70,1))
N = length(velocity) # same as length(theta)
#(x − (d − R))^2 + (y − H)^2 > r^2
x <- numeric(N^2) #initialize x vector
y <- numeric(N^2) #initialize y vector
t <- numeric(N^2) #initialize time vector
k <-1 # counter variable for for loops
# When using cosine, must convert from degrees to radians, use pi/180.
# For each (velocity, angle) pair, compute the ball position when it
# reaches horizontal distance d - R/2.
for (i in 1:N){
  for (j in 1:N){
    #time = (d-(R/2)/velocity*cos(theta))
    t[k] = (d-(R/2))/(velocity[i]*cos(theta[j]*(pi/180)))
    x[k] <- velocity[i]*t[k]*cos(theta[j]*(pi/180))
    y[k] <- h + velocity[i]*sin(theta[j]*(pi/180))*t[k] - 0.5*(9.8)*t[k]*t[k]
    k = k+1
  }
}
# (x − (d − R))^2 = a
# (y − H)^2 = b
a <- numeric(N^2) #initialize a component
b <- numeric(N^2) #initialize b component
for (i in 1:N^2){
  a[i] = x[i]- d + R
  b[i] = y[i]- H
}
# c adds a and b to compare to r^2 (distance-to-rim-center criterion)
c = a^2 + b^2
# score is a categorical value that indicates if the shot will be made
score <- numeric(N^2)
for(i in 1:N^2){
  if (c[i] < r^2){
    score[i] = 1
  }
}
# determines which values are less than r^2
values <- which(score == 1)
#converts the values to get the theta and velocity values
x.theta <- numeric(length(values)) #initialize x (theta) column
y.velocities <- numeric(length(values)) #initialize y (velocity) column
# values[i] <- score comes from c value, which derives from x[i] and y[i]
# for loop increments by i, j, and k
# k increments by one, for every 41 j's, i increments by 1
# k == values, j == theta
# values[i] - (as.integer(values[1]/41))*41 == theta sequence value
#ie 223 - 5(223) = 18 = theta[18] == 47 degrees
#velocity == i
# if (as.integer(values[i]/41) is greater than 0, then +1 is added b/c of 41 remainder
# ie 223/41 = 5 + 1 == 6
# ie velocity[6] == 7.5
# NOTE(review): the modulo-by-41 index arithmetic below yields index 0 when
# values[i] is an exact multiple of 41 (theta[0] is empty) -- confirm no
# successful shot lands exactly on a multiple of 41 for this grid.
for(i in 1:length(values)){
  # EQ.8 says theta min is 39.8, thus we can ignore values less than that
  if ((theta[values[i] - (as.integer(values[i]/41))*41]) > 39.8){
    x.theta[i] <- theta[values[i] - (as.integer(values[i]/41))*41]
    if (values[i]/41 > as.integer(values[i]/41)){
      y.velocities[i] <- velocity[as.integer(values[i]/41) + 1]
    }
    else{
      y.velocities[i] <- velocity[as.integer(values[i]/41)]
    }
  }
}
# displays data in a data frame && cleans the zero values out
angle.vs.vel.df <- rbind(x.theta[x.theta != 0], y.velocities[y.velocities != 0])
transpose.df <- t(angle.vs.vel.df)
#plots the data
plot(x.theta[x.theta != 0], y.velocities[y.velocities != 0], ylab = "v, m/s",
     xlab = "theta", main = "Angle-Velocity 'Smile'")
|
a38aa4084ae15406db041ed9115d78e131f70c53
|
c639cbad1939137ae9845d7a3d4d33d60bcaa038
|
/man/databasesAvailables-function.Rd
|
9402c3fd537aa1118132b92dcc20bbec65b0062a
|
[
"Artistic-2.0"
] |
permissive
|
lamdv/rRice
|
0fbff968d5798802726078264fb78e44655b2353
|
d0261f358825fb8c5fe2399d64c2ee5de3c740fe
|
refs/heads/master
| 2021-09-14T18:40:17.762372
| 2018-05-03T16:57:47
| 2018-05-03T16:57:47
| 107,843,725
| 0
| 3
| null | 2017-12-31T17:17:31
| 2017-10-22T06:48:22
|
R
|
UTF-8
|
R
| false
| true
| 411
|
rd
|
databasesAvailables-function.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbInformations-functions.R
\name{databasesAvailables}
\alias{databasesAvailables}
\title{Function to get the number of available databases}
\usage{
databasesAvailables()
}
\value{
Returns the number of available databases.
}
\description{
This function returns the number of available databases.
}
\examples{
databasesAvailables()
}
|
13b3b6dd262d78d92ae22d6c6d80154fa23f0d4c
|
2ec32b655522e967c9eac24fd949619aa93d5ab7
|
/R/site-stats.R
|
3c07d9669f9ead0c2057a620c9963050e357c35c
|
[
"MIT"
] |
permissive
|
hrbrmstr/pressur
|
d387457e5ca126aa201b853e95cc9204a3c5601f
|
cacf682a85cdc37c2357df30fc241a1f1b73e5fb
|
refs/heads/master
| 2022-11-08T17:08:07.810511
| 2020-06-28T11:57:44
| 2020-06-28T11:57:44
| 115,561,791
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
site-stats.R
|
#' Retrieve statistics for a WordPress site
#'
#' @references <https://developer.wordpress.com/docs/api/1.1/get/sites/$site/stats/>
#' @param site Site id or domain. When omitted, the primary site of the
#'   authenticated user is used.
#' @return A list of stats metadata; the `visits` element (a data frame of
#'   per-period visit counts) is usually the part of interest.
#' @export
#' @examples
#' if (interactive()) {
#'   wp_auth()
#'   wp_site_stats()
#' }
wp_site_stats <- function(site) {
  # Build the stats endpoint: primary site from cached auth metadata,
  # or an explicit site id/domain.
  stats_url <- if (missing(site)) {
    paste0(.pkg$me$meta$links$site[1], "/stats")
  } else {
    sprintf("https://public-api.wordpress.com/rest/v1.2/sites/%s/stats", site[1])
  }

  resp <- httr::GET(
    url = stats_url,
    .add_bearer_token(),
    accept_json()
  )
  httr::stop_for_status(resp)

  out <- httr::content(resp)
  # Flatten the visits payload into a data frame with named columns,
  # and parse the period strings into Date objects.
  out$visits <- purrr::map_df(out$visits$data, ~purrr::set_names(.x, out$visits$fields))
  out$visits$period <- anytime::anydate(out$visits$period)
  out
}
|
fbcf9c59d71a09f966ab9a6666af2ee98744242c
|
97c2cfd517cdf2a348a3fcb73e9687003f472201
|
/R/src/GSFCore/tests/testSQLConnection.r
|
a0485ceba879f0479c75e853d5000407e91e99f1
|
[] |
no_license
|
rsheftel/ratel
|
b1179fcc1ca55255d7b511a870a2b0b05b04b1a0
|
e1876f976c3e26012a5f39707275d52d77f329b8
|
refs/heads/master
| 2016-09-05T21:34:45.510667
| 2015-05-12T03:51:05
| 2015-05-12T03:51:05
| 32,461,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,837
|
r
|
testSQLConnection.r
|
cat("\n\nTest cases for SQLConnection object\n\n")
library("GSFCore")

# Connecting creates a live connection; isConnected() flips from FALSE to TRUE.
testInit <- function() {
  conn <- SQLConnection()
  checkTrue( is(conn, "SQLConnection") )
  checkTrue(!conn$isConnected())
  conn$init( )
  checkTrue(conn$isConnected())
}

# select() returns query results as a data frame.
testSelect <- function() {
  conn <- initSQLConnection()
  query.results = conn$select("SELECT 1 + 1")
  checkTrue(is.data.frame(query.results))
  checkTrue(length(query.results) == 1)
  checkTrue(query.results[[1,1]] == 2)
}

# query() runs DDL/DML; vectorized SQL strings execute one statement each.
testQuery <- function() {
  conn <- initSQLConnection()
  conn$query("CREATE TABLE #temp1 (col1 INTEGER, col2 VARCHAR(255))")
  conn$query("INSERT INTO #temp1 VALUES (3, 'abcde')")
  conn$query("INSERT INTO #temp1 VALUES (312, 'zyxwv')")
  conn$query("INSERT INTO #temp1 VALUES (69, 'eric rocks')")
  query.results = conn$select("SELECT * from #temp1")
  checkEquals(sort(query.results[,1]), c(3, 69, 312))
  conn$query("DELETE FROM #temp1")
  conn$query(paste("INSERT INTO #temp1 VALUES (", c(2,4,6,8), ", 'test')", sep = ""))
  query.results = conn$select("SELECT * from #temp1")
  checkEquals(sort(query.results[,1]), c(2,4,6,8))
}

# Malformed SQL must raise an error rather than fail silently.
testBadSelectException <- function() {
  conn <- initSQLConnection()
  shouldBomb(conn$select("SELCT 1+1"))
}

testBadQueryException <- function() {
  conn <- initSQLConnection()
  shouldBomb(conn$query("CRETA TABLE #temp1 (col1 INTEGER, col2 VARCHAR(255))"))
}

testDisconnect <- function() {
  conn <- initSQLConnection()
  conn$disconnect()
  checkTrue(!conn$isConnected())
}

# Shared helper: construct and initialize a connection for the tests below.
initSQLConnection <- function() {
  (conn <- SQLConnection())$init()
  conn
}

# With autocommit off, rollback() undoes uncommitted DDL/DML but keeps the
# connection usable.
testCommitRollback <- function() {
  conn <- initSQLConnection()
  conn$setAutoCommit(FALSE)
  on.exit(conn$setAutoCommit(TRUE))
  conn$query("CREATE TABLE #temp1 (col1 INTEGER, col2 VARCHAR(255))")
  conn$query("INSERT INTO #temp1 VALUES (3, 'abcde')")
  checkSame(the(conn$select("SELECT col1 FROM #temp1")), 3)
  conn$rollback()
  checkTrue(conn$isConnected())
  shouldBombMatching(conn$select("SELECT col1 FROM #temp1"), "Invalid object name '#temp1'")
  checkSame(the(conn$select("SELECT 1+2")), 3)
  # now set autocommit TRUE and show its behavior is still working.
}

# transaction() commits on success and restores autocommit mode.
testTransactionSuccess <- function() {
  conn <- initSQLConnection()
  queries <- function() {
    conn$query("CREATE TABLE #temp1 (col1 INTEGER, col2 VARCHAR(255))")
    conn$query("INSERT INTO #temp1 VALUES (3, 'abcde')")
    checkSame(the(conn$select("SELECT col1 FROM #temp1")), 3)
  }
  conn$transaction(queries)
  checkTrue(conn$getAutoCommit())
  checkLength(conn$select("SELECT col1 FROM #temp1"), 1)
}

# transaction() rolls back on error, re-raises it, and restores autocommit;
# the same work outside a transaction persists.
testTransactionFailure <- function() {
  conn <- initSQLConnection()
  errorMidQueries <- function() {
    conn$query("CREATE TABLE #temp1 (col1 INTEGER, col2 VARCHAR(255))")
    conn$query("INSERT INTO #temp1 VALUES (3, 'abcde')")
    checkSame(the(conn$select("SELECT col1 FROM #temp1")), 3)
    throw("I am not an error")
  }
  shouldBombMatching(conn$transaction(errorMidQueries), "I am not an error")
  checkTrue(conn$getAutoCommit())
  shouldBombMatching(conn$select("SELECT col1 FROM #temp1"), "Invalid object name '#temp1'")
  shouldBombMatching(errorMidQueries(), "I am not an error")
  checkLength(conn$select("SELECT col1 FROM #temp1"), 1)
}

# No-op used by the transaction precondition tests below.
noop <- function() {}

testTransactionBombsIfNotInAutoCommitMode <- function() {
  conn <- initSQLConnection()
  conn$setAutoCommit(FALSE)
  on.exit(function() conn$setAutoCommit(TRUE))
  shouldBombMatching(conn$transaction(noop), "not in AutoCommit mode")
}

testNestedTransactionBombs <- function() {
  conn <- initSQLConnection()
  shouldBombMatching(conn$transaction(function() {
    conn$transaction(noop)
  }), "within.*transaction")
}

# Simulated read timeouts (via RJDBC_THROW_TIMEOUT) close the connection;
# verify the env flag resets and the connection recovers after re-init.
testSelectTimeOutError <- function() {
  conn <- initSQLConnection()
  checkSame(the(conn$select("select 1 + 2")), 3)
  Sys.setenv(RJDBC_THROW_TIMEOUT=1)
  on.exit(function() { Sys.setenv(RJDBC_THROW_TIMEOUT="") })
  shouldBombMatching(
    dbGetQuery(conn$.dbh, "select 1 + 1"),
    ":ResultSet::next failed (I/O Error: Read timed out)"
  )
  checkSame(as.numeric(Sys.getenv("RJDBC_THROW_TIMEOUT")), 0)
  shouldBombMatching(
    conn$select("select 1 + 2"),
    "Invalid state, the Connection object is closed."
  )
  conn$init()
  Sys.setenv(RJDBC_THROW_TIMEOUT=1)
  checkSame(the(conn$select("select 1 + 4")), 5)
  Sys.setenv(RJDBC_THROW_TIMEOUT=1)
  shouldBombMatching(
    conn$transaction(function() conn$select("select 1 + 5")),
    ":ResultSet::next failed (I/O Error: Read timed out)"
  )
}
|
7abc25d4a53c6acca37591d3bee60eb34670782c
|
d8d5dc6044a25cc6635a65ebd660f072033de5c3
|
/inst/examples/shiny/tests/shinytest/mytest.R
|
914315b8551f673bfd860d8609d65b0a24388f4d
|
[] |
no_license
|
jtnedoctor/nomnoml
|
968c89a49e8c251d1cb307fadddb617fe9c41d83
|
de922462523c266bc4738ddb428291ec76b323a3
|
refs/heads/master
| 2023-02-02T02:31:30.451631
| 2020-12-17T08:30:27
| 2020-12-17T08:30:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
mytest.R
|
# shinytest script: snapshot the app's initial state, then after two
# successive changes to the `textbox` input.
app <- ShinyDriver$new("../../")
app$snapshotInit("mytest")
app$snapshot()                          # initial state
app$setInputs(textbox = "foo bar baz")
app$snapshot()                          # after first input change
app$setInputs(textbox = "foo")
app$snapshot()                          # after second input change
|
ff529a39ca949546dbccda19b9f6b3a56ab17039
|
2a97b1ca4ba91a59b8a6457bfb54a99806864213
|
/man/pnadc_example.Rd
|
4f134fa0bdb5cf99348b05c43c82cfa9238f5e06
|
[] |
no_license
|
BragaD/PNADcIBGE
|
a07020460f26ce34c3e9b7fa800253f0965b70fe
|
a573e9c9fa6675f5012438d717cd9cf554dc6c79
|
refs/heads/master
| 2021-06-26T10:17:13.830930
| 2018-08-23T14:31:56
| 2018-08-23T14:46:11
| 128,931,294
| 1
| 1
| null | 2020-10-15T02:09:10
| 2018-04-10T12:45:48
|
R
|
UTF-8
|
R
| false
| true
| 402
|
rd
|
pnadc_example.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/example.R
\name{pnadc_example}
\alias{pnadc_example}
\title{Path for example data}
\usage{
pnadc_example(path = NULL)
}
\arguments{
\item{path}{Name of file. If `NULL`, the example files will be listed.}
}
\description{
Path for example data
}
\examples{
pnadc_example()
pnadc_example("exampledata.txt")
}
|
5692d30b87ce6ee13def64520f2ae50096c1b5c0
|
af286c8e4688c1ca310605d33d74ac6bc6f0cf5e
|
/R/server-df.R
|
23a029e0c09e2895e19b0f72d8e0a9493857f4ce
|
[
"MIT"
] |
permissive
|
glin/reactable
|
999d3385bad36c4273f9766d8a8663b42a88cef4
|
86bd27670eac8fb330a50413f462cf1fe0ff8e88
|
refs/heads/main
| 2023-08-29T11:15:04.340315
| 2023-07-14T20:33:39
| 2023-07-14T20:33:39
| 178,748,690
| 594
| 84
|
NOASSERTION
| 2023-01-08T17:30:20
| 2019-03-31T22:22:16
|
JavaScript
|
UTF-8
|
R
| false
| false
| 6,600
|
r
|
server-df.R
|
# Constructor for the data-frame-backed server-side backend.
# Returns an empty list tagged with the S3 class used for method dispatch.
serverDf <- function() {
  backend <- list()
  class(backend) <- "reactable_serverDf"
  backend
}
# Server-side data resolver for the plain data frame backend.
# Applies, in order: column filtering, global search, sorting,
# grouping/aggregation, and pagination, mirroring the client-side pipeline.
# Arguments mirror the request sent by the client; several props are
# accepted but currently unimplemented (selectedRowIds, expanded,
# searchMethod, pagination, paginateSubRows).
reactableServerData.reactable_serverDf <- function(
  x,
  data = NULL,
  columns = NULL,
  pageIndex = 0,
  pageSize = 0,
  sortBy = NULL,
  filters = NULL,
  searchValue = NULL,
  groupBy = NULL,
  pagination = NULL,
  paginateSubRows = NULL,
  # Unused/unimplemented props
  selectedRowIds = NULL,
  expanded = NULL,
  searchMethod = NULL,
  ...
) {
  # Column filters - simple text match for now
  if (length(filters) > 0) {
    data <- dfFilter(data, filters)
  }
  # Global searching - simple text match for now
  if (!is.null(searchValue)) {
    data <- dfGlobalSearch(data, searchValue)
  }
  # Sorting
  if (length(sortBy) > 0) {
    data <- dfSortBy(data, sortBy)
  }
  # Grouping and aggregation
  if (length(groupBy) > 0) {
    data <- dfGroupBy(data, groupBy, columns)
  }
  # Pagination
  dfPaginate(data, pageIndex, pageSize)
}
# Apply per-column text filters: each filter keeps rows whose column value
# contains the filter string (case-insensitive, fixed substring match).
dfFilter <- function(df, filters) {
  for (flt in filters) {
    col <- flt$id
    # Filters that reference columns not in the data are ignored
    if (col %in% colnames(df)) {
      keep <- grepl(tolower(flt$value), tolower(df[[col]]), fixed = TRUE)
      df <- df[keep, ]
    }
  }
  df
}
# Global search: keep rows where ANY column contains the search string
# (case-insensitive, fixed substring match).
dfGlobalSearch <- function(df, searchValue) {
  needle <- tolower(searchValue)
  hit <- rep(FALSE, nrow(df))
  for (nm in colnames(df)) {
    hit <- hit | grepl(needle, tolower(df[[nm]]), fixed = TRUE)
  }
  df[hit, ]
}
# Sorting is locale dependent and usually different from JavaScript
# (UTF-8 collation vs. C collation in JS)
#
# Sort `df` by the columns described in `by` (a list of lists with `id`
# and optional `desc`). Non-numeric columns are ranked via xtfrm() so the
# multi-key order() call operates on numeric keys only.
dfSortBy <- function(df, by) {
  keys <- lapply(by, function(col) {
    values <- df[[col$id]]
    if (is.numeric(values)) values else xtfrm(values)
  })
  # vapply instead of sapply: guarantees a logical vector even for
  # degenerate input, so order()'s `decreasing` argument is always typed.
  decreasing <- vapply(by, function(col) isTRUE(col$desc), logical(1))
  df[do.call(order, c(keys, list(decreasing = decreasing))), , drop = FALSE]
}
# Recursively group `df` by the column ids in `by`, producing one row per
# group value with aggregated columns and a `.subRows` list-column holding
# the (recursively grouped) member rows. `depth` tracks the current level
# of the recursion; callers start at 0.
dfGroupBy <- function(df, by, columns = NULL, depth = 0) {
  by <- unlist(by)
  # Base case: all grouping levels consumed; return the leaf rows as-is.
  if (length(by) == depth) {
    return(df)
  }
  groupedColumnId <- by[depth + 1]
  splitBy <- if (is.list(df[[groupedColumnId]])) {
    # Split doesn't work with list-columns, so convert list-columns to strings
    vapply(df[[groupedColumnId]], toJSON, character(1))
  } else {
    # Filter out unused levels for factor columns (which split would turn into
    # empty groups), and ensure group names are character strings (split coerces
    # factors/numerics/etc. into strings anyway).
    as.character(df[[groupedColumnId]])
  }
  splitIndices <- split(seq_len(nrow(df)), splitBy)
  # NOTE: grouped rows won't necessarily be in the same order as the column values
  groups <- lapply(
    splitIndices,
    function(inds) {
      subGroup <- df[inds, , drop = FALSE]
      # Reset row names for easier testing. This doesn't really matter though,
      # as row names are eventually discarded in the end.
      row.names(subGroup) <- NULL
      # Omit grouped column
      subGroup[[groupedColumnId]] <- NULL
      subGroup
    }
  )
  values <- unique(df[[groupedColumnId]])
  df <- if (is.list(values)) {
    # Preserve list-columns
    listSafeDataFrame(values)
  } else {
    dataFrame(values)
  }
  colnames(df) <- groupedColumnId
  # Find the columns that can be aggregated, including any columns in groupBy.
  # groupBy columns that aren't in the row's group are allowed to be aggregated.
  groupedColumns <- by[seq_len(depth + 1)]
  aggregatedColumns <- Filter(function(column) !column[["id"]] %in% groupedColumns, columns)
  for (column in aggregatedColumns) {
    aggregate <- column[["aggregate"]]
    if (is.null(aggregate)) next
    # Named aggregates ("sum", "mean", ...) resolve via aggregateFuncs;
    # user-supplied R functions are used directly.
    if (!is.function(aggregate)) {
      aggregate <- aggregateFuncs[[aggregate]]
    }
    id <- column[["id"]]
    df[[id]] <- unlist(lapply(values, function(x) {
      # Group lookup key must match the string form used by split() above
      value <- if (is.list(x)) toJSON(x) else as.character(x)
      subGroup <- groups[[value]]
      aggregate(subGroup[[id]])
    }), recursive = FALSE)
  }
  # Recurse: each group's member rows are grouped by the remaining levels.
  df[[".subRows"]] <- lapply(values, function(x) {
    value <- if (is.list(x)) toJSON(x) else as.character(x)
    subGroup <- groups[[value]]
    dfGroupBy(subGroup, by, columns = columns, depth = depth + 1)
  })
  df
}
# Like data.frame() but preserves list-columns without having to wrap them
# in I(). Row names default to the sequence along the first column, and no
# factor coercion occurs.
listSafeDataFrame <- function(...) {
  cols <- list(...)
  attr(cols, "row.names") <- seq_along(cols[[1]])
  class(cols) <- "data.frame"
  cols
}
# data.frame() wrapper that never converts strings to factors, regardless
# of R version (the stringsAsFactors default only changed in R 4.0).
dataFrame <- function(...) data.frame(..., stringsAsFactors = FALSE)
# Slice `df` down to one page of rows. A NULL pageSize means pagination is
# disabled and the whole data frame is returned. Wraps the result in
# resolvedData() (defined elsewhere) along with the pre-pagination row count.
dfPaginate <- function(df, pageIndex = 0, pageSize = NULL) {
  if (is.null(pageSize)) {
    return(resolvedData(df, rowCount = nrow(df)))
  }
  # Ensure page index is within boundaries
  rowCount <- nrow(df)
  maxPageIndex <- max(ceiling(rowCount / pageSize) - 1, 0)
  if (pageIndex < 0) {
    pageIndex <- 0
  } else if (pageIndex > maxPageIndex) {
    pageIndex <- maxPageIndex
  }
  # Clamp start/end so the slice never runs past the last row.
  # NOTE(review): for an empty df, rowStart:rowEnd is 0:0, which selects
  # zero rows -- appears intentional but worth confirming.
  rowStart <- min(pageIndex * pageSize + 1, nrow(df))
  rowEnd <- min(pageIndex * pageSize + pageSize, nrow(df))
  page <- df[rowStart:rowEnd, ]
  resolvedData(page, rowCount = rowCount)
}
# Built-in aggregation functions for grouped rows, keyed by name.
# For strings, max/min/median are locale dependent and usually different
# from JavaScript (UTF-8 collation vs. C collation in JS).
aggregateFuncs <- list(
  "sum" = function(x) sum(x, na.rm = TRUE),
  "mean" = function(x) mean(x, na.rm = TRUE),
  # max/min of an all-NA column: NaN for numeric (matches max(na.rm = TRUE)
  # semantics without the warning), NA otherwise.
  "max" = function(x) {
    if (all(is.na(x))) {
      if (is.numeric(x)) NaN else NA
    } else {
      max(x, na.rm = TRUE)
    }
  },
  "min" = function(x) {
    if (all(is.na(x))) {
      if (is.numeric(x)) NaN else NA
    } else {
      min(x, na.rm = TRUE)
    }
  },
  "median" = function(x) median(x, na.rm = TRUE),
  "count" = function(x) length(x),
  "unique" = function(x) paste(unique(x), collapse = ", "),
  # "value (count), value (count), ..." in table() (sorted-unique) order
  "frequency" = function(x) {
    counts <- table(x)
    labels <- sprintf("%s (%s)", names(counts), as.vector(counts))
    paste(labels, collapse = ", ")
  }
)
# For testing only. Sorting is locale dependent and different between UTF-8
# (typically the default in R) and C (which JavaScript uses). See ?Comparison.
# testthat 3e and R CMD check both use a C locale for collation, but this
# can be used for more explicit tests. The previous collation locale is
# always restored, even if `expr` errors.
withCollationC <- function(expr) {
  previous <- Sys.getlocale("LC_COLLATE")
  on.exit(Sys.setlocale("LC_COLLATE", previous), add = TRUE)
  Sys.setlocale("LC_COLLATE", "C")
  # `expr` is a promise, so it is evaluated here, after the locale switch
  expr
}
|
5a53e61f34802a4db5c0009333685c80260e7e77
|
f696d5a4aeccc4e4a9c25824c511bd80c481ba42
|
/Training system (induce models)/03_creating_dataset_for_classification.R
|
10dcceaf2f76cdfff1c91ab69fefb821f3116288
|
[] |
no_license
|
ursusdm/predictingHourlySolarRadiation
|
fd1a13a93418f58f1f752ec34f9e466c909a5cc5
|
5978d460f70703544ee4ff8492c81666b51c24b6
|
refs/heads/master
| 2022-12-17T21:10:26.551012
| 2020-09-17T16:47:32
| 2020-09-17T16:47:32
| 296,289,858
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
03_creating_dataset_for_classification.R
|
library(tidyverse)
examples <- read.csv("examples_meteo+radiation+cluster.csv")
# BUILDING NEW DATASET FOR CLASSIFICATION
# CREATING DATASET TO USE REGRESSION MODEL TO PREDICT kd
# previous extraterrial gd was used in the definition of the system,
# but it is not available in the original dataset. It must be calculated
# we must remove real kd (because we are calculating the prediction of kd using information from previous day)
dataset <- examples %>%
mutate(gdext_previo = gd_previo/kd_previo) %>%
select(1,gdext_previo,everything()) %>%
select(-kd)
# LOADING MODEL: random forest in "model_rf"
file <- "random_forest_regression_model.rds"
if (file.exists(file)) {
model_rf <- readRDS(file)
# Prediction of kd
kd_predicted<-predict.train(object=model_rf,dataset,type="raw")
# new dataset for classification purposes
# include new predicted kd
# remove real data about radiation
# and reorder some attributes
dataset_with_prediction <- cbind(dataset,kd_predicted)
dataset_with_prediction <- dataset_with_prediction %>%
select(-gd_previo, -gdext_previo, -kd_previo)
dataset_with_prediction <- dataset_with_prediction %>%
select(1:(length(dataset_with_prediction)-2),
length(dataset_with_prediction),
length(dataset_with_prediction)-1)
write.csv(dataset_with_prediction, "examples_meteo+predicted_kd+cluster.csv", row.names=FALSE)
} else {
stop("Model is not available at ", file, " SOURCE: 1_inducing_RF_for_regression.R")
}
|
da22d6640953b4e3b265d3f1c3d625b7c09a04da
|
fa30d5877052bb8771b5747192aed40a27b0b145
|
/clients_clustering.R
|
246f42ddde9b31c7434fef6eeec45de5b1836761
|
[] |
no_license
|
Dinicharia/clients_clustering
|
6dbdbd50ca62839d09a45641689bbf35930140ea
|
b0d3c91aae0c37a930ed61123b891955d7904146
|
refs/heads/main
| 2023-06-14T09:27:13.350654
| 2021-07-09T08:09:00
| 2021-07-09T08:09:00
| 383,718,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,048
|
r
|
clients_clustering.R
|
#importing essential packages
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(plotrix)) install.packages("plotrix", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
if(!require(purrr)) install.packages("purrr", repos = "http://cran.us.r-project.org")
if(!require(cluster)) install.packages("cluster", repos = "http://cran.us.r-project.org")
if(!require(gridExtra)) install.packages("gridExtra", repos = "http://cran.us.r-project.org")
if(!require(grid)) install.packages("grid", repos = "http://cran.us.r-project.org")
setwd("D:/GitHub/clients_clustering/clients_dataset")
customer_data = read.csv("Mall_Customers.csv") #reading data from file
#analysis
str(customer_data) #structure of the data frame
names(customer_data) #row titles only
head(customer_data) #the first six rows
#dataset summary
summary(customer_data) # age summary
#barplot of gender distribution
a = table(customer_data$Gender) #fetch from Gender column only
barplot(a, main="Gender Comparision",
ylab = "Count",
xlab = "Gender",
col = c("#009999", "#0000FF"),
legend = rownames(a))
#piechart showing the gender ratios
pct = round(a/sum(a)*100)
lbs = paste(c("Female", "Male"), " ", pct, "%", sep = " ")
pie3D(a,labels=lbs,
main = "Ratio of Female and Male")
#age distribution
summary(customer_data$Age) # age summary of our data
#the histogram
hist(customer_data$Age,
col = "grey",
main = "Count of Age Class",
xlab = "Age Class",
ylab = "Frequency",
labels = TRUE#adding frequency to individual bars
)
summary(customer_data$Annual.Income..k..) # summary of the income data
#annual income histogram
hist(customer_data$Annual.Income..k..,
col = "grey",
main = " Annual Income Distribution",
xlab = "Annual Income Class",
ylab = "Frequency",
labels = TRUE
)
#the density plot
plot(density(customer_data$Annual.Income..k..),
col = "blue",
main = "Annual Income Distribution",
xlab = "Annual Income Class",
ylab = "Density")
#filled density plot
polygon(density(customer_data$Annual.Income..k..),
col="grey", border = "blue")
#spending score analysis
summary(customer_data$Spending.Score..1.100.) #the summary
hist(customer_data$Spending.Score..1.100.,
main = "Spending Score",
xlab = "Spending Score Class",
ylab = "Frequency",
col = "grey",
labels = TRUE)
#The elbow method
set.seed(123)
# function to calculate total intra-cluster sum of square
iss <- function(k) {
kmeans(customer_data[,3:5], k, iter.max=100, nstart=100, algorithm = "Lloyd" )$tot.withinss
}
k.values <- 1:10 #Number of clusters K
iss_values <- map_dbl(k.values, iss) #Total intra-clusters sum of squares
plot(k.values, iss_values,
type = "b", pch = 19, frame = FALSE,
xlab = "Number of clusters K",
ylab = "Total intra-clusters sum of squares",
main = "The Elbow Plot")
#Silhouette Method
k2 <- kmeans(customer_data[, 3:5], 2, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s2 <- plot(silhouette(k2$cluster, dist(customer_data[, 3:5], "euclidean")))
k3 <- kmeans(customer_data[, 3:5], 3, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s3 <- plot(silhouette(k3$cluster, dist(customer_data[, 3:5], "euclidean")))
k4 <- kmeans(customer_data[, 3:5], 4, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s4 <- plot(silhouette(k4$cluster, dist(customer_data[, 3:5], "euclidean")))
k5 <- kmeans(customer_data[, 3:5], 5, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s5 <- plot(silhouette(k5$cluster, dist(customer_data[, 3:5], "euclidean")))
k6 <- kmeans(customer_data[, 3:5], 6, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s6 <- plot(silhouette(k6$cluster, dist(customer_data[, 3:5], "euclidean")))
k7 <- kmeans(customer_data[, 3:5], 7, iter.max = 100,nstart = 50,algorithm = "Lloyd")
s7 <- plot(silhouette(k7$cluster, dist(customer_data[, 3:5], "euclidean")))
k8 <- kmeans(customer_data[, 3:5], 8, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s8 <- plot(silhouette(k8$cluster, dist(customer_data[, 3:5], "euclidean")))
k9 <- kmeans(customer_data[, 3:5], 9, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s9 <- plot(silhouette(k9$cluster, dist(customer_data[, 3:5], "euclidean")))
k10 <- kmeans(customer_data[, 3:5], 10, iter.max = 100, nstart = 50, algorithm = "Lloyd")
s10 <- plot(silhouette(k10$cluster, dist(customer_data[, 3:5], "euclidean")))
#visualizing the optimal number of clusters
library(NbClust)
library(factoextra)
fviz_nbclust(customer_data[,3:5], kmeans, method = "silhouette")
#the Gap statistic method using clusGap() function
set.seed(125)
stat_gap <- clusGap(customer_data[,3:5], FUN = kmeans, nstart = 25,
K.max = 10, B = 50)
fviz_gap_stat(stat_gap) #the plot
#output of our optimal cluster
k6<-kmeans(customer_data[,3:5],6,iter.max=100,nstart=50,algorithm="Lloyd")
k6
# visualizing the clusters
pcclust = prcomp(customer_data[, 3:5], scale = FALSE) #principal component analysis
summary(pcclust)
pcclust$rotation[, 1:2]
#the plot
set.seed(1)
ggplot(customer_data, aes(x =Annual.Income..k.., y = Spending.Score..1.100.)) +
geom_point(stat = "identity", aes(color = as.factor(k6$cluster))) +
scale_color_discrete(name=" ",
breaks=c("1", "2", "3", "4", "5","6"),
labels=c("Cluster 1", "Cluster 2", "Cluster 3", "Cluster 4", "Cluster 5","Cluster 6")) +
ggtitle("K-means Clustering")
# Map each distinct value of `vec` to one colour from the rainbow palette and
# return the colour for every element (output has the same length as `vec`).
kCols <- function(vec) {
  palette_cols <- rainbow(length(unique(vec)))
  palette_cols[as.numeric(as.factor(vec))]
}
# Plot the k = 6 cluster assignments in the space of the first two principal
# components, one colour per cluster.
digCluster <- k6$cluster;
dignm <- as.character(digCluster); # cluster labels as strings for the legend
plot(pcclust$x[,1:2], # scores on the first two principal components
     col = kCols(digCluster), pch = 19, xlab = "K-means", ylab = "classes",
     main = "Cluster k-means")
legend("bottomright", unique(dignm), fill=unique(kCols(digCluster)))
|
1eaa4781d3dab062825b2dde1e52488c2c64af79
|
53d7e351e21cc70ae0f2b746dbfbd8e2eec22566
|
/man/us_skinfold_data.Rd
|
c6d33526c9d22e80a4dbc4ff93a91ad8bd359aae
|
[] |
no_license
|
tbates/umx
|
eaa122285241fc00444846581225756be319299d
|
12b1d8a43c84cc810b24244fda1a681f7a3eb813
|
refs/heads/master
| 2023-08-31T14:58:18.941189
| 2023-08-31T09:52:02
| 2023-08-31T09:52:02
| 5,418,108
| 38
| 25
| null | 2023-09-12T21:09:45
| 2012-08-14T20:18:01
|
R
|
UTF-8
|
R
| false
| true
| 2,443
|
rd
|
us_skinfold_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{us_skinfold_data}
\alias{us_skinfold_data}
\title{Anthropometric data on twins}
\format{
A data frame with several hundred twin families (1 per row), each twin measured on 10 variables.
}
\usage{
data(us_skinfold_data)
}
\description{
A dataset containing height, weight, BMI, and skin-fold fat measures in several
hundred US twin families participating in the MCV Cardiovascular Twin Study (PI Schieken).
Biceps and Triceps are folds above and below the upper arm (holding arm palm upward),
Calf (fold on the calf muscle), Subscapular (fold over the shoulder blade),
Suprailiacal (fold between the hip and ribs).
}
\details{
\itemize{
\item \emph{fan} FamilyID (t1=male,t2=female)
\item \emph{zyg} Zygosity 1:mzm, 2:mzf, 3:dzm, 4:dzf, 5:dzo
\item \emph{ht_T1} Height of twin 1 (cm)
\item \emph{wt_T1} Weight of twin 1 (kg)
\item \emph{bmi_T1} BMI of twin 1
\item \emph{bml_T1} log BMI of twin 1
\item \emph{bic_T1} Biceps Skinfold of twin 1
\item \emph{caf_T1} Calf Skinfold of twin 1
\item \emph{ssc_T1} Subscapular Skinfold of twin 1
\item \emph{sil_T1} Suprailiacal Skinfold of twin 1
\item \emph{tri_T1} Triceps Skinfold of twin 1
\item \emph{ht_T2} Height of twin 2
\item \emph{wt_T2} Weight of twin 2
\item \emph{bmi_T2} BMI of twin 2
\item \emph{bml_T2} log BMI of twin 2
\item \emph{bic_T2} Biceps Skinfold of twin 2
\item \emph{caf_T2} Calf Skinfold of twin 2
\item \emph{ssc_T2} Subscapular Skinfold of twin 2
\item \emph{sil_T2} Suprailiacal Skinfold of twin 2
\item \emph{tri_T2} Triceps Skinfold of twin 2
}
}
\examples{
\dontrun{
data(us_skinfold_data)
str(us_skinfold_data)
par(mfrow = c(1, 2)) # 1 rows and 3 columns
plot(ht_T1 ~ht_T2, ylim = c(130, 165), data = subset(us_skinfold_data, zyg == 1))
plot(ht_T1 ~ht_T2, ylim = c(130, 165), data = subset(us_skinfold_data, zyg == 3))
par(mfrow = c(1, 1)) # back to as it was
}
}
\references{
Moskowitz, W. B., Schwartz, P. F., & Schieken, R. M. (1999).
Childhood passive smoking, race, and coronary artery disease risk:
the MCV Twin Study. Medical College of Virginia.
\emph{Archives of Pediatrics and Adolescent Medicine}, \strong{153}, 446-453.
\url{https://pubmed.ncbi.nlm.nih.gov/10323623/}
}
\seealso{
Other datasets:
\code{\link{Fischbein_wt}},
\code{\link{GFF}},
\code{\link{docData}},
\code{\link{iqdat}},
\code{\link{umx}}
}
\concept{datasets}
\keyword{datasets}
|
27ebed6007df7b4deab600a26262754cd19a7ef9
|
0575784d79da7d193f187e90b39fb7c87ee77821
|
/man/FixCNVPosition.Rd
|
2dacb646e67c1377b54afb81d17187c93410babc
|
[] |
no_license
|
ej/iPsychCNV
|
6213b816ec7e38d98fda678f95a7f9c0917cedf8
|
b19c2576d1de010c8c71571f05c9c19c6c7d0b83
|
refs/heads/master
| 2021-01-17T22:56:26.865372
| 2015-08-11T09:17:58
| 2015-08-11T09:17:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
rd
|
FixCNVPosition.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/FixCNVPosition_V3.R
\name{FixCNVPosition}
\alias{FixCNVPosition}
\title{FixCNVPosition}
\usage{
FixCNVPosition(Df, subCNV, MinNumSNPs, ID)
}
\value{
Data frame with CNV information.
}
\description{
FixCNVPosition: Trim the CNV position by distance of SNPs.
}
\details{
Specifically designed to handle noisy data from amplified DNA on Phenylketonuria (PKU) cards. The function is a pipeline using many subfunctions.
}
\author{
Marcelo Bertalan
}
|
4b7a488f2cbb1ffd063d053cd727f28a9f171af6
|
23c0e647fe0ca4ac3407182dfea14ec1bd7c75d1
|
/ggplotGuia.R
|
033ee8ed27c439b4f7f245d66088b7fb0f4b3ee2
|
[] |
no_license
|
betomartinezg/ggplot2
|
8c6f6d613cbc54b0593954e185ae603c31d2b9f3
|
361414b13859bdd5a6e5398aee00d693d9594022
|
refs/heads/master
| 2020-08-28T18:16:29.788527
| 2019-10-26T23:31:46
| 2019-10-26T23:31:46
| 217,781,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,480
|
r
|
ggplotGuia.R
|
## ggplot Guide
# ggplot works by adding layers. Its main components are data (a data frame),
# aesthetics (used to define x and y; also colour, size or shape of points,
# bar height, etc.) and geometry (the plot type: histogram, box plot, line
# plot, density plot, etc.).
### Install and load the ggplot2 package
###install.packages('ggplot2') # install the package (run once)
library(ggplot2) # load the package
### Data
# Load the data
data(iris)
# iris: measurements for 3 species of the genus Iris: setosa, versicolor
# and virginica.
head(iris) # inspect what iris contains
##########################################################################################
### BASE PLOT: define the base theme
# No points, lines or areas are drawn yet — only axes and labels.
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width)) + # draw only the plot scaffold (axes and axis labels)
  theme_bw() + theme(panel.border = element_blank(),
                     panel.grid.major = element_blank(),
                     panel.grid.minor = element_blank(),
                     axis.line = element_line(colour = "white")) +## define the theme (background, borders, ...)
  labs(title = "Longitud del sepalo Vs. Ancho del sepalo", # set the title
       subtitle = "Data: Iris") + # set the subtitle
  labs(x = "Longitud del sepalo", y = "Ancho del sepalo")# set the axis names
#########################################################################################
### GRAFICA PUNTOS
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width)) + #Grafico base
geom_point() + # Añadimos la capa de puntos
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
subtitle = "Data: Iris con puntos") + #Definimos subtítulo
labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
####### Ahora definimos el color por especie
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width, color=Species)) + #Grafico base
geom_point(shape=21) + # Añadimos la capa de puntos
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
scale_colour_discrete(name = "Especies")+
labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
subtitle = "Data: Iris con puntos y color") + #Definimos subtítulo
labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
#Podemos usar cualquier forma que queramos, ejemplo:
#ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width, color=Species)) +
# geom_point(shape = 11) + # Usamos la forma de la estrella de David
#theme_bw() + theme(panel.border = element_blank(),
# panel.grid.major = element_blank(),
# panel.grid.minor = element_blank(),
# axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
#scale_colour_discrete(name = "Especies")+
#labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
# subtitle = "Data: Iris con la estrella de David y color") + #Definimos subtítulo
#labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
#ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width, color= Species)) +
# geom_point(shape = 64) + #Usamos la forma del arroba (@)
#theme_bw() + theme(panel.border = element_blank(),
# panel.grid.major = element_blank(),
# panel.grid.minor = element_blank(),
# axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
#scale_colour_discrete(name = "Especies")+
#labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
# subtitle = "Data: Iris con arroba y color") + #Definimos subtítulo
#labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
#########################################################################################
### GRAFICA BARRAS
ggplot(data=iris, aes(x=Species, y=Sepal.Length, fill=Species)) +
geom_col()+ #Grafico de barras basico, color por especie
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
scale_colour_discrete(name = "Especies")+
labs(title = "Longitud del sepalo por especie", #Definimos el título
subtitle = "Data: Iris grafico de barras") + #Definimos subtítulo
labs(x = "Especies", y = "Longitud del sepalo")#Definimos los nombres de los ejes
### GRAFICA HISTOGRAMAS
ggplot(data=iris) +
geom_histogram(aes(x = Sepal.Width, fill = Species),
bins = 12, position = "identity", alpha = 0.4) + # Ya que las columnas se sobrelapan usamos un alpha para trasparentar
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
labs(title = "Longitud del sepalo por especie", #Definimos el título
subtitle = "Data: Iris histograma") + #Definimos subtítulo
labs(x = "Especies", y = "Ancho del sepalo")#Definimos los nombres de los ejes
#Podemos graficar dado variables discretas como especies? Revisar facet_wrap
ggplot(data=iris) +
geom_histogram(aes(x = Sepal.Width, fill = Species), bins = 12) +
facet_wrap(~Species, ncol = 1)+
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
labs(title = "Longitud del sepalo por especie", #Definimos el título
subtitle = "Data: Iris histograma") + #Definimos subtítulo
labs(x = "Ancho del sepalo", y = "Conteo")#Definimos los nombres de los ejes
#########################################################################################
### GRAFICA LÍNEAS (Smooth)
#Graficamos con el método loess y el intervalo de confianza
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width)) +
geom_smooth(method = "loess", se=TRUE) + # Método loess y con el intervalo
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
scale_colour_discrete(name = "Especies")+
labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
subtitle = "Data: Iris con líneas e intervalo de confianza") + #Definimos subtítulo
labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
## Podemos quitar el intervalo de confianza
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width)) +
geom_smooth(method = "loess",se = FALSE) + # Con método loess y sin el intervalo de confianza
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
scale_colour_discrete(name = "Especies")+
labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
subtitle = "Data: Iris con líneas y sin intervalo de confianza") + #Definimos subtítulo
labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
### Existen otros métodos para graficar, por ejemplo lm
## Revisar otros métodos como: "auto", "glm", "gam"
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width)) +
geom_point()+ #Podemos también incluir los puntos de dispersión
geom_smooth(method = "lm",se = TRUE) + # Con método lm y con el intervalo de confianza
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
scale_colour_discrete(name = "Especies")+
labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
subtitle = "Data: Iris con lm e intervalo de confianza") + #Definimos subtítulo
labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
# Podemos graficar una línea de regresión por especie?
ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width, color=Species)) +
geom_point()+ #Podemos también incluir los puntos de dispersión
geom_smooth(method = "lm",se = TRUE) + # Con método lm y con el intervalo de confianza
theme_bw() + theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "white"))+## Definimos el tema (fondo, bordes...)
scale_colour_discrete(name = "Especies")+
labs(title = "Longitud del sepalo Vs. Ancho del sepalo", #Definimos el título
subtitle = "Data: Iris con lm e intervalo de confianza") + #Definimos subtítulo
labs(x = "Longitud del sepalo", y = "Ancho del sepalo")#Definimos los nombres de los ejes
##################################################################################################
############################ Actividades adicionales #############################################
##################################################################################################
# 1. Construyan su propio tema
# 2. Usando el paquete "emoGG" y "ggplot2" definir un emoji por especie en un grafico de dispersión
#devtools::install_github("dill/emoGG") #Instalar el paquete emoGG
# 3. Construir una grafica con las líneas de regresión por especie, pero modificar en itálica los nombres de las especies en la leyenda
# 4. Construir una gráfica con las líneas de regresión por especie, incluir el p, r2 y formula (Pista: Usar la función stat_poly_eq del paquete "ggpmisc")
##################################################################################################
############################# Literatura recomendada #############################################
##################################################################################################
# 1. http://www.ievbras.ru/ecostat/Kiril/R/Biblio_N/R_Eng/Wickham2016.pdf
# 2. https://stat545.com/graphics-overview.html
# 3. https://github.com/jennybc/ggplot2-tutorial
# 4. http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html
|
45541649e232de29089b94ab2976ed435d2e2a06
|
ee97a9d589a29d71735e60b96821cf1437f7796e
|
/server.R
|
c30e8211188ca656c7486f6e713291f9042138e4
|
[] |
no_license
|
Morriseylab/NGSViewer
|
093092ed156d1ff192a27463e6df46fdf3024832
|
ce71e4b81fb2b8b1ce899711d5ecafdc07f0eb3e
|
refs/heads/master
| 2021-12-07T08:00:41.197472
| 2021-11-17T17:17:33
| 2021-11-17T17:17:33
| 157,431,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71,928
|
r
|
server.R
|
library(shiny)
library(shinyBS)
library("AnnotationDbi")
library("org.Mm.eg.db")
library(gage)
library(gageData)
library(RColorBrewer)
library(NMF)
library(Biobase)
library(reshape2)
library(ggplot2)
library(biomaRt)
library(KEGGREST)
library(png)
library(GO.db)
library(d3heatmap)
library(dplyr)
library(tidyr)
library(plotly)
library(shinyjs)
library(htmlwidgets)
library(DT)
library(FactoMineR)
library(factoextra)
library(shinyRGL)
library(rgl)
library(rglwidget)
library(SPIA)
library(ReactomePA)
library(limma)
library(ggrepel)
library(readxl)
library(biomaRt)
library(data.table)
source("functions.R")
# Load user-ids and passwords used by the login modal below.
# NOTE(review): credentials are read from a plaintext CSV and compared
# verbatim — consider storing hashed passwords instead; confirm with the
# deployment owner.
auth=read.csv("data/authentication.csv")
my_username <- auth$user
my_password <- auth$pwd
# Shared ggplot theme applied to all standard plots in this app.
plotTheme <-theme_bw() + theme(axis.title.x = element_text(face="bold", size=12),
                               axis.text.x = element_text(angle=35, vjust=0.5, size=12),
                               axis.title.y = element_text(face="bold", size=12),
                               axis.text.y = element_text(angle=0, vjust=0.5, size=12))
# Larger-font theme used for factoextra (fviz_*) plots.
theme_fviz <- theme(axis.title.x = element_text(face="bold", size=14),
                    axis.title.y = element_text(face="bold", size=14),
                    legend.text = element_text(angle=0, vjust=0.5, size=14),
                    legend.title = element_text(angle=0, vjust=0.5, size=14),
                    plot.title = element_text(angle=0, vjust=0.5, size=16))
server <- function(input, output, session) {
values <- reactiveValues(authenticated = FALSE)
# Return the UI for a modal dialog with data selection input. If 'failed'
# is TRUE, then display a message that the previous value was invalid.
dataModal <- function(failed = FALSE) {
modalDialog(
textInput("username", "Username:"),
passwordInput("password", "Password:"),
footer = tagList(
actionButton("ok", "OK")
)
)
}
# Show modal when button is clicked.
# This `observe` is suspended only whith right user credential
obs1 <- observe({
showModal(dataModal())
})
# When OK button is pressed, attempt to authenticate. If successful,
# remove the modal.
obs2 <- observe({
req(input$ok)
isolate({
Username <- input$username
Password <- input$password
})
Id.username <- which(my_username == Username)
Id.password <- which(my_password == Password)
if (length(Id.username) > 0 & length(Id.password) > 0) {
if (Id.username == Id.password) {
Logged <<- TRUE
values$authenticated <- TRUE
obs1$suspend()
removeModal()
} else {
values$authenticated <- FALSE
}
}
})
####### LOAD EXCEL AND POPULATE DROP DOWN FOR PROJECTS #########
#Read the parameter file
readexcel = reactive({
user=input$username
file = read.csv(paste("data/param.csv",sep=""))
if(user=="allusers"){
file=file
}else{
file=file[file$user==user,]
}
})
#Get Project list and populate drop-down
output$projects = renderUI({
excel=readexcel()
prj=excel$projects
selectInput("projects","Select a project",as.list(sort(as.character(prj))))
})
################# DISPLAY FILE LIST IN DASHBOARD ###############
#Display file in dashboard
dashdata <- reactive({
user=input$username
file=read.csv('data/param.csv',stringsAsFactors = F)
if(user=="allusers"){
file = file %>% rename("Project Name"="projects","Project Description"="desc","Username"="user") %>% arrange(`Project Name`)
}else{
file=file[file$user==user,] %>% dplyr::select(-user) %>%
rename("Project Name"="projects","Project Description"="desc") %>% arrange(`Project Name`)
}
return(file)
})
output$dashdata = DT::renderDataTable({
DT::datatable(dashdata(),
extensions = 'Buttons', options = list(
dom = 'Bfrtip',
pageLength = 30,
buttons = list()),
rownames=FALSE,selection = list(mode = 'single', selected =1),escape=FALSE)
})
####### LOAD RDATA FILE AND GET CONTRASTS##########
#Load Rdata
fileload <- reactive({
if(input$filetype == 'list'){
inFile = paste('data/',as.character(input$projects),'.RData',sep = '')
load(inFile)
}else{
file=input$rdatafileupload
load(file$datapath)
}
loaddata=results
return(loaddata)
})
#Get contrast list and populate drop-down
output$contrasts = renderUI({
results=fileload()
lim=results$limma
contrasts=as.list(as.character(unlist(lapply((names(lim)),factor))))
selectInput("contrast","Select a comparison",contrasts,"pick one")
})
################################## PCA PLOT ###################################
#Populate drop-down for PC to plot on x-axis
output$pcaxoptions <- renderUI({
selectInput("pcaxaxes","Select Principle Component to plot on the X-axis ",c(1:10))
})
#Populate drop-down for PC to plot on y-axis
output$pcayoptions <- renderUI({
selectInput("pcayaxes","Select Principle Component to plot on the Y-axis",c(1:10),selected=2)
})
#PRint the PC's chosen to be plotted
output$biplottitle <- renderText({
text=as.character(paste("Dim",input$pcaxaxes," vs Dim",input$pcayaxes,sep=""))
return(text)
})
#Textbox to enter number of genes to use to plot
output$pcipslide <- renderUI({
textInput(inputId = 'pcipslide', label = "Enter top number of input genes that show maximum variance", value = '500')
})
#Textbox to enter number of genes to view in the biplot
output$pcslide <- renderUI({
textInput(inputId = 'pcslide', label = "Enter number of genes to view in the biplot", value = '0')
})
#Drop down menu for pc-plot colorby option
output$pcacolorby = renderUI({
results=fileload()
eset=results$eset
pd=pData(eset) #get pheno-data
pd=pd %>% select(starts_with("var")) #get columns from phenodata that start with "var"
kt=as.data.frame(t(na.omit(t(pd)))) #omit columns that have only NA's
bpcols=c("maineffect",colnames(kt))
selectInput("pcacolorby","Color By",bpcols) #populate drop down menu with the phenodata columns
})
#Checkbox to view ellipses in the PCA plot
output$ellipse <- renderUI({
checkboxInput("ellipse", label = "Check to view ellipses", value = FALSE)
})
#Function for PCA plot
plotbiplot = reactive({
res.pca = res_pca()
x=as.numeric(input$pcaxaxes)
y=as.numeric(input$pcayaxes)
results=fileload()
v = results$eset
pData<-pData(v)
colorby=input$pcacolorby
hab=eval(parse(text = paste0("pData$",colorby,sep="")))
validate(
need(input$pcslide, "Enter number of genes to view in biplot")
)
if(input$pcslide==0 & input$ellipse==F){
fviz_pca_ind(res.pca, repel=T,geom='point',label='var',addEllipses=FALSE, habillage = as.factor(hab),pointsize = 3.35,axes=c(x,y))+scale_shape_manual(values = c(rep(19,length(unique(hab)))))+theme_fviz}
else if(input$pcslide==0 & input$ellipse==T){
fviz_pca_ind(res.pca, repel=T,geom='point',label='var',addEllipses=T,ellipse.type="confidence",ellipse.alpha=0.2, habillage = as.factor(hab),pointsize = 3.35,axes=c(x,y))+scale_shape_manual(values = c(rep(19,length(unique(hab)))))+theme_fviz}
else if(input$pcslide!=0 & input$ellipse==F){fviz_pca_biplot(res.pca,repel=T, label=c("var","ind"),habillage = as.factor(hab),pointsize = 3.35,axes=c(x,y),select.var = list(contrib = as.numeric(input$pcslide)))+scale_shape_manual(values = c(rep(19,length(unique(hab)))))+theme_fviz}
else{fviz_pca_biplot(res.pca,repel=T, label=c("var","ind"),addEllipses=T,ellipse.type="confidence",ellipse.alpha=0.1,habillage = as.factor(hab),pointsize = 3.35,axes=c(x,y),select.var = list(contrib = as.numeric(input$pcslide)))+scale_shape_manual(values = c(rep(19,length(unique(hab)))))+theme_fviz}
})
#plotting function for pca plot
output$biplot = renderPlot({
plotbiplot()
})
#Button for dwnloading PCA plot
output$dwldbiplot = renderUI({
downloadButton('downloadbiplot', 'Download Biplot')
})
#Download function for pca plot
output$downloadbiplot <- downloadHandler(
filename = function() {
paste0("biplot.pdf")
},
content = function(file){
pdf(file,width=14,height = 9,useDingbats=FALSE)
plot(plotbiplot())
dev.off()
})
########### VARIANCES OF PCA PLOT #################
#Text explaining PCA variances
output$pcatitle <- renderText({
text="The proportion of variances retained by the principal components can be viewed in the scree plot. The scree plot is a graph of the eigenvalues/variances associated with components"
return(text)
})
#PLot scree plot of all PC's
output$pcaplot_ip = renderPlot({
res.pca = res_pca()
fviz_screeplot(res.pca, ncp=10)
})
#get expression data and perform PCA
res_pca = reactive({
n=as.numeric(input$pcipslide)
validate(
need(as.numeric(input$pcipslide) > 199, "Minimum value of input genes that show maximum variance should at least be 200")
)
results=fileload()
v = results$eset
keepGenes <- v@featureData@data
pData<-phenoData(v)
v.filter = v[rownames(v@assayData$exprs) %in% rownames(keepGenes),]
Pvars <- apply(v.filter@assayData$exprs,1,var)
select <- order(Pvars, decreasing = TRUE)[seq_len(min(n,length(Pvars)))]
v.var <-v.filter[select,]
m<-v.var@assayData$exprs
rownames(m) <- v.var@featureData@data$SYMBOL
m=as.data.frame(m)
m=unique(m)
res.pca = PCA(t(m), graph = FALSE)
})
#Extract PCA information like eigan values, variance of each PC
pcaplo_tab = reactive({
res.pca =res_pca()
eigenvalues = res.pca$eig
return(eigenvalues)
})
#Display above PC information in a table
output$pcaplot_tab = DT::renderDataTable({
DT::datatable(pcaplo_tab(),
extensions = c('Scroller'),
options = list(
searchHighlight = TRUE,
scrollX = TRUE
))
})
##################3D PCA PLOT #####################
#PLot 3D PCA plot
output$pcaplot3d = renderRglwidget({
graphics.off()
pdf(NULL)
v=datasetInput3()
results=fileload()
pData=pData(results$eset)
v=t(v)
v= v[,apply(v, 2, var, na.rm=TRUE) != 0]
pca <- res_pca()
vars <- apply(pca$var$coord, 2, var)
props <- round((vars / sum(vars))*100,1)
groups=factor(gsub('-','_',pData$maineffect))
try(rgl.close())
open3d()
# resize window
par3d(windowRect = c(100, 100, 612, 612))
palette(c('blue','red','green','orange','cyan','black','brown','pink'))
plot3d(pca$ind$coord[,1:3], col =as.numeric(groups), type='s',alpha=1.75,axes=F,
xlab=paste('PC1 (',props[1],'%)',sep=''),
ylab=paste('PC2 (',props[2],'%)',sep=''),
zlab=paste('PC3 (',props[3],'%)',sep='')
)
axes3d(edges=c("x--", "y--", "z"), lwd=2, expand=10, labels=FALSE,box=T)
grid3d("x")
grid3d("y")
grid3d("z")
l=length(levels(groups))
ll=1:l
y=1+(ll*15)
legend3d("topright", legend = levels(groups), pch = 16, col=palette(),cex=1, inset=c(0.02))
rglwidget()
})
######## GET PROJECT DESC AND DISPLAY ###########
#Read parameter file and get project description for the project selected
prjdesc = reactive({
file = readexcel()
prj=input$projects
desc=file$desc[file$projects %in% prj]
desc=as.character(desc)
})
#Display text in main project description panel
output$pdesc <- renderText({
desc=prjdesc()
})
###################################### DOT PLOT ###################################
#Drop down menu for dot-plot x-axis grouping
output$boxplotcol = renderUI({
results=fileload()
pData=pData(results$eset) %>% select(maineffect, sample_name,starts_with("var_")) %>%
select(where(~!all(is.na(.))))
bpcols=as.list(colnames(pData))
selectInput("color","Select an Attribute for the X-axis",bpcols) #populate drop down menu with the phenodata columns
})
#Drop down menu for dot-plot color
output$boxplotcol2 = renderUI({
results=fileload()
pData=pData(results$eset) %>% select(maineffect, sample_name,starts_with("var_")) %>%
select(where(~!all(is.na(.))))
bpcols=as.list(colnames(pData))
selectInput("color2","Color By",bpcols) #populate drop down menu with the phenodata columns
})
#Checkbox for whether or not to display the minimum expression line
output$minexprline = renderUI({
tagList(
checkboxInput("minexprline", label = "Show expression threshold line", value = FALSE),
bsTooltip("minexprline","Please note that not all projects have this option currently", placement = "bottom", trigger = "hover",options = NULL)
)
})
#Extract expression data to create dot-plot
dotplot_out = reactive({
s = input$table_rows_selected #select rows from table
dt = datasetInput() #load limma data
dt$id=rownames(dt)
dt=data.frame(dt$id,dt[,-ncol(dt)])
validate(
need((is.data.frame(dt) && nrow(dt))!=0, "No data in table")
)
dt1 = dt[s, , drop=FALSE]#get limma data corresponding to selected row in table
id = as.character(dt[s,1])
results=fileload()
eset <- results$eset
pData=pData(eset) #get pheno-data
if(is.factor(pData$sample_name)==T){lev=levels(pData$sample_name)}
minexpr=pData$minexpr[1]
signal=as.data.frame(eset@assayData$exprs[id,])
colnames(signal)="signal"
signal$id=rownames(signal)
e=left_join(pData,signal,by=c('sample_name'='id'))
if(is.factor(pData$sample_name)==T){e$sample_name= factor(e$sample_name, levels = levels(pData$sample_name))}
if(is.na(dt1$SYMBOL)) #if gene symbol does not exist,use ENSEMBL id
{genesymbol=dt1$ENSEMBL}
else{
genesymbol=dt1$SYMBOL} #get the gene symbol of the row selected
if(input$minexprline==T){
gg=ggplot(e,aes_string(x=input$color,y="signal",col=input$color2))+plotTheme+guides(color=guide_legend(title=as.character(input$color2)))+
labs(title=genesymbol, x="Condition", y="Expression Value") + geom_point(size=5,position=position_jitter(w = 0.1))+ geom_smooth(method=lm,se=FALSE) +
stat_summary(fun.y = "mean", fun.ymin = "mean", fun.ymax= "mean", size= 0.3, geom = "crossbar",width=.2) + geom_hline(yintercept=minexpr, linetype="dashed", color = "red")}
else{
gg=ggplot(e,aes_string(x=input$color,y="signal",col=input$color2))+plotTheme+guides(color=guide_legend(title=as.character(input$color2)))+
labs(title=genesymbol, x="Condition", y="Expression Value") + geom_point(size=5,position=position_jitter(w = 0.1))+ geom_smooth(method=lm,se=FALSE) +
stat_summary(fun.y = "mean", fun.ymin = "mean", fun.ymax= "mean", size= 0.3, geom = "crossbar",width=.2)
}
gg
})
# plot dotplot
output$dotplot = renderPlot({
dotplot_out()
})
#function to download dot plot
output$downloaddotplot <- downloadHandler(
filename = function() {
paste0(input$projects, '_dotplot.jpg', sep='')
},
content = function(file){
jpeg(file, quality = 100, width = 800, height = 800)
plot(dotplot_out())
dev.off()
})
###########LOAD LIMMA FILE AND DISPLAY#############
#Read limma data from eset
datasetInput0.5 = reactive({
contrast=input$contrast
results=fileload()
k=paste('results$limma$',contrast,sep='')
limmadata=eval(parse(text = k))
})
#Update limma results based on gene selection (upregulated, downregulated, both or none)
datasetInput = reactive({
contrast=input$contrast #select contrast
limmadata=datasetInput0.5() %>% dplyr::select(-logFC)
lfc=as.numeric(input$lfc) #get logFC
apval=as.numeric(input$apval)#get adjusted P.Vals
if(is.null(input$radio))
{
d = limmadata
}
else if(input$radio=='none')
{
d=limmadata
}
else if(input$radio=='down')
{
d=limmadata
d = d[which(d$fc < (-1*(lfc)) & d$adj.P.Val < apval),]
}
else if(input$radio=='up')
{
d=limmadata
d = d[which(d$fc>lfc & d$adj.P.Val < apval),]
}
else if(input$radio=='both')
{
d=limmadata
d = d[which(abs(d$fc) > lfc & d$adj.P.Val < apval),]
}
geneid=d$SYMBOL
url= paste("http://www.genecards.org/cgi-bin/carddisp.pl?gene=",geneid,sep = "")
if(url=="http://www.genecards.org/cgi-bin/carddisp.pl?gene="){
d$link<-NULL
}else{
d$link=paste0("<a href='",url,"'target='_blank'>","Link to GeneCard","</a>")}
d=as.data.frame(d)
return(d)
})
  #print limma results in data table
  # (the bare input$... reads below only register reactive dependencies)
  output$table = DT::renderDataTable({
    input$lfc
    input$apval
    input$project
    input$contrast
    DT::datatable(datasetInput(),
                  extensions = 'Buttons', options = list(
                    dom = 'Bfrtip',
                    buttons = list()),
                  rownames=FALSE,selection = list(mode = 'single', selected =1),escape=FALSE)
  })
  #Display text (contrast name) above limma table
  output$contrdesc <- renderText({
    contrastname=input$contrast
    text=paste('CONTRAST: ',contrastname,sep=" ")
    return(text)
  })
  #download limma results data as excel sheet
  # NOTE(review): the file written is actually CSV despite the description.
  output$dwld <- downloadHandler(
    filename = function() { paste(input$projects, '.csv', sep='') },
    content = function(file) {
      write.csv(datasetInput(), file)
    })
  ############# DISPLAY VOLCANO PLOT ###############
  #Get limma data (thin pass-through of the filtered limma table)
  datasetInputvol = reactive({
    limmadata=datasetInput()
    return(limmadata)
  })
  #Drop down to choose what genes to display on volcano plot
  output$volcdrop <- renderUI({
    selectInput("volcdrop", "Select input type",c('Significant genes' = "signi",'GO genes' = "go"))
  })
  #Slider to choose number of genes to display on volcano plot
  # (only shown when 'Significant genes' mode is selected)
  output$volcslider <- renderUI({
    conditionalPanel(
      condition = "input.volcdrop == 'signi'",
      fluidRow(
        column(6,sliderInput("volcslider", label = h4("Select top number of genes"), min = 0,max = 25, value = 5))
      ))
  })
  #Function to assign values to volcano plot points.
  # Tags every gene in the unfiltered limma table with a 'group' label used
  # to colour the volcano plot: FDR/FC filter status in 'signi' mode, or
  # membership in the currently selected GO term in 'go' mode.
  vpt = reactive({
    diff_df=datasetInput0.5()
    FDR=input$apval
    lfc=input$lfc
    if(input$volcdrop=="signi"){
      diff_df$group <- "NotSignificant"
      # change the grouping for the entries with significance but not a large enough Fold change
      diff_df[which(diff_df['adj.P.Val'] < FDR & abs(diff_df['logFC']) < lfc ),"group"] <- "Filtered by FDR"
      # change the grouping for the entries a large enough Fold change but not a low enough p value
      diff_df[which(diff_df['adj.P.Val'] > FDR & abs(diff_df['logFC']) > lfc ),"group"] <- "Filtered by FC"
      # change the grouping for the entries with both significance and large enough fold change
      diff_df[which(diff_df['adj.P.Val'] < FDR & abs(diff_df['logFC']) > lfc ),"group"] <- "Significant (Filtered by both FDR and FC)"
    }
    else if(input$volcdrop=="go"){
      top_peaks2=GOHeatup()
      diff_df$group <- "All genes"
      diff_df[which(diff_df$SYMBOL %in% top_peaks2$SYMBOL ),"group"] <- "Selected_genes"
    }
    return(diff_df)
  })
#Function to draw the volcano plot
volcanoplot_out = reactive({
diff_df=vpt()
if(input$volcdrop=="signi"){
# Find and label the top peaks..
n=input$volcslider
if(n>0){
top_peaks <- diff_df[with(diff_df, order(adj.P.Val,logFC)),][1:n,]
top_peaks <- rbind(top_peaks, diff_df[with(diff_df, order(adj.P.Val,-logFC)),][1:n,])
a <- list()
for (i in seq_len(nrow(top_peaks))) {
m <- top_peaks[i, ]
a[[i]] <- list(x = m[["logFC"]],y = -log10(m[["adj.P.Val"]]),text = m[["SYMBOL"]],xref = "x",yref = "y",showarrow = FALSE,arrowhead = 0.5,ax = 20,ay = -40)
}
p <- plot_ly(data = diff_df, x = diff_df$logFC, y = -log10(diff_df$adj.P.Val),text = diff_df$SYMBOL, mode = "markers", color = diff_df$group) %>% layout(title ="Volcano Plot",xaxis=list(title="Log Fold Change"),yaxis=list(title="-log10(FDR)")) %>%
layout(annotations = a)
}
else{
p <- plot_ly(data = diff_df, x = diff_df$logFC, y = -log10(diff_df$adj.P.Val),text = diff_df$SYMBOL, mode = "markers", color = diff_df$group) %>% layout(title ="Volcano Plot",xaxis=list(title="Log Fold Change"),yaxis=list(title="-log10(FDR)"))
}
}
else if(input$volcdrop=="go"){
# Find and label the top peaks..
top_peaks <- diff_df[diff_df$SYMBOL %in% top_peaks2$SYMBOL,]
a <- list()
for (i in seq_len(nrow(top_peaks))) {
m <- top_peaks[i, ]
a[[i]] <- list(x = m[["logFC"]],y = -log10(m[["adj.P.Val"]]),text = m[["SYMBOL"]],xref = "x",yref = "y",showarrow = FALSE,arrowhead = 0.5,ax = 20,ay = -40)
}
p <- plot_ly(data = diff_df, x = diff_df$logFC, y = -log10(diff_df$adj.P.Val),text = diff_df$SYMBOL, mode = "markers", color = diff_df$group) %>% layout(title ="Volcano Plot",xaxis=list(title="Log Fold Change"),yaxis=list(title="-log10(FDR)"))
}
p
})
#Make non-interactive plot for volcano plot download
volcanoplot_dout = reactive({
diff_df=vpt()
if(input$volcdrop=="signi"){
n=input$volcslider
if(n>0){
top_peaks <- diff_df[with(diff_df, order(adj.P.Val,logFC)),][1:n,]
top_peaks <- rbind(top_peaks, diff_df[with(diff_df, order(adj.P.Val,-logFC)),][1:n,])
p <- ggplot(data = diff_df, aes(x = diff_df$logFC, y = -log10(diff_df$adj.P.Val))) + geom_point_rast(aes(color=diff_df$group)) +ggtitle("Volcano Plot") + xlab("Log Fold Change") + ylab("-log10(FDR)") +labs(color="")+ geom_label_repel(data=top_peaks,aes(x = top_peaks$logFC, y = -log10(top_peaks$adj.P.Val),label=top_peaks$SYMBOL)) + theme_bw()
}
else{
p <- ggplot(data = diff_df, aes(x = diff_df$logFC, y = -log10(diff_df$adj.P.Val))) + geom_point_rast(aes(color=diff_df$group)) +ggtitle("Volcano Plot") + xlab("Log Fold Change") + ylab("-log10(FDR)") +labs(color="") + theme_bw()
}
}
else if(input$volcdrop=="go"){
top_peaks <- diff_df[diff_df$SYMBOL %in% top_peaks2$SYMBOL,]
p <- ggplot(data = diff_df, aes(x = diff_df$logFC, y = -log10(diff_df$adj.P.Val))) + geom_point_rast(aes(color=diff_df$group)) +ggtitle("Volcano Plot") + xlab("Log Fold Change") + ylab("-log10(FDR)") +labs(color="") + theme_bw()
}
p
})
  #Render and display interactive volcano plot
  # (the bare input$... reads only register reactive dependencies)
  output$volcanoplot = renderPlotly({
    input$radio
    input$lfc
    input$apval
    input$volcslider
    input$volcdrop
    volcanoplot_out()
  })
  #Display limma results alongside the volcano plot
  output$table_volc = DT::renderDataTable({
    DT::datatable(datasetInput(),
                  extensions = c('Buttons','Scroller'),
                  options = list(dom = 'Bfrtip',
                                 searchHighlight = TRUE,
                                 pageLength = 10,
                                 lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                 scrollX = TRUE,
                                 buttons = c('copy', 'print')
                  ),rownames=TRUE,selection = list(mode = 'single', selected =1),escape=FALSE)
  })
  #Download non-interactive volcano plot as a PDF
  output$dwldvolcanoplot <- downloadHandler(
    filename = function() {
      paste0("volcano.pdf")
    },
    content = function(file){
      pdf(file,width=14,height = 9,useDingbats=FALSE)
      plot(volcanoplot_dout())
      dev.off()
    })
#######CONDITIONAL PANEL FOR Limma ################
#Create checkboxes with contrasts corresponding to the project (displayed only when multiple contrast checkbox is selected)
output$contrastslimma <- renderUI({
results=fileload()
lim=results$limma
contrasts=as.list(as.character(unlist(lapply((names(lim)),factor))))
checkboxGroupInput("multicontrast",label="Pick Contrasts",choices=contrasts)
})
#create table with p.value and FC value for the contrasts selected
multilimma = reactive({
validate(
need(input$multicontrast, "Please Select at least one comparison ")
)
contr=input$multicontrast
results=fileload()
full_limma = data.frame(id=as.character())
for(i in 1:length(contr)){
k=paste('results$limma$',contr[i],sep='')
limmadata=eval(parse(text = k))
limmadata2=data.frame(id=rownames(limmadata),logFC=limmadata$logFC,adj.P.Val=limmadata$adj.P.Val)
colnames(limmadata2)[-1]=paste(colnames(limmadata2[,c(-1)]),contr[i], sep = "_")
full_limma=full_join(full_limma,limmadata2,by='id')
}
k=data.frame(id=rownames(limmadata),SYMBOL=limmadata$SYMBOL)
m=full_join(k,full_limma,by='id')
return(m)
})
  #update table with the dataframe (multi-contrast limma view)
  output$table_TRUE = DT::renderDataTable({
    input$project
    input$contrast
    DT::datatable(multilimma(),
                  extensions = c('Buttons','Scroller'),
                  options = list(dom = 'Bfrtip',
                                 searchHighlight = TRUE,
                                 pageLength = 10,
                                 lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                 scrollX = TRUE,
                                 buttons = list('copy')
                  ),rownames=TRUE,selection = list(mode = 'single', selected =1),escape=FALSE)
  })
  #action button to download the table
  output$dwldmultitab = renderUI({
    downloadButton('multidwld','Download Table')
  })
  #function to download multi-contrast limma table
  output$multidwld <- downloadHandler(
    filename = function() { paste(input$projects, '_multiple_contrasts.csv', sep='') },
    content = function(file) {
      write.csv(multilimma(), file,row.names=FALSE)
    })
  ############################## DISPLAY RAW EXPRESSION (VOOM) DATA #########################################
  #load voom data from eset (normalised expression matrix, features x samples)
  datasetInput3 = reactive({
    results=fileload()
    exprsdata=results$eset@assayData$exprs
  })
  #annotate voom data using featuresdata
  # (feature annotation and expression joined on the shared rownames)
  datasetInput33 = reactive({
    results=fileload()
    exprsdata=as.data.frame(results$eset@assayData$exprs)
    features=as.data.frame(pData(featureData(results$eset)))
    features$id=rownames(features)
    exprsdata$id=rownames(exprsdata)
    genes <- inner_join(features,exprsdata,by=c('id'='id'))
    return(genes)
  })
  #print voom or expression data file
  output$table3 = DT::renderDataTable({
    DT::datatable(datasetInput33(),
                  extensions = c('Buttons','Scroller'),
                  options = list(dom = 'Bfrtip',
                                 searchHighlight = TRUE,
                                 pageLength = 10,
                                 lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                 scrollX = TRUE,
                                 buttons = c('copy', 'print')
                  ),rownames=FALSE,caption= "Voom data")
  })
  #action button to download the raw expression matrix
  output$dwldrawtab = renderUI({
    downloadButton('rawdwld','Download Raw Data')
  })
  #function to download voom expression data table
  output$rawdwld <- downloadHandler(
    filename = function() { paste(input$projects, '_rawdata.csv', sep='') },
    content = function(file) {
      write.csv(datasetInput33(), file,row.names=FALSE)
    })
  ######################################## DISPLAY PHENO DATA ###############################
#load pheno from eset
phenofile = reactive({
results=fileload()
pd=pData(results$eset)
if("minexpr" %in% colnames(pData)){
pd=pd %>% dplyr::select(-minexpr)
}
else{pd=pd}
})
  #print pheno (sample) data file
  output$phenofile = DT::renderDataTable({
    DT::datatable(phenofile(),
                  extensions = c('Buttons','Scroller'),
                  options = list(dom = 'Bfrtip',
                                 searchHighlight = TRUE,
                                 pageLength = 10,
                                 lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                 scrollX = TRUE,
                                 buttons = c('copy', 'print')
                  ),rownames=FALSE,caption= "Sample data")
  })
################################## CAMERA OUTPUT DISPLAY ###############################
#populate camera dropdown menu in the sidebar with the genesets based on the project RData
output$cameradd = renderUI({
results=fileload()
contrast=input$contrast
cam=paste("results$camera$",contrast,sep="")
cam=eval(parse(text=cam))
cameradd=as.list(names(cam))
selectInput("cameradd","Select a Gene Set",cameradd)
})
#Get camera data from Rdata file for the chosen contrast
geneid = reactive({
results=fileload()
cameradd=input$cameradd
contrast=input$contrast #get user input for contrast/comparison
c=paste('results$camera$',contrast,'$',cameradd,'$camera_result',sep='') #get camera data corresponding to the contrast chosen
cam=eval(parse(text = c)) #convert string to variable
cam=data.frame(name=rownames(cam),cam)
name=cam$name
if (cameradd == "GO")
{
url= paste("http://amigo.geneontology.org/amigo/term/",name,sep = "") #create link to Gene Ontology Consortium
cam$link=paste0("<a href='",url,"'target='_blank'>","Link to Gene Ontology Consortium","</a>")
cam=as.data.frame(cam)
}else{
url= paste("http://software.broadinstitute.org/gsea/msigdb/cards/",name,".html",sep = "")
cam$link=paste0("<a href='",url,"'target='_blank'>","Link to Molecular Dignature Database","</a>")
cam=as.data.frame(cam)}
return(cam) # return datatable with camera results
})
  # print out camera results in a table
  # (isolate() so the table only refreshes when the watched inputs change)
  output$tablecam = DT::renderDataTable({
    input$camera
    input$cameradd
    input$contrast
    isolate({
      DT::datatable(geneid(),
                    extensions = c('Buttons','Scroller'),
                    options = list(dom = 'Bfrtip',
                                   searchHighlight = TRUE,
                                   pageLength = 10,
                                   lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                   scrollX = TRUE,
                                   buttons = c('copy', 'print')
                    ),rownames= FALSE,selection = list(mode = 'single', selected =1),escape=FALSE,caption = "Camera Results")
    })
  })
  #Generate text title for the gene list table
  # as.character() on the selected row yields one string per column;
  # element 1 is the gene-set name.
  output$camdesc <- renderText({
    s = input$tablecam_rows_selected
    dt = geneid()
    dt = as.character(dt[s, , drop=FALSE])
    camname=dt[1]
    text=paste('Gene list for Camera term :',camname,sep="")
    return(text)
  })
#get the gene-list for every row in camera results table
campick2 = reactive({
results=fileload()
cameradd=input$cameradd
contrast=input$contrast #get user input for contrast/comparison
c=paste('results$camera$',contrast,'$',cameradd,'$indices',sep='') #get camera indices corresponding to the contrast chosen
cameraind=eval(parse(text = c))
cam=geneid() #get datatable with camera data from reactive
s=input$tablecam_rows_selected # get index of selected row from table
cam=cam[s, ,drop=FALSE]
res=datasetInput0.5()
res2=datasetInput33()
if("ENTREZID" %in% colnames(res2)){
res2=res2
}
else{res2=res}
#get gene list from indices
if (cameradd == "GO")
{
k=paste('res2$ENTREZID[cameraind$`',cam$name,'`]',sep='')}
else{
k=paste('res2$ENTREZID[cameraind$',cam$name,']',sep='')
}
genes=eval(parse(text = k)) #get entrez id's corresponding to indices
genesid=res[res$ENTREZID %in% genes,] #get limma data corresponding to entrez id's
return(data.frame(genesid)) #return the genelist
})
  #print data table with gene list corresponding to each row in camera datatable
  output$campick3 = DT::renderDataTable({
    input$cameradd
    input$contrast
    input$projects
    DT::datatable(campick2(),
                  extensions = c('Buttons','Scroller'),
                  options = list(dom = 'Bfrtip',
                                 searchHighlight = TRUE,
                                 pageLength = 10,
                                 lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                 scrollX = TRUE,
                                 buttons = c('copy', 'print')
                  ),rownames=FALSE,escape=FALSE,caption="GENE LIST")
  })
  #download camera datatable
  output$downloadcam <- downloadHandler(
    filename = function() { paste('Camera_',input$projects,'_',input$contrast,'.csv', sep='') },
    content = function(file) {
      write.csv(geneid(), file)
    })
  ###### CREATE ENRICHMENT PLOT FROM CAMERA #########
  #Run fgsea on data
  # NOTE(review): findgeneset()/creategseaobj() are project helpers defined
  # elsewhere; presumably they fetch the gene-set collection for the organism
  # and build a GSEA result object from the fold changes -- confirm against
  # their definitions.
  fgseares = reactive({
    limma_all=datasetInput0.5()
    genelist=limma_all$fc
    names(genelist)=limma_all$ENTREZID
    results=fileload()
    org= as.character(unique(pData(results$eset)$organism))
    cameradd=input$cameradd
    geneset=findgeneset(org,cameradd)
    new_res= creategseaobj(geneList = genelist, geneSets = geneset)
    return(new_res)
  })
  #Get fgsea results (result slot of the GSEA object)
  fgseares2 = reactive({
    new_res= fgseares()
    res=new_res@result
  })
  #Create enrichment plot for the camera term selected in the 'camres' table
  eplotcamera = reactive({
    s = input$camres_rows_selected
    dt = geneid()
    dt = dt[s, , drop=FALSE]
    cat= rownames(dt)
    new_res=fgseares()
    gseaplot2(new_res, geneSetID = cat, title = cat)
  })
  #Render enrichment plot
  output$eplotcamera = renderPlot({
    eplotcamera()
  })
  # print out camera results in a table
  # (second view of geneid() backing the enrichment-plot tab)
  output$camres = DT::renderDataTable({
    input$camera
    input$cameradd
    input$contrast
    isolate({
      DT::datatable(geneid(),
                    extensions = c('Buttons','Scroller'),
                    options = list(dom = 'Bfrtip',
                                   searchHighlight = TRUE,
                                   pageLength = 10,
                                   lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                   scrollX = TRUE,
                                   buttons = c('copy', 'print')
                    ),rownames= FALSE,selection = list(mode = 'single', selected =1),escape=FALSE,caption = "Camera Results")
    })
  })
  ######### CREATE HEATMAP FROM CAMERA ##############
  #extract voom expression data of all genes corresponding to selected row in camera datatable
  heatmapcam <- reactive({
    genesid=campick2() #gene list from camera
    voom=as.data.frame(datasetInput3())#voom data
    genes_cam<-voom[rownames(voom) %in% rownames(genesid),]
  })
  #Set limit for number of genes that can be viewed in the heatmap
  output$hmplimcam <- renderUI({
    pval=campick2()
    top_expr=datasetInput3()
    top_expr=top_expr[rownames(top_expr) %in% rownames(pval),]
    mx=nrow(top_expr)
    sliderInput("hmplimcam", label = h5("Select number of genes to view in the heatmap"), min = 2,max =mx, value = mx)
  })
  #Create color scale legend for the heatmap
  output$hmpscale_out2 = renderPlot({
    hmpscaletest(hmpcol=input$hmpcol2,voom=datasetInput3(),checkbox=input$checkbox2)
  })
  #create interactive (d3) heatmap of the camera gene list
  camheatmap = reactive({
    # 1 - Pearson correlation distance for row/column clustering
    dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
    top_expr=heatmapfun(results=fileload(),expr=heatmapcam(),pval=campick2(),file = readexcel(),prj=input$projects,hmplim=input$hmplimcam,hmpsamp=input$hmpsamp2,
                        contrast=input$contrast)
    sym=rownames(top_expr)
    # NOTE(review): sym is captured BEFORE the zero-variance filter below, so
    # when rows are dropped the labRow labels can misalign with the matrix.
    #Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
    ind = apply(top_expr, 1, var) == 0
    top_expr <- top_expr[!ind,]
    if(input$checkbox2==TRUE){
      d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby2,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcol2))(30),labRow = sym)}
    else{d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby2,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol2)))(30),labRow = sym)}
  })
  # Render heatmap for camera genes
  # (the bare input$... reads only register reactive dependencies)
  output$camheatmap <- renderD3heatmap({
    input$hmpcol #user input-color palette
    input$clusterby #user input-cluster by
    input$checkbox #user input-reverse colors
    input$gene #user input-slider input for number of genes
    input$genelist
    input$makeheat
    input$gage
    input$go_dd
    input$table4_rows_selected
    input$tablecam_rows_selected
    input$projects
    input$contrast
    input$cameradd
    input$hmpsamp2
    input$hmplimcam
    camheatmap()
  })
  #Create non-interactive heatmap for download
  camheatmapalt = reactive({
    dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
    top_expr=heatmapfun(results=fileload(),expr=heatmapcam(),pval=campick2(),file = readexcel(),prj=input$projects,hmplim=input$hmplimcam,hmpsamp=input$hmpsamp2,
                        contrast=input$contrast)
    sym=rownames(top_expr)
    # NOTE(review): same labRow/row-filter misalignment risk as camheatmap().
    #Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
    ind = apply(top_expr, 1, var) == 0
    top_expr <- top_expr[!ind,]
    if(input$checkbox2==TRUE){
      aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv=TRUE,fontsize = 10,color = colorRampPalette(brewer.pal(n = 9, input$hmpcol2))(30),labRow = sym)}
    else{aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv=TRUE,fontsize = 10,color = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol2)))(30),labRow = sym)}
  })
  #Download camera heatmap as a PDF
  output$downloadcamheatmap <- downloadHandler(
    filename = function(){
      paste0('camera_heatmap','.pdf',sep='')
    },
    content = function(file){
      pdf(file,width=9,height = 14,useDingbats=FALSE, onefile = F)
      camheatmapalt()
      dev.off()
    })
####################################### SPIA PATHWAY ANALYSIS##########################
#For the chosen contrast, get SPIA results from the RData
spia_op <- reactive({
results=fileload()
contrast=input$contrast #get user input for contrast/comparison
c=paste('results$spia$',contrast,sep='') #get SPIA data corresponding to the contrast chosen
sp=eval(parse(text = c)) #convert string to variable
spia_result=data.frame(sp)
validate(
need(nrow(spia_result) > 1, "No Results")
)
spia_result$KEGGLINK <- paste0("<a href='",spia_result$KEGGLINK,"' target='_blank'>","Link to KEGG","</a>")
return(spia_result)
})
  #Display SPIA results in a table
  output$spiaop <- DT::renderDataTable({
    input$runspia
    input$contrast
    input$projects
    isolate({
      DT::datatable(spia_op(),escape = FALSE,selection = list(mode = 'single', selected =1),
                    extensions = c('Buttons','Scroller'),
                    options = list(
                      dom = 'RMDCT<"clear">lfrtip',
                      searchHighlight = TRUE,
                      pageLength = 10,
                      lengthMenu = list(c(5, 10, 15, 20, 25, -1), c('5', '10', '15', '20', '25', 'All')),
                      scrollX = TRUE,
                      buttons = c('copy', 'print')
                    ),rownames=FALSE)
    })
  })
  #Display the SPIA term selected from table above the genelist
  output$spiadesc <- renderText({
    s = input$spiaop_rows_selected
    dt = spia_op()
    dt = dt[s, , drop=FALSE]
    camname=dt$Name
    text=paste('Gene list for SPIA term :',camname,'-',dt[2],sep="")
    return(text)
  })
  #Get genelist for SPIA term selected from the table of SPIA results
  # NOTE(review): keggLink() performs a live query against the KEGG REST API,
  # so this reactive needs network access. Also, 'org' here can be a vector
  # if pData lists several organisms; the scalar if() assumes one value.
  spiagenes = reactive({
    spiaid=spia_op()
    final_res=datasetInput()
    s=input$spiaop_rows_selected
    row=spiaid[s, ,drop=FALSE]
    results=fileload()
    pd=pData(results$eset)
    org=unique(pd$organism)
    if(org %in% c("Mus musculus", "Mouse", "Mm","Mus_musculus", "mouse")){
      id=paste("mmu",row$ID,sep="")
      allgenelist=keggLink("mmu",id) #for each kegg id, get gene list
    }else{
      id=paste("hsa",row$ID,sep="")
      allgenelist=keggLink("hsa",id) #for each kegg id, get gene list
    }
    p=strsplit(allgenelist,":")
    genes_entrez=sapply(p,"[",2)
    genelist=final_res[final_res$ENTREZID %in% genes_entrez,]
    return(genelist) #return the genelist
  })
  #Render table to display the genelist per SPIA term
  # NOTE(review): 'caption' is nested inside the selection list below, which
  # looks misplaced -- it is likely intended as a datatable() argument.
  output$spiagenes = DT::renderDataTable({
    DT::datatable(spiagenes(),
                  extensions = c('Buttons','Scroller'),
                  options = list(dom = 'Bfrtip',
                                 searchHighlight = TRUE,
                                 pageLength = 10,
                                 lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
                                 scrollX = TRUE,
                                 buttons = c('copy', 'print')
                  ),rownames=FALSE,escape=FALSE,selection = list(mode = 'single', selected =1,caption="Genelist"))
  })
  #Download function to download SPIA results as a csv file
  output$dwldspia <- downloadHandler(
    filename = function() { paste(input$projects,'_',input$contrast, '_spia.csv', sep='') },
    content = function(file) {
      write.csv(spia_op(), file)
    })
  ######### CREATE HEATMAP FROM SPIA ####################
  #extract voom expression data of all genes corresponding to selected row in spia datatable
  heatmapspia <- reactive({
    genesid=spiagenes() #gene list from SPIA term
    voom=as.data.frame(datasetInput3())#voom data
    genes_spia<-voom[rownames(voom) %in% rownames(genesid),]
  })
  #get max and min genes per SPIA term to show on slider
  output$hmplimspia <- renderUI({
    pval=spiagenes()
    top_expr=datasetInput3()
    top_expr=top_expr[rownames(top_expr) %in% rownames(pval),]
    mx=nrow(top_expr)
    sliderInput("hmplimspia", label = h5("Select number of genes to view in the heatmap"), min = 2,max =mx, value = mx)
  })
  #Generate a heatmap color scale
  output$hmpscale_out2spia = renderPlot({
    hmpscaletest(hmpcol=input$hmpcolspia,voom=datasetInput3(),checkbox=input$checkboxspia)
  })
  #Function to generate d3 camera heatmap
  # NOTE(review): this is a byte-for-byte duplicate of the camheatmap
  # reactive defined in the camera section above; this second assignment
  # simply overwrites the earlier identical one and could be deleted.
  camheatmap = reactive({
    dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
    top_expr=heatmapfun(results=fileload(),expr=heatmapcam(),pval=campick2(),file = readexcel(),prj=input$projects,hmplim=input$hmplimcam,hmpsamp=input$hmpsamp2,
                        contrast=input$contrast)
    sym=rownames(top_expr)
    #Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
    ind = apply(top_expr, 1, var) == 0
    top_expr <- top_expr[!ind,]
    if(input$checkbox2==TRUE){
      d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby2,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcol2))(30),labRow = sym)}
    else{d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby2,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol2)))(30),labRow = sym)}
  })
  # Render SPIA heatmap
  # (the bare input$... reads only register reactive dependencies)
  output$spiaheatmap <- renderD3heatmap({
    input$hmpcolspia #user input-color palette
    input$clusterbyspia #user input-cluster by
    input$checkboxspia #user input-reverse colors
    input$gene #user input-slider input for number of genes
    input$genelist
    input$spiaop_rows_selected
    input$projects
    input$contrast
    input$hmpsamp2spia
    input$hmplimspia
    spiaheatmap()
  })
  #create SPIA heatmap function (interactive d3 version)
  spiaheatmap <- reactive({
    # 1 - Pearson correlation distance for clustering
    dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
    top_expr=heatmapfun(results=fileload(),expr=heatmapspia(),pval=spiagenes(),file = readexcel(),prj=input$projects,hmplim=input$hmplimspia,hmpsamp=input$hmpsamp2spia,
                        contrast=input$contrast)
    sym=rownames(top_expr)
    # NOTE(review): sym is captured BEFORE the zero-variance filter below, so
    # when rows are dropped the labRow labels can misalign with the matrix.
    #Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
    ind = apply(top_expr, 1, var) == 0
    top_expr <- top_expr[!ind,]
    if(input$checkboxspia==TRUE){
      d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterbyspia,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcolspia))(30),labRow = sym)}
    else{d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterbyspia,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcolspia)))(30),labRow = sym)}
  })
  #Create non-interactive SPIA heatmap function for download
  spiaheatmapalt <- reactive({
    dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
    top_expr=heatmapfun(results=fileload(),expr=heatmapspia(),pval=spiagenes(),file = readexcel(),prj=input$projects,hmplim=input$hmplimspia,hmpsamp=input$hmpsamp2spia,
                        contrast=input$contrast)
    sym=rownames(top_expr)
    #Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
    ind = apply(top_expr, 1, var) == 0
    top_expr <- top_expr[!ind,]
    if(input$checkboxspia==TRUE){
      aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv=TRUE,fontsize = 10,color = colorRampPalette(brewer.pal(n = 9, input$hmpcolspia))(30),labRow = sym)}
    else{aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv=TRUE,fontsize = 10,color = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcolspia)))(30),labRow = sym)}
  })
  #Download SPIA heatmap as a PDF
  output$downloadspiaheatmap <- downloadHandler(
    filename = function(){
      paste0('SPIA_heatmap','.pdf',sep='')
    },
    content = function(file){
      pdf(file,width=9,height = 14,useDingbats=FALSE, onefile = F)
      spiaheatmapalt()
      dev.off()
    })
  ##################################### REACTOME PA ANALYSIS########################
  #Get list of enriched pathways
  # NOTE(review): genes are pre-filtered with a hard-coded |fc| > 2 cutoff,
  # independent of the user's logFC slider. Also 'org' can be a vector if
  # pData lists several organisms; the scalar if() assumes one value.
  enrichpath = reactive({
    results=fileload()
    pd=pData(results$eset)
    org=unique(pd$organism)
    if(org %in% c("Mus musculus", "Mouse", "Mm","Mus_musculus","mouse")){
      org="mouse"
    }else{
      org="human"
    }
    deg= datasetInput0.5()
    deg=deg[abs(deg$fc) >2,]
    res <- enrichPathway(gene=deg$ENTREZID,pvalueCutoff=0.05, readable=T,organism=org)
  })
  #create different table to display (geneID column dropped for readability)
  enrichpath2 = reactive({
    withProgress(session = session, message = 'Generating...',detail = 'Please Wait...',{
      res= enrichpath()
      res=as.data.frame(res)
      validate(
        need(nrow(res) > 0, "No results")
      )
      res = res %>% dplyr::select(-geneID)
    })
  })
  #get list of enriched pathways and display in table
  output$enrichpath = DT::renderDataTable({
    input$project
    input$contrast
    DT::datatable(enrichpath2(),
                  extensions = 'Buttons', options = list(
                    dom = 'Bfrtip',
                    buttons = list()),
                  rownames=FALSE,selection = list(mode = 'single', selected =1),escape=FALSE)
  })
  #Display list of genes in each enrichment pathway
  # (the "/"-separated geneID string is reformatted as a comma list)
  enrichgenes = reactive({
    res=enrichpath()
    validate(
      need(nrow(as.data.frame(res))>0,"No Enriched Pathways")
    )
    res=as.data.frame(res)
    s = input$enrichpath_rows_selected
    genes = res[s, , drop=FALSE]
    genes = genes$geneID
    genes=gsub("/",", ",genes)
    return(genes)
  })
  #print genelist
  output$enrichgenes = renderPrint({
    enrichgenes()
  })
  #Create plot for visualizing enrichment results
  # (bar, dot, or enrichment-map plot depending on the radio button)
  enrichplot = reactive({
    res= enrichpath()
    shiny::validate(
      need(nrow(as.data.frame(res))>0,"No Enriched Pathways")
    )
    if(input$enrichradio=='barplot'){
      barplot(res, showCategory = input$ncat)
    }else if(input$enrichradio=='dotplot'){
      dotplot(res,showCategory= input$ncat)
    }else if(input$enrichradio=='enrich'){
      emapplot(res)
    }
  })
  #Render the plot
  output$enrichplot <- renderPlot({
    enrichplot()
  })
  #Render the gene-concept network plot
  output$cnetplot <- renderPlot({
    withProgress(session = session, message = 'Generating...',detail = 'Please Wait...',{
      res= enrichpath()
      validate(
        need(nrow(as.data.frame(res))>0,"No Enriched Pathways")
      )
      limmares= datasetInput0.5()
      genelist= limmares$fc
      names(genelist)=limmares$ENTREZID
      cnetplot(res, categorySize="pvalue", foldChange=genelist)
    })
  })
  ############################################# REACTOME PA GSEA ###########################################
  #Get list of enriched pathways from GSEA (ranked by fold change)
  gseapath = reactive({
    results=fileload()
    pd=pData(results$eset)
    org=unique(pd$organism)
    if(org %in% c("Mus musculus", "Mouse", "Mm","Mus_musculus","mouse")){
      org="mouse"
    }else{
      org="human"
    }
    limmares= datasetInput0.5()
    genelist= limmares$fc
    names(genelist)=limmares$ENTREZID
    genelist = sort(genelist, decreasing = TRUE)
    y <- gsePathway(genelist, nPerm=10000,pvalueCutoff=0.2,pAdjustMethod="BH", verbose=FALSE,organism=org)
  })
  #create different table to display
  gseapath2 = reactive({
    res= gseapath()
    res=as.data.frame(res)
  })
  #Create Results table
  output$gseares = DT::renderDataTable({
    input$project
    input$contrast
    withProgress(session = session, message = 'Generating...',detail = 'Please Wait...',{
      DT::datatable(gseapath2(),
                    extensions = 'Buttons', options = list(
                      dom = 'Bfrtip',
                      buttons = list()),
                    rownames=FALSE,selection = list(mode = 'single', selected =1),escape=FALSE)
    })
  })
  #Render the enrichment-map plot
  output$plotemap <- renderPlot({
    withProgress(session = session, message = 'Generating...',detail = 'Please Wait...',{
      res= gseapath()
      emapplot(res, color="pvalue")
    })
  })
  #Render the running-score GSEA plot for the selected pathway
  output$plotgsea <- renderPlot({
    res= gseapath()
    gseares=gseapath2()
    s = input$gseares_rows_selected
    gseares = gseares[s, , drop=FALSE]
    id = gseares$ID
    gseaplot(res, geneSetID = id)
  })
  #Render the pathway diagram for the selected GSEA pathway
  output$plotpath <- renderPlot({
    gseares=gseapath2()
    s = input$gseares_rows_selected
    gseares = gseares[s, , drop=FALSE]
    id = gseares$Description
    limmares= datasetInput0.5()
    limmares=limmares[is.na(limmares$ENTREZID)==F,]
    limmares=limmares[!duplicated(limmares$ENTREZID),]
    genelist= limmares$fc
    names(genelist)=limmares$ENTREZID
    genelist = sort(genelist, decreasing = TRUE)
    results=fileload()
    pd=pData(results$eset)
    org=unique(pd$organism)
    if(org %in% c("Mus musculus", "Mouse", "Mm","Mus_musculus","mouse")){
      org="mouse"
    }else{
      org="human"
    }
    viewPathway(id, readable=TRUE, foldChange=genelist, organism = org)
  })
################################### GAGE GENE ONTOLOGY #####################################
#Run gage and get results
datasetInput7 = reactive({
final_res=datasetInput0.5() #get limma data
logfc=final_res$fc #get FC values from limma data
names(logfc)=final_res$ENTREZID # get entrez ids for each row
results=fileload()
pd=pData(results$eset)
organism=pd$organism
prjs=c("DS_FalcorFoxA2","YT_mir302","RJ_ESC_Laminin","RJ_CardiacHdac7_updated","DS_FalcorKO")
prj2=c("DK_IPSC_lungepi","ZA_Boa_PKM2")
if(!input$projects %in% prjs){
if(!input$projects %in% prj2){
validate(
need(length(unique(organism))==1,"Please check pData file for errors in organism column. Does it have more than one organism or is it empty?")
)
organism=unique(pd$organism)[1]
}}
if(input$projects %in% prjs){
organism="mouse"
}
else if(input$projects %in% prj2){
organism="human"
}
if(organism=="human")
{
data(go.sets.hs) #load GO data from gage
data(go.subs.hs)
if(input$gage=='BP')
{
gobpsets = go.sets.hs[go.subs.hs$BP]
go_res = gage(logfc, gsets=gobpsets)
}
else if(input$gage=='cc')
{
goccsets = go.sets.hs[go.subs.hs$CC]
go_res = gage(logfc, gsets=goccsets, same.dir=TRUE)
}
else if(input$gage=='MF')
{
gomfsets = go.sets.hs[go.subs.hs$MF]
go_res = gage(logfc, gsets=gomfsets, same.dir=TRUE)
}}
else if(organism=="Rat")
{
data(go.sets.rn) #load GO data from gage
data(go.subs.rn)
if(input$gage=='BP')
{
gobpsets = go.sets.rn[go.subs.rn$BP]
go_res = gage(logfc, gsets=gobpsets)
}
else if(input$gage=='cc')
{
goccsets = go.sets.rn[go.subs.rn$CC]
go_res = gage(logfc, gsets=goccsets, same.dir=TRUE)
}
else if(input$gage=='MF')
{
gomfsets = go.sets.rn[go.subs.rn$MF]
go_res = gage(logfc, gsets=gomfsets, same.dir=TRUE)
}
}
else
{
data(go.sets.mm) #load GO data from gage
data(go.subs.mm)
if(input$gage=='BP')
{
gobpsets = go.sets.mm[go.subs.mm$BP]
go_res = gage(logfc, gsets=gobpsets)
}
else if(input$gage=='cc')
{
goccsets = go.sets.mm[go.subs.mm$CC]
go_res = gage(logfc, gsets=goccsets, same.dir=TRUE)
}
else if(input$gage=='MF')
{
gomfsets = go.sets.mm[go.subs.mm$MF]
go_res = gage(logfc, gsets=gomfsets, same.dir=TRUE)
}
}
return(go_res)
})
#Get all GO terms based on user-selection (upregulated/downregulated)
datasetInput8 = reactive({
go_res=datasetInput7()
go_dd=input$go_dd
if(go_dd=="upreg"){
res=data.frame(go_res$greater)} #load limma data
else if(go_dd=="downreg"){
res=data.frame(go_res$less)
}
res = data.frame(GOterm=rownames(res),res)
#Get GO id from GO terms
row=data.frame(lapply(res,as.character),stringsAsFactors = FALSE)
p=strsplit(row[,1], " ")
m=sapply(p,"[",1)
go_up=data.frame(GO_id=m,res)
go_term=go_up$GO_id
url= paste("http://amigo.geneontology.org/amigo/term/",go_term,sep = "") #create link to Gene Ontology Consortium
go_up$link=paste0("<a href='",url,"'target='_blank'>","Link to Gene Ontology Consortium","</a>")
go_up=as.data.frame(go_up)
return(go_up)
})
#Print GO results in datatable
output$table4 = DT::renderDataTable({
input$go_dd
input$gage
input$radio
input$project
input$contrast
withProgress(session = session, message = 'Generating...',detail = 'Please Wait...',{
DT::datatable(datasetInput8(),
extensions = c('Buttons','Scroller'),
options = list(dom = 'Bfrtip',
searchHighlight = TRUE,
pageLength = 10,
lengthMenu = list(c(30, 50, 100, 150, 200, -1), c('30', '50', '100', '150', '200', 'All')),
scrollX = TRUE,
buttons = c('copy','print')
),rownames=FALSE,escape=FALSE,selection = list(mode = 'single', selected =1))
})
})
# Download function to get GO results in csv file
output$downloadgo <- downloadHandler(
filename = function() { paste('GO_',input$projects,'_',input$contrast,'_',input$gage,'_',input$go_dd,'.csv', sep='') },
content = function(file) {
write.csv(datasetInput8(), file)
})
############## GET GENES FROM GO #################
# Title text for the gene-list table: names the GO term of the row currently
# selected in the GO results datatable.
output$godesc <- renderText({
sel_row <- input$table4_rows_selected
go_table <- datasetInput8()
selected <- go_table[sel_row, , drop = FALSE]
paste0("Gene list for GO term :", selected$GO_id)
})
# Reactive: limma results for the genes annotated to the GO term selected in
# the GO table. Looks the term up in the organism-matched gage GO gene sets.
GOHeatup = reactive({
s = input$table4_rows_selected
dt = datasetInput8() #load GO data
dt = dt[s, , drop=FALSE] #GO row corresponding to the selected table row
results=fileload()
pd=pData(results$eset)
organism=pd$organism[1]
# Some projects carry unreliable organism metadata; hard-coded overrides.
prjs=c("DS_FalcorFoxA2","YT_mir302","RJ_ESC_Laminin","RJ_CardiacHdac7_updated","DS_FalcorKO")
prj2=c("DK_IPSC_lungepi","ZA_Boa_PKM2")
if(input$projects %in% prjs){
organism="mouse"
}
else if(input$projects %in% prj2){
organism="human"
}
# as.character() guards against GO_id being a factor (data.frame() may have
# created one): `[[` would otherwise index by the factor's integer code.
goid = as.character(dt$GO_id)
# Pick the organism-specific GO gene-set list and look the term up by name.
# This replaces the previous eval(parse(text = paste(...))) construction of
# the exact same lookup.
if(organism=="human"){
gosets = go.sets.hs
}
else if(organism=="Rat"){
gosets = go.sets.rn
}
else{
gosets = go.sets.mm #default: mouse
}
entrezid = gosets[[goid]]
limma=datasetInput0.5()
#limma rows whose Entrez id belongs to the selected GO gene set
lim_vals=limma[limma$ENTREZID %in% entrezid,]
})
#Print datatable with gene list (limma rows) for the selected GO term.
output$x4 = DT::renderDataTable({
#bare input$... reads only register reactive dependencies
input$gage
input$go_dd
input$radio
input$project
input$contrast
goheatup=GOHeatup()
},caption="Gene List",escape=FALSE)
# Download handler: export the GO-term gene list (limma rows from GOHeatup)
# as a CSV file.
output$downloadgogene <- downloadHandler(
filename = function() {
paste0("GO_", input$projects, "_", input$contrast, "_", input$gage, "_", input$go_dd, ".csv")
},
content = function(file) {
write.csv(GOHeatup(), file)
}
)
########## MAKE HEATMAP WITH GO ###################
#Set limit for number of genes that can be viewed in the GO heatmap:
#slider max is the number of GO-term genes actually present in the voom
#matrix, so the heatmap cannot request more rows than exist.
output$hmplimgo <- renderUI({
pval=GOHeatup()
top_expr=datasetInput3()
top_expr=top_expr[rownames(top_expr) %in% rownames(pval),]
mx=nrow(top_expr)
sliderInput("hmplimgo", label = h5("Select number of genes to view in the heatmap"), min = 2,max =mx, value = mx)
})
#Generate a heatmap color scale (legend) for the GO heatmap, driven by the
#palette selection (input$hmpcol3) and the reverse-colors checkbox.
output$hmpscale_out3 = renderPlot({
hmpscaletest(hmpcol=input$hmpcol3,voom=datasetInput3(),checkbox=input$checkbox3)
})
#plot heatmap: interactive (d3) heatmap of the genes in the selected GO term
goheatmapup <- reactive({
#Pearson-correlation distance used for row/column clustering
dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
top_expr=datasetInput3()
pval=GOHeatup()
top_expr=top_expr[rownames(top_expr) %in% rownames(pval),]#voom expression data of all genes corresponding to selected row in GO datatable
top_expr=heatmapfun(results=fileload(),expr=as.data.frame(top_expr),pval=GOHeatup(),file = readexcel(),prj=input$projects,hmplim=input$hmplimgo,hmpsamp=input$hmpsamp3,
contrast=input$contrast)
#Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
ind = apply(top_expr, 1, var) == 0
top_expr <- top_expr[!ind,]
sym=rownames(top_expr)
#input$checkbox3 toggles a reversed color palette
if(input$checkbox3==TRUE){
d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby3,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcol3))(30),labRow = rownames(top_expr))}
else{d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby3,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol3)))(30),labRow =rownames(top_expr))}
})
# render D3heatmap for GO genes.
# The bare input$... reads below only register reactive dependencies.
output$goheatmap <- renderD3heatmap({
input$hmpcol #user input-color palette
input$clusterby #user input-cluster by
input$checkbox #user input-reverse colors
input$gene #user input-slider input for number of genes
input$genelist
input$makeheat
input$gage
input$go_dd
input$table4_rows_selected
input$tablecam_rows_selected
input$projects
input$contrast
input$cameradd
input$hmpsamp3
input$hmplimgo
goheatmapup()
})
#function for non-interactive GO heatmap, used by the PDF download handler
goheatmapupalt <- reactive({
#Pearson-correlation distance used for row/column clustering
dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
top_expr=datasetInput3()
#FIX: this assignment was missing, so the subset below failed with
#"object 'pval' not found" (compare with the interactive goheatmapup()).
pval=GOHeatup()
top_expr=top_expr[rownames(top_expr) %in% rownames(pval),]#voom expression data of all genes corresponding to selected row in GO datatable
top_expr=heatmapfun(results=fileload(),expr=top_expr,pval=GOHeatup(),file = readexcel(),prj=input$projects,hmplim=input$hmplimgo,hmpsamp=input$hmpsamp3,
contrast=input$contrast)
#Remove rows that have variance 0 (This will avoid the Na/Nan/Inf error in heatmap)
ind = apply(top_expr, 1, var) == 0
top_expr <- top_expr[!ind,]
if(input$checkbox3==TRUE){
aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv =TRUE,fontsize = 10,color = colorRampPalette(brewer.pal(n = 9, input$hmpcol3))(30),labRow = rownames(top_expr))}
else{aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv = TRUE,fontsize = 10,color = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol3)))(30),labRow = rownames(top_expr))}
})
# Download handler: write the non-interactive GO heatmap to a PDF file.
output$downloadgoheatmap <- downloadHandler(
filename = function() {
"GO_heatmap.pdf"
},
content = function(file) {
pdf(file, width = 9, height = 14, useDingbats = FALSE, onefile = FALSE)
goheatmapupalt()
dev.off()
}
)
################################# CREATE HEATMAP FOR LIMMA DATA#####################################
#Text title for type of heatmap being displayed in the heatmap tab.
#switch() replaces the if/else chain; the unused local `hmip` was removed
#(the reactive dependency comes from reading input$hmip itself). An
#unrecognized value yields NULL, exactly as the original chain did.
output$htitle <- renderText({
switch(input$hmip,
genenum = "Heatmap of Top Genes ",
geneli = "Heatmap of Genelist ",
vargenes = "Heatmap of top n variable genes ")
})
#manually create scale (colorkey) for the main heatmap, driven by the
#palette selection (input$hmpcol) and the reverse-colors checkbox
output$hmpscale_out = renderPlot({
hmpscaletest(hmpcol=input$hmpcol,voom=datasetInput3(),checkbox=input$checkbox)
})
#################### TOP GENES ####################
#Sort-by dropdown for the Top Genes heatmap. Directional filters (up/down)
#only offer FDR and plain fold change; "none"/"both" also offer signed
#fold-change sorts. The original four branches contained two identical
#pairs; they are collapsed here with %in% (same UI for the same inputs).
output$dropdown <- renderUI({
radio=input$radio
if(radio %in% c("none","both")){
selectInput("sortby", "Sort By",c('FDR'="sortnone",'Absolute Fold Change' = "sortab",'Positive Fold Change' = "sortpos",'Negative Fold Change' = "sortneg"))
}
else if(radio %in% c("up","down")){
selectInput("sortby", "Sort By",c('FDR'="sortnone",'Fold Change' = "sortab"))
}
})
#create heatmap function for top number of genes as chosen from the slider.
#Reactive: returns the top n rows of the limma table (datasetInput),
#ordered by the user-selected criterion:
#  sortnone - ascending adjusted p-value (FDR)
#  sortab   - descending absolute fold change
#  sortpos  - descending fold change; sortneg - ascending fold change
datasetInput4 <- reactive({
validate(
need(input$gene, "Please Enter number of genes to plot heatmap ")
)
#sort by pval
n<-input$gene #number of genes selected by user (input from slider)
d<-datasetInput()
sortby=input$sortby
if(sortby=='sortnone'){
res<-d[order(d$adj.P.Val),]
}else if(sortby=='sortab'){
res<-d[order(-abs(d$fc)),]
}else if(sortby=='sortpos'){
res<-d[order(-d$fc),]
}else if(sortby=='sortneg'){
res<-d[order(d$fc),]
}
#cap n at the number of available rows
if(n>nrow(d)){
reqd_res=res[1:nrow(d),]} #get top n number of genes
else{
reqd_res=res[1:n,]
}
return(reqd_res)
})
#create heatmap function for top n genes: interactive (d3) heatmap of the
#top-gene expression matrix, with rows relabeled by gene SYMBOL
heatmap <- reactive({
#Pearson-correlation distance used for row/column clustering
dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
pval=datasetInput4()
top_expr= createheatmap(results=fileload(),expr=datasetInput3(),pval=pval,hmpsamp=input$hmpsamp,contrast=input$contrast)
top_expr=as.data.frame(top_expr)
col=colnames(top_expr)
#join on ENSEMBL to pull in SYMBOL row labels, then restore sample columns
top_expr$ENSEMBL=rownames(top_expr)
top_expr=inner_join(top_expr,pval,by="ENSEMBL")
rownames(top_expr)=top_expr$SYMBOL
#NOTE(review): select() with a bare external character vector is deprecated
#in newer dplyr; dplyr::select(all_of(col)) would be the modern form -- confirm
top_expr=top_expr %>% dplyr::select(col)
validate(
need(nrow(top_expr) > 1, "No results")
)
#input$checkbox toggles a reversed color palette
if(input$checkbox==TRUE){
d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcol))(30))}
else{d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol)))(30))}
})
#alternate (non-interactive) heatmap function for the PDF download;
#rows are labeled with gene symbols via labRow
heatmapalt <- reactive({
#Pearson-correlation distance used for row/column clustering
dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
pval=datasetInput4()
top_expr= createheatmap(results=fileload(),expr=datasetInput3(),pval=pval,hmpsamp=input$hmpsamp,contrast=input$contrast)
sym=pval$SYMBOL
validate(
need(nrow(top_expr) > 1, "No results")
)
if(input$checkbox==TRUE){
aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv = TRUE,Colv = TRUE,fontsize = 10,color = colorRampPalette(brewer.pal(n = 9, input$hmpcol))(30),labRow = sym)}
else{aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv = TRUE,Colv = TRUE,fontsize = 10,color = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol)))(30),labRow = sym)}
})
####### ENTER GENELIST ############################
# Get gene list from user, annotate to ENSEMBL id and get their expression values.
# Reads one identifier per line from the uploaded file, normalizes case
# (lowercase, then capitalize first letter), matches against the limma table
# on the user-chosen identifier column, and returns the voom expression
# matrix for the matched genes, rownamed by SYMBOL.
# NOTE(review): the tolower/firstup normalization matches mouse-style gene
# symbols, but ENSEMBL and Entrez ids are typically all-uppercase/numeric,
# so those branches may rely on case-insensitive data -- confirm.
datasetInput41 = reactive({
file=input$genelistfile
genes=read.table(file=file$datapath, stringsAsFactors = F) #get complete gene list as string
df=as.vector(genes$V1)
df=tolower(df)
#capitalize only the first letter of each identifier
firstup <- function(x) {
substr(x, 1, 1) <- toupper(substr(x, 1, 1))
x
}
genelist=firstup(df)
results=fileload()
#load limma and voom data
limma=datasetInput()
voom=datasetInput3()
#get expression values of the genes in the gene list
# user-defined identifier for the gene list
if(input$selectidentifier=='ensembl')
{
sym=limma[limma$ENSEMBL %in% genelist,]
sym= sym %>% dplyr::select(ENSEMBL,SYMBOL)
# genes <- getBM(attributes=c('ensembl_gene_id','external_gene_name'), filters ='ensembl_gene_id', values =df, mart = ensembl)
# genelist=genes$ensembl_gene_id
}
else if(input$selectidentifier=='entrez')
{
sym=limma[limma$ENTREZID %in% genelist,]
sym= sym %>% dplyr::select(ENSEMBL,SYMBOL)
}
else if(input$selectidentifier=='genesym')
{
sym=limma[limma$SYMBOL %in% genelist,]
sym= sym %>% dplyr::select(ENSEMBL,SYMBOL)
}
#merge on ENSEMBL rownames, relabel rows by SYMBOL, drop helper columns
expr_vals=merge(voom,sym,by="row.names")
rownames(expr_vals)=expr_vals$SYMBOL
expr_vals = expr_vals %>% dplyr::select(-Row.names,-SYMBOL,-ENSEMBL)
validate(
need(nrow(expr_vals) > 1, "Please Check Identifier chosen or Select genelist from Raw Expression Data tab")
)
return(expr_vals)
})
#create heatmap function for gene-list given by user: interactive (d3)
#heatmap of the uploaded genelist's expression values
heatmap2 = function(){
#Pearson-correlation distance used for row/column clustering
dist2 = function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
limma=datasetInput()
expr = datasetInput41()
expr2= createheatmap(results=fileload(),expr=expr,hmpsamp=input$hmpsamp,contrast=input$contrast)
validate(
need(nrow(expr2)>1, "No results")
)
if(input$checkbox==TRUE){
d3heatmap(as.matrix(expr2),distfun=dist2,scale="row",dendrogram=input$clusterby,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcol))(30))}
else{d3heatmap(as.matrix(expr2),distfun=dist2,scale="row",dendrogram=input$clusterby,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol)))(30))}
}
#Non-interactive genelist heatmap for the PDF download.
heatmap2alt = function(){
#Pearson-correlation distance used for row/column clustering
dist2 = function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
expr2 = datasetInput41()
top_expr= createheatmap(results=fileload(),expr=expr2,hmpsamp=input$hmpsamp,contrast=input$contrast)
#FIX: plot the processed matrix (top_expr), not the raw expr2 -- the
#interactive counterpart heatmap2() plots the createheatmap() output, and
#the original computed top_expr but never used it.
if(input$checkbox==TRUE){
aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv=TRUE,fontsize = 10,color = colorRampPalette(brewer.pal(n = 9, input$hmpcol))(30))}
else{aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv=TRUE,Colv=TRUE,fontsize = 10,color = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol)))(30))}
}
####### TOP VARIABLE GENES #######################
#Extract top n (user-selected) variable genes: ranks genes by per-row
#expression variance and returns their expression matrix, rownamed by SYMBOL
var.genes = reactive({
n=as.numeric(input$vgene)
results=fileload()
v = results$eset
keepGenes <- v@featureData@data
#NOTE(review): pData is assigned but never used below -- confirm it can go
pData<-phenoData(v)
v.filter = v[rownames(v@assayData$exprs) %in% rownames(keepGenes),]
Pvars <- apply(v.filter@assayData$exprs,1,var)
#indices of the n most variable genes (capped at the number available);
#note this local `select` shadows dplyr::select inside this reactive
select <- order(Pvars, decreasing = TRUE)[seq_len(min(n,length(Pvars)))]
v.var <-v.filter[select,]
m<-v.var@assayData$exprs
rownames(m) <- v.var@featureData@data$SYMBOL
m=as.data.frame(m)
m=unique(m) #drop duplicated rows (e.g. repeated symbols with identical values)
return(m)
})
#D3 heatmap for top n variable genes (interactive)
varheatmap <- reactive({
#Pearson-correlation distance used for row/column clustering
dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
top_expr= createheatmap(results=fileload(),expr=var.genes(),hmpsamp=input$hmpsamp,contrast=input$contrast)
validate(
need(nrow(top_expr) > 1, "No results")
)
if(input$checkbox==TRUE){
d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby,xaxis_font_size = 10,colors = colorRampPalette(brewer.pal(n = 9, input$hmpcol))(30))}
else{d3heatmap(as.matrix(top_expr),distfun=dist2,scale="row",dendrogram=input$clusterby,xaxis_font_size = 10,colors = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol)))(30))}
})
# Alternate function to download non-interactive heatmap of top n variable genes
varheatmapalt <- reactive({
#Pearson-correlation distance used for row/column clustering
dist2 <- function(x, ...) {as.dist(1-cor(t(x), method="pearson"))}
#FIX: the reactive is named var.genes (dot, not underscore); the original
#call var_genes() raised "could not find function" on every download.
top_expr= createheatmap(results=fileload(),expr=var.genes(),hmpsamp=input$hmpsamp,contrast=input$contrast)
validate(
need(nrow(top_expr) > 1, "No results")
)
if(input$checkbox==TRUE){
aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv = TRUE,Colv = TRUE,fontsize = 10,color = colorRampPalette(brewer.pal(n = 9, input$hmpcol))(30))}
else{aheatmap(as.matrix(top_expr),distfun=dist2,scale="row",Rowv = TRUE,Colv = TRUE,fontsize = 10,color = colorRampPalette(rev(brewer.pal(n = 9, input$hmpcol)))(30))}
})
# Render d3 heatmap function.
# The bare input$... reads register reactive dependencies; the isolate()
# wrapper below keeps the dispatch itself from adding further dependencies.
output$heatmap <- renderD3heatmap({
input$hmpcol #user input-color palette
input$clusterby #user input-cluster by
input$checkbox #user input-reverse colors
input$gene #user input-slider input for number of genes
input$genelist
input$hmip
input$makeheat
input$gage
input$go_dd
input$ga
input$table4_rows_selected
input$tablecam_rows_selected
input$radio
input$projects
input$contrast
input$cameradd
input$hmpsamp
input$hmplim
input$lfc
input$apval
input$sortby
input$vgene
#if user selected enter n num of genes, call heatmap() and if user entered genelist, call heatmap2()
isolate({
if(input$hmip == 'genenum'){heatmap()}
else if(input$hmip == 'geneli'){heatmap2()}
else if(input$hmip == 'vargenes' ){varheatmap()}
})
})
#Download function for heatmaps: writes the non-interactive variant of
#whichever heatmap type is currently selected (input$hmip) into a PDF
output$downloadheatmap <- downloadHandler(
filename = function(){
#NOTE(review): sep='' is not an argument of paste0(); it is pasted as an
#extra empty string here (harmless -- result is "heatmap.pdf")
paste0('heatmap','.pdf',sep='')
},
content = function(file){
pdf(file,width=9,height =14,useDingbats=FALSE, onefile = F)
if(input$hmip == 'genenum'){heatmapalt()}
else if(input$hmip == 'geneli'){heatmap2alt()}
else if(input$hmip == 'vargenes' ){varheatmapalt()}
dev.off()
})
}#end of server
|
2b9b5d35ae9c81684806f1b83edbc155b6d3a167
|
b5491a5d0c85ab57b44931bfd50e5b7dd500a089
|
/2term/05_06_correlation_lm/class/05_correlation.R
|
3cd7bdb573d3991a30c2e61e6089f8446957bf28
|
[] |
no_license
|
rutaolta/R
|
fc88f2c8f2e73782950c559f10c5d26e14c46588
|
3020d1341cbe4de27438ca0f04018b7d0ffe28b4
|
refs/heads/master
| 2023-06-02T02:30:09.578374
| 2021-06-24T07:58:29
| 2021-06-24T07:58:29
| 364,297,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
05_correlation.R
|
# Correlation demo on the iris data: scatterplot of petal length vs width,
# Pearson and Spearman correlations, a significance test, and the Anscombe
# quartet as a cautionary example.
df <- iris
x <- df$Petal.Length
y <- df$Petal.Width
plot(x, y)
cor(x, y)                       # Pearson correlation (default method)
cor(x, y, method = "spearman")  # rank-based correlation
cor.test(x, y)                  # test of association (Pearson)
datasets::anscombe
b5d524dee9a265abbad2e9d5002fb56e9a0f1fbd
|
9a1b4d0627facd3d52ee4e20a6d638f71a482936
|
/man/c2BroadSets.Rd
|
e8348d994ecb320b1c860e4a3a34c46db374ef14
|
[] |
no_license
|
THERMOSTATS/RVA
|
3c52d2b4a647c6b9d99eed98ec5fc86d6a510bcb
|
dbdf9b4f3e2b10f613b2d08ef9f7b04d3261f135
|
refs/heads/master
| 2023-08-18T22:22:32.044742
| 2021-10-29T16:02:06
| 2021-10-29T16:02:06
| 288,225,122
| 7
| 2
| null | 2020-12-07T14:58:10
| 2020-08-17T16:04:04
|
R
|
UTF-8
|
R
| false
| true
| 389
|
rd
|
c2BroadSets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{c2BroadSets}
\alias{c2BroadSets}
\title{GeneSetCollection data bundled with the package}
\format{
GeneSetCollection
\describe{
\item{Genesetcollection}{GeneSetCollection from BroadCollection}
}
}
\usage{
c2BroadSets
}
\description{
A \code{GeneSetCollection} built from the Broad Collection, bundled with the package (see the Format section).
}
\keyword{datasets}
|
bfc3df3be205662617f9186520e852f1ece7418e
|
36f9fb876beda5e60fffe851e0530707a8505315
|
/3_Getting_And_Cleaning_Data/Final Project/run_analysis.R
|
6676101a1933f487b36f728f6010692c581723bd
|
[] |
no_license
|
jlucassen/datasciencecoursera
|
285686ccad3c66e3e4b0f3f9b4913831da116200
|
60e28e203b9fe5ec6e723b6a88634572359feed2
|
refs/heads/master
| 2022-12-09T00:25:38.791263
| 2020-08-20T01:40:31
| 2020-08-20T01:40:31
| 258,053,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,449
|
r
|
run_analysis.R
|
# Getting & Cleaning Data project: merge the UCI HAR test/train sets, keep
# mean/std features, attach readable activity labels and subject ids, and
# write both the tidied data and a per-subject/activity mean summary.
# NOTE(review): uses dplyr (%>%, group_by, summarise_all) -- assumes
# library(dplyr) is attached earlier in the script; confirm.
#pull in data, bind test and train data
xTest <- read.table("UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("UCI HAR Dataset/test/y_test.txt")
xTrain <- read.table("UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("UCI HAR Dataset/train/y_train.txt")
x <- rbind(xTest, xTrain)
y <- rbind(yTest, yTrain)
#pull in column names, strip "-" and "()" so names are syntactically clean
features <- read.table("UCI HAR Dataset/features.txt")
allFields <- features[,2]
allFields <- gsub("-", "", allFields)
allFields <- gsub("\\(\\)", "", allFields)
#set column names, use to filter to mean/std columns only
colnames(x) <- allFields
selectedFields <- grep("mean|std", allFields, value = TRUE)
data <- x[selectedFields]
#pull in activity labels, translate y integer codes to names, attach to data
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
activities <- factor(y[,1], activity_labels$V1, activity_labels$V2)
data$activity <- activities
#pull in subjects, merge test/train, attach to data
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
subjects <- rbind(subject_test, subject_train)
data$subject <- subjects[,1]
#calculate second set of data: mean of every feature by subject and activity
data2 <- data %>%
group_by(subject, activity) %>%
summarise_all(mean) # FIX: funs() is deprecated/removed in modern dplyr; pass the function directly
#output data files
write.table(data, "tidied_data.txt", row.names = FALSE)
write.table(data2, "tidied_data_final.txt", row.names = FALSE)
|
840465b157e95915e4a00ff534f2a6f24b07e8f5
|
1858e5b278188097332bcddba934f970c614e185
|
/prep_datav03.R
|
b1d5ee7a0767a222a1ec41d95c4d4dd95d1cb96a
|
[] |
no_license
|
ChristopherSP/Generalized_Model_Fitter_R
|
6a32275a910fc60a85e7857ad2ac85327c9dffb8
|
88a7ff140d9c7d3a8db300a1b6756c76c3d97746
|
refs/heads/master
| 2022-12-19T17:31:29.292244
| 2020-10-12T02:40:10
| 2020-10-12T02:40:10
| 299,157,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,851
|
r
|
prep_datav03.R
|
#################################################################################################
# Preparing Data
#################################################################################################
# Cleans and subsets the input data.frame (a data.table) for model fitting:
#  - normalizes column names to lowercase ASCII snake_case
#  - coerces the configured numeric/categorical columns
#  - splits rows into active ("Ativo") and closed ("Encerrado") cases,
#    scales numeric columns with the closed-case statistics (avoids leakage
#    from active cases), and removes closed-case outliers.
# NOTE(review): results are delivered via global assignment (`<<-`) of
# `ativos`, `encerrados` and `scale_info_enc` rather than a return value,
# and the function relies on a global `variables` config plus helpers
# scale_vals(), scale_unseen_data(), outlier_detection() defined elsewhere.
prep_data = function(df){
dt = copy(df)
#lowercase, strip accents and punctuation, collapse spaces to underscores
names(dt) = stri_replace_all_fixed(stri_trim_both(gsub(' +',' ',gsub('[^[:alnum:][:space:]_]','',tolower(stri_trans_general(names(dt),'latin-ascii')))))," ","_")
# get scale information. usefull when splitting the data in train and test sets
numeric_cols = unique(c(variables$dependentVariables$numerical, variables$independentVariables$ProcessDuration, variables$independentVariables$SentenceValue, variables$independentVariables$AgreementValue))
categorical_cols = unique(c(variables$dependentVariables$categorical, variables$independentVariables$Sentence, variables$independentVariables$AgreementPropensity))
dt = dt[,.SD,.SDcols = intersect(names(dt),c("pasta","status", numeric_cols, categorical_cols, unlist(variables$independentVariables)))]
dt[,c(numeric_cols) := lapply(.SD,as.numeric),.SDcols = numeric_cols]
dt[,c(categorical_cols) := lapply(.SD,as.factor),.SDcols = categorical_cols]
ativos = dt[status == "Ativo"]
encerrados = dt[status == "Encerrado"]
encerrados = encerrados[sentenca %in% variables$filterLabels$Regression]
scale_info_enc = scale_vals(encerrados, numeric_cols)
# scale numeric columns
# dt[, c(numeric_cols) := lapply(.SD, scale), .SDcols=numeric_cols]
encerrados = scale_unseen_data(encerrados,info = scale_info_enc)
ativos <<- scale_unseen_data(ativos,info = scale_info_enc)
# eliminates outliers
enc_outlier = outlier_detection(encerrados, numeric_cols, by="sentenca")
#NOTE(review): idx_enc is computed but never used or exported -- confirm
idx_enc = enc_outlier$idx
encerrados <<- enc_outlier$dt
scale_info_enc <<- scale_info_enc
}
|
89a493d04fc4352de0550cb39d09b232d6d70e64
|
6a6ca838a0b0ac552cfe7745af6946623a648c9e
|
/entropy_binomial.R
|
3568710ad84c26e833ddc57b890842bd3974481a
|
[] |
no_license
|
myforkz/probability
|
dc7d6277263c68519a9837235b4bd6356c82db1e
|
c1e28df825076a7416dcbe3911f546bb0523331e
|
refs/heads/master
| 2022-03-02T12:34:44.874788
| 2019-06-28T14:46:48
| 2019-06-28T14:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,634
|
r
|
entropy_binomial.R
|
library(tidyverse)
library(RColorBrewer)
library(cowplot)
# Simulate one candidate distribution over the four draw outcomes
# {ww, bw, wb, bb} whose expected number of blue marbles equals exp_val,
# returning its Shannon entropy H and the probability vector p.
sim_p <- function(exp_val) {
  x123 <- runif(3)
  # Solve for the fourth (unnormalized) weight so that, after normalization,
  # the constraint 0*p1 + 1*p2 + 1*p3 + 2*p4 = exp_val holds.
  x4 <- (exp_val * sum(x123) - x123[2] - x123[3]) / (2 - exp_val)
  weights <- c(x123, x4)
  p <- weights / sum(weights)
  list(H = -sum(p * log(p)), p = p)
}
#Run 1e6 simulations at expected value 1.4; H is a 2 x 1e6 list-matrix with
#row 1 = entropies and row 2 = probability vectors (each of length 4)
H <- replicate(1e6, sim_p(exp_val = 1.4))
#Long-format tibble: one row per (simulation, outcome position)
df <-
tibble(
entropy = rep(unlist(H[1, ]), each = 4),
prob = unlist(H[2, ]),
pos = rep(c(1, 2, 3, 4), times = 4000000 / 4)
)
#Analytic binomial probabilities for p(blue) = 0.7 over two draws
binom_prob <- c((1 - 0.7)^2,
0.7 * (1 - 0.7),
(1 - 0.7) * 0.7 ,
0.7^2)
#NOTE(review): binom_df is built but not referenced in the plots below --
#confirm whether it was meant to be overlaid on plt_2
binom_df <-
tibble(
pos = 1:4,
prob = binom_prob
)
#Density of simulated entropies, with reference lines at H = 0.8, H = 1.1,
#and the maximum entropy observed across all simulations
plt_1 <-
ggplot(
data = df %>% filter(pos == 1),
aes(x = entropy)
) +
geom_density(fill = "grey50", bw = 0.0001) +
geom_vline(xintercept = max(df$entropy),
color = brewer.pal(n = 4, name = "Dark2")[4],
size = 1) +
geom_vline(xintercept = 1.1,
color = brewer.pal(n = 3, name = "Dark2")[2],
size = 1) +
geom_vline(xintercept = 0.8,
color = brewer.pal(n = 3, name = "Dark2")[1],
size = 1) +
annotate(geom = "text", label = "H = 0.8",
x = 0.79, y = 2,
hjust = 1,
color = brewer.pal(n = 3, name = "Dark2")[1],
fontface = "bold") +
annotate(geom = "text", label = "H = 1.1",
x = 1.09, y = 18,
hjust = 1,
color = brewer.pal(n = 3, name = "Dark2")[2],
fontface = "bold") +
annotate(geom = "text", label = "H = 1.22",
x = 1.215, y = 38,
hjust = 1,
color = brewer.pal(n = 4, name = "Dark2")[4],
fontface = "bold") +
coord_cartesian(xlim = c(0.6, 1.25),
ylim = c(0, 43),
expand = FALSE) +
labs(
x = "Entropy (H)",
y = "Density"
) +
theme_classic() +
theme(
text = element_text(family = "Gill Sans MT"),
axis.title = element_text(size = 12),
axis.text.x = element_text(size = 10),
axis.text.y = element_blank(),
axis.ticks.y = element_blank()
)
#Probability profile of the maximum-entropy simulation
plt_2 <-
ggplot(
data = df %>% filter(entropy %in% c(max(df$entropy))),
aes(x = pos, y = prob,
group = factor(entropy))
) +
geom_line(color = brewer.pal(n = 4, name = "Dark2")[4],
size = 1) +
geom_point(color = brewer.pal(n = 4, name = "Dark2")[4]) +
labs(
y = "Probability",
x = "Draw Result"
) +
coord_cartesian(ylim = c(0, 0.7)) +
scale_x_continuous(labels = c("ww", "bw", "wb", "bb")) +
theme_classic() +
theme(
text = element_text(family = "Gill Sans MT"),
axis.title = element_text(size = 12),
axis.text = element_text(size = 10)
)
#Probability profile of the simulation with the largest entropy below ~1.1
plt_3 <-
ggplot(
data = df %>% filter(entropy %in% c(max(df$entropy[df$entropy < 1.1001]))),
aes(x = pos, y = prob,
group = factor(entropy))
) +
geom_line(color = brewer.pal(n = 3, name = "Dark2")[2],
size = 1) +
geom_point(color = brewer.pal(n = 3, name = "Dark2")[2]) + labs(
y = "Probability",
x = "Draw Result"
) +
coord_cartesian(ylim = c(0, 0.7)) +
scale_x_continuous(labels = c("ww", "bw", "wb", "bb")) +
theme_classic() +
theme(
text = element_text(family = "Gill Sans MT"),
axis.title = element_text(size = 12),
axis.text = element_text(size = 10)
)
#Probability profile of the simulation with the largest entropy below ~0.8
plt_4 <-
ggplot(
data = df %>% filter(entropy %in% c(max(df$entropy[df$entropy < 0.80001]))),
aes(x = pos, y = prob,
group = factor(entropy))
) +
geom_line(color = brewer.pal(n = 3, name = "Dark2")[1],
size = 1) +
geom_point(color = brewer.pal(n = 3, name = "Dark2")[1]) +
coord_cartesian(ylim = c(0, 0.7)) +
scale_x_continuous(labels = c("ww", "bw", "wb", "bb")) +
labs(
y = "Probability",
x = "Draw Result",
caption = "Graphic by Ben Andrew | @BenYAndrew"
) +
theme_classic() +
theme(
text = element_text(family = "Gill Sans MT"),
axis.title = element_text(size = 12),
axis.text = element_text(size = 10)
)
#Assemble the four panels into a 2x2 grid, add a title row, and save to JPEG
grid <- plot_grid(plt_1, plt_2, plt_3, plt_4,
align = "hv", ncol = 2)
title <- ggdraw() +
draw_label("The Binomial Distribution as a Maximum Entropy Distribution",
fontface = 'bold',
fontfamily = "Gill Sans MT")
grid_b <- plot_grid(title, grid, ncol = 1, rel_heights = c(0.1, 1))
ggsave("figures/entropy_binomial.jpeg", grid_b,
height = 9, width = 9, device = "jpeg")
|
2f7b5e202933eac7c861206d0c9d91ca10c2c198
|
7a9a8fb85481a80124bb1004eb3f4cfb46cdbede
|
/program1.R
|
7b6f6c8e32e2f896c5340947f9c7ac67b33dfc23
|
[] |
no_license
|
xinyizhao123/Predicting-Future-Ambient-Ozone
|
6459a9eef144bbf68416522f1987cf60f87af6bd
|
1b682e4fcc16f443b4d3d8c9216cb5f823ac2986
|
refs/heads/master
| 2020-05-25T14:58:23.133324
| 2016-10-06T00:52:51
| 2016-10-06T00:52:51
| 69,671,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,433
|
r
|
program1.R
|
##########################################################################################
# This program is to check, clean and subset data of each parameter by year (2000-2011)
# Programmer: Xinyi Zhao
# Date: 09/21/2015
##########################################################################################
# NOTE(review): setwd() hard-codes a local Windows path; a relative path or a
# parameter would make the script portable.
setwd("H:/AQproject/data/data files/level 1 - raw data/main parameters")
#################################
# VOC Data #
#################################
# Read the 2011 PAMS VOC raw file, keep parameter 43102 at 7-(unit) duration
# (TODO confirm the parameter/duration meaning), subset to the study
# counties, and write VOC_2011.csv.
z1 <- read.table("RD_501_PAMSVOC_2011-0.txt", sep = "|", stringsAsFactors = F, fill = T)
z1$V3 <- as.numeric(z1$V3)
z1 <- z1[-c(1,2, 14:28)]
names(z1) <- c("state", "county", "site", "parameter", "POC",
"duration", "unit", "method", "date", "time", "value")
z1$location <- paste(z1$state, z1$county, sep = ', ')
z1$siteid <- paste(z1$state, z1$county, z1$site, sep = '-')
z1 <- data.frame(z1, year=rep(2011, nrow(z1)))
z11 <- z1[z1$parameter == 43102 & z1$duration == 7,]
# drop literal "NA" markers from the raw file (value column is character)
z11 <- subset(z11, z11$value!="NA")
# Study locations as "state, county" codes; %in% replaces the original
# 28-branch chain of | comparisons (selects the identical rows).
voc_locations <- c("18, 89", "22, 33", "51, 33", "25, 9", "44, 7",
"17, 31", "25, 13", "44, 3", "22, 47", "6, 37",
"13, 223", "13, 247", "13, 89", "48, 141", "48, 167",
"48, 201", "18, 127", "24, 5", "25, 25", "6, 65",
"18, 163", "18, 97", "19, 113", "19, 153", "19, 163",
"48, 113", "48, 121", "46, 99")
z111 <- subset(z11, z11$location %in% voc_locations)
#head(z111)
#table(z111$unit)
#table(z111$location)
#sum(is.na(z111$value))
#tapply(z111$date, z111$location, table)
write.csv(z111, "VOC_2011.csv")
#install.packages("xtable")
#library(xtable)
#tb <- xtable(table(z$location))
#################################
# NOx Data #
#################################
######################################################################################
# double check the daily data to see if it is adjusted daily mean or not
### hourly data (old version)
# Read the 2000 hourly NOx-family raw file, subset to the study counties,
# compute per-site daily means/observation counts, and keep near-complete
# days (one row per day via the 00:00 record).
y1 <- read.table("RD_501_42603_2000-0.txt", sep = "|", stringsAsFactors = F)
y1$V3 <- as.numeric(y1$V3)
y1 <- y1[-c(1,2, 14:28)]
names(y1) <- c("state", "county", "site", "parameter", "POC",
"duration", "unit", "method", "date", "time", "value")
y1$location <- paste(y1$state, y1$county, sep = ', ')
y1$siteid <- paste(y1$state, y1$county, y1$site, sep = '-')
y1 <- data.frame(y1, year=rep(2000, nrow(y1)))
y1 <- subset(y1, y1$value!="NA")
# Same study counties as the VOC section; %in% replaces the original
# 28-branch | chain (selects the identical rows).
nox_locations <- c("18, 89", "22, 33", "51, 33", "25, 9", "44, 7",
"17, 31", "25, 13", "44, 3", "22, 47", "6, 37",
"13, 223", "13, 247", "13, 89", "48, 141", "48, 167",
"48, 201", "18, 127", "24, 5", "25, 25", "6, 65",
"18, 163", "18, 97", "19, 113", "19, 153", "19, 163",
"48, 113", "48, 121", "46, 99")
y11 <- subset(y1, y1$location %in% nox_locations)
y111 <- y11
y111$ave = rep(0,dim(y111)[1])
y111$nobs = rep(0,dim(y111)[1])
# Per-site daily means and hourly observation counts, broadcast back onto
# the hourly rows via rep(times = ...). (The original's unused tmp.date
# lookup inside the loop was removed.)
tmp.id=unique(y11$siteid)
for (iid in tmp.id){
tmp=tapply(y11[which(y11$siteid==iid),"value"],y11[which(y11$siteid==iid),"date"],mean)
tmp.len=tapply(y11[which(y11$siteid==iid),"date"],y11[which(y11$siteid==iid),"date"],length)
y111$ave[which(y11$siteid==iid)]=rep(tmp,times=tmp.len)
y111$nobs[which(y11$siteid==iid)]=rep(tmp.len,times=tmp.len)
}
# Keep near-complete days (>= 23 hourly obs), collapse to one row per day
# (the 00:00 record), and flag fully complete (24-obs) days.
y111 <- subset(y111, y111$nobs>=23)
y111 <- subset(y111, y111$time=="00:00")
y111$complete <- as.numeric(y111$nobs == 24)
y111 <- y111[-c(11)]
#head(y11)
#table(y11$unit)
#sum(is.na(y11$value))
#tapply(y11$date, y11$location, table)
#tb <- xtable(table(y1$location))
# The daily mean is not adjusted, and the new version data are consistent with old version data
# For convenience, use new version data (daily)
#################################################################################################
### daily data (new version)
# The NOx and ozone extracts go through an identical cleaning pipeline, so the
# shared steps are factored into one helper instead of two copy-pasted sections.

# Target counties as "state, county" FIPS pairs (same set as the hourly check above).
target_locations_2011 <- c(
  "18, 89", "22, 33", "51, 33", "25, 9", "44, 7", "17, 31", "25, 13",
  "44, 3", "22, 47", "6, 37", "13, 223", "13, 247", "13, 89", "48, 141",
  "48, 167", "48, 201", "18, 127", "24, 5", "25, 25", "6, 65", "18, 163",
  "18, 97", "19, 113", "19, 153", "19, 163", "48, 113", "48, 121", "46, 99"
)

# Read one AQS daily file, keep one parameter/duration at the target counties,
# flag near-complete days (>= 23 hourly obs; quality == 1 means all 24 present),
# and write the filtered table to `outfile`. Returns the table invisibly.
process_daily_file <- function(infile, parameter_code, sample_duration, outfile,
                               year = 2011, locations = target_locations_2011) {
  x1 <- read.csv(infile, stringsAsFactors = FALSE)
  x1$State.Code <- as.numeric(x1$State.Code)
  x1 <- x1[-c(11, 16, 20, 29)]  # drop unused columns (by position in the AQS layout)
  x1$location <- paste(x1$State.Code, x1$County.Code, sep = ", ")
  x1$siteID <- paste(x1$State.Code, x1$County.Code, x1$Site.Num, sep = "-")
  x1$year <- year
  x11 <- x1[x1$Parameter.Code == parameter_code & x1$Sample.Duration == sample_duration, ]
  # Missing means make the comparison NA; subset() drops NA-condition rows.
  x11 <- subset(x11, Arithmetic.Mean != "NA")
  x111 <- subset(x11, location %in% locations)
  x111 <- subset(x111, Observation.Count >= 23)
  x111$quality <- as.numeric(x111$Observation.Count == 24)
  write.csv(x111, outfile)
  invisible(x111)
}

# NO2 (parameter 42603), hourly-duration daily summaries.
process_daily_file("daily_NONOxNOy_2011.csv", 42603, "1 HOUR", "NOx_2011.csv")

#################################
#           OZone Data          #
#################################
# Ozone (parameter 44201), 8-hour running-average daily summaries.
process_daily_file("daily_44201_2011.csv", 44201, "8-HR RUN AVG BEGIN HOUR", "ozone_2011.csv")
|
cff64e34d5d4aac0b1782b988bb2c5c7ff0388ed
|
387262d2c0dea8a553bf04e3ff263e14683ea404
|
/R&S_app_v1/appModules_Testing/buildSulfateModule.R
|
2114ba8d89d75e56fe63275dccc8832cec59959f
|
[] |
no_license
|
EmmaVJones/Rivers-StreamsAssessment
|
3432c33d7b53714d3288e1e3fee335dd6fb2af1c
|
580cfaa7edbd7077a2627a128a02c3c6ee195f4d
|
refs/heads/master
| 2020-04-24T09:40:03.613469
| 2019-10-18T13:44:07
| 2019-10-18T13:44:07
| 171,486,365
| 0
| 0
| null | 2019-02-19T20:18:56
| 2019-02-19T14:17:14
|
R
|
UTF-8
|
R
| false
| false
| 6,046
|
r
|
buildSulfateModule.R
|
# Ad-hoc test harness setup for the sulfate module: load the shared testing
# dataset, build a single-HUC dataset joined to station metadata, and pull one
# assessment unit's data for interactive development.
# NOTE(review): relies on objects created by testingDataset.R (conventionals,
# stationTable, WQSvalues) -- confirm that script is current before running.
source('testingDataset.R')
monStationTemplate <- read_excel('data/tbl_ir_mon_stations_template.xlsx') # from X:\2018_Assessment\StationsDatabase\VRO
# Restrict to HUC 'JU52' and attach station class/standards identifiers.
conventionals_HUC<- filter(conventionals, Huc6_Vahu6 %in% 'JU52') %>%
left_join(dplyr::select(stationTable, FDT_STA_ID, SEC, CLASS, SPSTDS, ID305B_1, ID305B_2, ID305B_3), by='FDT_STA_ID')
# Keep three assessment units (AUs) and attach water quality standards by CLASS.
AUData <- filter(conventionals_HUC, ID305B_1 %in% 'VAW-I25R_HAM01A02' |
ID305B_1 %in% 'VAW-I25R_CAT04D12' |
ID305B_1 %in% 'VAW-I25R_CAT04C04')%>%
left_join(WQSvalues, by = 'CLASS')
# Single-station slice used for inspection while developing.
x <-filter(AUData, FDT_STA_ID %in% '2-HAM000.37')
# No Assessment functions bc no std
DSulfatePlotlySingleStationUI <- function(id){
  # Module UI: a single well panel holding the station-picker placeholder
  # (rendered server-side), the total/dissolved sulfate toggle, and the
  # plotly output. All input/output IDs are namespaced with NS(id).
  ns <- NS(id)
  station_picker <- uiOutput(ns('DSulfate_oneStationSelectionUI'))
  sulfate_toggle <- selectInput(ns('sulfateType'),
                                'Select Total or Dissolved Sulfate',
                                choices = c('Total Sulfate', 'Dissolved Sulfate'),
                                width = '30%')
  sulfate_plot <- plotlyOutput(ns('DSulfateplotly'))
  tagList(
    wellPanel(
      h4(strong('Single Station Data Visualization')),
      station_picker,
      sulfate_toggle,
      sulfate_plot
    )
  )
}
# Server half of the sulfate visualization module.
#
# Module args:
#   AUdata:               reactive returning the assessment-unit data frame.
#   stationSelectedAbove: reactive returning the station chosen in the parent UI.
#
# Renders a station picker and a plotly chart of either dissolved sulfate
# (with shaded stress-probability bands) or total sulfate (with a criteria line).
DSulfatePlotlySingleStation <- function(input,output,session, AUdata, stationSelectedAbove){
ns <- session$ns
# Select One station for individual review
output$DSulfate_oneStationSelectionUI <- renderUI({
req(stationSelectedAbove)
selectInput(ns('DSulfate_oneStationSelection'),strong('Select Station to Review'),choices= sort(unique(c(stationSelectedAbove(),AUdata()$FDT_STA_ID))),#unique(AUdata())$FDT_STA_ID,
width='300px', selected = stationSelectedAbove())})# "2-JMS279.41" )})
# Data for the currently selected station only.
DSulfate_oneStation <- reactive({
req(ns(input$DSulfate_oneStationSelection))
filter(AUdata(),FDT_STA_ID %in% input$DSulfate_oneStationSelection)})
output$DSulfateplotly <- renderPlotly({
req(input$DSulfate_oneStationSelection, DSulfate_oneStation(), input$sulfateType)
if(input$sulfateType == 'Dissolved Sulfate'){
dat <- DSulfate_oneStation()
dat$SampleDate <- as.POSIXct(dat$FDT_DATE_TIME2, format="%m/%d/%y")
# Y-axis ceiling: 100 when all values are below 75, else 20% above the max.
maxheight <- ifelse(max(dat$SULFATE_DISS, na.rm=T) < 75, 100, max(dat$SULFATE_DISS, na.rm=T)* 1.2)
# Four stacked rectangles spanning the sampled date range; the breakpoints
# (0/10/25/75) delimit the stress-probability bands drawn behind the data.
box1 <- data.frame(SampleDate = c(min(dat$SampleDate), min(dat$SampleDate), max(dat$SampleDate),max(dat$SampleDate)), y = c(75, maxheight, maxheight, 75))
box2 <- data.frame(x = c(min(dat$SampleDate), min(dat$SampleDate), max(dat$SampleDate),max(dat$SampleDate)), y = c(25, 75, 75, 25))
box3 <- data.frame(x = c(min(dat$SampleDate), min(dat$SampleDate), max(dat$SampleDate),max(dat$SampleDate)), y = c(10, 25, 25, 10))
box4 <- data.frame(x = c(min(dat$SampleDate), min(dat$SampleDate), max(dat$SampleDate),max(dat$SampleDate)), y = c(0, 10, 10, 0))
plot_ly(data=dat)%>%
add_polygons(x = ~SampleDate, y = ~y, data = box1, fillcolor = "firebrick",opacity=0.6, line = list(width = 0),
hoverinfo="text", name =paste('High Probability of Stress to Aquatic Life')) %>%
add_polygons(data = box2, x = ~x, y = ~y, fillcolor = "#F0E442",opacity=0.6, line = list(width = 0),
hoverinfo="text", name =paste('Medium Probability of Stress to Aquatic Life')) %>%
add_polygons(data = box3, x = ~x, y = ~y, fillcolor = "#009E73",opacity=0.6, line = list(width = 0),
hoverinfo="text", name =paste('Low Probability of Stress to Aquatic Life')) %>%
add_polygons(data = box4, x = ~x, y = ~y, fillcolor = "#0072B2",opacity=0.6, line = list(width = 0),
hoverinfo="text", name =paste('No Probability of Stress to Aquatic Life')) %>%
add_markers(data=dat, x= ~SampleDate, y= ~SULFATE_DISS,mode = 'scatter', name="Dissolved Sulfate (mg/L)",marker = list(color= '#535559'),
hoverinfo="text",text=~paste(sep="<br>",
paste("Date: ",SampleDate),
paste("Depth: ",FDT_DEPTH, "m"),
paste("Dissolved Sulfate: ",SULFATE_DISS,"mg/L")))%>%
layout(showlegend=FALSE,
yaxis=list(title="Dissolved Sulfate (mg/L)"),
xaxis=list(title="Sample Date",tickfont = list(size = 10)))
}else{
# Total sulfate: flat reference line at 250 (per its trace name, the PWS
# criterion of 250,000 ug/L) plus the measured points.
dat <- mutate(DSulfate_oneStation(), top = 250)
dat$SampleDate <- as.POSIXct(dat$FDT_DATE_TIME2, format="%m/%d/%y")
plot_ly(data=dat)%>%
add_lines(data=dat, x=~SampleDate,y=~top, mode='line', line = list(color = 'black'),
hoverinfo = "none", name="Sulfate PWS Criteria (250,000 ug/L)") %>%
add_markers(data=dat, x= ~SampleDate, y= ~SULFATE_TOTAL,mode = 'scatter', name="Total Sulfate (mg/L)", marker = list(color= '#535559'),
hoverinfo="text",text=~paste(sep="<br>",
paste("Date: ",SampleDate),
paste("Depth: ",FDT_DEPTH, "m"),
paste("Total Sulfate: ",SULFATE_TOTAL," (mg/L)")))%>%
layout(showlegend=FALSE,
yaxis=list(title="Total Sulfate (mg/L)"),
xaxis=list(title="Sample Date",tickfont = list(size = 10)))
}
})
}
# Standalone test-app wiring for the sulfate module.
ui <- fluidPage(
# NOTE(review): this helpText mentions Specific Conductivity but the module
# shown is sulfate -- likely a copy-paste remnant; confirm the intended text.
helpText('Review each site using the single site visualization section. There are no WQS for Specific Conductivity.'),
DSulfatePlotlySingleStationUI('DSulfate')
)
server <- function(input,output,session){
# NOTE(review): input$stationSelection has no corresponding control in `ui`
# above, so stationData/stationSelected cannot fire in this test app --
# presumably supplied by the full application; confirm.
stationData <- eventReactive( input$stationSelection, {
filter(AUData, FDT_STA_ID %in% input$stationSelection) })
stationSelected <- reactive({input$stationSelection})
# Same three assessment units as the top-of-file AUData, but as a reactive.
AUData <- reactive({filter(conventionals_HUC, ID305B_1 %in% 'VAW-I25R_HAM01A02' |
ID305B_1 %in% 'VAW-I25R_CAT04D12' |
ID305B_1 %in% 'VAW-I25R_CAT04C04')%>%
left_join(WQSvalues, by = 'CLASS')})
callModule(DSulfatePlotlySingleStation,'DSulfate', AUData, stationSelected)
}
shinyApp(ui,server)
|
c4ef15319b240321607718bb31ef64d5270a8f0c
|
dcf40ba9b2bd9101d9deaa5b1e6cf23a0bea30ae
|
/Scripts/GLM_Prediction_Model.R
|
7a0c3748d43a834d7bd484a211efdf33cf8ab90a
|
[] |
no_license
|
GeorgetownMcCourt/Predicting-Recidivism
|
e060cfea2d91bb2147fd03aca6985233f1b532e3
|
51ea2bfc91863fc9791ebb2f56aecada68df5062
|
refs/heads/master
| 2021-01-19T11:29:02.265142
| 2017-05-09T21:04:31
| 2017-05-09T21:04:31
| 87,969,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,832
|
r
|
GLM_Prediction_Model.R
|
#Install.packages("mfx")
library(mfx) #Package to calculate marginal effects of logit model
#Creating dataframe of just potential model variables and then dropping NAs
# NOTE(review): `full.numeric` is created by an upstream cleaning script;
# confirm it is loaded before running this file.
model.var <- c("CH_CRIMHIST_COLLAPSED", "OFFENSE_VIOLENT", "OFFENSE_DRUG","OFFENSE_PROPERTY","SES_PHYSABUSED_EVER","CS_SENTENCEMTH",
"SES_PARENTS_INCARCERATED", "SES_FAMILY_INCARCERATED", "SES_HASCHILDREN", "AGE_CAT",
"SES_SEXABUSED_EVER", "DRUG_ANYREG", "DRUG_ANYTME", "black.nh", "hispanic", "asian", "state", "EDUCATION","SES_FATHER_INCARCERATED",
"DRUG_COCRKTME", "DRUG_HROPTME", "DRUG_METHATME", "LIFE_SENTENCE", "GENDER", "TYPEOFFENSE", "DRUG_MARIJTME",
"CH_PRIORARREST_CAT", "SES_LIVE_CHILD_ARREST", "DRUG_ABUSE_ONLY", "DRUG_TRT")
model.data <- full.numeric[model.var]
model.data <- model.data[complete.cases(model.data),]
###Setting up Train/Test/Validate###
# Random ~70/15/15 train/test/validate split, reproducible via the fixed seed.
set.seed(42)
rand <- runif(nrow(model.data))
trainset <- model.data[rand >= 0.3,]
testset <- model.data[rand >= 0.15 & rand < 0.3,]
valset <- model.data[rand < 0.15,]
#Set up Mean-F1#
meanf1 <- function(actual, predicted){
  # Weighted mean F1 score across the classes present in `actual`.
  #
  # For each class k: weight = prevalence of k in `actual`,
  # precision = TP / predicted-positive, recall = TP / actual-positive, and the
  # class score is weight * F1(precision, recall). The returned value is the
  # sum of class scores. As in the original, a class that is never predicted
  # yields precision 0/0 = NaN, which propagates to the result.
  #
  # Args:
  #   actual:    vector of true labels.
  #   predicted: vector of predicted labels, same length as `actual`.
  # Returns: a single numeric score in [0, 1] (or NaN, see above).
  #
  # Fix: the original grew a data.frame with rbind() inside the loop (O(n^2)
  # copying); per-class scores are now computed directly with vapply().
  classes <- unique(actual)
  scores <- vapply(classes, function(k) {
    weight <- sum(actual == k) / length(actual)
    precision <- sum(predicted == k & actual == k) / sum(predicted == k)
    recall <- sum(predicted == k & actual == k) / sum(actual == k)
    weight * 2 * (precision * recall) / (precision + recall)
  }, numeric(1))
  sum(scores)
}
###First Predictive Model###
# Logistic regression: probability of being a recidivist given offense type,
# sentence length, family background, drug history, and demographic covariates.
glm.fit <- glm(CH_CRIMHIST_COLLAPSED ~ OFFENSE_VIOLENT + OFFENSE_DRUG + OFFENSE_PROPERTY + CS_SENTENCEMTH +
SES_PARENTS_INCARCERATED + SES_FAMILY_INCARCERATED + SES_HASCHILDREN + AGE_CAT +
SES_SEXABUSED_EVER + DRUG_ANYREG + state + GENDER + DRUG_COCRKTME + DRUG_HROPTME + DRUG_ANYTME + DRUG_METHATME +
CH_PRIORARREST_CAT + TYPEOFFENSE + DRUG_TRT + EDUCATION,
data = trainset,
family = binomial())
summary(glm.fit)
#Predict Train and Validate#
predict.glm.train <- predict(glm.fit, trainset, type = "response")
predict.glm.val <- predict(glm.fit, valset, type = "response")
##Mean F1 Calculations for cutoff of 0.5##
#Applying predicted labels
# (the logical vector is coerced to character by the subassignments below)
train.recid <- predict.glm.train > 0.5
train.recid[train.recid == TRUE] <- "Recidivist"
train.recid[train.recid == FALSE] <- "First Timer"
#Applying labels to trainset
train.real <- trainset$CH_CRIMHIST_COLLAPSED
train.real[train.real == 1] <- "Recidivist"
train.real[train.real == 0] <- "First Timer"
#Calculating Mean-F1 for training set
meanf1(train.real, train.recid) #.801
#Checking confusion matrix#
table(trainset$CH_CRIMHIST_COLLAPSED, train.recid) #High sensitivity, but low specificity. Probably not what we want. Adjusting cutoff
#Applying predicted labels to predicted set
val.recid <- predict.glm.val > 0.5
val.recid[val.recid == TRUE] <- "Recidivist"
val.recid[val.recid == FALSE] <- "First Timer"
#Applying labels to our original set
val.real <- valset$CH_CRIMHIST_COLLAPSED
val.real[val.real== 1] <- "Recidivist"
val.real[val.real == 0] <- "First Timer"
meanf1(val.real, val.recid) # ~.794
#Checking confusion matrix#
table(val.recid, val.real) #High sensitivity, but low specificity. Probably not what we want. Adjusting cutoff
##Mean F1 calculations for cutoff of 0.60##
#Applying predicted labels
train.recid <- predict.glm.train > 0.60
train.recid[train.recid == TRUE] <- "Recidivist"
train.recid[train.recid == FALSE] <- "First Timer"
# Mean F1
meanf1(train.real, train.recid) #.796
#Checking confusion matrix#
table(train.real, train.recid) #Pretty close to a good balance
#Applying predicted labels to validation set
val.recid <- predict.glm.val > 0.60
val.recid[val.recid == TRUE] <- "Recidivist"
val.recid[val.recid == FALSE] <- "First Timer"
# Mean F1
meanf1(val.real, val.recid) #.794
#Checking confusion matrix#
table(val.real, val.recid) #Still close to a good balance
##Calculating model with marginal effects
# Same specification refit on the full dataset with average marginal effects.
# (The leading "+" on the continuation lines is a harmless unary plus.)
logitmfx(CH_CRIMHIST_COLLAPSED ~ OFFENSE_VIOLENT + OFFENSE_DRUG + OFFENSE_PROPERTY + CS_SENTENCEMTH +
+ SES_PARENTS_INCARCERATED + SES_FAMILY_INCARCERATED + SES_HASCHILDREN + AGE_CAT +
+ SES_SEXABUSED_EVER + DRUG_ANYREG + state + GENDER + DRUG_COCRKTME + DRUG_HROPTME + DRUG_ANYTME + DRUG_METHATME +
+ CH_PRIORARREST_CAT + TYPEOFFENSE + DRUG_TRT + EDUCATION, data = full.numeric, atmean = FALSE, robust = TRUE)
|
275a1732623b555f918dfef1b620e80367484091
|
6a3a70fedc47ba6c4dccd6b05370b4e9aaded250
|
/man/threestep.Rd
|
c7bb6b23ded7ce52b532ce16faada39e013ed414
|
[] |
no_license
|
bmbolstad/affyPLM
|
59abc1d7762ec5de96e2e698a5665de4fddd5452
|
c6baedfc045824d9cdfe26cd82daaf55f9f1f3b4
|
refs/heads/master
| 2023-01-22T13:55:51.132254
| 2023-01-19T23:55:17
| 2023-01-19T23:55:17
| 23,523,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,250
|
rd
|
threestep.Rd
|
\name{threestep}
\alias{threestep}
\title{Three Step expression measures}
\description{
This function converts an
\code{\link[affy:AffyBatch-class]{AffyBatch}} into an
\code{\link[Biobase:class.ExpressionSet]{ExpressionSet}} using a three
step expression measure.
}
\usage{
threestep(object, subset=NULL, normalize=TRUE, background=TRUE,
background.method="RMA.2", normalize.method="quantile",
summary.method="median.polish", background.param=list(),
normalize.param=list(), summary.param=list(), verbosity.level=0)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{object}{an \code{\link[affy:AffyBatch-class]{AffyBatch}}.}
\item{subset}{a vector with the names of probesets to be used.
If \code{NULL}, then all probesets are used.}
\item{normalize}{logical value. If \code{TRUE} normalize data using
quantile normalization}
\item{background}{logical value. If \code{TRUE} background correct
using RMA background correction}
\item{background.method}{name of background method to use.}
\item{normalize.method}{name of normalization method to use.}
\item{summary.method}{name of summary method to use.}
\item{background.param}{list of parameters for background correction methods.}
\item{normalize.param}{list of parameters for normalization methods.}
\item{summary.param}{list of parameters for summary methods.}
\item{verbosity.level}{An integer specifying how much to print
out. Higher values indicate more verbose. A value of 0 will print nothing.}
}
\details{
This function computes the expression measure using threestep
  methods. Greater detail can be found in the package vignette.}
\value{
An \code{\link[Biobase:class.ExpressionSet]{ExpressionSet}}
}
\author{Ben Bolstad \email{bmb@bmbolstad.com}}
\references{Bolstad, BM (2004) \emph{Low Level Analysis of High-density
Oligonucleotide Array Data: Background, Normalization and
Summarization}. PhD Dissertation. University of California, Berkeley.}
\seealso{\code{\link[affy]{expresso}}, \code{\link[affy]{rma}}}
\examples{
if (require(affydata)) {
data(Dilution)
# should be equivalent to rma()
eset <- threestep(Dilution)
# Using Tukey Biweight summarization
eset <- threestep(Dilution, summary.method="tukey.biweight")
# Using Average Log2 summarization
eset <- threestep(Dilution, summary.method="average.log")
# Using IdealMismatch background and Tukey Biweight and no normalization.
eset <- threestep(Dilution, normalize=FALSE,background.method="IdealMM",
summary.method="tukey.biweight")
# Using average.log summarization and no background or normalization.
eset <- threestep(Dilution, background=FALSE, normalize=FALSE,
background.method="IdealMM",summary.method="tukey.biweight")
# Use threestep methodology with the rlm model fit
eset <- threestep(Dilution, summary.method="rlm")
# Use threestep methodology with the log of the average
# eset <- threestep(Dilution, summary.method="log.average")
# Use threestep methodology with log 2nd largest method
eset <- threestep(Dilution, summary.method="log.2nd.largest")
eset <- threestep(Dilution, background.method="LESN2")
}
}
\keyword{manip}
|
2c5d62ddc3036a0951bf341a84caea42bcc4576b
|
b6be947528044ce70dcbe383833236fb8061df2e
|
/phospho_network/regression/tables/write_phosphosites_for_protein_paint.R
|
36bbd288b99b9f02fe67bda7498f9d649ba47cb5
|
[] |
no_license
|
ding-lab/phospho-signaling
|
f93ddbb7589a566747c94d93e3e9dceb083cfe09
|
2b5dfe09a62bcb56c2d34e12013c1ef48eff9836
|
refs/heads/master
| 2023-08-11T02:59:51.635972
| 2021-09-15T23:07:39
| 2021-09-15T23:07:39
| 155,909,908
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,630
|
r
|
write_phosphosites_for_protein_paint.R
|
# Yige Wu @ March 2019 WashU
# show the number of associated substrate phosphosites per kinase with functional annotation, especially those that haven't been reported before
# source ------------------------------------------------------------------
# NOTE(review): hard-coded user path + setwd(); must be edited per machine.
baseD = "/Users/yigewu/Box\ Sync/"
setwd(baseD)
source('./cptac2p_analysis/preprocess_files/preprocess_files_shared.R')
source("./cptac2p_analysis/phospho_network/phospho_network_shared.R")
source("./cptac2p_analysis/phospho_network/phospho_network_plotting.R")
# input regression --------------------------------------------------------
# Kinase-substrate regression results annotated with mutation impact.
regression <- fread(input = paste0(ppnD, "regression/tables/annotate_regression_with_mut_impact/",
"regression_cptac2p_cptac3_tumor_reg_nonNA20_mut_impact_cancer_specificity_annotated.txt"),
data.table = F)
# Significance thresholds (named per enzyme type) consumed by
# adjust_regression_by_nonNA() -- confirm exact semantics in the shared source.
reg_sig <- c(0.05, 0.05); names(reg_sig) <- c("kinase", "phosphatase")
regression %>% nrow()
# Keep pairs from OmniPath sources other than NetKIN/PhosphoNetworks/MIMP
# (presumably prediction-based -- confirm) or present in PhosphoSitePlus.
regression <- regression %>%
filter(pair_pro %in% omnipath_tab$pair_pro[!(omnipath_tab$Source %in% c("NetKIN", "PhosphoNetworks", "MIMP"))] | pair_pro %in% psp_tab$pair_pro)
regression %>% nrow()
regression <- adjust_regression_by_nonNA(regression = regression, reg_nonNA = 20, reg_sig = reg_sig)
regression <- annotate_ks_source(regression = regression)
# set variables -----------------------------------------------------------
# genes2process <- c("MET")
# genes2process <- c("BRAF")
genes2process <- c("RAF1")
# genes2process <- c("PTK2")
# cancers2process <- c("CCRCC")
cancers2process <- unique(regression$Cancer)
cancer2ProteinPaintColor <- function(vector_cancer_type) {
  # Map cancer-type abbreviations to the class/color code strings expected by
  # ProteinPaint (BRCA intentionally maps to the literal string "deletion",
  # matching the original code).
  #
  # Args:
  #   vector_cancer_type: character (or factor) vector of cancer-type labels.
  # Returns: character vector of the same length; unrecognized types map to "".
  #
  # Fixes vs. original: NA inputs previously raised an error (NA logical
  # subassignment); the five repeated full-vector scans are replaced with a
  # single named-vector lookup.
  color_map <- c(CCRCC = "M",
                 UCEC  = "P",
                 CO    = "S",
                 OV    = "F",
                 BRCA  = "deletion")
  # as.character() so a factor input is matched by label, not by integer code.
  mapped <- unname(color_map[as.character(vector_cancer_type)])
  # Unknown or NA types get the empty string, the original default for non-matches.
  mapped[is.na(mapped)] <- ""
  mapped
}
# Write table -------------------------------------------------------------
# For each gene, write per-cancer files plus one combined file of regulated,
# single-site phosphosites in ProteinPaint input form (site;coordinate;color).
for (gene_tmp in genes2process) {
for (cancer_tmp in cancers2process) {
regression_tmp <- regression %>%
# filter(SELF == "cis") %>%
filter(regulated == T) %>%
filter(SUB_GENE %in% gene_tmp) %>%
filter(Cancer %in% cancer_tmp) %>%
# Residue coordinate = digits after the S/T/Y (e.g. "S338" -> "338").
mutate(p_coord = str_split_fixed(string = SUB_MOD_RSD, pattern = "[STY]", 3)[,2]) %>%
# Multi-site strings (e.g. "S338S339") produce a non-empty 3rd split piece;
# keep only single-site rows.
mutate(is_single = (str_split_fixed(string = SUB_MOD_RSD, pattern = "[STY]", 3)[,3] == "")) %>%
filter(is_single == T)
regression_tmp$color <- cancer2ProteinPaintColor(regression_tmp$Cancer)
table2w <- regression_tmp %>%
select(SUB_MOD_RSD, p_coord, color) %>%
unique()
write.table(x = table2w, file = paste0(makeOutDir(resultD = resultD), cancer_tmp, "_", gene_tmp, ".txt"), sep = ";", quote = F, row.names = F, col.names = F)
}
# Combined (all cancers) version of the same table for this gene.
regression_tmp <- regression %>%
# filter(SELF == "cis") %>%
filter(regulated == T) %>%
filter(SUB_GENE %in% gene_tmp) %>%
mutate(p_coord = str_split_fixed(string = SUB_MOD_RSD, pattern = "[STY]", 3)[,2]) %>%
mutate(is_single = (str_split_fixed(string = SUB_MOD_RSD, pattern = "[STY]", 3)[,3] == "")) %>%
filter(is_single == T)
regression_tmp$color <- cancer2ProteinPaintColor(regression_tmp$Cancer)
table2w <- regression_tmp %>%
select(SUB_MOD_RSD, p_coord, color) %>%
unique()
write.table(x = table2w, file = paste0(makeOutDir(resultD = resultD), gene_tmp, ".txt"), sep = ";", quote = F, row.names = F, col.names = F)
}
|
061c4e47ccdb3d823231feabcc2b59fc1703803f
|
84fe142bf6c0d612c2382418533f715796ab9292
|
/man/ordering.Rd
|
729bbd0291cc60d337cb2322e3de24eeee3d3fd0
|
[] |
no_license
|
jtourig/TSRexploreR
|
7d9176c0b01cdba8cd2da5cc84a741f55ba7154d
|
a6e8c51886b0e667c2d289ba782a1195e45337d3
|
refs/heads/main
| 2023-02-22T06:53:58.823269
| 2021-01-20T17:40:09
| 2021-01-20T17:40:09
| 331,359,638
| 0
| 0
| null | 2021-01-20T16:08:21
| 2021-01-20T16:08:20
| null |
UTF-8
|
R
| false
| true
| 516
|
rd
|
ordering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_conditionals.R
\name{ordering}
\alias{ordering}
\title{Ordering}
\usage{
ordering(..., .samples = NULL, .aggr_fun = mean)
}
\arguments{
\item{...}{Variables to order by.
Wrap the variable name in desc() for descending order (as in dplyr::arrange).}
\item{.samples}{Names of samples to order by aggregate score.}
\item{.aggr_fun}{If more than one sample is selected, feature values
are aggregated using this function.}
}
\description{
Ordering
}
|
8223d38c0f919e496588e8e4d7848f92faad18cc
|
396fb5e5e39e4490347cfa6927e60c601d86b735
|
/R/count.R
|
0ebcfaa9bbe31c77534c44ca8d28b92cfb9f0ff7
|
[] |
no_license
|
Xiuying/ggstat
|
1d58cb6ed8019abbf78df395d1ef2e6dbad8abc9
|
662b5d14a9e7ec2772d0759073d4f5477f2ff781
|
refs/heads/master
| 2021-05-31T06:46:08.859611
| 2016-05-09T22:29:17
| 2016-05-09T22:29:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
count.R
|
#' Count unique observations (vector).
#'
#' This function is very similar to table except that: it counts missing values
#' if present, can use weights, only does 1d, returns a 2 column data frame
#' instead of a named vector, and is much much faster.
#'
#' @param x A logical vector, a factor, a double or integer vector (or
#'   S3 object with \code{\link{restore}()} method), or a character vector.
#' @param w Optionally, a vector of weights. If present, weights are summed
#'   instead of counting observations. In other words, the default behaviour
#'   is to assign weight 1 to each point.
#' @export
#' @keywords internal
#' @return A data frame with columns:
#'  \item{x_}{value (same type as \code{x})}
#'  \item{count_}{number of observations/sum of weights (numeric)}
#' @examples
#' compute_count_vec(sample(100, 1e4, rep = TRUE))
#' compute_count_vec(sample(c(TRUE, FALSE, NA), 1e4, rep = TRUE))
compute_count_vec <- function(x, w = NULL) {
  if (is.null(w)) {
    w <- numeric(0)
  }
  # Dispatch to the type-specific counters (defined elsewhere in the package).
  # Factors must be tested before numerics: a factor is an integer underneath.
  if (is.factor(x)) {
    out <- count_factor(x, w)
  } else if (is.logical(x)) {
    out <- count_lgl(x, w)
  } else if (typeof(x) %in% c("double", "integer")) {
    out <- count_numeric(x, w)
    # Reattach attributes stripped by the counter (see restore()).
    out$x_ <- restore(x, out$x_)
  } else if (is.character(x)) {
    out <- count_string(x, w)
  } else {
    # Previously an unsupported type fell through and raised an opaque
    # "object 'out' not found" error; fail with an informative message instead.
    stop("Don't know how to count vector of type ", typeof(x), call. = FALSE)
  }
  `as.data.frame!`(out, length(out$x_))
  out
}
|
babc88fd274b4dde7d68b22987b3470a6ce8cbd6
|
7aec5ac37b2fb5bc3bc4c86036360a49123e53ef
|
/man/scale_colour_sugarpill.Rd
|
e63484c79c70fd679559b627101c23766bbdc66b
|
[
"MIT"
] |
permissive
|
fredryce/ggcute
|
118e84e2c761e7ff5cb47cb2977b107337c99995
|
dc357b5b0dec881aeccaaa6ed396d36e2126d9c7
|
refs/heads/master
| 2022-07-06T05:55:58.810464
| 2020-03-30T23:15:55
| 2020-03-30T23:15:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 730
|
rd
|
scale_colour_sugarpill.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sugarpill.R
\name{scale_colour_sugarpill}
\alias{scale_colour_sugarpill}
\alias{scale_color_sugarpill}
\title{Sugarpill color scale}
\usage{
scale_colour_sugarpill(discrete = TRUE, reverse = FALSE, ...)
scale_color_sugarpill(discrete = TRUE, reverse = FALSE, ...)
}
\arguments{
\item{discrete}{Whether the colour aesthetic is discrete or not}
\item{reverse}{Whether the palette should be reversed}
\item{...}{Additional arguments}
}
\description{
Sugarpill color scale
}
\examples{
library(ggplot2)
ggplot(nintendo_sales, aes(x = sales_million, y = console, colour = sales_type)) +
geom_point() +
scale_color_sugarpill() +
theme_sugarpill()
}
|
cf5a7a037fcf78d732389028ebe1381a845a5688
|
06221e13a73d03377669f968022676b22e434e9e
|
/analyses/from_james/global_GDP_research.R
|
5c4bc7fd352e21bed8125b13da6c54c75988c41a
|
[] |
no_license
|
baumlab/gya-research
|
c37c18f9406cbecb1cc6c079d5d4c4ea79c92ec2
|
4538afb6916b28e9e7b80ce641178bcaf8bf50ad
|
refs/heads/master
| 2022-08-26T16:22:54.926375
| 2022-08-23T23:13:27
| 2022-08-23T23:13:27
| 62,905,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,201
|
r
|
global_GDP_research.R
|
### creating plots for Megan Dodd + Julia Baum for global young academy document
## Aim: plot changes to 1) Basic research (%GDP) and
# 2) GDE on R&D (%GDP) over time for
# Canada, Australia, Netherlands, Israel, Poland, Spain, US.
setwd("/Users/jpwrobinson/Dropbox/R_PROJECTS_DATA/VISUALISATIONS/global-young-academy-gdp")
gdp<-read.csv("research_GDP_MDodd.csv")
gdp$GDP<-NULL
gdp$comb<-gdp$RE/gdp$GDRE
theme_set(theme_minimal())
ggplot(gdp, aes(Year, GDRE, col=Country)) + geom_point()
ggplot(gdp, aes(Year, GDRE, col=Country)) + geom_point() + facet_grid(Country~.)
## rearrange data frame
require(tidyr)
gdp1<-gather(gdp, "Year", "Country")
ggplot(gdp1, aes(Year,value, col=variable)) + geom_point() + facet_grid(Country~., scales="free")
ggplot(gdp1, aes(Year,value, col=variable)) + geom_line() + facet_grid(Country~., scales="free")
### placing on different panels either 1) hides trend by setting to same scale on y-axis;
### or 2) having different scales is misleading.
## so need all data on 1 panel.
g1<-ggplot(gdp, aes(Year, GDRE, col=Country)) + geom_point() +theme(legend.position = "none") #+ facet_grid(Country~.)
g2<-ggplot(gdp, aes(Year, RE, col=Country)) + geom_point() + theme(legend.position = "none") #+ facet_grid(Country~.)
grid.arrange(g1, g2, nrow=1)
# pdf(file="research_GDP_GYA.pdf", height=7, width=7)
theme_set(theme_bw())
ggplot(gdp1, aes(Year,value, fill=variable)) + geom_bar(stat="identity") +
facet_grid(Country~.) + theme(axis.title.y=element_text(vjust=0.9), axis.text.y= element_text(size=8), legend.position="left",strip.background=element_rect(fill = "white", colour = "white")) +
labs(x="Year", y="% GDP", fill="") + scale_fill_discrete(labels=c("GERD", "Basic research"))
ggplot(gdp1, aes(Year,value, fill=variable)) + geom_bar(stat="identity") +
facet_grid(Country~., scales="free") + theme(axis.title.y=element_text(vjust=0.9), axis.text.y= element_text(size=8), legend.position="left",strip.background=element_rect(fill = "white", colour = "white")) +
labs(x="Year", y="% GDP", fill="") + scale_fill_discrete(labels=c("GERD", "Basic research"))
ggplot(gdp1, aes(Year,value, col=variable)) + geom_point() +
facet_grid(Country~.) + theme(axis.title.y=element_text(vjust=0.9), axis.text.y= element_text(size=8), legend.position="left",strip.background=element_rect(fill = "white", colour = "white")) +
labs(x="Year", y="% GDP", colour="") + scale_colour_discrete(labels=c("GERD", "Basic research"))
ggplot(gdp1, aes(Year,value, col=variable)) + geom_point() +
facet_grid(Country~., scales="free") + theme(axis.title.y=element_text(vjust=0.9), axis.text.y= element_text(size=8), legend.position="left",strip.background=element_rect(fill = "white", colour = "white")) +
labs(x="Year", y="% GDP", colour="") + scale_colour_discrete(labels=c("GERD", "Basic research"))
# dev.off()
## plot for RE/GDRE (email from megan 18th Aug)
pdf(file="research_RE_GDRE_prop.pdf", height=7, width=7)
theme_set(theme_bw())
ggplot(gdp1[gdp1$variable=="comb",], aes(Year,value, fill=variable)) + geom_bar(stat="identity") +
facet_grid(Country~.) + theme(axis.title.y=element_text(vjust=0.9),legend.position="none", axis.text.y= element_text(size=8), legend.position="left",strip.background=element_rect(fill = "white", colour = "white")) +
labs(x="Year", y="RE as proportion of GDRE", fill="") + scale_fill_discrete(labels=c("GERD", "Basic research"))
dev.off()
vars <- data.frame(expand.grid(levels(gdp1$Country)))
colnames(vars) <- c("Country")
dat <- data.frame(x = rep(2002, 7), y = rep(0.5, 7), vars, labs=levels(gdp1$Country))
## change NAs to zeroes
## try area plot
ggplot(gdp1, aes(Year,value, fill=variable)) + geom_area(alpha=0.9,stat="identity") + scale_x_continuous(breaks=c(seq(1990, 2012, 2)),labels=c(seq(1990, 2012, 2)), minor_breaks=waiver(),limits=c(1990, 2013), expand = c(0, 0)) +
facet_grid(Country~., scales="free") + labs(x="", y="% GDP") + geom_text(aes(x, y, label=labs, group=NULL, fill=NULL),data=dat, col="white", fontface=2) +
theme(legend.position = "none",axis.line=element_line(colour="black", size=0.4, linetype="solid"), strip.text.y = element_text(size = 0, angle = 0))
|
be8a1c97a2026c5403de368904dcd23e0090ea4e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/KRIS/examples/cal.pc.projection.Rd.R
|
ab641d634532dd90582ca774fbf58c99297e913e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 846
|
r
|
cal.pc.projection.Rd.R
|
# Extracted example script for KRIS::cal.pc.projection (generated from its Rd file).
library(KRIS)
### Name: cal.pc.projection
### Title: Calculate linear principal component analysis (PCA) with
###   projection method for Single-nucleotide polymorphism (SNP) dataset.
### Aliases: cal.pc.projection
### ** Examples
## No test:
# Loads example objects simsnp and sample_labels (presumably a simulated SNP
# dataset shipped with KRIS -- see the package data documentation).
data(example_SNP)
#Create a random list of disease status, 1 = Control and 2 = Case
ind_status <- sample(c(1,2), size = length(sample_labels), replace = T)
# Default call: PCA with projection, returning components plus status/labels.
PCs <- cal.pc.projection(simsnp$snp, status = ind_status,
labels = sample_labels)
summary(PCs)
#Preview $PC
print(PCs$PC[1:5,1:3])
#Preview $status
print(PCs$status[1:3])
plot3views(PCs$PC[,1:3], PCs$label)
#Calculate the top 3 PCs
PCs <- cal.pc.projection(simsnp$snp, status = ind_status,
labels = sample_labels, no.pc = 3)
summary(PCs)
#Preview $PC
print(PCs$PC[1:5,1:3])
plot3views(PCs$PC[,1:3], PCs$label)
## End(No test)
|
5e7f644c3bb2d1734e27f199707748e616faf54e
|
af2743ea7d61bbaa13593e4ea2a920b75d6b45c6
|
/t3.R
|
ab47b4b7c5e21ce2a799732ca6544c2f4babd4ba
|
[] |
no_license
|
ruomengcui/DSP
|
d2d4a465c483aa73d23260f893a6f0aafbbf71d8
|
f71a59cb7e62e4343b6a3042e43e6b89549ccc44
|
refs/heads/master
| 2020-05-17T17:51:41.301127
| 2012-06-05T20:07:15
| 2012-06-05T20:07:15
| 4,564,943
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
t3.R
|
x <- function() {
  ## Take the rows of the global data frame `d` with samp == 2 and cid == 2.
  ## (Computed before the random gate below, so a missing `d` errors here.)
  selected <- d[d$samp == 2 & d$cid == 2, ]
  ## Randomly return NULL when a standard-normal draw falls below 0.5
  ## (this happens with probability pnorm(0.5), roughly 69% of the time).
  if (rnorm(1) < .5) {
    return()
  }
  selected
}
|
bbe16784686e5f0a156ab9ae46d17e6d7fb6ad1a
|
190aa0875e57ba772abfad3815386e1ba5aae489
|
/R/method/makeDesign2.R
|
7f5c1f9597a0978d7d634927b25be0ad228ecc73
|
[] |
no_license
|
bitmask/B-NEM
|
2a52802d25fe1b32a15ab41367f65245c3c7a5c1
|
3d19bcd6d7862a2518a152827a73f24773d4f140
|
refs/heads/master
| 2020-04-07T17:24:26.373091
| 2018-11-21T15:57:21
| 2018-11-21T15:57:21
| 158,568,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 760
|
r
|
makeDesign2.R
|
makeDesign2 <- function(x, stimuli, inhibitors, batches = NULL, runs = NULL) {
  ## Build per-condition 0/1 design matrices from the column names of `x`.
  ##
  ## For every condition name in c(stimuli, inhibitors, batches, runs) the
  ## column names of `x` are scanned twice:
  ##   * columns matching "^<name>" or "_<name>" mark the condition as
  ##     applied (returned in $stimuli),
  ##   * columns matching "!<name>" mark the condition as negated/inhibited
  ##     (returned in $inhibitors).
  ## Each returned matrix has one row per column of `x` and one column per
  ## condition name that matched at least one column.
  ##
  ## NOTE(review): condition names are interpolated into regular expressions
  ## unescaped, so names containing regex metacharacters will not match
  ## literally -- confirm callers only pass plain identifiers.
  ##
  ## Args:
  ##   x:          matrix (or similar) whose column names encode conditions.
  ##   stimuli:    character vector of stimulus names.
  ##   inhibitors: character vector of inhibitor names.
  ##   batches:    optional character vector of batch identifiers.
  ##   runs:       optional character vector of run identifiers.
  ## Returns:
  ##   list(stimuli = <0/1 matrix>, inhibitors = <0/1 matrix>). An element
  ##   is a zero-length numeric vector when nothing matched; the previous
  ##   version raised an error in that case (colnames<- on a dimensionless
  ##   vector).
  pos_cols <- list()
  pos_names <- character()
  neg_cols <- list()
  neg_names <- character()
  for (i in c(stimuli, inhibitors, batches, runs)) {
    pos_hit <- numeric(ncol(x))
    neg_hit <- numeric(ncol(x))
    pos_hit[grep(paste("^", i, "|_", i, sep = ""), colnames(x))] <- 1
    neg_hit[grep(paste("!", i, sep = ""), colnames(x))] <- 1
    if (sum(pos_hit) != 0) {
      pos_cols[[length(pos_cols) + 1L]] <- pos_hit
      pos_names <- c(pos_names, i)
    }
    if (sum(neg_hit) != 0) {
      neg_cols[[length(neg_cols) + 1L]] <- neg_hit
      neg_names <- c(neg_names, i)
    }
  }
  ## Assemble each matrix in one step (avoids growing it with cbind on every
  ## iteration) and only set column names when at least one column exists.
  design <- numeric()
  if (length(pos_cols) > 0) {
    design <- do.call(cbind, pos_cols)
    colnames(design) <- pos_names
  }
  design2 <- numeric()
  if (length(neg_cols) > 0) {
    design2 <- do.call(cbind, neg_cols)
    colnames(design2) <- neg_names
  }
  return(list(stimuli = design, inhibitors = design2))
}
|
4b9cc56b40832678cab18538fd7959732d250a88
|
228abd3ebb962857a4aa9687899070e251e05ef6
|
/man/bootstrap_iRAM_2node.Rd
|
994343f894d3f0570e73ad841f3edea4cfb78d8f
|
[] |
no_license
|
xinyindeed/pompom
|
e1e964c7a15718fc61001c08f85ff2c9882bea3d
|
587a4bc5fb67423fb3f37e968d099720e07068c4
|
refs/heads/master
| 2021-09-18T09:20:16.519821
| 2018-07-12T15:33:42
| 2018-07-12T15:35:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,538
|
rd
|
bootstrap_iRAM_2node.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-bootstrapped_iRAM_2node.R
\docType{data}
\name{bootstrap_iRAM_2node}
\alias{bootstrap_iRAM_2node}
\title{Bootstrapped iRAM (including replications of iRAM and corresponding time profiles) for the bivariate time-series (simts2node)}
\format{An object of class \code{list} of length 5.}
\usage{
bootstrap_iRAM_2node
}
\description{
Bootstrapped iRAM (including replications of iRAM and corresponding time profiles) for the bivariate time-series (simts2node)
}
\details{
Data bootstrapped from the estimated two-node network structure with 200 replications.
}
\examples{
\dontshow{
bootstrap_iRAM_2node$mean # mean of bootstrapped iRAM
bootstrap_iRAM_2node$upper # Upper bound of confidence interval of bootstrapped iRAM
bootstrap_iRAM_2node$lower # lower bound of confidence interval of bootstrapped iRAM
bootstrap_iRAM_2node$time.profile.data # time profiles generated from the bootstrapped beta matrices
bootstrap_iRAM_2node$recovery.time.reps # iRAMs generated from the bootstrapped beta matrices
}
\donttest{
bootstrap_iRAM_2node$mean # mean of bootstrapped iRAM
bootstrap_iRAM_2node$upper # Upper bound of confidence interval of bootstrapped iRAM
bootstrap_iRAM_2node$lower # lower bound of confidence interval of bootstrapped iRAM
bootstrap_iRAM_2node$time.profile.data # time profiles generated from the bootstrapped beta matrices
bootstrap_iRAM_2node$recovery.time.reps # iRAMs generated from the bootstrapped beta matrices
}
}
\keyword{datasets}
|
f966769a5765bc03005db9e0a191f34a70af967a
|
c5de5d072f5099e7f13b94bf2c81975582788459
|
/R Extension/RMG/Utilities/Environment/R.Release.Notes/moveto_R_2.6.0_.R
|
a0bb06e033fc3644212c8a15ff704988b8e46a04
|
[] |
no_license
|
uhasan1/QLExtension-backup
|
e125ad6e3f20451dfa593284507c493a6fd66bb8
|
2bea9262841b07c2fb3c3495395e66e66a092035
|
refs/heads/master
| 2020-05-31T06:08:40.523979
| 2015-03-16T03:09:28
| 2015-03-16T03:09:28
| 190,136,053
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
moveto_R_2.6.0_.R
|
# Document breaks in our code from 2.5.0 to 2.6.0
#
#
#
#
# memory.limit() in 2.6.0 returns size in MB, in 2.5.0 it was in Bytes.
# Code that calculates size of packets will break.
# See H:\user\R\RMG\Models\Price\ForwardCurve\Network/utils.R
# function: get.no.packets
# There is at least another one in VaR/Base/
|
83aa42356c2463ceac58d103f347473ca0bc684f
|
29f139ac8350bd0e65f75d09014acd6b49a8341b
|
/R/modelFit.R
|
49454d1b3ab456fb81a29651bac589a0f703fa69
|
[] |
no_license
|
cran/DrBats
|
4bf007e2360a0e097cc83b932f352f29f3bebf96
|
3a09e1e0e1ec581a00bb59cd3a0891be499d030e
|
refs/heads/master
| 2022-02-22T11:51:33.964413
| 2022-02-13T18:00:12
| 2022-02-13T18:00:12
| 59,128,319
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,109
|
r
|
modelFit.R
|
# Aim: Fit a Bayesian Latent Factor Model
# Persons : Gabrielle Weinrott [cre, aut]
##' Fit a Bayesian Latent Factor to a data set
##' using STAN
##'
##' @param model a string indicating the type of model ("PLT", or sparse", default = "PLT")
##' @param var.prior the family of priors to use for the variance parameters ("IG" for inverse gamma, or "cauchy")
##' @param prog a string indicating the MCMC program to use (default = "stan")
##' @param parallel true or false, whether or not to parelleize (done using the package "parallel")
##' @param Xhisto matrix of simulated data (projected onto the histogram basis)
##' @param nchains number of chains (default = 2)
##' @param nthin the number of thinned interations (default = 1)
##' @param niter number of iterations (default = 1e4)
##' @param R rotation matrix of the same dimension as the number of desired latent factors
##'
##' @return stanfit, a STAN object
##'
##' @references The Stan Development Team Stan Modeling Language User's Guide and Reference Manual. http://mc-stan.org/
##' @author Gabrielle Weinrott
##'
##'
##' @export
##' @import rstan
modelFit <- function(model = "PLT",
                     var.prior = "IG",
                     prog = "stan",
                     parallel = TRUE,
                     Xhisto = NULL,
                     nchains = 4,
                     nthin = 10,
                     niter = 10000,
                     R = NULL){
  ## ---- Input validation -------------------------------------------------
  if(is.null(Xhisto)){
    stop("No data specified!")
  }
  if(model != "PLT" & model != "sparse"){
    stop("Invalid model type")
  }
  if(prog != "stan"){
    warning("Invalid program type, defaulting to stan")
    prog = "stan"
  }
  ## NOTE(review): if parallel is NULL this condition evaluates to
  ## logical(0) and if() errors -- the is.null() guard does not short-circuit
  ## because scalar `&` (not `&&`) is used. Confirm NULL is never passed.
  if(!is.null(parallel) & parallel != TRUE & parallel != FALSE){
    parallel = FALSE
    warning("Invalid parallel input (must be TRUE or FALSE), defaulting to FALSE")
  }
  nchains <- as.integer(nchains)
  if(nchains <= 0){
    stop("Number of chains must be a positive integer")
  }
  nthin <- as.integer(nthin)
  if(nthin <= 0){
    stop("Number of thinning iterations must be a positive integer")
  }
  niter <- as.integer(niter)
  if(niter <= 0){
    stop("Number of iterations must be a positive integer")
  }
  if(is.null(R)){
    warning("No rotation matrix specified, using the identity matrix of dimension 3")
    R <- diag(1, 3)
  }
  if(var.prior != "IG" & var.prior != "cauchy"){
    stop("Invalid variance prior family, must select either IG or cauchy")
  }
  ## ---- Data preparation -------------------------------------------------
  ## Column-center the data (scale = FALSE keeps the original units); the
  ## Stan programs below model centered observations.
  Xhisto <- scale(Xhisto, center = TRUE, scale = FALSE)
  rstan::rstan_options(auto_write = TRUE)
  if(parallel == TRUE){
    options(mc.cores = parallel::detectCores())
  }
  N <- dim(Xhisto)[1]
  P <- dim(Xhisto)[2]
  D <- nrow(R)
  if(D >= P)
    stop("D must be smaller than ncol(Xhisto)")
  ## Q = number of free entries of a P x D lower-trapezoidal loading matrix.
  Q <- P*D-(D*(D-1)/2)
  ## ---- Model selection: one Stan program per (model, var.prior) pair ----
  ## NOTE(review): all four Stan programs use the deprecated `<-` assignment;
  ## the two PLT programs also read `index` before any value is assigned to
  ## it in the transformed parameters block -- confirm these still compile
  ## and behave as intended under the targeted Stan version.
  if(model == "PLT"){
    stan_data <- list(P=P, N=N, D=D, Q=Q, Xhisto = Xhisto, R=R)
    if(var.prior == "IG"){
      ## PLT model with inverse-gamma priors on the variances.
      scode <- "data {
int<lower=1> N; // observations
int<lower=1> P; // variables
int<lower=1> D; // latent variables
int<lower=1> Q; // number of off-diagonal elements
vector[P] Xhisto[N]; // data matrix
matrix[D, D] R; // rotation matrix
}
parameters {
vector[D] B[N]; // factor loadings
vector[Q] offdiag;
real<lower=0> sigma2;
real<lower=0> tau2;
}
transformed parameters {
matrix[P, D] tL;
matrix[P, D] W;
{
int index;
for (j in 1:D) {
index <- index + 1;
tL[j,j] <- offdiag[index];
for (i in (j+1):P) {
index <- index + 1;
tL[i,j] <- offdiag[index];
}
}
for(i in 1:(D-1)){
for(j in (i+1):D){
tL[i,j] <- 0;
}
}
}
W <- tL*R;
}
model {
offdiag ~ normal(0, tau2); // priors of the loadings
tau2 ~ inv_gamma(0.001, 0.001);
sigma2 ~ inv_gamma(0.001, 0.001);
for (n in 1:N){
B[n] ~ normal(0, 1); // factor constraints
Xhisto[n] ~ normal(W*B[n], sigma2); //the likelihood
}
}
"
    }
    if(var.prior == "cauchy"){
      ## PLT model with half-Cauchy priors on the scale parameters.
      scode <- "data {
int<lower=1> N; // observations
int<lower=1> P; // variables
int<lower=1> D; // latent variables
int<lower=1> Q; // number of off-diagonal elements
vector[P] Xhisto[N]; // data matrix
matrix[D, D] R; // rotation matrix
}
parameters {
vector[D] B[N]; // factors
vector[Q] offdiag;
real<lower=0> sigma;
real<lower=0> tau;
}
transformed parameters {
matrix[P, D] tL;
matrix[P, D] W;
{
int index;
for (j in 1:D) {
index <- index + 1;
tL[j,j] <- offdiag[index];
for (i in (j+1):P) {
index <- index + 1;
tL[i,j] <- offdiag[index];
}
}
for(i in 1:(D-1)){
for(j in (i+1):D){
tL[i,j] <- 0;
}
}
}
W <- tL*R ;
}
model {
offdiag ~ normal(0, tau^2); // priors of the loadings
tau ~ cauchy(0, 5);
sigma ~ cauchy(0, 5);
for (n in 1:N){
B[n] ~ normal(0, 1); // factor constraints
Xhisto[n] ~ normal(W*B[n], sigma^2); //the likelihood
}
}
"
    }
  }
  if(model == "sparse"){
    stan_data <- list(P=P, N=N, D=D, Xhisto = Xhisto)
    if(var.prior == "IG"){
      ## Sparse model (double-exponential prior on loadings), IG variances.
      scode <- "data {
int<lower=1> N; // observations
int<lower=1> P; // variables
int<lower=1> D; // latent variables
vector[P] Xhisto[N]; // data matrix
}
parameters {
vector[D] B[N]; // factor loadings
matrix[P, D] W; // latent factors
real<lower=0> sigma2;
vector[D] tau2;
}
model {
sigma2 ~ inv_gamma(0.001, 0.001);
for(i in 1:D){
tau2[i] ~ inv_gamma(0.001, 0.001);
W[ ,i] ~ double_exponential(0, tau2[i]);
}
for (n in 1:N){
B[n] ~ normal(0, 1); // factor constraints
Xhisto[n] ~ normal(W*B[n], sigma2); //the likelihood
}
}
"
    }
    if(var.prior == "cauchy"){
      ## Sparse model with half-Cauchy priors on the scale parameters.
      scode <- "data {
int<lower=1> N; // observations
int<lower=1> P; // variables
int<lower=1> D; // latent variables
vector[P] Xhisto[N]; // data matrix
}
parameters {
vector[D] B[N]; // factor loadings
matrix[P, D] W; // latent factors
real<lower=0> sigma;
vector[D] tau;
}
model {
sigma ~ cauchy(0, 5);
for(i in 1:D){
tau[i] ~ cauchy(0, 5);
W[ ,i] ~ double_exponential(0, tau[i]);
}
for (n in 1:N){
B[n] ~ normal(0, 1); // factor constraints
Xhisto[n] ~ normal(W*B[n], sigma^2); //the likelihood
}
}
"
    }
  }
  ## ---- Sampling ---------------------------------------------------------
  stanfit <- rstan::stan(model_code = scode, data = stan_data, chains = nchains,
                         thin = nthin, iter = niter)
  return(stanfit)
}
|
2748964e1ed689f74877e251f3449052fd4e6d86
|
91f40a7659881ba7d43d684efc327f9501c8ed1f
|
/server.R
|
4e620c16446629623356d2dc473b8b16ded46a3b
|
[] |
no_license
|
matsar/VelibRShiny
|
1e23e0ca7ed5b1adb4325d61fc04e1b356665d0e
|
c910cb74f92b3f2fea79c5355793c8bce3ef5f46
|
refs/heads/master
| 2021-01-10T07:15:28.943112
| 2016-04-22T20:21:23
| 2016-04-22T20:21:23
| 53,279,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,919
|
r
|
server.R
|
# encoding: utf-8
###############################################################################.
#
# Titre : server.R
#
# Theme : Data Science - projet VelibR
#
# Creation : 27 février 2016
# MAJ : 22/04/2016
#
# Auteur : CEPE gpe 1
###############################################################################.
require(shiny)
require(leaflet)
## Shiny server for the VelibR app: renders a leaflet map of Velib (bike)
## and Autolib (car) stations, optionally with a cycling itinerary, plus
## departure/arrival station tables. Relies on globals defined elsewhere:
## velibs2, autolibs2, faireCarteVierge(), ajouterPoints(), rechercheVelib(),
## and ggmap's geocode()/route().
shinyServer(function(input, output) {
  ## Base map with the selected transport layers and a departure marker.
  carteL<-reactive({
    m <- faireCarteVierge()
    print(input$modeTransport)
    if ("vel" %in% input$modeTransport){
      ## Velib stations: green when at least 2 bikes are available, red
      ## otherwise; circle radius is the number of stands.
      m <- ajouterPoints(m, lng = velibs2$longitude, lat = velibs2$latitude,
                         radius = velibs2$bike_stands,
                         color = ifelse(velibs2$available_bikes>=2, "green", "red"),
                         titre = velibs2$name,
                         attributs = velibs2[,c("bike_stands","available_bike_stands", "available_bikes")])
    }
    if ("auto" %in% input$modeTransport){
      ## Autolib stations, always drawn in blue.
      m <- ajouterPoints(m, lng = autolibs2$longitude, lat = autolibs2$latitude,
                         radius = autolibs2$Autolib.,
                         color = "blue",
                         titre = autolibs2$Identifiant.Autolib.,
                         attributs = autolibs2[,c("Autolib.", "Emplacement","Tiers", "Abri")])
    }
    ## Geocode the departure address and pin it.
    coord_dep <- geocode(input$addresse1)
    m <- addMarkers(m, lng = coord_dep$lon, lat = coord_dep$lat, popup = paste("Depart : ", input$addresse1))
    m
  })
  ## Same map plus a cycling route polyline and the arrival marker.
  carteL2<-reactive({
    m<-carteL()
    rvelo<-route(from=as.character(input$addresse1), to=as.character(input$addresse2),
                 mode="bicycling", structure="route", alternatives=FALSE)
    m<-m %>% addPolylines(data = rvelo, lng = ~lon , lat = ~lat)
    coord_arr <- geocode(input$addresse2)
    m <- addMarkers(m, lng = coord_arr$lon, lat = coord_arr$lat, popup = paste("Arrivée : ", input$addresse2))
    m
  })
  ## Stations near the departure address with enough available bikes.
  tableDep<-reactive({
    t<- rechercheVelib(adresse=input$addresse1,
                       table=velibs2, ## station table, velib or autolib
                       nb=50)
    #names(t)<-c('Adresse de la station','Nombre de vélos','Emplacement disponible','Nombre de vélos disponibles')
    t<-t[t$available_bikes>=input$sliderDepart,]
    t
  })
  ## Stations near the arrival address with enough free stands.
  tableArrivee<-reactive({
    t<- rechercheVelib(adresse=input$addresse2,
                       table=velibs2, ## station table, velib or autolib
                       nb=50)
    t<-t[t$available_bike_stands>=input$sliderArrivee,]
    t
  })
  output$table_depart = renderDataTable({
    tableDep()[,c("name","bike_stands","available_bike_stands", "available_bikes")]
  })
  output$table_arrive = renderDataTable({
    tableArrivee()[,c("name","bike_stands","available_bike_stands", "available_bikes")]
  })
  ## Show the itinerary map only when the "Itineraire" mode is selected.
  output$carte <- renderLeaflet({
    if (input$objet=="Itineraire"){
      carteL2()
    }else{
      carteL()
    }
  })
})
|
61b19df0f2d62d9268632d41d29112e65dc35c55
|
2592602560a9568c00f28159cace664fc1ba3ad3
|
/c-gsl/n_e-facet.plot.R
|
2bcf9e11cc2d5ef01ffe7520dc0a7e4caca6716d
|
[] |
no_license
|
diogro/evomod
|
cd0bc39abac6727bbb1a88f72418a3f8d1f7863f
|
934c57147528f75d97ef56e03b6994170af72e84
|
refs/heads/master
| 2020-04-15T03:01:19.578176
| 2015-08-14T19:06:58
| 2015-08-14T19:06:58
| 10,078,557
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,056
|
r
|
n_e-facet.plot.R
|
library(reshape2)
library(ggplot2)
library(EvomodR)
## Compute the long-format average-correlation data frame for one population.
##
## Calls the project helper AVGRatio() on the correlation series, drops the
## Probability and Selection_Strength columns, melts the remainder to long
## format, then keeps one AVG component per integration type:
##   * "Full Integration" rows keep "AVG-" (their "AVG+" rows are removed),
##   * all other .id values keep "AVG+" (their "AVG-" rows are removed).
## Every surviving row is tagged with the population size n.e.
##
## NOTE(review): `F` is used for FALSE, and num.cores = 4 plus the 40/10000
## argument to AVGRatio are hard-coded -- confirm these are intended. The
## melt() call also drops columns 2 and 5 by position; verify the column
## layout of AVGRatio's output has not changed.
AVGWrap = function(p.cor, n.e, generations = 1:length(p.cor)){
  burn.in.avg = AVGRatio(p.cor, 40/10000, F, num.cores = 4, generations = generations)
  burn.in.avg['Probability'] = NULL
  burn.in.avg['Selection_Strength'] = NULL
  m.avg = melt(burn.in.avg[,-c(2,5)], id.vars = c('.id', 'generation'))
  m.avg = m.avg[!((m.avg['.id'] != "Full Integration") & (m.avg['variable'] == "AVG-")),]
  m.avg = m.avg[!((m.avg['.id'] == "Full Integration") & (m.avg['variable'] == "AVG+")),]
  m.avg['Population_Size'] = n.e
  return(m.avg)
}
NeDataFrame <- function(pop.list, generations = 1:length(p.cor)) {
  ## For each simulated population, build the melted AVG data frame via
  ## AVGWrap() and row-bind all results with plyr::ldply, showing a text
  ## progress bar.
  per_population <- function(pop) {
    AVGWrap(pop$p.cor[generations], pop$n.e, generations)
  }
  ldply(pop.list, per_population, .progress = "text")
}
NeFacetPlot <- function(m.avg) {
  ## Scatter plot of average correlation by generation, one facet per
  ## population size, colored by module (within module 1 / within module 2 /
  ## between modules). Built incrementally for readability.
  module_labels <- c("Within Module 1",
                     "Within Module 2",
                     "Between Modules")
  p <- ggplot(m.avg, aes(generation,
                         value,
                         group = interaction(variable, generation, .id),
                         colour = interaction(.id, variable)))
  p <- p + layer(geom = "point")
  p <- p + labs(x = "Generation",
                y = "Average Correlation",
                color = "Module")
  p <- p + scale_colour_discrete(labels = module_labels)
  p <- p + theme_bw()
  p <- p + theme(legend.position = "bottom",
                 axis.text.x = element_text(angle = 45, hjust = 1))
  p <- p + facet_wrap( ~ Population_Size, ncol = 5)
  p
}
## Load the precomputed m.avg data frame and render the Ne facet plot.
load("./rdatas/ne.data.frame.Rdata")
ne.plot = NeFacetPlot(m.avg)
## NOTE(review): ggsave() is called without plot=, so it saves ggplot2's
## last displayed plot; ne.plot (and mu.plot below) are never printed or
## passed explicitly -- confirm the intended plot is actually saved.
ggsave("~/n_e.facet.plot.tiff", width= 16, units = "cm", dpi = 600)
## Mutation-rate-ratio plot from a second simulation output, with the
## legend moved into the top-left corner on a transparent background.
load("./rdatas/mu_b.Rdata")
mu.plot = AVGRatioPlot(main.data.mu.b, T, 4, 'mu.ratio', "Mutation rate ratio")
mu.plot = mu.plot + theme(legend.position = c(0, 1),
                          legend.justification = c(0, 1),
                          legend.background = element_rect(fill="transparent"))
ggsave("~/mu_ratio_plot.png", width= 22, height = 9, units = "cm", dpi = 600)
|
0bf6c2e08787a191c4e2b9a917043d1dd14f6d56
|
10416e68809b5641ed1eda9b5cf8ee2ae36d7d98
|
/R/addins.R
|
60e1b816af1d23e588d9cde2c6b1eae7a4ec0f68
|
[
"MIT"
] |
permissive
|
BioDataScience-Course/BioDataScience2
|
01856501c0c7e0c4613fbc3f8a1af471dadde50b
|
cf470d0de8a0dd98167f56bb08bc9b555ad868a8
|
refs/heads/master
| 2023-08-22T14:41:10.369340
| 2023-08-14T14:22:07
| 2023-08-14T14:22:07
| 205,365,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,120
|
r
|
addins.R
|
# RStudio addins
run_addin <- function() {
  ## RStudio addin entry point: updates the course packages, then shows a
  ## miniUI gadget listing the tutorials and Shiny apps bundled with the
  ## BioDataScience2 package and runs the one the user selects.
  ## NOTE(review): the shiny/miniUI library() calls are commented out, yet
  ## miniPage(), selectInput(), runGadget(), etc. are used unqualified --
  ## confirm these packages are attached (or imported) by the time this runs.
  #library(shiny)
  #library(miniUI)
  selectItem <- function() {
    package <- "BioDataScience2"
    items <- character(0)
    ## Helper: does dir/subdir inside the installed package contain at least
    ## one file matching `pattern`?
    tutorials <- dir(system.file("tutorials", package = package))
    is_active <- function(dir, subdir, pattern)
      length(dir(system.file(dir, subdir, package = package),
        pattern = pattern)) > 0
    ## Keep only tutorial folders that actually contain an .Rmd file.
    keep <- logical(length(tutorials))
    for (i in seq_along(tutorials))
      keep[i] <- is_active("tutorials", tutorials[i], "\\.Rmd$")
    tutorials <- tutorials[keep]
    if (length(tutorials))
      items <- paste(tutorials, "(tutorial)")
    ## Keep only Shiny app folders that contain an app.R file.
    apps <- dir(system.file("shiny", package = package))
    keep <- logical(length(apps))
    for (i in seq_along(apps))
      keep[i] <- is_active("shiny", apps[i], "^app.R$")
    apps <- apps[keep]
    if (length(apps))
      items <- c(items, paste(apps, "(Shiny app)"))
    ## Nothing to offer: return NULL so the caller's message is skipped.
    if (!length(items)) return()
    ## Minimal gadget UI: a single list box plus Cancel/Select buttons.
    ui <- miniPage(
      miniContentPanel(
        selectInput("item", paste0("Items in ", package, ":"),
          selectize = FALSE, size = 11, items)
      ),
      gadgetTitleBar("",
        left = miniTitleBarCancelButton(),
        right = miniTitleBarButton("done", "Select", primary = TRUE)
      )
    )
    server <- function(input, output, session) {
      observeEvent(input$done, {
        returnValue <- input$item
        if (!is.null(returnValue)) {
          ## Dispatch on the " (tutorial)" / " (Shiny app)" suffix added
          ## above, stripping it before launching.
          if (grepl(" \\(tutorial\\)$", returnValue)) {
            run(sub(" \\(tutorial\\)$", "", returnValue))
          } else {# Must be an app then
            run_app(sub(" \\(Shiny app\\)$", "", returnValue))
          }
        }
        stopApp(returnValue)
      })
    }
    runGadget(ui, server,
      viewer = dialogViewer("Select an item",
        width = 300, height = 250))
  }
  # Update both BioDataScience & BioDataScience2
  ## NOTE(review): the second call is a bare update_pkg() -- presumably
  ## learnitdown::update_pkg() updating BioDataScience2 via its defaults;
  ## verify the intended function/arguments.
  learnitdown::update_pkg("BioDataScience",
    github_repos = "BioDataScience-course/BioDataScience")
  update_pkg()
  ## Cancelling the gadget raises an error; swallow it silently and only
  ## report an item when the selection succeeded.
  item <- try(suppressMessages(selectItem()), silent = TRUE)
  if (!is.null(item) && !inherits(item, "try-error"))
    message("Running item ", item)
}
|
175a6b2897f1c998f172fee2b47db055c76aa606
|
046d89616fd295db30c62fc5cede60246a026c8d
|
/cachematrix.R
|
b658ff3d9aafda4be57260804265cf75245384b7
|
[] |
no_license
|
raulfloresp/ProgrammingAssignment2
|
9e3495d6157048261a675f463dd6fade03a9bb8e
|
6dfdf8867ea9a3d839fc9454ded440d3fe4e9d6d
|
refs/heads/master
| 2021-01-24T22:20:56.781910
| 2016-03-12T19:31:33
| 2016-03-12T19:31:33
| 53,748,613
| 0
| 0
| null | 2016-03-12T19:07:07
| 2016-03-12T19:07:06
| null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
cachematrix.R
|
## Matrix inversion is usually a costly computation and there
## may be some benefit to caching the inverse of a matrix rather
## than compute it repeatedly (there are also alternatives to
## matrix inversion that we will not discuss here).
## This assignment is to write a pair of functions that cache
## the inverse of a matrix.
## Set the value of the Matrix
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of four accessor closures sharing this environment:
  ##   set(y)            -- replace the matrix and invalidate the cache
  ##   get()             -- return the stored matrix
  ##   setInversion(inv) -- store a computed inverse in the cache
  ##   getInversion()    -- return the cached inverse, or NULL if unset
  inversematrix <- NULL
  set <- function(y) {
    ## A new matrix makes any previously cached inverse stale.
    x <<- y
    inversematrix <<- NULL
  }
  get <- function() {
    x
  }
  setInversion <- function(inversion) {
    inversematrix <<- inversion
  }
  getInversion <- function() {
    inversematrix
  }
  list(
    set = set,
    get = get,
    setInversion = setInversion,
    getInversion = getInversion
  )
}
## The following function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the
## cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held by the cache object `x` (built by
  ## makeCacheMatrix). On a cache hit the stored inverse is returned and a
  ## message is printed; on a miss the inverse is computed with solve(),
  ## stored via x$setInversion(), and returned. Extra arguments are
  ## forwarded to solve().
  cached <- x$getInversion()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  source_matrix <- x$get()
  fresh <- solve(source_matrix, ...)
  x$setInversion(fresh)
  fresh
}
|
1539994a391ed71adb639d4bd1279387ae26408e
|
fee595a469087dca8451f834b0ad311ae1f60f2c
|
/exercise2.R
|
5136fe84492990c64fda79e2a3ec295901178c99
|
[] |
no_license
|
lucyliu666/test2
|
19a2bf4d72c391628536b4b087f586a725373671
|
bce025f208631582cdbb38ec9cb67edc12c30ee6
|
refs/heads/master
| 2021-09-11T19:12:28.450475
| 2018-04-11T08:26:44
| 2018-04-11T08:26:44
| 123,310,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,084
|
r
|
exercise2.R
|
x <- 1:3
## Copy-on-modify demo: y starts out sharing x's values; the assignment into
## y triggers a copy, so x stays c(1, 2, 3).
y <- x
y[[3]] <- 4
x
mat <- matrix(1:4,2)
## Call-by-value demo: mutating the argument inside f1 only changes the
## local copy; the function returns mat[1,1] + 2, i.e. 5 here.
f1 <- function(mat){
  mat[1,1] <- 3
  mat[1,1] + 2
}
f1(mat)
mat
mat <- matrix(1:4,2)
mat
## Same demo, but now the assignment is the last expression, so f1 returns
## 3 invisibly; the caller's mat is still unchanged.
f1 <- function(mat){
  mat[1,1] <- 3
}
f1(mat)
n <- 1e3
## NOTE(review): `max` shadows base::max for the rest of this script.
max <- 1:1000
## Timing demo: growing a matrix with cbind() inside the loop copies it on
## every iteration (quadratic cost); compare with the preallocated version
## later in the script.
system.time({
  mat <- NULL
  for (m in max) {
    mat <- cbind(mat, runif(n, max = m))
  }
})
a <- 2
## Deliberate 2-second pause (demo of elapsed vs. CPU time).
Sys.sleep(2)
b <- 3
monte_carlo <- function(N) {
  ## Monte Carlo estimate of P(U1^2 > U2) for independent U1, U2 ~ Unif(0, 1)
  ## using N trials. Draws are taken one at a time, two per trial, so the
  ## RNG stream consumption matches a sequential pairwise sampling scheme.
  hits <- 0
  trial <- 0
  while (trial < N) {
    trial <- trial + 1
    draw_a <- runif(1)
    draw_b <- runif(1)
    hits <- hits + as.integer(draw_a ^ 2 > draw_b)
  }
  hits / N
}
## Load the exercise dataset shipped with the advr38pkg course package and
## summarize it with skimr.
mydf <- readRDS(system.file("extdata/one-million.rds", package = "advr38pkg")
)
mydf
skimr::skim(mydf)
## Hand-rolled cumulative sum over `x` (the vector defined earlier in this
## script) -- equivalent to cumsum(x), timed for comparison.
system.time({
  current_sum <- 0
  res2 <- double(length(x))
  for (i in seq_along(x)) {
    current_sum <- current_sum + x[i]
    res2[i] <- current_sum
  }
})
n <- 1e3
max <- 1:1000
## Slow version: grow the matrix column-by-column with cbind (copies the
## whole matrix on each iteration).
system.time({
  mat <- NULL
  for (m in max) {
    mat <- cbind(mat, runif(n, max = m))
  }
})
## Fast version: fill a preallocated list, then bind all columns once.
system.time({
  l <- vector("list", length(max))
  for (i in seq_along(max)) {
    l[[i]] <- runif(n, max = max[i])
  }
  mat2 <- do.call("cbind", l)
})
|
b59ad0f5c0502e4448ab61a27c3b14c93146fac9
|
73744a740941b13641c0175c8e583b20cfd023a1
|
/analysis/books/00_get_contractions.R
|
03d470b165652f13acdb4ee0287814a9324eaea0
|
[] |
no_license
|
mllewis/WCBC_GENDER
|
8afe092a60852283fd2aa7aea52b613f7b909203
|
ed2d96361f7ad09ba70b564281a733da187573ca
|
refs/heads/master
| 2021-12-25T22:41:21.914309
| 2021-12-22T19:08:36
| 2021-12-22T19:08:36
| 248,584,454
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
00_get_contractions.R
|
## Build the contraction lookup table used for text normalization.
## lexicon::key_contractions is missing a couple of entries, so they are
## appended by hand before the table is written out.
library(lexicon)
library(tidyverse)
library(here)

CONTRACTION_OUTFILE <- here("data/processed/words/contractions_complete.csv")

contractions <- key_contractions %>%
  add_row(contraction = "hadn't", expanded = "had not") %>%
  add_row(contraction = "haven't", expanded = "have not") %>%
  arrange(contraction)

write_csv(contractions, CONTRACTION_OUTFILE)
|
317693aa6a409ce12b2d7ccf74be337683718f9a
|
ce98e0fe1bc89232754a51943aa17530a16af0cb
|
/analysis/01_count_SS_uncertain_dates.R
|
c9d422afffb2e5c87104cddf3f481180668218d4
|
[] |
no_license
|
kzaret/RQ2_Dendro_v2_PIUVestab
|
bad3f144f36520ce984e3df382dfdf432754a4dd
|
854c2bea5307aca3aec66e816269b6b7835ca082
|
refs/heads/main
| 2023-08-24T17:00:48.467138
| 2021-10-26T20:00:11
| 2021-10-26T20:00:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,739
|
r
|
01_count_SS_uncertain_dates.R
|
#---------------------------------------------------------------------------------
# SIMULATION TESTING FOR POISSON STATE-SPACE MODELS
#
# Simulate data from the basic univariate Poisson SS model and
# fit the pseudo-data with the same model, the equivalent
# Poisson-multinomial model (Poisson for total count, multinomial
# for conditional counts), and a multinomial model (conditioned on
# total count).
#
# Simulate data from a novel model that adds observation error to
# the sample of times generated from the Poisson SS model. Fit
# the observed counts with the generating model or the standard
# multinomial model, and compare fit if the true times were known.
#---------------------------------------------------------------------------------
options(device = windows)
## @knitr setup
library(rstan)
library(shinystan)
library(yarrr)
library(matrixStats)
library(here)
options(mc.cores = parallel::detectCores() - 1)
## @knitr
if(file.exists(here("analysis","results","Poisson_SS.RData")))
load(here("analysis","results","Poisson_SS.RData"))
#---------------------------------------------------------------------------------
# POISSON STATE-SPACE MODEL
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Simulate univariate Poisson state-space model
# x[t] = x[t-1] + w[t], w[t] ~ N(0,sigma), x[0] ~ N(0,sigma)
# y[t] ~ Pois(exp(x[t]))
#---------------------------------------------------------------------------------
## @knitr Poisson_SS_sim
set.seed(34567)
sigma <- 0.3   # process (random-walk) standard deviation
TT <- 200      # number of time steps
## Latent log-intensity: Gaussian random walk x[t] = x[t-1] + N(0, sigma),
## with x[1] ~ N(0, sigma).
x <- vector("numeric",TT)
x[1] <- rnorm(1,0,sigma)
for(tt in 2:TT)
  x[tt] <- rnorm(1, x[tt-1], sigma)
## Observed counts: y[t] ~ Poisson(exp(x[t])).
y <- rpois(TT, exp(x))
## @knitr
#---------------------------------------------------------------------------------
# Fit standard Poisson state-space model
#---------------------------------------------------------------------------------
## @knitr fit_pois
## Fit the generating model (Poisson SS) to the simulated counts.
## 3 chains x 1000 post-warmup draws; tight adapt_delta / deep treedepth to
## suppress divergences.
fit_pois <- stan(file = here("analysis","Poisson_SS.stan"),
                 data = list(T = TT, y = y), pars = c("sigma","x"),
                 chains = 3, iter = 2000, warmup = 1000,
                 control = list(adapt_delta = 0.99, max_treedepth = 12))
## @knitr print_fit_pois
print(fit_pois, pars = "sigma", probs = c(0.025,0.5,0.975))
## @knitr
#---------------------------------------------------------------------------------
# Fit Poisson-multinomial state-space model
# (do not condition on total)
#---------------------------------------------------------------------------------
## @knitr fit_pois_mn
## Equivalent factorization: Poisson for the total, multinomial for the
## conditional counts. Explicit sigma init keeps chains off the boundary.
fit_pois_mn <- stan(file = here("analysis","Poisson_multinomial_SS.stan"),
                    data = list(T = TT, y = y), pars = c("sigma","x"),
                    init = function() list(sigma = runif(1,0.1,0.5)),
                    chains = 3, iter = 2000, warmup = 1000,
                    control = list(adapt_delta = 0.99, max_treedepth = 12))
## @knitr print_fit_pois_mn
print(fit_pois_mn, pars = "sigma", probs = c(0.025,0.5,0.975))
## @knitr
#---------------------------------------------------------------------------------
# Fit multinomial model to state-space Poisson counts
# (condition on total)
#---------------------------------------------------------------------------------
## @knitr fit_mn
## Multinomial model conditioned on the observed total count.
fit_mn <- stan(file = here("analysis","multinomial_SS.stan"),
               data = list(T = TT, y = y), pars = c("sigma","pi","lambda"),
               chains = 3, iter = 2000, warmup = 1000,
               control = list(adapt_delta = 0.99, max_treedepth = 12))
## @knitr print_fit_mn
print(fit_mn, pars = "sigma", probs = c(0.025,0.5,0.975))
## @knitr
#---------------------------------------------------------------------------------
# Plot data, states, and fits
#---------------------------------------------------------------------------------
dev.new(width = 7, height = 5)
## @knitr plot_pois_mn
par(mar = c(5.1,4.1,2,1))
# Poisson-multinomial state-space model
## Posterior draws of the intensity lambda[t] = exp(x[t]).
lambda <- exp(as.matrix(fit_pois_mn, "x"))
## True intensity (blue line), observed counts (vertical ticks), and the
## posterior median with a 95% credible band (gray).
plot(1:TT, exp(x), type = "l", col = "dodgerblue", lwd = 3,
     cex.axis = 1.2, cex.lab = 1.5, cex.main = 1.5, xlab = "time", ylab = "count",
     ylim = range(colQuantiles(lambda, probs = 0.975), y), yaxt = "n")
rug(seq(0,TT,10)[seq(0,TT,10) %% 50 != 0], side = 1, ticksize = -0.03)
axis(2, at = 0:par("usr")[4], las = 1, cex.axis = 1.2)
points(1:TT, y, type = "h", col = transparent("black", 0.3))
polygon(c(1:TT, TT:1),
        c(colQuantiles(lambda, probs = 0.025), rev(colQuantiles(lambda, probs = 0.975))),
        col = transparent("dimgray", 0.5), border = NA)
lines(1:TT, colMedians(lambda), col = "dimgray", lwd = 3)
## Two stacked legends (first with invisible text) so that point, line, and
## band keys can be combined in a single legend block.
legend("topright", bty = "n", text.col = "white", cex = 1.2,
       legend = expression(lambda[italic(t)], italic(y)[italic(t)], widehat(lambda[italic(t)])),
       pch = c(NA,"I",NA), lwd = c(3,NA,15),
       col = c(NA, transparent("black", 0.3), transparent("dimgray", 0.5)))
legend("topright", bty = "n", cex = 1.2,
       legend = expression(lambda[italic(t)], italic(y)[italic(t)], widehat(lambda[italic(t)])),
       lwd = c(3,NA,3), col = c(transparent("dodgerblue", 0.3), NA, "dimgray"))
## @knitr
#---------------------------------------------------------------------------------
# POISSON STATE-SPACE MODEL WITH OBSERVATION ERROR IN TIMES
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Simulate univariate Poisson state-space model with observation error in times
# x[tau] = x[tau-1] + w[tau], w[tau] ~ N(0,sigma), x[0] ~ N(0,sigma)
# y[t] ~ Pois(exp(x[t])) <=> tau[i] ~ Multinom(1,pi) for i = 1, ..., N
# t[i] ~ Multinom(1, gamma[i]),
# where gamma[i] is the observation error distribution for time i.
# Example:
# gamma[i,j] = dgeom(tau[j] - t[i], r) <=> t[i] ~ tau[i] + Geom(r)
#---------------------------------------------------------------------------------
## @knitr Poisson_tobs_SS_sim
set.seed(321)
TT <- 200
N <- 500 # total sample size
sigma <- 0.3
r <- 0.2 # probability parameter for geometric obs error in time
## Latent log-intensity random walk, as in the first simulation.
x <- vector("numeric",TT)
x[1] <- rnorm(1,0,sigma)
for(tt in 2:TT)
  x[tt] <- rnorm(1, x[tt-1], sigma)
## NOTE(review): `pi` here masks the base constant 3.14159... for the rest
## of the script.
pi <- exp(x)/sum(exp(x))
chi <- as.vector(rmultinom(1,N,pi)) # true counts
tau <- rep(1:TT, times = chi) # true times
## Observed times: true time plus geometric error, capped at TT
## (right-censoring at the final time step).
## NOTE(review): `tt` was the loop index above and is reused here for the
## observed-time vector -- intentional but easy to misread.
tt <- pmin(tau + rgeom(N,r), TT) # observed times
tab <- table(tt)
y <- replace(rep(0,TT), as.numeric(names(tab)), tab) # observed counts
## @knitr
#---------------------------------------------------------------------------------
# Fit standard Poisson-multinomial model to true, unknown times
#---------------------------------------------------------------------------------
## @knitr fit_tau
## Reference fit: the standard Poisson-multinomial model applied to the
## TRUE (in practice unknown) counts chi.
fit_tau <- stan(file = here("analysis","Poisson_multinomial_SS.stan"),
                data = list(T = TT, y = chi), pars = c("sigma","x"),
                chains = 3, iter = 2000, warmup = 1000,
                control = list(adapt_delta = 0.99, max_treedepth = 12))
## @knitr print_fit_tau
print(fit_tau, pars = "sigma", probs = c(0.025,0.5,0.975))
## @knitr
#---------------------------------------------------------------------------------
# Fit standard Poisson-multinomial model to observed times
#---------------------------------------------------------------------------------
## @knitr fit_t
## Naive fit: same model, but applied to the error-contaminated counts y
## (ignores the observation error in the times).
fit_t <- stan(file = here("analysis","Poisson_multinomial_SS.stan"),
              data = list(T = TT, y = y), pars = c("sigma","x"),
              chains = 3, iter = 2000, warmup = 1000,
              control = list(adapt_delta = 0.99, max_treedepth = 12))
## @knitr print_fit_t
print(fit_t, pars = "sigma", probs = c(0.025,0.5,0.975))
## @knitr
#---------------------------------------------------------------------------------
# Fit multinomial model with obs error in time
#---------------------------------------------------------------------------------
## @knitr fit_tobs
## Time-uncertain fit: the model that accounts for the geometric observation
## error (passes the true error parameter r as data).
fit_tobs <- stan(file = here("analysis","Poisson_multinomial_tobs_SS.stan"),
                 data = list(T = TT, y = y, r = r), pars = c("sigma","x"),
                 init = function() list(sigma = runif(1,0.1,0.5)),
                 chains = 3, iter = 2000, warmup = 1000,
                 control = list(adapt_delta = 0.99, max_treedepth = 12))
## @knitr print_fit_tobs
print(fit_tobs, pars = "sigma", probs = c(0.025,0.5,0.975))
## @knitr
#---------------------------------------------------------------------------------
# Plot data, states, and fits
#---------------------------------------------------------------------------------
dev.new(width = 10, height = 6)
## @knitr plot_tobs
# 2x2 panel figure:
#   (1) simulated states and observations,
#   (2) fit to the true (unknown) event times tau_t,
#   (3) naive fit to the observed times y_t,
#   (4) time-uncertain fit to y_t.
par(mfrow = c(2,2), mar = c(3,2.5,2.5,1), oma = c(1.5,2,0,0))
# true expected counts; N and pi are assumed defined earlier in the
# simulation section -- TODO confirm (note: `pi` here is NOT base R's constant)
lambda <- N*pi
# posterior draws of the latent states, back-transformed from the log scale
# (the Stan models parameterize the state x as log-intensity)
lambda_t <- exp(as.matrix(fit_t, "x"))
lambda_tau <- exp(as.matrix(fit_tau, "x"))
lambda_tobs <- exp(as.matrix(fit_tobs, "x"))
# common y-axis upper limit: largest 97.5% posterior quantile across all fits
ul <- max(colQuantiles(lambda_t, probs = 0.975),
colQuantiles(lambda_tau, probs = 0.975),
colQuantiles(lambda_tobs, probs = 0.975))
# states and observations
plot(1:TT, lambda, type = "l", col = "dodgerblue", lwd = 3,
las = 1, cex.axis = 1.2, cex.lab = 1.5, cex.main = 1.5, ylim = c(0,ul),
xlab = "", ylab = "count", main = "States and observations", font.main = 1, xpd = NA)
# minor tick marks every 10 units, skipping the labeled multiples of 50
rug(seq(0,TT,10)[seq(0,TT,10) %% 50 != 0], side = 1, ticksize = -0.02)
points(1:TT, chi, pch = ".", cex = 4, col = "dodgerblue")
points(1:TT, y, type = "h", col = transparent("black", 0.3))
# two-pass legend trick: the first call reserves layout and draws only the
# "I"-glyph key for y_t (labels in white); the second overlays the colored
# line/point keys and the visible label text
legend("topleft", bty = "n", text.col = "white", cex = 1.2, pt.cex = 1,
legend = expression(lambda[italic(t)], chi[italic(t)], italic(y)[italic(t)]),
pch = c(NA,NA,"I"), col = c(NA, NA, transparent("black", 0.3)))
legend("topleft", bty = "n", cex = 1.2, pt.cex = 4,
legend = expression(lambda[italic(t)], chi[italic(t)], italic(y)[italic(t)]),
pch = c(NA,".",NA), lty = c(1,NA,NA), lwd = c(3,NA,NA),
col = c("dodgerblue", "dodgerblue", NA))
# fit to true, unknown times
plot(1:TT, lambda, type = "l", col = "dodgerblue", lwd = 3,
las = 1, cex.axis = 1.2, cex.lab = 1.5, cex.main = 1.5, ylim = c(0,ul),
xlab = "", ylab = "", main = bquote("Poisson-multinomial fit to" ~ chi[italic(t)]),
font.main = 1)
rug(seq(0,TT,10)[seq(0,TT,10) %% 50 != 0], side = 1, ticksize = -0.02)
# shaded 95% credible band for the estimated intensity
polygon(c(1:TT, TT:1),
c(colQuantiles(lambda_tau, probs = 0.025),
rev(colQuantiles(lambda_tau, probs = 0.975))),
col = transparent("dimgray", 0.5), border = NA)
lines(1:TT, colMedians(lambda_tau), col = "dimgray", lwd = 3)
points(1:TT, chi, pch = ".", cex = 4, col = "dodgerblue")
# same two-pass legend trick as above (band swatch under the median line)
legend("topleft", bty = "n", legend = expression(widehat(italic(lambda)[italic(t)])),
text.col = "white", cex = 1.2, lwd = 15, col = transparent("dimgray", 0.5))
legend("topleft", bty = "n", legend = expression(widehat(italic(lambda)[italic(t)])),
cex = 1.2, lwd = 3, col = "dimgray")
# fit to observed times
plot(1:TT, lambda, type = "l", col = "dodgerblue", lwd = 3,
las = 1, cex.axis = 1.2, cex.lab = 1.5, cex.main = 1.5,
ylim = c(0,ul), xlab = "time", ylab = "count",
main = bquote("Poisson-multinomial fit to" ~ italic(y)[italic(t)]),
font.main = 1, xpd = NA)
rug(seq(0,TT,10)[seq(0,TT,10) %% 50 != 0], side = 1, ticksize = -0.02)
points(1:TT, y, type = "h", col = transparent("black", 0.3))
polygon(c(1:TT, TT:1),
c(colQuantiles(lambda_t, probs = 0.025),
rev(colQuantiles(lambda_t, probs = 0.975))),
col = transparent("dimgray", 0.5), border = NA)
lines(1:TT, colMedians(lambda_t), col = "dimgray", lwd = 3)
# fit to observed times, accounting for obs error
plot(1:TT, lambda, type = "l", col = "dodgerblue", lwd = 3,
las = 1, cex.axis = 1.2, cex.lab = 1.5, cex.main = 1.5,
font.main = 1, ylim = c(0,ul), xlab = "time", ylab = "",
main = bquote("Time-uncertain Poisson-multinomial fit to" ~ italic(y)[italic(t)]), xpd = NA)
rug(seq(0,TT,10)[seq(0,TT,10) %% 50 != 0], side = 1, ticksize = -0.02)
points(1:TT, y, type = "h", col = transparent("black", 0.3))
polygon(c(1:TT, TT:1),
c(colQuantiles(lambda_tobs, probs = 0.025),
rev(colQuantiles(lambda_tobs, probs = 0.975))),
col = transparent("dimgray", 0.5), border = NA)
lines(1:TT, colMedians(lambda_tobs), col = "dimgray", lwd = 3)
## @knitr
#---------------------------------------------------------------------------------
# Plot observation error distribution
#---------------------------------------------------------------------------------
dev.new(width = 7, height = 5)
## @knitr plot_geom_obs
# Illustrate the geometric observation-error-in-time distribution:
# probability gamma_it that an event whose true time is tau_i is recorded
# at time t_i. dgeom() returns 0 for t_i < tau_i, so the mass sits at and
# after the true time.
# NOTE(review): this rebinds `r` (also used as data in the stan fits above);
# harmless here since the fits have already run, but worth confirming the
# value matches the one used for simulation.
tau_i <- 50 # true time index
r <- 0.2 # probability parameter for geometric obs error in time
p_t_i <- dgeom(1:TT - tau_i, r) # P(t_i | tau_i, r)
par(mar = c(5.1,5.1,2,1))
barplot(p_t_i, las = 1, cex.axis = 1.2, cex.lab = 1.5,
col = "darkgray", border = "white", space = 0,
xlim = c(1,TT), xaxs = "i", xaxt = "n", ylim = range(p_t_i)*1.05,
xlab = bquote(italic(t)[italic(i)]), ylab = bquote(gamma[italic(it)]))
axis(1, at = c(1, seq(50, TT, 50)), cex.axis = 1.2)
rug(seq(0,TT,10)[seq(0,TT,10) %% 50 != 0], side = 1, ticksize = -0.02)
# blue arrow + label below the axis marking the true time tau_i
arrows(x0 = tau_i, y0 = -0.035, y1 = -0.025, col = "dodgerblue", length = 0.1, lwd = 2, xpd = NA)
text(tau_i, -0.042, labels = bquote(tau[italic(i)]), cex = 1.5, col = "dodgerblue", xpd = NA)
box()
## @knitr
#---------------------------------------------------------------------------------
# SAVE STANFIT OBJECTS
#---------------------------------------------------------------------------------
# Save every stanfit object in the workspace to a single .RData file.
# inherits() is used instead of `class(x) == "stanfit"`: class() may return a
# vector of length > 1, in which case the original
#   sapply(ls(), ...) == "stanfit"
# comparison silently misbehaves (sapply can return a list/matrix and the
# elementwise == no longer indexes ls() correctly). Filter() + inherits() is
# robust and keeps this as a single expression with no temporary globals.
save(list = Filter(function(nm) inherits(get(nm), "stanfit"), ls()),
     file = here("analysis","results","Poisson_SS.RData"))
|
8c810b82ce0dcfbedbb7f9e4c170f717396f2f3d
|
fa18ee2bcec08ba7dd950843cd3547e05eafcb16
|
/man/bh_defineTissue.Rd
|
18953f8a538ffb208a48d6de81afc29f6029a4ad
|
[] |
no_license
|
luigidolcetti/barbieHistologist
|
6da4174defd6228e67ab22c2e226d5bee96cb018
|
4f4bbcd939257d05bac8ec785a99e30d2ca1db93
|
refs/heads/master
| 2023-07-19T01:09:14.843315
| 2021-06-14T16:50:08
| 2021-06-14T16:50:08
| 347,299,283
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 535
|
rd
|
bh_defineTissue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tissue.R
\name{bh_defineTissue}
\alias{bh_defineTissue}
\title{Define a new tissue}
\usage{
bh_defineTissue(coords = NULL, resolution = NULL, bg = NULL, markers = NULL)
}
\arguments{
\item{coords}{numeric, x and y limits.}
\item{resolution}{numeric, resolution}
\item{bg}{numeric, value to use in the background}
\item{markers}{list, markers.}
}
\value{
An object of class tissue (a rasterStack).
}
\description{
helper function to create a new tissue.
}
|
519614d4758def87764df8cccc91a1c1ffc94c6b
|
cbcfee5e7c8512bce52125355bb84141adb9b6a9
|
/LMjw/man/myLasso.Rd
|
a97f13c819f1a5afc4e2d5a37306aae7141bc669
|
[] |
no_license
|
Alice86/StatsProgramming
|
f0252cbb8a6447de20f58a1c6a022f1b7b814b58
|
ca965f266c7d81d3e079f08964ff8622b08f59b1
|
refs/heads/master
| 2021-09-20T05:21:06.539465
| 2018-08-03T19:11:25
| 2018-08-03T19:11:25
| 109,034,894
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 895
|
rd
|
myLasso.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myLasso.R
\name{myLasso}
\alias{myLasso}
\title{Lasso regression}
\usage{
myLasso(X, Y, lambda_all)
}
\arguments{
\item{X}{design matrix}
\item{Y}{response vector}
\item{lambda_all}{vector of regularization parameters}
}
\description{
This function finds lasso solution path for various values of regularization parameter of Y regressed on X, return a matrix with each column corresponding to a regularization parameters.
}
\examples{
n <- 50
p <- 25
X <- matrix(rnorm(n * p), nrow = n)
beta <- c(rep(0,20),rep(1,5))
Y <- X \%*\% beta+rnorm(n)
lambda_all <- (100:1)/10
beta_all <- myLasso(X,Y,lambda_all)
beta_sum <- t(matrix(rep(1, (p+1)), nrow = 1)\%*\%abs(beta_all))
matplot(beta_sum, t(beta_all), type = 'l', lty = 1,xlab="|beta|",ylab="beta")
text(max(beta_sum),beta_all[,length(lambda_all)],0:p,cex=1,col=1:p)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.