blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b9e93212aaab5484ac5a92ad91f0a614fbb1cd1
|
f3244f77367f735f30833a5b6e2b69321a28dfb5
|
/R/chronique.figure.R
|
51f8983a563b61df21c3155b5a1a06e22cea4815
|
[] |
no_license
|
jbfagotfede39/aquatools
|
3f36367668c6848ddd53950708222fd79f0e3b7a
|
9c12f80919790ec3d0c1ee7f495e9c15cc2c9652
|
refs/heads/master
| 2023-08-03T09:10:17.314655
| 2023-07-25T08:32:45
| 2023-07-25T08:32:45
| 46,334,983
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,717
|
r
|
chronique.figure.R
|
#' Représentation de chroniques
#'
#' Cette fonction permet de représenter des chroniques de mesures (température, niveaux, etc.)
#' @name chronique.figure
#' @param data Data.frame contenant a minima une colonne chmes_date et une colonne chmes_valeur
#' @param Titre Titre du graphique (vide par défaut)
#' @param typemesure Ignoré si le champ chmes_typemesure est présent dans data. Défini le type de données et modifie les légendes en fonction (\code{Thermie}, \code{Thermie barométrique}, \code{Thermie piézométrique}, \code{Barométrie}, \code{Piézométrie}, \code{Piézométrie brute}, \code{Piézométrie compensée}, \code{Piézométrie NGF}, \code{Oxygénation}, \code{Hydrologie}, \code{Pluviométrie}.
#' @param duree Si \code{Complet} (par défault), affichage de l'année complète. Si \code{Relatif}, affichage uniquement de la période concernée.
#' @param complement Si \code{TRUE}, complément de la chronique avec les données manquantes (\code{FALSE} par défaut)
#' @param Vmm30j Si \code{FALSE} (par défault), n'affiche pas les
#' Vmm30j. Si \code{TRUE}, les affiche.
#' @param Vminmax Si \code{TRUE} (par défault), affiche pas les
#' valeurs journalières minimales et maximales. Si \code{FALSE}, ne les affiche pas.
#' @param Ymin Valeur minimale de l'axe des Y (-1 par défaut)
#' @param Ymax Valeur maximale de l'axe des Y (aucune par défaut)
#' @param save Si \code{FALSE} (par défault), n'enregistre pas les
#' figures. Si \code{TRUE}, les enregistre.
#' @param projet Nom du projet
#' @param format Défini le format d'enregistrement (par défaut .png)
#' @keywords chronique
#' @import tidyverse
#' @export
#' @examples
#' chronique.figure(data)
#' chronique.figure(data = mesdonnees, typemesure = "Thermie", duree = "Complet")
#' chronique.figure(data = tableaudonnee, Titre=nom, typemesure = "Barométrie", save=T, format=".png")
chronique.figure <- function(
  data = data,
  Titre = "",
  typemesure = c("Thermie", "Thermie barométrique", "Thermie piézométrique", "Barométrie", "Piézométrie", "Piézométrie brute", "Piézométrie compensée", "Piézométrie calée", "Piézométrie NGF", "Oxygénation", "Hydrologie", "Pluviométrie"),
  duree = c("Complet", "Relatif"),
  complement = FALSE,
  Vmm30j = FALSE,  # spelled-out logical (was F; T/F are reassignable)
  Vminmax = TRUE,  # NOTE(review): not referenced anywhere in the body — confirm intended behaviour
  Ymin = -1,
  Ymax = NA,
  save = FALSE,
  projet = NA_character_,
  format = ".png")
{
  ##### -------------- TODO -------------- #####
  # - Add optional horizontal reference lines (tempmin, tempmax, tempmaxextreme)
  # - Add switches to fix (or not) the X axis (date) limits
  # - Change the max/min/moy order in the legend to Max/Moy/Min
  # - Keep chmes_validation after aggregation when present, so chronicles could
  #   later be drawn with different colours depending on validation status
  # ------------------------------------------- #

  ## Argument validation: keep a single value among the allowed choices
  typemesure <- match.arg(typemesure)
  duree <- match.arg(duree)

  ##### Data formatting #####
  ## Date coercion — inherits() is safer than class(x) != "Date" since class()
  ## may return more than one value
  if(!inherits(data$chmes_date, "Date")){ # data$chmes_date <- as.Date(data$chmes_date,format="%Y-%m-%d") # old format, 2019-04-08
    data$chmes_date <- ymd(data$chmes_date)}

  #### Consistency check ####
  # Piezometric data in kPa would be drawn on a cm-of-water scale — warn the user
  if("chmes_unite" %in% colnames(data)){if(data %>% dplyr::filter(chmes_unite == "kPa") %>% count() %>% pull() != 0) warning("Attention pour les piézo, des données en kPa seront représentées avec une échelle en cm d'eau, alors que 1 kPa = 10,1972 cm d'H2O")}

  ##### Chronicle context #####
  # Number of stations #
  if("chmes_coderhj" %in% colnames(data)) Contexte <- tibble(nStations = n_distinct(data$chmes_coderhj))
  if("chmes_coderhj" %in% colnames(data) == FALSE){
    Contexte <- tibble(nStations = 1)
    data <- data %>% mutate(chmes_coderhj = NA)
  }
  if("chmes_typemesure" %in% colnames(data) == FALSE){
    data <- data %>% mutate(chmes_typemesure = typemesure)
  }
  if(Contexte$nStations == 0) stop("Aucune donnée dans la chronique à analyser")
  if(Contexte$nStations > 1) warning("Différentes stations dans la chronique à analyser - Cas en test à partir du rapport N2000 Vogna et de 2019-05-15_Calcul_résultats_chroniques_Vouglans.R")

  # chmes_typemesure: the dataset must contain a single measurement type
  if(testit::has_error(data %>%
                         distinct(chmes_typemesure) %>%
                         bind_cols(Contexte)) == TRUE) stop("Plusieurs chmes_typemesure au sein du jeu de données")
  Contexte <-
    data %>%
    distinct(chmes_typemesure) %>%
    bind_cols(Contexte)
  typemesure <- Contexte$chmes_typemesure

  # nJours: number of distinct days covered by the chronicle
  Contexte$nJours <- n_distinct(data$chmes_date)

  #### Remarkable daily values ####
  # chronique.agregation() returns a list; element 2 holds the daily synthesis
  if(Contexte$nStations == 1){
    if(complement == FALSE) DataTravail <- chronique.agregation(data)
    if(complement == TRUE) DataTravail <- chronique.agregation(data, complement = TRUE)
    syntjour <- DataTravail %>% purrr::pluck(2)
  }
  if(Contexte$nStations > 1){
    listeStations <- distinct(data, chmes_coderhj) %>% dplyr::pull()
    for(i in seq_len(Contexte$nStations)){
      DataTravail <- data %>% dplyr::filter(chmes_coderhj == listeStations %>% purrr::pluck(i))
      if(complement == FALSE){DataTravail <- chronique.agregation(DataTravail)}
      if(complement == TRUE){DataTravail <- chronique.agregation(DataTravail, complement = TRUE)}
      if(i == 1){syntjour <- DataTravail %>% purrr::pluck(2)}
      if(i != 1){syntjour <- syntjour %>% dplyr::union(DataTravail %>% purrr::pluck(2))}
    }
  }

  ## Vmm30j (maximum 30-day moving mean) ##
  if(Contexte$nJours < 30 & Vmm30j == TRUE){
    Vmm30j <- FALSE
    warning("Durée inférieure à 30 jours : pas d'affichage de la Vmm30j")
  }
  if(Vmm30j == TRUE & Contexte$nStations == 1){
    ### 30-day moving sum of the daily maxima
    syntjourSansAgregation <- syntjour %>% dplyr::filter(!is.na(VMaxJ)) # in case the dataset was complemented with missing days
    # There are n - 29 windows of 30 consecutive days (was n - 30: off-by-one,
    # the loop silently grew the vector by one element)
    cumuleVMaxJ <- numeric(length(syntjourSansAgregation$VMaxJ)-29)
    for (i in seq_along(syntjourSansAgregation$VMaxJ)){
      if (i+29<=length(syntjourSansAgregation$VMaxJ)) cumuleVMaxJ[i]<-sum(syntjourSansAgregation$VMaxJ[i:(i+29)])}
    VMaxMoy30J <- round(max(cumuleVMaxJ)/30,1)
    DateDebutVMaxMoy30J <- syntjourSansAgregation$chmes_date[which(cumuleVMaxJ==max(cumuleVMaxJ))]
    DateFinVMaxMoy30J <- syntjourSansAgregation$chmes_date[which(cumuleVMaxJ==max(cumuleVMaxJ))+29]
    # Coordinates for a clean display of the Tmm30j label and segment
    data.label <- data.frame(
      xdeb = DateDebutVMaxMoy30J,
      xfin = DateFinVMaxMoy30J,
      xtext = DateDebutVMaxMoy30J,
      ytmm = VMaxMoy30J,
      ytext = VMaxMoy30J+2,
      label = "Tmm30j"
    )
  }

  #### Legend settings depending on typemesure ####
  if(typemesure == "Thermie" | typemesure == "Thermie barométrique" | typemesure == "Thermie piézométrique"){
    legendeY <- "Température (°C)"
    legendeTitre <- "Températures :"
    typemesureTitreSortie <- "_thermie_"
  }
  if(typemesure == "Barométrie"){
    legendeY <- "Pression atmosphérique (kPa)"
    legendeTitre <- "Barométrie :"
    typemesureTitreSortie <- "_barométrie_"
  }
  if(typemesure == "Piézométrie" | typemesure == "Piézométrie brute" | typemesure == "Piézométrie compensée" | typemesure == "Piézométrie calée"){
    legendeY <- "Hauteur d'eau (cm)"
    legendeTitre <- "Piézométrie :"
    typemesureTitreSortie <- "_piézométrie_"
  }
  if(typemesure == "Piézométrie NGF"){
    legendeY <- "Hauteur d'eau (NGF)"
    legendeTitre <- "Piézométrie :"
    typemesureTitreSortie <- "_piézométrie_"
  }
  if(typemesure == "Oxygénation"){
    legendeY <- expression(Oxygene~dissous~(mg~O[2]/L))
    legendeTitre <- "Oxygénation :"
    typemesureTitreSortie <- "_oxygénation_"
  }
  if(typemesure == "Hydrologie"){
    legendeY <- expression(Débit~(m^3/s))
    legendeTitre <- "Hydrologie :"
    typemesureTitreSortie <- "_hydrologie_"
  }
  if(typemesure == "Pluviométrie"){
    legendeY <- expression(Précipitations~(L/m^2))
    legendeTitre <- "Pluviométrie :"
    typemesureTitreSortie <- "_pluviométrie_"
  }

  #### Palette ####
  # NOTE(review): data() loads PaletteSite into the calling environment as a side effect
  data(PaletteSite)

  ##### Plot, relative time over the data sample #####
  ## Greyed version with an envelope (daily min/max) on a light background ##
  plotrelatif <- ggplot(syntjour, aes(chmes_date))
  if(Contexte$nStations == 1) plotrelatif <- plotrelatif + geom_ribbon(aes(ymin = VMinJ, ymax = VMaxJ), alpha=0.2)
  if(Contexte$nStations != 1) plotrelatif <- plotrelatif + geom_line(aes(y = VMoyJ, colour = chmes_coderhj))
  if(Contexte$nStations != 1) plotrelatif <- plotrelatif + scale_colour_manual(values = PaletteSite)
  if(Vmm30j == TRUE & Contexte$nStations == 1){
    plotrelatif <- plotrelatif + geom_text(data = data.label, aes(x = xtext , y = ytext , label = label ), size = 4, color = "red", fontface="bold")
    plotrelatif <- plotrelatif + geom_segment(data = data.label, aes(x = xdeb, y = ytmm, xend = xfin, yend = ytmm), color = "red", size = 2)
  }
  # Fewer than 9 distinct months: plain date labels; otherwise add monthly minor breaks
  if(length(unique(format(syntjour$chmes_date,"%m"))) < 9) plotrelatif <- plotrelatif + scale_x_date(date_labels = "%b %Y")
  if(length(unique(format(syntjour$chmes_date,"%m"))) >= 9) plotrelatif <- plotrelatif + scale_x_date(date_labels = "%b %Y", date_minor_breaks = "1 month")
  if(is.na(Ymax) == FALSE & is.na(Ymin) == TRUE) plotrelatif <- plotrelatif + ylim(0,as.numeric(Ymax))
  if(is.na(Ymax) == FALSE & is.na(Ymin) == FALSE) plotrelatif <- plotrelatif + ylim(as.numeric(Ymin),as.numeric(Ymax))
  plotrelatif <- plotrelatif + labs(x = "", y = legendeY, title=Titre, color = legendeTitre) # title and legend labels
  plotrelatif <- plotrelatif + theme_bw()
  if(duree == "Relatif"){
    plotrelatif
    if(save==TRUE){
      if(is.na(Ymax) == TRUE & is.na(Ymin) == TRUE & Vmm30j == FALSE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_relatif-libre/relatif-libre",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == TRUE & is.na(Ymin) == TRUE & Vmm30j == TRUE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_relatif-libre/relatif-libre-vmm30j",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == FALSE & is.na(Ymin) == FALSE & Vmm30j == FALSE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_relatif-fixe/relatif-fixe",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == FALSE & is.na(Ymin) == FALSE & Vmm30j == TRUE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_relatif-fixe/relatif-fixe-vmm30j",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == FALSE & is.na(Ymin) == TRUE) warning("cas d'export de la figure plotrelatif avec Ymax fixe et Ymin libre non programmé")
      if(is.na(Ymax) == TRUE & is.na(Ymin) == FALSE) warning("cas d'export de la figure plotrelatif avec Ymax libre et Ymin fixe non programmé")
    }
    if(save==FALSE){return(plotrelatif)}
  }

  ##### Plot, absolute time over one (hydrological) year ####
  ## Greyed version with an envelope (daily min/max) on a light background ##
  # if(length(unique(format(syntjour$chmes_date,"%Y"))) == 1){
  plotabsolu <- ggplot(syntjour, aes(chmes_date))
  if(Contexte$nStations == 1) plotabsolu <- plotabsolu + geom_ribbon(aes(ymin = VMinJ, ymax = VMaxJ), alpha=0.2)
  if(Contexte$nStations != 1) plotabsolu <- plotabsolu + geom_line(aes(y = VMoyJ, colour = chmes_coderhj))
  if(Contexte$nStations != 1) plotabsolu <- plotabsolu + scale_colour_manual(values = PaletteSite)
  if(Vmm30j == TRUE & Contexte$nStations == 1){
    plotabsolu <- plotabsolu + geom_text(data = data.label, aes(x = xtext , y = ytext , label = label ), size = 4, color = "red", fontface="bold")
    plotabsolu <- plotabsolu + geom_segment(data = data.label, aes(x = xdeb, y = ytmm, xend = xfin, yend = ytmm), color = "red", size = 2)
  }
  # NOTE(review): lower bound here is -1 whereas plotrelatif uses 0 — confirm intended
  if(is.na(Ymax) == FALSE & is.na(Ymin) == TRUE) plotabsolu <- plotabsolu + ylim(-1,as.numeric(Ymax))
  if(is.na(Ymax) == FALSE & is.na(Ymin) == FALSE) plotabsolu <- plotabsolu + ylim(as.numeric(Ymin),as.numeric(Ymax))
  # X limits: October 1st to September 30th (one or several years spanned)
  if(length(unique(format(syntjour$chmes_date,"%Y"))) < 2) plotabsolu <- plotabsolu + scale_x_date(date_minor_breaks = "1 month", limits = as.Date(c(paste0(as.numeric(format(syntjour$chmes_date[1],"%Y"))-1,"-10-01"),paste0(format(syntjour$chmes_date[length(syntjour$chmes_date)],"%Y"),"-09-30"))))
  if(length(unique(format(syntjour$chmes_date,"%Y"))) >= 2) plotabsolu <- plotabsolu + scale_x_date(date_minor_breaks = "1 month", limits = as.Date(c(paste0(format(syntjour$chmes_date[1],"%Y"),"-10-01"),paste0(format(syntjour$chmes_date[length(syntjour$chmes_date)],"%Y"),"-09-30"))))
  plotabsolu <- plotabsolu + labs(x = "", y = legendeY, title=Titre, color = legendeTitre) # title and legend labels
  plotabsolu <- plotabsolu + theme_bw()
  # }
  if(duree == "Complet"){
    plotabsolu
    if(save==TRUE){
      if(is.na(Ymax) == TRUE & is.na(Ymin) == TRUE & Vmm30j == FALSE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_absolu-libre/absolu-libre",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == TRUE & is.na(Ymin) == TRUE & Vmm30j == TRUE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_absolu-libre/absolu-libre-vmm30j",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == FALSE & is.na(Ymin) == FALSE & Vmm30j == FALSE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_absolu-fixe/absolu-fixe",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == FALSE & is.na(Ymin) == FALSE & Vmm30j == TRUE) ggsave(file=paste0(projet,"/Sorties/Vues/Annuelles_absolu-fixe/absolu-fixe-vmm30j",typemesureTitreSortie,Titre,format))
      if(is.na(Ymax) == FALSE & is.na(Ymin) == TRUE) warning("cas d'export de la figure plotabsolu avec Ymax fixe et Ymin libre non programmé")
      if(is.na(Ymax) == TRUE & is.na(Ymin) == FALSE) warning("cas d'export de la figure plotabsolu avec Ymax libre et Ymin fixe non programmé")
    }
    if(save==FALSE){return(plotabsolu)}
  }
} # End of function
|
e89d5a9130c998e1544ee84824412f0ca224dcf4
|
6fe3cd1cfbfc0f0bb33338d1c509569dff58f9af
|
/01-bulletchart.R
|
17d4ebe44d972389da1d58a8a35d40be4acecc12
|
[] |
no_license
|
ssa2121/mvp01
|
b428f56097ea9b8463f482ee6ce1953b89eada86
|
83b36afcf69dcc32851c7279931e7712cfc1a89a
|
refs/heads/master
| 2021-05-15T11:35:00.604624
| 2017-06-08T18:43:19
| 2017-06-08T18:43:19
| 106,470,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,988
|
r
|
01-bulletchart.R
|
bullet.graph <- function(bg.data){
  # Axis ticks: zero, the midpoint, and the largest "high" value
  tick_top <- max(bg.data$high)
  tick_mid <- tick_top / 2

  # Build the bullet chart in one layered expression: three background bands
  # (high / mean / low), the measured value bar, the red target marker, then
  # flip the coordinates so the bars run horizontally.
  ggplot(bg.data) +
    geom_bar(aes(measure, high), fill="#eeeeee", stat="identity", width=0.5, alpha=0.2) +
    geom_bar(aes(measure, mean), fill="#dddddd", stat="identity", width=0.5, alpha=0.2) +
    geom_bar(aes(measure, low), fill="#cccccc", stat="identity", width=0.5, alpha=0.2) +
    geom_bar(aes(measure, value), fill="black", stat="identity", width=0.2) +
    geom_errorbar(aes(y=target, x=measure, ymin=target, ymax=target), color="red", width=0.45) +
    geom_point(aes(measure, target), colour="red", size=2.5) +
    scale_y_continuous(breaks=seq(0, tick_top, tick_mid)) +
    coord_flip() +
    # Minimalist theme: no axis titles, grid, panel or plot background
    theme(axis.text.x=element_text(size=5),
          axis.title.x=element_blank(),
          axis.line.y=element_blank(),
          axis.text.y=element_text(hjust=1, color="black"),
          axis.ticks.y=element_blank(),
          axis.title.y=element_blank(),
          legend.position="none",
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
}
# test it out!
# 5/1 seems to be a good ratio for individual bullet graphs but you
# can change it up to fit your dashboard needs
# incidents.pct <- data.frame(
# measure=c("Total Events (%)", "Security Events (%)", "Filtered (%)", "Tickets (%)"),
# high=c(100,100,100,100),
# mean=c(45,40,50,30),
# low=c(25,20,10,5),
# target=c(55,40,45,35),
# value=c(50,45,60,25)
# )
# incidents.pct.bg <- bullet.graph(incidents.pct)
# incidents.pct.bg
# ggsave("incident-total-events-pct.pdf", incidents.pct.bg, width=10, height=5)
|
16720aa0c177bb857112d798946e5285551fb50b
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/MGDrivE/man/calcLarvalPopEquilibrium.Rd
|
522fab9cb2b854f375d39514f2789587ef75a34f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 469
|
rd
|
calcLarvalPopEquilibrium.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Network-Parameters.R
\name{calcLarvalPopEquilibrium}
\alias{calcLarvalPopEquilibrium}
\title{Calculate Equilibrium Larval Population}
\usage{
calcLarvalPopEquilibrium(alpha, Rm)
}
\arguments{
\item{alpha}{See \code{\link{calcDensityDependentDeathRate}}}
\item{Rm}{See \code{\link{calcPopulationGrowthRate}}}
}
\description{
Equilibrium larval population size to sustain adult population.
}
|
026fcafc3d670f85203d284ac52a7d3e975b4afe
|
7e2550b0dd1d770603c25a62e51967b6be1b34c5
|
/scripting.R
|
28f86bb1a9f193e759a47bcdf0b12552dc2bc7f3
|
[
"MIT"
] |
permissive
|
zivafajfar/R
|
7f4207df2341ea6eec49a23ae33bc79240eaf915
|
9013356ae682703c2dbb033d67d7815ab61172d1
|
refs/heads/master
| 2020-04-09T12:28:26.774715
| 2018-12-04T12:04:13
| 2018-12-04T12:04:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
scripting.R
|
# Read the dataset from a local CSV file
# NOTE(review): absolute path is machine-specific — consider a relative path
MyData <- read.csv(file="/home/zaki/Desktop/countries_R.csv",
header=TRUE, sep=",")
# First visualisation with the `ggvis` package: scatter plot of Nodes vs Server;
# the second `layer_points` overlays points coloured by Company
MyData %>%
ggvis(~Server, ~Nodes) %>%
layer_points() %>%
layer_points(fill = ~Company)
# Second visualisation with `ggvis`: Nodes vs Id as small hollow diamonds with a red stroke
MyData %>%
ggvis(~Id, ~Nodes) %>%
layer_points(size := 25, shape := "diamond", stroke := "red", fill := NA)
# Third visualisation with `ggvis`: boxplots of Nodes per Id, with points coloured by Company
MyData %>% ggvis(~Id, ~Nodes) %>%
layer_boxplots() %>%
layer_points(fill = ~Company)
# Fourth visualisation with `ggvis`: semi-transparent sky-blue lines plus large
# triangular points coloured by Company
MyData %>%
ggvis(~Id, ~Nodes, stroke := "skyblue",
strokeOpacity := 0.5, strokeWidth := 5) %>%
layer_lines() %>%
layer_points(fill = ~Company,
shape := "triangle-up",
size := 300)
# Box plot of Nodes by Id using base R graphics (boxplot(), not ggplot2)
# NOTE(review): xlab and ylab are both "Nodes" — xlab should probably be "Id"; confirm
boxplot(Nodes~Id,data=MyData, main="Nodes per Company using Hadoop",
xlab="Nodes", ylab="Nodes")
|
5979a19f23e7cb33ba6244f6ff663b36587e81f6
|
20e6d8ae47f494d1b4e4fecbdae1f523fc23de45
|
/scripts/PRS_UKB_201711_step18-01-02_adjust-significance-threshold-for-multiple-testing-in-a-single-data-set_pheno-group4-7.R
|
a7a5898422979f8ffe65ce3097dc2c5673ed4b14
|
[] |
no_license
|
yanch86/PhD_polygenic_risk_score_analysis
|
9db5e8bd2387aff5a892a60f0b53beea15ff2ed8
|
f8db0816eb6b53e968143cf08630ec273ad0b18c
|
refs/heads/master
| 2022-04-11T17:45:03.674782
| 2020-03-21T05:12:50
| 2020-03-21T05:12:50
| 290,157,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,432
|
r
|
PRS_UKB_201711_step18-01-02_adjust-significance-threshold-for-multiple-testing-in-a-single-data-set_pheno-group4-7.R
|
#-------------------------------------------------------------------------------------------------
# Program : PRS_UKB_201711_step18-01-02_adjust-significance-threshold-for-multiple-testing-in-a-single-data-set_pheno-group4-7.R
# Modified from : PRS_UKB_201711_step18-01-01_adjust-significance-threshold-for-multiple-testing-in-a-single-data-set_pheno-group2-5.R
# Date created : 20180529
# Purpose : Calculate correlation between any 2 of 8 target phenotypes (project 3, 19Up) as matrix 1
# Calculate correlation between any 2 of 6 target phenotypes (project 3, adults) as matrix 2
# Calculate correlation between any 2 of 5 GSCAN discovery PRSs as a matrix
# Get number of independent variables and significance threhold for the 2 matrixes
# Count number of trait-PRS assoications that survive different p value thresholds
# Note:
#----------------------------------------------------------------------------------------
# Run dependency: /mnt/backedup/home/lunC/scripts/SNPSpD/matSpDlite.R PRS_UKB_201711_step18-04_heatmap_variance-explained-by-PRS_r-square_p-value.R
# function external: multiple_testing()
# Type File
#-----------------------------------------------------------------------------------------------------
# Input paste0(locPheno,"pheno4GSCANPhenotypes-IDremapped_standardised-IDremapped-PRS-GSCAN.txt")
# Input paste0(locPheno,"pheno7AdultsNicotineDependenceAndMore-IDremapped_standardised-IDremapped-PRS-GSCAN.txt")
# Output paste0(locPheno,"multiple-testing_pheno-corr-matrix_QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more_all-sexes.txt")
# Output paste0(locPheno,"multiple-testing_pheno-corr-matrix_QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more_males-only.txt")
# Output paste0(locPheno,"multiple-testing_pheno-corr-matrix_QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more_females-only.txt")
# Output paste0(locPheno,"multiple-testing_pheno-corr-matrix_GSCAN-PRS_QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more_all-sexes.txt")
# Output paste0(locPheno,"multiple-testing_pheno-corr-matrix_GSCAN-PRS_QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more_females-only.txt")
# Output paste0(locPheno,"multiple-testing_pheno-corr-matrix_GSCAN-PRS_QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more_males-only.txt")
#----------------------------------------------------------------------------------------
# Sys.Date() History
#----------------------------------------------------------------------------------------
# 20190102 Exported the 6 files above
# 2018-12-05 Exported the 4 files above
#----------------------------------------------------------------------------------------
library(dplyr)
# Input file location
homeDir <- "/mnt/backedup/home/lunC/"
locRFunction <- paste0(homeDir,"scripts/RFunctions/")
locScripts <- paste0(homeDir,"scripts/PRS_UKB_201711/")
locSNPSpD <- paste0(homeDir,"scripts/SNPSpD/")
workingDir <- "/mnt/lustre/working/lab_nickm/lunC/"
locPRS <- paste0(workingDir,"PRS_UKB_201711/");
locPheno <- paste0(locPRS,"phenotypeData/");
locPlots <- paste0(homeDir,"plots/");
locGCTA <- paste0(locPRS,"GCTA/");
folderName_phenotypeGroup4 <- "phenoGroup4_GSCAN-phenotypes"
folderName_phenotypeGroup7 <- "phenoGroup7_adults-nicotine-dependence-and-more"
input_phenotypeGroup4 <- paste0(locGCTA,"output_tabulated/",folderName_phenotypeGroup4,"/")
input_phenotypeGroup7 <- paste0(locGCTA,"output_tabulated/",folderName_phenotypeGroup7,"/")
#-----------------------------------------------------------------------------
# Import phenotype data files for the 2 datasets used in manuscript 3
## phenotypes from group 4: alcohol (2), tobacco (4) variables (GSCAN phenotypes)
## phenotypes from group 7: 9 binary diagnoses
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Import phenotype data files for the single data set used in manuscript 3
# Import phenotype group 4
#-----------------------------------------------------------------------------
source(paste0(locRFunction,"RFunction_import_export_single_file.R"))
IDRemappedPhenoFile.IDRemappedPRSFile.pheno4 <- "pheno4GSCANPhenotypes-IDremapped_standardised-IDremapped-PRS-GSCAN_sex-PRS-interaction.tsv"
columns_to_select <- c("ID","GSCAN_Q1","GSCAN_Q2_recode","GSCAN_Q3_recode","GSCAN_Q4"
,"GSCAN_Q5_Drinks_per_week","GSCAN_Q6_recode")
# Subset target phenotype data from all sexes per phenotype group 4
## FAMID, ID and related columns should be correctly read as character, preserving their leading zeros
## https://stackoverflow.com/questions/2805357/specifying-colclasses-in-the-read-csv
data4 <- read.table(file=paste0(locPheno,IDRemappedPhenoFile.IDRemappedPRSFile.pheno4)
,header = TRUE
,sep="\t"
,stringsAsFactors = F
,na.strings = c("NA")
,colClasses = c(rep("character",times=6),rep("numeric",times=102)))
data4$ID <- stringr::str_pad(data4$ID, width=7,side="left",pad="0")
data.pheno.gp4.allSexes <- data4 %>% select_(.dots=columns_to_select) # dim(data.pheno.gp4.allSexes) 13654 7
# Subset target phenotype data from males per phenotype group 4
data.pheno.gp4.males <- data4 %>%
filter(grepl("1",sex)) %>%
select_(.dots=columns_to_select) # dim(data.pheno.gp4.males) 5603 7
# Subset target phenotype data from females per phenotype group 4
data.pheno.gp4.females <- data4 %>%
filter(grepl("2",sex)) %>%
select_(.dots=columns_to_select) # dim(data.pheno.gp4.females) 8051 7
# Subset PRS columns, excluding S8, per phenotype group 4, from 3 sex groups: all sexes, males, females
## Subset ID (column 2) and PRS columns (names with prefix GSCAN)
## Exclude PRS calculated at p value < 1 (S8 group)
pattern.PRS <- "^GSCAN.*.S[1-7]$"
PRS.columns.exclu.S8 <- grep(pattern.PRS,colnames(data4),value = TRUE)
data.PRS.phenoGp4.exclu.S8.allSexes <- data4 %>% select_(.dots=c("ID",PRS.columns.exclu.S8)) # dim(data.PRS.phenoGp4.exclu.S8.allSexes) 13654 36
data.PRS.phenoGp4.exclu.S8.males <- data4 %>% filter(grepl("1",sex)) %>% select_(.dots=c("ID",PRS.columns.exclu.S8)) # dim(data.PRS.phenoGp4.exclu.S8.males) 5603 36
data.PRS.phenoGp4.exclu.S8.females <- data4 %>% filter(grepl("2",sex)) %>% select_(.dots=c("ID",PRS.columns.exclu.S8)) # dim(data.PRS.phenoGp4.exclu.S8.females) 8051 36
#-----------------------------------------------------------------------------
# Import phenotype data files for the single data set used in manuscript 3
# Import phenotype group 7
#-----------------------------------------------------------------------------
IDRemappedPhenoFile.IDRemappedPRSFile.pheno7 <- "pheno7AdultsNicotineDependenceAndMore-IDremapped_standardised-IDremapped-PRS-GSCAN_sex-PRS-interaction.tsv"
columns_to_select=c("ID","nicdep4","aspddx4","depdx","dsmiv_conductdx","ftnd_dep","mania_scrn","alcdep4","panic4","sp_dsm4")
# Subset data from all sexes per phenotype group 7
data.pheno.gp7.allSexes <- ImportATabSeparatedFile(input.file.path = paste0(locPheno,IDRemappedPhenoFile.IDRemappedPRSFile.pheno7)
,data.name = "data7") %>%
select_(.dots=columns_to_select) # dim(data.pheno.gp7.allSexes) 8240 10
# Subset data from males per phenotype group 7
data.pheno.gp7.males <- data7 %>%
filter(grepl("1",sex)) %>%
select_(.dots=columns_to_select) # dim(data.pheno.gp7.males) 3590 10
# Subset data from females per phenotype group 7
data.pheno.gp7.females <- data7 %>%
filter(grepl("2",sex)) %>%
select_(.dots=columns_to_select) # dim(data.pheno.gp7.females) 4358 10
# Subset PRS columns, excluding S8, per phenotype group 7, from 3 sex groups: all sexes, males, females
## Subset ID (column 2) and PRS columns (names with prefix GSCAN)
## Exclude PRS calculated at p value < 1 (S8 group)
pattern.PRS <- "^GSCAN.*.S[1-7]$"
PRS.columns.exclu.S8 <- grep(pattern.PRS,colnames(data7),value = TRUE)
data.PRS.phenoGp7.exclu.S8.allSexes <- data7 %>% select_(.dots=c("ID",PRS.columns.exclu.S8)) # dim(data.PRS.phenoGp7.exclu.S8.allSexes) 8240 36
data.PRS.phenoGp7.exclu.S8.males <- data7 %>% filter(grepl("1",sex)) %>% select_(.dots=c("ID",PRS.columns.exclu.S8)) # dim(data.PRS.phenoGp7.exclu.S8.males) 3590 36
data.PRS.phenoGp7.exclu.S8.females <- data7 %>% filter(grepl("2",sex)) %>% select_(.dots=c("ID",PRS.columns.exclu.S8)) # dim(data.PRS.phenoGp7.exclu.S8.females) 4358 36
#------------------------------------------------------------------------------------------------
#------------------Combine phenotype columns from all phenotype groups for manuscript 3
#-------------------- Perform a full outer join for target phenotype per group 4, 7
#------------------------------------------------------------------------------------------------
## Full-join phenotype data from all sexes using column "ID" as merging key for manuscript 2
data.pheno.manu3.allSexes.list <- list(data.pheno.gp4.allSexes,data.pheno.gp7.allSexes)
data.pheno.manu3.allSexes.IDrm <- plyr::join_all(data.pheno.manu3.allSexes.list,by=c("ID"),type ="full") %>%
dplyr::select(-one_of("ID")) # dim(data.pheno.manu3.allSexes.IDrm) 13999 15
## Full-join phenotype data from females using column "ID" as merging key for manuscript 2
data.pheno.manu3.females.IDrm <- plyr::join_all(list(data.pheno.gp4.females,data.pheno.gp7.females)
,by=c("ID"),type ="full") %>%
dplyr::select(-one_of("ID")) # dim(data.pheno.manu3.females.IDrm) 8096 15
## Full-join phenotype data from males using column "ID" as merging key for manuscript 2
data.pheno.manu3.males.IDrm <- plyr::join_all(list(data.pheno.gp4.males,data.pheno.gp7.males)
,by=c("ID"),type ="full") %>%
dplyr::select(-one_of("ID")) # dim(data.pheno.manu3.males.IDrm) 5624 15
#------------------------------------------------------------------------------------------------
# Stack PRS columns from all phenotype groups for manuscript 3 for 3 sex groups: all sexes, males, females
## Note: values of PRSs are pertinent to an ID's genotype, rather than their phenotypes surveys
#------------------------------------------------------------------------------------------------
# Vertically combine PRS columns from all phenotype groups per manu3 for all sexes
# Remove duplicate rows of the dataframe using ID column
data.PRS.manu3.exclu.S8.allSexes.IDunique.IDrm <- rbind(data.PRS.phenoGp4.exclu.S8.allSexes
,data.PRS.phenoGp7.exclu.S8.allSexes) %>%
dplyr::distinct(ID, .keep_all= TRUE) %>%
dplyr::select(-one_of("ID")) # dim(data.PRS.manu3.exclu.S8.allSexes.IDunique.IDrm) 13999 35
# Vertically combine PRS columns from all phenotype groups per manu3 for males
# Remove duplicate rows of the dataframe using ID column
data.PRS.manu3.exclu.S8.males.IDunique.IDrm <- rbind(data.PRS.phenoGp4.exclu.S8.males
,data.PRS.phenoGp7.exclu.S8.males) %>%
dplyr::distinct(ID, .keep_all= TRUE) %>%
dplyr::select(-one_of("ID")) # dim(data.PRS.manu3.exclu.S8.males.IDunique.IDrm) 5624 35
# Vertically combine PRS columns from all phenotype groups per manu3 for females
# Remove duplicate rows of the dataframe using ID column
data.PRS.manu3.exclu.S8.females.IDunique.IDrm <- rbind(data.PRS.phenoGp4.exclu.S8.females
,data.PRS.phenoGp7.exclu.S8.females) %>%
dplyr::distinct(ID, .keep_all= TRUE) %>%
dplyr::select(-one_of("ID")) # dim(data.PRS.manu3.exclu.S8.females.IDunique.IDrm) 8096 35
#--------------------------------------------------------------------------------------------------------
# Calculate correlation between any 2 of all target phenotypes per manu3 for all sexes
# Perform multiple testing using Dale Nyholt's algorithm, which generates effective number of independent phenotypes
## N: Effective Number of Independent Variables [VeffLi] (using Equation 5 of Li and Ji 2005)
## P: Experiment-wide Significance Threshold Required to Keep Type I Error Rate at 5%
#---------------------------------------------------------------------------------------------------------
source(paste0(locRFunction,"RFunction_correlation-matrix.R"))
manu3.sample.phenotypes.ID <- "QIMR-adults-aged-20-90_GSCAN-phenotypes_nicotine-alcohol-dependence-and-more"
out.file.name.rP.matrix.manu3.allSexes <- paste0("pheno-corr-matrix_",manu3.sample.phenotypes.ID,"_all-sexes")
CalculateCorrBetween2Variables(input.data=data.pheno.manu3.allSexes.IDrm
,correlation.method="spearman"
,output.file.path=paste0(locPheno
, out.file.name.rP.matrix.manu3.allSexes
,".txt"))
source(paste0(locSNPSpD,"matSpDlite.R"))
# Correction for target phenotypes per manu3, all sexes
## Analyse 15 target phenotypes
input.file.name.rP.matrix.manu3.allSexes <- out.file.name.rP.matrix.manu3.allSexes
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.rP.matrix.manu3.allSexes,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.rP.matrix.manu3.allSexes,".txt"))
#--------------------------------------------------------------------------------------------------------
# Calculate correlation between any 2 of all target phenotypes per manu3 for males
# Perform multiple testing using Dale Nyhot's algorithm, which generates effective number of independent phenotypes
#--------------------------------------------------------------------------------------------------------
out.file.name.rP.matrix.manu3.males <- paste0("pheno-corr-matrix_",manu3.sample.phenotypes.ID,"_males-only")
CalculateCorrBetween2Variables(input.data=data.pheno.manu3.males.IDrm
,correlation.method="spearman"
,output.file.path=paste0(locPheno
,out.file.name.rP.matrix.manu3.males
,".txt"))
# Correction for target phenotypes per manu3, males
## Analyse 15 target phenotypes
input.file.name.rP.matrix.manu3.males <- out.file.name.rP.matrix.manu3.males
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.rP.matrix.manu3.males,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.rP.matrix.manu3.males,".txt"))
#--------------------------------------------------------------------------------------------------------
# Calculate correlation between any 2 of all target phenotypes per manu3 for females
# Perform multiple testing using Dale Nyhot's algorithm, which generates effective number of independent phenotypes
#--------------------------------------------------------------------------------------------------------
out.file.name.rP.matrix.manu3.females <- paste0("pheno-corr-matrix_",manu3.sample.phenotypes.ID,"_females-only")
CalculateCorrBetween2Variables(input.data=data.pheno.manu3.females.IDrm
,correlation.method="spearman"
,output.file.path=paste0(locPheno
,out.file.name.rP.matrix.manu3.females
,".txt"))
# Correction for target phenotypes per manu3, females
## Analyse 15 target phenotypes
input.file.name.rP.matrix.manu3.females <- out.file.name.rP.matrix.manu3.females
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.rP.matrix.manu3.females,".txt")
,outputFilePath=paste0(locPheno
,"multiple-testing_"
,input.file.name.rP.matrix.manu3.females
,".txt"))
#--------------------------------------------------------------------------------------------------------
# Calculate correlation between any 2 of 5 PRSs (7 p value thresholds pooled as 1) per manu3 for all sexes
# Perform multiple testing using Dale Nyholt's algorithm, which generates the effective number of independent phenotypes
## N: Effective Number of Independent Variables [VeffLi] (using Equation 5 of Li and Ji 2005)
## P: Experiment-wide Significance Threshold Required to Keep Type I Error Rate at 5%
#--------------------------------------------------------------------------------------------------------
# Stack PRS calculated at 7 p value thresholds to a single column, collapsing 35 PRS columns to 5 columns
# (columns 1-7 = GSCAN.ai thresholds S1-S7, 8-14 = cpd, 15-21 = dpw, 22-28 = sc, 29-35 = si;
#  this column ordering is assumed, not visible here -- TODO confirm against the PRS file layout)
temp <- data.PRS.manu3.exclu.S8.allSexes.IDunique.IDrm
# Pool 7 p value thresholds as 1 and store pooled PRSs as a data.frame
data.PRS.manu3.exclu.S8.allSexes.IDunique.IDrm.S1toS7Pooled <- data.frame(GSCAN.ai=stack(temp[1:7])[,"values"]
,GSCAN.cpd=stack(temp[8:14])[,"values"]
,GSCAN.dpw=stack(temp[15:21])[,"values"]
,GSCAN.sc=stack(temp[22:28])[,"values"]
,GSCAN.si=stack(temp[29:35])[,"values"]
,stringsAsFactors = F) # dim 17241 5 -- NOTE(review): dim comment may be stale; verify
# Calculate correlation between any 2 of 5 PRS columns
out.file.name.PRS.matrix.manu3.allSexes <- paste0("pheno-corr-matrix_GSCAN-PRS_",manu3.sample.phenotypes.ID,"_all-sexes")
CalculateCorrBetween2Variables(input.data=data.PRS.manu3.exclu.S8.allSexes.IDunique.IDrm.S1toS7Pooled
,correlation.method="pearson"
,output.file.path=paste0(locPheno, out.file.name.PRS.matrix.manu3.allSexes,".txt"))
# Correction for PRSs per manu3, all sexes
## Analyse 5 pooled PRS columns
input.file.name.PRS.matrix.manu3.allSexes <- out.file.name.PRS.matrix.manu3.allSexes
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.PRS.matrix.manu3.allSexes,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.PRS.matrix.manu3.allSexes,".txt"))
#--------------------------------------------------------------------------------------------------------
# Calculate correlation between any 2 of 5 PRSs (7 p value thresholds pooled as 1) per manu3 for MALES
# Perform multiple testing using Dale Nyholt's algorithm, which generates the effective number of independent phenotypes
## N: Effective Number of Independent Variables [VeffLi] (using Equation 5 of Li and Ji 2005)
## P: Experiment-wide Significance Threshold Required to Keep Type I Error Rate at 5%
#--------------------------------------------------------------------------------------------------------
# Stack PRS calculated at 7 p value thresholds to a single column, collapsing 35 PRS columns to 5 columns
tem.M <- data.PRS.manu3.exclu.S8.males.IDunique.IDrm
# Pool 7 p value thresholds as 1 and store pooled PRSs as a data.frame
data.PRS.manu3.exclu.S8.males.IDunique.IDrm.S1toS7Pooled <- data.frame(GSCAN.ai=stack(tem.M[1:7])[,"values"]
,GSCAN.cpd=stack(tem.M[8:14])[,"values"]
,GSCAN.dpw=stack(tem.M[15:21])[,"values"]
,GSCAN.sc=stack(tem.M[22:28])[,"values"]
,GSCAN.si=stack(tem.M[29:35])[,"values"]
,stringsAsFactors = F) # dim 17241 5 -- NOTE(review): looks copy-pasted from the all-sexes block; verify
# Calculate correlation between any 2 of 5 PRS columns
out.file.name.PRS.matrix.manu3.males <- paste0("pheno-corr-matrix_GSCAN-PRS_",manu3.sample.phenotypes.ID,"_males-only")
CalculateCorrBetween2Variables(input.data=data.PRS.manu3.exclu.S8.males.IDunique.IDrm.S1toS7Pooled
,correlation.method="pearson"
,output.file.path=paste0(locPheno, out.file.name.PRS.matrix.manu3.males,".txt"))
# Correction for PRSs per manu3, males
## Analyse 5 pooled PRS columns
input.file.name.PRS.matrix.manu3.males <- out.file.name.PRS.matrix.manu3.males
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.PRS.matrix.manu3.males,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.PRS.matrix.manu3.males,".txt"))
#--------------------------------------------------------------------------------------------------------
# Calculate correlation between any 2 of 5 PRSs (7 p value thresholds pooled as 1) per manu3 for FEMALES
# Perform multiple testing using Dale Nyholt's algorithm, which generates the effective number of independent phenotypes
## N: Effective Number of Independent Variables [VeffLi] (using Equation 5 of Li and Ji 2005)
## P: Experiment-wide Significance Threshold Required to Keep Type I Error Rate at 5%
#--------------------------------------------------------------------------------------------------------
# Stack PRS calculated at 7 p value thresholds to a single column, collapsing 35 PRS columns to 5 columns
tem.F <- data.PRS.manu3.exclu.S8.females.IDunique.IDrm
# Pool 7 p value thresholds as 1 and store pooled PRSs as a data.frame
data.PRS.manu3.exclu.S8.females.IDunique.IDrm.S1toS7Pooled <- data.frame(GSCAN.ai=stack(tem.F[1:7])[,"values"]
,GSCAN.cpd=stack(tem.F[8:14])[,"values"]
,GSCAN.dpw=stack(tem.F[15:21])[,"values"]
,GSCAN.sc=stack(tem.F[22:28])[,"values"]
,GSCAN.si=stack(tem.F[29:35])[,"values"]
,stringsAsFactors = F) # dim 17241 5 -- NOTE(review): looks copy-pasted from the all-sexes block; verify
# Calculate correlation between any 2 of 5 PRS columns
out.file.name.PRS.matrix.manu3.females <- paste0("pheno-corr-matrix_GSCAN-PRS_",manu3.sample.phenotypes.ID,"_females-only")
CalculateCorrBetween2Variables(input.data=data.PRS.manu3.exclu.S8.females.IDunique.IDrm.S1toS7Pooled
,correlation.method="pearson"
,output.file.path=paste0(locPheno, out.file.name.PRS.matrix.manu3.females,".txt"))
# Correction for PRSs per manu3, females
## Analyse 5 pooled PRS columns
input.file.name.PRS.matrix.manu3.females <- out.file.name.PRS.matrix.manu3.females
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.PRS.matrix.manu3.females,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.PRS.matrix.manu3.females,".txt"))
#********************************************************************************************************#
#*********************************** This is the end of this file ***************************************#
#********************************************************************************************************#
#----------------------------------------------------------------------------------
# Calculate correlation for 5 PPRSs matched phenotype data group 3
#----------------------------------------------------------------------------------
# colnames_PRS <-colnames(PRS_pheno_gp3_6_uniqueID_IDrm)
#
# # Stack PRS for 7 p value thresholds to a single column
# PRS_adoles_GSCAN.ai <- stack(PRS_pheno_gp3_6_uniqueID_IDrm[1:7])[,"values"]
# PRS_adoles_GSCAN.cpd <- stack(PRS_pheno_gp3_6_uniqueID_IDrm[8:14])[,"values"]
# PRS_adoles_GSCAN.dpw <- stack(PRS_pheno_gp3_6_uniqueID_IDrm[15:21])[,"values"]
# PRS_adoles_GSCAN.sc <- stack(PRS_pheno_gp3_6_uniqueID_IDrm[22:28])[,"values"]
# PRS_adoles_GSCAN.si <- stack(PRS_pheno_gp3_6_uniqueID_IDrm[29:35])[,"values"]
#
# # Save the binned PRSs as a data.frame
# PRS_adoles <- data.frame(GSCAN.ai=PRS_adoles_GSCAN.ai
# ,GSCAN.cpd=PRS_adoles_GSCAN.cpd
# ,GSCAN.dpw=PRS_adoles_GSCAN.dpw
# ,GSCAN.sc=PRS_adoles_GSCAN.sc
# ,GSCAN.si=PRS_adoles_GSCAN.si
# ,stringsAsFactors = F)
#
# # Calculate correlation between any 2 PRSs
# exported.file.path <- paste0(locPheno,"QIMR19Up_PRS-alcohol-tobacco-FTND_phenotypic-correlation.txt")
#
# CalculateCorrBetween2Variables(input.data=PRS_adoles
# ,correlation.method="pearson"
# ,output.file.path=exported.file.path)
#
#----------------------------------------------------------------------------------
# Calculate correlation for 5 PPRSs matched phenotype data group 4, 7 (adults)
#----------------------------------------------------------------------------------
# NOTE(review): colnames_PRS is assigned but never used in the code visible here;
# PRS_pheno_gp4_7_uniqueID_IDrm is presumably built earlier in this file -- verify.
colnames_PRS <- colnames(PRS_pheno_gp4_7_uniqueID_IDrm)
# Stack PRS for 7 p value thresholds to a single column
# (columns 1-7 = GSCAN.ai, 8-14 = cpd, 15-21 = dpw, 22-28 = sc, 29-35 = si)
PRS_adults_GSCAN.ai <- stack(PRS_pheno_gp4_7_uniqueID_IDrm[1:7])[,"values"]
PRS_adults_GSCAN.cpd <- stack(PRS_pheno_gp4_7_uniqueID_IDrm[8:14])[,"values"]
PRS_adults_GSCAN.dpw <- stack(PRS_pheno_gp4_7_uniqueID_IDrm[15:21])[,"values"]
PRS_adults_GSCAN.sc <- stack(PRS_pheno_gp4_7_uniqueID_IDrm[22:28])[,"values"]
PRS_adults_GSCAN.si <- stack(PRS_pheno_gp4_7_uniqueID_IDrm[29:35])[,"values"]
# Save the binned PRSs as a data.frame
PRS_adults <- data.frame(GSCAN.ai=PRS_adults_GSCAN.ai
,GSCAN.cpd=PRS_adults_GSCAN.cpd
,GSCAN.dpw=PRS_adults_GSCAN.dpw
,GSCAN.sc=PRS_adults_GSCAN.sc
,GSCAN.si=PRS_adults_GSCAN.si
,stringsAsFactors = F)
# Calculate correlation between 2 PRSs; matrix written to a text file under locPheno
#exported.file.path <- paste0(locPheno,"QIMR-adults_PRS-GSCAN-phenotypes-ND-other-diagnoses_phenotypic-correlation.txt")
output.file.name.PRS.matrix <- "pheno-corr-matrix_GSCAN-PRS_QIMR-adults_GSCAN-phenotypes-ND-other-diagnoses"
CalculateCorrBetween2Variables(input.data=PRS_adults
,correlation.method="pearson"
,output.file.path=paste0(locPheno,output.file.name.PRS.matrix,".txt"))
#------------------------------------------------------------------------------------------
# Calculate the following 2 values using Dale Nyholt's script matSpDlite.R
## N: Effective Number of Independent Variables [VeffLi] (using Equation 5 of Li and Ji 2005)
## P: Experiment-wide Significance Threshold Required to Keep Type I Error Rate at 5%
#------------------------------------------------------------------------------------------
source(paste0(locSNPSpD,"matSpDlite.R"))
# Correction for target phenotypes per QIMR 19Up (adolescents)
## status: output generated
# file.prefix <- "QIMR19Up_alcohol-tobacco-FTND"
# multiple_testing(inputCorMatrixPath=paste0(locPheno,file.prefix,"_phenotypic-correlation",".txt")
# ,outputFilePath=paste0(locPheno,file.prefix,"_multiple-testing",".txt"))
#
# # Correction for PRSs per adolescents
# file.prefix <- "QIMR19Up_PRS-alcohol-tobacco-FTND"
# multiple_testing(inputCorMatrixPath=paste0(locPheno,file.prefix,"_phenotypic-correlation.txt")
# ,outputFilePath=paste0(locPheno,file.prefix,"_multiple-testing",".txt"))
# Correction for target phenotypes per QIMR adults
# Analyse 15 target phenotypes (6 GSCAN phenotypes, 9 binary phenotypes)
# NOTE(review): output.file.name.rP.matrix is not defined anywhere in this section;
# presumably it is created earlier in the file -- confirm before running standalone.
input.file.name.rP.matrix <- output.file.name.rP.matrix
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.rP.matrix,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.rP.matrix,".txt"))
# file.prefix <- "QIMR-adults_GSCAN-phenotypes-ND-other-diagnoses"
# multiple_testing(inputCorMatrixPath=paste0(locPheno,file.prefix,"_phenotypic-correlation",".txt")
# ,outputFilePath=paste0(locPheno,file.prefix,"_multiple-testing",".txt"))
# Correction for PRSs per adults
input.file.name.PRS.matrix <- output.file.name.PRS.matrix
multiple_testing(inputCorMatrixPath=paste0(locPheno,input.file.name.PRS.matrix,".txt")
,outputFilePath=paste0(locPheno,"multiple-testing_",input.file.name.PRS.matrix,".txt"))
# file.prefix <- "QIMR-adults_PRS-GSCAN-phenotypes-ND-other-diagnoses"
# multiple_testing(inputCorMatrixPath=paste0(locPheno,file.prefix,"_phenotypic-correlation.txt")
# ,outputFilePath=paste0(locPheno,file.prefix,"_multiple-testing.txt"))
#------------------------------------------------------------------------------------------------------
# -----Account for multiple testing
## -----Adjust p values by dividing the nominal p value (0.05) by the product of the N values
## -------Copy N and P from output files in previous code block
#------------------------------------------------------------------------------------------------------
# N P Source File
#-----------------------------------------------------------------------------------------
# 12.0988 0.00423056857089765 QIMR19Up_alcohol-tobacco-FTND_multiple-testing.txt
# 5 0.0102062183130115 QIMR19Up_PRS-alcohol-tobacco-FTND_multiple-testing.txt
# 14 0.00365710319138357 QIMR-adults_GSCAN-phenotypes-ND-other-diagnoses_multiple-testing.txt
# 5 0.0102062183130115 QIMR-adults_PRS-GSCAN-phenotypes-ND-other-diagnoses_multiple-testing.txt
#----------------------------------------------------------------------------------------
# Cohort Corrected-significance-threshold (nominal p/N-pheno*N-PRS)
#------------------------------------------
# 19Up 0.05/(12.0988*5)= 0.0008265283
# Adults 0.05/(14*5)= 0.0007142857
#------------------------------------------
|
10215d9c4b71140ae2739b92183c80ab0bf2b1e1
|
081012f78ce5be302d0066baabb9981f393a8d1c
|
/R/model_fitting.R
|
1aa6872cde072583c16cffff9d7b0f97d37ae06d
|
[
"MIT"
] |
permissive
|
CshlSiepelLab/DENR
|
bcfbc3da9ce7f67c7d3d921aba8a56d6f602071d
|
d17f935d1161e6efb5b8ab265efac29497078e7f
|
refs/heads/master
| 2023-06-07T14:13:44.742140
| 2021-07-11T21:19:55
| 2021-07-11T21:20:05
| 223,423,009
| 1
| 0
| null | 2021-07-02T14:48:25
| 2019-11-22T14:41:40
|
R
|
UTF-8
|
R
| false
| false
| 10,426
|
r
|
model_fitting.R
|
#' @title Mask data
#'
#' @description Zeroes out the positions of \code{data} listed in \code{masks},
#' leaving every other position untouched.
#'
#' @param data a data vector
#' @param masks a vector of masked indices
#'
#' @name mask_data
#' @rdname mask_data
mask_data <- function(data, masks) {
  # base::replace() performs exactly `data[masks] <- 0; data`
  replace(data, masks, 0)
}
#' @title Predicts signal at locus level
#'
#' @description Computes the predicted read counts across an entire locus as
#' the linear combination of transcript models weighted by their abundances.
#'
#' @param models the matrix of transcript models
#' @param abundance the transcript model abundances (either a vector, or a
#' matrix whose columns are abundance vectors)
#'
#' @name predict_locus
#' @rdname predict_locus
predict_locus <- function(models, abundance) {
  models %*% abundance
}
#' @title Sum of squares
#'
#' @description Objective function: the sum of squared deviations between the
#' observed data and the locus-level prediction, computed on either the raw
#' ("identity") or log scale. Despite the name, no lasso penalty is applied:
#' \code{lambda} is accepted for interface compatibility but currently unused.
#'
#' @param x the transcript model abundances
#' @param models the matrix of transcript models
#' @param data a vector of the observed data
#' @param lambda the weight of the lasso penalty (not implemented at this time)
#' @param transform how to transform the data (either "identity" or "log")
#'
#' @name sum_squares_lasso
#' @rdname sum_squares_lasso
sum_squares_lasso <- function(x, models, data, lambda = 0,
                              transform) {
  fitted <- predict_locus(models, x)
  # The 1e-3 offset keeps log() finite at zero-count positions.
  switch(transform,
         identity = sum((data - fitted)^2),
         log = sum((log(data + 1e-3) - log(fitted + 1e-3))^2),
         stop("Invalid transform specified"))
}
#' @title Sum of squares gradient
#'
#' @description Computes the analytic gradient of
#' \code{\link{sum_squares_lasso}} with respect to the abundances \code{x}.
#'
#' @inheritParams sum_squares_lasso
#'
#' @name sum_squares_grad
#' @rdname sum_squares_grad
sum_squares_grad <- function(x, models, data, transform, lambda = 0) {
  # Hoist the locus prediction: the original evaluated predict_locus() up to
  # three times per call in the "log" branch; the gradient is evaluated many
  # times per optim() run, so this is the hot path.
  pred <- as.vector(predict_locus(models, x))
  if (transform == "identity") {
    gr <- as.vector(-2 * crossprod(models, data - pred))
  } else if (transform == "log") {
    # The 1e-3 offset matches sum_squares_lasso, keeping log() finite at zeros.
    gr <- as.vector(-2 * crossprod(models / (pred + 1e-3),
                                   log(data + 1e-3) - log(pred + 1e-3)))
  } else {
    stop("Invalid transform option")
  }
  return(gr)
}
#' @title Fit model
#'
#' @description Estimates transcript abundances for a given
#' \code{\link{transcript_quantifier-class}} object under a lasso penalty
#'
#' @param verbose if TRUE shows progress bar for fitting (default: FALSE)
#' @param inactive_transcripts a character vector listing transcripts for which abundance
#' values should be fixed to 0. IMPORTANT: In the case where multiple transcripts are
#' assigned to a single model (due to identical models at the specified bin scale) this
#' will be overridden if one or more transcripts assigned to the same model are active.
#' This will also be overridden for specific transcripts if they have 10x or more
#' polymerase density downstream from their TSS as they do upstream and there are no
#' other active transcripts in their loci.
#' @param heuristic_inactive_override Boolean. If TRUE uses a series of heuristics to
#' re-activate some of the transcripts labeled as inactive. We recommend you leave this
#' on unless you're really confident in your inactive calls. See Details for more.
#' @inheritParams add_data
#' @inheritParams sum_squares_lasso
#'
#' @include transcript_quantifier-class.R
#' @name fit
#' @rdname fit
#'
#' @export
methods::setGeneric("fit",
                    function(tq, lambda = 0,
                             transform = "log", inactive_transcripts = NA,
                             verbose = FALSE, heuristic_inactive_override = TRUE) {
  # TRUE replaces the reassignable shorthand `T` used previously.
  # NOTE(review): the generic defaults inactive_transcripts to NA while the
  # method defaults it to NULL -- confirm intent before unifying.
  standardGeneric("fit")
})
#' @rdname fit
# S4 method for transcript_quantifier objects: validates inputs, optionally
# re-activates some "inactive" transcripts via two heuristics, then estimates
# per-model abundances locus-by-locus with box-constrained L-BFGS-B.
methods::setMethod("fit",
signature(tq = "transcript_quantifier"),
function(tq, lambda = 0, transform = "log", inactive_transcripts = NULL,
verbose = FALSE, heuristic_inactive_override = T) {
# --- Input validation ---
# lambda is accepted for interface compatibility only (lasso not implemented)
if (lambda != 0) {
stop("lambda feature not supported at this time")
}
if (length(tq@counts) == 0) {
stop("No count data has been entered for this model")
}
if (length(tq@counts) != length(tq@models)) {
stop("There is data for a different number of loci than there are models")
}
if (!is.logical(verbose)) {
# NOTE(review): error message typo -- "most" should be "must" (runtime string
# left untouched in this documentation-only pass)
stop("verbose most be either TRUE or FALSE")
}
# NULL is normalized to NA_character_ so the data.table keyed lookup below
# simply matches nothing
if (is.null(inactive_transcripts)) {
inactive_transcripts <- NA_character_
}
# NOTE(review): class(x) != "character" errors for objects with multiple
# classes; !is.character(x) would be safer -- confirm before changing
if (class(inactive_transcripts) != "character") {
stop("inactive_transcripts must be either NULL or a character vector")
}
# Handle transform option
transform_opts <- c("identity", "log")
if (length(transform) > 1) {
warning("Multiple transform options specified, using the first")
transform <- transform[1]
}
if (!transform %in% transform_opts) {
stop(paste("transform must be one of these:",
paste(transform_opts, collapse = ", ")))
}
# Zip together models, counts, and abundances (one list entry per locus)
sufficient_values <- mapply(function(x, y, z, za) {
list(abundance = x, models = y, counts = z, masks = za)
}, tq@model_abundance, tq@models, tq@counts, tq@masks, SIMPLIFY = FALSE)
estim <- list()
if (verbose) {
message("Estimating abundance ...")
pb <- utils::txtProgressBar(min = 1, max = length(sufficient_values), style = 3)
}
# Fast lookup version of index built from specified inactive transcripts
# (keyed on tx_name; rows matching inactive_transcripts get inactive = TRUE)
tq_inactive_ind <- data.table::data.table(tq@transcript_model_key, inactive = FALSE,
key = c("tx_name"))
tq_inactive_ind[.(inactive_transcripts), inactive := TRUE]
# There are currently two heuristics here meant to deal with possible deficiencies in
# inactive lists
# 1) Upstream polymerase ratio
# 2) GOF metric activation at loci with no active transcripts but 1% or more sites
# covered polymerase for a given transcript
if (heuristic_inactive_override) {
# ** UPR heuristic **
# Get transcripts that are marked as inactive but who have a high UPR. Keep those
# whose TSS are not within -3kb/+2kb of an active transcript's TSS on the same
# strand. These numbers are based on the regions used to compute the UPR
# [-3kb, -500bp] and [+500bp, 2kb]. If they pass all these standards, reactivate
# them
min_polymerase_ratio <- log2(5)
inact_upr <- names(which(tq@upstream_polymerase_ratios >= min_polymerase_ratio))
if (length(inact_upr) > 0) {
upstream_check_radius <- 3e3
downstream_check_radius <- 2e3
active_tx_gr <- tq@transcripts[!get_tx_id(tq) %in% inactive_transcripts]
active_checkzone <- GenomicRanges::promoters(
tq@transcripts[get_tx_id(tq) %in% inact_upr],
upstream = upstream_check_radius,
downstream = downstream_check_radius)
over <- GenomicRanges::findOverlaps(active_checkzone, active_tx_gr,
ignore.strand = FALSE)
# Transcripts whose check zone overlaps NO active transcript are reactivated
override <- GenomicRanges::mcols(
active_checkzone[-S4Vectors::queryHits(over)])[, tq@column_identifiers[1]]
tq_inactive_ind[.(override), inactive := FALSE]
}
# ** GOF heuristic **
# This metric looks for loci in which all transcripts have been marked as inactive
# but one or more transcripts show >= 1% of sites covered by polymerase. In that
# case take the one with the best model_match statistic and activate it
gof_tx_tab <- data.table::merge.data.table(
data.table::as.data.table(tq@transcript_model_key),
tq@tx_gof_metrics,
by.x = "tx_name", by.y = "transcript_id")
# Get loci where all transcripts are inactive
loci_active <- tq_inactive_ind[, .(all_inactive = all(inactive)), by = "group"]
# Reactivate transcript in loci that is at least 1% tx and has highest % match
reactivate <-
gof_tx_tab[group %in% loci_active[(all_inactive)]$group &
percent_transcribed >= 0.01, .SD[which.max(percent_match)][1],
by = "group"]$tx_name
tq_inactive_ind[.(reactivate), inactive := FALSE]
}
# Set key for efficient lookup by group (locus index i below)
data.table::setkey(tq_inactive_ind, "group")
# Iterate over transcript groups, fitting each locus independently
for (i in seq_along(sufficient_values)) {
sv <- sufficient_values[[i]]
# Initialize upper bounds to a large value (1e9) standing in for infinity
ub <- rep(1e9, length(sv$abundance))
# Set values of elements that are designated as inactive to 0 and set upper bounds
# to 0 as well (all transcripts in model must be inactive for model to be
# considered inactive)
inactive_models <- tq_inactive_ind[.(i), ][which(inactive)]$model
active_models <- tq_inactive_ind[.(i), ][which(!inactive)]$model
final_inactive_models <- setdiff(inactive_models, active_models)
ub[final_inactive_models] <- 1e-100 # using this for now as using 0 throws error
sv$abundance[final_inactive_models] <- 0
# Box-constrained minimization of the (masked) sum-of-squares objective
opt_result <- stats::optim(sv$abundance, fn = sum_squares_lasso,
gr = sum_squares_grad,
models = sv$models,
data = mask_data(sv$counts, sv$masks),
lambda = lambda,
transform = transform,
lower = rep(0, length(sv$abundance)),
upper = ub,
method = "L-BFGS-B")
estim[[i]] <- opt_result$par
## Force set inactive transcripts to 0 even if they were slightly perturbed
estim[[i]][final_inactive_models] <- 0
names(estim[[i]]) <- colnames(sv$models)
if (verbose) {
utils::setTxtProgressBar(pb, i)
}
}
# Return a copy of tq with fitted abundances installed
tq@model_abundance <- estim
return(tq)
}
)
## Appease R CMD check:
# `inactive` is used as a bare column name inside data.table expressions in the
# fit() method above; registering it suppresses the "no visible binding for
# global variable" NOTE. utils::globalVariables() was introduced in R 2.15.1,
# hence the version guard.
if (getRversion() >= "2.15.1") {
utils::globalVariables(c("inactive"))
}
|
d24e8b2d47c0facb129fe99e0c752d046a0df6ce
|
c667e08d35ef383668722a376ce0d5572423b464
|
/Plot1.R
|
919fbebde66eaf362c34e47ae2657f8cac4c7e01
|
[] |
no_license
|
christinegaribian/ExData_Plotting1
|
544f9b379499dbf826ee4df5d5dab47d66ed0ca5
|
cd9a83eb6e6ecd7daf7b0c58506abb6033456ed3
|
refs/heads/master
| 2021-01-22T11:29:13.409357
| 2015-05-10T22:42:42
| 2015-05-10T22:42:42
| 35,348,997
| 0
| 0
| null | 2015-05-09T23:13:11
| 2015-05-09T23:13:11
| null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
Plot1.R
|
## Plot1.R
## Produces plot1.png: a histogram of household Global Active Power over
## 2007-02-01 and 2007-02-02.
##
## Data: "Individual household electric power consumption" (UCI ML Repository).
## Variables:
##   Date: dd/mm/yyyy; Time: hh:mm:ss
##   Global_active_power / Global_reactive_power: minute-averaged power (kilowatt)
##   Voltage: minute-averaged voltage (volt)
##   Global_intensity: minute-averaged current intensity (ampere)
##   Sub_metering_1..3: sub-metered active energy (watt-hour) for the kitchen,
##   the laundry room, and the water-heater/air-conditioner respectively.

# Read the whole file and subset by date, instead of the previous hard-coded
# skip = 66637 / nrow = 2880 window (fragile if the source file ever changes).
# "?" marks missing values; the original spelled the argument `na.string`,
# relying on R's partial argument matching -- spelled out fully here.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", stringsAsFactors = FALSE)
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]

# Render the histogram to a 480x480 pixel PNG (png() defaults, made explicit)
png(filename = "plot1.png", width = 480, height = 480)
hist(data$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
38b6bb9b9675b1d6149fbc84ce2b26756194cf33
|
b3a59378dc173ab63940ec9e7b77d273ab84ecf9
|
/study.R
|
7184a444639264cbbe26a06aac582b2bcd7a3a0e
|
[] |
no_license
|
kecolson/simulator_functions_next_round
|
b613cd315bdbf54986abdf3b0ed58a993f0b6a5f
|
c50394565c1c06d561d153d2cbcade142cbe0892
|
refs/heads/master
| 2020-04-25T19:51:52.267755
| 2015-03-16T17:00:19
| 2015-03-16T17:00:19
| 32,105,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,625
|
r
|
study.R
|
# study.R
# Function: study
# Draws a (possibly matched) sample from a simulated population.
#
# Input:  pop         data frame with variables W*_0, W*_1, A_0, A_1, Y_0, Y_1
#         design      a single row of the `designs` data frame (as a list)
#         match_input a single row of the `matches` data frame (as a list)
# Output: a list with elements:
#         sample        the unmatched sample (kept even when matching is done,
#                       for bias-corrected matching in the analysis phase)
#         an_data       the analysis data set (matched sample if matching ran)
#         subclass      numeric vector, one entry per row of an_data
#         match_weights numeric vector, one entry per row of an_data
#         samp_weights  numeric vector, one entry per row of an_data
#         match_input   the matching specification actually used (NA if none)
study <- function(pop, design, match_input) {
  # Draw the sample; samp_weights is all 1s when the sample is representative
  drawn <- draw_sample(pop, design)
  weights <- drawn$samp_weights
  n_obs <- nrow(drawn)

  if (match_input$match == 1) {
    # NOTE: match() here is the project's matching routine, which shadows
    # base::match within this script's environment.
    matched <- match(drawn, match_input)
    list(sample = drawn,
         an_data = matched[[1]],
         subclass = matched[[2]],
         match_weights = matched[[3]],
         samp_weights = weights,
         match_input = matched[[4]])
  } else {
    # No matching: analysis data is the raw sample with unit match weights
    list(sample = drawn,
         an_data = drawn,
         subclass = rep(NA, n_obs),
         match_weights = rep(1, n_obs),
         samp_weights = weights,
         match_input = NA)
  }
}
|
92a10a632891ca969728508fb136e4a0a8e6b385
|
262e5903ee6d3f6e0dbd8cbf2d8caa40c70c92e4
|
/r_programming/week_4/rankhospital.R
|
2818cd5a74c9b7c0846c9f6b317d6956c83f3ffc
|
[] |
no_license
|
jdglover/datasciencecoursera
|
ffaf17d28b48c85386c834baeb8f513795a12cdb
|
f115083aadf6478c31c15946e77710deab3ef507
|
refs/heads/master
| 2021-01-10T08:57:07.425414
| 2015-12-08T02:54:42
| 2015-12-08T02:54:42
| 43,981,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,382
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best") {
  # Determines the Nth ranked hospital for a given condition in a given state.
  #
  # Args:
  #   state:   Two-letter state abbreviation (e.g., "PA").
  #   outcome: One of "heart attack", "heart failure", "pneumonia".
  #   num:     Desired rank: "best", "worst", or a numeric rank.
  #
  # Returns:
  #   A character vector with the name of the Nth ranked hospital (ties broken
  #   alphabetically), or NA if fewer than `num` hospitals report that outcome.
  data <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = FALSE)
  # 30-day mortality column index for each supported outcome
  # (hospital name is column 2, state is the State column)
  outcome_col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!state %in% data$State) {
    stop("invalid state")
  }
  if (!outcome %in% names(outcome_col)) {
    stop("invalid outcome")
  }
  state_data <- data[data$State == state, ]
  # Coerce the requested outcome column; "Not Available" becomes NA
  # (coercion warning suppressed deliberately)
  rate <- suppressWarnings(as.numeric(state_data[[outcome_col[[outcome]]]]))
  hospital <- state_data[[2]]
  # Keep hospitals with data for the REQUESTED outcome only. (The previous
  # na.omit() on all five columns also discarded hospitals that were merely
  # missing one of the OTHER outcomes.)
  has_data <- !is.na(rate)
  ranked <- hospital[has_data][order(rate[has_data], hospital[has_data])]
  if (identical(num, "best")) {
    return(ranked[1])
  }
  if (identical(num, "worst")) {
    # Fixes tail(winner)[6, 1], which returned NA for states with < 6 hospitals
    return(ranked[length(ranked)])
  }
  # Numeric rank; an out-of-range rank yields NA, as before
  ranked[num]
}
|
e07d7028dabeb5bc8c79fab376daade47d0aba97
|
9f492b4300d2430115fab594853108281b003016
|
/ropengl/R/package-ropengl.R
|
a79e4111c666fcb3b32779874aa4c2b3892921d3
|
[] |
no_license
|
leoorshansky/ropengl
|
8863188c5907f53fdbd6031d18984925a2c424e3
|
85bc1cc13d47edfe37f440d4f2a1c48b9379a12d
|
refs/heads/master
| 2020-03-29T12:42:03.880408
| 2018-09-22T20:20:26
| 2018-09-22T20:20:26
| 149,913,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
package-ropengl.R
|
#' ropengl
#'
#' @name ropengl
#' @docType package
#'
#' @description OpenGL bindings for R
#'
#' @useDynLib ropengl
#' @importFrom Rcpp sourceCpp
# Infix operator: evaluates `b + 4` (result discarded, beyond forcing `b`) and
# returns getFlag(a). NOTE(review): the `b + 4` expression looks like leftover
# scaffolding -- confirm before removing, since deleting it would also stop
# forcing evaluation of `b`. getFlag() is not defined in this file; presumably
# it is a compiled routine exposed by the package's native code -- verify.
`%#%` <- function(a, b) {b + 4;getFlag(a)}
|
bac4a9a243c80b38bc02362280bf5ba29859e6aa
|
9cae896d263554bc72faac373005e1dbf6eabb70
|
/Filling in missing bird bands in nest data.R
|
f1a769a3a82721edcf8af4acc5cdeabe05ca2a8a
|
[] |
no_license
|
11arc4/cloud-TRES-data-manipulation
|
948ff317872c23f36b92bad0b0330ebc0d9012f3
|
e80c225520675babbe12b60cb850eb9ac7e2384d
|
refs/heads/master
| 2021-01-13T03:26:57.559098
| 2020-10-16T12:48:38
| 2020-10-16T12:48:38
| 77,549,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,453
|
r
|
Filling in missing bird bands in nest data.R
|
processNestDataFiles <- function(inputDir, outputDir, banddata) {
  # For every nest-data CSV in inputDir: assign renest status, fill in any
  # adult band IDs recoverable from `banddata`, and write the augmented data
  # to outputDir as "Nest Data <year> with all possible adult bands.csv".
  #
  # Args:
  #   inputDir:  directory containing the per-year nest data CSV files
  #   outputDir: directory the updated CSV files are written to
  #   banddata:  band-data data frame used by fillinbandID()
  #
  # Depends on AssignRenestStatus() and fillinbandID(), defined elsewhere.
  listfiles <- list.files(inputDir)
  for (f in listfiles) {
    message("Processing ", f)
    # file.path() replaces manual paste(..., "/", ..., sep = "") construction
    filename <- file.path(inputDir, f)
    initialnestdata <- read.csv(filename, as.is = TRUE, na.strings = c("", "NA"))
    # Add the site ID / renest status (needed to check against the band data)
    nestdata <- AssignRenestStatus(initialnestdata)
    Filledinnestdata <- fillinbandID(nestdata, banddata)
    outputFileName <- paste("Nest Data", Filledinnestdata$Year[1],
                            "with all possible adult bands.csv")
    updatedfilename <- file.path(outputDir, outputFileName)
    write.csv(Filledinnestdata, file = updatedfilename, row.names = FALSE, na = "")
    # Note: the previous call passed sep = " " to message(), which is not a
    # message() argument and was simply appended to the output text.
    message("updated ", f, " with all possible band IDs")
  }
}
# Machine-specific configuration: Amelia's machine uses the thesis data tree;
# any other user falls back to the repo's test data and additionally cleans the
# band data with fixUpBandData().
# NOTE(review): `=` and `<-` are mixed for assignment below; `<-` is preferred.
if ("Amelia" == Sys.getenv("USERNAME")) {
topLevelDir <- "~/Masters Thesis Project/Tree Swallow Data"
bandDataDir = paste(sep = "/", topLevelDir, "Amelia TRES data 1975-2016", "Improved and Cleaned Data")
bandDataFile = "Updated band data 1975-2001.csv"
resultdir <- paste(sep = "/", topLevelDir, "Amelia TRES data 1975-2016",
"Improved and Cleaned Data")
# Blanks and the literal string "NA" are both read as missing values.
banddata <- read.csv(paste(bandDataDir, bandDataFile, sep = "/"),
as.is=TRUE, na.strings = c("", "NA"))
} else {
topLevelDir <- "~/GitHub/cloud-TRES-data-manipulation/testData"
bandDataFile <- "banddata.csv"
resultdir <- paste(sep = "/", topLevelDir,
"Improved and Cleaned Data")
banddata <- read.csv(paste(topLevelDir, bandDataFile, sep = "/"),
as.is=TRUE, na.strings = c("", "NA"))
# Test band data needs extra normalization before use.
banddata <- fixUpBandData(banddata)
}
# Second env-dependent branch selects the nest-data input directory and where
# the augmented output files go. NOTE(review): on Amelia's machine topLevelDir
# is reassigned here, shadowing the value set above.
if ("Amelia" == Sys.getenv("USERNAME")) {
topLevelDir <- "~/Masters Thesis Project/Tree Swallow Data/Amelia TRES data 1975-2016"
# input files...
nestDataInDir <- paste(topLevelDir, "FINAL QC DONE", sep = "/")
updatedResultDir <- paste(topLevelDir, "Improved and Cleaned Data/1 All available adult band IDs added",
sep = "/")
} else {
nestDataInDir <- paste(sep = "/", topLevelDir, "inputNestData")
updatedResultDir <- paste(sep = "/", topLevelDir, "updatedNestData")
}
# Entry point: process every nest-data file with the configured paths.
processNestDataFiles(inputDir = nestDataInDir,
outputDir = updatedResultDir,
banddata = banddata)
|
7f8e77e64938455143f731e9afb43250e94afc08
|
5e7f15495c93134ff1cc2fa302237132aa6a63c3
|
/backlog_w_engineering_capacity.R
|
97c968987ebe7665017275bd8a8528479c3a604e
|
[] |
no_license
|
jimfelps/S-OP-Project
|
07af7212aef1e76d84ab0cd91ddd9424872db1b8
|
5d7bc84ff2b89a3b57653ecf4523c984b08478b1
|
refs/heads/master
| 2020-08-04T01:33:28.819727
| 2019-12-01T21:59:34
| 2019-12-01T21:59:34
| 211,955,186
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,462
|
r
|
backlog_w_engineering_capacity.R
|
# Joins material backlog detail with engineering complexity/hours data and
# writes the combined table for S&OP reporting.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern — it wipes the
# caller's workspace; prefer running the script in a fresh session.
rm(list = ls(all.names = TRUE))
library(tidyverse)
library(lubridate)
library(readxl)
# Source extracts (paths are machine-specific).
eng_complexity_lookup <- read_csv("~/R/R Data/Engineering/MBR Charts/eng_complexity_lookup.csv")
backlog_tons_detail <- read_csv("~/R/R Data/Engineering/MBR Charts/backlog_tons_detail.csv")
eng_backlog <- read_csv("~/R/R Data/Engineering/MBR Charts/eng_backlog.csv")
# create project number field in a given data frame
# Total actual/budget engineering hours per order, for the final join.
eng_hours <- eng_complexity_lookup %>%
group_by(`Order Number`) %>%
summarise(act_hours = round(sum(`Actual Hours`),2),
budget_hours = round(sum(`Budget Hours`),2)) %>%
rename(order_number = `Order Number`)
# proj_num = first 8 chars of the order number; used as the join key below.
eng_backlog2 <- eng_backlog %>%
mutate(proj_num = str_sub(`Order Number`, 1, 8)) %>%
select(`Order Number`,
Region,
Division,
`Customer Name`,
proj_num)
eng_complexity <- eng_complexity_lookup %>%
mutate(proj_num = str_sub(`Order Number`, 1, 8)) %>%
select(`Project Name`,
Status,
`Order Number`,
`Budget Hours`,
`Actual Hours`,
Complexity,
Division,
Region,
proj_num)
# Orders with a zero engineering budget get an explicit complexity label.
no_val <- eng_complexity$`Budget Hours` == 0
eng_complexity$Complexity[no_val] <- "No Engineering Budget"
backlog_w_proj_num <- backlog_tons_detail %>%
mutate(proj_num = str_sub(`Order Number`, 1, 8))
# Join backlog to complexity on proj_num (BLA region excluded). Unmatched rows
# are treated as parts/buyout orders with no engineering content.
# NOTE(review): the .x/.y suffixes in the rename below depend on both inputs
# sharing column names — verify if either extract's schema changes.
mat_backlog_w_complex <- backlog_w_proj_num %>%
filter(Region != "BLA") %>%
left_join(eng_complexity, by = "proj_num") %>%
replace_na(list(Complexity = "Parts Order/Buyout - No Eng", `Budget Hours` = 0, `Actual Hours` = 0)) %>%
rename(division = Division.x,
region = Region.x,
project_name = `Project Name.x`,
order_number = `Order Number.x`,
backlog_dollars = `Backlog Dollars`,
margin_dollars = `Margin with Exch Rate`,
total_tons = `Total Tons`,
buyout_tons = `Buyout Tons`,
budget_hours = `Budget Hours`,
actual_hours = `Actual Hours`) %>%
select(division,
region,
`Project Manager`,
`Customer Name`,
project_name,
order_number,
backlog_dollars,
margin_dollars,
total_tons,
buyout_tons,
Bucket,
`Record Type`,
`Transaction Type`,
`Ordered Date`,
Status,
Complexity) %>%
left_join(eng_hours, by = "order_number")
# Final combined table for downstream S&OP reporting.
write.csv(mat_backlog_w_complex, "~/R/R Data/S&OP/mat_backlog_w_complex.csv")
|
fc0c82c2c4b07221fd14f32772d794c21005747e
|
4b742a3192695e460c447275adeb5a27608027fb
|
/Code/mvn-out.R
|
80d2cbaa6c49f17b798f25ca10309da175b18098
|
[] |
no_license
|
gautam-sabnis/Komp-Gait-Analysis
|
6e3ec8892efba2f0bb8f7aae1ee6b0d10df3ef8c
|
20d07768c0848bc15ce11ffa4dfc552429596999
|
refs/heads/master
| 2023-02-13T03:24:05.907266
| 2021-01-11T02:54:06
| 2021-01-11T02:54:06
| 279,411,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 59,629
|
r
|
mvn-out.R
|
# Load and clean KOMP per-stride gait data, then build per-animal and
# per-strain mean tables used by the outlier analyses below.
# NOTE(review): setwd() in a script is fragile; prefer project-relative paths.
setwd("/Users/sabnig/Documents/Projects/Komp/Temp")
data_per_stride <- read.delim('../Data/kompdf-corr', stringsAsFactors = FALSE)
# Linear gait phenotypes analysed throughout, and their display names
# (kept in matching order — the code below relies on positional pairing).
Phenos.lin <- c("speed","limb_duty_factor","step_length1","step_width","stride_length",
"temporal_symmetry","base_tail_lateral_displacement","tip_tail_lateral_displacement",
"nose_lateral_displacement")
Phenos.lin.Nomen <- c("Speed","LDF","Step Length","Step Width","Stride Length",
"TS","Base Tail LD","Tip Tail LD","Nose LD")
# Circular phase phenotypes (not used in the linear analyses below).
Phenos.circ <- c("base_tail_lateral_displacement_phase","tip_tail_lateral_displacement_phase",
"nose_lateral_displacement_phase")
# Normalize column names to the short forms used throughout this script.
names(data_per_stride)[names(data_per_stride) == 'Mouse.ID'] <- 'MouseID'
names(data_per_stride)[names(data_per_stride) == 'Date.of.Birth'] <- 'DOB'
names(data_per_stride)[names(data_per_stride) == 'OFA_Date.of.test.New'] <- 'TestDate'
names(data_per_stride)[names(data_per_stride) == 'OFA_Genotype'] <- 'Strain'
names(data_per_stride)[names(data_per_stride) == 'speed_cm_per_sec'] <- 'speed'
names(data_per_stride)[names(data_per_stride) == "OFA_Arena.ID"] <- 'Arena'
names(data_per_stride)[names(data_per_stride) == "OFA_Experimenter.ID"] <- 'Experimenter'
data_per_stride[,names(data_per_stride) %in% c('MouseID','Strain','Sex','TestAge')] <- lapply(data_per_stride[,names(data_per_stride) %in% c('MouseID','Strain','Sex','TestAge')], function(x) as.factor(x))
# Hand-fix a few malformed strain labels by factor-level position.
# NOTE(review): indexing levels by position is brittle — verify against the
# source file if the extract ever changes.
levels(data_per_stride$Strain)[1] <- "C57BL/6NJ"
levels(data_per_stride$Strain)[3] <- "Rik1<em1J> -/-"
levels(data_per_stride$Strain)[4] <- "Rik2<tm1.1(KOMP)Vlcg> -/-"
levels(data_per_stride$Strain)[119] <- "Mrps22<tm1.1(KOMP)Vlcg> -/+"
#Remove Strains
toMatch <- c("B6.Cg-Esrrb<tm1(cre)Yba>/J", "<em2J>/J COIN","Tex2")
matches <- unique(grep(paste(toMatch, collapse = "|"), data_per_stride$Strain, value = TRUE))
Strains <- setdiff(unique(data_per_stride$Strain), matches)
data_per_stride <- data_per_stride[data_per_stride$Strain %in% Strains, ]
#Focus on certain speed bins
data_per_stride <- data_per_stride[data_per_stride$bingrpname %in% c('speed_20_ang_vel_neg20',
'speed_25_ang_vel_neg20'),]
# Strip allele annotations ("<...>") and spaces from strain names.
data_per_stride$Strain <- sapply(seq(nrow(data_per_stride)), function(x) gsub("<.*>", "", data_per_stride$Strain[x]))
data_per_stride$Strain <- sapply(seq(nrow(data_per_stride)), function(x) gsub(" ", "", data_per_stride$Strain[x]))
data_per_stride$Strain <- as.factor(data_per_stride$Strain)
data_per_stride$MouseID <- droplevels(data_per_stride$MouseID)
# Per-animal means of the linear phenotypes, re-annotated with each animal's
# strain and test date (taken from its first stride record).
data_per_animal <- aggregate(x = data_per_stride[,names(data_per_stride) %in% c(Phenos.lin)], by = data_per_stride[c("MouseID")], FUN = mean)
Strain <- sapply(seq(dim(data_per_animal)[1]), function(x) data_per_stride[data_per_stride$MouseID == data_per_animal$MouseID[x], 'Strain'][1])
TestDate <- sapply(seq(dim(data_per_animal)[1]), function(x) data_per_stride[data_per_stride$MouseID == data_per_animal$MouseID[x], 'TestDate'][1])
data_per_animal <- cbind(Strain, TestDate, data_per_animal)
#Filter Strains for which at least 8 animals were tested
# NOTE(review): the comment above says 8, but the threshold used is >= 5 —
# confirm which is intended.
Strains8 <- names(table(data_per_animal$Strain))[table(data_per_animal$Strain) >= 5]
data_per_animal <- data_per_animal[data_per_animal$Strain %in% Strains8,]
data_per_stride <- data_per_stride[data_per_stride$Strain %in% Strains8,]
data_per_animal$Strain <- droplevels(data_per_animal$Strain)
# Per-strain means (over animal means), NA-robust.
data_per_strain <- aggregate(x = data_per_animal[,names(data_per_animal) %in% c(Phenos.lin)], by = data_per_animal[c("Strain")],
FUN = function(x) mean(x, na.rm=TRUE))
levels(data_per_strain$Strain)[1] <- "C57BL/6NJ"
data_per_strain[1,'Strain'] <- 'C57BL/6NJ'
# Assign each animal a genetic background (BG) inferred from substrings in the
# strain name; anything without a recognized marker is treated as C57BL/6NJ.
#<em1J> - em1J, (KOMP) - Mbp, Wtsi, Vlcg, (EUCOMM) - Hmgu
data_per_animal$BG <- as.factor(ifelse(grepl("Mbp", data_per_animal$Strain, fixed = TRUE), "Mbp",
ifelse(grepl("Wtsi", data_per_animal$Strain, fixed = TRUE), "Wtsi",
ifelse(grepl("Vlcg", data_per_animal$Strain, fixed = TRUE),"Vlcg",
ifelse(grepl("em1J", data_per_animal$Strain, fixed = TRUE),"em1J",
ifelse(grepl("Hmgu", data_per_animal$Strain, fixed = TRUE), "Hmgu","C57BL/6NJ"))))))
#C57BL/6NJ   em1J   Hmgu    Mbp   Vlcg   Wtsi
# Robust covariance distance-distance plot for the control background only.
# NOTE(review): `df`, `tmp`, `df.out` are reused repeatedly below with
# different contents — this section is order-dependent.
df <- data_per_animal[data_per_animal$BG %in% c('C57BL/6NJ'),]
tmp <- rrcov::CovRobust(df[,names(df) %in% Phenos.lin])
plot(tmp, which='dd')
# Strain-level multivariate outliers via robust vs classical Mahalanobis
# distances; controls get Outlier = -1 so they plot in their own color.
df <- data_per_strain
tmp <- mvoutlier::dd.plot(df[,names(df) %in% Phenos.lin])
df.out <- data.frame(rodist = tmp$md.rob, mahadist = tmp$md.cla, Strain = df[,'Strain'],
Outlier = tmp$outliers)
#(ifelse(mvoutlier::aq.plot(df[,names(df) %in% Phenos.lin])$outliers==TRUE,1,0))
df.out[df.out$Strain == 'C57BL/6NJ', 'Outlier'] <- -1
df.out$Outlier <- as.factor(df.out$Outlier)
df.out$Genotype <- ifelse(df.out$Strain == 'C57BL/6NJ', "C57BL/6NJ", "Mutant")
textdf <- df.out[df.out$Strain == 'C57BL/6NJ', ]
mycolors <- c("C57BL/6NJ" = "red", "Mutant" = "grey50")
ggplot(df.out, aes(x = mahadist, y=rodist)) + geom_point(alpha=0.8, aes(color=Outlier), size=4) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(1,-1),as.character(Strain),'')),size=6,box.padding=2) +
labs(x = 'Mahalanobis Distance', y = 'Robust Distance') + ggtitle('KOMP Outliers') + scale_color_manual(values=c("black","grey50","red")) +
theme_bw(base_size = 16) + theme(legend.position='none')
ggsave('../Temp3/Mvout-mean-greaterthan8animals.pdf', width=9, height=9)
Mutants.out <- unique(df.out[df.out$Outlier==1,'Strain'])
# Reshape per-strain means to long form for a per-phenotype jitter panel.
df <- df[,-1]
names(df) <- Phenos.lin.Nomen
df <- cbind('Strain' = data_per_strain$Strain,df)
df <- cbind(id = 1:dim(df)[1], df)
df.melt <- reshape::melt(df[,-2], id.vars = 'id')
df.melt <- cbind(df.melt, Strain = rep(df$Strain, length(Phenos.lin)), Outlier = as.factor(rep(df.out$Outlier, length(Phenos.lin))))
ggplot(df.melt, aes(y = value, color = Outlier, x = id)) + geom_jitter(alpha=0.7, size = 3) +
facet_wrap(~variable, scales='free') + scale_color_manual(values = c('black','grey50','red')) + theme_bw(base_size = 16) +
theme(legend.position = 'none') + labs(x = 'Index', y='Phenotype')
ggsave('../Temp2/Mvout2.pdf', width=9, height=9)
# Alternative strain-level outlier detection via PCOut (pcout): two distance
# components, final weight wfinal01 == 0 flags an outlier.
tmp <- mvoutlier::pcout(df[,names(df) %in% Phenos.lin])
df.out <- data.frame(Distance1 = tmp$x.dist1, Distance2 = tmp$x.dist2, Strain = df[,'Strain'],
Outlier = as.numeric(!tmp$wfinal01))
df.out$Genotype <- ifelse(df.out$Strain == 'C57BL/6NJ', "C57BL/6NJ", "Mutant")
textdf <- df.out[df.out$Strain == 'C57BL/6NJ', ]
mycolors <- c("C57BL/6NJ" = "red", "Mutant" = "grey50")
ggplot(df.out, aes(x = Distance1, y=Distance2)) + geom_point(alpha=0.8, aes(color=Genotype), size=4) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier==1,as.character(Strain),'')),size=4,box.padding=2) +
labs(x = 'Distance1', y = 'Distance2') + ggtitle('KOMP Outliers') + scale_color_manual(values=mycolors) +
theme_bw(base_size = 16)
# One dd-plot PDF per genetic background (side effect: writes Mvout-<BG>.pdf).
gBG <- c('em1J','Hmgu','Mbp','Vlcg','Wtsi')
lapply(seq(gBG), function(x) {
df <- data_per_animal[data_per_animal$BG %in% c(gBG[x]),]
tmp <- mvoutlier::dd.plot(df[,names(df) %in% Phenos.lin])
df.out <- data.frame(rodist = tmp$md.rob, mahadist = tmp$md.cla, MouseID = df[,'MouseID'],
Outlier = ifelse(tmp$outliers==TRUE,1,0))
p1 <- ggplot(df.out, aes(x = mahadist, y=rodist)) + geom_point(alpha=0.8) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier==1,as.character(MouseID),'')),hjust=0,vjust=0) +
labs(x = 'Mahalanobis Distance', y = 'Robust Distance') + ggtitle(paste0('Background - ',gBG[x]))
ggsave(paste0('Mvout-',gBG[x],'.pdf'),p1)
})
Mvoutliers_komp <- function(gBG) {
  # Distance-distance outlier plot (robust vs classical Mahalanobis distance)
  # for all animals belonging to one genetic background. Outlying animals are
  # labelled with their MouseID. Depends on the file-level globals
  # `data_per_animal` and `Phenos.lin`.
  bg_animals <- data_per_animal[data_per_animal$BG %in% c(gBG), ]
  dd <- mvoutlier::dd.plot(bg_animals[, names(bg_animals) %in% Phenos.lin])
  plot_df <- data.frame(
    rodist = dd$md.rob,
    mahadist = dd$md.cla,
    MouseID = bg_animals[, 'MouseID'],
    Outlier = ifelse(dd$outliers == TRUE, 1, 0)
  )
  ggplot(plot_df, aes(x = mahadist, y = rodist)) +
    geom_point(alpha = 0.8) +
    ggrepel::geom_text_repel(
      aes(label = ifelse(Outlier == 1, as.character(MouseID), '')),
      hjust = 0, vjust = 0
    ) +
    labs(x = 'Mahalanobis Distance', y = 'Robust Distance') +
    ggtitle(paste0('Background - ', gBG))
}
# Animal-level multivariate outlier analysis over all strains: dd-plot first,
# then a z-scored PCOut run with a few manually-flagged animals.
df <- data_per_animal
tmp <- mvoutlier::dd.plot(df[,names(df) %in% Phenos.lin])
df.out <- data.frame(rodist = tmp$md.rob, mahadist = tmp$md.cla, MouseID = df[,'MouseID'],
Outlier = tmp$outliers)
# Manual cutoff: robust distance > 5.5 marked as a "hard" outlier (-1).
df.out[df.out$rodist > 5.5, 'Outlier'] <- -1
#(ifelse(mvoutlier::aq.plot(df[,names(df) %in% Phenos.lin])$outliers==TRUE,1,0))
df.out$Outlier <- as.factor(df.out$Outlier)
ggplot(df.out, aes(x=mahadist, y=rodist)) + geom_point(alpha=0.6, aes(color=Outlier), size=4) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(-1),as.character(MouseID),'')),size=4,box.padding=2) +
labs(x = 'Mahalanobis Distance', y = 'Robust Distance') + ggtitle('KOMP animal outliers') + scale_color_manual(values=c("red","black","grey50")) +
theme_bw(base_size = 16) + theme(legend.position='none')
# PCOut on z-scored phenotypes; a handful of animals and distance thresholds
# are flagged by hand (Outlier = -1) after visual inspection.
df <- data.frame(Strain = data_per_animal$Strain, MouseID = data_per_animal$MouseID, Sex = data_per_animal$Sex)
df <- cbind(df, apply(data_per_animal[,names(data_per_animal) %in% Phenos.lin],2,function(x) (x - mean(x))/sd(x)))
tmp <- mvoutlier::pcout(df[,names(df) %in% Phenos.lin],makeplot=TRUE)
df.out <- data.frame(Distance1 = tmp$x.dist1, Distance2 = tmp$x.dist2,
Label = paste0(df[,'MouseID']," (", df[,'Strain'], ")"), MouseID = df[,'MouseID'], Strain = df[,'Strain'],
Outlier = as.numeric(!tmp$wfinal01))
df.out[(df.out$Distance2 >= 5.9 | df.out$Distance1 > 10), 'Outlier'] <- -1
df.out[df.out$MouseID == 'J80962', 'Outlier'] <- -1
df.out[df.out$MouseID == 'J76119', 'Outlier'] <- -1
df.out$Outlier <- as.factor(df.out$Outlier)
ggplot(df.out, aes(x = Distance1, y=Distance2)) + geom_point(alpha=0.8, aes(color=Outlier), size=4) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier==-1,as.character(Label),'')),size=5,box.padding=2) +
labs(x = 'Distance1', y = 'Distance2') + ggtitle('KOMP Outliers') + scale_color_manual(values=c("red","black","grey50")) +
theme_bw(base_size = 28) + theme(legend.position='none') + ggtitle('KOMP animal outliers')
ggsave('../Temp7/komp-animal-outliers2.pdf', width=36, height=36, limitsize=FALSE) #base_size=98 for plots
#df.out[df.out$Outlier %in% c(1,-1),'Outlier'] <- -1
#df.out[df.out$Strain == 'Rab6b-/-','Outlier'] <- 1
#df.out[df.out$Strain == 'Lamp5-/-','Outlier'] <- 1
#df.out$Outlier <- as.factor(df.out$Outlier)
ggplot(df.out, aes(x = Distance1, y=Distance2)) + geom_point(alpha=0.8, aes(color=Outlier), size=4) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier==1,as.character(Label),'')),size=5,box.padding=2) +
labs(x = 'Distance1', y = 'Distance2') + ggtitle('KOMP Outliers') + scale_color_manual(values=c("grey50","black","red")) +
theme_bw(base_size = 28) + theme(legend.position='none') + ggtitle('KOMP animal outliers')
# Per-strain proportion of flagged animals, plotted as a labelled bar chart.
out.df <- df.out[df.out$Outlier %in% c(1,-1), names(df.out) %in% c('Strain','MouseID')]
#out.df <- out.df[out.df$Strain %in% names(table(data_per_animal$Strain)[table(data_per_animal$Strain) < 5]),]
Strains.out <- out.df$Strain
#out.df <- out.df[out.df$Strain %in% setdiff(Strains.out,Strains8),]
#out.df$Strain <- droplevels(out.df$Strain)
out.df$MouseID <- droplevels(out.df$MouseID)
total_animals <- sapply(seq(length(unique(out.df$Strain))), function(x) table(data_per_animal$Strain)[paste0(unique(out.df$Strain)[x])])
out_animals <- sapply(seq(length(unique(out.df$Strain))), function(x) table(out.df$Strain)[paste0(unique(out.df$Strain)[x])])
df <- data.frame(Proportion = out_animals/total_animals,
Animals = total_animals)
df <- cbind(Strain = rownames(df), df)
df <- df[with(df,order(-Proportion)),]
#df$color <- ifelse(df$Strain %in% c('Pcdh9-/+','Alg11-/+','Sfxn5-/+','Nsun5-/-','Whamm-/-'), "red", "black")
#df$Strain <- glue("<i style='color:{color}'>{Strain}</i>")
#sapply(seq(nrow(out.df)), function(x) length(unique(data_per_animal[data_per_animal$Strain %in% out.df$Strain[x],'MouseID'])))
ggplot(df, aes(x=Strain,y=Proportion)) + geom_bar(stat='identity') + theme_minimal(base_size=22) +
theme(axis.text.x=element_text(angle = 90,hjust=1,vjust=0.5),
axis.text.x.top = element_text(vjust=0.5)) + labs(y = 'Proportion') +
geom_text(label = paste0(df$Animals),vjust = -0.1, hjust = 0.5,size=5.5) + coord_cartesian(clip = "off")
ggsave('../Temp7/animal-by-strain-prop-outliers.pdf', width=11.75, height=3.8)
# Per-stride traces for one strain (Sfxn5-/+), flagged animals drawn in red.
mouseID.out <- sapply(seq(length(unique(out.df$Strain))), function(x)
out.df[out.df$Strain == unique(out.df$Strain)[x],'MouseID'])
df <- data_per_stride[data_per_stride$MouseID %in% unlist(mouseID.out[1:length(mouseID.out)]), ]
df$MouseID <- droplevels(df$MouseID)
df$Strain <- droplevels(df$Strain)
df <- data_per_stride[data_per_stride$Strain == 'Sfxn5-/+',c('MouseID',Phenos.lin)]
#colnames(df)[which(names(df) %in% Phenos.lin)] <- Phenos.lin.Nomen
df$MouseID <- droplevels(df$MouseID)
#df$Strain <- droplevels(df$Strain)
df$Index <- ave(df$stride_length, df$MouseID, FUN = seq_along)
df$Outlier <- as.factor(ifelse(df$MouseID %in% out.df$MouseID, 1, 0))
# Builds p1..p9 in the calling environment via assign(); plot_grid below
# relies on those names existing.
invisible(lapply(seq(length(Phenos.lin)), function(x) assign(paste0("p",x),
ggplot(df, aes_string(x = 'Index', y = Phenos.lin[x])) + geom_line(aes(color=MouseID)) +
scale_color_manual(values = c(ifelse(sapply(seq(length(unique(df$MouseID))), function(x)
unique(df[df$MouseID==unique(df$MouseID)[x],'Outlier'])) == 1, 'red','grey50'))) +
labs(y = paste0(Phenos.lin.Nomen[x])), inherits=TRUE)))
legend <- get_legend(p1)
p <- plot_grid(p1+labs(x=NULL)+theme(legend.position='none'),p2+labs(x=NULL)+theme(legend.position='none'),p3+labs(x=NULL)+theme(legend.position='none'),
p4+labs(x=NULL)+theme(legend.position='none'),p5+labs(x=NULL)+theme(legend.position='none'),
p6+labs(x=NULL)+theme(legend.position='none'),p7+labs(x=NULL)+theme(legend.position='none'),
p8+labs(x=NULL)+theme(legend.position='none'),p9+labs(x=NULL)+theme(legend.position='none'),nrow=9)
plot_grid(p,legend,rel_widths = c(3, .4))
dev.print(pdf,'../Temp4/Sfxn5.pdf')
#Remove outliers
# Replaces per-strain boxplot outliers with NA in data_per_animal IN PLACE.
# NOTE(review): the <<- writes to the global data frame from inside nested
# closures — fragile; a vectorized rewrite would be safer.
invisible(sapply(seq(length(unique(data_per_animal$Strain))), function(s) Map(function(p) {
vals <- data_per_animal[data_per_animal$Strain == unique(data_per_animal$Strain)[s],][[Phenos.lin[p]]];
outliers <- boxplot.stats(vals)$out
ids <- match(outliers, vals)
data_per_animal[data_per_animal$Strain == unique(data_per_animal$Strain)[s],][paste0(Phenos.lin[p])][ids,] <<- NA
}, seq(length(Phenos.lin)))))
# For every mutant strain, find control animals (CtrlStrain) tested within
# +/- `tw` days of any of that strain's test dates.
#
# Args:
#   CtrlStrain: strain name treated as the control line (default "C57BL/6NJ").
#   tw:         time window in days on either side of each mutant test date.
# Returns:
#   data.frame with columns Strain, MouseID, Type ("Control"/"Mutant") pairing
#   each mutant strain with its date-matched control animals, plus all mutant
#   animals themselves. Reads the file-level global `data_per_stride`.
# NOTE(review): vectors are grown inside the loops (O(n^2) appends), and
# `format()` is applied to character vectors (a no-op) — candidates for
# cleanup, left as-is here because downstream code depends on exact output.
komp_select_controls <- function(CtrlStrain="C57BL/6NJ", tw){
control.list <- subset(data_per_stride, Strain == CtrlStrain)[,c('MouseID','Strain','TestDate')]
mutant.list <- subset(data_per_stride, Strain != CtrlStrain)[,c('MouseID','Strain','TestDate')]
mutant.list$Type <- 'Mutant'
all.control.dates.df <- NULL
control.dates.df.tmp <- NULL
all.control.mouseid.df <- NULL
for (s in unique(mutant.list$Strain)){
mutant.dates.list <- unique(subset(mutant.list,Strain == s)$TestDate)
control.dates.list <- NULL
mouse.id.list <- NULL
for (d in mutant.dates.list){
# Window of acceptable control test dates around this mutant test date.
start.d <- as.Date(d) - tw
end.d <- as.Date(d) + tw
control.dates <- unique(subset(control.list, as.Date(TestDate) >= start.d & as.Date(TestDate) <= end.d)$TestDate)
control.dates.list <- c(control.dates.list, format(control.dates, format = '%Y-%m-%d'))
mouse.id <- unique(subset(control.list, as.Date(TestDate) >= start.d & as.Date(TestDate) <= end.d)$MouseID)
mouse.id.list <- c(mouse.id.list, as.character(mouse.id))
}
# Skip strains with no date-matched controls at all.
if (identical(mouse.id.list,character(0))) next
control.dates.df <- data.frame(Strain = s, TestDate = format(control.dates.list, format = '%Y-%m-%d'), Type = 'Control')
all.control.dates.df <- rbind(all.control.dates.df, control.dates.df[,c('Strain','Type')])
control.dates.df.tmp <- c(control.dates.df.tmp, format(control.dates.list, format = '%Y-%m-%d'))
control.mouseid.df <- data.frame(Strain = s, MouseID = mouse.id.list, Type = 'Control')
all.control.mouseid.df <- rbind(all.control.mouseid.df,control.mouseid.df[,c('Strain','MouseID','Type')])
}
# dates.df is assembled but never returned — only the MouseID pairing is used.
all.control.dates.df <- cbind(all.control.dates.df, TestDate = format(control.dates.df.tmp, format = '%Y-%m-%d'))
dates.df <- rbind(all.control.dates.df,mutant.list[,c('Strain','TestDate','Type')])
dates.df$TestDate <- as.Date(dates.df$TestDate, format = "%Y-%m-%d")
mouseid.df <- rbind(all.control.mouseid.df, mutant.list[,c('Strain','MouseID','Type')])
return(mouseid.df)
}
# Pair every mutant strain with date-matched control animals (+/- 21 days).
controlids.df <- komp_select_controls(CtrlStrain="C57BL/6NJ", tw=21) #tw: time window
# Strains flagged as outliers by the analyses above, kept for reference.
Mutants.out <- c('Pcdh9-/+','Alg11-/+','Nsun5-/-','Sfxn5-/+','Whamm-/-','Hoxc12-/-','Mettl10-/-','Tusc3-/+',
'Rmnd5b-/-','Msn-/-','Prss8-/+','Zbtb43-/-','Ccdc28a-/-','Pigc-/+')
#'Whamm-/-'
# Per-mutant boxplot panels of tail/nose displacement phenotypes against the
# matched controls; one PDF per mutant strain.
# NOTE(review): this lapply references `Mutants` and `out.df`, which are
# defined later / in earlier interactive sections — the script appears to be
# run interactively out of order; verify before batch execution.
lapply(seq(length(Mutants)), function(m) {
CtrlIDs <- unique(subset(controlids.df,Strain == Mutants[m])$MouseID)
dfa <- data_per_animal[data_per_animal$MouseID %in% CtrlIDs, ]
dfa$Genotype <- ifelse(dfa$Strain == 'C57BL/6NJ', 'Control', 'Mutant')
dfa$MouseID <- droplevels(dfa$MouseID)
#dfa <- dfa[dfa$TestDate %in% '2016-09-06', ] #only for Alg11 for plotting purposes
dfa <- dfa[dfa$TestDate %in% names(which(table(dfa$TestDate, dfa$Genotype)[,2] >= 1)), ] #for everything else
dfa$TestDate <- droplevels(dfa$TestDate)
#CtrlIDs <- sample(unique(dfa[dfa$Genotype=='Control','MouseID']), length(unique(dfa[dfa$Genotype=='Mutant','MouseID'])))
CtrlIDs <- setdiff(unique(dfa[dfa$Genotype == 'Control', 'MouseID']), unique(out.df$MouseID))
df <- data_per_stride[data_per_stride$Strain == Mutants[m],c('MouseID','BodyLength','Sex',Phenos.lin)]
df <- rbind(df,data_per_stride[data_per_stride$MouseID %in% CtrlIDs, c('MouseID', 'BodyLength','Sex',Phenos.lin)])
df$MouseID <- droplevels(df$MouseID)
# Outlier coding: 1 = flagged mutant, -1 = control, 0 = unflagged mutant.
df$Outlier <- as.factor(ifelse(df$MouseID %in% out.df$MouseID, 1,
ifelse(df$MouseID %in% CtrlIDs, -1, 0)))
if (!("0" %in% levels(df$Outlier))){df$Outlier <- factor(df$Outlier, levels = c("0",levels(df$Outlier)))}
df2 <- df[,names(df) %in% c(Phenos.lin)]
#df2 <- data.frame(do.call(cbind,lapply(seq(length(Phenos.lin)), function(p)
#	as.numeric(resid(lm(df[[Phenos.lin[p]]] ~ BodyLength, data = df))))))
names(df2) <- Phenos.lin.Nomen
df2 <- cbind(id = 1:dim(df)[1], df2)
df.melt <- reshape::melt(df2, id.vars = 'id')
df.melt <- cbind(df.melt, MouseID = rep(rep(names(table(df$MouseID)), as.numeric(table(df$MouseID))),length(Phenos.lin)))
df.melt$Outlier <- as.factor(ifelse(df.melt$MouseID %in% out.df$MouseID, 1,
ifelse(df.melt$MouseID %in% CtrlIDs, -1, 0)))
if (!("0" %in% levels(df.melt$Outlier))){df.melt$Outlier <- factor(df.melt$Outlier, levels = c("0",levels(df.melt$Outlier)))}
p2 <- ggplot(df.melt[df.melt$variable %in% c('Base Tail LD','Tip Tail LD','Nose LD'),], aes(x=MouseID,y=value)) +
geom_boxplot(outlier.shape=NA,aes(fill = Outlier),alpha = 0.4) +
facet_wrap(~ variable, scales = 'free') + ggtitle(paste0(Mutants[m])) +
scale_fill_manual(name = 'Genotype', values =c("0" = "black","1" = "#d94801", "-1" = "#6a51a3"),
labels = c("0"='Mutant',"-1"='Control',"1"="Mutant (Outlier)"), drop = FALSE) + theme_bw(base_size=22) +
theme(axis.text.x=element_text(angle=90,size=16), legend.position='none') + labs(y = 'Residuals')
#ggsave(paste0('../Temp5/',gsub("*./.*","",Mutants[m]),'.pdf'), width=16,height=16)
ggsave(paste0('../Temp5/',gsub("*./.*","",Mutants[m]),'.pdf'), width=25,height=9)
#geom_jitter(color='grey50',alpha=0.3,width=0.01, shape = 1, stroke=1) +
})
ggsave('../Temp7/sfxn-alg11-2.pdf', width = 12, height = 10)
# Interactive 3D scatter plots (plotly) of one strain's per-animal means,
# colored by outlier status.
dfa <- data_per_animal[data_per_animal$Strain == 'Nsun5-/-',]
dfa$Outlier <- as.factor(ifelse(dfa$MouseID %in% out.df$MouseID, 1, 0))
dfa$MouseID <- droplevels(dfa$MouseID)
dfa$Strain <- droplevels(dfa$Strain)
t <- list(
size = 14,
color = 'black')
p <- plot_ly(dfa,x=~`stride_length`, y=~`step_width`, z=~`step_length1`, type='scatter3d', color=~`Outlier`,colors=c("black","red")) %>%
layout(scene = list(
xaxis = list(title = "Stride Length"),
yaxis = list(title = "Step Width"),
zaxis = list(title = "Step Length")
), font=t)
p <- plot_ly(dfa,x=~`nose_lateral_displacement`, y=~`tip_tail_lateral_displacement`, z=~`base_tail_lateral_displacement`, type='scatter3d', color=~`Outlier`,colors=c("black","red")) %>%
layout(scene = list(
xaxis = list(title = "Nose LD"),
yaxis = list(title = "Tip Tail LD"),
zaxis = list(title = "Base Tail LD")
), font=t)
ggplot(df,aes(x=Index,y=stride_length)) + geom_line(aes(color=MouseID)) + geom_point(size=0.1)+
scale_color_manual(values=c("red","grey50","grey50","grey50","grey50","grey50","grey50","grey50"))
#df.out[df.out$MouseID %in% c('J79719','J82869','J86327'),'Outlier'] <- 1
#df.out[df.out$MouseID %in% c('J67783','J81952','J81953'),'Outlier'] <- 1
#layout_matrix <- rbind(c(1,1,1,2,2),c(1,1,1,3,3))
#p <- gridExtra::grid.arrange(C,CX,CY,layout_matrix=layout_matrix)
#p1|p2
# Strain-level PCOut outliers, large-format version for the report.
df <- data_per_strain
tmp <- mvoutlier::pcout(df[,names(df) %in% Phenos.lin])
df.out <- data.frame(Distance1 = tmp$x.dist1, Distance2 = tmp$x.dist2, Strain = df[,'Strain'],
Outlier = as.numeric(!tmp$wfinal01),WeightL = tmp$wloc, WeightS = tmp$wscat)
df.out[df.out$Strain == 'C57BL/6NJ', 'Outlier'] <- -1
df.out$Outlier <- as.factor(df.out$Outlier)
ggplot(df.out, aes(x = Distance1, y=Distance2)) + geom_point(alpha=0.8, aes(color=Outlier), size=11) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(-1,1),as.character(Strain),'')),size=24,box.padding=6) +
labs(x = 'Distance1', y = 'Distance2') + ggtitle('KOMP strain outliers') + scale_color_manual(values=c("black","grey50","red")) +
theme_bw(base_size = 60) + theme(legend.position='none')
ggsave('../Temp7/komp-strain-outliers.pdf', width=31.9, height=31.9)
# Run komp_lda() on every ordered pair of these seven outlier strains.
Mutants.out <- c("Cldn13-/-","Kcnd3-/-","Keg1-/-","Rab11a-/+","Rapgef1-/+","Sema6a-/+","Sfxn5-/+")
tmp <- gtools::permutations(n=7,r=2,v=1:7)
lapply(seq(nrow(tmp)), function(x) {cat("Permutation = ", x, "\n");
Mutants <- Mutants.out[tmp[x,]];
komp_lda(Mutants)
}
)
# Manually-selected strain pairs for individual LDA runs; only the last
# assignment is in effect when run top-to-bottom.
#Mutants <- c('Arpc5l-/+','Fam120b-/+')
#Mutants <- c('Tlk1-/-','Fam120b-/+')
Mutants <- c('Ndufs8-/+','Fam120b-/+')
Mutants <- c('Rusc1-/-','Spin1-/+')
Mutants <- c('Dgkh-/-','Tmem222-/+')
# Linear discriminant analysis separating two mutant strains from their
# date-matched C57BL/6NJ controls.
#
# Args:
#   Mutants: character vector of exactly two mutant strain names.
# Returns:
#   A gridExtra arrangement: LD1/LD2 scatter with ellipses, LD loading bar
#   charts, and boxplots of the top-loading phenotype for each discriminant.
# Reads the file-level globals `controlids.df`, `data_per_animal`,
# `Phenos.lin`, `Phenos.lin.Nomen`; uses MASS::lda via lda().
komp_lda <- function(Mutants){
df <- data.frame()
CtrlStrain <- "C57BL/6NJ"
# Collect each mutant's animals plus its matched controls, keeping only test
# dates on which at least one mutant was run.
for (m in seq(Mutants)){
#cat("Mutant", paste0(Mutants[m]), "\n")
CtrlIDs <- unique(subset(controlids.df,Strain == Mutants[m])$MouseID)
df1 <- data_per_animal[data_per_animal$MouseID %in% CtrlIDs,]
df1['Genotype'] <- ifelse(df1$Strain == CtrlStrain, 'Control','Mutant')
df1$Genotype <- relevel(factor(df1$Genotype), ref = "Control")
df1 <- df1[df1$TestDate %in% names(which(table(df1$TestDate, df1$Genotype)[,2] >= 1)), ]
df1$Strain <- droplevels(df1$Strain)
df <- rbind(df,df1)
}
#df <- unique(df)
df <- df[,-which(names(df) %in% c('BodyLength','TestDate','TestAge','Sex'))] #Remove BodyLength
#df$Outlier <- as.factor(sapply(seq(nrow(df)), function(x) df.out[df.out$MouseID == df$MouseID[x], 'Outlier']))
#df[df$Strain == CtrlStrain, 'Outlier'] <- 0
MutStrain <- setdiff(levels(df$Strain),CtrlStrain)
df$Strain <- factor(df$Strain, levels = c(CtrlStrain,MutStrain[1],MutStrain[2]), ordered=TRUE)
#df <- df[,-which(names(df) %in% c('BodyLength'))] #Remove BodyLength
# Z-score all numeric phenotypes before the SVD/LDA.
df[,sapply(df,is.numeric)] <- apply(df[,sapply(df,is.numeric)], 2, function(x) (x - mean(x,na.rm=TRUE))/sd(x,na.rm=TRUE))
#FReffects <- 'BodyLength'
#formulas <- unname(sapply(Phenos.lin ,function(x) paste(x, "~", FReffects), simplify=TRUE))
#fits <- lapply(formulas, lm, data = df)
#df_resid <- data.frame(sapply(seq(Phenos.lin), function(x) resid(fits[[x]])))
#colnames(df_resid) <- Phenos.lin
#df_resid <- cbind(Strain = df$Strain, df_resid)
#df_lda <- df_resid
#df_lda <- data.frame(Strain = df$Strain, df[,names(df) %in% Phenos.lin])
df_resid <- df
# PCA via SVD; LDA is fit on the first 6 principal component scores with
# equal priors over the three groups (control + two mutants).
df_svd <- svd(df_resid[,sapply(df_resid,is.numeric)])
df_pca <- df_svd$u %*% diag(df_svd$d)
tmp <- df_svd$d^2/(nrow(df)-1)
df_lda <- data.frame(Strain = df$Strain, df_pca[,1:6])
#colnames(df_lda)[2:ncol(df_lda)] <- Phenos.lin
#df_lda <- data.frame(Strain = df$Strain, df[,names(df) %in% Phenos.lin])
fit_lda <- lda(Strain ~ ., data = df_lda, prior = c(1,1,1)/3)
lda_values <- predict(fit_lda, df_lda[,-1])
C <- ggplot(data = data.frame(Strain = df$Strain, lda_values$x), aes(x=LD1,y=LD2,shape=Strain,color=Strain,fill=Strain)) + geom_point(size = 2,aes(color=Strain)) +
stat_ellipse(geom = "polygon", alpha = 1/3, aes(fill=Strain)) + theme_bw(base_size = 16) + theme(legend.position = 'top') +
scale_color_manual(values = c(assign(CtrlStrain,"#e41a1c"),assign(MutStrain[1],"#377eb8"),assign(MutStrain[2],"#4daf4a"))) +
scale_fill_manual(values = c("C57BL/6NJ" = "#e41a1c",assign(MutStrain[1],"#377eb8"),assign(MutStrain[2],"#4daf4a")))
#ggrepel::geom_text_repel(aes(label=ifelse(df$Outlier ==1,as.character(df$MouseID),'')),size=8,box.padding=2,show.legend=FALSE)
#tmp <- plsda(df[,names(df) %in% c(Phenos.lin)], df$Strain)
#ggplot(data = data.frame(Strain = df$Strain, tmp$variates$X), aes(x=comp1,y=comp2,shape=Strain)) + geom_point(aes(color=Strain)) + stat_ellipse(aes(color = Strain))
#ggord::ggord(fit_lda,df$Strain,veclsz=NA,labcol=NA) + theme_bw(base_size=16) + theme(legend.position = 'top')
#lda_result <- data.frame(Strain = df$Strain, lda = predict(fit_lda)$x)
# Loading bar charts for LD1/LD2. NOTE(review): fit_lda$scaling has one row
# per PC (6), but it is relabelled with the 9 phenotype names here — verify
# this mapping; it looks like a leftover from the phenotype-space LDA variant
# commented out above.
PCLD_df <- as.data.frame(fit_lda$scaling)
rownames(PCLD_df) <- Phenos.lin
PCLD1 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[,1]))
PCLD1$Phenos <- factor(PCLD1$Phenos, levels = c('Speed','Limb Duty Factor', 'Step Length', 'Step Width',
'Stride Length', 'TS', 'Base Tail LD', 'Tip Tail LD', 'Nose LD'))
CX <- ggplot(PCLD1, aes(x = Phenos, y = value)) + geom_bar(stat = 'identity', color = 'black') + theme_bw(base_size = 16) +
theme(axis.text.x = element_text(angle=90,hjust=1,vjust=0.5)) + labs(x = NULL, y = 'Loadings') + ggtitle('LD1')
PCLD2 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[,2]))
PCLD2$Phenos <- factor(PCLD1$Phenos, levels = c('Speed','Limb Duty Factor', 'Step Length', 'Step Width',
'Stride Length', 'TS', 'Base Tail LD', 'Tip Tail LD', 'Nose LD'))
CY <- ggplot(PCLD2, aes(x = Phenos, y = value)) + geom_bar(stat = 'identity', color = 'black') + theme_bw(base_size = 16) +
theme(axis.text.x = element_text(angle=90,hjust=1,vjust=0.5)) + labs(x = NULL, y = NULL) + ggtitle('LD2')
#PCLD2 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[,2]))
#acc <- sum(df$Strain == predict(fit_lda)$class)/nrow(df)
#C <- ggord::ggord(fit_lda,df$Strain,veclsz=NA,labcol=NA) + theme_classic(base_size=16) + theme(legend.position = 'top')
#+ ggtitle(paste0('Accuracy: ', round(acc,2)))
#blankPlot <- ggplot() + geom_blank(aes(1,1)) + theme_void()
#CC <- gridExtra::grid.arrange(CY,C,blankPlot,CX, ncol=2,nrow=2,widths = c(1,4), heights=c(4,1))
#p <- plot(fit_lda)
#p <- plot_grid(C,CX,CY,ncol=3)
# Boxplots of the single highest-loading phenotype on LD1 and LD2.
phenotype <- PCLD1$Pheno[which.max(PCLD1$value)]
phenoname <- Phenos.lin[which(Phenos.lin.Nomen == phenotype)]
p1 <- ggplot(df_lda, aes_string(x = 'Strain', y = phenoname, fill = 'Strain')) + geom_boxplot(alpha = 1/3, outlier.shape = NA) + theme_bw(base_size = 16) +
theme(legend.position = 'none', axis.text.x=element_blank(),
axis.ticks.x=element_blank()) + labs(y = paste0(phenotype," ", "(Residuals)")) +
scale_fill_manual(values = c("C57BL/6NJ" = "#e41a1c",assign(MutStrain[1],"#377eb8"),assign(MutStrain[2],"#4daf4a")))
phenotype <- PCLD2$Pheno[which.max(PCLD2$value)]
phenoname <- Phenos.lin[which(Phenos.lin.Nomen == phenotype)]
p2 <- ggplot(df_lda, aes_string(x = 'Strain', y = phenoname, fill = 'Strain')) + geom_boxplot(alpha = 1/3, outlier.shape = NA) + theme_bw(base_size = 16) +
theme(legend.position = 'none', axis.text.x=element_blank(),
axis.ticks.x=element_blank()) + labs(y = paste0(phenotype," ", "(Residuals)")) +
scale_fill_manual(values = c("C57BL/6NJ" = "#e41a1c",assign(MutStrain[1],"#377eb8"),assign(MutStrain[2],"#4daf4a")))
layout_matrix <- rbind(c(1,1,1,2,3),c(1,1,1,4,5))
p <- gridExtra::grid.arrange(C,CX,CY,p1,p2,layout_matrix = layout_matrix)
return(p)
}
#Mutants <- c("Ndufs8-/+","Fam120b-/+")
Mutants <- c('Nsun5-/-','Fam120b-/+')
Mutants <- c('Rusc1-/-','Spin1-/+')
Mutants <- c('Arpc5l-/+','Fam120b-/+')
df <- data.frame()
CtrlStrain <- "C57BL/6NJ"
for (m in seq(Mutants)){
#cat("Mutant", paste0(Mutants[m]), "\n")
CtrlIDs <- unique(subset(controlids.df,Strain == Mutants[m])$MouseID)
df1 <- data_per_animal[data_per_animal$MouseID %in% CtrlIDs,]
df1['Genotype'] <- ifelse(df1$Strain == CtrlStrain, 'Control','Mutant')
df1$Genotype <- relevel(factor(df1$Genotype), ref = "Control")
df1 <- df1[df1$TestDate %in% names(which(table(df1$TestDate, df1$Genotype)[,2] >= 1)), ]
df1$Strain <- droplevels(df1$Strain)
df <- rbind(df,df1)
}
#df <- unique(df)
df <- df[,-which(names(df) %in% c('BodyLength','TestDate','TestAge','Sex'))] #Remove BodyLength
#df$Outlier <- as.factor(sapply(seq(nrow(df)), function(x) df.out[df.out$MouseID == df$MouseID[x], 'Outlier']))
#df[df$Strain == CtrlStrain, 'Outlier'] <- 0
MutStrain <- setdiff(levels(df$Strain),CtrlStrain)
df$Strain <- factor(df$Strain, levels = c(CtrlStrain,MutStrain[1],MutStrain[2]), ordered=TRUE)
#df <- df[,-which(names(df) %in% c('BodyLength'))] #Remove BodyLength
#Step 1
# ---- Hand-rolled Fisher LDA with shrinkage covariance estimates ----
# Split per class: control plus the two mutant lines chosen above.
df0 <- df[df$Strain %in% CtrlStrain,]
df1 <- df[df$Strain %in% Mutants[1],]
df2 <- df[df$Strain %in% Mutants[2],]
# Per-class phenotype mean vectors (one column per class).
mean_vec <- matrix(0,nrow=length(Phenos.lin),3)
mean_vec[,1] <- apply(df0[,sapply(df0,is.numeric)],2,mean)
mean_vec[,2] <- apply(df1[,sapply(df1,is.numeric)],2,mean)
mean_vec[,3] <- apply(df2[,sapply(df2,is.numeric)],2,mean)
#Computing the scatter matrices using Ledoit-Wolf Shrinkage estimator
cov0 <- nlshrink::linshrink_cov(as.matrix(df0[,-which(names(df0) %in% c('Strain','MouseID','Genotype'))]))
cov1 <- nlshrink::linshrink_cov(as.matrix(df1[,-which(names(df1) %in% c('Strain','MouseID','Genotype'))]))
cov2 <- nlshrink::linshrink_cov(as.matrix(df2[,-which(names(df2) %in% c('Strain','MouseID','Genotype'))]))
#cov0 <- cov(as.matrix(df0[,-which(names(df0) %in% c('Strain','MouseID','Genotype'))]))
#cov1 <- cov(as.matrix(df1[,-which(names(df1) %in% c('Strain','MouseID','Genotype'))]))
#cov2 <- cov(as.matrix(df2[,-which(names(df2) %in% c('Strain','MouseID','Genotype'))]))
# Within-class scatter.
covw <- cov0 + cov1 + cov2
#Computing Between-class scatter matrix
overall_mean <- apply(mean_vec,1,mean)
N <- c(nrow(df0),nrow(df1),nrow(df2))
# NOTE(review): `sum` here shadows base::sum with a matrix accumulator;
# later *calls* to sum() still resolve to the base function.
sum <- 0
for (i in 1:3){
sum <- sum + N[i]*(mean_vec[,i] - overall_mean)%*%t(mean_vec[,i] - overall_mean)
}
covb <- sum
# Discriminant directions = leading eigenvectors of solve(Sw) %*% Sb.
tmp <- eigen(solve(covw)%*%covb)
tmp2 <- tmp$vectors[,1:2]
# Project every animal onto the two discriminants.
Y <- as.matrix(df[,-which(names(df) %in% c('Strain','MouseID','Genotype'))])%*%tmp2
Y <- as.data.frame(Y)
Y$Strain <- df$Strain
# NOTE(review): assign() is used only for its return value here; it also
# creates stray global variables named after the strains as a side effect.
C <- ggplot(data = Y, aes(x=V1,y=V2,shape=Strain,color=Strain,fill=Strain)) + geom_point(size = 2,aes(color=Strain)) +
stat_ellipse(geom = "polygon", alpha = 1/3, aes(fill=Strain)) + theme_bw(base_size = 16) + theme(legend.position = 'top') +
scale_color_manual(values = c(assign(CtrlStrain,"#e41a1c"),assign(MutStrain[1],"#377eb8"),assign(MutStrain[2],"#4daf4a"))) +
scale_fill_manual(values = c("C57BL/6NJ" = "#e41a1c",assign(MutStrain[1],"#377eb8"),assign(MutStrain[2],"#4daf4a"))) +
labs(x = 'LD1', y = 'LD2')
# ---- Loadings of the two discriminants + boxplots of the top phenotypes ----
PCLD_df <- as.data.frame(tmp2)
rownames(PCLD_df) <- Phenos.lin
# Fixed display order for the phenotype axis (shared by both loading plots).
pheno_levels <- c('Speed', 'Limb Duty Factor', 'Step Length', 'Step Width',
                  'Stride Length', 'TS', 'Base Tail LD', 'Tip Tail LD', 'Nose LD')
PCLD1 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[, 1]))
PCLD1$Phenos <- factor(PCLD1$Phenos, levels = pheno_levels)
CX <- ggplot(PCLD1, aes(x = Phenos, y = value)) + geom_bar(stat = 'identity', color = 'black') + theme_bw(base_size = 16) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + labs(x = NULL, y = 'Loadings') + ggtitle('LD1')
PCLD2 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[, 2]))
# BUG FIX: was factor(PCLD1$Phenos, ...) — a copy/paste slip. Harmless only
# because both columns hold the same phenotype names; corrected to PCLD2.
PCLD2$Phenos <- factor(PCLD2$Phenos, levels = pheno_levels)
CY <- ggplot(PCLD2, aes(x = Phenos, y = value)) + geom_bar(stat = 'identity', color = 'black') + theme_bw(base_size = 16) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + labs(x = NULL, y = NULL) + ggtitle('LD2')
# Named palette (Control red, mutants blue/green). Replaces the assign()
# calls, which created stray global variables as a side effect while only
# contributing unnamed colour values; the mapping is unchanged because the
# Strain factor levels are ordered (Control, Mutant1, Mutant2).
strain_fill <- setNames(c("#e41a1c", "#377eb8", "#4daf4a"),
                        c("C57BL/6NJ", MutStrain[1], MutStrain[2]))
# Boxplot of the phenotype with the largest absolute loading on LD1.
# (Also spells out $Phenos — the original relied on $Pheno partial matching.)
phenotype <- PCLD1$Phenos[which.max(PCLD1$value)]
phenoname <- Phenos.lin[which(Phenos.lin.Nomen == phenotype)]
p1 <- ggplot(df, aes_string(x = 'Strain', y = phenoname, fill = 'Strain')) + geom_boxplot(alpha = 1/3, outlier.shape = NA) + theme_bw(base_size = 16) +
  theme(legend.position = 'none', axis.text.x = element_blank(),
        axis.ticks.x = element_blank()) + labs(y = paste0(phenotype, " ", "(Residuals)")) +
  scale_fill_manual(values = strain_fill)
# Boxplot of the phenotype with the largest absolute loading on LD2.
phenotype <- PCLD2$Phenos[which.max(PCLD2$value)]
phenoname <- Phenos.lin[which(Phenos.lin.Nomen == phenotype)]
p2 <- ggplot(df, aes_string(x = 'Strain', y = phenoname, fill = 'Strain')) + geom_boxplot(alpha = 1/3, outlier.shape = NA) + theme_bw(base_size = 16) +
  theme(legend.position = 'none', axis.text.x = element_blank(),
        axis.ticks.x = element_blank()) + labs(y = paste0(phenotype, " ", "(Residuals)")) +
  scale_fill_manual(values = strain_fill)
# LDA scatter (C, built above) on the left; loadings and boxplots stacked right.
layout_matrix <- rbind(c(1, 1, 1, 2, 3), c(1, 1, 1, 4, 5))
p <- gridExtra::grid.arrange(C, CX, CY, p1, p2, layout_matrix = layout_matrix)
#sort(setdiff(unique(data_per_animal$Strain), tmp3v[[4]]))
#' Supplementary LDA panel: two mutant lines vs. C57BL/6NJ controls.
#'
#' Assembles the two mutant strains and their date-matched controls,
#' flags previously detected multivariate outliers, reduces the numeric
#' columns to six PCA scores, fits a 3-class LDA and returns the LD1/LD2
#' scatter with ellipses. Relies on globals: data_per_animal,
#' controlids.df, df.out.
#'
#' @param Mutants character vector of two mutant strain names.
#' @return ggplot of the first two discriminants (no legend).
komp_lda_supp <- function(Mutants){
  df <- data.frame()
  CtrlStrain <- "C57BL/6NJ"
  for (m in seq_along(Mutants)){
    # Controls matched to this mutant line (shared test dates).
    CtrlIDs <- unique(subset(controlids.df, Strain == Mutants[m])$MouseID)
    df1 <- data_per_animal[data_per_animal$MouseID %in% CtrlIDs, ]
    df1['Genotype'] <- ifelse(df1$Strain == CtrlStrain, 'Control', 'Mutant')
    df1$Genotype <- relevel(factor(df1$Genotype), ref = "Control")
    # Keep only test dates on which at least one mutant animal ran.
    df1 <- df1[df1$TestDate %in% names(which(table(df1$TestDate, df1$Genotype)[,2] >= 1)), ]
    df1$Strain <- droplevels(df1$Strain)
    df <- rbind(df, df1)
  }
  df <- unique(df)
  df <- df[complete.cases(df), ]
  # Mark animals flagged as multivariate outliers (df.out is a global);
  # controls are never labelled as outliers here.
  df$Outlier <- as.factor(sapply(seq(nrow(df)), function(x) df.out[df.out$MouseID == df$MouseID[x], 'Outlier']))
  df[df$Strain == CtrlStrain, 'Outlier'] <- 0
  MutStrain <- setdiff(levels(df$Strain), CtrlStrain)
  df$Strain <- factor(df$Strain, levels = c(CtrlStrain, MutStrain[1], MutStrain[2]), ordered = TRUE)
  # PCA via SVD on the numeric columns; LDA on the first six PC scores.
  df_resid <- df
  df_svd <- svd(df_resid[, sapply(df_resid, is.numeric)])
  df_pca <- df_svd$u %*% diag(df_svd$d)
  df_lda <- data.frame(Strain = df$Strain, df_pca[, 1:6])
  fit_lda <- lda(Strain ~ ., data = df_lda)
  lda_values <- predict(fit_lda, df_lda[, -1])
  # Fully named palette. Replaces the assign() calls of the original, which
  # created stray global variables as a side effect; the colour-to-strain
  # mapping is unchanged (factor levels are Control, Mutant1, Mutant2).
  strain_cols <- setNames(c("#e41a1c", "#377eb8", "#4daf4a"),
                          c(CtrlStrain, MutStrain[1], MutStrain[2]))
  C <- ggplot(data = data.frame(Strain = df$Strain, lda_values$x), aes(x = LD1, y = LD2, shape = Strain, color = Strain, fill = Strain)) +
    geom_point(size = 5, aes(color = Strain)) +
    stat_ellipse(geom = "polygon", alpha = 1/3, aes(fill = Strain)) +
    theme_bw(base_size = 25) + theme(legend.position = 'none') +
    scale_color_manual(values = strain_cols) +
    scale_fill_manual(values = strain_cols)
  return(C)
}
# Supplementary figure: pairwise LDA panels for six mutant pairs, tiled
# with patchwork syntax ((C1|C2|C3)/(C4|C5|C6)) and printed from the
# active graphics device to PDF.
C1 <- komp_lda_supp(c("Adgre4-/-","Zfp422-/-"))
C2 <- komp_lda_supp(c("Xpnpep3-/+","Tomm22-/+"))
C3 <- komp_lda_supp(c("Hap1-/+","Sema6a-/+"))
C4 <- komp_lda_supp(c("Cmtr2-/+","Mrps22-/+"))
C5 <- komp_lda_supp(c("Rapgef1-/+","Elavl1-/+"))
C6 <- komp_lda_supp(c("Ofcc1-/-","Il1a-/-"))
(C1|C2|C3)/(C4|C5|C6)
dev.print(pdf,'../Temp7/LDA-supp-LabM.pdf',width=17.8, height = 11.1)
#"#e41a1c","#377eb8"
#####Legends#####
# Build a dummy plot solely to extract a shared strain legend with cowplot,
# then draw the legend grob on its own page and save it to PDF.
Mutants <- c("Ofcc1-/-","Il1a-/-")
df.tmp <- data.frame(value = c(0,1,-1), Strain = c(CtrlStrain,Mutants[1],Mutants[2]))
df.tmp$Strain <- factor(df.tmp$Strain, levels = c(CtrlStrain,Mutants[1],Mutants[2]), ordered=TRUE)
p0 <- ggplot(df.tmp,aes(y=value,x=Strain,color=Strain,shape=Strain,fill=Strain)) + geom_point() +
scale_color_manual(values=c("#e41a1c","#377eb8","#4daf4a")) + theme_bw(base_size=22) +
theme(legend.position='top') +
guides(color = guide_legend(override.aes = list(size = 5)))
legend <- cowplot::get_legend(p0)
grid.newpage()
grid.draw(legend)
dev.print(pdf,'../Temp7/LDA-supp-legend-F.pdf', width=6.26, height=0.66)
# Multivariate outlier detection across all animals: z-score the linear
# phenotypes and run mvoutlier::pcout. The resulting df.out is the global
# consumed by the komp_lda_* functions above.
df <- data.frame(Strain = data_per_animal$Strain, MouseID = data_per_animal$MouseID, Sex = data_per_animal$Sex)
df <- cbind(df, apply(data_per_animal[,names(data_per_animal) %in% Phenos.lin],2,function(x) (x - mean(x))/sd(x)))
tmp <- mvoutlier::pcout(df[,names(df) %in% Phenos.lin])
# wfinal01 is negated so that Outlier == 1 flags the outlying animals.
df.out <- data.frame(Distance1 = tmp$x.dist1, Distance2 = tmp$x.dist2,
Label = paste0(df[,'MouseID']," (", df[,'Strain'], ")"), MouseID = df[,'MouseID'], Strain = df[,'Strain'],
Outlier = as.numeric(!tmp$wfinal01))
#' LDA of two mutant strains vs. C57BL/6NJ controls on scaled,
#' body-length-residualised phenotypes, labelling animals previously
#' flagged as multivariate outliers. Relies on globals: data_per_animal,
#' controlids.df, df.out, Phenos.lin, Phenos.lin.Nomen.
#'
#' @param Mutants character vector of two mutant strain names.
#' @return gtable from grid.arrange: LDA scatter (left), LD1/LD2 loadings (right).
komp_lda_outliers <- function(Mutants){
  df <- data.frame()
  CtrlStrain <- "C57BL/6NJ"
  for (m in seq_along(Mutants)){
    # Controls matched to this mutant line (shared test dates).
    CtrlIDs <- unique(subset(controlids.df, Strain == Mutants[m])$MouseID)
    df1 <- data_per_animal[data_per_animal$MouseID %in% CtrlIDs, ]
    df1['Genotype'] <- ifelse(df1$Strain == CtrlStrain, 'Control', 'Mutant')
    df1$Genotype <- relevel(factor(df1$Genotype), ref = "Control")
    # Keep only test dates on which at least one mutant animal ran.
    df1 <- df1[df1$TestDate %in% names(which(table(df1$TestDate, df1$Genotype)[,2] >= 1)), ]
    df1$Strain <- droplevels(df1$Strain)
    df <- rbind(df, df1)
  }
  df <- unique(df)
  # Mark previously flagged multivariate outliers; controls never flagged.
  df$Outlier <- as.factor(sapply(seq(nrow(df)), function(x) df.out[df.out$MouseID == df$MouseID[x], 'Outlier']))
  df[df$Strain == CtrlStrain, 'Outlier'] <- 0
  MutStrain <- setdiff(levels(df$Strain), CtrlStrain)
  # Z-score all numeric columns, then residualise each phenotype on BodyLength.
  df[, sapply(df, is.numeric)] <- apply(df[, sapply(df, is.numeric)], 2, function(x) (x - mean(x)) / sd(x))
  FReffects <- 'BodyLength'
  formulas <- unname(sapply(Phenos.lin, function(x) paste(x, "~", FReffects), simplify = TRUE))
  fits <- lapply(formulas, lm, data = df)
  df_resid <- data.frame(sapply(seq(Phenos.lin), function(x) resid(fits[[x]])))
  colnames(df_resid) <- Phenos.lin
  df_resid <- cbind(Strain = df$Strain, df_resid)
  # LDA on the scaled phenotypes. (An earlier PCA-scores variant was dead
  # code — df_lda was immediately overwritten — and has been removed.)
  df_lda <- data.frame(Strain = df$Strain, df[, names(df) %in% Phenos.lin])
  # BUG FIX: was rda::rda(Strain ~ ., ...). The code below reads
  # fit_lda$scaling and predict(fit_lda, ...)$x, which are MASS::lda
  # fields; rda::rda has a matrix (x, y) interface with no formula method.
  fit_lda <- lda(Strain ~ ., data = df_lda)
  lda_values <- predict(fit_lda, df_lda[, -1])
  # Fully named palettes (the original mixed one named entry with assign()
  # calls that created stray globals; the mapping is unchanged).
  point_cols <- setNames(c("#F8766D", "#619CFF", "#00BA38"),
                         c(CtrlStrain, MutStrain[1], MutStrain[2]))
  fill_cols <- setNames(c("#e41a1c", "#619CFF", "#00BA38"),
                        c(CtrlStrain, MutStrain[1], MutStrain[2]))
  C <- ggplot(data = data.frame(Strain = df$Strain, lda_values$x), aes(x = LD1, y = LD2, shape = Strain)) +
    geom_point(size = 2, aes(color = Strain)) +
    stat_ellipse(geom = "polygon", alpha = 1/3, aes(fill = Strain)) +
    theme_bw(base_size = 16) + theme(legend.position = 'top') +
    scale_color_manual(values = point_cols) +
    scale_fill_manual(values = fill_cols) +
    ggrepel::geom_text_repel(aes(label = ifelse(df$Outlier == 1, as.character(df$MouseID), '')), size = 8, box.padding = 2, show.legend = FALSE)
  # Absolute loadings of each phenotype on LD1 / LD2.
  PCLD_df <- as.data.frame(fit_lda$scaling)
  rownames(PCLD_df) <- Phenos.lin
  pheno_levels <- c('Speed', 'Limb Duty Factor', 'Step Length', 'Step Width',
                    'Stride Length', 'TS', 'Base Tail LD', 'Tip Tail LD', 'Nose LD')
  PCLD1 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[, 1]))
  PCLD1$Phenos <- factor(PCLD1$Phenos, levels = pheno_levels)
  CX <- ggplot(PCLD1, aes(x = Phenos, y = value)) + geom_bar(stat = 'identity', color = 'black') + theme_bw(base_size = 16) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + labs(x = NULL, y = 'Loadings') + ggtitle('LD1')
  PCLD2 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[, 2]))
  # BUG FIX: was factor(PCLD1$Phenos, ...) — copy/paste slip (same values,
  # but referencing the wrong data frame).
  PCLD2$Phenos <- factor(PCLD2$Phenos, levels = pheno_levels)
  CY <- ggplot(PCLD2, aes(x = Phenos, y = value)) + geom_bar(stat = 'identity', color = 'black') + theme_bw(base_size = 16) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + labs(x = NULL, y = NULL) + ggtitle('LD2')
  # Scatter fills the left column; the two loading plots stack on the right.
  layout_matrix <- rbind(c(1, 2), c(1, 3))
  p <- gridExtra::grid.arrange(C, CX, CY, layout_matrix = layout_matrix)
  return(p)
}
# Run komp_lda_outliers for every ordered pair of outlier-bearing strains,
# skipping self-pairs.
# BUG FIX: the original used `next` inside an apply() callback (only valid
# inside a loop — raises "no loop for break/next, jumping to top level")
# and indexed pairs[x, ] with the row *values* rather than a row index.
pairs <- data.frame(expand.grid(Mutants.out, Mutants.out))
for (i in seq_len(nrow(pairs))) {
  pr <- as.character(unlist(pairs[i, ]))
  if (pr[1] != pr[2]) {
    komp_lda_outliers(pr)
  }
}
# Assemble four LDA rows into one tall figure.
# NOTE(review): all four rows reuse the same C/CX/CY objects — this assumes
# they were regenerated between plot_grid() calls in an interactive session;
# as written the four rows are identical.
p1 <- plot_grid(C,CX,CY,ncol = 3)
p2 <- plot_grid(C,CX,CY,ncol = 3)
p3 <- plot_grid(C,CX,CY, ncol = 3)
p4 <- plot_grid(C,CX,CY, ncol = 3)
plot_grid(p1,p2,p3,p4,nrow = 4)
dev.print(pdf,'../Temp5/lda_new_plot.pdf', width = 16, height = 22)
#Specific to Pcdh and Sfxn5 (mv outlier animal lines)
# Manually flag four animals (by row position) and label them on the LDA plot.
lda_result$MouseID <- rep(0,30)
lda_result$MouseID[c(1,10,11,22)] <- 1
lda_result$MouseID <- as.factor(lda_result$MouseID)
C <- C + ggrepel::geom_text_repel(aes(label=ifelse(lda_result$MouseID %in% c(1),as.character(df$MouseID),'')),size=8,box.padding=2)
# Strains containing at least one pcout-flagged multivariate outlier.
Mutants.out <- df.out[df.out$Outlier==1,'Strain']
Mutants <- Mutants.out[c(1,6)]
komp_lda(Mutants)[[2]]
# NOTE(review): gsub("./.","",...) is a regex in which '.' matches any
# character — presumably meant to strip the "-/-"-style zygosity suffix
# for the file name; verify the pattern.
dev.print(pdf,paste0('../Temp5/',paste(c(gsub("./.","",Mutants)),collapse="-"),"-lda.pdf"),width=11,height=11)
# Pairwise LDA accuracy matrix over all outlier-bearing strains.
all_comb <- combn(Mutants.out,2)
acc.mat <- matrix(0,nrow=length(Mutants.out),ncol=length(Mutants.out))
rownames(acc.mat) <- Mutants.out; colnames(acc.mat) <- Mutants.out; diag(acc.mat) <- rep(1,length(Mutants.out))
# NOTE(review): this apply() passes the column *values* (strain names) back
# in as column indices (all_comb[,x]); the sapply on the next line, which
# uses integer indices, is the working version — this call looks like a
# leftover and should probably be removed.
apply(all_comb,2,function(x) {komp_lda(Mutants = all_comb[,x])[[2]];})
acc <- sapply(seq(ncol(all_comb)), function(x) komp_lda(Mutants=all_comb[,x])[[2]])
# Fill the lower triangle, then mirror to make the matrix symmetric.
acc.mat[lower.tri(acc.mat,diag=FALSE)] <- acc
acc.mat <- forceSymmetric(acc.mat,uplo="L")
# Colour ramp for the accuracy heatmap drawn below.
col_fun <- circlize::colorRamp2(c(0.70,0.80,0.90,1),c("#e6f598",
"#99d594","#3288bd","#5e4fa2"))
########
# k-means clustering of two outlier-bearing strains plus matched controls.
Mutants <- Mutants.out[c(12,16)]
df <- data.frame()
for (m in seq(Mutants)){
#cat("Mutant", paste0(Mutants[m]), "\n")
CtrlIDs <- unique(subset(controlids.df,Strain == Mutants[m])$MouseID)
df1 <- data_per_animal[data_per_animal$MouseID %in% CtrlIDs,]
df1['Genotype'] <- ifelse(df1$Strain == CtrlStrain, 'Control','Mutant')
df1$Genotype <- relevel(factor(df1$Genotype), ref = "Control")
# Stricter than earlier sections: require >= 2 mutant animals per test date.
df1 <- df1[df1$TestDate %in% names(which(table(df1$TestDate, df1$Genotype)[,2] >= 2)), ]
df1$Strain <- droplevels(df1$Strain)
df <- rbind(df,df1)
}
df <- unique(df)
df <- df[,-which(names(df) %in% c('BodyLength'))] #Remove BodyLength
df[,sapply(df,is.numeric)] <- apply(df[,sapply(df,is.numeric)], 2, function(x) (x - mean(x))/sd(x))
df_svd <- svd(df[,sapply(df,is.numeric)])
df_pca <- df_svd$u %*% diag(df_svd$d)
# Row labels "MouseID(Strain)" are shown by fviz_cluster below.
df1 <- df[,Phenos.lin]
rownames(df1) <- paste0(df$MouseID,"(",df$Strain,")")
# Repeated k-means restarts (k = 3) from random row seeds; keep the run
# with the lowest total within-cluster SS.
# NOTE(review): choose(62, 3) = 37 820 restarts of kmeans — confirm this
# count is intended rather than e.g. a fixed number of repetitions.
kmu.list <- list(); tmp <- numeric(); nclusters <- 3
invisible(lapply(1:choose(62,nclusters), function(c) {kmu.list[[c]] <<- kmeans(df1, centers = df1[sample(nrow(df1),nclusters),],
nstart = 25);tmp[c] <<- as.numeric(kmu.list[[c]]['tot.withinss']);}))
mykmeans <- kmu.list[[which.min(tmp)]]
factoextra::fviz_cluster(mykmeans, data = df1,
star.plot = TRUE, ggtheme = theme_minimal(), repel = TRUE, axes = c(1,2),
palette = c("#999999", "#E69F00", "#56B4E9"),
geom = 'text',ellipse = TRUE, ellipse.type = 'convex', ellipse.level = 0.80) +
theme_minimal(base_size = 18)
#"#ffffb2","#fed976","#feb24c","#fd8d3c","#fc4e2a","#e31a1c","#b10026"
Heatmap(as.matrix((acc.mat)), cluster_columns = FALSE, cluster_rows = FALSE, row_names_side = 'left',
column_names_side = 'bottom',col = col_fun, row_names_gp = gpar(fontsize = 16, fontface="italic"),
column_names_gp = gpar(fontsize = 16, fontface="italic"),
heatmap_legend_param = list(at = c(0.70,0.80,0.90,1),title = "Accuracy", title_position = "leftcenter-rot",
border = "black",legend_height = unit(8, "cm"), just = c("right", "top")), border=TRUE,
cell_fun = function(j, i, x, y, width, height, fill) {
grid.text(sprintf("%.2f", acc.mat[i, j]), x, y, gp = gpar(fontsize = 15))
})
dev.print(pdf,'../Temp4/lda-accuracy.pdf',width=9,height=9)
#Strains: em1J
# Interactive exploration by genetic background (BG). Successive df
# assignments overwrite each other — the BG %in% c('em1J','C57BL/6NJ','Wtsi')
# subset below is what the SVD actually runs on.
df <- data_per_animal[data_per_animal$Strain %in% c(as.character(df.out[df.out$Outlier==1,'Strain']), 'C57BL/6NJ'),]
df$Strain <- droplevels(df$Strain)
df <- data_per_animal[data_per_animal$Strain %in% c('C57BL/6NJ','C57BL/6NJ-Zfp422<em1J>/J',
'B6N(Cg)-Coq8b<tm1b(EUCOMM)Hmgu>/J','B6N(Cg)-Tox4<tm1b(KOMP)Mbp>/J',
'B6N(Cg)-Nsun5<tm2b(EUCOMM)Wtsi>/J','B6N(Cg)-Ptcd3<tm1.1(KOMP)Vlcg>/J'),c('Strain','BG',Phenos.lin)]
df$Strain <- droplevels(df$Strain)
df$BG <- droplevels(df$BG)
# NOTE(review): result is discarded, and 'C57BL6/NJ' (missing the slash
# before 6) does not match the control name used everywhere else — likely
# a leftover/typo.
df[sample(df$Strain=='C57BL6/NJ',10),]
df <- data_per_animal[data_per_animal$BG %in% c('em1J','C57BL/6NJ','Wtsi'),names(df) %in% c('BG',Phenos.lin)]
df$BG <- droplevels(df$BG)
df[,names(df) %in% c(Phenos.lin)] <- apply(df[,names(df) %in% c(Phenos.lin)], 2, function(x) (x - mean(x))/sd(x))
# Scree plot of variance explained, from the SVD of the scaled phenotypes.
# NOTE(review): the x axis is hard-coded to 9 dimensions.
df_svd <- svd(df[,sapply(df,is.numeric)])
df_eigvals <- (df_svd$d^2)/(dim(df)[1] - 1)
B <- ggplot(data.frame(evals = df_eigvals/sum(df_eigvals)), aes(y = evals, x = seq(1,9))) + geom_bar(stat="identity") +
geom_line(linetype = 'solid', color = 'black', size = 1) +
geom_text(label = paste0(round(df_eigvals/sum(df_eigvals),2), '%'),vjust = -.2, hjust = 0) +
labs(x = 'Dimension', y = 'Percent of Variance') + theme_bw() + scale_x_discrete(limits=seq(1,9))
df_eigvecs <- df_svd$v
df_pca <- df_svd$u %*% diag(df_svd$d)
# NOTE(review): df was last subset with a 'BG' column and no 'Strain', so
# df$Strain below may be NULL; also the second fit_lda (rrcov::Linda on BG)
# overwrites the first — confirm which model is intended.
df_lda <- data.frame(Strain = df$Strain, df_pca)
fit_lda <- lda(Strain ~ ., data = df_lda)
fit_lda <- rrcov::Linda(BG ~ ., data = df_lda, method='fsa')
lda_result <- data.frame(Strain = df$Strain, lda = predict(fit_lda)$x)
C <- ggplot(lda_result, aes(lda.LD1,lda.LD2, color=Strain)) + geom_point(alpha=0.5,size=3) + labs(x='LD1',y='LD2')
# Map LD loadings back to phenotype space through the PCA eigenvectors.
PCLD_df <- as.data.frame(df_eigvecs %*% fit_lda$scaling)
rownames(PCLD_df) <- Phenos.lin
PCLD1 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[,1]))
CX <- ggplot(PCLD1, aes(x = reorder(Phenos, value), y = value)) + geom_bar(stat = 'identity', color = 'black') +
theme(axis.text.x = element_text(angle=45,vjust=.5)) + labs(x = 'Phenotypes', y = 'Loadings')
PCLD2 <- data.frame(Phenos = Phenos.lin.Nomen, value = abs(PCLD_df[,2]))
CY <- ggplot(PCLD2, aes(x = reorder(Phenos, value), y = value)) + geom_bar(stat = 'identity', color = 'black') +
theme(axis.text.y = element_text(angle=45)) + labs(x = 'Phenotypes', y = 'Loadings') +
coord_flip()
#element_text(angle = 45, vjust = 0.5, hjust=1) #element_blank
# Scatter in the centre cell with loading bars along the two axes.
blankPlot <- ggplot() + geom_blank(aes(1,1)) + theme_void()
CC <- gridExtra::grid.arrange(CY,C,blankPlot,CX, ncol=2,nrow=2,widths = c(1,4), heights=c(4,1))
# Phenotype covariance matrix, melted to long form for plotting.
cov_df <- as.data.frame(cov(df[,sapply(df, is.numeric)]))
cov_df$Phenos <- Phenos.lin.Nomen
cov_melt <- reshape::melt(cov_df, id = "Phenos")
cov_melt$Phenos <- factor(cov_melt$Phenos,levels = unique(cov_melt$Phenos))
#Analysis at the individual animal level
#1
# Distance-distance outlier check for the first strain on four gait
# phenotypes (mvoutlier::dd.plot / aq.plot).
Mutants <- unique(data_per_animal$Strain)
df <- data_per_animal[data_per_animal$Strain %in% Mutants[1],
names(data_per_animal) %in% c('MouseID','Sex','speed','step_length1','step_width','stride_length')]
tmp <- mvoutlier::dd.plot(df[,names(df) %in% Phenos.lin])
# Robust vs classical Mahalanobis distances; aq.plot() supplies the flag.
df.out <- data.frame(rodist = tmp$md.rob, mahadist = tmp$md.cla, MouseID = df[,'MouseID'],
Outlier = (ifelse(mvoutlier::aq.plot(df[,names(df) %in% Phenos.lin])$outliers==TRUE,1,0)))
df.out$Outlier <- as.factor(df.out$Outlier)
# Distance-distance scatter of the flags computed above.
# BUG FIX: in the original, the final theme line was a separate, discarded
# expression — the plot chain ended without `+`, so theme_bw()/theme()
# were never applied to the plot.
ggplot(df.out, aes(x = mahadist, y = rodist)) +
  geom_point(alpha = 0.8, aes(color = Outlier), size = 4) +
  ggrepel::geom_text_repel(aes(label = ifelse(Outlier %in% c(1, -1), as.character(MouseID), '')), size = 6, box.padding = 2) +
  labs(x = 'Mahalanobis Distance', y = 'Robust Distance') + ggtitle('KOMP Outliers') +
  scale_color_manual(values = c('grey50', 'red')) +
  theme_bw(base_size = 16) + theme(legend.position = 'none')
# Reshape the four phenotypes to long form and jitter-plot every animal,
# colouring the flagged outliers red.
df <- df[,-1]
df <- cbind(id = 1:dim(df)[1], df)
df.melt <- reshape::melt(df[,-2], id.vars = 'id')
# rep(..., 4): one copy of the ID / outlier flag per melted phenotype column.
df.melt <- cbind(df.melt, Strain = rep(df$MouseID, 4), Outlier = as.factor(rep(df.out$Outlier, 4)))
ggplot(df.melt, aes(y = value, color = Outlier, x = id)) + geom_jitter(alpha=0.7, size = 3) +
facet_wrap(~variable, scales='free') + scale_color_manual(values = c('grey50','red')) + theme_bw(base_size = 16) +
theme(legend.position = 'none') + labs(x = 'Index', y='Phenotype')
#2
# Z-score all linear phenotypes for the first strain (prep for approach 2).
Mutants <- unique(data_per_animal$Strain)
df <- data_per_animal[data_per_animal$Strain %in% Mutants[1],names(data_per_animal) %in% c('MouseID',Phenos.lin)]
df[,sapply(df,is.numeric)] <- apply(df[,sapply(df,is.numeric)], 2, function(x) (x - mean(x))/(sd(x)))
#3
# Hand-rolled robust-PCA-style decomposition X = L (low rank) + S (sparse)
# via singular-value soft-thresholding. thresh.l1 is a helper defined
# elsewhere in this project.
# NOTE(review): X includes the MouseID column (as.matrix(df), not df[,-1])
# — confirm that is intended. Also note eps = sum(|X - L - S|) is exactly 0
# by construction (S is defined as X - L), so the loop exits after one pass.
Mutants <- unique(data_per_animal$Strain)
df <- data_per_animal[data_per_animal$Strain %in% Mutants[1], names(data_per_animal) %in% c('MouseID',Phenos.lin)]
X <- as.matrix(df)
lambda <- 1/sqrt(max(dim(X)[1], dim(X)[2]))
i <- 1
tau <- 0.5   # sparsity fraction of S
eps <- 0.2   # reconstruction error
while ((tau > 0.3) && (eps > 0.1)) {
  X.svd <- svd(X)
  d <- thresh.l1(X.svd$d, lambda)   # soft-threshold the singular values
  # BUG FIX: reconstruction needs t(v) — svd() returns X = u %*% diag(d) %*% t(v),
  # so multiplying by v (untransposed) reconstructs the wrong matrix.
  L <- X.svd$u %*% diag(d) %*% t(X.svd$v)
  S <- X - L
  eps <- sum(abs(as.numeric(X - L - S)))
  X <- L + S
  lambda <- lambda - 0.01
  tau <- sum(S != 0)/(dim(S)[1]*dim(S)[2])
  # BUG FIX: i was never incremented, so the trace always printed "2".
  i <- i + 1
  cat("Iteration = ", i, "\n")
}
# Per-strain robust PCA (rpca package): outlyingness ratio per animal.
# BUG FIX: the original assigned `tmp[[m]] <- ...` inside the lapply
# callback, which only modified a local copy of tmp, so every result was
# silently discarded. Capture lapply's return value instead.
Mutants <- unique(data_per_animal$Strain)
tmp <- lapply(seq_along(Mutants), function(m) {
  df <- data_per_animal[data_per_animal$Strain %in% Mutants[m], names(data_per_animal) %in% c('MouseID', Phenos.lin)]
  X <- as.matrix(df[, -1])
  rpcaX <- rpca::rpca(X, max.iter = 30000, lambda = 0.3)
  od1 <- X - rpcaX$S          # data minus the sparse component
  od2 <- rpcaX$L - rpcaX$S    # low-rank minus sparse component
  # Sorted per-animal squared-norm ratio (outlyingness score).
  sort(apply(od1, 1, function(x) sum(x^2)) / apply(od2, 1, function(x) sum(x^2)))
})
#4
# Robust PCA (rospca::robpca) diagnostics for the control strain: score
# distance vs orthogonal distance with the package-supplied cutoffs.
Mutants <- sort(unique(data_per_animal$Strain))
m <- 59
df <- data_per_animal[data_per_animal$Strain %in% 'C57BL/6NJ', names(data_per_animal) %in% c('MouseID',Phenos.lin)];
tmp <- rospca::robpca(as.matrix(df[,-1]))
# Outlier = 1 - flag.all (flag.all marks the regular points per rospca).
df.plot <- data.frame(sd = tmp$sd, od = tmp$od, Outlier = as.factor(1 - as.numeric(tmp$flag.all)))
ggplot(df.plot, aes(x = sd, y = od, color = Outlier)) + geom_point(size = 2) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(1),as.character(df$MouseID),'')),size=6,box.padding=2) +
geom_hline(yintercept = tmp$cutoff.od) + geom_vline(xintercept = tmp$cutoff.sd) +
scale_color_manual(values = c('black','red')) + theme(legend.position = 'none') + labs(x = 'Score distance',
y='Orthogonal distance')
# Per-strain robpca diagnostics for every mutant line (control excluded),
# accumulated into df.plot / df.cutoff via <<- from inside lapply, then
# drawn as one faceted distance-distance panel per strain.
df.plot <- list()
df.cutoff <- list()
Mutants <- setdiff(Mutants, 'C57BL/6NJ')
lapply(seq(length(Mutants)), function(m){
df <- data_per_animal[data_per_animal$Strain %in% Mutants[m], names(data_per_animal) %in% c('MouseID',Phenos.lin)];
tmp <- rospca::robpca(as.matrix(df[,-1]))
df.plot[[m]] <<- data.frame(Strain = rep(paste0(Mutants[m]),nrow(df)), MouseID = df$MouseID,
sd = tmp$sd, od = tmp$od,Outlier = as.factor(1 - as.numeric(tmp$flag.all)))
df.cutoff[[m]] <<- data.frame(Strain = Mutants[m], sdcut = rep(tmp$cutoff.sd, nrow(df)), odcut = rep(tmp$cutoff.od, nrow(df)))
#ggplot(df.plot, aes(x = sd, y = od, color = Outlier)) + geom_point(size = 2) +
#ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(1),as.character(df$MouseID),'')),size=6,box.padding=2) +
#geom_hline(yintercept = tmp$cutoff.od) + geom_vline(xintercept = tmp$cutoff.sd) + theme_bw(base_size=16) +
#scale_color_manual(values = c('black','red')) + theme(legend.position = 'none') + labs(x = 'Score distance',
# y='Orthogonal distance') + ggtitle(paste0(Mutants[m]))
})
df <- do.call(rbind,df.plot)
dfc <- do.call(rbind, df.cutoff)
# Dashed lines mark the per-strain sd/od cutoffs within each facet.
ggplot(df, aes(x = sd, y = od, color = Outlier)) + geom_point(size = 1) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(1),as.character(MouseID),'')),size=3,box.padding=1) +
theme_bw(base_size=18) + geom_hline(data=dfc, aes(yintercept = odcut), linetype='dashed') + geom_vline(data=dfc, aes(xintercept = sdcut), linetype='dashed') +
scale_color_manual(values = c('black','red')) + theme(legend.position = 'none') + labs(x = 'Score distance',
y='Orthogonal distance') + facet_wrap(.~Strain)
ggsave('../Temp2/allstrains-out2.pdf', width=18, height=22)
# Control-strain robpca: distance-distance plot, then robust PC score plot.
df <- data_per_animal[data_per_animal$Strain %in% 'C57BL/6NJ', names(data_per_animal) %in% c('MouseID',Phenos.lin)];
tmp <- rospca::robpca(as.matrix(df[,-1]))
df$Outlier <- as.factor(1 - as.numeric(tmp$flag.all))
df.plot <- data.frame(sd = tmp$sd, od = tmp$od, Outlier = as.factor(1 - as.numeric(tmp$flag.all)))
ggplot(df.plot, aes(x = sd, y = od, color = Outlier)) + geom_point(size = 2) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(1),as.character(df$MouseID),'')),size=6,box.padding=2) + theme_bw(base_size = 18) +
geom_hline(yintercept = tmp$cutoff.od, linetype='dashed') + geom_vline(xintercept = tmp$cutoff.sd, linetype='dashed') +
scale_color_manual(values = c('black','red')) + theme(legend.position = 'none') + labs(x = 'Score distance',
y='Orthogonal distance') + ggtitle('C57BL/6NJ')
ggsave('../Temp2/ctrlstrain-out.pdf', width=8, height=8)
# Project centred data onto the robust loadings to get the robust scores.
# NOTE(review): `T` shadows the TRUE shorthand for the rest of the session.
# NOTE(review): df[,-1] at this point still contains the Outlier factor
# column added above, which would make as.matrix() character — verify.
T <- (as.matrix(df[,-1]) - as.matrix(rep(1,nrow(df)))%*%t(as.matrix(tmp$center)))%*%tmp$loadings
od.out <- df[which((1 - as.numeric(tmp$flag.od))==1),'MouseID']
sd.out <- df[which((1 - as.numeric(tmp$flag.sd))==1),'MouseID']
# Outlier.x: flagged on score distance only (not on orthogonal distance).
tmp2 <- data.frame(MouseID = df$MouseID, PC1 = T[,1], PC2 = T[,2],
Outlier.all = as.factor(1 - as.numeric(tmp$flag.all)), Outlier.sd = as.factor(1 - as.numeric(tmp$flag.sd)),
Outlier.x = as.factor(ifelse(df$MouseID %in% setdiff(sd.out,od.out),1,0)))
ggplot(tmp2, aes(x = PC1, y = PC2)) + geom_point(size = 3,aes(color = Outlier.all),alpha=0.7) +
stat_ellipse(level=0.989, type='t', linetype=2) +
ggrepel::geom_text_repel(aes(label=ifelse(Outlier.sd %in% c(1),as.character(df$MouseID),'')),size=6,box.padding=2) + theme_bw(base_size = 18) +
scale_color_manual(values = c('black','red')) + theme(legend.position='none') + ggtitle('C57BL/6NJ')
ggsave('../Temp2/ctrlstrain-pc.pdf',width=8,height=8)
# Re-run robpca for every strain (control included this time) and attach
# the outlier flag back onto data_per_animal for phenotype-level plots.
Mutants <- sort(unique(data_per_animal$Strain))
df.plot <- list()
df.cutoff <- list()
lapply(seq(length(Mutants)), function(m){
df <- data_per_animal[data_per_animal$Strain %in% Mutants[m], names(data_per_animal) %in% c('MouseID',Phenos.lin)];
tmp <- rospca::robpca(as.matrix(df[,-1]))
df.plot[[m]] <<- data.frame(Strain = rep(paste0(Mutants[m]),nrow(df)), MouseID = df$MouseID,
sd = tmp$sd, od = tmp$od,Outlier = as.factor(1 - as.numeric(tmp$flag.all)))
df.cutoff[[m]] <<- data.frame(Strain = Mutants[m], sdcut = rep(tmp$cutoff.sd, nrow(df)), odcut = rep(tmp$cutoff.od, nrow(df)))
#ggplot(df.plot, aes(x = sd, y = od, color = Outlier)) + geom_point(size = 2) +
#ggrepel::geom_text_repel(aes(label=ifelse(Outlier %in% c(1),as.character(df$MouseID),'')),size=6,box.padding=2) +
#geom_hline(yintercept = tmp$cutoff.od) + geom_vline(xintercept = tmp$cutoff.sd) + theme_bw(base_size=16) +
#scale_color_manual(values = c('black','red')) + theme(legend.position = 'none') + labs(x = 'Score distance',
# y='Orthogonal distance') + ggtitle(paste0(Mutants[m]))
})
df <- do.call(rbind,df.plot)
# NOTE(review): this assumes df (built strain-by-strain in sorted order)
# shares row order with data_per_animal — verify before trusting Outlier.
data_per_animal$Outlier <- df$Outlier
ggplot(data_per_animal, aes(x = Strain, y = step_width)) + geom_boxplot() + geom_point(aes(color=Outlier), alpha = 0.7) +
scale_color_manual(values = c('black','red')) + theme(axis.text.x=element_text(angle=45,hjust=1), legend.position = 'none') +
labs(x = 'Strain')
df.ctrl <- data_per_animal[data_per_animal$Strain == 'C57BL/6NJ',names(data_per_animal) %in% c(Phenos.lin)]
colnames(df.ctrl) <- Phenos.lin.Nomen
df.ctrl <- cbind(id = as.factor(1:nrow(df.ctrl)), df.ctrl)
df.melt <- reshape::melt(df.ctrl, id.vars = 'id')
df.melt <- cbind(Strain = rep('C57BL/6NJ', 1674), MouseID = rep(df[df$Strain %in% 'C57BL/6NJ','MouseID'], length(Phenos.lin)),
df.melt, Outlier = rep(df[df$Strain %in% 'C57BL/6NJ','Outlier'], length(Phenos.lin)))
ggplot(df.melt, aes(x = value, y = Strain)) + geom_boxplot() + geom_point(aes(color=Outlier)) + theme_bw(base_size=20) +
scale_color_manual(values = c('black','red')) + facet_wrap(.~variable, scales='free',ncol= 1, strip.position="right") +
theme(legend.position='none',axis.text.y = element_blank(), strip.text.y = element_text(size = 9)) +
labs(y = NULL, x = NULL) + ggtitle('C57BL/6NJ')
ggsave('../Temp2/ctrl-out-box.pdf', height=12,width=8)
df.ctrl <- data_per_animal[data_per_animal$Strain == 'C57BL/6NJ',names(data_per_animal) %in% c(Phenos.lin,'Outlier')]
t <- list(
size = 14,
color = 'black')
p <- plot_ly(df,x=~`stride_length`, y=~`step_width`, z=~`step_length1`, type='scatter3d', color=~`Outlier`,colors=c("black","red")) %>%
layout(scene = list(
xaxis = list(title = "Stride Length"),
yaxis = list(title = "Step Width"),
zaxis = list(title = "Step Length")
), font=t)
p <- plot_ly(df,x=~`nose_lateral_displacement`, y=~`tip_tail_lateral_displacement`, z=~`base_tail_lateral_displacement`, type='scatter3d', color=~`Outlier`,colors=c("black","red")) %>%
layout(scene = list(
xaxis = list(title = "Nose LD"),
yaxis = list(title = "Tip Tail LD"),
zaxis = list(title = "Base Tail LD")
), font=t)
plotly_IMAGE(p, width = 500, height = 500, format = "pdf", scale = 2, out_file = "test.pdf")
|
fd10acc33db6b32e67f11da3ce901e18810b03dc
|
1554244b48facb5143523f83ef3cf263b4df4ff5
|
/tests/testthat/test-my_knn_cv.R
|
580e9843c42f4a2215c34e730150709e34c89df7
|
[] |
no_license
|
dhruvrc/STAT302Package
|
65ccccba65bc85bc63e0021f2ee309f913c44d2d
|
a1bd702990cc933624580a2aad6a20721f9b7c44
|
refs/heads/master
| 2021-03-22T12:20:13.110108
| 2020-03-17T05:32:14
| 2020-03-17T05:32:14
| 247,363,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 87
|
r
|
test-my_knn_cv.R
|
# Verify my_knn_cv() signals an error for non-numeric training input.
# NOTE(review): expect_error() with no regexp/class matches ANY error;
# consider pinning the expected message or condition class.
test_that("non-numeric input throws error", {
  expect_error(my_knn_cv("a string"))
})
|
ea877437b22e428d3cf2279ad178078f6d305ec8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kappalab/examples/variance-methods.Rd.R
|
716e3ca6deafc40de0469c6f39d8f9dfb23318ea
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 534
|
r
|
variance-methods.Rd.R
|
# Extracted Rd example for kappalab's variance() methods: computes the
# normalized variance of a capacity, of its Mobius transform, and of
# cardinal capacities.  All variance() calls just print their result.
library(kappalab)
### Name: variance-methods
### Title: Normalized variance of a capacity
### Aliases: variance variance-methods variance,Mobius.capacity-method
### variance,capacity-method variance,card.capacity-method
### Keywords: methods
### ** Examples
## a capacity
mu <- capacity(c(0,0,0,0:12)/12)
## its Mobius transform
a <- Mobius(mu)
## their normalized variance (both representations give the same value)
variance(mu)
variance(a)
## similar examples with card.capacity objects
mu <- lower.capacity(4)
variance(mu)
mu <- uniform.capacity(4)
variance(mu)
|
04361450250472d16c69bb677b33194ee7051317
|
21c36ac56a10e92f37cf8e6bd23ad54eccb0132c
|
/knn_class.R
|
78da62ed29dd3e90cca7fca08ee8991b4a3b4bb4
|
[] |
no_license
|
snarles/stat312
|
ef1fe1870e49c21f8034a199edb7509729966412
|
5b998b8d02ba31f0326f90d8f79d930619d669e2
|
refs/heads/master
| 2021-01-01T15:44:22.471038
| 2015-03-10T04:51:16
| 2015-03-10T04:51:16
| 29,076,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,267
|
r
|
knn_class.R
|
# Pick the best k for k-nearest-neighbour classification by self-
# classification on the training set.
#
# Args:
#   vecs:   numeric matrix, one row per trial (trials x features).
#   labels: vector of numeric class labels, one per row of `vecs`.
#
# Returns:
#   list(k = chosen neighbourhood size, error = its training error rate).
#
# The k minimising (errors + ties) is chosen; majority-vote ties are broken
# at random, so counts for suboptimal k can vary slightly between runs.
choose_k <- function(vecs, labels) {
  train_trials <- nrow(vecs)
  # BUG FIX: the original looped k over a hard-coded 1:100, which made
  # nearest[i, 2:(k+1)] index out of bounds whenever there were fewer than
  # 101 trials.  Cap k at train_trials - 1 (each trial has at most that
  # many *other* neighbours).
  k_max <- min(100L, train_trials - 1L)
  distance <- as.matrix(dist(vecs))  # pairwise Euclidean distances
  # Row i holds trial indices sorted by distance to trial i; the first
  # entry is always i itself (distance 0), so neighbours start at column 2.
  nearest <- matrix(0L, train_trials, train_trials)
  for (i in seq_len(train_trials)) {
    nearest[i, ] <- order(distance[i, ])
  }
  assigned_label <- numeric(train_trials)
  tie_count <- numeric(k_max)
  errors <- numeric(k_max)
  for (k in seq_len(k_max)) {
    for (i in seq_len(train_trials)) {
      # Majority vote among the k nearest other trials.
      near_labels <- table(as.vector(labels[nearest[i, 2:(k + 1)]]))
      winners <- as.numeric(names(near_labels)[near_labels == max(near_labels)])
      if (length(winners) > 1) {
        tie_count[k] <- tie_count[k] + 1
        winners <- winners[sample(seq_along(winners), 1)]  # random tie-break
      }
      assigned_label[i] <- winners
    }
    errors[k] <- sum(assigned_label != labels)
  }
  # Ties are treated as badly as errors: both could be misclassifications.
  k_best <- which.min(tie_count + errors)
  list(k = k_best, error = errors[k_best] / train_trials)
}
# --------
# test vecs, training vecs, labels, k
# Classify test trials with k-nearest-neighbour voting against a labelled
# training set.
#
# Args:
#   test_vecs: numeric matrix of trials to classify (rows = trials).
#   vecs:      numeric matrix of training trials.
#   labels:    numeric class labels for the rows of `vecs`.
#   k:         neighbourhood size; 0 (default) means choose it via choose_k().
#
# Returns:
#   A one-column matrix of predicted labels, one per row of `test_vecs`.
knn_test <- function(test_vecs, vecs, labels, k = 0) {
  if (k == 0) {
    # BUG FIX: choose_k() returns list(k = ..., error = ...); the original
    # assigned the whole list to `k`, which broke nearest[2:(k+1)] below.
    k <- choose_k(vecs, labels)$k
  }
  train_trials <- nrow(vecs)
  test_trials <- nrow(test_vecs)
  neur <- ncol(vecs)
  # Scratch matrix: all training rows plus one slot for the current test trial.
  temp_matrix <- matrix(0, train_trials + 1, neur)
  temp_matrix[seq_len(train_trials), ] <- vecs
  KNN_assigned_label <- matrix(0, test_trials)
  tie_count <- 0
  for (i in seq_len(test_trials)) {
    temp_matrix[train_trials + 1, ] <- t(as.matrix(test_vecs[i, ]))
    # Distances from the test trial (last row) to every training trial.
    distance <- as.matrix(dist(temp_matrix))[train_trials + 1, ]
    nearest <- order(distance)  # nearest[1] is the test trial itself
    near_labels <- table(as.vector(labels[nearest[2:(k + 1)]]))
    winners <- as.numeric(names(near_labels)[near_labels == max(near_labels)])
    if (length(winners) > 1) {
      tie_count <- tie_count + 1
      winners <- winners[sample(seq_along(winners), 1)]  # random tie-break
    }
    KNN_assigned_label[i] <- winners
  }
  print(tie_count)  # kept from original: reports how many votes were ties
  return(KNN_assigned_label)
}
|
126c636e125ba62fa4f4c6ba58633ea7eb3b4478
|
7e4e52c37b700452d573a15f533b6ef346b22a84
|
/app.R
|
b69c2ae42e932aced2d036516cebf29b42970d08
|
[] |
no_license
|
achu3080803/PROM02_Project
|
5292b1fa039f51502e716546502fdba8e30c2930
|
eb687c827cb8b396b24ab036f2125885654e747a
|
refs/heads/main
| 2023-04-01T01:26:08.921847
| 2021-04-01T12:20:28
| 2021-04-01T12:20:28
| 335,670,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54,259
|
r
|
app.R
|
## app.R ##
library(shinydashboard)
library(DT)
library(ggplot2)
library(leaflet)
library(RCurl)
library(rjson)
library(ShinyRatingInput)
source("./global.R", local=TRUE)
###############################################################################################
# UI (START)
###############################################################################################
# Shiny dashboard UI for the Movie Recommender app: four tabs
# (recommendations, rating, basic analysis, deep analysis) inside a
# shinydashboard layout with a dark custom theme.
# NOTE(review): several inputs reference global data frames (ratings_df,
# genres_df, all_userid_df) that must be loaded by global.R before the UI
# is built.
ui <- dashboardPage(
  skin="red",
  dashboardHeader(title = "Movie Recommender"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Recommended Movies", tabName = "recommended_movies", icon = icon("film")),
      menuItem("Movie Rating", tabName = "movie_rating", icon = icon("film")),
      menuItem("Basic Analysis", tabName = "analysis", icon = icon("th")),
      menuItem("Deep Analysis", tabName = "graph", icon = icon("th"))
    )
  ),
  dashboardBody(
    includeCSS("stars.css"),
    # Dark theme overrides for content area and boxes.
    tags$style(HTML("
      .content, .container-fluid {color: #fff; background-color: #000;}
      .box.box-solid.box-primary>.box-header {
        color:#fff;
        background:#000000
      }
      .box.box-solid.box-primary{
        border-bottom-color:#00000;
        border-left-color:#000000;
        border-right-color:#000000;
        border-top-color:#000000;
        background:#000000
      }
      #sidebar {
        background-color: #000000;
      }
    ")),
    tabItems(
      # ---- Tab 1: personalised recommendation strips ----
      tabItem(
        tabName = "recommended_movies",
        fluidRow(
          #selectizeInput("selectUser", "Login as", unique(ratings_df$userId))
          selectizeInput("selectUser", "Login as", all_userid_df$value)
        ),
        fluidRow(
          h1("Top 10 Movies"),
          htmlOutput("top10Movies2")
        ),
        fluidRow(
          h1("Recently rated by you"),
          htmlOutput("watchedMovies")
        ),
        # fluidRow(
        #   h1("Your favorite movies"),
        #   htmlOutput("favoriteMovies")
        # ),
        fluidRow(
          h1("Movies similar to what you like"),
          htmlOutput("cbMovies")
        ),
        fluidRow(
          h1("Others similar to you like these movies"),
          htmlOutput("cfPopularMovies")
        ),
        fluidRow(
          h1("Movies with your favorite actors / actresses"),
          htmlOutput("cbActorMovies")
        )
      ),
      # ---- Tab 2: search for a movie and submit a star rating ----
      tabItem(
        tabName = "movie_rating",
        fluidRow(
          h1("Movie Rating"),
          box(
            width = 2,
            selectizeInput("selectUser2", "Login as", all_userid_df$value),
          )
        ),
        fluidRow(
          h1("Movie Search"),
          box(
            width = 10,
            column(7,textInput("searchMovieTitle", "Please enter Movie Title below:")),
          )
        ),
        fluidRow(
          # NOTE(review): `y <-` / `z <-` / `x <-` inside UI calls create
          # stray variables; the assignments look unintentional but are
          # harmless -- the uiOutput() value is still passed through.
          y <- uiOutput("radioMovieTilesForSearch"),
          HTML("<br>"),
          HTML("<br>")
        ),
        fluidRow(
          h1("Give your rating here:"),
          box(
            width = 10,
            mainPanel(
              column(3,
                z <- uiOutput("chosenMovie")
              ),
              column(7,
                htmlOutput("moviePlot")
              )
            )
          )
        )
      ),
      # ---- Tab 3: basic analysis (word clouds, rating distributions) ----
      tabItem(tabName = "analysis",
        fluidRow(
          htmlOutput("top10Movies")
        ),
        fluidRow(
          tags$style(".box {background-color:#000000;}"),
          box(
            width = 10,
            sliderInput("ratedYear",
              "Year of Ratings:",
              step=1,
              min = min(as.integer(ratings_df$ratedYear)), max = max(as.integer(ratings_df$ratedYear)), value = c(min(as.integer(ratings_df$ratedYear)),max(as.integer(ratings_df$ratedYear)))
            ),
            sliderInput("minRatingCnt",
              "Minimum Number of Ratings:",
              step=1,
              min = 0, max = 50, value = c(25)
            )
          )
        ),
        fluidRow(
          h1("Analysis on Movie Genres and Tags"),
          sidebarLayout(
            # Sidebar with a slider and selection inputs
            sidebarPanel(
              tags$style(".well {background-color:#000000;}"),
              sliderInput("year",
                "Production Year:",
                min = min(genres_df$year), max = max(genres_df$year), value = c(min(genres_df$year),max(genres_df$year))),
              sliderInput("rating",
                "Average Rating:",
                min = 1, max = 5, value = c(1,5)),
              actionButton("update", "Update"),
              hr(),
              sliderInput("freq",
                "Minimum Frequency:",
                min = 1, max = 50, value = 15),
              sliderInput("max",
                "Maximum Number of Words:",
                min = 1, max = 30, value = 100)
            ),
            # Show Word Cloud
            mainPanel(
              column(5,
                h1("Movie Genres"),
                plotOutput("genreWordCloud")
              ),
              column(5,
                h1("Movie Tags"),
                plotOutput("tagWordCloud")
              )
            )
          )
        ),
        fluidRow(
          h1("Analysis on Ratings")
          ,
          sidebarLayout(
            # Sidebar with a slider and selection inputs
            sidebarPanel(
              tags$style(".well {background-color:#000000;}"),
              sliderInput("year2",
                "Production Year:",
                min = min(ratings_df$year), max = max(ratings_df$year), value = c(min(ratings_df$year),max(ratings_df$year)))
            ),
            # Show Rating Distribution
            mainPanel(
              column(5,
                # NOTE(review): "Historgram" typo in the user-facing label.
                h1("Historgram of Movie Ratings"),
                plotOutput("ratingHistogram")
              ),
              column(5,
                h1("Distribution of Average Movie Ratings"),
                plotOutput("avgRatingHistogram")
              )
            )
          ),
          hr(),
          h1("Average Number of Ratings per User"),
          box(width=12,
            plotOutput("userRating")
          )
        )
      ),
      # ---- Tab 4: deep analysis via a visNetwork graph ----
      tabItem(
        tabName = "graph",
        fluidRow(
          h1("Deep Analysis"),
          #selectizeInput("selectUser", "Login as", unique(ratings_df$userId))
          selectizeInput("selectUser1", "User", all_userid_df$value),
          selectizeInput("selectCategory", "Category", c("Movies similar to what you like","Others similar to you like these movies","Movies with your favorite actors / actresses"))
        ),
        fluidRow(
          h1("User's favorite movies"),
          htmlOutput("favoriteMovies")
        ),
        fluidRow(
          htmlOutput("selectedMovieCategory"),
          x <- uiOutput('radioMovieTiles')
        ),
        # NOTE(review): `height <- "2000"` passes the string positionally
        # (and assigns a stray variable); named args height=/width= were
        # probably intended.  visNetworkOutput presumably comes from
        # visNetwork loaded in global.R -- verify.
        visNetworkOutput("movieGraph",
          height <- "2000",
          width <- "1000")
      )
    )
  )
)
###############################################################################################
# UI (END)
###############################################################################################
###############################################################################################
# Server (START)
###############################################################################################
server <- function(input, output) {
  # Poster tile dimensions (px) shared by all the render functions below.
  poster.height=200
  poster.width=150
  # NOTE(review): hard-coded OMDb API secret in source -- move to an
  # environment variable or config file.
  omdbapi.key="35b0b051"
  set.seed(122)
  # NOTE(review): histdata appears unused in the visible server code.
  histdata <- rnorm(500)
###############################################################################################
#
# Recommender TAB (START)
#
###############################################################################################
###############################################################################################
# Function get_curr_login
#
# Description: Return the current selected USER ID
# Input:
# Output: Return the current selected USER ID
###############################################################################################
  # Reactive accessor for the user id selected on the Recommendations tab.
  get_curr_login <- reactive({
    return(input$selectUser)
  })
###############################################################################################
# Function output$top10Movie2
#
# Description: Render function to return the HTML for composing top 10 movies result
# Input:
# Output: HTML for composing top 10 movies result
###############################################################################################
  # Render the "Top 10 Movies" strip: one gallery <div> per movie with the
  # poster image, a star-rating overlay (percent fill of 5 stars) and the
  # title; falls back to a "No Movies Found" banner when the query is empty.
  output$top10Movies2<-renderText({
    #print("Hello")
    m_posters=NULL
    min_year = min(as.integer(ratings_df$ratedYear))
    max_year = max(as.integer(ratings_df$ratedYear))
    m_movies <- getTop10MovieDF(min_year,max_year, 50)
    print("Top 10 Movies 2")
    movie_title <- ""
    movie_rating <- 0
    if (length(m_movies)>0) {
      m_posters <- append(m_posters, '<div>')
      # NOTE(review): 1:nrow() misbehaves if the result has zero rows while
      # length(m_movies) > 0 -- seq_len(nrow(...)) would be safer.
      for (i in 1:nrow(m_movies$title)){
        movie_title <- m_movies$title[i,]$value
        movie_poster <- m_movies$poster[i,]$value
        movie_rating <- m_movies$avg_rating[i,]$value
        star_rating <- movie_rating/5 * 100
        m_posters <- append(m_posters, '<div class="gallery">')
        m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
        m_posters <- append(m_posters, '<div class="ratings">')
        m_posters <- append(m_posters, '<div class="empty-stars"></div>')
        m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
        m_posters <- append(m_posters, '</div>')
        m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
        m_posters <- append(m_posters, '</div>')
      }
      m_posters <- append(m_posters, '</div>')
    }
    else{
      m_posters <- append(m_posters, '<div><H1 style="text-align:center">No Movies Found</H1></div>')
    }
    #print(m_posters)
    return(m_posters)
  })
###############################################################################################
# Function output$watchedMovies
#
# Description: Return the movie posters of the movies that are rated by the user
# Input:
# Output: Return the movie posters of the movies that are rated by the user
###############################################################################################
output$watchedMovies<-renderText({
print("watchedMovies")
currUser=get_curr_login()
m_posters=NULL
m_graph <- getRecentlyRatedMovies(currUser,10)
m_movies <- m_graph$nodes[m_graph$nodes[]$node_type=="Movie",]
#print(currUser)
#print(length(m_graph$nodes))
l <- nrow(m_movies)
m_posters <- append(m_posters, '<div>')
for (i in 1:nrow(m_movies)){
movie_title <- m_movies[i,]$movie_title
movie_poster <- m_movies[i,]$poster
movie_rating <- m_movies[i,]$user_rating
star_rating <- movie_rating/5 * 100
m_posters <- append(m_posters, '<div class="gallery">')
m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
m_posters <- append(m_posters, '<div class="ratings">')
m_posters <- append(m_posters, '<div class="empty-stars"></div>')
m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
m_posters <- append(m_posters, '</div>')
m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
m_posters <- append(m_posters, '</div>')
}
m_posters <- append(m_posters, '</div>')
print(m_posters)
})
###############################################################################################
# Function output$favoriteMovies
#
# Description: Return the movie posters of the movies that are rated by the user
# Input:
# Output: Return the movie posters of the movies that are rated by the user
###############################################################################################
  # Render poster tiles for the analysed user's favorite movies (top 20 by
  # their own ratings), same gallery/star-overlay markup as the other strips.
  # NOTE(review): get_analyze_user() is not defined in the visible chunk --
  # presumably a reactive on input$selectUser1; verify.
  output$favoriteMovies<-renderText({
    print("favoriteMovies")
    currUser=get_analyze_user()
    m_posters=NULL
    m_graph <- getFavoriteMovies(currUser,20)
    m_movies <- m_graph$nodes[m_graph$nodes[]$node_type=="Movie",]
    #print(currUser)
    #print(length(m_graph$nodes))
    l <- nrow(m_movies)
    m_posters <- append(m_posters, '<div>')
    # NOTE(review): 1:nrow() misbehaves when zero movie rows come back.
    for (i in 1:nrow(m_movies)){
      movie_title <- m_movies[i,]$movie_title
      movie_poster <- m_movies[i,]$poster
      movie_rating <- m_movies[i,]$user_rating
      star_rating <- movie_rating/5 * 100
      m_posters <- append(m_posters, '<div class="gallery">')
      m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
      m_posters <- append(m_posters, '<div class="ratings">')
      m_posters <- append(m_posters, '<div class="empty-stars"></div>')
      m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
      m_posters <- append(m_posters, '</div>')
      m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
      m_posters <- append(m_posters, '</div>')
    }
    m_posters <- append(m_posters, '</div>')
    print(m_posters)
  })
###############################################################################################
# Function output$cbMovies
#
# Description: Return the movie posters of the movies that are recommended by the Content-based Recommender
# Input:
# Output: Return the movie posters of the movies that are recommended by the Content-based Recommender
###############################################################################################
  # Render poster tiles for content-based recommendations for the current
  # user; same gallery/star-overlay markup as the other strips.
  output$cbMovies<-renderText({
    print("cbMovies")
    currUser=get_curr_login()
    m_posters=NULL
    m_movies <- getContentBasedMovies(currUser,10,"N")
    #print(currUser)
    #print(length(m_graph$nodes))
    m_posters <- append(m_posters, '<div>')
    # NOTE(review): 1:nrow() misbehaves when no recommendations come back;
    # also no empty guard like top10Movies2 has.
    for (i in 1:nrow(m_movies$title)){
      movie_title <- m_movies$title[i,]$value
      movie_poster <- m_movies$poster[i,]$value
      movie_rating <- m_movies$avg_rating[i,]$value
      star_rating <- movie_rating/5 * 100
      m_posters <- append(m_posters, '<div class="gallery">')
      m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
      m_posters <- append(m_posters, '<div class="ratings">')
      m_posters <- append(m_posters, '<div class="empty-stars"></div>')
      m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
      m_posters <- append(m_posters, '</div>')
      m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
      m_posters <- append(m_posters, '</div>')
    }
    m_posters <- append(m_posters, '</div>')
    print(m_posters)
  })
###############################################################################################
# Function output$cfPopularMovies
#
# Description: Return the movie posters of the movies that are recommended by the Content-based Recommender
# Input:
# Output: Return the movie posters of the movies that are recommended by the Content-based Recommender
###############################################################################################
  # Render poster tiles for collaborative-filtering recommendations for the
  # current user; same gallery/star-overlay markup as the other strips.
  output$cfPopularMovies<-renderText({
    print("cfPopularMovies")
    currUser=get_curr_login()
    m_posters=NULL
    m_movies <- getCollaborativeFilteringMovies(currUser,10,"N")
    #print(currUser)
    #print(length(m_graph$nodes))
    m_posters <- append(m_posters, '<div>')
    # NOTE(review): 1:nrow() misbehaves when no recommendations come back.
    for (i in 1:nrow(m_movies$title)){
      movie_title <- m_movies$title[i,]$value
      movie_poster <- m_movies$poster[i,]$value
      movie_rating <- m_movies$avg_rating[i,]$value
      star_rating <- movie_rating/5 * 100
      m_posters <- append(m_posters, '<div class="gallery">')
      m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
      m_posters <- append(m_posters, '<div class="ratings">')
      m_posters <- append(m_posters, '<div class="empty-stars"></div>')
      m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
      m_posters <- append(m_posters, '</div>')
      m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
      m_posters <- append(m_posters, '</div>')
    }
    m_posters <- append(m_posters, '</div>')
    print(m_posters)
  })
###############################################################################################
# Function output$cbActorMovies
#
# Description: Return the movie posters of the movies that are recommended by the users' favorite actors / actresses
# Input:
# Output: Return the movie posters of the movies that are recommended by the users' favorite actors / actresses
###############################################################################################
  # Render poster tiles for movies featuring the user's favorite actors /
  # actresses; same gallery/star-overlay markup as the other strips.
  output$cbActorMovies<-renderText({
    print("cbActorMovies")
    currUser=get_curr_login()
    m_posters=NULL
    m_movies <- getActorMovies(currUser,10,"N")
    #print(currUser)
    #print(length(m_graph$nodes))
    m_posters <- append(m_posters, '<div>')
    # NOTE(review): 1:nrow() misbehaves when no recommendations come back.
    for (i in 1:nrow(m_movies$title)){
      movie_title <- m_movies$title[i,]$value
      movie_poster <- m_movies$poster[i,]$value
      movie_rating <- m_movies$avg_rating[i,]$value
      star_rating <- movie_rating/5 * 100
      m_posters <- append(m_posters, '<div class="gallery">')
      m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
      m_posters <- append(m_posters, '<div class="ratings">')
      m_posters <- append(m_posters, '<div class="empty-stars"></div>')
      m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
      m_posters <- append(m_posters, '</div>')
      m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
      m_posters <- append(m_posters, '</div>')
    }
    m_posters <- append(m_posters, '</div>')
    print(m_posters)
  })
###############################################################################################
# Function get_movie_url
#
# Description: Return the url of movie poster of the given movie ID
# Input:
# Output: Return the url of movie poster of the given movie ID
###############################################################################################
get_movie_url <-function(inMovieId) {
p <- movies_df[movies_df$movieId==inMovieId,]$poster
return(p)
}
###############################################################################################
# Function get_imdbid
#
# Description: Return the corresponding IMDB ID of the given MovieLens ID
# Input:
# Output: Return the corresponding IMDB ID of the given MovieLens ID
###############################################################################################
get_imdbid <- function(inMovieId){
return(paste0("tt",links_df[links_df$movieId==inMovieId,]$imdbId))
}
###############################################################################################
# Function get_movie_title
#
# Description: Return the movie title of the given MovieLens ID
# Input:
# Output: Return the movie title of the given MovieLens ID
###############################################################################################
  # Return the movie title for a MovieLens id, stripped of its trailing
  # " (YYYY)" year suffix and any trailing ", The"; optionally URL-encode
  # spaces as "+" when plusSignNeeded is TRUE.
  # NOTE(review): assumes the title always ends in a 7-character " (YYYY)"
  # suffix -- verify against the movies table.  The print() calls are debug
  # noise, and `t` masks base::t within this function.
  get_movie_title <- function(inMovieId, plusSignNeeded){
    t <- movies_df[movies_df$movieId==inMovieId,]$title
    #print(t)
    # Drop the trailing " (YYYY)" (7 characters).
    t <- substr(t,1,nchar(t)-7)
    print(t)
    # Drop a trailing ", The" left over from "Title, The (YYYY)" style names.
    if (substr(t,nchar(t)-4,nchar(t)) == ", The"){
      t <- substr(t,1,nchar(t)-5)
    }
    # Replace spaces with '+' for use in URL query strings.
    if (plusSignNeeded){
      t <- gsub(" ","+",t)
    }
    print(t)
    return(t)
  }
###############################################################################################
# Function get_watched_movies
#
# Description: Return a vector that contains the most recent 10 movie ID rated by the given User
# Input: User ID
# Output: Return a vector that contains the most recent 10 movie ID rated by the given User
###############################################################################################
get_watched_movies <- function(inUserId){
m1 <- ratings_df[ratings_df$userId==inUserId,]
m2 <- m1[order(-m1$timestamp),]$movieId
#m2 <- as.data.frame(c(1,2))
return(m2[1:10])
}
###############################################################################################
# Function get_movie_rating
#
# Description: Return a vector that contains the ratings of a given movie
# Input: Movie ID (MovieLens)
# Output: Return a vector that contains the ratings of a given movie
###############################################################################################
get_movie_rating <- function(inMovieId){
r <- ratings_df[ratings_df$movieId==inMovieId,]$rating
return(r)
}
###############################################################################################
# Function get_movie_avg_rating
#
# Description: Return the average ratings of a given movie
# Input: Movie ID (MovieLens)
# Output: Return the average ratings of a given movie
###############################################################################################
get_movie_avg_rating <- function(inMovieId){
r <- movies_df[movies_df$movieId==inMovieId,]$avgRating
return(r)
}
###############################################################################################
# Function get_cb_movies
#
# Description: Return the movie ID of the movies recommended by Content-based Recommender for the given user
# Input: User ID
# Output: Return the movie ID of the movies recommended by Content-based Recommender for the given user
###############################################################################################
get_cb_movies <- function(inUserId){
m <- cb_movies_df[cb_movies_df$userId==inUserId,]$movieId
return(m)
}
###############################################################################################
# Function get_cf_popular_movies
#
# Description: Return the movie ID of the movies recommended by Popular Recommender for the given user
# Input: User ID
# Output: Return the movie ID of the movies recommended by Popular Recommender for the given user
###############################################################################################
get_cf_popular_movies <- function(inUserId){
m <- cf_popular_movies_df[cf_popular_movies_df$userId==inUserId,]$movieId
return(m)
}
###############################################################################################
# Function get_cf_als_imp_movies
#
# Description: Return the movie ID of the movies recommended by ALS-implicit Recommender for the given user
# Input: User ID
# Output: Return the movie ID of the movies recommended by ALS-implicit Recommender for the given user
###############################################################################################
get_cf_als_imp_movies <- function(inUserId){
m <- cf_als_imp_movies_df[cf_als_imp_movies_df$userId==inUserId,]$movieId
return(m)
}
###############################################################################################
#
# Recommender TAB (END)
#
###############################################################################################
###############################################################################################
#
# Movie Rating TAB (START)
#
###############################################################################################
###############################################################################################
# Function get_search_user
#
# Description: Return the current selected USER ID
# Input:
# Output: Return the current selected USER ID
###############################################################################################
  # Reactive accessor for the user id selected on the Movie Rating tab.
  get_search_user <- reactive({
    return(input$selectUser2)
  })
###############################################################################################
# Function get_search_user
#
# Description: Return the current selected USER ID
# Input:
# Output: Return the current selected USER ID
###############################################################################################
  # Reactive accessor for the free-text movie title entered in the search box.
  get_search_movie_title <- reactive({
    return(input$searchMovieTitle)
  })
###############################################################################################
# Function output$radioMovieTilesForSearch
#
# Description: Reactive function to a matrix of genres that satisfy the criteria
# Input:
# Output: Matrix of genres that satisfy the criteria
###############################################################################################
  # Build a radio-button group of poster tiles for the movie-title search:
  # each choice shows a tile (HTML) and carries "movieId;title;poster" as
  # its value, later split on ';' by the rating submit handler.
  # NOTE(review): titles containing ';' would corrupt that encoding.
  output$radioMovieTilesForSearch <- renderUI({
    input$movieSearch
    print("radioMovieTilesForSearch")
    i_movie_title=get_search_movie_title()
    m_posters=NULL
    m_movies=NULL
    rb_choiceNames=list()
    rb_choiceValues=list()
    m_movies <- searchdMovies(i_movie_title,30)
    if (length(m_movies)>0) {
      for (i in 1:nrow(m_movies$title)){
        movie_id <- m_movies$movie_id[i,]$value
        movie_title <- m_movies$title[i,]$value
        movie_poster <- m_movies$poster[i,]$value
        movie_rating <- m_movies$avg_rating[i,]$value
        movie_score <- NULL
        star_rating <- movie_rating/5 * 100
        m_tile <- compose_movie_tile_html(movie_title,movie_poster,poster.height,poster.width,star_rating,movie_score)
        rb_choiceNames <- append(rb_choiceNames, m_tile)
        rb_choiceValues <- append(rb_choiceValues, paste0(movie_id,";",movie_title,";",movie_poster))
      }
      # Wrap each tile so radioButtons renders it as raw HTML, not text.
      for (i in 1:length(rb_choiceNames)){
        rb_choiceNames[[i]]<-HTML(rb_choiceNames[[i]])
      }
      print("rb_choiceNames:")
      print(rb_choiceNames)
      print("rb_choiceValues:")
      print(rb_choiceValues)
      # The options are dynamically generated on the server
      radioButtons('movieChoice2', "", choiceNames=rb_choiceNames, choiceValues=rb_choiceValues, inline=TRUE)
    }
    else {
      # No match: show a single placeholder tile instead of radio buttons.
      movie_id <- 0
      movie_title <- "No Movie Found"
      movie_poster <- "movie_star.jpg"
      movie_rating <- 0
      movie_score <- NULL
      star_rating <- movie_rating/5 * 100
      m_tile <- compose_movie_tile_html(movie_title,movie_poster,poster.height,poster.width,star_rating,movie_score)
      HTML(m_tile)
    }
  })
###############################################################################################
# Function observeEvent
#
# Description: Function to observe an event of pressing the "Submit" button in Movie Rating page.
# Input:
# Output:
###############################################################################################
  # Persist a new rating when the Submit button is pressed: decode the
  # "movieId;title;poster" choice value and hand off to updateMovieRating().
  observeEvent(input$submitRating, {
    if (!is.null(input$movieChoice2) && !is.null(input$movieRating)) {
      if (input$movieChoice2 != "" && input$movieRating != "") {
        login_id <- input$selectUser2
        # NOTE(review): splitting on ';' assumes titles never contain ';'.
        params<-unlist(strsplit(input$movieChoice2, ";"))
        movie_id <- params[1]
        user_rating <- input$movieRating
        print(paste("Login: ",login_id," Movie: ",movie_id," Rating: ",user_rating))
        updateMovieRating(login_id, movie_id, user_rating)
      }
    }
  })
###############################################################################################
# Function output$chosenMovie
#
# Description: Function to render UI for showing the chosen movie in the Movie Rating page.
# Input:
# Output: R Shiny UI
###############################################################################################
  # Render the currently selected movie (1.5x poster, title, star-rating
  # input and Submit button) from the "movieId;title;poster" choice value.
  output$chosenMovie <- renderUI({
    movieChoice <- unlist(strsplit(as.character(input$movieChoice2),";"))
    movie_title <- movieChoice[2]
    movie_title_html <- HTML(paste('<font size="+2">',movie_title,'</font>'))
    # Enlarge the standard tile dimensions for the detail view.
    poster_height <- poster.height * 1.5
    poster_width <- poster.width * 1.5
    movie_poster <- HTML(paste('<img src="',movieChoice[3],'" alt="',movie_title,'" size=+2 height="',poster_height,'" width="',poster_width,'" ContentType="Images/jpeg" >'))
    #print(movie_poster)
    tagList(div(movie_poster,
      div(movie_title_html,
        div(ratingInput("movieRating", label="", dataStop=5, dataFractions=2)
        )
      )
    ),
    div(HTML("<br>"),actionButton("submitRating", "Submit"))
    )
  })
###############################################################################################
# Function output$movieRating
#
# Description: Function to render text for showing the details of the chosen movie in the Movie Rating page.
# Input:
# Output: HTML text
###############################################################################################
# Echo the user's chosen star rating back as an HTML snippet.
output$movieRating <- renderText({
  paste('<font size="+2">The movie was rated as',input$movieRating,'</font>')
})
# Render the plot summary plus production / cast / genre details for the
# movie selected in the movieChoice2 radio group (value: "id;title;poster").
output$moviePlot<-renderText({
  movieChoice <- unlist(strsplit(as.character(input$movieChoice2),";"))
  movie_id <- movieChoice[1]
  m_movies <- searchdMoviesById(movie_id)
  # Fix: the heading previously read "Movie Descrption" (typo).
  # The intermediate `movie_plot <-` assignment was dropped: the HTML()
  # value is simply the last expression of the render block.
  HTML(paste('<div><h2>Movie Description</h2></div><br><div><font size="+1">',m_movies$plot[1,]$value,'</font></div><br><br>',
             '<div><h3>Production</h3>',m_movies$company_csv[1,]$value,'<br>',
             '<div><h3>Starring</h3>',m_movies$actor_csv[1,]$value,'<br>',
             '<div><h3>Genres</h3>',m_movies$genre_csv[1,]$value,'<br>'
  )
  )
})
###############################################################################################
#
# Movie Rating TAB (END)
#
###############################################################################################
###############################################################################################
#
# Analysis TAB (START)
#
###############################################################################################
###############################################################################################
# Top 10 Movies (START)
###############################################################################################
###############################################################################################
# Function getTop10Movie
#
# Description: Reactive function to return the top 10 movies that satisfy the given criteria
# Input:
# Output: Reactive function to return the top 10 movies that satisfy the given criteria
###############################################################################################
# Reactive source for the Top-10 panel: returns the data frame of top movies
# for the selected year range and minimum rating count.
getTop10Movie <- reactive({
  # The explicit dependency on the "update" button is commented out, so this
  # reactive now invalidates directly when any of the three inputs change.
  #input$update
  getTop10MovieDF(input$ratedYear[1],input$ratedYear[2], input$minRatingCnt[1])
})
###############################################################################################
# Function output$top10Movies
#
# Description: Render function to return the HTML for composing top 10 movies result
# Input:
# Output: HTML for composing top 10 movies result
###############################################################################################
# Compose the "Top 10 Movies" gallery as a character vector of HTML
# fragments; renderText() cat()s them in order.
output$top10Movies<-renderText({
  m_posters=NULL
  m_movies <- getTop10Movie()
  print("Top 10 Movies")          # console trace only
  m_posters <- append(m_posters, (paste0("<h1>Top 10 Movies (",input$ratedYear[1],"-",input$ratedYear[2],")</h1>")))
  if (length(m_movies)>0) {
    m_posters <- append(m_posters, '<div>')
    # Fix: seq_len() loops zero times when the result set is non-NULL but
    # empty; the previous 1:nrow(...) would wrongly iterate over c(1, 0).
    # (The dead initialisations of movie_title / movie_rating were removed.)
    for (i in seq_len(nrow(m_movies$title))){
      movie_title <- m_movies$title[i,]$value
      movie_poster <- m_movies$poster[i,]$value
      movie_rating <- m_movies$avg_rating[i,]$value
      # Map the 0-5 rating onto a 0-100% width for the CSS star overlay.
      star_rating <- movie_rating/5 * 100
      m_posters <- append(m_posters, '<div class="gallery">')
      m_posters <- append(m_posters, paste0('<img src="',movie_poster,'" alt="',movie_title,'" height="',poster.height,'" width="',poster.width,'" ContentType="Images/jpeg" >'))
      m_posters <- append(m_posters, '<div class="ratings">')
      m_posters <- append(m_posters, '<div class="empty-stars"></div>')
      m_posters <- append(m_posters, paste0('<div class="full-stars", style="width:',star_rating,'%"></div>'))
      m_posters <- append(m_posters, '</div>')
      m_posters <- append(m_posters, paste0('<div class="desc" >',movie_title,'</div>'))
      m_posters <- append(m_posters, '</div>')
    }
    m_posters <- append(m_posters, '</div>')
  }
  else{
    m_posters <- append(m_posters, '<div><H1 style="text-align:center">No Movies Found</H1></div>')
  }
  #print(m_posters)
  return(m_posters)
})
###############################################################################################
# Top 10 Movies (END)
###############################################################################################
###############################################################################################
# WordCloud of Genres and Tags (START)
###############################################################################################
###############################################################################################
# Function terms
#
# Description: Reactive function to a matrix of genres that satisfy the criteria
# Input:
# Output: Matrix of genres that satisfy the criteria
###############################################################################################
# Reactive term-frequency matrix of movie genres for the wordclouds.
# Recomputes only when the "update" button is pressed (all other reads are
# wrapped in isolate()).
terms <- reactive({
  # Change when the "update" button is pressed...
  input$update
  # ...but not for anything else
  isolate({
    withProgress({
      setProgress(message = "Processing corpus...")
      # Console trace of the selected year bounds.
      print(input$year[1])
      print(input$year[2])
      getTermMatrix(input$year[1],input$year[2],input$rating[1],input$rating[2])
    })
  })
})
###############################################################################################
# Function tags
#
# Description: Reactive function to a matrix of tags that satisfy the criteria
# Input:
# Output: Matrix of tags that satisfy the criteria
###############################################################################################
# Reactive tag-frequency matrix for the tag wordcloud; same update-button
# gating as terms() above.
tags <- reactive({
  # Change when the "update" button is pressed...
  input$update
  # ...but not for anything else
  isolate({
    withProgress({
      setProgress(message = "Processing corpus...")
      getTagMatrix(input$year[1],input$year[2],input$rating[1],input$rating[2])
    })
  })
})
# Make the wordcloud drawing predictable during a session: repeatable()
# pins the RNG seed used by wordcloud() so re-renders place words the same way.
wordcloud_rep <- repeatable(wordcloud)
###############################################################################################
# Function genreWordCloud
#
# Description: Render function to plot the wordcloud of genres
# Input:
# Output: Wordcloud of genres
###############################################################################################
# Draw the genre wordcloud; word size reflects genre frequency, filtered by
# the user's min-frequency and max-words controls.
output$genreWordCloud <- renderPlot({
  v <- terms()
  # Console trace of the frequency vector's class/length (debug aid).
  print(paste0("wordcloud V: ",class(v),": ",length(names(v))))
  wordcloud_rep(names(v), v, scale=c(4,0.5),
                min.freq = input$freq, max.words=input$max,
                colors=brewer.pal(8, "Dark2"))
})
###############################################################################################
# Function tagWordCloud
#
# Description: Render function to plot the wordcloud of tags
# Input:
# Output: Wordcloud of tags
###############################################################################################
# Draw the tag wordcloud; word size reflects how often users applied a tag,
# filtered by the same min-frequency / max-words controls as the genre cloud.
output$tagWordCloud <- renderPlot({
  tag_freq <- tags()
  wordcloud_rep(
    names(tag_freq),
    tag_freq,
    scale = c(4, 0.5),
    min.freq = input$freq,
    max.words = input$max,
    colors = brewer.pal(8, "Dark2")
  )
})
###############################################################################################
# WordCloud of Genres and Tags (END)
###############################################################################################
###############################################################################################
# Rating Analysis (START)
###############################################################################################
###############################################################################################
# Function ratingHistogram
#
# Description: Reactive function to return data frame of ratings that satisfy the given criteria
# Input:
# Output: Data frame of ratings that satisfy the given criteria
###############################################################################################
# Reactive data frame of individual ratings within the selected year range.
ratingHistogram <- reactive({
  getRatingHistogramDF(input$year2[1],input$year2[2])
})
###############################################################################################
# Function avgRatingHistogram
#
# Description: Reactive function to return data frame of average ratings that satisfy the given criteria
# Input:
# Output: Data frame of average ratings that satisfy the given criteria
###############################################################################################
# Reactive data frame of per-movie average ratings within the selected year range.
avgRatingHistogram <- reactive({
  getAvgRatingHistogramDF(input$year2[1],input$year2[2])
})
###############################################################################################
# Function output$ratingHistogram
#
# Description: Render function to return the plot of the histogram of movie ratings that satisfy the given criteria
# Input:
# Output: The plot of the histogram of movie ratings that satisfy the given criteria
###############################################################################################
# Histogram of raw movie ratings, bucketed in half-star bins over [0, 5].
output$ratingHistogram <- renderPlot({
  v <- ratingHistogram()
  # Console trace of the number of ratings plotted.
  print("# of Rating")
  print(nrow(v))
  # NOTE(review): `..count..` is ggplot2's legacy dot-dot notation; newer
  # ggplot2 prefers after_stat(count) — confirm installed version before changing.
  ggplot(data=v, aes(rating)) +
    geom_histogram(breaks=seq(0, 5, by=0.5),
                   alpha = .7,
                   aes(fill=..count..)) +
    labs(title=paste0("Histogram for Movie Ratings (",input$year2[1],"-",input$year2[2],")"), x="Rating", y="Count") +
    scale_fill_gradient("Count", low="green", high="red")
})
###############################################################################################
# Function output$avgRatingHistogram
#
# Description: Render function to return the plot of the histogram of average movie ratings that satisfy the given criteria
# Input:
# Output: The plot of the histogram of average movie ratings that satisfy the given criteria
###############################################################################################
# Histogram of per-movie AVERAGE ratings, half-star bins over [0, 5].
output$avgRatingHistogram <- renderPlot({
  v <- avgRatingHistogram()
  # Console trace of the number of movies plotted.
  print("# of Avg Rating")
  print(nrow(v))
  # NOTE(review): `..count..` is legacy notation; see ratingHistogram above.
  ggplot(data=v, aes(avgRating)) +
    geom_histogram(breaks=seq(0, 5, by=0.5),
                   alpha = .7,
                   aes(fill=..count..)) +
    labs(title=paste0("Distribution for Average Movie Ratings (",input$year2[1],"-",input$year2[2],")"), x="Average Movie Rating", y="Count") +
    scale_fill_gradient("Count", low="green", high="red")
})
###############################################################################################
# Function output$userRating
#
# Description: Render function to return the plot of the average number of ratings given by each user for a certain period
# Input:
# Output: The plot of the average number of ratings given by each user for a certain period
###############################################################################################
# Bar chart: average number of ratings submitted per user, by year.
# Expects columns `year` and `avgCntPerUser` in the returned data frame.
output$userRating <- renderPlot({
  v <- getUserRatingHistogramDF()
  # Console trace (debug aid) of the aggregated table.
  print("# of Rating")
  print(nrow(v))
  print(v)
  ggplot(data=v, aes(year, avgCntPerUser)) +
    geom_col(fill="red") +
    labs(title="Average Number of Ratings Given per User", x="Year", y="Average # of Ratings per User")
})
###############################################################################################
# Rating Analysis (END)
###############################################################################################
###############################################################################################
#
# Analysis TAB (END)
#
###############################################################################################
###############################################################################################
#
# Graph TAB (START)
#
###############################################################################################
###############################################################################################
# Function get_analyze_curr_login
#
# Description: Return the current selected USER ID
# Input:
# Output: Return the current selected USER ID
###############################################################################################
# Reactive accessor: currently selected user id on the Graph tab.
get_analyze_user <- reactive({
  input$selectUser1
})
###############################################################################################
# Function get_analyze_movie_category
#
# Description: Return the current selected Movie Category
# Input:
# Output: Return the current selected Movie Category
###############################################################################################
# Reactive accessor: currently selected recommendation category on the Graph tab.
get_analyze_movie_category <- reactive({
  input$selectCategory
})
###############################################################################################
# Function compose_movie_tile_html
#
# Description: Return a piece of HTML text that represents a movie tile for display
# Input:
# Output: Return a piece of HTML text that represents a movie tile for display
###############################################################################################
# Build the HTML for one movie tile: poster image, a CSS star-rating overlay
# (star_rating is a 0-100 width percentage), an optional score line, and the
# movie title. Returns a single HTML string.
compose_movie_tile_html <- function(movie_title, movie_poster, poster_height, poster_width, star_rating, score){
  parts <- c(
    '<div class="gallery">',
    paste0('<img src="', movie_poster, '" alt="', movie_title, '" height="', poster_height, '" width="', poster_width, '" ContentType="Images/jpeg" >'),
    '<div class="ratings">',
    '<div class="empty-stars"></div>',
    paste0('<div class="full-stars", style="width:', star_rating, '%"></div>')
  )
  # A score line is emitted only when a score is supplied (rounded to 2 dp).
  if (!is.null(score)) {
    parts <- c(parts, paste0('<div class="desc" >Score: ', round(score, 2), '</div>'))
  }
  parts <- c(
    parts,
    '</div>',  # closes the "ratings" div
    paste0('<div class="desc" >', movie_title, '</div>'),
    '</div>'   # closes the "gallery" div
  )
  paste0(parts, collapse = "")
}
###############################################################################################
# Function output$selectedMovieCategory
#
# Description: Render the section heading for the currently selected recommendation category
# Input:
# Output: HTML heading text (empty when no movies are found)
###############################################################################################
# Render the <h1> heading for the selected recommendation category.
# The recommendation query is run only to decide whether any movies exist
# (no heading is shown for an empty result).
output$selectedMovieCategory<-renderText({
  print("selectedMovieCategory")
  currUser=get_analyze_user()
  currCategory=get_analyze_movie_category()
  m_posters=NULL
  m_movies=NULL
  # rb_choices is assigned but never used in this block.
  rb_choices=NULL
  if (currCategory == "Movies similar to what you like"){
    m_movies <- getContentBasedMovies(currUser,10,"N")
  } else if (currCategory == "Others similar to you like these movies"){
    m_movies <- getCollaborativeFilteringMovies(currUser,10,"N")
  } else if (currCategory == "Movies with your favorite actors / actresses") {
    m_movies <- getActorMovies(currUser,10,"N")
  }
  if (length(m_movies)>0) {
    m_posters <- append(m_posters, (paste0("<h1>",currCategory,"</h1>")))
  }
  # print() returns its argument (invisibly), so this both logs the heading
  # to the console and yields it as the rendered text.
  print(m_posters)
})
###############################################################################################
# Function output$radioMovieTiles
#
# Description: Return UI that contains a list of radio buttons with movie posters
# Input:
# Output: Return UI that contains a list of radio buttons with movie posters
###############################################################################################
# Build a radio-button group of movie tiles for the selected recommendation
# category. Each choiceName is an HTML tile; each choiceValue encodes the ids
# needed later to build the explanation graph:
#   content-based / actor:        "<source_id_csv>:<movie_id>"
#   collaborative filtering:      "<u1_loginId>:<u2_loginId>:<movie_id>"
output$radioMovieTiles <- renderUI({
  print("radioMovieTiles")
  currUser=get_analyze_user()
  currCategory=get_analyze_movie_category()
  # m_posters is assigned but never used in this block.
  m_posters=NULL
  m_movies=NULL
  rb_choiceNames=list()
  rb_choiceValues=list()
  if (currCategory == "Movies similar to what you like"){
    m_movies <- getContentBasedMovies(currUser,10,"N")
  } else if (currCategory == "Others similar to you like these movies"){
    m_movies <- getCollaborativeFilteringMovies(currUser,10,"N")
  } else if (currCategory == "Movies with your favorite actors / actresses") {
    m_movies <- getActorMovies(currUser,10,"N")
  }
  if (length(m_movies)>0) {
    # NOTE(review): 1:nrow(...) misbehaves if the result is non-NULL but has
    # zero rows (iterates over c(1, 0)); seq_len() would be safer.
    for (i in 1:nrow(m_movies$title)){
      movie_id <- m_movies$movie_id[i,]$value
      movie_title <- m_movies$title[i,]$value
      movie_poster <- m_movies$poster[i,]$value
      movie_rating <- m_movies$avg_rating[i,]$value
      movie_score <- m_movies$score[i,]$value
      # 0-5 rating mapped to a 0-100% star-overlay width.
      star_rating <- movie_rating/5 * 100
      m_tile <- compose_movie_tile_html(movie_title,movie_poster,poster.height,poster.width,star_rating,movie_score)
      rb_choiceNames <- append(rb_choiceNames, m_tile)
      if (currCategory == "Movies similar to what you like"){
        source_id_csv <- m_movies$source_id_csv[i,]$value
        rb_choiceValues <- append(rb_choiceValues, paste0(source_id_csv,":",movie_id))
      } else if (currCategory == "Others similar to you like these movies"){
        u1_loginId <- m_movies$u1_loginId[i,]$value
        u2_loginId <- m_movies$u2_loginId[i,]$value
        rb_choiceValues <- append(rb_choiceValues, paste0(u1_loginId,":",u2_loginId,":",movie_id))
      } else if (currCategory == "Movies with your favorite actors / actresses") {
        source_id_csv <- m_movies$source_id_csv[i,]$value
        rb_choiceValues <- append(rb_choiceValues, paste0(source_id_csv,":",movie_id))
      }
    }
    # Mark each tile string as raw HTML so radioButtons renders it unescaped.
    for (i in 1:length(rb_choiceNames)){
      rb_choiceNames[[i]]<-HTML(rb_choiceNames[[i]])
    }
    print("rb_choiceNames:")
    print(rb_choiceNames)
    print("rb_choiceValues:")
    print(rb_choiceValues)
  }
  else {
    # NOTE(review): in this branch rb_choiceValues stays empty while
    # rb_choiceNames has one entry — confirm radioButtons tolerates the
    # length mismatch.
    rb_choiceNames <- c("No Movies")
  }
  # The options are dynamically generated on the server
  radioButtons('movieChoice', "", choiceNames=rb_choiceNames, choiceValues=rb_choiceValues, inline=TRUE)
})
###############################################################################################
# Function output$myNetId
#
# Description: Return a VisNetwork object to display the knowledge graph
# Input:
# Output: Return a VisNetwork object to display the knowledge graph
###############################################################################################
# Render the static knowledge-graph overview.
# NOTE(review): `nodes` and `edges` are not defined in this block; presumably
# created at file/global scope — confirm before refactoring.
output$myNetId <- renderVisNetwork({
  visNetwork(nodes, edges)
})
# Render the explanation subgraph for the selected movie tile. The tile's
# ":"-separated value (built in output$radioMovieTiles) is split back into
# the ids the per-category graph query needs.
output$movieGraph <- renderVisNetwork({
  currCategory=get_analyze_movie_category()
  mc <- input$movieChoice
  if (!is.null(mc)) {
    if (currCategory == "Movies similar to what you like"){
      params<-unlist(strsplit(input$movieChoice, ":"))
      G <- getContentBasedMovieGraph(params[1],params[2])
    } else if (currCategory == "Others similar to you like these movies"){
      params<-unlist(strsplit(input$movieChoice, ":"))
      # NOTE(review): "Movieee" in this helper's name looks like a typo, but it
      # must match the function defined elsewhere — rename both or neither.
      G <- getCollaborativeFilteringMovieeGraph(params[1],params[2],params[3])
    } else if (currCategory == "Movies with your favorite actors / actresses") {
      params<-unlist(strsplit(input$movieChoice, ":"))
      G <- getActorMovieGraph(params[1],params[2])
    }
    # Assign a display image and label per node group; movies show their
    # poster, other groups use bundled icon files.
    G$nodes$image <- ""
    G$nodes$label <- G$nodes$title
    G$nodes[G$nodes$group=="Movie",]$image = G$nodes[G$nodes$group=="Movie",]$poster
    G$nodes[G$nodes$group=="Person",]$image = "user_icon_red.png"
    G$nodes[G$nodes$group=="Person",]$label = G$nodes[G$nodes$group=="Person",]$name
    G$nodes[G$nodes$group=="Company",]$image = "film_company_icon_green.png"
    G$nodes[G$nodes$group=="Company",]$label = G$nodes[G$nodes$group=="Company",]$name
    G$nodes[G$nodes$group=="Genre",]$image = "genre_icon_pink.png"
    G$nodes[G$nodes$group=="Genre",]$label = G$nodes[G$nodes$group=="Genre",]$name
    G$nodes[G$nodes$group=="Country",]$image = "country_icon_blue.png"
    G$nodes[G$nodes$group=="Country",]$label = G$nodes[G$nodes$group=="Country",]$name
    # The hover tooltip (visNetwork `title`) shows the node's group name;
    # the second assignment overwrites the first with a styled version.
    G$nodes$title <- G$nodes$group
    G$nodes$title <- paste('<p style="color:Black;font-size:14px">',G$nodes$group,'</p>')
    visNetwork(G$nodes, G$relationships) %>%
      visNodes(font = list(color = "#ffffff")) %>%
      visGroups(groupname="Person",shape = "image") %>%
      visGroups(groupname="Movie",shape = "image") %>%
      visGroups(groupname="Company",shape = "image") %>%
      visGroups(groupname="Genre",shape = "image") %>%
      visGroups(groupname="Country",shape = "image") %>%
      visEdges(font = list(color = "#ffffff", strokeColor = "#000000")) %>%
      # springConstant = 0 stops the layout from oscillating after stabilising.
      visPhysics(barnesHut = list(springConstant=0)) %>%
      addFontAwesome()
  }
})
###############################################################################################
#
# Graph TAB (END)
#
###############################################################################################
}
###############################################################################################
# Server (END)
###############################################################################################
shinyApp(ui, server)
|
c390c02e4cfcceecd2a52666d0d2b0209939cfdd
|
7d0f25a190e97e0d2714c43a57a017eaf093ecd5
|
/man/plotRoc.Rd
|
da819a5f92c5223404c0464c8ea8429db67130bb
|
[
"MIT"
] |
permissive
|
mikeniemant/nbs
|
f192f6d6ce16d725dc0985d6f505666c2c785020
|
3d82fd553c58ea5941d5dfa60cfaefa95df76121
|
refs/heads/master
| 2022-07-03T11:21:21.170515
| 2022-06-15T08:51:37
| 2022-06-15T08:51:37
| 142,566,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 565
|
rd
|
plotRoc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotRoc.R
\name{plotRoc}
\alias{plotRoc}
\title{Plot Receiver Operator Curve}
\usage{
plotRoc(x, group = NULL, title = NULL, breaks = F, print_plot)
}
\arguments{
\item{x}{Tibble with sensitivity and specificity computed by the performance R package}
\item{group}{Variable to group coordinates}
\item{title}{Plot title}
\item{breaks}{Boolean to plot additional breaks and lines}
\item{print_plot}{Boolean indicating whether to print the plot}
}
\value{
A ggplot containing the receiver operator curve
}
\description{
Plot Receiver Operator Curve
}
|
e31ea3d855f44cdb6bb9b931c5dc3535d8fc3bbe
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610046171-test.R
|
8fc7b80f6c10f7205167316ef40a037ea038ef93
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
1610046171-test.R
|
# Auto-generated libFuzzer/valgrind reproduction case for gjam:::tnormRcpp.
# List names (hi, lo, mu, sig) match the function's formals so the call can
# be replayed via do.call().
testlist <- list(hi = 4.66726145839586e-62, lo = 1.32963809623256e-105, mu = 4.66726145839586e-62, sig = 4.66726145839586e-62)
result <- do.call(gjam:::tnormRcpp,testlist)
# Print the structure of the returned value for the fuzz harness log.
str(result)
|
4a79009fa74e8b7e41efadd6ae0d6885f81526d3
|
184940aa0323a4f2a84fbd49e919aedb7e1fcaea
|
/Complete R/Chi.R
|
58a83c1df45860a57b26e6a922a969cee2b1f25c
|
[] |
no_license
|
Dipzmaster/Complete_R
|
7e700b1ae8f21dd07538d8f8e0ace2c374298b82
|
face68fdac71be6f2bf4f744884c401cebbadffd
|
refs/heads/main
| 2023-08-23T02:08:24.794579
| 2021-11-03T18:36:13
| 2021-11-03T18:36:13
| 415,090,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
Chi.R
|
# Chi-square test of independence between air-bag availability and car type
# in the MASS::Cars93 data set.
# (Fix: the package was previously loaded twice, and a data.frame of the same
# two columns was built only to be immediately overwritten by table().)
library("MASS")

# Inspect the data set's structure. str() prints as a side effect and returns
# NULL invisibly, so the former print(str(...)) also printed a stray "NULL".
str(Cars93)

# Build the contingency table of AirBags vs. Type.
car.data <- table(Cars93$AirBags, Cars93$Type)
print(car.data)

# Perform the Chi-Square test of independence on the contingency table.
print(chisq.test(car.data))
|
fc0356e97cc9185c5824e1edbc77afb3b0867c77
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/apollo/man/apollo_writeTheta.Rd
|
f89c8182b432494b95ecfdb1520993d7bb9668c2
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
apollo_writeTheta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apollo_writeTheta.R
\name{apollo_writeTheta}
\alias{apollo_writeTheta}
\title{Writes the vector [beta,ll] to a file called modelname_iterations.csv}
\usage{
apollo_writeTheta(beta, ll, modelName)
}
\arguments{
\item{beta}{vector of parameters to be written.}
\item{ll}{scalar representing the loglikelihood of the whole model.}
\item{modelName}{Character. Name of the model.}
}
\value{
Nothing.
}
\description{
Writes the vector [beta,ll] to a file called modelname_iterations.csv
}
|
d3d2d0c099bf2d6cb9446bbff693d394e45f7354
|
ab2502b9db5190e9067986e52666ff0d1f653611
|
/R/user_profile.R
|
7eca75ac64ba50cca60c3b840f726f35905d5024
|
[] |
no_license
|
jakosz/wiki-tools
|
65604e0d8b9aedf5c08ec83b5201c3fe944a201b
|
94f4307268b0ded9fde290de442612c5de5e2da4
|
refs/heads/master
| 2021-01-22T02:28:58.862747
| 2014-08-09T09:36:18
| 2014-08-09T09:36:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,039
|
r
|
user_profile.R
|
# Build per-user Wikipedia editing profiles for one language edition:
# join per-user page stats with per-page category profiles, distribute each
# user's byte diff across categories, aggregate per user, and add a category
# entropy. Paths and `data` come from settings.R; `ps` is presumably a
# paste-style path helper defined there — confirm.
source('settings.R')
LANG = 'de'
up = read.csv(ps(data$trans, LANG, '_user_page.csv'), sep = ';')
upp = read.csv(ps(data$trans, LANG, '_user_page_profiles.csv'), sep = ';')
# Column 1 of the profiles file duplicates the key, hence 2:ncol().
upp = cbind(up, upp[,2:ncol(upp)])
# -- variables for per-user aggregation:
# distribute diff among categories (columns 7+ are category weights)
ctg = upp[,7:ncol(upp)]
upp[,7:ncol(upp)] = ctg * (upp$diff / rowSums(ctg))
# number of pages (1 per row, summed during aggregation)
upp$pages = rep(1, nrow(upp))
# input to good articles
upp$good_diff = upp$diff * upp$good
# input to featured
upp$featured_diff = upp$diff * upp$featured
# -- aggregate: drop rows containing any NA/NaN, then sum everything per user
bad = apply(upp, 1, function(x) any(any(is.na(x)), any(is.nan(x))))
upp = upp[!bad, ]
user_profile = aggregate(upp[,-c(1:2)], by = list(upp$userid), FUN = function(x) sum(as.numeric(x)))
names(user_profile)[1] = 'userid'
# Shannon entropy (base 2) over the category columns measures how spread a
# user's contributions are across topics.
iCat = which(names(user_profile) %in% data$meta$namec[[LANG]])
user_profile$entropy = apply(user_profile[,iCat], 1, function(x) entropy(x, unit='log2'))
write.table(user_profile, ps(data$trans, LANG, '_user_profiles.csv'), sep = ';', row.names = FALSE)
|
579eab6035e284113b03228f360f21ae14f9f8e1
|
7a9e185724bad204059f487489ecefe1b30568ea
|
/dev_tsne.R
|
a53a04fb09f9b70f8143ecc5f63befaabcd381ab
|
[] |
no_license
|
jrboyd/waldron
|
21ca7ad82f6db5bac6855273acfb40ad753c5895
|
b89a119301da4cf86bc4805dc548d3f13f5c1cda
|
refs/heads/master
| 2020-05-01T17:07:16.479793
| 2019-03-25T13:24:57
| 2019-03-25T13:24:57
| 177,591,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,553
|
r
|
dev_tsne.R
|
# Development script: t-SNE of H3K4me3/H3K27me3 ChIP-seq profiles across cell
# lines, with per-bin profile images and cell-to-cell "velocity" arrows.
# Relies on project files sourced below (fetch_tsne_mat, run_tsne, etc.) and
# on objects loaded from .save files (k4_bws, k27_bws, ...).
library(BiocFileCache)
library(ggimage)
library(Rtsne)
library(magrittr)
# NOTE(review): setwd()/home-relative paths make this script machine-specific.
setwd("~/R/waldron/")
source("setup_files.R")
source("functions_tsne.R")
source("geom_image.rect.R")
bfcif = peakrefine::bfcif
bfc = BiocFileCache()
options("mc.cores" = 36)
load("CD34_bivalent.save")
load("CD34_consensus_v1.save")
# Transcript annotation, restricted to well-supported transcripts (TSL 1-2).
ref_gr = rtracklayer::import.gff("~/gencode.v28.annotation.gtf.gz", format = "gtf", feature.type = "transcript")
trans_gr = subset(ref_gr, transcript_support_level %in% 1:2)
# NOTE(review): result of this call is discarded — dead expression?
promoters(trans_gr, 1, 1)
view_size = 15e3
n_points = 16
theme_set(theme_classic())
peaks = easyLoad_narrowPeak(c(k4_peaks, k27_peaks))
set.seed(0)
# Alternative query-region definitions kept for reference:
# qgr = biv_gr
# qgr = sample(peaks[["CD34-01562_H3K4me3"]], 15000)
# qgr = peaks[["CD34-01562_H3K4me3"]]
# qgr = reduce(c(k4_consenus, subsetByOverlaps(k27_consenus, k4_consenus, invert = TRUE)))
# qgr = reduce(c(peaks[["H7_H3K4me3"]], subsetByOverlaps(peaks[["H7_H3K27me3"]], peaks[["H7_H3K4me3"]], invert = TRUE)))
# Query regions: merged TSSs widened to view_size around their centers.
qgr = promoters(trans_gr, 1, 1) %>% reduce %>% resize(., view_size, fix = "center")
# qgr = sample(qgr, 15000)
qgr = resize(qgr, view_size, fix = "center")
# Bigwig bookkeeping: derive cell and mark from file names; both sanity
# checks require every cell to have the same number of files per mark.
qbw = c(k4_bws, k27_bws)
qdt = data.table(qbw = qbw)
qdt[, c("cell", "mark") := tstrsplit(basename(qbw), "_", keep = 1:2)]
qdt[, mark := sub("ME3", "me3", mark)]
stopifnot(length(unique(table(qdt$cell))) == 1)
stopifnot(length(unique(table(qdt$mark))) == 1)
message("fetch tsne input")
tsne_input = fetch_tsne_mat(qdt, qgr,
                            qwin = 50,
                            qmet = "summary",
                            cap_value = 30,
                            high_on_right = FALSE)
message("run tsne")
tsne_res = run_tsne(tsne_input$tsne_mat, perplexity = 100)
# Subsample up to 500 region ids for light-weight scatter plots.
tp = sample(unique(tsne_res$id), min(500, length(unique(tsne_res$id))))
tsne_res.tp = tsne_res[id %in% tp]
p_basic = ggplot() +
  annotate("point", x = tsne_res.tp$tx, y = tsne_res.tp$ty, color = "lightgray")
p_basic
# Same scatter with CD34 cells highlighted by color.
ggplot() +
  annotate("point", x = tsne_res.tp$tx, y = tsne_res.tp$ty, color = "lightgray") +
  geom_point(data = tsne_res.tp[grepl("CD34", cell)], aes(x = tx, y = ty, color = cell))
message("make images")
# Render small per-bin profile images onto an n_points x n_points grid.
img_res = make_tsne_img(
  bw_dt = tsne_input$bw_dt,
  tdt = tsne_res, #force_rewrite = TRUE,
  n_points = n_points
)
p_profiles = ggplot(img_res$images_dt, aes(x = tx, y = ty, image = png_file)) + geom_image()
ggplot(img_res$images_dt, aes(xmin = tx - .05, xmax = tx + .05, ymin = ty - .05, ymax = ty + .05, image = png_file)) + geom_image.rect()
p_density = plot_tsne_img(img_res$images_dt, n_points = n_points,
                          N_ceiling = NULL, N_floor = 50,
                          show_plot = TRUE)$plot
p_density + theme(panel.background = element_rect(fill= "lightblue"))
cowplot::plot_grid(p_profiles, p_density)
# plot_tsne_img_byCell(img_res$images_dt, tsne_dt = img_res$tsne_dt, N_ceiling = 30, n_points = n_points, min_size = .05)
plot_tsne_img_byCell(img_res$images_dt, tsne_dt = img_res$tsne_dt[grepl("CD34", cell)], n_points = n_points, min_size = .05)
# Side-by-side comparison of two cell lines, then per-region movement
# ("velocity") between them in t-SNE space.
cell_a = "H7"
cell_b = "CD34-01517"
p = plot_tsne_img_byCell(img_res$images_dt,
                         tsne_dt = img_res$tsne_dt[cell %in% c(cell_a, cell_b)],
                         N_floor = 0,
                         # N_ceiling = 300,
                         n_points = n_points, min_size = 0)
ggsave("tmp_sideBySide.pdf", p$plot, width = 8, height = 4)
delta_res = calc_delta(tsne_res, cell_a, cell_b, n_points)
v_dt = delta_res$velocity_dt
v_dt.tp = v_dt[id %in% tp]
# Angle (in degrees) of the displacement from point 1 to point 2, as used for
# coloring the velocity arrows. Vectorised over all four coordinates.
# Note the +180 offset applied to atan(): values land in (0, 360].
xy2deg <- function(x1, y1, x2, y2) {
  dx <- x2 - x1
  dy <- y2 - y1
  angle <- atan(dy / dx) * 180 / pi + 180
  # atan() only resolves a half-circle; shift left-pointing vectors by 180.
  neg_dx <- dx < 0
  angle[neg_dx] <- angle[neg_dx] + 180
  # Wrap anything that overflowed back into range.
  wrapped <- angle > 360
  angle[wrapped] <- angle[wrapped] - 360
  angle
}
# Per-region movement angle between the two cells, plus quadrant flags
# (grp1/grp2 were used by the commented-out facet_wrap below).
v_dt.tp[, angle := xy2deg(x1 = tx_cell_a, x2 = tx_cell_b, y1 = ty_cell_a, y2 = ty_cell_b)]
v_dt.tp[, grp1 := tx_cell_a > tx_cell_b]
v_dt.tp[, grp2 := ty_cell_a > ty_cell_b]
p_arrows = ggplot(v_dt.tp, aes(x = tx_cell_a, xend = tx_cell_b,
                               y = ty_cell_a, yend = ty_cell_b,
                               color = angle)) +
  labs(title = "color mapped to angle") +
  geom_segment(arrow = arrow(length = unit(0.1,"cm"))) +
  scale_color_gradientn(colours = c("red", "purple", "blue",
                                    "green", "yellow", "orange"), limits = c(0, 360), breaks = 1:4*90) #+ facet_wrap("grp1~grp2")
# ggsave() with no plot argument saves the last plot displayed.
ggsave("tmp_arrows.pdf")
# Aggregated (binned) velocities: one arrow per grid cell, sized by the
# number of regions moving that way; only bins with N > 6 are drawn.
av_dt = delta_res$agg_velocity_dt
p_velocity = ggplot() +
  annotate("point", x = tsne_res.tp$tx, y = tsne_res.tp$ty, color = "lightgray") +
  geom_point(data = tsne_res.tp[cell %in% c(cell_a, cell_b)], aes(x = tx, y = ty, color = cell)) +
  geom_segment(data = av_dt[N > 6], aes(x = tx_cell_a, xend = tx_cell_b, y = ty_cell_a, yend = ty_cell_b, size = N), arrow = arrow()) +
  coord_cartesian(xlim = range(tsne_res$tx), ylim = range(tsne_res$ty)) +
  labs(x = "x", y = "y") +
  scale_size_continuous(range = c(.5, 2), breaks = range(av_dt$N)) + theme_classic()
pg = cowplot::plot_grid(p_basic + labs(title = "t-sne: 2 ChIP-seq mark, 14 cell lines, 386 sites"),
                        p_density + labs(title = "t-sne: profile frequency"),
                        p_velocity + labs(title = "t-sne: changes between two cell lines"), nrow = 1, rel_widths = c(1,1, 1.3))
ggsave("tmp_changes.pdf", pg, width = 14, height = 4)
# TSS coverage over the same query regions: 2 kb promoter windows, merged.
pr_gr = promoters(ref_gr, 1000, 1000)
pr_gr = reduce(pr_gr)
hist(width(pr_gr))
pr_dt = ssvFetchGRanges(list(prom2k = pr_gr), tsne_input$query_gr,
                        return_data.table = TRUE,
                        target_strand = "*",
                        win_size = 50,
                        win_method = "summary")
pr_dt = pr_dt[order(x)]
pr_dt$x = round(pr_dt$x, 3)
# pr_dt = pr_dt[, .(y = sum(y)), by = .(id, x, sample)]
# pr_dt[y > 1, y := 1]
# Mean TSS-overlap profile across all regions.
p_pr_global = ggplot(pr_dt[, .(y = mean(y)), by = .(x)], aes(x = x, y = y)) + geom_path()
p_pr_global
ssvSignalHeatmap(pr_dt)
# pr_img_res = make_tsne_img(
#   bw_dt = pr_dt, apply_norm = FALSE,
#   tdt = tsne_res, #force_rewrite = TRUE,
#   n_points = n_points, line_colors = c("signal" = "black")
# )
# For one cell line, render side-by-side PDFs of (a) the ChIP profile density
# and (b) the TSS frequency over the same t-SNE layout, on shared axes.
make_tss_plot = function(qcell){
  xrng = range(tsne_res$tx)
  yrng = range(tsne_res$ty)
  pr_img_res = make_tsne_img(
    bw_dt = pr_dt, apply_norm = FALSE,
    tdt = tsne_res[cell == qcell], #force_rewrite = TRUE,
    xrng = xrng,
    yrng = yrng,
    n_points = n_points, line_colors = c("signal" = "black")
  )
  p_pr_density = plot_tsne_img_byCell(pr_img_res$images_dt,
                                      pr_img_res$tsne_dt[cell == qcell],
                                      n_points = n_points, N_ceiling = NULL)$plot +
    coord_cartesian(xlim = xrng, ylim = yrng)
  p_h7_density = plot_tsne_img_byCell(img_res$images_dt,
                                      img_res$tsne_dt[cell == qcell],
                                      n_points = n_points, N_ceiling = NULL)$plot +
    coord_cartesian(xlim = xrng, ylim = yrng)
  pg = cowplot::plot_grid(p_h7_density + labs(title = paste(qcell, "k4me3+k27me3")),
                          p_pr_density + labs(title = paste(qcell, "tss frequency")))
  ggsave(paste0("tmp_", qcell, "_tss.pdf"), plot = pg, width = 8, height = 4)
  # head(pr_img_res$tsne_dt[cell == cell, .N, by = .(bx, by)][order(N, decreasing = TRUE)])
  # head(img_res$tsne_dt[cell == cell, .N, by = .(bx, by)][order(N, decreasing = TRUE)])
}
make_tss_plot("H7")
make_tss_plot("CD34-01562")
#
# plot(pr_img_res$tsne_dt[cell == "H7"]$bx,
#      img_res$tsne_dt[cell == "H7"]$bx)
|
eb9144c89dd72d765c2443527397b850368f5d00
|
8e5c16034bee9a17ca7f9318a92b864f51ba58d1
|
/RandomScale/man/prepare_admb.Rd
|
58c6c36ac11be994670e26fb9e2ac1d47d0cde2b
|
[] |
no_license
|
jlaake/RandomScale
|
0532dd9fca561d99a8b6fc9a2d089bf15f61968a
|
96e212ccd3f3e024d283a509c9b5db8dd5b67b10
|
refs/heads/master
| 2021-01-18T22:41:52.669034
| 2016-05-24T22:48:54
| 2016-05-24T22:48:54
| 59,616,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
rd
|
prepare_admb.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{prepare_admb}
\alias{prepare_admb}
\title{Prepare to use ADMB}
\usage{
prepare_admb()
}
\description{
Sets environment variables for admb if Windows
}
|
347eff115722b4043bcd54d743ca54df69cc95df
|
b05191d9e25d592155490ca4f970e445a2a3e858
|
/man/annual.summary.climate.Rd
|
178e11c6c1548cae8693de7578bd490b63db7627
|
[] |
no_license
|
cran/RSAlgaeR
|
3dbdfb1b8b12c65f1f175f9014f03866261c98dd
|
4639cc2b23f3b1ed3be27a85c5410e845eeef6dc
|
refs/heads/master
| 2020-03-09T18:34:13.295148
| 2018-04-10T12:14:47
| 2018-04-10T12:14:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 839
|
rd
|
annual.summary.climate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annualsummaryFunctions.R
\name{annual.summary.climate}
\alias{annual.summary.climate}
\title{summarize climate conditions on an annual basis}
\usage{
annual.summary.climate(df, datecol, valuecol, parameter)
}
\arguments{
\item{df}{with estimated values, dates, location identifiers}
\item{datecol}{string, name of column with dates}
\item{valuecol}{string, name of column with climate parameter values}
\item{parameter}{string, name of parameter ("Precipitation","Temperature")}
}
\value{
list of annual (seasonal) summaries
}
\description{
summarize climate conditions on an annual basis
}
\examples{
data(climatedata)
sumdata <- annual.summary.climate(df=climatedata,valuecol="PRCP",datecol="DATE",
parameter="Precipitation")
}
|
1c94648c159bc043a22a4ed1d5dec7d86c332336
|
033383e71a1fe7a811fdd88f917d4491a124013d
|
/data_cleaning.R
|
93b5e40f45c94aa98b63eee7eae2313ad60a9438
|
[] |
no_license
|
NinoFoudraine/subwep
|
00f02b93da68abdc6a7f42eca9940a1e3fdb144e
|
5c383155dc99ef6404293297348dad9367fb7ce2
|
refs/heads/master
| 2022-04-10T19:38:44.127794
| 2020-02-17T14:44:11
| 2020-02-17T14:44:11
| 241,118,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,274
|
r
|
data_cleaning.R
|
########################################################################
######################## DATA CLEANING #################################
########################################################################

########################################################################
######################### Initialize ###################################
########################################################################
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable in a
# standalone script, hostile when sourced interactively.
rm(list = ls())
# Load packages.
library(utils)
library(stringr)
library(ngram) # for preprocess(x), to lose capitals and extra spaces

########################################################################
######################### Import and format data #######################
########################################################################
# Interactive pickers for the two semicolon-separated CSV exports.
# Fix: TRUE spelled out instead of the reassignable shorthand T.
OfferDetails = read.csv(file.choose(), header = TRUE, sep = ';', stringsAsFactors = FALSE)
Occupancy_table = read.csv(file.choose(), header = TRUE, sep = ';', stringsAsFactors = FALSE)

########################################################################
######################### Clean data ###################################
########################################################################
# Row 2310 contains a spurious USP entry: shift columns 13-20 left by one,
# pull the final value from the first field of the duplicate next row, then
# drop that row entirely.
for (i in 12:19){
  OfferDetails[2310,i] <- OfferDetails[2310,i+1]
}
OfferDetails[2310,20] <- OfferDetails[2311,1]
OfferDetails <- OfferDetails[-2311,]

# Normalise free-text columns row by row (looped because ngram::preprocess()
# is applied per string — confirm it is not vectorised before refactoring).
# Fix: seq_len(nrow(...)) is safe for zero-row frames, unlike 1:dim(...)[1].
for (i in seq_len(nrow(OfferDetails))){
  OfferDetails$USP1[i] = preprocess(OfferDetails$USP1[i])
  OfferDetails$USP2[i] = preprocess(OfferDetails$USP2[i])
  OfferDetails$USP3[i] = preprocess(OfferDetails$USP3[i])
  OfferDetails$MEAL_PLAN[i] = preprocess(OfferDetails$MEAL_PLAN[i])
}

# Replace the literal string 'NULL' by NA across the whole data frame.
OfferDetails[ OfferDetails == 'NULL' ] <- NA
# Review rating: treat '-' and zeros as missing; use '.' as decimal separator.
OfferDetails$REVIEW_RATING[OfferDetails$REVIEW_RATING == '-'] <- ''
OfferDetails$REVIEW_RATING[OfferDetails$REVIEW_RATING == 0] <- ''
OfferDetails$REVIEW_RATING <- str_replace(OfferDetails$REVIEW_RATING, ',', '.')
# Star rating: comma -> dot for numeric parsing; impute missing (0) as 3 stars.
OfferDetails$STAR_RATING <- str_replace(OfferDetails$STAR_RATING, ',', '.')
OfferDetails$STAR_RATING[OfferDetails$STAR_RATING == 0] <- 3
# Harmonise country names ('Costa del Sol' and trailing-space variants).
OfferDetails$COUNTRY_NAME[OfferDetails$COUNTRY_NAME == 'Costa del Sol'] = 'Spanje'
OfferDetails$COUNTRY_NAME[OfferDetails$COUNTRY_NAME == 'Spanje '] = 'Spanje'
OfferDetails$COUNTRY_NAME[OfferDetails$COUNTRY_NAME == 'Egypte '] = 'Egypte'
# change country_name = roda and city_name = Griekenland around
OfferDetails$CITY_NAME[OfferDetails$COUNTRY_NAME == 'Roda'] = 'Roda'
OfferDetails$COUNTRY_NAME[OfferDetails$COUNTRY_NAME == 'Roda'] = 'Griekenland'
# change meal plans
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'ultra all inclusive', "UAI")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'logies met ontbijt', "LO")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'logies en ontbijt', "LO")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'logies & ontbijt', "LO")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'all inclusive', "AI")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'logies ontbijt', "LO")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'half pension', "HP")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'ail inclusive', "AI")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'all inlcusive', "AI")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'volpension', "VP")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'logies', "LG")
OfferDetails$MEAL_PLAN = str_replace(OfferDetails$MEAL_PLAN,'halfpension', "HP")
# dubbele spelingen van zelfde gebieden universeren
## Tunesia and Tunesie
OfferDetails$COUNTRY_NAME[str_detect(OfferDetails$COUNTRY_NAME,'Tunes') == TRUE] <- 'Tunesie'
## costa de Almeria
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME,'Costa de Almer') == TRUE] <- 'Costa de Almeria'
## Costa del Sol
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Costa del Sol') == TRUE] <- 'Costa del Sol'
## Cyprus, Cyprus. and Cyprus (PFO)
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME,'Cyprus') == TRUE] <- 'Cyprus'
## Epirus and Epirug (Parga)
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Epirus') == TRUE] <- 'Epirus'
## Kreta and Kreta (HER)
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Kreta') == TRUE] <- 'Kreta'
## Peloponnesos correctly spelled
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Pel') == TRUE] <- 'Peloponnesos'
## Turkish/Turkse riviera spelling samenvoegen
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Turk') == TRUE] <- 'Turkse Riviera'
## Zwarte zee gebied generaliseren -> Zwarte zee, Zwarte zeekust en Zwarte zeekust Varna samenvoegen
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Zwart') == TRUE] <- "Zwarte zee"
## (Noord/Zuid) Egeische zee
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Ege') == TRUE & str_detect(OfferDetails$REGION_NAME, 'Zuid') == FALSE & str_detect(OfferDetails$REGION_NAME, 'Noord') == FALSE] <- 'Egeische kust'
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Ege') == TRUE & str_detect(OfferDetails$REGION_NAME, 'Zuid') == TRUE] <- 'Zuid-Egeische kust'
OfferDetails$REGION_NAME[str_detect(OfferDetails$REGION_NAME, 'Ege') == TRUE & str_detect(OfferDetails$REGION_NAME, 'Noord') == TRUE] <- 'Noord-Egeische kust'
########################################################################
#################### data transformation ###############################
########################################################################
# personen per kamer
OfferDetails = merge(OfferDetails, Occupancy_table, by.x = "ROOM_OCCUPANCY", by.y = "ï..Room_types", sort = FALSE)
# discount rate
OfferDetails$DISCOUNT_RATE <- (as.numeric(OfferDetails$PRICE_ORIGINAL) - as.numeric(OfferDetails$PRICE)) / as.numeric(OfferDetails$PRICE_ORIGINAL)
# departure month binary vars
OfferDetails$JANUARY <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Ja')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'ja'))
OfferDetails$FEBRUARY <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Fe')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'fe'))
OfferDetails$MARCH <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Mar')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'ma'))
OfferDetails$APRIL <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Ap')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'ap'))
OfferDetails$MAY <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'May')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'me'))
OfferDetails$JUNE <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Jun')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'jun'))
OfferDetails$JULY <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Jul')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'jul'))
OfferDetails$AUGUST <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Au')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'au'))
OfferDetails$SEPTEMBER <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Se')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'se'))
OfferDetails$OCTOBER <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'Oc')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'ok'))
OfferDetails$NOVEMBER <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'No')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'no'))
OfferDetails$DECEMBER <- as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'De')) + as.numeric(str_detect(OfferDetails$DEPARTURE_DATE, 'de'))
# categorical values
OfferDetails$COUNTRY_NAME <- as.factor(OfferDetails$COUNTRY_NAME)
OfferDetails$MEAL_PLAN <- as.factor(OfferDetails$MEAL_PLAN)
OfferDetails$OFFER_VISUALISATION <- as.factor(OfferDetails$OFFER_VISUALISATION)
OfferDetails$REGION_NAME <- as.factor(OfferDetails$REGION_NAME)
OfferDetails$CITY_NAME <- as.factor(OfferDetails$CITY_NAME)
# numerical values
OfferDetails$OFFER_POSITION <- as.numeric(OfferDetails$OFFER_POSITION)
OfferDetails$REVIEW_RATING <- as.numeric(OfferDetails$REVIEW_RATING)
OfferDetails$DURATION <- as.numeric(OfferDetails$DURATION)
OfferDetails$PRICE <- as.numeric(OfferDetails$PRICE)
OfferDetails$PRICE_ORIGINAL <- as.numeric(OfferDetails$PRICE_ORIGINAL)
OfferDetails$STAR_RATING <- as.numeric(OfferDetails$STAR_RATING)
OfferDetails$Persoon <- as.numeric(OfferDetails$Persoon)
OfferDetails$max..volwassen <- as.numeric(OfferDetails$max..volwassen)
OfferDetails$max..kinderen <- as.numeric(OfferDetails$max..kinderen)
OfferDetails$DISCOUNT_RATE <- as.numeric(OfferDetails$DISCOUNT_RATE)
OfferDetails$JANUARY <- as.numeric(OfferDetails$JANUARY)
OfferDetails$FEBRUARY <- as.numeric(OfferDetails$FEBRUARY)
OfferDetails$MARCH <- as.numeric(OfferDetails$MARCH)
OfferDetails$APRIL <- as.numeric(OfferDetails$APRIL)
OfferDetails$MAY <- as.numeric(OfferDetails$MAY)
OfferDetails$JUNE <- as.numeric(OfferDetails$JUNE)
OfferDetails$JULY <- as.numeric(OfferDetails$JULY)
OfferDetails$AUGUST <- as.numeric(OfferDetails$AUGUST)
OfferDetails$SEPTEMBER <- as.numeric(OfferDetails$SEPTEMBER)
OfferDetails$OCTOBER <- as.numeric(OfferDetails$OCTOBER)
OfferDetails$NOVEMBER <- as.numeric(OfferDetails$NOVEMBER)
OfferDetails$DECEMBER <- as.numeric(OfferDetails$DECEMBER)
# review rating correction
OfferDetails$REVIEW_RATING[ OfferDetails$REVIEW_RATING == 0 ] <- NA
OfferDetails$REVIEW_RATING[ OfferDetails$REVIEW_RATING == '-' ] <- NA
OfferDetails$REVIEW_RATING = str_replace(OfferDetails$REVIEW_RATING,',', '.')
OfferDetails$REVIEW_RATING <- as.numeric(OfferDetails$REVIEW_RATING)
# missing values
OfferDetails$REVIEW_RATING[is.na(OfferDetails$REVIEW_RATING)] <- mean(OfferDetails$REVIEW_RATING[!is.na(OfferDetails$REVIEW_RATING)])
OfferDetails$PRICE[is.na(OfferDetails$PRICE)] <- 299
OfferDetails$PRICE_ORIGINAL[is.na(OfferDetails$PRICE_ORIGINAL)] <- OfferDetails$PRICE[is.na(OfferDetails$PRICE_ORIGINAL)]
OfferDetails$DISCOUNT_RATE[is.na(OfferDetails$DISCOUNT_RATE)] <- 0
# price per day
OfferDetails$PRICE_PER_DAY <- (as.numeric(OfferDetails$PRICE) / as.numeric(OfferDetails$DURATION))
# transforming USPs in to binary variables
OfferDetails$BEACH <- as.numeric(str_detect(OfferDetails$USP1, 'strand')) + as.numeric(str_detect(OfferDetails$USP2, 'strand')) + as.numeric(str_detect(OfferDetails$USP3, 'strand'))
# ...
# ...
# more to come
# transform categorical variables to multiple binary variables
## Offer visualisation
OfferDetails$FULL_WIDTH <- ifelse(str_detect(OfferDetails$OFFER_VISUALISATION,'full'),1,0)
## country name (alles = 0 betekent Griekenland)
OfferDetails$BULGARIA <- ifelse(OfferDetails$COUNTRY_NAME == 'Bulgarije',1,0)
OfferDetails$EGYPT <- ifelse(OfferDetails$COUNTRY_NAME == 'Egypte',1,0)
OfferDetails$CYPRUS <- ifelse(OfferDetails$COUNTRY_NAME == 'Cyprus',1,0)
OfferDetails$PORTUGAL <- ifelse(OfferDetails$COUNTRY_NAME == 'Portugal',1,0)
OfferDetails$CROATIA <- ifelse(OfferDetails$COUNTRY_NAME == 'Kroatië',1,0)
OfferDetails$SPAIN <- ifelse(OfferDetails$COUNTRY_NAME == 'Spanje',1,0)
OfferDetails$TURKEY <- ifelse(OfferDetails$COUNTRY_NAME == 'Turkije',1,0)
OfferDetails$MONTENEGRO <- ifelse(OfferDetails$COUNTRY_NAME == 'Montenegro',1,0)
OfferDetails$CAPE_VERDE <- ifelse(OfferDetails$COUNTRY_NAME == 'Kaapverdië',1,0)
OfferDetails$MALTA <- ifelse(OfferDetails$COUNTRY_NAME == 'Malta',1,0)
OfferDetails$ITALY <- ifelse(OfferDetails$COUNTRY_NAME == 'Italië',1,0)
OfferDetails$UAE <- ifelse(OfferDetails$COUNTRY_NAME == 'Verenigde Arabische Emiraten',1,0)
OfferDetails$MOROCCO <- ifelse(OfferDetails$COUNTRY_NAME == 'Marokko',1,0)
OfferDetails$TUNESIA <- ifelse(OfferDetails$COUNTRY_NAME == 'Tunesie',1,0)
OfferDetails$ISRAEL <- ifelse(OfferDetails$COUNTRY_NAME == 'Israël',1,0)
## Meal Plan (alles = 0 betekent AI (all inclusive))
OfferDetails$LG <- ifelse(OfferDetails$MEAL_PLAN == 'LG',1,0)
OfferDetails$LO <- ifelse(OfferDetails$MEAL_PLAN == 'LO',1,0)
OfferDetails$UAI <- ifelse(OfferDetails$MEAL_PLAN == 'UAI',1,0)
OfferDetails$HP <- ifelse(OfferDetails$MEAL_PLAN == 'HP',1,0)
OfferDetails$VP <- ifelse(OfferDetails$MEAL_PLAN == 'VP',1,0)
## Duration (alles = 0 betekent 8 dagen)
OfferDetails$FOUR_DAYS <- ifelse(OfferDetails$DURATION == 4,1,0)
OfferDetails$FIVE_DAYS <- ifelse(OfferDetails$DURATION == 5,1,0)
OfferDetails$SIX_DAYS <- ifelse(OfferDetails$DURATION == 6,1,0)
OfferDetails$SEVEN_DAYS <- ifelse(OfferDetails$DURATION == 7,1,0)
OfferDetails$NINE_DAYS <- ifelse(OfferDetails$DURATION == 9,1,0)
OfferDetails$TEN_DAYS <- ifelse(OfferDetails$DURATION == 10,1,0)
OfferDetails$ELEVEN_DAYS <- ifelse(OfferDetails$DURATION == 11,1,0)
OfferDetails$TWELVE_DAYS <- ifelse(OfferDetails$DURATION == 12,1,0)
OfferDetails$THIRTEEN_DAYS <- ifelse(OfferDetails$DURATION == 13,1,0)
## remove Duration column
OfferDetails <- subset(OfferDetails, select = -DURATION)
# order by offerid + mailid
OfferDetails = OfferDetails[order(OfferDetails$OFFERID, OfferDetails$MAILID),]
# keep only numeric values
OfferDetails <- dplyr::select_if(OfferDetails, is.numeric)
|
a771b184a237a92a88598b7383ae567500c6bd73
|
8a9395ba7d69a651b8fdd0ca74158bbb92ce6a65
|
/man/gpuVector-methods.Rd
|
7c59add9c11120db107402720e92af8ca17b4f4c
|
[] |
no_license
|
bryant1410/gpuR
|
ce8a818a3cba6d4ac257e5b3035e3012b92df0f7
|
6e56cec81783fb9d97be1629a8063289c2c92633
|
refs/heads/master
| 2021-01-19T20:08:42.103635
| 2017-04-17T08:54:36
| 2017-04-17T08:54:36
| 88,490,189
| 1
| 0
| null | 2017-04-17T08:54:36
| 2017-04-17T08:54:36
| null |
UTF-8
|
R
| false
| true
| 1,078
|
rd
|
gpuVector-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gpuVector.R
\docType{methods}
\name{gpuVector}
\alias{gpuVector}
\alias{gpuVector,missingOrNULL}
\alias{gpuVector,missingOrNULL,ANY-method}
\alias{gpuVector,vector}
\alias{gpuVector,vector,missing-method}
\title{Construct a gpuVector}
\usage{
gpuVector(data, length, type = NULL, ...)
\S4method{gpuVector}{vector,missing}(data, type = NULL, ctx_id = NULL)
\S4method{gpuVector}{missingOrNULL,ANY}(data, length, type = NULL,
ctx_id = NULL)
}
\arguments{
\item{data}{An object that is or can be converted to a
\code{vector}}
\item{length}{A non-negative integer specifying the desired length.}
\item{type}{A character string specifying the type of gpuVector. Default
is NULL where type is inherited from the source data type.}
\item{...}{Additional method to pass to gpuVector methods}
\item{ctx_id}{An integer specifying the object's context}
}
\value{
A gpuVector object
}
\description{
Construct a gpuVector of a class that inherits
from \code{gpuVector}
}
\author{
Charles Determan Jr.
}
|
3e9d23be0afb1b1d3c1b7ea88b51c1e038ba0502
|
96cd5efd27207e07e9e5954bde92daa4644fa73e
|
/ComputingDataAnalysis_Quiz2_Question4.R
|
51c0750b1529806ae0fd72ef7114e86d0c90d020
|
[] |
no_license
|
Tati86/Computing_for_Data_Analysis
|
200e678ff2379c990b5e7bd597e956e9ab7e2ab7
|
2349fd23d6bebac0f35ba7da62a908ab240b4d12
|
refs/heads/master
| 2021-01-10T18:08:28.416235
| 2016-10-27T23:10:13
| 2016-10-27T23:10:13
| 71,941,589
| 0
| 0
| null | 2016-10-25T21:43:41
| 2016-10-25T21:43:40
| null |
UTF-8
|
R
| false
| false
| 2,974
|
r
|
ComputingDataAnalysis_Quiz2_Question4.R
|
rankhospital<- function(state,outcome, num="best"){
##read outcome data
outcome1=read.csv('outcome-of-care-measures.csv', colClasses="character")
State=as.numeric(outcome1[, "State"])
Hospital.Name=as.numeric(outcome1[, "Hospital.Name"])
Heart.Attack=as.numeric(outcome1[, 11])
Heart.Failure=as.numeric(outcome1[, 17])
Pneumonia=as.numeric(outcome1[, 23])
my.data=data.frame(I(State),I(Hospital.Name),I(Heart.Attack), I(Heart.Failure),I(Pneumonia))
##check that state and outcome data are valid
###outcome= c(heart.attack, heart.failure,pneumonia)
if(nrow(state.data) == 0){
stop("Invalid state")
}
for (i in num){
if(outcome == "heart attack"){
order.heartattack=order(state.data$Heart.Attack,decreasing=TRUE)
state.data$Rank=NA
state.data$Rank[order.heartattack]=1:nrow(state.data)
if(num == "best"){
rank=1
RankHospital=state.data[which(rank==1),"Hospital.Name"]
} else if (num == "worst"){
rank= max(state.data$Rank)
RankHospital=state.data[which(state.data$Rank==rank),"Hospital.Name"]
}else {
num=as.integer(num)
RankHospital=state.data[which(state.data$Rank==num),"Hospital.Name"]
}
}else if (outcome == "heart failure"){
order.heartfailure=order(state.data$Heart.Failure)
state.data$Rank=NA
state.data$Rank[order.heartfailure]=1:nrow(state.data)
if(num == "best"){
rank=1
RankHospital=state.data[which(rank==1),"Hospital.Name"]
} else if (num == "worst"){
rank= max(state.data$Rank)
RankHospital=state.data[which(state.data$Rank==rank),"Hospital.Name"]
}else {
num=as.integer(num)
RankHospital=state.data[which(state.data$Rank==num),"Hospital.Name"]
}
}else if (outcome == "pneumonia"){
order.pneumonia=order(state.data$Pneumonia)
state.data$Rank=NA
state.data$Rank[order.pneumonia]=1:nrow(state.data)
if(num == "best"){
rank=1
RankHospital=state.data[which(rank==1),"Hospital.Name"]
} else if (num == "worst"){
rank= max(state.data$Rank)
RankHospital=state.data[which(state.data$Rank==rank),"Hospital.Name"]
}else {
num=as.integer(num)
RankHospital=state.data[which(state.data$Rank==num),"Hospital.Name"]
}
} else {
stop("Invalid outcome")
}
RankHospital = as.character(RankHospital)
return(RankHospital)
}
}
rankhospital("TX", "heart failure", 4)
|
99a869a638528d9ebb1a6803571f8f51822e31c9
|
ad5a3396565fcb9bada6b902a3f8a5d43fc4b354
|
/scripts/compareBedScript.R
|
ac1024a44696f514ac267b9611dbcc7d8eff9674
|
[
"Apache-2.0"
] |
permissive
|
enriquea/PepBed
|
08840c8a68e2f086020a4534cecc1891d7103688
|
dbb22f6f99c3b809e23625f5d6e8569c19338942
|
refs/heads/master
| 2021-09-24T14:30:49.698811
| 2018-10-10T14:22:51
| 2018-10-10T14:22:51
| 110,535,201
| 1
| 0
| null | 2017-11-13T10:40:14
| 2017-11-13T10:40:14
| null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
compareBedScript.R
|
library(PepBedR)
library(VennDiagram)
library(ggplot2)
library(gplots)
bedFileDataPepGenome <- readBedFile("/Users/yperez/IdeaProjects/github-repo/BDP/benchmark/PepGenome-PeptideAtlas.bed");
bedFileDataPepGenome <- setBed12Columns(bedFileDataPepGenome);
bedFileDataPGx <- readBedFile("/Users/yperez/IdeaProjects/github-repo/BDP/benchmark/PGx-PeptideAtlas-Human.bed")
bedFileDataPGx <- setBed12Columns(bedFileDataPGx);
#Generate Venn Diagram Overlap the overlap of peptides.
peptideLists <-list(PepGenome = bedFileDataPepGenome$name, PGx = bedFileDataPGx$name)
venn.plot <- venn.diagram(peptideLists , filename = "../benchmark/PepGenome.png", fill=c("red", "green"), alpha=c(0.2,0.2), cex = 1, cat.fontface=1, cat.cex=1, category.names=c("PepGenome", "PGx"), main="Number of Peptides map to the Human ENSEMBL Genome", scaled = FALSE, cat.default.pos='text')
peptideListsPositions <-list(PepGenome = paste0(bedFileDataPepGenome$name, bedFileDataPepGenome$chromStart, bedFileDataPepGenome$chromEnd), PGx = paste0(bedFileDataPGx$name, bedFileDataPGx$chromStart, bedFileDataPGx$chromEnd))
venn.plot <- venn.diagram(peptideListsPositions , filename = "../benchmark/PepGenomeWithPositions.png", fill=c("red", "green"), alpha=c(0.2,0.2), cex = 1, cat.fontface=1, cat.cex=1, category.names=c("PepGenome", "PGx"), main="Number of Peptides map to the Human ENSEMBL Genome (Compare using Chromosome Positions)", scaled = FALSE, cat.default.pos='text')
|
0fcd5d749e39de7bcea6f771b7762accbc8f8661
|
67721a8b22ca21cf46f10865000274a3494dd8bf
|
/tests/testthat/test_seas.R
|
47d31b17fa4e9073eb569f5e519922abed05c0f8
|
[] |
no_license
|
lkegel/idxrepr
|
c157c9fc6aab8afdbf20f832983d3e1c26559958
|
2f3d35b1cae9fa6cd95047b025d3df5e7914d937
|
refs/heads/master
| 2022-07-02T08:58:02.729315
| 2019-10-11T14:25:40
| 2020-05-09T15:12:18
| 214,453,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 821
|
r
|
test_seas.R
|
context("seas")
test_that("unaggregated", {
method <- mgr_init("seas")
expect_equal(3, length(method))
rnvl <- list(rep(seq(12), 11), rep(seq(12) + 12, 11))
repr <- lapply(rnvl, represent.seas, method = method)
expected <- seq(12)
expect_equal(expected, repr[[1]])
expect_equal(expected + 12, repr[[2]])
di <- distance.seas(method, repr[[1]], repr[[2]])
expect_equal(sqrt(144 * 132), di)
})
test_that("aggregated", {
method <- mgr_init("seas")
method$w <- 3
rnvl <- list(rep(seq(12), 11), rep(seq(12) + 12, 11))
repr <- lapply(rnvl, represent.seas, method = method)
expected <- c(sum(1:4), sum(5:8), sum(9:12)) / 4
expect_equal(expected, repr[[1]])
expect_equal(expected + 12, repr[[2]])
di <- distance.seas(method, repr[[1]], repr[[2]])
expect_equal(sqrt(144 * 132), di)
})
|
81fd6bb4d1ae7b5cdc181a33045eefda0c317ba9
|
02c37615762af39de855590a40efd5d29858c9fc
|
/man/ridgePlot.Rd
|
4bd0d3f316c9c89b501f3acb9debb6c5712de3e5
|
[] |
no_license
|
ericdunipace/WpProj
|
d950d1f8e36094b1b93cd2bb62e99fc1b9ec3aef
|
6039e5ce8c5d3386e776fc1e6784807411805889
|
refs/heads/master
| 2023-03-27T19:23:12.132980
| 2021-04-02T21:32:56
| 2021-04-02T21:32:56
| 229,637,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 957
|
rd
|
ridgePlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ridgePlot.R
\name{ridgePlot}
\alias{ridgePlot}
\title{Ridge plots for range of coefficients}
\usage{
ridgePlot(
fit,
index = 1,
minCoef = 1,
maxCoef = 10,
scale = 1,
alpha = 0.5,
full = NULL,
transform = function(x) { x },
xlab = "Predictions",
bandwidth = NULL
)
}
\arguments{
\item{fit}{A `WpProj` object of list of `WpProj` objects}
\item{index}{The observation number to select. Can be a vector}
\item{minCoef}{The minimum number of coefficients to use}
\item{maxCoef}{The maximum number of coefficients to use}
\item{scale}{How the densities should be scale}
\item{alpha}{Alpha term from ggplot2 object}
\item{full}{"True" prediction to compare to}
\item{transform}{tranform for predictions}
\item{xlab}{x-axis label}
\item{bandwidth}{Bandwidth for kernel}
}
\value{
a `ggridges` plot
}
\description{
Ridge plots for range of coefficients
}
|
c7fce3de5fa88479a785e6546358465c17e29f1e
|
2e5c9d872ee261921d44a48e2647269ce312d313
|
/Scripts/read_data.R
|
edc5f24ec8216d32324e8c92f7a94359956e8b8c
|
[] |
no_license
|
tmat2020/ExData_Plotting2
|
14ae53f2e17da532096d92e748148573a22ff76d
|
8743f42342fbab2cecc2a2525333a06761c95193
|
refs/heads/master
| 2021-01-01T09:54:39.904906
| 2018-11-22T12:40:05
| 2018-11-22T12:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 306
|
r
|
read_data.R
|
# Load and unzip dataframe file
temp <- tempfile()
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url = url,method = "curl",destfile = temp)
unzip(zipfile = temp)
unlink(temp)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
|
d30d1f8e52690e69109046de9ce93c6dfa085923
|
995182d67072061d54b02947055d1fb66e319e7a
|
/species_replace.R
|
8a96dd05054eb9aea2941de7991de1d6c9432cb2
|
[] |
no_license
|
healyke/FestR_replacement_tests
|
4e75caa6e916217806e74f44198a31c1cd74cf66
|
70dc0cab15464a9ca38cbd175d5680d942618fc9
|
refs/heads/master
| 2016-08-11T00:33:52.867069
| 2016-04-01T13:36:01
| 2016-04-01T13:36:01
| 53,622,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,604
|
r
|
species_replace.R
|
#' replaces species tef with a NA and runs tefMcmcglmm
species_replace <- function(tef_data,
isotope = c("carbon","nitrogen"),
formula = ~delta15N ~ source.iso.15N + diet.type + habitat,
random = ~ animal + sp.col + tissue,
prior,
output.label,
nitt = c(120000),
thin = c(50),
burnin = c(20000),
no.chains = c(2),
convergence = c(1.1),
ESS = c(1000)) {
#####decide on the isotope###
if((isotope == "carbon") == T){
iso_term <- c("delta13C")
} else{
if((isotope == "nitrogen") == T){
iso_term <- c("delta15N")
}}
taxa_list <- unique(tef_data$data$animal)
mod_full <- list()
for(i in 1:length(taxa_list)){
spec_list <- taxa_list
#remove the entire species
tef_data_na <- tef_data
tef_data_na$data <- tef_data_na$data[tef_data_na$data$animal != spec_list[i],]
tef_data_dropped <- tef_data$data[tef_data$data$animal == spec_list[i],]
for(j in 1:(length(tef_data_dropped[,1]))){
tef_data_comb_na <- rbind(tef_data_dropped[j,] , tef_data_na$data)
tef_data_run <- tef_data_na
tef_data_run$data <- tef_data_comb_na
tef_data_run$data[1, iso_term] <- NA
output <- paste(output.label,"spe_teff",j, tef_data_dropped[j,"animal"], sep = "_")
mod <- tefMcmcglmm(mulTree.data = tef_data_run, formula = formula, random.terms = random, prior = prior, output = output, nitt = nitt, thin = thin, burnin = burnin, no.chains = no.chains, convergence = convergence, ESS = ESS)
mod_full[[spec_list[i]]][[j]]<- mod$tef_global
}
}
return(list(mod_full = mod_full))
}
|
9e5fa6849700f9c2aa04db7edb7fc55a20e7fae6
|
5373cfa4dfde2899762053daf3251c846adf5b35
|
/man/GreenplumDriver-class.Rd
|
3b5a855e23a6ebc69401b404be6964885dd79b9f
|
[] |
no_license
|
mwillumz/RGreenplum
|
abbd0818f92a711baad07ae143442a75ea1e8a01
|
734c888755f831c6589778c9b40bcb80ad477bf2
|
refs/heads/master
| 2021-04-09T15:40:49.138646
| 2018-06-22T01:02:08
| 2018-06-22T01:02:08
| 125,784,481
| 3
| 1
| null | 2019-10-14T14:15:42
| 2018-03-19T01:19:52
|
R
|
UTF-8
|
R
| false
| true
| 381
|
rd
|
GreenplumDriver-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GreenplumDriver.R
\docType{class}
\name{GreenplumDriver-class}
\alias{GreenplumDriver-class}
\alias{dbUnloadDriver,GreenplumDriver-method}
\title{GreenplumDriver and methods.}
\usage{
\S4method{dbUnloadDriver}{GreenplumDriver}(drv, ...)
}
\description{
GreenplumDriver and methods.
}
\keyword{internal}
|
5247c04c2a78c4d59fdc42b3ff7f6427b6b4224d
|
0433b19a7887ee802136638f77506ff37ce94c13
|
/man/is.chartjs.Rd
|
a79b4cfb25f95c272048a5d9953f3dac104fa805
|
[
"MIT"
] |
permissive
|
KO112/chartjs
|
57b4377220c7acf3bd7bff7f518ec70fac3ff9e8
|
233bc27ceecdc524127cb579c5116e18fc98ed6e
|
refs/heads/master
| 2022-11-30T09:44:09.482930
| 2020-08-05T16:44:34
| 2020-08-05T16:44:34
| 274,043,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 507
|
rd
|
is.chartjs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chartjs.R
\name{is.chartjs}
\alias{is.chartjs}
\title{Check that an Object is a \code{Chart.js} Plot Object}
\usage{
is.chartjs(x)
}
\arguments{
\item{x}{The object to check (any R object).}
}
\value{
Whether the object inherits from \code{chartjs}.
}
\description{
Check that an Object is a \code{Chart.js} Plot Object
}
\examples{
is.chartjs(mtcars) # FALSE
is.chartjs(chartjs(mtcars, x = ~ mpg)) # TRUE
}
|
8e8c8df94a88afcb9ea4226b5bf3104a2a6f7cf2
|
3f312cabe37e69f3a2a8c2c96b53e4c5b7700f82
|
/ver_devel/bio3d/tests/testthat/test-mol2.R
|
a63b59373cad04d21e23343db30d1b4712fae266
|
[] |
no_license
|
Grantlab/bio3d
|
41aa8252dd1c86d1ee0aec2b4a93929ba9fbc3bf
|
9686c49cf36d6639b51708d18c378c8ed2ca3c3e
|
refs/heads/master
| 2023-05-29T10:56:22.958679
| 2023-04-30T23:17:59
| 2023-04-30T23:17:59
| 31,440,847
| 16
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,661
|
r
|
test-mol2.R
|
context("Testing basic PDB structure operation")
test_that("read.mol2() reads a mol2 from zinc", {
## Simple test with aspirin
file <- system.file("examples/aspirin.mol2",package="bio3d")
invisible(capture.output(mol <- read.mol2(file)))
expect_is(mol$atom, "data.frame")
expect_true(inherits(mol, "mol2"))
expect_true(inherits(mol$xyz, "xyz"))
expect_equal(nrow(mol$atom), 20)
expect_equal(nrow(mol$bond), 20)
expect_equal(mol$info[1], 20)
expect_equal(mol$info[2], 20)
expect_equal(sum(mol$atom$elety=="H"), 7)
elena <- c("C1", "C2", "O1", "O2", "C3", "C4", "C5",
"C6", "C7", "C8", "C9", "O3", "O4", "H1", "H2",
"H3", "H4", "H5", "H6", "H7")
expect_equal(mol$atom$elena, elena)
x <- c(-1.4238, -1.3441, -1.4532, -1.1519, -0.9822)
y <- c(-2.5790, -3.9491, -4.7933, -4.2739, -2.8882)
z <- c(0.6434, 0.0976, 0.0938, 0.0032, -0.0844)
expect_equal(mol$atom$x[1:5], x)
expect_equal(mol$atom$y[6:10], y)
expect_equal(mol$atom$z[16:20], z)
})
test_that("read.mol2() reads and stores data properly", {
skip_on_cran()
skip_on_travis()
file <- system.file("examples/aspirin.mol2",package="bio3d")
invisible(capture.output(mol <- read.mol2(file)))
f <- tempfile()
write.mol2(mol, file=f)
mol2 <- read.mol2(f)
expect_equal(mol, mol2)
})
test_that("basic atom select and trim of mol2", {
skip_on_cran()
skip_on_travis()
file <- system.file("examples/aspirin.mol2",package="bio3d")
invisible(capture.output(mol <- read.mol2(file)))
capture.output( sele <- atom.select(mol, "noh") )
expect_equal(length(sele$atom), 13)
capture.output( sele <- atom.select(mol, elety="H") )
expect_equal(length(sele$atom), 7)
capture.output( sele <- atom.select(mol, elena="C1") )
expect_equal(length(sele$atom), 1)
capture.output( sele <- atom.select(mol, resno=1) )
expect_equal(length(sele$atom), 20)
capture.output( sele <- atom.select(mol, "noh") )
mol2 <- trim(mol, sele)
expect_equal(nrow(mol2$atom), 13)
expect_equal(nrow(mol2$bond), 13)
expect_equal(length(mol2$xyz), 39)
xyz <- c(-1.4238, 1.4221, 1.2577, -1.3441, -0.0813)
expect_equal(mol2$xyz[1:5], xyz)
})
test_that("converting mol2 to pdb works", {
skip_on_cran()
skip_on_travis()
file <- system.file("examples/aspirin.mol2",package="bio3d")
invisible(capture.output(mol <- read.mol2(file)))
capture.output( pdb <- as.pdb(mol) )
expect_equal(nrow(pdb$atom), nrow(mol$atom))
expect_equal(pdb$xyz, mol$xyz)
expect_equal(mol$atom$elena, pdb$atom$elety)
expect_equal(mol$atom$x, pdb$atom$x)
expect_equal(mol$atom$charge, pdb$atom$charge)
})
|
884b5ca86b1306f62c08711f9346a09b258a1d92
|
fb91bf05835aa980d23e364cd7853a37f38da06a
|
/man/make_barplot.Rd
|
93d0957e87d82d618a8cfe5308e54d32bddb93fe
|
[] |
no_license
|
retaoliveira/chronicle
|
da65dd0c26ef3595cd8dd63fb0fde5d3f0b1683f
|
42df3f38c1def7f96fa82cb3d73d738cf029d75b
|
refs/heads/master
| 2023-02-19T02:27:22.224618
| 2021-01-21T17:32:29
| 2021-01-21T17:32:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,864
|
rd
|
make_barplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barplots.R
\name{make_barplot}
\alias{make_barplot}
\title{Create a bar plot from a data frame through ggplotly}
\usage{
make_barplot(
dt,
bars,
value = NULL,
break_bars_by = NULL,
horizontal = FALSE,
sort_by_value = FALSE,
sort_decreasing = TRUE,
ggtheme = "minimal",
x_axis_label = NULL,
y_axis_label = NULL,
plot_palette = NULL,
plot_palette_generator = "plasma"
)
}
\arguments{
\item{dt}{data.frame containing the data to plot.}
\item{bars}{Name of the column containing the different groups.}
\item{value}{Name of the columns to use as value on the y axis of the plot. If NULL (default), counts will be used.}
\item{break_bars_by}{Name of the categorical variable used to break each bar}
\item{horizontal}{Plot the bars horizontally. Default is FALSE}
\item{sort_by_value}{Sort the bars by value. Default is FALSE}
\item{sort_decreasing}{Sort the values decreasingly. Default is TRUE, but sort_by_value must also be TRUE.}
\item{ggtheme}{ggplot2 theme function to apply. Default is ggplot2::theme_minimal.}
\item{x_axis_label}{Label for the x axis.}
\item{y_axis_label}{Label for the y axis.}
\item{plot_palette}{Character vector of hex codes specifying the colors to use on the plot.}
\item{plot_palette_generator}{Palette from the viridis package used in case plot_palette is unspecified or insufficient for the number of colors required}
}
\value{
A plotly-ized version of a ggplot bar plot.
}
\description{
Create a bar plot from a data frame through ggplotly
}
\examples{
make_barplot(dt = iris, bars = 'Species', value = 'Sepal.Length')
make_barplot(dt = ggplot2::mpg,
bars = 'manufacturer',
break_bars_by = 'model',
value = 'cty',
horizontal = TRUE,
sort_by_value = TRUE)
}
|
a6ce58b8cedb292e5b635bd2fccfbf5cb8ae34bc
|
93c4cd901206f7794d4defd29bf87846b0d3df31
|
/minfi_script.R
|
46cc5817f93597f19975eb69db5abf85faaef297
|
[] |
no_license
|
rahulk87/myCodes
|
e48280d3336207e85b9fca9f9b49a96cffefd476
|
7ddb2621b9fdfe682d9bed0f5cb0f1297abcba38
|
refs/heads/master
| 2020-06-20T04:00:31.360335
| 2018-06-07T20:55:02
| 2018-06-07T20:55:02
| 74,883,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 961
|
r
|
minfi_script.R
|
library('minfi')
library('IlluminaHumanMethylationEPICmanifest')
library('IlluminaHumanMethylationEPICanno.ilm10b2.hg19')
library('IlluminaHumanMethylation450kmanifest')
library('IlluminaHumanMethylation450kanno.ilmn12.hg19')
base = "../rawdata/"
targets = read.metharray.sheet(base)
RGSet = read.metharray.exp(base=base, targets=targets)
RGSet@annotation = c(array="IlluminaHumanMethylationEPIC", annotation="ilm10b2.hg19")
gSet850 = preprocessFunnorm(RGSet)
snps = getSnpInfo(gSet850)
gSet850 = addSnpInfo(gSet850)
gSet850 = dropLociWithSnps(gSet850, snps=c("SBE","CpG"), maf=0)
beta850 = getBeta(gSet850)
phenoData = pData(gSet850)
colnames(beta850) = as.character(phenoData[,2])
annotation = getAnnotation(gSet850)
index = annotation[,"chr"]=="chrX" | annotation[,"chr"]=="chrY"
beta850 = beta850[!index,,drop=FALSE]
beta850_ann <- merge(beta850, annotation, by="row.names")
write.table(beta850_ann, file="beta850_ann.txt", sep="\t", quote=F, row.names=F)
|
8848c0bd2a76b286c49dc690ba0d273f63237935
|
ac04daadad3f3e6068eb466b08762eb75046e213
|
/Script_Figures_Article2.R
|
80f861391019149a78ed962753f606e4e48177e3
|
[] |
no_license
|
davbauman/MEM_W-matrices
|
dda782b66d93ba119dced4e290a52de992140ed8
|
870cfe281863869b440577f37c64a44df7db4516
|
refs/heads/master
| 2021-01-23T10:29:14.542736
| 2018-01-01T11:34:29
| 2018-01-01T11:34:29
| 93,063,945
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,423
|
r
|
Script_Figures_Article2.R
|
###################################
#### Figure Type I error rates ####
###################################
# Barplot of mean type I error per connectivity matrix, with the individual
# weighting-scheme results overlaid as points, faceted by sampling design.
#### Load ggplot2 ####
library(ggplot2)
#### Input of the data ####
# Point data: one type I error value per connectivity/weighting combination.
chic <- read.table("fig_typeIerror_all_(details).txt", h = T, sep = "\t")
str(chic)
chic$Connectivity <- factor(chic$Connectivity, levels = c("del", "gab", "rel", "mst", "db"))
#chic$Connectivity <- factor(chic$Connectivity, levels = c("graph-based", "distance-based"))
chic$Weighting <- factor(chic$Weighting, levels = c("Binary", "Linear", "Concave-down",
                                                    "Concave-up", "PCNM"))
chic$Design <- factor(chic$Design, levels = c("Clustered", "Random"))
# Barplot data: mean type I error per connectivity matrix.
chic2 <- read.table("fig_typeIerror_means_(details).txt", h = T, sep = "\t")
chic2$Connectivity <- factor(chic2$Connectivity, levels = c("del", "gab", "rel", "mst", "db"))
#chic2$Connectivity <- factor(chic2$Connectivity, levels = c("graph-based", "distance-based"))
chic2$Design <- factor(chic2$Design, levels = c("Clustered", "Random"))
# Barplots with points overlaid:
# ******************************
b <- ggplot(chic2, aes(Connectivity, Mean))
b <- b + geom_bar(stat = "identity", position = "stack", color = "black",
                  fill = "gray80") + facet_wrap(~Design, ncol = 2)
b <- b + theme(panel.background = element_rect(fill = "white"),
               panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                 element_line(colour = "white"))
b <- b + geom_point(data = chic, aes(Connectivity, typeIerror,
                                     color = factor(Weighting),
                                     shape = factor(Weighting)), size = 1.5)
b <- b + scale_shape_manual("Weighting function:",
                            labels = c("Binary", "Linear", "Concave-down", "Concave-up",
                                       "PCNM"), values = c(15:19))
b <- b + labs(x = "Connectivity matrix", y = "Type I error rate")
b <- b + theme(axis.title = element_text(size = 10.5))
b <- b + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                          linetype = "solid"),
               axis.line.y = element_line(color = "black", size = 0.5,
                                          linetype = "solid"))
b <- b + expand_limits(y = 0)
b <- b + scale_color_manual("Weighting function:",
                            labels = c("Binary", "Linear", "Concave-down", "Concave-up",
                                       "PCNM"),
                            values = c("blue1", "black", "firebrick3", "forestgreen",
                                       "darkorange2"))
# Dashed reference line at the nominal 5% significance level.
b <- b + geom_hline(yintercept = 0.05, linetype = 2) +
  scale_y_continuous(breaks = seq(0.01, 0.06, by = 0.01))
b
###################################
#### Figure Accuracy (deltaR²) ####
###################################
# Accuracy (deltaR2sub) and statistical power per connectivity matrix,
# faceted by sampling design and spatial scale; built separately for the
# "Strong" and "Weak" levels of the Strength factor.
# Means (barplots):
data <- read.table("fig_power_POP_means_styleB.txt", h = T, sep = "\t")
str(data)
data$Connectivity <- factor(data$Connectivity, levels = c("del", "gab", "rel", "mst", "db"))
data$Strength <- factor(data$Strength, levels = c("Strong", "Weak"))
data$Scale <- factor(data$Scale, levels = c("Broad", "Fine"))
data$Design <- factor(data$Design, levels = c("Clustered", "Random"))
# Individual W (connectivity x weighting) matrices:
data_w <- read.table("fig_power_POP_all_styleB.txt", h = T, sep = "\t")
str(data_w)
data_w$Connectivity <- factor(data_w$Connectivity, levels = c("del", "gab", "rel", "mst", "db"))
data_w$Design <- factor(data_w$Design, levels = c("Clustered", "Random"))
data_w$Scale <- factor(data_w$Scale, levels = c("Broad", "Fine"))
data_w$Weighting <- factor(data_w$Weighting, levels = c("Binary", "Linear",
                                                        "Concave-down", "Concave-up", "PCNM"))
# The figure is built separately for the two levels of the Strength factor:
# *******************************************************************************
strength <- "Strong"   # Strong or Weak
datasub <- subset(data, Strength == strength)
data_wsub <- subset(data_w, Strength == strength)
# In aes(), we specify what goes on x and y, and which factor separates
# the bars:
g <- ggplot(datasub, aes(x = Connectivity, y = dR2sub)) + facet_grid(Design~Scale)
g <- g + geom_bar(stat = "identity", position = position_dodge(), color = "black",
                  fill = "gray80")
g <- g + theme(panel.background = element_rect(fill = "white"),
               panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                 element_line(colour = "white"))
g <- g + labs(x = "Connectivity matrix", y = "Accuracy (ΔR²sub)")
g <- g + theme(axis.title = element_text(size = 10.5))
g <- g + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                          linetype = "solid"),
               axis.line.y = element_line(color = "black", size = 0.5,
                                          linetype = "solid"))
g <- g + geom_hline(yintercept = 0, linetype = 1) +
  scale_y_continuous(breaks = round(seq(-0.5, 0.1, by = 0.1), 2))
g <- g + geom_point(data = data_wsub, aes(Connectivity, dR2sub,
                                          color = factor(Weighting),
                                          shape = factor(Weighting)), size = 1.5)
g <- g + scale_shape_manual("Weighting function:",
                            labels = c("Binary", "Linear", "Concave-down", "Concave-up",
                                       "PCNM"), values = c(15:19))
(g <- g + scale_color_manual("Weighting function:",
                             labels = c("Binary", "Linear", "Concave-down",
                                        "Concave-up", "PCNM"),
                             values = c("blue1", "black", "firebrick3", "forestgreen",
                                        "darkorange2")))
#### Power ####
###############
w <- ggplot(datasub, aes(x = Connectivity, y = Power)) + facet_grid(Design~Scale)
w <- w + geom_bar(stat = "identity", position = position_dodge(), color = "black",
                  fill = "gray80")
w <- w + theme(panel.background = element_rect(fill = "white"),
               panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                 element_line(colour = "white"))
w <- w + labs(x = "Connectivity matrix", y = "Power")
w <- w + theme(axis.title = element_text(size = 10.5))
w <- w + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                          linetype = "solid"),
               axis.line.y = element_line(color = "black", size = 0.5,
                                          linetype = "solid"))
w <- w + geom_hline(yintercept = 0, linetype = 1) +
  scale_y_continuous(breaks = round(seq(0, 1, by = 0.2), 1))
w <- w + geom_point(data = data_wsub, aes(Connectivity, Power,
                                          color = factor(Weighting),
                                          shape = factor(Weighting)), size = 1.5)
w <- w + scale_shape_manual("Weighting function:",
                            labels = c("Binary", "Linear", "Concave-down", "Concave-up",
                                       "PCNM"),
                            values = c(15:19))
(w <- w + scale_color_manual("Weighting function:",
                             labels = c("Binary", "Linear", "Concave-down",
                                        "Concave-up", "PCNM"),
                             values = c("blue1", "black", "firebrick3", "forestgreen",
                                        "darkorange2")))
# Display power and accuracy simultaneously (relies on `b` built above):
# **********************************************************************
library(gridExtra)
grid.arrange(b, w, g, nrow = 3)
####################################
### Figure TypeIerror MEM.modsel ###
####################################
# Type I error rate of the MEM model-selection procedure under different
# global p-value corrections, faceted by sampling design.
typeIopt <- read.table("fig_typeIerror_Opt.txt", h = T, sep = "\t")
k <- ggplot(typeIopt, aes(Correction, typeIerror))
k <- k + geom_bar(stat = "identity", position = "stack", color = "black",
                  fill = "gray80") + facet_wrap(~Design, ncol = 2)
k <- k + theme(panel.background = element_rect(fill = "white"),
               panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                 element_line(colour = "white"))
k <- k + labs(x = "global p-value correction", y = "Type I error rate")
k <- k + theme(axis.title = element_text(size = 10.5))
k <- k + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                          linetype = "solid"),
               axis.line.y = element_line(color = "black", size = 0.5,
                                          linetype = "solid"))
k <- k + expand_limits(y = 0)
# Dashed reference line at the nominal 5% significance level.
k <- k + geom_hline(yintercept = 0.05, linetype = 2) +
  scale_y_continuous(breaks = seq(0.05, 0.35, by = 0.05))
k
#########################################
### Power and Accuracy - Optimisation ###
#########################################
# Compare the W-matrix optimisation procedure against a random W matrix,
# for power and accuracy, faceted by design and spatial scale.
data <- read.table("fig_power_Optim_POP_styleB.txt", h = T, sep = "\t")
str(data)
data$Strength <- factor(data$Strength, levels = c("Strong", "Weak"))
data$Scale <- factor(data$Scale, levels = c("Broad", "Fine"))
data$Design <- factor(data$Design, levels = c("Clustered", "Random"))
data$W_mat <- factor(data$W_mat, levels = c("Optimisation", "Random"))
# The figure is built separately for the two levels of the Strength factor:
# *******************************************************************************
strength <- "Strong" # Strong or Weak
datasub <- subset(data, Strength == strength)
# In aes(), we specify what goes on x and y, and which factor separates the bars:
(q <- ggplot(datasub, aes(x = W_mat, y = dR2sub)) + facet_grid(Design~Scale))
(q <- q + geom_bar(stat = "identity", position = position_dodge(), color = "black",
                   fill = "gray80"))
(q <- q + theme(panel.background = element_rect(fill = "white"),
                panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                  element_line(colour = "white")))
(q <- q + labs(x = "Choice of the W matrix", y = "Accuracy (ΔR²sub)"))
(q <- q + theme(axis.title = element_text(size = 10.5)))
(q <- q + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                           linetype = "solid"),
                axis.line.y = element_line(color = "black", size = 0.5,
                                           linetype = "solid")))
(q <- q + geom_hline(yintercept = 0, linetype = 1) +
    scale_y_continuous(breaks = round(seq(-0.5, 0.1, by = 0.1), 2)))
# Error bars: +/- one standard deviation around the mean accuracy.
(q <- q + geom_errorbar(data = datasub, aes(ymin = dR2sub - sd, ymax = dR2sub + sd),
                        width = .2, position = position_dodge(0.05)))
#### Power ####
###############
(p <- ggplot(datasub, aes(x = W_mat, y = Power))
 + facet_grid(Design~Scale))
(p <- p + geom_bar(stat = "identity", position = position_dodge(), color = "black",
                   fill = "gray80"))
(p <- p + theme(panel.background = element_rect(fill = "white"),
                panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                  element_line(colour = "white")))
(p <- p + labs(x = "Connectivity matrix", y = "Power"))
(p <- p + theme(axis.title = element_text(size = 10.5)))
(p <- p + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                           linetype = "solid"),
                axis.line.y = element_line(color = "black", size = 0.5,
                                           linetype = "solid")))
(p <- p + geom_hline(yintercept = 0, linetype = 1) +
    scale_y_continuous(breaks = round(seq(0, 1, by = 0.2), 1)))
# Error bars: +/- one standard deviation around the mean power.
(p <- p + geom_errorbar(data = datasub, aes(ymin = Power - sd, ymax = Power + sd),
                        width = .2, position = position_dodge(0.05)))
#############################
#############################
#### MISCELLANEOUS NOTES ####
#############################
# Personal ggplot2 reference notes; the snippets below reuse the `chic`
# data frame built at the top of this script.
#### A Default ggplot ####
(g <- ggplot(chic, aes(Connectivity, TypeIerror, color = factor(Weighting))))
(g <- g + geom_point())
#### Working with Axes ####
(g <- g + labs(x = "Connectivity matrix", y = "Type I error rate") +
   theme(axis.title = element_text(size = 10.5)))
# To include 0 on the y-axis:
(g <- g + expand_limits(y=0))
# To draw visible x and y axes (black line):
(g <- g + theme(axis.line.x = element_line(color = "black", size = 0.5,
                                           linetype = "solid"),
                axis.line.y = element_line(color = "black", size = 0.5,
                                           linetype = "solid")))
#### Working with Legends ####
# If no legend is wanted:
#g + theme(legend.position = "none")
# No legend title:
# g + theme(legend.title = element_blank())
# Set the legend title and redefine its labels:
(g <- g + scale_color_discrete("Weighting function:", labels=c("Binary", "Linear",
                                                               "Concave-down",
                                                               "Concave-up", "PCNM")))
#### Working with the background ####
# Everything goes through theme(). theme(panel.background = element_rect(fill = "grey60"))
# sets the background colour. panel.grid.major = element_line(colour = ) and
# panel.grid.minor control the colour and size (if ", size = " is added inside
# element_line) of the major and minor grid lines of the plot.
(g <- g + theme(panel.background = element_rect(fill = "white"),
                panel.grid.major = element_line(colour = "white"), panel.grid.minor =
                  element_line(colour = "white")))
#### Working with themes ####
library(ggthemes)
g + theme_tufte() # White background, no axes, no horizontal or vertical lines
# - Use theme_set() to define a default theme once and stop repeating it.
# - Type a theme's name to see its code --> build a new theme by tweaking
# some aspects and saving the result as a new theme (as a function), then
# set it as the default with theme_set().
# - existing_theme <- theme_update(...) --> theme_update changes selected
# aspects of an existing theme.
# - If we do not want to use our theme anymore: theme_set(theme_gray())
#### Working with Colors ####
## categorical variables
# Manual definition: g + scale_color_manual(values = c("dodgerblue4", ...))
# Using an existing palette:
g + scale_color_brewer(palette = "Set1")
# or
library(ggthemes)
g + scale_color_tableau() # inside the (): palette = "palette name"
g + scale_color_tableau(palette = "colorblind10")
## continuous variables
# use + scale_color_continuous("name of the continuous variable")
# or
# + scale_color_gradient(low = "darkkhaki", high = "darkgreen", "Ozone:")
# or else
# scale_color_gradient2(midpoint = mid, low = "blue4", mid = "white", high = "red4",
# "Ozone:")
#### Adding reference lines to the plot ####
# Using geom_abline(), geom_hline(), geom_vline()
(g <- g + geom_hline(yintercept = 0.05, linetype = 2) +
    scale_y_continuous(breaks = seq(0.05, 0.35, by = 0.05)))
# And changing the axis tick values with scale_x_continuous or scale_y_continuous
|
cf041771565671cfc56cc31ed3bb85df9a60e034
|
9fb478102a7a8aad73001d67d1d64d3b5590d442
|
/bootstrap_extras.R
|
8dad660b1c71f01f77ce2d39599587116f97cfab
|
[] |
no_license
|
juliaraices/Rotation1P1
|
f101d01c0dbae8552c5bce27037589d8a3ac9471
|
fb4ed4dcf191581fe1967266fe669351d1d796f8
|
refs/heads/master
| 2021-05-07T22:55:31.665776
| 2017-12-13T17:58:45
| 2017-12-13T17:58:45
| 107,370,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,002
|
r
|
bootstrap_extras.R
|
# Julia Raices
# November 2017
# names:
## F = female | M = male
## er = early | Blt = late
## 1 or 2 = replicate number
## slk = sex-determination linked
## nslk = not-sex-determination linked
## B = B. jarvisi | D = D. melanogaster
library(affy)
library(ggplot2)

# Read one kallisto-style abundance table, drop the length/eff_length/
# est_counts columns, and rename the remaining tpm column after the sample.
# Replaces seven copy-pasted read/drop/rename stanzas.
read_tpm <- function(path, sample) {
  abund <- read.table(path, header = TRUE, sep = "\t", quote = "")
  abund$length <- NULL
  abund$eff_length <- NULL
  abund$est_counts <- NULL
  colnames(abund) <- c("target_id", paste0("tpm_", sample))
  abund
}

# Sample name -> abundance file. Order matters: it reproduces the original
# merge chain (BltF1, BltF2, BerF1, BerF2, D14dF, D14aF, D12F) and hence the
# column order of BDall.
abundance_files <- c(
  BltF1 = "abundance_SRR1571147_extras.tsv",
  BltF2 = "abundance_SRR1571155_extras.tsv",
  BerF1 = "abundance_SRR1571160_extras.tsv",
  BerF2 = "abundance_SRR1571161_extras.tsv",
  D14dF = "abundance_SRR072915.tsv",
  D14aF = "abundance_SRR072909.tsv",
  D12F  = "abundance_SRR072907.tsv"
)
tpm_tables <- Map(read_tpm, abundance_files, names(abundance_files))

# Merge all samples pairwise on target_id, left to right, exactly like the
# original temp/temp2 merge chain.
BDall <- Reduce(function(a, b) merge(a, b, by = "target_id"), tpm_tables)
## comparisons: BerF1 - D12F | BerF2 - D14aF | BltF1 - D14aF | BltF2 - D14dF
#make a new table with columns to be normalized (you can't have 0s, so we add a tiny number to them) - lowest non-zero number is 54.882, so adding 0.000001 should not influence it a lot
# BD_normalized stacks all 7 samples into one long vector; timeN pairs stack
# the two samples of each comparison above.
BD_normalized<-c(BDall$tpm_BerF1+0.000001, BDall$tpm_BerF2+0.000001, BDall$tpm_BltF1+0.000001, BDall$tpm_BltF2+0.000001, BDall$tpm_D12F+0.000001, BDall$tpm_D14aF+0.000001, BDall$tpm_D14dF+0.000001)
time1<-c(BDall$tpm_BerF1+0.000001, BDall$tpm_D12F+0.000001)
time2<-c(BDall$tpm_BerF2+0.000001, BDall$tpm_D14aF+0.000001)
time3<-c(BDall$tpm_BltF1+0.000001, BDall$tpm_D14aF+0.000001)
time4<-c(BDall$tpm_BltF2+0.000001, BDall$tpm_D14dF+0.000001)
#bolF<-c(lott$e10+0.000001, lott$e11+0.000001, lott$e12+0.000001, lott$e13+0.000001, lott$e14a+0.000001, lott$e14b+0.000001, lott$e14c+0.000001, lott$e14d+0.000001)
# Reshape into genes x samples matrices for loess normalization. The last
# argument of matrix() is the number of stacked samples.
BDNormMatrix<-matrix(BD_normalized, nrow(BDall), 7)
NormMatrix1<-matrix(time1, nrow(BDall), 2)
NormMatrix2<-matrix(time2, nrow(BDall), 2)
NormMatrix3<-matrix(time3, nrow(BDall), 2)
NormMatrix4<-matrix(time4, nrow(BDall), 2)
#bolFMat<-matrix(bolF, nrow(lott), 8)
############WARNING: change "8" to however many columns you have selected!!! And "lott" is the table that contained the values that I was normalizing, use your own table name
#normalize (cyclic loess normalization from the affy package)
x <- normalize.loess(BDNormMatrix)
y1 <- normalize.loess(NormMatrix1)
y2 <- normalize.loess(NormMatrix2)
y3 <- normalize.loess(NormMatrix3)
y4 <- normalize.loess(NormMatrix4)
#x<-normalize.loess(bolFMat)
newBD <- as.data.frame(x)
new1 <- as.data.frame(y1)
new2 <- as.data.frame(y2)
new3 <- as.data.frame(y3)
new4 <- as.data.frame(y4)
#newDataFrame <- as.data.frame(x)
## comparisons: BerF1 - D12F | BerF2 - D14aF | BltF1 - D14aF | BltF2 - D14dF
# Copy the normalized values back into BDall: tpmN_* = normalized over all 7
# samples; tpmN1..tpmN4_* = normalized per pairwise comparison. Column order
# here must match the stacking order used to build the matrices above.
BDall$tpmN_BerF1 <- (newBD$V1)
BDall$tpmN_BerF2 <- (newBD$V2)
BDall$tpmN_BltF1 <- (newBD$V3)
BDall$tpmN_BltF2 <- (newBD$V4)
BDall$tpmN_D12F <- (newBD$V5)
BDall$tpmN_D14aF <- (newBD$V6)
BDall$tpmN_D14dF <- (newBD$V7)
BDall$tpmN1_BerF1 <- (new1$V1)
BDall$tpmN1_D12F <- (new1$V2)
BDall$tpmN2_BerF2 <- (new2$V1)
BDall$tpmN2_D14aF <- (new2$V2)
BDall$tpmN3_BltF1 <- (new3$V1)
BDall$tpmN3_D14aF <- (new3$V2)
BDall$tpmN4_BltF2 <- (new4$V1)
BDall$tpmN4_D14dF <- (new4$V2)
# lott$e10 <- (newDataFrame$V1)
# lott$e11 <- (newDataFrame$V2)
# lott$e12 <- (newDataFrame$V3)
# lott$e13 <- (newDataFrame$V4)
# lott$e14a <- (newDataFrame$V5)
# lott$e14b <- (newDataFrame$V6)
# lott$e14c <- (newDataFrame$V7)
# lott$e14d <- (newDataFrame$V8)
# Split genes into the sex-determination-linked set (slk) and its complement
# (nslk), by gene identifier.
BDall_nslk <- subset(BDall, BDall$target_id!='sc' & BDall$target_id!='run' & BDall$target_id!='gro' & BDall$target_id!='dpn' & BDall$target_id!='emc' & BDall$target_id!='da' & BDall$target_id!='sisA' & BDall$target_id!='her' & BDall$target_id!='tra' & BDall$target_id!='upd3' & BDall$target_id!='Sxl' & BDall$target_id!='CG1641' & BDall$target_id!='CG1849' & BDall$target_id!='CG33542' & BDall$target_id!='CG3827' & BDall$target_id!='CG43770' & BDall$target_id!='CG8384' & BDall$target_id!='CG5102' & BDall$target_id!='CG4694' & BDall$target_id!='CG8704' & BDall$target_id!='CG16724' & BDall$target_id!='CG1007' & BDall$target_id!='SxlA' & BDall$target_id!='SxlB' & BDall$target_id!='SxlC')
BDall_slk <- subset(BDall, BDall$target_id=='sc' | BDall$target_id=='run' | BDall$target_id=='gro' | BDall$target_id=='dpn' | BDall$target_id=='emc' | BDall$target_id=='da' | BDall$target_id=='sisA' | BDall$target_id=='her' | BDall$target_id=='tra' | BDall$target_id=='upd3' | BDall$target_id=='Sxl' | BDall$target_id=='CG1641' | BDall$target_id=='CG1849' | BDall$target_id=='CG33542' | BDall$target_id=='CG3827' | BDall$target_id=='CG43770' | BDall$target_id=='CG8384' | BDall$target_id=='CG5102' | BDall$target_id=='CG4694' | BDall$target_id=='CG8704' | BDall$target_id=='CG16724' | BDall$target_id=='CG1007' | BDall$target_id=='SxlA' | BDall$target_id=='SxlB' | BDall$target_id=='SxlC')
par(mfrow=c(1,1))
## comparisons: BerF1 - D12F | BerF2 - D14aF | BltF1 - D14aF | BltF2 - D14dF
# Bootstrap a summary statistic (default: median) of log10-expression.
# set1: reference set (callers pass the sex-linked genes, BDall_slk$...);
# set2: pool that is resampled (callers pass the non-linked genes).
# Draws `repetitions` samples of size length(set1) from log10(set2),
# records `stats` of each draw, and returns a ggplot of the resulting
# density with the 95% bootstrap interval shaded and the observed
# statistic of log10(set1) marked as a vertical line.
bootstrap <- function(set1, set2, repetitions=1000, stats=median){
  log_ref <- log10(set1)
  log_pool <- log10(set2)
  draw_size <- length(set1)
  # One resampled statistic per repetition (same RNG stream as a for loop).
  boot_stats <- vapply(
    seq_len(repetitions),
    function(i) stats(sample(log_pool, draw_size)),
    numeric(1)
  )
  # 95% percentile interval and smoothed density of the bootstrap statistic.
  ci <- quantile(boot_stats, probs = c(0.025, 0.975))
  dens <- density(boot_stats)
  dd <- with(dens, data.frame(x, y))
  # Density curve, shaded interval, and observed reference statistic.
  qplot(x, y, data = dd, geom = "line", color = I("cornflowerblue")) +
    geom_ribbon(data = subset(dd, x > ci[[1]] & x < ci[[2]]),
                aes(ymax = y), ymin = 0,
                fill = "lightskyblue", colour = NA, alpha = 0.5) +
    geom_vline(xintercept = stats(log_ref), color = "orchid", size = 1)
}
# Run the bootstrap for every sample, on raw and normalized expression,
# then write one PDF per normalization scheme.
# Raw (non-normalized) tpm values:
berf1 <- bootstrap(BDall_slk$tpm_BerF1, BDall_nslk$tpm_BerF1)
berf2 <- bootstrap(BDall_slk$tpm_BerF2, BDall_nslk$tpm_BerF2)
bltf1 <- bootstrap(BDall_slk$tpm_BltF1, BDall_nslk$tpm_BltF1)
bltf2 <- bootstrap(BDall_slk$tpm_BltF2, BDall_nslk$tpm_BltF2)
d12f <- bootstrap(BDall_slk$tpm_D12F, BDall_nslk$tpm_D12F)
d14af <- bootstrap(BDall_slk$tpm_D14aF, BDall_nslk$tpm_D14aF)
d14df <- bootstrap(BDall_slk$tpm_D14dF, BDall_nslk$tpm_D14dF)
#normalized 7 (loess across all seven samples)
berf1N <- bootstrap(BDall_slk$tpmN_BerF1, BDall_nslk$tpmN_BerF1)
berf2N <- bootstrap(BDall_slk$tpmN_BerF2, BDall_nslk$tpmN_BerF2)
bltf1N <- bootstrap(BDall_slk$tpmN_BltF1, BDall_nslk$tpmN_BltF1)
bltf2N <- bootstrap(BDall_slk$tpmN_BltF2, BDall_nslk$tpmN_BltF2)
d12fN <- bootstrap(BDall_slk$tpmN_D12F, BDall_nslk$tpmN_D12F)
d14afN <- bootstrap(BDall_slk$tpmN_D14aF, BDall_nslk$tpmN_D14aF)
d14dfN <- bootstrap(BDall_slk$tpmN_D14dF, BDall_nslk$tpmN_D14dF)
#normalized 2 (loess per pairwise comparison)
berf1N1 <- bootstrap(BDall_slk$tpmN1_BerF1, BDall_nslk$tpmN1_BerF1)
berf2N2 <- bootstrap(BDall_slk$tpmN2_BerF2, BDall_nslk$tpmN2_BerF2)
bltf1N3 <- bootstrap(BDall_slk$tpmN3_BltF1, BDall_nslk$tpmN3_BltF1)
bltf2N4 <- bootstrap(BDall_slk$tpmN4_BltF2, BDall_nslk$tpmN4_BltF2)
d12fN1 <- bootstrap(BDall_slk$tpmN1_D12F, BDall_nslk$tpmN1_D12F)
d14afN2 <- bootstrap(BDall_slk$tpmN2_D14aF, BDall_nslk$tpmN2_D14aF)
d14afN3 <- bootstrap(BDall_slk$tpmN3_D14aF, BDall_nslk$tpmN3_D14aF)
d14dfN4 <- bootstrap(BDall_slk$tpmN4_D14dF, BDall_nslk$tpmN4_D14dF)
# One page per plot in each PDF.
pdf("bootstrap_non-norm_extras.pdf")
berf1 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for B. jarvisi early 1")
berf2 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for B. jarvisi early 2")
bltf1 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for B. jarvisi late 1")
bltf2 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for B. jarvisi late 2")
d12f + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for D. melanogaster cell cycle 12")
d14af + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for D. melanogaster cell cycle 14a")
d14df + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of expression median for D. melanogaster cell cycle 14d")
dev.off()
pdf("bootstrap_norm7_extras.pdf")
berf1N + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi early 1\n (normalized using all 7 samples)")
berf2N + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi early 2\n (normalized using all 7 samples)")
bltf1N + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi late 1\n (normalized using all 7 samples)")
bltf2N + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi late 2\n (normalized using all 7 samples)")
d12fN + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 12 (normalized using all 7 samples)")
d14afN + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 14a (normalized using all 7 samples)")
d14dfN + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 14d (normalized using all 7 samples)")
dev.off()
pdf("bootstrap_norm2_extras.pdf")
berf1N1 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi early 1\n (normalized with D. melanogaster cell cycle 12)")
berf2N2 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi early 2\n (normalized with D. melanogaster cell cycle 14a)")
bltf1N3 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi late 1\n (normalized with D. melanogaster cell cycle 14a)")
bltf2N4 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for B. jarvisi late 2\n (normalized with D. melanogaster cell cycle 14d)")
d12fN1 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 12 (normalized with B. jarvisi early sample 1)")
d14afN2 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 14a (normalized with B. jarvisi early sample 2)")
d14afN3 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 14a (normalized with B. jarvisi late sample 1)")
d14dfN4 + xlab("Median expression") + ylab("Log Expression Density") + ggtitle("Bootstrap of normalized expression median for D. melanogaster\n cell cycle 14d (normalized with B. jarvisi late sample 2)")
dev.off()
|
2d3c0263637c283130b4a64e3da6a2c1e7156cb7
|
18d110fc809baf72f3c61558fe385258448ea711
|
/man/add_abstracts_to_pub_index_md.Rd
|
352f07700cb3dd4bd32bb3d9834c7e4099a2593d
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.pubs
|
a90a720e19ff51373ac1b00e2dd336146eb2b3fc
|
adfd7d337031fe4f32fed673835a5213d0925fc7
|
refs/heads/master
| 2021-08-31T11:48:00.877412
| 2021-08-16T07:30:02
| 2021-08-16T07:30:02
| 165,939,816
| 0
| 0
|
MIT
| 2021-08-16T07:30:02
| 2019-01-15T23:31:54
|
R
|
UTF-8
|
R
| false
| true
| 909
|
rd
|
add_abstracts_to_pub_index_md.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_abstracts_to_pub_index_md.R
\name{add_abstracts_to_pub_index_md}
\alias{add_abstracts_to_pub_index_md}
\title{Add Abstracts to Hugo-Academic Publications}
\usage{
add_abstracts_to_pub_index_md(
endnote_df,
overwrite = FALSE,
hugo_root_dir = "."
)
}
\arguments{
\item{endnote_df}{endnote_df as retrieved by kwb.endnote::create_references_df()
or kwb.endnote::clean_references_df()}
\item{overwrite}{should existing "projects" be overwritten (default: FALSE)}
\item{hugo_root_dir}{root dir of hugo-academic website (default: ".")}
}
\value{
add abstracts to index.md
}
\description{
Add Abstracts to Hugo-Academic Publications
}
\examples{
\dontrun{
endnote_list <- kwb.endnote::create_endnote_list()
endnote_df <- kwb.endnote::clean_references_df(endnote_list)
add_abstracts_to_pub_index_md(endnote_df = endnote_df)
}
}
|
a2f6515e9179019740594c3f16b8fc774dcee70f
|
3819207740cbb823a372d002d2af49a76fb29aad
|
/lib/helpers.R
|
4a4b48dc6dc684c1587a022ad36e8cdc0007260f
|
[] |
no_license
|
rbdixon/millionpersoncities
|
cb0efa2218bc16687b4233beba21f93c8ac4476d
|
69d521d91c39552b9d80fceefe0ed467ec828bd4
|
refs/heads/master
| 2016-09-06T09:48:56.735237
| 2013-09-30T22:16:25
| 2013-10-01T00:29:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
helpers.R
|
# Return the index of the smallest non-zero item.
# Like `order` but zeros are ignored: they are remapped to NaN and ranked
# last (na.last = TRUE). If every element is zero, index 1 is returned.
minindex.nonzero <- function(x) {
  masked <- x
  masked[masked == 0] <- NaN
  ranking <- order(masked, na.last = TRUE)
  ranking[1]
}
# C-style printf: format `s` with sprintf and print the result.
# Returns the formatted string invisibly (the return value of print()).
printf <- function(s, ...) {
  formatted <- sprintf(s, ...)
  print(formatted)
}
|
ed1d3024a9285ff2e866ad9842a2c317cf2f0c33
|
f8fdd440073ff21773b8472044d1a619f4887e5e
|
/beer_city_wrangling.R
|
6f7f51cf7229fa1a1c5a8efeee38b07dce652fb0
|
[] |
no_license
|
cmswalec/prowler4growler
|
22e6686a988648345462b121c1756c2c797e852b
|
4ce04e8fd3ea8ea3e1d45ddc703e20d78a2e948d
|
refs/heads/master
| 2021-04-26T21:52:24.026486
| 2020-04-07T18:05:34
| 2020-04-07T18:05:34
| 124,168,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,515
|
r
|
beer_city_wrangling.R
|
library(tidyverse)

# Beer + brewery data --------------------------------------------------------
beers <- read_csv("beers.csv")
brews <- read_csv("breweries.csv")
full_beer_list <- merge(beers, brews)

# City coordinates ------------------------------------------------------------
cities <- read_csv("uscitiesv1.3.csv")

# Join the beers of one state with that state's city coordinates, keep the
# columns needed downstream, and rename lat/lng to latitude/longitude.
# Replaces six copy-pasted per-state stanzas (MA, CT, RI, VT, NH, ME).
state_beer <- function(state_abbr) {
  state_beers <- full_beer_list %>%
    filter(state == state_abbr)
  state_cities <- cities %>%
    filter(state_id_x == state_abbr)
  joined <- merge(state_beers, state_cities, by.x = "city", by.y = "city_x") %>%
    select("state", "city", "abv", "ibu", "name", "style", "brew_name", "lat", "lng")
  colnames(joined) <- c("state", "city", "abv", "ibu", "name", "style",
                        "brew_name", "latitude", "longitude")
  joined
}

# Same states, same row order as the original rbind(CT, MA, RI, VT, NH, ME).
new_england <- c("CT", "MA", "RI", "VT", "NH", "ME")
tot_beer <- do.call(rbind, lapply(new_england, state_beer))

write_csv(tot_beer, "tot_beer.csv")
|
6f34caf97ea5ad063f2f1090bbc26fb4c5c8cb43
|
736cbf8954cfaca9ca618b96f78591cf2ece86d1
|
/run_analysis.R
|
fda58dc97fe67ec7ef172c3b7e41d9d67f9b7288
|
[] |
no_license
|
CandiedCode/GettingAndCleaningDataCourseProject
|
0e6779bd185bdeebf8eef27c21dc692af7e70ab0
|
7e331d6407cbca903655ce866f39d08d63fb6e1f
|
refs/heads/master
| 2021-01-13T01:03:09.712470
| 2016-01-31T15:32:02
| 2016-01-31T15:32:02
| 50,778,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,724
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge train/test, keep mean/std features, label activities, and write a
# tidy per-subject/per-activity summary (TidyData.txt).
#referenced libraries
library(dplyr)
library(data.table)
library(tidyr)
#set working directory
# NOTE(review): machine-specific absolute path; consider relative paths.
setwd("/Users/CandiedCode/Documents/Code/Coursera/DataScience/GettingAndCleaningData/CourseProject")
#download the zipfile, if it doesn't exist
if (!(file.exists("Dataset.zip"))){
        fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
        download.file(url=fileUrl,destfile="Dataset.zip",mode="w",method="curl")
}
#unzip the file, set junkpath = TRUE to put files in the same folder as the dataset
unzip("Dataset.zip", overwrite = TRUE)
# from the readme.txt
#- 'features_info.txt': Shows information about the variables used on the feature vector.
#- 'features.txt': List of all features.
#- 'activity_labels.txt': Links the class labels with their activity name.
#- 'train/X_train.txt': Training set.
#- 'train/y_train.txt': Training labels.
#- 'test/X_test.txt': Test set.
#- 'test/y_test.txt': Test labels.
features <- read.table('UCI HAR Dataset/features.txt',header=FALSE, col.names = c("featureNum","featureName"))
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt", header=FALSE, col.names = c("activityNum","activityName"))
# Read training files
subjectTrain <- tbl_df(read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE, col.names = c("subject")))
xTrain <- tbl_df(read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE, col.names = features[,2]))
yTrain <- tbl_df(read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE, col.names = c("activityNum")))
# Read test files
subjectTest <- tbl_df(read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE, col.names = c("subject")))
xTest <- tbl_df(read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE, col.names = features[,2]))
yTest <- tbl_df(read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE, col.names = c("activityNum")))
#Merges the training and the test sets to create one data set.
# Rows: train then test; columns: subject, activityNum, then all features.
allData <- bind_cols(bind_rows(subjectTrain, subjectTest), bind_rows(yTrain, yTest), bind_rows(xTrain, xTest))
#Let's remove unneeded objects from workspace
rm(subjectTest, subjectTrain, xTest, xTrain, yTest, yTrain)
#Extracts only the measurements on the mean and standard deviation for each measurement.
featuresMeanStd <- grep("mean[^F]|std|subject|activityNum",colnames(allData)) #find columns that have std or mean but not meanFreq
subsetData <- select(allData,featuresMeanStd)
#Uses descriptive activity names to name the activities in the data set
subsetData <- merge(subsetData,activityLabels,all = TRUE)
subsetData$activityNum <- NULL #I don't need the activityNum anymore so let's drop it
#Appropriately labels the data set with descriptive variable names.
colnames(subsetData) <- gsub("std..", "StdDev", colnames(subsetData))
colnames(subsetData) <- gsub("mean..", "Mean", colnames(subsetData))
colnames(subsetData) <- gsub("^t", "time", colnames(subsetData))
colnames(subsetData) <- gsub("^f", "freq", colnames(subsetData))
colnames(subsetData) <- gsub("BodyBody", "Body", names(subsetData))
colnames(subsetData) <- gsub("[.]", "_", names(subsetData))
colnames(subsetData) <- gsub("Acc", "Accelerometer", names(subsetData))
colnames(subsetData) <- gsub("Gyro", "Gyroscope", names(subsetData))
colnames(subsetData) <- gsub("Mag", "Magnitude", names(subsetData))
#colnames(subsetData)
#From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
# NOTE(review): the formula uses `subject - activityName`; the conventional
# grouping form is `subject + activityName` — verify the grouped output.
subsetSummary <- aggregate(. ~ subject - activityName, data = subsetData, mean)
subsetSummary <- tbl_df(arrange(subsetSummary,subject,activityName))
write.table(subsetSummary, "TidyData.txt", row.name=FALSE)
|
1ddea827d1da13ce74aa7d7bd074be54e6eb1175
|
3342b975e93791ceeea015314b0b14b04e442886
|
/TEDE_sim_MR_100.R
|
9c961c685f17fcc947f2a84ba1808c8c9cedd7c5
|
[] |
no_license
|
yangq001/TEDE
|
0a801323b35717f7bc582a685f82d8e17db7b0e4
|
96062c5404c94457545fd3e80552c39b82858717
|
refs/heads/main
| 2023-05-07T10:38:11.143646
| 2021-06-02T20:07:29
| 2021-06-02T20:07:29
| 356,360,957
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,394
|
r
|
TEDE_sim_MR_100.R
|
#Simulations in the MR context (independent SNPs, p=100)
# Monte Carlo study comparing pleiotropy tests (Q, MR-Egger intercept, TEDE
# score/aSPU variants, PMR-Egger) on two-sample summary statistics.  For each
# fraction of invalid instruments it simulates genotypes G, exposure X and
# outcome Y, computes marginal GWAS summary statistics on two disjoint
# samples, runs each test, and records empirical rejection rates at 0.05.
library("mr.raps")
library("jointsum")
library("MASS")
library("MendelianRandomization")
library("bindata")
library("aSPU")
library("data.table")
library("PMR")
library("TEDE")

####### SIMULATIONS ########
#################################
p=100 #number of SNPs (30 | 100)
rho=0 #correlation = 0 for MR
maf=0.3
MAF=rep(maf,p) #MAFs of SNPs

#Define the correlation and covariance matrices
# AR(1)-style correlation rho^|i-j|; with rho = 0 this is the identity.
corsnps=matrix(nrow=p,ncol=p)
for(i in 1:p){
  for(j in 1:p){
    corsnps[i,j]=rho^(abs(i-j))
  }
}
varsnps=diag(MAF*(1-MAF))
covsnps=sqrt(varsnps)%*%corsnps%*%sqrt(varsnps)

# NOTE(review): n1 is assigned twice -- the first value (10000) is immediately
# overwritten by 50000.  Presumably a leftover from switching settings;
# confirm which sample size is intended.
n1=10000 #sample size for the first dataset (G, X)
n1=50000 #sample size for the first dataset (G, X)
n2=10000 #sample size for the second dataset (G, Y)
n=n1+n2 #total sample size
iter=1000 #number of replications
size_XY=0 #X -> Y effect (0 | 0.2)
scenario = 1 #(1 | 2 | 3)

# One row per test, one column per proportion of invalid instruments.
results=matrix(nrow=7,ncol=4) #each column corresponds to one value of %invalid
colnames(results)=c("0","10%","30%","50%")
row.names(results)=c("Q","egger","score","aSPU","score2","aSPU2","PMR")

for(pinvalid in c(0, 0.1, 0.3, 0.5)){ #proportion of IVs that are invalid
  if(pinvalid == 0 & scenario > 2){
    next #when pinvalid = 0, it is the same for all scenarios
  }
  # Per-replication p-value accumulators for each method.
  p_Q=p_egger=p_PMR=c()
  p_aspu=p_aspu2=p_score=p_score2=c()
  it=1  # NOTE(review): redundant -- the for loop below re-initializes it
  for(it in 1:iter){
    set.seed(it)  # seed per replication for reproducibility
    #Generate G->X effect sizes
    # Draw from N(0, 0.15^2) truncated away from (-trunk, trunk) so every
    # instrument has a non-negligible effect on X.
    trunk=0.08
    size_GX=rnorm(p*90,sd=0.15)
    size_GX=size_GX[size_GX>trunk | size_GX< -trunk]
    size_GX=size_GX[1:p]
    #Generate G->Y direct effect sizes and G->U effect sizes
    # Scenario 1: balanced pleiotropy; 2: directional; 3: directional plus
    # correlated pleiotropy through the confounder U.
    if(pinvalid == 0){
      size_GY=rep(0,p)
      size_GU=rep(0,p)
    }
    if(pinvalid != 0 & scenario==1){
      size_GY=rnorm(p)*sqrt(0.075)
      size_GU=rep(0,p)
    }
    if(pinvalid != 0 & scenario==2){
      size_GY=0.1+rnorm(p)*sqrt(0.025)
      size_GU=rep(0,p)
    }
    if(pinvalid != 0 & scenario==3){
      size_GY=0.1+rnorm(p)*sqrt(0.025)
      size_GU=0.1*runif(p)
    }
    size_GY=size_GY*sign(size_GX)
    #set valid IV's size_GU and size_GY to 0
    nvalid=round(p*(1-pinvalid))
    vav=sample(1:p,nvalid)
    size_GY[vav]=0
    size_GU[vav]=0
    #Generate G and U
    # Independent biallelic genotypes (binomial(2, maf)); U is the confounder.
    G=matrix(rbinom(n*p,2,prob=maf),ncol=p)
    U=G%*%size_GU+rnorm(nrow(G))
    LDcov=cov(G)
    LDcor=cov2cor(LDcov)
    #Generate X (effect sizes are modified to control proportion of X explained by G)
    explained=0.2 #proportion of X explained by G
    if(explained>0){
      sdga=sd(G%*%size_GX)
      sii=sqrt(2*explained/(1-explained))/sdga
      size_GX=size_GX*sii
    }
    X=G%*%size_GX + U + rnorm(nrow(G))
    var(G%*%size_GX)/var(X)  # NOTE(review): value is discarded in batch runs
    #Generate Y (effect sizes are modified to control proportion of Y explained by X and G)
    bexp=0.02 #proportion of Y explained by X
    exa=0.003 #proportion of Y explained by G's direct effects
    if(sum(abs(size_GY))>0){
      sdga=sd(G%*%size_GY)
      sii2=sqrt(2*exa/(1-exa))/sdga
      size_GY=size_GY*sii2
    }
    if(size_XY!=0){
      tua=sd(size_XY*X)
      sii3=sqrt(2*bexp/(1-bexp))/tua
      size_XY2=size_XY*sii3 #resize size_XY
    }else{
      size_XY2=0
    }
    Y=G%*%size_GY + size_XY2*X + U + rnorm(nrow(G))
    #Split the data into two datasets and center variables at 0
    # Two-sample design: (G1, X1) for the exposure GWAS, (G2, Y2) for the
    # outcome GWAS.
    part1=1:n1
    part2=(n1+1):nrow(G)
    G1=G[part1,]
    G2=G[part2,]
    G1=t(t(G1)-colMeans(G1))
    G2=t(t(G2)-colMeans(G2))
    X1=X[part1]-mean(X[part1])
    Y2=Y[part2]-mean(Y[part2])
    #Get MARGINAL summary statistics (effect size, SE) for Y~SNP (r, syk) and X~SNP (w, sxk)
    # Per-SNP simple-regression slopes and their standard errors, computed
    # in closed form on the centered data.
    w=r=syk=sxk=c()
    gg=colSums(G2^2)
    r=c(t(G2)%*%Y2)/c(gg)
    Y2_pred=t(t(G2)*r)
    Y2_res=-(Y2_pred-Y2)
    sr=sqrt(colSums(Y2_res^2)/(length(part2)-2))
    syk=sr/sqrt(gg)
    gg=colSums(G1^2)
    w=c(t(G1)%*%X1)/c(gg)
    X1_pred=t(t(G1)*w)
    X1_res=-(X1_pred-X1)
    sr=sqrt(colSums(X1_res^2)/(length(part1)-2))
    sxk=sr/sqrt(gg)
    #Analysis using summary statistics
    #MR-Egger
    mr_object<-mr_input(bx = w, bxse = sxk, by = r, byse = syk)
    su1=mr_egger(mr_object)
    p_egger[it]=su1$Pvalue.Int
    #TEDE (building joint models from marginal is included in the function)
    su3=TEDE(w,sxk,r,syk,n1=n1,n2=n2,LDcov=LDcov,correlated_snps=FALSE,method="aSPU")
    su4=TEDE(w,sxk,r,syk,n1=n1,n2=n2,LDcov=LDcov,correlated_snps=FALSE,method="score")
    p_aspu[it]=su3[1]
    p_aspu2[it]=su3[2]
    p_score[it]=su4[1]
    p_score2[it]=su4[2]
    #Q
    p_Q[it]=su3[3]
    #PMR
    su2=PMR_summary_Egger(w/sxk,r/syk,LDcor,LDcor,n1,n2)
    p_PMR[it]=su2$pleiotropy_pvalue
    if(it%%10==0){
      print(it)  # progress indicator every 10 replications
    }
  }
  #show results for one setting
  data.frame(p,rho,pinvalid,size_XY,scenario)  # NOTE(review): discarded in batch runs
  ev=cbind(p_Q,p_egger,p_score,p_aspu,p_score2,p_aspu2,p_PMR)
  print(colMeans(ev<0.05))  # empirical rejection rate per method
  bexp;n1;n2  # NOTE(review): only echoes values interactively
  coll=which(c(0, 0.1, 0.3, 0.5)==pinvalid)
  results[,coll]=colMeans(ev<0.05)
}
results

#save results
# NOTE(review): hard-coded absolute path to the author's Dropbox.
setwd("/Users/dengy/Dropbox/Data Copy/TEDE")
nam=paste("TEDE_MR_sc",scenario,"_p",p,"_rho",rho,"_beta",size_XY,"_nx",n1,"_ny",n2,"_hxy",bexp,".csv",sep="")
write.csv(results,file=nam)
|
220bcccbf49294003abdab5d34411bc47ed6c328
|
5318e14f6a6b98ac1639343a16263cdd7aa3b760
|
/6 - Transit System Speed/06-speed.R
|
27d0ea8e86ccf8cc81c2eea0d921b970255e198c
|
[
"MIT"
] |
permissive
|
sean-connelly/Open_Transit_Data_Toolkit
|
af88fe496e99fdbe37e88f2b468cc4a15e755876
|
12152ed556445b9c9eccb1d7883a795a1f49c311
|
refs/heads/master
| 2021-01-16T09:01:35.376625
| 2020-02-25T18:11:22
| 2020-02-25T18:11:22
| 243,050,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,690
|
r
|
06-speed.R
|
# 06-speed.R -- estimate average speeds of MTA Bronx buses during the
# 7am-10am EST peak from one day of vehicle-position pings, then plot
# per-route speed traces.

# Load libraries
library(tidyverse);library(lubridate);library(hrbrthemes)
library(janitor)

options(stringsAsFactors = FALSE)

# Read in MBTA headway for Bronx buses data file (already filtered down from raw MTA NYCT)
raw_bus_times <- readRDS(file = "./MTA-Bus-Time_2014-10-08_Bronx.rds")

# Create a POSIXct date and time variable using available data
# Select Peak time 07:00 EST (11:00 UTC) to 10:00 EST (14:00 UTC)
bus_times <- raw_bus_times %>%
  mutate(time_received = as.POSIXct(time_received, format = "%Y-%m-%d %H:%M:%S")) %>%
  filter(hour(time_received) >= 11 & hour(time_received) < 14)

# For each trip, keep the single observation that is furthest along the route
# (time_received / max_distance are the "end" of the observed segment).
speed <- bus_times %>%
  select(inferred_trip_id,
         inferred_route_id,
         time_received,
         "max_distance" = distance_along_trip) %>%
  group_by(inferred_trip_id) %>%
  arrange(desc(max_distance)) %>%
  slice(1) %>%
  ungroup()

# For each trip, keep the observation closest to the start of the route
# (time / min_distance are the "start" of the observed segment).
speed_min_distance <- bus_times %>%
  select(inferred_trip_id,
         "time" = time_received,
         "min_distance" = distance_along_trip) %>%
  group_by(inferred_trip_id) %>%
  arrange(min_distance) %>%
  slice(1) %>%
  ungroup()

# Join speed and min_speed on inferred_trip_id
speed <- left_join(speed, speed_min_distance, by = "inferred_trip_id")

# BUG FIX: the original computed `time_diff = time_received - time`, which
# yields a difftime whose units R picks automatically (secs, mins, or hours
# depending on magnitude), so the later `as.numeric(time_diff)` -- and hence
# every speed -- was unit-dependent.  Force seconds explicitly so that
# distance (meters) / time_diff (seconds) is always m/s.
speed <- speed %>%
  mutate(time_diff = as.numeric(difftime(time_received, time, units = "secs")),
         distance = max_distance - min_distance)

# Remove any rows which the time difference or distance travelled is zero
speed <- speed %>%
  filter(time_diff > 0, distance > 0) %>%
  mutate(m_per_sec = distance / time_diff,
         mph = m_per_sec * 2.237) # meters per second to miles per hour = 1:2.23694

# Find of average (mean) speed from all the trips of each individual route
average_speed <- speed %>%
  group_by(inferred_route_id) %>%
  summarize(avg_mph = mean(mph, na.rm = TRUE))

# Plot
# Initialize a ggplot, and then create lines graphs for bus speeds throughout the day for each bus route.
speedplot <- speed %>%
  mutate(inferred_route_id = str_remove(inferred_route_id, "^(MTA NYCT_|MTABC_)")) %>%
  ggplot(., aes(x = time_received, y = mph)) +
  geom_line() +
  facet_wrap(~inferred_route_id, ncol = 10) +
  theme_ipsum()

speedplot

# Replot with outliers remoted (speeds >= 30 mph treated as GPS/data noise)
# Initialize a ggplot, and then create lines graphs for bus speeds throughout the day for each bus route.
speedplot <- speed %>%
  filter(mph < 30) %>%
  mutate(inferred_route_id = str_remove(inferred_route_id, "^(MTA NYCT_|MTABC_)")) %>%
  ggplot(., aes(x = time_received, y = mph)) +
  geom_line() +
  facet_wrap(~inferred_route_id, ncol = 10) +
  theme_ipsum() +
  ggtitle("Average Speeds for MTA Bronx Buses from 7am-10am 2014-10-08")

speedplot
|
d25a16bc04cdd06ed5c5f076764ef5bd35c41ffd
|
87a655c1e8a4e5344728b7f77ea62999f2ad494d
|
/man/wareki2seireki.Rd
|
060d4edc10b457eaaef5ff94e6c3398e551a6221
|
[
"MIT"
] |
permissive
|
nyubachi/pharmaprepro
|
b79b4a22a2c15b9769dee7b8c5996ab809d96b55
|
8c694faf7e64cc6e98a7ae9aed997b41a52480e7
|
refs/heads/master
| 2020-04-16T01:33:15.592414
| 2019-01-15T03:03:37
| 2019-01-15T03:03:37
| 165,178,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 551
|
rd
|
wareki2seireki.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wareki2seireki.R
\name{wareki2seireki}
\alias{wareki2seireki}
\title{Convert Japanese calendar to Christian calendar year}
\usage{
wareki2seireki(wareki)
}
\arguments{
\item{wareki}{The value of Japanese calendar. For example, please input like "S07.03.31" or "S07/03/31" or "S07-03-31".}
}
\description{
This function converts the Japanese calendar to the Christian era. It corresponds to Meiji, Taisho, Showa, Heisei. For example, converts "S47.03.31" to "1972-03-31".
}
|
d6c3beedc5a5964d8d88362df85340d8723219dd
|
ca45384eb29b370ab30294d0d51cc2578abd5e95
|
/Data manipulation functions/insert_F.R
|
5f2c0c0ebbb2a41637fdee13e89174bb5508955a
|
[
"MIT"
] |
permissive
|
Thvb/R-function-lib
|
89dd1a3b01cb4d1b3753c4faa61b32cf90ca0931
|
8ffd8c7239c6d3a5b10ff8f8568c483863de51aa
|
refs/heads/master
| 2021-01-19T14:27:48.760961
| 2017-06-12T10:30:50
| 2017-06-12T10:30:50
| 88,163,329
| 0
| 0
| null | 2017-06-12T10:30:50
| 2017-04-13T12:37:54
|
R
|
UTF-8
|
R
| false
| false
| 287
|
r
|
insert_F.R
|
# Insert extra values into a vector at given positions.
#
#   a   -- the vector to augment
#   pos -- positions in `a` after which each extra value is inserted
#   ... -- exactly one value per entry of `pos`, in the same order
#
# Returns `a` with the extra values interleaved at the requested spots.
insert.at <- function(a, pos, ...){
  extras <- list(...)
  stopifnot(length(extras) == length(pos))
  # Tag each element of `a` with the number of insertion points before it,
  # then split `a` into the runs lying between consecutive insertion points.
  run.id <- cumsum(seq_along(a) %in% (pos + 1))
  chunks <- split(a, as.factor(run.id))
  # Interleave: odd slots take the runs of `a`, even slots take the extras.
  out <- vector("list", 2 * length(pos) + 1)
  out[c(TRUE, FALSE)] <- chunks
  out[c(FALSE, TRUE)] <- extras
  unlist(out)
}
|
4f066fe253a41bfd4e8a9697aa20f70c4746fe5f
|
526acbb6808244ca56db2cde1c0451ae4bc7359a
|
/R/ICGC_cluster_based_subtyping.R
|
aab06ad61c067e10ed76bdda43922e947854c641
|
[
"MIT"
] |
permissive
|
rmoffitt/pdacR
|
541d5b5ccde25d1a795130bca8bb9df6061aca16
|
f3f9d0cc5c72d5edeac322ac7eead1c669f1b930
|
refs/heads/master
| 2023-07-09T10:03:26.508616
| 2023-06-20T16:00:40
| 2023-06-20T16:00:40
| 251,721,180
| 2
| 2
|
NOASSERTION
| 2022-11-30T18:24:02
| 2020-03-31T20:17:40
|
R
|
UTF-8
|
R
| false
| false
| 19,993
|
r
|
ICGC_cluster_based_subtyping.R
|
#' @title Cluster Based Subtyping
#' @description Perform clustering, label samples, and re-save data sets.
#'   For each of three ICGC pancreatic-cancer expression data sets
#'   (PACA_AU_seq, PACA_CA_seq, PACA_AU_array) this function: quantile
#'   normalizes expression, selects a training subset of samples from
#'   metadata, runs consensus clustering on the Moffitt basal/classical
#'   gene signatures (both unscaled and row-scaled), assigns
#'   basal/classical subtype labels, and saves the augmented data set to
#'   data/*_plus.RData.  Diagnostic heatmaps and text pages are written to
#'   ICGC_cluster_based_subtyping.pdf in the working directory.
#' @param data_set_list Currently unused -- NOTE(review): the body always
#'   processes all three hard-coded data sets; confirm whether this
#'   argument was intended to select among them.
#' @return No useful return value; called for its side effects (PDF report
#'   and saved .RData files under a relative "data/" directory).
#' @export
ICGC_cluster_based_subtyping <- function(data_set_list = c("summary")){
  pdf("ICGC_cluster_based_subtyping.pdf")
  #######################################################################
  # ---- Data set 1: PACA_AU_seq (RNA-seq; counts, hence log2(1+x)) ----
  dataset <- pdac::PACA_AU_seq
  # Blank page used as a text banner in the PDF report.
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,9,"PACA_AU_seq",adj = 0,cex = 3)
  p <- p;text(1,5,paste("Total Samples",length(dataset$sampInfo[[1]])),adj = 0)
  # -------------------------------------------------------------
  # Boilerplate
  plot.hist(dataset)
  normalized.expression <- preprocessCore::normalize.quantiles(as.matrix(log2(1+dataset$ex)))
  dataset$sampInfo$tumor.classifier.training <- FALSE
  dataset$sampInfo$tumor.classifier.outlier <- FALSE
  dataset$sampInfo[["cluster.MT"]] <- as.character(NA)
  dataset$sampInfo[["cluster.MT.scaled"]] <- as.character(NA)
  # -------------------------------------------------------------
  # Select cases to cluster based on metadata
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "Pancreatic Ductal Adenocarcinoma")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- TRUE
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "PDA - Adenosquamous carcinoma")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- TRUE
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,8,paste("HistoSubtype is \n Pancreatic Ductal Adenocarcinoma or \n PDA - Adenosquamous carcinoma",
                        sum(dataset$sampInfo$tumor.classifier.training)),adj = 0)
  # NOTE(review): the trailing space in "Cell line " matches the raw metadata.
  sampleset <- which(dataset$sampInfo$Sample.type %in% "Cell line ")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- FALSE
  p <- p;text(1,2,paste("Sample.type is not Cell line",
                        sum(dataset$sampInfo$tumor.classifier.training)),adj = 0)
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "PDA - signet ring")
  dataset$sampInfo$tumor.classifier.outlier[sampleset] <- TRUE
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "Acinar Cell Carcinoma")
  dataset$sampInfo$tumor.classifier.outlier[sampleset] <- TRUE
  # -------------------------------------------------------------
  # Boilerplate: restrict to training samples and the Moffitt basal-25 /
  # classical-25 signature genes.
  sampleset <- which(dataset$sampInfo$tumor.classifier.training)
  featureset <- which(dataset$featInfo$SYMBOL %in%
                        c(as.character(pdac::gene_lists$Moffitt.Basal.25),
                          as.character(pdac::gene_lists$Moffitt.Classical.25)))
  smallx <- (normalized.expression[featureset,sampleset])
  smallx.scaled <- t(scale(t(normalized.expression[featureset,sampleset])))
  # -------------------------------------------------------------
  # unscaled clustering ([[2]] takes the k=2 consensus solution's tree)
  cluster.result.c <- ConsensusClusterPlus::ConsensusClusterPlus(d = as.matrix(smallx),
                                                                 seed = 1234,
                                                                 pFeature = 0.8,
                                                                 pItem = 0.8,
                                                                 maxK = 3,
                                                                 reps=200,
                                                                 distance="pearson",
                                                                 clusterAlg="kmdist")[[2]]$consensusTree
  # Cut the tree at k=6; only the first two branches are labeled, every other
  # branch maps to NA (index beyond the label vector) and is dropped later.
  cluster.cut <- data.frame(cuts = (c('classical','basal',NA,NA)[cutree(cluster.result.c,k=6)]),stringsAsFactors = FALSE)
  dataset$sampInfo$cluster.MT[sampleset] <- cluster.cut[[1]]
  visualize.it(dataset,smallx,cluster.result.c,featureset,cluster.cut)
  # -------------------------------------------------------------
  # scaled clustering (same procedure on row-scaled expression)
  cluster.result.c <- ConsensusClusterPlus::ConsensusClusterPlus(d = as.matrix(smallx.scaled),
                                                                 seed = 1234,
                                                                 pFeature = 0.8,
                                                                 pItem = 0.8,
                                                                 maxK = 3,
                                                                 reps=200,
                                                                 distance="pearson",
                                                                 clusterAlg="kmdist")[[2]]$consensusTree
  cluster.cut <- data.frame(cuts = (c('classical','basal',NA)[cutree(cluster.result.c,k=7)]),stringsAsFactors = FALSE)
  dataset$sampInfo$cluster.MT.scaled[sampleset] <- cluster.cut[[1]]
  visualize.it(dataset,smallx,cluster.result.c,featureset,cluster.cut)
  # -------------------------------------------------------------
  # adjustments to training set: samples that got no label from either
  # clustering are flagged as outliers and removed from the training set.
  dataset$sampInfo$tumor.classifier.outlier[is.na(dataset$sampInfo$cluster.MT) &
                                              is.na(dataset$sampInfo$cluster.MT.scaled) &
                                              dataset$sampInfo$tumor.classifier.training ] <- TRUE
  dataset$sampInfo$tumor.classifier.training[is.na(dataset$sampInfo$cluster.MT)] <- FALSE
  # -------------------------------------------------------------
  # Wrapup: freeze labels as factors and save the augmented data set.
  dataset$sampInfo$cluster.MT <- factor(dataset$sampInfo$cluster.MT, levels = c("basal","classical"))
  dataset$sampInfo$cluster.MT.scaled <- factor(dataset$sampInfo$cluster.MT.scaled, levels = c("basal","classical"))
  dataset$sampInfo$tumor.classifier.training <- factor(dataset$sampInfo$tumor.classifier.training,
                                                       levels = c(FALSE,TRUE))
  dataset$sampInfo$tumor.classifier.outlier <- factor(dataset$sampInfo$tumor.classifier.outlier,
                                                      levels = c(FALSE,TRUE))
  PACA_AU_seq_plus <- dataset
  save(file = "data/PACA_AU_seq_plus.RData",list = "PACA_AU_seq_plus")
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,8,paste("after cluster trimming",sum(!is.na(dataset$sampInfo$cluster.MT))),adj = 0)
  #######################################################################
  # ---- Data set 2: PACA_CA_seq (cell lines only; no labels assigned --
  # both cut vectors are c(NA), so cluster.MT stays NA throughout) ----
  dataset <- pdac::PACA_CA_seq
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,9,"PACA_CA_seq",adj = 0,cex = 3)
  p <- p;text(1,5,paste("Total Samples",length(dataset$sampInfo[[1]])),adj = 0)
  # -------------------------------------------------------------
  # Boilerplate
  plot.hist(dataset)
  normalized.expression <- preprocessCore::normalize.quantiles(as.matrix(log2(1+dataset$ex)))
  dataset$sampInfo$tumor.classifier.training <- FALSE
  dataset$sampInfo$tumor.classifier.outlier <- FALSE
  dataset$sampInfo[["cluster.MT"]] <- as.character(NA)
  dataset$sampInfo[["cluster.MT.scaled"]] <- as.character(NA)
  # -------------------------------------------------------------
  # Select cases to cluster based on metadata
  sampleset <- which(dataset$sampInfo$actual_type %in% "Cell line")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- TRUE
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,8,paste("actual_type is Cell line",
                        sum(dataset$sampInfo$tumor.classifier.training)),adj = 0)
  # -------------------------------------------------------------
  # Boilerplate
  sampleset <- which(dataset$sampInfo$tumor.classifier.training)
  featureset <- which(dataset$featInfo$SYMBOL %in%
                        c(as.character(pdac::gene_lists$Moffitt.Basal.25),
                          as.character(pdac::gene_lists$Moffitt.Classical.25)))
  smallx <- (normalized.expression[featureset,sampleset])
  smallx.scaled <- t(scale(t(normalized.expression[featureset,sampleset])))
  # -------------------------------------------------------------
  # unscaled clustering
  cluster.result.c <- ConsensusClusterPlus::ConsensusClusterPlus(d = as.matrix(smallx),
                                                                 seed = 1234,
                                                                 pFeature = 0.8,
                                                                 pItem = 0.8,
                                                                 maxK = 5,
                                                                 reps=200,
                                                                 distance="pearson",
                                                                 clusterAlg="kmdist")[[2]]$consensusTree
  # All branches map to NA: clustering is run for the diagnostic heatmap only.
  cluster.cut <- data.frame(cuts = (c(NA)[cutree(cluster.result.c,k=2)]),stringsAsFactors = FALSE)
  dataset$sampInfo$cluster.MT[sampleset] <- cluster.cut[[1]]
  visualize.it(dataset,smallx,cluster.result.c,featureset,cluster.cut)
  # -------------------------------------------------------------
  # scaled clustering
  cluster.result.c <- ConsensusClusterPlus::ConsensusClusterPlus(d = as.matrix(smallx.scaled),
                                                                 seed = 1234,
                                                                 pFeature = 0.8,
                                                                 pItem = 0.8,
                                                                 maxK = 3,
                                                                 reps=200,
                                                                 distance="pearson",
                                                                 clusterAlg="kmdist")[[2]]$consensusTree
  cluster.cut <- data.frame(cuts = (c(NA)[cutree(cluster.result.c,k=2)]),stringsAsFactors = FALSE)
  dataset$sampInfo$cluster.MT.scaled[sampleset] <- cluster.cut[[1]]
  visualize.it(dataset,smallx,cluster.result.c,featureset,cluster.cut)
  # -------------------------------------------------------------
  # adjustments to training set
  dataset$sampInfo$tumor.classifier.training[is.na(dataset$sampInfo$cluster.MT)] <- FALSE
  # -------------------------------------------------------------
  # Wrapup
  dataset$sampInfo$cluster.MT <- factor(dataset$sampInfo$cluster.MT, levels = c("basal","classical"))
  dataset$sampInfo$cluster.MT.scaled <- factor(dataset$sampInfo$cluster.MT.scaled, levels = c("basal","classical"))
  dataset$sampInfo$tumor.classifier.training <- factor(dataset$sampInfo$tumor.classifier.training,
                                                       levels = c(FALSE,TRUE))
  dataset$sampInfo$tumor.classifier.outlier <- factor(dataset$sampInfo$tumor.classifier.outlier,
                                                      levels = c(FALSE,TRUE))
  PACA_CA_seq_plus <- dataset
  save(file = "data/PACA_CA_seq_plus.RData",list = "PACA_CA_seq_plus")
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,8,paste("after cluster trimming",sum(!is.na(dataset$sampInfo$cluster.MT))),adj = 0)
  #######################################################################
  # ---- Data set 3: PACA_AU_array (microarray; already log-scale, so no
  # log2 transform before quantile normalization) ----
  dataset <- pdac::PACA_AU_array
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,9,"PACA_AU_array",adj = 0,cex = 3)
  p <- p;text(1,5,paste("Total Samples",length(dataset$sampInfo[[1]])),adj = 0)
  # -------------------------------------------------------------
  # Boilerplate
  plot.hist(dataset)
  normalized.expression <- preprocessCore::normalize.quantiles(as.matrix((dataset$ex)))
  dataset$sampInfo$tumor.classifier.training <- FALSE
  dataset$sampInfo$tumor.classifier.outlier <- FALSE
  dataset$sampInfo[["cluster.MT"]] <- as.character(NA)
  dataset$sampInfo[["cluster.MT.scaled"]] <- as.character(NA)
  # -------------------------------------------------------------
  # Select cases to cluster based on metadata (same rules as PACA_AU_seq)
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "Pancreatic Ductal Adenocarcinoma")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- TRUE
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "PDA - Adenosquamous carcinoma")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- TRUE
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,8,paste("HistoSubtype is \n Pancreatic Ductal Adenocarcinoma or \n PDA - Adenosquamous carcinoma",
                        sum(dataset$sampInfo$tumor.classifier.training)),adj = 0)
  sampleset <- which(dataset$sampInfo$Sample.type %in% "Cell line ")
  dataset$sampInfo$tumor.classifier.training[sampleset] <- FALSE
  p <- p;text(1,2,paste("Sample.type is not Cell line",
                        sum(dataset$sampInfo$tumor.classifier.training)),adj = 0)
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "PDA - signet ring")
  dataset$sampInfo$tumor.classifier.outlier[sampleset] <- TRUE
  sampleset <- which(dataset$sampInfo$HistoSubtype %in% "Acinar Cell Carcinoma")
  dataset$sampInfo$tumor.classifier.outlier[sampleset] <- TRUE
  # -------------------------------------------------------------
  # Boilerplate
  sampleset <- which(dataset$sampInfo$tumor.classifier.training)
  featureset <- which(dataset$featInfo$SYMBOL %in%
                        c(as.character(pdac::gene_lists$Moffitt.Basal.25),
                          as.character(pdac::gene_lists$Moffitt.Classical.25)))
  smallx <- (normalized.expression[featureset,sampleset])
  smallx.scaled <- t(scale(t(normalized.expression[featureset,sampleset])))
  # -------------------------------------------------------------
  # unscaled clustering
  cluster.result.c <- ConsensusClusterPlus::ConsensusClusterPlus(d = as.matrix(smallx),
                                                                 seed = 1234,
                                                                 pFeature = 0.8,
                                                                 pItem = 0.8,
                                                                 maxK = 3,
                                                                 reps=200,
                                                                 distance="pearson",
                                                                 clusterAlg="kmdist")[[2]]$consensusTree
  cluster.cut <- data.frame(cuts = (c('classical','basal',NA,NA)[cutree(cluster.result.c,k=4)]),stringsAsFactors = FALSE)
  dataset$sampInfo$cluster.MT[sampleset] <- cluster.cut[[1]]
  visualize.it(dataset,smallx,cluster.result.c,featureset,cluster.cut)
  # -------------------------------------------------------------
  # scaled clustering
  cluster.result.c <- ConsensusClusterPlus::ConsensusClusterPlus(d = as.matrix(smallx.scaled),
                                                                 seed = 1234,
                                                                 pFeature = 0.8,
                                                                 pItem = 0.8,
                                                                 maxK = 3,
                                                                 reps=200,
                                                                 distance="pearson",
                                                                 clusterAlg="kmdist")[[2]]$consensusTree
  # NOTE(review): cutree k values (6, 7, 4, 9) differ per data set and look
  # hand-tuned against the dendrograms; confirm before changing any of them.
  cluster.cut <- data.frame(cuts = (c('classical','basal',NA)[cutree(cluster.result.c,k=9)]),stringsAsFactors = FALSE)
  dataset$sampInfo$cluster.MT.scaled[sampleset] <- cluster.cut[[1]]
  visualize.it(dataset,smallx,cluster.result.c,featureset,cluster.cut)
  # -------------------------------------------------------------
  # adjustments to training set
  dataset$sampInfo$tumor.classifier.outlier[is.na(dataset$sampInfo$cluster.MT) &
                                              is.na(dataset$sampInfo$cluster.MT.scaled) &
                                              dataset$sampInfo$tumor.classifier.training ] <- TRUE
  dataset$sampInfo$tumor.classifier.training[is.na(dataset$sampInfo$cluster.MT)] <- FALSE
  # -------------------------------------------------------------
  # Wrapup
  dataset$sampInfo$cluster.MT <- factor(dataset$sampInfo$cluster.MT, levels = c("basal","classical"))
  dataset$sampInfo$cluster.MT.scaled <- factor(dataset$sampInfo$cluster.MT.scaled, levels = c("basal","classical"))
  dataset$sampInfo$tumor.classifier.training <- factor(dataset$sampInfo$tumor.classifier.training,
                                                       levels = c(FALSE,TRUE))
  dataset$sampInfo$tumor.classifier.outlier <- factor(dataset$sampInfo$tumor.classifier.outlier,
                                                      levels = c(FALSE,TRUE))
  PACA_AU_array_plus <- dataset
  save(file = "data/PACA_AU_array_plus.RData",list = "PACA_AU_array_plus")
  p <- plot(x=1:10,y=1:10,ann=FALSE,type="n",xaxt="n",yaxt="n")
  p <- p;text(1,8,paste("after cluster trimming",sum(!is.na(dataset$sampInfo$cluster.MT))),adj = 0)
  #######################################################################
  dev.off()
}
# Draw a diagnostic heatmap of the signature-gene expression used for
# clustering: columns are samples (ordered by the consensus tree, with a
# color bar for the assigned basal/classical cut), rows are genes (ordered
# by correlation-distance hclust, with color bars marking which Moffitt
# signature each gene belongs to).
#
#   dataset          -- the data set list; featInfo$SYMBOL supplies row labels
#   smallx           -- genes-by-samples expression matrix being visualized
#   cluster.result.c -- column (sample) dendrogram from consensus clustering
#   featureset       -- row indices of the signature genes in dataset$featInfo
#   cluster.cut      -- data.frame with one column `cuts` of subtype labels
#
# Called for its plotting side effect (a page in the currently open PDF).
# NOTE(review): heatmap.3 and getSideColors are defined elsewhere in the
# package; they are not visible in this file.
visualize.it <- function(dataset,smallx,cluster.result.c,featureset,cluster.cut){
  # Row ordering: hierarchical clustering on gene-gene correlation distance.
  cluster.result.r <- hclust(d = bioDist::cor.dist(x = (smallx)))
  heatmap.3(x = smallx, scale="row",labRow = dataset$featInfo$SYMBOL[featureset],
            col = colorRampPalette(c("blue", "white", "red"))(n = 299),
            Colv = as.dendrogram(cluster.result.c),
            Rowv = as.dendrogram(cluster.result.r),
            # Per-sample color bar for the assigned subtype label.
            ColSideColors = getSideColors(sampInfo = data.frame(cuts =
                                                                  factor(x = cluster.cut$cuts,
                                                                         levels = c("basal","classical","activated","normal"))),
                                          sampleTracks = "cuts",
                                          colorlists = list(c("orange","blue","brown","skyblue")),
                                          drop.levels = FALSE)$SideColors,
            # Per-gene color bars: membership in each Moffitt 25-gene signature.
            RowSideColors = t(getSideColors(sampInfo = data.frame(basal =dataset$featInfo$SYMBOL[featureset] %in%
                                                                    pdac::gene_lists$Moffitt.Basal.25,
                                                                  classical =dataset$featInfo$SYMBOL[featureset] %in%
                                                                    pdac::gene_lists$Moffitt.Classical.25,
                                                                  normal =dataset$featInfo$SYMBOL[featureset] %in%
                                                                    pdac::gene_lists$Moffitt.Normal.25,
                                                                  activated = dataset$featInfo$SYMBOL[featureset] %in%
                                                                    pdac::gene_lists$Moffitt.Activated.25),
                                            sampleTracks = c("basal",
                                                             "classical",
                                                             "normal",
                                                             "activated"),
                                            colorlists = list(b=c("white","orange"),
                                                              c=c("white","blue"),
                                                              n=c("white","skyblue"),
                                                              a=c("white","brown")))$SideColors))
}
# Plot two stacked histograms (as sqrt-count spike plots) of the expression
# matrix: log2(1+x)-transformed on top, raw values below.  Used as a quick
# visual check of whether a data set is count-scale or already log-scale.
# Called for its plotting side effect only.
# NOTE(review): the dotted name makes this look like an S3 `plot` method for
# class "hist" and it could be picked up by dispatch; renaming would change
# the interface, so it is only flagged here.
plot.hist <- function(dataset){
  par(mfrow=c(2,1))  # two panels, one above the other
  h <- hist(log2(1+as.matrix(dataset$ex)),breaks=100,plot=FALSE)
  plot(y=sqrt(h$count),x=h$mids,type='h',main = "Logged")
  h <- hist(as.matrix(dataset$ex),breaks=100,plot=FALSE)
  plot(y=sqrt(h$count),x=h$mids,type='h',main = "As Is")
}
|
d33a41747378107396fbee8ec3edbc23304d3e75
|
8281ba77a599e25525a88b427b162c65b9779407
|
/Scripts/GARCH/garchNN.R
|
e2157674a2afe38f4f3913290d92223bf4aa4fbd
|
[] |
no_license
|
lordbirkemose/P7
|
fa96ea7c61ee20ddcaefdc61af637eeeb3458349
|
ea5d91e55f36df9810e9143b04da7bf29fecb5b0
|
refs/heads/master
| 2020-07-31T09:00:30.100581
| 2019-12-16T08:12:24
| 2019-12-16T08:12:24
| 210,553,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,139
|
r
|
garchNN.R
|
# garchNN.R -- fit a feed-forward Keras network to simulated GARCH data.
# All columns (including the target C) are min-max scaled to [-1, 1] with a
# single global min/max; after training, predictions are mapped back to the
# original scale and the model/results are saved.

### Packages -----------------------------------------------------------------
require("keras")
require("tensorflow")
require("tidyverse")
require("magrittr")

### Tensorflow setup ---------------------------------------------------------
# Sys.setenv(WORKON_HOME = "/q/student/mnorda16")
# NOTE(review): install_tensorflow() re-installs the backend on every run;
# consider guarding it once the environment is provisioned.
install_tensorflow()

### Load data ----------------------------------------------------------------
data <- read.csv("./Data/garchMC.csv.gz")
# Global scaling constants, computed once and NA-safe.  These (and only
# these) must be used for every forward and inverse transform below.
minData <- min(data, na.rm = TRUE)
maxData <- max(data, na.rm = TRUE)

### Test and training set ----------------------------------------------------
set.seed(2012)
indTrain <- sample(nrow(data), nrow(data)*0.75)

# Forward transform: x' = (2x - max - min) / (max - min) maps to [-1, 1].
dataTrain <- (2*data[indTrain, ] - maxData - minData)/(maxData - minData)
dataTrain <- dataTrain %>%
  select(-C) %>%          # drop the target column; keep features only
  as.matrix() %>%
  set_colnames(NULL)

dataTest <- (2*data[-indTrain, ] - maxData - minData)/(maxData - minData)
dataTest <- dataTest %>%
  select(-C) %>%
  as.matrix() %>%
  set_colnames(NULL)

# Scaled targets for each split.
dataTrainTarget <- data[indTrain, ] %>%
  mutate(C = (2*C - maxData - minData)/(maxData - minData)) %>%
  select(C) %>%
  as.matrix() %>%
  set_colnames(NULL)

dataTestTarget <- data[-indTrain, ] %>%
  mutate(C = (2*C - maxData - minData)/(maxData - minData)) %>%
  select(C) %>%
  as.matrix() %>%
  set_colnames(NULL)

### Construction the model ---------------------------------------------------
# Four hidden ELU layers of 30 units with dropout; linear output for regression.
NN <- keras_model_sequential()
NN %>% layer_dense(units = 30, activation = 'elu',
                   input_shape = dim(dataTest)[2]) %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 30, activation = 'elu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 30, activation = 'elu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 30, activation = 'elu') %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 1, activation = "linear")

# Stop training once validation loss has not improved for 50 epochs.
earlyStop <- callback_early_stopping(monitor = "val_loss", patience = 50)

### Compile and fit ----------------------------------------------------------
NN %>% compile(loss = 'mse',
               optimizer = optimizer_rmsprop(lr = 0.001, rho = 0.9),
               metrics = 'mean_absolute_error')

history <- NN %>%
  fit(dataTrain,
      dataTrainTarget,
      epochs = 500,
      batch_size = 50,
      validation_split = 0.2,
      verbose = 1,
      callbacks = list(earlyStop))

plot(history)

### Predict ------------------------------------------------------------------
# Inverse transform: x = (x' * (max - min) + (max + min)) / 2.
dataTrain <- data[indTrain,] %>%
  mutate(cHat = predict(NN, dataTrain),
         cHat = (cHat*(maxData - minData) +
                   (maxData + minData))/2)

dataTest <- data[-indTrain,] %>%
  mutate(cHat = predict(NN, dataTest),
         # BUG FIX: this previously used (max(data) - min(data)), which is
         # (a) not NA-safe (returns NA if the data contain any NA, because
         # na.rm is not set) and (b) inconsistent with the NA-safe
         # maxData/minData constants used for every other transform.
         cHat = (cHat*(maxData - minData) +
                   (maxData + minData))/2)

# Saving model ---------------------------------------------------------------
# NOTE(review): the model directory says "garchNn100Epoch500Batch..." while
# the results use "garchNn500Epoch100Batch..." -- and neither matches the
# actual settings (epochs = 500, batch_size = 50).  Paths left unchanged to
# avoid breaking downstream consumers; confirm the intended naming.
save_model_hdf5(NN, "./Data/garchNn100Epoch500Batch30Neurons/NN.h5")
save(dataTest, history,
     file = "./Data/garchNn500Epoch100Batch30Neurons/garchDataTest.Rdata")
write.csv(dataTrain,
          gzfile("./Data/garchNn500Epoch100Batch30Neurons/garchDataTrain.csv.gz"),
          row.names = FALSE)
|
566fa286c2e3d6a5dfa92fef44b0bb746285b927
|
4777ce6cad938c10a112e2afd80c431c64f89576
|
/R/bridge_sample.R
|
071913de65dd3da5d3d9e8359ec01007a463e0f5
|
[] |
no_license
|
Travis024/Comparison-of-Methods-for-Estimating-Future-Flood-Risk-in-New-Orleans-Louisiana
|
436a6acfaac8ecb1e7c09712e7e0f8a59b34635e
|
733509ff597410742e1388296d0c2a570ff40ab8
|
refs/heads/master
| 2020-05-29T09:00:13.848008
| 2019-07-28T21:14:57
| 2019-07-28T21:14:57
| 189,045,242
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,829
|
r
|
bridge_sample.R
|
#===============================================================================
# This file contains the working script for estimating the marginal likelihood
# of different GPD surge models and data experiments.
#
# This revised version calculates all likelihood estimates as a parallel
# 'foreach' loop.
#
# Original code: Vivek Srikrishnan (Penn State) 2017
# Modified code: Tony Wong (CU Boulder) 2018
#===============================================================================
# import libraries
library(mvtnorm)    # used for multivariate normal samples and densities (importance distribution)
#library(coda)      # used to compute spectral density of MCMC draws at frequency 0
library(extRemes)   # used to compute GEV densities within posterior likelihood function
library(foreach)    # parallel loop over experiments
library(doParallel) # parallel backend for foreach
library(ncdf4)

# Calibration configuration: which processed MCMC output file to use and which
# family of priors it was generated under.
calib_date <- '13Jun2019'
type.of.priors <- 'uniform' # can be 'uniform' or 'normalgamma'

# Machine-dependent paths and CPU count.
if(Sys.info()['user']=='tony') {
  # Tony's local machine (if you aren't me, you almost certainly need to change this...)
  machine <- 'local'
  path.R <- '/Users/tony/codes/students/Travis/nola_surge/R'
  setwd(path.R)
  # set data and save directories
  path.data <- '/Users/tony/codes/students/Travis/nola_surge/parameters_data'
  path.output <- '/Users/tony/codes/students/Travis/nola_surge/output'
  path.save <- paste('/Users/tony/codes/students/Travis/nola_surge/output/bma/',type.of.priors,'/', sep='')
  nnode <- 1 # number of CPUs to use
} else {
  # ???
}

# import file containing the log likelihood calculations
source(paste(path.R,'likelihood.R',sep='/'))
# read temperature covariate data (provides time_forc, temperature_forc)
source(paste(path.R,'read_temperature_data.R',sep='/'))
# set up parameters (provides nmodel, gev_models, ...)
source(paste(path.R,'parameter_setup.R',sep='/'))

# One experiment per candidate GEV model structure.
gev.models <- seq(from=1, to=nmodel)
# data frame to store the experimental details
experiments <- expand.grid(station ="grandisle",
                           gev.model=gev.models)
n_experiments <- nrow(experiments)
# list to store the actual output (one element per experiment)
output <- vector('list', n_experiments)

# data from Grand Isle
threshold_missing_data <- 0.9 # filter out years that are missing more data than this (%)
fillvalue <- -32767
source(paste(path.R,'data_processing.R',sep='/'))
file.nola <- paste(path.data,"h765a_grandisle.csv", sep="/")
data.nola <- process_tg_data(file.nola)

# Start the parallel backend.
#cores = detectCores()
#cl <- makeCluster(cores[1]-1) #not to overload your computer
cl <- makeCluster(nnode)
print(paste('Starting cluster with ',nnode,' cores', sep=''))
registerDoParallel(cl)

source('bridge_sample_functions.R')
# Objects that every worker needs copied into its environment.
export.names <- c('bridge.samp.rel.err','bridge.samp.iter','recip.imp.samp','experiments','log_post_gev','log_like_gev','log_prior_gev','path.R','calib_date','type.of.priors')

finalOutput <- foreach(ee=seq_len(n_experiments),
                       .packages=c('mvtnorm','extRemes','ncdf4'),
                       .export=export.names,
                       .inorder=FALSE) %dopar% {

  setwd(path.R)
  source(paste(path.R,'likelihood.R',sep='/'))
  source(paste(path.R,'read_temperature_data.R',sep='/'))

  # get parameters for this particular experiment
  print(experiments[ee,])
  station <- experiments[ee,'station']
  gev.model <- experiments[ee,'gev.model']

  # set output (saved as .RData; to be collected into a single output file later) file name
  filename.out <- paste('ml_',station,'_',gev.model,'.RData',sep='')

  if (file.exists(paste(path.save, filename.out, sep='/'))) {
    # skip experiments whose results are already on disk
    #stop('Output file already exists!')
    print('Output file already exists!')
    output[[ee]] <- 'done!'
  } else {
    # read in calibration output file
    print('loading calibration file...')
    setwd(path.output)
    filename.priors <- Sys.glob(paste('surge_priors_',type.of.priors,'_*','.RData',sep='')) # is in the output directory
    # use this if multiple files exist for the same location and prior
    load(filename.priors)
    if (exists('calib_date')) {
      setwd(path.output)
      filename.calib <- paste('mcmc_output_processed_',type.of.priors,'_',calib_date,'.RData',sep='')
    } else {
      setwd(path.output)
      filename.calib <- Sys.glob(paste('mcmc_output_processed_',type.of.priors,'_*','.RData',sep=''))
    }
    load(filename.calib) # gives parameters_posterior[[m]] m in 1:nmodel
    print('done!')

    # these chains are burned in and thinned, so use the whole thing.
    nsamples <- nrow(parameters_posterior[[gev.model]])
    # set number of samples to use for estimate
    post.samp.num <- nsamples
    imp.samp.num <- nsamples

    # posterior samples for this model
    post.samples <- parameters_posterior[[gev.model]]
    # nonstationary models (> 1) need the temperature covariate trimmed to the
    # tide-gauge record years; the stationary model takes no covariate
    if (gev.model > 1) {
      aux <- trimmed_forcing(data.nola[,"year"], time_forc, temperature_forc)$temperature
    } else {
      aux <- NULL
    }
    post.ll <- apply(post.samples, 1, log_post_gev,
                     parnames=gev_models[[gev.model]]$parnames,
                     data_calib=data.nola[,"lsl_max"],
                     priors=priors[[gev.model]],
                     auxiliary=aux)

    # fit normal approximation to the posteriors (the importance distribution)
    post.mean <- colMeans(post.samples)
    post.cov <- cov(post.samples)

    # get posterior samples
    print('sampling from posterior distribution...')
    samp.names <- c('samples','log.imp','log.p')
    post.samp <- setNames(vector("list",length(samp.names)),samp.names)
    samp.idx <- sample(x=nrow(post.samples), size=post.samp.num, replace=TRUE)
    post.samp$samples <- post.samples[samp.idx,]
    # get posterior log-likelihood of sampled posterior values
    post.samp$log.p <- post.ll[samp.idx]
    # get importance log-likelhood of posterior samples
    post.samp$log.imp <- dmvnorm(x=post.samp$samples, mean=post.mean, sigma=post.cov, log=TRUE)
    print('done!')

    # get importance samples and likelihood
    print('sampling from importance distribution...')
    imp.samp <- setNames(vector("list",length(samp.names)),samp.names)
    imp.samp$samples <- rmvnorm(n=imp.samp.num, mean=post.mean, sigma=post.cov)
    imp.samp$log.imp <- dmvnorm(x=imp.samp$samples, mean=post.mean, sigma=post.cov, log=TRUE)
    colnames(imp.samp$samples) <- colnames(post.samp$samples)
    # compute posterior log-likelihood of importance samples
    imp.samp$log.p <- apply(imp.samp$samples, 1, log_post_gev,
                            parnames=gev_models[[gev.model]]$parnames,
                            data_calib=data.nola[,"lsl_max"],
                            priors=priors[[gev.model]],
                            auxiliary=aux)
    print('done!')

    print('beginning bridge sampling recursion...')
    # set tolerance for halting of iteration
    TOL <- 1e-10
    # initialize storage for estimates
    ml <- mat.or.vec(nr=1,nc=1)
    # initialize with starting value
    # we can't quite start with the reciprocal importance sampling estimate from
    # Gelfand and Dey (1994) due to numerics (we get 0 values when we exponentiate
    # the difference of the importance log-densities and posterior log-likelihoods), so we just
    # average the ratios on a log scale.
    ml[1] <- -mean(post.samp$log.imp - post.samp$log.p)
    ml[2] <- bridge.samp.iter(ml[1], post.samp[c('log.p','log.imp')], imp.samp[c('log.p','log.imp')])
    # iterate until within tolerance.
    t <- 2
    while (abs(ml[t] - ml[t-1]) >= TOL) {
      ml[t+1] <- bridge.samp.iter(ml[t], post.samp[c('log.p', 'log.imp')], imp.samp[c('log.p', 'log.imp')])
      t <- t+1
    }
    print('done!')

    print('computing relative standard error of estimate')
    # compute the relative standard error of the bridge sampling estimator
    # we can treat the posterior samples as iid due to re-sampling from the posterior,
    # so we use the error formula from Fruhwirth-Schnatter (2004) with the spectral density
    # at frequency 0 set equal to 1.
    re.sq <- bridge.samp.rel.err(ml[length(ml)], post.samp[c('log.p','log.imp')], imp.samp[c('log.p','log.imp')])

    # save result of run
    # if save directory doesn't exist, create it
    #ifelse(!dir.exists(path.save), dir.create(path.save), FALSE)
    setwd(path.save)
    save(list=c('post.samp','imp.samp', 'ml', 're.sq', 'gev.model'), file=filename.out)
    output[[ee]] <- 'done!'
  }
}
stopCluster(cl)
#data_many <- finalOutput
#names(data_many) <- names(data_set)
#===============================================================================
# end
#===============================================================================
|
e645ec1ab6b0e473bbd1f6578fb6acd62c73fb1c
|
05a79b70fe74a76c292a5842cf1305e5ddb7c09d
|
/gimme/man/expand.grid.unique.Rd
|
6e806bdd0c9841bb40f9766aab2ce81ce9918c68
|
[] |
no_license
|
GatesLab/gimme
|
0cae55f00b9f3db2b0cb4874ea3594665b2fca3b
|
fe2e5c732bb7a8e964f71a7caa99c02327373beb
|
refs/heads/master
| 2023-09-04T01:24:26.488748
| 2023-09-01T16:44:21
| 2023-09-01T16:44:21
| 39,515,818
| 25
| 25
| null | 2023-02-17T19:22:45
| 2015-07-22T16:01:45
|
R
|
UTF-8
|
R
| false
| true
| 641
|
rd
|
expand.grid.unique.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expand.grid.unique.R
\name{expand.grid.unique}
\alias{expand.grid.unique}
\title{Provides unique combinations of two vectors.}
\usage{
expand.grid.unique(x, y, incl.eq = TRUE)
}
\arguments{
\item{x}{A character vector containing variable names.}
\item{y}{A character vector containing variable names.}
\item{incl.eq}{Logical. TRUE means that combinations are kept where
a variable appears twice.}
}
\value{
The unique combinations of the variable names. Used in syntax
creation.
}
\description{
Provides unique combinations of two vectors.
}
\keyword{internal}
|
7984b72f120be6790233821c3087c42105a12d88
|
9438cd69c9adbd11e2e6e9e15eb493b8d5b4a6d9
|
/man/expandTable.Rd
|
b0eb1d6440b565f67fb63f6c819c877e26d64a81
|
[] |
no_license
|
wokai/sqliteTools
|
1d74943793ff1d2dc50beb370a8c20852ab412c2
|
57dd6f54b4a2960e0257141a42b3751ef76f70d8
|
refs/heads/master
| 2021-01-13T01:23:40.073699
| 2015-01-07T10:46:37
| 2015-01-07T10:46:37
| 28,908,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,920
|
rd
|
expandTable.Rd
|
\name{expandTable}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Alias
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\alias{expandTable}
\title{expandTable
%% ~~function to do ... ~~
}
\description{Reads data from the read table and writes replicated, equally
distributed values into the write table.}
\usage{
expandTable(dbfile, tables, boundCols, indexCol, copyCols, expandCols, verbose=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dbfile}{character. Name of database file.}
\item{tables}{character. Name of read table and write table.}
\item{boundCols}{character. Name of boundary columns:
loBound and hiBound}
\item{indexCol}{character. Name of index column which is written
to output table.}
\item{copyCols}{character. Name of columns which are copied.}
\item{expandCols}{character. Name of columns which are expanded.}
\item{verbose}{numeric. Verbosity of printed output.}
}
\details{The function expands 'quant' value for weeks between
min_woche and max_woche, as long as the distance to woche_index is <= 13.}
\value{None.}
\author{Wolfgang Kaisers}
\examples{
n <- 5
v <- 1:n
dfr <- data.frame(id=v,
exp1 = v * 100/7,
exp2 = v * 200/7,
cpy1 = letters[v],
cpy2 = 2*v,
min_woche = v*100 - 1,
max_woche = v*100 + 1)
dbfile <- file.path(".", "test.db3")
tables <- c("tbl", "rtbl")
boundCols <- c("min_woche", "max_woche")
indexCol <- "woche"
copyCols <- c("cpy1", "cpy2")
expandCols <- c("exp1", "exp2")
verbose <- 1L
# Write SQLite database. Contains test data
con <- dbConnect(RSQLite::SQLite(), dbfile)
dbWriteTable(con, tables[1], dfr, overwrite=TRUE)
dbDisconnect(con)
# Do replicate
expandTable(dbfile, tables, boundCols, indexCol, copyCols, expandCols, verbose)
}
\keyword{expandTable}
|
5907297d128c1c13d5c681fcbf4cd27547f805c9
|
239d38627bf51f07f7d0e933294f0421f1c96271
|
/week3/week3.R
|
90716a5c8e5e7100849f8173e7cd6afdecc67e83
|
[] |
no_license
|
stkubr/econometrics-coursera
|
a67d326a208e107fad326b651ad85371fdd721cc
|
dc54cfb0ae4c3edf527de8185d27ed460ba598e3
|
refs/heads/master
| 2021-01-25T08:42:32.870970
| 2015-05-07T23:56:11
| 2015-05-07T23:56:11
| 35,249,089
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,657
|
r
|
week3.R
|
# Week 3: EDA on the diamonds data set and hedonic regressions of Moscow
# flat prices, with prediction intervals and specification tests.
library("memisc")
library("lmtest")
library("ggplot2")
library("dplyr")
library("foreign")
library("vcd")
library("devtools")
library("hexbin")
library("pander")
library("sjPlot")
library("knitr")

# load data
h <- diamonds
glimpse(h)
help(diamonds)

# scatterplots; hexagonal binning tames overplotting on ~54k points
qplot(data=h, carat, price)
bg <- qplot(data=h, log(carat), log(price))
bg + geom_hex()

# load flat's price data (tab-separated, decimal point)
f <- read.csv("flats_moscow.txt", sep="\t", header=TRUE, dec=".")
glimpse(f)
qplot(data=f, totsp, price)
qplot(data=f, log(totsp), log(price))
mosaic(data=f, ~walk+brick + floor, shade=TRUE)

# convert walk, brick, floor, code into factors
# (base-R replacement for dplyr::mutate_each(), which is deprecated and
# defunct in current dplyr; behavior is identical)
factor_cols <- c("walk", "brick", "floor", "code")
f[factor_cols] <- lapply(f[factor_cols], factor)

qplot(data=f, log(price), fill=brick, position="dodge")
# density functions
g2 <- qplot(data=f, log(price), fill=brick, geom="density", alpha=0.5)

# three nested regressions of log price
model_0 <- lm(data=f, log(price)~log(totsp))
model_1 <- lm(data=f, log(price)~log(totsp)+brick)
model_2 <- lm(data=f, log(price)~log(totsp)+brick+brick:log(totsp))
# ":" is a product (interaction) operation
sjp.lm(model_2)

# create a new data for prediction: two 60 m^2 flats, brick vs non-brick
nw <- data.frame(totsp=c(60,60), brick=factor(c(1,0)))
nw
# confidence interval (for the mean price)
exp(predict(model_2, newdata=nw, interval="confidence"))
# prediction interval (for an individual flat)
exp(predict(model_2, newdata=nw, interval="prediction"))

# F-test
waldtest(model_0, model_1) # H_0: model_0 H_a: model_1
# H_0 rejected

# add regression to diagram
gg0 <- qplot(data=f, log(totsp), log(price))
gg0 + stat_smooth(method="lm")
gg0 + stat_smooth(method="lm") + facet_grid(~walk)
gg0 + aes(col=brick) + stat_smooth(method="lm") + facet_grid(~walk)
# RESET test for functional-form misspecification
resettest(model_2)
|
e54ff379eb5feee65114070bb5a633035e55ad7d
|
dc1ead4b62bba2a151b894d33dc16d76d12856e2
|
/The P-Hub/demos/main file template.R
|
c1fb1614d22a5d18e6020005f04422c958e86303
|
[] |
no_license
|
pabloirigo/IS_RProjects
|
6343e968523155164e4ef8d41dfbbe68f7588c35
|
e6576d713641038c09495c6dc8be3d458e10e3ab
|
refs/heads/master
| 2020-04-28T15:38:32.411954
| 2019-05-20T11:58:47
| 2019-05-20T11:58:47
| 175,378,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,007
|
r
|
main file template.R
|
# =======================================================================
# Names: Pablo Irigoyen, Luis Urrechaga
# Group Number: B
# Assignment: P-Hub
# Date:
# =======================================================================
# 1. Be sure to include, with this template, any necessary files
# for execution, including datasets (problem.R, methodXXX.R, ...)
# (submission of the entire template folder is recommended)
# 2. If you use a function of a certain package, do not forget to include the
# corresponding call to the "library ()" function
# 3. Do not forget to comment on the code, especially those non-trivial commands
# (remember that part of the rating depends on the cleaning of the code)
# 4. It is strongly recommended to test any implemented function in order to
# check for its proper operation
# =======================================================================
# (This is a general code, you must adapt it)
# =======================================================================
# Configuring the Environment
# NOTE(review): rm(list=ls()) / setwd() in scripts are generally discouraged,
# but are kept here deliberately as part of the course template convention.
rm(list=ls())    # wipe all objects from the workspace
cat("\014")      # clear the console (form-feed)
graphics.off()   # close any open graphics devices
# set the working directory to this script's own folder (requires RStudio)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir()
# LIBRARIES (add any needed library here)
library(rstudioapi)
library(ggplot2)
library(gridExtra)
# ADDITIONAL FUNCTIONS (add any used method/problem here)
source("../problems/problem template.R")
source("../methods/Hill Climber.R")
# And here, there are additional (needed) functions
source("../methods/Expand Node.R")
source("../methods/Analyze Results.R")
source("../methods/Plot Results.R")
# =======================================================================
# Check the proper operation of implemented function here!
# =======================================================================
# Solving of the problem (you have to adapt it)
# build a p-hub instance with 40 nodes and p = 2, then solve by hill climbing
problem = initialize.problem("../data/phub_40.txt", 2)
res = Hill.Climber(problem, count.limit = problem$maxCombinations + 1)
all = list(res)
analyze.results(list(res),problem)
|
3558ac86292cf4034f263a93265702426d250429
|
d1b1cead5e9525fbfec5b7df989ebc0a09c8d782
|
/R/getExperimentSampleAssayData.R
|
3e1e056f222fa21447ca0e1e817f8c6d504d1071
|
[] |
no_license
|
AmundsenJunior/pfsrsdk
|
0ce8195f9d9a96562d31992f44303ee151bd4111
|
d799defb9447a4e70cb2906205f6023020fc621a
|
refs/heads/master
| 2020-06-06T00:24:06.491018
| 2019-07-15T16:59:12
| 2019-07-15T20:37:59
| 192,584,459
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,055
|
r
|
getExperimentSampleAssayData.R
|
#' getExperimentSampleAssayData - Gets assay data for an experiment sample.
#'
#' \code{getExperimentSampleAssayData} retrieves the assay data attached to an
#' experiment sample, where the sample is identified by its barcode.
#'
#' @param coreApi coreApi object with valid jsessionid
#' @param experimentAssayType assay type to get
#' @param experimentSampleBarcode experiment sample barcode of entity to get
#' @param fullMetadata - get full metadata, default is FALSE
#' @param ... additional arguments passed to \code{apiGET}
#' @return List of length 2, containing \code{entity} and \code{response} objects:
#' \itemize{
#' \item{\code{entity}} is the HTTP response content of entity information.
#' \item{\code{response}} is the entire HTTP response.
#' }
#' @export
#' @examples
#' \dontrun{
#' api <- coreAPI("PATH TO JSON FILE")
#' login <- authBasic(api)
#' experiment <- getExperimentSampleAssayData(login$coreApi, "experimentAssayType", "experimentSampleBarcode")
#' logOut(login$coreApi)
#' }
#' @author Craig Parman info@ngsanalytics.com
#' @author Natasha Mora natasha.mora@thermofisher.com
#' @author Scott Russell scott.russell@thermofisher.com
#' @description \code{ getExperimentSampleAssayData } Gets assay data for a experiment sample identified by barcode.
getExperimentSampleAssayData <-
  function(coreApi,
           experimentAssayType,
           experimentSampleBarcode,
           fullMetadata = FALSE,
           ...) {
    # ODATA-safe names for the entity type and the requested assay type
    entity_type <- odataCleanName("EXPERIMENT_SAMPLE")
    assay_type <- odataCleanName(experimentAssayType)

    # OData path: ('<barcode>')/ASSAY_DATA/pfs.<ASSAY_TYPE>_DATA
    odata_query <- sprintf(
      "('%s')/ASSAY_DATA/pfs.%s_DATA",
      experimentSampleBarcode,
      assay_type
    )

    # request full OData metadata only when the caller asks for it
    accept_header <- if (fullMetadata) {
      c(Accept = "application/json;odata.metadata=full")
    } else {
      NULL
    }

    result <-
      apiGET(
        coreApi,
        resource = entity_type,
        query = odata_query,
        headers = accept_header,
        ...
      )

    list(entity = result$content, response = result$response)
  }
|
70b0670a87a0749d96abe7e4317902a39c6a527e
|
437c510c304fccfd6ccaa11ba158f68172a154ba
|
/R/calculate_percent_change_exchange.R
|
faa782290ef99cc67d0f8786b8b213452f4f4f72
|
[
"MIT"
] |
permissive
|
ries9112/PredictCrypto
|
37e44ac160434a77796093d5d34c868b0bedafef
|
f118e1dbcb3af5805c5be8603e23ae986d86f9b8
|
refs/heads/master
| 2020-12-26T23:09:01.034583
| 2020-09-06T21:06:19
| 2020-09-06T21:06:19
| 237,680,808
| 1
| 2
|
NOASSERTION
| 2020-09-05T00:56:56
| 2020-02-01T21:32:19
|
HTML
|
UTF-8
|
R
| false
| false
| 2,381
|
r
|
calculate_percent_change_exchange.R
|
#' Calculate percent price change over a time horizon, per exchange
#'
#' For each hourly observation of a cryptocurrency on a given exchange, this
#' finds the USD price \code{enterHours} hours later on the same exchange
#' (via a self-join on an hour-resolution timestamp key plus currency name)
#' and computes the percentage change between the two prices.
#'
#' @param crypto_dataset A data frame with at least the columns
#'   \code{DateTimeColoradoTimeMST}, \code{PriceUSD}, \code{Name} and
#'   \code{Exchange}.
#' @param enterHours Number of hours ahead to compare prices against.
#'
#' @return \code{crypto_dataset} with added columns
#'   \code{PriceUSD_x_hoursLater}, \code{DateTimeColoradoTimeMST_x_hoursLater}
#'   and \code{TargetPercChange}; rows with no future match (missing
#'   \code{TargetPercChange}) are dropped, and the first column of the joined
#'   result is removed.
#'
#'@importFrom anytime anytime
#'@importFrom lubridate hours
#'@importFrom dplyr %>%
#'@importFrom dplyr left_join
#'@importFrom dplyr select
#'@importFrom dplyr rename
#'@importFrom dplyr filter
#'@export
calculate_percent_change_exchange <- function (crypto_dataset, enterHours)
{
  crypto_datasetHLater <- crypto_dataset
  # shift timestamps back by enterHours so the hour keys of the shifted copy
  # line up with observations enterHours earlier in the original data
  crypto_datasetHLater$DateTimeColoradoTimeMST <- crypto_datasetHLater$DateTimeColoradoTimeMST - lubridate::hours(enterHours)
  # rebuild the primary-key dummy: first 13 characters = "YYYY-MM-DD HH"
  crypto_datasetHLater$pkDummy <- substr(paste(as.POSIXct(crypto_datasetHLater$DateTimeColoradoTimeMST,format="%Y-%m-%d"), format(as.POSIXct(crypto_datasetHLater$DateTimeColoradoTimeMST,format="%H:%M:%S"),"%H")),1,13)
  crypto_dataset$pkDummy <- substr(paste(as.POSIXct(crypto_dataset$DateTimeColoradoTimeMST,format="%Y-%m-%d"), format(as.POSIXct(crypto_dataset$DateTimeColoradoTimeMST,format="%H:%M:%S"),"%H")),1,13)
  # full join key = hour key + currency name
  crypto_dataset$pkey <- paste(crypto_dataset$pkDummy, crypto_dataset$Name)
  crypto_datasetHLater$pkey <- paste(crypto_datasetHLater$pkDummy, crypto_datasetHLater$Name)
  # restore the shifted copy's timestamps to their true values
  crypto_datasetHLater$DateTimeColoradoTimeMST <- crypto_datasetHLater$DateTimeColoradoTimeMST + lubridate::hours(enterHours)
  crypto_datasetHLater <- dplyr::select(crypto_datasetHLater, PriceUSD, pkey, DateTimeColoradoTimeMST, Exchange) %>%
    dplyr::rename(PriceUSD_x_hoursLater = PriceUSD, DateTimeColoradoTimeMST_x_hoursLater = DateTimeColoradoTimeMST)
  # join each row to its "future self" on (key, exchange)
  joinedDataset <- dplyr::left_join(crypto_dataset, crypto_datasetHLater, by = c("pkey" = "pkey" , 'Exchange' = 'Exchange'))
  #joinedDataset <- filter(joinedDataset, joinedDataset$DateTimeColoradoTimeMST <=
  #                        max(crypto_dataset$DateTimeColoradoTimeMST) - (24*60*60 )
  joinedDataset$TargetPercChange <- ((joinedDataset$PriceUSD_x_hoursLater -
                                        joinedDataset$PriceUSD)/joinedDataset$PriceUSD) * 100
  # NOTE(review): this drops the FIRST column of the joined frame by position;
  # confirm it is always the intended column.
  joinedDataset <- dplyr::select(joinedDataset, -1)
  return(joinedDataset %>% dplyr::filter(!is.na(TargetPercChange)) )
  #return(crypto_dataset)
}
#### IMPORTANT NOTE FOR CODE ABOVE. RATHER THAN HAVING "XhoursLater", find a way to concat the string of the field name with the user input enterHours! Important, do it before tutorial is too far along!
# remember to create a function just like this but pre-made for a 24 hour period called calculate_24hour_perc_change()
|
e30f949ac7c3c2a065a9df5ba4eef711a0afc355
|
101ddeb6267e076c1a214e50478c2ecb3b012b98
|
/TEQ.R
|
e71dd7423622eae86a803df37d962ad8651cf98a
|
[
"Apache-2.0"
] |
permissive
|
bhar2254/TSU_Portfolio
|
ea080328550e2fdaeaef2d0bf6fc1caff4718168
|
8e77e91b5bd30d244c3b5c6a4a4988fc23ebf2ab
|
refs/heads/master
| 2022-04-24T19:55:32.237439
| 2020-04-29T05:14:40
| 2020-04-29T05:14:40
| 259,831,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,020
|
r
|
TEQ.R
|
# Blaine Harper
# CASE - Truman State University
# TEQ.R
# Automation of Truman's Annual Portfolio Project.
# TEQ Tables
# Build the TEQ analysis data set: portfolio questionnaire responses merged
# with major/school information, plus derived experience flags used by every
# summary table below.
TEQ <- portfolio_data("TEQ") %>% # copied from everywhere else
  merge(y = Majors, by.x = c("Last Name","First Name"), by.y = c("Last","First"), all=T) %>% # Merge with the Majors data frame
  portfolio_build_major() %>%
  # "Yes" when the student reported a course-based transformative experience
  mutate(`TRQ-COURSE` = case_when(
    # NOTE(review): & binds tighter than |, so this evaluates as
    # (A & B & C) | D | E -- a single NA in `TRQ-HOW-2...28` or `TRQ-HOW-3`
    # alone forces "No". Confirm whether "all five NA" was the intent.
    is.na(`TRQ-HOW-1...25`) & is.na(`TRQ-HOW-2...26`) & is.na(`TRQ-HOW-1...27`) | is.na(`TRQ-HOW-2...28`) | is.na(`TRQ-HOW-3`) ~ "No",
    TRUE ~ "Yes"
  )) %>%
  # 1 if the student did at least one of the "Big 4" experiences
  # (study abroad, service learning, research, internship), else 0
  mutate(LeastBig4 = case_when(
    `TRQ-STUAB` == "Yes" ~ 1,
    `TRQ-SL` == "Yes" ~ 1,
    `TRQ-RSCH` == "Yes" ~ 1,
    `TRQ-INT` == "Yes" ~ 1,
    TRUE ~ 0
  )) %>%
  # count of "Big 4" experiences reported (0-4)
  mutate(Big4 = ifelse(`TRQ-STUAB` == "Yes",1,0) +
           ifelse(`TRQ-SL` == "Yes",1,0) +
           ifelse(`TRQ-RSCH` == "Yes",1,0) +
           ifelse(`TRQ-INT` == "Yes",1,0)
  ) %>%
  # 1 if the student reported any transformative experience at all
  mutate(LeastAny = case_when(
    `TRQ-STUAB` == "Yes" ~ 1,
    `TRQ-SL` == "Yes" ~ 1,
    `TRQ-RSCH` == "Yes" ~ 1,
    `TRQ-INT` == "Yes" ~ 1,
    `TRQ-LEAD` == "Yes" ~ 1,
    `TRQ-EDEX` == "Yes" ~ 1,
    `TRQ-WRIT` == "Yes" ~ 1,
    `TRQ-OTH` == "Yes" ~ 1,
    `TRQ-COURSE` == "Yes" ~ 1,
    TRUE ~ 0
  )) %>%
  portfolio_build_gender()
##### 2019 Overall Truman Edu. Transformative
# Overall N; also the denominator for the percentage columns below.
total.n <- TEQ %>%
  summarise(`TRQ-ED` = "Total",
            N = n(),
            Percent = 100)
# Counts and percentages of each "was your education transformative" response,
# with a Total row appended.
Overall.Ed.Transformative <- rbind(TEQ %>% group_by(`TRQ-ED`) %>%
                                     summarise(N = n(),
                                               Percent = round(N / total.n$N * 100)),
                                   total.n
)
##### Various Activities
# First row set = counts, second = percentages; transposed so each activity
# becomes a row.
# NOTE(review): Service and `Student-led` are both computed from `TRQ-SL`;
# one of them likely should reference a different column -- confirm.
Various.Activities <- rbind(TEQ %>%
                              summarize(`Study Abroad` = sum(ifelse(`TRQ-STUAB` == "Yes",1,0)),
                                        Service = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
                                        Research = sum(ifelse(`TRQ-RSCH` == "Yes",1,0)),
                                        Internship = sum(ifelse(`TRQ-INT` == "Yes",1,0)),
                                        Leadership = sum(ifelse(`TRQ-LEAD` == "Yes",1,0)),
                                        `Student-led` = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
                                        Writing = sum(ifelse(`TRQ-WRIT` == "Yes",1,0)),
                                        Other = sum(ifelse(`TRQ-OTH` == "Yes",1,0)),
                                        Course = sum(ifelse(`TRQ-COURSE` == "Yes",1,0)),
                                        Total = total.n$N
                              ),TEQ %>%
                              summarize(`Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0)) / n() * 100, 2),
                                        Service = round(sum(ifelse(`TRQ-SL` == "Yes",1,0)) / n() * 100, 2),
                                        Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0)) / n() * 100, 2),
                                        Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0)) / n() * 100, 2),
                                        Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0)) / n() * 100, 2),
                                        `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0)) / n() * 100, 2),
                                        Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0)) / n() * 100, 2),
                                        Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0)) / n() * 100, 2),
                                        Course = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0)) / n() * 100, 2),
                                        Total = 100
                              )) %>%
  t()
# row.names(Various.Activities) <- c("N","Percent")
##### Reporting Activity
# Proportions (0-1 scale; note Total stays at 100) of students reporting each
# activity, merged onto the historical trend table read from Excel.
Reporting.Activity <- TEQ %>%
  summarize(`Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0)) / n(), 2),
            `Service Learning` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0)) / n(), 2),
            Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0)) / n(), 2),
            Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0)) / n(), 2),
            Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0)) / n(), 2),
            `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0)) / n(), 2),
            Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0)) / n(), 2),
            Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0)) / n(), 2),
            Course = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0)) / n(), 2),
            `Any (Big 4)` = round(sum(`LeastBig4`) / n(), 2),
            `Any` = round(sum(`LeastAny`) / n(), 2),
            Total = 100
  ) %>%
  t() %>%
  as.data.frame(.) %>%
  mutate(Experience = rownames(.)) %>%
  merge(read_excel("TEQ_Historical.xlsx", sheet = "Reporting.Activities"),., by = "Experience")
##### Activities by Gender
# Four row sets, later transposed: female counts, female percentages, male
# counts, male percentages.
# NOTE(review): all percentages divide by n() over the ENTIRE data set, not by
# the per-gender headcount -- confirm which denominator is intended. The Total
# rows subtract the opposite gender's count from the overall N, so records
# with a missing/other Gender code fall into both totals -- verify.
Activities.by.Gender <- rbind(
  TEQ %>%
    summarize(`Study Abroad` = sum(ifelse(`TRQ-STUAB` == "Yes" & `Gender` == "F",1,0)),
              Service = sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "F",1,0)),
              Research = sum(ifelse(`TRQ-RSCH` == "Yes" & `Gender` == "F",1,0)),
              Internship = sum(ifelse(`TRQ-INT` == "Yes" & `Gender` == "F",1,0)),
              Leadership = sum(ifelse(`TRQ-LEAD` == "Yes" & `Gender` == "F",1,0)),
              `Student-led` = sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "F",1,0)),
              Writing = sum(ifelse(`TRQ-WRIT` == "Yes" & `Gender` == "F",1,0)),
              Other = sum(ifelse(`TRQ-OTH` == "Yes" & `Gender` == "F",1,0)),
              Course = sum(ifelse(`TRQ-COURSE` == "Yes" & `Gender` == "F",1,0)),
              Total = total.n$N - sum(ifelse(Gender == "M",1,0))
    ),TEQ %>%
    summarize(`Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Service = round(sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Research = round(sum(ifelse(`TRQ-RSCH` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Internship = round(sum(ifelse(`TRQ-INT` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Other = round(sum(ifelse(`TRQ-OTH` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Course = round(sum(ifelse(`TRQ-COURSE` == "Yes" & `Gender` == "F",1,0)) / n() * 100, 2),
              Total = 100
    ),
  TEQ %>%
    summarize(`Study Abroad` = sum(ifelse(`TRQ-STUAB` == "Yes" & `Gender` == "M",1,0)),
              Service = sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "M",1,0)),
              Research = sum(ifelse(`TRQ-RSCH` == "Yes" & `Gender` == "M",1,0)),
              Internship = sum(ifelse(`TRQ-INT` == "Yes" & `Gender` == "M",1,0)),
              Leadership = sum(ifelse(`TRQ-LEAD` == "Yes" & `Gender` == "M",1,0)),
              `Student-led` = sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "M",1,0)),
              Writing = sum(ifelse(`TRQ-WRIT` == "Yes" & `Gender` == "M",1,0)),
              Other = sum(ifelse(`TRQ-OTH` == "Yes" & `Gender` == "M",1,0)),
              Course = sum(ifelse(`TRQ-COURSE` == "Yes" & `Gender` == "M",1,0)),
              Total = total.n$N - sum(ifelse(Gender == "F",1,0))
    ),
  TEQ %>%
    summarize(`Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Service = round(sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Research = round(sum(ifelse(`TRQ-RSCH` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Internship = round(sum(ifelse(`TRQ-INT` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Other = round(sum(ifelse(`TRQ-OTH` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Course = round(sum(ifelse(`TRQ-COURSE` == "Yes" & `Gender` == "M",1,0)) / n() * 100, 2),
              Total = 100
    )) %>%
  t()
##### Activities.by.Major
# Activity counts and percentages per major, with a per-school TOTAL row
# (IDSM excluded from totals) and an overall ALL row; percentages are
# relative to each group's own n().
# NOTE(review): as in the tables above, Service and `Student-led` both read
# from `TRQ-SL` -- confirm.
Activities.by.Major <- bind_rows(
  TEQ %>% group_by(School) %>%
    filter(Majr1 != 'IDSM') %>%
    summarize(`Majr1` = "TOTAL",
              N = n(),
              `Study Abroad` = sum(ifelse(`TRQ-STUAB` == "Yes",1,0)),
              `Study Abroad%` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()) * 100),
              Service = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
              Service.per = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Research = sum(ifelse(`TRQ-RSCH` == "Yes",1,0)),
              Research.per = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()) * 100),
              Internship = sum(ifelse(`TRQ-INT` == "Yes",1,0)),
              Internship.per = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()) * 100),
              Leadership = sum(ifelse(`TRQ-LEAD` == "Yes",1,0)),
              Leadership.per = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()) * 100),
              `Student-led` = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
              `Student-led.per` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Writing = sum(ifelse(`TRQ-WRIT` == "Yes",1,0)),
              Writing.per = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()) * 100),
              Course = sum(ifelse(`TRQ-COURSE` == "Yes",1,0)),
              Course.per = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0) / n()) * 100),
              Other = sum(ifelse(`TRQ-OTH` == "Yes",1,0)),
              Other.per = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()) * 100)
    ),
  TEQ %>% group_by(School, `Majr1`) %>%
    summarize(N = n(),
              `Study Abroad` = sum(ifelse(`TRQ-STUAB` == "Yes",1,0)),
              `Study Abroad%` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()) * 100),
              Service = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
              Service.per = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Research = sum(ifelse(`TRQ-RSCH` == "Yes",1,0)),
              Research.per = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()) * 100),
              Internship = sum(ifelse(`TRQ-INT` == "Yes",1,0)),
              Internship.per = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()) * 100),
              Leadership = sum(ifelse(`TRQ-LEAD` == "Yes",1,0)),
              Leadership.per = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()) * 100),
              `Student-led` = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
              `Student-led.per` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Writing = sum(ifelse(`TRQ-WRIT` == "Yes",1,0)),
              Writing.per = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()) * 100),
              Course = sum(ifelse(`TRQ-COURSE` == "Yes",1,0)),
              Course.per = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0) / n()) * 100),
              Other = sum(ifelse(`TRQ-OTH` == "Yes",1,0)),
              Other.per = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()) * 100)
    ),
  TEQ %>%
    summarize('School' = "ALL",
              'Majr1' = "ALL",
              N = n(),
              `Study Abroad` = sum(ifelse(`TRQ-STUAB` == "Yes",1,0)),
              `Study Abroad%` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()) * 100),
              Service = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
              Service.per = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Research = sum(ifelse(`TRQ-RSCH` == "Yes",1,0)),
              Research.per = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()) * 100),
              Internship = sum(ifelse(`TRQ-INT` == "Yes",1,0)),
              Internship.per = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()) * 100),
              Leadership = sum(ifelse(`TRQ-LEAD` == "Yes",1,0)),
              Leadership.per = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()) * 100),
              `Student-led` = sum(ifelse(`TRQ-SL` == "Yes",1,0)),
              `Student-led.per` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Writing = sum(ifelse(`TRQ-WRIT` == "Yes",1,0)),
              Writing.per = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()) * 100),
              Course = sum(ifelse(`TRQ-COURSE` == "Yes",1,0)),
              Course.per = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0) / n()) * 100),
              Other = sum(ifelse(`TRQ-OTH` == "Yes",1,0)),
              Other.per = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()) * 100)
    )
) %>%
  ungroup() %>%
  # blank the School label for the ALL row and IDSM majors so they sort first
  mutate(School = case_when(
    School == "ALL" | School == "IDSM" ~ " ",
    TRUE ~ as.character(School)
  )) %>%
  arrange(School, Majr1) %>%
  rename('Major' = Majr1)
##### Activities.by.Major.percentOnly
# Same table as above but percentages only (plus N); NA majors are filtered
# out here, unlike in Activities.by.Major.
Activities.by.Major.percentOnly <- bind_rows(
  TEQ %>% group_by(School) %>%
    filter(!is.na(Majr1), Majr1 != 'IDSM') %>%
    summarize(`Majr1` = "TOTAL",
              N = n(),
              `Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()) * 100),
              Service = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()) * 100),
              Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()) * 100),
              Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()) * 100),
              `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()) * 100),
              Course = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0) / n()) * 100),
              Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()) * 100)
    ),
  TEQ %>% group_by(School, `Majr1`) %>%
    filter(!is.na(Majr1)) %>%
    summarize(N = n(),
              `Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()) * 100),
              Service = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()) * 100),
              Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()) * 100),
              Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()) * 100),
              `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()) * 100),
              Course = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0) / n()) * 100),
              Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()) * 100)
    ),
  TEQ %>%
    filter(!is.na(Majr1)) %>%
    summarize('School' = "ALL",
              'Majr1' = "ALL",
              N = n(),
              `Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()) * 100),
              Service = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()) * 100),
              Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()) * 100),
              Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()) * 100),
              `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()) * 100),
              Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()) * 100),
              Course = round(sum(ifelse(`TRQ-COURSE` == "Yes",1,0) / n()) * 100),
              Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()) * 100)
    )
) %>%
  ungroup() %>%
  # blank the School label for the ALL row and IDSM majors so they sort first
  mutate(School = case_when(
    School == "ALL" | School == "IDSM" ~ " ",
    TRUE ~ as.character(School)
  )) %>%
  arrange(School, Majr1) %>%
  rename('Major' = Majr1)
##### Majors.by.Gender
# Gender composition (counts of M / F respondents) per major, assembled from
# three stacked summaries of TEQ via bind_rows():
#   1) per-school "TOTAL" rows (IDSM excluded from the school totals),
#   2) per-school x per-major rows,
#   3) one "ALL"/"ALL" grand-total row over every non-missing major.
# School is then blanked (" ") for the ALL and IDSM rows so those rows sort
# to the top after arrange().
Majors.by.Gender <- bind_rows(
  TEQ %>% group_by(School) %>%
    filter(!is.na(Majr1), Majr1 != 'IDSM') %>%
    summarize(`Majr1` = "TOTAL",
              N = n(),
              Male = sum(ifelse(Gender == "M",1,0)),
              Female = sum(ifelse(Gender == "F",1,0))
    ),
  TEQ %>% group_by(School, `Majr1`) %>%
    filter(!is.na(Majr1)) %>%
    summarize(N = n(),
              Male = sum(ifelse(Gender == "M",1,0)),
              Female = sum(ifelse(Gender == "F",1,0))
    ),
  TEQ %>%
    filter(!is.na(Majr1)) %>%
    summarize('School' = "ALL",
              'Majr1' = "ALL",
              N = n(),
              Male = sum(ifelse(Gender == "M",1,0)),
              Female = sum(ifelse(Gender == "F",1,0))
    )
) %>%
  ungroup() %>%
  # Blank the School label on the grand-total and IDSM rows.
  mutate(School = case_when(
    School == "ALL" | School == "IDSM" ~ " ",
    TRUE ~ as.character(School)
  )) %>%
  arrange(School, Majr1) %>%
  rename('Major' = Majr1)
##### Big4Any Major
# Counts and percentages of students with at least one "Big 4" experience
# (LeastBig4) and at least one experience of any kind (LeastAny), broken out
# by school (TOTAL rows), by school x major, and overall ("ALL" row).
# LeastBig4 / LeastAny are assumed to be 0/1 indicator columns on TEQ, so
# sum() counts students — TODO confirm against where TEQ is built.
Big4Any.Major <- bind_rows(
  TEQ %>% group_by(School) %>%
    filter(!is.na(Majr1), Majr1 != 'IDSM') %>%
    summarize(`Majr1` = "TOTAL",
              N = n(),
              Big4Any = sum(LeastBig4),
              Big4Any.per = round(sum(LeastBig4) / n() * 100),
              Any = sum(LeastAny),
              Any.per = round(sum(LeastAny) / n() * 100)
    ),
  TEQ %>% group_by(School, `Majr1`) %>%
    filter(!is.na(Majr1)) %>%
    summarize(N = n(),
              Big4Any = sum(LeastBig4),
              Big4Any.per = round(sum(LeastBig4) / n() * 100),
              Any = sum(LeastAny),
              Any.per = round(sum(LeastAny) / n() * 100)
    ),
  TEQ %>%
    filter(!is.na(Majr1)) %>%
    summarize('School' = "ALL",
              'Majr1' = "ALL",
              N = n(),
              Big4Any = sum(LeastBig4),
              Big4Any.per = round(sum(LeastBig4) / n() * 100),
              Any = sum(LeastAny),
              Any.per = round(sum(LeastAny) / n() * 100)
    )
) %>%
  ungroup() %>%
  # Blank the School label on the grand-total and IDSM rows so they sort first.
  mutate(School = case_when(
    School == "ALL" | School == "IDSM" ~ " ",
    TRUE ~ as.character(School)
  )) %>%
  arrange(School, Majr1) %>%
  rename('Major' = Majr1)
##### Big4.Counts.by.Major
# Distribution of the overall "how transformative was your education" item
# (TRQ-ED) on a 1-5 scale, per school (TOTAL rows), per school x major, and
# overall.  Columns `1`..`5` are response counts; Avg is the count-weighted
# mean rating; `%4or5` is the share answering 4 or 5.
# Note: level 4 matches both "Very Transfomative" and "Very Transformative"
# because the raw data contains the misspelled variant (flagged inline).
Overall <- bind_rows(
  TEQ %>% group_by(School) %>%
    filter(!is.na(Majr1), Majr1 != 'IDSM') %>%
    summarize(`Majr1` = "TOTAL",
              N = n(),
              `1` = sum(ifelse(`TRQ-ED` == "Not Particularly Transformative",1,0)),
              `2` = sum(ifelse(`TRQ-ED` == "Somewhat Transformative",1,0)),
              `3` = sum(ifelse(`TRQ-ED` == "Transformative",1,0)),
              `4` = sum(ifelse(`TRQ-ED` == "Very Transfomative" | `TRQ-ED` == "Very Transformative",1,0)), ##### MISPELLED IN THE DATA #####
              `5` = sum(ifelse(`TRQ-ED` == "Totally Transformative",1,0)),
              Avg = round((1*`1` + 2*`2` + 3*`3` + 4*`4` + 5*`5`) / n(),2),
              `%4or5` = round((`4` + `5`) / n() * 100)
    ),
  TEQ %>% group_by(School, `Majr1`) %>%
    filter(!is.na(Majr1)) %>%
    summarize(N = n(),
              `1` = sum(ifelse(`TRQ-ED` == "Not Particularly Transformative",1,0)),
              `2` = sum(ifelse(`TRQ-ED` == "Somewhat Transformative",1,0)),
              `3` = sum(ifelse(`TRQ-ED` == "Transformative",1,0)),
              `4` = sum(ifelse(`TRQ-ED` == "Very Transfomative" | `TRQ-ED` == "Very Transformative",1,0)), ##### MISPELLED IN THE DATA #####
              `5` = sum(ifelse(`TRQ-ED` == "Totally Transformative",1,0)),
              Avg = round((1*`1` + 2*`2` + 3*`3` + 4*`4` + 5*`5`) / n(),2),
              `%4or5` = round((`4` + `5`) / n() * 100)
    ),
  TEQ %>%
    filter(!is.na(Majr1)) %>%
    summarize('School' = "ALL",
              'Majr1' = "ALL",
              N = n(),
              `1` = sum(ifelse(`TRQ-ED` == "Not Particularly Transformative",1,0)),
              `2` = sum(ifelse(`TRQ-ED` == "Somewhat Transformative",1,0)),
              `3` = sum(ifelse(`TRQ-ED` == "Transformative",1,0)),
              `4` = sum(ifelse(`TRQ-ED` == "Very Transfomative" | `TRQ-ED` == "Very Transformative",1,0)), ##### MISPELLED IN THE DATA #####
              `5` = sum(ifelse(`TRQ-ED` == "Totally Transformative",1,0)),
              Avg = round((1*`1` + 2*`2` + 3*`3` + 4*`4` + 5*`5`) / n(),2),
              `%4or5` = round((`4` + `5`) / n() * 100)
    )
) %>%
  # Project helper (defined elsewhere): normalizes School/Major labels.
  portfolio_clean_major()
##### Overall Historical
# Collapse the current-year Overall table to one row per school (plus the
# blank-School rows, relabelled by Major — i.e. the ALL/IDSM rows) and merge
# those 2019 figures onto the multi-year history read from
# TEQ_Historical.xlsx.  `%4or5` is rescaled from a 0-100 percent back to a
# 0-1 proportion here to match the historical sheet's units.
# The final merge() uses the magrittr dot so this year's summary is the
# *second* (y) argument of merge.
Overall.Historical <- Overall %>%
  filter(School != " ") %>%
  group_by(School) %>%
  summarise(Avg = round(mean(Avg),1),
            `%4or5` = round(mean(`%4or5`) / 100 ,2)) %>%
  rbind(Overall %>%
          filter(School == " ") %>%
          # Blank-School rows carry their label in Major (e.g. "ALL", "IDSM").
          mutate(School = Major) %>%
          group_by(School) %>%
          summarise(Avg = round(mean(Avg),1),
                    `%4or5` = round(mean(`%4or5`) / 100 ,2))) %>%
  merge(read_excel("TEQ_Historical.xlsx", sheet = "Overall"),.)
##### Activity.Major.Over.Time
# Per-activity participation proportions for 2019 — overall ("ALL") and per
# school — stacked on top of the prior-year rows read from the historical
# workbook, then converted to percentages in one pass at the end.
# NOTE(review): Service and `Student-led` are both computed from `TRQ-SL`,
# so the two columns are identical here; one of them probably should use a
# different survey column — confirm against the questionnaire.
Activity.Major.Over.Time <-
  bind_rows(
    TEQ %>% filter(!is.na(Majr1), Majr1 != 'IDSM') %>%
      summarize(School = "ALL",
                Year = 2019,
                N = n(),
                `Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()),2),
                Service = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()),2),
                Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()),2),
                Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()),2),
                Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()),2),
                `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()),2),
                Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()),2),
                Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()),2),
                AnyBig4 = round(sum(`LeastBig4`) / n(),2),
                Any = round(sum(`LeastAny`) / n(),2))
    ,
    TEQ %>% group_by(School) %>%
      filter(!is.na(Majr1)) %>%
      summarize(Year = 2019,
                N = n(),
                `Study Abroad` = round(sum(ifelse(`TRQ-STUAB` == "Yes",1,0) / n()),2),
                Service = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()),2),
                Research = round(sum(ifelse(`TRQ-RSCH` == "Yes",1,0) / n()),2),
                Internship = round(sum(ifelse(`TRQ-INT` == "Yes",1,0) / n()),2),
                Leadership = round(sum(ifelse(`TRQ-LEAD` == "Yes",1,0) / n()),2),
                `Student-led` = round(sum(ifelse(`TRQ-SL` == "Yes",1,0) / n()),2),
                Writing = round(sum(ifelse(`TRQ-WRIT` == "Yes",1,0) / n()),2),
                Other = round(sum(ifelse(`TRQ-OTH` == "Yes",1,0) / n()),2),
                AnyBig4 = round(sum(`LeastBig4`) / n(),2),
                Any = round(sum(`LeastAny`) / n(),2)),
    # Prior years, already on the same column layout — presumably stored as
    # proportions, since they are scaled x100 below with the 2019 rows.
    read_excel("TEQ_Historical.xlsx", sheet = "Activity.Major.Over.Time")
  ) %>% ungroup() %>%
  arrange(School, Year)
# Convert the activity proportion columns (4:13) to percentages in place.
Activity.Major.Over.Time[4:13] = 100 * Activity.Major.Over.Time[4:13]
##### Kables
# Render every summary table into a kable (via the project-level
# kable_template() helper) and collect them in one named list so the report
# document can pull each table by name.  add_header_above() widths assume the
# column counts of the corresponding tables — verify if columns change.
TEQ_Kables <- list(
  Overall.Ed.Transformative = kable_template(Overall.Ed.Transformative, type = "basic", col.names = c("Response","N","%")),
  Various.Activities = kable_template(Various.Activities, type = "basic", col.names = c("N Participated","%")) %>%
    add_header_above(c("Various Activity Counts" = 3)),
  Reporting.Activity = kable_template(Reporting.Activity, type = "basic", col.names = c("Experience",seq(2014,2019))) %>%
    add_header_above(c(" " = 1, "% Reporting Activity" = 6)),
  Activities.by.Gender = kable_template(Activities.by.Gender, type = "basic", col.names = c("F", "%", "M", "%")) %>%
    add_header_above(c("Activities by Gender" = 5)),
  Majors.by.Gender = kable_template(Majors.by.Gender, type = "basic"),
  Activities.by.Major = kable_template(Activities.by.Major, type = "basic"),
  Activities.by.Major.percentOnly = kable_template(Activities.by.Major.percentOnly, type = "basic"),
  Big4Any.Major = kable_template(Big4Any.Major, type = "basic", col.names = c("","Major","N","Count","%","Count","%")) %>%
    add_header_above(c(" " = 3, "Big 4" = 2, "Any" = 2)),
  Overall = kable_template(Overall, type = "basic"),
  Overall.Historical = kable_template(Overall.Historical, type = "basic", col.names = c("School",rep(c("Avg","%4or5"),5))) %>%
    add_header_above(c("Academic Year" = 1, "2015" = 2, "2016" = 2, "2017" = 2, "2018" = 2, "2019" = 2)),
  Activity.Major.Over.Time = kable_template(Activity.Major.Over.Time, type = "basic")
)
# Drop the intermediate summary tables (and total.n) now that they have been
# rendered into TEQ_Kables; only the kable list remains in the workspace.
rm(list = c(
  "Overall.Ed.Transformative", "Various.Activities", "Reporting.Activity",
  "Activities.by.Gender", "Majors.by.Gender", "Activities.by.Major",
  "Activities.by.Major.percentOnly", "Big4Any.Major", "Overall",
  "Overall.Historical", "total.n", "Activity.Major.Over.Time"
))
|
e032cad07b72017d17c0fc16ddfa6ce0c611ba17
|
68a372a3a51dd43110a384a308443efaca35a43a
|
/New Code/test_code.R
|
155294b375d748df8e897d06990898067b42cd21
|
[] |
no_license
|
matteo-fontana/fastIWTlm
|
44c37168a9d8f0d4437b08ab49d98474351d5043
|
b2a3c37c03ac75dc4bc67d5fd1ce4b31029f6b66
|
refs/heads/master
| 2020-03-24T20:58:36.662305
| 2018-08-03T13:29:19
| 2018-08-03T13:29:19
| 143,006,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 612
|
r
|
test_code.R
|
# Smoke-test driver for the C++ IWT implementation: compiles the Rcpp source,
# runs it once with no input, then again with matrices read from disk.
library(Rcpp)
# Compiler flags for sourceCpp: C++11, strict warnings, OpenMP.
Sys.setenv('PKG_CXXFLAGS' = '-std=c++11 -Wall -pedantic -fopenmp')
sourceCpp('New Code/IWT2C_exp.cpp')
# First call: exercises the no-argument code path of the C++ function.
output=IWT2C_exp()
#now, try input via R
##prepare input objects: first step with known n1-n2-p
# Space-delimited text files with no header; coerced to plain matrices.
data1=as.matrix(read.delim('Data1.txt',sep=' ',header=F))
data2=as.matrix(read.delim('Data2.txt',sep=' ',header=F))
mu=as.matrix(read.delim('Mean0.txt',sep=' ',header=F))
# Sample sizes and number of evaluation points (n1 x p, n2 x p).
n1=dim(data1)[1]
n2=dim(data2)[1]
p=dim(data2)[2]
# Persist the fixture so later runs can skip the text parsing.
save(data1,data2,mu, file='New Code/test-data.rdata')
#first, only input parameters
output=IWT2C_exp(data1,data2,mu)
##experiment, read directly as Eigen matrices
|
b78196686888df4338a41f40f68fea7654304373
|
91417608f40cbc0a2c021993e701b1f6264524e8
|
/cleandata/project/dataset/run_analysis.R
|
c0324b2dbda7c05fb99dea02f36b8d9a7fc3ca29
|
[] |
no_license
|
latimer/datasciencecoursera
|
0ddcf18298a6ca73fa4d3a270b21ee5b96b4814f
|
88feeae5b7205cf0717d57d77f3323e5caeba958
|
refs/heads/master
| 2021-01-02T09:13:46.580020
| 2014-06-23T05:45:38
| 2014-06-23T05:45:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,113
|
r
|
run_analysis.R
|
# Read one split ("test" or "train") of the UCI HAR dataset from directory
# `dirname` and return a single data.frame whose columns are:
#   Subject, ExerciseType (decoded to its text label), then the feature readings.
#
# dirname  - directory name, also used as the file suffix, e.g. "test" reads
#            test/X_test.txt, test/subject_test.txt and test/y_test.txt
# colNames - character vector of column names applied to the result
#
# Returns NULL (with a warning) when the three files disagree on row count.
readData <- function(dirname, colNames) {
  suffix <- paste0(dirname, ".txt")
  # The sensor readings (feature columns).
  measurements <- read.table(file.path(dirname, paste0("X_", suffix)))
  # Subject identifier for each observation.
  subject <- read.table(file.path(dirname, paste0("subject_", suffix)))
  # Numeric activity code (1-6) for each observation.
  exType <- read.table(file.path(dirname, paste0("y_", suffix)))
  # The three files must describe the same observations row-for-row.
  # (The original code built a message with sprintf() but discarded it and
  # returned NULL silently; report the mismatch explicitly instead.)
  if (nrow(subject) != nrow(exType)) {
    warning(sprintf("Found subjects %d != exercises %d",
                    nrow(subject), nrow(exType)), call. = FALSE)
    return(NULL)
  }
  if (nrow(subject) != nrow(measurements)) {
    warning(sprintf("Found subjects %d != measurements %d",
                    nrow(subject), nrow(measurements)), call. = FALSE)
    return(NULL)
  }
  data <- cbind.data.frame(subject, exType, measurements)
  names(data) <- colNames
  # Decode the numeric activity code (column 2) into its descriptive label.
  exDescriptions <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                      "SITTING", "STANDING", "LAYING")
  data[[2]] <- exDescriptions[as.numeric(data[[2]])]
  data
}
# Feature names for the measurement columns.
features <- read.table("features.txt", colClasses = "character")
colNames <- c("Subject", "ExerciseType", features[[2]])

testData <- readData("test", colNames)
trainData <- readData("train", colNames)

# Sanity check: no subject should appear in both the test and training splits.
testSubjects <- unique(sort(testData$Subject))
trainSubjects <- unique(sort(trainData$Subject))
commonSubjects <- intersect(testSubjects, trainSubjects)
if (length(commonSubjects) != 0) {
  print("The following subjects are in training as well as testing")
  print(commonSubjects)
}

# Combine testing and training observations into one data set.
allData <- rbind.data.frame(testData, trainData)

# Keep Subject, ExerciseType, plus every mean and std feature column.
meanAndStd <- sort(c(1, 2,
                     grep(".*mean", colNames),
                     grep(".*std", colNames)))
meanAndStdData <- allData[, meanAndStd]
write.csv(meanAndStdData, "meanAndStdData.csv", row.names = FALSE)

# Average every retained measurement by (Subject, ExerciseType).
# BUG FIX: the original aggregated an undefined object `zd` and then renamed
# and wrote an undefined object `ag`; use the objects actually created here.
readings <- meanAndStdData[, c(-1, -2)]
groupData <- aggregate(readings,
                       by = list(meanAndStdData$Subject,
                                 meanAndStdData$ExerciseType),
                       FUN = mean)
names(groupData)[1] <- "Subject"
names(groupData)[2] <- "ExerciseType"
write.csv(groupData, "aggregate.csv", row.names = FALSE)
|
8e42abc831490b78d8851b081fdb82cba4461b49
|
eccc786c00c11473c64c919c0b0fd2eefad68888
|
/rcdk/man/getsmilesparser.Rd
|
3182c9a1f5e1dd79a4aa832739bcfbd7e5fbffb5
|
[] |
no_license
|
schymane/cdkr
|
25c3e6349cc4a1541ad721567b8141f141bb9e61
|
5d074e5031a2b04c99d58364b0a59ed20143f738
|
refs/heads/master
| 2021-01-20T12:24:24.235252
| 2018-11-11T10:38:59
| 2018-11-11T10:38:59
| 82,657,563
| 1
| 0
| null | 2018-11-12T21:38:20
| 2017-02-21T08:54:10
|
R
|
UTF-8
|
R
| false
| false
| 653
|
rd
|
getsmilesparser.Rd
|
\name{get.smiles.parser}
\alias{get.smiles.parser}
\title{
Get a SMILES Parser
}
\description{
This function returns a reference to a SMILES parser
object. If you are parsing multiple SMILES strings, it is
preferable to create your own parser and supply it to
\code{\link{parse.smiles}} rather than forcing that function
to instantiate a new parser for each call
}
\usage{
get.smiles.parser()
}
\value{
A \code{jobjRef} to a CDK \code{SmilesParser} object
}
\keyword{programming}
\seealso{
\code{\link{get.smiles}},
\code{\link{get.smiles.parser}},
\code{\link{view.molecule.2d}}
}
\author{Rajarshi Guha (\email{rajarshi.guha@gmail.com})}
|
85138bde8ae916a83cf68fef244ed93f8d79a0de
|
99c4b03153c585224c98b7632158d41c16423cac
|
/pkg/man/priceMovePlot.Rd
|
e2fa6b1f53f124da07c29ba50eda712d048bc5e1
|
[] |
no_license
|
Tynes-Science/aekee2aiseir
|
3be8d8653a3b7a44ac135617f7f9a174db8b4020
|
3673c9f38f7bb6015f753724cf2aa61fe57c55d9
|
refs/heads/master
| 2021-01-01T12:52:33.146450
| 2020-06-15T17:36:11
| 2020-06-15T17:36:11
| 239,287,938
| 0
| 0
| null | 2020-06-15T17:36:12
| 2020-02-09T10:50:06
|
R
|
UTF-8
|
R
| false
| true
| 876
|
rd
|
priceMovePlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis.R
\name{priceMovePlot}
\alias{priceMovePlot}
\title{Generate price plot}
\usage{
priceMovePlot(
data,
smb_list,
start_date = as.Date("1900-01-01"),
end_date = as.Date("2900-01-01"),
type = c("open", "high", "low", "close"),
brk = "2 month"
)
}
\arguments{
\item{data}{(data frame) daily price data with `date`, `symbol`, `open`, `high`, `low`, `close` columns}
\item{smb_list}{(character vector) list of symbols for which generate plot}
\item{start_date}{(character) start date in format "%Y-%m-%d" for data in plot}
\item{end_date}{(character) end date in format "%Y-%m-%d" for data in plot}
\item{type}{(character) type of price for plot from: c("open","high","low","close")}
\item{brk}{(character) how often show labels on x-axis}
}
\description{
Generate price plot
}
|
7f0c1a5611f5a9a6dcb06bb6112255044114bc50
|
f6352c4ee695ca2d5b6a3c36400b37259ff3f800
|
/man/stats_chisq.test.Rd
|
a4e3397a229f187e1ba88085c73e43dfddc4abbe
|
[] |
no_license
|
lshen1/anatools
|
57c7e9c7ea24662593b28b639aafca3e5e6ab2ec
|
1e9d10ed40defa5db7ef62d9b48710e9e20f0b12
|
refs/heads/master
| 2021-01-10T21:48:42.495201
| 2015-07-21T22:54:44
| 2015-07-21T22:54:44
| 38,843,573
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
rd
|
stats_chisq.test.Rd
|
% Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/modelTesting.R
\name{stats_chisq.test}
\alias{stats_chisq.test}
\title{Perform Pearson's Chi-squared Test between each row in a matrix and a vector using `stats` package in R}
\usage{
stats_chisq.test(data, group, rowlevel, ...)
}
\arguments{
\item{data}{a matrix with at least 2 outcomes (0/1; Mut/WT, etc).}
\item{group}{a vector of factor which will be concatenated to the row of 'data'.}
\item{rowlevel}{specifies the order of factor use in a contingency table.}
\item{...}{other parameters in chisq.test function.}
}
\value{
the contingency table and p-value.
}
\description{
'chisq.test' performs chi-squared contingency table tests and goodness-of-fit tests.
}
\examples{
m0 <- matrix(0, 5, 30)
dat <- apply(m0, c(1,2), function(x) sample(c(0,1),1))
rownames(dat) <- paste("R", 1:5, sep="")
colnames(dat) <- paste("C", 1:30, sep="")
si.gp <- factor(c(rep("E", 15), rep("M", 15)), levels=c("E", "M"))
stats_chisq.test(dat, si.gp, rowlevel=c("0", "1"))
}
\seealso{
\code{\link{chisq.test}} which this function wraps.
}
|
ea9f947cdae4161a24a9edee899efd369bad096b
|
39ccd03931b7bb71a17bbdeff27c63e948b8e28a
|
/man/font_style.Rd
|
61f77b4a08b98acd52def66ba3f6b09a0afe5da6
|
[
"MIT"
] |
permissive
|
coolbutuseless/devoutsvg
|
d4448f5bf60e377e6acec03e1750fb99dc2d37f7
|
9151bd07a58a506ffa3d39f46d0982fd1e1693e1
|
refs/heads/master
| 2020-08-09T19:51:08.571630
| 2020-04-19T06:45:37
| 2020-04-19T06:45:37
| 214,160,262
| 58
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 501
|
rd
|
font_style.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-style.R
\name{font_style}
\alias{font_style}
\title{Construct a vector of styles related to 'font'}
\usage{
font_style(state, geom)
}
\arguments{
\item{state}{list including 'gc' (graphics context)}
\item{geom}{which geometry has asked for a style}
}
\value{
character vector of multiple "{attr_name}: {value}" strings for
'font-size', 'font-family', 'fill'
}
\description{
TODO: add font choice and style
}
|
580ec4b25269117cbf50e72fedf6b8bd48e2ef93
|
642493202c771806f6f9f50a429cadf7023bb416
|
/data/sealevel_world/input4_plot_sealevel_data_world_ewl_slr.r
|
219cc8c6b40457e0888bee09c43fae928e7224c6
|
[] |
no_license
|
TheClimateService/UI-mockup
|
a0fd242513f1c18121a027e305ae96c5ba3dc225
|
bf4546bb6461dc19e1ba99ea90d3b32ed6be3554
|
refs/heads/master
| 2021-01-19T15:50:47.039962
| 2018-08-16T18:28:52
| 2018-08-16T18:28:52
| 100,974,930
| 0
| 0
| null | 2017-08-21T17:06:29
| 2017-08-21T17:06:29
| null |
UTF-8
|
R
| false
| false
| 1,932
|
r
|
input4_plot_sealevel_data_world_ewl_slr.r
|
# This generates location and scenario inputs for plot_sealevel_data_world_ewl_slr.r .
# Resolve the LocationID for the currently selected corporate location.
# NOTE(review): select() returns a one-column data frame, so `locID` is a
# data frame (not a scalar) when pasted below — works via implicit coercion,
# but confirm this is intended.
locID <- corpLocations %>% filter(ParentCorpID==USER$ParentCorpID & LocationName==input$inputLocations_overall) %>% select(LocationID)
key <- paste(locID,USER$ParentCorpID,input$inputLocations_overall)
# Keys in the nearest-segment table are underscore-delimited.
key <- gsub(" ","_",key)
nd = read.table("./data/scoring_engine/coastalflooding/TCSDB_structure.locations.csv.nearest.gtsr.segment", header=TRUE)
# V17 (RLm2yr) is the first historical return level in the nd table; if the location is outside the coastal distance threshold in the SE, this value will be a string (e.g., "TooFarFromCoast_threshold_10km") rather than a number.
# Force the elements of RLm2yr that are not numbers to be "NA".
nd$RLm2yr <- as.numeric(as.character(nd$RLm2yr))
#ele = nd %>% filter(nd$V1==key) %>% select(V14:V17, V27)
ele = nd %>% filter(nd$LocationID_ParentCorpID_LocationName==key) %>% select(mindistid2, nearestseglon, nearestseglat, RLm2yr, station)
# key2 is the string that identifies the element in world_ewl_with_slr_stations (created by /data/sealevel_world/load_sealevel_world.r) that has been associated with the current corporate facility. It consists of of the id of the nearest coastal segment, the segment's lon/lat, and the name of the EWL station associated with that segement. An example is 3873_-79.472_8.999_BALBOA.
#key2 <- paste(ele$V14, ele$V15, ele$V16, ele$V27)
key2 <- paste(ele$mindistid2, ele$nearestseglon, ele$nearestseglat, ele$station)
key2 <- gsub(" ","_",key2)
#loc <- input$extremewaterLocation2_with_slr_station
loc <- key2
# The following sets the scenario from the backend, not Corporate/Analyze
# scenario <- input$world_slr_scenario
# The following sets the scenario from Corporate/Analyze, using uiOutput("selectInput_scenario") in ui.R and its definition in server.R.
scenario <- input$selectscenario_overall
|
f02c6cd508570f9b3ea43b385749e3ae6857b39c
|
b725ae79645bb08446f3c7eb4c95da47b2627ad3
|
/tests/testthat/test-06-zproject.R
|
ff13af916b4df827098cbd7e868fd3a1ea299de4
|
[] |
no_license
|
cbig/zonator
|
cf1692e9d210d96317164c94dfc902464b3e361c
|
bfa5a27689d853ef824634a5a6be52f9b3f54c24
|
refs/heads/master
| 2021-01-18T22:08:44.457702
| 2020-05-18T18:29:08
| 2020-05-18T18:29:08
| 8,728,996
| 12
| 6
| null | 2018-04-11T06:51:40
| 2013-03-12T13:57:04
|
R
|
UTF-8
|
R
| false
| false
| 3,453
|
r
|
test-06-zproject.R
|
context("Zproject creation")

# Shared fixture options (setup.dir, bat.file, spp.file) provided by the
# package's test helpers.
options <- get_options()

test_that("Zproject is created correctly based on existing project", {
  setup.dir <- options$setup.dir
  test.project <- load_zproject(setup.dir)
  # Test slots: class and that @root points at the loaded directory.
  expect_that(test.project, is_a("Zproject"),
              paste("Test project is not an instance of class 'Zproject':",
                    class(test.project)))
  expect_that(test.project@root, equals(setup.dir),
              paste("Test project object's slot 'root' does not point to tutorial directory:",
                    test.project@root))
  # Test that there are variants, and that each is a Zvariant.
  expect_true(length(test.project@variants) >= 1,
              "Test project has no variants.")
  for (variant in test.project@variants) {
    expect_that(variant, is_a("Zvariant"),
                paste("Test project object's slot 'variants' contains an object",
                      "not an instance of class 'Zvariant:",
                      class(variant)))
  }
})

context("Zproject methods")

test_that("Retrieving variants works", {
  setup.dir <- options$setup.dir
  test.project <- load_zproject(setup.dir)
  bat.file <- options$bat.file
  spp.file <- options$spp.file
  # Reference variant built directly from the bat file; retrieval by index
  # and by name should both return an identical object.
  test.variant <- new("Zvariant", bat.file = bat.file)
  # Number of variants
  expect_equivalent(nvariants(test.project), 6,
                    info = paste("Number of variants reported incorrectly"))
  # Single variant retrieval based on index
  expect_identical(get_variant(test.project, 1),
                   test.variant,
                   "Variant isn't returned correctly based on index")
  # Single variant retrieval based on name
  expect_identical(get_variant(test.project, "01"),
                   test.variant,
                   "Variant isn't returned correctly based on name")
  # variants() should return the full @variants list unchanged.
  all.variants <- test.project@variants
  expect_equivalent(variants(test.project), all.variants,
                    "All variants are not returned correctly")
})
# test_that("Zproject is created correctly as a new project", {
# temp.dir <- file.path(tempdir(), "test_zproject")
#
# variant.names <- c("GPAN_01_abf",
# "GPAN_02_caz",
# "GPAN_03_abf_w",
# "GPAN_04_caz_w",
# "GPAN_05_abf_w_ecor_w10",
# "GPAN_06_caz_w_ecor_w10",
# "GPAN_07_abf_w_ecor_w40",
# "GPAN_08_caz_w_ecor_w40")
#
# dat.template <- system.file("extdata", "template_GPAN.dat", package="zonator")
#
# test.project <- create_zproject(root=temp.dir, variants=variant.names,
# dat.from=dat.template)
#
# # Test slots
# expect_that(test.project, is_a("Zproject"),
# paste("Test project is not an instance of class 'Zproject':",
# class(test.project)))
# expect_that(test.project@root, equals(temp.dir),
# paste("Test project object's slot 'root' does not point to tutorial directory:",
# test.project@root))
# # Test that there are variants
# expect_true(length(test.project@variants) >= 1,
# "Test project has no variants.")
#
# for (variant in test.project@variants) {
# expect_that(variant, is_a("Zvariant"),
# paste("Test project object's slot 'variants' contains an object",
# "not an instance of class 'Zvariant:",
# class(variant)))
# }
# })
|
cb32e51cc38509eb5632df69853df709d0993ec4
|
4feba473f0490928f34a9e03be7810e8fc50a0cd
|
/cachematrix.R
|
bb74a3022ab11d6052d812d2e33333fd667c7920
|
[] |
no_license
|
drP84/ProgrammingAssignment2
|
a5ec20ef46507790c6fca6790148a9d253a05f32
|
43cb0a3d7de9a3ef32db6aba8f8b2150eb7547bc
|
refs/heads/master
| 2020-12-25T06:26:23.400926
| 2016-01-22T13:34:15
| 2016-01-22T13:34:15
| 50,133,042
| 0
| 0
| null | 2016-01-21T20:00:10
| 2016-01-21T20:00:09
| null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
cachematrix.R
|
## Create a caching wrapper around a matrix.  The returned list exposes:
##   set(y)         - replace the stored matrix and invalidate the cache
##   get()          - return the stored matrix
##   setInverse(i)  - store a computed inverse in the cache
##   getInverse()   - return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # New matrix: any previously cached inverse is stale, so clear it.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper `x`.
## If the wrapper already caches an inverse it is returned directly (with a
## message); otherwise the inverse is computed with solve(), stored back in
## the cache, and returned.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, and return the fresh inverse.
    fresh <- solve(x$get(), ...)
    x$setInverse(fresh)
    return(fresh)
  }
  message("get inverse from cache")
  cached
}
|
6f85446bfc9ea1c804f3dbad0a5f29bb2b2678f2
|
81fbd0db9e8c99fcff278a20afdc2963c47d6900
|
/cachematrix.R
|
f32c71a3e71f42987469c0f96827403d5506d986
|
[] |
no_license
|
jaysonleek/ProgrammingAssignment2
|
2dbc873056c7f91279b12d05ae5aae28348e4675
|
ff1f33a5cce159750f15936222c5befe4b42e600
|
refs/heads/master
| 2022-10-17T11:01:35.599117
| 2020-06-09T18:15:19
| 2020-06-09T18:15:19
| 271,029,701
| 0
| 0
| null | 2020-06-09T18:15:21
| 2020-06-09T14:45:45
| null |
UTF-8
|
R
| false
| false
| 1,442
|
r
|
cachematrix.R
|
## Build a special "matrix" object: a list of closures sharing one
## environment that holds the matrix `x` and its (lazily computed) inverse.
##   set(y)         - store a new matrix and reset the cached inverse
##   get()          - retrieve the stored matrix
##   setinverse(i)  - record a computed inverse
##   getinverse()   - retrieve the recorded inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  stored_inv <- NULL
  replace <- function(y) {
    x <<- y
    # Invalidate the cache whenever the underlying matrix changes.
    stored_inv <<- NULL
  }
  fetch <- function() x
  remember_inverse <- function(inverse) stored_inv <<- inverse
  recall_inverse <- function() stored_inv
  # Named accessor list so callers can use the $ operator.
  list(set = replace,
       get = fetch,
       setinverse = remember_inverse,
       getinverse = recall_inverse)
}
# Compute (or recall) the inverse of the special "matrix" created by
# makeCacheMatrix above.  A previously cached inverse is returned without
# recomputation; otherwise solve() is called, its result is stored in the
# cache via setinverse, and then returned.  `...` is passed on to solve().
cacheSolve <- function(x, ...) { ## Returns a matrix that is the inverse of 'x'
  known <- x$getinverse()
  if (is.null(known)) {
    # Nothing cached yet: invert, memoize, return.
    known <- solve(x$get(), ...)
    x$setinverse(known)
  } else {
    message("getting cleared data")
  }
  known
}
|
88c85e5326e6890c2ff756f95d8e9a47beb92210
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/examples/documented/r_modular/kernel_diag_modular.R
|
24d69961e4fe4215644f8fe0f4a70416ae4ab1b4
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069
| 2015-10-27T21:43:20
| 2015-10-27T21:43:20
| 45,059,082
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
kernel_diag_modular.R
|
# This is an example for the initialization of the diag-kernel.
# The diag kernel has all kernel matrix entries but those on
# the main diagonal set to zero.
library(shogun)

# Dense real-valued feature matrices read from disk (train and test splits).
fm_train_real <- as.matrix(read.table('../data/fm_train_real.dat'))
fm_test_real <- as.matrix(read.table('../data/fm_test_real.dat'))

# diag
print('Diag')

feats_train <- RealFeatures(fm_train_real)
feats_test <- RealFeatures(fm_test_real)
# Value placed on the diagonal of the kernel matrix.
diag <- 23.

# Train x train kernel matrix.
kernel <- DiagKernel(feats_train, feats_train, diag)
km_train <- kernel$get_kernel_matrix()
# Re-initialize the same kernel object on train x test, then extract.
dump <- kernel$init(kernel, feats_train, feats_test)
km_test <- kernel$get_kernel_matrix()
|
464adcf20caed11cb983ea7eefd2959005e5d38e
|
d70cec5dace0bd0193c23676b51a5abf539a4183
|
/scripts/plotThalmoCorticalConnectivity.R
|
9e3a0a08276de743ee49f95147a96b72be06ef7b
|
[] |
no_license
|
jeffduda/StructConnRepro
|
7de0a43a7bb2ade1f82d8730e46c2631e1353757
|
d6da6bcec6dd8cab87209eadb5a72897507d6dea
|
refs/heads/master
| 2021-01-10T18:07:28.583014
| 2014-04-07T21:13:57
| 2014-04-07T21:13:57
| 10,126,849
| 0
| 2
| null | 2014-04-08T20:00:37
| 2013-05-17T15:53:16
|
TeX
|
UTF-8
|
R
| false
| false
| 3,655
|
r
|
plotThalmoCorticalConnectivity.R
|
# plot thalamo cortical connectivity
# Load every "*bayes1000_sc.csv" connectivity file under the (hard-coded)
# processed-data directory, normalize each hemisphere's cortical-target
# counts to row-wise proportions, and return
#   list(data = <list of matrices>, names = <3-char subject ids>).
# Assumed CSV layout (TODO confirm): column 4 = hemisphere (1 = left,
# 2 = right), columns 6:12 = left-hemisphere targets, 13:19 = right.
getThalamoCorticalConnectivity <- function( )
{
  listIdx <- 1
  matlist <- list()
  files <- list.files(path="/mnt/picsl/jtduda/StructConnRepro/data/MMRR-21_processed/", all.files=TRUE, full.names=TRUE, recursive=TRUE, pattern="*bayes1000_sc.csv")
  subjects <- c()
  for ( file in files ) {
    #print( paste( "Reading:", file ))
    if ( file.info(file)$size > 0 ) {
      cmat <- as.matrix(read.csv( file ))
      cmat[is.na(cmat)] <- 0
      print(basename(file))
      # Subject id = first 3 characters of the file name.
      id <- substr(basename(file),0,3)
      # Skip files with too few voxels/rows to be a full dataset.
      if ( dim(cmat)[1] > 6000 ) {
        leftvals <- (cmat[,4] == 1)
        rightvals <- (cmat[,4] == 2)
        lsums <- rowSums( cmat[leftvals,6:12] )
        rsums <- rowSums( cmat[rightvals,13:19] )
        sums <- rowSums( cmat[,6:19] )
        #cmat[,6:19] <- cmat[,6:19] / sums
        # Normalize counts to per-row proportions within each hemisphere;
        # 0/0 rows produce NaN, zeroed out just below.
        cmat[leftvals,6:12 ] <- cmat[leftvals,6:12] / lsums
        cmat[rightvals,13:19 ] <- cmat[rightvals,13:19] / rsums
        cmat[is.nan(cmat)] <- 0
        print(max(cmat[leftvals, 6:12]))
        print(max(cmat[rightvals, 13:19]))
        matlist[[listIdx]] <- cmat
        print( paste( listIdx, file ) )
        subjects <- c(subjects, id)
        listIdx <- listIdx + 1
      }
    }
    else {
      print( paste("Ignoring empty file:",file))
    }
  }
  return( list(data=matlist, names=subjects) )
}
# Build a polar ggplot of mean thalamo-cortical connectivity per cortical
# region, one line per (subject, hemisphere).
#   connectivityMats - list of per-subject matrices as produced by
#                      getThalamoCorticalConnectivity()$data
#   names            - matching subject ids (used for line colour)
# Returns the ggplot object (not printed).
plotThalamoCorticalConnectivity <- function( connectivityMats, names )
{
  labs <- c(1:14)
  labs[ labs > 7 ] <- labs[ labs > 7 ] + 1
  labs <- labs + 1
  # Left regions in order, right regions reversed so the polar plot mirrors
  # the two hemispheres around the vertical axis.
  regions <- c( "Sensory", "Occipital", "Frontal", "Premotor", "Parietal", "Temporal", "Cingulate")
  revregions <- regions[c(7:1)]
  actualRegions <- c(paste("L",regions), paste("R",revregions))
  # "Top"/"Bottom" are placeholder factor levels that leave gaps between the
  # hemispheres on the circle.
  plotRegions <- c( "Top", paste("L",regions),"Bottom",paste("R",revregions) )
  labs <- c(actualRegions)
  values <- c()
  subject <- c()
  hemi <- c()
  labels <- c()
  n <- length(connectivityMats)
  for ( i in c(1:n) ) {
    mat <- connectivityMats[[i]]
    id <- names[i]
    mat[is.na(mat)] <- 0
    nCol <- dim(mat)[2]
    # divide counts by number of streamlines
    #mat[,6:nCol] <- mat[,6:nCol] / mat[,5]
    lvalues <- (mat[,4] == 1)
    rvalues <- (mat[,4] == 2)
    # Mean connectivity to each of the 7 targets, per hemisphere; right
    # hemisphere reversed to match the mirrored label order above.
    lvals <- colMeans( mat[lvalues,6:12] )
    rvals <- colMeans( mat[rvalues,13:19] )
    vals <- c(lvals, rvals[c(7:1)])
    #vals[ is.na(vals) ] <- 0
    #print(paste( i, sum(vals) ) )
    #print( max(vals) )
    #vals <- vals[c(c(1:7),c(14:8))]
    values <- c(values, vals)
    #subject <- c(subject, rep(i,length(vals) ) )
    subject <- c(subject, rep(id, length(vals) ) )
    # Odd group ids = left hemisphere, even = right, so each hemisphere gets
    # its own connected line for this subject.
    hemi <- c( hemi, rep((i*2-1),7), rep((i*2),7))
    labels <- c(labels, labs)
  }
  # Fixed level order so regions appear at consistent angular positions.
  labels <- factor( labels, levels=plotRegions )
  dat <- data.frame( Connectivity=values, subject=as.factor(subject), hemi=as.factor(hemi), labels=labels)
  #x2 <- c(0,0,0,0,0,0)
  #y2 <- c(0,0.01,0.02,0.03,0.04,0.05)
  #textvalues <-c("0.00", "0.01", "0.02", "0.03", "0.04", "0.05" )
  # NOTE(review): textdat is built but never used; xx/yy are passed to
  # geom_hline() via aes(x=, y=), which geom_hline does not consume this
  # way — likely only the y = 0 reference circle was intended. Confirm.
  textdat <- data.frame(x2=0.0, y2=0.0, texthere="Text Here")
  xx=seq(0,14,length=10)
  yy=rep(0,10)
  g <- ggplot( dat, aes( x=labels, y=Connectivity, colour=subject, group=hemi) ) + geom_line(alpha=0.4) + geom_point(alpha=0.4) + scale_y_continuous(limits=c(-sqrt(var(values)),max(values))) + theme(axis.text.x=element_text(size=7,face="bold"), axis.title.x=element_blank() ) + theme(legend.position = "none") + ggtitle( "Thalma-Cortical Structural Connectivity" ) + geom_hline(aes(x=xx, y=yy))
  g <- g + coord_polar(theta="x", direction=-1)
  #+ geom_text(data=NULL, colour="black", aes(x=0, y=10, label="Text"))
  return ( g )
}
|
d45cf819ed7d9aa06e32ac6b60029b274439ae5a
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-docs/src/booklets/v2_2015/source/DeepLearning_Vignette_code_examples/deeplearning_gridsearch_random.R
|
e708ea44bb4a9b9c9a84b1874e98fca348eeddb7
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 674
|
r
|
deeplearning_gridsearch_random.R
|
# Random hyperparameter search over H2O deep-learning models.
# hidden_opt: 100 candidate architectures, each 1-4 layers of 11-60 units.
hidden_opt = lapply(1:100, function(x)10+sample(50, sample(4), replace=TRUE))
# l1_opt: candidate L1 regularization strengths, 1e-6 to 1e-3.
l1_opt = seq(1e-6,1e-3,1e-6)
hyper_params <- list(hidden = hidden_opt, l1 = l1_opt)
# Random search, capped at 10 models or 100 seconds, seeded for
# reproducibility.
search_criteria = list(strategy = "RandomDiscrete",
                       max_models = 10, max_runtime_secs = 100,
                       seed=123456)

# x, y, train, test are assumed to be defined earlier in the vignette.
model_grid <- h2o.grid("deeplearning",
                       grid_id = "mygrid",
                       hyper_params = hyper_params,
                       search_criteria = search_criteria,
                       x = x,
                       y = y,
                       distribution = "multinomial",
                       training_frame = train,
                       validation_frame = test,
                       score_interval = 2,
                       epochs = 1000,
                       stopping_rounds = 3,
                       stopping_tolerance = 0.05,
                       stopping_metric = "misclassification")
|
74c6ebdba421582f5bf0abeef0eef923026766d3
|
4b44d1dbdf20c25b5afadd63180ab695a1ea738b
|
/dplyrTest.R
|
3c2ee4012830f83e8d5733b7e3c9ba44d44e3d49
|
[] |
no_license
|
NTU-CSX-DataScience/pecu
|
34f3beeba3b08394e134b5596849ea62c6f53883
|
b2a3e0eb4ea31eb3eef0f894aedca15f3725e2e6
|
refs/heads/master
| 2021-09-13T14:10:26.326879
| 2018-05-01T03:26:56
| 2018-05-01T03:26:56
| 103,476,845
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
dplyrTest.R
|
# Small dplyr exercise on the hflights flight dataset.
library(dplyr)
library(hflights)
head(hflights)
dataOrg = hflights
# Flights in January OR on Wednesday (DayOfWeek == 3).
# NOTE(review): mixes a bare column (Month) with dataOrg$DayOfWeek inside
# filter(); both resolve to the same data here, but the bare form is the
# intended dplyr style.
temp = filter(dataOrg, Month == 1 | dataOrg$DayOfWeek == 3)
# Same filter applied after narrowing to four columns, via a pipe.
SelTemp = select(dataOrg, DayOfWeek, Month, DepTime, UniqueCarrier) %>%
  filter(Month == 1 | DayOfWeek == 3)
|
24161e0388cad6fe49abf845bb95fd35fac1137b
|
248c8a01242b6730a4e9fed531d3ba9390e80726
|
/lab_mm-08_after.R
|
021ca97228b551a9b16c109cfb5e7b9291c1a222
|
[] |
no_license
|
aksyuk/R_lab
|
635faa7f1f64dae6e88367b800b1bf6acc20a56d
|
30997f9d1f2bfddb27bf2697269a545ee4df3009
|
refs/heads/master
| 2021-05-13T13:32:19.911254
| 2017-05-26T21:45:34
| 2017-05-26T21:45:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
lab_mm-08_after.R
|
# Mathematical modelling: Practical 8
# Decision trees

library('tree')
library('ISLR')
library('MASS')
library('randomForest')
library('gbm')

# Bagging ----------------------------------------------
# bagging is a special case of a random forest with m = p, so both can be
# built with the randomForest function

# bagging: =====================================================================
# Auto data without column 9 (the car name).
au <- Auto[,-9]
my.seed <- 1
n <- nrow(au)
train.percent <- 0.5
set.seed(my.seed)
inTrain <- sample(n, n * train.percent)
# training sample -- 50%
ncol(au)
# use all 7 predictors at each split (bagging: mtry = p)
set.seed(1)
bag.au <- randomForest(mpg ~ ., data = au, subset = inTrain,
                       mtry = 7, importance = TRUE)
bag.au
?randomForest
# prediction on the held-out observations
yhat.bag <- predict(bag.au, newdata = au[-inTrain, ])
# "predicted vs actual" plot
au.test <- au[-inTrain, "mpg"]
plot(yhat.bag, au.test)
abline(0, 1) # line of perfect prediction
mean((yhat.bag - au.test)^2) # test-set MSE (units are mpg^2 here; the
# original comment said "thousand dollars", apparently copied from the
# Boston housing example)
# the number of trees can be changed with the ntree argument
set.seed(1)
bag.au <- randomForest(mpg ~ ., data = au, subset = inTrain,
                       mtry = 7, ntree = 500)
yhat.bag <- predict(bag.au, newdata = au[-inTrain, ])
mean((yhat.bag - au.test)^2)
importance(bag.au)
3bf0b471f1a7478003d660ff83bc84029620882f
|
061c13925c352598bf66428f453ac9186b3a05c2
|
/devsem-stat.R
|
e289e46aef7e686ea94bc9117d76206f15709816
|
[] |
no_license
|
patcha2014/Dissertation_Analysis
|
d649226ebc38d6e932c488345e333d392a7cf231
|
120141b67796e26743c464dbcc9b59b50be69911
|
refs/heads/master
| 2020-04-06T07:12:57.193241
| 2016-10-19T18:20:47
| 2016-10-19T18:20:47
| 52,697,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
devsem-stat.R
|
# dev sem stats
# Exploratory snippet: wage-by-skill summaries from the cleaned regression
# data.  Several statements below reference objects that are not defined in
# this file (flagged inline) — this script appears to depend on a prior
# interactive session.
cleanreg <- csv.get("/Users/Mint/Dropbox/Dissertation_Data/cleanreg.txt",sep="\t")
#temp <- temp[temp$qtr==3,]
library(ggplot2) # line graph
# NOTE(review): `temp` is used here but only assigned *after* this plot;
# as written this errors on a fresh session.
ggplot(data=temp, aes(x = year, y = wage, group=skill, colour=skill)) +
  geom_line() +
  geom_point()
temp <- tapply(cleanreg$wage, cleanreg$skill, mean, na.rm=TRUE)
# NOTE(review): options(digits=2) inside data.frame() adds the *previous*
# digits option as a column — probably meant round(temp, 2) instead.
wage.tab <- data.frame(key=names(temp), wage=temp,options(digits=2))
# NOTE(review): time_id is undefined and the call has a trailing empty
# argument; interaction.plot needs (x.factor, trace.factor, response).
interaction.plot(time_id, wage,)
library(xtable)
# NOTE(review): `y` is undefined here — presumably wage.tab was intended.
xtable(y)
|
386704b6a9a8b5beb80f5c2b8dc3799a27e753fc
|
4764c2b10291d7387a37e7722a6429abad96a9d9
|
/figure/plot3.R
|
80955deb8779f62243fa201ae9c6d607c63fb8eb
|
[] |
no_license
|
ericbrown07/ExData_Plotting1
|
0e8f34e9d3bc372ee4939d1ad2580a7f069ee69c
|
7dd892547de00a1bd691d6337b5d015e904e116e
|
refs/heads/master
| 2021-01-23T21:22:47.536534
| 2014-12-03T18:17:27
| 2014-12-03T18:17:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,129
|
r
|
plot3.R
|
## Plot 3 - Energy Sub metering (1,2,3) (by time, labeled by day)
## Use these functions to read data into R and set up data frame for plots
library(data.table) ## load data.table package to use fread function
powerdata <- fread("household_power_consumption.txt") ## use fread to read in file
# fread will read table into R in same way as read.csv or read.table
powerdatafebpull <- powerdata[which(powerdata$Date == "1/2/2007" | powerdata$Date == "2/2/2007"),]
# create data frame with data from desired dates
class(powerdatafebpull) <- "data.frame"
# coerce data into data frame
powerdatafebpull$Date <- as.Date(powerdatafebpull$Date, format = "%d/%m/%Y")
# coerce date column to date format
Date.Time <- paste(powerdatafebpull$Date, powerdatafebpull$Time)
Date.Time <- strptime(Date.Time, format = "%Y-%m-%d %H:%M:%S")
powerdatafeb <- cbind(Date.Time, powerdatafebpull)[,c(1,4:10)]
# create vector combining Date and Time columns, coerce to time format
# use cbind to replace Date and Time columns with new combined vector
for (i in 2:8) {
class(powerdatafeb[,i]) <- "numeric"
}
# coerce data columns into numeric class
str(powerdatafeb) # check classes - should be POSIXct first and the rest num
## Data now ready for plotting
png(filename = "plot3.png", width = 480, height = 480)
# create png file to be plotted onto
with(powerdatafeb, plot(Date.Time, Sub_metering_1, type = "l",
ylab = "Energy sub metering", xlab = ""))
# create line plot to show change in SM1 over time
with(powerdatafeb, lines(Date.Time, Sub_metering_2, col = "red"))
# layer new line plot to show change in SM2 over time on top of first plot
with(powerdatafeb, lines(Date.Time, Sub_metering_3, col = "blue"))
# layer new line plot to show change in SM3 over time on top of others
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# create legend to show line colors represented in plot
dev.off()
# png graphic device now off
# plotting process finished
|
b72cddc8203197d836de679bc2f3884146e271de
|
bb2cbd11aac465d8fe4c2bdd65e7a4970cd8b067
|
/code_chunks/load_preprocess.R
|
0bf11000b746afd693470d0482028e839c5f3f48
|
[] |
no_license
|
diebugger/RepData_PeerAssessment1
|
ef457e2a0f7c9c3151c9b99baf13ca4edfffe113
|
c83b03437e38c5dc5d1bb9c5fb616c95ca647ce8
|
refs/heads/master
| 2020-04-29T14:39:57.646431
| 2015-04-19T15:54:49
| 2015-04-19T15:54:49
| 34,166,618
| 0
| 0
| null | 2015-04-18T13:21:40
| 2015-04-18T13:21:39
| null |
UTF-8
|
R
| false
| false
| 447
|
r
|
load_preprocess.R
|
### Load useful libraries
library("dplyr")
library("knitr")
library("lubridate")
library("ggplot2")
# prerequisite: normalize to US locale
Sys.setlocale(category = "LC_ALL", locale = "US")
### unzip and read table
unzip("activity.zip")
activity <- read.csv("activity.csv", header = T)
### convert date
activity$date <- as.Date.factor(activity$date)
activity <- tbl_df(activity)
### remove NAs
clean_activity <- activity[!is.na(activity$steps),]
|
7eab214920aed027b95a0b9be36480c48bf2ecfa
|
61a85e9d0a2452b7bffaf7486fd40b4f42d958d1
|
/statistical_inference/13_resampling.R
|
5499af0d38878c7660f2231140fc8811cbfff8f4
|
[
"MIT"
] |
permissive
|
fuchingy/data_science
|
0cd84f0efbfe85ba3a653f466deab50aa9c8c7e8
|
aaf021ae28dbf163efb4fe8d7842ac5a13e344de
|
refs/heads/master
| 2021-06-08T12:47:19.423277
| 2016-11-10T11:13:12
| 2016-11-10T11:13:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
13_resampling.R
|
library(UsingR)
data(father.son)
x <- father.son$sheight
n <- length(x)
B <- 10000 # 10000 bootstrap resamples
hist(x)
# We collect 1078 data (x)
# Then, we resample 10780000 data from the collected 1078 data, and arrange the
# data as the following matrix.
#
# resamples
# 1078 (n)
# 10000 (B) ...
#
resamples <- matrix(sample(x, n*B, replace=TRUE), B, n)
head(resamples)
hist(resamples)
# Assume we are interested in median
resampledMedians <- apply(resamples, 1, median)
hist(resampledMedians)
# The estimated standard error of the median is
sd(resampledMedians)
# The 95% confidence interval of the estimated median is
# The instructor says this should be improved by BCa interval (correction for bias)
# BCA: Bias-corrected and accelerated interval
quantile(resampledMedians, c(0.025, 0.975))
# Histgram of bootstrap resamples
g = ggplot(data.frame(medians=resampledMedians), aes(x=medians))
g = g + geom_histogram(color="black", fill="lightblue", binwidth=0.05)
g
# Permutation tests
head(InsectSprays)
ggplot(InsectSprays, aes(factor(spray), count)) + geom_boxplot(aes(fill=factor(spray)))
# Subset only data with spary "B" and "C"
subdata <- InsectSprays[InsectSprays$spray %in% c("B", "C"),]
subdata
# Calculate the difference of mean of B and mean of C.
# y: 11 17 21 11 16 14 17 17 19 21 7 13 0 1 7 2 3 1 2 1 3 0 1 4
# group: B B B B B B B B B B B B C C C C C C C C C C C C
#
# observeStat: 13.25
y <- subdata$count
group <- as.character(subdata$spray)
testStat <- function(w, g) mean(w[g == "B"]) - mean(w[g == "C"])
observeStat <- testStat(y, group)
observeStat
# By null hypothesis: group label is unrelated to the outcome
permutations <- sapply(1 : 10000, function(i) testStat(y, sample(group)))
permutations
# p-Value closed to 0: reject null hypothesis
# => group level is related to the outcome
mean(permutations > observeStat)
g = ggplot(data.frame(permutations=permutations), aes(x=permutations))
g = g + geom_histogram(color="black", fill="lightblue", binwidth=1)
g = g + geom_vline(xintercept = 13)
g
|
3bed093ade33964008fcc8f8f07640648d731a0c
|
b409efb1b86628179bfb3e9bde3501a95409b274
|
/tests/testthat/test-exec.R
|
85186ac93a504aa542b19393619aa28157f71450
|
[] |
no_license
|
MuriloPetruci/SnakeCharmR
|
84b806cced22ab73e1887472093b6bcee609dfb3
|
26002e01fa22838752b5d06a4dd6ac3074cc4ca9
|
refs/heads/master
| 2021-05-21T15:09:22.816777
| 2020-01-02T18:40:51
| 2020-01-02T18:40:51
| 252,691,742
| 1
| 0
| null | 2020-04-03T09:46:15
| 2020-04-03T09:46:14
| null |
UTF-8
|
R
| false
| false
| 266
|
r
|
test-exec.R
|
context("exec")
test_that("exception handling on executing Python code works", {
expect_error(py.exec("raise Exception('oh noes')", stopOnException = TRUE), "oh noes")
expect_warning(py.exec("raise Exception('oh noes')", stopOnException = FALSE), "oh noes")
})
|
3d5fadb75bf874eebe21a322fd7d98a1c583e640
|
d4cce763dc2131c0ae85d5c99c045aa26151299e
|
/man/osmplotr.Rd
|
c2ddd413f89ed150478d7554a3b7d216c5899892
|
[] |
no_license
|
jeperez/osmplotr
|
7864a9248f8d41cf9a0916149f61e3d2c0e2f1e9
|
2d42cda7e0124a250e1c498b8886ce1e644b589e
|
refs/heads/master
| 2021-01-24T20:01:52.331044
| 2016-05-18T10:53:12
| 2016-05-18T10:53:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,083
|
rd
|
osmplotr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/osmplotr.R
\docType{package}
\name{osmplotr}
\alias{osmplotr}
\alias{osmplotr-package}
\title{osmplotr.}
\description{
Produces customisable images of OpenStreetMap (OSM) data and enables data
visualisation using OSM objects. Extracts data using the overpass API.
Contains the following functions, data, and vignettes.
}
\section{Data Functions}{
\tabular{ll}{
'extract_osm_objects'\tab Download arbitrary OSM objects\cr
'connect_highways'\tab Returns points sequentially connecting list of named
highways\cr
}
}
\section{Basic Plotting Functions (without data)}{
\tabular{ll}{
'add_axes'\tab Overlay longitudinal and latitudinal axes on plot\cr
'add_osm_objects'\tab Overlay arbitrary OSM objects\cr
'make_osm_map'\tab Automate map production with structures defined in
'osm_structures'\cr
'osm_structures'\tab Define structures and graphics schemes for automating
map production \cr
'plot_osm_basemap'\tab Initiate a ggplot object for an OSM map\cr
'print_osm_map'\tab Print a map to specified graphics device
}
}
\section{Advanced Plotting Functions (with data)}{
\tabular{ll}{
'add_osm_groups'\tab Overlay groups of objects using specified colour
scheme\cr
'add_osm_surface'\tab Overlay data surface by interpolating given data \cr
'add_colourbar'\tab Overlay a scaled colourbar for data added with
'add_osm_surface'\cr
}
}
\section{Colour Manipulation Functions}{
\tabular{ll}{
'adjust_colours'\tab Lighted or darken given colours by specified amount\cr
'colour_mat'\tab Generate continuous 2D spatial matrix of colours\cr
}
}
\section{Other Functions}{
\tabular{ll}{
'get_bbox'\tab return bounding box from input vector\cr
}
}
\section{Data}{
\tabular{ll}{
'london'\tab OSM Data from a small portion of central London\cr
}
}
\section{Vignettes}{
\tabular{ll}{
'basic-maps'\tab Describes basics of downloading data and making custom
maps\cr
'data-maps'\tab
Describes how map elements can be coloured according to user-provided data,
whether categorical or continuous.
}
}
|
e4a54955871bcb6f274327b402f0a1a3fd28bc42
|
8a8f4a8cd9cce41e5c492324d463cf135ad986d1
|
/ui.R
|
d2b85e3dce67212ba979b1b78820cb0a5851484a
|
[] |
no_license
|
hyanworkspace/LumiaInterface
|
1b71559fb35827425c3c5b3d409a1ff88c9d345b
|
b0e5da3582145ab6fc64cad1748aab53a0f65b18
|
refs/heads/master
| 2020-11-30T02:24:39.163609
| 2019-12-29T18:38:40
| 2019-12-29T18:38:40
| 230,275,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,614
|
r
|
ui.R
|
# ui.R
ui <- dashboardPage(
dashboardHeader(title = tagList('Mixer beta',
img(src="images/logoR39.png",
height = 50, align = "right"))
),
dashboardSidebar(
sidebarMenuOutput("menu")
),
dashboardBody(
withMathJax(),
bsModal(id = 'gif', title = 'Generating experts and mixing...',
trigger = 'valide_expert', size = 'large',
img(src="images/busy.gif")
),
tabItems(
# --------------------- settings tab -----------------------------
tabItem("settings",
# -------------- General settings - import data & model --------------
fluidRow(
column(6,
tabBox(width = 12,
tabPanel("Train data",
fileInput("train_data",
label = h5("Train data :"))
),
tabPanel("Test data",
fileInput("test_data",
label = h5("Test data :"))
),
tabPanel("Orig expert",
textInput("orig_expert", label = h5("Original expert :")),
actionButton("generate_orig", label = h5("Generate"))
)
),
fluidRow(
valueBoxOutput('train_data_box'),
valueBoxOutput('test_data_box'),
valueBoxOutput('model_data_box')
)
),
box(width = 6, solidHeader = TRUE, status = "primary",
title = "Machine learing experts",
checkboxGroupInput("expert_ml_choice",
label = h5("Choose the machine learning experts to mix :"),
choices = list("SVM polynomial" = 1,
"SVM radial" = 2,
"Xgboost" = 3,
"Random Forest" = 4,
"Projection pursuit regression" = 5,
"Keras" = 6),
selected = 1:5)
)
),
# --------------------- expert specification -----------------------------
fluidRow(
box(width = 6,
solidHeader = TRUE,
id = 'expert_choice_box', status = "primary",
title = 'Strategies to mix ',
checkboxGroupInput("expert_choice",
label = h5("Choose the strategies to mix :"),
choices = list("Constant bias" = 1,
"Bagging" = 2,
"Boosting" = 3,
"Random walk" = 4,
"QGam" = 5,
"Gamlss" = 6),
selected = 1:6)
),
uiOutput('tabs')
),
# --------------------- recap expert param -----------------------------
fluidRow(
box(width = 12, solidHeader = TRUE, status = "primary",
title = "Strategy Parameter",
textOutput('recap_expert_param'),
tags$style(type="text/css",
"#recap_expert_param {white-space: pre-wrap;}"),
br(),
actionButton('valide_expert', 'Generate Experts')
)
)
),
# --------------------- expert tab -----------------------------
tabItem("expert",
box(title = 'Analysis by group of experts', width = 12,
uiOutput('plot_by_group')
),
box(title = 'Prediction by group of experts', width = 12,
uiOutput('plot_prediction_by_group')
),
box(title = 'Residual by group of experts', width = 12,
uiOutput('plot_res_by_group')
)
),
# --------------------- oracle tab -----------------------------
tabItem("oracle",
box('Metrics', width = 12,
plotlyOutput('plot_metrics')),
box('Compare', width = 12,
plotlyOutput('plot_compare_strategy'))
),
# --------------------- mix tab -----------------------------
tabItem("mix",
box('RMSE - Root of mean square error', width = 6,
helpText('$$RMSE = \\sqrt{\\sum_{i=1}^{n} \\frac{(x_i - \\hat{x_i})^2}{n}}$$')
),
box('MAPE - Mean Absolute Percentage Error', width = 6,
helpText('$$MAPE = \\frac{1}{n} \\sum_{i=1}^{n} |\\frac{x_i - \\hat{x_i}}{x_i}| $$')
),
box(title = "Table of evaluation metrics", solidHeader = TRUE,
background = "blue", width = 12,
tableOutput('res'))
)
)
)
)
|
c0f02f18f1a359cf537c0f13cdb9fdbc3e70ecac
|
175b99eb5ca8724f8f25d757e9bd164e1889b72a
|
/main.R
|
fe9f625a48d71114d7a2be4b576dc610adb64168
|
[] |
no_license
|
arielsun1990/RProgrammingCourseA1
|
505e0b4cdfe6050cf88b7283d1b00a68b15e2bba
|
9c5071dcd4ae651cce3911b2ebed4b8a7b97f97a
|
refs/heads/master
| 2020-12-11T05:45:08.730554
| 2014-05-22T01:30:17
| 2014-05-22T01:30:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26
|
r
|
main.R
|
source("pollutantmean.R")
|
fd05bdc79073563df65ea59c997811557291f414
|
a8d015305338849fcaa711464c964c02305cd0e0
|
/tests/testthat/test.R
|
f85929921376f1a308ab98d64743130de8c7b0ac
|
[
"MIT"
] |
permissive
|
noamross/twee
|
15f208438d53de99a1ca345a73ce4725eaf5bb8f
|
dd115756203157cd8c474604b37256d5e098e033
|
refs/heads/master
| 2021-01-20T16:51:20.366567
| 2017-05-10T12:39:13
| 2017-05-10T12:39:13
| 90,854,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 69
|
r
|
test.R
|
context("twee")
test_that("twee works", {
expect_true(TRUE)
})
|
834bf6b2487ba67f2496c94f62c80fa8279cf48b
|
3afe0a0834ae866d72116f3d66caac8c451b07c2
|
/Diffusion_Simulations/MetaPopulation_Diffusion/tests/SIR_Gillepsie_Example.R
|
e2bb0201c0cfde932a72af4beb7e1e705ffcf8e1
|
[] |
no_license
|
ethanbhojani/IDEANet
|
2c07d6682d708acc3bc6a799d2531c80f78cf92a
|
6fc0189ab34138ebd44cd25104d6696bde168991
|
refs/heads/main
| 2023-08-22T20:24:50.044618
| 2021-09-30T21:25:57
| 2021-09-30T21:25:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,703
|
r
|
SIR_Gillepsie_Example.R
|
# Simple SIR Simulation where the Gillepsie Algorythm Is Implemented
# Jonathan H. Morgan Using Resources Provided by Ben Bolker (2003, 2015)
# 20 July 2021
# LambertW Implmentation: https://stat.ethz.ch/pipermail/r-help/2003-November/042793.html
# Basic SIR Model with the Gillespie Algorithm Implemented: https://rpubs.com/bbolker/SIRgillespie
# Clear Out Console Script
cat("\014")
# Setting Work Directory
setwd("~/Dropbox/My Mac (Jonathan’s MacBook Pro)/Desktop/DNAC/IDEANet/Data_Scripts")
getwd()
# Options
options(stringsAsFactors = FALSE)
#################
# FUNCTIONS #
#################
# LambertW Arguments
# {z} (complex) vector of values for which to compute the function
# {b} (integer) vector of branches: b=0 specifies the principal branch, 0 and -1 are the ones that can take non-complex values
# {maxiter} maximum numbers of iterations for convergence
# {eps} convergence tolerance
# {min.imag} maximum magnitude of imaginary part to chop when returning solutions
# Nici Schraudolph's lambertw implementation
lambertW = function(z,b=0,maxiter=10,eps=.Machine$double.eps, min.imag=1e-9) {
if (any(round(Re(b)) != b))
stop("branch number for W must be an integer")
if (!is.complex(z) && any(z<0)) z=as.complex(z)
w = (1 - 2*abs(b))*sqrt(2*exp(1)*z + 2) - 1
v = log(z + as.numeric(z==0 & b==0)) + 2*pi*b*1i;
v = v - log(v + as.numeric(v==0))
c = abs(z + exp(-1));
c = (c > 1.45 - 1.1*abs(b));
c = c | (b*Im(z) > 0) | (!Im(z) & (b == 1))
w = (1 - c)*w + c*v
# Halley iteration
for (n in 1:maxiter) {
p = exp(w)
t = w*p - z
f = (w != -1)
t = f*t/(p*(w + f) - 0.5*(w + 2.0)*t/(w + f))
w = w - t
if (abs(Re(t)) < (2.48*eps)*(1.0 + abs(Re(w))) && abs(Im(t)) < (2.48*eps)*(1.0 + abs(Im(w))))
break
}
if (n==maxiter) warning(paste("iteration limit (",maxiter,") reached, result of W may be inaccurate",sep=""))
if (all(Im(w)<min.imag)) w = as.numeric(w)
return(w)
}
# Examples of the Function
curve(lambertW(x),from=0,to=10, bty='n', col="brown", family='HersheySerif', las=1)
grid(lwd = 2)
pvec = seq(-1,1,length=40)
m = outer(pvec, pvec, function(x,y) Re(lambertW(x+y*1i)))
persp(pvec,pvec,m, theta=290,shade=0.5,zlab="lambertW")
num1 = uniroot(function(x) {x*exp(x)-1},lower=0,upper=1,tol=1e-9)
abs(lambertW(1)-num1$root)<1e-9
# Functions for computing the event rates and the transitions to be executed when the events occur
# Function Specifying the Rates at Infection and Recovery Occurr
ratefun <- function(x, p) {
with(as.list(c(x, p)),{
c(inf = beta*S*I/N, ## scale inf by pop size
recover = gamma*I)
})
}
# Function Specifying the State Transitions Based on Counts of Susceptible and Infected
transfun <- function(x,w) {
switch(w,
x + c(-1,1), ## infection: S-1, I+1
x + c(0,-1)) ## removal: I-1
}
# A Wrapper Function to Run the Simulation with Specified Parameters/Starting Values & Return
# Either the Ending State or a Matrix of Event Times and Types
run <- function(p=c(beta=2, gamma=1, N=100), I0=1, itmax=1e5, ret=c("final","all")) {
ret <- match.arg(ret)
if (ret=="all") {
rmat <- matrix(NA,nrow=itmax,ncol=2,
dimnames=list(NULL,c("t","trans")))
}
x <- c(S=unname(p["N"])-I0, I=I0) # Number of Susceptible and Infected (99 and 1) at time 1
it <- 1 # Iteration value at time 1
t <- 0 # Count of Recovered
trans <- c(0,0) # Transitions Movement from Susceptible (S-1) to Infected (I + 1)
while (x["I"] > 0 & it < itmax) {
r <- ratefun(x,p) # Rates for Susceptible and Infected based on beta, gamma, N values
t <- t + rexp(1, rate=sum(r)) # t = t + random draw of one from an exponential distribution with a rate of sum(r)
w <- sample(length(r), size=1,prob=r) # Sample one of 2 values (susceptible or infected events) based on the probabilities of the two.
x <- transfun(x,w) # Update the number of Suceptible and Infected based on the event that occurred in the last step.
if (ret=="all")
rmat[it,] <- c(t,w)
it <- it+1
}
if (ret=="all") return(rmat[!is.na(rmat[,1]),])
return(c(S=unname(x["S"]),t=t,it=it))
}
# Analytic Computation of Expected Final Size based on the Lambert W Function
finalsize <- function(R0) {
1 + 1/R0*lambertW(-R0*exp(-R0))
}
###################
# SIMULATIONS #
###################
# Simulation: Basic Examples
# Two Infections, Three Recoveries
set.seed(101)
ex0 <- run(ret="all")
plot(0, type='n', xlab='t', ylab='Transitions', xlim=c(0, 1), ylim=c(1, 2), cex.axis=1.3, family='HersheySerif', las=1, main='', bty='n')
grid(lwd = 2)
points(x=ex0[,1], y=ex0[,2], col="brown", pch=16, cex=1.3)
title('Simulation 1', family='serif', cex.main=2, line=1.25)
# One Infection, One Recovery
set.seed(101)
ex0 <- run(p=c(beta=1.1, gamma=1,N=100), ret="all")
plot(0, type='n', xlab='t', ylab='Transitions', xlim=c(0, 1), ylim=c(1, 2), cex.axis=1.3, family='HersheySerif', las=1, main='', bty='n')
grid(lwd = 2)
points(x=ex0[,1], y=ex0[,2], col="brown", pch=16, cex=1.3)
title('Simulation 1', family='serif', cex.main=2, line=1.25)
abline(v=0.1761341, col='brown', lty=2)
# Exercises 1 and 2
# Beta is at the Default Value of 2
trials <- vector('list', 1000)
for (i in seq_along(trials)) {
trials[[i]] <- as.numeric(run())
}
ex1 <- do.call("rbind", trials)
ex1 <- as.data.frame(cbind(seq(1, 1000, 1), ex1))
colnames(ex1) <- c('n', 'S', "t", 'it')
# Beta is specified to be 1.1
for (i in seq_along(trials)) {
trials[[i]] <- as.numeric(run(p=c(beta=1.1,gamma=1,N=100)))
}
ex2 <- do.call("rbind", trials)
ex2 <- as.data.frame(cbind(seq(1, 1000, 1), ex2))
colnames(ex2) <- c('n', 'S', "t", 'it')
rm(trials)
######################
# VISUALIZATIONS #
######################
# Plotting Simulation 1 and Simulation 2 Results
layout.matrix <- matrix(c(1, 3, 2, 4), nrow = 2, ncol = 2)
layout(mat = layout.matrix,
heights = c(2, 2), # Heights of the two rows
widths = c(2, 2)) # Widths of the two columns
layout.show(4)
par(mar = c(4,6,3,3), family='HersheySerif')
hist(ex1$S, breaks = 35, main=" ", ylim=c(0, 400), xlab='', ylab='', las=1, cex.axis=1.3, family='HersheySerif')
mtext(side = 1, text = 's', col = "black", line = 3, cex = 1.5, family='HersheySerif')
mtext(side = 2, text = "Count", col = "black", line = 4, cex = 1.5, family='HersheySerif')
title('Simulation 1: S', family='serif', cex.main=1.5, line=1.25)
hist(ex1$t, breaks = 35, main=" ", xlab=' ', ylab=' ', ylim=c(0, 400), las=1, cex.axis=1.3, family='HersheySerif')
mtext(side = 1, text = 't', col = "black", line = 3, cex = 1.5, family='HersheySerif')
mtext(side = 2, text = "Count", col = "black", line = 4, cex = 1.5, family='HersheySerif')
title('Simulation 1: t', family='serif', cex.main=1.5, line=1.25)
hist(ex2$S, breaks = 35, main=" ", ylim=c(0, 500), xlab='', ylab='', las=1, cex.axis=1.3, family='HersheySerif')
mtext(side = 1, text = 's', col = "black", line = 3, cex = 1.5, family='HersheySerif')
mtext(side = 2, text = "Count", col = "black", line = 4, cex = 1.5, family='HersheySerif')
title('Simulation 2: S', family='serif', cex.main=1.5, line=1.25)
hist(ex2$t, breaks = 35, main=" ", xlab='', ylab=' ', ylim=c(0, 500), las=1, cex.axis=1.3, family='HersheySerif')
mtext(side = 1, text = 't', col = "black", line = 3, cex = 1.5, family='HersheySerif')
mtext(side = 2, text = "Count", col = "black", line = 4, cex = 1.5, family='HersheySerif')
title('Simulation 2: t', family='serif', cex.main=1.5, line=1.25)
# Estimating Expected Sizes
finalsize(2)
finalsize(1.1)
|
5313001afae882567acf9c7428172fbffec9f977
|
b3f0ad3934f871dfe1105a8168d0f91048c0420c
|
/R/dataset_TRW_complete.R
|
a46663d7b199e0ed5f473a4f956a1c67d061482a
|
[] |
no_license
|
cran/dendroTools
|
25739b0411af2f8a9114e46c01641fac4b40e538
|
51c3eed999c8d27b38ea7d2edd91cb0063cf2127
|
refs/heads/master
| 2023-07-20T07:48:19.869232
| 2023-07-17T12:50:02
| 2023-07-17T14:34:17
| 105,138,038
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 847
|
r
|
dataset_TRW_complete.R
|
#' The complete dataset of standardized tree-ring chronology from Albania
#'
#' A dataset with a tree-ring width (TRW) chronology of Pinus nigra from Albania This TRW
#' chronology has a span of 551 years (period 2009 - 1459) and was already used to reconstruct
#' summer temperatures by Levanič et al. (2015). In this paper, all the details about sample
#' replication, site description and correlation statistics are described.
#'
#'
#' @format A data frame with 551 rows and 1 variable:
#' \describe{
#' \item{TRW}{Standardised tree-ring width chronology of Pinus nigra from Albania}
#' }
#' @source Levanič, T., Poljanšek, S., Toromani, E., 2015. Early summer temperatures reconstructed from
#' black pine (Pinus nigra Arnold) tree-ring widths from Albania. The Holocene 25, 469-481.
#'
#' @export
"dataset_TRW_complete"
|
8c94f7074cc33fa5bde9d14451f8821723867f6f
|
a90fb0b0366bdaee2e875b5642d31a2b8171586e
|
/metric_calc.R
|
0fcefd52f1b2656c5244d7381367c8878bc30779
|
[] |
no_license
|
InterstateCommissionPotomacRiverBasin/Large_River_Cummins
|
6480e2b70bf7abcef561bfbc37350fe4fb3e2ec4
|
b8e098de289fdd0db37cf84a2f942b02c2355f88
|
refs/heads/master
| 2020-03-25T23:55:36.052538
| 2018-08-02T12:10:05
| 2018-08-02T12:10:05
| 144,299,244
| 0
| 1
| null | 2018-08-10T14:53:56
| 2018-08-10T14:53:56
| null |
UTF-8
|
R
| false
| false
| 3,413
|
r
|
metric_calc.R
|
#==============================================================================
#==============================================================================
# Author: Zachary M. Smith
# Organization: ICPRB
# Created: 3/08/17
# Updated: 7/25/2018
# Maintained: Zachary M. Smith
# Purpose:
# Output:
#==============================================================================
#==============================================================================
# Install and load the Benthos package
#==============================================================================
# Install the Benthos package.
#devtools::install_github("zsmith27/Benthos", force = TRUE)
# Load the Benthos package.
library(Benthos)
# Load the Master Taxa List contained within the Benthos package.
data(master)
# Load the Vegan package.
library(vegan)
# Load the Tidyverse packages.
library(tidyverse)
#==============================================================================
# Import the metrics table.
reach.df <- read.csv("data/reach_year_agg.csv", stringsAsFactors = FALSE) %>%
toolbox::standard_char() %>%
mutate(
GENUS = case_when(
FAMILY == "chironomidae" ~ "chironomidae",
FAMILY == "pisidiidae" ~ "pisidiidae",
CLASS == "oligochaeta" ~ "oligochaeta",
PHYLUM == "platyhelminthes" ~ "platyhelminthes",
FINAL_ID == "serratella_deficiens" ~ "teloganopsis",
FINAL_ID == "tvetenia_discoloripes" ~ "chironomidae",
FINAL_ID == "sphaeriidae" ~ "pisidiidae",
FINAL_ID == "turbellaria" ~ "platyhelminthes",
FINAL_ID == "tubificidae" ~ "oligochaeta",
TRUE ~ GENUS)
)
wide.df <- Benthos::wide(reach.df, "GENUS")
#vegan::rarecurve(wide.df[, 8:ncol(wide.df)], step = 20)
#==============================================================================
source("large_river_functions.R")
#==============================================================================
samp.size = 1000
#==============================================================================
# Carderock Samples
system.time(rare.CR_2012 <- prep_rare(wide.df, "cr_2012", samp.size))
rare.CR_2013 <- prep_rare(wide.df, "cr_2013", samp.size)
rare.CR_2014 <- prep_rare(wide.df, "cr_2014", samp.size)
cr.reach <- rbind(rare.CR_2012, rare.CR_2013, rare.CR_2014)
# Knoxville Samples
rare.KX_2012 <- prep_rare(wide.df, "kx_2012", samp.size)
rare.KX_2013 <- prep_rare(wide.df, "kx_2013", samp.size)
rare.KX_2014 <- prep_rare(wide.df, "kx_2014", samp.size)
kx.reach <- rbind(rare.KX_2012, rare.KX_2013, rare.KX_2014)
# Little Falls Samples
rare.LF_2012 <- prep_rare(wide.df, "lf_2012", samp.size)
rare.LF_2013 <- prep_rare(wide.df, "lf_2013", samp.size)
rare.LF_2014 <- prep_rare(wide.df, "lf_2014", samp.size)
lf.reach <- rbind(rare.LF_2012, rare.LF_2013, rare.LF_2014)
all.reach <- list(cr.reach, kx.reach, lf.reach)
#==============================================================================
# Calculate Metrics
#==============================================================================
# Calculate metrics for each reach.
metrics.list <- lapply(all.reach, function(x){
large_river_metrics(x, master)
})
#==============================================================================
# Create a list of the metrics calculated
metrics.vec <- names(metrics.list[[1]][, 9:ncol(metrics.list[[1]])])
#==============================================================================
|
e7fb210998083c17137e383337cee6ad37d1ed96
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/zscorer/examples/run_zscorer.Rd.R
|
07a19911f576f0e0df523044caa794cda7b309fa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113
|
r
|
run_zscorer.Rd.R
|
library(zscorer)
### Name: run_zscorer
### Title: run_zscorer
### Aliases: run_zscorer
### ** Examples
#
|
b5f3a4838e45d0c79cf4e083b98b97de2f990fb4
|
efdcd47a08e872cbb264ddc2484293ef49357e7e
|
/R/checkfunArgs.R
|
bf1647a5a9dcab041fce6f5753bd7e6a3d90cd87
|
[] |
no_license
|
cran/ipred
|
b69ef67667a47910efdecfb54837fca00bc8fb90
|
79c406d551eea2790c298403cb4496a08e85dc82
|
refs/heads/master
| 2023-03-16T08:35:29.570578
| 2023-03-09T13:50:02
| 2023-03-09T13:50:02
| 17,696,820
| 0
| 3
| null | 2018-08-14T06:01:58
| 2014-03-13T05:03:12
|
R
|
UTF-8
|
R
| false
| false
| 848
|
r
|
checkfunArgs.R
|
# $Id: checkfunArgs.R,v 1.1 2003/02/17 09:49:31 hothorn Exp $
checkfunArgs <- function(fun, type=c("model", "predict")) {
# check for appropriate arguments of user-supplied function "fun"
# this will not work for generics in R < 1.7.0 and therefore not used by
# now
type <- match.arg(type)
if (!is.function(fun)) {
warning("fun is not a function")
return(FALSE)
}
funargs <- formals(fun)
switch(type, "model"={
if (!all(names(funargs)[1:2] %in% c("formula", "data"))) {
warning("fun is not a function with at least 'formula' and 'data' arguments")
return(FALSE)
} else {
return(TRUE)
}
}, "predict"={
if (length(funargs) < 2) {
warnings("fun is not a function with at least 'object' and 'newdata' arguments")
return(FALSE)
} else {
return(TRUE)
}
})
}
|
2e030df5707b2458aeff225f5914716103aa65a0
|
9d79f581ef4419b6e43fef0155af9e9cc33ffdce
|
/Utils.r
|
2d3fb866a54a7c3e3ab9cbd6565e6fd8b450ab0e
|
[] |
no_license
|
linlin0026/fiber_-_10.1016-j.chom.2022.03.036
|
d9f9cb7ec86eaa81d3ac7566a3d150259e59142a
|
8dec8b716ed12baf34f42f5fce6a94bdcd7fe7d2
|
refs/heads/main
| 2023-07-08T19:55:58.112798
| 2021-08-12T01:53:53
| 2021-08-12T01:53:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,863
|
r
|
Utils.r
|
#functions here
#For some of the measurements we have repeats. I think mainly Charles did this in order to see the batch effects.
#However this is probably unnecessary if the data are truly randomized, and then we can perform the best batch
#correction using Combat or something like that
#The input matrix should have the first column as the participant and the second column as the timepoint
# Averages technical replicates per (participant, timepoint) pair.
# Returns a data frame with one row per participant/visit combination,
# rownames "<Participant>_<Visit>", and any analyte columns containing NA
# dropped (via the transpose + na.omit trick below).
# NOTE(review): the two t()/data.frame() round-trips coerce all columns to
# character/factor; downstream callers appear to re-coerce as needed -- confirm.
averaging_replicates <- function(fiber_subsetted_dataframe) {
df <- data.frame(fiber_subsetted_dataframe)
colnames(df)[1] <- "Participant"
colnames(df)[2] <- "Time_point"
df[,3:ncol(df)] <- data.frame(lapply(df[,3:ncol(df)], as.character), stringsAsFactors=FALSE, row.names = rownames(df)) #To change to a double, this needs to go through character first
df[,3:ncol(df)] <- data.frame(lapply(df[,3:ncol(df)], as.numeric), stringsAsFactors=FALSE, row.names = rownames(df)) #This needs to be done for the transcript data, but causes problems with the pcl data.
# Mean over replicates within each participant/visit group.
df2 <- aggregate(df,list(Participant=df[,1], Visit=df[,2]), mean)
df2 <- data.frame(t(df2)) #popping out the analytes that have NAs
df2 <- na.omit(df2)
df2 <- data.frame(t(df2))
rownames(df2) <- paste(as.character(df2$Participant), as.character(df2$Visit),sep="_")
return(df2)
}
# A standardized way of determining pairwise correlations.
# Computes Spearman correlations between the rows of `to_correlate_df`
# (samples are assumed to be columns; the matrix is transposed before rcorr).
# Side effects: writes three histogram PDFs (p-values, BH-adjusted FDR,
# correlation coefficients) into the current working directory.
# Returns the lower-triangular correlation matrix with entries whose
# BH-adjusted FDR exceeds 0.05 zeroed out.
correlations <- function(to_correlate_df) {
require(Hmisc)
cor <- rcorr(format(t(to_correlate_df),digits=20), type="spearman")
cor.data <- cor$r
# Keep only the lower triangle; upper triangle and diagonal set to 0 / NA.
cor.data[upper.tri(cor.data, diag = T)]<- 0
pval.data <- cor$P
pval.data[upper.tri(pval.data, diag = T)]<- NA
# NOTE(review): n = length(pval.data) counts the NA upper triangle too,
# which makes the BH adjustment more conservative -- confirm intent.
FDR.data <- apply(pval.data,2,p.adjust,method="BH", n = length(pval.data))
pdf("./pval_bonferonni_hist.pdf")
hist(pval.data, breaks = 100, col="darkblue")
dev.off()
pdf("./FDR_bonferonni_hist.pdf")
hist(FDR.data, breaks = 100, col="darkblue")
dev.off()
pdf("./cor_bonferonni_hist.pdf")
hist(cor.data, breaks = 10, col="red")
dev.off()
# Zero out correlations that do not pass the 5% FDR threshold.
cor.data[FDR.data > 0.05]=0
return(cor.data)
}
# A standard way of running the Mfuzz standardise function.
# Wraps the numeric matrix in an ExpressionSet, standardises each row
# (mean 0, sd 1 per feature, per Mfuzz::standardise), and returns the
# standardised matrix with rows containing NA removed.
standardise_data <- function(data_frame) {
require(Mfuzz)
n <- as.matrix(data_frame)
class(n) <- "numeric"
eset1 <- ExpressionSet(n)
eset1 <- standardise(eset1) #Running standarise
o <- exprs(eset1)
o <- na.omit(o)
return(o)
}
# This function will perform mean imputation
# For the knn imputation, I can just use the funciton impute.knn
mean_imputation <- function(combined_df){
  # Row-wise mean imputation: replace each NA in a row with the mean of the
  # non-missing values of that same row.  Accepts a matrix or data frame
  # (character columns are coerced to numeric, as in the original) and
  # returns a data frame with the input's row and column names preserved.
  #
  # Fixes over the previous version:
  #  * result is sized from the input instead of a hard-coded 6 columns;
  #  * the imputed table is actually returned (the old code ended on a
  #    `rownames<-` assignment, so callers received the rownames vector);
  #  * column names are preserved;
  #  * the unused require(Xmisc) dependency was dropped.
  combined_df <- as.matrix(combined_df)
  class(combined_df) <- "numeric"
  imputed <- t(apply(combined_df, 1, function(row) {
    row[is.na(row)] <- mean(row, na.rm = TRUE)
    row
  }))
  # apply() over a single-column matrix returns a plain vector, which t()
  # turns into a 1 x nrow matrix; restore the nrow x 1 shape in that case.
  if (ncol(combined_df) == 1) {
    imputed <- matrix(imputed, ncol = 1)
  }
  dimnames(imputed) <- dimnames(combined_df)
  data.frame(imputed)
}
# Plot Mfuzz soft clusters on a fixed 6-point time axis.
# `eset` is the ExpressionSet, `mfuzzcl` the mfuzz() clustering result.
# Side effect only: draws onto the current graphics device.
# NOTE(review): par() is modified without restoring via on.exit -- the
# caller's graphics state is left altered.
plotting_fuzzy_clusters <- function(eset, mfuzzcl){
require(Mfuzz)
# Time-point labels: baseline, 10/20/30 (dose or day), then D3/D10 washout.
xaxis_ticks = c("B","10","20","30","D3","D10")
par(mfrow=c(5,4),oma = c(5,4,1,0) + 0.1,
mar = c(2,1,1,1) + 0.1)
mfuzz.plot2(eset,cl=mfuzzcl,mfrow=c(4,4), ylim=c(-3,3),time.labels = xaxis_ticks, bg="white",ax.col="red",col.lab="black",col.main="green",col.sub="blue",col="blue", Xwidth=20, Xheight=20,colo="fancy", lwd=1,ylab='',xlab='',x11=FALSE,cex.main=1.1,cex.lab=0.1)
}
# PCA biplot of the `ntop` most variable features of a (r)log-transformed
# DESeq-style object.
#
# Args:
#   rld:       object supporting assay() and colData() (e.g. DESeqTransform)
#   intgroup:  colData column(s) used to colour/group the samples
#   ntop:      number of highest-variance features to include
#   colors:    optional palette; defaults to Paired (>= 3 groups) or black/red
#   legendpos, main, textcx, ...: passed through to the plotting calls
# Side effect only: draws the biplot on the current graphics device.
rld_pca <- function (rld, intgroup = "condition", ntop = 500, colors=NULL, legendpos="bottomleft", main="PCA Biplot", textcx=1, ...) {
  require(genefilter)
  require(calibrate)
  require(RColorBrewer)
  rv = rowVars(assay(rld))
  # Indices of the ntop most variable features.
  select = order(rv, decreasing = TRUE)[seq_len(min(ntop, length(rv)))]
  pca = prcomp(t(assay(rld)[select, ]))
  # One factor level per combination of the requested colData columns.
  fac = factor(apply(as.data.frame(colData(rld)[, intgroup, drop = FALSE]), 1, paste, collapse = " : "))
  if (is.null(colors)) {
    if (nlevels(fac) >= 3) {
      colors = brewer.pal(nlevels(fac), "Paired")
    } else {
      colors = c("black", "red")
    }
  }
  # Percent of variance explained by the first two components.
  pc1var <- round(summary(pca)$importance[2,1]*100, digits=1)
  pc2var <- round(summary(pca)$importance[2,2]*100, digits=1)
  pc1lab <- paste0("PC1 (",as.character(pc1var),"%)")
  # BUG FIX: the y-axis label previously read "PC1 (...)" -- it showed the
  # PC2 variance but was mislabelled as PC1.
  pc2lab <- paste0("PC2 (",as.character(pc2var),"%)")
  plot(PC2~PC1, data=as.data.frame(pca$x), bg=colors[fac], pch=21, xlab=pc1lab, ylab=pc2lab, main=main, ...)
  with(as.data.frame(pca$x), textxy(PC1, PC2, labs=rownames(as.data.frame(pca$x)), cex=textcx))
  legend(legendpos, legend=levels(fac), col=colors, pch=20)
}
# Volcano plot of differential-expression results.
# Points are coloured by significance: red = padj < sigthresh,
# orange = |log2FC| > lfcthresh, green = both.  When labelsig is TRUE the
# doubly-significant points are labelled with the `Gene` column (so `res`
# must carry a Gene column, i.e. resdata-style input).
# Side effect only: draws on the current graphics device.
volcanoplot <- function (res, lfcthresh=2, sigthresh=0.1, main="Volcano Plot", legendpos="bottomright", labelsig=TRUE, textcx=1, ...) {
with(res, plot(log2FoldChange, -log10(pvalue), pch=20, main=main, ...))
with(subset(res, padj<sigthresh ), points(log2FoldChange, -log10(pvalue), pch=20, col="red", ...))
with(subset(res, abs(log2FoldChange)>lfcthresh), points(log2FoldChange, -log10(pvalue), pch=20, col="orange", ...))
with(subset(res, padj<sigthresh & abs(log2FoldChange)>lfcthresh), points(log2FoldChange, -log10(pvalue), pch=20, col="green", ...))
if (labelsig) {
require(calibrate)
with(subset(res, padj<sigthresh & abs(log2FoldChange)>lfcthresh), textxy(log2FoldChange, -log10(pvalue), labs=Gene, cex=textcx, ...))
}
legend(legendpos, xjust=1, yjust=1, legend=c(paste("FDR<",sigthresh,sep=""), paste("|LogFC|>",lfcthresh,sep=""), "both"), pch=20, col=c("red","orange","green"))
}
#Must be run with resdata, not res! This is because of the lab=Gene
# MA plot (mean expression vs log2 fold change) with significant points
# (padj < thresh) highlighted in red and, optionally, labelled by the
# `Gene` column.  Side effect only: draws on the current graphics device.
maplot <- function (res, thresh=0.1, labelsig=TRUE, textcx=1, ...) {
with(res, plot(baseMean, log2FoldChange, pch=20, cex=.5, log="x", ...))
with(subset(res, padj<thresh), points(baseMean, log2FoldChange, col="red", pch=20, cex=1.5))
if (labelsig) {
require(calibrate)
with(subset(res, padj<thresh), textxy(baseMean, log2FoldChange, labs=Gene, cex=textcx, col=2))
}
}
# Annotate a results table with Entrez IDs and gene symbols.
# `resdata` must have a `Gene` column of Ensembl gene IDs (version suffixes
# like ".12" are stripped).  Adds ENTREZID, SYMBOL and EnsemblGene columns
# via EnsDb.Hsapiens.v79 and returns the augmented data frame.
# NOTE(review): genes with no mapping get the string "NA" (via toString on
# an NA lookup), not a real NA; growing ENTREZID/SYMBOL with c() in the loop
# is O(n^2) -- acceptable for typical result sizes but worth knowing.
entrezid <- function( resdata ) {
require(EnsDb.Hsapiens.v79)
a = resdata$Gene #the column to iterate over will be different if I'm using res vs resdata
# Drop the Ensembl version suffix (everything after the first dot).
tmp=gsub("\\..*","",a)
tmp <- as.character(tmp)
txdb <- EnsDb.Hsapiens.v79
df <- AnnotationDbi::select(txdb, keys = tmp, keytype = "GENEID", columns = "ENTREZID")
df2 <- AnnotationDbi::select(txdb, keys = tmp, keytype = "GENEID", columns = "SYMBOL")
ENTREZID <- c()
SYMBOL <- c()
counter1 <- 0
for (i in tmp) {
counter1 <- counter1 + 1
j <- match(i,df$GENEID)
ENTREZID <- c(ENTREZID, toString(df[j,][2]))
SYMBOL <- c(SYMBOL, toString(df2[j,][2]))}
resdata$ENTREZID <- ENTREZID
resdata$SYMBOL <- SYMBOL
resdata$EnsemblGene <- tmp
resdata
}
|
5fdf9d6a92a9726b2c629232eb17a194d9034a8a
|
c4c92cd67ee94a3535254e61a5a4502b891f97d6
|
/tests/testthat/testgc.R
|
bbd7826568c9dfffff9bccd76d29110ab629e7ec
|
[] |
no_license
|
fernandogelin/qckitfastq
|
4c7c373868c7376e2232a3cd2d8ba4d588b5635c
|
3e002c124af84eaab32763b6fe7c1345d9be79d4
|
refs/heads/master
| 2020-03-26T11:46:14.164878
| 2018-08-15T20:13:19
| 2018-08-15T20:13:19
| 144,859,277
| 0
| 0
| null | 2018-08-15T13:51:25
| 2018-08-15T13:51:25
| null |
UTF-8
|
R
| false
| false
| 195
|
r
|
testgc.R
|
# Regression test: GC_content() on the bundled 10^5-read FASTQ fixture
# should yield one row per read pair sampled (expected: 25000 rows).
testthat::test_that("GC",{
# Check the dimension of the resulting dataset.
testthat::expect_equal(nrow(GC_content(system.file("extdata", "10^5_reads_test.fq.gz", package = "qckitfastq"))),25000)
})
|
e79e6ecfaced258e5f2eae6edadda15783edb798
|
6ef2884f487c4819fae2d57750bfdb91e5d085c8
|
/ml/RLab/Main.R
|
bdef44f5fc9821fc7770aea05803ce963cc3bf1b
|
[] |
no_license
|
vsantiagogon/dit
|
33ddd013ba2a6d208ce30f93b2e088f157ef48e0
|
114790c429b4774ed532eac6628d55ab1294d53b
|
refs/heads/master
| 2021-05-09T02:05:06.416893
| 2018-04-24T09:13:59
| 2018-04-24T09:13:59
| 119,196,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 827
|
r
|
Main.R
|
source('repos/dit/ml/utils/Setup.R');
# Load the dataset & dependencies
DATA = read.csv('repos/dit/datasets/Titanic.csv', stringsAsFactors = FALSE, na.strings = 'NA');
Setup$getPkgs('SDMTools');
# Print accuracy, precision and misclassification rate from a binary
# classification.  `test` = observed labels, `pred` = predicted values,
# both as accepted by SDMTools::confusion.matrix.
# NOTE(review): the TN/TP/FN/FP cell positions assume SDMTools'
# confusion.matrix layout (rows = predicted 0/1, cols = observed 0/1) --
# confirm against the installed SDMTools version.
Asses = function (test, pred) {
# Finally, we show the results, using the confusion matrix.
mat = confusion.matrix(test, pred)
TN = mat[1, 1]
TP = mat[2, 2]
FN = mat[1, 2]
FP = mat[2, 1]
TOTAL = TP + TN + FN + FP;
Accuracy = (TP + TN)/TOTAL
Precision = TP / (TP + FP);
MisRate = (FP + FN) / TOTAL
cat('ACCURACY: ', Accuracy, ' PRECISION: ', Precision, ' Mis. Rate: ', MisRate)
}
source('repos/dit/ml/RLab/featured.R');
source('repos/dit/ml/RLab/base_line.R');
Featured = feat_model(DATA);
Asses(Featured$test, Featured$pred);
Baseline = base_model(DATA);
Asses(Baseline$test, Baseline$pred);
|
00fd612260fbf759e29777265d6132d414846021
|
89b94a1553151b34386d75b0006b970dc372d2de
|
/College Football/Big 12 Deep Dive/Team Stats.R
|
b98d0ff27436de73f45b778ca8d29a45ef680c47
|
[] |
no_license
|
aelfering/Sports-Data
|
0d2052a9fdf0e05b08ec820faaefa34f0066d48d
|
86ed99e443f255a4c3178358c374429454984d13
|
refs/heads/master
| 2022-05-02T23:50:14.812967
| 2022-04-01T15:49:59
| 2022-04-01T15:49:59
| 222,936,805
| 1
| 0
| null | 2019-11-21T01:02:03
| 2019-11-20T12:52:29
| null |
UTF-8
|
R
| false
| false
| 7,322
|
r
|
Team Stats.R
|
# Big 12 Box Stats
# Load plotting/wrangling dependencies.
# NOTE(review): library(tidyverse) already attaches ggplot2, tidyr, dplyr
# and stringr; the individual calls are redundant but harmless.
library(ggplot2)
library(tidyr)
library(dplyr)
library(tidyverse)
library(lubridate)
library(stringr)
# Load the Data: regular- and post-season box scores plus the schedule,
# then stack the two season files into one long table.
big_12_reg <- read.csv('2019 Reg Season Stats.csv')
big_12_post <- read.csv('2019 Post Season Stats.csv')
big_12_schedule <- read.csv('2019 Schedule.csv')
full_big_12_stats <- bind_rows(big_12_reg, big_12_post)
# create a dataframe of the game_id and dates
sched_dates <- big_12_schedule %>%
select(game_id = id,
date = start_date) %>%
mutate(date = substr(date, 1, 10),
date = ymd(date)) %>%
distinct(game_id,
date)
full_big_12_stats_dates <- inner_join(sched_dates, full_big_12_stats, by = c('game_id' = 'game_id'))
#### Recreate basic win-loss data ####
team1 <- full_big_12_stats_dates %>%
distinct(date,
game_id,
school,
conference,
homeAway,
points) %>%
filter(conference == 'Big 12') %>%
arrange(school,
date)
team2 <- full_big_12_stats_dates %>%
distinct(date,
game_id,
school,
conference,
homeAway,
points) %>%
arrange(school,
date)
team_1_2_join <- inner_join(team1, team2, by = c('game_id' = 'game_id', 'date' = 'date'))
full_scores <- team_1_2_join %>%
filter(school.x != school.y) %>%
select(date,
school = school.x,
sch_conf = conference.x,
location = homeAway.x,
school_pts = points.x,
opponent = school.y,
opp_conf = conference.y,
opponent_pts = points.y) %>%
arrange(school,
date) %>%
group_by(school) %>%
mutate(game_no = row_number()) %>%
ungroup() #%>%
#filter(game_no <= 12)
wins_losses <- full_scores %>%
mutate(wins = ifelse(school_pts > opponent_pts, 1, 0),
loses = ifelse(school_pts < opponent_pts, 1, 0)) %>%
group_by(school) %>%
summarise(total_wins = sum(wins),
total_losses = sum(loses),
points_for = sum(school_pts),
points_agt = sum(opponent_pts),
avg_points_for = mean(school_pts),
avg_points_agt = mean(opponent_pts)) %>%
ungroup() %>%
mutate(plus_minus = points_for - points_agt) %>%
# Arrange the teams by total wins
arrange(desc(total_wins))
#### How did the Big 12 perform by other statistics? ####
head(full_big_12_stats_dates)
# How many first downs did each team get? What was the average, and for the conference overall?s
total_first_downs <- full_big_12_stats_dates %>%
filter(conference == 'Big 12') %>%
filter(grepl('firstDowns', stat_category)) %>%
mutate(stat = as.character(stat),
stat = as.integer(stat),
avg_1st_downs = mean(stat)) %>%
group_by(school) %>%
mutate(school_avg_1st_downs = mean(stat),
highest_1st_downs = max(stat),
least_first_downs = min(stat)) %>%
slice(which.max(date)) %>%
ungroup() %>%
select(school,
avg_1st_downs,
school_avg_1st_downs,
highest_1st_downs,
least_first_downs)
# How efficient are teams at converting on the third and fourth down?
# Tom Osborne talked about 45% conversion rate for success
downs <- full_big_12_stats_dates %>%
#filter(conference == 'Big 12') %>%
filter(stat_category %in% c('fourthDownEff', 'thirdDownEff'))
downs_sep <- separate(data = downs, col = stat, into = c("conversions", "attempts"), sep = "\\-")
# summary of conversions on third and fourth downs
downs_conversion_pct <- downs_sep %>%
filter(conference == 'Big 12') %>%
mutate(conversions = as.character(conversions),
conversions = as.integer(conversions),
attempts = as.character(attempts),
attempts = as.integer(attempts)) %>%
group_by(school,
stat_category) %>%
summarise(conversions = sum(conversions),
attempts = sum(attempts)) %>%
ungroup() %>%
mutate(pct_conversion = conversions/attempts) %>%
arrange(desc(stat_category),
desc(pct_conversion))
# conversions by game
big_12_downs <- downs_sep %>%
filter(conference == 'Big 12') %>%
mutate(conversions = as.character(conversions),
conversions = as.integer(conversions),
attempts = as.character(attempts),
attempts = as.integer(attempts)) %>%
group_by(school,
game_id,
date,
stat_category) %>%
summarise(conversions = sum(conversions),
attempts = sum(attempts)) %>%
ungroup() %>%
mutate(pct_conversion = conversions/attempts)
opp_downs <- downs_sep %>%
mutate(conversions = as.character(conversions),
conversions = as.integer(conversions),
attempts = as.character(attempts),
attempts = as.integer(attempts)) %>%
group_by(school,
game_id,
date,
stat_category) %>%
summarise(conversions = sum(conversions),
attempts = sum(attempts)) %>%
ungroup() %>%
mutate(pct_conversion = conversions/attempts)
school_opp_downs <- inner_join(big_12_downs, opp_downs, by = c('game_id' = 'game_id', 'date' = 'date', "stat_category" = "stat_category"))
big_12_downs_compare <- school_opp_downs %>%
filter(school.x != school.y) %>%
select(school = school.x,
opponent = school.y,
game_id,
date,
stat_category,
school_conversions = pct_conversion.x,
opp_conversions = pct_conversion.y)
# How many completions did each team make for every attempt?
completionAttempts <- full_big_12_stats %>%
filter(conference == 'Big 12') %>%
filter(stat_category %in% c('completionAttempts'))
big_12_cmpAtt <- separate(data = completionAttempts, col = stat, into = c("completions", "attempts"), sep = "\\-")
passes_completed <- big_12_cmpAtt %>%
mutate(completions = as.character(completions),
completions = as.integer(completions),
attempts = as.character(attempts),
attempts = as.integer(attempts)) %>%
group_by(school) %>%
summarise(completions = sum(completions),
attempts = sum(attempts)) %>%
ungroup() %>%
mutate(pct = completions/attempts)
# How many touchdowns resulted from passes?
passes_touchdown <- full_big_12_stats %>%
filter(conference == 'Big 12') %>%
filter(stat_category %in% c('passingTDs')) %>%
mutate(passingTD = as.character(stat),
passingTD = as.integer(passingTD)) %>%
group_by(school) %>%
summarise(passingTDs = sum(passingTD))
# How many interceptions were thrown per attempt?
passes_intercepted <- full_big_12_stats %>%
filter(conference == 'Big 12') %>%
filter(stat_category %in% c('interceptions')) %>%
mutate(interceptions = as.character(stat),
interceptions = as.integer(interceptions)) %>%
group_by(school) %>%
summarise(interceptions = sum(interceptions))
# Bring all the passes, touchdowns, and interceptions together
passes_touchdowns <- inner_join(passes_completed, passes_touchdown, by = c('school' = 'school'))
passes_touchdowns_int <- inner_join(passes_touchdowns, passes_intercepted, by = c('school' = 'school'))
passes_touchdowns_int %>%
mutate(touchdown_ratio = passingTDs/interceptions,
pct_interceptions = interceptions/attempts,
ratio_rank = dense_rank(desc(touchdown_ratio)),
interception_rank = dense_rank(desc(pct_interceptions))) %>%
arrange(desc(touchdown_ratio))
|
28fc3d02503caf6a1b91e92478ee96bcb7d4903f
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/3795_0/rinput.R
|
6ac377bfc6d2db7b95930d5078bb310a002981a6
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a Newick tree with ape and write it back out for codeml input.
library(ape)
testtree <- read.tree("3795_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3795_0_unrooted.txt")
|
d60e5d747f71f76c8e8efe325f937af575ba0c31
|
dea51706c221e2b2813d62513b295ac99f6f82ae
|
/projects/2xmammals/psg_overlaps/psg_overlaps.R
|
004042a6af0e1c7aa74a3c35a9618e7f01b62041
|
[] |
no_license
|
gjuggler/greg-ensembl
|
47bcb82e29013866735b5af398965b83e02b6088
|
5564fc09bbb0e586c9a1999f21e713c119c490db
|
refs/heads/master
| 2016-09-11T12:37:14.557777
| 2013-12-18T13:31:46
| 2013-12-18T13:31:46
| 275,374
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,053
|
r
|
psg_overlaps.R
|
# Run every ROC comparison (Kosiol, Nielsen, Clark, Sabeti) against the
# E-SLR scores in `merged.genes[[score.column]]`.
# Side effect only: assigns roc.kosiol / roc.nielsen / roc.clark / roc.sabeti
# into the global environment rather than returning them.
get.all.scores = function(merged.genes,score.column='m.dnds.m0') {
merged.genes$score = merged.genes[,score.column]
assign('roc.kosiol',compare.kosiol.eslr(merged.genes),pos=.GlobalEnv)
assign('roc.nielsen',compare.nielsen.eslr(merged.genes),pos=.GlobalEnv)
assign('roc.clark',compare.clark.eslr(merged.genes),pos=.GlobalEnv)
assign('roc.sabeti',compare.sabeti.eslr(merged.genes),pos=.GlobalEnv)
}
# Write hg19 gene coordinates (chr, start, end, strand) to kosiol_locs.txt
# in the working directory, tab-separated, for the gene-mapping Perl script.
output.kosiol.locations = function(genes) {
a = genes[,c('chr.hg19','start.hg19','end.hg19','strand.x')]
#print(a[1:10,])
write.table(a,file="kosiol_locs.txt",sep="\t",col.names=F,row.names=F,quote=F)
}
# Attach hg19 coordinates to the Kosiol gene table by merging on transcript
# ID against the Galaxy liftOver BED output (kosiol_hg19.bed in the cwd).
merge.kosiol.lifted = function(kosiol.genes) {
# Coming from Galaxy liftOver output.
data = read.delim("kosiol_hg19.bed",sep="\t",header=F)
# First four BED columns: chrom, start, end, name (= transcript ID).
lifted = data[,1:4]
colnames(lifted) = c("chr.hg19","start.hg19","end.hg19","name")
merged = merge(kosiol.genes,lifted,by.x="transcriptid",by.y="name")
return(merged)
}
# Attach the Ensembl gene IDs produced by get_genes_by_location.pl
# (kosiol_ensg.txt, one ID per row, same row order as `genes`).
# NOTE(review): relies on positional correspondence, not a keyed merge --
# the Perl script must emit exactly one line per input row, in order.
merge.kosiol.mapped = function(genes) {
mapped <- read.delim(file="kosiol_ensg.txt",header=F)
genes$ensg.hg19 <- as.vector(mapped[,1])
return(genes)
}
# ROC comparison of E-SLR scores against the Kosiol positive-selection set.
# Truth = Kosiol score > 400.  Caches the expensive liftOver + Perl mapping
# step in the global variable `kosiol.mapped` (only rebuilt when absent).
# Reads kosiol-gene-definitions.txt / kosiol-gene-table.tsv from the cwd and
# shells out to an external Perl script on the first run.
compare.kosiol.eslr = function(eslr.merged) {
kosiol.genes.data = read.delim(file="kosiol-gene-definitions.txt",sep="\t",header=T,stringsAsFactors=F)
kosiol.genes.table = read.delim(file="kosiol-gene-table.tsv",sep="\t",header=T,stringsAsFactors=F)
kosiol.genes = merge(kosiol.genes.data,kosiol.genes.table,by.x='transcriptid',by.y='name')
kosiol.lifted = merge.kosiol.lifted(kosiol.genes)
# Build and globally cache the Ensembl-mapped table on first use.
if (!exists('kosiol.mapped')) {
output.kosiol.locations(kosiol.lifted)
cmd = 'perl ~/src/greg-ensembl/scripts/get_genes_by_location.pl < kosiol_locs.txt > kosiol_ensg.txt'
system(cmd)
kosiol.mapped <- merge.kosiol.mapped(kosiol.lifted)
assign('kosiol.mapped',kosiol.mapped,pos=.GlobalEnv)
}
# Positive-selection truth label: Kosiol score above 400.
truth <- kosiol.mapped$score > 400
kosiol.truth <- data.frame(
id=kosiol.mapped$transcriptid,
chr=kosiol.mapped$chr.hg19,
start=kosiol.mapped$start.hg19,
end=kosiol.mapped$end.hg19,
ensg=as.character(kosiol.mapped$ensg.hg19),
truth=as.integer(truth)
)
eslr.scores <- get.eslr.score.df(eslr.merged)
print(paste(nrow(kosiol.truth),nrow(eslr.scores)))
eslr.kosiol <- merge(kosiol.truth,eslr.scores,by='ensg')
print(nrow(eslr.kosiol))
a <- simple.roc(eslr.kosiol)
return(a)
}
# ROC comparison of E-SLR scores against the Nielsen gene set.
# Truth = Nielsen p-value < 0.2 (NA p-values count as negatives).
# Reads nielsen-genes.csv from the working directory; prints merge
# diagnostics as a side effect.
compare.nielsen.eslr = function(eslr.merged) {
nielsen.genes = read.csv("nielsen-genes.csv")
# Get truth values for Nielsen tests.
nielsen.genes$truth <- as.integer(nielsen.genes$p.value < 0.2)
nielsen.genes[is.na(nielsen.genes$truth),]$truth = 0
nielsen.genes$ensg <- nielsen.genes$ENS.ref
nielsen.genes <- subset(nielsen.genes,!is.na(ensg))
print(paste("Number of Nielsen true positives: ",(sum(nielsen.genes$truth))))
# Get E-SLR scores and merge.
eslr.scores <- get.eslr.score.df(eslr.merged)
eslr.nielsen <- merge(nielsen.genes,eslr.scores,by='ensg')
print(paste("After merging:",sum(eslr.nielsen$truth)))
a <- simple.roc(eslr.nielsen)
return(a)
}
# ROC comparison of E-SLR scores against the Clark et al. gene set.
# Truth = model-1 one-sided positive-selection p-value < 0.2 in either the
# human or the chimp branch.  Reads clark-genes.tsv from the cwd and maps
# LocusLink IDs to Ensembl genes via the NCBI gene_info table.
compare.clark.eslr = function(eslr.merged) {
ncbi.gene.info <- get.ncbi.gene.info()
clark.genes <- read.delim("clark-genes.tsv",header=T,sep="\t")
clark.genes <- subset(clark.genes,!is.na(LocusLink.ID) & LocusLink.ID != "")
clark.ncbi <- merge(clark.genes,ncbi.gene.info,by.x="LocusLink.ID",by.y="GeneID")
#print(clark.ncbi[1:10,c('LocusLink.ID','gene.symbol','ensg')])
# Get truth values for Clark tests.
#clark.ncbi$truth = as.integer(clark.ncbi$`Model.2..M2..p.value.chimp` < 0.01 | clark.ncbi$`Model.2..M2..p.value.human` < 0.01)
clark.ncbi$truth <- as.integer(clark.ncbi$`P.value.model.1..dN.dS..human..one.sided..test.for.pos.selection.` < 0.2
| clark.ncbi$`P.value.model.1..dN.dS..chimp..one.sided..test.for.pos.selection.` < 0.2)
# Get E-SLR scores and merge.
eslr.scores <- get.eslr.score.df(eslr.merged)
eslr.clark <- merge(clark.ncbi,eslr.scores,by='ensg')
print(paste("True positives after merging:",sum(eslr.clark$truth)))
a <- simple.roc(eslr.clark)
return(a)
}
# ROC comparison of E-SLR scores against the Sabeti gene list.
# Truth = membership in sabeti-genes.txt (symbols mapped to Ensembl IDs via
# NCBI gene_info); all E-SLR genes are kept (all.y=T) and non-members are
# negatives.
compare.sabeti.eslr = function(eslr.merged) {
sabeti.genes <- read.delim("sabeti-genes.txt",header=F,stringsAsFactors=F,col.names='id')
ncbi.gene.info <- get.ncbi.gene.info()
sabeti.ncbi <- merge(sabeti.genes,ncbi.gene.info,by.x='id',by.y='Symbol')
eslr.scores <- get.eslr.score.df(eslr.merged)
eslr.sabeti <- merge(sabeti.ncbi,eslr.scores,by='ensg',all.y=T)
# Genes present in the Sabeti list get truth = 1, everything else 0.
eslr.sabeti$truth <- as.integer(!is.na(eslr.sabeti$id))
a <- simple.roc(eslr.sabeti)
return(a)
}
# Load the NCBI Homo_sapiens.gene_info table (from the working directory),
# name its columns, extract the first Ensembl cross-reference from the
# dbXrefs field into an `ensg` column, and drop rows without one.
get.ncbi.gene.info = function() {
ncbi.gene.info <- read.delim("Homo_sapiens.gene_info",sep="\t",header=F,stringsAsFactors=F)
names(ncbi.gene.info) <- c("tax_id","GeneID","Symbol","LocusTag","Synonyms","dbXrefs","chromosome","map.location","description","type","symbol.from.nomenclature","full.name.from.nomenclature","nomenclature.status","other.designations","modification.date")
# dbXrefs is pipe-separated; keep the first "Ensembl:ENSG..." entry.
ncbi.ensgs <- sapply(strsplit(ncbi.gene.info$dbXrefs,"\\|"),function(x){a=grep("Ensembl",x,value=T);a=sub("Ensembl:","",a);a[1]})
ncbi.gene.info$ensg <- ncbi.ensgs
ncbi.gene.info <- subset(ncbi.gene.info,!is.na(ensg) & ensg != "")
return(ncbi.gene.info)
}
# Reduce the merged E-SLR results to a minimal (ensg, score) lookup table,
# dropping rows without a mapped human gene identifier.
get.eslr.score.df = function(eslr.merged) {
  has.gene <- !is.na(eslr.merged$human_gene)
  kept <- eslr.merged[has.gene, ]
  data.frame(ensg = kept$human_gene, score = kept$score)
}
# Build a simple ROC table: sort rows by descending score, then accumulate
# true/false-positive counts.  Adds columns tp/tn (cumulative rates) and
# p/n (cumulative counts) to the sorted data frame and returns it.
#
#   df:    data frame holding the truth and score columns
#   truth: name of the 0/1 truth column (default "truth")
#   score: name of the numeric score column (default "score")
#
# Missing scores are ranked last by replacing them with (min score - 1).
simple.roc = function(df,truth='truth',score='score') {
  df$score <- df[[score]]
  df$truth <- df[[truth]]
  # BUG FIX: min() was previously called without na.rm, so whenever any
  # score was NA the replacement value was itself NA.
  df$score[is.na(df$score)] <- min(df$score, na.rm = TRUE) - 1
  # Descending, stable sort; base order() replaces the doBy::orderBy
  # dependency (plyr was loaded but never used).
  df <- df[order(df$score, decreasing = TRUE), ]
  df$tp <- cumsum(df$truth) / sum(df$truth)
  df$tn <- cumsum(1 - df$truth) / sum(1 - df$truth)
  df$p <- cumsum(df$truth)
  df$n <- cumsum(1 - df$truth)
  df
}
# Print a small summary of a simple.roc() table: for each true-positive-rate
# threshold (10/25/50/75/100%), show the score cutoff and the tp/tn/p/n
# values at the last row whose tp is still <= that threshold.
# Side effect: prints the summary; the print() value is returned invisibly.
roc.summary = function(roc) {
out.df = data.frame()
for (i in c(0.1,0.25,0.5,0.75,1)) {
result = roc[max(which(roc$tp <= i)),c('score','tp','tn','p','n')]
result$thresh = i
out.df = rbind(out.df,result)
}
print(out.df)
}
|
dd3e7c84f7b25bcbbc71ba53b24f0bb85ecd1a47
|
023ce89770fef9921eca1e8e11e9d1111f01c2f6
|
/R/zchunk_LA144.Commercial-korea.R
|
f12059a7358a06c864c4230967928c6a7b1f8f07
|
[
"MIT"
] |
permissive
|
rohmin9122/gcam-korea-release
|
d1c1527be06c941c33c656ce32658cc37d2d7e13
|
a3d63ffcca01140578952b3f19676fabc4c7e113
|
refs/heads/master
| 2021-07-10T00:51:20.197828
| 2020-11-20T09:09:43
| 2020-11-20T09:09:43
| 214,117,693
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,261
|
r
|
zchunk_LA144.Commercial-korea.R
|
#' module_gcam.korea_LA144.Commercial
#'
#' Calculates commercial floorspace by state and energy consumption by state/fuel/end use
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L144.flsp_bm2_korea_comm}, \code{L144.in_EJ_korea_comm}. The corresponding file in the
#' original data system was \code{LA144.Commercial.R} (gcam-korea level1).
#' @details Calculates commercial floorspace by state and energy consumption by state/fuel/end use
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread
#' @author M. Roh
module_gcam.korea_LA144.Commercial <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    return(c(FILE = "gcam-korea/states_subregions",
             FILE = "gcam-korea/Census_pop_hist",
             FILE = "gcam-korea/A44.flsp_bm2_korea_comm",
             "L144.in_EJ_R_bld_serv_F_Yh",
             "L142.in_EJ_korea_bld_F",
             "L143.share_korea_Pop_CDD",
             "L143.share_korea_Pop_HDD"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c("L144.flsp_bm2_korea_comm",
             "L144.in_EJ_korea_comm"))
  } else if(command == driver.MAKE) {

    # Silence package checks
    subregion4 <- subregion9 <- REGION <- DIVISION <- state <- year <- value <- setNames <-
      SQFT1 <- SQFT2 <- SQFT <- ADJWT <- subregion4 <- . <- pcflsp_m2 <- pcflsp_m2.x <- pcflsp_m2.y <-
      conv_4_9 <- variable <- scaler <- sector <- value <- fuel <- share <- efficiency <- service <-
      value.x <- value.y <- Year <- unit <- value_EJ <- pre <- post <- state_EJ <- NULL

    all_data <- list(...)[[1]]

    # Load required inputs
    states_subregions <- get_data(all_data, "gcam-korea/states_subregions") %>%
      select(state_name, REGION, DIVISION, state) %>%
      distinct()
    Census_pop_hist <- get_data(all_data, "gcam-korea/Census_pop_hist") %>% gather_years
    A44.flsp_bm2_korea_comm <- get_data(all_data, "gcam-korea/A44.flsp_bm2_korea_comm")
    L144.in_EJ_R_bld_serv_F_Yh <- get_data(all_data, "L144.in_EJ_R_bld_serv_F_Yh") %>%
      filter(GCAM_region_ID == gcamkorea.RegionNum)
    L142.in_EJ_korea_bld_F <- get_data(all_data, "L142.in_EJ_korea_bld_F")
    L143.share_korea_Pop_CDD <- get_data(all_data, "L143.share_korea_Pop_CDD")
    L143.share_korea_Pop_HDD <- get_data(all_data, "L143.share_korea_Pop_HDD")

    # ===================================================
    # Add subregions to census population for aggregating
    L144.Census_pop_hist <- Census_pop_hist %>%
      left_join_error_no_match(states_subregions, by = "state") %>%
      filter(year %in% HISTORICAL_YEARS)

    L144.pop_korR <- L144.Census_pop_hist %>%
      group_by(state, year) %>%
      summarise(value = sum(value)) %>%
      ungroup()

    # Calculate per capita floorspace
    L144.flsp_bm2_korR <- A44.flsp_bm2_korea_comm %>%
      left_join_error_no_match(L144.pop_korR, by = c("year", "state")) %>%
      mutate(pcflsp_bm2 = flsp_bm2 / value) %>%
      select(state, year, pcflsp_bm2)

    # Interpolate floorspace values to all historical years
    L144.pcflsp_bm2_kor_comm <- L144.flsp_bm2_korR %>%
      select(state) %>%
      distinct() %>%
      repeat_add_columns(tibble(year = HISTORICAL_YEARS)) %>%
      # Using left_join because not all years included
      left_join(L144.flsp_bm2_korR, by = c("state", "year")) %>%
      group_by(state) %>%
      mutate(pcflsp_bm2 = approx_fun(year, pcflsp_bm2, rule = 2)) %>%
      ungroup()

    # Expand to states: multiply per-capita floorspace in each subregion9 times the population of each state
    L144.flsp_bm2_korea_comm <- L144.Census_pop_hist %>%
      left_join_error_no_match(L144.pcflsp_bm2_kor_comm, by = c("state", "year")) %>%
      transmute(state,
                sector = "comm",
                year,
                # Floorspace = population * per-capita floorspace
                value = value * pcflsp_bm2)

    L144.in_EJ_R_bld_serv_F_Yh <- L144.in_EJ_R_bld_serv_F_Yh %>%
      filter(sector == "bld_comm")

    bld.services <- unique(L144.in_EJ_R_bld_serv_F_Yh$service)

    # National service shares by fuel/year, replicated to every state
    L144.in_EJ_R_bld_serv_F_Yh.share <- L144.in_EJ_R_bld_serv_F_Yh %>%
      group_by(year, fuel) %>%
      mutate(total = sum(value), share = value / total) %>%
      ungroup() %>%
      repeat_add_columns(tibble(state = gcamkorea.STATES)) %>%
      select(state, fuel, service, year, share)

    L144.in_EJ_korea_comm <- L142.in_EJ_korea_bld_F %>%
      filter(sector == "comm") %>%
      repeat_add_columns(tibble(service = bld.services)) %>%
      # BUG FIX: the join keys must be a character vector.  The previous
      # `by = c(state, fuel, service, year)` evaluated the NULL-ed symbols
      # from the "silence package checks" block, collapsing to by = NULL
      # (an implicit natural join).  Quote the names so the keys are
      # explicit and stable if either table gains columns.
      left_join(L144.in_EJ_R_bld_serv_F_Yh.share,
                by = c("state", "fuel", "service", "year")) %>%
      # Fuel/service combinations absent from the share table get zero share
      replace_na(list(share = 0)) %>%
      transmute(state,
                sector = "comm",
                fuel,
                service,
                year,
                value = value * share)

    # ===================================================

    # Produce outputs
    L144.flsp_bm2_korea_comm %>%
      add_title("Commercial floorspace by state") %>%
      add_units("billion m2") %>%
      add_comments("Floorspace by state calculated by multiplying state population by per-capita census division floorspace") %>%
      add_legacy_name("L144.flsp_bm2_korea_comm") %>%
      add_precursors("gcam-korea/states_subregions",
                     "gcam-korea/Census_pop_hist",
                     "gcam-korea/A44.flsp_bm2_korea_comm") ->
      L144.flsp_bm2_korea_comm

    L144.in_EJ_korea_comm %>%
      add_title("Commercial energy consumption by state/fuel/end use") %>%
      add_units("EJ/yr") %>%
      add_comments("Service energy is divided by GCAM default share") %>%
      add_legacy_name("L144.in_EJ_korea_comm") %>%
      add_precursors("gcam-korea/states_subregions",
                     "L144.in_EJ_R_bld_serv_F_Yh",
                     "L142.in_EJ_korea_bld_F",
                     "L143.share_korea_Pop_CDD",
                     "L143.share_korea_Pop_HDD") ->
      L144.in_EJ_korea_comm

    return_data(L144.flsp_bm2_korea_comm, L144.in_EJ_korea_comm)
  } else {
    stop("Unknown command")
  }
}
|
afc601513fdd0882d6d7cb0e4143abc9bd7acb64
|
72d03ec10b4955bcc7daac5f820f63f3e5ed7e75
|
/input/gcam-data-system/emissions-processing-code/level2/L251.en_ssp_nonco2.R
|
4deabd7a2b68116315e3195e4860da6d67adea51
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bgmishra/gcam-core
|
54daddc3d037571bf745c4cf0d54c0d7a77f493f
|
bbfb78aeb0cde4d75f307fc3967526d70157c2f8
|
refs/heads/master
| 2022-04-17T11:18:25.911460
| 2020-03-17T18:03:21
| 2020-03-17T18:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,792
|
r
|
L251.en_ssp_nonco2.R
|
# Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "EMISSPROC_DIR" ) ){
if( Sys.getenv( "EMISSIONSPROC" ) != "" ){
EMISSPROC_DIR <- Sys.getenv( "EMISSIONSPROC" )
} else {
stop("Could not determine location of emissions data system. Please set the R var EMISSPROC_DIR to the appropriate location")
}
}
# Universal header file - provides logging, file support, etc.
source(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
logstart( "L251.en_ssp_nonco2.R" )
adddep(paste(EMISSPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(EMISSPROC_DIR,"/../_common/headers/EMISSIONS_header.R",sep=""))
printlog( "Emissions factors for the SSP future years in the energy system" )
# -----------------------------------------------------------------------------
# GCAM data-system chunk (L251-style): builds SSP-specific emissions
# coefficients for non-CO2 pollutants and writes the level-2 CSV tables plus
# the XML batch files consumed by the model interface.
# NOTE(review): the usual chunk preamble (logstart, etc.) is not visible here,
# so this appears to be the tail of a larger script.
# -----------------------------------------------------------------------------
# 1. Read files
# sourcedata()/readdata() appear to be GCAM data-system helpers: the former
# sources shared assumption scripts (which define ssp_model_years,
# digits_emissions, names_StubTechYr, ctrl_base_year, ...), the latter loads
# the named input tables.
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "COMMON_ASSUMPTIONS", "level2_data_names", extension = ".R" )
sourcedata( "MODELTIME_ASSUMPTIONS", "A_modeltime_data", extension = ".R" )
sourcedata( "EMISSIONS_ASSUMPTIONS", "A_emissions_data", extension = ".R" )
GCAM_region_names <- readdata( "COMMON_MAPPINGS", "GCAM_region_names")
A_region <- readdata( "EMISSIONS_ASSUMPTIONS", "A_regions" )
# L161.* tables hold the level-1 emissions factors for each SSP grouping
L161.SSP2_EF <- readdata( "EMISSIONS_LEVEL1_DATA", "L161.SSP2_EF")
L161.SSP15_EF <- readdata( "EMISSIONS_LEVEL1_DATA", "L161.SSP15_EF")
L161.SSP34_EF <- readdata( "EMISSIONS_LEVEL1_DATA", "L161.SSP34_EF")
L201.nonghg_steepness <- readdata( "EMISSIONS_LEVEL2_DATA", "L201.nonghg_steepness", skip = 4 )
# -----------------------------------------------------------------------------
# 2. Build tables for CSVs
# The same three-step pattern is applied to each SSP grouping (1/5, 2, 3/4):
# interpolate to the SSP model years and melt to long form, attach region
# names, then keep the stub-technology key columns plus the rounded
# emissions coefficient.
#SSP1/5 Emissions coefficients
L251.ssp15 <- interpolate_and_melt( L161.SSP15_EF, ssp_model_years )
L251.ssp15 <- add_region_name( L251.ssp15 )
#Format for csv file
L251.ssp15_ef <- L251.ssp15[ c( names_StubTechYr, "Non.CO2" ) ]
L251.ssp15_ef$emiss.coeff <- round( L251.ssp15$value, digits_emissions )
#SSP2 Emissions coefficients
L251.ssp2 <- interpolate_and_melt( L161.SSP2_EF, ssp_model_years )
L251.ssp2 <- add_region_name( L251.ssp2 )
#Format for csv file
L251.ssp2_ef <- L251.ssp2[ c( names_StubTechYr, "Non.CO2" ) ]
L251.ssp2_ef$emiss.coeff <- round( L251.ssp2$value, digits_emissions )
#SSP3/4 Emissions coefficients
L251.ssp34 <- interpolate_and_melt( L161.SSP34_EF, ssp_model_years )
L251.ssp34 <- add_region_name( L251.ssp34 )
#Format for csv file
L251.ssp34_ef <- L251.ssp34[ c( names_StubTechYr, "Non.CO2" ) ]
L251.ssp34_ef$emiss.coeff <- round( L251.ssp34$value, digits_emissions )
#Delete GDP controls
# Seed the delete table from the earliest SSP2 year (one row per
# region/technology/gas), then retarget it to the GDP-control base year.
L251.ctrl.delete <- subset( L251.ssp2_ef, year == min( year ) )[ c( names_StubTechYr, "Non.CO2" ) ]
L251.ctrl.delete$year <- ctrl_base_year
L251.ctrl.delete$ctrl.name <- "GDP_control"
#Add emissions controls for future years of vintaged technologies
# Electricity technologies are treated as vintaged: their future coefficients
# are anchored at a 1975 base year and read in through future.emiss.coeff.*
# columns instead of plain year-specific emissions factors.
L251.ssp15_ef_vin <- subset( L251.ssp15_ef, supplysector == "electricity" )
L251.ssp15_ef_vin$future.emiss.coeff.year <- L251.ssp15_ef_vin$year
L251.ssp15_ef_vin$future.emiss.coeff.name <- "SSP_GAINS"
L251.ssp15_ef_vin$year <- 1975
L251.ssp15_ef_vin <- L251.ssp15_ef_vin[ c( names_StubTechYr, "Non.CO2", "future.emiss.coeff.name", "future.emiss.coeff.year", "emiss.coeff" ) ]
L251.ssp2_ef_vin <- subset( L251.ssp2_ef, supplysector == "electricity" )
L251.ssp2_ef_vin$future.emiss.coeff.year <- L251.ssp2_ef_vin$year
L251.ssp2_ef_vin$future.emiss.coeff.name <- "SSP_GAINS"
L251.ssp2_ef_vin$year <- 1975
L251.ssp2_ef_vin <- L251.ssp2_ef_vin[ c( names_StubTechYr, "Non.CO2", "future.emiss.coeff.name", "future.emiss.coeff.year", "emiss.coeff" ) ]
L251.ssp34_ef_vin <- subset( L251.ssp34_ef, supplysector == "electricity" )
L251.ssp34_ef_vin$future.emiss.coeff.year <- L251.ssp34_ef_vin$year
L251.ssp34_ef_vin$future.emiss.coeff.name <- "SSP_GAINS"
L251.ssp34_ef_vin$year <- 1975
L251.ssp34_ef_vin <- L251.ssp34_ef_vin[ c( names_StubTechYr, "Non.CO2", "future.emiss.coeff.name", "future.emiss.coeff.year", "emiss.coeff" ) ]
printlog( "Rename to regional SO2" )
# rename_SO2 rewrites the generic SO2 gas name per region; applied to every
# output table so names stay consistent across the written files
L251.ctrl.delete <- rename_SO2( L251.ctrl.delete, A_region, FALSE )
L251.ssp15_ef <- rename_SO2( L251.ssp15_ef, A_region, FALSE )
L251.ssp2_ef <- rename_SO2( L251.ssp2_ef, A_region, FALSE )
L251.ssp34_ef <- rename_SO2( L251.ssp34_ef, A_region, FALSE )
L251.ssp15_ef_vin <- rename_SO2( L251.ssp15_ef_vin, A_region, FALSE )
L251.ssp2_ef_vin <- rename_SO2( L251.ssp2_ef_vin, A_region, FALSE )
L251.ssp34_ef_vin <- rename_SO2( L251.ssp34_ef_vin, A_region, FALSE )
# Only delete GDP control functions that exist
# i.e. keep only rows whose stub-technology/gas key also appears in the
# steepness table (presumably the table that created the GDP controls)
L251.ctrl.delete <- L251.ctrl.delete[
vecpaste( L251.ctrl.delete[ c( names_StubTech, "Non.CO2" ) ] ) %in%
vecpaste( L201.nonghg_steepness[ c( names_StubTech, "Non.CO2" ) ] ), ]
# -----------------------------------------------------------------------------
# 3. Write all csvs as tables, and paste csv filenames into a single batch XML file
# Note: each SSP batch XML receives both its InputEmissCoeff and its
# ReadInControl table (two write_mi_data calls target the same batch file).
write_mi_data( L251.ctrl.delete, "DelEmCtrl", "EMISSIONS_LEVEL2_DATA", "L251.ctrl.delete", "EMISSIONS_XML_BATCH", "batch_delete_gdp_control.xml" )
write_mi_data( L251.ssp15_ef, "InputEmissCoeff", "EMISSIONS_LEVEL2_DATA", "L251.ssp15_ef", "EMISSIONS_XML_BATCH", "batch_ssp15_emissions_factors.xml" )
write_mi_data( L251.ssp2_ef, "InputEmissCoeff", "EMISSIONS_LEVEL2_DATA", "L251.ssp2_ef", "EMISSIONS_XML_BATCH", "batch_ssp2_emissions_factors.xml" )
write_mi_data( L251.ssp34_ef, "InputEmissCoeff", "EMISSIONS_LEVEL2_DATA", "L251.ssp34_ef", "EMISSIONS_XML_BATCH", "batch_ssp34_emissions_factors.xml" )
write_mi_data( L251.ssp15_ef_vin, "ReadInControl", "EMISSIONS_LEVEL2_DATA", "L251.ssp15_ef_vin", "EMISSIONS_XML_BATCH", "batch_ssp15_emissions_factors.xml" )
write_mi_data( L251.ssp2_ef_vin, "ReadInControl", "EMISSIONS_LEVEL2_DATA", "L251.ssp2_ef_vin", "EMISSIONS_XML_BATCH", "batch_ssp2_emissions_factors.xml" )
write_mi_data( L251.ssp34_ef_vin, "ReadInControl", "EMISSIONS_LEVEL2_DATA", "L251.ssp34_ef_vin", "EMISSIONS_XML_BATCH", "batch_ssp34_emissions_factors.xml" )
insert_file_into_batchxml( "EMISSIONS_XML_BATCH", "batch_delete_gdp_control.xml", "EMISSIONS_XML_FINAL", "delete_gdp_control.xml", "", xml_tag="outFile" )
insert_file_into_batchxml( "EMISSIONS_XML_BATCH", "batch_ssp15_emissions_factors.xml", "EMISSIONS_XML_FINAL", "ssp15_emissions_factors.xml", "", xml_tag="outFile" )
insert_file_into_batchxml( "EMISSIONS_XML_BATCH", "batch_ssp2_emissions_factors.xml", "EMISSIONS_XML_FINAL", "ssp2_emissions_factors.xml", "", xml_tag="outFile" )
insert_file_into_batchxml( "EMISSIONS_XML_BATCH", "batch_ssp34_emissions_factors.xml", "EMISSIONS_XML_FINAL", "ssp34_emissions_factors.xml", "", xml_tag="outFile" )
logstop()
|
db382a4c661a52047fc27ff4501d13e82153e6a1
|
1189806ea21fed9f9648a734ac02958b0f30d4ff
|
/Scripts/meme.R
|
c59c27a672128571651a7978de0a54f8c692b0f4
|
[] |
no_license
|
deenhe91/Hannahs_Data
|
232e701fdffaa9db3077afa8075186e224271e13
|
ce1e8d2882f3d7eca599d25689a8072cd511c3f0
|
refs/heads/master
| 2021-01-24T20:40:55.396252
| 2015-09-03T10:09:47
| 2015-09-03T10:09:47
| 39,335,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,494
|
r
|
meme.R
|
# CHNOSZ is loaded only to locate its bundled EF-Tu example alignment.
library(CHNOSZ)
# NOTE(review): `file` is never used below (the script reads
# "mm9_feP4k.fasta" instead) and it masks base::file(); this looks like
# leftover scaffolding from the ReadFasta example.
file <- system.file("extdata/fasta/EF-Tu.aln", package="CHNOSZ")
# Read a FASTA file into a data frame with one row per record.
#
# Fixes over the previous version:
#  * header detection is anchored ("^>") so a ">" inside a description or
#    sequence line is not misread as a record boundary;
#  * only the leading ">" is stripped from the header (sub, not gsub), so
#    header text containing ">" is preserved;
#  * seq_along() replaces 1:length(ind), and a file with no headers now
#    fails with a clear error instead of an obscure subscript error.
#
# Args:
#   file: Path to a FASTA-formatted text file.
# Returns:
#   A data.frame with columns `name` (header text without the leading ">")
#   and `sequence` (all sequence lines of the record concatenated).
ReadFasta <- function(file) {
  # Read the file line by line
  fasta <- readLines(file)
  # Identify header lines (must start with ">")
  ind <- grep("^>", fasta)
  if (length(ind) == 0L) {
    stop("No FASTA headers found in ", file, call. = FALSE)
  }
  # Each record's sequence runs from the line after its header up to the
  # line before the next header (or the end of the file for the last one)
  from <- ind + 1L
  to <- c(ind[-1L] - 1L, length(fasta))
  # Collapse the (possibly wrapped) sequence lines of each record
  seqs <- vapply(
    seq_along(ind),
    function(i) paste(fasta[from[i]:to[i]], collapse = ""),
    character(1)
  )
  # Create and return the result data frame
  data.frame(name = sub("^>", "", fasta[ind]), sequence = seqs)
}
#
# NOTE(review): hard-coded absolute path — this script only runs on the
# original author's machine.
setwd("/Users/hannahdeen/Desktop")
#
# Parse the genome-wide FASTA (one header + sequence per record)
seqs<-ReadFasta("mm9_feP4k.fasta")
descs <- as.character(seqs[,1])
gene_names <- mat.or.vec(dim(seqs)[1],1) #gene IDs from whole genome fasta
seq_descr <- "y" #geneID and descriptive strip for fasta file
# Split each FASTA header on spaces: token 2 carries the gene ID (a
# 2-character suffix is stripped below) and tokens 3..30 the description.
# The fixed 1:30 window pads short headers with NA, which is why the "NA"
# cleanup loop further down exists. The "y" seeded above means seq_descr[1]
# is a placeholder; real entries start at index 2.
for (i in 1:dim(seqs)[1]){
tmo <- strsplit(descs[i], " ")[[1]][3:30]
tmp <- strsplit(descs[i], " ")[[1]][2]
tmq <- strsplit(tmp, "_")[[1]][1]
tmr <- c(tmq, tmo)
tms <- paste0(tmr[1:30], collapse = " ")
tmp_length <- nchar(tmp)
seq_descr <- c(seq_descr, tms)
gene_names[i] <- substr(tmp,1,(tmp_length -2 ))
}
gene_names <- toupper(gene_names)
##
# NOTE(review): `memes` is not defined anywhere in this script, so this loop
# only works with a pre-populated workspace. It also throws away the
# gene_names just built from the FASTA headers above. The bound 1696 is
# dataset-specific — confirm it matches nrow(memes).
gene_names <- "x"
for (i in 1:1696) {
m <- strsplit(as.character(memes[i,1]), " ")
gene_names <- c(gene_names, m[[1]][1])
}
# Truncate each description at the first "NA" substring (the padding
# introduced by the fixed 1:30 token window). 21240 is dataset-specific.
dscr <- "z"
for (i in 2:21240) {
klm <- strsplit(seq_descr[i], "NA")[[1]][1]
dscr <- c(dscr, klm)
}
dscr <- toupper(dscr) ##seq_descr without NAs and capitalised to match run3 genes
##all genes in all clusters
run3 <- read.table("run3.txt", sep="\t")
run3genes <- as.character(unique(run3[,2]))
##find genes that are NOT in the fasta file. commongenes_50 and
commongenes <- Reduce(intersect, list(gene_names, run3genes))
##generates 493 genes, no duplicates
# For each run3 gene, record the position of its first match in gene_names
# (NA when absent). matchlist[1] is a 0 sentinel; real entries start at 2.
# NOTE(review): bound 994 is dataset-specific — confirm length(run3genes).
matchlist <- 0
for (i in 1:994) {
m <- which((gene_names) == run3genes[i])
matchlist <- c(matchlist, m[1])
}
gene_names[matchlist]
## matchlist generates 631
meme_seqs <- seqs[matchlist,1:2]
##matching first 50 genes
#find out positions of common genes in meme50genes and then delete these.
# NOTE(review): `meme50genes` is only built in the commented-out lines
# below, so this loop also relies on a pre-populated workspace.
matchlist_50 <- 0
for (i in 1:656) {
m <- which((gene_names) == meme50genes[i])
matchlist_50 <- c(matchlist_50, m+1)
}
meme_seqs_50 <- seqs[matchlist_50,]
##
##meme50 <- read.table("meme50.txt")
##meme50genes <- unique(as.character(meme50[,2]))
##commongenes_50 <- Reduce(intersect, list(gene_names, meme50genes))
# Write the matched records back out as a FASTA file via sink()/cat();
# the +1 offsets skip the sentinel element at position 1.
j = 0
sink("wigwam.fasta")
for (i in 2:495) {
j <- j+1
cat(">")
cat(dscr[matchlist[i]+1])
cat("\n")
cat(as.character(seqs[matchlist[i]+1,2]))
cat("\n")
}
sink()
#### for first 50 genes
j = 0
sink ("meme_50.fasta")
for (i in 2:420) { #matchlist[1] is zero
j <- j+1
cat(">")
cat(dscr[matchlist_50[i]+1])
cat("\n")
cat(as.character(meme_seqs_50[(j),2]))
cat("\n")
}
sink()
##modmeme <- strsplit(as.character(memegenes[,1]), "_")
# Build a two-column table from run3: column 1 takes the text after the
# first "e" in the first underscore-delimited token of run3[,1], column 2
# the gene name. 5542 is dataset-specific (presumably nrow(run3)).
meme.table <- mat.or.vec(5542,2)
meme.table <- data.frame(meme.table)
vecs <- "x"
for (i in 1:5542) {
vec <- strsplit(as.character(run3[i,1]), "_")
vecb <- strsplit(vec[[1]][1], "e")
meme.table[i,1] <- vecb[[1]][2]
meme.table[i,2] <- as.character(run3[i,2])
vecs <- c(vecs, vecb[[1]][2])
}
# Dump every description/sequence pair into one FASTA (the file name
# suggests it is input for MAST).
j = 0
sink ("mast.fasta")
for (i in 2:21239) { #matchlist[1] is zero
j <- j+1
cat(">")
cat(dscr[i])
cat("\n")
cat(as.character(seqs[j,2]))
cat("\n")
}
sink()
|
46416810bd143b3bd6b46061910c985b1f120022
|
b48ea7f06b12d71fe597adefa5da35e81d08caf8
|
/inst/examples/08-class/server.R
|
c3c01b0127b370612d8a82ee559bd661508786d8
|
[
"MIT"
] |
permissive
|
shinyTree/shinyTree
|
c840dd94af787e15cce6b7c0d4b73b2537123b8a
|
110b49970d117d0638746d47b074e955287abed0
|
refs/heads/master
| 2023-08-30T22:50:33.875203
| 2023-08-07T15:53:07
| 2023-08-07T15:53:07
| 22,565,116
| 75
| 42
|
NOASSERTION
| 2023-02-08T10:37:30
| 2014-08-03T02:44:13
|
JavaScript
|
UTF-8
|
R
| false
| false
| 381
|
r
|
server.R
|
library(shiny)
library(shinyTree)

#' Server logic rendering a small fixed tree with typed nodes
#' @author Jeff Allen \email{jeff@@trestletech.com}
shinyServer(function(input, output, session) {
  output$tree <- renderTree({
    # Build the nested branch first, then assemble the two typed roots
    sub_list_a <- list(leaf1 = "", leaf2 = "")
    list(
      root1 = structure("", sttype = "red-node"),
      root2 = structure(list(SubListA = sub_list_a), sttype = "blue-node")
    )
  })
})
|
527248c6a443d5f995f2c12e09b43145534b2b73
|
36d379836d93b50efd85d10dc8fc2f849ead22eb
|
/week_3/ggplot練習/ggplot練習-1.R
|
d7b98bae6aea6e49ae90087657280baf686c9602
|
[] |
no_license
|
bigmike6322/NTU-CS-X-RProject-AHsiang
|
a185dd98b3b5692d15795ed1905d8f7726b5f255
|
d8a081edab844d21d04655e90b4d5e55301263b2
|
refs/heads/master
| 2021-01-25T10:43:48.442309
| 2018-04-08T07:48:14
| 2018-04-08T07:48:14
| 123,368,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,266
|
r
|
ggplot練習-1.R
|
# Economist-style scatterplot exercise: Corruption Perceptions Index (CPI)
# vs. Human Development Index (HDI), built up step by step (pl .. pl_final).
library(ggplot2)
library(ggthemes)
library(data.table)
#df <- read.csv("/Users/bigmike6322/Documents/GitHub/NTU-CS-X-RProject-AHsiang/week_3/ggplot練習/Economist_Assignment_Data.csv")
#str(df)
# NOTE(review): hard-coded absolute path; only runs on the author's machine.
df <- fread('/Users/bigmike6322/Documents/GitHub/NTU-CS-X-RProject-AHsiang/week_3/ggplot練習/Economist_Assignment_Data.csv', drop=1) #drop=1 removes the column at that index while reading
str(df)
#The points are colored by region. Any other categorical column could be used instead.
pl <- ggplot(df, aes(x = CPI, y = HDI , color=Region)) + geom_point(shape=1, size=0.5)
pl
#geom_smooth's `method` argument picks the regression used for the trend line. The default is "loess", but lm, glm, gam, loess or rlm also work; method='lm' alone fits a straight line, while method='lm' with formula=y~log(x) fits a custom log model. se=TRUE/FALSE toggles the grey confidence band, `fill` sets the band colour, and size/alpha/color adjust the line's width/transparency/colour.
pl2 <-pl +geom_smooth(aes(group=1),method='lm', formula=y~log(x),se=FALSE) #group=1..4 would map to individual Region levels (Americas/Asia Pacific/...); group=1 fits a single line across all regions
#pl2
#On top of pl2's fitted line, label each point with geom_text
pl3 <- pl2+ geom_text(aes(label=Country))
#pl3
#Use pointsToLabel to pick out only the countries of interest
pointsToLabel <-c("Russia", "Venezuela", "Iraq", "Myanmar", "Sudan",
"Afghanistan", "Congo", "Greece", "Argentina", "Brazil",
"India", "Italy", "China", "South Africa", "Spane","Norway","Singapore")
# NOTE(review): "Spane" above is likely a typo for "Spain" and will not
# match any row of the data.
pl3 <-pl2 + geom_text(aes(label = Country),color ="blue", data = subset(df,Country %in% pointsToLabel),check_overlap =TRUE ) #check_overlap=TRUE hides labels that would overlap at the current plot size; set FALSE (or zoom in) to show them all
#Clean up the plot background by applying a theme
pl4 <- pl3 + theme_bw() #white background; theme_grey() would give the grey default
#Add the title and the x/y axis limits for the whole plot
pl5 <-pl4+ scale_x_continuous(limits = c(1,11),breaks = 1:10)+ scale_y_continuous(limits = c(0.25,1)) #starting y at 0.25 makes the chart look nicer
#pl5
pl6 <-pl5 + labs(title="Corruption and Human Development")
#pl6
#add a theme; one option is theme_economist_white()
pl_final <-pl6 + theme_economist_white()
pl_final
|
c97fa663dfde2122cf4c99edf5ef63b0bc911123
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wordspace/examples/print_dsm.Rd.R
|
f018ed6013304e65c0a451837ff1a12247af832a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
print_dsm.Rd.R
|
# Appears to be an example script auto-extracted from the wordspace
# package's Rd documentation for the print.dsm() method.
library(wordspace)
### Name: print.dsm
### Title: Print Information About DSM Object (wordspace)
### Aliases: print.dsm
### ** Examples
# Build a DSM object from the bundled hieroglyphs matrix and print it
print(dsm(DSM_HieroglyphsMatrix))
|
8815a0926763fc4dc942552af7aa3c82282c3296
|
7aa75ed5813d3778d557ccb1c1bceca9e4b14fe7
|
/rshc/RealSimComp.R
|
afc3d51db71d69f33e81c752372a9ea27c7d4a06
|
[] |
no_license
|
xzhou/gnome
|
8a40ad4b3269129dcb585addaeaeb9f4690dee56
|
1e9df8a380e9935e7e1455e677eb2badbb7657ed
|
refs/heads/master
| 2021-01-10T01:20:15.174093
| 2010-11-19T03:26:03
| 2010-11-19T03:26:03
| 36,974,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
RealSimComp.R
|
# Driver script: compute estimated and real r values for the SHC genotype
# data, save both, and print the single-SNP recovery rate.
#
# Author: xzhou
###############################################################################
source("SHC.R")

# NOTE(review): naming this function `t` masks base::t() (matrix transpose)
# for the rest of the session.
t <- function() {
  genotypes <- readGenotypeFromFastaFile()
  # Estimated r values — the object must be saved under the name `er` so
  # that load("77_2000estR") restores it as `er`
  er <- calculateRValues(genotypes)
  save(er, file = "77_2000estR")
  # Real r values, saved under the name `rr` for the same reason
  rr <- calculateRealR(genotypes)
  save(rr, file = "77_2000realR")
  recovery <- singRecoverate(er, rr)
  print(recovery)
}
t()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.