Dataset schema (one row per source file):

blob_id: string (len 40) | directory_id: string (len 40) | path: string (len 2-327) | content_id: string (len 40) | detected_licenses: list (0-91 items) | license_type: 2 classes | repo_name: string (len 5-134) | snapshot_id: string (len 40) | revision_id: string (len 40) | branch_name: 46 classes | visit_date: timestamp[us] (2016-08-02 22:44:29 to 2023-09-06 08:39:28) | revision_date: timestamp[us] (1977-08-08 00:00:00 to 2023-09-05 12:13:49) | committer_date: timestamp[us] (1977-08-08 00:00:00 to 2023-09-05 12:13:49) | github_id: int64 (19.4k to 671M, nullable) | star_events_count: int64 (0 to 40k) | fork_events_count: int64 (0 to 32.4k) | gha_license_id: 14 classes | gha_event_created_at: timestamp[us] (2012-06-21 16:39:19 to 2023-09-14 21:52:42, nullable) | gha_created_at: timestamp[us] (2008-05-25 01:21:32 to 2023-06-28 13:19:12, nullable) | gha_language: 60 classes | src_encoding: 24 classes | language: 1 class | is_vendor: bool | is_generated: bool | length_bytes: int64 (7 to 9.18M) | extension: 20 classes | filename: string (len 1-141) | content: string (len 7-9.18M)

blob_id: d9c60de72a4f49f77893f149e7385f9316e8963d | directory_id: d8ea9459151048d15cf421881cf793688aa5f9ca | path: /rscripts/final/1.1_mss_processing.R | content_id: b0a445a6b69a64a2f847aa4ef35d8a34c7b57ef3 | detected_licenses: [] | license_type: no_license | repo_name: cschwab1/Berlin-Gentrification-Project.github.io | snapshot_id: e74b9698af4c96c119e33750401ed08d17016e6c | revision_id: 93f3dd70de4cda9e7a8e0699ca7a5251fb9ba76f | branch_name: refs/heads/main | visit_date: 2023-06-02T23:40:17.414279 | revision_date: 2021-05-17T18:42:05 | committer_date: 2021-05-17T18:42:05 | github_id: 364,145,592 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-06-28T05:55:19 | gha_created_at: 2021-05-04T05:07:54 | gha_language: HTML | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 11,150 | extension: r | filename: 1.1_mss_processing.R

########################################
# Thesis Script IV: MSS Processing
########################################
library(tidyverse)
library(sf)
library(areal)
setwd("~/Desktop/Code/Thesis")
########## Subsetting variables and cleaning data
##### MSS03
mss_2003 <- read.csv("~/Desktop/Code/Thesis/demographic/2003_MSS_cut.csv")
mss_2003 <- mss_2003 %>% separate(Verkehrszellen,
c("VKZ_num", "VKZ_name"),
sep = 4)
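# (sep = 4 splits after the 4th character, separating the 4-digit VKZ code from the area name)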
mss_2003 <- mss_2003 %>%
dplyr::select("VKZ_name",
"VKZ_num",
"Wanderungs.saldo.pro.100.EW",
"EW.mit.türkischer.Staatsangehörigkeit.pro.100.EW",
"EW.aus.ausgewählten.arabischen.Staaten.pro.100.EW...",
"EW.aus.den.GUS.Staaten.pro.100.EW",
"EW.mit.polnischer.Staatsangehörigkeit.pro.100.EW",
"EW.aus.Jugoslawien.und.dessen.Nach.folgestaaten.pro.100.EW",
"EW.aus.EU.Staaten.pro.100.EW",
"Arbeitslose.insgesamt.31.12.02.pro.100.EW.18.60.J.",
"Deutsche.Sozialhilfe.Empfänger.pro.100.deutsche.EW",
"Ausländische.Sozialhilfe.Empfänger.pro.100.ausl..EW") %>%
rename(
WA = Wanderungs.saldo.pro.100.EW,
turk = EW.mit.türkischer.Staatsangehörigkeit.pro.100.EW,
arab = EW.aus.ausgewählten.arabischen.Staaten.pro.100.EW...,
easteuro = EW.aus.den.GUS.Staaten.pro.100.EW,
pol = EW.mit.polnischer.Staatsangehörigkeit.pro.100.EW,
yugo = EW.aus.Jugoslawien.und.dessen.Nach.folgestaaten.pro.100.EW,
eu = EW.aus.EU.Staaten.pro.100.EW,
gwelf = Deutsche.Sozialhilfe.Empfänger.pro.100.deutsche.EW,
awelf = Ausländische.Sozialhilfe.Empfänger.pro.100.ausl..EW,
unemp = Arbeitslose.insgesamt.31.12.02.pro.100.EW.18.60.J.
)
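# The two steps below coerce the measure columns to numeric (non-numeric entries become NA,
# so "NAs introduced by coercion" warnings are expected) and keep only the first 338 rows (the data rows)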
mss_2003[,2:12] <- lapply(mss_2003[,2:12], as.numeric)
mss_2003 <- mss_2003[1:338,]
mss_2003$aus_noneu <- mss_2003$turk + mss_2003$arab + mss_2003$easteuro + mss_2003$pol + mss_2003$yugo
mss_2003$welf <- mss_2003$gwelf + mss_2003$awelf
mss_2003 <- dplyr::select(mss_2003,
"VKZ_num",
"VKZ_name",
"WA",
"aus_noneu",
"eu",
"welf",
"unemp")
##### MSS 05
mss_2005 <- read.csv("~/Desktop/Code/Thesis/demographic/2005_MSS_cut.csv")
mss_2005 <- mss_2005 %>% separate(Gebiet,
c("VKZ_num", "VKZ_name"),
sep = 4)
mss_2005 <- mss_2005 %>%
dplyr::select("VKZ_name",
"VKZ_num",
"Wanderungs.saldo.pro.100.EW.2004",
"EW.mit.türkischer.Staatsangehörigkeit.pro.100.EW.am.31.12.2004",
"EW.aus.ausgewählten.arabischen.Staaten.pro.100.EW.am.31.12.2004.",
"EW.aus.den.GUS.Staaten.pro.100.EW.am.31.12.2004",
"EW.mit.polnischer.Staatsangehörigkeit.pro.100.EW.am.31.12.2004",
"EW.aus.Jugoslawien.und.dessen.Nach.folgestaaten.pro.100.EW.am.31.12.2004",
"EW.aus.EU.Staaten.pro.100.EW.am.31.12.2004",
"Arbeitslose.insgesamt.pro.100.EW.18.60.J..am.31.12.2004",
"Deutsche.Sozialhilfe.Empfänger.pro.100.deutsche.EW.am.31.12.2004",
"Ausländische.Sozialhilfe.Empfänger.pro.100.ausl..EW.am.31.12.2004") %>%
rename(
WA = Wanderungs.saldo.pro.100.EW.2004,
turk = EW.mit.türkischer.Staatsangehörigkeit.pro.100.EW.am.31.12.2004,
arab = EW.aus.ausgewählten.arabischen.Staaten.pro.100.EW.am.31.12.2004.,
easteuro = EW.aus.den.GUS.Staaten.pro.100.EW.am.31.12.2004,
pol = EW.mit.polnischer.Staatsangehörigkeit.pro.100.EW.am.31.12.2004,
yugo = EW.aus.Jugoslawien.und.dessen.Nach.folgestaaten.pro.100.EW.am.31.12.2004,
eu = EW.aus.EU.Staaten.pro.100.EW.am.31.12.2004,
gwelf = Deutsche.Sozialhilfe.Empfänger.pro.100.deutsche.EW.am.31.12.2004,
awelf = Ausländische.Sozialhilfe.Empfänger.pro.100.ausl..EW.am.31.12.2004,
unemp = Arbeitslose.insgesamt.pro.100.EW.18.60.J..am.31.12.2004
)
mss_2005[,2:12] <- lapply(mss_2005[,2:12], as.numeric)
mss_2005 <- mss_2005[1:339,]
mss_2005$aus_noneu <- mss_2005$turk + mss_2005$arab + mss_2005$easteuro + mss_2005$pol + mss_2005$yugo
mss_2005$welf <- mss_2005$gwelf + mss_2005$awelf
mss_2005 <- dplyr::select(mss_2005,
"VKZ_num",
"VKZ_name",
"WA",
"aus_noneu",
"eu",
"welf",
"unemp")
##### MSS 2007
##### think I gotta skip this year — not enough data available
mss_2007 <- read.csv("~/Desktop/Code/Thesis/demographic/2007_MSS_cut.csv")
mss_2007 <- mss_2007[1:447,]
mss_2007 <- mss_2007 %>%
dplyr::select("Gebiet",
"NR",
"Dynamik2",
"E4",
"E5",
"E6",
"E7",
"E10",
"E8",
"Dynamik4",
"Dynamik5",
"Status1") %>%
rename(
WA = Dynamik2,
turk = E4,
arab = E5,
easteuro = E6,
pol = E10,
yugo = E7,
eu = E8,
unemp = Status1
)
mss_2007[,2:12] <- lapply(mss_2007[,2:12], as.numeric)
mss_2007$aus_noneu <- mss_2007$turk + mss_2007$arab + mss_2007$easteuro + mss_2007$pol + mss_2007$yugo
mss_2007$welf_change <- mss_2007$Dynamik4 + mss_2007$Dynamik5
mss_2007 <- dplyr::select(mss_2007,
"NR",
"Gebiet",
"WA",
"aus_noneu",
"eu",
"welf_change",
"unemp")
##### MSS 2009
mss_2009 <- read.csv("~/Desktop/Code/Thesis/demographic/2009_MSS_cut.csv")
mss_2009 <- mss_2009[1:447,]
mss_2009 <- mss_2009 %>%
dplyr::select("Nr.",
"Planungsraum",
"Dynamik.2",
"E.11",
"E.12",
"E.13",
"E.14",
"E.17",
"E.15",
"Status.1",
"E.22", "E.23") %>%
rename(
WA = Dynamik.2,
turk = E.11,
arab = E.12,
easteuro = E.13,
pol = E.17,
yugo = E.14,
eu = E.15,
unemp = Status.1,
gwelf = E.22,
awelf = E.23
)
mss_2009[,2:12] <- lapply(mss_2009[,2:12], as.numeric)
mss_2009$aus_noneu <- mss_2009$turk + mss_2009$arab + mss_2009$easteuro + mss_2009$pol + mss_2009$yugo
mss_2009$welf <- mss_2009$gwelf + mss_2009$awelf
mss_2009 <- dplyr::select(mss_2009,
"Nr.",
"Planungsraum",
"WA",
"aus_noneu",
"eu",
"welf",
"unemp")
########## MSS 2011
mss_2011 <- read.csv("~/Desktop/Code/Thesis/demographic/2011_MSS_cut.csv")
mss_2011 <- mss_2011[1:447,]
mss_2011 <- mss_2011 %>%
dplyr::select("Gebiet",
"Raumid",
"Dynamik2",
"E11",
"E12",
"E13",
"E14",
"E17",
"E15",
"E22",
"E23",
"Status1") %>%
rename( WA = Dynamik2,
turk = E11,
arab = E12,
easteuro = E13,
pol = E17,
yugo = E14,
eu = E15,
unemp = Status1,
gwelf = E22,
awelf = E23)
mss_2011[,2:12] <- lapply(mss_2011[,2:12], as.numeric)
mss_2011$aus_noneu <- mss_2011$turk + mss_2011$arab + mss_2011$easteuro + mss_2011$pol + mss_2011$yugo
mss_2011$welf <- mss_2011$gwelf + mss_2011$awelf
mss_2011 <- dplyr::select(mss_2011,
"Raumid",
"Gebiet",
"WA",
"aus_noneu",
"eu",
"welf",
"unemp")
##### MSS 2013
mss_2013 <- read.csv("~/Desktop/Code/Thesis/demographic/2013_MSS_cut.csv")
mss_2013 <- mss_2013[1:447,]
mss_2013 <- mss_2013 %>%
dplyr::select("Name",
"Nummer",
"K.12",
"S3",
"S1") %>%
rename( WA = K.12,
unemp = S1,
welf = S3)
mss_2013[,2:5] <- lapply(mss_2013[,2:5], as.numeric)
##### MSS 2015
mss_2015 <- read.csv("~/Desktop/Code/Thesis/demographic/2015_MSS_cut.csv")
mss_2015 <- mss_2015 %>%
dplyr::select("Name",
"Nummer",
"K.12",
"K.17",
"S3",
"S1") %>%
rename( WA = K.12,
noEUfor = K.17,
unemp = S1,
welf = S3)
mss_2015[,2:6] <- lapply(mss_2015[,2:6], as.numeric)
##### MSS 2017
mss_2017 <- read.csv("~/Desktop/Code/Thesis/demographic/2017_MSS_cut.csv")
mss_2017 <- mss_2017 %>%
dplyr::select("Name",
"Nummer",
"K.12",
"K.17",
"S3",
"S1") %>%
rename( WA = K.12,
noEUfor = K.17,
unemp = S1,
welf = S3)
mss_2017[,2:6] <- lapply(mss_2017[,2:6], as.numeric)
##### MSS 2019
mss_2019 <- read.csv("~/Desktop/Code/Thesis/demographic/2019_MSS_cut.csv")
mss_2019 <- mss_2019 %>%
dplyr::select("Name",
"Nummer",
"K.12",
"K.17",
"S3",
"S1") %>%
rename( WA = K.12,
noEUfor = K.17,
unemp = S1,
welf = S3)
mss_2019[,2:6] <- lapply(mss_2019[,2:6], as.numeric)
########## Spatially aggregating to the same scale
##### Verkehrzehlen
# vz <- sf_fisbroker("https://fbinter.stadt-berlin.de/fb/wfs/data/senstadt/s_vz")
# vz <- vz %>% separate(gml_id, c("s_vz.", "gml_id"), sep=5) %>%
# dplyr::select("gml_id", "geometry")
#vz$s_vz. <- NULL
##### LOR
# lor <- sf_fisbroker("https://fbinter.stadt-berlin.de/fb/wfs/data/senstadt/s_lor_plan")
# lor <- lor %>%
# separate(gml_id, c("s_lor_plan", "gml_id"), sep=11) %>%
# dplyr::select("gml_id", "geometry")
lor <- st_read("~/Desktop/Code/Thesis/shapefiles/lor.shp") # st_read() returns sf directly; readOGR() would additionally require library(rgdal)
vz <- st_read("~/Desktop/Code/Thesis/shapefiles/vz.shp")
vz$gml_id <- as.numeric(vz$gml_id)
mss_2003 <- left_join(mss_2003, as.data.frame(vz), by=c("VKZ_num" = "gml_id")) %>% st_as_sf()
mss_2005 <- left_join(mss_2005, as.data.frame(vz), by=c("VKZ_num" = "gml_id")) %>% st_as_sf()
varnames <- c("WA", "aus_noneu","eu", "welf", "unemp")
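# Optional sanity check before interpolating (ar_validate() ships with the areal package):
# areal::ar_validate(source = mss_2003, target = lor, varList = varnames, method = "aw", verbose = TRUE)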
mss_2003 <- areal::aw_interpolate(lor, mss_2003,
sid = "VKZ_num",
tid = "gml_id",
intensive = varnames,
weight="sum",
output="sf")
mss_2005 <- areal::aw_interpolate(lor, mss_2005,
sid = "VKZ_num",
tid = "gml_id",
intensive = varnames,
weight="sum",
output="sf")
save(mss_2003, mss_2005, mss_2007, mss_2009, mss_2011, mss_2013, mss_2015, mss_2017, mss_2019, file = "~/Desktop/Code/Thesis/Data_for_Analysis/mss.Rdata")

blob_id: efca53d5f5120f18f4483a09d5d1b14985feff68 | directory_id: 3bef70f4b3d6283f2b2bfb44ccdfbf9b28c6429d | path: /man/tidy_factor.Rd | content_id: e6a2cefe718d129313321bc36f32f33ac2576d5c | detected_licenses: ["MIT"] | license_type: permissive | repo_name: KWB-R/dwc.wells | snapshot_id: 4c1594ea66b1792c6c955b98418982edf80675c1 | revision_id: 45e8670647c4771fe70d59db0f7cfd1e80242361 | branch_name: refs/heads/main | visit_date: 2023-04-10T01:24:40.973815 | revision_date: 2022-07-12T13:42:20 | committer_date: 2022-07-12T13:42:20 | github_id: 351,021,733 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2022-10-16T09:17:19 | gha_created_at: 2021-03-24T09:35:15 | gha_language: R | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 579 | extension: rd | filename: tidy_factor.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_functions.R
\name{tidy_factor}
\alias{tidy_factor}
\title{turn character into factor, sort factor levels and replace NA level}
\usage{
tidy_factor(x, level_sorting = c("frequency", "alphabet")[1])
}
\arguments{
\item{x}{character vector to be turned to factor}
\item{level_sorting}{sorting of factor levels; two options: "frequency"
(default) and "alphabet"; level "Unbekannt" is always at the end}
}
\description{
turn character into factor, sort factor levels and replace NA level
}
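% Hypothetical usage sketch added for illustration (not generated by roxygen2); it assumes
% NA entries end up in the "Unbekannt" level, as the argument description above implies.
\examples{
tidy_factor(c("b", "a", "a", NA))
}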

blob_id: 7f78107bd7387418bcf06e6d50b42c9442325ea5 | directory_id: 555f124ae8496518e510726b84e7f97c78af589b | path: /SANDBOX/Bikes Markdown.R | content_id: 5cfb50acb0f40748a73caca6c24be7787adffb5b | detected_licenses: [] | license_type: no_license | repo_name: qilixiang007/CityBikeNYC | snapshot_id: 1056c0a1e21f2c54e50b34f221756f936ccf4e0f | revision_id: 8e4a8d4598eff8a7afe2b0b470c7c041dbf1193f | branch_name: refs/heads/master | visit_date: 2022-04-12T03:07:39.490675 | revision_date: 2020-03-20T00:03:53 | committer_date: 2020-03-20T00:03:53 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 3,559 | extension: r | filename: Bikes Markdown.R

---
title: "Mapping NYC Citi Bike Routes"
output:
html_document:
keep_md: true
theme: cerulean
highlight: haddock
---
# Setup
Add this code to set global options. Always initialize all of your packages up-front.
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE, warning=F, message=F, fig.width=10)
library( dplyr )
library( ggmap )
library( tibble )
```
# Load Data
```{r}
# Upload and analyze dataset
# bikes <- readRDS("bikes.rds")
bikes <- readRDS(gzcon(url("https://github.com/lecy/CityBikeNYC/raw/master/DATA/bikes.rds")))
str(bikes) # 285552 rows
summary(bikes)
names(bikes)
```
# Attempt 1 (with dplyr)
First approach: reduce the data to two tables, one holding the start-station coordinates and one the end-station coordinates, then merge them by route ID.
```{r}
# Deleting all columns besides start station lat and long
# Start station
start.station <- bikes %>% as_tibble() %>%
mutate(tripduration = NULL, starttime = NULL, stoptime = NULL, start.station.id = NULL,
start.station.name = NULL, end.station.id = NULL, end.station.name = NULL,
end.station.latitude = NULL, end.station.longitude = NULL, bikeid = NULL, usertype = NULL,
birth.year = NULL, gender = NULL)
head(start.station, 10)
# Deleting all columns besides end station lat and long
# End station
end.station <- bikes %>% as_tibble() %>%
mutate(tripduration = NULL, starttime = NULL, stoptime = NULL, start.station.id = NULL,
start.station.name = NULL, start.station.latitude = NULL, start.station.longitude = NULL,
end.station.id = NULL, end.station.name = NULL, bikeid = NULL, usertype = NULL,
birth.year = NULL, gender = NULL)
head(end.station, 10)
# Merge data (long and lat columns for start and end station by id)
bikes.merged <- merge(start.station, end.station, by="ID")
head(bikes.merged, 10)
head(table(table(bikes.merged$ID)), 10)
# 1 4 9 16 25 36 49 64 81 100
# 12932 6848 4400 3182 2355 1899 1543 1264 1162 983
# Group and summarize data
bikes.merged1 <- bikes.merged %>%
group_by(ID) %>%
summarise(n = n())
# unique(bikes.merged1)
# ID n
# <chr> <int>
# 1 116-116 400
# 2 116-127 100
# 3 116-128 1
# 4 116-147 16
# 5 116-151 16
# 6 116-153 169
# 7 116-157 1
# 8 116-160 9
# 9 116-167 1
# 10 116-168 1600
# ... with 44,063 more rows
```
# Attempt 2 (with dplyr)
Another approach: rather than merging two data frames, build a single one with dplyr and count the routes directly.
```{r}
stations <- bikes %>% as_tibble() %>%
mutate(tripduration = NULL, starttime = NULL, stoptime = NULL, start.station.id = NULL,
start.station.name = NULL, end.station.id = NULL, end.station.name = NULL, bikeid = NULL, usertype = NULL,
birth.year = NULL, gender = NULL)
head(stations, 10)
bikes2 <- stations %>%
group_by(ID) %>%
summarise(n = n())
# A tibble: 44,073
# ID n
# <chr> <int>
# 1 116-116 20
# 2 116-127 10
# 3 116-128 1
# 4 116-147 4
# 5 116-151 4
# 6 116-153 13
# 7 116-157 1
# 8 116-160 3
# 9 116-167 1
# 10 116-168 40
# ... with 44,063 more rows
head(table(table(stations$ID)), 10)
# 1 4 9 16 25 36 49 64 81 100
# 12932 6848 4400 3182 2355 1899 1543 1264 1162 983
```
# So the situations differ and the answers differ: the counts from the first approach are the
# squares of those from the second, because merging the start and end tables by ID creates a
# Cartesian join within each route ID.
# Which option is correct?
# Based on the Environment pane, it looks like the second approach is the correct one.
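A quick toy check (hypothetical data, not from bikes.rds) of why the merge inflates the counts:
```{r}
toy <- data.frame(ID = c("A-B", "A-B"))  # one route ID occurring twice
merged <- merge(toy, toy, by = "ID")     # Cartesian join within each ID
nrow(merged)                             # 4 = 2^2, i.e. the merged counts are squared
```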

blob_id: 8c9a1cbfb43318db349bbad04e62d7c1ee0311b3 | directory_id: 6cab50985daf1a272f7f8e5b5cd18ec2b210341f | path: /label_maf.R | content_id: 5b8ba2a1331f973a5dc95731ac37ff9eb3e96461 | detected_licenses: [] | license_type: no_license | repo_name: wooyaalee/vcf_to_tidy_tsv | snapshot_id: f9880181164c2efcf0ecebdc4d8e76159b75e915 | revision_id: 0dd4dc2f6ef9e21e57b9d01f3848357c210cc4d3 | branch_name: refs/heads/master | visit_date: 2022-09-04T03:10:46.819835 | revision_date: 2020-05-18T02:31:12 | committer_date: 2020-05-18T02:31:12 | github_id: 267,899,335 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-05-29T16:03:39 | gha_created_at: 2020-05-29T16:03:38 | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,366 | extension: r | filename: label_maf.R

library(GetoptLong)
library(dplyr)
library(tidyr)
library(readr)
## A script to add Tumor/Normal sample barcodes and Study
## to a MAF file.
## Future feature: arbitrary field setting with a -C (custom) flag.
read_maf <- function(fi){
return(readr::read_tsv(fi,
comment="#",
col_types=cols(Chromosome=col_character(),
HGNC_Previous_Name=col_character()),
skip_empty_rows=TRUE,
trim_ws=TRUE
))
}
label_maf <- function(x, var_name, var_value){
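# rlang injection: `!!var_name := var_value` creates/overwrites a column whose name is the string held in var_name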
x <- x %>% mutate(!!var_name := var_value)
return(x)
}
GetoptLong(
"maf=s", "The MAF file to label.",
"tumor=s", "The Tumor_Sample_Barcode to add.",
"normal=s", "The Normal_Sample_Barcode to add.",
"study=s", "The study field value to add.",
"caller=s", "The variant caller used to generator the MAF",
"output=s", "An output file name to write the labeled maf to."
#"CUSTOM=s%", "A comma-separated list of custom-key-values to add."
)
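# GetoptLong() parses the command line and assigns each option (maf, tumor, normal, study,
# caller, output) as a variable in this environment, which is why they can be used directly below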
maf <- read_maf(maf)
maf <- label_maf(maf, "Tumor_Sample_Barcode", tumor)
maf <- label_maf(maf, "Matched_Norm_Sample_Barcode", normal)
maf <- label_maf(maf, "Study", study)
maf <- label_maf(maf, "caller", caller)
#cat(readr::format_tsv(maf))
write_tsv(maf, output)

blob_id: 8891a4476a1634858ff650b14f26c9cba203ccf6 | directory_id: 16073509499c165a47add2639fe8abf8be1acb74 | path: /sensitivity_simulation.R | content_id: 169ce51e7d98d2647b575925e93924f90914750e | detected_licenses: [] | license_type: no_license | repo_name: nsvitek/observer-free-morphotype-characterization | snapshot_id: f64b2c7aca21b0abfe522d33d431af7037fc4199 | revision_id: 921852e7b2a36719dc464b945ecee7d6ad7a05e4 | branch_name: refs/heads/master | visit_date: 2022-07-08T06:04:41.698122 | revision_date: 2022-06-24T19:06:12 | committer_date: 2022-06-24T19:06:12 | github_id: 58,686,205 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 10,771 | extension: r | filename: sensitivity_simulation.R

locateScripts<-"C:/cygwin/home/N.S/scripts/observer-free-morphotype-characterization/"
# location of data
locateData<-"D:/Dropbox/Documents/Dissertation/sensitivity_analysis/data"
# Load Dependencies, Common Objects ------------------------------------------------------------------
setwd(locateScripts)
source("sensitivity_dependencies.R")
filename<-"morphologika_unscaled_high.txt"
groups<-c(8,1,5,6,7,8,8,8,8,8,1,1,1,1,1,2,3,4) #set group IDs
sim.palette<-brewer.pal(n=8,"Spectral") #set color palette
cluster<-c("0128","0256","0512","1024","2048","4096") #group replicates
# plot settings
pseudolm.lab<-c(128,256,512,1024,2048,4096) #build grouping vectors
col.tab.discrete<-brewer.pal(length(cluster),"Set2")
ylab.txt<-parse(text="R^2")
cex=1.5
cex.lab=1
cex.axis=1
mtext.line=2
mtext.cex=1
line.lwd=0.2
legend.pos='bottomright'
legend.cex=1.5
pset<-c(21,22,23,24,25,25)
legend.txt<-c("128","256","512","1,024","2,048","4,096")
legend.title<-"Pseudolandmarks"
taxon.point<-add.alpha(sim.palette,alpha=1)
taxon.bubble<-add.alpha(sim.palette,alpha=0.3)
palette<-colorRampPalette(c("blue","green","yellow","red"))
# Load Data ------------------------------------------------------------------
setwd(locateData)
setwd("simulation")
PCA<-readrep(19,22,c(1:length(groups)),filename=filename) #read in data
# Analyses for Publication --------------------------------------------------
setwd("../outputr")
(r_vals<-genRvals(PCA,cluster))
summary_stats<-Rvalsumm(r_vals)
row.names(summary_stats)<-cluster
write.csv(summary_stats,file="sim_r-vals_summary-stats.csv")
pfish<-Pvalsumm(r_vals,cluster,metric="mean")
write.csv(pfish,"sim_r_pairwise_mean_p-vals.csv",quote=F)
pfish<-Pvalsumm(r_vals,cluster,metric="median")
write.csv(pfish,"sim_r_pairwise_median_p-vals.csv",quote=F)
# pick the two groups with highest and lowest mean R^2, look at distributions
minset<-r_vals[[which(summary_stats[,1]==min(summary_stats[,1]))]]
maxset<-r_vals[[which(summary_stats[,1]==max(summary_stats[,1]))]]
plotRdistr(minset,maxset)
# plot line graph of mean R^2
tiff(width=7,height=7,units="cm",res=800,pointsize=8,filename="sim_r-val-mean_line.tif")
par(mar=c(3,3.3,.5,.5))
alignLine(summary_stats[,1],col.tab.discrete[5],pseudolm.lab,summary_stats[,6],summary_stats[,7],
pch=pset[5],cex=cex,cex.lab=cex.lab,xlab="Pseudolandmarks",ylab=ylab.txt,cex.axis=cex.axis,
mtext.line=mtext.line)
dev.off()
# plot first two PCs with alignment error
choice<-1
point.set<-pset
point.index<-groups
point.color<-taxon.point
bubble.color<-taxon.bubble
cex<-cex
cex.lab<-cex.lab
mtext.line<-3
alignerrPC(PCA,cluster,choice=6,pcs=c(1,2),
point.set=rep(21,8),point.index=groups,point.color=sim.palette,
bubble.color=taxon.bubble,cex=cex,cex.lab=cex.lab,mtext.line=3)
alignerrPC(PCA,cluster,choice=6,pcs=c(1,2),
point.set=pset,point.index=groups,point.color=taxon.point,
bubble.color=taxon.bubble,cex=cex,cex.lab=cex.lab,mtext.line=3)
tiff(width=7,height=7,units="cm",res=300,pointsize=8,filename="sim_align_1.tif")
par(mar=c(4,5,.5,.5)) #best
alignerrPC(PCA,cluster,choice=5,pcs=c(1,2),
point.set=rep(21,8),point.index=groups,point.color=taxon.point,
bubble.color=taxon.bubble,cex=cex,cex.lab=2,mtext.line=2.5)
dev.off()
tiff(width=7,height=7,units="cm",res=300,pointsize=8,filename="sim_align_2.tif")
par(mar=c(4,5,.5,.5)) #worst
alignerrPC(PCA,cluster,choice=1,pcs=c(1,2),
point.set=rep(21,8),point.index=groups,point.color=taxon.point,
bubble.color=taxon.bubble,cex=cex,cex.lab=2,mtext.line=2.5)
dev.off()
tiff(width=7,height=7,units="cm",res=300,pointsize=8,filename="sim_align_3.tif")
par(mar=c(4,5,.5,.5)) #middle
alignerrPC(PCA,cluster,choice=4,pcs=c(1,2),
point.set=rep(21,8),point.index=groups,point.color=taxon.point,
bubble.color=taxon.bubble,cex=cex,cex.lab=2,mtext.line=2.5)
dev.off()
plot(c(0,2),c(0,1),type = 'n', axes = F,xlab = '', ylab = '')
legend('center',legend=c("shape 1","shape 8"),pch=21,cex=3,
pt.bg=sim.palette[c(1,8)],pt.cex=6)
# VarianceDisparity -------------------------------------------------
# Are more or less complex shapes (number of patches, size of patches)
# any harder to align properly, i.e., is there more variance among some identical shapes
# than others?
# From Zelditch et al. 2012 workbook (361-362): In studies of shape, a variance can be calculated by measuring
# the Procrustes distance of each individual from the mean, which is equivalent to measuring the
# variance of each coordinate, summed over all the coordinates. Unlike P.D.P.'s function below,
# the Workbook divides by N-1, not N.
# For groups 1 & 8, take which(groups==i) and calculate disparity for that group in terms of PC scores
group1<-which(groups==1) #sphere
group2<-which(groups==8) #water molecule
disparity<-matrix(NA, nrow=2,ncol=length(PCA))
rownames(disparity)<-c("sphere1","water8")
for (i in 1:length(PCA)){
disparity[1,i]<-individual.disparity(PCA[[i]]$x[group1,])
disparity[2,i]<-individual.disparity(PCA[[i]]$x[group2,])
}
disparitydiff<-disparity[,] %>% #use only "good" alignments
apply(.,2, function(x) x[1]/x[2]) %>% #ratio of variance/disparity
cbind(.,c(rep(128,9),rep(256,9),rep(512,9),rep(1024,9),rep(2048,9),rep(4096,9))) %>% as.data.frame
colnames(disparitydiff)<-c("variance.ratio","id")
# library(ggplot2)
ggplot(data=disparitydiff, aes(x=variance.ratio,fill=factor(id))) +
geom_histogram(binwidth=0.25) #ratio of disparity depends on # of points
# Note in example below that code from P.D.P and Zelditch et al. calculations produce same results
# individual.disparity(PCA[[37]]$x[group1,])
# apply(PCA[[37]]$m2d[group1,],2,var) %>% sum(.)/(nrow(d2)-1)
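# A self-contained toy version of the same check (illustrative only, not part of the analysis):
# m <- matrix(rnorm(20), nrow=5) # 5 specimens x 4 coordinates
# sum(rowSums(sweep(m, 2, colMeans(m))^2))/nrow(m) # mean squared distance from the mean, divided by N
# sum(apply(m, 2, var))*(nrow(m)-1)/nrow(m) # summed coordinate variances, rescaled from N-1 to N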
# Mantel tests ------------------------------------------
# mantel_vals<-genMantelvals(PCA,cluster) #takes forever. save results and never overwrite.
# mantelR<-unlist(mantel_vals[[1]]) %>% matrix(.,ncol=36,byrow=TRUE)
# rownames(mantelR)<-cluster
# write.csv(mantelR,"sim_mantelR.csv")
# mantelP<-unlist(mantel_vals[[2]]) %>% matrix(.,ncol=36,byrow=TRUE)
# rownames(mantelP)<-cluster
# write.csv(mantelP,"sim_mantelP.csv")
# summary_stats<-Rvalsumm(mantel_vals[[1]])
mantelR<-read.csv("sim_mantelP.csv",row.names=1,header=TRUE) #note change from R to P!!!
summary_stats<-lapply(seq_len(nrow(mantelR)), function(i) unlist(mantelR[i,])) %>%
Rvalsumm
row.names(summary_stats)<-cluster
# write.csv(summary_stats,file="sim_mantelP_summary-stats.csv")
# pfish<-Pvalsumm(mantel_vals[[1]],cluster,metric="mean")
# write.csv(pfish,"sim_r_pairwise_mean_mantelR.csv",quote=F)
# pfish<-Pvalsumm(mantel_vals[[1]],cluster,metric="median")
# write.csv(pfish,"sim_r_pairwise_median_mantelR.csv",quote=F)
tiff(width=7,height=7,units="cm",res=800,pointsize=8,filename="sim_mantelP-mean_line.tif")
par(mar=c(3,3.3,.5,.5))
alignLine(summary_stats[,1],col.tab.discrete,pseudolm.lab,summary_stats[,6],summary_stats[,7],
pch=pset,cex=cex,cex.lab=cex.lab,xlab="Pseudolandmarks",ylab="mean observed correlation coefficient",cex.axis=cex.axis,
legend.pos='topleft',legend.txt=legend.txt,legend.title=legend.title, # legend.txt was 'n' (undefined); using the labels defined above
legend.cex=legend.cex,mtext.line=mtext.line)
dev.off()
#Phenetic trees -------------------------------------------------
RFdists<-getRFdist(PCA,cluster,tips=groups)
unlist(RFdists) %>% matrix(.,ncol=36,byrow=TRUE) %>%
write.csv("sim_RFdists_raw.csv")
summary_stats<-Rvalsumm(RFdists)
row.names(summary_stats)<-cluster
write.csv(summary_stats,file="sim_RFdists_summary-stats.csv")
pfish<-Pvalsumm(RFdists,cluster,metric="mean")
write.csv(pfish,"sim_r_pairwise_mean_RFdists.csv",quote=F)
pfish<-Pvalsumm(RFdists,cluster,metric="median")
write.csv(pfish,"sim_r_pairwise_median_RFdists.csv",quote=F)
# RFdists2<-getRFdist2(PCA,cluster,tips=groups,pcs=1)
# summary_stats<-Rvalsumm(RFdists2)
tiff(width=7,height=7,units="cm",res=800,pointsize=8,filename="sim_RFdist-mean_line.tif")
par(mar=c(3,3.3,.5,.5))
alignLine(summary_stats[,1],col.tab.discrete[5],pseudolm.lab,summary_stats[,6],summary_stats[,7],
pch=pset[5],cex=cex,cex.lab=cex.lab,xlab="Pseudolandmarks",ylab="Robinson-Foulds Distance",cex.axis=cex.axis,
legend.pos='topright',legend.txt=legend.txt,legend.title=legend.title, # legend.txt was 'n' (undefined)
legend.cex=legend.cex,mtext.line=mtext.line)
dev.off()
# Procrustes ANOVA -----------------------------------------------
repvals<-genRepeatvals(PCA,cluster,variable=groups,rep=6,pcs=length(groups))
summary_stats<-Rvalsumm(repvals)
tiff(width=7,height=7,units="cm",res=800,pointsize=8,filename="sim_repeatability-mean_line.tif")
par(mar=c(3,3.3,.5,.5))
alignLine(summary_stats[,1],col.tab.discrete[5],pseudolm.lab,summary_stats[,6],summary_stats[,7],
pch=pset[5],cex=cex,cex.lab=cex.lab,xlab="Pseudolandmarks",ylab="repeatability",cex.axis=cex.axis,
legend.pos='topright',legend.txt=legend.txt,legend.title=legend.title, # legend.txt was 'n' (undefined)
legend.cex=legend.cex,mtext.line=mtext.line)
dev.off()
identity<-makegroups(PCA,cluster) #figure out group identity
repeatability<-sapply(cluster,function(x) NULL) #make empty list
for (cls in 1:length(repeatability)){
repeatability[[cls]]<-sapply(identity[[cls]],function(x) NULL) #make empty list in list
for (i in 1:length(repeatability[[cls]])){
repeatability[[cls]][[i]]<-find_repeatablePCs(PCA[[identity[[cls]][i]]]$x,variable=groups,rep=6)
} #run find_repeatablePCs for the given cluster
repeatability[[cls]]<-unlist(repeatability[[cls]]) %>% #formatting: make each PC a row
matrix(.,nrow=length(repeatability[[cls]]),byrow=TRUE) %>% t
}
repeatability.mat<-unlist(repeatability[[1]]) %>% matrix(.,nrow=length(groups),byrow=FALSE)
for (i in 2:length(repeatability)){
repeatability.mat<-unlist(repeatability[[i]]) %>% matrix(.,nrow=length(groups),byrow=FALSE) %>%
rbind(repeatability.mat,.)
} #bind all clusters into one big matrix
summary_stats<-lapply(seq_len(nrow(repeatability.mat)), function(i) unlist(repeatability.mat[i,])) %>%
Rvalsumm #make plotting variables
tiff(width=7,height=7,units="cm",res=800,pointsize=8,filename="mar_repeatPC-mean_line.tif")
par(mar=c(3,3.3,.5,.5))
alignLine(summary_stats[,1],col.tab.discrete,pseudolm=seq(1,length(groups)),summary_stats[,6],summary_stats[,7],
pch=pset,cex=cex,cex.lab=cex.lab,xlab="Principal Components",ylab="repeatability",cex.axis=cex.axis,
legend.pos='topright',legend.txt=legend.txt,legend.title=legend.title,
legend.cex=legend.cex,mtext.line=mtext.line)
dev.off()
### If you use this code in published materials, please cite:
# Vitek, N.S., Manz, C.L., Gao, T. Bloch, J.I., Strait, S.G., Boyer, D.M. In Press. Semi-supervised determination of pseudocryptic morphotypes using observer-free characterizations of anatomical alignment and shape. Ecology and Evolution.

blob_id: b7f5c363a3d8f6df3d959f4db2a7149a9c3c6a58 | directory_id: 7f7a0f2774e61833f5944c7af798c86e224e3339 | path: /man/Board_colors.Rd | content_id: 2c093f3bf3b91aa48379d4af720717c9c37eaf30 | detected_licenses: [] | license_type: no_license | repo_name: cconley/SchoolR | snapshot_id: a83f6bee0e1c7b3ab7f8a27cc79310d26f45f82b | revision_id: 1c9f563fac86f58aad783e57814d1901870e283e | branch_name: refs/heads/master | visit_date: 2020-03-21T10:13:14.737678 | revision_date: 2018-06-23T23:24:34 | committer_date: 2018-06-23T23:24:34 | github_id: 138,439,166 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 723 | extension: rd | filename: Board_colors.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Colors.R
\docType{data}
\name{Board_colors}
\alias{Board_colors}
\title{Loads the Board Color palettes, which include:}
\format{An object of class \code{character} of length 7.}
\usage{
Board_colors
}
\description{
Board_cols():
turquoise, orange, yellow, burgundy, blue, grey, green
}
\details{
Board_pal():
main = blue, green, yellow
cool = blue, green
hot = yellow, orange, burgundy
mixed = blue, green, yellow, orange, burgundy
grey = light grey, dark grey
Board_scale_color()
Board_scale_fill()
#Use https://coolors.co/ to get color tints
}
\keyword{datasets}

blob_id: 6b5e1f8154388c8d75cb58a2ab2da1905e27f279 | directory_id: 8cfb4de65cda8ececc9d6547a2bf1238b1ca4c81 | path: /Matrix Models/Examples/Two-site matrix models for black-headed gulls.R | content_id: 8930950df1bcbb7d28110bd9716979822a76da25 | detected_licenses: [] | license_type: no_license | repo_name: kanead/white-backed-vulture-population-dynamics | snapshot_id: d299a475c1beb0505aac6c2aa5aa490f6e99cdb4 | revision_id: 4ca7f04dbda31771e51b29f6134f1aa4129ba1f6 | branch_name: refs/heads/master | visit_date: 2021-01-10T23:21:42.608761 | revision_date: 2017-10-20T16:29:10 | committer_date: 2017-10-20T16:29:10 | github_id: 70,612,534 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 6,095 | extension: r | filename: Two-site matrix models for black-headed gulls.R

# Matrix Models for Population Management & Conservation
# 2014
# CNRS CEFE workshop, Montpellier, France
# Jean-Dominique Lebreton, Olivier Gimenez, Dave Koons
# EXERCISE 3: Two-site matrix models for black-headed gulls
rm(list=ls(all=TRUE)) # clears the R memory, which is sometimes useful
################################################################################
# Piece 1 of code #
################################################################################
library(MASS) # an R package needed for some matrix functions
library(quadprog)
library(popbio)
# First, define the demographic parameters
# Good site
sg0 <- 0.4
sg1 <- 0.6
s <- 0.82 # adult survival common to both sites
fg <- 0.8 # half the mean clutch size (modeling females only)
pg2 <- 0.3 # proportion of adults that attempt to breed each year at age 2, etc.
pg3 <- 0.5
pg4 <- 0.7
pg5 <- 1
# Bad sites
sb0 <- 0.4
sb1 <- 0.5
fb <- 0.5 # half the mean clutch size at Bad sites
pb2 <- 0.5
pb3 <- 0.8
pb4 <- 1
pb5 <- 1
# Effective migration rates (juvenile dispersal * their survival)
s1gb <- 0.2 # Good to Bad
s1bg <- 0.3 # Bad to Good
# Next, create the two-site pre birth-pulse matrix model for black-headed gulls
A <- matrix(c(
0, sg0*fg*pg2, sg0*fg*pg3, sg0*fg*pg4, sg0*fg*pg5, 0, 0, 0, 0, 0,
sg1, 0, 0, 0, 0, s1bg, 0, 0, 0, 0,
0, s, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, s, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, s, s, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, sb0*fb*pb2, sb0*fb*pb3, sb0*fb*pb4, sb0*fb*pb5,
s1gb, 0, 0, 0, 0, sb1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, s, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, s, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, s, s), nrow = 10, byrow = TRUE)
A
# Then use the following popbio function; quite easy!
lambda(A)
# The following code demonstrates the matrix algebra that
# is used 'behind the scenes' of the lambda function in the popbio package.
rows <- dim(A)[1]
cols <- dim(A)[2]
eig <- eigen(A) # eigenvalues of A
EigVecs <- eig$vectors # eigenvectors of A
Lambdas <- Re(eig$values) # real number components of eigenvalues
Lambda <- max(Lambdas) # long-term geometric rate of population growth
Lambda
pos <- which.max(Lambdas) # finding the position of the dominant eigenvalue
w <- Re(eig$vectors[1:rows,pos]) # its associated right eigenvector
w
sad <- w/(sum(w))
sad <- round(sad,3) # scaled dominant right eigenvector: Stable Age Distribution
# In the following, the ginv function inverts a matrix by calculating the
# Moore-Penrose generalized inverse of a matrix.
V <- Conj(ginv(EigVecs)) # left eigenvector; NOTE this notation from H Caswell
v <- Re(t(t(V[pos,]))) # dominant left eigenvector
v
rv <- v/(sum(v))
rv <- round(rv,3) # scaled to provide proportional Reproductive Values
sad
rv
# Conduct a sensitivity and elasticity analysis for the lower-level vital rates
# (i.e., those that make up the matrix elements) using the popbio package.
# Just put the vital rates in a list, and write the matrix as an expression
gull.vr <- list(sg0=0.4,sg1=0.6,s=0.82,fg=0.8,pg2=0.3,pg3=0.5,pg4=0.7,pg5=1,
sb0=0.4,sb1=0.5,fb=0.5,pb2=0.5,pb3=0.8,pb4=1,pb5=1,s1gb=0.2,s1bg=0.3)
gull.A <- expression(
0, sg0*fg*pg2, sg0*fg*pg3, sg0*fg*pg4, sg0*fg*pg5, 0, 0, 0, 0, 0,
sg1, 0, 0, 0, 0, s1bg, 0, 0, 0, 0,
0, s, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, s, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, s, s, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, sb0*fb*pb2, sb0*fb*pb3, sb0*fb*pb4, sb0*fb*pb5,
s1gb, 0, 0, 0, 0, sb1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, s, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, s, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, s, s)
# then apply the following popbio function
llsenselas <- vitalsens(gull.A,gull.vr)
llsenselas
################################################################################
# Piece 2 of code #
################################################################################
ratio <- numeric() # a storage bin for holding calculations of a ratio
# to be interpreted for question 5
n <- matrix(1,10,1) # a vector with an initial abundance of 1 individual per
# age and location
tspan <- 50
for (t in 1:tspan){
n <- A%*%n # %*% = matrix multiplication in R
# note that we simply overwrite the abundance vector at each time step
# since we have no need to store it for these questions. It is updating.
Nbreedg <- pg2*n[2]+pg3*n[3]+pg4*n[4]+pg5*n[5]
Nbreedb <- pb2*n[7]+pb3*n[8]+pb4*n[9]+pb5*n[10]
ratio[t] <- Nbreedg/Nbreedb # this we store
}
ratio
par(mar = c(5, 6, 4, 2))
plot(1:tspan,ratio,type="l",xlab=list("Time",cex=2),ylab=list("Ratio",cex=2))
################################################################################
# Piece 3 of code #
################################################################################
# Define adult dispersal probabilities
gb <- 0 # Good to Bad
bg <- 0 # Bad to Good
A <- matrix(c(
0, sg0*fg*pg2, sg0*fg*pg3, sg0*fg*pg4, sg0*fg*pg5, 0, 0, 0, 0, 0,
sg1, 0, 0, 0, 0, s1bg, 0, 0, 0, 0,
0, s*(1-gb), 0, 0, 0, 0, s*bg, 0, 0, 0,
0, 0, s*(1-gb), 0, 0, 0, 0, s*bg, 0, 0,
0, 0, 0, s*(1-gb), s*(1-gb), 0, 0, 0, s*bg, s*bg,
0, 0, 0, 0, 0, 0, sb0*fb*pb2, sb0*fb*pb3, sb0*fb*pb4, sb0*fb*pb5,
s1gb, 0, 0, 0, 0, sb1, 0, 0, 0, 0,
0, s*gb, 0, 0, 0, 0, s*(1-bg), 0, 0, 0,
0, 0, s*gb, 0, 0, 0, 0, s*(1-bg), 0, 0,
0, 0, 0, s*gb, s*gb, 0, 0, 0, s*(1-bg), s*(1-bg)), nrow = 10, byrow = TRUE)
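# Quick check added for illustration: with gb = bg = 0 the dispersal terms vanish and this
# matrix reduces to the Piece-1 matrix, so lambda should match the value computed above
lambda(A)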
ratio <- numeric() # a storage bin for holding calculations of a ratio
# to be interpreted for question 5
n <- matrix(1,10,1) # a vector with an initial abundance of 1 individual per
# age and location
tspan <- 50
for (t in 1:tspan){
n <- A%*%n # %*% = matrix multiplication in R
# note that we simply overwrite the abundance vector at each time step
# since we have no need to store it for these questions. It is updating.
Nbreedg <- pg2*n[2]+pg3*n[3]+pg4*n[4]+pg5*n[5]
Nbreedb <- pb2*n[7]+pb3*n[8]+pb4*n[9]+pb5*n[10]
ratio[t] <- Nbreedg/Nbreedb # this we store
}
ratio

blob_id: 837f9eeded62a2735291381fe99a6a2fa01cc546 | directory_id: 2b4762251cadb659e03f4fcb4f3ad14be098b75a | path: /ui.R | content_id: 12e3d92579fc443712d9a6b2373d4d3558690adc | detected_licenses: [] | license_type: no_license | repo_name: bcheggeseth/ArtistDiversity | snapshot_id: 67a77cff52ff328430d86cc06c3778a9c43875a5 | revision_id: 17b3105cf348788ed90f5439d095fdc541243f59 | branch_name: refs/heads/master | visit_date: 2020-04-08T20:27:04.386624 | revision_date: 2019-01-23T20:20:33 | committer_date: 2019-01-23T20:20:33 | github_id: 159,699,654 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,811 | extension: r | filename: ui.R

library(shiny)
library(DT)
library(ggplot2)
library(shinyWidgets)
library(markdown)
shinyUI(
navbarPage(
title=a(tags$b("Diversity of Artists in Major U.S. Museums by Topaz et al."), href='http://www.plosone.org') ,
windowTitle="Artist Diversity",
id="mytabs",
tabPanel(title="Artist Demographics",
value="Demos",
sidebarLayout(
sidebarPanel(
tags$b("The dataset to the right contains crowdsourced information on a random sample of thousands of individual, identifiable artists in various U.S. museums."),
br(),br(),
tags$em("Not Inferred values indicate that we were not able to confidently determine the value based on crowdsourcing approach."),
awesomeCheckbox("filter", "Allow subsetting?", value=TRUE),
width = 3), #end first sidebarpanel
mainPanel(
DTOutput('artistdata'), tags$em('Note that the artist name is scraped from the web and is not a cleaned version due to character encodings.')
) #end first main panel
) #end sidebarLayout first panel
), #end tabPanel first panel
tabPanel(title="Graphs",
value="Graphs",
sidebarLayout(
sidebarPanel(
tags$b("The graphics to the right display crowdsourced information on a random sample of thousands of individual, identifiable artists in various U.S. museums."),
br(),br(),
tags$em("Not Inferred values indicate that we were not able to confidently determine the value based on crowdsourcing approach."),
br(),br(),
selectInput("demovar", "Choose a Demographic Variable:", choices=list("Gender"= "gender", "Ethnicity"="ethnicity",
"Birth Year"="birthyear", "Geographic Origin"="nationality"),selected='Gender'),
awesomeCheckbox("unknownfilter", "Exclude artists with not inferred values", value=TRUE),
#awesomeCheckboxGroup("order", "Prefer bar plots", value=FALSE)
awesomeCheckbox("barplot", "Prefer bar plots", value=FALSE),
width = 3), #end third sidebarPanel
mainPanel(
plotOutput("demoplot", height = 700)
) #end third mainPanel
) #end sidebarLayout third panel
), #end tabPanel third panel
tabPanel(title="Survey Instrument",
value="SI",
sidebarLayout(
sidebarPanel(
tags$b("The survey instrument to the right was used by Mechanical Turk workers to provide information on a random sample of thousands of records scrapped from various U.S. museums."),
br(),br(),
width = 3), #end second sidebarpanel
mainPanel(
includeMarkdown("include.md")
) #end second main panel
) #end sidebarLayout second panel
) #end tabPanel second panel
) #end navBar
) #end ui

blob_id: 548ff1e5d7546c6fd778f2583dce66270edf6d08 | directory_id: 77c56c957ebbf937761880000a850574eb7d65aa | path: /randomForest.R | content_id: 56a08306dd99ac3831a61dad1413da37b1305758 | detected_licenses: [] | license_type: no_license | repo_name: martinmontane/CienciaDeDatosClases | snapshot_id: 28d2dd848882b663b8bb570ab9be4441aaee6c12 | revision_id: dbbd403bcb51440550ffaea3397450892d2e3ada | branch_name: refs/heads/master | visit_date: 2022-11-14T16:01:46.727802 | revision_date: 2020-07-09T19:08:39 | committer_date: 2020-07-09T19:08:39 | github_id: 263,441,648 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,887 | extension: r | filename: randomForest.R

load(file=url("https://github.com/martintinch0/CienciaDeDatosParaCuriosos/raw/master/data/independientes.RData"))
library(caret)
library(randomForest)
library(tidyverse)
library(furrr)
plan(multisession) # multiprocess is deprecated in the future package; multisession is the replacement
# Try to make the results reproducible
set.seed(10)
# List to store the results
resultados <- list()
# Assign the cross-validation folds
grupos <- createFolds(independientes %>% pull(REGISTRADO), k = 5, list = FALSE)
independientes <- independientes %>% mutate(grupos=grupos)
# Train with each candidate value of mtry
salida <- future_map_dbl(1:5, function(opciones){
  # For each of the k folds
  mean(future_map_dbl(1:5, function(k){
    # Build the training set
    dataTraining <- independientes %>% filter(grupos != k) %>% select(-grupos)
    # Train the random forest
    rf <- randomForest(formula= REGISTRADO ~.,data=dataTraining,mtry=opciones, ntree=100)
    # Predict on the test set
    dataTesting <- independientes %>% filter(grupos == k) %>% select(-grupos)
    pred <- predict(rf,newdata = dataTesting)
    tabla <- table(pred,dataTesting %>% pull(REGISTRADO))
    acc <- sum(diag(tabla))/sum(tabla)
  }))
},.progress=TRUE)
# The same sweep, but with a single tree only
salidaArbol <- future_map_dbl(1:5, function(opciones){
  mean(future_map_dbl(1:5, function(k){
    # Build the training set
    dataTraining <- independientes %>% filter(grupos != k) %>% select(-grupos)
    # Train the random forest
    rf <- randomForest(formula= REGISTRADO ~.,data=dataTraining,mtry=opciones,ntree=1)
    # Predict on the test set
    dataTesting <- independientes %>% filter(grupos == k) %>% select(-grupos)
    pred <- predict(rf,newdata = dataTesting)
    tabla <- table(pred,dataTesting %>% pull(REGISTRADO))
    acc <- sum(diag(tabla))/sum(tabla)
  }))
},.progress=TRUE)
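# A quick side-by-side view of the two sweeps (illustrative; assumes both runs finished):
data.frame(mtry = 1:5, forest_100_trees = salida, single_tree = salidaArbol)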

blob_id: b75dafd86074570a0dce36007b6c6b1c049bbe75 | directory_id: ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | path: /data/genthat_extracted_code/GGEBiplots/examples/ExamineGen.Rd.R | content_id: 2729e6723ac6f9afaf8560c1f6f4067d5bfde971 | detected_licenses: [] | license_type: no_license | repo_name: surayaaramli/typeRrh | snapshot_id: d257ac8905c49123f4ccd4e377ee3dfc84d1636c | revision_id: 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | branch_name: refs/heads/master | visit_date: 2023-05-05T04:05:31.617869 | revision_date: 2019-04-25T22:10:06 | committer_date: 2019-04-25T22:10:06 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 226 | extension: r | filename: ExamineGen.Rd.R

library(GGEBiplots)
### Name: ExamineGen
### Title: Examine a genotype biplot
### Aliases: ExamineGen
### Keywords: GGE
### ** Examples
library(GGEBiplotGUI)
data(Ontario)
GGE1<-GGEModel(Ontario)
ExamineGen(GGE1,"cas")

blob_id: dabb9f529ac41e243026e31ba1dcd705abf64670 | directory_id: 3f36e3afc25870cf6e9429de4a5b0604d52dc03a | path: /R/TrajectoryArguments.R | content_id: 9c7a48725802c803e60892a70818d07ef6f532f7 | detected_licenses: [] | license_type: no_license | repo_name: Patricklomp/VisualisingHealthTrajectories | snapshot_id: 4077a62b7da7b92ad2c7aa99a918aaf15585788e | revision_id: 98e69c50d354a693f0e9e8a3d76c81e3e5088a7a | branch_name: refs/heads/master | visit_date: 2023-05-31T16:21:50.435675 | revision_date: 2021-06-04T07:43:42 | committer_date: 2021-06-04T07:43:42 | github_id: 317,466,951 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 21,532 | extension: r | filename: TrajectoryArguments.R

#' Creates an object to hold analysis-specific data
#'
#' @param mode Indicates whether the analysis is run in DISCOVERY or VALIDATION mode. In VALIDATION mode, the package tries to validate predefined event pairs. In DISCOVERY mode, it tries to identify all directional event pairs from the data.
#' @param minimumDaysBetweenEvents The smallest number of days between 2 events of the patient that can be considered as event pair. Usually we have used 1.
#' @param maximumDaysBetweenEvents The maximum number of days between 2 events of the patient that can be considered as event pair. Usually we have not really limited it, so we have used 3650 (10 years)
#' @param minPatientsPerEventPair Minimum number of people having event1 -> event2 (directional) progression (satisfying minimumDaysBetweenEvents and maximumDaysBetweenEvents requirements) to be included in analysis. If the value is >=1, it is considered as the absolute count of event pairs. If the value is less than 1, the value is considered as prevalence among the cohort size. For instance, if you have 1000 persons in the cohort and the value is 0.05, each event pair must occur at least 1000x0.05=50 times. Can be used for limiting analysis to frequent event pairs only. However, it does not throw less frequent diagnosis pairs out of the (control group) data and therefore, does not affect the statistical significance.
#' @param addConditions TRUE/FALSE parameter to indicate whether events from Condition_occurrence table should be included in the analysis
#' @param addObservations TRUE/FALSE parameter to indicate whether events from Observation table should be included in the analysis
#' @param addProcedures TRUE/FALSE parameter to indicate whether events from Procedure_occurrence table should be included in the analysis
#' @param addDrugExposures TRUE/FALSE parameter to indicate whether events from Drug_exposure table should be included in the analysis. In most of the cases, prefer using addDrugEras instead as the particular RxNorm codes may differ in various databases (leading to no replication) but drug_era is always on ingredient level (active compound) and it also fills gaps between close events.
#' @param addDrugEras TRUE/FALSE parameter to indicate whether events from Drug_era table should be included in the analysis. NB! use either addDrugEras=T or addDrugExposures=T (not both) as it leads to analysis duplication...
#' @param addBirths TRUE/FALSE parameter to indicate whether births events should be included in the analysis.
#' @param addDeaths TRUE/FALSE parameter to indicate whether events from Death table should be included in the analysis.
#' @param daysBeforeIndexDate 0 or any positive number that indicates for how many days before index date of the cohort the events are included in the analysis. In case one wants to include all events before index date, use value Inf
#' @param RRrangeToSkip Range of relative risks (RR) that are skipped from the analysis. The minimum value for the range is 0. E.g. RRrangeToSkip=c(0,1) searches for RR>1 only (event pairs where the first event increases the risk of the second event). To skip RRs with a very small effect, it is recommended to use RRrangeToSkip=c(0,1.1) or even RRrangeToSkip=c(0,1.2) in DISCOVERY mode. In case one is also interested in pairs with decreasing risk, it is recommended to use a range like RRrangeToSkip=c(0.8,1.2) (analyse all pairs that have RR<0.8 or RR>=1.2). If you don't want to skip anything, use RRrangeToSkip=c(1,1) (analyses all pairs that have RR<1 or RR>=1 - that means, all pairs)
#' @param packageName Do not use/edit, this is required by SqlRender::loadRenderTranslateSql
#' @param cohortName Reader-friendly short description of the cohort. Used in graph titles and file names (can contain spaces)
#' @param description This is a placeholder for any description of the study/cohort/analysis. For instance, it would be wise to descibe here what kind of cohort is that and what the analysis does.
#' @param eventIdsForGraphs List of exact concept ID-s of the events that are used to align actual trajectories in the end of analysis. Can be left not defined (NA)
#'
#' @return TrajectoryAnalysisArgs object
#' @export
#'
#' @examples
createTrajectoryAnalysisArgs <- function(mode='DISCOVERY',
minimumDaysBetweenEvents=1,
maximumDaysBetweenEvents=3650,
minPatientsPerEventPair=10,
addConditions=T,
addObservations=F,
addProcedures=F,
addDrugExposures=F,
addDrugEras=F,
addBirths=F,
addDeaths=T,
daysBeforeIndexDate=Inf,
RRrangeToSkip=c(0,1.2),
cohortName = 'My sample cohort',
description = '',
eventIdsForGraphs=NA) {
if(!mode %in% c('DISCOVERY','VALIDATION')) stop(paste0("Error in createTrajectoryAnalysisArgs(): unknown value for MODE parameter: ",mode))
if(addDrugExposures==T & addDrugEras==T) stop("Error in createTrajectoryAnalysisArgs(): parameters values for 'addDrugExposures' and 'addDrugEras' are TRUE but both of them cannot be TRUE at the same time (choose one of them or set both to FALSE)")
if(mode=='VALIDATION') {
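# NOTE: relies on a 'trajectoryLocalArgs' object existing in the calling environment
# (it is not a parameter of this function)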
f=file.path(trajectoryLocalArgs$inputFolder,'event_pairs_for_validation.tsv')
if(!file.exists(f)) {
logger::log_error(paste0("The package is run in VALIDATION mode, but file 'event_pairs_for_validation.tsv' does not exist in input folder ",trajectoryLocalArgs$inputFolder,"."))
stop()
}
}
if(RRrangeToSkip[2]<RRrangeToSkip[1]) {
logger::log_error(paste0("Error in RRrangeToSkip=c(",RRrangeToSkip[1],",",RRrangeToSkip[2],") value: the start value of the range ('",RRrangeToSkip[1],"') can't be larger than the end value ('",RRrangeToSkip[2],"'). Check your analysis parameters."))
stop()
}
value <- list(mode=mode,minimumDaysBetweenEvents=minimumDaysBetweenEvents,maximumDaysBetweenEvents=maximumDaysBetweenEvents, minPatientsPerEventPair=minPatientsPerEventPair,
addConditions=addConditions,addObservations=addObservations,addProcedures=addProcedures,addDrugExposures=addDrugExposures,
addDrugEras=addDrugEras,addBirths=addBirths,addDeaths=addDeaths,
daysBeforeIndexDate=daysBeforeIndexDate,
RRrangeToSkip=RRrangeToSkip,
cohortName=cohortName,description=description,eventIdsForGraphs=eventIdsForGraphs)
class(value) <- 'TrajectoryAnalysisArgs'
return(value)
}
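# Minimal usage sketch (hypothetical values, for illustration only):
# trajectoryAnalysisArgs <- createTrajectoryAnalysisArgs(mode='DISCOVERY', cohortName='Test cohort')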
#' Creates an object to hold local database-specific parameters
#'
#' @param cdmDatabaseSchema Schema containing source data in OMOP CDM format
#' @param vocabDatabaseSchema Schema containing OMOP vocabulary
#' @param resultsSchema Schema the user has writing access to (used to write analysis tables into)
#' @param oracleTempSchema In case you are using oracle, schema for temporary tables need to be specified. A schema where temp tables can be created in Oracle. Otherwise leave it as it is (is not used)
#' @param sqlRole Role to use in SQL for writing tables in 'resultsSchema'. It should also have access to 'cdmDatabaseSchema' and 'vocabDatabaseSchema'. Set to FALSE (or F) if setting to a specific role is not needed. It should be safe to use F if you have no idea of what the SQL roles mean.
#' @param prefixForResultTableNames Table prefix that is used for all output tables to avoid any collision with existing table names. An empty string is also allowed.
#' @param cohortTableSchema Schema where cohort table is located
#' @param cohortTable Name of the cohort table in cohortTableSchema
#' @param inputFolder Full path to input folder that contains SQL file for cohort definition (SQL Server format) and optionally also trajectoryAnalysisArgs.json. You can use built-in folders of this package such as: inputFolder=system.file("extdata", "RA", package = "Trajectories") which is also the default value. In case your cohort data already exists in the database and you do not need to build it from scratch, set the value to FALSE.
#' @param mainOutputFolder The output folder path. This is the folder where the final results are produced into. Use full path and do NOT add trailing slash! The folder must already exist. Default value is the default working directory.
#' @param databaseHumanReadableName In the future, it will be added to the titles of the graph to indicate what data is this. Use something short. Currently this parameter is not used.
#'
#' @return TrajectoryLocalArgs object
#' @export
#'
#' @examples
createTrajectoryLocalArgs <- function(cdmDatabaseSchema,
vocabDatabaseSchema,
resultsSchema,
oracleTempSchema,
sqlRole=F,
prefixForResultTableNames='',
#cohortTableSchema,
#cohortTable,
#cohortId=1, #use 1 for discovery studies and 2 for validation studies
inputFolder=system.file("extdata", "RA", package = "Trajectories"),
mainOutputFolder=getwd(),
databaseHumanReadableName='My database') {
#Sanity checks
if(!is.logical(inputFolder)) {
if (!dir.exists(inputFolder)) stop(paste0("ERROR in createTrajectoryLocalArgs(): inputFolder '",inputFolder,"' does not exist."))
if (!file.exists(file.path(inputFolder,'cohort.sql'))) stop(paste0("ERROR in createTrajectoryLocalArgs(): there is no 'cohort.sql' file in inputFolder '",inputFolder,"'."))
}
if (!dir.exists(mainOutputFolder)) stop(paste0("ERROR in createTrajectoryLocalArgs(): mainOutputFolder '",mainOutputFolder,"' does not exist."))
#if (!file.exists(paste0(inputFolder,'/trajectoryAnalysisArgs.json'))) stop(paste0("ERROR in createTrajectoryLocalArgs(): there is no 'trajectoryAnalysisArgs.json' file in inputFolder '",inputFolder,"'."))
value <- list(cdmDatabaseSchema=cdmDatabaseSchema,vocabDatabaseSchema=vocabDatabaseSchema,
resultsSchema=resultsSchema,oracleTempSchema=oracleTempSchema,sqlRole=sqlRole,
prefixForResultTableNames=prefixForResultTableNames,
#cohortTableSchema=cohortTableSchema,
#cohortTable=cohortTable,
#cohortId=cohortId,
inputFolder=inputFolder,
mainOutputFolder=mainOutputFolder, databaseHumanReadableName=databaseHumanReadableName)
class(value) <- 'TrajectoryLocalArgs'
return(value)
}
#' Checks whether the object is of type TrajectoryLocalArgs
#'
#' @param x Any R object
#'
#' @return
#' @export
#'
#' @examples
is.TrajectoryLocalArgs <- function(x) {
inherits(x, "TrajectoryLocalArgs")
}
#' Checks whether the object is of type TrajectoryAnalysisArgs
#'
#' @param x Any R object
#'
#' @return
#' @export
#'
#' @examples
is.TrajectoryAnalysisArgs <- function(x) {
inherits(x, "TrajectoryAnalysisArgs")
}
#' Writes trajectoryAnalysisArgs object to JSON file
#'
#' @param trajectoryAnalysisArgs Object created by Trajectories::createTrajectoryAnalysisArgs() method
#' @param filepath Full path to the output file. Should have .json extension as this is actually a JSON file.
#'
#' @return
#' @export
#'
#' @examples
TrajectoryAnalysisArgsToJson<-function(trajectoryAnalysisArgs, filepath) {
logger::log_info(paste0("Saving 'trajectoryAnalysisArgs' data in JSON format to ",filepath,"..."))
if(!Trajectories::is.TrajectoryAnalysisArgs(trajectoryAnalysisArgs)) stop("Something is not right. 'trajectoryAnalysisArgs' is not an object from class 'TrajectoryAnalysisArgs'")
library(jsonlite)
json<-jsonlite::toJSON(trajectoryAnalysisArgs, force=T, pretty=T)
fileConn<-file(filepath)
writeLines(json, fileConn)
close(fileConn)
logger::log_info("...done.")
}
#' Reads trajectoryAnalysisArgs object from JSON file
#'
#' @param filepath Full path to JSON file
#'
#' @return TrajectoryAnalysisArgs object
#' @export
#'
#' @examples
TrajectoryAnalysisArgsFromJson<-function(filepath) {
library(jsonlite)
logger::log_info(paste0("Loading 'trajectoryAnalysisArgs' object from JSON file ",filepath,"..."))
r.obj<-fromJSON(filepath)
#defaulting parameters if missing from JSON
defaults=list(
mode='DISCOVERY', #allowed values: 'DISCOVERY' or 'VALIDATION'
minimumDaysBetweenEvents=1,
maximumDaysBetweenEvents=3650,
minPatientsPerEventPair=10,
addConditions=T,
addObservations=F,
addProcedures=F,
addDrugExposures=F,
addDrugEras=F,
addBirths=F,
addDeaths=T,
daysBeforeIndexDate=Inf,
RRrangeToSkip=c(0,1.2),
cohortName = 'My sample cohort',
description = '',
eventIdsForGraphs=NA
)
vals_for_obj=list()
for(param in names(defaults)) {
if(!param %in% names(r.obj)) {
logger::log_warn("'{param}' parameter not given in JSON. Defaulting its value to {defaults[param]}")
vals_for_obj[[param]]=defaults[[param]]
} else {
vals_for_obj[[param]]=r.obj[[param]]
}
}
trajectoryAnalysisArgs<-Trajectories::createTrajectoryAnalysisArgs(
mode=vals_for_obj[['mode']],
minimumDaysBetweenEvents=vals_for_obj[['minimumDaysBetweenEvents']],
maximumDaysBetweenEvents=vals_for_obj[['maximumDaysBetweenEvents']],
minPatientsPerEventPair=vals_for_obj[['minPatientsPerEventPair']],
addConditions=vals_for_obj[['addConditions']],
addObservations=vals_for_obj[['addObservations']],
addProcedures=vals_for_obj[['addProcedures']],
addDrugExposures=vals_for_obj[['addDrugExposures']],
addDrugEras=vals_for_obj[['addDrugEras']],
addBirths=vals_for_obj[['addBirths']],
addDeaths=vals_for_obj[['addDeaths']],
daysBeforeIndexDate=vals_for_obj[['daysBeforeIndexDate']],
RRrangeToSkip=vals_for_obj[['RRrangeToSkip']],
cohortName=vals_for_obj[['cohortName']],
description=vals_for_obj[['description']],
eventIdsForGraphs=vals_for_obj[['eventIdsForGraphs']])
logger::log_info('...done.')
return(trajectoryAnalysisArgs)
}
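# Round-trip sketch (hypothetical temp-file path, for illustration only):
# f <- file.path(tempdir(), 'trajectoryAnalysisArgs.json')
# TrajectoryAnalysisArgsToJson(trajectoryAnalysisArgs, f)
# restored <- TrajectoryAnalysisArgsFromJson(f)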
#' Searches for trajectoryAnalysisArgs.json file from inputFolder (defined in trajectoryLocalArgs), creates trajectoryAnalysisArgs object from it and returns it.
#'
#' @inheritParams GetOutputFolder
#'
#' @return TrajectoryLocalArgs object
#' @export
#'
#' @examples
TrajectoryAnalysisArgsFromInputFolder<-function(trajectoryLocalArgs) {
trajectoryAnalysisArgs<-Trajectories::TrajectoryAnalysisArgsFromJson(file.path(trajectoryLocalArgs$inputFolder,"trajectoryAnalysisArgs.json"))
Trajectories::IsValidationMode(trajectoryAnalysisArgs,verbose=T)
#Create output folder for this analysis
outputFolder<-Trajectories::GetOutputFolder(trajectoryLocalArgs,trajectoryAnalysisArgs,createIfMissing=T)
# Set up logger
Trajectories::InitLogger(logfile = file.path(outputFolder,'logs',paste0(format(Sys.time(), "%Y%m%d-%H%M%S"),"-log.txt")), threshold = logger:::INFO)
return(trajectoryAnalysisArgs)
}
#' Returns full path to output folder for the results.
#'
#' Basically combines the value of mainOutputFolder, database name, and analysis name to get the output folder. Checks also that the folder exists. If createIfMissing=T, then creates the necessary subfolders under mainOutputFolder.
#'
#' @param trajectoryLocalArgs Object created by Trajectories::createTrajectoryLocalArgs() method
#' @param trajectoryAnalysisArgs Object created by Trajectories::createTrajectoryAnalysisArgs() method
#' @param createIfMissing If TRUE, then creates necessary folder if missing.
#'
#' @return Full output path
#' @export
#'
#' @examples
GetOutputFolder<-function(trajectoryLocalArgs,trajectoryAnalysisArgs,createIfMissing=F) {
outputFolder<-trajectoryLocalArgs$mainOutputFolder
subFolder1=make.names(trajectoryLocalArgs$databaseHumanReadableName)
subFolder2=make.names(trajectoryAnalysisArgs$cohortName)
subFolder3=trajectoryAnalysisArgs$mode
subFolder4="logs"
if (!dir.exists(outputFolder)) stop(paste0("ERROR in GetOutputFolder(): trajectoryLocalArgs$mainOutputFolder='",trajectoryLocalArgs$mainOutputFolder,"' does not exist."))
outputFolderPrev=outputFolder
outputFolder <- file.path(outputFolder, subFolder1)
if (!dir.exists(outputFolder)){
if(createIfMissing==F) stop(paste0("ERROR in GetOutputFolder(): There is no '",subFolder1,"' subfolder in '",outputFolderPrev,"' folder. Cannot create the folder either as parameter 'createIfMissing=F'."))
dir.create(outputFolder)
print(paste0('Created folder for database results: ',outputFolder)) #do not use logger::log_... here as logger is not yet initialized
} else {
#print(paste0('Folder for database results already exists: ',outputFolder))
}
outputFolderPrev=outputFolder
outputFolder <- file.path(outputFolder, subFolder2)
if (!dir.exists(outputFolder)){
if(createIfMissing==F) stop(paste0("ERROR in GetOutputFolder(): There is no '",subFolder2,"' subfolder in '",outputFolderPrev,"' folder. Cannot create the folder either as parameter 'createIfMissing=F'."))
dir.create(outputFolder)
    print(paste0('Created folder for analysis results: ',outputFolder)) #do not use logger::log_... here as logger is not yet initialized
} else {
#print(paste0('Folder for analysis results already exists: ',outputFolder))
}
outputFolderPrev=outputFolder
outputFolder <- file.path(outputFolder, subFolder3)
if (!dir.exists(outputFolder)){
if(createIfMissing==F) stop(paste0("ERROR in GetOutputFolder(): There is no '",subFolder3,"' subfolder in '",outputFolderPrev,"' folder. Cannot create the folder either as parameter 'createIfMissing=F'."))
dir.create(outputFolder)
    print(paste0('Created folder for analysis mode results: ',outputFolder)) #do not use logger::log_... here as logger is not yet initialized
} else {
#print(paste0('Folder for analysis results already exists: ',outputFolder))
}
if(createIfMissing==T) logger::log_info(paste0("Output folder set to ",outputFolder))
  #subfolders (logs, figures, tables) are handled identically, so create them in one loop
  for (subfolder in c('logs', 'figures', 'tables')) {
    outputFolder2 <- file.path(outputFolder, subfolder)
    if (!dir.exists(outputFolder2)){
      if(createIfMissing==F) stop(paste0("ERROR in GetOutputFolder(): There is no '",subfolder,"' subfolder in '",outputFolder,"' folder. Cannot create the folder either as parameter 'createIfMissing=F'."))
      dir.create(outputFolder2)
      print(paste0('Created folder for ',subfolder,': ',outputFolder2)) #do not use logger::log_... here as logger is not yet initialized
    }
  }
return(outputFolder)
}
#' Returns TRUE if the package is run in validation mode.
#'
#' @param trajectoryAnalysisArgs Object created by Trajectories::createTrajectoryAnalysisArgs() method
#' @param verbose If TRUE, outputs some info in INFO/DEBUG log level. Otherwise, returns the results silently.
#'
#' @return TRUE if \code{trajectoryAnalysisArgs$mode} is 'VALIDATION', FALSE otherwise
#' @export
#'
#' @examples
IsValidationMode<-function(trajectoryAnalysisArgs, verbose=F) {
if(verbose) {
if(trajectoryAnalysisArgs$mode=='VALIDATION') {
logger::log_info("The package is run in VALIDATION MODE")
} else {
logger::log_info("The package is run in DISCOVERY MODE")
}
}
return(trajectoryAnalysisArgs$mode=='VALIDATION')
}
|
7b2d30a8ae0407fff86587ca621aafebfffc80ce
|
4d034cff18c2b990cb987b1ddb84666d2591dfa4
|
/titanic-survivors/script/04_check.R
|
6c98f805dfa189e1177d079db65f6fb61553df37
|
[] |
no_license
|
GiuseppeTT/fooling-around
|
f40d755ebab8198f995d496fb7819799e0304567
|
b25a98f8619f6ad1e4870d89d41d6e0095521b36
|
refs/heads/main
| 2023-07-11T10:40:49.695509
| 2021-07-21T21:08:19
| 2021-07-21T21:08:19
| 384,141,688
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,005
|
r
|
04_check.R
|
# TODO:
# - Read test data and load fit model
# - Check it's performance
# - Plot ROC
# - Check AUC
# Set up -----------------------------------------------------------------------
## Load libraries
library(tidyverse)
library(tidymodels)
## Source auxiliary R files
# source("R/constants.R")
# source("R/functions.R")
# Check ------------------------------------------------------------------------
## Read test data and load workflow
test_data <-
read_rds("data/test_data.rds")
final_workflow <-
read_rds("output/final_workflow.rds")
## Predict
predictions <-
final_workflow %>%
predict(new_data = test_data, type = "prob") %>%
bind_cols(test_data)
## Check
roc_curve_plot <-
predictions %>%
roc_curve(.pred_survived, truth = outcome) %>%
autoplot()
ggsave("output/roc_curve.png")
roc_auc_value <-
predictions %>%
roc_auc(.pred_survived, truth = outcome) %>%
pull(.estimate) %>%
as.character()
write_file(roc_auc_value, "output/roc_auc.txt")
|
09b03773e395175f48689f443e4613f9aab592a4
|
759f30b784192b3baf83f8c1e0e7c0b88f3af4a9
|
/rankhospital.R
|
3b1d5a3e097fa692c0882b16856e688f077921ba
|
[] |
no_license
|
RQuinn78/coursera_assignments
|
2f1d72826bc32252690c2b10579db43cedfb92e4
|
609c17aea0fce392df8ed02322d9f89712a2c90a
|
refs/heads/master
| 2021-01-10T16:10:28.443617
| 2016-02-16T19:10:20
| 2016-02-16T19:10:20
| 51,860,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
rankhospital.R
|
rankhospital <- function (state, outcome, num= "best"){
care_meas <- read.csv("outcome-of-care-measures.csv", colClasses="character")
if (!any(state == state.abb)){
stop ("invalid state")}
if (!any(outcome == c("heart attack", "heart failure", "pneumonia"))){
stop ("invalid outcome")}
  ## the code above checks whether the state and outcome arguments are valid
state_dat <-subset(care_meas, care_meas[,7]==state)
state_dat[,11] <- suppressWarnings(as.numeric(state_dat[,11]))
state_dat[,17] <- suppressWarnings(as.numeric (state_dat[,17]))
state_dat[ ,23] <- suppressWarnings(as.numeric (state_dat[,23]))
colnames(state_dat)[11] <- "heart attack"
colnames(state_dat)[17] <- "heart failure"
colnames(state_dat)[23] <- "pneumonia"
x <- cbind(state_dat[2], state_dat[7], state_dat[outcome])
x <- na.omit(x)
y <- x[order(x[,3], x$Hospital.Name),]
z<- 1:nrow(x)
ans <- cbind(y,z)
colnames(ans)[4] <- "Rank"
if (num == "best"){
print (ans[1,1])
}
else if (num == "worst"){
j <- tail(ans, 1)
print (j[1,1])
}
else if (is.element(num, z)){
k<- subset (ans, ans[["Rank"]]==num)
print (k[1,1])
}
  else if (num > tail(z, 1)){
    print (NA)
  }
}
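# A hedged usage sketch (assumes "outcome-of-care-measures.csv" sits in the
# working directory, as read by the function above):
# rankhospital("TX", "heart failure", 4)      # 4th-ranked hospital in Texas
# rankhospital("MD", "heart attack", "worst") # lowest-ranked hospital in Maryland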
|
99f6f5a0d8a5bfc068064f0b989efc174da6fbf1
|
4ede1bcfdebc480e41b8bc384787cf3fdb2c3692
|
/Code/10_visualization_several_estimates.R
|
7311ed1a065a8bccc5d1340e2b5ecbd1a1d08418
|
[] |
no_license
|
kikeacosta/optimal_vaccination_age_varies_across_countries
|
fa21dde907176fb1d7a22be6165fcf60945ac97f
|
7343089fcdbb8070e75512c76ea24e8485b61f71
|
refs/heads/master
| 2023-06-22T08:32:02.234361
| 2021-07-22T00:58:47
| 2021-07-22T00:58:47
| 351,043,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,457
|
r
|
10_visualization_several_estimates.R
|
#=================================================================================================#
#=================================================================================================#
#
# YLL & Vaccines
# Visualization
#
#=================================================================================================#
#=================================================================================================#
# Description:
# Creates plots
#=================================================================================================#
#
# LIBRARIES
#
#
#=================================================================================================#
library(tidyverse)
library(here)
library(cowplot)
#=================================================================================================#
#
# DATA
#
#
#=================================================================================================#
# Mortality rates by age and e_yll
#=================================================================================================#
# Full data on mortality rates by age and e_yll
# Confirmed deaths estimates
age_rates_yll_optimal_cov <-
readRDS("Output/age_m_rates_optimal_all.rds") %>%
select(Country, Age, mid_point, mx, Scenario, yll, mx_opt)
# Excess estimates
age_rates_yll_optimal_ex <- readRDS("Output/age_m_rates_optimal_excess.rds") %>%
  filter(Sex == "t") %>%
  select(Country, Age, mid_point, mx, e_yll, mx_opt) %>%
  mutate(Scenario = "Excess") %>%
  rename(yll = e_yll)
# merging confirmed deaths and excess estimates
age_rates_yll_optimal <-
age_rates_yll_optimal_cov %>%
bind_rows(age_rates_yll_optimal_ex)
# All cause mortality
all_cause_mx <- read_rds("Output/all_cause_mortality_rates.rds") %>%
select(Country, Age, mx_all)
# Optimal vaccination ages
#=================================================================================================#
# Vaccination age data
# vaccine_age <- readRDS("Output/vaccine_age_excess.rds")
#=================================================================================================#
#
# MANIPULATION
#
#
#=================================================================================================#
# standardizing all cause mortality so it crosses in the last age interval
# with covid mortality
all_cause_mx2 <-
all_cause_mx %>%
left_join(age_rates_yll_optimal_cov %>%
filter(Scenario == "Central ex") %>%
select(Country, Age, mid_point, mx))
# estimating adjustment factor to normalize at the last age group
mx_adj <- all_cause_mx2 %>%
group_by(Country) %>%
filter(Age == max(Age)) %>%
mutate(fct_adj = mx / mx_all) %>%
select(Country, fct_adj) %>%
ungroup()
all_cause_mx3 <-
all_cause_mx2 %>%
left_join(mx_adj) %>%
mutate(mx_std = mx_all * fct_adj) %>%
select(Country, Age, mx_std)
# selected countries for Figure 1
cts_temp <- c("USA", "Chile", "Peru")
# Ordering Countries as factor variable
labs_order <- c("USA", "Chile", "Peru")
# selecting scenarios
unique(age_rates_yll_optimal$Scenario)
scns <- c("Central ex")
age_rates_yll2 <-
age_rates_yll_optimal %>%
filter(Country %in% cts_temp,
Scenario %in% scns) %>%
# adding normalized all cause mortality rates
left_join(all_cause_mx3) %>%
mutate(Country = factor(Country, levels = labs_order)) %>%
select(Country, Age, mid_point, yll, mx, mx_opt, mx_std) %>%
gather(mx, mx_opt, mx_std, key = Source, value = mx) %>%
mutate(Source = recode(Source,
"mx" = "COVID-19",
"mx_opt" = "Theoretical",
"mx_std" = "All-cause"),
Source = factor(Source, levels = c("COVID-19",
"Theoretical",
"All-cause")))
yll_ages <-
age_rates_yll_optimal %>%
filter(Country %in% cts_temp,
Scenario %in% c("Central ex", "Excess")) %>%
mutate(Scenario = recode(Scenario,
"Central ex" = "COVID-19",
"Excess" = "Excess deaths"),
Country = factor(Country, levels = labs_order))
#=================================================================================================#
#
# PLOTS
#
#
#=================================================================================================#
#=================================================================================================#
# Figure 1. Covid death rates, crossover, all-cause mortality, and Years of life saved
#=================================================================================================#
p1 <-
age_rates_yll2 %>%
filter(mid_point >= 30) %>%
ggplot()+
geom_line(aes(mid_point, mx, col = Source, size = Source, alpha = Source))+
geom_point(aes(mid_point, mx, col = Source, alpha = Source, shape = Source), size = 1.5)+
scale_color_manual(values = c("black", "#e63946", '#1a759f'))+
scale_size_manual(values = c(1, 0.8, 0.8))+
scale_alpha_manual(values = c(1, 0.5, 0.5))+
scale_shape_manual(values = c(1, 16, 16))+
scale_y_log10()+
scale_x_continuous(breaks = seq(0, 100, 20))+
facet_wrap(~Country, scales = "free", nrow = 1)+
coord_cartesian(xlim = c(35, 100))+
theme_bw()+
labs(y = "Death Rate", x = "Age")+
theme(
legend.position = c(0.92, 0.2),
legend.background = element_rect(fill="transparent"),
legend.title = element_blank(),
strip.background = element_rect(fill = "transparent"),
strip.text = element_text(size = 10, face = "bold"),
# strip.text = element_text(size = 10),
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_text(angle = 90, hjust = .5, size = 8)
)
p2 <- yll_ages %>%
filter(mid_point >= 5) %>%
ggplot()+
geom_line(aes(mid_point, yll, col = Scenario), size = 1)+
geom_point(aes(mid_point, yll, col = Scenario), alpha = 1, size = 1.5, shape = 1)+
scale_x_continuous(breaks = seq(0, 100, 20))+
scale_color_manual(values = c("black", "#83c5be"))+
facet_wrap(~ Country, scales = "free", nrow = 1)+
coord_cartesian(xlim = c(35, 100))+
theme_bw()+
labs(y = "Years Saved", x = "Age")+
theme(
legend.position = c(0.92, 0.15),
legend.background = element_rect(fill="transparent"),
legend.title = element_blank(),
strip.background = element_blank(),
strip.text.x = element_blank(),
axis.text.y = element_text(angle = 90, hjust = .5, size = 8))
plot_grid(p1, p2, labels = c('A', 'B'), label_size = 12, ncol = 1)
ggsave("Figures/all_countries_mx_ex_yll_counterfactual_combined.png", width = 10, height = 6)
ggsave("Figures/all_countries_mx_ex_yll_counterfactual_combined.pdf", width = 10, height = 6)
# ==================================================== #
# plotting ages intervals and the age at crossover
# ==================================================== #
tx <- 8
exclude <- c("India", "Poland", "Romania", "England and Wales", "Northern Ireland", "Scotland", "Greece")
# crossover for confirmed deaths and "usual" ex
cross_mx <-
age_rates_yll_optimal %>%
filter(!Country %in% exclude,
Scenario == "Central ex") %>%
group_by(Country) %>%
mutate(cross = ifelse(round(mx, 8) >= round(mx_opt, 8) & lag(mx) < lag(mx_opt), 1, 0),
age_int = lead(Age) - Age,
age_int = ifelse(is.na(age_int), 100 - Age, age_int),
cross = ifelse(sum(cross) > 1 & Age * cross == max(Age * cross), 0, cross),
age_max = ifelse(Age == max(Age), 1, 0),
age_cross = max((Age + age_int * 0.5) * cross + 10 * age_max)) %>%
ungroup()
unique(age_rates_yll_optimal$Scenario)
# crossover for upper bound (+20%) of ex in last age group
cross_ub <-
age_rates_yll_optimal %>%
filter(!Country %in% exclude,
Scenario == "Upper bound (+20%)") %>%
select(Country, Age, mx, mx_opt) %>%
group_by(Country) %>%
mutate(cross = ifelse(round(mx, 8) >= round(mx_opt, 8) & lag(mx) < lag(mx_opt), 1, 0),
age_int = lead(Age) - Age,
age_int = ifelse(is.na(age_int), 100 - Age, age_int),
cross = ifelse(sum(cross) > 1 & Age * cross == max(Age * cross), 0, cross),
age_max = ifelse(Age == max(Age), 1, 0),
age_cross = max((Age + age_int * 0.5) * cross + 10 * age_max)) %>%
ungroup() %>%
rename(cross_ub = cross) %>%
select(Country, Age, cross_ub)
# crossover for excess mortality
cross_ex <-
age_rates_yll_optimal %>%
filter(!Country %in% exclude,
Scenario == "Excess") %>%
drop_na() %>%
group_by(Country) %>%
mutate(cross = ifelse(round(mx, 8) >= round(mx_opt, 8) & lag(mx) < lag(mx_opt), 1, 0),
age_int = lead(Age) - Age,
age_int = ifelse(is.na(age_int), 100 - Age, age_int),
cross = ifelse(sum(cross) > 1 & Age * cross == max(Age * cross), 0, cross),
age_max = ifelse(Age == max(Age), 1, 0),
age_cross = max((Age + age_int * 0.5) * cross + 10 * age_max)) %>%
ungroup() %>%
rename(cross_exc = cross) %>%
select(Country, Age, cross_exc)
# merging crossovers and identifying ages in each case
cross_mx_all <-
cross_mx %>%
select(Country, Age, age_int, age_cross, cross) %>%
left_join(cross_ub) %>%
left_join(cross_ex) %>%
group_by(Country) %>%
fill(cross_exc) %>%
ungroup() %>%
mutate(cross = factor(cross),
cross_ub = ifelse(cross_ub == 0, NA, cross_ub),
cross_ub = factor(cross_ub),
cross_exc = ifelse(cross_exc == 1, 2, NA),
cross_exc = factor(cross_exc),
mid_point = Age + age_int * .5,
age_ub = ifelse(cross_ub == 1, mid_point, NA),
age_exc = ifelse(cross_exc == 2, mid_point, NA))
# plot of three different threshold ages
cross_mx_all %>%
ggplot()+
geom_tile(aes(mid_point, reorder(Country, -age_cross),
width = age_int,
fill = cross),
col = "black", alpha = 0.8, size = 0.5)+
# adding upper bound of ex (+20%)
geom_point(aes(age_ub, Country, shape = cross_ub), size = 3, stroke = 0.8)+
# adding excess estimates
geom_point(aes(age_exc, Country, shape = cross_exc), size = 3, stroke = 0.8)+
scale_fill_manual(values = c("transparent", "#e63946"), breaks="1",
labels = c("Cross-over"))+
scale_shape_manual(values = c(0, 4), labels = c("Last ex + 20%", "Excess deaths"))+
guides(fill = guide_legend(order = 1), shape = guide_legend(order = 2))+
scale_x_continuous(expand = c(0, 0), breaks = seq(0, 100, 10))+
scale_y_discrete(expand = c(0, 0))+
labs(x = "Age")+
theme_bw()+
theme(
legend.position="bottom",
legend.title = element_blank(),
legend.text = element_text(size = tx + 4),
axis.text.x = element_text(size = tx + 4),
axis.text.y = element_text(size = tx + 4),
axis.title.x = element_text(size = tx + 5),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
plot.margin = unit(c(5, 5, 5, 5), "mm"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2)
)
ggsave("Figures/ages_intervals_all_measures.png", width = 8, height = 10)
ggsave("Figures/ages_intervals_all_measures.pdf", width = 8, height = 10)
unique(cross_mx_all$Country)
|
aec100152bc03a6c65bc1671de515bb12d6d9505
|
d23337c36e2f2b4d55c2d60cc818b905296fcf53
|
/R/cdf_plots.R
|
5c476b4c16dd3c3fbafb69c0c60f9fa64cf84678
|
[] |
no_license
|
tywagner/EDC_LandscapeBias
|
873af5b322423bbb20be810d0971bb4b67e2011a
|
75d8edc8b33299c1c35c7cdead5186f5e3ce82cf
|
refs/heads/master
| 2021-06-22T11:35:19.323719
| 2017-07-31T14:56:56
| 2017-07-31T14:56:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,286
|
r
|
cdf_plots.R
|
# rm(list=ls())
library(data.table)
# Read in census catchments
cens <- fread('FINAL_EDC_NHD_Summaries_V6.csv')
cens[, .N]
head(cens)
dim(cens)
# summary(cens)
# Read in sample reaches
samp <- fread('Immediate_Catchment_Summaries_V6.csv')
samp[, .N]
head(samp)
dim(samp)
# remove site names
samp <- samp[, c("SiteNm1","SiteNm2") := NULL]
dim(samp)
# Remove character columns from data tables, since those won't be used for cdf's
cens <- Filter(is.numeric, cens)
dim(cens)
dim(samp)
# Keep columns in samp that are in cens
cols <- colnames(cens)
samp <- samp[, cols, with = FALSE]
# Remove comid columns
cens <- cens[, "COMID" := NULL]
samp <- samp[, "COMID" := NULL]
dim(cens)
dim(samp)
# str(samp)
# str(cens)
# Test
# vari1c <- cens[, 1, with=FALSE]
# variECDFc <- ecdf(as.matrix(vari1c))
# vari1s <- samp[, 1, with=FALSE]
# variECDFs <- ecdf(as.matrix(vari1s))
# plot(variECDFc, verticals=TRUE, do.points=FALSE, las=1, lwd=2, axes=F, ylab='', xlab='', main='')
# plot(variECDFs, verticals=TRUE, do.points=FALSE, add=TRUE, col='brown', lty=2, lwd=2)
# axis(side=1,cex.axis=0.5, mgp=c(0,0.5,0),tck= -0.01)
# axis(side=2,tck= -0.01, mgp=c(0,0.5,0), cex.axis=0.5, las=1)
# CDF plots
# num of pannels per plot
n.pannel <- 50
n.plot <- ceiling(ncol(samp)/n.pannel)
for(hh in 1:n.plot){
pdf(paste('cdf_',hh,'.pdf',sep=''),width=8,height=12)
if(hh<n.plot){
n.fig=n.pannel #number of pannels per plot
layout(matrix(1:n.fig,nrow=10,ncol=5,byrow=T))
par(mar=c(2,4,1,1),oma=c(2,2,1,1))
}
if(hh==n.plot){
n.fig=ncol(samp)-(n.plot-1)*n.pannel
layout(matrix(c(1:n.fig,rep(0,(n.pannel-n.fig))),nrow=10,ncol=5,byrow=T))
par(mar=c(2,4,1,1),oma=c(2,2,1,1))
}
for(pp in 1:n.fig){
vari1c <- cens[, (hh-1)*n.pannel+pp, with=FALSE]
variECDFc <- ecdf(as.matrix(vari1c))
vari1s <- samp[, (hh-1)*n.pannel+pp, with=FALSE]
variECDFs <- ecdf(as.matrix(vari1s))
plot(variECDFc, verticals=TRUE, do.points=FALSE, las=1, lwd=2, axes=F, ylab='', xlab='', main=cols[(hh-1)*n.pannel+pp])
plot(variECDFs, verticals=TRUE, do.points=FALSE, add=TRUE, col='brown', lty=2, lwd=2)
axis(side=1,cex.axis=0.5, mgp=c(0,0.5,0),tck= -0.01)
axis(side=2,tck= -0.01, mgp=c(0,0.5,0), cex.axis=0.5, las=1)
}
dev.off()
}
|
8d6fe46b07d2e4f91cd99462bd4b6d5450899252
|
63cb78527bcb90f984788587a29f8f115e94ab64
|
/R/dashbioManhattan.R
|
84d1c7e80ace4f3eaca6ac466b4c65c73c24d02a
|
[
"MIT"
] |
permissive
|
plotly/dash-bio
|
2b3468626c7f021c083c8b9170e61862d5dc151d
|
8a97db7811cc586d7e0bf1d33c17b898052b2e8f
|
refs/heads/master
| 2023-09-03T13:30:45.743959
| 2023-08-16T15:26:27
| 2023-08-16T15:26:27
| 141,365,566
| 505
| 228
|
MIT
| 2023-08-23T01:28:46
| 2018-07-18T01:40:23
|
Python
|
UTF-8
|
R
| false
| false
| 1,609
|
r
|
dashbioManhattan.R
|
dashbioManhattan <- function (dataframe, chrm = "CHR", bp = "BP", p = "P",
snp = "SNP", logp = TRUE, title = "Manhattan Plot", showgrid = FALSE, xlabel = NULL,
ylabel = "-log10(p)", point_size = 5, showlegend = FALSE, col = c("#969696", "#252525"),
suggestiveline_value = -log10(1e-05), suggestiveline_color = "blue",
suggestiveline_width = 1, genomewideline_value = -log10(5e-08), genomewideline_color = "red",
genomewideline_width = 1, highlight = NULL, highlight_color = "#00FF00", ...)
{
manhattanly::manhattanly(x = dataframe,
chr = chrm,
bp = bp,
p = p,
snp = snp,
logp = logp,
title = title,
showgrid = showgrid,
xlab = xlabel,
ylab = ylabel,
point_size = point_size,
showlegend = showlegend,
col = col,
suggestiveline = suggestiveline_value,
suggestiveline_color = suggestiveline_color,
suggestiveline_width = suggestiveline_width,
genomewideline = genomewideline_value,
genomewideline_color = genomewideline_color,
genomewideline_width = genomewideline_width,
highlight = highlight,
highlight_color = highlight_color,
...)
}
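# A hedged usage sketch (assumes the manhattanly package's bundled HapMap
# dataset, which carries the default CHR/BP/P/SNP columns):
# library(manhattanly)
# dashbioManhattan(HapMap, title = "HapMap Manhattan Plot")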
|
e5466753f0401adf67c4bd83af5524bfc55e9942
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/utiml/examples/subset_correction.Rd.R
|
a649aec5852af786f6fc58c67435cf1b6a329b2b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
subset_correction.Rd.R
|
library(utiml)
### Name: subset_correction
### Title: Subset Correction of a predicted result
### Aliases: subset_correction
### ** Examples
prediction <- predict(br(toyml, "RANDOM"), toyml)
subset_correction(prediction, toyml)
|
bf71377b2024c573015f4cbdf102925ca468b556
|
87d67d1370f9eff8d02e661a754cfc68765b6311
|
/cachematrix.R
|
57d42ee363983bb8f3c21ba5cd4a9ed127151eb6
|
[] |
no_license
|
dhdhdh/ProgrammingAssignment2
|
68bff4d87e52a350afa06db09455726dc2b30e8a
|
8f8cb2947ce53c349b238a19012baaa14edfc374
|
refs/heads/master
| 2021-01-14T10:31:15.200265
| 2015-06-18T11:53:50
| 2015-06-18T11:53:50
| 37,654,766
| 0
| 0
| null | 2015-06-18T11:07:45
| 2015-06-18T11:07:44
| null |
UTF-8
|
R
| false
| false
| 935
|
r
|
cachematrix.R
|
## These functions calculate the inverse of an invertible square matrix and
## cache the result for as long as the matrix does not change
## This function wraps the input square invertible matrix so that it can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
Inv<-NULL
set <- function(y) {
x <<- y
Inv <<- NULL
}
get <- function() x
SetInv<-function(I) Inv<<-I
GetInv<-function() Inv
list(set=set, get=get, getinv=GetInv, setinv=SetInv)
}
## This function returns the cached inverse of the input (as calculated by the
## makeCacheMatrix function) for as long as the input is unchanged. It computes
## the inverse when the input changes or when it has not been calculated before.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
I<-x$getinv()
if(!is.null(I))
{
message("getting cached data")
return(I)
}
data<-x$get()
  I<-solve(data, ...) # forward extra arguments to solve()
x$setinv(I)
I
}
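## A minimal usage sketch (the matrix values are illustrative assumptions):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)   # computes the inverse and caches it
## cacheSolve(m)   # prints "getting cached data" and reuses the cached inverse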
|
845f076b94f860888cc05080788177c572a29a1d
|
92fe75798aff7836c17cc9cc439fef0bdc9bfcb6
|
/R/naiveIntegration.R
|
988a8486acaf89c01be44116826a762433d30557
|
[
"BSD-2-Clause"
] |
permissive
|
pmartR/peppuR
|
a036e07fe230a66195190b90d4a8c96e733083c6
|
d1dae01782c69bc49749b405b36fb82416226515
|
refs/heads/master
| 2020-04-12T16:28:48.210435
| 2020-01-13T21:23:09
| 2020-01-13T21:23:09
| 162,613,771
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,189
|
r
|
naiveIntegration.R
|
#'Combine probabilities from multiple data sources.
#'
#'@param probabilities_by_source a list of data.frames with one column for each
#' predicted class corresponding to the probability of that class label given
#' the data source. Each data frame corresponds to a data source. All
#' probabilities in each row must sum to 1. The rows are generally cross
#' validated test sets.
#'
#'@details This function combines class probabilities from learning algorithms
#' on multiple or single data sources over cross validation partitions. The
#' mechanism for combining the probabilities is a posterior probability where
#' \deqn{P(c_{j} | x_{i}) = /prod{P(c_{j} | x_{i}^{s})}_{s=1}^{ns}} where
#' \eqn{c} is one of \eqn{j} class labels, \eqn{x} is observed data for one of
#' \eqn{ns} data sources.
#'@export
#'
naiveIntegration <- function(probabilities_by_source){
if (length(probabilities_by_source) == 1){
stop("One source detected. Nothing to integrate.")
}
nobs <- lapply(probabilities_by_source, nrow)
if (sum(unlist(nobs)) != unlist(nobs[1])*length(nobs)) { # check that all dfs have the same number of rows
stop("The same number of observations is needed for all sources")
}
# strip away columns that are not probabilities
# by design these lists are in the same order
meta_info <- probabilities_by_source[[1]] %>%
dplyr::select(Truth, Partition_info) #rename and correct partition info later
probabilities_by_source <- lapply(probabilities_by_source, function(d_source){
return(d_source[, grepl("Prob", colnames(d_source))])
})
#---- multiply dataframes together elementwise------#
# (e.g. the first row/column in each dataframe is multiplied together)
integrated_probs <- Reduce("*", probabilities_by_source)
integrated_probs <- integrated_probs/apply(integrated_probs, 1, sum)
PredictedLabel <- apply(integrated_probs, 1, function(x) names(x)[which.max(x)])
PredictedLabel <- gsub(pattern = "PredictedProbs.",replacement = "", x = PredictedLabel)
meta_info <- cbind(PredictedLabel, meta_info)
#--- return meta information back to the probabilities
results <- cbind(integrated_probs, meta_info)
return(results)
}
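# A hedged toy example (the column layout is an assumption consistent with the
# function above: per-source data frames whose probability columns start with
# "Prob", plus Truth and Partition_info carried on the first source):
# source1 <- data.frame(PredictedProbs.A = c(0.7, 0.2), PredictedProbs.B = c(0.3, 0.8),
#                       Truth = c("A", "B"), Partition_info = 1)
# source2 <- data.frame(PredictedProbs.A = c(0.6, 0.4), PredictedProbs.B = c(0.4, 0.6),
#                       Truth = c("A", "B"), Partition_info = 1)
# naiveIntegration(list(source1, source2))  # row 1 -> "A", row 2 -> "B"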
|
d538b4c82aa504786a3c5d2e9e1e6ef975b4acce
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mdscore/examples/lr.test.Rd.R
|
c06ffc2b0eac39c74ac1cb38182a3641718d84f8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
lr.test.Rd.R
|
library(mdscore)
### Name: lr.test
### Title: Likelihood ratio test for generalized linear models
### Aliases: lr.test
### Keywords: likelihood ratio glm
### ** Examples
data(strength)
fitf <- glm(y ~ cut * lot, data = strength, family = inverse.gaussian("inverse"))
fit0 <- glm(y ~ cut + lot, data = strength, family = inverse.gaussian("inverse"))
lr.test(fit0,fitf)
|
026f85de4f47e4899663e0b0335e165b7a59e568
|
0392b09ae91b7a4fdbddb014cd57238202241b62
|
/plot4.R
|
72be6672325a5be02de4bab6f7ef1644cc68e9e3
|
[] |
no_license
|
JeremiahMurphy/ExData_Plotting1
|
484f68925c5a48dd9331305f0610555f3eadf9ee
|
a17526e40cb11bf304bcfdb1ffc21583861580c3
|
refs/heads/master
| 2020-05-20T05:49:24.073652
| 2019-05-07T14:12:32
| 2019-05-07T14:12:32
| 185,415,407
| 0
| 0
| null | 2019-05-07T14:11:00
| 2019-05-07T14:11:00
| null |
UTF-8
|
R
| false
| false
| 1,039
|
r
|
plot4.R
|
install.packages("dplyr")
library(dplyr)
my_data <- read.csv2("household_power_consumption.txt")
days <- filter(my_data, Date == "1/2/2007" | Date == "2/2/2007")
DateTime <- strptime(paste(days$Date, days$Time), "%d/%m/%Y %H:%M:%S")
png(file="plot4.png",width=480,height=480)
par(mfrow=c(2,2))
plot(DateTime,as.numeric(days$Global_active_power)/1000, type = "l", ylab= "Global Active Power", xlab="")
plot(DateTime,as.numeric(days$Voltage),type = "l", ylab= "Voltage")
plot(DateTime,as.numeric(days$Sub_metering_1), type = "l", ylab= "Energy sub metering", xlab="", col="black")
lines(DateTime,as.numeric(days$Sub_metering_2), type = "l", ylab= "Energy sub metering", xlab="", col="red")
lines(DateTime,as.numeric(days$Sub_metering_3), type = "l", ylab= "Energy sub metering", xlab="",col="blue")
legend("topright",cex=0.4,lty="solid",legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black","red","blue"))
plot(DateTime,as.numeric(days$Global_reactive_power)/100,type = "l",ylab= "Global_reactive_power")
dev.off()
|
1cab1d82e99eb4bf34b9e4abcd9ecca04a4a5a7c
|
872b1c8e4ccaf58ffeb863e3726913a3ca8511c9
|
/demo/WriteDot.R
|
23de076b947f8c1c2138bbf975846fe3969a8d2c
|
[] |
no_license
|
rdiaz02/BML
|
ad49b5903c131e10cdd06baebcc29f3989326aca
|
50e8793b45730262db60e658708e63992d787a08
|
refs/heads/master
| 2020-05-23T00:43:51.624777
| 2019-01-16T12:38:05
| 2019-01-16T12:38:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 97
|
r
|
WriteDot.R
|
library(BML)
col_resic <- bml(BML::col_resic, 5, 0.5, 5)
writeDotFile(col_resic, 'COL_RESIC.dot')
|
b1809e7d8f6025da4d16ae956434ec88bd5711c5
|
863259383c00396558454c1e9f78e993f65391d9
|
/AuthorNetwork_kirkversion.R
|
ca09d6e58d1abb7e7dbb334eaab36125ccf69542
|
[] |
no_license
|
RotmanCodingClub/Project0
|
f36cc04ebe5bea69c854196de04fa9c9bfd8f030
|
191fccb9b33699cb989835a062bb9bf0992b0e5d
|
refs/heads/master
| 2021-01-12T08:09:21.344479
| 2017-04-20T16:53:57
| 2017-04-20T16:53:57
| 76,487,569
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 943
|
r
|
AuthorNetwork_kirkversion.R
|
library("broom", lib.loc="~/R/R-3.3.1/library")
library("tidyr", lib.loc="~/R/R-3.3.1/library")
library("ggplot2", lib.loc="~/R/R-3.3.1/library")
library("gridExtra", lib.loc="~/R/R-3.3.1/library")
library("dplyr", lib.loc="~/R/R-3.3.1/library")
library(stringr)
library(networkD3)
#read in csv file downloaded from project 0 (John added to github)
df_authors_all <- read.csv("C:/Users/Summer Students/Documents/WebScrapedAuthors.csv")
#making a df with just author in (currently unused)
df_justAuthors <- data.frame(df_authors_all$author_list)
#Making a df with each author getting its own column. n = Inf keeps every author;
#if a fixed width is preferred, derive it from the maximum number of commas (see the sketch below)
authors_split <- str_split_fixed(df_authors_all$author_list, ",", n=Inf)
authors_split <- data.frame(authors_split)
simpleNetwork(authors_split)
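# A hedged sketch addressing the comment above: derive the split width from the
# data instead of hard-coding it (str_count comes from stringr, already loaded):
# max_authors <- max(str_count(df_authors_all$author_list, ",")) + 1
# authors_split <- str_split_fixed(df_authors_all$author_list, ",", n = max_authors)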
|
c22e5015b5a454e4e7a80b7448672884c0471695
|
960e994f0ba2f7db9821cbad3490a579aaaba136
|
/man/probe_simple_take_off_velocity.Rd
|
fea0f6aee1c92d7edfe4c270545bc3ca222940de
|
[
"MIT"
] |
permissive
|
ouzlim/vjsim
|
1a3aaabf5d93bc1be72c9fe069d80f00eb5d8755
|
456d771193463ef00efb91085ef8782ca57f9f21
|
refs/heads/master
| 2022-11-26T22:14:19.172622
| 2020-08-03T22:39:53
| 2020-08-03T22:39:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,253
|
rd
|
probe_simple_take_off_velocity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_optimal_profile.R
\name{probe_simple_take_off_velocity}
\alias{probe_simple_take_off_velocity}
\title{Probe Simple Take-off Velocity}
\usage{
probe_simple_take_off_velocity(
L0,
TOV0,
bodyweight,
change_ratio = seq(0.9, 1.1, length.out = 3),
aggregate = "raw"
)
}
\arguments{
\item{L0}{Numeric vector}
\item{TOV0}{Numeric vector}
\item{bodyweight}{Numeric vector}
\item{change_ratio}{Numeric vector indicating probing change ratios}
\item{aggregate}{How should \code{\link{probe_simple_take_off_velocity}} output be aggregated?
Default is "raw". Other options involve "ratio" and "diff" which use initial
output values}
}
\value{
Probing data frame
}
\description{
\code{probe_simple_take_off_velocity} probes results of the \code{\link{get_simple_take_off_velocity}} function by varying
\code{L0}, \code{TOV0}, and \code{bodyweight} parameters
}
\examples{
require(ggplot2)
simple_probe_data <- probe_simple_take_off_velocity(
L0 = 250,
TOV0 = 3,
bodyweight = 75,
change_ratio = seq(0.8, 1.2, length.out = 1001)
)
ggplot(
simple_probe_data,
aes(
x = change_ratio,
y = take_off_velocity,
color = probing
)
) +
geom_line()
}
|
6bd3330ee8f7650939bdd12ec042c6256ad78fdd
|
cb789e80d114f84215838f53dba8123050780546
|
/plot3.R
|
16bc853fe9c79dc0b0028de72ebb7f0c134c1202
|
[] |
no_license
|
Minnovate/ExData_Plotting1
|
15fe78cbbc31cb4d283f3ddbcb1cdf913d54ef27
|
655e4fd38e8ee471228d4ee88265ceb9b9bb7a38
|
refs/heads/master
| 2021-01-12T17:15:32.726195
| 2016-10-25T05:03:32
| 2016-10-25T05:03:32
| 71,530,385
| 0
| 0
| null | 2016-10-21T04:40:55
| 2016-10-21T04:40:55
| null |
UTF-8
|
R
| false
| false
| 2,081
|
r
|
plot3.R
|
#Set the Working directory to the right location
defaultWD <- "//Users/gamelord/Documents/OneDrive/Coursera.org/4. Exploratory Data/ExData_Plotting1/"
setwd(defaultWD)
fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileurl,destfile="./power.zip",method="curl")
# unzip file http://stat.ethz.ch/R-manual/R-devel/library/utils/html/unzip.html
unzip("./power.zip", files = NULL, list = FALSE, overwrite = TRUE,
junkpaths = FALSE, exdir = "./data", unzip = "internal",
setTimes = FALSE)
list.files("./data/") #list files in the folder
#Read file into R
all_power <- read.table("./data/household_power_consumption.txt",sep=";",comment.char="", header=TRUE)
#Convert Date and Time variables
all_power$Datetime<-strptime(paste(all_power$Date,all_power$Time),"%d/%m/%Y %H:%M:%S")
all_power$Date2 <- as.Date(all_power$Date,"%d/%m/%Y")
#Using only 2 dates
power_consumption<-all_power[c(which(all_power$Date2=="2007-02-01"),which(all_power$Date2=="2007-02-02")),2:11]
power_consumption$Global_active_power <- as.numeric(as.character(power_consumption$Global_active_power))
power_consumption$Sub_metering_1 <- as.numeric(as.character(power_consumption$Sub_metering_1))
power_consumption$Sub_metering_2 <- as.numeric(as.character(power_consumption$Sub_metering_2))
power_consumption$Sub_metering_3 <- as.numeric(as.character(power_consumption$Sub_metering_3))
#Plot 3 starts from here
xrange <- range(power_consumption$Datetime)
yrange <- range(power_consumption$Sub_metering_1)
#set up the plot
png('plot3.png',width=480,height=480)
plot(xrange, yrange, type="n", ylab="Energy sub metering", xlab = "")
lines(power_consumption$Datetime,power_consumption$Sub_metering_1,type="l")
lines(power_consumption$Datetime,power_consumption$Sub_metering_2,type="l",col="red")
lines(power_consumption$Datetime,power_consumption$Sub_metering_3,type="l",col="blue")
#add a legend
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
,lty=c(1,1),lwd=c(2.5,2.5),col=c("grey","red","blue"))
dev.off()
|
9fbe17a207756d5293437f186f24c2f77f5d3c57
|
a17b69940bbd695d068e9e811583bc0010ee336e
|
/man/predict.Rd
|
84d6e22ad6c7b33c575c0551ddafe639f1f67562
|
[] |
no_license
|
iwonasado/sgPLS
|
e825050d12b954af7c5e126f15ef0a07a112b53f
|
8a358b4dfb2dea3b25b83a242b17c8cbad2e5f71
|
refs/heads/master
| 2021-01-18T06:06:37.691777
| 2015-03-14T00:00:00
| 2015-03-14T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
rd
|
predict.Rd
|
\name{predict}
\encoding{latin1}
\alias{predict.sgPLS}
\alias{predict.gPLS}
\alias{predict.sPLS}
\title{Predict Method for sPLS, gPLS or sgPLS}
\description{
Predicted values based on sparse PLS, group PLS, sparse group PLS models. New responses and
variates are predicted using a fitted model and a new matrix of observations.
}
\usage{
\method{predict}{sPLS}(object, newdata, ...)
\method{predict}{gPLS}(object, newdata, ...)
\method{predict}{sgPLS}(object, newdata, ...)
}
\arguments{
\item{object}{object of class inheriting from \code{"sPLS"}, \code{"gPLS"}or \code{"sgPLS"}.}
\item{newdata}{data matrix in which to look for for explanatory variables to be used for prediction.}
\item{...}{not used currently.}
}
\details{
The \code{predict} function for pls and spls objects was created by Sebastien Dejean, Ignacio Gonzalez, Amrit Singh and Kim-Anh Le Cao for the \code{mixOmics} package. Similar code is used for the sPLS, gPLS and sgPLS models fitted with the \code{sgPLS} package.
\code{predict} function produces predicted values, obtained by evaluating the sparse PLS, group PLS or sparse group PLS
model returned by \code{sPLS}, \code{gPLS} or \code{sgPLS} in the frame \code{newdata}.
Variates for \code{newdata} are also returned. The prediction values are calculated based on the regression coefficients of \code{object$Y} onto \code{object$variates$X}.
}
\value{
\code{predict} produces a list with the following components:
\item{predict}{A three dimensional array of predicted response values. The dimensions
correspond to the observations, the response variables and the model dimension, respectively.}
\item{variates}{Matrix of predicted variates.}
\item{B.hat}{Matrix of regression coefficients (without the intercept).}
}
\references{
Tenenhaus, M. (1998). \emph{La r\'egression PLS: th\'eorie et pratique}. Paris: Editions Technic.
}
\author{Benoit Liquet and Pierre Lafaye de Micheaux}
\seealso{\code{\link{sPLS}}, \code{\link{gPLS}}, \code{\link{sgPLS}}.}
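\examples{
% A hedged, minimal sketch; the data dimensions and keepX values are
% illustrative assumptions rather than package-supplied defaults.
\dontrun{
X <- matrix(rnorm(40 * 10), ncol = 10)
Y <- matrix(rnorm(40 * 3), ncol = 3)
model.sPLS <- sPLS(X, Y, ncomp = 2, keepX = c(5, 5))
pred <- predict(model.sPLS, newdata = X[1:5, ])
str(pred$predict)
}
}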
\keyword{regression}
\keyword{multivariate}
|
7f48348f87145f4120fc6aa509588c7ff0cd6ad4
|
e3b4c2ebe67b68abdbc2190353493a382962ba10
|
/man/conan_check.Rd
|
b259eb62d671ce944a1407446486763e9db06e9b
|
[
"MIT"
] |
permissive
|
mrc-ide/conan
|
9f219f18a8f65e9a0e625506e376f3d0606871cc
|
312a42e3086966697931e3b1fe0b60735c051669
|
refs/heads/main
| 2023-09-01T17:10:26.435970
| 2021-05-07T16:29:58
| 2021-05-07T16:29:58
| 352,555,128
| 4
| 0
|
NOASSERTION
| 2023-08-22T08:26:31
| 2021-03-29T07:34:17
|
R
|
UTF-8
|
R
| false
| true
| 984
|
rd
|
conan_check.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.R
\name{conan_check}
\alias{conan_check}
\title{Check if packages in library}
\usage{
conan_check(packages, library)
}
\arguments{
\item{packages}{A character vector of package names or pkgdepends
"references"}
\item{library}{A path to a library}
}
\value{
A list with elements
\itemize{
\item \code{complete}: logical, indicating if all packages were found
\item \code{found}: A character vector of found packages
\item \code{missing}: A character vector of missing packages
}
}
\description{
Check to see if all packages are available in a given library
}
\examples{
# Simple usage:
conan::conan_check(c("conan", "pkgdepends"), .libPaths())
# While we parse references, we don't check version information:
conan::conan_check("github::mrc-ide/conan@v2.0.0", .libPaths())
# Missing packages will be returned as the inferred package name
conan::conan_check("github::org/unknownpkg", .libPaths())
}
|
e92fd8675d3d850e26792e8f26512433a33ff0c3
|
0e4457fb2de5f700ba75ea97e70a8570fe00513d
|
/tests/testthat/test-sample-mc-binary-cov.R
|
4066d228d29660d14fc55bfaca0d30e412692393
|
[
"MIT"
] |
permissive
|
skgallagher/InfectionTrees
|
63043f87b206dfc25f98a629bc832cc1db8a4fab
|
cfe4f5d4d3f6eca11c1cce56261857b4d1f44f24
|
refs/heads/master
| 2023-06-28T07:55:12.273475
| 2021-07-24T21:05:42
| 2021-07-24T21:05:42
| 277,616,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,340
|
r
|
test-sample-mc-binary-cov.R
|
test_that("summarize_binary_trees", {
mc_trees <- data.frame(cluster_id = c(1, 1, 1,
2, 2, 2,
3, 3, 3),
n_inf = c(2, 0, 0,
1, 1, 0,
1, 0, 1),
x = c(1, 0, 0,
0, 1, 0,
0, 0, 1),
gen = c(1, 2, 2,
1, 2, 3,
1, 3, 2))
multiple_outside_transmissions <- FALSE
out <- summarize_binary_cov_trees(mc_trees,
multiple_outside_transmissions =
multiple_outside_transmissions)
expect_true(all(out$x_pos == 1))
expect_true(all(out$x_neg == 2))
expect_equal(as.numeric(out[1, 3:5]), c(1, 1, 2))
expect_equal(as.numeric(out[2, 3:5]), c(2, 0, 1))
## tests for outside
## TODO
})
test_that("sample_mc_binary_cov_inner", {
x_pos <- 2
x_neg <- 1
B <- 5
root_node <- NULL
out <- sample_mc_binary_cov_inner(x_pos,
x_neg,
B,
root_node)
expect_equal(nrow(out), B * (x_pos + x_neg))
expect_equal(x_pos * B, sum(out$x))
## tests for outside
## TODO
})
test_that("sample_mc_binary_cov", {
B <- 5
observed_cluster_summaries <- data.frame(freq = c(4, 2),
cluster_size = c(1, 1),
x_pos = c(1,0),
x_neg = c(0, 1))
out <- sample_mc_binary_cov(B,
observed_cluster_summaries)
expect_equal(out$mc_freq, c(5, 5))
expect_equal(out$freq, c(4, 2))
##
B <- 10
observed_cluster_summaries <- data.frame(freq = c(4),
cluster_size = c(3),
x_pos = c(2),
x_neg = c(1))
out <- sample_mc_binary_cov(B,
observed_cluster_summaries)
expect_equal(sum(out$mc_freq), B)
})
|
92dc97bbd755f09e9ceff10dcc9268d64c0050a6
|
0a677c67824ad812542e8625126be1dd3ed7c711
|
/tools/torchgen/man/method_cpp_exceptions.Rd
|
389d9280cb5bbba9844a531daa2e32c29f514d91
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
dfalbel/torch
|
48ff1b38ffdef9abe4873364c26b8abbae3ba330
|
ae317db8ec1392acd9f1a2d5f03cef9ad676f778
|
refs/heads/master
| 2021-07-08T04:41:59.099075
| 2020-08-07T22:21:43
| 2020-08-07T22:21:43
| 151,864,442
| 58
| 8
|
NOASSERTION
| 2019-10-24T21:35:26
| 2018-10-06T17:27:42
|
C++
|
UTF-8
|
R
| false
| true
| 247
|
rd
|
method_cpp_exceptions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_cpp.R
\name{method_cpp_exceptions}
\alias{method_cpp_exceptions}
\title{Get the exceptions}
\usage{
method_cpp_exceptions()
}
\description{
Get the exceptions
}
|
de4fa80c7b66febd9c3ca88b07d0a65ff9b3e2aa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/permutations/examples/sgn.Rd.R
|
55bae69663625272c7a90fee268af63cb6d237ce
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
sgn.Rd.R
|
library(permutations)
### Name: sgn
### Title: Sign of a permutation
### Aliases: sgn is.even
### ** Examples
sgn(id) # always problematic
sgn(rperm(10,5))
x <- rperm(40,6)
y <- rperm(40,6)
stopifnot(all(sgn(x*y) == sgn(x)*sgn(y))) # sgn() is a homomorphism
z <- as.cycle(rperm(20,9,5))
z[is.even(z)]
|
11ad3687e848a3039a627c339234a3c8cc88830e
|
fdc0fa3eda2092bc285ab39542e1e5d65f3140e6
|
/plot3.R
|
3ec123ee644cfd838d7188d5a493b39e77d77495
|
[] |
no_license
|
cdshps/ExData_Plotting1
|
2f37efc140593f2f14cbd7a3f943f3096da2436d
|
50e522e086284b803c5c22abf0a788f7dcd54e1d
|
refs/heads/master
| 2020-04-01T04:38:05.241101
| 2018-10-13T12:52:12
| 2018-10-13T12:52:12
| 152,871,219
| 0
| 0
| null | 2018-10-13T12:46:30
| 2018-10-13T12:46:30
| null |
UTF-8
|
R
| false
| false
| 1,313
|
r
|
plot3.R
|
plot3 <- function()
{
# Read relevant data (1st and 2nd February 2007)
electricData <- read.table(file="household_power_consumption.txt", header=T, sep=";", na.strings="?")
electricData <- electricData[(electricData$Date=="1/2/2007") | (electricData$Date=="2/2/2007"),]
# Summarize columns Date and Time into one new column DateAndTime and convert this into an actual time variable
electricData <- cbind(DateAndTime=paste(electricData$Date, electricData$Time), electricData[,-c(1,2)])
electricData$DateAndTime <- strptime(electricData$DateAndTime, format="%d/%m/%Y %H:%M:%S")
# Plot the required diagram (saved into file "plot3.png")
png("plot3.png", width=480, height=480)
Sys.setlocale(category="LC_ALL", locale="english") # Needed for the weekdays to be displayed in english
with(electricData, plot(DateAndTime, Sub_metering_1, xlab="", ylab="Energy sub metering", type="n"))
with(electricData, lines(DateAndTime, Sub_metering_1))
with(electricData, lines(DateAndTime, Sub_metering_2, col="red"))
with(electricData, lines(DateAndTime, Sub_metering_3, col="blue"))
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lwd=1)
dev.off()
}
|
939d320b17710b62daf384b168fa8ba5f4928819
|
d0ea8cdff2e89f2c6e38f7c266c577a2b81d8008
|
/lecture8/regression_lecture_students.r
|
9e6d49dd757b9707f7f4906c4bbc6f442e451ca0
|
[] |
no_license
|
wampeh1/Ecog314_Spring2017
|
5a5b113abf9b6788dcc3f6de334d70271a7672f0
|
f4776d26fe04457402a799296fbc912f767fdf12
|
refs/heads/master
| 2021-05-02T00:53:54.558669
| 2017-04-21T12:39:08
| 2017-04-21T12:39:08
| 78,502,858
| 4
| 7
| null | 2017-03-08T21:37:14
| 2017-01-10T06:02:15
|
HTML
|
UTF-8
|
R
| false
| false
| 1,173
|
r
|
regression_lecture_students.r
|
#install.packages("Lahman")
library(Lahman)
library(dplyr)
library(ggplot2)
# load data from package Lahman
# make subsets of some of the data
teams_small <- Teams %>%
mutate() %>%
select()
batting_small <- Batting %>%
mutate() %>%
select()
# join all the data together
baseball_data <- left_join() %>%
left_join() %>%
na.omit()
# what are the dimensions of the combined data
dim()
# simple scatterplots
p_salary_vs_ba <-
p_salary_vs_hr <-
# basic model
basic_model <- lm()
str(basic_model)
summary(basic_model)
baseball_data_residuals <- baseball_data %>%
mutate(residuals = )
# box plot of residuals
p_box_res_year <-
p_box_res_team <-
# model with fixed effects for years
fancy_model <- lm()
summary(fancy_model)
# histogram of salary variable
# model with log salary
super_fancy_model <- lm()
summary(super_fancy_model)
# birth weights
ezpass_birthweights <- read.csv()
str(ezpass_birthweights)
ezpass_reg_data <- ezpass_birthweights %>%
mutate()
ezpass_reg = lm()
summary(ezpass_reg)
|
31eb42812453f838e1afbdb9c6b10de8b092cceb
|
6a5770f0513758cdf4965e78dd36220419e2f316
|
/02 Data Wrangling/Data Wrangling.R
|
f29856345e80e3b9e422c077693902ccdce4320c
|
[] |
no_license
|
leronewilliams/DV_RProject2
|
0d7db07cb1de8f70d0f0a64eb21af41c0f8c51b0
|
3c1d7e27753f40112c02a37c4a4b8ded224bf975
|
refs/heads/master
| 2021-01-17T13:46:09.491652
| 2016-05-26T23:52:45
| 2016-05-26T23:52:45
| 30,425,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
Data Wrangling.R
|
require("dplyr")
require("tidyr")
head(vocab) # assumes the vocab data frame is already loaded in the session
#Generates the "Female" and "Male"columns from the "SEX" column
vocab %>% spread(SEX,EDUCATION) %>% tbl_df
#Average Education per Sex
vocab %>% group_by(SEX) %>% summarize(Average_Education = mean(EDUCATION)) %>% tbl_df
#Average Vocab per Sex
vocab %>% group_by(SEX) %>% summarize(Average_Vocabulary = mean(VOCABULARY)) %>% tbl_df
#Standard Deviation Vocab per Sex
vocab %>% group_by(SEX) %>% summarize(Standard_Deviation_Vocabulary = sd(VOCABULARY)) %>% tbl_df
#Average Vocabs per Education
avgPerEduc <- vocab %>% group_by(EDUCATION) %>% summarize(Average_Vocabulary_per_Education = mean(VOCABULARY))
print(avgPerEduc, n=21)
#Top Vocabulary
vocab %>% arrange(desc(VOCABULARY)) %>% tbl_df
#Top 10 Vocab per Sex
vocab %>% group_by(SEX) %>% arrange(desc(VOCABULARY)) %>% slice(1:10) %>% tbl_df
|
ff4dfb427d4ca4814256c80cd15b5488e22aa106
|
be1cc419487bd3d57e32b4e44e24599a3514b295
|
/java-r/src/main/resources/mtcars.R
|
301c47526e2baaf4966fa7caff2a2b830624932a
|
[
"MIT"
] |
permissive
|
bbonnin/talk-r-java-graalvm
|
f4192a56fd973acedc15208706fd87d3b08057a1
|
644bc68b061585f98ba3d8e2a71a9d721418bb98
|
refs/heads/master
| 2023-01-23T07:24:11.998457
| 2019-02-27T16:43:37
| 2019-02-27T16:43:37
| 149,638,699
| 0
| 0
|
MIT
| 2023-01-06T08:15:41
| 2018-09-20T16:28:16
|
R
|
UTF-8
|
R
| false
| false
| 1,545
|
r
|
mtcars.R
|
library(ggplot2)
library(scales)
logger <- java.type("io.millesabords.demos.java_r.Logger")
#############################################################################################
# With plotMtcars, all the context for the execution is provided by the Java app
function(params) {
svg()
logger$log("CODE R - Type of plot:", params$type)
mtcars$cyl <- as.factor(mtcars$cyl)
switch(params$type,
'loess'={
plot <- ggplot(data = mtcars, mapping = aes(x = wt, y = mpg)) +
geom_ribbon(stat = "smooth", method = "loess", alpha = .15, fill = "lightgray", linetype = 0) +
geom_smooth(method = 'loess', se = FALSE, color = "blue")
},
'lm_cyl'={
plot <- ggplot(mtcars, aes(x=wt, y=mpg, color=cyl, shape=cyl)) +
geom_ribbon(stat = "smooth", method = "lm", alpha = .15, fill = "lightgray", linetype = 1) +
geom_smooth(method=lm, se=FALSE)
},
'lm_cyl_no_confidence_interval'={
plot <- ggplot(mtcars, aes(x=wt, y=mpg, color=cyl, shape=cyl)) +
geom_smooth(method=lm, se=FALSE, fullrange=TRUE)
},
{
plot <- ggplot(data = mtcars, mapping = aes(x = wt, y = mpg))
}
)
plot <- plot + geom_point(size=3.0) +
xlab('Weight (x 1000lbs)') + ylab('Miles per Gallon') +
theme_minimal() +
theme(axis.text = element_text(family = "Arial", size = 8), text = element_text(family = "Arial"))
print(plot)
svg.off()
}
|
102bb4adf310cbd1a816f1f41dc4022cb1dc4db2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MSQC/examples/archery2.Rd.R
|
fc2bf0ba0d01a6679f0802d96334b671b65ed3a3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
archery2.Rd.R
|
library(MSQC)
### Name: archery2
### Title: Target archery dataset during the elimination stage (used as
### Phase II)
### Aliases: archery2
### Keywords: datasets
### ** Examples
data(archery2)
## maybe str(archery2) ; plot(archery2) ...
|
1a1efd0a71cde5479130920fbd32cb2d3dd9d860
|
00d3c5d5a17235d7f683bc62d4442b1678b3353b
|
/word_plots_R/Plots_Words.R
|
b8bbdd39a0914173998b94c952c9e87c94b1e9e9
|
[] |
no_license
|
bowen1993/comp150_project1
|
e52c95250934227b54f5bccd93740915111e0517
|
ed7687bf6f15ee201b29cc92eedb6c49dc3d1187
|
refs/heads/master
| 2022-12-08T21:43:18.327174
| 2018-10-22T21:29:39
| 2018-10-22T21:29:39
| 153,384,277
| 0
| 0
| null | 2022-12-08T01:18:49
| 2018-10-17T02:33:38
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 5,339
|
r
|
Plots_Words.R
|
# Processing the labels of the raw data (6.8 in "Deep Learning with R")
group1_dir <- "/Users/sofia/comp150_project1/data/data1"
train_dir <- file.path(group1_dir, "train")
labels <- c()
texts <- c()
for (label_type in c("male", "female")) {
label <- switch(label_type, male = 0, female = 1)
dir_name <- file.path(train_dir, label_type)
for (fname in list.files(dir_name, pattern = glob2rx("*.txt"),
full.names = TRUE)) {
texts <- c(texts, readChar(fname, file.info(fname)$size))
labels <- c(labels, label)
}
}
# All the docs are in texts and the female/male characterization in labels
is.vector(texts)
length(texts)
is.vector(labels)
length(labels)
is.vector(label)
head(labels)
str(texts) # structure
texts[1:2]
labels[1:2]
# Plots with tm package. (The lesser plots.)
install.packages("tm") # for text mining
install.packages("SnowballC") # for text stemming
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
install.packages("ggplot2")
# Load
library("tm")
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library("ggplot2")
#Take just the 3 docs and Create a corpus in tm package
#texts_sub<-texts[1:3]
#labels_sub<-labels[1:3]
docs1<-VCorpus(VectorSource(texts))
names(docs1)
inspect(docs1[[1]])
docs2 <- tm_map(docs1, content_transformer(tolower))
# Remove numbers
docs3 <- tm_map(docs2, removeNumbers)
# Remove your own stop words
# specify your stopwords as a character vector
#docs2 <- tm_map(docs2, removeWords, c("blabla1", "blabla2"))
# Remove punctuations
docs4 <- tm_map(docs3, removePunctuation)
# Eliminate extra white spaces
docs5 <- tm_map(docs4, stripWhitespace)
#The Doc Term frequency matrix
dtm <- DocumentTermMatrix(docs5)
dimnames(dtm)
inspect(dtm)
findFreqTerms(dtm, 4)
m <- as.matrix(dtm)
dim(m) # Note that the number of words dropped from 227 to 207
#The number of words in each doc
rowSums(m)
#the max length of the docs_sub
max(rowSums(m))
freq <- sort(colSums(as.matrix(dtm)), decreasing=TRUE)
names(freq)
wof <- data.frame(word=names(freq), freq=freq)
head(freq)
wordcloud(words = wof$word, freq = wof$freq, min.freq = 1,
max.words=200, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "Dark2"))
barplot(wof[1:20,]$freq, las = 2, names.arg = wof[1:20,]$word,
col ="lightblue", main ="Most frequent words",
ylab = "Word frequencies")
#########################
# The nice plots!
install.packages("quanteda")
library("quanteda")
Qda_corpus<-corpus(texts)
summary(Qda_corpus)
docvars(Qda_corpus, "Review")<-labels
summary(Qda_corpus)
tokenInfo <- summary(Qda_corpus)
tokenInfo[which.max(tokenInfo$Tokens), ]
Qda_dfm <- Qda_corpus %>%
dfm( remove_punct = TRUE) %>%
dfm_trim(min_termfreq = 10, verbose = FALSE)
set.seed(100)
textplot_wordcloud(Qda_dfm)
Qda_dfm %>%
dfm(groups = "Review", remove_punct = TRUE) %>%
dfm_trim(min_termfreq = 10, verbose = FALSE) %>%
textplot_wordcloud(comparison = TRUE)
Qda_dfm %>%
dfm(groups = "Review", remove_punct = TRUE) %>%
dfm_trim(min_termfreq = 10, verbose = FALSE) %>%
textplot_wordcloud(comparison = TRUE, colors = c('red', 'blue'))
review_dfm <- dfm(Qda_corpus, groups = "Review", remove_punct = TRUE)
# Calculate keyness and determine male speeches as target group
# the output is a data.frame of computed statistics and associated p-values,
# where the features scored name each row,
# and the number of occurrences for both the target and reference groups
# For measure = "chi2" this is the chi-squared value, signed female if the
# observed value in the target exceeds its expected value
result_keyness <- textstat_keyness(review_dfm, target = 1)
# Plot estimated word keyness
textplot_keyness(result_keyness)
head(textstat_keyness(review_dfm, target = 1))
#head(textstat_keyness(review_dfm, target = 1, measure="pmi"))
# plot 20 most frequent words in female speech
female_dfm <-
corpus_subset(Qda_corpus, Review == 1) %>%
dfm(remove_punct = TRUE)
female_freq <- textstat_frequency(female_dfm)
head(female_freq, 10)
library("ggplot2")
ggplot(female_freq[1:20, ], aes(x = reorder(feature, frequency), y = frequency)) +
geom_point() +
coord_flip() +
labs(x = "20 most frequent words in female speech", y = "Frequency")
# plot 20 most frequent words in male speech
male_dfm <-
corpus_subset(Qda_corpus, Review == 0) %>%
dfm(remove_punct = TRUE)
male_freq <- textstat_frequency(male_dfm)
head(male_freq, 10)
library("ggplot2")
ggplot(male_freq[1:20, ], aes(x = reorder(feature, frequency), y = frequency)) +
geom_point() +
coord_flip() +
labs(x = "20 most frequent words in male speech", y = "Frequency")
# Finally, texstat_frequency allows to plot the most frequent words in terms
# of relative frequency by group.
dfm_weight_rev <- Qda_corpus%>%
dfm( remove_punct = TRUE) %>%
dfm_weight(scheme = "prop")
# Calculate relative frequency by group
freq_weight <- textstat_frequency(dfm_weight_rev, n = 20, groups = "Review")
ggplot(data = freq_weight, aes(x = nrow(freq_weight):1, y = frequency)) +
geom_point() +
facet_wrap(~ group, scales = "free") +
coord_flip() +
scale_x_continuous(breaks = nrow(freq_weight):1,
labels = freq_weight$feature) +
labs(x = NULL, y = "Relative frequency")
|
6c8583eba95f5090178c05f318b55212b9b96e53
|
01114541c33a31ff4b1134788ff0815fef397329
|
/16S_amplicon/the new way/3_casting_h2o2.r
|
ab6249bfe9f73121dcc4dc330eca3b5b6c12ae4a
|
[] |
no_license
|
RJ333/R_scripts
|
06b31ad1459bafc68e0c212aa55eb83e5f354be9
|
a882732aeb86b10a44f5fedf86401bf20d4618f6
|
refs/heads/master
| 2021-04-26T22:55:19.096526
| 2019-07-22T08:30:33
| 2019-07-22T08:30:33
| 123,895,394
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,123
|
r
|
3_casting_h2o2.r
|
library(reshape2) #load reshape2 for melt() (note: the classic cast() lives in reshape; reshape2 provides dcast/acast)
#calculate sum of reads per sample in excel, then copy new table the relative abundances
#copy wholetax into second column, split one wholetax into taxonomic levels again (in excel)
#read into R
tcast_h2o2_rel<-read.csv(file.choose(),row.names=1,sep=",")
#wholetax also row names
tcast_h2o2_whole<-tcast_h2o2_rel[,c(1:(ncol(tcast_h2o2_rel)-6))] #takes all rows and the columns 1 to (max number of columns - 6) into new object
head(tcast_h2o2_whole)
#transpose back for merging
cast_h2o2_whole<-t(tcast_h2o2_whole)
#adjust names in excel for merging
write.csv(cast_h2o2_whole,file="cast_h2o2_whole.csv")
#create meta_data file in excelfor merging
#read in meta_data, sort stations in meta data
meta_h2o2<-read.csv(file.choose(),header=T,row.names=1,sep=";") #samples in rows, meta data in colums
str(meta_h2o2)
#meta_h2o2$time<-as.factor(meta_h2o2$time) #depending on the aims it might be necessary to change the classes of the "numeric" columns to "factor" columns
#meta_h2o2$days<-as.factor(meta_h2o2$days)
#meta_h2o2$parallel<-as.factor(meta_h2o2$parallel)
#str(meta_h2o2)
# merge: two tables that share a common column (here: row.names) are combined;
# the final file gets as many rows as x (the first table) contains
h2o2_wholetax_meta<-merge(cast_h2o2_whole,meta_h2o2,by="row.names",all.x=TRUE)
#after merging the row.names need to be adjusted
head(h2o2_wholetax_meta)
row.names(h2o2_wholetax_meta)<-h2o2_wholetax_meta$Row.names
head(h2o2_wholetax_meta)
h2o2_wholetax_meta<-h2o2_wholetax_meta[,(-1)] #getting rid of the first column
head(h2o2_wholetax_meta)
#melt into final table with all samples on your chosen taxonomic level and all metadata combinations
final_h2o2_whole<-melt(h2o2_wholetax_meta, id=c("station","time","treatment","parallel","nucleic_acid","DV"))
head(final_h2o2_whole,50)
str(final_h2o2_whole) #new melt function from reshape2 turns value into character format? no
#final_h2o2_whole$value<-as.numeric(final_h2o2_whole$value)
str(final_h2o2_whole)
#to aggregate means out of paralleles you can use:
final_h2o2_whole_mean<-aggregate(value~variable+time+treatment+station+DV+nucleic_acid, data = final_h2o2_whole, mean) #possible step to combine parallels as mean or sum, all columns except the to-be-combined must be named
h2o2_otu_tax<-read.csv(file.choose(),row.names=1,sep=";")
final_h2o2_tax<-merge(final_h2o2_whole,h2o2_otu_tax,by.x="variable", by.y="row.names",all.x=TRUE)
final_h2o2_tax_mean<-merge(final_h2o2_whole_mean,h2o2_otu_tax,by.x="variable", by.y="row.names",all.x=TRUE)
# don't calculate diversity from relative abundances (counts smaller than 1)
library(vegan) # diversity() and specnumber()
head(cast_h2o2) # cast_h2o2: the count table from the earlier casting step
## Pielou's evenness (J) is derived from the Shannon index (H) and species richness (S):
## S <- specnumber(x)   ## rowSums(x > 0) does the same
## J <- H / log(S)
## OTUs in columns, samples as row names
h_cast_h2o2 <- diversity(cast_h2o2)
s_cast_h2o2 <- specnumber(cast_h2o2)
j_cast_h2o2 <- h_cast_h2o2/log(s_cast_h2o2) # evenness = H/log(S); dividing by log(cast_h2o2) was wrong
write.csv(h_cast_h2o2, file="h_cast_h2o2.csv")
write.csv(s_cast_h2o2, file="s_cast_h2o2.csv")
write.csv(j_cast_h2o2, file="j_cast_h2o2.csv")
# combine with the meta data via sample name and use in plots, e.g. with Gallaeci
# overlay the 16S plot on e.g. a Pseudomonas bin plot?
########## plot ideas ##########
#you can further subset table or use everything for plotting. the plotting itself also provides subsetting options
sample_subset<-subset(final_h2o2_tax_mean,treatment=="h2o2")
more_cell_counts<-read.csv(file.choose(),sep=";")
# specific OTU: Gallaeci
library(ggplot2)
gtest <- subset(final_table_tax, habitat == "water" & grepl("Gallaeci", variable))
meta_h2o2_test <- subset(meta_h2o2, habitat == "water")
gg <- ggplot(gtest, aes(x = days)) +
  geom_point(aes(y = value, colour = nucleic_acid)) +
  geom_point(data = meta_h2o2_test, aes(y = shannon*5, colour = nucleic_acid, shape = nucleic_acid)) +
  facet_wrap(~treatment, nrow = 2) +
  ggtitle("gtest") +
  stat_summary(data = gtest, fun.y = "mean", geom = "line", aes(y = value, colour = nucleic_acid))
gg
## only plot diversity
divtest <- ggplot(meta_h2o2, aes(x = days)) +
  geom_point(aes(y = shannon*100, colour = nucleic_acid)) +
  geom_point(aes(y = richness, shape = nucleic_acid)) +
  facet_wrap(~treatment*habitat, nrow = 2, ncol = 2)
divtest
ggplot(df2, aes(x = x, y = y)) + stat_summary(fun.y = "mean", geom = "line", aes(group = factor(grouping)))
#phnM_iso<-ggplot(molten_phnM_gene_isoforms, aes(x=day))+
#geom_line(data=more_cell_counts, aes(y=glyph_mg_L*(cellfactor/15),colour="glyphosate concentration"),alpha=0.8,linetype="solid", size=1)+
#geom_line(data=more_cell_counts, aes(y=glyph_theor*(cellfactor/15),colour="glyphosate dilution"),alpha=0.5,linetype="F1", size=1)+
## a bit more refined here: I only want to show one organism, in a point plot (geom_point);
## the x axis could e.g. show the parallels, the y axis of course the read counts
# ggtitle gives the plot a title
# facet_wrap produces many plots at once, differing in the factors "day" and "treatment";
# they are arranged in two rows and 4 columns (if that is enough)
# the "aes" input is fairly involved, see the ggplot2 manual; I believe it controls the legend
# community plot
test<-ggplot(sample_subset, aes(x ="", y = value, fill=family))+
facet_wrap( ~station*time,nrow=2)+
geom_bar(width = 2, stat = "identity")+
theme(legend.position='none')
# community plot with a minimum read count
test_groesser_0.2<-ggplot(final_h2o2_tax_mean[which(final_h2o2_tax_mean$value>0.2),], aes(x = "", y = value, fill=variable))+
facet_wrap( ~days*treatment,nrow=2,ncol=7)+
geom_bar(width = 1, stat = "identity")+
theme(legend.position='none')
# without the legend: + theme(legend.position='none')
# to turn the bar chart into a pie chart:
torten_test <- test + coord_polar("y", start=0)

#### alex7777777/my_funktion: /61_function.R ####
#######################################################
# 2019-11-13 - by Alex Gorbach
#######################################################
# Function: heuristic rule.
# Finds events that repeat more than twice in a row and reduces each run,
# returning the same object with only the first and last event of a run kept.
# expected file structure: "ids", "Datum", "event"
my_repeating_reduction <- function(my_sqa, event_reduct="") {
if(class(my_sqa$event)!="character") { my_sqa$event <- as.character(my_sqa$event)}
ncol_for_return <- ncol(my_sqa)
lines_start <- nrow(my_sqa)
  my_sqa$event2 <- my_sqa$event[2:(nrow(my_sqa)+1)] # event2 = next event (lead)
  my_sqa$event2[nrow(my_sqa)] <- ""
  my_sqa$event3[2:nrow(my_sqa)] <- my_sqa$event[1:(nrow(my_sqa)-1)] # event3 = previous event (lag)
  my_sqa$event3[1] <- ""
# default: reduction of all recurring events
if(event_reduct == "") {
my_sqa <- my_sqa[!((my_sqa$event==my_sqa$event2)
&(my_sqa$event==my_sqa$event3)), ]
} else {
# reduction of only the selected event
my_sqa <- my_sqa[!((my_sqa$event==event_reduct)
&(my_sqa$event==my_sqa$event2)
&(my_sqa$event==my_sqa$event3)), ]
}
cat(paste0(lines_start - nrow(my_sqa),
" lines with recurring events have been deleted\n"))
return(my_sqa[ , 1:ncol_for_return])
}
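# A minimal usage sketch (toy values invented for illustration): "B" occurs
# four times in a row, so the two middle occurrences are dropped and only the
# first and last "B" of the run survive.
toy <- data.frame(ids   = 1:7,
                  Datum = as.Date("2019-11-01") + 0:6,
                  event = c("A", "B", "B", "B", "B", "C", "A"),
                  stringsAsFactors = FALSE)
reduced   <- my_repeating_reduction(toy)        # reduce all recurring events
reduced_b <- my_repeating_reduction(toy, "B")   # reduce only runs of "B"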

#### pqnelson/pqnelson.github.com: /assets/gausman.R ####
# Beta posterior for Gausman's pitching stats; alpha0 is presumably the count of
# hits + walks + home runs allowed and beta0 the remaining batters faced
# (assumed from the axis label below)
alpha0 <- 213 + 22 + 65
beta0 <- 214*3 + 1
# 90% central posterior interval for the probability of a hit, walk, or home run
x0 <- qbeta(0.05, alpha0, beta0)
x1 <- qbeta(0.95, alpha0, beta0)
coord.x <- c(x0, seq(x0, x1, 0.001), x1)
coord.y <- c(0, dbeta(seq(x0, x1, 0.001), alpha0, beta0), 0)
# single-game stats: https://www.baseball-reference.com/boxes/ANA/ANA201508070.shtml
alpha1 <- 9
beta1 <- 27 - alpha1
p1 <- alpha1/(alpha1 + beta1)
png('gausman-pitching.png')
curve(dbeta(x, alpha0, beta0), xlim=c(0.25,0.37),
      xlab='Probability of hit, walk, homerun', ylab='Density',
      main='Gausman Pitching Stats')
polygon(coord.x, coord.y, col='red') # shade the 90% interval
abline(v=p1, col='blue')             # single-game rate for comparison
dev.off()
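# Optional numeric summary (illustrative): the posterior mean and the interval
# endpoints covered by the shaded region.
alpha0 / (alpha0 + beta0) # posterior mean
c(x0, x1)                 # 90% central interval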

% ==== cran/mudfold: /man/pick.Rd ====
\name{pick}
\alias{pick}
\title{
Transform items to preference binary data.
}
\description{
Function \code{pick} can be used to transform quantitative or ordinal type of variables, into binary form (i.e., \code{0},\code{1}). When \code{byItem=FALSE}, then the underlying idea is that the individual selects those items with the higher preference. This is done through user provided cut-off values, or by assuming a \emph{pick} \code{k} \emph{out of} \code{N} response process, where, each continuous response vector takes a \code{1} at its \code{k} higher values. Dichotomization can be performed row-wise (default) or column-wise.
}
\usage{
pick(data , k=NULL, cutoff=NULL, byItem=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{: A matrix or data frame containing the continuous or discrete responses of \code{nrow(data)} persons/judges
to \code{ncol(data)} items. Missing values in \code{data} are not allowed.
}
\item{k}{: An integer (\eqn{1 \le} \code{k} \eqn{\le} \code{ncol(data)}) that restricts the number of items a person can pick (default \code{k=NULL}). This argument is used if one wants to transform the data into \emph{pick} \code{k} \emph{out of} \code{N} form. If \code{k} is provided by the user, \code{cutoff} should be \code{NULL} and vice versa. By default, this process is applied to the matrix \code{data} row-wise; with \code{byItem=TRUE} the number of picked items is restricted per column instead.
}
\item{cutoff}{:The value(s) that will be used as thresholds. The length of this argument should be equal to 1 (the same threshold for all rows (or columns) of \code{data}) or equal to \code{K} where \code{K=nrow(data)} or \code{K=ncol(data)} when \code{byItem=TRUE}.
}
\item{byItem}{: logical argument. If \code{byItem=TRUE}, the dichotomization is performed column-wise. In the default \code{byItem=FALSE}, the function determines the ones row-wise.
}
}
\details{
Binary transformation of continuous or discrete variables with \eqn{\rho \ge 3} levels. Two different methods are available for the transformation.
The first method uses the argument \code{k} in the \code{pick} function, and assumes a \emph{pick} \code{k} \emph{out of} \code{N} response process. Such type of response processes are met in surveys and questionnaires, in which respondents are asked to pick exactly the \code{k} most preferred items. The value for \code{k} is an integer between 1 and \code{ncol(data)}. By choosing an integer for \code{k}, this function ''picks'' the \code{k} higher values in each row (if \code{byItem=FALSE}) of \code{data}. The \code{k} higher values in each row become 1 and the rest \code{ncol(data)-k} elements are set to 0. Obviously, if \code{k=ncol(data)}, then the resulting matrix will only consists of 1's and no 0's.
The second method is based on thresholding in order to binarize the data. For this method, the user should provide threshold(s) with the parameter \code{cutoff} in the \code{pick} function (default \code{cutoff=NULL}). If one value is provided in the \code{cutoff} parameter, i.e., \code{cutoff=}\eqn{\alpha}, then \eqn{\alpha} is used as threshold in each row \eqn{i} (if \code{byItem=FALSE}) of the data matrix \code{data} such that, any value greater than or equal to \code{cutoff} in row \eqn{i} becomes 1 and 0 else. Additionally, the user can provide row (or column) specific cut off values, i.e., \code{cutoff=}\eqn{\alpha} with \eqn{\alpha=(\alpha_1,...,\alpha_K)} where \eqn{\alpha_i} is the cut-off value for the row or column \eqn{i}. In this case, if \eqn{x_{ij}\ge \alpha_i} then \eqn{x_{ij}=1} and \eqn{x_{ij}=0} else.
The two methods cannot be used simultaneously. Only one of the parameters \code{k} and \code{cutoff} can be different than \code{NULL} each time. If both parameters are equal \code{NULL} (default), then a row specific cut off is determined automatically for each row \eqn{i} of \code{data}, such that, \eqn{\alpha_i= \bar{data_i}}. The dichotomization is performed by row of \code{data}, except the case, \code{byItem=TRUE}.
When the argument \code{k} is used, it can be the case that more than \code{k} values can be picked (i.e., ties). In this case, the choice on which item will be picked is being made after we add a small amount of noise in each observation of row or column \eqn{i}. This is done with the function \code{jitter}.
}
\value{
Binary valued (i.e., 0-1) data with the same dimensions as the input.
}
\section{Warning}{
\strong{!!!} This function should be used with care. Dichotomization may distort the data structure and lead to potential information loss. In the case of polytomous items, the user is suggested to consider polytomous unfolding models that take into account different levels of measurement. \strong{!!!}}
\author{Spyros E. Balafas (auth.), Wim P. Krijnen (auth.), Wendy J. Post (contr.), Ernst C. Wit (auth.)
Maintainer: Spyros E. Balafas (\email{s.balafas@rug.nl})
}
\examples{
\dontrun{
### simulate some data with 3 discrete variables with three levels
### and 1 variable with 4 levels
d1 <- cbind(sample(1:3,20,replace = TRUE),
sample(1:3,20,replace = TRUE,prob = c(0.3,0.3,0.4)),
sample(1:3,20,replace = TRUE,prob = c(0.2,0.4,0.4)),
sample(1:4,20,replace = TRUE,prob = c(.1,.3,.4,.2)))
### apply pick on d1 ###
# binarize at the mean of
# each row and column
d1_rowmean <- pick(d1)
d1_colmean <- pick(d1,byItem = TRUE)
# binarize at the cutoff=2
d1_cut <- pick(d1,cutoff = 2,byItem = TRUE)
# binarize at different cutoffs (per row)
# for example at the median of each row
med_cuts <- apply(d1,1,median)
d1_cuts <- pick(d1,cutoff = med_cuts)
# binarize at different cutoffs (per column)
# for example at the median of each column
med_cuts_col <- apply(d1,2,median)
d1_cuts_col <- pick(d1,cutoff = med_cuts_col,byItem = TRUE)
# binarize at the k=2 higher values
# per row and column
d1_krow <- pick(d1,k = 2)
d1_kcol <- pick(d1,k = 2,byItem = TRUE)
}
}

#### ItoErika/PBDB_Fidelity_app: /Development/Remove_Micro_Trace.R (CC-BY-4.0) ####
# REFINE OUTPUT1
# Load the output from Stage 1 which involved extracting DeepDiveData sentences which contain both a word (or words) indicating the occurrence of fossils, and a candidate unit name.
# Load the stage 1 output table from postgres
# Load required library
library("RPostgreSQL")
# Connect to PostgreSQL
Driver <- dbDriver("PostgreSQL") # Establish database driver
Connection <- dbConnect(Driver, dbname = "labuser", host = "localhost", port = 5432, user = "labuser")
Stage1Output<-dbGetQuery(Connection,"SELECT * FROM pbdb_fidelity.output1_nlp352")
# Remove hits for microfossils and trace fossils within the output sentences
Microfossils<-grep("microfossil", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
TraceFossils<-grep("trace fossil", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
# Remove instances of the words "no fossils" to account for reading errors
NoFossils<-grep(" no fossils", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
LackFossils<-grep("lacks fossils", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
LackOfFossils<-grep("lack of fossils", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
AbsentFossils<-grep("absence of fossils", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
VoidFossils<-grep("void of fossils", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
Correlative<-grep("correlative", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
Equivalent<-grep("equivalent", Stage1Output[,"sentence"], ignore.case=TRUE, perl=TRUE)
# Combine all unwanted row indices (microfossils, trace fossils, absence phrases, correlative/equivalent mentions) into one vector
UnwantedRows<-unique(c(Microfossils,TraceFossils,NoFossils,LackFossils,LackOfFossils,AbsentFossils,VoidFossils,Correlative,Equivalent))
# Remove unwanted sentences from Stage1Output
CleanedOutput<-Stage1Output[-UnwantedRows,]
# Take a random sample of 100 Stage1Output Rows to check accuracy
CleanedSampleOutput1<-CleanedOutput[sample(c(1:nrow(CleanedOutput)),100,replace=FALSE,prob=NULL),]
# Save SampleOutput1 to a folder
write.csv(CleanedSampleOutput1,file="~/Documents/DeepDive/PBDB_Fidelity/R/CleanedSampleOutput1.csv",row.names=FALSE)
# Open the csv in excel or libre office and perform a manual accuracy test
# Renamed "CleanedSampleOutput1_Completed.csv"

#### Softx0/RegresionLineal: /UnderstandingTheLinearRegression.R ####
# In R we use lm() to create a regression model; to inspect the model we use the summary() function.
# To analyze the residuals of the model we can refer to them with the $ accessor, as $resid.
# The residuals are the differences between the prediction and the actual result, so those
# differences need to be analyzed to find ways to improve the regression model (continuous improvement).
# Import the csv
dataset = read.csv("/Users/Mac/Desktop/UNIVERSIDAD/Autodidacta/Data/R/Regresion Lineal/Resources/data-marketing-budget-12mo.csv",
                   header = T, colClasses = c("numeric", "numeric", "numeric"))
# We run the linear regression both simply (one variable) and with multiple independent variables.
# With one variable
simple.fit = lm(Sales~Spend, data = dataset) # create the model
summary(simple.fit) # inspect it
####### We can see it is fit against Spend, the selected predictor; the output
####### includes the residuals and coefficients sections
# With two variables
multi.fit = lm(Sales~ Spend + Month, data = dataset) # create the model
summary(multi.fit) # inspect the model
####### This one reports data in the coefficients section for both variables
#### Why does the residuals section not show more variables? Worth checking...
########## On the other hand, we can see that for both models the F-statistic, the Multiple R-squared
########## and the Adjusted R-squared are very similar, telling us the models are
########## statistically significant, as the ** marks in the signif. codes indicate.
# Pr(>|t|) 0.296 <- this is the significance level
# Residual standard error: the standard deviation of the residuals; the smaller, the better
# Multiple R-squared: shows the amount of variance explained by the model
# Adjusted R-squared: carries more weight when there are many more variables (multiple regression), but measures the same thing
# F-statistic: the F test; it checks whether the weight of at least one variable is significantly different from zero
# p-value: can also be read as a significance value; below 0.05 the model is fine,
# above 0.05 the model is not doing anything.
layout(matrix(c(1,1,2,3),2,2,byrow=T))
#Spend x Residuals Plot
plot(simple.fit$resid~dataset$Spend[order(dataset$Spend)], main="Spend x Residuals\nfor Simple Regression",
     xlab="Marketing Spend", ylab="Residuals")
abline(h=0,lty=2)
#Histogram of Residuals
hist(simple.fit$resid, main="Histogram of Residuals", ylab="Residuals")
#Q-Q Plot
qqnorm(simple.fit$resid)
qqline(simple.fit$resid)

% ==== nzwormgirl/Amy: /man/getShapefile.Rd ====
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getShapefile.R
\name{getShapefile}
\alias{getShapefile}
\title{getShapefile}
\usage{
getShapefile(myShapefile, myDir)
}
\arguments{
\item{myShapefile}{A character string with the shapefile name. Must not end in .shp}
\item{myDir}{The directory path where the shapefile sits. Must end with trailing slash}
}
\description{
This function allows you to open a shapefile using a filename and path
}
\examples{
\dontrun{
# illustrative values; replace with a real shapefile name and directory
getShapefile("my_shapefile", "path/to/dir/")
}
}
\keyword{shapefile}

#### tpetricek/datadiff: /R/tv.R (MIT) ####
#' Total variation distance for two discrete variables (as factors)
#'
#' Compute the total variation distance for samples from two discrete
#' distributions, coded as unordered factors. The result is a number in the
#' (closed) unit interval.
#'
#' @param v1,v2
#' A pair of factors. Both must have at least one non-missing value.
#'
#' @return A number between 0 and 1 inclusive.
#'
#' @export
#'
#' @examples
#' v1 <- sample(1:10, size = 40, replace = TRUE)
#' v2 <- sample(1:8, size = 20, replace = TRUE)
#' tv(as.factor(v1), as.factor(v2))
#'
tv <- function(v1, v2) {
stopifnot(is.factor(v1) && is.factor(v2))
if (sum(!is.na(v1)) == 0 || sum(!is.na(v2)) == 0)
stop("Both arguments must have one or more non-missing values.")
lev1 <- levels(v1)
lev2 <- levels(v2)
# if (length(intersect(lev1, lev2)) == 0)
# return(1.0)
# If v1 & v2 contain character data and their intersection is empty except for
# the empty string, return 1.0.
if (is.character(lev1) && is.character(lev2) &&
identical(intersect(lev1, lev2), character(1)))
return(1.0)
# Align levels of v1 and v2, if necessary.
if (!identical(lev1, lev2)) {
lev <- union(lev1, lev2)
v1 <- factor(v1, levels = lev)
v2 <- factor(v2, levels = lev)
}
nbins <- length(levels(v1))
t1 <- tabulate(v1, nbins = nbins)
t2 <- tabulate(v2, nbins = nbins)
sum(abs(t1/sum(t1) - t2/sum(t2)))/2
}
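# Quick illustration (not part of the package source): fully disjoint level
# sets give the maximum distance 1, identical samples give 0.
# f1 <- factor(c("a", "a", "b")); f2 <- factor(c("c", "d", "d"))
# tv(f1, f2) # 1
# tv(f1, f1) # 0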
# OLD:
# NOTE: Factors with disjoint (but equal number of)
# levels are not distinguished. Also, integer vector arguments are not handled
# correctly (always returns 0 since in that case levels() returns NULL)
#
# Total variation for two discrete variables (as factors)
#
# Compute the total variation for two discrete distributions, coded as
# unordered factors. The two distributions are assumed to have common levels in
# the sense that for each level the internal integer representation is the
# same.
# @param f1,f2 An unordered factor or vector of integers. Note that the function
# \code{tabulate} is used which generates a vector of length equal to the
# maximum integer in the input (\emph{not} the length of the input).
# @return A number between 0 and 1
# tv_aligned <- function(f1, f2) {
# nbins <- length(levels(f1))
# if (nbins != length(levels(f2)))
# stop("arguments to tv_aligned must have the same number of levels")
#
# t1 <- tabulate(f1, nbins = nbins)
# t2 <- tabulate(f2, nbins = nbins)
#
# 0.5 * sum(abs(t1/sum(t1) - t2/sum(t2)))
# }

% ==== SMBaylis/fishSim: /man/mort.Rd (MIT) ====
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fishSim_dev.R
\name{mort}
\alias{mort}
\title{kill some members of the population}
\usage{
mort(
indiv = makeFounders(),
year = "-1",
type = "simple",
maxAge = if (type == "age") length(ageMort) - 1 else Inf,
maxPop = 1000,
mortRate,
ageMort,
stockMort,
ageStockMort
)
}
\arguments{
\item{indiv}{A matrix of individuals, as from makeFounders(), move(), or mate().}
\item{year}{An integer, denoting the year in which we're killing animals}
\item{type}{One of "simple", "flat", "age", "stock", or "ageStock".
If type = "simple" and the living pop > maxPop, individuals are killed at random until
the living pop == maxPop. Can easily be set to never cause extinctions.
If type = "flat", individuals are killed with probability set in mortRate. Generates
an exponential death curve.
If type = "age", individuals are killed with probability for their age set in ageMort.
If type = "stock", individuals are killed with probability for their stock set in
stockMort.
If type = "ageStock", individuals are killed with probability for their age:stock
combination set in ageStockMort.}
\item{maxAge}{Sets an age above which animals *will* be killed before anything else happens. Allows
a short age-specific mortality curve to be set, without checking if there are any
individuals outside the range for each iteration.}
\item{maxPop}{If type = "simple", the population will be reduced to this number, if not already
smaller. See 'type'.}
\item{mortRate}{Numeric mortality rate. See 'type'.}
\item{ageMort}{Numeric vector of mortality rates, one for each age, ordered 0:max(age). See 'type'.}
\item{stockMort}{Numeric vector of mortality rates, one for each stock, ordered 1:max(stock). Note
that stocks are numbered (as in makeFounders() ), not named. Because stocks are
stored as a character vector, stocks are converted via as.numeric() to associate
rates with stocks. This distinction is important in cases with >9 stocks. See
'type'.}
\item{ageStockMort}{A matrix of mortality rates, with age by row and stock by column. See 'ageMort'
and 'stockMort' for structure of rows and columns.}
}
\description{
Members are chosen according to one of several defined mortality structures.
Mortality rates can bring the population to a specified size, or can be a flat probability,
or a probability that depends on age, stock, or age:stock.
}

#### Qosine/Final_Seminar: /Create_simulated_predictors.R ####
########################################################################################
# Author : Pointlogic Team 4, Case Studies in BA&QM
# Description : From an original sample, create a larger simulated population
# by permuting columns (of the original data) independently,
# and then sampling from the permuted columns (with replacement).
# Usage : Create functions, subsequently call simulate_population()
# Function arguments
# 1) path : Path of working directory
# 2) population_size : Desired population size
# 3) seed : Choice of seed (default 200127)
#                            4) target_audience     : Whether to simulate the target or nontarget
#                                                     population (boolean, default TRUE)
#                            5) target1_gender_m    : Whether to set gender of the target audience to male
# (default TRUE)
# 6) min_age : Lower age bound of the target audience, inclusive
# (default 25)
# 7) max_age : Upper age bound of the target audience, inclusive
# (default 34)
# 8) replacement : Whether to sample with replacement
# (default TRUE)
#
########################################################################################
library(dplyr)
read_source_data <- function(path) {
setwd(path)
data <- read.csv("./cleaned_unified_sample.csv")
return(data)
}
split_sample <- function(data,
li_target1,
li_target2) {
# Target group 1
if (li_target1$male==TRUE) {target_gender="male"}
else {target_gender="female"}
target1_sample = data[ ( data["sd_gender"]==target_gender
& data["sd_age"]>=li_target1$min_age
& data["sd_age"]<=li_target1$max_age ), ]
# Target group 2
if ( !is.null(li_target2) ) {
if (li_target2$male==TRUE) {target_gender="male"} else {target_gender="female"}
target2_sample = data[ ( data["sd_gender"]==target_gender
& data["sd_age"]>=li_target2$min_age
& data["sd_age"]<=li_target2$max_age ), ]
} else {target2_sample = NULL}
  nontarget_sample = setdiff(data, target1_sample) # dplyr::setdiff takes exactly two tables
  if (!is.null(target2_sample)) nontarget_sample = setdiff(nontarget_sample, target2_sample)
out = list();
out$target1 = target1_sample; out$target2 = target2_sample
out$nontarget = nontarget_sample
return (out)
}
sum_contact_vars <- function(subsample) {
# Extract columns of interest
df_contacts = subsample[,93:182]
# Sum across categories
v_audiosum = rowSums(df_contacts[,1:5])
v_digitalsum = rowSums(df_contacts[,6:12])
v_programsum = df_contacts[,13]
v_tvsum = rowSums(df_contacts[,14:81])
v_vodsum = rowSums(df_contacts[,82:89])
v_yousum = df_contacts[,90]
# Return single dataframe
m_contact_vars <- cbind(v_audiosum, v_digitalsum, v_programsum, v_tvsum,
v_vodsum, v_yousum)
return(m_contact_vars)
}
draw_new_samples <- function(original_data,
population_size,
user_replace_choice = FALSE) {
population = matrix( 0, population_size, ncol(original_data) ) # Allocate memory
colnames(population) = c("v_audiosum", "v_digitalsum", "v_programsum",
"v_tvsum", "v_vodsum", "v_yousum","v_male","v_age3544","v_age4554","v_age55plus","v_havechildren","v_etn_cauc","v_income030","v_income3050","v_income5075","v_income75100","v_income100150","v_educ2","v_educ3","v_married","v_single","v_seperated" )
# Permute columns independently
for( i in 1:ncol(original_data) ) {
population[,i] = sample(original_data[,i],
size = population_size,
replace = user_replace_choice)
}
return(population)
}
## MAIN SCRIPT ##
simulate_population <- function(path,
population_size,
seed = 200127,
target_audience=TRUE,
target1_gender_m,
target1_min_age,
target1_max_age,
target2_gender_m=NULL,
target2_min_age=NULL,
target2_max_age=NULL,
replacement=TRUE) {
data = read_source_data(path)
li_target1 = list(); li_target1$male = target1_gender_m
li_target1$min_age = target1_min_age; li_target1$max_age = target1_max_age
li_target2 = NULL
if(!all( sapply(list(target2_gender_m,
target2_min_age,
target2_max_age), is.null ))) {
li_target2 = list(); li_target2$male = target2_gender_m
li_target2$min_age = target2_min_age; li_target2$max_age = target2_max_age
}
subsamples = split_sample(data, li_target1, li_target2)
  target_contacts = sum_contact_vars(subsamples$target1)
nontarget_contacts = sum_contact_vars(subsamples$nontarget)
#control variables
v_male <- (ifelse(data$sd_gender == "male", 1,0))
v_havechildren <- (ifelse(data$sd_havechildren == "yes", 1,0))
v_age3544 <- (ifelse(data$sd_age <=44 & data$sd_age >=35, 1,0))
v_age4554 <- (ifelse(data$sd_age <=54 & data$sd_age >=45, 1,0))
v_age55plus <- (ifelse(data$sd_age >=55, 1,0))
v_employed <- (ifelse(data$sd_employment == "employed", 1,0))
v_income030 <- (ifelse(data$sd_householdincome == "[0,30)", 1,0))
v_income3050 <- (ifelse(data$sd_householdincome == "[30,50)", 1,0))
v_income5075 <- (ifelse(data$sd_householdincome == "[50,75)", 1,0))
v_income75100 <- (ifelse(data$sd_householdincome == "[75,100)", 1,0))
v_income100150 <- (ifelse(data$sd_householdincome == "[100,150)", 1,0))
v_educ2 <- (ifelse(data$sd_education == "secondary", 1,0))
v_educ3 <- (ifelse(data$sd_education == "tertiary", 1,0))
v_etn_cauc <- (ifelse(data$sd_ethnicity_caucasian == "yes", 1,0))
v_etn_afric <- (ifelse(data$sd_ethnicity_africanamerican == "yes", 1,0))
v_etn_hisp <- (ifelse(data$sd_ethnicity_hispanic == "yes", 1,0))
v_etn_asian <- (ifelse(data$sd_ethnicity_asian == "yes", 1,0))
v_etn_native <- (ifelse(data$sd_ethnicity_nativeamerican == "yes", 1,0))
v_etn_other <- (ifelse(data$sd_ethnicity_other == "yes", 1,0))
v_married <- (ifelse(data$sd_maritalstatus == "married", 1,0))
v_single <- (ifelse(data$sd_maritalstatus == "single", 1,0))
v_seperated <- (ifelse(data$sd_maritalstatus == "seperated", 1,0))
total_demographics <- split_sample(cbind(data, v_male,
v_age3544, v_age4554, v_age55plus, v_havechildren, v_etn_cauc,
v_income030, v_income3050,v_income5075, v_income75100,v_income100150, v_educ2, v_educ3,v_married, v_single,v_seperated),li_target1, li_target2)
  n_controls <- 16 # number of control dummies bound on above
  target_demographics = total_demographics$target1[, (ncol(data)+1):(ncol(data)+n_controls)]
  nontarget_demographics = total_demographics$nontarget[, (ncol(data)+1):(ncol(data)+n_controls)]
# target_demographics = cbind(ifelse(sumbsample$target[,3]=="male",1,0))
# nontarget_demographics = cbind(ifelse(sumbsample$nontarget[,3]=="male",1,0))
# colnames(target_demographics) = c("male", "25-34","35-44","45-54", "55-99" )
# colnames(nontarget_demographics) = c("male", "25-34","35-44","45-54", "55-99" )
#print(head(target_contacts))
if (target_audience==TRUE) {population = draw_new_samples(cbind(target_contacts,target_demographics) ,
population_size,
replacement)}
else {population = draw_new_samples(cbind(nontarget_contacts, nontarget_demographics),
population_size,
replacement)}
return(population)
}
# Set path
path = "."
#path = "D:/brian/Documents/EUR/19-20 Business Analytics and QM/Block 3/Seminar Case Studies/Git/Seminar"
simulated_target_predictors = simulate_population(path,
5*10^4,
target_audience = TRUE,
target1_gender_m = TRUE,
target1_min_age = 25,
target1_max_age = 34)
simulated_target_predictors <- simulated_target_predictors[, -(7:10)] # drop the gender/age dummies that define this audience
save(simulated_target_predictors, file = "simulated_target_predictors_w_control.RData")
simulated_nontarget_predictors = simulate_population(path,
5*10^4,
target_audience = FALSE,
target1_gender_m = TRUE,
target1_min_age = 25,
target1_max_age = 34)
save(simulated_nontarget_predictors, file = "simulated_nontarget_predictors_w_control.RData")
# Create overview of demographic groups over which we can loop
demographic_groups = rbind(c("Male", 25, 34),
c("Male", 35, 44),
c("Male", 45, 54),
c("Male", 55, 99),
c("Female", 25, 34),
c("Female", 35, 44),
c("Female", 45, 54),
c("Female", 55, 99))
datafile_names = c("male_25_34", "male_35_44", "male_45_54", "male_55_99",
"female_25_34", "female_35_44", "female_45_54", "female_55_99")
no_obs_to_simulate = 15*10^3
for (i in 1:nrow(demographic_groups)) {
print(demographic_groups[i,])
if (demographic_groups[i, 1] == "Male") {male_dummy = TRUE} else {male_dummy = FALSE}
simulated_population = simulate_population(path,
no_obs_to_simulate,
target_audience = TRUE,
target1_gender_m = male_dummy,
target1_min_age = demographic_groups[i, 2],
target1_max_age = demographic_groups[i, 3])
datafile_string = paste(datafile_names[i], "_2.Rds", sep="")
saveRDS(simulated_population, file = datafile_string)
print("Simulated successfully")
}

#### gomugomu0034/Shiny-App: /server.R ####
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Shiny server function: fits two different models on the mtcars dataset
# model 1 (a linear model) fits mileage to horsepower,
# model 2 (a linear model) fits mileage to horsepower, cylinder count and transmission mode
# These models are used to predict mileage for the desired input
#
shinyServer(function(input, output) {
model1 <- lm(mpg ~ hp, data = mtcars)
model2 <- lm(mpg ~ hp + factor(cyl) + factor(am), data = mtcars)
model1pred <- reactive({
hpInput <- input$hp
predict(model1, newdata = data.frame(hp = hpInput))
})
model2pred <- reactive({
hpInput <- input$hp
    # in mtcars, am = 0 codes automatic and am = 1 manual transmission
    if(input$am == "Automatic") {amInput <- 0}
    else {amInput <- 1}
cylInput <- input$cyl
predict(model2, newdata = data.frame(hp = hpInput, cyl = cylInput, am = amInput))
})
output$Plot1 <- renderPlot({
# renderplot generates the plot which will be displayed in the main panel of the application
# on the x axis of graph data on horsepower is represented
# on the y axis of the graph, data on mileage is represented
# There are two lines in the graph corresponding to respective linear models they represent
hpInput <- input$hp
plot(mtcars$hp, mtcars$mpg, xlab = "Horsepower", ylab = "Miles per Gallon",
pch = 16, xlim = c(50,350), ylim = c(10,35))
abline(model1,col = "red", lwd = 2)
if(input$showModel2){
abline(model2, col = "blue", lwd = 2)
}
legend(250,35
, c("Model 1 Prediction", "Model 2 Prediction"),
pch = 16, col = c("red", "blue"), bty = "n", cex = 1.2)
points(hpInput, model1pred(), col = "red", pch = 20, cex = 2)
points(hpInput, model2pred(), col = "blue", pch = 20, cex = 2)
})
# Text output is rendered here
# takes the prediction output from bpth models and pass them to the user interface as text
output$pred1 <- renderText({model1pred()})
output$pred2 <- renderText({model2pred()})
})
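# A hypothetical companion ui.R sketch (not from the original repo), showing
# the inputs this server expects: input$hp, input$cyl, input$am,
# input$showModel2, and the outputs Plot1, pred1 and pred2.
# library(shiny)
# shinyUI(fluidPage(
#   titlePanel("Predict MPG from horsepower"),
#   sidebarLayout(
#     sidebarPanel(
#       sliderInput("hp", "Horsepower:", min = 50, max = 350, value = 150),
#       selectInput("cyl", "Cylinders:", choices = c(4, 6, 8)),
#       radioButtons("am", "Transmission:", choices = c("Automatic", "Manual")),
#       checkboxInput("showModel2", "Show model 2 prediction", value = TRUE)
#     ),
#     mainPanel(
#       plotOutput("Plot1"),
#       h4("Model 1 prediction:"), textOutput("pred1"),
#       h4("Model 2 prediction:"), textOutput("pred2")
#     )
#   )
# ))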

#### cran/microbenchmark: /R/boxplot.R ####
#' Boxplot of \code{microbenchmark} timings.
#'
#' @param x A \code{microbenchmark} object.
#' @param unit Unit in which the results be plotted.
#' @param log Should times be plotted on log scale?
#' @param xlab X axis label.
#' @param ylab Y axis label.
#' @param horizontal Switch X and Y axes.
#' @param ... Passed on to boxplot.formula.
#'
#' @method boxplot microbenchmark
#'
#' @author Olaf Mersmann
boxplot.microbenchmark <- function(x, unit="t", log=TRUE, xlab, ylab,
horizontal=FALSE, ...) {
x$time <- convert_to_unit(x$time, unit)
timeunits <- c("ns", "us", "ms", "s", "t")
frequnits <- c("hz", "khz", "mhz", "eps", "f")
if (missing(xlab))
xlab <- "Expression"
if (missing(ylab)) {
ylab <- if (log) {
if (unit %in% timeunits)
paste("log(time) [", unit, "]", sep="")
else if (unit %in% frequnits)
paste("log(frequency) [", unit, "]", sep="")
else
paste("log(", unit, ")", sep="")
} else {
if (unit %in% timeunits)
paste("time [", unit, "]", sep="")
else if (unit %in% frequnits)
paste("frequency [", unit, "]", sep="")
else if (unit == "eps")
"evaluations per second [Hz]"
else
unit
}
}
if (log) {
    # min time cannot be 0 when plotting on a log scale
ylim <- pmax(1, range(x$time))
} else {
ylim <- NULL
}
if (horizontal) {
ll <- if (log) "x" else ""
boxplot(time ~ expr, data=x, xlab=ylab, ylab=xlab, log=ll, ylim=ylim,
horizontal=TRUE, ...)
} else {
ll <- if (log) "y" else ""
boxplot(time ~ expr, data=x, xlab=xlab, ylab=ylab, log=ll, ylim=ylim, ...)
}
}
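## A minimal usage sketch (illustrative; run interactively with the package attached):
## library(microbenchmark)
## mb <- microbenchmark(sum(1:1e4), cumsum(1:1e4), times = 50L)
## boxplot(mb)                           # log-scaled timings per expression
## boxplot(mb, unit = "us", log = FALSE)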

#### matejbasa2/R-BasketballScorePrediction: /help/Basa_Bleiveis.R ####
mae <- function(observed, predicted)
{
mean(abs(observed - predicted))
}
rmae <- function(observed, predicted, mean.val)
{
sum(abs(observed - predicted)) / sum(abs(observed - mean.val))
}
mse <- function(observed, predicted)
{
mean((observed - predicted)^2)
}
rmse <- function(observed, predicted, mean.val)
{
sum((observed - predicted)^2)/sum((observed - mean.val)^2)
}
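# Quick sanity check of the error metrics on toy numbers (illustrative only)
obs <- c(100, 110, 95); pred <- c(102, 108, 99)
mae(obs, pred)              # mean absolute error, here 8/3
rmae(obs, pred, mean(obs))  # MAE relative to always predicting the mean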
setwd("C:/Users/Non5ens3/Desktop/UI")
install.packages("e1071")
install.packages("CORElearn")
install.packages("rpart")
install.packages("randomForest")
install.packages("ggplot2")
library(e1071)
library(rpart)
library(ggplot2)
data(mpg, package="ggplot2")
library(randomForest)
library(CORElearn)
# read the data first, then split into training / validation / test sets
vsi_podatki <- read.table("regular.txt", header = T, sep=",")
ucna <- vsi_podatki[1:1230,]
validacijska <- vsi_podatki[1231:2460,]
testna <- vsi_podatki[2461:3690,]
# drop identifier columns that should not enter the models;
# HOME and AWAY are kept because the per-team averages and the
# visualisations further below still need them
ucna$SEASON <- NULL
ucna$DATE <- NULL
validacijska$SEASON <- NULL
validacijska$DATE <- NULL
testna$SEASON <- NULL
testna$DATE <- NULL
# average of a team's made free throws per game
# (note: the datum and atribut arguments are not used yet; HFTM/AFTM are hard-coded)
povprecje_pred_tekmo <- function(ime_ekipe, datum, atribut){
  st_gameov <- 0
  a <- 0
  for (i in 1:length(ucna$HFTM))
  {
    if (ucna$HOME[i] == ime_ekipe)
    {
      st_gameov <- st_gameov + 1
      a <- a + ucna$HFTM[i]
    }
  }
  b <- 0
  for (j in 1:length(ucna$AFTM))
  {
    if (ucna$AWAY[j] == ime_ekipe)
    {
      st_gameov <- st_gameov + 1
      b <- b + ucna$AFTM[j]
    }
  }
  avg_a <- (a+b)/st_gameov
  return(avg_a)
}
abc <- povprecje_pred_tekmo("LAL", "10-10-2014", "HPTS")
observed <- validacijska$HPTS # evaluate on the same set we predict on
lm.model <- lm(HPTS ~ . , data = ucna)
predicted <- predict(lm.model, validacijska)
summary(observed)
mae(observed, predicted)  # Mean Absolute Error
mse(observed, predicted)  # Mean Squared Error
rmse(observed, predicted, mean(ucna$HPTS)) # relative MSE against the training mean
rt.model <- rpart(HPTS ~ . , ucna)
predicted <- predict(rt.model, validacijska)
plot(rt.model);text(rt.model, pretty = 0)
mae(observed, predicted)
rmae(observed, predicted, mean(ucna$HPTS))
# REGRESSION TREE (rpart)
rt.model <- rpart(HPTS ~ . , data = ucna, minsplit=100)
plot(rt.model);text(rt.model, pretty = 0)
printcp(rt.model) # cross-validated error (xerror) for each subtree size
rt.model2 <- prune(rt.model, cp = 0.02) # cp = 0.02 performed better than the alternatives tried
plot(rt.model2);text(rt.model2, pretty = 0)
observed <- testna$HPTS # the pruned tree is evaluated on the test set
predicted <- predict(rt.model2, testna)
mae(observed, predicted)
rmae(observed, predicted, mean(ucna$HPTS))
# REGRESSION TREE (CORElearn)
rt.core <- CoreModel(HPTS ~ . , data = ucna, model = 'regTree', modelTypeReg=1)
observed <- validacijska$HPTS # back to the validation set
predicted <- predict(rt.core, validacijska)
mae(observed, predicted)
rmae(observed, predicted, mean(ucna$HPTS))
# note: CoreModel 'bayes' expects a categorical target; HPTS is numeric,
# so it would need to be discretised into classes before this block can work
cm.nb <- CoreModel(HPTS ~ . , data = ucna, model = 'bayes')
plot(cm.nb)
predicted <- predict(cm.nb, validacijska, type="class")
mae(observed, predicted)
rmae(observed, predicted, mean(ucna$HPTS))
# RANDOM FOREST
observed <- testna$HPTS
rf.model <- randomForest(HPTS ~ H3PM+H2PM+HFTM, data = ucna)
plot(rf.model) # error vs. number of trees
predicted <- predict(rf.model, testna)
mae(observed, predicted)
rmae(observed, predicted, mean(ucna$HPTS))
# Naive Bayes (classification accuracy; HPTS would have to be a factor for this to be meaningful)
CA <- function(observed, predicted)
{
  t <- table(observed, predicted)
  sum(diag(t)) / sum(t)
}
obsMat <- model.matrix(~ HPTS - 1, validacijska) # indicator matrix of the observed classes
observed <- validacijska$HPTS
nb <- naiveBayes(HPTS ~ H3PM+H2PM+HFTM, data = ucna)
predicted <- predict(nb, validacijska, type="class")
CA(observed, predicted)
predMat <- predict(nb, validacijska, type = "raw")
brier.score(obsMat, predMat) # brier.score is a helper defined elsewhere
errorest(HPTS ~ H3PM+H2PM+HFTM, data=ucna, model = naiveBayes, predict = mypredict.generic) # needs library(ipred) and a mypredict.generic helper
# VISUALIZATION 1
ekipe <- list(ucna$HOME)
ekipe <- unique(unlist(ekipe))
avg_HFTM <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
HFTM <- 0
j <- 0
for (j in 1:length(ucna$HFTM))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HFTM <- HFTM + ucna$HFTM[j]
}
}
j <- 0
for (j in 1:length(ucna$AFTM))
{
if (ucna$AWAY[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HFTM <- HFTM + ucna$AFTM[j]
}
}
avg_HFTM[k] <- HFTM/st_gameov
k <- k+1
}
print(avg_HFTM)
avg_H2PM <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe)){
st_gameov <- 0
H2PM <- 0
for (j in 1:length(ucna$H2PM))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
H2PM <- H2PM + ucna$H2PM[j]
}
}
for (j in 1:length(ucna$A2PM))
{
if (ucna$AWAY[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
H2PM <- H2PM + ucna$A2PM[j]
}
}
avg_H2PM[k] <- (H2PM)*2/st_gameov
k <- k+1
}
print(avg_H2PM)
avg_H3PM <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
H3PM <- 0
for (j in 1:length(ucna$H3PM))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
H3PM <- H3PM + ucna$H3PM[j]
}
}
for (j in 1:length(ucna$A3PM))
{
if (ucna$AWAY[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
H3PM <- H3PM + ucna$A3PM[j]
}
}
avg_H3PM[k] <- (H3PM*3)/st_gameov
k <- k+1
}
print(avg_H3PM)
tocke <- data.frame(avg_HFTM, avg_H2PM, avg_H3PM)
par(mar=c(5, 4, 4, 2) + 0.1)
barplot(t(tocke), main="Ekipa/Povprecne Tocke", ylab = "AverageTocke", col=heat.colors(3), names.arg = ekipe, space=0.1, cex.axis=1, las=2, cex = 0.7, ylim = c(0,130))
legend(29, 150, names(tocke), cex=1, fill=heat.colors(3));
# VISUALIZATION 2
ekipe <- list(ucna$HOME)
ekipe <- unique(unlist(ekipe))
avg_HFTA <- numeric(length(ekipe))
avg_HFTM <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
HFTA <- 0
HFTM <- 0
j <- 0
for (j in 1:length(ucna$HFTA))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HFTA <- HFTA + ucna$HFTA[j]
HFTM <- HFTM + ucna$HFTM[j]
}
}
  j <- 0
  for (j in 1:length(ucna$AFTA))
  {
    if (ucna$AWAY[j] == ekipe[i])
    {
      st_gameov <- st_gameov + 1
      HFTA <- HFTA + ucna$AFTA[j] # away games use the away columns (AFTA/AFTM)
      HFTM <- HFTM + ucna$AFTM[j]
    }
  }
avg_HFTA[k] <- HFTA/st_gameov
avg_HFTM[k] <- HFTM/st_gameov
k <- k+1
}
print(avg_HFTA)
print(avg_HFTM)
avg_H2PA <- numeric(length(ekipe))
avg_H2PM <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
H2PA <- 0
H2PM <- 0
j <- 0
for (j in 1:length(ucna$H2PA))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
H2PA <- H2PA + ucna$H2PA[j]
H2PM <- H2PM + ucna$H2PM[j]
}
}
  j <- 0
  for (j in 1:length(ucna$A2PA))
  {
    if (ucna$AWAY[j] == ekipe[i])
    {
      st_gameov <- st_gameov + 1
      H2PA <- H2PA + ucna$A2PA[j] # away games use the away columns (A2PA/A2PM)
      H2PM <- H2PM + ucna$A2PM[j]
    }
  }
avg_H2PA[k] <- H2PA/st_gameov
avg_H2PM[k] <- H2PM/st_gameov
k <- k+1
}
print(avg_H2PA)
print(avg_H2PM)
avg_H3PA <- numeric(length(ekipe))
avg_H3PM <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
H3PA <- 0
H3PM <- 0
j <- 0
for (j in 1:length(ucna$H3PA))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
H3PA <- H3PA + ucna$H3PA[j]
H3PM <- H3PM + ucna$H3PM[j]
}
}
  j <- 0
  for (j in 1:length(ucna$A3PA))
  {
    if (ucna$AWAY[j] == ekipe[i])
    {
      st_gameov <- st_gameov + 1
      H3PA <- H3PA + ucna$A3PA[j] # away games use the away columns (A3PA/A3PM)
      H3PM <- H3PM + ucna$A3PM[j]
    }
  }
avg_H3PA[k] <- H3PA/st_gameov
avg_H3PM[k] <- H3PM/st_gameov
k <- k+1
}
print(avg_H3PA)
print(avg_H3PM)
ASA = avg_HFTA + avg_H2PA + avg_H3PA
ASM = avg_HFTM + avg_H2PM + avg_H3PM
tocke <- data.frame((ASM/ASA), (ASA-ASM)/ASA)
par(mar=c(5, 4, 4, 2) + 0.1)
barplot(t(tocke), main="Povprecna procentna uspesnost meta neglede na to iz kje je",ylab = "AverageTocke", col=heat.colors(2), names.arg = ekipe, space=0.1, cex.axis=1, las=2, cex = 0.7)
legend(29, 150, names(tocke), cex=1, fill=heat.colors(2));
Average_Free_Throws_Made <- avg_HFTM/ASA
Average_Free_Throws_Missed <- (avg_HFTA-avg_HFTM)/ASA
Average_2_Pointers_Made <- avg_H2PM/ASA
Average_2_Pointers_Missed <- (avg_H2PA-avg_H2PM)/ASA
Average_3_Pointers_Made <-avg_H3PM/ASA
Average_3_Pointers_Missed <- (avg_H3PA-avg_H3PM)/ASA
tocke <- data.frame(Average_Free_Throws_Made,Average_2_Pointers_Made,Average_3_Pointers_Made, Average_Free_Throws_Missed,Average_2_Pointers_Missed,Average_3_Pointers_Missed)
par(mar=c(5, 4, 4, 2) + 0.1)
par(xpd=T, mar=par()$mar+c(5,0,0,0))
barplot(t(tocke), main="Povprecna procentna uspesnost meta neglede na to iz kje je", ylab = "AverageTocke", col=c('#ec0909', '#1bc81d', '#1e23c0', '#f95d5d', '#71f073', '#5d61e7'), names.arg = ekipe, space=0.2, cex.axis=1, las=2, cex = 0.7)
legend(-5, -0.17, names(tocke), cex=1, fill=c('#ec0909', '#1bc81d', '#1e23c0', '#f95d5d', '#71f073', '#5d61e7'));
# VISUALIZATION 3
ekipe <- list(ucna$HOME)
ekipe <- unique(unlist(ekipe))
avg_HSTL <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
HSTL <- 0
j <- 0
for (j in 1:length(ucna$HSTL))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HSTL <- HSTL + ucna$HSTL[j]
}
}
j <- 0
for (j in 1:length(ucna$ASTL))
{
if (ucna$AWAY[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HSTL <- HSTL + ucna$ASTL[j]
}
}
avg_HSTL[k] <- HSTL/st_gameov
k <- k+1
}
print(avg_HSTL)
avg_HDRB <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe)){
st_gameov <- 0
HDRB <- 0
for (j in 1:length(ucna$HDRB))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HDRB <- HDRB + ucna$HDRB[j]
}
}
for (j in 1:length(ucna$ADRB))
{
if (ucna$AWAY[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HDRB <- HDRB + ucna$ADRB[j]
}
}
avg_HDRB[k] <- HDRB/st_gameov
k <- k+1
}
print(avg_HDRB)
avg_HORB <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
HORB <- 0
for (j in 1:length(ucna$HORB))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HORB <- HORB + ucna$HORB[j]
}
}
for (j in 1:length(ucna$AORB))
{
if (ucna$AWAY[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HORB <- HORB + ucna$AORB[j]
}
}
avg_HORB[k] <- HORB/st_gameov
k <- k+1
}
print(avg_HORB)
avg_HPTS <- numeric(length(ekipe))
k <- 1
for (i in 1:length(ekipe))
{
st_gameov <- 0
HPTS <- 0
j <- 0
for (j in 1:length(ucna$HPTS))
{
if (ucna$HOME[j] == ekipe[i])
{
st_gameov <- st_gameov + 1
HPTS <- HPTS + ucna$HPTS[j]
}
}
avg_HPTS[k] <- HPTS/st_gameov
k <- k+1
}
print(avg_HPTS)
xx = ekipe
yy = sort.int(avg_HPTS/avg_HSTL) # points per steal, sorted (xx/yy currently unused)
ratio <- (avg_HFTM+avg_H2PM+avg_H3PM)/ASA
plot(x=ASA, y=ratio, main="Share of made shots vs. total attempts", las=2)

#### shivika3390/Getting_and_cleaning_data_course_project: /run_analysis.R ####
##Download and unzip the dataset
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
##Reading the files
x_test <- read.table(file.path(path_rf, "test", "X_test.txt"), header=FALSE)
y_test <- read.table(file.path(path_rf, "test", "Y_test.txt"), header=FALSE)
subject_test <- read.table(file.path(path_rf, "test", "subject_test.txt"), header=FALSE)
x_train <- read.table(file.path(path_rf, "train", "X_train.txt"), header=FALSE)
y_train <- read.table(file.path(path_rf, "train", "Y_train.txt"), header=FALSE)
subject_train <- read.table(file.path(path_rf, "train", "subject_train.txt"), header=FALSE)
##1. Merging and creating one data set
x_data <- rbind(x_train, x_test)
features <- read.table("./data/UCI HAR Dataset/features.txt", header=FALSE, quote="\"")
names(x_data) <- features$V2
y_data <- rbind(y_train, y_test)
names(y_data) <- c("Activity")
subject_data <- rbind(subject_train, subject_test)
names(subject_data) <- c("Subject")
##Merging columns to get the data frame
combine <- cbind(subject_data, y_data)
final_data <- cbind(x_data, combine)
##2. Extract only the measurements on the mean and std for each measurement
subset_data <-final_data[,grepl("Subject|Activity|mean\\(\\)|std\\(\\)", names(final_data))]
##Get columns with only mean() or std() in their names
mean_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
##subset and correct the desired columns and then correct the column name
x_data <- x_data[, mean_std_features]
names(x_data) <- features[mean_std_features, 2]
##3. Use descriptive activity names to name the activities in the data set
activities <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
##Update the values with correct activity name and then correct the column name
y_data[,1] <- activities[y_data[,1],2]
##4. Appropriately labels the data set with descriptive variable names
names(x_data)<-gsub("^t", "time", names(x_data))
names(x_data)<-gsub("^f", "frequency", names(x_data))
names(x_data)<-gsub("Acc", "Accelerometer", names(x_data))
names(x_data)<-gsub("Gyro", "Gyroscope", names(x_data))
names(x_data)<-gsub("Mag", "Magnitude", names(x_data))
names(x_data)<-gsub("BodyBody", "Body", names(x_data))
names(x_data)<-gsub("-mean", "Mean", names(x_data))
names(x_data)<-gsub("-std", "Std", names(x_data))
names(x_data)<-gsub("\\()", "", names(x_data))
##Bind all data into one single data set
all_data <-cbind(x_data, y_data, subject_data)
##5. Create a second independent tidy dataset with the average of each variable
##for each activity and each subject
library(plyr) # provides ddply
averages_data <- ddply(all_data, .(Subject, Activity), function(x) colMeans(x[, 1:66]))
write.table(averages_data, "tidy_data.txt", row.name=FALSE)
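## Equivalent with dplyr instead of plyr (alternative shown for reference):
## library(dplyr)
## averages_data <- all_data %>%
##   group_by(Subject, Activity) %>%
##   summarise(across(everything(), mean))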

#### surayaaramli/typeRrh: /data/genthat_extracted_code/crossdes/examples/williams.BIB.Rd.R ####
library(crossdes)
### Name: williams.BIB
### Title: Construction of Carryover Balanced Designs Based on Balanced
### Incomplete Block Designs
### Aliases: williams.BIB
### Keywords: design
### ** Examples
d <- matrix( rep(1:3,each=2), ncol=2)
# # check for balance
# isGYD(d)
williams.BIB(d)

% ==== huerqiang/GeoTcgaData: /man/Merge_methy_tcga.Rd ====
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Merge_methylation.R
\name{Merge_methy_tcga}
\alias{Merge_methy_tcga}
\title{Merge methylation data downloaded from TCGA}
\usage{
Merge_methy_tcga(dirr = NULL)
}
\arguments{
\item{dirr}{a string giving the directory of methylation data downloaded from TCGA
using the GDC tool}
}
\value{
a matrix, a combined methylation expression spectrum matrix
}
\description{
When the methylation data is downloaded from TCGA,
each sample is saved in a folder, which contains the methylation value file
and the descriptive file. This function directly
extracts and consolidates the data from all folders.
}
\examples{
merge_result <- Merge_methy_tcga(system.file(file.path("extdata", "methy"),
package = "GeoTcgaData"))
}
## ---- dmolloy3/AFL :: Scraper.R ----
library(tidyverse)
library(rvest)
library(stringr)
#Import and Tidy all games
all.games <- read_table("All Games.txt", skip = 2, col_names = FALSE) %>%
select(date = X2, round = X3, home.team = X4, home.score = X5, away.team = X6, away.score = X7, venue = X8)
all.games$date <- parse_date(all.games$date, "%d-%b-%Y")
all.games$home.team <- factor(all.games$home.team)
all.games$away.team <- factor(all.games$away.team)
all.games$venue <- factor(all.games$venue)
home.scores <- all.games$home.score %>% str_split("\\.", simplify = TRUE) %>% as_tibble() %>% select(home.goals = V1, home.behinds = V2, home.total = V3)
away.scores <- all.games$away.score %>% str_split("\\.", simplify = TRUE) %>% as_tibble() %>% select(away.goals = V1, away.behinds = V2, away.total = V3)
all.games <- bind_cols(all.games, home.scores)
all.games <- bind_cols(all.games, away.scores)
all.games <- all.games %>%
  select(date, round, home.team, home.goals, home.behinds, home.total, away.team, away.goals, away.behinds, away.total, venue) %>%
  mutate_at(vars(ends_with("goals")), funs(as.integer)) %>%
  mutate_at(vars(ends_with("behinds")), funs(as.integer)) %>%
  mutate_at(vars(ends_with("total")), funs(as.integer)) %>%
  mutate(home.win = if_else(home.total > away.total, 1, 0),
         # a draw counts as half a win; otherwise keep the 0/1 result from above
         home.win = if_else(home.total == away.total, 0.5, home.win))
rm(list=setdiff(ls(), "all.games"))
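## Quick sanity-check sketch using the columns built above: home-ground win
## rate by team (draws already counted as 0.5).
home.form <- all.games %>%
  group_by(home.team) %>%
  summarise(games = n(), win.rate = mean(home.win)) %>%
  arrange(desc(win.rate))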
% ---- wuwill/XGR :: man/xClassifyPerf.Rd ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xClassifyPerf.r
\name{xClassifyPerf}
\alias{xClassifyPerf}
\title{Function to evaluate the prediction performance via ROC and Precision-Recall (PR) analysis}
\usage{
xClassifyPerf(prediction, GSP, GSN, rescale = F, plot = c("none",
"ROC", "PR"), highlight = F, verbose = TRUE)
}
\arguments{
\item{prediction}{a data frame containing predictions along with
predictive scores. It has two columns: 1st column for subjects, 2nd
column for predictive scores on subjects}
\item{GSP}{a vector containing Gold Standard Positives (GSP)}
\item{GSN}{a vector containing Gold Standard Negatives (GSN)}
\item{rescale}{logical to indicate whether to linearly rescale
predictive scores for GSP/GSN to the range [0,1]. By default, it sets
to false}
\item{plot}{the way to plot performance curve. It can be 'none' for no
curve returned, 'ROC' for ROC curve, and 'PR' for PR curve.}
\item{highlight}{logical to indicate whether a dot is highlighted. It
only works when plot is drawn. When true, the maximum accuracy
highlighted in ROC curve, and the Fmax highlighted in PR curve. By
default, it sets to false}
\item{verbose}{logical to indicate whether the messages will be
displayed in the screen. By default, it sets to TRUE for display}
}
\value{
an object of class "pPerf", a list with following components:
\itemize{
\item{\code{data}: a data frame with 8 columns, including 4 performance
measures ('Accuracy', 'Precision', 'Recall' and 'Specificity'), 'name'
(subjects), 'pred' (predictive scores), 'label' (1 for GSP and 0 for
GSN), 'corrected' (corrected/transformed predictive scores, always the
higher the better)}
\item{\code{auroc}: a scalar value for ROC AUC}
\item{\code{fmax}: a scalar value for maximum F-measure}
\item{\code{amax}: a scalar value for maximum accuracy}
\item{\code{direction}: '+' (the higher score the better prediction)
and '-' (the higher score the worse prediction)}
\item{\code{gp}: a ggplot object (if plotted) or NULL}
\item{\code{Pred_obj}: a ROCR prediction-class object (potentially used
for calculating other performance measures)}
}
}
\description{
\code{xClassifyPerf} is supposed to assess the prediction performance
via Receiver Operating Characteristic (ROC) and Precision-Recall (PR)
analysis. It requires three inputs: 1) Gold Standard Positive (GSP)
targets; 2) Gold Standard Negative (GSN) targets; 3) prediction
containing predictive scores on subjects. It returns an object of class
"pPerf".
}
\note{
AUC: the area under ROC
F-measure: the maximum of a harmonic mean between precision and recall
along PR curve
}
\examples{
\dontrun{
# Load the library
library(XGR)
}
RData.location <- "http://galahad.well.ox.ac.uk/bigdata"
\dontrun{
pPerf <- xClassifyPerf(prediction, GSP, GSN)
}
}
## ---- eugejoh/edatools :: R/install_packs.R ----
#' Install Packages
#'
#' This wrapper function checks whether specified packages are installed, installs
#' any that are missing, and then loads all of them.
#'
#' @param pkg \code{character} string of package names to install or load.
#'
#' @return loads packages
#' @export
#'
#' @importFrom utils installed.packages install.packages
#'
#'
#' @examples
#'
#' \donttest{
#' packs <- c("utils", "ggplot2")
#'
#' install_packs(packs)
#' }
install_packs <- function(pkg) {
  if (!is.character(pkg)) stop("pkg must be character type")
  # packages requested but not yet installed
  new_pkg <- pkg[!(pkg %in% utils::installed.packages()[, "Package"])]
  if (length(new_pkg)) {
    utils::install.packages(new_pkg, dependencies = TRUE)
  }
  # load (attach) every requested package
  sapply(pkg, require, character.only = TRUE)
}
## ---- DongboShi/chinese_author_disambiguation :: new_combination.R ----
library(rhdf5)
library(dplyr)
library(stringr)
library(readr)
library(parallel)
observations<-read_csv('/Users/zijiangred/changjiang/dataset/feature/observations.csv')
train_sample<-observations%>%filter(ntruth>=10)
train_data<-data.frame()
setwd('/Users/zijiangred/changjiang/dataset')
for (i in 1:nrow(train_sample)) {
fl <- str_remove(train_sample$cj[i],'CJ')
pairorder<-h5read(file=paste0('pairorder/',fl,"_pair.h5"),name="pair")
label<-h5read(file=paste0('pairorder/',fl,"_label.h5"),name="label")
Feature_authororder<-read_csv(file=paste0('feature/Feature_author/Feature_authororder/Feature_authororder_',fl,'.csv'))
Feature_authororder<-left_join(pairorder,Feature_authororder,by = c("paperA", "paperB"))
Feature_authororder<-Feature_authororder[,c(1,2,9)] %>%
arrange(match(paperA,pairorder$paperA),
match(paperB,pairorder$paperB))
Feature_authororder<-Feature_authororder[,3:ncol(Feature_authororder)]
Feature_givenname<-read_csv(file=paste0('feature/Feature_author/Feature_givenname/Feature_givenname_',fl,'.csv'))
Feature_givenname<-left_join(pairorder,Feature_givenname,by = c("paperA", "paperB"))
Feature_givenname<-Feature_givenname[,c(1,2,12)] %>%
arrange(match(paperA,pairorder$paperA),
match(paperB,pairorder$paperB))
Feature_givenname<-Feature_givenname[,3:ncol(Feature_givenname)]
Feature_lName<-read_csv(file=paste0('feature/Feature_author/Feature_lName/Feature_lName_',fl,'.csv'))
Feature_lName<-left_join(pairorder,Feature_lName,by = c("paperA", "paperB"))
Feature_lName<-Feature_lName[,c(1,2,7)] %>%
arrange(match(paperA,pairorder$paperA),
match(paperB,pairorder$paperB))
Feature_lName<-Feature_lName[,3:ncol(Feature_lName)]
year<-read_csv(file=paste0('feature/year/year_',fl,'.csv'))
year<-left_join(pairorder,year,by = c("paperA", "paperB"))
year<-year[,c(1,2,5,6)] %>%
arrange(match(paperA,pairorder$paperA),
match(paperB,pairorder$paperB))
year<-year[,3:ncol(year)]
feature_before<-read_csv(file=paste0('/Users/zijiangred/changjiang/dataset/feature/feature/feature_',fl,'.csv'))
feature_before<-feature_before[,c(1,2,7:21,24:39)]
feature_new<-cbind(pairorder,label,Feature_authororder,Feature_givenname,Feature_lName,year)
feature<-left_join(feature_new,feature_before,by=c('paperA','paperB'))
feature_1 <- feature %>% filter(label==1)
feature_0 <- feature %>% filter(label==0)
# down-sample the majority class so positive/negative pairs are roughly balanced
if(nrow(feature_0)>nrow(feature_1)){
pb=nrow(feature_1)/nrow(feature_0)
feature_0$ind<-sample(2,nrow(feature_0),replace=TRUE,prob=c(1-pb,pb))
feature_2<-feature_0%>%filter(ind==2)
feature_0<-feature_2[,1:39]
balance_data<-rbind(feature_0,feature_1)
}else{
pb=nrow(feature_0)/nrow(feature_1)
feature_1$ind<-sample(2,nrow(feature_1),replace=TRUE,prob=c(1-pb,pb))
feature_2<-feature_1%>%filter(ind==2)
feature_1<-feature_2[,1:39]
balance_data<-rbind(feature_0,feature_1)
}
balance_data$id<-fl
train_data<-rbind(train_data,balance_data)
print(paste0(i,'---',fl))
}
write.csv(train_data,file='/Users/zijiangred/changjiang/dataset/feature/train_data_new.csv',row.names=F)
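## The per-feature blocks above repeat one join-reorder-drop pattern; a sketch
## of a helper that would factor it out (the `keep` column indices are the
## hypothetical per-file picks, e.g. 9 for Feature_authororder):
align_feature <- function(path, pairorder, keep) {
  feat <- read_csv(path)
  feat <- left_join(pairorder, feat, by = c("paperA", "paperB"))
  feat <- feat[, c(1, 2, keep)] %>%
    arrange(match(paperA, pairorder$paperA),
            match(paperB, pairorder$paperB))
  feat[, 3:ncol(feat)]
}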
## ---- LuyiTian/scPipe :: R/sc_workflow.R ----
#' create a SingleCellExperiment object from data folder generated by preprocessing step
#'
#' after we run \code{sc_gene_counting} and finish the preprocessing step. \code{create_sce_by_dir}
#' can be used to generate the \link{SingleCellExperiment} object from the folder that contains gene count matrix and QC statistics.
#' it can also generate the html report based on the gene count and quality control statistics
#'
#' @param datadir the directory that contains all the data and `stat` subfolder.
#' @param organism the organism of the data. List of possible names can be retrieved using the function
#' `listDatasets`from `biomaRt` package. (i.e `mmusculus_gene_ensembl` or `hsapiens_gene_ensembl`)
#' @param gene_id_type gene id type of the data A possible list of ids can be retrieved using the function `listAttributes` from `biomaRt` package.
#' the commonly used id types are `external_gene_name`, `ensembl_gene_id` or `entrezgene`
#' @param pheno_data the external phenotype data that linked to each single cell. This should be an \code{AnnotatedDataFrame} object
#' @param report whether to generate the html report in the data folder
#'
#' @details after we run \code{sc_gene_counting} and finish the preprocessing step. \code{create_sce_by_dir}
#' can be used to generate the SingleCellExperiment object from the folder that contains gene count matrix and QC statistics.
#'
#' @return a SingleCellExperiment object
#'
#' @importFrom utils read.csv
#' @import SingleCellExperiment
#' @importClassesFrom SingleCellExperiment SingleCellExperiment
#' @importFrom utils packageVersion
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # the sce can be created fron the output folder of scPipe
#' # please refer to the vignettes
#' sce = create_sce_by_dir(datadir="output_dir_of_scPipe",
#' organism="mmusculus_gene_ensembl",
#' gene_id_type="ensembl_gene_id")
#' }
#' # or directly from the gene count and quality control matrix:
#' data("sc_sample_data")
#' data("sc_sample_qc")
#' sce = SingleCellExperiment(assays = list(counts = as.matrix(sc_sample_data)))
#' organism(sce) = "mmusculus_gene_ensembl"
#' gene_id_type(sce) = "ensembl_gene_id"
#' QC_metrics(sce) = sc_sample_qc
#' demultiplex_info(sce) = cell_barcode_matching
#' UMI_dup_info(sce) = UMI_duplication
#' dim(sce)
#'
create_sce_by_dir <- function(datadir, organism=NULL, gene_id_type=NULL, pheno_data=NULL, report=FALSE) {
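# expected layout: datadir/gene_count.csv plus a stat/ subfolder containing
# cell_stat.csv, overall_stat.csv and UMI_duplication_count.csv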
gene_cnt <- read.csv(file.path(datadir, "gene_count.csv"), row.names=1)
cell_stat <- read.csv(file.path(datadir, "stat", "cell_stat.csv"), row.names=1)
demultiplex_stat <- read.csv(file.path(datadir, "stat", "overall_stat.csv"))
UMI_dup_stat <- read.csv(file.path(datadir, "stat", "UMI_duplication_count.csv"))
gene_cnt <- gene_cnt[, order(colnames(gene_cnt))]
cell_stat <- cell_stat[order(rownames(cell_stat)), ]
sce <- SingleCellExperiment(assays = list(counts =as.matrix(gene_cnt)))
sce@metadata$scPipe$version <- packageVersion("scPipe") # set version information
if(!is.null(organism)){
organism(sce) <- organism
}
if(!is.null(gene_id_type)){
gene_id_type(sce) <- gene_id_type
}
QC_metrics(sce) <- cell_stat
if(!is.null(pheno_data)){
colData(sce) <- cbind(colData(sce), pheno_data[order(rownames(pheno_data)),])
}
demultiplex_info(sce) <- demultiplex_stat
UMI_dup_info(sce) <- UMI_dup_stat
#if(any(grepl("^ERCC-", rownames(sce)))){
# isSpike(sce, "ERCC") <- grepl("^ERCC-", rownames(sce))
#}
if(report){
create_report(sample_name=basename(datadir),
outdir=datadir,
organism=organism,
gene_id_type=gene_id_type)
}
return(sce)
}
#' create_report
#'
#' Create an HTML report using data generated by the preprocessing step.
#'
#' @param sample_name sample name
#' @param outdir output folder
#' @param r1 file path of read1
#' @param r2 file path of read2 default to be NULL
#' @param outfq file path of the output of \code{sc_trim_barcode}
#' @param read_structure a list contains read structure configuration. For more help see `?sc_trim_barcode`
#' @param filter_settings a list contains read filter settings for more help see `?sc_trim_barcode`
#' @param align_bam the aligned bam file
#' @param genome_index genome index used for alignment
#' @param map_bam the mapped bam file
#' @param exon_anno the gff exon annotation used. Can have multiple files
#' @param stnd whether to perform strand specific mapping
#' @param fix_chr add `chr` to chromosome names, fix inconsistent names.
#' @param barcode_anno cell barcode annotation file path.
#' @param max_mis maximum mismatch allowed in barcode. Default to be 1
#' @param UMI_cor correct UMI sequence error: 0 means no correction, 1 means simple correction and merge UMI with distance 1.
#' @param gene_fl whether to remove low abundant gene count. Low abundant is defined as only one copy of one UMI for this gene
#' @param organism the organism of the data. List of possible names can be retrieved using the function
#' `listDatasets`from `biomaRt` package. (i.e `mmusculus_gene_ensembl` or `hsapiens_gene_ensembl`)
#' @param gene_id_type gene id type of the data A possible list of ids can be retrieved using the function `listAttributes` from `biomaRt` package.
#' the commonly used id types are `external_gene_name`, `ensembl_gene_id` or `entrezgene`
#'
#' @return no return
#' @export
#'
#' @examples
#' \dontrun{
#' create_report(sample_name="sample_001",
#' outdir="output_dir_of_scPipe",
#' r1="read1.fq",
#' r2="read2.fq",
#' outfq="trim.fq",
#' read_structure=list(bs1=-1, bl1=2, bs2=6, bl2=8, us=0, ul=6),
#' filter_settings=list(rmlow=TRUE, rmN=TRUE, minq=20, numbq=2),
#' align_bam="align.bam",
#' genome_index="mouse.index",
#' map_bam="aligned.mapped.bam",
#' exon_anno="exon_anno.gff3",
#' stnd=TRUE,
#' fix_chr=FALSE,
#' barcode_anno="cell_barcode.csv",
#' max_mis=1,
#' UMI_cor=1,
#' gene_fl=FALSE,
#' organism="mmusculus_gene_ensembl",
#' gene_id_type="ensembl_gene_id")
#' }
#'
create_report <- function(sample_name,
outdir,
r1="NA",
r2="NA",
outfq="NA",
read_structure=list(bs1=0, bl1=0, bs2=0, bl2=0, us=0, ul=0),
filter_settings=list(rmlow = TRUE, rmN = TRUE, minq = 20, numbq = 2),
align_bam="NA",
genome_index="NA",
map_bam="NA",
exon_anno="NA",
stnd=TRUE,
fix_chr=FALSE,
barcode_anno="NA",
max_mis=1,
UMI_cor=1,
gene_fl=FALSE,
organism,
gene_id_type) {
fn <- system.file("extdata", "report_template.Rmd", package = "scPipe")
tx <- readLines(fn)
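# the Rmd template contains FIELD__ placeholders (e.g. SAMPLE_NAME__) that are
# substituted below with the supplied run parameters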
tx <- gsub(pattern = "SAMPLE_NAME__", replacement = sample_name, x = tx)
tx <- gsub(pattern = "FQ1__", replacement = r1, x = tx)
if (!is.null(r2)) {
tx <- gsub(pattern = "FQ2__", replacement = r2, x = tx)
}
else {
tx <- gsub(pattern = "FQ2__", replacement = "NA", x = tx)
}
tx <- gsub(pattern = "FQOUT__", replacement = outfq, x = tx)
if (read_structure$bs1<0) {
tx <- gsub(pattern = "BC1_INFO__", replacement = "NA", x = tx)
}
else {
tx <- gsub(pattern = "BC1_INFO__", replacement =
paste0("start at position ", read_structure$bs1, ", length ", read_structure$bl1), x = tx)
}
tx <- gsub(pattern = "BC2_INFO__", replacement =
paste0("start at position ", read_structure$bs2, ", length ", read_structure$bl2), x = tx)
tx <- gsub(pattern = "UMI_INFO__", replacement =
paste0("start at position ", read_structure$us, ", length ", read_structure$ul), x = tx)
tx <- gsub(pattern = "RM_N__", replacement = as.character(filter_settings$rmN), x = tx)
tx <- gsub(pattern = "RM_LOW__", replacement = as.character(filter_settings$rmlow), x = tx)
tx <- gsub(pattern = "MIN_Q__", replacement = filter_settings$minq, x = tx)
tx <- gsub(pattern = "NUM_BQ__", replacement = filter_settings$numbq, x = tx)
tx <- gsub(pattern = "BAM_ALIGN__", replacement = align_bam, x = tx)
tx <- gsub(pattern = "G_INDEX__", replacement = genome_index, x = tx)
tx <- gsub(pattern = "BAM_MAP__", replacement = map_bam, x = tx)
tx <- gsub(pattern = "OUTDIR__", replacement = outdir, x = tx)
tx <- gsub(pattern = "ANNO_GFF__", replacement = paste(exon_anno, collapse=", "), x = tx)
tx <- gsub(pattern = "STND__", replacement = as.character(stnd), x = tx)
tx <- gsub(pattern = "FIX_CHR__", replacement = as.character(fix_chr), x = tx)
tx <- gsub(pattern = "BC_ANNO__", replacement = barcode_anno, x = tx)
tx <- gsub(pattern = "MAX_MIS__", replacement = max_mis, x = tx)
if (UMI_cor == 1) {
tx <- gsub(pattern = "UMI_COR__", replacement = "simple correction and merge UMI with distance 1", x = tx)
}
else if (UMI_cor == 0) {
tx <- gsub(pattern = "UMI_COR__", replacement = "no correction", x = tx)
}
else {
tx <- gsub(pattern = "UMI_COR__", replacement = "unknown", x = tx)
}
# If organism and gene id type are not provided, delete them from param list
# of rmd. param$organism and param$gene_id_type will then return NULL
# when used in code.
tx <- gsub(pattern = "GENE_FL__", replacement = as.character(gene_fl), x = tx)
if(!missing(organism) && !is.null(organism)){
tx <- gsub(pattern = "ORGANISM__", replacement = organism, x = tx)
}else{
tx <- tx[!grepl(pattern = "ORGANISM__", x = tx)]
}
if(!missing(gene_id_type) && !is.null(gene_id_type)){
tx <- gsub(pattern = "GENE_ID_TYPE__", replacement = gene_id_type, x = tx)
}else{
tx <- tx[!grepl(pattern = "GENE_ID_TYPE__", x = tx)]
}
writeLines(tx, con=file.path(outdir, "report.Rmd"))
knitr::wrap_rmd(file.path(outdir, "report.Rmd"), width = 120, backup = NULL)
rmarkdown::render(file.path(outdir, "report.Rmd"), output_file = file.path(outdir, "report.html"), knit_root_dir = ".")
}
#' create_processed_report
#'
#' Create an HTML report summarising pro-processed data. This is an alternative to the more verbose \code{create_report} that requires only the processed counts and stats folders.
#' @param outdir output folder.
#' @param organism the organism of the data. List of possible names can be retrieved using the function
#' `listDatasets`from `biomaRt` package. (e.g. `mmusculus_gene_ensembl` or `hsapiens_gene_ensembl`).
#' @param gene_id_type gene id type of the data A possible list of ids can be retrieved using the function `listAttributes` from `biomaRt` package.
#' the commonly used id types are `external_gene_name`, `ensembl_gene_id` or `entrezgene`.
#' @param report_name the name of the report .Rmd and .html files.
#'
#' @returns file path of the created compiled document.
#' @examples
#' \dontrun{
#' create_processed_report(
#' outdir="output_dir_of_scPipe",
#' organism="mmusculus_gene_ensembl",
#' gene_id_type="ensembl_gene_id")
#' }
#'
#' @export
create_processed_report <- function(
outdir = ".",
organism,
gene_id_type,
report_name = "report"
) {
if (!requireNamespace("rmarkdown", quietly=TRUE)) {
stop("Install 'rmarkdown' to use this function.")
}
fn <- system.file("extdata", "report_template_slim.Rmd", package = "scPipe")
tx <- readLines(fn)
fill_report_field <- function(field, value) {
pattern <- paste0(field, "__")
if (is.na(value)) {
tx <- tx[-grep(pattern, tx)]
} else {
gsub(pattern, value, tx)
}
}
if (!missing(organism) && !is.null(organism)) {
tx <- fill_report_field("ORGANISM", organism)
} else {
tx <- fill_report_field("ORGANISM", NA)
}
if (!missing(gene_id_type) && !is.null(gene_id_type)) {
tx <- fill_report_field("GENE_ID_TYPE", gene_id_type)
} else {
tx <- fill_report_field("GENE_ID_TYPE", NA)
}
report_path <- file.path(outdir, paste0(report_name, ".Rmd"))
tx <- tx[!is.na(tx)]
writeLines(tx, con = report_path)
rmarkdown::render(
input = report_path,
envir = new.env(),
knit_root_dir = "."
)
}
## ---- mcgregorv/CRAM_chaos :: Chaos/settingUpChaosByKeystoneness.R ----
####### NOTE: didn't really end up using this, as shifting everything by the same amount doesn't really test much
##
## similar to settingUpTestingChaos.R, but uses a scalar on the initial conditions based on how well informed we defined the group in the first paper,
## with the option of only changing the top XX groups by keystoneness, or all of them
source(paste(DIR$`General functions`,"getVolDepth.R",sep=""))
source(paste(DIR$`General functions`,"read_boxes.R",sep=""))
## fix keystoneness csv
temp<- read.csv(paste(DIR$'Tables', "Keystoneness_baseModel.csv", sep=""),header=FALSE); nk<-dim(temp)[1]
key_df<-array(NA, dim=c(nk, 2))
for(k in 1:nk){
if(k==1){
thisNum <-1
thisGroup <- unlist(str_split(temp[k,]," "))[2]
} else{
thisNum<-get_first_number(temp[k,]); thisGroup<-str_trim(gsub(thisNum, "", temp[k,]), side="both")
}
xx<-unlist(str_split(thisGroup," "));
xxx<-paste(xx, collapse="_")
xxxx<-grep(xxx,groupsDF$Name); thisCode<-as.character(groupsDF$Code[xxxx])
key_df[k,]<-c(thisNum, thisCode)
}
basePath<-paste(DIR$'Base',"ATLANTISmodels\\",sep="")
##need ##
existingICfile<-"CRAM_input_fromBase50yr";
chaosDirections <- c("up", "down"); chaosShifts <- c(0.05, 0.1, 0.2, 0.5); nd <- length(chaosDirections); ns <- length(chaosShifts)
for(d in 1:nd){
chaosDirection <- chaosDirections[d]
for(s in 1:ns){
chaosShift<-chaosShifts[s]
#
# chaosDirection<-"up";
# chaosDirection<-"down"
# chaosShift <- 0.05
# chaosShift <- 0.1
# chaosShift <- 0.2
# chaosShift <- 0.5
if(chaosDirection=="up"){
thisScale <- 1 + chaosShift
} else{
thisScale <- 1- chaosShift
}
## need ## - call it what you like
newICFile<-paste(existingICfile, chaosDirection,gsub("\\.", "", chaosShift),sep="")
### testing testing ###
# read this in to check how it went - prob with 'up'?
# newICdata <- nc_open(paste(basePath,newICFile, ".nc", sep=""))
# thisCode<-"ASQ"; thisName<-str_trim(groupsDF$Name[groupsDF$Code==thisCode], side="both")
# thisTracer<-paste(thisName,"1_Nums", sep="")
# thisICdata <- ncvar_get(newICdata, thisTracer)
# testExistingData <- nc_open(paste(basePath, existingICfile, ".nc", sep=""))
# thisExistingData <- ncvar_get(testExistingData, thisTracer)
if(!file.exists(paste(basePath,newICFile,".nc",sep=""))){
## need this
file.copy(from=paste(basePath,existingICfile,".nc",sep=""),to=paste(basePath,newICFile,".nc",sep=""))
}
## need this
init <- nc_open(paste(basePath,newICFile,".nc",sep=""), write = T)
#
# bgmf<-"CHAT30_aea.bgm"
# bgmFile<-paste(basePath,"..\\",bgmf,sep="")
#
# depthLayers<-c(1050,700,500,300,100)
# num_wc<-length(depthLayers)
#
# volAndSuch<-getVolDepth(bgmFile,depthLayers)
nlayers=6
# ndynBoxes<-24
nboxes<-30
# boundBoxes<-c(1,seq(26,30)) #labels are 1 less than these (0,25,...,29)
# dynBoxes<-seq(2,25) #labels are 1 less than these
Fix_negs<-function(x){
y=x
if(x<0){
y<-0
}
return(y)
}
# Get info from init
var_names_init <- names(init$var)
# vars <- var_names_init[is.element(var_names_init, var_names_out)]
vars<-unique(sort(var_names_init))
skipVars<-c("nominal_dz", "volume", "dz", "numlayers", "topk")
vars<-vars[!(vars %in% skipVars)]
thisNames <- groupsDF$Name[groupsDF$GroupType=="Vert"]
thisNumTracers <- paste(thisNames, 1,"_Nums", sep="")
for(i in seq_along(vars)){
  dataTemp <- ncvar_get(init, vars[i])
  newData <- dataTemp * thisScale
  # scale the first time step and blank the second with NA; ncdump prints
  # missing values as "_", which the dimension-fixing step below strips out
  if(length(dim(dataTemp))==3){
    dataTemp[,,1] <- newData[,,1]; dataTemp[,,2] <- NA
  } else{
    dataTemp[,1] <- newData[,1]; dataTemp[,2] <- NA
  }
  ncvar_put(init, varid = vars[i], vals = dataTemp)
}
# close the file.
nc_close(init)
}
}
## create a .bat file to dump all the .nc files into text files so we can fix the dimensions
batFile <- paste(basePath, "dumpChaosInput2text.bat", sep="")
cat("##\n", file=batFile, append=FALSE)
for(d in 1:nd){
chaosDirection <- chaosDirections[d]
for(s in 1:ns){
chaosShift<-chaosShifts[s]
thisNCfile <- paste(existingICfile, chaosDirection,gsub("\\.", "", chaosShift),sep="")
thisLine <- paste("ncdump ", thisNCfile, ".nc > ", thisNCfile, ".txt \n", sep="")
cat(thisLine, file=batFile, append=TRUE)
}
}
## fix the dimensions
for(d in 1:nd){
chaosDirection <- chaosDirections[d]
for(s in 1:ns){
chaosShift<-chaosShifts[s]
thisfile<-thisNCfile <- paste(basePath,existingICfile, chaosDirection,gsub("\\.", "", chaosShift), ".txt",sep="")
newfile<-thisfile
thisLines<-readLines(thisfile)
x <- thisLines[grep("_,",thisLines)[1]]
## only edit lines after 'data:'
lineStartIndex<-grep("data:", thisLines)
newLines<-thisLines
newLines[lineStartIndex:length(newLines)] <- unlist(lapply(thisLines[lineStartIndex:length(newLines)], FUN=function(x){str_trim(gsub("_,|;|NaN,|NaN","",x), side="both")}))
newLines[grep("_,",thisLines)[1]]
## add in the end ;
index<-grep(",",newLines); ni<-length(index)
for(i in 1:ni){
this_i <- index[i]
nextLine<-newLines[(this_i+1)]
if(nextLine==""){
thisLine<-newLines[this_i]
temp <- gsub(", ", " TEMP ", thisLine); temp2<-gsub(",",";", temp);
if(length(grep(";", temp2))==0){ temp2 <- paste(temp, ";", collapse="", sep="")} #if there is no , at the end of the line - then just add the ; to the end
thisNewLine<-gsub(" TEMP", ",", temp2)
newLines[this_i]<-thisNewLine
}
}
## check for lone underscores left
index <- newLines=="_"
newLines <- newLines[!index]
## might as well take out white space
index <- newLines==""
newLines <- newLines[!index]
## a couple of ad-hoc fixes
x <- grep("^t =", newLines)
newLines[x]<- "t = 0;"
writeLines(newLines, newfile)
}
}
## create a .bat file to turn the fixed text files back into .nc files
batFile <- paste(basePath, "dumpChaosText2input.bat", sep="")
cat("##\n", file=batFile, append=FALSE)
for(d in 1:nd){
chaosDirection <- chaosDirections[d]
for(s in 1:ns){
chaosShift<-chaosShifts[s]
thisNCfile <- paste(existingICfile, chaosDirection,gsub("\\.", "", chaosShift),sep="")
thisLine <- paste("ncgen -o ", thisNCfile, ".nc ", thisNCfile, ".txt \n", sep="")
cat(thisLine, file=batFile, append=TRUE)
}
}
## set up the run file
baseICfile<- "CRAM_input"
baseRunCommand <- "../../bin/bin/atlantisMerged -i CRAM_input.nc 0 -o output.nc -r CRAM_base_run.prm -f inputs/CRAM_forceBURNIN1865.prm -p inputs/CRAM_physics.prm -b CRAM_BH_hybrid_biol.prm -s CRAM_Groups.csv -q CRAM_Fisheries.csv -d outputFolder"
fishRunCommand <- "../../bin/bin/atlantisMerged -i CRAM_input.nc 0 -o output.nc -r CRAM_baseFish_run.prm -f inputs/CRAM_forceBURNIN1865.prm -p inputs/CRAM_physics.prm -b CRAM_BH_hybrid_biol.prm -h CRAM_harvest_short.prm -s CRAM_Groups.csv -q CRAM_Fisheries.csv -d outputFolder"
runFile<-paste(basePath, "RunChaos", sep="")
cat("#Run base run and historical catches removed run with scaled ICs", file=runFile, append=FALSE)
for(d in 1:nd){
chaosDirection <- chaosDirections[d]
for(s in 1:ns){
chaosShift<-chaosShifts[s]
thisNCfile <- paste(existingICfile, chaosDirection,gsub("\\.", "", chaosShift),sep="")
thisRunCommand <- gsub(baseICfile, thisNCfile, baseRunCommand)
thisRunCommand <- gsub("outputFolder", paste("outputChaos",chaosDirection,gsub("\\.", "", chaosShift), sep="" ), thisRunCommand)
cat(paste("WD=\"$(pwd)\"
RUN=\"", thisRunCommand ,"\"
echo $RUN > RUN
CMD=\"msub -l nodes=1 -l walltime=50:00:00 -l partition=slurm -l qos=standby -p -1000 -q large -o CRAMBase1.log.%j -e CRAMBase.err.%j -S /bin/bash RUN\"
echo \"Running Atlantis Base for CRAMBase on MOAB in directory:\" $WD
echo -n \"Job started at: \" ; date
echo $RUN
COMMAND=\"cd $WD ; $CMD\"
ssh turbine $COMMAND
sleep 1"), file=runFile, append=TRUE)
## this fishing version
thisRunCommand <- gsub(baseICfile, thisNCfile, fishRunCommand)
thisRunCommand <- gsub("outputFolder", paste("outputChaosFISH",chaosDirection,gsub("\\.", "", chaosShift), sep="" ), thisRunCommand)
cat(paste("
WD=\"$(pwd)\"
RUN=\"", thisRunCommand ,"\"
echo $RUN > RUN
CMD=\"msub -l nodes=1 -l walltime=50:00:00 -l partition=slurm -l qos=standby -p -1000 -q large -o CRAMBase1.log.%j -e CRAMBase.err.%j -S /bin/bash RUN\"
echo \"Running Atlantis Base for CRAMBase on MOAB in directory:\" $WD
echo -n \"Job started at: \" ; date
echo $RUN
COMMAND=\"cd $WD ; $CMD\"
ssh turbine $COMMAND
sleep 1
"), file=runFile, append=TRUE)
}
}
## also add in the base runs - this uses the new base IC file
thisNCfile<- "CRAM_input_fromBase50yr"
thisRunCommand <- gsub(baseICfile, thisNCfile, baseRunCommand)
thisRunCommand <- gsub("outputFolder", "outputChaosBASE", thisRunCommand)
cat(paste("WD=\"$(pwd)\"
RUN=\"", thisRunCommand ,"\"
echo $RUN > RUN
CMD=\"msub -l nodes=1 -l walltime=50:00:00 -l partition=slurm -l qos=standby -p -1000 -q large -o CRAMBase1.log.%j -e CRAMBase.err.%j -S /bin/bash RUN\"
echo \"Running Atlantis Base for CRAMBase on MOAB in directory:\" $WD
echo -n \"Job started at: \" ; date
echo $RUN
COMMAND=\"cd $WD ; $CMD\"
ssh turbine $COMMAND
sleep 1"), file=runFile, append=TRUE)
## this fishing version
thisRunCommand <- gsub(baseICfile, thisNCfile, fishRunCommand)
thisRunCommand <- gsub("outputFolder", "outputChaosFISH", thisRunCommand)
cat(paste("
WD=\"$(pwd)\"
RUN=\"", thisRunCommand ,"\"
echo $RUN > RUN
CMD=\"msub -l nodes=1 -l walltime=50:00:00 -l partition=slurm -l qos=standby -p -1000 -q large -o CRAMBase1.log.%j -e CRAMBase.err.%j -S /bin/bash RUN\"
echo \"Running Atlantis Base for CRAMBase on MOAB in directory:\" $WD
echo -n \"Job started at: \" ; date
echo $RUN
COMMAND=\"cd $WD ; $CMD\"
ssh turbine $COMMAND
sleep 1
"), file=runFile, append=TRUE)
## ---- mmc00/presupuestochile :: R/presupuestochile-package.R (CC0-1.0) ----
#' Presupuestos (2012-2020)
#'
#' Contains the amount assigned to Chile's National Budget for each year.
#'
#' @name presupuestos
#' @docType data
#' @author Direccion de Presupuestos (DIPRES)
#' @usage presupuestos
#' @format A tibble with 9 rows and 8 columns
#' @references \url{http://presupuesto.bcn.cl/presupuesto/api}
#' @keywords data
NULL
#' Partidas (2012-2020)
#'
#' Contains the amount assigned in Chile's National Budget to each "partida"
#' (top-level budget line), e.g. the Judiciary, the Comptroller's Office, etc.
#'
#' @name partidas
#' @docType data
#' @author Direccion de Presupuestos (DIPRES)
#' @usage partidas
#' @format A tibble with 246 rows and 9 columns
#' @references \url{http://presupuesto.bcn.cl/presupuesto/api}
#' @keywords data
NULL
#' Capitulos (2012-2020)
#'
#' Contains the amount assigned in Chile's National Budget to each "capitulo"
#' (chapter), e.g. the Senate and the Chamber of Deputies, which belong to
#' the National Congress, etc.
#'
#' @name capitulos
#' @docType data
#' @author Direccion de Presupuestos (DIPRES)
#' @usage capitulos
#' @format A tibble with 1903 rows and 17 columns
#' @references \url{http://presupuesto.bcn.cl/presupuesto/api}
#' @keywords data
#' @examples
#' \dontrun{
#' capitulos %>%
#'   left_join(partidas %>% select(nombre_partida, id_partida)) %>%
#'   select(nombre_partida, everything())
#' }
NULL
#' Programas (2012-2020)
#'
#' Contains the amount assigned in Chile's National Budget to each "programa"
#' (program), e.g. the Judicial Academy and the Administrative Corporation of
#' the Judiciary, etc.
#'
#' @name programas
#' @docType data
#' @author Direccion de Presupuestos (DIPRES)
#' @usage programas
#' @format A tibble with 2874 rows and 17 columns
#' @references \url{http://presupuesto.bcn.cl/presupuesto/api}
#' @keywords data
#' @examples
#' \dontrun{
#' programas %>%
#'   left_join(capitulos %>% select(id_capitulo, id_partida)) %>%
#'   left_join(partidas %>% select(nombre_partida, id_partida)) %>%
#'   select(nombre_partida, everything())
#' }
NULL
#' Subtitulos (2012-2020)
#'
#' Contains the amount assigned in Chile's National Budget to each "subtitulo"
#' (subtitle), e.g. the fiscal contribution, personnel expenses, and cash
#' balance of each program of the Judiciary, etc.
#'
#' @name subtitulos
#' @docType data
#' @author Direccion de Presupuestos (DIPRES)
#' @usage subtitulos
#' @format A tibble with 33852 rows and 13 columns
#' @references \url{http://presupuesto.bcn.cl/presupuesto/api}
#' @keywords data
#' @examples
#' \dontrun{
#' subtitulos %>%
#'   left_join(programas %>% select(id_programa, id_capitulo)) %>%
#'   left_join(capitulos %>% select(id_capitulo, id_partida)) %>%
#'   left_join(partidas %>% select(nombre_partida, id_partida)) %>%
#'   select(nombre_partida, everything())
#' }
NULL
## ---- surayaaramli/typeRrh :: data/genthat_extracted_code/ph2bye/examples/bayes.design.Rd.R ----
library(ph2bye)
### Name: bayes.design
### Title: Bayesian design method for sequentially monitoring patients
### using Beta-Binomial posterior probability based on observing data
### Aliases: bayes.design
### ** Examples
# Using Multiple Myeloma (MM) data example
MM.r = rep(0,6); MM.mean = 0.1; MM.var = 0.0225
a <- MM.mean^2*(1-MM.mean)/MM.var - MM.mean; b <- MM.mean*(1-MM.mean)^2/MM.var - (1-MM.mean)
bayes.design(a=a,b=b,r=MM.r,stop.rule="futility",p0=0.1)
# Using Acute Promyelocytic Leukaemia (APL) data example
APL.r <- c(0,1,0,0,1,1); APL.mean = 0.3; APL.var = 0.0191
a <- APL.mean^2*(1-APL.mean)/APL.var - APL.mean; b <- APL.mean*(1-APL.mean)^2/APL.var - (1-APL.mean)
bayes.design(a=a,b=b,r=APL.r,stop.rule="efficacy",p0=0.1)
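## Both a/b computations above are method-of-moments Beta priors; a small
## helper sketch of the same algebra:
beta.mom <- function(m, v) {
  c(a = m^2 * (1 - m) / v - m,
    b = m * (1 - m)^2 / v - (1 - m))
}
beta.mom(0.1, 0.0225)  # reproduces the MM prior parameters above (a = 0.3, b = 2.7)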
% ---- cran/music :: man/playNote.Rd ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/play.R
\name{playNote}
\alias{playNote}
\title{Play Note}
\usage{
playNote(
note,
oscillator = "sine",
duration = rep(1, length(note)),
BPM = 120,
sample.rate = 44100,
attack.time = 50,
inner.release.time = 50,
A4 = 440,
plot = FALSE,
...
)
}
\arguments{
\item{note}{String, Vector: Note(s) to be played, e.g. c("Ab4", "B4")}
\item{oscillator}{String: "sine", "square", "saw". Default = "sine"}
\item{duration}{Float: Note duration in beats. Default = 1}
\item{BPM}{Integer: Beats per minute. Default = 120}
\item{sample.rate}{Integer: Sample rate. Default = 44100}
\item{attack.time}{Integer: Attack time. Default = 50 (Helps prevent popping)}
\item{inner.release.time}{Integer: Release time, that ends on note OFF (instead of beginning at note OFF).
Default = 50 (Also helps prevent popping)}
\item{A4}{Float: Frequency for A4 in Hz. Default = 440}
\item{plot}{Logical: If TRUE, plot notes using \link{cplot.piano}. This supports only two octaves;
do not try plotting if your notes span more than two octaves.}
\item{...}{Additional arguments to pass to \link{note2freq}}
}
\description{
Play Note
}
\examples{
\dontrun{
playNote("B4")
}
}
\author{
E.D. Gennatas
}
## ---- schwilklab/understory-ma :: scripts/understory-stats.R ----
library(ggplot2)
library(metafor)
library(dplyr)
options(na.action = "na.omit")
RESULTS_DIR = "../results/plots/"
DATA_DIR = "../data/response-vars/"
# no need to look at graminoids and forbs separately now
EXCLUDES = c("g-richness.csv", "g-cover.csv", "f-richness.csv", "f-cover.csv" ,
"exotic-cover.csv", # use exotic richness
"herb-richness.csv", # use herb-cover
"s-richness.csv", #use shrub cover
"native-cover.csv", "native-richness.csv" # similar to total
)
## Global variable for default modifiers:
MODS = ~ EastWest # this is the only modifier that ends up significant
#MODS=NULL
# read in table of papers
papers <- read.csv("../data/papers.csv", stringsAsFactors=FALSE)
papers <- filter(papers, UsedOrNot=="Yes")
papers$FireIntensity <- factor(papers$FireIntensity)
papers$FuelType <- factor(papers$FuelType)
papers$EastWest <- ifelse(papers$Long < -100, "West", "East")
papers$EastWest <- factor(papers$EastWest, levels = c("West", "East"))
# Run a single treatment comparison. Needs some tricky text parsing
runComparison <- function(data, t1, t2, mods = MODS) {
dat <- escalc("SMD", m1i=eval(parse(text=paste(t1, ".mean",sep=""))),
m2i=eval(parse(text=paste(t2, ".mean", sep=""))),
sd1i=eval(parse(text=paste(t1, ".s", sep=""))),
sd2i=eval(parse(text=paste(t2, ".s", sep=""))),
n1i=eval(parse(text=paste(t1, ".n", sep=""))),
n2i=eval(parse(text=paste(t2, ".n", sep=""))),
data=data)
#returnNull <- function(err) NULL # we just need to skip any errors
# "level" below indicates sig level. We use 90, for one-tailed test. This
# effects confidence intervals, no p values, so we can still adjust those
# manually.
res <- tryCatch(rma(yi, vi, mods = mods, data=dat, level=90),
error = function(cond) {
message("RMA failed")
return(NULL)
}
)
return(res)
}
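## A minimal usage sketch for one response file (herb-cover.csv is implied by
## the EXCLUDES comments above; columns follow the <treatment>.mean/.s/.n
## convention parsed in runComparison):
# dat <- read.csv(file.path(DATA_DIR, "herb-cover.csv"), header = TRUE)
# dat <- merge(dat, papers, all.x = TRUE)
# res <- runComparison(dat, "burn", "control")
# if (!is.null(res)) summary(res)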
getZVals <- function(rma.res, t1, t2) {
ci <- data.frame( contrast = paste(t1,"-", t2,sep=""),
param = rownames(rma.res$b), b = rma.res$b,
zval = rma.res$zval, pval = rma.res$pval,
ci.lb = rma.res$ci.lb, ci.ub = rma.res$ci.ub)
return(ci)
}
makePlotsGetZs <- function(data, resp.var, t1, t2) {
r <- runComparison(data, t1, t2, mods=MODS)
if(is.null(r)) {
return(NULL)
}
print(paste(t1, " vs ", t2, resp.var))
pdf(file.path(RESULTS_DIR, paste(resp.var, "-", t1, "-vs-", t2, ".pdf", sep="")))
forest(r, slab=data$FormattedName)
dev.off()
png(file.path(RESULTS_DIR, paste(resp.var, "-", t1, "-vs-", t2, "-funnel.png", sep="")))
funnel(r)
dev.off()
print(r)
z <- getZVals(r, t1, t2)
return(z)
}
# Create three forest plots for a file and save them,
# confidence intervals to stdout
plotsAndConfint <- function(x) {
df <- read.csv(x, header = TRUE)
df <- merge(df, papers, all.x=TRUE)
#df <- subset(df, Paper != "Nelson+Halpern+etal-2008")
bname <- strsplit(basename(x),".", fixed=TRUE)[[1]][1]
print(paste("Running tests on", bname))
# burn vs control
ci.bc <- makePlotsGetZs(df, bname, "burn", "control")
# thin vs control
ci.tc <- makePlotsGetZs(df, bname, "thin", "control")
# thin vs burn
ci.bt <- makePlotsGetZs(df, bname, "burn", "thin")
r.df <- rbind(ci.bc,ci.tc,ci.bt)
r.df$var <- bname
return(r.df)
}
# makes a list of the files in your working directory
# the user needs to specify their directory path
varfiles <- list.files(DATA_DIR, pattern = "*.csv", full.names = TRUE)
varfiles <- varfiles[! basename(varfiles) %in% EXCLUDES]
# make graphs and results
r.list <- lapply(varfiles,FUN=plotsAndConfint)
# make big data frame of all confint results
conf.int.df <- bind_rows(r.list)
conf.int.df <- mutate(conf.int.df, sig=(ci.lb>0 & ci.ub>0) | (ci.lb<0 & ci.ub<0))
conf.int.df <- conf.int.df[with(conf.int.df, order(param, pval)), ]
# P-value adjustment: "holm" is most conservative (= sequential Bonferroni), but
# "hommel" or "hochberg" are also appropriate for these data. Here we are really
# only interested in the intercept p-value for adjustment, but I run it on both
# params using group_by/mutate:
conf.int.df <- conf.int.df %>% group_by(param) %>%
mutate(adj.pval = p.adjust(pval, "holm"))
write.csv(conf.int.df, "../results/confidence-intervals.csv", row.names=FALSE)
## ---- akhikolla/updated-only-Issues :: grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610054654-test.R ----
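## auto-generated fuzzing regression case: replays one stored libFuzzer/valgrind
## test input against grattan:::anyOutside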
testlist <- list(a = -1L, b = -2236963L, x = c(-8323073L, NA, -1L, -233L, -1L, -1L, -209L, -1L, -1L, -1L, -1879048193L, 0L, 788529152L, 16777007L, -253L, -572662307L, -572653569L, -1L, -1L, -16763648L, 1L, 115L, 1373494749L, -574619649L, -54999L, -52993L, -1L, -1L, -870527796L, 13421595L, 0L, 0L, -9043840L, 0L, 16777216L, 1937143337L, 1364328447L, 3538944L, -8388567L, -52993L, -1L, -246L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
## ---- hrbrmstr/countenance :: R/countenance-package.R (MIT) ----
#' Tools to Work with the Pi-Hole API
#'
#' Named after a primary synonym for 'pihole', tools are provided
#' to access the Pi-Hole API.
#'
#' @md
#' @name countenance
#' @keywords internal
#' @author Bob Rudis (bob@@rud.is)
#' @import httr
#' @importFrom jsonlite fromJSON
"_PACKAGE"
## ---- AileneKane/radcliffe :: Analyses/wholegroup_paper1/DirIndEffectsFigure.R ----
# ------------------------------------------
# Mock figures for direct vs. indirect effects of experimental warming
# A. Ettinger, aettinger@fas.harvard.edu
# Description: conceptual plots of muted vs. exaggerated responses to warming
setwd("~/git/radcliffe")
rm(list=ls())
options(stringsAsFactors=FALSE)
#Here is a draft of the conceptual figure for biological implications of warming
# quartz() is macOS-only; substitute windows()/x11() or pdf() on other platforms
quartz(width=11,height=6)
par(mfrow=c(2,3),omi=c(.1,2,.1,.1), mai=c(.6,.3,.5,.1))
x<-c(seq(1:10));y<-rev(seq(261,270))
plot(x,y,type="l", lwd=2, xlab="", ylab="", bty="l", cex.lab=1.2,ylim=c(260,280), yaxt="n",xaxt="n")
mtext("Muted response:",side=3, line=-7, adj=-1.5)
mtext("Direct Effect",side=3, line=2, adj=.5,cex=.9)
mtext("+",side=3, line=2, adj=1, cex=1.1)
mtext("Air temperature",side=1, line=1, adj=.5)
mtext("Focal Response",side=2, line=1.5)
mtext("(e.g. doy of phenological event)",side=2, line=.5, cex=.9)
y2<-seq(261,265.5,by=.5)
plot(x,y2,type="l", lwd=2, xlab="", ylab="", bty="l",cex.lab=1.2,ylim=c(260,280), yaxt="n",xaxt="n")
mtext("=",side=3, line=2, adj=1, cex=1.1)
mtext("Indirect Effect",side=3, line=2, adj=.5,cex=.9)
mtext("Artifically co-varying driver",side=1, line=1, adj=.5)
mtext("(e.g. soil moisture)",side=1, line=2, adj=.5,cex=.8)
y3<-rev(seq(261,265.5,by=.5))
plot(x,y3,type="l", lwd=2, xlab="", ylab="", bty="l",cex.lab=1.2,ylim=c(260,280), yaxt="n",xaxt="n")
mtext("Net Observed Effect of Warming",side=3, line=2, adj=.5,cex=.9)
mtext("Air temperature",side=1, line=1, adj=.5)
plot(x,y2,type="l", lwd=2, xlab="", ylab="", bty="l", cex.lab=1.1,ylim=c(260,280), yaxt="n",xaxt="n")
mtext("Exaggerated response:",side=3, line=-7, adj=-2.5)
mtext("Air temperature",side=1, line=1, adj=.5)
mtext("Focal Response",side=2, line=1.5)
mtext("(e.g. species abundance)",side=2, line=.5, cex=.9)
y5<-rev(seq(261,280,by=2))
plot(x,y5,type="l", lwd=2, xlab="", ylab="", bty="l", yaxt="n",xaxt="n")
mtext("=",side=3, line=1.5, adj=1.2)
mtext("Artifically co-varying driver",side=1, line=1, adj=.5)
mtext("(e.g. abundance of competing species)",side=1, line=2, adj=.5,cex=.8)
y5<-rev(seq(261,280,by=2))
plot(x,y2,type="l", lwd=2, xlab="", ylab="", bty="l", yaxt="n",xaxt="n")
mtext("Air temperature",side=1, line=1, adj=.5)
###Modifications to figure based on lizzie's feedback, plus new version of figure:
quartz(width=11,height=4)
par(mfrow=c(1,3),omi=c(.1,.2,.1,1), mai=c(.6,.3,.5,.1))
x<-c(seq(1:10));y<-seq(261,270)
plot(x,y,type="l", lty=2,lwd=1, xlab="", ylab="", bty="l", cex.lab=1.2,ylim=c(255,275), yaxt="n",xaxt="n")
lines(x,y+.5,lty=1,lwd=2)#observed response
mtext("a) Temperature only response",side=3, line=2, adj=.5,cex=.9)
mtext("Treatment intensity",side=1, line=1, adj=.5)
mtext("(cooler", side=1, line=2, adj=0)
mtext("warmer)", side=1, line=2, adj=1)
text(6.5,266,"Observed response =",adj=0)
text(6.5,265,"Effect of temperature",adj=0)
mtext("Response",side=2, line=1.5)
mtext("(e.g. phenology, growth)",side=2, line=.5, cex=.9)
#Panel b: muted response
plot(x,y,type="l", lty=2,lwd=1, xlab="", ylab="", bty="l", cex.lab=1.2,ylim=c(255,275), yaxt="n",xaxt="n")
y2<-rev(seq(256.5,261,by=.5))
lines(x,y2,lty=3,lwd=1)#indirect effect
y3<-seq(261,265.5,by=.5)
lines(x,y3,lty=1,lwd=2)#observed response
mtext("b) Muted observed response",side=3, line=2, adj=.5,cex=.9)
mtext("Treatment intensity",side=1, line=1, adj=.5)
mtext("(cooler", side=1, line=2, adj=0)
mtext("warmer)", side=1, line=2, adj=1)
text(6.5,266,"Effect of temperature",adj=0)
text(6.5,263.2,"Observed response",adj=0)
text(6.5,256.5,"Indirect effect",adj=0)
#Panel c: exaggerated response
plot(x,y,type="l", lty=2,lwd=1, xlab="", ylab="", bty="l", cex.lab=1.2,ylim=c(255,275), yaxt="n",xaxt="n")
y4<-seq(261,274.5,by=1.5)
lines(x,y4,lty=3,lwd=1)#indirect effect
y5<-seq(261,283.5,by=2.5)
lines(x,y5,lty=1,lwd=2)#observed response
mtext("c) Exaggerated observed response",side=3, line=2, adj=.5,cex=.9)
mtext("Treatment intensity",side=1, line=1, adj=.5)
mtext("(cooler", side=1, line=2, adj=0)
mtext("warmer)", side=1, line=2, adj=1)
text(6.5,266,"Effect of temperature",adj=0)
text(6.5,274.5,"Observed response",adj=0)
text(6.5,269,"Indirect effect",adj=0)
#Alternative bar graph version of figure
quartz(width=11,height=4)
par(mfrow=c(1,3),omi=c(.1,.2,.1,.8), mai=c(.6,.3,.5,1))
x=barplot(c(1.5,NA,1.5),col=c("black",NA,"gray"),horiz=TRUE, space=c(1,1),xlim=c(-2,2),ylim=c(0,8), xlab="",ylab="")
abline(v=0, lwd=1)
mtext("Response",side=1, line=2)
mtext("(e.g. phenology, growth)",side=1, line=3, cex=.9)
text(1.5,x[1],"Effect of temperature", cex=1.5, adj=0)
text(1.5,x[3],"Observed response", cex=1.5, adj=0)
mtext("a) Isolated treatment effect",side=3, line=2, adj=.5,cex=.9)
#Panel b: muted response
x=barplot(c(1.5,-.5,1),col=c("black","white","gray"),horiz=TRUE, space=c(1,1),xlim=c(-2,2),ylim=c(0,8), xlab="",ylab="")
abline(v=0, lwd=1)
mtext("Response",side=1, line=2)
mtext("(e.g. phenology, growth)",side=1, line=3, cex=.9)
text(1.5,x[1],"Effect of temperature", cex=1.5, adj=0)
text(0,x[2],"Indirect effect", cex=1.5, adj=0)
text(1,x[3],"Observed response", cex=1.5, adj=0)
mtext("b) Muted observed response",side=3, line=2, adj=.5,cex=.9)
#Panel c: exaggerated response
x=barplot(c(1.5,.5,2),col=c("black","white","gray"),horiz=TRUE, space=c(1,1),xlim=c(-2,2),ylim=c(0,8), xlab="",ylab="")
abline(v=0, lwd=1)
mtext("Response",side=1, line=2)
mtext("(e.g. phenology, growth)",side=1, line=3, cex=.9)
text(1.5,x[1],"Effect of temperature", cex=1.5, adj=0)
text(.5,x[2],"Indirect effect", cex=1.5, adj=0)
text(2,x[3],"Observed response", cex=1.5, adj=0)
mtext("c) Exaggerated observed response",side=3, line=2, adj=.5,cex=.9)
## ---- garnerat/HW4_CART :: HW4_1_Regression_Tree_vs_Linear_Model.R ----
# Data Mining
# HW 4 Problem 1
# Create Linear Model and Regression Tree on Boston Housing data, compare results
##### packages #####
packages <- c("MASS","caret","ggplot2","GGally","boot","rpart","rpart.plot","rattle")
# "rattle" added for fancyRpartPlot() used below; install only missing packages
new.packages <- packages[!packages %in% installed.packages()[,"Package"]]
if (length(new.packages)) install.packages(new.packages)
lapply(as.list(packages), library, character.only = TRUE)
##### Partition Train/Test #####
data("Boston")
set.seed(1984)
index <- sample(1:nrow(Boston),nrow(Boston)*.7)
# Alternate data split (note: this overwrites the index above; comment out one
# of the two set.seed/sample pairs to choose which split is used)
set.seed(5001)
index <- sample(1:nrow(Boston),nrow(Boston)*.7)
train <- Boston[index,]
test <- Boston[-index,]
##### Summary Stats #####
str(Boston)
summary(Boston) #no NA values
##### EDA #####
#Scatterplot Matrix and Density Plots
ggpairs(Boston) # some variables are clearly correlated, some predictors
# have a clear relationship with medv, and there are outliers present
#prep for correlation matrix
num <- sapply(train, is.numeric)
df_cor <- train[,num]
zv <- apply(df_cor, 2, function(x) length(unique(x)) <= 2)
sum(zv)
zv
df_cor <- df_cor[, !zv]
corr <- cor(df_cor,use = "pairwise.complete.obs")
highCorr <- findCorrelation(corr, 0.70)
length(highCorr)
colnames(corr[, highCorr,drop = FALSE])
ggcorr(df_cor, method = c("complete", "pearson"), nbreaks = 10) #could also use pairwise, which is the default
# 5 variables have high correlation (>.7)
# loop of boxplot based on example here: https://www.r-bloggers.com/ggplot2-graphics-in-a-loop/
# need to fix axis titles and scale issues from outliers
boxplot.loop <- function(x, na.rm = TRUE, ...) {
numer <- sapply(x, is.numeric)
nm <- names(x[,numer])
for (i in seq_along(nm)) {
plots <- ggplot(data = x,aes(x = factor(0), y = x[,nm[i]])) + geom_boxplot()
ggsave(plots,filename=paste("boxplot",nm[i],".png",sep="_"))
}
}
boxplot.loop(train)
# outliers are present in many variables
##### Linear Model with no variable manipulation #####
linreg.model <- lm(medv ~ ., data = train)
summary(linreg.model)
##### variable selection #####
# set null and full model for use in variable selection
nullmodel = lm(medv ~ 1, data = train)
fullmodel = lm(medv ~ ., data = train)
#bw
model.backward = step(fullmodel, direction = "backward")
#fw
model.forward = step(nullmodel, scope = list(lower = nullmodel, upper = fullmodel),
direction = "forward")
#step
model.stepwise = step(nullmodel, scope = list(lower = nullmodel, upper = fullmodel),
direction = "both")
# Summary
stepwise.summary <- summary(model.stepwise)
# all came to same model, so will pick one arbitrarily (stepwise)
#Diagnostic Plots
plot(model.stepwise)
# data does not seem to fit the underlying assumptions perfectly based on the diagnostic plots
##### Linear Model Fit #####
# MSE
(stepwise.summary$sigma)^2
# R-squared
stepwise.summary$r.squared
# Adjusted R-squared (penalizes model complexity)
stepwise.summary$adj.r.squared
# AIC and BIC of the model, these are information criteria. Smaller values indicate better fit.
AIC(model.stepwise)
BIC(model.stepwise)
# Out of sample prediction
# pi is a vector that contains predicted values for test set.
pi <- predict(object = model.stepwise, newdata = test)
# Mean Squared Error (MSE): average of the squared differences between the predicted and actual values
mean((pi - test$medv)^2)
# 21.32945
#3-fold Cross Validation
model.glm.cv <- glm(formula = medv ~ lstat + rm + ptratio + dis + nox + chas +
zn + crim + rad + tax + black, data = Boston)
linear.cv <- cv.glm(data = Boston, glmfit = model.glm.cv, K = 3)
linear.cv$delta[2]
# 23.57908
##### Regression Tree #####
model.tree <- rpart(medv ~., data = train)
# plot model
rpart.plot(model.tree,tweak = 1.2)
# out of sample prediction (full tree)
test.pred = predict(model.tree, test)
# MSE (full tree)
mean((test.pred - test$medv)^2)
##### Prune Tree #####
plotcp(model.tree) # prune tree to size of 6
printcp(model.tree)
model.tree.pruned <- prune(model.tree, cp = model.tree$cptable[which.min(model.tree$cptable[,"xerror"]),"CP"])
# trying out rattle package's "fancyRpartPlot" (this is a regression tree)
fancyRpartPlot(model.tree.pruned, main="Pruned Regression Tree")
# out of sample prediction (pruned tree)
test.pred.prune = predict(model.tree.pruned, test)
# MSE (pruned tree)
mean((test.pred.prune - test$medv)^2)
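## Side-by-side out-of-sample MSE for the three fits above (a quick summary
## sketch; uses the objects already computed in this script):
mse.compare <- data.frame(
  model = c("stepwise linear", "full tree", "pruned tree"),
  test.MSE = c(mean((pi - test$medv)^2),
               mean((test.pred - test$medv)^2),
               mean((test.pred.prune - test$medv)^2)))
mse.compare[order(mse.compare$test.MSE), ]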
## ---- aristoteleo/glmGamPoi :: tests/testthat.R ----
library(testthat)
library(glmGamPoi)
test_check("glmGamPoi")
% ---- darcyj/ENMeval :: man/ENMeval-package.Rd ----
\name{ENMeval-package}
\alias{ENMeval-package}
\alias{ENMeval}
\docType{package}
\title{ Automated runs and evaluations of ecological niche models }
\description{Automatically partitions data into bins for model training and testing, executes ecological niche models (ENMs) across a range of user-defined settings, and calculates evaluation metrics to help achieve a balance between goodness-of-fit and model complexity.}
\details{
\tabular{ll}{
Package: \tab ENMeval\cr
Type: \tab Package\cr
Version: \tab 0.2.2\cr
Date: \tab 2017-01-04\cr
License: \tab GNU 3.0\cr
}
The \pkg{ENMeval} package (Muscarella \emph{et al.} 2014) (1) automatically partitions data into training and testing bins using one of six methods (including several options for spatially independent partitions as well as user-defined bins), (2) executes a series of ENMs using Maxent (Phillips \emph{et al.} 2006) with a variety of user-defined settings (i.e., feature classes and regularization multipliers), conducting \emph{k}-fold cross validation, and (3) calculates multiple evaluation metrics to aid in selecting model settings that balance model goodness-of-fit and complexity (i.e., "model tuning" or "smoothing").
\code{\link{ENMevaluate}} is the primary function of the \pkg{ENMeval} package, and multiple other functions highlighted below are called when it is run. The six options for partitioning occurrence data into training and testing (i.e., calibration and evaluation) bins are: \emph{n}-1 jackknife, random \emph{k}-fold, user-specified bins, and three explicit methods of masked geographically structured \emph{k}-fold partitioning (see: \code{\link{get.evaluation.bins}}). After model training, these bins are used to calculate five metrics of model performance for each combination of settings: model discrimination (AUC of test localities), the difference between training and testing AUC, two different threshold-based omission rates, and the small sample-size corrected version of the Akaike information criterion (AICc), the latter using the unpartitioned dataset. A model prediction (as a raster layer) using the full (unpartitioned) dataset is generated for each combination of feature class and regularization multiplier settings. Similarity of these models in geographic space (i.e., "niche overlap") can be calculated to better understand how model settings change predictions (see \code{\link{calc.niche.overlap}}). The results of \code{ENMevaluate} are returned as an object of class \code{\link{ENMevaluation-class}}. A basic plotting function (\code{\link{eval.plot}}) can be used to visualize how evaluation metrics depend on model settings.
As of version 0.2.0, \code{\link{ENMevaluate}} includes an option for parallel computing. Setting \code{parallel = TRUE} can significantly speed up processing time, particularly for large analyses. For very small analyses, it may actually take longer than running with \code{parallel = FALSE}.
}
\note{
Currently, \pkg{ENMeval} only implements the Maxent algorithm, but we eventually plan to expand it to work with other algorithms. All calculations are based on the raw Maxent output (i.e., \emph{not} logistic or cumulative transformations) and users can choose whether to use 'clamping' (see Maxent documentation for details on this option). Additionally, Maxent models are run with the arguments: \code{noaddsamplestobackground} and \code{noremoveDuplicates}. Users should consult Maxent documentation (Phillips \emph{et al.} 2006) and other references (e.g., Phillips and Dudik 2008) for more information on these options. We note that interested users can edit the source code of \code{ENMeval} (in particular, the \code{\link{make.args}} and \code{\link{tuning}} functions) if they desire to change these or other options.
\code{ENMevaluate} directly uses several functions from the \pkg{dismo} package (Hijmans \emph{et al.} 2011), the most important of which is the \code{maxent} function that runs the Maxent algorithm (Phillips \emph{et al.} 2006) in Java. Before running this command, the user must first download Maxent from \href{http://www.cs.princeton.edu/~schapire/maxent/}{this website}. Then, place the file 'maxent.jar' in the 'java' folder of the \pkg{dismo} package. The user can locate that folder by typing: \code{system.file("java", package="dismo")}. For additional details, users should consult the documentation of the \pkg{dismo} package.
}
\author{
Robert Muscarella, Peter J. Galante, Mariano Soley-Guardia, Robert A. Boria, Jamie M. Kass, Maria Uriarte and Robert P. Anderson
Maintainer: Robert Muscarella <bob.muscarella@gmail.com>
}
\references{
Hijmans, R. J., Phillips, S., Leathwick, J. and Elith, J. 2011. dismo package for R. Available online at: \url{https://cran.r-project.org/package=dismo}.
Muscarella, R., Galante, P. J., Soley-Guardia, M., Boria, R. A., Kass, J. M., Uriarte, M., and Anderson, R. P. 2014. ENMeval: An R package for conducting spatially independent evaluations and estimating optimal model complexity for Maxent ecological niche models. \emph{Methods in Ecology and Evolution}, \bold{5}: 1198-1205.
Phillips, S. J., Anderson, R. P., and Schapire, R. E. 2006. Maximum entropy modeling of species geographic distributions. \emph{Ecological Modelling}, \bold{190}: 231-259.
Phillips, S. J., and Dudik, M. 2008. Modeling of species distributions with Maxent: new extensions and a comprehensive evaluation. \emph{Ecography}, \bold{31}: 161-175.
}
\keyword{ niche }
\keyword{ ENM }
\keyword{ SDM }
\seealso{
\code{maxent} in the \pkg{dismo} package
}
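% A usage sketch added for illustration; it is not from the package authors.
\examples{
# Minimal, hypothetical call: `occ` is assumed to be a two-column data frame
# of occurrence coordinates and `env` a RasterStack of predictor layers.
\dontrun{
res <- ENMevaluate(occ, env, method = "block",
                   RMvalues = c(1, 2), fc = c("L", "LQ"))
res@results
}
}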
|
71b5d398333f240eb88f8e6027716c41ca4829a5
|
9d211512cc5ff67f0aba6562f9248dc6ad80b673
|
/man/calAv.Rd
|
aba3cc9a2d550ffe9ca9e08b4c6ed0a6812e7440
|
[] |
no_license
|
amsszlh/scMC
|
b4a82f3fb1692fe6c75eb40e3a3b89d3fef51e78
|
40908ae90fc153c2ba270c8da46fe70068c146e5
|
refs/heads/main
| 2023-02-04T10:27:54.579463
| 2020-12-27T19:35:27
| 2020-12-27T19:35:27
| 324,831,102
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 286
|
rd
|
calAv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeling.R
\name{calAv}
\alias{calAv}
\title{compute the projected data}
\usage{
calAv(v, args)
}
\arguments{
\item{v}{correction vectors}
\item{args}{arguments}
}
\description{
compute the projected data
}
|
2a3f733c5227bc58a2395e4c5ba15805d18b987e
|
de7ae016ab661e1aa34116c644d094b9c552de62
|
/server.R
|
e4f9a8490a3450ec2556697c94fae1c4acc20e9b
|
[] |
no_license
|
jwyatt85/field_experiments
|
caca6519c56d4d5e6073ec7ac099082e10064eb5
|
0488a6499df4e772c7074818a9baabad95c56ecd
|
refs/heads/master
| 2021-01-19T02:05:12.155940
| 2017-03-15T15:19:21
| 2017-03-15T15:19:21
| 55,979,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,748
|
r
|
server.R
|
# server.R
source("helpers.R")
options(shiny.maxRequestSize = 30*1024^2)
shinyServer(function(input, output) {
#make the plot - reactive so I can use the function
makePlot <- reactive({
inFile <- input$file1
if(is.null(inFile)) return(NULL)
dat<- read.csv(inFile$datapath)
Y = dat[,input$Y]
D = dat[,input$D]
Z = dat[,input$Z]
if(!is.numeric(Y)) stop("The dependent variable must be numeric.")
if(!is_binary(D)) stop("The contact variable can only include 0's and 1's.")
if(!is_binary(Z)) stop("The treatment variable can only include 0's and 1's.")
df <- data.frame(Y, D, Z = ifelse(Z==1, "Treatment", "Control"))
group_by(df, Z) %>%
dplyr::summarize(y_bar = mean(Y),
N = n(),
se = sd(Y)/sqrt(N),
ui = y_bar + 1.96*se,
li = y_bar - 1.96*se) %>%
ggplot(aes(x=Z, y=y_bar)) +
geom_pointrange(aes(ymin=li, ymax=ui), color="blue", fill="white", shape=22, size=1.5) +
ylim(0, NA) +
ggtitle("Difference Between Conditional Groups") +
xlab("Condition") + ylab("Average Outcome") +
theme_bw()
})
# Create Plot
output$out_plot <- renderPlot({
makePlot()
})
# Upload a Dataset
output$outcome <- renderUI({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
dat<- read.csv(inFile$datapath)
vars <- names(dat)
selectInput(inputId = "Y", "Dependent Variable", vars, selected=vars[1])
})
output$contact <- renderUI({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
dat<- read.csv(inFile$datapath)
vars <- names(dat)
selectInput(inputId = "D", "Treatment Received", vars, selected=vars[2])
})
output$treatment <- renderUI({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
dat<- read.csv(inFile$datapath)
vars <- names(dat)
selectInput(inputId = "Z", "Treatment Assigned", vars, selected=vars[3])
})
# Create summary table - left panel
output$summary_table_2 <- renderTable(digits=3, expr = {
inFile <- input$file1
if(is.null(inFile)) return(NULL)
dat<- read.csv(inFile$datapath)
Y = dat[,input$Y]
D = dat[,input$D]
Z = dat[,input$Z]
if(!is.numeric(Y)) stop("The dependent variable must be numeric.")
if(!is_binary(D)) stop("The contact variable can only include 0's and 1's.")
if(!is_binary(Z)) stop("The treatment variable can only include 0's and 1's.")
xtable(table_maker_2(Y,D,Z))
})
#download Report
output$downloadReport <- downloadHandler(
filename = function() {
paste('my-report', sep = '.', switch(
input$format, PDF = 'pdf', HTML = 'html', Word = 'docx'
))
},
content = function(file) {
src <- normalizePath('reports.Rmd')
# temporarily switch to the temp dir, in case you do not have write
# permission to the current working directory
owd <- setwd(tempdir())
on.exit(setwd(owd))
file.copy(src, 'reports.Rmd')
library(rmarkdown)
out <- render('reports.Rmd', switch(
input$format,
PDF = pdf_document(), HTML = html_document(), Word = word_document()
))
file.rename(out, file)
}
)
makeTable <- reactive({
expr = {
inFile <- input$file1
if(is.null(inFile)) return(NULL)
dat<- read.csv(inFile$datapath)
Y = dat[,input$Y]
D = dat[,input$D]
Z = dat[,input$Z]
if(!is.numeric(Y)) stop("The dependent variable must be numeric.")
if(!is_binary(D)) stop("The contact variable can only include 0's and 1's.")
if(!is_binary(Z)) stop("The treatment variable can only include 0's and 1's.")
statistical_results_2(Y,D,Z)
}
})
output$out_table <- renderTable({
makeTable()
})
####### Randomization Section
output$out_table3 <- renderTable({
xtable(head(makeExport()))
})
#export file
makeExport <- reactive({
expr = {
inFile2 <- input$file2
if(is.null(inFile2)) return(NULL)
x <- input$randtype
CondPer <- input$ConditionPercent
dat2<- read.csv(inFile2$datapath)
dat2 <- na.omit(dat2)
colnames(dat2)[1] <- "ID"
#if user chooses block Rand
if(x == 1){
out <- block(dat2, n.tr = 2, id.vars = colnames(dat2)[1], algorithm="optGreedy",
block.vars = colnames(dat2)[2:length(dat2)], distance =
"mahalanobis", level.two = FALSE, verbose = TRUE)
assg <- assignment(out, namesCol=c("Treatment", "Control"), seed = runif(1, 1, 100))
final.assignment <- as.data.frame(assg[1])
colnames(final.assignment)[1] <- "Treatment"
colnames(final.assignment)[2] <- "Control"
colnames(final.assignment)[3] <- "Distance"
row.names(final.assignment) <- NULL
return(final.assignment)
} # end of Blocked Rand
#if User chooses SRA
if(x == 2){
dat2$Condition <- as.integer(ra(nrow(dat2),round(nrow(dat2)*(CondPer/100)),seed=runif(1, 1, 100))[,2])
return(dat2)
}
}
})
output$downloadData <- downloadHandler(
filename = function() {paste("Final.Randomized.List", '.csv', sep='') },
content = function(file) {
write.csv(makeExport(), file)
}
)
makeLogit <- reactive({
inFile2 <- input$file2
if(is.null(inFile2)) return(NULL)
df.original <- read.csv(inFile2$datapath)
df.original <- na.omit(df.original)
df.rando <- makeExport()
x <- input$randtype
seed1 <- input$seed
if(x == 1){
#checks to see if unique ID is in "Treatment"
df.original$Condition <- as.integer(ifelse(df.original[,1] %in% df.rando[,1], 1,0))
df.original$ID <- NULL
#now run regression
my.logit <- glm(Condition ~., data = df.original, family = binomial(link = "logit"))
return(my.logit)
}
if(x == 2) {
df.rando$ID <- NULL
my.logit <- glm(Condition ~., data = df.rando, family = binomial(link = "logit"))
return(my.logit)
}
})
output$out_logit <- renderTable({
xtable(makeLogit())
})
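# --- Hedged illustration (not part of the original app) ----------------------
# The logit above acts as a randomization balance check: under a valid random
# assignment no covariate should predict Condition. A self-contained sketch on
# simulated data (all names here are hypothetical):
balance_check_demo <- function(n = 200) {
  demo <- data.frame(age = rnorm(n), income = rnorm(n),
                     Condition = rbinom(n, 1, 0.5))
  # coefficients should be statistically indistinguishable from zero
  glm(Condition ~ ., data = demo, family = binomial(link = "logit"))
}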
### Logit chart outputs
makeLogitPlot <- reactive({
my.logit.for.plot <- makeLogit()
sjp.glm(my.logit.for.plot,
axisLabels.y = predlab,
type = "pred") # changed to pred instead of prob, based on predict.glm
})
# Create Plot
output$out_plotLogit <- renderPlot({
makeLogitPlot()
})
### Logit chart outputs
makeBoxPlot <- reactive({
inFile2 <- input$file2
if(is.null(inFile2)) return(NULL)
df.original <- read.csv(inFile2$datapath)
df.original <- na.omit(df.original)
df.rando <- makeExport()
x <- input$randtype
if(x == 1) {
#checks to see if unique ID is in "Treatment"
df.original$Condition <- as.integer(ifelse(df.original[,1] %in% df.rando[,1], 1,0))
df.original$ID <- NULL
df.m <- melt(df.original, id.var = "Condition")
p <- ggplot(df.m, aes(x=factor(variable), y=value)) + geom_boxplot(aes(fill=factor(Condition)))
p <- p + theme_bw() + ggtitle("Box Plot Analysis Across Groups") + scale_fill_manual(values = alpha(c("dark blue", "red"), .90))
return(p)
}
if(x == 2){
df.rando$ID <- NULL
df.m <- melt(df.rando, id.var = "Condition")
p <- ggplot(df.m, aes(x=factor(variable), y=value)) + geom_boxplot(aes(fill=factor(Condition)))
p <- p + theme_bw() + ggtitle("Box Plot Analysis Across Groups") + scale_fill_manual(values = alpha(c("dark blue", "red"), .90))
return(p)
}
})
output$out_box <- renderPlot({
makeBoxPlot()
})
#make MDE Plots
### MDE chart outputs
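# --- Hedged note (not part of the original app) ------------------------------
# The mde() helper used below is defined in helpers.R and is not shown here.
# For reference, a textbook minimum detectable effect for a two-arm trial with
# a share p of N units assigned to treatment is approximately
#   MDE = (z_{1-alpha/2} + z_{power}) * sd(Y) / sqrt(p * (1 - p) * N)
# A minimal sketch under those assumptions:
mde_sketch <- function(N, p, sd.y, alpha = 0.05, power = 0.8) {
  (qnorm(1 - alpha / 2) + qnorm(power)) * sd.y / sqrt(p * (1 - p) * N)
}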
makeMDEPlot <- reactive({
treatment <- input$percentintreatment
treatment <- treatment/100
contactrate <- input$contactrate
contactrate <- contactrate / 100
colrate <- input$colrate
colrate <- colrate/100
conrate <- input$controlrate
conrate <- conrate/100
n <- input$total
df.mde <- mde(conrate, n, 0, treatment, contactrate, colrate)
p <- ggplot(df.mde, aes(fill=variable, y=value, x=variable))
p <- p + geom_bar(position="dodge", stat="identity",colour = "black") + theme_bw()
p <- p + scale_fill_manual(values = alpha(c("dark blue", "red"), .90)) + ggtitle("MDE for ITT and TOT")
p <- p + geom_text(aes(label=round(value, digits=5)), position=position_dodge(width=0.9), vjust=-0.25)
return(p)
})
output$out_mde <- renderPlot({
makeMDEPlot()
})
makeMDEspanPlot <- reactive({
n <- input$total
contactrate <- input$contactrate
contactrate <- contactrate / 100
colrate <- input$colrate
colrate <- colrate/100
my.mde.df <- mde.span(n, contactrate, colrate)
p <- ggplot(my.mde.df, aes(fill=variable, x=per.treat2, y=value))
p <- p + geom_bar(position="dodge", stat="identity",colour = "black") + theme_bw() +
geom_line(aes(fill=variable)) + geom_text(size = 4, aes(label=round(value, digits=3)), position=position_dodge(width=0.05), vjust=-0.25)
p <- p + scale_fill_manual(values = alpha(c("dark blue", "red"), .90)) + ggtitle("ITT/TOT MDE Over Span")
return(p)
})
output$out_mde_span <- renderPlot({
makeMDEspanPlot()
})
makePlotSubGroups <- reactive({
inFile <- input$file5
if(is.null(inFile)) return(NULL)
dat<- read.csv(inFile$datapath)
colnames(dat)[1] <- "DV"
colnames(dat)[2] <- "Treatment"
colnames(dat)[2] <- "Subgroup"
})
# Create Plot
output$out_plot_subgroups <- renderPlot({
makePlotSubGroups()
})
#mapping
output$maps <- renderPlot({
makemap()
})
makemap <- reactive({
expr = {
inFile2 <- input$mapcsv
if(is.null(inFile2)) return(NULL)
dat2<- read.csv(inFile2$datapath)
dat2 <- na.omit(dat2)
if(length(names(dat2)) != 2){
stop("You can only have 2 columns: fips/stname and value")
}
names(dat2) <- c("region", "value")
color <- as.character(input$mapcolor)
maptitle <- as.character(input$maptitle)
maplegend <- as.character(input$maplegend)
if(input$statezoom == ""){
statezoomfinal <- tolower(state.name)
statezoomfinal <- statezoomfinal[-c(grep("alaska|hawaii", statezoomfinal))]
}else{
statezoomfinal <- tolower(unlist(strsplit(input$statezoom, "[,]")))
trim <- function (x) gsub("^\\s+|\\s+$", "", x) # function to remove leading or trailing spaces caused by the unlist(strsplit())
statezoomfinal <- trim(statezoomfinal)
}
if(input$maptype == 'state'){
dat2$region <- as.character(dat2$region)
dat2$value <- as.numeric(dat2$value)
my_map <- state_choropleth(dat2,
title = maptitle,
legend = maplegend,
zoom = statezoomfinal) + scale_fill_brewer(name = maplegend, palette = color, drop=FALSE)
}
if(input$maptype == 'county'){
dat2$region <- as.numeric(dat2$region)
dat2$value <- as.numeric(dat2$value)
my_map <- county_choropleth(dat2,
title = maptitle,
legend = maplegend,
state_zoom = statezoomfinal) + scale_fill_brewer(name = maplegend, palette = color, drop=FALSE)
}
if(input$maptype == 'zip'){
dat2$region <- as.character(dat2$region)
dat2$value <- as.numeric(dat2$value)
my_map <- zip_choropleth(dat2,
state_zoom= statezoomfinal,
title=maptitle,
num_colors = 7,
legend=maplegend) + scale_fill_brewer(name = maplegend, palette = color, drop=FALSE)
}
return(my_map)
}})
#map download section
output$Download = downloadHandler(
filename = 'my_map.png',
content = function(file) {
device <- function(..., width, height) {
grDevices::png(..., width = width, height = height,
res = 300, units = "in")
}
ggsave(file, plot = makemap(), device = device)
})
# Sub-group analysis
# Upload a Dataset
output$Sub.DV <- renderUI({
inFile <- input$file5
if (is.null(inFile))
return(NULL)
dat<- read.csv(inFile$datapath)
vars <- names(dat)
selectInput(inputId = "Sub.DV", "Dependent Variable", vars, selected=vars[1])
})
output$Sub.Treat <- renderUI({
inFile <- input$file5
if (is.null(inFile))
return(NULL)
dat<- read.csv(inFile$datapath)
vars <- names(dat)
selectInput(inputId = "Sub.Treat", "Treatment Received", vars, selected=vars[2])
})
output$Sub.Group <- renderUI({
inFile <- input$file5
if (is.null(inFile))
return(NULL)
dat<- read.csv(inFile$datapath)
vars <- names(dat)
selectInput(inputId = "Sub.Group", "Sub Group to Analyze", vars, selected=vars[3])
})
### make sub group plot
#make the plot - reactive so I can use the function
makeSubGroupPlot <- reactive({
inFile <- input$file5
if(is.null(inFile)) return(NULL)
dat<- read.csv(inFile$datapath)
DV = dat[,input$Sub.DV]
Treatment = dat[,input$Sub.Treat]
Sub.Group = dat[,input$Sub.Group]
if(!is.numeric(DV)) stop("The dependent variable must be numeric: 1/0")
if(!is.numeric(Treatment)) stop("The treatment variable can only be numeric")
if(!is.numeric(Treatment)) stop("The Group variable can only include Numbers for specific Groups (Ex: 1=M; 2=F)")
df <- data.frame(DV, Treatment, Sub.Group)
#run logits
df$DV <- as.factor(df$DV)
df$Treatment <- as.factor(df$Treatment)
df$Sub.Group <- as.factor(df$Sub.Group)
my.logit <- glm(DV ~ Treatment + Treatment*Sub.Group, family=binomial(link="logit"), data=df)
plot(allEffects(my.logit))
})
# Create Plot for Sub-groups
output$subgroup.plot <- renderPlot({
makeSubGroupPlot()
})
#### --- Output the Logit Effects for the
makeSubGroupTable <- reactive({
inFile <- input$file5
if(is.null(inFile)) return(NULL)
dat<- read.csv(inFile$datapath)
DV = dat[,input$Sub.DV]
Treatment = dat[,input$Sub.Treat]
Sub.Group = dat[,input$Sub.Group]
if(!is.numeric(DV)) stop("The dependent variable must be numeric: 1/0")
if(!is.numeric(Treatment)) stop("The treatment variable can only be numeric")
if(!is.numeric(Treatment)) stop("The Group variable can only include Numbers for specific Groups (Ex: 1=M; 2=F)")
df <- data.frame(DV, Treatment, Sub.Group)
#run logits
df$DV <- as.factor(df$DV)
df$Treatment <- as.factor(df$Treatment)
df$Sub.Group <- as.factor(df$Sub.Group)
my.logit <- glm(DV ~ Treatment + Treatment*Sub.Group, family=binomial(link="logit"), data=df)
eff <- allEffects(my.logit)
my.list <- eff$`Treatment:Sub.Group`
my.list.df <- as.data.frame(my.list)
return(my.list.df)
})
output$sub.group.table <- renderTable({
xtable(makeSubGroupTable())
})
}) #end of shiny server everything above ^^
|
6ceaf1e3640248e0ef118a1a8b2dd5704bb25097
|
7472860795edf6f7332a1c6acc8dcc6dca966297
|
/VectorManipulation/Vdecode/Vdecode.r
|
e8759486dc1eb05631fccc56c6d3542d6c1aad8a
|
[] |
no_license
|
selectedacre/Max_Objects
|
c3de3ae1240625e1d9ebc728bb4a9a623382441f
|
1533ab2f00c7d17a51f82b91fbaae4ec390514d0
|
refs/heads/master
| 2021-01-14T08:35:30.386587
| 2016-04-24T20:49:03
| 2016-04-24T20:49:03
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 505
|
r
|
Vdecode.r
|
#include "Carbon.r"
#include "QuickDraw.r"
resource 'STR#' (17166, "Vdecode") {
{ /* array StringArray: 3 elements */
/* [1] */
"Converts a mixed-base vector to a number"
".";
/* [2] */
"List input";
/* [3] */
"Result output"
}
};
resource 'vers' (1, "Vdecode") {
0x1,
0x0,
release,
0x0,
0,
"1",
"Converts a mixed-base vector to a number"
"."
};
resource 'vers' (2, "Vdecode") {
0x1,
0x0,
release,
0x0,
0,
"1",
"©2003 by Schiphorst, Jaffe, Gregory and "
"Gregson."
};
|
6c109e561ab08b14d1ef78c57bec8f6c58c9786c
|
38f396c9d6b7a964e909355ab8978dc72132748a
|
/20151201/rei8.R
|
7c5db4e02f884b8ec07c8df3d1858af27135487c
|
[] |
no_license
|
shengbo-medley/MiscForStudy
|
321417549863208316aab4f2f129eeb6c70184d1
|
9c9cd1bed80efb3756ec3b38d84a06b33de54b23
|
refs/heads/master
| 2020-04-06T03:40:41.695471
| 2016-01-26T15:53:24
| 2016-01-26T23:42:17
| 42,396,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 96
|
r
|
rei8.R
|
source("rei7.R")
head(modelLookup())
packageVersion("caret")
length(unique(modelLookup()$model))
|
69cb5a4fdf653edb14f7201e420d5111a82d3f27
|
5bd968ab7897d690e57c21d5012c55be22aac23e
|
/anno.R
|
b14c63cef3267578b3b8626e7226beeeeb99118d
|
[] |
no_license
|
gahoo/circRNAmerged
|
f315f0982056463c2e002fd73eab9a4f51a584b4
|
b0f54f03cb4e2ed9ffe8aa6e91ebb405821abb71
|
refs/heads/master
| 2021-01-10T11:12:55.637230
| 2016-01-09T16:46:27
| 2016-01-09T16:46:27
| 45,912,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,387
|
r
|
anno.R
|
library(dplyr)
library(tidyr)    # gather() / spread()
library(ggplot2)
library(UpSetR)   # upset()
library(GenomicRanges)
library(org.Hs.eg.db)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
GeneID_SYMBOL<-AnnotationDbi::select(org.Hs.eg.db,
keys = keys(org.Hs.eg.db),
columns = c("ENTREZID", "SYMBOL")) %>%
rename_(GENEID="ENTREZID")
AnnotationDbi::select(TxDb.Hsapiens.UCSC.hg19.knownGene,
keys = keys(TxDb.Hsapiens.UCSC.hg19.knownGene, keytype='GENEID'),
columns = c('GENEID', 'TXID', 'TXCHROM', 'TXSTRAND', 'TXSTART', 'TXEND'),
keytype = 'GENEID') %>%
group_by(GENEID, TXCHROM, TXSTRAND) %>%
summarise(start=min(TXSTART),
end = max(TXEND))%>%
merge(GeneID_SYMBOL, by='GENEID', all.x=T) %>%
makeGRangesFromDataFrame(seqnames.field="TXCHROM",
strand.field="TXSTRAND",
keep.extra.columns=T) ->
GeneRanges
ciri_rbind %>%
#filter(circRNA_type == 'intergenic region') %>%
filter(!is.na(circRNA_type)) %>%
dplyr::select(chr, circRNA_start, circRNA_end, circRNA_ID) %>%
unique %>%
makeGRangesFromDataFrame(seqnames.field='chr',
start.field='circRNA_start',
end.field='circRNA_end',
keep.extra.columns=T) ->
intergenicRanges
hits<-findOverlaps(intergenicRanges, GeneRanges)
anno <- cbind(mcols(intergenicRanges[queryHits(hits)]),
mcols(GeneRanges[subjectHits(hits)])) %>%
unique %>%
as.data.frame %>%
group_by(circRNA_ID) %>%
summarise(region_gene_id=paste0(GENEID, collapse = ','),
region_symbol=paste0(SYMBOL, collapse = ','),
region_gene_cnt=n())
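# --- Hedged toy example (not part of the original analysis) ------------------
# findOverlaps() pairs each query range with every subject range it overlaps,
# which is what drives the annotation join above; a minimal sketch:
toy_query   <- GRanges("chr1", IRanges(start = c(1, 100), width = 50))
toy_subject <- GRanges("chr1", IRanges(start = 120, end = 160))
findOverlaps(toy_query, toy_subject)  # query 2 overlaps subject 1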
ciri_rbind %>%
mutate(depth.total = junction.Normal+junction.Tumor+non_junction.Normal+non_junction.Tumor,
depth.Normal = junction.Normal+non_junction.Normal,
depth.Tumor = junction.Tumor+non_junction.Tumor) %>%
left_join(anno) ->
ciri_rbind_anno
# root-mean-square of the values (despite the name, not an absolute difference)
abs_diff<-function(x){sqrt(sum(x^2)/length(x))}
write.table(ciri_rbind_anno, file='ciri_rbind_region_check.txt', row.names=F, quote=F, sep='\t')
ciri_rbind_anno %>%
dplyr::select(circRNA_ID, ratio.Normal, ratio.Tumor) %>%
mutate(ratio.Normal = 100 * ratio.Normal,
ratio.Tumor = 100 * ratio.Tumor,
ratio.diff = ratio.Tumor - ratio.Normal) %>%
group_by(circRNA_ID) %>%
summarise(occurrence = n(),
ratio.Normal.sd = sd(ratio.Normal, na.rm=T),
ratio.Tumor.sd = sd(ratio.Tumor, na.rm=T),
ratio.Diff.sd = sd(ratio.diff, na.rm=T),
#ratio.Normal.sd = ifelse(is.na(ratio.Normal.sd),1,ratio.Normal.sd),
#ratio.Tumor.sd = ifelse(is.na(ratio.Tumor.sd),1,ratio.Tumor.sd),
#ratio.Diff.sd = ifelse(is.na(ratio.Diff.sd),1,ratio.Diff.sd),
ratio.abs_diff = abs_diff(ratio.diff),
ratio.rank = ratio.abs_diff/(ratio.Normal.sd * ratio.Tumor.sd * ratio.Diff.sd),
ratio.rank2 = ratio.abs_diff/(ratio.Normal.sd * ratio.Tumor.sd),
ratio.rank3 = ratio.abs_diff/ratio.Diff.sd
) ->
ciri_rbind_anno_rank
write.table(ciri_rbind_anno_rank, file='ciri_rbind_rank.txt', row.names=F, quote=F, sep='\t')
countSample<-function(df){
df %>%
dplyr::select(circRNA_ID) %>%
group_by(circRNA_ID) %>%
summarise(sample_cnt=n())
}
ciri_rbind_anno %>%
filter(junction.Normal <= 1,
junction.Tumor >= 3,
depth.total >= 10,
p.values <=0.05) ->
ciri_rbind_anno_more_tumor
ciri_rbind_anno %>%
filter(junction.Normal >= 3,
junction.Tumor <= 1,
depth.total >= 10,
p.values <=0.05) ->
ciri_rbind_anno_more_normal
ciri_rbind_anno_more_normal<-merge(ciri_rbind_anno_more_normal,
countSample(ciri_rbind_anno_more_normal),
all=T)
ciri_rbind_anno_more_tumor<-merge(ciri_rbind_anno_more_tumor,
countSample(ciri_rbind_anno_more_tumor),
all=T)
write.table(ciri_rbind_anno_more_normal, file='ciri_rbind_region_check_more_normal.txt', row.names=F, quote=F, sep='\t')
write.table(ciri_rbind_anno_more_tumor, file='ciri_rbind_region_check_more_tumor.txt', row.names=F, quote=F, sep='\t')
getSetDf<-function(df){
df %>%
select(circRNA_ID, sample, p.values) %>%
mutate(p.values = 1) %>%
spread(sample, p.values, fill=0)
}
plotSampleSets<-function(df, ...){
setCnt<-length(unique(df$sample))
df %>%
getSetDf %>%
upset(nsets=setCnt,
#nintersects=50,
order.by='freq',
#number.angles =45,
decreasing=c(T,F),
...
)
}
filterCnt<-function(df, cnt){
df %>%
filter(sample_cnt >= cnt) %>%
"$"('circRNA_ID') %>%
as.character %>%
unique
}
plotCircSets<-function(df, cnt=0, ...){
candidates<-filterCnt(df, cnt)
df_set<-getSetDf(df)
df_set_t<-df_set
df_set %>%
left_join(unique(df[c('circRNA_ID', 'symbol', 'region_symbol')]), by='circRNA_ID') %>%
mutate(anno = sprintf("%s.%s",
ifelse(is.na(symbol), region_symbol, as.character(symbol)),
circRNA_ID)) -> df_set
row.names(df_set_t)<-unique(df_set$anno)
df_set %>%
filter(circRNA_ID %in% candidates) %>%
"$"("anno") -> candidates
#row.names(df_set_t)<-df_set$circRNA_ID
df_set_t<-as.data.frame(t(df_set_t[,-1]))
upset(df_set_t, sets = candidates, nsets = length(candidates), ...)
}
# plotCircSets<-function(df, cnt=0, ...){
# candidates<-filterCnt(df, cnt)
# df_set<-getSetDf(df)
# df_set_t<-df_set
# row.names(df_set_t)<-df_set$circRNA_ID
# df_set_t<-as.data.frame(t(df_set_t[,-1]))
# upset(df_set_t, sets = candidates, nsets = length(candidates), ...)
# }
pdf('plots/ciri_rbind_anno_more_normal.samples.pdf', width=8, height=6)
plotSampleSets(ciri_rbind_anno_more_normal,nintersects=20)
dev.off()
pdf('plots/ciri_rbind_anno_more_tumor.samples.pdf', width=8, height=6)
plotSampleSets(ciri_rbind_anno_more_tumor)
dev.off()
pdf('plots/ciri_rbind_anno_more_tumor.circRNA_Sets.inCGC.sampleCnt_ge2.pdf', width=8, height=6)
plotCircSets(ciri_rbind_anno_more_tumor %>% filter(inCGC==T),2)
dev.off()
pdf('plots/ciri_rbind_anno_more_tumor.circRNA_Sets.ge3.pdf', width=8, height=6)
plotCircSets(ciri_rbind_anno_more_tumor, 3)
dev.off()
pdf('plots/ciri_rbind_anno_more_normal.circRNA_Sets.inCGC.sampleCnt_ge3.pdf', width=8, height=6)
plotCircSets(ciri_rbind_anno_more_normal %>% filter(inCGC==T), 3)
dev.off()
pdf('plots/ciri_rbind_anno_more_normal.circRNA_Sets.sampleCnt_ge6.pdf', width=8, height=6)
plotCircSets(ciri_rbind_anno_more_normal, 6)
dev.off()
plotRelExpPattern<-function(df, circRNA_IDs){
df %>%
filter(circRNA_ID %in% circRNA_IDs) %>%
dplyr::select(ratio.Normal, ratio.Tumor, sample, circRNA_ID, p.values, symbol, region_symbol) %>%
gather(type,ratio,ratio.Normal,ratio.Tumor) %>%
mutate(type=gsub('ratio.','',type),
significant=p.values<=0.05,
log10P=log10(p.values),
anno = sprintf("%s\n%s",
ifelse(is.na(symbol), region_symbol, as.character(symbol)),
circRNA_ID)
) %>%
ggplot(aes(x=sample, y=ratio, group=type, color=type)) +
geom_line() +
geom_point(aes(size=-log10P, alpha=significant)) +
#scale_size_continuous(range=c(2,8)) +
facet_grid(anno~.) +
ylab('Relative Expression Ratio') +
theme(axis.text.x=element_text(angle=90))
}
pdf('plots/relExpPattern.pdf', height=12, width=12)
plotRelExpPattern(ciri_rbind_anno,
circRNA_IDs=filterCnt(ciri_rbind_anno_more_tumor %>% filter(inCGC==T), 2) ) +
ggtitle("Relative Expression Pattern of\n Tumor circRNA in CGC and more than 1 pairs")
plotRelExpPattern(ciri_rbind_anno,
circRNA_IDs=filterCnt(ciri_rbind_anno_more_tumor, 3) ) +
ggtitle("Relative Expression Pattern of\n Tumor circRNA in more than 2 pairs")
plotRelExpPattern(ciri_rbind_anno,
circRNA_IDs=filterCnt(ciri_rbind_anno_more_normal %>% filter(inCGC==T), 3) ) +
ggtitle("Relative Expression Pattern of\n Normal circRNA in CGC and more than 2 pairs")
plotRelExpPattern(ciri_rbind_anno,
circRNA_IDs=filterCnt(ciri_rbind_anno_more_normal, 6) ) +
ggtitle("Relative Expression Pattern of\n Normal circRNA in more than 5 pairs")
dev.off()
|
7b3d4bc4dc6264115206de6de8d00145997d784f
|
65f3c0ad386a67ed103b0419fadd32a8f201a30a
|
/inst/examples/time_varying_example.R
|
912452172f7dd9e9debac8d1bb309b5cee40c373
|
[
"MIT"
] |
permissive
|
r-glennie/CTMCdive
|
1d827dd4abe1cf24582deed30d29c986eb78af4d
|
ca2fb902d0a47eb523691ee911e0c73012e08bcc
|
refs/heads/master
| 2023-07-07T09:48:47.716882
| 2023-06-28T17:02:18
| 2023-06-28T17:02:18
| 160,235,889
| 2
| 4
|
MIT
| 2022-02-24T14:01:54
| 2018-12-03T18:29:45
|
R
|
UTF-8
|
R
| false
| false
| 2,388
|
r
|
time_varying_example.R
|
library(CTMCdive)
# Simulate data -----------------------------------------------------------
# total observation time
T <- 24 * 60 * 7
# time step
dt <- 0.1
# time-varying intensities
tgr <- seq(0, T, by = dt)
dive_I <- function(t) {
#return(rep(0.06, length(t)))
return(0.01 + 0.2 * (t/T - 1/2)^2)
}
surf_I <- function(t) {
x <- t / T
f <- 0.2 * x^11 * (10 * (1 - x))^6 + 4 *
(10 * x)^3 * (1 - x)^10
return(0.1 - f / 50)
}
# set kappa
kappa <- list(dive = 3, surface = 3)
# mean durations given start time
divei <- dive_I(tgr)
surfi <- surf_I(tgr)
surf_dur <- function(t, divei, tgr, dt) {
surv <- exp(-cumsum(divei[tgr >= t - 1e-10]) * dt)
est_duration <- sum(surv) * dt
return(est_duration)
}
dive_dur <- function(t, surfi, tgr, dt) {
surv <- exp(-cumsum(surfi[tgr >= t - 1e-10]) * dt)
est_duration <- sum(surv) * dt
return(est_duration)
}
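# --- Hedged note (not part of the original example) --------------------------
# Both helpers above approximate the expected sojourn time from start time t as
# the integral of the survival curve: E[T] = integral of S(u) du, with
# S(u) = exp(-integral of the intensity). For a constant intensity this reduces
# to 1 / lambda, e.g.:
sum(exp(-cumsum(rep(0.1, 1e4)) * 0.1)) * 0.1  # ~10, i.e. 1 / 0.1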
# plot truth
plot(tgr, dive_I(tgr), type = "l", lwd = 1.5, xlab = "Time", ylab = "Dive Intensity")
plot(tgr, surf_I(tgr), type = "l", lwd = 1.5, xlab = "Time", ylab = "Surface Intensity")
# simulate data (note: seeding with a random draw gives a different run each time)
set.seed(sample(1:65555, size = 1))
dat <- simulateCTMC2(dive_I, surf_I, T, dt, kappa = kappa)
# plot data
plot(dat$time, dat$dive, pch = 19, xlab = "Time of Dive Start", ylab = "Dive Duration")
plot(dat$time, dat$surf, pch = 19, xlab = "Time of Dive Start", ylab = "Surface Duration")
# Fit Model ---------------------------------------------------------------
# setup model
forms <- list(surface ~ s(time, bs = "cs"),
dive ~ s(time, bs = "cs"))
# fit model
mod <- FitCTMCdive(forms, dat, dt = 1, print = TRUE)
# see results
mod
exp(mod$res$surface[,1])
exp(mod$res$dive[,1])
# plot fitted model
plot(mod)
# get predicted values
pred <- predict(mod)
# plot residuals
rdive <- pred$rdive
plot(dat$time, rdive, pch = 19)
hist(rdive)
qqnorm(rdive); qqline(rdive)
rsurf <- pred$rsurf
plot(dat$time, rsurf, pch = 19)
hist(rsurf)
qqnorm(rsurf); qqline(rsurf)
# test residuals for normality
ks.test(rsurf, "pnorm")
ks.test(rdive, "pnorm")
# plot estimated intensities against truth
plot(tgr, dive_I(tgr), type = "l", lwd = 1.5, xlab = "Time", ylab = "Dive Intensity")
lines(mod$sm$ints, pred$diveI, lwd = 1.5, col = "firebrick")
plot(tgr, surf_I(tgr), type = "l", lwd = 1.5, xlab = "Time", ylab = "Surface Intensity")
lines(mod$sm$ints, pred$surfI, lwd = 1.5, col = "firebrick")
|
db8f6a2e0811117691fab42c65d544facfeb0aad
|
b4753e0c5a3c1b61cbdce34f3dd94004ea22d0b4
|
/ui.r
|
746fe3f3531a28a5291914f76248e434d9592e0a
|
[] |
no_license
|
MrityunjayKumar123/ShinyApp
|
b4cea496e0eb3238e0422df63b47e88d35d9ef4e
|
b63e8b7f02a2bb74e537c2ae6c43bf584e7cfff6
|
refs/heads/master
| 2020-04-09T22:33:42.379425
| 2018-12-22T18:06:04
| 2018-12-22T18:06:04
| 160,631,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,196
|
r
|
ui.r
|
if (!require(shiny)){install.packages("shiny")}
if (!require(udpipe)){install.packages("udpipe")}
if (!require(stringr)){install.packages("stringr")}
if (!require(lattice)){install.packages("lattice")}
if (!require(igraph)){install.packages("igraph")}
if (!require(ggraph)){install.packages("ggraph")}
if (!require(readtext)){install.packages("readtext")}
if (!require(textrank)){install.packages("textrank")}
if (!require(wordcloud)){install.packages("wordcloud")}
library(shiny)
library(udpipe)
library(textrank)
library(lattice)
library(igraph)
library(ggraph)
library(ggplot2)
library(wordcloud)
library(stringr)
shinyUI(
fluidPage(
titlePanel("Text Analytics, An Overview of UDPipe NLP workflow"),
sidebarLayout(
sidebarPanel(
fileInput("file1", "Select Text File for Analysis"),
tags$hr(),
checkboxGroupInput("myupos",label = h4("Select part-of-speech tags (XPOS) for plotting Co-occurrences/Freq Counts/Annotate:"),
c("Adjective" = "ADJ",
"Propernoun" = "PROPN",
"Adverb" = "ADV",
"Noun" = "NOUN",
"Verb"= "VERB"),
selected = c("ADJ","NOUN","VERB"),
width = '100%'
),
tags$hr(style="border-color: black;"),
tags$head(
tags$style(HTML("hr {border-top: 2px solid #000000;}"))
),
radioButtons("rb","Choose any Language- English, Spanish, Hindi:",
choiceNames = list("English","Hindi","Spanish"),
choiceValues = list(
"English","Hindi", "Spanish"
)),
textOutput("txt"),
tags$hr(style="border-color: black;"),
sliderInput("freq", "Select the Frequency of Co-Occurance Graph:", min = 0, max = 50, value = 30)
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Overview",h2(p("App Overview")),
p("There are three tabs in this appliction."),
h4(p("How to use this App")),
p("Upload the text files first and then move on to tabs."),
p("This app supports only text file (.txt) file."),
p("Please refer to the link below for sample txt file."),
p("<English Text.>"),
a(href="https://github.com/MrityunjayKumar123/Sample-Data/blob/master/amazon%20nokia%20lumia%20reviews.txt"
,"Download from here"),
p("<Spanish Text.>"),
a(href=" https://github.com/MrityunjayKumar123/Sample-Data/blob/master/Spanish.txt"
,"Download from here"),
p("<Hindi Text.>"),
a(href="https://github.com/MrityunjayKumar123/Sample-Data/blob/master/Gaban_Hindi.txt"
,"Download from here"),
p("1:- Frequency count:- It will count the frequencies of NOUN,ADJ,VERB,ADVERB and PRONOUN presents in text by graph."),
p("2:- Co-Occurance Plot:- Co-occurrence can mean two words occurring together in the same document."),
p("3:- Annotated Document:- Elements of an annotation with names 'description' and 'wording' have a special meaning.")
),
tabPanel("Frequencies Counts Plot",
h4("Noun/verb"),
plotOutput('plot0'),
h4("Nouns"),
plotOutput('plot1'),
h4("Verbs"),
plotOutput('plot2'),
h4("Adverbs"),
plotOutput('plot3'),
h4("Adjectives"),
plotOutput('plot4')),
tabPanel("Co-Occurance Plot", plotOutput("Cooccurance")),
tabPanel("Annotate",dataTableOutput('Annotate'))
)
)
)
))
|
425f785422389f15e15e48e68927c4f5fa4db99c
|
2ad8af2d89893e6a59cb77c55d39f7f1aee1cef1
|
/code/paper_figures.R
|
475be447be250d57cd4d77e5adcc1ff95ce81d25
|
[] |
no_license
|
Armadilloa16/CVgeneralisationCode
|
f691d1fac875d2c79276306c4b8844908eb05d7c
|
93b88a6d85ed396b2008bdb5774b7eb690f90b60
|
refs/heads/master
| 2021-08-04T14:01:53.304786
| 2021-07-27T08:43:30
| 2021-07-27T08:43:30
| 235,052,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,356
|
r
|
paper_figures.R
|
rm(list=ls())
library(plyr)
library(ggplot2)
library(latex2exp)
# Load and organise data
loo.results = read.csv(file.path("data", "result_summaries", "SIM_loo_results.csv"))
loo.results$err = loo.results$err / 43
names(loo.results)[names(loo.results) == 'err'] = 'Eloo'
tru.results = read.csv(file.path("data", "result_summaries", "SIM_tru_results.csv"))
names(tru.results)[names(tru.results) == 'err'] = 'Etrue'
names(tru.results)[names(tru.results) == 'n.dims'] = 'n.dims.tru'
df = merge(loo.results, tru.results)
df[df$Etrue < 1e-15, 'Etrue'] = 1e-15
# df.CV3 = subset(df, CV == 3)
# # df = subset(df, CV != 3)
#
# df.CV2 = subset(df, CV == 2)
#
# tmp1 = df.CV2
# tmp1$n.dims = tmp1$n.dims.min
# tmp1$range = 'min'
# tmp1 = tmp1[, c('sim', 'method', 'Pi', 'Alt', 'n.dims', 'range')]
# tmp2 = df.CV2
# tmp2$n.dims = tmp2$n.dims.max
# tmp2$range = 'max'
# tmp2 = tmp2[, c('sim', 'method', 'Pi', 'Alt', 'n.dims', 'range')]
#
# df.CV2 = rbind(tmp1, tmp2)
# # Opti
# df.opti = subset(df, Alt %in% 1:3)[, c('sim', 'method', 'Alt', 'n.dims.tru', 'Etrue')]
# names(df.opti)[names(df.opti) == 'n.dims.tru'] = 'n.dims'
# df.opti$Alt = as.character(df.opti$Alt)
# df.opti[df.opti$Alt == "1", 'Alt'] = 'CV2'
# df.opti[df.opti$Alt == "2", 'Alt'] = 'CV1Pi1'
# df.opti[df.opti$Alt == "3", 'Alt'] = 'CV1Pi2'
#
# # E_{true, k}
# Etrue = read.csv(file.path("data", "result_summaries", "SIM_Etrue.csv"))
# Etrue$err = Etrue$fn + Etrue$fp
# Etrue[Etrue$err < 1e-15, 'err'] = 1e-15
# Etrue = Etrue[, c('sim', 'method', 'n.dims', 'err')]
#
# kstar = ddply(Etrue,
# c('sim', 'method'),
# summarise,
# n.dims = min(n.dims[err == min(err)]))
# tmp = transform(merge(Etrue, kstar), Alt = 'Opti')
# names(tmp)[names(tmp) == 'err'] = 'Etrue'
# df.opti = rbind(df.opti, tmp)
# Plotting aesthetic modifications
x_max_hist = max(c(max(df$Eloo*43), max(df$Etrue*43)))
df$CV = paste0('CV', df$CV)
df$Pi = paste0('Pi', df$Pi)
# df.CV2$Pi = paste0('Pi', df.CV2$Pi)
# Figure 2: Ecv vs Epred
tmp = df[, c('sim', 'method', 'CV', 'Pi', 'Eloo')]
names(tmp)[names(tmp) == 'Eloo'] = 'E'
tmp$Type = 'CV'
df.plot = tmp
tmp = df[, c('sim', 'method', 'CV', 'Pi', 'Etrue')]
names(tmp)[names(tmp) == 'Etrue'] = 'E'
tmp$Type = 'Predict'
df.plot = rbind(df.plot, tmp)
tmp = df[, c('sim', 'method', 'CV', 'Pi', 'Eloo', 'Etrue')]
tmp$E = tmp$Eloo - tmp$Etrue
tmp$Type = 'CV - Predict'
df.plot = rbind(df.plot, tmp[, c('sim', 'method', 'CV', 'Pi', 'E', 'Type')])
df.plot$Alt = factor(paste(df.plot$CV, df.plot$Pi, sep = '.'), levels = c("CV1.Pi2", "CV1.Pi1", "CV2.Pi2", "CV2.Pi1", "CV3.Pi1"))
p = ggplot(df.plot, aes(x = E, fill = method)) +
geom_histogram(binwidth = 1/43,
position = 'identity', alpha = 0.4) +
facet_grid(Alt ~ Type, scales = 'free') +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
geom_vline(data = subset(df.plot, Type == "CV - Predict"), aes(xintercept = 0))
p
ggsave(file.path('..', 'figures', 'Fig2_EcvEpred.png'), p, width = 11.4, height = 11.4, units = "cm")
p = p + theme(legend.position = "none",
axis.title = element_blank(),
strip.text.y = element_blank(),
strip.text.x = element_blank())
ggsave(file.path('..', 'figures', 'Fig2_EcvEpred_notext.png'), p, width = 8.7, height = 8.7, units = "cm")
# # Figure 3 (Supplementary): kstar
# tmp = df.CV2[, c('sim', 'method', 'Pi', 'range', 'n.dims')]
# names(tmp)[names(tmp) == 'range'] = 'CV'
# tmp$CV = paste0('CV2 (', tmp$CV, ')')
# tmp = rbind(tmp, subset(df, CV == 'CV1')[, c('sim', 'method', 'Pi', 'CV', 'n.dims')])
# df.plot = tmp
#
# p = ggplot(df.plot, aes(x = n.dims, fill = method)) +
# geom_histogram(breaks = seq(1.5, 39.5, 2), position = 'identity', alpha = 0.4) +
# facet_grid(CV ~ Pi) +
# xlab(TeX('$k^*$ used to calculate $E^{cv}$'))
# # print(p)
# ggsave(file.path('..', 'figures', 'Fig3_kstar.png'), p, width = 7, height = 7)
#
#
#
#
#
#
# # Figure 4 (Supplementary): Etrue vs k
# p = ggplot(Etrue, aes(x = n.dims, y = err, group = interaction(sim, method), colour = method)) +
# geom_line(alpha = 0.08, size = 0.6) +
# guides(colour = guide_legend(override.aes = list(alpha = 1))) +
# xlab('k') +
# ylab(TeX('$E^{pred}_k$'))
# # print(p)
# ggsave(file.path('..', 'figures', 'Fig4_Epred_vs_k.png'), p, width = 7, height = 7)
|
c96d07cd61b17a2eae0584a0002ffedb1401e9a5
|
d4a19fdbcf046b82a79491f5b11e83d0c1c0b0ce
|
/R/my_file_rename.R
|
432c82b138d3d586b7d9be639fdcb5b74655fe05
|
[] |
no_license
|
Laurigit/libSE
|
60289577beb67ddad199b331c8226aa18aa79330
|
11e500e741fe0e614eac6d5b5928c35e33ad1564
|
refs/heads/main
| 2023-06-01T01:58:14.486995
| 2021-06-09T12:38:03
| 2021-06-09T12:38:03
| 375,009,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 483
|
r
|
my_file_rename.R
|
#https://stackoverflow.com/questions/10266963/moving-files-between-folders
my_file_rename <- function(from, to) {
todir <- dirname(to)
if (!isTRUE(file.info(todir)$isdir)) dir.create(todir, recursive=TRUE)
# from <- "C:/Users/lepistol/OneDrive - Stora Enso OYJ/SOP_data_share/SOPDataAll_May_2021_ver1.xlsx"
# to <- "C:/Users/lepistol/OneDrive - Stora Enso OYJ/SOP_data_share/old_versions/SOPDataAll_May_2021_ver122.xlsx"
# note: file.copy() leaves the source file in place; follow with
# file.remove(from) if a true move/rename is intended
file.copy(from = from, to = to, overwrite = TRUE)
}
|
ddef2da15102c87055ff5c1cbb9aa8e21c513cc5
|
553992ae66d19695a240b2c8df4357b09f99bb69
|
/ONDRI/Scholars_SEP2019_Ordination/Heatmaps_DecompViz.R
|
5606fe242f8a078698c1b79613f172e5d922f9ad
|
[] |
no_license
|
Alfiew/Workshops
|
839ec14d5c4b95cd39474044e9bdb2946d2dece9
|
4ac40823e13ed285bcabc44eb4449d4d1be4cd05
|
refs/heads/master
| 2023-04-19T05:48:15.096172
| 2021-04-27T01:12:07
| 2021-04-27T01:12:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,872
|
r
|
Heatmaps_DecompViz.R
|
## make heatmap & eigen/SVD break downs
library(pheatmap)
# load(file=paste0("/Data/ADNI/Examples/amerge_subset.rda"))
#
#
# # select the continuous only subset
# continuous_data_subset <- amerge_subset[,which(variable_type_map[,"Continuous"]==1)]
# `wine` is assumed to be available in the workspace (e.g., data(wine) from an
# ordination package such as ExPosition, whose wine data carries an $objective block)
vis_pca_data <- wine$objective
rownames(vis_pca_data) <- NULL
colnames(vis_pca_data) <- NULL
scaled_vis_pca_data <- scale(vis_pca_data)
cor_mat <- cor(scaled_vis_pca_data)
## output this as a rectangle of a certain size
png("./images/Eigen_SVD/data.png", height = 700, width = 200, pointsize = 20)
pheatmap(scaled_vis_pca_data, cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
png("./images/Eigen_SVD/cor_mata.png", height = 200, width = 200, pointsize = 20)
pheatmap(cor_mat, cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
svd_res <- svd(scaled_vis_pca_data)
png("./images/Eigen_SVD/U.png", height = 700, width = 200, pointsize = 20)
pheatmap(svd_res$u, cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
png("./images/Eigen_SVD/D.png", height = 200, width = 200, pointsize = 20)
pheatmap(diag(svd_res$d), cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
png("./images/Eigen_SVD/L.png", height = 200, width = 200, pointsize = 20)
pheatmap(diag(svd_res$d^2), cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
png("./images/Eigen_SVD/Vt.png", height = 200, width = 200, pointsize = 20)
pheatmap(t(svd_res$v), cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
png("./images/Eigen_SVD/V.png", height = 200, width = 200, pointsize = 20)
pheatmap(svd_res$v, cluster_rows = F, cluster_cols = F, color = viridisLite::cividis(100), legend = F)
dev.off()
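# --- Hedged sanity check (not part of the original script) -------------------
# The heatmaps above visualize X = U D t(V); a quick numerical check that the
# decomposition reconstructs the scaled data:
max(abs(scaled_vis_pca_data -
          svd_res$u %*% diag(svd_res$d) %*% t(svd_res$v)))  # ~0 (machine eps)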
|
cec71c1b40fc899e6664e31f0eb66db1337a393f
|
78205c1f432baf8dbf03cd7e6b325635d3fbab0c
|
/tests/testthat.R
|
8757b214b6918412d3f631cee3f1677dfa952226
|
[
"MIT"
] |
permissive
|
mikldk/popr
|
4ec23427cea231bb7c18e8c71f5fcb56481d15f6
|
a6a444a5e288fb3f0db99a06b01627e039b7fcf2
|
refs/heads/master
| 2020-12-25T14:49:20.491710
| 2017-10-05T13:43:15
| 2017-10-05T13:43:15
| 66,831,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
library(testthat)
library(popr)
test_check("popr")
|
5926280a5f90d58a9b1ad1080d1e19d470fadb77
|
da0dc67d2612430c239b0694b58bbb60eb82629d
|
/Test/MicrobeMetaboliteCorrelation.R
|
bbde8a9c0a0ca0c0e877b2d583f38244abfd110c
|
[
"MIT"
] |
permissive
|
omicsEye/microbial_physiology
|
5f9ebcdb74ad4c988dd9b4a8261f45a351fd9a52
|
41a711ce9c396fe18b785140c3cdb61ac79fd614
|
refs/heads/master
| 2023-07-09T09:26:25.649734
| 2021-07-22T14:47:25
| 2021-07-22T14:47:25
| 190,052,643
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,834
|
r
|
MicrobeMetaboliteCorrelation.R
|
setwd("~/Documents/R_WorkSpace/m2interact")
source('R/Heatmap.R')
# dplyr and data.table are assumed to be attached by Heatmap.R; if not:
library(dplyr)        # %>%, rename()
library(data.table)   # rbindlist(), as.data.table()
sample_data <- load.meta.data('Data/IHMP/hmp2_metadata.csv', tax_column = 2)
# sample_data <- microbe_sample_data[, c(75, 34, 40)]
microbe_abundance_table <- load.abundance.data('Data/iHMP/taxonomic_profiles.tsv_AbundanceTable_2019-07-09.csv')
microbe_abundance_table <- microbe_abundance_table[, -c(1)]
# read ihmp metabolite data
# iHMP_metabolomics_HILIC-neg_060517
ihmp <- read.csv(
'Data/iHMP/iHMP_metabolomics_HILIC-neg_060517.csv',
header = TRUE,
fill = TRUE,
comment.char = "" ,
check.names = TRUE,
stringsAsFactors = FALSE
)
ihmp_col_names <- ihmp[4,-c(1:7)]
# filter out for only hmdb id
ihmp <- ihmp[(ihmp$X.5) != "",]
# get column_names
ihmp_row_names <- ihmp[-1,6]
ihmp <- ihmp[-1,-c(1:7)]
ihmp <- apply(as.matrix(ihmp), 2, as.numeric)
# clean up ihmp data
colnames(ihmp) <- ihmp_col_names
rownames(ihmp) <- ihmp_row_names
ihmp[is.na(ihmp)] <- 0
# Match up samples
for(i in 1:ncol(microbe_abundance_table)) {
a <- colnames(microbe_abundance_table)
for(j in 1:nrow(sample_data)) {
if(colnames(microbe_abundance_table)[i] == rownames(sample_data)[j])
a[i] <- sample_data[j,3]
}
colnames(microbe_abundance_table) <- a
}
a <- microbe_abundance_table[, colnames(microbe_abundance_table) %in% colnames(ihmp)]
q <- ihmp[, colnames(ihmp) %in% colnames(microbe_abundance_table)]
a <- a[, !duplicated(colnames(a))]
q <- q[, !duplicated(colnames(q))]
a <- a[, colnames(a) %in% colnames(q)]
q <- q[, colnames(q) %in% colnames(a)]
z <- cor(x = t(a), y = t(q), method = 'spearman')
z[is.na(z)] <- 0
s_meta <- metabolite_data[rownames(metabolite_data) %in% colnames(z), ]
f_meta <- microbe_data[rownames(microbe_data) %in% row.names(z), ]
create.heatmap(data=z, sample_meta = metabolite_data[,c(91,92)], feature_meta = microbe_data[,c(7, 25, 24)], show = TRUE, omit_na = FALSE)
# Get single microbe
z.r <- cor(t(a[1,]), t(q))
z.r.x <- list()
a.b <- a[,colnames(q)]
for(i in 1:nrow(a.b)) {
a.l <- a.b[i, , drop = FALSE]
a.l <- cor(t(a.l), t(q), method = 'spearman')
z.r.x <- rbindlist(list(z.r.x, as.data.frame(a.l)), use.names = TRUE)
}
z.r.x[is.na(z.r.x)] <- 0
z.r.x <- as.data.table(lapply(z.r.x, as.numeric))
z.r.x <- as.matrix(as.data.frame(z.r.x))
row.names(z.r.x) <- rownames(a.b)
create.heatmap(data = z.r.x, sample_meta = metabolite_data[,c(91,92)], feature_meta = microbe_data[,c(7, 25, 24)], show = TRUE, omit_na = FALSE)
a.l <- cor(t(a.b), t(q), method = 'spearman')
create.heatmap(data = a.l, sample_meta = metabolite_data[,c(91,92)], feature_meta = microbe_data[,c(7, 25, 24)], show = TRUE, omit_na = FALSE)
# Determine if correlation with metabolites
# Look at only metabolites that microbe known to use
# Look at only metabolites that microbe known to produce
|
f1feef786c6d03c5edf069c00b5f5c487284047f
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc04-nonuniform-depth-41/tlc04-nonuniform-depth-41.R
|
513215bad19a69f1f2182c2e0ea5784f37d43eb3
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 694
|
r
|
tlc04-nonuniform-depth-41.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 39420
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 39420
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc04-nonuniform-depth-41.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 14785
c no.of clauses 39420
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 39420
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc04-nonuniform-depth-41.qdimacs 14785 39420 E1 [] 0 84 14577 39420 NONE
|
5fcead8b4b8b740f57b98b1ca868a572029be6c5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/solarius/examples/dat50.Rd.R
|
9c043e86f124f5e203acefb55c8fd40740657ad2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
dat50.Rd.R
|
library(solarius)
### Name: phenodata
### Title: dat50 data set adapted from FFBSKAT R package
### Aliases: genocovdata genodata kin phenodata snpdata
### ** Examples
data(dat50)
str(phenodata)
plotKinship2(2*kin)
str(genodata)
genodata[1:5, 1:5]
str(genocovdata)
genocovdata[1:5, 1:5]
# compare with the genotypes
genodata[1:5, 1:5]
str(snpdata)
head(snpdata)
|
630553d9dc64152f0fff30687a4d8ac4343e61c6
|
89a65b6c63a0b37540925e8addf5e925821fd21e
|
/run_analysis.R
|
3638500aa7a2063fbd438bf9d565b5a3e7aefe2a
|
[] |
no_license
|
kjitender/Getdata-033
|
bec75280feee00ecacf03def674023337dfe87cf
|
3ab8fc60a0d60f1808f4cca202e807ea0e9c9df1
|
refs/heads/master
| 2021-01-10T08:28:28.565205
| 2015-10-25T17:59:07
| 2015-10-25T17:59:07
| 44,920,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,678
|
r
|
run_analysis.R
|
library(plyr)
library(data.table)
library(dplyr)
## Getting the data
## Assuming Samsung data is not there in folder.
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
file_path <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(file_path, recursive=TRUE)
setwd(file_path)
## Reading the data
y_train <-read.table(file.path(file_path, "train", "y_train.txt"),header = FALSE)
y_test <- read.table(file.path(file_path, "test" , "y_test.txt" ),header = FALSE)
x_test <- read.table(file.path(file_path, "test" , "X_test.txt" ),header = FALSE)
x_train <- read.table(file.path(file_path, "train", "X_train.txt"),header = FALSE)
subject_train <- read.table(file.path(file_path, "train", "subject_train.txt"),header = FALSE)
subject_test <- read.table(file.path(file_path, "test" , "subject_test.txt"),header = FALSE)
features <- read.table(file.path(file_path, "features.txt"),head=FALSE)
activity_labels <- read.table(file.path(file_path, "activity_labels.txt"),head=FALSE)
colnames(activity_labels)<- c("V1","Activity")
## Merging same datasets
subject_data <- rbind(subject_train, subject_test)
y_data<- rbind(y_train, y_test)
x_data<- rbind(x_train, x_test)
## Renaming columns in datasets. This will help in connecting all datasets
names(subject_data)<-c("subject")
names(y_data)<- c("activity")
names(x_data)<- features$V2
## Assignment 1: Merges the training and the test sets to create one data set.
All_data <- cbind(x_data,cbind(subject_data,y_data))
## Assignment 2:Extracts only the measurements on the mean and standard deviation for each measurement.
Mean_cols <- grep("mean()", names(All_data), value = FALSE, fixed = TRUE)
Std_cols <- grep("std()", names(All_data), value = FALSE, fixed = TRUE)
Select_data<-All_data[c(Mean_cols,Std_cols)]
Select_data<-cbind(Select_data,All_data$subject,All_data$activity)
colnames(Select_data)[which(names(Select_data) == "All_data$subject")] <- "subject"
colnames(Select_data)[which(names(Select_data) == "All_data$activity")] <- "activity"
## Assignment 3: Uses descriptive activity names to name the activities in the data set
Select_data$activity <- as.character(Select_data$activity)
Select_data$activity[Select_data$activity == 1] <- "Walking"
Select_data$activity[Select_data$activity == 2] <- "Walking Upstairs"
Select_data$activity[Select_data$activity == 3] <- "Walking Downstairs"
Select_data$activity[Select_data$activity == 4] <- "Sitting"
Select_data$activity[Select_data$activity == 5] <- "Standing"
Select_data$activity[Select_data$activity == 6] <- "Laying"
Select_data$activity <- as.factor(Select_data$activity)
## Assignment 4:Appropriately labels the data set with descriptive variable names.
names(Select_data)<-gsub("Acc", "Accelerometer", names(Select_data))
names(Select_data)<-gsub("BodyBody", "Body", names(Select_data))
names(Select_data)<-gsub("^f", "frequency", names(Select_data))
names(Select_data)<-gsub("Gyro", "Gyroscope", names(Select_data))
names(Select_data)<-gsub("Mag", "Magnitude", names(Select_data))
names(Select_data)<-gsub("^t", "time", names(Select_data))
## Assignment 5: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
Tidy_data<-aggregate(. ~subject + activity, Select_data, mean)
Tidy_data<-Tidy_data[order(Tidy_data$subject,Tidy_data$activity),]
write.table(Tidy_data, file = "tidy_data.txt",row.name=FALSE)
|
7f176c7ecd262aecf160f777a39e33e0a0b3dfa0
|
540c8c7f7c84df193d879ac1b56529e1bf11c616
|
/week5/quiz5/recycling_program.R
|
d24acef59472ab177098e4e67753731e8c4a6818
|
[
"MIT"
] |
permissive
|
jcontesti/experimentation_for_improvement
|
5a0bd4e3be621335c21c3f8ee41fc103f7a21e4d
|
d64b8d399567c10ef500e91751ae72a729c93629
|
refs/heads/master
| 2021-05-22T17:05:27.685268
| 2020-04-19T16:51:10
| 2020-04-19T16:51:10
| 253,014,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 110
|
r
|
recycling_program.R
|
H <- c(-1, +1, -1, +1)
S <- c(-1, -1, +1, +1)
y <- c(200, 150, 120, 100)
model <- lm(y ~ H*S)
summary(model)
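# --- Hedged check (not part of the original quiz code) -----------------------
# With +/-1 coding in a 2^2 factorial, each fitted slope is half the classical
# main effect; a hand check for H:
mean(y[H == 1]) - mean(y[H == -1])  # main effect of H = 2 * coef(model)["H"]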
|
7ca86c545c5afbd27cef7c49e466abf42ebcc256
|
3981bd092d1c0c0d64c112da7d36d7ff76fc2098
|
/GradientDesent&Correlation/PartialCorrelations.R
|
ead89a824d88b83f569b336e89fe90c466b4cdad
|
[] |
no_license
|
GirishGore/Machine-Learning-Tutorials
|
42fd198f98fcdda258606147deeb37d412571d92
|
deb70bd0bacac2b16018fe29d72cfa25f956b241
|
refs/heads/master
| 2020-03-08T20:08:56.206601
| 2018-04-23T04:17:40
| 2018-04-23T04:17:40
| 128,374,461
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 164
|
r
|
PartialCorrelations.R
|
X = c(2,4,15,20)
Y = c(1,2,3,4)
Z = c(0,0,1,1)
mm1 = lm(X~Z)
res1 = mm1$residuals
mm2 = lm(Y~Z)
res2 = mm2$residuals
cor(res1,res2)
##0.919145
cor(X,Y)
##0.9695016
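# --- Hedged check (not part of the original snippet) -------------------------
# The residual-based partial correlation above matches the closed form
# r_xy.z = (r_xy - r_xz*r_yz) / sqrt((1 - r_xz^2) * (1 - r_yz^2)):
(cor(X, Y) - cor(X, Z) * cor(Y, Z)) /
sqrt((1 - cor(X, Z)^2) * (1 - cor(Y, Z)^2))
## 0.919145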
|
a5a3c5aa7714a559c2848b8ff4aaabbd6c240d88
|
f559c911e683268f517b25a34a2cbf463f80671d
|
/ZIP/ZIP_sin7.R
|
f4944b0eed8969e1abcc194ccb172fee05ff9716
|
[] |
no_license
|
Luis-2199/BayesProject
|
70ee263fe5aae88b6c1943c2cdd0aeb57ad6c7ef
|
3b0cfc05622047122f6da13a0f60757994e0cbc6
|
refs/heads/main
| 2023-07-08T16:09:29.984859
| 2021-08-15T14:12:15
| 2021-08-15T14:12:15
| 388,851,361
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,181
|
r
|
ZIP_sin7.R
|
################# Winning model (without 7s) ##################################
# packages used below (assumed attached elsewhere in the project; listed here
# for completeness)
library(pscl)    # zeroinfl(), vuong()
library(VGAM)    # rzipois()
library(dplyr)   # %>%, filter(), right_join()
library(tidyr)   # replace_na()
library(ggplot2)
# Data set without 7s
CData_CDMX2_sin7 <- CData_CDMX2 %>% filter(Vic_Rob_As < 7)
# Fit the model
mod_ZIPsin7 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_ZIPsin7)
# loglik of zero-inflated model -3237.105
# BIC of zero-inflated model 6585.975
# AIC of zero-inflated model 6500.21
{
cat("loglik of zero-inflated model", logLik(mod_ZIPsin7), "\n")
cat("BIC of zero-inflated model", BIC(mod_ZIPsin7), "\n")
cat("AIC of zero-inflated model", AIC(mod_ZIPsin7))
}
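# --- Hedged check (not part of the original analysis) ------------------------
# Under a ZIP model, P(Y = 0) = p + (1 - p) * exp(-lambda); comparing the
# average fitted zero mass with the observed share of zeros (for zeroinfl
# objects, column 1 of the "prob" prediction is P(Y = 0)):
mean(predict(mod_ZIPsin7, type = "prob")[, 1])  # average fitted P(Y = 0)
mean(CData_CDMX2_sin7$Vic_Rob_As == 0)          # observed share of zeros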
############# Chi-squared likelihood-ratio test
mnull <- update(mod_ZIPsin7, . ~ 1) # null model
pchisq(2*(logLik(mod_ZIPsin7) - logLik(mnull)),
df = 6, lower.tail = FALSE) # statistically significant improvement
# over the model without covariates
pchisq(2*(logLik(mod_ZIPsin7) - logLik(mnull)),
df = 5, lower.tail = FALSE) # statistically significant improvement
# over the model without covariates
############# Vuong test (against the plain Poisson)
summary(mod_PoiSin7 <- glm(Vic_Rob_As ~ Seg_Mun + Region,
family = poisson, data = CData_CDMX2_sin7)) # plain Poisson
vuong(mod_ZIPsin7, mod_PoiSin7) # the zero-inflated Poisson is indeed the better fit
############# Frequency plot: observed vs ZIP vs plain Poisson
# Run the simulations based on the ZIP model and on the plain Poisson
{
####### ZIP simulation
# Extract the estimated values of p and lambda
# (rzipois is vectorized over lambda and pstr0, so the loop below could be a single call)
p <- predict(mod_ZIPsin7, type = "zero")
lambda <- predict(mod_ZIPsin7, type = "count")
N <- length(CData_CDMX2_sin7$Vic_Rob_As)
sim_ZIP <- rep(0, times = N)
for(i in 1:N){
sim_ZIP[i] <- rzipois(1, lambda = lambda[i], pstr0 = p[i])
}
####### Poisson regression simulation
mu <- exp(predict(mod_PoiSin7))
sim_Poi <- rep(0, times = N)
for(i in 1:N){
sim_Poi[i] <- rpois(1, lambda = mu[i])
}
reales <- CData_CDMX2_sin7$Vic_Rob_As
tib_sim_ZIP <- table(sim_ZIP) %>% as_tibble() %>%
rename(count_sim=n, value=sim_ZIP); tib_sim_ZIP
tib_reales <- table(reales) %>% as_tibble() %>%
rename(count_real=n, value=reales); tib_reales
tib_poisson <- table(sim_Poi) %>% as_tibble() %>%
rename(count_poi=n, value=sim_Poi); tib_poisson
tib_completa <- tib_sim_ZIP %>% right_join(tib_reales,
by = c("value" = "value")) %>%
left_join(tib_poisson, by = c("value" = "value")) %>%
replace_na(list(count_sim = 0, count_poi = 0)); tib_completa
}
# Plot observed counts against both simulations
ggplot(tib_completa, aes(x=value, y=count_real)) +
geom_bar(stat = "identity") +
geom_point(aes(y=count_sim, color="ZIP")) +
geom_point(aes(y=count_poi, color="Poisson"))
############ Simulation checks (mean and confusion matrix)
# sim_conf_mat_zeroinfl2(), sim_media_zeroinfl(), and sim_conf_mat_poi() are
# project-specific helpers assumed to be defined elsewhere in this repository
sim_conf_mat_zeroinfl2(mod_ZIPsin7, res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = length(CData_CDMX2_sin7$Vic_Rob_As))
# 68.42034% accuracy
sim_media_zeroinfl(mod_ZIPsin7,
muest.size = length(CData_CDMX2_sin7$Vic_Rob_As)) # 0.2409083
mean(CData_CDMX2_sin7$Vic_Rob_As) # 0.2538305
########### Checks for the plain Poisson regression alone
sim_conf_mat_poi(mod_PoiSin7, res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = length(CData_CDMX2_sin7$Vic_Rob_As))
# 68.21968% accuracy
################## Model without Region DOES NOT WIN ######################################
mod_ZIPsin7_2 <- zeroinfl(Vic_Rob_As ~ Seg_Mun|
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_ZIPsin7_2)
# loglik of zero-inflated model -3278.8
# BIC of zero-inflated model 6643.572
# AIC of zero-inflated model 6577.599
{
cat("loglik of zero-inflated model", logLik(mod_ZIPsin7_2), "\n")
cat("BIC of zero-inflated model", BIC(mod_ZIPsin7_2), "\n")
cat("AIC of zero-inflated model", AIC(mod_ZIPsin7_2))
}
|
cd5d5f2b9dc602b023c425695121117149b2d212
|
2a6b1b93b9388fb6c8f289efec52bb2f50963eb0
|
/examples/20111009-kuntien-sukupuolijakauma.R
|
79d1ca7557f5b1a183f6b3567144b58c5b4e578e
|
[] |
no_license
|
louhos/takomo
|
be80209cf3ee0f1773648d8b127219ad212ae487
|
7af1752f14821b879f80f052bebcc97ba5ff5804
|
refs/heads/master
| 2021-01-17T12:25:09.186161
| 2016-07-18T12:14:39
| 2016-07-18T12:14:39
| 3,610,040
| 8
| 4
| null | 2015-08-10T19:10:22
| 2012-03-03T10:49:30
|
R
|
UTF-8
|
R
| false
| false
| 2,201
|
r
|
20111009-kuntien-sukupuolijakauma.R
|
# This script is part of the Louhos-project (http://louhos.github.com/)
# Copyright (C) 2010-2013 Leo Lahti.
# Contact: <http://louhos.github.com/contact>.
# All rights reserved.
# This program is open source software; you can redistribute it and/or modify
# it under the terms of the FreeBSD License (keep this notice):
# http://en.wikipedia.org/wiki/BSD_licenses
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# The script fetches the Finnish municipal borders and the population
# register's population counts by municipality, then computes and visualizes
# the relative share of each sex across municipalities on a map of Finland.
# Install and load sorvi package
# Instructions in http://louhos.github.com/sorvi/asennus.html
# This script is tested with sorvi version 0.2.27
library(sorvi)
# fetch the map of Finland and municipal borders in GADM format
gadm <- sorvi::GetGADM("FIN_adm", "Kunta")
# population register: population counts by municipality
vrek <- sorvi::GetPopulationRegister("http://vrk.fi/default.aspx?docid=5127&site=3&id=0")
# Join the population register data to the map object, and
# set the population to zero in municipalities where it is unknown
gadm$asukkaita <- log10(rowSums(vrek[gadm$Kunta, c("Miehet", "Naiset")]))
gadm$asukkaita[is.na(gadm$asukkaita)] <- 0
# Also compute the relative share of each sex
gadm$miehet.osuus <- vrek[gadm$Kunta, "Miehet"]/vrek[gadm$Kunta, "Yhteensa"]
gadm$naiset.osuus <- vrek[gadm$Kunta, "Naiset"]/vrek[gadm$Kunta, "Yhteensa"]
# Set the male/female share to 50%
# in municipalities whose population is unknown
gadm$miehet.osuus[is.na(gadm$miehet.osuus)] <- 0.5
gadm$naiset.osuus[is.na(gadm$naiset.osuus)] <- 0.5
# palette breakpoints
varname <- "naiset.osuus"
interval <- max(abs(gadm[[varname]] - 0.5))
at <- seq(0.5 - interval, 0.5 + interval, length = 100)
# Draw the map of Finland colored by the relative share of women
q <- sorvi::PlotShape(gadm, varname, type = "twoway",
at = at, main = "Naiset Suomen kunnissa", plot=FALSE)
# Save the Figure into a file:
png("Suomen.kuntien.sukupuolijakauma.png", width = 600, height = 600)
print(q)
dev.off()
|
2ac820e9e7dcd0e93a3aa32842add6a47fb0d5ed
|
c63fc5e6607e2cd5d62464a72c78b06191277eb6
|
/man/rprt_glimpse0.Rd
|
7d0b53297f5000e68c43ee1077ad0814e10d95e3
|
[] |
no_license
|
SWS-Methodology/faoswsTrade
|
6ce400e545fc805fe1f87d5d3f9d5ba256a8a78c
|
2145d71a2fda7b63d17fa7461ec297f98b40756c
|
refs/heads/master
| 2023-02-17T08:40:21.308495
| 2023-02-09T13:53:56
| 2023-02-09T13:53:56
| 55,507,302
| 4
| 1
| null | 2020-05-15T14:45:59
| 2016-04-05T12:49:03
|
R
|
UTF-8
|
R
| false
| true
| 344
|
rd
|
rprt_glimpse0.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rprt_glimpse0.R
\name{rprt_glimpse0}
\alias{rprt_glimpse0}
\title{Drops dplyr::glimpse's invisible value}
\usage{
rprt_glimpse0(tbl)
}
\arguments{
\item{tbl}{Data.}
}
\description{
This is needed to include glimpse's output in
futile.logger logging messages.
}
|
9dcf828f0a2e807c200ab8ab39afd8ed205eede3
|
6a9aafcd2a09b13173833f79b7797c1db2ebb42d
|
/plot1.R
|
30ad62431adcf1b7f82ee65618d42c866b09da90
|
[] |
no_license
|
aryalsohan0/EDA-CP2
|
37daffa8c86e88eaaddcf1c91cfb3c8ab13b9d80
|
f5e335d5a67d8aeddda20ccc8831f89956e4f8fd
|
refs/heads/master
| 2022-12-10T19:58:14.727465
| 2020-08-30T09:42:05
| 2020-08-30T09:42:05
| 291,427,672
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 665
|
r
|
plot1.R
|
# Reading data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Create data frame of yearly emissions (YE)
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse")
library(tidyverse)
YE <- NEI %>%
group_by(year) %>%
summarise(Yearly_Emissions = sum(Emissions))
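# Equivalent base-R aggregation (illustrative sketch; avoids the tidyverse dependency):
# YE <- aggregate(Emissions ~ year, data = NEI, FUN = sum)
# names(YE)[2] <- "Yearly_Emissions"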
# plotting yearly total emissions
png("plot1.png", height = 480, width = 480)
barplot((YE$Yearly_Emissions)/10^6,
names.arg = YE$year,
xlab = "Years",
ylab = "Total emissions in year (* 10^6)",
main = "Total PM2.5 Emissions over the years")
dev.off()
# We can see from the plot that total emissions have decreased over the years.
|
54817c8b4e74e19e9eb5bc7a23fbe9a5ce3fbe2c
|
4848ca8518dc0d2b62c27abf5635952e6c7d7d67
|
/R/f_hv_rab.R
|
3a3746a050982ed3aad85edf71dd3c14b386f43b
|
[] |
no_license
|
regenesis90/KHCMinR
|
ede72486081c87f5e18f5038e6126cb033f9bf67
|
895ca40e4f9953e4fb69407461c9758dc6c02cb4
|
refs/heads/master
| 2023-06-28T00:29:04.365990
| 2021-07-22T04:44:03
| 2021-07-22T04:44:03
| 369,752,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 699
|
r
|
f_hv_rab.R
|
#' Heavy Vehicle Factors in Roundabout
#'
#' It follows <Formula 11-4> in KHCM(2013) p.500
#' @param lane Roundabout lane. Choose one from : \code{1}, \code{2}
#' @param P_T Heavy Vehicle Ratio.
#' @keywords Heavy Vehicle Factors Roundabout
#' @seealso \code{\link{E_T_rab}}, \code{\link{V_i_pce_rab}}
#' @export
#' @examples
#' f_hv_rab(lane = 2, P_T = 0.3)
f_hv_rab <- function(lane = NULL, P_T = NULL){
et <- E_T_rab(lane = lane, hv_pcn = P_T * 100)
  if (is.numeric(et)){
if (P_T >= 0 & P_T <= 1){fhv <- 1 / (1 + P_T * (et - 1))}
else {fhv <- 'Error : [P_T] must be >= 0 and <= 1. Please check that.'}
}
else {fhv <- et}
fhv
}
|
eae2cceaa78575a56ecc303122f6d2e243a77b7d
|
1ba1eada0980537db781a8ccd388533e74d644cd
|
/cachematrix.R
|
ff8e279fae84802ff9e24caf67be3ef149055fef
|
[] |
no_license
|
bmeier01/ProgrammingAssignment2
|
c5e234e7495965362f4b1e17f37e3342a19099a7
|
5b227edf24e68b64cc529fcdf82b13c42d9ac4b8
|
refs/heads/master
| 2021-01-21T16:00:16.351475
| 2015-01-24T22:57:48
| 2015-01-24T22:57:48
| 27,048,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,935
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix rather than computing it every time.
## If the contents of a matrix are not changing, it may make sense to cache a derived
## value (e.g. the inverse, or the mean of a vector), especially when it takes long to compute;
## when we need it again, it can be looked up in the cache rather than being recalculated.
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
} # set the parameters of the matrix
get <- function() x # get the value of the matrix
setmatrixinv <- function(solve) m <<- solve # this function is called by cacheSolve to compute the inverse of the matrix
getmatrixinv <- function() m # this function provides the inverse of the matrix in cacheSolve if already calculated previously
list(set = set, get = get, # internal methods or functions
setmatrixinv = setmatrixinv,
getmatrixinv = getmatrixinv)
}
## This function computes the inverse of the matrix "makeCacheMatrix" after checking if it has not been calculated before
## if it has been calculated before it does not recalculate but returns it from cache.
cacheSolve <- function(x, ...) {
m <- x$getmatrixinv() # obtain the inverse of the matrix from cache and store in m
  if(!is.null(m)) { # tests if m is not NULL; if TRUE, the inverse was calculated before and is returned from cache
message("getting cached data") # prints this message
return(m) # and returns inverse of the matrix (in this case from cache)
}
data <- x$get() # if m is NULL, this gets the matrix and stores it in data
m <- solve(data, ...) # calculates the inverse of the matrix
x$setmatrixinv(m) # stores the inverse of the matrix so that it can be retrieved from cache afterwards
m #returns inverse of the matrix
}
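## Minimal usage sketch (not part of the original assignment functions):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm) # computes, caches, and returns the inverse
## cacheSolve(cm) # prints "getting cached data" and returns the cached inverse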
|
c879d635adc7032134d037b727b4e306e9ccece0
|
4b0238b0dffae1e750d16eb590f15188288c5091
|
/graduate/fig_baseline_pattern.R
|
e494de140689995c282bebc0ff91fdb741a39ea6
|
[] |
no_license
|
Kiki1017/gev
|
9a9ae1968bf8ccf0624e2bb401bc4e9262406d39
|
1cd15d6257df895f988a6447f4e42e4e6841155b
|
refs/heads/master
| 2022-03-30T00:26:38.132629
| 2020-01-08T06:52:12
| 2020-01-08T06:52:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,659
|
r
|
fig_baseline_pattern.R
|
# To verify that the initial value (fgev) is working
rm(list=ls())
setwd("~/GITHUB/gev")
source("./lib/sgev3library.R")
source("./lib/pack.R")
# surface base setting
# (x1, x2, and dmvnorm() are assumed to be provided by the sourced library files above)
mean_vec = c(0,0)
sig_mat = matrix(c(30,0,0,30),nrow=2)
set_uni = dmvnorm(cbind(x1,x2), mean=mean_vec, sigma=sig_mat)
mean_vec1 = c(5,0); mean_vec2 = c(-5,0)
sig_mat = matrix(c(10,0,0,10),nrow=2)
set_bi = 0.4*dmvnorm(cbind(x1,x2),mean=mean_vec1, sigma=sig_mat*1) +0.6*dmvnorm(cbind(x1,x2),mean=mean_vec2, sigma=sig_mat*2)
xyrange = c(-10,10)
# x = seq(xyrange[1],xyrange[2],length=20)
# y = seq(xyrange[1],xyrange[2],length=20)
x = seq(xyrange[1],xyrange[2],length=1000)
y = seq(xyrange[1],xyrange[2],length=1000)
par(mfrow=c(1,1))
fxy = outer(x, y, function(x,y) -3*x+3*y)
range(100+fxy)
range(40+0.5*fxy)
range(0.1+0.005*fxy)
pmat1 <- persp(x,y, fxy, phi = 10, theta = 30,
xlim=c(-12,12), ylim=c(-12,12),ticktype="detailed",
xlab="x1",ylab="x2",zlab="",main="Plane")
fxy = outer(x, y, function(x,y) dmvnorm(cbind(x,y), mean=mean_vec, sigma=sig_mat) )
pmat2 <- persp(x,y, fxy, phi = 10, theta = 30,
xlim=c(-10,10), ylim=c(-10,10),ticktype="detailed",
xlab="x1",ylab="x2",zlab="",main="Unimodal")
range(90+4000*fxy)
range(30+4000*fxy)
range(50*fxy)
fxy = outer(x, y, function(x,y) 0.4*dmvnorm(cbind(x,y),mean=mean_vec1, sigma=sig_mat*1) +0.6*dmvnorm(cbind(x,y),mean=mean_vec2, sigma=sig_mat*2) )
pmat3 <- persp(x,y, fxy, phi = 10, theta = 30,
xlim=c(-10,10), ylim=c(-10,10),ticktype="detailed",
xlab="x1",ylab="x2",zlab="",main="Bimodal")
range(90+3000*fxy)
range(30+3000*fxy)
range(50*fxy)
|
819ca5c55c00f0580aec71d6f99026bd4fc9f33a
|
691a1a785b2f0a47a04777ada08cb1a8bf4b94ef
|
/Sourced_Functions/ProjectIISourceFunctions_v2.R
|
7e2161f86cfbfaf0d7a542994bf55aae2cdc909b
|
[] |
no_license
|
arthurvickie/Multi-Marker_Method
|
cbafc3e6a9a16c703b3d241d19234d0dfdb38e89
|
d1d90c3c6f99d587a987f4be761c8dbe042f176a
|
refs/heads/main
| 2023-07-15T10:17:00.234816
| 2021-08-30T20:49:22
| 2021-08-30T20:49:22
| 377,881,258
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60,302
|
r
|
ProjectIISourceFunctions_v2.R
|
############################################
## Source functions for Project II ###
############################################
###################################
DetermineAssocRSNPs = function(gene, LowLD, percentageAssoc){
#determine which SNPs to actually set as assoc. based on gene
#check gene
if(gene == "NAT2"){
if(LowLD == TRUE){
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 1 SNP is associated
assocSNPs = c(1)
} else if(percentageAssoc == 15){
#then 2 SNPs are associated
assocSNPs = c(1,2)
} else {
#then 5 SNPs are associated
assocSNPs = c(1,2,9,7,10)
}
} else {
#looking at SNPs in high LD
if(percentageAssoc == 5){
#then 1 SNP is associated
assocSNPs = c(13)
} else if(percentageAssoc == 15){
#then 2 SNPs are associated
assocSNPs = c(13,14)
} else {
#then 5 SNPs are associated
assocSNPs = c(13,14,4,5,3)
}
}
} else if(gene == "CHI3L2"){
if(LowLD == TRUE){
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
assocSNPs = c(12,2)
} else if(percentageAssoc == 15){
#then 5 SNPs are associated
assocSNPs = c(12,2,3,9,8)
} else {
#then 8 SNPs are associated
assocSNPs = c(12,2,3,9,8,24,28,19)
}
} else {
#looking at SNPs in high LD
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
assocSNPs = c(17,29)
} else if(percentageAssoc == 15){
#then 5 SNPs are associated
assocSNPs = c(5,6,23,18,13)
} else {
#then 8 SNPs are associated
assocSNPs = c(5,6,23,18,13,15,17,29)
}
}
} else {
#otherwise we have ASAH1
if(LowLD == TRUE){
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
assocSNPs = c(31,9)
} else if(percentageAssoc == 15){
#then 6 SNPs are associated
assocSNPs = c(31,9,4,8,10,5)
} else if(percentageAssoc == 25){
#then 10 SNPs are associated
assocSNPs = c(31,9,4,8,10,5,3,2,7,1)
}
} else {
#looking at SNPs in high LD
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
assocSNPs = c(25, 32)
} else if(percentageAssoc == 15){
#then 6 SNPs are associated
assocSNPs = c(37,29,36,28,30,33)
} else if(percentageAssoc == 25){
#then 10 SNPs are associated
assocSNPs = c(37,29,36,28,30,33,34,38,25,32)
}
}
}
return(assocSNPs)
}
##Step 1) Generate paired genotype data, get this data into correct format
#pull R genotype information
obtainRGenotypes = function(chr = c(), numSamples = c(), simNum = c(), gene = "", path = "/path/to/data/"){
  #function to obtain Recipient genotypes for D/R transplant pairs
  #output is a matrix of R genotypes for all individuals (N x m)
#load needed packages
suppressMessages(require(ARTP2,lib.loc='/home/vlynn/R/library'))
suppressMessages(require(dplyr))
#make sure number of samples is integer value
numSamples = as.integer(numSamples)
#setwd to location of plink data
setwd(path)
#make lists of names of plink data
#for HapGen generated data
bedFile = paste0(gene,"_",numSamples,"Pairs_SimNum",simNum,"_Subset.bed")
bimFile = paste0(gene,"_",numSamples,"Pairs_SimNum",simNum,"_Subset.bim")
famFile = paste0(gene,"_",numSamples,"Pairs_SimNum",simNum,"_Subset.fam")
#read in plink binary files
plinkFile = read.bed(bed = bedFile, bim = bimFile, fam = famFile)
  #matched pairs are even and odd rows of the file (Ds are odd rows, Rs are even rows)
#define variables for number of columns and rows of the df
nrowPlink = nrow(plinkFile) #this is number of generated subjects (2*SS)
ncolPlink = ncol(plinkFile) #this is number of SNPs
recipientGenotypes = plinkFile[seq(2,nrowPlink,2),]
RGenosMat = matrix(unlist(recipientGenotypes), ncol = ncolPlink, byrow = F)
return(RGenosMat)
}
#pull D genotype information
obtainDGenotypes = function(chr = c(), numSamples = c(), simNum = c(), gene = "", path = "/path/to/data/"){
  #function to obtain Donor genotypes for D/R transplant pairs
  #output is a matrix of D genotypes for all individuals (N x m)
#load needed packages
suppressMessages(require(ARTP2,lib.loc='/home/vlynn/R/library'))
suppressMessages(require(dplyr))
#make sure number of samples is integer value
numSamples = as.integer(numSamples)
#setwd to location of plink data
setwd(path)
#make lists of names of plink data
#for HapGen generated data
bedFile = paste0(gene,"_",numSamples,"Pairs_SimNum",simNum,"_Subset.bed")
bimFile = paste0(gene,"_",numSamples,"Pairs_SimNum",simNum,"_Subset.bim")
famFile = paste0(gene,"_",numSamples,"Pairs_SimNum",simNum,"_Subset.fam")
#read in plink binary files
plinkFile = read.bed(bed = bedFile, bim = bimFile, fam = famFile)
  #matched pairs are even and odd rows of the file
#define variables for number of columns and rows of the df
nrowPlink = nrow(plinkFile) #this is number of generated subjects (2*SS)
ncolPlink = ncol(plinkFile) #this is number of SNPs
#Ds are odd numbered rows, Rs are even numbered rows
donorGenotypes = plinkFile[seq(1,nrowPlink,2),]
DGenosMat = matrix(unlist(donorGenotypes), ncol = ncolPlink, byrow = F)
return(DGenosMat)
}
###################################
##Step 2) Calculate individual scores for each pair,
##then calc score for gene region
#use R and D genotypes to calc IBS mismatch score
calcIBSMismatch = function(RGenosMat = matrix(), DGenosMat = matrix()){
#function to calculate the IBS mismatch score for all D/R pairs
  #returns a matrix of IBS mismatch scores (N x m)
#number of SNPs is the number of columns in D Genotypes matrix
ncolPlink = ncol(DGenosMat)
#calculate the difference between the two subjects
diffsPlink = abs(DGenosMat - RGenosMat)
#######################
## IBS Score
#######################
#if diff = 0, score = 0
#if diff = 1, score is unchanged
#if diff = 2, score = 2
IBSMismatch = diffsPlink
#save IBS scores
RIBSScoresMat = matrix(unlist(IBSMismatch), ncol = ncolPlink, byrow = F)
return(RIBSScoresMat)
}
#use R and D genotypes to calc Incompatibility score
calcIncompatibilityScore = function(RGenosMat = matrix(), DGenosMat = matrix()){
#function to calculate the incompatibility score for all D/R pairs
  #returns a matrix of incompatibility scores (N x m)
#number of SNPs is the number of columns in D Genotypes matrix
ncolPlink = ncol(DGenosMat)
#calculate the difference between the two subjects
diffsPlink = abs(DGenosMat - RGenosMat)
#######################
## Incomp Score
#######################
#initialize a list of empty dfs with same number of columns as original
incomp = diffsPlink
for(ii in 1:ncol(diffsPlink)){
incomp[diffsPlink[,ii] == 0,ii] = 0
}
for(ii in 1:ncol(diffsPlink)){
incomp[diffsPlink[,ii] != 0,ii] = 1
}
#save incomp scores
RIncompScoresMat = matrix(unlist(incomp), ncol = ncolPlink, byrow = F)
return(RIncompScoresMat)
}
#use R and D genotypes to calc AMS score
calcAMS = function(RGenosMat = matrix(), DGenosMat = matrix()){
#function to calculate the AMS score for all D/R pairs
  #returns a matrix of AMS scores (N x m)
#number of SNPs is the number of columns in D Genotypes matrix
ncolPlink = ncol(DGenosMat)
#calculate the difference between the two subjects
diffsPlink = abs(DGenosMat - RGenosMat)
#######################
## AMS
#######################
#mismatch if D has allele not in R
#sum across both alleles in genotype
#Score is either 0, 1, or 2
alloMismatch = matrix(0, nrow = nrow(diffsPlink), ncol = ncol(diffsPlink)) #make default value 0
alloMismatch[(DGenosMat == 0) & (RGenosMat == 2)] = 2 #Donor AA, Recip aa
alloMismatch[(DGenosMat == 2) & (RGenosMat == 0)] = 2 #Donor aa, Recip AA
alloMismatch[(DGenosMat == 1) & (RGenosMat == 2)] = 1 #Donor Aa, Recip aa
alloMismatch[(DGenosMat == 1) & (RGenosMat == 0)] = 1 #Donor Aa, Recip AA
alloMismatch[is.na(DGenosMat) | is.na(RGenosMat)] = NA #make sure NAs are preserved
#match row and column names from the original data set
#row names should be R Ids
rownames(alloMismatch) = rownames(DGenosMat)
colnames(alloMismatch) = colnames(DGenosMat)
#save AMS scores
RAlloMismatchMat = matrix(unlist(alloMismatch), ncol = ncolPlink, byrow = F)
return(RAlloMismatchMat)
}
#use R and D genotypes to calc binary mismatch score
calcBinaryMM = function(RGenosMat = matrix(), DGenosMat = matrix()){
#function to calculate the binary MM score for all D/R pairs
  #returns a matrix of binary MM scores (N x m)
#number of SNPs is the number of columns in D Genotypes matrix
ncolPlink = ncol(DGenosMat)
#calculate the difference between the two subjects
diffsPlink = abs(DGenosMat - RGenosMat)
#######################
## Binary Mismatch
#######################
#mismatch if D has allele not in R
#Score is either 0 or 1
binMismatch = matrix(0, nrow = nrow(diffsPlink), ncol = ncol(diffsPlink)) #make default value 0
binMismatch[(DGenosMat == 1) & (RGenosMat == 2)] = 1 #Donor Aa, Recip aa
binMismatch[(DGenosMat == 0) & (RGenosMat == 2)] = 1 #Donor AA, Recip aa
binMismatch[(DGenosMat == 2) & (RGenosMat == 0)] = 1 #Donor aa, Recip AA
binMismatch[(DGenosMat == 1) & (RGenosMat == 0)] = 1 #Donor Aa, Recip AA
binMismatch[is.na(DGenosMat) | is.na(RGenosMat)] = NA #make sure NAs are preserved
#match row and column names from the original data set
#row names should be R Ids
rownames(binMismatch) = rownames(DGenosMat)
colnames(binMismatch) = colnames(DGenosMat)
#save Binary mismatch scores
RBinMismatchMat = matrix(unlist(binMismatch), ncol = ncolPlink, byrow = F)
return(RBinMismatchMat)
}
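#Toy comparison of the four single-SNP kernels on one D/R pair (illustrative only;
#the genotype values below are made up):
# D <- matrix(c(0, 1, 2), nrow = 1); R <- matrix(c(2, 0, 1), nrow = 1)
# calcIBSMismatch(RGenosMat = R, DGenosMat = D) # 2 1 1
# calcIncompatibilityScore(RGenosMat = R, DGenosMat = D) # 1 1 1
# calcAMS(RGenosMat = R, DGenosMat = D) # 2 1 0 (counts donor alleles absent in recipient)
# calcBinaryMM(RGenosMat = R, DGenosMat = D) # 1 1 0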
#use single SNP kernel functions to calc gene-based score
calcGeneScore = function(SingleSNPKernel = matrix(), standardize = FALSE, useWeights = FALSE, weights){
#function to calculate the gene based score based on the single SNP
#kernels and optional weights
  #Returns an N x 1 matrix with the single gene score for each individual
if(useWeights == FALSE){
#unweighted sum of kernel
if(standardize == FALSE){
#then no division by anything, just a simple sum of kernels
#sum over all the SNPs
geneScore = rowSums(SingleSNPKernel)
}
else {
#unweighted sum but standardized by maximum score value
#sum over all the SNPs
geneScore.raw = rowSums(SingleSNPKernel)
#figure out if we need to multiply number of SNPs by 2
#if the max score is larger than the total number of SNPs,
#we know the score can go up to 2, so we multiply denominator by 2
if(max(geneScore.raw) > ncol(SingleSNPKernel)){
geneScore = geneScore.raw/(2*ncol(SingleSNPKernel))
} else {
#otherwise just divide by total number of SNPs
geneScore = geneScore.raw/(ncol(SingleSNPKernel))
}
}
}
#otherwise we are using weights
else {
#need to determine number of weights to pull
nSNP = dim(SingleSNPKernel)[2]
#pull nSNP weights from the main weight vector
weights_subset = weights[1:nSNP]
#need the sum of all weights
weightTotal = sum(weights_subset)
#need to multiply kernel by weight and sum
#make weights a vector (m x 1)
weights.vec = as.matrix(weights_subset)
sum.w.Kernel = SingleSNPKernel %*% weights.vec
geneScore = sum.w.Kernel/weightTotal
}
geneScore.mat = as.matrix(geneScore)
return(geneScore.mat)
}
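#Quick illustrative check of calcGeneScore on a made-up 2 x 3 kernel matrix:
# K.toy <- matrix(c(0, 1, 2,
#                   1, 0, 1), nrow = 2, byrow = TRUE)
# calcGeneScore(K.toy) # unweighted row sums: 3, 2
# calcGeneScore(K.toy, standardize = TRUE) # divided by m = 3 (max sum here does not exceed m)
# calcGeneScore(K.toy, useWeights = TRUE, weights = c(1, 2, 1)) # (K %*% w)/sum(w): 1.0, 0.5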
#use single SNP kernel functions to calc gene-based score, using percent of SNPs
calcGeneScorePercentOfSNPs = function(SingleSNPKernel = matrix(), gene = "", percentageAssoc = 100, LowLD = TRUE, standardize = FALSE, useWeights = FALSE, weights){
#function to calculate the gene based score based on the single SNP
#Only uses a percentage of SNPs in the gene region, assuming not all SNPs are associated with outcome
#kernels and optional weights
  #Returns an N x 1 matrix with the single gene score for each individual
#check gene
if(gene == "NAT2"){
if(LowLD == TRUE){
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 1 SNP is associated
SingleSNPKernel = as.matrix(SingleSNPKernel[,1],ncol=1)
} else if(percentageAssoc == 15){
#then 2 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(1,2)]
} else if(percentageAssoc == 25){
#then 5 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(1,2,9,7,10)]
} else if(percentageAssoc == 50){
#then 7 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(1,2,9,7,10,6,8)]
} else if(percentageAssoc == 75){
#then 11 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(1,2,9,7,10,6,8,11,12,3,5)]
} else {
#otherwise use all SNPs
SingleSNPKernel = SingleSNPKernel
}
} else {
#looking at SNPs in high LD
if(percentageAssoc == 5){
#then 1 SNP is associated
SingleSNPKernel = as.matrix(SingleSNPKernel[,13],ncol=1)
} else if(percentageAssoc == 15){
#then 2 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(13,14)]
} else if(percentageAssoc == 25){
#then 5 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(13,14,4,5,3)]
} else if(percentageAssoc == 50){
#then 7 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(11,12,3,5,4,14,13)]
} else if(percentageAssoc == 75){
#then 11 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(7,10,6,8,11,12,3,5,4,14,13)]
} else {
#otherwise use all SNPs
SingleSNPKernel = SingleSNPKernel
}
}
} else if(gene == "CHI3L2"){
if(LowLD == TRUE){
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(12,2)]
} else if(percentageAssoc == 15){
#then 5 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(12,2,3,9,8)]
} else if(percentageAssoc == 25){
#then 8 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(12,2,3,9,8,24,28,19)]
} else if(percentageAssoc == 50){
#then 17 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(12,2,3,9,8,24,28,19,21,22,20,11,1,10,16,4,31)]
} else if(percentageAssoc == 75){
#then 25 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(12,2,3,9,8,24,28,19,21,22,20,11,1,10,16,4,31,26,25,32,14,27,33,30,7)]
} else {
#otherwise use all SNPs
SingleSNPKernel = SingleSNPKernel
}
} else {
#looking at SNPs in high LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(17,29)]
} else if(percentageAssoc == 15){
#then 5 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(5,6,23,18,13)]
} else if(percentageAssoc == 25){
#then 8 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(5,6,23,18,13,15,17,29)]
} else if(percentageAssoc == 50){
#then 17 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(31,26,25,32,14,27,33,30,7,5,6,23,18,13,15,17,29)]
} else if(percentageAssoc == 75){
#then 25 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(21,22,20,11,1,10,16,4,31,26,25,32,14,27,33,30,7,5,6,23,18,13,15,17,29)]
} else {
#otherwise use all SNPs
SingleSNPKernel = SingleSNPKernel
}
}
} else {
#otherwise we have ASAH1
if(LowLD == TRUE){
#then looking at SNPs in low LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(31, 9)]
} else if(percentageAssoc == 15){
#then 6 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(31,9,4,8,10,40)]
} else if(percentageAssoc == 25){
#then 10 SNPs are associated
if(dim(SingleSNPKernel)[2] < 40){
SingleSNPKernel = SingleSNPKernel[,c(31,9,4,8,10,5,3,2,7,1)]
} else {
SingleSNPKernel = SingleSNPKernel[,c(31,9,4,8,10,40,5,3,2,7)]
}
} else if(percentageAssoc == 50){
#then 20 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(31,9,4,8,10,40,5,3,2,7,1,35,21,22,11,6,15,12,13,14)]
} else if(percentageAssoc == 75){
#then 30 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(31,9,4,8,10,40,5,3,2,7,1,35,21,22,11,6,15,12,13,14,18,39,16,19,20,24,17,26,23,27)]
} else {
#otherwise use all SNPs
SingleSNPKernel = SingleSNPKernel
}
} else {
#looking at SNPs in high LD
if(percentageAssoc == 5){
#then 2 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(25, 32)]
} else if(percentageAssoc == 15){
#then 6 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(30,33,34,38,25,32)]
} else if(percentageAssoc == 25){
#then 10 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(27, 37, 29, 36, 28, 30, 33, 34, 38, 25, 32)]
} else if(percentageAssoc == 50){
#then 20 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(18,39,16,19,20,24,17,26,23,27,37,29,36,28,30,33,34,38,25,32)]
} else if(percentageAssoc == 75){
#then 30 SNPs are associated
SingleSNPKernel = SingleSNPKernel[,c(1,35,21,22,11,6,15,12,13,14,18,39,16,19,20,24,17,26,23,27,37,29,36,28,30,33,34,38,25,32)]
} else {
#otherwise use all SNPs
SingleSNPKernel = SingleSNPKernel
}
}
}
if(useWeights == FALSE){
#unweighted sum of kernel
if(standardize == FALSE){
#then no division by anything, just a simple sum of kernels
geneScore = rowSums(SingleSNPKernel)
} else {
#unweighted sum but standardized by maximum score value
#sum over all the SNPs
geneScore.raw = rowSums(SingleSNPKernel)
#figure out if we need to multiply number of SNPs by 2
#if the max score is larger than the total number of SNPs,
#we know the score can go up to 2, so we multiply denominator by 2
if(max(geneScore.raw) > ncol(SingleSNPKernel)){
geneScore = geneScore.raw/(2*ncol(SingleSNPKernel))
} else {
#otherwise just divide by total number of SNPs
geneScore = geneScore.raw/(ncol(SingleSNPKernel))
}
}
} else {
#otherwise we are using weights
#need to determine number of weights to pull
nSNP = dim(SingleSNPKernel)[2]
#pull nSNP weights from the main weight vector
weights_subset = weights[1:nSNP]
#need the sum of all weights
weightTotal = sum(weights_subset)
#need to multiply kernel by weight and sum
#make weights a vector (m x 1)
weights.vec = as.matrix(weights_subset)
sum.w.Kernel = SingleSNPKernel %*% weights.vec
geneScore = sum.w.Kernel/weightTotal
}
geneScore.mat = as.matrix(geneScore)
return(geneScore.mat)
}
###################################
##Step 3) Generate covariates
#generate covariate data
GenCovData = function(SampleSize, BinaryValues, ContinuousValues){
#####################################
# Simulate covariates
# returns a matrix of size N x K (N = sample size)
# where K is number of covariates generated
# allows input of number of binary and
# continuous covariate values
#####################################
  # Binary covariates are drawn from a random
  # binomial distribution with p=0.5
  # Continuous covariates are drawn from a standard
  # Normal distribution
K = BinaryValues + ContinuousValues
ss = SampleSize
#define W matrix
W = matrix(NA,nrow = ss, ncol = K)
for(ii in 1:BinaryValues){
W[,ii] = rbinom(ss,1,0.5)
}
for(ii in (BinaryValues+1):K){
W[,ii] = rnorm(ss, mean = 0, sd = 1)
}
return(W)
}
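#Example (illustrative): covariates for 100 pairs, one binary and one continuous
# W <- GenCovData(SampleSize = 100, BinaryValues = 1, ContinuousValues = 1)
# dim(W) # 100 x 2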
###################################
##Step 4) Generate true phenotype data using covariates only (TIE)
## or using covariates, R genotype, and score (power)
#generate phenotype data - null model, with covariates
GenNullPhenos = function(SampleSize, includeCov = FALSE, YCat = TRUE, YPrev, Covariates){
# generates null phenotypes to be used as "true" Y values
# returns N x 1 vector of phenotypes
# SampleSize (N) is number of D/R pairs
# includeCov is FALSE if no covariates are being used, TRUE if they are being used
# YCat = TRUE if outcome is categorical, otherwise Y is continuous
# YPrev is the probability that Y=1, only needed if YCat=TRUE
# Covariates is N x K matrix of covariate values (if they are being used)
ss = SampleSize
#only define W and eff sizes if we have covariates
if(includeCov == TRUE){
W = Covariates
K = ncol(W)
#define all effect sizes as 0.5, since this is what most people do in
#similar papers for TIE simulations
eff_sizes = matrix(0.5, nrow = K, ncol = 1)
}
if(YCat == TRUE){
prev = YPrev
}
#if not including covariates
if(includeCov == FALSE){
    #if Y is categorical, and no covariates
# then Y is pulled from a binom(prev) for each individual
#logit (pi) = alpha_0
if(YCat == TRUE){
nullPhenos = rbinom(ss, 1, prev)
}
#otherwise Y is continuous, and no covs
#then Y is pulled from a standard normal for each individual
# basically, Y = epsilon (random error)
else {
nullPhenos = rnorm(ss, 0, 1)
}
}
# otherwise we are including covariates
else{
#if Y is categorical, and covariates
#then logit Pr(Y=1) = a0 + a*Covariates
if(YCat == TRUE){
#alpha 1
alphas = W %*% eff_sizes
#set alpha0 value to start
alpha_0 = rep(1,ss)
#calc alpha_0 + alphas
lin_predict = alpha_0 + alphas
#calculate p values for binomials, want colMeans of this to be around prevalence
prob_case = exp(lin_predict)/(1 + exp(lin_predict))
      #change alpha_0 until the mean case probability is close to the prevalence
      while(mean(prob_case) > prev) {
        #decrement alpha_0 and recompute the case probabilities
        alpha_0 = alpha_0 - 0.01
        lin_predict = alpha_0 + alphas
        prob_case = exp(lin_predict)/(1 + exp(lin_predict))
      }
      #use binomial dist to get phenos once the case probability is calibrated
      nullPhenos = rbinom(ss,1,prob_case)
}
#otherwise Y is continuous
else {
#alphas
alphas = W %*% eff_sizes
#epsilon
epsilon = rnorm(ss,0,1)
#linear model, added in normally dist error
lin_predict = alphas + epsilon
#add to null phenos
nullPhenos = lin_predict
}
}
nullPhenos = as.matrix(nullPhenos, nrow = ss, ncol = 1)
return(nullPhenos)
}
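#Example (illustrative): binary null phenotypes at ~10% prevalence
# W <- GenCovData(SampleSize = 100, BinaryValues = 1, ContinuousValues = 1)
# y0 <- GenNullPhenos(SampleSize = 100, includeCov = TRUE, YCat = TRUE,
#                     YPrev = 0.1, Covariates = W)
# mean(y0) # should land near (at or just below) 0.1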
#generate phenotype data - alt model, with covariates
GenAltPhenos = function(SampleSize, includeCov = FALSE, YCat = TRUE, YPrev, Covariates, RGenoData = matrix(), ScoreData = matrix(), Betas = matrix(), Gamma = c()){
# generates "true" phenotypes for power analysis using the alternative
# hypothesis model
# returns N x 1 vector of phenotypes
# SampleSize: number of D/R pairs
# includeCov: TRUE or FALSE for if covariates are included in the modeling
# YCat: TRUE of FALSE for whether Y is categorical or continuous
# YPrev: if Y is categorical, the p(y=1)
# Covariates is K x N matrix of covariate values
# RGenoData is N x m matrix of R genotype values
# ScoreData is N x 1 vector of gene-based score values
# Betas: cov effect sizes for R geno, could all be 0 (m x 1)
# Gamma: cov effect size for score, could all be 0 (1 x 1)
ss = SampleSize
#only define W and eff sizes if we have covariates
if(includeCov == TRUE){
W = Covariates
K = ncol(W)
#define all effect sizes as 0.5, since this is what most people do in
#similar papers for TIE simulations
eff_sizes = matrix(0.5, nrow = K, ncol = 1)
}
if(YCat == TRUE){
prev = YPrev
}
X = RGenoData
B = as.matrix(Betas, ncol=1)
Z = ScoreData
G = matrix(Gamma, nrow = 1, ncol = 1)
#if not including covariates
if(includeCov == FALSE){
    #if Y is categorical, and no covariates
# then Y is calculated using only XBeta and ZGamma values
#logit (pi) = alpha_0 + XBeta + ZGamma
if(YCat == TRUE){
#beta terms
betas = X %*% B
#set alpha0 value to start
alpha_0 = rep(1,ss)
#gamma term
gamma = ScoreData %*% Gamma
#calc alpha_0 + Betas*SNPGeno + Gamma*Score
lin_predict = alpha_0 + betas + gamma
#calculate p values for binomials, want colMeans of this to be around prevalence
prob_case = exp(lin_predict)/(1 + exp(lin_predict))
      #change alpha_0 until the mean case probability is close to the prevalence
      while(mean(prob_case) > prev) {
        alpha_0 = alpha_0 - 0.01
        #recalc alpha_0 + Betas*SNPGeno + Gamma*Score
        lin_predict = alpha_0 + betas + gamma
        prob_case = exp(lin_predict)/(1 + exp(lin_predict))
      }
      #use binomial dist to get phenos once the case probability is calibrated
      altPhenos = rbinom(ss,1,prob_case)
} else {
#otherwise Y is continuous, and no covs
#then Y is pulled from a standard normal for each individual
# basically, Y = epsilon (random error)
#beta terms
betas = X %*% B
#gamma term
gamma = ScoreData %*% Gamma
#epsilon
epsilon = rnorm(ss,0,1)
#linear model, added in normally dist error
lin_predict = betas + gamma + epsilon
#add to null phenos
altPhenos = lin_predict
}
} else {
# otherwise we are including covariates
#if Y is categorical, and covariates
#then logit Pr(Y=1) = a0 + a*Covariates + XBeta + ZGamma
if(YCat == TRUE){
#alpha terms
alphas = W %*% eff_sizes
#beta terms
betas = X %*% B
#set alpha0 value to start
alpha_0 = rep(1,ss)
#gamma term
gamma = ScoreData %*% G
#calc alpha_0 + Betas*SNPGeno + Gamma*Score
lin_predict = alpha_0 + alphas + betas + gamma
#calculate p values for binomials, want colMeans of this to be around prevalence
prob_case = exp(lin_predict)/(1 + exp(lin_predict))
      #change alpha_0 until the mean case probability is close to the prevalence
      while(mean(prob_case) > prev) {
        alpha_0 = alpha_0 - 0.01
        #recalc alpha_0 + alphas + Betas*SNPGeno + Gamma*Score
        lin_predict = alpha_0 + alphas + betas + gamma
        prob_case = exp(lin_predict)/(1 + exp(lin_predict))
      }
      #use binomial dist to get phenos once the case probability is calibrated
      altPhenos = rbinom(ss,1,prob_case)
} else {
#otherwise Y is continuous
#alpha terms
alphas = W %*% eff_sizes
#beta terms
betas = X %*% B
#gamma term
gamma = ScoreData %*% G
#epsilon
epsilon = rnorm(ss,0,1)
#linear model, added in normally dist error
lin_predict = alphas + betas + gamma + epsilon
#add to null phenos
altPhenos = lin_predict
}
}
altPhenos = as.matrix(altPhenos, nrow = ss, ncol = 1)
return(altPhenos)
}
###################################
##Step 5) Calculate Scores for either R genotypes OR genetic matching score
#calculate individual score values (for either R geno or mismatch score)
CalcUScore = function(SampleSize, includeCov = FALSE, CovData, CalcUR = TRUE, RGenoData, ScoreData, Phenos, BinPhenos = TRUE){
#SampleSize is number of D/R pairs (N)
#includeCov: T or F whether or not covariates are included in the modeling
#CovData: Covariate matrix (N x K) if covariates are included
#CalcUR: T if we are calculating U for R geno, F if we are calculating U for Score
#RGenoData: Matrix of R genotype data (N x m)
#ScoreData: Vector of Gene-based score data (Nx1)
#Phenos: Vector of generated phenotypes from either GenNullPhenos or GenAltPhenos
  #BinPhenos: T if phenotypes are binary, F if they are continuous
#define variables
ss = SampleSize
if(includeCov == TRUE){
W = CovData
K = ncol(W)
  } else {
    K = 0
    #no covariates: use a constant column so the null model fits below are intercept-only
    W = matrix(1, nrow = ss, ncol = 1)
  }
if(CalcUR == TRUE){
#then we have R geno data
XZ = RGenoData
} else {
#otherwise we have score data
XZ = ScoreData
}
Y = Phenos
#######################################################
#For binary phenotypes
if(BinPhenos == TRUE){
#need to calculate p_hat (predicted prob of Y=1 under H0)
nulllogitreg = glm(Y~W,family=binomial)
# p: expected value of Y from the logistic regression
p1 = fitted(nulllogitreg)
p0 = 1 - p1
#calculate U stat using Taylor expanded equation
#we are summing over N (rows)
#define (1,W) as N x (k+1) vector
if(includeCov == FALSE){
#if no covariates, then (1,W) will be a N x 1 vector of 1s
OneW = matrix(1, nrow = nrow(Y), ncol = 1)
} else {
#otherwise we have covariates, so (1,W) will be a N x (K + 1) vector
OneW = cbind(matrix(1, nrow = nrow(Y), ncol = 1), W)
}
OneW = as.matrix(OneW)
#calculate the inverse term
tosum = list()
for(i in 1:ss){
tosum[[i]] = t(t(OneW[i,])) %*% t(OneW[i,]) * p1[i] * p0[i]
}
SummedMats = Reduce('+', tosum)
invTerm = solve(SummedMats)
#calculate the summed term
sumterm.raw = list()
for(i in 1:ss){
sumterm.raw[[i]] = t(t(XZ[i,])) %*% t(OneW[i,]) * p1[i] * p0[i]
}
summedTerm = Reduce('+', sumterm.raw)
#combine with other terms and sum to get score(s)
finalTermsToSum = list()
for(i in 1:ss){
finalTermsToSum[[i]] = (t(t(XZ[i,])) - summedTerm %*% invTerm %*% t(t(OneW[i,])))*(Y[i]-p1[i])
}
UScores = Reduce('+', finalTermsToSum)
}
else {
#otherwise we have continuous phenotypes so the equation changes slightly
#calculate U stat using Taylor expanded equation
#we are summing over N (rows)
nulllinearreg = glm(Y~W,family=gaussian)
# expected value of Y from the linear regression
Yhat = fitted(nulllinearreg)
#define (1,W) as N x (k+1) vector
if(includeCov == FALSE){
#if no covariates, then (1,W) will be a N x 1 vector of 1s
OneW = matrix(1, nrow = nrow(Y), ncol = 1)
} else {
#otherwise we have covariates, so (1,W) will be a N x (K + 1) vector
OneW = cbind(matrix(1, nrow = nrow(Y), ncol = 1), W)
}
OneW = as.matrix(OneW)
#calculate the inverse term
tosum = list()
for(i in 1:ss){
tosum[[i]] = t(t(OneW[i,])) %*% t(OneW[i,])
}
SummedMats = Reduce('+', tosum)
invTerm = solve(SummedMats)
#calculate the summed term
sumterm.raw = list()
for(i in 1:ss){
sumterm.raw[[i]] = t(t(XZ[i,])) %*% t(OneW[i,])
}
summedTerm = Reduce('+', sumterm.raw)
#combine with other terms and sum to get score(s)
finalTermsToSum = list()
for(i in 1:ss){
finalTermsToSum[[i]] = (t(t(XZ[i,])) - summedTerm %*% invTerm %*% t(t(OneW[i,])))*(Y[i]-Yhat[i])
}
UScores = Reduce('+', finalTermsToSum)
}
return (UScores)
}
CalcUScoreGhat = function(SampleSize, includeCov = FALSE, CovData, CalcUR = TRUE, RGenoData, ScoreData, Phenos, BinPhenos = TRUE){
#define variables
#SampleSize is number of D/R pairs (N)
#includeCov: T or F whether or not covariates are included in the modeling
#CovData: Covariate matrix (N x K) if covariates are included
#CalcUR: T if we are calculating U for R geno, F if we are calculating U for Score
#RGenoData: Matrix of R genotype data (N x m)
#ScoreData: Vector of Gene-based score data (Nx1)
#Phenos: Vector of generated phenotypes from either GenNullPhenos or GenAltPhenos
  #BinPhenos: T if phenotypes are binary, F if they are continuous
#define variables
ss = SampleSize
if(includeCov == TRUE){
W = CovData
K = ncol(W)
} else {
K = 0
W = matrix(1, nrow = ss, ncol = 1)
}
if(CalcUR == TRUE){
#then we have R geno data
XZ = RGenoData
} else {
#otherwise we have score data
XZ = ScoreData
}
Y = Phenos
if(BinPhenos == TRUE){
nulllogitreg=glm(Y~W,family=binomial)
# p: expected value of Y from the logistic regression
p1=fitted(nulllogitreg)
p0 = 1 - p1
# X or Z hat: expected genotype from weighted linear regression
XZhat=XZ
for(j in 1:ncol(XZ)){
xz=XZ[,j]
linearreg=lm(xz~W,weights=p1*p0) # weighted linear regression
XZhat[,j]=fitted(linearreg)
}
U_score_XZhat = as.matrix(t(XZ-XZhat)%*%(Y-p1))
}
else {
#phenotypes are continuous, so slightly different approach
# X or Z hat: expected genotype from unweighted linear regression
XZhat=XZ
for(j in 1:ncol(XZ)){
xz=XZ[,j]
linearreg=lm(xz~W) # unweighted linear regression
XZhat[,j]=fitted(linearreg)
}
#need expected value of Ys
nulllinearreg = glm(Y~W,family=gaussian)
# expected value of Y from the linear regression
Yhat = fitted(nulllinearreg)
U_score_XZhat = as.matrix(t(XZ-XZhat)%*%(Y-Yhat))
}
return(U_score_XZhat)
}
#also need to calc Q values
CalcQValues = function(SampleSize, includeCov = FALSE, CovData, CalcUR = TRUE, RGenoData, ScoreData, Phenos, BinPhenos = TRUE){
#SampleSize is number of D/R pairs (N)
#includeCov: T or F whether or not covariates are included in the modeling
#CovData: Covariate matrix (N x K) if covariates are included
#CalcUR: T if we are calculating U for R geno, F if we are calculating U for Score
#RGenoData: Matrix of R genotype data (N x m)
#ScoreData: Vector of Gene-based score data (Nx1)
#Phenos: Vector of generated phenotypes from either GenNullPhenos or GenAltPhenos
  #BinPhenos: T if phenotypes are binary, F if they are continuous
#output should be matrix of dim n x m for QR, and n x 1 for QS
#define variables
ss = SampleSize
if(includeCov == TRUE){
W = CovData
K = ncol(W)
  } else {
    K = 0
    #no covariates: use a constant column so the null model fits below are intercept-only
    W = matrix(1, nrow = ss, ncol = 1)
  }
if(CalcUR == TRUE){
#then we have R geno data
XZ = RGenoData
m = ncol(XZ)
} else {
#otherwise we have score data
XZ = ScoreData
m=1
}
Y = Phenos
#######################################################
#For binary phenotypes
if(BinPhenos == TRUE){
#need to calculate p_hat (predicted prob of Y=1 under H0)
nulllogitreg = glm(Y~W,family=binomial)
# p: expected value of Y from the logistic regression
p1 = fitted(nulllogitreg)
p0 = 1 - p1
#calculate U stat using Taylor expanded equation
#we are summing over N (rows)
#define (1,W) as N x (k+1) vector
if(includeCov == FALSE){
#if no covariates, then (1,W) will be a N x 1 vector of 1s
OneW = matrix(1, nrow = nrow(Y), ncol = 1)
} else {
#otherwise we have covariates, so (1,W) will be a N x (K + 1) vector
OneW = cbind(matrix(1, nrow = nrow(Y), ncol = 1), W)
}
OneW = as.matrix(OneW)
#calculate the inverse term
tosum = list()
for(i in 1:ss){
tosum[[i]] = t(t(OneW[i,])) %*% t(OneW[i,]) * p1[i] * p0[i]
}
SummedMats = Reduce('+', tosum)
invTerm = solve(SummedMats)
#calculate the summed term
sumterm.raw = list()
for(i in 1:ss){
sumterm.raw[[i]] = t(t(XZ[i,])) %*% t(OneW[i,]) * p1[i] * p0[i]
}
summedTerm = Reduce('+', sumterm.raw)
#calculate Q values
finalTermsToSum = list()
for(i in 1:ss){
finalTermsToSum[[i]] = (t(t(XZ[i,])) - summedTerm %*% invTerm %*% t(t(OneW[i,])))*(Y[i]-p1[i])
}
Qmat = matrix(unlist(finalTermsToSum), nrow = ss, ncol = m, byrow = TRUE)
}
else {
#otherwise we have continuous phenotypes so the equation changes slightly
#calculate U stat using Taylor expanded equation
#we are summing over N (rows)
nulllinearreg = glm(Y~W,family=gaussian)
# expected value of Y from the linear regression
Yhat = fitted(nulllinearreg)
#define (1,W) as N x (k+1) vector
if(includeCov == FALSE){
#if no covariates, then (1,W) will be a N x 1 vector of 1s
OneW = matrix(1, nrow = nrow(Y), ncol = 1)
} else {
#otherwise we have covariates, so (1,W) will be a N x (K + 1) vector
OneW = cbind(matrix(1, nrow = nrow(Y), ncol = 1), W)
}
OneW = as.matrix(OneW)
#calculate the inverse term
tosum = list()
for(i in 1:ss){
tosum[[i]] = t(t(OneW[i,])) %*% t(OneW[i,])
}
SummedMats = Reduce('+', tosum)
invTerm = solve(SummedMats)
#calculate the summed term
sumterm.raw = list()
for(i in 1:ss){
sumterm.raw[[i]] = t(t(XZ[i,])) %*% t(OneW[i,])
}
summedTerm = Reduce('+', sumterm.raw)
#combine with other terms and sum to get score(s)
finalTermsToSum = list()
for(i in 1:ss){
finalTermsToSum[[i]] = (t(t(XZ[i,])) - summedTerm %*% invTerm %*% t(t(OneW[i,])))*(Y[i]-Yhat[i])
}
Qmat = matrix(unlist(finalTermsToSum), nrow = ss, ncol = m, byrow = TRUE)
}
return(Qmat)
}
CalcQValuesGhat = function(SampleSize, includeCov = FALSE, CovData, CalcUR = TRUE, RGenoData, ScoreData, Phenos, BinPhenos = TRUE){
#define variables
#SampleSize is number of D/R pairs (N)
#includeCov: T or F whether or not covariates are included in the modeling
#CovData: Covariate matrix (N x K) if covariates are included
#CalcUR: T if we are calculating U for R geno, F if we are calculating U for Score
#RGenoData: Matrix of R genotype data (N x m)
#ScoreData: Vector of Gene-based score data (Nx1)
#Phenos: Vector of generated phenotypes from either GenNullPhenos or GenAltPhenos
  #BinPhenos: T if phenotypes are binary, F if they are continuous
#output should be matrix of dim n x m for QR, and n x 1 for QS
#define variables
ss = SampleSize
if(includeCov == TRUE){
W = CovData
K = ncol(W)
} else {
K = 0
W = matrix(1, nrow = ss, ncol = 1)
}
if(CalcUR == TRUE){
#then we have R geno data
XZ = RGenoData
m = ncol(XZ)
} else {
#otherwise we have score data
XZ = ScoreData
m = 1
}
Y = Phenos
if(BinPhenos == TRUE){
nulllogitreg=glm(Y~W,family=binomial)
# p: expected value of Y from the logistic regression
p1=fitted(nulllogitreg)
p0 = 1 - p1
# X or Z hat: expected genotype from weighted linear regression
XZhat=XZ
for(j in 1:ncol(XZ)){
xz=XZ[,j]
linearreg=lm(xz~W,weights=p1*p0) # weighted linear regression
XZhat[,j]=fitted(linearreg)
}
#define Q matrix
Q=matrix(ncol=m,nrow=ss)
#populate Q differently for R geno vs Score
for (i in 1:nrow(Q)){
for (j in 1:ncol(Q)){
Q[i,j]=(XZ[i,j]-XZhat[i,j])*(Y[i]-p1[i])
}
}
}
else{
#phenotypes are continuous, so slightly different approach
# X or Z hat: expected genotype from unweighted linear regression
XZhat=XZ
for(j in 1:ncol(XZ)){
xz=XZ[,j]
linearreg=lm(xz~W) # unweighted linear regression
XZhat[,j]=fitted(linearreg)
}
#need expected value of Ys
nulllinearreg = glm(Y~W,family=gaussian)
# expected value of Y from the linear regression
Yhat = fitted(nulllinearreg)
#define Q matrix
Q=matrix(ncol=m,nrow=ss)
for (i in 1:nrow(Q)){
for (j in 1:ncol(Q)){
Q[i,j]=(XZ[i,j]-XZhat[i,j])*(Y[i]-Yhat[i])
}
}
}
return(Q)
}
###################################
##Step 6) Calculate original variance
CalcVariance = function(SampleSize, QValues){
#Calculates original variance/cov matrix for score
#define variables
#SampleSize is number of D/R pairs (N)
#QValues is a matrix of combined Q values for R geno and Score
### dim should be n x (m+1)
Q = QValues
ss = SampleSize
#variance is nQ'Q
V = ss*t(Q)%*%Q
#return variance matrix
return(V)
}
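#Note: with Q the n x (m+1) matrix of per-subject score contributions,
#n * t(Q) %*% Q serves as the variance/covariance estimate of the stacked
#score vector (U_R, U_S) that CalcStatisticPVal() decomposes below.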
###################################
##Step 7) Calc final statistic
CalcStatisticPVal = function(SampleSize, Variance, UscoresR, UscoreS, s){
#define variables
#SampleSize is number of D/R pairs (N)
#Variance is the original Var/Cov matrix for the scores calculated using CalcVariance()
#### dim should be (m+1) x (m+1)
#UscoresR is the m x 1 vector of scores for the r genos
#UscoreS should be the 1x1 score value for the gene-based score
#s is the percent of variance we want explained by the PCs we are keeping
ss = SampleSize
VFull = Variance
UR = UscoresR
US = UscoreS
#Decompose the VFull matrix
m = dim(VFull)[1] - 1
VR = VFull[1:m, 1:m]
CRS = matrix(VFull[1:m, m+1], nrow = m, ncol = 1)
CSR = matrix(VFull[m+1, 1:m], nrow = 1, ncol = m)
VS = VFull[m+1, m+1]
#Eigen Decomp of VR
A = eigen(VR)$vectors
lambdas = eigen(VR)$values
# keep PCs that explain s% of the variance
eigen_percent = cumsum(lambdas/sum(lambdas))
#determine if s is too small
#if s is smaller than smallest PVE, use 1 PC
s_pct = s/100
if(eigen_percent[1] >= s_pct){
num_pc = 1
} else {
num_pc = which(eigen_percent >= s_pct)[1]
}
# num_pc = s #number of PCs is directly equal to s
A_s = A[,1:num_pc]
lambda_s = lambdas[1:num_pc]
#define UPR vector
UPR = matrix(nrow = num_pc, ncol = 1)
#if num_pc = 1, then this errors, need to split into cases
if(num_pc == 1){
UPR = t(UR) %*% A_s %*% (1/sqrt(lambda_s))
} else {
for(i in 1:num_pc){
UPR[i,] = t(UR) %*% A_s[,i] %*% (1/sqrt(lambda_s[i]))
}
}
#construct UP vector
UP = rbind(UPR, US)
UP.t = t(UP)
#construct new variance/cov matrix
Ident = diag(1, nrow = num_pc, ncol = num_pc)
Cov_UPR_US = matrix(nrow = num_pc, ncol = 1)
#if num_pc = 1, then this errors, need to split into cases
if(num_pc == 1){
Cov_UPR_US = (1/sqrt(lambda_s)) * t(A_s) %*% CRS
} else {
for(i in 1:num_pc){
Cov_UPR_US[i,] = (1/sqrt(lambda_s[i])) * t(A_s[,i]) %*% CRS
}
}
Cov_US_UPR = t(Cov_UPR_US)
LHS = rbind(Ident, Cov_US_UPR)
RHS = rbind(Cov_UPR_US, VS)
NewVar = cbind(LHS, RHS)
  #if solve() reports a singular matrix (seen with binary phenotype data),
  #lower the tolerance, e.g. solve(NewVar, tol = 1.0e-30)
  NewVarInv = solve(NewVar)
finalStat = ss * (UP.t %*% NewVarInv %*% UP)
#calculate p value
#stat should be dist as chi-sq s+1
pval = 1 - pchisq(finalStat, df = num_pc + 1)
dof = num_pc + 1
finalOutput = cbind(finalStat, pval, dof)
return(finalOutput)
}
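#Summary of the statistic implemented above: the R-genotype scores U_R are
#projected onto the principal components of V_R that explain s% of its
#variance, standardized by 1/sqrt(lambda), stacked with the gene-based score
#U_S, and the quadratic form n * U_P' V_P^{-1} U_P is referred to a
#chi-squared distribution with (number of PCs + 1) degrees of freedom.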
################################################
## Putting some functions together
CalcNullPhenotypeData = function(chr, numPairs, simNum, YPrev, gene, path, weightedScores = FALSE, standardizeScores = FALSE){
#pull recipient and donor genotypes
RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = simNum, gene = gene, path = path)
DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = simNum, gene = gene, path = path)
#calculate single snp scores
IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
#calculate gene based scores
#check to see if weights are used for scores
if(weightedScores == FALSE){
#check to see if scores should be standardized (can't be weighted and standardized)
if(standardizeScores == FALSE){
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = TRUE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = TRUE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = TRUE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = TRUE, useWeights = FALSE)
}
  } else {
    #scoreWeights below is assumed to be defined in the calling environment
    #(it is not an argument of this function)
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
}
#generate covariates
  #for now, a single binary and a single continuous covariate
CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
#generate phenotypes, both continuous and binary
CatPhenos = GenNullPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData)
ContPhenos = GenNullPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData)
Phenos = list(CatPhenos, ContPhenos)
return(Phenos)
}
CalcAltPhenotypeData_Scores = function(chr, numPairs, simNum, YPrev, gene, path, percentageAssoc, LowLD, TrueScore, Gamma){
#pull recipient and donor genotypes
RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = simNum, gene = gene, path = path)
DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = simNum, gene = gene, path = path)
#calculate single snp scores
IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
#calc gene based score for all SNPs
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
#also need to calculate gene based scores if not all SNPs are associated
IBS.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = IBS.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
Incomp.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = Incomp.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
AMS.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = AMS.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
BinMM.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = BinMM.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
#need to use TrueScore to pull gene based scores matrix for generating phenotypes
if(TrueScore == "IBS.gene"){
PhenoScore = IBS.gene.PercentOfSNPs
} else if(TrueScore == "Incomp.gene"){
PhenoScore = Incomp.gene.PercentOfSNPs
} else if(TrueScore == "AMS.gene"){
PhenoScore = AMS.gene.PercentOfSNPs
} else {
PhenoScore = BinMM.gene.PercentOfSNPs
}
#generate covariates
  #for now, a single binary and a single continuous covariate
CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
#need to define null Betas for phenotype generation
nSNP = ncol(RGenos) #this should be the number of SNPs
Betas = rep(0,nSNP) #generate null beta values
Betas = as.matrix(Betas, ncol = 1)
#generate phenotypes, both continuous and binary
#Based on single true score
CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
AltScorePhenos = list(RGenos, IBS.gene, Incomp.gene, AMS.gene, BinMM.gene, CovData, CatPhenos, ContPhenos)
return(AltScorePhenos)
}
CalcAltPhenotypeData_RSNPs = function(chr, numPairs, simNum, YPrev, gene, path, ORSize, LowLD, percentAssoc, TrueScore){
#define effect based on OR size
if(ORSize == "small"){
effect = 0.14
} else if(ORSize == "medium"){
effect = 0.41
} else {
effect = 0.69
}
  assocSNPs = DetermineAssocRSNPs(gene = gene, LowLD = LowLD, percentageAssoc = percentAssoc)
#pull recipient and donor genotypes
RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = simNum, gene = gene, path = path)
DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = simNum, gene = gene, path = path)
#calculate single snp scores
IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
#calculate gene based scores
  #check to see if weights are used for scores
  #(weightedScores, standardizeScores, and scoreWeights are assumed to be
  # defined in the calling environment; they are not arguments of this function)
  if(weightedScores == FALSE){
#check to see if scores should be standardized (can't be weighted and standardized)
if(standardizeScores == FALSE){
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = TRUE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = TRUE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = TRUE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = TRUE, useWeights = FALSE)
}
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
}
#need to use TrueScore to pull gene based scores matrix for generating phenotypes
if(TrueScore == "IBS.gene"){
PhenoScore = IBS.gene
} else if(TrueScore == "Incomp.gene"){
PhenoScore = Incomp.gene
} else if(TrueScore == "AMS.gene"){
PhenoScore = AMS.gene
} else {
PhenoScore = BinMM.gene
}
#generate covariates
#for now, a single binary and a single continuous covariate
CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
#need to define null Betas for phenotype generation
nSNP = ncol(RGenos) #this should be the number of SNPs
nullBetas = rep(0,nSNP) #generate null beta values
Betas = nullBetas
#set assoc Betas
#all betas have same effect for now
for(jj in assocSNPs){
  Betas[jj] = effect
}
Betas = matrix(Betas, ncol = 1)
nullGamma = c(0)
#generate phenotypes, both continuous and binary
#Based on single true score
CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = nullGamma)
ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = nullGamma)
AltPhenos_RSNPs = list(RGenos, IBS.gene, Incomp.gene, AMS.gene, BinMM.gene, CovData, CatPhenos, ContPhenos)
return(AltPhenos_RSNPs)
}
RunSKATAnalysis = function(PhenoList, kernel, kernelWeights = c()){
RGenos = PhenoList[[1]]
IBS.gene = PhenoList[[2]]
Incomp.gene = PhenoList[[3]]
AMS.gene = PhenoList[[4]]
BinMM.gene = PhenoList[[5]]
CovData = PhenoList[[6]]
CatPhenos = PhenoList[[7]]
ContPhenos = PhenoList[[8]]
#Combine R geno and Scores into 4 separate datasets, size: N x (m+1)
RGeno.IBS.snp = cbind(RGenos, IBS.gene)
RGeno.Incomp.snp = cbind(RGenos, Incomp.gene)
RGeno.AMS.snp = cbind(RGenos, AMS.gene)
RGeno.BinMM.snp = cbind(RGenos, BinMM.gene)
## Generate SKAT Null Models
# formulas will be Y ~ covariates for continuous and dichotomous Y
obj_dich=SKAT_Null_Model(CatPhenos~CovData, out_type="D", Adjustment = FALSE)
obj_cont=SKAT_Null_Model(ContPhenos~CovData, out_type="C", Adjustment = FALSE)
#Perform SKAT for all 8 combos of score and cont/dich outcome
#unweighted SKAT
if(length(kernelWeights) == 0){
Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
} else {
Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
}
#fill columns in order
##Binary Phenos
##IBS Cat, Incomp Cat, AMS Cat, Bin MM Cat,
IBSCat = c(Stat_IBS_CatPhenos$Q, Stat_IBS_CatPhenos$p.value)
IncompCat = c(Stat_Incomp_CatPhenos$Q, Stat_Incomp_CatPhenos$p.value)
AMSCat = c(Stat_AMS_CatPhenos$Q, Stat_AMS_CatPhenos$p.value)
BinMMCat = c(Stat_BinMM_CatPhenos$Q, Stat_BinMM_CatPhenos$p.value)
##Cont Phenos
##IBS Cont, Incomp Cont, AMS Cont, Bin MM Cont
IBSCont = c(Stat_IBS_ContPhenos$Q, Stat_IBS_ContPhenos$p.value)
IncompCont = c(Stat_Incomp_ContPhenos$Q, Stat_Incomp_ContPhenos$p.value)
AMSCont = c(Stat_AMS_ContPhenos$Q, Stat_AMS_ContPhenos$p.value)
BinMMCont = c(Stat_BinMM_ContPhenos$Q, Stat_BinMM_ContPhenos$p.value)
statsAndPValsMat = cbind(IBSCat,IncompCat,AMSCat,BinMMCat,IBSCont,IncompCont,AMSCont,BinMMCont)
return(statsAndPValsMat)
}
RunSKATBinaryAnalysis = function(PhenoList, kernel, kernelWeights = c()){
RGenos = PhenoList[[1]]
IBS.gene = PhenoList[[2]]
Incomp.gene = PhenoList[[3]]
AMS.gene = PhenoList[[4]]
BinMM.gene = PhenoList[[5]]
CovData = PhenoList[[6]]
CatPhenos = PhenoList[[7]]
ContPhenos = PhenoList[[8]]
#Combine R geno and Scores into 4 separate datasets, size: N x (m+1)
RGeno.IBS.snp = cbind(RGenos, IBS.gene)
RGeno.Incomp.snp = cbind(RGenos, Incomp.gene)
RGeno.AMS.snp = cbind(RGenos, AMS.gene)
RGeno.BinMM.snp = cbind(RGenos, BinMM.gene)
## Generate SKAT Null Models
# formulas will be Y ~ covariates for continuous and dichotomous Y
obj_dich=SKAT_Null_Model(CatPhenos~CovData, out_type="D", Adjustment = FALSE)
obj_cont=SKAT_Null_Model(ContPhenos~CovData, out_type="C", Adjustment = FALSE)
#Perform SKAT for all 8 combos of score and cont/dich outcome
#unweighted SKAT
if(length(kernelWeights) == 0){
Stat_IBS_CatPhenos = SKATBinary(RGeno.IBS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_Incomp_CatPhenos = SKATBinary(RGeno.Incomp.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_AMS_CatPhenos = SKATBinary(RGeno.AMS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_BinMM_CatPhenos = SKATBinary(RGeno.BinMM.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
} else {
Stat_IBS_CatPhenos = SKATBinary(RGeno.IBS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_Incomp_CatPhenos = SKATBinary(RGeno.Incomp.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_AMS_CatPhenos = SKATBinary(RGeno.AMS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_BinMM_CatPhenos = SKATBinary(RGeno.BinMM.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
}
#fill columns in order
##Binary Phenos
##IBS Cat, Incomp Cat, AMS Cat, Bin MM Cat,
IBSCat = c(Stat_IBS_CatPhenos$Q, Stat_IBS_CatPhenos$p.value)
IncompCat = c(Stat_Incomp_CatPhenos$Q, Stat_Incomp_CatPhenos$p.value)
AMSCat = c(Stat_AMS_CatPhenos$Q, Stat_AMS_CatPhenos$p.value)
BinMMCat = c(Stat_BinMM_CatPhenos$Q, Stat_BinMM_CatPhenos$p.value)
##Cont Phenos
##IBS Cont, Incomp Cont, AMS Cont, Bin MM Cont
IBSCont = c(Stat_IBS_ContPhenos$Q, Stat_IBS_ContPhenos$p.value)
IncompCont = c(Stat_Incomp_ContPhenos$Q, Stat_Incomp_ContPhenos$p.value)
AMSCont = c(Stat_AMS_ContPhenos$Q, Stat_AMS_ContPhenos$p.value)
BinMMCont = c(Stat_BinMM_ContPhenos$Q, Stat_BinMM_ContPhenos$p.value)
statsAndPValsMat = cbind(IBSCat,IncompCat,AMSCat,BinMMCat,IBSCont,IncompCont,AMSCont,BinMMCont)
return(statsAndPValsMat)
}
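# A minimal usage sketch of the pipeline above (all argument values are
# hypothetical and the simulated genotype files under `path` must exist;
# requires the SKAT package):
# library(SKAT)
# PhenoList = CalcAltPhenotypeData_RSNPs(chr = 1, numPairs = 500, simNum = 1,
#   YPrev = 0.2, gene = "gene1", path = "/path/to/sims", ORSize = "medium",
#   LowLD = TRUE, percentageAssoc = 0.5, TrueScore = "IBS.gene")
# statsAndPVals = RunSKATAnalysis(PhenoList, kernel = "linear")
# # row 1 holds the Q statistics, row 2 the p-values, one column per
# # score/outcome combination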
% ---- /man/vanke1127.Rd (dgrtwo/animation) ----
% Please edit documentation in R/animation-package.R
\docType{data}
\name{vanke1127}
\alias{vanke1127}
\title{Stock prices of Vanke Co., Ltd on 2009/11/27}
\format{A data frame with 2831 observations on the following 2 variables.
\describe{ \item{time}{POSIXt: the time corresponding to stock prices}
\item{price}{a numeric vector: stock prices} }}
\source{
This data can be obtained from most stock websites.
}
\description{
This is a sample of stock prices of the Vanke Co., Ltd on 2009/11/27.
}
\examples{
tab.price = table(vanke1127$price)
plot(as.numeric(names(tab.price)), as.numeric(tab.price), type = 'h',
xlab = 'price', ylab = 'frequency')
oopt = ani.options(interval = 0.5, loop = FALSE, title = 'Stock price of Vanke')
## a series of HTML animations with different time spans
saveHTML({
price.ani(vanke1127$price, vanke1127$time, lwd = 2)
}, img.name = 'vanke_a', description = 'Prices changing along with time interval 15 min',
htmlfile = "vanke1127_1.html")
saveHTML({
price.ani(vanke1127$price, vanke1127$time, span = 30 * 60, lwd = 3)
}, img.name = 'vanke_b', description = 'Prices changing along with time interval 30 min',
htmlfile = "vanke1127_2.html")
saveHTML({
price.ani(vanke1127$price, vanke1127$time, span = 5 * 60, lwd = 2)
}, img.name = 'vanke_c', description = 'Prices changing along with time interval 5 min',
htmlfile = "vanke1127_3.html")
## GIF animation
saveGIF(price.ani(vanke1127$price, vanke1127$time, lwd = 2),
movie.name = 'price.gif', loop = 1)
ani.options(oopt)
}
# ---- /R_Scripts/Investigation_5_8_regression.R (gmtanner-cord/MATH205) ----
# Investigation 5.8
# Load data
HeightFoot = read.delim("http://www.rossmanchance.com/iscam2/data/HeightFoot.txt")
# Plot data
plot(HeightFoot$height~HeightFoot$foot)
# Correlation
cor(HeightFoot$height,HeightFoot$foot) # r
cor(HeightFoot$height,HeightFoot$foot)^2 # r-squared
# Regression Model (fit once, store it, and reuse rather than refitting each time)
?lm
model <- lm(height ~ foot, data = HeightFoot)
model
summary(model)
sum(residuals(model)^2) # gives you the SSE
# Add regression line to plot. Must first have plot(HeightFoot$height~HeightFoot$foot)
abline(model)
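# With the fitted model stored above, prediction for a new foot length is one
# call (the value 28 is purely illustrative):
predict(model, newdata = data.frame(foot = 28))
coef(model) # intercept and slope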
# ---- /R/RefSigmaW.r (cran/RobustAFT) ----
RefSigmaW <- function(sigma, Beta, X, y, delta, tol = 0.0001, maxit = 100, nitmon)
{
  # Fixed point algorithm for scale:
  # iterate sigma <- sqrt(RefAve2W(sigma, ...) * sigma^2 / 0.5) until convergence
  nit <- 1
  repeat {
    sigmao <- sigma
    sigma <- (RefAve2W(sigmao, Beta, X, y, delta) * sigmao^2 / 0.5)^0.5
    d <- sigma - sigmao
    if (nit == maxit | abs(d) < tol) break
    if (nitmon) cat(nit, sigma, "\n")
    nit <- nit + 1
  }
  list(sigma = sigma, nit = nit)
}
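# Generic illustration of the fixed-point pattern used above, with a toy
# update function (not part of RobustAFT):
fp.demo <- function(s, g, tol = 1e-6, maxit = 100)
{
  for (nit in seq_len(maxit)) {
    s.old <- s
    s <- g(s.old)
    if (abs(s - s.old) < tol) break
  }
  list(value = s, nit = nit)
}
fp.demo(1, function(s) sqrt(s + 2)) # converges to the fixed point s = 2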
# ---- /getclean_data/course_project/run_analysis.R (carterfawson/datasciencecoursera) ----
setwd("~/datasciencecoursera/getclean_data/course_project/")
library(dplyr)
#Load in activities
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
#Load in features
features_data <- read.table("UCI HAR Dataset/features.txt")
# Grab desired features
features <- grep('mean[^A-Z]+|.*std.*', features_data$V2)
features.names <- features_data[features,2]
# Get rid of all hyphens and parentheses in the variable names
features.names = gsub('-mean', 'Mean', features.names)
features.names = gsub('-std', 'Std', features.names)
features.names <- gsub('[-()]', '', features.names)
#Load in the activities
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainActivities_df <- data.frame(Activities = trainActivities$V1)
#Now I am going to convert the numbers into activity names
activity_sub <- function(activities){
activities$Activities <- gsub(1, "WALKING", activities$Activities)
activities$Activities <- gsub(2, "WALKING_UPSTAIRS", activities$Activities)
activities$Activities <- gsub(3, "WALKING_DOWNSTAIRS", activities$Activities)
activities$Activities <- gsub(4, "SITTING", activities$Activities)
activities$Activities <- gsub(5, "STANDING", activities$Activities)
activities$Activities <- gsub(6, "LAYING", activities$Activities)
return(activities)
}
trainActivities_df <- activity_sub(trainActivities_df)
#load in the subjects
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
trainSubjects_df <- data.frame(Subjects = trainSubjects$V1)
#Load in the observations
train <- read.table("UCI HAR Dataset/train/X_train.txt")[features]
train_df <- train #already a data frame; unlist() + matrix(byrow = TRUE) would scramble the columns
colnames(train_df) <- features.names
#Connect all the training dfs into one CLEANED training df
training_clean <- cbind(trainSubjects_df, trainActivities_df, train_df)
#Now do the same things for the test set
#Load in the activities
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testActivities_df <- data.frame(Activities = testActivities$V1)
colnames(testActivities_df) <- c('Activities')
#Convert the numbers into activity names, reusing activity_sub defined above
testActivities_df <- activity_sub(testActivities_df)
#load in the subjects
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
testSubjects_df <- data.frame(Subjects = testSubjects$V1)
colnames(testSubjects_df) <- c('Subjects')
#Load in the observations
test <- read.table("UCI HAR Dataset/test/X_test.txt")[features]
test_df <- test #already a data frame; unlist() + matrix(byrow = TRUE) would scramble the columns
colnames(test_df) <- features.names
#Cbind them all into one tidy test_df
test_clean <- cbind(testSubjects_df, testActivities_df, test_df)
#Now finally I just need to rbind the two dataframes and get my final dataset!
tidy_data <- rbind(training_clean, test_clean)
tidy_data <- tbl_df(tidy_data)
#I also need to create now a new summarized tidy data frame.
summary_data <- group_by(tidy_data, Subjects, Activities)
summary_data <- summary_data %>% summarise_all(mean)
#These csv files were just for my own use:
#write.csv(tidy_data, "full_tidyData.csv")
#write.csv(summary_data, "tidyData.csv")
#Now I write out the data to a text file:
write.table(summary_data, "cfaw_tidydata.txt", row.names = FALSE)
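#Optional sanity check: the written file should round-trip cleanly; with the
#standard UCI HAR layout (30 subjects x 6 activities) expect 180 rows
check <- read.table("cfaw_tidydata.txt", header = TRUE)
dim(check)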
% ---- /man/Rdenominator.Rd (cran/tolBasis) ----
\name{Rdenominator}
\alias{Rdenominator}
\title{Ratio Denominator}
\description{
Denominator of a Ratio object
}
\usage{
Rdenominator(r)
}
\arguments{
\item{r}{
a Ratio object
}
}
\value{
Returns the Polyn object corresponding to the denominator of the Ratio.
}
\seealso{
See also the function \code{\link{Rnumerator}}
}
\examples{
# Obtain the denominator of a Ratio object
ratio <- Ratio(1, 1+B) + Ratio(1, 1-B)
Rdenominator(ratio)
}
# ---- /R/nexoc-sample.R (biokcb/coxen, MIT) ----
### Example script on how to use the nexoc() function, a wrapper for the full COXEN pipeline
#set working directory
setwd("/Users/Kristen/Desktop/COXEN")
#input directory where the coxen set is located
input.dir <- "./Laval"
#input directory where the independent test set is located
indep.dir <- "./BladderFive"
#load necessary libraries
library(affy)
library(frma)
library(hgu133plus2frmavecs) # for processing expression data
library(MASS)
library(MiPP)
library(samr)
library(qvalue) #t-test
library(SuperLearner)
library(randomForest) #for deg selection, model building
#source nexoc.R script, which contains copies of all the functions we'll need that aren't in packages
source("./nexoc.R")
#load up gene expression data for NCI60
load("./NCI60 Batch Effects/NCI60.fRMA.collapsed.rda")
genematrix <- frozen.rma.collapse
rm(frozen.rma.collapse)
# read in .CEL files or processed expression matrix
filenames <- list.files(path=input.dir, pattern=".CEL")
eset <- ReadAffy(filenames=paste(input.dir,'/',filenames,sep=''), phenoData=NULL)
slotNames(eset)
dates <- eset@protocolData
slotNames(dates)
date.info <- dates@data
date.info # all were scanned on the same date.
coxenmat <- frma(eset)
coxenmat <- exprs(coxenmat)
rm(eset)
# alternatively, load the processed laval set
load("./Laval Batch/Laval.FrozenRMA.rda")
coxenmat <- exprs(laval.frozen)
rm(laval.frozen)
# read in .CEL files or processed expression matrix
filenames <- list.files(path=indep.dir, pattern=".CEL")
eset <- ReadAffy(filenames=paste(indep.dir,'/',filenames,sep=''), phenoData=NULL)
slotNames(eset)
dates <- eset@protocolData
slotNames(dates)
date.info <- dates@data
date.info # all were scanned on the same date.
indepmat <- frma(eset)
indepmat <- exprs(indepmat)
rm(eset)
# read in drug data - here we are just loading the FDA approved list I had previously saved.
load("./meanac_REG0.1alldrugs.rda") # the object here is named "meanac"
identical(colnames(meanac), colnames(genematrix))
# for the 76 FDA approved drug list, I also have group sizes that were selected
# based on maximizing DEGs, so we'll use that too.
num.group <- read.csv("./NCI60frma-FDA76groups.csv", header=TRUE, row.names=1)
num.group <- num.group[order(num.group[,1]),]
identical(as.character(num.group[,1]), rownames(meanac))
# regression coefficients calculated originally
reg.coef <- read.csv("./Regression.Coefficients.Cutoff_0.1.csv", header=TRUE, row.names=1)
reg.coef <- reg.coef[order(reg.coef[,1]),]
identical(as.character(reg.coef[,1]), rownames(meanac))
#randomForests using the ivDrug method requires tissue type information
info <- read.csv("./NCI60 Batch Effects/filename.match.csv", header=TRUE, row.names=1)
tissue <- info[match(colnames(meanac), info[,4]),c(4,1)]
rownames(tissue) <- tissue[,1]
#### applying the nexoc function. ####
#create empty master matrix to store predictions and other information
master.pred <- matrix(data=NA, nrow=nrow(meanac), ncol=(ncol(coxenmat) + ncol(indepmat) + 10))
colnames(master.pred) <- c("NSC","Reg.Coef","Average.Score","Num.GeneModels", "Average.NumGenes","T.Test", "SAM","SAM-FDR", "CorTest", "COXEN", colnames(coxenmat), colnames(indepmat))
master.pred[,1] <- rownames(meanac)
master.pred[,2] <- reg.coef[,2]
# alternative: store only indep set predictions (overwrites the full matrix above)
master.pred <- matrix(data=NA, nrow=nrow(meanac), ncol=(ncol(indepmat) + 10))
colnames(master.pred) <- c("NSC","Reg.Coef","Average.Score","Num.GeneModels", "Average.NumGenes","T.Test", "SAM","SAM-FDR", "CorTest", "COXEN", colnames(indepmat))
master.pred[,1] <- rownames(meanac)
master.pred[,2] <- reg.coef[,2]
for (k in 1:nrow(meanac)){
master.pred[k,3:ncol(master.pred)] <- nexoc(genematrix, coxenmat, indepmat, scale=TRUE, pred.all=TRUE, rownames(meanac)[k], meanac[k,], rev=TRUE, tissue, num=num.group[k,2:3], deg=c("t.test"), min.gene=10, max.gene=1000, method=c("randomForests"))
}
write.csv(master.pred, file="./BladderFDAPredictions-mipp.csv")
#### group extremes
for (i in 1:nrow(meanac)){
extremes <- select.extremes(genematrix, order(meanac[i,]), 0.1, min.num.sensitive=8, min.num.resistant=8, max.num.resistant=15, max.num.sensitive=15)
num.group[i,2] <- length(extremes$drug.sensitive)
num.group[i,3] <- length(extremes$drug.resistant)
num.group[i,1] <- rownames(meanac)[i]
}
# ---- /R/incrementor.R (schloerke/sortableR, MIT) ----
incrementor <- function(prefix = "increment_"){
i <- 0
function(){
i <<- i + 1
paste0(prefix, i)
}
}
incrementSortableItemlist <- incrementor("sortable_itemlist_id_")
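# Example (illustrative): each generator keeps its own private counter
nextId <- incrementor("item_")
nextId() # "item_1"
nextId() # "item_2"
incrementSortableItemlist() # "sortable_itemlist_id_1"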