blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74b0deb7fbefe2a117d71af2d1d1ed641f7e6ab7
|
ccf99da96d03f53555ea6f8db95632fb0882716a
|
/tennis.R
|
fc1e4345ae29671b7a7570db1f4a62b5a9893b6a
|
[] |
no_license
|
Cat-n-Dog/tidy_tuesday
|
ad3eb67d84be3e9c4f639a9e2300cbd768167339
|
0a46f0bd0297e25bfa644a8e701d6f14fa75b020
|
refs/heads/master
| 2020-05-15T14:08:32.068438
| 2019-04-19T21:07:20
| 2019-04-19T21:07:20
| 182,325,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,517
|
r
|
tennis.R
|
# tennis.R -- Tidy Tuesday: Grand Slam tennis champions.
# NOTE(review): `player_dob` and `grand_slams` are assumed to be loaded
# upstream (Tidy Tuesday grand-slam data) -- confirm before running.
library(tidyverse)
library(lubridate)
library(ggthemes)
library(cowplot)
library(gghighlight)

# Keep only players with a grand slam title; compute age at first title.
# NOTE(review): `calc_age` is computed but the plots below use `age`, which
# must already exist in `player_dob` -- verify against the source data.
player_dob2 <- player_dob %>%
  filter(!is.na(grand_slam)) %>%
  mutate(calc_age = (date_of_first_title - date_of_birth))

# Distribution of player ages (dot plot).
player_dob2 %>% ggplot(aes(x = age)) +
  geom_dotplot(binwidth = 90) +
  theme_minimal()

# Age at first title over calendar time.
player_dob2 %>% ggplot(aes(x = date_of_first_title, y = age)) +
  geom_point() +
  theme_minimal()

# Per year and gender: number of titles won by players with 4 or fewer
# career titles so far ("new-ish" champions).
grand_slams %>%
  # filter(rolling_win_count == 1) %>%
  group_by(gender, year) %>%
  summarise(new_champs = sum(rolling_win_count <= 4)) %>%
  ggplot(aes(x = year, y = new_champs)) +
  geom_line() +
  facet_wrap(~gender) +
  theme_bw()

# Helper: cumulative title count per player for one gender, highlighting
# players who reached 10 or more titles.
plot_slams <- function(data, which_gender, title) {
  data %>%
    filter(gender == which_gender) %>%
    ggplot() +
    geom_line(aes(x = year, y = rolling_win_count, group = name)) +
    geom_point(aes(x = year, y = rolling_win_count, group = name), size = 0.5) +
    theme_minimal() +
    ggtitle(title) +
    xlab("") +
    ylim(0, 25) + ylab("") +
    gghighlight::gghighlight(max(rolling_win_count) >= 10, use_group_by = TRUE)
}

# BUG FIX: ggsave() must not be added to a ggplot chain with `+` -- that is
# not a layer and errors on current ggplot2. Build the plot, then save it
# explicitly.
p_female <- plot_slams(grand_slams, "Female",
                       "Number of Grand Slam Titles by Female Player")
p_female
ggsave("female_slams.png", plot = p_female)

p_male <- plot_slams(grand_slams, "Male",
                     "Number of Grand Slam Titles by Male Player")
p_male
ggsave("male_slams.png", plot = p_male)
|
fd132041cf1277be6dbce427e7a23908f17df7bc
|
0dfc9f092963a7c3252578c4a9dd9a326f6d5f7f
|
/scripts/pattern_22.R
|
3f004ee3723b1fae0891995e001279e03b056fb7
|
[] |
no_license
|
crouzet-o/patterns
|
ee98b4fde517ddc62031d00e81b357d2d8c6722d
|
8f6c03ee33a1f2ee5d3f30e0dc7391c9b6376a43
|
refs/heads/main
| 2023-02-16T04:39:54.380756
| 2021-01-02T06:35:19
| 2021-01-02T06:35:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
pattern_22.R
|
library(dplyr)
library(ggplot2)
library(ggforce)
# Shared ggplot theme for the pattern series: solid background, no grid,
# no axes or legend, and a small caption in the given colour.
#
# bg_hex      - background fill for plot and panel (hex colour string)
# caption_hex - caption text colour (hex colour string)
#
# Returns a ggplot2 theme object to be added to a plot with `+`.
# (Assignment changed from `=` to the idiomatic `<-`; behaviour unchanged.)
apply_pattern_theme <- function(bg_hex, caption_hex) {
  theme(
    plot.background = element_rect(fill = bg_hex),
    panel.background = element_rect(fill = bg_hex),
    panel.grid = element_blank(),
    plot.caption = element_text(family = "Open Sans",
                                size = 6,
                                color = caption_hex),
    legend.position = "none",
    axis.title = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank()
  )
}
# Generative pattern #22: two offset half-circles with hanging/rising white
# line segments. (Assignments changed from `=` to `<-`; behaviour unchanged.)

# Number of points used to discretise the circle outline.
num_lines <- 100

# Unit circle sampled from the top going clockwise (x = sin, y = cos).
circle <- tibble(
  len = seq(0, 2 * pi, length.out = num_lines),
  x = sin(len),
  y = cos(len))

# Upper half of the circle, kept in place.
circle_top <- circle %>%
  filter(y >= 0)

# Lower half of the circle, shifted one unit to the right.
circle_bottom <- circle %>%
  filter(y < 0) %>%
  mutate(x = x + 1)

# Vertical segments dropping from the top arc down to y = 0.055,
# shifted right by 1 (note: xend uses the already-shifted x).
lines_top <- circle %>%
  filter(y >= 0.055) %>%
  mutate(yend = y,
         y = 0.055,
         x = x + 1,
         xend = x)

# Vertical segments rising from y = -0.03 down to the bottom arc.
lines_bottom <- circle %>%
  filter(y <= -0.03) %>%
  mutate(yend = y,
         y = -.03,
         xend = x)

# Compose: white segments first, then the two filled half-circle shapes
# drawn on top, equal-axis scaling, caption, and the shared theme.
ggplot() +
  geom_segment(data = lines_top,
               aes(x = x, y = y,
                   xend = xend, yend = yend),
               size = 0.7,
               lineend = "round",
               color = "white") +
  geom_segment(data = lines_bottom,
               aes(x = x, y = y,
                   xend = xend, yend = yend),
               size = 0.7,
               lineend = "round",
               color = "white") +
  geom_shape(data = circle_top,
             aes(x, y),
             fill = "#cbac9a") +
  geom_shape(data = circle_bottom,
             aes(x, y),
             fill = "#cbac9a") +
  coord_fixed() +
  labs(caption = "Ijeamaka Anyene | @ijeamaka_a") +
  apply_pattern_theme("#3c5551", "white")

# Save the last plot to outputs/pattern_22.png (7in x 5in).
ggsave(
  "pattern_22.png",
  plot = last_plot(),
  device = "png",
  path = here::here("outputs"),
  width = 7,
  height = 5
)
|
d362a908b2f879147c933c67d3b43b9a17ae0404
|
1c01ed7a5e79c5e281c0ede3406f702f79766882
|
/man/empty.frame.Rd
|
63aeace58d7c8210a3b19115c2bef7bb8625bdeb
|
[] |
no_license
|
christiantillich/AnaliTools
|
19e738e4084be1678ff7aeda45aa9f146de5ac1d
|
cab56ef7729f1d9692af5241ac5eca60060c3045
|
refs/heads/master
| 2020-04-06T05:12:48.950283
| 2019-02-25T22:09:03
| 2019-02-25T22:09:03
| 47,645,937
| 0
| 1
| null | 2019-02-25T22:09:04
| 2015-12-08T19:53:20
|
R
|
UTF-8
|
R
| false
| true
| 382
|
rd
|
empty.frame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.tools.R
\name{empty.frame}
\alias{empty.frame}
\title{empty.frame}
\usage{
empty.frame(col.names)
}
\arguments{
\item{col.names}{- User input column names.}
}
\value{
An empty data frame object with the specified column names
}
\description{
Create an empty data frame. Surprisingly tricky to do.
}
|
0204e400e0b01591f56d8b9755b73ea18f995302
|
e2d06410af5af94eff00bffd230fb5c689daaba1
|
/man/gtm_workspace_id.Rd
|
6379b48ef818232cc2638e14fb1aa2a325f4f986
|
[] |
no_license
|
MarkEdmondson1234/gtmR
|
b99855895768c2b7dd3eee33881a89adcdc20770
|
644684285808ecf02a59fa31830802d4b1435d28
|
refs/heads/master
| 2021-01-10T16:22:00.092511
| 2020-12-05T21:32:24
| 2020-12-05T21:32:24
| 55,138,326
| 6
| 3
| null | 2020-12-05T21:32:25
| 2016-03-31T09:37:49
|
R
|
UTF-8
|
R
| false
| true
| 373
|
rd
|
gtm_workspace_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspace.R
\name{gtm_workspace_id}
\alias{gtm_workspace_id}
\title{GTM current workspace ID}
\usage{
gtm_workspace_id(accountId, containerId)
}
\arguments{
\item{accountId}{Add your GTM account ID}
\item{containerId}{Add your GTM container ID}
}
\description{
Downloads current workspace ID
}
|
722df2eef24ddc0e4a0e7766b64f4656e58e9dc8
|
2693a682078fe71bed78997f82b71b82c0abd0ad
|
/base/logger/man/logger.getLevelNumber.Rd
|
e3f4b0a3a9d77e6fcd5c5732d2744f398bfd061d
|
[
"LicenseRef-scancode-unknown-license-reference",
"NCSA"
] |
permissive
|
ashiklom/pecan
|
642a122873c9bca4f7ac60f6f260f490f15692e4
|
52bb31866810e2c93ddf540f2065f41ec008627b
|
refs/heads/develop
| 2023-04-01T11:39:16.235662
| 2021-05-24T22:36:04
| 2021-05-24T22:36:04
| 28,980,311
| 3
| 0
|
NOASSERTION
| 2023-04-01T20:08:15
| 2015-01-08T18:44:03
|
R
|
UTF-8
|
R
| false
| true
| 438
|
rd
|
logger.getLevelNumber.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logger.R
\name{logger.getLevelNumber}
\alias{logger.getLevelNumber}
\title{Returns numeric value for string}
\usage{
logger.getLevelNumber(level)
}
\value{
level the level of the message
}
\description{
Given the string representation this will return the numeric value
ALL = 0
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
ALL = 99
}
\author{
Rob Kooper
}
|
aa4115734c81fc4573c5c66c40455d808d5b93de
|
825a5e6e1d8952ea84e54632b9e88d41e521963d
|
/Clusterization.R
|
795f50c6676ebdb6063eea5e77817b7015e0f041
|
[] |
no_license
|
Alexadol/Salmonella_novobiocin
|
f55feeebb29867f19d53acfa9c2d47723baddc0e
|
587e06ee9232bc149016747be07e8ee24e5d2f8c
|
refs/heads/master
| 2022-11-12T04:02:55.342378
| 2020-06-19T18:28:50
| 2020-06-19T18:28:50
| 267,666,046
| 0
| 2
| null | 2020-06-19T18:25:22
| 2020-05-28T18:29:14
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,091
|
r
|
Clusterization.R
|
# Clusterization.R -- K-means clustering of Salmonella RNA-seq expression
# profiles under novobiocin treatment, to group genes by expression dynamics.
# NOTE(review): interactive, machine-specific script (setwd below); it also
# calls `corscore`, which is not defined in this file -- presumably sourced
# elsewhere. Confirm before running.

#Loading required packages
library(edgeR)
#Set working directory (machine-specific path; adjust for your environment)
setwd('/home/alexadol/TKA3/')
#Open tables with raw counts
res_with <- read.csv('counts_res_with.csv',sep=' ') #with antibiotic
res_wo <- read.csv('counts_res_without.csv',sep=' ') #without antibiotic
#binding two tables in one (columns side by side; assumes both tables have
#the same genes in the same row order -- TODO confirm)
res_full <- cbind(res_wo,res_with)
#making matrix from data frame
my_y <- as.matrix((res_full))
#Creating DGEList object (EdgeR), setting group names for samples
#wo - without antibiotic with - with antibiotic 0,10,20,60 - time after treatment
#(3 replicates per condition, 24 samples total)
my_y <- DGEList(counts = my_y, group=c(rep('wo_0min',3),rep('wo_10min',3),rep('wo_20min',3),rep('wo_60min',3),rep('with_100_60min',3),rep('with_500_10min',3),rep('with_500_20min',3),rep('with_500_60min',3)))
#Normalization
my_y <- calcNormFactors(my_y)
#Getting counts per million instead of raw counts
my_z <- cpm(my_y, normalized.lib.size=TRUE)
#transpose table
#NOTE(review): this transposes the DGEList `my_y`, not the CPM matrix `my_z`
#computed just above, and `scaledata` is not used afterwards -- verify which
#matrix was actually meant to be used.
scaledata <- t(my_y)
#select rows without NA
scaledata <- scaledata[complete.cases(scaledata),]
scaledata <- as.matrix(scaledata)
#Get mean of 3 samples in all condition (we have 3 repeats for each condition)
#NOTE(review): means are again taken from `my_y` (the DGEList), not from the
#normalised `my_z` -- confirm this is intentional.
scaledata_mean <- data.frame(ID=my_y[,0], wo_0min_mean=rowMeans(my_y[,1:3]),wo_10min_mean=rowMeans(my_y[,4:6]), wo_20min_mean=rowMeans(my_y[,7:9]),wo_60min_mean=rowMeans(my_y[,10:12]),with_100_60min_mean=rowMeans(my_y[,13:15]),with_500_10_mean=rowMeans(my_y[,16:18]),with_500_20min_mean=rowMeans(my_y[,19:21]),with_500_60min_mean=rowMeans(my_y[,22:24]))
#Identify optimal number of clusters (when K-means is used for clusterization) using Gap-statistic
library(cluster)
set.seed(13)
#clusGap: try k = 1..20, B = 100 bootstrap reference sets
gap <- clusGap(scaledata_mean, kmeans, 20, B = 100, verbose = interactive())
#Visualize results
plot(gap, main = "Gap statistic")
abline(v=which.max(gap$Tab[,3]), lty = 2)
##Identify optimal number of clusters using Within groups sum of squares
##(elbow method: total WSS for k = 1, then k-means WSS for k = 2..20)
wss <- (nrow(scaledata_mean)-1)*sum(apply(scaledata_mean,2,var))
for (i in 2:20) wss[i] <- sum(kmeans(scaledata_mean,
centers=i)$withinss)
#Visualize results
plot(1:20, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
#Implement clusterization using kmeans function (In our case optimal numbers of clusters was determined as 4)
set.seed(20)
kClust <- kmeans(scaledata_mean, centers=4, nstart = 1000, iter.max = 20)
kClusters <- kClust$cluster
#Prepare results for visualization
#clust.centroid: mean profile (column means) of the genes in cluster i
clust.centroid = function(i, dat, clusters) {
ind = (clusters == i)
colMeans(dat[ind,])
}
#One centroid column per cluster (rows = the 8 condition means)
kClustcentroids <- sapply(levels(factor(kClusters)), clust.centroid, scaledata_mean, kClusters)
#Reorder/select conditions: 0min, then 500-mkg 10/20/60min, then 100-mkg 60min
kClustcentroids_with <- kClustcentroids[c(1,6,7,8,5),]
#Change names of points in which genes within clusters have similar expression dinamics
rownames(kClustcentroids_with) <- c ('0 minutes','10 minutes novobiocin 500 mkg','20 minutes novobiocin 500 mkg','60 minutes novobiocin 500 mkg','60 minutes novobiocin 100 mkg')
library(ggplot2)
library(reshape)
#get in long form for plotting
Kmolten <- melt(kClustcentroids_with)
colnames(Kmolten) <- c('sample','cluster','value')
#Creating plot to assess dynamics of genes within clusters
p1 <- ggplot(Kmolten, aes(x=sample,y=value, group=cluster, colour=as.factor(cluster))) +
geom_point() +
geom_line() +
xlab("Time") +
ylab("Expression") +
labs(title= "Cluster Expression by Time with novobiocin treatment",color = "Cluster")+
scale_color_manual(values=c('#993404','#E69F00', '#56B4E9','#31a354'))+
theme_classic(base_size = 15)+theme(axis.text.x = element_text(angle = 60, hjust = 1))
p1
#Checking if clusters have low correlation level (Less number of clusters should be used if some of them have high correlation values)
cor(kClustcentroids)
#Extract genes from clusters
#NOTE(review): K1 holds genes of cluster 4 despite the "1" in the name.
K1 <- (scaledata_mean[kClusters==4,])
#Get score of genes in cluster (how far it from core, values near 1 identify genes which have dinamics near to core of cluster)
#NOTE(review): `corscore` is not defined in this file -- must be sourced
#elsewhere before this line runs.
score <- apply(K1, 1, corscore)
score_names <- names(score)
score_names <- as.data.frame(score_names)
#save genes from cluster to table
write.table(score_names,'cluster_up_in_60_wo_ab_names.txt')
|
57e8cce6345f6c4644525a436872ff382f32c4b7
|
800ad0439a48efc7da01488835813bbc2328f42f
|
/StataR Recitations/Week 1 EDA and grepl.R
|
710bb1c4d4eb787a55eba5ec89280299fc80cffe
|
[] |
no_license
|
christianmconroy/Teaching-Materials
|
d4bd969e784df816e1e538015ccb7bb84cf08987
|
2a2a3e13f24e2f9f86b6c7473717aba68d2c4270
|
refs/heads/master
| 2022-06-02T08:07:46.689712
| 2022-05-18T14:20:01
| 2022-05-18T14:20:01
| 181,171,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,255
|
r
|
Week 1 EDA and grepl.R
|
# Week 1 EDA and grepl.R -- Stata-to-R recitation: basic exploratory data
# analysis on a data set exported from Stata.
# NOTE(review): attach() is used throughout; it puts columns on the search
# path and is generally discouraged -- kept as-is for the recitation.
# In class activity 1 #
mydata<- read.csv("/Users/christianmconroy/Desktop/Documents/GeorgetownMPPMSFS/McCourtMPP/Semester3Fall2017MPP/StataRecitations/RExportcsv.csv", sep=",")
attach(mydata)
# In the case of R, the "browse" and "list" functions are essentially the same
mydata
# Print rows ordered by price (does not modify mydata)
mydata[order(mydata$price),]
nrow(mydata)
str(mydata)
str(mydata$mpg); str(mydata$make)
str(mydata$Weight)
# Just Like Stata, R is also case sensitive! This is why the above returns a null
str(mydata$Wei)
# Unlike Stata, R does not accept abbreviations!
# There is no wildcard operator in R. The closest equivalent is grep() or grepl(). See below for an example of how to identify variables starting with m. Can then create vector or subset and analyze from there.
# Note: "^[m_]" is a character class -- it matches names beginning with
# either "m" or "_", not names beginning with the two-character prefix "m_".
grepl("^[m_]", colnames(mydata))
summary(mydata$mpg)
summary(mydata$make)
# In R, the summary provides a frequency count in the case of strings.
# In class activity 2 #
mydata<- read.csv("/Users/christianmconroy/Desktop/Documents/GeorgetownMPPMSFS/McCourtMPP/Semester3Fall2017MPP/StataRecitations/RExportcsv.csv", sep=",")
attach(mydata)
str(mydata)
# Bare column names resolve via the attach() above
d <- data.frame(price,mpg, weight, length)
summary(d)
mydata[order(mydata$mpg),]
41-12
# Difference is 29
# To summarize price, mpg, weight, and length for cars costing less than $4000.
summary(mydata[mydata$price < 4000, c("price", "mpg", "weight", "length")], basic = T)
# To summarize price, mpg, weight, and length for cars costing less than $4000 that are NOT foreign
summary(mydata[mydata$price < 4000 & mydata$foreign == 'Domestic', c("price", "mpg", "weight", "length")], basic = T)
summary(mydata$price)
mydata$price
make[13]
# Look at the summary, find the row in the matrix, and search make for the corresponding row. The make is Cad. Seville and the price is $15906.
# After Class Review Exercises
mydata<- read.csv("/Users/christianmconroy/Desktop/Documents/GeorgetownMPPMSFS/McCourtMPP/Semester3Fall2017MPP/StataRecitations/lifexpcsv.csv", sep=",")
attach(mydata)
str(mydata)
# Variables: 6; Strings: 2; Numeric: 4; Observations: 68
sum(is.na(popgrowth))
sum(is.na(lexp))
sum(is.na(gnppc))
sum(is.na(safewater))
# 5 missing values for gnppc and 28 missing values for safewater.
summary(lexp)
79-54
#Difference is 25
|
1581b38982dfa82bc770679ce21b167de9e5d5bc
|
4362529de57d61cbc52a9007929ed3e9ca55ead4
|
/plot3.R
|
c622bd93fa9f16f46dc3fae03c1e4509769c6c53
|
[] |
no_license
|
alokjadhav/ExData_Plotting1
|
de665e8c583c06d7f29623f5c8712f19e8bf809e
|
0d1107500f3f67e1883326e131bd4d142a02826e
|
refs/heads/master
| 2021-01-16T17:53:57.077430
| 2015-04-12T12:02:14
| 2015-04-12T12:02:14
| 33,803,165
| 0
| 0
| null | 2015-04-12T04:36:46
| 2015-04-12T04:36:46
| null |
UTF-8
|
R
| false
| false
| 863
|
r
|
plot3.R
|
# plot3.R -- Exploratory Data Analysis course project 1, plot 3:
# the three sub-metering series for 2007-02-01 and 2007-02-02 from the
# household power consumption data set.
dir <- "S:/Online_courses/DataScience_Specialization/4_Exploratory_Data_Analysis/proj1"
setwd(dir)
#data: semicolon-separated file where "?" marks missing values
filename <- "household_power_consumption.txt"
data <- read.csv(filename,sep=";",stringsAsFactors=FALSE,na.strings="?")
# Combine Date + Time into a full timestamp, then parse Date on its own
data$Time <- strptime(paste(data$Date,data$Time),format="%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
# Keep only the two target days
from.date <- as.Date("2007-02-01")
to.date <- as.Date("2007-02-02")
i <- data$Date == from.date | data$Date == to.date
data <- data[i,]
#plot3.R: draw the three series on one set of axes, then save as PNG
# NOTE(review): "Enery" in the y-axis label is likely a typo for "Energy".
output <- "plot3.png"
with(data, plot(Time, Sub_metering_1 , type="l", ylab="Enery sub metering"))
with(data, lines(Time, Sub_metering_2, col="red"))
with(data, lines(Time, Sub_metering_3, col="blue"))
legend("topright",lty=c(1,1),col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# NOTE(review): dev.copy() from the screen device can rescale/clip the
# legend; opening png(output) before plotting is the more reliable pattern.
dev.copy(png, output)
dev.off()
rm(data)
|
2bc65d5b3498f2801db094e3c04a6d9f14305a2b
|
cd91da282206d5b487e5832cc266a543d59d007a
|
/codes/ARACNeReady.R
|
34b92a0c08dcfa3723c5cf6c30157ab6458b8251
|
[] |
no_license
|
hjkim88/GTExProj
|
bdbb530b9cf9476d813f3962eaed2e0fd39624bb
|
71c41d47a97e9db10cdd4007ecdb82674214484b
|
refs/heads/master
| 2020-04-12T00:12:27.218482
| 2020-02-14T21:52:07
| 2020-02-14T21:52:07
| 162,192,579
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
ARACNeReady.R
|
###
# File name : ARACNeReady.R
# Author : Hyunjin Kim
# Date : Dec 1, 2017
# Email : hk2990@cumc.columbia.edu
# Purpose : Make Aracne-ready file from the transformed counts
#
# Instruction
# 1. Source("ARACNeReady.R")
# 2. Run the function "makeReady()" - specify the input file (transformed counts) directory and output directory
# 3. ARACNe-ready data will be generated in the output directory
#
# Example
# > source("The_directory_of_ARACNeReady.R/ARACNeReady.R")
# > makeReady(inputPath="./results/transformed_counts/", outputPath="./results/aracne_ready/")
###

# Convert every transformed-count table in `inputPath` into a tab-separated
# ARACNe-ready ".dat" file in `outputPath`.
#
# For each input file: the table is read with its first column as row names
# (gene IDs), a leading "Gene" column is added from those row names, and the
# result is written unquoted. "(" and ")" in file names are replaced by "-",
# and the last 4 characters of the input name (its extension, e.g. ".txt")
# are replaced by ".dat".
#
# inputPath  : directory with the transformed count tables (trailing "/"
#              expected, since paths are joined with paste0)
# outputPath : directory where the ".dat" files are written (trailing "/")
makeReady <- function(inputPath="./results/transformed_counts/", outputPath="./results/aracne_ready/") {

  ### collect files from the input path
  f <- list.files(inputPath)

  ### A function to change "(" or ")" to "-"
  refineFileName <- function(str_line) {
    result_line <- gsub("\\(", "-", str_line)
    result_line <- gsub("\\)", "-", result_line)
    return(result_line)
  }

  ### iteratively perform transforming
  ### (seq_along instead of 1:length(f): on an empty directory a 1:0 loop
  ### would try to read a non-existent file)
  for(i in seq_along(f)) {
    ### load filtered counts (first column = gene IDs, used as row names)
    cnt <- read.table(paste0(inputPath, f[i]), sep="\t", row.names = 1, header = TRUE, check.names = FALSE)

    ### prepend gene IDs as an explicit "Gene" column
    ### NOTE(review): cnt[,-1] also drops the first data column -- this looks
    ### intentional for the expected input format, but verify upstream.
    cnt <- cbind(Gene=rownames(cnt), cnt[,-1])

    ### save the transformed data
    write.table(cnt, paste0(outputPath, refineFileName(substr(f[i], 1, nchar(f[i])-4)), ".dat"), sep = "\t", row.names = FALSE, quote = FALSE)
  }
}
|
cd55b7bdcf53d464799cb2bfd67f722bd787e49f
|
359d345d9afa5f47e7cc2ce3ccc994be246054ca
|
/bench/bench-var-sandwich.R
|
5739a877f5745bcf89eb32031af9db60dbb8f13a
|
[
"MIT"
] |
permissive
|
shamindras/maars
|
de6bda58bc696aa33336320ab9b5b89049a8cc6d
|
261be12745649a813bb4f9c9768a28ee717306eb
|
refs/heads/main
| 2023-05-23T16:56:34.474132
| 2021-09-16T23:07:43
| 2021-09-16T23:07:43
| 312,942,004
| 15
| 1
|
NOASSERTION
| 2021-09-24T20:26:11
| 2020-11-15T02:22:58
|
R
|
UTF-8
|
R
| false
| false
| 2,129
|
r
|
bench-var-sandwich.R
|
# bench-var-sandwich.R
# Benchmark the maars QR-based sandwich variance estimator against the
# sandwich package implementation on simulated linear-model fits over a
# grid of n and p, then plot the timing distributions for p = 5.
# NOTE(review): relies on the non-exported maars:::comp_sandwich_qr_var --
# fragile across package versions; confirm it still exists.
set.seed(168465)
# Number of decimal places to round the variance estimators
# (rounding lets bench::mark confirm both expressions return equal results)
NUM_DEC_PL <- 7
# Function to create the sample linear regression simulated data
# (p standard-normal predictors, true betas 1..p, intercept 2, sd-10 noise)
create_lm_fit <- function(n, p) {
Sigma <- diag(p)
betas <- seq.int(from = 1, by = 1, length.out = p)
X <- MASS::mvrnorm(n = n, rep(0, p), Sigma)
y <- 2 + X %*% betas + stats::rnorm(n, 0, 10)
return(stats::lm(y ~ X))
}
# Time both estimators for every (n, p) combination, >= 50 iterations each
results <- bench::press(
n = seq.int(from = 1000, by = 1000, length.out = 4),
p = seq.int(from = 1, by = 1, length.out = 5),
{
lm_fit <- create_lm_fit(n, p)
bench::mark(
min_iterations = 50,
qr_var = unname(round(maars:::comp_sandwich_qr_var(lm_fit)[, 3],
NUM_DEC_PL)),
sandwich_sandpkg_var = unname(round(sandwich::sandwich(lm_fit),
NUM_DEC_PL))
)
}
)
# Replicate autoplot code from bench [R] package
# Source: https://github.com/r-lib/bench/blob/master/R/autoplot.R
summary_cols <- c("min", "median", "itr/sec", "mem_alloc", "gc/sec")
data_cols <- c("n_itr", "n_gc", "total_time", "result", "memory", "time", "gc")
object <- results
# Expand to one row per timed iteration, keeping the p = 5 runs only
res <- tidyr::unnest(object, c(time, gc))
res <- res %>% dplyr::filter(p == 5)
# Beeswarm of iteration times per expression, coloured by GC level
p <- ggplot2::ggplot(res)
p <- p +
ggplot2::aes_string("expression", "time", color = "gc") +
ggbeeswarm::geom_quasirandom() +
ggplot2::coord_flip()
# Facet parameters = press() grid columns other than expression/summary/data
parameters <- setdiff(
colnames(object),
c("expression",
summary_cols, data_cols,
c("level0", "level1", "level2")))
# NOTE(review): the title says n up to 5000, but the press() grid above
# stops at n = 4000 (length.out = 4); also the axis label says "Time (ms)"
# while bench records time in seconds -- confirm both.
p +
ggplot2::facet_grid(
paste0(parameters[[1]], "~", parameters[[2]]),
labeller = ggplot2::label_both) +
ggplot2::theme_bw() +
ggplot2::theme(strip.text.x = element_blank(),
strip.text.y = element_text(size = 12)) +
ggplot2::ylim(0, 0.05) +
labs(title = "Sandwich Est. Benchmark: p = 5, n = (1000,...,5000)",
y = "Time (ms)",
x = "Sandwich Estimator Type") +
ggplot2::ggsave(filename = here::here("bench", "figures",
"rbench_p_5_n_1k_to_5k.png"))
|
077f88a404952bcd9ec6f3cf36cb5d76942e224b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/episode/vignettes/episode.R
|
75ffe3d06f462328748e7f8fb7c9d1a89041eed3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,746
|
r
|
episode.R
|
# episode.R -- code extracted (purled) from the episode package vignette:
# build a Michaelis-Menten mass-action kinetics system, solve it, simulate
# noisy data, and estimate rate parameters.
## ---- echo=FALSE,results='hide'------------------------------------------
# devtools::load_all()
set.seed(123)
library(episode)
## ------------------------------------------------------------------------
# Stoichiometric matrices of the Michaelis-Menten system
# (rows = reactions; columns = species E, S, ES, P; A = reactants consumed,
# B = products produced)
A <- matrix(
c(1, 1, 0, 0,
0, 0, 1, 0,
0, 0, 1, 0), ncol = 4, byrow = TRUE)
B <- matrix(
c(0, 0, 1, 0,
1, 1, 0, 0,
1, 0, 0, 1), ncol = 4, byrow = TRUE)
colnames(A) <- colnames(B) <- c("E", "S", "ES", "P")
# mass-action kinetics model object built from the two matrices
m <- mak(A, B)
m
## ------------------------------------------------------------------------
# Initial state
x0 <- setNames(c(8, 10, 1.5, 1.5), colnames(m$A))
# Rate parameters (one per reaction)
k <- c(2.1, 2.25, 1.5)
# Time discretisation
Time <- seq(0, 1, by = 0.1)
# Numerically solve the ODE system on the time grid
trajectory <- numsolve(m, time = Time, x0 = x0, param = k)
trajectory
## ------------------------------------------------------------------------
# Vector field of the system at the initial state
field(m, x = x0, param = k)
## ------------------------------------------------------------------------
# Power-law kinetics model with an explicit Runge-Kutta 2/3 solver
solver("rk23")
p <- plk(A, s = solver("rk23"))
## ------------------------------------------------------------------------
# Generated data: the solved trajectory plus Gaussian noise (sd = 0.5)
y <- trajectory
y[, -1] <- y[, -1] + matrix(rnorm(length(y[,-1]), sd = .5), nrow = nrow(y))
# Create optimisation object
op <- opt(y)
## ------------------------------------------------------------------------
# Create optimisation object, but only 10 lambda values
op <- opt(y, nlambda = 10)
## ------------------------------------------------------------------------
# Elastic-net regularisation on the rate parameters
reg("elnet")
m <- mak(A, B, r = reg("elnet"))
## ------------------------------------------------------------------------
# Generate intervened data with different initial state
# (second rate parameter knocked out via k * c(1, 0, 1), lighter noise)
y_int <- numsolve(m, time = Time, x0 = x0 + 1, param = k * c(1, 0, 1))
y_int[, -1] <- y_int[, -1] + matrix(rnorm(length(y_int[,-1]), sd = .1), nrow = nrow(y_int))
y2 <- rbind(y, y_int)
# Create optimisation object with data from original system and intervened system
op2 <- opt(y2, nlambda = 10)
## ------------------------------------------------------------------------
# First column scales the parameter in the original system, the second in the intervened system
m2 <- mak(A, B, r = reg(contexts = cbind(1, c(1, 0, 1))))
## ------------------------------------------------------------------------
# Full-path parameter estimation (rodeo), then inspect the rate estimates
rod <- rodeo(m2, op2, x0 = NULL, params = NULL)
rod$params$rate
## ------------------------------------------------------------------------
# Adaptive integral matching initialisation (aim)
a <- aim(m2, op2)
a$params$rate
## ------------------------------------------------------------------------
# Change regularisation type to "none"
a$o$rs$rate$reg_type <- "none"
rod <- rodeo(a)
rod$params$rate
## ------------------------------------------------------------------------
# True rate parameters, for comparison with the estimates above
matrix(k, ncol = 1)
|
13af9cd37aa9a759dcc0648eab0704a3e84b3014
|
b9625edf086a612cab705772bb846608f05cc259
|
/R/whipple.R
|
a87e703bc5461b6517a5714d8ef5cdb1a2949133
|
[] |
no_license
|
statistikat/simPop
|
73a0c53a608b990664e8b98904052d8e22f8cef5
|
701228cf7fe29806edd553b4ad26eca4aade26ff
|
refs/heads/master
| 2023-08-07T18:22:55.097196
| 2023-08-04T04:42:03
| 2023-08-04T04:42:03
| 88,950,531
| 21
| 6
| null | 2023-08-04T04:42:04
| 2017-04-21T06:38:00
|
R
|
UTF-8
|
R
| false
| false
| 2,971
|
r
|
whipple.R
|
#' Whipple index (original and modified)
#'
#' The function calculates the original and modified Whipple index to evaluate
#' age heaping.
#'
#' The original Whipple's index is obtained by summing the number of persons in
#' the age range between 23 and 62, and calculating the ratio of reported ages
#' ending in 0 or 5 to one-fifth of the total sample. A linear decrease in the
#' number of persons of each age within the age range is assumed. Therefore,
#' low ages (0-22 years) and high ages (63 years and above) are excluded from
#' analysis since this assumption is not plausible.
#'
#' When the digits 0 and 5 are not reported in the data, the original Whipple
#' index varies between 0 and 100, 100 if no preference for 0 or 5 is within
#' the data. When only the digits 0 and 5 are reported in the data it reaches a
#' to a maximum of 500.
#'
#' For the modified Whipple index, age heaping is calculated for all ten digits
#' (0-9). For each digit, the degree of preference or avoidance can be
#' determined for certain ranges of ages, and the modified Whipple index then
#' is given by the absolute sum of these (indices - 1). The index is scaled between
#' 0 and 1, therefore it is 1 if all age values end with the same digit and 0 it is
#' distributed perfectly equally.
#'
#' @name whipple
#' @param x numeric vector holding the age of persons
#' @param method \dQuote{standard} or \dQuote{modified} Whipple index.
#' @param weight numeric vector holding the weights of each person
#' @return The original or modified Whipple index.
#' @author Matthias Templ, Alexander Kowarik
#' @seealso \code{\link{sprague}}
#' @references Henry S. Shryock and Jacob S. Siegel, Methods and Materials of
#' Demography (New York: Academic Press, 1976)
#' @keywords arith
#' @export
#' @examples
#'
#' #Equally distributed
#' age <- sample(1:100, 5000, replace=TRUE)
#' whipple(age)
#' whipple(age,method="modified")
#'
#' # Only 5 and 10
#' age5 <- sample(seq(0,100,by=5), 5000, replace=TRUE)
#' whipple(age5)
#' whipple(age5,method="modified")
#'
#' #Only 10
#' age10 <- sample(seq(0,100,by=10), 5000, replace=TRUE)
#' whipple(age10)
#' whipple(age10,method="modified")
#'
whipple <- function(x, method="standard",weight=NULL){
  if(method == "standard"){
    if(is.null(weight)){
      # Unweighted: restrict to ages 23-62, then 500 * share of ages
      # divisible by 5 (i.e. ending in 0 or 5).
      x <- x[x >= 23 & x <= 62]
      xm <- x %% 5
      return((length(xm[xm==0])/length(x))*500)
    }else{
      # Weighted variant: same restriction, shares computed on weights.
      # (Subset weight before x so both use the original age vector.)
      weight <- weight[x >= 23 & x <= 62]
      x <- x[x >= 23 & x <= 62]
      xm <- x %% 5
      return((sum(weight[xm==0])/sum(weight))*500)
    }
  }else if(method == "modified"){
    if(is.null(weight)){
      tab <- table(x)
    }else{
      tab <- tableWt(x,weight)
    }
    # For each terminal digit d = 1..9,0 (i = 1..10), W[i] is the observed
    # count of ages ending in d relative to one tenth of the total --
    # seq(i-10, 200, by=10) enumerates all ages <= 200 with that last digit.
    W <- numeric(10)
    for(i in 1:10){
      W[i] <- sum(tab[as.numeric(names(tab))%in%seq(i-10,200,by=10)]) / (sum(tab)/10)
    }
    # Scale by 18, the maximum of sum(|W - 1|) (one digit at 10, nine at 0),
    # so the index lies in [0, 1].
    return(sum(abs(W-1), na.rm=TRUE)/18)
  }else{
    # BUG FIX: "mehtod" -> "method" in the error message.
    stop(paste("Supplied method",method,"is not implemented"))
  }
}
|
0627b9d7b1b9d19dd40bd96c118323affab04ccc
|
49fdecdd43d53709fffcbdd0b17c37a350a94613
|
/Keyword_Analysis/ui.R
|
abedb469ebb75a34d0befc3d0ec956828dc54a6f
|
[] |
no_license
|
saksham-aggarwal/final_project_info201
|
a643f1d876ef3c7450e97263669bea73095bcffb
|
62363fad2530dde1d705c855669e018777819c98
|
refs/heads/master
| 2021-08-23T18:28:57.442638
| 2017-12-06T02:15:07
| 2017-12-06T02:15:07
| 111,857,638
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
ui.R
|
# ui.R -- Shiny UI for the "How Fake is Fake News" keyword-analysis app.
# Sidebar collects a search term and a set of fake-news categories; the
# main panel shows two text outputs and a plot rendered by server.R.
library(shiny)
library(dplyr)
library(data.table)
shinyUI(fluidPage(
titlePanel("How Fake is Fake News"),
sidebarLayout(
sidebarPanel(
# Free-text search term; defaults to "Trump"
textInput("search", "Search", placeholder = "Search here", value = "Trump"),
# Categories of articles to include; "bs" is pre-selected
checkboxGroupInput("types",
"Type of BS",
c("bs" = "bs",
"conspiracy" = "conspiracy",
"fake" = "fake",
"satire" = "satire",
"hate" = "hate",
"junksci" = "junksci",
"state" = "state"),
selected = "bs")
),
# Outputs filled in by the matching server-side render functions
mainPanel(
textOutput("prompt"),
textOutput("text"),
plotOutput("plot")
)
)
)
)
|
59078a2bc24e478215fcf7b1aa5b1af7f07db54c
|
f0be089f46ad7fae262a4fdc1cc313d216861955
|
/man/plot.eb.Rd
|
01174672f309b037968ab947f853b1f6a2e69b9e
|
[] |
no_license
|
cran/EpiBayes
|
db211767a0764b7d73905f794176326ba923c2a9
|
710a77f6d0056ffd8ef595afb27348c5764ccfe3
|
refs/heads/master
| 2021-01-19T08:11:06.429169
| 2015-06-24T00:00:00
| 2015-06-24T00:00:00
| 37,978,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 880
|
rd
|
plot.eb.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plot.eb.R
\name{plot.eb}
\alias{plot.eb}
\title{Plot Method for EpiBayes Output}
\usage{
\method{plot}{eb}(x, burnin = NULL, ...)
}
\arguments{
\item{x}{An object of class \code{eb} (e.g., the output of functions
\code{\link{EpiBayes_ns}} or \code{\link{EpiBayes_s}}).}
\item{burnin}{Number of MCMC iterations to discard from the beginning of the chain.
Integer scalar.}
\item{...}{Additional arguments to be passed to \code{plot}.}
}
\value{
One plotting window for each subzone including posterior distributions for the
cluster-level prevalence across all time periods.
}
\description{
This method plots the output of the function \code{\link{EpiBayes_ns}} and
\code{\link{EpiBayes_s}}. It does so by plotting the posterior distribution(s) of the
cluster-level prevalence(s).
}
|
2ee38c741a7e73125f8b78f804a0da196a7a7043
|
9456677b348c919542cc370dfea0b56d06ca767a
|
/Functions/disparity/R/slice.tree.R
|
6325d9411a0e2deff0e6b671f5f381c1c892602c
|
[] |
no_license
|
yassato/SpatioTemporal_Disparity
|
97befea49605279788ebb2da251ade9c08e75aa8
|
0e2b9dd29f51e94189b50767aee863592cd85d8a
|
refs/heads/master
| 2022-04-25T00:36:19.657712
| 2018-01-05T11:30:37
| 2018-01-05T11:30:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,334
|
r
|
slice.tree.R
|
##########################
#slice.tree
##########################
#Slices a tree given a specific age
#Modified from paleotree::timeSliceTree
#v1.0
#Update: added RATES method
#Update: changed the RATES method into PROXIMITY
#Update: added possibility of adding FAD_LAD data
#Update: added random method
##########################
#SYNTAX :
#<tree> a 'phylo' object
#<age> where to slice the tree
#<method> the slicing method (what becomes of the sliced branches): can be random, acctran, deltran or proximity.
#<FAD> optional First Apparition Datum data (tree.age format)
#<LAD> optional Last Apparition Datum data (tree.age format)
##########################
#Method details: the slicing methods are the method of the edge to chose when cutting through a branch. At any point of the branch cut, the different method picks either the data of the parent node or one of the offspring node or tip.
#random: randomly chose between parent and offspring (default);
#acctran: always chose offspring;
#deltran: always chose parent;
#proximity: chose between the parent or the offspring based on branch length. If the cut is equal to more than half the branch length, the offspring is chosen, else the parent.
#----
#guillert(at)tcd.ie 10/03/2015
##########################
slice.tree <- function(tree, age, method, FAD, LAD) {

    #SANITIZING
    #tree
    check.class(tree, 'phylo')
    #must have node labels
    if(is.null(tree$node.label)) {
        stop('The tree must have node label names.')
    }

    #age
    check.class(age, 'numeric')
    #age must be at least higher than the root age
    if(age > tree$root.time) {
        stop("age cannot be older than the tree's root age.")
    }

    #method
    check.class(method, 'character', " must be either \'random\', \'acctran\', \'deltran\' or \'proximity\'.")
    check.length(method, 1, " must be either \'random\', \'acctran\', \'deltran\' or \'proximity\'.", errorif=FALSE)
    METHODS<-c("random", "acctran", "deltran", "proximity")
    if(!any(method == METHODS)) {
        stop("method must be either \'random\', \'acctran\', \'deltran\' or \'proximity\'.")
    }

    #FAD/LAD: default to the tip/node ages of the tree itself
    if(missing(FAD)) {
        FAD<-tree.age(tree)
    }
    if(missing(LAD)) {
        LAD<-tree.age(tree)
    }

    #SLICING A TREE
    #Creating the tree.age matrix
    tree_age<-tree.age(tree)

    #Running the timeSliceTree function (remove warning, called as a message in the original function)
    suppressMessages(
        tree_slice<-timeSliceTree(tree, age, drop.extinct=TRUE, plot=FALSE)
    )

    #Error with trees with two taxa
    #(typo fix: "To few" -> "Too few" in the error message)
    if(Ntip(tree_slice) < 3) {
        stop('Too few taxa for the tree slice at age ', age, '!')
    }

    #Selecting the tips
    tips<-tree_slice$tip.label

    #renaming the tree_slice
    tree_sliced<-tree_slice

    #Correcting the sliced tree
    #(seq_len instead of 1:Ntip for safety; Ntip >= 3 is guaranteed above)
    for (tip in seq_len(Ntip(tree_slice))) {

        #Check if the tree is sliced at the exact age of a tip (e.g. time=0)
        if(tree_age[which(tree_age[,2]==tips[tip]),1] == age) {
            #Tip exists at exactly this age: keep its label unchanged
        } else {
            #Check if the age of the tip is in between the FAD/LAD
            if(FAD[which(FAD[,2]==tips[tip]),1] >= age & LAD[which(LAD[,2]==tips[tip]),1] <= age) {
                #Tip's known range spans the slice age: keep its label unchanged
            } else {
                #Chose the tip/node following the given method
                #("random" resolves to deltran or acctran per tip)
                if(method == "random") {
                    selected_method<-sample(c("deltran", "acctran"), 1)
                } else {
                    selected_method<-method
                }
                if(selected_method == "deltran") {
                    #Parent
                    tree_sliced$tip.label[tip]<-slice.tree_DELTRAN(tree, tips[tip], tree_slice)
                }
                if(selected_method == "acctran") {
                    #Offspring
                    tree_sliced$tip.label[tip]<-slice.tree_ACCTRAN(tree, tips[tip], tree_slice)
                }
                if(selected_method == "proximity") {
                    #Closest
                    tree_sliced$tip.label[tip]<-slice.tree_PROXIMITY(tree, tips[tip], tree_slice)
                }
            }
        }
    }

    return(tree_sliced)
}
|
abf80fc908e5ee58867c46499b268ed5257870db
|
c739d6ead9a9521ed999935108cbfd68bdf14071
|
/R_kmooc2/13weeks/[Ex]13_1.R
|
8c41c93f62a76f8ce99ebd0899a7fbc66491befe
|
[] |
no_license
|
HeeSeok-Kwon/R_study
|
526489c31bcf6c3eddb1b6f9e483cd2fafe28ac0
|
796fa69d4653ccf877abcfe6e986efb80c12b741
|
refs/heads/main
| 2023-03-19T09:58:21.909280
| 2021-03-20T08:49:35
| 2021-03-20T08:49:35
| 349,654,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 395
|
r
|
[Ex]13_1.R
|
# Simple linear regression on the built-in state.x77 data:
# predict the murder rate (Murder) from the illiteracy rate (Illiteracy).

head(state.x77)                       # peek at the raw matrix
states <- data.frame(state.x77)

# Scatter plot of the response against the predictor
plot(Murder ~ Illiteracy, data = states)

# Fit the model: Murder is the dependent variable, Illiteracy the
# independent variable
model <- lm(Murder ~ Illiteracy, data = states)
model

coef(model)[1]  # intercept (b)
coef(model)[2]  # slope (W)

# Manual predictions using the rounded fitted coefficients
for (Illiteracy in c(0.5, 1.0, 1.5)) {
  Murder <- 4.257 * Illiteracy + 2.397
  print(Murder)
}
|
e4fbd18aab793c075318cc5bc8e107d0a0568c62
|
5897ed1db9d4a1a53acccf188fb1448da9f0fe55
|
/LNGSupplyChain/lng/draft/tabitems/t_item1b/server.R
|
7a0eab33483f1dd5a2ee376af07764e337d8cd97
|
[] |
no_license
|
MaverickBatman/lngsilkroute
|
10babac461ca96dfb48387c8aa3fa97e39a46941
|
2f0eb82fe1044b78702f5eba064a89cfe11cdd84
|
refs/heads/master
| 2020-05-21T05:29:36.130812
| 2019-05-10T05:34:26
| 2019-05-10T05:34:26
| 185,922,861
| 0
| 0
| null | 2019-05-10T05:34:27
| 2019-05-10T05:15:34
| null |
UTF-8
|
R
| false
| false
| 2,568
|
r
|
server.R
|
# Shiny server logic for the WACOG (Weighted Average Cost Of Gas) tab:
# loads (or seeds) the `wacog` table from a local SQLite database,
# recomputes the running WACOG row by row, writes it back, and wires up
# the data table, the reactive effective-cost text, and the trend plot.
mydb <- dbConnect(RSQLite::SQLite(), "db/my-db.db")
# Read the wacog table, seeding it from the bundled CSV on first run
if(dbExistsTable(mydb,"wacog")) {
data <- dbGetQuery(mydb, 'SELECT * FROM wacog')
}
if(!dbExistsTable(mydb,"wacog"))
{
wacog <- read.csv("db/WACOG_Input.csv")
dbWriteTable(mydb, "wacog", wacog)
data <- dbGetQuery(mydb, 'SELECT * FROM wacog')
}
newWacog <- c()
prevWacog <- c()
# Recompute WACOG for every row; each row depends on the previous row's
# CurrWACOG (0 for the first row)
for (row in 1:nrow(data)) {
price <- data[row, "InjGasPrice"]
date <- data[row, "Date"]
openBal <- data[row, "OpeningBal"]
inj <- data[row, "Injections"]
loss <- data[row, "Losses"]
if(row == 1)
{
#pWacog <- data[row, "PrevWACOG"]
pWacog <- 0
}
else
{
pWacog <- data[row-1, "CurrWACOG"]
}
# WACOG = (opening balance at previous WACOG + net injections priced at
# the injection gas price) / total gas after injection
numerator = (openBal * pWacog)+((inj+loss)*price)
denom = (openBal + inj)
curWacog = round(numerator/denom,2)
data[row, "CurrWACOG"] <- curWacog
newWacog <- c(newWacog,curWacog)
}
cbind(data, newWacog)
# Persist the recomputed table back to SQLite (drop and rewrite)
if(dbExistsTable(mydb,"wacog"))
{
dbRemoveTable(mydb,"wacog")
}
dbWriteTable(mydb, "wacog", data)
# Render the full WACOG table
output$price <- DT::renderDataTable({
data})
newday = as.Date(data$Date, "%d-%b-%y")
df1 = data.frame(Date = newday,
Price = data$CurrWACOG)
# output$plot <- renderPlot({
#   plot(Price ~ Date,df1,xaxt = "n", type=input$plotType)
# })
# Reactive effective-cost calculation driven by the sidebar inputs
vals <- reactiveValues()
observe({
vals$i <- input$inj
vals$b <- input$bal
vals$l <- input$loss
vals$g <- input$gas_price
vals$w <- input$wacog
# Effective cost of gas for the hypothetical injection scenario
vals$cost <- (((vals$i+vals$l)*vals$g)+(vals$b * vals$w))/(vals$i + vals$b)
})
output$txtout <- renderText({
paste("On ",format(input$date)," Effective Cost = ,",vals$cost )
})
newday = as.Date(data$Date, "%d-%b-%y")
# Re-open the database and re-read the (just rewritten) wacog table
mydb <- dbConnect(RSQLite::SQLite(), "db/my-db.db")
if(dbExistsTable(mydb,"wacog")) {
data <- dbGetQuery(mydb, 'SELECT * FROM wacog')
}
if(!dbExistsTable(mydb,"wacog"))
{
wacog <- read.csv("db/WACOG_Input.csv")
dbWriteTable(mydb, "wacog", wacog)
data <- dbGetQuery(mydb, 'SELECT * FROM wacog')
}
newday = as.Date(data$Date, "%d-%b-%y")
# output$table <- DT::renderDataTable({
#   data
# })
# Combine the storage WACOG series with the market price series from
# Prices.csv and draw both as an interactive trend chart
df2 <- data.frame(Date = newday ,Location = "Gas Storage", Price = data$CurrWACOG)
fileInput <- read.csv("db/Prices.csv")
newday1 = as.Date(fileInput$Date, "%m/%d/%Y")
df3 <- data.frame(Date = newday1,Location = fileInput$Location,Price = fileInput$Price)
both <- rbind(df3, df2)
output$trendPlot <- renderPlotly({
p <- ggplot(data=both, aes(x=Date,
y=Price,
group = Location,
colour = Location)
) + geom_line()
ggplotly(p)
})
|
c1637c39a1c972b258f95b31f587ba47c84fbe49
|
758536cdba8ec0a15c9ee34f888b2e7c7850231f
|
/R/estambi.R
|
713fadaff0cba1fa6f0d7f7a64a32ebe0fe4b951
|
[] |
no_license
|
cran/Rramas
|
919e630f39d76091ad8787ffc8f1d4c7817ac97a
|
0e72536be1ca03f2ebe22fc1b9629d945c18764f
|
refs/heads/master
| 2021-06-04T21:32:23.074268
| 2019-04-22T08:10:04
| 2019-04-22T08:10:04
| 17,693,400
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
estambi.R
|
# Add environmental stochasticity to a population projection matrix.
#
# Arguments:
#  mat       - mean projection matrix (coerced with as.matrix)
#  matsd     - matrix of standard deviations, same dimensions as mat
#  equalsign - if TRUE, all deviates share one random sign; otherwise
#              every entry is perturbed independently
# Returns the perturbed matrix, with negative entries set to 0 and the
# transition probabilities (every row except the first, which holds
# fecundities) capped at 1.
estambi <- function(mat, matsd, equalsign) {
    mat <- as.matrix(mat)
    matsd <- as.matrix(matsd)
    n_row <- dim(mat)[1]
    n_col <- dim(mat)[2]
    if (equalsign != TRUE) {
        # Independent normal draw around every entry
        mat <- matrix(rnorm(length(mat), mean = as.vector(mat), sd = as.vector(matsd)),
                      nrow = n_row, ncol = n_col)
    } else {
        # Magnitudes first, then one shared random sign for all of them
        magnitudes <- abs(rnorm(length(mat), mean = 0, sd = as.vector(matsd)))
        deviates <- magnitudes * sample(c(-1, 1), 1)
        mat <- mat + matrix(deviates, nrow = n_row, ncol = n_col)
    }
    # Truncate impossible values: no negative rates anywhere, and no
    # probability above 1 outside the fecundity (first) row
    mat[mat < 0] <- 0
    mat[-1, ][mat[-1, ] > 1] <- 1
    return(mat)
}
|
9911e8eba9e6e80f2cb9f9b88555a7bd8f32cd0b
|
3776aa89edfad5a98aa43449c6e61533a7cdb7cb
|
/R/ssize.twoSampVary.r
|
077867033a5c937578f7fb283fcb369f9cad7cd3
|
[] |
no_license
|
cran/ssize.fdr
|
6376e9c4bc5071351cd1aeb9637a7a6e3fbf96a3
|
ac3d20c9193eb246f38442aa0640230929fe27af
|
refs/heads/master
| 2022-06-18T20:20:44.457654
| 2022-06-07T03:30:02
| 2022-06-07T03:30:02
| 17,700,089
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,620
|
r
|
ssize.twoSampVary.r
|
# Sample-size computation for two-sample microarray experiments where the
# per-gene effect sizes Delta_g ~ N(deltaMean, deltaSE^2) and the gene
# variances sigma_g^2 follow an inverse gamma IG(a, b) distribution.
# For each proportion of non-differentially-expressed genes in pi0, the
# function finds the smallest per-group sample size (from 3 up to maxN)
# whose average power at the requested FDR reaches `power`, and plots
# power against sample size for every pi0.
# Returns a list with the chosen sample sizes ($ssize), the power matrix
# ($power) and the critical values ($crit.vals).
ssize.twoSampVary<-function(deltaMean,deltaSE,a,b,fdr=0.05,power=0.8,pi0=0.95,maxN=35,side="two-sided",cex.title=1.15,cex.legend=1){
delMean<-deltaMean
delSig<-deltaSE
N<-maxN
# Average, over sigma_g^2 ~ IG(a, b), of the non-central t CDF at c
# (integrand parameterised by the precision rho = 1/sigma^2)
getAvgTcdf_varySigma<-function(c,a,b,deltaMean,deltaSE,n){
 sigmaFun<-function(rho){
  avgtcf<-(pt(q=c/sqrt(rho*deltaSE^2*n/2+1),df=2*n-2,
   ncp=deltaMean/sqrt((deltaSE^2+2/(rho*n))))
   *(1/(gamma(a)*(1/b)^a))*rho^(a-1)*exp((-1)*rho*b))
 return(avgtcf)
 }
 sigmaInt<-integrate(sigmaFun,0,Inf,abs.tol=1e-10)
 return(sigmaInt$value)
}
# TSVary is minimised over c: it measures how far the ratio of expected
# false positives to expected rejections is from the target FDR odds r,
# for the requested sidedness of the test
if(side=="two-sided"){
TSVary<-function(c,fdr,p,n,a,b,dM,dS){
r<-fdr*(1-p)/((1-fdr)*p)
dif<-abs((2*pt(q=-c,df=2*n-2)/(1-getAvgTcdf_varySigma(c,a,b,dM,dS,n)
+getAvgTcdf_varySigma(-c,a,b,dM,dS,n))-r))
}
}
if(side=="upper"){
TSVary<-function(c,fdr,p,n,a,b,dM,dS){
r<-fdr*(1-p)/((1-fdr)*p)
dif<-abs((pt(q=-c,df=2*n-2)/(1-getAvgTcdf_varySigma(c,a,b,dM,dS,n))-r))
}
}
if(side=="lower"){
TSVary<-function(c,fdr,p,n,a,b,dM,dS){
r<-fdr*(1-p)/((1-fdr)*p)
dif<-abs((pt(q=-c,df=2*n-2)/getAvgTcdf_varySigma(-c,a,b,dM,dS,n)-r))
}
}
pwr2<-NULL
crit<-NULL
ssize<-matrix(0,nrow=length(pi0),ncol=3)
colnames(ssize)<-c("pi0", "ssize","power")
up.start<-50
# For each pi0, scan sample sizes n = 3..N: find the critical value ci
# by minimising TSVary, then compute the resulting average power
for(i in 1:length(pi0)){
p<-pi0[i]
up<-up.start
for(n in 3:N){
ci<-optimize(f=TSVary, interval=c(0,up), fdr=fdr,p=p,n=n,a=a,b=b,dM=deltaMean,dS=deltaSE)$min
up<-ci
if(abs(ci-up.start)>=1){
if(side=="two-sided"){pwr.new<-(1-getAvgTcdf_varySigma(ci,a,b,delMean,delSig,n)
+getAvgTcdf_varySigma(-ci,a,b,delMean,delSig,n))}
if(side=="upper"){pwr.new<-1-getAvgTcdf_varySigma(ci,a,b,delMean,delSig,n)}
if(side=="lower"){pwr.new<-getAvgTcdf_varySigma(-ci,a,b,delMean,delSig,n)}
}
# ci hitting the search bound means the optimisation failed: no power
if(abs(ci-up.start)<1){pwr.new<-0; ci<-NA}
crit<-c(crit,ci)
pwr2<-c(pwr2,pwr.new)
if(pwr2[(i-1)*(N-2)+n-2]>=power & ssize[i,1]==0){ ##finding first sample size with
ssize[i,]<-c(p,n,pwr2[(i-1)*(N-2)+n-2]) ##power greater than desired power
}
}
}
ssize[,1]<-pi0
if(sum(ssize==0)>0){warning("Desired power not achieved for at least one pi0")}
ssize[ssize==0]<-NA
# One power curve per pi0, all on the same [0, 1] axes
pwrMatrix<-matrix(c(3:N,pwr2),ncol=length(pi0)+1,byrow=FALSE)
for(i in 1:length(pi0)){
if(i==1){
plot(3:N,pwrMatrix[,i+1],col=i,xlim=c(0,N),ylim=c(0,1),xlab="",ylab="",pch=16)
lines(3:N,pwrMatrix[,i+1],col=i,lty=i)
}
if(i!=1){
points(3:N,pwrMatrix[,i+1],col=i,pch=16)
lines(3:N,pwrMatrix[,i+1],col=i,lty=i)
}
}
# Reference lines: target power plus a background grid
abline(h=power,lty=2,lwd=2)
abline(v=0:N,h=0.1*(0:10),col="gray",lty=3)
dMt<-as.character(delMean); dSt<-as.character(delSig); at<-as.character(a); bt<-as.character(b)
Nd<-paste("N(",dMt,",",dSt,")",sep="")
Gd<-paste("IG(",at,",",bt,")",sep="")
title(xlab="Sample size (n)", ylab="Power")
mtext(bquote(paste("Average power vs. sample size with fdr=",.(fdr),",")),
cex=cex.title,padj=-1.85)
mtext(bquote(paste(Delta[g],"~N(",.(round(deltaMean,4)),",",.(round(deltaSE,4)),") and ",
sigma[g]^2,"~IG(",.(round(a,4)),",",.(round(b,4)),")")),cex=cex.title,padj=-0.1)
legend(x=N,y=0,xjust=1,yjust=0,col=1:i,pch=c(16,16,16),lty=1:length(pi0),
legend=as.character(pi0),bg="white",title=expression(pi[0]),cex=cex.legend)
# Assemble the returned matrices (rows indexed by n = 3..N)
pwrMatrix<-round(pwrMatrix,7)
colnames(pwrMatrix)<-c("n",as.character(pi0))
critMatrix<-matrix(c(3:N,crit),ncol=length(pi0)+1,byrow=FALSE)
colnames(critMatrix)<-c("n",as.character(pi0))
ret<-NULL
ret$ssize<-ssize
ret$power<-pwrMatrix
ret$crit.vals<-critMatrix
return(ret)
}
|
b8220c969e1c6298d6651fae4ea31c1d20cbc1c1
|
b22be0222d77ada8b65fd06a9f78f07125ab881e
|
/NetCellMatch/NetCellMatch Functions/Helper Functions/makesimilarity.R
|
a82e216102e406db75a86bc70ec0eec86022d446
|
[] |
no_license
|
saisaitian/Software
|
44bf4f46182a93fb3878af59d5aea78afe9790ad
|
bbe3ec75658f738f4fbeda6c8a0613647154333b
|
refs/heads/master
| 2023-04-10T09:25:49.553015
| 2021-04-22T18:05:50
| 2021-04-22T18:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 472
|
r
|
makesimilarity.R
|
#' Build a pairwise similarity matrix between the rows of a data matrix.
#'
#' @param my.data Numeric matrix (or data frame); similarities are
#'   computed between its rows.
#' @param similarity Either the string 'spear' (absolute Spearman
#'   correlation between rows) or a function f(row_i, row_j) returning a
#'   scalar similarity.
#' @return An nrow(my.data) x nrow(my.data) similarity matrix.
make.similarity <- function(my.data, similarity) {
  # Similarities are taken between rows, so the matrix is nrow x nrow.
  # (The previous version used ncol here, which broke for non-square input.)
  N <- nrow(my.data)
  # Guard with is.character(): the bare comparison `similarity == 'spear'`
  # errors whenever a function is passed, making the custom branch
  # unreachable in the original code.
  if (is.character(similarity) && identical(similarity, "spear")) {
    # cor() works column-wise, so transpose to correlate rows; take the
    # absolute value so the result is a similarity in [0, 1].
    S <- abs(cor(t(my.data), method = "spear"))
  } else {
    S <- matrix(NA_real_, nrow = N, ncol = N)
    for (i in seq_len(N)) {
      for (j in seq_len(N)) {
        S[i, j] <- similarity(my.data[i, ], my.data[j, ])
      }
    }
  }
  return(S)
}
|
98d55fc1eeeb08bd698d8ce6b910b65182626b7f
|
765676bdd1603752874de2b930c092adee1c90ee
|
/man/HT_base_filter.Rd
|
b90c8c0f21d4eb915068f88b997db4b32c0f7901
|
[] |
no_license
|
parvezrana/TreeLS
|
f43c85e18131c56d3f19bc742e13aeaeef64826d
|
1be9b023be413ef47c9f14126b92309f60a7c4c9
|
refs/heads/master
| 2020-03-19T10:32:06.354572
| 2017-04-16T18:33:41
| 2017-04-16T18:33:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,235
|
rd
|
HT_base_filter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_functions.R
\name{HT_base_filter}
\alias{HT_base_filter}
\title{Tree base Hough transformation filter}
\usage{
HT_base_filter(XYZmat, z.int = NULL, rad.inf = 2, cell.size = 0.025,
min.val = 0.3, Plot = F)
}
\arguments{
\item{XYZmat}{single tree point cloud - \emph{xyz} matrix}
\item{z.int}{optional - height interval to take the reference cylinder. If not specified, the height interval adopted is from 5\% to 10\% of the tree's total height}
\item{rad.inf}{inflation factor to multiply the radius. All points (in the entire point cloud) outside a range of \emph{rad.inf * radius} from the reference cylinder's center will be deleted}
\item{cell.size}{pixel size for the Hough transformation}
\item{min.val}{passed on to \code{\link{hough}}}
\item{Plot}{plot the reference tree segment? TRUE or FALSE}
}
\value{
vector of length 5, containing the upper and lower height limits of the reference cylinder, xy coordinates of circle center and its radius
}
\description{
identification of reference cylinder at the tree's base for filtering outliers using the Hough transformation
}
\seealso{
\code{\link{hough}}
}
|
28a95d6f0aec017fba4da98c4d3828afa8426a81
|
c234e0a2fda69da879d7ec823c57600ed2861c18
|
/Lab Session Linear Regression.R
|
b2e9742e682fed929c72f0d928d1f67873ce2a24
|
[] |
no_license
|
Garima1221/ISLR
|
16c8cbc9fea7af4a83c458050af49444340c1b98
|
079ee211a4ebf47e3e4a8f6e3dce8ad06189a655
|
refs/heads/master
| 2020-04-05T13:45:48.016420
| 2019-03-24T04:11:57
| 2019-03-24T04:11:57
| 156,908,518
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,232
|
r
|
Lab Session Linear Regression.R
|
# ISLR lab session: simple and multiple linear regression on the Boston
# housing data (MASS) and qualitative predictors on Carseats (ISLR).
# This is an interactive teaching script: it relies on attach(), opens
# data editors via fix(), and installs the 'car' package as a side effect.
library(MASS)
# fix() opens an editor on the Boston data set (interactive only)
fix(Boston)
#check features available in Boston data
names(Boston)
#fitting linear model to check response medv using a single predictor lstat in Boston dataset
lm.fit = lm(medv~lstat,data = Boston)
lm.fit
#Checking some basic information of our linear model
summary(lm.fit)
#checking information contained in lm.fit
names(lm.fit)
#checking confidence interval of the coefficient estimates
confint(lm.fit)
#confidence intervals and prediction intervals for the prediction of medv for a given value of lstat
predict(lm.fit,data.frame(lstat=(c(5,10,15))),interval = "confidence")
predict(lm.fit,data.frame(lstat =(c(5,10,15))),interval = "prediction")
#plotting lstat vs medv and the regression line
attach(Boston)
plot(lstat,medv)
#abline(lm.fit)
#abline(lm.fit,lwd = 3)
abline(lm.fit,lwd = 3,col = "red")
#plot(1:20.1:20,pch=1:20)
#plotting the four standard diagnostic plots in a 2x2 grid
par(mfrow=c(2,2))
plot(lm.fit)
#OR other ways of plotting residuals ,rstudent and leverage values
plot(predict(lm.fit),residuals(lm.fit))
plot(predict(lm.fit),rstudent(lm.fit)) ## the curved plot suggests nonlinearity
plot(predict(lm.fit),hatvalues(lm.fit))#for leverage values
############################## Multiple Linear Regression ############################################
# using 2 predictors lstat and age
lm.fit = lm(medv~lstat+age,data = Boston)
summary(lm.fit)
#using all the 13 predictors in Boston data
lm.fit = lm(medv~.,data = Boston)
summary(lm.fit)
#checking multicollinearity in the dataset through vif() function present in car library
#NOTE: install.packages() contacts a repository; needed only once
install.packages("car")
library(car)
vif(lm.fit)
#age has high p value ,so we can accept Null hypothesis and age has no effect on response
#thus we consider a new fit by removing the age feature
lm.fit1 = lm(medv~.-age,data = Boston)
summary(lm.fit1)
#OR we can also use update function for the same
lm.fit1 = update(lm.fit,~.-age)
#Adding interaction term (lstat*age expands to lstat + age + lstat:age)
summary(lm(medv~lstat*age,data = Boston))
#adding non linear transformation (uses the attached Boston columns)
lm.fit2 = lm(medv~lstat+I(lstat^2))
summary(lm.fit2)
#ANOVA test to check improvement in our model through non linear transformation
lm.fit=lm(medv~lstat,data=Boston)
anova(lm.fit,lm.fit2)
#Higher order polynomials using poly function
lm.fit5 = lm(medv~poly(lstat,5))
summary(lm.fit5)
#improvement in model fit
# using log transformation
summary(lm(medv~log(rm),data = Boston))
######################################Qualitative Predictors ##################################
library(ISLR)
fix(Carseats)
names(Carseats)
#Creating linear model using interaction terms
lm.fit = lm(Sales~.+Income:Advertising +Price:Age,data = Carseats)
summary(lm.fit)
#contrasts() function is used to return the coding that R uses for dummy variable
attach(Carseats)
contrasts(ShelveLoc)
#############################Creating functions in R ############################################
# Helper that attaches the lab's required packages (ISLR for the data
# sets, MASS for modelling utilities) and reports once they are ready.
loadLibrary <- function() {
  for (pkg in c("ISLR", "MASS")) {
    library(pkg, character.only = TRUE)
  }
  print("The libraries have been loaded")
}
loadLibrary()
|
dd017d962e9d91b5ce8e7db8987bcbd3357563a9
|
e335c5fb95fdd6ad54c884ca76e93cab83240ab9
|
/man/usrecession.Rd
|
eb4e3b46926438b36de39b976c9559fc20e9a0f6
|
[] |
no_license
|
cran/MFDF
|
4ee74901ec54327444a204a6de3e2da055c6aca9
|
d6427e1a11a56af1d5e0c0c730dfea73e7268653
|
refs/heads/master
| 2020-04-29T11:18:09.134444
| 2009-10-31T00:00:00
| 2009-10-31T00:00:00
| 17,717,794
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,304
|
rd
|
usrecession.Rd
|
\name{usrecession}
\alias{usrecession}
\docType{data}
\title{
Records of U.S.economic recessions from 1953 to 2009
}
\description{
The National Bureau of Economic Research (NBER) provides the
most widely accepted definition of a recession (NBER 2008): \emph{A recession is a significant decline in economic activity spread across the economy, lasting more than a few months, normally visible in production, employment, real income, and other indicators. A recession
begins when the economy reaches a peak of activity and ends when the economy reaches its trough. Between trough and peak, the economy is in an expansion.} The latest dating result of U.S. recessions can be found at the official web site of NBER (http://www.nber.org/cycles.html)
}
\usage{data(usrecession)}
\format{
A data frame with 682 observations on the following 3 variables.
\code{time index} is a numeric vector.
\code{flag} is a numeric vector.
\code{time} is a factor contains characters specifying the month and year.
}
\source{
Official web site of NBER (http://www.nber.org/cycles.html)
}
\references{
Dou, W., Pollard, D and Zhou, H.H. (2009) Functional Regression for General Exponential Families. \emph{manuscripts}.
}
\examples{
data(usrecession)
}
\keyword{datasets}
|
36936309f36079414ac6e2a53aac7c497af5acf8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mut/examples/LR2.Rd.R
|
79c236ff67291b138675a41ac88d348c0531726d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,480
|
r
|
LR2.Rd.R
|
# Extracted example code for mut::LR2 (pairwise likelihood ratios with
# mutation), generated from the package's Rd examples.  The examples
# reproduce computations from Egeland, Pinto and Amorim (2016) and
# cross-check LR2() against exact formulae and the Familias package.
library(mut)
### Name: LR2
### Title: Pairwise LR with Mutation
### Aliases: LR2
### ** Examples
library(expm)
library(Familias)
### The examples are from Egeland, Pinto and Amorim (2016)
### (referred to as the 'paper' below)
### Example 2.1.
### Consider a duo case and assume mutations are not possible.
p <- c(0.1,0.2,0.3,0.4)
M <- diag(c(1,1,1,1))
n.num <- c(2, 2, 2, 2)
n.den <- c(0, 0, 0, 0)
alpha <- c(1,1,1,1)/4
kappa.num <- c(0.25, 0.50, 0.25); kappa.den <- c(1,0,0); theta <- 0
# The next three LRs coincide with those found
# using exact formulae in the paper
LR2(c(1,2), c(3,4), n.num, n.den, p, M, kappa.num,
kappa.den, alpha, theta)$LR
LR2(c(1,2), c(1,3), n.num, n.den, p, M, kappa.num,
kappa.den, alpha, theta)$LR
LR2(c(1,2), c(1,2), n.num, n.den, p, M, kappa.num,
kappa.den, alpha, theta)$LR
### Example 2.2 Identifying parent-child with mutation
### "A father and a son are missing ..."
p <- 0.2; R <- 0.005; k <- R/(2*p*(1-p))
M <- rbind(c(1-k*(1-p),k*(1-p)),c(k*p,1-k*p))
n.num <- c(0, 1, 1, 0); kappa.num <- c(0, 1, 0)
n.den <- c(0, 0, 0, 0); kappa.den <- c(1, 0, 0)
alpha <- c(0, 0.5, 0.5, 0)
theta <- 0.0
# Below values coincide, this is no longer true if
# M is made unbalanced or theta>0
LR2(c(1,1), c(2,2), n.num, n.den, c(p, 1-p), M,
kappa.num, kappa.den, alpha, theta)$LR
LR2(c(2,2), c(1,1), n.num, n.den, c(p, 1-p), M,
kappa.num, kappa.den, alpha, theta)$LR
### Example 2.3
library(Familias)
persons <- c("Child", "Alleged father")
sex <- c("male", "male")
ped1 <- FamiliasPedigree(id=persons, dadid=c("Alleged father",NA),
momid=c(NA,NA), sex=c("male", "male"))
ped2 <- FamiliasPedigree(id=persons, dadid=c(NA,NA), momid=c(NA,NA),
sex=c("male", "male"))
pedigrees <- list(ped1,ped2)
R =.01; theta = 0.02
locus1 <- FamiliasLocus(frequencies=c(0.25,0.25,0.25,0.25),
name="L1", allelenames=c("1","2","3","4"),
MutationRate = R, MutationModel="Proportional")
loci <- list(locus1)
datamatrix <- data.frame(locus1.1=c("3","1"), locus1.2=c("4","2"))
datamatrix <- data.frame(locus1.1=c("1","3"), locus1.2=c("2","4")) #Swapped, same result
rownames(datamatrix) <- persons
result <- FamiliasPosterior(pedigrees, loci, datamatrix, ref=2, kinship=theta)
result$LR[1]
(1+2*theta)/(1-theta)*(4/3)*R
### Example 3.1 of paper, double first cousin example
p <- 0.2; q <- 1-p; R <- 0.005; k <- R/(2*p*(1-p))
k4 <- 1-(1-k)^4
m <- 1-k4*(1-p)
kappa0 <- 9/16; kappa1 <- 6/16; kappa2 <- 1/16
kappa0+kappa1*m/p+kappa2*m^2/p^2
# LR=3.759526 as confirmed by familias.name/DFC.SNP.fam
# Alternatively using library familias.name/mut.zip
alpha <- c(0,0.5,0.5, 0)
n.num <- c(0,4, 4, 0); n.den <- c(0,0,0,0)
kappaDFC <- c(kappa0, kappa1, kappa2)
LR2(c(1,1),c(1,1),n.num, n.den, M=M, p=c(p,1-p), kappa.num=kappaDFC,
alpha=alpha, theta=0, beta=0)$LR
# Four alleles
p <- c(0.1,0.2,0.3,0.4)
locus1 <- FamiliasLocus(frequencies=p, name="locus1",
allelenames= 1:length(p), MutationRate=R, MutationModel="Proportional")
M <- locus1$maleMutationMatrix
LR2(c(1,1), c(1,1),n.num, n.den, M=M, p=p, kappa.num=kappaDFC,
alpha=alpha, theta=0, beta=0)
# LR=10.15314 as confirmed by http://familias.name/DFC.4.fam (takes a few minutes)
# With ten alleles, all allele freq 0.1, and "Equal" mutation model
p <- rep(0.1,10)
locus1 <- FamiliasLocus(frequencies=p, name="locus1",
allelenames= 1:length(p), MutationRate=R, MutationModel="Equal")
M <- locus1$maleMutationMatrix
LR2(c(1,1), c(1,1),n.num, n.den, M=M, p=p, kappa.num=kappaDFC,
alpha=alpha, theta=0, beta=0)
# LR=10.24266 as confirmed by http://familias.name/DFC.10.fam (takes a few minutes)
### Example 3.2 of paper
library(paramlink)
library(Familias)
R <- 0.005; p <- c(0.01, 0.2, 0.3, 0.49)
g1 <- c(1,2); g2 <- c(1,3)
an <- 1:length(p)
nn1 <- nn2 <- 3; x <- doubleCousins(nn1, nn2)
v <- 3
kappa.num <- c((2^{2*v}-1)^2, 2*(2^{2*v}-1),1)/16^v
alpha <- c(0.5, 0, 0, 0.5)
locus1 <- FamiliasLocus(frequencies=p, name="locus1",
allelenames= an, MutationRate=R, MutationModel="Proportional")
M <- locus1$maleMutationMatrix
n1 <- nn1+nn2+2; n2 <- nn1+nn2+2
n.num <- c(n1, 0, 0, n2)
n.den <- c(0, 0, 0, 0)
kappa.den <- c(1,0,0)
myLR <- LR2(g1, g2, n.num, n.den, p, M, kappa.num, kappa.den, alpha, beta=0)
myLR$LR
#Exact
k <- R/(1-sum(p^2))
k10 <- 1-(1-k)^n2
kappa.num[1]+
kappa.num[2]*(p[3]*(k10*p[1]+(1-k10*(1-p[1]))) +
(p[1]*2*k10*p[3]))/(4*p[1]*p[3]) +
kappa.num[3]*2*((1-k10*(1-p[1]))*k10*p[3]+k10^2*p[1]*p[3])/(4*p[1]*p[3])
### Example 3.3 HS or GP versus avuncular
p <- c(0.1, 0.2, 0.3, 0.4); R <-0.005
locus1 <- FamiliasLocus(frequencies=p, name="L1", allelenames=1:4,
femaleMutationRate = R, maleMutationRate =R,femaleMutationRange = 0.1,
maleMutationRange = 0.1,femaleMutationRate2 = 0, maleMutationRate2 = 0,
maleMutationModel="Proportional",femaleMutationModel="Proportional")
M <- locus1$maleMutationMatrix
kappa.num <- kappa.den <-c(0.5,0.5,0)
alpha <- c(1,0,0,0)
n1 <- c(2,0,0,0)
n2 <- c(3,0,0,0)
a <- 1; b<-2; c<-3; d<-4
g1 <- c(a,b); g2 <- c(c,d)
LR.1 <- LR2(g1, g2,n.num=n1, n.den=n2, p=p, M, kappa.num,
kappa.den, alpha, theta=0, beta=0)$LR
H <- 1-sum(p^2)
k <- R/H
k2 <- 1-(1-k)^2; k3 <- 1-(1-k)^3
# Case 1: a,b,c,d differ, no overlap in genotypes
LR.2 <- (1+k2)/(1+k3)
LR.1-LR.2#Equal
# Case 2 all a
g1 <- c(a,a); g2 <- c(a,a)
LR.1 <- LR2(g1, g2,n.num=n1, n.den=n2, p=p, M, kappa.num, kappa.den, alpha, theta=0)$LR
LR.2 <- (p[a]+1-k2*(1-p[a]))/(p[a]+1-k3*(1-p[a])) #all a
#Case 3 equal hetero
c <- 3; d <- 4
g1 <- c(c,d); g2 <- c(c,d)
LR.1 <- LR2(g1, g2,n.num=n1, n.den=n2, p=p, M, kappa.num, kappa.den, alpha, theta=0)$LR
num <- (4*p[c]*p[d]+p[d]*(1+k2*(2*p[c]-1))+p[c]*(1+k2*(2*p[d]-1)))
den <- (4*p[c]*p[d]+p[d]*(1+k3*(2*p[c]-1))+p[c]*(1+k3*(2*p[d]-1)))
LR.2 <- num/den
LR.1-LR.2
# Example QHFC Thompson p 22
p <- c(0.1, 0.9); R <- 0.005
kappa <-c(17,14,1)/32
alpha <- c(0.25,0.25,0.25,0.25)
n.num <- c(4,4,4,4); n.den <- c(0, 0, 0, 0)
locus1 <- FamiliasLocus(frequencies=p, name="locus1",
allelenames= 1:length(p), MutationRate=R, MutationModel="Proportional")
M <- locus1$maleMutationMatrix
LR2(c(1,2), c(1,1), n.num, n.den, M=M, p=p, kappa.num=kappa,
alpha=alpha, theta=0, beta=0.5)
#=2.562366 See familias.name/QHFC.fam
# Example Last line of Table 1 of old ms, to be balanced paper
data(NorwegianFrequencies)
d <- as.double(NorwegianFrequencies$D12S391)
names(d) <- 1:length(d) #21=16, 22=17 etct
n.num <- c(4,0,0,2); n.den <- c(0, 0, 0, 0)
alpha <- c(1/8,0,0, 7/8);kappa.num <- c(7/16,8/16,1/16)
line <-NULL
R <-0
locus1 <- FamiliasLocus(frequencies=d, name="locus1",
allelenames=1:length(d), MutationRate=R, MutationModel="Proportional")
M <- locus1$maleMutationMatrix
line <- c(line, LR2(c(16,17), c(18,19), n.num, n.den,
d, M, kappa.num, alpha=alpha)$numerator)
R <-0.0021
locus1 <- FamiliasLocus(frequencies=d, name="locus1",
allelenames=1:length(d), MutationRate=R, MutationModel="Proportional")
M <- locus1$maleMutationMatrix
line <- c(line,LR2(c(16,17),c(18,19),n.num, n.den ,d,M,kappa.num, alpha=alpha)$numerator)
locus1 <- FamiliasLocus(frequencies=d, name="locus1",
allelenames=1:length(d), MutationRate=R, MutationModel="Equal")
M <- locus1$maleMutationMatrix
line <- c(line,LR2(c(16,17),c(18,19), n.num, n.den, d, M, kappa.num, alpha=alpha)$numerator)
line <- c(line,LR2(c(18,19),c(16,17), n.den, n.den, d, M, kappa.num, alpha=alpha)$numerator)
names(line) <- paste("col",1:4,sep="")
p <- d[c(16,17,18,19)]
(7/16)*4*prod(p)# First column
#Silent, mutation and kinship
p <- c(0.2, 0.75, 0.05); R <- 0.005
locus1 <- FamiliasLocus(frequencies=p, name="L1",
allelenames=c("1","2", "silent"),
femaleMutationRate = R, maleMutationRate =R,femaleMutationRange = 0.1,
maleMutationRange = 0.1,femaleMutationRate2 = 0, maleMutationRate2 = 0,
maleMutationModel="Proportional",
femaleMutationModel="Proportional")
M <- locus1$maleMutationMatrix
persons <- c("AF", "CH")
sex <- c("male", "male")
H1 <- FamiliasPedigree(dadid=c(NA, "AF"), momid= c(NA,NA),
sex=sex, id=persons)
H2 <- FamiliasPedigree(dadid=c(NA, NA), momid= c(NA,NA),
sex=sex, id=persons)
dm <- rbind(c(1,1),
c(2,2))
rownames(dm) <- persons
theta <- 0.03
alpha <- c(0.5, 0.5, 0, 0)
n.num <- c(1, 1, 0, 0)
n.den <- c(0, 0, 0, 0)
pedigrees <- list(H1, H2)
LRfam <- FamiliasPosterior(pedigrees, locus1, dm, ref=2,
kinship = theta)$LRperMarker[1]
LR <- LR2(c(1,1), c(2,2), n.num,n.den, p, M, c(0,1,0), c(1,0,0),
alpha, theta, silent=TRUE)$LR #=0.1973758 as for Familias
# Example in Section 2.1.2. Duo with mutation and theta
R <- 0.01
theta <- 0.02
LR <- 4*(R/3)*(1+2*theta)/(1-theta) #Exact LR
g1 <- c(1,2); g2 <- c(3,4)
n.num <- c(1,0, 1,0); n.den <- c(0, 0, 0, 0)
p <- c(1, 1, 1, 1)/4
kappa.num <- c(0, 1, 0)
kappa.den <- c(1, 0, 0)
alpha <- c(0.5,0.0,0.5,0)
theta <- 0.02
silent <- 0
locus1 <- FamiliasLocus(frequencies=p, name="locus1",
allelenames= 1:4, MutationRate=R,
MutationModel="Proportional")
M <- locus1$maleMutationMatrix
LR2(g1, g2, n.num, n.den, p, M, kappa.num, kappa.den, alpha, theta, silent)
|
e311b93076b79dde522758255c08310e3511d01a
|
96cf500cdef898429bf1d293754cdba4a21721ac
|
/modules/2021_April25_functions.R
|
6008295a8c8d568fd053b06f4884fb84d0d588c3
|
[] |
no_license
|
ggiaever/2021_HIPLAB_Novartis
|
e4320b35f5909694fc0db6ef9f96e76f6b6383e5
|
a7f81bc5914056998b41853f3772d66e9d684a4c
|
refs/heads/main
| 2023-04-13T11:11:00.624270
| 2021-04-26T06:21:39
| 2021-04-26T06:21:39
| 361,041,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47,084
|
r
|
2021_April25_functions.R
|
#### GO enrichment function
#### curr_exp is name of experiment
#### mat = experimental matrix
#### coln = coln of mat with fitness defect scores for enrichment
#### sig = significance threshold for GO enrichment of fitness defect scores
#### bp_path = path of GOSET RDS file
#### minGeneSetSize = minimum size of geneSet to include in enrichment
#### maxSetSize = (not an option) maximum size of geneSet to include in enrichment -- set to 300
# RETURNS a dataframe of enrichment results, sorted by increasing FDR value. The columns are:
# term = name of gene set
# querySetFraction = the fraction of the query set that overlaps with the term set
# geneSetFraction = the fraction of the term set that overlaps with the query set
# foldEnrichment = the fold enrichment of the query set with the term genes
# P = P value estimating the significance with which the query set is enriched with the term genes
# FDR = FDR value estimating the significance of enrichment
# overlapGenes = a |-separated list of genes in the overlap of the query set and the term set;
# if scoreMat is provided (not NULL), the scores of the genes are shown in parentheses
# maxOverlapGeneScore = if scoreMat is provided (not NULL), the maximum score of the overlapGenes
####
#### querySetFraction = size of the overlap divided by the number of genes scored significant in the screen
#### geneSetFraction = size of the overlap divided by the number of genes in the geneSet
#### overlap = size of the intersect of the query set and the geneSet
#### bgRate (background rate) = geneSet size divided by the total number of genes in the universe (rows of the matrix)
#### foldEnrichment = querySetFraction / bgRate
#### equivalently: foldEnrichment * bgRate = querySetFraction
runGORESP = function (fdrThresh = 1, curr_exp, mat,coln,sig, bp_path = "2021_April5_GOBP_SGD.RDS",go_path = NULL, bp_input = NULL, minGeneSetSize = 2){
NONSPECIFIC.TERMS <- list(mf=c("MOLECULAR_FUNCTION", "BINDING", "CATALYTIC ACTIVITY"),
cc=c("CELL", "CELL CORTEX PART", "CELL DIVISION SITE PART", "CELL FRACTION", "CELL PART", "CELL PERIPHERY", "CELL PROJECTION PART", "CELL WALL PART", "CELLULAR_COMPONENT", "CHROMOSOMAL PART", "CYTOPLASMIC PART", "CYTOPLASMIC VESICLE PART", "CYTOSKELETAL PART", "CYTOSOLIC PART", "ENDOPLASMIC RETICULUM PART", "ENDOSOMAL PART", "EXTERNAL ENCAPSULATING STRUCTURE", "EXTERNAL ENCAPSULATING STRUCTURE PART", "EXTRINSIC TO MEMBRANE", "GOLGI APPARATUS PART", "INSOLUBLE FRACTION", "INTEGRAL TO MEMBRANE", "INTEGRAL TO MEMBRANE OF MEMBRANE FRACTION", "INTRACELLULAR", "INTRACELLULAR ORGANELLE", "INTRACELLULAR ORGANELLE LUMEN", "INTRACELLULAR ORGANELLE PART", "INTRACELLULAR PART", "INTRACELLULAR MEMBRANE-BOUNDED ORGANELLE", "INTRACELLULAR NON-MEMBRANE-BOUNDED ORGANELLE", "INTRINSIC TO MEMBRANE", "MEMBRANE", "MEMBRANE-BOUNDED ORGANELLE", "MEMBRANE-ENCLOSED LUMEN", "MEMBRANE FRACTION", "MEMBRANE PART", "MICROBODY PART", "MICROTUBULE ORGANIZING CENTER PART", "MITOCHONDRIAL MEMBRANE PART", "MITOCHONDRIAL PART", "NON-MEMBRANE-BOUNDED ORGANELLE", "NUCLEAR CHROMOSOME PART", "NUCLEAR MEMBRANE PART", "NUCLEAR PART", "NUCLEOLAR PART", "NUCLEOPLASM PART", "ORGANELLE", "ORGANELLE INNER MEMBRANE", "ORGANELLE LUMEN", "ORGANELLE MEMBRANE", "ORGANELLE OUTER MEMBRANE", "ORGANELLE PART", "ORGANELLE SUBCOMPARTMENT", "PERIPHERAL TO MEMBRANE OF MEMBRANE FRACTION", "PEROXISOMAL PART", "PLASMA MEMBRANE ENRICHED FRACTION", "PLASMA MEMBRANE PART", "VACUOLAR PART", "VESICULAR FRACTION"),
bp=c("POSITIVE REGULATION OF MACROMOLECULE METABOLIC PROCESS", "REGULATION OF CELLULAR COMPONENT ORGANIZATION", "POSITIVE REGULATION OF METABOLIC PROCESS", "POSITIVE REGULATION OF CELLULAR METABOLIC PROCESS", "POSITIVE REGULATION OF CELLULAR PROCESS", "REGULATION OF CELLULAR PROCESS", "CELLULAR NITROGEN COMPOUND BIOSYNTHETIC PROCESS", "POSITIVE REGULATION OF NITROGEN COMPOUND METABOLIC PROCESS", "REGULATION OF CATALYTIC ACTIVITY", "POSITIVE REGULATION OF CATALYTIC ACTIVITY", "REGULATION OF MOLECULAR FUNCTION", "POSITIVE REGULATION OF CELLULAR COMPONENT ORGANIZATION", "REGULATION OF ORGANELLE ORGANIZATION", "POSITIVE REGULATION OF CATABOLIC PROCESS", "POSITIVE REGULATION OF CELLULAR CATABOLIC PROCESS", "POSITIVE REGULATION OF MOLECULAR FUNCTION", "REGULATION OF CATABOLIC PROCESS", "REGULATION OF CELLULAR CATABOLIC PROCESS", "CELLULAR RESPONSE TO CHEMICAL STIMULUS", "CELLULAR RESPONSE TO ORGANIC SUBSTANCE", "POSITIVE REGULATION OF BIOSYNTHETIC PROCESS", "POSITIVE REGULATION OF CELLULAR BIOSYNTHETIC PROCESS", "POSITIVE REGULATION OF MACROMOLECULE BIOSYNTHETIC PROCESS", "CELLULAR CARBOHYDRATE METABOLIC PROCESS", "REGULATION OF CELLULAR PROTEIN METABOLIC PROCESS", "REGULATION OF PROTEIN METABOLIC PROCESS", "NEGATIVE REGULATION OF BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF CELLULAR BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF CELLULAR MACROMOLECULE BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF MACROMOLECULE METABOLIC PROCESS", "RESPONSE TO EXTERNAL STIMULUS", "RESPONSE TO EXTRACELLULAR STIMULUS", "CELLULAR HOMEOSTASIS", "HOMEOSTATIC PROCESS", "REGULATION OF HOMEOSTATIC PROCESS", "ORGANIC SUBSTANCE TRANSPORT", "CELLULAR NITROGEN COMPOUND CATABOLIC PROCESS", "ORGANIC ACID BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF ORGANELLE ORGANIZATION", "ORGANELLE FISSION", "NEGATIVE REGULATION OF CELLULAR COMPONENT ORGANIZATION", "NEGATIVE REGULATION OF NITROGEN COMPOUND METABOLIC PROCESS", "CELLULAR DEVELOPMENTAL PROCESS", "MAINTENANCE OF LOCATION IN CELL","REGULATION OF 
DEVELOPMENTAL PROCESS","SMALL MOLECULE CATABOLIC PROCESS","ORGANIC ACID TRANSPORT","CARBOXYLIC ACID TRANSPORT", "CELLULAR RESPONSE TO EXTERNAL STIMULUS","NEGATIVE REGULATION OF RESPONSE TO STIMULUS","RESPONSE TO ENDOGENOUS STIMULUS","CELLULAR RESPONSE TO ENDOGENOUS STIMULUS","REGULATION OF LIGASE ACTIVITY", "CELLULAR COMPONENT MACROMOLECULE BIOSYNTHETIC PROCESS","REGULATION OF CELLULAR KETONE METABOLIC PROCESS", "POSITIVE REGULATION OF ORGANELLE ORGANIZATION", "RIBONUCLEOPROTEIN COMPLEX BIOGENESIS", "PROTEIN COMPLEX SUBUNIT ORGANIZATION", "PROTEIN COMPLEX BIOGENESIS", "PROTEIN COMPLEX ASSEMBLY", "CELLULAR PROTEIN COMPLEX ASSEMBLY", "RIBONUCLEOPROTEIN COMPLEX SUBUNIT ORGANIZATION", "RIBONUCLEOPROTEIN COMPLEX ASSEMBLY", "REGULATION OF PROTEIN COMPLEX ASSEMBLY", "PROTEIN COMPLEX DISASSEMBLY", "RIBONUCLEOPROTEIN COMPLEX LOCALIZATION", "RIBONUCLEOPROTEIN COMPLEX EXPORT FROM NUCLEUS", "CELLULAR PROTEIN COMPLEX DISASSEMBLY", "REGULATION OF PROTEIN COMPLEX DISASSEMBLY", "PROTEIN COMPLEX LOCALIZATION", "POSITIVE REGULATION OF PROTEIN COMPLEX ASSEMBLY", "CELLULAR PROTEIN COMPLEX LOCALIZATION", "NEGATIVE REGULATION OF PROTEIN COMPLEX DISASSEMBLY", "NEGATIVE REGULATION OF PROTEIN COMPLEX ASSEMBLY", "SMALL NUCLEOLAR RIBONUCLEOPROTEIN COMPLEX ASSEMBLY", "RIBONUCLEOPROTEIN COMPLEX DISASSEMBLY", "CHAPERONE-MEDIATED PROTEIN COMPLEX ASSEMBLY", "POSITIVE REGULATION OF PROTEIN COMPLEX DISASSEMBLY", "NEGATIVE REGULATION OF MACROMOLECULE BIOSYNTHETIC PROCESS", "CELLULAR COMPONENT MOVEMENT", "CELLULAR COMPONENT DISASSEMBLY", "REGULATION OF CELLULAR COMPONENT SIZE", "CELLULAR COMPONENT MAINTENANCE", "REGULATION OF CELLULAR COMPONENT BIOGENESIS", "CELLULAR COMPONENT DISASSEMBLY AT CELLULAR LEVEL", "CELLULAR COMPONENT MAINTENANCE AT CELLULAR LEVEL", "NEGATIVE REGULATION OF CELLULAR METABOLIC PROCESS", "RESPONSE TO ORGANIC SUBSTANCE", "CELLULAR CHEMICAL HOMEOSTASIS", "CHEMICAL HOMEOSTASIS", "REGULATION OF RESPONSE TO STIMULUS", "POSITIVE REGULATION OF RESPONSE TO STIMULUS"),
bp.lenient=c("POSITIVE REGULATION OF MACROMOLECULE METABOLIC PROCESS", "REGULATION OF CELLULAR COMPONENT ORGANIZATION", "POSITIVE REGULATION OF METABOLIC PROCESS", "POSITIVE REGULATION OF CELLULAR METABOLIC PROCESS", "POSITIVE REGULATION OF CELLULAR PROCESS", "REGULATION OF CELLULAR PROCESS", "REGULATION OF CATALYTIC ACTIVITY", "POSITIVE REGULATION OF CATALYTIC ACTIVITY", "REGULATION OF MOLECULAR FUNCTION", "POSITIVE REGULATION OF CELLULAR COMPONENT ORGANIZATION", "REGULATION OF ORGANELLE ORGANIZATION", "POSITIVE REGULATION OF CATABOLIC PROCESS", "POSITIVE REGULATION OF CELLULAR CATABOLIC PROCESS", "POSITIVE REGULATION OF MOLECULAR FUNCTION", "REGULATION OF CATABOLIC PROCESS", "REGULATION OF CELLULAR CATABOLIC PROCESS", "CELLULAR RESPONSE TO CHEMICAL STIMULUS", "CELLULAR RESPONSE TO ORGANIC SUBSTANCE", "POSITIVE REGULATION OF BIOSYNTHETIC PROCESS", "POSITIVE REGULATION OF CELLULAR BIOSYNTHETIC PROCESS", "POSITIVE REGULATION OF MACROMOLECULE BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF CELLULAR BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF CELLULAR MACROMOLECULE BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF MACROMOLECULE METABOLIC PROCESS", "RESPONSE TO EXTERNAL STIMULUS", "RESPONSE TO EXTRACELLULAR STIMULUS", "CELLULAR HOMEOSTASIS", "HOMEOSTATIC PROCESS", "REGULATION OF HOMEOSTATIC PROCESS", "ORGANIC SUBSTANCE TRANSPORT", "ORGANIC ACID BIOSYNTHETIC PROCESS", "NEGATIVE REGULATION OF ORGANELLE ORGANIZATION", "ORGANELLE FISSION", "NEGATIVE REGULATION OF CELLULAR COMPONENT ORGANIZATION", "CELLULAR DEVELOPMENTAL PROCESS", "MAINTENANCE OF LOCATION IN CELL","REGULATION OF DEVELOPMENTAL PROCESS","SMALL MOLECULE CATABOLIC PROCESS","ORGANIC ACID TRANSPORT", "CELLULAR RESPONSE TO EXTERNAL STIMULUS","NEGATIVE REGULATION OF RESPONSE TO STIMULUS","RESPONSE TO ENDOGENOUS STIMULUS","CELLULAR RESPONSE TO ENDOGENOUS STIMULUS","REGULATION OF LIGASE ACTIVITY", "CELLULAR COMPONENT MACROMOLECULE BIOSYNTHETIC PROCESS", "POSITIVE 
REGULATION OF ORGANELLE ORGANIZATION", "NEGATIVE REGULATION OF MACROMOLECULE BIOSYNTHETIC PROCESS", "CELLULAR COMPONENT MOVEMENT", "CELLULAR COMPONENT DISASSEMBLY", "REGULATION OF CELLULAR COMPONENT SIZE", "CELLULAR COMPONENT MAINTENANCE", "REGULATION OF CELLULAR COMPONENT BIOGENESIS", "CELLULAR COMPONENT DISASSEMBLY AT CELLULAR LEVEL", "CELLULAR COMPONENT MAINTENANCE AT CELLULAR LEVEL", "NEGATIVE REGULATION OF CELLULAR METABOLIC PROCESS", "RESPONSE TO ORGANIC SUBSTANCE", "CELLULAR CHEMICAL HOMEOSTASIS", "CHEMICAL HOMEOSTASIS", "REGULATION OF RESPONSE TO STIMULUS", "POSITIVE REGULATION OF RESPONSE TO STIMULUS"),
complexes=c("GOLGI APPARATUS","CELL CORTEX","CELL WALL","CELLULAR BUD","CHROMOSOME","CYTOPLASM","CYTOPLASMIC MEMBRANE-BOUNDED VESICLE","CYTOSKELETON","ENDOMEMBRANE SYSTEM","ENDOPLASMIC RETICULUM","MEMBRANE FRACTION","MEMBRANE","MICROTUBULE ORGANIZING CENTER","MITOCHONDRIAL ENVELOPE","MITOCHONDRION","HETEROGENEOUS NUCLEAR RIBONUCLEOPROTEIN COMPLEX","NUCLEOLUS","NUCLEUS","PEROXISOME","PLASMA MEMBRANE","SITE OF POLARIZED GROWTH","VACUOLE","POLAR MICROTUBULE","SMALL NUCLEAR RIBONUCLEOPROTEIN COMPLEX","SMALL NUCLEOLAR RIBONUCLEOPROTEIN COMPLEX","TRANSCRIPTION FACTOR COMPLEX","CDC73-PAF1 COMPLEX","SIGNAL RECOGNITION PARTICLE", "ARP2-3 PROTEIN COMPLEX", "CCR4-NOT NOT2-NOT5 SUBCOMPLEX", "CDC48-UFD1-NPL4 COMPLEX", "EKC-KEOPS PROTEIN COMPLEX", "HDA COMPLEX", "HRD1 UBIQUITIN LIGASE ERAD-L COMPLEX", "MRNA CAPPING ENZYME COMPLEX", "NRD1-NAB3-SEN1 TERMINATION COMPLEX", "RIC1-RGP1 COMPLEX", "SNRNP U2", "SNRNP U6", "RNA POLYMERASE III COMPLEX"))
##############
# gplots supplies col2hex(), used below to convert the named R colors.
# NOTE(review): loading a package inside a function attaches it globally.
library(gplots)
##############
# Named R colors appended to the hex palette for cluster coloring.
mycolors = c(
"darkorange1",
"dodgerblue",
"darkgreen",
"navy",
"mediumpurple" ,
"royalblue3",
"darkolivegreen4",
"firebrick",
"cyan4",
"hotpink3",
"plum4",
"blue",
"magenta4",
"skyblue3",
"green4",
"red3",
"steelblue3",
"tomato",
"purple4",
"goldenrod3",
"steelblue",
"darkred",
"lightpink3",
"darkorchid",
"lightblue3",
"dimgrey",
"chocolate1",
"seagreen3",
"darkkhaki",
"darksalmon"
)
##############
# Hex palette used to color enriched-term clusters in clusterEnrich().
CLUST.COL <- c("#FF00CC","#33CCFF", "#33CC00", "#9900FF", "#FF9900", "#FFFF00", "#FFCCFF", "#FF0000", "#006600", "#009999", "#CCCC00", "#993300", "#CC99CC", "#6699CC","#CCCCFF", "#FFCC99", "#9966FF", "#CC6600", "#CCFFFF", "#99CC00", "#FF99FF", "#0066FF", "#66FFCC", "#99CCFF", "#9999CC", "#CC9900", "#CC33FF", "#006699", "#F5DF16", "#B5185E", "#99FF00", "#00FFFF", "#990000", "#CC0000", "#33CCCC", "#CC6666", "#996600", "#9999FF", "#3366FF")
rc=col2hex(mycolors)
# Gray used for terms pruned out of any cluster.
prunedCol <- "#BEBEBE"
CLUST.COL = c(CLUST.COL,rc)
# Hard-coded upper bound on gene-set size (see header: "not an option").
maxSetSize = 300
##### required functions
# computes the number of unique pairs given the number of items to consider
# maxVal - the maximum number of items
# RETURNS a 2-column matrix where each row contains a different pair, specified with item indices
getUniquePairs = function (maxVal)
{
    # Enumerate all unordered index pairs (i, j) with 1 <= i < j <= maxVal,
    # ordered by the first index: (1,2), (1,3), ..., (1,maxVal), (2,3), ...
    # Index i appears once for each of the maxVal - i larger indices.
    firstI <- rep.int(seq_len(maxVal - 1), rev(seq_len(maxVal - 1)))
    secondI <- lapply(seq_len(maxVal - 1), function(i) (i + 1):maxVal)
    # Two-column matrix; column layout matches the original implementation.
    cbind(firstI, unlist(secondI))
}
##############
# generates a dataframe of gene sets that are *not* significantly enriched, yet they contain query genes,
# i.e. genes in chemical-genetic interactions
# queryGeneSets - named list of queryGeneSets, where each list element is a vector of query genes
# - the names are filenames (typically) identifying different experiments
# enrichMat - dataframe with enrichment stats for gene sets (one per row), with the following columns:
# filename, term, geneSetFraction, FDR, overlapGenes, maxOverlapGeneScore
# - see documentation for the output of hyperG() for descriptions of these columns
# - rows with the same value, x, in the filename column specify enrichment results for
# the set of query genes in queryGeneSets with name=x
# scoreMat - score matrix/dataframe; row names are gene IDs and column names are filenames
# - each column contains a different set of scores
# termsToExclude - vector of terms (i.e. names of gene sets) to exclude from the results; can be NULL
# fdrThresh - FDR threshold; only show gene sets that do not pass this significance threshold
# RETURNS a dataframe of gene sets that are not significantly enriched (one per row),
# sorted by decreasing maxOverlapGeneScore value. The dataframe includes these columns:
# filename = filename identifying the query gene set
# term = gene set name
# geneSetFraction = the fraction of the term set that overlaps with the query set
# overlapGenes = a |-separated list of genes in the overlap of the query set and the term set;
# the scores of the genes are shown in parentheses
# maxOverlapGeneScore = the maximum score of the overlapGenes
# unenrichedGenes = a |-separated list of genes in the overlap of the query set and the term set
# that *also* do not belong to any significantly enriched term set;
#
genesNotInEnrichedTerm = function (queryGeneSets, enrichMat, scoreMat, termsToExclude ,
fdrThresh = 0.1)
{
# Work on a plain score matrix; drop the excluded (nonspecific) terms up front.
scoreMat <- as.matrix(scoreMat)
enrichMat <- enrichMat[!(enrichMat$term %in% termsToExclude),
, drop = F]
lens <- sapply(queryGeneSets, length)
queryGeneSets <- queryGeneSets[lens > 0]
# Parse "gene(score)|gene(score)" strings back into bare gene-name vectors.
oGenes <- strsplit(enrichMat$overlapGenes, "\\|")
oGenes <- lapply(oGenes, function(genes) {
genes <- strsplit(genes, "\\(")
sapply(genes, function(vec) {
vec[1]
})
})
# Group enrichment rows by experiment (filename) and align them with the
# query gene sets; experiments with no enrichment rows contribute all of
# their query genes to extraGenes.
rowI <- split(1:nrow(enrichMat), enrichMat$filename)
enrichI <- match(names(queryGeneSets), names(rowI))
extraGenes <- queryGeneSets[is.na(enrichI)]
queryGeneSets <- queryGeneSets[!is.na(enrichI)]
rowI <- rowI[enrichI[!is.na(enrichI)]]
# extraGenes: query genes that overlap NO term row at all for their experiment.
tmp <- lapply(1:length(queryGeneSets), function(expI) {
setdiff(queryGeneSets[[expI]], unlist(oGenes[rowI[[expI]]]))
})
names(tmp) <- names(queryGeneSets)
extraGenes <- c(extraGenes, tmp)
lens <- sapply(extraGenes, length)
extraGenes <- extraGenes[lens > 0]
if (length(extraGenes) > 0) {
# Score the leftover genes, sort by decreasing score, and reformat per
# experiment as c(maxScore, "gene(score)|gene(score)|...").
lens <- lens[lens > 0]
extraGenes <- data.frame(filename = rep(names(extraGenes),
lens), gene = unlist(extraGenes), stringsAsFactors = F)
i <- match(extraGenes$gene, rownames(scoreMat))
i <- cbind(i, match(extraGenes$filename, colnames(scoreMat)))
extraGenes$score <- round(scoreMat[i], 2)
extraGenes <- extraGenes[order(extraGenes$score, decreasing = T),
]
i <- split(1:nrow(extraGenes), extraGenes$filename)
extraGenes <- lapply(i, function(curRow) {
tmp <- paste(extraGenes$gene[curRow], "(", extraGenes$score[curRow],
")", sep = "")
c(extraGenes$score[curRow[1]], paste(tmp, collapse = "|"))
})
}
# For each experiment: keep only term rows that are NOT significant
# (FDR > fdrThresh) and record which of their overlap genes do not belong
# to any significantly enriched term.
tmp <- lapply(1:length(queryGeneSets), function(expI) {
curRow <- rowI[[expI]]
sigI <- curRow[enrichMat$FDR[curRow] <= fdrThresh]
unenrichedGenes <- setdiff(queryGeneSets[[expI]], unlist(oGenes[sigI]))
curRow <- setdiff(curRow, sigI)
if (length(curRow) == 0) {
return(list(rowI = NULL, unenrichedGenes = NULL))
}
unenrichedGenes <- lapply(oGenes[curRow], function(genes) {
intersect(unenrichedGenes, genes)
})
lens <- sapply(unenrichedGenes, length)
unenrichedGenes <- unenrichedGenes[lens > 0]
curRow <- curRow[lens > 0]
if (length(curRow) == 0) {
return(list(rowI = NULL, unenrichedGenes = NULL))
}
# NOTE: expI is reused here as the score-matrix column index for this
# experiment (not the list index any more).
expI <- match(enrichMat$filename[curRow[1]], colnames(scoreMat))
unenrichedGenes <- lapply(unenrichedGenes, function(curGenes) {
geneI <- match(curGenes, rownames(scoreMat))
geneStr <- scoreMat[geneI, expI]
names(geneStr) <- curGenes
geneStr <- round(sort(geneStr, decreasing = T), 2)
geneStr <- paste(names(geneStr), "(", geneStr, ")",
sep = "")
paste(geneStr, collapse = "|")
})
list(rowI = curRow, unenrichedGenes = unenrichedGenes)
})
# Assemble the output rows from the retained (non-significant) term rows.
unenrichedMat <- enrichMat[unlist(lapply(tmp, function(ob) {
ob$rowI
})), ]
unenrichedMat$unenrichedGenes <- unlist(lapply(tmp, function(ob) {
ob$unenrichedGenes
}))
if (length(extraGenes) > 0) {
# Prepend synthetic "OTHER" rows for genes not in any term; row 1 is
# duplicated as a template and its non-relevant columns are set to NA.
unenrichedMat <- unenrichedMat[c(rep(1, length(extraGenes)),
1:nrow(unenrichedMat)), ]
toDoI <- 1:length(extraGenes)
unenrichedMat$filename[toDoI] <- names(extraGenes)
unenrichedMat$term[toDoI] <- "OTHER"
unenrichedMat$overlapGenes[toDoI] <- sapply(extraGenes,
function(vec) {
vec[2]
})
unenrichedMat$maxOverlapGeneScore[toDoI] <- as.numeric(sapply(extraGenes,
function(vec) {
vec[1]
}))
unenrichedMat$unenrichedGenes[toDoI] <- unenrichedMat$overlapGenes[toDoI]
if (!is.null(unenrichedMat$pruneOutcome)) {
unenrichedMat$pruneOutcome[toDoI] <- "OTHER"
}
naCol <- setdiff(colnames(unenrichedMat), c("filename",
"term", "overlapGenes", "maxOverlapGeneScore", "unenrichedGenes"))
colI <- match(naCol, colnames(unenrichedMat))
unenrichedMat[toDoI, colI] <- NA
}
rownames(unenrichedMat) <- NULL
# Sort by geneSetFraction, then (final, returned order) by
# maxOverlapGeneScore, both decreasing.
unenrichedMat <- unenrichedMat[order(unenrichedMat$geneSetFraction,
decreasing = T), ]
unenrichedMat[order(unenrichedMat$maxOverlapGeneScore, decreasing = T),
]
}
# computes enrichment using the hypergeometric test, and uses the resulting P values with
# the Benjamini Hochberg method to estimate FDR values
# querySet - character vector of genes in query set
# geneSets - named list of gene sets to test for significant overlap w/ the query set
# scoreMat - dataframe of gene scores
# - first column = scores, gene column
# - can be NULL
# uni - character vector of genes in the universe (i.e. background set)
# - if NULL, must specify uniSize
# uniSize - the # of genes in the universe
# minSetSize, maxSetSize - min/max # of genes in geneSets (after restricting to the gene universe)
# RETURNS a dataframe of enrichment results, sorted by increasing FDR value. The columns are:
# term = name of gene set
# querySetFraction = the fraction of the query set that overlaps with the term set
# geneSetFraction = the fraction of the term set that overlaps with the query set
# foldEnrichment = the fold enrichment of the query set with the term genes
# P = P value estimating the significance with which the query set is enriched with the term genes
# FDR = FDR value estimating the significance of enrichment
# overlapGenes = a |-separated list of genes in the overlap of the query set and the term set;
# if scoreMat is provided (not NULL), the scores of the genes are shown in parentheses
# maxOverlapGeneScore = if scoreMat is provided (not NULL), the maximum score of the overlapGenes
# Hypergeometric enrichment of querySet against each gene set, with BH FDR.
# See the comment block above for full parameter/return documentation.
# NOTE: the default minSetSize = minGeneSetSize resolves in the enclosing
# runGORESP() environment (lazy evaluation); callers outside runGORESP
# should pass minSetSize explicitly.
# Fix vs. previous version: the function used to end in an assignment, so
# its value was returned *invisibly*; it now returns the sorted data.frame
# visibly. T/F literals replaced with TRUE/FALSE.
hyperG = function (querySet, geneSets, uni, scoreMat, minSetSize = minGeneSetSize,
maxSetSize = 300, uniSize = NA)
{
    # Restrict gene sets to the universe and filter by size; the universe
    # size then overrides any supplied uniSize.
    if (!is.null(uni)) {
        geneSets <- lapply(geneSets, intersect, uni)
        lens <- sapply(geneSets, length)
        geneSets <- geneSets[lens >= minSetSize & lens <= maxSetSize]
        uniSize <- length(uni)
    }
    # Sort scores decreasing and restrict to the universe, so overlap genes
    # can be reported as "gene(score)" in score order.
    if (!is.null(scoreMat)) {
        scoreMat <- scoreMat[order(scoreMat$score, decreasing = TRUE), ]
        if (!is.null(uni)) {
            i <- match(uni, scoreMat$gene)
            scoreMat <- scoreMat[sort(i[!is.na(i)]), ]
        }
        scoreMat$score <- round(scoreMat$score, 2)
    }
    # One column per gene set: c(overlapGenes, geneSetFraction,
    # foldEnrichment, P, maxScore, querySetFraction), all as character.
    enrichInfo <- sapply(geneSets, function(geneSet) {
        overlapSet <- intersect(querySet, geneSet)
        # P(overlap >= observed) under the hypergeometric null.
        pVal <- phyper(length(overlapSet) - 1, length(geneSet),
            uniSize - length(geneSet), length(querySet), lower.tail = FALSE)
        if (length(overlapSet) > 0) {
            overlapSet <- sort(overlapSet)
        }
        overlapSize <- length(overlapSet)
        if (is.null(scoreMat)) {
            maxScore <- NA
        }
        else {
            # scoreMat is score-sorted, so the first matched index has the
            # maximum score of the overlap genes.
            i <- sort(match(overlapSet, scoreMat$gene))
            maxScore <- scoreMat$score[i[1]]
            overlapSet <- paste(scoreMat$gene[i], "(", scoreMat$score[i],
                ")", sep = "")
        }
        overlapSet <- paste(overlapSet, collapse = "|")
        bgRate <- length(geneSet)/uniSize
        foldEnrich <- overlapSize/length(querySet)/bgRate
        c(overlapSet, overlapSize/length(geneSet), foldEnrich,
            pVal, maxScore, overlapSize/length(querySet))
    })
    enrichInfo <- t(enrichInfo)
    enrichCol <- data.frame(term = names(geneSets),
        querySetFraction = as.numeric(enrichInfo[, 6]),
        geneSetFraction = as.numeric(enrichInfo[, 2]),
        foldEnrichment = as.numeric(enrichInfo[, 3]),
        P = as.numeric(enrichInfo[, 4]),
        FDR = p.adjust(as.numeric(enrichInfo[, 4]), method = "BH"),
        overlapGenes = enrichInfo[, 1],
        maxOverlapGeneScore = as.numeric(enrichInfo[, 5]),
        stringsAsFactors = FALSE)
    rownames(enrichCol) <- NULL
    # Return (visibly) sorted by increasing FDR.
    enrichCol[order(enrichCol$FDR), ]
}
#######hiphop:::overlapCoeff
#######the overlap of genesets for all combinations
#######If set X is a subset of Y or the converse then the overlap coefficient is equal to 1.
# compute the overlap coefficient given a pair of (gene) sets
# gsPairList - a list of two sets (each set is a vector of IDs)
# RETURNS the overlap coefficient
overlapCoeff = function (gsPairList)
{
    # Overlap (Szymkiewicz-Simpson) coefficient: |X ∩ Y| / min(|X|, |Y|).
    # Equals 1 whenever one set is contained in the other.
    setA <- gsPairList[[1]]
    setB <- gsPairList[[2]]
    nCommon <- length(intersect(setA, setB))
    nCommon / min(length(setA), length(setB))
}
####### given enrichInfo after hyperG, computes edgeMat for making enrichment map
# generates an enrichment map in xgmml format using hypergeometric test statistics
# enrichInfo - dataframe with enrichment stats for gene sets (one per row), with the following columns:
# term, geneSetFraction, querySetFraction, FDR, overlapGenes, maxOverlapGeneScore
# - see documentation for the output of hyperG() for descriptions of these columns
# geneSets - named list of gene sets tested for significant overlap w/ the query set,
# restricted to genes in the universe
# outFile - the output xgmml file will be saved to this location
# fdrThresh - FDR threshold; only show gene sets that pass this significance threshold
# overlapThresh - an edge between a pair of enriched gene sets will only be shown if the overlap coefficient
# is >= overlapThresh
# nonEnrichInfo - dataframe with info on gene sets that are *not* significantly enriched (one per row)
# with the following columns:
# term, overlapGenes, maxOverlapGeneScore, geneSetFraction, unenrichedGenes
# - see documentation for the output of genesNotInEnrichedTerm() for descriptions of these columns
# - can be NULL
# barModGenes - if provided (i.e. not NULL) a vector of genes that should be marked distinctly in the
# barplots, should they be in the top overlap genes
# scoreName - score label to use in top overlap gene barplots
# plotForEachEnrichedTerm - if TRUE, a top overlap gene barplot will be created for each enriched term;
# if FALSE, a barplot will be created for each enriched term cluster
# goTable - a dataframe with the following columns describing GO terms:
# - "term" (GO term), "id" (GOID)
# - if provided (i.e. not NULL), the GO ID numbers of the enriched GO terms will be saved in
# the output xgmml file as a node attribute called "GOID".
# - the GOID allows for an easy link to the GO website page for the associated GO term
###############
#### query set = genes with a significant fitness defect; uni = all scored genes in the data matrix (the background universe for the enrichment test)
# NOTE(review): outFile, barModGenes, scoreName and plotForEachEnrichedTerm
# are accepted but never used in this version; the function returns a list
# (enrichInfo, edgeMat) instead of writing an xgmml file.
clusterEnrich = function (enrichInfo, geneSets, outFile, fdrThresh = 0.1, overlapThresh = 0.5,
nonEnrichInfo = NULL, barModGenes = NULL, scoreName = "Fitness defect score",
plotForEachEnrichedTerm = F, go_path = NULL){
# goTable is loaded only when go_path is given; assumes the file has "term"
# and "GOID" columns — TODO confirm against the GO table format.
go_file = file.path(go_path)
if(!is.null(go_path)) goTable = read.delim(go_file,stringsAsFactors = F,check.names = F)
# Visual parameters: node-size range, pruned-node color, label wrap width,
# edge-width range, and the overlap-coefficient range mapped onto edge width.
nodeSizeRange <- c(10, 40)
prunedCol <- "#BEBEBE"
labelWidth <- 20
edgeWidthRange <- c(1, 5)
overlapCoeffRange <- c(overlapThresh, 1)
# Pretty-print the non-enriched table: percent fractions, rounded scores,
# and overlap genes re-joined with "| " (unenriched genes wrapped in <b>).
if (!is.null(nonEnrichInfo)) {
nonEnrichInfo$maxOverlapGeneScore <- round(nonEnrichInfo$maxOverlapGeneScore,
2)
nonEnrichInfo$geneSetFraction <- round(nonEnrichInfo$geneSetFraction *100, 1)
if (is.null(nonEnrichInfo$nGenes)) {
lens <- sapply(geneSets, length)
i <- match(nonEnrichInfo$term, names(lens))
nonEnrichInfo$nGenes <- lens[i]
}
tmp <- strsplit(nonEnrichInfo$overlapGenes, "\\|")
w <- which(is.na(tmp))
if(length(w)>0) tmp = tmp[-w]
if (is.null(nonEnrichInfo$unenrichedGenes)) {
nonEnrichInfo$overlapGenes <- sapply(tmp, paste,
collapse = "| ")
}
else {
unEnriched <- strsplit(nonEnrichInfo$unenrichedGenes,
"\\|")
tmp.mod <- sapply(1:length(tmp), function(termI) {
vec <- tmp[[termI]]
i <- match(unEnriched[[termI]], tmp[[termI]])
vec[i] <- paste("<b>", vec[i], "</b>", sep = "")
paste(vec, collapse = "| ")
})
nonEnrichInfo$overlapGenes <- tmp.mod
}
if (is.null(enrichInfo)) {
return()
}
}
# Keep only terms passing the FDR threshold; bail out if none remain.
enrichInfo <- enrichInfo[enrichInfo$FDR <= fdrThresh, , drop = F]
# NOTE(review): `enrich` is assigned but never used below.
enrich = enrichInfo
if(!is.null(go_path)) {
toDoI <- match(enrichInfo$term, goTable$term)
enrichInfo$GOID = goTable$GOID[toDoI]
}
if (nrow(enrichInfo) == 0) {
return()
}
# Wrap term names to labelWidth characters for display labels.
enrichInfo$formattedLabel <- sapply(enrichInfo$term, function(curLabel) {
curLabel <- strwrap(curLabel, labelWidth)
paste(curLabel, collapse = "\n")
})
i <- match(enrichInfo$term, names(geneSets))
if (any(is.na(i))) {
stop("Could not find gene sets for ", sum(is.na(i)),
" enriched terms.")
}
geneSets <- geneSets[i]
if (is.null(enrichInfo$nGenes)) {
enrichInfo$nGenes <- sapply(geneSets, length)
}
# Node size = -log10(FDR) rescaled into nodeSizeRange; infinite values
# (FDR == 0) are capped at max finite value + 2.
tmpSize <- -log10(enrichInfo$FDR)
maxVal <- max(tmpSize[!is.infinite(tmpSize)])
tmpSize[is.infinite(tmpSize)] <- maxVal + 2
gsSizeRange <- range(tmpSize)
if (gsSizeRange[1] == gsSizeRange[2]) {
gsSizeRange[1] <- -log10(fdrThresh)
gsSizeRange[2] <- gsSizeRange[2] + 1
}
tmpSize <- (tmpSize - gsSizeRange[1])/(gsSizeRange[2] - gsSizeRange[1])
tmpSize <- nodeSizeRange[1] + tmpSize * (nodeSizeRange[2] -nodeSizeRange[1])
enrichInfo$size <- round(tmpSize, 2)
if (nrow(enrichInfo) == 1) {
enrichInfo$cluster <- CLUST.COL[1]
edgeMat <- NULL
}
######################### EDGE MAT ########################
else {
# Pairwise overlap coefficients between enriched gene sets; coefficients
# below overlapThresh are zeroed (no edge).
pairI <- getUniquePairs(length(geneSets))
distVal <- apply(pairI, 1, function(onePair) {
overlapCoeff(geneSets[onePair])
})
distVal[distVal < overlapThresh] <- 0
edgeMat <- data.frame(nodeA = pairI[, 1], nodeB = pairI[,2], coeff = distVal)
enrichInfo$cluster <- prunedCol
if (is.null(enrichInfo$pruneOutcome)) {
termI <- 1:nrow(enrichInfo)
}
else {
termI <- which(enrichInfo$pruneOutcome == enrichInfo$term)
}
if (length(termI) == 1) {
enrichInfo$cluster[termI] <- CLUST.COL[1]
}
else {
# Connected components of the thresholded overlap graph define the
# clusters; each component gets a color from CLUST.COL.
# NOTE(review): `i` and `adj` below are computed but never used.
i <- which((edgeMat$nodeA %in% termI) & (edgeMat$nodeB %in%
termI))
enrichInfo$id = termI
g=igraph::graph_from_data_frame(edgeMat[which(edgeMat$coeff!=0),],directed = F,vertices = enrichInfo$id)
adj = igraph::as_adjacency_matrix(g)
clusters = igraph::clusters(g)
clusters = split(names(clusters$membership),clusters$membership)
clusters <- lapply(clusters, as.numeric)
lens <- sapply(clusters, length)
clusters <- data.frame(id = unlist(clusters), cluster = CLUST.COL[rep(1:length(clusters),
lens)], stringsAsFactors = F)
enrichInfo$cluster[clusters$id] <- clusters$cluster
}
# Keep only surviving edges and rescale their coefficients into the
# edge-width range.
edgeMat <- edgeMat[edgeMat$coeff > 0, , drop = F]
if (nrow(edgeMat) > 0) {
edgeMat$size <- (edgeMat$coeff - overlapCoeffRange[1])/(overlapCoeffRange[2] -
overlapCoeffRange[1])
edgeMat$size <- edgeWidthRange[1] + edgeMat$size *
(edgeWidthRange[2] - edgeWidthRange[1])
edgeMat$coeff <- round(edgeMat$coeff, 2)
edgeMat$size <- round(edgeMat$size, 2)
}
else {
edgeMat <- NULL
}
}
# Order terms by cluster color, then FDR, with pruned (gray) terms last.
otherI <- order(enrichInfo$cluster)
otherI <- otherI[order(enrichInfo$FDR[otherI])]
termI <- which(enrichInfo$cluster[otherI] != prunedCol)
if (length(termI) < length(otherI)) {
otherI <- c(otherI[termI], otherI[-termI])
}
enrichInfo$id <- 1:nrow(enrichInfo)
enrichInfo <- enrichInfo[otherI, , drop = F]
enrichInfo$geneSetFraction <- round(enrichInfo$geneSetFraction *
100, 1)
enrichInfo$querySetFraction <- round(enrichInfo$querySetFraction *
100, 1)
if (is.null(edgeMat)) print("edgeMat is NULL")
# Rename edge columns (source/target/label/overlapCoeff/width) and label
# each edge "setA (overlap) setB".
if (!is.null(edgeMat)) {
nam = c("source","target","label","overlapCoeff","width")
orig = c("nodeA","nodeB","label","coeff","size")
src = names(geneSets)[edgeMat$nodeA]
trg = names(geneSets)[edgeMat$nodeB]
edgeMat$label = paste(src,"(overlap)",trg)
m = match(names(edgeMat),orig)
names(edgeMat) = nam[m]
}
output = list(enrichInfo = enrichInfo,edgeMat = edgeMat)
return(output)
}
###############
# Build a per-gene score table from one column of a fitness-defect matrix.
#
# mat  - numeric matrix with gene IDs as row names
# coln - column (index or name) of mat holding the scores to use
# sig  - significance threshold; genes with score > sig are flagged
#
# RETURNS a data.frame sorted by decreasing score (NAs last) with columns:
#   index - 1 if score > sig, else 0 (NA scores are flagged 0, matching the
#           original which()-based assignment)
#   score - the scores from mat[, coln]
#   gene  - gene ID (row name of mat)
#
# Fixes vs. previous version: no library(dplyr) side effect inside the
# function; the significance flag is computed vectorized instead of via
# which()/index assignment.
compSCORE <- function(mat, coln, sig){
  df <- data.frame(score = mat[, coln],
                   gene = rownames(mat),
                   stringsAsFactors = FALSE)
  # !is.na() keeps NA scores flagged 0, exactly as the original did.
  df$index <- as.numeric(!is.na(df$score) & df$score > sig)
  df <- df[, c("index", "score", "gene")]
  # Base-R equivalent of dplyr::arrange(desc(score)): stable decreasing
  # sort with NAs last, and row names reset (dplyr drops row names).
  df <- df[order(df$score, decreasing = TRUE, na.last = TRUE), ]
  rownames(df) <- NULL
  df
}
###### bulk of code
# Score every gene in the requested column; index == 1 marks significant hits.
score = compSCORE(mat,coln,sig = sig)
fdrThresh = as.numeric(fdrThresh)
bp_file = file.path(bp_path)
scoreMat = score
# Query set = significant genes; universe = all genes with a non-NA score.
queryGenes.mn <- sort(unique(scoreMat$gene[which(scoreMat$index >= 1)]))
uniGenes.mn <- sort(unique(scoreMat$gene[!is.na(scoreMat$score)]))
# Gene sets come from bp_input if supplied, otherwise from the RDS file.
if(!is.null(bp_input)) {bp = bp_input} else {bp <- readRDS(bp_file)}
#bp <- readRDS(bp_file)
#### intersect of the geneSets with the backgroundSet, filtering for size
enrichMat.mn <- hyperG(querySet = queryGenes.mn, geneSets = bp,
uni = uniGenes.mn, scoreMat = score, minSetSize = minGeneSetSize,
maxSetSize = 300, uniSize = NA)
queryGeneSets = list()
queryGeneSets[[curr_exp]] = queryGenes.mn
enrichMat.mn$filename <- curr_exp
# NOTE(review): enrichMat_Ordered is computed but never used below.
enrichMat_Ordered = enrichMat.mn[with(enrichMat.mn, order(FDR,
-foldEnrichment)), ]
# Reshape scores into a one-column matrix-like frame keyed by gene, as
# expected by genesNotInEnrichedTerm().
scoreMat <- scoreMat[order(scoreMat$score, decreasing = T),]
scoreMat <- scoreMat[match(uniGenes.mn, scoreMat$gene), "score",drop = F]
rownames(scoreMat) <- uniGenes.mn
colnames(scoreMat) <- curr_exp
nonEnrichMat.mn <- genesNotInEnrichedTerm(queryGeneSets,
enrichMat.mn, scoreMat, NONSPECIFIC.TERMS$bp, fdrThresh)
maxSetSize = 300
#### intersect of the geneSets with the backgroundSet, filtering for size
bp <- lapply(bp, intersect, uniGenes.mn)
lens <- sapply(bp, length)
bp <- bp[lens >= minGeneSetSize & lens <= maxSetSize]
# Cluster the enriched terms and build the enrichment-map edge table.
q = clusterEnrich(enrichInfo = enrichMat.mn, geneSets = bp,
fdrThresh = fdrThresh, overlapThresh = 0.5,
nonEnrichInfo = nonEnrichMat.mn, barModGenes = NULL,
scoreName = "score", plotForEachEnrichedTerm = F,go_path = go_path)
edgeMat = q$edgeMat
enrichInfo = q$enrichInfo
# NOTE(review): attaching dplyr inside the function is a global side effect.
library(dplyr)
# Final cosmetics: sort by FDR, add overlap counts, round/format columns.
if(!is.null(enrichInfo)) {
enrichInfo = enrichInfo %>% arrange(FDR)
# interSect = overlap size; nQuery = query-set size recovered from the
# two fractions (geneSetFraction/querySetFraction are percentages here).
enrichInfo$interSect = round(enrichInfo$geneSetFraction*enrichInfo$nGenes/100)
enrichInfo$nQuery = round((enrichInfo$geneSetFraction*enrichInfo$nGenes/100)/(enrichInfo$querySetFraction/100))
# NOTE(review): `w` is computed but never used.
w = which(names(enrichInfo) %in% c("querySetFraction", "geneSetFraction" ,
"foldEnrichment" , "P" , "FDR" ))
enrichInfo[,c("querySetFraction","geneSetFraction", "foldEnrichment")] =
round(enrichInfo[,c("querySetFraction","geneSetFraction", "foldEnrichment")],2)
enrichInfo[,c("P", "FDR")] =
signif(enrichInfo[,c("P", "FDR")],digits = 3)
} else { print("no GO enrichment!") }
return(list(enrichInfo = enrichInfo , edgeMat = q$edgeMat))
}
######### GENE SIGNATURE FUNCTION
# Split every string in `str` on the regex `splt` and return the
# `index`-th piece of each split (NA where a string has fewer pieces).
strSPLIT = function(str, splt, index) {
  sapply(strsplit(str, splt), `[`, index)
}
######### GENE SIGNATURE FUNCTION
# Collapse each element of a list of character vectors into one
# "|"-separated string, truncating any vector longer than `limit` to its
# first `limit` entries. Returns a list of collapsed strings (names kept).
collapseSTR = function(sresp, limit = 20) {
  lapply(sresp, function(items) {
    if (length(items) > limit) {
      items <- items[1:limit]
    }
    paste(items, collapse = "|")
  })
}
######### GENE SIGNATURE FUNCTION
# Inverse of collapseSTR: split every element of `lsn` on "|" and return a
# list of character vectors (names kept).
revSPLIT <- function(lsn) {
  lapply(lsn, function(x) unlist(strsplit(x, "\\|")))
}
#####################################################
#####################################################
#####################################################
############
# generates parameters for drawing GO enrichment networks
# enrichInfo - dataframe with enrichment stats for gene sets (one per row), with the following columns:
# filename, term, geneSetFraction, FDR, overlapGenes, maxOverlapGeneScore
# - see documentation for the output of hyperG() for descriptions of these columns
# - rows with the same value, x, in the filename column specify enrichment results for
# the set of query genes in queryGeneSets with name=x
# edgeMat - dataframe of edge set info: id=gene set id, cluster=cluster id, es=enrichment score, fdr=FDR value
#
####
# Prepare node and edge tables for drawing a GO-enrichment network with
# visNetwork.
# enrichInfo - dataframe of enrichment stats, one gene set per row; the code
#              below reads the columns id, cluster, formattedLabel, FDR, term,
#              interSect, nQuery, nGenes, geneSetFraction, querySetFraction,
#              filename and overlapGenes
# edgeMat    - dataframe of gene-set overlap edges, or NULL when there are
#              no overlaps
# fontsize   - base font size for node labels; the lowest-FDR node is drawn
#              4pt larger and bold
# fontface   - font family for node labels
# Returns the list produced by visNetwork::toVisNetworkData() ($nodes and
# $edges dataframes) with display attributes filled in and nodes ordered
# by FDR.
visSetup = function(enrichInfo, edgeMat, fontsize = 22, fontface = "Arial") {
  library(igraph)
  library(visNetwork)
  n = enrichInfo
  e = edgeMat
  # move the id column to the front (graph_from_data_frame expects the
  # vertex key as the first column)
  w = which(names(n) == "id")
  coln = (1:ncol(n))[-w]
  n = n[, c(w, coln)]
  if (is.null(e) && !is.null(n)) {
    # no overlap edges: build an edgeless graph with one vertex per term
    gr = make_empty_graph(nrow(enrichInfo))
    v = gr
    V(v)$color.background = n$cluster
    v = set_vertex_attr(v, "label", value = n$formattedLabel)
  }
  if (!is.null(e) && !is.null(n)) {
    # drop the edge label column before building the graph
    w = which(names(e) == "label")
    let = graph_from_data_frame(e[, -w], vertices = n, directed = FALSE)
    v = set_vertex_attr(let, "label", value = n$formattedLabel)
    V(v)$color.background = n$cluster
  }
  vis = toVisNetworkData(v)
  vis$nodes = data.frame(vis$nodes, stringsAsFactors = FALSE)
  if (!is.null(vis$edges))
    vis$edges = data.frame(vis$edges, stringsAsFactors = FALSE)
  # re-attach the enrichment columns in graph vertex order
  m = match(vis$nodes$id, n$id)
  vis$nodes$label = n$formattedLabel[m]
  vis$nodes$FDR = n$FDR[m]
  vis$nodes$FDR = signif(vis$nodes$FDR, 5)
  # jitter tied FDR values so every node carries a unique FDR
  w = which(duplicated(vis$nodes$FDR))
  if (length(w) > 0) {
    vis$nodes = vis$nodes %>% group_by(FDR) %>% mutate(
      jitter = if (n() > 1)
        abs(jitter(FDR))
      else
        (FDR)
    )
    w = which(names(vis$nodes) == "FDR")
    vis$nodes = vis$nodes[, -w]
    w = which(names(vis$nodes) == "jitter")
    names(vis$nodes)[w] = "FDR"
    vis$nodes$FDR = signif(vis$nodes$FDR, 6)
  }
  w = which(duplicated(vis$nodes$FDR))  # NOTE(review): result never used
  vis$nodes$term = n$term[m]
  vis$nodes$interSect = n$interSect[m]
  vis$nodes$nQuery = n$nQuery[m]
  vis$nodes$nGenes = n$nGenes[m]
  vis$nodes$geneSetFraction = n$geneSetFraction[m]
  vis$nodes$querySetFraction = n$querySetFraction[m]
  vis$nodes$filename = n$filename[m]
  vis$nodes$formattedLabel = n$formattedLabel[m]
  vis$nodes$overlapGenes = n$overlapGenes[m]
  vis$nodes$label = vis$nodes$formattedLabel
  vis$nodes$color.border = "black"
  vis$nodes = vis$nodes %>% arrange(label)
  # BUG FIX: vis$edges can be NULL (the code guards that case above), and
  # nrow(NULL) is NULL, so `if (nrow(vis$edges) > 0)` would error; guard
  # the NULL case explicitly
  if (!is.null(vis$edges) && nrow(vis$edges) > 0)
    vis$edges$color = "black"
  vis$nodes$borderWidthSelected = 4
  # NOTE(review): "fomattedLabel" is a typo of "formattedLabel", so that
  # helper column is never actually dropped; left unchanged in case
  # downstream code relies on the column being present
  w = which(names(vis$nodes) %in% c("fomattedLabel", "color", "cluster"))
  if (length(w) > 0) vis$nodes = vis$nodes[, -w]
  # static display attributes
  vis$nodes$color.highlight.border = "#000066"
  vis$nodes$color.highlight.background = "#c0b3ff"
  vis$nodes$color.hover.background = "#000066"
  vis$nodes$color.hover.border = "#c0b3ff"
  vis$nodes$font.face = fontface
  vis$nodes$shape = "dot"
  vis$nodes$font.size = fontsize
  vis$nodes$font.bold = FALSE
  vis$nodes$borderWidth = 2
  #vis$nodes$vadjust = "mono"
  vis$nodes$borderWidthSelected = 4
  vis$nodes$labelHighlightBold = TRUE
  # emphasise the most significant (lowest-FDR) node
  w = which.min(vis$nodes$FDR)
  if (length(w) > 0) {
    vis$nodes$font.size[w] = fontsize + 4
    vis$nodes$borderWidth[w] = 4
    vis$nodes$font.bold[w] = TRUE
  }
  vis$nodes = vis$nodes %>% arrange(FDR)
  vis
}
###########
# Palette of 30 visually-distinct R colour names, used to colour the
# enrichment clusters in the network plots (one colour per cluster).
mycolors = c(
  "darkorange1",
  "dodgerblue",
  "darkgreen",
  "navy",
  "mediumpurple" ,
  "royalblue3",
  "darkolivegreen4",
  "firebrick",
  "cyan4",
  "hotpink3",
  "plum4",
  "blue",
  "magenta4",
  "skyblue3",
  "green4",
  "red3",
  "steelblue3",
  "tomato",
  "purple4",
  "goldenrod3",
  "steelblue",
  "darkred",
  "lightpink3",
  "darkorchid",
  "lightblue3",
  "dimgrey",
  "chocolate1",
  "seagreen3",
  "darkkhaki",
  "darksalmon"
)
############
# Build a per-gene score table from one column of a score matrix.
# NOTE(review): this is a byte-for-byte duplicate of the compSCORE defined
# earlier in this file; consider keeping a single definition.
# mat  - numeric matrix with gene IDs as rownames
# coln - column index (or name) of mat holding the score of interest
# sig  - cutoff; genes with score strictly above it get index = 1, else 0
# Returns a data.frame with columns (index, score, gene), rows named by
# gene, ordered by decreasing score.
compSCORE <- function(mat, coln, sig) {
  library(dplyr)
  res <- data.frame(score = mat[, coln], stringsAsFactors = FALSE)
  res$gene <- rownames(mat)
  rownames(res) <- res$gene
  # flag genes passing the significance cutoff (NA scores stay 0)
  res$index <- 0
  res$index[which(res$score > sig)] <- 1
  res <- res[, c("index", "score", "gene")]
  res <- arrange(res, desc(score))
  res
}
######### PLOTTING FUNCTION
# compute the top leading edge genes common to given gene sets
# geneSets - a list of gene sets, where each list element is a vector of gene IDs
# scoreMat - dataframe of gene scores; must have "ID" (gene ID) and "score" columns
# maxGenes - maximum number of top leading edge genes to return
# RETURNS a matrix of the top leading edge genes, where each row corresponds to one gene;
# rownames are gene IDs
# 1st column = % of the given gene sets for which the gene is in the leading edge
# 2nd column = gene score
# generate a barplot of the common leading edge genes using google charts (can be visualized
# with the html img tag), bar length corresponds to score
# leadInfo - matrix/dataframe of leading edge genes; rownames = gene IDs, column 1 = % of gene sets, column 2 = score
# - if a 3rd column is provided, it should provide TRUE/FALSE indicating whether or not the gene
# should be marked
# plotCol - the colour of the bars; hexidecimal format without the '#' character ###########################
###########################
#######################
# scoreRange - a vector of the range of scores in the profile; 1st value = min, 2nd value = max
# barWidth - bar width in the plot
# scoreLabel - label for the score axis
# RETURNS a vector of plot info: plot width, plot height and the google chart URL for the plot
# alias: genLeadingEdgePlot.gChart <- function(leadInfo, plotCol, scoreRange, barWidth, scoreLabel="Sensitivity") {
# generate a barplot of the common leading edge genes using google charts (can be visualized
# with the html img tag), bar length corresponds to score
# leadInfo - matrix/dataframe of leading edge genes; rownames = gene IDs, column 1 = % of gene sets, column 2 = score
# - if a 3rd column is provided, it should provide TRUE/FALSE indicating whether or not the gene
# should be marked
# plotCol - the colour of the bars; hexidecimal format without the '#' character #000066
# scoreRange - a vector of the range of scores in the profile; 1st value = min, 2nd value = max
# barWidth - bar width in the plot 15
# scoreLabel - label for the score axis "Fitness Defect"
# RETURNS a vector of plot info: plot width, plot height and the google chart URL for the plot
# Build a Google Image Charts URL for a horizontal barplot of leading-edge
# genes (bar length corresponds to gene score).
# leadInfo   - matrix/dataframe; rownames = gene IDs, column 1 = % of gene
#              sets, column 2 = gene score; optional column 3 = 1/TRUE marks
#              the corresponding bar with a black circle
# plotCol    - bar colour, hexadecimal without the leading '#'
# scoreRange - c(min, max) of the score axis
# barWidth   - bar width in pixels
# scoreLabel - label for the score axis (spaces are URL-encoded as '+')
# width      - total plot width in pixels
# Returns c(plot width, plot height, chart URL) as a character vector.
genLeadingEdgePlot.gChart <- function(leadInfo, plotCol, scoreRange, barWidth, scoreLabel, width) {
  # express bar lengths as values in [0, 100]
  # NOTE(review): assumes scoreRange[2] > scoreRange[1]; a degenerate range
  # would divide by zero -- confirm callers always pass a real range
  dataRange <- scoreRange[2] - scoreRange[1]
  barLens <- round((leadInfo[, 2] - scoreRange[1])/dataRange * 100)
  # choose the score-axis tick step from the span of the data
  if (dataRange <= 1) {
    stepSize <- 0.5
  } else if (dataRange <= 5) {
    stepSize <- 2
  } else if (dataRange <= 20) {
    stepSize <- 5
  } else if (dataRange <= 50) {
    stepSize <- 10
  } else if (dataRange <= 100) {
    stepSize <- 20
  } else if (dataRange <= 500) {
    stepSize <- 100
  } else {
    stepSize <- 250
  }
  # URL-encode spaces in the axis label as '+' (idiomatic gsub replaces the
  # original's manual split / substitute / re-join)
  scoreLabel <- gsub(" ", "+", scoreLabel, fixed = TRUE)
  # determine the positions of the gene labels (evenly spaced, centred)
  tmpStep <- 100/length(barLens)
  labelPos <- round(seq(tmpStep/2, 100, by=tmpStep))
  # compute plot size: one bar per gene plus fixed padding for the axes
  w <- width
  h <- barWidth*length(barLens) + 50
  # specify the zero line if we have negative values
  if (scoreRange[1] < 0) {
    zeroLineStr <- paste("&chp=", round(abs(scoreRange[1])/dataRange, 2), sep="")
  } else {
    zeroLineStr <- ""
  }
  # if a 3rd column is in leadInfo, use it to determine which gene bars to mark
  if (ncol(leadInfo) > 2 && any(leadInfo[, 3] == 1)) {
    barModStr <- paste("o,000000,", which(leadInfo[,3]==1) - 1, ",-1,5", sep="")
    barModStr <- paste("&chm=", paste(barModStr, collapse="|"), sep="")
  } else {
    barModStr <- ""
  }
  c(w, h, paste("http://chart.apis.google.com/chart?chxt=x,x,y&chs=", w, "x", h,
                "&cht=bhg&chd=t:", paste(barLens, collapse="|"),
                "&chco=", plotCol,
                "&chxl=1:|", scoreLabel, "|2:|", paste(rev(rownames(leadInfo)), collapse="|"),
                "&chxp=1,50|2,", paste(labelPos, collapse=","),
                "&chxr=0,", scoreRange[1], ",", scoreRange[2], ",", stepSize,
                "&chbh=", barWidth, ",1,0",
                zeroLineStr, barModStr, sep=""))
}
######### PLOTTING FUNCTION
# generate a barplot of the top-scoring overlap genes (driving enrichment) using google charts (can be
# visualized with the html img tag), bar length corresponds to score
# oGeneStr - |-separated string of overlap genes, and after each gene, its score is provided in parentheses,
# genes are sorted by score in decreasing order
# scoreRange - a vector of the range of scores in the profile; 1st value = min, 2nd value = max
# barModGenes - if provided (i.e. not NULL) a vector of genes that should be marked distinctly,
# should they be in the top overlap genes
# barWidth - bar width in the plot
# maxGenes - maximum number of top overlap genes to return
# plotCol - the colour of the bars; hexidecimal format without the '#' character
# scoreLabel - label for the score axis
# RETURNS a vector of plot info: plot width, plot height and the google chart URL for the plot
# ###########################
# Build a Google Image Charts barplot URL for the top-scoring overlap genes.
# oGeneStr    - "|"-separated string of "GENE(score)" entries, sorted by
#               decreasing score
# width       - total plot width in pixels
# scoreRange  - c(min, max) of the score axis
# barModGenes - optional vector of gene IDs whose bars should be marked
# barWidth    - bar width in pixels
# maxGenes    - maximum number of top overlap genes to plot
# plotCol     - bar colour, hexadecimal without the leading '#'
# scoreLabel  - label for the score axis
# Returns c(plot width, plot height, chart URL) (see genLeadingEdgePlot.gChart).
genOverlapGenePlot.gChart <- function(oGeneStr, width = 150, scoreRange, barModGenes=NULL, barWidth=10, maxGenes=10, plotCol="DACCFF", scoreLabel="Sensitivity") {
  oGenes <- unlist(strsplit(oGeneStr, "\\|"))
  # split off the trailing "(score)"; re-joining on "(" keeps gene names that
  # themselves contain parentheses intact
  genes <- strsplit(oGenes, "\\(")
  scores <- sapply(genes, function(vec) { vec[length(vec)] })
  genes <- sapply(genes, function(vec) { paste(vec[-length(vec)], collapse="(") })
  scores <- unlist(strsplit(scores, ")"))
  scoreMat <- data.frame(percent=rep(NA, length(genes)), score=as.numeric(scores), stringsAsFactors=F)
  rownames(scoreMat) <- genes
  # keep only the top maxGenes genes (input is already sorted by score)
  if (nrow(scoreMat) > maxGenes) {
    scoreMat <- scoreMat[1:maxGenes, ]
  }
  if (!is.null(barModGenes)) {
    # BUG FIX: gene IDs live in the rownames, not in a "gene" column --
    # scoreMat$gene was NULL, which made this assignment fail whenever
    # barModGenes was supplied
    scoreMat$mod <- as.numeric(rownames(scoreMat) %in% barModGenes)
  }
  genLeadingEdgePlot.gChart(scoreMat, plotCol, scoreRange, barWidth, scoreLabel, width)
}
######### PLOTTING FUNCTION
############
# generate leading edge barplots for all clusters
# scoreMat - dataframe: ID, score, optional 3rd column indicating which ones to mark (TRUE/FALSE)
# geneSets - a list of all gene sets, where each list element is a vector of gene IDs
# gsInfo - dataframe of gene set info: id=gene set id, cluster=cluster id, es=enrichment score, fdr=FDR value
# scoreName - score label to use in leading edge plots
# plotCol - the colour of the bars, in hexidecimal format without the '#' character
# RETURNS a dataframe of barplot node info, each row corresponds to a different node;
# contains "id", "image" (google chart URL), "w" (plot width), "h" (plot height), "cluster" columns
#genLeadingEdgePlot.all <- function(scoreMat, geneSets, gsInfo, scoreName, plotCol="BEBEBE") {
####
# Parse a "GENE(score)|GENE(score)|..." overlap string into a two-column
# data.frame (gene, score), keep at most the first 10 entries, then sort by
# score (ascending by default, descending when desc = TRUE).
geneBARPLOT = function(overlapGenes, desc = FALSE){
  library(dplyr)
  pieces <- strsplit(overlapGenes, "\\|")
  # gene name = text before the first "("
  geneNames <- lapply(pieces, function(p) sapply(strsplit(p, "\\("), `[`, 1))
  # score = text between "(" and ")", converted to numeric
  rawScores <- lapply(pieces, function(p) sapply(strsplit(p, "\\("), `[`, 2))
  numScores <- lapply(rawScores, function(s) as.numeric(sapply(strsplit(s, "\\)"), `[`, 1)))
  leadInfo <- data.frame(gene = unlist(geneNames), score = unlist(numScores), stringsAsFactors = FALSE)
  # truncate BEFORE sorting, as the original did
  if (nrow(leadInfo) > 10) {
    leadInfo <- leadInfo[1:10, ]
  }
  if (desc) {
    leadInfo <- arrange(leadInfo, desc(score))
  } else {
    leadInfo <- arrange(leadInfo, score)
  }
  leadInfo
}
# Height in pixels of a leading-edge barplot: one 15px bar per row of `df`
# plus 50px of padding for the axis labels.
# df - data.frame with one row per gene (the original also rescaled df$score
#      into bar lengths, but only the row count was ever used; that dead
#      computation is removed)
barplotHEIGHT = function(df){
  barWidth <- 15
  barWidth * nrow(df) + 50
}
# Height in pixels of a gene barplot: one 15px bar per row of `leadInfo`
# plus 50px of padding. Only the number of rows matters; the original's
# rescaling of column 2 into bar lengths was dead code and is removed.
genebarHEIGHT = function(leadInfo) {
  barWidth <- 15
  barWidth * nrow(leadInfo) + 50
}
|
f9ae83882c72d5e3af356a17fc7068319c523aa9
|
7512ccf38a7d4b768948574838339edf6c9bba5f
|
/man/svm_class.Rd
|
a2278d6f0659cfed27d83ab85458d417c204f540
|
[] |
no_license
|
ZW-xjtlu/perflite
|
dc76007111c070db734c46cb997abb1f8913dfc2
|
988eca396232a47599c89df1442a0cf3f8e4491c
|
refs/heads/master
| 2021-07-02T20:55:21.583010
| 2020-09-28T00:16:18
| 2020-09-28T00:16:18
| 175,593,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 878
|
rd
|
svm_class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svm_class.R
\name{svm_class}
\alias{svm_class}
\title{A function to generate the decision values for svm classification.}
\usage{
svm_class(y, X_train, X_test, ...)
}
\arguments{
\item{y}{a \code{factor} that contains the binary response variable with levels 1,0 for prediction.}
\item{X_train}{a \code{matrix} that contains the features for tarining set, nrow(X) = length(y).}
\item{X_test}{a \code{matrix} that contains the features for testing set.}
\item{...}{additional arguments passed to \code{svm} defined in the package \code{e1071}.}
}
\value{
a \code{vector} that is the same length of y containing the decision values on testing set.
}
\description{
A function to generate the decision values for svm classification.
}
\details{
\code{svm_f} conduct binary classification using SVM.
}
|
4e2fac2908669d4103875f63611687a5f4a97c76
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PPQplan/examples/ti.occurve.Rd.R
|
6ea002561c45e2e9856cc99d238c88668e9359c2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 849
|
r
|
ti.occurve.Rd.R
|
library(PPQplan)
### Name: ti.occurve
### Title: Operating Characteristic (OC) Curves for the PPQ Plan using
### Tolerance Interval.
### Aliases: ti.occurve
### ** Examples
ti.occurve(attr.name = "Sterile Concentration Assay", attr.unit="%",
mu=97, sigma=seq(0.1, 10, 0.1), Llim=95, Ulim=105, n=10, add.reference=TRUE)
ti.occurve(attr.name = "Sterile Concentration Assay", attr.unit="%",
mu=100, sigma=seq(0.1, 10, 0.1), Llim=95, Ulim=105, n=10, add.reference=TRUE)
ti.occurve(attr.name = "Extractable Volume", attr.unit = "% of NV=3mL",
Llim = 100, Ulim = Inf, mu=102.5, sigma=seq(0.2, 6 ,0.05), n=40,
alpha = 0.05, coverprob = 0.97, side=1, NV=3)
ti.occurve(attr.name = "Extractable Volume", attr.unit = "% of NV=3mL",
Llim = 100, Ulim = Inf, mu=102.5, sigma=seq(0.2, 6 ,0.05), n=40,
alpha = 0.05, coverprob = 0.992, side=1, NV=3)
|
5af3c6556fcea2dedff5c6eb85ea5ad9db7515fa
|
7d198bf47fd89dbbb9cf878ac614ae3880d71d15
|
/Mineria_Final/Codigo/01-load.R
|
93b03f71aaf4702b0139919df7ca3d24f296adf6
|
[] |
no_license
|
AnaLuisaMasetto/proyecto_final_mineria
|
9dcefc86d5e7b5d4af98a25ad33d776ca3010636
|
2283418a0b439be5eaba8e02f89394c20563eb99
|
refs/heads/master
| 2020-04-11T19:21:41.178864
| 2019-01-27T23:13:31
| 2019-01-27T23:13:31
| 162,031,307
| 0
| 2
| null | 2018-12-19T17:24:03
| 2018-12-16T18:57:12
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 23
|
r
|
01-load.R
|
walmart_data <- load()
|
ed2138ba75821492e766de0531e45c55076de465
|
19134b14210afcb391b6ad09c73189d7a3e15a0b
|
/pop_functions.R
|
f1788ab200d6ff8a18ae93fab3b10f990da85736
|
[] |
no_license
|
VajiraL/african_population
|
b7475db8ad15a93a5f1bfb4ea43bc1f824ab47cf
|
74fdfe6bd2f0cc7aea17d7eaaf71b1d1fd36d001
|
refs/heads/master
| 2021-12-11T03:40:15.029223
| 2016-10-19T13:42:09
| 2016-10-19T13:42:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,370
|
r
|
pop_functions.R
|
############ POPULATION FUNCTIONS AFRICA ##############
# Functions used for the population gridding of africa #
# #
# #
# Author: Niklas Boke-Olen #
# niklas.boke-olen@nateko.lu.se #
# niklasbokeolen@gmail.com #
# #
#########################################################
# Derive a fine-resolution urban mask from a coarse urban-fraction raster and
# a population raster: within each coarse cell, the `round(urb_frac * pixels)`
# highest-population fine pixels are flagged as urban.
# urb_frac - coarse raster of urban fraction per cell
# pop_grid - fine-resolution population raster covering (part of) urb_frac
# pixels   - number of fine pixels per coarse cell
# cores    - number of CPU cores for the parallel foreach loop
# Returns a raster on pop_grid's geometry with 1 = urban, 0 = not urban.
# NOTE(review): relies on the raster, foreach and doMC packages being
# attached by the caller, and on 0.5-degree coarse cells (the hard-coded
# 0.25 offsets below) -- confirm before reuse.
to_urban_mask_parallell <- function(urb_frac,pop_grid,pixels,cores){
  # number of fine pixels that should be urban in each coarse cell
  urb_frac1 <- round(urb_frac*pixels)
  # build a raster of coarse-cell indices, masked to cells that have at
  # least one urban pixel; ind.df then lists the cells to process
  ind_rast <- urb_frac1;
  ind_rast [,] <- 1:length(ind_rast[,]);
  ind_mask <- urb_frac1;
  ind_mask[ind_mask>0] <- 1; ind_mask[ind_mask==0] <- NA;
  ind_rast <- ind_mask*ind_rast;
  ind.df <- na.omit(as.data.frame(ind_rast))
  registerDoMC(cores) #change to CPU cores
  cutval <- NA
  # for every coarse cell, find the population value above which exactly the
  # requested number of fine pixels fall (the cell's "cut value")
  cutval1 <- foreach(i=1:length(ind.df$layer),.combine=rbind) %dopar% {
    urbP_cell <- urb_frac1[ind.df$layer[i]]
    # extent of this (presumably 0.5-degree) coarse cell
    xy <- xyFromCell(urb_frac1, ind.df$layer[i], spatial=FALSE) - 0.25
    ext <- extent(xy[1],xy[1]+0.5,xy[2],xy[2]+0.5)
    a <- intersect(ext,extent(pop_grid)) #check if should be processed. only if within area chosen
    if(!is.null(a)){
      # process only cells that lie fully inside the population grid
      if(a == ext){
        sort_pix <- sort(extract(pop_grid,ext))
        if(length(sort_pix)>0){
          index_sort <- (length(sort_pix)- urbP_cell)
          if(index_sort<1){ #if all pixels should be included allow for this (could happen with a lot of NA for example)
            cutval[i] <- max((sort_pix[1] - 1),0)
          }else{
            cutval[i] <- sort_pix[index_sort]
            if(sum(sort_pix>cutval[i])<urbP_cell){ #if we have equal values select all equal at the cutoff to be urban.(should only be pairs)
              diff <- sort_pix - sort_pix[index_sort]
              cutval[i] <- sort_pix[diff<0][length(sort_pix[diff<0])]
            }
          }
        }
      }
    }
    cutval[i]
  }
  beginCluster(cores)
  # broadcast each coarse cell's cut value onto the fine grid, then
  # threshold the population raster against it
  cutval_grid <- urb_frac1; cutval_grid[,] <- NA
  cutval_grid[ind.df$layer[1:length(ind.df$layer)]] <- cutval1
  cutval_rsmp <- resample(cutval_grid,pop_grid,method='ngb')
  urban_mask <- pop_grid > cutval_rsmp
  # pixels with no cut value (unprocessed cells) become non-urban
  urban_mask <- reclassify(urban_mask, cbind(NA, 0))
  endCluster()
  return(urban_mask)
}
|
b8b827e1f92a4e07ce1f7f82bee45c5ba0a940ef
|
5ddea1ad62a397deee8f8e348afbce631d5417ac
|
/man/demo_participants.Rd
|
b5efbd34bb15bd50ede5950fa9e2b822a064344b
|
[
"MIT"
] |
permissive
|
martina-starc/peRson
|
da0513dd7f08e8cfba6055b2a7eb3350b3a311ed
|
7376da729836bff767581d5ad566dfd27b655787
|
refs/heads/master
| 2023-04-08T19:09:49.814150
| 2021-04-09T17:04:30
| 2021-04-09T17:04:30
| 336,203,610
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 657
|
rd
|
demo_participants.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_demo_quiz.R
\docType{data}
\name{demo_participants}
\alias{demo_participants}
\title{Demo quiz participants}
\format{
A data frame with 7 rows and 4 variables: \describe{ \item{name}{name
of the participant} \item{color}{chosen color of the participant}
\item{image}{path to the participant's image} \item{email}{participant's
email} }
}
\usage{
demo_participants
}
\description{
A dataset containing data for fictional quiz participants. Images from image
variable are included in the package. To get correct paths, use
[get_demo_participants()].
}
\keyword{datasets}
|
0f39a143a67b857ee21e920413356ab4d9dfecb4
|
d420f963ab5ff604cc74a3037f9396cd826e606c
|
/man/cambodia_poverty.Rd
|
7609effe6ca9cd7dc3b8cd8f12ffdb81ae4ff256
|
[] |
no_license
|
johnrbryant/bdefdata
|
d103f09bcd26cbfe677449d799ba9274cf0209e7
|
6c637e28d891c32cb0e8bd18d046c355287eadb8
|
refs/heads/master
| 2020-03-18T17:16:50.127404
| 2018-09-23T06:43:11
| 2018-09-23T06:43:11
| 135,018,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 662
|
rd
|
cambodia_poverty.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cambodia_poverty.R
\docType{data}
\name{cambodia_poverty}
\alias{cambodia_poverty}
\title{Estimates of provincial poverty rates Cambodia, 2009}
\format{A data frame, with two columns:
\describe{
\item{province}{Province of Cambodia}
\item{poverty}{Percent below poverty line}
}}
\source{
Asian Development Bank. 2014. Cambodia Country Poverty Analysis 2014.
Manila: Asian Development Bank
}
\usage{
cambodia_poverty
}
\description{
Estimates of the percent of the population living below the poverty line,
by province, compiled by the Asian Development Bank.
}
\keyword{datasets}
|
92ad416485a71e82576edcf6a34f61593a805c49
|
a150f79e34aea1ea78cead88f1ab375e46dbd07c
|
/man/krsa_read.Rd
|
ae90bfc302f2035d18ead4467ab303832f30e88f
|
[
"MIT"
] |
permissive
|
AliSajid/KRSA
|
e7faad7dbdadb44d15bcb6dbb29014f3ce0bb044
|
970697f023364e0a7168381783f04a360630f7c7
|
refs/heads/master
| 2023-07-14T02:13:40.596639
| 2021-08-26T16:06:42
| 2021-08-26T16:06:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 813
|
rd
|
krsa_read.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/krsa_read.R
\name{krsa_read}
\alias{krsa_read}
\title{Read crosstab format files exported from bioNavigator and tidy them}
\usage{
krsa_read(signal_file, signal_saturation)
}
\arguments{
\item{signal_file}{path to median signal minus background file (Median_SigmBg)}
\item{signal_saturation}{path to signal saturation file (Signal_Saturation)}
}
\value{
tbl_df
}
\description{
This function takes in paths to the median signal minus background (Median_SigmBg) and signal saturation (Signal_Saturation) files and parse and tidy them
}
\examples{
TRUE
}
\seealso{
Other core functions:
\code{\link{krsa_group_diff}()},
\code{\link{krsa_quick_filter}()},
\code{\link{krsa_scaleModel}()},
\code{\link{krsa}()}
}
\concept{core functions}
|
c6e430ef1e3506c137d678a9e219c31f5bc30332
|
c67ed6bfca50b35228ef31a477865e0063701836
|
/IARPA Report/CFQ_analyses.R
|
bb4fd0adc8a2bd20eb5745e47c722e4f4037936f
|
[] |
no_license
|
joetidwell/QES2
|
1bbfdbc4d5e901162064e14f2c37a8df58b8e350
|
1f2741e27c8ce7a58c486473f15d980236c70a55
|
refs/heads/master
| 2020-12-24T16:49:56.005345
| 2015-12-09T17:50:29
| 2015-12-09T17:50:29
| 32,096,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,132
|
r
|
CFQ_analyses.R
|
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
####
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
library(ggplot2)
library(foreach)
library(doMC)
library("multicore", quietly=TRUE)
registerDoMC(max(multicore:::detectCores()-6,2)) # use all cores minus 2
registerDoMC(28)
source("~/ACE/global_vars.R")
source(file.path(kRootPath, "util", "load_data.R"), chdir=T)
source(file.path(kRootPath, "fitting", "interval_fitting_funcs.R"), chdir=T)
source(file.path(kRootPath, "forecast", "method_consensus_dist.R"), chdir=T)
options(stringsAsFactors = FALSE)
path.mydata <- "~/R/QES2/data"
# path.mydata <- "~/git/QES2/data"
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Get set of closed IFPS
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load the IFP (question) key, keep only closed questions, key by ifp_idx
ifp.key <- as.data.table(read.csv(file.path(path.mydata, "ifp_key-SV.csv")))
ifp.key <- ifp.key[status=="closed",]
setkey(ifp.key, ifp_idx)
# Load year-4 IFP metadata and keep only the columns used downstream
ifp.data <- as.data.table(read.csv(file.path(kRootPath, "data", "ifps", "ifps.yr4.csv")))
ifp.data <- ifp.data[, list(ifpid, q_status, q_type, date_closed)]
setkey(ifp.data, ifpid)
# Keyed join against the closed-IFP key; drop rows with no close date
# (date_closed is the literal string "NULL" in the CSV when missing)
ifp.data <- ifp.data[ifp.key][date_closed!="NULL"]
# Build the "<ifpid>-<q_type>" identifier used elsewhere in this script
ifp.data[,ifp_id:=paste(ifpid,q_type,sep="-")]
ifp.data <- ifp.data[q_status=="closed",]
# shift the close date back one day -- presumably the last day forecasts
# counted; confirm against the scoring convention
ifp.data[, date_closed:=as.Date(date_closed)-1]
# Load resolution values and join them (by IFPID) onto the IFP table
IFP.res <- data.table(read.csv(file.path(path.mydata,"lum_ifp_data.csv")))
IFP.res[,IFPID:=as.character(IFPID)]
ifp.data[, ifpid:=as.character(ifpid)]
setkey(IFP.res, IFPID)
setkey(ifp.data,ifpid)
ifp.data <- IFP.res[,list(IFPID,Outcome)][ifp.data]
# interactive check only: show IFPs that closed without a recorded outcome
# (the result is not assigned)
ifp.data[Outcome=="",]
# Only keep IFPs which have closed, and for which we have resolution values
# ifp.data <- ifp.data[Outcome!="",]
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Get all forecasts and fit distributions
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load, clean, and fit the continuous (binned) forecasts for tournament year 4.
#
# Args:
#   ifp.set: optional vector of ifp ids; when non-NULL, only forecasts for
#            those IFPs are kept.
#   years:   nominal tournament year (currently unused in the body; the year-4
#            CSV path and `year := 4` are hard-coded — TODO confirm intent).
#
# Returns: a data.table of forecasts with renamed bin_*/ctpt_* columns,
#   date cutpoints converted to day offsets from the forecast date, degenerate
#   100%-in-one-bin forecasts smoothed, and fitted distribution parameters
#   attached via GetContinuousFits() (sourced from update_lumenogic_fits.R).
LoadContinuous <- function(ifp.set=NULL, years = 4){
  source(file.path(kRootPath, "data","update_lumenogic_fits.R"), chdir=TRUE)
  # load forecasts
  fcasts <- data.table(read.csv(file.path(path.mydata, "fcasts.lum_roll.yr4.csv")))
  # rename bins: "Bin.1" -> "bin_1", etc.
  setnames(fcasts,
           grep("^Bin",names(fcasts),value=TRUE),
           sub("Bin.","bin_", grep("^Bin",names(fcasts),value=TRUE)))
  # rename cutpoints: "CutPoint.1" -> "ctpt_1", etc.
  setnames(fcasts,
           grep("^CutPoint",names(fcasts),value=TRUE),
           sub("CutPoint.","ctpt_", grep("^CutPoint",names(fcasts),value=TRUE)))
  # rename others (positional — assumes the CSV column order is stable)
  setnames(fcasts,
           names(fcasts)[c(1:9,23)],
           c("lum_user_id","user_id","screen_name","type","ifp_id",
             "cifp_branch","value_type","range_low","range_high","timestamp"))
  # remove non-forecaster accounts (ids <= 20000 are staff/test accounts —
  # TODO confirm this threshold)
  fcasts <- fcasts[user_id > 20000, ]
  # Only keep ifps designated by ifp.set
  if(!is.null(ifp.set)) {
    fcasts <- fcasts[ifp_id %in% ifp.set,]
  }
  # add idx: stable per-row id used later to join fitted parameters
  fcasts[, idx := 1:.N]
  # add fcast_date column from timestamps
  fcasts[, timestamp := timestamp/1000] # convert from milliseconds to seconds
  fcasts[, date_time_gmt := as.POSIXct(timestamp, origin="1970-01-01", tz="GMT")]
  #fcasts[, fcast_date := as.Date(date_time_gmt,tz = "EST")]
  # Keep the GMT calendar date as a string (first 10 chars of the datetime).
  fcasts$fcast_date <- substr(as.character(fcasts$date_time_gmt),1,10)
  setkey(fcasts, timestamp)
  # Convert date-time formatted date responses to integers (days since 1970-01-01 00:00:00 GMT)
  TimeToInt <- function(date.time.tz){
    date.time.tz[is.na(date.time.tz)|date.time.tz == ""] <- NA
    date.time.tz <- as.integer(as.Date(date.time.tz))
    # clamp pre-epoch dates to 0
    date.time.tz[date.time.tz < 0] <- 0
    return(date.time.tz)
  }
  # convert the cut points and ranges for date IFPs to numeric values
  cutpt.cols <- grep("^range|^ctpt",names(fcasts),value=TRUE)
  fcasts[value_type=="date",
         c(cutpt.cols):=lapply(.SD, function(x) as.character(TimeToInt(x))),
         .SDcols=cutpt.cols]
  # convert the cut points and ranges to numeric values
  fcasts[,c(cutpt.cols):=lapply(.SD, as.numeric),.SDcols=cutpt.cols]
  # Offset date bins by date of forecast, so cutpoints become
  # "days from forecast date" rather than absolute day numbers.
  l_ply(grep("^ctpt",names(fcasts),value=TRUE), function(x) {
    fcasts[value_type=="date", eval(as.name(x)) := eval(as.name(x)) - as.numeric(as.Date(fcast_date)), by=idx]
  })
  # add the q_type suffix to 4-digit IFPs
  ifps <- LoadIfps()
  ifp.types <- unique(ifps[, q_type, key = list(ifp_id = as.integer(substr(ifp_id, 0, 4)))])
  setkey(fcasts, ifp_id)
  fcasts <- ifp.types[fcasts]
  # Conditional-branch forecasts carry their q_type in the branch code (e.g. "C2").
  fcasts[cifp_branch != "C1", q_type := as.integer(substr(cifp_branch, 2,2))]
  fcasts[, ifp_id:=as.character(ifp_id)]
  fcasts[, ifp_id := paste(ifp_id, q_type, sep="-")]
  # merge in q_status
  ifp.statuses <- ifps[,list(ifp_id, q_status)]
  fcasts <- ifp.statuses[fcasts]
  # add tournament year designation
  fcasts[, year := 4]
  # keep only IFPs requested via q.status
  # fcasts <- fcasts[q_status %in% q.status, ]
  ## Temporarily disabled updating csv
  # Load fitted parameters
  # fits <- UpdateContinuousFits()
  # setkey(fits, idx)
  # Merge with IFPs
  # setkey(fcasts, idx)
  # fcasts <- fits[fcasts]
  bin.names <- c("bin_0","bin_1","bin_2","bin_3","bin_4","bin_5","bin_6",
                 "bin_7","bin_8","bin_9","bin_10","bin_11","bin_12","bin_13")
  fcasts[,c(bin.names):=lapply(.SD, as.numeric),.SDcols=bin.names]
  # 'FIX' FORECASTS WITH 100% PROBABILITY IN 1 BIN: spread 1% into the
  # neighbouring bin(s) so a continuous distribution can still be fitted.
  fcasts[bin_0==100,c("bin_0","bin_1"):=list(99,1)]
  fcasts[bin_1==100,c("bin_0","bin_1","bin_2"):=list(.5,99,.5)]
  fcasts[bin_2==100,c("bin_1","bin_2","bin_3"):=list(.5,99,.5)]
  fcasts[bin_3==100,c("bin_2","bin_3","bin_4"):=list(.5,99,.5)]
  fcasts[bin_4==100,c("bin_3","bin_4","bin_5"):=list(.5,99,.5)]
  fcasts[bin_5==100,c("bin_4","bin_5","bin_6"):=list(.5,99,.5)]
  fcasts[bin_6==100,c("bin_5","bin_6","bin_7"):=list(.5,99,.5)]
  fcasts[bin_7==100,c("bin_6","bin_7","bin_8"):=list(.5,99,.5)]
  fcasts[bin_8==100,c("bin_7","bin_8","bin_9"):=list(.5,99,.5)]
  fcasts[bin_9==100,c("bin_8","bin_9","bin_10"):=list(.5,99,.5)]
  fcasts[bin_10==100,c("bin_9","bin_10","bin_11"):=list(.5,99,.5)]
  fcasts[bin_11==100,c("bin_10","bin_11","bin_12"):=list(.5,99,.5)]
  fcasts[bin_12==100,c("bin_11","bin_12","bin_13"):=list(.5,99,.5)]
  fcasts[bin_13==100,c("bin_12","bin_13"):=list(1,99)]
  # Replaces UpdateContinuousFits()
  # Refits all forecasts every time called
  fcasts <- GetContinuousFits(fcasts)
  # SaveCache(fcasts, fn.name="LoadContinuousFcasts", args = args )
  return(fcasts)
}
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Generic Functions
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Switchable generic cumulative distribution function
# Dispatch to the cumulative distribution function for the named family
# ("normal", "beta", or "gamma"); remaining arguments are forwarded to
# the matching p<dist>() call.  Unknown names yield NULL, as switch() does.
CDF <- function(dist.name, ...) {
  fn <- switch(dist.name,
               normal = pnorm,
               beta   = pbeta,
               gamma  = pgamma)
  if (is.null(fn)) {
    return(NULL)
  }
  fn(...)
}
# Switchable generic density function
# Dispatch to the probability density function for the named family;
# remaining arguments are forwarded to the matching d<dist>() call.
# Unknown names yield NULL, as switch() does.
PDF <- function(dist.name, ...) {
  fn <- switch(dist.name,
               normal = dnorm,
               beta   = dbeta,
               gamma  = dgamma)
  if (is.null(fn)) {
    return(NULL)
  }
  fn(...)
}
# Generic Inverse Cumulative Distribution Function
# Dispatch to the quantile (inverse CDF) function for the named family;
# remaining arguments are forwarded to the matching q<dist>() call.
# Unknown names yield NULL, as switch() does.
QDF <- function(dist.name, ...){
  fn <- switch(dist.name,
               normal = qnorm,
               beta   = qbeta,
               gamma  = qgamma)
  if (is.null(fn)) {
    return(NULL)
  }
  fn(...)
}
# Generic Sampler
# Dispatch to the random-sampling function for the named family;
# remaining arguments are forwarded to the matching r<dist>() call.
# Unknown names yield NULL, as switch() does.  Consumes RNG state.
RDF <- function(dist.name, ...){
  fn <- switch(dist.name,
               normal = rnorm,
               beta   = rbeta,
               gamma  = rgamma)
  if (is.null(fn)) {
    return(NULL)
  }
  fn(...)
}
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Data Wrangling
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load all fitted continuous forecasts for the selected IFPs and cache them.
fcasts <- LoadContinuous(ifp.set=ifp.data$IFPID)
all.fits <- fcasts
save(all.fits, file=file.path(path.mydata,"allfits.Rdata"))
# Load data
# load(file.path(path.mydata,"fcasts.Rdata"))
# load(file.path(path.mydata,"ifp.data.Rdata"))
# load("data/fcasts.Rdata")
# Only roller conditions: DROP the "rolling"/"rollingCtl" types, keeping the rest.
fcasts <- fcasts[!(type%in%c("rolling","rollingCtl"))]
fcasts[,unique(ifp_id)]
# for some reason 1448 is duplicated — keep only its first metadata row
tmp <- ifp.data[IFPID=="1448"][1]
ifp.data <- cbind(ifp.data[IFPID!="1448"],tmp)
# merge with ifp.data
# ifp.data <- ifp.data[IFPID!="1419"]
setkey(fcasts, ifp_idx)
setkey(ifp.data, IFPID)
fcasts <- ifp.data[fcasts]
# Column-name vectors reused repeatedly below.
names.bins <- grep("bin_", names(fcasts), fixed=TRUE, value=TRUE)
names.ctpts <- grep("ctpt_", names(fcasts), fixed=TRUE, value=TRUE)
# Convert date outcomes to numeric (days since the 1970 epoch)
fcasts[dist=="gamma" & Outcome!="",Outcome:=as.numeric(as.Date(Outcome))]
fcasts[,unique(Outcome),by=c("ifp_id")]
# Remove any forecasts without specified outcomes
fcasts <- fcasts[!is.na(Outcome),]
fcasts[,Outcome:=as.numeric(Outcome)]
# Check outcome verses medians as sanity check
# fcasts[,est.median:=QDF(dist,.5,par.1,par.2),by=1:nrow(fcasts)]
# fcasts[,list(median(est.median),Outcome[1]),by=ifp_id][4:36,round(.SD,3),.SDcols=c("V1","V2")]
# fcasts[,list(est=round(median(est.median),digits=10),
#              out=round(median(Outcome), digits=2)),by=ifp_id]
# Error: goodness-of-fit bookkeeping for the fitted distributions.
# SS.tot is based on the per-row variance of the cutpoints; R.sq compares
# the fit's sse against it.
fcasts[,SS.tot:=2*apply(fcasts[,.SD,.SDcols=names.ctpts],1,var,na.rm=TRUE)]
fcasts[, SS.fit := SS.tot - sse]
fcasts[, R.sq := 1-(sse/SS.tot)]
# Drop 'unfittable' (sse == 42 is a sentinel from the fitting routine —
# TODO confirm against update_lumenogic_fits.R)
fcasts <- fcasts[sse!=42,]
# Drop neg Rsq
fcasts <- fcasts[R.sq>0,]
# Adjust outcome (true.score) for date IFPs by date forecast made, and betas
# (beta outcomes are percentages, rescaled to [0, 1]).
fcasts[value_type=="date",true.score:=Outcome-as.numeric(as.Date(fcast_date))]
fcasts[value_type!="date",true.score:=Outcome]
fcasts[dist=="beta",true.score:=Outcome/100]
# Drop forecasts made after close
fcasts <- fcasts[as.Date(date_closed)>=as.Date(fcast_date)]
# FOR SOME REASON MANY DATE FORECASTS HAVE NEGATIVE CUTPOINTS... WTF? FIND OUT WHY
fcasts <- fcasts[!(dist=="gamma" & ctpt_0<0)]
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### CRPS
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Linearly rescale a numeric vector onto [0, 1], ignoring NAs when locating
# the extremes.  NAs in x stay NA; a constant vector yields NaN (0/0).
scaleUnit <- function(x) {
  lo <- min(x, na.rm = TRUE)
  hi <- max(x, na.rm = TRUE)
  (x - lo) / (hi - lo)
}
# Monte-Carlo estimate of the continuous ranked probability score via the
# Gneiting & Raftery (2007) kernel form:
#   CRPS = E|X - y| - 0.5 * E|X - X'|
# where X, X' are independent draws from the forecast distribution (via RDF)
# and y is the realised outcome.  Lower is better.  The estimate is
# stochastic unless the caller fixes the RNG seed.
GneitingCRPS <- function(distribution, par.1, par.2, true.score, n = 1e6) {
  draws <- RDF(distribution, n, par.1, par.2)
  draws.prime <- RDF(distribution, n, par.1, par.2)
  accuracy.term <- mean(abs(draws - true.score))
  spread.term <- mean(abs(draws - draws.prime))
  accuracy.term - 0.5 * spread.term
}
# Score every individual forecast with the Monte-Carlo CRPS (parallel).
out <- foreach(i=1:nrow(fcasts)) %dopar% {
  fcasts[i,GneitingCRPS(dist,par.1,par.2,true.score,n=1e6)]
}
# S.R = raw CRPS; S.R.r = within-IFP rank rescaled to [0, 1].
fcasts[,S.R:=unlist(out)]
fcasts[is.na(S.R),.N,by=c("type")]
fcasts[, S.R.r := scaleUnit(rank(S.R)),by=ifp_id]
save.image()
# Per-forecaster mean score, then a mixed model of condition (type) with
# random intercepts for IFP and distribution family.
tmp <- fcasts[,mean(S.R),by=c("screen_name","ifp_id","type","dist")]
tmp[,y:=scaleUnit(rank(V1)),by=ifp_id]
library(lme4)
mod <- lmer(y~factor(type) + (1|ifp_id) + (1|dist), data=tmp)
summary(lm(y~type,tmp))
# Interactive summaries (printed, not assigned).
tmp[,mean(y),by=c("type","ifp_id")]
tmp[,median(y),by=c("type","ifp_id")]
tmp[,median(y),by=c("type")]
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Consensus
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First get 'Daily' forecasts
# Build the "daily active" forecast set for one IFP: for every calendar day
# the question was open, keep each user's most recent forecast, then within
# each condition (type) keep only the newest slice — the most recent
# decay.recent fraction, or everything from the last decay.min.days days,
# whichever is larger (and everything when a condition has <= 5 forecasts).
#
# Args:
#   fcasts:         data.table of scored forecasts (needs ifp_id, fcast_date,
#                   date_closed, idx, user_id, type columns).
#   ifp:            single ifp_id to process.
#   decay.recent:   fraction of most-recent forecasts to retain per condition.
#   decay.min.days: minimum recency window in days.
#
# Returns: one data.table stacking the retained forecasts for every day,
#   with a roll.date column marking the day each slice belongs to.
getDailySet <- function(fcasts,
                        ifp,
                        decay.recent=.5,
                        decay.min.days=3) {
  myifp <- fcasts[ifp_id==ifp,]
  bin.id <- paste0("bin_",1:13)
  ctpt.id <- paste0("ctpt_",1:12)
  min.date <- myifp[,min(as.Date(fcast_date))]
  max.date <- myifp[,as.Date(date_closed[1])]
  # Get distributions for every live day of the forecast
  roll <- foreach(d=seq.Date(min.date,max.date,"day")) %dopar% {
    # filter to forecasts made on or before d
    tmp <- myifp[as.Date(fcast_date)<=d,]
    # Keep only the most recent forecast by each user
    setkey(tmp, fcast_date)
    tmp <- tmp[idx %in% tmp[, list(last_fcast = tail(idx,n = 1)),
                            by=user_id]$last_fcast]
    # get N by condition
    N <- tmp[,.N,by=type]
    # Keep only the newest forecasts
    # Either by using the decay.recent parameter and keeping the most recent X% of forecasts
    N[, n.keep.decay:=as.integer(N*decay.recent)]
    # or the number of forecasts made in the last N days as specified by decay.min.days
    N[, n.keep.daymin:= tmp[,sum(as.Date(fcast_date)>(d-decay.min.days)),by=type]$V1]
    # whichever is larger
    N[, n.keep := max(n.keep.decay, n.keep.daymin),by=type]
    # unless N <= 5, in which case keep everything for that condition
    N[, n.keep:=ifelse(N<=5,N,n.keep)]
    # first/last index the tail slice of the date-sorted forecasts to keep
    N[, first:=(N-n.keep)+1]
    N[, last:=N]
    setkey(N,type)
    # print(N)
    tmp2 <- tmp[0,]
    for(condition in N$type) {
      tmp2 <- rbind(tmp2,tmp[type==condition,][N[condition,]$first:N[condition,]$last,])
    }
    tmp2[,.N,by=type]
    tmp <- tmp2
    # Tag each retained row with the day it represents; this assignment is
    # also the value returned from the foreach body.
    tmp[,roll.date:=d]
  }
  do.call(rbind,roll)
}
# Assemble the daily forecast sets for every IFP into one table.
daily.set <- foreach(ifp=unique(fcasts$ifp_id), .combine="rbind") %do% {
  getDailySet(fcasts, ifp)
}
daily.set[,fcast_date:=as.Date(fcast_date)]
# Re-fit gamma (date-question) forecasts to account for time already elapsed
# since the forecast was made: the distribution is truncated at
# roll.date - fcast_date, renormalised over a 20-point grid up to the 99.99th
# percentile, and new (par.1, par.2, sse) are fitted via FitFnInterval.
# Forecasts made on the roll date itself, and non-gamma forecasts, are left
# unchanged.  Modifies daily.set by reference AND returns it.
#
# NOTE(review): n.bins only sizes the placeholder matrices; the grid length
# and the bin_/ctpt_ column names are hard-coded to 20 — values other than
# 20 would break. TODO confirm before generalizing.
updateGammas <- function(daily.set, n.bins=20) {
  # Subset to forecasts that need to be updated
  # forecasts made on the same day as the roll date are fine
  # and only gammas are updated
  updates <- daily.set[dist=="gamma" & (fcast_date!=roll.date),]
  # Define refitting cutpoints for each forecast
  ctpts <- data.table(matrix(NA_real_, nrow=nrow(updates), ncol=n.bins))
  bins <- copy(ctpts)
  bin.id <- paste0("bin_",1:20)
  ctpt.id <- paste0("ctpt_",1:20)
  setnames(ctpts, ctpt.id)
  setnames(bins, bin.id)
  updates <- cbind(updates[,list(idx,ifp_id,dist,par.1,par.2,fcast_date, roll.date, type)],bins,ctpts)
  updates[, row.id:=1:.N]
  # Obtain probabilities for each bin: truncated-and-renormalised CDF values
  # on a 20-point grid from the elapsed time to the 99.99th percentile.
  updates[,c(bin.id):=as.list((pgamma(seq(roll.date-fcast_date,
                                          qgamma(.9999,par.1,par.2),
                                          length=20),par.1,par.2) -
                                 pgamma(roll.date-fcast_date, par.1, par.2)) /
                                as.numeric(1-pgamma(roll.date-fcast_date, par.1, par.2))),
          by=row.id]
  # Convert cumulative values to interval probabilities (first differences)
  updates[,c(bin.id):=as.list(updates[,bin.id,with=FALSE] -
                                cbind(0,updates[,bin.id[-length(bin.id)],with=FALSE]))]
  # Get cutpoints, re-expressed as offsets from the elapsed time
  updates[,c(ctpt.id):=as.list(seq(as.numeric(roll.date-fcast_date),
                                   qgamma(.9999,par.1,par.2),
                                   length=20)-as.numeric(roll.date-fcast_date)),
          by=row.id]
  # Drop unnecessary 1st ctpt/bin (the zero-offset point)
  updates[,bin_1:=NULL]
  updates[,ctpt_1:=NULL]
  # refit parameters to new, renormed probabilities
  new.fits <- foreach(i=1:nrow(updates)) %dopar% {
    bin.vals <- unlist(updates[i, .SD, .SDcols=bin.id[-1]])
    ctpt.vals <- unlist(updates[i, .SD, .SDcols=ctpt.id[-1]])
    dist.name <- updates[i,]$dist
    tryCatch({
      # only refit when the renormalised probabilities still sum to ~1
      if(abs(round(sum(bin.vals),2)-1)<.01) {
        fit <- FitFnInterval(probs = bin.vals,
                             quants = ctpt.vals,
                             dist.name = dist.name)
        return(data.table(par.1 = fit[1],
                          par.2 = fit[2],
                          sse = fit[3]))
      } else {
        return(data.table(par.1 = NA_real_,
                          par.2 = NA_real_,
                          sse = NA_real_))
      }
    },
    # any fitting failure degrades to NA parameters rather than aborting
    error=function(cond){
      return(data.table(par.1 = NA_real_,
                        par.2 = NA_real_,
                        sse = NA_real_))
    })
  }
  new.fits <- do.call(rbind,new.fits)
  # Write the refitted parameters back in place (same subset predicate as above,
  # so row order lines up with new.fits).
  daily.set[dist=="gamma" & (fcast_date!=roll.date),
            c("par.1","par.2","sse"):=new.fits]
  return(daily.set)
}
daily.set <- updateGammas(daily.set)
#### Get Rolling Consensus
# Theta-M: per day/IFP/condition consensus distribution formed by taking the
# median of each fitted parameter across forecasters.
ThetaM <- daily.set[,list(par.1=median(par.1, na.rm=TRUE),
                          par.2=median(par.2, na.rm=TRUE),
                          true.score=Outcome[1],
                          dist=dist[1],
                          ctpts=ctpts[1],
                          N=.N),
                    by=c("roll.date","ifp_id","type")]
# Re-express date outcomes relative to each roll date (gamma = date questions).
ThetaM[dist=="gamma", true.score:=true.score-as.numeric(roll.date)]
# Score ThetaM (beta outcomes are percentages -> [0, 1])
ThetaM[dist=="beta", true.score:=true.score/100]
# spot-check a single CRPS evaluation
GneitingCRPS("beta",40,73,.447,n=1e6)
out <- foreach(i=1:nrow(ThetaM)) %dopar% {
  ThetaM[i,GneitingCRPS(dist,par.1,par.2,true.score,n=1e6)]
}
ThetaM[,S.R:=unlist(out)]
ThetaM[is.na(S.R),.N,]
# Rank the three conditions against each other per IFP-day (1 = best).
ThetaM[, S.R.r := rank(S.R),by=c("ifp_id","roll.date")]
# Share of days each condition finishes 1st/2nd/3rd.
ThetaM[,sum(S.R.r==1)/(.N),by=c("type")]
ThetaM[,sum(S.R.r==2)/(.N),by=c("type")]
ThetaM[,sum(S.R.r==3)/(.N),by=c("type")]
ThetaM[,mean(S.R.r),by=c("type","ifp_id")]
fcasts[ifp_id=="1410-0",]
ThetaM[ifp_id=="1410-0",]
# Mean daily score per IFP/condition, then an ordinal regression of rank on
# condition (clm from the ordinal package).
ThetaM.MD <- ThetaM[,list(S.R=mean(S.R)),by=c("ifp_id","type")]
ThetaM.MD[,S.R.r:=rank(S.R),by=c("ifp_id")]
fm1 <- clm(ordered(S.R.r)~factor(type, levels=c("fixed","random","user")), data=ThetaM.MD)
summary(fm1)
# fixed < user
# random ~< user
save.image()
ThetaM.MD[,sd(S.R.r)/sqrt(.N),by=type]
ThetaM.MD[,mean(S.R.r),by=type]
setkey(fcasts,ifp_id)
setkey(ThetaM,ifp_id)
# Relative improvement of the consensus over the average individual, by
# condition, with standard errors (n = 36 IFPs assumed in sqrt(36)).
1-mean(ThetaM[type=="random",mean(S.R),by=ifp_id]$V1/fcasts[type=="random",mean(S.R),by=ifp_id]$V1)
1-mean(ThetaM[type=="user",mean(S.R),by=ifp_id]$V1/fcasts[type=="user",mean(S.R),by=ifp_id]$V1)
1-mean(ThetaM[type=="fixed",mean(S.R),by=ifp_id]$V1/fcasts[type=="fixed",mean(S.R),by=ifp_id]$V1)
sd(ThetaM[type=="random",mean(S.R),by=ifp_id]$V1/fcasts[type=="random",mean(S.R),by=ifp_id]$V1)/sqrt(36)
sd(ThetaM[type=="user",mean(S.R),by=ifp_id]$V1/fcasts[type=="user",mean(S.R),by=ifp_id]$V1)/sqrt(36)
sd(ThetaM[type=="fixed",mean(S.R),by=ifp_id]$V1/fcasts[type=="fixed",mean(S.R),by=ifp_id]$V1)/sqrt(36)
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Brier Scores
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Rescale a fitted distribution's variance by factor k while holding its mean
# fixed (sharpening when k < 1, widening when k > 1).  Returns a two-element
# list of adjusted parameters for the same family; unknown families yield NULL.
extremize <- function(dist, par.1, par.2, k = 1) {
  switch(dist,
         gamma = {
           # gamma(shape, rate): mean = shape/rate, variance = shape/rate^2
           m <- par.1 / par.2
           s2 <- k * par.1 / par.2^2
           rate <- m / s2
           shape <- s2 * rate^2
           list(shape, rate)
         },
         normal = {
           # the second parameter is scaled directly
           list(par.1, par.2 * k)
         },
         beta = {
           # beta(a, b): mean = a/(a+b), variance = ab/((a+b)^2 (a+b+1));
           # recover (a, b) from the mean and the scaled variance
           m <- par.1 / (par.1 + par.2)
           s2 <- k * (par.1 * par.2) /
             ((par.1 + par.2)^2 * (par.1 + par.2 + 1))
           shape1 <- (((1 - m) / s2) - (1 / m)) * m^2
           shape2 <- (((1 - m) / s2) - (1 / m)) * m * (1 - m)
           list(shape1, shape2)
         })
}
# Variance of a fitted distribution given its two parameters.
# For "normal" this returns par.2 unchanged (treated as the dispersion
# parameter).  The name "blah" is kept because later script lines call it.
# Unknown families yield NULL, as switch() does.
blah <- function(dist, par.1, par.2) {
  switch(dist,
         gamma = par.1 / par.2^2,
         normal = par.2,
         beta = (par.1 * par.2) /
           ((par.1 + par.2)^2 * (par.1 + par.2 + 1)))
}
# Ordered Brier score contribution at one cutpoint: twice the squared gap
# between the forecast CDF at `ctpt` and the 0/1 indicator that the realised
# value fell below the cutpoint, rounded to 4 decimal places.
BS <- function(dist, par.1, par.2, ctpt, true.score) {
  forecast.p <- CDF(dist, ctpt, par.1, par.2)
  below <- as.numeric(true.score < ctpt)
  round(2 * (forecast.p - below)^2, 4)
}
# Elementwise wrapper so BS can be applied across vectors of forecasts.
vec.BS <- Vectorize(BS)
# Fix date formats (slashes -> dashes so they parse with as.Date later)
ThetaM[,ctpts:=gsub("/","-",ctpts)]
# NOTE(review): beta true.scores were already divided by 100 above — this
# second division rescales them again. Verify intent.
ThetaM[dist=="beta",true.score:=true.score/100]
setkey(ThetaM,ifp_id)
# get GJP cutpoints; 1489-6 is patched by hand with its scoring window
id <- unique(ThetaM$ifp_id)[2]
id <- "1489-6"
ThetaM[ifp_id==id,ctpts:="2015-03-01|2015-05-31"]
id <- "1493-6"
ThetaM[,unique(ctpts),by=ifp_id]
testit <- ThetaM[ifp_id=="1459-0" & type=="random",]
as.Date(testit[1]$roll.date)-as.Date(testit[1]$ctpts)
id="1459-0"
# Mean daily Brier score (MDBS) for every IFP at its scoring cutpoints,
# evaluated on the unextremized consensus parameters.
MDBS <- foreach(id=unique(ThetaM$ifp_id)) %do% {
  tryCatch({
    # scoring cutpoints are stored pipe-separated in ctpts
    score.cut <- unlist(ThetaM[id,][1,strsplit(ctpts,"|",fixed=TRUE)])
    if(ThetaM[id,]$dist[1]=="gamma") {
      # normalise "m-d-yy" style dates to ISO "20yy-mm-dd" before parsing
      if(grepl("^..-",score.cut[1])) {
        score.cut <- sapply(score.cut, function(s) {
          tmp <- unlist(strsplit(s,"-",fixed="TRUE"))
          paste(paste0("20",tmp[3]),
                ifelse(nchar(tmp[1])==1,paste0("0",tmp[1]),tmp[1]),
                ifelse(nchar(tmp[2])==1,paste0("0",tmp[2]),tmp[2]),
                sep="-")
        })
      }
      score.cut <- unlist(sapply(score.cut, as.Date))
      # express each cut date as an offset from every roll date (one column
      # per cutpoint, one row per ThetaM row)
      score.cut <- foreach(sc=score.cut, .combine="cbind") %do% {
        matrix(sc-as.numeric(ThetaM[id,]$roll.date), ncol=1)
      }
    } else {
      score.cut <- matrix(as.numeric(rep(score.cut, times=nrow(ThetaM[id,])), ncol=1))
    }
    out <- apply(score.cut,2, function(ct) {
      ThetaM[id, list(dist=dist,
                      type=type,
                      BS=vec.BS(dist, par.1, par.2, ct, true.score),
                      N=N)]
    })
    # NOTE(review): only the first cutpoint column is kept here — confirm
    # multi-cutpoint IFPs are intentionally scored on one cut only.
    out <- out[[1]]
    out[,list(BS=round(mean(BS, na.rm=TRUE),4),
              N=mean(N)),by=c("type","ifp_id","dist")]
  }, error = function(cond) {
    # failures degrade to an all-NA row for this IFP
    data.table(type=NA,ifp_id=id,dist=ThetaM[id,]$dist[1],BS=NA,N=NA)
  })
}
MDBS <- do.call(rbind,MDBS)
MDBS
save.image(file="roll.Rdata")
MDBS[ifp_id=="1415-0",mean(BS, na.rm=TRUE),by=c("ifp_id","type")]
# Per-row variance of each consensus distribution, then extremized
# parameters (variance scaled by k = 3) for the comparison below.
ThetaM[,row.id:=1:nrow(ThetaM)]
ThetaM[,v:=blah(dist,par.1,par.2),by=row.id]
ThetaM[,sd(v),by=ifp_id]
ThetaM[,c("alpha","beta"):=extremize(dist,par.1,par.2,3),by=row.id]
# Same mean daily Brier score computation as MDBS, but evaluated on the
# extremized parameters (alpha, beta) and with extra handling for
# slash-separated date cutpoints.
MDBS.ex <- foreach(id=unique(ThetaM$ifp_id)) %dopar% {
  tryCatch({
    score.cut <- unlist(ThetaM[id,][1,strsplit(ctpts,"|",fixed=TRUE)])
    if(ThetaM[id,]$dist[1]=="gamma") {
      # normalise "m-d-yy" dates to ISO "20yy-mm-dd"
      if(grepl("^..-",score.cut[1])) {
        score.cut <- sapply(score.cut, function(s) {
          tmp <- unlist(strsplit(s,"-",fixed="TRUE"))
          paste(paste0("20",tmp[3]),
                ifelse(nchar(tmp[1])==1,paste0("0",tmp[1]),tmp[1]),
                ifelse(nchar(tmp[2])==1,paste0("0",tmp[2]),tmp[2]),
                sep="-")
        })
      }
      # normalise "m/d/yy" dates the same way
      if(grepl("/",score.cut[1])) {
        score.cut <- sapply(score.cut, function(s) {
          tmp <- unlist(strsplit(s,"/",fixed="TRUE"))
          paste(paste0("20",tmp[3]),
                ifelse(nchar(tmp[1])==1,paste0("0",tmp[1]),tmp[1]),
                ifelse(nchar(tmp[2])==1,paste0("0",tmp[2]),tmp[2]),
                sep="-")
        })
      }
      score.cut <- unlist(sapply(score.cut, as.Date))
      score.cut <- foreach(sc=score.cut, .combine="cbind") %do% {
        matrix(sc-as.numeric(ThetaM[id,]$roll.date), ncol=1)
      }
    } else {
      score.cut <- matrix(as.numeric(rep(score.cut, times=nrow(ThetaM[id,])), ncol=1))
    }
    # scored on extremized alpha/beta rather than par.1/par.2
    out <- apply(score.cut,2, function(ct) {
      ThetaM[id, list(dist=dist,
                      type=type,
                      BS=vec.BS(dist, alpha, beta, ct, true.score),
                      N=N)]
    })
    out <- out[[1]]
    out[,list(BS=round(mean(BS),4),
              N=mean(N)),by=c("type","ifp_id","dist")]
  }, error = function(cond) {
    data.table(type=NA,ifp_id=id,dist=ThetaM[id,]$dist[1],BS=NA,N=NA)
  })
}
MDBS.ex <- do.call(rbind,MDBS.ex)
# Duplicate the table under two "inf" (inflation factor) labels.
# NOTE(review): both copies hold k = 3 scores; inf = 1 is a label only.
tmp1 <- copy(MDBS.ex)
tmp2 <- copy(MDBS.ex)
tmp1[,inf:=1]
tmp2[,inf:=3]
MDBS.ex <- rbind(tmp1,tmp2)
MDBS.ex[ifp_id=="1415-0"]
ThetaM[ifp_id=="1415-0"]
# MDBS.ex
# round(cbind(MDBS.ex[,mean(BS),by=ifp_id]$V1,
# MDBS[,mean(BS),by=ifp_id]$V1),2)
# blah <- cbind(MDBS[,mean(BS),by=ifp_id]$V1,MDBS.ex[,mean(BS),by=ifp_id]$V1)
# NOTE(review): this clobbers the function blah() defined above with a matrix
# of per-IFP means — blah() cannot be called after this line runs.
blah <- cbind(blah,MDBS.ex[,mean(BS),by=ifp_id]$V1)
# Exploratory: per-day probabilities and Brier scores computed from the raw
# daily forecast sets (rather than the ThetaM consensus).  Much of the loop
# body is scratch work; only the last expression is returned per IFP.
id <- "1415-0"
setkey(daily.set,ifp_id)
MM <- foreach(id=unique(daily.set$ifp_id)) %do% {
  tryCatch({
    score.cut <- unlist(daily.set[id,][1,strsplit(ctpts,"|",fixed=TRUE)])
    if(daily.set[id,]$dist[1]=="gamma") {
      # normalise "m-d-yy" dates to ISO "20yy-mm-dd"
      if(grepl("^..-",score.cut[1])) {
        score.cut <- sapply(score.cut, function(s) {
          tmp <- unlist(strsplit(s,"-",fixed="TRUE"))
          paste(paste0("20",tmp[3]),
                ifelse(nchar(tmp[1])==1,paste0("0",tmp[1]),tmp[1]),
                ifelse(nchar(tmp[2])==1,paste0("0",tmp[2]),tmp[2]),
                sep="-")
        })
      }
      score.cut <- unlist(sapply(score.cut, as.Date))
      score.cut <- foreach(sc=score.cut, .combine="cbind") %do% {
        matrix(sc-as.numeric(ThetaM[id,]$roll.date), ncol=1)
      }
    } else {
      score.cut <- matrix(as.numeric(rep(score.cut, times=nrow(daily.set[id,])), ncol=1))
    }
    # per-forecast CDF value at each row's cutpoint
    out <- foreach(i=1:nrow(score.cut)) %dopar% {
      daily.set[id,][i,CDF(dist,score.cut[i,],par.1,par.2)]
    }
    # probabilities
    probs <- do.call(rbind,out)
    # get mean prob by day
    tmp.p <- cbind(daily.set[id,list(roll.date)],probs)
    tmp.p <- tmp.p[,lapply(.SD,mean),by=roll.date,.SDcols=grep("^V",names(tmp.p),value=TRUE)]$V1
    # get ctpt by day
    tmp.c <- cbind(daily.set[id,list(roll.date)],score.cut)[,V1[1],by=roll.date]$V1
    # outcome by day
    heaviside <- tmp.c < daily.set[id,true.score[1],by=roll.date]$V1
    mean(2*(tmp.p - heaviside)^2)
    tmp <- cbind(daily.set[id,list(type,roll.date)],out)
    cols <- grep("^V",names(tmp),value=TRUE)
    tmp <- tmp[,lapply(.SD,mean),.SDcols=cols,by=c("roll.date")]
    # scratch expressions below are evaluated but discarded, except the last
    daily.set[i,true.score] < score.cut
    daily.set[i,true.score] < score.cut
    colMeans(do.call(rbind,out))
    cbind(daily.set[id,list(ifp_idx,type)],out)[,mean(out),by=type]
  }, error = function(cond) {
    data.table(type=NA,ifp_id=id,dist=ThetaM[id,]$dist[1],BS=NA,N=NA)
  })
}
# NOTE(review): this binds MDBS.ex, not MM — the loop result above is
# discarded. Looks like a copy/paste slip; confirm before relying on MM.
MM <- do.call(rbind,MDBS.ex)
# `blah` is the cbind matrix built earlier (k-sweep of mean BS per IFP).
blah <- data.table(blah)
setnames(blah,c("k=1","k=1.5","k=3","k=5"))
blah[,ifp_id:=unique(MDBS.ex$ifp_id)]
# X11(width=6,height=6)
# p <- ggplot(data=MDBS, aes(x=ifp_id, y=BS, color=type)) +
#   geom_point() +
#   xlim(c(0,2))
ThetaM[id,qbeta(.5,par.1,par.2)]
ThetaM[id,par.1]
ThetaM[id,par.2]
#Add a "too good to be true" slide
#1412 is a great example
# have a horizontal line for GJP threshold
# line for true outcome
# curve w/ 95% CI for daily consensus
ifp.data
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Fitted versus raw forecasts
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compare Brier scores computed from the raw binned forecasts ("empirical")
# against scores from the fitted parametric distributions, for the roller
# conditions (`rollers` is assumed defined earlier — not visible here).
fitraw <- copy(rollers)
out <- foreach(i=1:nrow(fitraw)) %do% {
  quants <- unlist(fitraw[i,c(paste0("ctpt_",0:12)),with=FALSE])
  probs <- unlist(fitraw[i,c(paste0("bin_",0:13)),with=FALSE])/100
  # truncate at the first NA (bins/cutpoints are left-packed)
  quants <- quants[1:(min(which(is.na(quants)))-1)]
  probs <- probs[1:(min(which(is.na(probs)))-1)]
  probs <- cumsum(probs)[-length(probs)]
  ts <- fitraw[i,true.score]
  # Empirical
  BS.emp <- 2*(c(probs,1-probs[length(probs)-1]) - c(ts < quants, ts>quants[length(quants)] ))^2
  # From fitted
  par.1 <- fitraw[i,]$par.1
  par.2 <- fitraw[i,]$par.2
  dist <- fitraw[i,]$dist
  f.probs <- CDF(dist, quants, par.1, par.2)
  # NOTE(review): `length(f.probs-1)` is likely meant to be
  # `length(f.probs)-1` (as written it equals length(f.probs)); the second
  # loop below uses `length(f.probs)`. Confirm which is intended.
  BS.fit <- 2*(c(f.probs,1-f.probs[length(f.probs-1)]) - c(ts < quants, ts>quants[length(quants)] ))^2
  mean(BS.fit - BS.emp)
}
Reduce('+',out)/length(out)
out <- unlist(out)
mean(out)
sd(out)/sqrt(length(out))
setkey(fcasts, ifp_id)
setkey(ThetaM, ifp_id)
ThetaM[,median(S.R),by=ifp_id]$V1<fcasts[,median(S.R),by=ifp_id]$V1
i <- 1
# Second pass: fitted-only Brier scores per forecast.
out <- foreach(i=1:nrow(fitraw)) %do% {
  quants <- unlist(fitraw[i,c(paste0("ctpt_",0:12)),with=FALSE])
  probs <- unlist(fitraw[i,c(paste0("bin_",0:13)),with=FALSE])/100
  quants <- quants[1:(min(which(is.na(quants)))-1)]
  probs <- probs[1:(min(which(is.na(probs)))-1)]
  probs <- cumsum(probs)[-length(probs)]
  ts <- fitraw[i,true.score]
  # Empirical
  # BS.emp <- 2*(c(probs,1-sum(probs)) - c(ts < quants, ts>quants[length(quants)] ))^2
  # From fitted
  par.1 <- fitraw[i,]$par.1
  par.2 <- fitraw[i,]$par.2
  dist <- fitraw[i,]$dist
  f.probs <- CDF(dist, quants, par.1, par.2)
  BS.fit <- 2*(c(f.probs,1-f.probs[length(f.probs)]) - c(ts < quants, ts>quants[length(quants)] ))^2
  mean(BS.fit)
}
out <- unlist(out)
mean(out)
fitraw
# Compare consensus MDBS against individual fitted scores per IFP.
tmp <- data.table(ifp_id=fitraw$ifp_id,BS=out)
tmp <- tmp[,mean(BS),by=ifp_id]
tmp <- rbind(tmp,tmp)
setkey(tmp,ifp_id)
setkey(MDBS,ifp_id)
MDBS[,blah:=BS-tmp$V1]
MDBS[,mean(blah),by=type]
MDBS[,sd(blah)/sqrt(.N),by=type]
MDBS[,mean(BS,na.rm=TRUE)]
1-mean(ThetaM[type=="rolling",mean(S.R),by=ifp_id]$V1/fitraw[type=="rolling",mean(S.R),by=ifp_id]$V1)
1-mean(ThetaM[type=="rollingCtl",mean(S.R),by=ifp_id]$V1/fitraw[type=="rollingCtl",mean(S.R),by=ifp_id]$V1)
(Reduce('+',out)/length(out))
MDBS
do.call(c,out)
fcasts
length(out)
fitraw[,MDBS:=do.call(c,out)]
tmp <- fitraw[,mean(MDBS),by=ifp_id]
mean(MDBS[,mean(BS, na.rm=TRUE),by=ifp_id]$V1/fitraw[,mean(MDBS),by=ifp_id]$V1, na.rm=TRUE)
fitraw[MDBS>2,][1,]
# hand-check of a 4-cutpoint Brier score
(2*(.15-0)^2+
   2*(.65-0)^2+
   2*(1-0)^2+
   2*(0-1)^2)/4
which(fitraw$idx==512)
# median bias of individual vs consensus medians for IFP 1415-0
# (16307 = the resolved outcome as days since 1970-01-01)
fcasts[ifp_id=="1415-0",median((qgamma(.5,par.1,par.2)+as.numeric(as.Date(fcast_date))-Outcome))]
ThetaM[ifp_id=="1415-0",median((qgamma(.5,par.1,par.2)+as.numeric(as.Date(roll.date))-16307))]
ThetaM[ifp_id=="1415-0",]
fcasts[ifp_id=="1415-0",median(S.R),by=type]
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Lets look at 1415-0 to see wtf!
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Case study for IFP 1415-0: compare Brier scores of (a) averaging
# probabilities, (b) the median-parameter consensus, and (c) the extremized
# consensus, against the GJP scoring date 2014-10-01.
tmp <- daily.set[ifp_id=="1415-0",]
gjp <- as.numeric(as.Date("2014-10-01"))
# gjp column = days from each roll date to the scoring date
tmp[,gjp:=gjp-as.numeric(as.Date(roll.date))]
tmp[,row.id:=1:nrow(tmp)]
# Local redefinition of extremize with only the gamma branch (shadows the
# earlier three-family version for the remainder of the script).
extremize <- function(dist,par.1,par.2,k=1) {
  switch(dist,
         gamma = {
           mu <- par.1/par.2
           v <- k*par.1/par.2^2
           beta <- mu/v
           alpha <- v*beta^2
           list(alpha,beta)
         })
}
k <- 3
tmp[,c("alpha","beta"):=extremize(dist, par.1,par.2,k=k), by=row.id]
# (1 - CDF at the cut) squared-error form; outcome indicator here is 1.
avg.fcast <- tmp[,2*(mean(pgamma(gjp,par.1,par.2))-1)^2,by="roll.date"]
con.fcast <- tmp[,2*(pgamma(gjp[1],median(par.1),median(par.2))-1)^2,by="roll.date"]
con.e.fcast <- tmp[,2*(pgamma(gjp[1],median(alpha),median(beta))-1)^2,by="roll.date"]
mean(avg.fcast$V1)
mean(con.fcast$V1)
mean(con.e.fcast$V1)
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Look at a good one to see how much worse it makes it
####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Same comparison for IFP 1416-0 (scoring date 2015-01-01).
tmp <- daily.set[ifp_id=="1416-0",]
gjp <- as.numeric(as.Date("2015-01-01"))
tmp[,gjp:=gjp-as.numeric(as.Date(roll.date))]
tmp[,row.id:=1:nrow(tmp)]
k <- 3
tmp[,c("alpha","beta"):=extremize(dist, par.1,par.2,k=k), by=row.id]
avg.fcast <- tmp[,2*(mean(pgamma(gjp,par.1,par.2))-1)^2,by="roll.date"]
con.fcast <- tmp[,2*(pgamma(gjp[1],median(par.1),median(par.2))-1)^2,by="roll.date"]
con.e.fcast <- tmp[,2*(pgamma(gjp[1],median(alpha),median(beta))-1)^2,by="roll.date"]
mean(avg.fcast$V1,na.rm=TRUE)
mean(con.fcast$V1, na.rm=TRUE)
mean(con.e.fcast$V1, na.rm=TRUE)
tmp2<-ThetaM[ifp_id=="1416-0",]
# NOTE(review): `mean2*` references an undefined object — almost certainly
# meant `mean(2*(...))`; this line errors at runtime as written.
tmp2[,mean2*(pgamma(gjp-as.numeric(as.Date(roll.date)),par.1,par.2))^2]
|
95f26b16e1d2079dce41b36b1746eb6db3880e31
|
572c9072506a54b38da51d18f1902af7769a5b50
|
/FIT5147-DataCleaning.R
|
70ce5f7a5984352ad3b03d83aef745532d772e1a
|
[] |
no_license
|
EswarHariKumar/dataModelling
|
3c74f4b9aa2d6b2bb871067d9efd066f909daddc
|
ef632efa90dc24bc77179475195ccffe4bd4318e
|
refs/heads/master
| 2020-04-02T11:16:56.814643
| 2016-07-05T01:00:03
| 2016-07-05T01:00:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,024
|
r
|
FIT5147-DataCleaning.R
|
# install.packages("ggmap")
install.packages("plyr")
install.packages("dplyr")
install.packages("stringr")
# library(maptools)
require(ggmap)
require(ggplot2)
require(plyr)
require(dplyr)
require(ggalt)
require(ggthemes)
require(rgdal)
# library(leaflet)
require(gridExtra)
require(stringr)
require(MASS)
require(shiny)
# Load the Times Higher Education rankings CSV; empty strings and "-" are
# treated as missing.
timesData <- read.csv("timesData.csv", stringsAsFactors = FALSE,na.strings = c("", "-"))
# Fix misspelled / inconsistent country names so geocoding and joins work.
timesData$country = gsub("Unted Kingdom","United Kingdom",timesData$country)
timesData$country = gsub('Republic of Ireland','Ireland',timesData$country)
timesData$country = gsub('Russian Federation','Russia',timesData$country)
timesData$country = gsub("Unisted States of America",'United States of America',timesData$country)
# Geocode each distinct country once (ggmap::geocode hits a web API),
# then join the lon/lat back onto every row.
unique_country <- distinct(select(timesData,country))
lonlat_all <- geocode(as.character(unique_country$country))
country_with_lonlat <- cbind(unique_country,lonlat_all)
# plyr::join maintains the original row order of timesData
timesData_lonlat <- join(timesData,country_with_lonlat,by="country")
#Transform the Rank to Integer -> rank_int
#and then Dividing the rank_int -> ranking_range (25 records per break)
#
# rank_int: strip a leading "=" (tied ranks) and take the lower bound of
# ranges such as "201-225".  ranking_range: bucket ranks 1-200 into eight
# 25-wide bands via a single vectorized cut(), replacing eight duplicated
# conditional assignments.  Ranks that are NA or outside 1-200 keep the
# original rank string, matching the previous behaviour.
timesData_lonlat[c("rank_int","ranking_range")] <- NA
timesData_lonlat$rank_int <- as.integer(sapply(strsplit(
  str_replace(timesData_lonlat$rank,"=",''),"-"),"[[",1))
timesData_lonlat$ranking_range <- timesData_lonlat$rank
rank_labels <- c("0-25", "26-50", "51-75", "76-100",
                 "101-125", "126-150", "151-175", "176-200")
in_band <- !is.na(timesData_lonlat$rank_int) &
  timesData_lonlat$rank_int > 0 & timesData_lonlat$rank_int <= 200
timesData_lonlat$ranking_range[in_band] <- as.character(
  cut(timesData_lonlat$rank_int[in_band],
      breaks = seq(0, 200, by = 25), labels = rank_labels))
# Placeholder columns for the imputed ("_new") metric values filled in later.
timesData_lonlat[c("research_new","teaching_new","international_new",
                   "citations_new","income_new","overall_new")] <- 0
#dividing the data by year, one data frame per ranking year
td.2011 <- filter(timesData_lonlat, timesData_lonlat$Year==2011)
td.2012 <- filter(timesData_lonlat, timesData_lonlat$Year==2012)
td.2013 <- filter(timesData_lonlat, timesData_lonlat$Year==2013)
td.2014 <- filter(timesData_lonlat, timesData_lonlat$Year==2014)
td.2015 <- filter(timesData_lonlat, timesData_lonlat$Year==2015)
td.2016 <- filter(timesData_lonlat, timesData_lonlat$Year==2016)
#calculating the mean values by year
#
# add_country_means: append per-country mean columns (researchMean,
# teachingMean, internationalMean, citationsMean, incomeMean) to one year's
# slice, keeping every original row (plyr::mutate, not summarize).
#
# The original repeated this ddply call six times and passed a stray unnamed
# `summarize` argument, which plyr::mutate silently drops (it keeps only
# named arguments); the helper removes both the duplication and the dead
# argument without changing behaviour.
add_country_means <- function(df) {
  ddply(df, .(country), mutate,
        researchMean = mean(research, na.rm = TRUE),
        teachingMean = mean(teaching, na.rm = TRUE),
        internationalMean = mean(international, na.rm = TRUE),
        citationsMean = mean(citations, na.rm = TRUE),
        incomeMean = mean(income, na.rm = TRUE))
}
td.2011 <- add_country_means(td.2011)
td.2012 <- add_country_means(td.2012)
td.2013 <- add_country_means(td.2013)
td.2014 <- add_country_means(td.2014)
td.2015 <- add_country_means(td.2015)
td.2016 <- add_country_means(td.2016)
#Bring the data back together and sorting them by year and rank
timesData.all <- rbind(td.2011,td.2012,td.2013,td.2014,td.2015,td.2016)
timesData.all <- arrange(timesData.all,timesData.all$Year,timesData.all$rank_int)
# Impute each "_new" metric: use the observed value where present, otherwise
# the per-country yearly mean (rounded to 1 dp) computed above.
timesData.all$research_new <- ifelse(is.na(timesData.all$research),
                                     round(timesData.all$researchMean,1),
                                     timesData.all$research)
timesData.all$teaching_new <- ifelse(is.na(timesData.all$teaching),
                                     round(timesData.all$teachingMean,1),
                                     timesData.all$teaching)
timesData.all$international_new <- ifelse(is.na(timesData.all$international),
                                          round(timesData.all$internationalMean,1),
                                          timesData.all$international)
timesData.all$citations_new <- ifelse(is.na(timesData.all$citations),
                                      round(timesData.all$citationsMean,1),
                                      timesData.all$citations)
timesData.all$income_new <- ifelse(is.na(timesData.all$income),
                                   round(timesData.all$incomeMean,1),
                                   timesData.all$income)
# Missing overall scores are reconstructed from the component weights
# (30% teaching, 30% research, 30% citations, 7.5% international, 2.5% income).
timesData.all$overall_new <- ifelse(is.na(timesData.all$overall),
                                    round(0.3*timesData.all$teaching_new +
                                            0.3*timesData.all$research_new +
                                            0.3*timesData.all$citations_new +
                                            0.075*timesData.all$international_new +
                                            0.025*timesData.all$income_new,1 ),
                                    timesData.all$overall)
# write.csv(file = "timesData_all.csv",timesData.all)
# Persist, then round-trip through disk before re-splitting by year.
write.table(file="timesData_allTbl.csv", timesData.all)
timesDataTable <- read.table("timesData_allTbl.csv", stringsAsFactors = FALSE)
timesData.all <- data.frame(timesDataTable)
td.2011 <- filter(timesData.all, timesData.all$Year==2011)
td.2012 <- filter(timesData.all, timesData.all$Year==2012)
td.2013 <- filter(timesData.all, timesData.all$Year==2013)
td.2014 <- filter(timesData.all, timesData.all$Year==2014)
td.2015 <- filter(timesData.all, timesData.all$Year==2015)
td.2016 <- filter(timesData.all, timesData.all$Year==2016)
# For 2016: split each country's top-ranked university from the rest, average
# the metrics of each group, and join them into one comparison table.
# (td.2016 is sorted by rank, so school_name[1] is the best-ranked per country.)
firstRankedUniPerCounty <- ddply(td.2016, .(country),
                                 function (x) as.character(x$school_name[1]))
names(firstRankedUniPerCounty)[2] <- "school_name"
theRest <- subset(td.2016,
                  !td.2016$school_name %in% firstRankedUniPerCounty$school_name)
# Per-country mean metrics for the non-top universities.
# NOTE(review): the unnamed `summarize` argument is silently dropped by
# plyr::mutate — it has no effect here.
theRest.avg <- ddply(theRest,.(country),mutate,
                     summarize,researchMean2=mean(research_new,na.rm=TRUE),
                     teachingMean2=mean(teaching_new,na.rm=TRUE),
                     internationalMean2 = mean(international_new,na.rm=TRUE),
                     citationsMean2 = mean(citations_new,na.rm=TRUE),
                     incomeMean2 = mean(as.numeric(income_new) ,na.rm=TRUE),
                     overallMean2 = mean(overall_new,na.rm=TRUE))
avgResult <- distinct(select(theRest.avg,teachingMean2,internationalMean2,citationsMean2,incomeMean2,overallMean2, researchMean2,country))
firstRankedUniPerCounty_allinfo <-
  filter(select(td.2016,rank:overall_new),
         td.2016$school_name %in% firstRankedUniPerCounty$school_name)
# Same mean computation for the top universities (one row per country, so the
# means equal the values themselves).
firstRanked.avg <- ddply(firstRankedUniPerCounty_allinfo,.(country),mutate,
                         summarize,researchMean2=mean(research_new,na.rm=TRUE),
                         teachingMean2=mean(teaching_new,na.rm=TRUE),
                         internationalMean2 = mean(international_new,na.rm=TRUE),
                         citationsMean2 = mean(citations_new,na.rm=TRUE),
                         incomeMean2 = mean(income_new),
                         overallMean2 = mean(overall_new))
firstRanked <- select(firstRanked.avg,school_name,country,Year,rank_int,ranking_range,
                      teaching_new,research_new,
                      international_new,citations_new,income_new,overall_new,lon,lat)
# Join top-university metrics with the rest-of-country averages, then
# round-trip through disk.
firstRanked.theRest <- join(firstRanked,avgResult, by="country")
write.table(file="firstRankedandtheRest.csv", firstRanked.theRest)
firstRanked.theRestTbl <- read.table("firstRankedandtheRest.csv", stringsAsFactors = FALSE)
firstRanked.theRest <- data.frame(firstRanked.theRestTbl)
school.country_lookup <- read.csv("school_and_country_table.csv")
|
945b2d148d6163829da6c2527579a2d13d369465
|
4435fed21e4ea4b9b1f0a562e6cff9bfb2f72471
|
/R/rwa.R
|
48a34ff3b051e069f0069ed84e5deef539c1aef5
|
[] |
no_license
|
martinctc/rwa
|
73eaef307a1522a3932f553a6fa6c9ceaccadb54
|
7980b82aef19e276d3f38ccb45f38b5bcd8dd0d3
|
refs/heads/master
| 2021-06-17T03:09:23.677470
| 2021-03-06T23:17:35
| 2021-03-06T23:17:35
| 174,752,587
| 9
| 5
| null | 2021-02-27T21:46:13
| 2019-03-09T22:23:36
|
R
|
UTF-8
|
R
| false
| false
| 5,129
|
r
|
rwa.R
|
#' @title Create a Relative Weights Analysis (RWA)
#'
#' @description This function creates a Relative Weights Analysis (RWA) and returns a list of outputs.
#' RWA provides a heuristic method for estimating the relative weight of predictor variables in multiple regression, which involves
#' creating a multiple regression with on a set of transformed predictors which are orthogonal to each other but
#' maximally related to the original set of predictors.
#' `rwa()` is optimised for dplyr pipes and shows positive / negative signs for weights.
#'
#' @details
#' `rwa()` produces raw relative weight values (epsilons) as well as rescaled weights (scaled as a percentage of predictable variance)
#' for every predictor in the model.
#' Signs are added to the weights when the `applysigns` argument is set to `TRUE`.
#' See https://relativeimportance.davidson.edu/multipleregression.html for the original implementation that inspired this package.
#'
#' @param df Data frame or tibble to be passed through.
#' @param outcome Outcome variable, to be specified as a string or bare input. Must be a numeric variable.
#' @param predictors Predictor variable(s), to be specified as a vector of string(s) or bare input(s). All variables must be numeric.
#' @param applysigns Logical value specifying whether to show an estimate that applies the sign. Defaults to `FALSE`.
#' @param plot Logical value specifying whether to plot the rescaled importance metrics.
#'   NOTE(review): this argument is currently accepted but never referenced in
#'   the function body, so it has no effect.
#'
#' @return `rwa()` returns a list of outputs, as follows:
#' - `predictors`: character vector of names of the predictor variables used.
#' - `rsquare`: the rsquare value of the regression model.
#' - `result`: the final output of the importance metrics.
#'   - The `Rescaled.RelWeight` column sums up to 100.
#'   - The `Sign` column indicates whether a predictor is positively or negatively correlated with the outcome.
#' - `n`: indicates the number of observations used in the analysis.
#' - `lambda`: the symmetric square root of the predictor correlation matrix,
#'   used to construct the orthogonal counterparts of the predictors.
#' - `RXX`: Correlation matrix of all the predictor variables against each other.
#' - `RXY`: Correlation values of the predictor variables against the outcome variable.
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr drop_na
#' @importFrom stats cor
#' @import dplyr
#' @examples
#' library(ggplot2)
#' rwa(diamonds,"price",c("depth","carat"))
#'
#' @export
rwa <- function(df,
outcome,
predictors,
applysigns = FALSE,
plot = TRUE){
  # Gets data frame in right order and form
  # (outcome first, then predictors; rows with a missing outcome are dropped).
  thedata <-
    df %>%
    dplyr::select(outcome,predictors) %>%
    tidyr::drop_na(outcome)
  numVar <- NCOL(thedata) # Output - number of variables
  # Pairwise-complete correlations; all-NA columns and NA rows are removed.
  cor_matrix <-
    cor(thedata, use = "pairwise.complete.obs") %>%
    as.data.frame(stringsAsFactors = FALSE, row.names = NULL) %>%
    remove_all_na_cols() %>%
    tidyr::drop_na()
  matrix_data <-
    cor_matrix %>%
    as.matrix()
  RXX <- matrix_data[2:ncol(matrix_data), 2:ncol(matrix_data)] # Only take the correlations with the predictor variables
  RXY <- matrix_data[2:ncol(matrix_data), 1] # Take the correlations of each of the predictors with the outcome variable
  # Get all the 'genuine' predictor variables
  Variables <-
    cor_matrix %>%
    names() %>%
    .[.!=outcome]
  RXX.eigen <- eigen(RXX) # Compute eigenvalues and eigenvectors of matrix
  D <- diag(RXX.eigen$val) # Run diag() on the values of eigen - construct diagonal matrix
  delta <- sqrt(D) # Take square root of the created diagonal matrix
  lambda <- RXX.eigen$vec %*% delta %*% t(RXX.eigen$vec) # Matrix multiplication; symmetric square root of RXX
  lambdasq <- lambda ^ 2 # Square the result
  # To get partial effect of each independent variable on the dependent variable
  # We multiply the inverse matrix (RXY) on the correlation matrix between dependent and independent variables
  beta <- solve(lambda) %*% RXY # Solve numeric matrix containing coefficients of equation (Ax=B)
  rsquare <- sum(beta ^ 2) # Output - R Square, sum of squared values
  RawWgt <- lambdasq %*% beta ^ 2 # Raw Relative Weight
  import <- (RawWgt / rsquare) * 100 # Rescaled Relative Weight (percent of R-square)
  beta %>% # Get signs from coefficients
    as.data.frame(stringsAsFactors = FALSE, row.names = NULL) %>%
    dplyr::mutate_all(~(dplyr::case_when(.>0~"+",
                                         .<0~"-",
                                         .==0~"0",
                                         TRUE~NA_character_))) %>%
    dplyr::rename(Sign="V1")-> sign
  result <- data.frame(Variables,
                       Raw.RelWeight = RawWgt,
                       Rescaled.RelWeight = import,
                       Sign = sign) # Output - results
  # Number of rows complete in every selected column (reported as `n`).
  nrow(drop_na(thedata)) -> complete_cases
  if(applysigns == TRUE){
    result %>%
      dplyr::mutate(Sign.Rescaled.RelWeight = ifelse(Sign == "-",
                                                     Rescaled.RelWeight * -1,
                                                     Rescaled.RelWeight)) -> result
  }
  list("predictors" = Variables,
       "rsquare" = rsquare,
       "result" = result,
       "n" = complete_cases,
       "lambda" = lambda,
       "RXX" = RXX,
       "RXY" = RXY)
}
|
cf8ea26efc1d14de3acc28683e9657ade95a24c9
|
0b3a125509386bce962297493a52032d39f2e68d
|
/00_scripts/popfreq2bayescan.R
|
4974c1619774c830403755a3dcff0657ffc6abf6
|
[] |
no_license
|
kylewellband/ssa_cast2016
|
5748165e184f8920cfe3e2fe5c76db386153cc5f
|
00fddb9a1d0184c7223c2b986d7d0451c16f5423
|
refs/heads/master
| 2020-03-20T06:33:59.701058
| 2018-06-14T18:08:46
| 2018-06-14T18:08:46
| 137,253,074
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
popfreq2bayescan.R
|
#!/usr/bin/env Rscript
# popfreq2bayescan.R
# by: Kyle Wellband
# Function takes a population MAF file as produced by PLINK using --freq and
# either --family or --within <groupfile> options and converts to bayescan input
#
# Usage: popfreq2bayescan.R <file.frq.strat>
# Writes <file.bayescan> next to the input file.
args <- commandArgs(trailingOnly = T)
freq.name <- args[1]
# fixed = TRUE: treat ".frq.strat" as a literal suffix, not a regex
# (unescaped "." would match any character).
outfile.name <- sub(".frq.strat", ".bayescan", freq.name, fixed = TRUE)
# stringsAsFactors = TRUE is required: the code below relies on SNP and CLST
# being factors (nlevels()/levels()). Since R 4.0 read.table() defaults to
# stringsAsFactors = FALSE, which would silently give nloci == 0 and no pops.
freq <- read.table(freq.name, header = T, stringsAsFactors = TRUE)
nloci <- nlevels(freq$SNP)
pops <- levels(freq$CLST)
# One 5-column matrix per population:
# locus index | n chromosomes observed | ploidy (2) | major allele count | minor allele count
bayescan_data <- list()
for(i in 1:length(pops)) {
  bayescan_data[[i]] <- matrix(nrow = nloci, ncol = 5)
  bayescan_data[[i]][,1] <- 1:nloci
  bayescan_data[[i]][,2] <- freq$NCHROBS[freq$CLST == pops[i]]
  bayescan_data[[i]][,3] <- 2
  bayescan_data[[i]][,4] <- freq$NCHROBS[freq$CLST == pops[i]] - freq$MAC[freq$CLST == pops[i]]
  bayescan_data[[i]][,5] <- freq$MAC[freq$CLST == pops[i]]
}
# Assemble the BayeScan text layout: header lines, then one "[pop]=i" section
# per population followed by its per-locus rows and a blank separator line.
out_vec <- list() # character(4+(3*length(pops))+(nloci*length(pops)))
out_vec[1:4] <- c(paste0("[loci]=",nloci), "", paste0("[populations]=",length(pops)), "")
out_vec[seq(1,length(pops)*3, 3)+4] <- paste0("[pop]=", 1:length(pops))
out_vec[seq(1,length(pops)*3, 3)+6] <- ""
sub <- seq(1,length(pops)*3, 3)+5
for(i in 1:length(pops)) {
  out_vec[[sub[i]]] <- apply(bayescan_data[[i]], 1, paste, collapse = " ")
}
write.table(unlist(out_vec), outfile.name, row.names = F, col.names = F, quote = F)
|
039483de912a9a606ce0cc83082cda101afd4e18
|
5115552f03dce9e781cb354621981d782f4b2e55
|
/scripts/covariates.R
|
b6304d0be74271e7700ac45a6392fca71df1cdaf
|
[] |
no_license
|
guscastles/practical_machine_learning_in_R
|
495e7fb67dc81344f2b25b8cb8cdbb1275af5466
|
9e84d08bc92556801883116668f9f1130df8a5f2
|
refs/heads/master
| 2020-06-26T13:50:30.387730
| 2019-07-30T12:40:28
| 2019-07-30T12:40:28
| 199,649,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 770
|
r
|
covariates.R
|
# Fit a spline model of wage on age, then a GLM of wage on all covariates
# (with dummy variables), and evaluate prediction error on a held-out set.
library(ISLR)
library(caret)
library(splines)
# create_sets(), create_set_with_dummies() and remove_near_zero_var_variables()
# are defined in this helper file.
source("covariates_functions.R")
data(Wage)
datasets <- create_sets(Wage)
training <- datasets[[1]]
testing <- datasets[[2]]
trainingWithDummies <- remove_near_zero_var_variables(create_set_with_dummies(training))
# Cubic B-spline basis (3 df) of age; visualise the spline fit over the data.
bsBasis <- bs(training$age, df=3)
lm1 <- lm(wage ~ bsBasis, data=training)
plot(training$age, training$wage, pch=19, cex=0.5)
points(training$age, predict(lm1, newdata=training), col="red", pch=19, cex=0.5)
# GLM on the dummy-encoded training set; predict on the equally-processed
# test set. NOTE(review): [-12] drops the 12th column — presumably the
# outcome column; confirm against create_set_with_dummies() output.
modelFit <- train(wage ~ ., data=trainingWithDummies, method="glm")
prediction <- predict(modelFit, remove_near_zero_var_variables(create_set_with_dummies(testing))[-12])
# Root-mean-squared error and median absolute deviation of the residuals.
RMSE(pred = as.numeric(prediction), obs = unlist(testing['wage']))
mad(x = as.numeric(prediction) - unlist(testing['wage']))
|
57453f9cc0dee770eae82145e7b18ecfceadd310
|
beb6e2ea0b24d7c312e5adfa28bfe91c81da524f
|
/prep/melt.R
|
62ee235f7e1cecb46f7cad09f16a9841ddd08512
|
[] |
no_license
|
docsteveharris/collab-norad
|
98acb7399811242b00541272c6e61f4fc5c3ad0e
|
7736d92f8e656ad6ce486ca527a6ec9f4a604ba3
|
refs/heads/master
| 2021-01-14T08:36:32.373462
| 2016-05-03T17:36:02
| 2016-05-03T17:36:02
| 27,314,616
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,428
|
r
|
melt.R
|
# Melt variables available by time into long form
# NOTE(review): rm(list=ls()) wipes the interactive workspace of whoever
# sources this script — generally discouraged in shared scripts.
rm(list=ls(all=TRUE))
require(data.table)
require(assertthat)
load(file='../data/cleaned.Rdata')
wdt.original <- wdt
wdt$sample_N <- 1
wdt$all <- 1
wdt[, all := factor(all, label=("All patients"))]
str(wdt)
# Guard: the cleaned data is expected to contain exactly 736 patients.
assert_that(nrow(wdt)==736)
names(wdt)
# Prepare hospital name sorted by mean mortality
m <- wdt[, .(s = mean(mort.itu, na.rm=TRUE)), by=hosp]
setorder(m, s)
m[, mort.itu.order := .I ]
# Ordered factor: level order encodes the mortality ranking, labels are hospital names.
m[, mort.itu.order := factor(mort.itu.order, labels=m$hosp, ordered=TRUE)]
setkey(wdt,hosp)
wdt <- wdt[m[,.(hosp,mort.itu.order)]]
# Reshape the time-stamped variables (suffix .1 = hour 1, .24 = hour 24)
# from wide to long form.
wdt.melt <- melt(wdt[,list(hosp, id.hosp, mort.itu.order,
ne.1, ne.24, map.1, map.24, hr.1, hr.24,
sedation.1, sedation.24,
lac.1, lac.24,
pf.1, pf.24,
sofa.1, sofa.24,
fin.24, fb.24
)], id.vars=c('hosp', 'id.hosp', 'mort.itu.order'))
# TODO: 2014-11-30 - [ ] need to convert melt table back to data.table
wdt.melt
wdt.melt[,variable := as.character(variable)]
# Extract var and then time: split "ne.24" into var = "ne" and time = "24".
wdt.melt[, var := gsub("([a-z]+)\\.([0-9]+)", "\\1", variable, perl=TRUE)]
wdt.melt[, time := gsub("(\\w+)\\.(\\d+)", "\\2", variable, perl=TRUE)]
table(wdt.melt$var)
table(wdt.melt$time)
# Drop old variable
wdt.melt[, variable := NULL]
wdt.melt
# Convert back to one row per patient/time with one column per variable.
wdt.long <- dcast.data.table(wdt.melt, hosp + id.hosp + mort.itu.order + value + time ~ var)
setorder(wdt.long, mort.itu.order)
wdt.long
save(wdt.long, file='../data/clean_long.RData')
|
282cf82c0b8dc3ec61f4188271e60dbec749f0d7
|
19970618f09c0b5216c48b8b82383aaf2a9f3778
|
/lectures/08/install.R
|
c11311e6a8dcfe935eae8cf3fad0843a40a611be
|
[] |
no_license
|
ellieevs/BIOS512
|
5867df4aa896313bda237dad448170983a086662
|
75181e9e08c6d3ea0503b60e8adec611d81ec7d2
|
refs/heads/main
| 2023-07-11T07:03:42.227904
| 2021-08-26T14:42:04
| 2021-08-26T14:42:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 61
|
r
|
install.R
|
# Install the ggrepel plotting package from the RStudio CRAN mirror.
cran_mirror <- "http://cran.rstudio.com"
install.packages("ggrepel", repos = cran_mirror)
|
182569c75927b08d5eb5be8f8021979d3d5dc7f9
|
9484b93da2ee67dd0989745745b0ab3ce4567324
|
/code/old/Run_SRN_Adenoma_Comparisons.R
|
81e01e0a99bf47defbc7824e7c800834523d19c3
|
[
"MIT"
] |
permissive
|
SchlossLab/Sze_FollowUps_Microbiome_2017
|
0d8d2abefbec52914d0fce24c3f977d266583913
|
8ef69b722a0610d27ef898fa9ea0ab1385f6807b
|
refs/heads/master
| 2021-01-18T23:39:58.668847
| 2017-09-16T20:37:43
| 2017-09-16T20:37:43
| 46,723,430
| 1
| 6
| null | 2017-09-16T20:37:44
| 2015-11-23T13:53:29
|
HTML
|
UTF-8
|
R
| false
| false
| 3,610
|
r
|
Run_SRN_Adenoma_Comparisons.R
|
### Prediction of Follow Ups Analysis
### How do the adenomas and SRN specifically compare against each other
### P-value tables for results of SRN versus Adenoma in with and without FIT models
## Marc Sze
###Load needed Libraries and functions
source('code/functions.R')
source('code/graphFunctions.R')
loadLibs(c("dplyr", "tidyr", "ggplot2", "reshape2",
"gridExtra", "scales", "wesanderson"))
# Read in data tables
good_metaf <- read.csv("results/tables/mod_metadata/good_metaf_final.csv",
stringsAsFactors = F, header = T)
rf_prediction_summary <- read.csv(
'results/tables/rf_prediction_summary.csv', header = T)
aucrf_model_cutoff <- read.csv('results/tables/aucrf_model_cutoffs.csv',
header = T, row.names = 1)
# Compare differences between SRN and adenoma positive probability decrease
# Add an extra column for definitions to the rf summary table
# (rep(..., 4): the summary stacks 2 models x 2 sample types = 4 copies
# of the metadata rows, in the same patient order).
rf_prediction_summary$Dx_Bin <- rep(good_metaf$Dx_Bin, 4)
rf_prediction_summary$EDRN <- rep(good_metaf$EDRN, 4)
# Create variable vectors to cycle through during the for loop
model_used <- c("wfit", "wofit")
samples <- c("initial", "followup")
# 2x4 results table: one row per model, one column per comparison.
srn_compare_pvalue_summary <- as.data.frame(matrix(
nrow = 2, ncol = 4, dimnames = list(
rows = c("wfit", "wofit"),
cols = c("prob_init", "prob_follow", "change", "proportion"))))
# Difference in probabilities between SRN and Adenoma Initials and Follow Ups
# (Wilcoxon rank-sum test of positive probability, SRN vs adenoma).
for(i in 1:length(model_used)){
for(j in 1:length(samples)){
srn_compare_pvalue_summary[i, j] <- wilcox.test(
filter(rf_prediction_summary,
model == paste(model_used[i]),
sample_type == paste(samples[j]),
Dx_Bin == "adv_adenoma")[, "postive_probability"],
filter(rf_prediction_summary,
model == paste(model_used[i]),
sample_type == paste(samples[j]),
Dx_Bin == "adenoma")[, "postive_probability"])$p.value
}
}
#Difference in change in probabilities between SRN and Adenoma
# (Wilcoxon test on the initial-minus-followup probability change).
for(i in 1:length(model_used)){
srn_compare_pvalue_summary[i, "change"] <- wilcox.test(
filter(rf_prediction_summary,
model == paste(model_used[i]),
sample_type == "initial",
Dx_Bin == "adv_adenoma")[, "postive_probability"] -
filter(rf_prediction_summary,
model == paste(model_used[i]),
sample_type == "followup",
Dx_Bin == "adv_adenoma")[, "postive_probability"],
filter(rf_prediction_summary,
model == paste(model_used[i]),
sample_type == "initial",
Dx_Bin == "adenoma")[, "postive_probability"] -
filter(rf_prediction_summary,
model == paste(model_used[i]),
sample_type == "followup",
Dx_Bin == "adenoma")[, "postive_probability"])$p.value
}
#Difference in proportion above and below cutoff between SRN and Adenoma
# (Fisher's exact test via the project helper makeANDfish_2by2, excluding cancers).
for(i in 1:length(model_used)){
srn_compare_pvalue_summary[i, "proportion"] <-
makeANDfish_2by2(
select(rf_prediction_summary, -diagnosis) %>%
rename(diagnosis = Dx_Bin), c("initial", "followup"),
c("Yes", "No"), aucrf_model_cutoff,
model = FALSE, model_sample_type = NULL,
model_type = paste(model_used[i]), remove_sample = "cancer")
}
# Write out updated rf_prediction table
# Write out SRN comparison P-value summary
write.csv(rf_prediction_summary,
"results/tables/rf_prediction_summary.csv", row.names = FALSE)
write.csv(srn_compare_pvalue_summary,
"results/tables/adn_vs_srn_pvalue_summary.csv")
|
03addab32144d1b49f33cfc2d2685188ef3c4ec5
|
3116a79d9328de9f6fcefa97457e5b5f1fe0f6be
|
/R/main_test.R
|
3f65861e2ba5387542e2fcc9e5caaa92447a15c5
|
[
"MIT"
] |
permissive
|
klausfrieler/RAT
|
26679d8d61445e6435c37f031ed99406ebd10bf9
|
2631435c5488defadc4afab80fc97413bad92a42
|
refs/heads/master
| 2023-04-26T17:10:11.032396
| 2023-04-21T14:13:40
| 2023-04-21T14:13:40
| 172,775,138
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,627
|
r
|
main_test.R
|
# Row indices of items eligible to be presented first:
# items of type "8" with exactly 4 bit flips.
get_eligible_first_items_RAT <- function(){
  item_bank <- RAT::RAT_item_bank
  which(item_bank$type == "8" & item_bank$bit_flips == 4)
}
# Look up the lure (distractor) patterns for one item.
#
# item_id: a length-one character item identifier.
# Returns the "lures" column of RAT::RAT_lures_bank for the matching rows;
# stops with an informative error for any other input.
get_lures <- function(item_id){
  if(purrr::is_scalar_character(item_id)){
    return(RAT::RAT_lures_bank[RAT::RAT_lures_bank$item_id == item_id, ] %>% pull("lures"))
  }
  # Bug fix: the original called printf(), which does not exist in R, so the
  # error path itself errored; sprintf() produces the intended message.
  stop(sprintf("Invalid item id %s", item_id))
}
# Assemble the adaptive RAT test as a psychTestRCAT adapt_test element.
#
# label: label under which results are stored.
# audio_dir / img_dir: locations of the stimulus media, forwarded to show_item().
# num_items: fixed test length (used as the stopping rule).
# next_item.* / final_ability.* / constrain_answers: adaptive-testing options
#   forwarded to RAT_options() (defined elsewhere in this package).
main_test <- function(label, audio_dir, img_dir, num_items,
                      next_item.criterion,
                      next_item.estimator,
                      next_item.prior_dist = next_item.prior_dist,
                      next_item.prior_par = next_item.prior_par,
                      final_ability.estimator,
                      constrain_answers) {
  item_bank <- RAT::RAT_item_bank
  psychTestRCAT::adapt_test(
    label = label,
    item_bank = item_bank,
    show_item = show_item(audio_dir, img_dir),
    stopping_rule = psychTestRCAT::stopping_rule.num_items(n = num_items),
    opt = RAT_options(next_item.criterion = next_item.criterion,
                      next_item.estimator = next_item.estimator,
                      next_item.prior_dist = next_item.prior_dist,
                      next_item.prior_par = next_item.prior_par,
                      final_ability.estimator = final_ability.estimator,
                      constrain_answers = constrain_answers,
                      eligible_first_items = get_eligible_first_items_RAT(),
                      item_bank = item_bank)
  )
}
# Factory returning the item-display callback used by adapt_test().
# The returned closure receives one item-bank row and renders it as a
# RAT_item page (defined elsewhere in this package).
show_item <- function(audio_dir, img_dir) {
  function(item, ...) {
    # Defensive check: exactly one item-bank row of class "item".
    stopifnot(is(item, "item"), nrow(item) == 1L)
    item_number <- psychTestRCAT::get_item_number(item)
    num_items_in_test <- psychTestRCAT::get_num_items_in_test(item)
    messagef("Showing item %s", item_number)
    RAT_item(
      label = paste0("q", item_number),
      pattern = item$pattern,
      lures = get_lures(item$item_id),
      answer = item$answer,
      prompt = get_prompt(item_number, num_items_in_test),
      img_dir = img_dir,
      audio_dir = audio_dir,
      save_answer = TRUE,
      get_answer = NULL,
      on_complete = NULL,
      instruction_page = FALSE
    )
  }
}
# Build the header/prompt UI shown above each question.
#
# item_number: 1-based index of the current question.
# num_items_in_test: total test length, or NULL when unknown (shown as "?").
#
# Returns a shiny div: a centred h4 header (localised PAGE_HEADER) followed
# by the localised PROMPT paragraph.
get_prompt <- function(item_number, num_items_in_test) {
  shiny::div(
    shiny::h4(
      psychTestR::i18n(
        "PAGE_HEADER",
        sub = list(num_question = item_number,
                   test_length = if (is.null(num_items_in_test))
                     "?" else
                       num_items_in_test)),
      # Bug fix: the CSS property is "text-align"; the original "text_align"
      # is invalid and silently ignored by browsers.
      style = "text-align:center"
    ),
    shiny::p(
      psychTestR::i18n("PROMPT"),
      style = "margin-left:20%;margin-right:20%;text-align:justify")
  )
}
|
8425e2a7d9dc7dda361fe34a349a350201459c5c
|
2f6f1979ad470d065fd34f43f3338ce0fe2e490e
|
/density_finalstate.R
|
cd4a2e1ad46ef25a35ce7b09c2047983e8e567ed
|
[] |
no_license
|
jintangxin/collective-decisions
|
7c4b2c90b5624cc60d298ec6aa2e27eef39c5f5d
|
4c7466d34c3cd4f48b9bc92f395f61af3f143c3a
|
refs/heads/master
| 2021-01-01T04:16:53.910464
| 2017-11-29T21:41:58
| 2017-11-29T21:41:58
| 97,156,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,161
|
r
|
density_finalstate.R
|
########################simulate to get final states for all neurons###########################################
require(plotly) # for nice plot at the end
# TRACK COMPUTING TIME OF THIS SIMULATION
ptm_total <- proc.time()
# PARAMETERS
total_time <- 810 #1620 #162 # in ms # 1000 # (BCD final value should be 1620 ms)
timesteps <- 500 #500 #100 # 1000
dt <- total_time/timesteps
time_silence <- 810 #81 # in ms # 500 # (BCD final value should be 810 ms)
# Timestep index after which the input signal is switched off.
timestep_silence <- time_silence/total_time*timesteps
num_sims <- 100 # 2 #30 # number of simulations for each 'pixel'
num_s <- 3 # number of different input signals we are looking at
s <- seq(-0.1,0.1,length.out=num_s) # input signals
#s <- 0
M <- 10 # number of individual neurons considered in this analysis
tau <- rep(10,M) # in ms
# BCD create list of times for plots
times <- array(1:timesteps)*dt
# connectivity matrix (!fixed across simulations)
gamma <- 0 # .03, fixed gamma, used in building cij (connectivity heterogeneity / noise)
# With gamma == 0 this noise matrix is all zeros.
cijnoise <- matrix(rnorm(M^2,0,gamma),nrow=M,ncol=M) #matrix(c_avg+rnorm(M^2,0,gamma),nrow=M,ncol=M) # are these row/columns correct?
# Make the matrix symmetrical, so that cij[a,b] == cij[b,a], and cij[a,a]==0:
cijnoise[lower.tri(cijnoise)] = t(cijnoise)[lower.tri(cijnoise)]
diag(cijnoise)<-0
# FUNCTIONS
# Firing-rate transfer function: a plain tanh nonlinearity.
# (Author's historical note: changed from (1 - tanh(x)).)
g <- function(x) tanh(x)
# Readout function. Originally intended as an LDA conversion (formula kept
# below for reference) but currently just the mean of the neuron states.
# -.5*log(v%*%Cpos%*%v) - (((r-mupos)%*%v)^2)/(2*v%*%Cpos%*%v) + .5*log(v%*%Cneg%*%v) + (((r-muneg)%*%v)^2)/(2*v%*%Cneg%*%v)
L <- function(r) mean(r)
# xf[k, sim]: mean final-state over all M neurons, per input signal k and trial.
xf <- array(0,dim=c(num_s,num_sims))
for (k in 1:num_s){
#
# simulate over different input signals
#
c_range <- c(1.,1.2) # ! Put min and max c here 0,1.4
cap_gamma_range <- c(1.,2.0) # ! Put min and max capital gamma values here 0,5
resolution <- 1 # how many pixels wide/tall do you want the resulting graphic to be?
mat7a <- matrix(nrow=resolution, ncol=resolution)
ptm <- proc.time() # track the time for each iteration
# NOTE(review): comment says "use c=1.2" but the value assigned is 10 — confirm.
c_avg <- 10 #! here just use c=1.2
cbase <- matrix(rep(c_avg,M^2),nrow=M,ncol=M)
diag(cbase)<- 0
cij <- cijnoise + cbase
cap_gamma <- cap_gamma_range[2] #! here just use cap_gamma=2
# Initialize for this combination of c, capital gamma:
lr <- matrix(nrow=num_sims, ncol=M)
x <- array(0,dim=c(timesteps,M,num_sims))
dx_array <- array(0,dim=c(timesteps,M,num_sims))
# Loop through and compute data for each timestep, for (num_sims) trials:
# Euler–Maruyama integration of the coupled stochastic neuron dynamics.
for(sim in 1:num_sims) {
for(t in 1:timesteps) {
lastx <- if(t>1) x[(t-1),,sim] else rep(0,M) # set the INITIAL STATE of the neurons here, rep(0,M) normally
# NOTE that I'm using 1s here now, not 0s.
for(i in 1:M) {
# Drift: input s[k] (until silence), leak -x, coupling via cij and g();
# diffusion: Gaussian noise scaled by sqrt(dt * cap_gamma).
dx <- ((ifelse(t<timestep_silence,s[k],0) - lastx[i] + cij[i,]%*%g(lastx)/M)*dt + rnorm(1,0,sqrt(dt*cap_gamma)))/tau[i] #!!!! 2* for troubleshooting!
# /sum(cij[i,]) as a divisor for the sigma cij term (last term inside parentheses)
# right now trying M*c_avg..
x[t,i,sim]<- lastx[i] + dx
dx_array[t,i,sim]<- dx
}
}
}
# average x at final state over all neurons;
xf[k,] <- apply(x[timesteps,,],c(2),mean)
}
###############KL divergence using kernel density estimator
# Gaussian kernel density estimator with fixed bandwidth h = 1, evaluated at
# the points x from the sample `data`.
# Renamed from `density` to `kde_estimate`: the original name masked
# stats::density(), so the later one-argument plotting calls
# (e.g. lines(density(xf[5,]))) hit this two-argument function and failed.
kde_estimate <- function(x, data){
  h <- 1
  n <- length(x)
  y <- array(0,dim=n)
  for (i in 1:n){
    y[i] <- 1/h * mean(dnorm(x[i], mean=data, sd=h))
  }
  return(y)
}
# KL divergence KL(p || q) between the kernel density estimates of two
# samples, approximated on a fixed grid of 1000 points over [-10, 10].
kl_divergence <- function(data1, data2){
  x = seq(-10,10,length.out = 1000) #initial points when compute kl-divergence
  p <- kde_estimate(x, data1)
  q <- kde_estimate(x, data2)
  return(sum(p*log(p/q)))
}
# compute FIM by formula 5 in the draft.
# Central finite-difference estimate of the Fisher information at s[2].
FIM = (kl_divergence(xf[2,], xf[1,]) + kl_divergence(xf[2,], xf[3,]))/(s[2]-s[1])^2
############################plot##################
require(ggplot2)
# NOTE(review): with num_s == 3, xf has only 3 rows, so the xf[4,]..xf[9,]
# calls below are out of bounds; these s-value labels (-0.4..0.4) appear to
# come from an earlier 9-signal configuration — confirm before running.
plot(range(xf),c(0,5),type='n',main='distribution plot for different s', xlab='xf', ylab='density')
lines(density(xf[5,]), type='l', col='red') # s=0
lines(density(xf[1,]), type='l', col='black') # s= -0.4
lines(density(xf[2,]), type='l', col='yellow') # s= -0.3
lines(density(xf[3,]), type='l', col='blue') # s= -0.2
lines(density(xf[4,]), type='l', col='purple') # s= -0.1
lines(density(xf[6,]), type='l', col='green') # s= 0.1
lines(density(xf[7,]), type='l', col='cyan') # s= 0.2
lines(density(xf[8,]), type='l', col='coral') # s= 0.3
lines(density(xf[9,]), type='l', col='brown') # s= 0.4
###############################
# Spaghetti plot: the mean neuron state over time for every simulation.
plot(NULL,xlim=c(min(times),max(times)),ylim=c(-10,10), main = c("c_avg: ",c_avg,"cGAM: ",cap_gamma),xlab='time (ms)',ylab='mean neuron state')
# title(main = c("c_avg: ",c_avg,"cGAM: ",cap_gamma), ps=2)
for(i in 1:num_sims)
{points(times,rowMeans(x[,,i]),type="o", pch=".")} # prints out results of ea simulation, over time, for each pixel (the first simulation)
#############KL divergence using FNN
#require(FNN)
#KLMatrix <- array(0,dim=c(num_s,num_s))
#for (i in 1:num_s){
#  for (j in 1:num_s){
#    KLMatrix[i,j] <- KL.divergence(X=xf[i,],Y=xf[j,], k=5)[5]
#  }
#}
###############KL divergence using kernel density estimator
# NOTE(review): this section duplicates the density()/kl_divergence()
# definitions given earlier in the file; the local `density` also masks
# stats::density() for any later one-argument calls. Consider removing
# one of the two copies.
density <- function(x, data){
#density esimator from data using sd=h at new points x
h <- 1
n <- length(x)
y <- array(0,dim=n)
for (i in 1:n){
y[i] <- 1/h * mean(dnorm(x[i], mean=data, sd=h))
}
return(y)
}
kl_divergence <- function(data1, data2){
# compute kl-divergence between data1 and data2
# using their density estimation
x = seq(-10,10,length.out = 1000) #initial points when compute kl-divergence
p <- density(x, data1)
q <- density(x, data2)
return(sum(p*log(p/q)))
}
# initialize a matrix to store kl_divergence between different s
# Pairwise KL divergences of the final-state distributions for all signals.
KLMatrix <- array(0,dim=c(num_s,num_s))
for (i in 1:num_s){
for (j in 1:num_s){
KLMatrix[i,j] <- kl_divergence(xf[i,],xf[j,])
}
}
# Fisher-information estimate (formula 5), repeated from the earlier section.
FIM = (kl_divergence(xf[2,], xf[1,]) + kl_divergence(xf[2,], xf[3,]))/(s[2]-s[1])^2
|
4b75f2be1ded60c2cfd74055195dad68ecb8ce28
|
473dfd3f5c89fd2bf2087c524c52e484ecc823b6
|
/tests/testthat/test-halton.indicies.CRT.R
|
9e82d1b7db1072589b8860608fb344e2ba9444eb
|
[] |
no_license
|
cran/SDraw
|
038ec0a0f2d8a094f89d258d43edb15a003303b2
|
0b06c5ecbd424a0d9ba59fe5fd4f4bf30a1ce326
|
refs/heads/master
| 2021-01-17T19:20:33.351896
| 2020-07-03T15:20:09
| 2020-07-03T15:20:09
| 60,879,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
test-halton.indicies.CRT.R
|
# Unit tests for halton.indices.CRT() (SDraw package).
df <- data.frame(x=(0:100)/101, y = 0.2)
context("Test the halton.indices.CRT function")
test_that("halton.indices.CRT() operates appropriately", {
  ##Check that error catch for J = exponents operates as it should
  # Bug fix: the expected-message string was inside the halton.indices.CRT()
  # call (a stray third argument) instead of being expect_error()'s regexp;
  # the parenthesis has been moved so the message is actually checked.
  expect_error(halton.indices.CRT(df, c(1, 2)),
"number of boxes in one or more dimensions is not an integer power of bases. Check n.boxes and b.")
  ##Make sure that n.boxes only accepts vector arguments
  expect_error(halton.indices.CRT(df, 16),
"number of boxes in one or more dimensions is not an integer power of bases. Check n.boxes and b.")
  expect_length(halton.indices.CRT(df, c(16, 9)), 101)
  expect_type(halton.indices.CRT(df, c(16, 9)), "double")
  ##Verify that output structure is the same as what is given in the original example
  expect_identical(halton.indices.CRT(df, c(16,9))[1:7], c(48, 48, 48, 48, 48, 48, 48))
  expect_identical(halton.indices.CRT(df, c(16,9))[80:85], c(3, 3, 3, 3, 75, 75))
})
|
5cd31323bbfe11dc6fd624e7b030eea8ea4d2889
|
d13066f005a224a0a3650b27317aa27d63039e46
|
/cachematrix.R
|
2fe3ed2f2c0efaaf4bdc691746e57249cf239ad8
|
[] |
no_license
|
datadotvisible/ProgrammingAssignment2
|
3f6c65c7ad727605107da607533dd61ae8b7401d
|
284a32c28f3a5276565dbb6bd50cb6d57da53b7b
|
refs/heads/master
| 2021-01-18T10:51:27.188178
| 2015-03-20T22:09:24
| 2015-03-20T22:09:24
| 32,603,432
| 0
| 0
| null | 2015-03-20T19:42:53
| 2015-03-20T19:42:53
| null |
UTF-8
|
R
| false
| false
| 1,247
|
r
|
cachematrix.R
|
## makeCacheMatrix wraps a matrix in a closure-based cache object. The
## returned list exposes four accessors sharing the enclosing environment:
##   set(y)        - replace the matrix and invalidate the cached inverse
##   get()         - return the stored matrix
##   setmatrix(solve) - store a computed inverse in the cache
##   getmatrix()   - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, so drop the stale inverse
    },
    get = function() x,
    setmatrix = function(solve) cached_inverse <<- solve,
    getmatrix = function() cached_inverse
  )
}
## cacheSolve returns the inverse of the matrix held in a makeCacheMatrix
## object `x`. If the inverse is already cached it is returned directly;
## otherwise it is computed with solve(), stored back via x$setmatrix(),
## and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (is.null(cached)) {
    # Cache miss: compute the inverse once and remember it.
    cached <- solve(x$get(), ...)
    x$setmatrix(cached)
  }
  cached
}
|
7d2c1cfab157ec362b19f396dac25430b725363a
|
ef673026d38bc94e7d6b9d2a47027d5616c17c29
|
/R/book_list.R
|
431371e0d7ecc2a78da100e85572250fa070ff21
|
[] |
no_license
|
Jjohn987/ctextclassics
|
5acd8709f73821e6980a38a6a35af938dd87f5ae
|
58285741b7b092ebac42ba14a99aa3d4544a87ba
|
refs/heads/master
| 2020-03-17T21:31:37.626366
| 2018-05-21T15:58:48
| 2018-05-21T15:58:48
| 133,962,724
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
book_list.R
|
#' @title Ctext.org Book List
#' @description An internal dataframe used for the get_text and get_books functions for downloading
#' books from the ctext.org API. Contains information for name of book, chapter, chapter number, and genre.
#' Includes over 130 different books available on ctext.org.
#' @name book_list
#' @docType data
#' @usage book_list
#' available books <- unique(book_list$book)
#' @format data.frame object.
#' @keywords datasets
NULL
|
4094621c2e9ced3fa1c50f602703dd6f954ab320
|
20bcf6fac59ff712437755a39c815059498d353c
|
/man/IDSDS.Rd
|
e30160efe5b19bc10af09083a190859d82f66799
|
[
"MIT"
] |
permissive
|
indrag49/QGameTheory
|
7175ba824cef339080729d8bbee77f6f7b6fda6c
|
5adca439bd2e1129d24f57538be581d28664cfc5
|
refs/heads/master
| 2023-08-20T12:42:12.021132
| 2021-09-24T11:53:18
| 2021-09-24T11:53:18
| 264,378,364
| 13
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 973
|
rd
|
IDSDS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IDSDS.R
\name{IDSDS}
\alias{IDSDS}
\title{Iterated Deletion of Strictly Dominated Strategies algorithm}
\usage{
IDSDS(P1, P2)
}
\arguments{
\item{P1}{Payoff matrix to Alice}
\item{P2}{Payoff matrix to Bob}
}
\value{
A list consisting of the equilibrium strategies based on the rationality of the players by application of the IDSDS algorithm on \code{P1} and \code{P2}.
}
\description{
This function applies the IDSDS algorithm to obtain the equilibrium strategies based on the rationality of the players. The input parameters are equal-dimensional payoff matrices for the first and the second players.
}
\examples{
init()
Alice <- matrix(c(8, 0, 3, 3, 2, 4, 2, 1, 3), ncol=3, byrow=TRUE)
Bob <- matrix(c(6, 9, 8, 2, 1, 3, 8, 5, 1), ncol=3, byrow=TRUE)
IDSDS(Alice, Bob)
}
\references{
\url{https://arxiv.org/abs/1512.06808}\cr
\url{https://en.wikipedia.org/wiki/Strategic_dominance}\cr
}
|
8be831e12d90316ccf4689c7ac768c6d47049d59
|
21317fc6ac471065e8316dc730e1e6090903dd8b
|
/pageview_timestamps.R
|
515ce0c41ec62d6f74b313363008a004aac8ab7d
|
[] |
no_license
|
tmuhimbisemoses/CRANtings
|
9428c9e95eec9231dc75328fdaea675671060ae4
|
d6d70634cf1fd0ce4aa67e9f519b6e9a2b2438cc
|
refs/heads/master
| 2023-06-17T12:30:05.097064
| 2021-07-13T19:02:49
| 2021-07-13T19:02:49
| 385,569,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 116
|
r
|
pageview_timestamps.R
|
# Demonstrate pageview_timestamps() (pageviews package) with three input types.
library(pageviews)
pageview_timestamps(Sys.Date())    # a Date
pageview_timestamps(Sys.time())    # a POSIXct date-time
pageview_timestamps("2016020800")  # an already-formatted timestamp string
|
02ab64de7d44f9af98e3b09133d865988851e0f0
|
8357f04a0a50e10697a650ad319b37738dcc2cf8
|
/man/get.issue.labels.Rd
|
86b8f90a0412745266f9af869a3dff5981cbed03
|
[
"MIT"
] |
permissive
|
prateek05/rgithub
|
2a7114a8262cd6abefc8d31f03a907b75f127ba2
|
153bde1466252952e21c1fdb2ff0b452b9c8de99
|
refs/heads/master
| 2021-01-17T04:24:00.180261
| 2014-07-14T17:24:43
| 2014-07-14T17:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 417
|
rd
|
get.issue.labels.Rd
|
\name{get.issue.labels}
\alias{get.issue.labels}
\title{List all labels for an issue}
\usage{
get.issue.labels(owner, repo, issue.number,
ctx = get.github.context())
}
\arguments{
\item{owner}{the repo owner}
\item{repo}{the repo name}
\item{issue.number}{the number of the issue}
\item{ctx}{the github context object}
}
\value{
The list of labels
}
\description{
List all labels for an issue
}
|
0384d879a023b5d1bcbcce332a9aa2ea14515e82
|
68f8217845056df195a2d78356ddd5a2f9a9e44e
|
/R/statistics_with_R/08_LogisticRegression/Script_Files/01_logisticRegression.R
|
9c69cdf2ba8fd8b1d403416cb42b410778b8995a
|
[
"MIT"
] |
permissive
|
snehilk1312/AppliedStatistics
|
dbc4a4f2565cf0877776eee88b640abb08d2feb5
|
0e2b9ca45b004f38f796fa6506270382ca3c95a0
|
refs/heads/master
| 2023-01-07T15:11:18.405082
| 2020-11-07T21:06:37
| 2020-11-07T21:06:37
| 289,775,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,685
|
r
|
01_logisticRegression.R
|
# Logistic regression walk-through (eel "Cured" data): fit nested binomial
# GLMs and compare them via deviance-based chi-square tests.
library(car)
library(mlogit)
df<- read.delim('/home/atrides/Desktop/R/statistics_with_R/08_LogisticRegression/Data_Files/eel.dat', header=TRUE)
# listing all columns in data frame
names(df)
# checking whether the outcome/predictor columns are factors
is.factor(df$Cured)
is.factor(df$Intervention)
# converting columns to factors
df$Cured<- as.factor(df$Cured)
df$Intervention<- as.factor(df$Intervention)
# Default reference levels were not suitable, so relevel: "Not Cured" and
# "No Treatment" become the baselines.
df$Cured<- relevel(df$Cured, "Not Cured")
df$Intervention<- relevel(df$Intervention, "No Treatment")
# fitting the models (m00 = intercept-only null model)
# newModel<-glm(outcome ~ predictor(s), data = dataFrame, family = name of a distribution, na.action = an action)
m01<- glm(Cured~Intervention, data=df, family = binomial())
m02<- glm(Cured~Intervention+Duration, data=df, family = binomial())
m00<- glm(Cured~1, data=df, family = binomial())
# printing summaries
summary(m00)
summary(m01)
summary(m02)
# Accessing some other statistics of our logistic model
m01$null.deviance
m01$deviance
m01$coefficients
# to see what other components are stored, list the names
names(m01)
# model chi-square: improvement in deviance over the null model
modelChi<- m01$null.deviance - m01$deviance
modelChi
chidf<- m01$df.null-m01$df.residual
chidf
# feeding model chi square and its degrees of freedom to calculate the p value
chisq.prob<- 1-pchisq(modelChi , chidf)
chisq.prob
# Note: we reject the null model, i.e. m01 predicts better than chance alone
# Now we will calculate various different R and R^2
# NOTE(review): 3.074 is a hard-coded z-statistic presumably copied from
# summary(m01); it will be wrong if the data change — verify against output.
R<- sqrt((3.074^2-2*1)/m01$null.deviance)
R
# Print (and invisibly return) three pseudo-R^2 measures for a fitted
# binomial glm.
#
# m : a fitted glm object (only $deviance, $null.deviance and
#     $fitted.values are used).
#
# Returns (invisibly) a named numeric vector with the unrounded
# Hosmer-Lemeshow, Cox-Snell and Nagelkerke values, so callers can reuse
# them; the original version returned NULL (the value of cat()), which this
# extends backward-compatibly.
pseudoRsquared <- function(m) {
  dev <- m$deviance
  nulldev <- m$null.deviance
  n <- length(m$fitted.values)
  # Hosmer & Lemeshow: proportional reduction in deviance.
  R2_hl <- 1 - dev / nulldev
  # Cox & Snell: likelihood-ratio based; cannot reach 1.
  R2_cs <- 1 - exp(-(nulldev - dev) / n)
  # Nagelkerke: Cox & Snell rescaled so its maximum is 1.
  R2_n <- R2_cs / (1 - exp(-(nulldev / n)))
  cat("Pseudo R^2 for logistic regression: \n")
  cat("Hosmer and Lemeshow R^2: ", round(R2_hl, 3), "\n")
  cat("Cox and Snell R^2: ", round(R2_cs, 3), "\n")
  cat("Nagelkerke R^2: ", round(R2_n, 3), "\n")
  invisible(c(HosmerLemeshow = R2_hl, CoxSnell = R2_cs, Nagelkerke = R2_n))
}
pseudoRsquared(m01)
# odds ratios: exponentiate the log-odds coefficients
exp(m01$coefficients)
# confidence interval of these odds; since it doesn't cross 1, intervention
# increases the odds of being cured
exp(confint(m01))
# Model 2: Intervention and Duration as predictors
summary(m02)
modelChi<- m01$deviance - m02$deviance
modelChi
chidf<- m01$df.residual - m02$df.residual
chidf
chisq.prob<- 1 - pchisq(modelChi, chidf)
chisq.prob
# from the chisq.prob above we conclude model 2 is not a real improvement over model 1
# same comparison via anova
anova(m01, m02)
# Casewise diagnostics appended as new columns
# (NOTE: "probablities"/"standarized" are misspelled column names kept for
# compatibility with downstream code.)
df$predicted.probablities<- fitted(m01)
df$standarized.residuals<- rstandard(m01)
df$studentized.residuals<- rstudent(m01)
df$dfbeta<- dfbeta(m01)
df$dffits<- dffits(m01)
df$leverage<- hatvalues(m01)
head(df)
# inspect the 10 largest standardized residuals for outliers
head(df[order(-df$standarized.residuals),]$standarized.residuals, 10)
# all cases have DFBetas less than 1, and leverage statistics are very close to the calculated expected value of 0.018.
# All in all, this means that there are no influential cases having an effect on the model.
# The studentized residuals all have values of less than ±2 and so there seems to be very little here to concern us.
# Another example: penalty-kick data (Scored ~ anxiety measures)
data<- read.delim('/home/atrides/Desktop/R/statistics_with_R/08_LogisticRegression/Data_Files/penalty.dat', header=TRUE)
head(data)
# checking if Scored is a factor or not
is.factor(data$Scored)
# it is not, so convert
data$Scored<- as.factor(data$Scored)
names(data)
m01<- glm(Scored~PSWQ+Previous, data=data, family=binomial())
m02<- glm(Scored~PSWQ+Previous+Anxious, data=data, family=binomial())
anova(m01, m02)
summary(m01)
summary(m02)
# m01 vs the null model
modelChi1<- m01$null.deviance - m01$deviance
modelChi1
chidf1<- m01$df.null - m01$df.residual
chidf1
chisq.prob1<- 1- pchisq(modelChi1, chidf1)
chisq.prob1
# chisq.prob1 < 0.05: m01 is a significant improvement over the null
# model (i.e. better than chance)
# Now test whether model 2 improves on model 1
modelChi2<- m01$deviance - m02$deviance
modelChi2
chidf2<- m01$df.residual - m02$df.residual
chidf2
chisq.prob2<- 1-pchisq(modelChi2, chidf2)
chisq.prob2
# chisq.prob2 > 0.05: any improvement of m02 over m01 is consistent
# with chance alone
# studentized residuals for m01
df_resid<- rstudent(m01)
# top 10 residuals
head(df_resid[order(-df_resid)], 10)
# Assumption checks on m02:
# Testing for multicollinearity
# variance inflation factors
vif(m02)
# tolerance (reciprocal of VIF)
1/vif(m02)
# the VIF and tolerance values indicate high multicollinearity in the model
# checking correlation between the predictors
cor(data[, cbind('PSWQ', 'Anxious', 'Previous')])
# the correlation between Anxious and Previous is very high, explaining the multicollinearity
# Testing for linearity of the logit: add x*log(x) interaction terms
data$logPSWQ<- data$PSWQ * log(data$PSWQ)
data$logAnxious<- data$Anxious * log(data$Anxious)
data$logPrevious<- data$Previous * log(data$Previous)
head(data)
m03<- glm(Scored~PSWQ+logPSWQ+Anxious+logAnxious+Previous+logPrevious, data=data, family=binomial())
summary(m03)
# From the summary output , if any interaction term has significance less than 0.05 , it will mean that assumption
# of linearity has been violated. In our output we can conclude that the assumption of linearity has been met as all
# interaction term is non-significant
|
6875109724e29c8f825abe7209725cac615952e4
|
bdefef347f69d9787b85393f6dbffd6d295bfb4d
|
/checking_expression_data.R
|
37e4357781743a2d4b25c57d25ea227e8490f0dd
|
[] |
no_license
|
aleighbrown/background_mutability
|
8dea93370742809313702051ca0bb95838cea713
|
45639983ac52f92703df0b88a41e4a8c6d67cffe
|
refs/heads/master
| 2020-04-17T05:05:01.630497
| 2019-05-13T10:40:38
| 2019-05-13T10:40:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
checking_expression_data.R
|
# Cross-check driver-gene calls against CCLE RNA-seq expression:
# flag "Driver" substitutions, melt CCLE RPKM to long format, average
# expression per gene x tissue, and filter to genes present in our table.
rebuilt_amino <- fread("~/mahmood/binom/analyzed data/codon_substitution_table.csv")
# genes passing the BScore threshold are labelled drivers
rebuilt_amino[BScore <=5.462115e-05,Status85 := "Driver"]
#quickly rename some genes. never using Hugo symbols again after this project. Too many alias
rebuilt_amino[gene == "NSD3", gene := "WHSC1L1"]
rebuilt_amino[gene == "KNL1", gene := "CASC5"]
rebuilt_amino[gene == "AFDN", gene := "MLLT4"]
rebuilt_amino[gene == "WDCP", gene := "C2orf44"]
rebuilt_amino[gene == "NSD2", gene := "WHSC1"]
driver_genes = rebuilt_amino[Status85 == "Driver",unique(gene)]
ccle_data = fread("/Users/browna6/mahmood/binom/analyzed data/CCLE_RNAseq_genes_rpkm_20180929.gct.gz")
# melt the per-cell-line columns (3:1021) into long format
ccle_data_RK_melted = melt(ccle_data,measure.vars = names(ccle_data[,3:1021]), id.vars = c("Name","Description"))
setnames(ccle_data_RK_melted, c("Description","variable","value"),c("Gene","ccle_name","RKPM"))
# cell-line names look like NAME_TISSUE; keep the part after the first "_"
ccle_data_RK_melted[,tissue := str_split(str = ccle_name, pattern = "_", n = 2,simplify = T)[,2]]
# mean RPKM per gene x tissue (result column is named V1)
tissue_gene_expression = ccle_data_RK_melted[,mean(RKPM), by = c("Gene","tissue")]
# genes in our table with no CCLE expression entry
notthere = setdiff(rebuilt_amino[,unique(gene)],tissue_gene_expression[,unique(Gene)])
all_expression = tissue_gene_expression[Gene %in% rebuilt_amino[,unique(gene)]]
# drop blank / "NS" (not specified) tissues
all_expression = all_expression[tissue != "" & tissue != "NS"]
driver_expression = tissue_gene_expression[Gene %in% driver_genes]
# rows with mean RPKM above 0.5 (expressed)
all_expression[V1 > 0.5 ]
# NOTE(review): this re-assignment duplicates the line two above — presumably
# a leftover from interactive use; verify before removing.
driver_expression = tissue_gene_expression[Gene %in% driver_genes]
driver_expression[V1 > 0.5 ]
|
76fc46773a707414f05683d8bc6f64c2f1d6ca69
|
6526ee470658c2f1d6837f7dc86a81a0fbdcffd5
|
/man/MomsToPars.Rd
|
c2387f2f6d06e3ba1994be43314e583120d3578d
|
[] |
no_license
|
mdlama/milkweed
|
c7e8a24021a35eb6fbef13360400d2d4069b4649
|
b791c8b39802f33471f8e827f369afa47c06d6af
|
refs/heads/master
| 2023-09-06T03:00:45.554997
| 2022-09-14T15:25:58
| 2022-09-14T15:25:58
| 76,479,540
| 0
| 0
| null | 2021-09-21T19:04:44
| 2016-12-14T16:59:01
|
R
|
UTF-8
|
R
| false
| true
| 366
|
rd
|
MomsToPars.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mw_ipm.R
\name{MomsToPars}
\alias{MomsToPars}
\title{Moments to parameters.}
\usage{
MomsToPars(y, type = "ident")
}
\arguments{
\item{y}{Vector of moments.}
\item{type}{Distribution type (lnorm, gamma, or identity)}
}
\value{
Vector of parameters.
}
\description{
Moments to parameters.
}
|
80cc646133ecef36c6f3aecff57772a3c8919863
|
5c05deb527c4ab642e0c98b66a762efdcec62e2b
|
/week1/intro.r
|
13d9991475c3cd0bce84945e6a2c65674d393a1b
|
[] |
no_license
|
Freshchris01/ws19-20_business_analytics
|
02f68dc39c8c6f11f746f05311f8541d6e061083
|
5805bf65426d53e3604467d5f77215f096bdfb37
|
refs/heads/master
| 2020-08-27T08:42:55.949397
| 2019-12-18T22:11:57
| 2019-12-18T22:11:57
| 217,304,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
intro.r
|
# Exploratory look at the 1988 labor-supply data with the tidyverse.
#set working directory in files view
#load lib
library(tidyverse)
#import data
data <- read_csv("week1/LaborSupply1988.csv") # read_csv returns a tibble
# data insights
glimpse(data)
str(data)
summary(data)
dim(data)
nrow(data)
ncol(data)
names(data)   # bug fix: was names(df) — no object `df` exists in this script
head(data, n=20) # first n lines
tail(data, n=8)
data[10:12,] #10th to 12th row
#max and min of age (column 4 is age)
max(data[,4])
min(data[,4])
summary(data$age)  # bug fix: was summary(df$age)
range(data$age)    # bug fix: was range(df$age)
colMeans(data[,4])
# Column means per number of kids (kids takes values 0..6 in this data set).
for(i in c(0,1,2,3,4,5,6)){
  test <- data %>% filter(kids == i)  # refer to the column directly inside filter()
  print(colMeans(test))
}
|
ec368a9e160cc52f072b4c4687df88a11b9e376a
|
1b0eb05c1e253b4262044d1c1e88bf09f62198d3
|
/global.R
|
775840642e1f53e74ab244ec1851b3dca8d58d53
|
[] |
no_license
|
paulCrsr/fitibitShinyDemo
|
9ddc82837f396a811115f8d96b25f32839f68d9f
|
8d47702037d7153e8e2a6d12e89a564ec9443cd7
|
refs/heads/master
| 2021-08-10T17:06:56.411021
| 2017-11-12T20:21:10
| 2017-11-12T20:21:10
| 110,464,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68
|
r
|
global.R
|
# Shiny app globals: load dependencies and the Fitbit sample export.
# Use library() so a missing package fails loudly at startup;
# require() only returns FALSE and lets the app limp on.
library(dplyr)
library(zoo)
# Data shared by ui/server.
csvData <- read.csv("fitbit_sample.csv")
|
3ed413fb37d03509fcb2e3bb8700275b58aa9022
|
93df22dd335b0e87d46fecc33e0b9313ceb53e45
|
/Prediction.R
|
b97c880e4825a9db627860ad50c243ffb5105643
|
[] |
no_license
|
pcolmer99/Machine-Learning-Assignment
|
d9b600162a1497b1252bcafad8d7a8f0859e727a
|
ba759dbe1a93638fb1893a002b9b4a043dae9265
|
refs/heads/master
| 2021-01-10T05:29:47.400912
| 2016-01-29T04:19:28
| 2016-01-29T04:19:28
| 50,623,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,696
|
r
|
Prediction.R
|
## Predict exercise quality ("classe") from accelerometer data with random forests.
## NOTE(review): install.packages() inside a script re-installs on every run;
## consider installing once interactively.
install.packages("caret")
library(caret)
library(ggplot2)
## Load in the training dataset (several NA spellings appear in the raw CSV)
training <- read.csv('pml-training.csv', na.strings=c("NA","#DIV/0!",""))
## Load in the testing dataset
testing <- read.csv('pml-testing.csv', na.strings=c("NA","#DIV/0!",""))
## Cleanup the data
## Remove columns that are at least 80% NA.
## (Vectorized replacement for the original grow-a-vector loop; the original
## test nrow*0.80 <= #NA is exactly "NA fraction >= 0.80".  The original
## comment said "more than 20% NAs", which did not match the code.)
remove_na <- which(colMeans(is.na(training)) >= 0.80)
if (length(remove_na) > 0) {
  training <- training[ ,-remove_na]
  ## Drop the same columns from testing so both sets stay aligned
  testing <- testing[ ,-remove_na]
}
## Remove first 7 variables from datasets as they have no bearing on the physical technique
training <- training[,-c(1:7)]
testing <- testing[,-c(1:7)]
## Check that training and test set columns are identical for all predictors
colnames(training)
colnames(testing)
## Check for Near Zero Values ("NZV")
nzv <- nearZeroVar(training, saveMetrics=TRUE)
print(nzv)
## Slice up the training set into training 60% and cross-validation testing 40%
inTrain <- createDataPartition(y=training$classe, p=0.6, list=FALSE)
xtraining <- training[inTrain,]
vtest <- training[-inTrain,]
## Training Random Forest - No Additional Preprocessing
modfit <- train(classe ~., method="rf", data=xtraining)
## Training Random Forest - Preprocessing: Normalise skewed variables & remaining NAs
modfit2 <- train(classe ~., method="rf", preProcess=c("center", "scale", "knnImpute"), data=xtraining)
## Training Random Forest - Using K-Fold Cross Validation
modfit3 <- train(classe ~., method="rf", trControl=trainControl(method = "cv", number = 3), data=xtraining)
## Predict all 3 models on validation & show accuracy & show models
pred1 <- predict(modfit, newdata=vtest)
confusionMatrix(pred1, vtest$classe)$overall[1]
## pred1 - 0.992% accuracy which is mid and was used to quiz
pred2 <- predict(modfit2, newdata=vtest)
confusionMatrix(pred2, vtest$classe)$overall[1]
## pred2 - 0.989% accuracy which is lower so no advantage using these preprocessing techniques
pred3 <- predict(modfit3, newdata=vtest)
confusionMatrix(pred3, vtest$classe)$overall[1]
## pred3 - 0.993% accuracy which is best but pred1 used for quiz
## Calculate & Plot the 20 most important variables for Model 1 - as it is the most accurate
varimpobj = varImp(modfit)
plot(varimpobj, main = "Top 20 Variables", top=20)
modfit$finalModel
## Predict on testing and print results
finpred1 <- predict(modfit, newdata=testing)
print(finpred1)
## Out of Sample Error on Validation Set
ooserr <- 1 - confusionMatrix(pred1, vtest$classe)$overall[1]
print(ooserr)
|
bda8348945e7ab96882320dc33f4ebe4e2746912
|
dfb3d986e308512c38ae41d2b30948ad45fa1d18
|
/R/covid19_daily_cases_canada.R
|
26c6bf0119aad4ee8aedfad81e3fbd746d982bee
|
[] |
no_license
|
billpine/dataviz
|
42ab4239f23e0f0b96092d12e9ecf0321489d022
|
e940523d968b6b9f7a3fce7d2e77e71667be1e7e
|
refs/heads/master
| 2021-05-18T05:12:07.570761
| 2020-03-29T16:08:03
| 2020-03-29T16:08:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,697
|
r
|
covid19_daily_cases_canada.R
|
# Plot daily new COVID-19 cases per Canadian province from the JHU CSSE
# global time-series, one small-multiple panel per province.
library(tidyverse)
library(ggpmthemes)
library(ggtext)
theme_set(theme_exo())
# Wide table: one row per region, one column per date.
df <-
  read_csv(
    "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
  )
# Long format: one row per region x date, with parsed dates.
df <- df %>%
  pivot_longer(-c(1:4), names_to = "date", values_to = "case_confirmed") %>%
  janitor::clean_names() %>%
  mutate(date = as.Date(date, "%m/%d/%y"))
df
df_viz <- df %>%
  filter(country_region == "Canada") %>%
  # drop non-province rows (cruise ships, recovered aggregate)
  filter(!str_detect(province_state, "Recovered|Diamond|Grand")) %>%
  group_by(province_state) %>%
  arrange(date) %>%
  # daily new cases = tomorrow's cumulative minus today's (NA on last day)
  mutate(daily_case = lead(case_confirmed) - case_confirmed) %>%
  mutate(total_case = max(case_confirmed))
df_viz %>%
  ggplot(aes(x = date, y = daily_case, group = province_state)) +
  geom_line(size = 0.5) +
  scale_x_date(date_breaks = "3 weeks", date_labels = "%b-%d") +
  # geom_point() +
  facet_wrap(~str_wrap(province_state, 20), ncol = 3) +
  labs(
    y = "Number of new daily cases",
    x = NULL,
    title = "Number of new daily cases of covid-19 in Canada",
    # NOTE(review): this sums daily *new* cases (excluding cases before the
    # series start and the NA last day) but labels it "total confirmed" —
    # verify that is the intended figure.
    subtitle = glue::glue("Total number of confirmed cases: {sum(df_viz$daily_case, na.rm = TRUE)}"),
    caption = "Data: https://github.com/CSSEGISandData/COVID-19\nVisualization: @philmassicotte"
  ) +
  theme(
    strip.text.x = element_text(hjust = 0, face = "bold", size = 12),
    strip.background = element_blank(),
    panel.border = element_blank(),
    plot.title.position = "plot",
    plot.title = element_text(hjust = 0.5, size = 18),
    plot.caption = element_text(size = 8)
  )
# Write a high-resolution PNG of the last plot.
ggsave(
  here::here("graphs/covid19_daily_cases_canada.png"),
  dpi = 600,
  type = "cairo",
  width = 7,
  height = 8
)
|
6526ee94e5fa3dcfa6df9c75950bfe627802885b
|
27f131e09ef647c09e30dab8b83acc6e9b817fa8
|
/cyclistic_ride.R
|
657594857ee51d181d947e16361ada23e537d3bd
|
[] |
no_license
|
mishadarsh/Analytical-study-How-Does-a-Bike-Share-Navigate-Speedy-Success-
|
4cdb78f0dd2408efba30a1e86d548206f4b3a53c
|
edd7b9950c37ca420a707842639da0190be4ea17
|
refs/heads/main
| 2023-06-23T17:13:32.309644
| 2021-07-24T10:13:20
| 2021-07-24T10:13:20
| 389,057,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,587
|
r
|
cyclistic_ride.R
|
# Cyclistic bike-share case study: load Q3/Q4 2019 + Q1 2020 trip files,
# harmonize column names and types, and combine them into one data frame.
# NOTE(review): install.packages() on every run is wasteful; consider
# installing once interactively.
install.packages("plyr")
# Bug fix: plyr must be attached BEFORE dplyr/tidyverse.  It was loaded last,
# so plyr::rename()/plyr::mutate() masked the dplyr versions that the
# rename(new = old) and mutate() calls below rely on.
library("plyr")
install.packages("tidyverse")
library("tidyverse")
install.packages("ggplot2")
library("ggplot2")
install.packages("lubridate")
library("lubridate")
install.packages("dtplyr")
library("dtplyr")
install.packages("dplyr")
library("dplyr")
install.packages("readr")
library("readr")
install.packages("rlang")
library("rlang")
getwd()
setwd("/Users/Adarsh Mishra/OneDrive/Desktop/cyclistic_R/CSV FILES")
q3_2019 <- read_csv("Divvy_Trips_2019_Q3.csv")
q4_2019 <- read_csv("Divvy_Trips_2019_Q4.csv")
q1_2020 <- read_csv("Divvy_Trips_2020_Q1.csv")
colnames(q3_2019)
colnames(q4_2019)
colnames(q1_2020)
# Rename 2019 columns to match the 2020 schema.
(q4_2019 <- rename(q4_2019
                   ,ride_id = trip_id
                   ,rideable_type = bikeid
                   ,started_at = start_time
                   ,ended_at = end_time
                   ,start_station_name = from_station_name
                   ,start_station_id = from_station_id
                   ,end_station_name = to_station_name
                   ,end_station_id = to_station_id
                   ,member_casual = usertype))
(q3_2019 <- rename(q3_2019
                   ,ride_id = trip_id
                   ,rideable_type = bikeid
                   ,started_at = start_time
                   ,ended_at = end_time
                   ,start_station_name = from_station_name
                   ,start_station_id = from_station_id
                   ,end_station_name = to_station_name
                   ,end_station_id = to_station_id
                   ,member_casual = usertype))
str(q1_2020)
str(q4_2019)
str(q3_2019)
# Convert id columns to character so the three quarters can be stacked.
q4_2019 <- mutate(q4_2019, ride_id = as.character(ride_id)
                  ,rideable_type = as.character(rideable_type))
q3_2019 <- mutate(q3_2019, ride_id = as.character(ride_id)
                  ,rideable_type = as.character(rideable_type))
all_trips <- bind_rows(q3_2019, q4_2019, q1_2020)
# Drop columns absent from the 2020 schema (coordinates, demographics, durations).
all_trips <- all_trips %>%
  select(-c(start_lat, start_lng, end_lat, end_lng, birthyear, gender, "01 - Rental Details Duration In Seconds Uncapped", "05 - Member Details Member Birthday Year", "Member Gender", "tripduration"))
colnames(all_trips)  #List of column names
nrow(all_trips)  #How many rows are in data frame?
dim(all_trips)  #Dimensions of the data frame?
head(all_trips)  #See the first 6 rows of data frame.  Also tail(qs_raw)
str(all_trips)  #See list of columns and data types (numeric, character, etc)
summary(all_trips)  #Statistical summary of data. Mainly for numerics
|
eb089120bb932f75beab3a5e8b030cf6e1dacb63
|
b2a07e6003cc146cb461bd058594b08870d03403
|
/hw6/hw6.r
|
d61446b5ea19beda67633b4f447b555aad9c5db4
|
[] |
no_license
|
adrianzhong/stat133
|
4a128c16d406cd298fa65f077427a049e191dcb8
|
bc32486274a54fbca36c82de7a391e59d0243f1c
|
refs/heads/master
| 2020-04-16T03:08:20.414750
| 2015-08-31T22:31:58
| 2015-08-31T22:31:58
| 29,747,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,556
|
r
|
hw6.r
|
# Homework 6
# Stat 133, Lec 2, Spring 2015
# Due : Friday March 20th by 5 pm
# Review the slides on simulations for this assignment.
# Consider the following model on use of a new drug:
# We have a population of doctors, population size : <n.doctors>
# Every doctor has either adopted the use of a new drug, or not (0/1 status)
# Now write a function that runs a simulation for a period of :
# <n.days> where
# - every day exactly two _random_ doctors meet
# - if one has adopted but the other one has not then the
# holdout adopts the use of the drug with probability p
# Return a matrix that shows for each day which doctors have adopted
# the use of the drug.
# Input varibles are
# <n.days> : the number of days the simulation should be run for
# <n.doctors> : total number of doctors
# <initial.doctors> : a 0/1 vector of length <n.doctors>, 1 for adopters
# <p> : the probability that the non-adopter adopts the drug.
# Ouput variable
# <has_adopted> : matrix with <n.doctors> rows and <n.days> columns
# i.e. one row for each doctor
# the entries are 0 for days where the doctor is a
# non-adopter, else 1 (so once a row turns to 1 it stays as 1).
# Simulate diffusion of drug adoption among <n.doctors> doctors over <n.days>.
# Each day two distinct random doctors meet; if exactly one of them has
# adopted, the holdout adopts with probability p.  Adoption is permanent.
#
# initial.doctors : 0/1 vector of length <n.doctors>, 1 = already adopted
# n.doctors       : total number of doctors
# n.days          : number of simulated days (now works for n.days == 1 too;
#                   the original 2:n.days would have counted backwards)
# p               : conversion probability at a mixed meeting
#
# Returns an <n.doctors> x <n.days> 0/1 matrix; entry (i, j) is 1 once
# doctor i has adopted by day j (rows stay 1 after turning 1).
sim.doctors <- function(initial.doctors, n.doctors, n.days, p){
  # Day 1 is the initial state; matrix() recycles the vector column-wise,
  # so every column starts as a copy of initial.doctors.
  has_adopted <- matrix(initial.doctors, nrow = n.doctors, ncol = n.days)
  if (n.days >= 2) {
    for (day in 2:n.days) {
      # Carry yesterday's state forward, then resolve today's meeting.
      has_adopted[, day] <- has_adopted[, day - 1]
      pair <- sample(n.doctors, 2)
      states <- has_adopted[pair, day - 1]
      # Exactly one adopter present (covers both orderings of the pair):
      # the holdout converts with probability p.
      if (sum(states) == 1 && sample(c(0, 1), 1, prob = c(1 - p, p)) == 1) {
        has_adopted[pair, day] <- 1
      }
    }
  }
  # return the output
  has_adopted
}
# When you test your function you have to generate <initial.doctors> and
# pick values for the other input parameters.
# Driver: fix the RNG seed, draw a random initial adopter vector, and run
# the simulation for five adoption probabilities.
set.seed(42)
# Generate a value for <initial.doctors> that has 10% 1s and 90% 0s.
n.doctors=100
initial.doctors=sample(c(0,1),size=n.doctors,prob=c(0.9,0.1),replace=T)
n.days=500
# Run your function for at least 5 different values of <p> and plot
p_0.2=sim.doctors(initial.doctors, n.doctors, n.days, 0.2)
p_0.4=sim.doctors(initial.doctors, n.doctors, n.days, 0.4)
p_0.6=sim.doctors(initial.doctors, n.doctors, n.days, 0.6)
p_0.8=sim.doctors(initial.doctors, n.doctors, n.days, 0.8)
p_1.0=sim.doctors(initial.doctors, n.doctors, n.days, 1.0)
# on x-axis: days,
# on y-axis : the number of doctors that have already adopted the drug, on that day
# Put all 5 lines in one figure (e.g. use first plot() then lines() for the subsequent lines)
# Column sums of a numeric matrix: the number of adopters on each day.
# Vectorized replacement for the original element-by-element loop;
# unname() keeps the return value a plain, unnamed numeric vector like
# the original produced.
count_sum=function(x){
  unname(colSums(x))
}
# Daily adopter counts for each probability, then one plot with five lines.
sum_0.2=count_sum(p_0.2)
sum_0.4=count_sum(p_0.4)
sum_0.6=count_sum(p_0.6)
sum_0.8=count_sum(p_0.8)
sum_1.0=count_sum(p_1.0)
# y fixed at 0..100 so all curves (out of 100 doctors) share one scale
plot(c(1:n.days),ylim=c(0,100),sum_0.2,main="number of doctors adopted the drug",xlab="days",ylab="the number of doctors adopted the drug","l",col="red")
lines(c(1:n.days),sum_0.4,col="blue")
lines(c(1:n.days),sum_0.6,col="green")
lines(c(1:n.days),sum_0.8,col="gold")
lines(c(1:n.days),sum_1.0,col="black")
# legend maps line colour to p
legend("bottomright",fill=c("red","blue","green","gold","black"),legend=c(0.2,0.4,0.6,0.8,1.0))
|
eb6e38df07c3070c8718d66d361d0a03126a8e30
|
ca4691e2c129deaa7885c8e905e381d9384a78f7
|
/scripts/14_distance_from_MPA.R
|
6388ae90805f488d5a21fb0ae0a1f629f189cbe3
|
[] |
no_license
|
philiphaupt/Turtle_sat_tag_Aldabra
|
da1e4c329f8df8ab032bfdc3dea0fa6f1d22452e
|
ef81dc108bc096675d850879fa4d1ed7cf6de58e
|
refs/heads/master
| 2023-01-23T02:44:01.086222
| 2023-01-20T07:16:36
| 2023-01-20T07:16:36
| 209,151,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,684
|
r
|
14_distance_from_MPA.R
|
# AIM: Determine the distance from the last transmission point in the feeding grounds to the nearest MPA
library(sf)
library(tidyverse)
library(data.table)
library(tmap)
# read last points
last_pts <- read_rds("./data/last_pts.rds")
MPAs <- read_rds("./data/MPAs.rds")
st_crs(last_pts)
# project to UTM zone 38S (EPSG:32738) so distances come out in metres
st_crs(MPAs)
MPAs_utm38 <- st_transform(MPAs, 32738)
st_crs(MPAs_utm38)
MPAs_centroid_utm38s <- MPAs_utm38 %>% sf::st_centroid()
MPAs_centroid_utm38s <- tibble::rowid_to_column(MPAs_centroid_utm38s, "ID")
# keep only polygon MPAs larger than 6.5 km2
MPAs_utm38 <- tibble::rowid_to_column(MPAs_utm38, "ID") %>%
  dplyr::filter(GEOMETRY_TYPE == "POLYGON") %>%
  dplyr::filter(AREA_KM2 > 6.5)
# distance from MPAs (minus land): for each tag, keep the row of the nearest MPA
dist_list <- last_pts %>%
  plyr::dlply(.variables = "tag_id", function(x){
    x_sf <- st_as_sf(x)
    dist_last_pt_MPA <- as.data.frame(as.matrix(t(sf::st_distance(x_sf, MPAs_utm38))))
    names(dist_last_pt_MPA) <- "distance_m"
    dist_last_pt_MPA_named <- bind_cols(dist_last_pt_MPA, MPAs_utm38)
    dist_min <- dist_last_pt_MPA_named %>% dplyr::filter(distance_m == min(distance_m))
  })
dist_list
dist_df <- data.table::rbindlist(dist_list, use.names=TRUE)
# Bug fix: rep(..., each=) expects a single number; use times= with the
# per-tag row counts so each tag_id repeats once per retained row.
dist_df$tag_id <- rep(names(dist_list), times = sapply(dist_list, nrow))
dist_sf <- st_as_sf(dist_df, sf_column_name = "geometry")
# Bug fix: st_transform_4326() is not an sf function; transform to
# EPSG:4326 (WGS84) explicitly for plotting.
dist_sf_wgs84 <- sf::st_transform(dist_sf, 4326)
# plot
# view the protected areas and last transmission points on an interactive map
tmap_mode("view")
tm_shape(MPAs) +
  tm_polygons(col = "olivedrab2",
              alpha = 0.3)+
  #tm_borders(col = "forestgreen")+
  tm_shape(dist_sf_wgs84)+
  # tm_dots(size = 0.5,
  #         shapes = 4,
  #         alpha = 0.2,
  #         col = "salmon")+
  tm_text("NAME")+
  tm_shape(last_pts)+
  tm_dots(size = 0.7,
          col = "blue",
          shapes = 15,
          alpha = 0.4)+
  tm_text("tag_id", col = "white")
#tm_text("distance_m")
# export the distance table without the geometry column
dist_no_geom <- dist_df %>% dplyr::select(-geometry)
write.csv(dist_no_geom, "./data/dist_no_geom_no_points_greater_6point5km2.csv")
|
74f37f661e8264caa8e039c263fdc65a50562344
|
689635789d25e30767a562933f39fcba1cebecf1
|
/Alpha Modelling/QuantStrat/Strategies/Trading/3. Trend Vigor III.R
|
dc78fe21627740c358d1b3e3f86494f0db18406a
|
[] |
no_license
|
Bakeforfun/Quant
|
3bd41e6080d6e2eb5e70654432c4f2d9ebb5596c
|
f2874c66bfe18d7ec2e6f2701796fb59ff1a0ac8
|
refs/heads/master
| 2021-01-10T18:23:23.304878
| 2015-08-05T12:26:30
| 2015-08-05T12:26:30
| 40,109,179
| 5
| 0
| null | 2015-08-05T12:12:09
| 2015-08-03T06:43:12
|
R
|
UTF-8
|
R
| false
| false
| 5,615
|
r
|
3. Trend Vigor III.R
|
##### Information #####
# Trend Vigor Part III: ATR position sizing, Annualized Sharpe above 1.4, and Why Leverage Is Pointless
# Posted on May 29, 2014 by Ilya Kipnis
# Posted in Dr. John Ehlers, ETFs, QuantStrat, R, Trading, Uncategorized
# https://quantstrattrader.wordpress.com/2014/06/11/trend-vigor-part-iii-atr-position-sizing-annualized-sharpe-above-1-4-and-why-leverage-is-pointless/
# https://github.com/IlyaKipnis
# http://www.followingthetrend.com/2014/01/why-leverage-is-pointless/
##### Initialisation #####
# Backtest setup: working directory, strategy libraries, and the date window
# for the demo data.
setwd("~/Alpha Modelling/QuantStrat/Strategies/Trading")
require(DSTrading)
require(IKTrading)
require(quantstrat)
# initDate predates all data so quantstrat's bookkeeping starts clean.
initDate="1990-01-01"
from="2003-01-01"
to="2010-12-31"
source("demoData.R") #contains all of the data-related boilerplate.
tmp <- list()
length(tmp) <- length(symbols)
for (i in 1:length(symbols)) {
tmp[[i]] <-Cl(get(symbols[i]))
}
tmp <- do.call(cbind, tmp)
baseCors <- cor(tmp)
diag(baseCors) <- NA
instrumentAverageBaseCors <- rowMeans(baseCors, na.rm=TRUE)
names(instrumentAverageBaseCors) <- gsub(".Close", "", names(instrumentAverageBaseCors))
instrumentAverageBaseCors
(grandMeanBaseCors <- mean(instrumentAverageBaseCors))
##### Strategy implementation #####
# Trade sizing and initial equity settings: $10k of notional risk budget per
# instrument, total starting equity spread evenly over the symbol universe.
tradeSize <- 10000
initEq <- tradeSize*length(symbols)
# quantstrat convention: strategy, portfolio and account share one name.
strategy.st <- portfolio.st <- account.st <- "TVI_osATR"
# Remove any leftover strategy/portfolio objects from a previous run.
rm.strat(portfolio.st)
rm.strat(strategy.st)
# Order matters here: portfolio, then account, then order book, then strategy.
initPortf(portfolio.st, symbols=symbols, initDate=initDate, currency='USD')
initAcct(account.st, portfolios=portfolio.st, initDate=initDate, currency='USD',initEq=initEq)
initOrders(portfolio.st, initDate=initDate)
strategy(strategy.st, store=TRUE)
# Parameters (trigger lag unchanged, defaulted at 1)
delta=0
period=20
pctATR=.02 #control risk with this parameter
#pctATR=.04
# Indicators: Ehlers' Trend Vigor oscillator plus a lagged ATR used by the
# osDollarATR order-sizing function (label "atrX" -> atrMod="X" below).
add.indicator(strategy.st, name="TVI", arguments=list(x=quote(Cl(mktdata)), period=period, delta=delta), label="TVI")
add.indicator(strategy.st, name="lagATR", arguments=list(HLC=quote(HLC(mktdata)), n=period), label="atrX")
# Signals: enter long when vigor is both above 1 AND above its trigger line;
# exit when vigor crosses back under the trigger.
add.signal(strategy.st, name="sigThreshold",
           arguments=list(threshold=1, column="vigor.TVI", relationship="gte", cross=FALSE),
           label="TVIgtThresh")
add.signal(strategy.st, name="sigComparison",
           arguments=list(columns=c("vigor.TVI","trigger.TVI"), relationship="gt"),
           label="TVIgtLag")
add.signal(strategy.st, name="sigAND",
           arguments=list(columns=c("TVIgtThresh","TVIgtLag"), cross=TRUE),
           label="longEntry")
add.signal(strategy.st, name="sigCrossover",
           arguments=list(columns=c("vigor.TVI","trigger.TVI"), relationship="lt"),
           label="longExit")
# Rules: market-on-open entries sized by ATR (risk parity across symbols);
# exits flatten the whole position.
add.rule(strategy.st, name="ruleSignal",
         arguments=list(sigcol="longEntry", sigval=TRUE, ordertype="market",
                        orderside="long", replace=FALSE, prefer="Open", osFUN=osDollarATR,
                        tradeSize=tradeSize, pctATR=pctATR, atrMod="X"),
         type="enter", path.dep=TRUE)
add.rule(strategy.st, name="ruleSignal",
         arguments=list(sigcol="longExit", sigval=TRUE, orderqty="all", ordertype="market",
                        orderside="long", replace=FALSE, prefer="Open"),
         type="exit", path.dep=TRUE)
##### Strategy application #####
t1 <- Sys.time()
out <- applyStrategy(strategy=strategy.st, portfolios=portfolio.st)
t2 <- Sys.time()
print(t2-t1) # backtest runtime
##### Analytics #####
updatePortf(portfolio.st)
dateRange <- time(getPortfolio(portfolio.st)$summary)[-1]
# FIX: update the ACCOUNT here. The original passed portfolio.st; the two
# happen to hold the same string in this script, so it worked by accident.
updateAcct(account.st, dateRange)
updateEndEq(account.st)
# Trade stats
tStats <- tradeStats(Portfolios = portfolio.st, use="trades", inclZeroDays=FALSE)
tStats[,4:ncol(tStats)] <- round(tStats[,4:ncol(tStats)], 2)
print(data.frame(t(tStats[,-c(1,2)])))
# Aggregate profit factor, hit rate, trade count, and mean win/loss ratio.
(aggPF <- sum(tStats$Gross.Profits)/-sum(tStats$Gross.Losses))
(aggCorrect <- mean(tStats$Percent.Positive))
(numTrades <- sum(tStats$Num.Trades))
(meanAvgWLR <- mean(tStats$Avg.WinLoss.Ratio))
# Daily stats
dStats <- dailyStats(Portfolios = portfolio.st, use="Equity")
rownames(dStats) <- gsub(".DailyEndEq","", rownames(dStats))
print(data.frame(t(dStats)))
# Portfolio cash PL
portPL <- .blotter$portfolio.TVI_osATR$summary$Net.Trading.PL
# Cash Sharpe
(SharpeRatio.annualized(portPL, geometric=FALSE))
# Portfolio comparisons to SPY
# Per-instrument daily returns, each expressed as a fraction of TOTAL
# initial account equity.
instRets <- PortfReturns(account.st)
# FIX: portfRets was used below but never defined. Because each column of
# instRets is already scaled by total equity, the portfolio return is the
# row sum, i.e. rowMeans * ncol. (Standard boilerplate for this strategy
# family -- confirm against the original post.)
portfRets <- xts(rowMeans(instRets) * ncol(instRets), order.by = index(instRets))
# Correlations
instCors <- cor(instRets)
# FIX: blank the diagonal of the CORRELATION matrix. The original did
# diag(instRets) <- NA, which corrupted the returns series and left the
# self-correlations of 1 in the row means.
diag(instCors) <- NA
corMeans <- rowMeans(instCors, na.rm=TRUE)
names(corMeans) <- gsub(".DailyEndEq", "", names(corMeans))
print(round(corMeans,3))
mean(corMeans)
# Graph: cumulative strategy equity vs. buy-and-hold SPY.
cumPortfRets <- cumprod(1+portfRets)
firstNonZeroDay <- index(portfRets)[min(which(portfRets!=0))]
getSymbols("SPY", from=firstNonZeroDay, to="2010-12-31")
SPYrets <- diff(log(Cl(SPY)))[-1]
cumSPYrets <- cumprod(1+SPYrets)
comparison <- cbind(cumPortfRets, cumSPYrets)
colnames(comparison) <- c("strategy", "SPY")
chart.TimeSeries(comparison, legend.loc = "topleft",
                 main=paste0("Period=", period, ", Delta=",delta), colors=c("green","red"))
# Sharpe based on returns, annualized returns, and max drawdown
SharpeRatio.annualized(portfRets)
Return.annualized(portfRets)
maxDrawdown(portfRets)
# Individual instrument equity curve
chart.Posn(portfolio.st, "XLB")
# The triggerLag is NOT 30 for the strategy, just amplified in this case to illustrate exit logic.
# The actual trigger lag is defaulted at 1.
tmp <- TVI(Cl(XLB), period=period, delta=delta, triggerLag=1)
add_TA(tmp$vigor, lwd=3)
add_TA(tmp$trigger, on=5, col="red", lwd=1.5)
tmp2 <- lagATR(HLC=HLC(XLB), n=period)
add_TA(tmp2$atr, col="blue", lwd=2)
|
f45f514554c1e14b7e5a6f4cdcec9056de2f3bdb
|
7d0892e68e6f11e7ac961e7355d0fba14e3552a9
|
/tests/testthat/test-skeletor-clean.R
|
2909b93d480a3f8d6252a2bdab981cdd0ee1ee8c
|
[] |
no_license
|
nealrichardson/skeletor
|
948306502015c34edfcab7062904eca69fbe527d
|
3e3c2146f55056cce7bad9366c5fa9388166e8d5
|
refs/heads/main
| 2023-04-15T04:37:36.967555
| 2023-04-07T13:59:47
| 2023-04-07T13:59:47
| 55,750,058
| 20
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,972
|
r
|
test-skeletor-clean.R
|
# Tests for skeletor() when no skeletor.* options are configured: defaults
# ("yourname", "youremail@example.com", "yourgithub") must survive into the
# generated package.
# NOTE(review): relies on helpers presumably defined in the package's test
# setup -- public(), expect_dir_exists(), expect_file_exists(), Rcmd(), and
# the `no.check` flag. Confirm in helper files; not visible here.
context("Skeletor with no configuration")
options(skeletor.name=NULL, skeletor.email=NULL, skeletor.github=NULL)
public({
    # Build a unique destination dir name under tempdir(); tempfile(tmpdir="")
    # yields "/<random>", so strip the leading "/" to get a bare name.
    tmpd <- tempdir()
    pkgdir <- tempfile(tmpdir="")
    pkgdir <- substr(pkgdir, 2, nchar(pkgdir)) ## Remove the leading /. We'll need that later
    dest <- file.path(tmpd, pkgdir)
    test_that("Creating a package skeleton", {
        skeletor("testskeletor", dest)
        expect_dir_exists(dest)
    })
    test_that("The right dirs exist", {
        expect_dir_exists(file.path(dest, "tests"))
        expect_dir_exists(file.path(dest, "man"))
        expect_dir_exists(file.path(dest, "vignettes"))
        # But not this api one
        expect_false(dir.exists(file.path(dest, "tests", "testthat", "example.com")))
    })
    test_that("The right files exist", {
        expect_file_exists(file.path(dest, "DESCRIPTION"))
        expect_file_exists(file.path(dest, ".Rbuildignore"))
        expect_file_exists(file.path(dest, "Makefile"))
        expect_file_exists(file.path(dest, ".gitignore"))
        expect_file_exists(file.path(dest, "R", "testskeletor.R"))
        # But not this one
        expect_false(file.exists(file.path(dest, "tests", "testthat", "test-api.R")))
    })
    # Read the generated files once; the following tests assert on fixed
    # line positions and substrings within them.
    desc <- readLines(file.path(dest, "DESCRIPTION"))
    tests <- readLines(file.path(dest, "tests", "testthat.R"))
    git <- readLines(file.path(dest, ".gitignore"))
    lisc <- readLines(file.path(dest, "LICENSE"))
    onattach <- readLines(file.path(dest, "R", "testskeletor.R"))
    test_that("The package name appears in the contents", {
        expect_identical(desc[1], "Package: testskeletor")
        expect_identical(tests[2], 'test_check("testskeletor")')
        expect_identical(git[4], 'testskeletor*.tar.gz')
    })
    test_that("skeletor.name is empty, so it doesn't get overwritten", {
        expect_identical(lisc[2], "COPYRIGHT HOLDER: yourname")
        expect_true(any(grepl('person\\("your", "name"', desc)))
    })
    test_that("skeletor.github is empty, so it doesn't get overwritten", {
        expect_true("URL: https://github.com/yourgithub/testskeletor" %in% desc)
    })
    test_that("skeletor.email is empty, so it doesn't get overwritten", {
        expect_true(any(grepl("youremail@example.com", desc)))
    })
    test_that("The .onAttach function in the R file points to testskeletor", {
        expect_true('.onLoad <- function (lib, pkgname="testskeletor") {' %in% onattach)
    })
    # Slow end-to-end checks (R CMD build / check), gated by `no.check`.
    if (!no.check) {
        setwd(tmpd)
        test_that("The skeleton package can be built", {
            Rcmd(paste("build", pkgdir))
            expect_file_exists("testskeletor_0.1.0.tar.gz")
        })
        test_that("The built package passes R CMD CHECK", {
            skip_on_appveyor() ## It apparently can't find pdflatex to build the manual
            skip_on_cran() ## In case it is slow
            status <- Rcmd("check testskeletor_0.1.0.tar.gz")
            expect_equal(status, 0)
        })
    }
})
|
3f3905a876e19562749d4f9078cd37e9a314cdbe
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rcarbon/examples/SPpermTest.Rd.R
|
fd32f4ee915d993696e416cb7ba12f074ef8ef28
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,476
|
r
|
SPpermTest.Rd.R
|
# Extracted usage example for rcarbon::SPpermTest (spatial permutation test
# of summed radiocarbon probability distributions). Lines prefixed "##D"
# are the original Rd \dontrun{} example body (slow analysis), kept
# commented out exactly as extracted.
library(rcarbon)
### Name: SPpermTest
### Title: Spatial Permutation Test of summed probability distributions.
### Aliases: SPpermTest
### ** Examples
## Reproduce Crema et al 2017 ##
## Not run:
##D data(euroevol) #load data
##D
##D ## Subset only for 8000 to 5000 Cal BP (c7200-4200 C14BP)
##D edge=800
##D timeRange=c(8000,5000)
##D euroevol2=subset(euroevol,C14Age<=c(timeRange[1]-edge)&C14Age>=c(timeRange[2]-edge))
##D
##D ## define chronological breaks
##D breaks=seq(8000,5000,-500)
##D
##D ## Create a SpatialPoints class object
##D library(sp)
##D sites = unique(data.frame(SiteID=euroevol2$SiteID,
##D Longitude=euroevol2$Longitude,Latitude=euroevol2$Latitude))
##D locations=data.frame(Longitude=sites$Longitude,Latitude=sites$Latitude)
##D rownames(locations)=sites$SiteID
##D coordinates(locations)<-c("Longitude","Latitude")
##D proj4string(locations)<- CRS("+proj=longlat +datum=WGS84")
##D
##D ## Compute Distance and Spatial Weights
##D distSamples=spDists(locations,locations,longlat = TRUE)
##D spatialweights=spweights(distSamples,h=100) #using a kernal bandwidth of 100km
##D
##D ## Calibration and binning
##D bins=binPrep(sites=euroevol2$SiteID,ages=euroevol2$C14Age,h=200)
##D calDates=calibrate(x=euroevol2$C14Age,errors=euroevol2$C14SD,
##D timeRange=timeRange,normalised=FALSE)
##D
##D ## Main Analysis (over 2 cores; requires doParallel package)
##D ## NOTE: the number of simulations should be ideally larger
##D ## to ensure a better resolution of the p/q-values.
##D res.locations=SPpermTest(calDates,timeRange=timeRange,bins=bins,locations=locations,
##D spatialweights=spatialweights,breaks=breaks,ncores=2,nsim=100,
##D permute="locations",datenormalised=FALSE)
##D
##D ## Plot results
##D library(rworldmap)
##D base=getMap(resolution="low") #optionally add base map
##D #retrieve coordinate limits#
##D xrange=bbox(res.locations$locations)[1,]
##D yrange=bbox(res.locations$locations)[2,]
##D
##D par(mfrow=c(2,2))
##D par(mar=c(0.1,0.1,0,0.5))
##D plot(base,col="antiquewhite3",border="antiquewhite3",xlim=xrange,ylim=yrange)
##D plot(res.locations,index=4,add=TRUE,option="raw",breakRange=c(-0.005,0.005))
##D plot(res.locations,option="rawlegend",breakRange=c(-0.005,0.005),rd=3)
##D par(mar=c(0.1,0.1,0,0.5))
##D plot(base,col="antiquewhite3",border="antiquewhite3",xlim=xrange,ylim=yrange)
##D plot(res.locations,index=4,add=TRUE,option="test")
##D plot(res.locations,option="testlegend")
## End(Not run)
|
c2c3bf7610fbd058d88883ab22d0d26c6d0a959e
|
94b36ac1cbce409b15b404e33ef947e8a5d1dbc2
|
/R/seeds_per_ecotype.R
|
3a75e90f90e351f097b7d1b2062827e7b713bbe2
|
[] |
no_license
|
MoisesExpositoAlonso/grene
|
dacaa0104851cfbe6bd9579c9cced1fb76edbd4b
|
8dc5f554f5c10935020cdcb9beaf7cd5944e8bb5
|
refs/heads/master
| 2023-03-09T14:04:19.253830
| 2018-04-01T10:46:03
| 2018-04-01T10:46:03
| 96,513,155
| 0
| 0
| null | 2022-09-21T22:32:56
| 2017-07-07T07:44:04
|
R
|
UTF-8
|
R
| false
| false
| 122
|
r
|
seeds_per_ecotype.R
|
## Read the per-ecotype seed-weight aliquote table (tab-separated).
## FIX: the original path contained shell-style escaped spaces ("\ "),
## which is an invalid escape sequence in an R string literal and makes
## the file fail to parse.
ecotype.w <- read.table("data-raw/Grene-Net ecotypes - aliquote.tsv", header = TRUE, sep = "\t")
## FIX: the original bare symbol `weight_put_on_master_mix..g.` referenced
## the column as if it were a free-standing object (object-not-found error);
## extract it from the data frame instead.
ecotype.w$weight_put_on_master_mix..g.
|
3d3277785b37ea3b5fec03d8e511ff313db95a14
|
287add902a548b978254b03f571f5e127d325e88
|
/R/IDW.R
|
0880969d1f45759fb662ae43e997f6e4493ad7d3
|
[] |
no_license
|
Auburngrads/publicLibs
|
e36884552220fcf859d28ef5cc16d26baeb23f65
|
804efbb6bc80f5053712e375a09d2d46ce2f61a6
|
refs/heads/master
| 2021-01-17T02:44:58.943620
| 2020-07-20T00:32:03
| 2020-07-20T00:32:03
| 58,672,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,141
|
r
|
IDW.R
|
## IDW.R -- inverse-distance-weighted library-access scores for CONUS bases,
## evaluated at 5..100-mile radii for straight-line and driving distance.
Base_Locations <- read.table("inst/extdata/Base_Locations.txt", header = TRUE, stringsAsFactors = FALSE)
# Drop four non-CONUS rows.
CONUS_Locations <- Base_Locations[-c(2, 19, 21, 28), ]

# Radii (miles) at which accessibility is evaluated.
radius <- seq(5, 100, by = 5)

# Zero-fill one raw and one standardised column per radius, interleaved in
# the order the positional indexing below relies on: for the k-th radius the
# raw score sits at column 4 + 2k and the standardised score at 5 + 2k.
for (r in radius) {
  CONUS_Locations[[paste0(r, "_Miles")]] <- 0
  CONUS_Locations[[paste0("Std_", r, "_Miles")]] <- 0
}

# Compute the weighted library score for every base and radius, min-max
# standardise each radius column, write the result to `out_file`, and
# return the updated data frame.
#
# locations:        data frame with the base name in column 3 and the
#                   pre-created score columns (see loop above).
# dist_col:         name of the distance column in the per-base files.
# size_weight_col:  name of the size*weight column to sum.
# weight_col:       name of the weight column to sum.
# radii:            vector of radii in miles.
# out_file:         path the finished table is written to.
computeIDW <- function(locations, dist_col, size_weight_col, weight_col, radii, out_file) {
  for (k in seq_along(radii)) {
    rad <- radii[k]
    for (i in seq_len(nrow(locations))) {
      # One "<Base>_Libs.txt" file per base, keyed by the name in column 3.
      file_name <- paste0(locations[i, 3], "_Libs.txt")
      base_libs <- read.table(paste0("inst/extdata4/Drive Distance/", file_name),
                              header = TRUE, stringsAsFactors = FALSE)
      within_radius <- base_libs[[dist_col]] <= rad
      locations[i, 4 + 2 * k] <- sum(base_libs[within_radius, size_weight_col]) /
        sum(base_libs[within_radius, weight_col])
    }
    # Bases with no library in range yield 0/0 = NaN; treat those as 0.
    # (gsub on a numeric column coerces to character and back, matching the
    # original behaviour exactly.)
    locations[, 4 + 2 * k] <- as.numeric(gsub("NaN", 0, locations[, 4 + 2 * k]))
    max_idw <- as.numeric(max(locations[, 4 + 2 * k]))
    min_idw <- as.numeric(min(locations[, 4 + 2 * k]))
    # Min-max standardisation to [0, 1] for the companion Std_ column.
    locations[, 5 + 2 * k] <- (locations[, 4 + 2 * k] - min_idw) / (max_idw - min_idw)
  }
  write.table(locations, file = out_file, row.names = FALSE)
  locations
}

#--------------Straight Distance-----------------------
CONUS_Locations <- computeIDW(CONUS_Locations, "Distance", "SizeWeight", "Weight",
                              radius, "inst/extdata4/IDW/Straight1.txt")
#---------------------Drive Distance-------------------------
# Overwrites every score column, so it is independent of the straight pass.
CONUS_Locations <- computeIDW(CONUS_Locations, "DriveDistance", "DriveSizeWeight", "DriveWeight",
                              radius, "inst/extdata4/IDW/Drive1.txt")

# NOTE(review): this reads "Drive.txt", not the "Drive1.txt" written above --
# confirm which file is intended.
test1 <- read.table("inst/extdata4/IDW/Drive.txt", header = TRUE, stringsAsFactors = FALSE)
View(test1)
|
fc83d6e8fe90070c3c11b77cd21aca8b56750a97
|
657bbe51376c41fd550622df261d7fc044e652b9
|
/RANDOM FOREST/Random Forest On Glass Dataset/RANDOM FOREST glass.R
|
c26ce919e3f2c87d64438ec768b671e0c56b44de
|
[] |
no_license
|
dattatrayshinde/Machine-Learning-Algorithms-in-R
|
8cebd46d0aca539d2f01ce13d61634854aae7d9e
|
df0152ffe6f3350223d1477b2e0a4cf7235b5631
|
refs/heads/master
| 2021-06-16T09:34:46.475879
| 2016-12-02T19:26:27
| 2016-12-02T19:26:27
| 75,424,136
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
RANDOM FOREST glass.R
|
## Random-forest classification demos: the Glass data set (mlbench) with a
## random one-third holdout, then iris with a seeded 50/50 split.
## NOTE(review): installing on every run is unusual -- kept as in the source.
install.packages("randomForest")
library("randomForest")
library("mlbench")
## ---- Glass: hold out a random third of the rows as a test set ----
data(Glass, package = "mlbench")
Glass
row_ids <- 1:nrow(Glass)
holdout <- sample(row_ids, trunc(length(row_ids) / 3))
glass_train <- Glass[-holdout, ]
glass_test <- Glass[holdout, ]
# fit=randomForest(Type~.,train,ntree=500)
glass_rf <- randomForest(Type ~ ., glass_train, ntree = 2)
summary(glass_rf)
glass_pred <- predict(glass_rf, glass_test)
glass_confusion <- table(glass_pred, glass_test$Type)
glass_confusion
# Overall accuracy = correct predictions (diagonal) over all predictions.
glass_accuracy <- sum(diag(glass_confusion)) / sum(glass_confusion)
glass_accuracy
# -----------------------------------------------------
iris
# -----------------------------------------------------
## ---- iris: seeded random 50/50 train/test split ----
set.seed(123)
split_id <- sample(2, nrow(iris), replace = TRUE, prob = c(0.50, 0.50))
iris_train <- iris[split_id == 1, 1:5]
iris_test <- iris[split_id == 2, 1:5]
iris_rf <- randomForest(Species ~ ., iris_train, ntree = 100)
summary(iris_rf)
iris_pred <- predict(iris_rf, iris_test)
iris_confusion <- table(iris_pred, iris_test$Species)
iris_confusion
iris_accuracy <- sum(diag(iris_confusion)) / sum(iris_confusion)
iris_accuracy
# -----------------------------------------------------
|
ab98ff0113542799270eab1770592d20f4ce38d5
|
e44f1a8bcf0bdbc68e1dcfa0a744180409e6fa1c
|
/Lab1_cv1-4.R
|
c57445a9b3f73c4d1f9f68b972ebd0b535289ecc
|
[] |
no_license
|
Zihan9710/DataAnalytics2020_Zihan_Zhao
|
1af4e9dc600811e3c386d8386ff7b0bb8ca6e99d
|
e40b2f0383d5be3f4258f0813fc4339782172a6b
|
refs/heads/master
| 2023-02-03T02:08:17.915709
| 2020-12-16T16:34:22
| 2020-12-16T16:34:22
| 294,724,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,054
|
r
|
Lab1_cv1-4.R
|
# Cross-validation examples with cvTools on the coleman schools data.
library(cvTools)
library(robustbase)
data(coleman)
#------------------cv1------------------------------
# Unevaluated call object for the robust MM-regression fit; cvTool() and
# cvTuning() evaluate it once per cross-validation replication.
call <- call("lmrob", formula = Y ~ .)
# set up folds for cross-validation
folds <- cvFolds(nrow(coleman), K = 5, R = 10)
# perform cross-validation
# Cost: root trimmed mean squared prediction error, trimming 10%.
cvTool(call, data = coleman, y = coleman$Y, cost = rtmspe, folds = folds, costArgs = list(trim = 0.1))
#vary K and R
#look at cvfits, use densityplot,
# NOTE(review): seq(2., 6., 20) is seq(from, to, by) and therefore yields
# the single value 2 -- a grid of tuning constants (length.out = 20?) was
# probably intended. TODO confirm against the original exercise.
tuning <- list(tuning.psi=seq(2., 6., 20))
cvFitsLmrob <- cvTuning(call, data = coleman, y = coleman$Y, tuning = tuning, cost = rtmspe, folds = folds, costArgs = list(trim = 0.1))
# look at output
cvFitsLmrob
# summarize
aggregate(cvFitsLmrob, summary)
#--------------------cv2--------------------------
# FIX: boot must be attached BEFORE the first cv.glm()/glm.diag() call; the
# original loaded it only midway through this section, after those functions
# had already been used.
library(boot)
library(MASS)
data(mammals)
mammals.glm <- glm(log(brain) ~ log(body), data = mammals)
# Leave-one-out and 6-fold CV estimates of prediction error (delta =
# raw and bias-adjusted estimates).
(cv.err <- cv.glm(mammals, mammals.glm)$delta)
(cv.err.6 <- cv.glm(mammals, mammals.glm, K = 6)$delta)
# Leave-one-out cross-validation estimate without any extra model-fitting.
muhat <- fitted(mammals.glm)
mammals.diag <- glm.diag(mammals.glm)
(cv.esterr <- mean((mammals.glm$y - muhat)^2/(1 - mammals.diag$h)^2))
# leave-one-out and 11-fold cross-validation prediction error for
# the nodal data set. Since the response is a binary variable
# an appropriate cost function is
data(nodal)
cost <- function(r, pi = 0) mean(abs(r-pi) > 0.5)
nodal.glm <- glm(r ~ stage+xray+acid, binomial, data = nodal)
(cv.err <- cv.glm(nodal, nodal.glm, cost, K = nrow(nodal))$delta)
(cv.11.err <- cv.glm(nodal, nodal.glm, cost, K = 11)$delta)
#--------------------cv3--------------------------
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider removing (both packages are already attached at the top of the file).
install.packages("robustbase")
install.packages("cvTools")
library("robustbase")
# NOTE(review): require() is for conditional use; library() (which errors on
# failure) would be the conventional choice here.
require(cvTools)
data("coleman")
set.seed(1234) # set seed for reproducibility
## set up folds for cross-validation
folds <- cvFolds(nrow(coleman), K = 5, R = 10)
## compare raw and reweighted LTS estimators for
## 50% and 75% subsets
# 50% subsets
fitLts50 <- ltsReg(Y ~ ., data = coleman, alpha = 0.5)
cvFitLts50 <- cvLts(fitLts50, cost = rtmspe, folds = folds,
                    fit = "both", trim = 0.1)
# 75% subsets
fitLts75 <- ltsReg(Y ~ ., data = coleman, alpha = 0.75)
cvFitLts75 <- cvLts(fitLts75, cost = rtmspe, folds = folds,
                    fit = "both", trim = 0.1)
# combine results into one object
cvFitsLts <- cvSelect("0.5" = cvFitLts50, "0.75" = cvFitLts75)
cvFitsLts
# "cv" object: accessors for the number of CV results, number of fits,
# their names (renamable), and the stored fits.
ncv(cvFitLts50)
nfits(cvFitLts50)
cvNames(cvFitLts50)
cvNames(cvFitLts50) <- c("improved", "initial")
fits(cvFitLts50)
cvFitLts50
# "cvSelect" object: same accessors on the combined comparison object.
ncv(cvFitsLts)
nfits(cvFitsLts)
cvNames(cvFitsLts)
cvNames(cvFitsLts) <- c("improved", "initial")
fits(cvFitsLts)
fits(cvFitsLts) <- 1:2
cvFitsLts
#--------------------cv4--------------------------
# assumes coleman, robustbase and cvTools
set.seed(4321) # set seed for reproducibility
## set up folds for cross-validation
folds <- cvFolds(nrow(coleman), K = 5, R = 10)
## compare raw and reweighted LTS estimators for
## 50% and 75% subsets
# 50% subsets
fitLts50 <- ltsReg(Y ~ ., data = coleman, alpha = 0.5)
cvFitLts50 <- cvLts(fitLts50, cost = rtmspe, folds = folds,
                    fit = "both", trim = 0.1)
# 75% subsets
fitLts75 <- ltsReg(Y ~ ., data = coleman, alpha = 0.75)
cvFitLts75 <- cvLts(fitLts75, cost = rtmspe, folds = folds,
                    fit = "both", trim = 0.1)
# combine results into one object
cvFitsLts <- cvSelect("0.5" = cvFitLts50, "0.75" = cvFitLts75)
cvFitsLts
# summary of the results with the 50% subsets
aggregate(cvFitLts50, summary)
# summary of the combined results
aggregate(cvFitsLts, summary)
## evaluate MM regression models tuned for
## 80%, 85%, 90% and 95% efficiency
tuning <- list(tuning.psi=c(3.14, 3.44, 3.88, 4.68))
# set up function call
call <- call("lmrob", formula = Y ~ .)
# perform cross-validation
cvFitsLmrob <- cvTuning(call, data = coleman,
                        y = coleman$Y, tuning = tuning, cost = rtmspe,
                        folds = folds, costArgs = list(trim = 0.1))
cvFitsLmrob
# summary of results
aggregate(cvFitsLmrob, summary)
|
7b6c03f59e1a6dc0bd4b996364116b5369456de8
|
aab278c6ce3c4bc1f8ba8c12f1b9440578b40eb2
|
/man/fitted.AMMI.Rd
|
c07ee6187a0f7bbf8834565ea927f8c04e383803
|
[] |
no_license
|
Manigben/statgenGxE
|
23623fb8cd5177ed8ef9f03c88acf46350bfe141
|
fc438b805822050ae2f0ef0f264c1f581985e7d1
|
refs/heads/master
| 2023-06-01T16:06:19.305022
| 2021-01-08T10:41:55
| 2021-01-08T10:41:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 709
|
rd
|
fitted.AMMI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createAMMI.R
\name{fitted.AMMI}
\alias{fitted.AMMI}
\title{Extract fitted values.}
\usage{
\method{fitted}{AMMI}(object, ...)
}
\arguments{
\item{object}{An object of class AMMI}
\item{...}{Not used.}
}
\value{
A data.frame with fitted values.
}
\description{
Extract the fitted values for an object of class AMMI.
}
\examples{
## Run AMMI analysis on TDMaize.
geAmmi <- gxeAmmi(TD = TDMaize, trait = "yld")
## Extract fitted values.
fitAmmi <- fitted(geAmmi)
head(fitAmmi)
}
\seealso{
Other AMMI:
\code{\link{gxeAmmi}()},
\code{\link{plot.AMMI}()},
\code{\link{report.AMMI}()},
\code{\link{residuals.AMMI}()}
}
\concept{AMMI}
|
7950e7bea22b845531ea7e7b7d2ca5b432f13a93
|
9086b91355e7e5d62236ea7105162d426dbbb29a
|
/run_analysis.R
|
71963dcff69887e28a0a28b09449594eda69b672
|
[] |
no_license
|
pgupta05/getting-cleaning-data-project
|
7887ebe70dfe1104f6ed281daa5f5edc4757d3db
|
22901e666ac7d7dd557d80eca2cbe0925c053036
|
refs/heads/master
| 2021-01-21T21:48:32.801963
| 2015-06-21T14:32:00
| 2015-06-21T14:32:00
| 37,810,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,257
|
r
|
run_analysis.R
|
## Open libraries
library(reshape2)
## define root dir
rootDir = "C:/MyDataDir/Client/R-Code/UCI HAR Dataset/"
## Define Function
## Build a tidy data set of per-subject, per-activity averages of every
## mean()/std() measurement in the UCI HAR data under the global `rootDir`.
##
## Side effects: prints the combined row count and writes
## "./tidy_movement_data.txt" in the working directory.
runAnalysis <- function() {
  ## Activity id -> descriptive name lookup.
  activityList <- read.table(file.path(rootDir, "activity_labels.txt"), sep = " ",
                             col.names = c("activity_id", "activity_name"), fill = FALSE)
  ## Feature names: one per column of the X_*.txt measurement files.
  featureList <- read.table(file.path(rootDir, "features.txt"), sep = " ",
                            col.names = c("feature_id", "feature_name"), fill = FALSE)
  colNames <- featureList[, 2]
  ## Path of one data file, e.g. dataFile("X", "test") -> ".../test/X_test.txt".
  ## (FIX: the original passed sep = "" to paste0(); paste0 has no `sep`
  ## argument, so it was silently pasted as an extra empty string.)
  dataFile <- function(type, name) {
    file.path(rootDir, name, paste0(type, "_", name, ".txt"))
  }
  ## Read measurements, activity ids and subject ids for one partition
  ## ("test" or "train") and label the columns. Replaces the duplicated
  ## read-and-label code the original had for each partition.
  readPartition <- function(name) {
    data <- read.table(dataFile("X", name))
    activity <- read.table(dataFile("y", name))
    subject <- read.table(dataFile("subject", name))
    colnames(data) <- colNames
    colnames(activity) <- "activity_id"
    colnames(subject) <- "subject_id"
    list(data = data, activity = activity, subject = subject)
  }
  test <- readPartition("test")
  train <- readPartition("train")
  ## Keep only the mean() and std() features (fixed = TRUE: literal "()" match).
  meanColNames <- grep("mean()", colNames, fixed = TRUE, value = TRUE)
  stdColNames <- grep("std()", colNames, fixed = TRUE, value = TRUE)
  ## Bind subject id, activity id and the selected features; stack train
  ## above test, as in the original.
  all_data <- rbind(
    cbind(train$subject, train$activity, train$data[, c(meanColNames, stdColNames)]),
    cbind(test$subject, test$activity, test$data[, c(meanColNames, stdColNames)])
  )
  print(nrow(all_data))
  ## Replace activity ids with descriptive activity names.
  desc_names <- merge(all_data, activityList, by.x = "activity_id",
                      by.y = "activity_id", all = TRUE)
  all_data <- desc_names[, c("subject_id", "activity_name", meanColNames, stdColNames)]
  ## Melt, then average every variable per subject/activity pair.
  melt.data <- melt(all_data, id = c("subject_id", "activity_name"))
  mean.data <- dcast(melt.data, subject_id + activity_name ~ variable,
                     fun.aggregate = mean, na.rm = TRUE)
  ## Create a file with the new tidy dataset.
  write.table(mean.data, "./tidy_movement_data.txt", row.names = FALSE, quote = FALSE)
}
|
0b7c511a20a9d4e6aa7bd5b109f37e7a3271bc7a
|
8b501a313569030bdef0c1fe4896b646fd5670f3
|
/tests/testthat/test-mgcViz_plot_effects.R
|
c125cd504e84694f250ec36b18460a53b5448ccc
|
[] |
no_license
|
mfasiolo/testGam
|
50990a918571fa20cbc20cc7aee3bd5b6cd4cf64
|
7b831b36978cdb23436c286b03588dbf7beb042b
|
refs/heads/master
| 2020-06-02T04:05:28.747046
| 2019-10-02T20:34:22
| 2019-10-02T20:34:22
| 191,029,853
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
test-mgcViz_plot_effects.R
|
# Regression test: plotting a parametric spline term (bs() matrix term) with
# allTerms = TRUE must not error across all mgcViz model-fitting wrappers.
context("plot effects")
test_that("plot.ptermMatrixNumeric", {
  library(splines)
  library(mgcViz)
  # Seeded simulated data so every wrapper fits the same data set.
  set.seed(2)
  dat <- gamSim(1, n = 200, dist="normal", scale=2)
  dat$fac <- as.factor(sample(1:5, 200, replace=TRUE))
  #####
  ## gamV, bamV, qgamV, gammV, gamm4V version
  ####
  # Fit the same formula with each wrapper; the random-effect wrappers get a
  # simple factor intercept.
  obj <- plt <- list()
  obj[[1]] <- gamV(y ~ bs(x0, knots = 0.5, degree = 1) + s(x3), data = dat)
  obj[[2]] <- bamV(y ~ bs(x0, knots = 0.5, degree = 1) + s(x3), data = dat)
  obj[[3]] <- qgamV(y ~ bs(x0, knots = 0.5, degree = 1) + s(x3), data = dat, qu = 0.5)
  obj[[4]] <- gammV(y ~ bs(x0, knots = 0.5, degree = 1) + s(x3), random=list(fac=~1), data = dat)
  obj[[5]] <- gamm4V(y ~ bs(x0, knots = 0.5, degree = 1) + s(x3), random=~(1|fac), data = dat)
  # expect_error(..., NA) asserts that NO error is raised.
  for(ii in 1:5){
    expect_error(plt[[ii]] <- plot(obj[[ii]], allTerms = TRUE, trans = exp, select = 2), NA)
  }
  plt[[1]]
  #####
  ## getViz version (test only gam version)
  ####
  # gamV
  b <- gam(y ~ bs(x0, knots = 0.5, degree = 1) + s(x3), data = dat)
  b <- getViz(b)
  expect_error(pl <- plot(b, allTerms = TRUE, trans = exp, select = 2), NA)
})
|
fea68dc5e126cc67c39d17372f2e208e05658a64
|
31cafb24cd7f8ba80ef3b3460a3954113093cbab
|
/R/covWeight.R
|
f2ce25d824e698b876ffea08540a2b17d3bf1c63
|
[] |
no_license
|
alfcrisci/Reot
|
2f683d79a43690ffde0692e524d2c28b8a64476c
|
64bdc06dc8778e7bd871ed9bb6667f273ce54ea9
|
refs/heads/master
| 2021-01-20T22:51:20.211381
| 2013-09-24T18:00:13
| 2013-09-24T18:00:13
| 13,077,334
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
covWeight.R
|
# Weighted covariance of `y` with the correlation matrix included.
#
# Thin wrapper around stats::cov.wt() that always requests `cor = TRUE`,
# so the result list carries `cov`, `cor`, `center` and `n.obs`.
#
# y:       numeric matrix or data frame (observations in rows).
# weights: non-negative observation weights, one per row of `y`.
covWeight <- function(y, weights) {
  cov.wt(y, wt = weights, cor = TRUE)
}
|
e49d9c70aa456e8ab18161687c0342166403aa08
|
bdf8de74bc020f27aa3cc3d410cac8aba7c5269c
|
/Lab1.R
|
55a02e643bed9ac9b5661067c1194e22bafbc932
|
[] |
no_license
|
JuanAndres896/Lab1DataScience
|
772265bf0865a6cb60bc614a1f4f206e3b3fe57c
|
c744f956560f52208a6c39e2d2dcb90cda50fe22
|
refs/heads/master
| 2020-03-25T22:43:16.972611
| 2018-08-29T05:27:41
| 2018-08-29T05:27:41
| 144,238,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,175
|
r
|
Lab1.R
|
# Univerisdad del Valle de Guatemala
# Data Science 1 - Seccion 10
# Juan Andres Garcia - 15046
# Laboratorio 1
#setwd("~/Sync/DATOS/DATA/LAB1")
# Kaggle House Prices data: train/test sets plus the sample submission.
test<-read.csv("test.csv")
train<-read.csv("train.csv")
sample<-read.csv("sample_submission.csv")
# Build ntrain: the Id column plus every numeric column of `train`,
# preserving original column names.
a<-vector()
ntrain<-as.data.frame(train$Id)
colnames(ntrain)<-colnames(train[1])
for(i in 2:length(train)){
  if(is.numeric(train[,i])){
    a<-train[,i]
    ntrain<-cbind(ntrain,a)
    colnames(ntrain)[length(ntrain)]<-colnames(train[i])
  }
}
# Build ftrain: the Id column plus every factor (categorical) column.
a<-vector()
ftrain<-as.data.frame(train$Id)
colnames(ftrain)<-colnames(train[1])
for(i in 2:length(train)){
  if(is.factor(train[,i])){
    a<-train[,i]
    ftrain<-cbind(ftrain,a)
    colnames(ftrain)[length(ftrain)]<-colnames(train[i])
  }
}
# Hand-written variable-type labels (Discreta/Continua/Categórica), one per
# variable of the data set.
cvars<-c("Discreta","Continua","Categórica","Discreta","Discreta","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Discreta","Discreta","Discreta","Discreta","Categórica","Categórica","Categórica","Categórica","Categórica","Discreta","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Discreta","Categórica","Discreta","Discreta","Discreta","Categórica","Categórica","Categórica","Discreta","Discreta","Discreta","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Categórica","Discreta","Categórica","Categórica","Discreta","Categórica","Discreta","Discreta","Categórica","Categórica","Categórica","Discreta","Discreta","Discreta","Discreta","Discreta","Discreta","Categórica","Categórica","Categórica","Discreta","Discreta","Discreta","Categórica","Categórica")
# NOTE(review): `vars` is used here but never defined anywhere above -- this
# line errors with "object 'vars' not found". A variable-name table was
# presumably built in a missing earlier step; confirm and restore it.
vars<-cbind(vars,cvars)
# For each variable: its strongest correlation (< 1) and the partner
# variable's name.
cors<-vector()
b1<-vector()
b2<-vector()
b3<-vector()
b4<-vector()
# NOTE(review): at this point `a` still holds the last factor column from
# the ftrain loop, but the code below treats it as a correlation matrix
# (row names, colnames, and a["SalePrice", ...] at the end). A step such as
# `a <- as.data.frame(cor(ntrain))` appears to be missing -- confirm.
for(i in 1:length(a)){
  b1<-a[i]
  b2[i]<-max(b1[b1<1])
  b3[i]<-row.names(b1)[b1==b2[i]]
  b4[i]<-colnames(a)[i]
}
cors<-as.data.frame(cbind(b4,b2,b3))
colnames(cors)<-c("Variable","Correlacion","Variable Cor")
# Correlations of every variable with SalePrice (dropping Id and itself).
scors<-a["SalePrice",c(-1,-38)]
|
2273ee412a11c4cca9bba3bfc3fa85392bd9ffd7
|
343d569ab4a4a89a762c58f4fda375ab95823f0a
|
/man/Coll.Rd
|
348381f9f4426d2cadb3ea1b177b0ffca2842bac
|
[] |
no_license
|
asancpt/sasLM
|
d62aa33ac3e63aff1c1a2db92a4c8615840ba06b
|
8c8d4dcf5f556a44bedfa5b19d3094bbd41bc486
|
refs/heads/master
| 2023-05-26T06:33:44.773640
| 2021-06-15T03:50:02
| 2021-06-15T03:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 845
|
rd
|
Coll.Rd
|
\name{Coll}
\alias{Coll}
\title{Collinearity Diagnostics}
\description{
Collinearity diagnostics with tolerance, VIF, eigenvalue, condition index, and variance proportions
}
\usage{
Coll(Formula, Data)
}
\arguments{
  \item{Formula}{formula of the model}
\item{Data}{input data as a matrix or data.frame}
}
\details{
Sometimes collinearity diagnostics after multiple linear regression are necessary.
}
\value{
\item{Tol}{tolerance of independent variables}
\item{VIF}{variance inflation factor of independent variables}
\item{Eigenvalue}{eigenvalue of Z'Z (crossproduct) of standardized independent variables}
\item{Cond. Index}{condition index}
\item{under the names of coefficients}{proportions of variances}
}
\author{Kyun-Seop Bae k@acr.kr}
\examples{
Coll(mpg ~ disp + hp + drat + wt + qsec, mtcars)
}
|
f1f45833342625121b3813f84546e6ffc31270c1
|
99cfb90523e084cc553b642574d43861329ab5ba
|
/tests/testthat/test-fun_probabilstic_forecasts_integer.R
|
803941ba0e15cb58f1914129e1c6ceabc19bc805
|
[
"MIT"
] |
permissive
|
laasousa/scoringutils
|
95220ecc365cd617703eb10bb25f6d6c60e05bb5
|
cd531cebf62e8b365f33e723819c1810098036df
|
refs/heads/master
| 2021-04-08T22:16:03.789884
| 2020-03-18T14:52:24
| 2020-03-18T14:52:24
| 248,814,529
| 1
| 0
|
NOASSERTION
| 2020-03-20T17:29:06
| 2020-03-20T17:29:05
| null |
UTF-8
|
R
| false
| false
| 6,124
|
r
|
test-fun_probabilstic_forecasts_integer.R
|
# ===================================================================== #
# pit_int
# ===================================================================== #

# pit_int() must fail loudly when either required argument is absent.
# (Fixture variables that were defined but never passed to the call in the
# original tests have been removed.)
test_that("function throws an error when missing true_values",
          {
            predictions <- replicate(50, rpois(n = 10, lambda = 1:10))

            expect_error(pit_int(predictions = predictions))
          })

test_that("function throws an error when missing 'predictions'",
          {
            true_values <- rpois(10, lambda = 1:10)

            expect_error(pit_int(true_values = true_values))
          })

# Non-integer true values should trigger a warning (pit_int is integer-only).
test_that("function throws a warning for wrong format of true_value",
          {
            true_values <- runif(10, min = 0, max = 1)
            predictions <- replicate(10, rpois(10, lambda = 1:10))

            expect_warning(pit_int(true_values = true_values,
                                   predictions = predictions))
          })

test_that("function throws a warning for wrong format of predictions",
          {
            true_values <- rpois(10, lambda = 1:10)

            # Continuous predictions: warn.
            predictions <- replicate(10, runif(10, min = 0, max = 10))
            expect_warning(pit_int(true_values = true_values,
                                   predictions = predictions))

            # Wrong container type (list instead of matrix): error.
            predictions <- list(replicate(10, rpois(10, lambda = 1:10)))
            expect_error(pit_int(true_values = true_values,
                                 predictions = predictions))

            # Dimension mismatch (13 rows of predictions vs 10 true values).
            predictions <- replicate(10, runif(13, min = 0, max = 10))
            expect_error(pit_int(true_values = true_values,
                                 predictions = predictions))
          })

# Happy path: pit_int returns a 4-element list whose first element is numeric.
test_that("function works for correct format of true_values and predictions",
          {
            true_values <- rpois(10, lambda = 1:10)
            predictions <- replicate(10, rpois(10, lambda = 1:10))

            output <- pit_int(true_values = true_values,
                              predictions = predictions)

            expect_equal(length(output),
                         4)
            expect_equal(class(output),
                         "list")
            expect_equal(class(output[[1]]),
                         "numeric")
          })
# ===================================================================== #
# bias_int
# ===================================================================== #

# bias_int() must fail loudly when either required argument is absent.
# (Fixture variables that were defined but never passed to the call in the
# original tests have been removed.)
test_that("function throws an error when missing true_values",
          {
            predictions <- replicate(50, rpois(n = 10, lambda = 1:10))

            expect_error(bias_int(predictions = predictions))
          })

test_that("function throws an error when missing 'predictions'",
          {
            true_values <- rpois(10, lambda = 1:10)

            expect_error(bias_int(true_values = true_values))
          })

# Non-integer true values should trigger a warning.
test_that("function throws a warning for wrong format of true_value",
          {
            true_values <- runif(10, min = 0, max = 1)
            predictions <- replicate(10, rpois(10, lambda = 1:10))

            expect_warning(bias_int(true_values = true_values,
                                    predictions = predictions))
          })

test_that("function throws a warning for wrong format of predictions",
          {
            true_values <- rpois(10, lambda = 1:10)

            # Continuous predictions: warn.
            predictions <- replicate(10, runif(10, min = 0, max = 10))
            expect_warning(bias_int(true_values = true_values,
                                    predictions = predictions))

            # Wrong container type (list instead of matrix): error.
            predictions <- list(replicate(10, rpois(10, lambda = 1:10)))
            expect_error(bias_int(true_values = true_values,
                                  predictions = predictions))

            # Dimension mismatch (13 rows of predictions vs 10 true values).
            predictions <- replicate(10, runif(13, min = 0, max = 10))
            expect_error(bias_int(true_values = true_values,
                                  predictions = predictions))
          })

# Happy path: one numeric bias value per true observation.
test_that("function works for correct format of true_values and predictions",
          {
            true_values <- rpois(10, lambda = 1:10)
            predictions <- replicate(10, rpois(10, lambda = 1:10))

            output <- bias_int(true_values = true_values,
                               predictions = predictions)

            expect_equal(length(output),
                         length(true_values))
            expect_equal(class(output),
                         "numeric")
          })
# ===================================================================== #
# sharpness
# ===================================================================== #

# sharpness() must error when called without its required `predictions`
# argument. (The original defined a `predictions` fixture that was never
# used by the call; it has been removed.)
test_that("function throws an error when missing 'predictions'",
          {
            expect_error(sharpness())
          })
# test_that("function throws a warning for wrong format of predictions",
# {
# predictions <- replicate(10, runif(10, min = 0, max = 10))
#
# expect_warning(sharpness(predictions = predictions))
#
# predictions <- list(replicate(10, rpois(10, lambda = 1:10)))
# expect_error(bias_int(true_values = true_values,
# predictions = predictions))
#
# predictions <- replicate(10, runif(13, min = 0, max = 10))
# expect_error(bias_int(true_values = true_values,
# predictions = predictions))
# })
# test_that("function works for correct format of predictions",
# {
# true_values <- rpois(10, lambda = 1:10)
# predictions <- replicate(10, rpois(10, lambda = 1:10))
# output <- bias_int(true_values = true_values,
# predictions = predictions)
# expect_equal(length(output),
# length(true_values))
# expect_equal(class(output),
# "numeric")
# })
|
f7f5a92a6787df8d2066d15d793d3a55fee6b1c4
|
c85e6a6ee42cc1fc2334b1773929c3215fc13d45
|
/DL_Coursework/IDLR/Chapter6_CodeExamples.R
|
4a9ccf9d5219033f067795cf130408998f0941fc
|
[] |
no_license
|
wwells/CUNY_DATA_698
|
74edc514a46130385320668dd2773cff4e91b18f
|
21dd1e2995b235d529e613b5d77e21074d92da6e
|
refs/heads/master
| 2021-05-11T07:13:24.683732
| 2018-05-05T13:27:43
| 2018-05-05T13:27:43
| 118,012,390
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,216
|
r
|
Chapter6_CodeExamples.R
|
# Chapter 6 Code Examples
# Recurrent Neural Networks for Sequence Prediction
# NOTE(review): the original `rm(list = ls())` was removed — wiping the
# caller's global environment is a side effect a script should not have.

# Load the necessary packages. library() (unlike require()) errors
# immediately when a package is missing, which is what a script wants.
library(rnn)
library(Metrics)

# Function to be used later
# Creating Training and Test Data Set
# Build a supervised (x, y) pair matrix from a univariate series: column
# `x` is the value of column 2 of `data` at row i and `y` is the value at
# row i + 1. Vectorized replacement for the original append-in-a-loop
# version; same output.
#
# data: data.frame whose second column holds the series.
# Returns a numeric matrix with columns "x" and "y" and nrow(data) - 3
# rows. (The original's `output[1:nrow(output)-1, ]` relied on `:` binding
# tighter than `-`, i.e. it evaluated as (1:n) - 1 and dropped the last
# row; that behavior is preserved with explicit parentheses.)
dataset <- function(data){
  n_pairs <- nrow(data) - 2              # pairs the original loop produced
  x <- data[seq_len(n_pairs), 2]         # value at row i
  y <- data[seq_len(n_pairs) + 1, 2]     # value at row i + 1
  output <- cbind(x, y)
  # Trim the final row, as the original did (now written unambiguously).
  output[seq_len(nrow(output) - 1), , drop = FALSE]
}
# Monthly Milk Production: Pounds Per Cow
# TODO(review): hard-coded absolute path; point this at your local copy.
data <- read.table("/Users/tawehbeysolow/Downloads/monthly-milk-production-pounds-p.csv", header = TRUE, sep = ",")

# Plot the raw sequence and its distribution.
plot(data[,2], main = "Monthly Milk Production in Pounds", xlab = "Month", ylab = "Pounds",
     lwd = 1.5, col = "cadetblue", type = "l")
hist(data[,2], main = "Histogram of Monthly Milk Production in Pounds", xlab = "Pounds", col = "red")

# Creating Test and Training Sets
newData <- dataset(data = data)
rows <- sample(1:120, 120)
trainingData <- scale(newData[rows, ])
testData <- scale(newData[-rows, ])

# Max-min scale the training pairs to [0, 1].
x <- trainingData[,1]
y <- trainingData[,2]
train_x <- (x - min(x))/(max(x)-min(x))
train_y <- (y - min(y))/(max(y)-min(y))

# RNN model: learn to predict next month's value (train_y) from the
# current month's value (train_x).
# BUG FIX(review): the original passed Y = train_x, X = train_y (swapped),
# which contradicts the prediction/evaluation code below that predicts
# from train_x and compares against train_y.
RNN <- trainr(Y = as.matrix(train_y),
              X = as.matrix(train_x),
              learningrate = 0.04,
              momentum = 0.1,
              network_type = "rnn",
              numepochs = 700,
              hidden_dim = 3)

y_h <- predictr(RNN, as.matrix(train_x))

# Comparing Plots of Predicted Curve vs Actual Curve: Training Data
plot(train_y, col = "blue", type = "l", main = "Actual vs Predicted Curve: Training Data", lwd = 2)
lines(y_h, type = "l", col = "red", lwd = 2)
cat("Train MSE: ", mse(y_h, train_y))

# Test Data (the duplicate `testData <- scale(newData[-rows, ])` line was
# removed; testData is already computed above).
x <- testData[,1]
y <- testData[,2]
test_x <- (x - min(x))/(max(x)-min(x))
test_y <- (y - min(y))/(max(y)-min(y))

# BUG FIX(review): predict from the scaled test inputs (test_x), not the
# unscaled column `x`, so the evaluation matches the training scale.
y_h2 <- predictr(RNN, as.matrix(test_x))

# Comparing Plots of Predicted Curve vs Actual Curve: Test Data
plot(test_y, col = "blue", type = "l", main = "Actual vs Predicted Curve: Test Data", lwd = 2)
lines(y_h2, type = "l", col = "red", lwd = 2)
cat("Test MSE: ", mse(y_h2, test_y))
|
ab8dff006c1d76d9dd16f87b0becc3c00771029c
|
8431621bfd23efdba6b9f5a27d45f9bb1cafd320
|
/global.R
|
f9b0b3e73b0e7d0028f7d71f10d9d4e7ad03ba57
|
[] |
no_license
|
5c077/CKDapp
|
ad0ab6d5bac9ce6814370883c88241a74d0ba8fe
|
917318a5093e62a80e1385a73fc02ae1e1f576d2
|
refs/heads/master
| 2021-01-16T23:22:04.154241
| 2017-07-17T15:47:01
| 2017-07-17T15:47:01
| 64,875,171
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,988
|
r
|
global.R
|
library(dplyr)
library(tidyr)
library(shiny)
library(shinydashboard)
library(DT)
library(d3heatmap)
library(ggvis)
library(ggplot2)
# library(rCharts)
library(dtplyr)
library(data.table)
#library(llamar)
library(plotly)
library(RColorBrewer)

# Source javascript pagination code ---------------------------------------
# Forked from https://github.com/wleepang/shiny-pager-ui
# source('pagerui.R')

# Import in the Muscle Transcriptome database -----------------------------
# mt_source = src_sqlite('~/Dropbox/Muscle Transcriptome Atlas/Website files/data/expr_public_2015-11-08.sqlite3', create = FALSE)
# data = tbl(mt_source, 'MT')

# Expression data and gene-ontology terms shipped with the app.
# (Top-level `=` assignments replaced with the idiomatic `<-`.)
data <- read.csv('data/data_CKD_16-09-27II.csv')
GOs <- readRDS('data/allOntologyTerms.rds')

# Set the maximum of the expression, for the limits on the expr widget.
# NOTE(review): assumes the CSV has an `expr` column — confirm.
maxInit <- max(data$expr)

# List of tissues
#shortName = list(
#  'WT' = 'WT',
#  'CKD' = 'CKD')
#allTissues = c('wild-type', 'chronic kidney disease')
#shortName <- c('wild-type' = 'WT', 'chronic kidney disease' = 'CKD')

# List of tissues: only the two conditions used by this app are active;
# the full tissue list is kept commented out for reference.
tissueList <- list(
  #'total aorta' = 'total aorta',
  #'thoracic aorta' = 'thoracic aorta',
  #'abdominal aorta' = 'abdominal aorta',
  #'atria' = 'atria',
  #'left ventricle' = 'left ventricle',
  #'right ventricle' = 'right ventricle',
  #'diaphragm' = 'diaphragm',
  # 'eye' = 'eye',
  #'EDL' = 'EDL',
  #'FDB' = 'FDB',
  #'masseter' = 'masseter',
  #'plantaris' = 'plantaris',
  #'soleus' = 'soleus',
  #'tongue' = 'tongue',
  'CKD' = 'CKD', 'WT' = 'WT')

allTissues <- c(#'atria', 'left ventricle',
               #'total aorta', 'right ventricle',
               #'soleus',
               #'thoracic aorta',
               #'abdominal aorta',
               #'diaphragm',
               #'eye', 'EDL', 'FDB',
               #'masseter', 'tongue',
               #'plantaris', (
               'WT', 'CKD')
|
0507a59d25e0f16696792bfc31dc6fe859d529c1
|
bfac9ce04f4c3c3afe24d134f229f3c67e32535b
|
/morgans_to_bp_recmap.R
|
439a813682e36af00fd4a642820105fd73447c00
|
[] |
no_license
|
hj1994412/mixnmatch
|
e68a48d6d9f3e5bfcb5a9b7a6ed7bf9e0dc7c41c
|
c55de2421b5a99225fed465f86c3da385bc3794b
|
refs/heads/master
| 2022-04-09T19:27:45.455953
| 2019-12-02T06:00:53
| 2019-12-02T06:00:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,900
|
r
|
morgans_to_bp_recmap.R
|
# Command-line script: converts a target interval expressed in genetic-map
# units into base-pair coordinates, using a piecewise-constant
# recombination map.
#
# argv: 1 = recombination-map file (tab-separated, no header:
#           V1 = interval start, V2 = interval end — both as fractions of
#           the chromosome — V3 = relative rate)
#       2 = start of target interval (map units)
#       3 = end of target interval (map units)
#       4 = base rate per kb (divided by 1000 below into a per-bp rate)
#       5 = chromosome length in bp
#       6 = bp position where the previous interval ended
#       7 = cumulative map length already consumed
# Output (stdout): one tab-separated line "start_bp  end_bp  flag", where
# flag 1 marks the fall-back case running to the chromosome end.
arrArgs<-commandArgs(trailingOnly = TRUE);
options(scipen=999)
infile<-as.character(arrArgs[1])
data<-read.csv(file=infile,sep="\t",head=FALSE)
start=as.numeric(arrArgs[2])
stop=as.numeric(arrArgs[3])
base_rate=as.numeric(arrArgs[4])
base_rate=(base_rate/1000)
chromlength=as.numeric(arrArgs[5])
# Total map length of the chromosome at the base rate.
Mlength=chromlength*base_rate
last_interval=as.numeric(arrArgs[6])
# Convert fractional positions into bp and drop zero-width intervals.
data$V1<-round(data$V1*chromlength)
data$V2<-round(data$V2*chromlength)
data<-subset(data,data$V1!=data$V2) #0 bp intervals not allowed
# Per-interval map "heat", normalised so the chromosome sums to Mlength,
# then turned into a per-bp rate stored in V4.
cM_per_bp<-(((data$V2-data$V1)*data$V3)/sum((data$V2-data$V1)*data$V3)*Mlength)
data$V4<-cM_per_bp/(data$V2-data$V1)
current_length=as.numeric(arrArgs[7])
counter=0            # 0 = still walking towards `start`, 1 = towards `stop`
bp=last_interval     # current bp position on the chromosome
x=0                  # bp where the target interval starts (0 = not found)
y=0                  # bp where the target interval ends (0 = not found)
track=0              # NOTE(review): written here but never read
rate_index_last=1    # NOTE(review): updated below but never read
# Walk the map, consuming whole intervals when they fit entirely before
# the next boundary, and single base pairs otherwise, until `stop` map
# units have been accumulated.
while(current_length<stop){
# NOTE(review): which.min over the concatenation of both boundary vectors
# can return an index > nrow(data) (a match on V2); guarded by the
# rate_index <= length(data$V2) check below, but confirm the V2-match case
# is handled as intended.
rate_index=which.min(c(abs(data$V1-bp),abs(data$V2-bp)))
total_heat=(data$V2[rate_index]-data$V1[rate_index])*data$V4[rate_index]
if(rate_index <= length(data$V2)){
if(counter==0){
if((current_length+total_heat)<start){
current_length=current_length+total_heat
bp=data$V2[rate_index]+1
} else{
current_length=current_length+data$V4[rate_index]
bp=bp+1
}#add whole or partial window
}#if still within start interval
if(counter==1){
if((current_length+total_heat)<stop){
current_length=current_length+total_heat
bp=data$V2[rate_index]+1
} else{
current_length=current_length+data$V4[rate_index]
bp=bp+1
}#add whole or partial window
}#if still within stop interval
if((current_length>=start) & (counter==0)){
x=last_interval #deal w/windows that ended in the middle from previous interval
counter=1
}
if(current_length>=stop){
y=bp
}
rate_index_last=rate_index
}else{current_length=stop}#don't try to run through the end of the file
}#keep churning through
# Emit the located interval; if either endpoint was not found, fall back
# to the remainder of the chromosome and flag it with 1.
if(x>0 & y>0){
write.table(cbind(x,y,0),sep="\t",col.names=FALSE,row.names=FALSE)
} else{
write.table(cbind(last_interval,chromlength,1),sep="\t",col.names=FALSE,row.names=FALSE)
}
|
be097c29b38b07a1205589f706547791832b935f
|
3fdb12a1fe34aca6b96aa9047df4593404a5fc52
|
/plot.stm.kathy.R
|
c48312c2ac43712eb73d5ddb8aee820e1a852fe7
|
[] |
no_license
|
carnegie-dpb/bartonlab-modeling
|
06c90e10df8fc37973a02db41f2c882bc8ceedfd
|
7d875f16f675bf94fc04a360ae8f6855d4642619
|
refs/heads/master
| 2021-01-22T03:48:47.674881
| 2018-04-18T22:29:04
| 2018-04-18T22:29:04
| 81,460,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
plot.stm.kathy.R
|
## a plot for Kathy
# Relative expression (fold change vs. the mean of the first three time
# points) of six genes after DEX application, overlaid on one log-scale
# plot via the project helpers getTimes()/getExpression()/plot.bars().
t <- getTimes(schema="bl2013", condition="GR-STM")
HSP90.1 <- getExpression(schema="bl2013", condition="GR-STM", gene="HSP90.1")
# BUG FIX(review): the original line ended with a stray console-paste
# fragment ("> BCAT2"), turning the assignment into a comparison against a
# not-yet-defined variable (an error); the fragment was removed.
BCAT2 <- getExpression(schema="bl2013", condition="GR-STM", gene="BCAT-2")
LOL1 <- getExpression(schema="bl2013", condition="GR-STM", gene="LOL1")
TINY2 <- getExpression(schema="bl2013", condition="GR-STM", gene="TINY2")
AT1G30280 <- getExpression(schema="bl2013", condition="GR-STM", gene="AT1G30280")
AT1G44760 <- getExpression(schema="bl2013", condition="GR-STM", gene="AT1G44760")
# First series sets up the axes; the rest are overlaid (over = TRUE).
plot.bars(t, HSP90.1/mean(HSP90.1[1:3]), xlab="time after DEX application (min)", ylab="relative expression", pch=9, log="y", ylim=c(.1,10))
plot.bars(t, BCAT2/mean(BCAT2[1:3]), over=TRUE, pch=0)
plot.bars(t, LOL1/mean(LOL1[1:3]), over=TRUE, pch=1)
plot.bars(t, TINY2/mean(TINY2[1:3]), over=TRUE, pch=2)
plot.bars(t, AT1G30280/mean(AT1G30280[1:3]), over=TRUE, pch=5)
plot.bars(t, AT1G44760/mean(AT1G44760[1:3]), over=TRUE, pch=6)
legend(80, 0.1, yjust=0, pch=c(9,0,1,2,5,6), c("HSP90.1","BCAT2","LOL1","TINY2","At1g30280","At1g44760"))
|
bb21bc71233032771c073f5f0ca8b69c7abd2952
|
b197c890367797b4506630fbc1d6d9230e561d0e
|
/inst/application/ModelImport.R
|
0b6323b3f2422bfd2f6c05328629833ae10a9312
|
[] |
no_license
|
EmbrapaInformaticaAgropecuaria/sdsim
|
7eedceeda24ab517f0ddd2d0e185ab49af745ca2
|
4002a052fd6fbc582d09f55e49b52144fd7be438
|
refs/heads/master
| 2021-04-12T08:32:35.508068
| 2018-04-10T16:59:42
| 2018-04-10T16:59:42
| 126,221,807
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,910
|
r
|
ModelImport.R
|
# Loads model if no other models with same ids as the one loading exist
# Else, query user for confirmation before overwriting
#
# Parses `modelName` (a file path or a repository entry) with ParseXML(),
# determines which id(s) it would occupy in simData$models and either:
#   * asks the browser for overwrite confirmation via the
#     "confirmOverwrite" custom message and returns NULL, or
#   * delegates to ConfirmLoadModel() and returns its (message, colour)
#     pair.
#
# modelName            name/path handed to ParseXML()
# simData              reactive store holding $models, $loadingModel, ...
# session/input/output shiny triplet, forwarded to ConfirmLoadModel()
# repository           optional repository dir passed through to ParseXML()
# loadDefaultScenario  NOTE(review): accepted but not forwarded on the
#                      direct ConfirmLoadModel() call below — confirm
# nTableRows           table page size forwarded to ConfirmLoadModel()
# replaceId            when non-empty, overrides the id found in the XML
LoadModel <- function(modelName, simData, session, input, output,
                      repository = NULL, loadDefaultScenario = T,
                      nTableRows = 50, replaceId = NULL) {
  model <- ParseXML(modelName, repository)

  id <- ""
  componentIds <- list()

  # Work out the model's primary id (atomic, coupled or static), applying
  # replaceId when given; for coupled models also collect component ids.
  if(!is.null(model$modelId)) {
    if(!is.null(replaceId) && replaceId != "")
      model$modelId <- replaceId
    id <- model$modelId
  } else if (!is.null(model$coupledModelId)) {
    if(!is.null(replaceId) && replaceId != "")
      model$coupledModelId <- replaceId
    id <- model$coupledModelId

    componentIds <- lapply(model$components, function(x) {
      if(!is.null(x$modelId))
        x$modelId
      else if(!is.null(x$staticModelId))
        x$staticModelId
      else if(!is.null(x$coupledModelId))
        x$coupledModelId
    })
  } else if (!is.null(model$staticModelId)) {
    if(!is.null(replaceId) && replaceId != "")
      model$staticModelId <- replaceId
    id <- model$staticModelId
  }

  modelIds <- c(id, componentIds)

  # Any id collision -> stash the parsed model and ask the user first.
  if(any(modelIds %in% names(simData$models))) {
    # Save loading model info in simData
    simData$loadingModel <- model

    idsToOverwrite <- paste(modelIds[modelIds %in% names(simData$models)], collapse = "\", \"")
    # Turn the last comma into "and" for a readable sentence.
    idsToOverwrite <- sub("(.*),", "\\1 and", idsToOverwrite)
    message <- paste0("The model(s) \"", idsToOverwrite, "\" will be overwritten.",
                      "\nDo you wish to continue?")
    responseInputName <- "confirmModelOverwrite"
    session$sendCustomMessage("confirmOverwrite",
                              list(message = message,
                                   responseInputName = responseInputName))
    return(NULL)
  } else {
    msg <- ConfirmLoadModel(model, simData, session, input,
                            output, nTableRows = nTableRows)
    return(msg)
  }
}
# Dispatches a parsed model to the loader matching its type (atomic,
# static or coupled), refreshes the model/scenario UI tables, and returns
# a (message, colour) pair describing the outcome.
#
# NOTE(review): the `return(...)` calls inside the error/warning handlers
# below only return from the handler closures. For tryCatch's error
# handler that is the intended "recover with a red message" behavior, but
# for withCallingHandlers' warning handler the returned list is discarded
# and execution simply continues — a warning does not surface its message.
# Confirm whether that is intended.
ConfirmLoadModel <- function(model, simData, session, input, output,
                             loadDefaultScenario = T, nTableRows = 50) {
  withCallingHandlers({
    tryCatch({
      # Pick the loader from whichever id field the parsed XML carries.
      if(!is.null(model$modelId)) {
        msg <- LoadAtomicModel(model, simData, session, input, output,
                               loadDefaultScenario)
      } else if(!is.null(model$staticModelId)) {
        msg <- LoadStaticModel(model, simData, session, input, output,
                               loadDefaultScenario)
      } else if (!is.null(model$coupledModelId)){
        msg <- LoadCoupledModel(model, simData, session, input, output,
                                loadDefaultScenario)
      } else {
        # Invalid model file
        msg <- list(
          paste("Load model aborted. The given file's model type is invalid.",
                "Generate your XML files using the sdsim package functions."),
          "red"
        )
      }

      # Update UI
      UpdateLoadedModel(simData, session, input, output, nTableRows)
      UpdateLoadedScenario(simData, session, input, output, nTableRows)

      return(msg)
    },
    error = function(e) {
      # Convert the error condition into a red UI message.
      errorOutput <- paste(capture.output(e), collapse = " ")
      return(list(errorOutput, "red"))
    })
  },
  warning = function(w) {
    warningOutput <- paste(capture.output(w), collapse = " ")
    return(list(warningOutput, "red"))
  })
}
# Registers an atomic model in simData, attaches its default scenario and
# makes it the app's current model.
#
# model               parsed model list (from ParseXML)
# simData             reactive store holding $models / $currentModelId
# loadDefaultScenario when FALSE, an empty scenario replaces the default
# Returns a (message, colour) pair for the UI status line.
LoadAtomicModel <- function(model, simData, session, input, output,
                            loadDefaultScenario = T) {
  # Fall back to an empty "UnnamedScenario" when there is no default
  # scenario or the caller asked not to load one.
  needsEmptyScenario <- !loadDefaultScenario || is.null(model$defaultScenario)
  if (needsEmptyScenario) {
    model$defaultScenario <- ParseXML("UnnamedScenario", "application/xml")
  }
  model$defaultScenario$scenarioId <- "Default"

  modelId <- model$modelId

  # Store the parsed model plus its default scenario in the reactive list,
  # then switch the app's current model to it.
  simData$models[[modelId]] <- LoadAtomicModelData(model, simData)
  LoadScenarioData(model$defaultScenario, simData, modelId)
  simData$currentModelId <- modelId

  list(
    paste(modelId, "model successfully loaded!"),
    "green"
  )
}
# Registers a static model in simData with its default scenario and makes
# it the app's current model.
#
# model               parsed model list (from ParseXML)
# simData             reactive store holding $models / $currentModelId
# loadDefaultScenario when FALSE, an empty scenario replaces the default
# Returns a (message, colour) pair for the UI status line.
LoadStaticModel <- function(model, simData, session, input, output,
                            loadDefaultScenario = T) {
  # Substitute an empty "UnnamedScenario" when no default scenario should
  # or can be used.
  if (!loadDefaultScenario || is.null(model$defaultScenario)) {
    emptyScenario <- ParseXML("UnnamedScenario", "application/xml")
    emptyScenario$scenarioId <- "Default"
    model$defaultScenario <- emptyScenario
  }
  model$defaultScenario$scenarioId <- "Default"

  staticId <- model$staticModelId

  # Store the model and its default scenario, then select it as current.
  simData$models[[staticId]] <- LoadStaticModelData(model, simData)
  LoadScenarioData(model$defaultScenario, simData, staticId)
  simData$currentModelId <- staticId

  list(
    paste(staticId, "model successfully loaded!"),
    "green"
  )
}
# Loads a coupled model: registers every component model (atomic or
# static) together with its default scenario, then registers the coupled
# model itself with a fresh empty scenario and makes it the current model.
# Returns a (message, colour) pair for the UI status line.
#
# NOTE(review): nested coupled components are not handled yet (see the
# TODO branch below); such components are silently skipped.
LoadCoupledModel <- function(model, simData, session, input, output,
                             loadDefaultScenario = T) {
  # Load coupled model
  componentIdList <- c()

  # Loads all component models into model list
  for(component in model$components) {
    if(!is.null(component$modelId)) {
      # Atomic component.
      componentIdList <- c(componentIdList, component$modelId)
      # If default scenario is null
      if(is.null(component$defaultScenario)) {
        # Load empty scenario as default
        scenario <- ParseXML("UnnamedScenario", "application/xml")
        component$defaultScenario <- scenario
      }
      component$defaultScenario$scenarioId <- "Default"

      simData$models[[component$modelId]] <- LoadAtomicModelData(component, simData)

      # Load default scenario and save it to the component's scenario list
      LoadScenarioData(component$defaultScenario, simData, component$modelId)
    } else if(!is.null(component$staticModelId)) {
      # Static component.
      componentIdList <- c(componentIdList, component$staticModelId)

      # If default scenario is null
      if(is.null(component$defaultScenario)) {
        # Load empty scenario as default
        scenario <- ParseXML("UnnamedScenario", "application/xml")
        component$defaultScenario <- scenario
      }
      component$defaultScenario$scenarioId <- "Default"

      simData$models[[component$staticModelId]] <- LoadStaticModelData(component, simData)

      # Load default scenario and save it to the component's scenario list
      LoadScenarioData(component$defaultScenario, simData, component$staticModelId)
    } else if (!is.null(component$coupledModelId)){
      # msg <- LoadCoupledModel(model, simData, session, input, output,
      #                         loadDefaultScenario)
      # TODO
    }
  }

  # Set empty scenario as current scenario
  scenario <- ParseXML("UnnamedScenario", "application/xml")

  # Get component Ids data frame
  model$componentIds <- data.frame('Component ID' = componentIdList,
                                   stringsAsFactors = FALSE,
                                   row.names = NULL, check.names = F)

  simData$models[[model$coupledModelId]] <-
    LoadCoupledModelData(model, simData, scenario$scenarioId)
  LoadScenarioData(scenario, simData, model$coupledModelId)

  # Update current model
  simData$currentModelId <- model$coupledModelId

  msg <- list(
    paste(model$coupledModelId, "model successfully loaded!"),
    "green"
  )
  return(msg)
}
# Loads a scenario file (.xml or .xlsx) for the current model. If the
# scenario id already exists on that model, the parsed scenario is stashed
# in simData$loadingScenario and the browser is asked for overwrite
# confirmation (the function then returns NULL); otherwise it delegates to
# ConfirmLoadScenario() and returns its (message, colour) pair.
#
# NOTE(review): as in ConfirmLoadModel, the `return(...)` inside the
# withCallingHandlers warning handler only exits the handler, not
# LoadScenario() — a warning's message is not surfaced. Confirm intended.
# NOTE(review): if `scenarioName` matches neither extension and no
# repository is given, `scenario` is never assigned and the tryCatch error
# branch fires on first use — presumably acceptable, but verify.
LoadScenario <- function(scenarioName, simData, session, input, output,
                         repository = NULL, nTableRows = 50, replaceId = NULL) {
  withCallingHandlers({
    tryCatch({
      # Choose the parser from the repository flag / file extension.
      if(!is.null(repository) || grepl(".[xX][mM][lL]$", scenarioName)) {
        scenario <- ParseXML(scenarioName, repository)
      } else if(grepl(".[xX][lL][sS][xX]$", scenarioName)) {
        scenario <- ParseXlsx(scenarioName)
      }

      if(!is.null(replaceId) && replaceId != "") {
        scenario$scenarioId <- replaceId
      }

      # "Default" is reserved for the model's built-in scenario.
      if(scenario$scenarioId == "Default")
        return(list("Cannot create or load a scenario with ID \"Default\"!",
                    "red"))

      currentModel <- simData$models[[simData$currentModelId]]
      currentModelScenarioIds <- names(currentModel$scenarios)

      if(scenario$scenarioId %in% currentModelScenarioIds) {
        # Save loading scenario info in simData
        simData$loadingScenario <- scenario

        message <- paste0("The current model's scenario \"", scenario$scenarioId,
                          "\" will be overwritten.",
                          "\nDo you wish to continue?")
        responseInputName <- "confirmScenarioOverwrite"
        session$sendCustomMessage("confirmOverwrite",
                                  list(message = message,
                                       responseInputName = responseInputName))
        return(NULL)
      } else {
        msg <- ConfirmLoadScenario(scenario, simData, session, input, output,
                                   repository, nTableRows)
        return(msg)
      }
    },
    error = function(e) {
      errorOutput <- paste(capture.output(e), collapse = " ")
      return(list(errorOutput, "red"))
    })
  },
  warning = function(w) {
    warningOutput <- paste(capture.output(w), collapse = " ")
    return(list(warningOutput, "red"))
  })
}
# Adds an already-parsed scenario to the current model and selects it as
# that model's current scenario, then refreshes the scenario UI table.
# Returns a (message, colour) pair for the UI status line.
#
# NOTE(review): as in ConfirmLoadModel, the warning handler's `return`
# does not abort this function; the warning's message is discarded.
ConfirmLoadScenario <- function(scenario, simData, session, input, output,
                                repository = NULL, nTableRows = 50) {
  withCallingHandlers({
    tryCatch({
      # Load scenario data and add it to current model in simData
      LoadScenarioData(scenario, simData, simData$currentModelId)

      # Change the current model's current scenario to the loaded scenario
      simData$models[[simData$currentModelId]]$currentScenarioId <- scenario$scenarioId

      msg <- paste(scenario$scenarioId, "scenario successfully loaded!")

      UpdateLoadedScenario(simData, session, input, output, nTableRows)

      return(list(msg, "green"))
    },
    error = function(e) {
      # Convert the error condition into a red UI message.
      errorOutput <- paste(capture.output(e), collapse = " ")
      return(list(errorOutput, "red"))
    })
  },
  warning = function(w) {
    warningOutput <- paste(capture.output(w), collapse = " ")
    return(list(warningOutput, "red"))
  })
}
# Builds a scenario list from an Excel workbook with one sheet per
# variable class (state, constant, input, parameter, switch, aux) plus a
# "simulation" sheet holding method/from/to/by/scenarioId rows.
#
# Relies on the project helpers ReadDataExcel(), DataFrameToList() and
# GetDataFrameValue(); the sheet/column names used below ("Interpolation",
# "Unit", "Description", "Variable", "Value") are assumed from those
# helpers — TODO confirm against their definitions.
ParseXlsx <- function(file) {
  scenario <- ReadDataExcel(file)

  # Keep only input variables that define a non-empty interpolation method.
  interpolation <- DataFrameToList(scenario$input, valueCol = "Interpolation")
  interpolation <- interpolation[which(interpolation != "")]

  # Collect non-empty units across all variable sheets.
  units <- as.list(unlist(lapply(list("state", "constant", "input", "parameter", "switch", "aux"), function(x) {
    DataFrameToList(scenario[[x]], valueCol = "Unit")
  })))
  units <- units[which(units != "")]

  # Collect non-empty descriptions across all variable sheets.
  descriptions <- as.list(unlist(lapply(list("state", "constant", "input", "parameter", "switch", "aux"), function(x) {
    DataFrameToList(scenario[[x]], valueCol = "Description")
  })))
  descriptions <- descriptions[which(descriptions != "")]

  # Simulation settings from the "simulation" sheet.
  method <- GetDataFrameValue(scenario$simulation, "method", "Variable", "Value")
  from <- GetDataFrameValue(scenario$simulation, "from", "Variable", "Value")
  to <- GetDataFrameValue(scenario$simulation, "to", "Variable", "Value")
  by <- GetDataFrameValue(scenario$simulation, "by", "Variable", "Value")
  scenarioId <- GetDataFrameValue(scenario$simulation, "scenarioId", "Variable", "Value")

  # Assemble the scenario list.
  lscenario <- list(scenarioId = scenarioId,
                    times = list(from = from, to = to, by = by),
                    method = method,
                    state = DataFrameToList(scenario$state),
                    constant = DataFrameToList(scenario$constant),
                    input = DataFrameToList(scenario$input),
                    interpolation = interpolation,
                    parameter = DataFrameToList(scenario$parameter),
                    switch = DataFrameToList(scenario$switch),
                    unit = units,
                    description = descriptions)

  return(lscenario)
}
# Parses an sdsim XML file into a nested list.
#
# file          path to an XML file, or a bare name (without extension)
#               when `repositoryDir` is given
# repositoryDir optional directory inside the installed sdsim package from
#               which "<file>.xml" is resolved
# Returns the nested list produced by XML::xmlToList().
# Stops (call. = FALSE) when the file does not start with the
# "<?sdsim ... version ...?>" processing instruction.
ParseXML <- function(file, repositoryDir = NULL) {
  if(!is.null(repositoryDir)) {
    # BUG FIX(review): system.file() has no `appDir` parameter; the named
    # argument only worked by falling through `...` into file.path().
    # Pass the path components directly instead (same resulting path).
    file <- system.file(repositoryDir, paste0(file, ".xml"),
                        package = "sdsim")
  }

  # Validate the sdsim processing instruction within the first three lines.
  sdsimprefix <- paste(readLines(file, n = 3), collapse = "\n")
  if (!grepl(pattern = "<\\?sdsim.*version.*\\?>", sdsimprefix, ignore.case = T))
    stop(paste("Load model aborted. The given file is not a valid XML file.",
               "Generate your XML files using the sdsim package functions."),
         call. = F)
  # else
  # {
  #   # valid prefix, now check version
  #   if (!grepl(pattern = paste0("(?<=version=\\')",
  #                               sub("\\.","\\\\.", packageVersion("sdsim"))),
  #              x = sdsimprefix, ignore.case = T, perl = T))
  #     warning("Load Model: The sdsim XML version is deprecated. The current ",
  #             "sdsim version is: ", packageVersion("sdsim"))
  # }

  data <- XML::xmlParse(file)
  data <- XML::xmlToList(data)
  return(data)
}
# Synchronize the model-editing UI with the currently selected model:
# show the panel matching the model's type ("atomic", "static" or "coupled"),
# hide and clear the other panels, and repopulate the script editors and
# rhandsontable widgets from the model's stored data.
# nTableRows: total row count each table is padded to with empty rows.
UpdateLoadedModel <- function(simData, session, input, output, nTableRows = 50) {
  # Clear previous model simulation results and log
  ClearSimulationResults(simData, session, input, output)
  # Get current model
  currentModel <- simData$models[[simData$currentModelId]]
  if(currentModel$type == "atomic") {
    # Unhide atomic model panel
    session$sendCustomMessage("unhideElement", "atomicModelPage")
    # Hide static model panel
    session$sendCustomMessage("hideElement", "staticModelPage")
    # Hide coupled model panel
    session$sendCustomMessage("hideElement", "coupledModelPage")
    # Unhide method, initialTime, finalTime and step inputs
    session$sendCustomMessage("enableElement", "method")
    # Clear other model UI's
    ClearStaticModelUI(simData, session, input, output)
    ClearCoupledModelUI(simData, session, input, output)
    # Update scripts
    CustomAceUpdate(session, "description", value = currentModel$description)
    CustomAceUpdate(session, "DifferentialEquations", value = currentModel$DifferentialEquations)
    CustomAceUpdate(session, "initVars", value = currentModel$initVars)
    CustomAceUpdate(session, "root", value = currentModel$root)
    CustomAceUpdate(session, "event", value = currentModel$event)
    CustomAceUpdate(session, "globalFunctions", value = paste(currentModel$globalFunctions, collapse = "\n\n"))
    # Update auxiliaries table, padded with blank rows up to nTableRows
    nRows <- nTableRows - NROW(currentModel$aux)
    aux <- rbind(currentModel$aux, CreateVarDataFrame(nRows = nRows),
                 stringsAsFactors = FALSE, row.names = NULL)
    UpdateRHandsontable(aux, "aux", output)
    simData$changed$aux <- FALSE
  } else if(currentModel$type == "static") {
    # Hide atomic model panel
    session$sendCustomMessage("hideElement", "atomicModelPage")
    # Unhide static model panel
    session$sendCustomMessage("unhideElement", "staticModelPage")
    # Hide coupled model panel
    session$sendCustomMessage("hideElement", "coupledModelPage")
    # Hide method, initialTime, finalTime and step inputs
    session$sendCustomMessage("disableElement", "method")
    # Clear other model UI's
    ClearAtomicModelUI(simData, session, input, output)
    ClearCoupledModelUI(simData, session, input, output)
    # Update scripts
    CustomAceUpdate(session, "description", value = currentModel$description)
    CustomAceUpdate(session, "staticInitVars", value = currentModel$initVars)
    CustomAceUpdate(session, "staticGlobalFunctions", value = paste(currentModel$globalFunctions, collapse = "\n\n"))
    # Update auxiliaries table, padded with blank rows up to nTableRows
    nRows <- nTableRows - NROW(currentModel$aux)
    aux <- rbind(currentModel$aux, CreateVarDataFrame(nRows = nRows),
                 stringsAsFactors = FALSE, row.names = NULL)
    UpdateRHandsontable(aux, "staticAux", output)
    simData$changed$staticAux <- FALSE
  } else if(currentModel$type == "coupled") {
    # Hide atomic model panel
    session$sendCustomMessage("hideElement", "atomicModelPage")
    # Hide static model panel
    session$sendCustomMessage("hideElement", "staticModelPage")
    # Unhide coupled model panel
    session$sendCustomMessage("unhideElement", "coupledModelPage")
    # Unhide method, initialTime, finalTime and step inputs
    session$sendCustomMessage("enableElement", "method")
    # Clear other model UI's
    ClearAtomicModelUI(simData, session, input, output)
    ClearStaticModelUI(simData, session, input, output)
    # Update coupled model connections
    if(!is.null(currentModel$connections)){
      nRows <- nTableRows - NROW(currentModel$connections)
      connections <- rbind(currentModel$connections,
                           CreateConnectionsDataFrame(nRows = nRows),
                           stringsAsFactors = FALSE, row.names = NULL)
    } else {
      nRows <- nTableRows
      connections <- CreateConnectionsDataFrame(nRows = nRows)
    }
    UpdateRHandsontable(connections, "connections", output)
    simData$changed$connections <- FALSE
    # Update coupled model components
    if(!is.null(currentModel$componentIds)){
      nRows <- nTableRows - NROW(currentModel$componentIds)
      componentIds <- rbind(currentModel$componentIds,
                            CreateComponentsDataFrame(nRows = nRows),
                            stringsAsFactors = FALSE, row.names = NULL)
    } else {
      nRows <- nTableRows
      componentIds <- CreateComponentsDataFrame(nRows = nRows)
    }
    # NOTE(review): this writes to output id "componentIds" whereas
    # ClearCoupledModelUI() writes to "components" — confirm which id the UI
    # actually defines; one of the two looks stale.
    UpdateRHandsontable(componentIds, "componentIds", output)
    simData$changed$componentIds <- FALSE
  }
}
# SaveModelSimulationResults <- function(simData, session, input, output) {
# # Clear simulation log
# output$errorTitle <- renderText("")
# output$errorLog <- renderText("")
#
#
#
# updateSelectInput(session, "selVarPlot", choices = "No Variables Available", selected = "No Variables Available")
# updateSelectInput(session, "selectXAxisPlot", choices = "No Variables Available", selected = "No Variables Available")
# updateTextInput(session, "plotTitle", value = "")
# updateTextInput(session, "plotXLabel", value = "")
# updateTextInput(session, "plotYLabel", value = "")
# }
# Reset the plot-variable selectors and plot labels to their empty state,
# then wipe the simulation error log.
ClearSimulationResults <- function(simData, session, input, output) {
  noVars <- "No Variables Available"
  for (selId in c("selVarPlot", "selectXAxisPlot")) {
    updateSelectInput(session, selId, choices = noVars, selected = noVars)
  }
  for (txtId in c("plotTitle", "plotXLabel", "plotYLabel")) {
    updateTextInput(session, txtId, value = "")
  }
  output$errorTitle <- renderText("")
  output$errorLog <- renderText("")
}
# Blank out the atomic-model script editors and reset its auxiliaries table
# to a single empty row, marking the table as unchanged.
ClearAtomicModelUI <- function(simData, session, input, output) {
  # Clear atomic model script fields
  CustomAceUpdate(session, "DifferentialEquations", value = "")
  CustomAceUpdate(session, "initVars", value = "")
  CustomAceUpdate(session, "root", value = "")
  CustomAceUpdate(session, "event", value = "")
  CustomAceUpdate(session, "globalFunctions", value = "")
  # Clear atomic model aux table
  emptyDF <- CreateVarDataFrame(nRows = 1)
  UpdateRHandsontable(emptyDF, "aux", output)
  simData$changed$aux <- FALSE  # use TRUE/FALSE, never the reassignable T/F
}
# Blank out the static-model script editors and reset its auxiliaries table
# to a single empty row, marking the table as unchanged.
ClearStaticModelUI <- function(simData, session, input, output) {
  # Clear static model script fields
  CustomAceUpdate(session, "staticInitVars", value = "")
  CustomAceUpdate(session, "staticGlobalFunctions", value = "")
  # Clear static model aux table
  emptyDF <- CreateVarDataFrame(nRows = 1)
  UpdateRHandsontable(emptyDF, "staticAux", output)
  simData$changed$staticAux <- FALSE  # use TRUE/FALSE, never the reassignable T/F
}
# Reset the coupled-model tables to empty 50-row placeholders.
ClearCoupledModelUI <- function(simData, session, input, output) {
  # Clear coupled model connections table
  emptyDF <- CreateConnectionsDataFrame(nRows = 50)
  UpdateRHandsontable(emptyDF, "connections", output)
  emptyDF <- CreateComponentsDataFrame(nRows = 50)
  UpdateRHandsontable(emptyDF, "components", output)
  # NOTE(review): other code paths use the output id "componentIds" and reset
  # simData$changed$connections / $componentIds as well — confirm the id and
  # the single flag written here are intentional.
  simData$changed$components <- FALSE
}
# Populate the scenario UI (time settings, solver method, and the five
# variable tables) from the current model's current scenario, if any.
# nTableRows: total row count each table is padded to with empty rows.
UpdateLoadedScenario <- function(simData, session, input, output, nTableRows = 50) {
  currentModel <- simData$models[[simData$currentModelId]]
  currentScenario <- NULL
  if(!is.null(currentModel) &&
     !is.null(currentModel$scenarios) &&
     !is.null(currentModel$currentScenarioId))
    currentScenario <- currentModel$scenarios[[currentModel$currentScenarioId]]
  if(!is.null(currentScenario)) {
    updateTextInput(session, "initialTime",
                    label = "Initial Time",
                    value = currentScenario$from)
    updateTextInput(session, "finalTime",
                    label = "Final Time",
                    value = currentScenario$to)
    updateTextInput(session, "step",
                    label = "Time Step",
                    value = currentScenario$by)
    updateSelectInput(session = session, inputId = "method",
                      selected = currentScenario$method,
                      choices = c("lsoda", "lsode", "lsodes", "lsodar",
                                  "vode", "daspk", "euler", "rk4", "ode23",
                                  "ode45", "radau", "bdf", "bdf_d", "adams",
                                  "impAdams", "impAdams_d"))
    # Pad each variable table with empty rows up to nTableRows.
    nRows <- nTableRows - NROW(currentScenario$state)
    state <- rbind(currentScenario$state, CreateVarDataFrame(nRows = nRows),
                   stringsAsFactors = FALSE, row.names = NULL)
    nRows <- nTableRows - NROW(currentScenario$constant)
    constant <- rbind(currentScenario$constant, CreateVarDataFrame(nRows = nRows),
                      stringsAsFactors = FALSE, row.names = NULL)
    nRows <- nTableRows - NROW(currentScenario$input)
    # Fix: previously named `input`, which shadowed the Shiny `input`
    # parameter of this function.
    inputTable <- rbind(currentScenario$input, CreateInputDataFrame(nRows = nRows),
                        stringsAsFactors = FALSE, row.names = NULL)
    nRows <- nTableRows - NROW(currentScenario$parameter)
    parameter <- rbind(currentScenario$parameter,
                       CreateVarDataFrame(nRows = nRows),
                       stringsAsFactors = FALSE, row.names = NULL)
    nRows <- nTableRows - NROW(currentScenario$switch)
    switch <- rbind(currentScenario$switch, CreateVarDataFrame(nRows = nRows),
                    stringsAsFactors = FALSE, row.names = NULL)
    # Mark the freshly loaded tables as unchanged before rendering them.
    simData$changed$state <- FALSE
    simData$changed$constant <- FALSE
    simData$changed$input <- FALSE
    simData$changed$parameter <- FALSE
    simData$changed$switch <- FALSE
    UpdateRHandsontable(state, "state", output)
    UpdateRHandsontable(constant, "constant", output)
    UpdateRHandsontable(inputTable, "input", output)
    UpdateRHandsontable(parameter, "parameter", output)
    UpdateRHandsontable(switch, "switch", output)
  }
}
# Converts an sdsim atomic model object into the UI's internal model
# representation (scripts as source text, auxiliaries as a data frame).
LoadAtomicModelData <- function(model, simData) {
  auxDF <- AuxListToDataFrame(model)
  # Render each global function as "name <- <source>" text for the editor.
  funs <- model$GlobalFunctions
  funTexts <- lapply(names(funs), function(nm) {
    paste0(nm, " <- ", FunToString(funs[[nm]]))
  })
  CreateModelObject(
    modelId = model$modelId,
    description = model$modelDescription,
    DifferentialEquations = FunToString(model$DifferentialEquations),
    initVars = FunToString(model$InitVars),
    root = FunToString(model$RootSpecification),
    event = FunToString(model$EventFunction),
    aux = auxDF,
    globalFunctions = funTexts,
    defaultScenarioId = model$defaultScenario$scenarioId,
    currentScenarioId = model$defaultScenario$scenarioId
  )
}
# Converts an sdsim static model object into the UI's internal model
# representation (scripts as source text, equations as a data frame).
LoadStaticModelData <- function(model, simData) {
  auxDF <- AuxListToDataFrame(model, "equations")
  # Render each global function as "name <- <source>" text for the editor.
  funs <- model$GlobalFunctions
  funTexts <- lapply(names(funs), function(nm) {
    paste0(nm, " <- ", FunToString(funs[[nm]]))
  })
  CreateStaticModelObject(
    modelId = model$staticModelId,
    description = model$modelDescription,
    initVars = FunToString(model$InitVars),
    aux = auxDF,
    globalFunctions = funTexts,
    defaultScenarioId = model$defaultScenario$scenarioId,
    currentScenarioId = model$defaultScenario$scenarioId
  )
}
# Converts an sdsim coupled model object into the UI's internal model
# representation (connections flattened into a table).
LoadCoupledModelData <- function(model, simData, currentScenarioId = NULL) {
  connectionsDF <- ConnectionsListToDataFrame(model)
  CreateCoupledModelObject(
    modelId = model$coupledModelId,
    description = model$coupledModelDescription,
    connections = connectionsDF,
    componentIds = model$componentIds,
    currentScenarioId = currentScenarioId
  )
}
# Converts a parsed scenario list into the UI scenario object and, when a
# parent model id is given, registers the result under that model's
# scenario list (simData is expected to have reference semantics).
LoadScenarioData <- function(scenario, simData, parentModelId = NULL) {
  # Flatten every variable section into its table form.
  sections <- c("state", "constant", "input", "parameter", "switch")
  tables <- lapply(sections, function(s) ScenarioListToDataFrame(scenario, s))
  names(tables) <- sections
  timeSpec <- scenario$times
  scenarioData <- CreateScenarioObject(
    scenarioId = scenario$scenarioId,
    from = timeSpec$from,
    to = timeSpec$to,
    by = timeSpec$by,
    method = scenario$method,
    state = tables$state,
    constant = tables$constant,
    input = tables$input,
    parameter = tables$parameter,
    switch = tables$switch
  )
  if(!is.null(parentModelId)) {
    simData$models[[parentModelId]]$scenarios[[scenario$scenarioId]] <- scenarioData
  }
  scenarioData
}
# Build the UI representation of a (non-coupled) model as a plain list.
# A model that has differential equations is "atomic"; otherwise it is
# treated as "static". NULL-valued arguments are dropped from the result
# (assigning NULL via `[[<-` removes the element, matching the original
# `$<-` semantics), so absent fields simply do not appear in names().
CreateModelObject <- function(modelId,
                              description = NULL,
                              DifferentialEquations = NULL,
                              initVars = NULL,
                              root = NULL,
                              event = NULL,
                              aux = NULL,
                              globalFunctions = NULL,
                              defaultScenarioId = NULL,
                              currentScenarioId = NULL,
                              scenarios = list()) {
  model <- list(type = if (is.null(DifferentialEquations)) "static" else "atomic")
  for (field in c("modelId", "description", "DifferentialEquations",
                  "initVars", "root", "event", "aux", "globalFunctions",
                  "defaultScenarioId", "currentScenarioId", "scenarios")) {
    model[[field]] <- get(field)
  }
  model
}
# Build the UI representation of a static model as a plain list.
# NULL-valued arguments are dropped from the result (assigning NULL via
# `[[<-` removes the element, matching the original `$<-` semantics).
CreateStaticModelObject <- function(modelId,
                                    description = NULL,
                                    initVars = NULL,
                                    aux = NULL,
                                    globalFunctions = NULL,
                                    defaultScenarioId = NULL,
                                    currentScenarioId = NULL,
                                    scenarios = list()) {
  model <- list(type = "static")
  for (field in c("modelId", "description", "initVars", "aux",
                  "globalFunctions", "defaultScenarioId",
                  "currentScenarioId", "scenarios")) {
    model[[field]] <- get(field)
  }
  model
}
# Build the UI representation of a coupled model as a plain list.
# NULL-valued arguments are dropped from the result (assigning NULL via
# `[[<-` removes the element, matching the original `$<-` semantics).
CreateCoupledModelObject <- function(modelId = NULL,
                                     description = NULL,
                                     componentIds = NULL,
                                     connections = NULL,
                                     currentScenarioId = NULL,
                                     scenarios = list()) {
  model <- list(type = "coupled")
  for (field in c("modelId", "description", "componentIds",
                  "connections", "currentScenarioId", "scenarios")) {
    model[[field]] <- get(field)
  }
  model
}
# Build the UI representation of a simulation scenario as a plain list.
# NULL-valued sections are dropped from the result (assigning NULL via
# `[[<-` removes the element, matching the original `$<-` semantics).
# NOTE(review): the id is stored under $id, while model objects use the
# long-form $modelId style — confirm downstream readers expect $id here.
CreateScenarioObject <- function(scenarioId = "default scenario",
                                 from = 0, to = 100, by = 1,
                                 method = "lsoda",
                                 state = NULL,
                                 constant = NULL,
                                 input = NULL,
                                 parameter = NULL,
                                 switch = NULL) {
  scenario <- list(id = scenarioId)
  for (field in c("from", "to", "by", "method", "state",
                  "constant", "input", "parameter", "switch")) {
    scenario[[field]] <- get(field)
  }
  scenario
}
# Flatten a coupled model's connection list into a 5-column table for the
# connections rhandsontable; returns NULL when the model has no connections.
# Each connection may be stored either as a vector or as a single string of
# R code that evaluates to one (e.g. a serialized 'c(...)' call).
# NOTE(review): despite the name, the return value is a character *matrix*
# (t() of a data frame) with the column names below — confirm callers only
# rely on colnames/row access.
ConnectionsListToDataFrame <- function(model, connectionsListName = "connections") {
  if(!is.null(model[[connectionsListName]])) {
    # convert the connections (parse text)
    # NOTE(review): eval(parse(text = x)) executes arbitrary R from the model
    # file — acceptable only if model files are trusted input.
    connections <- lapply(model[[connectionsListName]], function(x)
    {
      if (is.character(x) && length(x) == 1)
        eval(parse(text = x))
      else
        x
    })
    # Each connection becomes one row; transpose the column-per-connection
    # data frame into row-per-connection form.
    connectionsDF <- t(as.data.frame(connections, stringsAsFactors = FALSE, row.names = NULL))
    row.names(connectionsDF) <- c()
    colnames(connectionsDF) <- c('Connection ID',
                                 'Receiver Component ID',
                                 'Receiver Input',
                                 'Sender Component ID',
                                 'Sender Output')
    return(connectionsDF)
  } else {
    NULL
  }
}
# Flatten a model's auxiliary-equation list into a 4-column data frame
# (Variable, Value, Unit, Description) for display in an rhandsontable.
# Units and descriptions are pulled from the model's default scenario
# where a matching variable name exists; otherwise they stay "".
AuxListToDataFrame <- function(model, auxListName = "aux") {
  ls <- lapply(model[[auxListName]], function(x) toString(x))
  description <- model$defaultScenario$description
  unit <- model$defaultScenario$unit
  df <- data.frame(Variable = names(ls), Value = unlist(ls),
                   Unit = character(NROW(ls)),
                   Description = character(NROW(ls)),
                   stringsAsFactors = FALSE, row.names = NULL)
  # Vectorized metadata lookup (replaces the previous per-variable which()
  # scan, which was O(n^2) and errored on duplicated variable names).
  hasDesc <- df$Variable %in% names(description)
  if (any(hasDesc))
    df$Description[hasDesc] <- unlist(description[df$Variable[hasDesc]])
  hasUnit <- df$Variable %in% names(unit)
  if (any(hasUnit))
    df$Unit[hasUnit] <- unlist(unit[df$Variable[hasUnit]])
  df
}
# Flatten one scenario section (state/constant/input/parameter/switch) into
# a data frame with Variable, Value, Unit, Description columns; the "input"
# section additionally carries an Interpolation column. Metadata is pulled
# from the scenario's description/unit/interpolation lists where a matching
# variable name exists; otherwise it stays "".
# The two previous near-identical branches are merged; metadata filling is
# vectorized instead of a per-variable which() scan.
ScenarioListToDataFrame <- function(scenario, listName) {
  ls <- lapply(scenario[[listName]], function(x) toString(x))
  variableNames <- names(ls)
  if(is.null(variableNames))
    variableNames <- character(0)
  values <- unlist(ls)
  if(is.null(values))
    values <- character(0)
  df <- data.frame(Variable = variableNames, Value = values,
                   Unit = character(NROW(ls)),
                   Description = character(NROW(ls)),
                   stringsAsFactors = FALSE, row.names = NULL)
  if (listName == "input")
    df$Interpolation <- character(NROW(ls))
  # Copy values from a named metadata list into the given column for every
  # variable that has an entry.
  fillFrom <- function(df, colName, source) {
    hit <- df$Variable %in% names(source)
    if (any(hit))
      df[[colName]][hit] <- unlist(source[df$Variable[hit]])
    df
  }
  df <- fillFrom(df, "Description", scenario$description)
  df <- fillFrom(df, "Unit", scenario$unit)
  if (listName == "input")
    df <- fillFrom(df, "Interpolation", scenario$interpolation)
  df
}
# Deparse a function into a single newline-joined source string.
# Returns "" for a NULL input or when deparsing yields the literal "NULL".
FunToString <- function(fun) {
  if (is.null(fun)) {
    return("")
  }
  sourceText <- paste(format(fun), collapse = "\n")
  if (identical(sourceText, "NULL")) "" else sourceText
}
# Read input as excel file
#
# First get all the available sheets and then read them all as text columns.
# A sheet that fails to read (error or warning) becomes NULL in the result,
# silently — the commented-out handlers suggest logging was once intended.
#
# @param fileName Excel file name
# @return A named list (one entry per sheet) with the sheets as data.frames,
#   or NULL entries for sheets that could not be read
ReadDataExcel <- function(fileName)
{
  # read data from excel file with one or more sheets
  sheets <- readxl::excel_sheets(fileName)
  modelParms <- lapply(sheets, function(x)
  {
    tryCatch(
      {
        # All columns forced to text; " " cells are treated as NA.
        df <- readxl::read_excel(path = fileName, sheet = x,
                                 trim_ws = T, col_types = "text", na = " ")
        row.names(df) <- NULL
        # NOTE(review): assigning NULL to NA cells of a data frame looks
        # suspect (possibly intended as `<- NA` or `<- ""`); if this line
        # errors, the error handler below silently returns NULL for the
        # whole sheet — confirm intent.
        df[is.na(df)] <- NULL
        return(as.data.frame(df))
      },
      error=function(e)
      {
        # readInputDataMsg$ReadDataExcel1(fileName, e)
        return(NULL)
      },
      warning=function(w)
      {
        # readInputDataMsg$ReadDataExcel2(fileName, w)
        return(NULL)
      })
  })
  names(modelParms) <- sheets
  return(modelParms)
}
# Look up the value in column `valueCol` on the row where column
# `variableCol` equals `variableName`; NULL when no row matches.
GetDataFrameValue <- function(df, variableName, variableCol, valueCol) {
  matchRows <- which(df[[variableCol]] == variableName)
  if (length(matchRows) == 0) {
    return(NULL)
  }
  df[[valueCol]][[matchRows]]
}
|
3fd29f7ae14c4e5cffd551bbb7aa6c9c7dae5980
|
2e627e0abf7f01c48fddc9f7aaf46183574541df
|
/PBStools/man/calcStockArea.Rd
|
0b0682cbe14755a3f2bdc9bacc577787cb8d7df4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
pbs-software/pbs-tools
|
30b245fd4d3fb20d67ba243bc6614dc38bc03af7
|
2110992d3b760a2995aa7ce0c36fcf938a3d2f4e
|
refs/heads/master
| 2023-07-20T04:24:53.315152
| 2023-07-06T17:33:01
| 2023-07-06T17:33:01
| 37,491,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,825
|
rd
|
calcStockArea.Rd
|
\name{calcStockArea}
\alias{calcStockArea}
\alias{calcWAParea}
\title{
Calculate Stock Area
}
\description{
Assign a stock area designation based on species HART code and PMFC
major and/or minor areas.
}
\usage{
calcStockArea(strSpp, dat, stockFld="stock", gmu=TRUE)
calcWAParea(major, minor, strat, wts)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{strSpp}{\code{character} -- Species string code (alpha-numeric), usually taken as a page number from Hart (1973).}
\item{dat}{\code{data.frame} -- Data file with location fields (\code{major}, \code{minor}, \code{Y}, etc.).}
\item{stockFld}{\code{character} -- Name of new field to add to the input data file \code{dat}.}
\item{gmu}{\code{logical} -- if \code{TRUE}, use GMU areas set out in the IFMP for each species,
if \code{FALSE} use alternative stock area names if they are specified in the function's code.}
\item{major}{\code{numeric} -- Vector of numeric codes for major PMFC statistical areas.}
\item{minor}{\code{numeric} -- Vector of numeric codes for minor PMFC statistical areas
(sometimes referred to as DFO salmon areas).}
\item{strat}{\code{numeric} -- Vector of values (e.g., \code{year}) used to stratify the subarea delineation.
Vector length must equal that for \code{major} and \code{minor}.}
\item{wts}{\code{numeric} -- Vector of values (e.g., \code{catch}) used to weight the
occurrence of subareas in a major area to determine allocation of unknown
subareas to known ones. Vector length must equal that for \code{major} and \code{minor}.}
}
\details{
\code{calcStockArea}:\cr
Assigns a stock designator for a given species based on major and/or minor areas.
If both \code{major} and \code{minor} are specified as inputs, the two
vectors must have the same length. Additional stocks will be added over time as needed.
\code{calcWAParea}:\cr
Assigns a stock designation for Walleye Pollock based on major and minor
areas. There are a fair number of records in the Strait of Georgia (\code{major=1})
labelled 0 or 99, which means they could have occurred anywhere in the Inside
waters of BC (between Vancover Island and the mainland). This function attempts
to assign a proportion of unknown tow locations to Minor areas 12 and 20 based on
known tow locations in PMFC 4B that occur in these areas. Tows in Minor area 12
are assigned to the 5AB stock while those in Area 20 are assigned to the 3CD stock.
}
\value{
\code{calcStockArea} -- a new field of stock identifiers is added to
the input data file.\cr
\code{calcWAParea} -- a character vector of stock identifiers with
the same number of records as that of the input vector(s).
}
\references{
Hart, J.L. (1973) Pacific fishes of Canada. \emph{Fisheries Research Board of Canada} \bold{180}: ix + 740 p.
}
\author{
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Institute of Ocean Sciences (IOS), Sidney BC\cr
Last modified \code{Rd: 2020-10-08}
}
\note{
There is no ideal way of parsing out the percentage of tows from the unknown subarea
pool to the known one. Tows are chosen in the order that they appear in the recordset
(i.e., non-random).
}
\seealso{
\code{\link[PBStools]{calcSRFA}},
\code{\link[PBStools]{plotGMA}},
\code{\link[PBSdata]{species}},
\code{\link[PBSdata]{major}},
\code{\link[PBSdata]{minor}}
}
\examples{
\dontrun{
getFile(gfmdat)
gfmdat$catch = gfmdat$landed + gfmdat$discard
gfmdat.new = calcStockArea(strSpp="228", gfmdat, stockFld="stock")
gfmdat$stock = calcWAParea(gfmdat$major, gfmdat$minor,
strat=as.numeric(substring(gfmdat$date,1,4)), wts=gfmdat$catch)
}
}
\keyword{manip}
|
cf070237b8b87050dd166838219388c0beb3a6fe
|
24a664c67f2ac72cb10337c71f2981762ec8034e
|
/Course-3-cleaning/web-scraping.R
|
2a1b95304bef96d46f37a18142e6321ca125f72b
|
[] |
no_license
|
sariya/datasciencecoursera
|
8f0e801feead0c65d5dac31168acb9f574c1384f
|
f6a028dd4110d0dd0c8b83a459392914816fc899
|
refs/heads/master
| 2021-01-10T04:00:26.060050
| 2016-03-19T18:48:36
| 2016-03-19T18:48:36
| 50,143,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,040
|
r
|
web-scraping.R
|
# Reading data from the web
# Week 2 reading: getting and cleaning data
# Date Feb 28 2016
# Sanjeev Sariya
# Read the raw HTML of a Google Scholar profile over a plain URL connection.
con<-url("http://scholar.google.com/citations?user=HI-I6COAAAAJ&hl=en")
htmlCode<-readLines(con)
close(con) # -- close the connection
htmlCode
# use XML package to parse the page into a DOM tree
# NOTE(review): the user id below ends in digit "0" while the URL above uses
# letter "O" — confirm which profile id is intended.
library(XML)
url<-"http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
html<-htmlTreeParse(url,useInternalNodes = T)
xpathSApply(html,'//title',xmlValue)
xpathSApply(html,"//td[@id='col-citedby']",xmlValue)
## -- GET the same page via httr instead of base url()
# httr
library(httr)
html2<-GET(url)
content2<-content(html2,as="text")
parsedHtml= htmlParse(content2,asText = TRUE)
xpathSApply(parsedHtml,"//title",xmlValue)
### -- accessing a page protected by basic auth: first without credentials
pg2<-GET("http://httpbin.org/basic-auth/user/passwd")
pg2
## -- then with the password, to authenticate successfully
pg2<-GET("http://httpbin.org/basic-auth/user/passwd",authenticate("user","passwd"))
pg2
names(pg2)
##-- using handles so cookies/settings persist across requests to one host
google<-GET("http://google.com")
pg1<-GET(handle = google,path="/")
pg2<-GET(handle = google,path="search")
|
b9b73ef09838e64959c33c0301b7bbe9001f240c
|
02f24f0d8eaed72b4cb8b4ceb98d731eef1ee5c8
|
/man/remove_constant.Rd
|
8f0762b10037fbc1b01ce7926fa7d3f23880a910
|
[
"MIT"
] |
permissive
|
bradleycolquitt/phyloRNA
|
78c24ea7d0e6f2df6f6a6e7a31702694518a837a
|
d7dc3bf1daace8fe884558b14ae735051d5e6ff0
|
refs/heads/master
| 2023-07-23T06:55:16.094078
| 2021-08-31T23:14:46
| 2021-08-31T23:14:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 678
|
rd
|
remove_constant.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_constant.r
\name{remove_constant}
\alias{remove_constant}
\title{Remove constant}
\usage{
remove_constant(data, margin = 1, unknown = "N")
}
\arguments{
\item{data}{a data matrix}
\item{margin}{\strong{optional} rows (1) or columns (2) that will be explored}
\item{unknown}{\strong{optional} elements that are excluded from this comparison
(do not count among the unique values).}
}
\value{
a data matrix without the constant rows or columns
}
\description{
Remove constant rows or columns from matrix.
}
\details{
Constant row or column is such column that has only single unique value
(beside the unknown elements).
}
|
aaca7a0396b2cf6fc5204f97edb4ced400607c14
|
912ad27fe0c462026131613a6cf838d074c6dd59
|
/R/thetaSigmaParam.R
|
e1f4994e8061447df07fb7af482c3b2da44cc5bb
|
[] |
no_license
|
OakleyJ/MUCM
|
8265ee0cf1bbf29f701a1d883aa8ab1f8074655b
|
28782a74ef18ce087b1a23fe15df5a67b156083e
|
refs/heads/master
| 2021-01-11T18:17:10.733210
| 2017-10-20T14:45:18
| 2017-10-20T14:45:18
| 69,334,386
| 5
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,141
|
r
|
thetaSigmaParam.R
|
# Reconstruct a covariance matrix Sigma from its unconstrained parameter
# vector theta (inverse of sigmaThetaParam).
#
# theta: numeric vector of length n.outputs * (n.outputs + 1) / 2.
# param: one of "original", "cholesky", "log-cholesky", "spherical",
#        "matrix-log" (parametrizations of Pinheiro & Bates, 1996).
# n.outputs: dimension of Sigma.
# Returns the n.outputs x n.outputs symmetric matrix Sigma.
thetaSigmaParam <- function(theta, param, n.outputs) {
  if (param == "original") {
    ### theta holds the upper triangle (incl. diagonal) of Sigma directly
    Sigma <- matrix(nrow = n.outputs, ncol = n.outputs)
    Sigma[upper.tri(Sigma, diag = TRUE)] <- theta
    Sigma[lower.tri(Sigma)] <- t(Sigma)[lower.tri(Sigma)]
  } else if (param == "cholesky") {
    ### theta holds the upper-triangular Cholesky factor L; Sigma = L'L
    L <- matrix(0, nrow = n.outputs, ncol = n.outputs)
    L[upper.tri(L, diag = TRUE)] <- theta
    Sigma <- crossprod(L) # t(L) %*% L
  } else if (param == "log-cholesky") {
    ### as "cholesky", but the diagonal of L is stored on the log scale
    L <- matrix(0, nrow = n.outputs, ncol = n.outputs)
    L[upper.tri(L, diag = TRUE)] <- theta
    diag(L) <- exp(diag(L))
    Sigma <- crossprod(L) # t(L) %*% L
  } else if (param == "spherical") {
    ### column norms exp(theta[1:n]) plus angles mapped from the real line
    ### into (0, pi) by a scaled logistic
    l <- matrix(0, nrow = n.outputs, ncol = n.outputs)
    for (i in seq_len(n.outputs)) {
      l[1, i] <- exp(theta[i])
    }
    if (n.outputs >= 2) { # guard: 2:n.outputs runs backwards when n.outputs == 1
      for (i in 2:n.outputs) {
        for (j in 2:i) {
          exp1 <- exp(theta[n.outputs + (i - 2)*(i - 1)/2 + (j - 1)])
          l[j, i] <- (pi*exp1)/(1 + exp1)
        }
      }
    }
    L <- matrix(0, nrow = n.outputs, ncol = n.outputs)
    for (i in seq_len(n.outputs)) {
      for (j in seq_len(i)) {
        L[j, i] <- l[1, i]
        if (j < i) {
          L[j, i] <- L[j, i] * cos(l[j + 1, i])
        }
        if (j > 1) {
          for (k in 2:j)
            L[j, i] <- L[j, i] * sin(l[k, i])
        }
      }
    }
    Sigma <- crossprod(L) # t(L) %*% L
  } else if (param == "matrix-log") {
    ### theta holds the upper triangle of log(Sigma); exponentiate the
    ### eigenvalues to recover Sigma
    log.SSigma2 <- matrix(0, nrow = n.outputs, ncol = n.outputs)
    log.SSigma2[upper.tri(log.SSigma2, diag = TRUE)] <- theta
    log.SSigma2[lower.tri(log.SSigma2)] <- t(log.SSigma2)[lower.tri(log.SSigma2)]
    EG2 <- eigen(log.SSigma2, symmetric = TRUE)
    log.Lambda <- EG2$values
    # diag(x, nrow) keeps this a 1x1 matrix when n.outputs == 1; plain
    # diag(scalar) would build an identity matrix of that size instead.
    Sigma <- EG2$vectors %*% diag(exp(log.Lambda), nrow = n.outputs) %*% t(EG2$vectors)
  } else
    stop("Select appropriate param!")
  Sigma
}
# Map a covariance matrix Sigma to its unconstrained parameter vector theta
# under the chosen parametrization (inverse of thetaSigmaParam).
#
# Sigma: symmetric positive-definite n x n matrix.
# param: one of "original", "cholesky", "log-cholesky", "spherical",
#        "matrix-log" (parametrizations of Pinheiro & Bates, 1996).
# Returns a numeric vector of length n * (n + 1) / 2.
sigmaThetaParam <- function(Sigma, param) {
  n.outputs <- ncol(Sigma)
  if (param == "original") {
    ### theta is the upper triangle (incl. diagonal) of Sigma directly
    theta <- as.vector(Sigma[upper.tri(Sigma, diag = TRUE)])
  } else if (param == "cholesky") {
    ### theta is the upper-triangular Cholesky factor of Sigma
    L <- chol(Sigma)
    theta <- L[upper.tri(L, diag = TRUE)]
  } else if (param == "log-cholesky") {
    ### as "cholesky", with the diagonal stored on the log scale
    L <- chol(Sigma)
    diag(L) <- log(diag(L))
    theta <- L[upper.tri(L, diag = TRUE)]
  } else if (param == "spherical") {
    ### column norms (log scale) plus angles mapped to the real line by a
    ### logit of angle/pi
    L <- chol(Sigma)
    l <- matrix(0, ncol = n.outputs, nrow = n.outputs)
    for (i in seq_len(n.outputs)) {
      l[1, i] <- sqrt(sum((L[, i] ^ 2)))
      if (i > 1) {
        for (j in 2:i) {
          l[j, i] <- acos(L[j - 1, i] / sqrt(sum(L[(j - 1):i, i] ^ 2)))
        }
      }
    }
    # numeric() instead of vector() so unset entries stay numeric 0
    theta <- numeric(n.outputs*(n.outputs + 1)/2)
    for (i in seq_len(n.outputs)) {
      theta[i] <- log(l[1, i])
    }
    if (n.outputs >= 2) { # guard: 2:n.outputs runs backwards when n.outputs == 1
      for (i in 2:n.outputs) {
        for (j in 2:i) {
          theta[n.outputs + (i - 2)*(i - 1)/2 + (j - 1)] = log(l[j, i]/(pi - l[j, i]))
        }
      }
    }
  } else if (param == "matrix-log") {
    ### theta is the upper triangle of the matrix logarithm of Sigma
    EG <- eigen(Sigma, symmetric = TRUE)
    Lambda <- EG$values
    # diag(x, nrow) keeps this a 1x1 matrix when n.outputs == 1; plain
    # diag(scalar) would build an identity matrix of that size instead.
    log.SSigma <- EG$vectors %*% diag(log(Lambda), nrow = n.outputs) %*% t(EG$vectors)
    theta <- log.SSigma[upper.tri(log.SSigma, diag = TRUE)]
  } else
    stop("Select appropriate param!")
  theta
}
|
4a6b52991c230041af7a10ff025c0fc758e36cdb
|
b778e193c4a9c843debd4ec70524d8aa17fe5984
|
/expo_est_Db=1_wine_beta_ver4_2_sub.R
|
793194f81d2e3bfd0fa836344d16f7902941c44c
|
[] |
no_license
|
YSheena/P-N_Criteria_Program
|
6965145140a3c6cd5cc626670258a84c2099e2a3
|
840238814c34e3e75b9881f584b5790e411b963d
|
refs/heads/main
| 2023-06-07T03:31:23.521501
| 2021-06-28T07:50:38
| 2021-06-28T07:50:38
| 368,452,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,271
|
r
|
expo_est_Db=1_wine_beta_ver4_2_sub.R
|
# This program is aimed to calculate the estimation risk
# for an exponential family distribution specialized for wine data.
# This program is for the submodel with reduced xi's
# The main variables of the model
# x1:"p1"-dimensional continuous variables
# x2:1-dimensional discrete variables from 1 to "Rq"
# Rq:the number of possible outcomes of x2
# Rqn:the Rq-dimensional vector of log probability mass function of x2
# n_base: the number of the xi's in the full model
# n_base_s: the number of the xi's in the submodel
# i.e. "p" in the paper, the dimension of the exponential family distribution
### Model Description ###
#
# 1. p.d.f. of the reference measure, i.e. dmu/dx ;
# x1_i(i=1,...,p1), x2 are all independent and
# x1_i~beta(a_i,b_i), x2 ~ multinomial dist.
# with the parameters given empirically from the sample.
# log of the p.d.f of the reference measure is given by
# (a_1-1)log(x1_1)+(b_1-1)log(1-x1_1)
#+(a_2-1)log(x1_2)+(b_2-1)log(1-x1_2)+...
#+log(I(x2=1)*p(x2=1)+...+I(x2=Rq)*p(x2=Rq))
#
# 2. xi's
# all the possible cross between the elements of x=(x1,x2)
# In this submodel program, one will removed from the pair of xi's
# with high correlation. It will be carried out later.
# Pairwise cross products of the elements of x, in the model's fixed order:
# first x[i]*x[j] for 1 <= i < j <= n-1 (pairs among the continuous
# variables), then x[n]*x[i] for i = 1, ..., n-1 (crosses with the discrete
# variable stored last). Returns a vector of length n*(n-1)/2.
xi <- function(x){
  n <- length(x)
  y <- rep(0,(n-1)*n/2)
  count <- 0
  if (n > 2) { # guard: 1:(n-2) would run backwards for n <= 2
    for (i in 1:(n-2)){
      for (j in (i+1):(n-1)){
        count <- count + 1
        y[count] <- x[i]*x[j]
      }
    }
  }
  if (n > 1) { # guard: 1:(n-1) would run backwards for n == 1
    for (i in 1:(n-1)){
      count <- count + 1
      y[count] <- x[n]*x[i]
    }
  }
  return(y)
}
### End of Model Description ###
# Use a project-local package library for installs and loads.
.libPaths("./library")
.libPaths()
# NOTE(review): install.packages() on every run re-downloads packages and is
# slow; consider guarding each install with requireNamespace().
install.packages("rstan",dependencies = TRUE)
install.packages("nleqslv")
install.packages("gtools")
install.packages("tidyverse")
install.packages("tensorA")
install.packages("rlang")
library("rlang")
library("rstan")
library("MASS") #We use "ginv" function
library("nleqslv") #Nonlinear Equation Solver
#library("tidyr") #For data polishing
library("parallel") # Parallel calculation.
library("tensorA") # tensor, Einstein summation
# Cache compiled Stan models and use all available cores for sampling.
rstan_options(auto_write = TRUE)
options(mc.cores=parallel::detectCores())
source("grid_making_ver2.R")
# Red-wine quality data; columns are 11 chemical measurements plus "quality".
winedata <- read.csv("winequality-red.csv",header=TRUE)
p1 <- 11; # the dimension of x1, the continuous "chemical substances"
Rq <- 6; # number of "quality" outcomes: min 3, max 8
n_base <- p1*(p1+1)/2 # the number of xi's for the full model
# Function "rv_model"
# Generator of random variables from g(x;theta) via Stan MCMC.
# Input:  "theta", the parameter of g(x;theta)
#         "seedchange", the seed passed to function "stan"
# Output: all random objects in the output of "stan"
#         (list with components x1, x2 among others)
# NOTE(review): reads the globals p1, n_base, Rq, Rqn, beta_1, beta_2 and
# requires "expo_stan_Db=1_ver3_beta.stan" in the working directory.
rv_model <- function(theta,seedchange) {
  data <- list(p1=p1,n_base=n_base,Rq=Rq,Rqn=Rqn,theta=theta,
               beta_1=beta_1,beta_2=beta_2)
  # high adapt_delta / max_treedepth to suppress divergent transitions
  fit <- stan(file = "expo_stan_Db=1_ver3_beta.stan", data=data,
              control = list(adapt_delta = 0.99,max_treedepth = 15),
              chains=4, seed= seedchange, iter=10000)
  return(rstan::extract(fit) )
}
# Function "eta_DdPsi"
# Mean vector and covariance matrix of the xi's under g(x;theta), via Stan.
# Input : "theta", the parameter of g(x;theta)
#         "seedchange", the seed passed to function "stan"
# Output: list of two objects
#  1. "eta":   simulated mean of xi's under g(x;theta)
#  2. "Ddpsi": simulated covariance matrix of xi's under g(x;theta)
# Note that "eta" and "Ddpsi" are calculated from the "theoretical"
# expectation over the distribution of x2 (inside the Stan model), hence
# are more accurate than moments computed "empirically" from samples
# produced by "rv_model".
# NOTE(review): reads the globals p1, n_base, Rq, Rqn, beta_1, beta_2 and
# requires "expo_stan_Db=1_ver2_beta.stan" in the working directory.
eta_DdPsi <- function(theta,seedchange) {
  data <- list(p1=p1,n_base=n_base,Rq=Rq,Rqn=Rqn,theta=theta,
               beta_1=beta_1,beta_2=beta_2)
  fit <- stan(file = "expo_stan_Db=1_ver2_beta.stan", data=data,
              control = list(adapt_delta = 0.99,max_treedepth = 15),
              chains=4, seed= seedchange, iter=10000)
  rx <- rstan::extract(fit)
  # posterior-draw average of the xi's
  eta <- apply(rx$xs,2,mean)
  # covariance: E[xi xi'] minus the outer product of the means
  Ddpsi <- apply(rx$xsq,c(2,3),mean) - eta%*%t(eta)
  return(list(eta = eta, Ddpsi = Ddpsi))
}
# Index grids for the calculation of moments and cumulants.
# grid_making() comes from the sourced file grid_making_ver2.R.
# This takes several minutes.
grids <- grid_making(n_base)
# M2/M3/M4: index sets for second-/third-/fourth-order moments
M2 <- grids$M2 ; M3 <- grids$M3 ; M4 <- grids$M4
saveRDS(grids,"grids.rds")
# prodfunc(): sample moment — the mean over rows of the product of the
# columns of `data` selected by `x` (indices or names).
# `drop = FALSE` keeps the data.frame shape, so a single selected column
# no longer collapses to a plain vector (which would break apply()).
prodfunc<- function(data,x){
  mean(apply(data[, x, drop = FALSE], 1, prod))
}
# Function "theta_f"
# Input:  theta_s, the parameter vector of the submodel
#         zsi, the (increasing) full-model indices of the removed xi's
# Output: the full-model parameter vector, i.e. theta_s with zeros
#         interpolated at the positions listed in zsi
theta_f <- function(theta_s,zsi){
  for (i in seq_along(zsi)) {
    # append() inserts 0 after position zsi[i]-1, handling every case
    # uniformly — including zsi[i] == 1, which the previous
    # theta_s[1:(zsi[i]-1)] indexing mishandled (1:0 selects element 1).
    # seq_along(zsi) also removes the dependence on the global n_zsi.
    theta_s <- append(theta_s, 0, after = zsi[i] - 1)
  }
  theta_s
}
seedchange <- 1234
# Split of the data into two sets:
# 1. data_base: 100*(1-ratio)% of the winedata, used for formulating
#    the model
# 2. data_est:  100*ratio% of the winedata, used for estimating the
#    parameter of the exponential family
NN <- nrow(winedata)
ratio <- 0.5
set.seed(seedchange)
dt <- sort(sample(NN, NN * ratio))
data_est <- winedata[dt, ]
data_base <- winedata[-dt, ]
N <- nrow(data_est)
### Model Formulation ###
# 1-1. the constant used for normalizing x1
# 1-2. xi's used for the submodel
# 1-3. p.d.f. of the reference measure
# "max_x1": normalizing constants for x1. Each column maximum is doubled
# so that the corresponding maximum over "data_est" cannot exceed it.
max_x1 <- apply(data_base[, -(p1 + 1)], 2, max) * 2
winedata_con <- as.matrix(data_base[, -(p1 + 1)]) %*% diag(1 / max_x1)
# Shift "quality" so that its range is 1:Rq
winedata_dis <- data_base[, (p1 + 1)] - 2
# Merge of modified x1 and x2
data_base <- cbind(winedata_con, winedata_dis)
# Empirical data of the xi's for data_base
xi_emp_base <- as.data.frame(t(apply(data_base, 1, xi)))
# Find highly correlated xi's
rho <- 0.90
Corr_s <- cor(xi_emp_base)
# zero the lower triangle so each pair is inspected only once
Corr_s[lower.tri(Corr_s, diag = TRUE)] <- 0
# Indices of the xi's to remove: one of each pair whose absolute
# correlation exceeds rho. (The old comment said 0.98; the threshold
# actually used is rho = 0.90.)
zsi <- (which(abs(Corr_s) > rho, arr.ind = TRUE))[, "col"]
zsi <- unique(zsi)
#zsi <- zsi[zsi <= n_base-p1] # if xi's depending on x2 are kept in the submodel
n_zsi <- length(zsi)
# the number of xi's in the submodel
n_base_s <- n_base - n_zsi
# gridvec3s, gridvec4s: index grids restricted to the submodel
index_s <- (1:n_base)[-zsi]
gridvec3s <- expand.grid(index_s, index_s, index_s)
gridvec4s <- expand.grid(index_s, index_s, index_s, index_s)
# Reference distribution for x2:
# prob_i estimates P(x2 = i), prob = c(p_1,...,p_Rq).
# 1 is added to every cell of the frequency table to avoid zero
# probabilities.
prob <- table(factor(data_base[, p1 + 1], levels = 1:Rq)) + 1
minp <- min(log(prob))
# log of the probability mass function of x2, up to a constant
Rqn <- log(prob) - rep(minp, Rq)
# log(p.d.f.) of the reference distribution of x1:
# log p.d.f. of beta(a,b) is (a-1)log(x) + (b-1)log(1-x);
# with mean m = a/(a+b) and variance v = ab/((a+b)^2 (a+b+1)) the
# method-of-moments estimates are a = m^2(1-m)/v - m, b = a(1-m)/m.
m <- apply(data_base[, -(p1 + 1)], 2, mean)
v <- apply(data_base[, -(p1 + 1)], 2, var)
# parameters of the beta reference distributions for x1
beta_1 <- m^2 / v * (1 - m) - m
beta_2 <- beta_1 * (1 - m) / m
### End of Model Formulation ###
### Estimation of theta ###
# Normalizing data_est by the same constants "max_x1" as data_base
data_est_con <- as.matrix(data_est[,-(p1+1)])%*%diag(1/max_x1)
# Change "quality" so that the range 1:Rq
data_est_dis <- data_est[,(p1+1)]-2
# Merge of modified x1 and x2
data_est <- cbind(data_est_con, data_est_dis)
# Empirical Data of xi's for data_est
xi_emp_est <- as.data.frame(t(apply(data_est,1,xi)))
# Sample mean of the xi's (full model), then restricted to the submodel
eta0 <- apply(xi_emp_est,2,mean)
eta0_s <- eta0[-zsi]
# Sample covariance of the xi's (full model), then restricted
Sigma <- cov(xi_emp_est)
Sigma_s <- Sigma[-zsi,-zsi]
## The solution of theta_* using "nleqslv"
# Procedure
# Step 1: Under the given theta(t), calculate
#   eta(t), the mean of xi's, and DdPsi(t), the covariance matrix of xi's
#   from the samples generated by MCMC.
#   "eta_DdPsi" function with "expo_stan_Db=1_ver2_beta.stan" is used.
# Step 2: By Newton-Raphson method (together with another searching method),
#   search the value of theta_* iteratively:
#   theta(t+1) = theta(t)-(eta(t)-eta)%*%DdPsi(t)^(-1)
theta_z1 <- rep(0,n_base_s) # initial value of theta
# fn:  moment-matching residual on the submodel coordinates
# jac: its Jacobian (the submodel covariance matrix of the xi's)
result <- nleqslv(theta_z1,fn = function(x) {
  eta_DdPsi(theta_f(x,zsi),seedchange)$eta[-zsi] - eta0_s},
  jac = function(x) {eta_DdPsi(theta_f(x,zsi),seedchange)$Ddpsi[-zsi,-zsi]} ,
  method = "Newton",
  global = "cline",
  xscalm = "auto",jacobian=TRUE,control=list(trace=1))
theta_star <- result$x # the solution of theta_*
saveRDS(result,"result.rds")
### Risk Calculation ###
## N^{-1} order term ##
Psin <- ginv(result$jac) #just for the theta's in the submodel !
term0 <- sum(diag(Psin%*%Sigma_s))
# Sample from g(x;theta_*)
x_gstar <- rv_model(theta_f(theta_star,zsi),seedchange)
x_model <- cbind(x_gstar$x1,x_gstar$x2)
# Data of xi's from the model
xi_gstar <- as.data.frame(t(apply(x_model,1,xi)))
# Sample mean of xi's under the fitted model
eta1 <- apply(xi_gstar,2,mean)
# Sample covariance of xi's under the fitted model
DdPsi <- cov(xi_gstar)
# N^{-1} order term coeff. calculated in another way (consistency check)
term0a <- sum(diag(ginv(DdPsi[-zsi,-zsi])%*%Sigma_s))
# First-order term of ED #
FirN <- term0/(2*N)
## N^{-2}-order term calculation ##
# Higher order moments from the empirical data and the model g(x;eta_*)
# NOTE(review): this bare detectCores() call discards its result.
detectCores()
cl1 <- makeCluster(detectCores())
clusterExport(cl1, varlist =
                list('xi_gstar','xi_emp_est','prodfunc'))
# e-suffix: empirical moments; m-suffix: model moments
moment2e <- parSapply(cl1,M2,function(x) {prodfunc(xi_emp_est,x)})
moment3e <- parSapply(cl1,M3,function(x) {prodfunc(xi_emp_est,x)})
moment2m <- parSapply(cl1,M2,function(x) {prodfunc(xi_gstar,x)})
moment3m <- parSapply(cl1,M3,function(x) {prodfunc(xi_gstar,x)})
#the most time-consuming step, about 1 hr
moment4m <- parSapply(cl1,M4,function(x) {prodfunc(xi_gstar,x)})
stopCluster(cl1)
saveRDS(moment4m,"moment4m.rds")
# Higher order cumulants from the empirical data and the model g(x;eta_*)
# NOTE(review): cluster size 24 is hard-coded here, unlike cl1 which used
# detectCores() — confirm this matches the target machine.
cl2 <- makeCluster(24)
clusterExport(cl2, varlist=
                list('a_v','M3','M4','eta0','eta1', 'moment2e', 'moment2m',
                     'moment3e','moment3m','moment4m'))
# Third-order empirical cumulant:
# k3(i,j,k) = m3(i,j,k) - sum over the three splits of mean * m2 + 2*product of means
# (a_v() maps a sorted index tuple to its position in the moment vectors;
# it is defined in the sourced grid_making_ver2.R.)
cumu3e <- parSapply(cl2, M3,
                    function(x){
                      x <- unlist(x)
                      moment3e[a_v(x)]-
                        eta0[x[1]]*moment2e[a_v(x[c(2,3)])]-
                        eta0[x[2]]*moment2e[a_v(x[c(1,3)])]-
                        eta0[x[3]]*moment2e[a_v(x[c(1,2)])]+
                        2*eta0[x[1]]*eta0[x[2]]*eta0[x[3]]
                    })
# Third-order model cumulant, same formula with model moments
cumu3m <- parSapply(cl2, M3,
                    function(x){
                      x <- unlist(x)
                      moment3m[a_v(x)]-
                        eta1[x[1]]*moment2m[a_v(x[c(2,3)])]-
                        eta1[x[2]]*moment2m[a_v(x[c(1,3)])]-
                        eta1[x[3]]*moment2m[a_v(x[c(1,2)])]+
                        2*eta1[x[1]]*eta1[x[2]]*eta1[x[3]]
                    })
# Fourth-order model cumulant (standard moment-to-cumulant expansion)
cumu4m <- parSapply(cl2, M4,
                    function(x){
                      x <- unlist(x)
                      moment4m[a_v(x)]-
                        moment3m[a_v(x[c(1,2,3)])]*eta1[x[4]]-
                        moment3m[a_v(x[c(1,2,4)])]*eta1[x[3]]-
                        moment3m[a_v(x[c(1,3,4)])]*eta1[x[2]]-
                        moment3m[a_v(x[c(2,3,4)])]*eta1[x[1]]-
                        moment2m[a_v(x[c(1,2)])]*moment2m[a_v(x[c(3,4)])]-
                        moment2m[a_v(x[c(1,3)])]*moment2m[a_v(x[c(2,4)])]-
                        moment2m[a_v(x[c(1,4)])]*moment2m[a_v(x[c(2,3)])]+
                        2*moment2m[a_v(x[c(1,2)])]*eta1[x[3]]*eta1[x[4]]+
                        2*moment2m[a_v(x[c(1,3)])]*eta1[x[2]]*eta1[x[4]]+
                        2*moment2m[a_v(x[c(1,4)])]*eta1[x[2]]*eta1[x[3]]+
                        2*moment2m[a_v(x[c(2,3)])]*eta1[x[1]]*eta1[x[4]]+
                        2*moment2m[a_v(x[c(2,4)])]*eta1[x[1]]*eta1[x[3]]+
                        2*moment2m[a_v(x[c(3,4)])]*eta1[x[1]]*eta1[x[2]]-
                        6*eta1[x[1]]*eta1[x[2]]*eta1[x[3]]*eta1[x[4]]
                    })
stopCluster(cl2)
# Expand the cumulants to cover all index combinations
# 1 <= i, j, k, (l) <= n_base_s (the stored values use sorted tuples only)
cl3 <- makeCluster(24)
clusterExport(cl3,varlist = list('cumu3e','cumu3m','cumu4m','a_v'))
# these cumulants are restricted to the submodel index grids
cumu3ef <- parApply(cl3,gridvec3s,1,FUN = function (x) {
  cumu3e[a_v(sort(x))]})
cumu3mf <- parApply(cl3,gridvec3s,1,FUN = function (x) {
  cumu3m[a_v(sort(x))]})
# takes several minutes
cumu4mf <- parApply(cl3,gridvec4s,1,FUN = function (x) {
  cumu4m[a_v(sort(x))]})
stopCluster(cl3)
# First term of the N^{-2}-order term
# 1. Change all objects into named tensors (tensorA); repeated index
#    letters are contracted by %e% (Einstein summation)
Psin_t1 <- as.tensor(Psin, dims=c(I=n_base_s,J=n_base_s))
Psin_t2 <- as.tensor(Psin, dims=c(K=n_base_s,L=n_base_s))
Psin_t3 <- as.tensor(Psin, dims=c(M=n_base_s,S=n_base_s))
cumu3ef_t <- to.tensor(cumu3ef,dims=c(I=n_base_s,K=n_base_s,M=n_base_s))
cumu3mf_t <- to.tensor(cumu3mf,dims=c(J=n_base_s,L=n_base_s,S=n_base_s))
# 2. Multiplication between tensors
# It is important to avoid outer products!
# Otherwise the intermediate tensors easily exceed available memory,
# so contractions are chained pairwise.
term1 <- cumu3mf_t %e% Psin_t3 %e% Psin_t2 %e% Psin_t1 %e% cumu3ef_t
# Second term of the N^{-2}-order term
Psin_t1 <- as.tensor(Psin, dims=c(I=n_base_s,J=n_base_s))
Psin_t2 <- as.tensor(Psin, dims=c(K=n_base_s,L=n_base_s))
Psin_t3 <- as.tensor(Psin, dims=c(M=n_base_s,S=n_base_s))
Psin_t4 <- as.tensor(Psin, dims=c(O=n_base_s,P=n_base_s))
Psin_t5 <- as.tensor(Psin, dims=c(U=n_base_s,V=n_base_s))
# the three Sigma pairings below realize the three index matchings
Sigma_t1 <- as.tensor(Sigma_s, dims=c(J=n_base_s,L=n_base_s))
Sigma_t2 <- as.tensor(Sigma_s, dims=c(S=n_base_s,P=n_base_s))
Sigma_t3 <- as.tensor(Sigma_s, dims=c(J=n_base_s,S=n_base_s))
Sigma_t4 <- as.tensor(Sigma_s, dims=c(L=n_base_s,P=n_base_s))
Sigma_t5 <- as.tensor(Sigma_s, dims=c(J=n_base_s,P=n_base_s))
Sigma_t6 <- as.tensor(Sigma_s, dims=c(L=n_base_s,S=n_base_s))
cumu3mf_t1 <- to.tensor(cumu3mf,dims=c(I=n_base_s,K=n_base_s,U=n_base_s))
cumu3mf_t2 <- to.tensor(cumu3mf,dims=c(M=n_base_s,O=n_base_s,V=n_base_s))
term21 <- Sigma_t2 %e% Psin_t4 %e% Psin_t3 %e% cumu3mf_t2 %e%
  Sigma_t1 %e% Psin_t5 %e% Psin_t2 %e% Psin_t1 %e% cumu3mf_t1
term22 <- Sigma_t4 %e% Psin_t4 %e% Psin_t3 %e% cumu3mf_t2 %e%
  Sigma_t3 %e% Psin_t5 %e% Psin_t2 %e% Psin_t1 %e% cumu3mf_t1
term23 <- Sigma_t6 %e% Psin_t4 %e% Psin_t3 %e% cumu3mf_t2 %e%
  Sigma_t5 %e% Psin_t5 %e% Psin_t2 %e% Psin_t1 %e% cumu3mf_t1
term2 <- term21+term22+term23
# Third term of the N^{-2}-order term
Psin_t1 <- as.tensor(Psin, dims=c(I=n_base_s,J=n_base_s))
Psin_t2 <- as.tensor(Psin, dims=c(K=n_base_s,L=n_base_s))
Psin_t3 <- as.tensor(Psin, dims=c(M=n_base_s,S=n_base_s))
Psin_t4 <- as.tensor(Psin, dims=c(O=n_base_s,P=n_base_s))
Sigma_t1 <- as.tensor(Sigma_s, dims=c(J=n_base_s,L=n_base_s))
Sigma_t2 <- as.tensor(Sigma_s, dims=c(S=n_base_s,P=n_base_s))
Sigma_t3 <- as.tensor(Sigma_s, dims=c(J=n_base_s,S=n_base_s))
Sigma_t4 <- as.tensor(Sigma_s, dims=c(L=n_base_s,P=n_base_s))
Sigma_t5 <- as.tensor(Sigma_s, dims=c(J=n_base_s,P=n_base_s))
Sigma_t6 <- as.tensor(Sigma_s, dims=c(L=n_base_s,S=n_base_s))
cumu4mf_t <- to.tensor(cumu4mf, dims=c(I=n_base_s,K=n_base_s,M=n_base_s,O=n_base_s))
term31 <- cumu4mf_t %e% Psin_t1 %e% Psin_t2 %e%
  Psin_t3 %e% Psin_t4 %e% Sigma_t1 %e% Sigma_t2
term32 <- cumu4mf_t %e% Psin_t1 %e% Psin_t2 %e%
  Psin_t3 %e% Psin_t4 %e% Sigma_t3 %e% Sigma_t4
term33 <- cumu4mf_t %e% Psin_t1 %e% Psin_t2 %e%
  Psin_t3 %e% Psin_t4 %e% Sigma_t5 %e% Sigma_t6
term3 <- term31+term32+term33
## Second-order term ##
SecN <- (-8*term1 + 9*term2 -3*term3)/(24*N^2)
# The estimation risk in total
EstR <- FirN + SecN
cat("FirN=",FirN,"\n SecN=",SecN, "\n EstR=",EstR, "\n")
# Saving the results in "result_m"
result_m <- list(seedchange,ratio,N,rho,zsi,n_zsi,n_base_s, theta_star,eta1[-zsi],
                 term0,term1,term2,term3,FirN,SecN,EstR)
saveRDS(result_m,"result_m.rds")
### Code for the repetition with different "seedchange"s ###
# Summary over repeated runs.
# NOTE(review): this section expects "result_m" to be a numeric matrix with
# one row per repetition; the list saved just above cannot be passed to
# apply() directly — confirm how the per-run results are stacked before use.
risk_mean <- apply(result_m, 2, mean)
risk_sd <- apply(result_m, 2, sd)
# Append the column-wise mean and sd as two extra summary rows
# (equivalent to the previous nested rbind calls).
result_m <- rbind(result_m, risk_mean, risk_sd)
# Column names of the repetition-result data set.
# sprintf() is vectorized, so the names are built in one call instead of
# growing a vector inside a loop (the old loop also contained a useless
# "i <- i + 1": "for" reassigns i on every iteration anyway).
theta_star_name <- sprintf("theta_*_%2d", seq_len(n_base_s))
eta1_name <- sprintf("eta1_%2d", seq_len(n_base_s))
# Build a data.frame of the repetition results and save it as CSV.
colnames(result_m) <- c(theta_star_name, eta1_name,
                        "term0","term1","term2","term3","FirN","SecN","EstR")
result_d <- as.data.frame(result_m)
write.csv(result_d, "result_d.csv")
##### Additional code for the classifier #####
# Function "log_exp_pdf"
# Input: "x1": the continuous variable (length p1)
#        "x2": the discrete variable (integer in 1:Rq)
#        "Rqn": log p.m.f. of x2 up to a constant
#        "theta": the (submodel) parameter vector of the exponential model
# Output: the non-constant part w.r.t. x2 of the log p.d.f. of x=(x1,x2)
# NOTE(review): reads the global zsi; the %*% product makes the return
# value a 1x1 matrix rather than a plain scalar.
log_exp_pdf <- function(x1, x2, theta, Rqn){
  return (Rqn[x2] + theta_f(theta,zsi)%*%(xi(c(x1,x2))))
}
# Function "prpb_x2_given_x1"
# Input: "x1": the continuous variable
#        "Rqn": log p.m.f. of x2 (up to a constant)
#        "theta": the parameter vector of the exponential family model
# Output: numeric vector of the log p.d.f. for each possible value of x2
#         given x1, up to a constant
prpb_x2_given_x1 <- function(x1, theta, Rqn){
  # vapply preallocates and type-checks the result instead of growing a
  # vector one element at a time; as.numeric drops the 1x1 matrix shape
  # that log_exp_pdf returns (it uses %*%).
  vapply(seq_along(Rqn),
         function(i) as.numeric(log_exp_pdf(x1, i, theta, Rqn)),
         numeric(1))
}
# Bayes discriminant function: the predicted quality class on the original
# 3..8 scale (submodel classes 1..Rq shifted by +2) with the highest score.
bayes_dis <- function(x1, theta, Rqn){
  pvec <- prpb_x2_given_x1(x1, theta, Rqn)
  # which.max() always returns a single index (the first maximum); the
  # previous which(pvec == max(pvec)) could return several indices on ties
  # and compared floating-point values with ==.
  which.max(pvec) + 2
}
# Classify each individual in "data_est" with the Bayes rule
class_pre_est <- as.vector(
  apply(data_est[,-(p1+1)],1,
        function(x1) {
          bayes_dis(x1, theta=theta_star, Rqn=Rqn)
        }
  )
)
# Confusion table: predicted vs true quality, both on the original 3..8 scale
classify_result_data_est <- table(class_pre_est, class_true_est = data_est[,(p1+1)]+2)
# Classify each individual in "data_base" (in-sample check)
class_pre_base <- as.vector(
  apply(data_base[,-(p1+1)],1,
        function(x1) {
          bayes_dis(x1, theta=theta_star, Rqn=Rqn)
        }
  )
)
classify_result_data_base <- table(class_pre_base, class_true_base = data_base[,(p1+1)]+2)
|
c0d742740a7aa66f56f771a7744bdbc99f2ddc2e
|
ff53eb4044c2006e7ccf4a5a92e666a7fdebfebc
|
/CCM/ccm_causality.R
|
52e8e6739f9afd2c6653936adca08448ad9dd96e
|
[] |
no_license
|
AhahaJade/netinf
|
d31b8a96782ca9dc6d1b998929f6074820351448
|
282fc2411d609523d0faa9e32b5564c8c72bc1e2
|
refs/heads/master
| 2022-11-05T04:01:19.701124
| 2020-06-26T17:08:42
| 2020-06-26T17:08:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,311
|
r
|
ccm_causality.R
|
library(rEDM)
library(lattice)

# Blue-to-red palette for the adjacency heat maps
rgb.palette <- colorRampPalette(c("blue", "red"), space = "rgb")
#par(mfrow = c(2, 2))

# ANCHOVY SST DATA
data(sardine_anchovy_sst)
# NOTE(review): "names" shadows base::names() while this script runs.
names <- c("anchovy", "np_sst")
# CCM cross-map skill between the two series (E = 3)
adj <- get_ccm_means(sardine_anchovy_sst, 3, names)
print(adj)
# correlation-coefficient threshold for declaring a causal link
thresh <- 0.1
# binarize the skill matrix into an adjacency matrix
mat <- apply(adj, c(1, 2), max)
mat[mat < thresh] <- 0
mat[mat >= thresh] <- 1
levelplot(t(mat), main = "Adjacency Matrix (anchovy, sst)",
          xlab = "", ylab = "", ylim = c(length(names) + 0.5, 0.5),
          col.regions = rgb.palette(120))
# TWO SPECIES MODEL
two_species <- two_species_data(c(0.1, 0.1, 0.1, 0.1, 0.1), 100)
names <- c("x", "y")
adj <- get_ccm_means(two_species, 3, names, lib_sizes = seq(10, 200, by = 10))
print(adj)
thresh <- 0.7
mat <- apply(adj, c(1, 2), max)
mat[mat < thresh] <- 0
mat[mat >= thresh] <- 1
levelplot(t(mat), main = "Adjacency Matrix Two Species",
          xlab = "", ylab = "", ylim = c(length(names) + 0.5, 0.5),
          col.regions = rgb.palette(120))
# FIVE SPECIES MODEL
five_species <- five_species_data(c(0.1, 0.1, 0.1, 0.1, 0.1), 100)
names <- c("y1", "y2", "y3", "y4", "y5")
adj <- get_ccm_means(five_species, 3, names, lib_sizes = seq(10, 200, by = 10))
thresh <- 0.5
mat <- apply(adj, c(1, 2), max)
mat[mat < thresh] <- 0
mat[mat >= thresh] <- 1
levelplot(t(mat), main = "Adjacency Matrix Five Species",
          xlab = "", ylab = "", ylim = c(length(names) + 0.5, 0.5),
          col.regions = rgb.palette(120))
# NEAREST NEIGHBOR SPRING MODEL
# Number of nodes
n <- 3
# Time vector
time <- 1:500
# Boundary conditions
bc <- "free"
randpfn <- function(n) {
return(runif(n, 0, 1))
}
unifpfn <- function(n) {
return(0:1/n:(n-1)/n)
}
zerovfn <- function(n) {
return(rep(0, n))
}
constmfn <- function(n) {
return(rep(1, n))
}
constkfn <- function(n) {
return(rep(1, n))
}
# Simulate the nearest-neighbor coupled spring model and cross-map it
nncoupled_model <- nncoupled_data(n, time, randpfn, zerovfn, constmfn, constkfn, bc = bc)
names <- paste0("pos", 1:n)
adj <- get_ccm_means(nncoupled_model, 3, names, lib_sizes = seq(10, 200, by = 10))
thresh <- 0.5
mat <- apply(adj, c(1, 2), max)
# thresholding intentionally disabled here: plot the raw skill values
#mat[mat < thresh] <- 0
#mat[mat >= thresh] <- 1
levelplot(t(mat), main = "Adjacency Matrix NNCoupled",
          xlab = "", ylab = "", ylim = c(length(names) + 0.5, 0.5),
          col.regions = rgb.palette(120))
|
ac59095b98efde288654e61869d685d631e6597c
|
090ff155d4d2ab91ddabc7a84c5206c45f4de211
|
/Plot1.R
|
30374d3c385d76ee34f5cb0ff25c3fe705d26b1a
|
[] |
no_license
|
jcval94/Tesis
|
89db0e64bc51aa47e6c053d6eb0c8008fc66b168
|
cd9764811b6054c4f163aaafc45b80d971acb6ac
|
refs/heads/master
| 2021-07-12T04:39:28.303815
| 2020-08-09T05:30:43
| 2020-08-09T05:30:43
| 190,092,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
Plot1.R
|
library(tibble)

# Discrete distribution P(X = x) for x = 1..4
# (tibble() replaces the deprecated data_frame())
df <- tibble(month = as.character(1:4), Freq = c(.19, .23, .31, .27))
# Base-graphics bar chart with a custom y axis; barplot() returns the
# bar midpoints, used below to position the value labels.
p <- barplot(df$Freq, ylim = c(0, .5), names.arg = df$month, space = 0.25, axes = FALSE)
xval <- seq(0, .5, .05)
axis(side = 2, at = xval, labels = FALSE, xpd = TRUE)
axis(side = 2, at = xval, tick = FALSE, labels = xval, xpd = TRUE)
# value labels just above each bar
text(p, df$Freq + .03, labels = df$Freq, xpd = TRUE)
# title() is the base-graphics way to add a main title; the previous
# ggtitle() call is a ggplot2 function (not even attached here) and has
# no effect on a base plot.
title("P(X=x)")
|
df14a730647076c28cd23e8d5510488232269ad7
|
47cd7c4061392fb74f7d21d993a14f87a80fc7d2
|
/run_analysis.R
|
be948cacbf4048a9398967c251eacfb311f17aa0
|
[] |
no_license
|
gnarra/CourseraCleaningData
|
067f188557952113d06f96641fde9df5b62eaaa4
|
8d5ff1add26014612eef15081befbc2ccc3ca9df
|
refs/heads/master
| 2020-04-18T10:23:08.035737
| 2016-08-21T23:12:53
| 2016-08-21T23:12:53
| 66,224,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,150
|
r
|
run_analysis.R
|
# Set working directory.
# NOTE(review): the hard-coded path makes this script machine-specific;
# kept for compatibility, but consider running from the dataset directory.
setwd("C:/Gopal/Coursera/CleaningData/UCI HAR Dataset")
# Read the 561 feature (column) names from features.txt
features <- read.table("features.txt")
# 1) Merge the training and the test sets to create one data set.
# train/X_train.txt: training set; train/y_train.txt: training labels.
trainingSet <- read.table("train/X_train.txt", col.names = features[, 2])
trainingLabels <- read.table("train/y_train.txt", col.names = "Label")
# test/X_test.txt: test set; test/y_test.txt: test labels.
testSet <- read.table("test/X_test.txt", col.names = features[, 2])
testLabels <- read.table("test/y_test.txt", col.names = "Label")
# subject files: each row identifies the subject (1..30) who performed
# the activity for each window sample
trainingSubjects <- read.table("train/subject_train.txt", col.names = "Subjects")
testSubjects <- read.table("test/subject_test.txt", col.names = "Subjects")
# Bind columns within each split first, then stack the two splits
fullDataSet <- rbind(cbind(trainingSubjects, trainingLabels, trainingSet),
                     cbind(testSubjects, testLabels, testSet))
# 2) Extract only the measurements on the mean and standard deviation.
# NOTE(review): "mean|std" also matches meanFreq() columns — confirm
# whether those are wanted.
colIndexes <- grepl("mean|std", features[, 2])
mergedColIndexes <- c(TRUE, TRUE, colIndexes)  # keep Subjects + Label too
meanData <- fullDataSet[, mergedColIndexes]
# 3) Use descriptive activity names.
# Indexing by label value assumes activity_labels.txt rows are ordered by
# ActivityID 1..6.
activityLabels <- read.table("activity_labels.txt", col.names = c("ActivityID", "ActivityDesc"))
meanData$Label <- activityLabels[meanData$Label, 2]
# 4) Descriptive variable names: expand the t/f prefixes
names(meanData) <- gsub("^t", "time", names(meanData))
names(meanData) <- gsub("^f", "frequency", names(meanData))
# 5) Independent tidy data set with the average of each variable for each
#    activity and each subject
tidyData <- aggregate(meanData, by = list(meanData[, "Subjects"], meanData[, "Label"]), FUN = mean)
# "row.names" spelled out in full; the previous "row.name" relied on
# partial argument matching.
write.table(tidyData, file = "tidydata.txt", row.names = FALSE)
|
ba89c8ec97ce73e124bce80e86c290b70a709d18
|
3c59cc90a959aa4e8397cd1009045559209dfae9
|
/Session 6_Linear Regression/Linear_Regression_1.R
|
7a2f4fe980875a1372d2b9b48ea4086fc12c96b8
|
[] |
no_license
|
datamaniac03/HardikClass
|
fb12459271232fd9cedbc00db38d6c50358c0113
|
9e5a5fef0e21257ce81f36866c2c17cbd0a8f90e
|
refs/heads/master
| 2021-01-15T17:50:19.475053
| 2017-08-09T03:47:40
| 2017-08-09T03:47:40
| 99,760,103
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,893
|
r
|
Linear_Regression_1.R
|
# Draw 10 random integers in [10, 30) and bucket them two ways
hardik.data <- floor(runif(10,10,30))
print(hardik.data)
# findInterval(): integer index of the interval each value falls into
ajinkya.data<- findInterval(hardik.data,c(5,15,25,30))
print(ajinkya.data)
# cut(): same bucketing but returned as a labelled factor
# NOTE(review): this result is printed (auto-print) but not stored.
cut(hardik.data, breaks=c(5, 15, 25, 30), labels = c("Low","Mid", "HIGH"))
# NOTE(review): absolute Windows path — machine-specific.
mydata<-read.csv("D:/Coaching/Business Intelligence/Course/Session 4/hsb2.csv")
print(mydata)
# Recode the numeric race variable (1..4) into descriptive labels
mydata$racel<-cut(mydata$race,breaks=c(0,1,2,3,4), labels=c("hispanic","asian","african-american","white"))
print(mydata$racel)
#Linear Regression in R:
#Assumptions in Linear Regression
#There are four principal assumptions which justify the use of linear regression models for purposes of inference or prediction:
# (i) linearity and additivity of the relationship between dependent and independent variables:
#(a) The expected value of dependent variable is a straight-line function of each independent variable, holding the others fixed.
#(b) The slope of that line does not depend on the values of the other variables.
#(c) The effects of different independent variables on the expected value of the dependent variable are additive.
# (ii) Statistical independence of the errors (in particular, no correlation between consecutive errors in the case of time series data)
# (iii) Homoscedasticity (constant variance) of the errors
# (a) versus time (in the case of time series data)
# (b) versus the predictions
# (c) versus any independent variable
#(iv) Normality of the error distribution.
#Why Linearity is important
# Fit a straight line to the first four points of a quadratic sequence
# (Y = X^2) and show how badly the linear extrapolation misses later points.
Y <- c(1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196)
X <- 1:14  # seq(1:14) was redundant; 1:14 is the idiomatic sequence
print(Y)
print(X)
# Linear fit on the first four points gives Y = 5X - 5
lm(Y[1:4]~X[1:4])
summary(lm(Y[1:4]~X[1:4]))
df <- data.frame(Y, X)
# Predictions from the fitted line, and the percentage error against the
# true quadratic values — the error grows with X.
df$predY <- (5 * df$X - 5)
print(df)
df$predE <- ((df$Y - df$predY) / df$Y) * 100
print(df$predE)
#Why should we assume that the effects of different independent variables on the expected value of the dependent variable are additive?
#Again, this is a strong assumption. It implies that the marginal effect of one
#independent variable (i.e., its slope coefficient) does not depend on the
#current values of other independent variables. Why? Because it's conceivable
#that one independent variable could amplify the effect of another, or that its
#effect might vary systematically over time.
#Normal distribution of error: why errors of a linear model are independently and identically are assumed to be normally distributed?
#This assumption is often justified by appeal to the Central Limit Theorem of
#statistics, which states that the sum or average of a sufficiently large number
#of independent random variables--whatever their individual distributions--
#approaches a normal distribution. Much data in business and economics and
#engineering and the natural sciences is obtained by adding or averaging
#numerical measurements performed on many different persons or products or
#locations or time intervals. Insofar as the activities that generate the
#measurements may occur somewhat randomly and somewhat independently,
#we might expect the variations in the totals or averages to be somewhat
#normally distributed.
#Violation of variable interdependence - Durbin-Watson test
# Quadratic data fitted with a straight line, so the residuals are
# strongly autocorrelated and the DW test should flag it.
Y <- c(1, 4, 9, 16, 25, 36, 49, 64, 81, 100)
X <- 1:10
# library() (rather than require()) fails loudly if lmtest is missing.
#install.packages('lmtest',dependencies = TRUE)
library(lmtest)
dat <- data.frame(X, Y)
dwtest(lm(dat$Y ~ dat$X), iterations = 15, exact = TRUE)
#Detection for Heteroscedasticity
#Violations of Homoscedasticity: The assumption of homoscedasticity is central to most of the linear models. Homoscedasticity describes a situation in which the error term (that is, the "noise" or random disturbance in the relationship between the independent variables and the dependent variable) is the same across all values of the independent variables
#The problem that heteroscedasticity presents for regression models is simple.
#A simple linear regression model tries to minimize residuals and in turn
#produce the smallest possible standard errors. By definition OLS regression
#gives equal weight to all observations, but when heteroscedasticity is present
#the cases with larger disturbances have more "pull" than other observations.
#The coefficients from OLS regression where heteroscedasticity is present are
#therefore inefficient but remain unbiased. In this case, weighted least
#squares regression would be more appropriate, as it down weights those
#observations with larger disturbances.
#Diagnoses: To detect heteroscedasticity look at a plot of residual verses predicted values or in case of time series data, a plot of residual vs. time. In our example the residual verses predicted values plot is as below for linear as well as non-linear relationship data:
#How to fix it: Try to apply transformation on dependent as well as independent variables. Example if in the above non-linear data we apply logarithmic transformation then the graph comes out to be as follows post regression. The graph below clearly shows a linear relationship between dependent variable (Y) and independent variable (X) post applying log transformation.
#Violation of Normality: weak assumption if you only want to minimize the
#mean square error.
# Test for normality — Anderson-Darling test.
# Note: the null hypothesis of the AD test is that the data ARE normal.
# Package: https://cran.r-project.org/web/packages/nortest/nortest.pdf
# nortest must be attached BEFORE the first ad.test() call; previously
# ad.test() was invoked before require(nortest), which fails on a fresh
# session.
#install.packages('nortest')
library(nortest)
dat <- rnorm(100)
print(dat)
ad.test(dat)   # normal sample: expect a large p-value
dat2 <- rexp(100)
ad.test(dat2)  # exponential sample: expect a small p-value
#Other Example:
#https://rexplorations.wordpress.com/2015/08/11/normality-tests-in-r/
# Generate 10k points from N(10, 1) and arrange them as a 100 x 100 matrix
x <- rnorm(10000, 10, 1)
dim(x) <- c(100, 100)
# Normal Q-Q plot for the first column, with the reference line
qqnorm(x[, 1])
qqline(x[, 1], col = 2)
# NOTE(review): the previous abline(col = 3) call specified no line
# (no a/b/h/v arguments), so it drew nothing; removed as dead code.
|
67fcc7403214de8c78444a8a0e48ada50c19495d
|
42aa74ccbd4f35997da7fbcceea1b903b3846c63
|
/R/NI.Dorf.calc1.R
|
960dc67d2ce914b5b82d0a1554c13c3b2db40b9d
|
[] |
no_license
|
cran/binGroup2
|
b88543f087ab3bd0a499b87144a1037e8c58a126
|
79be6e46ac7fea730f9c81557f673bc6f6026f67
|
refs/heads/master
| 2022-06-19T12:30:16.516766
| 2022-05-25T10:00:16
| 2022-05-25T10:00:16
| 254,030,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,920
|
r
|
NI.Dorf.calc1.R
|
# Start NI.Dorf.calc1() function
###################################################################
# Brianna Hitt - 12-06-19
# This function is the same as NI.Dorf(), but no longer finds the
# optimal testing configuration. It only calculates operating
# characteristics for a specific testing configuration.
#
# Arguments:
#   p          - disease probability; only p[1] is used (the population
#                is treated as homogeneous, see p.vec below).
#   Se, Sp     - length-2 sensitivity/specificity vectors (one per stage).
#   group.sz   - the first-stage group size I.
#   a          - row indices into the individual test-error matrix for which
#                individual accuracy measures are reported (assumption based
#                on its use with get.unique.index(); confirm against that
#                helper's definition).
#   trace      - accepted for interface compatibility; not used here.
#   print.time - if TRUE, print the elapsed computation time via time.it().
#
# Returns a list with the algorithm label, the input probability and
# accuracy values, the configuration, the expected number of tests (ET),
# ET per individual ("value"), and individual/overall accuracy measures.
NI.Dorf.calc1 <- function(p, Se, Sp, group.sz, a,
                          trace = TRUE, print.time = TRUE, ...) {
  start.time <- proc.time()
  I <- group.sz
  # generate a probability vector for homogeneous population
  p.vec <- rep(x = p[1], times = I)
  order.for.p <- 1:I
  # calculate descriptive measures for two-stage hierarchical testing
  save.info <- hierarchical.desc2(p = p.vec[order.for.p],
                                  se = Se, sp = Sp, I2 = NULL,
                                  order.p = FALSE)
  # extract ET, PSe, PSp and calculate the MAR function
  ET <- save.info$ET
  # for non-informative Dorfman (two-stage hierarchical) testing,
  # all individuals have the same testing accuracy measures
  check <- check.all.equal(save.info$individual.testerror,
                           which(colnames(save.info$individual.testerror) == "psp.vec"))
  if (is.null(check)) {
    ind.testerror <- get.unique.index(save.info$individual.testerror[a, ],
                                      which(colnames(save.info$individual.testerror) == "psp.vec"),
                                      rowlabel = a)[, -1]
  } else {
    ind.testerror <- check[, -1]
  }
  # Fixed label: was "PSP", inconsistent with the "PSp" spelling used for
  # the overall accuracy matrix (acc.ET) below.
  colnames(ind.testerror) <- c("PSe", "PSp", "PPPV", "PNPV", "individuals")
  group.testerror <- save.info$group.testerror
  names(group.testerror) <- NULL
  PSe <- group.testerror[1]
  PSp <- group.testerror[2]
  PPPV <- group.testerror[3]
  PNPV <- group.testerror[4]
  # order: probability, group size, ET, ET per individual, accuracy measures
  save.it <- c(p[1], I, ET, ET / I, PSe, PSp, PPPV, PNPV)
  # put accuracy measures in a matrix for easier display of results
  acc.ET <- matrix(data = save.it[5:8], nrow = 1, ncol = 4,
                   dimnames = list(NULL, c("PSe", "PSp", "PPPV", "PNPV")))
  # create input accuracy value matrices for output display
  Se.display <- matrix(data = Se, nrow = 1, ncol = 2,
                       dimnames = list(NULL, "Stage" = 1:2))
  Sp.display <- matrix(data = Sp, nrow = 1, ncol = 2,
                       dimnames = list(NULL, "Stage" = 1:2))
  # print time elapsed, if print.time == TRUE
  if (print.time) {
    time.it(start.time)
  }
  list("algorithm" = "Non-informative two-stage hierarchical testing",
       "prob" = p[1], "Se" = Se.display, "Sp" = Sp.display,
       "Config" = list("Stage1" = save.it[2]), "p.vec" = p.vec,
       "ET" = save.it[3], "value" = save.it[4],
       "Accuracy" = list("Individual" = ind.testerror, "Overall" = acc.ET))
}
###################################################################
|
c10be4bb42d256e40bcaa3e28f370b45c4ca6885
|
1f7b39f59c95b4741f9940a1556dab9062ad0783
|
/man/create_table.Rd
|
37e5b260d24a0ad71616d13e48706fbddeb308a5
|
[] |
no_license
|
Nardus/SCRCdataAPI
|
d755157e957ee1347b4d91de9d0e6ceaac2f9551
|
75159024829df574b13ec63ad994f19a82573c00
|
refs/heads/master
| 2022-10-13T10:00:46.332331
| 2020-06-09T18:07:17
| 2020-06-09T18:07:17
| 271,031,395
| 0
| 0
| null | 2020-06-09T14:52:44
| 2020-06-09T14:52:43
| null |
UTF-8
|
R
| false
| true
| 692
|
rd
|
create_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_table.R
\name{create_table}
\alias{create_table}
\title{create_table}
\usage{
create_table(h5filename, component, df, row_title, row_names, column_units)
}
\arguments{
\item{h5filename}{a \code{string} specifying the name of the hdf5 file}
\item{component}{a \code{string} specifying a location within the hdf5 file}
\item{df}{a \code{dataframe} containing the data}
\item{row_title}{a \code{string} descriptor of rownames}
\item{row_names}{a \code{vector} of rownames}
\item{column_units}{a \code{vector} comprising column units}
}
\description{
Function to populate an hdf5 file with table type data.
}
|
82389405ebafee4337fdd7b99193122b3cbf5d88
|
0e9a5fab0cad29b979043d1168b15a6d87fdc0f2
|
/inst/cps_acs_processing/src/pre_shiny_app_processing.R
|
748263ce0232dc57b2ca9b837f6396c13e5d4b1a
|
[] |
no_license
|
economic-opportunity/eoq
|
509271e64c64a0ef912ab2f1ccfc83fdb1bfda47
|
a681aadaf55064623b6cb180ed11508a3e85972e
|
refs/heads/master
| 2023-04-01T06:17:28.786556
| 2021-03-22T00:31:53
| 2021-03-22T00:31:53
| 205,065,863
| 0
| 0
| null | 2021-03-07T17:59:44
| 2019-08-29T02:42:20
|
R
|
UTF-8
|
R
| false
| false
| 5,821
|
r
|
pre_shiny_app_processing.R
|
# Pre-processing script: downloads cleaned ACS/CPS extracts from Dropbox,
# aggregates them with project helpers (make_age_buckets, clean_*, cube, ...)
# from the `eoq`/`pivotr` packages, and writes summary CSVs for the Shiny app.
# libs --------------------------------------------------------------------
library(shiny)
library(tidyverse)
library(janitor)
library(shinythemes)
library(shinyWidgets)
library(plotly)
library(rjson)
library(pivotr)
library(eoq)
# load --------------------------------------------------------------------
# post processing to make acs and cps smaller
acs_dropbox_link <- "https://www.dropbox.com/s/sg2pjcbr0iyzzvw/ACS_Cleaned.zip?dl=1"
cps_dropbox_link <- "https://www.dropbox.com/s/zgqsb2ckw69putb/CPS_Cleaned.zip?dl=1"
# NOTE(review): bls_dropbox_link is defined but never downloaded or read below.
bls_dropbox_link <- "https://www.dropbox.com/s/agt6mj16d52flhj/lau_unemp_max_month.csv?dl=1"
tmp_dir <- tempdir()
# NOTE(review): "asc" is a typo for "acs"; harmless since the same variable is
# used for both download and read.
acs_tmp <- file.path(tmp_dir, "asc.csv.zip")
cps_tmp <- file.path(tmp_dir, "cps.csv.zip")
download.file(acs_dropbox_link, acs_tmp)
download.file(cps_dropbox_link, cps_tmp)
# readr::read_csv reads the zipped CSVs directly; clean_names() snake_cases
# the column names.
cps <- read_csv(cps_tmp) %>%
  janitor::clean_names()
acs <- read_csv(acs_tmp) %>%
  janitor::clean_names()
# exploration ---------------------------------------------------------------
# NOTE(review): the objects in this section are exploratory; acs_wage below is
# overwritten in the "actual important ones" section, and the others are not
# written out.
acs_wage <- acs %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  make_percentiles(totalwage, age_bucket, education, race_ethnicity, is_male) %>%
  ungroup()
acs_employment <- acs %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  calc_unemployment_rate(employmentstatus, age_bucket, education, race_ethnicity, is_male) %>%
  ungroup()
cps_wage <- cps %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  make_percentiles(hourlywage, age_bucket, education, race_ethnicity, is_male) %>%
  ungroup()
cps_employment <- cps %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  calc_unemployment_rate(employmentstatus, age_bucket, education, race_ethnicity, is_male) %>%
  ungroup()
# Mean wage / unemployment rate by county and occupation (SOC) group.
cps_fips_soc <- cps %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  group_by(countyfips, soc_group_description) %>%
  summarize(wage = mean(hourlywage, na.rm = TRUE),
            unemployment_rate = sum(employmentstatus == "unemployed", na.rm = TRUE) / sum(employmentstatus %in% c("unemployed", "employed"), na.rm = TRUE),
            wage_n = sum(!is.na(hourlywage)),
            emp_n = sum(!is.na(employmentstatus))) %>%
  ungroup()
# Same summary by county and industry (2-digit NAICS).
cps_fips_naics <- cps %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  group_by(countyfips, naics_2digit_label) %>%
  summarize(wage = mean(hourlywage, na.rm = TRUE),
            unemployment_rate = sum(employmentstatus == "unemployed", na.rm = TRUE) / sum(employmentstatus %in% c("unemployed", "employed"), na.rm = TRUE),
            wage_n = sum(!is.na(hourlywage)),
            emp_n = sum(!is.na(employmentstatus))) %>%
  ungroup()
# Same summary by county, occupation, and industry.
cps_fips_soc_naics <- cps %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  group_by(countyfips, soc_group_description, naics_2digit_label) %>%
  summarize(wage = mean(hourlywage, na.rm = TRUE),
            unemployment_rate = sum(employmentstatus == "unemployed", na.rm = TRUE) / sum(employmentstatus %in% c("unemployed", "employed"), na.rm = TRUE),
            wage_n = sum(!is.na(hourlywage)),
            emp_n = sum(!is.na(employmentstatus))) %>%
  ungroup()
# actual important ones ---------------------------------------------------
# Wage percentiles by demographic cell, with "All" margin rows via pivotr::cube.
acs_wage <- acs %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  clean_sex(is_male) %>%
  group_by(age_bucket, education, race_ethnicity, sex) %>%
  mutate(percentile = ntile(totalwage, 100)) %>%
  pivotr::cube(groups = c(age_bucket, education, race_ethnicity, sex, percentile),
               mean = mean(totalwage, na.rm = TRUE),
               n = n(),
               .totals = "All") %>%
  ungroup()
# Unemployment rate by demographic cell; employmentstatus codes 1 = unemployed,
# 2 = employed (presumably — confirm against the cleaned ACS codebook).
acs_unemployment <- acs %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  clean_sex(is_male) %>%
  pivotr::cube(groups = c(age_bucket, education, race_ethnicity, sex),
               unemp_rate = sum(employmentstatus == 1, na.rm = TRUE) / sum(employmentstatus %in% c(1,2), na.rm = TRUE),
               n = n(),
               .totals = "All") %>%
  ungroup()
# Weekly-hours quartiles/99th percentile by demographic cell (CPS).
cps_hours <- cps %>%
  make_age_buckets(age) %>%
  clean_race_ethnicity(racehispanic) %>%
  clean_education(education) %>%
  clean_sex(is_male) %>%
  pivotr::cube(groups = c(age_bucket, education, race_ethnicity, sex),
               p25 = quantile(hoursperweek, .25, na.rm = TRUE),
               p75 = quantile(hoursperweek, .75, na.rm = TRUE),
               p99 = quantile(hoursperweek, .99, na.rm = TRUE),
               .totals = "All") %>%
  ungroup()
# NOTE(review): fips_soc_naics is computed but never written out below.
fips_soc_naics <- acs %>%
  make_age_buckets(age) %>%
  clean_education(education) %>%
  pivotr::cube(groups = c(age_bucket, education, countyfips),
               wage = mean(totalwage, na.rm = TRUE),
               unemp_rate = sum(employmentstatus == 1, na.rm = TRUE) / sum(employmentstatus %in% c(1,2), na.rm = TRUE),
               n = n(),
               .totals = "All") %>%
  ungroup()
# write -------------------------------------------------------------------
# Outputs go to a user-specific Dropbox folder; adjust for your machine.
dropbox_data_filepath <- "~/Dropbox/Economic Opportunity Project/Data/Comparison to Peers/Outputs"
acs_wage_filepath <- file.path(dropbox_data_filepath, "acs_wage.csv")
acs_unemployment_filepath <- file.path(dropbox_data_filepath, "acs_unemployment.csv")
cps_hours_filepath <- file.path(dropbox_data_filepath, "cps_hours.csv")
write_csv(acs_wage, acs_wage_filepath)
write_csv(acs_unemployment, acs_unemployment_filepath)
write_csv(cps_hours, cps_hours_filepath)
|
f0b4d9805687fe07842a3d83384701cd7061f8e1
|
090147049b162664113190e628313626b626214f
|
/ques.R
|
31c265cd67716b6c0931b17752e1cf913dba6c26
|
[] |
no_license
|
a7420174/bsms222_121_kim
|
4e930585d6497e0a0259cb4ed6d5907a4147cf77
|
c42b21d74e30204ca8ba459a903785fea6b0e1e6
|
refs/heads/master
| 2023-01-08T20:34:57.853161
| 2020-11-16T01:22:58
| 2020-11-16T01:22:58
| 292,191,952
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,185
|
r
|
ques.R
|
# Extract the first run of digits from a cDNA label (e.g. "c.123A>T" -> "123").
# Returns NA for elements with no digits (stringr::str_extract semantics).
startd <- function(d) str_extract(d, "\\d+")
# Extract the first run of digits from a protein label (e.g. "p.Arg45Ter" -> "45").
# Behaviorally identical to startd(); delegate to it to avoid duplicating the
# same regex in two places.
startp <- function(p){
  startd(p)
}
# Exploratory plots over the variant table `d` (loaded elsewhere in the
# analysis; assumed to have c.DNA, p.Protein, Effect, SeizureOnsetDays,
# PatientAgeAtAssessment columns — confirm against the loading script).
# First pass: cDNA vs protein position for de novo / inherited missense variants.
d1 <- d[d$Inheritence %in% c("DeNovo", "Inherited") & d$Effect == "Missense",]
dna <- as.numeric(startd(d1$c.DNA))
pro <- as.numeric(startp(d1$p.Protein))
plot(dna, pro)
table(d$Effect)
# Latest seizure onset (days) within three effect classes, one at a time.
max(as.numeric(d$SeizureOnsetDays[d$Effect == "Frameshift"]), na.rm = TRUE)
max(as.numeric(d$SeizureOnsetDays[d$Effect == "Missense"]), na.rm = TRUE)
max(as.numeric(d$SeizureOnsetDays[d$Effect == "Nonsense"]), na.rm = TRUE)
# Same maximum for every effect class at once (digits stripped before coercion).
d %>% group_by(Effect) %>%
  summarise(max = max(as.numeric(gsub('[^0-9]','',SeizureOnsetDays)), na.rm = TRUE))
# Second pass: all missense variants, gsub-based digit extraction.
# NOTE(review): this overwrites the d1 defined above.
d1 <- d[d$Effect == "Missense",]
cDNA <- as.numeric(gsub('[^0-9]','',d1$c.DNA))
protein <- as.numeric(gsub('[^0-9]','',d1$p.Protein))
plot(cDNA,protein)
## Plot the relationship between seizure onset days and age at assessment.
S_Day <- as.numeric(str_extract(d$SeizureOnsetDays, '[0-9]+'))
# Ages containing 'm' are taken to be in months and converted to years.
Age_at_Ass <- ifelse(str_detect(d$PatientAgeAtAssessment, 'm'),
                     as.numeric(str_extract(d$PatientAgeAtAssessment, "[0-9]+"))/12,
                     as.numeric(str_extract(d$PatientAgeAtAssessment, "[0-9]+")))
plot(S_Day,Age_at_Ass, log = "xy")
|
bf15be006e0cc82427b27f3645cdb1972f9de241
|
7e392f3775c687f1184c59ad7c5f5bb5742785a7
|
/Ridge_Lasso.R
|
9326a002365c5d3fdffaaf657ac708ecf5927a52
|
[] |
no_license
|
JIanying-Liang/STSCI4740-Project
|
ebf2d17b7ce14e66020da4dd191aacb4f6f25757
|
a05894474da8585347fe5672dddb739831994224
|
refs/heads/main
| 2023-01-28T09:40:19.194276
| 2020-12-15T07:52:42
| 2020-12-15T07:52:42
| 316,571,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,475
|
r
|
Ridge_Lasso.R
|
# Ridge and lasso multinomial regression on the lung-cancer dataset.
# NOTE(review): hard-coded absolute Windows path — breaks on other machines.
cancer_data = read.csv('D:/AmireuXFaye/Cornell/STSCI4740/cancer_data.csv')
# drop the first (index) column
cancer_data<-cancer_data[,-1]
View(cancer_data)
cancer_data$Level = as.factor(cancer_data$Level)
# Training set
# 80/20 train/test split; columns 1:23 are predictors, column 24 is Level.
set.seed(1)
train.index <- sample(1:nrow(cancer_data),0.8*nrow(cancer_data))
cancer_train <- as.matrix(cancer_data[train.index, 1:23])
cancer_te <- as.matrix(cancer_data[-train.index, 1:23])
level_train <- as.matrix(cancer_data[train.index, 24])
level_te <- as.matrix(cancer_data[-train.index, 24])
library(glmnet)
# Ridge
# NOTE(review): unlike the lasso fit below, this CV does not set
# type.measure = "class", so it minimizes the default deviance instead.
set.seed(1)
cv.out=cv.glmnet(cancer_train,level_train,alpha=0, family = "multinomial")
ridge.bestlam=cv.out$lambda.min
ridge.mod=glmnet(cancer_train,level_train,alpha=0,lambda=ridge.bestlam, family = "multinomial")
# ridge.coef = coef(ridge.mod)[,1]
pred.ridge = predict(ridge.mod, s=ridge.bestlam, newx = cancer_te, type="response")
# predicted class = column of the highest predicted probability per row
pred.level = colnames(pred.ridge)[apply(pred.ridge, 1, which.max)]
# NOTE(review): despite the name, this is classification ACCURACY — the mean of
# squared 0/1 indicators equals the proportion correct — not a mean squared error.
mse.ridge = mean((pred.level==level_te)^2)
mse.ridge # = 0.955
# Lasso
set.seed(1)
cv.out=cv.glmnet(cancer_train,level_train, type.measure = "class", alpha=1, family = "multinomial")
lasso.bestlam = cv.out$lambda.min
# NOTE(review): type.measure is a cv.glmnet() argument; glmnet() silently
# ignores it here.
lasso.mod = glmnet(cancer_train,level_train, type.measure = "class", alpha=1,lambda=lasso.bestlam, family = "multinomial")
pred.lasso = predict(lasso.mod, s=lasso.bestlam, newx = cancer_te, type = "response")
pred.level = colnames(pred.lasso)[apply(pred.lasso, 1, which.max)]
# Accuracy again (see note above), not an MSE.
mse.lasso = mean((pred.level == level_te)^2)
mse.lasso # = 1
|
7a7c753062c36b0083e4904bbf00c048464d4715
|
48c23d3bec5c33d46917bb0aa001607733b82ec5
|
/final_visuals.R
|
74b6e45c9d8f3ebd74fecd8cec4f96d8281cdcee
|
[] |
no_license
|
nienkelegemaate/Fork-Me-Harder
|
72fd84f719b385311da31f2e276342746933ae75
|
53062349127d7ebeb032dfcafe9004eef2e3f298
|
refs/heads/main
| 2023-02-21T06:54:02.288278
| 2021-01-22T16:14:44
| 2021-01-22T16:14:44
| 330,630,407
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,445
|
r
|
final_visuals.R
|
# Visualizations of metadata-attribute prevalence in Wikipedia/DBpedia
# biographies, grouped by gender. Reads gender_df.csv / birth_year.csv from
# the working directory and saves the plots as PDFs.
# Preamble: Load libraries and read in DataFrame
library(tidyverse)
library(ggthemes)
df = read_csv2('gender_df.csv')
# Mutate Columns to indicate 0 (Absence) or 1 (Presence) for each metadata
# attribute of interest and remove rows for which NA values are present
df1 <- df %>%
  mutate(Alma_Mater=ifelse(Alma_Mater=='None',0,1),
         Education=ifelse(Education=='None',0,1),
         Occupation=ifelse(Occupation=='None',0,1),
         Profession=ifelse(Profession=='None',0,1),
         Net_Worth=ifelse(Net_Worth=='None',0,1),
         Known_For=ifelse(Known_For=='None',0,1),
         Relation=ifelse(Relation=='None',0,1),
         Relative=ifelse(Relative=='None',0,1),
         Spouse=ifelse(Spouse=='None',0,1),
         Children=ifelse(Children=='None',0,1),
         Parent=ifelse(Parent=='None',0,1)) %>%
  mutate(Edu_Group=ifelse(Alma_Mater==1|Education==1,1,0), # Mutates new columns for aggregating education, occupation, and family data
         Occ_Group=ifelse(Occupation==1|Profession==1,1,0),
         Family=ifelse(Parent==1|Children==1|Spouse==1|Relative==1|Relation==1,1,0)) %>%
  drop_na()
# Computes Number of Male and Female Entries, and Proportions
gender_num <- group_by(df1, Gender) %>% summarise(Number = n())
# Entries: Males = 534967, Females = 96257
# Proportion: Males = 85%, Females = 15%
# Computes relative prevalence of attributes of interest across all (cleaned)
# DBpedia entries
df_ungrouped <- df1 %>%
  summarise(Perc_Education = sum(Edu_Group)/n(),
            Perc_Occupation = sum(Occ_Group)/n(),
            Perc_NetWorth = sum(Net_Worth)/n(),
            Perc_KnownFor = sum(Known_For)/n(),
            Perc_Family = sum(Family)/n()) %>%
  pivot_longer(c(1, 2, 3, 4, 5),
               names_to = "Labels",
               values_to = "Percentages")
# Plots Ungrouped Attribute Prevalence
ggplot(data = df_ungrouped) +
  aes(x = Labels, y = Percentages) +
  geom_bar(fill = '#666666', position = "dodge", stat = "identity") +
  theme_clean() +
  xlab("Metadata Attributes") +
  ylab("Entries Containing Attribute (%)") +
  scale_x_discrete(labels=c("Education", "Family", "Known For", "Net Worth", "Occupation")) +
  scale_y_continuous(limits = c(0, 0.2), n.breaks = 6, labels = scales::percent) +
  labs(title = "Relative Prevalence of Metadata Attributes in Wikipedia Biographies") +
  geom_hline(yintercept=0.05, color="red") +
  annotate("text", x=3.5, y=0.06, label="Inclusion Threshold", size=3)
ggsave('bargraph_ungrouped.pdf')
# Computes relative prevalence of (cleaned) attributes of interest grouped by gender
df_grouped <- df1 %>%
  group_by(Gender) %>%
  summarise(Perc_Education = sum(Edu_Group)/n(),
            Perc_Occupation = sum(Occ_Group)/n(),
            Perc_Family = sum(Family)/n()) %>%
  pivot_longer(c(2, 3, 4),
               names_to = "Labels",
               values_to = "Percentages")
# Computes relative prevalence of Family-related attributes by gender
df_fam_grouped <- df1 %>%
  group_by(Gender) %>%
  summarise(Perc_Relation = sum(Relation)/n(),
            Perc_Relative = sum(Relative)/n(),
            Perc_Spouse = sum(Spouse)/n(),
            Perc_Children = sum(Children)/n(),
            Perc_Parent = sum(Parent)/n()) %>%
  pivot_longer(c(2, 3, 4,5,6),
               names_to = "Labels",
               values_to = "Percentages")
# Plots Grouped Bar Graphs for Relative Prevalence of Metadata Attributes by Gender
ggplot(data = df_grouped) +
  aes(fill = Gender, x = Labels, y = Percentages) +
  geom_bar(position = "dodge", stat = "identity", ) +
  theme_clean() +
  xlab("Attributes") +
  ylab("Entries Containing Attribute (% by Gender)") +
  scale_x_discrete(labels=c("Education", "Family", "Occupation")) +
  scale_y_continuous(labels = scales::percent) +
  labs(title = "Relative Prevalence of Metadata Attributes by Gender")
ggsave('bargraph_grouped.pdf')
# Plots Grouped Bargraphs for Relative Prevalence of Family-Related Metadata Attributes by Gender
ggplot(data = df_fam_grouped) +
  aes(fill = Gender, x = Labels, y = Percentages) +
  geom_bar(position = "dodge", stat = "identity", ) +
  theme_clean() +
  xlab("Attributes") +
  ylab("Entries Containing Attribute (% by Gender)") +
  scale_x_discrete(labels=c("Children", "Parent", "Relation","Relative","Spouse")) +
  scale_y_continuous(labels = scales::percent) +
  labs(title = "Relative Prevalence of Family-Related Metadata Attributes by Gender")
ggsave('bargraph_fam_grouped.pdf')
# Total DBpedia Entries: 1,517,815
# Cleaned DBpedia Entries (excl. Fictional Characters & Entries w/o birthDate/birthYear): 975,235
# Total Entries Merged Dataset: 631,258
# Plots Normalized Frequency of Male vs Female Articles by Birth Year
# NOTE(review): the second drop_na() below is redundant — nothing between the
# two calls introduces new NAs.
birth_year_df <- read_csv2('birth_year.csv') %>%
  mutate(Birth_Year = as.numeric(Birth_Year)) %>%
  drop_na() %>%
  filter(10 <= Birth_Year & Birth_Year <= 2014) %>%
  drop_na() %>%
  pivot_wider(names_from = Gender, values_from = Birth_Year)
# Each geom_histogram layer normalizes by its own layer total, so the two
# gender distributions are each scaled to sum to 1.
ggplot(data = birth_year_df) +
  geom_histogram(aes(x=MALE, y = stat(count / sum(count)), fill = 'Male'), alpha=0.6, binwidth = 20) +
  geom_histogram(aes(x=FEMALE, y = stat(count / sum(count)), fill = 'Female'), alpha=0.6, binwidth = 20) +
  theme_clean () +
  xlab("Birth Year") +
  ylab("Normalized Frequency") +
  scale_x_continuous(limits = c(1500, 2016)) +
  labs(title = "Distribution of Male vs Female Articles by Birth Year",
       fill="Gender")
ggsave('histogram_gender_birthyear.pdf')
|
a6957d0effc7a2312cf3b7d07bef2c757aeaef12
|
41ad52fe9ff9c3efe3d65e2da9b09fba88e0094e
|
/R/utils-regressions.R
|
d29e7e7761815f74da553fe976b27b474dfc44f2
|
[
"MIT"
] |
permissive
|
AdrienLeGuillou/algUtils
|
31bdda3f7fb70a04e649c0c7312b0ade89cde951
|
a90a7e73e2291019ffeb6a799a110db288d4d351
|
refs/heads/master
| 2020-08-08T17:45:15.778315
| 2020-08-06T16:38:17
| 2020-08-06T16:38:17
| 213,881,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,547
|
r
|
utils-regressions.R
|
# Build the model frame for `fmla` from `df` (dropping rows that are
# incomplete in the model variables) and report how many rows were lost.
pre_reg <- function(df, fmla) {
  model_data <- model.frame(fmla, df)
  dropped <- nrow(df) - nrow(model_data)
  message(dropped, " values removed due to missingness")
  model_data
}
# Render a regression summary data frame as a striped HTML table
# (knitr::kable + kableExtra styling).
kable_reg <- function(df) {
  html_table <- kable(df, format = "html")
  kable_styling(html_table, bootstrap_options = "striped")
}
#' Fit a logistic regression and display an odds-ratio table
#'
#' Rows with missing values in the model variables are dropped first via
#' `pre_reg()` (which messages how many were removed), a binomial GLM is
#' fit, and the coefficient table — intercept excluded — is rendered as a
#' striped HTML table of odds ratios with 95% Wald confidence bounds.
#'
#' @param df A data frame containing the variables in `fmla`.
#' @param fmla A model formula suitable for `glm(family = binomial)`.
#' @param accuracy Rounding accuracy for displayed estimates, passed to
#'   `scales::label_number()` (default 0.001, i.e. three decimal places).
#' @return The HTML table produced by `kable_reg()`.
#' @export
alg_reg_logi <- function(df, fmla, accuracy = 0.001) {
dat <- pre_reg(df, fmla)
mod <- glm(family = binomial, fmla, data = dat)
# formatter applied to the OR and its confidence bounds
frmt_est <- scales::label_number(accuracy)
mod %>%
broom::tidy() %>%
mutate(
# 95% Wald interval computed on the log-odds scale, then exponentiated
IC_low = frmt_est(exp(estimate - 1.96 * std.error)),
IC_hig = frmt_est(exp(estimate + 1.96 * std.error)),
OR = frmt_est(exp(estimate)),
# frmt_pvalue() is defined elsewhere in this package
p.value = frmt_pvalue(p.value)
) %>%
select(term, OR, IC_low, IC_hig, p.value) %>%
filter(term != "(Intercept)") %>%
kable_reg()
}
#' Fit a linear regression and display a coefficient table
#'
#' Rows with missing values in the model variables are dropped first via
#' `pre_reg()` (which messages how many were removed), an OLS model is fit
#' with `lm()`, and the coefficient table — intercept excluded — is rendered
#' as a striped HTML table with 95% Wald confidence bounds.
#'
#' @param df A data frame containing the variables in `fmla`.
#' @param fmla A model formula with a numeric outcome.
#' @param accuracy Rounding accuracy for displayed estimates, passed to
#'   `scales::label_number()` (default 0.001, i.e. three decimal places).
#' @return The HTML table produced by `kable_reg()`.
#' @export
alg_reg_lm <- function(df, fmla, accuracy = 0.001) {
dat <- pre_reg(df, fmla)
mod <- lm(fmla, data = dat)
# formatter applied to the estimate and its confidence bounds
frmt_est <- scales::label_number(accuracy)
mod %>%
broom::tidy() %>%
mutate(
# 95% Wald interval on the response scale (no exponentiation, unlike
# alg_reg_logi)
IC_low = frmt_est(estimate - 1.96 * std.error),
IC_hig = frmt_est(estimate + 1.96 * std.error),
estimate = frmt_est(estimate),
# frmt_pvalue() is defined elsewhere in this package
p.value = frmt_pvalue(p.value)
) %>%
select(term, estimate, IC_low, IC_hig, p.value) %>%
filter(term != "(Intercept)") %>%
kable_reg()
}
#' Automatically choose linear or logistic regression
#'
#' Dispatches on the type of the outcome variable (the left-hand side of
#' `fmla`): numeric outcomes go to `alg_reg_lm()`, anything else to
#' `alg_reg_logi()`. Arguments are forwarded unchanged.
#' @export
alg_reg_auto <- function(df, fmla, accuracy = 0.001) {
  outcome_name <- as.character(fmla[[2]])
  if (is.numeric(df[[outcome_name]])) {
    return(alg_reg_lm(df, fmla, accuracy))
  }
  alg_reg_logi(df, fmla, accuracy)
}
|
1358f7c5e93e005a9e70e89819f436fa712e41b8
|
8902a1139209246adc0c2a7f6c50f01318b36e99
|
/studies/prefSamplingNew.R
|
534d839eb21b7999c5a02ba2fd68919ccea53e08
|
[] |
no_license
|
brianconroy/dataInt
|
d948f7e476303f070b3ba061ee6329af1d077a5d
|
d4b261577b9aec781b2810d215f04f77e7176a5c
|
refs/heads/master
| 2021-05-13T11:34:23.702692
| 2019-10-31T22:36:18
| 2019-10-31T22:36:18
| 117,129,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,490
|
r
|
prefSamplingNew.R
|
library(plyr)
library(R.utils)
library(ggplot2)
library(gridExtra)
# Project helpers (simulation, samplers, summaries) are sourced from a
# hard-coded path — adjust for your machine.
sourceDirectory('Documents/research/dataInt/R/')
#########################
# Tunes the preferential
# sampling Bayesian model
# Updates locational parameters
# based on the joint likelihood,
# not merely the locational
# component
#########################
#######
# Setup
#######
#### Worldclim data
wc <- readWC()
ca <- getState('california')
caWin <- as.owin(ca)
caWc <- getStateWC(ca, wc)
#### Discretize the study region
simRegion <- discretizeSimRegion(caWin, caWc, factor=2)
W <- simRegion$W
caWc.disc <- simRegion$raster
#### Simulate survey locations
# Bernoulli sampling of cells driven by raster layer 6; seed fixed for
# reproducibility.
beta.samp <- c(-1.5, 1)
loc.disc <- caWc.disc[[c(6)]]
locs <- simBernoulliLoc(loc.disc, beta.samp, seed=42)
sum(locs$status)
#### Disease covariate surface
cov.disc <- caWc.disc[[c(12)]]
#### Visualize surfaces
pal1 <- colorRampPalette(c("blue","red"))
pal2 <- colorRampPalette(c("blue","green"))
# raw covariates
par(mfrow=c(1,2))
plot(cov.disc, main='A)', col=pal1(8))
plot(loc.disc, main='C)', col=pal2(8))
# sampling intensity
par(mfrow=c(1,1))
prob.rast <- loc.disc
prob.rast[!is.na(values(loc.disc))] <- locs$probs
plot(prob.rast, col=pal2(8))
points(locs$coords, pch=16)
#### True disease parameter values
beta.case <- c(2, 2)
results.glm <- list()
counter <- 1
for (a in c(0, 0.15, 0.5, 2, 10, 20)){
M <- 500
samples.1 <- array(NA, c(M, 2))
samples.2 <- array(NA, c(M, 2 + length(names(loc.disc))))
samples.loc <- array(NA, c(M, length(names(loc.disc))))
for (i in 1:M){
data.a <- simLocCond(cov.disc, loc.disc, beta.case, beta.samp, a)
locs.x.sub <- data.a$loc$x.scaled[as.logical(data.a$loc$status)]
mod.1 <- glm(data.a$conditional$y ~ data.a$conditional$x.standardised - 1, family='poisson')
mod.2 <- glm(data.a$conditional$y ~ data.a$conditional$x.standardised + locs.x.sub - 1, family='poisson')
mod.loc <- glm(data.a$loc$status ~ data.a$loc$x.scaled-1, family='binomial')
samples.1[i,] <- mod.1$coefficients
samples.2[i,] <- mod.2$coefficients
samples.loc[i,] <- mod.loc$coefficients
}
results.1 <- summarizeSamps(samples.1, beta.case)
results.1$model <- 'glm'
results.1$a <- a
results.glm[[counter]] <- results.1
counter <- counter + 1
results.2 <- summarizeSamps(samples.2, beta.case)
results.2$model <- 'glm location'
results.2$a <- a
results.glm[[counter]] <- results.2
counter <- counter + 1
}
df.glm <- ldply(results.glm, data.frame)
write.table(df.glm[c('a', 'model', 'mu.b0', 'sd.b0', 'pbias.b0',
'mu.b1', 'sd.b1', 'pbias.b1')],
file='Documents/research/dataInt/pS_iterate_a_pos.csv', sep=',', row.names=F)
df.glm.1 <- df.glm[df.glm$model == 'glm',]
df.glm.2 <- df.glm[df.glm$model == 'glm location', ]
reformat <- function(df){
df.glm.b0 <- df[,c('a', 'mu.b0', 'sd.b0', 'pbias.b0')]
df.glm.b1 <- df[,c('a', 'mu.b1', 'sd.b1', 'pbias.b1')]
df.glm.b0$parameter <- 'beta0.cond'
df.glm.b1$parameter <- 'beta1.cond'
names(df.glm.b0) <- c('a', 'mu', 'sd', 'pbias', 'parameter')
names(df.glm.b1) <- names(df.glm.b0)
df.glm.new <- rbind(df.glm.b0, df.glm.b1)
return(df.glm.new)
}
df.glm.new1 <- reformat(df.glm.1)
df.glm.new2 <- reformat(df.glm.2)
p1 <- ggplot(data=df.glm.new1, aes(x=a, y=df.glm.new1$pbias, group=parameter))+
geom_line(aes(color=parameter))+
geom_point(aes(color=parameter))+
labs(x ='alpha')+
labs(y='percent bias')+
ggtitle('C)')
p2 <- ggplot(data=df.glm.new2, aes(x=a, y=df.glm.new2$pbias, group=parameter))+
geom_line(aes(color=parameter))+
geom_point(aes(color=parameter))+
labs(x ='alpha')+
labs(y='percent bias')+
ggtitle('D)')
grid.arrange(p1, p2, ncol=2)
#### chosen alpha
alpha <- 3
#### Simulate counts given locations
count.data <- simConditional(cov.disc, locs, beta.case, beta.samp, alpha, seed=42)
data <- list(conditional=count.data, loc=locs)
# Quick visual checks: counts scaled as point sizes over each surface.
par(mfrow=c(1,1))
plot(loc.disc)
points(locs$coords, cex=count.data$y/40)
plot(cov.disc)
points(locs$coords, cex=count.data$y/35)
hist(count.data$y)
mean(count.data$y)
##############################
## Preferential Sampling Model
##############################
# MCMC fit; locational parameters updated from the JOINT likelihood
# (this is what distinguishes prefSampleNew from prefSample below).
output <- prefSampleNew(
  data, n.sample=500000, burnin=20000, thin=1,
  proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
  proposal.sd.alpha=0.05, self.tune=TRUE
)
truevals <- list(
  beta0.loc=beta.samp[1],
  beta1.loc=beta.samp[2],
  beta0=beta.case[1],
  beta1=beta.case[2],
  alpha=alpha
)
print(output$accept)
par(mfrow=c(2,3))
viewOutput(output, type='preferential.alpha', truevals, view.hist=FALSE)
prefSummary <- summarize(output, truevals, dic=FALSE)
df.ps <- ldply(prefSummary, data.frame)
df.ps$model <- 'PS'
df.ps <- df.ps[c('model', 'parameter', 'posterior.mean', 'posterior.sd', 'percbias')]
names(df.ps) <- c('model', 'parameter', 'estimate', 'sd', 'pbias')
###################################
# Compare against model which
# updates locational parameters
# only from the location likelihood
###################################
output.loc <- prefSample(
  data, n.sample=500000, burnin=20000, thin=1,
  proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
  proposal.sd.alpha=0.05, self.tune=TRUE
)
print(output.loc$accept)
par(mfrow=c(2,2))
viewOutput(output.loc, type='preferential.alpha', truevals, view.hist=FALSE)
prefSummaryLoc <- summarize(output.loc, truevals, dic=FALSE)
df.ps.loc <- ldply(prefSummaryLoc, data.frame)
df.ps.loc$model <- 'PS loc'
df.ps.loc <- df.ps.loc[c('model', 'parameter', 'posterior.mean', 'posterior.sd', 'percbias')]
names(df.ps.loc) <- c('model', 'parameter', 'estimate', 'sd', 'pbias')
# Side-by-side comparison table, ordered by parameter.
df.comp <- rbind(df.ps, df.ps.loc)
df.comp <- df.comp[with(df.comp, order(parameter)),]
write.table(df.comp, file='Documents/research/dataInt/pS_summary_comp1.csv', sep=',', row.names=F)
######################
## Iterate models over
## various alpha
######################
# Fit both samplers at each preferential-sampling strength and accumulate
# percent-bias summaries for plotting.
summaries <- data.frame()
for (a in c(1, 2, 4, 6, 8)){
  print(a)
  count.data <- simConditional(cov.disc, locs, beta.case, beta.samp, a)
  data <- list(conditional=count.data, loc=locs)
  output.a <- prefSampleNew(
    data, n.sample=300000, burnin=10000, thin=1,
    proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
    proposal.sd.alpha=0.05, self.tune=TRUE
  )
  output.a.loc <- prefSample(
    data, n.sample=300000, burnin=10000, thin=1,
    proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
    proposal.sd.alpha=0.05, self.tune=TRUE
  )
  truevals <- list(
    beta0.loc=beta.samp[1],
    beta1.loc=beta.samp[2],
    beta0=beta.case[1],
    beta1=beta.case[2],
    alpha=a
  )
  viewOutput(output.a, type='preferential.alpha', truevals)
  prefSummary <- summarize(output.a, truevals, dic=FALSE)
  df.ps <- ldply(prefSummary, data.frame)
  df.ps$alpha <- a
  df.ps$model <- 'PS'
  summaries <- rbind(summaries, df.ps)
  prefSummaryLoc <- summarize(output.a.loc, truevals, dic=FALSE)
  df.ps.loc <- ldply(prefSummaryLoc, data.frame)
  df.ps.loc$alpha <- a
  df.ps.loc$model <- 'PS loc'
  summaries <- rbind(summaries, df.ps.loc)
}
# One panel per parameter: percent bias vs alpha for the joint-update (black)
# and location-only (red) samplers; dashed line marks zero bias.
par(mfrow=c(2,3))
for (p in unique(summaries$parameter)){
  summaries.p <- summaries[summaries$parameter == p,]
  summaries.p.joint <- summaries.p[summaries.p$model == 'PS',]
  summaries.p.loc <- summaries.p[summaries.p$model == 'PS loc',]
  # y-limits padded 10% beyond the observed bias range
  uy <- max(1.1 * max(summaries.p.joint$percbias),
            1.1 * max(summaries.p.loc$percbias),
            5)
  ly <- min(0,
            1.1 * min(summaries.p.joint$percbias),
            1.1 * min(summaries.p.loc$percbias))
  plot(x=summaries.p.joint$alpha, y=summaries.p.joint$percbias, ylim=c(ly, uy), main=p)
  lines(x=summaries.p.joint$alpha, y=summaries.p.joint$percbias, type='l')
  points(x=summaries.p.loc$alpha, y=summaries.p.loc$percbias, ylim=c(ly, uy), main=p)
  lines(x=summaries.p.loc$alpha, y=summaries.p.loc$percbias, type='l', col='2')
  abline(h=0, lty=2)
}
#################################
# Now consider a shared covariate
#################################
#### Simulate survey locations
beta.samp <- c(-3, 1, 1)
loc.disc <- caWc.disc[[c(6, 15)]]
locs <- simBernoulliLoc(loc.disc, beta.samp, seed=42)
sum(locs$status)
#### Disease covariate surface
beta.case <- c(1, 1, 2)
cov.disc <- caWc.disc[[c(12, 15)]]
#### chosen alpha
alpha <- 2
#### Simulate counts given locations
count.data <- simConditional(cov.disc, locs, beta.case, beta.samp, alpha, seed=42)
data <- list(conditional=count.data, loc=locs)
output <- prefSampleNew(
data, n.sample=500000, burnin=20000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
truevals <- list(
beta0.loc=beta.samp[1],
beta1.loc=beta.samp[2],
beta2.loc=beta.samp[3],
beta0=beta.case[1],
beta1=beta.case[2],
beta2=beta.case[3],
alpha=alpha
)
print(output$accept)
par(mfrow=c(2,4))
viewOutput(output, type='preferential.alpha', truevals, view.hist=FALSE)
prefSummary <- summarize(output, truevals, dic=FALSE)
df.ps <- ldply(prefSummary, data.frame)
df.ps$model <- 'PS'
df.ps <- df.ps[c('model', 'parameter', 'posterior.mean', 'posterior.sd', 'percbias')]
names(df.ps) <- c('model', 'parameter', 'estimate', 'sd', 'pbias')
#### Compare against original
output.loc <- prefSample(
data, n.sample=500000, burnin=20000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
prefSummaryLoc <- summarize(output.loc, truevals, dic=FALSE)
df.ps.loc <- ldply(prefSummaryLoc, data.frame)
df.ps.loc$model <- 'PS loc'
df.ps.loc <- df.ps.loc[c('model', 'parameter', 'posterior.mean', 'posterior.sd', 'percbias')]
names(df.ps.loc) <- c('model', 'parameter', 'estimate', 'sd', 'pbias')
df.comp <- rbind(df.ps, df.ps.loc)
df.comp <- df.comp[with(df.comp, order(parameter)),]
######################
## Iterate models over
## various alpha
######################
summaries <- data.frame()
for (a in c(1, 2, 4, 6, 8)){
print(a)
count.data <- simConditional(cov.disc, locs, beta.case, beta.samp, a)
data <- list(conditional=count.data, loc=locs)
output.a <- prefSampleNew(
data, n.sample=300000, burnin=10000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
output.a.loc <- prefSample(
data, n.sample=300000, burnin=10000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
truevals <- list(
beta0.loc=beta.samp[1],
beta1.loc=beta.samp[2],
beta0=beta.case[1],
beta1=beta.case[2],
alpha=a
)
viewOutput(output.a, type='preferential.alpha', truevals)
prefSummary <- summarize(output.a, truevals, dic=FALSE)
df.ps <- ldply(prefSummary, data.frame)
df.ps$alpha <- a
df.ps$model <- 'PS'
summaries <- rbind(summaries, df.ps)
prefSummaryLoc <- summarize(output.a.loc, truevals, dic=FALSE)
df.ps.loc <- ldply(prefSummaryLoc, data.frame)
df.ps.loc$alpha <- a
df.ps.loc$model <- 'PS loc'
summaries <- rbind(summaries, df.ps.loc)
}
# One panel per parameter: percent bias vs alpha for the joint PS model
# (black points/line) and the location-only PS model (red line).
par(mfrow=c(2,3))
for (param in unique(summaries$parameter)) {
  sub.p <- summaries[summaries$parameter == param, ]
  joint <- sub.p[sub.p$model == 'PS', ]
  loconly <- sub.p[sub.p$model == 'PS loc', ]
  # Pad limits by 10%, always spanning at least down to 0 and up to 5.
  bias.all <- c(joint$percbias, loconly$percbias)
  uy <- max(1.1 * bias.all, 5)
  ly <- min(1.1 * bias.all, 0)
  plot(x = joint$alpha, y = joint$percbias, ylim = c(ly, uy), main = param)
  lines(x = joint$alpha, y = joint$percbias, type = 'l')
  points(x = loconly$alpha, y = loconly$percbias, ylim = c(ly, uy), main = param)
  lines(x = loconly$alpha, y = loconly$percbias, type = 'l', col = '2')
  abline(h = 0, lty = 2)
}
########################
## Iterate models over
## locational intercepts
## (i.e. number of surveys)
########################
# Vary the sampling-process intercept: lower beta.loc0 yields fewer surveyed
# locations (nsamp), probing sensitivity to sample size at fixed alpha = 2.
a <- 2
summaries <- data.frame()
for (beta.loc0 in c(-4, -3, -2, -1, 1)){
print(beta.loc0)
beta.samp <- c(beta.loc0, 1)
# Re-draw the survey-location pattern for this intercept (fixed seed).
locs <- simBernoulliLoc(loc.disc, beta.samp, seed=42)
nsamp <- sum(locs$status)
count.data <- simConditional(cov.disc, locs, beta.case, beta.samp, a)
data <- list(conditional=count.data, loc=locs)
output.a <- prefSampleNew(
data, n.sample=300000, burnin=10000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
output.a.loc <- prefSample(
data, n.sample=300000, burnin=10000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
truevals <- list(
beta0.loc=beta.samp[1],
beta1.loc=beta.samp[2],
beta0=beta.case[1],
beta1=beta.case[2],
alpha=a
)
# Summaries keyed on realised sample size n rather than on the intercept.
prefSummary <- summarize(output.a, truevals, dic=FALSE)
df.ps <- ldply(prefSummary, data.frame)
df.ps$n <- nsamp
df.ps$model <- 'PS'
summaries <- rbind(summaries, df.ps)
prefSummaryLoc <- summarize(output.a.loc, truevals, dic=FALSE)
df.ps.loc <- ldply(prefSummaryLoc, data.frame)
df.ps.loc$n <- nsamp
df.ps.loc$model <- 'PS loc'
summaries <- rbind(summaries, df.ps.loc)
}
# One panel per parameter: percent bias vs realised sample size n for the
# joint PS model (black) and the location-only PS model (red line).
par(mfrow=c(2,3))
for (param in unique(summaries$parameter)) {
  sub.p <- summaries[summaries$parameter == param, ]
  joint <- sub.p[sub.p$model == 'PS', ]
  loconly <- sub.p[sub.p$model == 'PS loc', ]
  # Pad limits by 10%, always spanning at least down to 0 and up to 5.
  bias.all <- c(joint$percbias, loconly$percbias)
  uy <- max(1.1 * bias.all, 5)
  ly <- min(1.1 * bias.all, 0)
  plot(x = joint$n, y = joint$percbias, ylim = c(ly, uy), main = param)
  lines(x = joint$n, y = joint$percbias, type = 'l')
  points(x = loconly$n, y = loconly$percbias, ylim = c(ly, uy), main = param)
  lines(x = loconly$n, y = loconly$percbias, type = 'l', col = '2')
  abline(h = 0, lty = 2)
}
########################
## and the same for the
## shared covariate
## scenario
########################
# Here the sampling and disease processes share a covariate layer (raster 15),
# so beta.samp / beta.case each carry an extra slope.
#### Simulate survey locations
beta.samp <- c(-3, 1, 1)
loc.disc <- caWc.disc[[c(6, 15)]]
#### Disease covariate surface
beta.case <- c(1, 1, 2)
cov.disc <- caWc.disc[[c(12, 15)]]
a <- 2
summaries <- data.frame()
for (beta.loc0 in c(-4, -3, -2, -1, 1)){
print(beta.loc0)
beta.samp <- c(beta.loc0, 1, 1)
# Re-draw the survey-location pattern for this intercept (fixed seed).
locs <- simBernoulliLoc(loc.disc, beta.samp, seed=42)
nsamp <- sum(locs$status)
count.data <- simConditional(cov.disc, locs, beta.case, beta.samp, a)
data <- list(conditional=count.data, loc=locs)
output.a <- prefSampleNew(
data, n.sample=300000, burnin=10000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
output.a.loc <- prefSample(
data, n.sample=300000, burnin=10000, thin=1,
proposal.sd.beta.c=0.01, proposal.sd.beta.l=0.05,
proposal.sd.alpha=0.05, self.tune=TRUE
)
truevals <- list(
beta0.loc=beta.samp[1],
beta1.loc=beta.samp[2],
beta0=beta.case[1],
beta1=beta.case[2],
beta2=beta.case[3],
alpha=a
)
prefSummary <- summarize(output.a, truevals, dic=FALSE)
df.ps <- ldply(prefSummary, data.frame)
df.ps$n <- nsamp
df.ps$model <- 'PS'
summaries <- rbind(summaries, df.ps)
prefSummaryLoc <- summarize(output.a.loc, truevals, dic=FALSE)
df.ps.loc <- ldply(prefSummaryLoc, data.frame)
df.ps.loc$n <- nsamp
df.ps.loc$model <- 'PS loc'
summaries <- rbind(summaries, df.ps.loc)
}
# Shared-covariate scenario: percent bias vs realised sample size n,
# joint PS model (black) vs location-only PS model (red line), per parameter.
par(mfrow=c(2,3))
for (param in unique(summaries$parameter)) {
  sub.p <- summaries[summaries$parameter == param, ]
  joint <- sub.p[sub.p$model == 'PS', ]
  loconly <- sub.p[sub.p$model == 'PS loc', ]
  # Pad limits by 10%, always spanning at least down to 0 and up to 5.
  bias.all <- c(joint$percbias, loconly$percbias)
  uy <- max(1.1 * bias.all, 5)
  ly <- min(1.1 * bias.all, 0)
  plot(x = joint$n, y = joint$percbias, ylim = c(ly, uy), main = param)
  lines(x = joint$n, y = joint$percbias, type = 'l')
  points(x = loconly$n, y = loconly$percbias, ylim = c(ly, uy), main = param)
  lines(x = loconly$n, y = loconly$percbias, type = 'l', col = '2')
  abline(h = 0, lty = 2)
}
|
9f12b71fcd3f5f2f686097c13db13aa8a1231247
|
8d0118a2259b00b5988373f43cebb4c28e498118
|
/scripts/DataExp.R
|
2bc6fef08e1ec7c77d19c730178ed221c349543c
|
[] |
no_license
|
FarMar/ForestSoils
|
b8be387d1481498fa5b7e1ebb3d43877358ce37a
|
10c4f6a7513e365574f807efbef4e60885706bc6
|
refs/heads/main
| 2023-06-24T02:42:17.966607
| 2021-07-30T11:50:15
| 2021-07-30T11:50:15
| 317,782,768
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64,540
|
r
|
DataExp.R
|
#####################################################################################################
#### Forest soils data exploration ###################
#### mark.farrell@csiro.au +61 8 8303 8664 18/03/2021 ################################
#####################################################################################################
#### Set working directory ####
# NOTE(review): machine-specific absolute path; consider an RStudio project or
# the here package so the script is portable.
setwd("/Users/markfarrell/OneDrive - CSIRO/Data/ForestSoils")
#### Packages ####
# NOTE(review): install.packages() re-installs on every run; consider guarding
# with requireNamespace() checks. Left as-is.
install.packages("PerformanceAnalytics")
install.packages("corrplot")
install.packages("RColorBrewer")
install.packages("plotrix")
library(tidyverse)
library(janitor)
library(PerformanceAnalytics)
library(corrplot)
library(RColorBrewer)
library(plotrix)
library(lubridate)
library(ggpmisc)
library(vegan)
library(ape)
library(RVAideMemoire)
library(BiodiversityR)
library(ggbluebadge)
library(magrittr)
#### Import data ####
# Main chemistry table (outliers removed). For every table below: rank plots
# within each transect by relative height (PlotPos; 1 = highest) and convert
# the ID/design columns to factors.
OL_cor <- read_csv("data/processed/ChemAll_adm_OLrem.csv")
OL_cor <- OL_cor %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos), as.factor)) %>%
mutate(Date = dmy(Date))
str(OL_cor)
# Per-plot summary table (means etc. across sampling dates).
t1_summary <- read_csv("data/processed/summary.csv")
t1_summary <- t1_summary %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos), as.factor))
str(t1_summary)
t1_summary <- t1_summary %>%
relocate(where(is.character))
# PLFA (microbial lipid) data; sampling dates are binned into named periods
# spanning the flood event.
plfa <- read_csv("data/working/MasterFieldDataFC_NSW - PLFAs.csv")
plfa <- plfa %>%
mutate(Date = dmy(Date)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date) %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos, "Sampling Period"), as.factor))
# Put the period levels in chronological (not alphabetical) order.
plfa <- plfa %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
str(plfa)
# Chemistry table with PLFA columns merged in; same tidying as above.
OLP_cor <- read_csv("data/processed/ChemAll_adm_OLremPLFA.csv")
OLP_cor <- OLP_cor %>%
mutate(Date = dmy(Date)) %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date) %>%
relocate(PlotPos, .after = Plot) %>%
mutate(across(c(CombID, UniqueID, PrelimID, Transect, Plot, Inun, PlotPos, "Sampling Period"), as.factor))
str(OLP_cor)
#### Initial facet plot for proteolysis ####
# Facet strip labels: factor levels "0".."9" shown as "Transect 100".."Transect 109".
facetlabs <- setNames(paste("Transect", 100:109), as.character(0:9))
# Proteolysis through time, coloured by plot position, point size ~ moisture,
# one free-scaled panel per transect.
ggplot(data = OL_cor, aes(x = Date, y = Proteolysis, colour = PlotPos)) +
  geom_point(aes(size = Moisture)) +
  geom_line() +
  scale_colour_manual(values = brewer.pal(n = 4, name = "BrBG")) +
  scale_size(range = c(0, 6)) +
  facet_wrap(~ Transect, ncol = 2, scales = 'free',
             labeller = labeller(Transect = facetlabs)) +
  theme_classic() +
  theme(strip.background = element_blank(),
        axis.title.x = element_blank())
#### Looping ####
# limit columns to just the factors required for the plot and the response variables from all five time points
trim <- OL_cor %>% select(UniqueID, Date, Transect, Plot, PlotPos, NDVI, VH, VV, Wet, Moisture, pHw, pHc, EC, AvailP,
DOC, DTN, NO3, NH4, FAA, Proteolysis, AAMin_k1, AAMin_k2, AAMin_a, AAMin_b, DON, MBC,
MBN, MicY, MicCN)
str(trim)
# Named character vectors of response/explanatory column names so purrr::map
# keeps the names on its output list.
#Names for response and explanatory vars
#https://aosmith.rbind.io/2018/08/20/automating-exploratory-plots/
response = names(trim)[6:29]
expl = names(trim)[1:10]
response = set_names(response)
response
expl = set_names(expl)
expl
# Exploratory faceted time-series plot builder over the global `trim` table.
# All arguments are column names given as strings and resolved with the .data
# pronoun: x = x-axis, y = response, z1 = facet variable, z2 = colour, z3 = size.
exp.fun <- function(x, y, z1, z2, z3) {
  p <- ggplot(data = trim, aes(x = .data[[x]], y = .data[[y]], colour = .data[[z2]]))
  p <- p + geom_point(aes(size = .data[[z3]]))
  p <- p + geom_line()
  p <- p + scale_colour_manual(values = brewer.pal(n = 4, name = "BrBG"))
  p <- p + scale_size(range = c(0, 6))
  # labeller won't work with the .data pronoun, so strips keep raw factor levels
  p <- p + facet_wrap(~ .data[[z1]], ncol = 2, scales = 'free')
  p <- p + theme_classic()
  p + theme(strip.background = element_blank(),
            axis.title.x = element_blank())
}
# Smoke-test one plot, then render every response variable to a single PDF.
exp.fun("Date", "Proteolysis", "Transect", "Plot", "Moisture")
exp_plots = map(response, ~exp.fun("Date", .x, "Transect", "Plot", "Moisture") )
pdf("outputs/all_scatterplots.pdf")
# NOTE(review): relies on interactive auto-printing of the list; under source()
# this would need print()/walk() to actually draw the pages.
exp_plots
dev.off()
#### summary plots ####
# Per-transect and overall linear fits of a summary response against relative
# transect height, with the overall equation and R^2 annotated.
my.formula <- y ~ x
ggplot(data = t1_summary) +
geom_point(aes(x = RTHeight, y = Proteolysis_mean, colour = Transect), size = 4) +
stat_smooth(aes(x = RTHeight, y = Proteolysis_mean, colour = Transect), method = lm, se = FALSE, formula = my.formula, linetype = "longdash") +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
stat_smooth(aes(x = RTHeight, y = Proteolysis_mean), method = lm, formula = my.formula, colour = "black", size = 2) +
stat_poly_eq(formula = my.formula,
aes(x = RTHeight, y = Proteolysis_mean, label = paste(..eq.label.., ..rr.label.., sep = "~~~")),
parse = TRUE) +
theme_classic() +
theme(strip.background = element_blank())
# Named vectors of summary-table response/explanatory columns for mapping.
response2 = names(t1_summary)[15:164]
expl2 = names(t1_summary)[1:14]
response2 = set_names(response2)
response2
expl2 = set_names(expl2)
expl2
# Scatter + regression plot builder over the global `t1_summary` table.
# x and y are column names as strings; z1 is the grouping/colour column.
# Draws per-group dashed lm fits, one bold overall lm fit, and annotates the
# overall fit's equation and R^2 (my.formula is defined globally above).
exp.fun2 <- function(x, y, z1) {
  g <- ggplot(data = t1_summary)
  g <- g + geom_point(aes(x = .data[[x]], y = .data[[y]], colour = .data[[z1]]), size = 4)
  g <- g + stat_smooth(aes(x = .data[[x]], y = .data[[y]], colour = .data[[z1]]),
                       method = lm, formula = my.formula, se = FALSE, linetype = "longdash")
  g <- g + scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral"))
  g <- g + stat_smooth(aes(x = .data[[x]], y = .data[[y]]),
                       method = lm, formula = my.formula, colour = "black", size = 2)
  g <- g + stat_poly_eq(formula = my.formula,
                        aes(x = .data[[x]], y = .data[[y]],
                            label = paste(..eq.label.., ..rr.label.., sep = "~~~")),
                        parse = TRUE)
  g <- g + theme_classic()
  g + theme(strip.background = element_blank())
}
# Smoke-test one summary plot, then render all summary responses to PDF.
exp.fun2("RTHeight", "Proteolysis_mean", "Transect")
exp_plots2 = map(response2, ~exp.fun2("RTHeight", .x, "Transect") )
pdf("outputs/all_summaries.pdf")
# NOTE(review): relies on interactive auto-printing; use print()/walk() if sourced.
exp_plots2
dev.off()
#### Create date grouping column ####
# Bin OL_cor sampling dates into the same five named periods used for plfa,
# then order the factor levels chronologically.
OL_cor <- OL_cor %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date)
OL_cor$`Sampling Period` <- as.factor(OL_cor$`Sampling Period`)
str(OL_cor)
levels(OL_cor$`Sampling Period`)
OL_cor <- OL_cor %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
#### Data selection ####
## Temporal
# Per-sample time-series variables (chemistry + remote sensing) for the
# repeated-measures analyses.
temporal <- OL_cor %>%
select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun,
NDVI, VH, VV, Wet, Moisture, pHc, EC, AvailP,
DOC, DTN, NO3, NH4, FAA, Proteolysis, AAMin_k1, DON, MBC,
MBN, MicY, MicCN)
## Biogeochem
# Per-plot means plus static soil properties for the biogeochemistry ordinations.
bgc_mean <- t1_summary %>%
select(UniqueID, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun,
Clay, CEC, WHC, BD0_30, NDVI_mean, Wet_mean, Moisture_mean, pHc_mean, EC_mean, AvailP_mean, CN_mean, Vuln_mean,
d13C_mean, d15N_mean, DOC_mean, NO3_mean, NH4_mean, FAA_mean, Proteolysis_mean,
AAMin_k1_mean, DON_mean, MBC_mean, MBN_mean, MicY_mean)
##Temporal + PLFA
# Same temporal variables with the PLFA community summary metrics appended.
temporalP <- OLP_cor %>%
select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun,
NDVI, VH, VV, Wet, Moisture, pHc, EC, AvailP,
DOC, DTN, NO3, NH4, FAA, Proteolysis, AAMin_k1, DON, MBC,
MBN, MicY, MicCN, TotalPLFA, F_B, Gp_Gn, Act_Gp)
#### MIR ####
# MIR import
# Mid-infrared spectra: one row per sample, spectral columns plus metadata.
mir <- read_csv("data/working/MasterFieldDataFC_NSW - MIR_raw.csv")
cols_condense(mir)
dim(mir)
# Same tidying as the chemistry tables: PlotPos rank + sampling-period bins.
mir <- mir %>%
group_by(Transect) %>%
mutate(PlotPos = dense_rank(desc(RTHeight))) %>%
ungroup() %>%
relocate(PlotPos, .after = Plot) %>%
mutate("Sampling Period" = case_when(
Date >= as_date("2019-03-25") & Date <= as_date("2019-03-28") ~ "Autumn 2019",
Date >= as_date("2019-07-29") & Date <= as_date("2019-07-31") ~ "Winter 2019",
Date >= as_date("2019-11-04") & Date <= as_date("2019-11-06") ~ "At flooding",
Date >= as_date("2020-02-03") & Date <= as_date("2020-02-05") ~ "3 months post flood",
Date >= as_date("2020-10-13") & Date <= as_date("2020-10-15") ~ "11 months post flood"
)
) %>%
relocate("Sampling Period", .after = Date)
mir$`Sampling Period` <- as.factor(mir$`Sampling Period`)
str(mir)
levels(mir$`Sampling Period`)
mir <- mir %>%
mutate(`Sampling Period` = fct_relevel(`Sampling Period`, #remember the back-ticks (would probably have solved factor palaver too)
"Autumn 2019",
"Winter 2019",
"At flooding",
"3 months post flood",
"11 months post flood"
))
# initial check plot
spec <- mir %>%
select(2, 27:1997)
waves <- seq(7999.27979, 401.121063, by = -3.8569)
# NOTE(review): this complex assignment appears to leave spec's column names
# unchanged (assigning back into a column subset keeps the target's names),
# so the matplot below relies on the original read_csv headers -- confirm intent.
colnames(spec[,2:1972]) <- waves
matplot(x = waves,
y = t(spec[2:1972]),
ylim = c(0, 3.5),
type = "l",
lty = 1,
main = "Raw spectra",
xlab = "Wavenumber (cm-1)",
ylab = "Absorbance",
col = rep(palette(), each = 3)
)
# Interpolation
# Wrap the spectra in a hyperSpec object and loess-resample onto a regular
# 6000..600 cm-1 grid at 4 cm-1 resolution.
mirinterp <- spec
mirinterp1 <- new("hyperSpec", # makes the hyperspec object
spc = mirinterp[, grep('[[:digit:]]', colnames(mirinterp))],
wavelength = as.numeric(colnames(mirinterp)[grep ('[[:digit:]]', colnames(mirinterp))]),
label = list(.wavelength = "Wavenumber",
spc = "Intensity"))
mirinterp3 <- hyperSpec::spc.loess(mirinterp1, c(seq(6000, 600, -4)))
# plot(mirinterp3, "spc", wl.reverse = T, col = rep(palette(), each = 3))
output <- mirinterp3[[]]
waves_l <- seq(6000, 600, by = -4)
colnames(output) <- waves_l
ID <- as.data.frame(mir$UniqueID)
final <- cbind(ID, output) #This is now the re-sampled df. Still needs baselining.
matplot(x = waves_l, y = t(final[,2:1352]), ylim=c(0,3), type = "l", lty = 1,
main = "Absorbance - 600 to 6000 & reample with resolution of 4", xlab = "Wavelength (nm)",
ylab = "Absorbance", col = rep(palette(), each = 3))
# baseline offset
#' Baseline-offset correction for a spectra table.
#'
#' Shifts each row (one spectrum) so that its minimum absorbance is zero.
#' If the first row's first value is smaller than its last value, the column
#' order is reversed first. NOTE(review): that reversal heuristic compares
#' absorbance *values*, not wavenumbers -- behaviour preserved as-is, but
#' confirm it matches the intended wavenumber-ordering check.
#'
#' @param spectra numeric matrix or data frame; rows = spectra, cols = wavenumbers.
#' @return numeric matrix with the same dimensions, row minima subtracted.
spoffs2 <- function(spectra) {
  if (missing(spectra)) {
    stop("No spectral data provided")
  }
  spectra <- as.matrix(spectra)
  if (spectra[1, 1] < spectra[1, ncol(spectra)]) {
    spectra <- t(apply(spectra, 1, rev))
  }
  # Subtract each row's minimum in one vectorised step; replaces the original
  # element-wise loop and its redundant rbind()/drop-first-row bookkeeping.
  sweep(spectra, 1, apply(spectra, 1, min), "-")
}
# Apply the baseline-offset correction to the resampled spectra and sanity-check.
spec_a_bc_d <- spoffs2(final[,2:1352])
dim(spec_a_bc_d)
head(spec_a_bc_d)
waves_ss <- seq(600, 6000, by=4)
matplot(x = waves_ss, y = t(spec_a_bc_d), ylim=c(0,2), xlim=rev(c(600, 6000)), type = "l", lty = 1,
main = "Absorbance - baseline corrected", xlab = expression("Wavenumber" ~ (cm^{-1})),
ylab = "Absorbance", col = rep(palette(), each = 3))
finalb <- cbind(ID, spec_a_bc_d) %>% #This is now the baselined and re-sampled df.
rename(UniqueID = "mir$UniqueID")
# combine data
# Join the processed spectra onto sample metadata from `temporal` by UniqueID.
mir_meta <- temporal %>%
select(UniqueID, Date, `Sampling Period`, Transect, Plot, PlotPos, Easting, Northing, Height, RHeight, RTHeight, Inun, Moisture)
mir_proc <- left_join(mir_meta, finalb, by = "UniqueID")
## Multivariate Exploration and Analysis
## MIR
# Prep
# Fourth-root transform (after +10 shift) of the 1351 spectral columns.
tmir <- mir_proc %>%
mutate(across(c(14:1364), ~((.+10)^(1/4))))
# Z-score standardisation: centre to mean 0, scale to unit sd.
# na.rm = FALSE preserves the original behaviour (any NA propagates to the
# whole result); pass na.rm = TRUE to standardise on complete values only.
z.fn <- function(x, na.rm = FALSE) {
  (x - mean(x, na.rm = na.rm)) / sd(x, na.rm = na.rm)
}
# Standardise spectral columns, split metadata (fmir) from data (dmir), then
# ordinate with PCoA on Manhattan distances.
stmir <- tmir %>%
mutate(across(c(14:1364), ~z.fn(.)))
fmir <- stmir %>%
select(1:13)
# NOTE(review): dmir selects 14:1363 while the transform above covered
# 14:1364 -- the last spectral column is dropped here; confirm intentional.
dmir <- stmir %>%
select(14:1363)
distmir <- vegdist(dmir, method = "manhattan", na.rm = TRUE)
pmir <- pcoa(distmir)
pmir$values$Relative_eig[1:10]
barplot(pmir$values$Relative_eig[1:10])
mir_points <- bind_cols(fmir, (as.data.frame(pmir$vectors)))
# Plot
ggplot(mir_points) +
geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "PCoA Axis 1; 81.0%",
y = "PCoA Axis 2; 7.9%")
# Permanova
# Global and pairwise PERMANOVA tests for transect, sampling period, height.
set.seed(1983)
perm_mir <- adonis2(distmir~Transect*`Sampling Period`, data = stmir, permutations = 9999, method = "manhattan")
perm_mir #strong impact of transect, weak of sampling time
permpt_mir <- pairwise.perm.manova(distmir, stmir$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_mir
permpd_mir <- pairwise.perm.manova(distmir, stmir$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpd_mir #sniff of significance for last sampling vs 1st three samplings
perm_mirh <- adonis2(distmir~Transect*RTHeight, data = stmir, permutations = 9999, method = "manhattan")
perm_mirh #strong height interaction
# CAP by transect
# Constrained ordination (canonical analysis of principal coordinates).
stmir <- as.data.frame(stmir)
cap_mirt <- CAPdiscrim(distmir~Transect, data = stmir, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
round(cap_mirt$F/sum(cap_mirt$F), digits=3)
barplot(cap_mirt$F/sum(cap_mirt$F))
cap_mirt_points <- bind_cols((as.data.frame(cap_mirt$x)), fmir)
glimpse(cap_mirt_points)
ggplot(cap_mirt_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
labs(
x = "CAP Axis 1; 41.2%",
y = "CAP Axis 2; 35.3%")
# CAP + spider
# Spider (centroid) overlay for the MIR CAP-by-Transect ordination: each point
# is linked to its transect centroid.
# Fix: the segment endpoints were built from `cent`, which is never defined in
# this script (stale workspace object); use `mir_cent` computed just above.
mir_cent <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_mirt_points, FUN = mean)
mir_segs <- merge(cap_mirt_points, setNames(mir_cent, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_mirt_points) +
  geom_point(aes(x = LD1, y = LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .7) +
  geom_segment(data = mir_segs, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
  geom_point(data = mir_cent, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  labs(
    x = "CAP Axis 1; 41.2%",
    y = "CAP Axis 2; 35.3%")
#### BGC ####
# Pre-prep: PCA of total elements to reduce dimensions before the ordination.
tot_elms <- t1_summary %>%
select(47:66) %>%
select(!c(As, B, Cd, Mo, Sb, Se))
chart.Correlation(tot_elms)
# Log-transform the right-skewed elements before PCA.
ttot_elms <- tot_elms %>%
mutate(P = log1p(P),
Na = log1p(Na),
Mg = log1p(Mg),
K = log1p(K),
Co = log1p(Co),
Ca = log1p(Ca))
chart.Correlation(ttot_elms)
pca_elms <- princomp(ttot_elms, cor = TRUE, scores = TRUE)
biplot(pca_elms, choices = c(1,2))
summary(pca_elms) #PC1 = 59.2%, PC2 = 11.7%
# Keep only the first two element PCs as composite variables.
scores_elms <- as.data.frame(pca_elms[["scores"]]) %>%
select(1:2)
#prep
# Append element PCs, log-transform skewed means, standardise, and split
# metadata (fbgc) from the ordination data (dbgc).
bgc_mean <- cbind(bgc_mean, scores_elms)
bgc_cor <- select(bgc_mean, 11:36)
chart.Correlation(bgc_cor, histogram=TRUE, pch=19)
tbgc_mean <- bgc_mean %>%
mutate(MBN_mean = log1p(MBN_mean),
NH4_mean = log1p(NH4_mean),
AvailP_mean = log1p(AvailP_mean),
EC_mean = log1p(EC_mean),
pHc_mean = log1p(pHc_mean),
BD0_30 = log1p(BD0_30))
stbgc_mean <- tbgc_mean %>%
mutate(across(c(11:36), ~z.fn(.)))
fbgc <- stbgc_mean %>%
select(1:10)
dbgc <- stbgc_mean %>%
select(11:36)
# PCoA
# Unconstrained ordination on Euclidean distances of the standardised data.
distbgc <- vegdist(dbgc, method = "euclidean", na.rm = TRUE)
pbgc <- pcoa(distbgc)
pbgc$values$Relative_eig[1:10]
barplot(pbgc$values$Relative_eig[1:10])
bgc_points <- bind_cols(fbgc, (as.data.frame(pbgc$vectors)))
# Project the original variables onto the PCoA axes as biplot arrows.
#
# Computes the covariance between each original column and the standardised
# ordination scores, rescales by the leading (positive) eigenvalues, and
# stores the arrow coordinates in given_pcoa$U.
#
# given_pcoa: result of ape::pcoa() (needs $vectors and $values$Eigenvalues).
# orig_df:    the data used to build the distance matrix (columns = variables).
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of each variable with every axis (variables x axes).
  S <- cov(orig_df, points.stand)
  # Keep only the leading eigenvalues, one per retained axis.
  pos_eigen <- given_pcoa$values$Eigenvalues[seq(ncol(S))]
  # nrow= guards the single-axis case: diag(scalar) would otherwise build a
  # (truncated-size) identity matrix instead of a 1x1 scaling matrix.
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5), nrow = ncol(S))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  return(given_pcoa)
}
# Biplot arrows for the BGC PCoA (arbitrary x10 scaling for display).
pbgc = compute.arrows(pbgc, dbgc)
pbgc_arrows_df <- as.data.frame(pbgc$U*10) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
# Plot
ggplot(bgc_points) +
geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = pbgc_arrows_df,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = Axis.1, yend = Axis.2),
arrow = arrow(length = unit(3, "mm"))) +
ggrepel::geom_text_repel(data = pbgc_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "PCoA Axis 1; 25.6%",
y = "PCoA Axis 2; 16.2%")
# Permanova
# Global and pairwise tests for transect and plot-position effects.
set.seed(1983)
perm_bgc <- adonis2(distbgc~Transect+PlotPos, data = stbgc_mean, permutations = 9999, method = "euclidean")
perm_bgc #strong impact of transect and plot
permpt_bgc <- pairwise.perm.manova(distbgc, stbgc_mean$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_bgc #.098 is lowest possible - several pairwise comps have this
permpp_bgc <- pairwise.perm.manova(distbgc, stbgc_mean$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_bgc #4 is sig diff from 1&2. 3 borderline diff from 1&2. 1 borderline diff from 2
# CAP by transect
# Constrained ordination on Transect, with variable correlation vectors.
stbgc_mean <- as.data.frame(stbgc_mean)
cap_bgct <- CAPdiscrim(distbgc~Transect, data = stbgc_mean, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 999)
cap_bgct <- add.spec.scores(cap_bgct, dbgc, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_bgct$F/sum(cap_bgct$F), digits=3)
barplot(cap_bgct$F/sum(cap_bgct$F))
cap_bgct_points <- bind_cols((as.data.frame(cap_bgct$x)), fbgc)
glimpse(cap_bgct_points)
cap_bgct_arrows <- as.data.frame(cap_bgct$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
ggplot(cap_bgct_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgct_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgct_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 56.7%",
y = "CAP Axis 2; 23.0%")
# CAP by transect + spider
# Same ordination with points tied to their transect centroids.
bgc_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_bgct_points, FUN = mean)
bgc_segst <- merge(cap_bgct_points, setNames(bgc_centt, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_bgct_points) +
geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .6) +
geom_segment(data = bgc_segst, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
geom_point(data = bgc_centt, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgct_arrows,
x = 0, y = 0, alpha = 0.3,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgct_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 56.7%",
y = "CAP Axis 2; 23.0%")
# CAP by plotpos
# Constrained ordination on plot position (m = 3 retained PCoA axes).
stbgc_mean <- as.data.frame(stbgc_mean)
cap_bgcp <- CAPdiscrim(distbgc~PlotPos, data = stbgc_mean, axes = 10, m = 3, mmax = 10, add = FALSE, permutations = 999)
cap_bgcp <- add.spec.scores(cap_bgcp, dbgc, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_bgcp$F/sum(cap_bgcp$F), digits=3)
barplot(cap_bgcp$F/sum(cap_bgcp$F))
cap_bgcp_points <- bind_cols((as.data.frame(cap_bgcp$x)), fbgc)
glimpse(cap_bgcp_points)
cap_bgcp_arrows <- as.data.frame(cap_bgcp$cproj*3) %>% #Pulls object from list, scales arbitrarily and makes a new df
rownames_to_column("variable")
ggplot(cap_bgcp_points) +
geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
theme_classic() +
theme(strip.background = element_blank()) +
geom_segment(data = cap_bgcp_arrows,
x = 0, y = 0, alpha = 0.7,
mapping = aes(xend = LD1, yend = LD2),
arrow = arrow(length = unit(2, "mm"))) +
ggrepel::geom_text_repel(data = cap_bgcp_arrows, aes(x=LD1, y=LD2, label = variable),
# colour = "#72177a",
size = 4
) +
labs(
x = "CAP Axis 1; 76.3%",
y = "CAP Axis 2; 23.7%")
# CAP by plot + spider
# Spider overlay for the PlotPos-constrained BGC CAP ordination.
# NOTE(review): the ordination above was constrained on PlotPos, but the
# centroids/segments here are grouped by Plot -- confirm this is intentional.
bgc_centp <- aggregate(cbind(LD1, LD2) ~ Plot, data = cap_bgcp_points, FUN = mean)
bgc_segsp <- merge(cap_bgcp_points, setNames(bgc_centp, c('Plot', 'oLD1', 'oLD2')), by = 'Plot', sort = FALSE)
ggplot(cap_bgcp_points) +
  geom_point(aes(x = LD1, y = LD2, colour = Plot), size = 3, alpha = .6) +
  geom_segment(data = bgc_segsp, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Plot), alpha = .9, size = .3) +
  geom_point(data = bgc_centp, mapping = aes(x = LD1, y = LD2, colour = Plot), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_bgcp_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_bgcp_arrows, aes(x = LD1, y = LD2, label = variable),
                           size = 4
  ) +
  labs(
    # Fixed: axis percentages were copy-pasted from the Transect CAP
    # (56.7%/23.0%); they now match the cap_bgcp axis shares reported above.
    x = "CAP Axis 1; 76.3%",
    y = "CAP Axis 2; 23.7%")
#### Temporal ####
# Data for this are in `temporal`
glimpse(temporal)
# Reorder columns and set inundation factor levels yes / maybe / no.
temporal %<>% relocate(Inun, .after = PlotPos)
temporal <- temporal %>%
mutate(Inun = fct_relevel(`Inun`,
"y",
"m",
"n"))
# Quick correlation plot for evaluation
chart.Correlation(temporal[, 8:32], histogram = TRUE, pch = 19)
# Drop and transform
# Drop redundant radar/DTN columns and log-transform the right-skewed variables.
ttemporal <- temporal %>%
select(-c(VH, VV, DTN)) %>%
mutate(across(c(Moisture, pHc, EC, AvailP, NO3, NH4, FAA, Proteolysis, DON, MBC, MBN, MicCN), ~log1p(.)))
chart.Correlation(ttemporal[, 8:29], histogram = TRUE, pch = 19)
#prep
# Drop incomplete rows, standardise the response columns, and split metadata
# (ftemp) from the ordination data (dtemp).
sttemporal <- ttemporal %>%
drop_na() %>%
mutate(across(c(13:29), ~z.fn(.)))
ftemp <- sttemporal %>%
select(1:12)
dtemp <- sttemporal %>%
select(13:29)
#PCoA
disttemp <- vegdist(dtemp, method = "euclidean", na.rm = TRUE)
ptemp <- pcoa(disttemp)
ptemp$values$Relative_eig[1:10]
barplot(ptemp$values$Relative_eig[1:10])
temp_points <- bind_cols(ftemp, (as.data.frame(ptemp$vectors)))
# Compute biplot arrow coordinates for a PCoA ordination.
#
# given_pcoa: result of ape::pcoa() — a list carrying $vectors (sample
#             scores) and $values$Eigenvalues.
# orig_df:    the original numeric data used to build the distance matrix;
#             rows must correspond to the rows of given_pcoa$vectors.
#             (Can be subset beforehand to restrict arrows to chosen columns.)
#
# Returns given_pcoa with an added $U matrix: covariance of every original
# variable with every axis, standardised by the axis eigenvalues.
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of each variable with all axes (variables x axes).
  S <- cov(orig_df, points.stand)
  # NOTE(review): this takes the *first* ncol(S) eigenvalues — the largest,
  # but not guaranteed positive as the original comment claimed.
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # nrow = length(...) guards the diag() scalar footgun: with a single axis,
  # diag(x) on a scalar would build an identity matrix instead of a 1x1.
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5), nrow = length(pos_eigen))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
# Attach arrow coordinates, then plot the temporal PCoA three ways and test
# group differences with PERMANOVA.
ptemp = compute.arrows(ptemp, dtemp)
ptemp_arrows_df <- as.data.frame(ptemp$U*10) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
# Plot
# PCoA coloured by transect, shaped by sampling period.
ggplot(temp_points) + #Some separation by date, transect# seems noisy
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = `Sampling Period`), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = ptemp_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = ptemp_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 19.7%",
    y = "PCoA Axis 2; 17.0%")
# Same ordination coloured by plot position instead of transect.
ggplot(temp_points) + #A bit more informative, definite axis1 trend of transect. Date clustering a bit more obvious
  geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = `Sampling Period`), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = ptemp_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = ptemp_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 19.7%",
    y = "PCoA Axis 2; 17.0%")
# Coloured by plot position, shaped by inundation status.
ggplot(temp_points) + #Seems to clearly show separation
  geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = Inun), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  scale_shape_manual(values = c(15, 18, 0)) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = ptemp_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = ptemp_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 19.7%",
    y = "PCoA Axis 2; 17.0%")
# Permanova
# Seed fixed so permutation p-values are reproducible.
set.seed(1983)
perm_temptp <- adonis2(disttemp~Transect*`Sampling Period`, data = sttemporal, permutations = 9999, method = "euclidean")
perm_temptp #strong impact of transect and sampling period, no interaction
perm_temppp <- adonis2(disttemp~PlotPos*`Sampling Period`, data = sttemporal, permutations = 9999, method = "euclidean")
perm_temppp #strong impact of plot position and sampling period, no interaction
perm_temptpp <- adonis2(disttemp~Transect+PlotPos+`Sampling Period`, data = sttemporal, permutations = 9999, method = "euclidean")
perm_temptpp #strong impact of transect, plot position and sampling period in additive model
# Pairwise follow-ups with FDR correction.
permpt_temp <- pairwise.perm.manova(disttemp, sttemporal$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_temp #All differ except 0&8, 3&9, 5&7
permpp_temp <- pairwise.perm.manova(disttemp, sttemporal$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_temp #All differ except 2&3
# NOTE(review): this overwrites the PlotPos result above with the
# Sampling Period result — consider a distinct name (e.g. permps_temp).
permpp_temp <- pairwise.perm.manova(disttemp, sttemporal$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_temp #All differ
# CAP by transect
# Canonical Analysis of Principal coordinates (CAPdiscrim) constrained on
# Transect, with correlation-based species scores added for the arrows.
sttemporal <- as.data.frame(sttemporal)  # CAPdiscrim expects a data.frame, not a tibble
cap_tempt <- CAPdiscrim(disttemp~Transect, data = sttemporal, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
# NOTE(review): Rscale = F uses the reassignable shorthand; FALSE is safer.
cap_tempt <- add.spec.scores(cap_tempt, dtemp, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
# Proportion of among-group variation per canonical axis.
round(cap_tempt$F/sum(cap_tempt$F), digits=3)
barplot(cap_tempt$F/sum(cap_tempt$F))
cap_tempt_points <- bind_cols((as.data.frame(cap_tempt$x)), ftemp)
glimpse(cap_tempt_points)
cap_tempt_arrows <- as.data.frame(cap_tempt$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
# CAP plot: samples coloured by transect, shaped by plot position.
ggplot(cap_tempt_points) +
  geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_tempt_arrows,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_tempt_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 62.1%",
    y = "CAP Axis 2; 18.5%")
# CAP by transect + spider
# Per-transect centroids; each sample joined to its centroid by a segment.
temp_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_tempt_points, FUN = mean)
temp_segst <- merge(cap_tempt_points, setNames(temp_centt, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_tempt_points) +
  geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .6) +
  geom_segment(data = temp_segst, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
  geom_point(data = temp_centt, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_tempt_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_tempt_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 62.1%",
    y = "CAP Axis 2; 18.5%")
# CAP by plotpos
# Same CAP workflow as above, constrained on plot position instead.
cap_tempp <- CAPdiscrim(disttemp~PlotPos, data = sttemporal, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_tempp <- add.spec.scores(cap_tempp, dtemp, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_tempp$F/sum(cap_tempp$F), digits=3)
barplot(cap_tempp$F/sum(cap_tempp$F))
cap_tempp_points <- bind_cols((as.data.frame(cap_tempp$x)), ftemp)
glimpse(cap_tempp_points)
cap_tempp_arrows <- as.data.frame(cap_tempp$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
ggplot(cap_tempp_points) +
  geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_tempp_arrows,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_tempp_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 79.5%",
    y = "CAP Axis 2; 20.0%")
# CAP by plot + spider
# Per-plot-position centroids plus sample-to-centroid segments.
temp_centp <- aggregate(cbind(LD1, LD2) ~ PlotPos, data = cap_tempp_points, FUN = mean)
temp_segsp <- merge(cap_tempp_points, setNames(temp_centp, c('PlotPos', 'oLD1', 'oLD2')), by = 'PlotPos', sort = FALSE)
ggplot(cap_tempp_points) +
  geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 3, alpha = .6) +
  geom_segment(data = temp_segsp, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = PlotPos), alpha = .9, size = .3) +
  geom_point(data = temp_centp, mapping = aes(x = LD1, y = LD2, colour = PlotPos), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_tempp_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_tempp_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 79.5%",
    y = "CAP Axis 2; 20.0%")
# CAP by SamplingPeriod
# CAP workflow constrained on sampling period (backticked: name has a space).
cap_tempps <- CAPdiscrim(disttemp~`Sampling Period`, data = sttemporal, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_tempps <- add.spec.scores(cap_tempps, dtemp, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_tempps$F/sum(cap_tempps$F), digits=3)
barplot(cap_tempps$F/sum(cap_tempps$F))
cap_tempps_points <- bind_cols((as.data.frame(cap_tempps$x)), ftemp)
glimpse(cap_tempps_points)
cap_tempps_arrows <- as.data.frame(cap_tempps$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
ggplot(cap_tempps_points) +
  geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`), size = 4) +
  scale_colour_manual(values = brewer.pal(n = 6, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_tempps_arrows,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_tempps_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 66.8%",
    y = "CAP Axis 2; 21.3%")
# CAP by SamplingPeriod + spider
temp_centps <- aggregate(cbind(LD1, LD2) ~ `Sampling Period`, data = cap_tempps_points, FUN = mean)
temp_segsps <- merge(cap_tempps_points, setNames(temp_centps, c('Sampling Period', 'oLD1', 'oLD2')), by = 'Sampling Period', sort = FALSE)
ggplot(cap_tempps_points) +
  geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`), size = 3, alpha = .6) +
  geom_segment(data = temp_segsps, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = `Sampling Period`), alpha = .9, size = .3) +
  geom_point(data = temp_centps, mapping = aes(x = LD1, y = LD2, colour = `Sampling Period`), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 6, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_tempps_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_tempps_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 66.8%",
    y = "CAP Axis 2; 21.3%")
#### PLFAs ####
# PLFA (phospholipid fatty acid) community data: tidy, split by sampling
# period, and separate design columns from response columns.
# Data for this are in `plfa`
glimpse(plfa) #remember these have been standardised by total plfa already
plfa %<>% relocate(Inun, .after = PlotPos)
# Fix inundation level order, same as the temporal data set.
plfa <- plfa %>%
  mutate(Inun = fct_relevel(`Inun`,
                            "y",
                            "m",
                            "n"))
# Quick correlation plot for evaluation
chart.Correlation(plfa[, 15:41], histogram = TRUE, pch = 19)
# Per-sampling-period subsets (1-5 in chronological order).
stplfa1 <- filter(plfa, `Sampling Period` == "Autumn 2019")
stplfa2 <- filter(plfa, `Sampling Period` == "Winter 2019")
stplfa3 <- filter(plfa, `Sampling Period` == "At flooding")
stplfa4 <- filter(plfa, `Sampling Period` == "3 months post flood")
stplfa5 <- filter(plfa, `Sampling Period` == "11 months post flood")
#prep
# Columns 1:14 are design/factor columns (f*), 15:41 are PLFA responses (d*).
stplfa <- plfa
fplfa <- stplfa %>%
  select(1:14)
dplfa <- stplfa %>%
  select(15:41)
fplfa1 <- stplfa1 %>%
  select(1:14)
dplfa1 <- stplfa1 %>%
  select(15:41)
fplfa2 <- stplfa2 %>%
  select(1:14)
dplfa2 <- stplfa2 %>%
  select(15:41)
fplfa3 <- stplfa3 %>%
  select(1:14)
dplfa3 <- stplfa3 %>%
  select(15:41)
fplfa4 <- stplfa4 %>%
  select(1:14)
dplfa4 <- stplfa4 %>%
  select(15:41)
fplfa5 <- stplfa5 %>%
  select(1:14)
dplfa5 <- stplfa5 %>%
  select(15:41)
#PCoA
# Euclidean distances and PCoA for the full PLFA data set and each of the
# five sampling-period subsets.
distplfa <- vegdist(dplfa, method = "euclidean", na.rm = TRUE)
distplfa1 <- vegdist(dplfa1, method = "euclidean", na.rm = TRUE)
distplfa2 <- vegdist(dplfa2, method = "euclidean", na.rm = TRUE)
distplfa3 <- vegdist(dplfa3, method = "euclidean", na.rm = TRUE)
distplfa4 <- vegdist(dplfa4, method = "euclidean", na.rm = TRUE)
distplfa5 <- vegdist(dplfa5, method = "euclidean", na.rm = TRUE)
pplfa <- pcoa(distplfa)
pplfa1 <- pcoa(distplfa1)
pplfa2 <- pcoa(distplfa2)
pplfa3 <- pcoa(distplfa3)
pplfa4 <- pcoa(distplfa4)
pplfa5 <- pcoa(distplfa5)
# Relative eigenvalues = proportion of variation captured by each axis.
pplfa$values$Relative_eig[1:10]
pplfa1$values$Relative_eig[1:10]
pplfa2$values$Relative_eig[1:10]
pplfa3$values$Relative_eig[1:10]
pplfa4$values$Relative_eig[1:10]
pplfa5$values$Relative_eig[1:10]
barplot(pplfa$values$Relative_eig[1:10])
# Join sample scores back onto design columns for plotting.
plfa_points <- bind_cols(fplfa, (as.data.frame(pplfa$vectors)))
plfa_points1 <- bind_cols(fplfa1, (as.data.frame(pplfa1$vectors)))
plfa_points2 <- bind_cols(fplfa2, (as.data.frame(pplfa2$vectors)))
plfa_points3 <- bind_cols(fplfa3, (as.data.frame(pplfa3$vectors)))
plfa_points4 <- bind_cols(fplfa4, (as.data.frame(pplfa4$vectors)))
plfa_points5 <- bind_cols(fplfa5, (as.data.frame(pplfa5$vectors)))
# Re-definition of compute.arrows (identical logic is defined earlier in this
# script; kept here so the PLFA section runs standalone).
# Computes biplot arrow coordinates for a PCoA: the eigenvalue-standardised
# covariance of each original variable with each ordination axis, stored as $U.
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  points.stand <- scale(given_pcoa$vectors)
  # Covariance of each variable with all axes (variables x axes).
  S <- cov(orig_df, points.stand)
  # First ncol(S) eigenvalues (largest, not guaranteed positive).
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # nrow = length(...) avoids diag()'s scalar footgun when only one axis exists.
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5), nrow = length(pos_eigen))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
# Attach arrow coordinates to each PCoA, then build plotting data frames.
# Arrow scaling factors (*.05 to *.2) were chosen by eye per plot.
pplfa = compute.arrows(pplfa, dplfa)
pplfa1 = compute.arrows(pplfa1, dplfa1)
pplfa2 = compute.arrows(pplfa2, dplfa2)
pplfa3 = compute.arrows(pplfa3, dplfa3)
pplfa4 = compute.arrows(pplfa4, dplfa4)
pplfa5 = compute.arrows(pplfa5, dplfa5)
pplfa_arrows_df <- as.data.frame(pplfa$U*.2) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
pplfa1_arrows_df <- as.data.frame(pplfa1$U*.05) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
pplfa2_arrows_df <- as.data.frame(pplfa2$U*.1) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
pplfa3_arrows_df <- as.data.frame(pplfa3$U*.2) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
pplfa4_arrows_df <- as.data.frame(pplfa4$U*.2) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
pplfa5_arrows_df <- as.data.frame(pplfa5$U*.2) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
# Plot
# PCoA biplots: full PLFA data set followed by each sampling period (1-5),
# coloured by transect and shaped by plot position; then the full set again
# coloured by plot position (vs sampling period, vs inundation).
ggplot(plfa_points) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 29.2%",
    y = "PCoA Axis 2; 17.7%")
# Sampling period 1: Autumn 2019.
ggplot(plfa_points1) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa1_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa1_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 41.6%",
    y = "PCoA Axis 2; 25.0%")
# Sampling period 2: Winter 2019.
ggplot(plfa_points2) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa2_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa2_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 33.0%",
    y = "PCoA Axis 2; 20.5%")
# Sampling period 3: at flooding.
ggplot(plfa_points3) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa3_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa3_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 36.1%",
    y = "PCoA Axis 2; 21.2%")
# Sampling period 4: 3 months post flood.
ggplot(plfa_points4) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa4_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa4_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 40.3%",
    y = "PCoA Axis 2; 21.9%")
# Sampling period 5: 11 months post flood.
ggplot(plfa_points5) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = PlotPos), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa5_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa5_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 35.9%",
    y = "PCoA Axis 2; 19.2%")
# Not exactly much clear here
# Full data set again, coloured by plot position.
ggplot(plfa_points) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = `Sampling Period`), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 29.2%",
    y = "PCoA Axis 2; 17.7%")
# Full data set, plot position vs inundation status.
ggplot(plfa_points) +
  geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = Inun), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  scale_shape_manual(values = c(15, 18, 0)) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = pplfa_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = pplfa_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 29.2%",
    y = "PCoA Axis 2; 17.7%")
# Permanova
# PERMANOVA on the full PLFA distance matrix, then per-sampling-period tests
# because of the strong transect x sampling-period interaction.
set.seed(1983)
perm_plfatp <- adonis2(distplfa~Transect*`Sampling Period`, data = stplfa, permutations = 9999, method = "euclidean")
perm_plfatp #strong impact of transect and sampling period, STRONG interaction
perm_plfapp <- adonis2(distplfa~PlotPos*`Sampling Period`, data = stplfa, permutations = 9999, method = "euclidean")
perm_plfapp #strong impact of plot position and sampling period, no interaction
perm_plfatpp <- adonis2(distplfa~Transect*`Sampling Period`+PlotPos, data = stplfa, permutations = 9999, method = "euclidean")
perm_plfatpp #strong impact of transect, plot position, date and transect*date
permpt_plfa <- pairwise.perm.manova(distplfa, stplfa$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_plfa #Driffer: 0&3,9; 1&9; 2&3,8,9; 3&4,6,7,9; 4&8,9; 6&9
permpp_plfa <- pairwise.perm.manova(distplfa, stplfa$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_plfa #Differ: 1&2,3,4; 2&4
# NOTE(review): overwrites the PlotPos pairwise result above — consider a
# distinct name (e.g. permps_plfa) if both are needed later.
permpp_plfa <- pairwise.perm.manova(distplfa, stplfa$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_plfa #1&2,3,4,5; 2&3,4,5
# Split by sampling period required
perm_plfa1tp <- adonis2(distplfa1~Transect+PlotPos, data = stplfa1, permutations = 9999, method = "euclidean")
perm_plfa1tp
permpt_plfa1 <- pairwise.perm.manova(distplfa1, stplfa1$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_plfa1 #NS
permpp_plfa1 <- pairwise.perm.manova(distplfa1, stplfa1$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_plfa1 #1 differs from 2 & 4 assuming p <0.1
perm_plfa2tp <- adonis2(distplfa2~Transect+PlotPos, data = stplfa2, permutations = 9999, method = "euclidean")
perm_plfa2tp
permpt_plfa2 <- pairwise.perm.manova(distplfa2, stplfa2$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_plfa2 #0 differs from all but site 1
permpp_plfa2 <- pairwise.perm.manova(distplfa2, stplfa2$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_plfa2 #NS
perm_plfa3tp <- adonis2(distplfa3~Transect+PlotPos, data = stplfa3, permutations = 9999, method = "euclidean")
perm_plfa3tp
permpt_plfa3 <- pairwise.perm.manova(distplfa3, stplfa3$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_plfa3 #NS
permpp_plfa3 <- pairwise.perm.manova(distplfa3, stplfa3$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_plfa3 #1&2,3,4; 2&4; 3&4
perm_plfa4tp <- adonis2(distplfa4~Transect+PlotPos, data = stplfa4, permutations = 9999, method = "euclidean")
perm_plfa4tp
permpt_plfa4 <- pairwise.perm.manova(distplfa4, stplfa4$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_plfa4 #0&3,8; 1&5,7,8; 2&3,8; 3&4,5,6,7,8,9; 4&8
permpp_plfa4 <- pairwise.perm.manova(distplfa4, stplfa4$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_plfa4 #1&2,3,4
perm_plfa5tp <- adonis2(distplfa5~Transect+PlotPos, data = stplfa5, permutations = 9999, method = "euclidean")
perm_plfa5tp
permpt_plfa5 <- pairwise.perm.manova(distplfa5, stplfa5$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_plfa5 #??
# CAP by transect - Does not make a huge amount of sense going off the above.
# Instead will incorporate total PLFA, F:B, G+:G-, G+:Actino into temporal data and re-run those analyses tomorrow
#### Temporal + PLFA ####
# Temporal biogeochemistry augmented with summary PLFA ratios: same
# tidy/transform/standardise/PCoA workflow as the Temporal section.
# Data for this are in `temporalP`
glimpse(temporalP)
temporalP %<>% relocate(Inun, .after = PlotPos)
temporalP <- temporalP %>%
  mutate(Inun = fct_relevel(`Inun`,
                            "y",
                            "m",
                            "n"))
# Quick correlation plot for evaluation
chart.Correlation(temporalP[, 8:36], histogram = TRUE, pch = 19)
# Drop and transform
# As before, plus log1p on TotalPLFA and the fungal:bacterial ratio.
ttemporalP <- temporalP %>%
  select(-c(VH, VV, DTN)) %>%
  mutate(across(c(Moisture, pHc, EC, AvailP, NO3, NH4, FAA, Proteolysis, DON, MBC, MBN, MicCN, TotalPLFA, F_B), ~log1p(.)))
chart.Correlation(ttemporalP[, 8:33], histogram = TRUE, pch = 19)
#prep
sttemporalP <- ttemporalP %>%
  drop_na() %>%
  mutate(across(c(13:33), ~z.fn(.)))
ftempP <- sttemporalP %>%
  select(1:12)   # design/factor columns
dtempP <- sttemporalP %>%
  select(13:33)  # standardised responses incl. PLFA summaries
#PCoA
disttempP <- vegdist(dtempP, method = "euclidean", na.rm = TRUE)
ptempP <- pcoa(disttempP)
ptempP$values$Relative_eig[1:10]
barplot(ptempP$values$Relative_eig[1:10])
tempP_points <- bind_cols(ftempP, (as.data.frame(ptempP$vectors)))
# Third (identical) re-definition of compute.arrows, kept so this section
# also runs standalone. See earlier definitions for details.
# Adds $U to a pcoa() result: eigenvalue-standardised covariance of each
# original variable with each ordination axis (biplot arrow coordinates).
compute.arrows <- function(given_pcoa, orig_df) {
  n <- nrow(orig_df)
  points.stand <- scale(given_pcoa$vectors)
  S <- cov(orig_df, points.stand)  # variables x axes covariance
  # First ncol(S) eigenvalues (largest, not guaranteed positive).
  pos_eigen <- given_pcoa$values$Eigenvalues[seq_len(ncol(S))]
  # nrow = length(...) avoids diag()'s scalar footgun for a single axis.
  U <- S %*% diag((pos_eigen / (n - 1))^(-0.5), nrow = length(pos_eigen))
  colnames(U) <- colnames(given_pcoa$vectors)
  given_pcoa$U <- U
  given_pcoa
}
# Attach arrows, plot the Temporal+PLFA PCoA three ways, then PERMANOVA.
ptempP = compute.arrows(ptempP, dtempP)
ptempP_arrows_df <- as.data.frame(ptempP$U*10) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
# Plot
ggplot(tempP_points) + #Some separation by date, transect# seems noisy
  geom_point(aes(x=Axis.1, y=Axis.2, colour = Transect, shape = `Sampling Period`), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = ptempP_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 18.6%",
    y = "PCoA Axis 2; 15.7%")
ggplot(tempP_points) + #A bit more informative, definite axis1 trend of transect. Date clustering a bit more obvious
  geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = `Sampling Period`), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = ptempP_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 18.6%",
    y = "PCoA Axis 2; 15.7%")
ggplot(tempP_points) + #Seems to clearly show separation
  geom_point(aes(x=Axis.1, y=Axis.2, colour = PlotPos, shape = Inun), size = 6) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  scale_shape_manual(values = c(15, 18, 0)) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = ptempP_arrows_df,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = Axis.1, yend = Axis.2),
               arrow = arrow(length = unit(3, "mm"))) +
  ggrepel::geom_text_repel(data = ptempP_arrows_df, aes(x=Axis.1, y=Axis.2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "PCoA Axis 1; 18.6%",
    y = "PCoA Axis 2; 15.7%")
# Permanova
set.seed(1983)
perm_tempPtp <- adonis2(disttempP~Transect*`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPtp #strong impact of transect and sampling period, no interaction
perm_tempPpp <- adonis2(disttempP~PlotPos*`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPpp #strong impact of plot position and sampling period, no interaction
perm_tempPtpp <- adonis2(disttempP~Transect+PlotPos+`Sampling Period`, data = sttemporalP, permutations = 9999, method = "euclidean")
perm_tempPtpp #strong impact of transect, plot position and sampling period in additive model
# Pairwise follow-ups (note: distinct names used for all three here).
permpt_tempP <- pairwise.perm.manova(disttempP, sttemporalP$Transect, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpt_tempP #All differ except 0&8, 1&8, 3&9, 5&7
permpp_tempP <- pairwise.perm.manova(disttempP, sttemporalP$PlotPos, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permpp_tempP #All differ except 2&3
permps_tempP <- pairwise.perm.manova(disttempP, sttemporalP$`Sampling Period`, nperm = 9999, progress = TRUE, p.method = "fdr", F = TRUE, R2 = TRUE)
permps_tempP #All differ
# CAP by transect
# CAP ordinations of the Temporal+PLFA data constrained on transect, plot
# position, and sampling period — each with a plain plot and a spider plot.
sttemporalP <- as.data.frame(sttemporalP)  # CAPdiscrim expects a data.frame
cap_temptP <- CAPdiscrim(disttempP~Transect, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_temptP <- add.spec.scores(cap_temptP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_temptP$F/sum(cap_temptP$F), digits=3)
barplot(cap_temptP$F/sum(cap_temptP$F))
cap_temptP_points <- bind_cols((as.data.frame(cap_temptP$x)), ftempP)
glimpse(cap_temptP_points)
cap_temptP_arrows <- as.data.frame(cap_temptP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
ggplot(cap_temptP_points) +
  geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 4) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_temptP_arrows,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_temptP_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 57.0%",
    y = "CAP Axis 2; 16.7%")
# CAP by transect + spider
temp P_centt <- NULL # (removed) -- see next line
tempP_centt <- aggregate(cbind(LD1, LD2) ~ Transect, data = cap_temptP_points, FUN = mean)
tempP_segst <- merge(cap_temptP_points, setNames(tempP_centt, c('Transect', 'oLD1', 'oLD2')), by = 'Transect', sort = FALSE)
ggplot(cap_temptP_points) +
  geom_point(aes(x=LD1, y=LD2, colour = Transect, shape = PlotPos), size = 3, alpha = .6) +
  geom_segment(data = tempP_segst, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = Transect), alpha = .7, size = .25) +
  geom_point(data = tempP_centt, mapping = aes(x = LD1, y = LD2, colour = Transect), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 10, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_temptP_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_temptP_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 57.0%",
    y = "CAP Axis 2; 16.7%")
# CAP by plotpos
cap_temppP <- CAPdiscrim(disttempP~PlotPos, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_temppP <- add.spec.scores(cap_temppP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_temppP$F/sum(cap_temppP$F), digits=3)
barplot(cap_temppP$F/sum(cap_temppP$F))
cap_temppP_points <- bind_cols((as.data.frame(cap_temppP$x)), ftempP)
glimpse(cap_temppP_points)
cap_temppP_arrows <- as.data.frame(cap_temppP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
ggplot(cap_temppP_points) +
  geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 4) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_temppP_arrows,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_temppP_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 80.2%",
    y = "CAP Axis 2; 18.7%")
# CAP by plot + spider
tempP_centp <- aggregate(cbind(LD1, LD2) ~ PlotPos, data = cap_temppP_points, FUN = mean)
tempP_segsp <- merge(cap_temppP_points, setNames(tempP_centp, c('PlotPos', 'oLD1', 'oLD2')), by = 'PlotPos', sort = FALSE)
ggplot(cap_temppP_points) +
  geom_point(aes(x=LD1, y=LD2, colour = PlotPos), size = 3, alpha = .6) +
  geom_segment(data = tempP_segsp, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = PlotPos), alpha = .9, size = .3) +
  geom_point(data = tempP_centp, mapping = aes(x = LD1, y = LD2, colour = PlotPos), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 4, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_temppP_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_temppP_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 80.2%",
    y = "CAP Axis 2; 18.7%")
# CAP by SamplingPeriod
cap_temppsP <- CAPdiscrim(disttempP~`Sampling Period`, data = sttemporalP, axes = 10, m = 0, mmax = 10, add = FALSE, permutations = 9)
cap_temppsP <- add.spec.scores(cap_temppsP, dtempP, method = "cor.scores", multi = 1, Rscale = F, scaling = "1")
round(cap_temppsP$F/sum(cap_temppsP$F), digits=3)
barplot(cap_temppsP$F/sum(cap_temppsP$F))
cap_temppsP_points <- bind_cols((as.data.frame(cap_temppsP$x)), ftempP)
glimpse(cap_temppsP_points)
cap_temppsP_arrows <- as.data.frame(cap_temppsP$cproj*5) %>% #Pulls object from list, scales arbitrarily and makes a new df
  rownames_to_column("variable")
cap_temppsP_arrows
ggplot(cap_temppsP_points) +
  geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`), size = 4) +
  scale_colour_manual(values = brewer.pal(n = 6, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_temppsP_arrows,
               x = 0, y = 0, alpha = 0.7,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_temppsP_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 65.2%",
    y = "CAP Axis 2; 22.6%")
# CAP by SamplingPeriod + spider
tempP_centps <- aggregate(cbind(LD1, LD2) ~ `Sampling Period`, data = cap_temppsP_points, FUN = mean)
tempP_segsps <- merge(cap_temppsP_points, setNames(tempP_centps, c('Sampling Period', 'oLD1', 'oLD2')), by = 'Sampling Period', sort = FALSE)
ggplot(cap_temppsP_points) +
  geom_point(aes(x=LD1, y=LD2, colour = `Sampling Period`, shape = PlotPos), size = 3, alpha = .6) +
  geom_segment(data = tempP_segsps, mapping = aes(x = LD1, y = LD2, xend = oLD1, yend = oLD2, colour = `Sampling Period`), alpha = .9, size = .3) +
  geom_point(data = tempP_centps, mapping = aes(x = LD1, y = LD2, colour = `Sampling Period`), size = 5) +
  scale_colour_manual(values = brewer.pal(n = 6, name = "Spectral")) +
  theme_classic() +
  theme(strip.background = element_blank()) +
  geom_segment(data = cap_temppsP_arrows,
               x = 0, y = 0, alpha = 0.3,
               mapping = aes(xend = LD1, yend = LD2),
               arrow = arrow(length = unit(2, "mm"))) +
  ggrepel::geom_text_repel(data = cap_temppsP_arrows, aes(x=LD1, y=LD2, label = variable),
                           # colour = "#72177a",
                           size = 4
  ) +
  labs(
    x = "CAP Axis 1; 65.2%",
    y = "CAP Axis 2; 22.6%")
|
5a1c343bf9c446fa5192c60a87751da7261cecc7
|
cab6be4f5004f4c9106e77623dfc85aec4fbeeec
|
/ballerDepHeterogenScripts/DataPrep.R
|
060f3492e76dcd27c2f12234ef27e6109748f3ed
|
[] |
no_license
|
PennBBL/ballerDepHeterogenScripts
|
fab351c54bb5263e1aac4d133a8f92b66df4b8f4
|
90d112fd734e41ae93ec4bd3a591885c53915359
|
refs/heads/master
| 2021-06-04T11:55:28.632889
| 2020-02-16T03:09:25
| 2020-02-16T03:09:25
| 112,241,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,841
|
r
|
DataPrep.R
|
#################
### LOAD DATA ###
#################
# Load the PNC n1601 data-freeze CSVs. Paths are absolute cluster paths;
# na.strings="." because missing values are coded as "." in these files
# (the demographics file is read without it — verify that is intentional).
#Demographic data (n=1629)
data.demo <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/demographics/n1601_demographics_go1_20161212.csv", header=TRUE)
##Clinical data
#Screening diagnoses (n=1601)
data.diag <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/clinical/n1601_goassess_psych_summary_vars_20131014.csv", header=TRUE, na.strings=".")
#Bifactors and correlated traits (n=1601)
data.factors <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/clinical/n1601_goassess_clinical_factor_scores_20161212.csv", header=TRUE, na.strings=".")
#State trait anxiety data (n=1391)
data.stai <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/clinical/n1601_stai_pre_post_itemwise_smry_factors_20170131.csv", header=TRUE, na.strings=".")
#Exclusion data (n=1601)
#Health exclusion (use the new healthExcludev2 variable)
data.healthExclude <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/health/n1601_health_20170421.csv", header=TRUE, na.strings=".")
#T1 QA exclusion (n=1601)
data.t1QA <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/t1struct/n1601_t1QaData_20170306.csv", header=TRUE, na.strings=".")
# Data prep ----

# Convert age at scan from months to years.
data.demo$age <- data.demo$ageAtScan1 / 12

# Recode sex: 1 (male) -> 0, 2 (female) -> 1. which() is used so that any
# NA values in sex are left untouched.
data.demo$sex <- replace(data.demo$sex, which(data.demo$sex == 1), 0)
data.demo$sex <- replace(data.demo$sex, which(data.demo$sex == 2), 1)
##################
### MERGE DATA ###
##################
# Successively outer-join (all=TRUE) every table on the bblid/scanid key pair.
dataMerge1 <-merge(data.demo,data.diag, by=c("bblid","scanid"), all=TRUE)
dataMerge2 <-merge(dataMerge1,data.factors, by=c("bblid","scanid"), all=TRUE)
dataMerge3 <-merge(dataMerge2,data.stai, by=c("bblid","scanid"), all=TRUE)
dataMerge4 <-merge(dataMerge3,data.healthExclude, by=c("bblid","scanid"), all=TRUE)
dataMerge5 <-merge(dataMerge4,data.t1QA, by=c("bblid","scanid"), all=TRUE)
#Retain only the 1601 bblids (demographics has 1629)
# The t1QA table defines the n=1601 sample; rows whose bblid is absent from
# it are dropped (nomatch=0).
data.n1601 <- dataMerge5[match(data.t1QA$bblid, dataMerge5$bblid, nomatch=0),]
#Put bblids in ascending order
data.ordered <- data.n1601[order(data.n1601$bblid),]
#Count the number of subjects (should be 1601)
# n is reused below when tallying exclusion counts.
n <- nrow(data.ordered)
#################################
### APPLY EXCLUSIONS AND SAVE ###
#################################
# Flag and count exclusions, subset to the final structural sample, and save
# it together with the bblid/scanid list. Exclusion counts are derived from
# the computed sample sizes (n, nrow(...)) instead of the previous hard-coded
# totals (1601, 1447), so the tallies stay correct if the input freeze changes.

## Count the total number excluded for healthExcludev2=1 (1=Excludes those
## with medical rating 3/4, major incidental findings that distort anatomy,
## psychoactive medical medications)
# Included: n=1447; Excluded: n=154, but medical.exclude (n=81) +
# incidental.exclude (n=20) + medicalMed.exclude (n=64) = 165, so 11 people
# were excluded on the basis of two or more of these criteria
data.final <- data.ordered
data.final$ACROSS.INCLUDE.health <- 1
data.final$ACROSS.INCLUDE.health[data.final$healthExcludev2==1] <- 0
health.include <- sum(data.final$ACROSS.INCLUDE.health)
health.exclude <- n - health.include

# Count the number excluded for just medical rating 3/4 (GOAssess Medical
# History and CHOP EMR were used to define one summary rating for overall
# medical problems) (n=81)
data.final$ACROSS.INCLUDE.medical <- 1
data.final$ACROSS.INCLUDE.medical[data.final$medicalratingExclude==1] <- 0
medical.include <- sum(data.final$ACROSS.INCLUDE.medical)
medical.exclude <- n - medical.include

# Count the number excluded for just major incidental findings that distort
# anatomy (n=20)
data.final$ACROSS.INCLUDE.incidental <- 1
data.final$ACROSS.INCLUDE.incidental[data.final$incidentalFindingExclude==1] <- 0
incidental.include <- sum(data.final$ACROSS.INCLUDE.incidental)
incidental.exclude <- n - incidental.include

# Count the number excluded for just psychoactive medical medications (n=64)
data.final$ACROSS.INCLUDE.medicalMed <- 1
data.final$ACROSS.INCLUDE.medicalMed[data.final$psychoactiveMedMedicalv2==1] <- 0
medicalMed.include <- sum(data.final$ACROSS.INCLUDE.medicalMed)
medicalMed.exclude <- n - medicalMed.include

# Subset the data to just those that pass healthExcludev2 (n=1447)
data.subset <- data.final[which(data.final$ACROSS.INCLUDE.health == 1), ]

## Count the number excluded for failing to meet structural image quality
## assurance protocols
# Included: n=1396; Excluded: n=51
data.subset$ACROSS.INCLUDE.QA <- 1
data.subset$ACROSS.INCLUDE.QA[data.subset$t1Exclude==1] <- 0
QA.include <- sum(data.subset$ACROSS.INCLUDE.QA)
QA.exclude <- nrow(data.subset) - QA.include

### Exclude those with ALL problems (health problems and problems with their
### t1 data) (included n=1396)
data.exclude <- data.subset[which(data.subset$healthExcludev2==0 & data.subset$t1Exclude == 0 ),]

# Demographics for the paper
meanAge <- mean(data.exclude$age)
sdAge <- sd(data.exclude$age)
rangeAge <- range(data.exclude$age)
genderTable <- table(data.exclude$sex)

# Save final dataset
saveRDS(data.exclude,"/data/joy/BBL/projects/pncNmf/subjectData/n1396_T1_subjData.rds")

# Save the bblids and scanids for the final sample (n=1396)
IDs <- c("bblid", "scanid")
bblidsScanids <- data.exclude[IDs]
# Remove header so the CSV contains only the id columns
names(bblidsScanids) <- NULL
# Save list
write.csv(bblidsScanids, file="/data/joy/BBL/projects/pncNmf/subjectData/n1396_T1_bblids_scanids.csv", row.names=FALSE)
############################
### SENSITIVITY ANALYSES ###
############################
# Count the number taking psychotropic psychiatric medications
# Included: n=1240; Excluded: n=156
data.exclude$ACROSS.INCLUDE.psychMeds <- 1
data.exclude$ACROSS.INCLUDE.psychMeds[data.exclude$psychoactiveMedPsychv2==1] <- 0
psychMeds.include <- sum(data.exclude$ACROSS.INCLUDE.psychMeds)
# Use the actual sample size instead of the previously hard-coded 1396, so
# the count stays correct if the upstream exclusions change.
psychMeds.exclude <- nrow(data.exclude) - psychMeds.include

# Exclude those who were on psychiatric medications (included n=1240)
data.sensitivity <- data.exclude[which(data.exclude$ACROSS.INCLUDE.psychMeds==1),]

# Save sensitivity dataset
saveRDS(data.sensitivity,"/data/joy/BBL/projects/pncNmf/subjectData/n1240_T1_subjData_NoPsychMeds.rds")
|
d56d0905e642ef4a9314206a67a21f257471521b
|
ec7a64a18b9304a93ba84fa5538aa5a2da5dacb7
|
/Homework 9/Homework-9.R
|
9edf3dff4c042776a625c5ef07d8872615cf6e1b
|
[] |
no_license
|
statduck/SP21
|
47581bb8b0847675a0c21ee9e195a206b6a82e08
|
a048e97a23b0b820a420b9240cae4668c6698b74
|
refs/heads/main
| 2023-08-01T06:43:16.629247
| 2021-09-11T15:30:08
| 2021-09-11T15:30:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95
|
r
|
Homework-9.R
|
# Fractional factorial design: 16 runs for 10 two-level factors (a 2^(10-6)
# design), with a fixed seed so the randomized run order is reproducible.
# NOTE(review): removed `rm(list = ls())` — wiping the global environment from
# inside a script is an anti-pattern; start a fresh R session instead.
library(FrF2)

set.seed(42069)
design <- FrF2(nruns = 16, nfactors = 10)
design
|
de2342e89ecee22199a091e4faec195f34267883
|
cfa1cfb6b9a39102a0cc7df5abf974cec2d472cc
|
/plot1.R
|
d2b75136250c17bc5387f8cd398888c5ca7383b0
|
[] |
no_license
|
MrMaksimize/ExData_Plotting1
|
78e3a17e0719ad46062d661f10d48093fb034c8b
|
edd6ec351daafca89aa1232f66f27f42012fe058
|
refs/heads/master
| 2021-01-22T01:44:06.048234
| 2015-06-06T00:10:53
| 2015-06-06T00:10:53
| 36,845,317
| 0
| 0
| null | 2015-06-04T03:18:04
| 2015-06-04T03:18:03
| null |
UTF-8
|
R
| false
| false
| 286
|
r
|
plot1.R
|
## Set WD to the location of file
# NOTE(review): parent.frame(2)$ofile is only defined when this file is run
# via source(); it fails under Rscript or when pasted interactively — confirm
# the intended invocation before relying on it.
setwd(dirname(parent.frame(2)$ofile))
source('helpers.R')
# Load the dataset via the helper defined in helpers.R (contents not visible
# here; presumably the household power consumption data — TODO confirm).
pcons <- getData()
# Plot 1: red histogram of global active power (kilowatts), drawn on screen.
hist(
  pcons$Global_active_power,
  col="red",
  main = "Global Active Power",
  xlab = "Global Active Power (kilowatts)"
)
# Copy the on-screen plot to plot1.png (png device defaults apply) and close
# the copy device.
dev.copy(png, file="./plot1.png")
dev.off()
|
2f6157d44170cfda1c635740161a0671ba6f8953
|
97ba2fb82ac72c6cbf3c999bd89627269b03989a
|
/man/split_path.Rd
|
db9514896179d128073fd02f9b605b758864c096
|
[] |
no_license
|
markgene/mutils
|
e8b8ce71eb7733c7d18407ccbbeab63ff146af23
|
2d4bbc4769abd84b39adcce3194f38bc501f71b1
|
refs/heads/master
| 2022-03-27T12:45:53.785373
| 2022-03-06T15:42:07
| 2022-03-06T15:42:07
| 132,940,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 522
|
rd
|
split_path.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{split_path}
\alias{split_path}
\title{Split a file path into a vector of folder names}
\usage{
split_path(x)
}
\arguments{
\item{x}{A character scalar of file path.}
}
\value{
A character vector.
}
\description{
Split a file path into a vector of folder names
}
\details{
The function is adopted from \href{https://stackoverflow.com/questions/29214932/split-a-file-path-into-folder-names-vector}{a discussion on StackOverflow}.
}
|
c5ca05362abddab078e4b2188c512e3ff55eb341
|
bb94bfdd55b1351794ccd2d1c8e774f5d9957241
|
/modelowanienieznany.R
|
e9ac3ff2e5aeabce7585f571ec4605fb8b1201a3
|
[] |
no_license
|
HabibMrad/Bioinformatic_EDX
|
4fd5371afb6134c257c196421b7a83974e4670e2
|
a538cb6bc606b0428a4ad1c10c2fa80201c66cd3
|
refs/heads/master
| 2020-12-23T09:09:42.312875
| 2018-05-13T07:10:45
| 2018-05-13T07:10:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,599
|
r
|
modelowanienieznany.R
|
# Setup: install (if missing) and attach the packages used below, then pull
# the dataset from GitHub.
# NOTE(review): removed `rm(list = ls())` — clearing the workspace from inside
# a script is an anti-pattern; use a fresh R session instead.

# The original call
#   install.packages("readr", "ggplot2", "MASS", "stats", "lmtest", "car", "RCurl")
# was a bug: install.packages() takes a character *vector* of package names as
# its first argument, so only "readr" was installed while "ggplot2" was
# silently consumed as the `lib` argument and "MASS" as `repos`. "stats" is a
# base package and must not be installed.
pkgs <- c("readr", "ggplot2", "MASS", "lmtest", "car", "RCurl")
install.packages(setdiff(pkgs, rownames(installed.packages())))

library(readr)
library(ggplot2)
library(MASS)
library(stats)
library(lmtest)
library(car)
library(RCurl)

# Download the data; the first CSV column supplies the row names.
dane <- read.csv(text=getURL("https://raw.githubusercontent.com/sonjawap/final/master/Kaggle.csv"), row.names = 1)
View(dane)

# Alternative: load from a local copy instead of the URL.
# Kaggle <- read_csv("C:/Kaggle.csv", col_names=TRUE)
# a <- ncol(Kaggle)
# dane <- data.frame(Kaggle[,1:a], row.names=TRUE)
# View(dane)
#Model 1
# Response variable (zmienna objasniana): Human Development Index, 2014.
zmienna1 <- dane$Human.Development.Index.HDI.2014
############## variable analysis ###############
layout(matrix(c(1,2,3,4),1,1))
plot(zmienna1)
summary(zmienna1)
m <- mean(zmienna1)
std <- sqrt(var(zmienna1))
hist(zmienna1, freq=F)
############## normal distribution #############
# Overlay the fitted normal density on the histogram drawn above.
curve(dnorm(x, mean=m, sd=std), add=TRUE)
shapiro.test(zmienna1) # H0: the distribution is normal
# Explanatory variable: share of parliamentary seats held by women, 2014.
# NOTE(review): "womand" looks like a typo, but it must match the actual
# column name in the CSV — verify before renaming.
zmienna2 <- dane$Share.of.seats.in.parliament.percentage.held.by.womand.2014
#############scatter plot#################
ggplot(dane, aes(x=zmienna2, y=zmienna1)) +
  geom_point(shape=4) +
  geom_smooth(method=lm)
############## correlation ################
cor(zmienna2, zmienna1) # correlation coefficient
############# linear regression model ###########
model1 <- lm(zmienna1~zmienna2)
summary(model1)
coef(model1)
###############
model1_parametry <- coefficients(model1) # model coefficients
y_teoretyczne <- fitted(model1) # fitted (predicted) values
model1_reszty <- resid(model1) # model residuals
######### Anscombe - why we test models ##############
# Fit the four Anscombe regressions: near-identical summary statistics from
# visibly different data, motivating the diagnostic tests below.
summary(anscombe)
ff <- y ~ x
mods <- setNames(as.list(1:4), paste0("lm", 1:4))
for(i in 1:4) {
  ff[2:3] <- lapply(paste0(c("y","x"), i), as.name)
  mods[[i]] <- lmi <- lm(ff, data = anscombe)
  print(anova(lmi))
}
op <- par(mfrow = c(2, 2), mar = 0.1+c(4,4,1,1), oma = c(0, 0, 2, 0))
for(i in 1:4) {
  ff[2:3] <- lapply(paste0(c("y","x"), i), as.name)
  plot(ff, data = anscombe, col = "red", pch = 21, bg = "orange", cex = 1.2,
       xlim = c(3, 19), ylim = c(3, 13))
  abline(mods[[i]], col = "blue")
}
mtext("Anscombe's 4 Regression data sets", outer = TRUE, cex = 1.5)
par(op)
################# Tests ######################
# diagnostic plots
layout(matrix(c(1,2,3,4),2,2)) # 4 graphs/page
plot(model1)
#################### tests ######################
# Independence / autocorrelation of residuals: Durbin-Watson test.
# H0: no autocorrelation
dwtest(model1)
# Normality of the residual distribution
layout(matrix(c(1,2,3,4),1,1))
hist(model1_reszty, freq=F)
curve(dnorm(x, mean=mean(model1_reszty), sd=sqrt(var(model1_reszty))), add=TRUE)
shapiro.test(model1_reszty) # H0: normal distribution
# Heteroskedasticity - homogeneity of residual variance
bptest(model1) # H0: homoskedasticity - variance does not depend on the response
# Cook's distance plot
influencePlot(model1, id.method="identify", main="Influence Plot") # Cook's measure
# RESET specification test
resettest(model1) # H0: the functional form is correct
################## Model 2 ##########################
# Add a second explanatory variable: life expectancy at birth (years).
zmienna3 <- dane$Life.expectancy.at.birth..years
model2 <- lm(zmienna1 ~ zmienna2 + zmienna3, data = dane)
summary(model2)
model2_reszty <- resid(model2)
################# model comparison #################
# R-squared (from the summaries)
summary(model1)
summary(model2)
# AIC (lower is better). NOTE(review): the original comment also mentioned
# BIC, but only AIC is computed here.
AIC(model1)
AIC(model2)
##################### Exercise (Zadanie) ####################
# Repeat the Model 1 workflow with CO2 emissions per capita (2011, tonnes)
# as the predictor of HDI 2014. Note: model1 and its residuals are
# overwritten from the earlier section.
zmienna1 <- dane$Human.Development.Index.HDI.2014
zmienna2 <- dane$Carbon.dioxide.emissions.per.capita.2011.Tones
layout(matrix(c(1,2,3,4),1,1))
plot(zmienna1)
summary(zmienna1)
m <- mean(zmienna1)
std <- sqrt(var(zmienna1))
hist(zmienna1, freq=F)
ggplot(dane, aes(x=zmienna2, y=zmienna1)) + geom_point(shape=4) + geom_smooth(method=lm)
############## correlation ################
cor(zmienna2, zmienna1) # correlation coefficient
############# linear regression model ###########
model1 <- lm(zmienna1~zmienna2)
summary(model1)
coef(model1)
###############
model1_parametry <- coefficients(model1) # model coefficients
y_teoretyczne <- fitted(model1) # fitted (predicted) values
model1_reszty <- resid(model1) # model residuals
######### Anscombe - why we test models ##############
summary(anscombe)
ff <- y ~ x
mods <- setNames(as.list(1:4), paste0("lm", 1:4))
for(i in 1:4) {
  ff[2:3] <- lapply(paste0(c("y","x"), i), as.name)
  mods[[i]] <- lmi <- lm(ff, data = anscombe)
  print(anova(lmi))
}
op <- par(mfrow = c(2, 2), mar = 0.1+c(4,4,1,1), oma = c(0, 0, 2, 0))
for(i in 1:4) {
  ff[2:3] <- lapply(paste0(c("y","x"), i), as.name)
  plot(ff, data = anscombe, col = "red", pch = 21, bg = "orange", cex = 1.2,
       xlim = c(3, 19), ylim = c(3, 13))
  abline(mods[[i]], col = "blue")
}
mtext("Anscombe's 4 Regression data sets", outer = TRUE, cex = 1.5)
par(op)
################# Tests ######################
# diagnostic plots
layout(matrix(c(1,2,3,4),2,2)) # 4 graphs/page
plot(model1)
#################### tests ######################
# Independence / autocorrelation of residuals: Durbin-Watson test.
# H0: no autocorrelation
dwtest(model1)
# Normality of the residual distribution
layout(matrix(c(1,2,3,4),1,1))
hist(model1_reszty, freq=F)
curve(dnorm(x, mean=mean(model1_reszty), sd=sqrt(var(model1_reszty))), add=TRUE)
shapiro.test(model1_reszty) # H0: normal distribution
# Heteroskedasticity - homogeneity of residual variance
bptest(model1) # H0: homoskedasticity - variance does not depend on the response
# Cook's distance plot
influencePlot(model1, id.method="identify", main="Influence Plot") # Cook's measure
# RESET specification test
resettest(model1) # H0: the functional form is correct
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.