blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b93e64c43ab6df853534574a66b4af208b1c64c
|
136bd584b3bf7bc06812c386a84a8b51f115ec13
|
/DrawNicerTree_PRP.R
|
ac348241b9c4fe1bba1fbf684e0bc1e8bcf87fee
|
[] |
no_license
|
dhana2k14/ml-stuffs
|
a8fe8cc152c7f3f1106206df9c55aa87086dccd1
|
d932f2a5b1624748847f783379884ad2e2216cd2
|
refs/heads/master
| 2021-09-06T17:32:06.844608
| 2018-02-09T02:02:05
| 2018-02-09T02:02:05
| 108,121,291
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,009
|
r
|
DrawNicerTree_PRP.R
|
# Draw nicer classification and regression decision trees with prp().
# Fixes vs. original: renamed `data` -> `seg_data` (the name `data` masks
# base R's data() function), corrected the per-library comments (they were
# misassigned), fixed "prue" -> "prune", and normalized `cp =0` spacing.
library(rattle)        # fancyRpartPlot()
library(rpart)         # recursive-partitioning trees
library(rpart.plot)    # enhanced tree plotting (prp)
library(RColorBrewer)  # color palettes for the fancy tree plot
library(party)         # alternative decision tree algorithm
library(partykit)      # convert rpart object to binary tree
library(caret)         # used here only as a data source (segmentationData)
# Get some data: drop the first two bookkeeping columns.
data(segmentationData)
seg_data <- segmentationData[, -c(1, 2)]
# rpart decision tree: classify `Class` from all remaining columns.
form <- as.formula(Class ~ .)
tree.1 <- rpart(form, data = seg_data, control = rpart.control(minsplit = 20, cp = 0))
# Base-graphics rendering (hard to read for large trees).
plot(tree.1)
text(tree.1)
# Plot the tree with shortened (3-character) variable names
prp(tree.1, varlen = 3)
# Interactively prune the tree: click nodes to snip, result in $obj
new.tree.1 <- prp(tree.1, snip = TRUE)$obj
prp(new.tree.1)
#------------------------
# Default-control tree plus a fancier rendering.
tree.2 <- rpart(form, data = seg_data)
prp(tree.2)
fancyRpartPlot(tree.2)
|
300e3a45fe420dfd5601ec044a01dfa717ffdb8c
|
c451a937916f956367b98148de5ca45945ba8921
|
/Book Files/ISLR/Chapter%207%20Lab.txt
|
972d50f68df036f3bf715682cf568f97ecd3e790
|
[
"MIT"
] |
permissive
|
bozzmob/data-science-machine-learning-ai-big-data-resources
|
22b56ef6dd4d9368ad1d8776d82d047271c35098
|
97cc75b8f5dfdb17bbb89365742664400e766e4c
|
refs/heads/master
| 2020-06-17T19:37:46.682368
| 2016-12-17T22:07:35
| 2016-12-17T22:07:35
| 74,976,646
| 4
| 1
| null | 2016-12-17T22:07:35
| 2016-11-28T13:41:46
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,066
|
txt
|
Chapter%207%20Lab.txt
|
# Chapter 7 Lab: Non-linear Modeling
# (ISLR) Polynomial regression, step functions, splines, local regression
# and GAMs, all fit on the Wage data set. Note: attach() exposes Wage's
# columns (age, wage, year, education) as top-level names below.
library(ISLR)
attach(Wage)
# Polynomial Regression and Step Functions
# poly() uses an orthogonal polynomial basis; raw=T uses the raw basis
# (different coefficients, identical fitted values).
fit=lm(wage~poly(age,4),data=Wage)
coef(summary(fit))
fit2=lm(wage~poly(age,4,raw=T),data=Wage)
coef(summary(fit2))
fit2a=lm(wage~age+I(age^2)+I(age^3)+I(age^4),data=Wage)
coef(fit2a)
fit2b=lm(wage~cbind(age,age^2,age^3,age^4),data=Wage)
# Prediction grid over the observed age range, with +/- 2 SE bands.
agelims=range(age)
age.grid=seq(from=agelims[1],to=agelims[2])
preds=predict(fit,newdata=list(age=age.grid),se=TRUE)
se.bands=cbind(preds$fit+2*preds$se.fit,preds$fit-2*preds$se.fit)
par(mfrow=c(1,2),mar=c(4.5,4.5,1,1),oma=c(0,0,4,0))
plot(age,wage,xlim=agelims,cex=.5,col="darkgrey")
title("Degree-4 Polynomial",outer=T)
lines(age.grid,preds$fit,lwd=2,col="blue")
matlines(age.grid,se.bands,lwd=1,col="blue",lty=3)
# Orthogonal and raw fits give essentially identical predictions.
preds2=predict(fit2,newdata=list(age=age.grid),se=TRUE)
max(abs(preds$fit-preds2$fit))
# Choose polynomial degree by sequential ANOVA of nested models.
fit.1=lm(wage~age,data=Wage)
fit.2=lm(wage~poly(age,2),data=Wage)
fit.3=lm(wage~poly(age,3),data=Wage)
fit.4=lm(wage~poly(age,4),data=Wage)
fit.5=lm(wage~poly(age,5),data=Wage)
anova(fit.1,fit.2,fit.3,fit.4,fit.5)
coef(summary(fit.5))
# Squared t-statistic of the degree-4 term equals the ANOVA F-statistic.
(-11.983)^2
fit.1=lm(wage~education+age,data=Wage)
fit.2=lm(wage~education+poly(age,2),data=Wage)
fit.3=lm(wage~education+poly(age,3),data=Wage)
anova(fit.1,fit.2,fit.3)
# Logistic regression for P(wage > 250) with a degree-4 polynomial in age.
fit=glm(I(wage>250)~poly(age,4),data=Wage,family=binomial)
preds=predict(fit,newdata=list(age=age.grid),se=T)
# Transform logit-scale fit and SE bands back to the probability scale.
pfit=exp(preds$fit)/(1+exp(preds$fit))
se.bands.logit = cbind(preds$fit+2*preds$se.fit, preds$fit-2*preds$se.fit)
se.bands = exp(se.bands.logit)/(1+exp(se.bands.logit))
preds=predict(fit,newdata=list(age=age.grid),type="response",se=T)
plot(age,I(wage>250),xlim=agelims,type="n",ylim=c(0,.2))
points(jitter(age), I((wage>250)/5),cex=.5,pch="|",col="darkgrey")
lines(age.grid,pfit,lwd=2, col="blue")
matlines(age.grid,se.bands,lwd=1,col="blue",lty=3)
# Step function: cut age into 4 intervals, fit a constant per interval.
table(cut(age,4))
fit=lm(wage~cut(age,4),data=Wage)
coef(summary(fit))
# Splines
library(splines)
# Cubic regression spline with knots at ages 25, 40 and 60.
fit=lm(wage~bs(age,knots=c(25,40,60)),data=Wage)
pred=predict(fit,newdata=list(age=age.grid),se=T)
plot(age,wage,col="gray")
lines(age.grid,pred$fit,lwd=2)
lines(age.grid,pred$fit+2*pred$se,lty="dashed")
lines(age.grid,pred$fit-2*pred$se,lty="dashed")
dim(bs(age,knots=c(25,40,60)))
dim(bs(age,df=6))
attr(bs(age,df=6),"knots")
# Natural spline with 4 degrees of freedom.
fit2=lm(wage~ns(age,df=4),data=Wage)
pred2=predict(fit2,newdata=list(age=age.grid),se=T)
lines(age.grid, pred2$fit,col="red",lwd=2)
# Smoothing splines: fixed df = 16 vs. cross-validated df.
plot(age,wage,xlim=agelims,cex=.5,col="darkgrey")
title("Smoothing Spline")
fit=smooth.spline(age,wage,df=16)
fit2=smooth.spline(age,wage,cv=TRUE)
fit2$df
lines(fit,col="red",lwd=2)
lines(fit2,col="blue",lwd=2)
legend("topright",legend=c("16 DF","6.8 DF"),col=c("red","blue"),lty=1,lwd=2,cex=.8)
# Local regression (loess) with two span choices.
plot(age,wage,xlim=agelims,cex=.5,col="darkgrey")
title("Local Regression")
fit=loess(wage~age,span=.2,data=Wage)
fit2=loess(wage~age,span=.5,data=Wage)
lines(age.grid,predict(fit,data.frame(age=age.grid)),col="red",lwd=2)
lines(age.grid,predict(fit2,data.frame(age=age.grid)),col="blue",lwd=2)
legend("topright",legend=c("Span=0.2","Span=0.5"),col=c("red","blue"),lty=1,lwd=2,cex=.8)
# GAMs
# Natural-spline GAM can be fit with plain lm().
gam1=lm(wage~ns(year,4)+ns(age,5)+education,data=Wage)
library(gam)
# Smoothing-spline GAM via the gam package.
gam.m3=gam(wage~s(year,4)+s(age,5)+education,data=Wage)
par(mfrow=c(1,3))
plot(gam.m3, se=TRUE,col="blue")
plot.gam(gam1, se=TRUE, col="red")
# Compare: no year, linear year, and smooth year.
gam.m1=gam(wage~s(age,5)+education,data=Wage)
gam.m2=gam(wage~year+s(age,5)+education,data=Wage)
anova(gam.m1,gam.m2,gam.m3,test="F")
summary(gam.m3)
preds=predict(gam.m2,newdata=Wage)
# Local-regression (lo) terms inside a GAM; akima supports the 2-D
# surface plot of the year-by-age interaction term.
gam.lo=gam(wage~s(year,df=4)+lo(age,span=0.7)+education,data=Wage)
plot.gam(gam.lo, se=TRUE, col="green")
gam.lo.i=gam(wage~lo(year,age,span=0.5)+education,data=Wage)
library(akima)
plot(gam.lo.i)
# Logistic GAM for P(wage > 250).
gam.lr=gam(I(wage>250)~year+s(age,df=5)+education,family=binomial,data=Wage)
par(mfrow=c(1,3))
plot(gam.lr,se=T,col="green")
table(education,I(wage>250))
# Refit excluding the '1. < HS Grad' education level (see table above).
gam.lr.s=gam(I(wage>250)~year+s(age,df=5)+education,family=binomial,data=Wage,subset=(education!="1. < HS Grad"))
plot(gam.lr.s,se=T,col="green")
|
4d7ad1319aa9118f732fa79211875005fbdf1580
|
7ca8a0de0f69fd409fc67bf2ded294cf4b3d1f5f
|
/cibor.R
|
e4649f6f4148700a0c50655e4f74a082795c9c81
|
[] |
no_license
|
krose/ciborfixings
|
34823b8d0e428ce6c8efa6ef4f3616bcdce13099
|
2863c51902de2c4b2415acaff07b42d302fa2956
|
refs/heads/master
| 2016-09-05T23:40:22.497817
| 2014-11-06T15:20:00
| 2014-11-06T15:20:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,162
|
r
|
cibor.R
|
# Scrape/parse CIBOR fixing spreadsheets into one tidy CSV.
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("c:/users/kro/documents/github/ciborfixings/")
library(xlsx)       # read.xlsx2() for the .xls/.xlsx fixing files
library(lubridate)  # ymd() date parsing
library(stringr)    # str_trim(), str_length(), str_replace()
library(dplyr)      # group_by()/summarise()/select() pipelines
# ### Build sequence of dates to try
# try_dates <- seq.Date(from = as.Date(ymd("2004-02-01")), to = as.Date(ymd("2014-11-05")), by = 1)
#
# ### Define the base url
# base_url <- "http://www.finansraadet.dk/Historical%20Rates/cibor/"
#
#
#
# #######################################################
# ## Download files
# ########################################################
#
# ### Loop over try_dates and try to download a file
# for(i in seq_along(try_dates)){
#
# try_url_xlsx <- paste0(base_url, as.character(year(try_dates[i])), "/", as.character(try_dates[i]), ".xlsx")
# try_url_xls <- paste0(base_url, as.character(year(try_dates[i])), "/", as.character(try_dates[i]), ".xls")
#
# try(download.file(url = try_url_xlsx, destfile = paste0("./data/", try_dates[i], ".xlsx"), mode = "wb"), silent = TRUE)
# try(download.file(url = try_url_xls, destfile = paste0("./data/", try_dates[i], ".xls"), mode = "wb"), silent = TRUE)
#
# Sys.sleep(time = 0.5)
# }
#
# ###########################################################
# ## Remove files with file size == 0
# ############################################################
#
# xl_file_names <- dir(path = "./data/")
#
# for(i in seq_along(xl_file_names)){
#
# if(file.info(paste0("./data/", xl_file_names[i]))$size == 0){
# file.remove(paste0("./data/", xl_file_names[i]))
# }
# }
#
###########################################################
## Read first column to figure out which rows to download
############################################################
### only read up until 2011-05-07 as they move to a very old
### xls file format.
## minus 59 because of NAs
xl_file_names <- dir(path = "./data/")
xl_file_names <- xl_file_names[1:(1818-59)]
## Create list for data, one slot per spreadsheet
first_col <- vector(mode = "list", length = length(xl_file_names))
## Bad names (blacklist) was used in the exploratory phase
# bad_names <- c("danmarks nationalbank", "handelsafdelingen", "ciborfixing den", "stiller", "nordic operations", "nasdaq omx", "mail",
# "ciborfixing", "fax", "to:", "market operations", "NA", "yours sincerely",
# "This sheet containing rates from the CIBOR reporting banks shall be regarded as an integrated part of the control procedure",
# "of CIBOR. The rates do not represent the final CIBOR rates and are not to be published by any CIBOR reporting bank, nor",
# "by Finansraadet (Danish Bankers Association) or by anyone else before the official CIBOR rates are available. The official")
## These bank names (whitelist) are used going forward.
good_names <- unique(tolower(c("Amtssparekassen Fyn", "Danske Bank", "Jyske Bank", "HSH Nordbank",
"Nordea", "Nykredit Bank", "Spar Nord Bank", "Sydbank", "FIXING",
"Fionia Bank", "ABN Amro Bank", "Barclays Capital",
"Deutsche Bank", "Royal Bk of Scotland", "DANSKE BANK", "BARCLAYS", "Deutsche", "JYSKE BANK", "NORDEA",
"NYKREDIT", "RBS FM", "SYDBANK")))
## Read the first column of every file into a list object
## The column is being sequenced to single out the rows we want
## and matched based on the good_names
## I also do a bit of cleaning up like setting character to lower
for(i in seq_along(xl_file_names)){
## Print the file name being read (progress indicator)
print(xl_file_names[i])
## Read file: column 1 of the first sheet, rows 1-70, as character.
## NOTE(review): on read failure, try() leaves a try-error object in
## first_col[[i]], and the `$row_number` assignment below would then
## error -- confirm all retained files are readable.
first_col[[i]] <- try(read.xlsx2(file = paste0("./data/", xl_file_names[i]),sheetIndex = 1, startRow = 1, colIndex = 1, endRow = 70, as.data.frame = TRUE, header = FALSE, colClasses = "character", stringsAsFactors = FALSE), silent = FALSE)
## Make sequence of original row positions (used later as read boundaries)
first_col[[i]]$row_number <- seq_along(first_col[[i]][,1])
## Add file name
first_col[[i]]$file_name <- xl_file_names[i]
## set characters to lower (to match the lower-cased good_names)
first_col[[i]]$X1 <- tolower(str_trim(first_col[[i]]$X1))
## Test for good names
test_good <- first_col[[i]]$X1 %in% good_names
## filter and keep only the good names
first_col[[i]] <- first_col[[i]][test_good, ]
### Used in the exploratory phase
# ## get length of each character string
# strlen <- str_length(string = first_col[[i]][, 1])
#
# ## remove rows where length > 0 or bigger than 50 or is na
# first_col[[i]] <- first_col[[i]][strlen > 0,]
# first_col[[i]] <- first_col[[i]][strlen < 50,]
# first_col[[i]] <- first_col[[i]][!is.na(first_col[[i]]$X1),]
#
# ### test for bad names
# test_bad <- tolower(first_col[[i]]$X1) %in% bad_names
# ## remove bad names
# first_col[[i]] <- first_col[[i]][ !test_bad, ]
#
# test_bad <- str_detect(first_col[[i]]$X1, "[[:digit:]]")
# first_col[[i]] <- first_col[[i]][!test_bad,]
}
## Take every list object and rowbind them all
test_ob <- do.call("rbind", first_col)
## Group_by file_name and find the highest and lowest row number
## we can use for reading the actual fixing and discard the rest
test_ob <- test_ob %>% group_by(file_name) %>%
summarise(min_row = min(row_number), max_row = max(row_number)) %>% ungroup
##############################################################
## Read the fixings
###########################################################
## create list object to save fixings into
curve_values <- list()
## Loop over each row in test_ob and read the file name using these variables.
for(i in seq_along(test_ob$file_name)){
## Print out where you are (progress indicator)
print(test_ob$file_name[i])
## Read the file, restricted to the rows between the first and
## last recognised bank-name row found in the scan above.
curve_values[[i]] <- read.xlsx2(file = paste0("./data/", test_ob$file_name[i]), sheetIndex = 1, startRow = test_ob$min_row[i], endRow = test_ob$max_row[i], as.data.frame = TRUE, header = FALSE, stringsAsFactors = FALSE)
## Set first column (bank names) to lower-case characters
curve_values[[i]]$X1 <- tolower(curve_values[[i]]$X1)
}
##########################################################
## clean and format the fixings
##########################################################
## Remove empty columns by looping through each reading and its columns.
## Columns are scanned right-to-left so a removal does not shift the
## indices still to be visited.
for(i in seq_along(curve_values)){
for(n in length(names(curve_values[[i]])):1){
if(is.character(curve_values[[i]][, n])){
test <- sum(str_length(string = curve_values[[i]][, n])) < 1
if(test){
curve_values[[i]] <- curve_values[[i]][, -n]
}
}
}
}
## Remove blank rows (empty first-column cells) from the file
for(i in seq_along(curve_values)){
test <- str_length(curve_values[[i]]$X1) > 0
curve_values[[i]] <- curve_values[[i]][test,]
}
## Look at the length of each column and keep only
## the ones with length == 15 (bank + 14 tenors)
test_ob$col_len <- sapply(curve_values, FUN = function(x) length(names(x)),simplify = TRUE)
## Keep only file readings with col_len == 15
## This means that data from 2005-04-01 to 2011-04-01
## is retained
curve_values <- curve_values[test_ob$col_len == 15]
test_ob <- test_ob[test_ob$col_len == 15, ]
## Set colnames for fixing period: bank name plus the 7-day .. 360-day tenors
curve_col_names <- c("bank", "d7", "d14", "d30", "d60", "d90", "d120", "d150", "d180",
"d210", "d240", "d270", "d300", "d330", "d360")
## Set colnames
for(i in seq_along(curve_values)){
names(curve_values[[i]]) <- curve_col_names
}
## set fixings as numeric instead of character (column 1 is the bank name)
for(i in seq_along(curve_values)){
for(n in seq_along(names(curve_values[[i]]))){
if(n == 1){
next
} else {
curve_values[[i]][,n] <- as.numeric(curve_values[[i]][,n])
}
}
}
## Set date: recover it from the file name by stripping the extension,
## e.g. "2005-04-01.xlsx" -> "2005-04-01x" -> "2005-04-01".
## NOTE(review): ".xls" and "x" are treated as regular expressions by
## str_replace (the "." matches any character); fixed() patterns would be
## more precise, though results agree for these date-named files.
for(i in seq_along(curve_values)){
curve_values[[i]]$date <- str_replace(string = test_ob$file_name[i], pattern = ".xls", replacement = "")
curve_values[[i]]$date <- str_replace(string = curve_values[[i]]$date, pattern = "x", replacement = "")
curve_values[[i]]$date <- ymd(curve_values[[i]]$date)
}
## merge all the list objects into one data frame
curve_values <- do.call(rbind, curve_values)
## set the columns in the "correct" order: date, bank, then tenors
curve_values <- curve_values %>% select(date, bank, d7:d360)
## write tidy data to the disk (semicolon-separated, decimal comma)
write.csv2(curve_values, file = "cibor_csv2.csv")
|
38451548ba791c5c0ed094f4772d74b97fdd1fa4
|
a3f5cf06fd5d548ea390c52a3af988d0eb1e8cda
|
/tests/testthat/test-sys_details.R
|
9009c7fcc0bbec901856afcd8a2a20689dd37772
|
[] |
no_license
|
csgillespie/benchmarkme
|
3cb0b525b22951a551abd8e0edea29f1f83e9eb6
|
a43bf8ed5f84023c8381646e004676b37a0b16a5
|
refs/heads/main
| 2022-06-19T00:04:22.571644
| 2022-06-15T08:55:35
| 2022-06-15T08:55:35
| 44,773,568
| 47
| 14
| null | 2022-06-15T08:55:36
| 2015-10-22T21:12:50
|
R
|
UTF-8
|
R
| false
| false
| 251
|
r
|
test-sys_details.R
|
test_that("Test Sys Details", {
  skip_on_cran()
  # Request system details with the slow/verbose components disabled.
  sys <- get_sys_details(sys_info = FALSE, installed_packages = FALSE)
  # The returned list always contains 13 top-level elements.
  expect_equal(length(sys), 13)
  # Components that were switched off come back as NA placeholders.
  expect_true(is.na(sys$sys_info))
  expect_true(is.na(sys$installed_packages))
})
|
73f9e83ccc11b544e889eef3c44d04a804481297
|
a24aa2f4f09551d54813cafa3e29645b672803d3
|
/src/r/plotLinear.R
|
9793086272c26ca66d56e4706b049dc0ec21bc47
|
[
"BSD-3-Clause"
] |
permissive
|
wangzhennan14/Anaquin
|
59ecae7fcdb9be5e2f3020c4aa5a1918a4348ec3
|
c69f27454ed7be42095261ba560583244c0ce281
|
refs/heads/master
| 2021-01-18T15:30:21.351242
| 2017-03-28T09:57:32
| 2017-03-28T09:57:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
plotLinear.R
|
#
# Anaquin - Sequin statistical analysis. Version 1.1.1.
#
# This R script was generated at %1%.
#
# %2%
#
# NOTE(review): this file is a code-generation template -- the %N% tokens
# are substituted by Anaquin before the script is written out, so it is
# not valid standalone R as it appears here.
library(Anaquin)
# %3%/%4% expand to the directory and file name of a tab-separated table
# whose first column holds the sequin row names.
data <- read.csv('%3%/%4%', row.names=1, sep='\t')
title <- '%5%'
xlab <- '%6%'
ylab <- '%7%'
# Expected input concentration (x-axis)
input <- %8%
# Measured expression (y-axis)
measured <- %9%
# Create Anaquin data for PlotLinear (%10% expands to the input column name)
anaquin <- AnaquinData(analysis='PlotLinear', seqs=row.names(data), %10%=input, measured=measured)
plotLinear(anaquin, title=title, xlab=xlab, ylab=ylab, showLOQ=%11%)
|
b17077e98ba024467269ce00bfb1a43dd192f406
|
47088551d33d6fa167a2bb4b308b52b0300d1f19
|
/problem/SingleCellGrow/stage1_plot.r
|
584bbba663806f8616e67608322c96f0f88cd169
|
[] |
no_license
|
hydrays/sspat
|
1b0f2f7d581ce1ed84d4a61afe964d28858cfd05
|
9c4bc01ec124b31679199b1abac8643fac9a6777
|
refs/heads/master
| 2021-01-21T02:31:02.743226
| 2020-12-19T03:52:34
| 2020-12-19T03:52:34
| 39,364,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,743
|
r
|
stage1_plot.r
|
library('adk')              # Anderson-Darling k-sample test (adk.test)
dyn.load('simulator1.so')   # compiled simulation core
source('simulator1n.r')     # defines simulator1n(), the R wrapper
mcell <- as.matrix(read.csv('data/day8slow.csv'))   # day-8 slow-cell counts
mcell2 <- as.matrix(read.csv('data/day8fast.csv'))  # day-8 fast-cell counts
## Nsample = 10000
## T = 8
## d <- matrix(0, 101, 101)
## pvalue <- matrix(0, 101, 101)
## x <- seq(Nsample)
## i <- 1
## for ( r2 in seq(0.4, 0.8, by=0.004) ){
## j <- 1
## for ( d2 in seq(0.2, 0.6, by=0.004) ){
## x <- simulator1n(T, 0, r2, 0, d2, 0, 0, Nsample)
## dis <- adk.test(mcell, x)
## d[i, j] <- dis$adk[1,2]
## pvalue[i, j] <- dis$adk[2,2]
## cat(c(d[i, j], r2, d2, i, j),'\n')
## j <- j + 1
## }
## i <- i + 1
## }
## ## -----------------------------------
## ## Plot the contour
## ## -----------------------------------
## res <- which(d == max(d), arr.ind=T)
## r2max <- 0.4 + 0.004*res[1]
## d2max <- 0.2 + 0.004*res[2]
## par(bg = 'white')
## filled.contour(x = seq(0, 1, length.out=101),
## y = seq(0, 1, length.out=101),
## d,
## color=terrain.colors,
## plot.title = title(main = "Contour-plot of p-value under AD test (2D)",
## xlab = "proliferation rate",
## ylab = "death rate"),
## asp = 1,
## ## plot.axes={ axis(1); axis(2); points(r2max,d2max,pch=17) },
## level=c(0, 0.0001, 0.01, 0.1, 0.2, 0.3, 0.5, 0.6, 0.65, 0.7))
## mtext(paste("@", date()), side=1, line=4, adj=1.04, cex=.66)
## text(0, 0.9, adj = c(0,0), "The method of fitting parameters is based on")
## text(0, 0.85, adj = c(0,0), "Minimize the AD-distance (or maximize its p-Value).")
## text(0, 0.75, adj = c(0,0), "Sample A: experimental data (day 8 cell number).")
## text(0, 0.7, adj = c(0,0), "Sample B: simulated data from the population model.")
## text(0, 0.6, adj = c(0,0), "We do a grid-search on the whole parameter space.")
## text(0, 0.55, adj = c(0,0), "Each parameter set gives a sample B.")
## text(0, 0.5, adj = c(0,0), "The parameter set that minimize the distance")
## text(0, 0.45, adj = c(0,0), "(or maximize the p-value) between sample A-B")
## text(0, 0.4, adj = c(0,0), "is the winner.")
## arrows(x0=0.2, y0=0.4, x1=0.4, y1=0.25)
## dev.copy(pdf,'I_method.pdf')
## dev.off()
## -----------------------------------
## Plot the fit
## -----------------------------------
## Compare empirical CDFs of simulated vs. observed day-8 cell counts.
## Fixes vs. original: `<-` instead of `=` for assignment, and the end
## time renamed from `T` to `t_end` (`T` masks the TRUE shorthand).
## Two-parameter model for the slow population:
Nsample <- 50
t_end <- 8        # simulated time span (days)
r1 <- 0
r2 <- 0.652       # proliferation rate (per annotations below: grid-search fit)
d1 <- 0
d2 <- 0.34        # death rate
v <- 0
w <- 0
x <- simulator1n(t_end, r1, r2, d1, d2, v, w, Nsample)
Fn <- ecdf(x)
Fe <- ecdf(mcell)
plot(Fn, xlim=c(0,300),
main = "ECDF: simulation vs data",
ylab = "Cumulative probability",
xlab = "8-day Cell number ")
lines(Fe, col='red')
dis <- adk.test(mcell, x)
cat(dis$adk[1,2])
## good cell: four-parameter mixed model for the fast population
t_end <- 8
r1 <- 1.1
r2 <- 0.652
d1 <- 0.4
d2 <- 0.34
v <- 0.25
w <- 0.4
y <- simulator1n(t_end, r1, r2, d1, d2, v, w, Nsample)
Fn2 <- ecdf(y)
Fe2 <- ecdf(mcell2)
lines(Fn2, col='blue')
lines(Fe2, col='green')
dis2 <- adk.test(mcell2, y)
cat(dis2$adk[1,2])
## Annotate the plot with the fitted parameters and p-values.
text(50, 0.6, adj = c(0, 0), "Model parameter estimated by minimize AD-distance.")
text(50, 0.5, adj = c(0, 0), "Red: 8-day slow cell population")
text(50, 0.45, adj = c(0, 0), "Black: simulated data using parameters from grid search")
text(50, 0.4, adj = c(0, 0), "Two-parameter model: ( l2=0.652, d2=0.33 )")
text(50, 0.35, adj = c(0, 0), "max p-value: 0.66")
text(50, 0.25, adj = c(0, 0), "Green: 8-day fast cell population")
text(50, 0.2, adj = c(0, 0), "Blue: simulated data using parameters from grid search")
text(50, 0.15, adj = c(0, 0), "Four-parameter mixed model: ( l1=1.1, d1=0.4, v=0.25, w=0.4 )")
text(50, 0.1, adj = c(0, 0), "max p-value: 0.52")
mtext(paste("@", date()), side=1, line=4, adj=1.04, cex=.66)
dev.copy(pdf,'II_result.pdf')
dev.off()
|
99fcf458c187a58b57eb87e184e25515c3d1b052
|
74baaea058ecfa2bb73ee9a82aa66a9f3affdc61
|
/THEPROGRAMMINGCODE.r
|
1c2729deb037653761f03dd8671b0eed939f2665
|
[] |
no_license
|
hthoma20/OEIS-Data
|
1ebcc60784bb6b1fee6c6f81093041f1ada9ce15
|
3d9c8fb9b44673ca0b4b775121dc618daf08eb62
|
refs/heads/master
| 2020-05-07T12:24:14.578198
| 2019-04-25T22:36:11
| 2019-04-25T22:36:11
| 180,502,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
THEPROGRAMMINGCODE.r
|
# Count digit frequencies across OEIS sequences.
# Make sure 'stripped' and 'names' are in the same folder/working directory
# as the code.
# Reading the files; lines beginning with '#' are skipped.
stripped <- read.csv("stripped", header= F, comment.char = '#')
#names <- read.delim("names", header= F, sep= "\n", quote= "", comment.char = '#')
# First column is the sequence id; the rest are terms 1..109.
colnames(stripped) <- c("id", 1:109)
# Can't set rownames, some rows are not unique apparently
# How often each number occurs across all entries
only_sequences <- stripped[,-1]
counts <- table(unlist(only_sequences)) # column 1 dropped so ids aren't counted
ordered_all_counts <- counts[order(-counts)]
saveRDS(ordered_all_counts, file = "results/all_counts")
ordered.df <- as.data.frame(ordered_all_counts)
# This will be used to find a specific number for the dashboard:
counts["5"]
# Nice Histogram:
hist(counts, xlim = c(-20,100), ylim = c(0,500), breaks = 1e6)
# Per-row count of the value 1, computed two equivalent ways
rowcount <- rowSums(stripped[, -1] == '1' )
count.1 <- apply(only_sequences, 1, function(x) length(which(x=="1")))
# Count per row
# TODO: make sure the NA's don't give problems
|
4a421d9aec8f629f84ce55991c814e1225eb7a94
|
19a499470e93af08e40e2e75f874be6ff58d06f3
|
/R/halfcauchy.mle.R
|
1c6e6eb6bf31b5d7a3a55d8a92712c3b2a691d5f
|
[] |
no_license
|
happylearning423/Rfast2
|
c4dd6b6c9d53ec28ad7caabb88c5e078864061fa
|
1cfddcddb560de82fc6674f94fe5b7e006e688f8
|
refs/heads/master
| 2020-11-25T11:25:15.517148
| 2019-12-16T00:10:43
| 2019-12-16T00:10:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,529
|
r
|
halfcauchy.mle.R
|
#[export]
## MLE of the scale parameter of the half-Cauchy distribution, via
## Newton-Raphson on log(scale) (working on the log keeps the scale positive).
## x:   data vector; tol: stop when the log-likelihood gain falls below tol.
## Returns list(iters = iteration count, loglik, scale).
halfcauchy.mle <- function(x, tol = 1e-07) {
n <- length(x)
## Starting value: half the spread between the n/4-th and 3n/4-th order
## statistics (Rfast::nth -- assumed to return the k-th smallest; confirm).
es <- 0.5 * (Rfast::nth(x, 3 * n/4) - Rfast::nth(x, n/4))
logs <- log(es)
x2 <- x^2
down <- 1/(x2 + es^2)
## Log-likelihood up to the constant n * log(2/pi), added back at the end.
lik1 <- n * logs + sum( log(down) )
## First and second derivatives of the log-likelihood w.r.t. log(scale).
der <- n - 2 * es^2 * sum(down)
der2 <- - 4 * es^4 * sum(down^2)
## One Newton step before entering the loop.
logs <- logs - der/der2
es <- exp(logs)
down <- 1/(x2 + es^2)
lik2 <- n * logs + sum( log(down) )
i <- 2
## Iterate Newton steps until the likelihood improvement drops below tol.
while ( lik2 - lik1 > tol ) {
i <- i + 1
lik1 <- lik2
der <- n - 2 * es^2 * sum(down)
der2 <- - 4 * es^4 * sum(down^2)
logs <- logs - der/der2
es <- exp(logs)
down <- 1/(x2 + es^2)
lik2 <- n * logs + sum( log(down) )
}
list(iters = i, loglik = lik2 - n * log(2/pi), scale = es)
}
#[export]
## MLE of the scale of a Cauchy distribution with location fixed at 0.
## Identical Newton-Raphson scheme to halfcauchy.mle above; only the
## additive log-likelihood constant differs (-n*log(pi) vs -n*log(2/pi)).
## x:   data vector; tol: stop when the log-likelihood gain falls below tol.
## Returns list(iters, loglik, scale).
cauchy0.mle <- function(x, tol = 1e-07) {
n <- length(x)
## Starting value from order statistics (Rfast::nth partial sort).
es <- 0.5 * (Rfast::nth(x, 3 * n/4) - Rfast::nth(x, n/4))
logs <- log(es)
x2 <- x^2
down <- 1/(x2 + es^2)
lik1 <- n * logs + sum( log(down) )
## Derivatives of the log-likelihood w.r.t. log(scale).
der <- n - 2 * es^2 * sum(down)
der2 <- - 4 * es^4 * sum(down^2)
logs <- logs - der/der2
es <- exp(logs)
down <- 1/(x2 + es^2)
lik2 <- n * logs + sum( log(down) )
i <- 2
## Newton iterations until convergence of the log-likelihood.
while ( lik2 - lik1 > tol ) {
i <- i + 1
lik1 <- lik2
der <- n - 2 * es^2 * sum(down)
der2 <- - 4 * es^4 * sum(down^2)
logs <- logs - der/der2
es <- exp(logs)
down <- 1/(x2 + es^2)
lik2 <- n * logs + sum( log(down) )
}
list(iters = i, loglik = lik2 - n * log(pi), scale = es)
}
|
e89047cacc04e95251336a79980d16932eb2820f
|
e632bd877de421e7b8026a89393f42062ab38b6c
|
/Initialization/Code/Initialize_Loop_and_Code_Fxns.R
|
3f170cc8912b7ca1aff78fc6b104fb64387663a1
|
[
"MIT"
] |
permissive
|
tykukla/CH2O-CHOO-TRAIN
|
f4d3a90606f008b81920b78eecd7ac8d569bf339
|
34c31954a982cfc3e461f633db02d248a78dbcf5
|
refs/heads/master
| 2023-06-22T12:20:26.517586
| 2023-06-16T01:36:52
| 2023-06-16T01:36:52
| 535,833,456
| 3
| 0
| null | 2022-09-12T20:20:37
| 2022-09-12T20:08:50
|
R
|
UTF-8
|
R
| false
| false
| 9,606
|
r
|
Initialize_Loop_and_Code_Fxns.R
|
# --------------------------------------------------- #
# INITIALIZE BISTABILITY FORCING #
# AND MEBM + CARBON CYCLE SCRIPTS #
# --------------------------------------------------- #
## [1] INITIALIZE FORCING
## Function to initialize a list of parameters
## Each element is a vector of length = model.iters
## where the length depends on the number of unique
## model cases (calculated by this fxn)
##
## For example, to run two CO2 levels at two ice sheet wx coefficients
## we would get four total iterations. Other terms that don't vary
## would just have their value repeated four times in the list
## Build the forcing grid: returns a data.table with one row per model
## iteration, covering every combination of CO2, glacier weathering,
## carbon mass, sensitivity and geography. Time settings vary jointly
## (three starts + three ends = three runs), not combinatorially.
initialize.forcing <- function(co2.force = pCO2.vector, # atmos CO2 vector
glwx.force = glwx.vector, # glacier weathering vector
m.force = m, # vector of mass of carbon injected (or removed)
t.start.force = t.exc.start, # time of volcanic anomaly start
t.end.force = t.exc.end, # time of volcanic anomaly end
SENSITIVITY.force = SENSITIVITY.VEC, # vector of sensitivity values
duration.force = duration, # [years] how long model is run
dt.force = dt, # [years] timestep
CTRL.idx.force = CONTROL.idx, # which sensitivity.vec values are equal to control case value
geog.force = geog.list # geography input files
){
# check whether geography is a list of many geogs, or a single geog
if(is.null(nrow(geog.force))){ # if it doesn't have rows, then the vec is the list
geog.vec <- names(geog.force)
} else{ # if it does have rows, then the vec is just this file
geog.vec <- LandFracFileName
}
# Get iters associated with time (three start and three end times are three simulations, not nine)
length.time.force <- max(c(length(t.start.force), length(t.end.force), length(duration.force), length(dt.force)))
# make a fake vec to expand the grid
fake.vec <- rep(-9999, length.time.force)
# expand grid (get every combination of relevant inputs)
grid.expansion <- expand.grid(co2.force, glwx.force, m.force, SENSITIVITY.force, geog.vec, fake.vec)
colnames(grid.expansion) <- c("co2.force", "glwx.force", "m.force", "SENSITIVITY.force", "geography.force", "x")
# we put fake vec last because we know it will be repeated in row (rep(THIS, each=nrow(grid.expansion)/length(fake.vec)))
# ... remove fake vec (was a placeholder)
grid.expansion <- subset(grid.expansion, select=-x)
# ... add the time force vecs; each repeats to fill the grid, which assumes
# nrow(grid.expansion) is a multiple of each time vector's length
grid.expansion$t.start.force <- rep(t.start.force, each = (nrow(grid.expansion)/length(t.start.force)) )
grid.expansion$t.end.force <- rep(t.end.force, each = (nrow(grid.expansion)/length(t.end.force)) )
grid.expansion$duration.force <- rep(duration.force, each = (nrow(grid.expansion)/length(duration.force)) )
grid.expansion$dt.force <- rep(dt.force, each = (nrow(grid.expansion)/length(dt.force)) )
# ... add control index
if(length(CTRL.idx.force) < 2){
grid.expansion$CTRL.idx <- CTRL.idx.force[1]
} else{
# equation relating index to value in sensitivity.vec
sens.idx.fun <- approxfun(x=SENSITIVITY.force, y=c(1:length(SENSITIVITY.force)))
sens.idx <- sens.idx.fun(grid.expansion$SENSITIVITY.force)
grid.expansion$CTRL.idx <- CTRL.idx.force[sens.idx]
}
# order by control index so control iter is first
grid.expansion <- as.data.table(grid.expansion) # for the 'order' fxn to work
grid.out <- grid.expansion[order(-CTRL.idx)]
# return the result
return(grid.out)
}
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
## [2] INITIALIZE MEBM FILES
## Sources every MEBM / CH2O-CHO initialization script in order; nothing is
## returned, so the function does not need to be assigned to a variable.
initialize.mebm <- function(sourcefile.Dir.in = sourcefile.Dir, # where are the files located
MEBMmain.file.in = MEBMmain.file, # MEBM main solver
ParSave.file.in = ParSave.file, # list of parameters to save for reproducibility
CH2OCHOfxn.file.in = CH2OCHOfxn.file, # source CH2O-CHO functions
MEBMsolver.file.in = MEBMsolver.file, # solver
MEBMconst.file.in = MEBMconst.file, # physical constants
MEBMode.file.in = MEBMode.file, # MEBM ODE to get mse
MEBMhydro.file.in = MEBMhydro.file, # solve hydrologic fluxes
MEBMmultistable_nav.file.in = MEBMmultistable_nav.file, # script to navigate temp boundary conditions and other useful fxns
CH2OCHOwritelog.file.in = CH2OCHOwritelog.file # log file writer
){
# Collect the scripts in the order they must be sourced.
script.names <- c(MEBMmain.file.in, ParSave.file.in, CH2OCHOfxn.file.in,
MEBMsolver.file.in, MEBMconst.file.in, MEBMode.file.in,
MEBMhydro.file.in, MEBMmultistable_nav.file.in,
CH2OCHOwritelog.file.in)
# Source each one from the source directory (source() default local=FALSE,
# so definitions land in the global environment, as before).
for (script.file in script.names) {
source(paste(sourcefile.Dir.in, script.file, sep='/'))
}
}
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
## [3] INITIALIZE CARBON CYCLE
## terms are sourced or just read right to the environment (fxn does not need to be run to a variable)
## NOTE: this function works entirely by side effect -- every assignment uses
## `<<-`, so inputs and empty accumulators land in the global environment.
initialize.c.cycle <- function(sourcefile.Dir.in = sourcefile.Ccycle){
# ------------------------------------------------------------------------- #
# LOAD GEOCHEM INITIALIZATION FILES *************************************** #
# ------------------------------------------------------------------------- #
# CLiBeSO Files
sw.file <<- "seawater values over time_interp.txt" # these go to global env. because they're read into the log file
Berner.volc.file <<- "Berner Volcanism Interp.txt"
# sens.file <<- "PaleozoicSensValues.txt"
CO2.file <<- "InterpCO2_Aug2018.txt"
# read in the tables from the carbon-cycle source directory
sw.chem <<- read.table(file = paste(sourcefile.Dir.in, sw.file, sep='/'), header = TRUE)
Berner.volc <<- read.table(file = paste(sourcefile.Dir.in, Berner.volc.file, sep='/'), header = TRUE)
# sens.parm <<- read.table(file = paste(sourcefile.Dir, sens.file, sep='/'), header = TRUE)
interpCO2 <<- read.table(file = paste(sourcefile.Dir.in, CO2.file, sep='/'), header = TRUE)
# Empty global accumulators, filled in during the model run.
Ca.i <<- vector()
Mg.i <<- vector()
k.run <<- vector()
clim.sens <<- Clim.Sens <<- vector()
degassing <<- vector()
# pCO2 <<- vector()
# temp.i <<- vector()
Age <<- vector()
output <<- vector(mode='list',length=length(sw.chem$Age))
## MODIFY BELOW TO INITIALIZE WITH PALEO CONDITIONS -- (Turned off for now)
## NOTE: some of these are not used in the calculations for certain scripts
f=1 # set to run for one time step (use for-loop for Phanerozoic run instead)
y=1 # time index in input data
Age[f] <<- sw.chem$Age[y]
Ca.i[f] <<- sw.chem$Ca[y]
Mg.i[f] <<- sw.chem$Mg[y]
degassing[f] <<- 1 # Berner.volc$Fvolc[y]
}
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
## [4] PRINT a summary of the simulation
# Print a pre-flight summary of the forcing data frame: the total number of
# simulations (one per row of `df.force`) and which forcing dimensions
# actually vary across them. Called for its console output; returns NULL.
pre_run.summary <- function(df.force){
  n_unique <- function(v) length(unique(v))

  # The three time-related columns jointly define one "time grid" dimension;
  # report the largest of their unique-value counts.
  n.time <- max(n_unique(df.force$t.start.force),
                n_unique(df.force$t.end.force),
                n_unique(df.force$duration.force))

  dim.counts <- c("time grids"           = n.time,
                  "geographies"          = n_unique(df.force$geography.force),
                  "param values"         = n_unique(df.force$SENSITIVITY.force),
                  "init co2s"            = n_unique(df.force$co2.force),
                  "carbon perturbations" = n_unique(df.force$m.force),
                  "glacial wx factors"   = n_unique(df.force$glwx.force))

  # Only dimensions with more than one distinct value are worth reporting.
  varied <- dim.counts[dim.counts > 1]
  summary.txt <- paste(varied, names(varied), collapse = ' X ')

  bar <- "======================================================="
  cat(paste(bar,
            paste("TOTAL SIMULATIONS TO RUN:", nrow(df.force)),
            "**",
            paste("One simulation for each:", summary.txt),
            bar,
            sep = '\n'))
}
|
fc99b28b66514bf03c43761ca04141feb62cbdff
|
16d6f9a925fb8ae78938baf67173afc7b4e3f94b
|
/tests/testthat/test-disjoin.R
|
076320078f2b73d279d92739dbc1ca2c3ef31d38
|
[] |
no_license
|
liupfskygre/plyranges
|
809851435ac1d9a60df8400b8c7c409862966418
|
c82b7eb8ec31478e0f8439207d20897f0c102a6f
|
refs/heads/master
| 2023-06-04T10:52:44.864177
| 2021-06-28T01:15:43
| 2021-06-28T01:15:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,307
|
r
|
test-disjoin.R
|
context("disjoin ranges")
# Verify that plyranges' disjoin_ranges()/disjoin_ranges_directed() reproduce
# the reference behaviour of IRanges::disjoin()/GenomicRanges::disjoin().
test_that("matches IRanges/GRanges tests", {
  # Disjoining an empty IRanges is a no-op.
  x <- IRanges()
  expect_identical(x, disjoin_ranges(x))
  # Overlapping/adjacent integer ranges collapse into the expected
  # non-overlapping pieces.
  ir <- IRanges(c(1, 21, 10, 1, 15, 5, 20, 20),
                c(6, 20, 9, 3, 14, 11, 20, 19))
  correct_ir <- IRanges(c(1, 4, 5, 7, 10, 20), c(3, 4, 6, 9, 11, 20))
  test_ir <- disjoin_ranges(ir)
  expect_identical(test_ir, correct_ir)
  # check revmap: each disjoint piece records which input ranges produced it
  test_ir <- ir %>%
    mutate(i = 1:n()) %>%
    disjoin_ranges(revmap = IRanges::IntegerList(i))
  mcols(correct_ir)$revmap <- IRanges::IntegerList(c(1, 4), 1, c(1, 6), 6, 6, 7)
  expect_identical(test_ir, correct_ir)
  # -- granges: same checks on stranded genomic ranges
  gr <- GRanges(Rle(factor(c("chr1", "chr2", "chr1", "chr3")), c(1, 3, 2, 4)),
                IRanges(1:10, width=10:1, names=head(letters, 10)),
                Rle(c("-", "+", "*", "+", "-"), c(1, 2, 2, 3, 2)),
                score=1:10, GC=seq(1, 0, length=10),
                seqinfo=Seqinfo(paste("chr", 1:3, sep="")))
  correct_gr <- GRanges(Rle(c("chr1", "chr2", "chr3"), c(3, 3, 4)),
                        IRanges(start=c(6, 1, 5, 2, 3, 4, 7, 8, 9, 10),
                                end=c(10, 10, 10, 2, 10, 10, 7, 10, 9, 10)),
                        c("+", "-", "*", "+", "+", "*", "+", "+", "-", "-"))
  # matches directed (strand-aware) disjoin
  expect_identical(disjoin_ranges_directed(gr), correct_gr)
  # this is the same as disjoin unstranded on correct_gr
  expect_identical(disjoin_ranges(gr), disjoin_ranges(correct_gr))
  # revmap on a stranded, overlapping example
  gr <- GRanges(Rle(c("chr1", "chr3"), c(2, 2)),
                IRanges(c(8, 6, 8, 6), c(11, 15, 11, 15),
                        names=c("k", "l", "m", "n")),
                c("-", "-", "+", "*"),
                score=11:14, GC=c(.2, .3, .3, .1))
  correct_gr <- GRanges(Rle(c("chr1", "chr3"), c(3, 2)),
                        IRanges(c(6, 8, 12, 8, 6), c(7, 11, 15, 11, 15)),
                        Rle(c("-", "+", "*"), c(3, 1, 1)))
  mcols(correct_gr)$revmap <- IRanges::IntegerList(2, 1:2, 2, 3, 4)
  expect_identical(gr %>%
                     mutate(i = 1:n()) %>%
                     disjoin_ranges_directed(revmap = IRanges::IntegerList(i)),
                   correct_gr)
  # grouping works as expected: disjoin within each list element/group
  grl <- GRangesList( GRanges(Rle(factor(c("chr1", "chr2", "chr1", "chr3")), c(1, 3, 2, 4)),
                              IRanges(1:10, width=10:1),
                              Rle(c("-", "+", "*", "+", "-"), c(1, 2, 2, 3, 2)),
                              score=1:10, GC=seq(1, 0, length=10),
                              seqinfo=Seqinfo(paste("chr", 1:3, sep=""))),
                      GRanges("1", IRanges(1, 10), score=21, GC=.21),
                      GRanges(),
                      GRanges(Rle(c("chr1", "chr3"), c(2, 2)),
                              IRanges(c(8, 6, 8, 6), c(11, 15, 11, 15)),
                              strand(c("-", "-","+","*")),
                              score=41:44, GC=c(.41, .42, .43, .44)))
  gr_by_group <- stack(grl, "name") %>% group_by(name)
  # Reference: strand-ignoring disjoin per list element, then stacked.
  target <- stack(disjoin(grl, ignore.strand = TRUE), "name")
  current <- disjoin_ranges(gr_by_group) %>%
    mutate(name = Rle(name))
  expect_identical(target, current)
})
# Compare disjoin_ranges() against the HelloRanges "multiinter" fixture data.
test_that("matches HelloRanges multinter", {
  # FIX: register the working-directory restore with on.exit() so the wd is
  # put back even if reading a fixture or an expectation errors mid-test;
  # previously a failure before the final setwd() leaked the changed wd into
  # every subsequent test.
  oldwd <- getwd()
  on.exit(setwd(oldwd), add = TRUE)
  setwd(system.file("unitTests", "data", "multiinter", package="HelloRanges"))
  bed_files <- list.files(pattern = ".bed$")
  # Expected disjoint pieces and, per piece, which input files cover it.
  correct_gr <- GRanges("chr1",
                        IRanges(c(7, 9, 13, 16, 21, 23, 31, 33),
                                c(8, 12, 15, 20, 22, 30, 32, 34)),
                        i=IRanges::IntegerList(1, c(1,3), 1:3, 1:2, 2, 1:2, 2, 3))
  # Tag each BED file's ranges with the file it came from.
  gr_l <- S4Vectors::List(lapply(bed_files, function(x) {
    mutate(read_bed(x), grp = sub(".bed$", "", basename(x)))
  }))
  gr_by_group_r <- unlist(gr_l) %>%
    mutate(grp = factor(grp, levels = c("a", "b", "c"))) %>%
    group_by(grp) %>%
    reduce_ranges()
  # Disjoin with the group index recorded as an IntegerList column.
  test_gr <- gr_by_group_r %>%
    mutate(i = factor(as.integer(grp))) %>%
    disjoin_ranges(i = IRanges::IntegerList(i))
  expect_identical(correct_gr, test_gr)
  # with names in place of integer
  mcols(correct_gr)$i <- as(
    extractList(factor(c("a", "b", "c")), mcols(correct_gr)$i),
    "SimpleFactorList"
  )
  test_gr <- gr_by_group_r %>%
    disjoin_ranges(i = IRanges::FactorList(grp, compress = FALSE))
  expect_identical(correct_gr, test_gr)
  # Kept for clarity; on.exit() above also restores the wd.
  setwd(oldwd)
})
|
3c584a476c51402733e1cc1c854edf03c438e044
|
f6c9f760bf10b1f7f8ac7ff3653e4fd180c8720f
|
/tests/testthat/test-seq_ttest.R
|
3521d654829e9c25f997cb5f869358108a5cb8fe
|
[] |
no_license
|
cran/sprtt
|
05a0f349644683afe6f7c8464945f89938a2d8ef
|
e6bde9996f96d6d559235a4ad99c84fd48b386db
|
refs/heads/master
| 2023-07-20T08:43:45.684070
| 2023-07-06T12:50:02
| 2023-07-06T12:50:02
| 393,568,991
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,982
|
r
|
test-seq_ttest.R
|
#* @testing seq_ttest
#*
# MARTIN Skript ----------------------------------------------------------
# Likelihood ratio for the sequential probability ratio t-test.
#
# Runs the appropriate t-test (one-sample, pooled two-sample, or paired),
# then forms the ratio of the noncentral to the central t (or F, for a
# two-sided alternative) density at the observed statistic.
#
# Args:
#   x, y:  numeric samples (y unused for type "one.sample").
#   mu:    null-hypothesis mean (difference).
#   d:     Cohen's d under H1.
#   type:  "one.sample", "two.sample" or "paired".
#   alt:   "two.sided", "less" or "greater".
# Returns: the likelihood ratio (a single numeric value).
.sprt.lr <- function(x, y, mu, d, type, alt){
  # Fit the matching t-test.
  if (type == "one.sample") {
    fit <- t.test(x, mu = mu)
  } else if (type == "two.sample") {
    fit <- t.test(x, y, mu = mu, var.equal = TRUE)
  } else {
    fit <- t.test(x, y, mu = mu, paired = TRUE)
  }
  # Noncentrality parameter implied by effect size d under H1.
  if (type == "two.sample") {
    noncentrality <- d / sqrt(1 / length(x) + 1 / length(y))
  } else {
    noncentrality <- d * sqrt(length(x))
  }
  # Flip the sign of the statistic for a left-sided alternative.
  observed_t <- as.vector(fit$statistic)
  if (alt == "less") {
    observed_t <- -observed_t
  }
  dof <- as.vector(fit$parameter)
  if (alt == "two.sided") {
    # Two-sided: work on the F scale (t^2) with squared noncentrality.
    df(observed_t^2, df1 = 1, df2 = dof, ncp = noncentrality^2) /
      df(observed_t^2, df1 = 1, df2 = dof)
  } else {
    dt(observed_t, dof, ncp = noncentrality) / dt(observed_t, dof)
  }
}
# Formula interface for the sequential probability ratio t-test (SPRT).
# Validates the inputs, splits the response by a two-level grouping factor,
# and delegates the likelihood-ratio computation to .sprt.result().
#
# Args:
#   formula:      model formula of the form response ~ group.
#   data:         optional data frame in which to evaluate the formula.
#   mu:           mean difference under H0.
#   d:            Cohen's d under H1 (must be > 0).
#   alpha, power: error probabilities defining the Wald decision thresholds.
#   alternative:  "two.sided", "less" or "greater".
#   paired:       treat the two groups as paired samples?
# Returns: an object of class "sprt" (statistic, decision, thresholds,
#   plus the fields consumed by print.sprt).
.sprt.formula <- function(formula, data = NULL,
mu = 0, d, alpha = 0.05, power = 0.95,
alternative = "two.sided", paired = FALSE){
##### CHECK INPUT
match.arg(alternative, c("two.sided", "less", "greater"))
if(!(alpha > 0 && alpha < 1))
stop("Invalid argument <alpha>: Probabilities must be in ]0;1[.")
if(!(power > 0 && power < 1))
stop("Invalid argument <power>: Probabilities must be in ]0;1[.")
if(d <= 0)
stop("Invalid argument <d>: Must be greater than 0.")
if(!is.numeric(mu))
stop("Invalid argument <mu>: Must be numeric.")
if(!is.logical(paired))
stop("Invalid argument <paired>: Must be logical.")
# The formula must be exactly two-sided with a single grouping term.
if ((length(formula) != 3L) || (length(formula[[3]]) != 1L))
stop("'formula' is incorrect. Please specify as 'x~y'.")
temp <- model.frame(formula, data)
x <- temp[,1]
y <- temp[,2]
# Drop rows where either the response or the group is missing.
whichNA <- is.na(x) | is.na(y)
x <- x[!whichNA]
y <- y[!whichNA]
if(!is.numeric(x))
stop(paste("Dependent variable", names(temp)[1], "must be numeric."))
if(length(unique(y))!=2)
stop(paste("Grouping factor", names(temp)[2], "must contain exactly two levels."))
if(paired){
# Pairing requires equally sized groups.
if(!(table(y)[[1]]==table(y)[[2]]))
stop("Unequal number of observations per group. Independent samples?")
}else{
if(length(x)<3)
stop("SPRT for two independent samples requires at least 3 observations.")
}
# At least one group must show nonzero variance (sd is NA for n = 1).
sd.check <- tapply(x, INDEX=y, FUN=sd)
sd.check <- ifelse(is.na(sd.check), 0, sd.check)
if(max(sd.check) == 0)
stop("Can't perform SPRT on constant data.")
##### RETURN ARGUMENTS
# Split the response into the two groups by factor level order.
y <- as.factor(y)
x.1 <- x[y == levels(y)[1]]
x.2 <- x[y == levels(y)[2]]
data.name <- paste(names(temp)[1], "by", names(temp)[2])
method <- ifelse(paired, "Paired SPRT t-test", "Two-Sample SPRT t-test")
null.value <- mu
attr(null.value, "names") <- "difference in means"
if(paired){
estimate <- mean(x.1 - x.2)
attr(estimate, "names") <- "mean of the differences"
}else{
estimate <- c(mean(x.1), mean(x.2))
attr(estimate, "names") <- c("mean in group 1", "mean in group 2")
}
# Computation arguments for .sprt.result() ...
arg.list <- list(x = x.1, y = x.2,
mu = mu, d = d, alpha = alpha, power = power,
type = ifelse(paired, "paired", "two.sample"),
alt = alternative)
# ... and display-only fields consumed by print.sprt().
printarg.list <- list(estimate = estimate,
null.value = null.value,
alternative = alternative,
effect.size = d,
method = method,
data.name = data.name)
result <- do.call(.sprt.result, arg.list)
output <- c(result, printarg.list)
class(output) <- "sprt"
return(output)
}
# Default (vector) interface for the sequential probability ratio t-test.
# Handles one-sample (y = NULL), two-sample and paired designs, validates
# the inputs, and delegates the computation to .sprt.result().
#
# Args mirror .sprt.formula, except the data arrive as numeric vectors:
#   x:            numeric sample.
#   y:            optional second numeric sample (NULL => one-sample test).
#   mu, d, alpha, power, alternative, paired: as in .sprt.formula.
# Returns: an object of class "sprt".
.sprt.default <- function(x, y = NULL,
mu = 0, d, alpha = 0.05, power = 0.95,
alternative = "two.sided", paired = FALSE){
##### CHECK INPUT
match.arg(alternative, c("two.sided", "less", "greater"))
if(!(alpha > 0 && alpha < 1))
stop("Invalid argument <alpha>: Probabilities must be in ]0;1[.")
if(!(power > 0 && power < 1))
stop("Invalid argument <power>: Probabilities must be in ]0;1[.")
if(d<=0)
stop("Invalid argument <d>: Must be greater than 0.")
if(!is.numeric(mu))
stop("Invalid argument <mu>: Must be numeric.")
if(!is.null(y)){
# Two-sample / paired branch. Capture argument expressions for messages.
x.name <- deparse(substitute(x))
y.name <- deparse(substitute(y))
data.name <- paste(x.name, "and", y.name)
# NOTE(review): `call. = F` uses the reassignable shorthand F for FALSE.
if(!(is.atomic(x) && is.null(dim(x))))
warning(paste(x.name, "is not a vector. This might have caused problems."), call. = F)
if(!(is.atomic(y) && is.null(dim(y))))
warning(paste(y.name, "is not a vector. This might have caused problems."), call. = F)
if(is.factor(y))
stop("Is y a grouping factor? Use formula interface x ~ y.")
if(!is.numeric(x))
stop(paste("Invalid argument:", x.name, "must be numeric."))
if(!is.numeric(y))
stop(paste("Invalid argument:", y.name, "must be numeric."))
if(!paired && (length(x) + length(y) < 3))
stop("SPRT for two independent samples requires at least 3 observations.")
# At least one sample must show nonzero variance (sd is NA for n = 1).
sd.check <- c(sd(x), sd(y))
sd.check <- ifelse(is.na(sd.check), 0, sd.check)
if(!(max(sd.check) > 0))
stop("Can't perform SPRT on constant data.")
if(!is.logical(paired))
stop("Invalid argument <paired>: Must be logical.")
type <- ifelse(paired, "paired", "two.sample")
method <- ifelse(paired, "Paired SPRT t-test", "Two-Sample SPRT t-test")
null.value <- mu
attr(null.value, "names") <- "difference in means"
if(paired){
if(length(x) != length(y))
stop("Unequal number of observations per group. Independent samples?")
# Drop incomplete pairs.
whichNA <- is.na(x) | is.na(y)
x <- x[!whichNA]
y <- y[!whichNA]
estimate <- mean(x - y)
attr(estimate, "names") <- "mean of the differences"
}else{
# Independent samples: drop NAs per sample.
x <- x[!is.na(x)]
y <- y[!is.na(y)]
estimate <- c(mean(x), mean(y))
attr(estimate, "names") <- c(paste("mean of", x.name), paste("mean of", y.name))
}
}else{
# One-sample branch.
data.name <- deparse(substitute(x))
x <- x[!is.na(x)]
if(!is.numeric(x))
stop(paste("Invalid argument:", data.name, "must be numeric."))
sd.check <- ifelse(is.na(sd(x)), 0, sd(x))
if(!(sd.check > 0))
stop("Can't perform SPRT on constant data.")
type <- "one.sample"
method <- "One-Sample SPRT t-test"
null.value <- mu
attr(null.value, "names") <- "mean"
estimate <- mean(x)
attr(estimate, "names") <- "mean of x"
}
##### RETURN ARGUMENTS
# Computation arguments for .sprt.result() ...
arg.list <- list(x = x, y = y,
mu = mu, d = d, alpha = alpha, power = power,
type = type,
alt = alternative)
# ... and display-only fields consumed by print.sprt().
printarg.list <- list(estimate = estimate,
null.value = null.value,
alternative = alternative,
effect.size = d,
method = method,
data.name = data.name)
result <- do.call(.sprt.result, arg.list)
output <- c(result, printarg.list)
class(output) <- "sprt"
return(output)
}
# Core SPRT decision step: compute the likelihood ratio via .sprt.lr() and
# compare it against the Wald thresholds A = power/alpha (accept H1) and
# B = (1-power)/(1-alpha) (accept H0); otherwise continue sampling.
# Returns a list with the named statistic, the decision string, the error
# probabilities and the two thresholds.
.sprt.result <- function(x, y, mu, d, alpha, power, type, alt){
  upper <- power/alpha
  lower <- (1 - power)/(1 - alpha)
  lr <- .sprt.lr(x, y, mu, d, type, alt)
  decision <- if (lr >= upper) {
    "accept H1"
  } else if (lr <= lower) {
    "accept H0"
  } else {
    "continue sampling"
  }
  attr(lr, "names") <- "likelihood ratio"
  parameters <- c("Type I error" = alpha, "Power" = power)
  thresholds <- c("lower" = lower, "upper" = upper)
  list(statistic = lr,
       decision = decision,
       parameters = parameters,
       thresholds = thresholds)
}
# S3 print method for "sprt" objects.
# FIX: added the `...` argument so the signature is compatible with the
# print(x, ...) generic (required for S3 method consistency), and the method
# now returns its argument invisibly, as print methods conventionally do.
print.sprt <- function(x, ...){
  cat(" ", x$method, "\n")
  cat("\ndata:", x$data.name, "\n")
  cat(names(x$statistic), " = ", round(x$statistic, digits = 5), ", decision = ", x$decision, sep="")
  cat("\nSPRT thresholds:\n")
  print(round(x$thresholds, digits = 5))
  cat("alternative hypothesis: true", names(x$null.value), "is",
      ifelse(x$alternative=="two.sided", "not equal to", paste(x$alternative, "than")), x$null.value)
  cat("\neffect size: Cohen's d =", x$effect.size, "\n")
  print(x$parameters)
  cat("sample estimates:\n")
  print(round(x$estimate, digits = 5))
  invisible(x)
}
# Public entry point: dispatches on the class of the first argument to the
# internal ".sprt" S3 methods (.sprt.formula for a formula, .sprt.default
# otherwise).
sprt.t.test <- function(...) UseMethod(".sprt")
# NOTE(review): this is a byte-identical duplicate of the .sprt.result
# definition that appears earlier in this file; this later copy silently
# overrides the earlier one. Consider deleting one of the two.
#
# Core SPRT decision step: computes the likelihood ratio and compares it
# against the Wald thresholds A = power/alpha and B = (1-power)/(1-alpha).
.sprt.result <- function(x, y, mu, d, alpha, power, type, alt){
  A <- power/alpha
  B <- (1 - power)/(1 - alpha)
  lr <- .sprt.lr(x, y, mu, d, type, alt)
  if(lr >= A){
    decision <- "accept H1"
  }else if(lr <= B){
    decision <- "accept H0"
  }else{
    decision <- "continue sampling"
  }
  attr(lr, "names") <- "likelihood ratio"
  parameters <- c(alpha, power)
  attr(parameters, "names") <- c("Type I error", "Power")
  thresholds <- c(B, A)
  attr(thresholds, "names") <- c("lower", "upper")
  return(list(statistic = lr,
              decision = decision,
              parameters = parameters,
              thresholds = thresholds))
}
# S3 print method for "sprt" objects.
# NOTE(review): this definition duplicates print.sprt defined earlier in this
# file; being later, it is the copy that takes effect. Consider deleting one.
# FIX: added the `...` argument so the signature is compatible with the
# print(x, ...) generic, and the method returns its argument invisibly,
# as print methods conventionally do.
print.sprt <- function(x, ...){
  cat(" ", x$method, "\n")
  cat("\ndata:", x$data.name, "\n")
  cat(names(x$statistic), " = ", round(x$statistic, digits = 5), ", decision = ", x$decision, sep="")
  cat("\nSPRT thresholds:\n")
  print(round(x$thresholds, digits = 5))
  cat("alternative hypothesis: true", names(x$null.value), "is",
      ifelse(x$alternative=="two.sided", "not equal to", paste(x$alternative, "than")), x$null.value)
  cat("\neffect size: Cohen's d =", x$effect.size, "\n")
  print(x$parameters)
  cat("sample estimates:\n")
  print(round(x$estimate, digits = 5))
  invisible(x)
}
# NOTE(review): duplicate of the sprt.t.test definition earlier in this file;
# this later copy overrides the earlier (identical) one. Consider removing.
sprt.t.test <- function(...) UseMethod(".sprt")
# x <- rnorm(50)
# sprt.t.test(x, d = 0.3)
#-----------------------------------------------------------------
context("seq_ttest: test main function")
# Compare the package's seq_ttest() against the original reference
# implementation (sprt.t.test, defined above) across all test designs.
test_that("seq_ttest: comparison results with original script from m. schnuerch", {
  # FIX: seed the RNG — previously every run drew fresh random samples,
  # making any failure non-reproducible.
  set.seed(1234)
  # FIX (throughout): the closeness checks now wrap the difference in abs();
  # the original `a - b < 1e-5` passed whenever a was merely smaller than b,
  # no matter how large the discrepancy.
  x <- rnorm(50)
  d <- 0.8
  results_original <- sprt.t.test(x = x, d = d, power = 0.8)
  results_sprtt <- seq_ttest(x, d = d, power = 0.8)
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
  # Two-sample design via the formula interface.
  x <- rnorm(20)
  y <- as.factor(c(rep(1,10), rep(2,10)))
  d <- 0.95
  results_original <- sprt.t.test(x ~ y, d = d)
  results_sprtt <- seq_ttest(x ~ y, d = d)
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
  # same data, but different input
  x_1 <- x[1:(length(x) * 0.5)]
  x_2 <- x[(length(x) * 0.5 + 1):length(x)]
  results_sprtt2 <- seq_ttest(x_1, x_2, d = d)
  expect_true(abs(results_sprtt@likelihood_ratio_log - results_sprtt2@likelihood_ratio_log) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_sprtt2@decision)
  # One-sample: numeric vs. one-sided-formula input must agree.
  x <- rnorm(20)
  y <- as.factor(c(rep(1,10), rep(2,10)))
  d <- 0.95
  results_numeric <- seq_ttest(x, d = d)
  results_formula <- seq_ttest(x ~ 1, d = d)
  expect_true(abs(results_formula@likelihood_ratio - results_numeric@likelihood_ratio) < 1e-5)
  # FIX: compare the two freshly computed results; the original line
  # re-compared stale objects from the previous stanza (copy-paste slip).
  expect_equal(results_formula@decision,
               results_numeric@decision)
  # Small two-sample design.
  x_1 <- rnorm(5)
  x_2 <- rnorm(5)
  x <- c(x_1, x_2)
  y <- as.factor(c(rep(1,5),rep(2,5)))
  results_original <- sprt.t.test(x ~ y, d = d)
  results_sprtt <- seq_ttest(x ~ y, d = d)
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
  # Two-sample design via two numeric vectors.
  x_1 <- rnorm(10)
  x_2 <- rnorm(10)
  x <- c(x_1, x_2)
  y <- as.factor(c(rep(1,10),rep(2,10)))
  results_original <- sprt.t.test(x_1, x_2, d = d)
  results_sprtt <- seq_ttest(x_1, x_2, d = d)
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
  # same data, but different input (paired design)
  results_sprtt <- seq_ttest(x_1, x_2, d = 0.5, paired = TRUE)
  results_sprtt2 <- seq_ttest(x ~ y, d = 0.5, paired = TRUE)
  expect_true(abs(results_sprtt@likelihood_ratio_log - results_sprtt2@likelihood_ratio_log) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_sprtt2@decision)
  # Paired design, all three alternatives. (An unused t.test() call was
  # removed here; it had no assertions attached.)
  d <- 0.7
  x <- rnorm(30)
  y <- rnorm(30)
  paired <- TRUE
  results_original <- sprt.t.test(x, y, d = d, paired = paired, alt = "two.sided")
  results_sprtt <- seq_ttest(x, y, d = d, paired = paired, alt = "two.sided")
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
  results_original <- sprt.t.test(x, y, d = d, paired = paired, alt = "less")
  results_sprtt <- seq_ttest(x, y, d = d, paired = paired, alt = "less")
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
  results_original <- sprt.t.test(x, y, d = d, paired = paired, alt = "greater")
  results_sprtt <- seq_ttest(x, y, d = d, paired = paired, alt = "greater")
  expect_true(abs(results_sprtt@likelihood_ratio - results_original$statistic[[1]]) < 1e-5)
  expect_equal(results_sprtt@decision,
               results_original$decision)
})
# test_that("", {
#
#
# })
|
a2faaa4a8278671183927936358dfc091b711162
|
87ff42f66bbb5ea37a18d68e4a1e932951ace828
|
/run_analysis.r
|
b39bba321e704c0679e16cd1225b7155b9f75483
|
[] |
no_license
|
lasu/Getting-and-Cleaning-Data_Course-Project
|
32f7c03282342484a67e04f60ec2ad6afe0fcf5e
|
5d1c0b2bc3d552aae1507cab53c9cd221224ceba
|
refs/heads/master
| 2020-04-06T06:38:51.427423
| 2014-08-24T23:06:48
| 2014-08-24T23:06:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,201
|
r
|
run_analysis.r
|
# UCI HAR "Getting and Cleaning Data" course-project script.
# Assumes the working directory is the unzipped UCI HAR Dataset folder.
##Step1: Merges the training and the test sets to create one data set.
#features
trainData <- read.table("./train/X_train.txt") #dim(trainData) => 7352*561
testData <- read.table("./test/X_test.txt") #dim(testData) => 2947*561
Data <- rbind(trainData,testData) #dim(Data) => 10299*561
#activity labels
trainLabel <- read.table("./train/y_train.txt") #dim(trainLabel) => 7352*1
testLabel <- read.table("./test/y_test.txt") #dim(testLabel) => 2947*1
Label <- rbind(trainLabel,testLabel) #dim(Label) => 10299*1
#subject
trainSubject <- read.table("./train/subject_train.txt") #dim(trainSubject) => 7532*1
testSubject <- read.table("./test/subject_test.txt")#dim(testSubject) => 2947*1
Subject <- rbind(trainSubject,testSubject)#dim(Subject) => 10299*1
##Step2: Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("features.txt") #dim(features) => 561*2 (561rows features)
# Keep only features named like "...mean()" or "...std()".
meanstd <- grep("mean\\(\\)|std\\(\\)", features[, 2]) #find mean or std features
Data <- Data[,meanstd] #dim(Data) => 10299*66, remove others(only choose mean and std features)
names(Data) <- features[meanstd,2] #rename column names to features
# Strip "()" and "-" so names are valid, clean identifiers.
names(Data) <- gsub("\\(\\)",replacement="",names(Data)) #remove '(' and ')'
names(Data) <- gsub("-",replacement="",names(Data)) #remove '-'
##Step3: Uses descriptive activity names to name the activities in the data set.
activity <- read.table("activity_labels.txt") #6 activities
Label[,1] <- activity[Label[,1],2] #rename (1,2...-> WALKING,WALKING_UPSTAIRS...)
##Step4: Appropriately labels the data set with descriptive activity names.
names(Label) <- "activity"
names(Subject) <- "subject"
tidyData <- cbind(Subject,Label,Data) #merge 3 dataset
#write.table(tidyData, "tidy_data.txt",row.name=FALSE) #creat a new file(first file)
##Step5: Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# NOTE(review): this block assumes tidyData rows are already ordered by
# subject (train subjects first, then test) — the upperb/lowerb bookkeeping
# slices contiguous per-subject row ranges. Confirm against the input data.
subject_n <- split(tidyData$activity,tidyData$subject) #30 subjects
subject_pre <- lapply(subject_n,length) #every subjects have how much datas(rows)
upperb <- 1 #upper bound of split matrix
lowerb <- 0 #lower bound of split matrix
subjects <- length(table(Subject)) #30 subjects
activities <- dim(activity)[1] #6 activities
current <- matrix(NA, nrow=subjects*activities, ncol=dim(tidyData)[2]) #initial final dataset
current <- as.data.frame(current)
temp <- matrix(NA,nrow=activities,ncol=dim(tidyData)[2]-2)
temp2 <- NULL
current[,1] <- sort(rep(seq(1:subjects),activities)) #1 1 1 1 1 1 2 2 2 2 2 2 ....
current[,2] <- rep(activity[,2],subjects) #WALKING,WALKING_UPSTAIRS...
# NOTE(review): temp2 grows by rbind() inside the loop — O(n^2) copies;
# aggregate()/tapply over the whole frame would avoid the manual slicing.
for(i in 1:subjects){ #30 subjects
  lowerb <- lowerb + as.numeric(subject_pre[i])
  m <- tidyData[upperb:lowerb,] #split dataset into 6 subsets(subjects)
  for(k in 3:68){ #66 variables
    r <- tapply(m[,k],m$activity,mean) #mean per activity for variable k
    temp[,k-2] <- r #bind 66 variables
  }
  temp2 <- rbind(temp2,temp) #bind 6 subjects
  upperb <- lowerb + 1
}
for(i in 3:dim(tidyData)[2]){ #bind with subject and activity
  current[,i] <- temp2[,i-2]
}
colnames(current) <- names(tidyData) #rename column names
write.table(current, "tidy_data_second.txt",row.name=FALSE) #creat a new file(second file)
|
a2a8aa613cc9fe0e8c10b6a9000f12570ae852ad
|
3da5a8c987d75be666387208385374722cb39254
|
/Tecnicas de Agrupación y Segmentación/Apuntes de clase. 07-11-2019. Distancias.R
|
4cbfe35d449209e65617a0f9265aae1a48827ea6
|
[] |
no_license
|
lulisaila/Master-CUNEF-2019-2020-Data-Science
|
84d9805f91bcb1aa51cfc1443af4896af5d3c895
|
6ffb05557594baabe22de6e73c3976860dfc2fb6
|
refs/heads/master
| 2022-12-16T14:32:16.937488
| 2020-09-23T09:43:30
| 2020-09-23T09:43:30
| 265,264,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,979
|
r
|
Apuntes de clase. 07-11-2019. Distancias.R
|
##### CLASS NOTES 07/11/2019. Distances (classical multidimensional scaling)
## LUCÍA SAIZ LAPIQUE
library(readr)
## Load the data (inter-city distance matrix)
Dist <- read_delim("C:/Users/Luli/Desktop/TODO/GitHub/lulisaila-Master_Data_Science_CUNEF/Tecnicas de Agrupación y Segmentación/BBDD/EMDDistancias.csv", ";", escape_double = FALSE, trim_ws = TRUE)
Dist
# drop the first column (the city names)
dist <- Dist[,-1]
dist
# convert the data frame into a plain numeric matrix:
dist2 <- as.matrix(dist)
dist2
# keep the city labels from the first column we excluded above
ciudad <- Dist[,1]
ciudad
# Goal: transform the distance matrix — instead of each city listed with its
# pairwise distances, place the cities on a 2-D map according to their
# relative distances. We need the plot coordinates for each city, and then
# we will attach the city-name labels to each coordinate.
d <- as.dist(as(dist, "matrix"))
d # crosses Albacete with every other city and shows all the distances —
# i.e. everything below the main diagonal of the matrix.
# The object d is now a proper distance matrix, not just any matrix.
# Square every distance, producing the matrix of squared distances.
# Euclidean distances (the "ordinary" distance between two points in
# Euclidean space, derived from the Pythagorean theorem) work with squares.
dist_cuad <- dist2 * dist2
dist_cuad
# Double-centering matrix: the identity matrix (Mj) N x N minus 1/n
# (n = number of variables; here one seventh) times a matrix of ones n x n.
# Goal: obtain the matrix "P".
# First build the 7x7 identity matrix:
ident <- diag(7)
# build a vector of ones, repeated 7 times — a row of 7 ones replicated 7x
uno <- rep(1, 7)
Uno <- rep(uno, each = 7) # row vector of 49 ones
dim(Uno) = c(7, 7) # reshape into a 7x7 matrix from the vector of ones
Uno
# Goal: J = ident - 1/n Uno
# split the computation into two steps:
jota_1 <- 1/7 * Uno
jota <- ident - jota_1
jota
# Apply the double centering:
# matrix product
B_1 <- jota %*% dist_cuad %*% jota
B <- -1/2 * B_1
round(B, 3)
B # 7x7 square matrix
# extract the eigenvectors and eigenvalues of our matrix
AV <- eigen(B)
AV
AV$vec
AV$values
# we do not need all eigenvalues/eigenvectors, only the two largest
# create reduced objects for the two-dimensional solution
AV_red <- AV$values[1:2] # take the two largest (first) eigenvalues
AV_red
AVvect_red <- AV$vec[, 1:2] # take the two largest (first) eigenvectors
AVvect_red
# Build the coordinate matrix: we go from a 7x7 space down to only two
# dimensions. Each city is positioned relative to the other six; we want
# to carry that information into a two-dimensional space.
# We need each city's coordinates in the new two-dimensional system.
# (With more than three dimensions it becomes impossible to draw in 2-D.)
# To obtain the coordinates: take the reduced eigenvalue vector
# (lambda 1 and lambda 2) and the matrix E formed by e1 and e2 (AVvect_red),
# multiply by a diagonal matrix, and we get the coordinate matrix.
AV_red_mat <- sqrt(AV_red)*diag(2) # diagonal matrix of the reduced eigenvalues above
X <- AVvect_red %*% AV_red_mat # pre-multiply by the eigenvectors
X # result: the coordinate matrix
X <- as.data.frame(X) # convert it into a data frame
colnames(X) <- c("H", "V") # rename to horizontal and vertical coordinates
X
# data frame of positions, e.g. where Albacete sits
posicion <- as.data.frame(cbind(ciudad, X))
posicion
H <- posicion$H
H
V <- posicion$V
V
plot(H, V,
     main = "Posiciones relativas",
     xlab = "Dimension 1",
     ylab = "Dimension 2",
     col = "blue", pch = 19, cex = 1, lty = "solid", lwd = 2,
     frame.plot = FALSE)
text(H, V, labels = ciudad, cex = 0.7, pos = 3)
abline(h = c(0), v = (0), lty = 2, col = "blue")
# new data set, to do the same thing a different way
corr <- read.csv("C:/Users/Luli/Desktop/TODO/GitHub/lulisaila-Master_Data_Science_CUNEF/Tecnicas de Agrupación y Segmentación/BBDD/EMDCorr.csv", header = TRUE, sep = ";")
corr2 <- corr[, -1]
corr2
var <- corr[, 1]
str(var)
str(corr2)
d <- as.dist(as(corr2, "matrix"))
d
# multidimensional scaling routine from base R:
# classical MDS, a.k.a. principal coordinates analysis
ms <- cmdscale(dist(corr))
ms
dist(ms)
dim(ms)
plot(ms[,1], ms[,2], xaxt = "n", yaxt = "n",
     xlab = "Dimension 1",
     ylab = "Dimension 2",
     type = "p",
     bty = "n",
     col = "blue", pch = 19, cex = 1, lty = "solid", lwd = 2,
     frame.plot = FALSE)
axis(side = 1, at = seq(-1, 1.5, 0.5), lwd = 3)
axis(side = 2, at = seq(-0.5, 0.5, 0.25), lwd = 3)
text(ms[,1], ms[,2], labels = var, cex = 0.6, pos = 3)
abline(h = c(0), v = (0), lty = 2, col = "blue")
|
bef2634894dfd4e992fe980cbd3b1383a6cd66a4
|
4cb5426e8432d4af8f6997c420520ffb29cefd3e
|
/S22.R
|
4f3fb63f57164802b9c9325666136e633689c3a5
|
[
"CC0-1.0"
] |
permissive
|
boyland-pf/MorpheusData
|
8e00e43573fc6a05ef37f4bfe82eee03bef8bc6f
|
10dfe4cd91ace1b26e93235bf9644b931233c497
|
refs/heads/master
| 2021-10-23T03:47:35.315995
| 2019-03-14T21:30:03
| 2019-03-14T21:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,364
|
r
|
S22.R
|
# making table data sets
library(dplyr)
library(tidyr)
library(MorpheusData)
#############benchmark 43
# Inline fixture tables for benchmark 43: a classic supplier/parts/catalog
# relational example.
catalog <- read.table(text=
"supplier_key part_id
S1 P1
S1 P4
S2 P2
S2 P3
S3 P5
S4 P3
S4 P6
S5 P4
S5 P2
S6 P4
S7 P6
S8 P5
S8 P2
S9 P1
S10 P6
S11 P2
S11 P7", header=T)
parts <- read.table(text=
"part_id color
P1 red
P2 green
P3 yellow
P4 red
P5 green
P6 blue
P7 green", header=T)
suppliers <- read.table(text=
"supplier_key sname
S1 SN1
S2 SN2
S3 SN3
S4 SN4
S5 SN5
S6 SN6
S7 SN7
S8 SN8
S9 SN9
S10 SN10
S11 SN11", header=T)
# write.csv(catalog, "sql/catalog.csv", row.names=FALSE)
# catalog <- read.csv("sql/catalog.csv", check.names = FALSE)
# fctr.cols <- sapply(catalog, is.factor)
# int.cols <- sapply(catalog, is.integer)
# catalog[, fctr.cols] <- sapply(catalog[, fctr.cols], as.character)
# catalog[, int.cols] <- sapply(catalog[, int.cols], as.numeric)
# save(catalog, file = "sql/catalog.rdata")
# write.csv(suppliers, "sql/suppliers.csv", row.names=FALSE)
# suppliers <- read.csv("sql/suppliers.csv", check.names = FALSE)
# fctr.cols <- sapply(suppliers, is.factor)
# int.cols <- sapply(suppliers, is.integer)
# suppliers[, fctr.cols] <- sapply(suppliers[, fctr.cols], as.character)
# suppliers[, int.cols] <- sapply(suppliers[, int.cols], as.numeric)
# save(suppliers, file = "sql/suppliers.rdata")
# write.csv(parts, "sql/parts.csv", row.names=FALSE)
# parts <- read.csv("sql/parts.csv", check.names = FALSE)
# fctr.cols <- sapply(parts, is.factor)
# int.cols <- sapply(parts, is.integer)
# parts[, fctr.cols] <- sapply(parts[, fctr.cols], as.character)
# parts[, int.cols] <- sapply(parts[, int.cols], as.numeric)
# save(parts, file = "sql/parts.rdata")
# 5.2.10
# df1=inner_join(parts,catalog) %>%
#   inner_join(suppliers) %>%
#   filter (color == "green") %>%
#   group_by(color,sname) %>%
#   summarize(n = n()) %>%
#   ungroup()
# df2=inner_join(parts,catalog) %>%
#   inner_join(suppliers) %>%
#   group_by(sname) %>%
#   summarize(n = n())
# inner_join(df1,df2) %>%
#   select(sname,n)
# Build the benchmark input: one row per (part, supplier) catalog entry,
# joined with the part colors and supplier names; persist as CSV + rdata
# with factor columns coerced to character and integers to numeric.
input=inner_join(parts,catalog) %>% inner_join(suppliers)
write.csv(input, "data-raw/s22_input1.csv", row.names=FALSE)
s22_input1 <- read.csv("data-raw/s22_input1.csv", check.names = FALSE)
fctr.cols <- sapply(s22_input1, is.factor)
int.cols <- sapply(s22_input1, is.integer)
s22_input1[, fctr.cols] <- sapply(s22_input1[, fctr.cols], as.character)
s22_input1[, int.cols] <- sapply(s22_input1[, int.cols], as.numeric)
save(s22_input1, file = "data/s22_input1.rdata")
# Expected output: suppliers whose catalog entries are ALL green parts
# (set-difference of all suppliers minus those with a non-green part),
# together with their catalog-entry counts.
df1=input %>%
  filter(color != "green") %>%
  select(sname)
df2=input %>%
  select(sname)
df3=setdiff(df2,df1)
df4=input %>%
  group_by(sname) %>%
  summarise(n = n())
output=inner_join(df4,df3)
write.csv(output, "data-raw/s22_output1.csv", row.names=FALSE)
s22_output1 <- read.csv("data-raw/s22_output1.csv", check.names = FALSE)
fctr.cols <- sapply(s22_output1, is.factor)
int.cols <- sapply(s22_output1, is.integer)
s22_output1[, fctr.cols] <- sapply(s22_output1[, fctr.cols], as.character)
s22_output1[, int.cols] <- sapply(s22_output1[, int.cols], as.numeric)
save(s22_output1, file = "data/s22_output1.rdata")
# Re-run of the same query, printing the result (duplicates the block above
# without the CSV/rdata persistence).
input=inner_join(parts,catalog) %>% inner_join(suppliers)
df1=input %>%
  filter(color != "green") %>%
  select(sname)
df2=input %>%
  select(sname)
df3=setdiff(df2,df1)
df4=input %>%
  group_by(sname) %>%
  summarise(n = n())
inner_join(df4,df3)
|
4e3568cfce036adc9893a76b73c7c763cc739017
|
0b498639d8e21794224d3ef2f150f506dbead29e
|
/man/scrape_satr.Rd
|
7bb612fffb48f1ac57ef7b449e672145fb8f0e2d
|
[
"MIT"
] |
permissive
|
averyrobbins1/tidytranscript
|
190d4e29f4e32c1ef65ef81730b9781034004fe5
|
3ab7f082c5799bcca52bb03992f2a679c5d30b6d
|
refs/heads/main
| 2023-03-22T18:23:04.265055
| 2021-03-18T21:29:56
| 2021-03-18T21:29:56
| 332,030,265
| 1
| 1
|
MIT
| 2021-03-07T04:47:59
| 2021-01-22T18:28:45
|
R
|
UTF-8
|
R
| false
| true
| 450
|
rd
|
scrape_satr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape_satr.R
\name{scrape_satr}
\alias{scrape_satr}
\title{Scrape SATR scores from a student's transcript}
\usage{
scrape_satr(.data)
}
\arguments{
\item{.data}{A character vector returned from the read_transcript function.}
}
\value{
A tibble of SATR test scores
}
\description{
Scrape SATR scores from a student's transcript
}
\examples{
\dontrun{
scrape_satr(dat)
}
}
|
7845bddaa5d33697b80ab9b46f92e11ce409c964
|
c0a843db4d4c1e0a63f8f9e91d32246a34aaa0f6
|
/tests/regtest-lmtree.R
|
c6c3e6f6506d68c1908c494c65b599124ef0ca9a
|
[] |
no_license
|
cran/partykit
|
6765cf014fb4528894c34be20967d2b61265ff86
|
e43d2286d0d67830cff8ff7a1ce719782b834d06
|
refs/heads/master
| 2023-04-27T04:43:46.983139
| 2023-04-14T08:20:02
| 2023-04-14T08:20:02
| 17,698,361
| 7
| 15
| null | null | null | null |
UTF-8
|
R
| false
| false
| 580
|
r
|
regtest-lmtree.R
|
# Regression test for partykit::lmtree: mob() with the lm fitter and
# lmtree() must agree on simulated data with one true split point.
# Pin the pre-3.6.0 sampling RNG so the simulated data stay reproducible.
suppressWarnings(RNGversion("3.5.2"))
library("partykit")
set.seed(29)
n <- 1000
x <- runif(n)
z <- runif(n)
# The slope of x flips sign at z > 0.7 — the split mob/lmtree should find.
y <- rnorm(n, mean = x * c(-1, 1)[(z > 0.7) + 1], sd = 3)
# An uninformative partitioning variable, which should not be split on.
z_noise <- factor(sample(1:3, size = n, replace = TRUE))
d <- data.frame(y = y, x = x, z = z, z_noise = z_noise)
fmla <- as.formula("y ~ x | z + z_noise")
# Fit via generic mob() with the internal lm fitter, and via lmtree();
# printing both (outer parentheses) records them in the regression output.
(m_mob <- mob(formula = fmla, data = d, fit = partykit:::lmfit))
(m_lm2 <- lmtree(formula = fmla, data = d))
# Total residual sum of squares over the terminal-node models.
mods <- nodeapply(m_lm2, ids = nodeids(m_lm2, terminal = TRUE), function(x) x$info$object)
sum(sapply(mods, function(x) sum(x$residuals^2)))
|
2e645af3ea963c3f4f8b199313adc0a5de18325b
|
204b1b2ebdce859adbf34e4c31debc4fa5129d4e
|
/GA-master/GA/man/initPop.Rd
|
4ad8624b1009615b50a6ccc02a211678f5744e52
|
[] |
no_license
|
esther730/stat243
|
4445a16b14ad48dd754a1b6659c793efd1c57649
|
6d438d8f916a6e3f2f811daf65d033bfd206881a
|
refs/heads/master
| 2021-03-22T05:25:23.327769
| 2017-12-31T06:44:50
| 2017-12-31T06:44:50
| 101,693,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 821
|
rd
|
initPop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Initialization.R
\name{initPop}
\alias{initPop}
\title{initPop}
\usage{
initPop(dat, popSize = 30, genomes = NULL, fitfunc = "AIC",
family = "gaussian")
}
\arguments{
\item{dat}{data frame to operate on (by default the dependent variable is in the first column and the independent variables are in the remaining columns)}
\item{popSize}{integer, population size(default=30)}
\item{genomes}{list of chromosomes and fitness values, with length equal to the population size (default=NULL)}
\item{fitfunc}{method, model selection method(default is AIC)}
\item{family}{family; for a linear regression model the family should be a continuous probability density function (default is the gaussian family)}
}
\description{
Initializes population
}
|
553ef71f0f52dd4686626f7c94b1e924e1f2fbbb
|
dd8b1aa6b57fbab444bdcdeb38afb5b13018e590
|
/R/eedb_compare_behavior.r
|
6ea542ad376d81d85165a34a7a170ec3c12829e0
|
[] |
no_license
|
skranz/gtreeGUI
|
0acad3c3efdcd178cc17c72ac0ba7b23c0296577
|
0f04c9c8dddcd12ec5f89dd4b0916b9702892dd3
|
refs/heads/master
| 2020-05-29T14:01:46.086427
| 2019-05-29T08:12:52
| 2019-05-29T08:12:52
| 189,180,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,209
|
r
|
eedb_compare_behavior.r
|
# Load all behavior definitions stored for a game and combine them into a
# single behavior structure.
#
# gameId: id of the game whose behaviors are loaded
# path:   directory containing the behavior files (defaults to the ee store)
# files:  optional subset of file names; by default every file in `path`
load.behavior.struct = function(gameId, path=paste0(ee$struc.path,"/Behavior/",gameId), files = NULL) {
  restore.point("load.behavior.struct")
  # Default: read every behavior file stored for this game
  if (is.null(files)) {
    files = list.files(path)
  }
  struct.name = paste0("behavior_", gameId)
  # Parse each file into its raw behavior object
  raw.objs = lapply(files, function(fname) {
    load.struct(name = struct.name, file = paste0(path, "/", fname),
                typeName = "behaviors", just.obj = TRUE)
  })
  # Concatenate the per-file objects and wrap them into one structure
  obj.to.struct(do.call(c, raw.objs), name = struct.name)
}
# Evaluate every behavior defined in a behavior structure `bs` on the game
# data `game.dt`. Simple behaviors are computed directly; intersect-behaviors
# depend on other behaviors and are retried in a fixed-point loop until all
# of their components are available. Returns a list of data tables, one per
# behavior.
eval.behaviors = function(bs, game.dt) {
  restore.point("eval.behaviors")
  N = NROW(game.dt)
  # Rows of the structure data frame that describe behaviors
  brows = which(is.subtype(bs$df$type,"behavior"))
  bnames = bs$df$name[brows]
  nb = length(brows)
  # bok[i]: has behavior i been successfully evaluated yet?
  bok = rep(FALSE,nb)
  # One evaluation environment per behavior, holding its computed variables
  benv.li = replicate(nb,new.env(parent=.GlobalEnv), simplify=FALSE)
  names(bok) = names(benv.li) = bnames
  # Fixed-point loop: keep passing over the behaviors until a full pass makes
  # no progress (or nothing is left to evaluate)
  while(TRUE) {
    had.import = FALSE
    some.checked = FALSE
    i = 1
    for (i in seq_along(benv.li)) {
      if (bok[i])
        next
      obj = bs$obj.li[[ brows[i] ]]
      some.checked = TRUE
      if (is.subtype(bs$df$type[brows[i]],"intersectBehavior")) {
        # Intersect behavior: merge the environments of its component
        # behaviors; succeeds only once all components are evaluated
        ret = import.intersectBehavior(i=i,obj=obj,bs=bs,benv.li=benv.li,bok=bok)
        message("intersectBehavior ",bnames[i], ": ", ret )
        bok[i] = ret
        had.import = had.import | ret
      # A simple behavior consisting of several actions
      } else {
        bsi = obj.to.struct(obj,bnames[i])
        ret = compute.variables(gs=bsi,venv=game.dt,denv=benv.li[[i]],N=N)
        had.import = bok[i] = TRUE
      }
    }
    if ((!had.import) | (!some.checked))
      break
  }
  # Convert each behavior's environment of variables into a data table
  bdt.li = lapply(benv.li,data.env.to.dt)
  bdt.li
}
#' Gets a list containing action comparators for a given variable
#' for different conditions
#'
#' For every definition of `var.name` in the game structure `gs`, extract its
#' comparator of the requested type (e.g. "nicer"). Comparators that contain
#' `_if` children are split into one comparator per condition. Each returned
#' comparator carries its applicable condition in the "cond" attribute.
get.actionComparator = function(var.name, gs, comparator.type = "nicer") {
  restore.point("get.actionComparator")
  # All structure rows that define this variable
  vrows = which(gs$df$name == var.name & is.subtype(gs$df$type,"variable"))
  comps = NULL
  vrow = vrows[1]
  for (vrow in vrows) {
    obj = gs$obj.li[[vrow]]
    # Condition under which this definition of the variable applies
    cond <- get.conditions(vrow,gs)
    comp = obj[[comparator.type]]
    if (!has._if.children(comp)) {
      # Unconditional comparator: attach the row condition and collect it
      attr(comp,"cond") <- cond
      comps = c(comps,list(comp))
    # The actionComparator is splitted by different _if conditions
    # add a separate actionComparator for each condition
    } else {
      upcomp = move._if.upwards(comp)
      i = 1
      for (i in seq_along(upcomp)) {
        # Combine the row condition with the comparator's own _if condition
        actcond = combine.conditions(cond,upcomp[[i]][[1]])
        attributes(actcond) = NULL
        actcomp = upcomp[[i]][[2]]
        attr(actcomp,"cond") <- actcond
        comps = c(comps,list(actcomp))
      }
    }
  }
  return(comps)
}
# Interactive example for get.actionComparator; not called by package code.
# The hard-coded paths refer to the original author's machine.
examples.get.actionComparator = function() {
  setwd("C:/libraries/ExpEconDB")
  init.ee("C:/libraries/ExpEconDB")
  gs = load.game.struct("LureOfAuthority")
  get.actionComparator("recommendation",gs,comparator.type="nicer")
}
# Translate an action comparator into a numeric "points" vector for the given
# action values `val`: higher points mean a "better" action under the
# comparator. A comparator either maps specific action values to points
# (comp$points; the entry "_other" is the fallback) or ranks the raw value
# itself (comp$order).
get.actionComparator.points = function(val, comp, game.dt, N= NROW(game.dt)) {
  restore.point("get.actionComparator.points")
  if (!is.null(comp$points)) {
    # The NAMES of comp$points are expressions (evaluated on game.dt) giving
    # the points awarded; the VALUES are expressions for the action values
    # they apply to.
    comp.points = names(comp$points)
    comp.points = lapply(comp.points,function(p) rep(eval(parse(text=p),game.dt),length.out=N))
    # "_other" marks the fallback points for unmatched action values
    other.ind = which(sapply(comp$points, function(p) p == "_other"))
    if (length(other.ind)>0) {
      other.points = comp.points[[other.ind]]
      comp.points = comp.points[-other.ind]
      comp$points = comp$points[-other.ind]
    } else {
      other.points = NA
    }
    # Evaluate the action-value expressions on the game data
    comp.val = lapply(comp$points,function(p) {
      return(eval(parse(text=p),game.dt))
    })
    # Start from the fallback, then overwrite rows whose action matches
    points = rep(other.points, length.out = N)
    for (i in seq_along(comp.points)) {
      rows = which(is.true(val == comp.val[[i]]))
      points[rows] = comp.points[[i]][rows]
    }
    # Missing actions get no points
    points[is.na(val)] = NA
    return(points)
  } else if (!is.null(comp$order)) {
    # Ordered comparator: the value itself ranks actions (negated if decreasing)
    if (comp$order == "decreasing")
      return(-val)
    return(val)
  }
}
# Compare the observed values of one action variable against a behavior's
# prescribed values. Returns, per observation: 1 if the observed action earns
# more comparator points than the behavior's action, -1 if fewer, 0 if equal,
# and NA where no comparator condition applies or the behavior is silent.
var.compare.to.behavior = function(var,bdt,game.dt,gs,comparator.type="nicer", N= NROW(game.dt)) {
  restore.point("var.compare.to.behavior")
  comps = get.actionComparator(var,gs,comparator.type=comparator.type)
  val = rep(NA,N)
  i = 1
  for (i in seq_along(comps)) {
    comp = comps[[i]]
    # Points of the observed action (gpoints) and the behavior's action (bpoints)
    gpoints = get.actionComparator.points(val = game.dt[[var]], comp=comp,game.dt = game.dt)
    bpoints = get.actionComparator.points(val = bdt[[var]], comp=comp,game.dt = game.dt)
    # Restrict the comparison to rows where this comparator's condition holds
    # and the behavior actually prescribes an action
    cond = attr(comp,"cond")
    if (!is.null(cond)) {
      rows = which(eval(parse(text=cond),game.dt) & !is.na(bpoints))
    } else {
      rows = which(!is.na(bpoints))
    }
    arows = rows[gpoints[rows]>bpoints[rows]]
    val[arows] = 1
    arows = rows[gpoints[rows]<bpoints[rows]]
    val[arows] = -1
    arows = rows[gpoints[rows]==bpoints[rows]]
    val[arows] = 0
  }
  return(val)
}
# Interactive example for the behavior-comparison pipeline; not called by
# package code (developer-machine paths).
examples.var.compare.to.behavior = function() {
  setwd("C:/libraries/ExpEconDB")
  init.ee("C:/libraries/ExpEconDB")
  gameId = "LureOfAuthority"
  bs = load.behavior.struct(gameId)
  game.dt = load.game.data(gameId)
  gs = load.game.struct(gameId)
  bdt.li = eval.behaviors(bs,game.dt=game.dt)
  var.compare.to.behavior("recommendation",bdt = bdt.li$informedRecommendPreferred,game.dt=game.dt,gs=gs)
  compare.to.behavior(bdt = bdt.li$informedRecommendPreferred,game.dt=game.dt,gs=gs)
  bc = compare.to.behaviors(bdt.li,game.dt=game.dt,gs=gs)
  tabulate.behavior.comparisons(bc)
}
#' Compare actual data with a specified behavior, e.g. w.r.t. niceness
#'
#' Aggregates per-variable comparisons over all action columns of bdt:
#' 1 = data weakly "nicer" than the behavior, -1 = weakly worse, 0 = equal on
#' all compared variables, NaN = nicer on one variable but worse on another
#' (not comparable), NA = behavior not applicable to the row.
#' NOTE(review): the parameter name T shadows R's TRUE shorthand; rename with
#' care (callers may rely on it).
compare.to.behavior = function(bdt,game.dt,gs, comparator.type = "nicer", T=NROW(game.dt)) {
  restore.point("is.nicer.than.behavior")
  val = rep(NA,T)
  col = names(bdt)[1]
  for (col in names(bdt)) {
    col.val = var.compare.to.behavior(col,bdt = bdt,game.dt=game.dt,gs=gs, comparator.type=comparator.type)
    # Conflicting directions across variables make the row incomparable (NaN)
    NaN.rows = is.true(val * col.val == -1 | is.nan(val))
    One.rows = is.true(!NaN.rows &
      ((val + col.val >= 1) | ( is.na(val) & col.val==1)))
    Neg.rows = is.true(!NaN.rows &
      ((val + col.val <= -1) | ( is.na(val) & col.val==-1)))
    Zero.rows = is.true((val == 0 | is.na(val)) & col.val == 0)
    # Order matters: NaN wins over 1/-1/0 for this column's update
    val[NaN.rows] = NaN
    val[One.rows] = 1
    val[Neg.rows] = -1
    val[Zero.rows] = 0
  }
  val
}
#' Compare observed play against every behavior in a list (e.g. w.r.t. niceness)
#'
#' Runs compare.to.behavior once per behavior data table and returns the
#' resulting comparison vectors as a list with the same names as bdt.li.
compare.to.behaviors = function(bdt.li,game.dt,gs, comparator.type = "nicer") {
  restore.point("is.nicer.than.behavior")
  lapply(bdt.li, function(bdt) {
    compare.to.behavior(bdt, game.dt = game.dt, gs = gs,
                        comparator.type = comparator.type)
  })
}
# Which rows of the observed data are consistent with a specified behavior?
# Returns a logical vector: TRUE where the row agrees with the behavior on
# every action column, FALSE where it disagrees, and NA where the behavior
# leaves the action unspecified (NA propagates through ==).
matches.behavior = function(bdt,game.dt) {
  restore.point("matches.behavior")
  # FIX: the original used an undefined `N`; the length must come from the
  # observed data itself.
  same = rep(TRUE, NROW(game.dt))
  for (col in names(bdt)) {
    same = same & game.dt[[col]] == bdt[[col]]
  }
  same
}
# For each behavior in bdt.li, flag which observed rows match it exactly.
# Returns a data.table with one logical column per behavior; NA marks rows
# where the behavior's conditions leave the action unspecified.
matches.behaviors = function(bdt.li,game.dt) {
  match.cols = lapply(bdt.li, function(bdt) matches.behavior(bdt, game.dt = game.dt))
  do.call(data.table, match.cols)
}
# Build an intersect-behavior by merging the variable environments of its
# component behaviors into benv.li[[i]]. Later components overwrite earlier
# ones wherever they define a non-NA value. Returns TRUE on success, FALSE
# when some component behavior has not been evaluated yet (bok is FALSE).
import.intersectBehavior = function(i,obj,bs,benv.li,bok) {
  restore.point("import.intersectBehavior")
  # All component behaviors must already be evaluated before merging
  components = obj$behaviors
  if (!all(bok[components])) return(FALSE)
  target.env = benv.li[[i]]
  for (comp in components) {
    comp.env = benv.li[[comp]]
    for (var in objects(comp.env)) {
      if (exists(var, target.env)) {
        # Already imported: overwrite only where this component defines a value
        defined = !is.na(comp.env[[var]])
        target.env[[var]][defined] = comp.env[[var]][defined]
      } else {
        target.env[[var]] = comp.env[[var]]
      }
    }
  }
  return(TRUE)
}
# Summarise, per behavior, the outcome of compare.to.behaviors: how many
# observations were nicer (1), as nice (0), less nice (-1), not comparable
# (NaN) or not applicable (NA), plus the corresponding shares.
# NOTE(review): is.na() is TRUE for NaN as well, so the "NA" column also
# counts the not-comparable rows — confirm this is intended.
tabulate.behavior.comparisons = function(bc, comparator.type = "nicer") {
  li = lapply(bc,function(val) {
    list("bigger" = sum(is.true(val==1)), "equal" = sum(is.true(val==0)),
      "smaller" = sum(is.true(val==-1)),"not.comparable" = sum(is.nan(val)),
      "NA" = sum(is.na(val)) )
  })
  df = as.data.frame(rbindlist(li))
  # FIX: row names must come from the argument `bc`; the original referenced
  # an undefined object `bm`.
  rownames(df) = names(bc)
  # Shares relative to all classified observations (NA excluded)
  denom = df$bigger + df$equal + df$smaller + df$not.comparable
  df$bigger.share = round(df$bigger / denom, 4)
  df$equal.share = round(df$equal / denom, 4)
  df$smaller.share = round(df$smaller / denom, 4)
  if (comparator.type == "nicer") {
    colnames(df) = c("nicer","as.nice","less.nice","not.comparable", "NA","nicer.share","as.nice.share","less.nice.share")
  }
  df
}
# Interactive example for tabulate.behavior.comparisons; not called by
# package code (developer-machine paths).
examples.tabulate.behavior.comparisons = function() {
  setwd("C:/libraries/ExpEconDB")
  init.ee("C:/libraries/ExpEconDB")
  gameId = "LureOfAuthority"
  bs = load.behavior.struct(gameId)
  game.dt = load.game.data(gameId)
  gs = load.game.struct(gameId)
  bdt.li = eval.behaviors(bs,game.dt=game.dt)
  bc = compare.to.behaviors(bdt.li,game.dt=game.dt,gs=gs)
  tabulate.behavior.comparisons(bc)
}
# Summarise, per behavior, how many observations match it exactly
# (output of matches.behaviors): counts of TRUE/FALSE/NA plus the share of
# matches among decided rows and the NA share overall.
tabulate.behavior.matches = function(bm) {
  counts = lapply(bm, function(flags) {
    list(true = sum(is.true(flags)), false = sum(is.false(flags)),
         "NA" = sum(is.na(flags)))
  })
  res = as.data.frame(rbindlist(counts))
  rownames(res) = names(bm)
  decided = res$true + res$false
  res$true.share = round(res$true / decided, 4)
  res$na.share = round(res[["NA"]] / (decided + res[["NA"]]), 4)
  res
}
# Interactive scratchpad demonstrating behavior loading, comparative-statics
# tables and match tabulation; not called by package code.
examples.load.behavior.struct = function() {
  setwd("C:/libraries/ExpEconDB")
  init.ee("C:/libraries/ExpEconDB")
  gameId = "LureOfAuthority"
  bs = load.behavior.struct(gameId)
  bs
  bs$obj.li[[97]]
  dt.li = tableBehavior.comp.stat.dt(bs=bs,vars=c("searchP","searchA","util_1","util_2"),gameId=gameId)
  dt = dt.li$util_1
  d = dt[key == "delegate",]
  d$better.del = d$val1 <= d$val2
  setkeyv(d,c("key","var","behavior", "variant", "keyv1","keyv2"))
  d
  game.dt = load.game.data(gameId)
  bdt.li = eval.behaviors(bs,game.dt=game.dt)
  bm = matches.behaviors(bdt.li,game.dt)
  tabulate.behavior.matches(bm)
  # NOTE(review): `be` is not defined anywhere in this scope; this line would
  # fail if executed as-is.
  dt = cbind(game.dt,be)
}
# Build comparative-statics tables: for each variable in `vars` and for each
# key column, list all pairs of key values (keyv1, keyv2) together with the
# variable's value under each (val1, val2), for every behavior. Returns a
# list of data tables, one per variable.
# NOTE(review): contains acknowledged unfinished parts (duplicate equilibria
# are simply dropped) and leftover debug statements; treat output with care.
tableBehavior.comp.stat.dt = function(bs,vars, be = NULL, keyCols=NULL,gameId=NULL) {
  restore.point("tableBehavior.comp.stat")
  # Stack the behavior tables (plus observed data) into one data table
  ret = combine.tableBehavior(bs=bs,vars=vars,be=be,keyCols=keyCols,gameId=gameId)
  be = ret$be; vars=ret$vars; keyCols=ret$keyCols; be.dt=ret$dt
  be.dt
  bk.cols = c("behavior",keyCols)
  # Simply remove multiple equilibria!!!!!
  # Need to correct!!!
  be.dt = be.dt[!duplicated(be.dt[,bk.cols,with=FALSE]),]
  # Unique values of each behavior/key column
  bk.list = lapply(bk.cols,function(col) unique(be.dt[[col]]))
  names(bk.list) = bk.cols
  # Leftover debug defaults (overwritten by the lapply arguments below)
  var = "util_1"
  key = "delegate"
  csgrid.li = lapply(vars, function(var) {
    grid.li = lapply(bk.cols, function(key) {
      myvar = var
      restore.point("jhfjsdhfj")
      var = myvar
      vals = bk.list[[key]]
      # Nothing to compare if the key takes only one value
      if (length(vals)<=1)
        return(NULL)
      # All possible value combinations
      vg = expand.grid(list(seq_along(vals),seq_along(vals)))
      vg = vg[vg[,1]<vg[,2],]
      vg[] = vals[as.matrix(vg)]
      names(vg) = c("keyv1","keyv2")
      vg
      # Cross every value pair with all combinations of the other key columns
      other.cols = setdiff(bk.cols,key)
      grid.list = c(list(vg.ind = 1:NROW(vg)),bk.list[other.cols])
      gr = as.data.table(expand.grid(grid.list))
      # First leg of each pair: look up the variable's value at keyv1
      i = 1
      gr[[key]] = vg[gr$vg.ind,i]
      mgr = merge(gr,be.dt, all.y=FALSE, by = bk.cols)
      dt = mgr[,c(bk.cols,"vg.ind"),with=FALSE]
      dt$keyv1 = vg[mgr$vg.ind,i]
      dt$val1 = mgr[[var]]
      # Second leg: value at keyv2
      i = 2
      gr[[key]] = vg[gr$vg.ind,i]
      mgr = merge(gr,be.dt, all.y=FALSE, by = bk.cols)
      dt$keyv2 = vg[mgr$vg.ind,i]
      dt$val2 = mgr[[var]]
      dt$key = key
      dt$var = var
      dt[,vg.ind:=NULL]
      dt[[key]] = vals[1]
      setcolorder(dt,c(bk.cols,c("var", "key", "keyv1","keyv2","val1","val2")))
      dt
    })
    rbindlist(grid.li)
  })
  names(csgrid.li)= vars
  csgrid.li
}
# Build an "observed" table behavior object from the game data: aggregate the
# action variables and per-player utilities by the key columns so observed
# play can be compared against theoretical behaviors.
# Do not deal with strategyMethod yet
make.obsTableBehavior = function(gameId,vars, keyCols, meanMedian="mean",obsData=NULL, util=NULL,n=NULL) {
  restore.point("make.obsTableBehavior")
  # Load data
  if (is.null(obsData)) {
    obsData = load.game.data(gameId=gameId)
  }
  # Get number of players (from the highest player_<i>_* column index)
  if (is.null(n)) {
    str = colnames(obsData)
    str = str[str.starts.with(str,"player_")]
    str = str.split(str,"_")
    iv = sapply(str,function(v) as.numeric(v[2]))
    n = max(iv)
  }
  vars = union(vars,paste0("util_",1:n))
  # Get utility formula (defaults to monetary payoffs)
  if (is.null(util)) {
    util = list("payoffUtil",n=n)
  }
  util.formula = do.call(util[[1]],util[-1])
  # Add utility to observed data (one util_<i> column per player)
  for (i in 1:n) {
    run.dt(obsData,paste0("util_",i," := ", util.formula[i]))
  }
  # Make obsData smaller
  obsData = obsData[,c(keyCols,vars),with=FALSE]
  # Aggregate data by keyCol using mean or median
  fun = meanMedian[1]
  code = paste0("list(", paste0(vars,"=",fun,"(",vars,")", collapse=","), ")")
  data = run.dt(obsData,code,by=keyCols)
  # Generate name (with a random suffix) and a human-readable shortName
  name = paste0("obs_",gameId,"_",paste0(util[[1]]),"_",paste0(sample(c(1:9,LETTERS),5),collapse=""))
  arg.names = setdiff(names(util[-1]),"n")
  if (length(arg.names)>0) {
    args = substring(as.character(unlist(util[arg.names])),1,4)
    shortName = paste0("obs_",meanMedian[1],"_",str.replace(util[[1]],"Util","")[[1]],"_",paste0(args,collapse="_"))
  } else {
    shortName = paste0("obs_",meanMedian[1],"_",str.replace(util[[1]],"Util","")[[1]])
  }
  # Generate object
  values = list(sourceGameId=gameId,destGameId=gameId,shortName=shortName,numPlayers=n,numRows=NROW(data),keyCols=keyCols,meanMedian=meanMedian, actions=setdiff(vars,paste0("util_",1:n)), multiple=FALSE,obsData=obsData,data=data)
  obj = new.obj(typeName="obsTableBehaviors",name=name,values = values)
  obj
}
# Interactive example for make.obsTableBehavior; not called by package code.
examples.make.obsTableBehavior = function() {
  # FIX: the game id is "LureOfAuthority"; the original string
  # "LureOfAUthority" had a typo and would not match the stored game data
  # (every other example in this file uses the correct spelling).
  gameId = "LureOfAuthority"
  vars = c("searchP","searchA","util_1","util_2")
  keyCols = c("variant","delegate")
  make.obsTableBehavior(gameId=gameId,vars=vars,keyCols=keyCols)
}
# Stack the data tables of all table behaviors that define the requested
# variables (optionally adding an aggregate of the observed data) into one
# long data table with a "behavior" column. Returns the behavior names, the
# common key columns, the variables and the stacked table.
combine.tableBehavior = function(bs,vars,be=NULL,keyCols=NULL, add.obs=TRUE, gameId, meanMedian="mean") {
  restore.point("tableBehavior.comp.stat")
  # Find relevant behaviors manually: all tableBehaviors whose data contains
  # every requested variable
  if (is.null(be)) {
    rows = is.subtype(bs$df$type,"tableBehaviors")
    objs = bs$obj.li[rows]
    names(objs) = bs$df$name[rows]
    has.action = sapply(objs, function(obj) all(vars %in% names(obj$data)))
    objs = objs[has.action]
    be = names(objs)
  } else {
    objs = bs$obj.li[[1]][be]
  }
  # Find common keyCols across all selected behaviors
  if (is.null(keyCols)) {
    li = lapply(objs, function(obj) obj$keyCols)
    keyCols = intersect.vector.list(li)
  }
  # Optionally append aggregated observed play as an extra "behavior"
  if (add.obs) {
    obs.obj = make.obsTableBehavior(gameId=gameId,vars=vars,keyCols=keyCols)
    obs.objs = list(obs.obj)
    names(obs.objs) = get.name(obs.obj)
    objs = c(objs,obs.objs)
  }
  # Stack all behavior tables; label each chunk by its shortName
  # NOTE(review): `with=FALSE` inside `:=` is deprecated in current
  # data.table; consider dt[, behavior := obj$shortName].
  dt = rbindlist(lapply(objs, function(obj) {
    restore.point("hshfghsf")
    dt = obj$data[,c(keyCols,vars),with=FALSE]
    dt[,"behavior":=obj$shortName,with=FALSE]
    setcolorder(dt,c("behavior",keyCols,vars))
    dt
  }))
  return(list(be=be,keyCols=keyCols,vars=vars,dt=dt))
}
|
d328f34ad5899b6dbc9b18e28027bd41ac6b3894
|
5e605fdb3bd68987f776b0121f950a7aee1ccbb9
|
/R/nb.glm.dispersion.models.v1.R
|
eb1003b7a4b24bb6cac7e0a96863dc78ec1549b7
|
[] |
no_license
|
diystat/NBPSeq
|
f150da798677a3c27dc27cee916f960f66af149d
|
358f095f4846476c7c9ffe720b502899ea18affb
|
refs/heads/master
| 2021-01-01T16:19:01.230766
| 2014-05-18T00:19:07
| 2014-05-18T00:19:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,237
|
r
|
nb.glm.dispersion.models.v1.R
|
##' Specify a dispersion model. The parameters of the specified model
##' are to be estimated from the data using the function
##' \code{optim.disp.apl} or \code{optim.disp.pl}.
##'
##' This functions calls \code{disp.fun.<model>} to specify a
##' dispersion model (a list), using output from a call to
##' \code{disp.predictor.<predictor>} as argument list, where
##' \code{<model>} is \code{model} from the input in lower case (one
##' of "nb2", "nbp", "nbq", "nbs" or "step") and \code{<predictor>} is
##' \code{predictor} from the input (one of "pi", "mu", or "rs")
##'
##' @title (private) Specify a dispersion model
##' @param nb NB data, output from \code{\link{prepare.nb.data}}
##' @param x a matrix, design matrix (specifying the treatment structure).
##' @param model a string giving the name of the disperion model,
##' can be one of "NB2", "NBP", "NBQ", "NBS" or "step" (not case
##' sensitive).
##' @param predictor a string giving the name of the predictor to use
##' in the dispersion model, can be one of "pi" and "mu", or "rs".
##' \code{"pi"}, preliminarily estimated mean relative frequencies; \code{"mu"}, preliminarily estimated
##' mean frequencies; \code{"rs"}, row sums.
##' @param subset a list of logical,
##' @param ... additional parameter to \code{disp.fun.*}
##' @return a list, output from the call to the function \code{disp.fun.<model>}.
make.disp = function(nb.data, x, model, predictor, subset=filter.mu.pre(nb.data, x), ...) {
  ## Build the predictor matrix via disp.predictor.<predictor>
  ## (predictor is one of "pi", "mu", "rs")
  pred = do.call(paste0("disp.predictor.", predictor), list(nb.data = nb.data, x = x));
  ## Dispatch to disp.fun.<model>; the model name is case-insensitive
  do.call(paste0("disp.fun.", tolower(model)), c(pred, list(subset = subset), ...));
}
##' Specify a dispersion model where the parameters of the
##' model will be estimated separately for different groups
##'
##' @title Specify a dispersion model where the parameters of the
##' model will be estimated separately for different groups
##' @param disp.fun a \code{disp.fun.*} constructor used to build each per-group model
##' @param grp.ids a vector of group ids, one per column of \code{predictor}
##' @param predictor a matrix, predictor of the dispersion (columns are grouped by \code{grp.ids})
##' @param subset a logical vector, rows used when estimating the parameters
##' @param predictor.label a string describing the predictor
##' @param ... additional arguments passed on to \code{disp.fun}
##' @return a list, the combined dispersion model: a single \code{fun} that
##' applies each group's sub-model to its columns, with the groups' parameter
##' vectors concatenated in \code{par.init}/\code{par.lower}/\code{par.upper}
disp.by.group = function(disp.fun, grp.ids, predictor, subset, predictor.label="Predictor", ...) {
  ## grp.ids = rep(1:4, each=3);
  ## grp.ids = rep(1, 3);
  m = length(grp.ids);
  # Column indices belonging to each group
  grps = split(1:m, grp.ids);
  n.grps = length(grps);
  # Build one sub-model per group on that group's predictor columns
  res = lapply(grps, function(grp) do.call(disp.fun,
    list(predictor=predictor[,grp,drop=FALSE], subset=subset, predictor.label=predictor.label, ...)));
  ## This seems tedious
  # Concatenate the groups' parameter vectors; par.ids[[i]] locates group i's
  # parameters inside the combined vector
  par.init = res[[1]]$par.init;
  n.pars = length(par.init);
  par.ids = list(1:n.pars);
  par.lower = res[[1]]$par.lower;
  par.upper = res[[1]]$par.upper;
  funs = list(res[[1]]$fun);
  offset = res[[1]]$offset;
  if (n.grps > 1) {
    for (i in 2:n.grps) {
      par.init = c(par.init, res[[i]]$par.init);
      par.lower = c(par.lower, res[[i]]$par.lower);
      par.upper = c(par.upper, res[[i]]$par.upper);
      funs[[i]] = res[[i]]$fun
      offset = c(offset, res[[i]]$offset);
      # NOTE(review): this offset assumes all groups have n.pars parameters;
      # confirm if disp.fun can return groups of differing parameter counts.
      par.ids[[i]] = n.pars * (i-1) + 1:length(res[[i]]$par.init);
    }
  }
  # Minimal environment for the combined function so it does not capture
  # this constructor's frame
  env = new.env(parent=baseenv());
  assign("m", nrow(predictor), envir=env)
  assign("n", ncol(predictor), envir=env)
  assign("funs", funs, envir=env)
  assign("grps", grps, envir=env)
  assign("n.grps", n.grps, envir=env)
  assign("par.ids", par.ids, envir=env)
  # Combined dispersion function: fill each group's columns from its sub-model
  fun=function(par){
    x = matrix(0, m, n);
    for (i in 1:n.grps) {
      x[,grps[[i]]]=funs[[i]](par[par.ids[[i]]]);
    }
    x
  }
  environment(fun) = env;
  list(name=paste(res[[1]]$name, "by group"),
    fun=fun, par.init=par.init, par.lower=par.lower, par.upper=par.upper, subset=subset,
    predictor=predictor, offset=offset, predictor.label = predictor.label,
    grp.ids = grp.ids)
}
##' @name Dispersion Models
##'
##' @title (private) Specify a NB2, NBP, NBS, NBS, or STEP dispersion model
##'
##' @details Specify a NBP dispersion model. The parameters of the specified
##' model are to be estimated from the data using the function
##' \code{optim.disp.apl} or \code{optim.disp.pl}.
##'
##' Under the NBP model, the log dispersion is modeled as a linear
##' function of specified predictor with a scalar offset,
##'
##' log(phi) = par[1] + par[2] * log(predictor/offset).
##'
##' Under this parameterization, par[1] is the dispersion value when
##' the value of predictor equals the offset. This function will
##' return a function (and related settings) to be estimated by either
##' \code{optim.disp.apl} or \code{optim.disp.pl}. The logical vector
##' \code{subset} specifieds which rows will be used when estimating
##' the paramters (\code{par}) of the dispersion model.
##'
##' Once estimated, the dispersion function will be applied to all
##' values of the \code{predictor} matrix. Care needs to be taken to
##' either avoid \code{NA/Inf} values when preparing the predictor
##' matrix or handle \code{NA/Inf} values afterwards (e.g., when
##' performing hypothesis tests).
##'
##' @rdname disp.fun
##' @param predictor a m-by-n matrix having the same dimensions as the NB counts, predictor of the dispersion. See Details.
##' @param subset a logical vector of length \eqn{m}, specifying the subset of rows to be used when estimating the dispersion model parameters.
##' @param offset a scalar offset.
##' @param predictor.label a string describing the predictor
##' @param par.init a numeric vector, initial values of par.
##' @param label a string character describing the predictor.
##' @param par.lower a numeric vector, lower bounds of the parameter values.
##' @param par.upper a numeric vector, upper bounds of the parameter values.
##' @return a list
##' \item{fun}{a function that takes a vector, \code{par}, as
##' input and outputs a matrix of dispersion values (same dimension as
##' counts)}
##' \item{par.init, par.lower, par.upper}{same as input}
##' \item{subset}{same as input}
##' \item{predictor, offset, predictor.lable}{same as input}
disp.fun.nb2 = function(predictor,
                        subset,
                        offset=NULL,
                        predictor.label="Predictor",
                        par.init = -1
                        ) {
  ## NB2: constant dispersion, phi = exp(par), replicated to the dimensions
  ## of the count matrix. The dispersion function lives in a minimal
  ## environment (only the dimensions) so it does not capture this frame.
  fn.env = new.env(parent = baseenv());
  fn.env$d = dim(predictor);
  fun = function(par) array(exp(par), d);
  environment(fun) = fn.env;
  list(name = "NB2",
       fun = fun, par.init = par.init, subset = subset,
       predictor = predictor, predictor.label = predictor.label)
}
##' @rdname disp.fun
disp.fun.nbp = function(predictor,
                        subset,
                        offset=median(predictor[subset,]),
                        predictor.label="Predictor",
                        par.init = c(log(0.1), 0),
                        par.lower = c(log(1e-20), -1.1),
                        par.upper = c(0, 0.1)
                        ) {
  ## NBP: log(phi) = par[1] + par[2] * log(predictor/offset), so par[1] is
  ## the log dispersion where the predictor equals the offset.
  fn.env = new.env(parent = baseenv());
  fn.env$z = log(predictor / offset);
  fun = function(par) exp(par[1] + par[2] * z);
  environment(fun) = fn.env;
  list(name = "NBP",
       fun = fun, par.init = par.init, par.lower = par.lower,
       par.upper = par.upper, subset = subset,
       predictor = predictor, offset = offset,
       predictor.label = predictor.label)
}
##' @rdname disp.fun
disp.fun.nbq = function(predictor,
                        subset,
                        offset=median(predictor[subset,]),
                        predictor.label="Predictor",
                        par.init = c(log(0.1), 0, 0),
                        par.lower = c(log(1e-20), -1.0, -0.2),
                        par.upper = c(0, 1.0, 0.2)
                        ) {
  ## NBQ: quadratic log-dispersion model,
  ## log(phi) = par[1] + par[2]*z + par[3]*z^2 with z = log(predictor/offset).
  fn.env = new.env(parent = baseenv());
  fn.env$z = log(predictor / offset);
  fun = function(par) exp(par[1] + par[2] * z + par[3] * z^2);
  environment(fun) = fn.env;
  list(name = "NBQ", fun = fun, par.init = par.init, par.lower = par.lower,
       par.upper = par.upper, subset = subset,
       predictor = predictor, offset = offset,
       predictor.label = predictor.label)
}
##' @rdname disp.fun
# NBS: smooth log-dispersion model. log(phi) is a natural cubic spline in
# z = log(predictor), with knots placed between the 5% and 95% quantiles of
# z over the estimation subset.
disp.fun.nbs = function(predictor,
                        subset,
                        offset=NULL,
                        predictor.label="Predictor",
                        df = 6,
                        par.init = rep(-1, df)
                        ) {
  ## 2014-04-18
  z = as.vector(log(predictor));
  ## Specify the boundary knots
  Boundary.knots = range(z[subset]);
  ## Boundary.knots = c(1e-4, quantile(z, 0.99));
  ## Specify the knots
  ## It is not clear yet how to select the knots
  zs = sort(z[subset]);
  m = length(zs);
  l = quantile(z[subset], 0.05);
  r = quantile(z[subset], 0.95);
  ## if (m > 300) {
  ##  l = max(l, zs[101]+0.01);
  ##  r = min(r, zs[m-100]);
  ## }
  # df-2 interior knots, equally spaced between the 5% and 95% quantiles
  knots = seq(l, r, length=df-2);
  ## Specify the spline basis by the providing the knots
  ## s = ns(z, df = df);
  s = ns(z, knots=knots, Boundary.knots = Boundary.knots, intercept=TRUE);
  ## Specify the dispersion function as a function of par
  ## Create a minimal enviroment for the function
  env = new.env(parent=baseenv());
  assign("s", s, envir=env)
  assign("d", dim(predictor), envir=env)
  # phi = exp(basis %*% par), reshaped to the predictor's dimensions
  fun = function(par) {
    phi = exp(s %*% par);
    dim(phi) = d;
    phi
  }
  environment(fun) = env;
  list(name="NBS", fun = fun, par.init = par.init, subset=subset,
    predictor=predictor, offset=offset, predictor.label =predictor.label)
}
##' @rdname disp.fun
disp.fun.step = function(predictor,
                         subset,
                         offset=NULL,
                         predictor.label="Predictor",
                         df = 6,
                         knots = NULL,
                         par.init = rep(-1, df)
                         ) {
  ## Piecewise-constant (step) dispersion model on the log-predictor scale:
  ## log(phi) = par[i] on step i.
  z = log(predictor);
  if (is.null(knots)) {
    ## Default knots: equally spaced quantiles of z over the estimation subset
    p = seq(0, 1, length=df+1)[-c(1, df+1)];
    knots = quantile(z[subset], p);
  } else {
    knots = sort(knots);
  }
  ## Start/end of each step; widened slightly so every z value falls inside
  l = c(min(z, knots)-0.01, knots);
  r = c(knots, max(z, knots)+0.01);
  ## FIX: preallocate the index list properly (was `list(df)`, a length-one
  ## list containing the number df) and use which() so NA/NaN z values
  ## (e.g. from log of non-positive predictors) are dropped rather than
  ## turning into NA indices.
  ids = vector("list", df);
  for (i in seq_len(df)) {
    ids[[i]] = which(z >= l[i] & z < r[i]);
  }
  d = dim(predictor);
  ## Specify the dispersion function as a function of par, in a minimal
  ## environment so it does not capture this constructor's frame.
  env = new.env(parent=baseenv());
  assign("df", df, envir=env)
  assign("ids", ids, envir=env)
  assign("d", d, envir=env)
  assign("knots", knots, envir=env)
  fun = function(par) {
    lphi = array(NA, d);
    for (i in 1:df) {
      lphi[ids[[i]]] = par[i];
    }
    exp(lphi)
  }
  environment(fun) = env;
  ## Specify the lower and upper bounds for the components of par
  ## Not needed
  list(name="step", fun = fun, par.init = par.init, subset=subset,
    predictor=predictor, predictor.label = predictor.label)
}
##' Create a logical vector specifying the subset of rows to be used when estimating the dispersion model
##'
##' @title Create a logical vector specifying the subset of rows to be used when estimating the dispersion model
##' @param nb.data NB data, output from \code{\link{prepare.nb.data}}
##' @param x a matrix, design matrix
##' @param mu.lower lower bound on the preliminary mean estimates
##' @param mu.upper upper bound on the preliminary mean estimates
##' @return a logical vector specifying the subset of rows to be used when estimating the dispersion model
filter.mu.pre = function(nb.data, x, mu.lower=1, mu.upper=Inf, phi.pre=0.1) {
  ## Fit NB regression models to the rows of the count matrix using a
  ## preliminary constant dispersion parameter, phi.pre.
  obj = irls.nb(nb.data$counts, nb.data$eff.lib.sizes, x, phi=phi.pre);
  ## Obtain preliminary estimates of the mean relative frequencies.
  mu.pre = obj$mu;
  ## Keep rows whose preliminary means are all available and lie within
  ## [mu.lower, mu.upper].
  subset = rowSums(is.na(mu.pre) | mu.pre<mu.lower | mu.pre>mu.upper)==0;
  ## FIX: return the subset visibly; previously the assignment above was the
  ## last expression, so the result was returned invisibly.
  subset
}
# Dispersion predictor based on preliminary mean RELATIVE frequencies:
# fit per-row NB regressions with a constant preliminary dispersion, divide
# the fitted means by the effective library sizes and floor tiny values.
disp.predictor.pi = function(nb.data, x, phi.pre=0.1) {
  ## preliminary constant dispersion parameter, phi.pre
  fit = irls.nb(nb.data$counts, nb.data$eff.lib.sizes, x, phi = phi.pre,
                beta0 = rep(NA, dim(x)[2]));
  ## Convert preliminary mean frequencies to mean relative frequencies
  ## (each column divided by its effective library size)
  pi.pre = sweep(fit$mu, 2, nb.data$eff.lib.sizes, "/");
  ## Replace extremely small pi values by one read over all libraries
  ## 11-22-2013
  eps = 1 / sum(nb.data$eff.lib.sizes);
  pi.pre[pi.pre < eps] = eps;
  list(predictor = pi.pre, predictor.label = "pi.pre");
}
##' Dispersion predictor
##' @title Dispersion predictor
##' @param nb.data NB data, output from \code{\link{prepare.nb.data}}
##' @param x a matrix, design matrix
##' @param phi.pre preliminary constant dispersion value
##' @param mu.lower lower bound on the preliminary mean estimates
##' @param mu.upper upper bound on the preliminary mean estimates
##' @return a list with elements \code{predictor} (a matrix) and \code{predictor.label}
disp.predictor.mu = function(nb.data, x, phi.pre=0.1, mu.lower=1, mu.upper=Inf) {
  ## Dispersion predictor based on preliminary mean frequencies: fit per-row
  ## NB regressions with a constant preliminary dispersion phi.pre.
  fit = irls.nb(nb.data$counts, nb.data$eff.lib.sizes, x, phi = phi.pre,
                beta0 = rep(NA, dim(x)[2]));
  mu.pre = fit$mu;
  ## Replace extremely small mu values
  ## 11-22-2013
  floor.val = 1 / ncol(nb.data$counts);
  mu.pre[mu.pre < floor.val] = floor.val;
  list(predictor = mu.pre, predictor.label = "mu.pre");
}
disp.predictor.rs = function(nb.data, x, phi.pre=0.1, mu.lower=1, mu.upper=Inf) {
  ## Dispersion predictor based on row sums: replicate each row's total
  ## count across all columns of the count matrix.
  counts = nb.data$counts;
  rs = array(rowSums(counts), dim(counts));
  ## Floor extremely small row sums
  floor.val = 1 / ncol(counts);
  rs[rs < floor.val] = floor.val;
  list(predictor = rs, predictor.label = "row sum");
}
|
1f13655b7e9774244b0e9067f75322568c539968
|
4608f5a0ec32308f5e30548031c52ac015155ecc
|
/Tutorial02/Tutorial02/app.R
|
539ed1334a8391175a218042f7191c36ec3b95d3
|
[] |
no_license
|
xaoch/BLA2_F20
|
4456226421d4a65cbf063a9b4a86b702e7ad538b
|
8362de71778558f7242a5e42d28d92ce37c58a19
|
refs/heads/master
| 2022-12-20T11:59:38.423609
| 2020-10-29T20:32:05
| 2020-10-29T20:32:05
| 292,670,893
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,063
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(caret)
library(randomForest)
# Pre-trained models shipped alongside the app (created offline).
classificationModel= readRDS("./classification_model.rds")
# NOTE(review): the two print() calls below are debug output written to the
# server console on startup; consider removing them for production.
print("model")
print(classificationModel)
regressionModel= readRDS("./regression_model.rds")
# Dashboard layout: a sidebar collecting the model inputs (shared by both
# prediction tabs) and a body with one tab per prediction type.
# NOTE(review): several trailing commas (e.g. after fluidRow/tabItem) rely on
# Shiny/htmltools tolerating empty trailing arguments — confirm with the
# deployed package versions.
ui <- dashboardPage(
  dashboardHeader(title = "Prediction Dashboard"),
  dashboardSidebar(
    sidebarMenu(
      h3("Prediction Types"),
      menuItem("Classification", tabName = "classification", icon = icon("dashboard")),
      menuItem("Regression", tabName = "regression", icon = icon("th")),
      # Virtual learning environment activity inputs
      h3("VLE Data"),
      numericInput("total_clicks", "Total Clicks", value = 100),
      numericInput("total_elements", "Total Elements", value = 100),
      sliderInput("active_days", "Active Days", min = 0, max = 50, value = 25),
      numericInput("average_daily_clicks", "Average Daily Clicks", value = 25),
      numericInput("average_elements", "Average Daily Elements", value = 25),
      # Assessment inputs
      h3("Assessment Data"),
      sliderInput("avgScore", "Average Score", min = 0, max = 100, value = 50),
      # NOTE(review): "Deliverd" is a typo in the displayed label; kept as-is
      # here because this is a user-visible runtime string.
      numericInput("delivered", "Deliverd Assessments", value = 5),
      numericInput("sumDelays", "Delayed Days", value = 0)
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content
      tabItem(tabName = "classification",
        fluidRow(
          # Demographic inputs only used by the classification model
          box(title="Student Info",
            radioButtons("gender", "Gender",
              choices = list("Male" = "M", "Female" = "F"),selected = "M"),
            selectInput("region", "Region",
              choices = list("East Anglian Region"="East Anglian Region",
                "Yorkshire Region"="Yorkshire Region",
                "East Midlands Region"="East Midlands Region",
                "South East Region"="South East Region",
                "North Western Region"= "North Western Region",
                "Scotland"="Scotland",
                "South West Region"="South West Region",
                "West Midlands Region"="West Midlands Region",
                "Wales"="Wales",
                "Ireland"="Ireland",
                "South Region"="South Region",
                "London Region"="London Region",
                "North Region"="North Region"),selected = "London Region"),
            selectInput("highest_education", "Highest Level of Education",
              choices =list("A Level or Equivalent",
                "Lower Than A Level",
                "HE Qualification",
                "Post Graduate Qualification",
                "No Formal quals"
                ), selected="HE Qualification"),
            selectInput("imd_band", "IMD Band",
              choices =list("0-10%",
                "20-30%",
                "30-40%",
                "40-50%",
                "50-60%",
                "60-70%",
                "70-80%",
                "80-90%",
                "90-100%"
                ), selected="50-60%"),
            selectInput("age_band", "Age Band",
              choices =list("0-35",
                "35-55",
                "55<="
                ), selected="0-35"),
            numericInput("num_of_prev_attempts", "Previous Attempts", value = 0),
            numericInput("studied_credits", "Studied Credits", value = 60),
            radioButtons("disability", "Disability",
              choices = list("Yes" = "Y", "No" = "N"),selected = "N")
          ),
          valueBoxOutput("classificationPrediction"),
        ),
      ),
      tabItem(tabName = "regression",
        fluidRow(
          valueBoxOutput("regressionPrediction"),
        )
      )
    ),
    # Second tab content
  )
)
# Server: assemble a one-row data frame from the inputs, run the pre-loaded
# model and render the result as a colored value box.
server <- function(input, output) {
  output$classificationPrediction <- renderValueBox({
    # One observation with the columns the classification model was trained
    # on; the target column is included as NA so the frame matches the
    # training schema.
    dataset=data.frame("total_clicks"=input$total_clicks,
      "total_elements"=input$total_elements,
      "active_days"= input$active_days,
      "average_daily_clicks"=input$average_daily_clicks,
      "average_elements" = input$average_elements,
      "avgScore" = input$avgScore,
      "delivered" =input$delivered,
      "sumDelays" = input$sumDelays,
      "gender"= input$gender,
      "region"= input$region,
      "highest_education"= input$highest_education,
      "imd_band"=input$imd_band,
      "age_band"=input$age_band,
      "num_of_prev_attempts"=input$num_of_prev_attempts,
      "studied_credits"=input$studied_credits,
      "disability"=input$disability,
      "final_result"=NA
      )
    # NOTE(review): debug prints on every reactive invalidation; remove for
    # production.
    print(classificationModel)
    predictedValue=predict(classificationModel,dataset)
    print(predictedValue)
    # Green "Pass" / red "Fail" box driven by the predicted class
    valueBox(
      ifelse(predictedValue[1]=="Pass","Pass","Fail"),"Prediction", icon = icon(ifelse(predictedValue[1]=="Pass","check","exclamation")),
      color = ifelse(predictedValue[1]=="Pass","green","red")
    )
  })
  output$regressionPrediction <- renderValueBox({
    # The regression model only uses the VLE/assessment inputs
    datasetRegression=data.frame("total_clicks"=input$total_clicks,
      "total_elements"=input$total_elements,
      "active_days"= input$active_days,
      "average_daily_clicks"=input$average_daily_clicks,
      "average_elements" = input$average_elements,
      "avgScore" = input$avgScore,
      "delivered" =input$delivered,
      "sumDelays" = input$sumDelays,
      "score"=NA
      )
    value=predict(regressionModel,datasetRegression)
    # Color thresholds: >70 green, >50 yellow, else red.
    # NOTE(review): the color branch uses `value` (whole vector) while the
    # icon branch uses `value[1]` — confirm this asymmetry is intentional.
    valueBox(
      format(value[1], digits=2, nsmall=2),"Final Grade", icon = icon(ifelse(value[1]>70,"check",ifelse(value[1]>50,"exclamation","times"))),
      color = ifelse(value[1]>70,"green",ifelse(value>50,"yellow","red"))
    )
  })
}
# Launch the application.
shinyApp(ui, server)
|
ec0df79b0100001a5680979a344aafc1759c7790
|
0dcac3528a7628b43f8826dcca80e08609a64b58
|
/inst/doc/msir.R
|
cd45c4904de58b6c51312ae90b256be50d61e2e8
|
[] |
no_license
|
cran/msir
|
2e57a910a704bde982ecbf419a204861d098dda1
|
d1cceaad2a368068304183ddf666e9750b0eaef0
|
refs/heads/master
| 2021-01-18T22:31:47.831575
| 2020-12-16T11:20:02
| 2020-12-16T11:20:02
| 17,697,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,022
|
r
|
msir.R
|
## ----setup, include=FALSE-----------------------------------------------------
# knitr setup chunk: figure layout options plus a par() hook so each figure
# chunk gets tight margins before plotting.
library(knitr)
opts_chunk$set(fig.align = "center",
               out.width = "70%",
               fig.width = 5, fig.height = 4,
               dev.args=list(pointsize=10),
               par = TRUE, # needed for setting hook
               collapse = TRUE, # collapse input & output code in chunks
               warning = FALSE)
knit_hooks$set(par = function(before, options, envir)
  { if(before && options$fig.show != "none")
       par(mar=c(4.1,4.1,1.1,1.1), mgp=c(3,1,0), tcl=-0.5)
})
# setupKnitr()
# knit_hooks$set(rgl = hook_webgl)
set.seed(1) # for exact reproducibility
## -----------------------------------------------------------------------------
library(msir)
## -----------------------------------------------------------------------------
# Example 1: single-index model with an exponential response surface.
n <- 200
p <- 5
b <- as.matrix(c(1,-1,rep(0,p-2)))
x <- matrix(rnorm(n*p), nrow = n, ncol = p)
y <- exp(0.5 * x%*%b) + 0.1*rnorm(n)
MSIR <- msir(x, y)
summary(MSIR)
plot(MSIR, type = "evalues")
plot(MSIR, type = "coefficients", which = 1)
plot(MSIR, type = "2Dplot")
## -----------------------------------------------------------------------------
# Example 2: single-index model with a quadratic response.
n <- 200
p <- 5
b <- as.matrix(c(1,-1,rep(0,p-2)))
x <- matrix(rnorm(n*p), nrow = n, ncol = p)
y <- (0.5 * x%*%b)^2 + 0.1*rnorm(n)
MSIR <- msir(x, y)
summary(MSIR)
plot(MSIR, type = "evalues")
plot(MSIR, type = "coefficients", which = 1)
plot(MSIR, type = "2Dplot")
## -----------------------------------------------------------------------------
# Example 3: two-direction model; directions b1 and b2 drive the response.
n <- 300
p <- 5
b1 <- c(1, 1, 1, rep(0, p-3))
b2 <- c(1,-1,-1, rep(0, p-3))
b <- cbind(b1,b2)
x <- matrix(rnorm(n*p), nrow = n, ncol = p)
y <- x %*% b1 + (x %*% b1)^3 + 4*(x %*% b2)^2 + rnorm(n)
MSIR <- msir(x, y)
summary(MSIR)
plot(MSIR, type = "evalues")
plot(MSIR, type = "coefficients", which = 1:2)
plot(MSIR, which = 1:2)
plot(MSIR, which = 1, type = "2Dplot", span = 0.7)
plot(MSIR, which = 2, type = "2Dplot", span = 0.7)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1, fig.width = 4, fig.height = 4----
# Interactive 3D spin plots (rendered only when rgl is available).
plot(MSIR, type = "spinplot")
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1, fig.width = 4, fig.height = 4----
plot(MSIR, type = "spinplot", span = 0.75)
rgl::rglwidget(width=500, height=450)
## -----------------------------------------------------------------------------
# Standalone spinplot() examples on simulated data.
x1 <- rnorm(100)
x2 <- rnorm(100)
y <- 2*x1 + x2^2 + 0.5*rnorm(100)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
spinplot(x1, y, x2)
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
spinplot(x1, y, x2, scaling="aaa")
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
# BUG FIX: rem.lin.trend was passed as the string "TRUE" instead of the
# logical TRUE; use a proper logical flag.
spinplot(x1, y, x2, rem.lin.trend = TRUE)
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
# spinplot() with a smooth surface fitted through the points.
spinplot(x1, y, x2, fit.smooth = TRUE)
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
# spinplot() with an OLS plane fitted through the points.
spinplot(x1, y, x2, fit.ols = TRUE)
rgl::rglwidget(width=500, height=450)
## -----------------------------------------------------------------------------
# spinplot() on the iris data, optionally marking points by species.
x <- iris[,1:3]
y <- iris[,5]
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
spinplot(x)
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
spinplot(x, markby = y)
rgl::rglwidget(width=500, height=450)
## ---- eval=requireNamespace("rgl", quietly = TRUE), echo=1--------------------
# Custom plotting symbols/colors per species on a dark background.
spinplot(x, markby = y, pch = c(0,3,1),
         col.points = c("lightcyan", "yellow", "lightgreen"),
         background = "black")
rgl::rglwidget(width=500, height=450)
## ---- echo=-1-------------------------------------------------------------------------------------
options(width=100)
sessionInfo()
|
60cb66af953dd9f53e34d1f060e2a204bbd4a3c2
|
aaf8222e2e7c1ca3480092387472ed539e79985a
|
/man/FirstTime.Rd
|
fe971407d5370489b228db9da757e25014cb66a5
|
[] |
no_license
|
M3SOulu/MozillaApacheDataset-Rpackage
|
57e7028f2d2ee9a6a672a9775f20bf40af9e4f4a
|
3644dbd266325309be4bfdf1ac926ae8859ebd19
|
refs/heads/master
| 2022-06-23T11:56:58.580415
| 2022-06-20T11:03:39
| 2022-06-20T11:03:39
| 238,914,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 407
|
rd
|
FirstTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/log.R
\name{FirstTime}
\alias{FirstTime}
\title{First Time}
\usage{
FirstTime(time, tz)
}
\arguments{
\item{time}{Vector of timestamps.}
\item{tz}{Vector of timezones.}
}
\value{
A POSIXct object.
}
\description{
Returns first timestamp with valid Git timezone (first timestamp
from 2007 and beyond that is not UTC timezone).
}
|
7b613da6f7c64d8afe83bee4aa36738a42233f5b
|
b6d06107e6e9d28c59e17ec9616777c6cb039522
|
/Twitter_Scrape.R
|
087e4c42543a95f5d19de5d667fd6d7250234d2e
|
[] |
no_license
|
forloopkilla/StartCo_Seed_Hatchery
|
b1d40f58b6a58c110f8f838bee34e053fdfc4ce9
|
26a491ff00c307a32bbaf0f053bcd5a140edb7ce
|
refs/heads/master
| 2021-01-22T22:16:42.428267
| 2017-04-06T13:41:46
| 2017-04-06T13:41:46
| 85,526,809
| 2
| 1
| null | 2017-03-22T06:24:09
| 2017-03-20T02:25:56
| null |
UTF-8
|
R
| false
| false
| 5,803
|
r
|
Twitter_Scrape.R
|
getwd()
# NOTE(review): hard-coded absolute working directory -- only valid on the
# original author's machine.
setwd("/home/arpan/Documents/Ajax Intel/Sentiment_analysis")
#-----SAVE THE APIs------
# SECURITY WARNING: real Twitter API consumer keys/secrets and access
# tokens/secrets are committed in plain text below. These credentials should
# be revoked and moved to environment variables or a gitignored config file.
# Eight credential sets are kept in parallel vectors (index i selects one set)
# so the crawler can rotate accounts to spread rate limits.
consumer_key<- c("x8GzVDZNm376fDEhhgUjeDdLc", "BaXijPbyZTBtmjKsXVDPwTPCc",
"AKJsxNqX2D8uTo9orgjRirvWL", "Ch6e19OXkGNeUP8QBYUIr5WRH", "IOhPOwbSc5eBFq5uDVSBHf6Gb",
"V4n64KR9jNd2UQtqsm6RlsZ3s", "FiU2YPcxQbCqV8eENDeqMTmcX", "NfdqgKlVZGK1WwcIdcaoaK39r")
consumer_secret <- c("l2vc71gcyo2i213kXyLv4ZBNSjQO9hmburVwkyIrEdFQsg395C",
"qwuO61IXqt7C3HuGs9USpVm5lsMF3HbHxhdViw8cUmCftzTI2i", "QOKk0ctHhbXNQ5QaipqofrZQzWM92mfkcoP60xe7HJzjSUCz6F",
"OwsNjjFe5kNcZOgwBWYopcpuJEY5I30HnEWxXPs8HPzLzPo8AW", "tfLMqlZ1hfuvqcrbQjWnjYbUlw2fK60ZzXdzB5sxuSM1xJHlkG",
"7f0j0xbkmY9Pu1sA6bjpMCVyj13RiZc5UxSGX5x5VnsunvFL4v", "MX8LleP8SoCuDWqWVGAT2KtTW1oxye2IqJq55evR716S5mRmuX",
"OLccQscdKuE3F23aj5vE2h3jOBGZN2OCx2oT8vLhMeTunQkdhz")
access_token<- c("843639734782119937-tdHQ7GTqMEl40Zd5MJgfgweN0uivqY2",
"80344503-5mEqgYjpElSZ9euB0CsrrnIfgiy2AG9O5CtWEzlOE", "2617540074-5l6gGJhCP8iw9DS7sVD9qsFaUGfWGO9fqlHt5Wg",
"842813040290553856-pdKA5MciQNPc7h68eckie9EXR2cOPGv", "47391724-Hs2WBnocdSmIEwnl4JLUuPGMkQMmwHT5L2l2dpF3w",
"843506548412440576-YaJW4CfOrwnTSwHteWkwTC94OW7R0cz", "840746479413272576-s7EBAfxbT0idIhGZs27ONcCNpKqM1Zt",
"526850301-TSWPjJ2A8oHZ5dBRoS5hXyMfeUF5CYhh19ykgJI0")
access_secret <-c("1ZG2StMVZ5XuN0GG7OTQ6ZEPuCCO8wnbxnlAGo5xS58sG", "s0kTmeiautNWCOfkDPmkJ4gTsUafzoz6iNtIEijWW4mN8",
"VVMfNIzgPEUmCk5QyIWr5A4ZSC2Lxy7CERoUtWs4jAe0l", "D5SrdfyhCLZaKIsRM4ZaJEnVRVqD9OE9NAhkus4at9Tl3",
"S1lf38C5Ax9Q1ZHuVrvxDTjak7YNo1va8AoBEF9ZPd63B", "LepBEZIBMZwllWMRl8TjoTK3KBtJEYud4A76AQ1uIRs4x",
"vFdBepLoljrHItEZ315bT9Lsqy2qYxfrCsdbYoP5PcTcW", "DCyejp7s8BFCjdP0TM4AkOwYzfvuG4HxAgR4L5AKRM7t7")
#-----DOWNLOAD THE STOCKS FILE-----
# S&P 500 constituents list (ticker + company name) pulled from GitHub.
URL_prod_rev <-
"https://raw.githubusercontent.com/datasets/s-and-p-500-companies/master/data/constituents-financials.csv"
SP500 <- read.csv(URL_prod_rev,
header=T,
sep=",",
stringsAsFactors=FALSE)
#
# Search strings of the form "$TICKER Company Name" used as Twitter queries.
SP500_tickers <- paste("$",SP500$Symbol, sep = "")
SP500_tickers <- paste(SP500_tickers,SP500$Name, sep = " ")
# -----ACCESS THE TWITTER ACCOUNTS WITH API CALLS----
# Authenticates the twitteR session for one credential set.
# FIX: removed dead code -- an OAuthFactory$new() credential object was
# constructed but never used or returned; setup_twitter_oauth() already
# performs the OAuth handshake. Return value (TRUE) is unchanged.
twitter_access <- function(consumer_key, consumer_secret, access_token, access_secret){
  library(ROAuth)
  library(twitteR)
  setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
  return(TRUE)
}
# ------TWITTER CRAWLER FUNCTION TO PULL DATA-----------
# Pulls tweets for the crawl_j-th batch of 63 S&P 500 tickers.
# Side effect: overwrites the GLOBAL `tweets` list (via <<-), keyed by the
# ticker search string; nothing is returned. Reads global `SP500_tickers`.
# NOTE(review): relies entirely on global state -- callers must invoke
# twitter_df() afterwards to consume `tweets`.
twitter_crawler <- function(crawl_j){
tweets <<- list()
crawl_begin <-((crawl_j-1)*63)+1
crawl_end <- crawl_j*63
for (i in SP500_tickers[crawl_begin:crawl_end]) {
# n=1: only the single most recent matching tweet per ticker since 2017-03-20
tweets[i] <<- list(searchTwitter(i,n=1 ,lang = "en", since = '2017-03-20'))
}
}
# Flattens the GLOBAL `tweets` list (filled by twitter_crawler) into a data
# frame of tweet fields. Side effect: assigns the result to the GLOBAL
# `tweet_df` (via <<-) in addition to returning it.
twitter_df <- function(){
# local copy; unlist flattens the per-ticker status lists into one vector
tweets<- unlist(tweets)
#convert to data frame
tweet_df <<- do.call("rbind", lapply(tweets,as.data.frame))
return(tweet_df)
}
# Main driver: rotate through the 8 credential sets, crawl one 63-ticker
# batch per account, and accumulate all tweets into final_df.
# NOTE(review): grows final_df with rbind inside the loop and depends on the
# `tweet_df` global set by twitter_df().
final_df <- data.frame(names=NULL)
for(i in 1:8){
twitter_access(consumer_key[i] ,consumer_secret[i], access_token[i], access_secret[i] )
twitter_crawler(i)
twitter_df()
final_df <- rbind(final_df, tweet_df)
}
################################# Jian's Version #########################
# Alternative, non-function version of the crawler above: re-downloads the
# S&P 500 list and scrapes tickers 1-63 with a single credential set.
URL_prod_rev <-
"https://raw.githubusercontent.com/datasets/s-and-p-500-companies/master/data/constituents-financials.csv"
SP500 <- read.csv(URL_prod_rev,
header=T,
sep=",",
stringsAsFactors=FALSE)
SP500_tickers <- paste("$",SP500$Symbol, sep = "")
SP500_tickers <- paste(SP500_tickers,SP500$Name, sep = " ")
##################################################### First 63 Stock Tweets ################################
# Same authentication helper as twitter_access() but without the return value.
setupCrawler <- function(consumer_key,consumer_secret,access_token,access_secret) {
library(ROAuth)
library(twitteR)
setup_twitter_oauth(consumer_key ,consumer_secret, access_token, access_secret )
}
setupCrawler("x8GzVDZNm376fDEhhgUjeDdLc" ,"l2vc71gcyo2i213kXyLv4ZBNSjQO9hmburVwkyIrEdFQsg395C", "843639734782119937-tdHQ7GTqMEl40Zd5MJgfgweN0uivqY2", "1ZG2StMVZ5XuN0GG7OTQ6ZEPuCCO8wnbxnlAGo5xS58sG")
tweets<- list()
# up to 3 tweets per ticker for the first batch
for (i in SP500_tickers[1:63]) {
tweets[i] <- list(searchTwitter(i,n=3 ,lang = "en", since = '2017-03-22'))
}
tweets<- unlist(tweets)
#convert to data frame
df_stocks_1to63 <- do.call("rbind", lapply(tweets,as.data.frame))
###################################################### 64 to 126 ###############################################
# BUG FIX: this section (per its header) should cover tickers 64-126, but the
# original was a verbatim copy of the 1-63 block: it looped over
# SP500_tickers[1:63] again and clobbered df_stocks_1to63. It now scrapes the
# second batch and stores it under its own name, df_stocks_64to126.
setupCrawler <- function(consumer_key,consumer_secret,access_token,access_secret) {
  library(ROAuth)
  library(twitteR)
  setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
}
setupCrawler("x8GzVDZNm376fDEhhgUjeDdLc" ,"l2vc71gcyo2i213kXyLv4ZBNSjQO9hmburVwkyIrEdFQsg395C", "843639734782119937-tdHQ7GTqMEl40Zd5MJgfgweN0uivqY2", "1ZG2StMVZ5XuN0GG7OTQ6ZEPuCCO8wnbxnlAGo5xS58sG")
tweets <- list()
# up to 3 tweets per ticker for the second batch (tickers 64-126)
for (i in SP500_tickers[64:126]) {
  tweets[i] <- list(searchTwitter(i, n=3, lang = "en", since = '2017-03-22'))
}
tweets <- unlist(tweets)
#convert to data frame
df_stocks_64to126 <- do.call("rbind", lapply(tweets, as.data.frame))
|
e746570e96f929bb8996a6b52fd8ab3c10a5deb4
|
5b155f5ba4ad65af6dc6f31253b3ed216bd7d344
|
/linear_SVM.R
|
4c6bb131d2369f34f321e67be94eb19fc21855ac
|
[] |
no_license
|
zippo92/MobdRProject
|
1a2d722d71336890a5808c49370869c932c8d668
|
4d19e2fffc514840969552d35dc10d3daf05a288
|
refs/heads/master
| 2021-04-29T12:42:01.137361
| 2018-02-25T16:08:59
| 2018-02-25T16:08:59
| 121,732,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,000
|
r
|
linear_SVM.R
|
# TR  = training data,  YTR = training class labels
# TS  = test data,      YTS = test class labels
# Trains a linear SVM (LiblineaR, type 1 = L2-regularized L2-loss SVC,
# cost 1, with bias term) and evaluates it on the test set.
# Returns c(accuracy, TPR for class "0", TPR for class "1", Cohen's kappa),
# each rounded to 4 decimals (kappa computed by calculate_kappa()).
linear_SVM <- function(TR,YTR,TS,YTS){
  # Fit the linear model on the training set
  model <- LiblineaR(data=TR, target=YTR, type=1, cost=1, bias=TRUE, verbose=FALSE)
  # Predict the test set (decision values requested as in the original)
  preds <- predict(model, TS, decisionValues = TRUE)
  # Confusion matrix: element [i,j] = predicted class i, observed class j
  cm <- table(predicted=preds$predictions, observation=YTS)
  # Overall accuracy: correct predictions over number of test rows
  accuracy <- round((cm["0","0"] + cm["1","1"]) / nrow(TS), 4)
  # Per-class true-positive rate: correct / column (observed) total
  TPR_0 <- round(cm["0","0"] / (cm["0","0"] + cm["1","0"]), 4)
  TPR_1 <- round(cm["1","1"] / (cm["1","1"] + cm["0","1"]), 4)
  kappa <- calculate_kappa(cm)
  c(accuracy, TPR_0, TPR_1, kappa)
}
|
a26597450ad97f3b08572f3b0afdee01a06cbfdc
|
8654c26f88587f9f81efec8ddb334e9588c2e8b0
|
/bin/sample.R
|
2895fa18cec63575daefef225cadbdd449b334d8
|
[] |
no_license
|
BigWeiWong/script-sample-qqz
|
38722ffd418fec75f17279dc15188b97dcf7bcc4
|
be592df05aa79fc9fea686a50746810c8d2f13a8
|
refs/heads/master
| 2020-09-13T22:53:18.043980
| 2019-11-20T12:31:46
| 2019-11-20T12:31:46
| 222,929,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,069
|
r
|
sample.R
|
## ---------------------------
## Script name: sample.R
## Purpose of script: template for boxplot and heatmap figures (placeholders
##   marked 'replace' must be filled in before running)
## Author: Qiongzi Qiu
## Date Created: 2019-01-01
## Copyright (c) Qiongzi Qiu, 2019
## Email: 3110102477@zju.edu.cn
## ---------------------------
## Notes: blahblah
## ---------------------------
# NOTE(review): "work dictionary" is a placeholder path -- set the real
# working directory before use.
setwd("work dictionary")
library(ggplot2)
library(reshape2)
library(ggpubr)
library(ComplexHeatmap)
library(circlize)
# NOTE(review): "some package" is a placeholder for a local helper script.
source("some package")
options(stringsAsFactors = FALSE)
##### boxplot sample #####
# For each input file: read a test-statistic table, flag significant up-hits,
# melt to long form, and save a faceted ggpubr boxplot with pairwise
# one-sided comparisons. All 'replace' strings are template placeholders.
infile_list = c('replace',
'replace',
'replace')
for(infile in infile_list){
dat <- read.table(infile, header=T, sep='\t')
# second dot-separated token of the file name encodes the mutation type
mutation_type = strsplit(infile, '\\.')[[1]][2]
# significant if adjusted p < 0.05 and positive, non-NA t statistic
dat['replace'] <- ifelse(dat$KO_non_pval_adj<0.05 & dat$KO_non_t_statistic>0 & !is.na(dat$KO_non_t_statistic), TRUE, FALSE)
dat_melt = melt(dat[,c('replace', 'replace', 'replace', 'replace')], id.vars='replace')
# relabel placeholder values with human-readable names for plotting
dat_melt[dat_melt=='replace'] <- gsub('_', ' ', mutation_type)
dat_melt[dat_melt=='replace'] <- 'replace'
my_comparisons = list(c('replace', 'replace'), c('replace', 'replace'), c('replace', 'replace'))
# boxplot with one-sided (greater) significance annotations, faceted
boxplot_tmp = ggboxplot(dat_melt,
x='variable', y='value',
xlab=NULL, ylab='replace') +
stat_compare_means(comparisons = my_comparisons,
method.args=list(alternative='greater')) +
facet_wrap( ~ replace, scales="fixed")
outfile = paste0(mutation_type, '_replace.pdf')
ggsave(outfile, boxplot_tmp)
}
##### heatmap sample #####
# For each input file and each of two row-selection rules, build a wide
# 0/1/2-coded presence matrix and draw a ComplexHeatmap with marginal count
# barplots. All 'replace' strings are template placeholders.
infile_list = c('replace',
'replace',
'replace',
'replace')
for(infile in infile_list){
dat = read.table(infile, header=T, sep='\t')
# proportion column; undefined (0/0) cells are given a placeholder value
dat[,'replace'] = dat[,'replace']/(dat[,'replace'] + dat[,'replace'])
dat[is.na(dat[,'replace']),'replace'] = 'replace'
# two passes: TRUE = any up-hit rows, FALSE = rows with proportion == 1
for(type_tmp in c(TRUE, FALSE)){
if(type_tmp==TRUE){
dat_tmp = dat[dat$KO_non_up>0, ]
outfile = gsub('replace', 'replace', infile)
}else{
dat_tmp = dat[dat$KO_non_prop==1, ]
outfile = gsub('replace', 'replace', infile)
}
# long -> wide, then transpose so columns are the idvar entries
dat_reshape = reshape(dat_tmp[,c('replace', 'replace', 'replace')], idvar='replace', timevar='replace', direction='wide')
rownames(dat_reshape) = dat_reshape[,1]
dat_reshape = t(dat_reshape[,-1])
# drop all-NA rows/columns
count_row = as.numeric(apply(is.na(dat_reshape)==F, 1, sum))
count_col = as.numeric(apply(is.na(dat_reshape)==F, 2, sum))
dat_reshape = dat_reshape[count_row>0, count_col>0]
dat_reshape = t(apply(dat_reshape, 1, as.numeric))
# recode: 1 -> 2 (full), <1 -> 1 (partial), NA -> 0 (absent)
dat_reshape[dat_reshape==1]=2
dat_reshape[dat_reshape<1]=1
dat_reshape[is.na(dat_reshape)]=0
# per-row / per-column counts of the two non-zero codes, for the barplots
count_row = as.matrix(cbind(as.numeric(apply(dat_reshape==2, 1, sum)),
as.numeric(apply(dat_reshape==1, 1, sum))))
count_col = as.matrix(cbind(as.numeric(apply(dat_reshape==2, 2, sum)),
as.numeric(apply(dat_reshape==1, 2, sum))))
annotation_row = rowAnnotation(Count=row_anno_barplot(count_row,
gp = gpar(fill = c("black", "grey"))))
annotation_col = HeatmapAnnotation(Count = anno_barplot(count_col,
gp = gpar(fill = c("black", "grey"),
col = c(NA, NA))))
pdf(outfile, width=6, height=6)
# rows/columns ordered by decreasing total counts; legend drawn at bottom
p = Heatmap(as.matrix(dat_reshape),
col = c('white', 'grey', 'black'),
show_column_names = F, na_col='white',
row_order = order(rowSums(count_row), decreasing = T),
column_order = order(rowSums(count_col), decreasing = T),
top_annotation = annotation_col,
right_annotation = annotation_row,
heatmap_legend_param = list(
title = "", at = c(2, 1, 0), border = "black", ncol = 3,
labels = c("replace", "replace", "replace")
))
draw(p, heatmap_legend_side = "bottom")
dev.off()
}
}
|
5364a46b3103c6d991018cc1f70e7ff357f4e13d
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/inst/snippet/vectors06.R
|
04559588c9953593e7f9c0d15de381d13ea5add7
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 117
|
r
|
vectors06.R
|
# compare round() and signif() by binding rowwise into matrix
# round(z, 2) keeps 2 digits after the decimal point; signif(z, 2) keeps 2
# significant digits. Assumes `z` is a numeric vector defined by an earlier
# snippet -- TODO confirm.
rbind(round(z, digits = 2), signif(z, digits = 2))
|
5036a3fe57f24d5b99e058c3d75cd41d5972df39
|
4d2cb20823e6bb238be47574bfd4aa6c1973c9de
|
/HW10/HW10-9-health.R
|
4d16e599ce01a2ac38b40b477dfabf6f17d3c2be
|
[] |
no_license
|
praal/data_analysis_course
|
072d289725430bb8980f556e96100ca15fb09623
|
882407822c638f4197cf179dd440b52fd5348c10
|
refs/heads/master
| 2020-03-22T23:27:46.839451
| 2018-07-22T21:49:51
| 2018-07-22T21:49:51
| 140,811,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,761
|
r
|
HW10-9-health.R
|
library(WDI)
library(readr)
library(dplyr)
library(ggplot2)
data = read_csv("~/Downloads/WDI_csv/WDIData.csv")
indic = list("SP.ADO.TFRT", "SP.DYN.CBRT.IN", "SH.DTH.COMM.ZS", "SH.DTH.NCOM.ZS", "SP.REG.DTHS.ZS", "SP.POP.DPND.YG", "SH.STA.BRTC.ZS", "SH.DTH.INJR.ZS", "SP.REG.BRTH.ZS", "SP.DYN.CDRT.IN", "SP.DYN.TFRT.IN", "SH.IMM.IDPT", "SH.TBS.INCD", "SM.POP.TOTL", "SP.DYN.LE00.IN", "SH.STA.TRAF.P5", "SH.STA.ANVC.ZS", "SH.DYN.AIDS.ZS", "SN.ITK.DEFC.ZS", "SH.MED.BEDS.ZS")
alls = NULL
for (ind in 1:20){
ind =1
ind = ind + 1
str = indic[ind]
data %>% filter(`Indicator Code` == str) ->gdp
name = gdp[1,3]
name = name[[1]]
se= c("Country Code","1997", "1998", "1999", "2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009","2010", "2011","2012","2013","2014","2015", "2016")
gdp = gdp[,se]
gdp %>% filter(`Country Code` == "IRN") -> irn
gdp %>% filter(`Country Code` != "IRN") -> rest
restmean = colMeans(rest[,-1], na.rm = TRUE)
irnmean = colMeans(irn[,-1], na.rm = TRUE)
fac1 = cbind(irnmean, restmean)
fac1 = as.data.frame(fac1)
fac1 <- add_rownames(fac1, "year")
fac1gg = data.frame(year = 0, value = 0 ,country =0, type = 0)
s = 1
for (i in 1:(nrow(fac1))){
x = fac1[i,]
fac1gg[s,] = c(x$year, x$irnmean, "iran", name)
s = s+1
fac1gg[s,] = c(x$year, x$restmean, "rest", name)
s = s + 1
}
fac1gg %>% filter(value != "NaN") -> fac1gg
alls = rbind(alls, fac1gg)
mypath <- file.path("~/Desktop/Data Analysis/HW10/images/",paste("hmyplot_", ind, ".jpg", sep = ""))
jpeg(file=mypath)
ggplot(fac1gg, aes(x= year , y = value , color = country)) + geom_point() + ggtitle(name) + theme(axis.text.x = element_text(angle=-90, vjust=0.5))
dev.off()
}
|
2588009f5826fdd63f4940ee9bc1f052689f7b48
|
65498cfa6e8d833640421157641f9e060d8c5460
|
/HIcalculation/functions/quant_delta_map_vTimeSlices.r
|
6a7fdfb70a9e71cf2d27c3050f78099a03461c58
|
[] |
no_license
|
IPCC-WG1/Chapter-12
|
7c372bbef624941c1cca18004617acd12534d827
|
358a09813f5b29ea064a0629c26b030d60c1f972
|
refs/heads/main
| 2023-04-07T23:52:43.573045
| 2023-03-20T08:35:28
| 2023-03-20T08:35:28
| 351,099,250
| 2
| 5
| null | 2023-03-08T17:03:23
| 2021-03-24T13:57:16
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 6,220
|
r
|
quant_delta_map_vTimeSlices.r
|
#Function that applies bias correction using quantile delta mapping (QDM) for different time slices
#Input:
# - model_grid: Model name (string)
# - fname_OBS: File name of heat stress indicator for reference dataset for bias correction (string)
# - fname_MOD: File name of heat stress indicator for model dataset (string)
# - dir_data: Folder where data are stored (string)
# - folder_out: Output folder (string)
# - var_name: Name of variable to be bias corrected (string)
# - time_ref: Vector indicating reference period (2-element vector)
# - time_app: Vector indicating application period (2-element vector)
# - time_mod: Vector indicating the whole time period for which data is present in fname_MOD (2 element vector)
# - DOYs: Number of days per year (string)
# - mask_land: Flag that indicates if only the land gridpoints or all gridpoints should be included (bool)
# - n_cores: Number of parallel workers for the per-gridpoint loop (int; default 40, previously hard-coded)
#Output: one feather file per selected grid point in folder_out, plus Time_selection.feather
quant_delta_map_vTimeSlices <- function(model_grid, fname_OBS, fname_MOD, dir_data, folder_out, var_name, time_ref, time_app, time_mod, DOYs=365, mask_land=TRUE, n_cores=40) {
  #Load libraries
  # NOTE(review): hard-coded user library path -- only valid on the original
  # author's machine; consider moving this configuration out of the function.
  .libPaths(c("/uio/kant/div-cicero-u1/clems/R/x86_64-conda_cos6-linux-gnu-library/3.5", .libPaths()))
  library(MBC)
  library(ncdf4)
  library(doParallel)
  library(feather)
  #Define file names of the date tables and the land mask
  f_name_YEARS_MOD <- paste0(dir_data, 'CMIP6/CMIP6_merged/', toString(time_mod[1]), '-', toString(time_mod[2]), '_dates_YEARS_', toString(DOYs), '.csv')
  f_name_MONTHS_MOD <- paste0(dir_data, 'CMIP6/CMIP6_merged/', toString(time_mod[1]), '-', toString(time_mod[2]), '_dates_MONTHS_', toString(DOYs), '.csv')
  f_name_YEARS_ERA <- paste0(dir_data, 'ERA5/ERA5_regrid/', toString(time_ref[1]), '-', toString(time_ref[2]), '_dates_YEARS_365.csv')
  f_name_MONTHS_ERA <- paste0(dir_data, 'ERA5/ERA5_regrid/', toString(time_ref[1]), '-', toString(time_ref[2]), '_dates_MONTHS_365.csv')
  fname_gridsel <- paste('/div/amoc/exhaustion/Heat_Health_Global/Data/Masks_Heights_Grids/Land_sea_masks/Land_without_Antarctica_', model_grid, '.nc', sep="")
  #Read years and months (one entry per time step of the respective dataset)
  YR_MOD <- read.csv(file=f_name_YEARS_MOD, header=FALSE, sep=",")
  MN_MOD <- read.csv(file=f_name_MONTHS_MOD, header=FALSE, sep=",")
  YR_ERA <- read.csv(file=f_name_YEARS_ERA, header=FALSE, sep=",")
  MN_ERA <- read.csv(file=f_name_MONTHS_ERA, header=FALSE, sep=",")
  #Logical time selections for reference and application periods
  sel_ref_OBS <- YR_ERA>=time_ref[1] & YR_ERA<=time_ref[2]
  sel_ref_MOD <- YR_MOD>=time_ref[1] & YR_MOD<=time_ref[2]
  sel_app_MOD <- YR_MOD>=time_app[1] & YR_MOD<=time_app[2]
  #Get months in reference and application periods
  mon_ref_ERA <- MN_ERA[sel_ref_OBS]
  mon_ref_MOD <- MN_MOD[sel_ref_MOD]
  mon_app_MOD <- MN_MOD[sel_app_MOD]
  #Start indices and counts for reading only the needed time span from NetCDF
  sta_ref_OBS = c(1, 1, which(sel_ref_OBS)[1])
  sta_ref_MOD = c(1, 1, which(sel_ref_MOD)[1])
  sta_app_MOD = c(1, 1, which(sel_app_MOD)[1])
  len_ref_OBS = c(-1, -1, sum(sel_ref_OBS))
  len_ref_MOD = c(-1, -1, sum(sel_ref_MOD))
  len_app_MOD = c(-1, -1, sum(sel_app_MOD))
  #Open data sets
  nc_OBS <- nc_open(fname_OBS)
  nc_MOD <- nc_open(fname_MOD)
  #Read data (lon x lat x time arrays)
  data_OBS_ref <- ncvar_get(nc_OBS, var_name, start=sta_ref_OBS, count=len_ref_OBS)
  data_MOD_ref <- ncvar_get(nc_MOD, var_name, start=sta_ref_MOD, count=len_ref_MOD)
  data_MOD_app <- ncvar_get(nc_MOD, var_name, start=sta_app_MOD, count=len_app_MOD)
  #Close NetCDFs
  nc_close(nc_OBS)
  nc_close(nc_MOD)
  #Create indices for all grid points on which to carry out the analysis
  if (isTRUE(mask_land)) {
    #Read which grid cells to consider (1 = land, excluding Antarctica)
    nc_gridsel <- nc_open(fname_gridsel)
    data_gridsel <- ncvar_get(nc_gridsel, "selection")
    nc_close(nc_gridsel)
    #Select land grid points (removed an unused as.vector() copy here)
    ind = which(data_gridsel==1, arr.ind = TRUE)
    N = dim(ind)[1]
  } else {
    #Select all grid points
    data_gridsel = matrix(data=TRUE, nrow=dim(data_MOD_app)[1], ncol=dim(data_MOD_app)[2])
    ind = which(data_gridsel, arr.ind = TRUE)
    N = dim(ind)[1]
  }
  #Define number of cores for parallel computing (parameterized; was fixed 40)
  registerDoParallel(cores=n_cores)
  #Write time selection to file so downstream code can map rows back to dates
  sel_time = sel_app_MOD
  path = paste0(folder_out, 'Time_selection.feather')
  write_feather(as.data.frame(sel_time), path)
  #Loop over all selected grid points in parallel
  foreach(n=1:N) %dopar% {
    #Select the time series at this grid point
    OBSref <- data_OBS_ref[ind[n,1], ind[n,2], ]
    MODref <- data_MOD_ref[ind[n,1], ind[n,2], ]
    MODapp <- data_MOD_app[ind[n,1], ind[n,2], ]
    #Output vector of corrected values; name kept as "QDM" because it becomes
    #the column name in the feather output (MBC::QDM is still found because R
    #looks up function calls separately from data objects)
    QDM <- rep(NA_real_, len_app_MOD[3])
    #Loop over all months and correct each calendar month separately
    for(i in 1:12) {
      #Select this month's time steps (i%%12 + 12*(i%%12==0) equals i for 1..12)
      selR_ERA <- mon_ref_ERA==(i%%12 + 12*(i%%12==0))
      selR_MOD <- mon_ref_MOD==(i%%12 + 12*(i%%12==0))
      selA_MOD <- mon_app_MOD==(i%%12 + 12*(i%%12==0))
      #Select data for quantile mapping calibration
      OBS_R <- OBSref[selR_ERA]
      MOD_R <- MODref[selR_MOD]
      MOD_A <- MODapp[selA_MOD]
      #Check for NaNs
      out_corr <- rep(NA, length(MOD_A))
      ch1 <- !is.nan(OBS_R)
      ch2 <- !is.nan(MOD_R)
      ch3 <- !is.nan(MOD_A)
      #Perform QDM only if all three series contain at least one valid value
      if (!(sum(ch1)==0 | sum(ch2)==0 | sum(ch3)==0)) {
        QDM_out <- QDM(OBS_R[ch1], MOD_R[ch2], MOD_A[ch3], ratio=FALSE, n.tau=50)
        #Place corrected values back at the non-NaN positions
        out_corr[ch3] <- QDM_out$mhat.p
        QDM[selA_MOD] <- out_corr #Save in output vector
      }
    }
    #Write this grid point's corrected series to file
    path = paste0(folder_out, 'Grid_', toString(ind[n,1]), '_', toString(ind[n,2]), '_data.feather')
    write_feather(as.data.frame(QDM), path)
  }
  #Overwrite big data sets to make them smaller (in case they would not be deleted from cache)
  data_OBS_ref <- 0
  data_MOD_ref <- 0
  data_MOD_app <- 0
}
|
db672b4fdac68ddfe4aa503bd6a50768c54bc135
|
c8ea37fc5b8bee5854db5fcbfe8161dc5fa5581c
|
/silverYY.r
|
18e522bdde982c98b14c0e70d03820f3812c0435
|
[] |
no_license
|
31ingrid/Acod
|
56895c5837350303e1555124e725b403aeb66d1a
|
0220db9c64d28d82e2aa7fd112fdae4216d8b66a
|
refs/heads/master
| 2021-09-07T03:22:24.308624
| 2018-02-16T14:18:35
| 2018-02-16T14:18:35
| 103,710,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 89,615
|
r
|
silverYY.r
|
#high sel
#seed=132 # 145
#mig=2500
#now make flexible with number of microsatellites
#set.seed(2);#1 is good
#set.seed(5);#worked
#set.seed(seed);
library(batch)
parseCommandArgs()
library(Rcpp)
library(inline)
require(RcppArmadillo)
run8pops = '
using namespace Rcpp;
using namespace sugar;
RNGScope scope;
// Variables (and constants) section
NumericVector inputs(INPUTS);
int SSBinitA; SSBinitA=inputs[0];
double pop1_prop; pop1_prop=inputs[1];
int Nyrs; Nyrs=inputs[2];
int nsamp; nsamp=inputs[3];
int ngen; ngen=inputs[4];
double fishmort; fishmort=inputs[6];
int samptype; samptype=inputs[7];
int nsims; nsims=inputs[8];
int N; N=inputs[9];
int Yrs; Yrs=inputs[10];
int mmt; mmt=inputs[11];
double dpow; dpow=inputs[12];
double rmate; rmate=inputs[13];
int ages; ages=inputs[14];
int popdyn;popdyn=inputs[15];
int nSNP; nSNP=inputs[16];
int nmig; nmig=inputs[17];
double nmig_prop;
int SSBinitB; SSBinitB=inputs[18];
int nmig1;nmig1=nmig;
NumericVector fishsel(ages); //this comes from the average selectivity at age 1963-2011
//HIGH FISHSEL//fishsel[0]=0;//I changed this because of age structure
//fishsel[0]=0.2202403;changed from this
//fishsel[1]=0.2376;
//fishsel[2]=0.9080;fishsel[3]=1.0866;
//fishsel[4]=1.0712;fishsel[5]=1.0268;fishsel[6]=0.9764;fishsel[7]=0.9764;
//mean fishsel
//fishsel[0]=0.2376;
fishsel[0]=0;fishsel[1]=0.818;fishsel[2]=0.974;fishsel[3]=0.895;fishsel[4]=0.847;fishsel[5]=0.848;fishsel[6]=0.848;
int npops; if (pop1_prop<1){npops=2;}else{npops=1;}
int n_l; //proportion of fish in population 1
if (pop1_prop<0.5){n_l=1;}
if (pop1_prop==0.5){n_l=5;}
NumericVector N_til_init_a(ages); //numbers at age with 1 recruit
NumericVector N_til_init_a40(ages); //numbers at age with 1 recruit and F40%
NumericVector S_l0(2); //SSB in pop1 and 2
NumericVector R_l0A(Nyrs); //recruits in popA for each year
NumericVector R_l0B(Nyrs); //recruits in popB for each year
NumericVector R_l0A_hat(Nyrs); //recruitment with epsilon error term (same as for estimating numbers)
NumericVector R_l0B_hat(Nyrs); //
NumericVector Q_a(ages); //maturity index at age popA (Skagerrak)
NumericVector Q_b(ages); //maturity index at age popB (North Sea)
//North Sea maturity ICES
Q_b[0]=0.01;Q_b[1]=0.05;Q_b[2]=0.23;Q_b[3]=0.62;Q_b[4]=0.86;Q_b[5]=1;Q_b[6]=1;
//Skagerrak cod maturity (Knutsen 2011)
Q_a[0]= 0.03226423;Q_a[1]= 0.14982172;Q_a[2]= 0.48225745;Q_a[3]= 0.83117813;Q_a[4]= 0.96299517;Q_a[5]= 0.99278248;
Q_a[6]= 0.99862647;
double psi=1.75e-5; //used in weight at age (von bertallanffy)
double theta=2.8571; //used in weight at age
double Linf = 197; //wt at age
double K=0.1030; //wt at age
NumericVector Length(ages); //length at age
NumericVector Weight_a(ages); //wt at age
NumericVector Weight_b(ages); //wt at age
double meanFullF=0.8962449;
double varFullF=0.0232579;
NumericVector age(ages);
age=SeqLen(ages)-1;
int temp;
double temp2;
NumericVector N_init_a(ages);
NumericVector N_init_a1(ages);
NumericVector N_init_a2(ages);
NumericVector N_init_a1_FIRST(ages);
NumericVector N_init_a2_FIRST(ages);
//N_init declared below is sum of pop1_size and pop2_size (integer)
List genotypes(2);
int pop1_size;
int pop2_size;
List outmat(250);
double n_sampA;
double n_sampB;
NumericMatrix out1(nsims,Nyrs);
NumericMatrix out2(nsims,Nyrs);
NumericMatrix out3(nsims,Nyrs);
NumericMatrix out4(nsims,Nyrs);
NumericMatrix out5(nsims,Nyrs);
NumericMatrix out6(nsims,Nyrs);
NumericMatrix out7(nsims,Nyrs);
NumericMatrix out8(nsims,Nyrs);
// ---------------------------------------------------------------------------
// Simulation state declarations. This is the interior of a much larger Rcpp
// function; symbols such as nsims, Nyrs, ages, nSNP, ngen, N, Yrs, pop1_prop,
// samptype, nsamp, rmate, nmig, genotypes, fishsel, fishmort, Q_a, Q_b, age,
// Linf, K, psi, theta, SSBinitA/B, meanFullF, popdyn, mmt, and the various
// N_til_init_* / R_l0* vectors are declared before this section (not visible
// here) -- TODO confirm against the full file.
// ---------------------------------------------------------------------------
// Per-simulation output matrices (rows = simulations, cols = years).
NumericMatrix out9(nsims,Nyrs);
NumericMatrix out10(nsims,Nyrs);
NumericMatrix out11(nsims,Nyrs);
NumericMatrix out12(nsims,Nyrs);
NumericMatrix out13(nsims,Nyrs);
NumericMatrix out14(nsims,Nyrs);
NumericMatrix out15(nsims,Nyrs);
NumericMatrix out16(nsims,Nyrs);
NumericVector out17(nsims);//1 is true Fst, 2 is initial SBPRA, 3 is initial SBPRB
NumericVector out_fst60(nsims);
NumericMatrix numsmat(nsims,ages);
// Fst and significance matrices for the full time series and for 9-way
// subsampled comparisons (the "9" column counts match the ninecomp vectors
// built at the bottom of this section).
NumericMatrix fst_submatALL(nsims,Nyrs);
NumericMatrix sig_submatALL(nsims,Nyrs);
NumericMatrix fst_submat(nsims,9);
NumericMatrix sig_submat(nsims,9);
NumericMatrix fst_submat60(nsims,9);
NumericMatrix sig_submat60(nsims,9);
NumericMatrix fst_submat9(nsims,9);
NumericMatrix sig_submat9(nsims,9);
NumericMatrix fst_submat99(nsims,9);
NumericMatrix sig_submat99(nsims,9);
NumericMatrix out21(nsims,Nyrs);//subsample SSB/initial SSB popA
NumericMatrix out22(nsims,Nyrs);//subsample SSB/initial SSB popB
NumericMatrix out23(nsims,Nyrs);//est SSB/SB40 popA
NumericMatrix out24(nsims,Nyrs);//est SSB/SB40 popB
NumericMatrix out25(nsims,ngen+Nyrs);//fst_vec
NumericVector genvec(nsims);
List pop1(nSNP+1); //here is where I split the data into pop1 and pop2
List pop2(nSNP+1);
List popA(Nyrs+ages);
List popB(Nyrs+ages);
// Scratch variables for allele picking / mutation direction in the genetic
// submodel.
int pick1;
int pick2;
double prob;
double dir;
int mover1;
int mover2;
NumericVector mean_arichA(Nyrs);
NumericVector mean_arichB(Nyrs);
// Spawning-biomass-per-recruit (SBPR) bookkeeping for each population.
double SPR_init_a;
double SPR_init_b;
double SPR_init_F40_a;
double SPR_init_F40_b;
double S_lyINIT_a;
double S_lyINIT_b;
// Total allowable catch and estimated spawning biomass, per year.
NumericVector TACA(Nyrs);
NumericVector TACB(Nyrs);
NumericVector TAC(Nyrs);
NumericVector S_hat_syjA(Nyrs);
NumericVector S_hat_syjB(Nyrs);
NumericVector S_hat_syj(Nyrs);
NumericVector SSB_hatA(Nyrs);
NumericVector SSB_hatB(Nyrs);
NumericVector SSB_hat(Nyrs);
NumericVector SPRAtrue(Nyrs);
NumericVector SPRBtrue(Nyrs);
NumericVector FishAvec(Nyrs); //vector of optimal fishing mortality for popA (separated management)
NumericVector FishBvec(Nyrs);//vector of optimal fishing mortality for popB (separated management)
// Biological constants: observation CVs, natural mortality, steepness h,
// recruitment CV. Values appear hard-coded for one species/case study.
double CVa=0.6085044;
double CVs=0.779;
double M=0.34;
double h=0.9;
double CVr=0.75;
NumericVector M2(ages);
NumericVector Surv(ages);
M2[0]=0.9;//M2[0]=1.038;
M2[1]=0.698;M2[2]=0.490;M2[3]=0.233;M2[4]=0.2;M2[5]=0.2;M2[6]=0.2;//natural mortality from ICES report
//Surv[0]=1;Surv[1]=0.43403;Surv[2]=0.18838;Surv[3]=0.08177;Surv[4]=0.03549;Surv[5]=0.01540;Surv[6]=0.00669;
Surv[0]=0.43403;Surv[1]=0.18838;Surv[2]=0.08177;Surv[3]=0.03549;Surv[4]=0.01540;Surv[5]=0.00669;Surv[6]=0.002;
// Autocorrelated observation-error deviates (AR(1) with rho below).
NumericVector epsilonA(Nyrs);
NumericVector epsilonB(Nyrs);
NumericVector epsilon(Nyrs);
epsilonA[0]=0;
epsilonB[0]=0;
epsilon[0]=0;
int genrule;
int genrule2;
NumericVector N_hat_aA(ages);
NumericVector N_hat_aB(ages);
NumericVector N_hat_a(ages);
// Lognormal variances from CVs: sigma^2 = log(CV^2 + 1). The population-
// specific versions scale CVa^2 by 1/pop1_prop (popA) and 1-1/pop1_prop
// (popB). NOTE(review): assumes pop1_prop is a double -- if it were an int,
// 1/pop1_prop would truncate; also 1-(1/pop1_prop) is negative for
// pop1_prop<1, making the log argument <1 -- confirm intended.
double sigma_ja2A=log(((1/pop1_prop)*pow(CVa,2))+1);
double sigma_ja2B=log(((1-(1/pop1_prop))*pow(CVa,2))+1);
double sigma_ja2=log(pow(CVa,2)+1);
double sigma_js2A=log(((1/pop1_prop)*pow(CVs,2))+1);
double sigma_js2B=log(((1-(1/pop1_prop))*pow(CVs,2))+1);
double sigma_js2=log(pow(CVs,2)+1);
double sigma_jr2=log(pow(CVr,2)+1);//may want to check this
double psiA;
double psiB;
double psi_star;
double rho=0.9;
double Fish=0; //set here so initially it is not a problem
double S_lyA;//true SSB
double S_lyB;
double eta_jyA;
double eta_jyB;
double eta_jy;
// Reusable scratch vectors for intermediate weight*numbers products.
NumericVector temp8;
NumericVector temp9;
NumericVector temp10;
NumericVector temp11;
NumericVector temp12;
NumericVector temp13;
NumericVector temp14;
NumericVector temp15;
NumericVector temp16;
NumericVector temp17;
int temp18;
int temp19;
NumericVector Wt;
double FishA;
double FishB;
double sepruleA;
double sepruleB;
double seprule;
NumericVector Effort_tot;
NumericVector d(10); //10 is right because it is for the 10 areas
NumericVector Effort(10);
NumericVector Effort_norm;
NumericVector vec;
// Spawner counts by age (raw and rounded to even integers so they can be
// split into males and females).
NumericVector spawnersA(ages);
NumericVector spawnersB(ages);
NumericVector spawnersA_rounded(ages);
NumericVector spawnersB_rounded(ages);
NumericVector spawnersA_rounded_star(ages);
NumericVector spawnersB_rounded_star(ages);
double pick_spawner_mal;
double pick_spawner_fem;
NumericVector which_spawnersA;
int some_integerA;
int some_integerB;
List tmpListA;
List tmpListB;
int pickM;
int pickF;
double pick_moverA;
double pick_moverB;
int mov_pick1;
int mov_pick2;
//NumericMatrix moverA2Brow1;
NumericMatrix RecA;
NumericMatrix RecB;
NumericMatrix recA;
NumericMatrix recB;
double rand_doub;
int samp_mom;
int samp_dad;
int rand_allele;
List tmpList_recsA(2);
List tmpList_recsB(2);
NumericVector NeAvec(Nyrs); //Keep Ne from each year popA here
NumericVector NeBvec(Nyrs); //Keep Ne from each year popB here
//NumericVector FSH;
//IntegerVector FSH_int;
List tmpdoneA(2);
List tmpdoneB(2);
NumericMatrix tmpageA_fem;
NumericMatrix tmpageA_mal;
NumericMatrix tmpageB_fem;
NumericMatrix tmpageB_mal;
// Allelic-richness tracking containers.
NumericVector all_richA;
NumericVector all_richB;
NumericVector arichA(Nyrs);
NumericVector arichB(Nyrs);
NumericMatrix allA(Nyrs+1,ages);
NumericMatrix allB(Nyrs+1,ages);
NumericMatrix allspawnA(Nyrs,ages);
NumericMatrix allspawnB(Nyrs,ages);
NumericVector N_aA(ages);
NumericVector N_aB(ages);
List spawn_malA(Nyrs);
List spawn_femA(Nyrs);
List spawn_malB(Nyrs);
List spawn_femB(Nyrs);
NumericMatrix tmpMatrixA_1;
NumericMatrix tmpMatrixA_2;
NumericMatrix tmpMatrixB_1;
NumericMatrix tmpMatrixB_2;
IntegerVector NspawnersA(Nyrs);
IntegerVector NspawnersB(Nyrs);
NumericMatrix spawn_malA_thisyr; //twice as many genotypes as spawners
NumericMatrix spawn_femA_thisyr;
NumericMatrix spawn_malB_thisyr;
NumericMatrix spawn_femB_thisyr;
NumericMatrix tmpMatrixA_1plus;
NumericMatrix tmpMatrixA_2plus;
NumericMatrix tmpMatrixB_1plus;
NumericMatrix tmpMatrixB_2plus;
NumericMatrix tmpMatrixA_1details;
NumericMatrix tmpMatrixA_2details;
NumericMatrix tmpMatrixB_1details;
NumericMatrix tmpMatrixB_2details;
List tmpList_plusgroupA(2);
List tmpList_plusgroupB(2);
List tmpList_detailsA(2);
List tmpList_detailsB(2);
IntegerVector mal_spawnersA; //after random mating **also make sure you get the right row
IntegerVector fem_spawnersA; //after random mating
IntegerVector mal_spawnersB; //after random mating **also make sure you get the right row
IntegerVector fem_spawnersB; //after random mating
IntegerVector spawner_listM; //list of ones to choose from
IntegerVector spawner_listF;
NumericVector N_init_a1_star(ages);
NumericVector N_init_a2_star(ages);
NumericVector NtotalA(Nyrs+ages-1);
NumericVector NtotalB(Nyrs+ages-1);
NumericVector SB40_vecA(Nyrs);
NumericVector SB40_vecB(Nyrs);
NumericMatrix allelesA;
NumericMatrix allelesB;
NumericMatrix arichAmat;
NumericMatrix arichBmat;
NumericMatrix richnessA;
NumericMatrix richnessB;
// Fst time series under several estimators / subsampling schemes.
NumericVector fst_vec(ngen+Nyrs);
NumericVector fst_vec2(ngen+Nyrs);
NumericVector Wrights_fst_vec(ngen+Nyrs);
NumericVector Wrights_fst_vec1(ngen+Nyrs);
NumericVector Wrights_fst_vec2(ngen+Nyrs);
NumericVector Wrights_simple(ngen+Nyrs);
NumericVector Wrights_simple1(ngen+Nyrs);
NumericVector Wrights_fst_vec_sub(ngen+Nyrs);
NumericVector Wrights_fst_vec_sub1(ngen+Nyrs);
NumericVector Wrights_simple_sub(ngen+Nyrs);
NumericVector arichsampA_vec(ngen+Nyrs);
NumericVector arichsampB_vec(ngen+Nyrs);
NumericVector arichsampA_ten(nSNP);
NumericVector arichsampB_ten(nSNP);
NumericVector ninecomp(9);
NumericVector diffsamp(10);
NumericVector nmig_vec(Nyrs);
NumericVector ps;
NumericVector s2;
NumericMatrix arichAmat_sub620;
NumericMatrix arichBmat_sub620;
NumericMatrix arichAmat_sub640;
NumericMatrix arichBmat_sub640;
NumericMatrix arichAmat_sub680;
NumericMatrix arichBmat_sub680;
//NumericVector arichAmat_sub;
//NumericVector arichBmat_sub;
NumericMatrix talmat2;
NumericMatrix talmat_sub2;
NumericVector NtotalA2;
NumericVector count_sub(nSNP);//list of number of unique alleles at each locus
// Select the 9-way comparison layout (ninecomp) and per-area sample sizes
// (diffsamp) by scenario. NOTE(review): exact floating-point equality tests
// on pop1_prop (==0.2, ==0.5, ==0.01, ==0.05) are fragile -- they only work
// if pop1_prop is set from these exact literals. Also the pop1_prop<=0.1
// branch already covers 0.01 and 0.05, so the last two ifs redundantly
// re-assign the same values -- harmless but confirm intent.
if (pop1_prop==1){ninecomp=NumericVector::create(1,1,1,1,1,1,1,1,1);
if(samptype==1){diffsamp=rep(nsamp,10);}else{
diffsamp=NumericVector::create(200,180,155,130,110,90,65,40,20,10);}}
if (pop1_prop==0.2){ninecomp=NumericVector::create(1,2,2,2,2,2,2,2,2);
if(samptype==1){diffsamp=rep(nsamp,10);}else{
diffsamp=NumericVector::create(324,177,118,88,71,59,50,44,39,29);}}
if (pop1_prop==0.5){ninecomp=NumericVector::create(1,2,1,1,2,2,2,2,2);
if(samptype==1){diffsamp=rep(nsamp,10);}else{
diffsamp=NumericVector::create(170,130,100,65,33,170,130,100,65,33);}}
if (pop1_prop<=0.1){ninecomp=NumericVector::create(2,2,2,2,2,2,2,2,2);
if(samptype==1){diffsamp=rep(nsamp,10);}else{
diffsamp=NumericVector::create(324,177,118,88,71,59,50,44,39,29);}}
if (pop1_prop==0.01){ninecomp=NumericVector::create(2,2,2,2,2,2,2,2,2);
if(samptype==1){diffsamp=rep(nsamp,10);}else{
diffsamp=NumericVector::create(324,177,118,88,71,59,50,44,39,29);}}
if (pop1_prop==0.05){ninecomp=NumericVector::create(2,2,2,2,2,2,2,2,2);
if(samptype==1){diffsamp=rep(nsamp,10);}else{
diffsamp=NumericVector::create(324,177,118,88,71,59,50,44,39,29);}}
// ===========================================================================
// Main simulation-replicate loop. Opens here and closes far beyond this
// section. First step per replicate: simulate nSNP neutral loci to mutation-
// drift equilibrium in a single panmictic population of N individuals
// (stepwise mutation model + Wright-Fisher random mating for Yrs years).
// ===========================================================================
for (int zcounter=0;zcounter<nsims;zcounter ++){ //change back to 10
Rcout<<"zcounter"<<std::endl;
Rf_PrintValue(wrap(zcounter));
List data(nSNP+1);
NumericVector MUS(nSNP);
NumericVector SSBA_vec(Nyrs);
NumericVector SSBB_vec(Nyrs);
int num_wrong_pops;
int num_wrong_pops60;
NumericVector which_wrong_pops; //vector of the split locations
NumericMatrix R_l0_mat;//this stores R_l0 estimates for each wrong pop for 100 yers
NumericMatrix epsilon_vecmat;
NumericMatrix TAC_vec;
NumericVector which_wrong_pops60; //vector of the split locations
NumericMatrix R_l0_mat60;//this stores R_l0 estimates for each wrong pop for 100 yers
NumericMatrix epsilon_vecmat60;
NumericMatrix TAC_vec60;
if (popdyn==1){
// sim script - this is the part that sets up alleles at each locus
for (int m=0; m < nSNP; m++){ // this is for the 10 loci we simulate
NumericVector alleles;
double prob;
double dir;
double mu;
int pick;
NumericVector alleles1(2*N);
NumericVector PICKS(2*N);
// Per-locus mutation rate drawn from a Beta(0.6,1), scaled by 0.1.
// NOTE(review): the inline comment says "scale down by 1/100 so max is
// 0.01" but the factor is 0.1 (max 0.1) -- code and comment disagree;
// confirm which is intended.
mu=0.1*as<double>(rbeta(1,0.6,1));//here scale down by 1/100 so max is 0.01 OR not
MUS[m]=mu; //this takes mu back to 0.01* the rbeta
alleles=rep(200,2*N); //cool! this works in C++ -- all alleles start at size 200
for (int k=0; k < Yrs; k++){ //repeat mutation and random mating each year
//here is the mutate part
for (int i=0; i < 2*N; i++){ //repeat mutation for each allele
prob=as<double>(runif(1,0,1));
dir=as<double>(runif(1,0,1));
// Stepwise mutation: with probability mu, step allele size +-1 with
// equal probability. ('&' is bitwise AND on bools here -- same result
// as '&&' for these operands, just no short-circuit.)
if(prob<mu&dir<0.5){
alleles[i]=alleles[i]+1;
}else if (prob<mu&dir>0.5){
alleles[i]=alleles[i]-1;
}
}
//now the random mating part - first choose a random allele
for (int j=0; j < 2*N; j++){ //repeat random mating 2N times
// Draw a random index in [0, 2N-1]; the int(prob)+1 dance rounds to
// the nearest integer rather than truncating.
prob=as<double>(runif(1,0,(2*N)-1));
pick=int(prob);
if((prob+0.5)>=(int(prob)+1)){
pick=int(prob)+1;
}
//take that random allele and save it
PICKS[j]=pick;
alleles1[j]=alleles[pick];
}
alleles=alleles1;
}//end of Yrs
data[m]=alleles1;
}//end of m loop
data[nSNP]=MUS; // last list slot stores the per-locus mutation rates
// end of simRcpp script
}//end popdyn==1
// ---------------------------------------------------------------------------
// Initial equilibrium age structure and spawning-biomass-per-recruit (SBPR).
// Builds numbers-at-age per single recruit (unfished and at F40%), converts
// target initial SSBs into recruit numbers for populations A and B, rounds
// numbers-at-age to even integers (so each age class splits evenly into
// males/females), and records initial SBPR quantities.
// ---------------------------------------------------------------------------
// initial numbers section (if there was one recruit)
N_til_init_a[0]=1;
for (int i=1; i < (ages-1); i++){ //fill in N_til_init;
N_til_init_a[i]=N_til_init_a[i-1]*exp(-M2[i-1]);}
// Plus group: geometric accumulation of survivors at the terminal age.
N_til_init_a[ages-1]=N_til_init_a[ages-2]*exp(-M2[ages-2])/(1-exp(-M2[ages-1]));
//establish the SBPR with 1 recruit and F40%
N_til_init_a40[0]=1;
for (int i=1; i < (ages-1); i++){ //fill in N_til_init;
N_til_init_a40[i]=N_til_init_a40[i-1]*exp(-(M2[i-1]+fishsel[i-1]*fishmort));}
N_til_init_a40[ages-1]=N_til_init_a40[ages-2]*exp(-(M2[ages-2]+fishsel[ages-2]*fishmort))/(1-exp(-(M2[ages-1]+fishsel[ages-1]*fishmort)));
//for (int i=1; i < (ages-1); i++){ //fill in N_til_init;
//N_til_init_a40[i]=N_til_init_a40[i-1]*Surv[i-1];}
//N_til_init_a40[ages-1]=N_til_init_a40[ages-2]*Surv[ages-2]+N_til_init_a40[ages-1]*Surv[ages-1];
// establish length, weight of males and females
// Von Bertalanffy length-at-age and allometric weight-at-age for pop B;
// pop A weights are hard-coded below.
for (int i=0; i < ages; i++){
Length[i]=Linf*(1-exp(-K*(age[i])));
Weight_b[i]=psi*pow((Linf*(1-exp(-K*(age[i])))),theta);
} //0.001 is for kg to mt conversion
Weight_a[0]=0;Weight_a[1]=0.5508;Weight_a[2]=1.1016;Weight_a[3]=1.6524;
Weight_a[4]=2.2032;Weight_a[5]=2.7540;Weight_a[6]=3.3048;
// determine initial number of recruits and the SSB in each population (pops 1 and 2)
S_l0[0]=SSBinitA; //S_l0 initial spawning stock biomass for pop1
S_l0[1]=SSBinitB; //S_l0 initial spawning stock biomass pop2
// SBPR per recruit = 0.5 * sum(weight * numbers * maturity); dividing target
// SSB by it (times 2) gives equilibrium recruits.
NumericVector tempvec1a = Weight_a*N_til_init_a*Q_a;
NumericVector tempvec1b = Weight_b*N_til_init_a*Q_b;
R_l0A[0]=2.0*S_l0[0]/std::accumulate(tempvec1a.begin(), tempvec1a.end(), 0.0); //initial number recruits pop1 NOTE: if only one pop, all are in popA
R_l0B[0]=2.0*S_l0[1]/std::accumulate(tempvec1b.begin(), tempvec1b.end(), 0.0); //init recruits pop2
double R_l0A_init=R_l0A[0]; //record the first recruits for beginning of mgmt_counter loop
double R_l0B_init=R_l0B[0];
//n_init_a1 is the number of fish in each age class
//this loop will also round to an even number in each age class so you can split into males and females
for (int i=0; i<ages; i++){
N_init_a1[i]=N_til_init_a[i]*R_l0A[0];
temp=int(N_init_a1[i]);
if((N_init_a1[i]+0.5)>=(int(N_init_a1[i])+1)){
temp=int(N_init_a1[i])+1;}
N_init_a1[i]=temp;
// If odd, randomly bump up or down by one to force an even count.
if (temp%2!=0){
temp2=as<double>(runif(1,-1,1));
if(temp2>=0){N_init_a1[i]=N_init_a1[i]+1;}
else{N_init_a1[i]=N_init_a1[i]-1;}
}
N_init_a2[i]=N_til_init_a[i]*R_l0B[0];
temp=int(N_init_a2[i]);
if((N_init_a2[i]+0.5)>=(int(N_init_a2[i])+1)){
temp=int(N_init_a2[i])+1;}
N_init_a2[i]=temp;
if (temp%2!=0){
temp2=as<double>(runif(1,-1,1));
if(temp2>=0){N_init_a2[i]=N_init_a2[i]+1;}
else{N_init_a2[i]=N_init_a2[i]-1;}
}
}//end this for loop
// Keep pristine copies so each management scenario can restart from the
// same initial age structure.
N_init_a1_FIRST = clone(N_init_a1);
N_init_a2_FIRST = clone(N_init_a2);
N_init_a = N_init_a1+N_init_a2;
NumericVector tempvec_a = Weight_a*N_init_a*Q_a;
NumericVector tempvec_b = Weight_b*N_init_a*Q_b;
S_lyINIT_a=0.5*std::accumulate(tempvec_a.begin(), tempvec_a.end(), 0.0);//Initial Spawning bimass
S_lyINIT_b=0.5*std::accumulate(tempvec_b.begin(), tempvec_b.end(), 0.0);//Initial Spawning bimass
SPR_init_a=S_lyINIT_a/N_init_a[0]; //initial SPBR
SPR_init_b=S_lyINIT_b/N_init_a[0]; //initial SPBR
tempvec_a = Weight_a*N_til_init_a40*Q_a;
tempvec_b = Weight_b*N_til_init_a40*Q_b;
SPR_init_F40_a=0.5*std::accumulate(tempvec_a.begin(), tempvec_a.end(), 0.0);//Initial Spawning bimass
SPR_init_F40_b=0.5*std::accumulate(tempvec_b.begin(), tempvec_b.end(), 0.0);//Initial Spawning bimass
NumericVector temp6=(N_init_a1*Weight_a*Q_a);
double S_lyINITA=0.5*std::accumulate(temp6.begin(),temp6.end(),0.0);
NumericVector temp7=(N_init_a2*Weight_b*Q_b);
double S_lyINITB=0.5*std::accumulate(temp7.begin(),temp7.end(),0.0);
double SPR_initA; //initial SBPR popA
double SPR_initB; //initial SBPR popB
pop1_size = std::accumulate(N_init_a1.begin(),N_init_a1.end(),0.0);
pop2_size = std::accumulate(N_init_a2.begin(),N_init_a2.end(),0.0);
int N_init=pop1_size+pop2_size;
// ---------------------------------------------------------------------------
// "Evolve" phase: split the equilibrium allele pool into two populations of
// sizes pop1_size/pop2_size, then evolve each independently for ngen
// generations (mutation + Wright-Fisher resampling) so they drift apart.
// Migration during this phase is intentionally commented out so Fst can
// build up between the populations.
// ---------------------------------------------------------------------------
if (popdyn==1){
// evolve script - takes alleles for all individuals, splits it (or not in the case of one population) and then evolves population(s) for ngen generations
NumericVector transferdata(N_init);
//size of pop1 and pop2 (out of the original size)
NumericVector alleles_pop1_before(2*pop1_size);
NumericVector alleles_pop2_before(2*pop2_size);
NumericVector alleles_pop1_after(2*pop1_size); //after random mating
NumericVector alleles_pop2_after(2*pop2_size); //after random mating
NumericVector PICKS_pop1(2*pop1_size);
NumericVector PICKS_pop2(2*pop2_size);
//here we extract elements of the data list (with 10 microsatellites)
NumericVector selectA(2*pop1_size);
NumericVector selectB(2*pop2_size);
int pick3;
// NOTE(review): pops A and B are seeded by sampling WITH replacement from
// the same ancestral pool (independent random index draws), not by
// partitioning it -- confirm this is the intended founding model.
for (int i=0; i < 2*pop1_size; i++){ //create a list of random numbers 0-1999 twice as big as pop1_size to randomly select alleles from sim
prob=as<double>(runif(1,0,(2*N)-1)); //works, I checked it
pick3=int(prob);
if((prob+0.5)>=(int(prob)+1)){
pick3=int(prob)+1;
}
selectA[i]=pick3;}
for (int i=0; i < 2*pop2_size; i++){ //create a list of random numbers 0-1999 twice as big as pop2_size to randomly select alleles from sim
prob=as<double>(runif(1,0,(2*N)-1));
pick3=int(prob);
if((prob+0.5)>=(int(prob)+1)){
pick3=int(prob)+1;
}
selectB[i]=pick3;}
for (int i=0; i < nSNP; i++){
transferdata = data[i];
NumericVector tx1(2*pop1_size);
NumericVector tx2(2*pop2_size);
for (int j=0; j < 2*pop1_size; j++){
tx1[j]=transferdata[selectA[j]];
}
for (int j=0; j < 2*pop2_size; j++){
tx2[j]=transferdata[selectB[j]];
}
pop1[i]=tx1;
pop2[i]=tx2;
}
for (int k=0; k < ngen; k++){ //for ngen generations
for (int j=0; j<nSNP; j++){ //for all 10 microsatellites
//mutate alleles (stepwise +-1 with per-locus probability MUS[j])
alleles_pop1_before = pop1[j];
for (int i=0; i < 2*pop1_size; i++){ //pop1 mutation for each allele
prob=as<double>(runif(1,0,1));
dir=as<double>(runif(1,0,1));
if(prob<(MUS[j])&dir<0.5){
alleles_pop1_before[i]=alleles_pop1_before[i]+1;
}else if (prob<(MUS[j])&dir>0.5){
alleles_pop1_before[i]=alleles_pop1_before[i]-1;
}
}
alleles_pop2_before = pop2[j];
for (int i=0; i < 2*pop2_size; i++){ //pop2 mutation for each allele
prob=as<double>(runif(1,0,1));
dir=as<double>(runif(1,0,1));
if(prob<(MUS[j])&dir<0.5){
alleles_pop2_before[i]=alleles_pop2_before[i]+1;
}else if (prob<(MUS[j])&dir>0.5){
alleles_pop2_before[i]=alleles_pop2_before[i]-1;
}
}
//random mating: resample 2*pop_size alleles, but only from the first
//rmate fraction of indices -- rmate<1 shrinks the effective breeding
//pool (reduces Ne). Upper bound 2*(rmate*size)-0.5 with rounding.
for (int i=0; i < 2*pop1_size; i++){ //random mating for each allele
prob=as<double>(runif(1,-0.5,(2*(rmate*pop1_size)-0.5)));
pick1=int(prob);
if((prob+0.5)>=(int(prob)+1)){
pick1=int(prob)+1;
}
PICKS_pop1[i]=pick1; //also seems to work, values from zero to (2*pop1_size)-1
alleles_pop1_after[i]=alleles_pop1_before[pick1];
}
//PICKS_pop1 is a set of random alleles from 0 to (2*pop1_size)-1 who will reproduce
for (int i=0; i < 2*pop2_size; i++){ //random mating for each allele
prob=as<double>(runif(1,-0.5,(2*(rmate*pop2_size)-0.5))); //was 2*pop2_size-0.5
pick2=int(prob);
if((prob+0.5)>=(int(prob)+1)){
pick2=int(prob)+1;
}
PICKS_pop2[i]=pick2;
alleles_pop2_after[i]=alleles_pop2_before[pick2];
}
//for each of j microsatellites, I have mutated them, and then selected some
//to reproduce next year. They are now stored in alleles_pop1_after and alleles_pop2_after
//migration removed for now in order to achieve high enough Fst values between populations
//int nmigevol;
//nmigevol=nmig;
//if(nmigevol>0){
//for (int m=0; m<nmigevol; m++){// for each migrant
//mover1=alleles_pop1_after(PICKS_pop1[m]);
//alleles_pop1_after.erase(PICKS_pop1[m]);
//mover2=alleles_pop2_after(PICKS_pop2[m]);
//alleles_pop2_after.erase(PICKS_pop2[m]);
//alleles_pop1_after.insert(PICKS_pop1[m],mover2);
//alleles_pop2_after.insert(PICKS_pop2[m],mover1);
//} // for each migrant
//} //if nmig>0
// Clone before storing: Rcpp vectors are proxies, so storing without a
// clone would alias the buffer reused next generation.
NumericVector clone_alleles_pop1_after=clone(alleles_pop1_after);
NumericVector clone_alleles_pop2_after=clone(alleles_pop2_after);
pop1[j]=clone_alleles_pop1_after; //update the list of alleles
pop2[j]=clone_alleles_pop2_after;
} //for each of 10 microsatellites
Rcout<<"k in evolve"<<std::endl;
Rf_PrintValue(wrap(k));
genotypes[0]=pop1;
genotypes[1]=pop2;
//end of evolve
//**********************************************************************************************
//TRY FST CALCS HERE
// Every 200 generations (still inside the ngen loop), compute the Weir &
// Cockerham (1984) theta estimate of Fst between pop1 and pop2 across all
// nSNP loci, using every individual (no subsampling), and store it in
// fst_vec[k].
if (k%200==0){
allelesA = NumericMatrix((2*pop1_size),nSNP); //is it really 2*? yes
allelesB = NumericMatrix((2*pop2_size),nSNP); //is it really 2*? yes
arichAmat=NumericMatrix(2*pop1_size,nSNP);
arichBmat=NumericMatrix(2*pop2_size,nSNP);
richnessA=NumericMatrix(Nyrs,nSNP); //holds each year allelic richness for each microsat
richnessB=NumericMatrix(Nyrs,nSNP);
// now fill up allelesA with genotypes (one in each column)
List temp_genA;
List temp_genB;
NumericVector temp3;
//CHECK WHETERH THIS WORKS FOR 1 population
temp_genA=genotypes[0];
for (int i=0; i<nSNP; i++){
temp3=temp_genA[i];
allelesA(_,i)=temp3; //each column of allelesA has all alleles for that usat
}
temp_genB=genotypes[1];
for (int i=0; i<nSNP; i++){
temp3=temp_genB[i];
allelesB(_,i)=temp3;
}
//make a matrix to hold genotypes for keeping track of allelic richness
//first year popA and popB
arichAmat=allelesA; //arichAmat is all alleles males and females from population first year
arichBmat=allelesB; //use allelesA same thing I think
//***********************************************************************************************
//Fst (for the entire population) if nsamp is bigger than pop size, will give warning
//create a matrix with unique alleles for each microsatellite in each column
NumericMatrix tallyBOTHmat(2*pop2_size+2*pop1_size,nSNP);
int some_integerBOTH = 0;
NumericMatrix arichBOTHmat(2*pop2_size+2*pop1_size,nSNP);//all alleles both pops year 1
for (int i=0;i<2*pop1_size;i++){
arichBOTHmat(i,_)=arichAmat(i,_);
}
for (int i=0;i<2*pop2_size;i++){
arichBOTHmat(i+2*pop1_size,_)=arichBmat(i,_);
}
//get unique alleles for both populations
// The map-based trick below perturbs duplicate values; an entry left
// unperturbed (xBOTH[i]-inputBOTH[i]==0) marks the first occurrence of a
// distinct allele, which is then appended to tally_allelesBOTH.
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputBOTH = arichBOTHmat(_,rich);
NumericVector xBOTH = clone<NumericVector>(inputBOTH);
NumericVector tally_allelesBOTH(2*pop2_size+2*pop1_size); //this is a vector that holds the unique alleles
int tallycounter=1;
//*******
int nallsBOTH=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xBOTH.size() ;
double current, previous = xBOTH[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xBOTH[i] ;
if( current == previous ){
xBOTH[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xBOTH[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xBOTH[i]-inputBOTH[i]==0)
{nallsBOTH=nallsBOTH+1;
tally_allelesBOTH[tallycounter]=inputBOTH[i];
tallycounter=tallycounter+1;
}
}
tally_allelesBOTH[0]=inputBOTH[0];
//*****************************
tallyBOTHmat(_,rich)=tally_allelesBOTH;
} //end of rich BOTH loop (over 10 microsats)
List tallist(nSNP); //list of 10 that will hold allele frequencies for each of 10 microsatellites.
//First column is the actual allele size, second is freq from popA, third is freq popB
NumericVector count(nSNP);//list of number of unique alleles at each locus
//now set up a table to get allele frequencies for popA and popB
for (int rich=0;rich<nSNP;rich++){
NumericMatrix::Column inputA=arichAmat(_,rich);
NumericMatrix::Column inputB=arichBmat(_,rich);
NumericMatrix::Column tal=tallyBOTHmat(_,rich);
// Count distinct alleles (zero is used as "empty slot" sentinel; an
// actual allele size of 0 would be miscounted -- sizes start at 200 so
// in practice this is safe).
for(int i=0;i<tal.size();i++){ //figure out how many unique alleles are at this microsat (from tal)
if(tal[i]!=0){count[rich]=count[rich]+1;}
}
NumericVector Counter=clone(count);
int counter=Counter[rich];
NumericVector taltrunc(counter); //will hold all unique alleles from this microsat (both pops)
NumericMatrix tallyAB(counter,3); //matrix that has alleles, freq at popA, freq at popB
NumericVector howmanyA(counter); //number of alleles for this population at each allele
NumericVector howmanyB(counter);
for(int i=0;i<counter;i++){ //counter is the number of unique alleles at a locus
taltrunc[i]=tal[i];
int counterA=0; //a counter for number of unique alleles at each locus
int counterB=0;
for (int j=0;j<2*pop1_size;j++){
if (inputA[j]==taltrunc[i])//go through all alleles to see how many match this unique one
{counterA=counterA+1;}
}
howmanyA[i]=counterA;
for (int j=0;j<2*pop2_size;j++){
if (inputB[j]==taltrunc[i])
{counterB=counterB+1;}
}
howmanyB[i]=counterB;
} //end of counter
tallyAB(_,0)=taltrunc;
tallyAB(_,1)=howmanyA/(2*pop1_size);
tallyAB(_,2)=howmanyB/(2*pop2_size);
tallist[rich]=tallyAB;
}//end of rich
//create talmat, which has unique alleles first column then freqs for popA and B
// NOTE(review): talmat is declared with 5 columns but each row is filled
// from a 3-column taltmp row; columns 3-4 stay zero (or the row assignment
// length-mismatches) -- confirm the intended width.
NumericMatrix talmat(std::accumulate(count.begin(),count.end(),0.0),5);
for (int i=0;i<nSNP;i++){
int talcount=0;
NumericMatrix taltmp(count[i],3);
taltmp=as<SEXP>(tallist[i]);
// Loop bounds are computed as doubles (accumulate over count) and
// compared against int j -- works, but fragile.
for (int j=std::accumulate(count.begin(),count.begin()+i,0.0);j<std::accumulate(count.begin(),count.begin()+i+1,0.0);j++)
{
talmat(j,_)=taltmp(talcount,_);
talcount=talcount+1;
}
}
//aha! talmat is a matrix with all loci. Genious!
//talmat is the raw material to find Fst!!!
//return(wrap(talmat));
//GET FST this function finds Fst between 2 populations, multiple loci and multiple alleles.
// Weir & Cockerham (1984) variance components: a (among populations),
// b (among individuals within populations), c (within individuals);
// theta = sum(a) / sum(a+b+c) over all alleles and loci.
double n_sampA=pop1_size;
double n_sampB=pop2_size;
double n_bar=0.5*(n_sampA+n_sampB); //sample size - can change this.
double r=2;
double C=0;
NumericVector p_bar(talmat.nrow());
NumericMatrix::Column sampmatA =talmat(_,1);
NumericMatrix::Column sampmatB =talmat(_,2);
s2=NumericVector(talmat.nrow());
NumericVector h_bar(talmat.nrow());
NumericVector ones(talmat.nrow());
ones=rep(1,talmat.nrow());
p_bar=(n_sampA*sampmatA+n_sampB*sampmatB)/(2*n_bar); //each entry is average sample frequency of an allele
s2=pow((sampmatB-p_bar),2)+pow((sampmatA-p_bar),2);//sample variance of allele freqs over pops (for each allele)
//calculate mean heterozygosity (He=1-sum(all allele frequencies squared)), aka remove the homozygotes
h_bar=((n_sampA*2*sampmatA*(ones-sampmatA))+(n_sampB*2*sampmatB*(ones-sampmatB)))/(2*n_bar); //this takes the mean of 2 populations
//here heterozygosity assumes HWE.
double nc=((r*n_bar)-((pow(n_sampA,2)/(r*n_bar))+(pow(n_sampB,2)/(r*n_bar))))/(r-1); //same as n_bar
NumericVector a=(n_bar/nc)*(s2-(1/(n_bar-1))*((p_bar*(1-p_bar))-((r-1)/r)*s2-(0.25*h_bar)));
NumericVector dvec=((2*n_bar)/((2*n_bar)-1))*((p_bar*(1-p_bar))-((r-1)/r)*s2);
NumericVector b=(n_bar/(n_bar-1))*((p_bar*(1-p_bar))-((r-1)/r)*s2-(h_bar*(2*n_bar-1)/(4*n_bar)));
NumericVector c=h_bar/2;
NumericVector aplusdvec=a+b+c;
double fst=std::accumulate(a.begin(),a.end(),0.0)/(std::accumulate(aplusdvec.begin(),aplusdvec.end(),0.0)); //the better one
fst_vec[k]=fst;
}//end if (k%200==0) Fst-calculation block (original comment said "second k loop" -- mislabeled)
}//end of k<ngen generations loop (original comment said "if k%100" -- mislabeled)
}//end popdyn==1
//END TRY FST CALCS
//**********************************************************************************************
//mgmt_counter
//0=combined
//1=separated
//2=separated genetics test
//3=combined then separated
//4=tier 5
//5=combined then no fishing
//6=combined then tier5
//now take note of whether there are any false boundaries when the true boundary is not found
//here you are looking at the significant test results (mistakes)
//I am only interested in these if you do not correctly identify true split
//and only if there really are 2 populations
//but will calculate them here for all cases and then filter later
// ---------------------------------------------------------------------------
// Management-scenario loop: for each scenario, rewind both populations to
// the saved initial age structures and run the age-structured operating
// model forward Nyrs years (observation error, TAC, spawners, recruitment,
// survival, rounding to even integers). NOTE: this loop does not close
// within this section.
// ---------------------------------------------------------------------------
for (int mgmt_counter=0;mgmt_counter<mmt;mgmt_counter ++){
N_init_a1=N_init_a1_FIRST;
N_init_a2=N_init_a2_FIRST;
//genrule is the integer that tells you whether genetics test found the correct split site.
for (int k=0; k < Nyrs; k++){ //make k to be 110 ******should be Nyrs but 1 for now
//calculate age structure next year
// True spawning biomass per population: 0.5 * sum(weight * N * maturity).
temp8=(N_init_a1*Weight_a*Q_a);
S_lyA=0.5*std::accumulate(temp8.begin(),temp8.end(),0.0); //spawning biomass of popA
temp9=(N_init_a2*Weight_b*Q_b);
S_lyB=0.5*std::accumulate(temp9.begin(),temp9.end(),0.0); //spawning biomass of popB
SSBA_vec[k]=S_lyA;
SSBB_vec[k]=S_lyB;
if(N_init_a1[0]==0){SPRAtrue[k]=0;}else{SPRAtrue[k]=S_lyA/N_init_a1[0];}//true spawning biomass per recruit in each year
if(N_init_a2[0]==0){SPRBtrue[k]=0;}else{SPRBtrue[k]=S_lyB/N_init_a2[0];}
// AR(1) observation-error process: eps_k = rho*eps_{k-1} + sqrt(1-rho^2)*eta.
if(k>=1){
eta_jyA=as<double>(rnorm(1,0,sqrt(sigma_ja2A)));
eta_jyB=as<double>(rnorm(1,0,sqrt(sigma_ja2B)));
eta_jy=as<double>(rnorm(1,0,sqrt(sigma_ja2)));
epsilonA[k]=rho*epsilonA[k-1]+pow((1-pow(rho,2)),0.5)*eta_jyA;
epsilonB[k]=rho*epsilonB[k-1]+pow((1-pow(rho,2)),0.5)*eta_jyB;
epsilon[k]=rho*epsilon[k-1]+pow((1-pow(rho,2)),0.5)*eta_jy;
} //end of k>=1 loop
// Observed (lognormally biased-corrected) numbers-at-age.
N_hat_aA = N_init_a1*exp(epsilonA[k]-sigma_ja2A/2);
N_hat_aB = N_init_a2*exp(epsilonB[k]-sigma_ja2B/2);
N_hat_a=(N_init_a1+N_init_a2)*exp(epsilon[k]-sigma_ja2/2);
psiA=as<double>(rnorm(1,0,sqrt(sigma_js2A)));
psiB=as<double>(rnorm(1,0,sqrt(sigma_js2B)));
psi_star=as<double>(rnorm(1,0,sqrt(sigma_js2)));
R_l0A_hat[0]=N_hat_aA[0];//same correction applied here as to total numbers
R_l0B_hat[0]=N_hat_aB[0];
// NOTE(review): when the N_hat_*[0]==0 guard fires, temp10/temp11/temp12
// are NOT recomputed, yet the SSB_hat* lines below still read them --
// they then carry the previous iteration's values (or are empty on the
// first hit). Confirm this is intended.
if(N_hat_aA[0]==0){S_hat_syjA[k]=0;}else{
temp10= Weight_a*N_hat_aA*Q_a;
S_hat_syjA[k]=0.5*std::accumulate(temp10.begin(),temp10.end(),0.0)*exp(psiA-(sigma_js2A/2))/N_hat_aA[0];} //simulated estimate of spawning biomass per recruit
SSB_hatA[k]= 0.5*std::accumulate(temp10.begin(),temp10.end(),0.0)*exp(psiA-(sigma_js2A/2));
if(N_hat_aB[0]==0){S_hat_syjB[k]=0;}else{
temp11 = Weight_b*N_hat_aB*Q_b;
S_hat_syjB[k]=0.5*std::accumulate(temp11.begin(),temp11.end(),0.0)*exp(psiB-(sigma_js2B/2))/N_hat_aB[0];}
SSB_hatB[k]= 0.5*std::accumulate(temp11.begin(),temp11.end(),0.0)*exp(psiB-(sigma_js2B/2));
// NOTE(review): the combined-population line below uses `psi` -- the
// weight-length coefficient used in Weight_b above -- where psi_star
// (drawn just above from sigma_js2) was almost certainly intended.
// Likely bug; verify against the separated-population lines.
if(N_hat_a[0]==0){S_hat_syj[k]=0;}else{
temp12 = Weight_a*N_hat_a*Q_a;
S_hat_syj[k]=0.5*std::accumulate(temp12.begin(),temp12.end(),0.0)*exp(psi-(sigma_js2/2))/N_hat_a[0];}
SSB_hat[k]= 0.5*std::accumulate(temp12.begin(),temp12.end(),0.0)*exp(psi-(sigma_js2/2));
// B40-style reference points from mean estimated recruitment to date.
// NOTE(review): at k=0 the accumulate range is empty and the division by
// k is 0/0 -> NaN; confirm downstream code tolerates that first entry.
SPR_initA=SPR_init_F40_a*(std::accumulate(R_l0A_hat.begin(),R_l0A_hat.begin()+k,0.0)/k);//maybe get rid of this and other bad measures using it
SPR_initB=SPR_init_F40_b*(std::accumulate(R_l0B_hat.begin(),R_l0B_hat.begin()+k,0.0)/k);
SB40_vecA[k]=SPR_init_F40_a*(std::accumulate(R_l0A_hat.begin(),R_l0A_hat.begin()+k,0.0)/k);
SB40_vecB[k]=SPR_init_F40_b*(std::accumulate(R_l0B_hat.begin(),R_l0B_hat.begin()+k,0.0)/k);
// Full-selection fishing mortality drawn lognormally each year.
// NOTE(review): FishA and FishB use the identical distribution although
// the trailing comments claim B20% vs B40% -- comments look stale.
FishA=as<double>(rlnorm(1,-0.39,0.46));//this if FullF for B20%
FishB=as<double>(rlnorm(1,-0.39,0.46));//this is FullF for B40%
//if (k>=649){
//FishA=as<double>(rlnorm(1,-0.01,0.4));//this if FullF for B20%
//}
FishAvec[k]=FishA;
FishBvec[k]=FishB;
// Baranov catch equation -> TAC in biomass for each population.
temp15=Weight_a*N_init_a1*((fishsel*FishA)/(fishsel*FishA+M2))*(1-exp(-(fishsel*FishA+M2)));
TACA[k]=std::accumulate(temp15.begin(),temp15.end(),0.0);
temp16=Weight_b*N_init_a2*((fishsel*FishB)/(fishsel*FishB+M2))*(1-exp(-(fishsel*FishB+M2)));
TACB[k]=std::accumulate(temp16.begin(),temp16.end(),0.0);
//get spawning fish numbers (females only: maturity * N * 0.5)
spawnersA = Q_a*N_init_a1*0.5;
spawnersB = Q_b*N_init_a2*0.5;
for (int i=0; i<ages; i++){
temp=int(spawnersA[i]);
if((spawnersA[i]+0.5)>=(int(spawnersA[i])+1)){
temp=int(spawnersA[i])+1;
}
spawnersA_rounded[i]=temp;
}
for (int i=0; i<ages; i++){
temp=int(spawnersB[i]);
if((spawnersB[i]+0.5)>=(int(spawnersB[i])+1)){
temp=int(spawnersB[i])+1;
}
spawnersB_rounded[i]=temp;
}
//make sure there are not more spawners than actual fish.
for(int i=0;i<ages;i++){
if((N_init_a1[i]/2)<spawnersA_rounded[i]){
spawnersA_rounded[i]=N_init_a1[i]/2;
}
if((N_init_a2[i]/2)<spawnersB_rounded[i]){
spawnersB_rounded[i]=N_init_a2[i]/2;
}
}
NspawnersA[k]=std::accumulate(spawnersA_rounded.begin(),spawnersA_rounded.end(),0.0); //males and females same number of spawners as in here
NspawnersB[k]=std::accumulate(spawnersB_rounded.begin(),spawnersB_rounded.end(),0.0);
//generate delta which is the variance for recruitment
// NOTE(review): sigma_jr2 is already a lognormal variance
// (log(CVr^2+1)); applying log(x^2+1) to it again, and then using
// sigma_jr2/2 (not sigma_jr2_tot/2) as the bias correction below, looks
// inconsistent -- confirm which variance the recruitment deviate should
// use.
double sigma_jr2_tot=log(pow(sigma_jr2,2)+1);
double sigma_rec=as<double>(rnorm(1,0,sqrt(sigma_jr2_tot)));
//N_aA and N_aB are numbers next year*****could double check this part
// Beverton-Holt recruitment with steepness h and lognormal deviate.
N_aA[0]=((4*h*R_l0A[0]*S_lyA)/(S_l0[0]*(1-h)+S_lyA*(5*h-1)))*exp(sigma_rec-(sigma_jr2/2)); //S_l0[0] are spawners in popA //BH spawner recruit relationship
N_aB[0]=((4*h*R_l0B[0]*S_lyB)/(S_l0[1]*(1-h)+S_lyB*(5*h-1)))*exp(sigma_rec-(sigma_jr2/2)); //S_l0[1] are spawners in popB
Rcout<<"first migration"<<std::endl;
//HERE IS MIGRATION IN POP DYN MODEL (currently disabled)
//if (k%4==0){
//for (int i=0;i<nmig;i++){
//N_aA[0]=N_aA[0]+2;
//}
//}//end of if k%=4==0
if(NspawnersA[k]==0){N_aA[0]=0;}
if(NspawnersB[k]==0){N_aB[0]=0;}
// Survival to next year under fishing + natural mortality, by age.
for(int i=1; i<(ages-1); i++){
N_aA[i]=N_init_a1[i-1]*exp(-(fishsel[i-1]*FishA+M2[i-1])); //correct because you want the selectivity of the next youngest age
N_aB[i]=N_init_a2[i-1]*exp(-(fishsel[i-1]*FishB+M2[i-1])); //and fishsel now goes from 21 to 1
//N_aA[i]=N_init_a1[i-1]*Surv[i-1]; //correct because you want the selectivity of the next youngest age
//N_aB[i]=N_init_a2[i-1]*Surv[i-1]; //and fishsel now goes from 21 to 1
}
N_aA[ages-1]=N_init_a1[ages-2]*exp(-(fishsel[ages-2]*FishA+M2[ages-2]))+N_init_a1[ages-1]*exp(-(fishsel[ages-1]*FishA+M2[ages-1])); //dont forget about plus group
N_aB[ages-1]=N_init_a2[ages-2]*exp(-(fishsel[ages-2]*FishB+M2[ages-2]))+N_init_a2[ages-1]*exp(-(fishsel[ages-1]*FishB+M2[ages-1]));
//N_aA[ages-1]=N_init_a1[ages-2]*Surv[ages-2]+N_init_a1[ages-1]*Surv[ages-1]; //dont forget about plus group
//N_aB[ages-1]=N_init_a2[ages-2]*Surv[ages-2]+N_init_a2[ages-1]*Surv[ages-1];
//now round N_aA and N_aB to integers (even, for male/female split)
for (int i=0; i<ages; i++){
temp18=int(N_aA[i]);
if((N_aA[i]+0.5)>=(int(N_aA[i])+1)){
temp18=int(N_aA[i])+1;
}
N_aA[i]=temp18;
if (temp18%2!=0){
temp2=as<double>(runif(1,-1,1));
if(temp2>=0){N_aA[i]=N_aA[i]+1;}
else{N_aA[i]=N_aA[i]-1;
}
}
}
for (int i=0; i<ages; i++){
temp19=int(N_aB[i]);
if((N_aB[i]+0.5)>=(int(N_aB[i])+1)){
temp19=int(N_aB[i])+1;}
N_aB[i]=temp19;
if (temp19%2!=0){
temp2=as<double>(runif(1,-1,1));
if(temp2>=0){N_aB[i]=N_aB[i]+1;}
else{N_aB[i]=N_aB[i]-1;
}
}
}
//how many individuals are there?
R_l0A[k]=N_init_a1[0];
R_l0B[k]=N_init_a2[0];
R_l0A_hat[k]=N_hat_aA[0]; //incorporate error into recruitment observation
R_l0B_hat[k]=N_hat_aB[0];
NtotalA[k] = std::accumulate(N_init_a1.begin(),N_init_a1.end(),0.0);
NtotalB[k] = std::accumulate(N_init_a2.begin(),N_init_a2.end(),0.0);
allA.row(k)=rev(N_init_a1); //each row is the number of each age during that year - reversed so it is 21-1 for next section
allB.row(k)=rev(N_init_a2);
allspawnA.row(k)=rev(spawnersA_rounded); //each row is the number of each age during that year - reversed so it is 21-1 for next section
allspawnB.row(k)=rev(spawnersB_rounded);
//put this at the end: advance the state to next year (clone to avoid
//aliasing the scratch vectors).
N_init_a1=clone(N_aA);
N_init_a2=clone(N_aB);
//create separate matrices for each of the mgmt cases. Can return for separated case (left out here).
out3(zcounter,k)=R_l0A[k];//true recruitment
out4(zcounter,k)=R_l0B[k];
out21(zcounter,k)=0.4*SSBA_vec[k]/SB40_vecA[k];//true SSB div by our GThompson est of B40
out22(zcounter,k)=0.4*SSBB_vec[k]/SB40_vecB[k];
out23(zcounter,k)=SSBA_vec[k];//just ssb
out24(zcounter,k)=SSBB_vec[k];
} //end of k<Nyrs loop
NtotalA2=clone(NtotalA);
//try calculating Ne here work from 1 to <ages> here
//Scratch vectors (all length <ages>) for the per-year effective-population-
//size (Ne) life-table calculation below: survival (S_x), survivorship (lx),
//per-capita births (bx, b'x), generation length (L_pop), lifetime-offspring
//variance components (Vk), deaths by age (Dx), and sums of squared
//deviations (SSD*).
NumericVector x_age(ages);
NumericVector S_xA(ages);
NumericVector S_xB(ages);
NumericVector bxA(ages);
NumericVector bxB(ages);
NumericVector lxA(ages);
NumericVector lxB(ages);
NumericVector b_primexA(ages);
NumericVector b_primexB(ages);
NumericVector bxlxA(ages);
NumericVector bxlxB(ages);
NumericVector BxA(ages);
NumericVector BxB(ages);
double L_popA; //generation length
double L_popB; //generation length
NumericVector L_popAvec(ages);
NumericVector L_popBvec(ages);
//Per-year results (length Nyrs): generation length and variance in
//lifetime reproductive success for each population.
NumericVector L_popA_vec(Nyrs);
NumericVector L_popB_vec(Nyrs);
NumericVector VkA_vec(Nyrs);
NumericVector VkB_vec(Nyrs);
NumericVector k_barA(ages);
NumericVector k_barB(ages);
NumericVector VxA(ages);
NumericVector VxB(ages);
NumericVector DxA(ages);
NumericVector DxB(ages);
NumericVector k_barADxA(ages);
NumericVector k_barBDxB(ages);
NumericVector SSDIxA(ages);
NumericVector SSDIxB(ages);
NumericVector delxA(ages);
NumericVector delxB(ages);
NumericVector SSDGxA(ages);
NumericVector SSDGxB(ages);
NumericVector SSDxA(ages);
NumericVector SSDxB(ages);
NumericVector newnumsA(ages);//calculate new numbers based on initial nums and lx.
NumericVector newnumsB(ages);
double k_bardotA;
double k_bardotB;
double VkA;
double VkB;
//Per-year effective population size (Ne) from a life-table approach:
//build survivorship lx, age-specific birth rates bx, generation length
//L_pop, and the variance in lifetime offspring number Vk, then
//Ne = 4*N0*L / (Vk + 2) -- this matches a Hill (1972)-style demographic
//Ne formula; confirm the citation against the accompanying paper.
for (int k=0;k<Nyrs;k++){ //put in Nyrs
x_age=age+1;
N_init_a1_star = rev(allA.row(k)); //0 yr olds to <ages> yr olds here
N_init_a2_star = rev(allB.row(k));
spawnersA_rounded_star = rev(allspawnA.row(k));
spawnersB_rounded_star = rev(allspawnB.row(k));
FishA = FishAvec[k];
FishB = FishBvec[k];
//S_xA = 1-(fishsel*FishA+M);
//S_xB = 1-(fishsel*FishB+M);
//Survival is a constant (1 - meanFullF) for all ages except the last,
//which is terminal (S=0). The commented lines show the alternative of
//age-specific fishing selectivity + natural mortality.
S_xA=rep((1-meanFullF),ages);
S_xA[(ages-1)]=0;
S_xB=rep((1-meanFullF),ages);
S_xB[(ages-1)]=0;
for (int i=0;i<ages;i++){ //make sure they are positive (neg can happen when fishing gets too high)
if (S_xA[i]<0){S_xA[i]=0;}
if (S_xB[i]<0){S_xB[i]=0;}
}
//Survivorship to age x: lx[0]=1, lx[i] = S[i-1]*lx[i-1].
lxA[0]=1;
lxB[0]=1;
for (int i=1;i<ages;i++){
lxA[i] = S_xA[i-1]*lxA[i-1];
lxB[i] = S_xB[i-1]*lxB[i-1];
}
//Expected female numbers-at-age implied by this year's recruitment
//(half of age-0 fish) and the survivorship curve.
newnumsA=0.5*N_init_a1_star[0]*lxA;
newnumsB=0.5*N_init_a2_star[0]*lxB;
for(int i=0;i<ages;i++){
if (NspawnersA[k]*newnumsA[i]>0)
{bxA[i] = spawnersA_rounded_star[i]*N_init_a1_star[0]/(2*NspawnersA[k]*newnumsA[i]);}//div by2 because want half the number of N1s here
else{bxA[i]=0;}
if (NspawnersB[k]*newnumsB[i]>0)
{bxB[i] = spawnersB_rounded_star[i]*N_init_a2_star[0]/(2*NspawnersB[k]*newnumsB[i]);}
else{bxB[i]=0;}
}
//Normalize bx so that sum(b'x * lx) = 2 (each offspring has two parents);
//guard every division against zero denominators.
bxlxA=bxA*lxA;
bxlxB=bxB*lxB;
for(int i=0;i<ages;i++){
if(std::accumulate(bxlxA.begin(),bxlxA.end(),0.0)>0){
b_primexA[i] = 2*bxA[i]/std::accumulate(bxlxA.begin(),bxlxA.end(),0.0);}
else{b_primexA[i] =0;}
if(std::accumulate(bxlxB.begin(),bxlxB.end(),0.0)>0){
b_primexB[i] = 2*bxB[i]/std::accumulate(bxlxB.begin(),bxlxB.end(),0.0);}
else{b_primexB[i]=0;}
}
//Generation length L = sum(x * Bx) / N0, where Bx = b'x * numbers-at-age.
BxA=b_primexA*newnumsA;
BxB=b_primexB*newnumsB;
L_popAvec = x_age*BxA;
L_popBvec = x_age*BxB;
if (N_init_a1_star[0]>0){
L_popA = std::accumulate(L_popAvec.begin(),L_popAvec.end(),0.0)/N_init_a1_star[0];}
else {L_popA=0;}
if (N_init_a2_star[0]>0){
L_popB = std::accumulate(L_popBvec.begin(),L_popBvec.end(),0.0)/N_init_a2_star[0];}
else{L_popB=0;}
L_popA_vec[k]=L_popA;
L_popB_vec[k]=L_popB;
//k_bar[x] = cumulative expected offspring by age x (running sum of b'x).
k_barA[0]=0;
k_barB[0]=0;
for (int i=1;i<ages;i++){
k_barA[i] = b_primexA[i]+k_barA[i-1];
k_barB[i] = b_primexB[i]+k_barB[i-1];
}
VxA=k_barA;//just for males or females (but they have the same value) assume poisson variance
VxB=k_barB;
//Deaths at age x (clamped to be non-negative; increases can occur when
//the survivorship-implied numbers are not monotone).
for (int i=0;i<(ages-1);i++){
if (newnumsA[i]-newnumsA[i+1]>=0){DxA[i] =newnumsA[i]-newnumsA[i+1];}
else{DxA[i]=0;}
if (newnumsB[i]-newnumsB[i+1]>=0){DxB[i] =newnumsB[i]-newnumsB[i+1];}
else{DxB[i]=0;}
}
k_barADxA=k_barA*DxA;
k_barBDxB=k_barB*DxB;
if (N_init_a1_star[0]>0){
k_bardotA = std::accumulate(k_barADxA.begin(),k_barADxA.end(),0.0)/N_init_a1_star[0];}
else{k_bardotA=0;}//divide by number of newborns
if (N_init_a2_star[0]>0){
k_bardotB = std::accumulate(k_barBDxB.begin(),k_barBDxB.end(),0.0)/N_init_a2_star[0];}
else{k_bardotB=0;}
//Total variance in lifetime offspring number Vk = (Poisson within-age
//component SSDI) + (between-age component SSDG), each weighted by deaths.
delxA=k_barA-k_bardotA;
delxB=k_barB-k_bardotB;
SSDIxA=DxA*VxA;
SSDIxB=DxB*VxB;
SSDGxA=DxA*delxA*delxA;
SSDGxB=DxB*delxB*delxB;
SSDxA=SSDIxA+SSDGxA;
SSDxB=SSDIxB+SSDGxB;
double SSDtA = std::accumulate(SSDxA.begin(),SSDxA.end(),0.0);
double SSDtB = std::accumulate(SSDxB.begin(),SSDxB.end(),0.0);
if (N_init_a1_star[0]>0){VkA = SSDtA/N_init_a1_star[0];} //just for females or males but the values are the same
else{VkA=0;}
if (N_init_a2_star[0]>0){VkB = SSDtB/N_init_a2_star[0];}
else{VkB=0;}
//VkA and VkB are total variance
VkA_vec[k]=VkA;
VkB_vec[k]=VkB;
double NeA = (4*N_init_a1_star[0]*L_popA)/(VkA+2);
double NeB = (4*N_init_a2_star[0]*L_popB)/(VkB+2);
//if(NtotalA[k]>1){
//NeAvec[k]=NeA/NtotalA[k];}
//else{NeAvec[k]=0;}
//if(NtotalB[k]>1){
//NeBvec[k]=NeB/NtotalB[k];}
//else{NeBvec[k]=0;}
//calculate vkdot from males and females in pop and also teh VkA an dVkB.
//then find Ne.
//Absolute Ne is stored; the commented block above would instead store the
//Ne/N ratio.
NeAvec[k]=NeA;
NeBvec[k]=NeB;
}//end of k loop for finding Ne.
//Copy this replicate's full time series (catches, fishing mortalities,
//total abundances) into row <zcounter> of the output matrices.
out7(zcounter,_) = TACA;
out8(zcounter,_)= TACB;
out9(zcounter,_)=FishAvec;
out10(zcounter,_)=FishBvec;
out14(zcounter,_)=TAC;
out15(zcounter,_)=NtotalA;
out16(zcounter,_)=NtotalB;
//Individual-based genetics section (only when popdyn==1).
//popA/popB are lists of length Nyrs+ages; entry i is a 2-element list
//{male genotype matrix, female genotype matrix}, rows = allele copies,
//cols = SNP loci. The first <ages> entries are the initial cohorts
//(oldest cohort stored first), sliced out of allelesA/allelesB; males
//occupy the first pop*_size rows of those matrices and females the next.
if (popdyn==1){
//Set up lists which will hold genotypes for males, females each pop
//List popA(Nyrs+ages);
//List popB(Nyrs+ages);
//popA and B lists of 131 matrices. First 21 will be length of N_init_a1_FIRST (age 21 to 1), the rest will be number of offspring for that year
NumericMatrix matsliceA; //popA males
NumericMatrix matsliceB; //popA females
//popA set up alleles for each individual from allelesA
//NOTE(review): std::accumulate over a NumericVector returns a double that
//is implicitly narrowed to int for the loop bounds -- fine while cohort
//sizes are exact integers, but fragile if they ever carry fractions.
for(int i=0;i<ages;i++){
matsliceA=NumericMatrix(N_init_a1_FIRST[i],nSNP);
matsliceB=NumericMatrix(N_init_a1_FIRST[i],nSNP);
for (int j=std::accumulate(N_init_a1_FIRST.begin(),N_init_a1_FIRST.begin()+i,0.0);
j<std::accumulate(N_init_a1_FIRST.begin(),N_init_a1_FIRST.begin()+i+1,0.0);j++){
matsliceA(j-std::accumulate(N_init_a1_FIRST.begin(),N_init_a1_FIRST.begin()+i,0.0),_)=allelesA(j,_);}
for (int j=std::accumulate(N_init_a1_FIRST.begin(),N_init_a1_FIRST.begin()+i,0.0)+pop1_size;
j<std::accumulate(N_init_a1_FIRST.begin(),N_init_a1_FIRST.begin()+i+1,0.0)+pop1_size;j++){
matsliceB(j-std::accumulate(N_init_a1_FIRST.begin(),N_init_a1_FIRST.begin()+i,0.0)-pop1_size,_)=allelesA(j,_);}
List temp5(2); temp5[0]=matsliceA;temp5[1]=matsliceB;
popA[ages-1-i]=temp5;
}
//pop2 set up alleles for each individual from allelesB
NumericMatrix matsliceC; //popB males
NumericMatrix matsliceD; //popB females
for(int i=0;i<ages;i++){
matsliceC=NumericMatrix(N_init_a2_FIRST[i],nSNP);
matsliceD=NumericMatrix(N_init_a2_FIRST[i],nSNP);
for (int j=std::accumulate(N_init_a2_FIRST.begin(),N_init_a2_FIRST.begin()+i,0.0);
j<std::accumulate(N_init_a2_FIRST.begin(),N_init_a2_FIRST.begin()+i+1,0.0);j++){
matsliceC(j-std::accumulate(N_init_a2_FIRST.begin(),N_init_a2_FIRST.begin()+i,0.0),_)=allelesB(j,_);}
for (int j=std::accumulate(N_init_a2_FIRST.begin(),N_init_a2_FIRST.begin()+i,0.0)+pop2_size;
j<std::accumulate(N_init_a2_FIRST.begin(),N_init_a2_FIRST.begin()+i+1,0.0)+pop2_size;j++){
matsliceD(j-std::accumulate(N_init_a2_FIRST.begin(),N_init_a2_FIRST.begin()+i,0.0)-pop2_size,_)=allelesB(j,_);}
List temp6(2); temp6[0]=matsliceC;temp6[1]=matsliceD;
popB[ages-1-i]=temp6;
}
//dimspopA/dimspopB[i] record how many individuals are stored in popA/popB[i]:
//the first <ages> slots are the (reversed) initial cohorts, the rest are the
//annual recruit counts (age-0 column of allA/allB).
NumericVector popA_first21 = rev(N_init_a1_FIRST);
NumericVector popB_first21 = rev(N_init_a2_FIRST);
NumericVector newrecsA = allA.column(ages-1);
NumericVector newrecsB = allB.column(ages-1);
NumericVector dimspopA(Nyrs+ages);
NumericVector dimspopB(Nyrs+ages);
NumericVector pluscounterA_list(Nyrs,-2.0);
NumericVector pluscounterB_list(Nyrs,-2.0);
for(int i=0;i<ages;i++){
dimspopA[i]=popA_first21[i];
dimspopB[i]=popB_first21[i];
}
//Fill the per-year recruit counts: popA[k+ages] holds the recruits produced
//in year k, whose count is allA(k+1,ages-1) = newrecsA[k+1] -- hence the +1
//offset in the index.
//Fixed off-by-one: the loop previously ran to i<(Nyrs+ages+1), which wrote
//dimspopA[Nyrs+ages] (one past the end of a vector of length Nyrs+ages) and
//read newrecsA[Nyrs]/newrecsA[Nyrs+1] past the end of a length-Nyrs column.
//Rcpp's operator[] does no bounds checking, so this was silent memory
//corruption. Recruit slots only exist up to index Nyrs+ages-2 (the genetics
//loop below stores popA[k+ages] for k up to Nyrs-2), so stop at Nyrs+ages-1;
//the final slot keeps its zero-initialized value and is never read.
for(int i=ages;i<(Nyrs+ages-1);i++){
dimspopA[i]=newrecsA[i-ages+1];
dimspopB[i]=newrecsB[i-ages+1];
}
//Main genetics loop: for each year, (1) draw the spawners' genotypes from
//the age-structured genotype lists (handling the oldest "plus group"
//specially), (2) create recruit genotypes by random Mendelian inheritance,
//(3) mutate, (4) migrate a few recruits between populations, (5) store the
//recruits back into popA/popB for use as parents in later years.
for(int k=0;k<Nyrs-1;k++){ //Nyrs
//set up spawning groups (for males and females, popA)***************************************************
Rcout<<"k at 2079"<<std::endl;
Rf_PrintValue(wrap(k));
spawn_malA_thisyr = NumericMatrix(2*NspawnersA[k],nSNP); //twice as many genotypes as spawners
spawn_femA_thisyr = NumericMatrix(2*NspawnersA[k],nSNP);
some_integerA=0;
spawnersA_rounded_star = allspawnA.row(k); //from 21 yr olds to newborns
NumericVector N_init_a1_star = allA.row(k); //also from 21 yr olds to newborns first row is N_init_a1_FIRST
tmpMatrixA_1 = NumericMatrix(0,nSNP);
tmpMatrixA_2 = NumericMatrix(0,nSNP);
//fix plus group problems
//Plus-group spawners: the oldest age class may span several stored
//cohorts, so walk backwards (plus_counterA years) through popA until
//enough genotype rows have been copied.
if ((spawnersA_rounded_star[0]>0)){ //only worry about them if there is a plus group
IntegerVector vec_plusA(k+1);
int plus_counterA = -1;
for(int j=0;j<k+1;j++){
if(j==0){
vec_plusA[j]=dimspopA(k-j);} //vec_plus is a vector of the cumulative number of rows in popA from the current plus group to previous years, etc.
else{
vec_plusA[j]=vec_plusA[j-1]+dimspopA(k-j);}
if (vec_plusA[j]<2*spawnersA_rounded_star[0]) //plus_counterA tells you how far back you need to go
{plus_counterA = j;}
}
pluscounterA_list[k]=plus_counterA;
for (int j=k-(plus_counterA+1); j<k+1;j++){
tmpList_plusgroupA = popA[j]; //this is the actual plus group
tmpMatrixA_1plus=as<SEXP>(tmpList_plusgroupA[0]);
tmpMatrixA_2plus=as<SEXP>(tmpList_plusgroupA[1]);
for (int l=0;l<tmpMatrixA_1plus.nrow();l++){
spawn_malA_thisyr(some_integerA,_)=tmpMatrixA_1plus(l,_);
spawn_femA_thisyr(some_integerA,_)=tmpMatrixA_2plus(l,_);
some_integerA = some_integerA+1;
//Break out of both loops once enough rows are copied (indices are
//forced past their bounds -- a goto-free double-break idiom).
if(some_integerA>=2*spawnersA_rounded_star[0]){j=k+1;l=tmpMatrixA_1plus.nrow();}
}
}
}//end of if spawnersA_rounded_star[0]>0
//now your spawn_femA_thisyr has plus group in it.
//use some_integerA to fill in the rest of spawn_femA_thisyr
//Non-plus-group ages: sample spawners without replacement (spawner_list*
//shrinks via .erase) and copy each chosen individual's two allele rows.
for(int i=1;i<ages;i++){
if((spawnersA_rounded_star[i]>0)){
mal_spawnersA = IntegerVector(spawnersA_rounded_star[i]); //after random mating **also make sure you get the right row
fem_spawnersA=IntegerVector(spawnersA_rounded_star[i]); //after random mating//may be 0 if spawnersA_rounded_star is 1 b/c of indexing (0 is 1)
spawner_listM = seq_len(N_init_a1_star[i]/2); //list of ones to choose from
spawner_listF = seq_len(N_init_a1_star[i]/2); //choosing from half the number of individuals because also using the subsequent row
tmpMatrixA_1 = NumericMatrix(0,nSNP);
tmpMatrixA_2 = NumericMatrix(0,nSNP);
tmpListA = popA[i+k];
tmpMatrixA_1=as<SEXP>(tmpListA[0]);
tmpMatrixA_2=as<SEXP>(tmpListA[1]);
//if (tmpMatrixA_1.nrow()>(2*N_init_a1_star[i])){
for (int j=0; j < spawnersA_rounded_star[i]; j++){ //random mating for each allele
//Manual round-to-nearest of a uniform draw on [-0.5, upper-0.5]
//gives a uniform integer pick from the remaining candidates.
pick_spawner_mal=as<double>(runif(1,-0.5,((N_init_a1_star[i]/2)-j-0.5)));//b/c choosing spawnersA_rounded_star[i] from all available and from half so can use subsequent row as well
pickM=int(pick_spawner_mal);
if((pick_spawner_mal+0.5)>=(int(pick_spawner_mal)+1)){
pickM=int(pick_spawner_mal)+1;
}
pick_spawner_fem=as<double>(runif(1,-0.5,((N_init_a1_star[i]/2)-j-0.5)));//b/c you are choosing spawnersA_rounded[i] from all those available
pickF=int(pick_spawner_fem);
if((pick_spawner_fem+0.5)>=(int(pick_spawner_fem)+1)){
pickF=int(pick_spawner_fem)+1;
}
mal_spawnersA[j]=spawner_listM[pickM]; //numeric(0) happens when you skip one because it is zero.
fem_spawnersA[j]=spawner_listF[pickF];
spawn_malA_thisyr(some_integerA,_)=tmpMatrixA_1(2*spawner_listM[pickM]-2,_); //the minus two is to get the indices to work out
spawn_malA_thisyr(some_integerA+1,_)=tmpMatrixA_1(2*spawner_listM[pickM]-1,_);//and the next one as well in the row
spawn_femA_thisyr(some_integerA,_)=tmpMatrixA_2(2*spawner_listF[pickF]-2,_);
spawn_femA_thisyr(some_integerA+1,_)=tmpMatrixA_2(2*spawner_listF[pickF]-1,_);
spawner_listM.erase(pickM); //this erases the one at the location you specify
spawner_listF.erase(pickF);
some_integerA=some_integerA+2;
} //end js for A
//}//end if loop
} //end if spawnersA_rounded_star[i]>0
} //end of i loop (done with finding spawners)
//set up spawning groups (for males and females, popB)***************************************************
//Mirror of the popA spawner-selection logic for population B.
spawn_malB_thisyr = NumericMatrix(2*NspawnersB[k],nSNP); //twice as many genotypes as spawners
spawn_femB_thisyr = NumericMatrix(2*NspawnersB[k],nSNP);
some_integerB=0;
spawnersB_rounded_star = allspawnB.row(k); //from 21 yr olds to newborns
NumericVector N_init_a2_star = allB.row(k); //also from 21 yr olds to newborns first row is N_init_a1_FIRST
tmpMatrixB_1 = NumericMatrix(0,nSNP);
tmpMatrixB_2 = NumericMatrix(0,nSNP);
if (spawnersB_rounded_star[0]>0){ //only worry about them if there is a plus group
IntegerVector vec_plusB(k+1);
int plus_counterB = -1;
for(int j=0;j<k+1;j++){
if(j==0){
vec_plusB[j]=dimspopB(k-j);} //vec_plus is a vector of the cumulative number of rows in popA from the current plus group to previous years, etc.
else{
vec_plusB[j]=vec_plusB[j-1]+dimspopB(k-j);}
if (vec_plusB[j]<2*spawnersB_rounded_star[0]) //plus_counterB tells you how far back you need to go
{plus_counterB = j;}
}
pluscounterB_list[k]=plus_counterB;
//problem is that by default it is 0 so it does not help
for (int j=k-(plus_counterB+1); j<k+1;j++){
tmpList_plusgroupB = popB[j]; //this is the actual plus group
tmpMatrixB_1plus=as<SEXP>(tmpList_plusgroupB[0]);
tmpMatrixB_2plus=as<SEXP>(tmpList_plusgroupB[1]);
for (int l=0;l<tmpMatrixB_1plus.nrow();l++){
spawn_malB_thisyr(some_integerB,_)=tmpMatrixB_1plus(l,_);
spawn_femB_thisyr(some_integerB,_)=tmpMatrixB_2plus(l,_);
some_integerB = some_integerB+1;
if(some_integerB>=2*spawnersB_rounded_star[0]){j=k+1;l=tmpMatrixB_1plus.nrow();} //>=because of C++ indexing
}
}
}//end of if spawnersB_rounded_star[0]>0
//now your spawn_femB_thisyr has plus group in it.
for(int i=1;i<ages;i++){
if((spawnersB_rounded_star[i]>0)){
mal_spawnersB = IntegerVector(spawnersB_rounded_star[i]); //after random mating **also make sure you get the right row
fem_spawnersB = IntegerVector(spawnersB_rounded_star[i]); //after random mating
spawner_listM = seq_len(N_init_a2_star[i]/2); //list of ones to choose from
spawner_listF = seq_len(N_init_a2_star[i]/2); //choosing from half the number of individuals because also using the subsequent row
tmpMatrixB_1 = NumericMatrix(0,nSNP);
tmpMatrixB_2 = NumericMatrix(0,nSNP);
tmpListB = popB[i+k];
tmpMatrixB_1=as<SEXP>(tmpListB[0]);
tmpMatrixB_2=as<SEXP>(tmpListB[1]);
for (int j=0; j < spawnersB_rounded_star[i]; j++){ //random mating for each allele
pick_spawner_mal=as<double>(runif(1,-0.5,((N_init_a2_star[i]/2)-j-0.5)));//b/c choosing spawnersB_rounded_star[i] from all available and from half so can use subsequent row as well
pickM=int(pick_spawner_mal);
if((pick_spawner_mal+0.5)>=(int(pick_spawner_mal)+1)){
pickM=int(pick_spawner_mal)+1;
}
pick_spawner_fem=as<double>(runif(1,-0.5,((N_init_a2_star[i]/2)-j-0.5)));//b/c you are choosing spawnersA_rounded[i] from all those available
pickF=int(pick_spawner_fem);
if((pick_spawner_fem+0.5)>=(int(pick_spawner_fem)+1)){
pickF=int(pick_spawner_fem)+1;
}
mal_spawnersB[j]=spawner_listM[pickM]; //numeric(0) happens when you skip one because it is zero.
fem_spawnersB[j]=spawner_listF[pickF];
spawn_malB_thisyr(some_integerB,_)=tmpMatrixB_1(2*spawner_listM[pickM]-2,_); //the minus two is to get the indices to work out
spawn_malB_thisyr(some_integerB+1,_)=tmpMatrixB_1(2*spawner_listM[pickM]-1,_);//and the next one as well in the row
spawn_femB_thisyr(some_integerB,_)=tmpMatrixB_2(2*spawner_listF[pickF]-2,_);
spawn_femB_thisyr(some_integerB+1,_)=tmpMatrixB_2(2*spawner_listF[pickF]-1,_);
spawner_listM.erase(pickM); //this erases the one at the location you specify
spawner_listF.erase(pickF);
some_integerB=some_integerB+2;
} //end js for B
} //end if spawnersB_rounded_star[i]>0
} //end of i loop (done with finding spawners)
spawn_femA[k]=spawn_femA_thisyr;
spawn_malA[k]=spawn_malA_thisyr;
spawn_femB[k]=spawn_femB_thisyr;
spawn_malB[k]=spawn_malB_thisyr;
//now establish genotypes for the new recruits
//Recruit genotypes: each recruit draws a random mother and father, then
//one allele per SNP from each parent's two allele rows.
//NOTE(review): a single rand_allele per SNP selects BOTH the paternal and
//maternal allele copy, so the two inheritance draws are perfectly
//correlated -- confirm whether two independent draws were intended.
RecA = NumericMatrix (2*allA(k+1,ages-1),nSNP);
RecB = NumericMatrix (2*allB(k+1,ages-1),nSNP);
//recruits for popA
for(int j=0;j<allA(k+1,ages-1);j++){
rand_doub=as<double>(runif(1,-0.5,(spawn_femA_thisyr.nrow()/2)-1));
samp_mom=int(rand_doub);
if((rand_doub+0.5)>=(int(rand_doub)+1)){
samp_mom=int(rand_doub)+1;
}
rand_doub=as<double>(runif(1,-0.5,(spawn_malA_thisyr.nrow()/2)-1));
samp_dad=int(rand_doub);
if((rand_doub+0.5)>=(int(rand_doub)+1)){
samp_dad=int(rand_doub)+1;
}
for(int i=0;i<nSNP;i++){
rand_doub=as<double>(runif(1,-0.5,1.5));
rand_allele=int(rand_doub);
if((rand_doub+0.5)>=(int(rand_doub)+1)){
rand_allele=int(rand_doub)+1;
}
RecA(2*j,i)=spawn_malA_thisyr(samp_dad*2+rand_allele,i);
RecA(2*j+1,i)=spawn_femA_thisyr(samp_mom*2+rand_allele,i);
} //end of all 10 microsatellites
} //end of j RecAs recruits popA
//recruits for popB
for(int j=0;j<allB(k+1,ages-1);j++){
rand_doub=as<double>(runif(1,-0.5,(spawn_femB_thisyr.nrow()/2)-1));
samp_mom=int(rand_doub);
if((rand_doub+0.5)>=(int(rand_doub)+1)){
samp_mom=int(rand_doub)+1;
}
rand_doub=as<double>(runif(1,-0.5,(spawn_malB_thisyr.nrow()/2)-1));
samp_dad=int(rand_doub);
if((rand_doub+0.5)>=(int(rand_doub)+1)){
samp_dad=int(rand_doub)+1;
}
for(int i=0;i<nSNP;i++){
rand_doub=as<double>(runif(1,-0.5,1.5));
rand_allele=int(rand_doub);
if((rand_doub+0.5)>=(int(rand_doub)+1)){
rand_allele=int(rand_doub)+1;
}
RecB(2*j,i)=spawn_malB_thisyr(samp_dad*2+rand_allele,i);
RecB(2*j+1,i)=spawn_femB_thisyr(samp_mom*2+rand_allele,i);
} //end of all 10 microsatellites
} //end of j RecBs recruits popB
//here take the matrices RecA and RecB which are random offspring
//and select a subset of the individuals and make more of them
//this represents a subset of parents being successful at mating
//there might be better ways, like reducing number of spawners
recA = clone(RecA);
recB = clone(RecB);
//mutate new recruits
//Stepwise mutation: each allele copy mutates up or down by 1.
//NOTE(review): the up-mutation threshold is 0.01*MUS[j] while the
//down-mutation threshold is MUS[j] -- a 100x asymmetry. Confirm this is
//intentional and not a leftover scaling factor.
//NOTE(review): conditions use bitwise '&' on bool operands; correct here,
//but '&&' would be the idiomatic scalar form.
double prob;
double dir;
for (int j=0;j<nSNP;j++){
for (int i=0; i < recA.nrow(); i++){ //recA mutation for each allele
prob=as<double>(runif(1,0,1));
dir=as<double>(runif(1,0,1));
if(prob<0.01*MUS[j]&dir<0.5){
recA(i,j)=recA(i,j)+1;
}else if (prob<MUS[j]&dir>0.5){
recA(i,j)=recA(i,j)-1;
}
}
for (int i=0; i < recB.nrow(); i++){ //recB mutation for each allele
prob=as<double>(runif(1,0,1));
dir=as<double>(runif(1,0,1));
if(prob<0.01*MUS[j]&dir<0.5){
recB(i,j)=recB(i,j)+1;
}else if (prob<MUS[j]&dir>0.5){
recB(i,j)=recB(i,j)-1;
}
}
}
//MIGRATION
//Move nmig recruits from popB into popA (one-way), sampling source and
//destination individuals without replacement; nmig is capped at half the
//smaller recruit pool.
//NOTE(review): 'k%1==0' is always true, so migration happens every year
//after k>1 -- confirm the modulus was meant to throttle migration years.
//NOTE(review): 'SeqLen' (capitalized) differs from the 'seq_len' used
//elsewhere; presumably a helper defined earlier in the file -- verify.
if ((k>1)&(k%1==0)){
Rcout<<"second migration"<<std::endl;
nmig=nmig1;
NumericVector BtoAlist(nmig);
NumericVector AfromBlist(nmig);
int whichone;
if (recA.nrow()<=recB.nrow()){whichone=1;}
if (recB.nrow()<recA.nrow()){whichone=2;}
if (whichone==1){//if there are fewer recruits in recA
if ((2*nmig)>recA.nrow()|(2*nmig)>recB.nrow()){nmig=recA.nrow()/2;}
}
if (whichone==2){//if there are fewer recruits in recB
if ((2*nmig)>recA.nrow()|(2*nmig)>recB.nrow()){nmig=recB.nrow()/2;}
}
Rcout<<"mig,recA,recB,whichone"<<std::endl;
Rf_PrintValue(wrap(nmig));
Rf_PrintValue(wrap(recA.nrow()));
Rf_PrintValue(wrap(recB.nrow()));
Rf_PrintValue(wrap(whichone));
nmig_vec[k]=nmig;
Rf_PrintValue(wrap(1619));
NumericMatrix clonerecB=clone(recB);
NumericMatrix clonerecA=clone(recA);
Rcout<<"clonerecB.nrow() and clonerecA.nrow()"<<std::endl;
Rf_PrintValue(wrap(clonerecB.nrow()));
Rf_PrintValue(wrap(clonerecA.nrow()));
BtoAlist=SeqLen(clonerecB.nrow()/2)-1;
AfromBlist=SeqLen(clonerecA.nrow()/2)-1;
Rf_PrintValue(wrap(1626));
for (int i=0;i<nmig;i++){
double probfromB=as<double>(runif(1,0,(clonerecB.nrow()/2)-1-i)); //works, I checked it
double probtoA=as<double>(runif(1,0,(clonerecA.nrow()/2)-1-i));
int pick_fromB=int(probfromB);
if((probfromB+0.5)>=(int(probfromB)+1)){
pick_fromB=int(probfromB)+1;
}
int pick_toA=int(probtoA);
if((probtoA+0.5)>=(int(probtoA)+1)){
pick_toA=int(probtoA)+1;
}
Rf_PrintValue(wrap(1641));
NumericMatrix::Row moverB2Arow1=clonerecB(2*(BtoAlist[pick_fromB]),_); //minus 1 is for indexing
NumericMatrix::Row moverB2Arow2=clonerecB((2*(BtoAlist[pick_fromB]))+1,_);
recA(2*(AfromBlist[pick_toA]),_)=moverB2Arow1;
recA((2*(AfromBlist[pick_toA]))+1,_)=moverB2Arow2;
BtoAlist.erase(pick_fromB);
AfromBlist.erase(pick_toA);
}//end of migration
Rf_PrintValue(wrap(1652));
//for (int i=0;i<2*nmig;i++){
// NumericMatrix::Row moverB2Arow=clonerecB(i,_); //minus 1 is for indexing
// recA(i,_)=moverB2Arow;
//}
}//end of if k >1
Rf_PrintValue(wrap(1662));
//save new recruits to popA and popB
//Split each recruit matrix into "male" and "female" halves (first/second
//block of rows) and store as next year's popA[k+ages]/popB[k+ages] entry.
NumericMatrix recA_1(allA(k+1,ages-1),nSNP);
NumericMatrix recA_2(allA(k+1,ages-1),nSNP);
for(int i=0;i<allA(k+1,ages-1);i++){
recA_1(i,_)=recA(i,_);
recA_2(i,_)=recA(i+allA(k+1,ages-1),_);
}
tmpList_recsA[0] = recA_1;
tmpList_recsA[1] = recA_2;
NumericMatrix recB_1(allB(k+1,ages-1),nSNP);
NumericMatrix recB_2(allB(k+1,ages-1),nSNP);
for(int i=0;i<allB(k+1,ages-1);i++){
recB_1(i,_)=recB(i,_);
recB_2(i,_)=recB(i+allB(k+1,ages-1),_);
}
tmpList_recsB[0] = recB_1;
tmpList_recsB[1] = recB_2;
popA[k+ages]=clone(tmpList_recsA);
popB[k+ages]=clone(tmpList_recsB);
}// end k loop
Rf_PrintValue(wrap(1686));
Rcout<<"Nyrs"<<std::endl;
Rf_PrintValue(wrap(Nyrs));
Rcout<<"ages"<<std::endl;
Rf_PrintValue(wrap(ages));
//get genetic diversity - put all genotypes in GenotypesA1, GenotypesA2, GenotypesB1, GenotypesB2
//cumpopsA/B[i] = cumulative row count of popA/B entries 0..i-1, used below
//to locate each cohort inside the flattened Genotypes* matrices.
NumericVector cumpopsA(Nyrs+ages);//cumpops holds the cumulative number (for counting purposes)
NumericVector cumpopsB(Nyrs+ages);
cumpopsA[0]=0;
cumpopsB[0]=0;
Rf_PrintValue(wrap(1696));
Rcout<<"dimspopA"<<std::endl;
Rf_PrintValue(wrap(dimspopA));
Rcout<<"dimspopB"<<std::endl;
Rf_PrintValue(wrap(dimspopB));
for(int i=1;i<(Nyrs+ages);i++){
cumpopsA[i]=dimspopA[i-1]+cumpopsA[i-1];
cumpopsB[i]=dimspopB[i-1]+cumpopsB[i-1];
}
Rcout<<"cumpopsA"<<std::endl;
Rf_PrintValue(wrap(cumpopsA));
Rcout<<"cumpopsB"<<std::endl;
Rf_PrintValue(wrap(cumpopsB));
Rf_PrintValue(wrap(1701));
Rcout<<"cumpopsA[Nyrs+ages-1], cumpopsB[Nyrs+ages-1]"<<std::endl;
Rf_PrintValue(wrap(cumpopsA[Nyrs+ages-1]));
Rf_PrintValue(wrap(cumpopsB[Nyrs+ages-1]));
//Flattened genotype stores: *1 = male matrices, *2 = female matrices,
//concatenated over all cohorts/years. Note these initial allocations are
//immediately replaced below with sizes computed from the actual popA/popB
//row counts.
NumericMatrix GenotypesA1(cumpopsA[Nyrs+ages-1],nSNP);//matrix to hold everything (we want this but come back to it)
NumericMatrix GenotypesA2(cumpopsA[Nyrs+ages-1],nSNP);
NumericMatrix GenotypesB1(cumpopsB[Nyrs+ages-1],nSNP);
NumericMatrix GenotypesB2(cumpopsB[Nyrs+ages-1],nSNP);
NumericMatrix tmpMatA_1;
NumericMatrix tmpMatA_2;
NumericMatrix tmpMatB_1;
NumericMatrix tmpMatB_2;
List tmpLA(2);
List tmpLB(2);
some_integerA=0;
some_integerB=0;
Rf_PrintValue(wrap(1714));
//pluscounterB_list is Nyrs long (110)
//Re-size GenotypesA1/A2 to the exact total number of stored rows, then
//copy every popA entry (male and female matrices) in order.
NumericVector poprowsA(Nyrs+ages-1);
for (int i=0;i<(Nyrs+ages-1);i++){
List hmmA(2);
hmmA=popA[i];
NumericMatrix hmm2A=hmmA[1];
poprowsA[i]=hmm2A.nrow();}
GenotypesA1=NumericMatrix(std::accumulate(poprowsA.begin(),poprowsA.end(),0.0),nSNP);
GenotypesA2=NumericMatrix(std::accumulate(poprowsA.begin(),poprowsA.end(),0.0),nSNP);
for(int k=0; k<(Nyrs+ages-1);k++){
tmpLA = popA[k];
tmpMatA_1 = as<SEXP>(tmpLA[0]);
tmpMatA_2 = as<SEXP>(tmpLA[1]);
for(int i=0; i<tmpMatA_1.nrow();i++){
GenotypesA1(some_integerA,_)=tmpMatA_1(i,_);
GenotypesA2(some_integerA,_)=tmpMatA_2(i,_);
some_integerA=some_integerA+1;
}
}
//genotypesB1<sum of all popB or <tmpMatB_
//Same assembly for population B.
NumericVector poprowsB(Nyrs+ages-1);
for (int i=0;i<(Nyrs+ages-1);i++){
List hmm(2);
hmm=popB[i];
NumericMatrix hmm2=hmm[1];
poprowsB[i]=hmm2.nrow();}
GenotypesB1=NumericMatrix(std::accumulate(poprowsB.begin(),poprowsB.end(),0.0),nSNP);
GenotypesB2=NumericMatrix(std::accumulate(poprowsB.begin(),poprowsB.end(),0.0),nSNP);
//if (genotypesB1.nrow()<
for(int k=0; k<(Nyrs+ages-1);k++){
tmpLB = popB[k];
tmpMatB_1 = as<SEXP>(tmpLB[0]);
tmpMatB_2 = as<SEXP>(tmpLB[1]);
for(int i=0; i<tmpMatB_1.nrow();i++){
GenotypesB1(some_integerB,_)=tmpMatB_1(i,_);
GenotypesB2(some_integerB,_)=tmpMatB_2(i,_);
some_integerB=some_integerB+1;
}
Rcout<<"k in this loop goes to 117 but 117 is empty"<<std::endl;
Rf_PrintValue(wrap(k));
}
//get allelic richness for popA - the initial year
//Distinct-allele count per locus via a "make unique" map trick: duplicate
//values are perturbed by +count/100, so entries left unmodified mark first
//occurrences; nalls+1 (the +1 covers index 0) = number of distinct alleles.
//NOTE(review): the perturbation assumes fewer than ~100 copies of any
//allele value, otherwise +count/100 can reach the next integer allele and
//collide -- confirm allele counts stay below that in practice.
all_richA=NumericVector(nSNP); //set up a vector to hold number of alleles for each microsat
int rich_nA = arichAmat.nrow();
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputA = arichAmat(_,rich);
NumericVector xA = clone<NumericVector>(inputA);
int nallsA=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xA.size() ;
double current, previous = xA[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xA[i] ;
if( current == previous ){
xA[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xA[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xA[i]-inputA[i]==0){nallsA=nallsA+1;}
}
all_richA[rich] = nallsA+1;
} //end of rich A loop (over 10 microsats)
//end of rch code popA
richnessA(0,_)=all_richA;
mean_arichA[0]=std::accumulate(all_richA.begin(),all_richA.end(),0.0)/nSNP;
//get allelic richness for popB - the initial year
//Identical distinct-count algorithm for population B's initial alleles.
//NOTE(review): rich_nA/rich_nB are computed but never used.
all_richB=NumericVector(nSNP); //set up a vector to hold number of alleles for each microsat
int rich_nB = arichBmat.nrow();
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputB = arichBmat(_,rich);
NumericVector xB = clone<NumericVector>(inputB);
int nallsB=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xB.size() ;
double current, previous = xB[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xB[i] ;
if( current == previous ){
xB[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xB[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xB[i]-inputB[i]==0){nallsB=nallsB+1;}
}
all_richB[rich] = nallsB+1;
} //end of rich B loop (over 10 microsats)
//end of rch code popB
richnessB(0,_)=all_richB; //each row is allelic richness at each microsat in that year
mean_arichB[0]=std::accumulate(all_richB.begin(),all_richB.end(),0.0)/nSNP;
//get allelic richness in other years popA
//For each subsequent year k: rebuild the full allele matrix for the living
//population (arichAmat: 2 allele rows per individual, male rows then
//female rows) by slicing the flattened Genotypes* matrices via cumpops,
//then rerun the same distinct-allele counting as for year 0.
//NOTE(review): the else branch that clamps j_a/j_b to the end of the
//Genotypes matrices re-reads trailing rows rather than the cohort's own
//rows -- a guard against overrun, but the sampled genotypes are then
//approximate; confirm this is acceptable.
for (int k = 1; k<Nyrs; k++){ //Nyrs
some_integerA = 0;
arichAmat = NumericMatrix(2*NtotalA[k],nSNP);
N_init_a1_star = allA.row(k);
int j_a; int j_b;
for (int i=0;i<ages;i++){
if ((cumpopsA[k+i]+N_init_a1_star[i])<=cumpopsA[Nyrs+ages-1]){
j_a = cumpopsA[k+i]; j_b = cumpopsA[k+i]+N_init_a1_star[i];}else
{j_a = cumpopsA[Nyrs+ages-1]-N_init_a1_star[i]; j_b = cumpopsA[Nyrs+ages-1];}
for(int j=j_a;j<j_b;j++){
arichAmat(some_integerA,_)=GenotypesA1(j,_);
arichAmat(some_integerA+NtotalA[k],_)=GenotypesA2(j,_);
some_integerA=some_integerA+1;
}
}
//get allelic richness for popA (all years except the first one)
//Same map-based distinct-count trick as the initial year: duplicates are
//perturbed by +count/100 so unmodified entries mark first occurrences.
all_richA=NumericVector(nSNP); //set up a vector to hold number of alleles for each microsat
int rich_nA = arichAmat.nrow();
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputA = arichAmat(_,rich);
NumericVector xA = clone<NumericVector>(inputA); //already defined xA
int nallsA=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xA.size() ;
double current, previous = xA[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xA[i] ;
if( current == previous ){
xA[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xA[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xA[i]-inputA[i]==0)
{nallsA=nallsA+1;
}
}
//*****************************
all_richA[rich] = nallsA+1;
} //end of rich A loop (over 10 microsats)
//end of rch code popA
richnessA(k,_)=all_richA;
mean_arichA[k]=std::accumulate(all_richA.begin(),all_richA.end(),0.0)/nSNP;
// }//end of k= 1 to Nyrs popA
//get allelic richness in other years popB
// for (int k = 1; k<Nyrs; k++){ //Nyrs
//Population B, same per-year rebuild + distinct-count (the commented
//braces show this used to be a separate k loop before being merged).
some_integerB = 0;
arichBmat = NumericMatrix(2*NtotalB[k],nSNP);
N_init_a2_star = allB.row(k);
//int j_a; int j_b;
for (int i=0;i<ages;i++){
if ((cumpopsB[k+i]+N_init_a2_star[i])<=cumpopsB[Nyrs+ages-1]){
j_a = cumpopsB[k+i]; j_b = cumpopsB[k+i]+N_init_a2_star[i];}else
{j_a = cumpopsB[Nyrs+ages-1]-N_init_a2_star[i]; j_b = cumpopsB[Nyrs+ages-1];}
for(int j=j_a;j<j_b;j++){
arichBmat(some_integerB,_)=GenotypesB1(j,_);
arichBmat(some_integerB+NtotalB[k],_)=GenotypesB2(j,_);
some_integerB=some_integerB+1;
}
}
Rf_PrintValue(wrap(1658));
//get allelic richness for popB
all_richB=NumericVector(nSNP); //set up a vector to hold number of alleles for each microsat
int rich_nB = arichBmat.nrow();
Rf_PrintValue(wrap(1665));
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputB = arichBmat(_,rich);
NumericVector xB = clone<NumericVector>(inputB);
int nallsB=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xB.size() ;
double current, previous = xB[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xB[i] ;
if( current == previous ){
xB[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xB[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xB[i]-inputB[i]==0)
{nallsB=nallsB+1;
}
}
all_richB[rich] = nallsB+1;
} //end of rich B loop (over 10 microsats)
//end of rch code popB
richnessB(k,_)=all_richB; //each row is allelic richness at each microsat in that year
mean_arichB[k]=std::accumulate(all_richB.begin(),all_richB.end(),0.0)/nSNP;
//for (int j=0;j<nSNP;j++){
//for (int i=0;i<(arichAmat.nrow()/2);i++){
//if (arichAmat((2*i),j)==arichAmat(((2*i)+1),j)){hetsA[j]=hetsA[j]+1;}
//}}
//for (int j=0;j<nSNP;j++){
//for (int i=0;i<(arichBmat.nrow()/2);i++){
//if (arichBmat((2*i),j)==arichBmat(((2*i)+1),j)){hetsB[j]=hetsB[j]+1;}
//}}
//***********************************************************************
//here get new FST based on this years arichAmat and arichBmat
//arichAmat and arichBmat get generated each of k years so they are already available.
//Fst (for the entire population) if nsamp is bigger than pop size, will give warning
//create a matrix with unique alleles for each microsatellite in each column
//Every 20th year (combined-management case only), build the allele
//frequency table used for Fst: pool both populations' alleles, find the
//unique alleles per locus, then tabulate per-population frequencies and
//observed heterozygosity into talmat.
if ((k%20==0)&(NtotalA[k]>0)&(NtotalB[k]>0)&(mgmt_counter==0)){ //do this for all years for combined mgmt
//for all k
NumericMatrix tallyBOTHmat(2*NtotalB[k]+2*NtotalA[k],nSNP);
int some_integerBOTH = 0;
NumericMatrix arichBOTHmat(2*NtotalB[k]+2*NtotalA[k],nSNP);//all alleles both pops year 1
for (int i=0;i<2*NtotalA[k];i++){
arichBOTHmat(i,_)=arichAmat(i,_);
}
for (int i=0;i<2*NtotalB[k];i++){
arichBOTHmat(i+2*NtotalA[k],_)=arichBmat(i,_);
}
//get unique alleles for both populations
//Same +count/100 "make unique" trick as the richness code; first
//occurrences are additionally recorded into tally_allelesBOTH.
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputBOTH = arichBOTHmat(_,rich);
NumericVector xBOTH = clone<NumericVector>(inputBOTH);
NumericVector tally_allelesBOTH(2*NtotalB[k]+2*NtotalA[k]); //this is a vector that holds the unique alleles
int tallycounter=1;
//*******
int nallsBOTH=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xBOTH.size() ;
double current, previous = xBOTH[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xBOTH[i] ;
if( current == previous ){
xBOTH[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xBOTH[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xBOTH[i]-inputBOTH[i]==0)
{nallsBOTH=nallsBOTH+1;
tally_allelesBOTH[tallycounter]=inputBOTH[i];
tallycounter=tallycounter+1;
}}
tally_allelesBOTH[0]=inputBOTH[0];
tallyBOTHmat(_,rich)=tally_allelesBOTH;
} //end of rich BOTH loop (over 10 microsats)
List tallist(nSNP); //list of 10 that will hold allele frequencies for each of 10 microsatellites.
//First column is the actual allele size, second is freq from popA, third is freq popB
NumericVector count(nSNP);//list of number of unique alleles at each locus
//now set up a table to get allele frequencies for popA and popB
//NOTE(review): counting tal[i]!=0 treats allele value 0 as "empty slot";
//if 0 is ever a legitimate allele value this undercounts -- confirm the
//allele coding excludes 0.
for (int rich=0;rich<nSNP;rich++){
NumericMatrix::Column inputA=arichAmat(_,rich);
NumericMatrix::Column inputB=arichBmat(_,rich);
NumericMatrix::Column tal=tallyBOTHmat(_,rich);
for(int i=0;i<tal.size();i++){ //figure out how many unique alleles are at this microsat (from tal)
if(tal[i]!=0){count[rich]=count[rich]+1;}
}
NumericVector Counter=clone(count);
int counter=Counter[rich];
NumericVector taltrunc(counter); //will hold all unique alleles from this microsat (both pops)
NumericMatrix tallyAB(counter,3); //matrix that has alleles, freq at popA, freq at popB
NumericVector howmanyA(counter); //number of alleles for this population at each allele
NumericVector howmanyB(counter);
for(int i=0;i<counter;i++){ //counter is the number of unique alleles at a locus
taltrunc[i]=tal[i];
int counterA=0; //a counter for number of unique alleles at each locus
int counterB=0;
for (int j=0;j<2*NtotalA[k];j++){
if (inputA[j]==taltrunc[i])//go through all alleles to see how many match this unique one
{counterA=counterA+1;}
}
howmanyA[i]=counterA;
for (int j=0;j<2*NtotalB[k];j++){
if (inputB[j]==taltrunc[i])
{counterB=counterB+1;}
}
howmanyB[i]=counterB;
} //end of counter
tallyAB(_,0)=taltrunc;
tallyAB(_,1)=howmanyA/(2*NtotalA[k]);
tallyAB(_,2)=howmanyB/(2*NtotalB[k]);
tallist[rich]=tallyAB;
}//end of rich create talmat, which has unique alleles first column then freqs for popA and B
//Flatten tallist into talmat: one row per (locus, allele) with columns
//{allele, freqA, freqB, hetA, hetB}.
NumericMatrix talmat(std::accumulate(count.begin(),count.end(),0.0),5);
for (int i=0;i<nSNP;i++){
int talcount=0;
NumericMatrix taltmp(count[i],3);
taltmp=as<SEXP>(tallist[i]);
for (int j=std::accumulate(count.begin(),count.begin()+i,0.0);j<std::accumulate(count.begin(),count.begin()+i+1,0.0);j++)
{
talmat(j,_)=taltmp(talcount,_);
talcount=talcount+1;
}
}
//Observed heterozygosity per allele.
//NOTE(review): the inner loop variable 'k' shadows the enclosing year
//loop's 'k', samples a hard-coded 100 individuals (200 allele rows), yet
//divides by pop1_size/pop2_size -- if pop sizes differ from 100 this is
//inconsistent; confirm intended denominator and sample size.
double hetsA;
double hetsB;
for (int i=0;i<nSNP;i++){
for (int j=std::accumulate(count.begin(),count.begin()+i,0.0);j<std::accumulate(count.begin(),count.begin()+i+1,0.0);j++)
{
hetsA=0;hetsB=0;
for (int k=0;k<100;k++){
if ((arichAmat((2*k),i)!=arichAmat(((2*k)+1),i))&((arichAmat((2*k),i)==talmat(j,0))|(arichAmat(((2*k)+1),i)==talmat(j,0)))){hetsA=hetsA+1;}
if ((arichBmat((2*k),i)!=arichBmat(((2*k)+1),i))&((arichBmat((2*k),i)==talmat(j,0))|(arichBmat(((2*k)+1),i)==talmat(j,0)))){hetsB=hetsB+1;}
}
talmat(j,3)=hetsA/pop1_size;
talmat(j,4)=hetsB/pop2_size;
}}
talmat2=clone(talmat);
//talmat is the raw material to find Fst!!!
//GET FST this function finds Fst between 2 populations, multiple loci and multiple alleles.
n_sampA=NtotalA[k];
n_sampB=NtotalB[k];
double n_bar=0.5*(n_sampA+n_sampB); //sample size - can change this.
double r=2;
NumericVector p_bar(talmat2.nrow());
NumericMatrix::Column sampmatA =talmat2(_,1);
NumericMatrix::Column sampmatB =talmat2(_,2);
NumericVector s2(talmat2.nrow());
NumericVector h_bar(talmat2.nrow());
NumericVector ones(talmat2.nrow());
ones=rep(1,talmat2.nrow());
p_bar=(n_sampA*sampmatA+n_sampB*sampmatB)/(2*n_bar); //each entry is average sample frequency of an allele
s2=((n_sampB*(pow((sampmatB-p_bar),2)))+(n_sampA*pow((sampmatA-p_bar),2)))/n_bar;//sample variance of allele freqs over pops (for each allele)
for (int i=0;i<talmat2.nrow();i++){
h_bar[i]=(talmat2(i,3)+talmat2(i,4))/2;
}
double nc=((r*n_bar)-((pow(n_sampA,2)/(r*n_bar))+(pow(n_sampB,2)/(r*n_bar))))/(r-1); //same as n_bar
double C2=r*(1-(nc/n_bar));
Rcout<<"C2"<<std::endl;
Rf_PrintValue(wrap(C2));
NumericVector a=(n_bar/nc)*(s2-((1/(n_bar-1))*((p_bar*(1-p_bar))-(((r-1)/r)*s2)-(0.25*h_bar))));
NumericVector dvec=((2*n_bar)/((2*n_bar)-1))*((p_bar*(1-p_bar))-(((r-1)/r)*s2));
NumericVector b=(n_bar/(n_bar-1))*((p_bar*(1-p_bar))-(((r-1)/r)*s2)-(h_bar*(2*n_bar-1)/(4*n_bar)));
NumericVector c=h_bar/2;
NumericVector aplusdvec=a+b+c;//I added b here
NumericVector aplusdvec2=a+dvec;
double fst=std::accumulate(a.begin(),a.end(),0.0)/(std::accumulate(aplusdvec.begin(),aplusdvec.end(),0.0)); //the better one
double fst2=std::accumulate(a.begin(),a.end(),0.0)/(std::accumulate(aplusdvec2.begin(),aplusdvec2.end(),0.0)); //the better one
ps=p_bar*(1-p_bar);
NumericVector theta=(s2-((1/((2*n_bar)-1))*((p_bar*(1-p_bar))-s2)))/(((1-((2*n_bar*C2)/(((2*n_bar)-1)*r)))*(p_bar*(1-p_bar)))+(1+((2*n_bar*C2)/((2*n_bar-1)*r)))*(s2/r));
NumericVector theta1=(s2-((1/(n_bar-1))*((p_bar*(1-p_bar))-s2-(h_bar/4))))/((((1-((n_bar*C2)/((n_bar-1)*r)))*(p_bar*(1-p_bar)))+(((1+n_bar*C2)/((n_bar-1)*r))*(s2/r))+((C2/(r*(n_bar-1)))*(h_bar/4))));
out_fst60[0]=fst;
fst_vec[ngen+k]=fst;
fst_vec2[ngen+k]=fst2;
Wrights_fst_vec[ngen+k]=std::accumulate(theta.begin(),theta.end(),0.0)/talmat2.nrow();
Wrights_fst_vec1[ngen+k]=std::accumulate(theta1.begin(),theta1.end(),0.0)/talmat2.nrow();
Wrights_simple[ngen+k]=std::accumulate(s2.begin(),s2.end(),0.0)/std::accumulate(ps.begin(),ps.end(),0.0);
}//end of if statement
Rcout<<"k is"<<std::endl;
Rf_PrintValue(wrap(k));
//****GET FST FOR JUST A RANDOM SAMPLE OF 100
if((k==620|k==640|k==680|(k>599&k%10==0))&mgmt_counter==0){ //ends at 3106
n_sampA=nsamp;//just 100 generally
n_sampB=nsamp;//and 100
IntegerVector order(2*n_sampA+2*n_sampB);
int nsig=100;
NumericVector fst_subvec(nsig);
IntegerVector ORDER(4*nsamp);
Rf_PrintValue(wrap(2186));
//randomly select 2*diffsamp without replacement from populationA
IntegerVector randsampA = seq_len(2*NtotalA[k]);
IntegerVector whichsampA(2*n_sampA);
NumericMatrix arichAmat_sub(2*n_sampA,nSNP);
NumericMatrix arichBmat_sub(2*n_sampB,nSNP);
int probA; int pickA;
Rf_PrintValue(wrap(2193));
if((NtotalA[k]>=n_sampA)&(NtotalB[k]>=n_sampB)){
for (int i=0; i < n_sampA; i++){
probA=as<double>(runif(1,-0.5,(NtotalA[k]-0.5)));//sample without replacement
pickA=int(probA);
if((probA+0.5)>=(int(probA)+1)){
pickA=int(probA)+1;
}
whichsampA[i]=randsampA[pickA]-1;//because of C++ special indexing
//randsampA.erase(pickA);
}
for (int i=0;i<n_sampA;i++){
arichAmat_sub(i,_)=arichAmat(whichsampA[i],_); //arichAmat is all alleles males and females from population that year
arichAmat_sub(i+n_sampA,_)=arichAmat(whichsampA[i]+1,_);
} //arichAmat_sub is a random sample of 2*n_sampA
arichBmat_sub=NumericMatrix(2*n_sampB,nSNP);
//randomly select 2*diffsamp without replacement from populationB
//if (ninecomp[nine]==2){
IntegerVector randsampB = seq_len(2*NtotalB[k]);
IntegerVector whichsampB(2*n_sampB);
int probB;
int pickB;
for (int i=0; i < n_sampB; i++){
probB=as<double>(runif(1,-0.5,(NtotalB[k]-0.5)));//each time there is one fewer to choose from
pickB=int(probB);
if((probB+0.5)>=(int(probB)+1)){
pickB=int(probB)+1;
}
whichsampB[i]=randsampB[pickB]-1;
//randsampB.erase(pickB);
}
for (int i=0;i<n_sampB;i++){
arichBmat_sub(i,_)=arichBmat(whichsampB[i],_);
arichBmat_sub(i+n_sampB,_)=arichBmat(whichsampB[i]+1,_);
}
//arichAmat_sub
//} //end of if ninecomp[nine]==2;
if (k==620){arichAmat_sub620=arichAmat_sub;
arichBmat_sub620=arichBmat_sub;}
if (k==640){arichAmat_sub640=arichAmat_sub;
arichBmat_sub640=arichBmat_sub;}
if (k==680){arichAmat_sub680=arichAmat_sub;
arichBmat_sub680=arichBmat_sub;}
//by here you should have arichAmat_sub and arichBmat_sub create a matrix with unique alleles for each microsatellite in each column
NumericMatrix tallyBOTHmat_sub(2*n_sampA+2*n_sampB,nSNP); //this has everything subsampled, just 200+200 total
NumericMatrix arichBOTHmat_sub(2*n_sampA+2*n_sampB,nSNP); //all alleles both pops year 1
NumericMatrix arichBOTHmat_subrand(2*n_sampA+2*n_sampB,nSNP);
int some_integerBOTH_sub = 0;
for (int i=0;i<2*n_sampA;i++){ //holds 200 genotypes from each population
arichBOTHmat_sub(i,_)=arichAmat_sub(i,_);}
for (int i=0;i<2*n_sampB;i++){
arichBOTHmat_sub(i+2*n_sampA,_)=arichBmat_sub(i,_);
}
NumericVector fstsig(1); //this is going to count how many times the initial sample is larger than permuted ones
fstsig[0]=0;
for (int sigfst=0;sigfst<nsig;sigfst++){//want 100 later
ORDER=seq_len(2*n_sampA+2*n_sampB)-1;
order=rep(0,2*n_sampA+2*n_sampB);
if (sigfst==0)
{order=seq_len(2*n_sampA+2*n_sampB)-1;}
else{
int probZ;
int pickZ;
for (int i=0; i < 2*n_sampA+2*n_sampB; i++){ //random mating for each allele 4*nsamp
probZ=as<double>(runif(1,-0.5,(2*n_sampA+2*n_sampB-0.5-i)));
pickZ=int(probZ);
if((probZ+0.5)>=(int(probZ)+1)){
pickZ=int(probZ)+1;
}
order[i]=ORDER[pickZ];
ORDER.erase(pickZ);
}
}//end of is this the first iteration of sigfst or not
for(int i=0;i<2*n_sampA+2*n_sampB;i++){ //rearrange arichBOTHmat_sub for significance calculations
arichBOTHmat_subrand(i,_)=arichBOTHmat_sub(order[i],_);
}
//divide up arichBOTHmat_subrand into 2 populations again for significance calculations first time these will be truly from pops A and B but later just jumbles of both
NumericMatrix arichAmat_subrand(2*n_sampA,nSNP);
NumericMatrix arichBmat_subrand(2*n_sampB,nSNP);
for (int i=0;i<2*n_sampA;i++){
arichAmat_subrand(i,_)=arichBOTHmat_subrand(i,_);}
for (int i=0;i<2*n_sampB;i++){
arichBmat_subrand(i,_)=arichBOTHmat_subrand(i+2*n_sampA,_);
}
//get unique alleles for both populations
for (int rich = 0; rich<nSNP;rich++){
NumericMatrix::Column inputBOTH_sub = arichBOTHmat_subrand(_,rich);
NumericVector xBOTH_sub = clone<NumericVector>(inputBOTH_sub);
NumericVector tally_allelesBOTH_sub(2*n_sampA+2*n_sampB); //this is a vector that holds the unique alleles
int tallycounter_sub=1;
int nallsBOTH_sub=0;
typedef std::map<double,int> imap ;
typedef imap::value_type pair ;
imap index ;
int n = xBOTH_sub.size() ;
double current, previous = xBOTH_sub[0] ;
index.insert( pair( previous, 0 ) );
imap::iterator it = index.begin() ;
for( int i=1; i<n; i++){
current = xBOTH_sub[i] ;
if( current == previous ){
xBOTH_sub[i] = current + ( ++(it->second) / 100.0 ) ;
} else {
it = index.find(current) ;
if( it == index.end() ){
it = index.insert(
current > previous ? it : index.begin(),
pair( current, 0 )
) ;
} else {
xBOTH_sub[i] = current + ( ++(it->second) / 100.0 ) ;
}
previous = current ;
}
if (xBOTH_sub[i]-inputBOTH_sub[i]==0)
{nallsBOTH_sub=nallsBOTH_sub+1;
tally_allelesBOTH_sub[tallycounter_sub]=inputBOTH_sub[i];
tallycounter_sub=tallycounter_sub+1;
}
}
tally_allelesBOTH_sub[0]=inputBOTH_sub[0];
tallyBOTHmat_sub(_,rich)=tally_allelesBOTH_sub;
} //end of rich BOTH loop (over 10 microsats)
List tallist_sub(nSNP); //list of 10 that will hold allele frequencies for each of 10 microsatellites.
//First column is the actual allele size, second is freq from popA, third is freq popB
//now set up a table to get allele frequencies for popA and popB
for (int rich=0;rich<nSNP;rich++){
NumericMatrix::Column inputA_sub=arichAmat_subrand(_,rich);
NumericMatrix::Column inputB_sub=arichBmat_subrand(_,rich);
NumericMatrix::Column tal_sub=tallyBOTHmat_sub(_,rich);
for(int i=0;i<tal_sub.size();i++){ //figure out how many unique alleles are at this microsat (from tal)
if(tal_sub[i]!=0){count_sub[rich]=count_sub[rich]+1;}
}
NumericVector Counter_sub=clone(count_sub);
int counter_sub=Counter_sub[rich];
NumericVector taltrunc_sub(counter_sub); //will hold all unique alleles from this microsat (both pops)
NumericMatrix tallyAB_sub(counter_sub,3); //matrix that has alleles, freq at popA, freq at popB
NumericVector howmanyA_sub(counter_sub); //number of alleles for this population at each allele
NumericVector howmanyB_sub(counter_sub);
for(int i=0;i<counter_sub;i++){ //counter is the number of unique alleles at a locus
taltrunc_sub[i]=tal_sub[i];
int counterA_sub=0; //a counter for number of unique alleles at each locus
int counterB_sub=0;
for (int j=0;j<2*n_sampA;j++){
if (inputA_sub[j]==taltrunc_sub[i])//go through all alleles to see how many match this unique one
{counterA_sub=counterA_sub+1;}
}
howmanyA_sub[i]=counterA_sub;
for (int j=0;j<2*n_sampB;j++){
if (inputB_sub[j]==taltrunc_sub[i])
{counterB_sub=counterB_sub+1;}
}
howmanyB_sub[i]=counterB_sub;
} //end of counter
tallyAB_sub(_,0)=taltrunc_sub;
tallyAB_sub(_,1)=howmanyA_sub/(2*n_sampA);
tallyAB_sub(_,2)=howmanyB_sub/(2*n_sampB);
arichsampA_ten[rich]=0;
arichsampB_ten[rich]=0;
for(int i=0;i<taltrunc_sub.size();i++){
if (howmanyA_sub[i]!=0){arichsampA_ten[rich]=arichsampA_ten[rich]+1;}
if (howmanyB_sub[i]!=0){arichsampB_ten[rich]=arichsampB_ten[rich]+1;}
}
tallist_sub[rich]=tallyAB_sub;
}//end of rich
arichsampA_vec[ngen+k]=std::accumulate(arichsampA_ten.begin(),arichsampA_ten.end(),0.0)/nSNP;
arichsampB_vec[ngen+k]=std::accumulate(arichsampB_ten.begin(),arichsampB_ten.end(),0.0)/nSNP;
//create talmat, which has unique alleles first column then freqs for popA and B
NumericMatrix talmat_sub(std::accumulate(count_sub.begin(),count_sub.end(),0.0),5);
for (int i=0;i<nSNP;i++){
int talcount_sub=0;
NumericMatrix taltmp_sub(count_sub[i],3);
taltmp_sub=as<SEXP>(tallist_sub[i]);
for (int j=std::accumulate(count_sub.begin(),count_sub.begin()+i,0.0);j<std::accumulate(count_sub.begin(),count_sub.begin()+i+1,0.0);j++)
{
talmat_sub(j,_)=taltmp_sub(talcount_sub,_);
talcount_sub=talcount_sub+1;
}
}
talmat_sub2=clone(talmat_sub);
double hetsA_sub;
double hetsB_sub;
for (int i=0;i<nSNP;i++){
for (int j=std::accumulate(count_sub.begin(),count_sub.begin()+i,0.0);j<std::accumulate(count_sub.begin(),count_sub.begin()+i+1,0.0);j++)
{
hetsA_sub=0;hetsB_sub=0;
for (int k=0;k<100;k++){
if ((arichAmat_sub((2*k),i)!=arichAmat_sub(((2*k)+1),i))&((arichAmat_sub((2*k),i)==talmat_sub2(j,0))|(arichAmat_sub(((2*k)+1),i)==talmat_sub2(j,0)))){hetsA_sub=hetsA_sub+1;}
if ((arichBmat_sub((2*k),i)!=arichBmat_sub(((2*k)+1),i))&((arichBmat_sub((2*k),i)==talmat_sub2(j,0))|(arichBmat_sub(((2*k)+1),i)==talmat_sub2(j,0)))){hetsB_sub=hetsB_sub+1;}
}
talmat_sub2(j,3)=hetsA_sub/100;
talmat_sub2(j,4)=hetsB_sub/100;
}}
//talmat: GET FST for SUBSAMPLE this function finds Fst between 2 populations, multiple loci and multiple alleles.
double n_bar_sub=0.5*(n_sampA+n_sampB); //sample size - can change this. already declared at 1746
double r_sub=2;
NumericVector p_bar_sub(talmat_sub2.nrow());
NumericMatrix::Column sampmatA_sub =talmat_sub2(_,1);
NumericMatrix::Column sampmatB_sub =talmat_sub2(_,2);
NumericVector s2_sub(talmat_sub2.nrow());
NumericVector h_bar_sub(talmat_sub2.nrow());
for (int i=0;i<talmat_sub2.nrow();i++){
h_bar_sub[i]=(talmat_sub2(i,3)+talmat_sub2(i,4))/2;
}
NumericVector ones_sub(talmat_sub2.nrow());
ones_sub=rep(1,talmat_sub2.nrow());
p_bar_sub=(n_sampA*sampmatA_sub+n_sampB*sampmatB_sub)/(2*n_bar_sub); //each entry is average sample frequency of an allele
s2_sub=((n_sampB*(pow((sampmatB_sub-p_bar_sub),2)))+(n_sampA*pow((sampmatA_sub-p_bar_sub),2)))/n_bar_sub;//sample variance of allele freqs over pops (for each allele)
double nc_sub=((r_sub*n_bar_sub)-((pow(n_sampA,2)/(r_sub*n_bar_sub))+(pow(n_sampB,2)/(r_sub*n_bar_sub))))/(r_sub-1); //same as n_bar
NumericVector a_sub=(n_bar_sub/nc_sub)*(s2_sub-((1/(n_bar_sub-1))*((p_bar_sub*(1-p_bar_sub))-((0.5)*s2_sub)-(0.25*h_bar_sub))));
NumericVector dvec_sub=((2*n_bar_sub)/((2*n_bar_sub)-1))*((p_bar_sub*(1-p_bar_sub))-(1/2)*s2_sub);
NumericVector b_sub=(n_bar_sub/(n_bar_sub-1))*((p_bar_sub*(1-p_bar_sub))-((1/2)*s2_sub)-(h_bar_sub*(2*n_bar_sub-1)/(4*n_bar_sub)));
NumericVector c_sub=h_bar_sub/2;
NumericVector aplusdvec_sub=a_sub+b_sub+c_sub;
double fst_sub=std::accumulate(a_sub.begin(),a_sub.end(),0.0)/std::accumulate(aplusdvec_sub.begin(),aplusdvec_sub.end(),0.0); //the better one
fst_subvec[sigfst]=fst_sub;
//if (sigfst==0&k==620|k==640|k==680){
//double C2_sub=r_sub*(1-(nc_sub/n_bar_sub));
//NumericVector ps_sub=p_bar_sub*(1-p_bar_sub);
//NumericVector theta_sub=(s2_sub-((1/((2*n_bar_sub)-1))*((p_bar_sub*(1-p_bar_sub))-s2_sub)))/(((1-((2*n_bar_sub*C2_sub)/(((2*n_bar_sub)-1)*r_sub)))*(p_bar_sub*(1-p_bar_sub)))+(1+((2*n_bar_sub*C2_sub)/((2*n_bar_sub-1)*r_sub)))*(s2_sub/r_sub));
//NumericVector theta1_sub=(s2_sub-(1/(n_bar_sub-1)*((p_bar_sub*(1-p_bar_sub))-s2_sub-(h_bar_sub/4))))/((((1-((n_bar_sub*C2_sub)/((n_bar_sub-1)*r_sub)))*(p_bar_sub*(1-p_bar_sub)))+(((1+n_bar_sub*C2_sub)/((n_bar_sub-1)*r_sub))*(s2_sub/r_sub))+((C2_sub/(r_sub*(n_bar_sub-1)))*(h_bar_sub/4))));
//Wrights_fst_vec_sub1[ngen+k]=std::accumulate(theta1_sub.begin(),theta1_sub.end(),0.0)/talmat_sub2.nrow();
//Wrights_fst_vec_sub[ngen+k]=std::accumulate(theta_sub.begin(),theta_sub.end(),0.0)/talmat_sub2.nrow();
//Wrights_simple_sub[ngen+k]=std::accumulate(s2_sub.begin(),s2_sub.end(),0.0)/std::accumulate(ps_sub.begin(),ps_sub.end(),0.0);
//}
if(sigfst>0){
if(fst_sub>=fst_subvec[0]){fstsig[0]=fstsig[0]+1;}//count how many times random value is greater than true
}
}//end of sigfst
NumericVector outfstsig=clone(fstsig);
sig_submatALL(zcounter,k)=0.01*outfstsig[0];
fst_submatALL(zcounter,k)=fst_subvec[0];
if (k==640&mgmt_counter==0) {sig_submat60(zcounter,0)=0.01*outfstsig[0];//this gives significance of each Fst value
fst_submat60(zcounter,0)=fst_subvec[0];//this gives subsampled Fst
}
if (k==680&mgmt_counter==0) {sig_submat99(zcounter,0)=0.01*outfstsig[0];//this gives significance of each Fst value
fst_submat99(zcounter,0)=fst_subvec[0];//this gives subsampled Fst
}
if (k==620&mgmt_counter==0) {sig_submat9(zcounter,0)=0.01*outfstsig[0];
fst_submat9(zcounter,0)=fst_subvec[0];
}
//arichAmat_sub=clone(arichBmat_sub); //each time shift so that we do not double sample
//n_sampA=n_sampB;
//}//end of nine comparisons
} //end of if nsamp>Ntotal something like that
}//end of k==9 or 60 and NtotalA and NtotalB>0
}//end of k= 1 to Nyrs popB
out1(zcounter,_)=mean_arichA;
out2(zcounter,_)=mean_arichB;
out25(zcounter,_)=fst_vec;
} //end of mgmt_counter
} //end of zcounter
}//end if popdyn==1
//mgmt==0 (combined)
outmat[0]=out1; //mean_arichA;
outmat[1]=out2; //mean_arichB;
outmat[2]=out3; //R_l0A
outmat[3]=out4; //R_l0B
outmat[4]=out5; //NmigA_vec
outmat[5]=out6; //NmigB_vec
outmat[6]=out7; //TACA
outmat[7]=out8; //TACB
outmat[8]=out9; //FishAvec (fishing mortality calculated for population A - either sep or comb)
outmat[9]=out10; //FishBvec (fishing mortality calculated for population B - either sep or comb)
outmat[13]=out14; //TAC total catch for both pops combined mgmt only?
outmat[14]=out15;//NtotalA;
outmat[15]=out16;//NtotalB;
outmat[20]=out21; //holds SSB/initialSSB popA
outmat[21]=out22; //holds SSB/initial SSB popB
outmat[22]=out23; //holds estSSBA/SB40 popA
outmat[23]=out24; //holds estSSBB/SB40 popA
outmat[24]=out25;//fst_vec;
outmat[25]=Wrights_fst_vec;
outmat[26]=Wrights_fst_vec1;
outmat[27]=fst_vec2;
outmat[28]=Wrights_simple;
outmat[29]=ps;;
outmat[30]=Wrights_fst_vec_sub1;
outmat[31]=Wrights_simple_sub;
outmat[32]=s2;
//outmat[3]=allelesA;
//outmat[32]=allelesB;
outmat[33]=N_init_a1;
outmat[34]=N_init_a2;
outmat[35]=allA;
outmat[36]=allB;
outmat[37]=NeAvec;
outmat[38]=NeBvec;
//outmat[39]=popA;
outmat[40]=nmig_vec;
outmat[208]=fst_submatALL;
outmat[209]=sig_submatALL;
outmat[210]=fst_submat9; //this is the subsampled Fst, 1st column is true
outmat[211]=sig_submat9; //holds significances
outmat[212]=fst_submat60; //this is the subsampled Fst, 1st column is true
outmat[213]=sig_submat60; //holds significances
outmat[214]=fst_submat99; //this is the subsampled Fst, 1st column is true
outmat[215]=sig_submat99; //holds significances
outmat[216]=arichAmat_sub620;
outmat[217]=arichBmat_sub620;
outmat[218]=arichAmat_sub640;
outmat[219]=arichBmat_sub640;
outmat[220]=arichAmat_sub680;
outmat[221]=arichBmat_sub680;
outmat[230]=inputs;
return(outmat);
'
# Compile the embedded C++ simulation code (the run8pops string defined above)
# into a callable R function via inline::cxxfunction with the RcppArmadillo plugin.
run2pops <- cxxfunction(signature(INPUTS="numeric"), body = run8pops, plugin = "RcppArmadillo")
# Input vector order:
#SSBinit,pop1_prop,Nyrs,nsamp,ngen,nmig,fishmort,samptype,nsims,N,Yrs,mmt,dpow (power of distance),Rmat,ages, popdyn (1 for yes 0 for no),nSNP (# of markers)
#out=run2pops(c(15000,0.5,700,100,1,.34,0.12,1,1,3000,1000,1,1.15,1,7,1,13,1100,1655000))
#save(out,file="silverYYfindbug.RData")
# Runner part: execute one simulation and save the result. `mig` and `seed`
# are assumed to be defined earlier in this file -- TODO confirm.
out <- run2pops(c(15000,0.5,700,100,1,.34,0.12,1,1,3000,1000,1,1.15,1,7,1,13,mig,1655000,0))
save(out, file = paste0("silverYY_", mig, "_", seed, ".RData"))
|
642150ff2dd1017a37b11b80bd7738e3108d8c5f
|
a9f0bf0ab553185c197634155eb663a1b85b80bd
|
/Traits/09_finalizing continuous and catagorical trait data.R
|
5350240b78cbde97a3d09a51caf41cd67dd77541
|
[] |
no_license
|
klapierre/CoRRE
|
1e0d8cbe6a8025e441aec1fd6a13c55063ee8605
|
64b2bb21cf1ec2406c7529f25921bfbee8be4ca5
|
refs/heads/master
| 2023-07-25T00:04:44.189065
| 2023-07-12T22:45:22
| 2023-07-12T22:45:22
| 246,136,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,919
|
r
|
09_finalizing continuous and catagorical trait data.R
|
################################################################################
## finalizing continuous and categorical traits.R: Checking imputed continuous data and gathered categorical data.
##
## Authors: Kimberly Komatsu, Meghan Avolio, Kevin Wilcox
################################################################################
#### Set up working space ####
# rm(list=ls()) clean up workspace
#library(FD)
library(PerformanceAnalytics)
# library(ggforce)
library(scales)
# tidyverse is loaded after scales/PerformanceAnalytics, so dplyr verbs
# (filter, select, ...) mask any same-named functions from earlier packages;
# keep this load order.
library(tidyverse)
# NOTE(review): machine-specific absolute path -- consider relative paths or
# here::here() so the script runs on other machines.
setwd('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\CoRRE_database\\Data') #Kim's
# setwd("C:\\Users\\wilco\\Dropbox\\shared working groups\\sDiv_sCoRRE_shared\\CoRRE data\\") # Kevin's laptop wd
# Global ggplot2 styling applied to every figure produced below.
theme_set(theme_bw())
theme_update(axis.title.x=element_text(size=30, vjust=-0.35, margin=margin(t=15)), axis.text.x=element_text(size=26),
axis.title.y=element_text(size=30, angle=90, vjust=0.5, margin=margin(r=15)), axis.text.y=element_text(size=26),
plot.title = element_text(size=34, vjust=2),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title=element_blank(), legend.text=element_text(size=30))
#### Categorical trait data ####
# Read the categorical trait table, recode uncertain entries to NA, derive a
# single n-fixation type column, and drop mosses.
categoricalTraits <- read.csv("CleanedData\\Traits\\complete categorical traits\\sCoRRE categorical trait data_12142022.csv") %>%
  dplyr::select(family, species_matched, leaf_type, leaf_compoundness, stem_support, growth_form, photosynthetic_pathway, lifespan, clonal, mycorrhizal_type, n_fixation, rhizobial, actinorhizal) %>%
  # pathways flagged as "possible ..." are too uncertain to keep
  mutate(photosynthetic_pathway = replace(photosynthetic_pathway, grep("possible", photosynthetic_pathway), NA)) %>%
  mutate(clonal = replace(clonal, clonal=="uncertain", NA)) %>%
  mutate(mycorrhizal_type = replace(mycorrhizal_type, mycorrhizal_type=="uncertain", NA)) %>%
  mutate(lifespan = replace(lifespan, lifespan=="uncertain", NA)) %>%
  # collapse the two n-fixation indicator columns into one type column;
  # species that are neither rhizobial nor actinorhizal get 'none'
  mutate(n_fixation_type=ifelse(rhizobial=='yes', 'rhizobial',
                                ifelse(actinorhizal=='yes', 'actinorhizal', 'none'))) %>%
  # drop mosses; use %in% (which never returns NA) so species whose lifespan
  # was just recoded to NA are retained -- lifespan != "moss" would silently
  # drop those NA rows as well
  filter(!(lifespan %in% "moss")) %>%
  select(-family, -n_fixation, -rhizobial, -actinorhizal)
# #### Testing out stream graphs ####
# categoricalTraitsGather <- categoricalTraits %>%
# # filter(clonal!='NA') %>%
# group_by(growth_form, leaf_type, leaf_compoundness) %>%
# summarise(value=length(species_matched)) %>%
# ungroup() %>%
# gather_set_data(c(1:3))
#
# ggplot(categoricalTraitsGather, aes(x, id = id, split = y, value = value)) +
# geom_parallel_sets(aes(fill = growth_form), alpha = 0.3, axis.width = 0.1) +
# geom_parallel_sets_axes(axis.width = 0.1) +
# geom_parallel_sets_labels(colour = 'white')
#### Pie Charts for each categorical trait ####
# All nine pie charts below follow the same two-step recipe: (1) tally the
# proportion of species in each level of one categorical trait, (2) draw those
# proportions as a pie (a polar-transformed bar chart) with a shared palette.
# The repeated code is factored into two tidy-eval helpers; each per-trait
# summary keeps its original object name for any downstream use.
pieColors <- c('#7DCBBB', '#FFFFA4', '#B0AAD1', '#F7695F', '#6EA1C9', '#FBA550', '#A5DA56', '#AD68AF')

# Tally one categorical trait: species count (n) per level (NA forms its own
# level), proportion of the total, and a percent label, sorted by proportion.
summarizeTraitLevels <- function(df, traitVar) {
  df %>%
    group_by({{ traitVar }}) %>%
    count() %>%
    ungroup() %>%
    mutate(proportion = round((n/sum(n)), digits=3)) %>%
    arrange(proportion) %>%
    mutate(labels=scales::percent(proportion))
}

# Draw a pie chart from the output of summarizeTraitLevels().
plotTraitPie <- function(traitSummary, traitVar) {
  ggplot(traitSummary, aes(x="", y=proportion, fill={{ traitVar }})) +
    geom_col() +
    coord_polar(theta="y") +
    scale_fill_manual(values=pieColors)
}

# leaf type
leafType <- summarizeTraitLevels(categoricalTraits, leaf_type)
plotTraitPie(leafType, leaf_type)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\4_leaf_type.png', width=8, height=8, units='in', dpi=300, bg='white')

# leaf compoundness
leafCompoundness <- summarizeTraitLevels(categoricalTraits, leaf_compoundness)
plotTraitPie(leafCompoundness, leaf_compoundness)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\5_leaf_compoundness.png', width=8, height=8, units='in', dpi=300, bg='white')

# stem support
stemSupport <- summarizeTraitLevels(categoricalTraits, stem_support)
plotTraitPie(stemSupport, stem_support)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\6_stem_support.png', width=8, height=8, units='in', dpi=300, bg='white')

# growth form
growthForm <- summarizeTraitLevels(categoricalTraits, growth_form)
plotTraitPie(growthForm, growth_form)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\1_growth_form.png', width=8, height=8, units='in', dpi=300, bg='white')

# photosynthetic pathway
photosyntheticPathway <- summarizeTraitLevels(categoricalTraits, photosynthetic_pathway)
plotTraitPie(photosyntheticPathway, photosynthetic_pathway)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\7_photosynthetic_pathway.png', width=8, height=8, units='in', dpi=300, bg='white')

# lifespan
lifespan <- summarizeTraitLevels(categoricalTraits, lifespan)
plotTraitPie(lifespan, lifespan)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\2_lifespan.png', width=8, height=8, units='in', dpi=300, bg='white')

# clonal
clonal <- summarizeTraitLevels(categoricalTraits, clonal)
plotTraitPie(clonal, clonal)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\3_clonal.png', width=8, height=8, units='in', dpi=300, bg='white')

# mycorrhizal type
mycorrhizalType <- summarizeTraitLevels(categoricalTraits, mycorrhizal_type)
plotTraitPie(mycorrhizalType, mycorrhizal_type)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\8_mycorrhizal_type.png', width=8, height=8, units='in', dpi=300, bg='white')

# n fixation type
nFixationType <- summarizeTraitLevels(categoricalTraits, n_fixation_type)
plotTraitPie(nFixationType, n_fixation_type)
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\pie chart\\9_n_fixation_type.png', width=8, height=8, units='in', dpi=300, bg='white')
#### Continuous traits ####
# Read species data to remove mosses.
# mossKey: one row per species with a "moss"/"non-moss" flag derived from leaf_type.
mossKey <- read.csv("CleanedData\\Traits\\complete categorical traits\\sCoRRE categorical trait data_12142022.csv") %>%
dplyr::select(species_matched, leaf_type) %>%
mutate(moss = ifelse(leaf_type=="moss", "moss","non-moss")) %>%
dplyr::select(-leaf_type)
# Read in imputed trait data and bind on species information.
## this is trait data without replacement (all imputed)
# NOTE(review): bind_cols() assumes the imputed CSV and the raw CSV have
# identical row counts and row order -- confirm the upstream pipeline
# guarantees this, otherwise rows are silently mismatched.
imputedRaw <- read.csv("CleanedData\\Traits\\gap filled continuous traits\\20230623_final\\imputed_traits_mice.csv") %>%
bind_cols(read.csv('OriginalData\\Traits\\raw traits for gap filling\\TRYAusBIEN_continuous_June2023.csv')[,c('DatabaseID', 'DatasetID', 'ObservationID', 'family', 'genus', 'species_matched')]) %>%
# left_join with no `by` joins on the shared column(s), presumably
# species_matched; species absent from mossKey get moss = NA.
left_join(mossKey) %>%
# %in% with NA in the lookup set returns TRUE for NA, so species missing from
# mossKey are treated as non-moss here.
mutate(moss2=ifelse(moss %in% c('non-moss', NA), 1, 0)) %>% #accounts for all GEx spp being non-moss
filter(moss2==1) %>%
dplyr::select(-moss, -moss2) #removes 20 observations from 5 species
# Long format: one row per observation x trait (imputed values).
imputedLong <- imputedRaw %>%
pivot_longer(names_to='trait', values_to='imputed_value', seed_dry_mass:SRL)
# Read original trait data and join with imputed data.
originalRaw <- read.csv('OriginalData\\Traits\\raw traits for gap filling\\TRYAusBIEN_continuous_June2023.csv') %>%
pivot_longer(names_to='trait', values_to='original_value', seed_dry_mass:SRL) %>%
na.omit()
# Join original trait data with imputed data. Only keep traits of interest.
allContinuous <- imputedLong %>%
left_join(originalRaw) %>%
filter(trait %in% c('LDMC', 'leaf_area', 'leaf_dry_mass', 'leaf_N', 'plant_height_vegetative', 'seed_dry_mass', 'SLA', 'SRL'))
# Wide companion table of imputed values only (one column per trait).
allContinuousWide <- allContinuous %>%
select(-original_value) %>%
pivot_wider(names_from=trait, values_from=imputed_value)
# Calculate per-species means and SDs of imputed and original values for each trait.
meanContinuous <- allContinuous %>%
  group_by(family, species_matched, trait) %>%
  # across() replaces the superseded summarize_at(); the default .names spec
  # ("{.col}_{.fn}") keeps the original output column names
  # (imputed_value_mean, imputed_value_sd, original_value_mean,
  # original_value_sd), though column order may differ slightly.
  summarize(across(c(imputed_value, original_value),
                   list(mean = ~mean(.x, na.rm = TRUE),
                        sd = ~sd(.x, na.rm = TRUE)))) %>%
  ungroup()
# Number of species represented per family.
speciesCount <- meanContinuous %>%
  select(family, species_matched) %>%
  distinct() %>%
  group_by(family) %>%
  summarize(num_species = n()) %>%
  ungroup() #147 families
sum(speciesCount$num_species) # total number of species across families
# Compare imputed to original continuous trait data (species means).
ggplot(data=na.omit(meanContinuous), aes(x=original_value_mean, y=imputed_value_mean)) +
  geom_point() +
  geom_abline(slope=1) +
  facet_wrap(~trait, scales='free') +
  xlab('Mean Original Value') + ylab('Mean Imputed Value') +
  theme(strip.text.x = element_text(size = 12))
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig x_mean original v imputed_20230620.png', width=12, height=12, units='in', dpi=300, bg='white')
# Original-vs-imputed scatterplots for three focal families.
# Only grasses -- 11620 species
allContinuous %>%
  filter(family == 'Poaceae') %>%
  na.omit() %>%
  ggplot(aes(x = original_value, y = imputed_value)) +
  geom_point() +
  geom_abline(slope = 1) +
  facet_wrap(~trait, scales = 'free')
# Only asters -- 11480
allContinuous %>%
  filter(family == 'Asteraceae') %>%
  na.omit() %>%
  ggplot(aes(x = original_value, y = imputed_value)) +
  geom_point() +
  geom_abline(slope = 1) +
  facet_wrap(~trait, scales = 'free')
# Only legumes -- 4620
allContinuous %>%
  filter(family == 'Fabaceae') %>%
  na.omit() %>%
  ggplot(aes(x = original_value, y = imputed_value)) +
  geom_point() +
  geom_abline(slope = 1) +
  facet_wrap(~trait, scales = 'free')
# Compare raw (observation-level) imputed and original data.
allContinuous %>%
  na.omit() %>%
  ggplot(aes(x = original_value, y = imputed_value)) +
  geom_point() +
  geom_abline(slope = 1) +
  facet_wrap(~trait, scales = 'free') +
  xlab('Original Value') + ylab('Imputed Value') +
  theme(strip.text.x = element_text(size = 12))
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig x_pre cleaning original v imputed.png', width=12, height=12, units='in', dpi=300, bg='white')
# Stack imputed and original values into one long table for plotting.
allTogether <- allContinuous %>%
  pivot_longer(imputed_value:original_value,
               names_to = 'data_type', values_to = 'trait_value') %>%
  # mutate(data_type=ifelse(data_type=='original_value', DatabaseID, data_type)) %>%
  na.omit()
# Look at boxplots for each trait.
allTogether %>%
  ggplot(aes(x = data_type, y = trait_value)) +
  geom_boxplot() +
  facet_wrap(~trait, scales = 'free')
#### Clean imputed continuous trait data ####
# Checked to ensure no negative values (confirmed that there are none)
# Things that look problematic but Kim thinks are real: leaf_area (some forbs with huge leaves), plant_height_vegetative (vines that have big heights like Vitus sp and virginia creeper), seed dry mass (consistently low numbers for some species that probably do have tiny seeds like orchids)
# Overall mean/sd of each trait, used to compute trait-wide z-scores below.
meanSD <- allContinuous %>%
  group_by(trait) %>%
  summarize(across('imputed_value', .fns=list(mean=mean, sd=sd))) %>%
  ungroup()
# Species-level mean/sd (and n) per trait, for within-species z-scores.
meanSDSpecies <- allContinuous %>%
  group_by(trait, species_matched) %>%
  summarize(across('imputed_value', .fns=list(species_mean=mean, species_sd=sd, species_length=length))) %>%
  ungroup()
# Drop imputed values that are extreme relative to the trait-wide or the
# within-species distribution (|z| >= 4), then attach display labels (trait2).
cleanContinuous <- allContinuous %>%
  #calculate z-scores (error risk) for continuous traits
  left_join(meanSD) %>%
  left_join(meanSDSpecies) %>%
  mutate(error_risk_overall=(imputed_value-imputed_value_mean)/imputed_value_sd) %>%
  mutate(error_risk_species=(imputed_value-imputed_value_species_mean)/imputed_value_species_sd) %>%
  # NOTE(review): the overall filter is one-sided (<4) while the species filter
  # is two-sided; presumably intentional (only large positive outliers flagged
  # trait-wide) -- confirm. filter() also silently drops rows where the z-score
  # is NA (e.g. species with a single observation, sd = NA).
  filter(error_risk_overall<4) %>% #drops 10,677 observations (0.52% of data)
  filter(error_risk_species<4 & error_risk_species>(-4)) %>% #drops an additional 19,183 observations (0.94% of data), all of which were from species with at least 18 observations for the given trait value being dropped
  mutate(trait2=ifelse(trait=='leaf_area', 'Leaf Area (leaf, +petiole)',
                ifelse(trait=='SLA', 'Specific Leaf Area (+petiole)',
                ifelse(trait=='SRL', 'Specific Root Length (all root)',
                ifelse(trait=='leaf_N', 'Leaf N Content',
                ifelse(trait=='plant_height_vegetative', 'Plant Vegetative Height',
                ifelse(trait=='seed_dry_mass', 'Seed Dry Mass',
                ifelse(trait=='leaf_dry_mass', 'Leaf Dry Mass',
                ifelse(trait=='LDMC', 'Leaf Dry Matter Content',
                       trait)))))))))
# Long-format table of cleaned data: one row per (observation, data source),
# with data_type2 distinguishing each original database from imputed values.
# (Variable name 'cleanContinousWide' keeps the original spelling; it is
# referenced throughout the rest of the script.)
cleanContinousWide <- cleanContinuous %>%
  pivot_longer(cols=c('original_value', 'imputed_value'), names_to='data_type', values_to='trait_value') %>%
  mutate(data_type2=ifelse(data_type=='original_value', DatabaseID, data_type)) %>%
  na.omit()
#### Root mean square error ####
# Normalized RMSE of imputed vs original values, per trait
# (NRMSE = RMSE divided by the mean original value).
cleanContinuousNRMSEtrait <- cleanContinuous %>%
  select(trait, original_value, imputed_value) %>%
  na.omit() %>%
  mutate(sq_diff=(imputed_value-original_value)^2) %>%
  group_by(trait) %>%
  summarise(sum=sum(sq_diff), n=length(trait), min=min(original_value), max=max(original_value), mean=mean(original_value)) %>%
  ungroup() %>%
  mutate(NRMSE=sqrt(sum/n)/mean)
# Same NRMSE pooled across all traits.
cleanContinuousNRMSE <- cleanContinuous %>%
  select(original_value, imputed_value) %>%
  na.omit() %>%
  mutate(sq_diff=(imputed_value-original_value)^2) %>%
  summarise(sum=sum(sq_diff), n=length(sq_diff), min=min(original_value), max=max(original_value), mean=mean(original_value)) %>%
  mutate(NRMSE=sqrt(sum/n)/(mean))
#### Boxplots for each trait ####
# Fix the facet order for the figure.
cleanContinousWide$trait2 = factor(cleanContinousWide$trait2, levels=c('Leaf Area (leaf, +petiole)', 'Leaf Dry Mass', 'Leaf Dry Matter Content', 'Specific Leaf Area (+petiole)', 'Leaf N Content', 'Plant Vegetative Height', 'Specific Root Length (all root)', 'Seed Dry Mass'))
# Jittered boxplots of every observation, by source database vs imputed.
ggplot(data=cleanContinousWide, aes(x=as.factor(data_type2), y=trait_value)) +
  geom_jitter(aes(color=data_type2)) +
  geom_boxplot(color='black', alpha=0) +
  facet_wrap(~trait2, scales='free_y', ncol=3, labeller=label_wrap_gen(width=25)) +
  scale_x_discrete(breaks=c("AusTraits", "BIEN", "CPTD2", "TIPleaf", "TRY", "imputed_value"),
                   limits=c("AusTraits", "BIEN", "CPTD2", "TIPleaf", "TRY", "imputed_value"),
                   labels=c("Au", "BN", "C2", "TP", "TY", "imp")) +
  scale_color_manual(values=c('#4E3686', '#5DA4D9', '#80D87F', 'darkgrey', '#FED23F', '#EE724C')) +
  theme_bw() +
  theme(panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        legend.position='none',
        strip.text.x = element_text(size = 20),
        axis.title.x=element_text(size=22, vjust=-0.35, margin=margin(t=15)), axis.text.x=element_text(size=22),
        axis.title.y=element_text(size=22, angle=90, vjust=0.5, margin=margin(r=15)), axis.text.y=element_text(size=22)) +
  xlab('Data Type') + ylab('Trait Value') +
  scale_y_continuous(trans='log10', labels=label_comma())
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig 4_boxplots of original and imputed_20230623_jitter.png', width=14, height=15, units='in', dpi=300, bg='white')
#Look at boxplots for each trait -- means by species
# Species-level means of each trait, per data source, for the boxplot figure.
# FIX: the original used summarise(trait_value_mean=trait_value), which does not
# aggregate at all (it returns one row per input row, with a dplyr warning)
# despite the *_mean name and the "means by species" intent; take the group
# mean explicitly.
cleanContinuousWideBoxplot <- cleanContinousWide %>%
  group_by(DatabaseID, data_type2, species_matched, trait, trait2) %>%
  summarise(trait_value_mean=mean(trait_value)) %>%
  ungroup()
# Same jittered boxplot figure as above, but one point per species mean.
ggplot(data=cleanContinuousWideBoxplot, aes(x=as.factor(data_type2), y=trait_value_mean)) +
  geom_jitter(aes(color=data_type2)) +
  geom_boxplot(color='black', alpha=0) +
  facet_wrap(~trait2, scales='free_y', ncol=3, labeller=label_wrap_gen(width=25)) +
  scale_x_discrete(breaks=c("AusTraits", "BIEN", "CPTD2", "TIPleaf", "TRY", "imputed_value"),
                   limits=c("AusTraits", "BIEN", "CPTD2", "TIPleaf", "TRY", "imputed_value"),
                   labels=c("Au", "BN", "C2", "TP", "TY", "imp")) +
  scale_color_manual(values=c('#4E3686', '#5DA4D9', '#80D87F', 'darkgrey', '#FED23F', '#EE724C')) +
  theme_bw() +
  theme(panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        legend.position='none',
        strip.text.x = element_text(size = 20),
        axis.title.x=element_text(size=22, vjust=-0.35, margin=margin(t=15)), axis.text.x=element_text(size=22),
        axis.title.y=element_text(size=22, angle=90, vjust=0.5, margin=margin(r=15)), axis.text.y=element_text(size=22)) +
  xlab('Data Type') + ylab('Trait Value') +
  scale_y_continuous(trans='log10', labels=label_comma())
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig 4_boxplots of original and imputed_20230623_jitter_means.png', width=14, height=15, units='in', dpi=300, bg='white')
# Look at boxplots for each trait (spot-check a few familiar species)
ggplot(data=subset(cleanContinousWide, species_matched %in% c('Helianthus maximiliani', 'Potentilla anserina', 'Clintonia borealis')), aes(x=species_matched, y=trait_value)) +
  geom_boxplot(aes(color=data_type)) +
  facet_wrap(~trait2, scales='free')
ggplot(data=subset(cleanContinousWide, species_matched=='Andropogon gerardii'), aes(x=data_type, y=trait_value)) +
  geom_boxplot() +
  facet_wrap(~trait2, scales='free')
# Fix facet order on the cleaned (wide) data as well.
cleanContinuous$trait2 = factor(cleanContinuous$trait2, levels=c('Leaf Area (leaf, +petiole)', 'Leaf Dry Mass', 'Leaf Dry Matter Content', 'Specific Leaf Area (+petiole)', 'Leaf N Content', 'Plant Vegetative Height', 'Specific Root Length (all root)', 'Seed Dry Mass'))
#### Correlation statistics for each trait ####
# For each trait: histograms of log10 original/imputed values (normality check),
# Pearson correlation of original vs imputed, then a log-log regression of
# imputed on original. The "# ..." comments record the results of each run.
#leaf area
with(subset(cleanContinuous, trait=='leaf_area'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='leaf_area'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='leaf_area'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9710058
summary(leaf_area <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='leaf_area'&!is.na(original_value))))
confint(leaf_area)
# slope: 0.9741485, SE: 0.0009489, CI: 0.00185984
# Adjusted R-squared:  0.9792
# F-statistic: 1.054e+06 on 1 and 22378 DF,  p-value: < 2.2e-16
# 2.5 %     97.5 %
# (Intercept)           0.06288035 0.07264136
# log10(original_value) 0.97228869 0.97600834
#leaf dry mass
with(subset(cleanContinuous, trait=='leaf_dry_mass'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='leaf_dry_mass'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='leaf_dry_mass'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9577364
summary(leaf_dry_mass <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='leaf_dry_mass'&!is.na(original_value))))
confint(leaf_dry_mass)
# slope: 0.9700605 , SE: 0.0009431, CI: 0.001848476
# Adjusted R-squared:  0.9709
# F-statistic: 1.058e+06 on 1 and 31750 DF,  p-value: < 2.2e-16
# 2.5 %     97.5 %
# (Intercept)           0.02538688 0.03084578
# log10(original_value) 0.96821193 0.97190914
#LDMC
with(subset(cleanContinuous, trait=='LDMC'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='LDMC'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='LDMC'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9787819
summary(LDMC <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='LDMC'&!is.na(original_value))))
confint(LDMC)
# slope: 0.9541052, SE: 0.0004651, CI: 0.000911596
# Adjusted R-squared:  0.961
# F-statistic: 1.551e+06 on 1 and 63003 DF,  p-value: < 2.2e-16
# 2.5 %     97.5 %
# (Intercept)           -0.02772662 -0.0259034
# log10(original_value)  0.95260343  0.9556070
#SLA
with(subset(cleanContinuous, trait=='SLA'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='SLA'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='SLA'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.955878
summary(SLA <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='SLA'&!is.na(original_value))))
confint(SLA)
# slope: 0.912210 SE: 0.002140 CI: 0.0041944
# Adjusted R-squared:  0.92
# F-statistic: 3.077e+05 on 1 and 26771 DF,  p-value: < 2.2e-16
# 2.5 %    97.5 %
# (Intercept)           0.1100690 0.1184596
# log10(original_value) 0.9089866 0.9154328
#leaf N
with(subset(cleanContinuous, trait=='leaf_N'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='leaf_N'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='leaf_N'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9627765
summary(leaf_N <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='leaf_N'&!is.na(original_value))))
confint(leaf_N)
# slope: 0.939524 SE: 0.001490 CI: 0.0029204
# Adjusted R-squared:  0.9377
# F-statistic: 3.976e+05 on 1 and 26392 DF,  p-value: < 2.2e-16
# 2.5 %     97.5 %
# (Intercept)           0.07658514 0.08444615
# log10(original_value) 0.93660334 0.94244461
#plant vegetative height
with(subset(cleanContinuous, trait=='plant_height_vegetative'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='plant_height_vegetative'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='plant_height_vegetative'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9471077
summary(plant_height_vegetative <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='plant_height_vegetative'&!is.na(original_value))))
confint(plant_height_vegetative)
# slope: 0.9217863 SE: 0.0009958 CI: 0.001951768
# Adjusted R-squared:  0.9307
# F-statistic: 8.569e+05 on 1 and 63842 DF,  p-value: < 2.2e-16
# 2.5 %      97.5 %
# (Intercept)           -0.04571394 -0.04256993
# log10(original_value)  0.91983456  0.92373803
#SRL
with(subset(cleanContinuous, trait=='SRL'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='SRL'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='SRL'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9002603
summary(SRL <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='SRL'&!is.na(original_value))))
confint(SRL)
# slope: 0.916581 SE: 0.005051 CI: 0.00989996
# Adjusted R-squared:  0.9215
# F-statistic: 3.293e+04 on 1 and 2804 DF,  p-value: < 2.2e-16
# 2.5 %    97.5 %
# (Intercept)           0.2794818 0.3558467
# log10(original_value) 0.9066768 0.9264859
#seed dry mass
with(subset(cleanContinuous, trait=='seed_dry_mass'), hist(log10(original_value)))
with(subset(cleanContinuous, trait=='seed_dry_mass'), hist(log10(imputed_value)))
with(subset(cleanContinuous, trait=='seed_dry_mass'), cor.test(original_value, imputed_value,method = "pearson", use = "complete.obs"))
# r 0.9930513
summary(seed_dry_mass <- lm(log10(imputed_value)~log10(original_value), data=subset(cleanContinuous, trait=='seed_dry_mass'&!is.na(original_value))))
confint(seed_dry_mass)
# slope: 0.9888753 SE: 0.0002790 CI: 0.00054684
# Adjusted R-squared:  0.9962
# F-statistic: 1.256e+07 on 1 and 47880 DF,  p-value: < 2.2e-16
# 2.5 %      97.5 %
# (Intercept)           0.001306287 0.002106772
# log10(original_value) 0.988328318 0.989422193
# Compare cleaned imputed and original data (publication figure: log-log
# scatter with 1:1 line and fitted lm per trait)
ggplot(data=na.omit(cleanContinuous), aes(x=original_value, y=imputed_value)) +
  geom_abline(slope=1, linewidth=2, color='black') +
  geom_point(color='darkgrey') +
  geom_smooth(linewidth=2, se=T, color='#e47d0099', method='lm') +
  facet_wrap(~trait2, scales='free', ncol=3, labeller=label_wrap_gen(width=25)) +
  xlab('Original Value') + ylab('Imputed Value') +
  scale_y_continuous(trans='log10', labels = label_comma()) +
  scale_x_continuous(trans='log10', labels = label_comma()) +
  theme(strip.text.x = element_text(size = 28),
        axis.title.x=element_text(size=32, vjust=-0.35, margin=margin(t=15)), axis.text.x=element_text(size=32),
        axis.title.y=element_text(size=32, angle=90, vjust=0.5, margin=margin(r=15)), axis.text.y=element_text(size=32))
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig 5_original v imputed_20230623.png', width=18, height=16, units='in', dpi=300, bg='white')
# look up some values for species that we know and make sure they are right
ggplot(data=subset(cleanContinuous, species_matched %in% c('Ruellia humilis', 'Andropogon gerardii', 'Parthenocissus quinquefolia')),
       aes(x=species_matched, y=imputed_value)) +
  geom_boxplot() +
  facet_wrap(~trait, scales='free') +
  scale_x_discrete(breaks=c("Andropogon gerardii", "Parthenocissus quinquefolia", "Ruellia humilis"),
                   limits=c("Andropogon gerardii", "Parthenocissus quinquefolia", "Ruellia humilis"),
                   labels=c("Ag", "Pq", "Rh"))
##### Mean values for each species #####
# One mean imputed value per (family, genus, species, trait).
meanCleanContinuous <- cleanContinuous %>%
  group_by(family, genus, species_matched, trait) %>%
  summarize(trait_value=mean(imputed_value)) %>%
  ungroup()
# Mean/sd of the species means: trait-wide, per family, and per genus
# (used below to flag outlier species means). Note: this reassigns meanSD.
meanSD <- meanCleanContinuous %>%
  group_by(trait) %>%
  summarize(across('trait_value', .fns=list(mean=mean, sd=sd))) %>%
  ungroup()
meanSDFamily <- meanCleanContinuous %>%
  group_by(trait, family) %>%
  summarize(across('trait_value', .fns=list(family_mean=mean, family_sd=sd, family_length=length))) %>%
  ungroup()
meanSDGenus <- meanCleanContinuous %>%
  group_by(trait, genus) %>%
  summarize(across('trait_value', .fns=list(genus_mean=mean, genus_sd=sd, genus_length=length))) %>%
  ungroup()
# Species-mean trait values with z-score style "error risk" flags relative to
# the overall, family-, and genus-level distributions (family/genus risk only
# computed where the group has >2 members).
# FIX: the original final select() mixed negative and positive selections
# (select(-imputed_value_mean, imputed_value_sd, original_value_sd)); with
# tidyselect that leaves the two sd columns in the result instead of dropping
# them as intended -- negate all three so only original_value_mean is kept
# from the joined summary columns (it is used in the plots below).
meanCleanContinuousErrorRisk <- meanCleanContinuous %>%
  left_join(meanSD) %>%
  left_join(meanSDFamily) %>%
  left_join(meanSDGenus) %>%
  mutate(error_risk_overall=(trait_value-trait_value_mean)/trait_value_sd,
         error_risk_family=ifelse(trait_value_family_length>2, (trait_value-trait_value_family_mean)/trait_value_family_sd, NA),
         error_risk_genus=ifelse(trait_value_genus_length>2, (trait_value-trait_value_genus_mean)/trait_value_genus_sd, NA)) %>%
  select(family, genus, species_matched, trait, trait_value, error_risk_overall, error_risk_family, error_risk_genus) %>%
  left_join(meanContinuous) %>% # NOTE(review): meanContinuous is defined earlier in the file; assumed to carry original/imputed mean and sd columns -- confirm
  select(-imputed_value_mean, -imputed_value_sd, -original_value_sd) %>%
  mutate(trait2=ifelse(trait=='leaf_area', 'Leaf Area (leaf, +petiole)',
                ifelse(trait=='SLA', 'Specific Leaf Area (+petiole)',
                ifelse(trait=='SRL', 'Specific Root Length (all root)',
                ifelse(trait=='leaf_N', 'Leaf N Content',
                ifelse(trait=='plant_height_vegetative', 'Plant Vegetative Height',
                ifelse(trait=='seed_dry_mass', 'Seed Dry Mass',
                ifelse(trait=='leaf_dry_mass', 'Leaf Dry Mass',
                ifelse(trait=='LDMC', 'Leaf Dry Matter Content',
                       trait)))))))))
# Distribution of species-mean trait values per trait.
ggplot(data=meanCleanContinuousErrorRisk, aes(x=trait_value)) +
  geom_histogram() +
  facet_wrap(~trait2)
# Mean original vs mean imputed value per species (se=T is the T/F shorthand;
# NOTE(review): prefer TRUE in new code).
ggplot(data=na.omit(meanCleanContinuousErrorRisk), aes(x=original_value_mean, y=trait_value)) +
  geom_point() +
  geom_abline(slope=1) +
  geom_smooth(se=T, color='darkorange') +
  facet_wrap(~trait2, scales='free') +
  theme(strip.text.x = element_text(size = 12)) +
  xlab('Mean Original Value') + ylab('Mean Imputed Value')
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig 5_mean original v imputed_20230623.png', width=12, height=12, units='in', dpi=300, bg='white')
# Long format for boxplots of mean original vs mean imputed values.
meanCleanContinuousWide <- meanCleanContinuousErrorRisk %>%
  pivot_longer(cols=c('original_value_mean', 'trait_value'))
# Count of species/family combinations represented.
sppnum <- meanCleanContinuousWide %>%
  select(species_matched, family) %>%
  unique()
ggplot(data=na.omit(meanCleanContinuousWide), aes(x=name, y=value)) +
  geom_boxplot() +
  facet_wrap(~trait, scales='free') +
  theme(strip.text.x = element_text(size = 12)) +
  xlab('') + ylab('Trait Value') +
  scale_y_continuous(trans='log10')
# ggsave('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\DataPaper\\2023_sCoRRE_traits\\figures\\Fig 6_mean original v imputed_20230608.png', width=12, height=12, units='in', dpi=300, bg='white')
##### Combine continuous and categorical traits #####
# CoRRE species list: drop trees and unidentified species, standardize to
# binomial 'species_matched' names.
correSpecies <- read.csv("CompiledData\\Species_lists\\FullList_Nov2021.csv") %>% #species names are standardized
  left_join(read.csv("CompiledData\\Species_lists\\species_families_trees_2021.csv")) %>%
  filter(tree.non.tree != "tree") %>% #Remove trees
  separate(species_matched, into=c('genus', 'species', 'subspp'), sep=' ') %>%
  filter(species!='sp.') %>%
  unite(col='species_matched', genus:species, sep=' ', remove=T) %>%
  select(family, species_matched) %>%
  unique()
# # Import GEx species names
# GExSpecies <- read.csv('OriginalData\\Traits\\GEx_species_tree_complete.csv') %>%
#   select(family, species_matched) %>%
#   unique()
#
# sppNames <- rbind(correSpecies, GExSpecies) %>%
#   unique() %>%
#   mutate(drop=ifelse(species_matched=='Dianella longifolia'&family=='Xanthorrhoeaceae', 1,
#                      ifelse(species_matched=='Lancea tibetica'&family=='Phrymaceae', 1, 0))) %>%
#   filter(drop==0) %>%
#   select(-drop)
# Categorical traits in long format; error-risk columns are NA placeholders so
# the table can be row-bound with the continuous data below.
longCategorical <- categoricalTraits %>%
  pivot_longer(leaf_type:n_fixation_type, names_to="trait", values_to="trait_value") %>%
  mutate(error_risk_overall=NA,
         error_risk_family=NA,
         error_risk_genus=NA) %>%
  left_join(correSpecies)
#### START HERE: need GEx categorical to bind onto continuous data ####
# Final combined trait table (continuous species means + categorical traits).
traitsAll <- meanCleanContinuousErrorRisk %>%
  select(family, species_matched, trait, trait_value, error_risk_overall, error_risk_family, error_risk_genus) %>%
  rbind(longCategorical)
# write.csv(traitsAll, 'CleanedData\\Traits\\CoRRE_allTraitData_June2023.csv', row.names=F)
# Wide version (one column per trait).
traitsWide <- traitsAll %>%
  select(-error_risk_overall, -error_risk_family, -error_risk_genus) %>%
  pivot_wider(names_from=trait, values_from=trait_value)
# write.csv(traitsWide, 'CleanedData\\Traits\\CoRRE_allTraitData_wide_June2023.csv', row.names=F)
# #### testing if imputation runs are different ####
# imputedRaw0620 <- read.csv("CleanedData\\Traits\\gap filled continuous traits\\20230620\\imputed_traits_mice.csv") %>%
# mutate(run='run_2') %>%
# rowid_to_column(var="rowid")
#
# imputedRaw0608 <- read.csv("CleanedData\\Traits\\gap filled continuous traits\\20230608\\imputed_traits_mice.csv") %>%
# mutate(run='run_1') %>%
# rowid_to_column(var="rowid")
#
# compareImputed <- rbind(imputedRaw0608, imputedRaw0620) %>%
# pivot_longer(cols=seed_dry_mass:SRL, names_to='trait', values_to='values') %>%
# pivot_wider(names_from=run, values_from=values) %>%
# left_join(originalRaw)
#
# ggplot(data=subset(compareImputed, trait=='SLA'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(compareImputed, trait=='seed_dry_mass'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value') +
# scale_x_continuous(trans='log10') +
# scale_y_continuous(trans='log10')
#
# ggplot(data=subset(compareImputed, trait=='plant_height_vegetative'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value') +
# scale_x_continuous(trans='log10') +
# scale_y_continuous(trans='log10')
#
# ggplot(data=subset(compareImputed, trait=='SRL'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(compareImputed, trait=='LDMC'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(compareImputed, trait=='leaf_area'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value') +
# scale_x_continuous(trans='log10') +
# scale_y_continuous(trans='log10')
#
# ggplot(data=subset(compareImputed, trait=='leaf_N'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(compareImputed, trait=='leaf_dry_mass'), aes(x=run_1, y=run_2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
#
# #only those with original data
# imputedRawRun1 <- read.csv("CleanedData\\Traits\\gap filled continuous traits\\20230608\\imputed_traits_mice.csv") %>%
# bind_cols(read.csv('OriginalData\\Traits\\raw traits for gap filling\\TRYAusBIEN_continuous_June2023.csv')[,c('DatabaseID', 'DatasetID', 'ObservationID', 'family', 'genus', 'species_matched')]) %>%
# left_join(mossKey) %>%
# filter(moss!="moss") %>%
# dplyr::select(-moss) #removes 6 species observations
#
# imputedLongRun1 <- imputedRawRun1 %>%
# pivot_longer(names_to='trait', values_to='imputed_value', seed_dry_mass:SRL) %>%
# rename(imputed_value_run1=imputed_value)
#
# imputedRawRun2 <- read.csv("CleanedData\\Traits\\gap filled continuous traits\\20230620\\imputed_traits_mice.csv") %>%
# bind_cols(read.csv('OriginalData\\Traits\\raw traits for gap filling\\TRYAusBIEN_continuous_June2023.csv')[,c('DatabaseID', 'DatasetID', 'ObservationID', 'family', 'genus', 'species_matched')]) %>%
# left_join(mossKey) %>%
# filter(moss!="moss") %>%
# dplyr::select(-moss) #removes 6 species observations
#
# imputedLongRun2 <- imputedRawRun2 %>%
# pivot_longer(names_to='trait', values_to='imputed_value', seed_dry_mass:SRL) %>%
# rename(imputed_value_run2=imputed_value)
#
# # Read original trait data and join with imputed data
# originalRaw <- read.csv('OriginalData\\Traits\\raw traits for gap filling\\TRYAusBIEN_continuous_June2023.csv') %>%
# pivot_longer(names_to='trait', values_to='original_value', seed_dry_mass:SRL) %>%
# na.omit()
#
#
# # Join original trait data with imputed data. Only keep traits of interest.
# allContinuousComparison <- imputedLongRun1 %>%
# left_join(imputedLongRun2) %>%
# left_join(originalRaw) %>%
# filter(trait %in% c('LDMC', 'leaf_area', 'leaf_dry_mass', 'leaf_N', 'plant_height_vegetative', 'seed_dry_mass', 'SLA', 'SRL'))
#
# originalComparison <- allContinuousComparison %>%
# na.omit()
#
# ggplot(data=subset(originalComparison, trait=='SLA'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(originalComparison, trait=='seed_dry_mass'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value') +
# scale_x_continuous(trans='log10') +
# scale_y_continuous(trans='log10')
#
# ggplot(data=subset(originalComparison, trait=='plant_height_vegetative'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value') +
# scale_x_continuous(trans='log10') +
# scale_y_continuous(trans='log10')
#
# ggplot(data=subset(originalComparison, trait=='SRL'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(originalComparison, trait=='LDMC'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(originalComparison, trait=='leaf_area'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value') +
# scale_x_continuous(trans='log10') +
# scale_y_continuous(trans='log10')
#
# ggplot(data=subset(originalComparison, trait=='leaf_N'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
#
# ggplot(data=subset(originalComparison, trait=='leaf_dry_mass'), aes(x=imputed_value_run1, y=imputed_value_run2)) +
# geom_point() +
# geom_abline(slope=1) +
# xlab('Run 1 Value') + ylab('Run 2 Value')
|
1caf803a09bfbf1d9a9f2aff3673b2283a591950
|
5aa0650617ba6ac21d8c1a73daa141f6f63ba0e6
|
/02_Functions/B5_calc_curvature_and_torsion.R
|
00a28acb9a2a45261c2d1fffcb7b80b5039117f1
|
[
"Unlicense"
] |
permissive
|
k-kobay/GDS
|
e661e9329b9061ac82131f79818796cb132d46c3
|
d019fdc0527eb526b42da8f130ece8a62a4eeb3d
|
refs/heads/master
| 2021-09-09T12:29:57.836532
| 2018-03-16T04:43:13
| 2018-03-16T04:43:13
| 123,993,988
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,457
|
r
|
B5_calc_curvature_and_torsion.R
|
# Discrete curvature, torsion, and total length of a closed 3D polyline.
#
# x: an n x 3 matrix (or data frame) of point coordinates. The curve is
#    treated as closed, so all finite differences wrap around from the last
#    point back to the first.
# Returns a list with:
#   length    -- sum of chord lengths between consecutive points
#   curvature -- per-row discrete curvature  |r' x r''| / |r'|^3
#   torsion   -- per-row discrete torsion    (r''' . (r' x r'')) / |r' x r''|^2
mycalc_3dCT <- function(x){
  n <- nrow(x)
  nxt <- c(2:n, 1)  # wrap-around index of the following point
  # Finite differences; row k quantities below are centred at point k+1,
  # exactly as in the row-shifted apply() formulation this replaces.
  d1  <- x[nxt, ] - x               # forward first difference
  d1c <- (d1 + d1[nxt, ]) / 2       # averaged (central) first derivative
  d2  <- d1[nxt, ] - d1             # second difference
  d3  <- d2[nxt, ] - d2             # third difference
  d3c <- (d3 + d3[nxt, ]) / 2       # averaged third derivative
  # Components of the cross product d1c x d2 (curvature numerator).
  cpx <- d2[, 3] * d1c[, 2] - d2[, 2] * d1c[, 3]
  cpy <- d2[, 1] * d1c[, 3] - d2[, 3] * d1c[, 1]
  cpz <- d2[, 2] * d1c[, 1] - d2[, 1] * d1c[, 2]
  speed2 <- d1c[, 1]^2 + d1c[, 2]^2 + d1c[, 3]^2   # |d1c|^2
  cross2 <- cpx^2 + cpy^2 + cpz^2                  # |d1c x d2|^2
  # curvature: kappa = |r' x r''| / |r'|^3
  curvature <- sqrt(cross2) / (speed2^{(3/2)})
  # torsion: tau = (r''' . (r' x r'')) / |r' x r''|^2
  torsion <- (d3c[, 1] * cpx + d3c[, 2] * cpy + d3c[, 3] * cpz) / cross2
  # length: perimeter of the closed polyline (chord lengths of d1)
  c_length <- sum(sqrt(d1[, 1]^2 + d1[, 2]^2 + d1[, 3]^2))
  # return
  return(list(length = c_length, curvature = curvature, torsion = torsion))
}
|
134ed22489f69b1853326a509dd4cb93218e7ec5
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/man/Inflation.Rd
|
47a5b6fa4ab0c9a35162fa35318299b61f5838dd
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,114
|
rd
|
Inflation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Inflation}
\alias{Inflation}
\title{Inflation data}
\format{
A data frame with 23 observations on the following 5 variables.
\describe{ \item{country}{ country where data were collected}
\item{ques}{ questionnaire index of independence}
\item{inf}{ annual inflation rate, 1980-1989 (percent)}
\item{legal}{ legal index of independence}
\item{dev}{ developed (1) or developing (2) nation} }
}
\source{
These data are available from OzDASL, the Australasian Data and
Story Library (\url{https://dasl.datadescription.com/}).
}
\description{
The article developed four measures of central bank independence and
explored their relation to inflation outcomes in developed and developing
countries. This datafile deals with two of these measures in 23 nations.
}
\examples{
data(Inflation)
}
\references{
A. Cukierman, S.B. Webb, and B. Negapi, "Measuring the
Independence of Central Banks and Its Effect on Policy Outcomes," World Bank
Economic Review, Vol. 6 No. 3 (Sept 1992), 353-398.
}
\keyword{datasets}
|
c37d6187a448d6f34140a440c7f0eec95602d195
|
a5ced02be5ef57cfc093b9a77fbb71cdb18d9d76
|
/tests/testthat/test-SDA_hydric.R
|
56eced0beabc5c1dd948780256f44fb88d6670ec
|
[] |
no_license
|
Emory-ENVS-SihiLab/soilDB
|
e882de8337a3f3bd9943046c781f42a473723669
|
fca026cc1039f3f8936b70d0efe8c092950db4ee
|
refs/heads/master
| 2023-08-13T16:25:03.372504
| 2021-09-18T00:48:57
| 2021-09-18T00:48:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
test-SDA_hydric.R
|
# get_SDA_hydric() queries the USDA Soil Data Access web service for
# hydric-soil summaries, so these tests need network access and are skipped
# when offline or on CRAN.
test_that("get_SDA_hydric works", {
  skip_if_offline()
  skip_on_cran()
  # by areasymbol
  # NOTE(review): the expected row counts are pinned against the live SDA
  # database and will drift if SSURGO map units for CA077/CA630 are revised.
  expect_equal(nrow(get_SDA_hydric(areasymbols = c("CA077", "CA630"))), 313)
  # by mukey
  expect_equal(nrow(get_SDA_hydric(mukeys = c(461994, 461995))), 2)
})
|
95a3ccd48288ab612c9e36315631c33a1fe86233
|
b05ff0cb36e1be4f7808b956a0743acc9e0a5d93
|
/R/bioclimatic_variables_daily_test.R
|
2e18c3adf78f50e239433624a7463992737817c1
|
[
"CC0-1.0"
] |
permissive
|
dongmeic/climate-space
|
b649a7a8e6b8d76048418c6d37f0b1dd50512be7
|
7e800974e92533d3818967b6281bc7f0e10c3264
|
refs/heads/master
| 2021-01-20T02:13:12.143683
| 2020-04-03T16:47:56
| 2020-04-03T16:47:56
| 89,385,878
| 0
| 0
| null | 2020-04-03T16:47:57
| 2017-04-25T17:01:45
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,476
|
r
|
bioclimatic_variables_daily_test.R
|
#!/usr/bin/env Rscript
# Created by Dongmei Chen
# Writes CSV input for netCDF files of daily bioclimatic variables
# (minimum-temperature statistics). Intended to be launched per year index
# as a cluster batch job: argument 1 selects the year pair to process.
library(ncdf4)
args <- commandArgs(trailingOnly=T)
print(paste('args:', args))
print("Starting...")
# get.daily.stats() is defined in this sourced helper.
source("/gpfs/projects/gavingrp/dongmeic/climate-space/R/damian/getDailyStats.R")
inpath <- "/gpfs/projects/gavingrp/dongmeic/beetle/output/tables/"
setwd(inpath)
start_year <- 1901; end_year <- 2016; years <- start_year:end_year; nt <- length(years)
print("calculating the biocliamtic variables using daily data")
ptm <- proc.time()
# dim1 = number of na10km grid cells per monthly slice; dim2 unused below.
dim1 <- 277910; dim2 <- nt
i <- as.numeric(args[1])
print(paste('i:', i))
# Read two consecutive years and stack them (24 monthly slices of dim1 rows).
indata1 <- read.csv(paste0(inpath, "na10km_v2_climatic_values_",years[i],".csv"))
indata2 <- read.csv(paste0(inpath, "na10km_v2_climatic_values_",years[i+1],".csv"))
indata <- rbind(indata1, indata2)
df <- data.frame(Ncs=integer(), Acs=integer(), min20=integer(), min22=integer(), min24=integer(), min26=integer(), min28=integer())
#df <- data.frame(min30=integer(), min32=integer(), min34=integer(), min36=integer(), min38=integer(), min40=integer())
# For each grid cell j, gather its 24 monthly rows (row j of every slice,
# i.e. rows j, j+dim1, ..., j+23*dim1) and summarize them.
# NOTE(review): growing df row-by-row and the rbind() inner loop make this
# O(n^2) over 277,910 cells, and print(j) floods the log -- candidates for
# preallocation/vectorization if this script is revisited.
for(j in 1:dim1){
  df.j <- indata[j,]
  for(m in 1:23){
    df.m <- rbind(df.j, indata[j+dim1*m,])
    df.j <- df.m
    #print(m)
  }
  df[j,] <- get.daily.stats(years[i], df.m$tmx, df.m$tmp, df.m$tmn)
  print(j)
}
write.csv(df, paste0("min/bioclimatic_variables_daily_min_",years[i+1],"_2.csv"), row.names = FALSE)
print(paste("got data from", years[i+1]))
proc.time() - ptm
print("all done!")
|
08dba389e2e87ed572d2274fa8c30d92a599663d
|
baeff4d94f807133eaf631f794e94c19f3f7150d
|
/sampleSolutions/.file.dome.R
|
fc8b4c4b78c000cd5995c970a61c6d5fa5ba276c
|
[] |
no_license
|
sgassefa/R-EDA
|
06792b0875cd8f732682c3a53db45acaedd1818b
|
e489a4fa3e507bb80d3209bdd0f80fa7c98f9cf7
|
refs/heads/master
| 2020-07-26T16:37:45.382943
| 2019-05-16T12:28:01
| 2019-05-16T12:28:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,185
|
r
|
.file.dome.R
|
## Sample solution >>> (file.dome)
## Reads the raw supplementary table S3 (Jaitin et al.), removes its header
## rows, names the columns after the cell types and LPS treatments, converts
## the data columns to numeric, and stores the cleaned result as 'LPSdat'.

## Read the csv file
tmp <- read.csv("./data/table_S3.csv",
                header = FALSE,
                as.is = TRUE)

## The object "tmp" should appear in the Data section of the Environment tab
## in the top-right pane. It has a spreadsheet symbol next to it. Click that -
## or type View(tmp), and study the object. You should find:
## - all columns are named V<something>
## - rows 1 to 6 do not contain data
## - there is not a single row that could be used for column names
## - str(tmp) shows: all columns are characters
## This all needs to be fixed:

head(tmp, 10)          # Use head() to inspect
tmp <- tmp[-(1:6), ]   # Remove unneeded header rows

colnames(tmp) <- c("genes",      # gene names
                   "B.ctrl",     # Cell types are taken from
                   "B.LPS",      # Figure 4 of Jaitin et al.
                   "MF.ctrl",    # .ctrl and .LPS refer to control
                   "MF.LPS",     # and LPS challenge
                   "NK.ctrl",    # The cell types are:
                   "NK.LPS",     # B: B-cell
                   "Mo.ctrl",    # MF: Macrophage
                   "Mo.LPS",     # NK: Natural killer cell
                   "pDC.ctrl",   # Mo: Monocyte
                   "pDC.LPS",    # pDC: plasmacytoid dendritic cell
                   "DC1.ctrl",   # DC1: dendritic cell subtype 1
                   "DC1.LPS",    # DC2: dendritic cell subtype 2
                   "DC2.ctrl",   #
                   "DC2.LPS",    #
                   "cluster")    # Gene assigned to cluster by authors

head(tmp)
rownames(tmp) <- 1:nrow(tmp)   # fix rownames (row removal above left gaps)

str(tmp)                       # next: fix the column types
for (i in 2:ncol(tmp)) {       # convert character columns to numeric
  tmp[,i] <- as.numeric(tmp[ ,i])
  if (any(is.na(tmp[,i]))) {
    # any value that failed to parse becomes NA -- always validate!
    message(sprintf("Caution: NA in column %d", i))
  }
}
str(tmp)

## if everything is good ...
LPSdat <- tmp   # assign to meaningful name
rm(tmp)         # always a good idea to clean up

## <<< Sample solution

# [END]
|
778994f2da939471ef1d55dc5f6bf9690dfbf40c
|
6a9593d8b75c4e0207c804961b2d4dd961252871
|
/R/match_times.R
|
cf3271200337d968fb9efc44fc98ad7cb4ef7be9
|
[] |
no_license
|
noamross/sodi
|
554990dbdf2e7dd2e25b61e3fecad3707d5f489f
|
7cb855d54036f59dc733ac70c39daa8700b33204
|
refs/heads/master
| 2021-01-16T19:34:23.038160
| 2013-10-23T23:38:36
| 2013-10-23T23:38:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
match_times.R
|
# Snap a vector of requested times onto the observation times present in
# `sodi$Time`.  For each requested time, the latest observation time at or
# before it is returned (step-function interpolation, `f = 0`); times
# outside the observed range clamp to the first/last observation (`rule = 2`).
#
# Args:
#   sodi:  data frame with a numeric `Time` column
#          (assumes at least two distinct times -- approx() requires it).
#   times: numeric vector of times to match.
# Returns: vector of matched observation times, same length as `times`.
match_times <- function(sodi, times) {
  sodi_times <- unique(sodi$Time)
  # seq_along() is safe for the empty case, unlike 1:length().
  idx <- approx(x = sodi_times, y = seq_along(sodi_times),
                xout = times, method = "constant", rule = 2)$y
  matched_times <- sodi_times[idx]
  return(matched_times)
}
|
9f139ce1c14a12726a951ffc7c7d42b66be8f011
|
4b4ab3305a88d748a76f69c36510a486b8a342d5
|
/My Documents/Coursera/Exploratory Data Analysis/ExData_Plotting1/Plot1.R
|
3216d949296255016a4862ac92a0b89a462895a9
|
[] |
no_license
|
disiting/Getting-and-Cleaning-Data-Project-2
|
bde52e752ae1d519b023ab702f5de61149f85b91
|
60946e3ba97f28f5168d40debac5492fb5f61a5e
|
refs/heads/master
| 2020-06-04T22:14:52.165674
| 2014-10-26T19:04:28
| 2014-10-26T19:04:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 680
|
r
|
Plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02.

## Read the full dataset; "?" encodes missing values in the source file.
data1 <- read.csv("./Data/household_power_consumption.txt",
                  header = TRUE, sep = ";", na.strings = "?",
                  comment.char = "")
data1$Date <- as.Date(data1$Date, format = "%d/%m/%Y")

## Keep only the two target days (Feb 1-2, 2007)
ourdata <- subset(data1, Date >= "2007-02-01" & Date <= "2007-02-02")

## Combine date and time into a single POSIXct timestamp
ourdata$Datetime <- as.POSIXct(paste(as.Date(ourdata$Date), ourdata$Time))

## Draw the histogram on the screen device
hist(ourdata$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     col = "Red")

## Copy the screen plot to a 480x480 PNG and close the device
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
0c45d604037d07285444c9490040d302edec177b
|
bce8156a9e5b39f17f5c4f6fcd4c9fbff4d74897
|
/R/risk-budgeting.R
|
1b65d709daee0b1134c905777459864f9cfa9004
|
[] |
no_license
|
cran/fPortfolio
|
fb8f26496a32fd8712361a20cbb325c0bfcffe01
|
d0189fabdf712c043fb13feb80f47696ac645cef
|
refs/heads/master
| 2023-04-29T14:30:55.700486
| 2023-04-25T06:50:06
| 2023-04-25T06:50:06
| 17,695,954
| 10
| 10
| null | 2015-04-23T18:15:24
| 2014-03-13T04:38:33
|
R
|
UTF-8
|
R
| false
| false
| 22,202
|
r
|
risk-budgeting.R
|
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General
# Public License along with this library; if not, write to the
# Free Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
###############################################################################
# FUNCTION: DESCRIPTION:
# pfolioReturn Returns portfolio returns
# FUNCTION: DESCRIPTION:
# sampleCOV Returns sample covariance risk
# normalVaR Returns normal Value at Risk
# modifiedVaR Returns modified Cornish Fisher VaR
# sampleVaR Returns sample VaR from historical quantiles
# FUNCTION: DESCRIPTION:
# budgetsSampleCOV Covariance risk contribution and budgets
# budgetsNormalVAR Normal VaR risk contribution and budgets
# budgetsModifiedVAR Modified VaR risk contribution and budgets
# budgetsNormalES Normal ES (CVaR) risk contribution and budgets
# budgetsModifiedES Modified ES (CVaR) risk contribution and budgets
# UTILITIES: DESCRIPTION:
# .M34.MM Internal fast computing of M3 and M4
# .run Returns execution time information
# .Ipower Internal utility function to compute M3
# .derIpower Internal utility function to compute M4
# .myVaR ... to do
# DEPRECATED: DESCRIPTION:
# .covarRisk Computes covariance portfolio risk
# .mcr Computes marginal contribution to covariance risk
# .mcrBeta Computes beta, the rescaled mcr to covariance risk
# .riskContributions Computes covariance risk contributions
# .riskBudgets Computes covariance risk budgets
###############################################################################
sampleCOV <- function(x)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Sample covariance matrix of the columns of x (the plain
    #   stats::cov estimate, used as the default risk estimator).

    # Arguments:
    #   x - a 'timeSeries' object (any input accepted by stats::cov)

    # Return Value:
    #   covariance matrix estimate
    stats::cov(x)
}
# -----------------------------------------------------------------------------
normalVaR <- function(x, alpha = 0.05)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Gaussian (delta-normal) Value at Risk per column of x:
    #   mean + qnorm(alpha) * sd, where sd uses the biased (1/n)
    #   second central moment.

    # Arguments:
    #   x     - a 'timeSeries' object (columns = asset return series)
    #   alpha - tail probability, e.g. 0.05 for the 5% VaR

    # Return Value:
    #   one VaR value per column of x
    mu <- colMeans(x)
    centered <- t(t(x) - mu)
    m2 <- colMeans(centered^2)
    mu + qnorm(alpha) * sqrt(m2)
}
# -----------------------------------------------------------------------------
modifiedVaR <- function(x, alpha = 0.05)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Cornish-Fisher (modified) Value at Risk per column of x.  The
    #   Gaussian quantile is corrected for sample skewness and excess
    #   kurtosis before scaling by the biased standard deviation.

    # Arguments:
    #   x     - a 'timeSeries' object (columns = asset return series)
    #   alpha - tail probability of the VaR

    # Details:
    #   Includes Code Borrowed from Peterson and Boudt, GPL

    # Central moments up to order four:
    mu <- colMeans(x)
    centered <- t(t(x) - mu)
    m2 <- colMeans(centered^2)
    m3 <- colMeans(centered^3)
    m4 <- colMeans(centered^4)
    skewness <- m3 / m2^1.5
    excess.kurtosis <- m4 / m2^2 - 3

    # Cornish-Fisher expansion of the alpha-quantile:
    z <- qnorm(alpha)
    q <- z +
        skewness * (z^2 - 1) / 6 +
        excess.kurtosis * z * (z^2 - 3) / 24 -
        skewness^2 * z * (2 * z^2 - 5) / 36

    # Return Value:
    mu + q * sqrt(m2)
}
# -----------------------------------------------------------------------------
sampleVaR <-
function(x, alpha=0.05)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Returns the empirical (historical) Value at Risk: the alpha
    #   quantile of each column of x, with no distributional assumption.

    # Arguments:
    #   x - a 'timeSeries' object (columns = asset return series)
    #   alpha - tail probability, e.g. 0.05 for the 5% VaR

    # FUNCTION:

    # colQuantiles() is provided by the timeSeries/fBasics family of
    # packages this package depends on; it returns one quantile per column.
    # Return Value:
    colQuantiles(x, alpha)
}
# -----------------------------------------------------------------------------
budgetsSampleCOV <- function(x, weights, mu = NULL, Sigma = NULL)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Covariance (volatility) risk of a portfolio together with the
    #   per-asset Euler risk contributions and relative risk budgets.

    # Arguments:
    #   x       - a 'timeSeries' object (used when mu/Sigma are NULL)
    #   weights - portfolio weights, one per column of x
    #   mu      - optional mean vector (computed from x when NULL)
    #   Sigma   - optional covariance matrix (computed from x when NULL)

    # Return Value:
    #   list(riskCOV, contribution, budgets); the budgets sum to one.

    # Details:
    #   Includes Code Borrowed from Peterson and Boudt, GPL

    if (is.null(mu)) mu <- colMeans(x)
    if (is.null(Sigma)) Sigma <- cov(x)

    # Portfolio volatility sqrt(w' Sigma w):
    sigma.w <- as.vector(Sigma %*% weights)
    variance <- sum(weights * sigma.w)
    risk <- sqrt(variance)
    attr(risk, "estimator") <- substitute(FUN)

    # Euler risk contributions w_i * d(sigma)/d(w_i):
    contribution <- weights * sigma.w / sqrt(variance)
    names(contribution) <- colnames(x)

    # Relative budgets (fractions of the total risk):
    budgets <- contribution / risk
    names(budgets) <- colnames(x)
    attr(budgets, "control") <- sum(budgets)

    # Return Value:
    list(riskCOV = risk, contribution = contribution, budgets = budgets)
}
# -----------------------------------------------------------------------------
budgetsNormalVAR <- function(x, weights, alpha = 0.05, mu = NULL, Sigma = NULL)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Gaussian Value-at-Risk of a portfolio plus per-asset Euler risk
    #   contributions and relative risk budgets.

    # Arguments:
    #   x       - a 'timeSeries' object (used when mu/Sigma are NULL)
    #   weights - portfolio weights, one per column of x
    #   alpha   - tail probability of the VaR
    #   mu, Sigma - optional mean vector / covariance matrix

    # Return Value:
    #   list(riskVAR, contributionVAR, budgetsVAR); budgets sum to one.

    # Details:
    #   Includes Code Borrowed from Peterson and Boudt, GPL

    if (is.null(mu)) mu <- colMeans(x)
    if (is.null(Sigma)) Sigma <- cov(x)

    z <- qnorm(alpha)
    sigma.w <- as.vector(Sigma %*% weights)
    pvar <- sum(weights * sigma.w)            # w' Sigma w

    risk <- -(sum(weights * mu) + z * sqrt(pvar))
    attr(risk, "estimator") <- substitute(FUN)
    attr(risk, "alpha") <- alpha

    # Marginal VaR times weight gives each asset's contribution:
    contribution <- -(mu + z * sigma.w / sqrt(pvar)) * weights
    names(contribution) <- colnames(x)

    budgets <- contribution / risk
    names(budgets) <- colnames(x)
    attr(budgets, "sumBudgets") <- sum(budgets)

    # Return Value:
    list(riskVAR = risk, contributionVAR = contribution, budgetsVAR = budgets)
}
# -----------------------------------------------------------------------------
budgetsModifiedVAR <-
function(x, weights, alpha=0.05, mu=NULL, Sigma=NULL, M3=NULL, M4=NULL)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Cornish-Fisher (modified) Value-at-Risk of a portfolio together
    #   with the per-asset Euler risk contributions and relative budgets.
    #   The Gaussian VaR is corrected for portfolio skewness and excess
    #   kurtosis derived from the co-skewness (M3) and co-kurtosis (M4)
    #   matrices.

    # Arguments:
    #   x - a 'timeSeries' object of asset returns
    #   weights - portfolio weights, one per column of x
    #   alpha - tail probability of the VaR
    #   mu, Sigma - optional mean vector / covariance matrix; computed
    #       from x when NULL
    #   M3, M4 - optional co-skewness (n x n^2) and co-kurtosis (n x n^3)
    #       matrices; computed via .M34.MM(x) when either is NULL

    # Value:
    #   list(modifiedVAR, contribution, budgets); diagnostics of the
    #   Euler decomposition are stored as attributes on 'budgets'.

    # Details:
    #   Includes code borrowed from Peterson and Boudt, GPL

    # FUNCTION:

    # Compute Moments:
    if(is.null(mu)) mu <- colMeans(x)
    if(is.null(Sigma)) Sigma <- cov(x)
    if(is.null(M3) || is.null(M4)) {
        MM <- .M34.MM(x, mu=mu)
        M3 <- MM$M3
        M4 <- MM$M4
    }

    # Risk:
    # pm2..pm4 are the portfolio central moments; dpm2..dpm4 their
    # gradients with respect to the weights ('%x%' = Kronecker product).
    z <- qnorm(alpha)
    location <- t(weights) %*% mu
    pm2 <- t(weights) %*% Sigma %*% weights
    dpm2 <- as.vector(2 * Sigma %*% weights)
    pm3 <- weights %*% M3 %*% (weights %x% weights)
    dpm3 <- as.vector(3 * M3 %*% (weights %x% weights))
    pm4 <- t(weights) %*% M4 %*% (weights %x% weights %x% weights)
    dpm4 <- as.vector(4 * M4 %*% (weights %x% weights %x% weights))
    skew <- (pm3/pm2^(3/2))[[1]]
    exkurt <- (pm4/pm2^(2) - 3)[[1]]
    derskew <- (2 * (pm2^(3/2)) * dpm3 - 3 * pm3 * sqrt(pm2) * dpm2) /
        (2 * pm2^3)
    derexkurt <- ((pm2) * dpm4 - 2 * pm4 * dpm2)/(pm2^3)
    # h is the Cornish-Fisher corrected alpha-quantile:
    h <- z + (1/6) * (z^2 - 1) * skew
    h <- h + (1/24) * (z^3 - 3 * z) * exkurt - (1/36) * (2 * z^3 - 5 * z) *
        skew^2
    risk <- -(location + h * sqrt(pm2))

    # Risk Contribution:
    # derMVaR is the gradient of the modified VaR; multiplying by the
    # weights (Euler's theorem) gives additive contributions.
    derGausVaR <- -as.vector(mu) - qnorm(alpha) *
        (0.5 * as.vector(dpm2))/sqrt(pm2)
    derMVaR <- derGausVaR + (0.5 * dpm2/sqrt(pm2)) * (-(1/6) *
        (z^2 - 1) * skew - (1/24) * (z^3 - 3 * z) * exkurt +
        (1/36) * (2 * z^3 - 5 * z) * skew^2)
    derMVaR <- derMVaR + sqrt(pm2) * (-(1/6) * (z^2 - 1) * derskew -
        (1/24) * (z^3 - 3 * z) * derexkurt + (1/36) * (2 * z^3 -
        5 * z) * 2 * skew * derskew)
    contribution <- as.vector(weights) * as.vector(derMVaR)
    names(contribution) <- colnames(x)

    # Risk Budgets:
    budgets <- contribution/risk
    names(budgets) <- colnames(x)
    # NOTE(review): the bare 'budgets' below is a no-op left in the
    # original source.
    budgets
    attr(budgets, "sum(contribution)-risk") <- sum(contribution)-risk
    attr(budgets, "sum(budgets)") <- sum(budgets)

    # Return Value:
    list(modifiedVAR=risk, contribution=contribution, budgets=budgets)
}
# -----------------------------------------------------------------------------
budgetsNormalES <- function(x, weights, alpha = 0.05, mu = NULL, Sigma = NULL)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Gaussian Expected Shortfall (CVaR) of a portfolio plus per-asset
    #   Euler risk contributions and relative risk budgets.

    # Arguments:
    #   x       - a 'timeSeries' object (used when mu/Sigma are NULL)
    #   weights - portfolio weights, one per column of x
    #   alpha   - tail probability of the ES
    #   mu, Sigma - optional mean vector / covariance matrix

    # Return Value:
    #   list(normalES, contribution, budgets); budgets sum to one.

    # Details:
    #   Includes Code Borrowed from Peterson and Boudt, GPL

    if (is.null(mu)) mu <- colMeans(x)
    if (is.null(Sigma)) Sigma <- cov(x)

    # Portfolio mean and variance, kept as 1x1 matrices as in the
    # original Peterson/Boudt formulation:
    location <- t(weights) %*% mu
    pm2 <- t(weights) %*% Sigma %*% weights
    dpm2 <- as.vector(2 * (Sigma %*% weights))
    phi.z <- dnorm(qnorm(alpha))

    risk <- -location + phi.z * sqrt(pm2) / alpha
    attr(risk, "estimator") <- substitute(FUN)
    attr(risk, "alpha") <- alpha

    # Marginal ES times weight gives each asset's contribution:
    derES <- -mu + (phi.z / alpha) * (0.5 * dpm2) / sqrt(pm2)
    contribution <- weights * derES
    names(contribution) <- colnames(x)

    budgets <- contribution / risk
    names(budgets) <- colnames(x)
    attr(budgets, "sumBudgets") <- sum(budgets)

    # Return Value:
    list(normalES = risk, contribution = contribution, budgets = budgets)
}
# -----------------------------------------------------------------------------
budgetsModifiedES <-
function(x, weights, alpha=0.05, mu=NULL, Sigma=NULL, M3=NULL, M4=NULL)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Cornish-Fisher (modified) Expected Shortfall of a portfolio plus
    #   per-asset Euler risk contributions and relative risk budgets.
    #   The Gaussian ES is corrected for portfolio skewness and excess
    #   kurtosis via the .Ipower()/.derIpower() partial-moment helpers.

    # Arguments:
    #   x - a 'timeSeries' object of asset returns
    #   weights - portfolio weights, one per column of x
    #   alpha - tail probability of the ES
    #   mu, Sigma - optional mean vector / covariance matrix; computed
    #       from x when NULL
    #   M3, M4 - optional co-skewness (n x n^2) and co-kurtosis (n x n^3)
    #       matrices; computed via .M34.MM(x) when either is NULL

    # Value:
    #   list(modifedES, contribution, budgets).
    #   NOTE(review): the element name 'modifedES' (sic) is part of the
    #   public return value; renaming it would break callers.

    # Details:
    #   Includes code borrowed from Peterson and Boudt, GPL

    # Settings:
    if(is.null(mu)) mu <- colMeans(x)
    if(is.null(Sigma)) Sigma <- cov(x)
    if(is.null(M3) || is.null(M4)) {
        MM <- .M34.MM(x, mu=mu)
        M3 <- MM$M3
        M4 <- MM$M4
    }

    # Risk:
    # Portfolio central moments pm2..pm4 and their weight-gradients
    # dpm2..dpm4 ('%x%' = Kronecker product).
    z <- qnorm(alpha)
    location <- t(weights) %*% mu
    pm2 <- (t(weights) %*% Sigma %*% weights)[[1]]
    dpm2 <- as.vector(2 * Sigma %*% weights)
    pm3 <- (weights %*% M3 %*% (weights %x% weights))[[1]]
    dpm3 <- as.vector(3 * M3 %*% (weights %x% weights))
    pm4 <- (t(weights) %*% M4 %*% (weights %x% weights %x% weights))[[1]]
    dpm4 <- as.vector(4 * M4 %*% (weights %x% weights %x% weights))
    skew <- (pm3/pm2^(3/2))[[1]]
    exkurt <- (pm4/pm2^(2) - 3)[[1]]
    derskew <- (2 * (pm2^(3/2)) * dpm3 -
        3 * pm3 * sqrt(pm2) * dpm2)/(2 * pm2^3)
    derexkurt <- ((pm2) * dpm4 - 2 * pm4 * dpm2)/(pm2^3)
    # h: Cornish-Fisher corrected quantile; derh: its weight-gradient.
    h <- z +
        (1/6) * (z^2 - 1) * skew
    h <- h +
        (1/24) * (z^3 - 3 * z) * exkurt -
        (1/36) * (2 * z^3 - 5 * z) * skew^2
    derh <- (1/6) * (z^2 - 1) * derskew +
        (1/24) * (z^3 - 3 * z) * derexkurt -
        (1/18) * (2 * z^3 - 5 * z) * skew * derskew
    # E: Edgeworth-corrected tail expectation term of the ES.
    E <- dnorm(h)
    E <- E + (1/24) * (.Ipower(4, h) - 6 * .Ipower(2, h) +
        3 * dnorm(h)) * exkurt
    E <- E + (1/6) * (.Ipower(3, h) - 3 * .Ipower(1, h)) * skew
    E <- E + (1/72) * (.Ipower(6, h) - 15 * .Ipower(4, h) +
        45 * .Ipower(2, h) - 15 * dnorm(h)) * (skew^2)
    E <- E/alpha
    risk <- MES <- -location + sqrt(pm2) * E

    # Risk Contributions:
    # derMES: gradient of the modified ES; X collects the dE/dh terms.
    derMES <- -mu + 0.5 * (dpm2/sqrt(pm2)) * E
    derE <- (1/24) * (.Ipower(4, h) - 6 * .Ipower(2, h) +
        3 * dnorm(h)) * derexkurt
    derE <- derE + (1/6) * (.Ipower(3, h) - 3 * .Ipower(1, h)) * derskew
    derE <- derE + (1/36) * (.Ipower(6, h) - 15 * .Ipower(4, h) +
        45 * .Ipower(2, h) - 15 * dnorm(h)) * skew * derskew
    X <- -h * dnorm(h) + (1/24) * (.derIpower(4, h) - 6 * .derIpower(2, h) -
        3 * h * dnorm(h)) * exkurt
    X <- X + (1/6) * (.derIpower(3, h) - 3 * .derIpower(1, h)) * skew
    X <- X + (1/72) * (.derIpower(6, h) - 15 * .derIpower(4, h) +
        45 * .derIpower(2, h) + 15 * h * dnorm(h)) * skew^2
    derE <- derE + derh * X
    derE <- derE/alpha
    derMES <- derMES + sqrt(pm2) * derE
    contribution <- as.vector(weights) * as.vector(derMES)
    names(contribution) <- colnames(x)

    # Risk Budgets:
    budgets <- contribution/risk
    names(budgets) <- colnames(x)
    attr(budgets, "sumBudgets") <- sum(budgets)

    # Return Value:
    list(modifedES=risk, contribution=contribution, budgets=budgets)
}
###############################################################################
.M34.MM <-
function (x, mu=NULL)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Computes the sample co-skewness matrix M3 (n x n^2) and the
    #   co-kurtosis matrix M4 (n x n^3) of the columns of x in the
    #   "flattened" Kronecker layout consumed by budgetsModifiedVAR()
    #   and budgetsModifiedES().

    # Arguments:
    #   x  - a 'timeSeries' object; series(x) extracts its data matrix,
    #        so a plain matrix is not supported here
    #   mu - optional vector of column means (computed from x when NULL)

    # Value:
    #   list(M3, M4, mu) with the accumulated moment matrices divided
    #   by the number of observations m.

    # Details:
    #   Includes Code Borrowed from Peterson and Boudt, GPL

    # Fast Rmetrics Implementation:
    n <- ncol(x)
    m <- nrow(x)
    if(is.null(mu)) mu <- colMeans(x)
    M3 <- matrix(rep(0, n^3), nrow=n, ncol=n^2)
    M4 <- matrix(rep(0, n^4), nrow=n, ncol=n^3)
    centret <- series(x) - matrix(rep(mu, each=m), ncol=n)
    # Accumulate one observation at a time: M = (c c') %x% c' updates M3,
    # and M %x% c' updates M4 ('%x%' = Kronecker product).
    for (i in c(1:m)) {
        cent <- centret[i, ]
        tcent <- t(cent)
        M <- (cent %*% tcent) %x% tcent
        M3 <- M3 + M
        M4 <- M4 + M %x% tcent
    }
    # Return Value:
    list(M3=M3/m, M4=M4/m, mu=mu)
}
# -----------------------------------------------------------------------------
.run <- function(FUN, times = 10, mult = 100, ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Calls FUN(...) `times` times, prints a small timing summary
    #   (elapsed time scaled by `mult`), and returns the last result.

    # Arguments:
    #   FUN   - function or function name to benchmark
    #   times - number of repetitions
    #   mult  - multiplier applied to the elapsed time before printing
    #   ...   - arguments forwarded to FUN

    fun <- match.fun(FUN)
    started <- Sys.time()
    # kept as 1:... to reproduce the original loop behavior exactly
    for (i in 1:as.integer(times)) ans <- fun(...)
    elapsed <- mult * as.numeric(Sys.time() - started)

    # Print Timing Results:
    cat("Timing:\n")
    print(c(sec = round(elapsed, 0), times = times, mult = mult,
            runs = times * mult))
    cat("\nResults:\n\n")

    # Return Value:
    ans
}
# -----------------------------------------------------------------------------
.Ipower <- function(power, h)
{
    # Description:
    #   Partial-moment integral I_power(h) used by the Cornish-Fisher
    #   Expected Shortfall expansion in budgetsModifiedES().
    #   Even and odd powers take different closed forms.
    #   Assumes power >= 1 (callers use powers 1..6).

    # Details:
    #   A function borrowed from PerformanceAnalytics, GPL

    if (power %% 2 == 0) {
        # even power: double factorial 2 * 4 * ... * power
        half <- power / 2
        fullprod <- prod(2 * seq_len(half))
        I <- fullprod * dnorm(h)
        for (i in seq_len(half)) {
            prod.i <- prod(2 * seq_len(i))
            I <- I + (fullprod / prod.i) * h^(2 * i) * dnorm(h)
        }
    } else {
        # odd power: double factorial 1 * 3 * ... * power
        half <- (power - 1) / 2
        fullprod <- prod(2 * (0:half) + 1)
        I <- -fullprod * pnorm(h)
        for (i in 0:half) {
            prod.i <- prod(2 * (0:i) + 1)
            I <- I + (fullprod / prod.i) * h^(2 * i + 1) * dnorm(h)
        }
    }
    return(I)
}
# -----------------------------------------------------------------------------
.derIpower <- function(power, h)
{
    # Description:
    #   Derivative d/dh of .Ipower(power, h); used for the risk
    #   contributions of the modified Expected Shortfall.
    #   Assumes power >= 1 (callers use powers 1..6).

    # Details:
    #   A function borrowed from PerformanceAnalytics, GPL

    if (power %% 2 == 0) {
        # even power: double factorial 2 * 4 * ... * power
        half <- power / 2
        fullprod <- prod(2 * seq_len(half))
        I <- -fullprod * h * dnorm(h)
        for (i in seq_len(half)) {
            prod.i <- prod(2 * seq_len(i))
            I <- I + (fullprod / prod.i) * h^(2 * i - 1) *
                (2 * i - h^2) * dnorm(h)
        }
    } else {
        # odd power: double factorial 1 * 3 * ... * power
        half <- (power - 1) / 2
        fullprod <- prod(2 * (0:half) + 1)
        I <- -fullprod * dnorm(h)
        for (i in 0:half) {
            prod.i <- prod(2 * (0:i) + 1)
            I <- I + (fullprod / prod.i) * h^(2 * i) *
                (2 * i + 1 - h^2) * dnorm(h)
        }
    }
    return(I)
}
# -----------------------------------------------------------------------------
.myVaR <- function(x, alpha = 0.05, method = c("normal", "modified", "sample"))
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Dispatches to normalVaR(), modifiedVaR() or sampleVaR()
    #   according to `method` (partial matching via match.arg).

    method <- match.arg(method)
    fun <- match.fun(paste0(method, "VaR"))

    # Return Value:
    fun(x, alpha)
}
###############################################################################
# DEPRECATED - DO NOT REMOVE - REQUIRED BY PACKAGE appRmetricsHandbook
.covarRisk <-
function(data, weights=NULL, FUN="cov", ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Computes the covariance portfolio risk sqrt(w' COV w).

    # Arguments:
    #   data - a multivariate timeSeries object of financial returns
    #   weights - numeric vector of portfolio weights; equal weights
    #       are used when NULL
    #   FUN - a covariance estimator, which returns a matrix of
    #       covariance estimates, by default the sample covariance
    #   ... - Optional arguments passed to the function FUN

    # Example:
    #   covarRisk(data)

    # FUNCTION:

    # Covariance Risk:
    covFun <- match.fun(FUN)
    # Bug fix: '...' was documented as being passed to FUN but was
    # silently dropped before; it is now forwarded.
    COV <- covFun(data, ...)

    # Portfolio Weights (equal weights by default):
    N <- ncol(COV)
    if (is.null(weights)) weights <- rep(1/N, N)
    names(weights) <- colnames(COV)

    # Covariance Portfolio Risk:
    covarRisk <- sqrt( t(weights) %*% COV %*% weights )[[1, 1]]

    # Return Value:
    covarRisk
}
# -----------------------------------------------------------------------------
.mcr <-
function(data, weights=NULL, FUN="cov", ...)
{
    # A function implemented by Diethelm Wuertz

    # Description
    #   Computes the marginal contribution to covariance risk,
    #   (COV %*% w) divided by the total portfolio risk.

    # Arguments:
    #   data - a multivariate timeSeries object of financial returns
    #   weights - numeric vector of portfolio weights; equal weights
    #       are used when NULL
    #   FUN - a covariance estimator, which returns a matrix of
    #       covariance estimates, by default the sample covariance
    #   ... - Optional arguments passed to the function FUN

    # Details:
    #   The formula are implemented according to Goldberg et al.,
    #   see also R script assetsPfolio.R

    # References:
    #   Lisa Goldberg et al., Extreme Risk Management, 2009
    #   Scherer and Martin, Introduction to modern portfolio Optimimization

    # Example:
    #   data <- assetsSim(100, 6); mcr(data)

    # FUNCTION:

    # Covariance Risk:
    covFun <- match.fun(FUN)
    # Bug fix: forward '...' to the estimator (previously dropped),
    # keeping it consistent with the .covarRisk() call below.
    COV <- covFun(data, ...)
    N <- ncol(data)
    if (is.null(weights)) weights <- rep(1/N, N)

    # Marginal Contribution to Risk
    mcr <- (COV %*% weights)[, 1] / .covarRisk(data, weights, FUN, ...)
    names(mcr) <- colnames(data)

    # Return Value:
    mcr
}
# -----------------------------------------------------------------------------
.mcrBeta <- function(data, weights = NULL, FUN = "cov", ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Portfolio beta: the marginal contribution to covariance risk
    #   rescaled by the total covariance risk.

    # Arguments:
    #   data    - a multivariate timeSeries object of financial returns
    #   weights - numeric vector of portfolio weights (equal when NULL)
    #   FUN     - a covariance estimator (default: sample covariance)
    #   ...     - optional arguments passed to FUN

    # Example:
    #   .mcrBeta(data)

    # Portfolio Beta:
    total.risk <- .covarRisk(data, weights, FUN = FUN, ...)
    beta <- .mcr(data, weights, FUN = FUN, ...) / total.risk

    # Return Value:
    beta
}
# -----------------------------------------------------------------------------
.riskContributions <- function(data, weights = NULL, FUN = "cov", ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Covariance risk contributions: each weight multiplied by the
    #   asset's marginal contribution to risk.

    # Arguments:
    #   data    - a multivariate timeSeries object of financial returns
    #   weights - numeric vector of portfolio weights (equal when NULL)
    #   FUN     - a covariance estimator (default: sample covariance)
    #   ...     - optional arguments passed to FUN

    # Example:
    #   .riskContributions(data)

    # Default to equal weights:
    if (is.null(weights)) {
        n.assets <- ncol(data)
        weights <- rep(1/n.assets, times = n.assets)
    }

    # Risk Contributions:
    riskContributions <- weights * .mcr(data, weights, FUN, ...)

    # Return Value:
    riskContributions
}
# -----------------------------------------------------------------------------
.riskBudgets <- function(data, weights = NULL, FUN = "cov", ...)
{
    # A function implemented by Diethelm Wuertz

    # Description:
    #   Relative covariance risk budgets: the risk contributions
    #   divided by the total covariance portfolio risk.

    # Arguments:
    #   data    - a multivariate timeSeries object of financial returns
    #   weights - numeric vector of portfolio weights (equal when NULL)
    #   FUN     - a covariance estimator (default: sample covariance)
    #   ...     - optional arguments passed to FUN

    # Example:
    #   data <- 100*LPP2005.RET[, 1:6]; .riskBudgets(data)

    # Risk Budgets:
    .riskContributions(data, weights, FUN, ...) /
        .covarRisk(data, weights, FUN, ...)
}
###############################################################################
|
82f2c6ec478d5fb233ebacba6638d72cffd83646
|
992a8fd483f1b800f3ccac44692a3dd3cef1217c
|
/Project_bioinformatics/Bra.popGenePaper/pi and heter plot/heter plot.r
|
a123f6a5b069dcd671b8dd74ec8a86709ea7fa9a
|
[] |
no_license
|
xinshuaiqi/My_Scripts
|
c776444db3c1f083824edd7cc9a3fd732764b869
|
ff9d5e38d1c2a96d116e2026a88639df0f8298d2
|
refs/heads/master
| 2020-03-17T02:44:40.183425
| 2018-10-29T16:07:29
| 2018-10-29T16:07:29
| 133,203,411
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,170
|
r
|
heter plot.r
|
### cal he and ho based on hardy output in vcftool
### Computes the per-SNP ratio of column 4 over column 7 from two
### vcftools --hardy output tables (non-WGT vs WGT genes), keeps only SNPs
### deviating significantly from Hardy-Weinberg equilibrium, and compares
### the two distributions (histograms, box plot, Welch t-test).
getwd()
# NOTE(review): the first setwd() is immediately overridden by the second.
setwd("C:/Users/qxs/Desktop/")
setwd("E:/Dropbox/Xubuntu-Win7_UAproject/Brassica Project/Chenlu_Rotation/Results_by_qxs##")
# replace / with \t in the document

# --- non-WGT genes -----------------------------------------------------
t<-read.table(file="hardy_5424noneWGT.hwe",header = TRUE);
t<-as.data.frame(t)
# keep SNPs with significant heterozygote deficit or excess (p <= 0.05)
t<-t[((t$P_HET_DEFICIT<=0.05)|(t$P_HET_EXCESS<=0.05)),]
nrow(t)
# column 4 / column 7 -- presumably observed over expected heterozygosity;
# TODO confirm against the vcftools --hardy column layout
hohe.no<-t[,4]/t[,7]

# --- WGT genes ---------------------------------------------------------
t<-read.table(file="hardy_27919WGT.hwe",header = TRUE);
t<-as.data.frame(t)
t<-t[((t$P_HET_DEFICIT<=0.05)|(t$P_HET_EXCESS<=0.05)),]
nrow(t)
hohe.wgt<-t[,4]/t[,7]

#head(t)
#head(t[,4])
#hohe<-t[,4]/t[,7]

# Overlaid histograms of the two Ho/He distributions
hist((hohe.wgt),breaks=200,col='skyblue',border=F,ylim=c(0,60000))
mean(hohe.wgt)
summary(hohe.wgt)
hist(hohe.no,breaks=200,add=T,col='red',border=F,ylim=c(0,60000))
mean(hohe.no)
summary(hohe.no)

# NOTE(review): 'mean' shadows base::mean in the workspace from here on.
mean<-c(mean(hohe.no,na.rm=T),
        mean(hohe.wgt,na.rm=T))
mean
#0.3079451 0.2973541

boxplot(hohe.wgt,hohe.no,
        col=c("purple","orange"),
        ylab="Ho/He",
        main="Box plot of Ho/He",
        outline=FALSE,
        xlab=c("WGT gene      none WGT gene"))

# Welch two-sample t-test comparing mean Ho/He between the two gene sets
t.test(hohe.wgt,hohe.no)
#t = -54.687, df = 34055, p-value < 2.2e-16
#t = -2.1451, df = 5377.2, p-value = 0.03199  # only significant SNPs

set.seed(42)
hist(hohe.no,xlim=c(0,2),ylim=c(0,5000),breaks=200,col='skyblue',border=F)
hist(hohe.wgt,xlim=c(0,2),ylim=c(0,5000),breaks=200,add=T,col=scales::alpha('red',.5),border=F)

# library(ggplot2)
# df<-cbind(hohe.wgt,hohe.no)
# ggplot()+
#   geom_boxplot(data=hohe.wgt)+
#   geom_boxplot(data=hohe.no)
#
# p <- ggplot(hohe.wgt,))
#
# p + geom_boxplot()

##
# By the central limit theorem, means of samples from a population with finite variance
# approach a normal distribution regardless of the distribution of the population.
# Rules of thumb say that the sample means are basically normally distributed as long as
# the sample size is at least 20 or 30. For a t-test to be valid on a sample of smaller size,
# the population distribution would have to be approximately normal.
#
# The t-test is invalid for small samples from non-normal distributions,
# but it is valid for large samples from non-normal distributions.
|
3e5d5caa379058f0e1b95a981b1bce53a4315ffd
|
f18c5b643a853c8da28d4f21f038d617790f10e3
|
/ICCscript.R
|
3bfccdea3a9da17f6cd34090362d19fbeed97aee
|
[] |
no_license
|
pbrehill/IDMalldimensions
|
6dc35d0c8d085b607956934ef5b55e52efdf9f74
|
f83cfc9ac37bb96fcdceadc5926a0c28a4f03fde
|
refs/heads/master
| 2020-09-14T08:58:09.538638
| 2020-03-03T10:23:49
| 2020-03-03T10:23:49
| 223,083,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,225
|
r
|
ICCscript.R
|
library(ICC)
source("importclean.R")
smallimport <- import.clean("fiji_newscored_cleaned_Oct19.csv")[[2]]
ICC_fiji <- function(df, scores){
  # Computes an intraclass correlation (ICC) estimate for each score column
  # named in `scores`, clustering on household id (HHID_unique), and
  # collects the point estimates with their 95% CIs into a data frame.
  #
  # Args:
  #   df:     data frame containing HHID_unique plus the score columns
  #   scores: character vector of score column names to analyse
  # Returns:
  #   data.frame with columns Dimension, "ICC est", LowerCI, UpperCI;
  #   scores whose ICCest() call failed are left as NA rows.
  fiji_icc_df <- df
  # Hoisted out of the loop: the grouping factor never changes per score.
  fiji_icc_df$HHID_unique <- as.factor(fiji_icc_df$HHID_unique)
  ls <- list()
  for (i in seq_along(scores)){
    scoreno <- scores[i]
    # ICC::ICCest accepts character column names (ICC >= 2.3.0 -- TODO
    # confirm installed version), which replaces the previous
    # eval(parse(text = ...)) construction.
    results <- try(ICCest("HHID_unique", scoreno, data = fiji_icc_df,
                          alpha = 0.05))
    if (inherits(results, "try-error")){
      next
    }
    # Remember which score this result belongs to (appended as the last
    # list element, read back below).
    results[[length(results) + 1]] <- scoreno
    ls[[i]] <- results
  }
  # Organise results into a dataframe; skipped scores leave NULL entries
  # in `ls`, which become NA rows here.
  ICC_df <- data.frame(matrix(nrow=length(ls), ncol=4))
  for (i in seq_along(ls)){
    if (is.list(ls[[i]])) {
      res <- ls[[i]]
      ICC_df[i, 1] <- res[[length(res)]]   # the score name appended above
      ICC_df[i, 2] <- res[["ICC"]]
      ICC_df[i, 3] <- res[["LowerCI"]]
      ICC_df[i, 4] <- res[["UpperCI"]]
    }
  }
  colnames(ICC_df) <- c("Dimension", "ICC est", "LowerCI", "UpperCI")
  return(ICC_df)
}
|
6569c461e3cfbe205e4f1cedafc7aaec13ff1012
|
d53874ea3d6927e9890f9eb4927a1479e705940b
|
/scripts/graphs_response_letter_flow.R
|
26427937d6a50760f413d3dfa007f78446d834af
|
[] |
no_license
|
CourtneyCampany/WTC3_c13
|
f4e6b635e0da4dd02ba3847a6f2ecdfc353b4282
|
f3c1b2ccb4bb7cc67e86e9b416c315a00a7e361c
|
refs/heads/master
| 2021-01-22T12:58:01.093448
| 2020-11-11T16:00:48
| 2020-11-11T16:00:48
| 82,255,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,293
|
r
|
graphs_response_letter_flow.R
|
# Diagnostic figures for the response letter: chamber air flow and CO2
# injection time series.  'deltaPaired' is created by the sourced script.
source("scripts/calculateCin.R")

palette(1:12)

# Quick look at February 2014 flows, coloured by chamber.
# NOTE(review): the upper bound is a bare character string; it is coerced
# before comparison -- confirm it behaves like as.Date('2014-02-28').
feb <- subset(deltaPaired, DateTime >= as.Date('2014-02-01') & DateTime <= ('2014-02-28'))
plot(Air_in ~ DateTime, data = feb, col = as.factor(feb$chamber))
plot(Air_out ~ DateTime, data = feb)

# Per-chamber subsets (C04 filtered to inflow values >= 6)
c03 <- subset(deltaPaired, chamber == 'C03')
c04 <- subset(deltaPaired, chamber == 'C04' & Air_in >= 6)
c01 <- subset(deltaPaired, chamber == 'C01')
c06 <- subset(deltaPaired, chamber == 'C06')

# Month windows used for the panel plots below
firstDay <- c('2013-10-01', '2013-12-01', '2014-01-01', '2014-02-01', '2014-03-01', '2014-04-01')
lastDay <- c('2013-10-31', '2013-12-31', '2014-01-31', '2014-02-28', '2014-03-31', '2014-04-30')
names <- c('Oct-13', 'Dec-13', 'Jan-14', 'Feb-14', 'Mar-14', 'Apr-14')

# Figure 1: in/out flow for chambers C01 (blue) and C06 (red) over the
# first three month windows.
# NOTE(review): windows() exists only on Windows; use dev.new() elsewhere.
windows(12, 8)
par(mfrow=c(2, 3), mar = c(3, 5, 4, 1))
for (i in 1:3){
  plot(Air_in ~ DateTime,
       data = subset(c01, DateTime >= as.Date(firstDay[i]) & DateTime <= as.Date(lastDay[i])),
       ylim = c(6, 7.55), type = 'l', col = 'blue', cex.lab = 1.3,
       xlab = '', ylab = expression(italic(f)~(std~L~s^-1)), main = names[i])
  lines(Air_out ~ DateTime,
        data = subset(c01, DateTime >= as.Date(firstDay[i]) & DateTime <= as.Date(lastDay[i])),
        col = 'blue', lty = 2)
  lines(Air_in ~ DateTime,
        data = subset(c06, DateTime >= as.Date(firstDay[i]) & DateTime <= as.Date(lastDay[i])),
        col = 'red')
  lines(Air_out ~ DateTime,
        data = subset(c06, DateTime >= as.Date(firstDay[i]) & DateTime <= as.Date(lastDay[i])),
        col = 'red', lty = 2)
}
legend('bottomright', bty = 'n', legend = c('in', 'out', 'amb', 'warm'), lty = c(1, 2, 1, 1),
       col = c('black', 'black', 'blue', 'red'))

# Figure 2: CO2 injection for C03 (blue) vs C04 (red), all six windows.
windows(12, 8)
par(mfrow=c(2, 3), mar = c(3, 5, 4, 1))
for (i in 1:length(firstDay)){
  plot(CO2Injection ~ DateTime,
       data = subset(c03, DateTime >= as.Date(firstDay[i]) & DateTime <= as.Date(lastDay[i])),
       ylim = c(0, 0.16), type = 'l', col = 'blue', cex.lab = 1.3,
       xlab = '', ylab = expression(italic(C)[inj]~(mmol~s^-1)), main = names[i])
  lines(CO2Injection ~ DateTime,
        data = subset(c04, DateTime >= as.Date(firstDay[i]) & DateTime <= as.Date(lastDay[i])),
        col = 'red')
}
legend('topright', bty = 'n', legend = c('in', 'out', 'amb', 'warm'), lty = c(1, 2, 1, 1),
       col = c('black', 'black', 'blue', 'red'))
|
ead1d5c02c2039f0aa410a3ec5deef98fdbdb350
|
a0d8b03bd65565e21153681ad86d10000f1cc369
|
/man/tab_itemscale.Rd
|
9a177e6adfd322546c5cb49f4494a78bb3795628
|
[] |
no_license
|
strengejacke/sjPlot
|
8fe406c4b878f178237d5bab5c881d7006ae7dbc
|
c52c7c420270ea57b56d337263153f117b8c516d
|
refs/heads/master
| 2023-08-24T19:36:09.229188
| 2023-08-17T11:11:24
| 2023-08-17T11:11:24
| 27,482,073
| 535
| 90
| null | 2023-03-31T05:10:36
| 2014-12-03T10:37:38
|
R
|
UTF-8
|
R
| false
| true
| 10,085
|
rd
|
tab_itemscale.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tab_itemscale.R
\name{tab_itemscale}
\alias{tab_itemscale}
\alias{sjt.itemanalysis}
\title{Summary of item analysis of an item scale as HTML table}
\usage{
tab_itemscale(
df,
factor.groups = NULL,
factor.groups.titles = "auto",
scale = FALSE,
min.valid.rowmean = 2,
alternate.rows = TRUE,
sort.column = NULL,
show.shapiro = FALSE,
show.kurtosis = FALSE,
show.corr.matrix = TRUE,
CSS = NULL,
encoding = NULL,
file = NULL,
use.viewer = TRUE,
remove.spaces = TRUE
)
sjt.itemanalysis(
df,
factor.groups = NULL,
factor.groups.titles = "auto",
scale = FALSE,
min.valid.rowmean = 2,
alternate.rows = TRUE,
sort.column = NULL,
show.shapiro = FALSE,
show.kurtosis = FALSE,
show.corr.matrix = TRUE,
CSS = NULL,
encoding = NULL,
file = NULL,
use.viewer = TRUE,
remove.spaces = TRUE
)
}
\arguments{
\item{df}{A data frame with items.}
\item{factor.groups}{If not \code{NULL}, \code{df} will be splitted into sub-groups,
where the item analysis is carried out for each of these groups. Must be a vector of same
length as \code{ncol(df)}, where each item in this vector represents the group number of
the related columns of \code{df}. If \code{factor.groups = "auto"}, a principal
component analysis with Varimax rotation is performed, and the resulting
groups for the components are used as group index. See 'Examples'.}
\item{factor.groups.titles}{Titles for each factor group that will be used as table caption for each
component-table. Must be a character vector of same length as \code{length(unique(factor.groups))}.
Default is \code{"auto"}, which means that each table has a standard caption \emph{Component x}.
Use \code{NULL} to suppress table captions.}
\item{scale}{Logical, if \code{TRUE}, the data frame's vectors will be scaled when calculating the
Cronbach's Alpha value (see \code{\link[performance]{item_reliability}}). Recommended, when
the variables have different measures / scales.}
\item{min.valid.rowmean}{Minimum amount of valid values to compute row means for index scores.
Default is 2, i.e. the return values \code{index.scores} and \code{df.index.scores} are
computed for those items that have at least \code{min.valid.rowmean} per case (observation, or
technically, row). See \code{mean_n} for details.}
\item{alternate.rows}{Logical, if \code{TRUE}, rows are printed in
alternating colors (white and light grey by default).}
\item{sort.column}{Numeric vector, indicating the index of the column
that should sorted. by default, the column is sorted in ascending order.
Use negative index for descending order, for instance,
\code{sort.column = -3} would sort the third column in descending order.
Note that the first column with rownames is not counted.}
\item{show.shapiro}{Logical, if \code{TRUE}, a Shapiro-Wilk normality test is computed for each item.
See \code{\link{shapiro.test}} for details.}
\item{show.kurtosis}{Logical, if \code{TRUE}, the kurtosis for each item will also be shown (see \code{\link[psych]{kurtosi}}
and \code{\link[psych]{describe}} in the \code{psych}-package for more details.}
\item{show.corr.matrix}{Logical, if \code{TRUE} (default), a correlation matrix of each component's
index score is shown. Only applies if \code{factor.groups} is not \code{NULL} and \code{df} has
more than one group. First, for each case (df's row), the sum of all variables (df's columns) is
scaled (using the \code{\link{scale}}-function) and represents a "total score" for
each component (a component is represented by each group of \code{factor.groups}).
After that, each case (df's row) has a scaled sum score for each component.
Finally, a correlation of these "scale sum scores" is computed.}
\item{CSS}{A \code{\link{list}} with user-defined style-sheet-definitions,
according to the \href{https://www.w3.org/Style/CSS/}{official CSS syntax}.
See 'Details' or \href{https://strengejacke.github.io/sjPlot/articles/table_css.html}{this package-vignette}.}
\item{encoding}{Character vector, indicating the charset encoding used
for variable and value labels. Default is \code{"UTF-8"}. For Windows
Systems, \code{encoding = "Windows-1252"} might be necessary for proper
display of special characters.}
\item{file}{Destination file, if the output should be saved as file.
If \code{NULL} (default), the output will be saved as temporary file and
opened either in the IDE's viewer pane or the default web browser.}
\item{use.viewer}{Logical, if \code{TRUE}, the HTML table is shown in the IDE's
viewer pane. If \code{FALSE} or no viewer available, the HTML table is
opened in a web browser.}
\item{remove.spaces}{Logical, if \code{TRUE}, leading spaces are removed from all lines in the final string
that contains the html-data. Use this, if you want to remove parentheses for html-tags. The html-source
may look less pretty, but it may help when exporting html-tables to office tools.}
}
\value{
Invisibly returns
\itemize{
\item \code{df.list}: List of data frames with the item analysis for each sub.group (or complete, if \code{factor.groups} was \code{NULL})
\item \code{index.scores}: A data frame with of standardized scale / index scores for each case (mean value of all scale items for each case) for each sub-group.
\item \code{ideal.item.diff}: List of vectors that indicate the ideal item difficulty for each item in each sub-group. Item difficulty only differs when items have different levels.
\item \code{cronbach.values}: List of Cronbach's Alpha values for the overall item scale for each sub-group.
\item \code{knitr.list}: List of html-tables with inline-css for use with knitr for each table (sub-group)
\item \code{knitr}: html-table of all complete output with inline-css for use with knitr
\item \code{complete.page}: Complete html-output.
}
If \code{factor.groups = NULL}, each list contains only one element, since just one
table is printed for the complete scale indicated by \code{df}. If \code{factor.groups}
is a vector of group-index-values, the lists contain elements for each sub-group.
}
\description{
This function performs an item analysis with certain statistics that are
useful for scale or index development. The resulting tables are shown in the
viewer pane resp. webbrowser or can be saved as file. Following statistics are
computed for each item of a data frame:
\itemize{
\item percentage of missing values
\item mean value
\item standard deviation
\item skew
\item item difficulty
\item item discrimination
\item Cronbach's Alpha if item was removed from scale
\item mean (or average) inter-item-correlation
}
Optional, following statistics can be computed as well:
\itemize{
\item kurtosis
\item Shapiro-Wilk Normality Test
}
If \code{factor.groups} is not \code{NULL}, the data frame \code{df} will be
split into groups, assuming that \code{factor.groups} indicate those columns
of the data frame that belong to a certain factor (see return value of function \code{\link{tab_pca}}
as example for retrieving factor groups for a scale and see examples for more details).
}
\note{
\itemize{
\item The \emph{Shapiro-Wilk Normality Test} (see column \code{W(p)}) tests if an item has a distribution that is significantly different from normal.
\item \emph{Item difficulty} should range between 0.2 and 0.8. Ideal value is \code{p+(1-p)/2} (which mostly is between 0.5 and 0.8).
\item For \emph{item discrimination}, acceptable values are 0.20 or higher; the closer to 1.00 the better. See \code{\link[performance]{item_reliability}} for more details.
\item In case the total \emph{Cronbach's Alpha} value is below the acceptable cut-off of 0.7 (mostly if an index has few items), the \emph{mean inter-item-correlation} is an alternative measure to indicate acceptability. Satisfactory range lies between 0.2 and 0.4. See also \code{\link[performance]{item_intercor}}.
}
}
\examples{
# Data from the EUROFAMCARE sample dataset
library(sjmisc)
library(sjlabelled)
data(efc)
# retrieve variable and value labels
varlabs <- get_label(efc)
# retrieve first item of COPE-index scale
start <- which(colnames(efc) == "c82cop1")
# retrieve last item of COPE-index scale
end <- which(colnames(efc) == "c90cop9")
# create data frame with COPE-index scale
mydf <- data.frame(efc[, start:end])
colnames(mydf) <- varlabs[start:end]
\dontrun{
if (interactive()) {
tab_itemscale(mydf)
# auto-detection of labels
tab_itemscale(efc[, start:end])
# Compute PCA on Cope-Index, and perform a
# item analysis for each extracted factor.
indices <- tab_pca(mydf)$factor.index
tab_itemscale(mydf, factor.groups = indices)
# or, equivalent
tab_itemscale(mydf, factor.groups = "auto")
}}
}
\references{
\itemize{
\item Jorion N, Self B, James K, Schroeder L, DiBello L, Pellegrino J (2013) Classical Test Theory Analysis of the Dynamics Concept Inventory. (\href{https://www.academia.edu/4104752/Classical_Test_Theory_Analysis_of_the_Dynamics_Concept_Inventory}{web})
\item Briggs SR, Cheek JM (1986) The role of factor analysis in the development and evaluation of personality scales. Journal of Personality, 54(1), 106-148. doi: 10.1111/j.1467-6494.1986.tb00391.x
\item McLean S et al. (2013) Stigmatizing attitudes and beliefs about bulimia nervosa: Gender, age, education and income variability in a community sample. International Journal of Eating Disorders. doi: 10.1002/eat.22227
\item Trochim WMK (2008) Types of Reliability. (\href{https://conjointly.com/kb/types-of-reliability/}{web})
}
}
|
b53d8f281af16314fd743724ba72aedecea40d98
|
40237bcda0890aeb6b63e5d609d1241a73fca134
|
/read_write_db_example2.R
|
58c8cd0e087902f6af0b94bb6baace2677db4a94
|
[] |
no_license
|
fowlerthefox/statsports3
|
e5ffc9ea0d486e9f8f4faf328bb9e0a47f4fb7b8
|
1857142fe7b0293dbfe9c4d4ae86e87be3da5059
|
refs/heads/master
| 2022-05-25T04:37:11.690354
| 2020-05-03T17:37:20
| 2020-05-03T17:37:20
| 260,050,143
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,513
|
r
|
read_write_db_example2.R
|
library(tidyverse)
library(usethis)
library(DBI)
library(RPostgreSQL)
library(tidyverse)
library(geosphere)
library(lubridate)
library(sf)
#setting enviroment variables
#set postgres_db = 'trackingdata'
usethis::edit_r_environ()
#access variables at anytime but will be specific to the machine that set enviromental variable
Sys.getenv('postgres_db')
#' function to connect to DB - note we should have stored our database name with using
#' usethis::edit_r_environ()
#' Open a connection to the local PostgreSQL tracking database.
#'
#' @param db Database name; defaults to the `postgres_db` environment
#'   variable (set beforehand via usethis::edit_r_environ()).
#' @return A DBI connection object for user "postgres" on localhost.
initConnection <- function(db = Sys.getenv('postgres_db')) {
  driver <- RPostgreSQL::PostgreSQL()
  RPostgreSQL::dbConnect(
    driver,
    dbname = db,
    host = "localhost",
    user = "postgres"
  )
}
#----------------------
# Setup schema
#----------------------
# Open a connection and (idempotently) create the per-second positions table.
con <- initConnection()
# ensure time is stored as character and we have a surrogate primary key
DBI::dbSendQuery(con, 'CREATE TABLE IF NOT EXISTS positions2 (id SERIAL PRIMARY KEY,
player_display_name VARCHAR(64), time VARCHAR(10), seconds NUMERIC,
lat NUMERIC, lon NUMERIC, speed_m_s NUMERIC, heart_rate_bpm NUMERIC, instantaneous_acceleration_impulse NUMERIC,
match_day VARCHAR(4), match_date VARCHAR(10), team VARCHAR, distance NUMERIC);')
# unique constraint over all data columns so re-running an import cannot
# accidentally insert duplicate records (paired with ON CONFLICT DO NOTHING
# in read_with_metadata below)
DBI::dbSendQuery(con, "ALTER TABLE positions2 ADD CONSTRAINT nodup2 UNIQUE (player_display_name, seconds, lat, lon, speed_m_s, heart_rate_bpm, instantaneous_acceleration_impulse, match_day, match_date, team);")
# close db connection
DBI::dbDisconnect(con)
# ---------------------
# Data Cleaning
#---------------------
# use seq_along(x) instead of 1:length(x) as 1:length does not account for 0
# loop to read file paths
team_paths <- list.files('Data', full.names = T)
my_paths <- character()
team_paths <- list.files('Data', full.names = T)
for(i in seq_along(team_paths)){
first_sub <- list.files(team_paths[i], full.names = T)
for(j in seq_along(first_sub)){
# only include csv files in case we have other file extensions
second_sub <- list.files(first_sub[j], full.names = T, pattern = '.csv')
my_paths <- c(my_paths, second_sub)
}
}
my_paths
con <- initConnection()
# function to read in our data and write to database
#' Read one raw GPS export csv, clean it, aggregate it to one row per second,
#' and append the result to the `positions2` database table.
#'
#' @param path Path to a single RawDataExport csv (exactly 7 columns expected).
#' @param con An open DBI/PostgreSQL connection.
#'   NOTE(review): the default `con = con` is self-referential and will fail
#'   ("promise already under evaluation") if `con` is not supplied explicitly
#'   or found in the calling environment; the lapply() call below always
#'   passes it.
#' @return The result of the final dbSendQuery (DROP TABLE); called for its
#'   side effect of inserting rows into `positions2`.
read_with_metadata <- function(path, con=con){
message('reading file ', path)
# specify column types - set time column to character
df <- read_csv(path, col_types= cols('c','c','d','d','d','d','d')) %>%
janitor::clean_names() %>%
arrange(time)
# throw error if we don't have the right number of columns
if(ncol(df) != 7) {
stop(glue::glue('error in {path}\n expected 7 columns, got {ncol(df)}. Please check the file.'))
}
# get metadata (match day, match date, team number) from the file path itself
matchDay <- str_extract(path, '(MD|RHB)([-+0-9]{0,})')
matchDate <- str_extract(path, '[0-9]{4}-[0-9]{2}-[0-9]{2}')
team <- str_extract(path, '(?<=Team )([0-9]{1,})')
clean_df <- df %>% mutate(
match_day = matchDay,
match_date = matchDate,
team = team ) %>%
# remove nas in lat/lng
filter(!is.na(lat), !is.na(lon))
# calculate distance between consecutive positions (geosphere::distGeo on
# WGS84 coordinates)
distance <- clean_df %>%
st_as_sf(coords = c('lon', 'lat'), crs = 4326, remove = FALSE) %>%
as('Spatial') %>%
geosphere::distGeo()
clean_df$distance <- distance
# calculate time since the first timestamp
# different files have different formats (MM:SS.fraction vs HH:MM:SS),
# so parse depending on what the first value looks like
if(str_detect(clean_df$time[1], '^\\d{2}:\\d{2}.\\d{1,}$')){
time_col <- strptime(x = clean_df$time, format = "%M:%OS")
} else{
# else time should be in format HH:MM:SS
time_col <- strptime(x = clean_df$time, format = "%H:%M:%OS")
}
# time_col <- strptime(x = clean_df$time, format = "%H:%M:%S")
# remove NA's
# time_since_start <- as.numeric(time_col - min(time_col, na.rm = T))/60
# clean_df$time_since_start <- time_since_start
# whole seconds elapsed since the earliest timestamp in the file
clean_df$seconds <- as.numeric(round(seconds(time_col)- seconds(min(time_col, na.rm=TRUE))))
summarised_df <- clean_df %>%
# these columns stay constant
group_by(player_display_name, seconds, match_day, match_date, team) %>%
# these columns need to be summarised by second
summarise(time = last(time), lat = mean(lat), lon = mean(lon), speed_m_s = mean(speed_m_s),
heart_rate_bpm = mean(heart_rate_bpm),
instantaneous_acceleration_impulse = mean(instantaneous_acceleration_impulse),
distance = sum(distance))
# write to a temporary staging table, then INSERT ... ON CONFLICT DO NOTHING
# so the nodup2 unique constraint silently skips already-present rows
#suppressWarnings(dbSendQuery(con, "CREATE TABLE IF NOT EXISTS temp_tbl (like positions including all);"))
dbSendQuery(con, "CREATE TABLE IF NOT EXISTS temp_tbl2 (id SERIAL PRIMARY KEY,
player_display_name VARCHAR(64), time VARCHAR(10), seconds NUMERIC,
lat NUMERIC, lon NUMERIC, speed_m_s NUMERIC, heart_rate_bpm NUMERIC, instantaneous_acceleration_impulse NUMERIC,
match_day VARCHAR(4), match_date VARCHAR(10), team VARCHAR, distance NUMERIC)")
dbWriteTable(con, 'temp_tbl2', summarised_df, row.names = FALSE, append=TRUE)
# Insert into db - only write to new table if not a duplicate
my_sql <- glue::glue_sql("
INSERT INTO positions2
(player_display_name, time, seconds,
lat, lon, speed_m_s, heart_rate_bpm, instantaneous_acceleration_impulse,
match_day, match_date, team, distance)
SELECT player_display_name, time, seconds,
lat, lon, speed_m_s, heart_rate_bpm, instantaneous_acceleration_impulse,
match_day, match_date, team, distance
FROM temp_tbl2
ON CONFLICT
DO NOTHING;",.con=con)
# run insert into, then drop the staging table
dbSendQuery(con, my_sql)
dbSendQuery(con, 'DROP TABLE temp_tbl2;')
}
# need to manually fix the path: Data/Team 1/20190415. MD/2019-04-15-Sonra 02-RawDataExport.csv
# read in all files using function to clean data from paths found in loop
lapply(my_paths, function(x) read_with_metadata(x, con))
# NOTE(review): the queries below read from "positions", but the import
# above writes to "positions2" -- confirm which table is intended.
# Total distance per player per match for team 2, largest first.
player_distance_per_game <- tbl(con, "positions") %>%
filter(team == '2') %>%
group_by(player_display_name, match_date) %>%
summarise(distance_covered = sum(distance, na.rm=T)) %>%
arrange(desc(distance_covered))
# Lazy query for a single player (collected further below as needed).
player_1 <- tbl(con, "positions") %>%
filter(player_display_name == 'Sonra 05')
# Total distance per player/match/match-day for team 1.
team_1 <- tbl(con, "positions") %>%
filter(team == '1') %>%
group_by(player_display_name, match_date, match_day) %>%
summarise(totaldist = sum(distance, na.rm = T))
print(team_1)
# Per-timestamp means/sums for the single player, with a proper datetime.
per_second <- player_1 %>%
collect() %>%
mutate(dt = parse_date_time(paste0(match_date,' ',time),orders = '%Y-%m-%d %H:%M%S',tz = 'GMT')) %>%
group_by(dt) %>%
summarise(speed_m_s = mean(speed_m_s), distance = sum(distance))
# Same idea but kept as an sf object (point geometry) for mapping.
player_1_sf <- tbl(con, "positions") %>%
filter(player_display_name == 'Sonra 02') %>%
collect() %>%
mutate(dt = lubridate::parse_date_time(paste0(match_date,' ',time),orders = '%Y-%m-%d %H:%M%S',tz = 'GMT')) %>%
st_as_sf(coords = c('lon', 'lat'), crs = 4326, remove=F)
DBI::dbDisconnect(con)
#----------------
# -- HEATMAP
#---------------
library(leaflet)
library(leaflet.extras)
# Heatmap of one player's GPS fixes over satellite imagery.
leaflet(player_1_sf) %>%
addProviderTiles('Esri.WorldImagery') %>%
addHeatmap(group="heat", lng=~lon, lat=~lat, max=.6, blur = 60)
# NOTE(review): `con` was closed just above via DBI::dbDisconnect(con);
# this query will fail unless the connection is re-opened first.
tbl(con, 'positions') %>% filter(team == '1', player_display_name=='Sonra 02', match_day == 'MD',
match_date == '2019-04-15' ) %>% glimpse
#debug distance problem
# data <- tbl(con, 'positions') %>%
# filter(team == '2',
# player_display_name == 'STATSports 01',
# match_day == 'MD',
# match_date == '2019-10-10') %>%
# collect()
#
#
# data %>%
# select(lat, lon, speed_m_s, distance, time) %>%
# st_as_sf(coords = c('lon', 'lat'), crs = 4326, remove = FALSE) %>%
# as('Spatial') %>%
# geosphere::distGeo()
#
#
#
#
# df <- read_csv('Data/Team 2/20191010 MD/2019-10-10-STATSports 01-RawDataExport.csv')
# con <- initConnection()
# # ensure time is character and we have a primary key
# DBI::dbSendQuery(con, 'CREATE TABLE IF NOT EXISTS test (id SERIAL PRIMARY KEY,
# player_display_name VARCHAR(64), time VARCHAR(10), time_since_start NUMERIC,
# lat NUMERIC, lon NUMERIC, speed_m_s NUMERIC, heart_rate_bpm NUMERIC, instantaneous_acceleration_impulse NUMERIC,
# match_day VARCHAR(4), match_date VARCHAR(10), team VARCHAR, distance NUMERIC);')
# # add constraint so we don't accidently add duplicate records
# DBI::dbSendQuery(con, "ALTER TABLE test ADD CONSTRAINT noduptest UNIQUE (player_display_name, time, lat, lon, speed_m_s, heart_rate_bpm, instantaneous_acceleration_impulse, match_day, match_date, team);")
# path <- 'Data/Team 2/20191010 MD/2019-10-10-STATSports 01-RawDataExport.csv'
# df <- read_csv(path, col_types= cols('c','c','d','d','d','d','d')) %>%
# janitor::clean_names() %>%
# arrange(time)
# # get metadata from file path
# matchDay <- str_extract(path, '(MD|RHB)([-+0-9]{0,})')
# matchDate <- str_extract(path, '[0-9]{4}-[0-9]{2}-[0-9]{2}')
# team <- str_extract(path, '(?<=Team )([0-9]{1,})')
# clean_df <- df %>% mutate(
# match_day = matchDay,
# match_date = matchDate,
# team = team ) %>%
# # remove nas in lat/lng
# filter(!is.na(lat), !is.na(lon))
# # calculate distance between consecutive positions
# distance <- clean_df %>%
# st_as_sf(coords = c('lon', 'lat'), crs = 4326, remove = FALSE) %>%
# as('Spatial') %>%
# geosphere::distGeo()
# clean_df$distance <- distance
# # calculate time since the first timestamp
# # differnt files have different formats so read data depending on format
# if(str_detect(clean_df$time[1], '^\\d{2}:\\d{2}.\\d{1,}$')){
# time_col <- strptime(x = clean_df$time, format = "%M:%OS")
# } else{
# # else time should be in format HH:MM:SS
# time_col <- strptime(x = clean_df$time, format = "%H:%M:%OS")
# }
# # time_col <- strptime(x = clean_df$time, format = "%H:%M:%S")
# # remove NA's
# time_since_start <- as.numeric(time_col - min(time_col, na.rm = T))/60
# clean_df$time_since_start <- time_since_start
# clean_df %>%
# filter(team == '2',
# player_display_name == 'STATSports 01',
# match_day == 'MD',
# match_date == '2019-10-10')
# dbSendQuery(con, "CREATE TABLE IF NOT EXISTS test_temp_tbl (id SERIAL PRIMARY KEY,
# player_display_name VARCHAR(64), time VARCHAR(10), time_since_start NUMERIC,
# lat NUMERIC, lon NUMERIC, speed_m_s NUMERIC, heart_rate_bpm NUMERIC, instantaneous_acceleration_impulse NUMERIC,
# match_day VARCHAR(4), match_date VARCHAR(10), team VARCHAR, distance NUMERIC)")
# dbWriteTable(con, 'test_temp_tbl', clean_df, row.names = FALSE, overwrite=TRUE)
# my_sql <- glue::glue_sql("
# INSERT INTO test
# (player_display_name, time, time_since_start,
# lat, lon, speed_m_s, heart_rate_bpm, instantaneous_acceleration_impulse,
# match_day, match_date, team, distance)
# SELECT player_display_name, time, time_since_start,
# lat, lon, speed_m_s, heart_rate_bpm, instantaneous_acceleration_impulse,
# match_day, match_date, team, distance
# FROM test_temp_tbl
# ON CONFLICT
# DO NOTHING;",.con=con)
# dbSendQuery(con, my_sql)
# tbl(con, 'test') %>%
# filter(team == '2',
# player_display_name == 'STATSports 01',
# match_day == 'MD',
# match_date == '2019-10-10') %>%
# collect()
#
# tbl(con, 'test') %>%
# collect
# tbl(con, 'test_temp_tbl') %>%
# Pull one player's records for a single match.
# NOTE(review): `pool` is not defined in this script -- presumably a
# DBI/pool connection created elsewhere (e.g. in a shiny app); confirm.
df <- tbl(pool, 'positions2') %>%
  filter(team == '1',
         player_display_name == 'Sonra 02',
         match_day == 'MD',
         match_date == '2019-04-08') %>%
  collect()

# Raw heart rate (bpm) and speed (x50 so both series share one axis; the
# secondary axis undoes the scaling) against minutes since kick-off.
# BUG FIX: the original chain was missing a `+` after scale_y_continuous(),
# so scale_x_continuous() and the theme were evaluated as separate
# statements and silently dropped from the plot.
df %>%
  arrange(., seconds) %>%
  ggplot() +
  geom_line(aes(x = seconds/60, y = speed_m_s*50), col = 'green', alpha = 0.5) +
  geom_line(aes(x = seconds/60, y = heart_rate_bpm), col = '#cb4b16') +
  scale_y_continuous(name="BPM", sec.axis = sec_axis(~./50, name = "speed")) +
  scale_x_continuous(name = 'minutes since start') +
  ggthemes::theme_solarized(light=FALSE)

# Same two series, loess-smoothed (span = 0.1) to show trend instead of
# second-by-second noise. Same missing-`+` fix applied here too.
df %>%
  arrange(., seconds) %>%
  ggplot() +
  geom_smooth(aes(x = seconds/60, y = speed_m_s*50), col = 'green', alpha = 0.5, method = 'loess', span=0.1) +
  geom_smooth(aes(x = seconds/60, y = heart_rate_bpm), col = '#cb4b16', method = 'loess', span=0.1) +
  scale_y_continuous(name="BPM", sec.axis = sec_axis(~./50, name = "speed")) +
  scale_x_continuous(name = 'minutes since start') +
  ggthemes::theme_solarized(light=FALSE)
|
b885bcda24ef008ee37f3b2554041c9702a8d6f0
|
ef22e559b8ad41f16894aaffbc3dd47b22ccfa92
|
/PSmodel.R
|
519fd8fcd24a363342c13c409131b753bfb6bf86
|
[] |
no_license
|
hannahlepper/MScProject
|
5f578f4c4e57e3b7b7ac20f0af65f5f6199a09b3
|
36f2d54abc5e8dcd2fc2c6308e7805f5d78ef03b
|
refs/heads/master
| 2021-01-01T16:22:03.052919
| 2017-09-07T16:03:13
| 2017-09-07T16:03:13
| 97,812,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,994
|
r
|
PSmodel.R
|
library(deSolve)
#' Right-hand side of a TB transmission ODE model, for deSolve::ode().
#'
#' @param t    Current time (model years); used to look up the time-varying
#'   detection rate del(t).
#' @param x    Named state vector with compartments U, Ls, Lf, I, N, C
#'   (uninfected, slow/fast latent, infectious TB, non-infectious TB,
#'   cured -- inferred from variable names and outputs; confirm).
#' @param pars Named parameter vector. NOTE(review): `pars` contains an
#'   element also named `x` (reinfection scaling, e.g. x = 0.65 in the
#'   commented pars_base); inside with() that element shadows the
#'   state-vector argument of the same name in the FOI expressions.
#' @return A list: derivatives c(dU,dLs,dLf,dI,dN,dC) first (as deSolve
#'   requires), followed by named diagnostic outputs (incidence components,
#'   notifications, prevalence, mortality, disease durations), with rates
#'   scaled to per 100,000.
PSmodel <- function (t, x, pars) {
with(as.list(c(x,pars)),{
# detection rate boosted during surveys; del(t) interpolates linearly
# between baseline CDR and the survey CDR at the survey_times() breakpoints
CDR_survey <- CDR_int(CDR, cov, k, sens)
del <- approxfun(x=survey_times(survey_interval),y=rep(c(CDR, CDR_survey, CDR),3),method="linear",rule=2)
# births replace all deaths (background Mu plus TB deaths Mui, Mun),
# keeping the total population P constant
births <- Mu*(U + Ls + Lf + I + N + C) + Mui*I + Mun*N
P <- U + Ls + Lf + I + N + C
# Equations####
# Betas: transmission split into within-group (bw) and community (bc) parts
bw <- (1-r) * b
bc <- r * b
# FOIs: c scales the contribution of N cases; a*x scales infection risk
# for already-infected (Ls) and cured (C) people
foi_basic <- bw * U * (I + (c*N))
foi_comm <- bc * U *Ic
foi_exo_react <- bw * a * x * (I + (c*N)) * Ls
foi_exo_react_comm <- bc * a * x * Ic * Ls
foi_reinf <- bw * a * x * (I + (c*N)) * C
foi_reinf_comm <- bc * a * x * Ic * C
# Derivatives: a fraction sg of all incident disease enters I, the rest N
dU <- births - foi_basic - foi_comm - Mu*U
dLs <- ((1-a)*(foi_basic + foi_comm)) - foi_exo_react - foi_exo_react_comm - (vs + Mu)*Ls
dLf <- (a * (foi_basic + foi_comm)) - (vf + Mu)*Lf
dI <- sg*(vs*Ls + vf*Lf + foi_exo_react + foi_exo_react_comm
+ foi_reinf + foi_reinf_comm + p*C) + theta*N - (del(t)* tau + Mu + Mui + nc)*I
dN <- (1-sg)*(vs*Ls + vf*Lf + foi_exo_react + foi_exo_react_comm
+ foi_reinf + foi_reinf_comm + p*C) - (theta + del(t)* tau + Mu + Mun + nc)*N
dC <- (nc + del(t)* tau)*(I + N) - foi_reinf - foi_reinf_comm - (p + Mu)*C
# Outputs==== diagnostics returned alongside the derivatives
Inc <- vs*Ls + vf*Lf + foi_exo_react + foi_exo_react_comm + foi_reinf + foi_reinf_comm + p*C
Inc_first <- vf*Lf
Inc_react <- vs*Ls + foi_exo_react_comm + foi_exo_react
Inc_relap <- p*C
Inc_recent <- Inc_first + foi_exo_react + foi_exo_react_comm +
foi_reinf + foi_reinf_comm
case_notifications <- del(t) * (I + N)
cases_removed <- del(t) * tau * (I + N)
treatment_cov <- case_notifications / Inc
TB_prev <- I+N
Inf_prev <- Ls + Lf + I + N + C
TB_Deaths <- Mui*I + Mun*N
# mean time spent in I and N (reciprocal of the total exit rates),
# then prevalence-weighted average durations of active disease
dur_I <- 1/(Mu + Mui + del(t)* tau + nc)
dur_N <- 1/(theta + Mu + Mun + del(t)* tau + nc)
dur_active_TB <- (I*dur_I + N*dur_N)/(I + N)
dur_active_inf_TB <- (I*dur_I + c*N*dur_N)/(I + c*N)
list(
c(dU,dLs,dLf,dI,dN,dC),
Total = P,
foi_basic = foi_basic,
foi_reinf_comm = foi_reinf_comm,
foi_reinf = foi_reinf,
foi_exo_react_comm = foi_exo_react_comm,
foi_exo_react = foi_exo_react,
foi_comm = foi_comm,
Inc = Inc * 100000,
Inc_first = Inc_first * 100000,
Inc_react = Inc_react * 100000,
Inc_relap = Inc_relap *100000,
Inc_recent = Inc_recent * 100000,
case_notifications = case_notifications * 100000,
treatment_cov = treatment_cov,
cases_removed = cases_removed * 100000,
Prev = TB_prev * 100000,
Inf_prev = Inf_prev * 100000,
Mort = TB_Deaths * 100000,
dur_active_TB = dur_active_TB,
dur_active_inf_TB = dur_active_inf_TB
)
})
}
# Detection rate functions====
# Case-detection rate during an active case-finding survey: the baseline
# CDR plus a boost of 50 * coverage * k * sensitivity.
CDR_int <- function(CDR, cov, k, sens) {
  boost <- 50 * (cov * k * sens)
  CDR + boost
}
# Breakpoint times (model years) for the piecewise-linear detection rate:
# a first survey around t = 500, then two more spaced `survey_interval`
# years apart. Each survey contributes three breakpoints 0.02 apart, so the
# boosted rate applies over a narrow window. Returns 9 cumulative times.
survey_times <- function(survey_interval) {
  first_survey <- c(499.98, 0.02, 0.02)
  later_surveys <- rep(c(survey_interval - 0.04, 0.02, 0.02), 2)
  cumsum(c(first_survey, later_surveys))
}
# Initials====
# Init_inf <- 0.2 # Fraction of the pop initially infected
# yinit <- c(U=1-Init_inf,Ls=0.99*Init_inf,Lf=0,I=0.01*Init_inf,N=0,C=0)
#
# # Parameters====
# pars_base <- c(b=22, p=0.01, a=0.11, vf=0.67, vs=0.0005,
# sg=0.45, x=0.65, nc=0.2,theta=0.015,
# Mu=0.06, Mui=0.3, Mun=0.21, CDR=0.7,
# cov = 0, k = 0, tau=0.91, r=0.2,
# c=0.22, Ic = 0.002, survey_interval=5)
#
# # Solve====
# sol_base <-ode(y=yinit,times=seq(0,600, by=0.02),func=PSmodel,
# parms=fullparset[80,])
# #time step = 0.02 of a year
#
# sol_base_df <- as.data.frame(sol_base)
# plot(sol_base_df$time, sol_base_df$cases_removed, type = "l")
# plot(sol_base_df$time, sol_base_df$Prev, type = "l", c(499, 510), c(50,150))
|
ae6381fb9897b096bd0b476e2353b9f546b9cd67
|
89764643a4b65cabb368d1d20147677e83330d1a
|
/cookbook.R
|
61cba4842fabe9b29db7af2a94a71933fcbd7e30
|
[] |
no_license
|
jhirani-moj/R-Learning
|
4aabb9ef2c43cd71779f072d2b7cd259b27eb6d4
|
2065e1452f0dae89c69addcbf38d084f8b32d0cd
|
refs/heads/master
| 2021-10-10T14:06:17.138187
| 2019-01-11T16:05:50
| 2019-01-11T16:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,227
|
r
|
cookbook.R
|
'''Resources'''
# R for Data Science
https://r4ds.had.co.nz/
# R Cheatsheet collection
https://www.rstudio.com/resources/cheatsheets/
'''Basics'''
Alt + Shift + K shows keyboard shortcuts
Cmd + Ctrl + Enter runs everything after the cursor
Cmd + Shift + S runs the whole script
install.packages("tidyverse") # Install a package
library(tidyverse) # Load a package
?ggplot() # Get Help
x <- 3 * 4 # Object Assignment
function_name(arg1 = val1, arg2 = val2) # Call a function
View(flights) # View a full dataset
?object # Get help
print() # Prints to the console
getwd() # Get working directory
setwd("/path/to/my/CoolProject") # Set Working directory
problems(x) # Show problem data in a tibble
# Function syntax
rescale01 <- function(x) {
rng <- range(x, na.rm = TRUE)
(x - rng[1]) / (rng[2] - rng[1])
}
'''Data Manipulation'''
# Filtering
df <- filter(df, col1 == 1, col2 == 1) # Basic Filtering
df <- filter(df, is.na(col1), col2 != 1) # NA syntax
df <- filter(df, col1 %in% c(1,2,3)) # In syntax (c function combines values into a vector)
# Sorting
df <- arrange(df, col1, col2, col3) # Sort rows based upon values
df <- arrange(df, desc(col1)) # Sort Descending
# Columns
colnames(df)
# Select columns
df <- select(df, col1, col2, col3) # Select named columns
df <- select(df, col1:col10) # Select columns between
df <- select(df, -(col1:col10)) # Select columns except
df <- select(df, starts_with('col')) # Select columns that start with ...
df <- select(df, ends_with('1')) # Select columns that end with ...
df <- select(df, contains('col')) # Select columns that contain ...
df <- select(df, matches("(.)\\1")) # Select using a regex
df <- select(df, col1, col2, everything()) # Order columns with specified variables first
# Rename columns
df <- rename(df, col1 = col2) # Rename a columns
# Create columns (mutate)
mutate(df,
new_col1 = col1 - col2,
new_col2 = col1 + col2
)
# Create new columns + get rid of old ones
transmute(
col1 = col1 - col2,
col2 = col3 + col4
)
# Groupby / summarise
gp <- group_by(df, col1, col2, col3)
summarise(gp, delay = mean(col4, na.rm = TRUE))
# Pipes
flights %>%
group_by(year, month, day) %>%
summarise(mean = mean(dep_delay, na.rm = TRUE))
# Tibbles & Parsing
as_tibble(df) # Coerce a df to a tibble
# Create a tibble
tb <- tibble(
`:)` = "smile",
` ` = "space",
`2000` = "number"
)
# Create a tibble using a tribble
tribble(
~x, ~y, ~z,
#--|--|----
"a", 2, 3.6,
"b", 1, 8.5
)
c("TRUE", "FALSE") # Convert values to a vector / list
df <- class(as.data.frame(tb)) # Convert a tibble to a dataframe
df <- read_csv("data/file.csv") # Read in a csv to a tibble
str(parse_logical(c("TRUE", "FALSE", "NA"))) # Parse logical values
str(parse_integer(c("1", "2", "3"))) # Parse integers
str(parse_date(c("2010-01-01", "1979-10-14"))) # Parse dates
parse_number("$100") # Parse currency etc.
parse_datetime("2010-10-01T2010") # Parse datetime
guess_parser("2010-10-01") # Get R to guess the data format
# Syntax for ggplot:
ggplot(data = <DATA>) +
<GEOM_FUNCTION>(mapping = aes(<MAPPINGS>))
# Examples
# Simple Bar chart
ggplot(data = df) +
geom_bar(mapping = aes(x = var1))
# Basic gg plot
ggplot(data = mpg) + # Creates the plot
geom_point(mapping = aes(x = displ, y = hwy, color = class)) # Adds x, y & color data
# Can also take args size, alpha, shape
# Manually setting params (outside aes function)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy), color = "blue")
|
697842c9d53a17f2dd863f22aeeee4ccf90da48e
|
76639f2df7d941d55c8e2878126c2b3d2b395211
|
/shiny_spark/ui.R
|
574d8baec80851ac43740db94fad1b53f00fb4ee
|
[] |
no_license
|
JoseAngelFernandez/data_lake
|
5d7c4df3bb9dba2f3e8037c581826ce3a134d0d4
|
47aba96c7222399312548fbb1eb3fc594a56d258
|
refs/heads/master
| 2021-08-24T17:54:19.497375
| 2017-11-21T09:26:02
| 2017-11-21T09:26:02
| 111,524,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
r
|
ui.R
|
library(shiny)
# UI for the log-based stats explorer: date/hour filters in a sidebar and
# three outputs (one text, two plots) rendered by the matching server.R.
# NOTE(review): `Available_days` and `Hours` (and the commented-out
# `Minutes`) are not defined in this file -- presumably created in
# global.R; confirm.
shinyUI(fluidPage( #theme=shinytheme("flatly"),
# Application title
titlePanel("Log based stats - Exploratory Data Analysis Example"),
# Sidebar with date and hour select inputs
sidebarLayout(
sidebarPanel(
selectInput("InputDate",
"Date:",
Available_days),
selectInput("InputHour",
"Hour:",
Hours,
selected = 0),
# selectInput("InputMinute",
#             "Minutes:",
#             Minutes),
# with submitButton, input changes only propagate when Apply is clicked
submitButton("Apply")
),
mainPanel(
textOutput("testText"),
plotOutput("distPlot"),
plotOutput("distPlot2")
)
)
)
)
|
f413db52fd63dd8575f230104c49924530f3ae35
|
3aafde0fd80385b6de558f17b99d6c1a4e03731c
|
/course/st790/examples/dental_gls.R
|
98ab394136e09d83889a771b8ecc930a935c8ae5
|
[] |
no_license
|
BowenNCSU/BowenNCSU.github.io
|
21be4597333e51c9e61d8634969ea0fff6fe49a5
|
92e3052252e7b17568aac65a9ebdbc32ac71abef
|
refs/heads/master
| 2018-12-25T01:58:07.464151
| 2018-12-08T01:01:19
| 2018-12-08T01:01:19
| 120,840,945
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,137
|
r
|
dental_gls.R
|
##############################################################################
#
# Using gls() to fit the dental data with artificial missingness
# and unstructured covariance matrix
#
##############################################################################
library(nlme)
# read in the data: 5 values per record -- obsno, child id, age, distance,
# gender (column meanings taken from the SAS `input` statement in the
# comment block below)
dent1 <- matrix(scan("dental_dropout_R.dat"),ncol=5,byrow=TRUE)
child <- dent1[,2]
age <- dent1[,3]
distance <- dent1[,4]
gender <- factor(dent1[,5])
# create time variable for each individual as required for the CorSymm
# correlation structure (assumes 4 occasions per child, in order)
time <- rep(seq(1,4,1),max(child))
# ML fit with gender-specific intercepts and age slopes, unstructured
# within-child correlation (corSymm) plus a separate variance per age
# (varIdent) -- together an unstructured covariance matrix;
# na.action=na.omit drops the artificially missing visits
dental.un <- gls(distance ~ -1 + gender + age:gender,correlation=corSymm(form = ~ time | factor(child)),
weights = varIdent(form = ~ 1 |age),method="ML",na.action=na.omit)
# This is the SAS code using proc mixed to produce the same analysis
# data dent2; infile 'dental_dropout_sas.dat';
# input obsno child age distance gender;
# run;
# proc mixed method=ml data=dent2;
# class child;
# model distance = gender gender*age / noint solution chisq ;
# repeated / type=un subject=child r=2 rcorr=2;
# run;
|
9f949fd59d82fa2e1fc439b008f767719bacbbe3
|
726a92a53407406654d5498be6170ad86b02502d
|
/Scripts/varioplot.R
|
b0edae809df421eacdb451ad6c4213a2b93f75ac
|
[] |
no_license
|
dansmi-hub/SomersetLevels
|
416e7a18b61f31174a318e388e230bbaa248b636
|
6df06d0ec4f90d0b67031c5ffb448d41765633e5
|
refs/heads/master
| 2023-03-05T18:31:26.897957
| 2021-02-11T12:38:14
| 2021-02-11T12:38:14
| 269,067,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,176
|
r
|
varioplot.R
|
library(tidyverse)
library(gstat)
library(sp)
library(ncf)
library(raster)
# -------------------------------------------------------------------------
# Load the prepared dataset and keep complete cases only.
data <- read_rds("Data/hmscdata.rds")
data <- data %>% ungroup() %>% na.omit() %>% as.data.frame()
# Keep plot id, total larvae count, the four species counts, and the
# site coordinates (x, y).
d <- data %>% select(plot_id, totals_n_larvae,
an_maculipennis, an_claviger,
cx_pipiens, cs_annulata,
x, y)
# Quick look at the spatial layout of the sampling sites.
plot(d$x, d$y)
# Promote d to a SpatialPointsDataFrame so sp/gstat functions can use it.
coordinates(d) = ~x+y
# -------------------------------------------------------------------------
# Bubble maps of counts for each species, arranged in a 2x2 grid.
par(mfrow = c(2,2))
bubble(d, zcol = "an_maculipennis")
bubble(d, zcol = "an_claviger")
bubble(d, zcol = "cx_pipiens")
bubble(d, zcol = "cs_annulata")
par(mfrow = c(1,1))
# Empirical semivariogram of Cx. pipiens counts (intercept-only trend),
# then overlay a Gaussian variogram model for visual comparison.
V <- variogram(cx_pipiens ~ 1, data = d)
plot(V)
VM <- vgm(psill=0.015, model="Gau", nugget=0.001, range=1000)
plot(V, model=VM)
# (removed a stray no-argument `variogram()` call here, which always errored)
# -------------------------------------------------------------------------
# Rasterise total larvae counts on rounded (integer) coordinates.
foo <- data %>% dplyr::select(x, y, totals_n_larvae)
rounded <- foo %>% round()
# BUG FIX: original was `rasterFromXYZ(rounded, res = )` -- an empty `res`
# argument is a syntax error; let rasterFromXYZ infer the resolution.
counts <- rasterFromXYZ(rounded)
# BUG FIX: the title must be passed as `main =`, not positionally.
plot(counts, main = "Abundance Distribution (All)")
# NOTE(review): the line below references objects (`Precip_in`, `P`, `grd`)
# never defined in this script -- it looks like a pasted IDW template.
# Commented out until the inputs exist.
# P.idw <- gstat::idw(Precip_in ~ 1, P, newdata=grd, idp=2.0)
|
0915c2d9d1650c51f266fcec6b8b2a1638399016
|
bc7120d924070eb3a482923f20e7507dcae0a751
|
/simulation/n2_02/sum.R
|
295a052d3dc78d0b779689c58db0181f5b55473b
|
[] |
no_license
|
dragontaoran/proj_two_phase_mexy
|
803ec584f2eb260ec48901126cc933032ce8edd1
|
ccbf3365b6285e0bc6869f3c907703f2a8217760
|
refs/heads/master
| 2022-12-14T01:16:51.212254
| 2020-09-12T17:06:18
| 2020-09-12T17:06:18
| 159,406,581
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
sum.R
|
# Aggregate per-job simulation results: for each (hn, n2) setting, read the
# NJOB .RData files written by the simulation jobs (each defines a `results`
# object) and save the row-bound combination under results/.
# NOTE(review): p, rho and nsieve appear unused in this script -- presumably
# shared configuration with the simulation scripts; TODO confirm.
p <- 0.6
rho <- 0.3
hn_set <- c(30, 40, 60)
nsieve <- 20
NJOB <- 100
n2_set <- c(25)

dir.create("results", showWarnings = FALSE)

for (hn in hn_set) {
    for (n2 in n2_set) {
        prefix <- paste0("n2_", n2, "_hn", hn)
        # Collect each job's `results` into a list and bind once at the end
        # (avoids O(n^2) copying from growing with rbind inside the loop).
        pieces <- vector("list", NJOB)
        for (njob in seq_len(NJOB)) {
            load(paste0("res/", prefix, "/", njob, ".RData"))  # defines `results`
            pieces[[njob]] <- results
        }
        results <- do.call(rbind, pieces)
        print(dim(results))
        save(list="results", file=paste0("results/", prefix, ".RData"))
    }
}
|
d7fa957a2bd7a850ce491f3d6a982406aed0159d
|
b1480938e9ce126a1b349ddc288cedff87ee54a8
|
/src/shape_files_to_RData.R
|
9c378142b5a1311d4aa89c57ff04e8962a5c7739
|
[
"Apache-2.0"
] |
permissive
|
tsdataclinic/newerhoods
|
d4452b34d14868f95ca1fb4e7d82b406bd859702
|
c05fba4e8fdcac3f64bf4dbb480ff5302729098a
|
refs/heads/master
| 2023-02-05T08:37:07.832895
| 2023-01-27T18:33:27
| 2023-01-27T18:33:27
| 162,489,045
| 38
| 7
|
Apache-2.0
| 2019-09-30T21:22:08
| 2018-12-19T20:47:04
|
HTML
|
UTF-8
|
R
| false
| false
| 1,334
|
r
|
shape_files_to_RData.R
|
### saving shapefiles as RData for faster loading
# Each layer is read with rgdal::readOGR, (mostly) reprojected to WGS84
# lon/lat, and saved under its own variable name so load() restores it with
# the same name downstream. The objects also remain in the workspace after
# this script runs, so later code may rely on them directly.
# NOTE(review): census_tracts and pumas are NOT reprojected -- presumably the
# source shapefiles are already in WGS84; verify before relying on this.
census_tracts <- readOGR("../data/shapefiles/2010 Census Tracts/geo_export_4d4ca7d0-0c46-467e-8dee-99c93361f914.shp",stringsAsFactors = FALSE)
save(census_tracts,file = "../newerhoods/clean_data/census_tracts.RData")
pumas <- readOGR("../data/shapefiles/Public Use Microdata Areas (PUMA)/geo_export_112df737-99d9-4599-8357-4c0b1e37faeb.shp")
save(pumas,file="../newerhoods/clean_data/pumas.RData")
ntas <- readOGR("../data/shapefiles/nynta_18d/nynta.shp")
ntas <- spTransform(ntas,CRS("+proj=longlat +ellps=WGS84 +no_defs"))
save(ntas,file="../newerhoods/clean_data/ntas.RData")
cds <- readOGR("../data/shapefiles/nycd_18d/nycd.shp")
cds <- spTransform(cds,CRS("+proj=longlat +ellps=WGS84 +no_defs"))
save(cds,file="../newerhoods/clean_data/cds.RData")
precincts <- readOGR("../data/shapefiles/Police Precincts/geo_export_e7d15636-2d89-486a-bd6d-41a7dfa67b3d.shp")
precincts <- spTransform(precincts,CRS("+proj=longlat +ellps=WGS84 +no_defs"))
save(precincts,file="../newerhoods/clean_data/precincts.RData")
school_dists <- readOGR("../data/shapefiles/School Districts/geo_export_af10107a-e4ca-45b9-a03e-0717947ea03b.shp")
school_dists <- spTransform(school_dists,CRS("+proj=longlat +ellps=WGS84 +no_defs"))
save(school_dists,file="../newerhoods/clean_data/school_dists.RData")
|
0c0375caad2e6c50dbaaa16149d7afa77ad7bde0
|
df0a43d45e3b4751bdf661afcfa23151cba8758f
|
/man/expmeasure.Rd
|
6f4323d39fc16b592eccddad9eb5c213f7888825
|
[] |
no_license
|
igemsoftware2021/expmeasure
|
9e1b7838de1c3984fc0a156bb7ca92dd1ab60c0d
|
7be46cfac6ecd48a6d79ebbe5ac975b621c779b3
|
refs/heads/main
| 2023-09-05T20:03:12.651201
| 2021-10-15T13:25:49
| 2021-10-15T13:25:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 807
|
rd
|
expmeasure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expmeasure.R
\name{expmeasure}
\alias{expmeasure}
\title{Open Expmeasure software}
\usage{
expmeasure()
}
\description{
\code{expmeasure} opens a shiny-based software that helps iGEMers to
analyse their data in part characterization experiments.
}
\details{
This function opens opens a shiny-based software that provides a range of
statistical tools which can be useful for part characterization in iGEM.
Data used in this software should be database file formats with each column
specifies a variable and each row for an observation.
}
\examples{
##Once you execute the following function
## It will not stop until you close the window of "Expmeasure" GUI
\dontrun{
expmeasure()
}
}
\author{
Wu Hao-Ran <haoranwu@zju.edu.cn>
}
|
43cc3cbbd2b04f34bbd7ed460ce98e939b06e289
|
d46a845e019d4928a3827aac2529a3500a9b2d90
|
/compare_kipu_and_matched_controls.R
|
5486d2090910bea64ba7c50d3be647e446f61b49
|
[] |
no_license
|
jtsuvile/kipuklinikka
|
5bf48f082a547d35bc8ae6269ab98c0e35adcaf5
|
502123e80aaea15ceb7bc70fdcda4bc5e4fc4460
|
refs/heads/main
| 2023-09-03T08:36:57.659157
| 2021-11-05T12:35:36
| 2021-11-05T12:35:36
| 308,590,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,393
|
r
|
compare_kipu_and_matched_controls.R
|
# Compare bodily sensation maps between chronic pain patients and matched
# controls. This section loads both groups, derives total-colouring columns
# via helpers from the sourced file, and reshapes to one row per
# subject x emotion with pos / neg / total colouring proportions.
# NOTE(review): setwd()/relative paths tie this script to one machine;
# make_total_colouring_columns() and rename_emotions() are defined in the
# sourced helper file, not here.
setwd('/Users/juusu53/Documents/projects/kipupotilaat/')
source('./code/helper_functions_for_r_analysis.R')
library(psych)
library(RColorBrewer)
library(tidyverse)
library(apaTables)
library(rstatix)
library(ggpubr)
library(WRS2)
library(raincloudplots)
library(gghalves)
# NB: fix old style plot titles, send both new and old
subs <- read.csv('data/all_pain_patients_with_activations_19_10_2020.csv',
na.strings = 'NaN')
subs$batch <- 'patient'
subs_control <- read.csv('data/matched_controls_with_activations_18_11_2020.csv',
na.strings = 'NaN')
subs_control$batch <- 'control'
subs_fixed <- make_total_colouring_columns(subs) %>% rename_emotions()
subs_control_fixed <- make_total_colouring_columns(subs_control) %>% rename_emotions()
subs_all_big <- subs_fixed %>%bind_rows(subs_control_fixed)
## total pixels
# Long format: split e.g. "sadness_pos_color" into emotion ("sadness") and
# type ("pos"), then widen so each row has pos/neg/total columns. Pain and
# sensitivity maps are excluded here. "patient" is the factor reference level.
data_long <- subs_all_big %>%
select(subid, sex, batch, sadness_pos_color:neutral_total) %>% select(-contains("pain")) %>% select(-contains("sensitivity")) %>%
pivot_longer(sadness_pos_color:neutral_total, names_to = "emotion", values_to="prop_coloured") %>%
separate(emotion, into=c("emotion", "type", NA)) %>% pivot_wider(names_from=type, values_from=prop_coloured) %>%
mutate(emotion = factor(emotion), subid = factor(subid), batch = factor(batch, levels=c('patient', 'control'))) %>%
rename(group = batch)
# Assumption checks and group x emotion ANOVAs on the colouring proportions,
# plus per-cell descriptive summaries (mean, sd, se) used by the figures.
outliers_total_pixels <- data_long %>% group_by(group, emotion) %>% identify_outliers(total)
data_long %>% group_by(group, emotion) %>% shapiro_test(total)
ggqqplot(data_long, "total", ggtheme = theme_bw()) +
facet_grid(emotion ~ group)
# activations and deactivations
basic_anova <- lm(total ~ group * emotion, data = data_long)
summary(basic_anova)
apa.aov.table(basic_anova, filename = "Table1_APA.doc", table.number = 1)
# Robust (trimmed-means) mixed ANOVA as a sensitivity analysis; subjects are
# the within factor id.
special_anova <- bwtrim(total ~ group * emotion, id=subid, data = data_long)
special_anova
summarized_total <- data_long %>% group_by(emotion, group) %>%
summarise(coloured = mean(total, na.rm=T), sd = sd(total, na.rm=T), n = n(), na_nums= sum(is.na(total))) %>%
mutate(se = sd/sqrt(n))
summary_for_reporting_1 <- data_long %>% group_by(group) %>%
summarise(coloured = mean(total, na.rm=T), sd = sd(total, na.rm=T), n = n(), na_nums= sum(is.na(total)))
summary_for_reporting_2 <- data_long %>% group_by(emotion) %>%
summarise(coloured = mean(total, na.rm=T), sd = sd(total, na.rm=T), n = n(), na_nums= sum(is.na(total)))
# positive activations
basic_anova_pos <- aov(pos ~ group * emotion, data = data_long)
summary(basic_anova_pos)
summarized_pos <- data_long %>% group_by(emotion, group) %>%
summarise(coloured = mean(pos, na.rm=T), sd = sd(pos, na.rm=T), n = n(), na_nums= sum(is.na(pos))) %>%
mutate(se = sd/sqrt(n))
# negative (inactivations)
basic_anova_neg <- aov(neg ~ group * emotion, data = data_long)
summary(basic_anova_neg)
summarized_neg <- data_long %>% group_by(emotion, group) %>%
summarise(coloured = mean(neg, na.rm=T), sd = sd(neg, na.rm=T), n = n(), na_nums= sum(is.na(neg))) %>%
mutate(se = sd/sqrt(n))
# Figure code. Three variants of the same comparison are produced:
#   1) dot-and-box panels (g2 activations, g3 deactivations, g combined),
#   2) "old style" mean +/- SE line plots (p1, p2, p),
#   3) a facet_wrap version combining all three panel types.
# Each variant is exported to a PNG/PDF under figures/.
g <- ggplot(data = data_long, aes(y = total, x = emotion, fill = group, col=group)) +
# geom_half_violin(data=data_long, aes(y=total, x=emotion, fill=group),
# position = position_nudge(x = .2, y = 0), alpha = .8, side = "r") +
geom_point(position = position_jitterdodge(jitter.width = .15, dodge.width = 0.6), size = .9, alpha = 0.8) +
geom_boxplot(width=0.4, outlier.shape = NA, alpha = 0.5, position = position_dodge(width=0.6), notch=TRUE, col='black') +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral')) +
expand_limits(x = 5.25) +
labs(y=' ', x='') +
#coord_flip() +
ggtitle("C Combined") +
theme_classic() +
theme(text = element_text(size=20),
plot.margin = margin(1.5,0.1,0.1,0.1, "cm"),
axis.text.x = element_text(angle = 45, hjust = 1))
#g
g2 <- ggplot(data = data_long, aes(y = pos, x = emotion, fill = group, col=group)) +
# geom_half_violin(data=data_long, aes(y=total, x=emotion, fill=group),
# position = position_nudge(x = .2, y = 0), alpha = .8, side = "r") +
geom_point(position = position_jitterdodge(jitter.width = .15, dodge.width = 0.6), size = .9, alpha = 0.8) +
geom_boxplot(width=0.4, outlier.shape = NA, alpha = 0.5, position = position_dodge(width=0.6), notch=TRUE, col='black') +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral')) +
expand_limits(x = 5.25) +
labs(y='Proportion of body area coloured', x='') +
#coord_flip() +
ggtitle("A Activations") +
theme_classic() +
theme(text = element_text(size=20),
plot.margin = margin(1.5,0.1,0.1,0.1, "cm"),
axis.text.x = element_text(angle = 45, hjust = 1))
g3 <- ggplot(data = data_long, aes(y = neg, x = emotion, fill = group, col=group)) +
# geom_half_violin(data=data_long, aes(y=total, x=emotion, fill=group),
# position = position_nudge(x = .2, y = 0), alpha = .8, side = "r") +
geom_point(position = position_jitterdodge(jitter.width = .15, dodge.width = 0.6), size = .9, alpha = 0.8) +
geom_boxplot(width=0.4, outlier.shape = NA, alpha = 0.5, position = position_dodge(width=0.6), notch=TRUE, col='black') +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral')) +
expand_limits(x = 5.25) +
labs(y=' ', x='') +
#coord_flip() +
theme_classic() +
ggtitle("B Deactivations") +
theme(text = element_text(size=20),
plot.margin = margin(1.5,0.1,0.1,0.1, "cm"),
axis.text.x = element_text(angle = 45, hjust = 1))
ggarrange(g2, g3, g,
#labels = c("A activations", "B deactivations", "C activations and deactivations"),
font.label = c(size = 24),
hjust = c(-0.2,-0.55,-0.45),
vjust = 1.5,
ncol = 3, nrow = 1,
legend = 'right',
common.legend = TRUE) %>%
ggexport(filename = '/Users/juusu53/Documents/projects/kipupotilaat/figures/helsinki_manuscript_figs/n_colored_pixels_patients_and_controls_dotandbox.png',
width = 1000, height = 400, pointsize = 30)
## old style PLOT
# Shared dodge/jitter positions so raw points and summary lines align.
pd <- position_dodge(0.4)
pjd <- position_jitterdodge(jitter.width = .15, dodge.width = 0.4)
p <- ggplot(data=summarized_total, aes(x=emotion, y=coloured, colour=group, group=group)) +
#geom_jitter(data=data_long, aes(x=emotion, y=total, colour=group, group=group), alpha=0.3) +
geom_point(data=data_long,aes(x=emotion, y=total, colour=group, group=group), position = pjd, size = .9, alpha = 0.8) +
geom_errorbar(aes(ymin=coloured-se, ymax=coloured+se), color='black',width=.2, position=pd) +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral'))+
geom_point(position=pd, size=2) +
geom_line(position=pd, size=2) +
theme_classic() +
theme(text = element_text(size=20),
axis.text.x = element_text(angle = 45, hjust = 1))+
coord_fixed(ratio=7)+
theme(plot.margin = margin(1,0.1,0.1,0.1, "cm")) +
labs(color = "Group", x='', y='')
p
##
## positive pixels
p1 <- ggplot(data=summarized_pos, aes(x=emotion, y=coloured, colour=group, group=group)) +
#geom_jitter(data=data_long, aes(x=emotion, y=pos, colour=group, group=group), alpha=0.3) +
geom_point(data=data_long,aes(x=emotion, y=pos, colour=group, group=group), position = pjd, size = .9, alpha = 0.8) +
geom_line(position=pd, size=2) +
geom_errorbar(aes(ymin=coloured-se, ymax=coloured+se), color='black',width=.2, position=pd) +
geom_point(position=pd, size=2) +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral'))+
theme_classic() +
theme(text = element_text(size=20),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.y = element_blank())+
coord_fixed(ratio=7)+
theme(plot.margin = margin(1,0.1,0.1,0.1, "cm")) +
labs(color = "Group", y='Proportion coloured', x='')
p1
## negative pixels
p2 <- ggplot(data=summarized_neg, aes(x=emotion, y=coloured, colour=group, group=group)) +
#geom_jitter(data=data_long, aes(x=emotion, y=neg, colour=group, group=group), alpha=0.3) +
geom_point(data=data_long,aes(x=emotion, y=neg, colour=group, group=group), position = pjd, size = .9, alpha = 0.8) +
geom_errorbar(aes(ymin=coloured-se, ymax=coloured+se), color='black',width=.2, position=pd) +
geom_point(position=pd, size=2) +
geom_line(position=pd, size=2) +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral'))+
theme_classic() +
theme(text = element_text(size=20),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.y = element_blank()) +
coord_fixed(ratio=7) +
theme(plot.margin = margin(1,0.1,0.1,0.1, "cm")) +
labs(color = "Group")
p2
ggarrange(p1, p2, p,
labels = c("A Activations", "B Deactivations", "C combined"), font.label = c(size = 20),
hjust = c(-0.3,-0.5,-0.8),
vjust = 1.1,
ncol = 3, nrow = 1, common.legend = TRUE,
legend='bottom') %>%
ggexport(filename = '/Users/juusu53/Documents/projects/kipupotilaat/figures/n_colored_pixels_patients_and_controls.png',
width = 1300, height = 500, pointsize = 30)
## old style as facet wrap
pd <- position_dodge(0.4)
pjd <- position_jitterdodge(jitter.width = .15, dodge.width = 0.4)
# Re-stack pos/neg/total into one "type" column so a single plot can facet
# over the three panel types; factor labels double as panel titles.
data_extra_long <- data_long %>% pivot_longer(cols=c(pos, neg, total),names_to='type', values_to='Proportion colored') %>%
mutate(type = factor(type, levels = c('pos', 'neg', 'total'), labels = c('A Activations', 'B Deactivations', 'C Combined activations\nand deactivations')))
summarized_neg <- summarized_neg %>% mutate(type= 'neg')
summarized_pos <- summarized_pos %>% mutate(type='pos')
summarized_total <- summarized_total %>% mutate(type='total')
summary_all <- rbind(summarized_neg, summarized_pos, summarized_total) %>%
mutate(type = factor(type, levels = c('pos', 'neg', 'total'), labels = c('A Activations', 'B Deactivations', 'C Combined activations\nand deactivations')))
ggplot(data=summary_all, aes(x=emotion, y=coloured, colour=group, group=group)) +
geom_point(data=data_extra_long,aes(x=emotion, y=`Proportion colored`, colour=group, group=group), position = pjd, size = .9, alpha = 0.5) +
geom_errorbar(aes(ymin=coloured-se, ymax=coloured+se), color='black',width=.2, position=pd) +
geom_point(position=pd, size=2) +
geom_line(position=pd, size=2) +
scale_x_discrete(limits=c('fear','happiness','sadness', 'anger','disgust','surprise','neutral'))+
theme_classic() +
facet_wrap(~type) +
theme(text = element_text(size=20),
axis.text.x = element_text(angle = 45, hjust = 1),
legend.position='bottom',
strip.background = element_blank(),
strip.text = element_text(hjust = 0)) +
coord_fixed(ratio=7) +
theme(plot.margin = margin(1,0.1,0.1,0.1, "cm")) +
labs(color = "Group", y = 'Proportion coloured')
ggsave('/Users/juusu53/Documents/projects/kipupotilaat/figures/helsinki_manuscript_figs/n_colored_pixels_act_deact_combo.pdf',
width=300, height = 150, units = 'mm', limitsize=FALSE)
# ##
#
# t.test(subs$sadness_total, subs_control$sadness_total) # ns
# t.test(subs$happiness_total, subs_control$happiness_total) # *
# t.test(subs$anger_total, subs_control$anger_total) # ns
# t.test(subs$surprise_total, subs_control$surprise_total) # *
# t.test(subs$fear_total, subs_control$fear_total) # ***
# t.test(subs$disgust_total, subs_control$disgust_total) # *
# t.test(subs$neutral_total, subs_control$neutral_total) # ns
#
# t.test(subs$pain_0_pos_color, subs_control$pain_0_pos_color) # ***
# t.test(subs$pain_1_pos_color, subs_control$pain_1_pos_color) # ***
#
# t.test(subs$sensitivity_0_pos_color, subs_control$sensitivity_0_pos_color) # ns
# t.test(subs$sensitivity_1_pos_color, subs_control$sensitivity_1_pos_color) # ***
# t.test(subs$sensitivity_2_pos_color, subs_control$sensitivity_2_pos_color) # ns
#
# # kipu vs crps
# t.test(subs[subs$groups == 'CRPS', 'sadness_total'], subs[subs$groups != 'CRPS', 'sadness_total']) # ns
# t.test(subs[subs$groups == 'CRPS', 'happiness_total'], subs[subs$groups != 'CRPS', 'happiness_total']) # ns
# t.test(subs[subs$groups == 'CRPS', 'anger_total'], subs[subs$groups != 'CRPS', 'anger_total']) # ns
# t.test(subs[subs$groups == 'CRPS', 'surprise_total'], subs[subs$groups != 'CRPS', 'surprise_total']) # ns
# t.test(subs[subs$groups == 'CRPS', 'fear_total'], subs[subs$groups != 'CRPS', 'fear_total']) # **
# t.test(subs[subs$groups == 'CRPS', 'disgust_total'], subs[subs$groups != 'CRPS', 'disgust_total']) # **
# t.test(subs[subs$groups == 'CRPS', 'sadness_total'], subs[subs$groups != 'CRPS', 'sadness_total']) # ns
#
# t.test(subs[subs$groups == 'CRPS', 'sensitivity_0_pos_color'], subs[subs$groups != 'CRPS', 'sensitivity_0_pos_color']) # ns
# t.test(subs[subs$groups == 'CRPS', 'sensitivity_1_pos_color'], subs[subs$groups != 'CRPS', 'sensitivity_1_pos_color']) # ns
# t.test(subs[subs$groups == 'CRPS', 'sensitivity_2_pos_color'], subs[subs$groups != 'CRPS', 'sensitivity_2_pos_color']) # ns
#
# emotions <- c('sadness','happiness','anger','surprise','fear','disgust','neutral')
# pains <- c('current_pain','chronic_pain')
# sensitivity <- c('tactile_sensitivity', 'pain_sensitivity','hedonic_sensitivity')
|
ee2809e1643cf333ffa8fa11ce4aa0b1fd95b1bd
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/Lagoon/01_storm/00_d_merge_storms.R
|
ea15e30500efe3905a2ea7aff0b92e909a2dea72
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 2,648
|
r
|
00_d_merge_storms.R
|
# Merge per-model storm tables (historical + RCP4.5 + RCP8.5) with observed
# storms into one table. Historical and observed data are duplicated under
# both emission labels so they can be facetted against each scenario.
.libPaths("/data/hydro/R_libs35")
.libPaths()
library(data.table)
library(lubridate)
library(dplyr)
options(digits=9)  # (dropped the misspelled options(digit=9), which only set a bogus option)
# Time the processing of this batch of files
start_time <- Sys.time()
################################################################
main_in <- "/data/hydro/users/Hossein/lagoon/00_model_level_storm/"
out_dir <- "/data/hydro/users/Hossein/lagoon/01_storm_cumPrecip/storm/"
if (!dir.exists(out_dir)) {dir.create(path = out_dir, recursive = TRUE)}
################################################################
model_names <- c("bcc-csm1-1", "CanESM2", "CSIRO-Mk3-6-0",
                 "HadGEM2-CC365", "IPSL-CM5A-LR", "MIROC5",
                 "NorESM1-M", "bcc-csm1-1-m", "CCSM4",
                 "GFDL-ESM2G", "HadGEM2-ES365", "IPSL-CM5A-MR",
                 "MIROC-ESM-CHEM", "BNU-ESM", "CNRM-CM5",
                 "GFDL-ESM2M", "inmcm4", "IPSL-CM5B-LR",
                 "MRI-CGCM3")
emission <- c("historical", "rcp45", "rcp85")
# Collect each model's tables in lists and bind once at the end
# (avoids the O(n^2) copying of rbind-in-a-loop).
rcp45_list <- vector("list", length(model_names))
rcp85_list <- vector("list", length(model_names))
hist_list  <- vector("list", length(model_names))
for (m in seq_along(model_names)){
  model <- model_names[m]
  current_45 <- readRDS(paste0(main_in, model, "/rcp45/", "storm.rds"))
  current_85 <- readRDS(paste0(main_in, model, "/rcp85/", "storm.rds"))
  current_hist <- readRDS(paste0(main_in, model, "/historical/", "storm.rds"))
  print (dim(current_45))
  # Normalize model names to underscore form (e.g. "bcc-csm1-1" -> "bcc_csm1_1").
  current_45$model <- gsub("-", "_", current_45$model)
  current_85$model <- gsub("-", "_", current_85$model)
  current_hist$model <- gsub("-", "_", current_hist$model)
  rcp45_list[[m]] <- current_45
  rcp85_list[[m]] <- current_85
  hist_list[[m]]  <- current_hist
}
rcp45_data <- rbindlist(rcp45_list)
rcp85_data <- rbindlist(rcp85_list)
hist_data <- rbindlist(hist_list)
hist_data <- unique(hist_data)
rcp45_data$emission <- "RCP 4.5"
rcp85_data$emission <- "RCP 8.5"
# Historical runs apply to both scenarios, so tag a copy with each label.
hist_data_45 <- hist_data
hist_data_85 <- hist_data
hist_data_45$emission <- "RCP 4.5"
hist_data_85$emission <- "RCP 8.5"
########### Read observed storm:
#
storm_observed <- readRDS(paste0(out_dir, "storm_observed.rds"))
storm_observed_45 <- storm_observed
storm_observed_85 <- storm_observed
storm_observed_45$emission <- "RCP 4.5"
storm_observed_85$emission <- "RCP 8.5"
#
###########
all_storms <- rbind(rcp45_data, rcp85_data,
                    hist_data_45, hist_data_85,
                    storm_observed_45, storm_observed_85)
saveRDS(all_storms, paste0(out_dir, "all_storms.rds"))
print ("Do not be surprised, it is fast indeed. No Error!")
# saveRDS(rcp45_data, paste0(out_dir, "storm_RCP45.rds"))
# saveRDS(rcp85_data, paste0(out_dir, "storm_RCP85.rds"))
# saveRDS(hist_data, paste0(out_dir, "storm_modeled_hist.rds"))
end_time <- Sys.time()
print( end_time - start_time)
|
51e0bcde5c6257cd1c95c9c968a830d119270af5
|
7683a0eaeaabce78c3059445cb0e53a4581bc489
|
/plot3.R
|
9542a586c0182953f98552b183f504de30a09111
|
[] |
no_license
|
pikaff/ExData_Plotting1
|
594de2798b058bdcf666dc8d8d34f642398faca1
|
552ca4a612ebb6f51f709dcf968cd76de5d7dd33
|
refs/heads/master
| 2020-12-30T17:11:51.234717
| 2014-07-10T01:41:56
| 2014-07-10T01:41:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 478
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series over time on one line chart,
# written to plot3.png (480x480). Expects `mydata` (with columns time and
# Sub_metering_1..3) to be restored from mydata.Rda in the hard-coded
# working directory.
setwd("C:/Users/Feng/Downloads/data science/exploratory data analysis/ExData_Plotting1")
load("mydata.Rda")
png("plot3.png", width=480, height=480)
with(mydata, {
  # Black base line for meter 1, then red/blue overlays for meters 2 and 3.
  plot(time, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
  lines(time, Sub_metering_2, col="red")
  lines(time, Sub_metering_3, col="blue")
})
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col=c("black","red","blue"), lty=1)
dev.off()
|
3866c5c4d9c41c1bb3ac2cdc0e1ff61b4a198156
|
0732340eadb6fbf63c4ca67d48e8ade7cbe8a464
|
/run_script.R
|
af471d3ed70ae116a22b64e4deae6fe9fd533aaa
|
[] |
no_license
|
gstewart12/delmarva-baywatch
|
244410deec3c0c5149a806a1884aaa682633fd45
|
eac5638514b974aab0e6f390bf1d5cb0cbafa886
|
refs/heads/master
| 2023-07-09T08:41:07.509024
| 2021-08-16T20:21:22
| 2021-08-16T20:21:22
| 254,463,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,201
|
r
|
run_script.R
|
### ============================================================================
# Run a script =================================================================
### ============================================================================
# Set the session information
# These settings will be used to determine which processing options are
# implemented in the script. It will also form part of the saved documentation,
# so options should be adjusted here rather than in the script directly.
# NOTE(review): rm(list = ls()) wipes the current workspace; generally
# discouraged in scripts, but kept since the sourced processing scripts
# presumably expect a clean environment -- confirm before removing.
rm(list = ls())
settings <- list(
name = "Graham Stewart", # user who ran the script
email = "grahamstewart12@gmail.com", # user's email contact
site = "JLA", # three letter site code
year = 2019, # four digit year
date = lubridate::today(), # date script was run
info = devtools::session_info() # R session info: R, OS, packages
)
# Set the script name (see below for options)
script <- "01_combine_eddypro"
# Run the script
# The chosen processing step is sourced from the engine directory; it reads
# `settings` and `script` from this environment.
source(file.path(
"~/Desktop/DATA/Flux/tools/engine", "processing", paste0(script, ".R")
))
# Available scripts:
# 01_combine_eddypro
# 02_correct_eddypro
# 03_biomet_qc
# 04_biomet_gapfill
# 05_footprint
# 06_flux_qc
# 07_footprint_cover
# 08_daily_summary
|
0ccc38dd6e520c58c8f70f7474489fecb8c2b00f
|
28e3fb6150fa105f68821c562d02b4e1341aa36d
|
/Laboratorio 3.R
|
aec0356c46a74487ee002a830a1772b66414dd6c
|
[] |
no_license
|
GordoEnForma/Rstudiazo
|
b62f8138dd6aac14eb2c3b1d97fde094999da04b
|
0446be62d8854f00306ce81a31aa5d48da0d33af
|
refs/heads/main
| 2023-08-17T14:31:51.875351
| 2021-09-29T15:33:39
| 2021-09-29T15:33:39
| 397,639,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,374
|
r
|
Laboratorio 3.R
|
# Lab 3: introductory R tutorial using the built-in iris dataset.
# Walks through data frames, vectors, subsetting, functions, lists and
# base plotting. Most lines are meant to be run interactively and print
# their result.
print(iris)
str(iris)
summary(iris)
my.iris <- iris
head(my.iris)
ls() # objects we have created in memory
rm(my.iris) # delete the my.iris object
ls()
my.iris <- iris
head(my.iris)
# Add a derived column, then drop it again by assigning NULL.
my.iris$Petal.Area <- my.iris$Petal.Length * my.iris$Petal.Width
my.iris$Petal.Area <- NULL
my.iris <-iris[order(iris$Sepal.Length),]
# Creating vectors
x <-1:10
y <-iris$Species
y # made up of the species categories (levels) of the iris dataset
ls()
1:5
5:1
c(1:5,5:1)
seq(1,5)
rep(1:5,5) # repeat a sequence n times
rep(1:5,each = 5) # repeat each element n times
# Exploring vectors
plot(x)
table(y)
summary(y)
head(x)
tail(x)
table(iris$Species)
x <- x^2
x
# Vector subsetting: positions, logical masks, and negative indices.
x[1:3]
x[c(1,3)]
x[x>25]
x[3:1]
x[-(1:2)]
x
x[-length(x)] # removes the element at the given position (here: the last one)
w <-table(iris$Species)
w["setosa"]
w[c("setosa","virginica")]
my.iris <- iris
colnames(my.iris)[5]<-"Especie"
colnames(my.iris)
z<-table(iris$Species)
names(z)
names(z)[1] <- "Tipo 1"
names(z)
z <-1:10
z
z[z<5]<-100
x<-1:10
sample(x,4) # pick 4 random elements from vector x
sample(x,50,replace=TRUE)
# Functions
x<- 1:10
mean(x)
max(x)
median(x)
sum(x)
prod(x)
# suma_cuadrados: sum of squares of a numeric vector.
suma_cuadrados <- function(x) sum(x*x)
suma_cuadrados(x)
# media: arithmetic mean computed by hand (sum / length).
media <- function(x){
longitud <-length(x)
suma<-sum(x)
suma/longitud
}
media(x)
tapply(iris$Petal.Length, iris$Species, mean)
# Creating lists
is.list(iris)
x <-list("a" = 2.5, "b" = TRUE, c= 1:3)
x$a
typeof(x)
length(x)
str(x)
x <- list(2,TRUE,1:3)
x
x <- list(name = "John", age = 19, speaks = c("English", "French"))
x$name
x$age
x$speaks
# List subsetting with [ ] keeps a list; [[ ]] extracts the element.
x[c(1:2)]
x[-2]
x[c(T,F,F)]
x[c("age","speaks")]
x[["name"]] <-"Patricia";x
x[["married"]] <- TRUE
x[["age"]] <- NULL
str(x)
x$married <- NULL
# Plotting the data
hist(iris$Sepal.Width)
hist(iris$Sepal.Width,main="Iris: Histograma de la anchura de los petalos",
xlab ="anchura del sépalo", ylab = "frecuencia",
col = "steelblue")
barplot(table(iris$Species))
plot(iris$Petal.Length, iris$Petal.Width, main="Edgar Anderson's Iris Data")
boxplot(iris$Sepal.Width ~ iris$Species, col = "gray", main = "Especies
de iris\nsegún la anchura del sépalo")
|
953f49c1d311695f3529d91e7257325e96a20c89
|
25055d164bd577f1b0b0f7d0c4467917cedba566
|
/PowergridNetworking_Original/EntropicDegreeAttack.R
|
ca8367a48cbbb22253c6a7c1ae59373f597c976b
|
[] |
no_license
|
JonnoB/Setse_optimisation
|
2ffe207c7930542fa57663c88862fdd8152c124f
|
761f3ed620b065157bb33f99b4ff7d10670fb307
|
refs/heads/master
| 2020-11-28T18:34:25.258039
| 2020-03-09T19:42:20
| 2020-03-09T19:42:20
| 229,894,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
EntropicDegreeAttack.R
|
EntropicDegreeAttack <-function(g, Target = "Nodes", Number = 1){
  #This strategy starts with the highest entropicdegree nodes and removes them
  #in alpha-numeric order.
  #As degree is a function of nodes it can only be performed on vertices.
  #g: a network, an igraph object
  #Target: Either "Nodes" or "Edges"; only "Nodes" is supported here
  #Number: the total number of nodes to remove
  #Returns: a character vector of up to Number node names, highest
  #entropic degree first.
  if(Target == "Nodes"){
    df <- as_data_frame(g, what = "vertices") %>%
      mutate(metric = EntropicDegree(g, Scale = TRUE)) %>%
      arrange(desc(metric))

    # head() instead of df$name[1:Number]: avoids NA padding when Number
    # exceeds the node count, and returns an empty vector for Number = 0
    # (1:0 would wrongly index positions 1 and 0).
    Out <- head(df$name, Number)

  } else {

    stop("Only Nodes available for the Degree attack strategy")

  }

  return(Out)
}
|
1c10ee02665e2b6199ef76db3ee1afa672388807
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Probability_And_Statistics_For_Engineers_by_Richard_L._Scheaffer,_Madhuri_S._Mulekar,_James_T._Mcclave/CH4/EX4.37/ex_4_37.R
|
203acc03d06e92136915448d6ab97f606a1f7156
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 335
|
r
|
ex_4_37.R
|
# Bayes' rule: probability a defective tire came from supplier 1.
# P(S1 | D) = P(S1) P(D | S1) / [ P(S1) P(D | S1) + P(S2) P(D | S2) ]
supplier1 <- 0.40            # P(tire from supplier 1)
supplier2 <- 0.60            # P(tire from supplier 2)
defective_supplier1 <- 0.10  # P(defective | supplier 1)
defective_supplier2 <- 0.05  # P(defective | supplier 2)

joint1 <- supplier1 * defective_supplier1  # P(supplier 1 AND defective)
joint2 <- supplier2 * defective_supplier2  # P(supplier 2 AND defective)
posterior <- joint1 / (joint1 + joint2)

cat("prob tire comes from supplier1 if it is defective", posterior)
|
506e3e120f78769d48bc6dc0ab57baaae865a25c
|
557d00ba378311dd1cf9ed83526aca462a374b82
|
/wordcloud.R
|
85990558d388df1682b681edb39a40bf77538d01
|
[] |
no_license
|
Cooooe/dataminingToy
|
543a225c07c1b52674e187f811446f4a5c37c70f
|
866eb044eb211b8782d5d647e8a34f9636356bd6
|
refs/heads/master
| 2020-12-31T00:54:45.763571
| 2017-02-01T05:49:59
| 2017-02-01T05:49:59
| 80,593,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,838
|
r
|
wordcloud.R
|
library(KoNLP)
library(wordcloud)
library(RColorBrewer)
library(tm)
library(arules)
library(combinat)
par(family="AppleGothic")
file_loc <- '/Users/KangHyungWon/Documents/Source/project/datamining/data.csv'
f <- read.csv(file_loc)

## TITLE TEXT MINING
# Build a corpus from issue titles, lower-case it, strip English stopwords
# plus a custom list of boilerplate Korean request/issue words, then draw a
# word cloud of the remaining term frequencies.
myCorpus <- Corpus(VectorSource(f$title))
inspect(myCorpus)
myCorpus <- tm_map(myCorpus, content_transformer(tolower))
myCorpus <- tm_map(myCorpus, removeWords, stopwords('english'))
# One removeWords pass over the whole custom stopword vector
# (replaces 16 separate tm_map calls with identical word lists).
custom_stops_title <- c('문의드립니다', '요청의', '요청드립니다', '못하는',
                        '관련하여', '이슈입니다', '발생하는', '사용시',
                        '실행시', '오류의', '안됩니다', 'error',
                        '이동시', '확인요청', '수정요청', '의')
myCorpus <- tm_map(myCorpus, removeWords, custom_stops_title)
myCorpus <- tm_map(myCorpus, removePunctuation)
myCorpus <- tm_map(myCorpus, stripWhitespace)
myCorpus <- tm_map(myCorpus, removeNumbers)
# Term-document matrix -> per-term totals, sorted descending for the cloud.
myDTM <- TermDocumentMatrix(myCorpus)
m <- as.matrix(myDTM)
v <- sort(rowSums(m), decreasing = TRUE)
d <- data.frame(word = names(v), freq = v)
pal <- brewer.pal(5,"Set1")
wordcloud(words = d$word, freq = d$freq, min.freq = 2,scale=c(5,.9),max.words=200, random.order=FALSE, rot.per=0.1,random.color=T,colors=pal)
## CONTENT TEXT MINING
# Same pipeline as the title cloud, applied to the issue body text.
f$content
contentCorpus = Corpus(VectorSource(f$content))
inspect(contentCorpus)
contentCorpus <- tm_map(contentCorpus, content_transformer(tolower))
contentCorpus <- tm_map(contentCorpus, removeWords, stopwords('english'))
# Same custom stopwords as for titles, plus 'span' -- presumably HTML tag
# residue in the body text (TODO confirm). One removeWords pass replaces
# the 17 separate tm_map calls.
custom_stops_content <- c('문의드립니다', '요청의', '요청드립니다', '못하는',
                          '관련하여', '이슈입니다', '발생하는', '사용시',
                          '실행시', '오류의', '안됩니다', 'error',
                          '이동시', '확인요청', '수정요청', '의', 'span')
contentCorpus <- tm_map(contentCorpus, removeWords, custom_stops_content)
contentCorpus <- tm_map(contentCorpus, removePunctuation)
contentCorpus <- tm_map(contentCorpus, stripWhitespace)
contentCorpus <- tm_map(contentCorpus, removeNumbers)
# Term-document matrix -> per-term totals, sorted descending for the cloud.
contentDTM <- TermDocumentMatrix(contentCorpus)
contentm <- as.matrix(contentDTM)
contentv <- sort(rowSums(contentm), decreasing = TRUE)
contentd <- data.frame(word = names(contentv), freq = contentv)
pal <- brewer.pal(5,"Set1")
wordcloud(words = contentd$word, freq = contentd$freq, min.freq = 2,scale=c(5,.9),max.words=200, random.order=FALSE, rot.per=0.1,random.color=T,colors=pal)
## YEAR DATA EXTRACT
# Count occurrences of each date token in the YMD column and plot the
# (sorted) frequencies as a bar chart.
yearCorpus <- Corpus(VectorSource(f$YMD))
yearCorpus <- tm_map(yearCorpus, stripWhitespace)
yearCorpus <- tm_map(yearCorpus, removePunctuation)
yearDTM = TermDocumentMatrix(yearCorpus)
ym = as.matrix(yearDTM)
ymv = sort(rowSums(ym), decreasing = TRUE)
ymd = data.frame(word =names(ymv), freq=ymv)
ymd <- ymd[do.call(order, ymd), ]  # reorder rows by word, then freq
# NOTE(review): newData is read here but not used before the barplot --
# it is only referenced much later via typeof(newData); confirm intent.
newData = read.table('/Users/KangHyungWon/Documents/Source/project/datamining/data.txt')
barplot(ymd$freq, names.arg = ymd$word, border = NA, ylim = c(0, 200), las=1, ylab = "frequency", xlab = "Month")
# Korean noun extraction (KoNLP) from the titles, manual cleanup of common
# boilerplate terms, then a final word cloud of noun frequencies.
titleList = list(f$title)
f$title
# extractNoun is applied twice; the second pass re-extracts nouns from the
# first pass's output -- presumably to split compound tokens (TODO confirm).
nouns <- sapply(titleList,extractNoun,USE.NAMES = F)
nouns <- sapply(nouns,extractNoun,USE.NAMES = F)
nouns
undata <- unlist(nouns)
typeof(f)
typeof(newData)
# Keep only tokens of 2+ characters.
nouns <- Filter(function(x){nchar(x)>=2}, undata)
# Strip boilerplate terms. NOTE(review): patterns are regexes, so the dots
# in '1.', '4.', '2.', '8.', '방문.' match ANY character, not a literal dot.
nouns <- gsub('요청', '', nouns)
nouns <- gsub('문의', '', nouns)
nouns <- gsub('관련', '', nouns)
nouns <- gsub('오류', '', nouns)
nouns <- gsub('문제', '', nouns)
nouns <- gsub('발생', '', nouns)
nouns <- gsub('발급', '', nouns)
nouns <- gsub('확인', '', nouns)
nouns <- gsub('1.', '', nouns)
nouns <- gsub('적용', '', nouns)
nouns <- gsub('4.', '', nouns)
nouns <- gsub('27', '', nouns)
nouns <- gsub('2.', '', nouns)
nouns <- gsub('8.', '', nouns)
nouns <- gsub('방문.', '', nouns)
# Normalize the many spellings of iOS / Push to one canonical form.
# NOTE(review): gsub('Push', 'Push', ...) is a no-op and could be dropped.
nouns <- gsub('ios', 'iOS', nouns)
nouns <- gsub('IOS', 'iOS', nouns)
nouns <- gsub('Push', 'Push', nouns)
nouns <- gsub('PUSH', 'Push', nouns)
nouns <- gsub('push', 'Push', nouns)
nouns <- gsub('푸시', 'Push', nouns)
nouns <- gsub('33', '', nouns)
# Build an arules transactions object from the named tokens (one "Tr<n>"
# id per token).
nouns2 <- nouns
names(nouns2) = paste("Tr", 1: length(nouns2), sep="")
wordtran <- as(nouns2, "transactions")
# Round-trip through a text file, then count token frequencies.
write(unlist(nouns), '/Users/KangHyungWon/Documents/data-table.txt')
revised <- read.table('/Users/KangHyungWon/Documents/data-table.txt')
wordcount <- table(revised)
wordcount <- wordcount[do.call(order, wordcount), ]
wordcount
pal <- brewer.pal(5,"Set1")
par(family='AppleGothic')  # font that can render Korean glyphs on macOS
wordcloud(names(wordcount), freq=wordcount, scale=c(5,.5),rot.per=0.15,min.freq=3,max.words=200,random.order=F,random.color=T,colors=pal)
|
c279d6a8fb2fb03f5eca9606a9b50fb65a3a4cb7
|
81032fad71d91d4f2a41cc505969b59f5e7dc0a5
|
/scripts/fig3_tnk.R
|
4865d0d59fb49a30fc75cef56c63750293a378f9
|
[] |
no_license
|
yujijun/covid_analysis
|
836c232b5ea5bb87856fbb61731bb399dec4295c
|
88ded1c5c16ea8e2b949bb471ac4ed470267b69b
|
refs/heads/master
| 2022-12-03T18:14:05.476184
| 2020-08-22T11:58:54
| 2020-08-22T11:58:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,719
|
r
|
fig3_tnk.R
|
# --- Subset T/NK cells and re-cluster them ---------------------------------
# NOTE(review): depends on a full Seurat object `seu` and a SingleR result
# `pred_cell_fine` created elsewhere in the project.
#extract t/nk cells
seu[,seu$celltype %in% c("T cell","NK cell")] ->seu_T_NK
# remove doublets because they drive clustering
seu_T_NK <- seu_T_NK[,seu_T_NK$doublet==FALSE]
seu_T_NK$previous_cluster <- seu$seurat_clusters[seu$celltype %in% c("T cell","NK cell")]
# remove unwanted genes (mitochondrial, ACTB, ribosomal) before integration;
# NOTE(review): "MT-" and "RPS"/"RPL" are unanchored patterns and will also
# drop any gene merely containing those substrings — confirm intended.
seu_T_NK = seu_T_NK[!(grepl("^MTRNR",rownames(seu_T_NK)) | grepl("MT-",rownames(seu_T_NK)) | grepl("ACTB",rownames(seu_T_NK))| grepl("RPS",rownames(seu_T_NK)) |grepl("RPL",rownames(seu_T_NK)) ),]
library(Seurat)
library(SeuratData)
library(SeuratWrappers)
# Batch-correct per sample with fastMNN, then UMAP + graph clustering.
seu_T_NK <- RunFastMNN(object.list = SplitObject(seu_T_NK, split.by = "Sample"),features = 2000)
seu_T_NK <- RunUMAP(seu_T_NK, reduction = "mnn", dims = 1:30,seed.use=22L)
seu_T_NK <- FindNeighbors(seu_T_NK, reduction = "mnn", dims = 1:30 ,k.param = 10)
seu_T_NK <- FindClusters(seu_T_NK)
#seu_T_NK <- FindClusters(seu_T_NK,resolution = 0.4)
library(SingleR)
# Cross-tabulate SingleR fine labels against the new clusters for QC.
pred_cell_fine_tnk <- pred_cell_fine[rownames(pred_cell_fine) %in% colnames(seu_T_NK),]
tab <- table(Assigned = pred_cell_fine_tnk$pruned.labels, Cluster = seu_T_NK$seurat_clusters)
library(pheatmap)
pdf("T_NK/seurat_pheatmap_cluster_labels_fine.pdf",height = 7,width=8)
p <-pheatmap(log2(tab + 10), color = colorRampPalette(c("white", "blue"))(101))
print(p)
dev.off()
#Idents(seu_T_NK) <- seu_T_NK$RNA_snn_res.0.4
Idents(seu_T_NK) <- seu_T_NK$seurat_clusters
# Per-cluster marker genes (positive markers only); persist both objects.
FindAllMarkers(seu_T_NK, only.pos = TRUE) -> seu_nk_markers
saveRDS(seu_T_NK,"T_NK/seu_T_NK.rds")
saveRDS(seu_nk_markers,"T_NK/seu_nk_markers.rds")
top10 <- seu_nk_markers %>% group_by(cluster) %>% top_n(n = 30, wt = -p_val_adj) %>% top_n(n = 15, wt = avg_logFC)
top10[!duplicated(top10$gene),]->top10
# Diagnostic dotplots for selected clusters (capped at 180 features per plot).
for (i in c(3,8,16)) {
seu_nk_markers$gene[seu_nk_markers$cluster==i] -> features
if (length(features)>180){features<- features[1:180]}
pdf(paste0("T_NK/individual/dotplot-",i,".pdf"),height =20,width = 7)
p <- DotPlot(seu_T_NK,features = features,cols="Spectral",cluster.idents = TRUE) + coord_flip()
print(p)
dev.off()
}
# Feature UMAPs of each cluster's top-12 markers.
for (i in unique(seu_T_NK$seurat_clusters)){
seu_nk_markers$gene[seu_nk_markers$cluster==i][1:12] -> features
png(paste0("T_NK/individual/umap-",i,".png"),width = 10, height = 8,units="in",res=300)
p<- FeaturePlot(seu_T_NK, features = features)
print(p)
dev.off()
}
# --- Manual cell-type annotation of the T/NK clusters ----------------------
Idents(seu_T_NK) <- seu_T_NK$seurat_clusters
### assign cell types
# Placeholder value; every cluster is relabelled below.
seu_T_NK$celltype <- 1
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="0"] <- "Naive CD4+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="1"] <- "Effector memory CD4+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="2"|seu_T_NK$seurat_clusters=="6"] <- "Effector CD8+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="3"] <- "Naive CD8+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="4"|seu_T_NK$seurat_clusters=="8"] <- "CD56dim CD16+ NK"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="5"] <- "Central memory CD8+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="7"] <- "Effector memory CD4+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="9"] <- "γδT"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="10"] <- "Effector CD8+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="11"] <- "Effector CD4+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="12"] <- "MAIT"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="13"] <- "Central memory CD4+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="14"] <- "CD56bri CD16- NK"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="15"] <- "Naive CD4+ T"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="16"] <- "Proliferative T/NK"
# Cluster 17 is split by its cluster in the original full-object clustering.
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="17"&seu_T_NK$previous_cluster=="22"] <- "iNK"
seu_T_NK$celltype[seu_T_NK$seurat_clusters=="17"&!(seu_T_NK$previous_cluster=="22")] <- "Th2-like"
###
# Per patient/day cluster fractions (cluster_n becomes a proportion).
seu_T_NK@meta.data ->metadata
tmp1 = metadata %>% group_by(patient,days) %>% summarise(cell_n=n())
tmp2 = metadata %>% group_by(patient,days,seurat_clusters) %>% summarise(cluster_n=n())
tmp2 = tmp2 %>% left_join(tmp1)
tmp2$cluster_n = tmp2$cluster_n/tmp2$cell_n
tmp2 = tmp2 %>% full_join(unique(metadata[,c("patient","condition")]),by=c("patient"="patient"))
tmp2 = tmp2 %>% full_join(unique(metadata[,c("patient","days","Sample")]))
# Celltype-colored UMAPs; `celltype_color` is a palette defined elsewhere.
Idents(seu_T_NK) <- seu_T_NK$celltype
UMAPPlot(seu_T_NK,cols=c(celltype_color,"orange","pink"))
ggsave("T_NK/celltype_umap.png")
# Fix plotting/legend order of the cell types.
seu_T_NK$celltype <- factor(seu_T_NK$celltype,levels=c("Naive CD4+ T","Central memory CD4+ T","Effector memory CD4+ T","Effector CD4+ T",
"Naive CD8+ T",
"Central memory CD8+ T","Effector CD8+ T",
"γδT","Proliferative T/NK","MAIT","CD56bri CD16- NK","CD56dim CD16+ NK",
"iNK","Th2-like"))
Idents(seu_T_NK) <- seu_T_NK$celltype
UMAPPlot(seu_T_NK,cols=c(celltype_color[-10],"orange","pink")) +theme(axis.ticks = element_blank(),axis.text = element_blank())
ggsave("fig3/new/celltype_umap3.png",width=10,height=6.5,units="in",dpi=300)
#assign stage
# Disease stage: symptomatic patients binned by days since onset; HC,
# Asymptomatic and Severe override the day-based bins.
seu_T_NK$condition[seu_T_NK$condition=="Health control"]="HC"
seu_T_NK@meta.data ->metadata
metadata$stage = ">20"
metadata$stage[metadata$days<20] = "10-20"
metadata$stage[metadata$days<10] = "<10"
metadata$stage[metadata$condition=="Asymptomatic"]="Asymptomatic"
metadata$stage[metadata$condition=="HC"]="HC"
metadata$stage[metadata$condition=="Severe"]="Severe"
metadata$stage = factor(metadata$stage,levels = c("HC","Asymptomatic","Severe","<10","10-20",">20"))
#marker genes dotplots
#heatmap t_nk
# --- Cell-type proportion heatmap across patient/day samples ---------------
# Recompute per-sample fractions, this time by annotated cell type.
tmp1 = metadata %>% group_by(patient,days) %>% summarise(cell_n=n())
tmp2 = metadata %>% group_by(patient,days,celltype) %>% summarise(cluster_n=n())
tmp2 = tmp2 %>% left_join(tmp1)
tmp2$cluster_n = tmp2$cluster_n/tmp2$cell_n
tmp2 = tmp2 %>% full_join(unique(metadata[,c("patient","condition","stage","sex","days")]),by=c("patient"="patient","days"="days"))
# `final_n` rescales each cell type's fraction by its total across samples.
tmp2 %>% dplyr::select(celltype,cluster_n) %>% distinct() %>% group_by(celltype) %>% summarise(n=sum(cluster_n)) -> celltype_per
tmp2 = tmp2 %>% full_join(celltype_per,by="celltype") %>% mutate(final_n=cluster_n/n)
tmp2$id <- paste0(tmp2$patient,"_",tmp2$days)
##use celltype_pct
library(reshape2)
# Wide matrix: rows = cell types, columns = patient_day samples.
dcast(tmp2 %>% dplyr::select(id,celltype, final_n),celltype ~ id,mean) -> tmp2_heatmap2
tmp2_heatmap2[tmp2_heatmap2=="NaN"] <-0
rownames(tmp2_heatmap2) <- tmp2_heatmap2[,1]
tmp2_heatmap2[,-1] -> tmp2_heatmap2
tmp2 %>% dplyr::select(patient,days,sex,id,condition) %>% distinct() -> tmp_col
tmp2_heatmap2[,tmp_col$id] -> tmp2_heatmap2
# Clamp values for the color scale.
tmp2_heatmap2[tmp2_heatmap2>0.13] <- 0.13
tmp2_heatmap2[tmp2_heatmap2<0.01] <- 0.01
# Annotation colors; NOTE(review): assumes exactly 13 patients — confirm.
patient_col <- celltype_color[1:13]
names(patient_col) <- unique(tmp_col$patient)
library(RColorBrewer)
# NOTE(review): brewer.pal requires n >= 3; brewer.pal(2, ...) warns and
# returns 3 colors, of which the first 2 are kept here.
sex_col <- c(brewer.pal(2,"Paired"))[1:2]
names(sex_col) <- unique(tmp_col$sex)
library(pheatmap)
pheatmap(tmp2_heatmap2,
annotation_col = data.frame(
Patient=tmp_col$patient,
Sex= tmp_col$sex,
Days= tmp_col$days,
Condition=tmp_col$condition,
row.names = colnames(tmp2_heatmap2)
),
annotation_colors = list(Patient=patient_col,Sex=sex_col,Condition=condition_color),
max.labels = Inf,
normalize = TRUE,
show.labels = FALSE,
fontsize = 12,
show_colnames = FALSE,
treeheight_row = 0,
treeheight_col = 0,
border_color=NA ,filename ="fig3/new/heatmap_celltypes_labels.pdf" ,height=5,width=13)
# NOTE(review): `p` here is whatever was plotted last above (pheatmap wrote
# straight to file via `filename=`), so this pdf block may not contain the
# intended figure — verify.
pdf("fig3/heatmap_celltypes_labels3.pdf",height=8,width=8)
print(p)
dev.off()
#stage percentage barplot
tmp2
library(ggpubr)
# All pairwise stage comparisons for the Wilcoxon annotations.
my_comparisons <- list( c("Asymptomatic", "HC"),
c("<10", "HC"),
c("Severe", "HC"),
c("<10", "Severe"),
c("Asymptomatic", "Severe"),
c("<10", "Asymptomatic"),c("<10",">20"),
c("<10","10-20"),
c("10-20",">20"),
c(">20","Asymptomatic"),
c(">20","HC"),
c(">20","Severe"),
c("10-20","Asymptomatic"),
c("10-20","HC"),
c("10-20","Severe")
)
# Boxplots of per-sample cell-type fractions by stage, all cell types.
ggplot(data=tmp2,aes(x=stage,cluster_n,fill=stage))+
geom_boxplot(outlier.size = 0.1)+
geom_jitter(size=0.1)+
scale_fill_manual(values = cond_palette)+
facet_wrap(~celltype,nrow=3,scales = "free_y")+
stat_compare_means(label = "p.signif",method="wilcox.test",comparisons=my_comparisons ,hide.ns=TRUE)+
theme_cowplot(font_size = 15)+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +labs(y="Percentage")
ggsave("fig3/new/stage_allp_bar.pdf",width = 15,height = 12)
#reduce
# Same stage boxplots but with fewer comparisons and a subset of cell types.
my_comparisons <- list( c("Asymptomatic", "HC"),
c("<10", "HC"),
c("Severe", "HC"),
c("<10", "Severe"),
c("Asymptomatic", "Severe"),
c("<10", "Asymptomatic"),
c(">20","Asymptomatic"),
c(">20","HC"),
c(">20","Severe"),
c("10-20","Asymptomatic"),
c("10-20","HC"),
c("10-20","Severe")
)
# NOTE(review): the filter drops "Proliferative T", but the annotated label
# is "Proliferative T/NK" — this exclusion silently matches nothing; confirm.
ggplot(data=tmp2 %>% filter(!celltype %in% c("Naive CD4+ T","MAIT","Naive CD8+ T","Proliferative T")),aes(x=stage,cluster_n,fill=stage))+
geom_boxplot(outlier.size = 0.1)+
geom_jitter(size=0.1)+
scale_fill_manual(values = cond_palette)+
facet_wrap(~celltype,nrow=2,scales = "free_y")+
stat_compare_means(label = "p.signif",method="wilcox.test",comparisons=my_comparisons ,hide.ns=TRUE)+
theme_cowplot(font_size = 15)+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +labs(y="Percentage")
ggsave("fig3/stage_somep_bar.pdf",width = 15,height = 5)
##dotplot
# Canonical T/NK marker genes for the annotation dotplot and feature UMAPs.
Idents(seu_T_NK) <- seu_T_NK$celltype
featurstnk <- c("CD3D","CD4","GPR183","CCR7","SELL","S100A4","CD8A","GZMA","GZMB","NKG7","LTB","TRDV2","MKI67","TRAV1-2","NCAM1","FCGR3A","KIT","PTGDR2")
DotPlot(seu_T_NK,features=featurstnk,cols="Spectral") +theme(axis.text.x = element_text(hjust = 1,angle = 60))
ggsave("fig3/new/dotplot2.pdf",width = 8,height = 5)
##umap for those genes
FeaturePlot(seu_T_NK,featurstnk,order = TRUE)
ggsave("fig3/umaps_features.png",width=16,height=18)
#correspondence between previous clusters and new clusters
# Fraction of each new cell type that maps to each original full-object
# cell type, shown as a heatmap.
library(tidyverse)
tmp <- seu_T_NK@meta.data
tmp %>% dplyr::select(celltype) -> tmp
tmp$cluster_cell <- seu$cell_type[match(rownames(tmp),colnames(seu))]
tmp %>% group_by(celltype,cluster_cell) %>% summarise(n=n()) -> tmp2
tmp2 %>% full_join(tmp %>% group_by(celltype) %>% summarise(cn=n()),by=c("celltype"="celltype")) %>% mutate(per=n/cn) -> tmp3
# NOTE(review): empty library() call is a no-op left in the original.
library()
reshape2::dcast(tmp3,celltype~cluster_cell,mean)-> tmp4
tmp4[tmp4=="NaN"] <-NA
rownames(tmp4) <- tmp4[,1]
tmp4[,-1] -> tmp4
#tmp4 <- t(scale(t(tmp4)))
library(pheatmap)
pheatmap(tmp4,
#max.labels = Inf,
#normalize = TRUE,
#scale="column",
color = colorRampPalette(colors = c("white","blue"))(100),
fontsize = 10,
na_col="white",
cluster_rows = FALSE,
cluster_cols = FALSE,filename ="fig3/new/celltypes_cor.pdf" ,width = 6,height =5)
|
2f0b7e3b947e82e940dc18011e532643d4d5b142
|
a4d5770c9a1fb3f8a36c302bc171156bd5c7118c
|
/Code/primary_abund_manyglm_predictive.R
|
d39b8a7876a2e2b1822394cc0146b4bb4cc7636b
|
[
"CC-BY-4.0"
] |
permissive
|
CHANGElab-AlbacoreTuna/traits-review
|
c40a5b2d5ec5529e5292ef8d3f779d67ea1b1f63
|
73ad8ca0d26c3d66d551476445f0c653c2e1c5b3
|
refs/heads/master
| 2023-04-25T04:12:53.453286
| 2021-05-18T09:42:14
| 2021-05-18T09:42:14
| 292,337,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,828
|
r
|
primary_abund_manyglm_predictive.R
|
##########
##########
# This code contains a multivariate regression component of the analysis
# presented in Green et al. (2020)
# A review on the use of traits in ecological research
##########
##########
# AUTHOR: Cole B. Brookson & Natasha A. Hardy
# DATE OF CREATION: 2021-01-20
##########
##########
# set-up =======================================================================
library(tidyverse)
library(vegan)
library(mvabund)
library(reshape2)
library(here)
primary_abundance =
  read_csv(here(paste0('./data/processed-data',
                       '/primary_traits_dummy_abundance_models.csv')))
# Multivariate Regression ======================================================
###### begin NOTE ##############################################################
# Some below sections have been commented out for simpler use. Some of the
# regressions take a good while to run, so we've saved the result as an RDS
# object which can be easily read into the environment using the code provided.
###### end NOTE ################################################################
# split into 'sites' and 'species' just to put it into typical ecological
# multivariate context; columns 1-11 are predictors, the rest are traits.
primary_abundance_traits = data.frame(primary_abundance[,1:11])
primary_abundance_species =
  data.frame(primary_abundance[,12:ncol(primary_abundance)])
# need to make it an mvabund readable object
primary_abundance_mv = mvabund(primary_abundance_species)
# go ahead with negative binomial
mv_pd_nb_primabun = manyglm(primary_abundance_mv ~
                              primary_abundance_traits$PredictiveCat,
                            data= primary_abundance_traits,
                            family = 'negative.binomial')
# residual diagnostics (the qqnorm filter drops extreme residuals for display)
plot(mv_pd_nb_primabun)
qqnorm(residuals(mv_pd_nb_primabun)[which(residuals(mv_pd_nb_primabun)<10000)])
saveRDS(mv_pd_nb_primabun,
        here("./data/manyglm-intermediate/mv_pd_nb_primabun.rds"))
# model output significance test
# mv_pd_nb_primabun_an = anova.manyglm(mv_pd_nb_primabun)
# saveRDS(mv_pd_nb_primabun_an,
#        here("./data/manyglm-intermediate/mv_pd_nb_primabun_anova.rds"))
mv_pd_nb_primabun_an =
  readRDS(here("./data/manyglm-intermediate/mv_pd_nb_primabun_anova.rds"))
# write a table for this
write_csv(mv_pd_nb_primabun_an$table,
          here("./output-tables/mv_pd_nb_primabun_anova_table.csv"))
# individual adjusted p-values for species/traits - get univariate p-values
# mv_pd_nb_primabun_an_uni = anova.manyglm(mv_pd_nb_primabun,p.uni="adjusted")
# saveRDS(mv_pd_nb_primabun_an_uni,
#        here("./data/manyglm-intermediate/mv_pd_nb_primabun_univs.rds"))
mv_pd_nb_primabun_an_uni =
  readRDS(here("./data/manyglm-intermediate/mv_pd_nb_primabun_univs.rds"))
# get the direction of effect for each species with the main effect
pd_coef_primabun = coef(mv_pd_nb_primabun)
#figure out what the top traits are - recall traits are our 'species' here
mv_pd_nb_primabun_species =
  sort(mv_pd_nb_primabun_an$uni.test[2,],
       decreasing=T,index.return=T)[1:5] #sort and select top species/traits
mv_pd_nb_primabun_species$ix[1:5] #the column #s of the top impacted spp/traits
# NOTE(review): the two comments below cite 25 traits / >50% deviance, but the
# code selects only the top 5 — these figures look left over from an earlier
# version of the analysis; confirm the intended cutoff.
sum(mv_pd_nb_primabun_an$uni.test[2,mv_pd_nb_primabun_species$ix[1:5]])*100/
  sum(mv_pd_nb_primabun_an$uni.test[2,]) # % deviance explained by top 5
#3 species explained >85% deviance
pd_top_primabun =
  data.frame(dimnames(primary_abundance_species)[[2]][
    mv_pd_nb_primabun_species$ix[1:5]]) #df with the names of the top 5 traits
pd_top_primabun = pd_top_primabun %>%
  dplyr::rename('traits' = names(pd_top_primabun))
# write table for amount of deviance explained
write_csv(pd_top_primabun, here("./output-tables/primary_predictive_top5.csv"))
# Look at Top Coefficients =====================================================
# Create df to combine coef values, also p-values from univ anovas & the top 5
pd_coef_prim = data.frame(t(pd_coef_primabun))
pd_coef_prim$traits = rownames(pd_coef_prim) #convert rownames to a column
pd_coef_prim = pd_coef_prim %>%
  dplyr::rename('coef_intercept' = `X.Intercept.`,
                'coef_pd_yes' = names(pd_coef_prim)[2])
pd_top_coeffs = merge(pd_top_primabun, pd_coef_prim,
                      by.x = 'traits',
                      by.y = 'traits')
# need to join with test statistic values
pd_an_test = as.data.frame(t( # first transpose coef_filter
  mv_pd_nb_primabun_an_uni$uni.test))
pd_an_test$traits = rownames(pd_an_test) #convert rownames to a column
pd_an_test = pd_an_test %>%
  dplyr::rename('deviance_explained' = names(pd_an_test)[2])
pd_top_coeffs = merge(pd_top_coeffs,
                      pd_an_test,
                      by.x = 'traits',
                      by.y = 'traits')
pd_top_coeffs = pd_top_coeffs %>%
  select(-"(Intercept)")
# need to join with p-values
pd_an_pvalue = data.frame(t( # first transpose coef_filter
  mv_pd_nb_primabun_an_uni$uni.p))
pd_an_pvalue$traits = rownames(pd_an_pvalue) #convert rownames to a column
pd_an_pvalue = pd_an_pvalue %>%
  select(-names(pd_an_pvalue)[1])
pd_an_pvalue = pd_an_pvalue%>%
  dplyr::rename('p_value' = names(pd_an_pvalue)[1])
pd_top_coeffs = merge(pd_top_coeffs,
                      pd_an_pvalue,
                      by.x = 'traits',
                      by.y = 'traits')
write_csv(pd_top_coeffs,
          here("./output-tables/primary_predictive_top_coefs.csv"))
#See how many papers actually have those traits
papers_with_top_3_pd = primary_abundance_species
top_3_pd = pd_top_primabun$traits
papers_with_top_3_pd = papers_with_top_3_pd[top_3_pd]
rownames(papers_with_top_3_pd) = primary_abundance_traits$DOI
papers_with_top_3_pd =
  papers_with_top_3_pd[rowSums(papers_with_top_3_pd[, -1])>0, ]
|
120ea16aed6c8300c91b60d43b14d1326f3d7176
|
64f0944e9c82019706e9af9502f08719133ed7ca
|
/R/customerClustering.R
|
19bbe70499e161b5ada749c9849d95b758c6bae7
|
[] |
no_license
|
you-leee/purchase-prediction
|
f6f8404d136e0a907ee0c21156e9817952ff72e5
|
dbef52814ef6b871c4ef86772bbcdb4370168193
|
refs/heads/master
| 2021-07-01T17:04:14.406956
| 2017-09-20T15:54:12
| 2017-09-20T15:54:12
| 104,228,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 462
|
r
|
customerClustering.R
|
library(NbClust)
#' Elbow plot for choosing a k-means cluster count.
#'
#' Plots the within-groups sum of squares against the number of clusters
#' (1..max_clusters). The k = 1 value is the total sum of squares; larger k
#' are fitted with kmeans().
#'
#' @param data numeric matrix or data frame, observations in rows.
#' @param max_clusters largest number of clusters to evaluate (>= 1).
#' @return invisibly, the numeric vector of within-group sums of squares,
#'   one element per cluster count.
plotSoSByClusterNum <- function(data, max_clusters = 15) {
  stopifnot(length(max_clusters) == 1, max_clusters >= 1)
  # Preallocate instead of growing the vector inside the loop.
  wss <- numeric(max_clusters)
  # k = 1: total SS = (n - 1) * sum of per-column variances.
  wss[1] <- (nrow(data) - 1) * sum(apply(data, 2, var))
  # seq_len()[-1] is empty when max_clusters == 1 (unlike 2:max_clusters,
  # which would count down and call kmeans with invalid centers).
  for (i in seq_len(max_clusters)[-1]) {
    wss[i] <- sum(kmeans(data, centers = i)$withinss)
  }
  plot(seq_len(max_clusters), wss, type = "b",
       xlab = "Number of Clusters", ylab = "Within groups sum of squares")
  invisible(wss)
}
# Fit a k-means model with `cluster_num` centers, echo the per-cluster
# column means to the console, and return the fitted kmeans object.
fitCluster <- function(data, cluster_num) {
  model <- kmeans(data, cluster_num)
  cluster_means <- aggregate(data, by = list(model$cluster), FUN = mean)
  print(cluster_means)
  model
}
|
b5567ca02b9fac7cd056eead2c19e706413f6b25
|
ffee56ec405b7f3551c4727ca2a0e1b035268212
|
/R_exercise/SpherHam/gauleg.r
|
4c41062d1c77d2619a48840e3187eb7476d2e83a
|
[] |
no_license
|
rovinapinto/Klimadynamics
|
187ea6780119e2bfe15d6c4047033068ddaf0bb1
|
fff5cb0081d2b45fa48e10621cdfde46c6423c51
|
refs/heads/main
| 2023-02-28T14:20:52.459934
| 2021-02-11T09:36:27
| 2021-02-11T09:36:27
| 325,517,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
gauleg.r
|
## source("~/R/KlimDyn/SpherHam/gauleg.r")
##
## NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING
##
gauleg <- function(n)
{
  ## Gauss-Legendre quadrature on (-1, +1) (after Numerical Recipes, gauleg).
  ##
  ## Given n, returns the abscissas x[1..n] and weights w[1..n] of the
  ## n-point Gauss-Legendre rule, so that integral_{-1}^{1} f(t) dt is
  ## approximated by sum(w * f(x)). The roots of the Legendre polynomial
  ## P_n are located by Newton's method from a Chebyshev starting guess;
  ## the weights follow from the standard derivative relation.
  ##
  ## n - number of quadrature points (input, scalar >= 1)
  ## returns list(x = abscissas, w = weights)
  stopifnot(length(n) == 1, n >= 1)
  EPS <- 3e-14                        # Newton convergence tolerance
  x <- numeric(n)
  w <- numeric(n)
  x1 <- -1                            # lower integration limit
  x2 <- +1                            # upper integration limit
  m <- (n + 1) %/% 2                  # roots are symmetric: find only half
  xm <- 0.5 * (x2 + x1)               # interval midpoint
  xl <- 0.5 * (x2 - x1)               # interval half-length
  for (i in seq_len(m)) {             # loop over the desired roots
    ## Chebyshev-based starting approximation to the i-th root.
    z <- cos(pi * (i - 0.25) / (n + 0.5))
    ## Refine by Newton's method.
    repeat {
      p1 <- 1.0
      p2 <- 0.0
      for (j in seq_len(n)) {         # recurrence: evaluate P_n at z
        p3 <- p2
        p2 <- p1
        p1 <- ((2.0 * j - 1.0) * z * p2 - (j - 1.0) * p3) / j
      }
      ## p1 is P_n(z); pp is its derivative via the relation with
      ## P_{n-1} (held in p2).
      pp <- n * (z * p1 - p2) / (z * z - 1.0)
      z1 <- z
      z <- z1 - p1 / pp               # Newton step
      if (abs(z - z1) <= EPS) break
    }
    x[i] <- xm - xl * z               # scale root into the interval
    x[n + 1 - i] <- xm + xl * z       # symmetric counterpart
    w[i] <- 2.0 * xl / ((1.0 - z * z) * pp * pp)   # weight
    w[n + 1 - i] <- w[i]              # symmetric counterpart
  }
  list(x = x, w = w)
}
|
b4fd23b909da5fc6825fb112354c2ceb340f38a2
|
5a096058e7bdcb4ad9779fc234cd5bf306f8b9a2
|
/the real deal.R
|
89c17df3958c213d24aa715390a1397bb4b8ee79
|
[] |
no_license
|
brianpclare/Network_Analysis
|
844752ede63da2b29cf68716b2d2f51906ff32b0
|
71094ca3e2411743d2df52b22114c382392a9334
|
refs/heads/master
| 2020-04-24T23:37:35.992223
| 2019-03-10T11:41:46
| 2019-03-10T17:05:42
| 172,350,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,215
|
r
|
the real deal.R
|
# Setup for a chunked logistic-regression fit over all node pairs of a graph:
# reads the edge list, enumerates node names, and initializes the per-node
# coefficient matrix `betas` (one column per IRLS iteration, appended later).
library(arm)
library(tidyverse)
library(magrittr)
library(data.table)
# chunk_size <- 5782
chunk_size <- 500
# Edge list: node_1, node_2, and a numeric outcome (edge present or weight).
data <- fread("all_edges_real.csv",
              colClasses = c("character", "character", "numeric"))
names <- union(data$node_1, data$node_2) %>%
  unique()
n <- length(names)
# Number of unordered node pairs — used as the stopping bound when chunking.
num_rows <- n * (n - 1) / 2
# rm(data)
# One coefficient per node, initialized to 0; rows keyed by node name.
betas <- matrix(0, nrow = n, ncol = 1)
rownames(betas) <- names
# betas[, 1] <- 0
pi_vec <- rep(0.5, chunk_size)
# Predicted edge probabilities for each row of `sample` under the current
# model: logistic of the sum of the two endpoint nodes' coefficients.
# Reads the global `betas` (uses its latest column). `.subset2(sample, k)`
# is a fast, dispatch-free column extraction from the data.table.
#
# sample     - table whose first two columns are node names (one pair/row)
# chunk_size - number of rows of `sample` to score
# returns a numeric vector of probabilities in (0, 1)
make_preds <- function(sample, chunk_size){
  x <- rep(0, chunk_size)
  for(i in 1:chunk_size){
    # bl indexes the most recent iteration's coefficients.
    bl <- ncol(betas)
    x[i] <- 1/(1 + exp(-(betas[rownames(betas) == .subset2(sample, 1)[i], bl] +
                           betas[rownames(betas) == .subset2(sample, 2)[i], bl])))
  }
  return(unlist(x))
}
# One chunk's contribution to the IRLS normal equations.
# Builds the indicator design matrix Z (row i has 1s in the two endpoint
# columns of pair i), the outcome vector Y, and the working weights
# W = pi * (1 - pi), then returns the partial sums Z'WZ and Z'(Y - pi).
# Reads globals `n` (node count) and `names` (node names).
#
# index      - chunk offset (unused here beyond the caller's bookkeeping)
# chunk_size - number of rows in this chunk
# sample     - the chunk's rows (node_1, node_2, outcome)
# returns list(ZTWZ, ZYPI)
chunk_calc <- function(index, chunk_size, sample){
  Z <- matrix(0, nrow = chunk_size, ncol = n)
  colnames(Z) <- names
  Y <- rep(0, chunk_size)
  for (i in 1:chunk_size) {
    # [[i, name]] sets a single matrix cell addressed by column name.
    Z[[i, .subset2(sample, 1)[i]]] <- 1
    Z[[i, .subset2(sample, 2)[i]]] <- 1
    Y[i] <- .subset2(sample, 3)[i]
  }
  # Current predictions for this chunk (uses the latest betas column).
  pi_vec <- make_preds(sample, chunk_size)
  W <- (pi_vec) * (1 - pi_vec)
  ZYPI <- crossprod(Z, (Y - pi_vec))
  ZTWZ <- crossprod(Z, (W*Z))
  return(list(ZTWZ, ZYPI))
}
# One full IRLS (Newton) iteration over the whole dataset, processed in
# chunks. Accumulates Z'WZ and Z'(Y - pi) across chunks, solves the normal
# equations via a Cholesky factorization, and APPENDS the updated
# coefficient vector as a new column of the global `betas` (note the `<<-`
# — this function deliberately mutates global state).
#
# c_name     - label for the iteration (currently unused inside the body)
# chunk_size - rows per chunk; must relate sanely to `num_rows`
do_an_iteration <- function(c_name, chunk_size){
  print(Sys.time())
  index <- 0
  ztwz <- matrix(0, nrow = n, ncol = n)
  zypi <- rep(0, n)
  while(TRUE){
    # NOTE(review): the final chunk can over-run num_rows; data.table
    # indexing past the end yields NA rows — confirm num_rows is a
    # multiple of chunk_size, or that downstream tolerates this.
    df <- data[(index + 1):(index + chunk_size), ]
    new_stuff <- chunk_calc(index, nrow(df), df)
    ztwz <- ztwz + new_stuff[[1]]
    zypi <- zypi + new_stuff[[2]]
    index <- index + chunk_size
    print(index)
    if(index >= num_rows){
      # if(index >= 1){
      break
    }
  }
  bl <- ncol(betas)
  # Solve (Z'WZ) delta = Z'(Y - pi) using the Cholesky factor: two
  # triangular backsolves instead of forming the inverse.
  C <- chol(ztwz)
  betas <<- betas %>% cbind(betas[, bl] +
                              backsolve(C, backsolve(C, zypi,
                                                     transpose = TRUE)) )
  print(Sys.time())
}
# Driver: run one iteration (further iterations are commented out), then
# score every pair with the final coefficients.
do_an_iteration("two", chunk_size)
# do_an_iteration("three", chunk_size)
# do_an_iteration("four", chunk_size)
#
# cns <- c("five", "six", "seven", "eight", "nine", "ten")
#
# for(j in cns){
#   do_an_iteration(j, chunk_size)
# }
# NOTE(review): 93096 is hard-coded — presumably nrow(data); verify it
# matches the current input file before reuse.
data$preds <- make_preds(data, chunk_size = 93096)
|
144d11c7bdebe700e49b11b11e5748bee15789aa
|
2e1fd39810a0359d0d0ddb31de93541e60e2466c
|
/cy_distribution.R
|
016a4d72a696384fcc9d24748674de0851034f6d
|
[] |
no_license
|
rtmag/TBlab
|
ac10ce46531da83c70c4a6920496926345849399
|
7b36fb699a04d0f7cd40c7029c827aff1cb49840
|
refs/heads/master
| 2021-01-19T09:52:22.666857
| 2020-12-23T16:34:46
| 2020-12-23T16:34:46
| 82,148,894
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,466
|
r
|
cy_distribution.R
|
# Recode structural-variant (SV) type labels from a nanopore/NGS intersect
# table and compare the two callers' SV-type distributions as barplots.
# Column 5 holds the NanoSV type, column 10 the short-read (NGS) type.
inter=read.table("~/Downloads/AML_001_run1_nanoSV-WGS-intersect.tsv",sep="\t",stringsAsFactors=F)
# --- First recoding: insertion-like calls grouped into broad classes ------
nano = inter[,5]
nano[nano=="E-Nov_Ins_bp"]="Novel Insertion"
nano[nano=="S-Nov_Ins_bp"]="Novel Insertion"
nano[nano=="Nov_Ins"]="Novel Insertion"
nano[nano=="Inter-Ins(1)"]="Translocation"
nano[nano=="InterTx"]="Translocation"
nano[nano=="Intra-Ins"]="Translocation"
nano[nano=="Intra-Ins(1)"]="Translocation"
# Secondary breakpoint records are discarded below.
nano[nano=="E-Nov_Ins"]="TRASH"
nano[nano=="Inter-Ins(2)"]="TRASH"
nano[nano=="Intra-Ins(2)"]="TRASH"
nano[nano=="S-Nov_Ins"]="TRASH"
nano[nano=="Del"]="Deletion"
nano[nano=="Inv"]="Inversion"
nano[nano=="TDupl"]="Duplication"
#
ngs = inter[,10]
ngs[ngs=="<DEL>"]="Deletion"
ngs[ngs=="<DUP>"]="Duplication"
ngs[ngs=="<INV>"]="Inversion"
ngs[ngs=="<TRA>"]="Translocation"
###########
colu=cbind(ngs,nano)
colu_noTrash=colu[!(colu[,2]=="TRASH"),]
nano_table=table(colu_noTrash[,2])
ngs_table=table(colu_noTrash[,1])
# NGS has no "Novel Insertion" category — pad with a zero so both barplots
# share the same five categories in the same order.
ngs_table=c(ngs_table[1:3],0,ngs_table[4])
names(ngs_table)=c("Deletion","Duplication","Inversion","Novel Insertion","Translocation")
# BUGFIX: the device file was previously named "PLOT.R" — a PDF written to
# an .R filename. Use a .pdf extension.
pdf("PLOT.pdf")
par(mfrow=c(1,2))
barplot(ngs_table,border=NA,col="salmon",main="NGS",ylim=c(0,250))
barplot(nano_table,border=NA,col="darkturquoise",main="NANOPORE",ylim=c(0,250))
dev.off()
#####################################################################################################
# --- Second recoding: insertions kept separate ("Insertion" / "Tra/Ins") --
inter=read.table("~/Downloads/AML_001_run1_nanoSV-WGS-intersect.tsv",sep="\t",stringsAsFactors=F)
nano = inter[,5]
nano[nano=="E-Nov_Ins_bp"]="Novel Insertion"
nano[nano=="S-Nov_Ins_bp"]="Novel Insertion"
nano[nano=="Nov_Ins"]="Novel Insertion"
nano[nano=="Inter-Ins(1)"]="Insertion"
nano[nano=="InterTx"]="Tra/Ins"
nano[nano=="Intra-Ins"]="Tra/Ins"
nano[nano=="Intra-Ins(1)"]="Insertion"
nano[nano=="E-Nov_Ins"]="TRASH"
nano[nano=="Inter-Ins(2)"]="TRASH"
nano[nano=="Intra-Ins(2)"]="TRASH"
nano[nano=="S-Nov_Ins"]="TRASH"
nano[nano=="Del"]="Deletion"
nano[nano=="Inv"]="Inversion"
nano[nano=="TDupl"]="Duplication"
#
ngs = inter[,10]
ngs[ngs=="<DEL>"]="Deletion"
ngs[ngs=="<DUP>"]="Duplication"
ngs[ngs=="<INV>"]="Inversion"
ngs[ngs=="<TRA>"]="Translocation"
###########
colu=cbind(ngs,nano)
colu_noTrash=colu[!(colu[,2]=="TRASH"),]
nano_table=table(colu_noTrash[,2])
ngs_table=table(colu_noTrash[,1])
# NOTE(review): the pdf() call below is intentionally disabled; the trailing
# dev.off() then closes whatever device the barplots opened (Rplots.pdf
# under Rscript, or the screen device interactively).
# pdf("PLOT2.pdf")
par(mfrow=c(1,2))
barplot(ngs_table,border=NA,col="salmon",main="NGS",ylim=c(0,250))
barplot(nano_table,border=NA,col="darkturquoise",main="NANOPORE",ylim=c(0,250))
dev.off()
|
b99225678c0f35e65d62d43e699b20c24567080b
|
2fa7a8ebaa5e98d1345a4556341209cbb9d68148
|
/challenges/2021-04-27-sum-arrays/solutions/rust/ericwburden/ericwburden.R
|
2cd55505d097a89539618e814eefcd29f694a28b
|
[
"MIT"
] |
permissive
|
SubhamChoudhury/CodingDojo
|
28ed36ab923b2d5ac294ef09e9ba45b6a5e03589
|
d8eec1da557fea483269bc5ce044db621863e7d6
|
refs/heads/main
| 2023-08-06T17:58:40.649768
| 2021-10-01T17:16:19
| 2021-10-01T17:16:19
| 412,515,255
| 1
| 0
|
MIT
| 2021-10-01T15:12:11
| 2021-10-01T15:12:10
| null |
UTF-8
|
R
| false
| false
| 1,276
|
r
|
ericwburden.R
|
#' # Sum Arrays
#'
#' Given an array of values, return the sum of the values.
#'
#' ## Business Rules/Errata
#'
#' - Input must be an array.
#' - The array may be nested more than one level.
#' - All values must be integers.
#' - Solutions shall not use built in methods to flatten the array to
#' one-dimension.
#'
#' ## Examples
#'
#' One dimension:
#' ```
#' sum_of_array([1,2,3,4,5]) => 15
#' ```
#'
#' Two dimensions:
#' ```
#' sum_of_array([1,2,[1,2,3],4,5]) => 18
#' ```
#'
#' n dimensions:
#'
#' ```
#' sum_of_array([1,[1,2,[3,4],5],[6,7]]) => 29
#' ```
library(testthat)
#' Recursively sum all numbers in a (possibly nested) collection.
#'
#' Works on plain numeric vectors and on lists nested to any depth
#' (in R, `c(1, c(2, 3))` flattens automatically, so true nesting
#' requires `list()`). No built-in flattening (`unlist`) is used.
#'
#' @param input_vec numeric vector or arbitrarily nested list of numbers.
#' @return the scalar sum of every number contained in `input_vec`.
sum_of_array <- function(input_vec) {
  if (length(input_vec) == 0) {
    # BUGFIX: the original returned the empty input itself (numeric(0)),
    # which poisons any sum it participates in; an empty collection sums to 0.
    return(0)
  }
  if (length(input_vec) == 1 && !is.list(input_vec)) {
    # A bare scalar: base case of the recursion.
    return(input_vec)
  }
  # Vector of length > 1, or a list (possibly length 1): recurse over
  # the elements and accumulate.
  total <- 0
  for (elem in input_vec) {
    total <- total + sum_of_array(elem)
  }
  total
}
# testthat suite for sum_of_array. Note: because c() flattens nested
# numeric vectors, the "2D"/"nD" inputs below are already flat by the time
# the function sees them — they exercise the plain-vector path.
test_that("Handles flat lists", {
  expect_equal(sum_of_array(c(1, 2, 3, 4, 5)), 15)
})
test_that("Handles 2D lists", {
  input <- c(1, 2, c(1, 2, 3), 4, 5)
  expect_equal(sum_of_array(input), 18)
})
test_that("Handles nD lists", {
  input <- c(1, c(1, 2, c(3, 4), 5), c(6, 7))
  expect_equal(sum_of_array(input), 29)
})
test_that("An interesting case", {
  input <- c(c(1, 2), c(c(3, 4), 5), c(c(c(6, 7), 8), 9))
  expect_equal(sum_of_array(input), 45)
})
|
785e0d5d2ea56d5256582c6b985bda99cfb0328a
|
f1f999c8facf7bdbdad4b49446679fb8101cb2db
|
/R/ComputeClustersByTrt.R
|
2a20b3d1f443016d2d724db834b1211512ecd442
|
[] |
no_license
|
daskelly/earlycross
|
f840995f28c7b6d07600ee0e15e4642e22cf6abf
|
813804fe8f2010c51a843370895a9d2b78edb233
|
refs/heads/master
| 2023-02-27T00:37:10.949245
| 2023-02-09T14:55:56
| 2023-02-09T14:55:56
| 149,645,819
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,192
|
r
|
ComputeClustersByTrt.R
|
#' Given a Seurat object and treatment variable, compare
#' clusters between levels of the treatment
#'
#' @param obj Seurat object
#' @param trt_var Treatment variable
#' @param rep_var Replicate/Individual ID variable
#' @param ci Whether to plot binomial confidence intervals (Jeffreys)
#' @param group_by Name of grouping variable (default is Idents)
#'
#' @importFrom magrittr '%>%'
#' @examples
#' pbmc_small$trt <- sample(c('drug', 'control'), ncol(pbmc_small), replace=TRUE)
#' pbmc_small$genotype <- sample(c('1', '2', '3'), ncol(pbmc_small), replace=TRUE)
#' head(ComputeClustersByTrt(pbmc_small, trt, genotype))
#'
#' @export
ComputeClustersByTrt <- function(obj, trt_var, rep_var, group_by = NULL,
                                 ci = TRUE, ci_alpha = 0.05) {
  # Summarise per-cluster cell abundance for each treatment x replicate
  # combination of a Seurat object, optionally with Jeffreys binomial CIs.
  # trt_var / rep_var are unquoted column names (tidy-eval, {{ }}).
  # For curly curly {{ syntax see
  # https://www.tidyverse.org/blog/2019/06/rlang-0-4-0/
  meta <- obj@meta.data %>%
    as.data.frame() %>%
    tibble::rownames_to_column("cell")
  # Grouping defaults to the object's active identities (Idents).
  if (is.null(group_by)) {
    meta$ident <- Idents(obj)
  } else {
    meta$ident <- meta[[group_by]]
  }
  # First pass: total cells per treatment x replicate (N_tot), carried
  # along so the second grouping can compute per-cluster fractions.
  grp_dat <- dplyr::group_by(meta, {
    {
      trt_var
    }
  }, {
    {
      rep_var
    }
  }) %>%
    dplyr::mutate(N_tot = dplyr::n()) %>%
    dplyr::ungroup() %>%
    dplyr::group_by({
      {
        trt_var
      }
    }, {
      {
        rep_var
      }
    }, ident, N_tot)
  # Second pass: cells per cluster (N) and fraction of the group's total.
  grp_dat <- grp_dat %>%
    dplyr::summarize(N = dplyr::n(), .groups = 'keep') %>%
    dplyr::mutate(frac = N/N_tot)
  # Put binomial confidence intervals around the cell abundance estimates Use
  # Jeffreys interval -- posterior dist'n is Beta(x + 1/2, n – x + 1/2)
  grp_stats <- dplyr::mutate(grp_dat, cells_per_thousand = frac * 1000) %>%
    dplyr::mutate(trt_var = {{ trt_var }})
  if (ci) {
    # Equal-tailed ci_alpha-level interval from the Jeffreys posterior.
    grp_stats <- dplyr::mutate(grp_stats,
                               lower = qbeta(ci_alpha/2, N + 1/2, N_tot - N + 1/2),
                               upper = qbeta(1 - ci_alpha/2, N + 1/2, N_tot - N + 1/2),
                               lower_per_thousand = lower * 1000,
                               upper_per_thousand = upper * 1000)
  }
  # Still grouped (.groups = 'keep'); callers may want to ungroup().
  grp_stats
}
|
ce0b7d26e41687e8c0651915767d868b0c39ce30
|
3a3e3e050d6deb8544ff2838ab4b698a492d2eb7
|
/R/read_data_hooks.R
|
918af1db33299926b3fc3f2a0b6d8e944604c9da
|
[] |
no_license
|
jokergoo/epik
|
f9eb86c38eab46913a2787296fe5e023caf70f2b
|
16ae793be02554ddda89401a888327dce87c5a4a
|
refs/heads/master
| 2021-01-12T05:25:21.598897
| 2019-09-27T08:04:03
| 2019-09-27T08:04:03
| 77,924,435
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,429
|
r
|
read_data_hooks.R
|
# == title
# Read methylation dataset
#
# == param
# -... please ignore, see 'details' section.
# -RESET remove all hooks
# -READ.ONLY please ignore
# -LOCAL please ignore
#
# == detail
# Methylation dataset from whole genome bisulfite sequencing is always huge and it does not
# make sense to read them all into the memory. Normally, the methylation dataset is stored
# by chromosome and this hook function can be set to read methylation data in a per-chromosome
# manner. In the package, there are many functions use it internally to read methylation datasets.
#
# Generally, for methylation dataset, there are methylation rate (ranging from 0 to 1), CpG coverage and genomic positions
# for CpG sites. Sometimes there is also smoothed methylation rate. All these datasets can be set
# by defining a proper ``methylation_hooks$get_by_chr``. The value for ``methylation_hooks$get_by_chr``
# is a function with only one argument which is the chromosome name. This function defines how to
# read methylation dataset for a single chromosome. The function must return a list which contains
# following mandatory elements:
#
# -gr a `GenomicRanges::GRanges` object which contains genomic positions for CpG sites. Positions should be sorted.
# -meth a matrix which contains methylation rate. This will be the main methylation dataset the epik
# package uses, so it should be smoothed methylation rate if the CpG coverage is not high.
# Note, this matrix must have column names which is sample names and will be used to match
# other datasets (e.g. RNASeq)
# -cov a matrix which contains CpG coverage.
#
# It can also contain some optional elements and they are not needed for the core analysis:
#
# -raw a matrix which contains unsmoothed methylation rate (or the original methylation rate calculatd
# as the fraction of methylated CpG in a CpG site)
#
# Note each row in above datasets should correspond to the same CpG site.
#
# In following example code, assume the methylation data has been processed by bsseq package and saved as
# ``path/bsseq_$chr.rds``, then the definition of ``methylation_hooks$get_by_chr`` is:
#
# methylation_hooks$get_by_chr = function(chr) {
# obj = readRDS(paste0("path/bsseq_", chr, ".rds"))
# lt = list(gr = granges(obj),
# raw = getMeth(obj, type = "raw"),
# cov = getCoverage(obj, type = "Cov"),
# meth = getMeth(obj, type = "smooth")
# return(lt)
# }
#
# After ``methylation_hooks$get_by_chr`` is properly set, the "current chromosome" for the methylation dataset
# can be set by ``methylation_hooks$set_chr(chr)`` where ``chr`` is the chromosome name you want to go.
# After validating the dataset, following variables can be used directly:
#
# - ``methylation_hooks$gr``
# - ``methylation_hooks$meth``
# - ``methylation_hooks$sample_id``
# - ``methylation_hooks$cov``
# - ``methylation_hooks$raw`` if available
#
# ``methylation_hooks$set_chr(chr)`` tries to reload the data only when the current chromosome changes.
#
# == value
# Hook functions
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
# Placeholder definition; it is immediately replaced by the `setGlobalOptions()`
# call below. The empty function only exists so that the documentation block
# above is attached to the `methylation_hooks` object.
methylation_hooks = function(..., RESET = FALSE, READ.ONLY = NULL, LOCAL = FALSE) {}
# Private environment caching the methylation data of the chromosome most
# recently loaded through `methylation_hooks$set_chr()`.
METH_OBJ = new.env()
assign("meth", NULL, envir = METH_OBJ)      # methylation rate matrix (CpG x sample)
assign("raw", NULL, envir = METH_OBJ)       # unsmoothed methylation rate, optional
assign("cov", NULL, envir = METH_OBJ)       # CpG coverage matrix, optional
assign("gr", NULL, envir = METH_OBJ)        # GRanges with CpG positions
assign("chr", NULL, envir = METH_OBJ)       # name of the currently loaded chromosome
assign("sample_id", NULL, envir = METH_OBJ) # sample ids (column names of `meth`)
# The real `methylation_hooks` object: a GlobalOptions-style options function.
# The private read-only fields simply expose the cached values in `METH_OBJ`.
methylation_hooks = setGlobalOptions(
	get_by_chr = list(.value = NULL, .class = "function"),
	meth = list(.value = function() METH_OBJ$meth, .private = TRUE, .visible = FALSE),
	raw = list(.value = function() METH_OBJ$raw, .private = TRUE, .visible = FALSE),
	cov = list(.value = function() METH_OBJ$cov, .private = TRUE, .visible = FALSE),
	gr = list(.value = function() METH_OBJ$gr, .private = TRUE, .visible = FALSE),
	sample_id = list(.value = function() METH_OBJ$sample_id, .class = "character", .private = TRUE, .visible = FALSE),
	set_chr = list(.value = NULL, .class = "function", .private = TRUE, .visible = FALSE)
)
# Load the methylation dataset of a given chromosome into the package-level
# cache (`METH_OBJ`), after which `methylation_hooks$gr/meth/raw/cov/sample_id`
# can be used directly.
#
# Args:
#   chr: a single chromosome name, forwarded to the user-defined
#        `methylation_hooks$get_by_chr()` hook.
#   verbose: whether to print progress messages.
#
# The hook's return value is validated (mandatory `meth` + `gr`, optional
# `raw`/`cov`, consistent dimensions and column names) before being cached.
# The data is re-read only when `chr` differs from the cached chromosome.
methylation_hooks$set_chr = function(chr, verbose = TRUE) {
	previous_chr = METH_OBJ$chr
	if(!is.null(previous_chr)) {
		# same chromosome already cached: nothing to do
		if(previous_chr == chr) {
			if(verbose) message(qq("[@{chr}] @{chr} is already set."))
			return(invisible(NULL))
		}
	}
	obj = methylation_hooks$get_by_chr(chr)
	# validate obj: `meth` and `gr` are mandatory
	if(is.null(obj$meth)) {
		stop("The list which is returned by `methylation_hooks$get_by_chr` should contain `meth`.")
	}
	if(is.null(obj$gr)) {
		stop("The list which is returned by `methylation_hooks$get_by_chr` should contain `gr`.")
	}
	# `raw` and `cov` are optional. These notices were previously emitted with
	# cat() (no trailing newline, printed to stdout); message() is used now so
	# they behave consistently with the other diagnostics in this function.
	if(is.null(obj$raw)) {
		message("The list which is returned by `methylation_hooks$get_by_chr` has no `raw`.")
	}
	if(is.null(obj$cov)) {
		message("The list which is returned by `methylation_hooks$get_by_chr` has no `cov`.")
	}
	sample_id = colnames(obj$meth)
	if(is.null(sample_id)) {
		stop("The methylation matrix which is represented as `meth` must have column names which represents as sample ids.")
	}
	# coerce data frames to matrices so downstream code can assume matrices
	if(inherits(obj$meth, "data.frame")) {
		obj$meth = as.matrix(obj$meth)
	}
	if(!inherits(obj$meth, "matrix")) {
		stop("`meth` should be a matrix.")
	}
	if(!inherits(obj$gr, "GRanges")) {
		stop("`gr` should be a GRanges object.")
	}
	if(is.unsorted(start(obj$gr))) {
		stop("`gr` should be sorted.")
	}
	if(!is.null(obj$raw)) {
		if(inherits(obj$raw, "data.frame")) {
			obj$raw = as.matrix(obj$raw)
		}
		if(!inherits(obj$raw, "matrix")) {
			stop("`raw` should be a matrix.")
		}
	}
	if(!is.null(obj$cov)) {
		if(inherits(obj$cov, "data.frame")) {
			obj$cov = as.matrix(obj$cov)
		}
		if(!inherits(obj$cov, "matrix")) {
			stop("`cov` should be a matrix.")
		}
	}
	# all matrices must be parallel to `gr` (rows) and to `meth` (columns)
	if(length(obj$gr) != nrow(obj$meth)) {
		stop("Number of rows in `meth` should be the same as the length of `gr`.")
	}
	if(!is.null(obj$raw)) {
		if(length(obj$gr) != nrow(obj$raw)) {
			stop("Number of rows in `raw` should be the same as the length of `gr`.")
		}
		if(!identical(colnames(obj$meth), colnames(obj$raw))) {
			stop("Column names of `raw` should be identical to the column names of `meth`.")
		}
	}
	if(!is.null(obj$cov)) {
		if(length(obj$gr) != nrow(obj$cov)) {
			stop("Number of rows in `cov` should be the same as the length of `gr`.")
		}
		if(!identical(colnames(obj$meth), colnames(obj$cov))) {
			stop("Column names of `cov` should be identical to the column names of `meth`.")
		}
	}
	# everything validated: update the cache
	assign("meth", obj$meth, envir = METH_OBJ)
	assign("raw", obj$raw, envir = METH_OBJ)
	assign("cov", obj$cov, envir = METH_OBJ)
	assign("gr", obj$gr, envir = METH_OBJ)
	assign("chr", chr, envir = METH_OBJ)
	assign("sample_id", sample_id, envir = METH_OBJ)
	if(verbose) {
		message(qq("Following methylation datasets have been set for @{chr}:"))
		message(qq("- `methylation_hooks$gr`: a GRanges object which contains positions of CpG sites."))
		message(qq("- `methylation_hooks$meth`: methylation matrix"))
		if(!is.null(obj$raw)) message(qq("- `methylation_hooks$raw`: raw methylation matrix (unsmoothed)"))
		if(!is.null(obj$cov)) message(qq("- `methylation_hooks$cov`: CpG coverage matrix"))
		message(qq("There are @{length(obj$gr)} CpG sites, @{length(sample_id)} samples."))
	}
	return(invisible(NULL))
}
# register S3 classes so that `print.methylation_hooks()` is dispatched
class(methylation_hooks) = c("methylation_hooks", "GlobalOptionsFun")
# == title
# Print the methylation_hooks object
#
# == param
# -x a `methylation_hooks` objects
# -... additional arguments
#
# == value
# No value is returned
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
# S3 print method for the `methylation_hooks` object. If the `get_by_chr`
# hook has not been defined yet, instructions on how to define it are
# printed; otherwise the currently loaded chromosome (if any) is reported.
print.methylation_hooks = function(x, ...) {
	if(is.null(methylation_hooks$get_by_chr)) {
		str = "Please set `methylation_hooks$get_by_chr` to import methylation dataset. The value is a function with only one argument which is the chromosome name. The returned value should be a list which contains:\n"
		qqcat(str, strwrap = TRUE)
		qqcat("- `gr`: a GRanges object which contains positions of CpG sites.\n")
		qqcat("- `meth`: methylation matrix (mainly used in the package)\n")
		qqcat("- `raw`: raw methylation matrix (unsmoothed), optional.\n")
		qqcat("- `cov`: CpG coverage matrix.\n")
	} else {
		if(is.null(METH_OBJ$chr)) {
			# fixed: the closing backtick after `set_chr()` was missing in this message
			qqcat("`methylation_hooks$get_by_chr` has been set. Use `methylation_hooks$set_chr()` to set a chromosome.\n")
		} else {
			qqcat("`methylation_hooks$get_by_chr` has been set. Current chromosome is @{METH_OBJ$chr}\n")
		}
	}
}
# Wrap text at the console width and glue the wrapped pieces back
# together into a single string separated by newlines.
strwrap2 = function(x) {
	wrapped = strwrap(x)
	paste(wrapped, collapse = "\n")
}
# == title
# Read ChIP-Seq dataset
#
# == param
# -... please ignore, see 'details' section.
# -RESET remove all hooks
# -READ.ONLY please ignore
# -LOCAL please ignore
#
# == details
# Unlike methylation dataset which is always stored as matrix, ChIP-Seq dataset is stored
# as a list of peak regions that each one corresponds to peaks in one sample. In many cases,
# there are ChIP-Seq datasets for multiple histone marks that each mark does not include all
# samples sequenced in e.g. whole genome bisulfite sequencing or RNA-Seq, thus, to import
# such type of flexible data format, users need to define following hook functions:
#
# -sample_id This self-defined function returns a list of sample IDs given the name of a histone mark.
# -peak This function should return a `GenomicRanges::GRanges` object which are peaks for a given
# histone mark in a given sample. The `GenomicRanges::GRanges` object should better have a meta column named "density"
# which is the density of the histone modification signals. (**Note when you want to take the histone
#         modification signals as quantitative analysis, please make sure they are properly normalized between samples**)
# -chromHMM This hook is optional. If chromatin segmentation by chromHMM is available, this hook
# can be defined as a function which accepts sample ID as argument and returns
# a `GenomicRanges::GRanges` object. The `GenomicRanges::GRanges` object should have a meta column named
# "states" which is the chromatin states inferred by chromHMM.
#
# The ``chipseq_hooks$peak()`` must have two arguments ``mark`` and ``sid`` which are the name of the histone mark
# and the sample id. There can also be more arguments such as chromosomes.
#
# As an example, let's assume the peak files are stored in a format of ``path/$sample_id/$mark.bed``, then we can define
# hooks functions as:
#
# # here `qq` is from GetoptLong package which allows simple variable interpolation
# chipseq_hooks$sample_id = function(mark) {
# peak_files = scan(pipe(qq("ls path/*/@{mark}.bed")), what = "character")
# sample_id = gsub("^path/(.*?)/.*$", "\\1", peak_files)
# return(sample_id)
# }
#
# # here ... is important that the epik package will pass more arguments to it
# chipseq_hooks$peak = function(mark, sid, ...) {
# peak_file = qq("path/@{sid}/@{mark}.bed")
# df = read.table(peak_file, sep = "\t", stringsAsFactors = FALSE)
# GRanges(seqnames = df[[1]], ranges = IRanges(df[[2]], df[[3]]), density = df[[5]])
# }
#
# Normally ``chipseq_hooks$peak()`` are not directly used, it is usually used by `get_peak_list` to read peaks from all samples as a list.
# You can also add more arguments when defining ``chipseq_hooks$peak()`` that these arguments can be passed from `get_peak_list` as well.
# For example, you can add chromosome name as the third argument that you do not need to read the full dataset at a time:
#
# # to make it simple, let's assume it only allows one single chromosome
# chipseq_hooks$peak = function(mark, sid, chr) {
# peak_file = qq("path/@{sid}/@{mark}.bed")
# df = read.table(pipe(qq("awk '$1==\"@{chr}\"' @{peak_file}")), sep = "\t", stringsAsFactors = FALSE)
# GRanges(seqnames = df[[1]], ranges = IRanges(df[[2]], df[[3]]), density = df[[5]])
# }
#
# then you can call `get_peak_list` as:
#
# get_peak_list(mark, chr = "chr1")
#
# The ``chipseq_hooks$chromHMM()`` must have one argument ``sid`` which is the sample id, also there can be more arguments such as chromosomes.
# The usage for the additional arguments is the same as for ``chipseq_hooks$peak()``.
#
# == value
# Hook functions
#
# == seealso
# `get_peak_list`, `get_chromHMM_list`
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
# Placeholder definition; it is immediately replaced by the `setGlobalOptions()`
# call below (the empty function only carries the documentation block above).
chipseq_hooks = function(..., RESET = FALSE, READ.ONLY = NULL, LOCAL = FALSE) {}
# The validators below count formals via `length(as.list(f))`, which is the
# number of formal arguments plus one (the function body): `== 2` means exactly
# one argument, `>= 4` at least three, `>= 3` at least two.
chipseq_hooks = setGlobalOptions(
	# sample_id(mark): returns the sample ids available for a histone mark
	sample_id = list(.value = function(mark) stop("you need to define `sample_id` hook"),
	                 .class = "function",
	                 .validate = function(f) length(as.list(f)) == 2,
	                 .failed_msg = strwrap2("The function should only have one argument which is the name of the histone mark.")),
	# peak(mark, sid, ...): returns the peak regions of one sample for one mark
	peak = list(.value = function(mark, sid, ...) stop("you need to define `peak` hook"),
	            .class = "function",
	            .validate = function(f) length(as.list(f)) >= 4,
	            .failed_msg = strwrap2("The function should have more than two arguments which are the name of the histone mark, sample id and other stuff. If you only use the first two, simply add `...` as the third argument.")),
	# chromHMM(sid, ...): optional; returns the chromHMM segmentation of one sample
	chromHMM = list(.value = NULL,
	                .class = "function",
	                .validate = function(f) length(as.list(f)) >= 3,
	                .failed_msg = strwrap2("The function should have more than one arguments which are the sample id and other stuff. If you only use the first one, simply add `...` as the second argument."))
)
# == title
# Get a list of peak regions for a given histone mark
#
# == param
# -mark name of the histone mark
# -sample_id a vector of sample IDs. If not defined, it is the total samples that are available for this histone mark.
# -... more arguments pass to `chipseq_hooks`$peak().
#
# == details
# It works after `chipseq_hooks` is set.
#
# == value
# A list of `GenomicRanges::GRanges` objects.
#
# If you e.g. set "chr" as the third argument when defining `chipseq_hooks`$peak(), "chr" can also be passed here through ``...``.
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
# Read peaks of all (or the specified) samples for one histone mark through
# the user-defined `chipseq_hooks$peak()` hook. Samples whose hook call fails
# are silently dropped from the returned (named) list.
get_peak_list = function(mark, sample_id = chipseq_hooks$sample_id(mark), ...) {
	# read peaks of a single sample; a failed hook call yields NULL
	read_one = function(sid) {
		res = try(chipseq_hooks$peak(mark, sid, ...))
		if(inherits(res, "try-error")) NULL else res
	}
	peak_list = lapply(sample_id, read_one)
	names(peak_list) = sample_id
	# keep only the samples whose peaks could actually be read
	Filter(Negate(is.null), peak_list)
}
# == title
# Get a list of chromatin segmentation regions
#
# == param
# -sample_id a vector of sample IDs.
# -merge if the sample IDs specified are from the same subgroup, whether to merge their segmentations into one consensus segmentation
# -window window size (in bp) used to segment the genome when merging; if ``NULL`` it is inferred from the segment widths
# -min minimal fraction of samples that must support the majority state for a window to be kept
# -... more arguments pass to `chipseq_hooks`$chromHMM().
#
# == details
# It works after `chipseq_hooks` is set.
#
# == value
# A list of `GenomicRanges::GRanges` objects.
#
# If you e.g. set "chr" as the third argument when defining `chipseq_hooks`$peak(), "chr" can also be passed here through ``...``.
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
#
get_chromHMM_list = function(sample_id, merge = FALSE, window = NULL, min = 0.5, ...) {
	# read the segmentation of each sample through the user-defined hook;
	# samples whose hook call fails are dropped (try() keeps the loop going)
	lt = lapply(sample_id, function(sid) {
		oe = try(gr <- chipseq_hooks$chromHMM(sid, ...))
		if(inherits(oe, "try-error")) {
			return(NULL)
		} else {
			return(gr)
		}
	})
	names(lt) = sample_id
	lt = lt[!sapply(lt, is.null)]
	if(merge) {
		gr_list_1 = lt
		if(is.null(window)) {
			# default window: greatest common divisor of all segment widths
			window = numbers::mGCD(c(sapply(gr_list_1, function(gr) numbers::mGCD(unique(width(gr))))))
			message(qq("window is set to @{window}"))
			if(window == 1) {
				message("when converting bed files to GRanges objects, be careful with the 0-based and 1-based coordinates.")
			}
		}
		# union of chromatin state labels over all samples; the labels are
		# taken from the first meta column of each GRanges object
		# if(is.null(all_states)) {
			all_states = unique(c(unlist(lapply(gr_list_1, function(gr) {unique(as.character(mcols(gr)[, 1]))}))))
			all_states = sort(all_states)
			message(qq("@{length(all_states)} states in total"))
		# }
		message("extracting states")
		# expand every segment into round(width/window) windows that carry its
		# state, integer-encoded against `all_states`; one column per sample
		m1 = as.data.frame(lapply(gr_list_1, function(gr) {
			k = round(width(gr)/window)
			s = as.character(mcols(gr)[, 1])
			as.integer(factor(rep(s, times = k), levels = all_states))
		}))
		m1 = as.matrix(m1)
		gr = makeWindows(gr_list_1[[1]], w = window)
		message("counting for each state")
		# per window, count how many samples show each state; keep only windows
		# whose majority state is supported by >= floor(min * n_samples) samples
		t1 = rowTabulates(m1)
		l = rowMaxs(t1) >= floor(min*length(lt))
		t1 = t1[l, , drop = FALSE]
		gr = gr[l]
		message("determine states")
		# consensus state = the most frequent state of each remaining window
		states1 = rowWhichMax(t1)
		mcols(gr) = NULL
		gr$states = all_states[states1]
		return(gr)
	} else {
		return(lt)
	}
}
|
dcb6ece7fc0d76ce0617e709b083ea1ad42c0b32
|
0cbb895c4ae0dcf7053b7618a5cf23bcc503061a
|
/R/clean_query.R
|
9ab7c1656a7073be63c132aaad0a85c689d316d6
|
[] |
no_license
|
cran/rDataPipeline
|
9e729f6e5c1fdb775d4bd833b02e060313f53e23
|
495e47def394d7026e465ea6ebda392259b55280
|
refs/heads/master
| 2023-09-03T09:45:45.611148
| 2021-11-17T20:00:06
| 2021-11-17T20:00:06
| 429,284,686
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 520
|
r
|
clean_query.R
|
#' Clean query
#'
#' Strip the API prefix from any query value that points at an endpoint
#' record, returning the bare record id instead of the full URL. Values that
#' are not character strings, or do not match the endpoint pattern, are
#' passed through unchanged.
#'
#' @param data a \code{list} containing a valid query for the table, *e.g.*
#' \code{list(field = value)}
#' @param endpoint endpoint
#'
clean_query <- function(data, endpoint) {
  id_pattern <- paste0(endpoint, ".*([0-9]+$|[0-9]+/$)")
  lapply(data, function(value) {
    if (is.character(value) && grepl(id_pattern, value)) {
      basename(value)
    } else {
      value
    }
  })
}
|
0836052612c972233aeb50a0ebedb46a2c958a6a
|
84ef40286b50ab5b905a0b2897959e2df27c70c2
|
/TVS credit/TVSCredit.R
|
68ee3207eb30103fdc7e4003073fba3936fc2e5e
|
[] |
no_license
|
sumanpal94/Competitions
|
32d8f9b2ba9875923af5894d963d316994ecc777
|
5d81b75fef5afc90333aacecd1eb7cbf190873ac
|
refs/heads/master
| 2022-04-24T22:00:14.320366
| 2020-04-28T12:06:05
| 2020-04-28T12:06:05
| 259,606,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,586
|
r
|
TVSCredit.R
|
# TVS Credit hackathon model: gradient-boosted binary classifier (xgboost)
# on the GIM dataset, evaluated with a ROC curve, a confusion table and AUC.
#
# NOTE(review): the script previously called xgboost/ROCR/MLmetrics functions
# without loading those packages, so it failed with "could not find function";
# the library() calls below make the dependencies explicit.
library(readr)
library(xgboost)   # xgb.DMatrix, xgb.cv, xgboost, importance/plot helpers
library(ROCR)      # prediction(), performance() for the ROC curve
library(MLmetrics) # AUC()
# Path is machine-specific; adjust before running.
GIM_Dataset <- read_csv("~/Downloads/5dbabd3a3e7e8_GIM_Dataset/GIM_Dataset.csv")
names(GIM_Dataset)
# Drop the first column (row identifier).
GIM_Dataset1 <- GIM_Dataset[,-1]
# GIM_Dataset1 <- fastDummies::dummy_cols(GIM_Dataset[,-1])
# colnames(GIM_Dataset1) <- gsub(" ","_",colnames(GIM_Dataset1))
# colnames(GIM_Dataset1) <- gsub("/","_",colnames(GIM_Dataset1))
# colnames(GIM_Dataset1) <- gsub("-","_",colnames(GIM_Dataset1))
# V27 is the binary response; everything else is a predictor.
labels = as.matrix(GIM_Dataset1['V27'])
df_train = GIM_Dataset1[-grep('V27', colnames(GIM_Dataset1))]
#df_train=df_train[,c(importance_matrix$Feature[1:30])]
X <- df_train
y <- labels
# 10-fold cross-validation to gauge model quality.
dtrain <- xgb.DMatrix(data.matrix(X), label = labels)
cv <- xgb.cv(data = dtrain, nrounds = 30, nthread = 4, nfold = 10, metrics = list("rmse","auc"),
             max_depth = 40, eta = 0.05, objective = "binary:logistic")
print(cv)
print(cv, verbose = TRUE)
# Final model fit on the full data.
# NOTE(review): `seed` is not a formal argument of the R xgboost() API
# (use set.seed() for reproducibility instead) — confirm intended behaviour.
xgb <- xgboost(data = data.matrix(X),
               label = y,
               eta = 0.5,
               max_depth = 30,
               nround = 50,
               colsample_bytree = 0.5,
               seed = 1,
               eval_metric = "logloss",
               objective = "binary:logistic",
               nthread = 3
)
# In-sample predictions and ROC curve (evaluated on the training data, so
# the reported performance is optimistic).
y_pred <- predict(xgb, data.matrix(X))
y_pred
pred <- prediction(y_pred, y)
perf <- performance(pred, "tpr", "fpr")
plot(perf, colorize = TRUE)
# Threshold at 0.5 for the confusion table.
# NOTE(review): AUC() is computed on the *binarised* predictions below;
# AUC is normally computed on probabilities — confirm this is intended.
y_pred[y_pred >= .5] = 1
y_pred[y_pred < .5] = 0
table(y, y_pred)
AUC(y_pred, y)
# Feature importance and tree visualisation.
names <- dimnames(X)[[2]]
importance_matrix <- xgb.importance(names, model = xgb)
xgb.plot.importance(importance_matrix[1:30])
xgb.plot.tree(feature_names = names, model = xgb, n_first_tree = 2)
|
ce511e634f5c20d7094010d4f1575bbac7965ca4
|
f082479af82cb029536ce9186e2b76f876bac126
|
/examples/05do_GCluster07.R
|
b6b3230de58ba4719f0266f2b10a9babb79d90c5
|
[] |
no_license
|
Heng19/GCluster
|
536a01806c24427a4b5243727cf7b45ecb213bc2
|
0a17ec6717c21e28905614aa8e3060c06b362d47
|
refs/heads/master
| 2023-01-28T12:12:49.059543
| 2020-12-06T21:11:20
| 2020-12-06T21:11:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
05do_GCluster07.R
|
# Example: cluster a 2-D embedding with GCluster, write the cluster
# assignment to disk and draw the points coloured by cluster with the
# minimum-spanning-tree edges overlaid.
# NOTE(review): rm(list=ls()) at the top of a script is discouraged — it
# clears the caller's workspace as a side effect.
rm(list=ls());
library(GCluster);
# input: two-column coordinates (e.g. a t-SNE embedding) with row names
dat <- read.table("example_data/Rtsne_1.txt", header=T, row.names=1);
# MST edges; columns 2,3 and 5,6 are used below as the segment endpoints
# (x0, y0, x1, y1) — presumably edge start/end coordinates, confirm against
# the GCluster documentation
out.s <- mstGCluster(dat=dat, k=0);
out <- out.s[,c(2,3,5,6)];
out <- as.matrix(out);
# cluster the points (wt and k: see GCluster documentation)
out2 <- GCluster(dat=dat, wt=4, k=10);
clu.i <- out2$membership;
out.c <- data.frame(dat, cluster=clu.i);
write.table(out.c, "example_data/Rtsne_1_clu.txt", quote=F, sep="\t");
clu.n <- length(unique(clu.i));
cols <- rainbow(clu.n, alpha=0.5);
# plot: empty canvas, then points coloured by cluster, then MST segments
# (segment colour is hard-coded to the 9th palette entry)
pdf("05do_GCluster07.pdf", 6, 6);
plot(dat, type="n", main="", xlab="", ylab="");
points(dat, pch=19, col=cols[clu.i], cex=1.2);
segments(as.numeric(out[,1]), as.numeric(out[,2]),
	as.numeric(out[,3]), as.numeric(out[,4]), col=cols[9], lwd=2);
dev.off();
|
642371d2f6dfe83dc459fd44945b8716c780e99e
|
90ac397141853922c876e7a11cefb8e132aaf710
|
/Source Code/Analysis/Setting Up Movement Analysis.R
|
a389b8533772d03d88e99cfb4436a34970df3d0b
|
[] |
no_license
|
hsiadd/Turtle-Backpack
|
8d685198beff8e25eada671b703550190db1a7df
|
fbb99ab32f6a14043a49220a382415d47ee57294
|
refs/heads/master
| 2022-10-13T09:41:47.840597
| 2020-06-11T12:55:45
| 2020-06-11T12:55:45
| 271,599,796
| 0
| 0
| null | 2020-06-11T16:51:49
| 2020-06-11T16:51:48
| null |
UTF-8
|
R
| false
| false
| 6,104
|
r
|
Setting Up Movement Analysis.R
|
#### Framework for beginning movement analysis
## This just allows you to generate a dataframe with information on turtle movement as collected through the GPS logger
## This data can then be used for analysis in any way
## Libraries
library(tidyverse)
library(sf)
library(data.table)
## Set up the dataframe, including filtering out outliers
# NOTE(review): setwd() with an absolute, machine-specific path makes the
# script non-portable; consider relative paths or here::here().
setwd("C:/Users/luke/Dropbox/Black Rock/Data/GPS Backpacks")
file_names <- dir("C:/Users/luke/Dropbox/Black Rock/Data/GPS Backpacks")
# one CSV per logger download; stack them into a single data frame
GPSBase <- do.call("rbind", lapply(file_names, read.csv))
# keep only plausible coordinates for the study area (~40-42 N, ~73-75 W)
GPSBase <- GPSBase %>%
  filter(Lat > 40) %>%
  filter(Lat < 42) %>%
  filter(Lon < -73) %>%
  filter(Lon > -75)
GPSBase <- filter(GPSBase, Lon != 0)
# elapsed minutes since a June origin
# NOTE(review): months are approximated as 30 days, so timestamps drift
# slightly across month boundaries — confirm this is acceptable.
GPSBase <- mutate(GPSBase, tottime = (Month - 6) * 30 * 24 * 60 + Day * 24 * 60 + Hour * 60 + Minute)
# collapse consecutive duplicate fixes to one row
# NOTE(review): seq_along(c("Lon", "Lat")) is just c(1, 2), i.e. the FIRST
# TWO columns of GPSBase — confirm those columns really are Lon/Lat.
GPSBase <- GPSBase %>%
  mutate(run = rleidv(GPSBase, cols = seq_along(c("Lon", "Lat")))) %>%
  distinct(run, .keep_all = TRUE)
GPSBase <- mutate(GPSBase, vhf = as.character(vhf))
# elapsed hours, same June origin as tottime
GPSBase <- mutate(GPSBase, tothours = (Month - 6) * 30 * 24 + Day * 24 + Hour + Minute / 60)
# project the fixes to UTM zone 18N so distances are in meters
GPSPoints <- st_as_sf(GPSBase, coords = c("Lon", "Lat"),
                      crs = "+proj=longlat +datum=WGS84", remove = TRUE)
GPSPoints <- st_transform(GPSPoints, crs = "+proj=utm +zone=18 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
# Euclidean (straight-line) distance from two leg lengths, vectorised
# over both arguments.
pythag <- function(a, b) {
  sqrt(a^2 + b^2)
}
# attach the projected UTM coordinates back onto GPSBase
# NOTE(review): this assumes the row order of GPSPoints still matches
# GPSBase (both derive from the same filtered data, so it should).
GPSPointsCoords <- st_coordinates(GPSPoints)
GPSBase <- GPSBase %>%
  mutate(mN = GPSPointsCoords[,2]) %>%
  mutate(mE = GPSPointsCoords[,1])
# mdiff: distance (m) from the previous fix; m2diff: direct distance between
# the fixes immediately before and after this one (used to judge outliers)
GPSBase <- GPSBase %>%
  group_by(vhf) %>%
  arrange(tothours) %>%
  mutate(mdiff = pythag(mN - lag(mN), mE - lag(mE))) %>%
  mutate(m2diff = pythag(lead(mN) - lag(mN), lead(mE) - lag(mE)))
# blank out step lengths across time gaps of more than 6 hours
GPSBase <- GPSBase %>%
  group_by(vhf) %>%
  arrange(tothours) %>%
  mutate(hourdiff = tothours - lag(tothours, default = first(tothours))) %>%
  mutate(mdiff = ifelse(hourdiff > 6,
                        NA,
                        mdiff))
GPSBase <- GPSBase %>%
  group_by(vhf) %>%
  arrange(tothours) %>%
  mutate(hour2diff = tothours - lead(tothours, default = first(tothours))) %>%
  mutate(m2diff = ifelse(hour2diff < -6,
                        NA,
                        m2diff))
# flag a fix as an outlier when the jump to it is large relative to the
# direct distance between its neighbours (track goes out and straight back)
GPSBase <- GPSBase %>%
  mutate(outlier = ifelse(mdiff > 150 & mdiff / m2diff > 2.5| mdiff > 100 & mdiff / m2diff > 3 |
                            mdiff > 50 & mdiff /m2diff > 3.5 | mdiff > 150 & hourdiff < 1.5 &
                            is.na(m2diff) == TRUE,
                          "yes",
                          "no"))
GPSBase <- GPSBase %>%
  filter(outlier == "no" | is.na(outlier) == TRUE)
## Calculate movement speed for intervals (meters per hour)
#### This is already filtering out significant jumps in time (>6 hours) due to the conditions when creating mdiff
GPSBase <- mutate(GPSBase, mph = mdiff / hourdiff)
## Determine if turtle is moving at a given point (defined as movement over 25 meters)
GPSBase <- mutate(GPSBase, moving = ifelse(mph > 25,
                                           1,
                                           0))
GPSBase <- mutate(GPSBase, moving = ifelse(is.na(mph) == TRUE,
                                           NA,
                                           moving))
## Calculate the time of day at the middle of the interval for each sampling point
#### This can be used to explore daily activity patterns
# NOTE(review): the day-change branch assumes consecutive fixes are at most
# one calendar day apart — confirm for multi-day gaps.
GPSBase <- GPSBase %>%
  group_by(vhf) %>%
  arrange(tothours) %>%
  mutate(inthourav = ifelse(Day - lag(Day, default = first(Day)) == 0,
                            (Hour + lag(Hour, default = first(Hour))) / 2,
                            (Hour + 24 + lag(Hour, default = first(Hour))) / 2)) %>%
  mutate(inthourav = ifelse(inthourav > 24,
                            inthourav - 24,
                            inthourav))
## Add a variable to indicate nocturnal movement
### This could be optimized using a dataframe containing daily sunset/sunrise for more long term projects
### ^Shouldn't be too hard, just the dataframes based on the date then mutate by comparing the two variables
GPSBase <- mutate(GPSBase, period = ifelse(inthourav >= 21 | inthourav <= 6,
                                           "night",
                                           "day"))
## Create a new dataframe with information on turtle "trips" (periods of sustained movement)
#### This is essentially done by using the alternating movement variable to create unique trip ids
#### Trips are then summarized by total movement, time, and differences in start and end position
#### Critical here is the difference between distance (total movement between points throughout trip) and displacement (start location to end location)
#### Split into two dataframes, one representing movement periods and one representing rest periods
GPSBase <- GPSBase %>%
  ungroup() %>%
  arrange(vhf, tothours) %>%
  mutate(tripnum = rleidv(moving))
# tripnum is constant within a trip, so sum(tripnum)/max(tripnum) is simply
# the number of fixes in the trip
GPSBase <- GPSBase %>%
  group_by(tripnum) %>%
  mutate(trippoints = sum(tripnum) / max(tripnum)) %>%
  mutate(triptime = sum(hourdiff)) %>%
  mutate(tripdistance = sum(mdiff))
# `**` below is R's undocumented alias for the `^` (power) operator
GPSBase <- GPSBase %>%
  group_by(tripnum) %>%
  mutate(final_mN = last(mN)) %>%
  mutate(final_mE = last(mE)) %>%
  mutate(first_mN = first(mN)) %>%
  mutate(first_mE = first(mE)) %>%
  mutate(withindisplace = ((((final_mN - first_mN) ** 2 +
                               (final_mE - first_mE) ** 2) ** .5)))
GPSTrips <- GPSBase %>%
  dplyr::select(vhf, moving, tripnum, triptime, trippoints,tripdistance, final_mN, final_mE,
                withindisplace) %>%
  distinct()
# displacement between end points of consecutive trips; the first trip of
# each turtle (lag defaults to itself, giving 0) falls back to the
# within-trip displacement
GPSTrips <- GPSTrips %>%
  group_by(vhf) %>%
  mutate(tripdisplace = ((((final_mN - lag(final_mN, default = first(final_mN))) ** 2 +
                             (final_mE - lag(final_mE, default = first(final_mE))) ** 2) ** .5))) %>%
  mutate(tripdisplace = ifelse(tripdisplace > 0,
                               tripdisplace,
                               withindisplace))
# linearity: displacement / distance (1 = perfectly straight trip)
GPSTrips <- mutate(GPSTrips, triplinearity = tripdisplace / tripdistance)
GPSMoving <- filter(GPSTrips, moving == 1)
GPSRest <- filter(GPSTrips, moving == 0)
|
35e4ac68a16b0ea0b1e8a23227538af018a5a277
|
38949272ee54e899efc80a34bf05bea9acfe783d
|
/apps/mediation-multipleregression/original/ui.R
|
9eae0f7b2c9ffbddf65fc028b00a721a0c12efc2
|
[] |
no_license
|
WdeNooy/Statistical-Inference
|
8b9e05fca9fad67bf1f22269811b926e3e557fe5
|
2e2414ecd7b6ba7681e865d96be1ced87f393587
|
refs/heads/master
| 2023-04-06T18:57:35.475152
| 2023-03-23T08:00:31
| 2023-03-23T08:00:31
| 82,560,055
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 991
|
r
|
ui.R
|
library(shiny)

# Fixed dimensions (in pixels) for the main plot.
fig.width = 400
fig.height = 400

# UI: a checkbox group to choose predictors on the left, the plot on the
# right. The `output$mainplot` render is defined in the matching server.R.
shinyUI(
  fluidPage(
    fluidRow(
      column(3,
        br(), br(),
        align = "left",  # fixed typo: was `aligh`, which emitted a bogus HTML attribute
        div(
          checkboxGroupInput("predcheckbox",
            label = "Predictors",
            choices = c("Age" = "age",
                        "Education" = "education",
                        "Pol. Interest" = "polinterest",
                        "News site use" = "newssite"),
            inline = FALSE
          )
        )
      ),
      column(9,
        align = "center",
        plotOutput("mainplot",
          width = fig.width,
          height = fig.height
        )
      )
    )
  )
)
|
018bed2d23b5e33e1c93e22378e32b66bbaec29a
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleclassroomv1.auto/man/CourseWork.Rd
|
5a9aa071184669a5a649f73dcb18cb9c227c9a8d
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,906
|
rd
|
CourseWork.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classroom_objects.R
\name{CourseWork}
\alias{CourseWork}
\title{CourseWork Object}
\usage{
CourseWork(courseId = NULL, id = NULL, title = NULL, description = NULL,
materials = NULL, state = NULL, alternateLink = NULL,
creationTime = NULL, updateTime = NULL, dueDate = NULL,
dueTime = NULL, maxPoints = NULL, workType = NULL,
associatedWithDeveloper = NULL, submissionModificationMode = NULL,
assignment = NULL, multipleChoiceQuestion = NULL)
}
\arguments{
\item{courseId}{Identifier of the course}
\item{id}{Classroom-assigned identifier of this course work, unique per course}
\item{title}{Title of this course work}
\item{description}{Optional description of this course work}
\item{materials}{Additional materials}
\item{state}{Status of this course work}
\item{alternateLink}{Absolute link to this course work in the Classroom web UI}
\item{creationTime}{Timestamp when this course work was created}
\item{updateTime}{Timestamp of the most recent change to this course work}
\item{dueDate}{Optional date, in UTC, that submissions for this course work are due}
\item{dueTime}{Optional time of day, in UTC, that submissions for this course work are due}
\item{maxPoints}{Maximum grade for this course work}
\item{workType}{Type of this course work}
\item{associatedWithDeveloper}{Whether this course work item is associated with the Developer Console project making the request}
\item{submissionModificationMode}{Setting to determine when students are allowed to modify submissions}
\item{assignment}{Assignment details}
\item{multipleChoiceQuestion}{Multiple choice question details}
}
\value{
CourseWork object
}
\description{
CourseWork Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Course work created by a teacher for students of the course.
}
|
62c627b1c209ac2cc6fad9e1279be2e16ee26d6c
|
da77843864acb811a7bad8764cdfbdc025baaac1
|
/app.R
|
eb66b4d0788b3a5e97b0aa76cc2c09759ecc6514
|
[] |
no_license
|
iamstu12/donkey_games
|
b92d1848195474cadb8ce7c2c7775064bf8e88ce
|
82ee7af6f6de69de29713e7e05ea3201729ab284
|
refs/heads/main
| 2023-04-13T16:40:10.126882
| 2021-04-21T19:28:33
| 2021-04-21T19:28:33
| 332,702,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,075
|
r
|
app.R
|
# USER INTERFACE ---------------------------------------------------------------
# Build one dashboard tab. Every tab has the same layout: a "Controls" box
# holding a single selectInput, a full-width "Plot" box and a full-width
# "Table" box. Factoring this out removes four near-identical blocks and
# the stray trailing commas of the original, which passed empty arguments
# to selectInput()/fluidRow()/tabItem().
#
# Args:
#   tab_name: value matching the sidebar menuItem's tabName
#   heading: h2 heading shown at the top of the tab
#   input_id, input_label, input_choices: selectInput configuration
#   plot_id, table_id: output ids rendered by the server
#   controls_width: bootstrap width of the "Controls" box (default 6)
game_tab <- function(tab_name, heading, input_id, input_label, input_choices,
                     plot_id, table_id, controls_width = 6) {
  tabItem(
    tabName = tab_name,
    h2(heading),
    fluidRow(
      box(title = "Controls", status = "primary", solidHeader = TRUE,
          width = controls_width, height = NULL,
          selectInput(input_id, input_label, choices = input_choices))
    ),
    fluidRow(
      box(title = "Plot", status = "primary", solidHeader = TRUE,
          width = 12, height = NULL,
          plotOutput(plot_id))
    ),
    fluidRow(
      box(title = "Table", status = "primary", solidHeader = TRUE,
          width = 12, height = NULL,
          DT::dataTableOutput(table_id))
    )
  )
}

# Dashboard layout: sidebar menu with four tabs, one per analysis view.
# `decade`, `genre`, `regional` and `console` are data frames prepared
# elsewhere in the app before the UI is built.
ui <- dashboardPage(
  # Theme ----
  skin = "blue",
  # Main title ----
  dashboardHeader(title = "Video Games",
                  titleWidth = 250),
  # Sidebar ----
  dashboardSidebar(
    width = 250,
    sidebarMenu(
      menuItem("Games History", tabName = "history", icon = icon("scroll")),
      menuItem("Game Genres", tabName = "genres", icon = icon("gamepad")),
      menuItem("Regional Sales", tabName = "regional", icon = icon("globe")),
      menuItem("Console Sales", tabName = "console", icon = icon("gamepad"))
    )
  ),
  # Main body and tabs ----
  dashboardBody(
    tabItems(
      game_tab("history", "Games History",
               "decade", "Choose your decade:", unique(decade$decade),
               "plot1", "table1"),
      game_tab("genres", "Game Genres",
               "genre", "Choose your genre:", unique(genre$genre),
               "plot2", "table2"),
      game_tab("regional", "Regional Sales",
               "region", "Choose your region:", unique(regional$region),
               "plot3", "table3"),
      game_tab("console", "Top Games",
               "console", "Choose your console:", unique(console$console),
               "plot4", "table4", controls_width = 3)
    )
  )
)
# SERVER -----------------------------------------------------------------------
# Shiny server: each dashboard tab pairs a ggplot (filtered by that tab's
# input control) with a DT data table.  Relies on the data frames `decade`,
# `games`, `genre`, `regional`, `regional_2`, `console` and `console2`
# built earlier in this file.
server <- function(input, output) {
  # Plot 1 ----
  # Bar chart of global sales by genre for the decade chosen in input$decade.
  output$plot1 <- renderPlot({
    decade %>%
      ggplot(aes(x = reorder(genre, global_sales_millions),
                 y = global_sales_millions)) +
      geom_col(alpha = 0.8, colour = "white", fill = "#cc9900",
               data = decade[decade$decade ==
                               input$decade,]) +
      coord_flip() +
      theme_light() +
      labs(title = "Top Performing Games Genres",
           subtitle = "",
           x = "Genre",
           y = "Global Unit Sales (millions)")
  })
  # Table 1 ----
  # Per-game sales for the selected decade.  NOTE(review): this recomputes the
  # decade aggregation from `games` rather than reusing `decade`; "Unknown"
  # years become NA via as.numeric() (with a coercion warning) and are then
  # dropped by the filter, so the filter's comparison string never matches.
  output$table1 <- DT::renderDataTable({
    games %>%
      mutate(year_of_release = as.numeric(year_of_release)) %>%
      filter(year_of_release != "Unknown") %>%
      mutate(decade = floor(year_of_release / 10) * 10) %>%
      filter(decade != 2020) %>%
      group_by(decade, genre, name) %>%
      summarise(global_sales_millions = sum(global_sales)) %>%
      arrange(desc(global_sales_millions)) %>%
      filter(decade == input$decade)
  })
  # Plot 2 ----
  # Sales-over-time area + line chart for the genre chosen in input$genre.
  output$plot2 <- renderPlot({
    genre %>%
      ggplot(aes(x = decade, y = decade_sales)) +
      geom_area(alpha = 0.5, fill = "#666633",
                data = genre[genre$genre ==
                               input$genre,]) +
      geom_line(colour = "#cc9900", size = 1.2,
                data = genre[genre$genre ==
                               input$genre,]) +
      geom_point(colour = "#666633",
                 data = genre[genre$genre ==
                                input$genre,]) +
      labs(title = "",
           x = "Decade",
           y = "Global Unit Sales (millions)") +
      theme_light()
  })
  # Table 2 ----
  # All-time sales per game within the selected genre.
  output$table2 <- DT::renderDataTable({
    games %>%
      group_by(name, genre) %>%
      summarise(total_global_sales = sum(global_sales)) %>%
      arrange(desc(total_global_sales)) %>%
      filter(genre == input$genre)
  })
  # Plot 3 ----
  # Genre sales bar chart for the region chosen in input$region.
  output$plot3 <- renderPlot({
    regional %>%
      ggplot(aes(x = reorder(genre, sales_millions),
                 y = sales_millions)) +
      geom_col(alpha = 0.8, colour = "white", fill = "#cc9900",
               data = regional[regional$region == input$region,]) +
      coord_flip() +
      theme_light() +
      labs(title = "",
           subtitle = "",
           x = "Genre",
           y = "Sales (millions)")
  })
  # Table 3 ----
  # Per-game sales totals for the selected region.
  output$table3 <- DT::renderDataTable({
    regional_2 %>%
      group_by(name, region) %>%
      summarise(total_sales = sum(sales)) %>%
      arrange(desc(total_sales)) %>%
      filter(region == input$region)
  })
  # Plot 4 ----
  # Genre sales bar chart for the console chosen in input$console.
  output$plot4 <- renderPlot({
    console2 %>%
      filter(console == input$console) %>%
      ggplot(aes(x = reorder(genre, sales_millions),
                 y = sales_millions)) +
      geom_col(alpha = 0.8, colour = "white", fill = "#cc9900") +
      coord_flip() +
      theme_light() +
      labs(title = "",
           subtitle = "",
           x = "Genre",
           y = "Sales (millions)")
  })
  # Table 4 ----
  # Per-game sales totals for the selected console.
  output$table4 <- DT::renderDataTable({
    console %>%
      group_by(name, console) %>%
      summarise(total_sales = sum(sales)) %>%
      arrange(desc(total_sales)) %>%
      filter(console == input$console)
  })
}
# APP FUNCTION -----------------------------------------------------------------
# Launch the dashboard, combining the `ui` defined above with the server logic.
shinyApp(ui, server)
|
ace60e07f6d5b638b1401ef936a10862314a5170
|
81a2fa3228451179b12779bb0149398cbfc8e9b1
|
/man/filt3dimArr.Rd
|
f6a1e4d43b48fb31447647ea5b12c6e810a53c46
|
[] |
no_license
|
cran/wrMisc
|
c91af4f8d93ad081acef04877fb7558d7de3ffa2
|
22edd90bd9c2e320e7c2302460266a81d1961e31
|
refs/heads/master
| 2023-08-16T21:47:39.481176
| 2023-08-10T18:00:02
| 2023-08-10T19:30:33
| 236,959,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,509
|
rd
|
filt3dimArr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filt3dimArr.R
\name{filt3dimArr}
\alias{filt3dimArr}
\title{Filter three-dimensional array of numeric data}
\usage{
filt3dimArr(
x,
filtVal,
filtTy = ">",
filtCrit = NULL,
displCrit = NULL,
silent = FALSE,
debug = FALSE,
callFrom = NULL
)
}
\arguments{
\item{x}{array (3-dim) of numeric data}
\item{filtVal}{(numeric, length=1) for testing inferior/superior/equal condition}
\item{filtTy}{(character, length=1) which type of testing to perform (may be 'eq','inf','infeq','sup','supeq', '>', '<', '>=', '<=', '==')}
\item{filtCrit}{(character, length=1) which column-name to consider when filtering with 'filtVal' and 'filtTy'}
\item{displCrit}{(character) column-name(s) to display}
\item{silent}{(logical) suppress messages}
\item{debug}{(logical) additional messages for debugging}
\item{callFrom}{(character) allow easier tracking of messages produced}
}
\value{
This function returns a list of filtered matrixes (by 3rd dim)
}
\description{
Filtering of matrix or (3-dim) array \code{x} : filter column according to \code{filtCrit} (eg 'inf') and threshold \code{filtVal}
}
\details{
and extract/display all col matching 'displCrit'.
}
\examples{
arr1 <- array(11:34, dim=c(4,3,2), dimnames=list(c(LETTERS[1:4]),
paste("col",1:3,sep=""), c("ch1","ch2")))
filt3dimArr(arr1,displCrit=c("col1","col2"),filtCrit="col2",filtVal=7)
}
\seealso{
\code{\link{filterList}}; \code{\link{filterLiColDeList}};
}
|
344432a5dff76e76feb748db3021617f463358c1
|
372981c36a83fcb9a39b636758c8646cec367a3f
|
/plot3.R
|
24885dbd153cae0200c99454f1291438b4450837
|
[] |
no_license
|
alnever/ExData_Plotting1
|
deb4a7e4f1dcf62c578f31ade1afe611c99b6e38
|
f6f49af55962ac2c7598263644d7cd8bc6880581
|
refs/heads/master
| 2021-01-15T18:45:01.318808
| 2016-02-15T13:17:40
| 2016-02-15T13:17:40
| 51,743,050
| 0
| 0
| null | 2016-02-15T09:03:21
| 2016-02-15T09:03:21
| null |
UTF-8
|
R
| false
| false
| 2,036
|
r
|
plot3.R
|
## Plot 3 by Al.Neverov
library(dplyr)

## Fetch the raw data file if it is not already present locally; download the
## zip archive first when even that is missing, then extract it.
if (!file.exists("household_power_consumption.txt")) {
  zip_name <- "exdata_data_household_power_consumption.zip"
  if (!file.exists(zip_name)) {
    download.file(
      "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
      zip_name
    )
  }
  unzip(zip_name)
}

## Load the measurements, build proper date-time columns, and keep only the
## observations from 1 and 2 February 2007.
raw <- read.table("household_power_consumption.txt",
                  sep = ";",
                  header = TRUE,
                  na.strings = c("NA", "N/A", "null", "?"),
                  stringsAsFactors = FALSE,
                  colClasses = c("character", "character", rep("numeric", times = 7)))
power <- tbl_df(raw) %>%
  mutate(aDateTime = paste(Date, Time, sep = " "),
         Date = as.Date(Date, "%d/%m/%Y"),
         pDateTime = as.POSIXct(aDateTime, format = "%d/%m/%Y %H:%M:%S")) %>%
  filter(as.POSIXlt(Date)$year + 1900 == 2007 &
           as.POSIXlt(Date)$mon + 1 == 2 &
           as.POSIXlt(Date)$mday %in% c(1, 2))

## Single plot panel on the screen device.
par(mfrow = c(1, 1))

## Sub metering 1 (black), then overlay 2 (red) and 3 (blue).
plot(power$pDateTime, power$Sub_metering_1, col = "black", type = "l",
     xlab = "", main = "", ylab = "Energy sub metering")
lines(power$pDateTime, power$Sub_metering_2, col = "red", type = "l",
      xlab = "", main = "", ylab = "Energy sub metering")
lines(power$pDateTime, power$Sub_metering_3, col = "blue", type = "l",
      xlab = "", main = "", ylab = "Energy sub metering")

## Legend matching the three series.
legend("topright",
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 1)

## Copy the screen graph into the png file.
dev.copy(png, "plot3.png")
dev.off()
|
d7e30a99bcadfe10a2a341d3a29b94f1f483e74a
|
eb7f96cbae59a1678cb5672d633f7c0318d59b86
|
/functional-analysis.R
|
2b945a099a4335d5c25dc7edcfca9a6a0477129e
|
[] |
no_license
|
ckmah/FAT
|
ff5ad6da322b69b9c5551ff27d7398fb5c7f88b9
|
95482d97efd315030762960fb5effe3c6298f407
|
refs/heads/master
| 2020-12-20T02:56:54.688710
| 2016-06-05T23:27:40
| 2016-06-05T23:27:40
| 60,473,153
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,139
|
r
|
functional-analysis.R
|
library(cummeRbund)
library(gage)
library(pathview)
library(preprocessCore)
library(clusterProfiler)
# Run a KEGG gene-set (GAGE) analysis on a matrix of FPKM values.
#
# Args:
#   fpkmData: numeric matrix of expression values; rownames are gene ids.
#   cuff:     cummeRbund CuffSet used to map ids to gene symbols via getGenes().
#   kegg.gs:  KEGG gene-set list passed through to gage().
#   ref:      reference column index.  NOTE(review): assigned to the unused
#             local `fpkmData.ref` below and never read — confirm whether the
#             reference columns were meant to be passed to gage().
#
# Returns a list with `mapped` (the matrix re-keyed by Entrez id) and
# `kegg.gage` (the gage() result).
keggAnalysis <- function(fpkmData, cuff, kegg.gs, ref) {
    ids <- rownames(fpkmData)
    geneSet <- getGenes(cuff, ids)
    geneIDs <- featureNames(geneSet)
    # select genes that map and are not duplicates
    isUniqueMap <- !is.na(geneIDs[, 2]) & !duplicated(geneIDs[, 2])
    mapped <- fpkmData[isUniqueMap,]
    # get entrez ids
    rownames(mapped) <- geneIDs[isUniqueMap, 2]
    # reference and sample column indices
    fpkmData.ref <- ref
    # fpkmData.samp <- 2
    # convert from gene ID to entrez id
    entrezIDs <- pathview::id2eg(rownames(mapped), category = "symbol")
    entrezIDs.sel <- !is.na(entrezIDs[, 2])
    # select converted genes; drop rows whose symbol had no Entrez mapping
    mapped <- mapped[entrezIDs.sel,]
    rownames(mapped) <- entrezIDs[entrezIDs.sel, 2]
    return(list("mapped" = mapped, "kegg.gage" = gage(mapped, kegg.gs, sam.dir = FALSE)))
    # Everything below is unreachable scratch code kept from development:
    # gs=unique(unlist(kegg.gs[rownames(keggs.p$greater)[1:3]]))
    # cnts.d = cnts.norm[, samp.idx] - rowMeans(cnts.norm[, ref.idx])
    # sel <- cnts.kegg.p$greater[, "q.val"] < 0.1 & !is.na(cnts.kegg.p$greater[, "q.val"])
    # path.ids <- rownames(cnts.kegg.p$greater)[sel]
    # sel.l <- cnts.kegg.p$less[, "q.val"] < 0.1 & !is.na(cnts.kegg.p$less[, "q.val"])
    # path.ids.l <- rownames(cnts.kegg.p$less)[sel.l]
    # path.ids2 <- substr(c(path.ids, path.ids.l), 1, 8)
    # library(pathview)
    # pv.out.list <- sapply(path.ids2, function(pid)
    #   pathview(
    #     gene.data = cnts.d,
    #     pathway.id = pid,
    #     species = "hsa"
    #   ))
    #
}
#
# # extract gene names
# gnames = cuff.res$gene
#
# # select genes present in cuffdiff output
# sel = gnames != "-"
# gnames = as.character(gnames[sel])
# cuff.fc = cuff.fc[sel]
# names(cuff.fc) = gnames
#
# # convert to entrez gene id
# gnames.eg = pathview::id2eg(gnames, category = "symbol")
#
# # filter for genes with > 0 fold change
# sel2 = gnames > ""
# cuff.fc = cuff.fc[sel2]
#
# names(cuff.fc) = gnames[sel2]
# range(cuff.fc)
#
# # max of 10 fold change
# cuff.fc[cuff.fc > 10] = 10
# cuff.fc[cuff.fc < -10] = -10
# exp.fc = cuff.fc
# out.suffix = "cuff"
#
|
c90d062065b0493ed115e4cd6a61d8c09c754b42
|
03187e5731b6527e09a68ce82405880685e43de0
|
/scripts/Sample_variance.R
|
d87b8e4493eed424b328badee7b9673622726755
|
[] |
no_license
|
ElliotMeador84/misc_plots
|
1bfcaf96ed80be8ecfdee377d91cf9b0e3e3780c
|
71ea1b7461445665d86658a344ece5cef8f8b51d
|
refs/heads/master
| 2020-11-25T02:42:35.156679
| 2019-12-20T17:20:22
| 2019-12-20T17:20:22
| 228,458,111
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,042
|
r
|
Sample_variance.R
|
library(tidyverse)
library(gganimate)
library(glue)
# Population variance: the mean squared deviation from the mean.
# Divides by N (unlike stats::var(), which divides by N - 1).
vary_pop <- function(x) {
  deviations <- x - mean(x)
  mean(deviations * deviations)
}
# Simulated "population": 1,000,000 normal draws (mean 500, sd 50).
k <- rnorm(1000000, mean = 500, sd = 50)
#
#
# Draw 10,000 resamples (with replacement) at each of four sample sizes.
df_5 <- map_df(1:10000, function(x) {
    tibble(value = sample(k, 5, T)) %>%
        mutate(sample = paste0('df.5_', x),
               size = 'Sample of 5')
})
#
#
df_10 <- map_df(1:10000, function(x) {
    tibble(value = sample(k, 10, T)) %>%
        mutate(sample = paste0('df.10_', x),
               size = 'Sample of 10')
})
#
#
df_25 <- map_df(1:10000, function(x) {
    tibble(value = sample(k, 25, T)) %>%
        mutate(sample = paste0('df.25_', x),
               size = 'Sample of 25')
})
#
#
df_50 <- map_df(1:10000, function(x) {
    tibble(value = sample(k, 50, T)) %>%
        mutate(sample = paste0('df.50_', x),
               size = 'Sample of 50')
})
df <- bind_rows(df_5, df_10, df_25, df_50)
#
# For every resample compute both variance estimators, then reshape to long
# form with one row per (sample, estimator).
df_var <- df %>%
    group_by(sample) %>%
    mutate('Sample variance' = var(value),
           'Population variance' = vary_pop(value)) %>%
    select(-value) %>%
    distinct() %>%
    gather(calculation, value, -sample, -size) %>%
    ungroup()
#
size_levs <- df_var %>%
    count(size, sort = T) %>%
    pull(size)
# Reorder the size factor levels for plotting (index order depends on counts).
size.levels <- size_levs[c(3, 1, 2, 4)]
df_var_gg <- df_var %>%
    mutate(
        calculation = fct_relevel(calculation,
                                  'Sample variance',
                                  'Population variance'),
        size = fct_relevel(size,
                           size.levels)
    ) %>%
    filter(between(value, 0, 6000)) %>%
    group_by(size, calculation) %>%
    mutate(diverge = value - median(value)) %>%
    arrange(diverge)
# save(df_var, file = '~/df_var.RData')
# save(k, file = '~/k.RData')
#
# load('~/df_var.RData')
# load('~/k.RData')
# Per-panel medians and their distance from the true population variance,
# plus a label shown only in the population-variance panel.
df_median <- df_var_gg %>%
    group_by(size, calculation) %>%
    summarise(median = median(value),
              diff = round(vary_pop(k), 0) -
                  round(median, 0)) %>%
    ungroup() %>%
    mutate(true.vary = ifelse(
        calculation == 'Population variance',
        'True\npopulation\nvariance',
        NA))
# Faceted histograms of the two estimators with reference lines.
# NOTE(review): `Spectral` and `YlGnBu_n` are not defined in this file —
# presumably palette objects created elsewhere; confirm before running.
p <- df_var_gg %>%
    ggplot(aes(value)) +
    geom_histogram(bins = 100,
                   fill = Spectral[5],
                   show.legend = F) +
    geom_vline(xintercept = vary_pop(k),
               color = 'grey',
               size = 1.25) +
    geom_vline(
        data = df_median,
        aes(xintercept = median),
        linetype = 4,
        size = 1.25,
        color = Spectral[11]
    ) +
    geom_text(
        data = df_median,
        aes(label = paste('Difference of\n', diff),
            x = median - 500),
        y = 475,
        size = 3) +
    geom_text(
        data = df_median,
        aes(label = true.vary),
        x = 2855,
        y = 50,
        size = 3
    ) +
    geom_text(
        data = df_median,
        aes(label = size),
        x = 3500,
        y = 350,
        size = 3.25
    )+
    scale_x_continuous(labels = scales::comma) +
    scale_fill_manual(values = YlGnBu_n(100)) +
    facet_wrap(. ~ calculation ,
               scales = 'free_x',
               ncol = 1) +
    theme_minimal()+
    theme(panel.grid.minor = element_blank(),
          plot.caption = element_text(hjust = 0))+
    labs()
variance_p <- p + labs(title = expression(paste('Comparing sample variance ' [frac(Sigma(X - mu)^2, N - 1)], ' with population variance ' [frac(Sigma(X - mu)^2, N)])),
                       subtitle = expression(paste('The difference between '[N-1], ' and '[N], ' in stastical analysis.')),
                       caption = 'I created a random normal vector of length 1,000,000 with a mean of 500 and standard deviation\nof 50.\nThen I took 10,000 random samples of size 5, 25 and 50.\nLastly, I calculated the population variance and sample variance for each to see the difference that\ndividing by N and N-1 makes.\nThe difference between the formulas for sample and population variance is small, but it makes a\nBIG difference in accuracy.\nu/lane_dog')
# Animate across the four sample sizes and save as a GIF.
animated_plot <- variance_p +
    transition_states(
        size,
        transition_length = 1,
        state_length = 1,
        wrap = T
    )
p_gganimate <- gganimate::animate(animated_plot)
anim_save(p_gganimate, filename = 'p_gganimate.gif')
|
f62a76329bdcb202d97189c56d8e053b105ece42
|
e90ea4192abe4f35a964b37578bdada212a2adce
|
/tests/testthat/testSuite002-states-testState8.3.R
|
d980151d7125b94fab45d02f51a2a85fa4ce4184
|
[
"MIT"
] |
permissive
|
jstockwin/EpiEstimApp
|
e34ead9d002c377adc07849c3c2d7d00fb5345f5
|
3c9c74180a9ed4684094ef5c42bd0bdd1a9ec815
|
refs/heads/master
| 2021-01-19T07:06:41.410215
| 2020-06-04T09:05:09
| 2020-06-04T09:05:09
| 76,178,348
| 30
| 14
|
MIT
| 2020-06-04T09:05:11
| 2016-12-11T14:09:55
|
R
|
UTF-8
|
R
| false
| false
| 2,583
|
r
|
testSuite002-states-testState8.3.R
|
# RSelenium UI test for app state 8.3: verifies the serial-interval upload
# controls, n2/seed inputs, and navigation buttons render correctly.
# Helpers (getRemDrivers, openRemDriver, navigateToState, isDisplayed, etc.)
# and the `pages` selector table come from functions.R.
context("Test Suite 2 (States) --> State 8.3")
library(RSelenium)
library(testthat)
source("functions.R", local=TRUE)
# Start the Selenium server/browser pair for this suite.
drivers <- getRemDrivers("Test Suite 2 (States) --> State 8.3")
rD <- drivers$rDr
remDr <- drivers$remDr
openRemDriver(remDr)
# tryCatch ensures the drivers are closed even if an expectation errors.
tryCatch({
  test_that("can connect to app", {
    connectToApp(remDr)
  })
  test_that("app is ready within 30 seconds", {
    waitForAppReady(remDr)
  })
  test_that("can navigate to state 8.3", {
    navigateToState(remDr, "8.3")
  })
  test_that("SISample file upload buttons are displaying correctly", {
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$si_sample_data_upload_label))
    expect_equal(getText(remDr, pages$state8.3$selectors$si_sample_data_upload_label),
                 "Choose a serial interval data file to upload")
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$si_sample_data_upload_browse))
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$si_sample_header_button))
  })
  test_that("n2 input is displayed correctly", {
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$n2_label))
    expect_equal(getText(remDr, pages$state8.3$selectors$n2_label),
                 paste("Choose n2, the posterior sample size to be drawn for",
                       "R for each SI distribution sampled"))
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$n2_input))
  })
  test_that("seed input is displayed correctly", {
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$seed_label))
    expect_equal(getText(remDr, pages$state8.3$selectors$seed_label),
                 paste("Set a seed to be used by EpiEstim. A random seed will",
                       "be chosen if this is left blank"))
    expect_true(isDisplayed(remDr, pages$state8.3$selectors$seed_input))
  })
  test_that("relevant control buttons are displayed", {
    expect_false(isDisplayed(remDr, pages$common$selectors$stop_button))
    expect_true(isDisplayed(remDr, pages$common$selectors$prev_button))
    expect_true(isEnabled(remDr, pages$common$selectors$prev_button))
    expect_false(isDisplayed(remDr, pages$common$selectors$next_button))
    expect_true(isDisplayed(remDr, pages$common$selectors$go_button))
    expect_true(isEnabled(remDr, pages$common$selectors$go_button))
  })
  test_that("no errors are displaying", {
    expect_false(isDisplayed(remDr, pages$common$selectors$error_message))
    expect_equal(getText(remDr, pages$common$selectors$error_message), "")
  })
},
error = function(e) {
  closeRemDrivers(remDr, rD)
  stop(e)
})
closeRemDrivers(remDr, rD)
|
c28c045b59833130e89aa434c9fd0110651f4a47
|
2a41bf8adf928c9c3337a5740bd1e2e64083027b
|
/visualization.R
|
8dbc291998ac1a6d7a040193afc14f30865fde3f
|
[] |
no_license
|
ibrahimBGurhan/7COM1079-Project
|
4660819f65ec516a222ec029948846e92eea3876
|
00954aba4fe88bc7f44bbeebde18387cb7ba00d9
|
refs/heads/main
| 2023-03-08T15:41:23.684334
| 2021-02-27T21:44:00
| 2021-02-27T21:44:00
| 342,962,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
visualization.R
|
library(readr)
library(dplyr)

food <- read_csv("Food_Supply_Quantity_kg_Data.csv")

# Column groups used to aggregate consumption into broad food categories.
animal_cols <- c("Animal fats", "Animal Products", "Aquatic Products, Other",
                 "Eggs", "Fish, Seafood", "Meat", "Milk - Excluding Butter",
                 "Miscellaneous", "Offals")
plant_cols <- c("Alcoholic Beverages", "Cereals - Excluding Beer",
                "Fruits - Excluding Wine", "Oilcrops", "Pulses", "Spices",
                "Starchy Roots", "Stimulants", "Sugar & Sweeteners",
                "Sugar Crops", "Treenuts", "Vegetable Oils", "Vegetables",
                "Vegetal Products")

food$animal <- rowSums(food[, animal_cols], na.rm = TRUE)
food$plant <- rowSums(food[, plant_cols], na.rm = TRUE)
food$total <- rowSums(food[, c("animal", "plant")], na.rm = TRUE)

# Drop rows where the death rate is zero, then rows where it is missing.
food <- food[!(food$Deaths == 0), ]
food <- food[!is.na(food$Deaths), ]

# Scatter plot of mortality against plant-based consumption with an OLS fit.
pdf("visualization.pdf")
plot(food$plant, food$Deaths, pch = 19, frame = TRUE, col = "blue",
     main = "Mortality rate from Covid-19 vs plant-based product consumption rate",
     xlab = "Plant-based product consumption rate (%)",
     ylab = "Mortality rate from Covid-19 (%)")
abline(lm(food$Deaths ~ food$plant), col = "red")
dev.off()
|
bac92f696765cf81986ff36c54dffef0bfd7d9fd
|
e3476214d48b504d537af1dc38af0ee4d95108f3
|
/man/draw_null_bar.Rd
|
2072c191d8d289b3b30b023afa2e639d9d57ca5d
|
[] |
no_license
|
ABMI/GEMINI
|
f9a8153429dae53e6c7a9db072eb182cecab3311
|
484454ba9c04665a4d2c2ed629c901c049e229ab
|
refs/heads/g_temp
| 2021-07-13T19:35:30.520415
| 2020-02-13T08:53:40
| 2020-02-13T08:53:40
| 157,642,058
| 0
| 6
| null | 2020-06-22T09:04:17
| 2018-11-15T02:43:42
|
R
|
UTF-8
|
R
| false
| true
| 610
|
rd
|
draw_null_bar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Draw_function.R
\name{draw_null_bar}
\alias{draw_null_bar}
\title{Draw bar plot with null ratio about CDM data}
\usage{
draw_null_bar(dbName, rds, rdsConcept)
}
\arguments{
\item{dbName}{CDM database name}
\item{rds}{loaded CDM data}
\item{rdsConcept}{rds}
}
\value{
Return two bar plot.
}
\description{
\code{draw_null_bar} Draw bar plot with null ratio
}
\details{
\code{draw_null_bar} Draw bar plot with null ratio
}
\examples{
\dontrun{
draw_null_bar(dbName = 'MYCDM', rds = loaded_rds, rdsConcept = 'persontbl_location')
}
}
|
43fbb21d48b2b5ab518ce3f4504c48c30c03f2eb
|
d03924f56c9f09371d9e381421a2c3ce002eb92c
|
/man/EllipticalSymmetry.Rd
|
dea6031f1df140dead08d9393f2671a130172bde
|
[] |
no_license
|
cran/distr
|
0b0396bbd5661eb117ca54026afc801afaf25251
|
c6565f7fef060f0e7e7a46320a8fef415d35910f
|
refs/heads/master
| 2023-05-25T00:55:19.097550
| 2023-05-08T07:10:06
| 2023-05-08T07:10:06
| 17,695,561
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 784
|
rd
|
EllipticalSymmetry.Rd
|
\name{EllipticalSymmetry}
\alias{EllipticalSymmetry}
\title{Generating function for EllipticalSymmetry-class}
\description{Generates an object of class \code{"EllipticalSymmetry"}.}
\usage{EllipticalSymmetry(SymmCenter = 0)}
\arguments{
\item{SymmCenter}{ numeric: center of symmetry }
}
%\details{}
\value{Object of class \code{"EllipticalSymmetry"}}
%\references{}
\author{Matthias Kohl \email{Matthias.Kohl@stamats.de}}
%\note{}
\seealso{\code{\link{EllipticalSymmetry-class}}, \code{\link{DistributionSymmetry-class}}}
\examples{
EllipticalSymmetry()
## The function is currently defined as
function(SymmCenter = 0){
new("EllipticalSymmetry", SymmCenter = SymmCenter)
}
}
\concept{elliptical symmetry}
\concept{symmetry}
\keyword{distribution}
|
3c9caba2ff95c231a063eb85c374196a2ce79473
|
c86ac6b3e9b82ce5daaa7d2a11bc739eede2698f
|
/Runway_and_HVP_NFF_ECN_Date_Table.r
|
26122144abe2cf6096e7d79cfc0bdfea8dcfda79
|
[] |
no_license
|
Waylan49/Productivity_and_Quality_Data_Analysis
|
a2cad88faae44b56d5bee469b56381521a0ed937
|
4e3fb63a5cdcf9f579403776e92d24194fd38274
|
refs/heads/master
| 2023-03-28T20:07:55.186116
| 2021-04-05T20:02:03
| 2021-04-05T20:02:03
| 354,957,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,334
|
r
|
Runway_and_HVP_NFF_ECN_Date_Table.r
|
## ---- NFF (No Fault Found) summary --------------------------------------------
## Reads the Qspeak report, counts repairs per assembly, flags rows with no
## fault code as NFF, and writes rate/top-5/top-20 tables to NFF_Data.xlsx.
## NOTE(review): read_xlsx (readxl) and the %>%/dplyr/writexl functions must be
## attached earlier in the session — no library() calls appear in this chunk.
setwd("C:/Users/Weilun_Chiu/Desktop/QWE/Qspeak")
test1<-read_xlsx("Qspeak_Report.xlsx", sheet = 1, na = "")   ### Be aware of "NA" problem
temp<-test1 %>% select(AssemblyNo, Fault.Code.1)
# Total repair volume per assembly, sorted descending.
sum_record<-data.frame(table(temp$AssemblyNo))
sum_record<-sum_record %>% arrange(desc(Freq))
sum_qty<-sum(sum_record$Freq)
# NFF = records with a missing fault code.
nff<-temp %>% filter(is.na(Fault.Code.1))
nff_record<-data.frame(table(nff$AssemblyNo)) %>% arrange(desc(Freq))
names(nff_record)<-c("Assy", "Qty.")
nffpart<-as.character(nff_record$Assy)
len1<-length(nffpart)
# Look up each NFF assembly's total repair count from sum_record.
total_qty<-rep(0, len1)
for(i in 1:len1){
  ind<-which(sum_record$Var1==nffpart[i])
  total_qty[i]<-sum_record$Freq[ind]
}
nff_record<-cbind(nff_record, total_qty)
nff_record$percent<-round(nff_record$Qty./nff_record$total_qty, 3)
nff_qty<-sum(nff_record$Qty.)
# Overall NFF rate (NFF count / total repairs), rounded to 4 decimals.
nff_rate<-as.data.frame(round(t(data.frame(NFF=nff_qty, Sum=sum_qty, Percent=nff_qty/sum_qty)),4))
# Top-5 assemblies by NFF quantity, plus "Others" and grand-total rows.
nff_top5<-nff_record[1:5, 1:2]
others<-c("Others",nff_qty-sum(nff_top5$Qty.))
Total_NFF<-c("Total NFF", nff_qty)
nff_top5$Assy<-as.character(nff_top5$Assy)
nff_top5_1<-rbind(nff_top5, others, Total_NFF)
nff_record<-nff_record %>% arrange(desc(percent)) %>% select(Assy, total_qty, Qty., percent)
top5_nff_percent<-nff_record[1:5, ]
nff_record1<-nff_record %>% arrange(desc(Qty.))
top5_nff_qty_percent<-nff_record1[1:5, ]
top20_nff_percent<-nff_record[1:20, ]
NFF_Final<-list(NFF_Rate=nff_rate, NFF_TOP5=nff_top5_1, TOP5_NFF_Percent=top5_nff_percent,
                TOP5_NFF_Qty_Percent=top5_nff_qty_percent, TOP20_NFF_Percent=top20_nff_percent, Raw.NFF=nff_record)
write_xlsx(NFF_Final, "NFF_Data.xlsx")
## ---- ECN (Fault.Code.1 == "101") summary -------------------------------------
## Builds per-assembly ECN counts, the overall ECN rate, top-5/top-20 tables,
## and writes everything to ECN_Data.xlsx.  Depends on `temp`, `sum_record`
## and `sum_qty` computed in the NFF section above.
ecn<-temp %>% filter(Fault.Code.1 =="101")
ecn_record<-data.frame(table(ecn$AssemblyNo)) %>% arrange(desc(Freq))
names(ecn_record)<-c("Assy", "Qty.")
ecnpart<-as.character(ecn_record$Assy)
len2<-length(ecnpart)
# BUG FIX: allocate one slot per ECN part (len2), not per NFF part (len1, a
# leftover from the NFF section); the old code produced a wrong-length vector
# whenever the two counts differed.  seq_len() also handles len2 == 0 safely.
total_qty1<-rep(0, len2)
for(i in seq_len(len2)){
  ind<-which(sum_record$Var1==ecnpart[i])
  total_qty1[i]<-sum_record$Freq[ind]
}
ecn_record<-cbind(ecn_record, total_qty1)
# Use the exact column name total_qty1 instead of relying on `$total_qty`
# partial matching against it.
ecn_record$percent<-round(ecn_record$Qty./ecn_record$total_qty1, 3)
ecn_record<-ecn_record %>% select(Assy, total_qty1, Qty., percent)
ecn_qty<-sum(ecn_record$Qty.)
# Overall ECN rate (ECN count / total repairs), rounded to 4 decimals.
ecn_rate<-as.data.frame(round(t(data.frame(ECN=ecn_qty, Sum=sum_qty, Percent=ecn_qty/sum_qty)),4))
# Top-5 assemblies by ECN quantity, plus "Others" and grand-total rows.
ecn_top5<-ecn_record[1:5, c(1,3)]
others<-c("Others",ecn_qty-sum(ecn_top5$Qty.))
Total_ECN<-c("Total ECN", ecn_qty)
ecn_top5$Assy<-as.character(ecn_top5$Assy)
ecn_top5_1<-rbind(ecn_top5, others, Total_ECN)
ecn_record<-ecn_record %>% arrange(desc(percent))
top5_ecn_percent<-ecn_record[1:5, ]
ecn_record1<-ecn_record %>% arrange(desc(Qty.))
top5_ecn_qty_percent<-ecn_record1[1:5, ]
top20_ecn_percent<-ecn_record[1:20, ]
ECN_Final<-list(ECN_Rate=ecn_rate, ECN_TOP5=ecn_top5_1, TOP5_ECN_Percent=top5_ecn_percent,
                TOP5_ECN_Qty_Percent=top5_ecn_qty_percent, TOP20_ECN_Percent=top20_ecn_percent, Raw.ECN=ecn_record)
write_xlsx(ECN_Final, "ECN_Data.xlsx")
#############################HVP Section
## Classifies HVP records as CPU or Memory by supplier, then produces an NFF
## summary (rate, top-5 tables, raw table) for each class and writes them to
## MEM_NFF_Data.xlsx / CPU_NFF_Data.xlsx.
## NOTE(review): `QHVP` is not defined in this chunk — it must be loaded
## earlier in the session.  The supplier list deliberately includes the
## misspellings "INTEl"/"IMTEL" seen in the data; "AMD" is listed twice.
temp<-QHVP %>% select(SupplierID, AssemblyNo, NFF)
temp1<-mutate(temp, Type=ifelse(SupplierID %in% c("AMD", "INTEL", "INTEl", "IMTEL", "CAVIUM", "AMD"), "CPU", "Memory"))
mem<-temp1 %>% filter(Type=="Memory")
mem1<-mem[, c(2,3)]
cpu<-temp1 %>% filter(Type=="CPU")
cpu1<-cpu[, c(2,3)]
#########################################################Memory
# Per-assembly totals and NFF ("Y") counts for memory parts.
mem_total<-data.frame(table(mem1$AssemblyNo))
MEM_TOTAL_QTY<-sum(mem_total$Freq)
mem_nff<-mem1 %>% filter(NFF=="Y")
mem_nff1<-data.frame(table(mem_nff$AssemblyNo))
mem_nff_part<-as.character(mem_nff1$Var1)
len1<-length(mem_nff_part)
# Look up each NFF assembly's total volume from mem_total.
total_qty<-rep(0, len1)
for(i in 1:len1){
  ind<-which(mem_total$Var1==mem_nff_part[i])
  total_qty[i]<-mem_total$Freq[ind]
}
mem_nff_2<-cbind(mem_nff1, total_qty)
mem_nff_3<-mem_nff_2 %>% select(Var1, total_qty, Freq)
names(mem_nff_3)<-c("Assy", "Total_Qty", "NFF")
mem_nff_3$Percent<-round(mem_nff_3$NFF/mem_nff_3$Total_Qty, 4)
MEM_NFF_QTY<-sum(mem_nff_3$NFF)
MEM_NFF_PERCENT<-round(MEM_NFF_QTY/MEM_TOTAL_QTY, 4)
mem_nff_rate<-as.data.frame(t(data.frame(NFF=MEM_NFF_QTY, Sum=MEM_TOTAL_QTY, Percent=MEM_NFF_PERCENT)))
# Top-5 memory assemblies by NFF quantity, plus "Others" and total rows.
mem_nff_top5<-mem_nff_3 %>% arrange(desc(NFF)) %>% select(Assy, NFF)
mem_nff_top5<-mem_nff_top5[1:5,]
Others<-c("Others", MEM_NFF_QTY-sum(mem_nff_top5$NFF))
Total<-c("Total", MEM_NFF_QTY)
mem_nff_top5$Assy<-as.character(mem_nff_top5$Assy)
mem_nff_top5<-rbind(mem_nff_top5, Others, Total)
mem_nff_3<-mem_nff_3 %>% arrange(desc(NFF)) %>% arrange(desc(Percent))
mem_top5_nff_percent<-mem_nff_3[1:5,]
mem_nff_4<-mem_nff_3 %>% arrange(desc(NFF))
mem_top5_nff_qty<-mem_nff_4[1:5, ]
MEM_NFF_FINAL<-list(MEM_NFF_RATE=mem_nff_rate, MEM_NFF_TOP5=mem_nff_top5, MEM_TOP5_NFF_PERCENT=mem_top5_nff_percent,
                    MEM_TOP5_NFF_QTY=mem_top5_nff_qty, Raw.MEM.NFF=mem_nff_3)
write_xlsx(MEM_NFF_FINAL, "MEM_NFF_Data.xlsx")
#########################################################CPU
# Same pipeline as the Memory section, applied to CPU parts.
cpu_total<-data.frame(table(cpu1$AssemblyNo))
CPU_TOTAL_QTY<-sum(cpu_total$Freq)
cpu_nff<-cpu1 %>% filter(NFF=="Y")
cpu_nff1<-data.frame(table(cpu_nff$AssemblyNo))
cpu_nff_part<-as.character(cpu_nff1$Var1)
len1<-length(cpu_nff_part)
total_qty1<-rep(0, len1)
for(i in 1:len1){
  ind<-which(cpu_total$Var1==cpu_nff_part[i])
  total_qty1[i]<-cpu_total$Freq[ind]
}
cpu_nff_2<-cbind(cpu_nff1, total_qty1)
cpu_nff_3<-cpu_nff_2 %>% select(Var1, total_qty1, Freq)
names(cpu_nff_3)<-c("Assy", "Total_Qty", "NFF")
cpu_nff_3$Percent<-round(cpu_nff_3$NFF/cpu_nff_3$Total_Qty, 4)
CPU_NFF_QTY<-sum(cpu_nff_3$NFF)
CPU_NFF_PERCENT<-round(CPU_NFF_QTY/CPU_TOTAL_QTY, 4)
cpu_nff_rate<-as.data.frame(t(data.frame(NFF=CPU_NFF_QTY, Sum=CPU_TOTAL_QTY, Percent=CPU_NFF_PERCENT)))
cpu_nff_top5<-cpu_nff_3 %>% arrange(desc(NFF)) %>% select(Assy, NFF)
cpu_nff_top5<-cpu_nff_top5[1:5,]
Others<-c("Others", CPU_NFF_QTY-sum(cpu_nff_top5$NFF))
Total<-c("Total", CPU_NFF_QTY)
cpu_nff_top5$Assy<-as.character(cpu_nff_top5$Assy)
cpu_nff_top5<-rbind(cpu_nff_top5, Others, Total)
cpu_nff_3<-cpu_nff_3 %>% arrange(desc(NFF)) %>% arrange(desc(Percent))
cpu_top5_nff_percent<-cpu_nff_3[1:5,]
cpu_nff_4<-cpu_nff_3 %>% arrange(desc(NFF))
cpu_top5_nff_qty<-cpu_nff_4[1:5, ]
CPU_NFF_FINAL<-list(CPU_NFF_RATE=cpu_nff_rate, CPU_NFF_TOP5=cpu_nff_top5, CPU_TOP5_NFF_PERCENT=cpu_top5_nff_percent,
                    CPU_TOP5_NFF_QTY=cpu_top5_nff_qty, Raw.CPU.NFF=cpu_nff_3)
write_xlsx(CPU_NFF_FINAL, "CPU_NFF_Data.xlsx")
|
2c71c2d7d883dda8236c9914baaefd21ef9cb1e1
|
cb1edbd312fe5583702e8567e1aa6e32e103d300
|
/R/anc.ML.R
|
27ada5e3256a135b93d6d80dfd30203ba7a54f8b
|
[] |
no_license
|
cran/phytools
|
e8cb2ddac5592a9c27a0036df4599649a393717a
|
910fa95b3f5f1619c85ac420bd07286a3fe8cfcf
|
refs/heads/master
| 2023-07-22T15:18:46.363446
| 2023-07-14T20:00:02
| 2023-07-14T21:30:43
| 17,698,535
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,085
|
r
|
anc.ML.R
|
## lightweight version of ace(...,method="ML") for continuous traits
## also allows missing data in x, in which case missing data are also estimated
## written by Liam J. Revell 2011, 2013, 2014, 2015
## Dispatcher: validates the tree, then hands off to the model-specific
## worker (anc.BM, anc.OU, or anc.EB) chosen by the first element of `model`.
anc.ML<-function(tree,x,maxit=2000,model=c("BM","OU","EB"),...){
	if(!inherits(tree,"phylo")) stop("tree should be an object of class \"phylo\".")
	fitted<-switch(model[1],
		BM=anc.BM(tree,x,maxit,...),
		OU=anc.OU(tree,x,maxit,...),
		EB=anc.EB(tree,x,maxit,...),
		stop(paste("Do not recognize method",model)))
	fitted
}
## internal to estimate ancestral states under a BM model
## written by Liam J. Revell 2011, 2013, 2014, 2016, 2018
## Optional ... arguments: trace, vars, CI, se (per-tip measurement error),
## tol.  Tips listed in the tree but absent from names(x) are treated as
## missing data and estimated alongside the ancestral states.
anc.BM<-function(tree,x,maxit,...){
	if(hasArg(trace)) trace<-list(...)$trace
	else trace<-FALSE
	if(hasArg(vars)) vars<-list(...)$vars
	else vars<-FALSE
	if(hasArg(CI)) CI<-list(...)$CI
	else CI<-FALSE
	if(hasArg(se)) se<-list(...)$se
	else se<-setNames(rep(0,length(x)),names(x))
	# Build a diagonal measurement-error matrix over all tips (0 by default).
	SE<-setNames(rep(0,Ntip(tree)),tree$tip.label)
	SE[names(se)]<-se
	E<-diag(SE)
	colnames(E)<-rownames(E)<-names(SE)
	if(hasArg(tol)) tol<-list(...)$tol
	else tol<-10*.Machine$double.eps
	## check to see if any tips are missing data
	xx<-setdiff(tree$tip.label,names(x))
	## function returns the log-likelihood
	# par layout: [1] sig2, [2] root state a, [3..Nnode+1] internal states,
	# [Nnode+2..] values for tips with missing data (names in msp).
	likelihood<-function(par,C,invC,detC,xvals,msp,trace,E=0){
		sig2<-par[1]
		a<-par[2]
		y<-par[1:(tree$Nnode-1)+2]
		xvals<-c(xvals,setNames(par[1:length(msp)+tree$Nnode+1],msp))
		xvals<-xvals[rownames(C)[1:length(tree$tip.label)]]
		z<-c(xvals,y)-a
		if(trace) cat(paste(round(sig2,6)," --- ",sep=""))
		# With measurement error, rescale C by sig2 and add E, then refactor.
		if(sum(E)>0){
			C<-sig2*C
			C[rownames(E),colnames(E)]<-C[rownames(E),colnames(E)]+E
			invC<-solve(C)
			detC<-determinant(C,logarithm=TRUE)$modulus[1]
		}
		logLik<-(-z%*%invC%*%z/(2*sig2)-nrow(C)*log(2*pi)/2-nrow(C)*log(sig2)/2-
			detC/2)[1,1]
		if(trace) cat(paste(round(logLik,6),"\n"))
		-logLik
	}
	## compute C (phylogenetic covariance including internal nodes)
	C<-vcvPhylo(tree)
	invC<-solve(C)
	detC<-determinant(C,logarithm=TRUE)$modulus[1]
	## assign starting values (fastAnc fit with missing tips set to mean(x))
	zz<-fastAnc(tree,c(x,setNames(rep(mean(x),length(xx)),xx)))
	y<-zz[2:length(zz)]
	a<-zz[1]
	bb<-c(c(x,setNames(rep(mean(x),length(xx)),xx))[tree$tip.label],y)
	sig2<-((bb-a)%*%invC%*%(bb-a)/nrow(C))[1,1]
	# Box-constrained optimization: sig2 bounded below by tol.
	fit<-optim(c(sig2,a,y,rep(mean(x),length(xx))),fn=likelihood,C=C,invC=invC,
		detC=detC,xvals=x,msp=xx,trace=trace,E=E,method="L-BFGS-B",
		lower=c(tol,rep(-Inf,tree$Nnode+length(xx))),
		control=list(maxit=maxit))
	# Variances/CIs come from the inverse Hessian at the optimum.
	if(vars||CI){
		H<-hessian(likelihood,fit$par,C=C,invC=invC,detC=detC,
			xvals=x,msp=xx,trace=trace,E=E)
		vcv<-solve(H)
	}
	states<-fit$par[1:tree$Nnode+1]
	names(states)<-c(length(tree$tip)+1,rownames(C)[(length(tree$tip)+1):nrow(C)])
	obj<-list(sig2=fit$par[1],ace=states,logLik=-fit$value,counts=fit$counts,
		convergence=fit$convergence,message=fit$message,model="BM")
	if(vars) obj$var<-setNames(diag(vcv)[1:tree$Nnode+1],
		c(length(tree$tip)+1,rownames(C)[(length(tree$tip)+1):nrow(C)]))
	if(CI){
		obj$CI95<-cbind(obj$ace-1.96*sqrt(diag(vcv)[1:tree$Nnode+1]),
			obj$ace+1.96*sqrt(diag(vcv)[1:tree$Nnode+1]))
		rownames(obj$CI95)<-c(length(tree$tip)+1,
			rownames(C)[(length(tree$tip)+1):nrow(C)])
	}
	# Report estimates (and optionally variances/CIs) for missing-data tips.
	if(length(xx)>0){
		obj$missing.x<-setNames(fit$par[1:length(xx)+tree$Nnode+1],xx)
		if(vars) obj$missing.var<-setNames(diag(vcv)[1:length(xx)+
			tree$Nnode+1],xx)
		if(CI){
			obj$missing.CI95<-cbind(obj$missing.x-
				1.96*sqrt(diag(vcv)[1:length(xx)+tree$Nnode+1]),
				obj$missing.x+1.96*sqrt(diag(vcv)[1:length(xx)+tree$Nnode+1]))
			rownames(obj$missing.CI95)<-xx
		}
	}
	class(obj)<-"anc.ML"
	obj
}
## internal to estimate ancestral states under an OU model (this may not work)
## written by Liam J. Revell 2014
## Unlike anc.BM, missing tip data are not supported here.  Optional ...
## arguments: tol, trace, a.init (starting value for alpha; defaults to a
## half-life of half the tree depth: 2*log(2)/max height).
anc.OU<-function(tree,x,maxit=2000,...){
	## check to see if any tips are missing data
	xx<-setdiff(tree$tip.label,names(x))
	if(length(xx)>0) stop("Some tips of the tree do not have data. Try model=\"BM\".")
	if(hasArg(tol)) tol<-list(...)$tol
	else tol<-1e-8
	if(hasArg(trace)) trace<-list(...)$trace
	else trace<-FALSE
	if(hasArg(a.init)) a.init<-list(...)$a.init
	else a.init<-2*log(2)/max(nodeHeights(tree))
	# Negative log-likelihood; par layout: [1] sig2, [2] alpha, [3] root
	# state a0, [4..] non-root internal node states.
	likOU<-function(par,tree,x,trace){
		sig2<-par[1]
		alpha<-par[2]
		a0<-par[3]
		a<-par[1:(tree$Nnode-1)+3]
		logLik<-logMNORM(c(x,a),rep(a0,Ntip(tree)+tree$Nnode-1),sig2*vcvPhylo(tree,model="OU",alpha=alpha))
		if(trace) print(c(sig2,alpha,logLik))
		-logLik
	}
	x<-x[tree$tip.label]
	# Starting values: fastAnc states, phyl.vcv rate estimate, a.init alpha.
	pp<-rep(NA,tree$Nnode+2)
	pp[1:tree$Nnode+2]<-fastAnc(tree,x)
	pp[1]<-phyl.vcv(as.matrix(c(x,pp[2:tree$Nnode+2])),vcvPhylo(tree),lambda=1)$R[1,1]
	pp[2]<-a.init ## arbitrarily
	# sig2 and alpha bounded below by tol; states unbounded.
	fit<-optim(pp,likOU,tree=tree,x=x,trace=trace,method="L-BFGS-B",
		lower=c(tol,tol,rep(-Inf,tree$Nnode)),upper=rep(Inf,length(pp)),
		control=list(maxit=maxit))
	obj<-list(sig2=fit$par[1],alpha=fit$par[2],
		ace=setNames(fit$par[1:tree$Nnode+2],1:tree$Nnode+length(tree$tip.label)),
		logLik=-fit$value,counts=fit$counts,convergence=fit$convergence,
		message=fit$message,model="OU")
	class(obj)<-"anc.ML"
	obj
}
## EB is the Early-burst model (Harmon et al. 2010), also called the ACDC model
## (accelerating-decelerating; Blomberg et al. 2003). Governed by the rate parameter a, EB fits a model
## in which the rate of evolution increases or decreases exponentially through time, under the model
## r[t] = r[0] * exp(a * t), where r[0] is the initial rate, a is the rate change parameter, and t is
## time. The maximum bound is set to -0.000001, representing a decelerating rate of evolution. The minimum
## bound is set to log(10^-5)/depth of the tree.
## internal to estimate ancestral states under an EB model
## written by Liam J. Revell 2017
anc.EB<-function(tree,x,maxit=2000,...){
  ## Estimate ancestral states under the early-burst (EB/ACDC) model, in
  ## which the rate of evolution changes exponentially through time:
  ## r(t) = r0 * exp(a * t).
  ## Arguments:
  ##   tree  - object of class "phylo"
  ##   x     - named vector of tip states (every tip must have data)
  ##   maxit - maximum number of iterations passed to optim()
  ##   ...   - optional: tol (lower bound on sig2, default 1e-8), trace
  ##           (print progress), vars (return variances on states), CI
  ##           (return 95% CIs), r.init (starting value for the EB rate)
  ## Returns an object of class "anc.ML" with elements sig2, r, ace,
  ## logLik, counts, convergence, message & model="EB".
  ## check to see if any tips are missing data
  xx<-setdiff(tree$tip.label,names(x))
  if(length(xx)>0) stop("Some tips of the tree do not have data. Try model=\"BM\".")
  if(hasArg(tol)) tol<-list(...)$tol
  else tol<-1e-8
  if(hasArg(trace)) trace<-list(...)$trace
  else trace<-FALSE
  if(hasArg(vars)) vars<-list(...)$vars
  else vars<-FALSE
  if(hasArg(CI)) CI<-list(...)$CI
  else CI<-FALSE
  if(hasArg(r.init)){
    r.init<-list(...)$r.init
    obj<-phyl.vcv(as.matrix(x[tree$tip.label]),
      vcv(ebTree(tree,r.init)),1)
    s2.init<-obj$R[1,1]
    a0.init<-obj$alpha[1,1]
  } else {
    ## no starting r supplied: first optimize (sig2, r, root state) on the
    ## tip data alone to obtain starting values
    lik<-function(p,tree,x)
      logLik<--logMNORM(x,rep(p[3],Ntip(tree)),
        p[1]*vcvPhylo(tree,model="EB",r=p[2],anc.nodes=FALSE))
    obj<-phyl.vcv(as.matrix(x[tree$tip.label]),vcv(tree),1)
    fit.init<-optim(c(obj$R[1,1],0,obj$alpha[1,1]),
      lik,tree=tree,x=x,method="L-BFGS-B",lower=c(tol,-Inf,-Inf),
      upper=rep(Inf,3))
    r.init<-fit.init$par[2]
    s2.init<-fit.init$par[1]
    a0.init<-fit.init$par[3]
  }
  ## profile negative log-likelihood in (sig2, r): the ancestral states are
  ## set to their conditional ML values via fastAnc on the rate-stretched tree
  likEB<-function(par,tree,x,trace){
    sig2<-par[1]
    r<-par[2]
    obj<-fastAnc(ebTree(tree,r),x)
    a0<-obj[1]
    a<-obj[2:length(obj)]
    logLik<-logMNORM(c(x,a),rep(a0,Ntip(tree)+tree$Nnode-1),
      sig2*vcvPhylo(tree,model="EB",r=r))
    if(trace) print(c(sig2,r,logLik))
    -logLik
  }
  x<-x[tree$tip.label]
  pp<-c(s2.init,r.init)
  fit<-optim(pp,likEB,tree=tree,x=x,trace=trace,method="L-BFGS-B",
    lower=c(tol,-Inf),upper=rep(Inf,2),control=list(maxit=maxit))
  obj<-list(sig2=fit$par[1],r=fit$par[2],
    ace=unclass(fastAnc(ebTree(tree,fit$par[2]),x)),
    logLik=-fit$value,counts=fit$counts,convergence=fit$convergence,
    message=fit$message,model="EB")
  if(vars||CI){
    ## joint negative log-likelihood in all parameters, used only to obtain
    ## the Hessian (approximate variances) at the ML solution;
    ## par = c(sig2, r, root state, other internal states)
    likEB.hessian<-function(par,tree,y){
      sig2<-par[1]
      r<-par[2]
      a<-par[3:length(par)]
      logLik<-logMNORM(c(y,a[2:length(a)]),rep(a[1],Ntip(tree)+tree$Nnode-1),
        sig2*vcvPhylo(tree,model="EB",r=r))
      -logLik
    }
    H<-hessian(likEB.hessian,c(fit$par,obj$ace),tree=tree,y=x)
    vcv<-solve(H)
    ## BUGFIX: parameter order in H is c(sig2, r, states), so the ancestral
    ## states occupy diagonal positions 1:Nnode+2 (i.e. 3:(Nnode+2)) - the
    ## same indices the CI branch uses. The previous index 1:Nnode+1
    ## wrongly included var(r) and dropped the last node.
    if(vars) obj$var<-setNames(diag(vcv)[1:tree$Nnode+2],
      1:tree$Nnode+Ntip(tree))
    if(CI){
      obj$CI95<-cbind(obj$ace-1.96*sqrt(diag(vcv)[1:tree$Nnode+2]),
        obj$ace+1.96*sqrt(diag(vcv)[1:tree$Nnode+2]))
      rownames(obj$CI95)<-1:tree$Nnode+Ntip(tree)
    }
  }
  class(obj)<-"anc.ML"
  obj
}
logMNORM<-function(x,x0,vcv){
  ## Log-density of a multivariate normal: observation x, mean vector x0,
  ## covariance matrix vcv. Returns a 1x1 matrix (the quadratic form is
  ## computed with %*%), matching how callers consume the value.
  dev<-x-x0
  quad.form<-t(dev)%*%solve(vcv)%*%dev
  log.det<-determinant(vcv,logarithm=TRUE)$modulus[1]
  -quad.form/2-length(x)*log(2*pi)/2-log.det/2
}
## print method for "anc.ML"
## written by Liam J. Revell 2015, 2016
print.anc.ML<-function(x,digits=6,printlen=NULL,...){
  ## S3 print method for "anc.ML" objects: shows the ancestral state
  ## estimates, optional variances and 95% CIs, the fitted model
  ## parameters, and a note on optimizer convergence.
  n.nodes<-length(x$ace)
  ## print a full rounded vector/matrix, or an abbreviated version (via
  ## printDotDot) when printlen is smaller than the number of nodes
  show<-function(vals){
    if(is.null(printlen)||printlen>=n.nodes) print(round(vals,digits))
    else printDotDot(vals,digits,printlen)
  }
  cat(paste("Ancestral character estimates using anc.ML under a(n)",
    x$model,"model:\n"))
  show(x$ace)
  if(!is.null(x$var)){
    cat("\nVariances on ancestral states:\n")
    show(x$var)
  }
  if(!is.null(x$CI95)){
    cat("\nLower & upper 95% CIs:\n")
    colnames(x$CI95)<-c("lower","upper")
    show(x$CI95)
  }
  cat("\nFitted model parameters & likelihood:\n")
  ## build the one-row parameter table for the fitted model; unknown model
  ## strings yield NULL and print no table, as before
  par.tbl<-switch(x$model,
    BM={
      tbl<-data.frame(round(x$sig2,digits),round(x$logLik,digits))
      colnames(tbl)<-c("sig2","log-likelihood")
      tbl
    },
    OU={
      tbl<-data.frame(round(x$sig2,digits),round(x$alpha,digits),
        round(x$logLik,digits))
      colnames(tbl)<-c("sigma^2","alpha","logLik")
      tbl
    },
    EB={
      tbl<-data.frame(round(x$sig2,digits),round(x$r,digits),
        round(x$logLik,digits))
      colnames(tbl)<-c("sigma^2","r","logLik")
      tbl
    })
  if(!is.null(par.tbl)){
    rownames(par.tbl)<-""
    print(par.tbl)
  }
  if(x$convergence==0) cat("\nR thinks it has found the ML solution.\n\n")
  else cat("\nOptimization may not have converged.\n\n")
}
## S3 logLik method for "anc.ML" object class
logLik.anc.ML<-function(object,...){
  ## S3 logLik method for "anc.ML" objects. Attaches the number of
  ## estimated parameters (one per ancestral state, plus the model's rate
  ## parameters: 1 for BM, 2 for OU or EB) as the "df" attribute.
  lik<-object$logLik
  n.extra<-switch(object$model,BM=1,EB=2,OU=2)
  ## unknown model strings get no "df" attribute, as before
  if(!is.null(n.extra)) attr(lik,"df")<-length(object$ace)+n.extra
  lik
}
|
ef1d34c3927606fda4f32ec9a00b01bbafdd82d0
|
356191f477fd43ccea322b86b2641ed1c073c3f4
|
/man/calEquation.Rd
|
6854f97f4c32fc571b9d7b13e2f18dbb29f21afa
|
[] |
no_license
|
cardiomoon/predict3d
|
71ac03a1979fee00a8c49df389c991bd46c41f32
|
888d93335e8636585e520fd92e2c30f074080e7a
|
refs/heads/master
| 2023-04-13T18:47:48.816345
| 2023-04-11T11:01:31
| 2023-04-11T11:01:31
| 172,454,005
| 6
| 4
| null | 2023-01-08T22:38:34
| 2019-02-25T07:15:50
|
R
|
UTF-8
|
R
| false
| true
| 903
|
rd
|
calEquation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_lines.R
\name{calEquation}
\alias{calEquation}
\title{Calculate slope and intercept from an object of class lm}
\usage{
calEquation(
fit,
mode = 1,
pred = NULL,
modx = NULL,
modx.values = NULL,
label = NULL,
maxylev = 6,
digits = 2
)
}
\arguments{
\item{fit}{An object of class lm}
\item{mode}{A numeric}
\item{pred}{name of predictor variable}
\item{modx}{name of modifier variable}
\item{modx.values}{Numeric. Values of modifier variable}
\item{label}{A character string}
\item{maxylev}{maximum length of unique value of variable to be treated as a categorical variable}
\item{digits}{Integer indicating the number of decimal places}
}
\description{
Calculate slope and intercept from an object of class lm
}
\examples{
fit=lm(mpg~wt*hp+carb,data=mtcars)
calEquation(fit)
calEquation(fit,pred="hp")
}
|
c615d71a218cb2224185485bb5fc5346c701ec96
|
10014c3d04f46832cc6e1e5d4fd33358f21208b9
|
/shiny_project.r
|
dcfa0702347b1a0f9af958b0899e27f29678ec8a
|
[] |
no_license
|
Ferrah-hichem/shiny_dashboard
|
cbc3c0c4e9ec66c61adb5fa6ddbc3b5a280330df
|
781729e77f0bca2d83bbcd1036f705914a05b83c
|
refs/heads/main
| 2023-04-01T05:24:22.183035
| 2021-04-14T09:41:01
| 2021-04-14T09:41:01
| 357,847,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 57,722
|
r
|
shiny_project.r
|
library(shiny)
library(Hmisc)
library(UsingR)
library(reshape2)
library(ggpubr)
library(shinydashboard)
library(DT)
library(plyr)
library(plotly)
library(reader)
library("ElemStatLearn")
library("class")
library("plotrix")
library(CatEncoders)
library(MLmetrics)
library(ROSE)
library(smotefamily)
library(ggplot2)
library(GGally)
ui <- dashboardPage(
dashboardHeader(
title="FERRAH & GHARBI"
),
dashboardSidebar(
#chargement du fichier
fileInput(inputId = "file1", label = "Veuillez choisir votre fichier CSV",
accept = c("text/plain", ".csv")
),
sidebarMenu(
#Donnees quantitatives discrètes:
menuItem("Accueil", tabName = "accueill", icon = icon("home")),
menuItem("Le dataset", tabName = "readData", icon = icon("table")),
menuItem("Analyse des Donnees", tabName="datanalys", icon=icon("chart-line"),
menuSubItem("Univariee", tabName = "quant", icon=icon("dice-one")),
menuSubItem("Bivariee", tabName = "qual", icon=icon("dice-two")),
menuSubItem("Qualitative VS Quantitative", tabName = "vs", icon=icon("chart-pie")),
menuSubItem("Qualitative VS Qualitative", tabName = "vs2", icon=icon("chart-pie"))),
menuItem("Churn", tabName="churn", icon=icon("recycle"),
menuSubItem("Churn Variable", tabName = "ChurnIn", icon=icon("dice-one")),
menuSubItem("Churn Vs Qualitatives", tabName = "ChurnQual", icon=icon("dice-one")),
menuSubItem("Churn Vs Quantitatives", tabName = "ChurnQuant", icon=icon("dice-one")) ),
menuItem("Modeles d'apprentissages", tabName = "app", icon=icon("recycle")),
menuItem("Balances des Donnees", tabName="databalance", icon=icon("recycle"))
)
),
dashboardBody(
tags$head(tags$style(HTML('
.content-wrapper {
background-color: #fcfffff5;
}
.main-sidebar{
background-color: #595f5e !important;
}
.navbar {
background-color: #595f5e !important;
}
.col-sm-8{
width :100% !important;
}
.sidebar-toggle:hover{
background-color: #595f5e !important;
}
.logo{
background-color: #595f5e !important;
}
'
))),
tabItems(
tabItem(tabName = "readData",
h3("Les données du dataset IRIS",align = "center"),
#tableOutput(outputId = "contents")
DT::dataTableOutput("contents")
),
tabItem(tabName = "analydata",
tabItem(tabName = "Something"),
),
tabItem(tabName = "stat",
h3("Statistiques ",align = "center"),
tableOutput(outputId = "statistique")
),
tabItem(tabName = "qual",
mainPanel(
fluidRow(
column(6,uiOutput('quantlistbi1'),
#varSelectInput("choix", "Le choix de la variable", data(), multiple = FALSE),
#selectInput("choix", "Le choix de la variable",
#tableOutput(outputId = "input_list"),selected = 1),
),
column(6,uiOutput('quantlistbi2'),
#selectInput("choixx", "Le choix de la variable",
# tableOutput(outputId = "input_list"),selected = 1),
)
),
tabsetPanel(
tabPanel("Nuage de points",
fluidRow(
h3("Nuage de point avec la regression linéaire", align="center"),
column(6, plotOutput("nuagePoints")),
column(6, textOutput("correlation"))
)
),
tabPanel("Histogrammes dos à dos",
fluidRow(
column(8, offset = 1, plotOutput("histbackback"))
), style="padding-left: 150px; margin-top: 10px; padding-right: 350px;"
),
tabPanel("Nuage de points et Histogrammes",
fluidRow(
column(8, offset = 1, plotOutput("nuagePointshist"))
), style="padding-left: 150px; margin-top: 10px; padding-right: 350px;"
),
tabPanel("Caractéristiques", tableOutput("caract")),
tabPanel("Boîtes parallèles",
fluidRow(
column(6, plotOutput("boxplotBasic")),
column(6, plotOutput("boxplotGgplot"))
),
fluidRow(
column(4, offset = 4, textOutput("cor"))
)
)
)
)),
tabItem(tabName = "quant",
tabsetPanel(
tabPanel("Variables Quantitatives",
mainPanel(uiOutput('quantlist'),
#selectInput("radio", "Le choix de la variable",
# tableOutput(outputId = "input_quant"),selected = 1),
tabsetPanel(
tabPanel("Le SUMMARY",
fluidRow(
h3("Le summary de la variable choisie", align = "center"),
# Affichage d'un summary
verbatimTextOutput(outputId = "summary")
)
),
tabPanel("Graphes + boxplot",
fluidRow(
column(4,
# Zone d'affichage du diagramme en bÃÂâtons des effectifs
plotOutput(outputId = "effectifsDiag")),
column(4,
# Zone d'affichage du diagramme en bâtons des effectifs cumulés
plotOutput(outputId = "effectifsCumDiag")),
column(4,
# Zone d'affichage de la boîte à moustaches
plotOutput(outputId = "boiteMoustaches"))
)
),
tabPanel("Histogrammes et courbes ",
h3("Visualisation des graphes ", align = "center"),
fluidRow(
column(4,
h5("Histogramme des effectifs", align = "center"),
plotOutput(outputId = "effectifsHist")),
column(4,
h5("Histogramme des densités de frequences", align = "center"),
plotOutput(outputId = "effectifsHistFreqDens")),
column(4,
h5("Courbe cumulative", align ="center"),
plotOutput(outputId = "effectifsCumCurve"))
),
)
)
)
),
tabPanel("Variables Qualitatives",
fluidRow(uiOutput('qualist'),
tabsetPanel(
tabPanel("Histogramme des effectifs",fluidPage(column(6,
h5("Histogramme des effectifs", align = "center"),
tableOutput(outputId = "statqq")),
column(6,
h5("Courbe cumulative", align ="center"),
plotOutput(outputId = "effectifsDiagq")))),
tabPanel("Diagrammes",fluidPage(column(6,
h5("Diagramme en colonnes", align = "center"),
plotOutput("colonnes")),
column(6,
h5("Diagramme en secteurs", align ="center"),
plotOutput("secteurs"))))
),
))
),
),
tabItem(tabName = "vs",
h3("Quantitative VS Qualitative ",align = "center"),
mainPanel(
fluidRow(
column(6,uiOutput('quantlistvs'),
#varSelectInput("choix", "Le choix de la variable", data(), multiple = FALSE),
#selectInput("choix", "Le choix de la variable",
#tableOutput(outputId = "input_list"),selected = 1),
),
column(6,uiOutput('qualistvs'),
#selectInput("choixx", "Le choix de la variable",
# tableOutput(outputId = "input_list"),selected = 1),
)
),
tabsetPanel(
tabPanel("Diag. Barres (1 var.)",
fluidRow(
column(6, plotOutput("barplotUni")),
column(6, plotOutput("barplotOrderedUni"))
)
),
tabPanel("Diag. Barres (2 var.)",
fluidRow(
column(6, plotOutput("barplotBi")),
column(6, plotOutput("barplotDodgeBi"))
)
)
)
, style = "font-size: 75%")),
tabItem(tabName = "vs2",
h3("Qualitative VS Qualitative ",align = "center"),
mainPanel(
fluidRow(
column(6,uiOutput('qualistvs1'),
#varSelectInput("choix", "Le choix de la variable", data(), multiple = FALSE),
#selectInput("choix", "Le choix de la variable",
#tableOutput(outputId = "input_list"),selected = 1),
),
column(6,uiOutput('qualistvs2'),
#selectInput("choixx", "Le choix de la variable",
# tableOutput(outputId = "input_list"),selected = 1),
)
),
tabsetPanel(
tabPanel("Diag. Profils",
fluidRow(
column(6, plotOutput("barplotProfils")),
column(6, tableOutput("contingency"))
)
),
tabPanel("Indices",
fluidRow(
column(6, offset = 2, tableOutput("force"))
)
)
)
, style = "font-size: 75%")),
tabItem(tabName = "accueill",
h3(" A propos du tp ",align = "center"),
br(),
br(),
br(),
strong("Problèmatique", style="font-size :15pt"),
br(),
br(),
p("Le travail présenté dans cette application, rentre dans le cadre d’un Projet du module Data Science 2 pour les étudiants MLDS de l’université de Paris pour l’année universitaire 2020/2021.
Le travail consiste de faire une analyse uni-variée, bi-variée, le traitement des donnees, la comparaison des methodes de classification et le churn sur l’ensemble du dataset choisi .
L’application interagit d’une manière réactive à la demande de l’utilisateur.
L’utilisateur à la possibilité de l’explorer.
", style=" font-size :13pt"),
br(),
br(),
strong("Données utilisées :", style="font-size :15pt"),
p("Le Data-set utilisé dans ce travail est un data-set classique intitulé « Bank additional » qui regroupe 41189 lignes d’observations de données des clients de la banque qui sont identifiées par 21 variables (9 quantitatives et 12 qualitative).
", style=" font-size :13pt"),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
h3("Réalisé Par : FERRAH Hichem & GHARBI Mohamed",align = "center")
),
#-------------------- CHURN -------------
tabItem(tabName = "ChurnIn",
uiOutput('churnchoice'), plotlyOutput(outputId = "churnIntro1")),
tabItem(tabName = "ChurnQual",
uiOutput('churnqual'),plotlyOutput(outputId = "churnqualplot"),
),
tabItem(tabName = "ChurnQuant",
uiOutput('churnquant'),
tabsetPanel(
tabPanel("Pie Charts",
sliderInput('sliderquant', "Ajuster la distribution pour ameliorer le Rendu ", 1, 5, 3,1),plotlyOutput(outputId = "churnquantplot")
),
tabPanel("Bar plots",
plotlyOutput(outputId = "churnquantplot2"))
,
tabPanel("pairwise Scatter",
uiOutput('pairwise1'),uiOutput('pairwise2'),
plotOutput(outputId = "churnquantplot3")
)
)
),
#---------------------- Apprentissage Unbalanced ------------
tabItem(tabName = "app",
mainPanel(
tabsetPanel(
tabPanel("KNN",
fluidRow(
h3("Apprentissage Supervise knn", align="center"),
column(6, plotOutput("plot1"))
)
),
tabPanel("LR",
fluidRow(
h3("Apprentissage Supervise LR", align="center"),
column(8, offset = 1, plotOutput("plot2"), style="width: 850px !important")
)
)))),
#---------------------- data Balancing ---------------
tabItem(tabName = "databalance",
mainPanel(
tabsetPanel(
tabPanel("Equilibre",
fluidRow(
column(12,uiOutput("varbalance"),uiOutput("sampling"),uiOutput("go"),plotlyOutput("compare") )
)
),
tabPanel("KNN",
fluidRow(
h3("Apprentissage Supervise knn", align="center"),
column(6, plotOutput("plot4"))
)
),
tabPanel("LR",
fluidRow(
h3("Apprentissage Supervise LR", align="center"),
column(8, offset = 1, plotOutput("plot5"), style="width: 850px !important")
)
))))
)
)
)
server <- function(input, output) {
options(shiny.maxRequestSize=200*1024^2)
data <- reactive({
# Initialement, class(input$file1) = NULL
# AprÃÂès chargement, class(input$file1) = data.frame
# avec les colonnes 'size', 'type', and 'datapath' columns.
inFile <- input$file1
if (is.null(inFile)) return(NULL)
delim<-get.delim(inFile$datapath, n = 10, comment = "#", skip = 0, delims = c("\t", "\t| +", " ", ";", ","), large = 10, one.byte = TRUE)
print(delim)
read.csv(inFile$datapath, header = TRUE, sep=delim)
# data <- readLines(con <- file)
# records <- sapply(data, strsplit, split=separators)
# dataFrame <- data.frame(t(sapply(records,c)))
# rownames(dataFrame) <- 1: nrow(dataFrame)
# return(as.data.frame(dataFrame,stringsAsFactors = FALSE))
})
#data <- read.csv(url("https://raw.githubusercontent.com/Ferrah-hichem/datasets/master/bank-full.csv"), header = TRUE, sep=';')
#input_list <- reactive({)})
# output$input_list <- renderTable(colnames(data))
# # input_list <<- colnames(data)
# # input_quant <<- reactive({colnames(data)[!grepl('factor|logical|character',sapply(data,class))]})
# # input_qual <<- reactive({colnames(data)[grepl('factor|logical|character',sapply(data,class))]})
# output$input_quant <- renderTable(colnames(data)[!grepl('factor|logical|character',sapply(data,class))])
# output$input_qual <- renderTable(colnames(data)[grepl('factor|logical|character',sapply(data,class))])
#
quant<- reactive({
names(data())[!grepl('factor|logical|character',sapply(data(),class))]
})
output$list = renderUI({
selectInput('choix', 'Le choix de la variable', names(data()))
})
output$list2 = renderUI({
selectInput('choix', 'Le choix de la variable', names(data()))
})
output$quantlist = renderUI({
selectInput('radio', 'Le choix de la variable', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$quantlistbi1 = renderUI({
selectInput('quantlistbi1', 'Le choix de la variable', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$quantlistbi2 = renderUI({
selectInput('quantlistbi2', 'Le choix de la variable', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$quantlistvs = renderUI({
selectInput('quantlistvs', 'Le choix de la variable', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$qualist = renderUI({
selectInput('choixx', 'Le choix de la variable', names(data())[grepl('factor|logical|character',sapply(data(),class))])
})
output$qualistvs = renderUI({
selectInput('qualistvs', 'Le choix de la variable', names(data())[grepl('factor|logical|character',sapply(data(),class))])
})
output$qualistvs1 = renderUI({
selectInput('qualistvs1', 'Le choix de la variable', names(data())[grepl('factor|logical|character',sapply(data(),class))])
})
output$qualistvs2 = renderUI({
selectInput('qualistvs2', 'Le choix de la variable', names(data())[grepl('factor|logical|character',sapply(data(),class))])
})
output$churnchoice = renderUI({
selectInput('churnchoice', 'Le choix de la variable', names(data()))
})
output$churnqual = renderUI({
selectInput('churnqual', 'Choisissez une Variable pour la comparer avec la variable Churn', names(data())[grepl('factor|logical|character',sapply(data(),class))])
})
output$churnquant = renderUI({
selectInput('churnquant', 'Choisissez une Variable pour la comparer avec la variable Churn', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$varbalance = renderUI({
selectInput("varbalance", 'Choisissez une Variable Binaire "Desiquilibre" ', names(data()))
})
output$pairwise1 = renderUI({
selectInput('pairwise1', 'Choisissez une 2eme Variable', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$pairwise2 = renderUI({
selectInput('pairwise2', 'Choisissez une 3eme Variable', names(data())[!grepl('factor|logical|character',sapply(data(),class))])
})
output$go =renderUI({
validate(
need(nrow(unique(data()[input$varbalance])) == 2, "... En attente de la variable ...")
)
actionButton("go",label="valider",style="margin-left:51%; margin-top:-20%;")
})
output$sampling = renderUI({
validate(
need(nrow(unique(data()[input$varbalance])) == 2, "Cette Methode marche uniquement avec les variables binaires, veuillez choisir une autre")
)
selectInput("sampling","Choisissez La methode du Data balancing",c("Random Oversampling","Random UnderSampling","Both"))
})
tabStats <- reactive({
dt = data()
dt2 =dt[,input$radio]
# Calculer les effectifs et les effectifs cumulés
table.tmp <- as.data.frame(table(dt2))
table.tmp <- cbind(table.tmp, cumsum(table.tmp[[2]]))
# Calculer les fréquences et les fréquences cumulés
table.tmp <- cbind(table.tmp,
table.tmp[[2]]/nrow(data)*100,
table.tmp[[3]]/nrow(data)*100)
# Ajouter des noms de colonnes
colnames(table.tmp) <- c(dt[1,input$radio], "Effectifs", "Effectifs Cum.",
"Frequences", "Frequences Cum.")
# Renvoyer le tableau statistique
return(table.tmp)
})
tabStat <- reactive({
dt = data()
dt2 =dt[,input$choixx]
# Calculer les effectifs et les effectifs cumulés
table.tmp <- as.data.frame(table(dt2))
table.tmp <- cbind(table.tmp, cumsum(table.tmp[[2]]))
# Calculer les fréquences et les fréquences cumulés
table.tmp <- cbind(table.tmp,
table.tmp[[2]]/nrow(data())*100,
table.tmp[[3]]/nrow(data())*100)
# Ajouter des noms de colonnes
colnames(table.tmp) <- c(input$choixx, "Effectifs", "Effectifs Cum.",
"Frequences", "Frequences Cum.")
# Renvoyer le tableau statistique
print(dim(table.tmp[,1]))
return(table.tmp)
})
# Commande pour le calcul du summary
output$contents <- DT::renderDataTable({
#data()
DT::datatable(
data(),
filter = 'top', extensions = c('Buttons'),
options = list(scrollY = 650,
scrollX = 500,
deferRender = TRUE,
scroller = TRUE,
# paging = TRUE,
# pageLength = 25,
buttons = list('excel',
list(extend = 'colvis', targets = 0, visible = FALSE)),
dom = 'lBfrtip',
fixedColumns = TRUE),
rownames = FALSE)
})
# Boîtes parallèles
# ----
output$boxplotBasic <- renderPlot({
d <- data()
d.stack <- melt(d, measure.vars = quant())
# Boxplot basique
d.stack$value <- as.numeric(d.stack$value)
boxplot(d.stack$value ~ d.stack$variable , col="grey",
xlab = "Modalités", ylab = "Mesures")
})
output$boxplotGgplot <- renderPlot({
d <- data()
d.stack <- melt(d, measure.vars = quant())
d.stack$value <- as.numeric(d.stack$value)
# Boxplot élaborée
qplot(x = d.stack[,2], y = d.stack[,1],
xlab = "Modalités", ylab = "Mesures",
geom=c("boxplot", "jitter"), fill=d.stack[,2]) +
theme(legend.title=element_blank())
})
output$histbackback <- renderPlot({
options(digits=1)
x.var = input$quantlistbi1 ; y.var = input$quantlistbi2;
dt = data()
dt2 =dt[,input$quantlistbi1]
dt2 = as.numeric(dt2)
dt = data()
dt3 =dt[,input$quantlistbi2]
dt3 = as.numeric(dt3)
histbackback(x = dt2, y = dt3,
xlab = c(x.var, y.var), main = paste(x.var, "and", y.var),
las = 2)
})
output$nuagePoints <- renderPlot({
# Simple nuage de point
options(scipen=999)
x.var = input$quantlistbi1 ; y.var = input$quantlistbi2;
plot(x = data()[, x.var], y = data()[, y.var], col = "blue",
las = 2, cex.axis = 0.7,
main = paste(y.var, "en fonction de", x.var),
xlab = x.var, ylab = y.var, cex.lab = 1.2
)
# Droite de régression linéaire (y~x)
abline(lm(data()[, y.var]~data()[, x.var]), col="red", lwd = 2)
options(scipen=0)
})
output$correlation <- renderText({
dt = data()
dt2 =dt[,input$quantlistbi1]
dt2 = as.numeric(dt2)
dt = data()
dt3 =dt[,input$quantlistbi2]
dt3 = as.numeric(dt3)
#x.var = input$choix ; y.var = input$choixx;
coeff.tmp <- cov(dt2, dt3)/(sqrt(var(dt2)*var(dt3)))
paste("Coefficient de corrélation linéaire =", round(coeff.tmp,digits = 2))
})
output$summary <- renderPrint({
dt = data()
dt2 =dt[,input$radio]
#print(dt2)
t(summary.default(as.numeric(as.character(dt2))))
t(summary(dt2))})
output$statistique <- renderTable({
tabStats() })
output$statqq <- renderTable({
tabStat() })
# Nuage de points et histogrammes
# ----
output$nuagePointshist <- renderPlot({
options(digits=1)
dt = data()
dt2 =dt[,input$quantlistbi1]
dt2 = as.numeric(dt2)
dt = data()
dt3 =dt[,input$quantlistbi2]
dt3 = as.numeric(dt3)
EF = dt2;
CA = dt3;
scatter.with.hist( EF, CA)
})
output$caract <- renderTable({
# Définition des colonnes choisies
var.names <- quant()
# Initialisation de la table
caract.df <- data.frame()
# Pour chaque colonne, calcul de min, max, mean et ecart-type
for(strCol in var.names){
caract.vect <- c(min(data()[, strCol]), max(data()[,strCol]),
mean(var(data()[,strCol])), sqrt(var(data()[,strCol])))
caract.df <- rbind.data.frame(caract.df, caract.vect)
}
# Définition des row/colnames
rownames(caract.df) <- var.names
colnames(caract.df) <- c("Minimum", "Maximum", "Moyenne", "Ecart-type")
# Renvoyer la table
caract.df
}, rownames = TRUE, digits = 0)
# Commande pour l'affichage du plot des effectifs
output$effectifsDiag <- renderPlot({
dt = data()
plot(table(data.frame(dt[,input$radio])), col ="blue", xlab =dt[1,input$radio], ylab ="Effectifs",
main ="Distribution des effectifs")
})
output$effectifsDiagq <- renderPlot({
dt = data()
plot(table(data.frame(dt[,input$choixx])), col ="blue", xlab =dt[,input$choixx], ylab ="Effectifs",
main ="Distribution des effectifs")
})
effectifs <- reactive({
dt = data()
return(table(dt[,input$choixx]))
})
# Diagramme en colonnes
output$colonnes <- renderPlot({
barplot(effectifs(), main = " ",
ylab="Effectifs", las = 2,
names.arg = substr(names(effectifs()), 1, 4))
})
# Diagramme en secteurs
output$secteurs <- renderPlot({
pie(effectifs(), labels = substr(names(effectifs()), 1, 4),
main = " ", col=c())
})
# Commande pour l'affichage du plot des fréquences cumulées
output$effectifsCumDiag <- renderPlot({
dt= table(data()[,input$radio])
dt= cumsum(dt)/nrow(data())*100
plot(ecdf(as.numeric(as.character(dt))),
col ="blue", xlab = "La variable" , ylab ="Frequences cumulees",
main ="Frequences cumulees ")
})
# output$effectifsCumDiag <- renderPlot({
# # plot(ecdf(as.numeric(as.character(data.frame(tabStats()[,5])))),
# # col ="blue", xlab = "La variable" , ylab ="Frequences cumulees",
# # main ="Frequences cumulees ")
# dt = data()
# dt2= table(data.frame(dt[,input$radio]))/nrow(dt)*100
#
# plot(dt2,
# col ="blue", xlab = "La variable" , ylab ="Frequences cumulees",
# main ="Frequences cumulees ")
# })
# Commande pour l'affichage de la boîte à moustaches
output$boiteMoustaches <- renderPlot({
# Boîte à moustaches
dt = data()
boxplot( data.frame(as.numeric(as.character(dt[,input$radio]))), col = grey(0.8),
main = " ",
ylab = "", las = 1)
# Affichage complémentaires en Y des différents âges
rug(data()[,input$radio], side = 2)
})
# Récupération des valeurs fecondite
fecondite <- reactive({
if(!"Sepal.Length" %in% colnames(data())) return(NULL)
data()$Sepal.Length
})
# Histogrammes
# ----
output$effectifsHist <- renderPlot({
dt = data()
# Histogramme des effectifs
hist(as.numeric(as.character(dt[,input$radio])) , freq = TRUE, cex.axis = 1.5, cex.main = 1.5,
main = "Histogramme", col = "blue",
xlab = dt[1,input$radio], ylab = "Effectifs", las = 1,
right = FALSE, cex.lab = 1.5)
})
output$effectifsCumCurve <- renderPlot({
dt = data()
# Récupération des infos à partir de l'histogramme
tmp.hist <- hist(as.numeric(as.character(dt[,input$radio])) , plot = FALSE,
right = FALSE)
# Courbe cumulative (effectifs)
plot(x = tmp.hist$breaks[-1], y = cumsum(tmp.hist$counts),
xlab = dt[1,input$radio],
ylab = "Effectifs cumulés", cex.axis = 1.5, cex.lab = 1.5,
main = "Courbe cumulative ",
type = "o", col = "blue", lwd = 2, cex.main = 1.5)
})
output$effectifsHistFreqDens <- renderPlot({
dt = data()
# Histogramme des densités de fréquences
hist( as.numeric(as.character(dt[,input$radio])), freq = FALSE, cex.axis = 1.5, cex.main = 1.5,
main = "Histogramme de la variable", col = "green",
xlab = dt[1,input$radio] , ylab = "Densité de fréquences", las = 1,
right = FALSE, cex.lab = 1.5)
})
# Force de la liaison entre 'especes' et 'couleur'
# ----
output$force <- renderTable({
force.df <- as.data.frame(matrix(NA, nrow = 3, ncol = 1))
rownames(force.df) = c("X2", "Phi2", "Cramer")
# La table de contingence des profils observés
#tab = with(data(), table(input$quantlistvs, input$qualistvs))
var1<-input$qualistvs1
var2<-input$qualistvs2
data<-cbind(data()[var1],data()[var2])
data<-as.data.frame(data)
colnames(data)<-c("var1","var2")
tab =with(data,table(var1,var2))
print(tab)
# La table de contigence s'il y a indépendence
tab.indep = tab
n = sum(tab)
tab.rowSum = apply(tab, 2, sum)
tab.colSum = apply(tab, 1, sum)
print(tab.colSum)
print(tab.rowSum)
print(c)
for(i in c(1:length(tab.colSum))){
for(j in c(1:length(tab.rowSum))){
tab.indep[i,j] = tab.colSum[i]*tab.rowSum[j]/n
}
}
# Calcul du X²
force.df[1,1] = sum((tab-tab.indep)^2/tab.indep)
# Calcul du Phi²
force.df[2,1] = force.df[1,1]/n
# Calcul du Cramer
force.df[3,1] = sqrt(force.df[2,1]/(min(nrow(tab), ncol(tab))-1))
force.df
}, rownames=TRUE, colnames=FALSE)
# Unidimensionnel
output$barplotUni <- renderPlot({
# Diagramme en barres de la variable 'Level' avec ggplot
# list<-as.list(data()[,input$quantlistvs])
# print(list)
# ggplot(data(), aes(x = paste(data))) + geom_bar(stat="identity")
var<-sym(input$quantlistvs)
ggplot(data(),aes(x = !!var) )+ geom_bar()
})
output$barplotProfils <- renderPlot({
# Diagramme de profils entre les variables 'V2' et 'V6'
var1<-sym(input$qualistvs1)
var2<-sym(input$qualistvs2)
ggplot(data(), aes(x = !!var2, fill = !!var1)) + geom_bar(position = "fill")
})
# Bidimensionnel
output$barplotBi <- renderPlot({
# Diagramme en barres entre les variables 'V2' et 'V6'
var1<-sym(input$quantlistvs)
var2<-sym(input$qualistvs)
ggplot(data(), aes(x = !!var1, fill = !!var2)) + geom_bar()
})
# output$barplotProfils <- renderPlot({
# var1<-sym(input$quantlistvs)
# var2<-sym(input$qualistvs)
# ggplot(data(), aes(x = !!var2, fill = !!var1)) + geom_bar(position = "fill")
# })
#
#
# Table de contingence entre 'Sex' et 'Level'
# ----
output$contingency <- renderTable({
var1<-input$qualistvs1
var2<-input$qualistvs2
data<-cbind(data()[var1],data()[var2])
data<-as.data.frame(data)
colnames(data)<-c("var1","var2")
tab = with(data,table(var1, var2))
# print(tab)
# tab2 = with(data(), table(y, default))
# print(tab2)
#
print(tab)
round(tab/sum(tab), 3)
tab
print(tab)
})
#------------------ CHURN PART : ---------------
churnvar <- reactive({
validate(
need(nrow(unique(data()[input$churnchoice])) == 2, "Cette Variable ne peut pas etre une Varibale Churn Veuillez choisir une autre (Variable Binaire).")
)
return(data()[input$churnchoice])
})
output$churnIntro1 <- renderPlotly({
var<-as.data.frame(churnvar())
# # pie(var$freq, labels = unique(data()$y), main = "Churn Variable", col=c("#ffa600","#bc5090"))
# bp<- ggplot(df, aes(x="", y=value, fill=group))+
# geom_bar(width = 1, stat = "identity") + coord_polar("y", start=0)
freq<- count(var)
print(freq)
freq<- cbind(freq,round(freq$freq/ nrow(var)*100,2))
colnames(freq) <- c("value","occu","percent")
print(freq)
# Compute the cumulative percentages (top of each rectangle)
freq$ymax <- cumsum(freq$percent)
# Compute the bottom of each rectangle
freq$ymin <- c(0, head(freq$ymax, n=-1))
# Compute label position
freq$labelPosition <- (freq$ymax + freq$ymin) / 2
# Compute a good label
freq$label <- paste0(freq$percent,"%")
print(freq)
# Make the plot
plot<-plot_ly(freq,
labels = ~value,
values = ~percent, type = 'pie',
text="",
textinfo = "",hoverinfo="text",hovertext=paste(freq$value," ",freq$occu)) %>%
layout(title = paste("Variable Churn"),
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
showlegend = TRUE)
return(plot)
})
slidervalues<- reactive({
values<-data()[input$churnchoice]
max<-max(values)*0.5
min<-min(values)*0.05
})
output$churnqualplot <- renderPlotly({
print(nrow(count(names(data())[grepl('factor|logical|character',sapply(data(),class))])))
validate(
need(nrow(unique(data()[input$churnchoice])) == 2, "Veuillez Choisir une variable Churn dans l'onglet 'Churn Variable' "),
need(nrow(count(names(data())[grepl('factor|logical|character',sapply(data(),class))])) != 0, "Aucune Variable qualitative Detecte")
)
filtervar1 <- data()
filtervar2 <- data()
var <- input$churnchoice
values<-unique(data()[var])
print(values)
print(values[1,1])
print(values[2,1])
valeurchurn1 <- values[1,1]
valeurchurn2 <- values[2,1]
filtervar1 <- data()[data()[var] == valeurchurn1, input$churnqual]
filtervar2 <- data()[data()[var] == valeurchurn2, input$churnqual]
var1<-as.data.frame(filtervar1)
freq<- count(var1)
print(freq)
freq<- cbind(freq,round(freq$freq/ nrow(var1)*100,2))
colnames(freq) <- c("value","occu","percent")
print(freq)
# Compute the cumulative percentages (top of each rectangle)
freq$ymax <- cumsum(freq$percent)
# Compute the bottom of each rectangle
freq$ymin <- c(0, head(freq$ymax, n=-1))
# Compute label position
freq$labelPosition <- (freq$ymax + freq$ymin) / 2
# Compute a good label
freq$label <- paste0(freq$percent,"%")
print(freq)
# Make the plot
var2<-as.data.frame(filtervar2)
freq2<- count(var2)
print("freq2")
print(freq2)
freq2<- cbind(freq2,round(freq2$freq/ nrow(var2)*100,2))
colnames(freq2) <- c("value","occu","percent")
print(freq2)
# Compute the cumulative percentages (top of each rectangle)
freq2$ymax <- cumsum(freq2$percent)
# Compute the bottom of each rectangle
freq2$ymin <- c(0, head(freq2$ymax, n=-1))
# Compute label position
freq2$labelPosition <- (freq2$ymax + freq2$ymin) / 2
# Compute a good label
freq2$label <- paste0(freq2$percent,"%")
print(freq2)
# Make the plot
print(values[1,1])
print(values[2,1])
plot <- plot_ly(labels = ~value, values = ~percent,textinfo = 'value+percent') %>%
add_pie(data = freq, name = values[1,1], domain = list(row = 0, column = 0))%>%
add_pie(data = freq2, name = values[2,1], domain = list(row = 0, column = 1))%>%
layout(title = paste("Distribution de",input$churnqual,"par rapport a ",input$churnchoice), showlegend = T,
grid=list(rows=1, columns=2),
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
annotations = list(x = c(0, .5),
y = c(0, 0),text = c(paste("Distribution \n % Churn = ", values[1,1]),paste("Distribution \n % Churn = ", values[2,1])),
xref = "papper",
yref = "papper",
showarrow = F
))
return(plot)
})
output$plot1 <- renderPlot({
df<- data.frame(lapply(data(), function(x) {
if(!is.numeric(x)) as.numeric(factor(x)) else x
}))
df$y <- ifelse(df$y==1,0,1)
samples <- sample(1:nrow(df), 0.7*nrow(df))
trainclass <- df[samples, ]$y
testclass <- df[-samples, ]$y
train = df[samples, 1:(ncol(df)-1)]
test = df[-samples, 1:(ncol(df)-1)]
kmax <- 10
err_valid <- rep(NA,kmax)
for (k in 1:kmax)
{
print(k)
samples <- sample(1:nrow(df), 0.7*nrow(df))
pred <- knn(train,test,trainclass,k=18)
err_valid[k] <- F1_Score(y_pred=pred,y_true=testclass)
}
#plot(x=seq(1,kmax,by=1),y=err_test,type="o",col="blue")
boxplot(err_valid)
})
output$plot2 <- renderPlot({
df <- data()
df$y <- ifelse(df$y=="no",0,1)
kmax <- 10
err_valid <- rep(NA,kmax)
for (k in 1:kmax)
{
samples <- sample(1:nrow(df), 0.7*nrow(df))
test_y <- df[-samples, ncol(df)]
test_X <- df[-samples, 1:(ncol(df)-1)]
print(k)
train <- df[samples, ]
model <- glm(y ~ .,data = train)
pred <- predict(model,test_X,type="response")
pred <- ifelse(pred>0.5,1,0)
err_valid[k] <- F1_Score(y_pred=pred,y_true=test_y)
}
boxplot(err_valid)
})
output$churnquantplot <- renderPlotly({
print(nrow(count(names(data())[!grepl('factor|logical|character',sapply(data(),class))])))
validate(
need(nrow(unique(data()[input$churnchoice])) == 2, "Veuillez Choisir une variable Churn dans l'onglet 'Churn Variable' "),
need(nrow(count(names(data())[!grepl('factor|logical|character',sapply(data(),class))])) != 0, "Aucune Variable qualitative Detecte")
)
filtervar1 <- data()
filtervar2 <- data()
var <- input$churnchoice
values<-unique(data()[var])
print(values)
print(values[1,1])
print(values[2,1])
valeurchurn1 <- values[1,1]
valeurchurn2 <- values[2,1]
filtervar1 <- data()[data()[var] == valeurchurn1, input$churnquant]
filtervar2 <- data()[data()[var] == valeurchurn2, input$churnquant]
maxvar1<-max(filtervar1)
maxvar2<-max(filtervar2)
minvar1<-min(filtervar1)
minvar2<-min(filtervar2)
stepvar1<-abs((1/(2**(input$sliderquant-1)))*maxvar1)
stepvar2<-abs((1/(2**(input$sliderquant-1)))*maxvar2)
length<-2**(input$sliderquant)
print(input$sliderquant)
print(stepvar1)
print(stepvar2)
filtervar1<-cut(filtervar1, seq(minvar1,maxvar1,length.out = length))
filtervar2<-cut(filtervar2, seq(minvar2,maxvar2,length.out = length))
var1<-as.data.frame(filtervar1)
freq<- count(var1)
print(freq)
freq<- cbind(freq,round(freq$freq/ nrow(var1)*100,2))
colnames(freq) <- c("value","occu","percent")
print(freq)
# Compute the cumulative percentages (top of each rectangle)
freq$ymax <- cumsum(freq$percent)
# Compute the bottom of each rectangle
freq$ymin <- c(0, head(freq$ymax, n=-1))
# Compute label position
freq$labelPosition <- (freq$ymax + freq$ymin) / 2
# Compute a good label
freq$label <- paste0(freq$percent,"%")
print(freq)
# Make the plot
var2<-as.data.frame(filtervar2)
freq2<- count(var2)
print("freq2")
print(freq2)
freq2<- cbind(freq2,round(freq2$freq/ nrow(var2)*100,2))
colnames(freq2) <- c("value","occu","percent")
print(freq2)
# Compute the cumulative percentages (top of each rectangle)
freq2$ymax <- cumsum(freq2$percent)
# Compute the bottom of each rectangle
freq2$ymin <- c(0, head(freq2$ymax, n=-1))
# Compute label position
freq2$labelPosition <- (freq2$ymax + freq2$ymin) / 2
# Compute a good label
freq2$label <- paste0(freq2$percent,"%")
print(freq2)
# Make the plot
print(values[1,1])
print(values[2,1])
plot <- plot_ly(labels = ~value, values = ~percent,textinfo = 'value+percent') %>%
add_pie(data = freq, name = values[1,1], domain = list(row = 0, column = 0))%>%
add_pie(data = freq2, name = values[2,1], domain = list(row = 0, column = 1))%>%
layout(title = paste("Distribution de",input$churnquant,"par rapport a ",input$churnchoice), showlegend = T,
grid=list(rows=1, columns=2),
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
annotations = list(x = c(0, .5),
y = c(0, 0),text = c(paste("Distribution \n % Churn = ", values[1,1]),paste("Distribution \n % Churn = ", values[2,1])),
xref = "papper",
yref = "papper",
showarrow = F
))
return(plot)
})
output$churnquantplot2 <- renderPlotly({
print(nrow(count(names(data())[!grepl('factor|logical|character',sapply(data(),class))])))
validate(
need(nrow(unique(data()[input$churnchoice])) == 2, "Veuillez Choisir une variable Churn dans l'onglet 'Churn Variable' "),
need(nrow(count(names(data())[!grepl('factor|logical|character',sapply(data(),class))])) != 0, "Aucune Variable qualitative Detecte")
)
filtervar1 <- data()
filtervar2 <- data()
var <- input$churnchoice
values<-unique(data()[var])
print(values)
print(values[1,1])
print(values[2,1])
valeurchurn1 <- values[1,1]
valeurchurn2 <- values[2,1]
filtervar1 <- data()[data()[var] == valeurchurn1, input$churnquant]
filtervar2 <- data()[data()[var] == valeurchurn2, input$churnquant]
var1<-as.data.frame(filtervar1)
freq<- count(var1)
print(freq)
freq<- cbind(freq,round(freq$freq/ nrow(var1)*100,2))
colnames(freq) <- c("value","occu","percent")
print(freq)
# Compute the cumulative percentages (top of each rectangle)
freq$ymax <- cumsum(freq$percent)
# Compute the bottom of each rectangle
freq$ymin <- c(0, head(freq$ymax, n=-1))
# Compute label position
freq$labelPosition <- (freq$ymax + freq$ymin) / 2
# Compute a good label
freq$label <- paste0(freq$percent,"%")
print(freq)
# Make the plot
var2<-as.data.frame(filtervar2)
freq2<- count(var2)
print("freq2")
print(freq2)
freq2<- cbind(freq2,round(freq2$freq/ nrow(var2)*100,2))
colnames(freq2) <- c("value","occu","percent")
print(freq2)
# Compute the cumulative percentages (top of each rectangle)
freq2$ymax <- cumsum(freq2$percent)
# Compute the bottom of each rectangle
freq2$ymin <- c(0, head(freq2$ymax, n=-1))
# Compute label position
freq2$labelPosition <- (freq2$ymax + freq2$ymin) / 2
# Compute a good label
freq2$label <- paste0(freq2$percent,"%")
print(freq2)
# Make the plot
print(values[1,1])
print(values[2,1])
tab<-cbind(freq$value,freq$occu,freq2$occu)
tab<- as.data.frame(tab)
colnames(tab)<-c("value","occu1","occu2")
colnames(tab)
fig <- plot_ly(tab, x = ~value, y = ~occu1, type = 'bar', name = valeurchurn1)
fig <- fig %>% add_trace(y = ~occu2, name = valeurchurn2)
fig <- fig %>% layout(yaxis = list(title = 'Count'), barmode = 'group')
return(fig)
})
output$churnquantplot3 <- renderPlot({
print(nrow(count(names(data())[!grepl('factor|logical|character',sapply(data(),class))])))
validate(
need(nrow(unique(data()[input$churnchoice])) == 2, "Veuillez Choisir une variable Churn dans l'onglet 'Churn Variable' "),
need(nrow(count(names(data())[!grepl('factor|logical|character',sapply(data(),class))])) != 0, "Aucune Variable qualitative Detecte")
)
tab<-cbind(data()[input$churnchoice],data()[input$churnquant],data()[input$pairwise1],data()[input$pairwise2])
tab<-as.data.frame(tab)
colnames(tab)<-c("var0","var1","var2","var3")
colnames(tab)
print(tab)
my_cols <- c("#000FF", "#FF0000")
print(tab[,2:4])
fig <- ggpairs(tab, columns = 2:4,olumnLabels = c(input$churnquant,input$pairwise1,input$pairwise2),ggplot2::aes(colour=var0))
return(fig)
})
#------------------ Balanced Data -----------------
samplevalue<- reactive({
validate(
need(nrow(unique(data()[input$varbalance])) == 2, "... En attente de la variable ...")
)
method<-input$sampling
choice<-input$varbalance
df<- data.frame(lapply(data(), function(x) {
if(!is.numeric(x)) as.numeric(factor(x)) else x
}))
df[,input$varbalance] <- ifelse(df[,input$varbalance]==1,0,1)
samples <- sample(1:nrow(df), 0.7*nrow(df))
train = df[samples, 1:(ncol(df))]
data<-train
values<-unique(data[choice])
print(values)
print(values[1,1])
print(values[2,1])
valeurchoice1 <- values[1,1]
valeurchoice2 <- values[2,1]
print(valeurchoice1)
print("----------------------------------")
print(method)
var1<-as.data.frame(data[data[choice] == valeurchoice1,choice])
var2<-as.data.frame(data[data[choice] == valeurchoice2,choice])
print(count(var1))
if(method=="Random Oversampling"){
n_samp<-max(count(var1)$freq,count(var2)$freq)
print("n_samp")
print(n_samp)
return(n_samp)
}
if(method=="Random UnderSampling"){
n_samp<-min(count(var1)$freq,count(var2)$freq)
print("n_samp")
print(n_samp)
return(n_samp)
}
if(method=="Both"){
n_samp<-max(count(var1)$freq,count(var2)$freq)
print("n_samp")
print(n_samp)
return(n_samp)
}
# if(method=="Smote"){
# n_samp<-max(count(var1)$freq,count(var2)$freq)
# n_samp<-cbind(n_samp,min(count(var1)$freq,count(var2)$freq))
# print(method)
# return(n_samp)
# }
})
  # Rebalanced 70% training split of the data, produced with
  # ROSE::ovun.sample() according to the sampling method chosen in the UI.
  # Recomputed only when the user presses the "go" button.
  balanceddata<- eventReactive(input$go,{
    print("worked!")
    # Integer-encode non-numeric columns, then recode the target to 0/1.
    df<- data.frame(lapply(data(), function(x) {
      if(!is.numeric(x)) as.numeric(factor(x)) else x
    }))
    df[,input$varbalance] <- ifelse(df[,input$varbalance]==1,0,1)
    # Fresh random 70% training split.
    # NOTE(review): drawn independently of the split used in samplevalue(),
    # so the target size below comes from a *different* random split.
    samples <- sample(1:nrow(df), 0.7*nrow(df))
    train = df[samples, 1:(ncol(df))]
    data<-train
    print(data)
    method<-input$sampling
    choice<-input$varbalance
    # Target total size: twice the class size chosen by samplevalue(),
    # i.e. a perfectly balanced data set of that size.
    n_samp<-samplevalue()*2
    # Build the formula "<target> ~ <all other columns>" as text, then parse.
    f<-paste(choice," ~ ",paste(names(data()[names(data())!=choice]),collapse=" + "))
    print(paste("f = ",f))
    print(paste("f as formula = ",f))
    f<-as.formula(f)
    # Presumably a workaround for ovun.sample()'s argument evaluation inside
    # a Shiny reactive: the current frame index is stored globally and every
    # argument is fetched from that frame explicitly via get()/sys.frame().
    # NOTE(review): confirm this is still required before simplifying.
    curr_frame <<- sys.nframe()
    if(method=="Random Oversampling"){
      over_res<-ovun.sample(get("f", sys.frame(curr_frame)), data=get("data", sys.frame(curr_frame)), method="over", N=get("n_samp", sys.frame(curr_frame)), seed=2021)$data
      print(over_res)
      return(over_res)
    }
    if(method=="Random UnderSampling"){
      under_res<-ovun.sample(get("f", sys.frame(curr_frame)), data=get("data", sys.frame(curr_frame)), method="under", N=get("n_samp", sys.frame(curr_frame)), seed=2021)$data
      print(under_res)
      return(under_res)
    }
    if(method=="Both"){
      both_res<-ovun.sample(get("f", sys.frame(curr_frame)), data=get("data", sys.frame(curr_frame)), method="both", N=get("n_samp", sys.frame(curr_frame)),p=0.5, seed=2021)$data
      print(both_res)
      return(both_res)
    }
    # Disabled SMOTE branch kept for reference.
    # if(method=="Smote"){
    #   n1<-n_samp[0]
    #   n0<-n_samp[1]
    #   r0<-0.6
    #   ntimes<-(( 1 - r0) / r0 ) * ( n0 / n1 ) - 1
    #   data[input$varbalance]<-factor(data[input$varbalance])
    #   print(names(data[names(data)!=input$varbalance]))
    #   smote_res<-SMOTE(X = data[names(data)!=input$varbalance], target = data[input$varbalance], K = 5, dup_size = ntimes)$data
    #   print(smote_res)
    #   return(smote_res)}
  })
output$compare<-renderPlotly({
print("I'm here -1 ")
print("I'm here 0 ")
var <- input$varbalance
filtervar1 <- data()[var]
filtervar2 <- balanceddata()[var]
print("I'm here 2 ")
var1<-as.data.frame(filtervar1)
freq<- count(var1)
print(freq)
freq<- cbind(freq,round(freq$freq/ nrow(var1)*100,2))
colnames(freq) <- c("value","occu","percent")
print(freq)
print("I'm here 3 ")
# Compute the cumulative percentages (top of each rectangle)
freq$ymax <- cumsum(freq$percent)
# Compute the bottom of each rectangle
freq$ymin <- c(0, head(freq$ymax, n=-1))
# Compute label position
freq$labelPosition <- (freq$ymax + freq$ymin) / 2
# Compute a good label
freq$label <- paste0(freq$percent,"%")
print(freq)
# Make the plot
var2<-as.data.frame(filtervar2)
freq2<- count(var2)
print("freq2")
print(freq2)
freq2<- cbind(freq2,round(freq2$freq/ nrow(var2)*100,2))
colnames(freq2) <- c("value","occu","percent")
print(freq2)
# Compute the cumulative percentages (top of each rectangle)
freq2$ymax <- cumsum(freq2$percent)
# Compute the bottom of each rectangle
freq2$ymin <- c(0, head(freq2$ymax, n=-1))
# Compute label position
freq2$labelPosition <- (freq2$ymax + freq2$ymin) / 2
# Compute a good label
freq2$label <- paste0(freq2$percent,"%")
print(freq2)
# Make the plot
plot <- plot_ly(labels = ~value, values = ~percent,textinfo = 'occu',hovertext=~occu) %>%
add_pie(data = freq, name = 'Avant', domain = list(row = 0, column = 0))%>%
add_pie(data = freq2, name = 'Apres', domain = list(row = 0, column = 1))%>%
layout(title = paste("Comparaisons des donnees avant et apres le sampling "), showlegend = T,
grid=list(rows=1, columns=2),
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
annotations = list(x = c(0, .5),
y = c(0, 0),text = c("Avant","Apres"),
xref = "papper",
yref = "papper",
showarrow = F
))
return(plot)
})
  # Scatter plot of the resampled data: first column vs third column,
  # coloured by the balance variable.
  # NOTE(review): `color` receives a one-column data.frame rather than a
  # vector — confirm plotly maps it as intended.
  output$balancedplot <- renderPlotly({
    plot_ly(data = balanceddata(), x = balanceddata()[,1], y = balanceddata()[,3],color = balanceddata()[input$varbalance])
  })
# KNN ET LR APRES L'EQUILIBRE
output$plot4 <- renderPlot({
df<- data.frame(lapply(balanceddata(), function(x) {
if(!is.numeric(x)) as.numeric(factor(x)) else x
}))
print(df)
df$y <- ifelse(df$y==1,0,1)
samples <- sample(1:nrow(df), 0.7*nrow(df))
trainclass <- df[samples, ]$y
testclass <- df[-samples, ]$y
train = df[samples, 1:(ncol(df)-1)]
test = df[-samples, 1:(ncol(df)-1)]
kmax <- 10
err_valid <- rep(NA,kmax)
for (k in 1:kmax)
{
print(k)
samples <- sample(1:nrow(df), 0.7*nrow(df))
pred <- knn(train,test,trainclass,k=18)
err_valid[k] <- F1_Score(y_pred=pred,y_true=testclass)
}
#plot(x=seq(1,kmax,by=1),y=err_test,type="o",col="blue")
boxplot(err_valid)
})
output$plot5 <- renderPlot({
df <- data()
df$y <- ifelse(df$y=="no",0,1)
kmax <- 10
err_valid <- rep(NA,kmax)
for (k in 1:kmax)
{
samples <- sample(1:nrow(df), 0.7*nrow(df))
test_y <- df[-samples, ncol(df)]
test_X <- df[-samples, 1:(ncol(df)-1)]
print(k)
train <- df[samples, ]
model <- glm(y ~ .,data = train)
pred <- predict(model,test_X,type="response")
pred <- ifelse(pred>0.5,1,0)
err_valid[k] <- F1_Score(y_pred=pred,y_true=test_y)
}
boxplot(err_valid)
})
}
shinyApp(ui, server)
|
47f674d3a6a6c5eeee41f6c6419fa04193426692
|
7a1a550b35501fe853ab86ba1ec77e2c7c884432
|
/plot4.R
|
1f4cca96d01f7bb9155229f4243d07d876fc45d6
|
[] |
no_license
|
AaronWong/ExData_Plotting1
|
07937f57be8299581509b384f2bacd87190f00a8
|
a798cb0b997e4b0a9a3c93784de1a20618d29f60
|
refs/heads/master
| 2021-01-22T19:55:24.225190
| 2015-01-11T17:36:41
| 2015-01-11T17:36:41
| 29,087,997
| 1
| 0
| null | 2015-01-11T10:19:55
| 2015-01-11T10:19:54
| null |
GB18030
|
R
| false
| false
| 1,248
|
r
|
plot4.R
|
## Reading the data of household power consumption
HPCData <- read.table("household_power_consumption.txt", header = TRUE,
sep = ";", na.strings = "?")
## Extract the observations for 2007-02-01 and 2007-02-02 into subset data4
HPCData$Time <- paste(HPCData$Date, HPCData$Time)
HPCData$Date <- as.Date(HPCData$Date, "%d/%m/%Y")
HPCData$Time <- strptime(HPCData$Time, "%d/%m/%Y %H:%M:%S")
day1 <- which(HPCData$Date == "2007-02-01")
day2 <- which(HPCData$Date == "2007-02-02")
data4 <- HPCData[c(day1, day2), ]
## Plotting: four panels written to plot4.png (filled column-wise)
png(filename = "plot4.png")
par(mfcol = c(2,2))
## Panel 1: global active power over time
plot(data4[,c(2,3)], type = "l", xlab = "", ylab = "Global Active Power(kilowatts)")
## Panel 2: the three sub-metering series overlaid, with a legend
plot(data4[, c(2,7)], type = "l", xlab = "", ylab = "Energy sub metering")
points(data4[, c(2,8)], type = "l", col = "red")
points(data4[, c(2,9)], type = "l", col = "blue")
legend("topright", bty= "n",
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
pch = c("_","_","_"),
col = c("black","red","blue"))
## Panel 3: voltage over time
plot(data4[,c(2,5)], type = "l", xlab = "datetime", ylab = "Voltage")
## Panel 4: global reactive power over time
plot(data4[,c(2,4)], type = "l",
xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
8a123ae3c644dc4c3e73a20dbef9c4c895ce40ba
|
031954f00cb6a0c0761d3f3040711350524d8fbf
|
/man/heatMatrix.Rd
|
d5870904a177f38e9352c7d51501797708efd048
|
[] |
no_license
|
katwre/genomation
|
881c4f4fa716f0f9befc31181a69ddb831332d47
|
d18cad2e4e45d097631ff843d4927bfaf99b6f64
|
refs/heads/master
| 2021-01-18T17:14:41.553781
| 2018-01-30T16:35:52
| 2018-01-30T16:35:52
| 33,183,415
| 2
| 0
| null | 2015-03-31T12:11:28
| 2015-03-31T12:11:27
| null |
UTF-8
|
R
| false
| true
| 5,339
|
rd
|
heatMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMatrix.R
\name{heatMatrix}
\alias{heatMatrix}
\title{Draw a heatmap of a given ScoreMatrix object}
\usage{
heatMatrix(mat, grid = FALSE, col = NULL, xcoords = NULL, group = NULL,
group.col = NULL, order = FALSE, user.order = FALSE, winsorize = c(0,
100), clustfun = NULL, main = "", legend.name = NULL, cex.legend = 1,
xlab = NULL, cex.main = 1, cex.lab = 1, cex.axis = 1,
newpage = TRUE)
}
\arguments{
\item{mat}{a \code{ScoreMatrix} object}
\item{grid}{if TRUE, grid graphics will be used. if FALSE, base graphics
will be used on the top level, so users can use par(mfrow)
or par(mfcol) prior to calling the function. Default:FALSE}
\item{col}{a vector of colors, such as the ones created by heat.colors(10).
If NULL (which is default), jet color scheme (common in matlab
plots) will be used.}
\item{xcoords}{a vector of numbers showing relative positions of the bases or
windows. It must match the number of columns in the \code{ScoreMatrix}.
Alternatively, it could be a numeric vector of two elements. Such
as c(0,100) showing the relative start and end coordinates of the first
and last column of the \code{ScoreMatrix} object.}
\item{group}{a list of vectors of row numbers or a factor. This grouping is
used for rowside colors of the heatmap. If it is a list,
each element of the list must be a vector of row numbers. Names
of the elements of the list will be used as names of groups.
If \code{group} is a factor
, it's length must match the number of rows of the matrix, and
factor levels will be used as the names of the groups in the plot.}
\item{group.col}{a vector of color names to be used at the rowside colors if
\code{group} argument is given or \code{clustfun} function is given.}
\item{order}{Logical indicating if the rows should be ordered or not
(Default:FALSE). If \code{order=TRUE} the matrix will be ordered
with rowSums(mat) values in descending order.
If \code{group} argument is provided, first the groups
will be ordered in descending order of sums of rows then, everything
within the clusters will be ordered by sums of rows.
If \code{clustfun} is given then rows within clusters
will be order in descending order of sums of rows.}
\item{user.order}{a numerical vector indicating the order of groups/clusters (it works only
when \code{group} or \code{clustfun} argument is given).}
\item{winsorize}{Numeric vector of two, defaults to c(0,100). This vector
determines the upper and lower percentile values to limit the
extreme values. For example, c(0,99) will limit the values to
only 99th percentile, everything above the 99 percentile will
be equalized to the value of 99th percentile.This is useful
for visualization of matrices that have outliers.}
\item{clustfun}{a function for clustering
rows of \code{mat} that returns
a vector of integers indicating the cluster to which
each point is allocated (a vector of cluster membership),
e.g. k-means algorithm with 3 centers:
function(x) kmeans(x, centers=3)$cluster.
                  By default \code{NULL}.}
\item{main}{a character string for the plot title}
\item{legend.name}{a character label plotted next to the legend}
\item{cex.legend}{A numerical value giving the amount by which
legend axis marks should be magnified relative to the default}
\item{xlab}{label a character string for x-axis of the heatmap}
\item{cex.main}{A numerical value giving the amount by which
plot title should be magnified}
\item{cex.lab}{A numerical value giving the amount by which
axis labels (including 'legend.name')
should be magnified relative to the default.}
\item{cex.axis}{A numerical value giving the amount by which
axis marks should be magnified relative to the default}
\item{newpage}{logical indicating if \code{grid.newpage()} function should be
invoked if \code{grid=TRUE}.}
}
\value{
returns clustering result invisibly, if clustfun is defined
}
\description{
The function makes a heatmap out of given \code{ScoreMatrix} object. If desired
it can use clustering using given clustering function
(e.g. k-means) and plot cluster color codes as a sidebar.
In addition, user can define groups of rows using 'group' argument.
}
\examples{
data(cage)
data(promoters)
scores1=ScoreMatrix(target=cage,windows=promoters,strand.aware=TRUE,
weight.col="tpm")
set.seed(1000)
\donttest{
heatMatrix(mat=scores1,legend.name="tpm",winsorize=c(0,99),xlab="region around TSS",
xcoords=-1000:1000,
cex.legend=0.8,main="CAGE clusters on promoters",cex.lab=1,
cex.axis=0.9,grid=FALSE)
## examples using clustering functions
## k-means
cl1 <- function(x) kmeans(x, centers=3)$cluster
set.seed(1000)
heatMatrix(mat=scores1,legend.name="tpm",winsorize=c(0,99),xlab="region around TSS",
xcoords=-1000:1000,clustfun=cl1,
cex.legend=0.8,main="CAGE clusters on promoters",cex.lab=1,
cex.axis=0.9,grid=FALSE,
user.order=c(1,3,2))
## hierarchical clustering
cl2 <- function(x) cutree(hclust(dist(x), method="complete"), k=3)
set.seed(1000)
heatMatrix(mat=scores1,legend.name="tpm",winsorize=c(0,99),xlab="region around TSS",
xcoords=-1000:1000,clustfun=cl2,
cex.legend=0.8,main="CAGE clusters on promoters",cex.lab=1,
cex.axis=0.9,grid=FALSE)
}
}
|
5fc75935767b4b863faf5399f2aae8849d20baf6
|
8c5f0222a10ce128bcf20a0f62b03b8795ee4c3d
|
/man/wassermann.Rd
|
ecd231312f2b56c0f6428921bd216c0db34e183f
|
[] |
no_license
|
green-striped-gecko/PopGenReport
|
15a58e5184b877b65791a14b2271487c5d979b81
|
d6b970e91d2b90476704ff95586b0b6e40892111
|
refs/heads/master
| 2023-07-10T17:01:23.072377
| 2023-06-26T23:55:04
| 2023-06-26T23:55:04
| 33,985,286
| 6
| 5
| null | 2023-06-26T23:55:06
| 2015-04-15T09:33:23
|
R
|
UTF-8
|
R
| false
| true
| 1,687
|
rd
|
wassermann.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wassermann.r
\name{wassermann}
\alias{wassermann}
\title{Partial Mantel tests on cost distance matrices}
\usage{
wassermann(gen.mat, cost.mats, eucl.mat = NULL, plot = TRUE, nperm = 999)
}
\arguments{
\item{gen.mat}{pairwise genetic distance matrix}
\item{cost.mats}{pairwise cost distance matrix}
\item{eucl.mat}{pairwise Euclidean distance matrix}
\item{plot}{switch for control plots of the partial mantel test}
\item{nperm}{number of permutations for the partial mantel test}
}
\value{
A table with the results of the partial mantel test. Using plot=TRUE
results in diagnostic plots for the partial mantel tests.
}
\description{
This function implements the Causal modelling approach as suggested by
Wassermann et al. 2010 and Cushman et al. 2010. It tests for the effect of
landscape features using a cost distance matrix on the genetic structure of
subpopulation/individuals.
}
\details{
see \code{\link{landgenreport}}
}
\examples{
\donttest{
library(raster)
fric.raster <- readRDS(system.file("extdata","fric.raster.rdata", package="PopGenReport"))
glc <- genleastcost(landgen, fric.raster, "D", NN=8)
wassermann(eucl.mat = glc$eucl.mat, cost.mats = glc$cost.mats, gen.mat = glc$gen.mat)
}
}
\references{
Wassermann, T.N., Cushman, S. A., Schwartz, M. K. and Wallin, D.
O. (2010). Spatial scaling and multi-model inference in landscape genetics:
Martes americana in northern Idaho. Landscape Ecology, 25(10), 1601-1612.
}
\seealso{
\code{\link{popgenreport}}, \code{\link{genleastcost}},
\code{\link{landgenreport}}, \code{\link{lgrMMRR}}
}
\author{
Bernd Gruber (bernd.gruber@canberra.edu.au)
}
|
7680eac76b376e3079ab80f75deac8445a341549
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968539-test.R
|
7c17e0a123feb90a90d992b1a7837b93a56f393b
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,216
|
r
|
1612968539-test.R
|
# Auto-generated fuzzer/valgrind regression input for palm:::euc_distances.
# Replays a fixed list of extreme doubles (NaN, subnormals, huge magnitudes,
# empty vectors) through the compiled routine and prints the result shape.
testlist <- list(x1 = c(NaN, NaN, 1.390658140702e-309, -5.48143621863725e+303, 7.29057588494813e-304, 0, 7.78776273814619e-308, 1.11897728190755e+87, 2.77478592360575e+180, 2.77448002212291e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001456822e+180, 5.50814852228496e-310, 2.77448001456315e+180, 2.77448001762435e+180, 2.06930173233488e+26, 2.7744800176243e+180, 2.74814147082125e+180, 8.73508392609645e-201, -1.51460052722868e+304, 3.01181841091939e-304, NaN, 4.056010690358e-312, 0, NaN, NaN, -9.64741376890647e+206, 2.77124415362576e+180, NaN, 6.76597883938242e-251, 2.77447923392651e+180, 2.83962624393768e+238, 2.8396262443943e+238, 2.8396262443943e+238 ), x2 = numeric(0), y1 = c(2.85279195360568e+180, 1.20826074561973e-178, 7.29112200597562e-304, 7.2911220195564e-304, 4.11214344899755e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
aa071e24024125be21a0a3619aea0724079b0adc
|
e5b1416f3d7434fc19fee3a51474069cb2478e29
|
/man/reduce_lr_cb.Rd
|
aceaa26e7ce89e9936f767480948ccc887bcbda5
|
[] |
no_license
|
anilgunduz/deepG
|
16c13a8e0d2d372913506ab626ad31d4af76c428
|
e47c415f04da15e363b46c39027c30255e0b698e
|
refs/heads/master
| 2023-06-26T23:38:27.845094
| 2021-07-30T09:51:12
| 2021-07-30T09:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 546
|
rd
|
reduce_lr_cb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{reduce_lr_cb}
\alias{reduce_lr_cb}
\title{learning rate callback}
\usage{
reduce_lr_cb(patience, cooldown, lr.plateau.factor, monitor = "val_acc")
}
\arguments{
\item{patience}{Number of epochs waiting for decrease in loss before reducing learning rate.}
\item{cooldown}{Number of epochs without changing learning rate.}
\item{lr.plateau.factor}{Factor of decreasing learning rate when plateau is reached.}
\item{monitor}{Quantity monitored for plateau detection (default \code{"val_acc"}).}
}
\description{
learning rate callback
}
|
62713d3413d656d16c9565792e66c5b3a177cd80
|
8b8ace74c5c9d618ac294b6183048da0393b8791
|
/R/alcohol.R
|
0bbdee63cd2e648705147d9aa1b14ab98c0192eb
|
[
"MIT"
] |
permissive
|
wli168/cchsflow
|
c3a6d87da0e1e3395f3e9add21d2903647b0f87f
|
ed7eebe4c0b8baf82d44bc46ebc2a16e46e6001d
|
refs/heads/master
| 2023-01-24T04:31:44.496243
| 2020-09-16T19:36:33
| 2020-09-16T19:36:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,998
|
r
|
alcohol.R
|
#' @title Number of drinks consumed in the past week
#'
#' @description \strong{NOTE:} this is not a function.
#'
#' This is a continuous variable derived by Statistics Canada that quantifies
#' the amount of alcohol that is consumed in a week. This is calculated by
#' adding the number of drinks consumed each day in the past week.
#' Respondents of each CCHS cycle are asked how much alcohol they have
#' consumed each day in the past week (ie. how much alcohol did you consume on
#' Sunday, how much did you consume on Monday etc.). Each day is considered an
#' individual variable and ALWDWKY takes the sum of all daily variables.
#'
#' @details This variable is present in every CCHS cycle used in cchsflow, and
#' how it was derived remains consistent.
#'
#' @param ALWDWKY cchsflow variable name for number of drinks consumed in the
#' past week
#'
#' @examples
#' library(cchsflow)
#' ?ALWDWKY
#'
#' @export
ALWDWKY <- function(ALWDWKY) {
  # Documentation stub: defined only so the roxygen block above attaches to
  # an exported name. Calling it does nothing and returns NULL.
  # this is for documentation purposes only
}
#' @title Average daily alcohol consumption
#'
#' @description \strong{NOTE:} this is not a function.
#'
#' This is a continuous variable derived by Statistics Canada that quantifies
#' the mean daily consumption of alcohol. This takes the value of ALWDWKY and
#' divides it by 7.
#'
#' @details This variable is present in every CCHS cycle used in cchsflow, and
#' how it was derived remains consistent.
#'
#' @param ALWDDLY cchsflow variable name for average daily alcohol consumption
#'
#' @examples
#' library(cchsflow)
#' ?ALWDDLY
#'
#' @export
ALWDDLY <- function(ALWDDLY) {
  # Documentation stub: defined only so the roxygen block above attaches to
  # an exported name. Calling it does nothing and returns NULL.
  # this is for documentation purposes only
}
#' @title Type of drinker
#'
#' @description \strong{NOTE:} this is not a function.
#'
#' This is a categorical variable derived by Statistics Canada that uses
#' various intermediate alcohol variables to categorize individuals into 4
#' distinct groups:
#'
#' \enumerate{
#' \item Regular Drinker
#' \item Occasional Drinker
#' \item Former Drinker
#' \item Never Drinker
#' }
#'
#' @details This variable is used in CCHS cycles from 2001 to 2007. How it was
#' derived remained consistent during these years.
#'
#' Starting in 2007, Statistics Canada created a derived variable that looked
#' at drinking type in the last 12 months. This new derived variable did not
#' distinguish between former and never drinkers. If your research requires you
#' to differentiate between former and never drinkers, we recommend using
#' earlier cycles of the CCHS.
#'
#' @param ALCDTYP cchsflow variable name for type of drinker
#'
#' @examples
#' library(cchsflow)
#' ?ALCDTYP
#'
#' @export
ALCDTYP <- function(ALCDTYP) {
  # Documentation stub: defined only so the roxygen block above attaches to
  # an exported name. Calling it does nothing and returns NULL.
  # this is for documentation purposes only
}
#' @title Type of drinker (12 months)
#'
#' @description \strong{NOTE:} this is not a function.
#'
#' This is a categorical variable derived by Statistics Canada that uses
#' various intermediate alcohol variables to categorize individuals into 3
#' distinct groups:
#'
#' \enumerate{
#' \item Regular Drinker
#' \item Occasional Drinker
#' \item No drink in the last 12 months.
#' }
#'
#' @details This variable was introduced in the 2007-2008 cycle of the CCHS, and
#' became the sole derived variable that categorized people into various
#' drinker types from 2009 onwards. Unlike ALCDTYP, this variable does not
#' distinguish between former and never drinkers.
#'
#' @param ALCDTTM cchsflow variable name for type of drinker (12 months)
#'
#' @examples
#' library(cchsflow)
#' ?ALCDTTM
#'
#' @export
ALCDTTM <- function(ALCDTTM) {
  # Documentation stub: defined only so the roxygen block above attaches to
  # an exported name. Calling it does nothing and returns NULL.
  # this is for documentation purposes only
}
#' @title Binge drinking
#'
#' @description This function creates a derived categorical variable that
#' flags for binge drinking based on the number drinks consumed on a single
#' day.
#'
#' @details In health research, binge drinking is defined as having an excess
#' amount of alcohol in a single day. For males, this is defined as having five
#' or more drinks; and for females it is four or more drinks. In the CCHS,
#' respondents are asked to count the number of drinks they had during each
#' day of the last week.
#'
#' @param DHH_SEX sex of respondent (1 - male, 2 - female)
#'
#' @param ALW_1 Drinks in the last week (1 - yes, 2 - no)
#'
#' @param ALW_2A1 Number of drinks on Sunday
#'
#' @param ALW_2A2 Number of drinks on Monday
#'
#' @param ALW_2A3 Number of drinks on Tuesday
#'
#' @param ALW_2A4 Number of drinks on Wednesday
#'
#' @param ALW_2A5 Number of drinks on Thursday
#'
#' @param ALW_2A6 Number of drinks on Friday
#'
#' @param ALW_2A7 Number of drinks on Saturday
#'
#' @return Categorical variable (binge_drinker) with two categories:
#'
#' \enumerate{
#' \item 1 - binge drinker
#' \item 2 - non-binge drinker
#' }
#'
#' @examples
#'
#' # Using binge_drinker_fun() to create binge_drinker values across CCHS cycles
#' # binge_drinker_fun() is specified in variable_details.csv along with the
#' # CCHS variables and cycles included.
#'
#' # To transform binge_drinker, use rec_with_table() for each CCHS cycle
#' # and specify binge_drinker, along with the various alcohol and sex
#' # variables. Then by using bind_rows() you can combine binge_drinker
#' # across cycles.
#'
#' library(cchsflow)
#' binge2001 <- rec_with_table(
#' cchs2001_p, c(
#' "ALW_1", "DHH_SEX", "ALW_2A1", "ALW_2A2", "ALW_2A3", "ALW_2A4",
#' "ALW_2A5", "ALW_2A6", "ALW_2A7", "binge_drinker"
#' )
#' )
#'
#' head(binge2001)
#'
#' binge2009_2010 <- rec_with_table(
#' cchs2009_2010_p, c(
#' "ALW_1", "DHH_SEX", "ALW_2A1", "ALW_2A2", "ALW_2A3", "ALW_2A4",
#' "ALW_2A5", "ALW_2A6", "ALW_2A7", "binge_drinker"
#' )
#' )
#'
#' tail(binge2009_2010)
#'
#' combined_binge <- bind_rows(binge2001, binge2009_2010)
#'
#' head(combined_binge)
#'
#' tail(combined_binge)
#'
#' # Using binge_drinker_fun() to generate binge_drinker with user-inputted
#' # values.
#' #
#' # Let's say you are a male, and you had drinks in the last week. Let's say
#' # you had 3 drinks on Sunday, 1 drink on
#' # Monday, 6 drinks on Tuesday, 0 drinks on Wednesday, 3 drinks on Thurday,
#' # 8 drinks on Friday, and 2 drinks on Saturday. Using binge_drinker_fun(),
#' # we can check if you would be classified as a drinker.
#'
#' binge <- binge_drinker_fun(DHH_SEX = 1, ALW_1 = 1, ALW_2A1 = 3, ALW_2A2 = 1,
#' ALW_2A3 = 6, ALW_2A4 = 0, ALW_2A5 = 3,
#' ALW_2A6 = 8, ALW_2A7 = 2)
#'
#' print(binge)
#' @export
binge_drinker_fun <-
  function(DHH_SEX, ALW_1, ALW_2A1, ALW_2A2, ALW_2A3, ALW_2A4, ALW_2A5, ALW_2A6,
           ALW_2A7) {
    # Classifies binge drinking from the seven daily drink counts
    # (ALW_2A1 = Sunday ... ALW_2A7 = Saturday): males binge at >= 5 drinks
    # on any one day, females at >= 4. Returns 1 (binge drinker),
    # 2 (non-binge drinker), "NA(b)" when a daily count falls outside the
    # expected range (e.g. missing/refused codes), and "NA(a)" when ALW_1
    # does not equal 1 (no drinks reported in the last week).
    # NOTE(review): if_else2() is a cchsflow helper not visible in this
    # chunk -- presumably an NA-safe ifelse where a non-TRUE condition falls
    # through to the else branch; confirm against its definition before
    # relying on the "NA(a)"/"NA(b)" fall-through order.
    # If respondents had alcohol in the last week
    if_else2(ALW_1 == 1,
      # Males with at least one day with 5 or more drinks
      if_else2((DHH_SEX == 1 & (ALW_2A1 >= 5 | ALW_2A2 >= 5 | ALW_2A3 >=5 |
                                  ALW_2A4 >= 5 | ALW_2A5 >= 5 | ALW_2A6 >= 5 |
                                  ALW_2A7 >= 5)), 1,
      # Males with no days with 5 or more drinks
      if_else2((DHH_SEX == 1 & (ALW_2A1 %in% (0:4) & ALW_2A2 %in% (0:4) &
                                  ALW_2A3 %in% (0:4) & ALW_2A4 %in% (0:4) &
                                  ALW_2A5 %in% (0:4) & ALW_2A6 %in% (0:4) &
                                  ALW_2A7 %in% (0:4))), 2,
      # Females with at least one day with 4 or more drinks
      if_else2((DHH_SEX == 2 & (ALW_2A1 >= 4 | ALW_2A2 >= 4 | ALW_2A3 >= 4 |
                                  ALW_2A4 >= 4 | ALW_2A5 >= 4 | ALW_2A6 >= 4 |
                                  ALW_2A7 >= 4)), 1,
      # Females with no days with 4 or more drinks
      if_else2((DHH_SEX == 2 & (ALW_2A1 %in% (0:3) & ALW_2A2 %in% (0:3) &
                                  ALW_2A3 %in% (0:3) & ALW_2A4 %in% (0:3) &
                                  ALW_2A5 %in% (0:3) & ALW_2A6 %in% (0:3) &
                                  ALW_2A7 %in% (0:3))), 2, "NA(b)")))),
      # Respondents who didn't indicate they had alcohol in the last week
      "NA(a)")
  }
|
1bc8ddba2ddf6ffccb8d6dda4083ed2f0a61b27d
|
36ca7c3ca01b9dddd352b06b55fe1785c1cd318c
|
/R/ciPhase.R
|
e98cba2193663bfb9d10a10bb4a247a34b3b8770
|
[] |
no_license
|
cran/season
|
551ddcbda4a8dc417e0b8b6c021fccaec81685ea
|
9e054890fe969261939be9b26eeea9f5650f1a55
|
refs/heads/master
| 2022-05-16T12:07:10.945488
| 2022-03-21T07:30:11
| 2022-03-21T07:30:11
| 17,699,528
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,345
|
r
|
ciPhase.R
|
# ciPhase.R
# Confidence interval for circular phase
#' Mean and Confidence Interval for Circular Phase
#'
#' Calculates the mean and confidence interval for the phase based on a chain
#' of MCMC samples.
#'
#' The estimates of the phase are rotated to have a centre of \eqn{\pi}, the
#' point on the circumference of a unit radius circle that is furthest from
#' zero. The mean and confidence interval are calculated on the rotated values,
#' then the estimates are rotated back.
#'
#' @param theta chain of Markov chain Monte Carlo (MCMC) samples of the phase.
#' @param alpha the confidence level (default = 0.05 for a 95\% confidence
#' interval).
#' @return \item{mean}{the estimated mean phase.} \item{lower}{the estimated
#' lower limit of the confidence interval.} \item{upper}{the estimated upper
#' limit of the confidence interval.}
#' @author Adrian Barnett \email{a.barnett@qut.edu.au}
#' @references Fisher, N. (1993) \emph{Statistical Analysis of Circular Data}.
#' Cambridge University Press. Page 36.
#'
#' Barnett, A.G., Dobson, A.J. (2010) \emph{Analysing Seasonal Health Data}.
#' Springer.
#' @examples
#' \donttest{
#' theta = rnorm(n=2000, mean=0, sd=pi/50) # 2000 normal samples, centred on zero
#' hist(theta, breaks=seq(-pi/8, pi/8, pi/30))
#' ciPhase(theta)
#' }
#'
#' @export ciPhase
ciPhase <- function(theta, alpha = 0.05) {
  # Candidate centres spaced pi/100 apart around the circle [0, 2*pi].
  candidates <- seq(0, 2 * pi, pi / 100)
  # Mean circular distance from the samples to each candidate centre
  # (Fisher 1993, p. 36).
  dist_to_centre <- vapply(
    candidates,
    function(ctr) pi - mean(abs(pi - abs(theta - ctr))),
    numeric(1)
  )
  # which.min() returns the first minimiser, matching the original tie-break.
  centre <- candidates[which.min(dist_to_centre)]
  # Rotate the data so they are centred on pi before averaging; only needed
  # when the estimated centre lies in the top half of the circle (near 0 or
  # 2*pi), where the 0/2*pi discontinuity would bias the summaries.
  shift <- 0
  rotated <- theta
  if (centre < pi / 2 || centre > 3 * pi / 2) {
    shift <- pi - centre
    shift_below <- shift - 2 * pi
    rotated <- theta + shift * (theta < pi) + shift_below * (theta > pi)
  }
  # Undo the rotation on the summaries before returning.
  list(
    mean  = mean(rotated) - shift,
    lower = as.numeric(quantile(rotated, prob = alpha / 2) - shift),
    upper = as.numeric(quantile(rotated, prob = 1 - (alpha / 2)) - shift)
  )
}
# example
# theta<-rnorm(n=2000,mean=0,sd=pi/50) # Normal, centred on zero
# cis<-ciPhase(theta)
# hist(theta,breaks=seq(-pi/8,pi/8,pi/30))
|
075323e40cad300deb195b82147a8815bf56fb7d
|
c18e8580e8823727df2da5a1e7f1e23ac80f64f3
|
/codes/howard.R
|
bf45657ca159e74ea7f0d3dfc0f17ccb1244504c
|
[] |
no_license
|
sophielee1/NLP_byline_filter
|
4a4d30fd80ed8238706f582850b85c6eb18eed4a
|
400f04a7562e79ae7dd1fb7cd5303da65ea075c3
|
refs/heads/master
| 2021-01-17T23:22:02.797657
| 2016-08-12T23:13:32
| 2016-08-12T23:13:32
| 42,749,046
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
howard.R
|
### howard's code here #
##
## end of howard's code
|
9d240f0e0b42d4894450d53798a1cfc04d541d21
|
16cc0f4bb42a2081e38f358cc202dcd8e9f30faf
|
/mapas_R.R
|
b63d179ccff01d95e590dfa0e671e971b2d1182d
|
[] |
no_license
|
Prof-Rodrigo-Silva/ScriptR
|
d71d76998a391656ffde4ca04111334df6775346
|
67bc2e7ba7bad07597849d1ecaf342632a913777
|
refs/heads/master
| 2022-07-31T15:15:07.568838
| 2022-07-15T17:52:07
| 2022-07-15T17:52:07
| 181,120,450
| 13
| 16
| null | null | null | null |
UTF-8
|
R
| false
| false
| 804
|
r
|
mapas_R.R
|
# mapas_R.R -- one-off environment setup for drawing maps in R.
# Installs the interactive-mapping stack (shiny/leaflet) plus the
# ggmap/rgdal tool chain, then attaches the libraries used downstream.

install.packages("devtools")
install.packages("htmltools")
install.packages("httpuv")
install.packages("mime", dependencies=T)
install.packages("xtable", dependencies=T)
install.packages("magrittr", dependencies=T)
install.packages("crosstalk", dependencies=T)
install.packages("jsonlite", dependencies=T)
install.packages("yaml", dependencies=T)
install.packages("shiny", dependencies=T) # explore further
install.packages("leaflet", dependencies=T)
install.packages("Rcpp", dependencies=T)
install.packages("ggplot2",dependencies = T)
install.packages("ggmap",dependencies = T)
install.packages("rgdal",dependencies = T)

# Attach the packages needed for building the maps.
library(rgdal)
library(ggplot2)
library(maps)
library(ggmap)
library(magrittr)
library(leaflet)
library(jsonlite)

# NOTE(review): the next assignment is incomplete -- `shape =` continues onto
# the following line, so `shape` ends up bound to the result of
# register_google(). A shapefile read (e.g. via rgdal) was presumably
# intended here; confirm with the author.
# register_google() requires a real Google Maps API key; the empty string
# below is a placeholder.
shape =
register_google(key = "")
|
cbdb1e1579c72471199499c8695a1173a1f1b2fa
|
8529a7550069ce84924044cc345d7f8a66b55579
|
/man/secure-app.Rd
|
e56e6c6d6e4b785a874dc1e796e117da2b0262c5
|
[] |
no_license
|
pvictor/shinymanager
|
f4a58d7bc11e40a33ebdb98dd54a0f0e59dc307a
|
501e975b0d190757d96a7c855c94728464737313
|
refs/heads/master
| 2020-05-13T18:24:04.457886
| 2019-04-16T08:14:07
| 2019-04-16T08:14:07
| 181,648,867
| 3
| 0
| null | 2019-04-16T08:35:50
| 2019-04-16T08:35:49
| null |
UTF-8
|
R
| false
| true
| 1,709
|
rd
|
secure-app.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/secure-app.R
\name{secure-app}
\alias{secure-app}
\alias{secure_app}
\alias{secure_server}
\title{Secure a Shiny application and manage authentication}
\usage{
secure_app(ui, ..., enable_admin = FALSE, head_auth = NULL)
secure_server(check_credentials,
session = shiny::getDefaultReactiveDomain())
}
\arguments{
\item{ui}{UI of the application.}
\item{...}{Arguments passed to \code{\link{auth_ui}}.}
\item{enable_admin}{Enable or disable access to admin mode; note that
admin mode is only available when using the SQLite backend for credentials.}
\item{head_auth}{Tag or list of tags to use in the \code{<head>}
of the authentication page (for custom CSS for example).}
\item{check_credentials}{Function passed to \code{\link{auth_server}}.}
\item{session}{Shiny session.}
}
\description{
Secure a Shiny application and manage authentication
}
\examples{
if (interactive()) {
# define some credentials
credentials <- data.frame(
user = c("shiny", "shinymanager"),
password = c("azerty", "12345"),
stringsAsFactors = FALSE
)
library(shiny)
library(shinymanager)
ui <- fluidPage(
tags$h2("My secure application"),
verbatimTextOutput("auth_output")
)
# Wrap your UI with secure_app
ui <- secure_app(ui)
server <- function(input, output, session) {
# call the server part
# check_credentials returns a function to authenticate users
res_auth <- secure_server(
check_credentials = check_credentials(credentials)
)
output$auth_output <- renderPrint({
reactiveValuesToList(res_auth)
})
# your classic server logic
}
shinyApp(ui, server)
}
}
|
9a1d5c31ce790f007a9cc2bd44be1f5264c7f961
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/E_Penman/libFuzzer_E_Penman/E_Penman_valgrind_files/1612738732-test.R
|
0baa49cc6da18d8bacf6258e508d5c1c69c17366
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 821
|
r
|
1612738732-test.R
|
# Auto-generated fuzzer regression case: replays a captured argument list
# (mostly empty numeric vectors plus one temperature vector containing NaN
# and extreme doubles) against meteor:::E_Penman and prints the structure
# of the result to check it does not crash.
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(7.06327792665088e-304, NaN, 5.43230922486616e-312, -6.90308500254685e+305, 9.07075240201063e-97, 9.70418706716128e-101, 1.67818560720548e-307, 9.70418706716122e-101, 9.70418706716128e-101, 9.70418706720286e-101, 9.70418706716128e-101, NaN, NaN, 1.33240877681e-105, 2.61035155593226e+180, 2.14838935209055e+174, 9.54424427230557e-307, 2.07507571253324e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), u = numeric(0))
result <- do.call(meteor:::E_Penman,testlist)
str(result)
|
369424a8ab89cee5c0c7871306c833ac38581ebb
|
ff3564485f1d4f65140800fdf474924e5039e4cc
|
/cleanup.R
|
5f1a06a23ce8654c5a3021b5c71faa77eafd8783
|
[] |
no_license
|
nkannan97/-Soccer-Optimization
|
3c6e0a701ac72f5fe3ec7a8a3ed9559f6bc376c8
|
2348b18eec9d71bfb08e5f90e50399fdd92eff6c
|
refs/heads/master
| 2020-04-11T01:38:58.732942
| 2018-12-12T02:31:19
| 2018-12-12T02:31:19
| 161,421,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,885
|
r
|
cleanup.R
|
# cleanup.R
# Cleans the FIFA "CompleteDataset" player file: evaluates the "+/-" modified
# attribute strings into plain numbers, drops a player without a FUT card,
# normalises empty club names, and writes the cleaned table back out.

# Read in the raw dataset (absolute path -- only valid on the original
# author's machine; adjust before running elsewhere).
t <- read.csv('/home/adithya/Desktop/SPRING2017/CS524/Project/data/CompleteDataset.csv', stringsAsFactors = FALSE)

# Columns from "Acceleration" onward hold the player attribute statistics;
# drop the non-numeric "Preferred.Positions" column.
stats <- t[, which(colnames(t) == "Acceleration"):ncol(t)]
stats <- stats[, -which(colnames(stats) == "Preferred.Positions")]

# Replace missing data with 0 (most likely goal keepers without forward
# playing statistics).
stats[is.na(stats)] <- 0

# Some attributes are stored as strings like "78+3" or "80-2"; evaluate the
# arithmetic so every attribute becomes a plain number. apply() walks the
# data frame row-wise (coercing each row to a character vector).
stats2 <- apply(stats, 1, function(x) {
  temp <- unlist(x)
  pos <- grep("+", temp, fixed = TRUE)
  if (length(pos) > 0) {
    parts <- strsplit(temp[pos], "+", fixed = TRUE)
    for (i in seq_along(parts)) {
      temp[pos[i]] <- sum(as.integer(parts[[i]]))
    }
  }
  pos <- grep("-", temp, fixed = TRUE)
  if (length(pos) > 0) {
    parts <- strsplit(temp[pos], "-", fixed = TRUE)
    for (i in seq_along(parts)) {
      temp[pos[i]] <- as.integer(parts[[i]][1]) - as.integer(parts[[i]][2])
    }
  }
  temp
})

# apply() returned one column per player; transpose back to one row per
# player and convert to a data frame.
stats2 <- t(stats2)
stats2 <- as.data.frame(stats2)

# Write the cleaned attribute columns back into the main table.
t[, colnames(stats2)] <- stats2

# Remove T. Wilson because he doesn't have a card on FUT.
t <- t[-which(t[, "X"] == "13771"), ]

# Change players without a club to "No Club/Non FIFA", then drop the
# leading index column.
t[which(t[, "Club"] == ""), "Club"] <- "No Club/Non FIFA"
t <- t[, -1]

# Parse the market value strings (e.g. "\u20ac110.5M", "\u20ac800K"): strip
# the leading currency symbol first.
values <- t[, "Value"]
values <- vapply(values, function(x) {
  substring(x, 2, nchar(x))
}, character(1), USE.NAMES = FALSE)
for (i in seq_along(values)) {
  suffix <- substring(values[i], nchar(values[i]))
  if (suffix == "M") {
    # BUG FIX: the original called println(), which does not exist in R and
    # would abort the script; print() is the base-R equivalent.
    print(as.double(substring(values[i], 1, nchar(values[i]) - 1)))
  } else if (suffix == "K") {
    # NOTE(review): the "K" branch was left empty in the original, and the
    # parsed numbers are never written back into `values`, so the numeric
    # conversion is incomplete. Preserved as-is apart from the print fix.
  }
}
values <- unlist(values)

# Write the cleaned dataset to file.
write.csv(t, file = '../data/modifiedCompleteDataset.csv')
|
35fd3b05c2baebc43cc600effe03bddff2369fa4
|
4d74d9b07cf820c5456cdf0af9375000d2b06422
|
/src/plot_data.R
|
cc7507ae09ea38341242403ac8b5055d48bbe724
|
[] |
no_license
|
TinaQian2017/Reproduction_of_Projects
|
ac9146eab4470af9b3f80e576fd832ebb69a825e
|
a2f73946d0ec21bb893a5ad49223a87fb3e76a90
|
refs/heads/master
| 2021-04-27T04:38:03.233555
| 2018-02-23T06:35:24
| 2018-02-23T06:35:24
| 122,582,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
plot_data.R
|
# DSCI 522 milestone2
# Author: Yue (Tina) Qian
# Date: 2017-12-13
# Function: the script generates a plot of data points for better
# understanding of the data.

library(ggplot2)

# Load the summarised simulated data.
summarized <- read.csv("results/simulated_data/summarized_data.csv", header = TRUE)

# Boxplot of sepal length by species, with the raw observations jittered
# on top.
sepal_plot <- ggplot(data = summarized, aes(x = species, y = sepal_length)) +
  geom_boxplot() +
  labs(
    x = "Species",
    y = "Sepal Length",
    title = "Influence of Species on the Sepal Length"
  ) +
  geom_jitter()

# Save the figure for the final results.
ggsave(filename = "results/final_results/plot_data.png", plot = sepal_plot)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.