blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
53ddb8263f52c24361192d512f3e66e75ec76a3a
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/FRK/man/show_basis.Rd
|
6eedf96e2329e1a83990c282878d1276818f88af
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,820
|
rd
|
show_basis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGeneric.R, R/plottingfns.R
\docType{methods}
\name{show_basis}
\alias{show_basis}
\alias{show_basis,Basis-method}
\alias{show_basis,TensorP_Basis-method}
\title{Show basis functions}
\usage{
show_basis(basis, ...)
\S4method{show_basis}{Basis}(basis, g = ggplot() + theme_bw() + xlab("") +
ylab(""))
\S4method{show_basis}{TensorP_Basis}(basis, g = ggplot())
}
\arguments{
\item{basis}{object of class \code{Basis}}
\item{...}{not in use}
\item{g}{object of class \code{gg} (a \code{ggplot} object) over which to overlay the basis functions (optional)}
}
\description{
Generic plotting function for visualising the basis functions.
}
\details{
The function \code{show_basis} adapts its behaviour to the manifold being used. With \code{real_line}, the 1D basis functions are plotted with colour distinguishing between the different resolutions. With \code{plane}, only local basis functions are supported (at present). Each basis function is shown as a circle with diameter equal to the \code{scale} parameter of the function. Linetype distinguishes the resolution. With \code{sphere}, the centres of the basis functions are shown as circles, with larger sizes corresponding to coarser resolutions. Space-time basis functions of subclass \code{TensorP_Basis} are visualised by showing the spatial basis functions and the temporal basis functions in two separate plots.
}
\examples{
library(ggplot2)
library(sp)
data(meuse)
coordinates(meuse) = ~x+y # change into an sp object
G <- auto_basis(manifold = plane(),data=meuse,nres = 2,regular=2,prune=0.1,type = "bisquare")
\dontrun{show_basis(G,ggplot()) + geom_point(data=data.frame(meuse),aes(x,y))}
}
\seealso{
\code{\link{auto_basis}} for automatically constructing basis functions.
}
|
da1902f21d4cd1816958eed718aba4279dd96c31
|
ede21985ba2bab4ad9acecbc0683e45d2047bfc5
|
/estimates_BC_All.R
|
a2e58c19e9d370386152e6a38a558cb78236ed57
|
[] |
no_license
|
rgangnon/NonBreastCancerMortality_by_Race
|
577cf2837d11e49ace0667b1799e28dfd4f32cfb
|
41317b8a0d8693b0a416b325f8512950a4136fad
|
refs/heads/master
| 2022-11-28T17:11:58.194399
| 2020-07-20T15:08:57
| 2020-07-20T15:08:57
| 281,140,486
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,552
|
r
|
estimates_BC_All.R
|
library(tidyverse)
library(mgcv)

# Age x period grid: ages 0-99 crossed with periods 1968-2018, plus birth cohort.
ages <- 0:99
periods <- 1968:2018
BC_All_Estimates <- tibble(age=rep(ages,length(periods)),
                           period=rep(periods,rep(length(ages),length(periods)))) %>%
  mutate(cohort=period-age)

# Add the linear predictor (lp) and its standard error (se_lp) from each
# race-specific APC model, suffixing the columns by race after each call.
# predict_APC and the BC_All_APC_* fits are created by an upstream script.
BC_All_Estimates <- predict_APC(BC_All_APC_AllRaces,BC_All_Estimates) %>%
  rename(lp_all=lp,se_lp_all=se_lp)
BC_All_Estimates <- predict_APC(BC_All_APC_White,BC_All_Estimates) %>%
  rename(lp_white=lp,se_lp_white=se_lp)
BC_All_Estimates <- predict_APC(BC_All_APC_Black,BC_All_Estimates) %>%
  rename(lp_black=lp,se_lp_black=se_lp)
BC_All_Estimates <- predict_APC(BC_All_APC_Asian,BC_All_Estimates) %>%
  rename(lp_asian=lp,se_lp_asian=se_lp)
BC_All_Estimates <- predict_APC(BC_All_APC_Hispanic,BC_All_Estimates) %>%
  rename(lp_hisp=lp,se_lp_hisp=se_lp)
BC_All_Estimates <- predict_APC(BC_All_APC_NHWhite,BC_All_Estimates) %>%
  rename(lp_nhwhite=lp,se_lp_nhwhite=se_lp)
BC_All_Estimates <- predict_APC(BC_All_APC_NHBlack,BC_All_Estimates) %>%
  rename(lp_nhblack=lp,se_lp_nhblack=se_lp)

# Build one race's table on the logit scale. lp_expr / se_expr are unquoted
# expressions in the lp_* / se_lp_* columns, captured with {{ }}.
# NOTE(review): combining SEs as sqrt(sum of squares) assumes the component
# linear predictors are independent, as in the original script — confirm.
make_race_estimates <- function(df, race_label, lp_expr, se_expr) {
  df %>%
    rename(Age=age,Period=period) %>%
    mutate(Race=race_label,
           Logit_BC_All={{ lp_expr }},
           SE_Logit_BC_All={{ se_expr }}) %>%
    select(Age,Period,Race,Logit_BC_All,SE_Logit_BC_All)
}

# Stack the per-race tables (same order as the original rbind) and
# back-transform to the probability scale with 95% confidence limits.
# This replaces seven near-identical copy-pasted pipelines and the
# trailing rm() of their temporaries.
BC_All_Estimates <- bind_rows(
  make_race_estimates(BC_All_Estimates,"All Races",lp_all,se_lp_all),
  make_race_estimates(BC_All_Estimates,"Asian",
                      lp_all+lp_asian,sqrt(se_lp_all^2+se_lp_asian^2)),
  make_race_estimates(BC_All_Estimates,"Black",
                      lp_all+lp_black,sqrt(se_lp_all^2+se_lp_black^2)),
  make_race_estimates(BC_All_Estimates,"Hispanic",
                      lp_all+lp_hisp,sqrt(se_lp_all^2+se_lp_hisp^2)),
  make_race_estimates(BC_All_Estimates,"Non-Hispanic Black",
                      lp_all+lp_black+lp_nhblack,
                      sqrt(se_lp_all^2+se_lp_black^2+se_lp_nhblack^2)),
  make_race_estimates(BC_All_Estimates,"Non-Hispanic White",
                      lp_all+lp_white+lp_nhwhite,
                      sqrt(se_lp_all^2+se_lp_white^2+se_lp_nhwhite^2)),
  make_race_estimates(BC_All_Estimates,"White",
                      lp_all+lp_white,sqrt(se_lp_all^2+se_lp_white^2))) %>%
  mutate(BC_All=alogit(Logit_BC_All),
         BC_All_LCL=alogit(Logit_BC_All-1.96*SE_Logit_BC_All),
         BC_All_UCL=alogit(Logit_BC_All+1.96*SE_Logit_BC_All))
|
eab4230d4bc81a48ae78f6daa4c4ee772f5d2544
|
a024aa0784f79403bcb4309de77ba7cd2019e2a0
|
/cachematrix.R
|
19bd11539eef8e32e819d2f705e444479540e2cb
|
[] |
no_license
|
jorgerojasrivas/ProgrammingAssignment2
|
b673fc26a553dd3ea7b88979f4964d4534fe2304
|
ae5e75c15f90b7daa13b0f0b2f4e41594421f670
|
refs/heads/master
| 2020-04-03T02:48:24.044844
| 2015-09-23T23:44:13
| 2015-09-23T23:44:13
| 42,889,154
| 0
| 0
| null | 2015-09-21T19:33:40
| 2015-09-21T19:33:40
| null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
cachematrix.R
|
## makeCacheMatrix wraps a matrix in a cache-aware object: a list of four
## closures that share one environment holding the matrix and, once it has
## been computed (by cacheSolve), its inverse. Setting a new matrix via
## set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL  # a new matrix invalidates the cached inverse
    }
    get <- function() x
    set_inverse <- function(inverse) inv <<- inverse  ## <<- writes to the enclosing environment
    get_inverse <- function() inv
    list(set = set,
         get = get,
         set_inverse = set_inverse,
         get_inverse = get_inverse)
}
## cacheSolve returns the inverse of the special "matrix" built by
## makeCacheMatrix. If the inverse was already computed (and the matrix has
## not been replaced since), it is fetched from the cache instead of being
## recomputed with solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv <- x$get_inverse()
    if (is.null(inv)) {
        ## cache miss: compute the inverse and store it for next time
        inv <- solve(x$get())
        x$set_inverse(inv)
    } else {
        message("getting inverse from cache data")
    }
    inv
    ## to test the function type in cacheSolve(makeCacheMatrix(matrix(c(1,2,3,4), nrow = 2, ncol = 2)))
}
|
ab614b976fc6d0f392bda0e25154596fd0d93d32
|
935753e266e577fc241529a2f1cef200c6fb0e5d
|
/analysis/groceries/food_nutrients_dplyrXdf.R
|
6b3b04cdf0a5bb2ffbde9006dd4e1b8435219e71
|
[
"Apache-2.0"
] |
permissive
|
rmhorton/sim_diabetes_data
|
a35f5e856aa3464a970e0872051cde6f5a8f8485
|
ac0d3d2a01e7a27bf0407d5ccd259f645e098b08
|
refs/heads/master
| 2021-01-09T20:26:36.241025
| 2016-12-17T00:15:18
| 2016-12-17T00:15:18
| 61,665,128
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,017
|
r
|
food_nutrients_dplyrXdf.R
|
# dplyrXdf version by Seth Mottaghinejad and Ali-Kazim Zaidi, Microsoft
# Ali's note:
# Unfortunately, dplyrXdf isn’t as smart as dplyr in regrouping, especially when the xdf is piped
# off to a mutate operation, which typically doesn’t want a grouped object. Adding an ungroup()
# verb to the pipeline seems to fix the issues.
# devtools::install_github("Hong-Revo/dplyrXdf")
library(dplyrXdf)
# Long-form nutrient composition: one row per (food, nutrient) pair.
# NOTE(review): the /100 in total_quantity below suggests `quantity` is per
# 100 units of food — confirm against the data source.
food_nutrient_table_long <- read.csv("food_nutrients.csv", header=FALSE,
col.names=c("food", "nutrient", "quantity"), stringsAsFactors=FALSE)
# Grocery purchases: one row per item bought per diet.
grocery_table <- read.csv("grocery_sample.csv", header=FALSE,
col.names=c("diet_id", "item", "quantity", "units"), stringsAsFactors=FALSE)
# Stage both data frames as xdf files so the dplyrXdf pipeline can run on them.
grocery_table_xdf <- RxXdfData("grocery.xdf")
rxDataStep(grocery_table, grocery_table_xdf, overwrite = TRUE)
food_nutrient_table_long_xdf <- RxXdfData("foodnutrient.xdf")
rxDataStep(food_nutrient_table_long, food_nutrient_table_long_xdf, overwrite = TRUE)
# Per diet, compute the percentage of calories coming from carbohydrates.
# The interleaved ungroup() calls work around the dplyrXdf regrouping
# limitation described in the note at the top; do not remove them.
grocery_table_xdf %>%
rename(food = item, quantity_bought = quantity) %>%
group_by(diet_id, nutrient) %>%
inner_join(food_nutrient_table_long_xdf, by = 'food') %>%
ungroup() %>%
# nutrient amount contributed by each purchased item
mutate(total_quantity = quantity * quantity_bought/100) %>%
group_by(diet_id, nutrient) %>%
summarize(quantity = sum(total_quantity)) %>%
ungroup() %>%
# carbs are converted to calories at 4.1 per unit (see carb_calories);
# calories come straight from the 'energy' nutrient rows
mutate(is_carb = nutrient == 'carbs', is_energy = nutrient == 'energy',
carb_calories = is_carb * 4.1 * quantity, calories = is_energy * quantity) %>%
group_by(diet_id) %>%
summarize(carb_calories = sum(carb_calories), calories = sum(calories)) %>%
ungroup() %>%
mutate(pct_calories_carbs = 100*carb_calories/calories) %>%
select(diet_id, pct_calories_carbs) -> grocery_xdf
# Pull the xdf result into an in-memory data frame for comparison checks.
grocery_xdf_check <- as.data.frame(grocery_xdf, stringsAsFactors=FALSE)
## compare to result from dplyr:
# grocery_check <- grocery_df %>% inner_join(grocery_xdf_check, by = "diet_id")
# all.equal(grocery_df$pct_calories_carbs, grocery_xdf_check$pct_calories_carbs) # TRUE
|
1601d3ec1a97aee6b0de0c322c17b37c5cc70f3f
|
0be9cdf6fa55b333c0b85cec0f11a59b21da3662
|
/man/LRH01.Rd
|
603e2788b492ad70745b090777c6a6beb6c8f91a
|
[] |
no_license
|
cran/caribou
|
869fc7e68a2d526227309aa5b07cacce55f8505d
|
52178b1b67681b9b91a156a78da27fb06f7cd954
|
refs/heads/master
| 2022-05-04T03:48:42.848347
| 2022-04-13T20:32:46
| 2022-04-13T20:32:46
| 17,694,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,382
|
rd
|
LRH01.Rd
|
\name{LRH01}
\alias{LRH01}
\docType{data}
\title{
Leaf River Herd 2001 Data
}
\description{
Population size of the Leaf River (Riviere aux Feuilles) herd (Quebec)
from a postcalving survey in July 2001.
}
\usage{data(LRH01)}
\format{
17 by 2 numeric matrix, with the following columns:
\describe{
\item{\code{xi}}{number of radio-collared animals in the detected (photographed) groups}
\item{\code{gni}}{size of the detected groups}
}
}
\details{
During this survey, 120 collars were active.\cr
23 collars (19\%) were photographed among the 17 detected groups.
}
\note{
The small sample size of this census was caused by technical and weather
related problems in July 2001. This provided an opportunity
to see the behaviour of the different models under low sampling regime.
}
\source{
Couturier, S., Jean, D., Otto, R. and Rivard, S. (2004). \emph{Demography of the
migratory tundra caribou (Rangifer tarandus) of the Nord-du-Quebec
region and Labrador}. Min. Ressources naturelles, Faune et Parcs, Quebec. 68 p.
ISBN: 2-550-43725-X
}
\examples{
petersen(LRH01, M=120)
abundance(LRH01, n=120, model="H")
abundance(LRH01, n=120, model="I")
abundance(LRH01, n=120, model="T", B=2)
# The threshold model with B >= 3 is equivalent
# to the homogeneity model for this data set
# because max(LRH01$xi)=2
}
\keyword{datasets}
|
0e7b8bd83d47e92e755500cb8df460eed76c06a0
|
152c728d929462e15cb6be8ff052977a43051647
|
/plot1.R
|
52547be08dd2f04b69e8e2d7e713c56458e1cd13
|
[] |
no_license
|
Bibus/ExData_Plotting1
|
7159a77ce35bb7e2d8d8b8ab6c2e6d86e5c461bd
|
bd4fe2ee167de8dab7db02de3957d559a829d87a
|
refs/heads/master
| 2020-04-06T04:35:19.430987
| 2015-07-11T11:44:35
| 2015-07-11T11:44:35
| 38,903,248
| 0
| 0
| null | 2015-07-10T21:40:05
| 2015-07-10T21:40:05
| null |
UTF-8
|
R
| false
| false
| 556
|
r
|
plot1.R
|
# Load the full power-consumption data set; fields are ";"-separated
# (read.csv2) and "?" marks missing observations.
dataAll <- read.csv2("household_power_consumption.txt", na.strings = "?", stringsAsFactors = FALSE)
# Parse the Date column, then restrict to the two days of interest.
dataAll$Date <- as.Date(dataAll$Date, format = "%d/%m/%Y")
data <- dataAll[dataAll$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Render the Global Active Power histogram into plot1.png.
png(file = "plot1.png")
hist(as.numeric(data$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
6a902ee37e8ff381681ed02f021d599e1145d223
|
2b252b44d3177ab9e10a2b34f4551198db67eeb5
|
/Week 2 Assignment R Coursera.R
|
53e3171a60d272e27de61f73b4fe527b114641cf
|
[] |
no_license
|
dhrubasattwata/R-Programming
|
3c661a95a0d7c870993811483c063e2d7db0c128
|
76a72c46b1fec33795fe5903c3af14a48e7ddd75
|
refs/heads/master
| 2021-01-19T18:41:51.823540
| 2017-08-24T11:32:34
| 2017-08-24T11:32:34
| 101,155,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,187
|
r
|
Week 2 Assignment R Coursera.R
|
#specdata folder containing 332 csv files in your wd
# pollutantmean: mean of one pollutant across a set of monitor files.
#   directory  - folder of per-monitor CSV files; file i (alphabetical
#                order) corresponds to monitor id i
#   pollutant  - column to average, e.g. "sulfate" or "nitrate"
#   id         - integer vector of monitor ids to include
# Returns the mean over all selected monitors' rows, ignoring NA.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  files_full <- list.files(directory, full.names = TRUE)
  # Read the requested monitors and combine once, instead of growing a
  # data.frame with rbind() inside a loop (which copies on every pass).
  dat <- do.call(rbind, lapply(files_full[id], read.csv))
  mean(dat[, pollutant], na.rm = TRUE)
}
# Example invocations; these expect a "specdata" folder of monitor CSVs
# in the working directory.
pollutantmean("specdata", "sulfate", 1:10)
pollutantmean("specdata", "nitrate", 70:72)
pollutantmean("specdata", "sulfate", 34)
pollutantmean("specdata", "nitrate")
#Complete.R
# complete: number of fully-observed rows per monitor file.
#   directory - folder of per-monitor CSV files (file i = monitor id i)
#   id        - integer vector of monitor ids, in the desired output order
# Returns a data.frame with columns id and nobs (complete-case row count).
complete <- function(directory, id = 1:332) {
  files_full <- list.files(directory, full.names = TRUE)
  # Build one single-row frame per monitor, then bind once at the end
  # (avoids the O(n^2) rbind-inside-a-loop of the original).
  rows <- lapply(id, function(i) {
    moni_i <- read.csv(files_full[i])
    data.frame(id = i, nobs = sum(complete.cases(moni_i)))
  })
  do.call(rbind, rows)
}
# Example invocations of complete(); the seeded sample makes the printed
# subset of nobs reproducible.
cc <- complete("specdata", c(6, 10, 20, 34, 100, 200, 310))
print(cc$nobs)
cc <- complete("specdata", 54)
print(cc$nobs)
set.seed(42)
cc <- complete("specdata", 332:1)
use <- sample(332, 10)
print(cc[use, "nobs"])
#Corr.r
# corr: sulfate/nitrate correlation per monitor, for monitors that have
# strictly more than `threshold` complete (both-observed) pairs.
#   directory - folder of per-monitor CSV files
#   threshold - minimum number of complete pairs required (exclusive)
# Returns a numeric vector of correlations (length 0 if none qualify).
corr <- function(directory, threshold = 0) {
  files_full <- list.files(directory, full.names = TRUE)
  # seq_along() is safe for an empty directory (1:length() would yield
  # c(1, 0)); lapply + unlist avoids growing a vector with c() in a loop.
  cors <- lapply(seq_along(files_full), function(i) {
    moni_i <- read.csv(files_full[i])
    ok <- (!is.na(moni_i$sulfate)) & (!is.na(moni_i$nitrate))
    if (sum(ok) > threshold) {
      cor(moni_i$sulfate[ok], moni_i$nitrate[ok])
    } else {
      NULL  # dropped by unlist()
    }
  })
  # as.numeric() maps the all-NULL case to numeric(0), matching the original.
  as.numeric(unlist(cors, use.names = FALSE))
}
# Example invocations of corr() at several completeness thresholds; the
# seeded samples make the printed correlations reproducible.
cr <- corr("specdata")
cr <- sort(cr)
set.seed(868)
out <- round(cr[sample(length(cr), 5)], 4)
print(out)
cr <- corr("specdata", 129)
cr <- sort(cr)
n <- length(cr)
set.seed(197)
out <- c(n, round(cr[sample(n, 5)], 4))
print(out)
# At threshold 2000 only n monitors qualify; print that count alongside the
# threshold-1000 correlations.
cr <- corr("specdata", 2000)
n <- length(cr)
cr <- corr("specdata", 1000)
cr <- sort(cr)
print(c(n, round(cr, 4)))
|
8616a5bf9bcb2d89a5a578c7f8dd5d063680d64f
|
5017bd957040d7345071f46888f7a058e00daa49
|
/tree_tools/make_busco_completeness_figure.R
|
84adcd2409917ac6b8e3914f0bd55bf01c4b9ecf
|
[] |
no_license
|
kirstengott/scripts
|
e2bbcc4dc1b3f7d851cf2c0f9d7358a826e86e2d
|
5638d82928548d003978c9a1c290ca4d5d2657f3
|
refs/heads/master
| 2023-08-08T13:37:39.912095
| 2023-07-26T05:35:50
| 2023-07-26T05:35:50
| 61,399,217
| 0
| 1
| null | 2016-06-21T18:31:04
| 2016-06-17T20:02:23
|
R
|
UTF-8
|
R
| false
| false
| 2,164
|
r
|
make_busco_completeness_figure.R
|
#!/usr/bin/Rscript
# Collate BUSCO 'full_table' results for a set of genomes, plot the
# per-genome completeness percentages, and write a wide summary CSV.
args <- commandArgs(TRUE)
if (length(args) == 0L || any(c('-h', '--help') %in% args)) {
message('usage: path/to/busco_plot.R busco_db_path busco_dir
busco_db_path path to the busco lineage database
busco_dir path to directory holding busco results to collate in the plot
-h, --help to print help messages')
q('no')
}
library(tidyverse)
# Orthogroup annotation from the lineage database.
# NOTE(review): busco_annot is read but never used below — confirm whether
# it was meant to be joined onto `data` or can be dropped.
lineage_data <- list.files(args[1], recursive = TRUE, pattern = 'orthogroup_info', full.names = TRUE)
busco_annot <- read_tsv(lineage_data)
# Read every full_table result, tagging each row with a genome name cleaned
# out of the file name.
data <- list.files(args[2], pattern = 'full', recursive = TRUE, full.names = TRUE) %>%
  purrr::map(., .f = function(x){
    df <- read_delim(x,
                     comment = "#",
                     delim = "\t",
                     col_names = c('Busco', "Status", "Sequence", "Score", "Length"))
    df$name <- basename(x) %>% sub("run_", "", .) %>%
      sub('full_table_', '', .) %>%
      sub('.all.maker.proteins.fasta.tsv', '', .) %>%
      sub('_genomic.faa.tsv', '', .)
    df
  }) %>%
  bind_rows() %>%
  mutate(name = sub(".tsv", "", name))
# Per-genome percentage of BUSCOs in each status category.
data_summary <- data %>%
  group_by(name) %>%
  mutate(Total_Busco = length(Status)) %>%
  add_count(Status) %>%
  select(Status, Total_Busco, n) %>%
  distinct() %>%
  mutate(Percent = (n/Total_Busco)*100) %>%
  ungroup()
# Warn about genomes whose 'Complete' fraction is under 80%.
poor_quality <- data_summary %>% filter(Status == 'Complete', Percent < 80) %>% .$name
message(paste('Warning, remove these genomes as they have less than 80% completeness!',
              paste(poor_quality, collapse = "\n"), sep = '\n'))
# Build the figure, then save it with an explicit ggsave() call.
# Fix: the original chained `+ ggsave(...)` onto the plot expression, which
# evaluates ggsave() as a side effect and saves last_plot() — i.e. not the
# plot being built — before this plot is ever drawn.
completeness_plot <- ggplot(data_summary, aes(x = name, y = Percent, fill = Status)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  scale_y_continuous(breaks = seq(0, 100, by = 10), labels = seq(0, 100, by = 10),
                     expand = c(0,0)) +
  theme_linedraw(base_size = 11) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
ggsave(filename = 'busco_genome_completeness.pdf', plot = completeness_plot,
       device = 'pdf', width = 10, height = 20, units = 'in')
# Wide summary: one row per genome, one column per BUSCO status.
data_out <- data_summary %>% select(name, Status, Percent) %>%
  spread(key = Status, value = Percent)
write_csv(data_out, path = "busco_genome_completeness.csv")
|
eac330d4a8b9bcc644d01714f34fdc28f9587b69
|
244474826a849c395337f02e7b13db8eae78b148
|
/jjBarret.R
|
ffafe2f9e8db6116ad460bf65bdf8b057ff5b016
|
[] |
no_license
|
IvoVillanueva/jjBarret
|
ff08569d1610378dbf47e52e6c7548c29cee8f03
|
088a85e2a90ebbe52935d7173faf1745f98977aa
|
refs/heads/main
| 2023-04-03T19:11:13.747411
| 2021-04-10T17:50:52
| 2021-04-10T17:50:52
| 356,653,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,927
|
r
|
jjBarret.R
|
library(tidyverse)
library(rvest)
library(janitor)
library(stringr)
library(extrafont)
library(cowplot)
library(ggtext)
library(ggimage)
# Custom ggplot2 theme: minimal base (size 8, Chivo family) with minor grid
# lines removed, a light-grey plot background, and the caption rendered as
# markdown (element_markdown comes from the ggtext package loaded above).
theme_ivo <- function () {
theme_minimal(base_size=8, base_family="Chivo") %+replace%
theme(
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = '#f4f4f4', color = "#f4f4f4"),
plot.caption = element_markdown(size = 6.5)
)
}
# Scrape per-100-possessions player data from basketball-reference
df <- read_html("https://www.basketball-reference.com/leagues/NBA_2021_per_poss.html") %>%
html_node("table") %>%
html_table %>%
clean_names() %>% # janitor: transforms the names into more accessible ones
filter(player != "Player" )
# Save it to CSV and re-read (snapshot of the scraped table)
write.csv(df, "df_jjbarret.csv", row.names = FALSE)
jjbarret <- read.csv("df_jjbarret.csv", stringsAsFactors = FALSE)
# compute the data ----------------------------------------------------
jjbarret <- jjbarret %>%
# Rework R.J. Hampton's rows so only his combined (TOT) line survives,
# relabelled as ORL; the per-team DEN/ORL rows are marked "NON" and dropped.
mutate(tm=ifelse(player=="R.J. Hampton"& tm=="DEN","NON",tm),
tm=ifelse(player=="R.J. Hampton"& tm=="ORL","NON",tm),
tm=ifelse(player=="R.J. Hampton"& tm=="TOT","ORL",tm)
) %>%
# Players aged 20 or under with more than 12 games; top 20 by points,
# reshaped to long form (one row per player/stat) for faceted bars.
filter(age <= 20 & tm != "NON" & g > 12) %>%
select(player, Pts = pts, Ast=ast, Stl = stl, Blk = blk,Tov = tov,Trb = trb) %>%
arrange(desc(Pts)) %>% slice(1:20) %>%
pivot_longer(c(Pts, Trb, Ast,Tov, Stl, Blk ), names_to = "stats", values_to = "n")
# Build the chart --------------------------------------------------------
# NOTE(review): the group_by() here feeds straight into ggplot() — confirm
# it has any effect; it looks removable.
jjbarret <- jjbarret %>% group_by(player, stats) %>%
ggplot(aes(x = fct_inorder(stats), y = n, fill = stats)) +
geom_col() +
facet_wrap(~fct_inorder(player), scales = 'free_x', strip.position = 'bottom') +
theme_ivo()+
fishualize::scale_fill_fish(discrete = TRUE, option = "Trimma_lantana", alpha = .96)+
# horizontal background-coloured lines give the bars a "striped" look
geom_hline(yintercept = seq(1, 40, 1), color = "#f4f4f4", size = .4) +
geom_hline(yintercept = 0, size = .68, color = 'black') +
theme(legend.position = 'none',
axis.text.x = element_text(face = 'bold', margin = margin(t = -1.5)),
panel.grid.major.x = element_blank(),
strip.placement = 'outside',
strip.text.x = element_text(vjust = 3.5),
panel.spacing.x = unit(1, "lines"),
plot.title = element_text(face = 'bold', size = 15, hjust = 0.5),
plot.subtitle = element_text(size = 8, hjust = 0.5),
plot.title.position = 'plot',
plot.margin = margin(10, 10, 20, 10)) +
labs(x = "Leaderboard por 100 posesiones",
y = "",
title = "Los mejores 20 de 20 por 100 posesiones",
subtitle = paste0("Los mejores 20 jugadores de hasta 20 años | Updated ", format(Sys.Date(), "%d %B, %Y")),
caption ="<br><br>**Datos**: *@bball_ref* **Gráfico**: *Ivo Villanueva*")
# Save the chart.
# NOTE(review): the file name "jjbarret .png" contains a stray space before
# the extension — confirm this is intentional.
ggsave(
"jjbarret .png", jjbarret
,
height = 7, width = 7, dpi = "retina"
)
|
864ce74c48455235f880d41ec0608c51f3e0219b
|
94daab40ddcd9397471f55b5b54df7c4cf509bd4
|
/man/rotation.Rd
|
3d48c9a114baf6bfb58b546d77d89f4faea42155
|
[] |
no_license
|
cran/alphahull
|
5b7eb253e79eebdc34e85e0957ed504e1aba1481
|
1b465435829c449568242d41ecbf3b7b7d5584d7
|
refs/heads/master
| 2022-06-22T06:10:48.363549
| 2022-06-16T16:00:02
| 2022-06-16T16:00:02
| 17,694,339
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 804
|
rd
|
rotation.Rd
|
\name{rotation}
\alias{rotation}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Clockwise rotation}
\description{
This function calculates the clockwise rotation of angle \eqn{\theta} of a given vector \eqn{v} in the plane.
}
\usage{
rotation(v, theta)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{v}{Vector \eqn{v} in the plane.}
\item{theta}{Angle \eqn{\theta} (in radians).}
}
\value{
\item{v.rot}{Vector after rotation.}
}
\examples{
\dontrun{
# Rotation of angle pi/4 of the vector (0,1)
rotation(v = c(0, 1), theta = pi/4)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{nonparametric}
|
87565fb742f7f56847f8420d85f6593340325c0f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MDplot/examples/ramachandran.Rd.R
|
d7eab81ff9ce7922e7644164acd11dcba2243e0f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 397
|
r
|
ramachandran.Rd.R
|
# Example script extracted from the MDplot package documentation: draws a
# Ramachandran plot from the GROMOS example data bundled with the package.
library(MDplot)
### Name: ramachandran
### Title: Ramachandran plot for two dihedral angles
### Aliases: ramachandran
### Keywords: ramachandran angles dihedral
### ** Examples
# GROMOS (see load_rmsd() for other input possibilities)
ramachandran( load_ramachandran( system.file( "extdata/ramachandran_example.txt.gz",
package = "MDplot" ) ) )
|
c6e5427133c5f9ad8b5cf0654824198674e0d032
|
7b3ee03c13245b259dfc5bda1718e0a96e7979a9
|
/plot2.R
|
abe9c30eead0a8a273a876232a614aec7bb725e8
|
[] |
no_license
|
freshsnow/ExData_Plotting1
|
1bd3cd2345a5a9050cd9692ec02bf9219fe2fa88
|
adb3d857f7614f1a4c1ba301cb732367ca7fbea0
|
refs/heads/master
| 2021-01-15T09:09:46.895426
| 2015-12-11T19:48:44
| 2015-12-11T19:48:44
| 47,843,571
| 0
| 0
| null | 2015-12-11T18:19:19
| 2015-12-11T18:19:19
| null |
UTF-8
|
R
| false
| false
| 923
|
r
|
plot2.R
|
# Sample the first 100 rows to infer column classes, which speeds up
# reading the full file via colClasses.
initial <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                      comment.char = "", nrows = 100, na.strings = c("?"))
classes <- sapply(initial, class)
# Read the complete data set with the inferred classes, then keep only the
# rows for the two days of interest.
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  comment.char = "", colClasses = classes, na.strings = c("?"))
d <- hpc[hpc$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine Date and Time into a single date-time column for the x axis.
d$DateTime <- strptime(paste(d$Date, d$Time), "%d/%m/%Y %H:%M:%S")
# Render the global-active-power time series to a 480x480 PNG.
png(filename = "plot2.png", width = 480, height = 480)
plot(d$DateTime, d$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
a25edc1b58bd033d8547664e4e64fa90892d0a0a
|
02f053ce70b065724d4a02619fb402adcc0ec997
|
/analysis/boot/boot468.R
|
3eacfd3e90f16c9b037488bc3985c247d166b75f
|
[] |
no_license
|
patperry/interaction-proc
|
27950482929240bba55c7d0f2f8c5235d770feea
|
cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4
|
refs/heads/master
| 2021-01-01T06:11:47.125853
| 2012-12-04T20:01:42
| 2012-12-04T20:01:42
| 673,564
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,760
|
r
|
boot468.R
|
seed <- 468
log.wt <- -14.421658319440617
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 224718.55944468727
df.resid <- 35402
df <- 165
coefs <- c(6.360779415126783, 5.833475839273251, 5.736281907040804, 5.4446668265203835, 5.135576201770559, 4.898616308284427, 4.755748383515136, 4.648513297335195, 4.40732695369811, 4.303410239437712, 4.347459532307642, 4.174211861838285, 4.025126325692499, 3.9845060125543674, 3.7750335432586613, 3.5401237575685114, 3.2863360148067273, 2.942120984415681, 2.5345036254382873, 2.041720603832374, 1.71822452851573, 0.9246031330118061, 1.0583401062319495, 0.5011857756770082, 0.41218932591523316, -1.5157781988182688, -0.12491277297932607, 0.9541545959421137, 1.0925595855340307, -1.4396063744737848, -2.20303748873413, -2.777851355642518, -0.14265918172204897, 0.7601380093144571, 1.3697047360491474, -0.8650385483110654, -0.5243655725565401, -1.437135138748906, 1.8623254466244764e-2, -1.0555905429190202, 0.967103392157827, 1.0574102359300745, -0.6457453883622523, -2.1442752703705286, -1.3546348708299354, -0.7987090270966256, -0.6233337153797323, 0.16585550407812283, 0.5033337126552563, -0.7887584526795687, -0.1175510878560095, 0.9174810622068362, -2.736715836768936, 1.8050506414874428, 0.7629746431819628, 0.9800613618546125, -1.6888273150496598, -6.315181540838082e-2, -0.3928578456506417, 1.0135689852101508, 1.0639638085793772, 0.6452173473278263, -2.20454203776659, -1.1175160158357755, -0.43157945214351295, -0.3279904480083168, 0.7311017043486602, -0.3377956923707198, -1.610382080062845, -0.8385373193639194, -2.9908114079726413, -0.5165458861803055, 0.6730064396287287, 0.9945961473892587, 0.7534853437435355, -0.5839029127153892, -1.2966028320325447, -1.0903165019619536, -0.1257926620250086, 0.7698410962622726, 1.1998850479642993, 9.860339236848037e-2, 0.34279384391169726, -1.8294620751661774, 0.44474396133633726, 0.43883945420099774, 1.2402367969589618, 0.6268570370929805, 0.8790799738774606, -2.0586682985438505, 0.4149883165949635, 0.7503568958676935, 0.8213611249417057, 0.4333507075407687, 0.2901170563804266, 1.3308176594325667, -0.9078759203543374, 0.6199457468050454, 
6.829958195679606e-2, 0.14441726530485122, 0.34058163999211294, -0.4647592274961893, 0.8172738685565201, 0.26459589697600144, 0.7399368348786359, 0.8653467667807194, 1.2414417415496168, -0.2572184286722641, -1.1595291425613494, -0.746967256926987, 0.41704834486250897, 0.6955131348146671, 1.6139764649146437, -0.44421106168871793, -0.20000640020898255, -0.8500550406419307, 0.7893311791599008, -0.2832081508366848, 0.5097592320750941, 0.47703246808726524, -0.5593027171522752, -0.20048576847396263, -1.2745708861017335, -0.9064867494429948, 0.2725725637861135, 0.913688962722332, -6.2223125892757726e-2, 1.0241964564219406, -0.5258432712649298, -0.3239763357717709, 0.38181715394806826, 0.8497274979429856, 0.8703861650091745, 0.40043467155855955, 7.126526810541713e-2, 1.0549215785584471, -0.3356086964801557, 1.1071784642166038, 0.6723140841733061, 0.9986701484817999, 0.7327528599687654, -0.655542523000736, -1.0746064804310147, 0.7941146141502338, 0.45743637787345665, 0.57856647349823, -0.12562826260476706, -0.5247720403474421, -1.953896140429731, 1.2387577563983792, 4.648384180865166e-2, 1.267341185677133, -0.14354248042350642, -0.1189168375381817, -0.11236442071202445, -1.6640375662485145, -1.2739864383730568, 1.0115249748678101, 1.278246457214918, -0.20507641265926538, 1.5885293375929797, -0.2965341485820824, -0.2083313934409694, 5.491431748414149e-2, 1.1826221524394755)
|
3b856ecf80393be9e61e311e40c06d94714d2f20
|
69b49ce61413bc8190227621b0aa8dfaf951a048
|
/src/Concerto/TestBundle/Resources/R/concerto5/R/concerto.test.getVariables.R
|
fbbed0602144148bb21f4f5738148a0c42e9496a
|
[
"Apache-2.0"
] |
permissive
|
campsych/concerto-platform
|
de926ae820f2a3cf6985598f3824dee8f4615232
|
988b67e8d52acbf25fdc9078e7592cc07d2dd9a3
|
refs/heads/master
| 2023-08-31T08:09:05.570628
| 2023-08-23T16:43:03
| 2023-08-23T16:43:03
| 55,242,761
| 164
| 109
|
Apache-2.0
| 2023-07-26T15:10:48
| 2016-04-01T15:34:25
|
PHP
|
UTF-8
|
R
| false
| false
| 326
|
r
|
concerto.test.getVariables.R
|
concerto.test.getVariables = function(testId){
idField <- "test_id"
testId <- dbEscapeStrings(concerto$connection,toString(testId))
result <- dbSendQuery(concerto$connection,sprintf("SELECT id, name, value, type FROM TestVariable WHERE %s='%s'",idField,testId))
response <- fetch(result,n=-1)
return(response)
}
|
3f0b94458015716199c8a651c2150cabda23e59f
|
4c0a4c1539ab2ae54ffb69696fa8298ca55adc58
|
/R/roc.utils.percent.R
|
7d213a0aa05480eefc7480edb1f6212eb7777d2e
|
[] |
no_license
|
xrobin/pROC
|
d00ee196d2ac7530c913324d2977c5cfad72457c
|
435109419a29f538afa766e5fd113afd8958c722
|
refs/heads/master
| 2023-08-04T11:18:38.037879
| 2023-07-04T06:38:35
| 2023-07-04T06:38:35
| 2,377,999
| 114
| 35
| null | 2023-06-25T07:54:20
| 2011-09-13T12:09:08
|
R
|
UTF-8
|
R
| false
| false
| 5,024
|
r
|
roc.utils.percent.R
|
# pROC: Tools Receiver operating characteristic (ROC curves) with
# (partial) area under the curve, confidence intervals and comparison.
# Copyright (C) 2010-2014 Xavier Robin, Alexandre Hainard, Natacha Turck,
# Natalia Tiberti, Frédérique Lisacek, Jean-Charles Sanchez
# and Markus Müller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Helper functions to safely convert ROC objects from percent=TRUE to percent=FALSE
# and inversely. These are internal and experimental. They shouldn't be exposed
# to the end user.
# Returns a ROC curve with percent=FALSE
roc_utils_unpercent <- function(x) {
UseMethod("roc_utils_unpercent")
}
roc_utils_unpercent.roc <- function(x) {
if (x$percent) {
if (! is.null(x$auc)) {
x$auc <- roc_utils_unpercent(x$auc)
}
x$sensitivities <- x$sensitivities / 100
x$specificities <- x$specificities / 100
x$percent <- FALSE
if (!is.null(x$call)) {
x$call$percent <- FALSE
}
if (!is.null(x$ci)) {
x$ci <- roc_utils_unpercent(x$ci)
}
}
return(x)
}
roc_utils_unpercent.auc <- function(x) {
if (attr(x, "percent")) {
newx <- x / 100
attributes(newx) <- attributes(x)
x <- newx
attr(x, "percent") <- FALSE
if (is.numeric(attr(x, "partial.auc"))) {
attr(x, "partial.auc") <- attr(x, "partial.auc") / 100
}
if (! is.null(attr(x, "roc"))) {
attr(x, "roc") <- roc_utils_unpercent(attr(x, "roc"))
}
}
return(x)
}
roc_utils_unpercent.ci.auc <- function(x) {
if (attr(attr(x, "auc"), "percent")) {
x[] <- x / 100
attr(x, "auc") <- roc_utils_unpercent(attr(x, "auc"))
}
return(x)
}
roc_utils_unpercent.ci.thresholds <- function(x) {
if (attr(x, "roc")$percent) {
x$sensitivity[] <- x$sensitivity / 100
x$specificity[] <- x$specificity / 100
attr(x, "roc") <- roc_utils_unpercent(attr(x, "roc"))
}
return(x)
}
roc_utils_unpercent.ci.sp <- function(x) {
if (attr(x, "roc")$percent) {
x[] <- x / 100
attr(x, "sensitivities") <- attr(x, "sensitivities") / 100
rownames(x) <- attr(x, "sensitivities")
attr(x, "roc") <- roc_utils_unpercent(attr(x, "roc"))
}
return(x)
}
roc_utils_unpercent.ci.se <- function(x) {
if (attr(x, "roc")$percent) {
x[] <- x / 100
attr(x, "specificities") <- attr(x, "specificities") / 100
rownames(x) <- attr(x, "specificities")
attr(x, "roc") <- roc_utils_unpercent(attr(x, "roc"))
}
return(x)
}
roc_utils_unpercent.ci.coords <- function(x) {
stop("Cannot convert ci.coords object to percent = FALSE")
}
# Returns a ROC curve with percent=TRUE
roc_utils_topercent <- function(x) {
UseMethod("roc_utils_topercent")
}
roc_utils_topercent.roc <- function(x) {
if (! x$percent) {
if (! is.null(x$auc)) {
x$auc <- roc_utils_topercent(x$auc)
}
x$sensitivities <- x$sensitivities * 100
x$specificities <- x$specificities * 100
x$percent <- TRUE
if (!is.null(x$call)) {
x$call$percent <- TRUE
}
if (!is.null(x$ci)) {
x$ci <- roc_utils_topercent(x$ci)
}
}
return(x)
}
roc_utils_topercent.auc <- function(x) {
if (! attr(x, "percent")) {
newx <- x * 100
attributes(newx) <- attributes(x)
x <- newx
attr(x, "percent") <- TRUE
if (is.numeric(attr(x, "partial.auc"))) {
attr(x, "partial.auc") <- attr(x, "partial.auc") * 100
}
if (! is.null(attr(x, "roc"))) {
attr(x, "roc") <- roc_utils_topercent(attr(x, "roc"))
}
}
return(x)
}
roc_utils_topercent.ci.auc <- function(x) {
if (! attr(attr(x, "auc"), "percent")) {
x[] <- x * 100
attr(x, "auc") <- roc_utils_topercent(attr(x, "auc"))
}
return(x)
}
roc_utils_topercent.ci.thresholds <- function(x) {
if (! attr(x, "roc")$percent) {
x$sensitivity[] <- x$sensitivity * 100
x$specificity[] <- x$specificity * 100
attr(x, "roc") <- roc_utils_topercent(attr(x, "roc"))
}
return(x)
}
roc_utils_topercent.ci.sp <- function(x) {
if (! attr(x, "roc")$percent) {
x[] <- x * 100
attr(x, "sensitivities") <- attr(x, "sensitivities") * 100
rownames(x) <- paste(attr(x, "sensitivities"), "%", sep="")
attr(x, "roc") <- roc_utils_topercent(attr(x, "roc"))
}
return(x)
}
roc_utils_topercent.ci.se <- function(x) {
if (! attr(x, "roc")$percent) {
x[] <- x * 100
attr(x, "specificities") <- attr(x, "specificities") * 100
rownames(x) <- paste(attr(x, "specificities"), "%", sep="")
attr(x, "roc") <- roc_utils_topercent(attr(x, "roc"))
}
return(x)
}
roc_utils_topercent.ci.coords <- function(x) {
stop("Cannot convert ci.coords object to percent = TRUE")
}
|
4ef97c729fc356dccf7a61d7eb2daff29ebb5870
|
c8267d9b8cc820543db41e9061c66371af97928a
|
/scripts/ipedsgeo.r
|
cb72df24ee03457a7f8ccf3a9f87389c5e443431
|
[] |
no_license
|
btskinner/colchoice_rep
|
be2e81d8c5436639568a6d7b15c5249076d1487a
|
61afe56177636220284acb6feb9765f842889479
|
refs/heads/master
| 2021-11-09T20:24:29.330541
| 2021-11-08T16:05:42
| 2021-11-08T16:05:42
| 133,262,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,457
|
r
|
ipedsgeo.r
|
################################################################################
##
## IPEDS geocode
## Benjamin Skinner
## INIT: 15 January 2015
##
################################################################################
## clear memory
rm(list=ls())
## libraries
library(zipcode); data(zipcode)
## raw file location
raw <- '../ipeds/'
## cleaned data location
cln <- '../data/'
################################################################################
## FUNCTIONS
################################################################################
ipedsgeo <- function(f){
## file names
dest <- paste0(raw, paste0(f,'.zip')); flat <- paste0(tolower(f),'.csv')
## check to see if file exists on in raw directory
if(file.exists(dest)){
## message
message(paste0('\n ',f, '...get local file...'))
## unzip and store data
ipeds <- read.csv(unz(dest, flat), header = TRUE)
} else {
## message
message(paste0('\n ', f, '...downloading file...'))
## download the file, save locally, and open
url <- paste0('nces.ed.gov/ipeds/datacenter/data/', paste0(f,'.zip'))
download.file(url, dest, method = 'curl')
ipeds <- read.csv(unz(dest, flat), header = TRUE)
}
## get year; correct 1990s years
year <- as.numeric(gsub('\\D','',f))
if (year < 1000) {year <- as.numeric(paste0('19',year))}
if (year > 9000) {year <- as.numeric(paste0('19',substr(year,1,2)))}
## lower names
names(ipeds) <- tolower(names(ipeds))
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## SUBSET LISTS
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## areas that aren't lower 48 + DC (need for below)
outstates <- c('AK','HI','AS','FM','GU','MH','MP','PW','PR','VI')
## states with zip codes that begin with zero
zip0st <- c('CT','MA','ME','NH','NJ','RI','VT')
## Title IV eligibility codes and associated years (not consistent)
opecode <- list(c(1),c(1,2),c(1,2,4),c(1,2,8))
opflyrs <- list(c(2002),c(1997:1999,2001,2011,2013),c(2000,2003:2005),
c(2006:2010,2012))
## fix bad zipcodes: tuple => unitid,corrected zip
fix <- list(c(219578,37205),c(246974,94578),c(371830,92841),c(400187,92843),
c(438063,79903),c(438674,39401),c(439279,63043),c(444884,24016),
c(122269,90275),c(180142,59405),c(192855,10458),c(203951,43614),
c(249779,19114),c(407009,85306),c(420574,85212),c(434399,82930),
c(437574,30033),c(440615,31023),c(443313,98109),c(445106,90804),
c(445805,91203),c(448886,85004),c(448187,75007),c(178022,64156),
c(178785,64111),c(192475,11201),c(193317,10801),c(204547,44131),
c(384360,30907),c(384379,31605),c(384388,30213),c(384397,31093),
c(423476,31909),c(440305,64057),c(442000,64120),c(445009,19464),
c(448549,46410),c(451680,33868),c(103945,86004),c(112455,92866),
c(138497,34450),c(217907,29550),c(367981,60465),c(431035,92264),
c(406608,92612),c(157827,42701),c(137078,33781),c(109785,91702),
c(116439,92618),c(138309,34474),c(262448,92868),c(372718,33764),
c(434885,33607),c(381334,92590),c(373951,48210),c(146685,62794),
c(140711,30297),c(119128,92845),c(120537,92831),c(123943,92831),
c(133085,33759),c(136491,33760),c(377421,20110))
## check for lon/lat
if('longitud' %in% colnames(ipeds)){
## message
message('\n lon/lat exist in this file...adding...')
## subset vars
vars <- c('unitid','zip','stabbr','longitud','latitude','sector',
'opeflag'); subset <- ipeds[,vars]
## drop if not 48 + DC
subset <- subset[!(subset$stabbr %in% outstates),]
## drop if an administrative unit
subset <- subset[subset$sector != 0,]
## add vars for sector
subset$pub4yr <- ifelse(subset$sector == 1, 1, 0)
subset$pub2yr <- ifelse(subset$sector == 4, 1, 0)
subset$public <- ifelse(subset$sector == 1 | subset$sector == 4, 1, 0)
## drop if not Title IV
if (year %in% opflyrs[[1]]){
subset <- subset[(subset$opeflag %in% opecode[[1]]),]
} else if (year %in% opflyrs[[2]]){
subset <- subset[(subset$opeflag %in% opecode[[2]]),]
} else if (year %in% opflyrs[[3]]){
subset <- subset[(subset$opeflag %in% opecode[[3]]),]
} else {
subset <- subset[(subset$opeflag %in% opecode[[4]]),]
}
## clean zips: remove hyphens; strip ZIP+4; add leading zeros
subset$zip <- gsub('-', '', subset$zip)
ind <- (subset$stabbr %in% zip0st)
subset$zip[ind] <- substr(as.numeric(subset$zip[ind]), 1, 4)
subset$zip[!ind] <- substr(as.numeric(subset$zip[!ind]), 1, 5)
ind <- (as.numeric(subset$zip) < 1000 & !is.na(as.numeric(subset$zip)))
subset$zip[ind] <- paste0('00',as.numeric(subset$zip[ind]))
ind <- (as.numeric(subset$zip) < 10000 & as.numeric(subset$zip) > 1000
& !is.na(as.numeric(subset$zip)))
subset$zip[ind] <- paste0('0',as.numeric(subset$zip[ind]))
## loop to fix bad zipcodes
for(i in 1:length(fix)){
ind <- (subset$unitid == fix[[i]][[1]])
subset$zip[ind] <- fix[[i]][[2]]
}
## subset and add to list
vars <- c('unitid','zip','longitud','latitude','stabbr','public',
'pub4yr','pub2yr'); subset <- subset[,vars]
## message
message('\n adding dataframe to list...')
## return dataframe as list element
gls <- list(subset); names(gls) <- paste0('y', year); return(gls)
} else {
## message
message('\n no lon/lat...comparing to 2013 data...')
## subset data (NB: variables the same for 1997-2008...except once...)
if (year == 1997){
vars <- c('unitid','stabbr','zip','sector','opeind')
subset <- ipeds[,vars]
names(subset)[names(subset) == 'opeind'] <- 'opeflag'
} else {
vars <- c('unitid','stabbr','zip','sector','opeflag')
subset <- ipeds[,vars]
}
## drop if not 48
subset <- subset[!(subset$stabbr %in% outstates),]
## drop if an administrative unit
subset <- subset[subset$sector != 0,]
## add vars for sector
subset$pub4yr <- ifelse(subset$sector == 1, 1, 0)
subset$pub2yr <- ifelse(subset$sector == 4, 1, 0)
subset$public <- ifelse(subset$sector == 1 | subset$sector == 4, 1, 0)
## drop if not Title IV
if (year %in% opflyrs[[1]]){
subset <- subset[(subset$opeflag %in% opecode[[1]]),]
} else if (year %in% opflyrs[[2]]){
subset <- subset[(subset$opeflag %in% opecode[[2]]),]
} else if (year %in% opflyrs[[3]]){
subset <- subset[(subset$opeflag %in% opecode[[3]]),]
} else {
subset <- subset[(subset$opeflag %in% opecode[[4]]),]
}
## message
message('\n fixing bad zipcodes...')
## clean zipcodes: only 5 ZIP, add leading 0
subset$zip <- gsub('-', '', subset$zip)
ind <- (subset$stabbr %in% zip0st)
subset$zip[ind] <- substr(as.numeric(subset$zip[ind]), 1, 4)
subset$zip[!ind] <- substr(as.numeric(subset$zip[!ind]), 1, 5)
ind <- (as.numeric(subset$zip) < 1000 & !is.na(as.numeric(subset$zip)))
subset$zip[ind] <- paste0('00',as.numeric(subset$zip[ind]))
ind <- (as.numeric(subset$zip) < 10000 & as.numeric(subset$zip) > 1000
& !is.na(as.numeric(subset$zip)))
subset$zip[ind] <- paste0('0',as.numeric(subset$zip[ind]))
## loop to fix bad zipcodes
for(i in 1:length(fix)){
ind <- (subset$unitid == fix[[i]][[1]])
subset$zip[ind] <- fix[[i]][[2]]
}
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## ATTEMPT (1): FILL IN
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## message
message('\n first attempt: backfill from future matches...')
## init comb data; create new lon/lat columns; merge
vars <- c('unitid','zip','stabbr','public','pub4yr','pub2yr')
comb <- subset[,vars]; comb$lat <- comb$lon <- NA
## loop through latest 4 years that have lon/lat
for(i in 1:4){
## get year; message
y <- 2014 - i; message(paste0('\n trying year: ', y,'...'))
## get data from newer year; don't need stabbr or sector
n <- geolist[[i]]
n <- n[,!colnames(n) %in% c('stabbr','public','pub4yr','pub2yr')]
## merge
comb <- merge(comb, n, by = 'unitid', all.x = T)
## fill in if zip is the same
ind <- (is.na(comb$lon) & !is.na(comb$zip.y)
& comb$zip.x == comb$zip.y)
comb$lon[ind] <- comb$longitud[ind]
ind <- (is.na(comb$lat) & !is.na(comb$zip.y)
& comb$zip.x == comb$zip.y)
comb$lat[ind] <- comb$latitude[ind]
## clean up
vars <- c('unitid','zip.x','lon','lat','stabbr','public',
'pub4yr','pub2yr'); comb <- comb[,vars]
names(comb)[names(comb) == 'zip.x'] <- 'zip'
## show proportion missing
message('\n proportion missing...'); print(propmiss(comb))
}
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## ATTEMPT (2): ZIP CODE CENTROID
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## message
message('\n second attempt: use zipcode centroids...')
## try to geocode zipcode next (could use google geocode...)
comb <- merge(comb, zipcode, by = 'zip', all.x = T)
## replace missing lon/lat with zip centroid lon/lat
ind <- (is.na(comb$lon)); comb$lon[ind] <- comb$longitude[ind]
ind <- (is.na(comb$lat)); comb$lat[ind] <- comb$latitude[ind]
## show proportion missing
vars <- c('unitid','zip','lon','lat','stabbr','public','pub4yr','pub2yr')
comb <- comb[,vars]
message('\n proportion missing...'); print(propmiss(comb))
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## OUTPUT
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## final subset of data; rename
names(comb)[names(comb) == 'lon'] <- 'longitud'
names(comb)[names(comb) == 'lat'] <- 'latitude'
## message
message('\n adding dataframe to list...')
## return dataframe as list element
gls <- list(comb); names(gls) <- paste0('y', year); return(gls)
}
}
## from: https://gist.github.com/stephenturner/841686
propmiss <- function(dataframe) {
m <- sapply(dataframe, function(x) {
data.frame(
nmiss=sum(is.na(x)),
n=length(x),
propmiss=sum(is.na(x))/length(x)
)
})
d <- data.frame(t(m))
d <- sapply(d, unlist)
d <- as.data.frame(d)
d$variable <- row.names(d)
row.names(d) <- NULL
d <- cbind(d[ncol(d)],d[-ncol(d)])
return(d[order(d$propmiss), ])
}
################################################################################
## RUN
################################################################################
## IPEDS files
ipedsfiles <- list('HD2013','HD2012','HD2011','HD2010','HD2009','HD2008',
'HD2007','HD2006','HD2005','HD2004','HD2003','HD2002',
'FA2001HD','FA2000HD','IC99_HD','ic98hdac','ic9798_HDR')
## init final list (needs to be called geolist...kludge)s
geolist <- list()
## iterate through IPEDS files; add to list
for(i in 1:length(ipedsfiles)){
f <- ipedsfiles[i]
geolist <- c(geolist, ipedsgeo(f))
}
################################################################################
## CHECK MISSING
################################################################################
## apply across list items
lapply(geolist, FUN = function(x){propmiss(x)})
################################################################################
## SAVE
################################################################################
save(geolist, file = paste0(cln, 'ipedsyeargeo.rda'))
|
9dff8cb333eb86fba109a2e93e848091e110e3b0
|
34b1ab46a70fe81143874a40d6493c0254f1e5c9
|
/R/empirical_bayes_baseball_binomial_regression.R
|
be91ed84e3a69a9fe9bcb3505e11c66872f0f541
|
[] |
no_license
|
yama1968/Spikes
|
5f974a20812dbd88f789cabf7720826d358f8e76
|
498b0cacfc23627ecee743f012a6fda6451cda7f
|
refs/heads/master
| 2021-06-06T00:33:33.637745
| 2020-11-14T18:49:25
| 2020-11-14T18:49:25
| 29,531,065
| 2
| 0
| null | 2020-11-12T21:13:21
| 2015-01-20T13:29:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,461
|
r
|
empirical_bayes_baseball_binomial_regression.R
|
##
# http://varianceexplained.org/r/empirical_bayes_baseball/
library(dplyr)
library(tidyr)
library(Lahman)
career <- Batting %>%
filter(AB > 0) %>%
anti_join(Pitching, by = "playerID") %>%
group_by(playerID) %>%
summarize(H = sum(H), AB = sum(AB)) %>%
mutate(average = H / AB)
# use names along with the player IDs
career <- Master %>%
tbl_df() %>%
select(playerID, nameFirst, nameLast) %>%
unite(name, nameFirst, nameLast, sep = " ") %>%
inner_join(career, by = "playerID") %>%
select(-playerID)
# just like the graph, we have to filter for the players we actually
# have a decent estimate of
career_filtered <- career %>%
filter(AB >= 500)
m <- MASS::fitdistr(career_filtered$average, dbeta,
start = list(shape1 = 1, shape2 = 10))
m
alpha0 <- m$estimate[1]
beta0 <- m$estimate[2]
career_eb <- career %>%
mutate(eb_estimate = (H + alpha0) / (AB + alpha0 + beta0)) %>%
mutate(alpha1 = H + alpha0,
beta1 = AB - H + beta0) %>%
arrange(desc(eb_estimate))
career_eb %>% arrange(eb_estimate)
career_eb %>% arrange(desc(eb_estimate))
library(VGAM)
# negative log likelihood of data given alpha; beta
ll <- function(alpha, beta) {
-sum(dbetabinom.ab(career$H, career$AB, alpha, beta, log = TRUE))
}
m <- mle(ll, start = list(alpha = 1, beta = 10), method = "L-BFGS-B")
coef(m)
## http://varianceexplained.org/r/bayesian_fdr_baseball/
career_eb <- career_eb %>%
mutate(PEP = pbeta(.3, alpha1, beta1)) %>%
arrange(PEP) %>%
mutate(qvalue = cummean(PEP))
## http://varianceexplained.org/r/beta_binomial_baseball/
library(ggplot2)
career %>%
filter(AB >= 20) %>%
ggplot(aes(AB, average)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) +
scale_x_log10()
library(gamlss)
library(broom)
fit <- gamlss(cbind(H, AB - H) ~ log(AB),
data = career_eb,
family = BB(mu.link = "identity"))
td <- tidy(fit)
td
mu <- fitted(fit, parameter = "mu")
sigma <- fitted(fit, parameter = "sigma")
head(mu)
career_eb_wAB <- career_eb %>%
dplyr::select(name, H, AB, original_eb = eb_estimate) %>%
mutate(mu = mu,
alpha0 = mu / sigma,
beta0 = (1 - mu) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H,
new_eb = alpha1 / (alpha1 + beta1))
qplot(data = career_eb_wAB, original_eb, new_eb, alpha = I(0.1), color = log10(AB))
qplot(data = career_eb_wAB, AB, original_eb - new_eb, alpha = I(0.03), log = "x")
|
e92452b43830355e3d6659338bb45ce16fb316f7
|
8d70cec5f2dba44c28ab8407e44b9076a449ff76
|
/tests/testthat/test-zoo.R
|
a560b9ec98bc74c208da9f0c50a40e2335901564
|
[
"Apache-2.0"
] |
permissive
|
poissonconsulting/nrp
|
b0cc4d538a646c99bb4f37a173600058a48780f3
|
69bc4c52f38ec7fde95b5e0b7a77e87eaf4cabaf
|
refs/heads/main
| 2023-08-21T12:16:09.905121
| 2023-08-15T22:48:09
| 2023-08-15T22:48:09
| 169,335,839
| 0
| 0
|
NOASSERTION
| 2023-08-15T22:48:10
| 2019-02-06T00:42:44
|
R
|
UTF-8
|
R
| false
| false
| 3,862
|
r
|
test-zoo.R
|
test_that("nrp_read_zooplankton_file works", {
conn <- nrp_create_db(path = ":memory:", ask = FALSE)
teardown(DBI::dbDisconnect(conn))
path <- system.file("extdata", "zooplankton/Arzp20.xlsx",
package = "nrp", mustWork = TRUE)
wrong_path <- system.file("extdata", "ar-empty.rtf", package = "nrp", mustWork = TRUE)
data <- nrp_read_zooplankton_file(path = path, db_path = conn) %>%
suppressWarnings()
expect_is(data, "tbl_df")
expect_identical(nrow(data), 60L)
check_zoo_raw_data(data)
expect_error(nrp_read_zooplankton_file(path = path, db_path = conn, system = "columbia"),
"'system' must be one of 'arrow', 'kootenay'.")
expect_error(nrp_read_zooplankton_file(path = wrong_path, db_path = conn),
"Please ensure input data is a valid excel spreadsheet \\(.xlsx\\).")
})
test_that("nrp_read_zooplankton works", {
conn <- nrp_create_db(path = ":memory:", ask = FALSE)
teardown(DBI::dbDisconnect(conn))
path <- system.file("extdata", "zooplankton",
package = "nrp", mustWork = TRUE)
data <- nrp_read_zooplankton(path, db_path = conn)
expect_is(data, "tbl_df")
expect_identical(length(data), 164L)
expect_identical(nrow(data), 132L)
expect_error(nrp_read_zooplankton("not-a-path", db_path = conn),
"path 'not-a-path' must exist")
path <- system.file("extdata",
package = "nrp", mustWork = TRUE)
data <- nrp_read_zooplankton(path, db_path = conn)
expect_identical(data, list(x = 1)[-1])
})
test_that("nrp_upload_zooplankton and nrp_download_zoo_sample works", {
conn <- nrp_create_db(path = ":memory:", ask = FALSE)
teardown(DBI::dbDisconnect(conn))
path <- system.file("extdata", "zooplankton/Arzp20.xlsx",
package = "nrp", mustWork = TRUE)
data <- nrp_read_zooplankton_file(path = path, db_path = conn) %>%
suppressWarnings()
nrp_upload_zooplankton(data = data, db_path = conn)
db_data <- readwritesqlite::rws_read_table("Zooplankton", conn = conn)
expect_identical(length(db_data), 7L)
expect_identical(nrow(db_data), 8760L)
db_sample <- readwritesqlite::rws_read_table("ZooplanktonSample", conn = conn)
expect_identical(length(db_sample), 12L)
expect_identical(nrow(db_sample), 60L)
nrp_upload_zooplankton(data = data, db_path = conn, replace = TRUE)
db_data <- readwritesqlite::rws_read_table("Zooplankton", conn = conn)
expect_identical(length(db_data), 7L)
expect_identical(nrow(db_data), 8760L)
expect_error(nrp_upload_zooplankton(data = data, db_path = conn),
"UNIQUE constraint failed: ZooplanktonSample.Date, ZooplanktonSample.SiteID, ZooplanktonSample.Replicate, ZooplanktonSample.FileName")
db_data <- nrp_download_zooplankton(start_date = "2020-04-01",
end_date = "2020-04-25", db_path = conn)
expect_identical(length(db_data), 150L)
expect_identical(nrow(db_data), 12L)
db_data <- nrp_download_zooplankton(start_date = "2020-04-01",
end_date = "2020-04-25",
counts = TRUE,
db_path = conn)
expect_identical(length(db_data), 150L)
expect_identical(nrow(db_data), 12L)
expect_true(all(is.na(db_data$BBosm)))
expect_error(nrp_download_zooplankton(start_date = "2020-04-01",
end_date = "2020-04-25", db_path = conn,
sites = "wrong"),
"1 or more invalid site names")
expect_error(nrp_download_zooplankton(start_date = "2020-04-01",
end_date = "2020-04-25", db_path = conn,
parameters = "wrong"),
"1 or more invalid parameter names")
})
|
89c3a2caea79d381820781ad3abb44c07d97de59
|
3cdc64f0da9df8e84a83edc7270c37e519128f42
|
/spatialAnalysis/UIBDownscaling.R
|
462387733dd5d0d810d420e3677d5865404e7ccc
|
[] |
no_license
|
fidelsteiner/BasicCode
|
9360144a67dfb7a95194c2b9797b3cea89730e11
|
7bd00f81f3ce1fc53219b2565503e02fde68c69c
|
refs/heads/master
| 2023-03-04T20:10:26.485009
| 2023-02-21T16:55:40
| 2023-02-21T16:55:40
| 210,885,359
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,123
|
r
|
UIBDownscaling.R
|
################################################################################
# Downscaling of climate data for the Upper Indus Basin
#
# UIBDownscaling.R
#
# ReadMe:
#
# Downscaling climate data (T, P, snow cover) from ERA5-Land based on HydroBasins.
# Downscaling using the KrigR package
# Requires ECMWF API Key and Username
#
# Input:
# 1) HydroBasins (https://www.hydrosheds.org/products/hydrobasins) for domain
#
#
# Created: 2023/02/12
# Latest Revision: 2023/02/21
#
# Jakob F Steiner | jakob@x-hydrolab.org | x-hydrolab.org
################################################################################
# clear entire workspace (excl. packages)
rm(list = ls())
gc()
# define &-sign for pasting string-elements
'&' <- function(...) UseMethod('&')
'&.default' <- .Primitive('&')
'&.character' <- function(...) paste(...,sep='')
# packages (if not installed yet: install.packages('examplePackage')
library(devtools)
#Sys.setenv(R_REMOTES_NO_ERRORS_FROM_WARNINGS="true")
#devtools::install_github("https://github.com/ErikKusch/KrigR")
library(KrigR)
# load basin shp file
path_outlines <- 'C:\\Work\\GeospatialData\\HydroSheds\\hybas_as_lev01-12_v1c'
fnUIBOutline <- 'hybas_as_lev08_v1c'
RootDir <- 'C:\\Work\\Research\\Collaborations\\HMA\\NeoshaMIT\\'
path_output <- paste(RootDir&'Output')
path_rawdata <- paste(RootDir&'BaseData\\ERA5Land')
path_DEMdata <- paste(RootDir&'BaseData\\DEMData')
# Load subcatchments
ogrInfo(path_outlines,fnUIBOutline)
UIB_pEXT<-readOGR(dsn=path_outlines,layer=fnUIBOutline)
UIB_pEXT<-SpatialPolygons(UIB_pEXT@polygons,proj4string=UIB_pEXT@proj4string)
#projection(UIB_pEXT)<-CRS("+init=epsg:4326")
# Read ECMWF API KEY DATA
ECMWF_API <- read.csv('C:\\Work\\Code\\ecmwf_API.csv')
# Load Temperature data
UIB_RAW_T <- download_ERA(
Variable = '2m_temperature',
Type = 'reanalysis',
DataSet = 'era5-land',
DateStart = '2017-01-01',
DateStop = '2017-12-31',
TResolution = 'day',
TStep = 1,
Extent = UIB_pEXT[19283],
Dir = path_rawdata,
API_User = ECMWF_API$ECMWF_USER,
API_Key = ECMWF_API$ECMWF_KEY
)
UIB_RAW_P <- download_ERA(
Variable = 'total_precipitation',
Type = 'reanalysis',
DataSet = 'era5-land',
DateStart = '2017-01-01',
DateStop = '2017-12-31',
TResolution = 'day',
TStep = 1,
Extent = UIB_pEXT[19283],
Dir = path_rawdata,
API_User = ECMWF_API$ECMWF_USER,
API_Key = ECMWF_API$ECMWF_KEY
)
UIB_RAW_SC <- download_ERA(
Variable = 'snow_cover',
Type = 'reanalysis',
DataSet = 'era5-land',
DateStart = '2017-01-01',
DateStop = '2017-01-31',
TResolution = 'day',
TStep = 1,
Extent = UIB_pEXT[19283],
Dir = path_rawdata,
API_User = ECMWF_API$ECMWF_USER,
API_Key = ECMWF_API$ECMWF_KEY
)
Covs_ls <- download_DEM(Train_ras = UIB_RAW_SC,
Target_res = 0.01,
Shape = UIB_pEXT[19283],
Dir = path_DEMdata,
Keep_Temporary = TRUE)
Shisper_Krig_T <- krigR(Data = UIB_RAW_T,
Covariates_coarse = Covs_ls[[1]],
Covariates_fine = Covs_ls[[2]],
KrigingEquation = 'ERA ~ DEM',
Keep_Temporary = TRUE,
Cores = 4,
FileName = 'ShisperDownscaled_T.nc',
Dir = path_output)
Shisper_Krig_P <- krigR(Data = UIB_RAW_P,
Covariates_coarse = Covs_ls[[1]],
Covariates_fine = Covs_ls[[2]],
KrigingEquation = 'ERA ~ DEM',
Keep_Temporary = TRUE,
Cores = 4,
FileName = 'ShisperDownscaled_P.nc',
Dir = path_output)
Shisper_Krig_SC <- krigR(Data = UIB_RAW_SC,
Covariates_coarse = Covs_ls[[1]],
Covariates_fine = Covs_ls[[2]],
KrigingEquation = 'ERA ~ DEM',
Keep_Temporary = TRUE,
Cores = 4,
FileName = 'ShisperDownscaled_SC.nc',
Dir = path_output)
|
c0ba02e93d03c8303a1f235a4106b6cda2cd32ec
|
c2b88cbb2214e43945be1884a3e279a84bf1c945
|
/man/getSymFracBSignal.Rd
|
1923ca77bc8251687b8a903ed74a444f70f47356
|
[] |
no_license
|
sblanck/MPAgenomics
|
fc95ac97a917e02884c7b8d580cd8fc600bb828a
|
cb9450732d3bd060b5ef1332c55085860c08ed47
|
refs/heads/master
| 2021-07-06T06:40:08.862794
| 2021-04-23T14:51:48
| 2021-04-23T14:51:48
| 73,392,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,964
|
rd
|
getSymFracBSignal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getSymFracBsignal.R
\name{getSymFracBSignal}
\alias{getSymFracBSignal}
\title{Extract symmetrized allele B fraction signal from aroma files}
\usage{
getSymFracBSignal(
dataSetName,
file,
chromosome,
normalTumorArray,
verbose = TRUE
)
}
\arguments{
\item{dataSetName}{The name of the data-set folder (it must correspond to a folder name in rawData folder.)}
\item{file}{The name of the file in dataSetName to extract.}
\item{chromosome}{A vector with the chromosomes for which the symmetrized signal will be extracted.}
\item{normalTumorArray}{Only in the case of normal-tumor study. A csv file or a data.frame containing the mapping between normal and tumor files
The first column contains the name of normal files and the second the names of associated tumor files.}
\item{verbose}{If TRUE, print some informations.}
}
\value{
a list of length the number of chromosome containing a data.frame with columns:
\describe{
\item{chromosome}{chromosome corresponding to the signal.}
\item{position}{Positions associated to the allele B fraction.}
\item{fracB}{One column named by the data file name. It contains the symmetrized allele B fraction signal for the specified profile.}
\item{featureNames}{Names of the probes.}
}
}
\description{
Extract symmetrized allele B fraction signals from aroma files. It requires to have executed the normalization process suggested by aroma packages, by using
\link{signalPreProcess} for example.
}
\details{
The aroma architecture must be respected. The working directory must contain rawData folder and totalAndFracBData folder.
To easily access the names of the files available in a dataset, one can use the \link{getListOfFiles} function.
}
\examples{
\dontrun{
#DO NOT EXECUTE before reading the vignette
fracB=getSymFracBSignal("data1",5,normalTumorArray)
fracB=getSymFracBSignal("data2",5)
}
}
\author{
Quentin Grimonprez
}
|
f1180c51bcb9a51d8f7b4e4d2ea21260619f42f3
|
3dfbdf712149ad437f9757e75b394f36e27c231d
|
/generate_revisions_on_cities.R
|
7097a4e2975c6c4efd9950a5f6e35394002ecf86
|
[] |
no_license
|
ELAZHARIOussama/OPTD
|
8cafe8adceb14788a233995717ed2c69b86cc9e6
|
2576ba8d754b2eda3d27b806432e11a10b29b0e8
|
refs/heads/master
| 2021-01-19T11:02:58.653476
| 2015-09-08T09:57:37
| 2015-09-08T09:57:37
| 40,601,237
| 0
| 1
| null | 2015-08-14T11:33:26
| 2015-08-12T13:09:59
|
R
|
UTF-8
|
R
| false
| false
| 2,098
|
r
|
generate_revisions_on_cities.R
|
# Join the Wikivoyage revision history with OPTD city data (linked through
# Wikidata sitelinks) and write one row per revision made on a city page.
#
# Args:
#   input:              comma-separated Wikivoyage revision history with
#                       columns title, timestamp, contributor.
#   optdCities:         "^"-separated OPTD extract: IATA code, city name,
#                       location type, wikipedia redirect URL.
#   wikipediaWikidata:  space-separated file mapping wikipedia short URLs (V1)
#                       to wikidata references (V2).
#   wikivoyageWikidata: "^"-separated file mapping wikivoyage short URLs (V1)
#                       to wikidata references (V2).
#   output:             path of the "^"-separated result written at the end.
#
# Side effects: also writes the intermediate join to "optd_wv.csv" in the
# current working directory.
revisions_on_cities <- function(input, optdCities, wikipediaWikidata, wikivoyageWikidata, output) {
  # Read the Wikivoyage revision history.
  data_frame <- read.table(input, sep = ",", na.strings = "NA",
                           col.names = c("title", "timestamp", "contributor"))
  # URL-encode titles as short_url; short_url is the join key used below.
  data_frame$short_url <- sapply(data_frame$title,
                                 function(x) URLencode(gsub(" ", "_", x)))
  # Read the file extracted from OPTD.
  # FIX: the original read from the undefined global `optd_cities` instead of
  # the `optdCities` argument, so the function only worked when a global of
  # that exact name happened to exist in the session.
  citiesByName <- read.table(optdCities, sep = "^",
                             col.names = c("IATA code", "City name",
                                           "Type of location",
                                           "wikipedia url redirection"))
  # The wikipedia page name starts after the 29-character URL prefix.
  citiesByName$short_url <- substr(citiesByName[, 4], 30, 1000000L)
  # Read the files extracted from wikidata sitelinks.
  wikipedia_wikidata <- read.table(wikipediaWikidata, sep = " ")
  wikivoyage_wikidata <- read.table(wikivoyageWikidata, sep = "^")
  # First join: OPTD cities -> wikidata reference (via wikipedia short URL).
  optd_wd <- merge(citiesByName[citiesByName$short_url != "", ],
                   wikipedia_wikidata, by.x = "short_url", by.y = "V1",
                   all.x = TRUE)
  names(optd_wd) <- c("short_url", "IATA code", "city name",
                      "type of location", "wikipedia url redirection",
                      "wikidata reference")
  # Second join: attach the wikivoyage short URL (V1) via the wikidata ref.
  optd_wv <- merge(optd_wd, wikivoyage_wikidata,
                   by.x = "wikidata reference", by.y = "V2", all.x = TRUE)
  write.table(optd_wv, file = "optd_wv.csv", sep = "^")
  # Last join: attach the revision-history rows (inner join on short URL).
  cities1 <- merge(optd_wv, data_frame, by.x = "V1", by.y = "short_url")
  # Drop duplicated/key columns from the result.
  to_drop <- c("short_url", "City.name", "city name", "V1")
  cities <- cities1[, !(names(cities1) %in% to_drop)]
  # NOTE(review): the rename below is positional and the surviving "title"
  # column ends up labelled "wikipedia url redirection" (labels shift by one
  # after dropping V1). Likely "title" should be dropped and V1 kept as
  # "short url" — preserved as-is here; verify intended mapping.
  names(cities) <- c("wikidata reference", "short url", "IATA code",
                     "type of location", "wikipedia url redirection",
                     "timestamp", "contributor")
  write.table(cities, file = output, sep = "^")
}
revisions_on_cities("parse_enwikivoyage-20150702-stub-meta-history_split.csv","citiesByNames1.csv","wikipedia_wikidata.csv","wikivoyage_wikidata.csv","wikivoyage_cities_history.csv")
|
3d3637d0ffda56cf3800ab3f160e0d869dc1cdc8
|
c194c5236006a758b29bd4d530ad563dc9ecab7e
|
/inst/apps/probability_functions2/ui.R
|
8c77e7d881991a72eab85133e3a0d0e6eef6daea
|
[] |
no_license
|
Auburngrads/teachingApps
|
1087c20a21992433a2f8451db7b1eaa7d1d2cb89
|
b79c192e5f74c5e8376674d4fb9e0b95a426fe03
|
refs/heads/master
| 2021-03-16T07:49:56.579527
| 2020-06-14T12:10:12
| 2020-06-14T12:10:12
| 51,677,745
| 15
| 7
| null | 2018-03-01T03:44:58
| 2016-02-14T03:22:47
|
R
|
UTF-8
|
R
| false
| false
| 2,509
|
r
|
ui.R
|
# Shiny UI for the "Probability Functions" teaching app: a fixed navbar with
# static content tabs (rendered server-side via uiOutput) plus, per function,
# an interactive tab where the user edits plotting code in an Ace editor and
# evaluates it against a plot output.
ui = navbarPage(title = 'Probability Functions',
collapsible = T,
position = 'fixed-top',
theme = add_theme(getShinyOption('theme')),
header = add_css(),
footer = add_logo(),
# Static overview / relationship-table tabs (content supplied by the server).
tabPanel('Overview',
uiOutput('overview', class = 'ta-text')),
tabPanel('Relationship Table',
uiOutput('functable', class = 'ta-text')),
# Drop-down menu grouping one sub-tab set per probability function.
navbarMenu('The Functions', icon = icon('folder-open'),
tabPanel('Cumulative Distribution Function',
tabsetPanel(type = 'pills',
tabPanel('Properties of the CDF',
uiOutput('cdfdemo', class = 'ta-text')),
tabPanel('Computing CDF Values in R',
uiOutput('cdfr', class = 'ta-text')),
# Editable Weibull-CDF example: the editor's initial value is R code the
# server evaluates when the "Evaluate" button is pressed.
tabPanel('Interactive CDF Shiny App',
sidebarLayout(
sidebarPanel(width = 4,
shinyAce::aceEditor(fontSize = 16,
wordWrap = T,
outputId = "cdfplot",
mode = "r",
theme = "github",
value =
"par(family = 'serif',mar = c(4,6,2,1))
curve(
pweibull(x,shape = 1.7, scale = 1),
xlab = 'Time, t',
ylab = expression(F(t)[Weibull]),
ylim = c(0,1),
xlim = c(0,3),
lwd = 3,
lty = 1,
col = 2,
cex.lab = 1.5,
cex.axis = 1.5,
las = 1)"),
actionButton("evalcdf", "Evaluate")),
mainPanel(plotOutput("plotcdf", height = "600px"), width = 8))))),
# Same structure for the PDF, seeded with an exponential-density example.
tabPanel('Probability Density Function',
tabsetPanel(type = 'pills',
tabPanel('Properties',
uiOutput('pdfdemo', class = 'ta-text')),
tabPanel('Computing Values in R',
uiOutput('pdfr', class = 'ta-text')),
tabPanel('Shiny App',
sidebarLayout(
sidebarPanel(
shinyAce::aceEditor(fontSize = 16,
wordWrap = T,
outputId = "pdfplot",
mode = "r",
theme = "github",
value =
"par(family = 'serif',mar = c(4,6,2,1))
curve(dexp(x,rate = 1.7),
xlab = 'Time, t',
ylab = expression(f(t)[Exponential]),
xlim = c(0,3),
lwd = 3,
lty = 2,
col = 3,
cex.lab = 1.5,
cex.axis = 1.5,
las = 1)"),
actionButton("evalpdf", "Evaluate")),
mainPanel(plotOutput("plotpdf", height = "600px"))))))))
|
6e89387fd93a2a25d2f62520d6195bc00ee8a128
|
a9aa19072af46cbc51c1fe95c221ad5227a81b9c
|
/plot1.R
|
af5c222507540f7256aa67b491b154f416053e0d
|
[] |
no_license
|
gravi-teja/ExData_Plotting1
|
e54e8e4559644980003c4fecfe51a86e8007fa45
|
4fa6470a3d113ccf4285561b46c3d9b611e8e52e
|
refs/heads/master
| 2021-01-17T23:14:12.860913
| 2015-08-16T21:01:30
| 2015-08-16T21:01:30
| 40,806,262
| 0
| 0
| null | 2015-08-16T09:00:43
| 2015-08-16T09:00:43
| null |
UTF-8
|
R
| false
| false
| 466
|
r
|
plot1.R
|
# Recreate course plot 1: a histogram of household global active power for
# 2007-02-01 and 2007-02-02, written to "plot1.png".
# Expects "household_power_consumption.txt" (semicolon-separated, '?' = NA)
# in the current working directory.
plot1 <- function() {
  # read.csv2 matches the file's ';' separator and ',' decimal mark.
  pwr <- read.csv2("household_power_consumption.txt", na.strings = "?")
  # Fuse date and time into one POSIXlt timestamp per observation.
  pwr$Date <- strptime(paste(pwr$Date, pwr$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
  # Restrict to the two target days: from 2007-02-01 up to (excluding) 2007-02-03.
  keep <- pwr$Date >= strptime("2007-02-01", "%F") & pwr$Date < strptime("2007-02-03", "%F")
  pwr <- pwr[keep, ]
  # Render the histogram straight to a PNG device.
  png(filename = "plot1.png")
  hist(as.numeric(pwr$Global_active_power), col = "Red",
       xlab = "Global Active Power(kilowatts)", ylab = "Frequency",
       main = "Global Active Power")
  dev.off()
}
|
dfd898238c3e6be24ee46817e1fcb51d6c2396d8
|
789c0c3b0fc4abe910453a520ff6a0392b43d30d
|
/PrimRecur/waterfall_IOD.r
|
76c94da86d4b89bfd34294ba139cc151395c3e76
|
[] |
no_license
|
SMC1/JK1
|
7c71ff62441494c09b148b66b4d766377c908b4b
|
db8ec385468f8ff6b06273d6268e61ff4db3701f
|
refs/heads/master
| 2016-09-05T17:29:15.217976
| 2015-04-29T05:58:02
| 2015-04-29T05:58:02
| 6,700,479
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
waterfall_IOD.r
|
# Draw a "waterfall" bar plot of percentage IOD (integrated optical density)
# change, one bar per sample, sorted ascending so the bars form a waterfall.
#
# Args:
#   inFile:  tab-delimited file with a header; column 1 = sample labels,
#            column 2 = % change values (expected within [-100, 100]).
#   outFile: path of the PDF file to create.
waterfall_IOD <- function(inFile, outFile) {
  dataT <- read.delim(inFile)
  # Sort ascending on the % change column.
  dataT <- dataT[order(dataT[, 2], decreasing = FALSE), ]
  pdf(outFile)
  # FIX: corrected the user-facing axis-label typo "Chnage" -> "Change".
  barplot(dataT[, 2], space = 0, col = "white", names.arg = dataT[, 1],
          cex.names = 0.7, las = 3, ylim = c(-100, 100), ylab = "% IOD Change")
  dev.off()
}
#waterfall_IOD('EGFR_IOD_change.txt',"IHC_IOD_waterfall.pdf")
#waterfall_IOD('/EQL1/PrimRecur/paired/EGFR_IHC.txt',"/EQL1/PrimRecur/paired/EGFR_IHC.pdf")
#waterfall_IOD('/EQL1/PrimRecur/paired/EGFR_IHC_xeno.txt',"/EQL1/PrimRecur/paired/EGFR_IHC_xeno.pdf")
# NOTE(review): the output path below is the same ".txt" file as the input, so
# pdf() will overwrite the data file it just read — verify this is intended.
waterfall_IOD('/EQL1/PrimRecur/paired/perc_shared_mutation.txt',"/EQL1/PrimRecur/paired/perc_shared_mutation.txt")
|
27df77beb3adef23273fa5e6b82b6284708f2d58
|
0419c49a00967c2eae4c577a9fac79e7464b675b
|
/Boxplots.R
|
9694477c3be44b163b5874bf5b40bdb3cf187c7b
|
[] |
no_license
|
zerland/PhD_Code
|
1a6348f89e98da387dffd5fdfd9c2a104d116213
|
51d702adf900117d64a8f250879820e6d89e91de
|
refs/heads/master
| 2023-03-18T18:59:56.892139
| 2019-03-08T11:50:15
| 2019-03-08T11:50:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
Boxplots.R
|
####BOXPLOT ANALYSIS OF MICROARRAY DATA####
# Draws one boxplot per cohort on a 3x2 grid; disease arrays are red,
# control arrays green. The expression objects (exp_C9.LCM, exp_CHMP2B.LCM,
# exp_SALS.LCM, FTLD, VCP) must already exist in the workspace — presumably
# one column per array; TODO confirm column order matches the colour vectors.
# NOTE(review): dev.off() errors if no graphics device is open.
dev.off()
par(mfrow=c(3,2))
#C9orf72: 8 disease + 3 control arrays
boxplot(exp_C9.LCM, las=2,
col = c("red","red","red","red","red","red","red",
"red","green","green","green"), main = "C9orf72")
# NOTE(review): legend at (100, 100) is likely outside the plot region, and
# legend() expects `fill=` (not `col=`) for coloured boxes — verify on screen.
legend(100, 100, legend=c("Disease", "Control"), col=c("red","green"))
#CHMP2B: 7 disease + 3 control arrays
boxplot(exp_CHMP2B.LCM, las=2,
col = c("red","red","red","red","red","red","red"
,"green","green","green"), main = "CHMP2B")
#sALS: 7 disease + 3 control arrays
boxplot(exp_SALS.LCM, las=2,
col = c("red","red","red","red","red","red","red"
,"green","green","green"), main = "sALS")
#FTLD: 16 disease + 8 control arrays
boxplot(FTLD, las=2,
col = c("red","red","red","red","red","red","red",
"red","red","red","red","red","red","red",
"red", "red","green","green","green",
"green","green","green","green","green"), main = "FTLD")
#VCP: 7 disease + 3 control arrays
boxplot(VCP, las=2,
col = c("red","red","red","red","red","red","red"
,"green","green","green"), main = "VCP")
|
bffa15f96bc581dd42f2cccf05b089cf6c1da277
|
eff55072683a6e7f712ac56ad5b74013a9deada7
|
/run_analysis.R
|
361f5d2778d37125203b9d300d02623b54c50cad
|
[] |
no_license
|
AdryLu/Getting-and-Cleaning-Data-Week4
|
bc55c6c7042a120ceb91a5ea0f3dd01e2eadc416
|
8862b2cd747d9c6cc31f61ae749824b8838cf87b
|
refs/heads/master
| 2021-01-19T15:46:27.188281
| 2017-04-16T04:30:04
| 2017-04-16T04:30:04
| 88,227,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,724
|
r
|
run_analysis.R
|
## Tidies the UCI HAR (Human Activity Recognition) dataset:
## reads the test and train partitions, merges them, keeps only mean/std
## measurements, labels activities, and writes per-subject/activity averages.
## Unzip the archive first and set the working directory to "UCI HAR Dataset".
datafolder<-getwd()
## features.txt holds the 561 measurement variable names (column 2).
variables<-read.table(paste0(datafolder,"/features.txt"))
variablenames<-as.character(variables[,2])
variablenames<-as.list(variablenames) ##convert to list to use lapply next
## Strip special characters so the names are valid/clean column names.
## NOTE(review): sub() removes only the first "()" per name — sufficient if
## each feature name contains at most one "()"; verify against features.txt.
variablenames<-lapply(variablenames,sub,pattern="()",replacement="",fixed=TRUE)
variablenames<-lapply(variablenames,gsub,pattern="-",replacement="",fixed=TRUE)
variablenames<-unlist(variablenames) ##transform back to character vector
## Activity labels: integer level -> activity name (underscores removed).
activity_labels<-read.table(paste0(datafolder,"/activity_labels.txt"),col.names=c("level","activity"))
activity_labels$activity<-gsub("_","",activity_labels$activity)
## ---- Test partition ----
# Subject identifier per observation.
subject_test<-read.table(paste0(datafolder,"/test/subject_test.txt"),col.names=c("subject"))
# Activity code per observation.
activity_test<-read.table(paste0(datafolder,"/test/y_test.txt"),col.names=c("activity"))
# Measurements, named with the cleaned feature names.
data_test<-read.table(paste0(datafolder,"/test/X_test.txt"),col.names=variablenames)
# One wide table: subject | activity | 561 measurements.
testtable<-cbind(subject_test,activity_test,data_test)
## ---- Train partition (same layout/column names as test) ----
subject_train<-read.table(paste0(datafolder,"/train/subject_train.txt"),col.names=c("subject"))
activity_train<-read.table(paste0(datafolder,"/train/y_train.txt"),col.names=c("activity"))
data_train<-read.table(paste0(datafolder,"/train/X_train.txt"),col.names=variablenames)
traintable<-cbind(subject_train,activity_train,data_train)
## Stack test and train observations.
activitydata<-rbind(testtable,traintable)
## EXTRACT ONLY THE MEASUREMENTS ON THE MEAN AND STANDARD DEVIATION
# Keep id columns plus columns whose name contains "mean" or "std",
# excluding the meanFreq variables.
index<-(grepl("mean",names(activitydata),fixed=TRUE)
| grepl("std",names(activitydata),fixed=TRUE)
| grepl("subject",names(activitydata),fixed=TRUE)
| grepl("activity",names(activitydata),fixed=TRUE))& !grepl("meanFreq",names(activitydata),fixed=TRUE)
# Subset the merged data with the logical column index built above.
activitysubset<-activitydata[,index]
## USE DESCRIPTIVE ACTIVITY NAMES TO NAME THE ACTIVITIES IN THE DATA SET
## Convert the integer activity codes to a labelled factor.
activitysubset$activity<-factor(activitysubset$activity,levels=activity_labels[,1],labels=activity_labels[,2])
## CREATE A TIDY DATASET WITH THE AVERAGE OF EACH VARIABLE FOR EACH ACTIVITY AND EACH SUBJECT
# dplyr supplies group_by/summarise_each.
# NOTE(review): summarise_each()/funs() are deprecated in current dplyr;
# the modern equivalent is summarise(across(everything(), mean)).
library(dplyr)
averageactivitydata<-activitysubset %>% group_by(subject,activity) %>% summarise_each(funs(mean))
## Save the tidy result in the working directory.
write.table(averageactivitydata, "averageactivitydata.txt",row.names=FALSE)
|
e0360d0827b675e65e4908096680747a0951a314
|
6d65a534673543684f0a97740e7e7b831f50ea47
|
/inst/scripts/hh2/yatesppl.ex.R
|
0d5b9fb7a315d6338680bd127afd8af0fa6d4caf
|
[] |
no_license
|
cran/HH
|
91151d240d6ecc1334fd79f1b0dfbbc28ca68df6
|
a6ee768cedcebd4477bb9a5b4d0baa3d16e4dca0
|
refs/heads/master
| 2022-09-01T20:50:52.605306
| 2022-08-09T15:10:07
| 2022-08-09T15:10:07
| 17,691,800
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,968
|
r
|
yatesppl.ex.R
|
# Split-plot ANOVA demonstration on the Yates split-plot data (HH package):
# shows three equivalent ways to obtain the plots-within-blocks stratum.
### a. The whole plot column space is defined by the
###        plots %in% blocks
###    dummy variables generated by the
## alternate residuals formula: orthogonal contrasts are critical
data(yatesppl)
# x=TRUE keeps the design matrix so its columns can be inspected below.
yatesppl.resida.aov <- aov(y ~ blocks/plots,
data=yatesppl, x=TRUE,
contrasts=list(blocks=contr.helmert,
plots=contr.helmert))
summary(yatesppl.resida.aov)
t(yatesppl.resida.aov$x)
###
### b. This is the same column space defined by the
###        variety + blocks:variety
###    dummy variables generated by the
## computational shortcut
yatesppl.short.aov <-
aov(terms(y ~ blocks + variety + blocks*variety +
nitrogen + variety*nitrogen,
keep.order=TRUE), ## try it without keep.order=TRUE
data=yatesppl, x=TRUE)
summary(yatesppl.short.aov)
t(yatesppl.short.aov$x)
###
### c. We illustrate this by regressing the response variable y on
###    the variety + blocks:variety dummy variables
## project y onto blocks/plots dummy variables (columns 7:18 of the design)
plots.aov <- lm(y ~ yatesppl.resida.aov$x[,7:18], data=yatesppl)
summary.aov(plots.aov)
# Fitted values = projection of y onto the plots-within-blocks space.
y.bp <- predict(plots.aov)
variety.aov <- aov(y.bp ~ blocks*variety, data=yatesppl)
summary(variety.aov)
### and seeing that we reproduce the plots %in% blocks
### stratum of the ANOVA table
### Error: plots %in% blocks
###           Df Sum of Sq  Mean Sq  F Value    Pr(F)
###   variety  2  1786.361 893.1806 1.48534 0.2723869
### Residuals 10  6013.306 601.3306
### obtained from the complete five-factor specification.
###
## split plot analysis: the standard Error() specification gives the same
## strata directly.
yatesppl.anova <- aov(y ~ variety*nitrogen +
Error(blocks/plots/subplots),
data=yatesppl)
summary(yatesppl.anova)
###
|
6607107b8a5024f924858c3333fdeae79a472a11
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fromo/examples/runningmean.Rd.R
|
76a1cd14e42938934a97ef4bdd542ed62b663140
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 221
|
r
|
runningmean.Rd.R
|
# Example script extracted from fromo's runningmean.Rd help page.
library(fromo)
### Name: running_sum
### Title: Compute sums or means over a sliding window.
### Aliases: running_sum running_mean
### ** Examples
# Simulate 1e5 standard-normal draws, then compute a running sum over a
# 10-observation window and a running mean over a 100-observation window.
x <- rnorm(1e5)
xs <- running_sum(x,10)
xm <- running_mean(x,100)
|
8ed1d37c4e8dbefe5949fe863b50a68f1d188079
|
8aeeec08825e1b66a3a267a410e1655c06dace7b
|
/project_selection/180216_boxplots_comment_and_pullrequest_count.R
|
2fa03b7dc87c36e3c74bd13fc7249acf808db6a0
|
[] |
no_license
|
maxthemillion/gitNetAnalyzer
|
80736502f70efb9b76befeb3c7dfcf58acec0f3f
|
87d2c671dc37401fc8e097946aea0f6aaf6af333
|
refs/heads/master
| 2021-05-09T13:13:06.388117
| 2018-04-27T10:33:51
| 2018-04-27T10:33:51
| 119,028,440
| 0
| 0
| null | 2018-04-26T09:28:03
| 2018-01-26T08:50:51
|
R
|
UTF-8
|
R
| false
| false
| 3,997
|
r
|
180216_boxplots_comment_and_pullrequest_count.R
|
# Load per-owner GitHub activity counts exported from the analysis pipeline.
# df1: comment and pull-request counts per owner; df2: commit counts.
import_path = "Import/180216_no_comments_and_pull_requests_per_owner.csv"
df1 = read.csv(import_path, header=TRUE, sep=",", stringsAsFactors = TRUE)
import_path = "Import/180216_count_commits_per_owner.csv"
df2 = read.csv(import_path, header=TRUE, sep=",", stringsAsFactors = TRUE)
# Log-transform the heavy-tailed counts for plotting.
# NOTE(review): owners with a zero count map to -Inf here — verify upstream
# export excludes zeros, or filter before taking logs.
df1$comment_count_log = log(df1$comment_count)
df1$pull_request_count_log = log(df1$pull_request_count)
df2$commit_count_log = log(df2$commit_count)
# hist() both draws a histogram and returns the breaks/counts reused below.
r_comment_count <- hist(df1$comment_count_log)
r_pull_request_count <- hist(df1$pull_request_count_log)
r_commit_count <- hist(df2$commit_count_log)
# Line plot of histogram counts against bin breaks, with the count axis on a
# log scale.
#
# Args:
#   r:     a hist() result; only r$breaks and r$counts are used.
#   title: main plot title.
#   xlab:  x-axis label.
logplot <- function(r, title, xlab) {
  xs <- r$breaks[-1]  # drop the leading break so there is one x per count
  ys <- r$counts
  plot(xs, ys, type = 'l', log = 'y',
       main = title, xlab = xlab, ylab = "log(frequency)")
}
# Log-log style frequency plots for each activity metric.
logplot(r=r_comment_count,
title="log-log plot of no comments to frequency per owner
(considers all comments between 2014-01-01 and 2017-07-31)",
xlab = "log(count comments per owner)")
logplot(r_pull_request_count,
title = "log-log plot of no pull_requests to frequency per owner
(considers all pull_requtests between 2014-01-01 and 2017-07-31)",
xlab = "log(count pull requests per owner)")
logplot(r_commit_count,
title = "log-log plot of no commits to frequency per owner
(considers all commits between 2014-01-01 and 2017-07-31)",
xlab = "log(count commits per owner)")
# Upper-tail quantiles of each raw count, to inform selection thresholds.
quantile(df1$comment_count, prob = c(0.8, 0.9, 0.98, 0.985, 0.99, 0.999))
quantile(df1$pull_request_count, prob = c(0.8, 0.9, 0.98, 0.985, 0.99, 0.999))
quantile(df2$commit_count, prob = c(0.8, 0.9, 0.98, 0.985, 0.99, 0.999))
# hist(df$count)
summary(df1)
summary(df2)
# Draw a boxplot (log-scaled y axis) of a random sample of one column.
#
# Args:
#   data:          data frame holding the column of interest.
#   c_name:        name of the column to sample and plot (also the y label).
#   sample_factor: fraction (0..1) of the non-NA values to sample.
#
# Note: sampling is random, so repeated calls show different subsets.
boxplot_sample <- function(data, c_name, sample_factor) {
  values <- data.frame(na.omit(data[, c_name]))
  n_sample <- round(nrow(values) * sample_factor)
  picked <- values[sample(nrow(values), n_sample), ]
  caption <- paste(
    "population: ", toString(nrow(data)),
    "\nsample size: ", toString(n_sample),
    " (", toString(sample_factor * 100), "%)")
  boxplot(picked, ylab = c_name, sub = caption, log = "y")
}
# Draw sampled boxplots per metric (random subsets keep the plots legible).
boxplot_sample(df1, "comment_count", 0.002)
boxplot_sample(df1, "pull_request_count", 0.002)
boxplot_sample(df2, "commit_count", 0.001)
# Full outer join of comment/pull-request counts with commit counts per owner.
df3 <- merge(x = df1,
             y = df2,
             by = "owner_id",
             all = TRUE)
# FIX: select()/filter() below come from dplyr, which this script never
# loaded — it only ran when dplyr happened to be attached in the session.
library(dplyr)
############# filters
# Thresholds defining which owners are "active enough" to keep.
lim_commit_count <- 500
lim_pull_request_count <- 100
lim_comment_count <- 2000
# Owners with enough comments...
df4 <- select(filter(df3, comment_count >= lim_comment_count),
              c(owner_id, comment_count, pull_request_count, commit_count))
# ...and enough pull requests or commits.
df5 <- select(filter(df4, (pull_request_count >= lim_pull_request_count | commit_count >= lim_commit_count)),
              c(owner_id, comment_count, pull_request_count, commit_count))
# Distribution of the surviving owners' counts (drop the owner_id column).
boxplot(df5[, -1], log = 'y',
        ylab = 'count (log-transformed)',
        main = 'Distribution of the remaining owner data after applying selection criteria',
        sub = paste(
          'remaining number of owners: ', toString(nrow(df5)),
          '(', toString(round(nrow(df5)/nrow(df3)*100, digits = 2)), '% of original data)',
          '\ncriteria: ',
          'no commits >= ', toString(lim_commit_count), ' OR',
          ' no pull requests >= ', toString(lim_pull_request_count), 'AND',
          ' no comments >= ', toString(lim_comment_count))
)
# Pairwise scatter plots of the log-transformed counts.
df3_log <- data.frame(commit_count = log(df3$commit_count),
                      pull_request_count = log(df3$pull_request_count),
                      comment_count = log(df3$comment_count))
plot(df3_log)
# Histogram of log commit counts; reuse its breaks/counts for a line plot.
r <- hist(na.omit(df3_log$commit_count))
plot(r$breaks[-1],
     r$counts,
     type = 'l',
     main = "Number of commits to owners on a log scale to frequency",
     sub = "considers data between 2014-01-01 and 2017-07-31",
     xlab = "log(count commits per owner)",
     ylab = "frequency")
# Mark the commit-count selection threshold on the log axis.
abline(v = log(lim_commit_count), lty = 2, col = 'blue')
|
b128ff5ebfafe36e1966c7bced7c2aec010c4e13
|
44039e6fa92f04c587839174ce79797cf4ca575d
|
/tests/testthat/test_lengthen_hydrographs.R
|
98415247813cb5c298badc1efc60f21bcbf3053e
|
[
"CC0-1.0"
] |
permissive
|
mpdougherty/razviz
|
58ee97162d9c0360a9b50060f83131a7508cce3d
|
a4edd0b9fe89707ba29c1bb4d767d8011bc5c846
|
refs/heads/master
| 2023-04-04T20:11:30.106910
| 2021-03-26T19:22:08
| 2021-03-26T19:22:08
| 273,563,278
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,299
|
r
|
test_lengthen_hydrographs.R
|
context("lengthen hydrographs")
library(razviz)

# All four calibration events share the same run number and type; only the
# event year (and hence the extdata folder) differs, so import them through
# one helper instead of four copy-pasted blocks.
run_number <- 9
run_type <- "Calibration"

# Import one calibration event's hydrographs from the package's extdata.
import_event <- function(event) {
  folder <- system.file(paste0("extdata/hydrographs/", event,
                               "_calibration_", run_number),
                        package = "razviz")
  razviz::import_csv_manualrasoutput(folder = folder,
                                     event = event,
                                     run_number = run_number,
                                     run_type = run_type)
}

# Combine hydrograph events 2008, 2013, 2014, 2017 into one wide table.
hydrograph_list <- lapply(c("2008", "2013", "2014", "2017"), import_event)
cal_wide <- razviz::combine_hydrographs(hydrograph_list)

# Convert to long format suitable for plotting.
cal <- razviz::lengthen_hydrographs(cal_wide)

# The long format should contain exactly one Type level per hydrograph
# variable.
hydrograph_variables <- c("WS_Elev", "Model_Q", "Obs_WS", "Obs_Q")

test_that("lengthen hydrographs", {
  expect_true(is.data.frame(cal))
  expect_true(length(unique(cal$Type)) == length(hydrograph_variables))
})
|
309067bd4902d7d52ba4febde5e8ebdef9f623ff
|
7c45a36bfcd825044b743023e9f522ef17f32198
|
/man/format_sec.Rd
|
1c5509a1a8ebaed4a26ce1644fca471ef3ba3f43
|
[
"MIT"
] |
permissive
|
andybega/demspaces
|
bffe324c24fa4919a7ca2e958aa2a72185e2be92
|
111544b8a278969067027cf47128719495d768f0
|
refs/heads/master
| 2021-06-20T08:22:24.165205
| 2021-04-15T07:33:56
| 2021-04-15T07:33:56
| 205,905,499
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 286
|
rd
|
format_sec.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{format_sec}
\alias{format_sec}
\title{Format seconds}
\usage{
format_sec(x)
}
\arguments{
\item{x}{a numeric vector}
}
\description{
Format seconds into hh:mm:ss
}
\examples{
format_sec(1683)
}
|
0e7e7fcdba554b6c1f7b70a8edb693403bf26b19
|
494154618993f6a3264dcf35de48416efdb63197
|
/man/Rhabdo1.Rd
|
57e52741906e3b34f6df97181d56ecdd148b0a0a
|
[] |
no_license
|
collinn/sassyR
|
c90178613843fd22adebc725edc268aedf3d2865
|
2c9bd0a86a3f09d126726fb0997619c862673790
|
refs/heads/master
| 2021-01-15T01:08:54.037104
| 2020-04-29T15:03:01
| 2020-04-29T15:03:01
| 242,824,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
Rhabdo1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rhabdo1_data.R
\docType{data}
\name{Rhabdo1}
\alias{Rhabdo1}
\title{Hawkeye Rhabdo Football Study}
\format{
An object of class \code{"data.table"}
}
\usage{
data(Rhabdo1)
}
\description{
Rhabdo dataset including None, 1_to_3 and 4_to_5
}
\examples{
data(Rhabdo1)
print(Rhabdo1)
tabs <- proc_freq(Rhabdo1, "Freq", "Shakes*Rhabdo / ChiSq CellChi2 Exact")
}
\references{
BIOS:7410 class notes
}
\keyword{datasets}
|
805e82ec36d93ea171d958b73c2a812a6f592aa2
|
437ea30837d0068b8bca815f500396f30cd2ff74
|
/man/pair.compare.Rd
|
314765a2a9485f8e47331dfa6c44da218661c6c3
|
[] |
no_license
|
hummelma/GlobalAncova
|
0d51390638a353a3d2732a962f05b9a2a73606fc
|
f2512c80850a0b0ebb6d5ee53c6ed9228b85b74c
|
refs/heads/master
| 2021-06-15T19:42:03.969139
| 2021-01-31T09:39:31
| 2021-01-31T09:39:31
| 123,418,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,176
|
rd
|
pair.compare.Rd
|
\name{pair.compare}
\alias{pair.compare}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Pairwise comparisons of factor levels within GlobalAncova}
\description{Pairwise comparisons of gene expression in different levels of a factor by GlobalAncova tests.
The method uses the reduction in residual sum of squares obtained when two respective factor levels are set to the same level.
Holm-adjusted permutation-based p-values are given.
}
\usage{
pair.compare(xx, formula, group, model.dat = NULL, test.genes = NULL, perm = 10000)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{xx}{Matrix of gene expression data, where columns correspond to samples
and rows to genes. The data should be properly normalized beforehand
(and log- or otherwise transformed). Missing values are not allowed.
Gene and sample names can be included as the row and column
names of \code{xx}.}
\item{formula}{Model formula for the linear model.}
\item{group}{Factor for which pairwise comparisons shall be calculated.}
\item{model.dat}{Data frame that contains all the variable information for each sample.}
\item{test.genes}{Vector of gene names or a list where each element is a vector of gene names.}
\item{perm}{Number of permutations to be used for the permutation approach. The default is 10,000.}
}
\value{
An ANOVA table, or list of ANOVA tables for each gene set, for the pairwise comparisons.
}
%\references{!!!}
\author{Ramona Scheufele \email{ramona.scheufele@charite.de} \cr
Reinhard Meister \email{meister@tfh-berlin.de}\cr
Manuela Hummel \email{m.hummel@dkfz.de} \cr
Urlich Mansmann \email{mansmann@ibe.med.uni-muenchen.de}}
\note{This work was supported by the NGFN project 01 GR 0459, BMBF, Germany.}
\seealso{\code{\link{GlobalAncova}}, \code{\link{GlobalAncova.decomp}}}
\examples{
data(vantVeer)
data(phenodata)
data(pathways)
pair.compare(xx = vantVeer, formula = ~ grade, group = "grade", model.dat = phenodata, test.genes = pathways[1:3], perm = 100)
}
\keyword{ models }% at least one, from doc/KEYWORDS
|
ab328afdfc478fbc47c4ba0078aca83ea6bfc9a4
|
599c3ee6ad8f20e3bf98374aac4946e7f611e225
|
/clusterassessment.r
|
1e981764d308c40c652920e3a4c404d67de162ff
|
[] |
no_license
|
MatsushitaT/Codes
|
2842c806309189c788c5c5beb589954a30d6fa09
|
e5064fd1e606dc113cb4f7fc43bbb920e49843cb
|
refs/heads/master
| 2020-04-06T12:12:15.759495
| 2012-09-07T16:22:07
| 2012-09-07T16:22:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,370
|
r
|
clusterassessment.r
|
## ---- Setup & data import ----------------------------------------------
## NOTE(review): rm(list=ls()) and setwd() in a script clobber the user's
## session; kept as-is to preserve the script's original behaviour.
rm(list=ls())
setwd("~/Analysis/Cortex_thickness/")
## cor.test.2() is a project-local helper used below for pairwise
## correlation tests (its definition is not visible from this file).
source("~/Analysis/Cortex_thickness/Codes/cor.test.2.r")
##### READ DATA
## Read the left-hemisphere cortical thickness table (tab-separated).
left <- read.table("cortical-thickness-left-hemi.txt",
sep="\t",header=T,quote="", comment.char="",as.is=T)
## Name the first four (metadata) columns.
names(left)[1:4] <- c("ID","date","msID1","msID2")
## Build EPIC subject IDs by prefixing the numeric ID.
left$ID <- paste("EPIC",left$ID, sep="")
## Clean up date: strip quotes, normalise "=" to "/" separators.
left$date <- gsub("'","",left$date)
left$date <- gsub("=","/",left$date)
## Clean up msID1: strip quotes and spaces, replace "/" with "_".
left$msID1 <- gsub("'","",left$msID1)
left$msID1 <- gsub(" ","",left$msID1)
left$msID1 <- gsub("/","_",left$msID1)
## Sanity check: msID1 with "_t1v" suffix should equal msID2.
summary(paste(left$msID1,"t1v",sep="_") == left$msID2) ## must be all true
## Read the right-hemisphere table and apply identical cleaning.
right <- read.table("cortical-thickness-right-hemi.txt",
sep="\t",header=T,quote="", comment.char="",as.is=T)
## name first four columns
names(right)[1:4] <- c("ID","date","msID1","msID2")
## make EPIC ID
right$ID <- paste("EPIC",right$ID, sep="")
## clean up date
right$date <- gsub("'","",right$date)
right$date <- gsub("=","/",right$date)
## clean up msID1
right$msID1 <- gsub("'","",right$msID1)
right$msID1 <- gsub(" ","",right$msID1)
right$msID1 <- gsub("/","_",right$msID1)
## msID1 and msID2 should match
summary(paste(right$msID1,"t1v",sep="_") == right$msID2) ## must be all true
## Join the hemisphere tables column-wise; first confirm row alignment.
summary(left$msID1 == right$msID1) ## must be all true
summary(left$msID2 == right$msID2) ## must be all true
summary(left$ID == right$ID) ## must be all true
## Drop the four metadata columns from each side before binding.
## (right[-1:-4] is list-style column indexing, equivalent to right[,-1:-4].)
cortical.thickness <- cbind(left[,-1:-4],right[-1:-4])
rownames(cortical.thickness) <- left$ID
## Strip the redundant "_thickness" suffix from region column names.
colnames(cortical.thickness) <- gsub("_thickness","",colnames(cortical.thickness))
## remove non-joined data
rm(left,right)
## ---- QC, winsorizing, distance matrices -------------------------------
## Zero thickness is treated as missing data.
cortical.thickness[cortical.thickness==0] <- NA
## Winsorize each region (column) at mean +/- 3 SD into cort3sd.
## NOTE(review): cort3sd is filtered below but never used by any of the
## clustering analyses that follow — it appears to be a dead end; confirm
## whether downstream code (not in this file) uses it.
cort3sd <- cortical.thickness
cort3sd <- apply(cort3sd,2,function(x){
m <- mean(x,na.rm=T)
sd <- sd(x,na.rm=T)
pmin(pmax(x,m-3*sd),m+3*sd)})
## missing rate in each individual (row-wise fraction of NAs)
(ind <- apply(cortical.thickness, 1, function(x)sum(is.na(x))/length(x)))
(ind2 <- apply(cort3sd, 1, function(x)sum(is.na(x))/length(x)))
## missing rate in each cortex region (column-wise fraction of NAs)
(cereb <- apply(cortical.thickness, 2, function(x)sum(is.na(x))/length(x)))
(cereb2 <- apply(cort3sd, 2, function(x)sum(is.na(x))/length(x)))
## Keep individuals and regions with < 5% missingness.
cortical.thickness <- cortical.thickness[ind<0.05,cereb<0.05]
cort3sd <- cort3sd[ind2<0.05,cereb2<0.05]
## Interleave left/right homologous regions (lh1, rh1, lh2, rh2, ...).
## NOTE(review): this assumes all 68 region columns survived the
## missingness filter above; if any region was dropped the 1:34/35:68
## index mapping is wrong — TODO confirm against the data.
tmp <- cortical.thickness[, c(rbind(1:34, 1:34+34))]
##make distance matrix by correlation (pearson): d = 1 - r
cormat <- as.dist(1-cor(tmp,use='pair'))
##make distance matrix by correlation (spearman)
corspear <- as.dist(1-cor(tmp,use='pair', method="spearman"))
##make distance matrix by Euclidean method (between regions, hence t())
eucmat <- dist(t(cortical.thickness))
##make distance matrix by Manhattan method
manmat <- dist(t(cortical.thickness), method="manhattan")
## ---- Dendrograms: every linkage method x every distance ---------------
## Agglomeration methods passed to hclust(); one dendrogram per method
## is written into each PDF below.
methods <- c("ward", "single", "complete", "average", "mcquitty", "median", "centroid")
## plot dendrogram by each method using distance information based on the correlation coefficient (pearson)
pdf("clustering_by_pearson.pdf")
par(ps=8)
for(m in methods)plot(hclust(cormat,method=m),main=paste(m,"Pearson", sep=":"))
dev.off()
## plot dendrogram by each method using distance information based on the correlation coefficient (spearman)
pdf("clustering_by_spearman.pdf")
par(ps=8)
for(m in methods)plot(hclust(corspear,method=m),main=paste(m,"Spearman", sep=":"))
dev.off()
## plot dendrogram by each method using distance information by Euclidean method
pdf("clustering_by_Euc.pdf")
par(ps=8)
for(m in methods)plot(hclust(eucmat,method=m),main=paste(m,"Euclidean", sep=":"))
dev.off()
## plot dendrogram by each method using distance information by Manhattan method
pdf("clustering_by_Manhattan.pdf")
par(ps=8)
for(m in methods)plot(hclust(manmat, method=m),main=paste(m,"Manhattan", sep=":"))
dev.off()
## ---- Cluster assessment: Pearson distance, Ward linkage ---------------
## For k = 3..34 clusters: write the member regions of each cluster to
## "Grouped_by_pearson.txt" (via sink), and count, within each cluster,
## the region pairs with r > 0.5 and p < 0.05 (pairwise tests come from
## the project-local helper cor.test.2()).
for(i in 3:34){
Cor.group <- cutree(hclust(cormat, method="ward"),i)
cat("\n","Divided by ", i, "groups","\n\n",append=T,file="Grouped_by_pearson.txt")
## rpfull = pairs passing the r/p criterion; total = all pairs tested
rpfull <- 0; total<-0
for(numg in 1:i){
cat("Group",numg, "\n", append=T, file="Grouped_by_pearson.txt")
sink("Grouped_by_pearson.txt", append=T)
print(names(Cor.group[Cor.group==numg]),quote=F)
sink()
## Singleton clusters have no pairs to test.
if(sum(Cor.group==numg)<2){next}
cts <- cor.test.2(cortical.thickness[names(which(Cor.group==numg))],"pearson")
## Row 1 = estimates, row 2 = p-values, one column per region pair.
rplist <- rbind(r=sapply(cts, "[[" , "estimate"), p.value=sapply(cts, "[[" , "p.value"))
rpfull <- rpfull+ncol(as.data.frame(rplist[,rplist[1,]>0.5&rplist[2,]<0.05]))
total <- total+ncol(rplist)
cat("\n", "Combinations with r > 0.5 and p <0.05: ", ncol(as.data.frame(rplist[,rplist[1,]>0.5&rplist[2,]<0.05])),
"in ", ncol(rplist), "\n\n", append = T, file="Grouped_by_pearson.txt")}
cat("\n","Total: ",rpfull, "in", total, "\n",append = T, file = "Grouped_by_pearson.txt")}
## ---- Cluster assessment: Spearman distance, Ward linkage --------------
## For k = 3..34 clusters: write the member regions of each cluster to
## "Grouped_by_spearman.txt" (via sink), and count, within each cluster,
## the region pairs with r > 0.5 and p < 0.05 (pairwise tests come from
## the project-local helper cor.test.2()).
## FIX: a redundant, byte-identical recomputation of rplist (between the
## "total" accumulation and the final cat()) has been removed — cts does
## not change in between, so the value was identical.
for(i in 3:34){
Corspear.group <- cutree(hclust(corspear, method="ward"),i)
cat("\n","Divided by ", i, "groups","\n\n",append=T,file="Grouped_by_spearman.txt")
## rpfull = pairs passing the r/p criterion; total = all pairs tested
rpfull <- 0; total<-0
for(numg in 1:i){
cat("Group",numg, "\n", append=T, file="Grouped_by_spearman.txt")
sink("Grouped_by_spearman.txt", append=T)
print(names(Corspear.group[Corspear.group==numg]),quote=F)
sink()
## Singleton clusters have no pairs to test.
if(sum(Corspear.group==numg)<2){next}
cts <- cor.test.2(cortical.thickness[names(which(Corspear.group==numg))],"spearman")
## Row 1 = estimates, row 2 = p-values, one column per region pair.
rplist <- rbind(r=sapply(cts, "[[" , "estimate"), p.value=sapply(cts, "[[" , "p.value"))
rpfull <- rpfull+ncol(as.data.frame(rplist[,rplist[1,]>0.5&rplist[2,]<0.05]))
total <- total+ncol(rplist)
cat("\n", "Combinations with r > 0.5 and p <0.05: ", ncol(as.data.frame(rplist[,rplist[1,]>0.5&rplist[2,]<0.05])),
"in ", ncol(rplist), "\n\n", append = T, file="Grouped_by_spearman.txt")}
cat("\n","Total: ",rpfull, "in", total, "\n",append = T, file = "Grouped_by_spearman.txt")}
## ---- Cluster assessment: Euclidean distance (Ward) --------------------
## For k = 3..9: write cluster membership to "Grouped_by_Euc.txt", and
## count how many individuals show a p < 0.05 difference in thickness
## between the region clusters (one oneway.test per individual; columns
## of as.data.frame(t(cortical.thickness)) are individuals).
for(i in 3:9){
Euc.group <- cutree(hclust(eucmat, method="ward"),i)
oneway <- lapply(as.data.frame(t(cortical.thickness)),function(x)oneway.test(x~Euc.group))
nump <- sum(sapply(oneway, function(x)x$p.value<0.05))
cat("\n","Divided by ", i, "groups","\n\n",append=T,file="Grouped_by_Euc.txt")
for(numg in 1:i){
cat("Group",numg, "\n", append=T, file="Grouped_by_Euc.txt")
sink("Grouped_by_Euc.txt", append=T)
print(names(Euc.group[Euc.group==numg]),quote=F)
sink()}
cat("\n","p<0.05: ", nump, "in", nrow(cortical.thickness), "\n\n", append=T, file="Grouped_by_Euc.txt")}
## ---- Cluster assessment: k-means ------------------------------------
## Same per-individual assessment, but clusters come from kmeans on the
## region profiles (nstart=10000 restarts for a stable solution);
## ans[[1]] is the cluster-assignment vector.
for(i in 3:9){
ans <- kmeans(t(cortical.thickness),i, nstart=10000)
oneway <- lapply(as.data.frame(t(cortical.thickness)),function(x)oneway.test(x~ans[[1]]))
nump <- sum(sapply(oneway, function(x)x$p.value<0.05))
cat("\n","Divided by ", i, "groups","\n\n",append=T,file="Grouped_by_Kmeans.txt")
for(numg in 1:i){
cat("Group",numg, "\n", append=T, file="Grouped_by_Kmeans.txt")
sink("Grouped_by_Kmeans.txt", append=T)
print(names(ans[[1]][ans[[1]]==numg]),quote=F)
sink()}
cat("\n","p<0.05: ", nump, "in", nrow(cortical.thickness), "\n\n", append=T, file="Grouped_by_Kmeans.txt")}
#cor.test.2(cortical.thickness[c("lh_caudalanteriorcingulate",
# "lh_caudalmiddlefrontal",
# "rh_bankssts",
# "lh_parstriangularis")])
#which(Euc.group==8)
#names(which(Euc.group==8))
#cts <- cor.test.2(cortical.thickness[names(which(Euc.group==8))])
#sapply(cts, "[[" , "estimate")
#sapply(cts, "[[" , "p.value")
|
4600ee22ea5013adea479d1e8bb25fa57f0deb9b
|
a50ef8150427fd6490c28f0d5ca00eb5b6da0ba9
|
/Clase 02/multiple_regression_PREDICT.R
|
1384a35848f54a5353df99ef120c0a06f26e207d
|
[] |
no_license
|
isajar/CursoAlgoTrading
|
e86271173d9020ad928818bde7ae928aac227b36
|
ca8dcfe935f215732440ed2f97ac9c5f99d99728
|
refs/heads/master
| 2023-02-26T20:27:25.549880
| 2021-02-08T20:16:00
| 2021-02-08T20:16:00
| 337,196,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,284
|
r
|
multiple_regression_PREDICT.R
|
## Load the data set.
## Check the PATH where the training data set is stored.
## Set the project working directory.
setwd("/Users/isajarspector/Desktop/algoTrading/Clase 02")
## header=TRUE whenever the data has a header row (so it is not read as data).
training <- read.csv("data/Regresion_Multiple_training.csv",header=T,sep=',')
head(training)
## Keep only columns 2 to 6.
## Always drop the ID column before modelling.
## Drop the first variable (PERIODO), previously judged uninformative.
data_training <- training[,2:6]
head(data_training)
## Fit the linear regression model.
## ENROL is the response; "." stands for all remaining columns as predictors.
## NOTE(review): the conventional form is lm(ENROL ~ ., data=data_training);
## here all.vars() of the LHS still contains "ENROL", so "." correctly
## excludes it — but the df$col form on the LHS is fragile.
training_lm <- lm(data_training$ENROL ~ ., data=data_training)
summary(training_lm)
training_lm
coef(training_lm)
## See how RAND is discarded in the summary (significance stars / t-test).
## The fitted model is:
## -9263.1450 + 502.94*DESEMPLEO + 0.4574*GRADUADOS + 3.8411*INGRESOS
## For the testing set: DESEMPLEO=7, GRADUADOS=16816, INGRESOS=3345
## so the predicted ENROL is: 14797
## Import the testing data set.
testing <- read.csv("data/Regresion_Multiple_testing.csv",header=T,sep=',')
head(testing)
## NOTE(review): this first subset (columns 3:6) is immediately overwritten
## by the next assignment and has no effect — dead code kept for fidelity.
testing_data <- subset( testing, select = c(3:6) )
# other ways of dropping columns:
## testing_data <- testing[,c(3,4,6)]
## NOTE(review): -c(1) drops only column 1 (PERIODO); the original comment
## claimed ENROL was also dropped — presumably predict() simply ignores any
## extra columns. TODO confirm against the testing file's layout.
testing_data <- subset( testing, select = -c(1) )
head(testing_data)
## Use predict() with the fitted model "training_lm" on the data to be
## predicted, "testing_data".
predict(training_lm, testing_data)
## The prediction returned is: "14798"
## The true value (known!) was: "16081"
## We can also get the 95% prediction interval.
predict(training_lm, testing_data, interval="predict")
## The result is:
## fit lwr upr
## 1 14797.98 13246.99 16348.97
## Read as: the prediction is 14797, and the value will lie between
## 13247 and 16349 with 95% confidence.
|
0bd1429ec1d20127d2e8cbe85d9b946ad5faf5e2
|
ffb160e58424d271c0cc32bd5c20e38a81cec151
|
/time_series/calculate_no_nc_seasonality_strengths.R
|
bbd5286ac84552ffcfce99ba0d849aa077e23e14
|
[] |
no_license
|
wilkox/diurnal_variation
|
4bbd56cbfa5e1118f58df2df4f1c0ece85088445
|
e84e7582c8ce4d29ac3275e44cc4d3f6037c7af7
|
refs/heads/master
| 2023-01-11T14:14:27.408295
| 2020-11-19T08:03:43
| 2020-11-19T08:03:43
| 314,176,883
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,011
|
r
|
calculate_no_nc_seasonality_strengths.R
|
# Libraries
library(tidyverse)
library(furrr)
plan(multisession)
# Decompose one species' abundance series into observed/seasonal/trend/
# random components (additive model, frequency 2 = two samples per cycle).
# `x` is a tibble with at least `timepoint` and `abundance` columns.
# Steps: fill any missing timepoints with NA, impute them seasonally
# (imputeTS::na_seadec), run stats::decompose, and return a tibble with
# one row per timepoint. `observed = x` renames decompose()'s "x"
# component to "observed".
decompose_abundances <- function(x) {
  x %>%
    select(timepoint, abundance) %>%
    # Factor with a full min:max level range so complete() can insert
    # rows for absent timepoints.
    mutate_at("timepoint", ~ factor(., levels = min(.):max(.))) %>%
    complete(timepoint, fill = list(abundance = NA)) %>%
    arrange(timepoint) %>%
    pull(abundance) %>%
    ts(frequency = 2) %>%
    imputeTS::na_seadec() %>%
    decompose("additive") %>%
    # Keep the four series components (x, seasonal, trend, random);
    # drop the trailing metadata elements of the decomposed.ts object.
    .[1:4] %>%
    map(as.double) %>%
    as_tibble() %>%
    mutate(timepoint = 1:n()) %>%
    select(timepoint, observed = x, everything())
}
# Seasonality strength of a decomposed series:
#   1 - sd(remainder) / sd(remainder + seasonal)
# after dropping the first and last rows (edge effects of decompose()).
# An NA ratio is treated as 0, and the result is floored at 0.
seasonality_strength <- function(time_series) {
  trimmed <- slice(time_series, -1, -nrow(time_series))
  remainder_sd <- sd(trimmed$random)
  detrended_sd <- sd(trimmed$random + trimmed$seasonal)
  strength <- replace_na(1 - (remainder_sd / detrended_sd), 0)
  max(c(0, strength))
}
# Null distribution of seasonality strength for one species.
# Shuffles the abundance column `permutations` times (breaking any real
# temporal structure), re-runs the decomposition, and returns a numeric
# vector of the resulting seasonality strengths.
# NOTE(review): sample() makes this non-deterministic unless the caller
# sets a seed.
permuted_seasonality_strengths <- function(x, permutations = 999) {
  # Generate random permutations of abundance
  tibble(
    permutation = 1:permutations,
    abundances = rep(list(select(x, timepoint, abundance)), permutations)
  ) %>%
    mutate(abundances = map(abundances,
                            ~ mutate(.x, abundance = sample(abundance)))) %>%
    mutate(ts = map(abundances, decompose_abundances)) %>%
    mutate(ss = map_dbl(ts, seasonality_strength)) %>%
    pull(ss)
}
# Load list of species that appear in negative control samples
# (any non-zero abundance in a control flags the species as a contaminant).
nc_species <- "../abundances/control_species_abundances.tsv" %>%
  read_tsv() %>%
  filter(abundance > 0) %>%
  pull(species) %>%
  unique()
# Load abundances, remove the negative control species, and renormalise
# remaining species abundances to percentages within each sample.
abundances <- read_tsv("../abundances/species_abundances_decontaminated.tsv") %>%
  select(sample, site, location, timepoint, species, abundance) %>%
  filter(! species %in% nc_species) %>%
  group_by(sample, site, location, timepoint) %>%
  mutate(abundance = 100 * abundance / sum(abundance)) %>%
  ungroup()
# Run time series decompositions and seasonality strength calculations,
# one per (site, location, species); species with zero total abundance at
# a site/location are skipped. The permutation step runs in parallel via
# furrr::future_map (plan set at the top of the script).
seasonality_strengths <- abundances %>%
  select(site, location, timepoint, species, abundance) %>%
  add_count(site, location, species, wt = abundance, name = "total_abundance") %>%
  filter(total_abundance > 0) %>%
  select(-total_abundance) %>%
  nest(abundances = c(timepoint, abundance)) %>%
  mutate(ts = map(abundances, decompose_abundances)) %>%
  mutate(observed_ss = map_dbl(ts, seasonality_strength)) %>%
  select(-ts) %>%
  mutate(permuted_ss = future_map(abundances, permuted_seasonality_strengths,
                                  .progress = TRUE)) %>%
  select(-abundances)
# Calculate p values: fraction of permuted strengths >= the observed one.
seasonality_strengths <- seasonality_strengths %>%
  mutate(p = map2_dbl(observed_ss, permuted_ss, ~ sum(.y >= .x) / length(.y))) %>%
  select(-permuted_ss) %>%
  rename(seasonality_strength = observed_ss)
# Write to file
write_tsv(seasonality_strengths, "no_nc_seasonality_strengths.tsv")
|
e50406114e9de5b2cf5e782b2dfabc8846bb7de4
|
79b935ef556d5b9748b69690275d929503a90cf6
|
/man/Jinhom.Rd
|
e67eb3231113c6cbbed2d6a7f0bb31917bcffbd1
|
[] |
no_license
|
spatstat/spatstat.core
|
d0b94ed4f86a10fb0c9893b2d6d497183ece5708
|
6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70
|
refs/heads/master
| 2022-06-26T21:58:46.194519
| 2022-05-24T05:37:16
| 2022-05-24T05:37:16
| 77,811,657
| 6
| 10
| null | 2022-03-09T02:53:21
| 2017-01-02T04:54:22
|
R
|
UTF-8
|
R
| false
| false
| 6,986
|
rd
|
Jinhom.Rd
|
\name{Jinhom}
\alias{Jinhom}
\title{
Inhomogeneous J-function
}
\description{
Estimates the inhomogeneous \eqn{J} function of
a non-stationary point pattern.
}
\usage{
Jinhom(X, lambda = NULL, lmin = NULL, ...,
sigma = NULL, varcov = NULL,
r = NULL, breaks = NULL, ratio=FALSE,
update = TRUE, warn.bias=TRUE, savelambda=FALSE)
}
\arguments{
\item{X}{
The observed data point pattern,
from which an estimate of the inhomogeneous \eqn{J} function
will be computed.
An object of class \code{"ppp"}
or in a format recognised by \code{\link{as.ppp}()}
}
\item{lambda}{
Optional.
Values of the estimated intensity function.
Either a vector giving the intensity values
at the points of the pattern \code{X},
a pixel image (object of class \code{"im"}) giving the
intensity values at all locations, a fitted point process model
(object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which
can be evaluated to give the intensity value at any location.
}
\item{lmin}{
Optional. The minimum possible value of the intensity
over the spatial domain. A positive numerical value.
}
\item{sigma,varcov}{
Optional arguments passed to \code{\link{density.ppp}}
to control the smoothing bandwidth, when \code{lambda} is
estimated by kernel smoothing.
}
\item{\dots}{
Extra arguments passed to \code{\link{as.mask}} to control
the pixel resolution, or passed to \code{\link{density.ppp}}
to control the smoothing bandwidth.
}
\item{r}{
vector of values for the argument \eqn{r} at which
the inhomogeneous \eqn{J} function
should be evaluated. Not normally given by the user;
there is a sensible default.
}
\item{breaks}{
This argument is for internal use only.
}
\item{ratio}{
Logical.
If \code{TRUE}, the numerator and denominator of
the estimate will also be saved,
for use in analysing replicated point patterns.
}
\item{update}{
Logical. If \code{lambda} is a fitted model
(class \code{"ppm"} or \code{"kppm"})
and \code{update=TRUE} (the default),
the model will first be refitted to the data \code{X}
(using \code{\link{update.ppm}} or \code{\link{update.kppm}})
before the fitted intensity is computed.
If \code{update=FALSE}, the fitted intensity of the
model will be computed without fitting it to \code{X}.
}
\item{warn.bias}{
Logical value specifying whether to issue a warning
when the inhomogeneity correction factor takes extreme values,
which can often lead to biased results. This usually occurs
when insufficient smoothing is used to estimate the intensity.
}
\item{savelambda}{
Logical value specifying whether to save the values of
\code{lmin} and \code{lambda} as attributes of the result.
}
}
\details{
This command computes estimates of the
inhomogeneous \eqn{J}-function (Van Lieshout, 2010)
of a point pattern. It is the counterpart, for inhomogeneous
spatial point patterns, of the \eqn{J} function
for homogeneous point patterns computed by \code{\link{Jest}}.
The argument \code{X} should be a point pattern
(object of class \code{"ppp"}).
The inhomogeneous \eqn{J} function is computed as
\eqn{Jinhom(r) = (1 - Ginhom(r))/(1-Finhom(r))}
where \eqn{Ginhom, Finhom} are the inhomogeneous \eqn{G} and \eqn{F}
functions computed using the border correction
(equations (7) and (6) respectively in Van Lieshout, 2010).
The argument \code{lambda} should supply the
(estimated) values of the intensity function \eqn{\lambda}{lambda}
of the point process. It may be either
\describe{
\item{a numeric vector}{
containing the values
of the intensity function at the points of the pattern \code{X}.
}
\item{a pixel image}{
(object of class \code{"im"})
assumed to contain the values of the intensity function
at all locations in the window.
}
\item{a fitted point process model}{
(object of class \code{"ppm"} or \code{"kppm"})
whose fitted \emph{trend} can be used as the fitted intensity.
(If \code{update=TRUE} the model will first be refitted to the
data \code{X} before the trend is computed.)
}
\item{a function}{
which can be evaluated to give values of the intensity at
any locations.
}
\item{omitted:}{
if \code{lambda} is omitted, then it will be estimated using
a `leave-one-out' kernel smoother.
}
}
If \code{lambda} is a numeric vector, then its length should
be equal to the number of points in the pattern \code{X}.
The value \code{lambda[i]} is assumed to be the
the (estimated) value of the intensity
\eqn{\lambda(x_i)}{lambda(x[i])} for
the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
Each value must be a positive number; \code{NA}'s are not allowed.
If \code{lambda} is a pixel image, the domain of the image should
cover the entire window of the point pattern. If it does not (which
may occur near the boundary because of discretisation error),
then the missing pixel values
will be obtained by applying a Gaussian blur to \code{lambda} using
\code{\link{blur}}, then looking up the values of this blurred image
for the missing locations.
(A warning will be issued in this case.)
If \code{lambda} is a function, then it will be evaluated in the
form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
of coordinates of the points of \code{X}. It should return a numeric
vector with length equal to the number of points in \code{X}.
If \code{lambda} is omitted, then it will be estimated using
a `leave-one-out' kernel smoother.
The estimate \code{lambda[i]} for the
point \code{X[i]} is computed by removing \code{X[i]} from the
point pattern, applying kernel smoothing to the remaining points using
\code{\link{density.ppp}}, and evaluating the smoothed intensity
at the point \code{X[i]}. The smoothing kernel bandwidth is controlled
by the arguments \code{sigma} and \code{varcov}, which are passed to
\code{\link{density.ppp}} along with any extra arguments.
}
\value{
An object of class \code{"fv"}, see \code{\link{fv.object}},
which can be plotted directly using \code{\link{plot.fv}}.
}
\references{
van Lieshout, M.N.M. and Baddeley, A.J. (1996)
A nonparametric measure of spatial interaction in point patterns.
\emph{Statistica Neerlandica} \bold{50}, 344--361.
van Lieshout, M.N.M. (2010)
A J-function for inhomogeneous point processes.
\emph{Statistica Neerlandica} \bold{65}, 183--201.
}
\seealso{
\code{\link{Ginhom}},
\code{\link{Finhom}},
\code{\link{Jest}}
}
\examples{
# plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2))
plot(Jinhom(swedishpines, sigma=10))
}
\author{
Original code by Marie-Colette van Lieshout.
C implementation and R adaptation by \adrian
and \ege.
}
\keyword{spatial}
\keyword{nonparametric}
|
b957c0e12de2c6539e167c49b09c38330dc1162b
|
a9d06b27407d3b1f03d1b61d637200eec5d77475
|
/5_PollenTransport.R
|
b5f74db2c1ed7892c91b00cd56900c0bed9dc6da
|
[] |
no_license
|
CallumJMacgregor/Chapter-5
|
ae2dac2f57adedc34f5b70309c178b47995fb3b8
|
dfcd155048b1b028f39c17cba4324a0ccbc5d812
|
refs/heads/master
| 2021-06-05T05:26:20.256272
| 2016-09-29T12:19:40
| 2016-09-29T12:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,947
|
r
|
5_PollenTransport.R
|
########################################################
#### Script for basic pollen transport analysis ####
########################################################
### Clear the workspace
### NOTE(review): rm(list=ls()) wipes the user's session; kept as-is.
rm(list=ls())
### install if necessary and then load the libraries you need
j <- c("lme4","car","ggplot2","RVAideMemoire","arm","MASS")
new.packages <- j[!(j %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(j, require, character.only = TRUE) # loads up any libraries that aren't already loaded
# for some reason the package glmmADMB won't install via the usual methods, so:
#install.packages("R2admb")
#install.packages("glmmADMB",
# repos=c("http://glmmadmb.r-forge.r-project.org/repos",
# getOption("repos")),
# type="source")
library(glmmADMB)
### load up Callum's custom set of functions
### (chkres / chkres.PQL / chkres.zi / chkconv used below come from these)
k <- c("CheckResidsFunction.R","CheckConvergenceFunction.R")
lapply(k,source)
### read in the data - this is the .txt file you produced in the PreparingData.R script.
dframe1<-read.table("Data/MatrixNoct.txt", header=TRUE)
summary(dframe1) # Check it's imported correctly
### tell R that SampleID and SlideNumber should be treated as factors
dframe1$SampleID <- factor(dframe1$SampleID)
dframe1$SlideNumber <- factor(dframe1$SlideNumber)
### Month as an ORDERED factor so seasonal plots sort Jan..Dec.
dframe1$Month<-ordered(dframe1$Month, levels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"))
dframe1$Year <- factor(dframe1$Year)
summary(dframe1)
names(dframe1)
### Derived per-insect pollen variables.
### BUG FIX: previously PollenLoad was appended to dframe1 *before*
### PollenTypes was computed over columns 13:length(dframe1), so the new
### PollenLoad column was itself counted as an extra "pollen type" for
### every insect carrying any pollen. The pollen-species column range is
### now captured once, before any derived columns are added.
### (Columns 1-12 are metadata; 13 onward are pollen species counts.)
pollen.cols <- 13:length(dframe1)
### total up the pollen grains for each insect
dframe1$PollenLoad <- rowSums(dframe1[pollen.cols])
### total up the number of pollen species for each insect (i.e. how many columns do not contain 0)
dframe1$PollenTypes <- rowSums(dframe1[pollen.cols] != 0)
### create a binary (yes/no) variable for whether each insect is carrying any pollen
dframe1$PollenYN <- ifelse(dframe1$PollenTypes==0,0,1)
summary(dframe1$PollenYN)
### Per-treatment subsets: B = burned ("Fire"), U = unburned ("NoFire").
dframe1B <- dframe1[dframe1$Treatment=="Fire",]
dframe1U <- dframe1[dframe1$Treatment=="NoFire",]
summary(dframe1B$PollenYN)
summary(dframe1U$PollenYN)
### create a subset dframe containing only the interactions
### (drop all metadata and derived columns, leaving moth x pollen counts)
interactions <- subset(dframe1, select=-c(SampleID,Date,Site,SlideNumber,PollenCount,Treatment,SamplingDay,Sample,PollenTypes,PollenLoad,PollenYN,Season,Month,Year))
summary(interactions)
### now you're ready to start looking for patterns!
### let's do some exploratory plots first
### Per-season subsets for quick treatment comparisons.
dframe1Sum <- dframe1[dframe1$Season=="Summer",]
dframe1Spr <- dframe1[dframe1$Season=="Spring",]
dframe1Win <- dframe1[dframe1$Season=="Winter",]
dframe1Aut <- dframe1[dframe1$Season=="Autumn",]
### Pollen load by treatment within each season (outline=F hides outliers).
plot(PollenLoad ~ Treatment, dframe1Sum, outline=F)
plot(PollenLoad ~ Treatment, dframe1Spr, outline=F)
plot(PollenLoad ~ Treatment, dframe1Win, outline=F)
plot(PollenLoad ~ Treatment, dframe1Aut, outline=F)
### Pollen types by treatment within each season.
plot(PollenTypes ~ Treatment, dframe1Sum)
plot(PollenTypes ~ Treatment, dframe1Spr)
plot(PollenTypes ~ Treatment, dframe1Win)
plot(PollenTypes ~ Treatment, dframe1Aut)
### Carrying-pollen (yes/no) by treatment within each season.
plot(PollenYN ~ Treatment, dframe1Sum)
plot(PollenYN ~ Treatment, dframe1Spr)
plot(PollenYN ~ Treatment, dframe1Win)
plot(PollenYN ~ Treatment, dframe1Aut)
### Let's first look at pollen load per-moth
### Plot it against treatment so you have an idea of what to expect
plot(PollenLoad ~ Treatment, data = dframe1, outline=F)
plot(PollenLoad ~ Season, data = dframe1, outline=FALSE)
plot(PollenLoad ~ Month, data = dframe1, outline=FALSE)
hist(dframe1$PollenLoad)
hist(log(dframe1$PollenLoad+1,10)) # log10(x+1) to visualise the skew
### Data clearly have lots of skew, so it's worth checking for overdispersion
### Simplest test is to compare the mean and variance.
### In a regular Poisson distribution, mean ~= variance; if variance is much larger, data are overdispersed
mean(dframe1$PollenLoad)
var(dframe1$PollenLoad)
### Data appear overdispersed, so we will try two types of model in addition to Poisson: quasi-Poisson, and negative binomial
### Zero-inflated models might be appropriate to try as well
# construct models using Date and Site as random effects
# random effects are factors that might affect the output variable, but not in a way that is interesting to us
# you might see variation between different sampling days, or between different fire or non-fire sites...
# ...but we are only really interested in variation due to Treatment
# Poisson model using lme4
model1P <- glmer(PollenLoad ~ Treatment*Season # fixed effects
+(1|Year) + (1|Site) + (1|Date), # random effects
family = poisson (link = "log"),
data = dframe1)
# inspect and test the model
summary(model1P)
# the best test of a GLMM is a Likelihood Ratio Test, which compares the fit of the model to if each term was removed one at a time using Chi-squared tests
# to run an LRT, use the drop1 function (i.e. drop 1 term at a time), specifying Chi-squared
drop1(model1P, test="Chisq")
# check the model's residuals
# this custom function (from CheckResidsFunction.R) produces a selection of plots that you can scroll through to check that residuals look ok
# you want residuals that are roughly normally distributed around zero with no obvious trends
chkres(model1P, dframe1$Treatment, dframe1$Season) # these residuals do appear to have a negative trend so they are not ideal; this might be driven by zeroes
# QuasiPoisson model using MASS (glmmPQL allows a quasi- family with
# nested random effects, which glmer does not)
model1Q <- glmmPQL(PollenLoad ~ Treatment*Season,
random = list(~1|Year, ~1|Site, ~1|Date),
family = quasipoisson (link = "log"),
data = dframe1)
summary(model1Q)
Anova(model1Q, type="III") # drop1 doesn't work properly with this model class so we use a Type III Anova instead
# check residuals
# this function produces a subset of the previous plots that are available for this model class
chkres.PQL(model1Q, dframe1$Treatment, dframe1$Season) # these are bad
# Zero-inflated Poisson (left commented out; glmmADMB fit was attempted
# but the residuals were judged no better — see note below)
#model1ZIP <- glmmadmb(PollenLoad ~ Treatment*Season
# + (1|Year) + (1|Site) + (1|Date), #Random effects
# zeroInflation=TRUE,
# family = "poisson",
# data = dframe1)
#summary(model1ZIP)
#Anova(model1ZIP, type="III")
#drop1(model1ZIP, test="Chisq")
# chkres.zi(model1ZIP, dframe1$Treatment, dframe1$Season) # these aren't great either
# negative binomial
model1NB <- glmer.nb(PollenLoad ~ Treatment*Season # fixed effects
+ (1|Year) + (1|Site) + (1|Date), # random effects
data = dframe1)
chkconv(model1NB)
summary(model1NB)
drop1(model1NB, test= "Chisq") # glmer.nb produces the same model class as glmer so we can treat it the same
chkres(model1NB, dframe1$Treatment, dframe1$Season) # these are still worse than just the Poisson
# Gaussian with log transformation
model1G <- lmer(log(PollenLoad+1,10) ~ Treatment*Season
+ (1|Year) + (1|Site) + (1|Date),
data = dframe1)
summary(model1G)
drop1(model1G, test = "Chi")
chkres(model1G, dframe1$Treatment, dframe1$Season) # these are still affected by the zeroes but are probably the most balanced yet
### choose from these candidate error families
### we can see from the residuals that model1G is the best option so we use this result
summary(model1G)
drop1(model1G, test = "Chi")
### ============================================================
### Response 2: number of distinct pollen types per moth.
### Same candidate error families as for pollen load are compared;
### model2G (log-Gaussian) is preferred on residual diagnostics.
### ============================================================
### Let's now look at pollen types per-moth
### Plot it against treatment so you have an idea of what to expect
plot(PollenTypes ~ Treatment, data = dframe1)
plot(PollenTypes ~ Season, data = dframe1)
hist(dframe1$PollenTypes)
hist(log(dframe1$PollenTypes+1,10))
### Again data clearly have lots of skew, though not so bad, so it's worth checking for overdispersion
### Simplest test is to compare the mean and variance.
### In a regular Poisson distribution, mean ~= variance; if variance is much larger, data are overdispersed
mean(dframe1$PollenTypes)
var(dframe1$PollenTypes)
### Data appear possibly overdispersed, so we will try two types of model in addition to Poisson: quasi-Poisson, and negative binomial
# construct models using Date and Site as random effects
# Poisson model using lme4
model2P <- glmer(PollenTypes ~ Treatment * Season # fixed effects
                 + (1|Year) + (1|Site) + (1|Date), # random effects
                 family = poisson (link = "log"),
                 data = dframe1)
# inspect and test the model
summary(model2P)
# the best test of a GLMM is a Likelihood Ratio Test, which compares the fit of the model to if each term was removed one at a time using Chi-squared tests
# to run an LRT, use the drop1 function (i.e. drop 1 term at a time), specifying Chi-squared
drop1(model2P, test="Chisq")
# check the model's residuals
chkres(model2P, dframe1$Treatment, dframe1$Season) # these residuals do appear to have a slight positive trend but probably nothing to worry about too much
# QuasiPoisson model using MASS
model2Q <- glmmPQL(PollenTypes ~ Treatment * Season,
                   random = list(~1|Year, ~1|Site, ~1|Date),
                   family = quasipoisson (link = "log"),
                   data = dframe1)
summary(model2Q)
Anova(model2Q, type="III") # drop1 doesn't work properly with this model class so we use a Type III Anova instead
# check residuals
chkres.PQL(model2Q, dframe1$Treatment, dframe1$Season) # these look better in some ways, worse in others - slight negative trend
# Negative binomial model using lme4
model2NB <- glmer.nb(PollenTypes ~ Treatment * Season # fixed effects
                     + (1|Year) + (1|Site) + (1|Date), # random effects
                     data = dframe1)
summary(model2NB)
drop1(model2NB, test= "Chisq") # glmer.nb produces the same model class as glmer so we can treat it the same
chkres(model2NB, dframe1$Treatment, dframe1$Season) # these are very similar to the Poisson residuals
# Gaussian with log transformation
# log10(x+1) handles the zeroes before taking logs
model2G <- lmer(log(PollenTypes+1,10) ~ Treatment * Season
                + (1|Year) + (1|Site) + (1|Date),
                data = dframe1)
summary(model2G)
drop1(model2G, test = "Chi")
chkres(model2G, dframe1$Treatment, dframe1$Season) # again, these look to be the most balanced of the lot
### choose from these candidate error families
### the residuals are all reasonable but have minor faults. The only ones without obvious trends are model2G
### ============================================================
### Response 3: whether a moth carries any pollen (binary outcome),
### modelled with a binomial GLMM (logit link). No family comparison is
### needed here because a 0/1 response is binomial by construction.
### ============================================================
### finally, let's look at proportion of moths carrying pollen
### Plot it against treatment so you have an idea of what to expect
plot(PollenYN ~ Treatment, data = dframe1)
plot(PollenYN ~ Season, data = dframe1)
hist(dframe1$PollenYN)
### this data is definitely binomial, so we don't need to worry too much about model selection or residuals:
model3B <- glmer(PollenYN~Treatment*Season
                 + (1|Year) + (1|Site) + (1|Date),
                 family = binomial (link = "logit"),
                 data = dframe1)
summary(model3B)
# LRT on the fixed effects (interaction dropped first)
drop1(model3B, test="Chi")
|
eeda4fb1e6865807501cdcc236699dd0f0013c77
|
5b094ed8f782328eeb84389b0473c26b0bc8c333
|
/plot1.R
|
62544a477ffac40d9ddb555e00216b0f2cd82841
|
[] |
no_license
|
OnlineSquirrel/ExData_Plotting1
|
243e0d61cd41f9c204cdffc3af18fda4dcec9e72
|
94b265ef6bccf9376d7f02bf7108e0a7e005814a
|
refs/heads/master
| 2021-01-15T15:26:46.459491
| 2015-05-09T03:16:47
| 2015-05-09T03:16:47
| 35,194,741
| 0
| 0
| null | 2015-05-07T02:39:28
| 2015-05-07T02:39:28
| null |
UTF-8
|
R
| false
| false
| 916
|
r
|
plot1.R
|
#### plot1.R ####
## Creates a histogram of the Global Active Power variable from the
## household_power_consumption.txt data set.
## Only the two days of interest -- 1/2/2007 and 2/2/2007 (d/m/yyyy) -- are
## kept, and the plot is written to plot1.png (480 x 480 px).
##
## Fix: the raw file encodes missing readings as "?"; declaring
## na.strings = "?" turns them into NA so that as.numeric() below no longer
## produces "NAs introduced by coercion" warnings.
consDF <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                     na.strings = "?", stringsAsFactors = FALSE)
print("... Loaded household_power_consumption.txt ...")
## Filtering 2/1/2007 and 2/1/2007
consDF <- consDF[(consDF$Date == "1/2/2007" | consDF$Date == "2/2/2007"), ]
png(filename = "plot1.png", width = 480, height = 480, units = "px")
print("... Opened PNG graphics device ...")
## as.numeric() is kept for safety: it is a no-op if the column already
## parsed as numeric, and converts it cleanly if it came through as character
hist(as.numeric(consDF$Global_active_power), xlab = "Global Active Power (in kilowatts)",
     main = "Global Active Power", col = "red")
print("... Histogram created in plot1.png ...")
dev.off()
print("... Graphics device closed ...")
|
c38a6f71c05918b2cbd4806e9b72b1d41498f450
|
26630b20a11c0f7438daeead44290c02cff70e8c
|
/cachematrix.R
|
9500862cf1597e88029780e669e3dd4a784a4aea
|
[] |
no_license
|
IshaniG/ProgrammingAssignment2
|
e20b396bc45fd5261e61a05512b7ea2e74d1d02f
|
c9b9317aa07a79970434429c5cbdf3dba2c8142e
|
refs/heads/master
| 2020-12-29T03:30:46.112310
| 2016-02-25T21:21:43
| 2016-02-25T21:21:43
| 52,556,278
| 0
| 0
| null | 2016-02-25T20:58:05
| 2016-02-25T20:58:04
| null |
UTF-8
|
R
| false
| false
| 1,973
|
r
|
cachematrix.R
|
## makeCacheMatrix: wrap a square invertible matrix together with a cache
## for its inverse.
##
## Returns a list of four closures that share one environment:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- store a computed inverse in the cache
##   getinv()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # cache slot; NULL means "not computed yet"

  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # the matrix changed, so the old inverse is stale
  }

  get <- function() {
    x
  }

  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }

  getinv <- function() {
    cached_inverse
  }

  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix(). If the inverse was already computed (and the matrix
## has not changed since), the cached value is returned; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    # cache hit: skip the computation entirely
    message("getting cached data")
    return(cached)
  }
  # cache miss: compute, store, return
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
e2d7e3ab24dd661b39d9cbfbc6b863dc7f57b84b
|
8eb4b0e89610dc7fdcc68463bdc57e5adbf10849
|
/R/vcf_merge.R
|
84857aa28b2c01ef535c8c273cfff9af80288efc
|
[] |
no_license
|
cmcouto-silva/snpsel
|
f0b1fa1675fb10e15329cae874441f241f3a5e15
|
e3898308f5b0b2ae071cefdc111f5334a0319cf7
|
refs/heads/master
| 2023-05-07T22:19:08.444440
| 2021-06-06T15:55:12
| 2021-06-06T15:55:12
| 127,948,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 672
|
r
|
vcf_merge.R
|
#' @title Merge vcf files
#' @description Merge multiple vcf files into a single data.table. File paths
#'   may be supplied either as one character vector or as separate string
#'   arguments; all paths are flattened before reading. The first file is
#'   read with its header; the remaining files are assumed to share it.
#' @param ... Character. Vcf file paths (full or relative).
#' @return Data.table/data.frame object with merged vcf files.
#' @examples
#' \dontrun{
#' vcf_files <- gtools::mixedsort(list.files(path = ".", pattern = "\\.vcf$"))
#' vcf_merged <- vcf_merge(vcf_files)
#' }
#' @export
vcf_merge <- function(...) {
  # Fix: `...[1]` expands the dots as extra arguments to `[`, so the original
  # code only worked when a single character vector was supplied; calling
  # vcf_merge("a.vcf", "b.vcf") with separate path arguments errored.
  # Flattening the dots into one character vector supports both call styles.
  files <- unlist(list(...), use.names = FALSE)
  vcf_with_header <- data.table::fread(files[1], header = TRUE)
  another_vcfs <- lapply(files[-1], data.table::fread, header = FALSE)
  vcf_merged <- data.table::rbindlist(l = c(list(vcf_with_header), another_vcfs))
  return(vcf_merged)
}
|
0f717f9109b61351eccc2c90f57cbddb74541974
|
d9d213281d875a47089f1a0114919eeceedf4a60
|
/Figure 3/DESeq2.R
|
69d6763ce5ce83c9f9dc60a5013ec4edca6d5d1c
|
[] |
no_license
|
hibberd-lab/Xiong_High-light-response-of-the-rice-bundle-sheath
|
6845c4eac2300e8621cd7cd738cda95b2d91906a
|
221b6b7a9dfd87b67d6325ea77a3efb3db1d8d96
|
refs/heads/main
| 2023-03-31T08:04:41.543999
| 2021-04-15T08:20:10
| 2021-04-15T08:20:10
| 306,381,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,802
|
r
|
DESeq2.R
|
## ============================================================
## Import salmon quantifications with tximport, build the DESeq2 data set
## for the 0-minute bundle-sheath (BSS) vs mesophyll (M) comparison,
## pre-filter low-count genes, and export raw and normalised counts.
## ============================================================
library("dplyr")
library("ggplot2")
library(DESeq2)
library(tximport)
library(readr)
library("vsn")
library("pheatmap")
library("RColorBrewer")
library(ComplexHeatmap)
#
# make rice transcript database TxDb file
# (Os_tx2gene.csv maps transcript IDs to gene IDs; the first column is an
# index and is dropped)
Os_tx2gene <- read.csv ("Os_tx2gene.csv", header = TRUE)
Os_tx2gene <- Os_tx2gene[,-1]
head (Os_tx2gene)
library(tximport)
library(readr)
# all salmon quantification files in the working directory
files = list.files(pattern="*quant.sf")
files
txi <- tximport(files, type = "salmon", tx2gene = Os_tx2gene)
#make sample table
# condition: 3 bundle-sheath (0_BSS) and 3 mesophyll (0_M) replicates
sampleTable <- data.frame(condition = c(rep("0_BSS",3), rep("0_M", 3)))
rownames(sampleTable) <- c("BSS_0_1","BSS_0_2","BSS_0_3","M_0_1","M_0_2","M_0_3")
sampleTable
# extract raw counts from txi object
rawcounts <- txi$counts
head(rawcounts)
class(rawcounts)
colnames(rawcounts)<- c("BSS_0_1","BSS_0_2","BSS_0_3","M_0_1","M_0_2","M_0_3")
head(rawcounts)
write.csv(rawcounts, file= "rawcounts.csv")
library(DESeq2)
dds0 <- DESeqDataSetFromTximport(txi, sampleTable, ~condition)
head(dds0)
colData(dds0)
head(dds0)
colnames(dds0)<- c("BSS_0_1","BSS_0_2","BSS_0_3","M_0_1","M_0_2","M_0_3")
# prefilter the data
nrow(dds0)
dds0 <- estimateSizeFactors(dds0)
# keep only genes with mean normalised count >= 10 across samples
idx <- rowMeans(counts(dds0, normalized=TRUE)) >= 10
dds <- dds0[idx,]
class(dds)
head(dds)
nrow(dds)
dim(dds)
save(dds,file="dds_basemean_10_0MIN.RData")
# get and save normalized counts
norm <- as.data.frame(counts(dds, normalized=TRUE ))
head (norm)
write.csv(norm, "hnormalized_counts.csv")
norm$GENEID<-rownames(norm)
norm$GENEID<-rownames(norm)
# We plot the standard deviation of each row (genes) against the mean
lambda <- 10^seq(from = -1, to = 2, length = 1000)
cts <- matrix(rpois(1000*100, lambda), ncol = 100)
library("vsn")
meanSdPlot(cts, ranks = FALSE)
# for logarithm-transformed counts:
log.cts.one <- log2(cts + 1)
meanSdPlot(log.cts.one, ranks = FALSE)
rld <- rlog(dds, blind = FALSE)
head(assay(rld), 3)
vsd <- vst(dds, blind = FALSE)
head(assay(vsd), 3)
library("dplyr")
library("ggplot2")
df <- bind_rows(
as_data_frame(log2(counts(dds, normalized=TRUE)[, 1:2]+1)) %>%
mutate(transformation = "log2(x + 1)"),
as_data_frame(assay(rld)[, 1:2]) %>% mutate(transformation = "rlog"),
as_data_frame(assay(vsd)[, 1:2]) %>% mutate(transformation = "vst"))
colnames(df)[1:2] <- c("x", "y")
ggplot(df, aes(x = x, y = y)) + geom_hex(bins = 80) +
coord_fixed() + facet_grid( . ~ transformation)
#
df <- bind_rows(
as_data_frame(log2(counts(dds, normalized=TRUE)[, 1:2]+1)) %>%
mutate(transformation = "log2(x + 1)"))
colnames(df)[1:2] <- c("x", "y")
ggplot(df, aes(x = x, y = y)) + geom_hex(bins = 80) +
coord_fixed() + facet_grid( . ~ transformation)
## ---- Sample-to-sample distances (Euclidean, on rlog values) ----
sampleDists <- dist(t(assay(rld)))
## FIX: keep the 'dist' object intact. The original coerced it to a
## data.frame first, which collapses the n x n distances into a single
## column of n*(n-1)/2 pairwise values, so as.matrix() no longer produced
## the square symmetric matrix that pheatmap() and cmdscale() expect.
sampleDistMatrix <- as.matrix(sampleDists)
str(sampleDistMatrix)
dim(sampleDistMatrix)
write.csv(sampleDistMatrix,"sampleDists_0min.csv")
library("pheatmap")
library("RColorBrewer")
write.csv(sampleDistMatrix,file="heatmap_sampleDistance_0min.csv")
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
         #clustering_distance_rows = sampleDists,
         #clustering_distance_cols = sampleDists,
         col = colors)
#Rplot_hierachical clustering
# sample distance based on a Poisson dissimilarity (raw counts)
library("PoiClaClu")
poisd <- PoissonDistance(t(counts(dds)))
samplePoisDistMatrix <- as.matrix( poisd$dd )
pheatmap(samplePoisDistMatrix,
         clustering_distance_rows = poisd$dd,
         clustering_distance_cols = poisd$dd,
         col = colors)
## ---- PCA on the rlog-transformed data ----
plotPCA(rld)
pcaData <- plotPCA(rld, intgroup = c("condition"), returnData = TRUE)
pcaData
write.csv(pcaData, "pcaData_rld_0min.csv")
percentVar <- round(100 * attr(pcaData, "percentVar"))
ggplot(pcaData, aes(x = PC1, y = PC2, shape = group, color = group)) +
  geom_point(size = 3) +
  xlab(paste0("PC1: ", percentVar[1], "% variance")) +
  ylab(paste0("PC2: ", percentVar[2], "% variance")) +
  coord_fixed()
## ---- Classical MDS from the (square) distance matrix ----
## NOTE: the axis labels below reuse the PCA percentVar values, as in the
## original script; MDS axes are not PCs in general.
mds <- as.data.frame(colData(rld)) %>% cbind(cmdscale(sampleDistMatrix))
ggplot(mds, aes(x = `1`, y = `2`, color = condition, shape = condition )) +
  geom_point(size = 3) +
  xlab(paste0("PC1: ", percentVar[1], "% variance")) +
  ylab(paste0("PC2: ", percentVar[2], "% variance")) +
  coord_fixed()
?coord_fixed()
## ============================================================
## Differential expression: fit the DESeq model and export the shrunken
## log2 fold changes for the 0-minute BSS vs M contrast.
## ============================================================
#DE gene analysis
dds <- DESeq(dds, betaPrior=FALSE)
head(dds)
# pairwise comparison
## no big change if the lfcShrink function is used, but its p-value is not
## consistent with the pairwise comparison ##
res<-results(dds)
# shrunken log2 fold changes for the BSS vs M contrast
BSS_0_vs_M_0<-lfcShrink(dds, contrast=c("condition", "0_BSS", "0_M"))
#summary(BSS_0_vs_M_0)
#head(BSS_0_vs_M_0)
write.csv(BSS_0_vs_M_0,"BSS_0_vs_M_0_logFC.csv")
|
3e0747a748ade7a02c1786a48538249f4dd25ac8
|
986f4587e6dd7aeb9783ccf7938f3da88cb33791
|
/tools/ui.R
|
83720de72acce3748c5a53f566ed8ecec382b833
|
[
"MIT"
] |
permissive
|
saini-insead/Data_Analytics_Case_SP500
|
71e18403b357446dca1509c2ff1b59fb68b7c7c6
|
05ce3dfd5379ef9b8fec73e763fdadb123f8ee20
|
refs/heads/master
| 2021-01-13T15:02:14.102329
| 2015-09-09T13:38:45
| 2015-09-09T13:38:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,436
|
r
|
ui.R
|
# ui.R -- Shiny front-end for the S&P 500 daily-returns app.
# Layout: pageWithSidebar = header + input sidebar + tabbed main panel.
# Each input id declared here (e.g. 'datafile_name_coded') is read in
# server.R as input$<id>; each output id (e.g. 'parameters') is filled by
# the matching output$<id> in server.R.
shinyUI(pageWithSidebar(
  ##########################################
  # STEP 1: The name of the application
  headerPanel("S&P 500 Daily Returns App"),
  ##########################################
  # STEP 2: The left menu, which reads the data as
  # well as all the inputs exactly like the inputs in RunStudy.R
  sidebarPanel(
    HTML("Please reload the web page any time the app crashes. <strong> When it crashes the whole screen turns into grey.</strong> If it only stops reacting it may be because of
         heavy computation or traffic on the server, in which case you should simply wait. Plots may at times fade: you do <strong>not</strong>
         need to reload the app when this happens, simply continue using the app.This is a test version. </h4>"),
    ###########################################################
    # STEP 2.1: read the data
    HTML("<hr>"),
    HTML("Choose a data file:"),
    selectInput('datafile_name_coded', '',
                c("Financial Sector Stocks", "Tech Sector Stocks", "All Stocks (slow...)"),multiple = FALSE),
    ###########################################################
    # STEP 2.2: read the INPUTS.
    # THESE ARE THE *SAME* INPUT PARAMETERS AS IN THE RunStudy.R
    numericInput("start_date", "Select Starting date (use the arrows or type a number from 1 to 2586):", 1),
    numericInput("end_date", "Select End date (more than starting date, less than 2586):", 2586),
    numericInput("numb_components_used", "Select the number of PCA risk factors (between 1 and the total number of stocks):", 3),
    ###########################################################
    # STEP 2.3: buttons to download the new report and new slides
    HTML("<hr>"),
    HTML("<h4>Download the new HTML report </h4>"),
    downloadButton('report', label = "Download"),
    HTML("<hr>"),
    HTML("<h4>Download the new HTML5 slides </h4>"),
    downloadButton('slide', label = "Download"),
    HTML("<hr>")
  ),
  ###########################################################
  # STEP 3: The output tabs (these follow more or less the
  # order of the Rchunks in the report and slides)
  mainPanel(
    # Just set it up: hide Shiny's red error messages in the UI
    tags$style(type="text/css",
               ".shiny-output-error { visibility: hidden; }",
               ".shiny-output-error:before { visibility: hidden; }"
    ),
    # Now these are the tabs one by one.
    # NOTE: each tab has a name that appears in the web app, as well as a
    # "variable" which has exactly the same name as the variables in the
    # output$ part of code in the server.R file
    # (e.g. tableOutput('parameters') corresponds to output$parameters in server.r)
    tabsetPanel(
      # Tab: summary table of the key input parameters
      tabPanel("Parameters",
               div(class="row-fluid",
                   div(class="span12",h5("Note: The returns generated may be different from the returns of, say, the S&P 500 index, as the universe of stocks/data used may be biased (e.g. survivorship bias).
                                         All returns reported correspond to returns if 1 dollar is invested every day from-close-to-close. No transaction costs included.")),
                   tags$hr(),
                   tags$hr(),
                   actionButton("action_parameters", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Summary of Key Parameters")),
                   tags$hr(),
                   tableOutput('parameters')
               )
      ),
      # Tab: stocks ranked by a user-chosen performance criterion
      tabPanel("Ordered Stocks",
               div(class="row-fluid",
                   selectInput("order_criterion", "Select the criterion used to order the stocks:", choices=c("returns","sharpe","drawdown"), selected="returns", multiple=FALSE),
                   numericInput("stock_order", "Select the stock to plot (e.g. 1 is the best in terms of the selected criterion during this period, 2 is second best, etc):", 1),
                   actionButton("action_order_stocks", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Cumulative Returns and Table of Returns (below)")),
                   div(class="span12",plotOutput('chosen_stock')),
                   tags$hr(),
                   div(class="span6",tableOutput("chosen_stock_pnl_matrix"))
               )
      ),
      # Tab: look up a single stock by ticker
      tabPanel("Select Stock",
               div(class="row-fluid",
                   div(class="span12",h4("Select Stock")),
                   textInput("ind_stock", "Select the ticker of the stock to show (use capital letters e.g. AAPL):", "AAPL"),
                   actionButton("action_select_stock", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Cumulative Returns and Table of Returns (Below) of Selected Stock")),
                   div(class="span12",plotOutput('stock_returns')),
                   tags$hr(),
                   div(class="span6",tableOutput("stock_pnl_matrix"))
               )
      ),
      tabPanel("Histogram: All Stocks",
               actionButton("action_histogram_all", "Show/Update Results"),
               HTML("<br>"),
               plotOutput('histogram')),
      # Tab: equally-weighted basket of all stocks ("the market")
      tabPanel("The Market",
               div(class="row-fluid",
                   div(class="span12",h4("The Equally Weighted Basket of all Stocks")),
                   div(class="span12",h5("NOTE: All returns reported correspond to returns if 1 dollar is invested every day from-close-to-close. No transaction costs included.")),
                   actionButton("action_market", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Cumulative Returns and Table of Returns (below)")),
                   div(class="span12",plotOutput('market')),
                   div(class="span6",tableOutput("market_pnl_matrix"))
               )
      ),
      tabPanel("Histogram: Market",
               actionButton("action_histogram_market", "Show/Update Results"),
               HTML("<br>"),
               plotOutput('histogram_market')),
      # Tab: mean-reversion trading strategy on the market basket
      tabPanel("Market Mean Reversion",
               div(class="row-fluid",
                   div(class="span12",h4("Mean Reversion Strategy of Equal Weighted Basket of all Stocks")),
                   div(class="span12",h5("NOTE: All returns reported correspond to returns if 1 dollar is invested every day from-close-to-close. No transaction costs included.")),
                   actionButton("action_market_mr", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Cumulative Returns and Table of Returns (below)")),
                   div(class="span12",plotOutput('mr_strategy')),
                   div(class="span6",tableOutput("mr_strategy_pnl_matrix"))
               )
      ),
      # Tab: mean reversion applied only after down days
      tabPanel("Negative Market Mean Reversion",
               div(class="row-fluid",
                   div(class="span12",h4("Mean Reversion Strategy of Equal Weighted Basket of all Stocks only days after the market dropped")),
                   div(class="span12",h5("NOTE: All returns reported correspond to returns if 1 dollar is invested every day from-close-to-close. No transaction costs included.")),
                   actionButton("action_market_mr_neg", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Cumulative Returns and Table of Returns (below)")),
                   div(class="span12",plotOutput('both_markets')),
                   div(class="span6",tableOutput("both_markets_pnl_matrix"))
               )
      ),
      # Tabs: PCA risk-factor diagnostics
      tabPanel("Eigenvalues Plot",
               actionButton("action_eigenvalues", "Show/Update Results"),
               HTML("<br>"),
               plotOutput("eigen_plot")),
      tabPanel("Eigenvector Returns",
               div(class="row-fluid",
                   numericInput("vector_plotted", "Select the eigenvector to plot (e.g.1):", 1),
                   actionButton("action_eigenvector", "Show/Update Results"),
                   HTML("<br>"),
                   div(class="span12",h4("Cumulative Returns and Table of Returns (below)")),
                   div(class="span12",plotOutput('eigen_returns')),
                   div(class="span6",tableOutput("eigen_strategy_pnl_matrix"))
               )
      ),
      # Tabs: residual (idiosyncratic) return diagnostics
      tabPanel("Ordered Residuals",
               numericInput("residuals_order", "Select the stock to plot residuals portfolio for (e.g. 1 is the best, 2 is second best, etc):", 1),
               actionButton("action_ordered_res", "Show/Update Results"),
               HTML("<br>"),
               div(class="span12",plotOutput('chosen_residual'))),
      tabPanel("Residuals Market",
               actionButton("action_market_res", "Show/Update Results"),
               HTML("<br>"),
               plotOutput('res_market')),
      tabPanel("Residuals Hindsight Portfolio",
               actionButton("action_hindsight", "Show/Update Results"),
               HTML("<br>"),
               plotOutput('res_hindsight'))
    )
  )
))
|
7560310ee7e7e10a50b58961ade184ad9a976dec
|
9f17c48bf7f46cabbe96681fe6c1b8a7b49fcb80
|
/man/delete_activity_log.Rd
|
17cf9d69f3a1debb171c9c6a395b41707fdbe60d
|
[
"MIT"
] |
permissive
|
af12066/fitbitr
|
65bb496c9b2b4083aeb4bb3e72ec71ccdf43f5b6
|
1536bb801385d7c62b537218bc99ab9e92243336
|
refs/heads/master
| 2021-05-15T00:13:56.159195
| 2016-12-14T05:54:11
| 2016-12-14T05:54:11
| 103,651,464
| 0
| 0
| null | 2017-09-15T11:52:23
| 2017-09-15T11:52:22
| null |
UTF-8
|
R
| false
| true
| 561
|
rd
|
delete_activity_log.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/activity.R
\name{delete_activity_log}
\alias{delete_activity_log}
\title{Delete Activity Log}
\usage{
delete_activity_log(token, activity_log_id)
}
\arguments{
\item{token}{An OAuth 2.0 token generated by oauth_token()}
\item{activity_log_id}{The id of the activity log entry.}
}
\description{
The Delete Activity Log endpoint deletes a user's activity log entry with the given ID. A successful request will return a 204 status code with an empty response body.
}
|
b9565a9dedaf900e0fb5ee13c75082a04da5f691
|
9be0cc2090b476e5b1983a0054e70a31947317f1
|
/DeepKMeans.R
|
e97fc3479c0b24bfba9e779ad4a370a3073a2f1a
|
[] |
no_license
|
vilcek/Deep_KMeans
|
389f151a778f444f200131bd6b54a1e7037c932e
|
eba8892b03060b2be1b366466c6750fc5430bd7e
|
refs/heads/master
| 2020-12-24T18:51:23.443855
| 2018-05-15T21:34:57
| 2018-05-15T21:34:57
| 58,093,214
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,361
|
r
|
DeepKMeans.R
|
library(kernlab)
library(igraph)
library(KRLS)
## Read a benchmark graph and its ground-truth communities from `dir`.
##
## Expects two files inside ./<dir>/:
##   network.dat   -- edge list read by read.table
##   community.dat -- two columns: node id (V1), community label (V2)
##
## Returns a list of three elements:
##   [[1]] the igraph graph G (directed)
##   [[2]] its adjacency matrix A
##   [[3]] GT, a vector of ground-truth community labels named by node id
createGraph <- function(dir) {
  # file.path() replaces the original paste(..., sep=''); TRUE replaces the
  # unsafe shorthand T (which can be reassigned)
  g_file <- read.table(file.path(".", dir, "network.dat"))
  G <- graph.data.frame(g_file, directed = TRUE)
  A <- get.adjacency(G)
  CM <- read.table(file.path(".", dir, "community.dat"))
  GT <- CM$V2
  names(GT) <- CM$V1
  return(list(G, A, GT))
}
## Spectral clustering (kernlab::specc) on the adjacency matrix, with the
## number of centers fixed to the number of distinct ground-truth labels.
runSpecCluster <- function(A, GT) {
  n_communities <- length(unique(GT))
  specc(as.matrix(A), centers = n_communities)
}
## Deep k-means community detection.
##
## Builds a row-normalised Jaccard-similarity representation of the graph,
## then repeatedly compresses it: each "layer" runs k-means and keeps only
## the cluster centers (halving the number of rows). The final k-means on
## the transposed compressed representation assigns the nodes to
## length(unique(GT)) communities.
##
## @param G        igraph graph.
## @param GT       vector of ground-truth labels (used only to fix the final
##                 number of clusters).
## @param l        number of compression layers.
## @param n        nstart (random restarts) for every kmeans() call.
## @param iter_max maximum k-means iterations per fit. FIX: the original
##                 body read this from a free global variable `i`, so the
##                 function broke (or silently changed behaviour) whenever
##                 that global was absent or redefined; it is now an explicit
##                 argument defaulting to 100, the value the driver script
##                 assigned to `i`, so existing calls behave identically.
runDeepKMeans <- function(G, GT, l, n, iter_max = 100) {
  S <- similarity.jaccard(G)
  d <- degree(G, mode = 'all')
  D <- diag(1 / d)          # inverse-degree diagonal
  X <- D %*% S              # row-normalised similarity representation
  for (layer in seq_len(l)) {
    K <- kmeans(X, centers = dim(X)[1] / 2, iter.max = iter_max, nstart = n)
    X <- K$centers          # keep only the centers for the next layer
  }
  return(kmeans(t(X), centers = length(unique(GT)), iter.max = iter_max, nstart = n))
}
## ---- Driver script ----
## Load the benchmark graph, run spectral clustering and deep k-means, and
## report NMI agreement of each result with the ground-truth communities.
Graph <- createGraph('./data/200')
G <- Graph[[1]]   # igraph graph
A <- Graph[[2]]   # adjacency matrix
GT <- Graph[[3]]  # ground-truth community labels
l <- 3    # number of deep k-means layers
i <- 100  # max k-means iterations used by runDeepKMeans
n <- 100  # k-means random restarts (nstart)
set.seed(12345)   # reproducible k-means initialisation
igraph.options(vertex.size=10)
SC <- runSpecCluster(A, GT)
# NMI (normalised mutual information) between ground truth and clustering
cat('Spectral Clustering Accuracy:', compare(GT, SC[1:length(GT)], method='nmi'), '\n')
plot(create.communities(G, membership=SC[1:length(GT)]), as.undirected(G), layout=layout.kamada.kawai(as.undirected(G)))
DKM <- runDeepKMeans(G, GT, l, n)
cat('Deep K-Means Accuracy:', compare(GT, DKM$cluster, method='nmi'))
plot(create.communities(G, membership=DKM$cluster), as.undirected(G), layout=layout.kamada.kawai(as.undirected(G)))
|
250833a6f155aca7fd85d044f812c5451da321ff
|
4eea50b0d743bc6b0f81e71bca73327bc7007938
|
/tutorial/example_1.R
|
c9039f6a762c1d415a745fcd967ac5557a779307
|
[] |
no_license
|
rral0/FAO_Bfast_workshop
|
909b6641bd046dff61b28b838fde49a6fd12e60c
|
4489bfb9a37024eee08be96b67c3c09003e55c9f
|
refs/heads/master
| 2021-06-12T12:15:05.247716
| 2017-03-15T01:48:36
| 2017-03-15T01:48:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,600
|
r
|
example_1.R
|
# Example 1: ####
# BFAST monitor run on an NDMI time-series stack, followed by
# post-processing of the break layers and an accuracy assessment.
source("~/wur_bfast_workshop/R-scripts/tutorial_0.R")
source("~/wur_bfast_workshop/R-scripts/accuracy_assessment.R")
example_title <- 1
results_directory <- file.path(results_directory,paste0("example_",example_title))
dir.create(results_directory)
log_filename <- file.path(results_directory, paste0(format(Sys.time(), "%Y-%m-%d-%H-%M-%S"), "_example_", example_title, ".log"))
start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
result <- file.path(results_directory, paste0("example_", example_title, ".grd"))
# run bfmSpatial on all cores; elapsed time is written to the log below
time <- system.time(bfmSpatial(ndmiStack, start = c(2010, 1),
                               formula = response ~ harmon,
                               order = 1, history = "all",
                               filename = result,
                               mc.cores = detectCores()))
write(paste0("This process started on ", start_time,
             " and ended on ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
             " for a total time of ", time[[3]]/60," minutes"), log_filename, append=TRUE)
## Post-processing ####
bfm_ndmi <- brick(result)
#### Change (layer 1)
change <- raster(bfm_ndmi,1)
plot(change, col=rainbow(7),breaks=c(2010:2016))
#### Magnitude (layer 2)
magnitude <- raster(bfm_ndmi,2)
magn_bkp <- magnitude
magn_bkp[is.na(change)] <- NA  # keep magnitude only where a break was detected
plot(magn_bkp,breaks=c(-5:5*1000),col=rainbow(length(c(-5:5*1000))))
plot(magnitude, breaks=c(-5:5*1000),col=rainbow(length(c(-5:5*1000))))
#### Error (layer 3)
error <- raster(bfm_ndmi,3)
plot(error)
#### Detect deforestation (breaks with negative magnitude only)
def_ndmi <- magn_bkp
def_ndmi[def_ndmi>0]=NA
plot(def_ndmi)
plot(def_ndmi,col="black", main="NDMI_deforestation")
writeRaster(def_ndmi,filename = file.path(results_directory,paste0("example_",example_title,"_deforestation_magnitude.grd")),overwrite=TRUE)
def_years <- change
def_years[is.na(def_ndmi)]=NA
years <- c(2010,2011,2012,2013,2014,2015,2016,2017)
plot(def_years, col=rainbow(length(years)),breaks=years, main="Detecting deforestation after 2010")
# FIX: write the year-of-deforestation raster (def_years). The original line
# wrote def_ndmi (the magnitude layer) into the *_deforestation_dates.grd
# file, duplicating the previous writeRaster call under the wrong name.
writeRaster(def_years,filename = file.path(results_directory,paste0("example_",example_title,"_deforestation_dates.grd")),overwrite=TRUE)
#### Accuracy Assessment
Forest_mask <- raster(file.path(workshop_folder,"data/Fmask_2010_Peru.tif"))
validation_forest_map <- raster(file.path(workshop_folder,"data/Validation_forest_2016.tif"))
sample_size <- calcSampleSize(def_years,Forest_mask,c(0.9,0.7),0.01)
samples <- extractRandomSamples(def_years,Forest_mask,sample_size,results_directory,"samples")
val_sample <- extractValidationValues(validation_forest_map, samples, Forest_mask)
conf_matrix <- assessAcuracy(samples,val_sample)
conf_matrix
|
1542f96bafec335847b5ba0274a0dbd68f4aec77
|
c32c54f47c35737ea4ba3a026c81b594fd02b1cf
|
/man/dateRangeForm.Rd
|
92d77c056a6685c2c47b81e453d8d30517684554
|
[] |
no_license
|
quinnpertuit/rDailyFantasy
|
cd46596122d979b5c389d67b19bc354109fa0722
|
fb00d802573c855f58d5b7b4d84f96d6724a66a6
|
refs/heads/master
| 2022-10-24T06:04:42.025973
| 2020-01-11T15:12:36
| 2020-01-11T15:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 450
|
rd
|
dateRangeForm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dateRangeFormFunction.R
\name{dateRangeForm}
\alias{dateRangeForm}
\title{labs dateRange Formatting Function}
\usage{
dateRangeForm(yyyymmdd = "2018/03/29", days = 33)
}
\arguments{
\item{yyyymmdd}{"2018/03/29"}
\item{days}{any# i.e:44}
}
\value{
list of dates for use
}
\description{
labs dateRange Formatting Function
}
\examples{
dateRangeForm("2018/03/29", days = 33)
}
|
87044863f12ab244ab2294650712cabb84eae855
|
c12e3bb6a547c043bd7458d94f0b7a98f3cbd1fd
|
/man/geom_normalviolin.Rd
|
9a9d0608fe17bcbd554c62362ba71f24e51baf79
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
wjschne/ggnormalviolin
|
5767fd6732b918ecb04a07a31a3e1ef018b296aa
|
b6358cf9621cd03cfc502f729971b9cefbdad264
|
refs/heads/main
| 2021-06-06T07:28:15.746140
| 2021-05-10T20:49:18
| 2021-05-10T20:49:18
| 154,597,584
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,471
|
rd
|
geom_normalviolin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{geom_normalviolin}
\alias{geom_normalviolin}
\title{Creates normal violins with specified means and standard deviations}
\usage{
geom_normalviolin(
mapping = NULL,
data = NULL,
mu = NULL,
sigma = NULL,
nsigma = 4,
p_tail = 0,
p_lower_tail = p_tail/2,
p_upper_tail = p_tail/2,
tail_fill = "black",
tail_alpha = 0.4,
width = 0.6,
upper_limit = NA,
lower_limit = NA,
face_left = TRUE,
face_right = TRUE,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
...
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{mu}{A vector of means}
\item{sigma}{A vector of standard deviations}
\item{nsigma}{The number of standard deviations each violin should extend}
\item{p_tail}{The 2-tailed proportion that should be highlighted.
Can be overridden with p_lower_tail and/or p_upper_tail}
\item{p_lower_tail}{The proportion of the distribution that should be
highlighted in the lower tail. Defaults to half of `p_tail`.}
\item{p_upper_tail}{The proportion of the distribution that should be
highlighted in the upper tail. Defaults to half of `p_tail`.}
\item{tail_fill}{fill color for tails}
\item{tail_alpha}{alpha value for tails}
\item{width}{Width of normal violin}
\item{upper_limit}{upper limit for polygons. Needed in case setting
limits in scale_y_continuous or ylim distorts the polygons.}
\item{lower_limit}{lower limit for polygons. Needed in case setting
limits in scale_y_continuous or ylim distorts the polygons.}
\item{face_left}{Display left half of violins. Defaults to `TRUE`}
\item{face_right}{Display right half of violins. Defaults to `TRUE`}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.}
\item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
}
\description{
Creates normal violins with specified means and standard deviations
}
\section{Aesthetics}{
\code{geom_normviolin} understands the following aesthetics
(required aesthetics are in bold):
\itemize{
\item \strong{x}
\item \strong{mu} (mean of the normal distribution)
\item \strong{sigma} (standard deviation of the normal distribution)
\item width (width of violin)
\item nsigma (number of standard deviations to which the violins extend)
\item p_tail (2-tailed proportion of tails highlighted)
\item p_upper_tail (proportion of upper tails highlighted)
\item p_lower_tail (proportion of lower tails highlighted)
\item face_left (display left half of violin?)
\item face_right (display right half of violin?)
\item color
\item fill
\item alpha (of fills)
\item group
\item linetype
\item size (of lines)
}
}
|
00f9870de954f9b6f119ebb912ac2ecb01d61804
|
bc1597ec48bd9e1ec9978427f99f97b3a70f9518
|
/rWorkspace/Rcode.R
|
efbad1b15c3de88636d2637392e9c3382e696a64
|
[] |
no_license
|
visparashar/alarm-flood-analysis
|
6a73509ea01974ba4e3b399da8f0907645117906
|
e2db82586000bf48d5e7322f49ca4b481ec8f836
|
refs/heads/master
| 2021-04-12T04:54:16.463578
| 2018-06-13T20:00:37
| 2018-06-13T20:00:37
| 125,963,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,662
|
r
|
Rcode.R
|
callRFunction <- function(
input_file_path ,output_file_path,source_of_other_lib,algorithm_to_run,
is_model_need_to_run_or_training_set,rworkspace_path , second_input_for_prefilter,pattern_mining_file_path
,cluster_result_file_path,merged_result_file_path,test_rca_file_path,frequency_count_file_path,other_path_if_required){
getwd()
setwd(rworkspace_path)
source('filename_constants.R')
source('logger.R')
# setwd(input_file_path)
input_path <- try(read.csv(input_file_name), silent = TRUE)
# need to write function to check whether the required library are installed or not
# if (inherits(input_path, "try-error"))
# {
# message_string = paste("Error 1: Unable to read file")
# logEvent(message_string, "Error")
# print (message_string)
# return (FALSE)
# }
if(algorithm_to_run == CONST_PREDICTION_ALOG && is_model_need_to_run_or_training_set == 'true')
{
source(paste0(source_of_other_lib,'/',CONST_NAIVE_BAYES))
naive_baye_response = CustomNaiveBayesFunc(input_file_path,source_of_other_lib,output_file_path,'true')
return(naive_baye_response)
}else if(algorithm_to_run == CONST_PREDICTION_ALOG && is_model_need_to_run_or_training_set == 'false'){
# neeed to right the model to be called
source(paste0(source_of_other_lib,'/',CONST_NAIVE_BAYES))
naive_baye_response = CustomNaiveBayesFunc(input_file_path,source_of_other_lib,output_file_path,'false')
return(naive_baye_response)
}
if(algorithm_to_run == CONST_PREFILTER_ALGO && is_model_need_to_run_or_training_set == 'true')
{
source(paste0(source_of_other_lib,'/',CONST_PREFILTER_FILENAME))
prefilter_response = PrefilterFunc(input_file_path,output_file_path,second_input_for_prefilter)
return(prefilter_response)
}else if (algorithm_to_run == CONST_PREFILTER_ALGO && is_model_need_to_run_or_training_set == 'false'){
}
if(algorithm_to_run == CONST_MSW_CLUSTER_ALSO && is_model_need_to_run_or_training_set == 'true')
{
source(paste0(source_of_other_lib,'/',CONST_MSW_CLUSTER_FILENAME))
clasturing_response = CalculateMSWMatrix(input_file_path,second_input_for_prefilter,output_file_path)
source(paste0(other_path_if_required,'/',CONST_FREQ_PTRN_MINING_FILENAME))
frequentpattern = CalculateFrequentPattern(second_input_for_prefilter,pattern_mining_file_path,cluster_result_file_path,
merged_result_file_path,test_rca_file_path,frequency_count_file_path)
return(frequentpattern)
}else if(algorithm_to_run == CONST_TEST_RCA_ALGO && is_model_need_to_run_or_training_set == 'false'){
source(paste0(source_of_other_lib,'/',CONST_TEST_RCA_FILENAME))
recommendation = RootCauseAnalysis(input_file_path,test_rca_file_path,output_file_path)
return(recommendation)
}
}
# callRFunction(
# 'C:/Workspace_alarmflood/alarm-food-analysis/data/input_data/prediction_data/merged_data/training_data',
# 'C:/Workspace_alarmflood/alarm-food-analysis/data/prediction_output_data/',
# 'C:/Workspace_alarmflood/alarm-food-analysis/rWorkspace/prediction',
# 'prediction',
# FALSE,
# 'C:/Workspace_alarmflood/alarm-food-analysis/rWorkspace'
#
#
# )
# callRFunction('C:/Workspace_alarmflood/alarm-food-analysis/data/input_data/prediction_data/merged_data/training_data',
# 'C:/Workspace_alarmflood/alarm-food-analysis/data/prediction_output_data/',
# 'C:/Workspace_alarmflood/alarm-food-analysis/rWorkspace/prediction',
# 'prediction',
# 'true',
# 'C:/Workspace_alarmflood/alarm-food-analysis/rWorkspace'
# )
|
e75023b8fee96f764178ece23ac5b37ac9b3bf4e
|
a6440406634a0eab79376dbe749b4ea4fc9d8e5b
|
/R/calcZScale.R
|
e4d7451623345ce920dd4d234779db57aa492d67
|
[
"MIT"
] |
permissive
|
wStockhausen/wtsGMT
|
fe984ffb9a90e87a8ddd066b3a16819cf0091ed5
|
5b5c036ab2bd4965f78fb1bbfa0d7535cae2dce1
|
refs/heads/master
| 2020-12-24T08:42:20.096481
| 2016-09-08T02:32:27
| 2016-09-08T02:32:27
| 21,180,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 618
|
r
|
calcZScale.R
|
#'
#'@title Calculate a z scale for plotting data on a map.
#'
#'@description Function to calculate a z scale for plotting data on a map.
#'
#'@param z - data vector to calculate scale from
#'@param logtr - flag (T/F) to ln-transform z before calculating the scale
#'@param satfac - saturation factor relative to maximum
#'
#'@return the scale to be used for plotting z
#'
#'@details none.
#'
#'@export
#'
calcZScale<-function(z,logtr=FALSE,satfac=1.1){
if (logtr) {
zscl<-satfac*max(log(z+1)/log(10),na.rm=TRUE);
} else {
zscl<-satfac*max(z,na.rm=TRUE);
}
return(zscl)
}
|
e921de535f5f1cfe606c388ab6f80b5e00087de4
|
9711b85eb0754c2bcbe33a3a2acad8d49af64962
|
/run_sensitivities.R
|
5253314eff8a17a56693d7964647697151c1ccbe
|
[] |
no_license
|
Cole-Monnahan-NOAA/stpollock
|
9be39d8ff74124c055107ce3d0ea56b8fa6ff064
|
56225e5a830e4246823b4c84bc70863fd9e7e766
|
refs/heads/master
| 2021-06-03T07:46:33.828893
| 2021-05-19T16:19:50
| 2021-05-19T16:19:50
| 150,113,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 665
|
r
|
run_sensitivities.R
|
### A series of sensitivity analyses to run. Each of these takes a
### long time to run, and specific settings can be found in each
### script. The result is a plot in the 'plots' folder and a
### result file in 'results' folder.
## Different assumptions about the decorrelation range
source("sensitivities/run_kappa.R")
## Aniso turned off (isotropic) or fixed at the values from
## previous run
source("sensitivities/run_aniso.R")
## Test different spatial configurations: no space (NS), space only (S), or
## full spatiotemporal (ST)
source("sensitivities/run_spatialconfig.R")
source("sensitivities/run_catchability.R")
source("sensitivities/run_resolution.R")
|
71c6cd3a10f4c5a55dab7fde6c66f58f69205bd4
|
c4dd634c7bb89b5594da4e06033dfaad1bffebe1
|
/join_turnstile_gtfs.R
|
8bd1c1ed09eb5de0d0266d9ff1831d36ac76bcef
|
[] |
no_license
|
rivatal/subway
|
a55206ba6160d71dc270eb5acb591a72a71caa10
|
644c286bd96e81e14a5745e5c4a7921966dbcbbd
|
refs/heads/master
| 2020-12-06T14:59:59.286042
| 2016-10-10T03:54:21
| 2016-10-10T03:54:21
| 66,381,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
join_turnstile_gtfs.R
|
###############################################################################################
#Riva Tropp
#Join turnstile and gtfs dataframes
###############################################################################################
library(dplyr)
setwd(".")
matchtable <- read.table("smalleredits.txt",header=FALSE,
sep=",",fill=TRUE,quote = "",row.names = NULL,
stringsAsFactors = FALSE)
|
50354ed91c077d83aa68d4469e6a7c15882bfa23
|
6b9391e0a520fd33212ebd60eb4f9439d768d0a3
|
/expo_graphic.R
|
186acc6354d354d23c798c2da2635c73dd5152c8
|
[] |
no_license
|
limafb/datasciencecoursera
|
6f2bfbe2c0b4966615838ccc481f51ae22dba269
|
767aad461aae23057fd85c8f9abd143743f9ecf8
|
refs/heads/master
| 2016-09-06T01:36:58.076758
| 2015-03-02T12:24:55
| 2015-03-02T12:24:55
| 25,431,341
| 0
| 0
| null | 2015-02-12T16:13:30
| 2014-10-19T15:42:53
| null |
UTF-8
|
R
| false
| false
| 2,935
|
r
|
expo_graphic.R
|
old.wd <- getwd()
new.wd <- ("C:/Users/FabioDesk/Documents/GitHub/datasciencecoursera")
setwd(new.wd)
pollution <- read.csv("data/avgpm25.csv", colClasses = c("numeric", "character", "factor", "numeric", "numeric"))
head(pollution)
summary(pollution$pm25)
boxplot(pollution$pm25, col = "blue")
abline(h = 12)
hist(pollution$pm25, col = "green")
rug(pollution$pm25)
#Working with breaks
hist(pollution$pm25, col = "green", breaks = 100)
rug(pollution$pm25)
hist(pollution$pm25, col = "green")
abline(v = 12, lwd = 2)
abline(v = median(pollution$pm25), col = "magenta", lwd = 4)
barplot(table(pollution$region), col = "wheat", main = "Number of Counties in Each Region")
#Multiple Boxplots
boxplot(pm25 ~region, data = pollution, col = "red")
#Multiple Histograms
par(mfrow = c(2, 1), mar = c(4, 4, 2, 1))
hist(subset(pollution, region == "east") $pm25, col = "green")
hist(subset(pollution, region == "west") $pm25, col = "green")
#Scatterplot
with(pollution, plot(latitude, pm25))
abline(h = 12, lwd = 2, lty = 2)
#Scatterplot - Using Color
with(pollution, plot(latitude, pm25, col = region))
abline(h = 12, lwd = 2, lty = 2)
#Multiple Scatterplots
par(mfrow = c(1, 2), mar = c(5, 4, 2, 1))
with(subset(pollution, region == "west"), plot(latitude, pm25, main = "West"))
with(subset(pollution, region == "east"), plot(latitude, pm25, main = "East"))
#Base Plot
library(datasets)
data(cars)
with(cars, plot(speed, dist))
#Lattice Plot
library(lattice)
state <- data.frame(state.x77, region = state.region)
xyplot(Life.Exp ~Income | region, data = state, layout = c(4, 1))
#ggplot2 Plot
library(ggplot2)
data(mpg)
qplot(displ, hwy, data = mpg)
#Simple Base Graphics: Histogram
library(datasets)
hist(airquality$Ozone) ##Draw a new plot
#Simple Base Graphics: Scatterplot
library(datasets)
with(airquality, plot(Wind, Ozone))
#Simple Base Graphics: Boxplot
library(datasets)
airquality <- transform(airquality, Month = factor(Month))
boxplot(Ozone ~ Month, airquality, xlab = "Month", ylab = "Ozone (ppb)")
#How Does a Plot Get Created?
library(datasets)
with(faithful, plot(eruptions, waiting)) ##Make plot appear on screen device
title(main = "Old Faithful Gayser data") ## Annotate with a title
pdf(file = "myplot.pdf") ##Open PDF device; creat 'myplot.pdf' in my working directory
## Create plot and send to a file (no plot appears on screen)
with(faithful, plot(eruptions, waiting))
title(main = "Old Faithful Geyser data") ## Annotate plot; still nothing on screen
dev.off() ## Close the PDF file device
# Now you can view the file 'myplot.pdf' on your computer
#Copying Plots
library(datasets)
with(faithful, plot(eruptions, waiting)) ## Create plot on screen device
title(main = "Old Faithful Geyser data") ## Add a main title
dev.copy(png, file = "geyserplot.png") ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device!
|
b5314ca598af3656380c6da1e5e8044d0a9618b1
|
95f7d939cc3b941f3eec5b7ae7e99a0976816852
|
/main_Materials.R
|
4806e92db4a8fc284db849a518667a8472d568f3
|
[] |
no_license
|
fengzixue96/IE580-Trading-Strategy-Project
|
aacd6dd976b1db46ea2a68a827345ce613ec7499
|
5150ce7096d6175c7cea42630a2238fc4573be44
|
refs/heads/master
| 2022-10-31T15:02:08.591674
| 2020-06-14T07:59:04
| 2020-06-14T07:59:04
| 272,157,711
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 14,967
|
r
|
main_Materials.R
|
rm(list=ls()) #清除全部对象
Sys.setenv(TZ = 'UTC') #设置系统市区到“GMT(UTC)”,使其变为默认时区。from 'xts'
setwd("C:/Users/HP/Desktop/Lesson/580 Trading Stretegy/previous/code") #设置工作路径,也就是临时设置工作目录
library(parallel)
library(xts)
library(quantmod)
library(PerformanceAnalytics)
library(caret)
detectCores() #检查当前电脑可用核数
#使用全部核数
if (Sys.info()['sysname'] == "Windows")
{
library(doParallel) #支持Windows和unix-like
registerDoParallel(cores = detectCores())
print("Windows parallel backend 'doParallel' registered
- required for 'foreach'")
} else {
library(doMC) #只支持unix-like
registerDoMC(cores = detectCores())
print("Unix parallel backend 'doMC' registered - required for 'foreach'")
}
source(file = "UtilityFunctions.R") #“预装“已写好的函数
#自己读取rds
library(data.table)
data <- readRDS("data/svmOptlist1.tech.oos.rds")
start_time <- Sys.time()
# Load our GICS sector seperated CRSP data
crsp_daily_xts <- readRDS(file = "data/crsp_daily_Materials_xts.rds")
# Load the market returns (S&P500)
sp500_xts <- readRDS(file = "data/sp500_1981_2018_xts.rds")
# Load computstat
computstat <- readRDS(file = "data/computstatQuery_subset.rds")
# Load gvkey2Permno
gvkey2Permno <- readRDS(file = "data/gvkey2Permno_subset.rds")
start_date = "2007-01-01"
end_date = "2010-12-31"
# Subset
sp500_xts <- sp500_xts[paste0(start_date,"::",end_date)]
crsp_daily_xts <-
crsp_daily_xts[paste0(start_date,"::",end_date)]
#survivial bias
securities_start <- unique(na.omit(crsp_daily_xts[index(sp500_xts)[1], c("PERMNO", "PRC", "RETX")])$PERMNO)
securities_end <- unique(na.omit(crsp_daily_xts[index(sp500_xts)[length(sp500_xts)], c("PERMNO", "PRC", "RETX")])$PERMNO)
securities <- intersect(securities_start,securities_end)
############################Training Data Construction##############################
# parameteres:
target.win = 1
# fundamental indicators
fundamentals <-
c(
#28 fundamental features
"actq",
"atq",
"ceqq",
"chq",
"cshoq",
"dlcq",
"dlttq",
"dpq",
"dvpq",
"epsfxq",
"invtq",
"ivstq",
"lctq",
"ltq",
"niq",
"oancfy",
"piq",
"rectq",
"revtq",
"txtq",
"uniamiq",
"uopiq",
"wcapq",
"xoprq",
"glaq",
"rectq",
"lltq",
"chechy"
)
sig = function(x) ifelse(is.na(x),0,ifelse(x>0,1,0))
# append data with training labels and features:
for(i in 1:length(securities)){
print(i)
x = crsp_daily_xts[crsp_daily_xts$PERMNO == securities[i], c("PERMNO", "PRC", "RETX", "VOL")]
if(length(x$PERMNO) != length(sp500_xts)) next
storage.mode(x) <- "double"
#clean data
x$PRC = na.locf(abs(x$PRC),fromLast = TRUE)
x[x$VOL == 0, "VOL"] <- NA
x$VOL = na.locf(abs(x$VOL),fromLast = TRUE)
# target return
# EWMA vol adjusted forward looking log return:
x$rtn = diff(log(x$PRC), target.win)
x$fwd.raw.rtn = lag(x$rtn, -target.win)
x$ewma.vol = ema.vol(x$fwd.raw.rtn, 2/(124+1)) #180
x$vol.adj.rtn = x$fwd.raw.rtn / x$ewma.vol
# 3MO and 1YR momentum indicators:
# 3MO and 1Y trailing returns for symbol and sp500:
x$trailing.3MO.rtn = diff(log(x$PRC), 63)
x$trailing.1YR.rtn = diff(log(x$PRC), 252)
x$trailing.3MO.rtn.mkt = rollapply(sp500_xts, 63, FUN = sum, align = "right")
x$trailing.1YR.rtn.mkt = rollapply(sp500_xts, 252, FUN = sum, align = "right")
# momentum - 3MO and 1YR excess returns:
x$mom.3MO = x$trailing.3MO.rtn - x$trailing.3MO.rtn.mkt
x$mom.1YR = x$trailing.1YR.rtn - x$trailing.1YR.rtn.mkt
# 3MO and 1MO delta volume indicators:
x$vol.delta.3MO = PctVolDelta(x$VOL, 63)
x$vol.delta.1MO = PctVolDelta(x$VOL, 21)
# number of 12-month highs and 12-month lows indicator:
x$n.high = lag(runMax(x$PRC, 252),1)
x$n.low = lag(runMin(x$PRC, 252),1)
x$n.high.low = ifelse(x$PRC > x$n.high, 1,
ifelse(x$PRC < x$n.low, -1, 0))
# max daily return indicator:
x$max.rtn = rollapply(x$rtn, 21, FUN = max, align = "right")
# resistance indicator:
x$rl.pct.diff = RLxts(na.trim(x$PRC), 21, 21)$PctDiff
# data filters:
x$pv = x$PRC*x$VOL
# LIQ
x$pred = sign(x$rtn) * log(x$pv)
x$liq = rollapply(x, 63, function(d) coef(lm(rtn~pred, data=d))[2], align = "right", by.column=FALSE)
# DTV
x$dtv = ema.dtv(x$pv,2/(63+1)) #91
# fundamental features:
permno = unique(x[,"PERMNO"])
gvkey = gvkey2Permno[gvkey2Permno$LPERMNO == permno,"gvkey"]
comp = computstat[(computstat$GVKEY == gvkey),]
comp = comp[order(comp$fdateq), ]
dates = index(x)
fund = data.frame(matrix(ncol = 28, nrow = length(dates)))
colnames(fund) <- fundamentals
for(j in 1:length(dates)){
# use fdateq first
comp$diff = as.numeric(as.Date(as.character(comp$fdateq), format("%Y/%m/%d"))
-as.Date(as.character(dates[j])))
match = tail(comp[comp$diff<=0,fundamentals],1)
if (nrow(match)==1) {
fund[j,] = t(match)
next
}
# use rdq as fall back
comp$diff = as.numeric(as.Date(as.character(comp$rdq), format("%Y/%m/%d"))
-as.Date(as.character(dates[j]))) + 31 #45
match = tail(comp[comp$diff<=0,fundamentals],1)
if (nrow(match)==1) {
fund[j,] = t(match)
next
}
}
fund = cbind(dates, fund)
fund$sac = fund$actq - fund$chq - fund$lctq + fund$dlcq
fund$abbs = fund$sac - lag(fund$sac, 252)
fund$abcf = fund$uniamiq-fund$oancfy
fund$fh = sig(fund$uniamiq)+sig(fund$oancfy)+sig(diff(fund$uniamiq, 252))+sig(-fund$abcf)+
sig(-diff(fund$dlttq/fund$atq, 252))+sig(diff(fund$actq/fund$lctq, 252))
fund$wa = fund$actq-fund$lctq
fund$qr = (fund$actq-fund$invtq)/fund$lctq
fund$dpr = fund$dvpq/fund$niq
fund$bv = fund$atq-fund$ltq
fund$bvtd = fund$bv-fund$dlcq
fund$rs = fund$rectq/fund$revtq
fund$da = fund$dlcq/fund$atq
fund$de = fund$dlcq/fund$ceqq
fund$ca = fund$chq/fund$atq
fund$li = fund$ltq/fund$niq
fund$re = fund$niq/fund$ceqq
fund$ss = fund$revtq/fund$cshoq
fund = xts(fund[,-1], order.by=fund[,1])
x = merge(x,fund)
x = na.locf(x, fromLast = TRUE)
x[is.na(x)] = -1
if(i == 1){
train.data = x
} else {
train.data = rbind(train.data, x)
}
rm(x)
}
#saveRDS(train.data, "data/train.data.tech.fund.rds")
#train.data = readRDS("data/train.data.tech.fund.oos.rds")
# clean training data:
train.data = train.data[complete.cases(train.data), ]
train.data = train.data[order(index(train.data)), ]
train.data = as.data.frame(train.data)
tdf = cbind(Date = as.Date(rownames(train.data)), train.data)
#saveRDS(tdf, "data/tdf.tech.oos.2.rds")
#tdf = readRDS("data/tdf.rds")
#################################Portfolio Construction#################################
# training dates:
uniq.dates = sort(unique(tdf$Date))
# first and last trading date:
start = as.Date("2008-04-01")
end = as.Date("2010-10-01")
month.end = seq(start, end, by = "month") - 1
idx = rep(NA, length(month.end))
# isloate closest trading day to month end:
for(i in 1:length(month.end)){
date.diff = month.end[i] - uniq.dates
if(month.end[i] %in% uniq.dates){
idx[i] = match(month.end[i], uniq.dates)
} else {
idx[i] = which.min(pmax(date.diff, 0)) - 1
}
}
train.dates = uniq.dates[idx]
# account equity, transaction, and model tuning containers:
#---------------------------------------
# model list containers:
svmOptlist1 = list(NA)
randlist1 = list(NA)
svmOptlist2 = list(NA)
randlist2 = list(NA)
svmOptlist3 = list(NA)
randlist3 = list(NA)
# equity:
equity = xts(matrix(1e6, ncol = 6, nrow = length(train.dates)),
order.by = train.dates)
colnames(equity) = c("svmOpt1", "svmOpt2", "svmOpt3", "rand1", "rand2", "rand3")
#saveRDS(equity, "data/equity.tech.rds")
registerDoMC(detectCores())
################## main loop to generate transactions:##################
#-----------------------------------------------------------------------
#parameters
hist = 60
lag = 1
beta = 0.45
NL = 10
NS = 10
ftype = "tech"
tune = TRUE
linear = FALSE
a = 1:(length(train.dates)-5)
b = a[seq(1, length(a), 3)]
# The first portfolio
print("Processing the first portfolio...")
for(i in b){
# optimal svm model:
print(paste(i, "Processing...", sep = " "))
svmOptlist1[[i]] = subPort(tdf,
train.dates[i],
train.dates[i+3]-train.dates[i],
hist,
lag,
beta,
equity[i,"svmOpt1"],
NL,
NS,
ftype,
tune,
linear,
rand = FALSE)
equity[i+1,"svmOpt1"] = svmOptlist1[[i]]$PandL["eomEquity.1m"]
equity[i+2,"svmOpt1"] = svmOptlist1[[i]]$PandL["eomEquity.2m"]
equity[i+3,"svmOpt1"] = svmOptlist1[[i]]$PandL["eomEquity"]
print(svmOptlist1[[i]]$PandL["Total"])
# random long short test:
randlist1[[i]] = subPort(tdf,
train.dates[i],
train.dates[i+3]-train.dates[i],
hist,
lag,
beta,
equity[i,"rand1"],
NL,
NS,
ftype,
tune,
linear,
rand = TRUE)
equity[i+1,"rand1"] = randlist1[[i]]$PandL["eomEquity.1m"]
equity[i+2,"rand1"] = randlist1[[i]]$PandL["eomEquity.2m"]
equity[i+3,"rand1"] = randlist1[[i]]$PandL["eomEquity"]
print(randlist1[[i]]$PandL["Total"])
}
#The second portfolio
print("Processing the second portfolio...")
for(i in (b+1)){
# optimal svm model:
print(paste(i, "Processing...", sep = " "))
svmOptlist2[[i]] = subPort(tdf,
train.dates[i],
train.dates[i+3]-train.dates[i],
hist,
lag,
beta,
equity[i,"svmOpt2"],
NL,
NS,
ftype,
tune,
linear,
rand = FALSE)
equity[i+1,"svmOpt2"] = svmOptlist2[[i]]$PandL["eomEquity.1m"]
equity[i+2,"svmOpt2"] = svmOptlist2[[i]]$PandL["eomEquity.2m"]
equity[i+3,"svmOpt2"] = svmOptlist2[[i]]$PandL["eomEquity"]
print(svmOptlist2[[i]]$PandL["Total"])
# random long short test:
randlist2[[i]] = subPort(tdf,
train.dates[i],
train.dates[i+3]-train.dates[i],
hist,
lag,
beta,
equity[i,"rand2"],
NL,
NS,
ftype,
tune,
linear,
rand = TRUE)
equity[i+1,"rand2"] = randlist2[[i]]$PandL["eomEquity.1m"]
equity[i+2,"rand2"] = randlist2[[i]]$PandL["eomEquity.2m"]
equity[i+3,"rand2"] = randlist2[[i]]$PandL["eomEquity"]
print(randlist2[[i]]$PandL["Total"])
}
#The third portfolio
print("Processing the third portfolio...")
for(i in (b+2)){
# optimal svm model:
print(paste(i, "Processing...", sep = " "))
svmOptlist3[[i]] = subPort(tdf,
train.dates[i],
train.dates[i+3]-train.dates[i],
hist,
lag,
beta,
equity[i,"svmOpt3"],
NL,
NS,
ftype,
tune,
linear,
rand = FALSE)
equity[i+1,"svmOpt3"] = svmOptlist3[[i]]$PandL["eomEquity.1m"]
equity[i+2,"svmOpt3"] = svmOptlist3[[i]]$PandL["eomEquity.2m"]
equity[i+3,"svmOpt3"] = svmOptlist3[[i]]$PandL["eomEquity"]
print(svmOptlist3[[i]]$PandL["Total"])
# random long short test:
randlist3[[i]] = subPort(tdf,
train.dates[i],
train.dates[i+3]-train.dates[i],
hist,
lag,
beta,
equity[i,"rand3"],
NL,
NS,
ftype,
tune,
linear,
rand = TRUE)
equity[i+1,"rand3"] = randlist3[[i]]$PandL["eomEquity.1m"]
equity[i+2,"rand3"] = randlist3[[i]]$PandL["eomEquity.2m"]
equity[i+3,"rand3"] = randlist3[[i]]$PandL["eomEquity"]
print(randlist3[[i]]$PandL["Total"])
}
registerDoSEQ()
#saveRDS(svmOptlist1, "data/svmOptlist1.tech.25.oos.rds")
#saveRDS(svmOptlist2, "data/svmOptlist2.tech.25.oos.rds")
#saveRDS(svmOptlist3, "data/svmOptlist3.tech.25.oos.rds")
#saveRDS(equity, "data/equity.tech.25.oos.rds")
#########################Performance Evaluation##############################
# assign realized classes and construct confusion matricies:
# svm model classes:
svmClasses = lapply(svmOptlist1, function(x) x$Classes)
svmClasses = do.call("rbind", svmClasses)
svm.cm = confusionMatrix(as.factor(svmClasses$predClass),
as.factor(svmClasses$actClass))
svmClasses$accuracy = ifelse(svmClasses$predClass == svmClasses$actClasses,
"Right",
"Wrong")
#equity = readRDS(file = "data/equity.tech.50.rds")
# performance reporting:
equity$svmOpt = equity$svmOpt1+equity$svmOpt2+equity$svmOpt3
equity$rand = equity$rand1+equity$rand2+equity$rand3
drops <- c("svmOpt1","svmOpt2","svmOpt3","rand1","rand2","rand3")
equity.total = equity[,!(names(equity) %in% drops)]
returns = diff(log(equity.total[4:(length(train.dates)-3)]))
mkt.rtns = rollapply(sp500_xts, 21, FUN = sum, align = "right")
mkt.rtns = mkt.rtns[index(mkt.rtns) %in% train.dates[4:(length(train.dates)-3)]]
returns = merge(returns, mkt.rtns)
returns$svmOpt[1]=1
returns$mkt.rtns[1]=1
returns$rand[1]=1
#saveRDS(returns, "data/returns.tech.oos.rds")
#returns = readRDS("data/returns.tech.oos.rds")
charts.PerformanceSummary(returns,
colorset = set8equal,
main = "SVM Model Performance"
)
Return.annualized(returns)
#SharpeRatio(returns)
SharpeRatio.annualized(returns)
vol.annual = sapply(returns, FUN = function(x) sqrt(12)*sd(x))
print("Annualized Volatility:")
print(vol.annual)
maxDrawdown(returns)
end_time <- Sys.time()
time = end_time - start_time
|
140ad4a5e2c974b79c42ddd6d8f9873502a5b13a
|
a01fe8e31792d79dab00764ef6ae505e699524b4
|
/R/SetupLikelihoods.R
|
bb8e0214b793785f81501bcea59d533c359d79e9
|
[] |
no_license
|
dpwynne/mmnst
|
7c5972d75fad0d983c65ac044cf2b566531720e5
|
cadda88522a025115eb9a163510629ec2f55c672
|
refs/heads/master
| 2023-08-06T23:43:19.372858
| 2023-07-26T15:47:40
| 2023-07-26T15:47:40
| 202,798,431
| 4
| 2
| null | 2019-12-18T19:46:11
| 2019-08-16T20:59:31
|
R
|
UTF-8
|
R
| false
| false
| 1,274
|
r
|
SetupLikelihoods.R
|
#' Setup likelihood function
#'
#' Create a list of common parameters necessary to calculate likelihood functions for additive and multiplicative models.
#'
#' @param terminal.points a numeric vector containing the endpoints of the dyadic partitioning
#'
#' @return A list of length 5 is returned. The list contains the following scalars:
#'
#' DeltaDi: the delta D value in the likelihood derivation; see Ramezan *et al*. (2014).
#'
#' Di.1: the first Di value in the likelihood derivation; see Ramezan *et al*. (2014).
#'
#' Di.0: the initial Di value in the likelihood derivation; see Ramezan *et al*. (2014).
#'
#' T.data: The length of the data recording window.
#'
#' J: The resolution based on which the piecewise constant intensity function \eqn{c(t)} has been estimated.
#'
#' @references Ramezan, R., Marriott, P., and Chenouri, S. (2014), *Statistics in Medicine*, **33**(2), 238-256. doi: 10.1002/sim.5923.
#'
#' @export
SetupLikelihoods <-function(terminal.points){
D.i.plus.one <- terminal.points[-c(1)]
D.i <- terminal.points[-length(terminal.points)]
DeltaDi <- D.i.plus.one-D.i
T.data <- max(terminal.points)-min(terminal.points)
J <- log(length(terminal.points)-1, 2)
return(list(DeltaDi=DeltaDi,Di.1=D.i.plus.one,Di.0=D.i,T.data=T.data,J=J))
}
|
33ca462d1770a36fcb9190f3879077f94f850e45
|
50e72dd9731cf1e259653bca3f977dc27551fdcc
|
/packrat/bundles/cholera_microhotspots/source/manuscript_script.R
|
8a23a9fe7f60a0ef57ada26c241fbd9172a62209
|
[
"MIT"
] |
permissive
|
esbwenge80/cholera_microhotspots
|
3d1096440b46c55625f69c4235bb35e193aab0d1
|
5f6073f0a0514c54829454bb741b3b67a585b476
|
refs/heads/master
| 2021-09-14T15:13:54.108366
| 2018-05-15T13:45:19
| 2018-05-15T13:45:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,549
|
r
|
manuscript_script.R
|
## script file for manuscript
source("source/utils.R")
reload_source()
## bring in main data
kal <- load_kalemie_data()
ndj <- load_ndj_data()
###################
## MAIN ANALYSES ##
###################
## some key numbers for the manuscript
## these are the min and max (and midpoints) for the tau-distance windows
r.mins <- c( 0, seq(5,950,10))
r.maxs <- c(40, seq(55,1000,10))
r.mids <- (r.maxs + r.mins)/2
## time windows
d.mins<- c(0,5,10,15,20,25,15,1,0)
d.maxs<- c(5,10,15,20,25,30,30,5,2)
d.mids <- (d.maxs+d.mins)/2
## these functions run the primary analyses and save the outputs in the <filename>
## this will take a while so we have included the outputs in the repository if needed
run_main_analyis(point_mat=kal %>% as.matrix,filename="generated_data/kal_main.rds")
run_main_analyis(point_mat=ndj %>% as.matrix,filename="generated_data/ndj_main.rds")
## reading outputs
kal_main <- readRDS("generated_data/kal_main.rds")
ndj_main <- readRDS("generated_data/ndj_main.rds")
## looking at a bit more fine scale at time dimension
## similar runs for for generating data for figure 2
## note these were run on cluster to save time
run_main_analyis(point_mat=kal %>% as.matrix,
d.mins=c(0,1:18),
d.maxs=c(2:20),
filename="generated_data/kalout_tighttime.rds")
run_main_analyis(point_mat=ndj %>% as.matrix,
d.mins=c(0,1:18),
d.maxs=c(2:20),
filename="generated_data/ndjout_tighttime.rds")
kalout_tighttime <- readRDS("generated_data/kalout_tighttime.rds")
ndjout_tighttime <- readRDS("generated_data/ndjout_tighttime.rds")
## ------- ##
## Figures ##
## ------- ##
## make figure 1
make_figure_1(ndj_main=ndj_main,kal_main=kal_main,r.mids=r.mids)
## make figure 2
make_figure_2(kalout_tighttime = kalout_tighttime,ndjout_tighttime = ndjout_tighttime)
## ----------------------------------- ##
## some key numbers for the manuscript ##
## ----------------------------------- ##
##################
## first 5-days ##
##################
r.mids[which(ndj_main[[1]][[2]][1,]<1)][2] ## 330
r.mids[which(ndj_main[[8]][[2]][1,]<1)][2] ## 310
r.mids[which(kal_main[[1]][[2]][1,]<1)][2] ## 220
r.mids[which(kal_main[[8]][[2]][1,]<1)][2] ## 90
## but it first crosses at 100
r.mids[which(kal_main[[1]][[2]][1,]<1)][1] ## 210
r.mids[which(kal_main[[8]][[2]][1,]<1)][1] ## 80
## those living within 40-meters of a case
ndj_main[[1]][[1]][1]
ndj_main[[1]][[2]][,1]
kal_main[[1]][[1]][1]
kal_main[[1]][[2]][,1]
## those at 100-meters from a case
ndj_main[[1]][[1]][9]
ndj_main[[1]][[2]][,9]
kal_main[[1]][[1]][9]
kal_main[[1]][[2]][,9]
#################
## day 0 and 1 ##
#################
## those living within 20-meters of a case
ndj_main[[9]][[1]][1]
ndj_main[[9]][[2]][,1]
kal_main[[9]][[1]][1]
kal_main[[9]][[2]][,1]
## extent within the first days
r.mids[which(ndj_main[[9]][[2]][1,]<1)][2] ## 340
r.mids[which(kal_main[[9]][[2]][1,]<1)][2] ## 80
## at 100 meteres
ndj_main[[9]][[1]][9]
ndj_main[[9]][[2]][,9]
kal_main[[9]][[1]][9]
kal_main[[9]][[2]][,9]
####################
## exluding day 0 ##
####################
r.mids[which(ndj_main[[8]][[2]][1,]<1)][2] ## 310
r.mids[which(kal_main[[8]][[2]][1,]<1)][2] ## 90
## those living within 40-meters of a case
ndj_main[[8]][[1]][1]
ndj_main[[8]][[2]][,1]
kal_main[[8]][[1]][1]
kal_main[[8]][[2]][,1]
## those at 100-meters from a case
kal_main[[8]][[1]][9]
kal_main[[8]][[2]][,9]
ndj_main[[8]][[1]][9]
ndj_main[[8]][[2]][,9]
#########################
## longer time periods ##
#########################
## 2-4 weeks before
r.mids[which(ndj_main[[7]][[2]][2,]>1)]
r.mids[which(kal_main[[7]][[2]][2,]>1)]
## those living within 20-meters of a case
1/ndj_main[[7]][[1]][1]
1/ndj_main[[7]][[2]][,1]
1/kal_main[[7]][[1]][1]
1/kal_main[[7]][[2]][,1]
## those at 100-meters from a case
kal_main[[1]][[1]][9]
kal_main[[1]][[2]][,9]
ndj_main[[8]][[1]][9]
ndj_main[[8]][[2]][,9]
######################################################
## Now lets consider zones of increased risk to be ##
## the max. extent where the RR is greater than 1.1 ##
######################################################
ndj_risk <- get_riskzone(
dat=ndj %>% as.matrix,
d.mins=c(0,1,0),
d.maxs = c(5,5,1),
filename="generated_data/ndj_riskzone.rds",
n_boots=1000)
ndj_risk <- readRDS(file="generated_data/ndj_riskzone.rds")
quantile(get_risk_zone_dist(ndj_risk[[1]][[2]],risk_thresh=1.2,r.mids=r.mids),probs=c(.025,.5,.975),na.rm=T)
quantile(get_risk_zone_dist(ndj_risk[[2]][[2]],risk_thresh=1.2,r.mids=r.mids),probs=c(.025,.5,.975),na.rm=T)
kal_risk <- get_riskzone(
dat=kal %>% as.matrix,
d.mins=c(0,1,0),
d.maxs = c(5,5,1),
filename="generated_data/kal_riskzone.rds",
n_boots=1000)
kal_risk <- readRDS(file="generated_data/kal_riskzone.rds")
quantile(get_risk_zone_dist(kal_risk[[1]][[2]],risk_thresh=1.2,r.mids=r.mids),probs=c(.025,.5,.975))
quantile(get_risk_zone_dist(kal_risk[[2]][[2]],risk_thresh=1.2,r.mids=r.mids),probs=c(.025,.5,.975))
## now only for day 1
ndj_risk_day1 <- get_riskzone(
dat=ndj %>% as.matrix,
d.mins=c(0),
d.maxs = c(1),
filename="generated_data/ndj_riskzone_day1.rds",
n_boots=1000)
kal_risk_day1 <- get_riskzone(
dat=kal %>% as.matrix,
d.mins=c(0),
d.maxs = c(1),
filename="generated_data/kal_riskzone_day1.rds",
n_boots=1000)
##########################
## supplemental figures ##
##########################
## ----------------------------------------------------- ##
## Estimating tau at 3 different points in each epidemic ##
## ----------------------------------------------------- ##
## now run analyses
## Fit tau separately for three sub-periods of each epidemic.
## NOTE(review): N'Djamena times are shifted by 71 throughout (e.g. 120-71)
## -- presumably the offset between calendar day and epidemic day; confirm
## against the data-preparation step.
##t1
runme_inf_kal(d.mins=c(0,1),
              d.maxs=c(5,5),
              kal=kal %>% filter(day <= 200) %>% as.matrix ,
              filename="GeneratedData/kalout_50mWin_10mspace_days0_to_200.rds")
runme_inf_ndj(d.mins=c(0,1),
              d.maxs=c(5,5),
              ndj=ndj %>% filter(time <= (120-71)) %>% as.matrix ,
              filename="GeneratedData/ndjout_50mWin_10mspace_days0_to_120.rds")
##t2
runme_inf_kal(d.mins=c(0,1),
              d.maxs=c(5,5),
              kal=kal %>% filter(day>200 & day<=300) %>% as.matrix,
              filename="GeneratedData/kalout_50mWin_10mspace_days200_to_300.rds")
runme_inf_ndj(d.mins=c(0,1),
              d.maxs=c(5,5),
              ndj=ndj %>% filter(time > (120-71) & time <= (150-71)) %>% as.matrix ,
              filename="GeneratedData/ndjout_50mWin_10mspace_days120_to_150.rds")
#t3
runme_inf_kal(d.mins=c(0,1),
              d.maxs=c(5,5),
              kal=kal %>% filter(day>300) %>% as.matrix,
              filename="GeneratedData/kalout_50mWin_10mspace_days300_381.rds")
runme_inf_ndj(d.mins=c(0,1),
              d.maxs=c(5,5),
              ndj=ndj %>% filter(time > (150-71) & time <=(231-71)) %>% as.matrix ,
              filename="GeneratedData/ndjout_50mWin_10mspace_days150_to_231.rds")
## Reload the cached per-period results for plotting.
kal_t1 <- readRDS("GeneratedData/kalout_50mWin_10mspace_days0_to_200.rds")
kal_t2 <- readRDS("GeneratedData/kalout_50mWin_10mspace_days200_to_300.rds")
kal_t3 <- readRDS("GeneratedData/kalout_50mWin_10mspace_days300_381.rds")
ndj_t1 <- readRDS("GeneratedData/ndjout_50mWin_10mspace_days0_to_120.rds")
ndj_t2 <- readRDS("GeneratedData/ndjout_50mWin_10mspace_days120_to_150.rds")
ndj_t3 <- readRDS("GeneratedData/ndjout_50mWin_10mspace_days150_to_231.rds")
## Stack the tidied tau estimates for each sub-period into one long table,
## tagged with a time_period label for colour/fill aesthetics below.
kal_time_periods <- bind_rows(
  tidy_tau_out(kal_main[[1]],r.mids=r.mids) %>% mutate(time_period="all"),
  tidy_tau_out(kal_t1[[1]],r.mids=r.mids) %>% mutate(time_period="t1"),
  tidy_tau_out(kal_t2[[1]],r.mids=r.mids) %>% mutate(time_period="t2"),
  tidy_tau_out(kal_t3[[1]],r.mids=r.mids) %>% mutate(time_period="t3"))
ndj_time_periods <- bind_rows(
  tidy_tau_out(ndj_main[[1]],r.mids=r.mids) %>% mutate(time_period="all"),
  tidy_tau_out(ndj_t1[[1]],r.mids=r.mids) %>% mutate(time_period="t1"),
  tidy_tau_out(ndj_t2[[1]],r.mids=r.mids) %>% mutate(time_period="t2"),
  tidy_tau_out(ndj_t3[[1]],r.mids=r.mids) %>% mutate(time_period="t3"))
## Median tau line + CI ribbon per time period, log y-scale, 0-500 m window.
kal_tps <- kal_time_periods %>% ggplot() +
  geom_line(data=kal_time_periods %>% filter(type=="median"),aes(x=distance,y=value,color=time_period)) +
  geom_ribbon(data=kal_time_periods %>% spread(type,value),aes(x=distance,ymin=ci_l,ymax=ci_h,fill=time_period),alpha=.2) +
  scale_y_log10() +
  coord_cartesian(xlim=c(0, 500)) + theme_minimal() + ylab("relative cholera risk (tau)")
to.pdf(print(kal_tps + theme(legend.position = "bottom")),filename="figures/kal_time_periods_full.pdf",width=12)
ndj_tps <- ndj_time_periods %>% ggplot() +
  geom_line(data=ndj_time_periods %>% filter(type=="median"),aes(x=distance,y=value,color=time_period)) +
  geom_ribbon(data=ndj_time_periods %>% spread(type,value),aes(x=distance,ymin=ci_l,ymax=ci_h,fill=time_period),alpha=.2) +
  scale_y_log10() +
  coord_cartesian(xlim=c(0, 500)) + theme_minimal() + ylab("relative cholera risk (tau)")
to.pdf(print(ndj_tps + theme(legend.position = "bottom")),filename="figures/ndj_time_periods_full.pdf",width=12,height=8)
## Combined two-panel figure (N'Djamena on top, Kalemie below).
to.pdf(multiplot(ndj_tps + theme(legend.position = "bottom"),kal_tps + theme(legend.position = "bottom")),filename="figures/time_periods_full.pdf",width=12,height=14)
## Base-graphics tau plot for the first Kalemie time period.
## Fixed: removed a stray empty argument ("add=FALSE,,xlab=") -- the empty
## slot is passed through as a missing argument and raises
## "argument is missing, with no default" when evaluated downstream.
simple_tau_plot(kal_t1[[1]],
                r.mids, col = 1, add = FALSE, xlab = "", ylab = "")
grid()
text(28, 0.5, "days 0-4")  # period label
text(430, 100, "(A)")      # panel tag
## NOTE(review): duplicate of the t1 call above but without the `ndj=` data
## argument -- presumably runme_inf_ndj() falls back to a default dataset.
## It also overwrites the t1 output file; confirm this re-run is intentional.
runme_inf_ndj(d.mins=c(0,1),
              d.maxs=c(5,5),
              filename="GeneratedData/ndjout_50mWin_10mspace_days0_to_120.rds")
|
590428949b05d57adb88a842b95b5589a106be8a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ProTrackR/examples/write.module.Rd.R
|
e238f9448185de5b080735c6146ae4981cbdefd0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
write.module.Rd.R
|
# Example script (extracted from the ProTrackR Rd documentation) showing how
# to export a PTModule object as a ProTracker module file.
library(ProTrackR)
### Name: write.module
### Title: Export a PTModule object as a ProTracker module file
### Aliases: write.module write.module,PTModule,ANY-method
### write.module,PTModule,character-method
### ** Examples
## get the PTModule object provided with the ProTrackR package
data("mod.intro")
## save the object as a valid ProTracker module file:
write.module(mod.intro, "intro.mod")
## or create the connection yourself:
con <- file("intro2.mod", "wb")
write.module(mod.intro, con)
## don't forget to close the connection after you're done:
close(con)
|
29139d0dd1e5cec1b2571ad2acb0e899fa7daa16
|
be09161442f32566b62242fe156e869180ee8c0a
|
/man/move_imgs.Rd
|
7929ecfa04e670f0c1bc09b186a34da944628acd
|
[] |
no_license
|
Tubbz-alt/warbleR
|
7ce5f1eb943cfc840c1498b7c011b25a458bc0e4
|
be0b62c29d201c22c6b1241996324a723753696a
|
refs/heads/master
| 2022-12-19T18:27:58.139244
| 2020-09-23T13:46:19
| 2020-09-23T13:46:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
move_imgs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/move.imgs.R
\name{move_imgs}
\alias{move_imgs}
\title{alternative name for \code{\link{move.imgs}}}
\usage{
move_imgs(
from = NULL,
to = NULL,
it = "all",
cut = TRUE,
overwrite = FALSE,
create.folder = TRUE,
folder.name = "image_files",
parallel = 1,
pb = TRUE
)
}
\description{
alternative name for \code{\link{move.imgs}}
}
\details{
see \code{\link{move.imgs}} for documentation. \code{\link{move.imgs}} will be deprecated in future versions.
}
\keyword{internal}
|
b2ab0f765e55ea6a52329322dd241a3e9f4982a6
|
a6ffd76d5cef7d370f322996ded2351681d4a4f5
|
/R/2. Imputation of missing values.R
|
d25d5b1bbc1dd27b1c4886c1e6f31e2657f76759
|
[] |
no_license
|
michelleg06/Educational_Outcomes_Ecuador
|
f71af3ae024ee48e2ae62d32380403054d50c475
|
f4e0b5a628f396ab1af309b2e5ae8e53fe695c82
|
refs/heads/main
| 2023-07-20T07:07:30.203000
| 2021-09-02T18:37:18
| 2021-09-02T18:37:18
| 332,708,280
| 1
| 1
| null | 2021-05-11T13:38:22
| 2021-01-25T10:22:45
|
Python
|
UTF-8
|
R
| false
| false
| 125
|
r
|
2. Imputation of missing values.R
|
# Imputation of missing values. No imputation logic is implemented yet;
# for now this script only runs the preceding cleaning step.
# Fixed: use TRUE instead of the reassignable shorthand T.
source("1. Translation and cleaning.R", echo = TRUE) # Returns cleaned data_transl data.table.
|
fa1b9cbb613d5dc492f45548b85508bcba191f29
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Luminescence/R/plot_ViolinPlot.R
|
5cb22c879d05d9103bb985928cb948eea1c33454
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,738
|
r
|
plot_ViolinPlot.R
|
#' Create a violin plot
#'
#' Draws a kernel density plot in combination with a boxplot in its middle. The shape of the violin
#' is constructed using a mirrored density curve. This plot is especially designed for cases
#' where the individual errors are zero or too small to be visualised. The idea for this plot is
#' based on the 'volcano plot' in the ggplot2 package by Hadley Wickham and Winston Chang.
#' The general idea for the Violin Plot seems to be introduced by Hintze and Nelson (1998).
#'
#' The function is passing several arguments to the function \code{\link{plot}},
#' \code{\link[stats]{density}}, \code{\link[graphics]{boxplot}}: Supported arguments are: \code{xlim}, \code{main}, \code{xlab},
#' \code{ylab}, \code{col.violin}, \code{col.boxplot}, \code{mtext}, \code{cex}, \code{mtext}
#'
#' \bold{\code{Valid summary keywords}}\cr
#'
#' 'n', 'mean', 'median', 'sd.abs', 'sd.rel', 'se.abs', 'se.rel', 'skewness', 'kurtosis' \cr
#'
#' @param data \code{\link{numeric}} or \code{\linkS4class{RLum.Results}}
#' object (required): input data for plotting. Alternatively a \code{\link{data.frame}} or
#' a \code{\link{matrix}} can be provided, but only the first column will be considered by the
#' function
#'
#' @param boxplot \code{\link{logical}} (with default): enable or disable boxplot
#'
#' @param rug \code{\link{logical}} (with default): enable or disable rug
#'
#' @param summary \code{\link{character}} (optional): add statistic measures of
#' centrality and dispersion to the plot. Can be one or more of several
#' keywords. See details for available keywords.
#'
#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
#' default): optional position keywords (cf., \code{\link{legend}})
#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
#' specified to place the summary below the plot header. However, this latter
#' option in only possible if \code{mtext} is not used.
#'
#' @param na.rm \code{\link{logical}} (with default): exclude NA values
#' from the data set prior to any further operations.
#'
#' @param \dots further arguments and graphical parameters passed to
#' \code{\link{plot.default}}, \code{\link[stats]{density}} and \code{\link{boxplot}}. See details for
#' further information
#'
#' @note Although the code for this function was developed independently and just the idea for the plot
#' was based on the 'ggplot2' package plot type 'volcano', it should be mentioned that, beyond this,
#' two other R packages exist providing a possibility to produces this kind of plot, namely:
#' 'vioplot' and 'violinmplot' (see References for details).
#'
#' @section Function version: 0.1.0
#'
#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
#'
#' @references
#'
#' Daniel Adler (2005). vioplot: A violin plot is a combination of a box plot and a kernel density plot.
#' R package version 0.2 http://CRAN.R-project.org/package=violplot
#'
#' Hintze, J.L., Nelson, R.D., 1998. A Box Plot-Density Trace Synergism. The American Statistician 52, 181-184.
#'
#' Raphael W. Majeed (2012). violinmplot: Combination of violin plot with mean and standard deviation.
#' R package version 0.2.1. http://CRAN.R-project.org/package=violinmplot
#'
#' Wickham. H (2009). ggplot2: elegant graphics for data analysis. Springer New York.
#'
#' @seealso \code{\link[stats]{density}}, \code{\link{plot}}, \code{\link{boxplot}}, \code{\link{rug}},
#' \code{\link{calc_Statistics}}
#'
#' @examples
#' ## read example data set
#' data(ExampleData.DeValues, envir = environment())
#' ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
#'
#' ## create plot straightforward
#' plot_ViolinPlot(data = ExampleData.DeValues)
#'
#' @export
plot_ViolinPlot <- function(
  data,
  boxplot = TRUE,
  rug = TRUE,
  summary = NULL,
  summary.pos = "sub",
  na.rm = FALSE,
  ...
) {

  # Integrity tests and conversion --------------------------------------------------------------

  ##Prechecks
  if(missing(data)){
    stop("[plot_ViolinPlot()] I don't know what to do, data input needed." )

  }else{

    ##check for RLum.Results object and unwrap the numeric data
    if(is(data, "RLum.Results")){
      data <- get_RLum(data)
    }

    ##if data.frame or matrix: only the first column is used (documented behaviour)
    ##Fixed: scalar condition, so use `||` instead of the element-wise `|`
    if(is(data, "data.frame") || is(data, "matrix")){
      data <- data[,1]
    }
  }

  ##Remove NA values
  if(na.rm){
    data <- na.exclude(data)
  }

  ##Further checks; only position keywords are supported here even though the
  ##Rd documentation also mentions numeric positions
  if(!is(summary.pos, "character")){
    stop("[plot_ViolinPlot()] argument 'summary.pos' needs to be of type character!")
  }

  # Pre-calculations ----------------------------------------------------------------------------

  ##density for the violin; honour a user-supplied bandwidth passed via `...`
  ##Fixed: scalar if/else instead of ifelse(), which would silently keep only
  ##the first element of a vector-valued `bw`
  density <-
    density(x = data,
            bw = if("bw" %in% names(list(...))) list(...)$bw else "nrd0")

  ##statistical parameters; [[-1]] drops the weighted statistics element and
  ##keeps the unweighted one (calc_Statistics() returns both -- TODO confirm)
  stat.summary <- suppressWarnings(calc_Statistics(as.data.frame(data), digits = 2))[[-1]]

  ##default summary keywords
  if(is.null(summary)){
    summary <- c("n","median")
  }

  ##at least show a warning for invalid keywords
  ##Fixed: function-name typo in the warning message ("ViolinePlot")
  if(!all(summary %in% names(stat.summary))){
    warning(paste0("[plot_ViolinPlot()] At least one 'summary' keyword is invalid. Valid keywords are: ",
                   paste(names(stat.summary), collapse = ", ")), call. = FALSE)
  }

  ##make sure that only valid keywords make it
  summary <- summary[(summary %in% names(stat.summary))]

  ##summary strings: multi-line version for legend(), single-line for mtext()
  stat.text <-
    paste(names(stat.summary[summary]), " = ", stat.summary[summary], collapse = " \n")

  stat.mtext <-
    paste(names(stat.summary[summary]), " = ", stat.summary[summary], collapse = " | ")

  # Plot settings -------------------------------------------------------------------------------

  ##set default values; every entry can be overridden through `...`
  plot.settings <- list(
    xlim = range(density$x),
    main = "Violin Plot",
    xlab = expression(paste(D[e], "/(a.u.)")),
    ylab = "Density",
    col.violin = rgb(0,0,0,0.2),
    col.boxplot = NULL,
    mtext = if(summary.pos != 'sub') "" else stat.mtext,
    cex = 1
  )

  ##modify list accordingly
  plot.settings <- modifyList(plot.settings, val = list(...))

  # Plot ----------------------------------------------------------------------------------------

  ##open empty plot area
  plot(
    NA,NA,
    xlim = plot.settings$xlim,
    ylim = c(0.2,1.8),
    xlab = plot.settings$xlab,
    ylab = plot.settings$ylab,
    yaxt = "n",
    main = plot.settings$main,
    cex = plot.settings$cex
  )

  ##add polygon ... the violin: density curve mirrored around y = 1
  polygon(
    x = c(density$x, rev(density$x)),
    y = c(1 + density$y / max(density$y) * 0.5,
          rev(1 - density$y / max(density$y) * 0.5)),
    col = plot.settings$col.violin,
    border = plot.settings$col.violin
  )

  ##add the boxplot in the middle of the violin
  if(boxplot){
    boxplot(
      data,
      outline = TRUE,
      boxwex = 0.4,
      horizontal = TRUE,
      axes = FALSE,
      add = TRUE,
      col = plot.settings$col.boxplot
    )
  }

  ##add rug with the individual observations
  if(rug){
    rug(x = data)
  }

  ##add mtext (summary below the title when summary.pos == "sub")
  if(!is.null(plot.settings$mtext)){
    mtext(side = 3, text = plot.settings$mtext)
  }

  ##add stat.text as a legend for any other position keyword
  if (summary.pos != "sub") {
    valid_keywords <-
      c(
        "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right", "center"
      )

    if (any(
      summary.pos %in% valid_keywords
    )) {
      legend(summary.pos, legend = stat.text, bty = "n")

    }else{
      warning_text <- paste0("Value provided for 'summary.pos' is not a valid keyword, valid keywords are:",
                             paste(valid_keywords, collapse = ", "))

      warning(warning_text)
    }
  }
}
|
d0e6decd1c807b4c1f3d8496dc29d8ca41878676
|
ac8be031ab50a75403f5423ef057a9dc2c01927e
|
/timeseries/ts_plots.R
|
a96900061664969044b95a2da1eedb68f3b310bb
|
[] |
no_license
|
yukikongju/R-Tutorial
|
9f006acf8d5aa2cf4bb595eb1c2cd4af1a5d5757
|
675fa89bbb7e2166223c1a06095844ce332d4a48
|
refs/heads/master
| 2022-12-08T03:49:25.272906
| 2020-09-03T21:02:48
| 2020-09-03T21:02:48
| 245,560,126
| 1
| 0
| null | 2020-09-03T21:02:49
| 2020-03-07T03:18:35
|
HTML
|
UTF-8
|
R
| false
| false
| 3,056
|
r
|
ts_plots.R
|
# -------------------- Dependencies -----------------
library(ggplot2)
# Fixed typo: the package is "dplyr"; library(dyplr) fails with
# "there is no package called 'dyplr'".
library(dplyr)
library(fpp2)
# -------------- Time Plot -------------------
# Time-series plotting walkthrough; datasets (AirPassengers, elecdaily,
# ausbeer, ...) come from fpp2 and the packages it attaches.
autoplot(AirPassengers) +
  ggtitle("Airline Passengers") +
  xlab("Year") +
  ylab("Passengers")
# -------------- Time Plot with facets -------------------
autoplot(elecdaily[,c("Demand", "Temperature")], facets = TRUE)+
  ylab("")+
  ggtitle("Daily Demand in Victoria, Australia")
# ------------------ Seasonal Plot -------------
ggseasonplot(AirPassengers, year.labels = TRUE, year.labels.left = TRUE) +
  ylab("Passengers") +
  ggtitle("Air Passengers by seasons")
# ------------------ Polar Seasonal Plot -------------
ggseasonplot(AirPassengers, polar = TRUE)+
  ylab("Passengers") +
  ggtitle("Polar plot: Air Passengers by seasons")
# --------- Seasonal Subseries Plots ------------------
ggsubseriesplot(AirPassengers) +
  ylab("Passengers") +
  ggtitle("Subseries Plot: Air Passengers by seasons")
# ------------- Scatter plot matrices -----------------------
autoplot(visnights[,1:4], facets=TRUE)+
  ylab("Number of visitors each quarter (millions)")
# ------------------- qplot ---------------------
qplot(data = as.data.frame(elecdemand), Temperature, Demand )
# -------------- Corrrelation plot --------------
GGally:: ggpairs(as.data.frame(visnights[,1:5]))
# ------------------ Lag plots -------------------
d_beer <- window(ausbeer, start = 1985)
gglagplot(d_beer)
# ------------ autocorrelation plot ---------------
ggAcf(d_beer, lag.max = 40)
# ---------- Trend and seasonality in ACF plots ---
d_elec <- window(elec, start = 1975)
autoplot(d_elec) + xlab("Year") + ylab("Demand")
# ----------- white noise plot -----------------
# Seed fixed so the simulated white-noise series is reproducible.
set.seed(420)
y <- ts(rnorm(50))
autoplot(y) + ggtitle("white noise")
# ----------- Exercices 1 -------------------------
autoplot(gold)
autoplot(woolyrnq)
autoplot(gas)
frequency(gold)
frequency(woolyrnq)
frequency(gas)
# find outliers
# which.max() returns the index (observation number) of the series maximum.
which.max(gold)
# ----------- Exercice 4 --------------------
autoplot(bicoal)
autoplot(chicken)
autoplot(usdeaths)
autoplot(goog) + ggtitle("Google stock price ")
# ----------- Exercice 5 --------------------
ggseasonplot(writing)
ggsubseriesplot(writing)
ggseasonplot(fancy)
ggsubseriesplot(fancy)
# ----------- Exercice 6 --------------------
autoplot(hsales)
ggseasonplot(hsales)
ggsubseriesplot(hsales)
gglagplot(hsales)
ggAcf(hsales)
# ----------- Exercice 7 --------------------
autoplot(arrivals, facets = TRUE)
ggseasonplot(arrivals[, "Japan"])
ggseasonplot(arrivals[, "NZ"])
ggseasonplot(arrivals[, "UK"])
ggseasonplot(arrivals[, "US"])
ggsubseriesplot(arrivals[, "Japan"])
ggsubseriesplot(arrivals[, "NZ"])
ggsubseriesplot(arrivals[, "UK"])
ggsubseriesplot(arrivals[, "US"])
# ----------- Exercice 9 --------------------
d_pigs <- window(pigs, start=1990)
autoplot(d_pigs)
ggseasonplot(d_pigs)
ggsubseriesplot(d_pigs)
gglagplot(d_pigs)
ggAcf(d_pigs)
# ---------- Exercice 10 ---------------
# First differences of the Dow Jones index, then its ACF.
djj <- diff(dj)
autoplot(djj)
ggAcf(djj)
# Note: probably white noise
|
3d2dc3b6a46b1ce8a0849e4736a76b0bd48bb70d
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/rbhl/R/zzz.R
|
0c7523d4a54fcabea56f6bd4114abcef560ffdfa
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,013
|
r
|
zzz.R
|
#' Build a human-readable description of an API call.
#'
#' @param url Base URL of the endpoint.
#' @param x Named list of query parameters; each element is assumed to be a
#'   single value (one string per parameter).
#' @return A single string: "API call:\n <url>?k1=v1&k2=v2".
query2message <- function(url, x) {
  ## Build "name=value" pairs without growing a list inside a loop;
  ## vapply() also replaces the unsafe 1:length(x) iteration and guarantees
  ## exactly one character string per parameter.
  params <- vapply(
    seq_along(x),
    function(k) paste(names(x[k]), "=", x[[k]], sep = ""),
    character(1)
  )
  paste("API call:\n", paste(url, "?", paste(params, collapse = "&"), sep = ""))
}
# Shape the raw API response text according to the requested output (`y`)
# and the wire format (`z`): "raw" returns the text untouched, "list"
# parses JSON into R structures, anything else parses per the wire format.
return_results <- function(x, y, z) {
  switch(y,
    raw = x,
    list = jsonlite::fromJSON(I(x)),
    if (z == "json") {
      jsonlite::fromJSON(I(x))
    } else {
      XML::xmlTreeParse(I(x))
    }
  )
}
# Resolve the BHL API key: the explicit argument wins, then the BHL_KEY
# environment variable, then the "bhl_key" option; otherwise abort.
check_key <- function(x) {
  key <- x
  if (is.null(key)) {
    key <- Sys.getenv("BHL_KEY", "")
  }
  if (key == "") {
    return(getOption("bhl_key", stop("need an API key for BHL")))
  }
  key
}
# Issue a query against the BHL API and return the payload in the requested
# representation: raw text for "json"/"xml", parsed for "list"/"table".
# Stops with "No results found" when the response carries no Result entries
# (or when `as` is not one of the known formats).
bhl_GET <- function(as, args, ...){
  resp <- GET(bhl_url(), query = args, ...)
  stop_for_status(resp)

  # Count Result entries; the empty switch alternatives fall through,
  # and an unknown `as` yields NULL.
  n_found <- switch(as,
                    xml = xmlSize(xpathSApply(content(resp), "//Result")[[1]]),
                    json = ,
                    list = ,
                    table = length(content(resp)$Result))
  if (is.null(n_found) || n_found == 0) {
    stop("No results found", call. = FALSE)
  }

  payload <- content(resp, as = "text")
  switch(as,
         json = payload,
         xml = payload,
         list = fjson(payload),
         table = todf(payload))
}
# Convert a JSON response body to a "bhldf" object wrapping a data frame.
# Character-only payloads are returned as-is.
todf <- function(x){
  result <- jsonlite::fromJSON(I(x), TRUE)$Result
  if (is.character(result)) {
    return(result)
  }
  kept <- bhlc(result)  # drop NULL entries before coercing to a data frame
  if (is.null(names(result))) {
    # Unnamed: a list of records -> row-bind, filling missing columns.
    df <- do.call(rbind.fill, lapply(kept, data.frame))
  } else {
    # Named: a single record -> one-row data frame.
    df <- data.frame(kept, stringsAsFactors = FALSE)
  }
  structure(list(data = df), class = "bhldf")
}
# Parse JSON without simplification (nested lists stay lists).
fjson <- function(x) jsonlite::fromJSON(x, FALSE)

# Single endpoint used by every BHL API v2 request.
bhl_url <- function() "http://www.biodiversitylibrary.org/api2/httpquery.ashx"

# Compact a list by removing its NULL elements (names are preserved).
bhlc <- function(l) Filter(function(el) !is.null(el), l)
# Normalise the requested output type to the wire format used for the HTTP
# request: "xml" stays xml, everything else ("table"/"list"/"json") is json.
as_f <- function(x) {
  as <- match.arg(x, c("table", "list", "json", "xml"))
  if (as == "xml") "xml" else "json"
}
#' Print a \code{bhldf} object
#'
#' Shows the dimensions of the wrapped data frame, then a truncated preview.
#'
#' @param x A \code{bhldf} object (a list carrying a \code{data} data frame).
#' @param ... Unused; kept for S3 method compatibility.
#' @param n Maximum number of rows to preview.
#' @export
print.bhldf <- function(x, ..., n = 10) {
  cat(sprintf("<bhl data> [%d, %d]", NROW(x$data), NCOL(x$data)), sep = "\n")
  # NOTE(review): trunc_mat() is not defined in this file; presumably it is
  # imported from tibble/dplyr -- confirm the package NAMESPACE provides it.
  trunc_mat(x$data, n = n)
}
|
d02fe95e0c34610fcb58465c89d1c460a28079b4
|
b05b3c5f3bdaf1dd6e115caeec6cb187ef158e2e
|
/App.R
|
bcfc65e94ed8305375e5ba5b3358501ac501cd83
|
[] |
no_license
|
AmineFrj/ShinyDataAnalysis
|
9e78e93ef3d8802008236b2a3730991b64560053
|
4dd995a18f643ba768c183b3b9c3b4f9f294f54b
|
refs/heads/master
| 2020-08-07T14:40:58.732815
| 2019-10-14T22:19:07
| 2019-10-14T22:19:07
| 213,491,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
App.R
|
# Shiny entry point: load dependencies, source the UI and server
# definitions, then launch the application.
library(shiny)
library(ggplot2)
library(UsingR)
library(DT)
library(reshape)
library(Amelia)
library(psych)
# UI.R must define `ui`; sourced with local = TRUE (evaluated in the
# calling frame rather than forcing the global environment).
source('UI.R', local = TRUE)
# NOTE(review): Server.R is sourced without local = TRUE, unlike UI.R; it
# must define `server`. Confirm the asymmetry is intentional.
source('Server.R')
shinyApp(ui = ui, server = server)
|
a677beedd0ee9c783e816dc736ac14f0e77063ec
|
516603468a29ec93396a49fe40606697250d67b2
|
/.ipynb_checkpoints/03_tableUniqueCHIRPS-checkpoint.R
|
a01bcdfc8bd07e43f6e31aa54ae1462bd202f804
|
[] |
no_license
|
fabiolexcastro/Gates-smallholder-adaptation
|
9256d014ba43924636e138c5e053af06c50e5413
|
fb599eb172c44e73e655669b0083dcd90f803244
|
refs/heads/master
| 2022-08-10T01:30:56.339281
| 2022-07-19T08:01:02
| 2022-07-19T08:01:02
| 250,032,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,917
|
r
|
03_tableUniqueCHIRPS-checkpoint.R
|
## Build a per-pixel table of daily CHIRPS precipitation (1985-2019) and
## cache it as an RDS file. All paths point at a network share (dapadfs).
## NOTE(review): rm(list=ls()) wipes the workspace of whoever sources this
## script -- discouraged; prefer running it in a fresh session.
g=gc()
rm(list=ls())
require(raster);require(dplyr); require(foreach); require(parallel)
path <- '//dapadfs/data_cluster_4/observed/gridded_products/chirps/daily/32bits/'
## Template raster used only to translate x/y coordinates into cell IDs.
base <- raster(paste0(path, "chirps-v2.0.1981.01.01.tif"))
fls <- list.files(path = path, pattern = ".tif",full.names = TRUE)
year <- 1985:2019
year <- as.character(year)
## Continent outline; subset to North America for the crop/mask footprint.
n_a <- shapefile("//dapadfs/workspace_cluster_13/GATES/data/shp/base/continents.shp")
proj4string(n_a) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
n_a <- n_a[n_a@data$CONTINENT == "North America",]
library(foreach)
library(parallel)
library(doParallel)
## Parallel backend. NOTE(review): cores-10 is zero/negative on machines
## with <= 10 cores and makeCluster() would then fail; confirm target host.
cores<- detectCores()
cl<- makeCluster(cores-10)
registerDoParallel(cl)
## One parallel task per year; each returns a cellID + daily-values table.
system.time(prec<-foreach(i=1:length(year)) %dopar% {
  mylist <- list()
  require(raster)
  fls_year <- fls[grep(year[i], fls)]
  r <- raster:::stack(fls_year)
  ## NOTE(review): the result of this lapply() is immediately overwritten
  ## below, so the per-layer crop/mask to North America is discarded and
  ## appears to have no effect on the final output -- verify intent.
  df <- lapply(1:nlayers(r) , function(j){
    raster <- r[[j]]
    raster <- crop(raster, c(-179.1411, -40, 5.499059, 83.62746))
    n_a <- crop(n_a, c(-179.1411, -40, 5.499059, 83.62746))
    raster <- crop(raster,c(-179.1411, -40, 5.499059, 83.62746))
    raster <- mask(raster,n_a)
    df <- as.data.frame(rasterToPoints(r))
    col <- colnames(df)
    col <- col[3]
    # dte <- stringr::str_sub(col, start = 13, end = nchar(col))
    # dte <- gsub('\\.', '-', dte)
    # colnames(df) <- c("x","y",dte)
    # cellID <- cellFromXY(base, df[,1:2])
    # df <- cbind(cellID, df)
    # df$x <- NULL
    # df$y <- NULL
  })
  ## Flatten the full (uncropped) year stack to points; negative values are
  ## treated as missing and their rows dropped.
  system.time(df <- as.data.frame(rasterToPoints(r)))
  df[,3][which(df[,3] < 0)] <- NA
  df <- na.omit(df)
  cellID <- cellFromXY(base, df[,1:2])
  df <- cbind(cellID,df)
  df[,2:3]<- NULL
  ## NOTE(review): `mylist` is task-local and indexed sparsely by the outer
  ## `i`; this only works because foreach() collects the value of this last
  ## expression -- the list itself is never used.
  mylist[[i]] <- df
})
stopCluster(cl)
## Column-bind all years, then drop every second column from the 3rd on.
## NOTE(review): that stride does not obviously line up with one cellID
## column per year block (1 + ~365 columns each); verify the intent.
tabla <- do.call(cbind, prec)
n<- seq(3,5000,by=2)
tabla1 <- tabla[,-n]
saveRDS(tabla1, "//dapadfs/workspace_cluster_13/GATES/rds/chirps/complet/prec_5.rds")
|
a90b2453967c21e6dc389e1273d97463e7bfa2bf
|
4ef9b69074a88ab4510535a0db95db4fef924c5e
|
/puzzle_04.R
|
725add0ab33d40c3afd24ed629106fcf7849b737
|
[] |
no_license
|
msheker/mindsumo_puzzles
|
4852ec8be407e14337fe8591e60c4e8bf2fd725b
|
13a31c2e064e6648d0a09c488b02e1da33b0a950
|
refs/heads/local
| 2016-09-03T06:37:12.837145
| 2015-09-18T23:26:56
| 2015-09-18T23:26:56
| 42,751,634
| 0
| 0
| null | 2015-09-18T23:28:42
| 2015-09-18T23:27:55
|
R
|
UTF-8
|
R
| false
| false
| 1,262
|
r
|
puzzle_04.R
|
# Miguel Sheker 09/16/15
# Purpose: Find every integer value that can be formed using a difference of squares
# Method: Use double for-loop in which outer loop decrements from upperbound and inner loop
# increments from 0
##### FUNCTION #####
# Name: diffOfSq
# Desc: returns vector containing every number between min and max inclusive that can be
# formed using an integer difference of squares
# Para: min >> minimum of values to test
# max >> maximum of values to test
# Ret: Vector containing every integer value between min and max that can be formed through
# a difference of squares
####################
diffOfSq <- function(min, max) {
  # Collect every value in [min, max] expressible as i^2 - j^2 with
  # integers i >= j >= 0. Traversal runs i downward from the largest
  # useful base and j upward from 0; union() keeps first-seen order,
  # exactly matching the original accumulation.
  found <- c()
  # Largest useful base: solve x^2 - (x - 1)^2 = 2x - 1 >= max for x.
  top <- ceiling(0.5 * max + 0.5)
  for (i in top:0) {
    i_sq <- i^2
    for (j in 0:i) {
      candidate <- i_sq - j^2
      # skip values outside the requested bounds
      if (candidate < min || candidate > max) {
        next
      }
      found <- union(found, candidate)
    }
  }
  found
}
##### TESTING #####
## Ad-hoc smoke tests; results were inspected manually (prints left
## commented out).
#print( 'test1')
test1<- diffOfSq( 1, 10)
#print( 'test2')
test2<- diffOfSq( 4, 10)
final<- diffOfSq( 1, 1000)
#print( length( final))
|
a5ba3f195a6f635db971c25c7e161496fc92daf1
|
f7cf0267b134fde66cfbb725c35984ac8d7418fc
|
/pre-submission.R
|
9ace8ae5c5790fd02a8c3974d02eb12e9e1a977e
|
[
"MIT"
] |
permissive
|
jla-data/czechrates
|
4af955ae0f0de8082fd333daa03721a40cb40fa7
|
3537e8752ff3c339d39b73290ec4ce2d31722758
|
refs/heads/master
| 2023-05-13T05:31:52.884546
| 2021-05-26T07:00:49
| 2021-05-26T07:00:49
| 265,378,977
| 0
| 0
|
NOASSERTION
| 2021-05-06T20:32:49
| 2020-05-19T22:07:30
|
R
|
UTF-8
|
R
| false
| false
| 302
|
r
|
pre-submission.R
|
# Pre-submission routine: remote checks to run before each CRAN release.
# (Translated from Czech:) do not skip this, it is important! Encoding is
# a beast, so verify on the clang devel platform first.
rhub::check_for_cran(platforms = "debian-clang-devel")
# (Translated from Czech:) both win-builder checks can be uploaded in one
# go at https://win-builder.r-project.org/upload.aspx
devtools::check_win_release()
devtools::check_win_devel()
# once ready
devtools::release()
|
e85d6418c8f086cc2acbf16be9209b2be6d78654
|
acdfd492413b683a27afe8b800e45217e9d5d11c
|
/man/window.uts_vector.Rd
|
4fac88e00c1b9cc8d6dfc7ee05c2dc020ccf0d9e
|
[] |
no_license
|
andreas50/utsMultivariate
|
25a67467eb3ac37dd7abbe4d131b94306b95b042
|
46c1133abe58a53f1228fe05fbd089aa21509310
|
refs/heads/master
| 2021-09-28T02:22:12.357689
| 2021-09-23T18:44:38
| 2021-09-23T18:44:38
| 35,906,754
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,158
|
rd
|
window.uts_vector.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uts_vector.R
\name{window.uts_vector}
\alias{window.uts_vector}
\alias{time.uts_matrix}
\title{Time Window}
\usage{
\method{window}{uts_vector}(x, start = NULL, end = NULL, ...)
}
\arguments{
\item{x}{a \code{"uts_vector"} object.}
\item{start, end}{\code{\link{POSIXct}} object or coercible using \code{\link{as.POSIXct}}. The start and end times, respectively, for the individual subperiod time series. If there are fewer times than time series, then they are recycled in the standard fashion.}
\item{\dots}{further arguments passed to or from methods.}
}
\description{
Extract a subperiod time series between times \code{start} and \code{end}.
}
\examples{
# For each time series, drop observations before 2007-11-09 Eastern Standard Time
window(ex_uts_vector(), start="2007-11-09 EST")
# Use a different end time for each subperiod time series
window(ex_uts_vector(), end=c("2007-11-09 12:00:00 EST", "2007-11-09 EST"))
}
\seealso{
\code{\link{head}}, \code{\link{head_t}}, \code{\link{tail}}, \code{\link{tail_t}} for other methods that extract a subperiod time series.
}
|
f57fed491369b52bc729916fddabbdf4f2cd640a
|
968d50016d23fe745bc7c4e1cd9c313e5b3d607b
|
/packages/tweetlstm/R/model_management.R
|
8dfaced330780b1afad8935c54bb907ab4d86654
|
[] |
no_license
|
schnee/trump-rnn
|
d72dd2dad06e7178dc38de1e8a5423fc0298f408
|
54c6be507c57a5b265fb89541975f3b87936423f
|
refs/heads/master
| 2021-01-17T05:50:29.372512
| 2019-04-24T13:37:26
| 2019-04-24T13:37:26
| 64,761,697
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,122
|
r
|
model_management.R
|
library(keras)
library(tidyverse)
library(tokenizers)
library(lubridate)
# Length, in characters, of the context window shared by training and
# text generation.
get_max_length <- function() {
  40
}
#' Clean tweet text and tokenize it into single characters
#'
#' @param df A data frame with a character column \code{text}.
#'
#' @return A character vector of single-character tokens spanning the whole
#'   corpus (all rows joined with spaces).
#' @export
#'
#' @examples
clean_and_tokenize <- function(df) {
  # Normalise to ASCII so the downstream model alphabet stays small.
  txt <- iconv(df$text, from="utf-8", to="ASCII", sub="")
  # Strip embedded double quotes.
  txt <- str_replace_all(txt,'\"','')
  # Blank out URLs.
  txt <- str_replace_all(txt, "(http|ftp|https):/{2}[%+!:?#&=_0-9A-Za-z\\./\\-]+", " ")
  # Replace "&" with a space (pattern preserved as-is from the original).
  txt <- str_replace_all(txt, "&", ' ')
  # Join all tweets into one corpus string, then split into characters.
  txt %>%
    str_c(collapse = " ") %>%
    tokenize_characters(lowercase = FALSE, strip_non_alphanum = FALSE, simplify = TRUE)
}
#' get_vectors
#'
#' Build one-hot-encoded training tensors from a character sequence:
#' overlapping windows of \code{max_length} characters (stride 3) as inputs,
#' with the character following each window as the target.
#'
#' @param text Character vector of single-character tokens.
#' @param alphabet Character vector of the distinct symbols to encode over.
#' @param max_length Window length in characters.
#'
#' @return A list with \code{x} (windows, one-hot: n x max_length x
#'   length(alphabet)) and \code{y} (next-character one-hot: n x
#'   length(alphabet)).
#' @export
#'
#' @examples
get_vectors <- function(text, alphabet, max_length) {
  # Window start positions step by 3 to thin the training set.
  # NOTE(review): when length(text) <= max_length + 2 the seq() bounds
  # invert and this misbehaves -- confirm inputs are always long enough.
  dataset <- map(
    seq(1, length(text) - max_length - 1, by = 3),
    ~list(sentence = text[.x:(.x + max_length - 1)],
          next_char = text[.x + max_length])
  )

  # Turn the list of (sentence, next_char) pairs into parallel lists.
  dataset <- transpose(dataset)

  # One-hot encode: each row of x is a window, each column an alphabet symbol.
  vectorize <- function(data, chars, max_length){
    x <- array(0, dim = c(length(data$sentence), max_length, length(chars)))
    y <- array(0, dim = c(length(data$sentence), length(chars)))

    # NOTE(review): 1:length(...) yields c(1, 0) on an empty dataset;
    # seq_along() would be the safe form.
    for(i in 1:length(data$sentence)){

      x[i,,] <- sapply(chars, function(x){
        as.integer(x == data$sentence[[i]])
      })

      y[i,] <- as.integer(chars == data$next_char[[i]])

    }

    list(y = y,
         x = x)
  }

  vectors <- vectorize(dataset, alphabet, max_length)
  vectors
}
#' create_model
#'
#' Build and compile the character-level language model: two stacked
#' bidirectional LSTM layers with dropout in between, followed by a dense
#' softmax over the alphabet.
#'
#' @param chars Character vector of alphabet symbols (its length fixes the
#'   input/output width).
#' @param max_length Window length in characters (time dimension).
#'
#' @return A compiled keras model (categorical cross-entropy, Adam,
#'   lr = 0.001).
#' @export
#'
#' @examples
create_model <- function(chars, max_length){
  # NOTE(review): layer_cudnn_lstm requires a CUDA-capable GPU; there is no
  # CPU fallback in this code path -- confirm the training environment.
  keras_model_sequential() %>%
    bidirectional(layer_cudnn_lstm(units=256,
                                   return_sequences = TRUE,
                                   input_shape = c(max_length, length(chars)))) %>%
    layer_dropout(rate = 0.5) %>%
    bidirectional(layer_cudnn_lstm(units=256)) %>%
    layer_dense(length(chars)) %>%
    layer_activation("softmax") %>%
    compile(
      loss = "categorical_crossentropy",
      optimizer = optimizer_adam(lr = 0.001)
    )
}
#' fit_model
#'
#' Train the model on the one-hot tensors produced by get_vectors(),
#' holding out 10% of the data for validation.
#'
#' @param model A compiled keras model (see create_model()).
#' @param vectors List with elements \code{x} and \code{y} as returned by
#'   get_vectors().
#' @param epochs Number of training epochs.
#' @param view_metrics Whether to stream metrics to the viewer during
#'   training.
#'
#' @return The keras training history object returned by fit().
#' @export
#'
#' @examples
fit_model <- function(model, vectors, epochs = 1, view_metrics = FALSE){
  model %>% fit(
    vectors$x, vectors$y,
    batch_size = 32,
    epochs = epochs,
    validation_split= 0.1,
    # Early stopping was tried and left disabled; re-enable if overfitting.
    #callbacks = list(callback_early_stopping(patience= 4)),
    view_metrics = view_metrics
  )
}
#' generate_phrase
#'
#' Sample a phrase of \code{output_size} characters from the trained model,
#' seeded with a random window taken from \code{seedtext}.
#'
#' @param model a trained keras model (see \code{fit_model()})
#' @param seedtext character vector of single characters used as the seed corpus
#' @param chars character vector of the distinct characters (model alphabet)
#' @param max_length integer, length of the model's input window
#' @param output_size number of characters to generate
#' @param diversity sampling temperature; higher values give more random output
#'
#' @return a single string containing the generated characters
#' @export
#'
#' @examples
generate_phrase <- function(model, seedtext, chars, max_length, output_size = 200, diversity){
  # this function chooses the next character for the phrase
  choose_next_char <- function(preds, chars, temperature){
    # Temperature-scale the predicted distribution, renormalise, then draw
    # one character from the resulting multinomial distribution.
    preds <- log(preds) / temperature
    exp_preds <- exp(preds)
    # Note: `preds` still holds the log-scaled values on this line, so
    # exp(preds) == exp_preds and this is an ordinary softmax renormalisation.
    preds <- exp_preds / sum(exp(preds))
    next_index <- rmultinom(1, 1, preds) %>%
      as.integer() %>%
      which.max()
    chars[next_index]
  }
  # One-hot encode a character window as a 1 x max_length x alphabet array.
  convert_sentence_to_data <- function(sentence, chars){
    x <- sapply(chars, function(x){
      as.integer(x == sentence)
    })
    array_reshape(x, c(1, dim(x)))
  }
  # the initial sentence is a random window taken from the seed text
  start_index <- sample(1:(length(seedtext) - max_length), size = 1)
  sentence <- seedtext[start_index:(start_index + max_length - 1)]
  generated <- ""
  # while we still need characters for the phrase
  for(i in 1:(output_size)){
    sentence_data <- convert_sentence_to_data(sentence, chars)
    # get the predictions for each next character
    preds <- predict(model, sentence_data)
    # choose the character
    next_char <- choose_next_char(preds, chars, diversity)
    # add it to the text and continue
    generated <- str_c(generated, next_char, collapse = "")
    # slide the window: drop the first character, append the new one
    sentence <- c(sentence[-1], next_char)
  }
  generated
}
|
c54f21f4606d4247b152747fe50ad3c103ee5c6b
|
a48797beca55474d7b39676389f77f8f1af76875
|
/man/graph_f2_function.Rd
|
8d20b292f567c4c5b7d2319d78386293ba5e0ac4
|
[] |
no_license
|
uqrmaie1/admixtools
|
1efd48d8ad431f4a325a4ac5b160b2eea9411829
|
26759d87349a3b14495a7ef4ef3a593ee4d0e670
|
refs/heads/master
| 2023-09-04T02:56:48.052802
| 2023-08-21T21:15:27
| 2023-08-21T21:15:27
| 229,330,187
| 62
| 11
| null | 2023-01-23T12:19:57
| 2019-12-20T20:15:32
|
R
|
UTF-8
|
R
| false
| true
| 882
|
rd
|
graph_f2_function.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toposearch.R
\name{graph_f2_function}
\alias{graph_f2_function}
\title{Make a function representing a graph}
\usage{
graph_f2_function(
graph,
admix_default = 0.5,
drift_default = 0.01,
random_defaults = FALSE
)
}
\arguments{
\item{graph}{An admixture graph}
\item{admix_default}{The default weights for admixture edges}
\item{drift_default}{The default weights for drift edges}
\item{random_defaults}{Set default weights randomly for each edge between 0 and 1}
}
\value{
A function mapping edge weights to f2-statistics
}
\description{
This function takes an igraph object and turns it into a function that takes edge weights as input,
and outputs the expected f2-statistics.
}
\examples{
\dontrun{
mygraph = graph_f2_function(example_igraph)
mygraph(N3N8 = 0.1, `N2N1|Vindija.DG` = 0.4)
}
}
|
b96a886fdd4021041e9c5ae644b6be164e99a771
|
b78da44a956cf9f9ee8b4a1837d70bc32c4c7a8d
|
/scripts/03A-go_bp_perm.r
|
090dc390d0a8d29ec9cf7da3aa098d7c37b7a8b1
|
[] |
no_license
|
delahayefabien/PaperLGAHSPC
|
ecaa9d3b7b547b842076bbbe6d330df340187230
|
bee75f9a98236f2f2499f77f6ae0a2258e631156
|
refs/heads/master
| 2023-07-28T04:53:22.614898
| 2021-09-07T10:10:01
| 2021-09-07T10:10:01
| 402,807,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,582
|
r
|
03A-go_bp_perm.r
|
### Project Setup ==================================================================================
library(here)
out <- here("outputs", "03-pathway_analysis")
dir.create(out, recursive = TRUE, showWarnings = FALSE, mode = "0775")
### Load Packages ==================================================================================
suppressPackageStartupMessages({
  source("scripts/utils/new_utils.R")
  library(clusterProfiler)
  library(parallel)
  library(org.Hs.eg.db)
})
### Tables and Figures Theme =======================================================================
# theme_set(theme_light())
### Functions ======================================================================================
### Analysis =======================================================================================
# Observed GSEA GO results and the significant GO terms to re-test on permutations.
res_g<-fread("outputs/03-pathway_analysis/res_gsea_go.csv")
go_sig<-res_g[p.adjust<0.001]$ID #already filter but for understanding
# 1000-permutation gene scores computed in the previous pipeline step.
res_perm<-fread("outputs/02-gene_score_calculation_and_validation/res_1000perm_genescore_add.csv.gz")
# Map gene symbols to Entrez IDs (perm==1 used only to get the gene list).
genes.df<-bitr(res_perm[perm==1]$gene,
                 fromType = 'SYMBOL',
                 toType = 'ENTREZID',
                 OrgDb = org.Hs.eg.db)
# For each permutation, run GSEA on the ranked permuted gene scores and keep
# the adjusted p-values of the significant GO terms.
res_g_perm<-Reduce(function(x,y)rbind(x,y,fill=T),mclapply(1:1000,function(i){
  print(i)
  resg<-res_perm[perm==i]
  gene_scores<-resg$gene_score_add
  names(gene_scores)<-resg$gene
  gene_scores<-gene_scores[genes.df$SYMBOL]
  names(gene_scores)<-genes.df$ENTREZID
  gene_scores<-sort(gene_scores,decreasing = T)
  resgo<-gseGO(geneList = rank(gene_scores),
              ont = "BP",
              minGSSize = 50,
              pvalueCutoff = 1,
              OrgDb = org.Hs.eg.db,
              verbose=F)
  # gseGO may fail to return a gseaResult (e.g. no enriched set) -> empty table.
  if("gseaResult" %in% class(resgo)){
    res_gsea_go<- data.table(as.data.frame(resgo))
    return(res_gsea_go[,perm:=i][ID%in%go_sig][,.(ID,p.adjust,perm)])
  }else{
    return(data.table())
  }
},mc.cores=10))
# Tag the observed results as permutation 0 and stack them with the permuted runs.
res_g[,perm:=0]
res_gp<-merge(res_g,res_g_perm,all=T)
# Empirical permutation p-value per GO term: fraction of permutations whose
# adjusted p-value is at most the observed one.
# (Fixed: the original line had unbalanced parentheses --
#  `/(sum(perm>0,na.rm=T),by="ID"]` -- and did not parse.)
res_gp[,p.perm:=sum(p.adjust[perm==0]>=p.adjust[perm>0],na.rm=T)/sum(perm>0,na.rm=T),by="ID"]
# NOTE(review): the is.na(perm) filter below assumes merge() leaves the
# observed rows with NA perm; verify against the actual merge key columns.
message(nrow(res_gp[is.na(perm)&p.perm<0.01]), " traits are signif !")
fwrite(res_gp[is.na(perm)][,-"perm"],"outputs/03-pathway_analysis/res_gsea_go_perm.csv")
### Complete =======================================================================================
message("Success!", appendLF = TRUE)
|
396544d480e094b66700f69dd23de34997d53cd6
|
f76a68a57b657692be677d81cf8863f347de2e69
|
/RNA-Seq/hw3.R
|
add02a6d7bed1071852d83e09f148394b6de60f9
|
[] |
no_license
|
Leoberium/NGS
|
b4b893cdbaad32cfde869f4db154107bf583e7ef
|
07b87cd1e811793cba50759143f391ea82fe54ea
|
refs/heads/master
| 2020-11-30T20:16:27.973488
| 2019-12-31T12:47:25
| 2019-12-31T12:47:25
| 230,470,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,790
|
r
|
hw3.R
|
# Mazaev Lev, mDBAM18 (comments translated from Russian)
# Load the read-counts file produced at the previous step
raw_counts <- read.delim(file = 'counts.out', header = FALSE, comment.char = '#',
                         stringsAsFactors = FALSE)
samples <- c('B14.5', 'B15.5', 'B17.5', 'B20', 'B34',
             'C14.5', 'C15.5', 'C17.5', 'C20', 'C34')
age <- as.numeric(substring(samples, 2))
tissue <- as.factor(substring(samples, 1, 1))
colnames(raw_counts) <- c('gene', samples)
# Filter out genes that are not expressed
keep <- rowSums(raw_counts[, 2:11]) >= 1
counts <- raw_counts[keep, ]
# Load edgeR, create a DGEList object and normalise the data
library(edgeR)
rownames(counts) <- counts$gene
counts$gene <- NULL
er <- DGEList(counts = as.matrix(counts),
              group = tissue)
er <- calcNormFactors(er, method = 'RLE')
# Estimate the dispersion parameters
formula_er <- ~ tissue + age
dm <- model.matrix(formula_er)
er <- estimateGLMCommonDisp(er, dm)
er <- estimateGLMTrendedDisp(er, dm)
er <- estimateGLMTagwiseDisp(er, dm)
# Conservative dispersion: element-wise max of the three estimates.
strict.disp <- pmax(er$tagwise.dispersion,
                    er$trended.dispersion,
                    er$common.dispersion)
plotBCV(er)
# Fit the GLM model
gfit <- glmFit(er, dm, dispersion = strict.disp)
lrt_tissue <- glmLRT(gfit, 2)
lrt_age <- glmLRT(gfit, 3)
# Adjusted p-values
library(ggplot2)
res_tissue <- topTags(lrt_tissue, n = Inf, adjust.method = 'BH', sort.by = 'none')
qplot(res_tissue$table$PValue, bins = 100,
      xlab = 'Adjusted P-value', ylab = 'Frequency',
      main = 'By tissue')
res_age <- topTags(lrt_age, n = Inf, adjust.method = 'BH', sort.by = 'none')
qplot(res_age$table$PValue, bins = 100,
      xlab = 'Adjusted P-value', ylab = 'Frequency',
      main = 'By age')
# Between-tissue differences of at least 2-fold (|logFC| >= 1)
t2f <- res_tissue$table$logFC >= 1 | res_tissue$table$logFC <= -1
sum(t2f) # 237
# Genes with a significant change between tissues
st <- t2f & (res_tissue$table$PValue < 0.05)
sum(st) # 73 genes
rownames(res_tissue$table)[st]
# Genes with a significant change over age
sa <- res_age$table$PValue < 0.05
sum(sa) # 311 genes
rownames(res_age$table)[sa]
# Cluster the genes that are significant for at least one factor
significant <- (st | sa)
sum(significant) # 352 genes
norm_counts <- cpm(er) # normalised counts
sig_norm_counts <- norm_counts[significant, ] # expression vectors of the significant genes
corr_data <- cor(t(sig_norm_counts), method = 'spearman')
h_data <- hclust(as.dist(1 - corr_data))
cl <- cutree(h_data, k = 6)
plot(h_data, hang = -1, cex = 0.75, xlab = 'Gene')
rect.hclust(tree = h_data, k = 6, border = 1:6, cluster = cl)
# Z-score against age for each cluster
z <- t(scale(t(sig_norm_counts)))
all(rownames(z) == names(cl))
sel <- tissue == 'B'
par(mfrow = c(3, 2))
legy <- c(0.75, 0, 1.0, 0.25, 1.0, 0)
for (i in 1:6) {
  v <- colMeans(z[cl == i, ])
  title_ <- paste('Cluster', i)
  plot(x = age[sel], y = v[sel], type='b', col = 'red',
       xlab = 'Age', ylab = 'Average Z-Score',
       main = title_, pch = 19,
       ylim = c(min(v), max(v)))
  lines(x = age[!sel], y = v[!sel], type = 'b', col = 'blue',
        pch = 19)
  legend(x = 30, y = legy[i], legend = c('B', 'C'), fill = c('red', 'blue'))
}
# Tissue B - red, presumably the cortex
# Tissue C - blue, presumably the cerebellum
|
be3b47a37976962e7f2eef9197186907fb050b95
|
f27ec285b6b79f0a8988497f8536bb6dab5b3586
|
/Decision-tree.r
|
196bdc85fc2b341169f2d245b84f4de773f17130
|
[] |
no_license
|
bioengsamar/ML_project_to_detect_-Breast-cancer
|
bae5bc007a3fdd26689722176225a6fa8a22192f
|
b538ab5af5ba38220ba968f2cf4b78ba2af3a7b4
|
refs/heads/main
| 2023-06-05T04:21:48.135513
| 2021-06-29T03:31:16
| 2021-06-29T03:31:16
| 381,227,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,638
|
r
|
Decision-tree.r
|
data.set <- read.csv("C:/Users/user1/Desktop/prototype/data-set.csv", sep=";")
# Prepare data -----------------------------------------------------------
data <- data.set
# Drop rows containing missing values.
# (Fixed: the original looped over is.na(data) and called na.omit(x) without
#  assigning the result, which had no effect on `data` at all.)
data <- na.omit(data)
data <- data[,-1] #remove 1st column(id number)
# Categorical encoding: diagnosis M -> "malignant", B -> "benign".
data[,1] <- factor(data[,1], levels=c('M' , 'B'), labels=c("malignant", "benign"))
print(summary(data))
#feature selection using the Boruta wrapper algorithm
library(Boruta)
library(ggplot2)
set.seed(1234)
bor<-Boruta(diagnosis~.,data = data,doTrace=1)
print(bor)
plot(bor,las=2)
# Resolve "tentative" attributes into confirmed/rejected.
borr<-TentativeRoughFix(bor)
print(borr)
attStats(borr)
ggplot(data, aes(area_worst, concave.points_worst, col=diagnosis) ) +
  geom_point(size = 3, aes(pch = diagnosis))
# Decision tree with k-fold CV :
library(rpart)
k<- 10
# Assign each row to one of k contiguous folds.
folds <- cut(seq(1,nrow(data)), breaks = k,labels = F)
folds
# Per-fold accuracy, sensitivity (SN) and specificity (SP) on train and test.
accu_train<-0
SN_train<-0
SP_train<-0
accu_test<-0
SN_test<-0
SP_test<-0
for(i in 1:k){
  test<- data[folds==i,]
  train <- data[folds!=i,]
  model<- rpart(diagnosis~.,data=train, method = "class")
  # Confusion matrix on the training fold.
  tab<- table(predict(model,type = "class"),train$diagnosis)
  print(tab)
  accu_train[i]<-(sum(diag(tab))/sum(tab))
  SN_train[i]<-(sum(tab[1,1])/sum(tab[,1]))
  SP_train[i]<-(sum(tab[2,2])/sum(tab[,2]))
  # Confusion matrix on the held-out fold.
  tested<- predict(model,type = "class",newdata=test)
  tab_test<-table(tested,test$diagnosis)
  print(tab_test)
  accu_test[i]<-(sum(diag(tab_test))/sum(tab_test))
  SN_test[i]<-(tab_test[1,1]/sum(tab_test[,1]))
  SP_test[i]<-(tab_test[2,2]/sum(tab_test[,2]))
}
#get average of the per-fold metrics
mean(accu_train)
mean(SN_train)
mean(SP_train)
mean(accu_test)
mean(SN_test)
mean(SP_test)
|
a6098b0f4c97c95ed8b3033f346d3a95299a81e8
|
2d55770643ef3c26b970fe1971ddb587fb151ec2
|
/man/ds.Rd
|
8963634bc87503fefefbe2d484d4077ce57fda3d
|
[] |
no_license
|
Tomakazusa/ds
|
f806188fa5d68ebc182a20d96cf8d162021ab7e3
|
902a0b2327ec880c7808b4404ccc6bb285ad249d
|
refs/heads/main
| 2023-07-08T05:15:19.603853
| 2021-08-12T06:30:36
| 2021-08-12T06:30:36
| 395,215,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 279
|
rd
|
ds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds.R
\name{ds}
\alias{ds}
\title{Creates histogram, boxplot and numeric summary}
\usage{
ds(x)
}
\arguments{
\item{x}{numeric variable}
}
\description{
Creates histogram, boxplot and numeric summary
}
|
885ee24ff028e1d4555cb64451296c115b2ebc22
|
43908ff2ca891aca04818e98b9e2590d754f440d
|
/Multiples of 3 and 5 .R
|
564c89a297950d03d29a3ae083d7b05978084715
|
[] |
no_license
|
zuhe26/chem160project2
|
c02a5db738028aced3dfdc034ad4e8533afc1c58
|
a98ea70cf62b5cb47e361235acd5923b30bdba31
|
refs/heads/main
| 2023-02-02T03:48:20.401330
| 2020-12-18T00:19:03
| 2020-12-18T00:19:03
| 322,444,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
Multiples of 3 and 5 .R
|
# Project Euler #1: sum of all natural numbers below 1000 that are
# multiples of 3 or 5. Vectorized; also avoids shadowing base::sum
# (the original assigned to a variable named `sum` with `=`).
n <- 1:999
total <- sum(n[n %% 3 == 0 | n %% 5 == 0])
cat(total, "\n")
|
92499fd585fbb712dd8e49b2f0744524537d2c7b
|
f1ad76fa058a2235d3adb05ccefc6b262570478e
|
/man/summary_wind_2d.Rd
|
f206a52fe263f3f34d9ec4799552427ece028dc6
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
Ostluft/rOstluft.plot
|
863f733b949dd37e5eaf1d8c1e197596242ef072
|
fbed7ce639ae6778e24c13773b73344942ca7dc2
|
refs/heads/master
| 2022-11-16T12:56:44.199402
| 2020-03-23T11:12:02
| 2020-03-23T11:12:02
| 180,803,285
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,744
|
rd
|
summary_wind_2d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_wind_2d.R
\name{summary_wind_2d}
\alias{summary_wind_2d}
\title{Summarise y values over binned wind data, split into u and v components.}
\usage{
summary_wind_2d(
data,
ws,
wd,
z,
groupings = grp(),
fun = "mean",
fun.args = list(),
nmin = 3,
ws_max = NA,
bins = 10^2,
smooth = TRUE,
k = 100,
extrapolate = TRUE,
dist = 0.1
)
}
\arguments{
\item{data}{a data.frame or tibble containing the data (wide format).
requires input data including at least three columns carrying information regarding:
\itemize{
\item wind direction (in °)
\item wind velocity
\item z-values (e.g. air pollutant concentration)
}}
\item{ws}{symbol giving the wind velocity parameter name (wind velocity preferably in m/s)}
\item{wd}{symbol giving the wind direction parameter name in degrees}
\item{z}{symbol giving the parameter name to be summarised}
\item{groupings}{additional groupings. Use helper \code{\link[=grp]{grp()}} to create}
\item{fun}{function or list of functions for summary.}
\item{fun.args}{a list of extra arguments to pass to fun.}
\item{nmin}{numeric, minimum number of values for fun, if n < nmin: NA is returned}
\item{ws_max}{numeric or Inf, maximum wind velocity for binning: above ws_max, z is set NA}
\item{bins}{numeric, number of bins over the range of values if \code{!groups \%in\% c("u", "v")}}
\item{smooth}{TRUE/FALSE, applies if groups = c("u", "v"); should smoothing of summary results be performed
using \code{\link[=fit_gam_surface]{fit_gam_surface()}}?}
\item{k}{numeric, applies if smooth = TRUE; degree of smoothing in smooth term in \code{\link[=fit_gam_surface]{fit_gam_surface()}}}
\item{extrapolate}{TRUE/FALSE, applies if smooth = TRUE; \code{\link[=fit_gam_surface]{fit_gam_surface()}} returns extrapolated (predicted) values for u, v coordinates that otherwise would have NA for summarised z;
if extrapolate = TRUE, those values are returned (to a certain degree depending on the value of dist)}
\item{dist}{numeric, fraction of 1, applies if smooth = TRUE and extrapolate = TRUE; maximum distance to next coordinate-pair at which the result of
fit_gam_surface(z) should be returned}
}
\value{
a tibble with summarised data along u and v wind vectors
}
\description{
Input data should be original unbinned data including wind direction and wind velocity;
binning is done 2-dimensional over cartesian u and v wind vectors
}
\section{Computed variables}{
\itemize{
\item a tibble is returned, binned over u and v, with variables:
}
\itemize{
\item wd: wind direction corresponding to midpoint value of u and v
\item ws: wind velocity corresponding to midpoint value of u and v
\item u: midpoints of bins over u (from input wd and ws)
\item v: midpoints of bins over v (from input wd and ws)
\item z: result from fun(z, ...)
}
}
\examples{
library(ggplot2)
fn <- rOstluft.data::f("Zch_Stampfenbachstrasse_2010-2014.csv")
data <- rOstluft::read_airmo_csv(fn)
data <- rOstluft::rolf_to_openair(data)
# summary NO2
summary_wind_2d(data, ws, wd, NOx, smooth = FALSE)
# multiple stats: Pass function, by name, reference, as function or one sided formula
funs <- list(
"mean",
"median" = function(x) median(x, na.rm = TRUE),
"q95" = ~ stats::quantile(., probs = 0.95)
)
summary_wind_2d(data, ws, wd, NOx, fun = funs, smooth = FALSE)
# is for some reason fun.args used with multiple functions, use ... to catch
# superfluous arguments:
funs <- list(
"q95" = function(x, ...) stats::quantile(x, probs = 0.95),
"mean"
)
summary_wind_2d(data, ws, wd, NOx, fun = funs, fun.args = list(na.rm = TRUE),
smooth = FALSE)
# additional groupings
summary_wind_2d(data, ws, wd, NOx, groupings = grp(site), smooth = FALSE)
# we can use expressions in grp For better readability groupings is
# defined outside of the function call
groupings = grp("site", year = lubridate::year(date))
summary_wind_2d(data, ws, wd, NOx, groupings = groupings, smooth = FALSE)
# smoothing
df1 <- summary_wind_2d(data, ws, wd, NOx, bins = 100^2, smooth = FALSE)
df2 <- summary_wind_2d(data, ws, wd, NOx, bins = 100^2, extrapolate = FALSE)
df3 <- summary_wind_2d(data, ws, wd, NOx, bins = 100^2, smooth = TRUE)
df <- dplyr::bind_rows(
"smooth = F" = df1,
"smooth = T, extrapolate = F" = df2,
"smooth = T, extrapolate = T" = df3,
.id = "smoothing"
)
ggplot(df, aes(x = u, y = v, fill = NOx)) +
coord_fixed(expand = FALSE) +
lims(x = c(-7.5, 7.5), y = c(-7.5, 7.5)) +
geom_raster() +
scale_fill_viridis_c(na.value = NA) +
facet_wrap(vars(smoothing), ncol = 2)
# for a small number of bins reduce k
summary_wind_2d(data, ws, wd, NO2, bins = 5^2, smooth = TRUE, k = 5)
}
|
32d7563ea1f7a8b9ca6e38c91d8c60ee24532a98
|
48a3612f00fa39c262659196a614f9f3cf51013b
|
/bin/raceid4.R
|
48870b1fe42894e66184739c4552491689c4930c
|
[
"MIT"
] |
permissive
|
rsankowski/mathys-et-al-microglia
|
cea7d72c19e42330b258490b94bbbb07e00fefae
|
a05ed9a50eb56b2c6dc4d03dddf000c21b3ce460
|
refs/heads/master
| 2022-01-17T04:01:27.969326
| 2019-07-22T15:22:51
| 2019-07-22T15:22:51
| 198,243,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,928
|
r
|
raceid4.R
|
#RaceID4 clustering of microglia nuclei from the Mathys et al. AD snRNA-seq data
library(tidyverse)
library(viridis)
library(RaceID)
library(Matrix)
library(data.table)
library(Seurat)
date = Sys.Date()
#download counts file from url: https://stackoverflow.com/questions/28986150/downloading-and-extracting-gz-data-file-using-r
#aut_counts dataset
#url <- "https://cells.ucsc.edu/autism/rawMatrix.zip"
#counts <- Read10X("data/")
# Load the sparse count matrix, its gene names and the per-cell metadata.
counts <- readMM("data/filtered_count_matrix.mtx")
rownames(counts) <- readLines("data/filtered_gene_row_names.txt")
metadata <- read.delim("data/filtered_column_metadata2.txt")
# Keep only cells from the low-amyloid group.
counts <- counts[, which(metadata$amyloid.group == "low")]
# Locate and inspect the microglia marker genes.
# (Fixed: the original called `which(genes == "TMEM119")`, but `genes` is
#  not defined until much later in the script -- use the matrix rownames.)
which(rownames(counts) == "TMEM119")
hist(counts["TMEM119",])
hist(counts["SLC2A5",])
hist(counts["P2RY12",])
# Number of cells expressing each marker.
sum(counts["TMEM119",]>0)
sum(counts["SLC2A5",]>0)
sum(counts["P2RY12",]>0)
# Retain cells expressing at least one of the three microglia markers.
micr_counts <- counts[,which(counts["SLC2A5",]>0 | counts["P2RY12",]>0 | counts["TMEM119",]>0)]
save(micr_counts, file = "data/mathys_ad_nuc_seq-microglia.Robj")
load("data/mathys_ad_nuc_seq-microglia.Robj")
prdata <- as.data.frame(as.matrix(micr_counts))
sc <- SCseq(prdata)
# filtering of expression data
a <- apply(prdata, 2, sum)
sc <- filterdata(sc, mintotal=quantile(a, 0.1)) # exclude cells below the 10th percentile of total counts
# Regress out immediate-early / stress genes before computing distances.
sc <- CCcorrect(sc,
                dimR = T,
                nComp = 20,
                CGenes = c('JUN',
                           'FOS',
                           'ZFP36',
                           'HSPA1A|HSPA1B',
                           'DUSP1',
                           'EGR1',
                           'MALAT1'))
sc <- compdist(sc,metric="pearson")
sc <- clustexp(sc)
# Diagnostics for choosing the number of clusters.
plotsaturation(sc,disp=FALSE)
plotsaturation(sc,disp=TRUE)
plotjaccard(sc)
# Fix the cluster number at 14 and detect outlier cells.
sc <- clustexp(sc,cln=14,sat=FALSE)
sc <- findoutliers(sc)
plotbackground(sc)
plotsensitivity(sc)
plotoutlierprobs(sc)
ord_clust <- clustheatmap(sc)
save(ord_clust, file = 'data/ord_clust.Robj')
pdf(paste0('plots/heatmaps/clustheatmap.pdf'))
clustheatmap(sc, final = T)
dev.off()
# Dimensionality reduction: t-SNE and Fruchterman-Reingold layouts.
sc <- comptsne(sc)
sc <- compfr(sc,knn=10)
plotmap(sc)
plotmap(sc,fr=TRUE)
dev.off()
# Marker-gene expression maps (microglia / macrophage / T-cell markers).
plotexpmap(sc,"MRC1",logsc=F,fr=F)
plotexpmap(sc,"LYVE1",logsc=F,fr=F)
plotexpmap(sc,"CD163",logsc=F,fr=F)
plotexpmap(sc,"TMEM119",logsc=F,fr=F)
plotexpmap(sc,"CX3CR1",logsc=F,fr=F)
plotexpmap(sc,"PTPRC",logsc=F,fr=F)
plotexpmap(sc,"CD3E",logsc=F,fr=F)
plotexpmap(sc,"ITGAM",logsc=F,fr=F)
plotexpmap(sc,"CD8A",logsc=F,fr=F)
plotexpmap(sc,"CD4",logsc=F,fr=F)
plotexpmap(sc,"P2RY12",logsc=F,fr=F)
plotexpmap(sc,"SLC2A5",logsc=F,fr=F)
plotexpmap(sc,"^EGR1",logsc=F,fr=F)
plotexpmap(sc,"JUN",logsc=F,fr=F)
plotexpmap(sc,"GPR34",logsc=F,fr=F)
# Differentially expressed genes of cluster 4.
dg <- clustdiffgenes(sc,4,pvalue=.01)
head(dg,25)
types <- sub("(\\_\\d+)$","", colnames(sc@ndata))
genes <- head(rownames(dg)[dg$fc>1],10)
plotmarkergenes(sc,genes,samples=types)
#Save sc file
save(sc, file = 'data/sc.Robj')
# Export cell IDs of the clusters considered microglia (8-13).
micr_ids <- names(sc@cpart)[sc@cpart %in% c(8:13)]
write_csv(as.data.frame(micr_ids), "data/microglia-cell-ids.csv")
|
8d978285f2cbd78f3b1485f4c30ae4b9b3c45c9a
|
97c050fc3abcb9a8b401808fd4f4be4afd7b11d7
|
/code/day16.R
|
aae25ccc49e964b65176cd5ed10a0672ce50e42a
|
[
"MIT"
] |
permissive
|
brianlle/advent_of_code_2020
|
eda81fda40bd30c8fae1694fb49c7eca2cb494dc
|
10d3ed1b69e1c446e58c5c64267d3f3cea93d861
|
refs/heads/main
| 2023-02-06T08:42:00.397013
| 2020-12-29T21:43:28
| 2020-12-29T21:43:28
| 318,314,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,695
|
r
|
day16.R
|
### ADVENT OF CODE, DAY 16
# PART 1
input <- read.delim("input/day16.txt", header = FALSE,
                    blank.lines.skip = FALSE)
input$V2 <- NA
# The puzzle input has three sections: rules, "your ticket", nearby tickets.
rules <- input[1:20,]
my_ticket <- strsplit(as.character(input[23,1]), split = ",")[[1]]
other_tix <- input[26:263,]
#process the rules with dirty regex
rules$field <- gsub(": .*", "", rules$V1)
rules$nums <- gsub(".*: ", "", rules$V1)
rules$range1 <- gsub(" or .*", "", rules$nums)
rules$range2 <- gsub(".* or ", "", rules$nums)
rules$num1 <- gsub("-.*", "", rules$range1)
rules$num2 <- gsub(".*-", "", rules$range1)
rules$num3 <- gsub("-.*", "", rules$range2)
rules$num4 <- gsub(".*-", "", rules$range2)
#extract ranges of valid numbers for each rule
range <- list()
all_rules <- c()
for (i in 1:nrow(rules)){
  range[[i]] <- c(as.numeric(rules$num1[i]):as.numeric(rules$num2[i]),
                  as.numeric(rules$num3[i]):as.numeric(rules$num4[i]))
  all_rules <- c(all_rules, range[[i]])
}
#combine all rule ranges together for part 1 check, should prolly use an actual set
all_rules <- unique(all_rules)
#process tickets: split each ticket line into its field values
tix <- list()
for (i in 1:nrow(other_tix)){
  tix[[i]] <- strsplit(as.character(other_tix[i,1]), split = ",")[[1]]
}
# Part 1 answer: sum of all ticket values valid under no rule at all.
invalid_value <- c()
for (i in 1:length(tix)){
  ticket <- tix[[i]]
  for (j in 1:length(ticket)){
    if (ticket[j] %in% all_rules){
      # if valid match, do nothing and keep checking fields
    } else {
      invalid_value <- c(invalid_value, as.numeric(ticket[j]))
    }
  }
}
print(sum(invalid_value))
### PART 2
# start by finding tickets with valid values
valid_tix <- c()
for (i in 1:length(tix)){
  ticket <- tix[[i]]
  valid = 0
  for (j in 1:length(ticket)){
    if (ticket[j] %in% all_rules){
      valid = valid + 1
    }
  }
  if (valid == length(ticket)){ # if all 20 ticket values are valid, ticket is valid
    valid_tix <- c(valid_tix, i)
  }
}
# using the valid tickets, for each rule, find ticket fields that would satisfy the rule
# (Fixed: the original compared the per-field valid count against a
#  hard-coded 190 -- the number of valid tickets in this particular input --
#  instead of length(valid_tix); it also seeded each candidate vector with a
#  sentinel 0 that had to be stripped afterwards.)
possible_fields <- list()
for (i in 1:20){
  temp_range <- range[[i]]
  candidates <- integer(0)
  for (field in 1:20){
    # Field is a candidate for rule i only if EVERY valid ticket's value in
    # that position falls inside the rule's allowed range.
    ok <- all(vapply(valid_tix, function(ticket_idx){
      tix[[ticket_idx]][[field]] %in% temp_range
    }, logical(1)))
    if (ok){candidates <- c(candidates, field)}
  }
  # integer(0) (not NULL) so empty results do not delete the list element.
  possible_fields[[i]] <- candidates
}
# rules can have multiple fields that match, but appears to be a simple process of elim puzzle
field_match <- c() #rule fields: e.g. rule 1, rule 2, rule 3
ticket_match <- c() #ticket fields: e.g. 1st number, 2nd number, 3rd number
for (k in 1:20){ # repeat 20 times total
  for (i in 1:20){ #i is rule row; possible_fields is what ticket fields are possible for that row
    # A rule with exactly one remaining candidate field is resolved.
    if (length(possible_fields[[i]]) == 1){
      field_found <- possible_fields[[i]]
      field_match <- c(field_match, i)
      ticket_match <- c(ticket_match, field_found)
      # Mark this rule as done (1:50 can never shrink to length 1 again).
      possible_fields[[i]] <- 1:50
      # Remove the resolved field from every other rule's candidates.
      for (j in 1:20){
        possible_fields[[j]] <- possible_fields[[j]][possible_fields[[j]] != field_found]
      }
      break
    }
  }
}
rule_names <- rules[1:20,3]
rules_df <- data.frame(rule_name = rule_names, rule_num = 1:20)
match_df <- data.frame(rule_num = field_match, ticket_field = ticket_match)
match_df <- merge(rules_df, match_df, by = "rule_num")
dep_fields <- match_df[1:6, 3] # departure fields are 1st 6 rows, so get matching ticket fields
my_ticket_fields <- my_ticket[dep_fields]
print(prod(as.numeric(my_ticket_fields)), digits = 20)
|
b000a98c8a2ea0d368c3da98feed352f6eab2691
|
bbdb9af551c5c4df45074e166f28b7e4a644cac9
|
/modeling/lightGBM3.R
|
4a10ff2fd9ab6a610c799245bf72bcdbe62dc671
|
[] |
no_license
|
yudong-94/Kaggle-Safe-Driver-Prediction
|
80e1ad5939b0c7aab24ca8490a0ed47476726ea1
|
7839f1eaedb88e0c45dab8000f22b6275d6c5bac
|
refs/heads/master
| 2021-08-23T00:22:30.014967
| 2017-12-01T21:56:04
| 2017-12-01T21:56:04
| 106,158,065
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,824
|
r
|
lightGBM3.R
|
# LightGBM experiments on engineered feature sets (binary target, AUC CV).
# NOTE(review): setwd() with an absolute user path is non-portable.
setwd("/Users/hzdy1994/Desktop/Kaggle")
#library(parallel)
library(Matrix)
library(lightgbm)
####################################
# try to figure out the overfitting feature
load("data/new_feature_no_corr2.RData")
# suspects: avg_car13_on_car04
train_test$avg_car13_on_car04 = NULL
train_test[is.na(train_test)] = -1
train = train_test[train_test$data == "train", -2]
test = train_test[train_test$data != "train", -2]
train_matrix = sparse.model.matrix(target ~ .-1, data = train[, c(2:44)])
dlgb_train = lgb.Dataset(data = train_matrix, label = train$target)
test_matrix = as.matrix(test[,c(3:44)])
start = Sys.time()
param <- list(objective = "binary",
              learning_rate = 0.0025,
              num_leaves = 30,
              max_depth = 4,
              min_data_in_leaf = 2000,
              min_sum_hessian_in_leaf = 50,
              num_threads = 3)
cv = lgb.cv(param,
            dlgb_train,
            nrounds = 10000,
            nfold = 5,
            eval = "auc",
            verbose = 1,
            early_stopping_rounds = 20)
Sys.time() - start
# before getting rid of the feature: 0.642553
# after: 0.6419
#################################
# training with the normalized and missing value imputed data
load("data/new_feature_normalized.RData")
train = train_test[train_test$data == "train", -2]
test = train_test[train_test$data != "train", -2]
train_matrix = sparse.model.matrix(target ~ .-1, data = train[, c(2:51)])
dlgb_train = lgb.Dataset(data = train_matrix, label = train$target)
test_matrix = as.matrix(test[,c(3:51)])
# Grid search over num_leaves x min_sum_hessian x min_data_in_leaf,
# logging the best iteration/score of each 5-fold CV run to tunning.csv.
cv_tunning = data.frame(num_leaves = numeric(0),
                        min_hessian = numeric(0),
                        min_data_in_leaf = numeric(0),
                        best_itr = numeric(0),
                        best_gini = numeric(0))
for (n_leaves in c(30, 50)) {
  for (min_hessian in c(50, 100, 150)) {
    for (min_leaf in c(1500, 2000, 2500)) {
      param <- list(objective = "binary",
                    learning_rate = 0.0025,
                    num_leaves = n_leaves,
                    max_depth = 4,
                    min_data_in_leaf = min_leaf,
                    min_sum_hessian_in_leaf = min_hessian,
                    num_threads = 3)
      cv = lgb.cv(param,
                  dlgb_train,
                  nrounds = 10000,
                  nfold = 5,
                  eval = "auc",
                  verbose = 1,
                  early_stopping_rounds = 50)
      cv_tunning[nrow(cv_tunning)+1, ] = c(n_leaves,
                                           min_hessian,
                                           min_leaf,
                                           cv$best_iter,
                                           cv$best_score)
      write.csv(cv_tunning, "tunning.csv", row.names = FALSE)
    }
  }
}
# not good.. best only 0.6423
# what if only normalization, no missing value imputation?
load("data/new_feature_no_corr3.RData")
train_test[is.na(train_test)] = -1
train_test[is.infinite(train_test$car13_car15),"car13_car15"] = 2.0
train_test[,c(4:52)] = scale(train_test[,c(4:52)])
train = train_test[train_test$data == "train", -2]
test = train_test[train_test$data != "train", -2]
train_matrix = sparse.model.matrix(target ~ .-1, data = train[, c(2:51)])
dlgb_train = lgb.Dataset(data = train_matrix, label = train$target)
test_matrix = as.matrix(test[,c(3:51)])
param <- list(objective = "binary",
              learning_rate = 0.0025,
              num_leaves = 50,
              max_depth = 4,
              min_data_in_leaf = 2000,
              min_sum_hessian_in_leaf = 125,
              num_threads = 3)
cv = lgb.cv(param,
            dlgb_train,
            nrounds = 10000,
            nfold = 5,
            eval = "auc",
            verbose = 1,
            early_stopping_rounds = 50)
# before normalization: 0.6428838
# after: 0.641625
# what if missing value imputation only, but not normalization?
load("data/new_feature_no_corr3.RData")
# input missing values with median
for (col in colnames(train_test)) {
  if (anyNA(train_test[,col])) {
    median_col = median(na.omit(train_test[,col]))
    train_test[is.na(train_test[,col]),col] = median_col
  }
}
train = train_test[train_test$data == "train", -2]
test = train_test[train_test$data != "train", -2]
train_matrix = sparse.model.matrix(target ~ .-1, data = train[, c(2:51)])
dlgb_train = lgb.Dataset(data = train_matrix, label = train$target)
test_matrix = as.matrix(test[,c(3:51)])
param <- list(objective = "binary",
              learning_rate = 0.0025,
              num_leaves = 50,
              max_depth = 4,
              min_data_in_leaf = 2000,
              min_sum_hessian_in_leaf = 125,
              num_threads = 3)
cv = lgb.cv(param,
            dlgb_train,
            nrounds = 10000,
            nfold = 5,
            eval = "auc",
            verbose = 1,
            early_stopping_rounds = 50)
# before missing value imputation: 0.6428838
# after: 0.641624
#################################
# new feature set 4 - added categorical regs
load("data/new_feature_4.RData")
train_test[is.na(train_test)] = -1
train = train_test[train_test$data == "train", -2]
test = train_test[train_test$data != "train", -2]
train_matrix = sparse.model.matrix(target ~ .-1, data = train[, c(2:51)])
dlgb_train = lgb.Dataset(data = train_matrix, label = train$target)
test_matrix = as.matrix(test[,c(3:51)])
cv_tunning = data.frame(num_leaves = numeric(0),
                        min_hessian = numeric(0),
                        best_itr = numeric(0),
                        best_gini = numeric(0))
for (n_leaves in c(25, 50)) {
  for (min_hessian in c(50, 100, 125)) {
    param <- list(objective = "binary",
                  learning_rate = 0.0025,
                  num_leaves = n_leaves,
                  max_depth = 4,
                  min_data_in_leaf = 2000,
                  min_sum_hessian_in_leaf = min_hessian,
                  num_threads = 3)
    cv = lgb.cv(param,
                dlgb_train,
                nrounds = 10000,
                nfold = 5,
                eval = "auc",
                verbose = 1,
                early_stopping_rounds = 50)
    cv_tunning[nrow(cv_tunning)+1, ] = c(n_leaves,
                                         min_hessian,
                                         cv$best_iter,
                                         cv$best_score)
    write.csv(cv_tunning, "tunning.csv", row.names = FALSE)
  }
}
# Re-run CV with the chosen parameters to pick the number of rounds.
param <- list(objective = "binary",
              learning_rate = 0.0025,
              num_leaves = 50,
              max_depth = 4,
              min_data_in_leaf = 2000,
              min_sum_hessian_in_leaf = 50,
              num_threads = 3)
cv = lgb.cv(param,
            dlgb_train,
            nrounds = 10000,
            nfold = 5,
            eval = "auc",
            verbose = 1,
            early_stopping_rounds = 50)
# Final fit on the full training set with the CV-selected round count.
start = Sys.time()
lgb_model <- lgb.train(data = dlgb_train,
                       objective = "binary",
                       learning_rate = 0.0025,
                       nrounds = 6545,
                       num_leaves = 50,
                       max_depth = 4,
                       min_data_in_leaf = 2000,
                       min_sum_hessian_in_leaf = 50,
                       num_threads = 3)
Sys.time() - start
# 7 min
pred <- predict(lgb_model, test_matrix)
prediction <- data.frame(cbind(test$id, pred))
colnames(prediction) = c("id", "target")
write.csv(prediction, "prediction.csv", row.names = FALSE)
# test gini = 0.286
# public lb = 0.280
importance = lgb.importance(lgb_model)
write.csv(importance, "importance.csv", row.names = FALSE)
# save the predictions for the training set
pred <- predict(lgb_model, train_matrix)
prediction <- data.frame(cbind(train$id, pred, train$target))
colnames(prediction) = c("id", "pred", "target")
write.csv(prediction, "lightgbm7_train.csv", row.names = FALSE)
#################################
# new feature set 5 - added ps_car_14 deviation features
# NOTE(review): no load() call here -- this section appears to assume
# feature set 5 is already in `train_test` (loaded interactively); verify.
train_test[is.na(train_test)] = -1
train = train_test[train_test$data == "train", -2]
test = train_test[train_test$data != "train", -2]
train_matrix = sparse.model.matrix(target ~ .-1, data = train[, c(2:54)])
dlgb_train = lgb.Dataset(data = train_matrix, label = train$target)
test_matrix = as.matrix(test[,c(3:54)])
cv_tunning = data.frame(num_leaves = numeric(0),
                        min_hessian = numeric(0),
                        best_itr = numeric(0),
                        best_gini = numeric(0))
for (n_leaves in c(25, 50)) {
  for (min_hessian in c(50, 100, 125)) {
    param <- list(objective = "binary",
                  learning_rate = 0.0025,
                  num_leaves = n_leaves,
                  max_depth = 4,
                  min_data_in_leaf = 2000,
                  min_sum_hessian_in_leaf = min_hessian,
                  num_threads = 3)
    cv = lgb.cv(param,
                dlgb_train,
                nrounds = 10000,
                nfold = 5,
                eval = "auc",
                verbose = 1,
                early_stopping_rounds = 50)
    cv_tunning[nrow(cv_tunning)+1, ] = c(n_leaves,
                                         min_hessian,
                                         cv$best_iter,
                                         cv$best_score)
    write.csv(cv_tunning, "tunning3.csv", row.names = FALSE)
  }
}
# not good....
373eea7673969884ff370fe3eff1f6b82912c58f
|
c5d173f7755dc27e348ef616ebafba137ee7e1da
|
/R/Operators.R
|
d535b1b33de176f0421ca72aa1470a64a2754847
|
[
"MIT"
] |
permissive
|
Ilia-Kosenkov/RLibs
|
85679202753b7565d8995350e74b28a8c3d624d9
|
60e34778a96f5ba9705b5fa5da0bffcc3be482fd
|
refs/heads/master
| 2021-03-27T16:13:53.106985
| 2020-01-23T15:33:33
| 2020-01-23T15:33:33
| 96,409,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,060
|
r
|
Operators.R
|
# MIT License
#
# Copyright(c) 2017-2018 Ilia Kosenkov [ilia.kosenkov.at.gm@gmail.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission
# notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#' @title \code{is} interfix operator
#' @param object Object to test.
#' @param class Target type (supports \code{rlang} quosure).
#' @description Works atop of \code{vctrs}
#' @return \code{logical} \code{TRUE} if
#' \code{object} is of class \code{class}, \code{FALSE} otherwise.
#' @importFrom rlang quo_squash enquo sym exec
#' @importFrom vctrs vec_ptype vec_is
#' @export
`%is%` <- function(object, class) {
    #lifecycle::deprecate_warn("0.6.1", "RLibs::`%is%`()")
    # Capture the RHS unevaluated and coerce it to a symbol, so the
    # operator can be written as `x %is% integer` rather than `x %is% integer()`.
    class <- sym(quo_squash(enquo(class)))
    # Call the zero-arg constructor named by the symbol to get a prototype.
    ptype <- vec_ptype(exec(class))
    vec_is(object, ptype)
}
#' @title Concat/add infix operator.
#' @param x Left summand.
#' @param y Right summand.
#' @description Performs (possibly) a vectorized summation operation,
#' which depends on the class of operators.
#' Following methods are implemented:
#' \code{character} + \code{character},
#' 1-to-1 vectorized, concatenation of strings.
#' Does the same as `%+%`.
#' @return Result of the appropriate summation/concatenation.
#' @importFrom purrr map2_chr
#' @importFrom vctrs vec_ptype_common vec_recycle_common vec_cast
#' @export
`%&%` <- function(x, y) {
  # Common type of both operands, forced to be at least character.
  ptype <- vec_ptype_common(x, y, character(0))
  # NOTE: vec_cast() was previously used without being imported; it is
  # now listed in @importFrom above so the NAMESPACE resolves it.
  cast <- vec_recycle_common(x = vec_cast(x, ptype), y = vec_cast(y, ptype))
  # Element-wise concatenation of the recycled pair.
  map2_chr(cast$x, cast$y, paste0)
}
#' @title Concat/add infix operator.
#' @param x Left summand.
#' @param y Right summand.
#' @description Performs (possibly) a vectorized summation operation,
#' which depends on the class of operators.
#' Following methods are implemented:
#' \code{character} + \code{character},
#' 1-to-1 vectorized, concatenation of strings.
#' Deprecated since 0.6.1 in favour of \code{\%&\%}.
#' @return Result of the appropriate summation/concatenation.
#' @importFrom purrr map2_chr
#' @export
`%+%` <- function(x, y) {
  # Emit a soft-deprecation warning, then delegate to the replacement.
  lifecycle::deprecate_warn("0.6.1", "RLibs::`%+%`()", "RLibs::`%&%`()")
  result <- RLibs::`%&%`(x, y)
  result
}
#' @title Null/empty-coalescing operator
#' @description Improves the \code{rlang::\%||\%} operator by
#' handling also cases of zero-length objects.
#' @param x Left side of the operator. To be tested.
#' @param y Right side of the operator. Is returned if left side is
#' null or empty.
#' @return Either x or y.
#' @importFrom rlang is_empty is_null
#' @export
`%??%` <- function(x, y) {
  # Fall back to the right-hand side when the left-hand side carries
  # no value at all (NULL or zero-length).
  if (is_null(x) || is_empty(x)) {
    return(y)
  }
  x
}
#' @title Dot-product
#' @description A \code{vctrs}-typed replacement to \code{base::`\%*\%`}.
#' Operands are cast to a common type and recycled to a common size
#' before the element-wise product is summed.
#' @param x LHS.
#' @param y RHS.
#'
#' @return A dot product of two vectors.
#' @importFrom vctrs vec_cast_common vec_recycle_common
#' @export
`%.%` <- function(x, y) {
  # NOTE: vec_cast_common()/vec_recycle_common() were used without an
  # @importFrom tag in this block; the tag above makes the dependency
  # explicit in the generated NAMESPACE.
  r <- vec_recycle_common(!!!vec_cast_common(x = x, y = y))
  sum(r$x * r$y)
}
#' @title Deconstruction operators
#' @rdname deconst
#' @description Mimics \code{zeallot}'s behaviour: unpacks the elements
#' of a vector/list into individual variables named via \code{c(...)}.
#' @param lhs,rhs Left- and right-hand side of the operator
#'
#' @return Data (invisibly)
#' @export
`%->%` <- function(lhs, rhs) {
  # Forward form: data %->% c(a, b); rhs is captured unevaluated.
  deconstructor(lhs, {{ rhs }})
}
#' @rdname deconst
#' @export
`%<-%` <- function(lhs, rhs) {
  # Reverse form: c(a, b) %<-% data; lhs is captured unevaluated.
  deconstructor(rhs, {{ lhs }})
}
# Internal worker: assigns each element of `what` to the names listed in
# the quoted `c(...)` expression, in the caller's environment.
# NOTE(review): assert_that, vec_size and walk2 are not imported in this
# visible chunk — presumably @importFrom'd elsewhere in the package; confirm.
deconstructor <- function(what, into) {
  # Capture the target expression together with its environment so the
  # assignments land where the operator was called.
  q <- enquo(into)
  env <- quo_get_env(q)
  expr <- as.list(quo_get_expr(q))
  # Only the literal c(name1, name2, ...) form is supported.
  assert_that(expr[[1]] == sym("c"), msg = "Only `c` can be used to combine names")
  names <- expr[-1]
  # One target name per element; recycling is deliberately not allowed.
  assert_that(vec_size(what) == vec_size(names), msg = "LHS and RHS should have equal length")
  # Bind each value to its name in the caller's environment.
  invisible(walk2(what, names, ~ assign(as.character(.y), .x, envir = env)))
}
#' @title Compose functions
#' @rdname composer
#' @param x Lhs.
#' @param y Rhs.
#' @description Composes two functions using \code{purrr::compose}, in different directions.
#' \code{\%>>\%} applies \code{x} first, \code{\%<<\%} applies \code{y} first.
#' Supports \code{rlang}-style lambdas (in parentheses).
#' @return A composed function
#'
#' @examples
#' (~.x ^ 2) %>>% (~.x + 5)
#' @export
`%>>%` <- function(x, y) {
  # Left-to-right composition: the lhs runs before the rhs.
  compose(x, y, .dir = "forward")
}
#' @rdname composer
#' @export
`%<<%` <- function(x, y) {
  # Mathematical composition: the rhs runs before the lhs.
  compose(x, y)
}
|
a5bb9cdf85600f88448026a364a8b47979feb74d
|
c8e71af48d925c34d1cb9f4dad262c970e8968d5
|
/man/FlightResponse.Rd
|
1e518c74f99b915bd91f225401f4f894c4892678
|
[
"MIT"
] |
permissive
|
tessington/qsci381
|
43c7cd323ab64cf28ba738be35779157c93e62cf
|
b981f0bd345b250d42ff5f1c0609e5e61f5911f7
|
refs/heads/master
| 2022-12-24T20:56:56.045374
| 2020-09-24T20:50:29
| 2020-09-24T20:50:29
| 284,817,926
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,834
|
rd
|
FlightResponse.Rd
|
\name{FlightResponse}
\alias{FlightResponse}
\docType{data}
\title{Response of Migratory Geese to Helicopter Overflights}
\description{
Flight response of Pacific Brant to overflights of helicopters
}
\format{
A dataset with 464 observations on the following 7 variables.
\tabular{rl}{
\code{FlockID} \tab {Flock ID}\cr
\code{Altitude} \tab {Altitude of the overflight by the helicopter (in 100m)}\cr
\code{Lateral} \tab {Lateral distance (in 100m) between the aircraft and flock}\cr
\code{Flight} \tab {\code{1}=more than 10\% of flock flies away or \code{0}=otherwise}\cr
\code{AltLat} \tab {Product of Altitude x Lateral}\cr
\code{AltCat} \tab {Altitude categories: \code{low}=under 3, \code{mid}=3 to 6, \code{high}=over 6}\cr
\code{LatCat} \tab {Lateral categories: \code{1}=under 10 to \code{4}=over 30}\cr
}
}
\details{
A 1994 study collected data on the effects of air
traffic on the behavior of the Pacific Brant (a small migratory goose).
The data represent the flight
response to helicopter "overflights" to see what the relationship between the proximity of a flight,
both lateral and altitudinal, would be to the propensity of the Brant to flee the area. For this
experiment, air traffic was restricted to helicopters because previous study had ascertained that
helicopters created more radical flight response than other aircraft.
The data are in FlightResponse. Each case represents a flock of Brant that has been observed
during one overflight in the study. Flocks were determined observationally as contiguous collections
of Brants, flock sizes varying from 10 to 30,000 birds.
}
\source{
Data come from the book Statistical Case Studies: A Collaboration Between Academe and Industry, Roxy Peck,
Larry D. Haugh, and Arnold Goodman, editors; SIAM and ASA, 1998.
}
\keyword{datasets}
|
61a7b74f23730118a91ea1d0d8f79517960945a4
|
e06965698053952f7f97c60349a590e42d08b633
|
/man/to_json.Rd
|
aa8adcd3f475a86f7904c965b376ef8a4f4a451f
|
[
"Apache-2.0"
] |
permissive
|
kcf-jackson/sketch
|
a9940c89ed8183627914861a11893856b1c47429
|
b597f01e540f35aab1f5ee2d3744f6f64c70c94d
|
refs/heads/master
| 2022-11-01T03:28:32.088340
| 2022-10-23T14:22:05
| 2022-10-23T14:22:05
| 222,058,097
| 106
| 5
|
NOASSERTION
| 2022-10-23T14:22:07
| 2019-11-16T06:36:59
|
HTML
|
UTF-8
|
R
| false
| true
| 851
|
rd
|
to_json.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assets-to_json.R
\name{to_json}
\alias{to_json}
\title{Convert a file into a JavaScript expression}
\usage{
to_json(input, as_data_frame, read_fun, ...)
}
\arguments{
\item{input}{A character string; the path to the input file.}
\item{as_data_frame}{TRUE or FALSE; whether the data are loaded as a data-frame.}
\item{read_fun}{A function to load the input file. Default settings are provided for
CSV files and JSON files. The function has to load a data file into an object that can
be handled by `jsonlite::toJSON`. Possible choices include `utils::read_delim`,
`readr::read_csv2`, etc.}
\item{...}{Extra arguments to be passed to `read_fun`.}
}
\description{
It supports csv and json by default and lets users provide
custom handlers if other file formats are used.
}
|
bfc5fd107e09bb016b1cea7a19697bf85a86e85d
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052341-test.R
|
be119533a704a58447f581612bf5fc69ac459605
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
1610052341-test.R
|
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(-5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -5.04975683349975e-195, -8.81443064112718e-280, 7.30704244877387e-312, 0, 2.71615461306795e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
973635d7e14db90142bcfeaef538814273250266
|
1f004277739de856870948b0d4c25a1d2300483b
|
/microarray fallplot.R
|
cf60b3400db242545b48e87bf5f0d5368153a005
|
[] |
no_license
|
fang2065/FangBioinfo
|
5c8e47d4740f96952b19d09ad3323106647100d5
|
e4e60936fc717dd3bc07799a81f1e22699609792
|
refs/heads/master
| 2023-03-11T21:46:41.862452
| 2023-02-26T20:17:05
| 2023-02-26T20:17:05
| 253,918,269
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,852
|
r
|
microarray fallplot.R
|
# ---- Packages: GEO retrieval, limma differential expression, plotting ----
library(GEOquery)
library(limma)
library(tidyverse)
library(ggfortify)
library(gplots)
library(ggplot2)
library(RColorBrewer)
library(biomaRt)
library(ComplexHeatmap)
library(circlize)
library(fgsea)
library(labeling)
library(Hmisc)
library(ggrepel)
# ---- Load GEO series GSE54646, selecting platform GPL4685 if several ----
gset <- getGEO("GSE54646", GSEMatrix =TRUE, AnnotGPL=TRUE)
if (length(gset) > 1) idx <- grep("GPL4685", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
# make proper column names to match toptable
fvarLabels(gset) <- make.names(fvarLabels(gset))
# group membership for all samples
# (one digit per sample; "X" marks samples excluded from the analysis)
gsms <- paste0("40545005520666533255040560035542044300050053206030",
               "20600545360653346500555666365665665665555661111111",
               "1111XXXXXXXXXXXX")
sml <- strsplit(gsms, split="")[[1]]
# filter out excluded samples (marked as "X")
sel <- which(sml != "X")
sml <- sml[sel]
gset <- gset[ ,sel]
# log2 transformation
# GEO2R heuristic: only log-transform when quantiles suggest the matrix
# is still on a linear intensity scale; non-positive values become NaN.
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
  (qx[6]-qx[1] > 50 && qx[2] > 0)
if (LogC) { ex[which(ex <= 0)] <- NaN
  exprs(gset) <- log2(ex) }
# assign samples to groups and set up design matrix
gs <- factor(sml)
groups <- make.names(c("PV_JAK2_Positive","Health","PV_JAK2_Negative","MF_JAK2_Negative","MF_JAK2_Positive","ET_JAK2_Positive","ET_JAK2_Negative"))
levels(gs) <- groups
gset$group <- gs
# Cell-means parameterisation (~group + 0): one coefficient per group.
design <- model.matrix(~group + 0, gset)
colnames(design) <- levels(gs)
fit <- lmFit(gset, design) # fit linear model
# set up contrasts of interest and recalculate model coefficients
# Each contrast compares one MPN subtype (PV/ET/MF, JAK2 +/-) to controls.
cont.matrix <- makeContrasts(PVvsCon = PV_JAK2_Positive-Health,
                             ETvsCon = ET_JAK2_Positive-Health,
                             MFvsCon = MF_JAK2_Positive-Health,
                             PVvsCon2 = PV_JAK2_Negative-Health,
                             ETvsCon2 = ET_JAK2_Negative-Health,
                             MFvsCon2 = MF_JAK2_Negative-Health,
                             levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
# compute statistics and table of top significant genes
fit2 <- eBayes(fit2, 0.01)
# ---- PV (JAK2+) vs control: waterfall ("fall") plot ----
# Genes at adjusted p < 0.1, sorted by log fold-change.
tT_PVvsCon <- topTable(fit2, coef=c("PVvsCon"), adjust="fdr", sort.by="logFC", p.value = 0.1, n = Inf)
Table_PVvsCon <- subset(tT_PVvsCon, select=c("Gene.symbol","adj.P.Val","logFC"))
# Keep one row per gene symbol, dropping missing symbols/p-values.
resannot_PV <- Table_PVvsCon[!duplicated(Table_PVvsCon$Gene.symbol), ]
resannot_PV<-resannot_PV %>% dplyr::filter(!is.na(adj.P.Val) & !is.na(Gene.symbol))
# write.table(tT, file=stdout(), row.names=F, sep="\t")
# Label direction of regulation and rank genes from most up- to most
# down-regulated.
trend_PV <-sapply(resannot_PV$logFC, function(x){if(x>0) 'up' else 'down'})
resannot_PV <- resannot_PV[order(resannot_PV$logFC, decreasing = T),]
pre_ranked_sig_genes_PV <- data.frame(resannot_PV, 'trend' = trend_PV, 'rank' = 1:nrow(resannot_PV), stringsAsFactors = F)
# Genes of interest to highlight on the plot.
to_be_point_out_PV <- rbind(pre_ranked_sig_genes_PV[pre_ranked_sig_genes_PV$Gene.symbol == "CD24", ],
                            pre_ranked_sig_genes_PV[pre_ranked_sig_genes_PV$Gene.symbol == "CD47", ])
tiff(file="fallplot_PV.tiff", width=700, height=700, res = 150)
# NOTE(review): xintercept = 1636.5 is hard-coded — presumably the
# up/down boundary rank for this contrast; confirm against the data.
ggplot(pre_ranked_sig_genes_PV, aes(x=rank, y=logFC, color=logFC)) +
  geom_point(size=1)+
  geom_hline(yintercept = c(2,-2), linetype=2, size=0.25)+
  geom_hline(yintercept = c(0), linetype=1, size=0.5)+
  geom_vline(xintercept = 1636.5, linetype=2, size=0.25)+
  scale_color_gradient2(low="navy", high="firebrick3", mid="white", midpoint = 0)+
  geom_point(inherit.aes = F, data=to_be_point_out_PV, aes(x=rank, y=logFC), size = 3, color = 'black')+
  geom_point(inherit.aes = F, data=to_be_point_out_PV, aes(x=rank, y=logFC), size = 2, color = 'yellow')+
  ggrepel::geom_text_repel(inherit.aes = F, data = to_be_point_out_PV, aes(x=rank, y=logFC, label=Gene.symbol), size =5)+
  xlab('rank of differentially expressed genes') +
  theme_bw()+
  theme(panel.grid = element_line(color = 'white'), legend.title.align = 0.5)
dev.off()
# ---- ET (JAK2+) vs control: same pipeline as the PV section above ----
tT_ETvsCon <- topTable(fit2, coef=c("ETvsCon"), adjust="fdr", sort.by="logFC", p.value = 0.1, n = Inf)
Table_ETvsCon <- subset(tT_ETvsCon, select=c("Gene.symbol","adj.P.Val","logFC"))
resannot_ET <- Table_ETvsCon[!duplicated(Table_ETvsCon$Gene.symbol), ]
resannot_ET <-resannot_ET %>% dplyr::filter(!is.na(adj.P.Val) & !is.na(Gene.symbol))
trend_ET <-sapply(resannot_ET$logFC, function(x){if(x>0) 'up' else 'down'})
resannot_ET <- resannot_ET[order(resannot_ET$logFC, decreasing = T),]
pre_ranked_sig_genes_ET <- data.frame(resannot_ET, 'trend' = trend_ET, 'rank' = 1:nrow(resannot_ET), stringsAsFactors = F)
# Highlight CD24/CD47 on the plot.
to_be_point_out_ET <- rbind(pre_ranked_sig_genes_ET[pre_ranked_sig_genes_ET$Gene.symbol == "CD24", ], pre_ranked_sig_genes_ET[pre_ranked_sig_genes_ET$Gene.symbol == "CD47", ])
tiff(file="fallplot_ET.tiff", width=700, height=700, res = 150)
# NOTE(review): the vline rank 1636.5 is reused from the PV plot —
# verify it is also the up/down boundary for this contrast.
ggplot(pre_ranked_sig_genes_ET, aes(x=rank, y=logFC, color=logFC)) +
  geom_point(size=1)+
  geom_hline(yintercept = c(2,-2), linetype=2, size=0.25)+
  geom_hline(yintercept = c(0), linetype=1, size=0.5)+
  geom_vline(xintercept = 1636.5, linetype=2, size=0.25)+
  scale_color_gradient2(low="navy", high="firebrick3", mid="white", midpoint = 0)+
  geom_point(inherit.aes = F, data=to_be_point_out_ET, aes(x=rank, y=logFC), size = 3, color = 'black')+
  geom_point(inherit.aes = F, data=to_be_point_out_ET, aes(x=rank, y=logFC), size = 2, color = 'yellow')+
  ggrepel::geom_text_repel(inherit.aes = F, data = to_be_point_out_ET, aes(x=rank, y=logFC, label=Gene.symbol), size =5)+
  xlab('rank of differentially expressed genes') +
  theme_bw()+
  theme(panel.grid = element_line(color = 'white'), legend.title.align = 0.5)
dev.off()
# ---- MF (JAK2+) vs control: same pipeline as the PV section above ----
tT_MFvsCon <- topTable(fit2, coef=c("MFvsCon"), adjust="fdr", sort.by="logFC", p.value = 0.1, n = Inf)
Table_MFvsCon <- subset(tT_MFvsCon, select=c("Gene.symbol","adj.P.Val","logFC"))
resannot_MF <- Table_MFvsCon[!duplicated(Table_MFvsCon$Gene.symbol), ]
resannot_MF <-resannot_MF %>% dplyr::filter(!is.na(adj.P.Val) & !is.na(Gene.symbol))
trend_MF <-sapply(resannot_MF$logFC, function(x){if(x>0) 'up' else 'down'})
resannot_MF <- resannot_MF[order(resannot_MF$logFC, decreasing = T),]
pre_ranked_sig_genes_MF <- data.frame(resannot_MF, 'trend' = trend_MF, 'rank' = 1:nrow(resannot_MF), stringsAsFactors = F)
# Highlight CD24/CD47 on the plot.
to_be_point_out_MF <- rbind(pre_ranked_sig_genes_MF[pre_ranked_sig_genes_MF$Gene.symbol == "CD24", ], pre_ranked_sig_genes_MF[pre_ranked_sig_genes_MF$Gene.symbol == "CD47", ])
tiff(file="fallplot_MF.tiff", width=700, height=700, res = 150)
# NOTE(review): vline rank 1636.5 reused again — confirm per contrast.
ggplot(pre_ranked_sig_genes_MF, aes(x=rank, y=logFC, color=logFC)) +
  geom_point(size=1)+
  geom_hline(yintercept = c(2,-2), linetype=2, size=0.25)+
  geom_hline(yintercept = c(0), linetype=1, size=0.5)+
  geom_vline(xintercept = 1636.5, linetype=2, size=0.25)+
  scale_color_gradient2(low="navy", high="firebrick3", mid="white", midpoint = 0)+
  geom_point(inherit.aes = F, data=to_be_point_out_MF, aes(x=rank, y=logFC), size = 3, color = 'black')+
  geom_point(inherit.aes = F, data=to_be_point_out_MF, aes(x=rank, y=logFC), size = 2, color = 'yellow')+
  ggrepel::geom_text_repel(inherit.aes = F, data = to_be_point_out_MF, aes(x=rank, y=logFC, label=Gene.symbol), size =5)+
  xlab('rank of differentially expressed genes') +
  theme_bw()+
  theme(panel.grid = element_line(color = 'white'), legend.title.align = 0.5)
dev.off()
# ---- JAK2-negative contrasts vs control (tables only, no plots) ----
# NOTE(review): fit2 was already moderated by eBayes above; this second
# call appears redundant — confirm it is intentional.
fit2 <- eBayes(fit2, 0.01)
tT_PVvsCon2 <- topTable(fit2, coef=c("PVvsCon2"), adjust="fdr", sort.by="logFC", p.value = 0.1, n = Inf)
Table_PVvsCon2 <- subset(tT_PVvsCon2, select=c("Gene.symbol","adj.P.Val","logFC"))
tT_ETvsCon2 <- topTable(fit2, coef=c("ETvsCon2"), adjust="fdr", sort.by="logFC", p.value = 0.1, n = Inf)
Table_ETvsCon2 <- subset(tT_ETvsCon2, select=c("Gene.symbol","adj.P.Val","logFC"))
tT_MFvsCon2 <- topTable(fit2, coef=c("MFvsCon2"), adjust="fdr", sort.by="logFC", p.value = 0.1, n = Inf)
Table_MFvsCon2 <- subset(tT_MFvsCon2, select=c("Gene.symbol","adj.P.Val","logFC"))
|
bffb69ba7c0b557668e137ae0accb78e2f1302d0
|
53f1c6c854a64ea0ef3f3ef356bfca12e7272345
|
/man/plotGeo.Rd
|
cde418bab07ad8174ed212cb3914058270c375cb
|
[
"MIT"
] |
permissive
|
vojind/gca
|
908c803647f2a138a1381d1c1330559e7a701070
|
5bb63317e5f30c048ee3dedcc90e2b0cfc811cab
|
refs/heads/main
| 2023-07-12T19:20:27.965937
| 2021-08-26T16:53:02
| 2021-08-26T16:53:02
| 372,430,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 309
|
rd
|
plotGeo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geoAnalysisModule.R
\name{plotGeo}
\alias{plotGeo}
\title{display incidence plots arranged as Germany}
\usage{
plotGeo(df)
}
\arguments{
\item{df}{dataframe}
}
\description{
Display incidence for each state, arranged as Germany.
}
|
7fb367726aa3c9a6e045b219fa86b185a4be1e6d
|
e8719bb67917ca9cb076b8ae83048507ecdb014a
|
/R/pperm.R
|
93937c1f92a0b117a486faa31da2c04381d14d6b
|
[] |
no_license
|
sauwai/NetRep
|
9b42df9ff5ca33adbf4f37878b9d23030cde23ed
|
90df54d99f00960a14eda2007242395801ba5f49
|
refs/heads/master
| 2021-01-20T19:09:26.663600
| 2016-02-12T06:03:50
| 2016-02-12T06:03:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,119
|
r
|
pperm.R
|
#' @title Permutation Test
#'
#' @description
#' Evaluates the statistical significance of a test statistic given a vector
#' of "nulls": values for that test statistic drawn from random sampling.
#'
#' @details
#' Calculates exact p-values for permutation tests when permutations are
#' randomly drawn with replacement using the \code{\link[statmod]{permp}}
#' function in the \code{\link{statmod}} package. \code{NA} values in
#' \code{permuted} are ignored.
#'
#' @references
#' \enumerate{
#'   \item{
#'     Phipson, B. & Smyth, G. K. \emph{Permutation P-values should never be
#'     zero: calculating exact P-values when permutations are randomly drawn.}
#'     Stat. Appl. Genet. Mol. Biol. \strong{9}, Article39 (2010).
#'   }
#' }
#'
#' @param permuted vector of values making up the empirical distribution.
#' @param observed the observed value of the test statistic.
#' @param subsetSize the size of the network subset the null distribution is
#'  drawn for.
#' @param totalSize the size of the whole network
#' @param order logical; does the order of nodes in the permutation affect the
#'  value of the test statistic?
#' @param alternative a character string specifying the alternative hypothesis,
#'  must be one of "greater" (default), "less", or "two.sided".
#'  You can specify just the initial letter.
#'
#' @return a p-value, or \code{NA} if \code{observed} is \code{NA}.
#'
#' @aliases permutation permuted
#' @name permutation
#' @export
perm.test <- function(
  permuted, observed, subsetSize, totalSize, order=TRUE, alternative="greater"
) {
  validAlts <- c("two.sided", "less", "greater")
  altMatch <- pmatch(alternative, validAlts)
  if (is.na(altMatch))
    # Collapse the choices so the message reads "two.sided, less, greater"
    # rather than concatenating the vector elements with no separator.
    stop("Alternative must be one of ", paste(validAlts, collapse = ", "))
  if (is.na(observed))
    return(NA)
  # Total number of possible permutations: ordered arrangements when the
  # statistic is order-sensitive, otherwise unordered subsets.
  if (order) {
    total.nperm <- prod(totalSize:(totalSize - subsetSize + 1))
  } else {
    total.nperm <- choose(totalSize, subsetSize)
  }
  # Drop NA nulls explicitly (the original sort() silently removed them).
  permuted <- permuted[!is.na(permuted)]
  nPerm <- length(permuted)
  # Counts of null statistics at least as extreme as the observation.
  less.extreme <- sum(permuted <= observed)
  more.extreme <- sum(permuted >= observed)
  lower.pval <- permp(less.extreme, nPerm, total.nperm=total.nperm)
  upper.pval <- permp(more.extreme, nPerm, total.nperm=total.nperm)
  if (altMatch == 1L) {
    # Two-sided: double the smaller tail, capped at 1 so a valid
    # probability is always returned (previously it could exceed 1).
    return(min(1, 2 * min(lower.pval, upper.pval)))
  } else if (altMatch == 2L) {
    return(lower.pval)
  } else {
    return(upper.pval)
  }
}
#' Exact permutation p-values wrapper
#'
#' Wrapper for \code{\link[statmod]{permp}} from the
#' \code{\link[statmod]{statmod}} library, which can crash if FORTRAN
#' libraries are not properly linked.
#'
#' @details
#' In the case \code{\link[statmod]{permp}} fails, the wrapper will fall back
#' to a slightly more conservative biased estimator: (1+x)/(1+nPerm).
#'
#' @param x number of permutations that yielded test statistics at least as
#'  extreme as the observed data. May be a vector or an array of values.
#' @param nperm total number of permutations performed.
#' @param ... other arguments to pass to\code{\link[statmod]{permp}}.
#' @return
#'  vector or array of p-values, of same dimensions as \code{x}.
#' @importFrom statmod permp
permp <- function(x, nperm, ...) {
  tryCatch({
    # Preferred path: exact p-value calculation from statmod.
    return(statmod::permp(x, nperm, ...))
  }, error=function(e) {
    # Fallback path: statmod crashed (e.g. broken FORTRAN linkage);
    # warn and use the add-one biased estimator instead.
    warning(
      "Error from statmod::permp:", e$message,
      "\nUsing conservative biased estimator (1+x)/(1+nPerm) instead."
    )
    return(
      (x + 1)/(nperm + 1)
    )
  })
}
#' @description
#' \code{requiredPerms}: how many permutations do I need to be able to detect
#' significance at a given threshold \code{alpha}?
#'
#' @param alpha desired significance threshold.
#' @return The minimum number of permutations required to detect any significant
#'  associations at the provided \code{alpha}. The minimum p-value will always
#'  be smaller than \code{alpha}.
#' @rdname permutation
#' @export
requiredPerms <- function(alpha, alternative="greater") {
  validAlts <- c("two.sided", "less", "greater")
  altMatch <- pmatch(alternative, validAlts)
  if (is.na(altMatch))
    # Collapse the choices so the message reads "two.sided, less, greater"
    # rather than concatenating the vector elements with no separator.
    stop("Alternative must be one of ", paste(validAlts, collapse = ", "))
  # A two-sided test doubles the tail p-value, so twice as many
  # permutations are required to reach the same threshold.
  if (altMatch == 1) {
    1/alpha*2
  } else {
    1/alpha
  }
}
|
3efc8e9d2e4422cbaf5730e0094d8c079a31f680
|
4b3de47e87774d4e4cc72cbbe9ab3ce0a85d635e
|
/man/mlc_churn.Rd
|
56aae0044ded6bff8bd63cb71e5041adf42c7904
|
[
"MIT"
] |
permissive
|
tidymodels/modeldata
|
2a697daa91594c78bfeec05a0052a83693505c9a
|
d44d367dda2ceb58282c4bc844aa9453d33ad064
|
refs/heads/main
| 2023-08-11T07:58:18.615400
| 2023-08-09T19:22:49
| 2023-08-09T19:22:49
| 224,252,166
| 23
| 4
|
NOASSERTION
| 2023-08-09T19:21:51
| 2019-11-26T17:46:11
|
R
|
UTF-8
|
R
| false
| true
| 1,163
|
rd
|
mlc_churn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/churn.R
\docType{data}
\name{mlc_churn}
\alias{mlc_churn}
\title{Customer churn data}
\source{
Originally at \verb{http://www.sgi.com/tech/mlc/}
}
\value{
\item{mlc_churn}{a tibble}
}
\description{
A data set from the MLC++ machine learning software for modeling customer
churn. There are 19 predictors, mostly numeric: \code{state} (categorical),
\code{account_length} \code{area_code} \code{international_plan} (yes/no),
\code{voice_mail_plan} (yes/no), \code{number_vmail_messages}
\code{total_day_minutes} \code{total_day_calls} \code{total_day_charge}
\code{total_eve_minutes} \code{total_eve_calls} \code{total_eve_charge}
\code{total_night_minutes} \code{total_night_calls}
\code{total_night_charge} \code{total_intl_minutes}
\code{total_intl_calls} \code{total_intl_charge}, and
\code{number_customer_service_calls}.
}
\details{
The outcome is contained in a column called \code{churn} (also yes/no).
A note in one of the source files states that the data are "artificial based
on claims similar to real world".
}
\examples{
data(mlc_churn)
str(mlc_churn)
}
\keyword{datasets}
|
b0787e07d43f72efc712d1baa739c597d94d3b6c
|
15fda40dabc3b7e72a7496f61536d8075a29bc0a
|
/Metropolis-Hastings Algorithm 1.R
|
c50b17aff423610b276ea5684163fecd1a59e0ff
|
[] |
no_license
|
edwardmoradian/Bayesian-Statistics
|
499307f916e50018b0c75e3393ec69ae6b3dcaf0
|
3482239ed93be4c5f7a5edac221ae1d2db736067
|
refs/heads/master
| 2020-03-21T04:28:47.971720
| 2018-12-21T00:43:08
| 2018-12-21T00:43:08
| 138,110,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,547
|
r
|
Metropolis-Hastings Algorithm 1.R
|
### 1 parameter problem
# Metropolis-Hastings sampler targeting a Beta(7,5) density, using a
# symmetric Uniform(theta - 0.2, theta + 0.2) random-walk proposal.
# Target density: Beta(7,5).
f=function(theta)
{
  dbeta(theta,7,5)
}
# loop values
N = 5000    # number of post-setup iterations used for summaries
N1 = N + 1  # loop upper bound (one extra draw)
B = 1000    # burn-in cutoff
accept.ct = 0 # acceptance count
theta = c() # make theta a vector
theta[1] = runif(1) # initialize theta - we don't know the function
for(i in 1:N1)
{
  # Rejection-resample the proposal until it lies in [0, 1], the
  # support of the Beta target.
  theta.star = 2
  while (theta.star > 1 || theta.star < 0 )
  {
    theta.star = runif(1,theta[i]-.2,theta[i]+.2) # symmetric proposal uniform, draw from uniform
  }
  f.star = f(theta.star) # compute beta function for theta.star
  fi = f(theta[i]) # compute beta function for theta[i]
  alpha = min(1,f.star/fi) # alpha is value compared to runif(1), calculate the acceptance probability
  if(runif(1) <= alpha )
  {
    theta[i+1] = theta.star # Accept the theta value
    accept.ct = accept.ct + 1
  }
  else
  {
    theta[i+1] = theta[i] # When not accepting, set next value to the current value
  }
}
# acceptance count - helps to set proposal function
# (tune the proposal width so this lands in a reasonable range)
accept.ct/N
# compute mean and variance excluding burn-in iterations
mean(theta[B:N])
var(theta[B:N])
# credible interval (2.5% / 97.5% quantiles of the retained chain)
theta.cr = theta[B:N]
theta.cr = sort(theta.cr)
U = as.integer(.975*(N-B))
L = as.integer(.025*(N-B))
print(c("(",round(theta.cr[L],4),",",round(theta.cr[U],4),")"),quote=FALSE)
# Diagnostics: posterior histogram, trace plot, autocorrelation.
par(mfrow=c(3,1))
hist(theta[B:N])
plot(theta,type='p',pch='.')
acf(theta)
# check actual values
# Analytic mean a/(a+b) and variance ab/((a+b)^2 (a+b+1)) of Beta(7,5).
theta.mean.actual = round(7/(7+5),4)
var.actual = round(5*7/(12^2*13),4)
print(c("Actual theta mean and variance =",
        theta.mean.actual,var.actual),quote=FALSE)
|
d5b49ae4c6aa07103084a67c432a99dd5d80961d
|
f1ddad5168a5b2f3ce66c6e436057cff5e91627f
|
/calculate_return.R
|
a62609676bbb155dc4d5d894d7687a0d33e10fe7
|
[] |
no_license
|
akizminet/nckh-ftu2020
|
d3889b48ce498e04f287bb4209958e79266e35cc
|
16e139a09d7c12a26c699c9291d8a1e985a265bc
|
refs/heads/master
| 2022-04-25T21:49:27.506449
| 2020-04-25T07:54:55
| 2020-04-25T07:54:55
| 258,074,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,733
|
r
|
calculate_return.R
|
# Weekly log-return calculation: align prices to a Wednesday grid
# (falling back to the nearest preceding trading day) and compute
# week-over-week log returns per ticker.
library("dplyr")
# Read data
dat <- read.csv("price.csv")
dat$date <- as.Date(as.character(dat$date),"%Y%m%d") #Format date
dat <- mutate(dat, weekdays=weekdays(date)) #add weekday
dat <- dat[order(dat$date),] #sort by date
list_code <-sort(unique(as.character(dat$code))) #get tickets
list_wed = seq(from=as.Date("2009-12-23"), to=as.Date("2019-01-09"),by=7) #vector of wednesday from 2010-2018
list_day <- unique(as.Date(dat$date)) #get trading date
list_non_wed<-list_wed[!list_wed %in% list_day] #find non-wednesday
list_wed<-list_wed[list_wed %in% list_day]
# For each non-trading Wednesday, take the nearest preceding trading day
for (day in list_non_wed) {
  while(!day %in% list_day) {
    day<-day-1
  }
  list_moi<-append(list_moi,day)
}
list_moi <- as.Date(list_moi,origin="1970-01-01")
# Combine the true Wednesdays and their substitutes into the weekly grid
ngay_giao_dich <- sort(c(list_wed,list_moi))
bang_tuan <- data.frame(date=ngay_giao_dich,tuan = seq(0,length(ngay_giao_dich)-1))
dat <- merge(filter(dat,date %in% ngay_giao_dich),bang_tuan,by="date")
cophieu <- list()
for (id in list_code){
  cophieu[[id]] <- filter(dat,code == id,date %in% ngay_giao_dich)
}
# Calculate return
R <- list()
for (id in list_code) {
  n <- nrow(cophieu[[id]])
  # NOTE(review): 1:n-1 parses as (1:n)-1 = 0:(n-1); index 0 is silently
  # dropped so this happens to equal 1:(n-1) — fragile, confirm intent.
  R[[id]] <- log(cophieu[[id]]$price[2:n])-log(cophieu[[id]]$price[1:n-1])
}
# Drop each ticker's first week and attach its return series.
for (id in list_code) {
  tuan_dau <- cophieu[[id]]$tuan[1];
  cophieu[[id]] <- cophieu[[id]] %>% filter(tuan>tuan_dau) %>% mutate(R=R[[id]])
}
# Sanity check: the retained weeks must be consecutive for every ticker.
for (id in list_code) {
  if (cophieu[[id]]$tuan[nrow(cophieu[[id]])]-cophieu[[id]]$tuan[1]+1 != nrow(cophieu[[id]])) {
    print(id)
  }
}
# Run regression (unfinished section)
last_week <- length(ngay_giao_dich)
w_start <- cophieu[["AAA"]]$tuan[1]
# NOTE(review): the next assignment is dangling — R chains it into the
# following line (w_end <- Rmw_2 <- ...), and `wstart` is undefined
# (probably meant `w_start`). This section looks incomplete.
w_end <-
Rmw_2 <- R[["VNINDEX"]][wstart]
|
8b0f0a24c5ae53a85b9d24289e8de0d69a307750
|
b2a384e7c01344c8ec959443b0f879840ad6de46
|
/plot1.R
|
17461ea475701e1b9e5304f7886eaffaba66c9f6
|
[] |
no_license
|
MattKerns/ExData_Plotting1
|
d07a27147c436e58df4ec322c2d88dd4e1a0d26b
|
914bc9445770baefe2034f69cf05e5e0a4bc773f
|
refs/heads/master
| 2020-03-31T05:19:43.811919
| 2018-10-07T19:24:26
| 2018-10-07T19:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
plot1.R
|
# Plot a histogram of Global Active Power for the two target days
# (1-2 Feb 2007) from the UCI household power-consumption data, and
# save it as "plot1.png" in the working directory.
#
# data: file name of the ";"-separated data file.
# path: directory containing the file (default: current working
#       directory, which preserves the original behaviour).
plot1 <- function(data, path = getwd()){
  # Bug fix: the file location was previously built with a spurious "~"
  # prefix and then never used; read.table() now honours `path`.
  filelocation <- file.path(path, data)
  AllData <- read.table(filelocation, header = TRUE, sep = ";",
                        stringsAsFactors = FALSE)
  # Keep only the two target days (dates stored as d/m/yyyy strings).
  MyData <- subset(AllData, AllData$Date == "1/2/2007"|AllData$Date =="2/2/2007")
  MyData$Global_active_power <- as.numeric(MyData$Global_active_power)
  png(filename = "plot1.png")
  hist(MyData$Global_active_power, col = "red",
       xlab = "Global Active Power (in Kilowatts)",
       main = "Global Active Power")
  dev.off()
}
|
9a11a46b4a5d91edf33f3b4d156f307992b0c98e
|
1bd342cb0949ef0cd21345bb1fd80304ed5f28a2
|
/scripts for data/saved-version-of-popups.R
|
3226e158d60ba266cc7f599d564f3756641c585b
|
[] |
no_license
|
BrookeGibbons/habitatMAPP
|
20f5e57099af5f598f3a18397f7e6144631366cb
|
80ab428cf517c1366fccc72ce3935f30c632ed48
|
refs/heads/master
| 2022-12-08T12:13:58.806094
| 2020-08-27T03:39:00
| 2020-08-27T03:39:00
| 258,127,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,278
|
r
|
saved-version-of-popups.R
|
# Fragment of a leaflet pipe chain: three marker layers added to an
# existing map object. NOTE(review): this snippet both continues a
# pipeline started elsewhere and ends with a dangling %>% — it is not
# runnable on its own.
# stereo-BRUV Images
addAwesomeMarkers(data=filter(map.dat, source%in%c("stereo-bruv.image")),
                  icon = icon.image,
                  clusterOptions = markerClusterOptions(),
                  group = "stereo-BRUV images",
                  popup = map.dat$image,
                  popupOptions=c(closeButton = TRUE,
                                 minWidth = 0,
                                 maxWidth = 700 # changed from 500 BG 28/07
                  ))%>%
  # stereo-BRUV video
  addAwesomeMarkers(data=filter(map.dat, source%in%c("fish.video")),
                    icon = icon.video,
                    popup = map.dat$fish,
                    # clusterOptions = markerClusterOptions(),
                    group="stereo-BRUV videos",
                    popupOptions=c(closeButton = TRUE,
                                   minWidth = 0,maxWidth = 700))%>%
  # 3D models
  addAwesomeMarkers(data=filter(map.dat, source%in%c("3d.model")),
                    icon = icon.laptop,
                    popup = map.dat$auv,
                    # clusterOptions = markerClusterOptions(),
                    group="3D models",
                    popupOptions=c(closeButton = TRUE,
                                   minWidth = 0,maxWidth = 500))%>%
|
24bc6442b6ad7ebb8751b84f7c22018d21bd0d0f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/inTrees/examples/selectRuleRRF.Rd.R
|
dae61f7fea103b0bfa172d665a8b5b0ba829dcf4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
selectRuleRRF.Rd.R
|
# Auto-generated example scaffold for inTrees::selectRuleRRF.
library(inTrees)
### Name: selectRuleRRF
### Title: select a set of relevant and non-redundant rules
### Aliases: selectRuleRRF
### Keywords: select

### ** Examples

# See function "buildLearner:
|
b77e1b938fa184561338262c5eef02ff143888a8
|
9d59520955aec6e0d572d737900da5464f9c1cc6
|
/R/fit_flextable_to_page.R
|
1e78c34f3dbb7e6739edd58bcebf5ed5496b5f73
|
[] |
no_license
|
LisaHopcroft/CTutils
|
f4d40692364431d8a4c8183f8faf712564c83790
|
f8c052f3e4d54038da644446fb3d8909cf64df49
|
refs/heads/master
| 2023-09-01T15:43:54.850949
| 2021-10-04T07:35:36
| 2021-10-04T07:35:36
| 294,407,600
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 409
|
r
|
fit_flextable_to_page.R
|
#' Fit flextable object to the width of the page
#'
#' @param ft The flextable object
#' @param pgwidth The width of the page, in inches (default = 6in)
#'
#' @return The flextable with the width updated
#' @export
fit_flextable_to_page <- function(ft, pgwidth = 6){
ft_out = ft %>% autofit()
ft_out = width(ft_out, width = dim(ft_out)$widths*pgwidth /(flextable_dim(ft_out)$widths))
return( ft_out )
}
|
19df5b5652ca5a1e352148feb3236c734ae83046
|
cab51b0d16a16728aa9ee5f19ec366c8f36ff5ec
|
/R/test_intestinal_length.R
|
303936fe0e1b7273421900704ac982aa4845edac
|
[
"MIT"
] |
permissive
|
mattiaghilardi/FishCaCO3Model
|
b4fe491f2cc557321d05d3f878b5da18e3102142
|
17509e161cd094ebaf5f33930107684bbeab0dc5
|
refs/heads/master
| 2023-04-14T02:41:54.126353
| 2023-01-12T22:59:13
| 2023-01-12T22:59:13
| 507,607,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,274
|
r
|
test_intestinal_length.R
|
#' Test extrapolation of intestinal length
#'
#' Draws \code{ndraws} posterior samples from the fitted intestinal-length
#' model (\code{m_intestine}), predicts a phylogenetic effect for each
#' requested species, and returns either fitted values
#' (\code{method = "fitted"}) or posterior-predictive draws of log
#' intestinal length for every row of \code{data}.
#'
#' @inheritParams intestinal_length
#' @inherit intestinal_length return
test_intestinal_length <- function(data, method = "fitted", ndraws = 1000, seed = 1,
                                   mc.cores = getOption("mc.cores", 1L),
                                   summary = TRUE, .point = mean, .width = 0.95) {
  # Check species names in the Fish Tree of Life.
  # Not all species have the same name in FishBase and the Fish Tree of
  # Life; for these species traits cannot be estimated.
  suppressMessages(check_name_fishtree(unique(data$species)))
  # Complete phylogeny: species included in the model + new species.
  # NOTE(review): int_moor_f appears to be the package's internal
  # model-fitting dataset -- confirm.
  sp <- unique(c(as.character(int_moor_f$species), as.character(data$species)))
  tree <- fishtree::fishtree_complete_phylogeny(species = sp)
  # Estimate phylogenetic effect for new species; tree tip labels use
  # underscores in place of spaces.
  unsampled <- gsub(" ", "_", unique(data$species))
  phylo_effect <- test_phylo_effect(ndraws = ndraws, seed = seed,
                                    mc.cores = mc.cores, phy = tree,
                                    unsampled = unsampled)
  # Extract posterior draws of the fixed effects and of the residual
  # parameters (sigma, nu) of the model.
  model_param <- tidybayes::spread_draws(m_intestine,
    b_Intercept, b_scalesl_log, b_scaletrophic_level, b_scaleelon_log,
    sigma, nu, ndraws = ndraws, seed = seed)
  # Drop the chain/iteration bookkeeping columns; only .draw is kept.
  model_param <- dplyr::select(model_param, c(-.chain, -.iteration))
  # Join phylogenetic effect to the other model parameters (joined on
  # their shared columns, i.e. .draw).
  all_param <- suppressMessages(dplyr::left_join(phylo_effect, model_param))
  # Prepare dataset for trait prediction: one row per observation x draw.
  all_param$species <- gsub("_", " ", all_param$species)
  newdata <- suppressMessages(dplyr::left_join(data, all_param))
  newdata$sl_log <- log(newdata$sl)
  newdata$elon_log <- log(newdata$elongation)
  # Predict trait for each draw. The model was fitted on scaled
  # predictors, so the coefficients are un-scaled here with the mean/sd
  # of the original fitting data.
  sl_scale <- stats::sd(int_moor_f$sl_log)
  sl_center <- mean(int_moor_f$sl_log)
  troph_scale <- stats::sd(int_moor_f$trophic_level)
  troph_center <- mean(int_moor_f$trophic_level)
  elon_scale <- stats::sd(int_moor_f$elon_log)
  elon_center <- mean(int_moor_f$elon_log)
  newdata <- dplyr::mutate(newdata,
    pred = b_Intercept + phy_eff +
      (b_scalesl_log/sl_scale)*(sl_log - sl_center) +
      (b_scaletrophic_level/troph_scale)*(trophic_level - troph_center) +
      (b_scaleelon_log/elon_scale)*(elon_log - elon_center))
  if (method == "fitted") {
    # Expected value only (no residual noise).
    newdata <- dplyr::select(newdata, c(id, species, sl, pred, .draw))
  } else {
    # Posterior predictive: add Student-t residual noise per observation.
    newdata <- dplyr::select(newdata, c(id, species, sl, pred, sigma, nu, .draw))
    newdata <- dplyr::group_by(newdata, id)
    newdata <- dplyr::mutate(newdata, pred = brms::rstudent_t(ndraws, nu, pred, sigma))
    newdata <- dplyr::select(newdata, c(id, species, sl, pred, .draw))
  }
  # If summary=TRUE return median or mean and the chosen CIs
  # If summary=FALSE return all draws
  if (summary) {
    newdata <- dplyr::group_by(newdata, id, species, sl)
    newdata <- tidybayes::point_interval(newdata, pred, .width = .width, .point = .point)
    newdata <- dplyr::ungroup(newdata)
    newdata <- dplyr::select(newdata, -c(.point, .interval))
  } else {
    # NOTE(review): this branch is a no-op; newdata is returned
    # unchanged at the end of the function anyway.
    newdata
  }
  # Rename the prediction column; assumes pred is the 4th column -- TODO
  # confirm this holds for all code paths.
  colnames(newdata)[4] <- "int_length"
  newdata
}
#' Test estimation of phylogenetic effect for intestinal length
#' using observed or unobserved taxa
#'
#' Extracts posterior draws of the phylogenetic random effect from
#' \code{m_intestine} and, for each draw, predicts the effect for the
#' \code{unsampled} species from the phylogeny via
#' \code{picante::phyEstimate}.
#'
#' @inheritParams phylo_effect
#' @param unsampled Vector of species (observed or unobserved) for
#' which the phylogenetic effect has to be predicted
#' @inherit phylo_effect return
test_phylo_effect <- function(ndraws = NULL, seed = NULL,
                              mc.cores = getOption("mc.cores", 1L), phy,
                              unsampled = NULL) {
  # Check tree
  # NOTE(review): scalar || / && would be more idiomatic than | / & here.
  if (!inherits(phy, "phylo") & !inherits(phy, "multiPhylo")) {
    stop("'phy' must be of class 'phylo' or 'multiPhylo'")
  }
  # Extract draws of the phylogenetic random effect, excluding the
  # species whose effect is to be (re-)predicted below.
  phy_eff <- tidybayes::spread_draws(model = m_intestine, r_phylo[species,], ndraws = ndraws, seed = seed)
  phy_eff <- dplyr::filter(phy_eff, ! species %in% unsampled)
  phy_eff <- dplyr::select(phy_eff, c(-.chain, -.iteration))
  # Split into one data frame per posterior draw (list named by .draw).
  phy_eff <- .named_group_split(phy_eff, .draw, .keep = FALSE)
  # Predict traits for new species using phyEstimate() from "picante"
  # If 'phy' is of class 'multiPhylo' use one random tree for each draw
  phy_eff_pred <- parallel::mclapply(phy_eff, function(x){
    # First column holds the species name; move it to rownames, as
    # required by picante::phyEstimate().
    x <- tibble::column_to_rownames(x, colnames(x)[1])
    colnames(x)[1] <- "phy_eff"
    # If tree is of class "multiPhylo" sample one random tree
    if (inherits(phy, "phylo")) {
      tree <- phy
    } else {
      tree <- sample(phy, 1)[[1]]
    }
    # Predict trait
    trait_pred <- picante::phyEstimate(phy = tree, trait = x, method = "pic")
    colnames(trait_pred)[1] <- "phy_eff"
    # Keep point estimates only; drop the standard-error column.
    trait_pred <- dplyr::select(trait_pred, -se)
    # Append the predicted effects to the observed ones and restore
    # species as a regular column (last expression is the return value).
    x <- rbind(x, trait_pred)
    x <- tibble::rownames_to_column(x, "species")
  }, mc.cores = mc.cores)
  # Convert list in data frame, recording the draw id from list names.
  phy_eff_pred <- dplyr::bind_rows(phy_eff_pred, .id = ".draw")
  phy_eff_pred$.draw <- as.integer(phy_eff_pred$.draw)
  phy_eff_pred
}
|
08bc8008b6d8243d55de40eea55367de282d2eb7
|
0080589afa3dc33bef22379e826fc1d1d138cae1
|
/src/produce_maps/script.R
|
401013ebc0d82fd19be929ad993196482bd460c4
|
[] |
no_license
|
vishalbelsare/covid19-forecasts-orderly
|
b72dfb64cd01fb967750c0e1806cfbd03176f65e
|
57f81690e3fc2e775962a55c6d88f8796f1a0896
|
refs/heads/master
| 2022-06-19T01:26:35.979392
| 2020-05-09T17:30:26
| 2020-05-09T17:30:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,540
|
r
|
script.R
|
# Produce maps of deaths by continent
# loading the map data (simple features format)
world <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf")
ens_week <- readRDS("ensemble_weekly_qntls.rds")
# Generate the ISO3 code for the model predictions so they can be joined
# with the simple-features map data.
ens_week$iso_a3 <- countrycode::countrycode(
  gsub("_", " ", ens_week$country, fixed = TRUE),
  "country.name", "iso3c"
)
# Warn if any country name could not be matched to an ISO3 code.
# BUG FIX: warning(print(...)) printed the message and then warned with
# print()'s return value; use warning() directly.
if (any(is.na(ens_week$iso_a3))) {
  warning("Country names need matching to ISO 3 codes")
}
sis <- unique(ens_week$si)
for (j in seq_along(sis)) {
  si <- sis[j]
  world_df <- merge(world, ens_week[ens_week$si == si, ])
  ## world_df_pts <- st_point_on_surface(world_df)
  world_df_pts <- sf::st_centroid(world_df)
  coords <- as.data.frame(sf::st_coordinates(world_df_pts))
  # Label each country with its name and the median predicted deaths.
  coords$label <- paste0(world_df$geounit, "\n",
                         prettyNum(world_df$`50%`, big.mark = ","))
  ## coords$label <- paste0(world_df$geounit)
  coords$continent <- world_df$continent
  p <- ggplot() +
    geom_sf(data = world, fill = "grey", col = "white", size = 0.1) +
    geom_sf(data = world_df_pts, aes(size = `50%`), alpha = 0.2, col = "red") +
    geom_sf(data = world_df_pts, aes(size = `50%`), shape = 1, col = "red") +
    geom_text_repel(data = coords, aes(x = X, y = Y, label = label)) +
    guides(size = FALSE) + labs(x = "", y = "") +
    theme(panel.background = element_rect("white"), panel.grid = element_blank()) +
    ## coord_sf(xlim=dim[c(1,3)],ylim=dim[c(2,4)])
    coord_sf()
  # BUG FIX: the original filename used the undefined variable `c`
  # (which resolves to base::c and errors at runtime); name the output
  # file by the serial-interval id instead. A stray closing brace after
  # the loop (a parse error) was also removed.
  ggsave(paste0("map_death_", si, ".png"), p)
}
|
5abd3acfac6cd274c1a0ba2af0cbc4b171f92450
|
e66b550f3d132cb024d13ac0787269d6a1a98e70
|
/man/blblm-package.Rd
|
b8622ec523b64dfa545b852b003c76a83abea2cb
|
[
"MIT"
] |
permissive
|
JZNeilZ/blblm
|
7e09d9fdbc892c6ad7a5d7e5c10596cd10148e21
|
a8446b8a888a5763298548363b7f7a99c170b335
|
refs/heads/master
| 2022-10-19T06:10:42.896689
| 2020-06-11T12:14:42
| 2020-06-11T12:14:42
| 270,866,406
| 0
| 0
| null | 2020-06-09T00:42:31
| 2020-06-09T00:42:31
| null |
UTF-8
|
R
| false
| true
| 615
|
rd
|
blblm-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\docType{package}
\name{blblm-package}
\title{blblm: Build Linear Model Using Bag of Little Bootstraps}
\description{
This package is used to build linear regression/ generalized linear regression models by using
the Bag of Little Bootstrap (blb) method to increase the accuracy of estimated parameters. Besides setting
up the model, this package also provides an estimation of confidence interval on estimated parameters,
sigma, and predicted outcome.
}
\details{
Linear Regression with the Bag of Little Bootstraps
}
|
143f19d65d88475ca5c1e121f0ff131832d966e0
|
d4cd92b8447fd6f62815a3b04734d3c719befef2
|
/man/fetch_given_muncipalityandyear.Rd
|
a9e9772afc1212b3251a283d79fc78a2f1ff36cc
|
[] |
no_license
|
shwva184/lab5
|
1138098df5668625c752d21b7d5d93a6d05e7e92
|
2f4cd4d12c8ec540533ca0e94e078efc5e8c056b
|
refs/heads/master
| 2023-01-28T22:22:06.392116
| 2020-12-14T20:05:35
| 2020-12-14T20:05:35
| 299,890,525
| 0
| 0
| null | 2020-10-08T08:10:19
| 2020-09-30T10:58:05
|
R
|
UTF-8
|
R
| false
| true
| 600
|
rd
|
fetch_given_muncipalityandyear.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_kolada_data.R
\name{fetch_given_muncipalityandyear}
\alias{fetch_given_muncipalityandyear}
\title{Retrieving data for given municipality and years}
\usage{
fetch_given_muncipalityandyear(municipality_id, year)
}
\arguments{
\item{municipality_id}{The id of the municipality}
\item{year}{A vector of years}
}
\value{
Returns a data.frame containing the data for a given municipality and year
}
\description{
Retrieving data for given municipality and years
}
\examples{
fetch_given_muncipalityandyear(1860,2009)
}
|
e591a25b025b08553185e5d81b960d9d6c3f1d74
|
0900287305c2a1c8e3ea89ce22dd86b42d3146e1
|
/man/lime.Rd
|
ddecec6c34ed2d2041531529d4cc7835e27ccff5
|
[] |
no_license
|
cran/GLMsData
|
29ff0d385b86d8b69b647a4a3cdde59c3fa995ef
|
adf13787011fc2603433c97edc40940a7408b48e
|
refs/heads/master
| 2022-09-11T10:58:41.904753
| 2022-08-22T05:20:08
| 2022-08-22T05:20:08
| 128,936,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,037
|
rd
|
lime.Rd
|
\name{lime}
\alias{lime}
\docType{data}
\title{Small-leaved lime trees}
\description{
Data from small-leaved lime trees grown in Russia
}
\usage{data(lime)}
\format{
A data frame containing 385 observations with the following 4 variables.
\describe{
\item{\code{Foliage}}{the foliage biomass, in kg (oven dried matter)}
\item{\code{DBH}}{the tree diameter, at breast height, in cm}
\item{\code{Age}}{the age of the tree, in years}
\item{\code{Origin}}{the origin of the tree;
one of
\code{Coppice},
\code{Natural},
\code{Planted}}
}
}
\details{
The data give measurements from small-leaved lime trees
(\emph{Tilia cordata}) growing in Russia.
}
\source{
Schepaschenko, Dmitry; Shvidenko, Anatoly; Usoltsev, Vladimir A; Lakyda, Petro; Luo, Yunjian;
Vasylyshyn, Roman; Lakyda, Ivan; Myklush, Yuriy; See, Linda; McCallum, Ian; Fritz, Steffen; Kraxner, Florian;
Obersteiner, Michael (2017):
Biomass tree data base. doi:10.1594/PANGAEA.871491,
In supplement to: Schepaschenko, D et al. (2017): A dataset of forest biomass structure for Eurasia.
\emph{Scientific Data}, 4, 170070, doi:10.1038/sdata.2017.70.
Extracted from \url{https://doi.pangaea.de/10.1594/PANGAEA.871491}
}
\references{
The source (Schepaschenko et al.) obtains the data from various sources:
\itemize{
\item
Dylis N.V., Nosova L.M. (1977)
\emph{Biomass of forest biogeocenoses under Moscow region}. Moscow: Nauka Publishing.
\item
Gabdelkhakov A.K. (2015)
\emph{Tilia cordata Mill.} tree biomass in plantations and coppice forests. \emph{Eco-potential}.
No. 3 (11). p. 7--16.
\item
Gabdelkhakov A.K. (2005)
\emph{Tilia cordata Mill.} tree biomass in plantations.
\emph{Ural forests and their management}.
Issue 26. Yekaterinburg: USFEU. p. 43--51.
\item
Polikarpov N.P. (1962)
Scots pine young forest dynamics on clear cut. \emph{Moscow: Academy of Sci.} USSR.
\item
Prokopovich E.V. (1995)
Ecological conditions of soil forming and biological cycle of matters in spruce forests of the Middle Ural.
Ph.D. Thesis. Ekaterinburg: Plant and Animals Ecology Institute.
\item
Remezov N.P., Bykova L.N., Smirnova K.M. (1959)
Uptake and cycling of nitrogen and ash elements in forests of European part of USSR.
Moscow: State University.
\item
Smirnov V.V. (1971) Organic mass of certain forest phytocoenoses at European part of USSR.
Moscow: Nauka.
\item
Uvarova S.S. (2005) Biomass dynamics of \emph{Tilia cordata} trees on the example of Achit forest enterprise of Sverdlovsk region.
\emph{Ural forests and their management}. Issue 26. Ekaterinburg: State Forest Engineering University, p. 38--40.
\item
Uvarova S.S. (2006) Growth and biomass of \emph{Tilia cordata} forests of Sverdlovsk region Dissertation.
Ekaterinburg: State Forest Engineering University. (USFEU library)
}
}
\examples{
data(lime)
summary(lime)
}
\keyword{datasets}
|
310c09a3db93790b6ae82e8c610cc66eb2b6280c
|
546e831114239552f6706273103e529dad6999c8
|
/files/rscripts/extension.R
|
8caff14b4cc510937c82f3ec57a1ce5844bf0265
|
[] |
no_license
|
ccardonaandrade/labour_replication
|
b88b612924fcfabb0c29c2d0d13bbe3cc490bd09
|
55bc478f7e950d18f780b91ee41004d6ffac039f
|
refs/heads/master
| 2020-05-16T10:56:05.026692
| 2019-04-23T11:25:17
| 2019-04-23T11:25:17
| 182,997,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,801
|
r
|
extension.R
|
#########################################################
# Replication Exercise
# Extension
#
# Builds the extension's Table 3: OLS, first-difference and
# name-changers-only regressions of log occupational score on name
# americanization (rf_ny_n), reported with HC1 robust standard errors.
########################################################
# NOTE(review): clearing the workspace and a hard-coded setwd() are kept
# from the original replication script, but both are discouraged in
# shared code.
rm(list=ls()) #clear all

library(foreign)
library(stargazer)
library(data.table)
library(xlsx)
library(plyr)
library(dplyr)
library(lubridate)
library(plm)
library(xtable)
options(xtable.floating = FALSE)
options(xtable.timestamp = "")
library(RCurl)

# Directory with the replication data files.
setwd("/Users/Carlos Cardona/Dropbox/PhD Warwick/Second Year/Labour Econ/Replication/Paper Files/")

# Loading both data sets: the panel (pooled) and the first differences.
fd <- read.dta("1. fd_data.dta")
pool <- read.dta("2. pooled_data.dta")

################################################
## Table 3
################################################
# The cohort variable is not a dummy; recode it as an indicator
# (1 = arrival prior to 1921) in both data sets.
pool$newcohort <- as.numeric(pool$cohort > 0)
fd$newcohort <- as.numeric(fd$cohort > 0)
pool$cohort <- NULL
fd$cohort <- NULL
setnames(pool, "newcohort", "cohort")
setnames(fd, "newcohort", "cohort")

### Column 1: pooled OLS, baseline controls.
column.1 <- lm(logoccscore ~ rf_ny_n + rf_ny_n:cohort + married + usspouse +
                 nchild + uskid + migd + ysm + cohort, data = pool)

### Column 2: pooled OLS with country-of-birth x YSM and labor-market x
### YSM interactions.
column.2 <- lm(logoccscore ~ rf_ny_n + rf_ny_n:cohort + married + usspouse +
                 nchild + uskid + migd + cohort + ysm + cobx:ysm +
                 factor(cd):ysm, data = pool)

### Columns 3-4: first differences. Rename the differenced variables so
### the coefficient labels line up with the pooled specifications.
setnames(fd, "dni_ny_n", "rf_ny_n")
setnames(fd, "dmarried", "married")
setnames(fd, "dusspouse", "usspouse")
setnames(fd, "dnchild", "nchild")
setnames(fd, "duskid", "uskid")
setnames(fd, "dmig1", "migd")
column.3 <- lm(dlogoccscore ~ -1 + rf_ny_n + rf_ny_n:cohort + married +
                 usspouse + nchild + uskid + migd + dysm + cohort, data = fd)
column.4 <- lm(dlogoccscore ~ -1 + rf_ny_n + rf_ny_n:cohort + married +
                 usspouse + nchild + uskid + migd + dysm + cohort +
                 cobx_p:dysm + factor(cd_d):dysm, data = fd)

### Columns 5-6: name changers only (type1e == 0), using the predicted
### americanization measure as the regressor of interest.
setnames(fd, "rf_ny_n", "dni_ny_n")
setnames(fd, "dni_ny_n_pd", "rf_ny_n")
column.5 <- lm(dlogoccscore ~ -1 + rf_ny_n + rf_ny_n:cohort + married +
                 usspouse + nchild + uskid + migd + dysm + cohort,
               data = subset(fd, type1e == 0))
column.6 <- lm(dlogoccscore ~ -1 + rf_ny_n + rf_ny_n:cohort + married +
                 usspouse + nchild + uskid + migd + dysm + cohort +
                 cobx_p:dysm + factor(cd_d):dysm,
               data = subset(fd, type1e == 0))

# Heteroskedasticity-robust (HC1) standard errors for every model,
# matching Stata's default robust option.
# FIX: replaces the original eval(parse(text = ...)) loop with a plain
# list-based computation; stargazer's `se` argument takes a list.
models <- list(column.1, column.2, column.3, column.4, column.5, column.6)
robust_se <- lapply(models, function(model) sqrt(diag(vcovHC(model, type = "HC1"))))

stargazer(column.1, column.2, column.3, column.4, column.5, column.6,
          column.labels = c("OLS", "OLS", "First Differences", "First Differences",
                            "Name Changers Only", "Name Changers Only"),
          keep = c("rf_ny_n", "cohort", "rf_ny_n:cohort"),
          covariate.labels = c("A", "Arrival prior 1921", "A*Arrival prior 1921"),
          digits = 3,
          omit.stat = c("ser", "adj.rsq", "f", "ser"),
          title = "Extension Exercise",
          table.placement = "H", no.space = TRUE, model.names = FALSE,
          add.lines = list(c("Country of birth", "No", "Yes", "", "", "", ""),
                           c("Country of birth X YSM", "No", "Yes", "No", "Yes", "No", "Yes"),
                           c("Labor market", "No", "Yes", "", "", "", ""),
                           c("Labor market X YSM", "No", "Yes", "No", "Yes", "No", "Yes")),
          se = robust_se)
|
474f1065014e9f3588556039081fccbff0db56e9
|
c888195086f27f20b0f209d6e05eda04d9e18ae7
|
/rprogramming/assignment3/rankhospital.R
|
fcc1ece66e5af86c6348c76bad57f6f09b82eec9
|
[] |
no_license
|
l337/datasciencecoursera
|
494ffe4f048dcb0b5a23790a5f3bf2c6b28f3335
|
be0929029174eaf2da8ee31f7e15ade4d4075d03
|
refs/heads/master
| 2020-04-05T13:43:39.598164
| 2015-01-20T20:16:47
| 2015-01-20T20:16:47
| 27,475,424
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,679
|
r
|
rankhospital.R
|
#' Return the hospital at a given mortality rank within a state.
#'
#' @param state Two-letter state abbreviation.
#' @param outcome One of "heart attack", "heart failure", "pneumonia".
#' @param num Rank to return: "best", "worst", or a 1-based integer.
#' @return Hospital name (character), or NA if `num` exceeds the number
#'   of ranked hospitals.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data.
  ## BUG FIX: na.strings was previously the single string
  ## "Not Available, NA"; it must be a vector of the two tokens that
  ## mark missing values in this file, otherwise nothing is treated
  ## as NA and na.omit() below removes no rows.
  data <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character",
                   na.strings = c("Not Available", "NA"))
  ## Check that state and outcome are valid
  checkstate <- data[, 7]
  statevalid <- match(state, checkstate)
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  outcomevalid <- match(outcome, outcomes)
  if (is.na(statevalid)) {
    stop("invalid state")
  } else if (is.na(outcomevalid)) {
    stop("invalid outcome")
  } else {
    subregion <- subset(data, data$State == state)
    subdata <- rank(subregion, outcome)
    ## Order by mortality rate with ties broken alphabetically by name;
    ## drop hospitals with missing rates.
    newdata <- na.omit(subdata[order(subdata$causeofdeath, subdata$name), ])
    ## Rank beyond the available data -> no hospital at that rank.
    ## FIXES: `&&` short-circuits when num is "best"/"worst"; return NA
    ## instead of print(NA) so callers get a clean value.
    if (is.numeric(num) && num > length(newdata$name)) {
      NA
    } else {
      if (num == "best") {
        as.character(newdata$name[1])
      } else if (num == "worst") {
        as.character(newdata$name[length(newdata$name)])
      } else {
        as.character(newdata[num, "name"])
      }
    }
  }
}
# Map an outcome name to its column index in the raw data and return a
# small data frame with the hospital name, state and numeric mortality
# rate for every row.
# NOTE: this helper shadows base::rank for the rest of the session.
rank <- function(hospital, columnresult) {
  # Column holding the 30-day mortality rate for the chosen outcome.
  rate_col <- switch(columnresult,
                     "heart attack" = 11,
                     "heart failure" = 17,
                     "pneumonia" = 23)
  # Non-numeric entries become NA; the coercion warning is deliberately
  # silenced.
  rate <- suppressWarnings(as.numeric(hospital[, rate_col]))
  data.frame(name = hospital$Hospital.Name,
             state = hospital$State,
             causeofdeath = rate)
}
|
c91906c1eb7f2cb3c51a948eb02ab59b725e7edc
|
ac6a352a426ee9d057cac78b0e4dba0de15ab86b
|
/R/Simulation.R
|
ec303ce363d93572ee68e38effaf24a177b8189d
|
[] |
no_license
|
RaikOtto/artdeco
|
5dacc69fc38ea957e1c9f41afd6b272a7171a92d
|
cf3790ec46648c93169ad2b3709a7a8fc5cbc258
|
refs/heads/master
| 2021-11-07T22:40:44.489736
| 2021-10-29T14:52:07
| 2021-10-29T14:52:07
| 156,527,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,231
|
r
|
Simulation.R
|
#' Simulate expression profiles of specific cell-types.
#'
#' \code{simulateCellTypes} simulates \emph{in silico} expression profiles
#' of specific cell-types using a negative binomial distribution.
#' Simulation is based on biological data and marker genes
#' of these cell-types.
#'
#' @param referenceCellTypes Matrix of single-cell expression values
#' of the cell-type to be simulated. Used to estimate parameters for
#' negative binomial distribution. Samples as columns and genes as rows.
#' Counts have to be normalized beforehand.
#' @param markerGenes Character vector containing marker genes which
#' characterize the cell-type to be simulated. Gene identifiers need
#' to be consistent with \code{referenceCellTypes}.
#' @param numSamples An integer specifying the number of samples to be simulated.
#' @param seed An integer specifying the seed for simulation. Default is
#' \code{NULL}. A random seed will be generated if no seed is provided.
#' @param verbose Logical, indicating whether status updates will be printed.
#' Default is \code{TRUE}.
#' @importFrom msir loess.sd
#' @importFrom stats rnbinom
#' @return Matrix with simulated count values. Samples as columns and genes
#' as rows.
#' @usage
#' simulateCellTypes(
#' referenceCellTypes,
#' markerGenes,
#' numSamples,
#' seed,
#' verbose
#' )
simulateCellTypes = function(
    referenceCellTypes,
    markerGenes,
    numSamples,
    seed = NULL,
    verbose = TRUE
){
    # Split reference into marker genes and non marker genes
    if(verbose){message("Processing input files ...")}
    markerCounts = referenceCellTypes[
        rownames(referenceCellTypes) %in% markerGenes,]
    if(nrow(markerCounts) == 0){
        # FIX: corrected typo in the error message ("Non" -> "None").
        stop("None of the marker genes present in reference cell-types!")
    }
    otherCounts = referenceCellTypes[
        !rownames(referenceCellTypes) %in% markerGenes,]
    # Estimate parameters for marker genes and other genes separately
    if(verbose){message("Estimating parameters ...")}
    paramMarkers = estimateParameters(
        markerCounts
    )
    paramOthers = estimateParameters(
        otherCounts
    )
    # Simulate counts
    if(verbose){message("Simulating counts ...")}
    if(is.null(seed)) {
        seed = sample(1:1000000, size = 1)
    }
    # BUG FIX: set.seed() was commented out, so the documented `seed`
    # argument had no effect and results were not reproducible.
    set.seed(seed)
    sim_Counts_Markers = simulateCounts(
        simParam = paramMarkers,
        nSamples = numSamples,
        nGenes = nrow(markerCounts),
        simSeed = seed,
        simMarkers = TRUE)
    sim_Counts_Others = simulateCounts(
        simParam = paramOthers,
        nSamples = numSamples,
        nGenes = nrow(otherCounts),
        simSeed = seed,
        simMarkers = FALSE
    )
    # Rename genes of simulated data, join and create sample IDs
    if(verbose){message("Preparing output ...")}
    rownames(sim_Counts_Markers) = rownames(markerCounts)
    rownames(sim_Counts_Others) = rownames(otherCounts)
    res = rbind(sim_Counts_Markers, sim_Counts_Others)
    colnames(res) = paste0("simu_", 1:numSamples)
    if(verbose){message("Done!")}
    return(res)
}
#' Estimate parameters for negative binomial distribution
#'
#' \code{estimateParameters} estimates the parameters for the negative binomial
#' distribution from the provided reference expression profiles. This function
#' is used by \code{\link{simulateCellTypes}}.
#'
#' @param countData Matrix with the expression values of the reference
#' cell-types (genes as rows, samples as columns). Parameters are estimated
#' based on this count data.
#' @return Returns a list with the estimated parameters: per-gene means,
#' dispersions and sizes, dropout probabilities (\code{p0}), loess fits of
#' size and dispersion against mean expression, and bookkeeping counts.
estimateParameters = function(
    countData
){
    # Width (in standard deviations) of the loess prediction band.
    sigma = 1.96
    # Kick out empty samples and keep only expressed genes
    totalS = ncol(countData)
    totalG = nrow(countData)
    fullS = colSums(countData, na.rm = TRUE) > 0
    detectG = rowMeans(countData, na.rm = TRUE) > 0
    countData = countData[detectG, fullS]
    nsamples = dim(countData)[2]
    counts0 = countData == 0
    nn0 = rowSums(!counts0)   # number of non-zero observations per gene
    # Method-of-moments estimates for the negative binomial: per-gene
    # mean, variance and size (= 1/dispersion).
    mu = rowSums(countData) / ncol(countData)
    s2 = rowSums((countData - mu) ^ 2) / ncol(countData)
    # The small constant avoids division by zero when s2 is close to mu.
    size = mu ^ 2 / (s2 - mu + 1e-04)
    # Genes with non-positive size (variance <= mean) cannot be modelled
    # by the NB and are dropped below.
    size = ifelse(size > 0, size, NA)
    p0 = (nsamples - nn0) / nsamples   # per-gene dropout probability
    mu = mu[!is.na(size)]
    p0 = p0[!is.na(size)]
    remove = rownames(countData)[is.na(size)]
    detectG[names(detectG) %in% remove] = FALSE
    size = size[!is.na(size)]
    phi.g = 1 / size
    phi.c = mean(phi.g)
    ldisp = log2(phi.g)
    lsize = log2(size)
    lmu = log2(mu + 1)
    estG = length(mu)
    # BUG FIX: was length(ncol(countData)), which is always 1. The number
    # of samples used for estimation is ncol(countData).
    estS = ncol(countData)
    # meansizefit: loess trend (with sd band) of log2 size vs log2 mean
    meansizefit = loess.sd(lsize ~ lmu, nsigma = sigma)
    # meandispfit: loess trend of log2 dispersion vs log2 mean
    meandispfit = loess.sd(ldisp ~ lmu, nsigma = sigma)
    # return object
    paramData = list(means = mu,
                     dispersion = phi.g,
                     common.dispersion = phi.c,
                     size = size,
                     p0 = p0,
                     meansizefit = meansizefit,
                     meandispfit = meandispfit,
                     estS = estS,
                     estG = estG,
                     totalS = totalS,
                     totalG = totalG,
                     detectG = detectG,
                     sigma = sigma)
    return(paramData)
}
#' Simulate count data based on negative binomial distribution
#'
#' \code{simulateCounts} simulates the expression values using
#' a negative binomial distribution with parameters estimated by
#' \code{\link{estimateParameters}}. This function is used by
#' \code{\link{simulateCellTypes}}.
#' @param simParam List of parameters estimated by
#' \code{\link{estimateParameters}}.
#' @param nSamples An integer specifying the number of samples to be simulated.
#' @param nGenes An integer specifying the number of genes to be simulated.
#' @param simSeed An integer specifying the seed for simulation.
#' NOTE(review): currently unused -- the RNG is never seeded inside this
#' function; confirm whether seeding was intended here or in the caller.
#' @param simMarkers Logical, indicating whether genes to be simulated are
#' markers or not.
#' @return Matrix with simulated count values (genes as rows, samples as
#' columns).
simulateCounts = function(
    simParam,
    nSamples,
    nGenes,
    simSeed = NULL,
    simMarkers = TRUE
){
    # Log fold-changes added to the mean model (all zero, i.e. no
    # differential expression is simulated).
    # NOTE(review): both branches are identical, so simMarkers has no
    # effect on lfcs at the moment.
    if(simMarkers){
        lfcs = as.matrix(rep(0, nGenes))
    } else {
        lfcs = as.matrix(rep(0, nGenes))
    }
    # define NB params: per-gene means and the mean-size loess trend
    # estimated from the reference data.
    mu = simParam$means
    meansizefit = simParam$meansizefit
    # For markers use observed mean parameters, for other genes sample
    if(simMarkers) {
        present = simParam$detectG
        if(sum(present) < nGenes){
            warning("Detected one or more marker genes with no expression
            in reference samples!")
            # Genes that were filtered out keep a mean of zero.
            true.means = rep(0, nGenes)
            true.means[present] = mu
        } else {
            true.means = mu
        }
    } else {
        # Sample (with replacement) nGenes mean values from the reference.
        index = sample(1:length(mu), size = nGenes, replace = TRUE)
        true.means = mu[index]
    }
    # estimate size parameter associated with true mean values:
    # interpolate the loess fit (rule = 2 extrapolates flat at the ends)
    # and draw one size per gene around the fitted trend.
    lmu = log2(true.means + 1)
    predsize.mean = approx(meansizefit$x, meansizefit$y, xout = lmu, rule = 2)$y
    predsize.sd = approx(meansizefit$x, meansizefit$sd, xout = lmu, rule = 2)$y
    sizevec = rnorm(n = length(lmu), mean = predsize.mean, sd = predsize.sd)
    # size factor: per-sample library-size multiplier
    #all.facs = rep(1, nSamples)
    all.facs = sample(seq(1, 2.5, by = 0.1), size = nSamples, replace = TRUE)
    # effective means
    effective.means = outer(true.means, all.facs, "*")
    mod = as.matrix(rep(1, nSamples))
    # make mean expression with beta coefficients added as defined
    # by model matrix (lfcs is all-zero, so this currently adds nothing)
    mumat = log2(effective.means + 1) + lfcs %*% t(mod)
    mumat[mumat < 0] = min(log2(effective.means + 1))
    # result count matrix: NB draws on the original (non-log) scale
    counts = matrix(
        rnbinom(nSamples * nGenes, mu = 2 ^ mumat - 1, size = 2 ^ sizevec),
        ncol = nSamples,
        nrow = nGenes,
        dimnames = list(paste0(rownames(mumat),"_", seq_len(nGenes)),
                        NULL))
    return(counts)
}
#' Simulate expression data which does not follow a cell-type specific profile.
#'
#' \code{simulateNegativeControls} simulates random expression profiles
#' following a negative binomial distribution. Parameters of the negative
#' binomial are randomly sampled from a normal distribution.
#' Can be used as negative controls for benchmarking purposes.
#' @param nGenes An integer specifying the number of genes to be simulated.
#' @param numSamples An integer specifying the number of samples to be simulated.
#' @param normMean An integer specifying the mean parameter of the
#' normal distribution which is used to generate mean expression values for
#' the simulation.
#' @param normSD An integer specifying the standard deviation parameter of the
#' normal distribution which is used to generate mean expression values for
#' the simulation.
#' @param seed An integer specifying the seed for simulation. Default is
#' \code{NULL}. A random seed will be generated if no seed is provided.
#' @param verbose Logical, indicating whether status updates will be printed.
#' Default is \code{TRUE}. NOTE(review): currently unused in the body.
#' @importFrom stats rnbinom
#' @return Matrix with simulated count values. Samples as columns and genes
#' as rows.
#' @usage
#' simulateNegativeControls(
#' nGenes,
#' numSamples,
#' normMean,
#' normSD,
#' seed,
#' verbose
#' )
simulateNegativeControls = function(
    nGenes,
    numSamples,
    normMean = 50,
    normSD = 500,
    seed = NULL,
    verbose = TRUE
){
    if(is.null(seed)) {
        seed = sample(1:1000000, size = 1)
    }
    # BUG FIX: set.seed() was commented out, so the documented `seed`
    # argument had no effect and results were not reproducible.
    set.seed(seed)
    # Simulate counts based on randomly sampled mean and size values
    # for negative binomial distribution
    means = rnorm(nGenes, mean = normMean, sd = normSD)
    means[means < 0] = 0   # negative means are clamped to zero
    # Per-sample size factors mimic library-size differences.
    all.facs = sample(seq(1, 3, by = 0.1), size = numSamples, replace = TRUE)
    effective.means = outer(means, all.facs, "*")
    mumat = log2(effective.means + 1)
    mumat[mumat < 0] = min(log2(effective.means + 1))
    sizevec = rnorm(nGenes, mean = 0, sd = 4)
    counts = matrix(
        rnbinom(numSamples * nGenes, mu = 2 ^ mumat - 1, size = 2 ^ sizevec),
        ncol = numSamples,
        nrow = nGenes
    )
    colnames(counts) = paste0("Negative_Control_", c(1:numSamples))
    return(counts)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.