blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7183aa956fbdc46bb74df160d32db26f88140a84
|
f61064bb7d0013f111123206b230482514141d9e
|
/R/sir_xx_initialize.R
|
a183f94bebedb5b399784299f780fe214c55ebf2
|
[] |
no_license
|
nianqiaoju/agents
|
6e6cd331d36f0603b9442994e08797effae43fcc
|
bcdab14b85122a7a0d63838bf38f77666ce882d1
|
refs/heads/main
| 2023-08-17T05:10:49.800553
| 2021-02-18T23:01:47
| 2021-02-18T23:01:47
| 332,890,396
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,164
|
r
|
sir_xx_initialize.R
|
#' @title Gibbs sampler for SIR model
#' @description Initialize agent states for the SIR model given observations y
#'   such that the complete likelihood is not zero. Each agent's trajectory is
#'   forced into the susceptible -> infected -> recovered shape
#'   (0...0 1...1 2...2), with at least one infected agent at every time.
#' @param y population observations (one count per time point)
#' @param model_config a list containing parameters (at least N, the population
#'   size, and rho, the reporting rate), features, and network structure
#' @export
#' @return agent_states: an N x length(y) matrix with entries 0 (susceptible),
#'   1 (infected), or 2 (recovered)
sir_xx_initialize <- function(y, model_config){
  num_observations <- length(y)
  N <- model_config$N
  xx <- matrix(0, nrow = N, ncol = num_observations)
  ## sample it - yt for every time
  it <- rpois(n = num_observations, lambda = 0.5 * N * (1 - model_config$rho)) + y
  ## clamp so that 1 <= it <= N for every time point
  ## BUG FIX: the original called pmin(it, N) and sample.int(n = N, ...) with a
  ## bare `N`, which is undefined in this function's scope (it meant
  ## model_config$N and would error, or silently pick up a stray global).
  it <- pmin(it, N)
  it <- pmax(it, 1)
  ## make sure there are it[t] infections at every time t
  for (t in seq_len(num_observations)){
    xx[sample.int(n = N, size = it[t]), t] <- 1
  }
  ## fill each agent's row so it looks like 000...111...222:
  ## infected from its first sampled 1 through its last, recovered afterwards
  for (n in seq_len(N)){
    infected_times <- which(xx[n, ] == 1)
    if (length(infected_times) > 0){
      start <- min(infected_times)
      end <- max(infected_times)
      xx[n, start:end] <- 1
      if (end < num_observations) xx[n, (end + 1):num_observations] <- 2
    }
  }
  return(xx)
}
|
54b9b264c53952c191effde604720958fd3d7fbb
|
1689120410245895c81e873c902c3b889198be70
|
/man/chk_clm_rdr.Rd
|
55f6705fa20365bfa910e7f4e2c3e92fa518e813
|
[] |
no_license
|
seokhoonj/underwriter
|
9d4f0ff7ae35b7c9fdc529fc44a92d841654148b
|
347bbe69cb54136789d69abaf329962c46973ac1
|
refs/heads/master
| 2023-03-26T21:58:57.729018
| 2021-03-26T06:00:14
| 2021-03-26T06:00:14
| 293,969,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 512
|
rd
|
chk_clm_rdr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk_clm_rdr.R
\name{chk_clm_rdr}
\alias{chk_clm_rdr}
\title{Create claim rider vector}
\usage{
chk_clm_rdr(rider, code, target)
}
\arguments{
\item{rider}{is a rider vector}
\item{code}{is a kcd code regular expression vector}
\item{target}{is a claim kcd code vector}
}
\description{
Creates the claim rider vector (derived from the claim boolean matrix constructed inside the function).
}
\keyword{amount}
\keyword{claim}
\keyword{rider}
|
425ed7055652bc3286cd70ffad9020c3d7170caa
|
6fc4c3537514c05034823015f386a92887a5a8b5
|
/R/F.GEV.R
|
4c89fddb327522b888edb50436488b395bb20404
|
[] |
no_license
|
cran/PRSim
|
956adacd883a34cc94d94f0a846e481cc4b4ff96
|
56a2625a8a79f510a54ff6a87935ee9362607116
|
refs/heads/master
| 2023-06-22T01:23:53.243445
| 2023-06-13T13:40:05
| 2023-06-13T13:40:05
| 236,875,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
F.GEV.R
|
# Cumulative distribution function of the Generalized Extreme Value (GEV)
# distribution with location `xi`, scale `alfa`, and shape `k`.
# k == 0 corresponds to the Gumbel limit; otherwise the reduced variate is
# obtained through the shape-dependent log transform.
F.GEV <- function (x, xi, alfa, k)
{
    std <- (x - xi)/alfa
    if (k == 0) {
        reduced <- std
    }
    else {
        reduced <- -log(1 - k * std)/k
    }
    F <- exp(-exp(-reduced))
    return(F)
}
|
7e0ea8142172d6f7970e194a1e71b318bdc8acf9
|
83bd4ab4313515d2aefd13d96701267e9efc1018
|
/A4_Q2.R
|
183c157dd34932273addcc22ccb76e839f487d00
|
[] |
no_license
|
AkshayGovindaraj/Computational-Statistics
|
3a1e7f3bf323c3915e526d17b153d89e54300a7d
|
c73a71e2bbc474c18b9393e86cec4bccc811b67b
|
refs/heads/master
| 2020-04-17T05:21:39.182246
| 2019-09-17T20:03:04
| 2019-09-17T20:03:04
| 166,274,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
A4_Q2.R
|
#Facial Recognition - Computational Statistics
# Loads one Yale Faces subject image via the imager package.
library(imager)
filename <- 'yalefaces/subject14.gif'
# system.file() resolves the path inside the installed imager package and
# returns "" when the file is not found there.
# NOTE(review): imager does not ship a yalefaces data set by default --
# presumably these files were copied into the package directory; verify
# that `file` is non-empty before load.image() is called.
file <- system.file(filename,package='imager')
# Read the image into an imager cimg object.
im <- load.image(file)
|
880e733c69e670c4c9860df59e615d3873ec262b
|
66f658595b4fd87c0c58486c389c2260605d2b71
|
/man/Ar1_sd.Rd
|
3e497346abe39ea783347dddae5611b5ab2788be
|
[
"MIT"
] |
permissive
|
cbuelo/tvsews
|
2f05ad1fd3f6b6eada639d2c43d4c12e68ba3906
|
8d7a0fbbee3d8033b42d1e50b629c72af9571c6c
|
refs/heads/master
| 2023-04-09T18:05:16.062047
| 2022-01-18T23:00:19
| 2022-01-18T23:00:19
| 332,109,603
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
Ar1_sd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rolling_window.R
\name{Ar1_sd}
\alias{Ar1_sd}
\title{SD of lag-1 autocorrelation with detrending}
\usage{
Ar1_sd(x, detrend = FALSE, prop_req = 0.99)
}
\arguments{
\item{x}{numeric}
\item{detrend}{TRUE or FALSE, default = FALSE}
\item{prop_req}{numeric value between 0 and 1, default = 0.99}
}
\value{
numeric
}
\description{
SD of lag-1 autocorrelation with detrending
}
\examples{
y <- rnorm(10)
Ar1_sd(y)
Ar1_sd(y, detrend = TRUE)
y[1] <- NA
Ar1_sd(y)
Ar1_sd(y, prop_req = 0.5)
}
|
4dacdd4998d194a1bb2935b2961a24caf80b3226
|
85c526147ee8eb8976be4429cc719deb8077209c
|
/run_analysis.R
|
5cba664f813d550209d086034d86ddf017a56156
|
[] |
no_license
|
jjvornov/DataCleaning
|
b09b56d141661831fa6b2a28b2a0a92eea61f002
|
ec0805747efadb1ca62626017fc5a9373dd9594f
|
refs/heads/master
| 2020-06-05T18:36:34.396223
| 2015-05-20T16:30:38
| 2015-05-20T16:30:38
| 35,955,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,077
|
r
|
run_analysis.R
|
## run_analysis.R
## Builds a tidy summary ("tidy.txt") of the Samsung Human Activity
## Recognition data set: merges the test/train splits, attaches activity
## labels, keeps the mean/std variables, and averages them per Subject
## and Activity.
##First read the Samsung data in the working directory
##subject_test are the subject ids, activity the activity ids,
##features are the actual datapoints for subjects and activities
subject_test <- read.table("test/subject_test.txt")
activity_test <- read.table("test/y_test.txt")
features_test <- read.table("test/x_test.txt")
subject_train <- read.table("train/subject_train.txt")
activity_train <- read.table("train/y_train.txt")
features_train <- read.table("train/x_train.txt")
##combine test and train
subject <- rbind(subject_test, subject_train)
activity <- rbind(activity_test, activity_train)
features <- rbind(features_test, features_train)
##get the names of the columns read in. Read as characters since will be variable names
featureNames <- read.table("features.txt", stringsAsFactors = FALSE)
activityLabels <- read.table("activity_labels.txt", stringsAsFactors = FALSE)
names(activityLabels) <- c("ActivityCode", "Activity")
##extract the name column and apply to the columns for the features
names(features) <- featureNames[[2]]
names(activity) <- "ActivityCode"
names(subject) <- "Subject"
##replace activity codes with labels
library(dplyr)
## BUG FIX: the original called join(), which is a plyr function, not dplyr,
## and errors unless plyr happens to be attached. left_join() preserves the
## row order of its left table, which is what the original comment relied on.
activity2 <- dplyr::left_join(activity, activityLabels, by = "ActivityCode")
##add the subject and activity with cbind
Data <- cbind(subject, activity2[, 2], features)
names(Data)[2] <- "Activity"
##Now select the columns with mean and std variables
##vector of the names containing "mean" with grep
##pull out all data columns first mean, then std, combine
select <- Data[, c(1, 2, grep("mean", names(Data)))]
select2 <- Data[, grep("std", names(Data))]
DataSelect <- cbind(select, select2)
##now create tidy data set of means of the variables by Subject and Activity
##aggregate is the easy way to aggregate as means by the two columns
##need to avoid aggregating first two columns which are the factors
## BUG FIX: the original passed na.action = na.omit, which mean() silently
## ignores through its `...`; na.rm = TRUE is the argument that actually
## removes NAs before averaging.
means <- aggregate(DataSelect[, 3:ncol(DataSelect)],
                   list(Subject = DataSelect$Subject, Activity = DataSelect$Activity),
                   mean, na.rm = TRUE)
## BUG FIX: row.name= was a misspelled argument; write.table expects row.names=
write.table(means, "tidy.txt", row.names = FALSE)
|
4f5bd3ce26345952bfca0679791aa44547f2344c
|
53a9ea36ab32e5768f0c6e1f4c4f0135c1b65e46
|
/salmon_merge_table.R
|
b6ae26091c9e577358555d3e63b076c7c257b917
|
[] |
no_license
|
barrantesisrael/Dual_RNAseq_review_analysis
|
220bea938ddc154f9ad8fa6243bacfc678f194d1
|
468fa5e4f1fecdd2f5bd8f084d59bba2a45ff798
|
refs/heads/main
| 2023-03-28T11:23:38.473519
| 2021-03-30T10:06:58
| 2021-03-30T10:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,765
|
r
|
salmon_merge_table.R
|
#!/salmon_merge_table.R
#The script merges the Salmon reference-free alignments outputs into one file for both gene and transcript level.
#The script has 2 inputs:
#1)fileDirectory -> list of directories where the salmon's outputs are saved.
#2)gene_tx -> path to a file which has at least two columns called "GeneID" and "TranscriptID".
#The script outputs two txt files for each directory
#1)"salmon_gene_count.txt" -> merges all the gene count
#2)"salmon_isoform_count.txt" -> merges all the transcripts count
#Load Library
library(tidyverse) #Collection of R packages designed for data science: ggplot2, dplyr, tidyr, readr, purrr, tibble, stringr, forcats.
library(tximport) #Import and summarize transcript-level estimates for transcript- and gene-level analysis
rm(list=ls(all=TRUE))
##### ##### ##### User's Input ##### ##### #####
fileDirectory <- c("Hsapiens_Spneumoniae_SingleStrand_SE/4_salmon") #Directory where the Salmon results are saved (DO NOT add the last "/")
gene_tx <- "Hsapiens_Spneumoniae_SingleStrand_SE/metadata/Hsapiens_Spneumoniae_tx_gene_rel.txt" #TranscriptID - GeneID - GTF - CDNA file's path
##################################################
##### Get Polyester Output Data
# NOTE(review): read.csv2 keeps dec = "," even with sep = "\t"; columns whose
# values use "." decimals are read as character. Here the values are written
# back verbatim, so it works, but read.delim would be the safer choice -- verify.
gene_tx_df <- read.csv2(gene_tx, sep="\t", header = TRUE)
# Mapping transcript -> transcript (identity) so tximport reports transcript-level counts.
tx2tx <- gene_tx_df[,c(1,1)] #Get count for transcripts
# Mapping transcript -> gene for gene-level aggregation.
tx2gene <- gene_tx_df[,c(1,2)] #Get count for Genes
colnames(tx2tx) <- c("TXNAME", "GENEID")
colnames(tx2gene) <- c("TXNAME", "GENEID")
#Loop through each directory
for (iDir in fileDirectory)
{
#Get all the sample names: one Salmon output subdirectory per sample,
#each expected to contain a quant.sf file.
files <- list.dirs(iDir, recursive=FALSE)
files <- paste0(files, "/quant.sf")
# Use the last path component (the sample directory name) as the sample name.
dirNames <- str_split(list.dirs(iDir, recursive=FALSE), pattern = "/")
dirNames <- unlist (lapply(dirNames, function(x) x[length(x)]))
names(files) <- dirNames
#Remove the transcript version from the transcript ID column from the abundance.tsv files
# NOTE(review): this rewrites each quant.sf IN PLACE (the version suffix after
# "." is stripped from the Name column), so the loop is not idempotent-safe if
# the originals are needed later -- confirm that is intended.
for (iTxt in files)
{
iTxT_df <- read.csv2(iTxt, sep="\t", header = TRUE)
iTxT_df$Name <- sapply (str_split(iTxT_df$Name, "\\."), `[`, 1)
write.table(iTxT_df, file = unname(iTxt), row.names=F, sep="\t", quote=F)
}
#Get Gene Count Table: tximport aggregates transcript counts to genes via tx2gene.
txi_salmon <- tximport(files, type = "salmon", tx2gene = tx2gene)
count <- as.data.frame(txi_salmon$counts) %>% rownames_to_column("ID")
write.table(count, file = sprintf("%s/salmon_gene_count.txt", iDir), row.names=F, sep="\t", quote=F)
##### Get Transcript Count Table: identity mapping keeps transcript resolution.
txi_salmon <- tximport(files, type = "salmon", tx2gene = tx2tx)
count <- as.data.frame(txi_salmon$counts) %>% rownames_to_column("ID")
# Strip any remaining version suffix from the transcript IDs.
count$ID <- str_replace (count$ID, "\\..*", "")
write.table(count, file = sprintf("%s/salmon_isoform_count.txt", iDir), row.names=F, sep="\t", quote=F)
}
|
c72fa58c6251d355dd2ea5652dacdd8fc80db12d
|
fbc824546d61ae83ff70b714b7540c27fa215970
|
/WGCNA.R
|
490b9108549bb378481baf041716793b442dc766
|
[
"MIT"
] |
permissive
|
ben-laufer/cffDNA-and-Brain-Manuscript
|
d997dc95d923a904160abd7a6752caeb8d0ee0a8
|
2fffac8d10b707fab8ae80d53c8522793d10c4b9
|
refs/heads/main
| 2023-04-17T01:56:31.093401
| 2021-08-31T19:50:22
| 2021-08-31T19:50:22
| 386,458,928
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46,623
|
r
|
WGCNA.R
|
# WGCNA for DMRichR results
# Modified from: https://horvath.genetics.ucla.edu/html/CoexpressionNetwork/Rpackages/WGCNA/Tutorials/index.html
# Ben Laufer
rm(list=ls())
options(scipen=999)
cat("\n[DM.R] Loading packages \t\t\t", format(Sys.time(), "%d-%m-%Y %X"), "\n")
.libPaths("/share/lasallelab/programs/DMRichR/R_4.0")
packages <- c("DMRichR", "sva", "org.Mmu.eg.db",
"matrixStats","Hmisc","splines","foreach","doParallel","fastcluster",
"AnnotationHub", "ensembldb", "org.Mmu.eg.db", "plyranges",
"dynamicTreeCut","survival","GO.db","preprocessCore","impute", "WGCNA")
stopifnot(suppressMessages(sapply(packages, require, character.only = TRUE)))
AnnotationHub::setAnnotationHubOption("CACHE", "/share/lasallelab/programs/DMRichR/R_4.0")
rm(packages)
# Prepare data ------------------------------------------------------------
setwd("/share/lasallelab/Ben/Obesity/meta")
## Load DMRs --------------------------------------------------------------
## Load the DMRs saved by a previous DMRichR run for one contrast and expose
## them in the global environment under the contrast's name.
## @param source   top-level results folder (here "cffDNA" or "brains")
## @param contrast contrast subfolder name; also becomes the name of the
##                 global variable created
loadDMRs <- function(source, contrast){
# NOTE(review): assumes the .RData file defines an object named `sigRegions`
load(glue::glue("../{source}/DMRs/{contrast}/RData/DMRs.RData"))
assign(contrast, sigRegions, envir = .GlobalEnv)
}
cffDNA <- c("GD45_OvC", "GD90_OvC", "GD120_OvC", "GD150_OvC",
"GD45_RvC", "GD90_RvO", "GD120_RvO", "GD150_RvO",
"GD45_PvO", "GD90_PvO", "GD120_PvO", "GD150_PvO",
"GD45_RvC", "GD90_RvC", "GD120_RvC", "GD150_RvC")
purrr::walk(cffDNA,
loadDMRs,
source = "cffDNA")
brains <- c("Hippocampus_OvC", "Hypothalamus_OvC", "PrefrontalCortex_OvC",
"Hippocampus_RvO", "Hypothalamus_RvO", "PrefrontalCortex_RvO",
"Hippocampus_PvO", "Hypothalamus_PvO", "PrefrontalCortex_PvO",
"Hippocampus_RvC", "Hypothalamus_RvC", "PrefrontalCortex_RvC")
purrr::walk(brains,
loadDMRs,
source = "brains")
## Make consensus regions -------------------------------------------------
## Build a consensus set of regions from a vector of contrast names: each name
## is looked up in the global environment (populated by loadDMRs), the region
## sets are concatenated, sorted, and overlapping/adjacent ranges merged.
## @param contrasts character vector of global-variable names holding DMRs
## @return a GRanges object of merged (reduced) consensus regions
getConsensus <- function(contrasts){
contrasts %>%
# row-bind all contrasts' regions into one tibble
purrr::map_dfr(function(dmrs){
get(dmrs) %>%
dplyr::as_tibble()
}) %>%
plyranges::as_granges() %>%
GenomicRanges::sort() %>%
# merge overlapping ranges into single consensus regions
plyranges::reduce_ranges()
}
purrr::walk(c("cffDNA", "brains"), function(tissue){
get(tissue) %>%
getConsensus() %>%
assign(glue::glue("{tissue}_consensus"), ., envir = .GlobalEnv)
})
## Smooth hippocampal methylomes ------------------------------------------
system("cp ../cytosine_reports/*.gz ./")
purrr::walk(c("Hippocampus"), function(Region,
testCovariate = "Group",
adjustCovariate = NULL,
matchCovariate = NULL,
coverage = 1,
cores = 10,
perGroup = 0.75,
genome = "rheMac10"){
cat("\n[DMRichR] Loading Bismark genome-wide cytosine reports \t\t", format(Sys.time(), "%d-%m-%Y %X"), "\n")
start_time <- Sys.time()
meta <- openxlsx::read.xlsx("sample_info_master.xlsx", colNames = TRUE) %>%
dplyr::mutate_if(is.character, as.factor) %>%
dplyr::filter(Region == !!Region)
bs.filtered <- DMRichR::processBismark(files = list.files(path = getwd(), pattern = "*.txt.gz"),
meta = meta,
testCovariate = testCovariate,
adjustCovariate = adjustCovariate,
matchCovariate = matchCovariate,
coverage = coverage,
cores = cores,
perGroup = perGroup
)
glue::glue("Saving Rdata...")
save(bs.filtered, file = glue::glue("{Region}_bismark.RData"))
glue::glue("Filtering timing...")
end_time <- Sys.time()
end_time - start_time
cat("\n[DMRichR] Smoothing individual methylation values \t\t", format(Sys.time(), "%d-%m-%Y %X"), "\n")
start_time <- Sys.time()
bs.filtered.bsseq <- bsseq::BSmooth(bs.filtered,
BPPARAM = MulticoreParam(workers = cores,
progressbar = TRUE)
)
bs.filtered.bsseq
glue::glue("Saving Rdata...")
save(bs.filtered.bsseq, file = glue::glue("{Region}_bsseq.RData"))
glue::glue("Individual smoothing timing...")
end_time <- Sys.time()
end_time - start_time
})
system("rm *.txt.gz")
## Extract methylation values ---------------------------------------------
## Extract smoothed per-region methylation values for one dataset over the
## consensus regions, bind them to the region coordinates, expose the result
## as `{dataset}_meth` in the global environment, and save it to disk.
## @param dataset   name prefix of the "{dataset}_bsseq.RData" file to load
## @param consensus GRanges of consensus regions to summarize over
extractMethyl <- function(dataset, consensus){
# NOTE(review): assumes the .RData file defines `bs.filtered.bsseq`
load(glue::glue("{dataset}_bsseq.RData"))
smoothed <- bs.filtered.bsseq %>%
bsseq::getMeth(BSseq = .,
regions = consensus,
type = "smooth",
what = "perRegion") %>%
as.data.frame(check.names = FALSE) %>%
# prepend the region coordinates to the per-sample methylation columns
dplyr::bind_cols(as.data.frame(consensus), .)
assign(glue::glue("{dataset}_meth"), smoothed, envir = .GlobalEnv)
save(smoothed, file = glue::glue("WGCNA/{dataset}/meth_consensus.RData"))
}
dir.create("/share/lasallelab/Ben/Obesity/meta/WGCNA/Hippocampus", recursive = TRUE)
consensus <- c(brains_consensus, cffDNA_consensus) %>%
plyranges::reduce_ranges()
extractMethyl(dataset = "Hippocampus",
consensus = consensus)
# Start WGCNA -------------------------------------------------------------
options(stringsAsFactors = FALSE)
enableWGCNAThreads(60)
setwd("/share/lasallelab/Ben/Obesity/meta/WGCNA/Hippocampus")
load("meth_consensus.RData")
## WGCNA 1: Data input and cleaning ----
smoothed <- Hippocampus_meth
names(smoothed) <- smoothed %>%
names() %>%
gsub("\\-.*","",.)
WGCNA_data0 <- as.data.frame(t(smoothed[, c(6:length(smoothed))]))
names(WGCNA_data0) <- paste(smoothed$seqnames,":", smoothed$start, "-", smoothed$end, sep = "")
rownames(WGCNA_data0) <- names(smoothed[, c(6:length(smoothed))])
# Check for missing values and outliers
WGCNA_gsg <- goodSamplesGenes(WGCNA_data0, verbose = 3)
WGCNA_gsg$allOK
# Remove any offending regions and/or samples
if (!WGCNA_gsg$allOK)
{
# Optionally, print the gene and sample names that were removed:
if (sum(!WGCNA_gsg$goodGenes)>0)
printFlush(paste("Removing genes:", paste(names(WGCNA_data0)[!WGCNA_gsg$goodGenes], collapse = ", ")));
if (sum(!WGCNA_gsg$goodSamples)>0)
printFlush(paste("Removing samples:", paste(rownames(WGCNA_data0)[!WGCNA_gsg$goodSamples], collapse = ", ")));
# Remove the offending genes and samples from the data:
WGCNA_data0 = WGCNA_data0[WGCNA_gsg$goodSamples, WGCNA_gsg$goodGenes]
}
# Cluster samples to check for outliers
sampleTree = hclust(dist(WGCNA_data0), method = "average");
# Plot the sample tree: Open a graphic output window of size 12 by 9 inches
# The user should change the dimensions if the window is too large or too small.
#sizeGrWindow(12,9)
pdf(file = "sampleClustering.pdf", width = 12, height = 9);
par(cex = 0.6);
par(mar = c(0,4,2,0))
plot(sampleTree, main = "Sample clustering to detect outliers", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
## Manually remove outliers based on visualization (Change for each analysis)
# Plot a line to show what the cut would exclude
abline(h = 34, col = "red");
# Determine cluster under the line (use same value as line to cut)
clust = cutreeStatic(sampleTree, cutHeight = 34, minSize = 10)
table(clust)
# clust 1 contains the samples we want to keep, so remove outliers here (clust problem! minsize?)
keepSamples = (clust==1)
WGCNA_data = WGCNA_data0[keepSamples, ]
nGenes = ncol(WGCNA_data)
nSamples = nrow(WGCNA_data)
# Load and clean traits
sampleInfo <- readxl::read_xlsx("../sample_info_brain_master.xlsx") %>%
dplyr::filter(Region == "Hippocampus") %>%
dplyr::select(InfantID, Group, Obese, C_section, Foster, Cohort) %>%
dplyr::mutate("Obese Only" = dplyr::case_when(Group == "Obese" ~ 1,
TRUE ~ 0)) %>%
dplyr::mutate(Control = dplyr::case_when(Group == "Control" ~ 1,
TRUE ~ 0)) %>%
dplyr::mutate(Obese = dplyr::case_when(Obese == "Obese" ~ 1,
Obese == "Control" ~ 0)) %>%
dplyr::mutate(C_section = dplyr::case_when(C_section == "Yes" ~ 1,
C_section == "No" ~ 0)) %>%
dplyr::mutate(Foster = dplyr::case_when(Foster== "Yes" ~ 1,
Foster == "No" ~ 0)) %>%
dplyr::mutate(Intervention = dplyr::case_when(Group == "Restriction" ~ 1,
Group == "Pravastatin" ~ 1,
TRUE ~ 0)) %>%
dplyr::mutate(Restriction = dplyr::case_when(Group == "Restriction" ~ 1,
TRUE ~ 0)) %>%
dplyr::mutate(Pravastatin = dplyr::case_when(Group == "Pravastatin" ~ 1,
TRUE ~ 0)) %>%
dplyr::mutate(Year_1 = dplyr::case_when(Cohort == "YEAR 1" ~ 1,
TRUE ~ 0)) %>%
dplyr::mutate(Year_2 = dplyr::case_when(Cohort == "YEAR 2" ~ 1,
TRUE ~ 0)) %>%
#dplyr::select(-Group, -Cohort) %>%
dplyr::select(InfantID, Obese = "Obese Only", Control, Restriction, Pravastatin)
traits <- readxl::read_xlsx("../OBESITY Nov Pref.xlsx") %>%
dplyr::select(-COHORT,-"Group Assignment", -"contol obese/normal") %>%
dplyr::rename(InfantID = INFANT_ID) %>%
dplyr::mutate(InfantID = as.character(InfantID)) %>%
dplyr::select(InfantID,
"Recognition Memory: Abstract Stimuli" = "no. looks N/N+F",
"Recognition Memory: Social Stimuli" = NOVPA)
meta <- sampleInfo %>%
dplyr::inner_join(traits, by = "InfantID") %>%
as.data.frame()
str(meta)
WGCNA_samples <- rownames(WGCNA_data)
traitRows <- match(WGCNA_samples, meta$InfantID)
datTraits <- meta[traitRows, -1]
rownames(datTraits) <- meta[traitRows, 1]
collectGarbage()
# Re-cluster samples
sampleTree2 = hclust(dist(WGCNA_data), method = "average")
# Convert traits to a color representation: white means low, red means high, grey means missing entry
traitColors = numbers2colors(datTraits, signed = TRUE);
# Plot the sample dendrogram and the colors underneath.
plotDendroAndColors(sampleTree2, traitColors,
groupLabels = names(datTraits),
main = "Sample dendrogram and trait heatmap")
dev.off()
save(WGCNA_data, datTraits, file = "WGCNA_1.RData")
#load("WGCNA_1.RData")
## WGCNA 2: Automatic, one-step network construction and module detection ----
# Choose a set of soft-thresholding powers
powers = c(c(1:10), seq(from = 12, to = 30, by = 2))
# Call the network topology analysis function
sft = pickSoftThreshold(WGCNA_data,
powerVector = powers,
networkType = "signed",
corFnc = "bicor", # "pearson"
corOptions = list(maxPOutliers = 0.1), # list(use = 'p')
verbose = 5)
# Text way to assign (modified from SVA network paper)
if(!is.na(sft$powerEstimate)){
print(paste("Soft power should be", sft$powerEstimate))
wgcna_power <- sft$powerEstimate
}else if(is.na(sft$powerEstimate)){
print(paste("no power reached r-squared cut-off, assing power based on number of samples"))
wgcna_power <- 16
print(paste("Soft power should be", wgcna_power))
}
# Plot the results:
pdf("soft_thresholding_power.pdf", height = 5, width =9)
#sizeGrWindow(9, 5)
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red");
# this line corresponds to using an R^2 cut-off of h
abline(h=0.85,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
save(sft, wgcna_power, file = "WGCNA_sft.RData")
# load("WGCNA_sft.RData")
# Construct network and detect modules
# Use identified soft threshold (power) above for power below or if no obvious plateau use Horvath's pre-caluclated (topic 6 from link below)
# https://labs.genetics.ucla.edu/horvath/CoexpressionNetwork/Rpackages/WGCNA/faq.html
net = blockwiseModules(WGCNA_data,
power = wgcna_power, # Change based on results of wgcna_power, 24
networkType = "signed", # signed is recommended for methylation
TOMType = "signed",
corType = "bicor", # "bicor" is more powerful than "pearson", but also needs to be selected in soft thresholding (https://www.ncbi.nlm.nih.gov/pubmed/23217028)
maxPOutliers = 0.1, # Forces bicor to never regard more than the specified proportion of samples as outliers (https://horvath.genetics.ucla.edu/html/CoexpressionNetwork/Rpackages/WGCNA/faq.html)
minModuleSize = min(20, ncol(WGCNA_data)/2), # Functions default is used here
numericLabels = TRUE,
pamRespectsDendro = FALSE,
saveTOMs = TRUE,
saveTOMFileBase = "regionTOM",
loadTOM = TRUE,
blocks = NULL,
maxBlockSize = length(WGCNA_data), # split calculations into blocks or don't using = length(WGCNA_data), limit is = sqrt(2^31)
nThreads = 60,
verbose = 3)
# open a graphics window
#sizeGrWindow(12, 9)
pdf("module_dendogram.pdf", height = 5, width = 9)
# Convert labels to colors for plotting
mergedColors = labels2colors(net$colors)
# Plot the dendrogram and the module colors underneath
plotDendroAndColors(net$dendrograms[[1]], mergedColors[net$blockGenes[[1]]],
"Module colors",
dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05)
dev.off()
moduleLabels = net$colors
moduleColors = labels2colors(net$colors)
MEs = net$MEs;
geneTree = net$dendrograms[[1]];
save(MEs, moduleLabels, moduleColors, geneTree,
file = "WGCNA-networkConstruction-auto.RData")
## WGCNA 3: Relating modules to phenotypes and identifying important regions ----
# load("WGCNA_1.RData")
# load("WGCNA-networkConstruction-auto.RData")
# Quantify module-trait associations
# Define numbers of genes and samples
nGenes = ncol(WGCNA_data)
nSamples = nrow(WGCNA_data)
# Recalculate MEs with color labels
MEs0 = moduleEigengenes(WGCNA_data, moduleColors)$eigengenes
MEs = orderMEs(MEs0)
# Remove Grey
MEs <- MEs %>%
dplyr::select(-MEgrey)
save(datTraits, file = "daTraits.RData") # for mir663 analysis
save(MEs, datTraits, file = "hippocampus_MEs.RData")
### Infant hippocampus ----------------------------------------------------
load("hippocampus_MEs.RData")
datTraits <- datTraits %>%
tibble::rownames_to_column(var = "InfantID") %>%
dplyr::left_join(readxl::read_xlsx("hippocampusLipids.xlsx") %>%
dplyr::mutate(InfantID = as.character(InfantID)) %>%
dplyr::select(InfantID,
"C16:0 Palmitic Acid/Hexadecanoic Acid",
"C18:0 Stearic Acid/Octadecanoic Acid",
"C18:2 n-6 Linoleic Acid",
"C20:3 Homo-y-Linolenic Acid/8,11,14-Eicosatrienoic Acid" = "C20:3 Homo-gamma-linolenic acid/8,11,14-eicosatrienoic acid",
"C20:4 n-6 Arachidonic Acid" = "C20:4 n-6 Arachidonic acid" ,
"C22:6/C24:1 Docosahexaenoic Acid (DHA)" = "C22:6 (DHA)/C24:1 Docosahexaenoic Acid (DHA)"
)) %>%
dplyr::left_join(readr::read_csv("Infant_hippocampus_conc_adj.csv") %>%
dplyr::left_join(readr::read_csv("Infant_brain_meta.csv")) %>%
dplyr::select(Exp_ID, InfantID = Animal_ID,
"b-Hydroxybutyric Acid" = `3_Hydroxybutyrate`,
Asparagine, Citrate, Glutathione, Glycerol, Guanosine, UMP) %>%
dplyr::mutate(InfantID = as.character(InfantID)) %>%
dplyr::select(-Exp_ID)) %>%
tibble::column_to_rownames(var = "InfantID")
moduleTraitCor = cor(MEs, datTraits, use = "p")
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, 25)
#### Heatmap --------------------------------------------------------------
pdf("hippocampus_tidy_module_trait_correlations.pdf", height = 4, width = 15)
textMatrix <- paste(signif(moduleTraitCor, 2), "\n(",
signif(moduleTraitPvalue, 1), ")", sep = "")
dim(textMatrix) <- dim(moduleTraitCor)
par(mar = c(6, 8.5, 3, 3))
labeledHeatmap(Matrix = moduleTraitCor,
xLabels = names(datTraits) %>%
stringr::str_wrap(25),
yLabels = names(MEs),
ySymbols = names(MEs),
colorLabels = FALSE,
colors = blueWhiteRed(50),
textMatrix = textMatrix,
setStdMargins = FALSE,
cex.text = 1,
zlim = c(-1,1),
main = paste("Infant Hippocampus Module-trait Relationships"),
verticalSeparator.x = c(4,6, 12),
horizontalSeparator.y = c(2,3))
dev.off()
### Blue Module Plots ----------------------------------------------------
data <- MEs %>%
tibble::rownames_to_column("ID") %>%
dplyr::left_join(datTraits %>%
tibble::rownames_to_column("ID")) %>%
dplyr::mutate(Group = dplyr::case_when(Obese == 1 ~ "Obese",
Control == 1 ~ "Control",
Restriction == 1 ~ "Restriction",
Pravastatin == 1 ~ "Pravastatin")) %>%
dplyr::mutate(Group = factor(Group, levels = c("Obese", "Pravastatin", "Restriction", "Control"))) %>%
dplyr::as_tibble()
#### Dot plot -------------------------------------------------------------
# https://github.com/cemordaunt/comethyl/blob/0b2b0a124d5e6c118aa25431e3d7bf58ccfcea9a/R/Explore_Module_Trait_Correlations.R#L109
# blueModule <- data %>%
# ggplot(aes(x = Group, y = MEblue, group = Group, color = Group)) +
# geom_boxplot() +
# geom_dotplot(binaxis = 'y', stackdir = 'center', dotsize = -.75, aes(fill = Group)) +
# ggplot2::theme_classic() +
# ggplot2::theme(text = element_text(size = 10),
# axis.title = element_text(size = 10),
# axis.title.x = element_blank(),
# legend.position = "none") +
# ylab("Eigengene") +
# scale_color_manual(values=c("firebrick3", "darkgoldenrod1", "forestgreen", "mediumblue")) +
# scale_fill_manual(values=c("firebrick3", "darkgoldenrod1", "forestgreen", "mediumblue"))
blueModule <- data %>%
ggplot2::ggplot(aes(x = Group,
y = MEblue,
fill = Group,
group = Group,)) +
ggplot2::geom_bar(stat = "summary",
position = position_dodge(),
color = "Black",
size = 0.25) +
ggplot2::theme_classic() +
ggplot2::theme(text = element_text(size = 8),
axis.title = element_text(size = 8),
axis.title.x = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1, size = 8),
legend.position = "none") +
scale_fill_manual(values = c("firebrick3", "darkgoldenrod1", "forestgreen", "mediumblue")) +
ylab("Eigengene")
#### Scatter plot ---------------------------------------------------------
# Modified from: https://github.com/cemordaunt/comethyl/blob/0b2b0a124d5e6c118aa25431e3d7bf58ccfcea9a/R/Explore_Module_Trait_Correlations.R#L151
## Scatter plot of one trait against the blue module eigengene with a robust
## linear (MASS::rlm) trend line.
## @param data  tibble containing an `MEblue` column plus the trait columns
## @param trait string naming the trait column (back-quote non-syntactic names)
## @return a ggplot object
## BUG FIX: the original signature was function(data = data, trait = trait) --
## self-referential defaults that error ("promise already under evaluation")
## if a caller ever relies on them, so removing them is safe.
smoothWGCNA <- function(data, trait){
data %>%
ggplot(ggplot2::aes_string(x = rlang::as_name(trait), y = "MEblue")) + # color = "Group"
geom_smooth(method = MASS::rlm,
formula = y ~ x,
color = "#56B1F7",
fill = "#336A98",
se = FALSE) +
geom_point(size = 1.5) +
ggplot2::theme_classic() +
ggplot2::theme(text = element_text(size = 8),
axis.title = element_text(size = 8),
axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "none") +
#scale_color_manual(values=c("Obese" = "firebrick3", "Pravastatin" = "darkgoldenrod1", "Restriction" = "forestgreen", "Control" ="mediumblue")) +
## BUG FIX: the original ended with `ylab("Eigengene") %>% return()`; because
## %>% binds tighter than +, return() was applied to the ylab object inside
## the pipe rather than to the plot (the plot was only returned because it
## happened to be the last expression). The plain last expression is correct.
ylab("Eigengene")
}
#### Combine --------------------------------------------------------------
cowplot::plot_grid(blueModule,
smoothWGCNA(data, "`Recognition Memory: Abstract Stimuli`"),
smoothWGCNA(data, "`Recognition Memory: Social Stimuli`"),
smoothWGCNA(data, "`C18:2 n-6 Linoleic Acid`"),
smoothWGCNA(data, "Asparagine"),
smoothWGCNA(data, "Citrate"),
nrow = 1,
align = "h",
labels = c("B)", "C)", "D)", "E)", "F)", "G)"),
label_size = 12,
scale = 0.9) %>%
ggplot2::ggsave("blueModule.pdf",
plot = .,
width = 12,
height = 2)
### Maternal Serum --------------------------------------------------------
## For one gestational-day timepoint, assemble the maternal cffDNA methylation
## and maternal plasma measurements for each infant, then correlate every
## assembled variable against the hippocampal blue module eigengene.
## @param GD  timepoint label ("GD040", "GD090", "GD120", or "GD150")
## @param MEs module eigengene data frame, rownames = InfantID, with `MEblue`
## @return a 1-row correlation matrix (row named after GD) of MEblue vs traits
getModule <- function(GD,MEs){
print(glue::glue("Loading {GD}"))
# Start from the eigengene sample list so row order matches MEs.
tibble::tibble(InfantID = rownames(MEs)) %>%
dplyr::left_join(readRDS("chr9_all_DUX4_Block_cffDNA_individual_smoothed_methylation.rds") %>% # "chr9_blue_dmrs_whole_regioncffDNA_individual_smoothed_methylation.rds"
# harmonize timepoint labels to the GD0xx convention
dplyr::mutate(Timepoint = dplyr::case_when(Timepoint == "GD45" ~ "GD040",
Timepoint == "GD90" ~ "GD090",
TRUE ~ as.character(Timepoint))) %>%
dplyr::filter(Timepoint == !!GD) %>%
dplyr::select(InfantID = Infant, Timepoint, "DUX4 Block", "DUX4 Region 1",
"DUX4 Region 3", "DUX4 Region 9", "DUX4 Region 19", "DUX4 Region 20") %>%
dplyr::mutate(InfantID = as.character(InfantID)) %>%
tidyr::pivot_wider(names_from = Timepoint)) %>%
# attach the infant <-> mother ID mapping
dplyr::left_join(readxl::read_xlsx("IDs.xlsx") %>%
dplyr::mutate(InfantID = as.character(InfantID))) %>%
# attach maternal plasma measurements for this timepoint
dplyr::left_join(readr::read_csv("Maternal_plasma_meta.csv") %>%
dplyr::left_join(readr::read_csv("Maternal_plasma_conc_adj.csv"), by = "Exp") %>%
dplyr::filter(Plasma_color == "normal") %>%
dplyr::filter(GD_targeted == !!GD) %>%
dplyr::select(Exp, MotherID = Animal_ID,
"Alkaline Phosphatase" = Alk_Phos,
Triglyceride,
Hematocrit,
"Lymphocytes" = Lymphocytes_ul,
"White Blood Cells" = WBC,
"MCP-1 (CCL2)" = MCP_1,
"IFN-g" = IFN_g,
"IL-1RA" = IL_ra,
"IL-2" = IL_2,
"IL-8" = IL_8,
"IL-10" = IL_10,
"IL-13" = IL_13,
"sCD40L" = sCD40L,
"TGF-a" = TGFa,
"hs-CRP" = hsCRP,
"a-Ketoglutaric acid" = `2_Oxoglutarate`,
"b-Hydroxybutyric acid" = `3_Hydroxybutyrate`,
Arginine,
"Acetoacetate" = Acetoacetate,
"Choline" = Choline,
Creatine,
Glycine,
Glutamine,
"Myo-inositol" = myo_Inositol,
Pyruvate,
Succinate,
Uridine)) %>%
# drop the join keys before correlating
dplyr::select(-Exp) %>%
dplyr::select(-MotherID) %>%
dplyr::mutate(InfantID = as.character(InfantID)) %>%
tibble::column_to_rownames(var = "InfantID") %>%
as.matrix() %>%
# correlate MEblue (renamed to the timepoint) against every assembled column;
# use = "p" is pairwise-complete observations in WGCNA::cor
WGCNA::cor(MEs %>%
dplyr::select(!!GD := MEblue),
.,
use = "p")
}
# Build the blue-module x maternal-plasma correlation matrix across the four
# sampled timepoints, then plot it as a labeled heatmap.
load("hippocampus_MEs.RData")
moduleTraitCor <- purrr::map(c("GD040", "GD090", "GD120", "GD150"),
                             getModule,
                             MEs = MEs) %>%
  # One 1-row matrix per timepoint; stack into a 4-row matrix.
  do.call(rbind, .) %>%
  magrittr::set_rownames(c("Trimester 1",
                           "Trimester 2",
                           "Early Trimester 3",
                           "Late Trimester 3"))
# 25 is hard-coded as the sample size for the Student p-values —
# TODO confirm it matches the actual number of animals.
moduleTraitPvalue <- corPvalueStudent(moduleTraitCor, 25) %>%
  t()
# Transpose so traits are rows and timepoints are columns in the heatmap.
moduleTraitCor <- moduleTraitCor %>%
  t()
#### Heatmap --------------------------------------------------------------
pdf("hippocampus_tidy_module_trait_correlations_maternal_plasma_select_blue.pdf", height = 15.5, width = 6) #50
# Each cell shows "correlation\n(p-value)".
textMatrix <- paste(signif(moduleTraitCor, 2), "\n(",
                    signif(moduleTraitPvalue, 1), ")", sep = "")
dim(textMatrix) <- dim(moduleTraitCor)
par(mar = c(6, 8.5, 3, 3))
labeledHeatmap(Matrix = moduleTraitCor,
               xLabels = colnames(moduleTraitCor) %>%
                 stringr::str_wrap(25),
               yLabels = rownames(moduleTraitCor),
               ySymbols = rownames(moduleTraitCor),
               colorLabels = FALSE,
               colors = blueWhiteRed(50),
               textMatrix = textMatrix,
               setStdMargins = FALSE,
               cex.text = 1,
               zlim = c(-1,1),
               main = paste("Hippocampus Blue Module-trait Relationships with Maternal Blood") %>%
                 stringr::str_wrap(40),
               # Rules after rows 6/11/21 — presumably separating trait
               # categories; verify against the select() order in getModule.
               horizontalSeparator.y = c(6,11,21))
dev.off()
### Supplementary ---------------------------------------------------------
#### Brain ----------------------------------------------------------------
# Extend datTraits with infant hippocampus lipids/metabolites, then correlate
# every module eigengene against every trait and plot the full heatmap.
load("hippocampus_MEs.RData")
# NOTE(review): datTraits must already exist in the workspace (loaded earlier
# or from the .RData above) — not created here.
datTraits <- datTraits %>%
  tibble::rownames_to_column(var = "InfantID") %>%
  dplyr::left_join(readxl::read_xlsx("hippocampusLipids.xlsx") %>%
                     dplyr::mutate(InfantID = as.character(InfantID))) %>%
  dplyr::left_join(readr::read_csv("Infant_hippocampus_conc_adj.csv") %>%
                     dplyr::left_join(readr::read_csv("Infant_brain_meta.csv") %>%
                                        dplyr::select(Exp_ID, InfantID = Animal_ID) %>%
                                        dplyr::mutate(InfantID = as.character(InfantID))) %>%
                     dplyr::select(-Exp_ID)) %>%
  tibble::column_to_rownames(var = "InfantID")
moduleTraitCor = cor(MEs, datTraits, use = "p")
# 25 = assumed sample size for the p-values — TODO confirm.
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, 25)
##### Heatmap -------------------------------------------------------------
pdf("hippocampus_tidy_module_trait_correlations_long.pdf", height = 4, width = 50)
textMatrix <- paste(signif(moduleTraitCor, 2), "\n(",
                    signif(moduleTraitPvalue, 1), ")", sep = "")
dim(textMatrix) <- dim(moduleTraitCor)
par(mar = c(6, 8.5, 3, 3))
labeledHeatmap(Matrix = moduleTraitCor,
               xLabels = names(datTraits) %>%
                 stringr::str_wrap(25),
               yLabels = names(MEs),
               ySymbols = names(MEs),
               colorLabels = FALSE,
               colors = blueWhiteRed(50),
               textMatrix = textMatrix,
               setStdMargins = FALSE,
               cex.text = 1,
               zlim = c(-1,1),
               main = paste("Infant Hippocampus Module-trait Relationships"))
dev.off()
#### Maternal Serum -------------------------------------------------------
# Second definition of getModule (overwrites the earlier one): identical
# structure, but pulls ALL DUX4 columns and the full maternal plasma panel
# instead of the curated subset. Returns a 1-row correlation matrix of the
# blue eigengene vs. every trait at gestational day GD.
getModule <- function(GD,MEs){
  print(glue::glue("Loading {GD}"))
  tibble::tibble(InfantID = rownames(MEs)) %>%
    dplyr::left_join(readRDS("chr9_all_DUX4_Block_cffDNA_individual_smoothed_methylation.rds") %>%
                       # Normalize timepoint labels before matching GD.
                       dplyr::mutate(Timepoint = dplyr::case_when(Timepoint == "GD45" ~ "GD040",
                                                                  Timepoint == "GD90" ~ "GD090",
                                                                  TRUE ~ as.character(Timepoint))) %>%
                       dplyr::filter(Timepoint == !!GD) %>%
                       dplyr::select(InfantID = Infant, Timepoint, contains("DUX4")) %>%
                       dplyr::mutate(InfantID = as.character(InfantID)) %>%
                       tidyr::pivot_wider(names_from = Timepoint)) %>%
    dplyr::left_join(readxl::read_xlsx("IDs.xlsx") %>%
                       dplyr::mutate(InfantID = as.character(InfantID))) %>%
    # Full clinical-chemistry / cytokine / metabolite panel for the mother at
    # the targeted gestational day ("normal" plasma only).
    dplyr::left_join(readr::read_csv("Maternal_plasma_meta.csv") %>%
                       dplyr::select(Exp, EPV_b,EPV,WBC,RBC,Hemoglobin,Hematocrit,MCV,MCH,MCHC,Platelets,
                                     Seg_Neutrophils_percent,Seg_Neutrophils_per.ul,Lymphocytes_percent,
                                     Lymphocytes_ul,Monocytes_percent,Monocytes_per.ul,Eosinophils_percent,
                                     Eosinophils_per.ul,Plasma_Protein,Fibrinogen,Sodium,Potassium,
                                     Chloride,TCO2,Anion_Gap,Phosphorous,Calcium,BUN,Total_Protein,
                                     Albumin,ALT,AST,CPK,Alk_Phos,GGT,LDH,Cholesterol,Triglyceride,Bili_Total,
                                     Direct,hsCRP,GM_CSF,IFN_g,IL_1b,IL_ra,IL_2,IL_6,IL_8,IL_10,"IL_12/23_p40",IL_13,IL_15,
                                     IL_17a,MCP_1,MIP_1b,MIP_1a,sCD40L,TGFa,TNFa,VEGF,C_Peptide,GIP,Insulin,
                                     Insulin_uU.mL,Leptin,PP_53,PYY_54,Sx, Plasma_color, GD_targeted, Animal_ID) %>%
                       dplyr::left_join(readr::read_csv("Maternal_plasma_conc_adj.csv"), by = "Exp") %>%
                       dplyr::filter(Plasma_color == "normal") %>%
                       dplyr::filter(GD_targeted == !!GD) %>%
                       dplyr::rename(MotherID = Animal_ID)) %>%
    # Drop identifiers/filters so only numeric traits reach the correlation.
    dplyr::select(-Exp, -Plasma_color, -GD_targeted, -MotherID) %>%
    dplyr::mutate(InfantID = as.character(InfantID)) %>%
    tibble::column_to_rownames(var = "InfantID") %>%
    as.matrix() %>%
    WGCNA::cor(MEs %>%
                 dplyr::select(!!GD := MEblue),
               .,
               use = "p")
}
# Same workflow as the "select" heatmap above, but for the full trait panel
# produced by the second getModule definition (long output PDF).
load("hippocampus_MEs.RData")
moduleTraitCor <- purrr::map(c("GD040", "GD090", "GD120", "GD150"),
                             getModule,
                             MEs = MEs) %>%
  do.call(rbind, .) %>%
  magrittr::set_rownames(c("Trimester 1",
                           "Trimester 2",
                           "Early Trimester 3",
                           "Late Trimester 3"))
# 25 = assumed sample size — TODO confirm.
moduleTraitPvalue <- corPvalueStudent(moduleTraitCor, 25) %>%
  t()
moduleTraitCor <- moduleTraitCor %>%
  t()
##### Heatmap -------------------------------------------------------------
pdf("hippocampus_tidy_module_trait_correlations_maternal_plasma_long_blue.pdf", height = 55, width = 6) #50
textMatrix <- paste(signif(moduleTraitCor, 2), "\n(",
                    signif(moduleTraitPvalue, 1), ")", sep = "")
dim(textMatrix) <- dim(moduleTraitCor)
par(mar = c(6, 8.5, 3, 3))
labeledHeatmap(Matrix = moduleTraitCor,
               xLabels = colnames(moduleTraitCor) %>%
                 stringr::str_wrap(25),
               yLabels = rownames(moduleTraitCor),
               ySymbols = rownames(moduleTraitCor),
               colorLabels = FALSE,
               colors = blueWhiteRed(50),
               textMatrix = textMatrix,
               setStdMargins = FALSE,
               cex.text = 1,
               zlim = c(-1,1),
               main = paste("Hippocampus Blue Module-trait Relationships with Maternal Blood") %>%
                 stringr::str_wrap(40))
dev.off()
### DMR significance and module membership ----------------------------------
# DMR significance and module membership
# Standard WGCNA tutorial-3 workflow: compute per-region module membership
# (correlation with each eigengene) and trait significance (correlation with
# the Obese indicator), plot MM vs GS for the blue module, and write the
# combined table to CSV. Requires WGCNA_data, MEs, datTraits, moduleColors,
# and nSamples in the workspace.
trait <- "Obese"
# Define variable diagnosis containing the diagnosis column of datTrait
# (!!trait unquotes the string so select() picks the "Obese" column).
trait <- datTraits %>%
  dplyr::select(!!trait)
# names (colors) of the modules
modNames = substring(names(MEs), 3)
geneModuleMembership = as.data.frame(cor(WGCNA_data, MEs, use = "p"));
MMPvalue = as.data.frame(corPvalueStudent(as.matrix(geneModuleMembership), nSamples));
names(geneModuleMembership) = paste("MM", modNames, sep="");
names(MMPvalue) = paste("p.MM", modNames, sep="");
geneTraitSignificance = as.data.frame(cor(WGCNA_data, trait, use = "p"));
GSPvalue = as.data.frame(corPvalueStudent(as.matrix(geneTraitSignificance), nSamples));
names(geneTraitSignificance) = paste("GS.", names(trait), sep="");
names(GSPvalue) = paste("p.GS.", names(trait), sep="");
# Intramodular analysis
# Choose module from correlation heatmap
module = "blue"
column = match(module, modNames);
moduleGenes = moduleColors==module;
# sizeGrWindow(7, 7);
# par(mfrow = c(1,1));
pdf(paste0("hippocampus_blue_module_membership_", names(trait), ".pdf"), height = 7, width = 7)
verboseScatterplot(abs(geneModuleMembership[moduleGenes, column]),
                   abs(geneTraitSignificance[moduleGenes, 1]),
                   xlab = paste("Module Membership in", module, "module"),
                   ylab = paste("Gene significance for", names(trait)),
                   main = paste("Module membership vs. gene significance\n"),
                   cex.main = 1.2, cex.lab = 1.2, cex.axis = 1.2, col = module)
dev.off()
# Create the starting data frame for saving
geneInfo0 = data.frame(moduleColor = moduleColors,
                       geneTraitSignificance,
                       GSPvalue)
# Order modules by their significance for diagnosis
modOrder = order(-abs(cor(MEs, trait, use = "p")));
# Add module membership information in the chosen order
for (mod in 1:ncol(geneModuleMembership))
{
  oldNames = names(geneInfo0)
  geneInfo0 = data.frame(geneInfo0, geneModuleMembership[, modOrder[mod]],
                         MMPvalue[, modOrder[mod]]);
  names(geneInfo0) = c(oldNames, paste("MM.", modNames[modOrder[mod]], sep=""),
                       paste("p.MM.", modNames[modOrder[mod]], sep=""))
}
# Order the genes in the geneInfo variable first by module color, then by geneTraitSignificance
geneOrder = order(geneInfo0$moduleColor, -abs(geneInfo0$GS.Obese))
geneInfo = geneInfo0[geneOrder, ]
write.csv(geneInfo, file = "WGCNA_DMR_Info_Hippocampus_Obese.csv")
#' tidyModules
#' @description Extract and tidy regions of methylation from significant WGCNA modules
#' @param sigModules Character vector with names of significant modules
#' @param geneInfo geneInfo dataframe from WGCNA analysis (end of tutorial 3);
#'   rownames are "chr_start_end"-style region identifiers
#' @return Genomic ranges object with coordinates for regions of methylation and module names as meta information
#' @export tidyModules
tidyModules <- function(sigModules = sigModules,
                        geneInfo = geneInfo){
  geneInfo %>%
    # Qualified namespace for consistency with the rest of this script, and so
    # the function works even when tibble is not attached.
    tibble::rownames_to_column() %>%
    dplyr::as_tibble() %>%
    dplyr::filter(moduleColor %in% sigModules) %>%
    dplyr::select(rowname, moduleColor) %>%
    # Region IDs split into coordinates on the first non-alphanumeric runs.
    tidyr::separate(rowname, into = c('seqnames', 'start', 'end')) %>%
    makeGRangesFromDataFrame(keep.extra.columns = TRUE, ignore.strand = TRUE)
}
# Materialize the blue-module regions and the background region set as GRanges
# (UCSC chromosome naming) and save both for the annotation step below.
sigModuleRanges <- tidyModules(sigModules = "blue", geneInfo = geneInfo)
#sigModuleRanges <- split(sigModuleRanges, sigModuleRanges$moduleColor)
seqlevelsStyle(sigModuleRanges) <- "UCSC"
# NOTE(review): `smoothed` is created elsewhere (not in this file's visible
# scope) — presumably the full set of assayed regions; verify before rerun.
backgroundRanges <- makeGRangesFromDataFrame(smoothed)
seqlevelsStyle(backgroundRanges) <- "UCSC"
save(sigModuleRanges,backgroundRanges, file = "Hippocampus_blue_module_ranges.RData")
#### Annotate ---------------------------------------------------------------
# annotateRegions: annotate a GRanges of regions with nearest-gene information
# via ChIPseeker::annotatePeak and return a tidy tibble.
#
# regions — GRanges to annotate.
# TxDb    — transcript database; if an EnsDb is supplied it is restricted to
#           protein-coding genes and the seqlevels are switched to Ensembl
#           naming (workaround for unsupported organisms).
# annoDb  — OrgDb package name (string) used for gene-symbol lookup.
#
# Returns a tibble with coordinates, annotation category (feature type with
# the parenthetical detail stripped), distance to TSS, and gene identifiers.
annotateRegions <- function(regions = sigRegions,
                            TxDb = TxDb,
                            annoDb = annoDb){
  print(glue::glue("Annotating {tidyRegions} regions from {tidyGenome} with gene symbols",
                   tidyRegions = length(regions),
                   tidyGenome = TxDb %>%
                     GenomeInfoDb::genome() %>%
                     unique()))
  if(class(TxDb) == "EnsDb"){
    seqlevelsStyle(regions) <- "Ensembl" # Work around for organism not supported
    TxDb <- TxDb %>%
      ensembldb::filter(GeneBiotypeFilter("protein_coding"))
    #ensembldb::filter(~ symbol != "NA" & gene_name != "NA" & entrez != "NA" & tx_biotype == "protein_coding")
  }
  regions %>%
    ChIPseeker::annotatePeak(TxDb = TxDb,
                             annoDb = annoDb,
                             overlap = "all",
                             verbose = FALSE
                             ) %>%
    dplyr::as_tibble() %>%
    dplyr::select("seqnames",
                  "start",
                  "end",
                  "width",
                  "annotation",
                  "distanceToTSS",
                  "SYMBOL",
                  "GENENAME",
                  "geneId") %>%
    dplyr::rename(geneSymbol = SYMBOL,
                  gene = GENENAME
                  ) %>%
    # "Intron (uc002..., intron 1 of 2)" -> "Intron"
    dplyr::mutate(annotation = gsub(" \\(.*","", annotation)) %>%
    return()
}
# DMReport: render an HTML summary table ("{name}.html") of annotated
# significant regions using the gt package.
#
# sigRegions  — tibble of annotated significant regions (must contain the
#               columns selected below, including Name).
# regions     — background region set; only its length is reported.
# bs.filtered — accepted but unused in the body (kept for call-site
#               compatibility — TODO confirm before removing).
# coverage    — coverage threshold, reported in the subtitle only.
# name        — report title and output-file stem.
#
# Side effect: writes "{name}.html" to the working directory; returns nothing
# useful (the result of cat()).
DMReport <- function(sigRegions = sigRegions,
                     regions = regions,
                     bs.filtered = bs.filtered,
                     coverage = coverage,
                     name = "DMReport"){
  cat("\n","Preparing HTML report...")
  # Guard: sigRegions must already be a tibble (not a GRanges).
  stopifnot(class(sigRegions) == c("tbl_df", "tbl", "data.frame"))
  sigRegions %>%
    dplyr::select(seqnames,
                  start,
                  end,
                  width,
                  annotation,
                  distanceToTSS,
                  geneSymbol,
                  gene,
                  geneId,
                  Name) %>%
    gt::gt() %>%
    gt::tab_header(
      title = name,
      subtitle = glue::glue("There are {tidySigRegions} regions \\
                            from {tidyRegions} background regions \\
                            assayed at {coverage}x coverage.
                            On average, the DMRs are {avgLength} bp long.",
                            tidySigRegions = nrow(sigRegions),
                            tidyRegions = length(regions),
                            avgLength = mean(sigRegions$width) %>% round()
                            )
      ) %>%
    gt::fmt_number(
      columns = gt::vars("width"),
      decimals = 0) %>%
    gt::as_raw_html(inline_css = FALSE) %>%
    write(glue::glue("{name}.html"))
  cat("Done", "\n")
}
##### Run -----------------------------------------------------------------
# Driver: load the saved blue-module ranges, set up the rhesus (rheMac10)
# annotation resources, annotate the module regions, render the HTML report,
# write the annotated table to xlsx, and export per-sample smoothed
# methylation over the module regions.
# NOTE(review): setwd()/hard-coded cluster paths make this non-portable.
setwd("/share/lasallelab/Ben/Obesity/meta/WGCNA/Hippocampus")
load("Hippocampus_blue_module_ranges.RData")
.libPaths("/share/lasallelab/programs/DMRichR/R_4.0")
#BiocManager::install("jorainer/ChIPseeker")
packages <- c("AnnotationHub", "ensembldb", "DMRichR", "org.Mmu.eg.db", "plyranges")
stopifnot(suppressMessages(sapply(packages, require, character.only = TRUE)))
AnnotationHub::setAnnotationHubOption("CACHE", "/share/lasallelab/programs/DMRichR/R_4.0")
# AH83244 = rhesus macaque EnsDb from AnnotationHub.
TxDb <- AnnotationHub::AnnotationHub()[["AH83244"]]
annoDb <- "org.Mmu.eg.db"
coverage <- 1
genome <- "rheMac10"
load("../../../brains/DMRs/all/Hippocampus_bsseq.RData")
print("Annotating module ranges")
sigModuleRanges %>%
  annotateRegions(TxDb = TxDb,
                  annoDb = annoDb) %>%
  # Fall back to the Ensembl geneId when no symbol was found, then replace
  # several placeholder IDs with curated gene names.
  dplyr::mutate(Name = dplyr::case_when(geneSymbol != "NA" ~ geneSymbol,
                                        is.na(geneSymbol) ~ geneId)) %>%
  dplyr::mutate(Name = dplyr::case_when(Name == "ENSMMUG00000060367" ~ "DUX4",
                                        Name == "LOC715898" ~ "GOLGA6C",
                                        Name == "LOC699789" ~ "DPY19L2",
                                        Name == "LOC715936" ~ "ZNF721",
                                        Name == "C14H11orf74" ~ "IFTAP",
                                        Name == "ENSMMUG00000001136" ~ "KLK13",
                                        Name == "ENSMMUG00000012704" ~ "ISOC2",
                                        TRUE ~ as.character(Name))) %T>%
  # %T>% tees the annotated tibble into the report, then continues to xlsx.
  DMReport(regions = backgroundRanges,
           bs.filtered = bs.filtered,
           coverage = coverage,
           name = "WGCNA_hippocampus_blue_module") %>%
  openxlsx::write.xlsx("DMRs_annotated_Ensembl_WGCNA_hippocampus_blue_module.xlsx")
bs.filtered.bsseq %>%
  DMRichR::getSmooth(sigModuleRanges) %>%
  write.csv("blue_module_individual_smoothed_methylation.csv")
### Hub genes -------------------------------------------------------------
#setwd("/share/lasallelab/Ben/Obesity/meta/WGCNA/Hippocampus")
#load("WGCNA_1.RData")
#load("Hippocampus_tidy_datTraits.RData")
#load("WGCNA-networkConstruction-auto.RData")
# Pick the single most-connected region ("hub") in each module, convert its
# "chr_start_end" identifier into a GRanges, annotate, and save to xlsx.
hubGenes <- chooseTopHubInEachModule(
  WGCNA_data,
  moduleColors,
  omitColors = "grey",
  power = 4, # https://support.bioconductor.org/p/46342/
  type = "signed") %>%
  as.data.frame() %>%
  tibble::rownames_to_column() %>%
  purrr::set_names("module", "coordinate") %>%
  tidyr::separate(coordinate, into = c('seqnames', 'start', 'end')) %>%
  makeGRangesFromDataFrame(keep.extra.columns = TRUE, ignore.strand = TRUE)
hubGenes %>%
  annotateRegions(TxDb = TxDb,
                  annoDb = annoDb) %>%
  # Re-attach module color (annotateRegions drops metadata columns).
  tibble::add_column(hubGenes$module) %>%
  openxlsx::write.xlsx("Hubgenes_WGCNA_hippocampus.xlsx")
### Export to cytoscape ---------------------------------------------------
# Load the topological overlap matrix and network objects, then select the
# probes belonging to the blue module for the Cytoscape export below.
# NOTE(review): setwd() twice in a row; collapse to one absolute path.
setwd("/share/lasallelab/Ben/Obesity/meta")
setwd("WGCNA/Hippocampus/")
load("regionTOM-block.1.RData") # Takes a long time to load
load("WGCNA_1.RData")
#load("Hippocampus_tidy_datTraits.RData")
load("WGCNA-networkConstruction-auto.RData")
# Select modules
modules = "blue"
# Select module probes
probes = names(WGCNA_data)
inModule = is.finite(match(moduleColors, modules))
modProbes = probes[inModule]
# Annotate module probes
TxDb <- AnnotationHub::AnnotationHub()[["AH83244"]]
annoDb <- "org.Mmu.eg.db"
# https://stackoverflow.com/questions/7659891/r-make-unique-starting-in-1
# Suffix duplicated values with 1-based counters ("a", "a" -> "a.1", "a.2"),
# unlike base::make.unique(), which leaves the first occurrence unsuffixed.
# Values occurring exactly once are returned unchanged; positions preserved.
make.unique.2 <- function(x, sep = ".") {
  number_group <- function(grp) {
    if (length(grp) > 1) {
      paste(grp, seq_along(grp), sep = sep)
    } else {
      grp
    }
  }
  ave(x, x, FUN = number_group)
}
# Build human-readable node names for the blue-module probes, subset the TOM
# to the module, and export edge/node files for Cytoscape.
modGenes <- modProbes %>%
  dplyr::as_tibble() %>%
  tidyr::separate(value, into = c('seqnames', 'start', 'end')) %>%
  makeGRangesFromDataFrame(keep.extra.columns = TRUE, ignore.strand = TRUE) %>%
  annotateRegions(TxDb = TxDb,
                  annoDb = annoDb) %>%
  # Fall back to geneId when no symbol, then map placeholder IDs to curated
  # names (same mapping as the report step, plus "Novel Gene" labels).
  dplyr::mutate(nodeName = dplyr::case_when(geneSymbol != "NA" ~ geneSymbol,
                                            is.na(geneSymbol) ~ geneId)) %>%
  dplyr::mutate(nodeName = dplyr::case_when(nodeName == "ENSMMUG00000060367" ~ "DUX4",
                                            nodeName == "LOC715898" ~ "GOLGA6C",
                                            nodeName == "LOC699789" ~ "DPY19L2",
                                            nodeName == "LOC715936" ~ "ZNF721",
                                            nodeName == "C14H11orf74" ~ "IFTAP",
                                            nodeName == "ENSMMUG00000001136" ~ "KLK13",
                                            nodeName == "ENSMMUG00000012704" ~ "ISOC2",
                                            nodeName == "ENSMMUG00000051744" ~ "Novel Gene 1",
                                            nodeName == "ENSMMUG00000052059" ~ "Novel Gene 2",
                                            nodeName == "ENSMMUG00000062616" ~ "Novel Gene 3",
                                            nodeName == "ENSMMUG00000031128" ~ "Novel Gene 4",
                                            nodeName == "ENSMMUG00000056847" ~ "Novel Gene 5",
                                            nodeName == "ENSMMUG00000060266" ~ "Novel Gene 6",
                                            nodeName == "ENSMMUG00000055454" ~ "Novel Gene 7",
                                            nodeName == "ENSMMUG00000055981" ~ "Novel Gene 8",
                                            nodeName == "ENSMMUG00000049528" ~ "Novel Gene 9",
                                            TRUE ~ as.character(nodeName))) %>%
  # Disambiguate repeated names with " 1", " 2", ... suffixes.
  dplyr::mutate(nodeName = make.unique.2(.$nodeName, sep = " ")) %>%
  purrr::pluck("nodeName")
# Select the corresponding Topological Overlap (This takes a long time)
# https://support.bioconductor.org/p/69715/
modTOM = as.matrix(TOM)[inModule, inModule]
dimnames(modTOM) = list(modProbes, modProbes)
save(modTOM, file = "modTOM.RData")
# Export the network into edge and node list files Cytoscape can read
cyt = exportNetworkToCytoscape(modTOM,
                               edgeFile = "Hippocampus_blue_CytoscapeInput-edges.txt",
                               nodeFile = "Hippocampus_blue_CytoscapeInput-nodes.txt",
                               weighted = TRUE,
                               threshold = 0.005, # 0.5 is default for function, 0.02 from tutorial
                               nodeNames = modGenes,
                               altNodeNames = modProbes,
                               nodeAttr = moduleColors[inModule])
# https://www.biostars.org/p/60896/
# On Cytoscape 3.8.0, you can import WGCNA network by File -> Import -> Network from File
# and selecting the module edge file. On the import dialogue box, you will typically
# select the fromNode column as the Source Node and the toNode column as the Target Node.
# The weight column should be left as an Edge Attribute.
# The direction column should be changed to interaction type.
# fromAltName is a Source Node Attribute while the toAltName is a Target Node Attribute.
|
c18f3bcc3d1d60de6fb9dd413c89498cc884869b
|
8bb16a139c8dda84505596c4528f71f4b440a924
|
/LG1/LabGuide1- Presentation/FirstRscript.R
|
c374e9ed5745e1072a118dfeee307bc2452d9961
|
[] |
no_license
|
berkozdoruk/Rstudio
|
4afe89247bccc1df20b8cf295e3fd199c6e52b73
|
c08cc1ddd0df9495cc512e51628157398d83fe34
|
refs/heads/master
| 2020-09-10T19:03:18.797693
| 2019-11-14T23:55:45
| 2019-11-14T23:55:45
| 221,807,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
FirstRscript.R
|
# Introductory R script: simple arithmetic, then rebinding a variable
# to a character value to show that earlier results are unaffected.
x <- 34
y <- 16
z <- x + y # sum
w <- y / x # ratio
# Show all four values.
print(x)
print(y)
print(z)
print(w)
# Rebind x to text; z and w keep the values computed from the old x.
x <- "some text"
print(x)
print(y)
print(z)
print(w)
|
f8886b7bf533c9e2b7835b9c8f9f8f3033c9294f
|
68d91392e0d0274ce502db0f612cbd7a9f1cd155
|
/scripts/gem_emodel.R
|
dbcf49954bc44a9b3e770fd1e0037c4d6e991ca5
|
[] |
no_license
|
kwesterman/diet-methylome-catalog
|
b994d38069ce4d2ba828969bd94531d36ab39508
|
2985bcefbbddd93de1bb0500d5febf45eadd15f6
|
refs/heads/master
| 2020-06-19T17:38:00.111057
| 2018-12-06T23:03:07
| 2018-12-06T23:03:07
| 74,843,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,810
|
r
|
gem_emodel.R
|
# GEM E-model EWAS driver for Framingham data.
# Usage: Rscript gem_emodel.R <exposure_var> <cov_suffix>
# Builds (once) a methylation matrix and combined covariate/diet table, then
# writes per-run exposure and covariate files and runs GEM_Emodel.
library(data.table)
library(dplyr)
args <- commandArgs(trailingOnly=TRUE)
if (!(length(args)==2)) { # Check for exactly 2 arguments
  stop("2 arguments needed: exposure_var and cov_suffix", call.=FALSE)
}
envVar <- args[1] # First argument is the dietary exposure of interest
cov_suffix <- args[2] # Second argument is the suffix describing the covariates included
foodLib <- c(cocoa="FFD118", n3="NUT_OMEGA", folate="NUT_FOLEQ", soy="FFD49", beans="FFD60", # For converting dietary factors into FFD codes
             french_fries="FFD99", coffee="FFD112", tea="FFD113", red_wine="FFD115", SSB="FFD145",
             nuts="FFD134", fat_pct="NUT_TFAT", vitk="NUT_VITK")
covCombos <- list(basic=c("id","SEX","AGE8","pc1","pc2","pc3","pc4","pc5"), # Associate covariate suffixes with specific sets of covariates
                  cov1=c("id","SEX","AGE8","pc1","pc2","pc3","pc4","pc5","Aspirin","CVD_med","Menopause","cig_d","smoking_now","drink_d_pwk","BMI","PAS7","NUT_CALOR"))
if(!(cov_suffix %in% names(covCombos))) stop("Covariate list not recognized.", call.=FALSE) # Check for existence of the requested covariate set
## Create methylation and additional variables files if they don't yet exist
if(!("methylation.txt" %in% list.files("../results/int"))) { # Does the re-formatted methylation data exist at the moment?
  methCovData <- fread("../data/fram_methyl_cov.csv") # data.table fread for faster read-in
  # Samples-as-columns layout: transpose so probes become rows.
  meth <- t(methCovData)
  colnames(meth) <- meth[1,]
  envData <- read.csv("../data/Off_Exam8_FFQ_diet.csv") %>% # Environmental exposure data comes from FHS FFQ and derived vars
    dplyr::rename(id=shareid) %>%
    dplyr::select(-dbGaP_Subject_ID) %>% # To avoid complications/confusion with the FHS ID
    t()
  colnames(envData) <- envData[1,]
  common_ids <- as.character(sort(base::intersect(meth[1,], envData[1,]))) # Get set of IDs common to methylation and dietary data
  # Rows 1 and 24+ of the transposed matrix are the id row plus the actual
  # methylation values; rows 2-23 are covariates — TODO confirm layout.
  write.table(meth[c(1,24:nrow(meth)),common_ids], "../results/int/methylation.txt", sep="\t", col.names=F, quote=F) # Write only the actual methylation data
  all_vars <- rbind(meth[1:23,common_ids], envData[-1,common_ids]) # All_vars is a combination of the covariate data from the methylation file and the dietary data
  write.table(all_vars, "../results/int/all_vars.txt", sep="\t", col.names=F, quote=F) # Write a file containing all possible exposure and covariate data
}
## Write experiment-specific data files for use by GEM
if(!(envVar %in% names(foodLib))) stop("dietary data not available") # Check for existence of the requested dietary factor
varData <- read.delim("../results/int/all_vars.txt", header=F, row.names=1) # all_vars contains all available covariates and dietary factors
env <- varData[c("id",foodLib[envVar]),] # Subset to grab only food variable of interest
write.table(env, paste0("../results/int/",envVar,".txt"), sep="\t", col.names=F, quote=F)
covs <- varData[covCombos[[cov_suffix]],] # Subset to grab only covariates of interest (based on above list)
write.table(covs, paste0("../results/int/cov_",cov_suffix,".txt"), sep="\t", col.names=F, quote=F)
## Run GEM EWAS analysis
library(GEM)
DATADIR <- getwd()
# NOTE(review): "../results/int/" already ends with a slash, so this yields a
# doubled separator ("...int//file"); harmless on most filesystems but check.
env_file_name <- paste(DATADIR, "../results/int/",paste0(envVar,".txt"), sep=.Platform$file.sep)
covariate_file_name <- paste(DATADIR, "../results/int", paste0("cov_",cov_suffix,".txt"), sep=.Platform$file.sep)
methylation_file_name <- paste(DATADIR, "../results/int/methylation.txt", sep=.Platform$file.sep)
Emodel_pv <- 1
Emodel_result_file_name <- paste0("../results/",envVar,"_",cov_suffix,"_result_Emodel.txt")
Emodel_qqplot_file_name <- paste0("../results/",envVar,"_",cov_suffix,"_qqplot_Emodel.jpg")
GEM_Emodel(env_file_name, covariate_file_name, methylation_file_name, Emodel_pv, Emodel_result_file_name, Emodel_qqplot_file_name, savePlot=T)
|
ba032a06250aab83bdf890bcaf3cc1a0afdc13da
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/localdepth/R/normal.R
|
9857dbe5583f60f91d1e46211098a97fe552d9b5
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,247
|
r
|
normal.R
|
# Local simplicial depth for the N(mean, sd) distribution, evaluated at each
# point of x. The quantile order tau in (0, 1) is converted to a window
# half-width on the scale of the difference of two independent normal draws.
lsdnorm <- function(x, mean=0, sd=1, tau=0.2) {
  if (tau >= 1 | tau <= 0)
    stop("quantile order of 'tau' must be in the range (0,1). For order equal to 1 use function 'sdnorm'")
  # Two-sided order -> half-width via the quantile of a N(0, sqrt(2)*sd).
  tau <- 1 - (1 - tau) / 2
  tau <- qnorm(tau, 0, sqrt(2) * sd)
  depth_at <- function(x0) {
    cdf <- function(t) pnorm(t, mean, sd)
    pdf <- function(t) dnorm(t, mean, sd)
    acca <- cdf(x0 + tau) + cdf(x0 - tau) - 2 * cdf(x0)
    left  <- integrate(function(t) cdf(t + tau) * pdf(t), x0 - tau, x0)$value
    right <- integrate(function(t) cdf(t - tau) * pdf(t), x0, x0 + tau)$value
    left - right + cdf(x0) * acca
  }
  vapply(x, depth_at, numeric(1))
}
# Half-space-style local statistic for the N(mean, sd) distribution:
# F(x + w) + F(x - w) - 2 F(x), where w is the half-width implied by the
# quantile order tau in (0, 1). Vectorized over x; odd around the mean.
hsnorm <- function(x, mean=0, sd=1, tau=0.2) {
  if (tau >= 1 | tau <= 0)
    stop("quantile order of 'tau' must be in the range (0,1)")
  halfwidth <- qnorm(1 - (1 - tau) / 2, 0, sqrt(2) * sd)
  pnorm(x + halfwidth, mean, sd) + pnorm(x - halfwidth, mean, sd) - 2 * pnorm(x, mean, sd)
}
# Simplicial depth for the N(mean, sd) distribution: 2 F(x) (1 - F(x)),
# maximized (0.5) at the mean. The upper tail is computed with
# lower.tail = FALSE rather than 1 - F(x) to avoid cancellation for large x.
sdnorm <- function(x, mean=0, sd=1) {
  lower <- pnorm(x, mean, sd)
  upper <- pnorm(x, mean, sd, lower.tail = FALSE)
  2 * lower * upper
}
|
58554fc444bf0fb30fc711a62f2b44dffa282e53
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/arsenal/examples/write2specific.Rd.R
|
86ef299c08709d41ee662a086725dfe721e5fe08
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
r
|
write2specific.Rd.R
|
# Auto-extracted example script for arsenal's write2word/write2html/write2pdf.
# All example calls are inside "Not run" comments; only the library() call
# executes when this file is sourced.
library(arsenal)
### Name: write2specific
### Title: write2word, write2html, write2pdf
### Aliases: write2specific write2word write2pdf write2html
### ** Examples
## Not run:
##D data(mockstudy)
##D # tableby example
##D tab1 <- tableby(arm ~ sex + age, data=mockstudy)
##D write2html(tab1, "~/trash.html")
##D
##D # freqlist example
##D tab.ex <- table(mockstudy[, c("arm", "sex", "mdquality.s")], useNA = "ifany")
##D noby <- freqlist(tab.ex, na.options = "include")
##D write2pdf(noby, "~/trash2.pdf")
##D
##D # A more complicated example
##D write2word(tab1, "~/trash.doc",
##D keep.md = TRUE,
##D reference_docx = mystyles.docx, # passed to rmarkdown::word_document
##D quiet = TRUE, # passed to rmarkdown::render
##D title = "My cool new title") # passed to summary.tableby
## End(Not run)
|
16e15dc70f38477913f48116568cab59b438d1a1
|
734951b8582b89a3336ab0244a2f55addb233a0a
|
/FactorAnalysis.R
|
089a9a84a3e1f91d15213d35a5ccf03d516d4151
|
[] |
no_license
|
gerardloquet/WP4pilot
|
222f84255704b1776b41a60dae81ab7d7c5a752a
|
4e5568c6b72225d39f264a380c2e1a73bf693b07
|
refs/heads/master
| 2023-07-13T20:15:41.185346
| 2021-08-27T12:19:13
| 2021-08-27T12:19:13
| 300,050,885
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
FactorAnalysis.R
|
# Load Packages
library(tidyverse)
library(here)
library(xlsx)
library(caret)
library(psych)
# Data path
data.path <- "C:/Users/loquetg/Documents/Data/"
# Load data (right-ear dataset; left-ear load is currently disabled)
data.right <- read.xlsx(paste0(data.path,"DATA_All_Norm_R_V6.xlsx"),1)
#data.left <- read.xlsx(paste0(data.path,"DATA_All_Norm_L_V6.xlsx"),1)
# Factor analysis
# Headers: PTA, DS, SRT, Region, Sex, Age, HINT_V2, HINT_bin, ACALOS_HTL, ACALOS_MCL, ACALOS_UCL, ACALOS_slope, STM,
# IPD, BINP_diop, BINP_dichop, BIN_totp, BIN_falsep, HA, HINT_V3, IDENTIF_V3, NOISE_ANN_V3, DIRECTION_V3,
# HEAD_DIR_V3, HEAD_W_V3, JFC_V3, IG55_V3, IG65_V3, IG80_V3, HINT_V4, IDENTIF_V4, NOISE_ANN_V4, DIRECTION_V4,
# HEAD_DIR_V4, HEAD_W_V4, JFC_V4
# Keep the numeric measures listed above (columns selected by position).
data.right.source <- data.right %>% select(c(3,4,5,6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37))
data.right.cor <- cor(data.right.source, use = 'complete.obs')
#now the factor analysis - make them orthogonal first
#png('Scree.png',width = 1000, height = 700)
# Parallel analysis on the correlation matrix; n.obs = 75 participants —
# TODO confirm against the actual number of complete cases.
nr.factors_right <- fa.parallel(data.right.cor, n.obs = 75, fa = 'fa')
# Fixed: the last line referenced `nr.factors`, which is never defined;
# print the object that was actually created.
nr.factors_right
|
712b5994662e99c0c450f009b2af5386dfcae9c6
|
a05d526d7092349652c96f5318194cf1735f1cc3
|
/man/import.bedmolecule.Rd
|
6fce45e1e6a6bd43dc744f60229f0c35dc9c27d2
|
[] |
no_license
|
charles-plessy/CAGEr
|
220fb5b044df118a505905896af8ce2efcfdbed1
|
0c79a3a592c1c8f6b5da086082d6369736dfa4ff
|
refs/heads/devel
| 2023-08-30T20:52:40.435814
| 2023-07-27T23:55:10
| 2023-07-27T23:55:10
| 113,113,541
| 7
| 5
| null | 2023-07-18T01:58:27
| 2017-12-05T01:03:04
|
HTML
|
UTF-8
|
R
| false
| true
| 876
|
rd
|
import.bedmolecule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImportMethods.R
\name{import.bedmolecule}
\alias{import.bedmolecule}
\title{import.bedmolecule}
\usage{
import.bedmolecule(filepath)
}
\arguments{
\item{filepath}{The path to the BED file.}
}
\value{
Returns a \code{\link{CTSS}} object.
}
\description{
Imports a BED file where each line counts for one molecule in a
GRanges object where each line represents one nucleotide.
}
\examples{
# TODO: add example file
# import.BED(system.file("extdata", "example.bed", package = "CAGEr"))
}
\seealso{
Other loadFileIntoGPos:
\code{\link{bam2CTSS}()},
\code{\link{import.CTSS}()},
\code{\link{import.bam.ctss}()},
\code{\link{import.bam}()},
\code{\link{import.bedCTSS}()},
\code{\link{import.bedScore}()},
\code{\link{loadFileIntoGPos}()},
\code{\link{moleculesGR2CTSS}()}
}
\concept{loadFileIntoGPos}
|
30b7aa2549e770896c6752b1db8d15ab9a789e67
|
24851be32893bfb1027b2a33164ef515fc4fb76b
|
/code/OLD/binning/totalproduction.r
|
ea8017ab88a0c76391be527942f247e173a5518d
|
[] |
no_license
|
qdread/forestlight
|
acce22a6add7ab4b84957d3e17d739158e79e9ab
|
540b7f0a93e2b7f5cd21d79b8c8874935d3adff0
|
refs/heads/master
| 2022-12-14T03:27:57.914726
| 2022-12-01T23:43:10
| 2022-12-01T23:43:10
| 73,484,133
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,763
|
r
|
totalproduction.r
|
# Correction factor for total production
# Exploratory script comparing observed binned production totals with
# integrals of the fitted prediction curves.
# NOTE(review): obs_totalprod, pred_totalprod, alltreedat, and
# plot_totalprod() are created elsewhere — this file is not self-contained.
prodtotals <- obs_totalprod %>%
  group_by(fg, year) %>%
  summarize(total = sum(bin_value * (bin_max-bin_min)))
# Log-spaced diameter grid and midpoint/width helpers for rectangle sums.
dbh_pred <- exp(seq(log(1.2), log(315), length.out = 101))
mids <- function(a) a[-length(a)] + diff(a)/2
dbh_binwidth <- diff(dbh_pred)
dbh_df <- data.frame(dbh = mids(dbh_pred), binwidth = dbh_binwidth)
prodintegrals <- pred_totalprod %>%
  left_join(dbh_df) %>%
  mutate(int = q50 / binwidth) %>%
  group_by(year, dens_model, prod_model, fg) %>%
  summarize(int = sum(int, na.rm = TRUE))
library(pracma)
# Trapezoidal integration is used.
# NOTE(review): this overwrites the rectangle-sum prodintegrals computed just
# above — only the trapezoidal version survives.
prodintegrals <- pred_totalprod %>%
  group_by(year, dens_model, prod_model, fg) %>%
  summarize_at(vars(starts_with('q')), funs(trapz(x = dbh, y = .)))
# Replot with predictions rescaled by the 0.7617 correction factor —
# presumably derived from the observed/integrated ratio; TODO confirm.
plot_totalprod(year_to_plot = 1995,
               fg_names = c('all'),
               model_fit_density = 'weibull',
               model_fit_production = 'powerlaw',
               y_limits = c(0.01, 500),
               y_breaks = c(0.1, 1, 10, 100),
               preddat = pred_totalprod %>% mutate_at(vars(starts_with('q')), funs(./0.7617)))
# Get the true numbers from the raw data.
area_core <- 42.84
library(purrr)
alltreeprod <- map_dbl(alltreedat, function(x) sum(x$production))
fgprod <- map_dfr(alltreedat, function(x) {
  x %>%
    mutate(fg = if_else(is.na(fg), 'unclassified', paste0('fg',fg))) %>%
    group_by(fg) %>%
    summarize(production = sum(production))
})
# Combine per-functional-group and all-tree totals per census year.
prodtotals <- rbind(data.frame(year = rep(seq(1985,2010,5), each=6), fgprod),
                    data.frame(year = seq(1985,2010,5), fg = 'alltree', production = alltreeprod)) %>%
  arrange(year, fg)
write.csv(prodtotals, file = 'C:/Users/Q/google_drive/ForestLight/data/production_total.csv', row.names = FALSE)
|
bc8d90dd053ad23c75d0dd36bdb6a7dc5645365e
|
f0a34c8e93e8aec9220ccc865b27d43f357f4646
|
/scripts/process-baby-names.R
|
2038116aa0ad5c72c776cb1cbc213681fb62b8f7
|
[] |
no_license
|
organisciak/names
|
4867c1512319d7333ed58145f4cd34b78471c94f
|
7f0632c442fff5fb3d73633ceb959ded2605d05f
|
refs/heads/master
| 2016-09-06T14:38:58.384023
| 2014-12-03T20:36:50
| 2014-12-03T20:51:50
| 26,832,651
| 45
| 38
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,973
|
r
|
process-baby-names.R
|
# Process SSA baby-name counts: aggregate by sex/decade, estimate how many
# people with each name are alive in 2014 via a survival adjustment, and
# derive name-gender likelihoods plus several "fun" lists.
library("data.table")
# Import name counts by state, sex, year
names <- fread("raw/us-names-by-gender-state-year.csv")
## Save name counts by sex
tmp <- names[, list(count=sum(count)), by=list(sex, name)][order(-count)]
write.csv(tmp, "data/us-names-by-gender.csv", quote=F, row.names=F)
## Smaller data:top 1000 names by sex
tmp <- names[sex=='F', list(count=sum(count)), by=list(sex,name)][order(-count)][1:1000]
write.csv(tmp[,list(name)], "lists/top-us-female-names.csv", quote=F, row.names=F)
tmp <- names[sex=='M', list(count=sum(count)), by=list(sex,name)][order(-count)][1:1000]
write.csv(tmp[,list(name)], "lists/top-us-male-names.csv", quote=F, row.names=F)
## Save name counts by decade
## Sorting by decade is newest-oldest
names[,decade:=year-(year %% 10),]
tmp <- names[, list(count=sum(count)), by=list(sex, name, decade)][order(-decade, -count)]
write.csv(tmp, "data/us-names-by-decade.csv", quote=F, row.names=F)
## Estimate name popularity order in 2014 by accounting for life expectancy
##
## P(alive|age)=P(age|alive)*P(alive)/P(age)
## where,
## P(age|alive) is the percentage of the 2014 population that is that age
## P(alive) is the current population divided by total American count.
## This is difficult to estimate (births+unique immigrants, but easier
## said than done), so currently, is ignored. Thus the COUNTS ARE NOT MEANINGFUL.
## P(age) is the percent of baby names at that age relative to all baby names.
ages <- fread("raw/us-population-by-age-and-sex-2014.csv")
# Collapse the oldest single years into the census's 5-year age buckets so
# they match the population table's categories.
names[, age:=as.factor(2014-year),]
names[age %in% as.character(100:104), age:="100+"]
names[age %in% as.character(95:99), age:="95-99"]
names[age %in% as.character(90:94), age:="90-94"]
names[age %in% as.character(85:89), age:="85-89"]
alive <- names[, list(count.in.names=sum(count)), by=list(sex, age)][,percent.of.names:=100*count.in.names/sum(count.in.names)]
ages <- merge(alive, ages, by=c("sex", "age"))
ages[,modifier:=percent/percent.of.names,]
# Rough P(alive) estimate, assuming most 1 year olds are alive
# In fact, we know how many don't survive by 1 year, 6.17/1000,
# but this would lend a false sense of precision
ages[, percent.alive:=modifier/modifier[1], by=list(sex)]
tmp <- merge(names, ages[,list(sex, age, percent.alive)], by=c("sex", "age"))
tmp[,alive.count:=round(count*percent.alive)]
names.in.2014 <- tmp[,percent.alive:=NULL]
rm(ages, alive, tmp)
## Save Living estimates per year
write.csv(names.in.2014[alive.count>10, list(state, sex, year, name, alive.count)], "data/us-living-estimate-names-by-sex-state-year.csv", quote=F, row.names=F)
## Save living estimate by gender/name. More useful.
tmp <- names.in.2014[, list(count=sum(alive.count)), by=list(sex, name)][order(-count)]
write.csv(tmp[count > 10],
          "data/us-living-estimate-names-by-sex.csv",
          quote=F, row.names=F)
write.csv(tmp[sex=='M'][1:1000][,list(name)], "lists/top-us-male-names-alive-in-2014.csv", quote=F, row.names=F)
write.csv(tmp[sex=='F'][1:1000][,list(name)], "lists/top-us-female-names-alive-in-2014.csv", quote=F, row.names=F)
## Save likelihood of name gender
# gender.prob = share of living bearers of this name who are this sex.
tmp[,gender.prob:=count/sum(count), by=name]
write.csv(tmp[gender.prob > 0.5 & count > 100][order(-gender.prob, -count)][,list(sex, name, gender.prob)],
          "data/us-likelihood-of-gender-by-name-in-2014.csv",
          quote=F, row.names=F)
## For fun: gender-neutral names that are fairly common in 2014 in the US
write.csv(tmp[gender.prob > 0.5 & count > 1000][order(gender.prob)][1:50][, list(name, sex, gender.prob)],
          "lists/us-50-gender-neutral-names.csv",
          quote=F, row.names=F)
## For fun: Names for US-born people that are all but dead
tmp <- names.in.2014[, list(name.survival=sum(alive.count)/sum(count)), by=list(sex, name)][name.survival > 0][order(name.survival)][1:1000][,name]
write.csv(tmp, "lists/us-dead-names.csv", quote=F, row.names=F)
|
7059d7d6d79f6c7d5e51ff27f7e52359840851ca
|
a82065b9c6b3313294bb57c1cdee55c454cd3de4
|
/MBE_Code/networkmodels/unrestrictedmodel_stochasticmigrations/migstep3_runno1.R
|
0dd4c7a5383a67732e34cc22886180370428db55
|
[] |
no_license
|
khanna7/circularmigrations
|
88e49661ee0fa46c4580a517f2f8b89d3026447f
|
536da8f01680a6d99e1b1733eb0a2504d8810a53
|
refs/heads/master
| 2020-05-20T13:11:16.020880
| 2015-10-03T15:07:06
| 2015-10-03T15:07:10
| 38,516,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,975
|
r
|
migstep3_runno1.R
|
#####################################################
### Progression of versions
#####################################################
## Goal: put all pieces together for one simulation file
## control file -- just modify the mig_step --
## all output is sent to an external csv file
## 20 May 2012: Modified mig-step to 3 to have multiple runs at the
## same value
## 20 April 2012: Implemented change so only one cumulative network
## object is saved, and cross-sectional network objects are obtained from that
## 18 April 2012: investigate saving
## 2 March 2012: organized for final runs
## 30 Jan 2012: After discussion with Steve:
## a. reduced late stage to 9 months (40 wks) from 2 years.
## consequently, initial distribution of times of infection
## had to be changed to uniform between (0, 12+500+40)
## b. added functionality to store networks in real-time.
## c. output incidence data
## 27 Jan 2012: simulate using new term in estimation to account
## for desired partnership structure
## 22 Jan 2012: Need to run simulations longer than 3000
## steps to get results
## 20 Jan 2012: Re-run simulations with 'time.infected' distributed
## between 0 and 616
## 17 Jan 2012: Fixed 'time.infected' attribute, was
## not getting updated initially. This change slows
## down infection transmission because infected people
## enter the long phase of chronic infection during which
## infection passes forward very slowly. Changed to 50
## infected women in urban and rural regions eachb to have
## enough sero-discordant ties at the start.
## 13 Jan 2012: After discussion with Steve,
## time.infected.fatal has been re-introduced.
## An infected individual dies 616 weeks after infection.
### do runs at intervals of increasing 3 weekly intervals
### 1 -- 30 weeks
## 3 Jan 2012: After conversation with Steve decided to not
## have a time.infected.fatal at all, instead for late-stage
## folks, there will be a reduced life expectancy.
## "inf.prob.death" re-labeled as "late.prob.death"
## 2 Jan 2012: added inf.prob.death to model reduced life-expectancy
## due to disease. had not coded this before.
## 1 Jan 2012: pop.rate.birth=625*8/(45*52) seems to work
## to produce a constant population in the absence of disease.
## experiment to see what happens particularly to total population
## size when we have disease in the population:
## infected: 1 rural + 1 urban female
## transmission_d6a.R contains the unvectorized transmission code
## which works
#####################################################
#####################################################
### Top-Matter
#####################################################
## --- Environment setup -------------------------------------------------
## NOTE(review): rm(list=ls()) wipes the caller's workspace; tolerable in a
## standalone batch script, dangerous if this file is ever sourced
## interactively.
rm(list=ls())
library(ergm)
## Pre-fitted model objects.  Presumably provides migration.fit.network,
## theta.network, diss.network, gamma.network, formula.network.w.offset,
## constraints.network and dyninterval, all used below -- TODO confirm
## the .RData contents.
load(file="est_dur100_meandeg12.RData")
source("../MSM.common.functions.R")
#source("../networkDynamic.access.R")
#source("../networkDynamic.simulation.R")
#source("../networkDynamic.cross.section.R")
#source("../networkDynamic.zzz.R")
source("../vital_dynamics_d6a.R") # 24 Mar 12: this version should
# have no k-star term
##source("../migration_wrapper_d1.R")
source("../transmission_6h.R")
source("../update_network_d5.R")
#####################################################
#####################################################
### Migration-Step (Most Important Variable)
#####################################################
## Weeks between migration moves; this is the knob varied across runs.
mig_step <- 3
runno <- 1
#####################################################''
#####################################################
### Network-Object
#####################################################
net <- migration.fit.network$network
#####################################################''
#####################################################
### Demography
#####################################################
## Birth rate chosen so the population stays constant absent disease
## (see version notes above: 625*8 births over a 45-year/52-week span).
pop.rate.of.birth <- 625*8/(45*52)
birth.type.prob <- c(1/8,1/4, 1/8, # type male: R, M, U
1/4,1/4) # type female: R, M, U
time.infected.fatal <- 12+500+40 # 31 Jan '12: after discussion with steve
std.prob.death <- 1/(45*52) # 45-year life expectancy
late.prob.death <- 1/(2*52) # 2 Jan 2012: had not added before
# though this is not used anymore because we have a time.infected.fatal
## for migration and disease transmission
#####################################################
### Biology
#####################################################
## Weekly per-act transmission probabilities by disease stage.
chronic.prob=0.0007
acute.prob=chronic.prob*26
late.prob=chronic.prob*7 # multiplied by 2.5 coital acts per week in
# transmission file
acute.chronic.transition=12 # check this from ode write-up
chronic.late.transition=500 # should be 500+12
late.stage.begin=acute.chronic.transition+chronic.late.transition
# no need to have
# this separately. should be the
#same as chronic.late.transition
#####################################################
### Other Simulation-Related Parameters
#####################################################
burnin.sim.network <- 25e3
ntimesteps <- 5e3
## Trim the fitted network to its last fully-active slice and re-activate
## all vertices/edges on [0, 1) so the simulation clock starts at 0.
net <- network.extract(net, max(net%v%"active")-1)
popsize <- network.size(net)
net %v% "vertex.names" <- 1:popsize
net <- activate.vertices(net, onset=0, terminus=1,
v=1:popsize)
net <- activate.edges(net, onset=0, terminus=1,
e=1:network.edgecount(net))
activenet <- network.extract(net, at = 0)
#####################################################
#####################################################
### Organize Output
#####################################################
## File names encode the condition, migration step and run number so
## parallel runs never collide.
condition <- "vanilla"
filename <- paste(condition, "mig-step",
mig_step, "runno", runno, ".csv", sep="")
saveobject <- paste(condition, "mig_step_",
"runno", runno,
mig_step,".RData", sep="")
real_time_cumnet <- paste("real_time_cumnets", saveobject,
sep=""
)
cum.nets <- list() # object to store cumulative network
#####################################################
for (timestep in 1:ntimesteps){
  ## FIX: the original called set.seed(Sys.time()) at the top of EVERY
  ## iteration.  Sys.time() has coarse (second) resolution, so iterations
  ## completing within the same second were re-seeded identically and
  ## produced repeated/correlated random draws.  Seed the RNG once.
  if (timestep == 1) set.seed(as.integer(Sys.time()))
  curr.time <- timestep
  prev.time <- curr.time-1  # NOTE(review): unused below; kept for compatibility
  ## 1. Vital dynamics: births, deaths (natural and late-stage disease)
  ##    and migration for this week; also logs to `filename`.
  net <- mig.vital.dynamics(net=net,
                            curr.time=curr.time,
                            pop.rate.of.birth=pop.rate.of.birth,
                            birth.type.prob=birth.type.prob,
                            time.infected.fatal=time.infected.fatal,
                            std.prob.death=std.prob.death,
                            late.prob.death=late.prob.death,
                            late.stage.begin=late.stage.begin,
                            filename=filename
                            )
  cat("total number of nodes is ", network.size(net),",\n",
      "total number of edges is ", network.edgecount(net), ",\n",
      "at time-step " , curr.time, ".\n")
  activenet <- network.extract(net, at = curr.time)
  ## 2. Partnership dynamics: simulate tie formation/dissolution for the
  ##    current cross-section (model objects come from the loaded .RData).
  net <- update.network(net=net,
                        curr.time=curr.time,
                        theta.network=theta.network,
                        burnin.sim.network=burnin.sim.network, # put in run file
                        diss.network=diss.network,
                        gamma.network=gamma.network,
                        formula.network.w.offset=formula.network.w.offset,
                        constraints.network=constraints.network,
                        activenet=activenet,
                        dyninterval=dyninterval
                        ## ## steve also has steady.model.change
                        ## ## but that is not relevant here
                        )
  ## Save the cumulative network after every step; the file is overwritten
  ## each iteration, so a crash loses at most one step of history.
  cum.nets <- net
  save(cum.nets, file=real_time_cumnet)
  active.net.updated <- network.extract(net, at = curr.time)
  cat("Number of alive edges is ", length(active.net.updated$mel), ".\n")
  ## 3. Disease transmission across active sero-discordant ties.
  net <- transmission(net=net,
                      mig_step=mig_step,
                      acute.prob=acute.prob,
                      chronic.prob=chronic.prob,
                      late.prob=late.prob,
                      acute.chronic.transition=acute.chronic.transition,
                      chronic.late.transition=chronic.late.transition,
                      curr.time=curr.time,
                      filename=filename
                      )
  cat("Total Number of Infected, after transmissions at time-step",
      curr.time,
      "is",
      length(which(net%v%"inf.status"==1)),
      ".\n", "\n")
}
#####################################################
#####################################################
### Final Object
#####################################################
## Persist the entire workspace (network, parameters, RNG state) for
## post-processing; `saveobject` was built in the output section above.
save.image(saveobject)
#####################################################
|
e4cadd9634d2b1cda4b980aa6a06ee067cd2d6ca
|
5ead2261701abdad8a99016e46f5c0ebeb7f99c4
|
/chenb.r
|
ad1a3cbb6a5569c4f455061ab2a75edb19bcd736
|
[] |
no_license
|
liuzh811/NEchina
|
b833027d68a4a34046cb16e0b46ca54fae711444
|
7f1eefa21b5513df4384522f10890fb63dbb26c7
|
refs/heads/master
| 2021-01-21T14:24:34.588130
| 2017-10-07T02:55:53
| 2017-10-07T02:55:53
| 57,230,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,149
|
r
|
chenb.r
|
#download MODIS brdf to VI data for BAED
## NOTE(review): despite the header, this script processes Landsat
## surface-reflectance products (cfmask/evi/ndmi), not MODIS BRDF.
# load libraries
library(rgdal)
library(raster)
#################### Data pre-processing #######################
# list files
# NOTE(review): setwd() to an absolute machine-specific path makes the
# script non-portable; consider parameterizing the working directory.
Work.Dir <- "D:/users/Zhihua/Landsat/p122024"
setwd(Work.Dir)
# read the polygon shapefile of the test region used to crop all rasters
testr1 = readOGR("D:\\users\\Zhihua\\Landsat\\p122024\\test_region", "testr3")
#list the zipped Landsat archives
fn = list.files(path = "./images.zipped", pattern = "*.tar.gz")
#create a folder for each image and extract the archive into it
for (i in 1:length(fn)){
dir.create(paste("./images.unzipped/", substr(fn[i], 1, 16), sep = ""))
folder.name = substr(fn[i], 1, 16)
untar(paste("./images.zipped/", fn[i], sep = ""),
exdir = paste("./images.unzipped/", folder.name, sep = ""))
print(paste("Finish Extracting", i," of ", length(fn), " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
# get year and day-of-year of each image acquisition from the file name
Year = substr(fn, 10, 13); DOY = substr(fn, 14, 16)
YearDOY = as.numeric(substr(fn, 10, 16))
YearDOY_sorted = sort(YearDOY)
#rearrange the order of the files chronologically
# NOTE(review): match(YearDOY_sorted, YearDOY) would do this in one call.
Index = c(); for (i in 1:length(YearDOY_sorted)){Index = c(Index, which(YearDOY_sorted[i]==YearDOY))}
folder.name = substr(fn, 1, 16) #get folder names
folder.name = folder.name[Index] #rearrange the order of the files
#accumulate the cloud-masked vegetation-index rasters (EVI, NDMI) in lists
evi.list = list()
ndmi.list = list()
scene.name = rep("tmp",length(folder.name))
for (i in 1:length(folder.name)){
path1 = paste("./images.unzipped/", folder.name[i], sep = "")
#cloud, 0 - clear data; 1 - water; 2-shadow, 3-snow; 4-cloud
cloud.nm = list.files(path = path1, pattern = "*cfmask.tif")[1]
cloudm = raster(paste(path1, "\\", cloud.nm, sep = ""))
cloudm = crop(cloudm, testr1)
scene.name[i] <- substr(cloud.nm, 1, 21)
#list EVI, masking every non-clear cfmask pixel
evi.nm = list.files(path = path1, pattern = "*evi.tif")[1]
evi = raster(paste(path1,"\\", evi.nm, sep = ""))
evi = crop(evi, testr1)
evi[cloudm > 0] = NA
evi.list[[i]] <- evi
#list NDMI, same masking
ndmi.nm = list.files(path = path1, pattern = "*ndmi.tif")[1]
ndmi = raster(paste(path1,"\\", ndmi.nm, sep = ""))
ndmi = crop(ndmi, testr1)
ndmi[cloudm > 0] = NA
ndmi.list[[i]] <- ndmi
print(paste("Finish Listing files ", i," of ", length(folder.name), " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
evi.list = stack(evi.list)
ndmi.list = stack(ndmi.list)
names(evi.list) <- scene.name
names(ndmi.list) <- scene.name
writeRaster(evi.list,"D:\\users\\Zhihua\\Landsat\\p122024\\results\\evi.grd", overwrite=TRUE) #write a raster stack file
writeRaster(ndmi.list,"D:\\users\\Zhihua\\Landsat\\p122024\\results\\ndmi.grd", overwrite=TRUE) #write a raster stack file
#composite for each year: per-scene pixel scores combine distance from
#cloud/shadow with a day-of-year preference
Year_uni = sort(as.numeric(unique(Year)))
for (i in Year_uni){
score.list = list()
b1.list = list()
b2.list = list()
b3.list = list()
b4.list = list()
b5.list = list()
b7.list = list()
folder.name1 = folder.name[which(as.numeric(substr(folder.name, 10, 13)) == i)]
for(j in 1:length(folder.name1)){
path1 = paste("./images.unzipped/", folder.name1[j], sep = "")
#calculate cloud score
#cloud, 0 - clear data; 1 - water; 2-shadow, 3-snow; 4-cloud
cloud.nm = list.files(path = path1, pattern = "*cfmask.tif")[1]
cloudm = raster(paste(path1, "\\", cloud.nm, sep = ""))
cloudm = crop(cloudm, testr1)
cloudm = cloudm == 2 | cloudm == 4
cloudm.dist = gridDistance(cloudm, origin=1)
# NOTE(review): Score_could() and Score_doy() are not defined in this
# file chunk -- presumably sourced elsewhere; confirm before running.
cloudm.score = calc(cloudm.dist, Score_could)
#calculate DOY score
doy1 = as.numeric(substr(cloud.nm, 14,16))
doy.score = Score_doy(doy1)
total.score = cloudm.score + doy.score
total.score[cloudm > 0] = NA
score.list[[j]] <- total.score
#list band reflectance
## Read the six reflectance bands for scene j, cropped to the test
## region and QA-masked (cloud/shadow pixels and reflectance > 10000
## become NA).  TM/ETM+ (LT5/LE7) products use bands 1-5,7; OLI (LC8)
## products shift one band up (2-7).  The two ~40-line copy-paste
## branches of the original are collapsed into one helper; the values
## stored into b1.list..b7.list[[j]] are unchanged.
read.sr.band <- function(band.tag) {
  nm = list.files(path = path1, pattern = paste("*sr_band", band.tag, ".tif", sep = ""))[1]
  r = raster(paste(path1, "\\", nm, sep = ""))
  r = crop(r, testr1)
  r[cloudm > 0] = NA   # drop cloud/shadow pixels
  r[r > 10000] = NA    # drop out-of-range reflectance
  r
}
if (substr(cloud.nm, 1, 3) == "LT5" | substr(cloud.nm, 1, 3) == "LE7") { #for LT and LE
band.tags = c(1, 2, 3, 4, 5, 7)
} else { #for LC
band.tags = c(2, 3, 4, 5, 6, 7)
}
b1.list[[j]] <- read.sr.band(band.tags[1])
b2.list[[j]] <- read.sr.band(band.tags[2])
b3.list[[j]] <- read.sr.band(band.tags[3])
b4.list[[j]] <- read.sr.band(band.tags[4])
b5.list[[j]] <- read.sr.band(band.tags[5])
b7.list[[j]] <- read.sr.band(band.tags[6]) #end of list
print(paste("Finish listing files for Year ", i,", ", j," of ", length(folder.name1), " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
score.list = stack(score.list)
b1.list = stack(b1.list)
b2.list = stack(b2.list)
b3.list = stack(b3.list)
b4.list = stack(b4.list)
b5.list = stack(b5.list)
b7.list = stack(b7.list)
# Crop all layers to the common (intersection) extent.
# NOTE(review): `ext`, `ledaps.list`, `tm.year`, `scenor`, `s1` and
# `scene_id` are not defined anywhere in this chunk -- this section looks
# pasted in from a different script and will fail as written; verify
# against the original source before running.
ext = c(max(ext[,1]),min(ext[,2]),max(ext[,3]),min(ext[,4]))
ledaps.list2 = list()
#tca.list2 = list()
#tc.list2 = list()
for (k in 1:length(ledaps.list)){
ledaps = crop(ledaps.list[[k]], ext)
#tca = crop(tca.list[[k]], ext)
#tc = crop(tc.list[[k]], ext)
extent(ledaps) <- ext
#extent(tca) <- ext
#extent(tc) <- ext
ledaps.list2[[k]] <- ledaps
#tca.list2[[k]] <- tca
#tc.list2[[k]] <- tc
print(paste("Cropping:::: ", scenor[s1], '::', scene_id[i], " for year ", tm.year[j], " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
#compositing
# QA-screen each scene's six bands before averaging: drop reflectance
# values outside the valid 0-10000 range.
# FIX(review): the original stored every band into slot `i` (the outer
# year counter), so each pass through `k` overwrote the same list
# element, and `b6.list` was filled with `b1` by a copy-paste slip.
# Both are corrected below to use `k` and the matching band.
# NOTE(review): `ledaps.list`/`ledaps.list2` come from the pasted-in
# cropping section above and are otherwise undefined in this chunk.
b1.list = list()
b2.list = list()
b3.list = list()
b4.list = list()
b5.list = list()
b6.list = list()
#tc1.list = list()
#tc2.list = list()
#tc3.list = list()
for (k in seq_along(ledaps.list)){
b1 = ledaps.list2[[k]][[1]]
b1[b1 > 10000 | b1 < 0] = NA
b1.list[[k]] <- b1
b2 = ledaps.list2[[k]][[2]]
b2[b2 > 10000 | b2 < 0] = NA
b2.list[[k]] <- b2
b3 = ledaps.list2[[k]][[3]]
b3[b3 > 10000 | b3 < 0] = NA
b3.list[[k]] <- b3
b4 = ledaps.list2[[k]][[4]]
b4[b4 > 10000 | b4 < 0] = NA
b4.list[[k]] <- b4
b5 = ledaps.list2[[k]][[5]]
b5[b5 > 10000 | b5 < 0] = NA
b5.list[[k]] <- b5
b6 = ledaps.list2[[k]][[6]]
b6[b6 > 10000 | b6 < 0] = NA
b6.list[[k]] <- b6
#tc1.list = tc.list2[[k]][[1]]
#tc2.list = tc.list2[[k]][[2]]
#tc3.list = tc.list2[[k]][[3]]
print(paste("Compositing:::: ", scenor[s1], '::', scene_id[i], " for year ", tm.year[j], " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
b1.list = stack(b1.list)
b2.list = stack(b2.list)
b3.list = stack(b3.list)
b4.list = stack(b4.list)
b5.list = stack(b5.list)
b6.list = stack(b6.list)
#tc1.list = stack(tc1.list)
#tc2.list = stack(tc2.list)
#tc3.list = stack(tc3.list)
#tca.list2 = stack(tca.list2)
# Average across scenes (per pixel, ignoring NA) when more than one
# scene contributed to this year.
if (nlayers(b1.list) > 1) {
b1.list = calc(b1.list, mean, na.rm = TRUE)
b2.list = calc(b2.list, mean, na.rm = TRUE)
b3.list = calc(b3.list, mean, na.rm = TRUE)
b4.list = calc(b4.list, mean, na.rm = TRUE)
b5.list = calc(b5.list, mean, na.rm = TRUE)
b6.list = calc(b6.list, mean, na.rm = TRUE)
#tc1.list = calc(tc1.list, mean, na.rm = TRUE)
#tc2.list = calc(tc2.list, mean, na.rm = TRUE)
#tc3.list = calc(tc3.list, mean, na.rm = TRUE)
#tca.list2 = calc(tca.list2, mean, na.rm = TRUE)
#stacking:
}
# Write the six-band composite for this scene/year.
# NOTE(review): `output.dir`, `scene_id`, `scenor`, `s1` and `basename`
# are undefined in this chunk -- confirm against the source script.
b = stack(b1.list,b2.list,b3.list,b4.list,b5.list,b6.list)
#tc = stack(tc1.list,tc2.list,tc3.list)
writeRaster(b,
paste(output.dir, scene_id[i], "/",scenor[s1], substr(basename[1], 4,13), "_reflectance_composite.grd", sep = ""),
overwrite=TRUE) #write a raster stack files
#writeRaster(tc,paste(output.dir, scene_id[i], "/LT", substr(basename[1], 4,13), "_tc_composite.grd", sep = ""), overwrite=TRUE) #write a raster stack files
#writeRaster(tca,paste(output.dir, scene_id[i], "/LT", substr(basename[1], 4,13), "_tca_composite.tif", sep = ""), format="GTiff",overwrite=TRUE) #write a raster stack files
removeTmpFiles(h=24) # remove temporary files older than 24 hours
} # end of year
# NOTE(review): the next closing brace has no matching opening brace in
# this chunk (the "scene" loop it refers to was edited out) -- as written
# this is a syntax error; verify against the original script.
} # end of scene
#} # end of scenor
#} # end of scenor
#doing some plots: in ROSA
setwd("D:/users/Zhihua/Landsat/XinganImages/output")
library(raster)
library(rgdal)
library(rasterVis)
library(colorRamps)
# Study-area boundary and WRS-2 footprints, reprojected to match.
dxal.sp = readOGR(dsn="D:/users/Zhihua/Landsat/XinganImages/boundry",layer="dxal_bj_proj_polygon")
wrs2.sp = readOGR(dsn="D:/users/Zhihua/Landsat/XinganImages/boundry",layer="wrs2_descending_dxal")
wrs2.sp = spTransform(wrs2.sp, projection(dxal.sp))
spectral.indices = c("tca", "tcb","tcg","tcw")
spectral.indices.names = c("TC Angle", "TC Brightness","TC Greenness","TC Wetness")
for (j in 1:length(spectral.indices)){
# NOTE(review): this debug override pins j to 3 ("tcg"), so the loop body
# runs four times on the same index and rewrites the same PNG. Remove the
# next line to plot all four indices as the loop header intends.
j = 3
tca.files = list()
for (i in 2000:2015){
tca.file = raster(paste("./merge_files/",spectral.indices[j], "_",i, "_merge.tif", sep = ""))
#tca.file = crop(tca.file, dxal.sp)
tca.file = crop(tca.file, dxal.sp[3,])
tca.files[[i - 1999]] <- tca.file
print(paste("Finishing listing :::: Year ", i, " for ", spectral.indices[j], " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
tca.files = stack(tca.files)
#rasterVis plot
names(tca.files) <- paste("Year", 2000:2015, sep = "")
color_tc3 = rev(rainbow(99, start=0,end=1))
color_tc32 = rev(blue2green2red(99))
qu.val = quantile(as.vector(as.matrix(tca.files)), prob = c(0.01, 0.99), na.rm = T)
# NOTE(review): the next two assignments are immediately overwritten by
# the quantile-based breaks below -- dead code left from an earlier run.
breaks_tc3 <- round(seq(min(minValue(tca.files)),max(maxValue(tca.files)),length.out=100),3)
legendbrks2_tc3 <- round(seq(min(minValue(tca.files)),max(maxValue(tca.files)),length.out=10),0)
breaks_tc3 <- round(seq(min(qu.val[1]),max(qu.val[2]),length.out=100),3)
legendbrks2_tc3 <- round(seq(min(qu.val[1]),max(qu.val[2]),length.out=10),0)
png(paste("annual_", spectral.indices[j], ".png", sep = ""),height = 6000, width = 6000, res = 300, units = "px")
# NOTE(review): inside a for loop a lattice object must be print()ed to
# render; as written the PNG may come out blank -- verify on a test run.
levelplot(tca.files,
main=paste("Annual ", spectral.indices[j], " Map", sep = ""),
at= breaks_tc3, margin=FALSE,
maxpixels = nrow(tca.files)*ncol(tca.files),
col.regions=color_tc32,
colorkey= list(labels= list(labels= legendbrks2_tc3,at= legendbrks2_tc3, cex = 1.5), space = "bottom"),
layout=c(4, 4))+
latticeExtra::layer(sp.polygons(dxal.sp, col = "black", lwd = 2))
#+ latticeExtra::layer(sp.polygons(wrs2.sp, col = "black", lty = 2, lwd = 1))
dev.off()
} # end of j
#some statistics: annual numbers of LT, LE, and LC for each scene
#replace bad (non-clear) pixels with NA in the evi/nbr/nbr2 products
library(raster)
library(rgdal)
library(rasterVis)
rasterOptions(tmpdir="D:/users/Zhihua/Landsat/XinganImages/TempDir2")
scene_id = c("122023","122024","121023","120023","120024", "121025","120025","123023","121024")
scene_id = rev(scene_id)
#create folders, if folder NOT exist, create it
mainDir <- "D:/users/Zhihua/Landsat/XinganImages2/"
setwd(mainDir)
for (i in 1:length(scene_id)){
for (j in 1999:2015){
files = list.files(path = paste("./Images.unzipped/",scene_id[i], "/", j, sep = ""), pattern = "*.cfmask.tif$")
basenames = substr(files, 1, 21)
if (length(files) >= 1){
for (k in 1:length(basenames)){
#cfmask: 0 = clear; anything else (water/shadow/snow/cloud) becomes NA,
#so multiplying by `cloud` (clear=1, else NA) masks the VI products
cloud = raster(paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_cfmask.tif",sep = ""))
cloud[cloud > 0] = NA
cloud[cloud == 0] = 1
evi = raster(paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_sr_evi.tif",sep = ""))
evi = evi*cloud
nbr = raster(paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_sr_nbr.tif",sep = ""))
nbr = nbr*cloud
nbr2 = raster(paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_sr_nbr2.tif",sep = ""))
nbr2 = nbr2*cloud
#write the masked products alongside the originals with a "-2" suffix
writeRaster(evi,paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_sr_evi-2.tif",sep = ""), format="GTiff", overwrite=TRUE)
writeRaster(nbr,paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_sr_nbr-2.tif",sep = ""), format="GTiff", overwrite=TRUE)
writeRaster(nbr2,paste("./Images.unzipped/",scene_id[i], "/", j, "/",basenames[k], "_sr_nbr2-2.tif",sep = ""), format="GTiff", overwrite=TRUE)
}
}
removeTmpFiles(h=24) # remove temporary files older than 24 hours
print(paste("Finishing replacing :::: Year ", j, " for scene ", scene_id[i], " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
}
}
#calculate vegetation indices for 2011
# NOTE(review): `year.file.2011`, `basename.2011`, `fire.sp`, `field.sp`
# and the later `file.2012` are not defined in this chunk -- presumably
# created earlier in the full script; confirm before running.
file.2011 = list()
for (i in 1:length(year.file.2011)){
cloud = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_cfmask.tif", sep = ""))
cloud = crop(cloud, fire.sp[1,])
b1 = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_sr_band1.tif", sep = ""))
b1 = crop(b1, fire.sp[1,])
b2 = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_sr_band2.tif", sep = ""))
b2 = crop(b2, fire.sp[1,])
b3 = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_sr_band3.tif", sep = ""))
b3 = crop(b3, fire.sp[1,])
b4 = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_sr_band4.tif", sep = ""))
b4 = crop(b4, fire.sp[1,])
b5 = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_sr_band5.tif", sep = ""))
b5 = crop(b5, fire.sp[1,])
b6 = raster(paste(".\\XinganImages2\\Images.unzipped\\122024\\2011", "\\", basename.2011[i],"_sr_band7.tif", sep = ""))
b6 = crop(b6, fire.sp[1,])
#scale to reflectance and mask non-clear pixels
stk = stack(b1,b2,b3,b4,b5,b6)
stk = stk/10000
stk[cloud != 0] = NA
#standard spectral indices from the six-band stack
ndvi = (stk[[4]]-stk[[3]])/(stk[[4]]+stk[[3]])
evi = 2.5*(stk[[4]]-stk[[3]])/(stk[[4]]+2.4*stk[[3]]+1)
savi = 1.5*(stk[[4]]-stk[[3]])/(stk[[4]]+stk[[3]]+0.5)
ndwi = (stk[[4]]-stk[[5]])/(stk[[4]]+stk[[5]])
nbr = (stk[[4]]-stk[[6]])/(stk[[4]]+stk[[6]])
nbr2 = (stk[[5]]-stk[[6]])/(stk[[5]]+stk[[6]])
#calculate TC (tasseled cap) components; coefficients depend on sensor
if(substr(basename.2011[i],1,2) == "LE"){
brightness = 0.3561*stk[[1]]+0.3972*stk[[2]]+0.3904*stk[[3]]+0.6966*stk[[4]]+0.2286*stk[[5]]+0.1596*stk[[6]] #brightness
greenness = -0.3344*stk[[1]]-0.3544*stk[[2]]-0.4556*stk[[3]]+0.6966*stk[[4]]-0.0242*stk[[5]]-0.2630*stk[[6]] #greenness
wetness = 0.2626*stk[[1]]+0.2141*stk[[2]]+0.0926*stk[[3]]+0.0656*stk[[4]]-0.7629*stk[[5]]-0.5388*stk[[6]] #wetness
} else if (substr(basename.2011[i],1,2) == "LT"){
brightness = 0.3037*stk[[1]]+0.2793*stk[[2]]+0.4343*stk[[3]]+0.5585*stk[[4]]+0.5082*stk[[5]]+0.1863*stk[[6]] #brightness
greenness = -0.2848*stk[[1]]-0.2435*stk[[2]]-0.5436*stk[[3]]+0.7246*stk[[4]]+0.0840*stk[[5]]-0.18*stk[[6]] #greenness
wetness = 0.1509*stk[[1]]+0.1793*stk[[2]]+0.3299*stk[[3]]+0.3406*stk[[4]]-0.7112*stk[[5]]-0.4572*stk[[6]] #wetness
}
tca = greenness/brightness
file.2011.st = stack(stk, ndvi, evi,savi, ndwi, nbr, nbr2, brightness, greenness, wetness, tca)
names(file.2011.st) <- c("b1","b2","b3","b4","b5","b7","ndvi", "evi","savi", "ndwi", "nbr", "nbr2", "tcb", "tcg", "tcw", "tca")
file.2011[[i]] <- file.2011.st
}
#extract values at the field plots
le.2011.df = data.frame(extract(file.2011[[1]],field.sp))
lt.2011.df = data.frame(extract(file.2011[[2]],field.sp))
le.2012.df = data.frame(extract(file.2012[[1]],field.sp))
field.df2 = field.sp@data[,c("CoverUnderstory", "DensityTotal","UnderstoryANPP","TotalANPP","DBH","TreeANPP")]
field.df2$UnderstoryANPPper = field.df2$UnderstoryANPP/field.df2$TotalANPP
field.df2$aboveANPPper = 1-field.df2$UnderstoryANPPper
field.df2$DensityTotal = log(field.df2$DensityTotal)
field.df2$TotalANPP = log(field.df2$TotalANPP)
field.df2$TreeANPP = log(field.df2$TreeANPP)
###############################################################################################################
# calculate vegetation indices in the peak growing season
# DO NOT composite; only select the one image closest to the peak growing season
# preference on sensors: 7 > 5 > 8
proj.geo = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"
proj.utm = "+proj=utm +zone=51 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
proj.utm52 = "+proj=utm +zone=52 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
fire.sp = readOGR(dsn=".\\XinganImages\\boundry",layer="FirePatch")
fire.sp@data$Id2 = 1:nrow(fire.sp)
fire.sp@data$area = sapply(slot(fire.sp, "polygons"), slot, "area")
fire.sp.utm52 = spTransform(fire.sp, proj.utm52)
fire.sp.utm51 = fire.sp
for (i in 1:nrow(fire.sp)){
#go to the fire's path/row, and list the per-year directories
dirs = list.dirs(paste("./XinganImages2/Images.unzipped/", fire.sp[i,]$wrspr, sep = ""), recursive=FALSE)
for(j in 1:length(dirs)){ #for each year
year.file = list.files(path = dirs[j], pattern = "*cfmask.tif$")
if (length(year.file) > 0) { #must have files under the folder
# NOTE(review): `basename` shadows base::basename() for the rest of
# this loop body.
basename = substr(year.file, 1,21)
basename.doy = as.numeric(substr(basename, 14, 16))
basename.sensor = substr(basename, 1, 2)
#change projection if they are not the same
proj = raster(paste(dirs[j], "\\", basename[1],"_cfmask.tif", sep = ""))
proj = projection(proj)
if (proj == proj.utm) {fire.sp = fire.sp.utm51} else {fire.sp = fire.sp.utm52}
#select one image to calculate the vegetation indices, using a weighted
#preference: peak-season DOY (200-245) scores 4, otherwise 2;
#sensor preference LE (3) > LT (2) > other (1)
basename.doy = as.numeric(substr(basename, 14, 16))
basename.sensor = substr(basename, 1, 2)
w.df = data.frame(doy = basename.doy, sensor = basename.sensor)
w.df$doy2 = 1
w.df$doy2[w.df$doy >= 200 & w.df$doy <= 245] = 4
w.df$doy2[w.df$doy < 200 | w.df$doy > 245] = 2
w.df$sensor2 = 1
w.df$sensor2[w.df$sensor == "LE"] = 3
w.df$sensor2[w.df$sensor == "LT"] = 2
w.df$weight = w.df$doy2 + w.df$sensor2
k = which.max(w.df$weight)[1]
files = list()
#for (k in 1:length(year.file)){
## Read the cfmask layer and six reflectance bands for the selected
## scene, cropped to the current fire perimeter.  TM/ETM+ (LT/LE)
## products use bands 1-5,7; OLI (LC) products shift one band up (2-7).
## The two duplicated per-sensor blocks of the original are collapsed
## into one helper; stored values are unchanged.
read.fire.band <- function(band.tag) {
  r = raster(paste(dirs[j], "\\", basename[k], "_sr_band", band.tag, ".tif", sep = ""))
  crop(r, fire.sp[i,])
}
cloud = raster(paste(dirs[j], "\\", basename[k], "_cfmask.tif", sep = ""))
cloud = crop(cloud, fire.sp[i,])
if (substr(basename[k], 1, 2) == "LC") {
band.tags = c(2, 3, 4, 5, 6, 7)
} else {
# LE / LT band layout; also acts as a fallback for unexpected prefixes,
# which the original left undefined (it would have reused stale values).
band.tags = c(1, 2, 3, 4, 5, 7)
}
b1 = read.fire.band(band.tags[1])
b2 = read.fire.band(band.tags[2])
b3 = read.fire.band(band.tags[3])
b4 = read.fire.band(band.tags[4])
b5 = read.fire.band(band.tags[5])
b6 = read.fire.band(band.tags[6])
#mask non-clear pixels and scale to reflectance
stk = stack(b1,b2,b3,b4,b5,b6)
stk[cloud != 0] = NA
stk = stk/10000
#standard spectral indices from the six-band stack
ndvi = (stk[[4]]-stk[[3]])/(stk[[4]]+stk[[3]])
evi = 2.5*(stk[[4]]-stk[[3]])/(stk[[4]]+2.4*stk[[3]]+1)
savi = 1.5*(stk[[4]]-stk[[3]])/(stk[[4]]+stk[[3]]+0.5)
ndwi = (stk[[4]]-stk[[5]])/(stk[[4]]+stk[[5]])
nbr = (stk[[4]]-stk[[6]])/(stk[[4]]+stk[[6]])
nbr2 = (stk[[5]]-stk[[6]])/(stk[[5]]+stk[[6]])
#calculate TC (tasseled cap) components; coefficients depend on sensor
if(substr(basename[k],1,2) == "LE"){
brightness = 0.3561*stk[[1]]+0.3972*stk[[2]]+0.3904*stk[[3]]+0.6966*stk[[4]]+0.2286*stk[[5]]+0.1596*stk[[6]] #brightness
greenness = -0.3344*stk[[1]]-0.3544*stk[[2]]-0.4556*stk[[3]]+0.6966*stk[[4]]-0.0242*stk[[5]]-0.2630*stk[[6]] #greenness
wetness = 0.2626*stk[[1]]+0.2141*stk[[2]]+0.0926*stk[[3]]+0.0656*stk[[4]]-0.7629*stk[[5]]-0.5388*stk[[6]] #wetness
} else if (substr(basename[k],1,2) == "LT"){
brightness = 0.3037*stk[[1]]+0.2793*stk[[2]]+0.4343*stk[[3]]+0.5585*stk[[4]]+0.5082*stk[[5]]+0.1863*stk[[6]] #brightness
greenness = -0.2848*stk[[1]]-0.2435*stk[[2]]-0.5436*stk[[3]]+0.7246*stk[[4]]+0.0840*stk[[5]]-0.18*stk[[6]] #greenness
wetness = 0.1509*stk[[1]]+0.1793*stk[[2]]+0.3299*stk[[3]]+0.3406*stk[[4]]-0.7112*stk[[5]]-0.4572*stk[[6]] #wetness
} else if (substr(basename[k],1,2) == "LC"){
brightness = 0.3029*stk[[1]]+0.2786*stk[[2]]+0.4733*stk[[3]]+0.5599*stk[[4]]+0.508*stk[[5]]+0.1872*stk[[6]] #brightness
greenness = -0.2941*stk[[1]]-0.243*stk[[2]]-0.5424*stk[[3]]+0.7276*stk[[4]]+0.0713*stk[[5]]-0.1608*stk[[6]] #greenness
wetness = 0.1511*stk[[1]]+0.1973*stk[[2]]+0.3283*stk[[3]]+0.3407*stk[[4]]-0.7117*stk[[5]]-0.4559*stk[[6]] #wetness
}
tca = greenness/brightness
# NOTE(review): the result object keeps the name `file.2011.st` even
# though this section handles arbitrary years -- works, but misleading.
file.2011.st = stack(stk, ndvi, evi,savi, ndwi, nbr, nbr2, brightness, greenness, wetness, tca)
names(file.2011.st) <- c("b1","b2","b3","b4","b5","b7","ndvi", "evi","savi", "ndwi", "nbr", "nbr2", "tcb", "tcg", "tcw", "tca")
files[[1]] <- file.2011.st
#} end of k
#write the vegetation indices for this fire/year
writeRaster(files[[1]],
paste("./analysis_results/fire_", i,"_", substr(dirs[j], nchar(dirs[j])-3, nchar(dirs[j])),
substr(basename[k], 1, 2),substr(basename[k], 14, 16),".grd", sep = ""),
overwrite=TRUE) #write a raster stack files
print(paste("Finishing for :::: Year ", dirs[j], " for fire ", i, " at ", format(Sys.time(), "%a %b %d %X %Y"), sep = " ") )
} # END of if (length(year.file)>0)
} #end of J
} # end of i
# END OF only using 1 image for each year
|
97d6c842e3495900b985e2851d78cf17a1222fc1
|
5426b7385d33b4b218a823df29347bba654a5c30
|
/nanopore_qc.R
|
9d384d72f1bf97bc6b8948216ac27b1bb2cf1bec
|
[] |
no_license
|
timplab/moth
|
9b5fe32da79d6c486f276f56cfa7073c757b5efb
|
19f6ce262357acb2abc8dab6412f533564f7feba
|
refs/heads/master
| 2022-12-07T12:14:28.340198
| 2020-09-01T00:44:11
| 2020-09-01T00:44:11
| 198,861,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
nanopore_qc.R
|
## QC script for nanopore fastq data: computes read-length summary statistics
## (N50, total yield, quantiles) and plots a base-weighted read-length histogram.
## Default input variable
sampdir="/kyber/Data/Nanopore/projects/moth/SRA_data"
# NOTE(review): `modif` is assigned but never used below -- confirm it can be removed.
modif="blah"
## Plots go here:
#plotdir=file.path(sampdir)
## Load libraries and sources
library(Biostrings)
library(ShortRead)
library(ggplot2)
library(reshape2)
library(plyr)
library(tidyverse)
# List fastq files in the sample directory.
# NOTE(review): `pattern` is a regular expression, not a glob; "*fastq" only
# works because the engine tolerates a dangling '*' -- "fastq" or "\\.fastq$"
# would be safer. Also prefer TRUE over T for `full.names`.
fastq=dir(path=sampdir, pattern="*fastq", full.names=T)
fq=readFastq(fastq)
## Read length plot
# Sort read lengths ascending and accumulate cumulative bases; as.numeric()
# avoids integer overflow of cumsum() on large total yields.
r.length=tibble(len=width(fq)) %>%
arrange(len) %>%
mutate(cumbp=cumsum(as.numeric(len)))
# Summary statistics; N50 = read length at which half the total yield is reached.
stat.sum=summarize(r.length, n50=len[which.min(abs(cumbp-max(cumbp)/2))],
tot.yield=max(cumbp),
tot.reads=n(),
med.len=median(len),
max.len=max(len),
q75=quantile(len, .75),
q95=quantile(len, .95))
# Base-weighted read-length histogram (y axis in megabases, log-scaled x) with
# the N50 marked by a vertical line and annotated in kb.
print(ggplot(r.length, aes(x=len, weight = len, y = stat(count/1e6)))+geom_histogram(bins=100, fill="orange")+scale_x_log10()+geom_vline(xintercept=stat.sum$n50, color="blue", size=1)+
annotate("text", label =paste0("N50= ", stat.sum$n50/1e3, " kb"), x=stat.sum$n50, y= Inf, vjust=2, hjust=-0.1, color="blue")+theme_classic()+labs(x= "Read length", y = "Number of bases (Mb)"))
|
89f2f4ecd818521b41caf8cd1a2cc78fefe31b28
|
d160c0b746059bc1c516e32cc5971c5cc5d5d9d4
|
/R/OpencgaFiles-methods.R
|
6ce83f1cb1e6a3057e66390600ae10afe8b15dd0
|
[
"Apache-2.0"
] |
permissive
|
melsiddieg/opencgaR
|
093b4806f5077a6bab5162ac47ecebd18f40810b
|
037f1bdd41fdfcccad1da47f0adf4d5251c0575b
|
refs/heads/master
| 2021-01-15T22:51:36.714130
| 2017-08-10T12:29:08
| 2017-08-10T12:29:08
| 99,919,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 592
|
r
|
OpencgaFiles-methods.R
|
#' @title A method to query Opencga Files
#' @aliases OpencgaFiles
#' @description This method allows the user to create, update and explore
#' file data and metadata.
#' @param object An Opencga connection object.
#' @param id Optional file identifier; \code{NULL} queries across all files.
#' @param action The web-service action to execute (e.g. search, info).
#' @param params Optional named list of additional query parameters.
#' @param ... Further arguments forwarded to the REST call.
#' @export
setMethod("OpencgaFiles", "Opencga", definition = function(object,
          id=NULL, action, params=NULL, ...){
    # REST resource category handled by this method.
    category <- "files"
    # NOTE: the original re-assigned NULL to `id`/`params` when they were
    # already NULL -- pure no-ops; those branches have been removed.
    data <- excuteOpencga(object=object, category=category, id=id, action=action,
                          params=params, ...)
    return(data)
})
|
b0a9d7f062a22beee8e9af2f01c96e4b8190fb59
|
004bd8664f1e19040c71442d01ad1847c535ff38
|
/data/test_cases/Step1b_build_data_cuba.R
|
494956d77f9c1c8485a2c0faf4a49b8412502d6f
|
[] |
no_license
|
MS-COM/mscom
|
ef2ddf27f49d169c3df3b650347df3d6aa5bebe4
|
45043a4f505b74972284778c3772bc5a8d68b14e
|
refs/heads/master
| 2021-05-11T13:05:24.821468
| 2018-11-28T06:22:11
| 2018-11-28T06:22:11
| 117,671,098
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,609
|
r
|
Step1b_build_data_cuba.R
|
# Setup
################################################################################
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the caller's environment; acceptable for
# a standalone pipeline script, but do not source this file interactively.
rm(list = ls())
# Turn off sci notation
options(scipen=999)
# Packages
library(rio)
library(plyr)
library(dplyr)
library(tidyr)
library(reshape2)
library(RColorBrewer)
# Directories
datadir <- "data/test_cases/original"
outdir <- "data/test_cases/data"
tabledir <- "tables"
# Format data
################################################################################
# Read data
data_orig <- import(file.path(datadir, "Original_A_Catch_1981-2015.xlsx"))
# Format data: drop the ID column, replace spaces in column names with
# underscores, rename key columns to English, reshape the wide year columns
# into long (year, catch) rows, and sort by species and year.
data <- data_orig %>%
select(-ID) %>%
setNames(., gsub(" ", "_", colnames(.))) %>%
rename(comm_name=Nombre_Común, sci_name=Scientific_name) %>%
melt(id.vars=c("comm_name", "sci_name"), variable.name="year", value.name="catch") %>%
mutate(year=as.numeric(as.character(year))) %>%
arrange(comm_name, year)
# Check scientific names
# NOTE(review): freeR is a GitHub-only package; this call is interactive QA
# only -- its result is not used downstream.
freeR::suggest_names(unique(data$sci_name))
# Summarize catch: mean annual catch per species after 2005, its percentage
# share of the total, plus formatted label/value columns for the report table.
catch_sum <- data %>%
filter(year>2005) %>%
group_by(comm_name, sci_name) %>%
summarize(c_avg=mean(catch, na.rm=T)) %>%
ungroup() %>%
mutate(c_perc=c_avg/sum(c_avg)*100,
name_format=paste0(comm_name, " (", sci_name, ")"),
c_format=paste0(round(c_avg,1), " / ", round(c_perc, 1), "%")) %>%
select(name_format, comm_name, sci_name, c_avg, c_perc, c_format) %>%
arrange(desc(c_perc))
# Export data
write.csv(data, file.path(outdir, "cuban_nearshore_finfish_catch_data.csv"), row.names=F)
write.csv(catch_sum, file.path(tabledir, "TableX_cuba_summary.csv"), row.names=F)
|
a4e8e408941cbac82f05cb238f54c04e7beb7d33
|
473b8a4300845f256b2eacfde73be8620cfd5ac0
|
/Week3/code/TreeHeight.R
|
fb49e052dfde8d0ff0ad179dfc951b3b0ecadf68
|
[] |
no_license
|
ee-jackson/QMEEbootcamp
|
cf0771bf94980ed420449659cd6faff01c125d55
|
51b43b6cdfb0b6ec00d8b5fc453e22c1a28a5528
|
refs/heads/master
| 2021-07-17T04:58:14.494742
| 2021-07-05T11:48:10
| 2021-07-05T11:48:10
| 212,303,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
TreeHeight.R
|
#!/usr/bin/env Rscript
# TreeHeight: calculates the height of a tree given the distance of the
# observer from its base and the angle of elevation to its top, using the
# trigonometric formula
#
#   height = distance * tan(radians)
#
# ARGUMENTS
#   degrees:  angle of elevation of the tree top, in degrees (numeric;
#             scalar or vector)
#   distance: distance from the base of the tree (e.g. meters; numeric;
#             scalar or vector)
#
# OUTPUT
#   The height(s) of the tree, in the same units as "distance". Inputs are
#   vectorized and recycle following the usual R rules.
TreeHeight <- function(degrees, distance){
  # Fail fast on non-numeric input rather than erroring inside tan().
  stopifnot(is.numeric(degrees), is.numeric(distance))
  radians <- degrees * pi / 180
  height <- distance * tan(radians)
  return (height)
}
# Read the tree survey data (angle in degrees, distance in meters).
tree.data<-read.csv("../data/trees.csv", header = TRUE)
# Add a computed height column for every tree (same units as Distance.m).
tree.data$Tree.Height.m<-TreeHeight(tree.data$Angle.degrees, tree.data$Distance.m)
# Write the augmented table to the results directory.
write.csv(tree.data,"../results/TreeHts.csv")
|
363c549a79bd25d9a4daffa2825c67b25872a896
|
8f0431de29762061acb57e06f492d22d5ce2604f
|
/R/gt_theme_guardian.R
|
2ccb7f403b23ca8260910b647ce96c46a2d53475
|
[
"MIT"
] |
permissive
|
adamkemberling/gtExtras
|
2c3e1a81d5dd97666dedab710d49377a2a7572dd
|
40d1e5a006fa67833a702733055c94606f8cffb7
|
refs/heads/master
| 2023-08-17T11:12:00.431133
| 2021-10-13T16:28:10
| 2021-10-13T16:28:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,691
|
r
|
gt_theme_guardian.R
|
#' Apply Guardian theme to a `gt` table
#'
#' @param gt_object An existing gt table object of class `gt_tbl`
#' @param ... Optional additional arguments to `gt::table_options()`
#' @return An object of class `gt_tbl`.
#' @importFrom gt %>%
#' @export
#' @import gt
#' @examples
#'
#' library(gt)
#' themed_tab <- head(mtcars) %>%
#' gt() %>%
#' gt_theme_guardian()
#' @section Figures:
#' \if{html}{\figure{gt_guardian.png}{options: width=100\%}}
#'
#' @family Themes
#' @section Function ID:
#' 1-4
# Apply the Guardian-newspaper look to a gt table: Noto Sans typography,
# blue titles, grey striping and the signature light-blue top rule.
gt_theme_guardian <- function(gt_object, ...) {
  stopifnot("'gt_object' must be a 'gt_tbl', have you accidentally passed raw data?" = "gt_tbl" %in% class(gt_object))

  # First pass: table font plus title/subtitle colour and sizing, and no
  # border above the first body row.
  styled <- gt_object %>%
    opt_table_font(
      font = list(
        google_font("Noto Sans"),
        default_fonts()
      )
    ) %>%
    tab_style(
      style = cell_borders(sides = "top", color = "white", weight = px(0)),
      locations = cells_body(rows = 1)
    ) %>%
    tab_style(
      style = cell_text(color = "#005689", size = px(22), weight = 700),
      locations = list(cells_title(groups = "title"))
    ) %>%
    tab_style(
      style = cell_text(color = "#005689", size = px(16), weight = 700),
      locations = list(cells_title(groups = "subtitle"))
    )

  # Tables without a title get the blue rule directly above the column
  # labels; titled tables get a neutral grey there instead.
  labels_top_color <- if (is.null(styled[["_heading"]][["title"]])) {
    "#40c5ff"
  } else {
    "#ececec"
  }

  # Second pass: Guardian palette, striping and border layout. Extra options
  # supplied by the caller via `...` are forwarded to tab_options().
  styled %>%
    tab_options(
      row.striping.include_table_body = TRUE,
      table.background.color = "#f6f6f6",
      row.striping.background_color = "#ececec",
      column_labels.background.color = "#f6f6f6",
      column_labels.font.weight = "bold",
      table.border.top.width = px(1),
      table.border.top.color = "#40c5ff",
      table.border.bottom.width = px(3),
      table.border.bottom.color = "white",
      footnotes.border.bottom.width = px(0),
      source_notes.border.bottom.width = px(0),
      table_body.border.bottom.width = px(3),
      table_body.border.bottom.color = "white",
      table_body.hlines.width = "white",
      table_body.hlines.color = "white",
      row_group.border.top.width = px(1),
      row_group.border.top.color = "grey",
      row_group.border.bottom.width = px(1),
      row_group.border.bottom.color = "grey",
      row_group.font.weight = "bold",
      column_labels.border.top.width = px(1),
      column_labels.border.top.color = labels_top_color,
      column_labels.border.bottom.width = px(2),
      column_labels.border.bottom.color = "#ececec",
      heading.border.bottom.width = px(0),
      data_row.padding = px(4),
      source_notes.font.size = 12,
      table.font.size = 16,
      heading.align = "left",
      ...
    )
}
|
6a350cd206ef11e72a8385aedd766e43974ba523
|
752b5cf8419142d8b4c1bff2f97149d00a694d48
|
/Excel_pipe_v1.01.R
|
5eaa88cf026940665eb121140527922aed600d23
|
[] |
no_license
|
cprabucki/R
|
fdfc854b8323ef7257a00c8db4c5038bfc5dcffc
|
33fa57eeeb20dd36cf51bfe0f2c231081e6f52ad
|
refs/heads/master
| 2021-01-01T05:03:24.462242
| 2016-12-30T00:12:38
| 2016-12-30T00:12:38
| 77,395,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,084
|
r
|
Excel_pipe_v1.01.R
|
# Load the Excel workbook (sheet 1)
library(readxl)
W01 <- read_excel("C:/Users/a212857/Downloads/W01.xlsx", 1)
# Build the logical vector selecting opportunities whose Closing Date falls
# in 2016 (Excel serial dates use the 1899-12-30 origin)
lfecha.closing <- as.Date.numeric(W01$`Closing Date`, origin="1899-12-30") < as.Date("2017/01/01") & as.Date.numeric(W01$`Closing Date`, origin="1899-12-30") > as.Date("2015/12/31")
# Total Order Entry (in thousands) for the opportunities closing in 2016
sum(W01$`Order Entry Total *1000`[lfecha.closing])
# Accumulate each workbook and its 2016 mask in parallel lists
W <- list(W01)
lfecha.closing.vector <- list(lfecha.closing)
# NOTE(review): the block below repeats the W01 logic verbatim for W47;
# consider factoring the per-workbook steps into a helper function.
W47 <- read_excel("C:/Users/a212857/Downloads/W47.xlsx", 1)
# Build the logical vector selecting opportunities whose Closing Date falls in 2016
lfecha.closing <- as.Date.numeric(W47$`Closing Date`, origin="1899-12-30") < as.Date("2017/01/01") & as.Date.numeric(W47$`Closing Date`, origin="1899-12-30") > as.Date("2015/12/31")
# Total Order Entry (in thousands) for the opportunities closing in 2016
sum(W47$`Order Entry Total *1000`[lfecha.closing])
W[[length(W)+1]] <- W47
lfecha.closing.vector[[length(lfecha.closing.vector)+1]] <- lfecha.closing
|
48e4aae7ddeae48c4a870837a076955459f180cf
|
6c1cebe424c5ffed3295cc108fc36ff044cfb260
|
/man/Mapper.Rd
|
af26a26487a7d32df76b5e0c69414d61e427cac9
|
[] |
no_license
|
corybrunson/Mapper
|
fec7b778b8cea0557984b95b1219d79169cad568
|
76b861b1a74a99fa9bb43e0beeeba7434b8ca5f8
|
refs/heads/master
| 2022-06-18T00:59:43.087056
| 2022-05-22T15:53:06
| 2022-05-22T15:53:06
| 190,648,752
| 0
| 0
| null | 2019-06-06T20:56:49
| 2019-06-06T20:56:48
| null |
UTF-8
|
R
| false
| true
| 2,954
|
rd
|
Mapper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapper.R
\encoding{UTF-8}
\name{mapper}
\alias{mapper}
\title{mapper}
\usage{
mapper(
X,
filter,
cover = c(cover = "fixed interval", number_intervals = 10L, percent_overlap = 35),
distance_measure = "euclidean",
clustering_algorithm = c(cl = "single"),
return_reference = FALSE
)
}
\arguments{
\item{X}{Either an \eqn{n x D} data matrix.}
\item{filter}{An \eqn{n x d} data matrix, or a function.}
\item{cover}{Named list of cover parameters. See details.}
\item{distance_measure}{String indicating the measure in the data space. Accepts any in \link[proxy]{pr_DB}.}
\item{clustering_algorithm}{Named list of clustering parameters. See details.}
\item{return_reference}{Boolean whether or not to return the reference class used to construct Mapper. See \link[Mapper]{MapperRef}.}
}
\value{
If \code{return_reference} is TRUE, the \code{MapperRef} object is returned, otherwise a list given by \link{exportMapper}
}
\description{
Computes the mapper graph. Mapper is a tool for summarizing topological information from datasets and
maps defined on them. It takes as input a set of 'point cloud' data, a (possibly lower dimensional) map defined on the data,
and produces a topological summary of the data expressed through a cover equipped to the codomain of the map. For more
information, see the references below.
}
\details{
\code{mapper} is a generic function that concisely parameterizes the Mapper framework into a single function definition.
This function serves as a convenience wrapper around the \link[Mapper]{MapperRef} R6 generator for users that prefer a single
function to parameterize the construction.
For finer control over the mapper construction, it's recommended to use \link[Mapper]{MapperRef} instead.
If \code{return_reference} is TRUE, the \link[Mapper]{MapperRef} instance used by this function is returned.
The \code{cover} must be a named list of all of the parameters needed by the cover, as is used in e.g. \link{use_cover}.
The \code{clustering_algorithm} must be a named list of the parameters needed to parameterize the clustering algorithm, as is used in e.g.
\link{use_clustering_algorithm}
}
\examples{
data("noisy_circle", package="Mapper")
left_pt <- noisy_circle[which.min(noisy_circle[, 1]),]
f_x <- matrix(apply(noisy_circle, 1, function(pt) (pt - left_pt)[1]))
m <- mapper(X = noisy_circle,
filter = f_x,
cover = list(cover="fixed interval", number_intervals=10L, percent_overlap=50),
distance_measure = "euclidean",
clustering_algorithm = list(cl="single", threshold = 0.0))
}
\references{
Singh, Gurjeet, Facundo Memoli, and Gunnar E. Carlsson. "Topological methods for the analysis of high dimensional data sets and 3d object recognition." SPBG. 2007.
}
\seealso{
\code{\link[Mapper]{MapperRef}}
}
\author{
Matt Piekenbrock, \email{matt.piekenbrock@gmail.com}
}
|
658ec0944ac0210593bcf797e2d1a0c124bc62ea
|
7e5f89d948abbdc3ee5314ef561e7229763ca225
|
/R/ui.R
|
d400e7b01721633a6ad0460bb8b494fa3cf5954e
|
[] |
no_license
|
GiulioGenova/SMCcalibration
|
bb798adfc34100ac7575d798788a92c79a145aad
|
21fbee9c3374ef422515f6e747b16bb1d258036d
|
refs/heads/master
| 2021-08-08T17:12:08.981088
| 2018-07-10T06:58:46
| 2018-07-10T06:58:46
| 135,586,313
| 0
| 2
| null | 2018-05-31T13:18:25
| 2018-05-31T13:18:25
| null |
UTF-8
|
R
| false
| false
| 8,696
|
r
|
ui.R
|
# Ensure all required packages are installed AND attached.
# BUG FIX: the original `if (!require(pkg)) install.packages(pkg)` pattern
# installed a missing package but never loaded it afterwards, so the app
# failed on first run on a fresh machine. Every package is now attached
# after the (optional) installation step.
for (pkg in c("Cairo", "robustbase", "dplyr", "tidyr", "ggplot2",
              "leaflet", "leaflet.extras")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
# Calibration pairs (sensor reading vs. gravimetric sample) used as the
# app's default data set.
data_def<-read.csv("SensorVSample_new.csv",sep=",",dec=".")
# Shiny UI for the soil-moisture sensor calibration app.
# Layout: a narrow sidebar of data-subsetting controls plus a tabset with the
# model fit, map, diagnostics, data table and a help page.
# FIX: all uses of T replaced by TRUE (T is an ordinary, reassignable
# variable in R and should not be relied upon as a logical constant).
# Dead commented-out selectInput variants with hard-coded `choices` were
# removed; the inputs carry placeholder choices that the server populates at
# run time (updateSelectInput in server.R).
ui <- fluidPage(
  sidebarLayout(fluid = TRUE, position = "left",
    # Sidebar: one filter per metadata column of the calibration data set.
    sidebarPanel(width = 2,
      selectInput("Project", label = h4("project"), "placeholder1",
        multiple = TRUE),
      selectInput("Landuse", label = h4("land use"), "placeholder2", multiple = TRUE),
      selectInput("Depth", label = h4("soil depth"), "placeholder3", multiple = TRUE),
      selectInput("Station", label = h4("station"), "placeholder4", multiple = TRUE),
      selectInput("Date", label = h4("date"), "placeholder5", multiple = TRUE),
      selectInput("SoilType", label = h4("soil type"), "placeholder6", multiple = TRUE),
      selectInput("SensorType", label = h4("sensor type"), "placeholder7", multiple = TRUE),
      selectInput("SensorName", label = h4("sensor name"), "placeholder8", multiple = TRUE),
      br(),
      # Analysis options.
      checkboxInput("robust", label = "robust estimates", value = FALSE),
      br(),
      checkboxInput("facet", label = "facet grid", value = FALSE),
      checkboxInput("Rownames", label = "show row.names", value = FALSE),
      checkboxInput("Zoom", label = "zoom in", value = FALSE),
      # Optional user-supplied data set.
      fileInput('datafile', 'Choose CSV file',
        accept = c('text/csv', 'text/comma-separated-values,text/plain', '.csv'))
    ),
    mainPanel(
      tabsetPanel(
        # Scatter plot with the fitted model; points can be clicked/brushed
        # and toggled in or out of the fit.
        tabPanel("Model Fit",
          plotOutput("plot1", height = 800, width = 800,
            click = "plot1_click",
            brush = brushOpts(
              id = "plot1_brush"
            )),
          actionButton("exclude_toggle", "Toggle points"),
          actionButton("exclude_reset", "Reset")
        ),
        tabPanel("Map Table",
          leafletOutput("map")),
        tabPanel("Diagnostics", plotOutput("plot2", height = 1000, width = 1000,
          click = "plot2_click",
          brush = brushOpts(
            id = "plot2_brush"
          ))),
        tabPanel("Data Table",
          br(),
          dataTableOutput("table")),
        # Static help text describing the panels.
        tabPanel("Description",
          br(),
          h4("Side Panel for Data Subsetting"),
          p("The left side panel enables subsetting of the data set. By default the whole unique data set is used. An option for ggplot's facet_grid functionality is included. Klick", em("facet grid"), "and the data set will be shown grouped by landuse and soil depth in the Model Fit panel. One can enable", em("Show row.names"), "to easily choose points to remove. Zooming to the data range of 0 to 60 %vol is also possible."),
          p("For a description of the data set have a look at", code("?SensorVSample")),
          p(""),
          br(),
          hr(),
          h4("The Model Fit Panel"),
          p("The data subset is visualised in a scatter plot. Moreover, the statistical model with the 95% confidence intervall for the estimates is ablined. Estimates are computed either with the lm() function or with robust statistics (SMDM fit from the", strong("robustbase"), "R-package). One can toogle outlying points by klicking one or mark multiple and apply", em("Toogle points"), ". A helpful descision tool for indicating possible outliers are the diagnostic plots on the next panel."),
          br(),
          hr(),
          h4("The Diagnostic Panel"),
          p("Four diagostic plots for the roblm object are visualised: (1) Standardized residuals vs. Robust Distances, (2) Normal Q-Q vs. Residuals, (3) Residuals vs. Fitted Values, (4) Sqrt of abs(Residuals) vs. Fitted Values"),
          br(),
          hr(),
          h4("The Data Table Panel"),
          p("The last panel contains the data table of the data subset."))
      )
    )
  )
)
|
607f9249afb07987de5b885113b0975fb845dcdf
|
e3ce3ad557ebd51429ed7acfea936723149a8d4c
|
/R/mof.dent.R
|
003e88eaa6499c18a3e9bfbfed327c4dc41b15e5
|
[] |
permissive
|
jakobbossek/smoof
|
87512da9d488acfe3a7cc62aa3539a99e82d52ba
|
d65247258fab57d08a5a76df858329a25c0bb1b8
|
refs/heads/master
| 2023-03-20T02:05:12.632661
| 2023-03-08T13:59:27
| 2023-03-08T13:59:27
| 22,465,741
| 32
| 27
|
BSD-2-Clause
| 2022-01-21T10:02:19
| 2014-07-31T10:39:43
|
R
|
UTF-8
|
R
| false
| false
| 1,619
|
r
|
mof.dent.R
|
#' @title
#' Dent Function
#'
#' @description
#' Builds and returns the bi-objective Dent test problem, which is defined as
#' follows:
#' \deqn{f(\mathbf{x}) = \left(f_1(\mathbf{x}_1), f_2(\mathbf{x})\right)}
#' with
#' \deqn{f_1(\mathbf{x}_1) = 0.5 \left( \sqrt(1 + (x_1 + x_2)^2) + \sqrt(1 + (x_1 - x_2)^2) + x_1 - x_2\right) + d}
#' and
#' \deqn{f_1(\mathbf{x}_1) = 0.5 \left( \sqrt(1 + (x_1 + x_2)^2) + \sqrt(1 + (x_1 - x_2)^2) - x_1 + x_2\right) + d}
#' where \eqn{d = \lambda * \exp(-(x_1 - x_2)^2)} and \eqn{\mathbf{x}_i \in [-1.5, 1.5], i = 1, 2}.
#'
#' @return [\code{smoof_multi_objective_function}]
#' @export
makeDentFunction <- function() {
  # Two-objective Dent test function on [-1.5, 1.5]^2. The lambda-scaled
  # Gaussian term `d` creates the characteristic "dent" in the otherwise
  # convex Pareto front.
  fn = function(x) {
    assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
    lambda = 0.85
    d = lambda * exp(-1 * (x[1] - x[2])^2)
    f = c(
      0.5 * (sqrt(1 + (x[1] + x[2])^2) + sqrt(1 + (x[1] - x[2])^2) + x[1] - x[2]) + d,
      0.5 * (sqrt(1 + (x[1] + x[2])^2) + sqrt(1 + (x[1] - x[2])^2) - x[1] + x[2]) + d
    )
    return(f)
  }
  # Wrap `fn` in smoof's multi-objective container with its box constraints
  # and reference point for hypervolume computation.
  makeMultiObjectiveFunction(
    name = "Dent Function",
    # FIX: redundant single-argument paste0() wrapper removed.
    id = "dent_2d_2o",
    description = "Dent Function",
    fn = fn,
    par.set = makeNumericParamSet(
      len = 2L,
      id = "x",
      lower = c(-1.5, -1.5),
      upper = c(1.5, 1.5),
      vector = TRUE
    ),
    n.objectives = 2L,
    ref.point = c(4.5, 4.5)
  )
}
# Register the generator with smoof: the "smoof_generator" class plus name,
# type and tag attributes are used by the package's function registry and
# its filtering helpers.
class(makeDentFunction) = c("function", "smoof_generator")
attr(makeDentFunction, "name") = c("Dent")
attr(makeDentFunction, "type") = c("multi-objective")
attr(makeDentFunction, "tags") = c("multi-objective")
|
cb31e17e4dcb9e18072ece96fc34ebcc386db78b
|
768c8adb2b2dfc7809201819c9a6f162d0749a4e
|
/Pizza.R
|
24fb100dc7854e93e3789492dc38a9873e99d9b2
|
[] |
no_license
|
ohmpatthanay/R-language-Basic-Program-
|
badbc52a34e31be1534894f098c09471aad55cc1
|
71787744b4831ddfc622f1a36c992affcb8ff4af
|
refs/heads/master
| 2020-08-23T14:28:54.567253
| 2019-10-21T18:45:09
| 2019-10-21T18:45:09
| 216,639,603
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,792
|
r
|
Pizza.R
|
# Draw a cartoon pizza on a fresh plotting canvas.
#
# Arguments:
#   y: vertical position (plot units, 0-1) of the "PIZZA" lettering; the
#      flipbook animation lowers it frame by frame.
#
# Side effects: opens a new plot and draws on it. Plot margins are zeroed
# for the drawing and restored on exit via on.exit().
pizza <- function(y = 0.8){
  opar <- par()$mar
  on.exit(par(mar = opar))
  par(mar = rep(0, 4))
  plot.new()
  # Crust: two concentric circles (default smoothness).
  circle(0.5, 0.5, 4, "cm", , 4)
  circle(0.5, 0.5, 3.5, "cm", , 4)
  # Round toppings.
  circle(0.42, 0.62, 0.3, "cm", , 4)
  circle(0.47, 0.4, 0.3, "cm", , 4)
  circle(0.60, 0.45, 0.3, "cm", , 4)
  # Square toppings (segments = 4).
  circle(0.66, 0.54, 0.3, "cm", 4, 4)
  # BUG FIX: this call was duplicated verbatim in the original (the same
  # square drawn twice at the same position); the redundant copy is removed.
  circle(0.55, 0.65, 0.3, "cm", 4, 4)
  # Triangle toppings (segments = 3).
  circle(0.36, 0.55, 0.3, "cm", 3, 4)
  circle(0.33, 0.44, 0.3, "cm", 3, 4)
  circle(0.56, 0.35, 0.3, "cm", 3, 4)
  # Slice cuts across the pie.
  segments(0.31, 0.35, 0.69, 0.65, lwd = 4)
  segments(0.5, 0.74, 0.5, 0.26, lwd = 4)
  segments(0.68, 0.34, 0.31, 0.65, lwd = 4)
  segments(0.26, 0.495, 0.74, 0.495, lwd = 4)
  # "PIZZA" lettering via plotting characters (pch = ASCII codes P,I,Z,Z,A).
  points(0.38, y, pch=80,cex=3,col="orange")
  points(0.43, y, pch=73,cex=3,col="red")
  points(0.48, y, pch=90,cex=3,col="orange")
  points(0.55, y, pch=90,cex=3,col="red")
  points(0.62, y, pch=65,cex=3,col="orange")
}
# Draw a "circle" (an ellipse corrected for the device aspect ratio, so it
# renders round) of a given physical radius at plot coordinates (x, y).
#   x, y:     centre, in user plot coordinates
#   radius:   physical radius, in `units`
#   units:    "cm" or "in"; cm are converted to inches to match par("pin")
#   segments: polygon segment count (3 = triangle, 4 = square, 100 ~ circle)
#   lwd:      line width passed through to lines()
# Side effect: draws on the current plot via lines().
circle <- function(x, y, radius, units=c("cm", "in"), segments=100,
lwd = NULL){
units <- match.arg(units)
if (units == "cm") radius <- radius/2.54
# Physical plot size (inches) and the user-coordinate extent of the device.
plot.size <- par("pin")
plot.units <- par("usr")
units.x <- plot.units[2] - plot.units[1]
units.y <- plot.units[4] - plot.units[3]
# Ratio of user-units-per-inch in x vs y: used to squash the unit circle so
# it appears round on a non-square device.
ratio <- (units.x/plot.size[1])/(units.y/plot.size[2])
size <- radius*units.x/plot.size[1]
angles <- (0:segments)*2*pi/segments
unit.circle <- cbind(cos(angles), sin(angles))
# The Cholesky factor of the shape matrix applies the aspect correction.
shape <- matrix(c(1, 0, 0, 1/(ratio^2)), 2, 2)
ellipse <- t(c(x, y) + size*t(unit.circle %*% chol(shape)))
lines(ellipse, lwd = lwd)
}
# Slow the animation down to 0.1 s between frames.
# NOTE(review): the previous options returned here (`oopt`) are saved but
# never restored -- consider animation::ani.options(oopt) when done.
oopt <- animation::ani.options(interval = 0.1)
# Redraw the pizza with the lettering sliding from top to bottom, one frame
# per y position.
flipbook <- function(){
lapply(seq(1.01, 0.18, by=-0.05), function(i){
pizza(i)
animation::ani.pause()
})
}
pizza()
flipbook()
|
bca573d06c506c3272074cb38dd08191b85e55f9
|
075610f48fb5314e016574e246f486f1ab04ce3e
|
/R/03_calculation-C.R
|
d43f9b8950dfe968fb6a1af133a987ed31e918a2
|
[
"MIT"
] |
permissive
|
asri2/icr
|
aed09b5a39f7dbf640003eb310c36f2173074beb
|
ab6afaeeaef587b18186cb6e270857034167d63d
|
refs/heads/master
| 2022-09-13T20:55:11.001629
| 2020-06-04T07:11:49
| 2020-06-04T07:11:49
| 265,541,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,006
|
r
|
03_calculation-C.R
|
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Series :01
# Name: Day of the week and Volatility
# Description: This program estimates returns and asset volatility
# by using GARCH and Modified-GARCH
# Ticker: IDX; JKSE
# Author: Asri Surya
# Date: March 2020
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# start
cat("\014") #clear console (RStudio form-feed; harmless elsewhere)
# NOTE(review): rm(list = ls()) wipes the workspace; script-only usage.
rm(list = ls())
#environment packages preparation ++++++
# Install any packages in `pkg` that are not yet present, then attach them
# all. Returns a named logical vector: TRUE where the package attached.
ipak <- function(pkg) {
  missing_pkgs <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(missing_pkgs)) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}
# List of packages
# NOTE(review): "xtable" appears twice in this vector (harmless but redundant).
packages <- c("xtable", "rugarch","dplyr", "xtable")
ipak(packages)
# load dataset to environment ----
dataset <- readRDS(file = "data/dataset_clean.rds") #load dataset
dataset$date <-as.Date(dataset$date,"%Y-%m-%d")#convert date format
# Estimation period list
periodList <- c('2000-01-01','2005-01-01','2010-01-01','2015-01-01','2019-01-01')
loopLength <-length(periodList)-1
# an empty matrix to store results: one column per estimation period;
# 24 rows = presumably 12 model coefficients x (estimate, std. error)
# pairs -- TODO confirm the coefficient count against the fitted spec.
mresult <- matrix(0, nrow = 24, ncol=loopLength)
cat("\014") #clear console
# Fit a GARCH(1,1) with day-of-week dummies (Mon/Tue/Thu/Fri; Wednesday is
# the baseline) in both the mean and variance equations, one fit per
# sub-period of periodList. Coefficients and their standard errors are
# interleaved row-wise into column i of `mresult`.
for (i in seq_len(loopLength)) {
  # Data for the i-th period in periodList.
  period_data <- dataset %>%
    filter(date > periodList[i] & date < periodList[i+1])
  # Returns series to model.
  returns <- period_data %>%
    select(px.return)
  # External day-of-week regressors (Wednesday excluded as the baseline).
  dummies <- period_data %>%
    select(dayMon, dayTue, dayThu, dayFri)
  n <- data.matrix(dummies)
  # Modified GARCH specification.
  # BUG FIX: `distribution.model` is an argument of ugarchspec() itself, not
  # a component of the mean.model list; in the original it sat inside
  # mean.model where it had no effect and the default normal distribution
  # was used. It is now passed correctly ("std" = Student-t innovations).
  spec1 <- ugarchspec(variance.model = list(model = "sGARCH", garchOrder = c(1, 1),
                                            submodel = NULL, external.regressors = n,
                                            variance.targeting = TRUE),
                      mean.model = list(armaOrder = c(0, 0), external.regressors = n),
                      distribution.model = "std")
  garch1 <- ugarchfit(spec = spec1, data = returns[, 1], solver.control = list(trace = 0))
  # Extract coefficients and standard errors from the fitted S4 object.
  Coef <- as.matrix(garch1@fit$coef)
  Stdev <- as.matrix(garch1@fit$se.coef)
  # Interleave each (estimate, std. error) pair into rows 2j-1 and 2j.
  for (j in seq_len(length(Coef))) {
    mresult[2 * j - 1, i] <- Coef[j]
    mresult[2 * j, i] <- Stdev[j]
  }
}
# Create parameter/row names for the result matrix: each coefficient name
# from the last fit, followed by "<name>_sd" for its standard error,
# matching the interleaved row layout filled in the loop above.
mres<-as.data.frame(garch1@fit$coef)
mm <- rownames(mres)
mmname <-matrix(1:24)
ij<-1
for (jj in 1: 12){
mmname[ij] = paste0(mm[jj])
k <- ij+1
mmname[k] = paste0(mm[jj],"_sd")
ij <- ij+2
}
# note: add likelihood ratio under the main table
#+++++++++ Final Output
# NOTE(review): `mmname` is built but never attached to `garchresult` as
# rownames -- presumably intended; confirm. cbind() of a single matrix is
# a no-op copy.
garchresult <- cbind(mresult)
saveRDS(garchresult, file = "data/garch_clean.rds")
#End
|
8f1a971864a348d9715bac001e7974ca59f5083e
|
3c0531db5f30de38f06b8dffd741c3e13a94eeb5
|
/code/01_wrangle-data.R
|
20e71228ad613f2463a9d1c64f6e08e3f7d3548e
|
[] |
no_license
|
spoicts/ccweedmeta-analysis
|
df206efcbfd9d88f378e91b2aaf45eaeea28b2ba
|
63abef61a9a66d92fb604fc73626f798f4ae85ee
|
refs/heads/master
| 2023-05-08T23:19:39.195889
| 2021-06-01T20:42:55
| 2021-06-01T20:42:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,810
|
r
|
01_wrangle-data.R
|
#############################
#
# Author: Gina Nichols
#
# Date Created: March 5 2020
#
# Date last modified:
#
# Purpose: prepare data for analysis
#
# Inputs: ccweeddat (from the ccweedmetapkg)
#
# Outputs: wd_wide and wd_long in working_data folder
#
# Steps: 1) calculate termination-to-planting gaps
# 2) Find and address 0 values (there are none)
# 3) Calc LRR
# 4) Adjust modifiers
# 5) Address extreme values
#
# Notes: There are no 0s
# Removing one outlier that drastically changed cc_bio vs LRR estimates due to very low LRR
#
#
##############################
# NOTE(review): rm(list=ls()) clears the user's workspace; acceptable in a
# standalone pipeline script, but do not source this file interactively.
rm(list=ls())
library(tidyverse)
library(readxl) #--to read excel files
library(naniar) #--allows you to replace . with NA
library(lubridate) #--for dates
library(janitor) #--to clean things
#devtools::install_github("vanichols/ccweedmetapkg")
library(ccweedmetapkg) #--contains raw dataset
library(here)
# Raw trial-level data shipped with the ccweedmetapkg package.
raw <- as_tibble(ccweeddat)
# step 1 term-planting gaps------------------------------------------------------------------
# Estimate termination-to-planting gap.
# Builds date columns for cover-crop planting/termination and crop planting,
# then derives cover-crop growing days and the termination-to-planting gap.
# Cover-crop plantings are pinned to 2018 and terminations/crop plantings to
# 2019 so intervals spanning the new year compute correctly.
d1 <-
raw %>%
#--cover crop planting dates
mutate(
#indicate you are estimating
cc_pest = case_when(
(is.na(cc_p_dom) & !is.na(cc_p_mnth2)) ~ "Y"),
#if there is a month but no date, assign dom as 15
# NOTE(review): the 15-substitution is stored in cc_pDOM, but ccpl_lubdate
# below uses the original cc_p_dom -- unlike the termination and crop
# blocks, which overwrite the *_dom column in place. Presumably the
# substitute was meant to feed the date; confirm with the authors.
cc_pDOM = ifelse( (is.na(cc_p_dom) & !is.na(cc_p_mnth)), 15, cc_p_dom),
#paste things to make a date, pretend everything is 2018
ccpl_lubdate = ifelse(!is.na(cc_p_dom), paste(cc_p_mnth2, cc_p_dom, "2018", sep = "/"), NA),
ccpl_lubdate = mdy(ccpl_lubdate),
ccpl_doy = yday(ccpl_lubdate)) %>%
#--cover crop termination dates
mutate(
# indicate you are estimating
cc_termest = case_when(
(is.na(cc_term_dom) & !is.na(cc_term_mnth2)) ~ "Y"),
# if there is a month but no date, assign it as 1
cc_term_dom = ifelse( (is.na(cc_term_dom) & !is.na(cc_term_mnth2)), 1, cc_term_dom),
# paste things to make a date, pretend all was termed in 2019
ccterm_lubdate = ifelse(!is.na(cc_term_dom), paste(cc_term_mnth2, cc_term_dom, "2019", sep = "/"), NA),
ccterm_lubdate = mdy(ccterm_lubdate),
ccterm_doy = yday(ccterm_lubdate)) %>%
#--crop planting dates
mutate(
# indicate you are estimating
crop_pest = case_when(
(is.na(crop_p_dom) & !is.na(crop_p_mnth2)) ~ "Y"),
# if there is a month but no date, assign it as 30
crop_p_dom = ifelse( (is.na(crop_p_dom) & !is.na(crop_p_mnth2)), 30, crop_p_dom),
# paste things to make a date
croppl_lubdate = ifelse(!is.na(crop_p_dom), paste(crop_p_mnth2, crop_p_dom, "2019", sep = "/"), NA),
croppl_lubdate = mdy(croppl_lubdate),
croppl_doy = yday(croppl_lubdate)) %>%
#--calc gaps
mutate(
cc_growdays = time_length(interval(ymd(ccpl_lubdate), ymd(ccterm_lubdate)), "day"),
termgap_days = time_length(interval(ymd(ccterm_lubdate), ymd(croppl_lubdate)), "day"))
# step 2 address 0s--------------------------------------------------------------
# find and address 0s
# NOTE(review): this pipeline's result is only printed (nothing assigned);
# it is an interactive check that no treatment/control pair is all-zero.
d1 %>%
# Identify pairs of 0s (there are currently none)
mutate(repl_1den = ifelse( (cc_wden == 0 & ctl_wden == 0), "yes", "no"),
repl_1bio = ifelse( (cc_wbio == 0 & ctl_wbio == 0), "yes", "no")) %>%
filter(repl_1bio == "yes" | repl_1den == "yes")
d2 <- d1
# step 3 calc log-response-ratios and weights-----------------------------------------
d3 <-
d2 %>%
#calculate LRR (log response ratio, treatment vs. control)
mutate(bioLRR = log(cc_wbio / ctl_wbio),
denLRR = log(cc_wden / ctl_wden),
yieldLRR = log(cc_yield_kgha / ctl_yield_kgha) ) %>%
#calc weight based on reps: (n1*n2)/(n1+n2) with equal reps per arm
mutate(wgt = (reps * reps) / (reps + reps))
# step 4 adjust modifiers as desired ------------------------------------------------
d4 <-
d3 %>%
#categorize cc types into a binary grass / non-grass modifier
mutate(cc_type2 = recode(cc_type,
brassica = "non-grass",
legume = "non-grass",
mix = "non-grass")) %>%
mutate(cc_bm_Mgha = cc_bm_kgha / 1000)
# step 5 adjust extreme value ---------------------------------------------
#based on leave-one-out sensitivity analysis
# Replace the extreme bioLRR of observation 76 with the value from
# observation 145 (named `secondmin` -- presumably the second-smallest
# bioLRR; confirm against the sensitivity analysis).
secondmin <- d4 %>% filter(obs_no == 145) %>% select(bioLRR) %>% pull()
d4 %>% filter(obs_no == 76) %>% select(bioLRR) %>% pull()
d5 <-
d4 %>%
mutate(bioLRR = ifelse(obs_no == 76, secondmin, bioLRR))
#check it
d5 %>% select(bioLRR) %>% arrange(bioLRR)
# Write both a long and short form of the data -------------------------------------------------
d5 %>% write_csv("working_data/wd_wide.csv")
# NOTE(review): gather() is superseded by pivot_longer(); note the modern
# verb interleaves rows differently, which would change the row order of
# the output CSV -- left as-is to preserve the published data files.
d5_long <-
d5 %>%
gather(bioLRR, denLRR, key = "resp", value = "LRR") %>%
mutate(resp = recode(resp,
"bioLRR" = "bio",
"denLRR" = "den")) %>%
filter(!is.na(LRR))
d5_long %>% write_csv("working_data/wd_long.csv")
|
c6cc10c5229f6a53c8614dc59082fae381631608
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/celestial/examples/deg2dms.Rd.R
|
06628f9f20b089ae1e86df77c07f5d884d5127d1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 306
|
r
|
deg2dms.Rd.R
|
# Auto-extracted example script for celestial::deg2dms.
library(celestial)
### Name: deg2dms
### Title: Convert decimal degrees to dms format.
### Aliases: deg2dms
### Keywords: convert
### ** Examples
# Default output, then concatenated-string output with various separators
# (':' literal, lower-case d/m/s marks, upper-case D/M/S marks).
print(deg2dms(12.345))
print(deg2dms(12.345,type='cat',sep=':'))
print(deg2dms(12.345,type='cat',sep='dms'))
print(deg2dms(12.345,type='cat',sep='DMS'))
|
16a0293b34482c3d4adc063a70560ff7ec950942
|
47a8dff9177da5f79cc602c6d7842c0ec0854484
|
/man/AugmentPlot.Rd
|
103643f7a41508f1705fe25a75bea88f6ec45d0f
|
[
"MIT"
] |
permissive
|
satijalab/seurat
|
8949973cc7026d3115ebece016fca16b4f67b06c
|
763259d05991d40721dee99c9919ec6d4491d15e
|
refs/heads/master
| 2023-09-01T07:58:33.052836
| 2022-12-05T22:49:37
| 2022-12-05T22:49:37
| 35,927,665
| 2,057
| 1,049
|
NOASSERTION
| 2023-09-01T19:26:02
| 2015-05-20T05:23:02
|
R
|
UTF-8
|
R
| false
| true
| 846
|
rd
|
AugmentPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{AugmentPlot}
\alias{AugmentPlot}
\title{Augments ggplot2-based plot with a PNG image.}
\usage{
AugmentPlot(plot, width = 10, height = 10, dpi = 100)
}
\arguments{
\item{plot}{A ggplot object}
\item{width, height}{Width and height of PNG version of plot}
\item{dpi}{Plot resolution}
}
\value{
A ggplot object
}
\description{
Creates "vector-friendly" plots. Does this by saving a copy of the plot as a PNG file,
then adding the PNG image with \code{\link[ggplot2]{annotation_raster}} to a blank plot
of the same dimensions as \code{plot}. Please note: original legends and axes will be lost
during augmentation.
}
\examples{
\dontrun{
data("pbmc_small")
plot <- DimPlot(object = pbmc_small)
AugmentPlot(plot = plot)
}
}
\concept{visualization}
|
2d74cd55522b34bb776d69bdb4930afd86ce5d93
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MetaheuristicFPA/examples/rcpp_MetaheuristicFPA.Rd.R
|
f435514d81046f806363619a4bdb333d68a42561
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
rcpp_MetaheuristicFPA.Rd.R
|
# Auto-extracted example script for MetaheuristicFPA::fpa_optim.
library(MetaheuristicFPA)
### Name: fpa_optim
### Title: Metaheuristic - Flower Pollination Algorithm
### Aliases: fpa_optim
### ** Examples
# find the x-value that gives the minimum of the dejong benchmark function
# y = sum(x[i]^2), i=1:n, -5.12 <= x[i] <= 5.12
# global minimum is 0 when each x = 0
# Objective: the local assignment is the last evaluated expression, so it
# is returned implicitly.
deJong<-function(x){
deJong = sum(x^2)
}
# run a simulation using the standard flower pollination algorithm
set.seed(1024) # for reproducible results
library(MetaheuristicFPA)
# N = population size, p = switch probability, D = problem dimension;
# the search stops at maxiter iterations or when the objective is within
# objfit of gloMin.
fpa_opt <- fpa_optim(N = 25, p =0.8 , beta = 1.5, eta = 0.1,
maxiter=5000, randEta=FALSE, gloMin=0, objfit=1e-7,
D=4, Lower = -5.12, Upper = 5.12, FUN = deJong)
x <- fpa_opt$best_solution
|
be9e76954e79136e135e672e40acb31fdf45cbb0
|
9e4da4d8a7640baf68b334212eac19701a0ecbb5
|
/demean_age.R
|
1a21563d3b02ce397e55b054de2a2b35f546c70f
|
[] |
no_license
|
poldrack/r_testing
|
6427c8f7a038dbdb6b2c39a5c0950952e4c14de1
|
ada0a58bb90242d4ff248a1996572b21e019df78
|
refs/heads/master
| 2021-01-01T05:46:28.656150
| 2016-05-10T20:06:03
| 2016-05-10T20:06:03
| 58,487,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 340
|
r
|
demean_age.R
|
# Demean the `age` column of a tab-separated demographics file given as the
# first command-line argument, writing the demeaned values (one per line,
# no header) to age_demeaned.tsv in the working directory.
args <- commandArgs(trailingOnly = TRUE)
# Fail fast with a clear error if no input file was supplied.
stopifnot(length(args) >= 1)
# `header = TRUE` spelled out (the original relied on partial matching of
# `head=`, which is fragile).
demographics <- read.csv(file = args[1], header = TRUE, sep = "\t")
age <- demographics$age
mean_age <- mean(age)
# Sanity checks: a mean age outside (10, 100) indicates a malformed input file.
stopifnot(mean_age < 100)
stopifnot(mean_age > 10)
demean_age <- age - mean_age
write.table(demean_age, file = "age_demeaned.tsv", row.names = FALSE, col.names = FALSE, sep = "\t")
print("done!")
|
93253544af7982bf013654e4c203279879c6692b
|
85cb470f1dfc89dd7b355cb0b4c9451e9f811a44
|
/model-scripts/Pre-Expiry-Implementation-Archive/clinical-engagement.R
|
93c389cd733c141a8681d306cc17d5e3614a5a43
|
[] |
no_license
|
khanna7/bc-navigation
|
541ad187d45bec8c79043e4ed7f181cb29ac6656
|
b2dd636ea5cb05c8e74b29dcc42447f87e416a0b
|
refs/heads/master
| 2021-12-01T07:30:09.250506
| 2021-11-14T18:01:46
| 2021-11-14T18:01:46
| 133,086,609
| 1
| 0
| null | 2020-09-21T17:16:22
| 2018-05-11T20:39:52
|
R
|
UTF-8
|
R
| false
| false
| 5,650
|
r
|
clinical-engagement.R
|
## module for clinical engagement
#baseline screening
# Load libraries ---------------------------
library(ergm)
#Initialize function
# Simulate one time step of clinic engagement for all undiagnosed agents.
# Symptomatic, unreferred agents may receive a diagnostic-test ("dt")
# referral; remaining unreferred agents may receive a screening ("sm")
# referral. Newly referred agents may then be "navigated", either
# institutionally (random coin flip) or socially (only if a network
# neighbor is already navigated). All referral/navigation state is written
# back to the network's vertex attributes and the updated network returned.
#
# Arguments:
#   net.f         - network object; agent state lives in vertex attributes
#   institutional - logical; enable random institutional navigation
#   social        - logical; enable navigation via navigated neighbors
#   control       - not referenced in this function body (interface parity)
#   time_step     - current simulation time; recorded as navigation start
#
# NOTE(review): prob(), prob_institutional_navigation and
# prob_social_navigation are not defined here -- they must be in scope
# (globals defined elsewhere in the project); confirm before refactoring.
clinical_engagement <- function(net.f, institutional, social, control, time_step){
#this function simulates clinic visits
## get individual attributes
# Several of the attributes pulled below (e.g. pop_size, age,
# time_since_pcp, bc_status, diagnosis_time, screen_complete is used,
# but symptom.severity duplicate `ss` is the one actually read) are
# extracted but not all are used in this function.
pop_size <- 5000
age <- net.f %v% "age"
symptom.severity <- net.f %v% "symptom.severity"
ss <- net.f %v% "symptom.severity"
time_since_pcp <- net.f %v% "time_since_pcp"
reg.pcp.visitor <- net.f %v% "reg.pcp.visitor"
diagnostic_referral <- net.f %v% "diagnostic_referral"
screening_referral <- net.f %v% "screening_referral"
diagnosis <- net.f %v% "diagnosis"
bc_status <- net.f %v% "bc_status"
diagnosis_time <- net.f %v% "diagnosis_time"
neighbor_navigated <- net.f %v% "neighbor_navigated"
neighborfp <- net.f %v% "neighborfp"
navigated <- net.f %v% "navigated"
antinavigated <- net.f %v% "antinavigated"
screen_complete <- net.f %v% "screen_complete"
dt_complete <- net.f %v% "diagnostic_test_complete"
diagnostic_referral_counter <- net.f %v% "diagnostic_referral_counter"
screening_referral_counter <- net.f %v% "screening_referral_counter"
screening_referral_checker <- net.f %v% "screening_referral_checker"
# Per-step diagnostics (printed at the end of the function).
number_navigated_at_t <- 0
rolls_for_navigation <- 0
navigation_start_time <- net.f %v% "navigation_start_time"
navigation_end_time <- net.f %v% "navigation_end_time"
# Per-agent attribute rows, passed to the external prob() helper.
attrib_mtrx<-cbind(symptom.severity,
reg.pcp.visitor,
navigated,
antinavigated,
neighbor_navigated,
neighborfp)
# Only agents without a diagnosis participate this step.
all_agents<-which(net.f %v% "diagnosis" == 0)
#cat("NAV START: ", net.f %v% "navigation_start_time", "\n")
for (agent in all_agents){
agent_data<-attrib_mtrx[agent,]
#first: symptomatic agents without referrals roll for dt referrals
if(diagnostic_referral[agent]==0 &
screening_referral[agent]==0 &
ss[agent]>0){
diagnostic_referral[agent]<-rbinom(1,1,prob(agent_data,"dt"))
diagnostic_referral_counter[agent]<-diagnostic_referral_counter[agent]+diagnostic_referral[agent]
if(diagnostic_referral[agent] == 1){ #navigate direct-diagnosis agents
navigated[agent]<-rbinom(1,1, prob_institutional_navigation) #random institutional navigation
rolls_for_navigation <- rolls_for_navigation + 1
number_navigated_at_t <- number_navigated_at_t + navigated[agent]
##NEW
if(navigated[agent]==1){
navigation_start_time[agent] <- time_step
}
}
}
#second: all agents without referrals roll for sm referrals
# Note the re-check of diagnostic_referral: an agent who just received a
# dt referral above is excluded here within the same step.
if(diagnostic_referral[agent]==0 &
screening_referral[agent]==0){
screening_referral[agent]<-rbinom(1,1,prob(agent_data,"sm"))
screening_referral_counter[agent]<-screening_referral_counter[agent]+screening_referral[agent]
if(screening_referral[agent]==1){ #Limiting navigation rolls to the step where a referral is given
screening_referral_checker[agent] <- 1
if(institutional == TRUE){
#simulate navigation
# Only un-navigated agents with no completed screen/diagnostic test
# and an open referral are eligible.
if(isTRUE((navigated[agent]==0) &
(dt_complete[agent]==0) &
(screen_complete[agent]==0) &
(diagnostic_referral[agent]==1 |
screening_referral[agent]==1)
)){
navigated[agent]<-rbinom(1,1, prob_institutional_navigation) #random institutional navigation
rolls_for_navigation <- rolls_for_navigation + 1
number_navigated_at_t <- number_navigated_at_t + navigated[agent]
if(navigated[agent]==1){
navigation_start_time[agent] <- time_step
}
#cat("navigated[agent]:", navigated[agent], "\n")
#cat("prob_institutional_navigation:", prob_institutional_navigation, "\n")
#cat(net.f %v% "diagnostic_test_complete",file="dt.complete.txt",sep="\n",append=TRUE)
#cat(capture.output(table(net.f %v% "diagnostic_test_complete", exclude = NULL)),file="dt.complete.txt",sep="\n",append=TRUE)
}
}
}
}
# Social navigation is rolled every step (not only on the referral step),
# and additionally requires a navigated network neighbor.
if(social == TRUE){
if(isTRUE((navigated[agent]==0) &
(dt_complete[agent]==0) &
(screen_complete[agent]==0) &
(diagnostic_referral[agent]==1 |
screening_referral[agent]==1) &
(neighbor_navigated[agent]==1) #key component
)){
navigated[agent]<-rbinom(1,1,prob_social_navigation) #social navigation
if(navigated[agent]==1){
navigation_start_time[agent] <- time_step
}
}
}
}
# Per-step diagnostics; division yields NaN when no rolls occurred.
cat("Number navigated at time t: ", number_navigated_at_t, "\n")
cat("Rolls for navigation at time t: ", rolls_for_navigation, "\n")
cat("Agents navigated per roll: ", number_navigated_at_t/rolls_for_navigation, "\n")
#cat(capture.output(table(net.f %v% "diagnostic_test_complete", exclude = NULL)),file="dt.complete.txt",sep="\n",append=TRUE)
# Persist the updated state back into the network's vertex attributes.
net.f %v% "diagnostic_referral_counter" <- diagnostic_referral_counter
net.f %v% "screening_referral_counter" <- screening_referral_counter
net.f %v% "navigated" <- navigated
net.f %v% "screening_referral" <- screening_referral
net.f %v% "diagnostic_referral" <- diagnostic_referral
net.f %v% "screening_referral_checker" <- screening_referral_checker
net.f %v% "navigation_start_time" <- navigation_start_time
net.f %v% "navigation_end_time" <- navigation_end_time
return(net.f)
}
|
81489b25639238341b5b0a6201af8db5150a8ed7
|
3fb74b4fb8458a4328b90511a25d71778e71f107
|
/scripts/utils.R
|
7a5ccaa4bcae33445ec40fd7252d02ce2524235f
|
[] |
no_license
|
chblanc/zillow
|
52b5d5096ccc054467c477a4c9063e44cb29eedf
|
982872aaf46d95ac5dfa37da686dd55bdd3daebd
|
refs/heads/master
| 2021-01-25T10:55:58.939291
| 2017-06-18T21:09:21
| 2017-06-18T21:09:21
| 93,894,240
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,140
|
r
|
utils.R
|
# =========================================================================== #
# Utility Functions
# =========================================================================== #
# =================================== #
# plotting
# =================================== #
ggplotMissing <- function(x) {
  # Quick-and-dirty exploration of missingness in a data frame.
  #
  # Args:
  #   x: a dataframe
  #
  # Returns:
  #   A ggplot raster plot marking every cell of `x` as present or missing,
  #   with one column per variable and one row per observation.
  require(tidyr)
  require(ggplot2)
  # Build a long table of (row index, variable, is-missing flag).
  miss_flags <- as.data.frame(is.na(x))
  miss_flags <- mutate(miss_flags, index = row_number())
  miss_long <- gather(miss_flags, variable, value, -index)
  ggplot(data = miss_long,
         aes(x = variable,
             y = index)) +
    geom_raster(aes(fill = value)) +
    scale_fill_grey(name = "",
                    labels = c("Present","Missing")) +
    theme_minimal() +
    theme(axis.text.x = element_text(angle=45, vjust=0.5)) +
    labs(x = "Variables in Dataset",
         y = "Rows / Observations")
}
# =================================== #
# z-scores
# =================================== #
#' define functions to calculate z-scores for continuous variables
getZscore <- function(x, ...) {
  # Standardize a numeric vector: (x - mean(x)) / sd(x).
  # Extra arguments (e.g. na.rm = TRUE) are forwarded to mean() and sd().
  #
  # Args:
  #   x: a vector of numeric values
  # Returns:
  #   A z-score vector the same length as `x`. Errors if `x` is not numeric.
  stopifnot(is.numeric(x))
  centered <- x - mean(x, ...)
  centered / sd(x, ...)
}
getRobustZscore <- function(x, ...) {
  # Robust standardization: deviation of `x` from its median, scaled by the
  # median absolute deviation (mad). Extra arguments (e.g. na.rm = TRUE)
  # are forwarded to median() and mad().
  #
  # Args:
  #   x: a vector of numeric values
  # Returns:
  #   A robust z-score vector the same length as `x`. Errors if `x` is not
  #   numeric.
  stopifnot(is.numeric(x))
  (x - median(x, ...)) / mad(x, ...)
}
|
f2b83805fedec580bcbabc5fa7e2954fff5c3b84
|
b79956f25c9cc130ef7bf41629ed5909467bd4de
|
/man/whoconnected.Rd
|
c204a5543973f4802c2610f68e5105dbcb6e9bff
|
[] |
no_license
|
mbtyers/riverdist
|
652f6ca7153722741c5fa450834fe5d6e6be938e
|
160e9368d420b960f776a3e93f1b84b716a19e23
|
refs/heads/master
| 2023-08-09T18:23:30.990174
| 2023-08-07T17:11:08
| 2023-08-07T17:11:08
| 47,280,222
| 21
| 1
| null | 2023-08-02T18:37:35
| 2015-12-02T18:32:05
|
R
|
UTF-8
|
R
| false
| true
| 627
|
rd
|
whoconnected.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/riverdist_1.R
\name{whoconnected}
\alias{whoconnected}
\title{Check Which Segments are Connected to a Given Segment.}
\usage{
whoconnected(seg, rivers)
}
\arguments{
\item{seg}{The segment to check}
\item{rivers}{The river network object it belongs to}
}
\value{
A vector of segment numbers
}
\description{
Returns which segments are connected to a specified segment within a river network. It may be useful for error checking.
}
\examples{
data(Gulk)
plot(Gulk)
whoconnected(seg=4, rivers=Gulk)
}
\author{
Matt Tyers
}
|
96525f828b17422043dfe647de46c053aec9c8ad
|
01835557bc01c93e3927b05d7dcb86ac5f6f32d9
|
/man/getVideoMetrics.Rd
|
9aca1d54199139be99adddc04495edf5908f905c
|
[
"MIT"
] |
permissive
|
EricGoldsmith/rYouTube
|
ea414e2b3e3a9f5d0eff37f9f0db7316b586e953
|
50dbdc6b10875a19761ced733f3ca38014d937a5
|
refs/heads/main
| 2023-04-18T15:34:03.485125
| 2021-04-30T22:12:19
| 2021-04-30T22:12:19
| 363,250,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,017
|
rd
|
getVideoMetrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getVideoMetrics.R
\name{getVideoMetrics}
\alias{getVideoMetrics}
\title{Get metrics for a list of videos}
\usage{
getVideoMetrics(token, contentOwner, from, to, videos, metrics = NULL)
}
\arguments{
\item{token}{Access token}
\item{contentOwner}{Content owner}
\item{from}{Starting date}
\item{to}{Ending date}
\item{videos}{List of videos}
\item{metrics}{List of metrics. Defaults to \code{c("views", "comments", "likes", "dislikes", "shares", "averageViewDuration", "averageViewPercentage")}}
}
\value{
Returns a \code{\link{data.frame}} of results
}
\description{
Get metrics for a list of videos
\href{https://developers.google.com/youtube/analytics/content_owner_reports}{https://developers.google.com/youtube/analytics/content_owner_reports}
}
\examples{
\dontrun{
videoMetrics <- getVideoMetrics(token, contentOwner = "ContentOwner",
from = "2019-02-03", to = "2019-02-09", videos$video)
}
}
|
809c7fffb2b424aba294deeb756c78722bf996c7
|
f8078eb0b79e56424915ceb1921a8ab11e5f12f7
|
/man/tradeOffTable.Rd
|
12a3d94c8a0168ec780dcf5f0c454a761eb8b54e
|
[] |
no_license
|
cran/pid
|
274b1a98e714a8635f9bfed0c77754f7488729bd
|
08e7af35733ea942ce048ab6fdd229306c66a7fb
|
refs/heads/master
| 2020-04-06T04:10:07.360728
| 2018-11-23T16:30:03
| 2018-11-23T16:30:03
| 38,597,979
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,323
|
rd
|
tradeOffTable.Rd
|
% http://cran.r-project.org/doc/manuals/r-release/R-exts.html
\name{tradeOffTable}
%name typically is the basename of the Rd file containing the documentation. It is the "name" of the Rd object represented by the file and has to be unique in a package. To avoid problems with indexing the package manual, it may not contain `, | nor @ , and to avoid possible problems with the HTML help system it should not contain '/' nor a space. (LaTeX special characters are allowed, but may not be collated correctly in the index.) There can only be one \name entry in a file, and it must not contain any markup. Entries in the package manual will be in alphabetic order of the \name entries.
\alias{tradeOffTable}
%The \alias sections specify all "topics" the file documents. This information is collected into index data bases for lookup by the on-line (plain text and HTML) help systems. The topic can contain spaces, but (for historical reasons) leading and trailing spaces will be stripped. Percent and left brace need to be escaped by a backslash.
\title{A trade-off table of fractional factorial designs}
%Title information for the Rd file. This should be capitalized and not end in a period; try to limit its length to at most 65 characters for widest compatibility.
% There must be one (and only one) \title section in a help file.
\description{Creates a new plot that shows a trade-off table for fractional factorial designs.}
%A short description of what the function(s) do(es) (one paragraph, a few lines only). (If a description is too long and cannot easily be shortened, the file probably tries to document too much at once.) This is mandatory except for package-overview files.
\usage{tradeOffTable()}
%One or more lines showing the synopsis of the function(s) and variables documented in the file. These are set in typewriter font. This is an R-like command.
% The usage information specified should match the function definition exactly (such that automatic checking for consistency between code and documentation is possible).
% A key:value list of input arguments.
% \arguments{}
% A detailed if possible precise description of the functionality provided, extending the basic information in the \description slot.
\details{Displays the following trade-off table:
\if{html}{\figure{trade-off-table.png}{options: width="100\%" alt="Figure: DOE-trade-off-table.png"}}
\if{latex}{\figure{trade-off-table.pdf}{options: width=7cm}}
The rows in the table are the number of experiments done in the fractional factorial (\eqn{n}).\cr
The columns are the number of factors under investigation in the design (\eqn{k}).\cr
The cell at a particular row/column intersection gives several pieces of information:
\itemize{
\item The top-left entry of the form: \eqn{2^{k-p}=n}. For example, \eqn{p=1} corresponds to half-fractions,
and \eqn{p=2} corresponds to quarter-fractions.
\item The subscript in the top-left entry, written in Roman numerals gives the design resolution. For example, \eqn{IV}
corresponds to a resolution 4 design, implying 2-factor interactions are at most confounded with other 2-factor interactions.
\item The bold entries in the bottom-right tell how to generate the remaining factors in the design. \cr
A "full" entry indicates a full factorial; while "twice" indicates a twice replicated full factorial.
}
Blank entries are impossible fractional factorial combinations.
A detailed explanation of the table is provided in the book reference.
}
% Description of the function's return value.
\value{Create a new plot displaying the trade-off table.}
% A section with references to the literature. Use \url{} or \href{}{} for web pointers.
\references{Chapter 5 of the following book: Kevin Dunn, 2010 to 2019, \emph{Process Improvement using Data}, \url{https://learnche.org/pid}
Please see this paper to gain an understanding of how these trade-off tables are constructed:\cr
Arthur Fries and William G. Hunter, (1980) Minimum Aberration \eqn{2^{k-p}} Designs, \emph{Technometrics}, \bold{22}(4), pp. 601-608, \url{https://www.jstor.org/stable/1268198}
}
% Use this for a special note you want to have pointed out. Multiple \note sections are allowed, but might be confusing to the end users.
\note{
Certain blocks are not unique. For example, a \eqn{2^{8-3}} resolution IV design (with 32 runs and 8 factors) is shown as having +/-\bold{F = ABC}, +/-\bold{G=ABD} and +/-\bold{H=ACDE}. But another option is +/-\bold{H=BCDE}, which you might see in other software, or tables in textbooks.
}
\author{Kevin Dunn, <kgdunn@gmail.com>}
%Information about the author(s) of the Rd file. Use \email{} without extra delimiters to specify email addresses, or \url{} or \href{}{} for web pointers.
\seealso{\code{\link{tradeoff}} which can be used to extend the table out to more factors or more experiments.}
%Pointers to related R objects, using \code{\link{...}} to refer to them (\code is the correct markup for R object names, and \link produces hyperlinks in output formats which support this. See Marking text, and Cross-references).
\examples{
tradeOffTable()
}
%Examples are not only useful for documentation purposes, but also provide test code used for diagnostic checking of R code. By default, text inside \examples{} will be displayed in the output of the help page and run by example() and by R CMD check. You can use \dontrun{} for text that should only be shown, but not run, and \dontshow{} for extra commands for testing that should not be shown to users, but will be run by example(). (Previously this was called \testonly, and that is still accepted.)
% Text inside \dontrun{} is 'verbatim', but the other parts of the \examples section are R-like text.
%For example,
%x <- runif(10) # Shown and run.
%\dontrun{plot(x)} # Only shown.
%\dontshow{log(x)} # Only run.
%Thus, example code not included in \dontrun must be executable! In addition, it should not use any system-specific features or require special facilities (such as Internet access or write permission to specific directories). Text included in \dontrun is indicated by comments in the processed help files: it need not be valid R code but the escapes must still be used for %, \ and unpaired braces as in other verbatim text.
\concept{ design of experiments }
|
19be00e5ac1ca7cb740d1138127748acf6f55454
|
baf88771e84f099c939564f6afe9efd2d330b984
|
/inst/script/pathwaysTXT.R
|
c9543888ed9a745a683102774dedd86c74d5ba75
|
[
"MIT"
] |
permissive
|
rosscm/fedup
|
0ba6e27f4f0fb66336bfc24690da5a0796a9a4d6
|
dab35971ae2cab7d0734d00c80d1de5275493c3e
|
refs/heads/main
| 2023-05-31T11:02:17.429245
| 2021-07-12T21:34:08
| 2021-07-12T21:34:08
| 302,429,834
| 7
| 0
|
MIT
| 2021-05-25T18:26:23
| 2020-10-08T18:32:57
|
R
|
UTF-8
|
R
| false
| false
| 1,303
|
r
|
pathwaysTXT.R
|
# Builds the packaged `pathwaysTXT` dataset for fedup from the Costanzo 2016
# SAFE analysis supplement. The commented-out section documents how the
# annotated SAFE_terms.txt was originally produced from the raw xlsx via
# biomaRt; only the final read + save steps run.
# Download raw data from https://boonelab.ccbr.utoronto.ca/supplement/costanzo2016/
# Data file S5 (sheet 3)
library(openxlsx)
library(tibble)
library(biomaRt)
library(dplyr)
# Use biomaRt to get gene members per SAFE term
#pathwayFile <- system.file("extdata", "Data_File_S5_SAFE_analysis_Gene_cluster_identity_and_functional_enrichments.xlsx", package = "fedup")
#pathway <- read.xlsx(pathwayFile, sheet = 3)
# Query Ensembl for gene symbols annotated to SAFE terms
#ensembl <- useMart("ensembl", dataset = "scerevisiae_gene_ensembl")
#ensembl_gene <- getBM(
# attributes = c("go_id", "ensembl_gene_id", "external_gene_name"),
# mart = ensembl
#)
#colnames(ensembl_gene) <- c("Enriched.GO.IDs", "ORF.ID", "Gene.ID")
#pathway <- left_join(pathway, ensembl_gene, by = "Enriched.GO.IDs")
#write.table(pathway, file.path("inst", "extdata", "SAFE_terms.txt"), quote = FALSE, sep = "\t")
# Raw data file annotated with gene symbols
pathwayFile <- system.file("extdata", "SAFE_terms.txt", package = "fedup")
pathwaysTXT <- readPathways(
pathwayFile,
header = TRUE,
pathCol = "Enriched.GO.names",
geneCol = "Gene.ID"
)
# Transliterate accented characters to plain ASCII in pathway names.
names(pathwaysTXT) <- stringi::stri_trans_general(names(pathwaysTXT), "latin-ascii")
# Save as a compressed package dataset (data/pathwaysTXT.rda).
usethis::use_data(pathwaysTXT, compress = "xz", version = 2, overwrite = TRUE)
|
cccd365af2825552450002210f0067660af20c12
|
2c38fc71287efd16e70eb69cf44127a5f5604a81
|
/R/tar_built.R
|
d0af9e05c0028745935e135ce50e7ba6e9de9043
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ropensci/targets
|
4ceef4b2a3cf7305972c171227852338dd4f7a09
|
a906886874bc891cfb71700397eb9c29a2e1859c
|
refs/heads/main
| 2023-09-04T02:27:37.366455
| 2023-09-01T15:18:21
| 2023-09-01T15:18:21
| 200,093,430
| 612
| 57
|
NOASSERTION
| 2023-08-28T16:24:07
| 2019-08-01T17:33:25
|
R
|
UTF-8
|
R
| false
| false
| 1,276
|
r
|
tar_built.R
|
#' @title List built targets.
#' @export
#' @family progress
#' @description List targets whose progress is `"built"`.
#' @return A character vector of built targets.
#' @inheritParams tar_progress
#' @param names Optional, names of the targets. If supplied, the
#'   function restricts its output to these targets.
#'   You can supply symbols
#'   or `tidyselect` helpers like [any_of()] and [starts_with()].
#' @examples
#' if (identical(Sys.getenv("TAR_EXAMPLES"), "true")) { # for CRAN
#' tar_dir({ # tar_dir() runs code from a temp dir for CRAN.
#' tar_script({
#'   list(
#'     tar_target(x, seq_len(2)),
#'     tar_target(y, 2 * x, pattern = map(x))
#'   )
#' }, ask = FALSE)
#' tar_make()
#' tar_built()
#' tar_built(starts_with("y_")) # see also any_of()
#' })
#' }
tar_built <- function(
  names = NULL,
  store = targets::tar_config_get("store")
) {
  tar_assert_allow_meta("tar_built")
  # Load the condensed progress records for this data store.
  records <- progress_init(path_store = store)
  records <- tibble::as_tibble(records$database$read_condensed_data())
  # Resolve the tidyselect expression against the known target names.
  selected <- tar_tidyselect_eval(rlang::enquo(names), records$name)
  if (!is.null(selected)) {
    records <- records[match(selected, records$name), , drop = FALSE] # nolint
  }
  # Keep only targets whose recorded progress is "built".
  records$name[records$progress == "built"]
}
|
3ad9bae6f9703be54d472d6260d09b4b1c6747a5
|
bc6bb93c2b160be814a0b95d58a63663e834ca35
|
/distrplots/ui.R
|
715a7c91a35aa672890418bc5691ceb30653f3ba
|
[] |
no_license
|
MaciekNowak/Developing-Data-Products
|
f91e9dac3a626c3848fbd240c86940f669a19945
|
92a4574ca49fdc30fbcfd497d65e465a515d0875
|
refs/heads/master
| 2021-01-10T17:01:51.894601
| 2015-11-12T03:56:53
| 2015-11-12T03:56:53
| 45,958,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,011
|
r
|
ui.R
|
#
# The UI part of the Developing Data Products Coursera Course.
# Defines a sidebar layout: distribution picker plus parameter sliders on
# the left, a tabbed plot/help area on the right. All inputs/outputs are
# read and populated by the matching server.R.
#
library(shiny)
shinyUI(pageWithSidebar(
# The header panel with a title
#
headerPanel("Various distributions plots"),
# The side bar panel with controls that help adjust plots' parameters
#
sidebarPanel(
# Select a distribution
# (values "norm"/"logis"/"lnorm" are presumably combined server-side with
# the funType prefix to build r/d/p function names -- confirm in server.R)
#
selectInput("distName", label = "Distribution:",
choices = list("Normal" = "norm", "Logistic" = "logis",
"Log Normal" = "lnorm"), selected = "norm"),
# How many values
#
sliderInput("n",
"Observations:",
min = 1,
max = 1000,
value = 500),
# The first parameter - varies depending on the distribution
# (label text comes from the server via textOutput("arg1"))
#
strong(textOutput("arg1")),
sliderInput("arg1",
"",
min = -10,
max = 10,
value = 0),
# The second parameter - varies depending on the distribution
#
strong(textOutput("arg2")),
sliderInput("arg2",
"",
min = 0,
max = 10,
value = 1),
# Choose the function
#
radioButtons("funType", "Function:",
list("Distribution" = "r",
"Density" = "d",
"Cumulative probability" = "p"))
),
# The main panel consists of plots and help tabs
#
mainPanel(
tabsetPanel(
tabPanel("Plot", plotOutput("thePlot")),
tabPanel("Help/Documentation",
textOutput("help1"), br(),
textOutput("help2"), br(),
textOutput("help3"), br(),
textOutput("help4"), br(),
textOutput("help5"))
)
)
))
|
6078d6b4e181a06f5874d8dcb6a37db08e6389a5
|
f0fbf8f001e103c50309d68cbcc3cd88266c9833
|
/R/recode_values.R
|
5a2738d13c2a8d78fc5af2f1cfc4676655dbfc9d
|
[] |
no_license
|
DaanNieboer/DCTFmisc
|
13bebc942105563c4c6b583517623d3915358800
|
5dbffb1d11dfc515a4b899af9c1c3bde20948363
|
refs/heads/master
| 2021-04-29T17:57:54.045871
| 2018-03-06T11:17:26
| 2018-03-06T11:17:26
| 121,683,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 468
|
r
|
recode_values.R
|
#' Recode the values in a vector
#'
#' Each element of \code{x} is located in \code{from}, and the value at the
#' matching position of \code{to} is returned. Elements of \code{x} absent
#' from \code{from} become \code{NA}.
#'
#' @param x vector containing values
#' @param from vector containing the unique elements of x
#' @param to values in which the corresponding elements of from need to be changed in
#' @return Returns a vector where the values of x are changed from the values in the vector from to the values in the vector to
recode_values <- function(x, from, to){
  to[match(x, from)]
}
|
7893c2e63433b804816706fb19f4cdee9965148c
|
10e2f579a7e84ef8f7186265fb1fc12c9db62bde
|
/demo/plotKML.R
|
c00108bd29cc781060689460853edf2c7630b8e0
|
[] |
no_license
|
cran/plotKML
|
bddd88464e2fa5b0c981086a4f8a33a4fdbeac37
|
068aaaf06a1976d202222142a95f2e951da0f604
|
refs/heads/master
| 2022-06-30T19:58:42.092133
| 2022-06-07T13:00:02
| 2022-06-07T13:00:02
| 17,698,575
| 8
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,927
|
r
|
plotKML.R
|
## Complete tutorial available at: [http://plotkml.r-forge.r-project.org]
plotKML.env(kmz = FALSE)
## -------------- SpatialPointsDataFrame --------- ##
library(sp)
library(rgdal)
data(eberg)
coordinates(eberg) <- ~X+Y
proj4string(eberg) <- CRS("+init=epsg:31467")
## subset to 20 percent:
eberg <- eberg[runif(nrow(eberg))<.1,]
## bubble type plot:
plotKML(eberg["CLYMHT_A"])
plotKML(eberg["CLYMHT_A"], colour_scale=rep("#FFFF00", 2), points_names="")
## -------------- SpatialLinesDataFrame --------- ##
data(eberg_contours)
plotKML(eberg_contours)
## plot contour lines with actual altitudes:
plotKML(eberg_contours, colour=Z, altitude=Z)
## -------------- SpatialPolygonsDataFrame --------- ##
data(eberg_zones)
plotKML(eberg_zones["ZONES"])
## add altitude:
zmin = 230
plotKML(eberg_zones["ZONES"], altitude=zmin+runif(length(eberg_zones))*500)
## -------------- SpatialPixelsDataFrame --------- ##
library(rgdal)
library(raster)
data(eberg_grid)
gridded(eberg_grid) <- ~x+y
proj4string(eberg_grid) <- CRS("+init=epsg:31467")
TWI <- reproject(eberg_grid["TWISRT6"])
data(SAGA_pal)
plotKML(TWI, colour_scale = SAGA_pal[[1]])
## set limits manually (increase resolution):
plotKML(TWI, z.lim=c(12,20), colour_scale = SAGA_pal[[1]],
png.width = gridparameters(TWI)[1,"cells.dim"]*5,
png.height = gridparameters(TWI)[2,"cells.dim"]*5)
## categorical data:
eberg_grid$LNCCOR6 <- as.factor(paste(eberg_grid$LNCCOR6))
levels(eberg_grid$LNCCOR6)
data(worldgrids_pal)
## attr(worldgrids_pal["corine2k"][[1]], "names")
pal = as.character(worldgrids_pal["corine2k"][[1]][c(1,11,13,14,16,17,18)])
LNCCOR6 <- reproject(eberg_grid["LNCCOR6"])
plotKML(LNCCOR6, colour_scale=pal)
## -------------- SpatialPhotoOverlay --------- ##
library(RCurl)
imagename = "Soil_monolith.jpg"
urlExists = url.exists("https://commons.wikimedia.org")
if(urlExists){
x1 <- getWikiMedia.ImageInfo(imagename)
sm <- spPhoto(filename = x1$url$url, exif.info = x1$metadata)
# str(sm)
plotKML(sm)
}
# ## -------------- SoilProfileCollection --------- ##
# library(aqp)
# library(plyr)
# ## sample profile from Nigeria:
# lon = 3.90; lat = 7.50; id = "ISRIC:NG0017"; FAO1988 = "LXp"
# top = c(0, 18, 36, 65, 87, 127)
# bottom = c(18, 36, 65, 87, 127, 181)
# ORCDRC = c(18.4, 4.4, 3.6, 3.6, 3.2, 1.2)
# hue = c("7.5YR", "7.5YR", "2.5YR", "5YR", "5YR", "10YR")
# value = c(3, 4, 5, 5, 5, 7); chroma = c(2, 4, 6, 8, 4, 3)
# ## prepare a SoilProfileCollection:
# prof1 <- join(data.frame(id, top, bottom, ORCDRC, hue, value, chroma),
# data.frame(id, lon, lat, FAO1988), type='inner')
# prof1$soil_color <- with(prof1, munsell2rgb(hue, value, chroma))
# depths(prof1) <- id ~ top + bottom
# site(prof1) <- ~ lon + lat + FAO1988
# coordinates(prof1) <- ~ lon + lat
# proj4string(prof1) <- CRS("+proj=longlat +datum=WGS84")
# prof1
# plotKML(prof1, var.name="ORCDRC", color.name="soil_color")
## -------------- STIDF --------- ##
library(spacetime)
## daily temperatures for Croatia:
data(HRtemp08)
## format the time column:
## NOTE(review): this chunk continues a longer examples file -- `HRtemp08`
## must already be loaded at this point.
## Parse ISO-8601 time stamps; the trailing "Z" is matched literally and the
## result is in the local time zone -- confirm UTC is intended.
HRtemp08$ctime <- as.POSIXct(HRtemp08$DATE, format="%Y-%m-%dT%H:%M:%SZ")
## create a STIDF object (space-time irregular points):
sp <- SpatialPoints(HRtemp08[,c("Lon","Lat")])
proj4string(sp) <- CRS("+proj=longlat +datum=WGS84")
HRtemp08.st <- STIDF(sp, time = HRtemp08$ctime, data = HRtemp08[,c("NAME","TEMP")])
## subset to first 500 records:
HRtemp08_jan <- HRtemp08.st[1:500]
str(HRtemp08_jan)
## dtime = 24*3600 gives each observation a one-day validity window in the KML:
plotKML(HRtemp08_jan[,,"TEMP"], dtime = 24*3600, LabelScale = .4)
## foot-and-mouth disease data:
data(fmd)
fmd0 <- data.frame(fmd)
coordinates(fmd0) <- c("X", "Y")
## British National Grid:
proj4string(fmd0) <- CRS("+init=epsg:27700")
fmd_sp <- as(fmd0, "SpatialPoints")
dates <- as.Date("2001-02-18")+fmd0$ReportedDay
library(spacetime)
fmd_ST <- STIDF(fmd_sp, dates, data.frame(ReportedDay=fmd0$ReportedDay))
data(SAGA_pal)
plotKML(fmd_ST, colour_scale=SAGA_pal[[1]])
## -------------- STFDF --------- ##
## results of krigeST:
library(gstat)
library(sp)
library(spacetime)
library(raster)
## define space-time variogram (sum-metric model: spatial + temporal + joint):
sumMetricVgm <- vgmST("sumMetric",
space=vgm( 4.4, "Lin", 196.6, 3),
time =vgm( 2.2, "Lin", 1.1, 2),
joint=vgm(34.6, "Exp", 136.6, 12),
stAni=51.7)
## example from the gstat package:
data(air)
rural = STFDF(stations, dates, data.frame(PM10 = as.vector(air)))
## subset to three days and convert to the sparse representation:
rr <- rural[,"2005-06-01/2005-06-03"]
rr <- as(rr,"STSDF")
## build a 1-degree prediction grid covering Germany:
x1 <- seq(from=6,to=15,by=1)
x2 <- seq(from=48,to=55,by=1)
DE_gridded <- SpatialPoints(cbind(rep(x1,length(x2)), rep(x2,each=length(x1))),
proj4string=CRS(proj4string(rr@sp)))
gridded(DE_gridded) <- TRUE
DE_pred <- STF(sp=as(DE_gridded,"SpatialPoints"), time=rr@time)
DE_kriged <- krigeST(PM10~1, data=rr, newdata=DE_pred,
modelList=sumMetricVgm)
gridded(DE_kriged@sp) <- TRUE
stplot(DE_kriged)
## plot in Google Earth; render each grid cell as a 20x20 px block in the PNG:
png.width = DE_kriged@sp@grid@cells.dim[1]*20
png.height = DE_kriged@sp@grid@cells.dim[2]*20
## shared colour range so observations and predictions are comparable:
z.lim = range(DE_kriged@data, na.rm=TRUE)
plotKML(DE_kriged, png.width=png.width,
png.height=png.height, z.lim=z.lim)
## add observations points:
plotKML(rr, z.lim=z.lim)
## -------------- STTDF --------- ##
#library(fossil)
library(spacetime)
library(adehabitatLT)
data(gpxbtour)
## format the time column:
gpxbtour$ctime <- as.POSIXct(gpxbtour$time, format="%Y-%m-%dT%H:%M:%SZ")
coordinates(gpxbtour) <- ~lon+lat
proj4string(gpxbtour) <- CRS("+proj=longlat +datum=WGS84")
## cumulative great-circle distance (km) of every point from the start point:
xy <- as.list(data.frame(t(coordinates(gpxbtour))))
gpxbtour$dist.km <- sapply(xy, function(x) {
deg.dist(long1=x[1], lat1=x[2], long2=xy[[1]][1], lat2=xy[[1]][2])
} )
## convert to a STTDF (trajectory) class:
gpx.ltraj <- as.ltraj(coordinates(gpxbtour), gpxbtour$ctime, id = "th")
gpx.st <- as(gpx.ltraj, "STTDF")
gpx.st$speed <- gpxbtour$speed
gpx.st@sp@proj4string <- CRS("+proj=longlat +datum=WGS84")
str(gpx.st)
plotKML(gpx.st, colour="speed")
## -------------- Spatial Metadata --------- ##
data(eberg)
coordinates(eberg) <- ~X+Y
proj4string(eberg) <- CRS("+init=epsg:31467")
## subset to roughly 10 percent of the rows (random sample):
eberg <- eberg[runif(nrow(eberg))<.1,]
eberg.md <- spMetadata(eberg["SNDMHT_A"],
Citation_title = 'Ebergotzen data set',
Citation_URL = 'http://geomorphometry.org/content/ebergotzen')
plotKML(eberg["CLYMHT_A"], metadata=eberg.md)
## -------------- RasterBrickTimeSeries --------- ##
library(raster)
library(sp)
data(LST)
gridded(LST) <- ~lon+lat
proj4string(LST) <- CRS("+proj=longlat +datum=WGS84")
## extract acquisition dates from the layer names ("LSTyyyy_mm_dd"):
dates <- sapply(strsplit(names(LST), "LST"), function(x){x[[2]]})
datesf <- format(as.Date(dates, "%Y_%m_%d"), "%Y-%m-%dT%H:%M:%SZ")
## begin / end dates +/- 4 days:
TimeSpan.begin = as.POSIXct(unclass(as.POSIXct(datesf))-4*24*60*60, origin="1970-01-01")
TimeSpan.end = as.POSIXct(unclass(as.POSIXct(datesf))+4*24*60*60, origin="1970-01-01")
## pick climatic stations in the area (first record per station name):
pnts <- HRtemp08[which(HRtemp08$NAME=="Pazin")[1],]
pnts <- rbind(pnts, HRtemp08[which(HRtemp08$NAME=="Crni Lug - NP Risnjak")[1],])
pnts <- rbind(pnts, HRtemp08[which(HRtemp08$NAME=="Cres")[1],])
coordinates(pnts) <- ~Lon + Lat
proj4string(pnts) <- CRS("+proj=longlat +datum=WGS84")
## get the dates from the file names:
LST_ll <- brick(LST[1:5])
LST_ll@title = "Time series of MODIS Land Surface Temperature images"
LST.ts <- new("RasterBrickTimeSeries", variable = "LST", sampled = pnts,
rasters = LST_ll, TimeSpan.begin = TimeSpan.begin[1:5],
TimeSpan.end = TimeSpan.end[1:5])
data(SAGA_pal)
## plot MODIS images in Google Earth:
plotKML(LST.ts, colour_scale=SAGA_pal[[1]])
## -------------- Spatial Predictions --------- ##
library(sp)
library(rgdal)
library(gstat)
data(meuse)
coordinates(meuse) <- ~x+y
proj4string(meuse) <- CRS("+init=epsg:28992")
## load grids:
data(meuse.grid)
gridded(meuse.grid) <- ~x+y
proj4string(meuse.grid) <- CRS("+init=epsg:28992")
## fit a regression-kriging model (log-link on organic matter vs. distance):
library(GSIF)
omm <- fit.gstatModel(observations = meuse, formulaString = om~dist,
family = gaussian(log), covariates = meuse.grid)
## produce SpatialPredictions:
om.rk <- predict(omm, predictionLocations = meuse.grid)
## plot the whole geostatical mapping project in Google Earth:
plotKML(om.rk, colour_scale = SAGA_pal[[1]],
png.width = gridparameters(meuse.grid)[1,"cells.dim"]*5,
png.height = gridparameters(meuse.grid)[2,"cells.dim"]*5)
## plot each cell as polygon:
plotKML(om.rk, colour_scale = SAGA_pal[[1]], grid2poly = TRUE)
## -------------- SpatialSamplingPattern --------- ##
## (disabled example: requires the spcosa package and a live network check)
#library(spcosa)
#library(sp)
## read a polygon map:
#shpFarmsum <- readOGR(dsn = system.file("maps", package = "spcosa"),
# layer = "farmsum")
## stratify `Farmsum' into 50 strata
#myStratification <- stratify(shpFarmsum, nStrata = 50)
## sample two sampling units per stratum
#mySamplingPattern <- spsample(myStratification, n = 2)
## attach the correct proj4 string:
#library(RCurl)
#urlExists = url.exists("https://spatialreference.org/ref/sr-org/6781/proj4/")
#if(urlExists){
# nl.rd <- getURL("https://spatialreference.org/ref/sr-org/6781/proj4/")
# proj4string(mySamplingPattern@sample) <- CRS(nl.rd)
# # prepare spatial domain (polygons):
# sp.domain <- as(myStratification@cells, "SpatialPolygons")
# sp.domain <- SpatialPolygonsDataFrame(sp.domain,
# data.frame(ID=as.factor(myStratification@stratumId)), match.ID = FALSE)
# proj4string(sp.domain) <- CRS(nl.rd)
# # create new object:
# mySamplingPattern.ssp <- new("SpatialSamplingPattern",
# method = class(mySamplingPattern), pattern = mySamplingPattern@sample,
# sp.domain = sp.domain)
# # the same plot now in Google Earth:
# shape = "http://maps.google.com/mapfiles/kml/pal2/icon18.png"
# plotKML(mySamplingPattern.ssp, shape = shape)
#}
## -------------- RasterBrickSimulations --------- ##
library(sp)
library(gstat)
data(barxyz)
## define the projection system:
prj = "+proj=tmerc +lat_0=0 +lon_0=18 +k=0.9999 +x_0=6500000 +y_0=0
+ellps=bessel +units=m
+towgs84=550.499,164.116,475.142,5.80967,2.07902,-11.62386,0.99999445824"
coordinates(barxyz) <- ~x+y
proj4string(barxyz) <- CRS(prj)
data(bargrid)
coordinates(bargrid) <- ~x+y
gridded(bargrid) <- TRUE
proj4string(bargrid) <- CRS(prj)
## fit a variogram and generate simulations:
Z.ovgm <- vgm(psill=1352, model="Mat", range=650, nugget=0, kappa=1.2)
## random ~20 percent subsample of the points to keep the run time down:
sel <- runif(length(barxyz$Z))<.2
## Note: this operation can be time consuming
sims <- krige(Z~1, barxyz[sel,], bargrid, model=Z.ovgm, nmax=20,
nsim=10, debug.level=-1)
## specify the cross-section (a horizontal line at y = 5073012):
t1 <- Line(matrix(c(bargrid@bbox[1,1], bargrid@bbox[1,2], 5073012, 5073012), ncol=2))
transect <- SpatialLines(list(Lines(list(t1), ID="t")), CRS(prj))
## glue to a RasterBrickSimulations object:
library(raster)
bardem_sims <- new("RasterBrickSimulations", variable = "elevations",
sampled = transect, realizations = brick(sims))
## plot the whole project and open in Google Earth:
data(R_pal)
plotKML(bardem_sims, colour_scale = R_pal[[4]])
## -------------- SpatialVectorsSimulations --------- ##
data(barstr)
data(bargrid)
library(sp)
coordinates(bargrid) <- ~ x+y
gridded(bargrid) <- TRUE
## output topology:
cell.size = bargrid@grid@cellsize[1]
bbox = bargrid@bbox
## NOTE(review): `nrows` is derived from the x-range and `ncols` from the
## y-range, so the names look swapped; they are used consistently below as
## cells.dim = c(x, y), which is what GridTopology expects -- confirm.
nrows = round(abs(diff(bbox[1,])/cell.size), 0)
ncols = round(abs(diff(bbox[2,])/cell.size), 0)
gridT = GridTopology(cellcentre.offset=bbox[,1],
cellsize=c(cell.size,cell.size),
cells.dim=c(nrows, ncols))
## count simulated stream vectors per grid cell:
bar_sum <- count.GridTopology(gridT, vectL=barstr[1:5])
## NOTE: this operation can be time consuming!
## plot the whole project and open in Google Earth:
plotKML(bar_sum,
png.width = gridparameters(bargrid)[1,"cells.dim"]*5,
png.height = gridparameters(bargrid)[2,"cells.dim"]*5)
|
74947b52d645b4aa10029fd4b831118900b0c315
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggspectra/examples/scale_x_wl_continuous.Rd.R
|
ad9a46ce036e0dd7edadbfa617b80910078090c1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
scale_x_wl_continuous.Rd.R
|
library(ggspectra)
### Name: scale_x_wl_continuous
### Title: Wavelength x-scale
### Aliases: scale_x_wl_continuous
### ** Examples
library(ggplot2)
library(photobiology)
## default wavelength axis (nanometres):
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous()
## rescaled to micrometres via unit.exponent = -6:
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(-6)
## add a secondary axis showing wavenumbers:
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(sec.axis = sec_axis_w_number())
## both: micrometre units plus a wavenumber secondary axis:
ggplot(sun.spct) +
geom_line() +
scale_x_wl_continuous(unit.exponent = -6,
sec.axis = sec_axis_w_number())
|
51d84833a89f8340c24fda8d93e541629da1b526
|
dd726f4f83fdb6ef8c4a2b7486795da27b1b4fc2
|
/r/2_19/data_processing/data_processing/Script.R
|
8ec910dd06d23b04ba18660b2a0fc76bbbb2d164
|
[] |
no_license
|
mgh3326/big_data_web
|
84890dc72cd0aa1dd49be736ab1c6963611ee4a5
|
f5cae3c710414697a1190ad57469f26dd9c87d8a
|
refs/heads/master
| 2023-02-20T07:28:32.024292
| 2019-09-04T15:49:13
| 2019-09-04T15:49:13
| 119,160,730
| 0
| 1
| null | 2023-02-15T21:30:18
| 2018-01-27T12:02:38
|
HTML
|
UHC
|
R
| false
| false
| 5,001
|
r
|
Script.R
|
# dplyr tutorial script (exercises from an introductory R textbook).
# Comments translated to English; two defects fixed (see BUG FIX notes).
install.packages("dplyr")
library(dplyr)
setwd("c:\\easy_r")
exam <- read.csv("csv_exam.csv")
exam
# filter(): keep rows matching a condition
exam %>% filter(class == 1)
exam %>% filter(class == 2)
exam %>% filter(class != 1)
exam %>% filter(class != 3)
exam %>% filter(math > 50)
exam %>% filter(math < 50)
exam %>% filter(math >= 80)
exam %>% filter(math <= 80)
exam %>% filter(class == 1 & math > 50)
exam %>% filter(class == 2 & math >= 80)
exam %>% filter(math >= 90 | english >= 90)
exam %>% filter(science < 50 | english < 90)
exam %>% filter(class == 1 | class == 3 | class == 5)
exam %>% filter(class %in% c(1, 3, 5))
class1 <- exam %>% filter(class == 1)
class2 <- exam %>% filter(class == 2)
mean(class1$math)
mean(class2$math)
library(ggplot2)
mpg <- as.data.frame(ggplot2::mpg)
# Q: morning exercises -- try it yourself (Chapter 6)
# Q1
mpg1 <- mpg %>% filter(displ <= 4)
mpg2 <- mpg %>% filter(displ >= 5)
mean(mpg1$hwy)
mean(mpg2$hwy)
# mpg1 has the larger mean highway mileage.
# Q2
mpg3 <- mpg %>% filter(manufacturer == "audi")
mpg4 <- mpg %>% filter(manufacturer == "toyota")
mean(mpg3$cty)
mean(mpg4$cty)
# mpg4 has the larger mean city mileage.
# Q3
mpg5 <- mpg %>% filter(manufacturer == "chevrolet" | manufacturer == "ford" | manufacturer == "honda")
mean(mpg5$hwy)
# select(): pick columns (negative selection drops them)
exam %>% select(math)
exam %>% select(english)
exam %>% select(class, math, english)
exam %>% select(-math)
exam %>% select(-math, - english)
exam %>% filter(class == 1) %>% select(english)
exam %>%
filter(class == 1) %>%
select(english)
exam %>%
select(id, math) %>%
head
exam %>%
select(id, math) %>%
head(10)
# Try it yourself
# Q1
mpg %>% select(class, cty)
# Q2
mpg6 <- mpg %>%
filter(class == "suv") %>%
select(cty)
mpg7 <- mpg %>%
filter(class == "compact") %>%
select(cty)
mean(mpg6$cty)
mean(mpg7$cty)
# compact has the larger mean city mileage.
# arrange(): sort rows (desc() for descending order)
exam %>% arrange(math)
exam %>% arrange(desc(math))
exam %>% arrange(class, math)
# Try it yourself
mpg8 <- mpg %>% filter(manufacturer == "audi")
mpg8 %>% arrange(desc(hwy)) %>% head()
# mutate(): derive new columns
exam %>% mutate(total = math + english + science) %>% head
exam %>%
mutate(total = math + english + science, mean = (math + english + science) / 3) %>% head
exam %>%
mutate(test = ifelse(science >= 60, "pass", "fail")) %>% head
exam %>%
mutate(total = math + english + science) %>% arrange(total) %>% head
# Try it yourself
# Q1
mpg_copy <- mpg %>% mutate(total = hwy + cty) %>% head
mpg_copy
# Q2
mpg_copy %>% mutate(total_var = (hwy + cty) / 2) %>% head
# Q3
mpg_copy %>% mutate(total_var = (hwy + cty) / 2) %>% arrange(total_var) %>% head(3)
# Q4
# summarise(): collapse rows to summary statistics, optionally per group
exam %>% summarise(mean_math = mean(math))
exam %>%
group_by(class) %>%
summarise(mean_math = mean(math))
exam %>%
group_by(class) %>%
summarise(mean_math = mean(math),
sum_math = sum(math),
median_math = median(math),
n = n())
mpg %>%
group_by(manufacturer, drv) %>%
summarise(mean_city = mean(cty)) %>% head(10)
# Combining dplyr verbs
# Problems
# 1
mpg %>%
group_by(manufacturer, drv)
# 2
mpg %>%
group_by(manufacturer, drv) %>%
filter(class == "suv")
# 3
mpg %>%
group_by(manufacturer, drv) %>%
filter(class == "suv") %>% mutate(total = hwy + cty)
# 4
mpg %>%
group_by(manufacturer, drv) %>%
filter(class == "suv") %>% mutate(total = hwy + cty) %>%
summarise(mean_total = mean(total))
# 5
mpg %>%
group_by(manufacturer, drv) %>%
filter(class == "suv") %>% mutate(total = hwy + cty) %>%
summarise(mean_total = mean(total)) %>% arrange(desc(mean_total))
# 6
mpg %>%
group_by(manufacturer, drv) %>%
filter(class == "suv") %>% mutate(total = hwy + cty) %>%
summarise(mean_total = mean(total)) %>% arrange(desc(mean_total)) %>% head(5)
# Try it yourself
# Q1
mpg %>%
group_by(class) %>%
summarise(mean_cty = mean(cty))
# Q2
mpg %>%
group_by(class) %>%
summarise(mean_cty = mean(cty)) %>% arrange((mean_cty))
# Q3
mpg %>% arrange(hwy) %>% head(3)
# Q4 (was unfinished). BUG FIX: the original line ended in a dangling `%>%`,
# which made the pipe consume the next assignment and error. Completed with
# summary statistics of cty for compact cars, mirroring the exam example.
mpg %>%
filter(class == "compact") %>%
summarise(mean_cty = mean(cty),
sum_cty = sum(cty),
median_cty = median(cty),
n = n())
# Joins: combine tables by a key column
# NOTE(review): both tables name their score column `midterm`, so the join
# yields midterm.x / midterm.y; the second was possibly meant to be `final`.
test1 <- data.frame(id = c(1, 2, 3, 4, 5), midterm = c(60, 80, 70, 90, 85))
test2 <- data.frame(id = c(1, 2, 3, 4, 5), midterm = c(70, 83, 65, 95, 80))
total <- left_join(test1, test2, by = "id")
total
name <- data.frame(class = c(1, 2, 3, 4, 5), teacher = c("kim", "lee", "park", "choi", "jung"))
name
exam_new <- left_join(exam, name, by = "class")
exam_new
# bind_rows(): stack data frames vertically
group_a <- data.frame(id = c(1, 2, 3, 4, 5), test = c(60, 80, 70, 90, 85))
group_b <- data.frame(id = c(6, 7, 8, 9, 10), test = c(70, 83, 65, 95, 80))
group_all <- bind_rows(group_a, group_b)
group_all
# Try it yourself
# BUG FIX: `stringAsFactor = F` was a typo (correct: stringsAsFactors) that
# silently added a bogus logical column to `fuel`.
fuel <- data.frame(fl = c("c", "d", "e", "p", "r"),
price_fl = c(2.35, 2.38, 2.11, 2.76, 2.22),
stringsAsFactors = FALSE)
fuel
# Q1
mpg %>% head(5)
total <- left_join(mpg, fuel, by = "fl")
# Analysis challenge
# Problem 1
midwest %>% mutate(total = poptotal) %>% head
|
062d9874dbe74565fa312258d5128336491c5cdc
|
0a42295e49af92434972d44674872ffa0d233db0
|
/man/TEIdy.Rd
|
7bd61559975d5675860d8361c9281e8a536a1eb9
|
[] |
no_license
|
HumanitiesDataAnalysis/TEIdy
|
130ee30914db62367dcbc7bfcfc7d536381fda49
|
facc5dabee401523a98ee46a7a00e9ecf631e8c6
|
refs/heads/master
| 2020-04-22T23:45:37.743425
| 2019-03-20T04:23:20
| 2019-03-20T04:23:20
| 170,752,248
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,032
|
rd
|
TEIdy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/framer.R
\name{TEIdy}
\alias{TEIdy}
\title{Load an XML document as a data.frame}
\usage{
TEIdy(fname, ignore = c(), discard = c(), keep = c())
}
\arguments{
\item{fname}{A filename}
\item{ignore}{Tags that, when encountered, will not be added as columns to the tidy representation. Data inside these tags
will be kept, but the tag will not be column.}
\item{discard}{Tags that, when encountered, will be discarded from the tidy representation. discard='teiHeader', for
example, will prevent header information from being included.}
\item{keep}{Tags to *keep* in the tidy representation. If this is used, 'ignore' and 'discard' arguments will apply
only inside tags defined by keep.}
}
\value{
A tibble, with a 'text' column containing the lowest-level text address found in the document.
}
\description{
Load an XML document as a data.frame
}
\examples{
tomorrow = system.file("extdata", "Tomorrow_and_tomorrow.xml", package="TEIdy")
TEIdy(tomorrow)
}
|
af411ee9c320fa30bec04acdc88a73cfafd574f7
|
4485cc6d9a2a089660ec7c5de835031c7719d031
|
/man/fill.f.Rd
|
0546be6c1e0b636b248b14cd78fa623ef605c7d0
|
[] |
no_license
|
theoldfather/KaggleR
|
02dbed9af4d210eee3d36cc5da73adfe4729ccec
|
df308ae6df89096c204c70343975d46ec55009e0
|
refs/heads/master
| 2021-01-20T17:02:35.249248
| 2016-06-29T02:48:30
| 2016-06-29T02:48:30
| 61,461,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 320
|
rd
|
fill.f.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{fill.f}
\alias{fill.f}
\title{Replace conditional on given function}
\usage{
fill.f(x, value, f = is.na)
}
\arguments{
\item{x}{values}
\item{value}{replacement value}
\item{f}{predicate function selecting which values to replace; defaults to \code{is.na}}
}
\description{
Replace conditional on given function
}
|
47f915b68ad70f0a544c190c5968398f946d1a8d
|
e6afe03209c6f0f522450857b1105a15d1cc0fb7
|
/R/potential-details.R
|
b6e8b950736155938faa29c8423f431ebb83065d
|
[] |
no_license
|
antiphon/PenGE
|
d5d73b032aead05bcd6aa19f23b169b69c0cf70a
|
020334fcedd3aac457b241bc3f8bca17afecf848
|
refs/heads/master
| 2021-06-28T16:02:49.054730
| 2019-07-30T08:44:03
| 2019-07-30T08:44:03
| 95,548,050
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,112
|
r
|
potential-details.R
|
#' Estimate Potential
#'
#' Details of the estimated interaction potential between two types
#'
#' @param fit Model fit returned by fitGlbin_CV
#' @param i Type 1 index, one of 1, ..., p
#' @param j Type 2 index, one of 1, ..., p
#' @param k Optional vector of lasso-path step indices (columns of the
#'   coefficient matrix) to extract; defaults to all steps.
#'
#' @details The lasso-path coefficients of the interaction parameters between types i and j.
#'
#' @export
potential <- function(fit, i, j, k = NULL) {
  Qpars <- fit$Qpars
  p <- fit$datainfo$p
  beta <- fit$fullfit$beta
  types <- fit$datainfo$types
  out <- list(model = "Multi-scale saturation potential", i = types[i], j = types[j], ij = c(i, j))
  # Default: return the coefficients for every step of the lasso path.
  if (is.null(k)) k <- seq_len(ncol(beta))
  if (i != j) {
    # Inter-type potential: map the unordered pair (i, j) to the linear index
    # of its entry in the upper-triangle enumeration used by Qpars.
    ii <- min(i, j) - 1
    jj <- max(i, j) - 1
    ijk <- (ii * (2 * p - ii - 3) + 2 * jj - 2) / 2 + 1
    out$range <- Qpars$ranges2[[ijk]]
    out$sat <- Qpars$sat2[[ijk]]
    # Coefficient rows for pairs are named "<type_i>v<type_j>".
    ri <- grep(paste0(types[ii + 1], "v", types[jj + 1]), rownames(beta))
    out$coef <- beta[ri, k]
  }
  else {
    # Intra-type potential. BUG FIX: the original referenced `ii`, which is
    # only defined in the i != j branch, so this branch always errored.
    out$range <- Qpars$ranges1[[i]]
    out$sat <- Qpars$sat1[[i]]
    ri <- grep(paste0("intra_", types[i]), rownames(beta))
    out$coef <- beta[ri, k]
  }
  out
}
|
4d299c4dbc5e6e73e9b62af50cc0757161db5a0c
|
d4937db239ca48f728ab45eeed730e38b31a23fe
|
/R/help.R
|
a32cdc5b5197890253d37a5d697f6ade598a19bc
|
[] |
no_license
|
JiangXD/ggmap
|
8d6a4c11ac114cc0fa29670e56d2d47cc75df696
|
cee35370572b9011eadac7faeba8376274743a6c
|
refs/heads/master
| 2021-01-15T20:57:05.536219
| 2013-09-29T12:04:42
| 2013-09-29T12:04:42
| 13,191,594
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 144
|
r
|
help.R
|
# Package-level roxygen block: declares the packages ggmap imports and the
# package documentation entry; the NULL anchors the directives to no object.
#' @import proto scales RgoogleMaps png plyr reshape2 grid rjson mapproj
#' @docType package
#' @name ggmap
#' @aliases ggmap package-ggmap
NULL
|
d12ec3d26e32ebf47c004daa242d1d679a817127
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/r/generated/R/ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfo.r
|
eb827c07bc3e999196e3da2230809817ae495a7b
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
R
| false
| false
| 4,837
|
r
|
ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfo.r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfo Class
#'
#' Generated R6 wrapper for an AEM OSGi configuration info payload.
#'
#' @field pid character scalar identifying the configuration
#' @field title character scalar; configuration title
#' @field description character scalar; configuration description
#' @field properties an R6 ComDayCqReplicationImplContentDurboBinaryLessContentBuilderProperties object
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfo <- R6::R6Class(
  'ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfo',
  public = list(
    `pid` = NULL,
    `title` = NULL,
    `description` = NULL,
    `properties` = NULL,
    # Constructor: every argument is optional; supplied arguments are
    # validated (character scalars / R6 object) before assignment.
    initialize = function(`pid`, `title`, `description`, `properties`){
      if (!missing(`pid`)) {
        stopifnot(is.character(`pid`), length(`pid`) == 1)
        self$`pid` <- `pid`
      }
      if (!missing(`title`)) {
        stopifnot(is.character(`title`), length(`title`) == 1)
        self$`title` <- `title`
      }
      if (!missing(`description`)) {
        stopifnot(is.character(`description`), length(`description`) == 1)
        self$`description` <- `description`
      }
      if (!missing(`properties`)) {
        stopifnot(R6::is.R6(`properties`))
        self$`properties` <- `properties`
      }
    },
    # Serialize the non-NULL fields into a named list suitable for
    # jsonlite::toJSON; nested `properties` is serialized recursively.
    toJSON = function() {
      ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject <- list()
      if (!is.null(self$`pid`)) {
        ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject[['pid']] <- self$`pid`
      }
      if (!is.null(self$`title`)) {
        ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject[['title']] <- self$`title`
      }
      if (!is.null(self$`description`)) {
        ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject[['description']] <- self$`description`
      }
      if (!is.null(self$`properties`)) {
        ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject[['properties']] <- self$`properties`$toJSON()
      }
      ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject
    },
    # Populate this object's fields from a JSON string; absent keys leave the
    # corresponding fields untouched.
    fromJSON = function(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoJson) {
      ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject <- jsonlite::fromJSON(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoJson)
      if (!is.null(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`pid`)) {
        self$`pid` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`pid`
      }
      if (!is.null(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`title`)) {
        self$`title` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`title`
      }
      if (!is.null(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`description`)) {
        self$`description` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`description`
      }
      if (!is.null(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`properties`)) {
        propertiesObject <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderProperties$new()
        propertiesObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$properties, auto_unbox = TRUE))
        self$`properties` <- propertiesObject
      }
    },
    # NOTE(review): generated code -- sprintf("%s") interpolates the raw field
    # values, so character fields are emitted WITHOUT JSON quoting and the
    # resulting string is not valid JSON for string-valued fields. Flagged
    # rather than fixed, since this file is produced by openapi-generator.
    toJSONString = function() {
      sprintf(
        '{
           "pid": %s,
           "title": %s,
           "description": %s,
           "properties": %s
        }',
        self$`pid`,
        self$`title`,
        self$`description`,
        self$`properties`$toJSON()
      )
    },
    # Populate fields from a JSON string without NULL checks (unlike
    # fromJSON above); absent keys reset fields to NULL.
    fromJSONString = function(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoJson) {
      ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject <- jsonlite::fromJSON(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoJson)
      self$`pid` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`pid`
      self$`title` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`title`
      self$`description` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$`description`
      ComDayCqReplicationImplContentDurboBinaryLessContentBuilderPropertiesObject <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderProperties$new()
      self$`properties` <- ComDayCqReplicationImplContentDurboBinaryLessContentBuilderPropertiesObject$fromJSON(jsonlite::toJSON(ComDayCqReplicationImplContentDurboBinaryLessContentBuilderInfoObject$properties, auto_unbox = TRUE))
    }
  )
)
|
05035250b7f09e59d7bb529697da44c4799b6972
|
3b62fb6cfb2a9de90945a98849fd9354751d4432
|
/results/assignments/Navigator_Chapt_5.R
|
f5ccbd79d33830dab7b296c470e2715680524120
|
[] |
no_license
|
Rajani462/eda_rhine
|
490b2e9e55eaabff28103b91285f81d1979edf3e
|
95cdefc003f26c49e4d948d0716845d4fd527f67
|
refs/heads/master
| 2022-05-10T11:57:15.604207
| 2022-03-23T08:53:54
| 2022-03-23T08:53:54
| 253,453,477
| 0
| 0
| null | 2020-04-06T09:36:45
| 2020-04-06T09:36:44
| null |
UTF-8
|
R
| false
| false
| 2,105
|
r
|
Navigator_Chapt_5.R
|
## Navigator question -----------------
## Compare runoff distributions before vs. after the year 2000 (Chapter 5).
library(data.table)
library(ggplot2)
precip_raw <- readRDS('./data/raw/precip_day.rds')
runoff_summary <- readRDS('data/runoff_summary.rds')
runoff_summary_key <- readRDS('data/runoff_summary_key.rds')
runoff_stats <- readRDS('data/runoff_stats.rds')
runoff_month_key <- readRDS('data/runoff_month_key.rds')
runoff_summer_key <- readRDS('data/runoff_summer_key.rds')
runoff_winter_key <- readRDS('data/runoff_winter_key.rds')
runoff_year_key <- readRDS('data/runoff_year_key.rds')
runoff_summer <- readRDS('data/runoff_summer.rds')
runoff_winter <- readRDS('data/runoff_winter.rds')
colset_4 <- c("#D35C37", "#BF9A77", "#D6C6B9", "#97B8C2")
theme_set(theme_bw())
#---------------------------------------
# Q1: label each record as pre-/post-2000 (data.table by-reference update),
# then compare monthly and yearly runoff distributions between the periods.
year_thres <- 2000
runoff_month_key[year < year_thres, period := factor('pre_2000')]
runoff_month_key[year >= year_thres, period := factor('aft_2000')]
runoff_year_key[year < year_thres, period := factor('pre_2000')]
runoff_year_key[year >= year_thres, period := factor('aft_2000')]
# Monthly runoff boxplots per station, full record:
ggplot(runoff_month_key, aes(factor(month), value, fill = period)) +
geom_boxplot() +
facet_wrap(~sname, scales = 'free') +
scale_fill_manual(values = colset_4[c(4, 1)]) +
xlab(label = "Month") +
ylab(label = "Runoff (m3/s)") +
theme_bw()
# Same comparison restricted to years after 1983:
ggplot(runoff_month_key[year > 1983], aes(factor(month), value, fill = period)) +
geom_boxplot() +
facet_wrap(~sname, scales = 'free_y') +
scale_fill_manual(values = colset_4[c(4, 1)]) +
xlab(label = "Month") +
ylab(label = "Runoff (m3/s)") +
theme_bw()
# Yearly runoff boxplots per station, full record:
ggplot(runoff_year_key, aes(year, value, fill = period)) +
geom_boxplot() +
facet_wrap(~sname, scales = 'free') +
scale_fill_manual(values = colset_4[c(4, 1)]) +
xlab(label = " Year") +
ylab(label = "Runoff (m3/sec)") +
theme_bw()
# Same comparison restricted to years after 1983:
ggplot(runoff_year_key[year > 1983], aes(year, value, fill = period)) +
geom_boxplot() +
facet_wrap(~sname, scales = 'free') +
scale_fill_manual(values = colset_4[c(4, 1)]) +
xlab(label = " Year") +
ylab(label = "Runoff (m3/sec)") +
theme_bw()
#Q2--------
|
3c080520cacfb995db586bb014a75187c3b6024f
|
da725622bc962b639e1eb6df535b433e4366bcc5
|
/opportunityYouth/getDataFromMySQL.R
|
f35580c38efa3174d57c9dec0b7aff8f7ffcb383
|
[] |
no_license
|
bekahdevore/rKW
|
5649a24e803b88aa51a3e64020b232a23bd459fa
|
970dcf8dc93d4ec0e5e6a79552e27ddc0f850b91
|
refs/heads/master
| 2020-04-15T12:41:49.567456
| 2017-07-25T16:29:31
| 2017-07-25T16:29:31
| 63,880,311
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
getDataFromMySQL.R
|
library(RMySQL)
# Connect using the "kwlmi" group from the MySQL options file.
con <- dbConnect(MySQL(), group = "kwlmi", dbname = "kwlmi")
## Variables: names of the tables to query.
kentucky <- "kentuckyPUMS"
peerCities <- "peerCityPUMS"
louisville <- "louisvilleMSA_PUMS"
# Build a "SELECT * FROM <place> ;" query string for the given table name.
statement <- function(place) {
  sprintf("SELECT * FROM %s ;", place)
}
# Pull data from the MySQL database; change the place argument in statement()
# to run against a different population table.
# NOTE(review): these assignments reuse the table-name variable names
# (`kentucky`, `peerCities`), overwriting them with the query results.
kentucky <- dbGetQuery(conn = con, statement = statement(kentucky))
peerCities <- dbGetQuery(conn = con, statement = statement(peerCities))
louisvilleMSA <- dbGetQuery(conn = con, statement = statement(louisville))
kentuckyAllDataAllVariables <- kentucky
peerCitiesAllDataAllVariables <- peerCities
louisvilleAllDataAllVariables <- louisvilleMSA
# Cache each full extract as an .RData file for downstream analysis.
save(kentuckyAllDataAllVariables, file = "kentuckyAllDataAllVariables.RData")
save(peerCitiesAllDataAllVariables, file = "peerCitiesAllDataAllVariables.RData")
save(louisvilleAllDataAllVariables, file = "louisvilleAllDataAllVariables.RData")
|
b86d4ea0421fba01474b6d8e5329f48fe7db2eab
|
83278d193bf24349883f51a40860f04dfba23f9b
|
/data-raw/create_top_tracks.R
|
3959cc17b031044762f4a879e486313c202cbae7
|
[] |
no_license
|
sbudai/spotify-recommendations
|
6c93c6330bf5e74d45cc6c7035f374f9fd0c21f3
|
966306a98294f59126d6249f9c5e7d4760868054
|
refs/heads/main
| 2023-01-28T12:31:54.558736
| 2020-12-06T19:16:24
| 2020-12-06T19:16:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,175
|
r
|
create_top_tracks.R
|
## Code to prepare the `listen_local_artists` dataset.
library(tidyverse)
library(spotifyrecommendations)

# Artist top-tracks export; the first column is renamed to the Spotify id.
sk_top_tracks <- read.csv(file.path(
  'data-raw', 'SK_Artist_Top_Tracks_Table.csv'
)) %>%
  setNames(., c("spotify_artist_id", names(.)[2:ncol(.)]))

# Inspect the available columns.
names(sk_top_tracks)

listen_local_artists <- sk_top_tracks %>%
  select(all_of(c(
    "spotify_artist_id", "name", "all_english_title",
    "all_slovak_title", "any_slovak_title",
    "is_considered_slovak", "considered_czech", "known_slovak_city",
    "genre_1", "genre_2", "genre_3"
  ))) %>%
  mutate(
    # Track-title language flags collapsed into a single code (NA if mixed).
    language = case_when(
      all_english_title == 1 ~ "en",
      all_slovak_title == 1 ~ "sk",
      TRUE ~ NA_character_),
    # BUG FIX: removed the trailing comma after the last case_when() formula,
    # which made the call fail with an empty-argument error.
    national_identity = case_when(
      considered_czech == 1 ~ "cz",
      is_considered_slovak == 1 ~ "sk"
    )
  ) %>%
  rename(city = known_slovak_city,
         artist_name = name) %>%
  # One row per artist.
  distinct(spotify_artist_id, .keep_all = TRUE) %>%
  select(all_of(c("spotify_artist_id", "artist_name",
                  "national_identity", "language",
                  "genre_1", "genre_2", "genre_3")))

usethis::use_data(listen_local_artists, overwrite = TRUE)
|
7169b28e7fb228814eb0b9d71afe63a285efee5a
|
d5ea85feed4c01ce9db8c019d9142f30a0c68a0e
|
/R/20_bifd.R
|
937957eaa808262f1a8c5bb7aae564835766c2ab
|
[] |
no_license
|
yixuan/fdaplus
|
4b59d15d4a0501a4b66f21d0f6adab407107cc98
|
51abb6d5d6a0060a8117060135a8167642eb4b56
|
refs/heads/master
| 2016-09-06T14:19:30.570338
| 2015-05-16T00:59:17
| 2015-05-16T00:59:17
| 24,311,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,358
|
r
|
20_bifd.R
|
#' Bivariate Function Using Basis Expansion
#'
#' This class defines bivariate functions that can be expressed by two sets
#' of basis functions and the associated coefficient matrix. It takes the following
#' form:
#'
#' \deqn{K(s,t)=\sum_{i,j} a_{ij}f_i(s)g_j(t)}{K(s, t) = sum_{i,j} a_{ij} * f_i(s) * g_j(t)}
#'
#' Here \eqn{K(s, t)} is the bivariate function, \eqn{f_i} and \eqn{g_j} are
#' two basis systems, and \eqn{A = (a_{ij})} is the coefficient matrix.
#'
#' @slot sbasis,tbasis Basis objects of class \code{\link[=basis+-class]{basis+}},
#'                     not necessarily of the same type (for example, one can be
#'                     \code{\link[=bspline+-class]{bspline+}} and the other be
#'                     \code{\link[=fourier+-class]{fourier+}}).
#' @slot coefs A matrix of dimension \code{m} by \code{n} where \code{m}
#'             is the number of functions in \code{sbasis}, and \code{n}
#'             is the number of functions in \code{tbasis}.
#'
#' @export
setClass("bifd+", slots = c(coefs = "matrix",
                            sbasis = "basis+",
                            tbasis = "basis+"),
         validity = function(object) {
             # The coefficient matrix must conform to both basis sizes.
             if(nrow(object@coefs) != object@sbasis@ncoef)
                 return("nrow(coefs) must be equal to sbasis@ncoef")
             if(ncol(object@coefs) != object@tbasis@ncoef)
                 return("ncol(coefs) must be equal to tbasis@ncoef")
             return(TRUE)
         }
)
#' Creating A Bivariate Function Using Basis Expansion
#'
#' This function constructs a \code{\link[=bifd+-class]{bifd+}} object
#' that represents a bivariate function.
#'
#' @param coefs The coefficient matrix.
#' @param sbasis,tbasis Basis objects of class \code{\link[=basis+-class]{basis+}},
#'                      not necessarily of the same type (for example, one can be
#'                      \code{\link[=bspline+-class]{bspline+}} and the other be
#'                      \code{\link[=fourier+-class]{fourier+}}). \code{tbasis}
#'                      defaults to \code{sbasis}.
#'
#' @return A \code{\link[=bifd+-class]{bifd+}} object with the given
#'         bases and coefficients.
#' @author Yixuan Qiu <\url{http://statr.me/}>
#' @export
bifd_new = function(coefs, sbasis, tbasis = sbasis)
{
    # Slot validity (dimension checks) is enforced by the class definition.
    new("bifd+", coefs = coefs, sbasis = sbasis, tbasis = tbasis)
}
#' @describeIn wrap Converting "bifd" objects
wrap.bifd = function(obj, ...)
{
    # Convert an fda::bifd object by wrapping its two bases and reusing coefs.
    new("bifd+", coefs = obj$coefs, sbasis = wrap(obj$sbasis),
        tbasis = wrap(obj$tbasis))
}
## Arithmetic between bifd+ and a scalar
#' @rdname arithmetic-methods
#'
#' @section Method (bifd+, numeric scalar):
#' A \code{\link[=bifd+-class]{bifd+}} object can be multiplied or divided by
#' a numeric scalar, and these operations will return the scaled bivariate
#' function object.
setMethod("*", signature(e1 = "bifd+", e2 = "numeric"),
          function(e1, e2) {
              # Only the first element of the numeric operand is used.
              initialize(e1, coefs = e2[1] * e1@coefs)
          }
)
#' @rdname arithmetic-methods
setMethod("*", signature(e1 = "numeric", e2 = "bifd+"),
          function(e1, e2) {
              initialize(e2, coefs = e1[1] * e2@coefs)
          }
)
#' @rdname arithmetic-methods
setMethod("/", signature(e1 = "bifd+", e2 = "numeric"),
          function(e1, e2) {
              initialize(e1, coefs = e1@coefs / e2[1])
          }
)
## Arithmetic between bifd+ objects
#' @rdname arithmetic-methods
#'
#' @section Method (bifd+, bifd+):
#' A \code{\link[=bifd+-class]{bifd+}} object can be added to or subtracted from
#' another \code{\link[=bifd+-class]{bifd+}} object, if they have the same bases.
setMethod("+", signature(e1 = "bifd+", e2 = "bifd+"),
          function(e1, e2) {
              # Both operands must share identical bases; use short-circuit
              # operators since identical() returns a length-1 logical.
              if(!identical(e1@sbasis, e2@sbasis) ||
                 !identical(e1@tbasis, e2@tbasis))
                  stop("need to have the same basis functions")
              initialize(e1, coefs = e1@coefs + e2@coefs)
          }
)
#' @rdname arithmetic-methods
setMethod("-", signature(e1 = "bifd+", e2 = "bifd+"),
          function(e1, e2) {
              if(!identical(e1@sbasis, e2@sbasis) ||
                 !identical(e1@tbasis, e2@tbasis))
                  stop("need to have the same basis functions")
              initialize(e1, coefs = e1@coefs - e2@coefs)
          }
)
#' @rdname feval-methods
#'
#' @section Method (bifd+, numeric, numeric):
#' \tabular{lcl}{
#'   \code{f} \tab - \tab A \code{\link[=bifd+-class]{bifd+}} object. \cr
#'   \code{x, y} \tab - \tab Numeric vectors.
#' }
#'
#' \code{feval(f, x, y)} returns a matrix \code{R} of
#' \code{length(x)} rows and \code{length(y)} columns, with \code{R[i, j]}
#' equal to the value of \code{f(x[i], x[j])}.
setMethod("feval", signature(f = "bifd+", x = "numeric"),
          function(f, x, y, ...) {
              y = as.numeric(y)
              # Evaluate F(x)' A G(y), where F/G are the basis evaluation
              # matrices and A is the coefficient matrix.
              crossprod(feval(f@sbasis, x), f@coefs) %*% feval(f@tbasis, y)
          }
)
#' @rdname plot-methods
setMethod("plot", signature(x = "bifd+", y = "missing"),
          function(x, y, ..., engine = c("graphics", "rgl")) {
              # Evaluate the surface on a 101 x 101 grid spanning both ranges.
              x0 = seq(x@sbasis@range[1], x@sbasis@range[2],
                       length.out = 101)
              y0 = seq(x@tbasis@range[1], x@tbasis@range[2],
                       length.out = 101)
              z = feval(x, x0, y0)
              # Use an interactive rgl surface if requested, otherwise base persp().
              if(engine[1] == "rgl")
                  rgl::persp3d(x0, y0, z, ...)
              else
                  persp(x0, y0, z, ...)
          }
)
## Integral transform (product) of two bivariate functions.
## The middle factor (x@tbasis %*% y@sbasis) is the cross-inner-product
## matrix of the two basis systems (basis %*% method defined elsewhere).
setMethod("%*%", signature(x = "bifd+", y = "bifd+"),
          function(x, y) {
              newcoef = x@coefs %*% (x@tbasis %*% y@sbasis) %*% y@coefs
              new("bifd+", coefs = newcoef, sbasis = x@sbasis,
                  tbasis = y@tbasis)
          }
)
## Integral transform (product) on fd+: applying a bivariate kernel to a
## (set of) univariate functions yields functions on the remaining basis.
setMethod("%*%", signature(x = "bifd+", y = "fd+"),
          function(x, y) {
              newcoef = x@coefs %*% (x@tbasis %*% y@basis) %*% t(y@coefs)
              new("fd+", coefs = t(newcoef), basis = x@sbasis)
          }
)
setMethod("%*%", signature(x = "fd+", y = "bifd+"),
          function(x, y) {
              newcoef = x@coefs %*% (x@basis %*% y@sbasis) %*% y@coefs
              new("fd+", coefs = newcoef, basis = y@tbasis)
          }
)
## Square root of a symmetric matrix (compiled routine in the package .so).
sqrtm = function(x)
{
    if(!isSymmetric(x))
        stop("x must be a symmetric matrix")
    .Call("sqrtm", x, PACKAGE = "fdaplus")
}
## Inverse of the square root of a symmetric matrix.
sqrtInvm = function(x)
{
    if(!isSymmetric(x))
        stop("x must be a symmetric matrix")
    .Call("sqrtInvm", x, PACKAGE = "fdaplus")
}
## Both of the above in one call (returns the pair; see usage in power_bifd).
sqrtBothm = function(x)
{
    if(!isSymmetric(x))
        stop("x must be a symmetric matrix")
    .Call("sqrtBothm", x, PACKAGE = "fdaplus")
}
## Power of a (symmetric) bivariate function: raise the kernel to the k-th
## power via an eigendecomposition in the metric induced by the basis Gram
## matrix W = penmat(basis, 0).
## NOTE(review): for fractional k this presumably assumes the eigenvalues are
## non-negative (negative values^k yield NaN) -- confirm intended usage.
power_bifd = function(x, k)
{
    if(!isSymmetric(x@coefs) |
       !identical(x@sbasis, x@tbasis))
        stop("need a symmetric bivariate function")
    xmat = x@coefs
    w = penmat(x@sbasis, 0)
    # W^{1/2} and W^{-1/2} computed together by the compiled helper.
    wsqrtBoth = sqrtBothm(w)
    wsqrt = wsqrtBoth$sqrt
    wsqrtInv = wsqrtBoth$sqrtInv
    # Symmetrize in the W metric, take the eigendecomposition, raise the
    # eigenvalues to k, and transform back.
    mdecomp = wsqrt %*% xmat %*% wsqrt
    e = eigen(mdecomp)
    newcoef = wsqrtInv %*% e$vectors %*% diag(e$values^k) %*% t(e$vectors) %*% wsqrtInv
    initialize(x, coefs = newcoef)
}
setMethod("^", signature(e1 = "bifd+", e2 = "numeric"),
          function(e1, e2) power_bifd(e1, e2)
)
|
3e3e77acfaffd2efc5296664dbb76e109baee66b
|
ae5fb8c8cba912eb62290e2b124fb7f999cc824a
|
/DESEq2 Analysis.R
|
f9faedfc26bd99a3fbe4921de8d492e275098321
|
[] |
no_license
|
jessedunnack/LoTurco-RNASeq
|
cdccd5a67dfa40607717529301a4c9419b9536a3
|
22f5b9bfb4913d4908d9ddb3e64475395eee4c3d
|
refs/heads/master
| 2020-04-01T18:00:46.681004
| 2018-10-17T15:09:56
| 2018-10-17T15:09:56
| 153,465,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,264
|
r
|
DESEq2 Analysis.R
|
# WORKFLOW FOR PROCESSING RNASEQ USING DESEQ2
# Reads HTSeq name-sorted count files, runs the DESeq2 contrast, annotates
# Ensembl ids with gene symbols, writes the result table and draws a heatmap
# of the ependymoma-associated TFs.
library(DESeq2)
library(biomaRt)
library(pheatmap)

# CHANGE THIS DEPENDING ON WHAT FILES YOU'RE PROCESSING
directory <- "/Volumes/NO NAME/RNASeq/FUS1 RNASeq/LoTurco/Counts/Name_Sorted/"
setwd(directory)

# ESTABLISH BIOMART OBJECT TO RENAME ENSEMBL IDs (archived Aug 2017 release)
mart <- useDataset("mmusculus_gene_ensembl", useMart(host = "aug2017.archive.ensembl.org", biomart = "ENSEMBL_MART_ENSEMBL"))

# Sample/condition names are derived from the count-file names.
# MAY HAVE TO TWEAK THESE ENVIRONMENT VARIABLES BASED ON FILE NAMING CONVENTIONS
fileNames <- list.files(directory)
sampleNames <- sub("_Name_Sorted.txt", "", fileNames)
conditionNames <- sub("_\\D\\d", "", sampleNames)
sampleTable <- data.frame(sampleName = sampleNames,
                          fileName = fileNames,
                          condition = conditionNames)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = directory,
                                       design = ~ condition)
dds <- DESeq(ddsHTSeq)
res <- results(dds)

# Create counts table with statistics for 2-way comparison
# (columns 7-10 are the samples excluded from this contrast).
FPvC_counts <- (counts(dds, normalized = TRUE))[, -c(7, 8, 9, 10)]
FPvC_data <- merge(as.data.frame(res), as.data.frame(FPvC_counts), by = 'row.names', sort = FALSE)
genes <- FPvC_data$Row.names
new_names <- getBM(filters = "ensembl_gene_id", attributes = c("ensembl_gene_id", "external_gene_name"), values = genes, mart = mart)
# BUG FIX: the original referenced the undefined object `n_FPvC_data`;
# the merged results table built above is called `FPvC_data`.
out <- merge(FPvC_data, new_names, by.x = "Row.names", by.y = "ensembl_gene_id")
# Move the gene-symbol column to the front, then drop the Ensembl-id column.
out <- out[, c(c(ncol(out), 1:(ncol(out) - 1)))]
out <- out[, -2]
write.table(out, file = "LoTurco_FUS1+P53_vs_CTRL.txt", sep = '\t')

# Create a heatmap showing expression of the 43 ependymoma-associated TFs
enhancers <- read.table("/Volumes/NO NAME/RNASeq/FUS1_Enhancers.txt", sep = "", header = FALSE)
enhancers <- merge.data.frame(enhancers, getBM(filters = "external_gene_name", attributes = "ensembl_gene_id", enhancers, mart), by = "row.names")
counts <- counts(dds, normalized = TRUE)
counts_enhancers <- counts[enhancers$ensembl_gene_id, ]
row.names(counts_enhancers) <- enhancers[, "V1"]

# Row-wise z-score so the heatmap shows relative expression per gene.
cal_z_score <- function(x) {
  (x - mean(x)) / sd(x)
}
counts_enhancers_norm <- t(apply(counts_enhancers, 1, cal_z_score))
# NOTE(review): these two genes are zeroed manually -- presumably because
# constant counts give sd == 0 and hence NaN z-scores; confirm.
counts_enhancers_norm["Ccnt2", ] <- 0
counts_enhancers_norm["Zfp423", ] <- 0
pheatmap(counts_enhancers_norm)
|
aca3e237c07c996565f612fc0d1d405f99b448b4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/magclass/R/getRegions.R
|
308e65943a562d978c2621e2167346ced352b804
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 857
|
r
|
getRegions.R
|
# Extract the unique region codes from the first dimension's names.
# Names are expected to look like "REG.cell"; a fast substring path is used
# when every region code is exactly 3 characters long.
getRegions <- function(x) {
  element_names <- dimnames(x)[[1]]
  if (all(substr(element_names, 4, 4) == ".")) {
    # Fast path: all region names have 3 characters before the dot.
    regions <- substr(element_names, 1, 3)
  } else {
    # General (slow) path: take everything before the first dot.
    regions <- as.vector(as.matrix(cbind.data.frame(strsplit(element_names, "\\."))[1, ]))
  }
  unique(regions)
}
# Replacement function: rename the regions of x to `value`.
# The "SAVEREPLACE" prefix guarantees each original name is substituted at
# most once even when old and new region codes overlap.
"getRegions<-" <- function(x,value) {
  reg <- getRegions(x)
  # NOTE(review): reg comes from getRegions(), which strips everything after
  # the first dot, so it is unclear when reg[1] can still contain a "." --
  # confirm against the intended class hierarchy; as written this branch
  # delegates renaming to getCells<- for non-cellular names.
  if(!grepl(".",reg[1],fixed=TRUE)) {
    getCells(x) <- value
    return(x)
  }
  if(length(reg)!=length(value)) stop("Number of regions must agree with current number of regions!")
  # Tag every dimname, then replace "SAVEREPLACE <old>." with "<new>." one
  # region at a time; the tag prevents re-matching already-renamed entries.
  # NOTE(review): reg[i] is interpolated into a regex unescaped -- fine for
  # alphanumeric region codes, would misbehave with regex metacharacters.
  tmp <- paste("SAVEREPLACE",dimnames(x)[[1]])
  for(i in 1:nregions(x)) {
    tmp <- sub(paste("SAVEREPLACE ",reg[i],"\\.",sep=""),paste(value[i],"\\.",sep=""),tmp)
  }
  dimnames(x)[[1]] <- tmp
  return(x)
}
|
94d375d79042e5d6c21eb872d85c4619945d9cfc
|
693e19181178a2ebeeb77c78eae958655d8d81f5
|
/Graphs/Graphs.R
|
a9344730b8fb255c64af2c1593341c1dafbdb7c3
|
[] |
no_license
|
adityaraj52/ConvNet
|
76bdd9d0689d0d7a25fb1cf8a0f80ed5f0f5f3fe
|
a3cb37293f1f4006b1ed69f27a532293e62929d5
|
refs/heads/master
| 2021-01-22T19:31:04.716095
| 2017-03-16T15:04:18
| 2017-03-16T15:04:18
| 85,207,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,598
|
r
|
Graphs.R
|
################################################################################
# Instructions to export the files
################################################################################
# Export files in landscape format and as A5 format
#
# Reviewed: the four near-identical read/plot blocks of the original are
# collapsed into a single helper, plot_run().
################################################################################

# Directory holding the TensorBoard CSV exports (columns: Step, Value).
graph_dir <- "~/Documents/TU Clausthal/Neuronale Netze und statistisches Lernen/ConvNet/Graphs"

# Read one run and draw its loss curve: raw values in grey with a smoothed
# spline overlaid in black. Returns the run's data invisibly so the curves
# can be combined in a single comparison plot afterwards.
# - file:        CSV file name inside graph_dir
# - axis_max:    largest training step shown on the x axis
# - axis_labels: tick labels; ticks are spaced evenly from 0 to axis_max
# - spar:        smoothing parameter passed to smooth.spline()
plot_run <- function(file, axis_max, axis_labels, spar = 0.35) {
  run <- read.csv(file.path(graph_dir, file))
  plot(run$Step, run$Value, type = "l",
       xlab = "Training steps", ylab = "Loss",
       col = "gray70", xaxt = "n")
  axis(1, at = seq(0, axis_max, length.out = length(axis_labels)),
       labels = axis_labels)
  lines(smooth.spline(run$Step, run$Value, spar = spar), lwd = 2)
  invisible(run)
}

################################################################################
# Individual total-loss graphs
################################################################################
standard_100k <- plot_run("run_Standard100K.csv", 100000,
                          c("0k", "20k", "40k", "60k", "80k", "100k"))
added_conv    <- plot_run("run_AddedConvLayer.csv", 14000,
                          c("0k", "2k", "4k", "6k", "8k", "10k", "12k", "14k"))
standard_40k  <- plot_run("run_Standard.csv", 40000,
                          c("0k", "10k", "20k", "30k", "40k"))
images_28x28  <- plot_run("run_28x28Images.csv", 30000,
                          c("0k", "10k", "20k", "30k"))

################################################################################
# All together
################################################################################
# Empty frame spanning the longest run; each run is added as a smoothed line.
plot(standard_100k$Step, standard_100k$Value, type = "l",
     xlab = "Training steps", ylab = "Loss",
     col = "white", xaxt = "n", ylim = c(0, 1))
axis(1, at = seq(0, 100000, 20000),
     labels = c("0k", "20k", "40k", "60k", "80k", "100k"))

# Stronger smoothing (spar = 0.5) for the overlay so the curves stay legible.
add_smoothed <- function(run, col) {
  lines(smooth.spline(run$Step, run$Value, spar = 0.5), lwd = 2, col = col)
}
add_smoothed(standard_100k, "gray70")
add_smoothed(added_conv, "deepskyblue")
add_smoothed(standard_40k, "firebrick")
add_smoothed(images_28x28, "goldenrod1")

# Add a legend to the plot
legend("topright",
       inset = .05,
       legend = c("Standard 100k", "Additional ConvLayer",
                  "Standard 40k", "Increased size"),
       lty = 1,
       lwd = 2,
       col = c("gray70", "deepskyblue", "firebrick", "goldenrod1"))
|
742a1fb4ef7a962cf4693a189c0828dfd896ac70
|
2f94bc0d7c4c991297e294ce4afe672d3ab715da
|
/tests/testthat/test-geojson_properties.R
|
0362b535b404f91cca93424b021d076fa0f0be7d
|
[] |
no_license
|
techisdead/geojsonsf
|
4100b8c686962a2317dec66b97a8b2922fd41f9c
|
d5e757c3e60f969f6b369dab3f41e756351b1ce7
|
refs/heads/master
| 2020-03-10T21:40:23.692782
| 2018-04-15T09:21:18
| 2018-04-15T09:21:18
| 129,599,801
| 0
| 0
| null | 2018-04-15T10:33:33
| 2018-04-15T10:33:33
| null |
UTF-8
|
R
| false
| false
| 2,871
|
r
|
test-geojson_properties.R
|
context("properties")

# geojson_sf()/geojson_wkt() should expose every "properties" member of a
# Feature as a column of the returned object, alongside the geometry column.
test_that("properties captured correctly", {
	f <- '{
	"type": "Feature",
	"properties": { "id" : 1, "name" : "foo" },
	"geometry": {"type": "LineString", "coordinates": [[101.0, 0.0], [102.0, 1.0]]}
}'
	sf <- geojson_sf(f)
	wkt <- geojson_wkt(f)
	# Both representations carry the property columns in the same order.
	expect_true(
		all(names(sf) == c("geometry", "id", "name"))
	)
	expect_true(
		all(names(wkt) == c("geometry", "id", "name"))
	)
	expect_true(
		sf$id == 1
	)
	expect_true(
		wkt$id == 1
	)
	expect_true(
		sf$name == "foo"
	)
	expect_true(
		wkt$name == "foo"
	)
	# Mixed collection: features with partial or null properties should give
	# NA-padded columns; bare geometries contribute rows with no properties.
	js <- '[
	{
		"type": "Feature",
		"properties" : {},
		"geometry": {
		"type": "Polygon",
		"coordinates": [
		[
			[-10.0, -10.0],
			[10.0, -10.0],
			[10.0, 10.0],
			[-10.0, -10.0]
			]
		]
	}
	},
{
	"type": "Feature",
	"properties" : { "id" : 1 },
	"geometry": {
		"type": "MultiPolygon",
		"coordinates": [
		[
			[
				[180.0, 40.0], [180.0, 50.0], [170.0, 50.0],
				[170.0, 40.0], [180.0, 40.0]
				]
			],
		[
			[
				[-170.0, 40.0], [-170.0, 50.0], [-180.0, 50.0],
				[-180.0, 40.0], [-170.0, 40.0]
				]
			]
		]
	}
},
{
	"type": "FeatureCollection",
	"features": [
	{
		"type": "Feature",
		"properties": {"id" : 2, "value" : "foo"},
		"geometry": {
			"type": "Point",
			"coordinates": [100.0, 0.0]
		}
	},
	{
		"type": "Feature",
		"properties": null,
		"geometry": {
			"type": "LineString",
			"coordinates": [
			[101.0, 0.0],
			[102.0, 1.0]
			]
		}
	}
	]
},
{
	"type": "GeometryCollection",
	"geometries": [
	{
		"type": "Point",
		"coordinates": [100.0, 0.0]
	},
	{
		"type": "LineString",
		"coordinates": [
		[101.0, 0.0],
		[102.0, 1.0]
		]
	},
	{
		"type" : "MultiPoint",
		"coordinates" : [
		[0,0],
		[1,1],
		[2,2]
		]
	}
	]
},
{
	"type": "Polygon",
	"coordinates": [
	[
		[-10.0, -10.0],
		[10.0, -10.0],
		[10.0, 10.0],
		[-10.0, -10.0]
		]
	]
}
]'
	sf <- geojson_sf(js)
	wkt <- geojson_wkt(js)
	# geometry + the two property columns seen anywhere in the collection.
	expect_true(
		ncol(sf) == 3
	)
	expect_true(
		ncol(wkt) == 3
	)
	# ids present are 1 and 2; everything else is NA.
	expect_true(
		sum(sf$id, na.rm = T) == 3
	)
	expect_true(
		sum(wkt$id, na.rm = T) == 3
	)
	expect_true(
		sf$value[!is.na(sf$value)] == "foo"
	)
	expect_true(
		wkt$value[!is.na(wkt$value)] == "foo"
	)
})
# The sf and sfc constructors should agree on the geometry column's class.
test_that("sf and sfc created equally", {
	feature <- '{
	"type": "Feature",
	"properties": { "id" : 1, "name" : "foo" },
	"geometry": {"type": "LineString", "coordinates": [[101.0, 0.0], [102.0, 1.0]]}
}'
	as_sf <- geojson_sf(feature)
	as_sfc <- geojson_sfc(feature)
	expect_true(
		all(class(as_sf$geometry) == class(as_sfc))
	)
})
|
8692ed3d416a0006fe8c958371250146207dca4b
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/inst/IP/LobsterFisheryAttributes/LicenceCharacteristics.r
|
f8299130fc9c8207c77476715f433ed9b3986fbc
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 4,251
|
r
|
LicenceCharacteristics.r
|
## licences by port
## Exploratory analysis of lobster licence/vessel/operator characteristics
## by LFA, built on the project's lobster.db() data access layer.
require(ggplot2)
require(bio.lobster)
require(bio.utilities)
require(devtools)
load_all('~/git/bio.utilities')
# Pull logbooks, community codes and vessel characteristics from the DB.
a = lobster.db('process.logs.unfiltered')
b = lobster.db('community_code')
d = lobster.db('vessels.by.port')
d = na.zero(d)
# One record per vessel/LFA/year: keep the minimum of each characteristic,
# then turn the zero placeholders back into NA.
d1 = aggregate(cbind(GROSS_TONNAGE, BHP, LOA, BREADTH, DEPTH,YEAR_BUILT)~VR_NUMBER+LFA+YR_FISHED,data=d,FUN=min)
d = na.zero(d1,rev=T)
w = lobster.db('port')
v = lobster.db('port_location')
#Demographics on Lic
o = read.csv(file.path(project.datadirectory('bio.lobster'),'data','LicenceHolder','LicenceHolderInfo2022a.csv'))
# NOTE(review): `i` is computed but never used below.
i = grep('X',names(o))
o = subset(o,Years_Licence_Held<100)
# LFA is the second " - "-separated token of the English description.
o$LFA = do.call(rbind, strsplit(o$Desc_Eng," - "))[,2]
o$LicStartDate = as.Date(o$Licence_Participant_Start_Date,'%b %d, %Y')
o$BDate = as.Date(o$Birthdate,'%b %d, %Y')
o = subset(o,select=c(Licence_Id, Name_Last_First, BDate, LicStartDate, LFA, Years_Licence_Held, Age))
ggplot(subset(o,LFA !=28),aes(x=Age)) + geom_histogram(bins=10, aes(y=..density..)) + facet_wrap(~LFA,scales='free_y')
aggregate(Age~LFA,data=o,FUN=function(x) quantile(x,probs=c(0.1,.5,.9)))
#Logbook Processing
# Decimal week-of-year variables derived from the fishing date.
a$DYR = lubridate::decimal_date(a$DATE_FISHED) - lubridate::year(a$DATE_FISHED)
a$WYR = ceiling(a$DYR*52)
a$DWYR = lubridate::year(a$DATE_FISHED) + a$WYR/52
a$P=1
# Trip-level then season-level sums; P counts trips after the roll-up.
xa = aggregate(cbind(WEIGHT_KG, NUM_OF_TRAPS)~SYEAR+VR_NUMBER+LICENCE_ID+LFA+SD_LOG_ID,data=a,FUN=sum)
xa$P = 1
x = aggregate(cbind(P,WEIGHT_KG, NUM_OF_TRAPS)~SYEAR+VR_NUMBER+LICENCE_ID+LFA,data=xa,FUN=sum)
x$CPUE = x$WEIGHT_KG/x$NUM_OF_TRAPS
#CPUE and vessel merge
xv = merge(x,d, by.x=c('VR_NUMBER','SYEAR','LFA'),by.y=c('VR_NUMBER','YR_FISHED','LFA'),all.x=T)
##CPUE and vessel and operator merge
xvo = merge(xv, o, by.x=c('LICENCE_ID','LFA'),by.y=c('Licence_Id','LFA'),all.x=T)
#how many trips
xx = aggregate(P~SYEAR+LFA,data=x,FUN=mean)
with(subset(xx,LFA==34),plot(SYEAR,P))
x2 = subset(x, SYEAR==2019)
x2$NTrips =x2$P
ggplot(x2, aes(x=NTrips))+geom_histogram()+facet_wrap(~LFA,scales='free_y')
#how many grids per year are they fishing
xg = aggregate(cbind(P,WEIGHT_KG, NUM_OF_TRAPS)~SYEAR+VR_NUMBER+LICENCE_ID+LFA+GRID_NUM,data=a,FUN=sum)
xgg = aggregate(GRID_NUM~SYEAR+VR_NUMBER+LICENCE_ID+LFA,data=subset(xg,GRID_NUM>0),FUN=length)
xvog = merge(xvo, xgg, by.x=c('SYEAR','LICENCE_ID','LFA', 'VR_NUMBER'),by.y=c('SYEAR','LICENCE_ID','LFA', 'VR_NUMBER'),all.x=T)
xvog$ageBoat = xvog$SYEAR - xvog$YEAR_BUILT
ggplot(subset(xvog, SYEAR==2019 & CPUE<8 & LFA != 28),aes(x=CPUE)) + geom_histogram(aes(y=..density..)) + facet_wrap(~LFA, scales='free_y')
ggplot(subset(xvog, SYEAR==2019),aes(x=BHP)) + geom_histogram() + facet_wrap(~LFA, scales='free_y')
ggplot(subset(xvog, SYEAR==2019),aes(x=WEIGHT_KG)) + geom_histogram() + facet_wrap(~LFA, scales='free_y')
ggplot(subset(xvog, SYEAR==2019),aes(x=LOA)) + geom_histogram() + facet_wrap(~LFA, scales='free_y')
ggplot(subset(xvog, SYEAR==2019),aes(x=ageBoat)) + geom_histogram() + facet_wrap(~LFA, scales='free_y')
ggplot(subset(xvog, SYEAR==2019& GRID_NUM<10),aes(x=GRID_NUM)) + geom_histogram() + facet_wrap(~LFA, scales='free_y')
##Statistics
# Mean, sd and n of each characteristic by LFA for the 2019 season.
(aggregate(GRID_NUM~LFA, data=subset(xvog,SYEAR==2019),FUN=function(x) c(mean(x),sd(x),length(x))))
(aggregate(Age~LFA, data=subset(xvog,SYEAR==2019),FUN=function(x) c(mean(x),sd(x),length(x))))
(aggregate(log10(WEIGHT_KG)~LFA, data=subset(xvog,SYEAR==2019),FUN=function(x) c(mean(x),sd(x),length(x))))
(aggregate(P~LFA, data=subset(xvog,SYEAR==2019),FUN=function(x) c(mean(x),sd(x),length(x))))
(aggregate(ageBoat~LFA, data=subset(xvog,SYEAR==2019),FUN=function(x) c(mean(x),sd(x),length(x))))
(aggregate(GROSS_TONNAGE~LFA, data=subset(xvog,SYEAR==2019),FUN=function(x) c(mean(x),sd(x),length(x))))
##gini index for evenness of catch using effort as well
# NOTE(review): `t` below is assigned but never used (and shadows base::t).
t = subset(xvog, LFA==27 & SYEAR==2019)
# Gini coefficient of CPUE (weighted by trap hauls) per LFA and season.
ii = unique(x$LFA)
m=0
out=list()
for(i in 1:length(ii)){
  k = subset(x, LFA==ii[i])
  ll = unique(k$SYEAR)
  for(l in 1:length(ll)) {
    m=m+1
    n = subset(k,SYEAR==ll[l])
    out[[m]] = c(ii[i],ll[l],bio.survey::gini(x=n$CPUE,y=n$NUM_OF_TRAPS))
  }
}
out = as.data.frame(do.call(rbind,out))
out = toNums(out, cols=2:3)
names(out) = c('LFA','SYEAR','GINI')
ggplot(out,aes(x=SYEAR,y=GINI))+geom_line() + facet_wrap(~LFA)
|
8ac02305a26885c252bd318232884263c7c4c1b6
|
5c21ffa379f009e9c8eff32a7ff67821f7e1638b
|
/sim_time_variation.R
|
64be43c50cb3be957cddb64f806c4ae7bd90956b
|
[] |
no_license
|
cfaustus/core_matrix_publish
|
cf8460356f4304c8ca323b290b1ab27902246335
|
548158c12c35af34bd0484bf29bdee3e06b0db0b
|
refs/heads/master
| 2022-07-10T14:09:19.685412
| 2022-07-06T12:31:04
| 2022-07-06T12:31:04
| 99,240,559
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,695
|
r
|
sim_time_variation.R
|
################################################################################
## script to look at how rate & amount of deforestation affect disease over time
## script made by christina.faust@gmail.com 14 april 16
##
## Reviewed: the nine copy-pasted "round" blocks of the original are collapsed
## into a single loop. Each segment raises the habitat-conversion fraction phi
## one step along phiseq and carries the previous segment's final state
## forward as the new initial condition.
################################################################################
# NOTE(review): clearing the workspace from inside a script is discouraged;
# kept so the script behaves as before when run interactively.
rm(list = ls())
source('sim_core_matrix_baseline_func.R')
library(deSolve)
library(plyr)

## Time grid: segment 1 covers 0-55, segments 2-8 cover 5-unit windows and
## segment 9 covers 90.01-100 (matching the original times1..times9).
segment_ends <- c(0, 55, 60, 65, 70, 75, 80, 85, 90, 100)
times_segments <- lapply(seq_len(length(segment_ends) - 1), function(i) {
  seq(if (i == 1) segment_ends[i] else segment_ends[i] + 0.01,
      segment_ends[i + 1], by = 0.01)
})

## Habitat fraction used in segment i.
phiseq <- seq(0.1, 0.9, by = 0.1)

## Forcing term as a function of phi (the formula the original recomputed
## by hand before every ode() call).
epsilon_of_phi <- function(phi) 1 + cos(phi * (pi * 3 / 2) - 2.3)

# CORE patch parameters
R0.c <- 2.0
gamma.c = 0.01
alpha.c = 0.001
d.c = 0.25
k.c = 200
# MATRIX patch parameters
R0.m = 0.3
gamma.m = 0.05
alpha.m = 0.01
d.m = 0.05
k.m = 200

phi = phiseq[1]
params.dd <- c(b.c = 0.5,
               d.c = d.c,
               k.c = k.c,
               beta.cc = R0.c*(gamma.c+alpha.c+d.c), # transmission within core
               beta.cm = 0.2,                        # transmission core -> matrix
               gamma.c = gamma.c,
               alpha.c = alpha.c,
               sigma.c = 0.0,
               b.m = 0.1,
               d.m = d.m,
               k.m = k.m,
               beta.mm = R0.m*(gamma.m+alpha.m+d.m),
               beta.mc = 0,
               gamma.m = gamma.m,
               alpha.m = alpha.m,
               sigma.m = 0.0,
               phi = phi,
               epsilon = epsilon_of_phi(phi),
               kappa = 1)

## Initial condition: core near carrying capacity with a single infected,
## matrix entirely susceptible.
initial.values <- c(S.c = (1.01-phi)*params.dd[['k.c']]-1,
                    I.c = 1,
                    R.c = 0,
                    S.m = phi*params.dd[['k.m']],
                    I.m = 0,
                    R.m = 0)

## Run the segments sequentially, stepping phi (and epsilon) each time and
## seeding every segment with the final state of the previous one.
state_vars <- c("S.c", "I.c", "R.c", "S.m", "I.m", "R.m")
runs <- vector("list", length(phiseq))
state <- initial.values
for (i in seq_along(phiseq)) {
  params.dd["phi"] <- phiseq[i]
  params.dd["epsilon"] <- epsilon_of_phi(phiseq[i])
  runs[[i]] <- as.data.frame(ode(func = patch.matrix.model.phi,
                                 y = state,
                                 parms = params.dd,
                                 times = times_segments[[i]]))
  last <- nrow(runs[[i]])
  state <- setNames(as.numeric(runs[[i]][last, state_vars]), state_vars)
}

## Diagnostics the original produced along the way (after rounds 1 and 3).
plot.dynamics(runs[[1]])
plot.dynamics(runs[[3]])

## Combined trajectory across all segments.
full <- rbind.fill(runs)
plot.dynamics(full)

par(mfrow=c(1,1))
plot(full$time, full$I.c/(full$S.c+full$I.c+full$R.c))

## Second half of the combined run (post-transient behaviour).
n_total <- nrow(full)
change <- full[(n_total/2):n_total,]
plot(change$time, change$I.c/(change$S.c+change$I.c+change$R.c),
     ylim=c(0,0.6),
     ylab= 'prevalence',
     type='l',
     col='forestgreen',
     bty='n',lwd=3)
lines(change$time, change$I.m/(change$S.m+change$I.m+change$R.m),
      col='darkgoldenrod', lwd=3)
####POPULATIONS
plot(change$time, (change$S.c+change$I.c+change$R.c),
     ylim=c(0,150),
     ylab= 'populations',
     type='l',
     col='forestgreen',
     bty='n',lwd=3)
lines(change$time, (change$S.m+change$I.m+change$R.m),
      col='darkgoldenrod', lwd=3)
# Prevalence rescaled x100 so it can share axes with the population curves.
lines(change$time, 100*change$I.c/(change$S.c+change$I.c+change$R.c),
      type='l', col='forestgreen',
      bty='n',lwd=3, lty=3)
lines(change$time, 100*change$I.m/(change$S.m+change$I.m+change$R.m),
      col='darkgoldenrod', lwd=3, lty=3)
0
|
618f04d640b1386c61f437cf8b1d4ed5a847fba7
|
2d6f5821fca8e1d5ef62b4bf643af2c56e58bfbe
|
/9thMar18.R
|
3a10208e3954d54cfa0c4e7af498b52b806efb8b
|
[] |
no_license
|
shrishtripathi/RCodelatest
|
0d5b83ad2884a25f435187e320e6078b3828c3d3
|
bab3862da454e77bb1ef84fce0f66391eff839b6
|
refs/heads/master
| 2020-03-31T12:08:10.441464
| 2018-10-09T07:20:23
| 2018-10-09T07:20:23
| 152,204,431
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 967
|
r
|
9thMar18.R
|
# NOTE(review): this file is a console/REPL transcript from an R basics
# session, not a runnable script. Several lines error when sourced (flagged
# below); kept verbatim as a learning record.
vec1<-c('Shrish','Satya')
vec8<-c(36,29)
# Character matrix: numeric column coerced to character explicitly.
mat2<-cbind(vec1,as.character(vec8))
mat2
t(mat2)
# Chained assignment: vec9 and vec8 both become vec1.
vec9<-vec8<-vec1
vec9
vec10<-vec9
vec10
vec8
# Named numeric vector.
vec11<-c('shrish'=36,'satya'=29)
vec11
c
# NOTE(review): `z` is used before it is assigned -- errors in a fresh session.
z
z<-c(11,12,13,14)
y<-c(12)
# Vectorised comparison; y is recycled.
z>=y
z<-c(2,6,4,'k')
# Mixing numbers and a string coerces the whole vector to character.
class(z)
z[4]
z[1]
y<-c(1,1)
sum(y) >1
# NOTE(review): count() is not a base function -- errors unless dplyr/plyr
# is attached.
count(y<=1)
# sum of a logical: TRUE counts as 1.
sum(2<1)
y<-c(1,7,4,2)
y
sum(y[4])
# NOTE(review): y[2>1] is y[TRUE], i.e. the whole vector -- probably not
# what was intended.
sum(y[2>1])
sort(y)
sort(y,decreasing = T)
order(y)
sort(y)[2:4]
# Recycling: the 2-element vector is repeated down the 5-row column.
cbind((1:5),c(1,2))
w<-matrix(cbind(1:3,1:3),byrow = TRUE,nrow = 3)
w
rownames(w)<-c('r1','r2','r3')
w
colnames(w)<-c('c1','c2')
w
# dimnames sets row and column names in one call.
dimnames(w)<-list(c('a1','a2','a3'),c('b1','b2'))
w
vec1<-c(10,11,12)
x <- 1:4
# %*% on two vectors gives the inner product (1x1 matrix).
(z <- x %*% x)
x<-matrix(c(1,2,3,4),nrow = 2,byrow = TRUE)
x
y<-matrix(c(1,1,1,1),nrow = 2, byrow = TRUE)
y
# Matrix multiplication vs element-wise addition.
x %*% y
x+y
# NOTE(review): 'a':'z' is not valid -- `:` needs numerics; this line errors.
letters['a':'z']
'a':'z'
# NOTE(review): 1:5 does not fill 33 rows evenly -- warns/errors as typed.
mz<-matrix(1:5,nrow = 33)
mz<-matrix(1:33,nrow = 3)
mz
matrix
ls
c
nrow
vec1<-c(10,11,12)
vec2<-c(7,8,9)
# 3-dimensional array: 2 rows x 3 cols x 2 slices, filled from both vectors.
arr1<-array(c(vec1,vec2),c(2,3,2))
arr1
arr1<-array(c(vec1,vec2),c(2,3,1))
arr1
# Recycling fills the 4x4x1 array from the 6 available values.
arr1<-array(c(vec1,vec2),c(4,4,1))
arr1
865c9597c008daed8bcce85fd38e9a266f4a4f97
|
8de68f566e3ca78a368a207e600c39ac35599ed3
|
/app.R
|
d000d2d27f5d30113307bf79a9d5c77affd1a55b
|
[] |
no_license
|
onohayat/Japan-Trade
|
5c7f38953ad18dde3265e7740a46207c94462f3f
|
ef3d0ba1c62d0717c49a2a40b89023b8f898bba1
|
refs/heads/master
| 2020-08-10T11:30:23.482961
| 2019-10-11T03:34:06
| 2019-10-11T03:34:06
| 214,333,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,916
|
r
|
app.R
|
# Shiny UI: four tabs -- export map, import map, per-country time series and
# a region comparison. Assumes the datasets referenced here (e.g.
# AllCountriesAllYears) are loaded elsewhere in the app -- TODO confirm.
ui <- fluidPage(
  # Create a title for app
  column(3, titlePanel("Japanese Trade")),
  column(12,
         # Create page for export map with slider input
         tabsetPanel( tabPanel("Export Map",
                               sliderInput(inputId = "Years", h3("Choose the Year"),
                                           min = 1962, max = 2017,
                                           value=1962, sep="", ticks = FALSE),
                               leafletOutput("Map",height="600px")),
                      # Create page for import map with slider input
                      tabPanel("Import Map",
                               sliderInput(inputId = "Years2", h3("Choose the Year"),
                                           min = 1962, max = 2017,
                                           value=1962, sep="", ticks = FALSE),
                               leafletOutput("Map2", height="600px")),
                      # Create page for graphing export and import values for user's preferred country
                      # Dropdown choices are destinations that have export data.
                      tabPanel("Individual Country",
                               selectizeInput("country", label = "Choose a Country",
                                              choices = AllCountriesAllYears$Destination
                                              [AllCountriesAllYears$export_val!="na"],
                                              options=list(create = FALSE)),
                               plotlyOutput("Plot", height="600px")),
                      # Create page for comparing export and import values for different regions
                      tabPanel("Region",
                               fluidRow(column(width=12,
                                               plotlyOutput("Plot2", height="600px")))) )))
# Shiny server: renders the two choropleth maps, the per-country time series
# and the region bar chart. Relies on globals loaded elsewhere in the app
# (data1.2, data2, WorldCountry) -- TODO confirm.
server <- function(input, output){
  # BUG FIX: the original assigned both colour palettes to the same name
  # `qpal`. renderLeaflet expressions are evaluated lazily, so by render
  # time `qpal` always held the import palette and the export map used the
  # wrong colour domain. Give each palette its own name.

  # Palette over log-scaled export values.
  qpal_export <- colorNumeric("YlOrBr", data1.2$log2, reverse = FALSE)
  # Create map for export
  output$Map <- renderLeaflet( {
    leaflet(WorldCountry) %>%
      addTiles() %>%
      addPolygons(fillColor =
                    ~qpal_export(data1.2$log2[data1.2$Years==input$Years]),
                  weight=2, color="black", fillOpacity=0.7,
                  label=~paste(data1.2$Destination[data1.2$Years==input$Years],
                               data1.2$export_val[data1.2$Years==input$Years])) %>%
      setView(lng = 0, lat = 0, zoom = 2)} )

  # Palette over log-scaled import values.
  qpal_import <- colorNumeric("YlOrBr", data1.2$log, reverse = FALSE)
  # Create map for import
  output$Map2 <- renderLeaflet( {
    leaflet(WorldCountry) %>%
      addTiles() %>%
      addPolygons(fillColor =
                    ~qpal_import(data1.2$log[data1.2$Years==input$Years2]),
                  weight=2, color="black", fillOpacity=0.7,
                  label=~paste(data1.2$Destination[data1.2$Years==input$Years2]
                               ,data1.2$import_val[data1.2$Years==input$Years2])) %>%
      setView(lng = 0, lat = 0, zoom = 2)} )

  # Name axes for line/bar graphs
  x <- list(title="Year")
  y <- list(title="Value")
  x2 <- list(title="")
  y2 <- list(title="Value")

  # Create line graph for individual countries
  output$Plot <- renderPlotly( {
    plot_ly(data1.2,x=~Years[data1.2$Destination==input$country]) %>%
      add_lines(y=~export_val[data1.2$Destination==input$country],
                name = "Export" ) %>%
      add_lines(y=~import_val[data1.2$Destination==input$country],
                name = "Import") %>%
      layout( title = ~paste("Trade between Japan and", input$country),
              xaxis = x, yaxis = y) })

  # Create bar graph for comparing regions (animated over Years via frame=).
  output$Plot2 <- renderPlotly( {
    plot_ly(data2, x=~fct_reorder(Region,Export)) %>%
      layout(xaxis = list(title=x2),
             yaxis= list(title=y2,inputautorange=TRUE)) %>%
      add_bars(y=~Export/sum.Export., frame=~Years, name ="Export") %>%
      add_bars(y=~Import/sum.Import., frame=~Years, name= "Import") })
}
# Launch the application with the UI and server defined above.
shinyApp(ui, server)
|
c93ea9894ed3ba9ba44a82667893667d37051cf0
|
90e772dfeb9fc441424dcf5e5beaa545af606f1c
|
/man/calJSI.Rd
|
d1aa778256cb3462029ad3b5dfd6051a812a84b7
|
[
"GPL-3.0-only"
] |
permissive
|
chenjy327/MesKit
|
97d356c8c8ac73493ba6f60488d5a0c6aae23092
|
c9eb589fca6471e30e45cb9e03030af5ade69f83
|
refs/heads/master
| 2021-08-17T07:48:53.618404
| 2021-06-24T06:19:08
| 2021-06-24T06:19:08
| 304,196,319
| 0
| 0
|
MIT
| 2020-10-15T03:10:38
| 2020-10-15T03:10:37
| null |
UTF-8
|
R
| false
| true
| 1,943
|
rd
|
calJSI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calJSI.R
\name{calJSI}
\alias{calJSI}
\title{Calculate the Jaccard similarity index (JSI)}
\usage{
calJSI(
maf,
patient.id = NULL,
pairByTumor = FALSE,
min.ccf = 0,
plot = FALSE,
use.circle = TRUE,
title = NULL,
number.cex = 8,
number.col = "#C77960",
use.tumorSampleLabel = FALSE,
...
)
}
\arguments{
\item{maf}{Maf or MafList object generated by \code{\link{readMaf}} function.}
\item{patient.id}{Select the specific patients. Default NULL, all patients are included.}
\item{pairByTumor}{Compare JSI between different tumors. Default FALSE.}
\item{min.ccf}{The minimum value of CCF. Default 0.}
\item{plot}{Logical (Default: FALSE).}
\item{use.circle}{Logical (Default: TRUE). Whether to use "circle" as visualization method of correlation matrix.}
\item{title}{Title of the plot. Default "Jaccard similarity".}
\item{number.cex}{The size of text shown in correlation plot. Default 8.}
\item{number.col}{The color of text shown in correlation plot. Default "#C77960".}
\item{use.tumorSampleLabel}{Logical (Default: FALSE). Rename the 'Tumor_Sample_Barcode' by 'Tumor_Sample_Label'.}
\item{...}{Other options passed to \code{\link{subMaf}}}
}
\value{
Correlation matrix and heatmap via Jaccard similarity coefficient method
}
\description{
The Jaccard similarity index (JSI) is applied to distinguish monoclonal versus polyclonal seeding in metastases.
}
\examples{
maf.File <- system.file("extdata/", "CRC_HZ.maf", package = "MesKit")
clin.File <- system.file("extdata/", "CRC_HZ.clin.txt", package = "MesKit")
ccf.File <- system.file("extdata/", "CRC_HZ.ccf.tsv", package = "MesKit")
maf <- readMaf(mafFile=maf.File, clinicalFile = clin.File, ccfFile=ccf.File, refBuild="hg19")
calJSI(maf)
}
\references{
Hu, Z., Li, Z., Ma, Z. et al. Multi-cancer analysis of clonality and the timing of systemic spread in paired primary tumors and metastases. Nat Genet (2020).
}
|
df48a9ca491ffb6028bc55ef812b4020a73f35f0
|
c0007ad0ff4aeb8ffae123b098e2f2a60c2c4a3a
|
/kmeans.R
|
0686fdb55bd79b1cacfd6f8e0f809c29a61cd22b
|
[] |
no_license
|
chetan015/DataMining_R
|
cc53ce32c2f9af759f3de3034375769b2d880801
|
feb04a6b1c8f84fcec6027da75cef276bdd9f028
|
refs/heads/master
| 2020-05-26T15:31:47.145264
| 2019-05-23T18:37:25
| 2019-05-23T18:37:25
| 188,287,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,560
|
r
|
kmeans.R
|
# K-means clustering of 2014 NYC Uber pickups (Apr-Sep) into 5 spatial
# divisions, followed by monthly growth per division.
apr14<-read.csv("dataset/uber-raw-data-apr14.csv")
may14<-read.csv("dataset/uber-raw-data-may14.csv")
jun14<-read.csv("dataset/uber-raw-data-jun14.csv")
jul14<-read.csv("dataset/uber-raw-data-jul14.csv")
aug14<-read.csv("dataset/uber-raw-data-aug14.csv")
sep14<-read.csv("dataset/uber-raw-data-sep14.csv")
library(dplyr)
# Stack the six months into one data frame.
data14 <- bind_rows(apr14, may14, jun14, jul14, aug14, sep14)
summary(data14)
library(VIM)
# Missing-value overview plot.
aggr(data14)
library(lubridate)
# Parse the timestamp and derive calendar components as factors.
data14$Date.Time <- mdy_hms(data14$Date.Time)
data14$Year <- factor(year(data14$Date.Time))
data14$Month <- factor(month(data14$Date.Time))
data14$Day <- factor(day(data14$Date.Time))
data14$Weekday <- factor(wday(data14$Date.Time))
data14$Hour <- factor(hour(data14$Date.Time))
data14$Minute <- factor(minute(data14$Date.Time))
data14$Second <- factor(second(data14$Date.Time))
head(data14, n=10)
# Cluster on columns 2:3 -- presumably Lat/Lon; verify column order.
set.seed(20)
clusters <- kmeans(data14[,2:3], 5)
data14$Division <- as.factor(clusters$cluster)
str(clusters)
library(ggmap)
# NOTE(review): get_map() requires a registered map-service API key in
# current ggmap versions.
NYCMap <- get_map("New York", zoom = 10)
ggmap(NYCMap) + geom_point(aes(x = Lon[], y = Lat[], colour = as.factor(Division)),data = data14) +
  ggtitle("NYC Divisions using KMean")
library(DT)
data14$Month <- as.double(data14$Month)
# NOTE(review): count_() is deprecated in modern dplyr (use count()).
month_division_14 <- count_(data14, vars = c('Month', 'Division'), sort = TRUE) %>%
  arrange(Month, Division)
datatable(month_division_14)
library(dplyr)
# Line chart: pickups per division by month.
monthly_growth <- month_division_14 %>%
  mutate(Date = paste("04", Month)) %>%
  ggplot(aes(Month, n, colour = Division)) + geom_line() +
  ggtitle("Uber Monthly Growth - 2014")
monthly_growth
|
da3c1d494141113f111f8db36a3c13a2edb72dec
|
d669e19a5a9f8f3517578cd3411c3d2d4415d2d2
|
/data-raw/clean-charleston1.R
|
e6757a4629ee2fc081aab5214fa349b1176741d7
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spatialanalysis/geodaData
|
d44fc25392fe91ed437a97cca7e33506861e2c5a
|
1e27c7e77b14cbdadd9f43fd9ba56753930350f2
|
refs/heads/master
| 2021-07-13T11:17:45.096357
| 2020-10-05T19:06:42
| 2020-10-05T19:06:42
| 213,948,169
| 16
| 7
|
NOASSERTION
| 2020-10-05T19:06:43
| 2019-10-09T15:00:00
|
R
|
UTF-8
|
R
| false
| false
| 212
|
r
|
clean-charleston1.R
|
# Read the Charleston census-tract shapefile and store it as package data.
library(sf)
library(usethis)

charleston1 <- st_read(
  dsn = "data-raw/sc_final_census2.shp",
  quiet = TRUE,
  stringsAsFactors = FALSE
)

usethis::use_data(charleston1, overwrite = TRUE)
|
2b717b4253fc4f9befb2421ef9229f45247af544
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tidyhydat/examples/hy_daily_flows.Rd.R
|
7b4723a7233d972b6e6ccb2626c2c622e3f8e9d9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 390
|
r
|
hy_daily_flows.Rd.R
|
library(tidyhydat)
### Name: hy_daily_flows
### Title: Extract daily flows information from the HYDAT database
### Aliases: hy_daily_flows
### ** Examples
## Not run:
##D #download_hydat()
##D hy_daily_flows(station_number = c("08MF005"),
##D start_date = "1996-01-01", end_date = "2000-01-01")
##D
##D hy_daily_flows(prov_terr_state_loc = "PE")
##D
## End(Not run)
|
94a40ca49408579f915fb6aa473717bffdd1860f
|
1bc8ecb3c03577de895908534d849b687adc551d
|
/R/guide_axis_genomic.R
|
db40f3ac096f704e548a057de062121ba8b73e02
|
[
"MIT"
] |
permissive
|
teunbrand/ggnomics
|
ce30f3f302391b600e814b8844b7e6ae676e3c17
|
30568bef426d87b42a652619582f6beea5dd52aa
|
refs/heads/master
| 2021-09-11T10:17:02.129116
| 2020-07-29T21:12:08
| 2020-07-29T21:12:08
| 181,505,723
| 88
| 7
| null | 2020-06-19T19:36:06
| 2019-04-15T14:37:23
| null |
UTF-8
|
R
| false
| false
| 13,896
|
r
|
guide_axis_genomic.R
|
# Constructor -------------------------------------------------------------
#' @name guide_genomic_axis
#' @title Axis for genomic positions
#'
#' @description This axis guide is the genomic equivalent of
#' \code{\link[ggplot2]{guide_axis}} to pair with the genomic position scales
#' \code{\link[=scale_genomic]{scale_(x|y)_genomic()}}.
#'
#' @inheritParams ggplot2::guide_axis
#'
#' @details This guide places the sequence names in the middle or their limits
#' and places label for minor breaks at positions along the sequences.
#' Defaults to \code{guide_axis} behaviour if not exposed to genomic data.
#'
#' @return A \code{guide} object.
#' @family position guides
#'
#' @export
#'
#' @examples
#' NULL
guide_genomic_axis <- function(
  title = waiver(),
  check.overlap = FALSE,
  angle = NULL,
  n.dodge = 1,
  order = 0,
  position = waiver()
) {
  # Collect the guide specification; training/transformation behaviour is
  # supplied by the S3 methods defined for the "genomic_axis" class.
  spec <- list(
    title = title,
    check.overlap = check.overlap,
    angle = angle,
    n.dodge = n.dodge,
    order = order,
    position = position,
    available_aes = c("x", "y"),
    name = "genomic_axis"
  )
  structure(spec, class = c("guide", "genomic_axis", "axis"))
}
# Trainer -----------------------------------------------------------------
#' @export
#' @describeIn guide_genomic_axis Trainer for genomic axis. See
#' \code{\link[ggplot2]{guide-exts}}.
#' @usage NULL
# Trainer for the genomic axis guide.
#
# Extracts major (sequence) and minor (position) breaks from the scale and
# stores them as the guide's `key` / `key_minor` data frames. If the data is
# not genomic, the guide degrades gracefully to a stock ggplot2 axis.
guide_train.genomic_axis <- function(guide, scale, aesthetic = NULL) {
  aesthetic <- aesthetic %||% scale$aesthetics[1]
  majorbreaks <- scale$get_breaks()
  # Doesn't make sense to use this axis if the data isn't genomic or scale
  # doesn't have a labeller for minor breaks.
  if (!inherits(Nightfall(majorbreaks), 'ANYGenomic') ||
      !("get_labels_minor" %in% union(names(scale), names(scale$super())))) {
    # Fall back to guide_train.axis and strip our class so all later guide
    # methods also use the stock axis behaviour.
    guide <- NextMethod()
    class(guide) <- setdiff(class(guide), "genomic_axis")
    return(guide)
  }
  minorbreaks <- scale$get_breaks_minor()
  # NOTE(review): `lens` is computed but never used below — dead code?
  lens <- c(length(majorbreaks), length(minorbreaks))
  # Make a data.frame for empty ticks
  empty_ticks <- .int$new_data_frame(
    list(aesthetic = numeric(), .value = numeric(),
         .label = character())
  )
  names(empty_ticks) <- c(aesthetic, ".value", ".label")
  if (length(intersect(scale$aesthetics, guide$available_aes)) == 0) {
    # Scale aesthetic is not x/y: warn and keep empty keys so the axis draws
    # nothing rather than erroring.
    warning("genomic_axis guide needs appropriate scales: ",
            guide$available_aes)
    guide$key <- empty_ticks
    guide$key_minor <- empty_ticks
  } else {
    # Major key: one row per sequence-name break.
    guide$key <- format_guide_key(
      breaks = majorbreaks, scale = scale, prototype = empty_ticks,
      aesthetic = aesthetic, type = "major"
    )
    # Minor key: one row per within-sequence position break.
    guide$key_minor <- format_guide_key(
      breaks = minorbreaks, scale = scale, prototype = empty_ticks,
      aesthetic = aesthetic, type = "minor"
    )
  }
  guide$name <- paste0(guide$name, "_", aesthetic)
  # Hash identifies identical guides so ggplot2 can merge/deduplicate them.
  guide$hash <- digest::digest(list(guide$title, guide$key$.value,
                                    guide$key$.label, guide$name))
  guide
}
# Transformer -------------------------------------------------------------
#' @export
#' @describeIn guide_genomic_axis Transformer for genomic axis. See
#' \code{\link[ggplot2]{guide-exts}}.
#' @usage NULL
# Transformer for the genomic axis guide.
#
# Maps the guide keys into panel coordinates via the coord, then collapses
# the major key to one row per unique label positioned at the mean of that
# label's break positions (so a sequence name sits centred over its range).
guide_transform.genomic_axis <- function(guide, coord, panel_params) {
  # Nothing to place: untrained/empty guide passes through unchanged.
  if (is.null(guide$position) || nrow(guide$key) == 0) {
    return(guide)
  }
  aesthetics <- names(guide$key)[!grepl("^\\.", names(guide$key))]
  if (all(c("x", "y") %in% aesthetics)) {
    guide$key <- coord$transform(guide$key, panel_params)
    guide$key_minor <- coord$transform(guide$key_minor, panel_params)
  } else {
    # Only one position aesthetic present: pin the other one to the panel
    # edge (-Inf for bottom/left, Inf for top/right) before transforming.
    other_aesthetic <- setdiff(c("x", "y"), aesthetics)
    override_value <- if (guide$position %in% c("bottom", "left")) {
      -Inf
    } else {
      Inf
    }
    guide$key[[other_aesthetic]] <- override_value
    guide$key_minor[[other_aesthetic]] <- override_value
    guide$key <- coord$transform(guide$key, panel_params)
    guide$key_minor <- coord$transform(guide$key_minor, panel_params)
    .int$warn_for_guide_position(guide)
  }
  # Average positions of major labels
  major <- guide$key
  # Group break positions by label (in first-appearance order) ...
  aa <- split(major[aesthetics], factor(major$.label,
                                        levels = unique(major$.label)))
  # ... take the per-label column means ...
  aa <- matrix(vapply(aa, colMeans, numeric(length(aesthetics)),
                      USE.NAMES = FALSE),
               ncol = length(aesthetics))
  aa <- lapply(seq_along(aesthetics), function(i){aa[,i]})
  # ... and keep a single, re-centred row per unique label.
  major <- major[!duplicated(major$.label), ]
  major[aesthetics] <- aa
  guide$key <- major
  guide
}
# Grob generator ----------------------------------------------------------
#' @export
#' @describeIn guide_genomic_axis Graphic object generator for genomic axis. See
#' \code{\link[ggplot2]{guide-exts}}.
#' @usage NULL
guide_gengrob.genomic_axis <- function(guide, theme) {
  # The first column of the key whose name does not start with "." is the
  # position aesthetic ("x" or "y"); everything else is metadata.
  key_cols <- names(guide$key)
  pos_aes <- key_cols[!grepl("^\\.", key_cols)][1]
  # Delegate the actual drawing to the shared helper.
  draw_genomic_axis(
    break_positions = guide$key[[pos_aes]],
    break_pos_minor = guide$key_minor[[pos_aes]],
    break_labels = guide$key$.label,
    break_lab_minor = guide$key_minor$.label,
    axis_position = guide$position,
    theme = theme,
    check.overlap = guide$check.overlap,
    angle = guide$angle,
    n.dodge = guide$n.dodge
  )
}
# Drawing function --------------------------------------------------------
# Render a genomic axis: the axis line, tick marks with minor (position)
# labels, and major (sequence-name) labels centred between ticks.
#
# @param break_positions Positions of the major (sequence) labels.
# @param break_pos_minor Positions of the minor breaks (ticks + labels).
# @param break_labels,break_lab_minor Labels matching the positions above.
# @param axis_position One of "top", "bottom", "left", "right".
# @param theme Complete theme used to resolve line/tick/text elements.
# @param check.overlap,angle,n.dodge See `guide_genomic_axis()`.
# @return A gTree of class "absoluteGrob".
draw_genomic_axis <- function(
  break_positions,
  break_pos_minor,
  break_labels,
  break_lab_minor,
  axis_position,
  theme,
  check.overlap = FALSE,
  angle = NULL,
  n.dodge = 1
) {
  # Setup assumptions
  axis_position <- match.arg(axis_position, c("top", "bottom",
                                              "right", "left"))
  aesthetic <- if (axis_position %in% c("top", "bottom")) "x" else "y"
  labels_first_gtable <- axis_position %in% c("left", "top")
  # Do vertical vs horizontal
  is_vertical <- axis_position %in% c("left", "right")
  if (is_vertical) {
    position_size <- "height"
    non_position_size <- "width"
    gtable_element <- gtable::gtable_row
    measure_gtable <- gtable::gtable_width
    measure_labels_non_pos <- grid::grobWidth
  } else {
    position_size <- "width"
    non_position_size <- "height"
    gtable_element <- gtable::gtable_col
    # Fix: a horizontal axis builds a *column* gtable, whose off-axis extent
    # is its height. Measuring the width here (as before) fed the wrong size
    # to the justification viewport below.
    measure_gtable <- gtable::gtable_height
    measure_labels_non_pos <- grid::grobHeight
  }
  # Do primary vs secondary
  if (axis_position %in% c("right", "top")) {
    non_position_panel <- unit(0, "npc")
  } else {
    non_position_panel <- unit(1, "npc")
  }
  # Build axis line
  line_grob <- setup_axis_line(axis_position, theme)
  # Setup breaks
  n_breaks <- length(break_pos_minor)
  opposite_positions <- setNames(c("bottom", "top", "left", "right"),
                                 c("top", "bottom", "right", "left"))
  axis_position_opposite <- unname(opposite_positions[axis_position])
  # No breaks: return just the axis line.
  if (n_breaks == 0) {
    return(grid::gTree(
      children = grid::gList(line_grob),
      width = grid::grobWidth(line_grob),
      height = grid::grobHeight(line_grob),
      cl = "absoluteGrob"  # fix: was misspelled "abosluteGrob"
    ))
  }
  # Setup labels
  label_grobs <- setup_axis_labels(
    major = break_labels, minor = break_lab_minor,
    major_pos = break_positions, minor_pos = break_pos_minor,
    position = axis_position, theme = theme,
    check.overlap = check.overlap,
    angle = angle, n.dodge = n.dodge  # fix: was hard-coded to 1, ignoring the argument
  )
  # Setup tickmarks
  ticks <- setup_tickmarks(break_pos_minor, axis_position, theme)
  # Combine ticks and labels
  non_position_sizes <- paste0(non_position_size, "s")
  label_dims <- base::do.call(grid::unit.c, lapply(label_grobs,
                                                   measure_labels_non_pos))
  grobs <- c(list(ticks$grob), label_grobs)
  grob_dims <- grid::unit.c(ticks$size, label_dims)
  # Left/top axes stack labels before ticks (reading outward from the panel).
  if (labels_first_gtable) {
    grobs <- rev(grobs)
    grob_dims <- rev(grob_dims)
  }
  # Build final grob
  gt <- base::do.call(
    gtable_element,
    setNames(list("axis", grobs, grob_dims, unit(1, "npc")),
             c("name", "grobs", non_position_sizes, position_size))
  )
  # Build viewport for text justification
  justvp <- base::do.call(
    grid::viewport,
    setNames(list(non_position_panel, measure_gtable(gt),
                  axis_position_opposite),
             c(setdiff(c("x", "y"), aesthetic), non_position_size, "just"))
  )
  # Combine the lot
  grid::gTree(
    children = grid::gList(line_grob, gt),
    width = gtable::gtable_width(gt),
    height = gtable::gtable_height(gt),
    vp = justvp,
    cl = "absoluteGrob"
  )
}
# Helper functions --------------------------------------------------------
# Assemble a guide key data frame (position, .value, .label) for either the
# major or the minor breaks of a scale; returns `prototype` when empty.
format_guide_key <- function(breaks = NULL, scale, prototype,
                             aesthetic = "x", type = "major") {
  # Guard clause: no breaks means the empty prototype key is returned as-is.
  if (length(breaks) == 0) {
    return(prototype)
  }
  # Discrete scales need their breaks mapped to positions first.
  mapped <- if (scale$is_discrete()) scale$map(breaks) else breaks
  key <- .int$new_data_frame(setNames(list(mapped), aesthetic))
  key$.value <- breaks
  # Minor keys use the scale's dedicated minor labeller when it exists.
  use_minor_labels <- type == "minor" && !is.null(scale$get_labels_minor)
  raw_labels <- if (use_minor_labels) {
    scale$get_labels_minor(breaks)
  } else {
    scale$get_labels(breaks)
  }
  key$.label <- validate_labels(raw_labels)
  # Drop rows whose mapped position is NA/Inf.
  key[is.finite(key[[aesthetic]]), ]
}
# Normalise a label container: list labels become either an expression vector
# (when any element is a quoted call/symbol, so plotmath can render it) or a
# plain atomic vector; anything else passes through unchanged.
validate_labels <- function(labels) {
  if (!is.list(labels)) {
    return(labels)
  }
  contains_language <- vapply(labels, is.language, logical(1))
  if (any(contains_language)) {
    base::do.call(expression, labels)
  } else {
    unlist(labels)
  }
}
# Helper for axis line in draw function
# Build the axis-line grob for one side of the panel.
#
# @param position Axis position: "top", "bottom", "left" or "right".
# @param theme Complete ggplot2 theme used to resolve the line element.
# @return The grob produced by `element_grob()` spanning the panel edge.
setup_axis_line <- function(position, theme) {
  # Top/bottom axes run along x; left/right along y.
  aesthetic <- if (position %in% c("top", "bottom")) "x" else "y"
  # Secondary (right/top) axes sit at 0 npc on the off-axis; primary at 1 npc.
  alt <- unit(if (position %in% c("right", "top")) 0 else 1, "npc")
  # Resolve elements
  line_element_name <- paste0("axis.line.", aesthetic, ".", position)
  element <- calc_element(line_element_name, theme)
  # Fix: return the grob directly. Previously the last expression assigned to
  # an unused local (`line_grob <- ...`), which made the function return its
  # value invisibly.
  base::do.call(
    element_grob,
    setNames(
      list(
        element,
        unit(c(0, 1), "npc"),
        grid::unit.c(alt, alt)
      ),
      c("element", aesthetic, setdiff(c("x", "y"), aesthetic))
    )
  )
}
# Helper for tickmarks in draw function
# Build the tick-mark grob for an axis side, plus the themed tick length.
#
# @param break_pos Tick positions in native (panel) coordinates.
# @param position Axis position: "top", "bottom", "left" or "right".
# @param theme Complete theme used to resolve tick element and length.
# @return A list with `grob` (the ticks) and `size` (tick length unit).
setup_tickmarks <- function(
  break_pos,
  position,
  theme
) {
  aesthetic <- if (position %in% c("top", "bottom")) "x" else "y"
  # Calculate elements
  element_name <- paste0("axis.ticks.", aesthetic, ".", position)
  element_size <- paste0(
    "axis.ticks.length.", aesthetic, ".", position
  )
  element <- calc_element(element_name, theme)
  size <- calc_element(element_size, theme)
  # Switch primary/secondary: ticks point away from the panel, so direction,
  # anchor edge and endpoint order flip between the two sides.
  if (position %in% c("right", "top")) {
    dir <- 1
    alt <- unit(0, "npc")
    ord <- c(2, 1)
  } else {
    dir <- -1
    alt <- unit(1, "npc")
    ord <- c(1, 2)
  }
  n_breaks <- length(break_pos)
  # Each tick is a 2-point segment: positions repeated pairwise along the
  # axis, alternating tip/base on the off-axis, with id.lengths = 2 so
  # element_grob draws separate segments.
  args <- list(
    element,
    rep(unit(break_pos, "native"), each = 2),
    rep(grid::unit.c(alt + (dir * size),
                     alt)[ord],
        times = n_breaks),
    rep(2, times = n_breaks)
  )
  args <- setNames(args, c("element",
                           aesthetic,
                           setdiff(c("x", "y"), aesthetic),
                           "id.lengths"))
  list(grob = do.call(element_grob, args), size = size)
}
# Helper for labels in draw function
# Build the label grobs for a genomic axis: one grob per dodge row for the
# minor (position) labels, followed by one grob for the major (sequence)
# labels.
#
# @param major,minor Label vectors; @param major_pos,minor_pos their positions.
# @param position Axis position; @param theme complete theme.
# @param check.overlap,angle,n.dodge Standard axis label options; dodging
#   applies to the minor labels only.
# @return A list of text grobs (minor dodge rows first, major labels last).
setup_axis_labels <- function(
  major, minor = NULL, major_pos, minor_pos = NULL,
  position, theme, check.overlap = FALSE, angle = NULL, n.dodge = 1
) {
  aesthetic <- if (position %in% c("top", "bottom")) "x" else "y"
  vertical <- position %in% c("left", "right")
  label_element_name <- paste0("axis.text.", aesthetic, ".", position)
  element <- calc_element(label_element_name, theme)
  # Validate labels if necessary (lists -> expression/atomic vectors)
  major <- validate_labels(major)
  minor <- validate_labels(minor)
  # Override theme defaults with guide specifics (angle implies hjust/vjust)
  if (inherits(element, "element_text")) {
    overrides <- .int$axis_label_element_overrides(position, angle)
    element$angle <- overrides$angle %||% element$angle
    element$hjust <- overrides$hjust %||% element$hjust
    element$vjust <- overrides$vjust %||% element$vjust
  }
  # Setup dodging: assign minor labels round-robin to n.dodge rows.
  n_breaks <- length(minor_pos)
  dodge_pos <- rep(seq_len(n.dodge), length.out = n_breaks)
  dodge_indices <- split(seq_len(n_breaks), dodge_pos)
  # Do minor labels: one text grob per dodge row.
  label_grobs <- lapply(dodge_indices, function(indices) {
    .int$draw_axis_labels(
      break_positions = minor_pos[indices], break_labels = minor[indices],
      label_element = element,
      is_vertical = vertical, check.overlap = check.overlap
    )
  })
  # Do major labels: appended last (never dodged).
  label_grobs <- append(
    label_grobs, list(
      .int$draw_axis_labels(
        break_positions = major_pos, break_labels = major,
        label_element = element,
        is_vertical = vertical, check.overlap = check.overlap
      )
    )
  )
  label_grobs
}
|
3b715054304a06863a0bafb0f6eb19794f4280b1
|
13285d0f589987bfa5a4bde109be596ae6fba0ac
|
/kaggle/predict-wordpress-likes/carter_s/r/models.r
|
920b5ca20b1d9310a3d353733a4a4e71989388b0
|
[] |
no_license
|
raziakram/solutions
|
46508666b32f08baa998e95bf8b1748c390d9258
|
eeffe73300941b7768d3a062a0ee296e3bcd56e6
|
refs/heads/master
| 2020-12-25T15:30:39.395463
| 2014-05-20T05:32:02
| 2014-05-20T05:32:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,993
|
r
|
models.r
|
# Kaggle "Predict WordPress Likes" — model training driver (part 1: setup).
# Prints start time, loads dependencies and helper scripts, defines the
# segmentation rule, then loads the training data and applies it.
Sys.time()
# NOTE(review): require() is used for loading; library() would fail loudly
# if a package is missing instead of returning FALSE.
require(biglm)
require(randomForest)
# load functions in here
source("./r/genfunc.r")
source("./r/datafunc.r")
# define dataset alteration functions
# Assigns each row of the global `dataset` to one of four segments:
#   "dcmu"/"dcsu": user has direct history with the blog (multi/single-author)
#   "pr":          no history, but blog ranks in the top 1000 by pagerank
#   "tp":          everything else
# NOTE(review): mutates the global `dataset` via `<<-` — callers must have
# `dataset` bound in the global environment before calling.
alter.dataset <- function()
{
  dataset$segment <<- with(dataset,
    ifelse(
      (user_blog_hist_like_ct > 0) | (user_blog_all_user_like_blog_post_share > 0)
      , ifelse(blog_author_ct > 1, "dcmu", "dcsu")
      , ifelse(
        (user_blog_pagerank_by_like_share_postrank > 0 & user_blog_pagerank_by_like_share_postrank <= 1000)
        , "pr"
        , "tp"
      )
    )
  )
}
# load data
source("./r/build_train_data.r")
gc()
alter.dataset()
gc()
# create segmented model
# One weighted logistic regression per segment (feature sets shrink as the
# available user/blog history shrinks), plus a stratified random forest over
# the full dataset. Heavy glm components are NULLed before saving to keep
# the serialized models small.
{
  models <- list()
  # Segment "dcmu": direct user/blog history, multi-author blog (full feature set).
  models$dcmu = glm(
    result ~ 1 + post_weekday
    + user_blog_all_blog_post_user_like_share + user_blog_hist_blog_post_user_like_share
    + user_blog_all_blog_like_user_like_share + user_blog_hist_blog_like_user_like_share
    + user_blog_all_user_like_blog_post_share + user_blog_hist_user_like_blog_post_share
    + user_blog_weekM1_blog_post_user_like_share + user_blog_weekM2_blog_post_user_like_share
    + user_blog_pagerank_by_like_share_blogprob
    + user_post_topic_proximity_mean + user_post_topic_proximity_max
    + user_blog_lang_proximity
    + user_post_tz_proximity_to_stim_likes
    + user_post_author_post_user_like_share + user_post_user_like_author_post_share
    + user_post_blog_post_author_post_share + user_post_blog_like_author_like_share
    + user_post_user_is_blog_author
    + user_post_user_is_post_author
    + ifelse(user_post_user_is_post_author=="True", user_as_author_post_user_like_share, 0)
    , dataset[(dataset$segment == "dcmu"), ]
    , family = binomial, weights = dataset$lrweight[dataset$segment == "dcmu"]
  )
  models$dcmu[c("data", "weights", "model", "residuals", "fitted.values", "y")] = NULL
  # Segment "dcsu": direct history, single-author blog (no per-author features).
  models$dcsu = glm(
    result ~ 1 + post_weekday
    + user_blog_all_blog_post_user_like_share + user_blog_hist_blog_post_user_like_share
    + user_blog_all_blog_like_user_like_share + user_blog_hist_blog_like_user_like_share
    + user_blog_all_user_like_blog_post_share + user_blog_hist_user_like_blog_post_share
    + user_blog_weekM1_blog_post_user_like_share + user_blog_weekM2_blog_post_user_like_share
    + user_blog_pagerank_by_like_share_blogprob
    + user_post_topic_proximity_mean + user_post_topic_proximity_max
    + user_blog_lang_proximity
    + user_post_tz_proximity_to_stim_likes
    + user_post_user_is_post_author
    + ifelse(user_post_user_is_post_author=="True", user_as_author_post_user_like_share, 0)
    , dataset[(dataset$segment == "dcsu"), ]
    , family = binomial, weights = dataset$lrweight[dataset$segment == "dcsu"]
  )
  models$dcsu[c("data", "weights", "model", "residuals", "fitted.values", "y")] = NULL
  # Segment "pr": no direct history; blog in top-1000 pagerank.
  models$pr = glm(
    result ~ 1 + post_weekday
    + user_blog_pagerank_by_like_share_blogprob
    + user_post_topic_proximity_mean + user_post_topic_proximity_max
    + user_blog_lang_proximity
    + user_post_tz_proximity_to_stim_likes
    + user_post_user_is_post_author
    + ifelse(user_post_user_is_post_author=="True", user_as_author_post_user_like_share, 0)
    , dataset[(dataset$segment == "pr"), ]
    , family = binomial, weights = dataset$lrweight[dataset$segment == "pr"]
  )
  models$pr[c("data", "weights", "model", "residuals", "fitted.values", "y")] = NULL
  # Segment "tp": the tail — smallest feature set.
  models$tp = glm(
    result ~ 1 + post_weekday
    + user_blog_pagerank_by_like_share_blogprob
    + user_post_topic_proximity_mean + user_post_topic_proximity_max
    + user_blog_lang_proximity
    + user_post_tz_proximity_to_stim_likes
    + user_post_user_is_post_author
    , dataset[ (dataset$segment == "tp"), ]
    , family = binomial, weights = dataset$lrweight[dataset$segment == "tp"]
  )
  models$tp[c("data", "weights", "model", "residuals", "fitted.values", "y")] = NULL
  # Random forest over all segments, stratified by rfstrata.
  strata = dataset$rfstrata[, drop = TRUE]
  # NOTE(review): sampsize of 1 observation per stratum is unusually small —
  # presumably deliberate (balanced, fast trees); confirm against the
  # competition write-up.
  sampsize =rep(1, length(levels(strata)))
  models$rf = randomForest(
    as.factor(result) ~ post_weekday
    + blog_all_post_ct + blog_hist_post_ct + blog_weekM1_post_ct
    + blog_all_like_ct
    + user_all_like_ct + user_hist_like_ct + user_weekM1_like_ct
    + user_blog_all_blog_post_user_like_share + user_blog_hist_blog_post_user_like_share
    + user_blog_weekM1_blog_post_user_like_share + user_blog_weekM2_blog_post_user_like_share
    + user_blog_all_blog_like_user_like_share + user_blog_hist_blog_like_user_like_share
    + user_blog_weekM1_blog_like_user_like_share
    + user_blog_all_user_like_blog_post_share + user_blog_hist_user_like_blog_post_share
    + user_blog_weekM1_user_like_blog_post_share
    + user_blog_pagerank_by_like_share_blogprob
    + user_post_topic_proximity_max + user_post_topic_proximity_mean
    + user_is_english + blog_is_english + user_blog_lang_proximity
    + user_post_tz_proximity_to_stim_likes
    + blog_author_ct
    + user_post_author_post_user_like_share + user_post_user_like_author_post_share
    + user_post_blog_post_author_post_share + user_post_blog_like_author_like_share
    + user_post_user_is_blog_author
    + user_post_user_is_post_author + user_as_author_post_user_like_share
    , dataset, strata = strata, sampsize = sampsize
    , do.trace = TRUE, proximity = FALSE, ntree = 1000
  )
  save(models, file = "./rdata/models.rdata")
}
# Rebuild `dataset` with the production (scoring) data and re-segment it.
source("./r/build_prod_data.r")
gc()
alter.dataset()
gc()
# predict models
# Per-segment logistic scores are summed with the random-forest score to form
# the ensemble prediction EN; batch.win.predict scores in 250k-row chunks.
{
  LR = rep(NA, nrow(dataset))
  LR[dataset$segment == "dcmu"] = batch.win.predict(models$dcmu, dataset[dataset$segment == "dcmu", ], lr.win.predict, 250000)
  LR[dataset$segment == "dcsu"] = batch.win.predict(models$dcsu, dataset[dataset$segment == "dcsu", ], lr.win.predict, 250000)
  LR[dataset$segment == "pr"] = batch.win.predict(models$pr, dataset[dataset$segment == "pr", ], lr.win.predict, 250000)
  LR[dataset$segment == "tp"] = batch.win.predict(models$tp, dataset[dataset$segment == "tp", ], lr.win.predict, 250000)
  RF = batch.win.predict(models$rf, dataset, rf.win.predict, 250000)
  EN = LR + RF
  dataset$EN = EN
  get.submission("EN", "FinalSubmission")
  save(LR, RF, EN, file = "./rdata/predictions.rdata")
}
Sys.time()
|
27fb110c0d46d48946b9e1b498ecab0aefd8353f
|
724f59e3c9449ba8ffd56bf06512b3d2802babe8
|
/data-raw/HarvardOxford/ho_ctab.R
|
0df46d9ee4f26a8019549b84c9bb7ba078bf22d8
|
[
"MIT"
] |
permissive
|
bbuchsbaum/ggsegExtra
|
74088c50fa544f9b1032326de593fd1e05421b96
|
e1409c8453d6268cd13bf39bc2cb6a269ee847c3
|
refs/heads/master
| 2020-08-26T20:47:26.562016
| 2019-10-23T20:42:08
| 2019-10-23T20:42:08
| 217,143,613
| 0
| 0
|
NOASSERTION
| 2019-10-23T20:05:08
| 2019-10-23T20:05:07
| null |
UTF-8
|
R
| false
| false
| 929
|
r
|
ho_ctab.R
|
## make an annotation file for the harvard-oxford cortical atlas
# Reads the atlas region names from the FSL XML, pairs each region with an
# RGBA colour borrowed from FreeSurfer's colour LUT, and writes a
# FreeSurfer-style .ctab annotation table.
# Requires the FSLDIR and FREESURFER_HOME environment variables to be set.
library(tidyverse)
library(xml2)
ho <- xml2::read_xml(file.path(Sys.getenv("FSLDIR"), "/data/atlases/HarvardOxford-Cortical.xml"))
ll <- as_list(ho)
# One label per <label> node; prepend "unknown" as index 0.
labels <- map_chr(ll$atlas$data, 1)
names(labels) <- NULL
labels <- c("unknown", labels)
HO <- tibble(idx=0:(length(labels)-1), labels=labels)
FS <- readr::read_table2(file.path(Sys.getenv("FREESURFER_HOME"), "FreeSurferColorLUT.txt"), skip=4, col_names=FALSE)
colnames(FS) <- c("idx", "label", "R", "G", "B", "A")
## choose 48 rows from here for colour
# which.max on a logical gives the first TRUE, i.e. the first matching row.
start <- which.max(FS$label=="ctx-lh-Unknown")
colours <- slice(FS, start:(start+nrow(HO)-1))
HO <- bind_cols(HO, select(colours, R, G, B, A))
# make.names() sanitises labels into valid identifiers (spaces -> dots etc.).
HO <- mutate(HO, labels=make.names(labels))
readr::write_delim(HO, "../label/ho.annot.ctab", col_names = FALSE)
## Want the colours to be distinct - do any editing here.
## HLS transform could be an option.
|
73d85bf99e76a5fc1a5c9384e1c8fdde9b1203c6
|
2e2b4e090626d27adf065735751efe31248dcbbd
|
/lcebchk.R
|
d70006aec1fbe62553cbe77d88ff78a8810672fa
|
[] |
no_license
|
P-R-McWhirter/skycamT_variable_classification_functions
|
62742e89da01dee71bd690119f6079389721c7c4
|
ae32976f882a8fc999843571a4d0b82c5aed6538
|
refs/heads/master
| 2021-02-18T21:16:55.109810
| 2020-03-05T18:26:20
| 2020-03-05T18:26:20
| 245,238,114
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,766
|
r
|
lcebchk.R
|
# Eclipsing-binary period check: scans a narrow period window around `per`
# (width controlled by `ep`) over light-curve `ts` (col 1 = time, col 2 =
# magnitude) and returns the candidate period minimising a string-length-like
# statistic.
#
# NOTE(review): this function appears broken as written — the loop body
# references `ftsb`, `t`, `freq`, `k`, `x`, `me` and `pwr`, none of which are
# defined in this scope (`t` would resolve to base::t). It also never fills
# `perf`/`res`, yet plots and indexes them afterwards, and mixes
# `which.max(perf)` with `which.min(perf)`. It looks like a half-finished
# edit of lcebchk_old below — confirm before use. `bins` and `quiet` are
# unused.
lcebchk <- function(ts, per = 1.0, ep = 0.01, bins = 100, quiet = FALSE) {
  library(e1071)
  start <- Sys.time()
  # Period grid: 1000 candidates in [per*(1-ep), per*(1+ep)].
  lowper <- per * (1 - ep)
  highper <- per * (1 + ep)
  pers <- seq(lowper, highper, length.out = 1000)
  perf <- rep(0, length(pers))
  res <- matrix(0, nrow = length(pers), ncol = 3)
  # Median-subtract magnitudes, then keep only positive residuals.
  ts[,2] <- ts[,2] - median(ts[,2])
  # NOTE(review): median() of the whole two-column object — intended?
  m <- median(ts)
  ts <- ts[which(ts[,2] > 0),]
  for (i in 1:length(pers)){
    # NOTE(review): `ftsb` undefined — this errors at first iteration.
    fts <- ftsb
    fts1 <- ftsb
    # Phase-fold (references undefined `t`, `freq`, `k`, `x`).
    p <- (t / (1 / freq[k])) %% 1
    x[, 1] <- p
    x <- x[order(p),]
    #x[,2] <- (x[,2] - min(x[,2])) / (2.0 * (max(x[,2]) - min(x[,2]))) - 0.25
    # x1 = x shifted by one row (cyclic), for neighbour differences.
    x1 <- x
    # NOTE(review): inner loop index `i` shadows the outer period index `i`.
    for (i in 2:length(x[,1])) {
      x1[i,] <- x[i-1,]
    }
    x1[1,] <- x[length(x[,1]),]
    #LK[j] <- sum(((x1[,2] - x[,2])^2.0 + (x1[,1] - x[,1])^2.0)^0.5) / sum(((x[,2] - me)^2.0))
    pwr[k] <- sum((x1[,2] - x[,2])^2) / sum((x[,2] - me)^2)
  }
  plot(pers, perf, type = "l")
  print(res[which.max(perf),])
  plot((((((ts[,1] - ts[which.max(ts[,2]),1])/ pers[which.max(perf)]) + 0.5) %% 1) - 0.5), ts[,2], pch=19)
  #lines(seq(-0.5, 0.5, length.out = 10000), res[which.max(perf),1] * exp(-0.5 * ((seq(-0.5, 0.5, length.out = 10000) - res[which.min(perf),2])/res[which.min(perf),3])^2), col = "red")
  print(Sys.time() - start)
  # Returns the period with the minimal statistic.
  pers[which.min(perf)]
}
# Older period-check implementation: for each candidate period, phase-folds
# the light curve on the minimum-flux epoch, restricts to phases near the
# eclipse (|phase| < 0.1), fits a fixed-width Gaussian template by linear
# regression, and records the squared-error of the fit. Returns the period
# with the smallest recorded error and plots the folded curve plus template.
#
# NOTE(review): the closure `f(par)` and the random `parm` draws are dead
# code — the fit that is actually scored uses the hard-coded template centre
# 0.017 and width 0.02 every iteration, so the 100-draw inner loop only
# burns RNG. Also `perf[i]` stores the *last* `curr`, not the best
# (`oldcurr`), and `quiet` is unused. Confirm intent before relying on this.
lcebchk_old <- function(ts, per = 1.0, ep = 0.01, quiet = FALSE) {
  start <- Sys.time()
  # Period grid: 1000 candidates in [per*(1-ep), per*(1+ep)].
  lowper <- per * (1 - ep)
  highper <- per * (1 + ep)
  pers <- seq(lowper, highper, length.out = 1000)
  perf <- rep(0, length(pers))
  res <- matrix(0, nrow = length(pers), ncol = 3)
  # Median-subtract and keep positive residuals (candidate eclipse points).
  ts[,2] <- ts[,2] - median(ts[,2])
  ts <- ts[which(ts[,2] > 0),]
  for (i in 1:length(pers)){
    fts <- ts
    # Fold on the minimum-flux epoch; phases in [-0.5, 0.5).
    fts[,1] <- ((((fts[,1] - fts[which.min(fts[,2]),1])/ pers[i]) + 0.5) %% 1) - 0.5
    fts <- fts[order(fts[,1]),]
    # Keep only points near the eclipse.
    fts <- fts[which(fts[,1] > -0.1 & fts[,1] < 0.1),]
    r <- fts[,2]
    # NOTE(review): objective defined but never optimised/called.
    f <- function(par)
    {
      m <- par[1]
      sd <- par[2]
      k <- par[3]
      rhat <- k * exp(-0.5 * ((fts[,1] - m)/sd)^2)
      sum((r - rhat)^2)
    }
    oldcurr <- 100000000
    curr <- 1000000
    #plot(fts, pch=19)
    for (k in 1:100){
      # Random template parameters — drawn but unused by the scoring below.
      parm <- rep(0, 3)
      parm[1] <- runif(1, 0.0, 2)
      parm[2] <- runif(1, -0.5, 0.5)
      parm[3] <- runif(1, 0.001, 0.05)
      # Linear fit of a fixed Gaussian basis (no intercept).
      SSTlm <- lm(fts[,2] ~ -1 + exp(-0.5 * ((fts[,1])/0.05)^2), data = as.data.frame(fts), weights = rep(1, nrow(fts)))
      #print(summary(SSTlm))
      co <- SSTlm$coefficients
      co[is.na(co)] <- 0
      # Score a hard-coded template (centre 0.017, width 0.02).
      curr <- sum((fts[,2] - (co[1] + exp(-0.5 * ((fts[,1] - 0.017)/0.02)^2)))^2/rep(1, nrow(fts))^2)
      #print(curr)
      if (curr < oldcurr){
        oldcurr <- curr
        res[i,] <- c(co[1], 0.017, 0.02)
      }
    }
    perf[i] <- curr
    #print(curr)
    #lines(seq(-0.5, 0.5, length.out = 10000), parm[1] * exp(-0.5 * ((seq(-0.5, 0.5, length.out = 10000) - parm[2])/parm[3])^2), col = "red")
  }
  plot(pers, perf, type = "l")
  print(res[which.min(perf),])
  plot((((((ts[,1] - ts[which.max(ts[,2]),1])/ pers[which.min(perf)]) + 0.5) %% 1) - 0.5), ts[,2], pch=19)
  lines(seq(-0.5, 0.5, length.out = 10000), res[which.min(perf),1] * exp(-0.5 * ((seq(-0.5, 0.5, length.out = 10000) - res[which.min(perf),2])/res[which.min(perf),3])^2), col = "red")
  pers[which.min(perf)]
}
|
896ab74ee1483461356d052b2b92a95891f7c924
|
a361f14c000fc1c153eaeb5bf9f4419951c7e3aa
|
/man/data_BTm_bms.Rd
|
7685b58bf7ad2f00454dd443be514419470695f6
|
[] |
no_license
|
kbenoit/sophistication
|
0813f3d08c56c1ce420c7d056c10c0d8db4c420e
|
7ab1c2a59b41bd9d916383342b6ace23df2b1906
|
refs/heads/master
| 2021-06-04T23:19:22.944076
| 2021-05-08T13:40:26
| 2021-05-08T13:40:26
| 101,664,780
| 43
| 7
| null | 2020-08-25T11:19:22
| 2017-08-28T16:40:21
|
R
|
UTF-8
|
R
| false
| true
| 732
|
rd
|
data_BTm_bms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_BTm_bms}
\alias{data_BTm_bms}
\title{Fitted Bradley-Terry model from Benoit, Munger, and Spirling (2018)}
\format{
Fitted model of type \link[BradleyTerry2:BTm]{BTm}
}
\usage{
data_BTm_bms
}
\description{
Fitted Bradley-Terry model estimates from the "best" model of
Benoit, Munger, and Spirling (2018).
}
\references{
Benoit, Kenneth, Kevin Munger, and Arthur Spirling. October 30,
2017.
"\href{https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3062061}{Measuring and Explaining Political Sophistication Through Textual Complexity.}"
London School of Economics and New York University manuscript.
}
\keyword{datasets}
|
c80577e6523537e4693ca9d958fa1f080b7221b9
|
ef443d64d07775335795d28502cf5ed3990e3b76
|
/r-package/chebInterp/R/calculateChebyshevPolynomials.R
|
df2743edc83292dcdfa38d64864d2f5f16640cc5
|
[] |
no_license
|
walterwzhang/Chebyshev-Interpolation
|
b7fb68ffce07791d18af60c4c84c6846eca40918
|
7266c2783f2b88815042e4b66270de77620502eb
|
refs/heads/master
| 2021-07-09T20:48:24.899193
| 2020-07-20T20:51:14
| 2020-07-20T20:51:14
| 169,814,173
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 921
|
r
|
calculateChebyshevPolynomials.R
|
# calculateChebyshevPolynomials -------------------------------------------------------------------
#' Computes the polynomials for a given degree and vector of values.
#'
#' Chebyshev polynomials of the first kind via the recurrence
#' T_0(x) = 1, T_1(x) = x, T_{k+1}(x) = 2 x T_k(x) - T_{k-1}(x).
#' Resultant matrix of polynomials is of size length(x) by N + 1;
#' column k + 1 holds T_k evaluated at each element of x.
#'
#' @param x Vector of values to compute the polynomials at (numeric)
#' @param N Highest Degree of the Polynomial (Integer)
#' @return A matrix of the polynomials (matrix)
#' @export
calculateChebyshevPolynomials <- function(x, N)
{
    K <- length(x)
    # Recursively compute polynomials. Named `cheb` rather than `T`:
    # `T` masks the built-in alias for TRUE, a classic R footgun.
    cheb <- matrix(0L, ncol = N + 1, nrow = K)
    cheb[, 1] <- 1
    if (N >= 1)
    {
        cheb[, 2] <- x
        if (N >= 2)
        {
            # Three-term recurrence, vectorized over all evaluation points.
            for (k in 2:N)
            {
                cheb[, k + 1] <- 2 * x * cheb[, k] - cheb[, k - 1]
            }
        }
    }
    return(cheb)
}
# -------------------------------------------------------------------------------------------------
|
753830f2cdf0a8891548011b7a4296a2641ffd76
|
31da9633913672a623a1635b9691a09e8dee52da
|
/man/MorphoLink.Rd
|
7efb5b7145af0f0bea292bfd5e31deb2f1f80f9d
|
[] |
no_license
|
ms609/MorphoBank
|
eecdeaf3de338a8e62c369cdbfbb7948a843c88e
|
6dd019a5be3d93b1e3301a5837f4f93966642db6
|
refs/heads/master
| 2023-04-28T10:01:49.483968
| 2023-04-17T10:01:03
| 2023-04-17T10:01:03
| 141,433,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 800
|
rd
|
MorphoLink.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text.R
\name{MorphoLink}
\alias{MorphoLink}
\title{Link to MorphoBank project}
\usage{
MorphoLink(id = getOption("MorphoBankProject"),
linkText = paste("project", id), checkAccess = TRUE)
}
\arguments{
\item{id}{Integer corresponding to the project's MorphoBank identifier.
A global default can be set using `options(MorphoBankProject=1234)`.}
\item{linkText}{Text to appear in link, once project is live}
\item{checkAccess}{Logical specifying whether to display a notice if the
project is not yet available to the public}
}
\value{
Text providing a link to the project, or if the project is not yet
publically available, a note instructing password holders how to log in.
}
\description{
Link to MorphoBank project
}
|
321af7e707acf199c9bc98102ef0225806491489
|
74bc48ba64859a63855d204f1efd31eca47a223f
|
/Corporacion/100.Prav_arima01.R
|
ff2a295ab8defdbb91ba8d5d5becf7b71922c6cb
|
[] |
no_license
|
PraveenAdepu/kaggle_competitions
|
4c53d71af12a615d5ee5f34e5857cbd0fac7bc3c
|
ed0111bcecbe5be4529a2a5be2ce4c6912729770
|
refs/heads/master
| 2020-09-02T15:29:51.885013
| 2020-04-09T01:50:55
| 2020-04-09T01:50:55
| 219,248,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,140
|
r
|
100.Prav_arima01.R
|
# Kaggle Favorita ("Corporacion") — per store-item ARIMA forecast.
# Fits one auto.arima per store_item series (in parallel) on log1p sales
# since 2017-04-01 and forecasts 2017-08-16..31 for the submission.
library(dplyr)
library(forecast)
library(reshape2)
library(data.table)
library(foreach)
library(date)
library(lubridate)
#library(doMC)
library(doParallel)
# Sanity checks of the log1p/expm1 round trip used below.
print(expm1(0))
print(expm1(1))
print(log1p(0))
print(log1p(1))
train <- fread('./input/train.csv')
test <-fread('./input/test.csv')
train$date <- as.Date(parse_date_time(train$date,'%y-%m-%d'))
test$date <- as.Date(parse_date_time(test$date,'%y-%m-%d'))
# Composite series key: one time series per (store, item) pair.
train$store_item_nbr <- paste(train$store_nbr, train$item_nbr, sep="_")
test$store_item_nbr <- paste(test$store_nbr, test$item_nbr, sep="_")
head(train)
# train_sub - use 2017 data to start with the forecast
train_sub <- train[date >= as.Date("2017-04-01"), ]
#train_sub <- as.data.frame(train_sub)
cols <- c('date','store_item_nbr', 'unit_sales')
train_sub <- train_sub[, cols,with=FALSE]
# clean up
rm(train)
head(train_sub)
# transform to log1p
# Negative sales (returns) are clipped to zero before the log transform.
train_sub$unit_sales <- as.numeric(train_sub$unit_sales)
train_sub$unit_sales[train_sub$unit_sales < 0] <- 0
train_sub$unit_sales <- log1p(train_sub$unit_sales)
# dcast the data from long to wide format for time series forecasting
train_sub_wide <- dcast(train_sub, store_item_nbr ~ date, value.var = "unit_sales", fill = 0)
# NOTE(review): ts() is given the whole wide frame including the character
# store_item_nbr column, so each row passed to auto.arima carries a coerced
# id in position 1 — confirm this was intended.
train_ts <- ts(train_sub_wide, frequency = 7) # considering one week as a short shopping cycle
fcst_intv = 16 # 16 days of forecast interval (Aug 16 ~ 31) per the submission requirement
# NOTE(review): this preallocation is discarded — the foreach below rebinds
# fcst_matrix via rbind.
fcst_matrix <- matrix(NA,nrow=nrow(train_ts),ncol=fcst_intv)
# register 15 cores for parallel processing in ETS forecasting
#registerDoMC(detectCores()-1)
# NOTE(review): hard-coded 25 workers (comment above says 15) — consider
# detectCores()-1 for portability.
registerDoParallel(cores=25)
# One auto.arima fit + 16-day forecast per series; rows bound in order.
fcst_matrix <- foreach(i=1:nrow(train_ts),.combine=rbind, .packages=c("forecast")) %dopar% {
  fcst_matrix <- forecast(auto.arima(train_ts[i,]),h=fcst_intv)$mean
}
# post-processing the forecast table
fcst_matrix[fcst_matrix < 0] <- 0
colnames(fcst_matrix) <- as.character(seq(from = as.Date("2017-08-16"),
                                          to = as.Date("2017-08-31"),
                                          by = 'day'))
fcst_df <- as.data.frame(cbind(train_sub_wide[, 1], fcst_matrix))
colnames(fcst_df)[1] <- "store_item_nbr"
# melt the forecast data frame from wide to long format for final submission
fcst_df_long <- melt(fcst_df, id = 'store_item_nbr',
                     variable.name = "fcst_date",
                     value.name = 'unit_sales')
fcst_df_long$store_item_nbr <- as.character(fcst_df_long$store_item_nbr)
fcst_df_long$fcst_date <- as.Date(parse_date_time(fcst_df_long$fcst_date,'%y-%m-%d'))
fcst_df_long$unit_sales <- as.numeric(fcst_df_long$unit_sales)
#fcst_df_long %>% filter(unit_sales > 5)
# transform back to exp1p
fcst_df_long$unit_sales <- expm1(fcst_df_long$unit_sales)
# generate the final submission file
# Left join keeps every test id; series unseen in training get 0 sales.
submission <- left_join(test, fcst_df_long,
                        c("store_item_nbr" = "store_item_nbr", 'date' = 'fcst_date'))
submission$unit_sales[is.na(submission$unit_sales)] <- 0
head(submission)
summary(submission$unit_sales)
sum(submission$unit_sales)
submission <- submission %>% select(id, unit_sales)
write.csv(submission, "./submissions/Prav_arima01.csv", row.names = FALSE, quote = FALSE)
|
0a5016060f4603fc0b8518b243c13e26f58eafa5
|
02ba97c94a3293951417e4e9b8a94230b9c0d11e
|
/run_analysis.R
|
b5c903f322f6e48e8e0a05069819cb86688aa7f1
|
[] |
no_license
|
winstonyma/Datasciencecoursera-Course-3-Week-4-Assignment
|
6818cae9eeacac546047895d3c99d219b5206d56
|
78ffbc8249f7d66f6e294265020f18a09c8899be
|
refs/heads/master
| 2021-01-22T19:04:23.778004
| 2017-03-18T03:16:28
| 2017-03-18T03:16:28
| 85,156,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,833
|
r
|
run_analysis.R
|
# run_analysis.R
# Downloads the UCI HAR dataset, merges the train/test splits, keeps the
# mean/std measurements, attaches descriptive activity names, and writes a
# tidy summary (mean of each variable per subject and activity) to
# secondtidydata.txt.
#
# The base directory was previously repeated verbatim in every path; it is
# now defined once and combined with file.path() (identical resulting paths).
base_dir <- "C:/Users/v-wima/Desktop/Coursera"
data_dir <- file.path(base_dir, "UCI HAR Dataset")
zip_path <- file.path(base_dir, "UCI HAR Dataset.zip")
# download and unzip data
if (!file.exists(base_dir)) { dir.create(base_dir) }
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = zip_path)
## Unzip dataSet to the base directory
unzip(zipfile = zip_path, exdir = base_dir)
# merge training dataset with test dataset
### read train dataset
x_train <- read.table(file.path(data_dir, "train", "X_train.txt"))
y_train <- read.table(file.path(data_dir, "train", "y_train.txt"))
subject_train <- read.table(file.path(data_dir, "train", "subject_train.txt"))
### read test dataset
x_test <- read.table(file.path(data_dir, "test", "X_test.txt"))
y_test <- read.table(file.path(data_dir, "test", "y_test.txt"))
subject_test <- read.table(file.path(data_dir, "test", "subject_test.txt"))
### read features and activity labels
features <- read.table(file.path(data_dir, "features.txt"))
activity_labels <- read.table(file.path(data_dir, "activity_labels.txt"))
## assign column names (feature names for measurements, ids for the rest)
colnames(x_train) <- features[, 2]
colnames(y_train) <- "activityId"
colnames(subject_train) <- "subjectId"
colnames(x_test) <- features[, 2]
colnames(y_test) <- "activityId"
colnames(subject_test) <- "subjectId"
colnames(activity_labels) <- c("activityId", "activityType")
## merge data sets: columns within each split, then stack the splits
train <- cbind(x_train, y_train, subject_train)
test <- cbind(x_test, y_test, subject_test)
all_in_one <- rbind(train, test)
# extract measurements of the mean and the std for each measurement
col_names <- colnames(all_in_one)
## NOTE(review): "mean.." / "std.." are regular expressions, so the dots
## match any character (this also catches meanFreq columns). Kept as-is to
## preserve the original output.
mean_and_std <- (grepl("activityId", col_names) |
                   grepl("subjectId", col_names) |
                   grepl("mean..", col_names) |
                   grepl("std..", col_names))
## subsetting the data
mean_and_std <- all_in_one[, mean_and_std == TRUE]
# attach descriptive activity names
activity_names <- merge(mean_and_std, activity_labels, by = "activityId", all.x = TRUE)
# summarize and set in order the mean of each variable for each activity and each subject
secondtidydata <- aggregate(. ~ subjectId + activityId, activity_names, mean)
secondtidydata <- secondtidydata[order(secondtidydata$subjectId, secondtidydata$activityId), ]
# write second tidy data set
write.table(secondtidydata, "secondtidydata.txt", row.name = FALSE)
|
ba3ac3527f62672bcc3de7fa82d26d6a5289ee7e
|
82b1c5655856b660c053d18ec7ad94f3aa30a964
|
/man/get_package_function_usage.Rd
|
e26ad19d16fcd0aab56df32b8ce41d7e5087257d
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.fakin
|
9792dfa732a8dd1aaa8d2634630411119604757f
|
17ab0e6e9a63a03c6cb40ef29ee3899c2b2724a0
|
refs/heads/master
| 2022-06-09T22:25:09.633343
| 2022-06-08T21:24:14
| 2022-06-08T21:24:14
| 136,065,795
| 1
| 0
|
MIT
| 2021-03-15T10:55:17
| 2018-06-04T18:21:30
|
R
|
UTF-8
|
R
| false
| true
| 1,676
|
rd
|
get_package_function_usage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_package_function_usage.R
\name{get_package_function_usage}
\alias{get_package_function_usage}
\title{How Often Are the Functions of a Package Used?}
\usage{
get_package_function_usage(tree, package, simple = FALSE, by_script = FALSE)
}
\arguments{
\item{tree}{parse tree as returned by \code{\link{parse_scripts}}}
\item{package}{name of the package (must be installed)}
\item{simple}{if \code{TRUE}, a simple approach using a simple regular
expression is used. This approach is fast but not correct as it e.g. counts
function calls that are commented out or even string expressions that just
look like function calls. Leaving this argument to its default,
\code{FALSE}, will return only real function calls by evaluating the full
structure of parse tree.}
\item{by_script}{if \code{TRUE} the functions are counted and returned by
script, otherwise they are counted over all scripts}
}
\value{
data frame with columns \code{name} (name of the function),
\code{prefixed} (number of function calls prefixed with \code{<package>::}
or \code{<package>:::}), \code{non_prefixed} (number of function calls
that are not prefixed with the package name) and \code{total} (total
number of function calls)
}
\description{
How Often Are the Functions of a Package Used?
}
\examples{
# Read all scripts that are provided in the kwb.fakin package
tree <- kwb.code::parse_scripts(root = system.file(package = "kwb.fakin"))
# Check which functions from kwb.utils are used and how often
get_package_function_usage(tree, package = "kwb.utils")
# Hm, this does not seem to be the whole truth...
}
|
8c18ce8012a0f3016131e1f629a8dcf7f1a8face
|
4b39182868496103501ca43b0871fb69483d6927
|
/run_analysis.R
|
4ab5569ac8a3af8d153f123c538be53bd4ff82be
|
[] |
no_license
|
Nmoheby/GettingandCleaningData
|
32e8d3b6ba25331aafdd0de105afcd85fab56846
|
b7f2c9526a6c8757e7e30abf78583654b09de6b9
|
refs/heads/master
| 2020-03-15T17:47:47.679687
| 2018-05-05T17:34:49
| 2018-05-05T17:34:49
| 132,269,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,124
|
r
|
run_analysis.R
|
library(dplyr)
#---------------------------
# Read the training data: measurements (X), activity ids (y), subject ids.
pathdata <- file.path("./")
X_train <- read.table(file.path(pathdata, "train", "X_train.txt"), header = FALSE)
Y_train <- read.table(file.path(pathdata, "train", "y_train.txt"), header = FALSE)
Sub_train <- read.table(file.path(pathdata, "train", "subject_train.txt"), header = FALSE)
# Read the test data.
X_test <- read.table(file.path(pathdata, "test", "X_test.txt"), header = FALSE)
Y_test <- read.table(file.path(pathdata, "test", "y_test.txt"), header = FALSE)
Sub_test <- read.table(file.path(pathdata, "test", "subject_test.txt"), header = FALSE)
# Feature (column) names and activity label lookup table.
variable_names <- read.table(file.path(pathdata, "features.txt"), header = FALSE)
activity_labels <- read.table(file.path(pathdata, "activity_labels.txt"), header = FALSE)
#---------------------------
# Merge the training and the test sets into a single data frame.
colnames(X_train) <- variable_names[, 2]
colnames(Y_train) <- "activityId"
colnames(Sub_train) <- "subjectId"
colnames(X_test) <- variable_names[, 2]
colnames(Y_test) <- "activityId"
colnames(Sub_test) <- "subjectId"
colnames(activity_labels) <- c('activityId', 'activityType')
mrg_train <- cbind(Y_train, Sub_train, X_train)
mrg_test <- cbind(Y_test, Sub_test, X_test)
# BUG FIX: this object was assigned as `rgdata` but referenced below as
# `mrgdata`, so the script stopped with "object 'mrgdata' not found".
# Use one name throughout.
mrgdata <- rbind(mrg_train, mrg_test)
#---------------------------
# Subset only the measurements on the mean and standard deviation
# (plus the two id columns). Note "mean.." / "std.." are regexes, so
# '.' matches any character.
colNames <- colnames(mrgdata)
mean_and_std <- grepl("activityId", colNames) |
  grepl("subjectId", colNames) |
  grepl("mean..", colNames) |
  grepl("std..", colNames)
mrgdata_Mn_St <- mrgdata[, mean_and_std]
#---------------------------
# Use descriptive activity names (merge on the shared activityId column),
# then drop the now-redundant numeric id.
mrgdata_act_name <- merge(activity_labels, mrgdata_Mn_St)
mrgdata_act_name <- select(mrgdata_act_name, -activityId)
#---------------------------
# Second, independent tidy data set: the average of each variable for
# each activity and each subject.
TidyTable <- aggregate(. ~ subjectId + activityType, mrgdata_act_name, mean)
# Save the new data.
write.table(TidyTable, "TidyTable.txt", row.name = FALSE)
|
b53c27d50543805db322023b889b1e7eb6b81598
|
bf9bb0d310731e70ec2164fea0d192b663ebef3e
|
/man/read.survey-package.Rd
|
8be973d6a7f4e5a9f5a3e8379f89ada52ef93d95
|
[] |
no_license
|
DIRKMJK/read.survey
|
334f1fe7b3f50f7af7af4de774f0f03e35bd1cbc
|
50a57e76bcc2fe945295ddd45a157af1933bc4eb
|
refs/heads/master
| 2020-04-07T05:32:52.764714
| 2015-02-10T10:03:44
| 2015-02-10T10:03:44
| 30,558,716
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
rd
|
read.survey-package.Rd
|
\name{read.survey-package}
\alias{read.survey-package}
\alias{read.survey}
\docType{package}
\title{
Read Surveymonkey file
}
\description{
Prepare data from Surveymonkey xlsx or csv file for analysis.
}
\details{
\tabular{ll}{
Package: \tab read.survey\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-02-09\cr
License: \tab MIT\cr
}
read.survey(filename, format = 'xlsx', convert = FALSE) opens Surveymonkey export file.
}
\author{
Dirk Kloosterboer
Maintainer: Dirk Kloosterboer <info@dirkmjk.nl>
}
\references{
}
\keyword{ package }
\seealso{
}
\examples{
}
|
271469f945c182a5648939660bf464923e6e1f41
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dsa/examples/xtsplot.Rd.R
|
4978d55ef67716108615d545db5eeb965958ced4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 562
|
r
|
xtsplot.Rd.R
|
library(dsa)
### Name: xtsplot
### Title: Create a plot for xts series
### Aliases: xtsplot
### ** Examples
# Two monthly toy series of length 100 starting January 2010:
# x ~ Gaussian noise, y ~ Uniform(0, 1) draws.
x <- xts::xts(rnorm(100), seq.Date(as.Date("2010-01-01"), length.out=100, by="months"))
y <- xts::xts(runif(100), seq.Date(as.Date("2010-01-01"), length.out=100, by="months"))
# Basic line plot. Every call passes font = "sans" explicitly —
# presumably to avoid system font lookup issues; confirm if changing.
xtsplot(x, font="sans")
# First differences drawn as a bar chart.
xtsplot(y, transform="diff", type="bar", font="sans")
xtsplot(y, font="sans")
# Same, with custom spacing of the date-axis labels.
xtsplot(y, transform="diff", type="bar", date_breaks="24 months", font="sans")
# Several series in one panel, with legend names and a title.
xtsplot(merge(x,y), names=c("Gaussian", "Uniform"), main="Simulated series", font="sans")
|
fb47c642bbe8a6b189d27720db3daf99295c8e54
|
79c50b22f90863654139e18731cac46ba046cb3c
|
/create_output_for_gene_list.R
|
183cd6bb7290d987392841bb3470ef9fc05c97ac
|
[] |
no_license
|
micktusker/ensembl_rest_api
|
5383344b4f8a7580f8bc1f6822a3a75adc5d1e8a
|
5a4384f3ae3abc2b6aa948cf5953fe3b3a49f8fd
|
refs/heads/master
| 2021-01-17T14:30:41.548493
| 2016-10-11T16:02:54
| 2016-10-11T16:02:54
| 70,240,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,204
|
r
|
create_output_for_gene_list.R
|
source('./ensembl_rest_api_functions.R')
source('./file_functions.R')
options(stringsAsFactors = FALSE)
# Returns the first column in a given file. Use this to turn
# a single column of gene names into a vector.
# NOTE(review): thin wrapper over getColumnAsVector(), which is sourced
# from file_functions.R; the file is expected to hold one gene name per row.
getGeneNamesFromFileAsVector <- function(gene.names.file) {
  return(getColumnAsVector(gene.names.file))
}
# Create and return a two-column data frame of column name mapped to
# Ensembl gene ID.
#
# One gene name can resolve to several Ensembl IDs, hence the inner loop.
# NOTE(review): `rbind(row, <df>)` inside the loop both grows the data
# frame one row at a time (O(n^2) copying) and PREPENDS each new row, so
# the returned rows end up in reverse order of `gene.names` — confirm
# this ordering is intended before relying on it.
getGeneNamesEnsemblIDsAsDataFrame <- function(gene.names) {
  gene.names.ensembl.ids.dataframe <- data.frame()
  for (gene.name in gene.names) {
    # getEnsemblGeneIDsForName() is sourced from ensembl_rest_api_functions.R.
    ensembl.gene.ids <- getEnsemblGeneIDsForName(gene.name)
    for (ensembl.gene.id in ensembl.gene.ids) {
      print(c(gene.name, ensembl.gene.id))  # progress trace to console
      row <- c(gene.name, ensembl.gene.id)
      gene.names.ensembl.ids.dataframe <- rbind(row, gene.names.ensembl.ids.dataframe)
    }
  }
  return(gene.names.ensembl.ids.dataframe)
}
# Create a large data frame where all the values from the Ensembl REST API Gene
# JSON appear as columns.
#
# Uses a known Ensembl gene id (APOE) to discover the set of attribute
# names, then fetches every attribute for every Ensembl id matching each
# input gene name. NULL attributes are recorded as the sentinel '##'.
# NOTE(review): as in getGeneNamesEnsemblIDsAsDataFrame(), rbind() grows
# the result one row at a time and prepends, so rows come back in reverse
# order of the input.
makeDataFrameForGeneList <- function(gene.names) {
  known.id.apoe <- 'ENSG00000130203'
  gene.attributes <- getEnsemblGeneAtrributeNames(known.id.apoe)
  gene.details.dataframe <- data.frame()
  for (gene.name in gene.names) {
    ensembl.gene.ids <- getEnsemblGeneIDsForName(gene.name)
    for (ensembl.gene.id in ensembl.gene.ids) {
      print(ensembl.gene.id)
      attribute.getter <- getEnsemblGeneAtrribute(ensembl.gene.id)
      attrribute.values <- rep(NA, length(gene.attributes))
      for (i in seq_along(gene.attributes)) {
        print(gene.attributes[i])
        attribute.value <- attribute.getter(gene.attributes[i])
        if (is.null(attribute.value)) {
          attrribute.values[i] <- '##'
        } else {
          # BUG FIX: reuse the value fetched above instead of invoking the
          # getter a second time (each call may hit the REST API again).
          attrribute.values[i] <- attribute.value
        }
      }
      # Append the queried gene name as the final column of the row.
      attrribute.values[length(attrribute.values) + 1] <- gene.name
      gene.details.dataframe <- rbind(attrribute.values, gene.details.dataframe)
    }
    Sys.sleep(1)  # throttle requests to the Ensembl REST API
  }
  df.names <- gene.attributes
  df.names[length(df.names) + 1] <- 'given.gene.name'
  names(gene.details.dataframe) <- df.names
  return(gene.details.dataframe)
}
|
9cc9459a9d93b34e1e63a5c93734ff643e248dd7
|
0a7308e8330385a6e42c04f7fa3b750c76708eb5
|
/Materials 2018/Week 1. Basic R 1/R-Wizardry 2018 week 1 (skeleton).R
|
f1b0cbbc6b1b2c4b63260d945c6c92d1a4d38270
|
[] |
no_license
|
imstatsbee/R-Wizardry
|
49852bc32fa79fabe22426c3bdd4749675b3fbf8
|
bbc291d7fe93d21dc309bdae00e9b68a77b2408b
|
refs/heads/master
| 2021-09-10T06:18:50.003815
| 2018-03-21T11:52:38
| 2018-03-21T11:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,917
|
r
|
R-Wizardry 2018 week 1 (skeleton).R
|
# R-Wizardry 2018
* R-Studio and environment: getting around and setting it up
* Data management and spreadsheet organization before starting.
* Plan well before starting to work.
## Day 1
1.1 R as a calculator
1.2 Commenting your code
1.3 Assigning variables
1.4 Data structure
### R as a calculator
R can do anything your basic, scientific, or graphic calcularor can.
#### Basic math
#### Mathematical functions
#### Plot equations
#### Common functions
?help
?c
?seq
?setwd()
?sort()
?dir()
?head()
?names()
?summary()
?dim()
?range()
?max()
?min()
?sum()
?pairs
?plot
### Commenting your code
#Inside a chunk of code in RMarkdown or anywhere in a R script, all text afer a hastag (#) will be ignored by R -and by many other programming languages. It's very useful to keep track of changes in your code and add explanations to the scripts. It is, in general, part of best coding practices to keep things tidy and organized.
#Comments may/should appear in three places:
#* The top of your script
#* Above every function you create
#* In line
#At the beginning of the script, describing the purpose of your script and what you are trying to solve
#In line: Describing a part of your code that is not inmediatly obvious what it is for.
### Assigning variables
#Now, let's create a new chunk of code by pressing simultaneously CTRL + ALT + i and create some variables.
#To run one line of code at the time into the console, press CTRL + ENTER (PC/LINUX) or COMMAND-ENTER (Mac). To run several lines at the time, highligth the lines of interest and proceed as describes above.
#Two ways to assign variables:
#Left hand assigner (original way to assign results)
#Rigth hand assigner
# sometimes the <- is necessary for functions originally created in S. often seen on R help forums if you Google future issues
### Data basics
#### R processes at each new line unless you use a semicolon (;)
#### R generally ignores spaces and tabs, use this to make code readable
#A more complex example
for (i in unique(raw_area_long$date)){sub.dat<-subset(raw_area_long, raw_area_long$date==i)
umoles<-(((sub.dat$area[sub.dat$date==i]-calib_coeff$intercept[calib_coeff$date==i])/calib_coeff$slope[calib_coeff$date==i])/1000)*sub.dat$headspace_mL[sub.dat$date==i]}
#Using spaces to organize the above code:
for (i in unique(raw_area_long$date)) {
sub.dat <- subset (raw_area_long, raw_area_long$date == i)
umoles <- ( ( ( sub.dat$area[sub.dat$date == i] - calib_coeff$intercept [calib_coeff$date == i] ) / calib_coeff$slope [ calib_coeff$date == i] ) / 1000 ) * sub.dat$headspace_mL
}
#Under some special circumstances, spaces are required, e.g. when using function paste() and its argument "sep = ' ' ".
#### R calculates the right side of the assignment first the result is then applied to the left
# this just overwrote your old 'result' object. Remember not to SCREW yourself
i = 1
i = i + 1
i = i + 1
i = i + 1
i = i + 1
i = i + 1
i
#### Naming conventions for objects stored in R
#* Cannot start with a number
#* Cannot have spaces in between the name
#* Avoid naming your variables using names already used by R.
#Basically, you have the following options:
#All lower case: e.g. myfirstobject
#Period separated: e.g. my.first.object
#use underscores: e.g. my_first_object
#Lower case: e.g. myFirstObject
#Upper case: e.g. MyFirstObject
#The most important aspect of naming is being concise and consistent!
#Case-sensitive
b; B; a; A; a + a;
#Named variables are stored differently than a function. **Avoid using functions' names to name your variables...**
#This is an object
# as soon as you add the () and pass it a value, it operates as the function
### Data structures
#### Vectors
#Let's start with vectors, the most common of R's data structures. Vectors are the bulding blocks of more complex objects.
#### Atomic vectors
#Atomic vectors are 1-dimensional data structures that must be all the same type
# Logical
# Integer
# Double
# Character
#A couple of points on atomic vectors:
#* They are constructed using the `c()` function
#* To specify an integer use the L suffix
#* A vector must be the all the same type
#* Use `typeof()` to see what type you have or an "is" function to check type
typeof(int_vector)
is.character(char_vector)
is.numeric(logical_vector)
#### Lists
#A list is like an atomic vector but each item in the list can be any type, including other lists, atomic vectors, data frames, or matrices. Use `list()` to make a list.
my_list =
#Lists are very powerful and although confusing at first it is worth spending time learning how to use them. In particular when we come to the "apply" family of functions we will see how lists can make our lives much easier.
#### Factors
#Ah, the dreaded factors! They are used to store categorical variables and although it is tempting to think of them as character vectors this is a dangerous mistake (you will get scratched, badly!).
#Factors make perfect sense if you are a statistician designing a programming language (!) but to everyone else they exist solely to torment us with confusing errors.
#A factor is really just an integer vector with an additional attribute, `levels()`, which defines the possible values.
crazy_factor =
print(crazy_factor)
levels(crazy_factor)
as.integer(crazy_factor) #Notice the alphabetic rearrangment in the results! Important to keep in mind when looping (week 5)
#But why not just use character vectors, you ask?
#Believe it or not factor do have some useful properties. For example factors allow you to specify all possible values a variable may take even if those values are not in your dataset.
cool_animals =
cool_animals_factor =
table(cool_animals_factor)
#But for the most part factors are important for various statistics involving categorical variables, as you'll see for things like linear models. Love 'em or hate 'em, factors are integral to using R so better learn 'em.
#### Matrices
#A matrix is a 2-dimensional vector and like atomic vectors must be all of a single type.
mat =
#Matrices can have row and column names
colnames(mat) =
rownames(mat) =
print(mat)
#### Arrays
#Arrays are matrices with more than two dimensions. For example, an array of (5, 4, 3) has three slices (we can think of them as layers), each having five rows and four columns. As it happens for matrices, arrays can store only a single type of data.
mat =
mat.2 =
mat.3 =
my.array =
str(my.array)
#Let's assign names to rows, columns and matrices in the array.
array.rows =
array.columns =
array.matrices =
my.array.named =
#To access data from an array, select the row, column and slice of interest.
#Let's get the datum in row two and column four from matrix 3
#Access all the observations (data points) crossing column 2 in matrix 3 by leaving the row space blank.
#### Data frames
#Data frames are very powerful data structures in R and will play a central role in most of the work you'll do. These are probably the most familiar data structures to most people as they resemble spreadsheets quite closely, at least on the surface.
#You can think of data frames as a set of identically sized lists lined up together in columns with a few key features. Each column must be of the same type but column types can be any of the basic data types (character, integer, etc).
#This makes them very useful for structured heterogeneous data, like what many of you generate in the lab everyday. However, it is very important to remember that they are not spreadsheets and to use them effectively you need to forget everything you've learned about Excel (which is probably a good idea anyway).
#Here let's use a data frame that comes built in to R
#Notice the `$` notation, similar to what we saw for lists. We can use this to extract singe columns.
#Alternatively,
#And now for some basic indexing.
# get the first 3 rows of the last 2 columns
# get the 10th row of the 'Petal.Width' column
# get the entire 4th row
#A brief on exploratory data analysis
#### S4 objects
#Is one of R's object oriented systems -the other two are S3 and R5. S4 objects are becoming more popular due to their capacity to efficiently handle big amounts of metadata (we can think of matrices where we can store different types of data), where each "matrix" is a slot.
#S4 objects are more strict and hard to work with than S3. For example, S3 objecs are easy to intercovert (a data frame into a matrix) by simply setting the class attribute; that's not the case for S4 objects.
setClass("Hockey Team", representation(team = "character", city = "character"))
hockey <- new("Hockey Team", team = "Calgary Flames", city = "Calgary")
hockey
#### Special data: NA and NaN values
#Missing values in R are handled as NA or (Not Available). Impossible values (like the results of dividing by zero) are represented by NaN (Not a Number). These two types of values, specially NAs, have special ways to be dealt with otherwise it may lead to errors in the functions.
brand <-
wheat.type <-
rating <-
NA.example <-
is.na(NA.example)
mean()
mean() #Avoid using just "T" as an abbreviation for "TRUE"
## Summary of week 1
#1.1 R can be used as a graphing calculator but R is a programming language and software.
#1.2 Commenting your code as you build your scripts up is a fundamental part of best coding practices. Always do it so your future self will not hate your past self! It's for your own mental health.
#1.3 The capability of assigning results into variables is one of the most powerfull virtues of R which allows to simplify the data management for complex operations.
#1.4 There are several data structers that allow us to store data into variables.
|
088f82df263777f53db14cfd344c7cc30a36be91
|
09111a2b17b76ed68ae161291a230b05369584d6
|
/meetings/answers/ch3_supplementalQs_rkc.R
|
6ec4956ee2d0fbebfe9004470b49cacc761f47c0
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
rkclement/studyGroup
|
ba6f492b37085d568efeefd9b7d336d99d3bee4f
|
6f5ecd0fe042b1fd37aeb8c945e5a8bd22f5921e
|
refs/heads/gh-pages
| 2021-01-12T03:35:15.559713
| 2017-09-28T18:51:43
| 2017-09-28T18:51:43
| 78,233,449
| 0
| 1
| null | 2017-01-06T19:40:00
| 2017-01-06T19:39:59
| null |
UTF-8
|
R
| false
| false
| 8,971
|
r
|
ch3_supplementalQs_rkc.R
|
# Excerpt From: Taylor Arnold and Lauren Tilton, _Humanities Data in R_
# Chapter 3 Supplemental Questions
# 26. The dataset iris is a very well-known statistical data from the 1930s. It
# gives several measurements of iris sepal and petal lengths (in centimeters)
# for three species. Construct a table of sepal length rounded to the nearest
# centimeter versus Species.
table(round(iris$Sepal.Length), iris$Species)
# 27. Construct the same table, but rounded to the nearest half centimeter.
table(round(iris$Sepal.Length*2)/2, iris$Species)
# 28. Plot a histogram of the sepal length for the entire iris dataset.
hist(iris$Sepal.Length)
# 29. Replicate the previous histogram, but manually specify the break points
# for the histogram and add a custom title and axis labels.
hist(iris$Sepal.Length, breaks = seq(4, 8, by = 0.25),
xlab = "Sepal Length",
ylab = "Counts",
main = "Distribution of Sepal Length for Iris Dataset")
# 30. Plot three histograms showing the sepal length separately for each species.
# Make sure the histograms use the same break points for each plot (Hint: use
# the same as manually set in the previous question). Add helpful titles for the
# plots, and make sure to set R to display three plots at once.
par(mfrow = c(3,1))
hist(iris$Sepal.Length[iris$Species == "setosa"],
breaks = seq(4, 8, by = 0.25),
xlab = "",
ylab = "Counts",
main = "Distribution of Sepal Length for Setosa Species")
hist(iris$Sepal.Length[iris$Species == "versicolor"],
breaks = seq(4, 8, by = 0.25),
xlab = "",
ylab = "Counts",
main = "Distribution of Sepal Length for Versicolor Species")
hist(iris$Sepal.Length[iris$Species == "virginica"],
breaks = seq(4, 8, by = 0.25),
xlab = "Sepal Length",
ylab = "Counts",
main = "Distribution of Sepal Length for Virginica Species")
# 31. Calculate the deciles of the petal length for the entire iris dataset.
petals <- quantile(iris$Petal.Length, probs = seq(0, 1, 0.1))
# 32. Construct a table showing the number of samples from each species with
# petal length in the top 30 % of the dataset. How well does this help
# categorize the dataset by species?
petals <- quantile(iris$Petal.Length, probs = 0.7)
table(iris$Species, petals < iris$Petal.Length)
# This table shows the disparity between the petal lengths of the smaller setosa
# and versicolor species from the longer-petaled virginica species.
# 33. Now bin the iris dataset into deciles based on the petal length. Produce a
# table by species. How well does this categorize the dataset by species?
petals <- quantile(iris$Petal.Length, probs = seq(0, 1, 0.1), names = FALSE)
bins <- cut(iris$Petal.Length, petals, labels = FALSE, include.lowest = TRUE)
table(iris$Species, bins)
# This table even more effectively shows the spread from the short-petaled
# setosa, to the medium-petaled versicolor, to the long-petaled virginica.
# 34. We can get a very rough estimate of the petal area by multiplying the
# petal length and width. Calculate this area, bin the dataset into deciles on
# area, and compute table of the petal length deciles against the area deciles.
# How similar do these measurements seem?
petal_area <- quantile(iris$Petal.Length*iris$Petal.Width,
probs = seq(0, 1, 0.1), names = FALSE)
bins2 <- cut(iris$Petal.Length*iris$Petal.Width, petal_area,
labels = FALSE, include.lowest = TRUE)
table(bins, bins2)
# These measuremens are quite similar.
# 35. Without using a for loop, construct a vector with the median petal length
# for each species. Add appropriate names to the output.
median_petals <- rep(0, length = 3)
median_petals[1] <- median(iris$Petal.Length[iris$Species == "setosa"])
median_petals[2] <- median(iris$Petal.Length[iris$Species == "versicolor"])
median_petals[3] <- median(iris$Petal.Length[iris$Species == "virginica"])
names(median_petals) <- c("setosa", "versicolor", "virginica")
median_petals
# 36. Repeat the previous question using a for loop.
median_petals <- rep(0, length = 3)
species <- unique(iris$Species)
for (i in 1:3) {
median_petals[i] <- median(iris$Petal.Length[iris$Species == species[i]])
}
names(median_petals) <- as.vector(species)
median_petals
# 37. Finally, repeat again using tapply.
tapply(iris$Petal.Length, iris$Species, median)
# 38. As in a previous question, write a function which asks the user for a
# state abbreviation and returns the state name. However, this time, put the
# question in a for loop so the user can decode three straight state
# abbreviations.
#
# Interactively decodes three state abbreviations in a row.
# Uses the built-in `state.abb` / `state.name` datasets.
askState2 <- function() {
  for (i in 1:3) {
    abr <- readline("Please enter a 2-letter state abbreviation to see the state: ")
    abr <- toupper(abr)  # accept lower-case input
    # Idiom fix: test the logical directly instead of `== FALSE`, and only
    # look up the index once the abbreviation is known to be valid.
    if (!(abr %in% state.abb)) {
      print("This is not a valid 2-letter state abbreviation.")
    } else {
      print(state.name[which(state.abb == abr)])
    }
  }
}
askState2()
# 39. The command break immediately exits a for loop; it is often used inside
# of an if statement. Redo the previous question, but break out of the loop
# when a non-matching abbreviation is given. You can increase the number of
# iterations to something large (say, 100), as a user can always get out of the
# function by giving a non-abbreviation.
#
# Keeps decoding abbreviations (up to 100 of them) until the user enters
# an invalid one, which ends the loop.
askState3 <- function() {
  for (attempt in seq_len(100)) {
    answer <- toupper(readline("Please enter a 2-letter state abbreviation to see the state: "))
    idx <- match(answer, state.abb)
    if (is.na(idx)) {
      print("This is not a valid 2-letter state abbreviation. Goodbye.")
      break
    }
    print(state.name[idx])
  }
}
askState3()
# 40. Now, reverse the process so that the function returns when an abbreviation
# is found but asks again if it is not.
#
# Keeps asking (up to 100 times) until a valid abbreviation is given,
# then prints the matching state name and stops.
askState4 <- function() {
  for (attempt in seq_len(100)) {
    answer <- toupper(readline("Please enter a 2-letter state abbreviation to see the state: "))
    idx <- match(answer, state.abb)
    if (!is.na(idx)) {
      print(state.name[idx])
      break
    }
  }
}
askState4()
# 41. Using a for loop, print the sum 1+1/2+1/3+1/4 + ··· + 1/n for all n
# equal to 1 through 100.
for (i in 1:100) {
print(sum(1/(1:i)))
}
# 42. Now calculate the sum for all 100 values of n using a single function call.
cumsum(1/(1:100))
# 43. Ask the user for their year of birth and print out the age they turned for
# every year between then and now.
askBirth <- function() {
birthyear <- readline("What year were you born? ")
birthyear <- as.numeric(birthyear)
currentyear <- as.numeric(format(Sys.Date(), "%Y"))
age <- currentyear - birthyear
for (i in 1:age) {
print(paste("You turned", i, "years old in", currentyear - age + i))
}
}
askBirth()
# 44. The dataset InsectSprays shows the count of insects after applying one of
# six different insect repellents. Construct a two-row three-column grid of
# histograms, on the same scale, showing the number of insects from each spray.
# Do this using a for loop rather than coding each plot by hand.
par(mfrow = c(2,3))
sprays <- unique(InsectSprays$spray)
for (i in 1:length(sprays)) {
hist(InsectSprays$count[InsectSprays$spray == sprays[i]]/sum(InsectSprays$count[InsectSprays$spray == sprays[i]]),
breaks = seq(0, 0.3, by = 0.025),
ylim = c(0, 6))
}
# 45. Repeat the same two by three plot, but now remove the margins, axes, and
# labels. Replace these by adding the spray identifier (a single letter) to the
# plot with the text command.
par(mfrow = c(2,3))
par(mar = c(0,0,0,0))
sprays <- unique(InsectSprays$spray)
for (i in 1:length(sprays)) {
hist(InsectSprays$count[InsectSprays$spray == sprays[i]]/sum(InsectSprays$count[InsectSprays$spray == sprays[i]]),
breaks = seq(0, 0.3, by = 0.025),
axes = FALSE,
main = "",
xlab = "",
ylab = "",
ylim = c(0, 6),
col = "blue")
box()
text(x = .15,
y = 6,
label = paste("Spray:", sprays[i]),
col = "red")
}
# 46. Calculate the median insect count for each spray.
tapply(InsectSprays$count, InsectSprays$spray, median)
# 47. Using the WorldPhones dataset, calculate the total number of phones used
# in each year using a for loop.
totalphones <- rep(NA, nrow(WorldPhones))
for (i in 1:nrow(WorldPhones)) {
totalphones[i] <- sum(WorldPhones[i,])
}
totalphones
# 48. Calculate the total number of phones used in each year using a single
# apply function.
totalphones <- apply(WorldPhones, 1, sum)
totalphones
# 49. Calculate the percentage of phones that were in Europe over the years in
# question.
percEuroPhone <- round((WorldPhones[,2]/totalphones) * 100, 2)
percEuroPhone
# 50. Convert the entire WorldPhones matrix to percentages; in other words, each
# row should sum to 100.
percWorldPhone <- round((WorldPhones/totalphones) * 100, 2)
percWorldPhone
|
4db851bf10a2e13b414292ee3de7b1063c027e64
|
394597e53309453248426a7fa82c2c01457af122
|
/Log.R
|
58736fa8795dca8753522cd3eea0796fa6d6a248
|
[] |
no_license
|
dhruvchandralohani/Image-Processing-Code
|
a6b360ce7fca57cd2da8352b8bdb057b890a8ad1
|
1b8fb63d54b20e63eecc2991b97b7f0fdb6facc6
|
refs/heads/main
| 2023-06-06T07:39:22.842798
| 2021-06-26T03:50:54
| 2021-06-26T03:50:54
| 380,223,734
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
Log.R
|
# Log transform of a grayscale image: s = log(1 + r).
# Reads an image with magick, converts it to grayscale, bridges it into
# imager (cimg), applies the log transform, and plots each stage.
img1 <- image_read("/home/dhruv/Desktop/doc.png")
plot(img1)
img1gray <- image_convert(img1, type = "grayscale")
plot(img1gray)
img1gray
img2gray <- magick2cimg(img1gray, alpha = "rm")
img2gray
plot(img2gray, rescale = FALSE)
img2_mat <- as.matrix(img2gray)
# FIX / generalization: the transform previously ran a double loop over
# hard-coded 220 x 445 dimensions, which breaks for any other image size.
# log() is vectorized over the whole matrix (dim attribute is preserved),
# so one expression handles every image.
img_log <- log(1 + img2_mat)
log_img <- as.cimg(img_log)
plot(log_img)
|
22b1e4620cc0f648c013817258877136b6f0c5f5
|
6e506411d591fd71e20730f89c497213b195c81f
|
/src/phageMain.R
|
344cbfb9164da47320556a3099c21e14e1ea978c
|
[
"MIT"
] |
permissive
|
pablocarderam/PhageModel
|
6aaff3b282cdb6619863d334afd6f0ab6d4cd7f8
|
a792bad36247165ce906904458c6d5165ff7313b
|
refs/heads/master
| 2021-01-19T13:41:36.593397
| 2017-09-01T12:34:40
| 2017-09-01T12:34:40
| 100,855,812
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,463
|
r
|
phageMain.R
|
# MAIN
# Driver script: load parameters and model code, run the culture and
# transfer-experiment simulations, then render and save the figures.
# Clear workspace
# NOTE(review): rm(list = ls()) in a script is discouraged — it wipes the
# caller's global environment and does not detach packages; prefer running
# the script in a fresh R session.
rm(list = ls(all = TRUE))
# Imports
source("dat/phageParams.R") # needed for ODE
source("src/phageSolveODE.R") # needed for ODE
source("src/phagePlots.R") # needed for plots
# Run everything
param = getParameters()
setup_list = setup(param) # returns list with [[1]] list of starting vectors for each of the four treatments and [[2]] the time vector
solsCulture = runCulture(param,setup_list[[1]],setup_list[[2]]) # solves model numerically
transfer_times = c(24,48,72,96,120,144,168,192) # Transfer times in hours. Add one extra transfer time at end as an endpoint
solsTransfers = runTransferExperiment(param,transfer_times)
# Thin both solution sets to every 10th time point so dashed line types
# render correctly in the plots below.
solsCulture = lapply(solsCulture, function (sol) {
sol = sol[seq(1, nrow(sol), 10),] # get every 10th row to render dashed lines correctly
return(sol)
})
solsTransfers = lapply(solsTransfers, function (sol) {
sol = sol[seq(1, nrow(sol), 10),] # get every 10th row to render dashed lines correctly
return(sol)
})
# Plot all:
gCompartments = plotCompGraphs(solsCulture) # graphs each compartment in model for every treatment
gTotalsBoth = plotAllTotals(solsCulture,solsTransfers,transfer_times) # compares treatments by graphing total populations and resistant fractions
# Save both figures into plt/ at 300 dpi.
ggsave(filename="Compartments.png",path="plt/",plot=gCompartments,width = 7, height = 6, units = c("in"), dpi = 300)
ggsave(filename="Totals.png",path="plt/",plot=gTotalsBoth,width = 8, height = 6, units = c("in"), dpi = 300)
|
4514ea942cec10598ddb08d61518a659cf3f6f4b
|
6d3fd06f9dfcb1e971c9a97607e0b04420ac8829
|
/PPT_scatter.R
|
a6b0364cf6e26e8ed6c467eef48d5d1311b02edd
|
[] |
no_license
|
mtnorton/CBPChangeDetection
|
5d941734774bc713b86ea7431fcbeb0971d7debd
|
c5484717b3514014a6456910629d56125a2a86ba
|
refs/heads/master
| 2021-04-12T12:53:15.429201
| 2017-09-11T14:42:56
| 2017-09-11T14:42:56
| 94,553,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
PPT_scatter.R
|
# sample scatterplot for PPT
# Michael Norton 6.27.17
# Each row of the matrix is one land-cover class: col 1 = percentage of
# land cover in 2009, col 2 = percentage in 2013.
# Renamed the object from `data` to avoid masking the base function
# utils::data().
cover_pct <- matrix(c(100, 31.3, 0, 4.4, 0, 35.7, 0, 11.7, 0, 16.9), ncol = 2, byrow = TRUE)
plot(cover_pct[, 1], cover_pct[, 2],
     col = c("#008000", "#00FF00", "#FFAA00", "#FF0000", "#000000"),
     cex = 2, pch = 19, main = "Land Cover Changes",
     xlim = c(0, 100), ylim = c(0, 100),
     ylab = "Percentage of land cover 2013",
     xlab = "Percentage of land cover 2009")
abline(a = 0, b = 1, lty = 2)  # 1:1 reference line (no change between years)
|
858bddc89471704cf6aabc59d562afa2041f1823
|
f17b379365b21a6ecaed501228f3cafabf42bd62
|
/man/tpm4plot-class.Rd
|
d371b84bfbb0e8293cab3de6da24175943357991
|
[] |
no_license
|
ericaenjoy3/CHIPRNA
|
649efda9e6f6461e10e3d134653553e1599c18ff
|
1392c5f1c620d8ac421f4a99014debc20cefad3e
|
refs/heads/master
| 2021-07-19T03:05:33.745582
| 2017-10-25T20:09:27
| 2017-10-25T20:09:27
| 104,156,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,297
|
rd
|
tpm4plot-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CHIPRNAclass.R
\docType{class}
\name{tpm4plot}
\alias{tpm4plot}
\alias{tpm4plot-class}
\title{Store TPM and related information for plotting}
\description{
Store TPM and related information for plotting
}
\section{Slots}{
\describe{
\item{\code{tpm.val}}{A \code{tbl} object, representing raw TPM values of either individual or grouped samples'}
\item{\code{tpm.ascale}}{A \code{tbl} object, representing TPM values standardized across all samples}
\item{\code{tpm.rscale}}{A \code{tbl} object, representing TPM values standardized across rows of all samples, zero variance rows were filtered according to var.idx}
\item{\code{info}}{A \code{tbl} object, representing 'chr', 'start', 'end' and 'clus' of peaks genes are closest to.}
\item{\code{grp.before}}{A character vector the same data value as the 4th column of the \code{bed} slot of the \code{chip} object. The length of \code{grp.before} is identical to the row numbers of \code{tpm.value} and \code{tpm.ascale}}
\item{\code{grp.after}}{A character vector of \code{grp.before} filtered by \code{var.idx}.}
\item{\code{var.idx}}{A logical vector of the same length as \code{grp.before} or the same row numbers as either \code{tpm.val} or \code{tpm.ascale}.}
}}
|
c73815f24b7a4120ad6902e2281d69d853e911a2
|
bafdfcab4680d5208d451021f833f51b50ad2700
|
/R/word_classification_data.R
|
dbcc6c5275cb8ef9573a1806c5c0c9aa57d027bf
|
[] |
no_license
|
denis-arnold/AcousticNDLCodeR
|
b973c79c06efd3a3614e0b681a977b0a74d44f09
|
488f9a6300b23cb0aa93bbe51eeee9d764a982fd
|
refs/heads/master
| 2021-01-24T20:58:16.335997
| 2018-07-06T15:42:16
| 2018-07-06T15:42:16
| 123,262,982
| 0
| 2
| null | 2018-02-28T15:19:04
| 2018-02-28T09:37:05
|
R
|
UTF-8
|
R
| false
| false
| 1,759
|
r
|
word_classification_data.R
|
#' Data of PLoS ONE paper
#'
#' Dataset of subject and modeling data for an auditory word identification
#' task (see the reference below for the full experimental design).
#'
#' @name word_classification_data
#' @docType data
#' @usage data(word_classification_data)
#'
#'
#' @format Data from the four experiments and model estimates
#' \describe{
#'   \item{\code{ExperimentNumber}}{Experiment identifier}
#'   \item{\code{PresentationMethod}}{Method of presentation in the experiment: loudspeaker or headphones}
#'   \item{\code{Trial}}{Trial number in the experimental list}
#'   \item{\code{TrialScaled}}{Scaled trial number}
#'   \item{\code{Subject}}{Anonymized subject identifier}
#'   \item{\code{Item}}{Word identifier; German umlauts and special characters are coded as 'ae', 'oe', 'ue' and 'ss'}
#'   \item{\code{Activation}}{NDL activation}
#'   \item{\code{LogActivation}}{log(activation + epsilon)}
#'   \item{\code{L1norm}}{L1-norm (lexicality)}
#'   \item{\code{LogL1norm}}{Log of the L1-norm}
#'   \item{\code{RecognitionDecision}}{Recognition decision (yes/no)}
#'   \item{\code{RecognitionRT}}{Latency of the recognition decision}
#'   \item{\code{LogRecognitionRT}}{Log recognition RT}
#'   \item{\code{DictationAccuracy}}{Dictation accuracy (TRUE: correct word reported, FALSE otherwise)}
#'   \item{\code{DictationRT}}{Response latency to typing onset}
#' }
#'
#' @references
#'
#' Denis Arnold, Fabian Tomaschek, Konstantin Sering, Florence Lopez, and R. Harald Baayen (2017).
#' Words from spontaneous conversational speech can be recognized with human-like accuracy by
#' an error-driven learning algorithm that discriminates between meanings straight from smart
#' acoustic features, bypassing the phoneme as recognition unit. PLoS ONE 12(4):e0174623.
#' https://doi.org/10.1371/journal.pone.0174623
#' @keywords data
NULL
|
874e4a2746aee5071bd5907068a6ff3b1df7087d
|
51ccbfa4c644057b992d08630d20ce59a5521c4a
|
/man/savings_summary.Rd
|
ec6ff3f04554fa65ba6f1933dc0d6d5fb78668be
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
LBNL-ETA/RMV2.0
|
25e0000f5ec68c28c015e92e73b98038d8fb254b
|
979a96332fb8566063f25cd1d5d24e44b285221b
|
refs/heads/master
| 2023-03-01T20:21:07.746272
| 2020-10-28T00:50:58
| 2020-10-28T00:50:58
| 110,508,552
| 34
| 14
| null | null | null | null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
savings_summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/savings.R
\name{savings_summary}
\alias{savings_summary}
\title{Savings summary}
\usage{
savings_summary(sav_out)
}
\arguments{
\item{sav_out}{a shiny reactiveValues object where the baseline object and the pre/post data are stored}
\item{inCL}{a numerical value corresponding to the user specified confidence level}
}
\value{
a dataframe of the savings summary
}
\description{
\code{savings_summary} This function is used by the shiny application
to produce the savings summary table
}
|
96416dcd8e62e88c0a6c19f70cb1a5bc7c60a7f6
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/b6e6rl/R/nahvyb_expt.R
|
226b15499e16872e3183ccfb090d60ae9c3299df
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 282
|
r
|
nahvyb_expt.R
|
#' Draw a simple random sample without replacement.
#'
#' Samples \code{k} distinct values from \code{1:N}; when \code{expt} is
#' supplied, those values are excluded from the pool before sampling
#' (sample "with exceptions").
#'
#' @param N    Upper bound of the candidate pool \code{1:N}.
#' @param k    Number of values to draw.
#' @param expt Optional vector of values to exclude from the pool.
#' @return A numeric vector of length \code{k} with distinct values.
nahvyb_expt <- function(N, k, expt) {
  pool <- 1:N
  # missing() replaces the original MATLAB-style nargin count: it is TRUE
  # exactly when the caller did not supply `expt`.
  if (!missing(expt)) {
    pool <- pool[-expt]
  }
  vyb <- rep(0, k)
  # seq_len() (not 1:k) so that k = 0 performs no draws instead of
  # iterating over c(1, 0).
  for (i in seq_len(k)) {
    # One uniform draw picks a position in the remaining pool; removing the
    # drawn element keeps the sampling without replacement.
    index <- 1 + trunc(runif(1) * length(pool))
    vyb[i] <- pool[index]
    pool <- pool[-index]
  }
  vyb
}
|
6cab27124eeff88b381481371f8aeec920f7da8f
|
5e3d48e48438e540f0cabefa4572830d23479d5e
|
/TidyGEO/server/all_data/join_dfs.R
|
282cbd4c809dded660d9e8ec2fbef40bc56cb0d4
|
[
"Apache-2.0"
] |
permissive
|
srp33/TidyGEO
|
3b9ece9700873ebb5dfb1aa9cd000a8c5d77963e
|
7a31d49578a78e2375a2e960371db36e5a930b85
|
refs/heads/master
| 2023-02-26T22:08:48.537762
| 2023-02-18T06:10:18
| 2023-02-18T06:10:18
| 171,913,236
| 5
| 3
|
Apache-2.0
| 2023-02-18T06:10:19
| 2019-02-21T17:14:26
|
HTML
|
UTF-8
|
R
| false
| false
| 9,582
|
r
|
join_dfs.R
|
# side panel --------------------------------------------------------------
# Column-picker widgets for the join key of each of the (up to three)
# datasets the user can include in the join. col_selector_ui() is a
# project-level helper defined elsewhere; it builds the selector for the
# given dataset id / input id pair.
output$col_to_join1_selector <- renderUI({
  col_selector_ui("data_to_join1", "col_to_join1")
})
output$col_to_join2_selector <- renderUI({
  col_selector_ui("data_to_join2", "col_to_join2")
})
output$col_to_join3_selector <- renderUI({
  col_selector_ui("data_to_join3", "col_to_join3")
})
# Add another dataset slot to the join side panel (three slots maximum).
# The counter `join_datatypes_visible`, stored on the "all" datatype via
# project helpers set_x_equalto_y()/get_data_member(), tracks how many
# dataset selectors are currently shown; it also numbers the input ids
# (data_to_join2, col_to_join2, join_behavior2, ...).
observeEvent(input$add_dataset, {
  set_x_equalto_y("join_datatypes_visible", get_data_member("all", "join_datatypes_visible") + 1, "all")
  this_id <- paste0("data_to_join", get_data_member("all", "join_datatypes_visible"))
  this_selector_id <- paste0("col_to_join", get_data_member("all", "join_datatypes_visible"))
  # Insert the new dataset selector block just before the "add" button.
  insertUI(
    selector = "#add_dataset",
    where = "beforeBegin",
    ui = div(id = paste0("selector_div", get_data_member("all", "join_datatypes_visible")),
             selectInput(this_id, "Choose another dataset to join:", choices = c("clinical", "assay", "feature")),
             uiOutput(paste0(this_selector_id, "_selector")),
             radioButtons(paste0("join_behavior", get_data_member("all", "join_datatypes_visible")),
                          paste0("Please choose the action you would like to take when items in the first dataset",
                                 " are not present in the second dataset (and visa versa):"),
                          choices = c("drop", "keep values from first dataset", "keep values from second dataset", "keep all"))
    )
  )
  # At most three datasets can be joined; with all three visible, disable
  # "add" and make sure "remove" is available again.
  if (get_data_member("all", "join_datatypes_visible") > 2) disable("add_dataset")
  enable("remove_dataset")
  # (A commented-out block that inserted a DT preview panel per dataset was
  # condensed here; see version control history if it needs reviving.)
})
# Remove the most recently added dataset slot: delete its UI, clear its
# (now orphaned) input values via the custom "resetValue" message, and
# decrement the visible-slot counter. "remove" is disabled once only the
# first dataset remains.
observeEvent(input$remove_dataset, {
  removeUI(
    selector = paste0("#selector_div", get_data_member("all", "join_datatypes_visible"))
  )
  # (Matching removal of the commented-out preview panel was also disabled.)
  session$sendCustomMessage("resetValue", paste0("data_to_join", get_data_member("all", "join_datatypes_visible")))
  session$sendCustomMessage("resetValue", paste0("col_to_join", get_data_member("all", "join_datatypes_visible")))
  set_x_equalto_y("join_datatypes_visible", get_data_member("all", "join_datatypes_visible") - 1, "all")
  enable("add_dataset")
  if (get_data_member("all", "join_datatypes_visible") < 2) disable("remove_dataset")
})
# Perform up to two joins, chained left-to-right: (dataset1 + dataset2),
# then (result + dataset3). The running result is stored under the "all"
# datatype; on completion the main panel switches to the joined-data view.
# All helpers used here (set_x_equalto_y, eval_function, error_modal,
# SUCCESS, ...) are project-level utilities defined elsewhere.
observeEvent(input$join_columns, {
  # Seed the "all" dataset with the first selected dataset; any joins below
  # are applied on top of it.
  set_x_equalto_y(dataname("all"), get_data_member(input$data_to_join1, dataname(input$data_to_join1)), "all")
  set_script_equal("all", input$data_to_join1)
  withProgress({
    incProgress(message = "Performing first join")
    # Default to SUCCESS so that selecting a single dataset (no join is
    # performed) does not leave join1_status undefined at the check below —
    # previously this path raised an "object not found" error.
    join1_status <- SUCCESS
    if (get_data_member("all", "join_datatypes_visible") > 1) {
      join1_status <- eval_function("all", "join_data",
                                    list(get_data_member_expr(input$data_to_join2, dataname(input$data_to_join2)),
                                         input$col_to_join1,
                                         input$col_to_join2,
                                         input$join_behavior2),
                                    "Joining datasets",
                                    to_knit = c("all", input$data_to_join2))
    }
    if (join1_status == SUCCESS) {
      incProgress(message = "Performing second join")
      # The third dataset only exists when all three slots are visible.
      if (get_data_member("all", "join_datatypes_visible") > 2) {
        join2_status <- eval_function("all", "join_data",
                                      list(get_data_member_expr(input$data_to_join3, dataname(input$data_to_join3)),
                                           input$col_to_join2,
                                           input$col_to_join3,
                                           input$join_behavior3),
                                      "joining datasets",
                                      to_knit = c("all", input$data_to_join3))
        if (join2_status != SUCCESS) {
          showModal(
            error_modal("Error in second join", "Second join not performed.", join2_status)
          )
        }
      }
    } else {
      showModal(
        error_modal("Error in first join", "First join not performed.", join1_status)
      )
    }
  })
  # Jump to the results tab showing the joined ("all") dataset.
  updateTabsetPanel(session, "all_data_main_panel", "2")
  updateSelectInput(session, inputId = "data_to_view", selected = "all")
})
navigation_set_server("1", "2", "3", "all_data_options", "all_data_options")
# main panel --------------------------------------------------------------
# Static UI for the join tab: an explanation paragraph, then a diagram of
# the chained join — three boxes (one per dataset) with bordered rows
# underneath showing the "colA = colB" join conditions. Blank boxes are
# placeholders for unselected datasets.
join_dfs_ui <- tagList(
  h4("Joining datasets"),
  p(paste("Here is a representation of the join you are about to perform. You will notice that you can join up to two",
          "times for a total of three joined datasets. If you have only selected one dataset, no joins will be performed",
          "but \"joined data\" will consist of the one dataset you have selected.",
          "Blank boxes are placeholders that will not affect the final join.")),
  br(),
  # Row of three boxes labelled with the selected dataset names.
  fluidRow(
    column(4,
           box(
             div(textOutput("selected_to_join1"), style = "text-align: center"),
             background = "light-blue"
           )
    ),
    column(4,
           box(
             div(textOutput("selected_to_join2"), style = "text-align: center"),
             background = "light-blue"
           )
    ),
    column(4,
           box(
             div(textOutput("selected_to_join3"), style = "text-align: center"),
             background = "light-blue"
           )
    )
  ),
  # Bracket under boxes 1 and 2: first join condition.
  fluidRow(
    column(1,
           br()
    ),
    column(4,
           div(textOutput("selected_join_var1"), style = "text-align: center"),
           style = "border-left: 6px solid; border-right: 6px solid; border-bottom: 6px solid"
    ),
    column(4,
           br(),
           style = "border-right: 6px solid;")
  ),
  # Wider bracket spanning the first result and box 3: second join condition.
  fluidRow(
    column(3,
           br()),
    column(6,
           div(textOutput("selected_join_var2"), style = "text-align: center"),
           style = "border-left: 6px solid; border-right: 6px solid; border-bottom: 6px solid;"
    )
  ),
  br(),
  # (Commented-out per-dataset DT previews removed for clarity.)
  uiOutput("join_results_preview")
)
# Text labels feeding the join diagram above: the three dataset names shown
# in the boxes, and the two "colA = colB" join conditions shown in the
# bordered rows beneath them.
output$selected_to_join1 <- renderText({
  input$data_to_join1
})
output$selected_to_join2 <- renderText({
  input$data_to_join2
})
output$selected_to_join3 <- renderText({
  input$data_to_join3
})
output$selected_join_var1 <- renderText({
  paste0(input$col_to_join1, " = ", input$col_to_join2)
})
output$selected_join_var2 <- renderText({
  paste0(input$col_to_join2, " = ", input$col_to_join3)
})
#if (FALSE) {
#
#get_data_to_join_rows <- function(datatype) {
# nrow(get_data_member(datatype, dataname(datatype)))
#}
#
#output$data_to_join1_preview <- renderDT({
# datatable(data_to_join1_data(), rownames = FALSE, colnames = input$data_to_join1, escape = FALSE,
# options = list(dom = "t", scrollY = 200))
#})
#
#output$data_to_join1_rows <- renderUI({
# HTML(paste0("<p><b>Rows: </b>", get_data_to_join_rows(input$data_to_join1), "</p>"))
#})
#
#
#
#output$data_to_join2_preview <- renderDT({
# datatable(data_to_join2_data(), rownames = FALSE, colnames = input$data_to_join2, escape = FALSE,
# options = list(dom = "t", scrollY = 200))
#})
#
#output$data_to_join2_rows <- renderUI({
# HTML(paste0("<p><b>Rows: </b>", get_data_to_join_rows(input$data_to_join2), "</p>"))
#})
#
#output$data_to_join3_preview <- renderDT({
# datatable(data_to_join3_data(), rownames = FALSE, colnames = input$data_to_join3, escape = FALSE,
# options = list(dom = "t", scrollY = 200))
#})
#
#output$data_to_join3_rows <- renderUI({
# HTML(paste0("<p><b>Rows: </b>", get_data_to_join_rows(input$data_to_join3), "</p>"))
#})
#
#}
# Size that `datatype`'s dataset contributes to the join preview.
# When the user joins on "colnames" the dataset is conceptually transposed,
# so its row count is reported; otherwise its column count is. Returns 0
# when either argument is NULL (i.e. nothing has been selected yet).
# get_data_member()/dataname() are project-level helpers defined elsewhere.
#
# @param datatype     Dataset name ("clinical", "assay", "feature", ...) or NULL.
# @param selected_col Column chosen to join on, the literal "colnames", or NULL.
# @return An integer count (0 when no selection has been made).
get_data_to_join_preview <- function(datatype, selected_col) {
  if (is.null(datatype) || is.null(selected_col)) {
    return(0)
  }
  this_data <- get_data_member(datatype, dataname(datatype))
  # Direct dispatch instead of do.call(paste0("n", ...), ...): clearer, and
  # avoids building function names from strings.
  if (selected_col == "colnames") {
    nrow(this_data)
  } else {
    ncol(this_data)
  }
}
# Per-dataset size counts (via get_data_to_join_preview) for the three join
# slots; each is 0 until both a dataset and a join column are selected.
# Summed below to preview the width of the join result.
data_to_join1_data <- reactive({
  get_data_to_join_preview(input$data_to_join1, input$col_to_join1)
})
data_to_join2_data <- reactive({
  get_data_to_join_preview(input$data_to_join2, input$col_to_join2)
})
data_to_join3_data <- reactive({
  get_data_to_join_preview(input$data_to_join3, input$col_to_join3)
})
# Rough preview of the join result's width: the sum of the per-dataset
# counts. NOTE(review): a real key join would merge the key columns rather
# than simply adding the counts — presumably this is intended as an upper
# bound; confirm with the join_data implementation.
output$join_results_preview <- renderUI({
  HTML(
    paste0(
      "<p><b>Resulting number of columns: </b>", data_to_join1_data() + data_to_join2_data() + data_to_join3_data(), "</p>"
    )
  )
})
|
8ef0656f6df9f35b5caab99f0df42a34f14eab26
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/medicare/examples/price_deflate.Rd.R
|
4a8a4e4061b080bfa66fdf9827c8310b87880b2f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
price_deflate.Rd.R
|
# Auto-extracted roxygen example for medicare::price_deflate.
library(medicare)
### Name: price_deflate
### Title: Deflate prices within a sector, relative to a base period.
### Aliases: price_deflate
### ** Examples
# convert $100 in current inpatient spending to year 2007 dollars
price_deflate(100, "ip", 2014, 2007)
|
caa446b6710b99e1cc890f2efcec6b55e5093ffb
|
1c8a1e0041124a546f13dc5e98997571111745f6
|
/prep/ICO/ICO.R
|
8c58bddb9befe6089543b644c06aa26512ca02a0
|
[] |
no_license
|
OHI-Science/chl
|
bdbf046f10fbcfd617747572f6b636238d443649
|
3699fd7b5d0ef08df3159beaa4e6b4314f6f5419
|
refs/heads/master
| 2023-08-17T10:31:53.577099
| 2023-08-10T19:14:12
| 2023-08-10T19:14:12
| 25,446,924
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,504
|
r
|
ICO.R
|
# ICO (Iconic Species) goal: compute status and trend scores per region from
# species extinction-risk categories and population trends.
# NOTE(review): this script references `layers` and `SelectLayersData()`
# (OHI toolbox objects) and ends with `return(score)`, so it is presumably
# evaluated inside an OHI goal function rather than sourced standalone —
# confirm against the toolbox harness.
library(plyr)
library(maditr)
# Layer-name -> value-column lookups for the two input layers.
lyr1 = c('ico_spp_extinction_status' = 'risk_category')
lyr2 = c( 'ico_spp_popn_trend' = 'popn_trend')
# cast data ----
l_data1 = SelectLayersData(layers, layers=names(lyr1))
l_data2 = SelectLayersData(layers, layers=names(lyr2))
l_data1 <- select(l_data1, c("id_num", "category", "val_chr"))
# NOTE(review): dplyr::rename() normally takes name pairs (new = old), not a
# character vector — this c("new" = "old") call style matches plyr::rename();
# verify which rename is actually being dispatched here.
l_data1<- dplyr:: rename(l_data1, c("risk_category" = "val_chr",'region_id'='id_num', 'sciname' ='category'))
l_data2<- select(l_data2, c("id_num", "category", "val_chr"))
l_data2<- dplyr:: rename(l_data2, c("popn_trend" = "val_chr",'region_id'='id_num', 'sciname' ='category'))
# One row per region/species with both risk category and population trend.
rk<- merge(l_data1, l_data2)
# lookup for weights status: IUCN-style risk categories mapped to [0, 1]
# (LC = least concern ... EX = extinct).
w.risk_category = c('LC' = 0,
                    'NT' = 0.2,
                    'VU' = 0.4,
                    'EN' = 0.6,
                    'CR' = 0.8,
                    'EX' = 1)
# lookup for population trend (labels are in Spanish:
# decreasing / stable / increasing).
w.popn_trend = c('Decreciendo' = -0.5,
                 'Estable' = 0,
                 'Creciendo' = 0.5)
# status: per region, mean of (1 - risk weight) scaled to 0-100.
r.status = rename(ddply(rk, .(region_id), function(x){
  mean(1 - w.risk_category[x$risk_category], na.rm=T) * 100 }),
  c('V1'='score'))
# trend: per region, mean population-trend weight (in [-0.5, 0.5]).
r.trend = rename(ddply(rk, .(region_id), function(x){
  mean(w.popn_trend[x$popn_trend], na.rm=T) }),
  c('V1'='score'))
# return scores: stack status and trend rows, tagged with the goal code.
s.status = cbind(r.status, data.frame('dimension'='status'))
s.trend = cbind(r.trend , data.frame('dimension'='trend' ))
score = cbind(rbind(s.status, s.trend), data.frame('goal'='ICO'))
return(score)
|
270ebdfa0888aca6cc4a294510e3493303695e6e
|
af1895e041c8ccde1b333c4f0334bb88b3cca493
|
/fireSense_NWT_DataPrep.R
|
18ab57833bc9000997b7b4c67a82e8f57fbfaa84
|
[] |
no_license
|
PredictiveEcology/fireSense_NWT_DataPrep
|
2ffc85c9a78ff1a8a7da4fb7b505a495668275f6
|
99b21c4883e520eefb7f091a41c12828fc9bc9f0
|
refs/heads/master
| 2022-03-28T04:36:48.828324
| 2019-12-04T09:08:25
| 2019-12-04T09:08:25
| 168,408,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,001
|
r
|
fireSense_NWT_DataPrep.R
|
# Everything in this file gets sourced during simInit, and all functions and objects
# are put into the simList. To use objects, use sim$xxx, and are thus globally available
# to all modules. Functions can be used without sim$ as they are namespaced, like functions
# in R packages. If exact location is required, functions will be: sim$<moduleName>$FunctionName
defineModule(sim, list(
name = "fireSense_NWT_DataPrep",
description = "Prepare climate and vegetation data needed to run the fireSense modules for BCR6 and BCR6 contained in the Northwest Territories.", #"insert module description here",
keywords = NA, # c("insert key words here"),
authors = person("Jean", "Marchal", email = "jean.d.marchal@gmail.com", role = c("aut", "cre")),
childModules = character(0),
version = list(SpaDES.core = "0.2.4", fireSense_NWT_DataPrep = "1.0.0"),
spatialExtent = raster::extent(rep(NA_real_, 4)),
timeframe = as.POSIXlt(c(NA, NA)),
timeunit = "year",
citation = list("citation.bib"),
documentation = list("README.txt", "fireSense_NWT_DataPrep.Rmd"),
reqdPkgs = list("dplyr", "magrittr", "raster", "rlang", "sf", "tibble"),
parameters = rbind(
#defineParameter("paramName", "paramClass", value, min, max, "parameter description"),
defineParameter(name = "res", class = "numeric", default = 10000,
desc = "at which resolution should we aggregate the data? By
default, 10km."),
defineParameter(name = "train", class = "logical", default = TRUE,
desc = "train or predict mode. Defaults is TRUE, or train mode."),
defineParameter(name = ".runInitialTime", class = "numeric", default = start(sim),
desc = "when to start this module? By default, the start
time of the simulation."),
defineParameter(name = ".runInterval", class = "numeric", default = 1,
desc = "optional. Interval between two runs of this module,
expressed in years. By default, every year."),
defineParameter(name = ".useCache", class = "logical", default = FALSE,
desc = "Should this entire module be run with caching
activated? This is generally intended for data-type
modules, where stochasticity and time are not relevant"),
defineParameter(name = "RCP", class = "character", default = "85", min = NA, max = NA,
desc = "Which RCP should be used? Default to 85"),
defineParameter(name = "climateModel", class = "character", default = "CCSM4", min = NA, max = NA,
desc = "Which climate model should be used? Default to CCSM4"),
defineParameter(name = "ensemble", class = "character", default = "CCSM4", min = NA, max = NA,
desc = "Which ensemble model should be used? Default to ''. CCSM4 doesn't have ensemble, just CanESM2 (r11i1p1)"),
defineParameter(name = "climateResolution", class = "character", default = "3ArcMin", min = NA, max = NA,
desc = "Which DEM resolution was used for generating the climate layers? Default to '3ArcMin'."),
defineParameter(name = "climateFilePath", class = "character", default = "https://drive.google.com/open?id=17idhQ_g43vGUQfT-n2gLVvlp0X9vo-R8", min = NA, max = NA,
desc = "URL to zipped climate file coming from ClimateNA, containing all climate variables for all years of simulation")
),
inputObjects = bind_rows(
#expectsInput("objectName", "objectClass", "input object description", sourceURL, ...),
expectsInput(
objectName = "cloudFolderID",
objectClass = "character",
sourceURL = NA_character_,
desc = "GDrive folder ID for cloud caching."
),
expectsInput(
objectName = "LCC05",
objectClass = "RasterLayer",
sourceURL = "https://drive.google.com/open?id=1ziUPnFZMamA5Yi6Hhex9aZKerXLpVxvz",
desc = "Land Cover Map of Canada 2005 (LCC05)."
),
expectsInput(
objectName = "vegMap",
objectClass = "RasterLayer",
sourceURL = NA,
desc = "Land Cover Map of Canada 2005 (LCC05) cropped."
),
expectsInput(
objectName = "MDC_BCR6_NWT_250m",
objectClass = "RasterStack",
sourceURL = NA_character_,
desc = "Monthly Drought Code (April to September) within BCR6 as contained in the Northwest Territories."
),
expectsInput(
objectName = "NFDB_PO",
objectClass = "sf",
sourceURL = "http://cwfis.cfs.nrcan.gc.ca/downloads/nfdb/fire_poly/current_version/NFDB_poly.zip",
desc = "National Fire DataBase polygon data (NFDB_PO)."
),
expectsInput(
objectName = "NFDB_PT",
objectClass = "sf",
sourceURL = "http://cwfis.cfs.nrcan.gc.ca/downloads/nfdb/fire_pnt/current_version/NFDB_point.zip",
desc = "National Fire DataBase point data (NFDB_PT)."
),
expectsInput(
objectName = "rasterToMatch",
objectClass = "RasterLayer",
sourceURL = "https://drive.google.com/open?id=1NIjFbkckG3sewkTqPGaBGQDQLboPQ0wc",
desc = "a template raster describing the studyArea"
),
expectsInput(
objectName = "studyArea",
objectClass = "SpatialPolygonsDataFrame",
sourceURL = "https://drive.google.com/open?id=1LUxoY2-pgkCmmNH5goagBp3IMpj6YrdU",
desc = "a template polygon describing the studyArea"
),
expectsInput( # ~ TM added on 11AUG19 --> Not defining it was causing it to be NULL in simList
objectName = "MDC06",
objectClass = "RasterLayer",
sourceURL = NA,
desc = "Rasterlayer describing the Monthly Drougth Code of June for the current year."
),
expectsInput( # ~ TM added on 11AUG19 --> Not defining it was causing it to be NULL in simList
objectName = "wetLCC",
objectClass = "RasterLayer",
sourceURL = NA,
desc = "Rasterlayer with 3 values, generated from DUCKS unlimited, showing water = 1, wetlands = 2, and uplands = 3."
),
expectsInput( # ~ TM added on 04DEC19 --> Not defining it was causing it to be NULL in simList
objectName = "usrEmail",
objectClass = "character",
sourceURL = NA,
desc = "User e.mail for GDrive authorization"
)
),
outputObjects = bind_rows(
#createsOutput("objectName", "objectClass", "output object description", ...),
createsOutput(
objectName = "dataFireSense_EscapeFit",
objectClass = "data.frame",
desc = "Contains MDC, land-cover, fire data necessary to train the fireSense_EscapeFit SpaDES module for BCR6 as contained in the Northwest Territories."
),
createsOutput(
objectName = "dataFireSense_FrequencyFit",
objectClass = "data.frame",
desc = "Contains MDC, land-cover, fire data necessary to train the fireSense_FrequencyFit SpaDES module for BCR6 as contained in the Northwest Territories."
),
createsOutput(
objectName = "LCC",
objectClass = "RasterStack",
desc = "Contains LCC classes. Necessary to predict with fireSense for BCR6 as contained in the Northwest Territories."
),
createsOutput(
objectName = "MDC06",
objectClass = "RasterLayer",
desc = "Contains MDC06 for the current year. Necessary to predict with fireSense for BCR6 as contained in the Northwest Territories."
)
)
))
## event types
# - type `init` is required for initialization
# Event dispatcher for the fireSense_NWT_DataPrep SpaDES module.
# `init` prepares static inputs (Init) and — as the original "Friday
# afternoon hack" comment notes — immediately performs the first Run before
# scheduling the next one; `run` repeats every P(sim)$.runInterval years.
doEvent.fireSense_NWT_DataPrep = function(sim, eventTime, eventType)
{
  switch(
    eventType,
    init = {
      sim <- Init(sim)
      sim <- Run(sim) # Hack of the Friday afternoon --'
      sim <- scheduleEvent(sim, eventTime = P(sim)$.runInitialTime + 1, "fireSense_NWT_DataPrep", "run")
    },
    run = {
      sim <- Run(sim)
      # An NA .runInterval disables rescheduling (single run only).
      if (!is.na(P(sim)$.runInterval))
        sim <- scheduleEvent(sim, time(sim) + P(sim)$.runInterval, "fireSense_NWT_DataPrep", "run")
    },
    # Fallback branch of switch(): unknown event types warn instead of erroring.
    warning(
      paste(
        "Undefined event type: '", current(sim)[1, "eventType", with = FALSE],
        "' in module '", current(sim)[1, "moduleName", with = FALSE], "'", sep = ""
      )
    )
  )
  invisible(sim)
}
## event functions
# - keep event functions short and clean, modularize by calling subroutines from section below.
### template initialization
# Initialization: reclassify water and wetlands in the vegetation map using
# the DUCKS Unlimited wetland layer (sim$wetLCC), and — in training mode
# only — build the aggregated raster template (RTM) plus the pixel-ID
# lookup tables used later to assemble the model-fitting data frames.
Init <- function(sim)
{
  # (Commented-out debugging code that clipped vegMap/NFDB_PO/MDC06 to a
  # small test study area was condensed here; see version control history.)
  # wetLCC code for Water 1
  # wetLCC code for Wetlands 2
  # wetLCC code for Uplands 3
  message("Reclassifying water in LCC05...")
  # Keep a module-local copy of the vegetation map for reclassification.
  mod[["vegMap"]] <- sim[["vegMap"]]
  if (is.null(sim[["vegMap"]]))
    stop("vegMap is still NULL. Please debug .inputObjects")
  mod[["vegMap"]][sim$wetLCC == 1] <- 37 # LCC05 code for Water bodies
  mod[["vegMap"]][sim$wetLCC == 2] <- 19 # LCC05 code for Wetlands
  if (P(sim)$train){
    message("train is TRUE, preparing RTM. This should happen only if dataFireSense_EscapeFit \nand dataFireSense_FrequencyFit are not being passed.")
    # Aggregate vegMap to the fitting resolution (P(sim)$res, default 10 km):
    # a coarse cell is 1 when all fine cells are non-NA, NA otherwise.
    mod[["RTM"]] <- Cache(
      aggregate,
      sim[["vegMap"]],
      fact = P(sim)$res / xres(sim[["vegMap"]]),
      fun = function(x, ...) if (anyNA(x)) NA else 1
    )
    # Linear indices of the usable (non-NA) coarse cells.
    mod[["PX_ID"]] <- tibble(PX_ID = which(!is.na(mod[["RTM"]][])))
    # Vector (polygon) version of the RTM with PX_ID attached, used for
    # spatially joining fire points to coarse pixels.
    mod[["RTM_VT"]] <- bind_cols(
      st_as_sf(rasterToPolygons(mod[["RTM"]])),
      mod[["PX_ID"]]
    )
  }
  invisible(sim)
}
# Resample the monthly drought code stack (April-September, 250 m) onto the
# coarse RTM grid: bilinear resampling, masked by the RTM. The result is
# stored in mod$MDC for use when assembling the fitting tables.
# NOTE(review): omitArgs = "destinationPath" keeps the tempdir() path out of
# the Cache key so cached results survive across sessions — do not reorder
# these arguments casually.
PrepThisYearMDC <- function(sim)
{
  mod[["MDC"]] <-
    raster::stack(
      Cache(
        lapply,
        raster::unstack(sim[["MDC_BCR6_NWT_250m"]]),
        postProcess,
        rasterToMatch = mod$RTM,
        destinationPath = tempdir(),
        omitArgs = "destinationPath",
        maskWithRTM = TRUE,
        method = "bilinear",
        datatype = "FLT4S",
        filename2 = NULL
      )
    )
  return(invisible(sim))
}
# Update the land-cover map for the current simulation year and derive the
# land-cover covariates:
#   1. Pixels burned within the last 15 years (from NFDB polygons) are
#      reclassified to LCC05 class 34 ("recent burns").
#   2. Training mode: per-class cover proportions are computed at the coarse
#      (P(sim)$res) resolution into mod$pp_lcc (one column per class cl1..cl39).
#   3. Prediction mode: a binary presence stack of classes 1:32 and 34:35 is
#      stored in sim$LCC (classes 33 and 36:39 are treated as non-burnable).
PrepThisYearLCC <- function(sim)
{
  year <- time(sim, "year")
  #
  # LCC05 with incremental disturbances
  #
  # Fires from the last 15 years up to and including the current year.
  fires_this_year <- sim[["NFDB_PO"]] %>%
    dplyr::filter(YEAR > (year - 15) & YEAR <= year)
  if (nrow(fires_this_year) > 0)
  {
    # Setting the burned pixels of LCC05 to category 34 (recent burns):
    # union the fire polygons, rasterize them onto the vegMap grid with
    # fasterize, then overwrite the covered cells.
    spatialUnified <- as(st_union(fires_this_year),"Spatial")
    spatialDF <- SpatialPolygonsDataFrame(Sr = spatialUnified, data = data.frame(ID = 1), match.ID = FALSE)
    sfDF <- st_as_sf(spatialDF)
    rasDF <- fasterize::fasterize(sf = sfDF, raster = mod[["vegMap"]])
    mod[["vegMap"]][rasDF == 1] <- 34 # LCC05 code for recent burns
    # (A previous, much slower implementation using raster::rasterize with
    # getCover >= 0.5 was left commented out here; condensed for clarity —
    # see version control history.)
  }
  if (P(sim)$train)
  {
    n_lcc <- 39
    # For each LCC class, aggregate vegMap to the coarse resolution and
    # record the fraction of fine cells belonging to that class; bind the
    # per-class columns and drop coarse cells that are NA.
    mod$pp_lcc <-
      lapply(
        1:n_lcc,
        function(cl_i)
        {
          calc_prop_lcc <- function(x, cl = cl_i, na.rm = TRUE)
          {
            # NA if any fine cell is NA, else the class proportion.
            if (anyNA(x)) return(NA)
            sum(x == cl, na.rm = na.rm) / (agg_fact ** 2)
          }
          col_name <- paste0("cl", cl_i)
          agg_fact <- P(sim)$res / xres(mod[["vegMap"]])
          tibble(
            !!col_name := aggregate(
              mod[["vegMap"]],
              fact = agg_fact,
              fun = calc_prop_lcc
            )[]
          )
        }
      ) %>% bind_cols %>% filter_at(2, all_vars(!is.na(.)))
  }
  else
  { # This happens for predicting
    # Binary presence layer per class, named cl1..cl32, cl34, cl35.
    sim[["LCC"]] <- setNames(
      raster::stack(lapply(c(1:32, 34:35), function(x) mod[["vegMap"]] == x)),
      nm = paste0("cl", c(1:32, 34:35))
    )
  }
  invisible(sim)
}
# Build this year's fire table: keep current-year lightning-caused fire
# points from the NFDB, spatially join them to the coarse RTM polygons so
# each fire gets a PX_ID, and store the result (geometry dropped) in
# mod$fires for the frequency/escape data assembly in Run().
PrepThisYearFire <- function(sim)
{
  currentYear <- time(sim, "year")
  NFDB_PT <- sim[["NFDB_PT"]] %>%
    # Filter fire data for the current year
    dplyr::filter(YEAR == currentYear) %>%
    # Drop columns containing info we don't need
    dplyr::select(LATITUDE, LONGITUDE, YEAR, SIZE_HA, CAUSE) %>%
    # Keep only lightning fires
    dplyr::filter(CAUSE == "L")
  # st_join keeps all RTM polygons; !is.na(YEAR) then retains only those
  # that actually intersect a fire point. YEAR is reset to the simulation
  # year before the geometry is dropped.
  mod[["fires"]] <- st_set_geometry(
    mutate(
      filter(
        st_join(mod[["RTM_VT"]], NFDB_PT),
        !is.na(YEAR)
      ),
      YEAR = currentYear
    ),
    NULL
  )
  invisible(sim)
}
# Run event: prepare this year's climate and land-cover inputs.
# In training mode (P(sim)$train == TRUE) it also assembles the
# fireSense_FrequencyFit and fireSense_EscapeFit data tables; in prediction
# mode it only validates/renames the current year's MDC06 layer.
# FIX(review): removed a leftover `browser()` call in the training branch —
# it halted every training run in interactive sessions.
Run <- function(sim){
  if (P(sim)$train)
  {
    sim <- PrepThisYearMDC(sim)
    sim <- PrepThisYearFire(sim)
  } # Fire and MDC only get prepped when train == TRUE, while LCC gets prepped every time the module `fireSense_NWT_DataPrep` runs
  if (is.null(sim$usrEmail))
    warning("If in a non-interactive session, please make sure you supply the object `usrEmail` for google authentication")
  # Fetch/derive the fireSense climate layers for the simulated years, then
  # keep only the layer for the current year.
  sim$MDC06 <- usefun::prepareClimateLayers(authEmail = sim$usrEmail,
                                            pathInputs = inputPath(sim), studyArea = sim$studyArea,
                                            rasterToMatch = sim$rasterToMatch, years = time(sim),
                                            variables = "fireSense", model = "fireSense",
                                            returnCalculatedLayersForFireSense = TRUE,
                                            RCP = P(sim)$RCP,
                                            climateModel = P(sim)$climateModel,
                                            ensemble = P(sim)$ensemble,
                                            climateFilePath = P(sim)$climateFilePath,
                                            fileResolution = P(sim)$climateResolution)
  sim$MDC06 <- sim$MDC06[[paste0("year", time(sim))]]
  sim <- PrepThisYearLCC(sim)
  if (P(sim)$train)
  {
    #
    # Prepare input data for the fireSense_FrequencyFit module: one row per
    # coarse pixel and year, with the fire count, MDC06 and the per-class
    # land-cover proportions.
    #
    sim[["dataFireSense_FrequencyFit"]] <- bind_rows(
      sim[["dataFireSense_FrequencyFit"]],
      bind_cols(
        # Fire counts per pixel; pixels without fires get n_fires = 0.
        mod[["fires"]] %>%
          group_by(PX_ID, YEAR) %>%
          summarise(n_fires = n()) %>%
          ungroup %>%
          right_join(mod[["PX_ID"]], by = "PX_ID") %>%
          mutate(YEAR = time(sim, "year"), n_fires = ifelse(is.na(n_fires), 0, n_fires)),
        # MDC values at the usable pixels; only June (MDC06) is kept.
        rename(
          as_tibble(mod[["MDC"]][mod[["PX_ID"]][["PX_ID"]]]),
          MDC04 = 1,
          MDC05 = 2,
          MDC06 = 3,
          MDC07 = 4,
          MDC08 = 5,
          MDC09 = 6
        ) %>% dplyr::select(MDC06),
        mod[["pp_lcc"]]
      )
    )
    #
    ## Filter out pixels where at least one variable evaluates to 0
    #
    ### Filter out pixels where none of the classes of interest are present
    #
    sim[["dataFireSense_FrequencyFit"]] %<>%
      dplyr::filter(
        (
          dplyr::select(., paste0("cl", c(1:32, 34:35))) %>% # 33, 36:39 do not burn. No need to estimate coefficients, it's 0.
            rowSums()
        ) != 0
      )
    #
    ### Keep only pixels where MDC06 is > 0
    #
    sim[["dataFireSense_FrequencyFit"]] %<>% dplyr::filter(MDC06 > 0)
    #
    # Prepare input data for the fireSense_EscapeFit module: per pixel with
    # fires, the number of fires and how many escaped (> 1 ha).
    #
    fire_escape_data <- mod[["fires"]] %>%
      group_by(PX_ID, YEAR) %>%
      summarise(n_fires = n(), escaped = sum(SIZE_HA > 1)) %>%
      ungroup
    sim[["dataFireSense_EscapeFit"]] <- bind_rows(
      sim[["dataFireSense_EscapeFit"]],
      bind_cols(
        fire_escape_data,
        rename(
          as_tibble(mod[["MDC"]][fire_escape_data[["PX_ID"]]]),
          MDC04 = 1,
          MDC05 = 2,
          MDC06 = 3,
          MDC07 = 4,
          MDC08 = 5,
          MDC09 = 6
        ) %>% dplyr::select(MDC06),
        # Land-cover proportions restricted to the pixels that had fires.
        dplyr::filter(mod[["pp_lcc"]], mod[["PX_ID"]][["PX_ID"]] %in% fire_escape_data[["PX_ID"]])
      )
    )
  }
  else
  {
    if (!is.null(sim[["MDC06"]])){
      names(sim[["MDC06"]]) <- "MDC06" # If is.null(sim[["MDC06"]]), it errors. Coming from (MDC_NWT_DataPrep)! Wasn't defined.
    } else stop("MDC06 is NULL. Possibly a problem in MDC_NWT_DataPrep module")
  }
  return(invisible(sim))
}
.inputObjects <- function(sim) {
  # Supply defaults for this module's declared inputObjects when they were not
  # provided in the simInit call and are not supplied by another module
  # (checked via suppliedElsewhere). Downloads are wrapped in Cache/prepInputs
  # so repeated runs reuse the cached artifacts.
  if (!suppliedElsewhere(object = "rasterToMatch", sim = sim)) {
    sim[["rasterToMatch"]] <- Cache(
      prepInputs,
      url = "https://drive.google.com/open?id=1NIjFbkckG3sewkTqPGaBGQDQLboPQ0wc",
      targetFile = "BCR6_NWT-2.tif",
      destinationPath = tempdir(),
      omitArgs = "destinationPath"
    )
  }
  if (!suppliedElsewhere(object = "studyArea", sim = sim)) {
    sim[["studyArea"]] <- Cache(
      prepInputs,
      url = "https://drive.google.com/open?id=1LUxoY2-pgkCmmNH5goagBp3IMpj6YrdU",
      destinationPath = tempdir(),
      omitArgs = "destinationPath"
    )
  }
  # FIX: use the scalar, short-circuiting `||` (the original used the
  # elementwise `|`, which always evaluates both operands in a scalar `if`).
  if (!suppliedElsewhere(object = "vegMap", sim = sim) ||
      is.null(sim[["vegMap"]])) {
    sim[["vegMap"]] <- LandR::prepInputsLCC(destinationPath = dataPath(sim),
                                            studyArea = sim$studyArea,
                                            rasterToMatch = sim$rasterToMatch)
  }
  if (!suppliedElsewhere(object = "NFDB_PO", sim = sim)) {
    # National fire database polygons, read with sf and cropped to studyArea
    sim[["NFDB_PO"]] <- Cache(
      prepInputs,
      url = "http://cwfis.cfs.nrcan.gc.ca/downloads/nfdb/fire_poly/current_version/NFDB_poly.zip",
      fun = "sf::st_read",
      destinationPath = tempdir(),
      omitArgs = "destinationPath",
      studyArea = sim[["studyArea"]],
      useSAcrs = TRUE,
      filename2 = NULL,
      overwrite = TRUE
    )
  }
  if (!suppliedElsewhere(object = "NFDB_PT", sim = sim)) {
    # National fire database ignition points, same treatment as the polygons
    sim[["NFDB_PT"]] <- Cache(
      prepInputs,
      url = "http://cwfis.cfs.nrcan.gc.ca/downloads/nfdb/fire_pnt/current_version/NFDB_point.zip",
      fun = "sf::st_read",
      destinationPath = tempdir(),
      omitArgs = "destinationPath",
      studyArea = sim[["studyArea"]],
      useSAcrs = TRUE,
      filename2 = NULL,
      overwrite = TRUE
    )
  }
  cacheTags <- c(currentModule(sim), "function:.inputObjects") ## uncomment this if Cache is being used
  dPath <- asPath(getOption("reproducible.destinationPath", dataPath(sim)), 1)
  message(currentModule(sim), ": using dataPath '", dPath, "'.")
  if (!suppliedElsewhere("usrEmail", sim)) {
    # Default Google credentials only for known developer accounts
    sim$usrEmail <- if (pemisc::user() %in% c("tmichele", "Tati")) "tati.micheletti@gmail.com" else NULL
  }
  if (!suppliedElsewhere("wetLCC", sim)) {
    message("wetLCC not supplied. Loading water layer for the NWT...")
    sim$wetLCC <- prepInputs(destinationPath = tempdir(), # Or another directory.
                             omitArgs = "destinationPath",
                             url = "https://drive.google.com/file/d/1YVTcIexNk-obATw2ahrgxA6uvIlr-6xm/view",
                             targetFile = "wetlandsNWT250m.tif",
                             rasterToMatch = sim[["rasterToMatch"]],
                             maskWithRTM = TRUE,
                             filename2 = NULL)
  }
  return(invisible(sim))
}
### add additional events as needed by copy/pasting from above
|
88a9a149360c24f3791b5589c595ad782131343e
|
0c8ea2fb4d959cd1734cf0b9a17e0107250d696b
|
/Neural net with nnet.R
|
a8ab8d4d9483eb421af983b746409e57e8e8675b
|
[] |
no_license
|
aubhik-mazumdar/CS513-Project
|
d5601c3576d0e8472c0c1acfac04abec6bbc6586
|
c6f6cff6a60e671c60197ea48b33503991483adb
|
refs/heads/master
| 2020-04-03T23:47:19.167355
| 2018-12-04T15:35:30
| 2018-12-04T15:35:30
| 155,630,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,498
|
r
|
Neural net with nnet.R
|
# Neural-network regression of car MSRP using the neuralnet and nnet packages.
# Expects "data.csv" in the working directory; after dropping column 10 the
# matrix has 15 columns, with MSRP in column 15.
Car_data <- read.csv("data.csv")
Car_data <- na.omit(Car_data)           # drop rows containing missing values
Car_data <- Car_data[, -10]             # drop column 10 (not used as a predictor)

# Coerce every column to numeric and work with a matrix.
# Renamed from `c`, which shadowed base::c().
car_matrix <- as.matrix(sapply(Car_data, as.numeric))
car_scaled <- scale(car_matrix)         # computed but unused (kept from original)

# 75/25 train/test split; seed added so the split is reproducible
set.seed(513)
index <- sample(nrow(car_matrix), as.integer(.75 * nrow(car_matrix)))
Test_dataset <- car_matrix[-index, ]
Training_dataset <- car_matrix[index, ]

library("neuralnet")
net_bc <- neuralnet(MSRP ~ Make + Model + Year + Engine.Fuel.Type + Engine.HP +
                      Engine.Cylinders + Transmission.Type + Driven_Wheels +
                      Number.of.Doors + Vehicle.Style + highway.MPG + city.mpg +
                      Popularity,
                    Training_dataset, hidden = 5, threshold = 0.1)
plot(net_bc)
################################################################################################
library(nnet)
res <- nnet(MSRP ~ .,
            data = Training_dataset,
            size = 10, linout = TRUE, skip = TRUE, MaxNWts = 10000,
            trace = FALSE, maxit = 100)
pred <- predict(res, newdata = Test_dataset[, -15])  # column 15 is MSRP
pred <- round(pred)
# Mean absolute error on the held-out set
mean_error <- sum(abs(pred - Test_dataset[, 15])) / length(pred)
library(Metrics)
err <- rmse(Test_dataset[, 15], pred)
err
mean_error
mean_percent <- mean_error / mean(Test_dataset[, 15])
mean_percent
# BUG FIX: the original referenced the undefined `test_set[, 16]`
error <- as.data.frame(pred - Test_dataset[, 15])
################################################################################################
net_bc2 <- compute(net_bc, Test_dataset[, c(-10, -15)])
net_bc2
ANN <- as.numeric(net_bc2$net.result)
View(ANN)
table(ANN)
ANN_round <- round(ANN)
table(ANN_round)
ANN
table(Actual = Test_dataset[, 15], ANN_round)
|
51020ed0110eb9d925641564f2887b59558f2196
|
e39cd762e483cb80774aec5c89a333e3ff4cfa36
|
/maitri_ref.R
|
833bb00e9a736b6e5022fefa8bd16f8bf7673bf2
|
[] |
no_license
|
CodeFire98/impCodes
|
eaad7171bd77f489822f8998b5bed35c7fa947ca
|
9740a3d53eedd71718e60d8b6f0e969f15afcdcb
|
refs/heads/master
| 2020-03-21T07:02:42.985096
| 2018-06-22T05:10:38
| 2018-06-22T05:10:38
| 138,257,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,947
|
r
|
maitri_ref.R
|
# Plot AWS (automatic weather station) observations — temperature, air
# pressure, wind speed, relative humidity — as faceted time series and save
# the figure to a JPEG. Expects `Final_File` to exist in the calling
# environment (presumably loaded by an earlier script — TODO confirm).
library(ggplot2)
library(reshape)    # kept from original; reshape2 (loaded below) masks its melt()
library(scales)
library(reshape2)

maitri <- Final_File
head(maitri)

# Parse observation timestamps (day/month/year hour:minute)
maitri$obstime <- strptime(maitri$obstime, format = "%d/%m/%Y %H:%M")

# Restrict to 2012-2015 and keep only the variables of interest
gg <- subset(maitri, obstime >= '2012-01-01 00:00:00' & obstime <= '2015-12-30 23:59:59')
gg <- gg[-c(5)]
gg <- gg[, c("obstime", "tempr", "ap", "ws", "rh")]
names(gg) <- c("obstime", "Temperature", "Air Pressure", "Wind Speed", "Relative Humidity")
dt <- "Full"

# Long format: one (obstime, variable, value) row per measurement
gg$obstime <- as.POSIXct(gg$obstime)
gg <- melt(gg, "obstime")

jpeg(filename = paste("imd_bharati2", dt, ".jpeg", sep = ""), width = 1200, height = 600)
# BUG FIX: the original passed `group==1` (an unnamed comparison) into aes(),
# which is rejected by ggplot2; the intent was the `group = 1` idiom.
# print() is required for the plot to render when this script is source()d.
print(
  ggplot(gg, aes(obstime, value, colour = variable, group = 1)) + geom_line() +
    facet_wrap(~ variable, ncol = 1, scales = "free_y") +
    ggtitle("Bharati-AWS data Full") +
    theme(plot.title = element_text(family = "Trebuchet MS", color="red", face="bold", size=15, hjust=0)) +
    theme(axis.title = element_text(family = "Trebuchet MS", color="red", face="bold", size=12)) +
    theme(plot.title = element_text(hjust = 0.5)) +
    theme(panel.background = element_rect(fill = "#f6f6f6",colour = "#f6f6f6",size = 0.5, linetype = "solid"),
          panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "lightblue"),
          panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "white")) +
    theme(plot.background = element_rect(fill = "#fafafa")) +
    theme(strip.text.x = element_text(colour = "red", size = 15,hjust = 0.5, vjust = 0.5))
)
dev.off()
|
b5d9949e43c3977c30ea4abe918eaeb7c58f8bae
|
38261102ce03c8e67f96f72f2a87ac0378a3f607
|
/cytoscapeJsSimpleNetwork.R
|
6e47e1b96d0be6b8b8949738a37b8e8d44f597e7
|
[
"Apache-2.0"
] |
permissive
|
abhik1368/r-cytoscape.js
|
118c3e0d64b619263b7fabbed5db51ebb7851e0a
|
5e22fae5dc16bd0efb0eb5484a229bc0e003d2bc
|
refs/heads/master
| 2021-01-17T13:03:55.990784
| 2014-11-22T01:31:13
| 2014-11-22T01:31:13
| 30,137,434
| 0
| 1
| null | 2015-02-01T05:35:43
| 2015-02-01T05:35:43
| null |
UTF-8
|
R
| false
| false
| 9,326
|
r
|
cytoscapeJsSimpleNetwork.R
|
#' Generate a CytoscapeJS compatible network
#'
#' @param nodeData a data.frame with at least two columns: id and name
#' @param edgeData a data.frame with at least two columns: source and target
#' @param nodeColor a hex color for nodes (default: #888888)
#' @param nodeShape a shape for nodes (default: ellipse)
#' @param edgeColor a hex color for edges (default: #888888)
#' @param edgeSourceShape a shape for arrow sources (default: none)
#' @param edgeTargetShape a shape for arrow targets (default: triangle)
#' @param nodeHref a default URL attached to every node (default: "")
#'
#' @return a list with two entries:
#'   nodes: a JSON string with node information compatible with CytoscapeJS
#'   edges: a JSON string with edge information compatible with CytoscapeJS
#'
#' If either data.frame has no rows or lacks its required columns, NULL is
#' returned.
#'
#' @details See http://cytoscape.github.io/cytoscape.js/ for shape details.
#'   NOTE: values are spliced into the output unescaped; single quotes in
#'   ids/names will break the generated JavaScript.
#'
#' @examples
#' id <- c("Jerry", "Elaine", "Kramer", "George")
#' name <- id
#' nodeData <- data.frame(id, name, stringsAsFactors=FALSE)
#'
#' source <- c("Jerry", "Jerry", "Jerry", "Elaine", "Elaine", "Kramer", "Kramer", "Kramer", "George")
#' target <- c("Elaine", "Kramer", "George", "Jerry", "Kramer", "Jerry", "Elaine", "George", "Jerry")
#' edgeData <- data.frame(source, target, stringsAsFactors=FALSE)
#'
#' network <- createCytoscapeNetwork(nodeData, edgeData)
createCytoscapeNetwork <- function(nodeData, edgeData,
                                   nodeColor="#888888", nodeShape="ellipse",
                                   edgeColor="#888888", edgeSourceShape="none",
                                   edgeTargetShape="triangle", nodeHref="") {
    # There must be nodes and nodeData must have at least id and name columns
    if(nrow(nodeData) == 0 || !(all(c("id", "name") %in% names(nodeData)))) {
        return(NULL)
    }

    # There must be edges and edgeData must have at least source and target columns
    if(nrow(edgeData) == 0 || !(all(c("source", "target") %in% names(edgeData)))) {
        return(NULL)
    }

    # Serialize every row of df as "{ data: { col:'value', ... } }" and join
    # the rows with ", " (preallocated via vapply instead of growing with c()).
    serializeRows <- function(df) {
        entries <- vapply(seq_len(nrow(df)), function(i) {
            fields <- vapply(colnames(df),
                             function(col) paste0(col, ":'", df[i, col], "'"),
                             character(1))
            paste0("{ data: { ", paste(fields, collapse = ", "), "} }")
        }, character(1))
        paste(entries, collapse = ", ")
    }

    # NODES
    ## Add color/shape/href columns if not present (column order matters for
    ## the serialized output: id, name, color, shape, href)
    if(!("color" %in% colnames(nodeData))) {
        nodeData$color <- rep(nodeColor, nrow(nodeData))
    }
    if(!("shape" %in% colnames(nodeData))) {
        nodeData$shape <- rep(nodeShape, nrow(nodeData))
    }
    if(!("href" %in% colnames(nodeData))) {
        nodeData$href <- rep(nodeHref, nrow(nodeData))
    }

    # EDGES
    ## Add color/shape columns if not present.
    ## BUG FIX: the original tested for "sourceShape"/"targetShape" but the
    ## column actually written (and read by data(edgeSourceShape)/
    ## data(edgeTargetShape) in the stylesheet) is edgeSourceShape/
    ## edgeTargetShape, so caller-supplied per-edge shapes were clobbered.
    if(!("color" %in% colnames(edgeData))) {
        edgeData$color <- rep(edgeColor, nrow(edgeData))
    }
    if(!("edgeSourceShape" %in% colnames(edgeData))) {
        edgeData$edgeSourceShape <- rep(edgeSourceShape, nrow(edgeData))
    }
    if(!("edgeTargetShape" %in% colnames(edgeData))) {
        edgeData$edgeTargetShape <- rep(edgeTargetShape, nrow(edgeData))
    }

    network <- list(nodes=serializeRows(nodeData), edges=serializeRows(edgeData))
    return(network)
}
#' Generate an HTML string for a network visualized using CytoscapeJS
#'
#' @param nodeEntries a string with JSON for node information for CytoscapeJS
#' @param edgeEntries a string with JSON for edge information for CytoscapeJS
#' @param standAlone a boolean whether to produce a single page with embedded network;
#' set to FALSE for Shiny (default: FALSE)
#' @param layout a string describing the layout (default: "cola")
#' @param height an integer height in pixels for network (default: 600)
#' @param width an integer width in pixels for network (default: 600)
#' @param injectCode a string of JavaScript statements inserted into the
#' CytoscapeJS ready() callback, after the network object is created (default: "")
#'
#' @details
#' Layouts: http://cytoscape.github.io/cytoscape.js/#layouts
#'
#' @return when standAlone is TRUE, a string with a complete HTML file containing
#' the network; otherwise the CSS/script fragment is written to the console with
#' cat() (for use inside knitr/Shiny "asis" output) and nothing useful is returned
#'
#' @examples
#' id <- c("Jerry", "Elaine", "Kramer", "George")
#' name <- id
#' nodeData <- data.frame(id, name, stringsAsFactors=FALSE)
#'
#' source <- c("Jerry", "Jerry", "Jerry", "Elaine", "Elaine", "Kramer", "Kramer", "Kramer", "George")
#' target <- c("Elaine", "Kramer", "George", "Jerry", "Kramer", "Jerry", "Elaine", "George", "Jerry")
#' edgeData <- data.frame(source, target, stringsAsFactors=FALSE)
#'
#' network <- createCytoscapeNetwork(nodeData, edgeData)
#'
#' output <- cytoscapeJsSimpleNetwork(network$nodes, network$edges, standAlone=TRUE)
#' fileConn <- file("cytoscapeJsR_example.html")
#' writeLines(output, fileConn)
#' close(fileConn)
cytoscapeJsSimpleNetwork <- function(nodeEntries, edgeEntries,
standAlone=FALSE, layout="cola",
height=600, width=600, injectCode="") {
# Static page header: loads jQuery, CytoscapeJS and its layout plugins from CDNs
PageHeader <- "
<!DOCTYPE html>
<html>
<head>
<meta name='description' content='[An example of getting started with Cytoscape.js]' />
<script src='http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js'></script>
<script src='http://cytoscape.github.io/cytoscape.js/api/cytoscape.js-latest/cytoscape.min.js'></script>
<script src='http://cytoscape.github.io/cytoscape.js/api/cytoscape.js-latest/arbor.js'></script>
<script src='http://cytoscape.github.io/cytoscape.js/api/cytoscape.js-latest/cola.v3.min.js'></script>
<script src='http://cytoscape.github.io/cytoscape.js/api/cytoscape.js-latest/springy.js'></script>
<script src='http://cytoscape.github.io/cytoscape.js/api/cytoscape.js-latest/dagre.js'></script>
<meta charset='utf-8' />
<title>Cytoscape.js in R Example</title>"
# CSS for the #cy container: fill the page when standAlone, otherwise a fixed
# height x width box for embedding.
# NOTE(review): 'top: 200' lacks CSS units — likely ignored by browsers; confirm intent.
if(standAlone) {
NetworkCSS <- "<style>
#cy {
height: 100%;
width: 100%;
position: absolute;
left: 0;
top: 200;
border: 2px solid;
}
</style>"
} else {
NetworkCSS <- paste0("<style>
#cy {
height: ", height, "px;
width: ", width, "px;
position: relative;
left: 0;
top: 200;
border: 2px solid;
}</style>")
}
# Main script for creating the graph: splices nodeEntries/edgeEntries, the
# layout name, and injectCode into the CytoscapeJS initialization call
MainScript <- paste0("
<script>
$(function(){ // on dom ready
$('#cy').cytoscape({
style: cytoscape.stylesheet()
.selector('node')
.css({
'content': 'data(name)',
'text-valign': 'center',
'color': 'white',
'text-outline-width': 2,
'shape': 'data(shape)',
'text-outline-color': 'data(color)',
'background-color': 'data(color)'
})
.selector('edge')
.css({
'line-color': 'data(color)',
'source-arrow-color': 'data(color)',
'target-arrow-color': 'data(color)',
'source-arrow-shape': 'data(edgeSourceShape)',
'target-arrow-shape': 'data(edgeTargetShape)'
})
.selector(':selected')
.css({
'background-color': 'black',
'line-color': 'black',
'target-arrow-color': 'black',
'source-arrow-color': 'black'
})
.selector('.faded')
.css({
'opacity': 0.25,
'text-opacity': 0
}),
elements: {
nodes: [",
nodeEntries,
"],
edges: [",
edgeEntries,
"]
},
layout: {
name: '", layout, "',
padding: 10
},
ready: function() {
window.cy = this;
//Injected options
",
injectCode
, "
cy.on('tap', 'node', function(){
if(this.data('href').length > 0) {
window.open(this.data('href'));
}
//console.log(this.data('href'));
});
}
});
}); // on dom ready
</script>")
# Minimal body containing only the #cy container div
PageBody <- "</head><body><div id='cy'></div>"
PageFooter <- "</body></html>"
# standAlone: return the complete document; otherwise cat() the fragment so it
# can be captured by knitr/Shiny "results='asis'" contexts
if(standAlone) {
results <- paste0(PageHeader, NetworkCSS, MainScript, PageBody, PageFooter)
return(results)
} else {
results <- paste0(NetworkCSS, MainScript)
cat(results)
}
}
|
28e183dc36baf6a2ef47426a889289118c80a258
|
560bac9c5ab4c6d5bcc5266c07be3b574f3ede7d
|
/cachematrix.R
|
2ee8b48bfa03cf75a23ccedc67b8b3b96bb9bed4
|
[] |
no_license
|
Zeunouq/ProgrammingAssignment2
|
bacff9edc45adf0ef3ef3af9126bd65e7e48c9c9
|
cf936ce980f729ce7188791de49c02a9910e7562
|
refs/heads/master
| 2020-03-07T01:12:53.824862
| 2018-03-29T08:34:47
| 2018-03-29T08:34:47
| 127,177,813
| 0
| 0
| null | 2018-03-28T17:52:42
| 2018-03-28T17:52:41
| null |
UTF-8
|
R
| false
| false
| 1,534
|
r
|
cachematrix.R
|
## The function cachematrix.R is a set of two functions 1- makeCacheMatrix and 2- cacheSolve
## that allow to calculate the inverse of a matrix. If the contents of the matrix are not changing,
## the function will cache the value of the inverse so that when we need it again, it can
## be looked up in the cache rather than recomputed.
## makeCacheMatrix builds a special "matrix" wrapper: a list of four accessor
## closures sharing the matrix `x` and a cached inverse. Replacing the matrix
## via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # A new matrix makes any previously cached inverse stale
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinvmatrix <- function(solve) {
    cached_inv <<- solve
  }
  getinvmatrix <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinvmatrix = setinvmatrix,
    getinvmatrix = getinvmatrix
  )
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. A previously computed inverse (for an unchanged matrix) is
## served from the cache with a message; otherwise the inverse is computed
## with solve(), stored via setinvmatrix(), and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinvmatrix()
  if (is.null(cached)) {
    # Cache miss: compute, remember, return
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinvmatrix(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
d9b877cf6194a91d981a7d640a15f8e47abd1a1e
|
33bbdfdef10d809d4f0b4101fd89c6ad84980183
|
/R/methods_config.R
|
ba4ef0959a37e5afe84b17b81da00b07dc7c8522
|
[] |
no_license
|
Shelly-Lin-97/scRNAIdent
|
fdde83e50d9f1f898d0a533e93996fad6040516d
|
b5fb9090ca0a024dcee44f178d8168c733b31593
|
refs/heads/master
| 2023-01-13T17:58:15.310405
| 2020-11-17T17:02:56
| 2020-11-17T17:02:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
methods_config.R
|
# Per-method hyper-parameter configurations, one named list per method.
# Element names mirror each method's tuning arguments; presumably consumed by
# the corresponding wrapper functions elsewhere in this project — TODO confirm.

# scmap: number of selected features, similarity threshold, RNG seed
methods.config.scmap <- list(nfeatures=500,threshold=0.5,seed=1)
# Seurat: number of variable features, PCA dimensions, clustering resolution
methods.config.seurat <- list(nfeatures=2000,pc_dims=10,resolution=0.5)
# TSCAN: coefficient-of-variation cutoff and cluster count k
methods.config.tscan <- list(cvcutoff=0.01,k=8)
# SC3: number of selected features and cluster count k
methods.config.sc3 <- list(nfeatures=500,k=8)
# cellassign: optimizer learning rate, shrinkage flag, marker-gene selection method
methods.config.cellassign <- list(learning_rate=1e-2,shrinkage=TRUE,marker_gene_method='seurat')
|
037521473cc9d06024295aabf5415e86ca0c8367
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/statistics/misc/dist-overlap.R
|
b63476cd3cfa8e6be931211245966291d1986c1e
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 718
|
r
|
dist-overlap.R
|
#
# dist-overlap.R, 2 Dec 15
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones

source("ESEUR_config.r")

pal_col <- diverge_hcl(2)

# Two unit-sd normal densities whose means sit 2*1.96 standard deviations apart
sd_a <- 1
x_vals <- seq(-0.96, 1 + 2 * 1.96, by = 0.01)
dens_a <- dnorm(x_vals, mean = 0, sd = sd_a)
dens_b <- dnorm(x_vals, mean = 2 * sd_a * 1.96, sd = 1)

# Draw density A with no axes or labels
plot(x_vals, dens_a, type = "l", col = pal_col[2], bty = "n",
     xaxt = "n", yaxt = "n",
     xlab = "", ylab = "",
     xlim = range(x_vals), ylim = c(0, 0.5))
text(0, 0.2, "A", cex = 2, col = pal_col[2])

# Overlay density B
lines(x_vals, dens_b, col = pal_col[1])
text(2 * sd_a * 1.96, 0.2, "B", cex = 2, col = pal_col[1])

# Shade the part of B lying below A's upper 1.96 cutoff
keep <- x_vals <= 1.96
polygon(c(x_vals[keep], 1.96, 0), c(dens_b[keep], 0, 0), col = "red")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.