blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b70f41906a6dfbea830d90663deceae03012f7e
|
3ecd225072f2bfbb1ac74387391c23c9cf1add62
|
/man/treeSummarizedExperiment-class.Rd
|
dc7f7ae95ad8adcf0e76237e8cf187db58bc1528
|
[] |
no_license
|
jkanche/TreeSummarizedExperiment
|
7f4574ef9e9584fcad23be7f85475fd56b64d61d
|
566509a1fd43004c001022de70b2b06e593a0731
|
refs/heads/master
| 2020-05-03T03:30:26.951062
| 2019-03-28T14:01:42
| 2019-03-28T14:01:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,193
|
rd
|
treeSummarizedExperiment-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allClass.R
\docType{class}
\name{TreeSummarizedExperiment-class}
\alias{TreeSummarizedExperiment-class}
\title{An S4 class TreeSummarizedExperiment}
\description{
The class \strong{TreeSummarizedExperiment} is an extension class of standard
\code{\link[SummarizedExperiment]{SummarizedExperiment-class}} class. It has
five slots. Four of them are traditional slots from
\code{\link[SummarizedExperiment]{SummarizedExperiment-class}} class:
\code{assays}, \code{rowData}, \code{colData} and \code{metadata}. The new
slot is \code{treeData} that is to store the hierarchical information of rows
(or columns or both) of \code{assays} tables.
}
\details{
The class \strong{TreeSummarizedExperiment} is designed to store
rectangular data for entities (e.g., microbes or cell types)
(\code{assays}), information about the hierarchical structure
(\code{treeData}), and the mapping information between the rows (or
columns, or both) of the rectangular data and the tree nodes
(\code{linkData}). Users could provide hierarchical information on the rows
or columns (or both) of the \code{assay} tables, and the \code{linkData}
will be automatically integrated as one part of the \code{rowData} or
\code{colData} or both, respectively. When the \code{linkData} is added to
\code{rowData} or \code{colData}, a class \code{LinkDataFrame} is used to
store data instead of \code{DataFrame}. Please see the page
\code{\link{LinkDataFrame}} for more details.
}
\section{Slots}{
\describe{
\item{\code{treeData}}{A list of phylo objects. It gives information about the
hierarchical structure of rows or columns of \code{assay} tables.}
\item{\code{...}}{Other slots from
\code{\link[SummarizedExperiment]{SummarizedExperiment-class}}}
}}
\section{Constructor}{
See \code{\link{TreeSummarizedExperiment-constructor}} for constructor
functions.
}
\section{Accessor}{
See \code{\link{TreeSummarizedExperiment-accessor}} for accessor functions.
}
\seealso{
\code{\link{TreeSummarizedExperiment}}
\code{\link{TreeSummarizedExperiment-accessor}}
\code{\link[SummarizedExperiment]{SummarizedExperiment-class}}
}
|
94496d61f85dabd4a3fd612477dfb7fbc9ded386
|
e4390e4480398bcb82206b705e68711e6a328297
|
/cachematrix.R
|
d17afbd1d21ddfa13d8a85ee59ac2f69e181a3ee
|
[] |
no_license
|
linwood81/ProgrammingAssignment2
|
cce8430f99ae4cc53cc2bddbde6109dab7b355a1
|
1c3d1762099c69eabd1b61d229df2919c81d4dd1
|
refs/heads/master
| 2021-01-21T05:40:22.711963
| 2016-07-26T04:19:14
| 2016-07-26T04:19:14
| 63,561,222
| 0
| 0
| null | 2016-07-18T01:33:33
| 2016-07-18T01:33:33
| null |
UTF-8
|
R
| false
| false
| 1,016
|
r
|
cachematrix.R
|
## These functions are used to store a matrix and to cache the value of its inverse.
## This function consists of a list of functions to set the value of a matrix, get
## the value of a matrix, set the value of a matrix inverse, and get the value of
## a matrix inverse.
## Build a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor closures that share one enclosing
## environment: set()/get() for the matrix itself, setinv()/getinv()
## for the cached inverse. Storing a new matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stale inverse must be recomputed
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function creates the inverse of the matrix created with the above function. If the
## inverse has already been created the function returns the cached value. Otherwise, it
## creates the inverse and sets the result in the cache.
## Return the inverse of the matrix held by a makeCacheMatrix() object.
##
## On a cache hit the stored inverse is returned immediately (after a
## message); otherwise the inverse is computed with solve(), written back
## into the cache via x$setinv(), and returned. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    source_matrix <- x$get()
    cached <- solve(source_matrix, ...)
    x$setinv(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
267b8f90585b383178200aa0df9ffa6c6b3d2edc
|
7aaaf4a86c19bceba709168c413f62367c1fc630
|
/generate_synth_data.R
|
8231d2b7bec2d7556bbba1afa6ff00f15d868cdf
|
[] |
no_license
|
ChrisMattSam/once
|
69c2482b3231617a105e0ad38e68f6561eacc64a
|
81c5e63c758b4ad1544bba949239e7b6177c3e7b
|
refs/heads/master
| 2021-02-09T02:09:22.336269
| 2020-03-31T21:44:29
| 2020-03-31T21:44:29
| 244,226,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,551
|
r
|
generate_synth_data.R
|
# Script: generate synthetic versions of the ADM/RSM datasets with synthpop,
# validate the synthesized occupation codes, and save the results as .RData.
# NOTE(review): the two bare string literals below are no-op expressions used
# as informal metadata; proper comments would be more conventional.
'author: Christopher Matthew Sampah'
'date: 20190628'
# NOTE(review): rm(list = ls()) in a script clears the caller's workspace as a
# side effect -- generally considered an anti-pattern; consider removing.
rm(list = ls())
gc()
library(data.table)
library(plyr)
library(nnet)
library(ggplot2)
library(MASS)
library(lattice)
library(synthpop)
library(tictoc)
# Network-share root holding the input data and helper functions.
dir <- '//Pii_baboon/f/csampah/temp task'
set.seed(8)  # fixed seed so the synthesis is reproducible
tic('Read-in datasets to be synthesized and necessary functions:')
adm <- as.data.table(fread(paste0(dir,'/data_files/ADM1812.csv')))
rsm <- as.data.table( fread(paste0(dir,'/data_files/RSM1812.csv')))
# Provides format.data.for.synth() and format.data.for.save() -- defined
# externally; their contracts are not visible here.
source(paste0(dir,'/generation_fxns.R'))
toc()
# Small 1000-row sample used as a quick smoke test of the full pipeline.
temp <- format.data.for.synth(adm[sample(1000)])
tic('Format the data for synthesis')
adm.pre.syn <- format.data.for.synth(adm)
rsm.pre.syn <- format.data.for.synth(rsm)
toc()
#generate the synthetic data
load('//path/var_mtx.Rdata') # var.mtx specifies to the algorithm what other vars to use to make synthetic values for a var
syn.cols <- c('DIEUS_DT','PG_CD_num','PRI_DOD_OCC_CD','DTY_DOD_OCC_CD', 'SVC_CD')
tic(paste('Generate synthetic dataset for original dataset of',nrow(temp),'rows') )
temp.s <- syn(temp[,..syn.cols],predictor.matrix = var.mtx)
toc()
tic(paste('Generate synthetic dataset for original dataset of',nrow(adm.pre.syn),'rows') )
adm.s <- syn(adm.pre.syn[,..syn.cols],predictor.matrix = var.mtx)
toc()
tic(paste('Generate synthetic dataset for original dataset of',nrow(rsm.pre.syn),'rows') )
rsm.s <- syn(rsm.pre.syn[,..syn.cols],predictor.matrix = var.mtx)
toc()
#format and save synthetic data
temp.syn <- format.data.for.save(as.data.table(temp.s$syn), adm)
adm.syn <- format.data.for.save(as.data.table(adm.s$syn), adm)
rsm.syn <- format.data.for.save(as.data.table(rsm.s$syn), rsm)
#PRI/DTY_DOD_OCC_CD's generated synthetically MUST be among real possible values, not random large ints
#for(datum in c(adm.syn, rsm.syn))
# Pool of legitimate occupation codes observed in either real dataset.
all.codes <- unique(c(adm$PRI_DOD_OCC_CD, adm$DTY_DOD_OCC_CD,rsm$PRI_DOD_OCC_CD, rsm$DTY_DOD_OCC_CD))
# Abort if any synthesized row carries a code not seen in the real data.
stopifnot(temp.syn[!(PRI_DOD_OCC_CD %in% all.codes) | !(DTY_DOD_OCC_CD %in% all.codes),.N]==0)
stopifnot(adm.syn[!(PRI_DOD_OCC_CD %in% all.codes) | !(DTY_DOD_OCC_CD %in% all.codes),.N]==0)
stopifnot(rsm.syn[!(PRI_DOD_OCC_CD %in% all.codes) | !(DTY_DOD_OCC_CD %in% all.codes),.N]==0)
#save R objects for faster load in
save(adm.syn, file = paste0(dir,'/synthetic_data/r_data_objects/adm_synthetic.RData'))
save(rsm.syn, file = paste0(dir,'/synthetic_data/r_data_objects/rsm_synthetic.RData'))
save(adm.s, file = paste0(dir,'/synthetic_data/r_data_objects/adm_synthetic_object.RData'))
save(rsm.s, file = paste0(dir,'/synthetic_data/r_data_objects/rsm_synthetic_object.RData'))
|
d762cfdeaab1f222775a29381591de906b1ab9d8
|
4f7d405a0f58a965f5d3e7c03429020d377805c9
|
/R/getLocationsInRange.R
|
06cbf3037beac58f76942a2a07d8bfd1c57e88be
|
[] |
no_license
|
AFIT-R/instaExtract
|
7fa8e8c3441f5da5ce8d327ede36b3fb56d49188
|
360c22eecc78693dccde44846ccad6b72b16637d
|
refs/heads/master
| 2021-09-09T11:10:48.115188
| 2018-03-15T13:11:32
| 2018-03-15T13:11:32
| 112,007,664
| 0
| 3
| null | 2018-03-15T13:04:40
| 2017-11-25T13:59:16
|
HTML
|
UTF-8
|
R
| false
| false
| 1,715
|
r
|
getLocationsInRange.R
|
#'@title Get Locations In Range
#'
#'@description Filters a location mapping down to the locations whose
#'haversine distance from the given center point is at most \code{r}.
#'
#'@param mapping A location mapping in the form generated by createLocationMapping
#'@param r Radius of search from given coordinates
#'@param lat Double for latitude of search center
#'@param long Double for longitude of search center
#'@param ... Currently unused. NOTE(review): the original description
#'("passed to a shinyAppDir") looks copy-pasted from elsewhere; the body
#'never forwards \code{...} -- confirm intent.
#'@import dplyr
#'
#'@return n x 9 DF where n is the number of locations in the scope: \cr
#'id, name, slug, city_ID, city_Name, city_slug,country_ID, country_Name, country_Slug
#'
#'
#'@examples
#'\dontrun{
#'mapping <- createLocationMapping("United States", "New York", TRUE)
#'
#'lat <- 40.7484
#'long <- 73.9857
#'radius <- 0.5 #miles
#'
#'smaller_mapping <- getLocationsInRange(mapping, radius, lat, long)}
#'@export
# Get Locations In Range
#
# returns the locations in Location Mapping within radius r of (lat, long)
#
#INPUTS:
#mapping - A location mapping in the form generated by createLocationMapping
#r - Radius of search from given coordinates
#lat - Double for latitude of search center
#long - Double for longitude of search center
#
#OUTPUTS:
#
# n x 9 DF where n is the number of locations in the scope:
# id, name, slug, city_ID, city_Name, city_slug,country_ID, country_Name, country_Slug
getLocationsInRange <- function(mapping, r, lat, long, ...){
# Validate the three numeric inputs up front; mapping is assumed to carry
# latitude/longitude columns (not checked here -- TODO confirm upstream).
if(!is.numeric(r) || !is.numeric(lat) || !is.numeric(long)){
stop("r, lat, and long, must be numeric")
}
#stop the binding note
# These quosure assignments exist only to silence R CMD check's "no visible
# binding" NOTE; inside filter() the data-masked columns of `mapping` take
# precedence over these local bindings.
latitude <- dplyr::quo(latitude)
longitude <- dplyr::quo(longitude)
# Keep rows within radius r of the search center. haversineDistance() is
# defined elsewhere in the package; presumably returns distance in the same
# units as r -- verify against its definition.
mapping <- filter(mapping, haversineDistance(latitude, longitude, lat, long) <= r)
return(mapping)
}
|
f4a00ee79983b144623d28cd303a2fffb20efce9
|
a623ec4bece7f0a3de574b22cd1a78cea185754d
|
/coRps_ggplot_week_3.R
|
ace3324ea39e39079cea5cd59df97468721abbf5
|
[] |
no_license
|
cadelson/coRps
|
53198a87513e9dbc4f88f2517f0f29edba120101
|
207eb9011c0d702a53cca753a84a00c4d0aa940b
|
refs/heads/master
| 2021-03-26T20:14:54.829418
| 2020-09-18T12:47:45
| 2020-09-18T12:47:45
| 247,746,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,088
|
r
|
coRps_ggplot_week_3.R
|
---
title: "ggplot Exercises | 2020-04-13"
output: html_document
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## Overivew
#Below are three questions to work on to test our ggplot skills.
# NOTE(review): this file is R Markdown content saved with a .R extension; it
# will not run as a plain R script (the ``` fences are syntax errors in R).
## Setup
```{r}
#Dependencies
library(tidyverse)
library(scales)
#import data
df_trends <- read_csv("https://raw.githubusercontent.com/USAID-OHA-SI/coRps/master/2020-04-13/FY18-20_Jupiter_txtrends.csv")
df_linkage <- read_csv("https://raw.githubusercontent.com/USAID-OHA-SI/coRps/master/2020-04-13/FY19_Saturn_linkage.csv")
df_achievement <- read_csv("https://raw.githubusercontent.com/USAID-OHA-SI/coRps/master/2020-04-13/FY20_Neptune_txnew_achievement.csv")
```
## Linkage
#The USAID/Saturn team is interested in knowing where partners are seeing less than ideal linkage to treatment. Using `df_linkage`, help them create a scatter plot that looks at testing and treatment, flagging where they're far from a 1:1 relationship. Consider using different aesthetics to convey a message to your audience.
glimpse(df_linkage)
# Scatter of positives tested vs newly on treatment; the 1:1 reference line
# highlights partners far from ideal linkage.
ggplot(df_linkage)+
geom_point(aes(HTS_TST_POS, TX_NEW, color=linkage))+
geom_abline(intercept=0, slope=1)
# NOTE(review): the line below is evaluated on its own -- a `+` is missing
# after geom_abline(), so facet_wrap() is never added to the plot.
facet_wrap(.~primepartner)
## Treatment Trends
#A number of questions have come up recently about treatment trends in Jupiter. You will want to use `df_trends` to review trends in TX_CURR, TX_NEW, and TX_NET_NEW from FY18 to the present visualize through a line chart. Consider using different aesthetics to convey a message to your audience.
glimpse(df_trends)
# One line panel per indicator across reporting periods.
ggplot(df_trends)+
geom_line(aes(period, indicator))+
facet_wrap(.~indicator, nrow=3)
```
## Target Achievement
#In preparation for the FY20Q1 POART, you want to assess different partners' achievement for TX_NEW by sex in Neptune. You will want to show both the real results/targets as well as the percent achievement. Consider using different aesthetics to convey a message to your audience.
glimpse(df_achievement)
# NOTE(review): `stat_count=cumulative` is not a valid geom_bar() argument
# (there is no stat_count parameter and no `cumulative` object in scope);
# presumably stat = "identity" or similar was intended -- confirm.
ggplot(df_achievement)+
geom_bar(aes(indicator), stat_count=cumulative)
|
09c4cc5c968ac29590ebd66646b22fd1f9735018
|
2e82eecaecb360e88e883d7841ba53bf856f1e43
|
/Workout03/binomial/man/bin_cumulative.Rd
|
753a754bc8bd0664a9606b7aece85a77f5884c9c
|
[] |
no_license
|
stat133-sp19/hw-stat133-devnavani
|
055c02c63ae361bb31a68f97d5f8db4aab95d3c5
|
a3143c9141220c58093bdbf5bbe33afe6560b961
|
refs/heads/master
| 2020-04-28T14:48:33.757350
| 2019-05-04T00:56:41
| 2019-05-04T00:56:41
| 175,349,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 602
|
rd
|
bin_cumulative.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{binomial cumulative}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{how many trials}
\item{prob}{probability of a success in any given trial}
}
\value{
data frame with the probability distribution: successes in the first column, probability in the second, and cumulative in the third
}
\description{
gives data frame with the corresponding probability distribution with cumulative stats
}
\examples{
bin_cumulative(5, 0.5)
}
|
f5f9f7a1efe49fa19eb5449d79022a1582e39e29
|
b19d7f066a66952895c841155bcbe48ecdb5b3dc
|
/man/count_pct.Rd
|
da1885e2e5cfcb9868d441f08eba34620cac6683
|
[] |
no_license
|
cran/explore
|
25628e71e0c7776ae6ce9c3819adc54e7bed7143
|
81bc7ac49636fd32db44981a030726b13ec96b71
|
refs/heads/master
| 2023-01-22T10:53:46.173589
| 2023-01-14T10:10:02
| 2023-01-14T10:10:02
| 236,595,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 434
|
rd
|
count_pct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools.R
\name{count_pct}
\alias{count_pct}
\title{Adds percentage to dplyr::count()}
\usage{
count_pct(data, ...)
}
\arguments{
\item{data}{A dataset}
\item{...}{Other parameters passed to count()}
}
\value{
Dataset
}
\description{
Adds variables total and
pct (percentage) to dplyr::count()
}
\examples{
count_pct(iris, Species)
}
|
067181d5be1d53f6e42c32e2828b1e86bc7fd011
|
fbb3e87f431c24fd0a503c974642107a16239d27
|
/run_analysis.R
|
f54db02dcc29da5c1c10b93f941cb9b77aad977a
|
[] |
no_license
|
asrulnb/Getting-and-Cleaning-Data-Assignment
|
e8436070d1add8a0a6a12c7d0d74d81de63faf9e
|
d48ac5776130b1cd69588d9729f67403c1887547
|
refs/heads/master
| 2021-01-10T19:51:04.458940
| 2015-07-26T17:11:14
| 2015-07-26T17:11:14
| 39,679,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,899
|
r
|
run_analysis.R
|
# Script: Getting & Cleaning Data course assignment. Merges the UCI HAR
# train/test sets, keeps mean/std measurements, applies descriptive activity
# and variable names, and writes a tidy per-subject/activity average table.
###[ Initializing Library ]
# NOTE(review): rm(list = ls()) wipes the user's workspace as a side effect;
# considered an anti-pattern in scripts.
rm(list = ls())
library(dplyr)
library(data.table)
###[ Set working Directory to where the R source file is ]
# NOTE(review): parent.frame(2)$ofile is only set when this file is run via
# source(); it fails under Rscript or interactive execution. setwd() in a
# script is itself discouraged.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# < Part 1 : Merges the training and the test sets to create one data set >
###[ Reading Training Data ]
subject.train <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
label.train <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
data.train <- read.table("UCI HAR Dataset/train/x_train.txt", header = FALSE)
###[ Reading Test Data]
subject.test <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
label.test <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
data.test <- read.table("UCI HAR Dataset/test/x_test.txt", header = FALSE)
###[ Merging Data Table ]
# Test rows first, then training rows; the same order is used for all three
# tables so rows stay aligned when cbind-ed below.
subject <- rbind(subject.test,subject.train)
label <- rbind(label.test,label.train)
data <- rbind(data.test,data.train)
###[ remove temporary data ]
rm(subject.test)
rm(subject.train)
rm(label.test)
rm(label.train)
rm(data.test)
rm(data.train)
###[ Reading Features List from File ]
featureList <- read.table("UCI HAR Dataset/features.txt")
###[ Naming the Columns ]
colnames(subject) <- "Subject"
colnames(label) <- "Activity"
#### Saves the original column name
originalNames <- featureList[2]
#### Makes sure the names we going to use are valid
# make.names() sanitizes duplicate/illegal feature names (e.g. "()" pairs).
valid_names <- make.names(names = t(featureList[2]),unique = TRUE,allow_ = TRUE)
#### Assign the valid names
colnames(data) <- valid_names
###[ Combining everything into one Main DataSet ]
dsMain <- cbind(subject,label,data)
###[ remove temporary data ]
rm(subject)
rm(label)
rm(data)
rm(featureList)
#rm(valid_names)
# </Part 1 >
# < Part 2 : Extracts only the measurements on the mean and standard deviation for each measurement. >
dsMain <- select(dsMain,Subject,Activity,contains("mean"),contains("std"))
# </Part 2 >
# < Part 3 : Uses descriptive activity names to name the activities in the data set >
###[ Read the activity labels from file ]
ActivityLabels <- read.table("UCI HAR Dataset/activity_labels.txt",header = FALSE)
###[ Change the Activity ID with Activity Label ]
# Activity ids (1..6) index directly into the label table's second column.
dsMain <- mutate(dsMain,Activity = ActivityLabels[Activity,2])
#### Remove variable that is no longer useful
rm(ActivityLabels)
# </Part 3 >
# < Part 4 : Appropriately labels the data set with descriptive variable names >
changedNames <- names(dsMain)
names(dsMain)<-gsub("Acc", "Accelerometer", names(dsMain))
names(dsMain)<-gsub("meanFreq", "MeanFrequency", names(dsMain))
names(dsMain)<-gsub("^t", "Time", names(dsMain))
names(dsMain)<-gsub("^f", "Frequency", names(dsMain))
# NOTE(review): "Angel" is presumably a typo for "Angle" -- confirm intent.
names(dsMain)<-gsub("angle", "Angel", names(dsMain), fixed = TRUE)
names(dsMain)<-gsub("tBody", "TimeBody", names(dsMain))
names(dsMain)<-gsub("mean", "Mean", names(dsMain))
names(dsMain)<-gsub("std", "Std", names(dsMain))
names(dsMain)<-gsub("Mag", "Magnitude", names(dsMain))
names(dsMain)<-gsub("Gyro", "Gyroscope", names(dsMain))
names(dsMain)<-gsub("gravity", "Gravity", names(dsMain))
names(dsMain)<-gsub("BodyBody", "Body", names(dsMain))
# fixed = TRUE makes these literal-string (not regex) replacements: collapse
# the "..." runs left by make.names(), then strip remaining dots entirely.
names(dsMain)<-gsub("...", ".", names(dsMain), fixed = TRUE)
names(dsMain)<-gsub(".", "", names(dsMain), fixed = TRUE)
finalNames <- names(dsMain)
# </Part 4 >
# < Part 5 : creates a second, independent tidy data set with the average of each variable for each activity and each subject >
#### Change the "Subject" column to Factor type
dsMain$Subject <- as.factor(dsMain$Subject)
#### Change dsMain to Data Table type
dsMain <- data.table(dsMain)
#### Calculate the Mean for all column except "Subject" and "Activity"
tidyData <- aggregate(. ~Subject + Activity, dsMain, mean)
#### Order the Data
tidyData <- tidyData[order(tidyData$Subject,tidyData$Activity),]
#### Write the output file
write.table(tidyData, file = "tidy_data.txt", row.names = FALSE)
# </Part 5 >
|
d86c88f2ce4f15b9b4b91d151a348a87a947dd17
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/PAWL/R/tuningclass.R
|
021a801fe56fe026046593c4577dfc1fc1e910bf
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,187
|
r
|
tuningclass.R
|
###################################################
# This file is part of RPAWL.
#
# RPAWL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RPAWL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RPAWL. If not, see <http://www.gnu.org/licenses/>.
###################################################
# S4 class holding the MCMC run configuration: number of parallel chains,
# iteration count, whether/when to compute a running mean, and the thinning
# interval for stored iterations (-1 means "do not store").
setClass("tuningparameters",
representation(nchains = "numeric", niterations = "numeric",
computemean = "logical", computemeanburnin = "numeric",
saveeverynth = "numeric"))
# Generic user-facing constructor; dispatch goes through the setMethod below.
setGeneric("tuningparameters", function(...)standardGeneric("tuningparameters"))
# Internal constructor: fills in defaults for every argument except the
# mandatory `niterations`, then builds the S4 object. `storeall` is a
# convenience flag translated into saveeverynth (TRUE -> 1, FALSE -> -1).
tuningparameters.constructor <- function(..., nchains, niterations, storeall,
computemean, computemeanburnin, saveeverynth){
if (missing(nchains))
nchains <- 1
# NOTE(review): the stop() message lacks a leading space, so it renders as
# "'niterations'has to be specified".
if (missing(niterations))
stop(sQuote("niterations"), "has to be specified")
# An explicit saveeverynth wins; otherwise derive it from storeall.
if (missing(saveeverynth)){
if (missing(storeall)){
#cat("storeall unspecified: set to FALSE\n")
storeall <- FALSE
saveeverynth <- -1
} else {
if (storeall){
saveeverynth <- 1
} else {
saveeverynth <- -1
}
}
}
if (missing(computemean)){
computemean <- FALSE
#cat("computemean unspecified: set to FALSE\n")
}
if (missing(computemeanburnin)){
computemeanburnin <- 0
}
new("tuningparameters",
nchains = nchains, niterations = niterations,
computemean = computemean, computemeanburnin = computemeanburnin,
saveeverynth = saveeverynth)
}
# Public constructor method. Arguments are forwarded lazily, so missing()
# checks inside the constructor still see which ones the caller omitted.
setMethod("tuningparameters",
definition = function(..., nchains, niterations, storeall, computemean,
computemeanburnin, saveeverynth){
tuningparameters.constructor(
nchains = nchains, niterations = niterations,
storeall = storeall, computemean = computemean,
computemeanburnin = computemeanburnin,
saveeverynth = saveeverynth)
})
# Pretty-printer used when a tuningparameters object is shown at the console.
setMethod(f = "show", signature = "tuningparameters",
def = function(object){
cat("Object of class ", class(object), ".\n", sep = "")
cat("*number of parallel chains:", object@nchains, "\n")
cat("*number of iterations:", object@niterations, "\n")
cat("*compute mean:", object@computemean, "\n")
cat("*compute mean (burnin):", object@computemeanburnin, "\n")
cat("*save every nth iteration:", object@saveeverynth, "\n")
})
|
19f5a7a4f973e540157c0c7651be7ecb3d6eeb6f
|
b67bef2e6295b68a6ba404e78505258a1ac2f95f
|
/man/kr.Rd
|
b0616f809a3220ff58f1e7333ce7a3dc63052aa8
|
[] |
no_license
|
cran/MGLM
|
beda91fe76a43884434647620d2bf4aebedc1a59
|
e0b8d5d6dec9b3b0dcc74514b0b68438276513d4
|
refs/heads/master
| 2022-05-01T07:22:15.450258
| 2022-04-13T22:32:32
| 2022-04-13T22:32:32
| 17,680,602
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,315
|
rd
|
kr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kr.R
\name{kr}
\alias{kr}
\title{Khatri-Rao product of two matrices}
\usage{
kr(A, B, w, byrow = TRUE)
}
\arguments{
\item{A, B}{matrices. The two matrices \code{A} and \code{B} should have the same number of columns.
We also give the user an option to do row-wise Kronecker product, to avoid transpose.
When doing row-wise Kronecker product, the number of rows of A and B should be the same.}
\item{w}{the weights vector. The length of the vector should match with the dimension of the matrices.
If performing column-wise Kronecker product, the length of w should be the same as the column number of A and B.
If performing row-wise Kronecker product, the length of w should be the same as the row number of A and B.
The default is a vector of 1 if no value provided.}
\item{byrow}{a logical variable controlling whether to perform row/column-wise Kronecker product.
The default is \code{byrow}=TRUE.}
}
\value{
A matrix of the Khatri-Rao product.
}
\description{
Return the Khatri-Rao product of two matrices, which is a column-wise Kronecker product.
}
\details{
The column/row-wise Kronecker product.
}
\examples{
X <- matrix(rnorm(30), 10, 3)
Y <- matrix(runif(50), 10, 5)
C <- kr(X, Y)
}
\author{
Yiwen Zhang and Hua Zhou
}
|
4344bcd6078aa5746b7e203fa5eba75f935b5e72
|
914c515a6c85356ee148950f5d93be59ee112b4c
|
/R/regression_ladlasso.R
|
10d3ceddd1ccf171443f8899788b1d165789055a
|
[] |
no_license
|
Mufabo/Rrobustsp
|
917fb8e9a353ffc7a838544fa5cd1d39eae34f6c
|
1700f4fed27e63cec6dfb67a14a208d8f46e2418
|
refs/heads/master
| 2022-06-14T22:15:50.045970
| 2022-06-05T12:13:32
| 2022-06-05T12:13:32
| 222,286,547
| 0
| 3
| null | 2019-12-19T14:17:46
| 2019-11-17T17:36:00
|
R
|
UTF-8
|
R
| false
| false
| 3,706
|
r
|
regression_ladlasso.R
|
#' ladlasso
#'
#' ladlasso computes the LAD-Lasso regression estimates for given complex-
#' or real-valued data. If number of predictors, p, is larger than one,
#' then IRWLS algorithm is used, otherwise a weighted median algorithm
#' (N > 200) or elemental fits (N < 200).
#'
#' @param y numeric response N x 1 vector (real/complex)
#' @param X sparse matrix, numeric feature N x p matrix (real/complex)
#' @param lambda numeric, non-negative penalty parameter
#' @param intcpt logical, whether to prepend an intercept column of ones to
#' \code{X} (default TRUE). NOTE(review): the original description was a
#' copy of \code{b0}'s.
#' @param b0 numeric optional initial start of the regression vector for
#' IRWLS algorithm. If not given, we use LSE (when p>1).
#' @param reltol
#' Convergence threshold for IRWLS. Terminate when successive
#' estimates differ in L2 norm by a rel. amount less than reltol.
#' @param printitn print iteration number (default = 0, no printing)
#' @param iter_max number of iterations \cr
#' default = 2000
#'
#' @return b1: numeric, the regression coefficient vector of size N
#' @return iter: integer, number of iterations
#'
#' @examples
#'
#' ladlasso(rnorm(8), matrix(rnorm(8*3)*5+1, 8, 3), 0.5)
#'
#' @note
#'
#' File location: regression_ladlasso.R
#' @export
ladlasso <- function(y, X, lambda, intcpt = T, b0 = NULL, reltol = 1e-8, printitn = 0, iter_max = 2000){
N <- nrow(X)
# p = number of predictors; ncol() is NULL when X is a bare vector.
p <- if(is.null(ncol(X))) 1 else ncol(X)
# make matrix sparse
# X <- sparseMatrix(i = row(X)[row(X) != 0], j = col(X)[col(X) != 0], x=c(X))
if(intcpt) X <- cbind(matrix(1, N, 1), X)
# Default start: least-squares solution via QR.
if(is.null(b0)) b0 <- qr.solve(X, y) # ginv(X) %*% y
iter <- NULL
# NOTE(review): sprintf() only builds a string; its result is discarded here
# (and again below), so nothing is actually printed. cat()/message() intended?
if(printitn > 0) sprintf('Computing the solution for lambda = %.3f\n',lambda)
# The case of only one predictor
if(p == 1){
# No intercept: solve directly by weighted median (wmed is a package helper).
if(!intcpt){
b1 <- wmed( rbind(y / X, 0), rbind(abs(X), lambda))
return( list('b1' = c(b1), 'iter' = iter))}
# Elemental fits path (real-valued, small N). elemfits is a package helper.
if(!is.complex(y) && N < 200 && intcpt){
if(lambda == 0){
b <- elemfits(X[,2], y) # b is a matrix
b <- b[[1]]
}else{
b <- elemfits(c(X[,2], 0), c(y, lambda))
b <- b[[1]]
}}
# NOTE(review): if y is complex or N >= 200 (with intcpt), `b` is never
# assigned and the next line errors with "object 'b' not found". The
# weighted-median branch promised in the docs for N > 200 is missing.
res <- colSums(abs(repmat(y, ncol(b)) - X %*% b))
indx <- which.min(res)
b1 <- b[,indx]
}
else {
# use IRWLS always when p > 1
if(printitn > 0) print('Starting the IRWLS algorithm..\n')
# Augment the system so the L1 penalty appears as p pseudo-observations.
if(lambda > 0){
y <- c(y, rep(0, p))
# slow
if(intcpt) X <- rbind(X, cbind(rep(0, p), diag(lambda, p, p))) else X <- rbind(X, diag(lambda, p, p))
}
# Choose dense vs sparse kernels once, up front.
# NOTE(review): class(X)[1] == "matrix" is fragile; inherits(X, "matrix")
# would be the idiomatic test.
if(class(X)[1] == "matrix"){
sweep2 <- function(x, y){sweep(x, MARGIN = 1, y, FUN = '/')}
solve2 <- function(Xstar, X, y){solve(t(Xstar) %*% X, (t(Xstar) %*% y))}
}
else{
# sweep_sparse is a package helper for Matrix-class objects.
sweep2 <- function(x, y){sweep_sparse(x, margin = 1, y, fun = '/')}
solve2 <- function(Xstar, X, y){solve(Matrix::t(Xstar) %*% X, (Matrix::t(Xstar) %*% y)@x)}
}
# IRWLS: reweight rows by 1/|residual| and re-solve until relative change
# in the coefficient vector falls below reltol (after at least 10 iters).
for(iter in 1:iter_max){
resid <- abs(y - X %*% b0)
resid[resid < 1e-6] <- 1e-6
# make if else for dense matrices
Xstar <- sweep2(X, resid)
b1 <- solve2(Xstar, X, y)
crit <- norm(b1-b0, type = "2") / norm(b0, type = "2")
# NOTE(review): `iter %% printitn` is truthy when iter is NOT a multiple
# of printitn, and the sprintf result is discarded anyway -- likely both
# the condition and the print call are wrong.
if(printitn > 0 & iter %% printitn) sprintf('ladlasso: crit(%4d) = %.9f\n',iter,crit)
if(crit < reltol && iter > 10) break
b0 <- b1
}}
return( list('b1' = c(b1), 'iter' = iter))
}
# fRcpp ----
# library(Rcpp)
#
# cppFunction(depends='RcppArmadillo', code='
# arma::mat fRcpp (arma::mat Xstar, arma::mat X, arma::mat y) {
# arma::mat betahat ;
# betahat = (Xstar.t() * X ).i() * (Xstar.t() * y) ;
# return(betahat) ;
# }
# ')
|
90cfac2d9126d1168684a1d25463829ed3cc468b
|
af2f37fac72adaa0ae72ebe5789cddc1161d5951
|
/man/condrmaxlin.Rd
|
ba238d50fbd810a684a5e7c17dcf5855e33da04d
|
[] |
no_license
|
cran/SpatialExtremes
|
80d254ea37ac153710b0576e6d35e73a29ba93ab
|
9df4b8e6d8eb7042621d46fba6125dbf4f35787b
|
refs/heads/master
| 2022-05-04T17:37:03.474880
| 2022-04-19T09:22:42
| 2022-04-19T09:22:42
| 17,693,743
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,997
|
rd
|
condrmaxlin.Rd
|
\name{condrmaxlin}
\alias{condrmaxlin}
\title{
Conditional simulation of max-linear random fields
}
\description{
This function generates (approximate) conditional simulation of unit
Frechet max-linear random fields. It can be used to get approximate
conditional simulation for max-stable processes.
}
\usage{
condrmaxlin(n, coord, data.coord, data, cov.mod = "gauss", ..., grid =
FALSE, p = 10000)
}
\arguments{
\item{n}{Integer. The number of conditional simulations.}
\item{coord}{A numeric vector or matrix specifying the coordinates
where the process has to be generated. If \code{coord} is a matrix,
each row specifies one location.}
\item{data.coord}{A numeric vector or matrix specifying the
coordinates where the process is conditioned.}
\item{data}{A numeric vector giving the conditioning observations.}
\item{cov.mod}{A character string specifying the max-stable model. See
section Details.}
\item{\dots}{The parameters of the max-stable model. See section
Details.}
\item{grid}{Logical. Does \code{coord} defines a grid?}
\item{p}{An integer. The number of unit Frechet random variables used
in the max-linear approximation.}
}
\details{
Any unit Frechet max-stable processes \eqn{\{Z(x)\}}{Z(x)} can be
approximated by a unit Frechet max-linear process, i.e.,
\deqn{Z(x) \approx \max_{j=1, \ldots, p} f_j(x) Z_j,}{Z(x) ~ max_{j=1,
\dots, p} f_j(x) Z_j,}
where \eqn{f_j}{f_j} are non-negative deterministic functions,
\eqn{p}{p} is a sufficiently large integer and \eqn{Z_j}{Z_j} are
independent unit Frechet random variables. Note that to ensure unit
Frechet margins, the following condition has to be satisfied
\deqn{\sum_{j=1, \ldots, p} f_j(x) = 1,}{\sum_{j=1, \dots, p} f_j(x) =
1,}
for all \eqn{x}{x}.
Currently only the discretized Smith model is implemented for which
\eqn{f_j(x) = c(p) \varphi(x - u_j ; \Sigma)}{f_j(x) = c(p) \phi(x -
u_j ; \Sigma)} where \eqn{\varphi(\cdot; \Sigma)}{\phi( . ; \Sigma)}
is the zero mean (multivariate) normal density with covariance matrix
\eqn{\Sigma}{\Sigma}, \eqn{u_j}{u_j} is a sequence of deterministic
points appropriately chosen and \eqn{c(p)}{c(p)} is a constant
ensuring unit Frechet margins.
}
\value{
A matrix containing observations from the required max-stable
model. Each column represents one stations. If \code{grid = TRUE}, the
function returns an array of dimension nrow(coord) x nrow(coord) x n.
}
\references{
Wang, Y. and Stoev, S. A. (2011) Conditional Sampling for Max-Stable
Random Fields. \emph{Advances in Applied Probability}.
}
\author{
Mathieu Ribatet
}
\section{Warnings}{
It may happen that some conditional observations are not honored
because the approximation of a max-stable process by a max-linear one
isn't accurate enough! Sometimes taking a larger \code{p} solves the
issue.
}
\seealso{
\code{\link{rmaxstab}}, \code{\link{rmaxlin}}
}
\examples{
## One dimensional conditional simulations
n.cond.site <- 10
cond.coord <- runif(n.cond.site, -10, 10)
data <- rmaxlin(1, cond.coord, var = 3, p = 10000)
x <- seq(-10, 10, length = 250)
cond.sim <- condrmaxlin(5, x, cond.coord, data, var = 3)
matplot(x, t(log(cond.sim)), type = "l", lty = 1, pch = 1)
points(cond.coord, log(data))
## Two dimensional conditional simulation
cond.coord <- matrix(runif(2 * n.cond.site, -10, 10), ncol = 2)
data <- rmaxstab(1, cond.coord, "gauss", cov11 = 4, cov12 = 0, cov22 = 4)
x <- y <- seq(-10, 10, length = 75)
cond.sim <- condrmaxlin(4, cbind(x, y), cond.coord, data, cov11 = 4,
cov12 = 0, cov22 = 4, grid = TRUE, p = 2000)
## Note p is set to 2000 for CPU reasons but is likely to be too small
op <- par(mfrow = c(2, 2), mar = rep(1, 4))
for (i in 1:4){
image(x, y, log(cond.sim[,,i]), col = heat.colors(64), xaxt = "n", yaxt
= "n", bty = "n")
contour(x, y, log(cond.sim[,,i]), add = TRUE)
text(cond.coord[,1], cond.coord[,2], round(log(data), 2), col = 3)
}
par(op)
}
\keyword{distribution}
|
cd278c9b9d43bf1cab091795337952a7b47495a9
|
b9462761c08a291964c512d73d059186d196010d
|
/Week1 Assignment.R
|
a6a21d971719ffb09df84748bdcf096dfdaabede
|
[] |
no_license
|
blin261/Notebooks
|
7401b0ba71f44f3537db610b30e2f3bf99df3a20
|
9be5405c2ede1bb033189c08cb2ffe3cd65a2ed3
|
refs/heads/master
| 2020-05-22T03:59:18.554572
| 2016-12-17T03:00:06
| 2016-12-17T03:00:06
| 63,370,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
Week1 Assignment.R
|
#Question1
# Compute 12! — prod() over seq_len() replaces the manual accumulation loop,
# which rebuilt the product one factor at a time.
result <- prod(seq_len(12))
print(result)
#Question2
# Keep the multiples of 5 from the integers 20 through 50.
nvect <- Filter(function(v) v %% 5 == 0, 20:50)
nvect
#Question3
# Solve a*x^2 + b*x + c = 0 and print both roots.
# The discriminant is now computed once per call (the original evaluated
# b^2 - 4*a*c up to three times). print() returns its argument invisibly,
# so the roots are also returned, exactly as before.
quadraticEquation <- function(a, b, c)
{
  disc <- b^2 - 4 * a * c  # discriminant decides real vs. complex roots
  if (disc < 0)
  {
    # Complex-conjugate pair: re +/- im*i
    re <- -1 * b / (2 * a)
    im <- sqrt(abs(disc)) / (2 * a)
    x1 <- complex(real = re, imaginary = im)
    x2 <- complex(real = re, imaginary = -1 * im)
    print(c(x1, x2))
  }
  else
  {
    # Two real roots (coincident when disc == 0)
    x1 <- (-1 * b + sqrt(disc)) / (2 * a)
    x2 <- (-1 * b - sqrt(disc)) / (2 * a)
    print(c(x1, x2))
  }
}
quadraticEquation(1,3,2)
quadraticEquation(1,0,-2)
quadraticEquation(2,3,4)
quadraticEquation(1,-6,25)
|
3b2c9fd5eede60820cf7d39ef31bfa0652959f7f
|
ea1191378907a7857c86b8628a5720074ae8bd7e
|
/UVAapp_v0.2/ui/ui_intro.R
|
b0b3ed99e58b19a79d71d8dd96c01417ac3b6fe0
|
[] |
no_license
|
simongonzalez/uva
|
cc10e0badaa97f6bf75aabd9045d8ea16ecbeb11
|
5458c0935ce0ed5830a7e6305735a4d85ff95e76
|
refs/heads/master
| 2022-12-03T04:10:16.986146
| 2022-11-23T00:44:20
| 2022-11-23T00:44:20
| 211,875,233
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,339
|
r
|
ui_intro.R
|
# Landing tab of the UVA Shiny app: welcome banner, logo image and
# author/contact links. Purely static UI — no reactive inputs here.
tabPanel("UVA",
h2(style="text-align:center", 'Welcome'),
tags$hr(),
# Raw HTML: centred logo (uva.png is served from the app's www/ folder)
HTML('<center><img src="uva.png" height = 300 width="600"></center>'),
div(style="text-align:center",
h1('Ultrasound Visualization & Analysis'),
h4('Visualise and analyse tongue contours for speech research'),
h5('Note: This is a Demo Version for showcasing purposes. Full version forthcoming.
The speed of data loading and plotting depends on the internet connection speed.'),
tags$hr(),
# Author / contact links rendered as a plain HTML bullet list
HTML('<li>'),
a('Author: Simon Gonzalez (PhD in Linguistics)', href="https://au.linkedin.com/in/gonzalezsimon", target="_blank"),
HTML('<li>'),
a('Email: simon.gonzalez@anu.edu.au',
href="https://app.griffith.edu.au/phonebook/phone-details.php?type=B&id=1671161",
target="_blank"),
HTML('<li>'),
a('Online CV', href="https://www.visualcv.com/simongonzalez", target="_blank"),
HTML('<li>'),
a('Thesis: Place oppositions in English coronal obstruents: an ultrasound study
- (University of Newcastle, Australia)',
href="http://hdl.handle.net/1959.13/1310302", target="_blank")
)
)
|
06c0b7e0caccba448219bb292fe56197883d517a
|
98c40fe72bfe9caafc3db5ca0a2c0944cad33988
|
/tests/testthat/test-encrypt.R
|
fc0971f3438536b0bc672133407b817a6afafa6d
|
[] |
no_license
|
datawookie/emayili
|
c91d38dc5bf0fc38cff45260dc0ba99bce7e754f
|
cb0f2f7e6c8738a30ddd88834abd88b63585244c
|
refs/heads/master
| 2023-09-01T00:02:36.105432
| 2023-08-30T10:49:01
| 2023-08-30T10:49:01
| 187,310,940
| 158
| 41
| null | 2023-08-03T06:52:34
| 2019-05-18T03:45:55
|
R
|
UTF-8
|
R
| false
| false
| 4,631
|
r
|
test-encrypt.R
|
# Tests for OpenPGP signing / encryption of emayili messages.
# Keys are generated into a (possibly throwaway) GPG keyring; each test then
# checks which PGP armor markers appear in the rendered message.
skip_if_not_installed("gpg")
library(gpg)
# - If running locally then use random home directory for keyring.
# - Don't do this on CI because GPG doesn't work on macOS with non-default
#   home folder.
#
if (Sys.getenv("CI") == "") {
  message("Use random home folder for GPG.")
  gpg_restart(home = tempdir(), silent = TRUE)
}
#
suppressWarnings({
  gpg_keygen(name = "Alice", email = "alice@yahoo.com")
  gpg_keygen(name = "Bob", email = "bob@gmail.com")
  gpg_keygen(name = "Jim", email = "jim@aol.com")
  # Jim is keyed twice on purpose — exercised by the "multiple keys" test below.
  gpg_keygen(name = "Jim", email = "jim@aol.com")
})
#
# The keys should all be RSA.
#
stopifnot(all(gpg_list_keys() %>% pull(algo) %in% c("RSA", "EdDSA")))
# PGP armor markers used to assert what ended up in the rendered message.
BEGIN_PGP_MESSAGE <- "-----BEGIN PGP MESSAGE-----"
END_PGP_MESSAGE <- "-----END PGP MESSAGE-----"
BEGIN_PGP_SIGNATURE <- "-----BEGIN PGP SIGNATURE-----"
END_PGP_SIGNATURE <- "-----END PGP SIGNATURE-----"
BEGIN_PGP_PUBLIC_KEY_BLOCK <- "-----BEGIN PGP PUBLIC KEY BLOCK-----"
END_PGP_PUBLIC_KEY_BLOCK <- "-----END PGP PUBLIC KEY BLOCK-----"
test_that("sign/encrypt empty message", {
  # A message with no body cannot be signed/encrypted.
  msg <- envelope(
    to = "alice@yahoo.com",
    from = "bob@gmail.com",
    encrypt = TRUE,
    sign = TRUE
  )
  expect_error(as.character(msg))
})
test_that("sign", {
  # Signature only: signature armor present, no encryption or key block.
  # NOTE(review): TXTCONTENT is defined in the test-suite setup files.
  msg <- envelope(
    to = "alice@yahoo.com",
    from = "bob@gmail.com"
  ) %>%
    text(TXTCONTENT) %>%
    signature(public_key = FALSE)
  expect_match(as.character(msg), BEGIN_PGP_SIGNATURE)
  expect_match(as.character(msg), END_PGP_SIGNATURE)
  expect_no_match(as.character(msg), BEGIN_PGP_MESSAGE)
  expect_no_match(as.character(msg), END_PGP_MESSAGE)
  expect_no_match(as.character(msg), BEGIN_PGP_PUBLIC_KEY_BLOCK)
  expect_no_match(as.character(msg), END_PGP_PUBLIC_KEY_BLOCK)
})
test_that("encrypt", {
  # Encryption only: message armor present, no (detached) signature armor.
  msg <- envelope(
    to = "alice@yahoo.com",
    from = "bob@gmail.com",
    encrypt = TRUE
  ) %>% text(TXTCONTENT)
  expect_match(as.character(msg), BEGIN_PGP_MESSAGE)
  expect_match(as.character(msg), END_PGP_MESSAGE)
  expect_no_match(as.character(msg), BEGIN_PGP_SIGNATURE)
  expect_no_match(as.character(msg), END_PGP_SIGNATURE)
  expect_no_match(as.character(msg), BEGIN_PGP_PUBLIC_KEY_BLOCK)
  expect_no_match(as.character(msg), END_PGP_PUBLIC_KEY_BLOCK)
})
test_that("sign & encrypt", {
  # Signature is wrapped inside the encrypted payload, so only message
  # armor is visible from the outside.
  msg <- envelope(
    to = "alice@yahoo.com",
    from = "bob@gmail.com",
    sign = TRUE,
    encrypt = TRUE
  ) %>% text(TXTCONTENT)
  expect_match(as.character(msg), BEGIN_PGP_MESSAGE)
  expect_match(as.character(msg), END_PGP_MESSAGE)
  expect_no_match(as.character(msg), BEGIN_PGP_SIGNATURE)
  expect_no_match(as.character(msg), END_PGP_SIGNATURE)
  expect_no_match(as.character(msg), BEGIN_PGP_PUBLIC_KEY_BLOCK)
  expect_no_match(as.character(msg), END_PGP_PUBLIC_KEY_BLOCK)
})
test_that("public key", {
  # Attaching the public key only: key-block armor, nothing else.
  msg <- envelope(
    to = "alice@yahoo.com",
    from = "bob@gmail.com",
    public_key = TRUE
  ) %>% text(TXTCONTENT)
  expect_match(as.character(msg), BEGIN_PGP_PUBLIC_KEY_BLOCK)
  expect_match(as.character(msg), END_PGP_PUBLIC_KEY_BLOCK)
  expect_no_match(as.character(msg), BEGIN_PGP_SIGNATURE)
  expect_no_match(as.character(msg), END_PGP_SIGNATURE)
  expect_no_match(as.character(msg), BEGIN_PGP_MESSAGE)
  expect_no_match(as.character(msg), END_PGP_MESSAGE)
})
test_that("fail without sender or recipients", {
  # Encryption needs both ends of the exchange to resolve keys.
  expect_error(
    envelope(to = "alice@yahoo.com") %>% encrypt() %>% as.character(),
    "without sender"
  )
  expect_error(
    envelope(from = "bob@gmail.com") %>% encrypt() %>% as.character(),
    "without recipients"
  )
})
test_that("missing public keys", {
  # Addresses that were never keyed above must produce a "missing keys" error.
  # Missing sender key.
  expect_error(
    envelope(to = "alice@yahoo.com", from = "tim@gmail.com") %>%
      encrypt() %>%
      as.character(),
    "missing keys",
    ignore.case = TRUE
  )
  # Missing recipient key.
  expect_error(
    envelope(to = "jenny@yahoo.com", from = "bob@gmail.com") %>%
      encrypt() %>%
      as.character(),
    "missing keys",
    ignore.case = TRUE
  )
})
test_that("sign with/without body", {
  # encrypt(encrypt, sign, public_key): a bodiless message is acceptable only
  # when the public key is attached (there is then something to send).
  nobody <- envelope(to = "alice@yahoo.com", from = "bob@gmail.com")
  body <- nobody %>% text("Hello!")
  # With public key.
  expect_error(nobody %>% encrypt(FALSE, TRUE, TRUE) %>% as.character(), NA)
  expect_error(body %>% encrypt(FALSE, TRUE, TRUE) %>% as.character(), NA)
  # Without public key.
  expect_error(nobody %>% encrypt(FALSE, TRUE, FALSE) %>% as.character(), "empty message")
  expect_error(body %>% encrypt(FALSE, TRUE, FALSE) %>% as.character(), NA)
})
test_that("multiple keys", {
  # Jim has two keys in the keyring (generated twice above); encryption must
  # still succeed rather than fail on the ambiguity.
  msg <- envelope(to = "alice@yahoo.com", from = "jim@aol.com") %>%
    text("Hello!") %>%
    encrypt()
  expect_error(msg %>% as.character(), NA)
})
|
f9fc0c5d5ddcd56c4084f6932a6ea06ebd516466
|
24fcc7a9446871f5affbc82d3ae1ed20d6a7c8aa
|
/man/set_smc.Rd
|
95004a51323197e0a78f9b8dce9e8ff7f676eeaa
|
[
"MIT"
] |
permissive
|
mrc-ide/malariasimulation
|
3188657f6ff9da4ea35646189d0bd75d6e35aa52
|
397a7b7efe90958dd01f97110a1d16c71d041f33
|
refs/heads/master
| 2023-08-23T11:29:10.050424
| 2023-07-03T15:58:32
| 2023-07-03T15:58:32
| 233,609,741
| 10
| 10
|
NOASSERTION
| 2023-08-17T15:48:41
| 2020-01-13T14:06:17
|
R
|
UTF-8
|
R
| false
| true
| 816
|
rd
|
set_smc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mda_parameters.R
\name{set_smc}
\alias{set_smc}
\title{Parameterise a Seasonal Malaria Chemoprevention}
\usage{
set_smc(parameters, drug, timesteps, coverages, min_ages, max_ages)
}
\arguments{
\item{parameters}{a list of parameters to modify}
\item{drug}{the index of the drug to administer}
\item{timesteps}{a vector of timesteps for each round of smc}
\item{coverages}{a vector of the proportion of the target population who receive each
round}
\item{min_ages}{a vector of minimum ages of the target population for each round exclusive (in timesteps)}
\item{max_ages}{a vector of maximum ages of the target population for each round exclusive (in timesteps)}
}
\description{
Parameterise a Seasonal Malaria Chemoprevention
}
|
32ba3d7a7668f0a618983ae38157b0370df33053
|
05945974b558cdddc0bd1207345c5260f5ce967f
|
/run_analysis.R
|
8f952e8b8a66bd649860dfdb8b58c58a15be1401
|
[] |
no_license
|
josephyyw00/GCD-Course-Project
|
59ad754ed93fe5210ed3fd14fad78b758c95583d
|
b22c615a0285676f0b70c63dc6f61b02823a35a8
|
refs/heads/master
| 2021-04-07T17:39:05.841472
| 2020-03-30T03:43:34
| 2020-03-30T03:43:34
| 248,694,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,683
|
r
|
run_analysis.R
|
# run_analysis.R — builds a tidy summary of the UCI HAR smartphone data set:
# merges the test/train splits, keeps mean/std features, attaches activity
# labels, and writes per-subject, per-activity averages to dsTidy.txt.
# Assumes the "UCI HAR Dataset" folder sits in the working directory.
library(reshape2)
# Activity id -> name lookup (V1 = id, V2 = label)
dsActivityLabel <- read.table("./UCI HAR Dataset/activity_labels.txt")
# Names of the 561 measurement columns (V2 = feature name)
dsFeatures <- read.table("./UCI HAR Dataset/features.txt")
# Test split: measurements (X), activity ids (Y), subject ids
dsXTest <- read.table("./UCI HAR Dataset/test/X_test.txt")
dsYTest <- read.table("./UCI HAR Dataset/test/Y_test.txt")
dsSTest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# Name the measurement columns after the features
colnames(dsXTest) <- dsFeatures$V2
# Attach activity id and subject to each observation
dsXTest$Activity <- dsYTest$V1
dsXTest$Subject <- factor(dsSTest$V1)
# Keep only mean/std features (plus the two id columns just added)
dsXTestFiltered <- dsXTest[,grepl("mean|std|Activity|Subject",colnames(dsXTest))]
# Train split: same steps as for the test split
dsXTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
dsYTrain <- read.table("./UCI HAR Dataset/train/Y_train.txt")
dsSTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# Name the measurement columns after the features
colnames(dsXTrain) <- dsFeatures$V2
# Attach activity id and subject to each observation
dsXTrain$Activity <- dsYTrain$V1
dsXTrain$Subject <- factor(dsSTrain$V1)
# Keep only mean/std features (plus the two id columns just added)
dsXTrainFiltered <- dsXTrain[,grepl("mean|std|Activity|Subject",colnames(dsXTrain))]
# Stack test and train into one data set
dsMaster <- rbind(dsXTestFiltered,dsXTrainFiltered)
# Map activity ids onto descriptive labels (ordering follows activity_labels.txt)
dsMaster$ActivityLabels <- factor(dsMaster$Activity, labels= c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING"))
# Tidy dataset: melt to long form, then average every variable
# per (Subject, ActivityLabels) pair
dsMelted <- melt(dsMaster, id = c("Subject", "ActivityLabels"))
dsTidy <- dcast(dsMelted, Subject + ActivityLabels ~ variable, mean)
write.table(dsTidy, "./dsTidy.txt", row.names = FALSE, quote = FALSE)
|
6f81899b565db547267b9e3548fe9afcd0c945b4
|
cb7db765f1bf3d4e621af5e98d1c841581ad0aed
|
/R/spore-package.R
|
b0d1b1d6d93ee4d5a10a98c68d699d4fd1f2526f
|
[] |
no_license
|
noamross/spore
|
839c6dfff5bb3de77ee8e25b812a8cede109509f
|
f44888ffae861104c20de4ed35dd5d8d1b16366c
|
refs/heads/master
| 2021-01-01T19:47:17.817307
| 2015-08-18T04:59:06
| 2015-08-18T04:59:06
| 27,852,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
spore-package.R
|
# Package-level roxygen directives; roxygen2 turns these into NAMESPACE
# entries (compiled-code registration and package imports). The trailing
# NULL gives the tags an object to attach to.
#' @useDynLib spore
#' @importFrom Rcpp sourceCpp
#' @import RcppArmadillo
#' @import BH
NULL
|
03a8ff7651851e637b9587dd5b9f01a14bf99108
|
535cb08d66f59ac8e82794077590200f0b0d4b22
|
/R/cent.R
|
88b08d602d9b17acf5125bc1569ea2cc86fc1a97
|
[] |
no_license
|
cran/Omisc
|
0706995957a1b7926575d78863e7169ed0d94510
|
ed7e4cbb5d554d639ebf0e69f920ee8c1ce63221
|
refs/heads/master
| 2022-08-30T17:37:12.518418
| 2022-08-09T13:10:02
| 2022-08-09T13:10:02
| 152,997,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 195
|
r
|
cent.R
|
#' cent
#'
#' Center a numeric vector by subtracting its mean.
#'
#' @param X vector to be centered
#' @param na.rm logical; if \code{TRUE}, missing values are ignored when
#'   computing the mean. Defaults to \code{FALSE}, matching the previous
#'   behaviour.
#'
#' @return Returns a centered vector
#' @export
#'
#' @examples X<-c(1:10)
#' cent(X)
cent <- function(X, na.rm = FALSE) {
  # Subtraction is vectorized; NA handling is delegated to mean().
  X - mean(X, na.rm = na.rm)
}
|
54d2264d3965d5f5f606af1c2e15d26f9c94ca3d
|
f8eb55c15aec611480ede47d4e15e5a6e472b4fa
|
/analysis/_jkb/0018_dow_lower_price.R
|
5fadc18acd84b8b5ca2988a85018ef86cbbd3e6f
|
[] |
no_license
|
nmaggiulli/of-dollars-and-data
|
a4fa71d6a21ce5dc346f7558179080b8e459aaca
|
ae2501dfc0b72d292314c179c83d18d6d4a66ec3
|
refs/heads/master
| 2023-08-17T03:39:03.133003
| 2023-08-11T02:08:32
| 2023-08-11T02:08:32
| 77,659,168
| 397
| 32
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,492
|
r
|
0018_dow_lower_price.R
|
cat("\014") # Clear your console
rm(list = ls()) #clear your environment

########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))

########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(ggrepel)
library(gganimate)
library(tidylog)
library(tidyverse)

folder_name <- "_jkb/0018_dow_lower_price"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)

########################## Start Program Here ######################### #

# Daily Dow closes up to the 2020-03-23 low.
raw <- read.csv(paste0(importdir, "_jkb/0018_ycharts_dow_lower_price/ycharts_dji_data.csv"), skip = 1,
                col.names = c("date","index_dow")) %>%
  mutate(date = as.Date(substr(date, 1, 10), format = "%Y-%m-%d")) %>%
  select(date, index_dow) %>%
  arrange(date) %>%
  filter(date <= "2020-03-23")

# For each day (except the last, which has no future data), count the number
# of trading days until the index first closes lower.
# BUG FIX: the original `for(i in 1:nrow(raw)-1)` parses as
# `(1:nrow(raw)) - 1`, i.e. 0:(n-1), so the loop began at the invalid row 0.
for (i in seq_len(nrow(raw) - 1)) {
  val <- raw[i, "index_dow"]
  rest <- raw[(i+1):nrow(raw), "index_dow"]
  below <- which(val > rest)
  # 0 flags "never closes lower again". The > 9999 guard mirrors the original
  # sentinel (which relied on min(integer(0)) returning Inf with a warning).
  first_below <- if (length(below) == 0 || below[1] > 9999) 0 else below[1]
  raw[i, "days_to_lower"] <- first_below
  print(i)
}

# Summarize the wait among days that did see a lower close.
all_lower_in_future <- raw %>%
  filter(days_to_lower != 0)

print(mean(all_lower_in_future$days_to_lower))
print(quantile(all_lower_in_future$days_to_lower, probs = 0.5))

# ############################ End ################################## #
|
2aee8dadaa9955f43ade997a416aa02de7fe680e
|
277dbb992966a549176e2b7f526715574b421440
|
/R_training/실습제출/신승현/1029/movie1.R
|
611611c92484407e43377d0a309893d9228e3bdc
|
[] |
no_license
|
BaeYS-marketing/R
|
58bc7f448d7486510218035a3e09d1dd562bca4b
|
03b500cb428eded36d7c65bd8b2ee3437a7f5ef1
|
refs/heads/master
| 2020-12-11T04:30:28.034460
| 2020-01-17T08:47:38
| 2020-01-17T08:47:38
| 227,819,378
| 0
| 0
| null | 2019-12-13T12:06:33
| 2019-12-13T10:56:18
|
C++
|
UTF-8
|
R
| false
| false
| 488
|
r
|
movie1.R
|
# Scrape netizen review text from a Daum movie page and save it as CSV.
# Assumes rvest is attached (read_html/html_text were already in use).
url <- "https://movie.daum.net/moviedb/grade?movieId=125080&type=netizen"
# BUG FIX: the argument was misspelled `encording=`, so it fell into `...`
# and the encoding was silently ignored.
text <- read_html(url, encoding = "CP949")
# BUG FIX: CSS selection needs html_nodes(); read_html() cannot take a
# selector. The score nodes were also being discarded by the immediate
# reassignment of `nodes` — keep them separately.
score_nodes <- html_nodes(text, "em.emph_grade")
score <- html_text(score_nodes, trim = TRUE)
nodes <- html_nodes(text, "p.desc_review")
html_text(nodes)
review <- html_text(nodes, trim = TRUE); review
# Strip whitespace artifacts and the trailing "신고" (report) label
review <- gsub("\t", "", review)
review <- gsub("\r\n", "", review)
review <- gsub("\n", "", review)
review <- gsub("신고", "", review); review
# BUG FIX: `title` was never defined (it resolved to the graphics::title
# function); pair each review with its scraped score instead.
page <- data.frame(score, review)
write.csv(page, "daummovie1.csv")
getwd()
fef12087dc5692c55cab1d9c9e42d919e1a6e2f1
|
7d90861940bf91667dbfbe19669261f5c76dfd70
|
/man/run_abimo_command_line.Rd
|
8a4d37f689f18df33fe231bd40a97e4c4dd5873a
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.abimo
|
4dbda8cf9e8a0770ea5a4c362c461d159b4c0c84
|
526d5a5f56eda577bd1e5f019282f8ee7c60fe05
|
refs/heads/master
| 2023-09-03T15:10:55.202853
| 2022-06-23T11:40:28
| 2022-06-23T11:40:28
| 348,737,659
| 0
| 1
|
MIT
| 2023-09-02T12:14:17
| 2021-03-17T14:21:55
|
R
|
UTF-8
|
R
| false
| true
| 565
|
rd
|
run_abimo_command_line.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_abimo.R
\name{run_abimo_command_line}
\alias{run_abimo_command_line}
\title{Run Abimo on the Command Line}
\usage{
run_abimo_command_line(args, tag = "v3.3.0")
}
\arguments{
\item{args}{vector of arguments to be passed to Abimo}
\item{tag}{version tag of Abimo release to be used, see
\url{https://github.com/KWB-R/abimo/releases}}
}
\value{
The function returns what Abimo.exe sent to the standard output (as a
vector of character).
}
\description{
Run Abimo on the Command Line
}
|
4cd9e30bef584518c659c0089837cca658928dfc
|
bd7207c29baca3175933c183a23bcca395d57d92
|
/R/margins.R
|
b57bb5f190f22234f578d9ebbd8eac449a7e86db
|
[] |
no_license
|
datalab-dev/GetDocElements
|
89ca5cb17d725202315e86f02b01134f5368e5af
|
8e656e27ccb68070693dead7d3789e34391ebe92
|
refs/heads/master
| 2022-01-19T19:59:17.125643
| 2019-07-07T19:42:16
| 2019-07-07T19:42:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,267
|
r
|
margins.R
|
# S3 generic: compute the text margins (left/right/top/bottom extents of the
# unrotated text bounding boxes) for a PDF page, a whole document, or a set
# of text nodes.
margins =
# XXX Deal with rotation when the text is actually rotated.
# Specifying @rotation = 0 for case when there is a rotate line of text identifying how
# the document was retrieved, e.g. downloaded by UC Davis.....
#XXX should we extend this to get top and bottom.
#XXX and deal with a set of nodes, a page, and also an entire document.
#XXX remove header and footer
function(page, bbox = getBBox2(getNodeSet(page, ".//text[@rotation = 0]")), ...)
  UseMethod("margins")

# File path: parse the PDF-to-XML document, then dispatch again on the result.
margins.character =
function(page, bbox = getBBox2(getNodeSet(page, ".//text[@rotation = 0]")), ...)
{
  margins(readPDFXML(page), ...)
}

# Whole document: one margins vector per page, optionally bound row-wise
# into a data frame (one row per page).
margins.PDFToXMLDoc =
function(page, bbox = getBBox2(getNodeSet(page, ".//text[@rotation = 0]")), asDataFrame = TRUE, ...)
{
  ans = lapply(getPages(page), margins)
  if(asDataFrame)
    as.data.frame(do.call(rbind, ans))
  else
    ans
}

# Single page / raw XML node: delegate to the node-set method over the page's
# unrotated text nodes.
# NOTE(review): the `bbox = getBBox2()` default here is never evaluated —
# the body recomputes the node set itself.
margins.XMLInternalNode = margins.PDFToXMLPage =
function(page, bbox = getBBox2(), ...)
{
  margins(getNodeSet(page, ".//text[@rotation = 0]"))
}

# Node set: min/max over the nodes' bounding boxes.
# Columns are assumed to follow the getBBox2 convention (x, y, width,
# height) — TODO confirm against that function's definition.
margins.list = margins.XMLNodeSet =
function(page, bbox = getBBox2(unlist(page)), ...)
{
  c(left = min(bbox[, 1]), right = max(bbox[,1] + bbox[,3]), top = min(bbox[,2]), bottom = max(bbox[,4]))
}
|
ddb3cc8de5fa84d3c87b2439327c47b6322eab15
|
90f01699b697ed9ad7746f483ce537b70fe8f75b
|
/datascience_course/上課程式碼/week_2/loadData.R
|
83e31f7ec7292bc63025a002e658f9f7e1df2fe3
|
[] |
no_license
|
bill0812/course_study
|
ca581499c13ff88587e1ffdb099fec5ddc36abfa
|
7c964cae8e42cb47597673ea60b54d7e707b396b
|
refs/heads/master
| 2021-02-08T01:31:28.093497
| 2020-05-13T13:47:54
| 2020-05-13T13:47:54
| 244,093,750
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
loadData.R
|
# Load the UCI car-evaluation data set straight from the web into a data.frame.
uciCar <- read.table( # Note: 1: read.table() returns a data.frame
'http://www.win-vector.com/dfiles/car.data.csv', # Note: 2: URL of the remote CSV to read
sep=',', # Note: 3: fields are comma-separated
header=T # Note: 4: the first line holds the column names
)
# Always Exploring your data first
class(uciCar)   # confirm we got a data.frame
summary(uciCar) # per-column summaries
dim(uciCar)     # rows x columns
|
f4a6fbe4c5d0f25a7f76b0758ac84e6e36889b07
|
35093296dd3fc9d3e392967cef4ae8930d6bd0ae
|
/R/IFPCA.R
|
085401095725e1a6d5c64d945e765e1d0288e572
|
[] |
no_license
|
celehs/IFPCA
|
27a4c1b98d1cc7713f71f71bbcdbb6ca1e1cc3ae
|
dee0a813e37da605b1e98ed7006e67d504f6879a
|
refs/heads/master
| 2023-02-02T18:01:03.227356
| 2020-12-14T14:30:42
| 2020-12-14T14:30:42
| 289,030,885
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,648
|
r
|
IFPCA.R
|
#' @importFrom utils head
#' @importFrom graphics hist
#' @importFrom stats IQR approx bw.SJ bw.bcv bw.nrd bw.nrd0 bw.ucv density dnorm optimize
NULL
# "Vector to matrix": stack the vector `vc` as each of `dm` identical rows.
VTM <- function(vc, dm) {
  matrix(rep(vc, times = dm), nrow = dm, ncol = length(vc), byrow = TRUE)
}
# Peak time per subject: bin each subject's event times on the grid `tseq`
# and return the grid point where the running average event rate shows the
# largest relative jump. Subjects absent from `id` keep NA.
GetPK <- function(id, t, tseq, fu) {
  subj <- as.character(id)
  peak <- rep(NA, length(fu))
  names(peak) <- names(fu)
  gap <- diff(tseq)
  best_bin <- tapply(t, subj, FUN = function(times) {
    counts <- hist(times, breaks = tseq, plot = FALSE)$counts
    rate <- cumsum(counts) / tseq[-1]          # average rate up to each grid point
    rate_prev <- c(0, head(rate, -1))          # same, shifted by one bin
    which.max((rate - rate_prev) / (rate_prev + gap))
  })
  peak[unique(subj)] <- tseq[best_bin + 1]
  peak
}
#' @title Intensity Functional Principal Component Analysis (IFPCA)
#' @description Performs IFPCA to extract features from longitudinal encounter data.
#' @param time longitudinal encounter times. They should be greater than or equal to 1.
#' @param fu_train follow-up time (training)
#' @param fu_valid follow-up time (validation)
#' @param PPIC_K a logical indicating whether you want to use Pseudo-Poisson Information Criterion to choose
#' the number of principal components K (K.select="PPIC") \code{TRUE} or another criterion to choose
#' K (K.select="PropVar") \code{FALSE} in the PP_FPCA_Parallel function (hidden). Default is \code{FALSE}.
#' @param n.grid an integer value for grid points used in estimating covariance function g. Default is \code{401}.
#' @param propvar a proportion of variation used to select number of FPCs. Default is \code{0.85}.
#' @return A list with per-cohort code counts (TrainN/ValidN), derived
#'   feature matrices (TrainFt/ValidFt) and peak times (TrainPK/ValidPK).
#' @export
ifpca <- function(time, fu_train, fu_valid,
                  PPIC_K = FALSE, n.grid = 401, propvar = 0.85) {
  #-----------------------
  #----- data check ------
  #-----------------------
  #-1-- minimum in "time" should be greater than or equal to 1
  if(!is.vector(time)) stop("Data Entry Issue: 'time' should be a vector")
  chk=min(time)
  if(chk < 1){
    stop("Data Entry Issue: The minimum of 'time' should be 1. Please do not code Month 0 for Days 1 to 30 but Month 1.")
  }
  #-2-- fu_train and fu_valid should be all vectors and one-record per subject and mutually exclusive
  if(!is.vector(fu_train)) stop("Data Entry Issue: 'fu_train' should be a vector")
  if(!is.vector(fu_valid)) stop("Data Entry Issue: 'fu_valid' should be a vector")
  chk1=unique(names(fu_train))
  chk2=unique(names(fu_valid))
  if(length(fu_train)!=length(chk1)) stop("Data Entry Issue: More than one entry from one subject in 'fu_train'")
  if(length(fu_valid)!=length(chk2)) stop("Data Entry Issue: More than one entry from one subject in 'fu_valid'")
  chk3=c(names(fu_train),names(fu_valid))
  if(length(chk3)!=length(unique(chk3))) stop("Data Entry Issue: There are subjects who are in both 'fu_train' and 'fu_valid'")
  #-3-- subjects in time should be embedded by fu_train or fu_valid
  # NOTE(review): `sum(is.na(chk)!=0)` happens to equal `sum(is.na(chk))`,
  # so the guard works, but the intended form is `sum(is.na(chk)) != 0`.
  chk = match(names(time), c(names(fu_train),names(fu_valid)))
  if(sum(is.na(chk)!=0)) stop("Data Entry Issue: Some subjects in 'time' do not have follow-up time information in 'fu_train' or 'fu_valid'")
  #--- standardized version ---
  # Rescale each subject's encounter times by their follow-up, so all
  # trajectories live on [0, Tend].
  Tend <- 1.0
  as_int <- TRUE # FALSE
  names_time <- names(time)
  count <- tapply(time, names_time, length)
  fu <- c(fu_train, fu_valid)
  time_std <- time / fu[names_time]
  train <- names(fu_train)
  valid <- names(fu_valid)
  time_train <- time[names_time %in% train]
  time_valid <- time[names_time %in% valid]
  names_time_train <- names(time_train)
  names_time_valid <- names(time_valid)
  if (as_int) {
    names_time <- as.integer(names_time)
    names_time_train <- as.integer(names_time_train)
    names_time_valid <- as.integer(names_time_valid)
  }
  count_train <- count[names(count) %in% train]
  count_valid <- count[names(count) %in% valid]
  # Zero-filled count vectors covering every subject, not just those with codes
  count_train_all <- 0 * fu_train
  count_valid_all <- 0 * fu_valid
  count_train_all[names(count_train)] <- count_train
  count_valid_all[names(count_valid)] <- count_valid
  #--- create TrainN and ValidN ---
  NN=rep(0,length(c(fu_train,fu_valid))) ; names(NN)=c(names(fu_train), names(fu_valid)) ;
  tmp=table(names(time)) ; NN[names(tmp)]=tmp
  TrainN=data.frame(id = as.character(names(fu_train)), pred_total=NN[names(fu_train)])
  ValidN=data.frame(id = as.character(names(fu_valid)), pred_total=NN[names(fu_valid)])
  # TRAINING
  PKTS <- GetPK(
    id = names_time_train, ### INT/CHAR ###
    t = time_train,
    tseq = 0:floor(max(fu_train)),
    fu = fu_train)
  time_std_train <- time_std[names(time_std) %in% train]
  time1_train <- tapply(time_train, names_time_train, min)
  # Bandwidths for the intensity estimate (h2 follows an undersmoothing rate)
  h1 <- bw.nrd(time_std_train)
  h2 <- bw.nrd(time_std_train)^(5/6)
  # Fit the point-process FPCA; only the K-selection rule differs between arms
  if (PPIC_K) {
    tmp <- PP_FPCA(
      time_std_train,
      h1 = h1,
      h2 = h2,
      count_train,
      bw = "nrd",
      ngrid = n.grid,
      Tend = Tend,
      K.select = "PPIC",
      derivatives = TRUE)
  } else {
    tmp <- PP_FPCA(
      time_std_train,
      h1 = h1,
      h2 = h2,
      count_train,
      bw = "nrd",
      ngrid = n.grid,
      Tend = Tend,
      K.select = "PropVar",
      propvar = propvar,
      derivatives = TRUE)
  }
  # Feature matrix: first-code time, peak, change point, first FPC score, logN.
  # Columns start at follow-up/baseline defaults; rows with codes (pos) are
  # then overwritten with subject-specific values.
  ft.e <- cbind(
    matrix(fu_train, nrow = length(fu_train), ncol = 3),
    -tmp$baseline[1], log(1 + count_train_all))
  pos <- count_train_all > 0
  ft.e[pos, 1] <- time1_train
  locm <- unlist(apply(tmp$densities[, 1:sum(pos) + 1], 2, which.max))
  ft.e[pos, 2] <- ft.e[pos, 2] * tmp$densities[locm, 1]
  ft.e[pos, 3] <- ft.e[pos, 3] * tmp$derivatives[
    sapply(1:sum(pos), function(i) {
      which.max(tmp$derivatives[1:locm[i], i + 1])
    }), 1]
  ft.e[pos, 4] <- tmp$scores[, 2]
  # Score-based feature matrix: first-code time + first four FPC scores + logN
  ft.e.S <- cbind(
    ft.e[, 1], VTM(-tmp$baseline[1:4], length(fu_train)), log(1 + count_train_all))
  ft.e.S[pos, 2:5] <- as.matrix(tmp$scores[, 2:5])
  FPCA <- list(
    K = tmp$K,
    scores = tmp$scores,
    dens = tmp$densities,
    deriv = tmp$derivatives,
    mean = tmp$mean,
    basis = tmp$basis,
    baseline = tmp$baseline)
  colnames(ft.e) <- c("1stCode", "Pk", "ChP", "1stScore", "logN")
  colnames(ft.e.S) <- c("1stCode", "1stScore", "2ndScore", "3rdScore", "4thScore", "logN")
  rownames(ft.e.S) <- rownames(ft.e) <- names(PKTS) <- train
  # VALIDATION
  # Same feature construction, but scores are predicted from the training
  # basis via PP.FPCA.Pred rather than refit.
  PKTS2 <- GetPK(
    id = names_time_valid, ### INT/CHAR ###
    t = time_valid,
    tseq = 0:floor(max(fu_valid)),
    fu = fu_valid)
  time_std_valid <- time_std[names(time_std) %in% valid]
  time1_valid <- tapply(time_valid, names_time_valid, min)
  tmp <- PP.FPCA.Pred(time_std_valid, count_valid, FPCA$mean, FPCA$basis, FPCA$K)
  FPCA$ValidPred <- tmp
  ft.e2 <- cbind(
    matrix(fu_valid, nrow = length(fu_valid), ncol = 3),
    -tmp$baseline[1], log(1 + count_valid_all))
  pos <- count_valid_all > 0
  locm <- unlist(apply(tmp$densities[, 1:sum(pos) + 1], 2, which.max))
  #-- just patch ( will come back to PP.FPCA.Pred later
  # Clamp density-peak indices to the derivative grid to avoid out-of-range
  # subscripts from PP.FPCA.Pred output.
  locm = pmin(locm, nrow(tmp$derivatives))
  ft.e2[pos, 2] <- ft.e2[pos, 2] * tmp$densities[locm, 1]
  ft.e2[pos, 3] <- ft.e2[pos, 3] * tmp$derivatives[
    sapply(1:sum(pos), function(i) {
      which.max(tmp$derivatives[1:locm[i], i + 1])
    }), 1]
  ft.e2[pos, 4] <- tmp$scores[, 2]
  ft.e.S2 <- cbind(
    ft.e2[, 1], VTM(-tmp$baseline[1:4], length(fu_valid)), log(1 + count_valid_all))
  ft.e.S2[pos, 2:5] <- as.matrix(tmp$scores[, 2:5])
  colnames(ft.e2) <- c("1stCode", "Pk", "ChP", "1stScore", "logN")
  colnames(ft.e.S2) <- c("1stCode", "1stScore", "2ndScore", "3rdScore", "4thScore", "logN")
  rownames(ft.e.S2) <- rownames(ft.e2) <- names(PKTS2) <- valid
  # Commented-out entries (FPCA, score matrices) are intentionally withheld
  # from the return value.
  list(
    # FPCA = FPCA
    TrainN = TrainN,
    ValidN = ValidN,
    TrainFt = ft.e,
    # TrainSc = ft.e.S
    ValidFt = ft.e2,
    # ValidSc = ft.e.S2,
    TrainPK = PKTS,
    ValidPK = PKTS2)
}
|
8c3e90f019f12d776947cd0cdb1fa6a06ac574e1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/geozoning/examples/searchNODcrit1.Rd.R
|
f99281780c019c6b600d32b01590fed66b772bc5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
searchNODcrit1.Rd.R
|
# Extracted example for geozoning::searchNODcrit1.
library(geozoning)
### Name: searchNODcrit1
### Title: searchNODcrit1
### Aliases: searchNODcrit1
### ** Examples
# Build candidate zonings from the quantile probabilities, then search for
# the best one according to the criterion.
data(mapTest)
qProb=c(0.1,0.2,0.4);criti=correctionTree(qProb,mapTest) # 2 zonings at last level
res=searchNODcrit1(qProb,criti)# best one is first element of last level
|
9370c33b8b50948d5012d1c36d8c685988fb3fc1
|
80a2a1366bb284db761c908a7c61581e96c03fe7
|
/R/title.R
|
b29665005392526ec509052ec6ca93e4d36da312
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain"
] |
permissive
|
USGS-R/gsplot
|
cfe3816af99d43821fd8f8a31ea123cf269dd2b2
|
4e6adae6ac53decc4e26d59165c44dfc41c26548
|
refs/heads/main
| 2023-04-13T04:55:29.029629
| 2023-04-08T13:03:13
| 2023-04-08T13:03:13
| 37,219,882
| 5
| 20
|
CC0-1.0
| 2023-04-07T23:12:03
| 2015-06-10T20:08:10
|
R
|
UTF-8
|
R
| false
| false
| 1,663
|
r
|
title.R
|
#' gsplot title
#'
#' Adds a title to the plot. See \code{\link[graphics]{title}} for more details.
#'
#' @details Additional graphical parameter inputs:
#' \itemize{
#' \item{\code{main}} {character string that goes above the plot}
#' \item{\code{sub}} {character string that goes below the plot}
#' \item{\code{col.main, col.sub}} {color for the main title and subtitle, respectively}
#' \item{\code{font.main, font.sub}} {numeric value specifying the font style (1=normal, 2=bold, 3=italic, 4=bold and italic)}
#' \item{\code{cex.main, cex.sub}} {numeric value specifying the size of the main title and subtitle}
#' }
#'
#' @param object gsplot object
#' @param \dots Further graphical parameters may also be supplied as arguments. See 'Details'.
#'
#' @seealso \code{\link[graphics]{title}}
#' @export
#' @examples
#' gs <- gsplot() %>%
#' points(y=1, x=2, col="blue", pch=18, legend.name="Points", xlab="Stuff") %>%
#' lines(c(3,4,3), c(2,4,6), legend.name="Lines", ylab="Data!") %>%
#' abline(b=1, a=0, legend.name="1:1") %>%
#' legend(location="topleft",title="Awesome!") %>%
#' title(main="Great Graph", col.main="grey", font.main=2, cex.main=2)
#' gs
#' gs <- gsplot() %>%
#' points(y=1, x=2) %>%
#' title(main="Great Graph")
#' gs
title <- function(object, ...) {
  # Shim over graphics::title(): dispatches to title.gsplot for gsplot
  # objects and falls back to the graphics function otherwise.
  override("graphics", "title", object, ...)
}

title.gsplot <- function(object, ..., legend.name=NULL, side=c(1,2)){
  # Capture the title arguments (validated against the gsplot config file)
  # and store them on the object's global state; they are applied when the
  # gsplot object is rendered.
  # NOTE(review): legend.name and side are accepted but unused here —
  # presumably kept for call-signature parity with other gsplot verbs.
  to.gsplot <- set_args("title", ..., custom.config = object[["global"]][["config"]][["config.file"]], package = "graphics")
  object$global$title <- append_replace(object$global$title, to.gsplot)
  return(object)
}
|
9b089a8e55bbbf9f537ceb31cce709006816e475
|
3d926948a26a7152d08256b2b6c9a325ef225ace
|
/Course 3/C3S3Assessment.R
|
221e822bab68926dba9fcb969f1a7afc89d1d859
|
[] |
no_license
|
ConnorBruce/HarvardxDataScienceCertificate
|
0b2a5fe26e683aea7e8070582c8ba1646d39c2f4
|
692bea94eb8cb0e9c1d5582f483eda0eeaacd211
|
refs/heads/master
| 2023-06-11T17:02:26.665645
| 2021-07-08T16:30:30
| 2021-07-08T16:30:30
| 379,970,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
C3S3Assessment.R
|
# HarvardX Data Science, Course 3 Section 3 assessment.
# Part 1: SAT-style guessing — 44 questions, +1 for a correct guess
# (p = 0.2), -0.25 penalty for a wrong one.
avg = .2*1-.8*.25                 # expected score of one question
sd = 1.25*sqrt(.2*.8)*sqrt(44)    # SE of the 44-question total
1-pnorm(8,avg, sd)                # P(total > 8) via the CLT approximation
# Monte Carlo check of the same probability
set.seed(21)
S = replicate(10000, {
  guess = sample(c(1,-.25),44,prob = c(.2,.8),replace = T)
  sum(guess)
})
mean(S>8)
# New scoring: 4 choices (p = 0.25), no penalty for wrong answers
.25*44                            # expected total score
sqrt(44)*1*sqrt(.25*.75)          # SE of the total
# Skill levels p vs. probability (in %) of scoring above 35
p=seq(.25,.95,.05)
avg2 = p*44
sd2 = sqrt(44)*sqrt(p*(1-p))
data.frame(p,100*(1-pnorm(35,avg2,sd2)))
# Part 2: roulette bet paying 6:1 with win probability 5/38
p_win = 5/38
p_lose = 1-p_win
6*p_win - 1*p_lose                # expected value of one bet
7*sqrt(p_win*p_lose)              # SE of one bet (payout spread is 7)
7*sqrt(p_win*p_lose)/sqrt(500)    # SE of the average over 500 bets
avg3 = (6*p_win - 1*p_lose)*500   # expected total winnings over 500 bets
sd3 = 7*sqrt(p_win*p_lose)*sqrt(500) # SE of that total
pnorm(0,avg3,sd3)                 # P(losing money over 500 bets)
|
3dac513c84b75d883834773b73d88c02a92025dd
|
a9178519e4577716b3d1f46fb89776825ecf468d
|
/analysis/parliamentary-secretary.R
|
59edd5d97fe23a2ecc993d2f42a8487b1fd92350
|
[] |
no_license
|
lchski/parliamentarians-analysis
|
c8fca90481a7bd11e4561d265c99b751ba315b51
|
95ce1e314436092b753d6ebbc9217e1412887097
|
refs/heads/master
| 2021-06-21T21:07:51.805505
| 2020-11-28T20:42:24
| 2020-11-28T20:42:24
| 140,504,009
| 1
| 0
| null | 2020-10-01T23:56:03
| 2018-07-11T01:08:38
|
R
|
UTF-8
|
R
| false
| false
| 2,805
|
r
|
parliamentary-secretary.R
|
# Parliamentary secretaries / parliamentary assistants: who held these roles,
# with gender breakdowns by portfolio and by organization.
# Relies on project objects defined elsewhere: the `roles` and
# `parliamentarians` data frames plus the helpers `count_group()`,
# `remove_extra_columns()` and `summarize_roles_by_category()`.

# Quick count of secretary/assistant roles (House of Commons or untagged).
roles %>%
  filter(GroupingTitleEn %in% c(NA, "House of Commons Roles")) %>%
  filter(str_detect(NameEn, "Parliamentary Secretary|Parliamentary Assistant")) %>%
  count_group()
# NOTE(review): a stray argument-less `View()` used to follow this pipeline;
# `View()` requires an object and errors as written, so it has been removed.
# Pipe the result into View() interactively if needed.

## Find the secretaries
## Could also do `ministers %>% filter(str_detect(NameEn, "Parliamentary Secretary|Parliamentary Assistant"))`
## But there are a few extra entries (~21) that that misses.
parliamentary_secretaries <- roles %>%
  filter(GroupingTitleEn %in% c(NA, "House of Commons Roles")) %>%
  filter(str_detect(NameEn, "Parliamentary Secretary|Parliamentary Assistant")) %>%
  remove_extra_columns(.) %>%
  mutate(
    # `period_in_role` is presumably a column produced upstream — TODO confirm.
    period_in_office = period_in_role,
    yrs_in_office = time_length(period_in_office, "years")
  ) %>%
  left_join(
    parliamentarians %>%
      select(Person.PersonId, Person.DisplayName, Person.Gender)
  )

# Gender breakdown by portfolio. The M/F scaffold guarantees a row for every
# (portfolio, gender) pair even when one gender has zero role-holders.
parl_secs_gender_by_portfolio <- parliamentary_secretaries %>%
  select(PortFolioEn) %>%
  unique() %>%
  arrange(PortFolioEn) %>%
  mutate(
    M = NA,
    F = NA
  ) %>%
  pivot_longer(
    -PortFolioEn,
    names_to = "Person.Gender"
  ) %>%
  select(-value) %>%
  left_join(parliamentary_secretaries %>%
              summarize_roles_by_category(PortFolioEn, Person.Gender)) %>%
  mutate(count = ifelse(is.na(count), 0, count))

# Convert month-based summary columns to years, round everything, export.
parl_secs_gender_by_portfolio %>%
  mutate(
    avg_length_yrs = avg_length_mos / 12,
    median_length_yrs = median_length_mos / 12,
    min_length_yrs = min_length_mos / 12,
    max_length_yrs = max_length_mos / 12
  ) %>%
  mutate_at(
    c("category_lifespan_yrs", "category_occupied_yrs", "avg_length_yrs", "avg_length_mos", "median_length_yrs", "median_length_mos", "min_length_mos", "min_length_yrs", "max_length_mos", "max_length_yrs"),
    ~ round(., digits = 2)
  ) %>% write_csv("data/out/parl_secs_gender_by_portfolio.csv")

# Same breakdown, keyed by organization instead of portfolio.
parl_secs_gender_by_organization <- parliamentary_secretaries %>%
  select(OrganizationLongEn) %>%
  unique() %>%
  arrange(OrganizationLongEn) %>%
  mutate(
    M = NA,
    F = NA
  ) %>%
  pivot_longer(
    -OrganizationLongEn,
    names_to = "Person.Gender"
  ) %>%
  select(-value) %>%
  left_join(parliamentary_secretaries %>%
              summarize_roles_by_category(OrganizationLongEn, Person.Gender)) %>%
  mutate(count = ifelse(is.na(count), 0, count))

parl_secs_gender_by_organization %>%
  mutate(
    avg_length_yrs = avg_length_mos / 12,
    median_length_yrs = median_length_mos / 12,
    min_length_yrs = min_length_mos / 12,
    max_length_yrs = max_length_mos / 12
  ) %>%
  mutate_at(
    c("category_lifespan_yrs", "category_occupied_yrs", "avg_length_yrs", "avg_length_mos", "median_length_yrs", "median_length_mos", "min_length_mos", "min_length_yrs", "max_length_mos", "max_length_yrs"),
    ~ round(., digits = 2)
  ) %>% write_csv("data/out/parl_secs_gender_by_organization.csv")
|
290502ac8a544fc335cd592edca36ac12418bf61
|
67a6f1af8a7e28e3e64f123ce48fff3017364094
|
/R/mirnaEnrichment.R
|
c99637153eeb8437ed1ab3a1ceede7d59c9ec1a7
|
[] |
no_license
|
tastanlab/NoRCE
|
3257f0af8da9cbff2152313edc9df9e3b2b4dd1c
|
e8779cec9bdece0e71a8f85389259a8d1714465f
|
refs/heads/master
| 2020-05-28T14:17:11.643250
| 2019-05-26T12:11:24
| 2019-05-26T12:11:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,736
|
r
|
mirnaEnrichment.R
|
# Globally silence warnings for everything that runs after this point.
# NOTE(review): `op` captures the previous options so they could be restored
# later (options(op)), but no restore is visible in this file — warnings stay
# suppressed for the whole session once this file is sourced.
op <- options(warn = (-1))
# Silence readr's column-specification messages when tables are read.
options(readr.num_columns = 0)
#' GO term enrichments of the microRNA genes with mRNAs that fall in the given upstream/downstream regions of the microRNA genes
#'
#' @param gene Input microRNA gene. It supports both pre-miRNA and mature miRNA, however, when target prediction is performed(target= TRUE), miRNA genes should be mature.
#' @param hg Genome assembly of interest for the analysis. Possible assemblies are "mm10" for mouse, "dre10" for zebrafish, "rn6" for rat, "dm6" for fruit fly, "ce11" for worm, "sc3" for yeast, "hg19" and "hg38" for human
#' @param upstream Upstream distance from the transcription start position
#' @param downstream Downstream distance from the transcription end position
#' @param searchRegion Search space of the cis-region. Possible values are "all", "exon", "intron"
#' @param GOtype Hierarchical category of the GO ontology. Possible values are "BP", "CC", "MF"
#' @param pCut Threshold value for the pvalue. Default value is 0.05
#' @param pAdjCut Cutoff value for the adjusted p-values using one of given method. Default value is 0.05.
#' @param pAdjust Methods of the adjusted p-values. Possible methods are "bonferroni", "holm", "BH"(default)
#' @param min Minimum number of genes that are required for enrichment. By default, this value is set to 5.
#' @param slim Boolean value stating whether set of annotation should be performed for high level GO terms (GO slim)
#' @param near Boolean value presents whether cis-neighbourhood should be considered in the analysis
#' @param target Boolean value shows whether miRNA target prediction should be performed
#' @param backGenes The set of genes that tested against to the input
#' @param isTADSearch Boolean value that shows whether TAD analysis is performed. This value has to be TRUE for TAD analysis.
#' @param TAD TAD genomic regions for the species. Predefined TAD regions or any new TAD regions can be used for the analysis. TAD regions must be formatted as a GRanges object. Predefined TAD regions are 'tad_hg19', 'tad_hg38', 'tad_mm10', 'tad_dmel' for the hg19, hg38, mm10 and dm6 assemblies, respectively.
#' @param cellline Cell lines for TAD regions.
#' @param backGType Type of the background gene. If miRNA gene set is used for background gene, backGType should be set to the 'mirna'
#' @param express Boolean variable whether co-expression analysis is performed. If this option is set to TRUE, co-expression analysis will be performed.
#' @param isCustomExp Boolean variable whether co-expression analysis with custom data will be performed. When this option is set, exp1 and exp2 parameters must be defined.
#' @param cancer Defines the name of the TCGA project code such as 'BRCA' for correlation analysis. Possible cancer types ACC, BLCA, BRCA, CESC, CHOL, COAD, COADREAD, DLBC, ESCA, GBMLGG, HNSC, KICH, KIPAN, KIRC, KIRP, LGG, LIHC, LUAD, LUSC, OV, PAAD, PCPG, PRAD, READ, SARC, SKCM, STAD, STES, TGCT, THCA, THYM, UCEC, UCS, UVM
#' @param exp1 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param exp2 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param label1 Gene names of the custom exp1 expression data. If it is not provided, column name of the exp1 data will be taken.
#' @param label2 Gene names of the custom exp2 expression data. If it is not provided, column name of the exp2 data will be taken.
#' @param corrMethod Correlation coefficient method that will be used for evaluation. Possible values are "pearson", "kendall", "spearman"
#' @param varCutoff Variance cut-off; genes whose variance is lower than this value will be trimmed
#' @param pcut P-value cut off for the correlation values
#' @param alternate Holds the alternative hypothesis and "two.sided", "greater" or "less" are the possible values.
#' @param conf Confidence level for the returned confidence interval. It is only used for the Pearson correlation coefficient if there are at least 4 complete pairs of observations.
#' @param minAbsCor Cut-off value for the Pearson correlation coefficient of the miRNA-mRNA
#' @param databaseFile Path of miRcancer.db file
#' @param isUnionCorGene Boolean value that shows whether union of the output of the co-expression analysis and the other analysis should be considered
#'
#' @return MiRNA GO term enrichment object for the given input
#'
#' @examples
#'
#'
#' miGO <-mirnaGOEnricher(gene=brain_mirna,hg='hg19',near = TRUE,target = FALSE, pAdjust = "none")
#'
#'\dontrun{
#' miGO <-mirnaGOEnricher(gene=brain_mirna,hg='hg19',near = TRUE,target = TRUE)
#'
#' miGO <-mirnaGOEnricher(gene=brain_mirna,hg='hg19',near = TRUE,target = FALSE,
#' isTADSearch = TRUE,TAD = tad_hg19)
#'
#' miGO <- mirnaGOEnricher(gene = brain_mirna, hg = 'hg19', target=TRUE,
#' express = TRUE, exp1 = mirna, exp2 = mrna,
#' isCustomExp = TRUE, minAbsCor = 0.1)
#'
#' createNetwork(mrnaObject = miGO,n = 2)
#' }
#' @export mirnaGOEnricher
# GO-term enrichment for a set of miRNA genes. Candidate mRNAs are collected
# from any combination of cis-neighbourhood search (`near`), target
# prediction (`target`), TAD overlap (`isTADSearch`) and co-expression
# (`express`), then passed to goEnrichment(). Returns a NoRCE S4 object.
mirnaGOEnricher <-
  function(gene,
           hg,
           upstream = 10000,
           downstream = 10000,
           searchRegion = 'all',
           GOtype = "BP",
           pCut = 0.05,
           pAdjCut = 0.05,
           pAdjust = "BH",
           near = FALSE,
           target = FALSE,
           min = 5,
           slim = FALSE,
           backGenes = '',
           backGType = 'pc_gene',
           isTADSearch = FALSE,
           TAD = NoRCE::tad_hg19,
           cellline = 'all',
           express = FALSE,
           isCustomExp = FALSE,
           cancer,
           exp1,
           exp2,
           label1 = '',
           label2 = '',
           corrMethod = "pearson",
           varCutoff = 0.0025,
           minAbsCor = 0.3,
           pcut = 0.05,
           alternate = 'greater',
           isUnionCorGene = FALSE,
           conf = 0.95,
           databaseFile = '') {
    # NOTE(review): these checks only message() and fall through — they do not
    # stop() when a required argument is missing; the later code then fails.
    if (missing(gene)) {
      message("Gene is missing.")
    }
    if (missing(hg)) {
      message(
        "Assembly version is missing. Possible assemblies are 'mm10' for mouse, 'dre10' for zebrafish, 'rn6' for rat, 'dm6' for fruit fly, 'ce11' for worm, 'hg19' and 'hg38' for human."
      )
    }
    # assembly() presumably installs assembly-specific globals ("mart", "go",
    # "genomee", "ucsc") that the cleanup at the bottom removes — TODO confirm.
    assembly(hg)
    gene <- as.data.frame(gene)
    colnames(gene) <- c("genes")
    # Optional miRNA -> mRNA target prediction; mature miRNA names expected.
    if (target) {
      targetResult <- predictmiTargets(gene = gene$genes,
                                       type = "mirna",
                                       hg = hg)
      if (is.null(targetResult))
      {
        message("There is no target!")
        return(NULL)
      }
      targetResult <- unique(targetResult)
      # Column 3 of the prediction table is used as Ensembl gene IDs here.
      geneTargetLoc <-
        convertGeneID(genetype = "Ensembl_gene",
                      genelist = targetResult[, 3],
                      hg = hg)
    }
    # Strip mature-arm suffixes (-3p/-5p) to recover precursor names, and keep
    # both stripped and original names.
    a <-
      as.data.frame(gsub(paste(c("-3p", "-5p"), collapse = "|"), "", gene$genes))
    colnames(a) <- 'genes'
    a <- unique(rbind(a, gene$genes))
    geneLoc <-
      convertGeneID(genetype = "mirna",
                    genelist = a$genes,
                    hg = hg)
    # Cis-neighbourhood search: coding genes within upstream/downstream of the
    # miRNA loci, restricted to the requested search region.
    if (near) {
      if (searchRegion == 'all')
        miNearGene_temp <-
          getUCSC(geneLoc, upstream, downstream, hg)
      if (searchRegion == 'exon')
        miNearGene_temp <-
          getNearToExon(geneLoc, upstream, downstream, hg)
      if (searchRegion == 'intron')
        miNearGene_temp <-
          getNearToIntron(geneLoc, upstream, downstream, hg)
      geneLoc_temp <-
        convertGeneID(genetype = "NCBI",
                      genelist = miNearGene_temp,
                      hg = hg)
      if (target) {
        # near AND target: keep only nearby genes overlapping predicted targets.
        geneL <- findOverlapPairs(geneLoc_temp, geneTargetLoc)
        geneLo <- pintersect(geneL, ignore.strand = TRUE)
        miNearGene <-
          getUCSC(
            bedfile = geneLo,
            upstream = 0,
            downstream = 0,
            hg = hg
          )
      }
      else{
        miNearGene <- miNearGene_temp
      }
    }
    else{
      if (target) {
        # Column 2 of the prediction table holds the target gene names here.
        miNearGene <- targetResult[, 2]
      }
      else{
        miNearGene <- gene
      }
    }
    # Restrict to genes sharing a topologically associating domain (TAD).
    if (isTADSearch) {
      tadGene <-
        getTADOverlap(
          bedfile = geneLoc,
          TAD = TAD,
          cellline = cellline,
          hg = hg,
          near = near,
          upstream = upstream,
          downstream = downstream
        )
      if (near | target)
        miNearGene <-
          as.data.frame(intersect(unlist(miNearGene), unlist(tadGene)))
      else
        miNearGene <- tadGene
    }
    # Co-expression filter: TCGA-based (corrbased) or custom expression data.
    if (express) {
      if (!isCustomExp) {
        nearG <- corrbased(
          mirnagene = a$genes,
          cancer = cancer,
          minAbsCor = minAbsCor,
          databaseFile = databaseFile
        )
        # NOTE(review): row positions computed over a$genes are used to index
        # nearG — verify this selects the intended rows (one would expect
        # nearG[nearG$mirna_base %in% a$genes, ]).
        d <- nearG[which(a$genes %in% nearG$mirna_base),]
        if (!isUnionCorGene)
          miNearGene <- intersect(unlist(miNearGene), d$feature)
        else
          miNearGene <- union(unlist(miNearGene), d$feature)
      }
      else{
        nearG <- calculateCorr(
          exp1 = exp1,
          exp2 = exp2,
          label1 = label1 ,
          label2 = label2,
          corrMethod = corrMethod,
          varCutoff = varCutoff,
          corCutoff = minAbsCor,
          pcut = pcut,
          alternate = alternate,
          conf = conf
        )
        # Keep correlation rows whose first member matches an input miRNA
        # (case-insensitive on the input side only).
        tt <-
          sapply(1:dim(a)[1], function(x)
            unlist(which(
              nearG$firstExp %in% tolower(a$genes[x])
            )))
        nearG <- nearG[unlist(tt),]
        if (!isUnionCorGene)
          miNearGene <-
            intersect(unlist(miNearGene), nearG$SecondExp)
        else
          miNearGene <- union(unlist(miNearGene), nearG$SecondExp)
      }
    }
    if (length(miNearGene) == 0) {
      message("No common gene is found")
      # Empty NoRCE object; this branch's value is the function's return value.
      new(
        "NoRCE",
        ID = '',
        Term = '',
        geneList = list(),
        pvalue = 0,
        pAdj = 0,
        GeneRatio = '',
        BckRatio = ''
      )
    }
    else{
      miEnrich <-
        goEnrichment(
          gene = miNearGene,
          GOtype = GOtype,
          hg = hg,
          pCut = pCut,
          pAdjCut = pAdjCut,
          pAdjust = pAdjust,
          backG = backGenes,
          backGType = backGType,
          slim = slim,
          min = min
        )
      # Map enriched terms back to the contributing input miRNAs.
      if (length(miEnrich@Term)) {
        miEnrich@ncGeneList <-
          commonGene(
            mrnaobject = miEnrich,
            hg = hg,
            downstream = downstream,
            upstream = upstream,
            inputGene = rbind(a, gene),
            inGeneType = 'mirna'
          )
      }
      # Clean up the assembly-specific globals created by assembly().
      # NOTE(review): `rm(objs, ...)` / `rm(gloVar, ...)` target the *global*
      # environment, but objs/gloVar are locals — these calls likely warn
      # (silenced by options(warn = -1) at the top of this file).
      objs <- ls(pos = ".GlobalEnv")
      gloVar <- c("mart", "go", "genomee", "ucsc")
      rm(list = objs[which(objs %in% gloVar)], pos = ".GlobalEnv")
      rm(objs, pos = ".GlobalEnv")
      rm(gloVar, pos = ".GlobalEnv")
      return(miEnrich)
    }
  }
#' Pathway enrichments of the microRNA genes with mRNAs that fall in the given upstream/downstream regions of the microRNA genes
#'
#' @param gene Input microRNA gene. It supports both pre-miRNA and mature miRNA, however, when target prediction is performed(target= TRUE), miRNA genes should be mature.
#' @param hg Genome assembly of interest for the analysis. Possible assemblies are "mm10" for mouse, "dre10" for zebrafish, "rn6" for rat, "dm6" for fruit fly, "ce11" for worm, "sc3" for yeast, "hg19" and "hg38" for human
#' @param upstream Upstream distance from the transcription start position
#' @param downstream Downstream distance from the transcription end position
#' @param searchRegion Search space of the cis-region. Possible values are "all", "exon", "intron"
#' @param pCut Threshold value for the pvalue. Default value for pCut is 0.05
#' @param pAdjCut Cutoff value for the adjusted p-values using one of given method. Default value is 0.05.
#' @param pAdjust Methods of the adjusted p-values. Possible methods are "bonferroni", "holm", "BH"(default)
#' @param min Minimum number of genes that are required for enrichment. By default, it is set to 5
#' @param pathwayType Pathway database for enrichment. Possible values are 'reactome' for Reactome, 'kegg' for KEGG, 'wiki' for WikiPathways, 'other' for custom database
#' @param near Boolean value presents whether cis-neighbourhood should be considered in the analysis
#' @param target Boolean value shows whether miRNA target prediction should be performed
#' @param isTADSearch Boolean value that shows whether TAD analysis is performed. This value has to be TRUE for TAD analysis.
#' @param TAD TAD genomic regions for the species. Predefined TAD regions or any new TAD regions can be used for the analysis. TAD regions must be formated as GRanges object. Predefined TAD regions are 'tad_hg19', 'tad_hg38', 'tad_mm10', 'tad_dmel' for hg19, hg38, mm9 and dm6 assembly, respectively.
#' @param cellline Cell lines for TAD regions.
#' @param gmtName Custom pathway gmt file
#' @param isSymbol Boolean variable that hold the gene format of the gmt file. If it is set as TRUE, gene format of the gmt file should be symbol. Otherwise, gene format should be ENTREZ ID. By default, it is FALSE.
#' @param express Boolean variable whether co-expression analysis is performed. If this option is set to TRUE, co-expression analysis will be performed.
#' @param isCustomExp Boolean variable whether co-expression analysis with custom data will be performed. When this option is set, exp1 and exp2 parameters must be defined.
#' @param cancer Defines the name of the TCGA project code such as 'BRCA' for correlation analysis. Possible cancer types ACC, BLCA, BRCA, CESC, CHOL, COAD, COADREAD, DLBC, ESCA, GBMLGG, HNSC, KICH, KIPAN, KIRC, KIRP, LGG, LIHC, LUAD, LUSC, OV, PAAD, PCPG, PRAD, READ, SARC, SKCM, STAD, STES, TGCT, THCA, THYM, UCEC, UCS, UVM
#' @param exp1 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param exp2 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param label1 Gene names of the custom exp1 expression data. If it is not provided, column name of the exp1 data will be taken.
#' @param label2 Gene names of the custom exp2 expression data. If it is not provided, column name of the exp2 data will be taken.
#' @param corrMethod Correlation coefficient method that will be used for evaluation. Possible values are "pearson", "kendall", "spearman"
#' @param varCutoff Variance cut-off; genes whose variance is lower than this value will be trimmed
#' @param pcut P-value cut off for the correlation values
#' @param alternate Holds the alternative hypothesis and "two.sided", "greater" or "less" are the possible values.
#' @param conf Confidence level for the returned confidence interval. It is only used for the Pearson correlation coefficient if there are at least 4 complete pairs of observations.
#' @param minAbsCor Cut-off value for the Pearson correlation coefficient of the miRNA-mRNA
#' @param databaseFile Path of miRcancer.db file
#' @param isUnionCorGene Boolean value that shows whether union of the output of the co-expression analysis and the other analysis should be considered
#' @param isGeneEnrich Boolean value whether gene enrichment should be performed
#'
#' @return MiRNA pathway enrichment object for the given input
#'
#' @examples
#' miPath <- mirnaPathwayEnricher(gene = brain_mirna,hg = 'hg19',near = TRUE,
#' pathwayType = 'wiki', pAdjust = "none")
#'
#'
#'
#' @export
# Pathway enrichment for a set of miRNA genes. Candidate-gene collection
# (near / target / TAD / co-expression) mirrors mirnaGOEnricher; the final
# step dispatches on `pathwayType` to KEGG, Reactome, WikiPathways or a
# custom gmt-based enrichment. Returns a NoRCE S4 object.
mirnaPathwayEnricher <-
  function(gene,
           hg,
           upstream = 10000,
           downstream = 10000,
           searchRegion = 'all',
           pCut = 0.05,
           pAdjCut = 0.05,
           pAdjust = "BH",
           min = 5,
           pathwayType = 'kegg',
           near = FALSE,
           target = FALSE,
           isTADSearch = FALSE,
           TAD = tad_hg19,
           cellline = 'all',
           gmtName = '',
           # NOTE(review): default is the *string* 'FALSE', not logical FALSE —
           # confirm downstream consumers treat it as intended.
           isSymbol = 'FALSE',
           express = FALSE,
           isCustomExp = FALSE,
           cancer,
           exp1,
           exp2,
           label1 = '',
           label2 = '',
           corrMethod = "pearson",
           varCutoff = 0.0025,
           minAbsCor = 0.3,
           pcut = 0.05,
           alternate = 'greater',
           isUnionCorGene = FALSE,
           conf = 0.95,
           databaseFile,isGeneEnrich = FALSE) {
    # NOTE(review): these checks only message() and fall through; no stop().
    if (missing(gene)) {
      message("Gene is missing.")
    }
    if (missing(hg)) {
      message(
        "Assembly version is missing. Possible assemblies are 'mm10' for mouse, 'dre10' for zebrafish, 'rn6' for rat, 'dm6' for fruit fly, 'ce11' for worm, 'hg19' and 'hg38' for human."
      )
    }
    assembly(hg)
    gene <- as.data.frame(gene)
    colnames(gene) <- c("genes")
    # Optional target prediction.
    # NOTE(review): unlike mirnaGOEnricher (which passes targetResult[, 3] as
    # "Ensembl_gene"), this passes the whole table as "Ensembl_trans" —
    # verify the inconsistency is intentional.
    if (target) {
      targetResult <- predictmiTargets(gene = gene$genes,
                                       type = "mirna",
                                       hg = hg)
      if (is.null(targetResult))
      {
        message("There is no target!")
        return(NULL)
      }
      targetResult <- unique(targetResult)
      geneTargetLoc <-
        convertGeneID(genetype = "Ensembl_trans",
                      genelist = targetResult,
                      hg = hg)
    }
    # Strip -3p/-5p arm suffixes and keep both stripped and original names.
    a <-
      as.data.frame(gsub(paste(c("-3p", "-5p"), collapse = "|"), "", gene$genes))
    colnames(a) <- 'genes'
    a <- unique(rbind(a, gene$genes))
    geneLoc <-
      convertGeneID(genetype = "mirna",
                    genelist = a$genes,
                    hg = hg)
    # Cis-neighbourhood search around the miRNA loci.
    if (near) {
      if (searchRegion == 'all')
        miNearGene_temp <-
          getUCSC(geneLoc, upstream, downstream, hg)
      if (searchRegion == 'exon')
        miNearGene_temp <-
          getNearToExon(geneLoc, upstream, downstream, hg)
      if (searchRegion == 'intron')
        miNearGene_temp <-
          getNearToIntron(geneLoc, upstream, downstream, hg)
      geneLoc_temp <-
        convertGeneID(genetype = "NCBI",
                      genelist = miNearGene_temp,
                      hg = hg)
      if (target) {
        # Intersect nearby genes with predicted targets.
        geneL <- findOverlapPairs(geneLoc_temp, geneTargetLoc)
        geneLo <- pintersect(geneL, ignore.strand = TRUE)
        miNearGene <-
          getUCSC(
            bedfile = geneLo,
            upstream = 0,
            downstream = 0,
            hg = hg
          )
      }
      else{
        miNearGene <- miNearGene_temp
      }
    }
    else{
      if (target) {
        miNearGene <- targetResult[, 2]
      }
      else{
        miNearGene <- gene
      }
    }
    # Restrict to genes sharing a TAD with the input loci.
    if (isTADSearch) {
      tadGene <-
        getTADOverlap(
          bedfile = geneLoc,
          TAD = TAD,
          cellline = cellline,
          hg = hg,
          near = near,
          upstream = upstream,
          downstream = downstream
        )
      if (near | target)
        miNearGene <-
          as.data.frame(intersect(unlist(miNearGene), unlist(tadGene)))
      else
        miNearGene <- tadGene
    }
    # Co-expression filter: TCGA-based or custom expression matrices.
    if (express) {
      if (!isCustomExp) {
        nearG <- corrbased(
          mirnagene = a$genes,
          cancer = cancer,
          minAbsCor = minAbsCor,
          databaseFile = databaseFile
        )
        # NOTE(review): row positions computed over a$genes index nearG —
        # verify this selects the intended rows.
        d <- nearG[which(a$genes %in% nearG$mirna_base),]
        if (!isUnionCorGene)
          miNearGene <- intersect(unlist(miNearGene), d$feature)
        else
          miNearGene <- union(unlist(miNearGene), d$feature)
      }
      else{
        nearG <- calculateCorr(
          exp1 = exp1,
          exp2 = exp2,
          label1 = label1 ,
          label2 = label2,
          corrMethod = corrMethod,
          varCutoff = varCutoff,
          corCutoff = minAbsCor,
          pcut = pcut,
          alternate = alternate,
          conf = conf
        )
        tt <-
          sapply(1:dim(a)[1], function(x)
            unlist(which(
              nearG$firstExp %in% tolower(a$genes[x])
            )))
        nearG <- nearG[unlist(tt),]
        if (!isUnionCorGene)
          miNearGene <-
            intersect(unlist(miNearGene), nearG$SecondExp)
        else
          miNearGene <- union(unlist(miNearGene), nearG$SecondExp)
      }
    }
    if (length(miNearGene) == 0) {
      message("No common gene is found")
      # Empty NoRCE object; this branch's value is the function's return value.
      new(
        "NoRCE",
        ID = '',
        Term = '',
        geneList = list(),
        pvalue = 0,
        pAdj = 0,
        GeneRatio = '',
        BckRatio = ''
      )
    }
    else{
      # Dispatch on the requested pathway database.
      if (pathwayType == 'kegg') {
        miEnrich <-
          KeggEnrichment(
            genes = miNearGene,
            hg = hg,
            pCut = pCut,
            pAdjCut = pAdjCut,
            pAdjust = pAdjust,
            min = min
          )
      }
      else if (pathwayType == 'reactome') {
        miEnrich <-
          reactomeEnrichment(
            genes = miNearGene,
            hg = hg,
            pCut = pCut,
            pAdjCut = pAdjCut,
            pAdjust = pAdjust,
            min = min
          )
      }
      else if (pathwayType == 'wiki') {
        miEnrich <- WikiEnrichment(
          hg = hg,
          genes = miNearGene,
          pCut = pCut,
          pAdjCut = pAdjCut,
          pAdjust = pAdjust,
          min = min
        )
      }
      else{
        # Any other value: custom gmt-file based enrichment.
        miEnrich <- pathwayEnrichment(
          genes = miNearGene,
          gmtFile = gmtName,
          hg = hg,
          pCut = pCut,
          pAdjCut = pAdjCut,
          pAdjust = pAdjust,
          isSymbol = isSymbol,
          min = min, isGeneEnrich = isGeneEnrich
        )
      }
      # Map enriched terms back to the contributing input miRNAs.
      if (length(miEnrich@Term) > 0)
      {
        miEnrich@ncGeneList <-
          commonGene(
            mrnaobject = miEnrich,
            hg = hg,
            downstream = downstream,
            upstream = upstream,
            inputGene = rbind(a, gene),
            inGeneType = 'mirna'
          )
      }
      # Clean up assembly-specific globals (see note in mirnaGOEnricher:
      # rm(objs/gloVar, pos = ".GlobalEnv") targets globals, not these locals).
      objs <- ls(pos = ".GlobalEnv")
      gloVar <- c("mart", "go", "genomee", "ucsc")
      rm(list = objs[which(objs %in% gloVar)], pos = ".GlobalEnv")
      rm(objs, pos = ".GlobalEnv")
      rm(gloVar, pos = ".GlobalEnv")
      return(miEnrich)
    }
  }
#' GO enrichments of the microRNA regions with mRNAs that fall in the given upstream/downstream regions of the microRNA genes
#'
#' @param region MiRNA region in a bed format
#' @param hg Genome assembly of interest for the analysis. Possible assemblies are "mm10" for mouse, "dre10" for zebrafish, "rn6" for rat, "dm6" for fruit fly, "ce11" for worm, "sc3" for yeast, "hg19" and "hg38" for human
#' @param upstream Upstream distance from the transcription start position
#' @param downstream Downstream distance from the transcription end position
#' @param searchRegion Search space of the cis-region. Possible values are "all", "exon", "intron"
#' @param GOtype Hierarchical category of the GO ontology. Possible values are "BP", "CC", "MF"
#' @param pCut Threshold value for the pvalue. Default value for pCut is 0.05
#' @param pAdjCut Cutoff value for the adjusted p-values using one of given method. Default value is 0.05.
#' @param pAdjust Methods of the adjusted p-values. Possible methods are "bonferroni", "holm", "BH"(default)
#' @param min Minimum number of genes that are required for enrichment. By default, it is set to 5.
#' @param slim Boolean value stating whether set of annotation should be performed for high level GO terms (GO slim)
#' @param backG The set of genes that tested against to the input
#' @param near Boolean value presents whether cis-neighbourhood should be considered in the analysis
#' @param target Boolean value shows whether miRNA target prediction should be performed
#' @param isTADSearch Boolean value that shows whether TAD analysis is performed. This value has to be TRUE for TAD analysis.
#' @param TAD TAD genomic regions for the species. Predefined TAD regions or any new TAD regions can be used for the analysis. TAD regions must be formated as GRanges object. Predefined TAD regions are 'tad_hg19', 'tad_hg38', 'tad_mm10', 'tad_dmel' for hg19, hg38, mm9 and dm6 assembly, respectively.
#' @param cellline Cell lines for TAD regions.
#' @param backGType Type of the background gene. If miRNA gene set is used for background gene, backGType should be set to the 'mirna'
#' @param express Boolean variable whether co-expression analysis is performed. If this option is set to TRUE, co-expression analysis will be performed.
#' @param isCustomExp Boolean variable whether co-expression analysis with custom data will be performed. When this option is set, exp1 and exp2 parameters must be defined.
#' @param cancer Defines the name of the TCGA project code such as 'BRCA' for correlation analysis. Possible cancer types ACC, BLCA, BRCA, CESC, CHOL, COAD, COADREAD, DLBC, ESCA, GBMLGG, HNSC, KICH, KIPAN, KIRC, KIRP, LGG, LIHC, LUAD, LUSC, OV, PAAD, PCPG, PRAD, READ, SARC, SKCM, STAD, STES, TGCT, THCA, THYM, UCEC, UCS, UVM
#' @param exp1 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param exp2 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param label1 Gene names of the custom exp1 expression data. If it is not provided, column name of the exp1 data will be taken.
#' @param label2 Gene names of the custom exp2 expression data. If it is not provided, column name of the exp2 data will be taken.
#' @param corrMethod Correlation coefficient method that will be used for evaluation. Possible values are "pearson", "kendall", "spearman"
#' @param varCutoff Variance cut-off; genes whose variance is lower than this value will be trimmed
#' @param pcut P-value cut off for the correlation values
#' @param alternate Holds the alternative hypothesis and "two.sided", "greater" or "less" are the possible values.
#' @param conf Confidence level for the returned confidence interval. It is only used for the Pearson correlation coefficient if there are at least 4 complete pairs of observations.
#' @param minAbsCor Cut-off value for the Pearson correlation coefficient of the miRNA-mRNA
#' @param databaseFile Path of miRcancer.db file
#' @param isUnionCorGene Boolean value that shows whether union of the output of the co-expression analysis and the other analysis should be considered
#' @return MiRNA GO enrichment object for the given input
#'
#'@examples
#' regionNC <- readbed(dm_file = ncRegion,isText = FALSE)
#' a<- mirnaRegionGOEnricher(region = regionNC, hg = 'hg19', near = TRUE)
#'
#'
#' @export
# GO-term enrichment for miRNA *regions* (BED-style GRanges input) rather
# than miRNA gene names. Candidate-gene collection mirrors mirnaGOEnricher:
# near / target / TAD / co-expression filters feed goEnrichment().
mirnaRegionGOEnricher <-
  function(region,
           hg,
           upstream = 10000,
           downstream = 10000,
           searchRegion = 'all',
           GOtype = "BP",
           pCut = 0.05,
           pAdjCut = 0.05,
           pAdjust = "BH",
           near = FALSE,
           target = FALSE,
           min = 5,
           slim = FALSE,
           backG = '',
           backGType = 'pc-genes',
           isTADSearch = FALSE,
           TAD = tad_hg19,
           cellline = 'all',
           express = FALSE,
           isCustomExp = FALSE,
           cancer,
           exp1,
           exp2,
           label1 = '',
           label2 = '',
           corrMethod = "pearson",
           varCutoff = 0.0025,
           minAbsCor = 0.3,
           pcut = 0.05,
           alternate = 'greater',
           isUnionCorGene = FALSE,
           conf = 0.95,
           databaseFile) {
    # NOTE(review): these checks only message() and fall through; no stop().
    if (missing(region)) {
      message("Region of interest is missing.")
    }
    if (missing(hg)) {
      message(
        "Assembly version is missing. Possible assemblies are 'mm10' for mouse, 'dre10' for zebrafish, 'rn6' for rat, 'dm6' for fruit fly, 'ce11' for worm, 'hg19' and 'hg38' for human."
      )
    }
    assembly(hg)
    # Target prediction: first resolve genes overlapping the regions (NCBI
    # names), then predict their targets and map to Ensembl transcripts.
    if (target) {
      genes <-
        getUCSC(
          bedfile = region,
          downstream = 0,
          upstream = 0,
          hg = hg
        )
      targetResult <- predictmiTargets(gene = genes,
                                       type = "NCBI",
                                       hg = hg)
      if (is.null(targetResult))
      {
        message("There is no target!")
        return(NULL)
      }
      targetResult <- unique(targetResult)
      geneTargetLoc <-
        convertGeneID(genetype = "Ensembl_trans",
                      genelist = targetResult,
                      hg = hg)
    }
    # Cis-neighbourhood search around the input regions.
    if (near) {
      if (searchRegion == 'all')
        miNearGene_temp <- getUCSC(region, upstream, downstream, hg)
      if (searchRegion == 'exon')
        miNearGene_temp <-
          getNearToExon(region, upstream, downstream, hg)
      if (searchRegion == 'intron')
        miNearGene_temp <-
          getNearToIntron(region, upstream, downstream, hg)
      geneLoc_temp <-
        convertGeneID(genetype = "NCBI",
                      genelist = miNearGene_temp,
                      hg = hg)
      if (target) {
        # Intersect nearby genes with predicted targets.
        geneL <- findOverlapPairs(geneLoc_temp, geneTargetLoc)
        geneLo <- pintersect(geneL, ignore.strand = TRUE)
        miNearGene <-
          getUCSC(
            bedfile = geneLo,
            upstream = 0,
            downstream = 0,
            hg = hg
          )
      }
      else{
        miNearGene <- miNearGene_temp
      }
    }
    else{
      if (target) {
        miNearGene <- targetResult[, 2]
      }
      else{
        # Neither near nor target: use the genes overlapping the regions.
        miNearGene <-
          getUCSC(
            bedfile = region,
            upstream = 0,
            downstream = 0,
            hg = hg
          )
      }
    }
    # Restrict to genes sharing a TAD with the input regions.
    if (isTADSearch) {
      tadGene <-
        getTADOverlap(
          bedfile = region,
          TAD = TAD,
          cellline = cellline,
          hg = hg,
          near = near,
          upstream = upstream,
          downstream = downstream
        )
      if (near | target)
        miNearGene <-
          as.data.frame(intersect(unlist(miNearGene), unlist(tadGene)))
      else
        miNearGene <- tadGene
    }
    # Co-expression filter. Note the mRNA-side helper (corrbasedMrna) is used
    # here, unlike the gene-name enrichers which use corrbased.
    if (express) {
      if (!isCustomExp) {
        nearG <-
          corrbasedMrna(
            mRNAgene = miNearGene,
            cancer = cancer,
            minAbsCor = minAbsCor,
            databaseFile = databaseFile
          )
        if (!isUnionCorGene)
          miNearGene <- intersect(unlist(miNearGene), nearG$feature)
        else
          miNearGene <- union(unlist(miNearGene), nearG$feature)
      }
      else{
        nearG <- calculateCorr(
          exp1 = exp1,
          exp2 = exp2,
          label1 = label1 ,
          label2 = label2,
          corrMethod = corrMethod,
          varCutoff = varCutoff,
          corCutoff = minAbsCor,
          pcut = pcut,
          alternate = alternate,
          conf = conf
        )
        if (!isUnionCorGene)
          miNearGene <-
            intersect(unlist(miNearGene), nearG$SecondExp)
        else
          miNearGene <- union(unlist(miNearGene), nearG$SecondExp)
      }
    }
    if (length(miNearGene) == 0) {
      message("No common gene is found")
      # Empty NoRCE object; this branch's value is the function's return value.
      new(
        "NoRCE",
        ID = '',
        Term = '',
        geneList = list(),
        pvalue = 0,
        pAdj = 0,
        GeneRatio = '',
        BckRatio = ''
      )
    }
    else{
      miEnrich <-
        goEnrichment(
          gene = miNearGene,
          GOtype = GOtype,
          hg = hg,
          pCut = pCut,
          pAdjCut = pAdjCut,
          pAdjust = pAdjust,
          slim = slim,
          backG = backG,
          backGType = backGType,
          min = min
        )
      # Clean up assembly-specific globals (see note in mirnaGOEnricher:
      # rm(objs/gloVar, pos = ".GlobalEnv") targets globals, not these locals).
      objs <- ls(pos = ".GlobalEnv")
      gloVar <- c("mart", "go", "genomee", "ucsc")
      rm(list = objs[which(objs %in% gloVar)], pos = ".GlobalEnv")
      rm(objs, pos = ".GlobalEnv")
      rm(gloVar, pos = ".GlobalEnv")
      return(miEnrich)
    }
  }
#' Pathway enrichments of the microRNA regions with mRNAs that fall in the given upstream/downstream regions of the microRNA genes
#'
#' @param region MiRNA region in a bed format
#' @param hg Genome assembly of interest for the analysis. Possible assemblies are "mm10" for mouse, "dre10" for zebrafish, "rn6" for rat, "dm6" for fruit fly, "ce11" for worm, "sc3" for yeast, "hg19" and "hg38" for human
#' @param upstream Upstream distance from the transcription start position
#' @param downstream Downstream distance from the transcription end position
#' @param searchRegion Search space of the cis-region. Possible values are "all", "exon", "intron"
#' @param pCut Threshold value for the pvalue. Default value is 0.05
#' @param pAdjCut Cutoff value for the adjusted p-values using one of given method. Default value is 0.05.
#' @param pAdjust Methods of the adjusted p-values. Possible methods are "bonferroni", "holm", "BH"(default)
#' @param min Minimum number of genes that are required for enrichment. By default, it is set to 5.
#' @param pathwayType Pathway database for enrichment. Possible values are 'reactome' for Reactome, 'kegg' for KEGG, 'wiki' for WikiPathways, 'other' for custom database
#' @param near Boolean value presents whether cis-neighbourhood should be considered in the analysis
#' @param target Boolean value shows whether miRNA target prediction should be performed
#' @param isTADSearch Boolean value that shows whether TAD analysis is performed. This value has to be TRUE for TAD analysis.
#' @param TAD TAD genomic regions for the species. Predefined TAD regions or any new TAD regions can be used for the analysis. TAD regions must be formated as GRanges object. Predefined TAD regions are 'tad_hg19', 'tad_hg38', 'tad_mm10', 'tad_dmel' for hg19, hg38, mm9 and dm6 assembly, respectively.
#' @param cellline Cell lines for TAD regions
#' @param gmtName Custom pathway gmt file
#' @param isSymbol Boolean variable that hold the gene format of the gmt file. If it is set as TRUE, gene format of the gmt file should be symbol. Otherwise, gene format should be ENTREZ ID. By default, it is FALSE.
#' @param express Boolean variable whether co-expression analysis is performed. If this option is set to TRUE, co-expression analysis will be performed.
#' @param isCustomExp Boolean variable whether co-expression analysis with custom data will be performed. When this option is set, exp1 and exp2 parameters must be defined.
#' @param cancer Defines the name of the TCGA project code such as 'BRCA' for correlation analysis. Possible cancer types ACC, BLCA, BRCA, CESC, CHOL, COAD, COADREAD, DLBC, ESCA, GBMLGG, HNSC, KICH, KIPAN, KIRC, KIRP, LGG, LIHC, LUAD, LUSC, OV, PAAD, PCPG, PRAD, READ, SARC, SKCM, STAD, STES, TGCT, THCA, THYM, UCEC, UCS, UVM
#' @param exp1 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param exp2 Custom expression data matrix. Columns must be genes and rows must be patients. If gene names are provided as header, no need to redefine the headers(labels) of the expression data.
#' @param label1 Gene names of the custom exp1 expression data. If it is not provided, column name of the exp1 data will be taken.
#' @param label2 Gene names of the custom exp2 expression data. If it is not provided, column name of the exp2 data will be taken.
#' @param corrMethod Correlation coefficient method that will be used for evaluation. Possible values are "pearson", "kendall", "spearman"
#' @param varCutoff Variance cut-off; genes whose variance is lower than this value will be trimmed
#' @param pcut P-value cut off for the correlation values
#' @param alternate Holds the alternative hypothesis and "two.sided", "greater" or "less" are the possible values.
#' @param conf Confidence level for the returned confidence interval. It is only used for the Pearson correlation coefficient if there are at least 4 complete pairs of observations.
#' @param minAbsCor Cut-off value for the Pearson correlation coefficient of the miRNA-mRNA
#' @param databaseFile Path of miRcancer.db file
#' @param isUnionCorGene Boolean value that shows whether union of the output of the co-expression analysis and the other analysis should be considered
#' @param isGeneEnrich Boolean value whether gene enrichment should be performed
#'
#' @return miRNA pathway enrichment object for the given input
#'
#' @examples
#'
#' regionNC <- readbed(dm_file = ncRegion,isText = FALSE)
#' a <- mirnaRegionPathwayEnricher(region = regionNC, hg = 'hg19', near = TRUE)
#'
#'
#' @export
mirnaRegionPathwayEnricher <-
    function(region,
             hg,
             upstream = 10000,
             downstream = 10000,
             searchRegion = 'all',
             pCut = 0.05,
             pAdjCut = 0.05,
             pAdjust = "BH",
             min = 5,
             pathwayType = 'kegg',
             near = FALSE,
             target = FALSE,
             isTADSearch = FALSE,
             TAD = tad_hg19,
             cellline = 'all',
             gmtName = '',
             isSymbol = FALSE,
             express = FALSE,
             isCustomExp = FALSE,
             cancer,
             exp1,
             exp2,
             label1 = '',
             label2 = '',
             corrMethod = "pearson",
             varCutoff = 0.0025,
             minAbsCor = 0.3,
             pcut = 0.05,
             alternate = 'greater',
             isUnionCorGene = FALSE,
             conf = 0.95,
             databaseFile, isGeneEnrich = FALSE) {
        # --- argument checks: note these only emit messages, execution
        # continues (and will likely fail later if the args are truly absent) ---
        if (missing(region)) {
            message("Region of interest is missing.")
        }
        if (missing(hg)) {
            message(
                "Assembly version is missing. Possible assemblies are 'mm10' for mouse, 'dre10' for zebrafish, 'rn6' for rat, 'dm6' for fruit fly, 'ce11' for worm, 'hg19' and 'hg38' for human."
            )
        }
        # Project helper: sets up assembly-specific globals (mart/ucsc/etc.)
        # for the chosen genome -- side effects, not a return value.
        assembly(hg)
        # --- optional miRNA target prediction on genes overlapping the region ---
        if (target) {
            genes <-
                getUCSC(
                    bedfile = region,
                    downstream = 0,
                    upstream = 0,
                    hg = hg
                )
            targetResult <- predictmiTargets(gene = genes,
                                             type = "NCBI",
                                             hg = hg)
            if (is.null(targetResult))
            {
                message("There is no target!")
                return(NULL)
            }
            targetResult <- unique(targetResult)
            # Column 3 of targetResult holds Ensembl transcript ids
            geneTargetLoc <-
                convertGeneID(genetype = "Ensembl_trans",
                              genelist = targetResult,
                              hg = hg)
        }
        # --- collect the candidate gene set (miNearGene) ---
        if (near) {
            # Genes within upstream/downstream of the region, optionally
            # restricted to exon or intron proximity.
            if (searchRegion == 'all')
                miNearGene_temp <- getUCSC(region, upstream, downstream, hg)
            if (searchRegion == 'exon')
                miNearGene_temp <-
                    getNearToExon(region, upstream, downstream, hg)
            if (searchRegion == 'intron')
                miNearGene_temp <-
                    getNearToIntron(region, upstream, downstream, hg)
            geneLoc_temp <-
                convertGeneID(genetype = "NCBI",
                              genelist = miNearGene_temp,
                              hg = hg)
            if (target) {
                # Intersect nearby genes with predicted-target gene locations
                geneL <- findOverlapPairs(geneLoc_temp, geneTargetLoc)
                geneLo <- pintersect(geneL, ignore.strand = TRUE)
                miNearGene <-
                    getUCSC(
                        bedfile = geneLo,
                        upstream = 0,
                        downstream = 0,
                        hg = hg
                    )
            }
            else{
                miNearGene <- miNearGene_temp
            }
        }
        else{
            if (target) {
                # Column 2 of targetResult holds the target gene symbols
                miNearGene <- targetResult[, 2]
            }
            else{
                # No proximity / target filtering: use genes overlapping the region
                miNearGene <-
                    getUCSC(
                        bedfile = region,
                        upstream = 0,
                        downstream = 0,
                        hg = hg
                    )
            }
        }
        # --- optional restriction to genes in the same TAD as the region ---
        if (isTADSearch) {
            tadGene <-
                getTADOverlap(
                    bedfile = region,
                    TAD = TAD,
                    cellline = cellline,
                    hg = hg,
                    near = near,
                    upstream = upstream,
                    downstream = downstream
                )
            if (near | target)
                miNearGene <-
                    as.data.frame(intersect(unlist(miNearGene), unlist(tadGene)))
            else
                miNearGene <- tadGene
        }
        # --- optional co-expression filtering (pre-computed DB or custom data) ---
        if (express) {
            if (!isCustomExp) {
                nearG <-
                    corrbasedMrna(
                        mRNAgene = miNearGene,
                        cancer = cancer,
                        minAbsCor = minAbsCor,
                        databaseFile = databaseFile
                    )
                # isUnionCorGene toggles intersect vs union with the
                # co-expression result
                if (!isUnionCorGene)
                    miNearGene <- intersect(unlist(miNearGene), nearG$feature)
                else
                    miNearGene <- union(unlist(miNearGene), nearG$feature)
            }
            else{
                nearG <- calculateCorr(
                    exp1 = exp1,
                    exp2 = exp2,
                    label1 = label1 ,
                    label2 = label2,
                    corrMethod = corrMethod,
                    varCutoff = varCutoff,
                    corCutoff = minAbsCor,
                    pcut = pcut,
                    alternate = alternate,
                    conf = conf
                )
                if (!isUnionCorGene)
                    miNearGene <-
                        intersect(unlist(miNearGene), nearG$SecondExp)
                else
                    miNearGene <- union(unlist(miNearGene), nearG$SecondExp)
            }
        }
        if (length(miNearGene) == 0) {
            # No candidate genes survived the filters: return an empty
            # NoRCE result object. NOTE(review): the object is created but
            # not explicitly returned here; the if/else value is what the
            # function yields -- confirm intended.
            message("No common gene is found")
            new(
                "NoRCE",
                ID = '',
                Term = '',
                geneList = list(),
                pvalue = 0,
                pAdj = 0,
                GeneRatio = '',
                BckRatio = ''
            )
        }
        else{
            # --- dispatch to the requested pathway-enrichment backend ---
            if (pathwayType == 'kegg') {
                miEnrich <-
                    KeggEnrichment(
                        genes = miNearGene,
                        hg = hg,
                        pCut = pCut,
                        pAdjCut = pAdjCut,
                        pAdjust = pAdjust,
                        min = min
                    )
            }
            else if (pathwayType == 'reactome') {
                miEnrich <-
                    reactomeEnrichment(
                        genes = miNearGene,
                        hg = hg,
                        pCut = pCut,
                        pAdjCut = pAdjCut,
                        pAdjust = pAdjust,
                        min = min
                    )
            }
            else if (pathwayType == 'wiki') {
                miEnrich <- WikiEnrichment(
                    hg = hg,
                    genes = miNearGene,
                    pCut = pCut,
                    pAdjCut = pAdjCut,
                    pAdjust = pAdjust,
                    min = min
                )
            }
            else{
                # Fallback: user-supplied GMT file
                miEnrich <- pathwayEnrichment(
                    genes = miNearGene,
                    gmtFile = gmtName,
                    hg = hg,
                    pCut = pCut,
                    pAdjCut = pAdjCut,
                    pAdjust = pAdjust,
                    isSymbol = isSymbol,
                    min = min, isGeneEnrich = isGeneEnrich
                )
            }
            # Clean up globals created by assembly(); mutating the global
            # environment from a function is a known wart of this package.
            objs <- ls(pos = ".GlobalEnv")
            gloVar <- c("mart", "go", "genomee", "ucsc")
            rm(list = objs[which(objs %in% gloVar)], pos = ".GlobalEnv")
            rm(objs, pos = ".GlobalEnv")
            rm(gloVar, pos = ".GlobalEnv")
            return(miEnrich)
        }
    }
#' Predict the miRNA targets for the miRNA or mRNA genes, which is specified with type parameter
#'
#' @param gene Data frame of miRNA or mRNA gene. Formats should be NCBI gene name, ENSEMBL gene or transcript id, and mirna
#' @param type Format of the gene, it should be "NCBI" for NCBI gene name, "Ensembl_gene" for ENSEMBL gene id, "Ensembl_trans" for Ensembl transcript id and "mirna" for miRNA gene
#' @param hg Analyzed genome assembly. Possible assemblies are "mm10" for mouse, "dre10" for zebrafish, "rn6" for rat, "dm6" for fruit fly, "ce11" for worm, "hg19" and "hg38" for human
#'
#' @return miRNA:mRNA target sets of the given genes
#'
#' @examples
#'
#' a<- predictmiTargets(gene = brain_mirna, hg = 'hg19', type = "mirna")
#'
#'
#' @export
predictmiTargets <- function(gene, type, hg)
{
    # Predict miRNA:mRNA target pairs for the given genes.
    #
    # gene : data frame with a single column of identifiers.
    # type : "NCBI", "mirna", "Ensembl_gene" or "Ensembl_trans".
    # hg   : genome assembly ('mm10', 'dre10', 'ce11', 'rn6', 'dm6',
    #        'hg19'/'hg38' -> human table).
    #
    # Returns a 4-column data frame (transcript id, gene symbol,
    # Ensembl gene id, miRNA) or NULL when nothing matches.
    #
    # The original behaviour of emitting messages (not errors) for
    # missing arguments is kept.
    if (missing(gene)) {
        message(
            "Genes are missing. Formats should be NCBI gene name, ENSEMBL gene or transcript id, and mirna"
        )
    }
    if (missing(type)) {
        message("Format of the gene is missing.")
    }
    if (missing(hg)) {
        message(
            "Genome assembly version is missing. Possible assemblies are 'mm10' for mouse, 'dre10' for zebrafish, 'rn6' for rat, 'dm6' for fruit fly, 'ce11' for worm, 'hg19' and 'hg38' for human."
        )
    }
    # Load the species-specific target table unless a `targets` object
    # is already visible on the search path (same check as before).
    if (!exists("targets")) {
        targets <- switch(hg,
                          mm10 = NoRCE::targets_mouse,
                          dre10 = NoRCE::targets_zebra,
                          ce11 = NoRCE::targets_worm,
                          rn6 = NoRCE::targets_rat,
                          dm6 = NoRCE::targets_fly,
                          NoRCE::targets_human)
    }
    gene <- as.data.frame(gene)
    colnames(gene) <- c("genes")
    # BUG FIX: match case-insensitively on BOTH sides. The original
    # lowered only the target-table side for the NCBI/Ensembl branches,
    # so conventional upper-case identifiers (e.g. "TP53") never matched;
    # only the mirna branch lowered both sides.
    if (type == "NCBI") {
        where <- targets[which(tolower(targets$X2) %in% tolower(gene$genes)), ]
    }
    else if (type == "mirna") {
        where <-
            targets[which(tolower(targets$X4) %in% tolower(gene$genes)), ]
    }
    else if (type == "Ensembl_gene") {
        where <- targets[which(tolower(targets$X1) %in% tolower(gene$genes)), ]
    }
    else if (type == "Ensembl_trans") {
        where <- targets[which(tolower(targets$X3) %in% tolower(gene$genes)), ]
    }
    else {
        # Previously an unrecognised type fell through and raised the
        # obscure "object 'where' not found" error below.
        stop("Unknown gene type. Use 'NCBI', 'mirna', 'Ensembl_gene' or 'Ensembl_trans'.")
    }
    if (dim(where)[1] == 0) {
        return(NULL)
    }
    else{
        colnames(where) <- c('genesEns', 'genesHugo', 'geneTrans', 'mirna')
        # Strip trailing version suffixes ("ENST0001.2" -> "ENST0001").
        # sub() replaces the fragile apply/strsplit/odd-index construct,
        # which dropped to a vector on plain data.frames (breaking apply)
        # and misaligned for ids without a "." suffix.
        tmp1 <- data.frame(trans = sub("\\..*$", "", where$geneTrans))
        tmp2 <- data.frame(gene = sub("\\..*$", "", where$genesEns))
        # Column order (and hence positional access by callers) is
        # unchanged: trans, symbol, gene, mirna.
        dat <-
            cbind.data.frame(tmp1, where$genesHugo, tmp2, where$mirna)
        return(dat)
    }
}
|
a042c6fb276596dd45d3868d7a5167032fff8f48
|
2e00cb2b8561865fee56455ec46037287bc62495
|
/test.R
|
6dcd819a828f12497701f83698201b04b55aaac6
|
[] |
no_license
|
shivamg13/DynamicWindow
|
e2ddf5c771b89c6edc08501607d5cbdd97d60653
|
a88d640014a62c6e55c8fc1930d08a1355a1705d
|
refs/heads/master
| 2021-01-21T13:18:26.452205
| 2016-04-27T14:18:22
| 2016-04-27T14:18:22
| 54,929,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,208
|
r
|
test.R
|
library(stats)
# Linear demand at price p with additive noise: N * (1 - p / b) + noise.
#
# N : market size
# p : price charged
# b : price at which demand reaches zero (slope parameter); must be non-zero
# i : round index, selects the i-th draw from the global noise vector
#
# NOTE(review): relies on a global `RandUni` noise vector being defined
# before the first call (it is, earlier in this script).
getDemand <- function(N, p, b, i) {
  # BUG FIX: the original only print()ed a warning for b == 0 and then
  # divided by zero anyway, silently returning Inf/NaN. Fail fast instead.
  if (b == 0) {
    stop("Error! b can't be 0.")
  }
  N * (1 - p / b) + RandUni[i]
}
# Cumulative regret over T rounds: at each round the gap between the best
# achievable expected revenue (over all prices in P) and the revenue of
# the price actually pulled (PP), accumulated over time.
getRegret <- function(T, N, P, b, PP) {
  per_round <- vapply(seq_len(T), function(t) {
    best_revenue <- max(P * N * (1 - P / b[t]))
    earned <- PP[t] * N * (1 - PP[t] / b[t])
    best_revenue - earned
  }, numeric(1))
  cumsum(per_round)
}
# Update the cumulative quality estimate for a price-pulling policy.
#
# cq     : current cumulative quality
# DemObs : demands observed in the sliding window
# PP     : prices pulled in the same window
# NF     : normalising factor for the demand gap (between 0 and 1)
# w      : exponential smoothing weight on the old cumulative quality
#
# If the most recent price was never pulled earlier in the window the
# quality is returned unchanged; otherwise the two matching demand
# observations are compared and folded into cq.
getQuality <- function(cq, DemObs, PP, NF = 0.2, w = 0.5) {
  last <- length(PP)
  # Scan for the earliest earlier pull of the same price as the latest
  # one. NB: `1:(last - 1)` is kept verbatim from the original, including
  # its behaviour for a length-1 window (it visits indices 1 and 0).
  mid <- -1
  for (k in 1:(last - 1)) {
    if (PP[k] == PP[last]) {
      mid <- k
      break
    }
  }
  if (mid == -1) {
    return(cq)
  }
  lo <- min(DemObs[length(DemObs)], DemObs[mid])
  hi <- max(DemObs[length(DemObs)], DemObs[mid])
  # Instantaneous quality in (-1, 1]: close to 1 when the two demand
  # observations agree relative to NF * lo.
  inst <- 2 * exp(-(((hi - lo) / (NF * lo))^2)) - 1
  # Exponentially weighted update.
  w * cq + (1 - w) * inst
}
# Variant of getQuality that compares MEAN demand of the latest price in
# the first vs second half of the window, instead of two single samples.
# Prints a diagnostic line on each effective update (side effect).
#
# cq     : current cumulative quality
# DemObs : demands observed in the window
# PP     : prices pulled in the window
# NF     : normalising factor (between 0 and 1)
# w      : smoothing weight on the old cumulative quality
# tempco : opaque value included in the diagnostic print only
# lim    : minimum per-half sample count for full confidence weighting
getMeanQuality <- function(cq,DemObs,PP,NF=0.2,w=0.5,tempco,lim=10){
  #cq: Cumulative quality
  #DemObs: Demand Observed in the window
  #PP: Vector of price pulled
  #NF: Normalising factor (between 0 and 1)
  # Sum of demands for the latest price in the first half of the window.
  d1 = 0
  co1 = 0
  for(i in (ceiling(length(PP)/2)):1){
    if(PP[i]==PP[length(PP)])
    {
      d1=d1+DemObs[i]
      co1=co1+1
    }
  }
  # Same for the second half (the midpoint is counted in both halves).
  d2 = 0
  co2 = 0
  for(i in (ceiling(length(PP)/2)):length(PP)){
    if(PP[i]==PP[length(PP)])
    {
      d2=d2+DemObs[i]
      co2=co2+1
    }
  }
  # No sample of the latest price in one of the halves: no update.
  if(co1==0 || co2==0)
    return (cq)
  dd1=d1/co1
  dd2=d2/co2
  d1=min(dd1,dd2)
  d2=max(dd1,dd2)
  # Instantaneous quality, damped by min(co1, co2)/lim so that thin
  # evidence contributes proportionally less.
  iq = (2*exp(-(((d2-d1)/(0.1*NF*d1))^2))-1)*min(1,min(co1,co2)/lim)
  cq =w*cq+(1-w)*iq
  # Diagnostic trace: tempco, mean gap, per-half counts, instantaneous quality.
  print(c(tempco,(d2-d1),co1,co2,iq))
  return (cq)
}
# Residual standard error of `fit` at X against Y: sqrt(RSS / (n - 2)),
# i.e. simple-linear-regression degrees of freedom. The predictor column
# must be named V2, matching how the fits are built in this script.
sqrtMSE <- function (X, Y, fit)
{
  preds <- predict(fit, newdata = data.frame(V2 = X))
  residuals <- preds - Y
  sqrt(sum(residuals^2) / (length(X) - 2))
}
# Half-width of the (1 - alpha) confidence interval for the mean response
# of `fit` at curX, using the standard simple-linear-regression formula.
# Returns Inf when there are too few points to estimate residual variance.
findConfInt <- function (X, Y, fit, curX, alpha)
{
  n <- length(X)
  if (n <= 2) {
    return(Inf)
  }
  s <- sqrtMSE(X, Y, fit)  # residual standard error (sibling helper)
  xbar <- mean(X)
  # Standard error of the fitted mean at curX.
  se_mean <- s * sqrt(1 / n + (curX - xbar)^2 / sum((X - xbar)^2))
  qt(1 - alpha / 2, df = n - 2) * se_mean
}
# --- Simulation setup: greedy dynamic-pricing bandit over T rounds.
# Demand is linear in price with a piecewise-constant slope b that
# changes twice, creating two abrupt environment shifts.
T = 130 #Horizon
N = 800 #Market Size
StdDev = 40 #Standard Deviation
Tau = 20 #Sliding window length
P = c(2.0,3.0,4.0)
b = c(rep(5.5,40),rep(4.5,50),rep(9.0,40))
#b=c(rep(5.5,36),seq(5.4,4.6,-0.1),rep(4.5,40),seq(5,9,0.5),rep(9.0,36))
set.seed(1)
#RandUni = runif(T,-StdDev,StdDev) #Random error vector
RandUni = rnorm(T,0,StdDev) #Random error vector
ArmPulled = rep(0,T) #Arm pulled
PPulled = rep(0,T) #P[ArmPulled]
DemandObseved = rep(0,T) #Observed Demand
CumRew = rep(0,T) #Cumulative Reward
IQ = rep(0,T) #Vector of instantaneous quality
CQ = rep(0,T) #Vector of cumulative quality
#CQmean = rep(0,T)
##Greedy (fixed-length sliding window)
for (i in 1:T){
  curP=0
  curT=min(Tau,i-1)
  # First two rounds: pull arms 1 and 2 to get initial price diversity.
  if(i <=2){
    curP=i
  } else if (sd(ArmPulled[(i-curT):(i-1)])==0) {
    # Window contains a single price: force an alternative so the linear
    # fit below has two distinct x-values.
    if(ArmPulled[i-1]==1){
      curP=2} else {curP=1}
  } else{
    # Fit demand ~ price on the window and pull the revenue-maximising arm.
    dfr = as.data.frame(cbind(DemandObseved[(i-curT):(i-1)] , (PPulled[(i-curT):(i-1)])))
    fit = lm (V1 ~ V2 , dfr)
    V2=P
    predDem = predict(fit,as.data.frame(V2))
    #print(predDem)
    predRev = predDem*P
    curP = which.max(predRev)
  }
  ArmPulled[i] = curP
  PPulled[i] = P[curP]
  DemandObseved[i] = getDemand(N,P[curP],b[i],i)
  CumRew[i] = CumRew[max(i-1,1)] + PPulled[i]*DemandObseved[i]
  # Track estimation quality of the window (diagnostic only for greedy).
  if(i>1){
    #dfr = as.data.frame(cbind(DemandObseved[(i-curT):(i-1)] , (PPulled[(i-curT):(i-1)])))
    #NormFact=2*sd(DemandObseved[(i-curT):(i)])/mean(DemandObseved[(i-curT):(i)])
    #if(length(which(ArmPulled[i]==ArmPulled[(i-curT):(i)]))>1){
    #NormFact=2*sd(DemandObseved[(i-curT):(i)][ArmPulled[i]==ArmPulled[(i-curT):(i)]])/mean(DemandObseved[(i-curT):(i)][ArmPulled[i]==ArmPulled[(i-curT):(i)]])
    NormFact=0.25
    #print(c(NormFact,mean(DemandObseved[(i-curT):(i)][ArmPulled[i]==ArmPulled[(i-curT):(i)]])))
    IQ[i]=getQuality(CQ[i-1],DemandObseved[(i-curT):(i)],PPulled[(i-curT):(i)],NormFact,w=0.5)
    #CQmean[i]=getMeanQuality(CQmean[i-1],DemandObseved[(i-curT):(i)],PPulled[(i-curT):(i)],NormFact,w=0.5)
    #}
  }
  else{
    IQ[i]=1
    #CQmean[i]=1
  }
}
# Regret curve for the fixed-window greedy policy (red).
RegVec = getRegret(T,N,P,b,PPulled)
plot(c(1:130),RegVec,type="l",col="red",xlab="t",ylab="Regret")
##Greedy
##Greedy with window cutting: the window length Tau grows by 1 each
##round and is halved whenever the cumulative quality CQ turns negative,
##so the policy forgets stale data after an environment shift.
Tau=0
ArmPulled = rep(0,T) #Arm pulled
PPulled = rep(0,T) #P[ArmPulled]
DemandObseved = rep(0,T) #Observed Demand
CumRew = rep(0,T) #Cumulative Reward
TauRec = rep(0,T)
IQ = rep(0,T) #Vector of instantaneous quality
CQ = rep(0,T) #Vector of cumulative quality
for (i in 1:T){
  curP=0
  curT=Tau
  TauRec[i]=Tau
  # Tiny window: rotate through the arms to rebuild price diversity.
  if(Tau < 2){
    if(Tau==0){
      curP=Tau+1}
    else{
      curP=((ArmPulled[i-1]+1)%%(length(P)))+1}
  } else if (sd(ArmPulled[(i-curT):(i-1)])==0) {
    # Single price in the window: force the alternative arm.
    if(ArmPulled[i-1]==1){
      curP=2} else {curP=1}
  } else{
    # Fit demand ~ price on the current window and pick the arm with the
    # highest predicted revenue; also print per-arm confidence widths.
    dfr = as.data.frame(cbind(DemandObseved[(i-curT):(i-1)] , (PPulled[(i-curT):(i-1)])))
    fit = lm (V1 ~ V2 , data=dfr)
    V2=P
    predDem = predict(fit,newdata=as.data.frame(V2))
    #print(predDem)
    predRev = predDem*P
    curP = which.max(predRev)
    for (coi in 1:length(P))
    {
      print(c(Tau, P[coi]*findConfInt((PPulled[(i-curT):(i-1)]),DemandObseved[(i-curT):(i-1)] ,fit,P[coi],0.05), predRev[coi], sqrtMSE(PPulled[(i-curT):(i-1)],DemandObseved[(i-curT):(i-1)] ,fit)))
    }
  }
  ArmPulled[i] = curP
  PPulled[i] = P[curP]
  DemandObseved[i] = getDemand(N,P[curP],b[i],i)
  CumRew[i] = CumRew[max(i-1,1)] + PPulled[i]*DemandObseved[i]
  if(i>1){
    #NormFact=40/400
    # Data-driven normalising factor: 2 * coefficient of variation of the
    # demands seen in the window.
    NormFact=2*sd(DemandObseved[(i-curT):(i)])/mean(DemandObseved[(i-curT):(i)])
    CQ[i]=getQuality(CQ[i-1],DemandObseved[(i-curT):(i)],PPulled[(i-curT):(i)],NormFact,w=0.5)
    # Negative quality signals an environment change: halve the window.
    if(CQ[i]<0)
    {
      Tau=ceiling(Tau/2)
      #CQ[i]=0
    }
    #print (Tau)
  }
  else{
    CQ[i]=1
    #CQmean[i]=1
  }
  Tau=Tau+1
}
# Overlay the window-cutting regret curve (green) on the greedy plot.
RegVec = getRegret(T,N,P,b,PPulled)
lines(c(1:130),RegVec,type="l",col="green")
|
003cb59b13ddc42f4a8d74c1a7a3c42f252f82de
|
c5b0584a453517869b6c03166bb7d3b2729866fc
|
/man/dataextr.time.Rd
|
273da117799a986b21ab8ed35b0ac8ec8ecc5943
|
[
"MIT"
] |
permissive
|
nealhaddaway/predicter
|
9fed1ff27e0b4636df7bdf5373ef95dba144727a
|
14b168f28f2378e4dd3e4a913f4d31b742460bf3
|
refs/heads/master
| 2022-09-26T08:52:22.056061
| 2022-07-29T11:56:19
| 2022-07-29T11:56:19
| 192,175,432
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,085
|
rd
|
dataextr.time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataextr.time.R
\name{dataextr.time}
\alias{dataextr.time}
\title{Time needed to extract full data from all included records}
\usage{
dataextr.time(ca.number = 76.12213, dextraction.day = 6.9,
dextraction.checked = 0)
}
\description{
This function calculates the time needed to extract full data
(i.e. study qualitative or quantitative findings) from all
relevant records in a systematic review, based on the inputs of
the number of records passing the critical appraisal stage
('ca.number', see 'ca.number' function), the
number of studies from which full data can be extracted per day
('dextraction.day'), and the percentage of all studies that are
double checked for consistency of full data extraction
('dextraction.checked'). Where full dual screening of all
records is used, this will equal a percentage of 100 abstracts
being checked. Default values are provided based on the
empirical study of environmental systematic reviews by
Haddaway and Westgate (2018) https://doi.org/10.1111/cobi.13231.
}
|
c4646f9e413c60f55f60af54a21dd9678de2bbd6
|
f995416a8fad3e9089e66bad8c8bcea30209fbaa
|
/tokenize.R
|
a7314c1c8de21a50142fb5965b644d9b35bc193f
|
[
"CC0-1.0"
] |
permissive
|
bschousek/dss_capstone
|
bb8f7e4728cc0a79b3118825c2cc361e997e1ef4
|
27d52f1bc6066f7d9b2865c3c26df2e953b097f4
|
refs/heads/master
| 2016-09-06T20:01:02.867761
| 2015-07-18T03:29:39
| 2015-07-18T03:29:39
| 39,259,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
r
|
tokenize.R
|
# Tokenisation driver: runs an external shell script (proctext.sh) once to
# produce n-gram count files, then loads them into data frames.
pathname='~/coursera/data/split'
filename='en_US.train1.txt'
wd=getwd()
# NOTE(review): changing the working directory in a script is fragile;
# the original directory is restored at the bottom.
setwd(pathname)
if (!file.exists('wordlist.txt')) {
  print('wow')
  # Generate wordlist/bigrams/trigrams/fourgrams via the shell helper.
  system2(file.path(wd,'proctext.sh'),paste(pathname,filename,sep=" "))
}
#fix proctest for spaces
wordlist=read.table('wordlist.txt',header=F,colClasses = c('integer','character'))
# Drop sentence-boundary sentinel tokens.
# NOTE(review): `x[-which(...)]` empties the frame when there are NO
# matches (which(...) is integer(0)) -- assumes the sentinels are always
# present in wordlist.txt.
wordlist=wordlist[-which(wordlist$V2=='begintoken'),]
wordlist=wordlist[-which(wordlist$V2=='endtoken'),]
bigrams=read.table('bigrams.txt',header=F,colClasses = c('integer','character'))
trigrams=read.table('trigrams.txt',header=F,colClasses = c('integer','character'))
fourgrams=read.table('fourgrams.txt',header=F,colClasses = c('integer','character'))
setwd(wd)
|
7b0236eafe9d8db29ad6225af97a363288150fa2
|
b38d44a88c1406384a4d3cde3e07353578bd7ccc
|
/R/plotit.R
|
4afe5669d2f76b94a860c12bc77428a33d01481f
|
[] |
no_license
|
danno11/SMVCIR
|
e35dc5e644119c450c95620e31ef9d8a398a50ec
|
6d5d61129c3bcbec7de74cf1da92f2727ba7477e
|
refs/heads/master
| 2021-01-21T04:41:20.944765
| 2016-07-18T21:55:37
| 2016-07-18T21:55:37
| 55,713,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,065
|
r
|
plotit.R
|
# Dispatch plotting of SMVCIR coordinates in 1, 2 or 3 dimensions.
#
# x          : a "smvcir" object, or a data frame with exactly one factor
#              column (the group variable) and numeric coordinates.
# dimensions : which coordinate dimensions to plot (1, 2 or 3 of them).
# groups     : which group levels to render (capped at the number present).
# GL         : TRUE -> 3D rendering via plotit3D, else 1D/2D.
# ID, build_svm, kernel, svmModel, method : passed through to the
#              plotit1D/plotit2D/plotit3D helpers (defined elsewhere).
#
# Returns NaN on illegal group/dimension input; otherwise called for its
# plotting side effects.
plotit<-function (x, dimensions = c(1, 2), groups = 1:10, GL = FALSE,
               ID = FALSE, build_svm = FALSE, kernel = NULL, svmModel, method = "SMVCIR")
{
  # Unpack an smvcir object; otherwise locate the single factor column.
  if(class(x)=="smvcir"){
    groupnum<-x$groups
    groupcol<-ncol(x$direct)
    compcases<-x$compcases
    datmat<-x<-x$direct ####add x double assignment
  } else{
    # Record the index of each factor column (NA for non-factors).
    groupcol<-NA
    for (i in 1:ncol(x)){
      if (class(x[,i])=="factor"){
        groupcol[i]<-i
      } else{
        groupcol[i]<-NA
      }
    }
    groupcol<-groupcol[!is.na(groupcol)]
    if(length(groupcol)>1)
      stop("Only numerical predictors allowed for smvcir, appears you have more than one variable of class=='factor'")
    if(length(groupcol)==0)
      stop("Missing a dependent group variable of class=='factor' in your coordinates")
    groupnum<-length(unique(x[,groupcol]))
    datmat<-x
    #datmat[,groupcol]<-as.numeric(datmat[,groupcol])
    compcases <- data.frame(complete.cases(datmat)) ###look ahead check line 72
    compcases <- cbind(compcases, t(t(1:nrow(datmat))))
  }
  # More groups requested than exist: render all of them.
  if (length(groups) >= groupnum) {
    bx <- matrix("Rendering all Groups", 1, 1)
    rownames(bx) <- ""
    colnames(bx) <- ""
    print(noquote(bx))
    groups <- 1:(groupnum)
  }
  # Fixed palette/symbol tables; group k gets the k-th entry of each.
  colors <- c("black", "red", "blue", "orange", "purple", "brown",
              "green", "pink", "yellow", "aquamarine")
  gsymbols <- c(0, 15, 2, 17, 3, 4, 8, 1, 16, 6)
  gsymbolsExpl <- c("Empty Square", "Fill Square", "Empty Triangle",
                    "Fill Triangle", "Plus", "X", "Asterisk", "Empty Diamond",
                    "Fill Diamond", "Upside Down Triangle")
  # Build and print a legend table mapping group level -> color/symbol.
  dispmat = c()
  for (i in 1:length(groups)) {
    if ((groups[i] > groupnum) || (groups[i] < 1)) {
      print(noquote("User entered illegal group"))
      return(NaN)
    }
    else {
      dispmat <- rbind(dispmat, c(levels(x[,groupcol])[i], colors[groups[i]], #####change first item of c()
                                  gsymbolsExpl[groups[i]]))
    }
  }
  rownames(dispmat) <- rep("", nrow(dispmat))
  colnames(dispmat) <- c("Group", "Color", "Symbol")
  print(noquote(dispmat))
  # 3D mode needs exactly three dimensions; pad or truncate as needed.
  if (GL == TRUE) {
    if (length(dimensions) < 3) {
      dimensions = c(1, 2, 3)
    }
    else if (length(dimensions) != 3) {
      print(noquote("Warning: More than 3 dimensions specified"))
      dimensions = c(dimensions[1], dimensions[2], dimensions[3])
    }
  }
  # NOTE(review): only a lower bound is checked here; an upper bound
  # against the number of available coordinate columns is not enforced.
  for (i in 1:length(dimensions)) {
    if ((dimensions[i] < 1)) {
      print(noquote("User entered illegal dimension"))
      print(i)
      return(NaN)
    }
  }
  # Dispatch to the dimension-specific helper (defined elsewhere).
  if (length(dimensions) == 1) {
    plotit1D(wmat = datmat, dimension = dimensions, groups = groups)
  }
  else {
    if (GL == TRUE) {
      plotit3D(wmat = datmat, coords = dimensions, groups = groups,
               ID = ID, compcases = compcases, build_svm = build_svm, groupcol = groupcol, kernel=kernel, svmModel = svmModel)
    }
    else {
      plotit2D(wmat = datmat, dimensions = dimensions,
               groups = groups, method = method, build_svm = build_svm, kernel = kernel, svmModel = svmModel)
    }
  }
}
|
eaf9044f4c80640f002dd831fa1ddfebd4baaf73
|
6b286ff42ae9135bcaeb1d8d537460f532ebab45
|
/man/dynBGBvariance-methods.Rd
|
a75a6a16efe87627448ab8eadc254fba274d4859
|
[] |
no_license
|
cran/move
|
6864db092eba41580170d4a09c5124758986b3ea
|
559c7a0ff40bd070373b82b43b880a862d4a33e2
|
refs/heads/master
| 2023-07-21T16:48:30.636533
| 2023-07-06T22:10:02
| 2023-07-06T22:10:02
| 17,697,651
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,031
|
rd
|
dynBGBvariance-methods.Rd
|
\name{dynBGBvariance}
\alias{dynBGBvariance}
\alias{dynBGBvariance,.MoveTrackSingle,numeric,numeric,numeric-method}
\docType{methods}
\title{Calculates the Bivariate Gaussian Bridge motion variance}
\description{A function to calculate the dynamic Bivariate Gaussian Bridge orthogonal and parallel variance for a movement track}
\usage{
dynBGBvariance(move, locErr, margin, windowSize,...)
}
\arguments{
\item{move}{a \code{\link[=Move-class]{move}} object. This object must be in a projection different to longitude/latitude, use \code{spTransform} to transform your coordinates.}
\item{locErr}{single numeric value or vector of the length of coordinates that describes the error of the location (sender/receiver) system in map units. Or a character string with the name of the column containing the location error can be provided.}
\item{margin}{The margin used for the behavioral change point analysis. This number has to be odd.}
\item{windowSize}{The size of the moving window along the track. Larger windows provide more stable/accurate estimates of the brownian motion variance but are less well able to capture more frequent changes in behavior. This number has to be odd.}
\item{...}{Additional arguments}
}
\details{
The function uses \code{windowApply} with the \code{BGBvarbreak} function in order to implement a dynamic calculation of the variance
}
\value{
a \link{dBGBvariance-class} object
}
\references{
Kranstauber, B., Safi, K., Bartumeus, F.. (2014), Bivariate Gaussian bridges: directional factorization of diffusion in Brownian bridge models. Movement Ecology 2:5. doi:10.1186/2051-3933-2-5.
}
\seealso{
\link{dynBGB}, \link{brownian.motion.variance.dyn}
}
\author{Bart Kranstauber & Anne Scharf}
\examples{
data(leroy)
leroy <- leroy[230:265,]
## change projection method to aeqd and center the coordinate system to the track
dataAeqd <- spTransform(leroy, CRSobj="+proj=aeqd +ellps=WGS84", center=TRUE)
dBGBvar <- dynBGBvariance(dataAeqd, locErr=9, windowSize=31, margin=15)
dBGBvar
}
|
dc32525a100314d9f2f7c9f63b55832cc36b3f0c
|
59cc9ac7cbd4d2e903d27841a0dd67c8588b12de
|
/man/extract_shots.Rd
|
22935f2c92782ec216972b7f5ba7b76dab80cb89
|
[] |
no_license
|
imadmali/NBAsportvu
|
aa792709e56ba193a5e92a319ff50fddc98a26a5
|
8575cc9bb52f552e5c3ecac4465c5b7455787ce3
|
refs/heads/master
| 2021-07-13T07:52:32.513932
| 2017-10-11T06:28:29
| 2017-10-11T06:28:29
| 106,061,914
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 822
|
rd
|
extract_shots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_shots.R
\name{extract_shots}
\alias{extract_shots}
\title{Extract Shots from SportVU Data}
\usage{
extract_shots(obj, pbp, margin = c(3, 3))
}
\arguments{
\item{obj}{A data frame returned from \code{\link[NBAsportvu]{sportvu_df}}.}
\item{pbp}{Play-by-play data frame (e.g. an object returned by
\code{\link[NBAapi]{get_pbp}}). One of the columns must contain the
\code{PCTIMESTRING} variable.}
\item{margin}{Margin around the game clock for each (shot) event. A vector of
length two with the first element indicating how far back to go and the
second element indicating how far forward to go (in seconds).}
}
\description{
Extract all field goals from the SportVU data frame within a certain time margin of the field goal occurring.
}
|
c549f548639acea49e2be54e31a4744d909ef708
|
f64974370c08f6bf7f5856cbd1c086b24257304f
|
/R Code/Creating corridor weight layer_AW.R
|
0a6acbd05d708cdc8d55b0131698433afe038504
|
[] |
no_license
|
nzwormgirl/RSP
|
73b7c746a36205d99e45b900e955de425ada3ac8
|
8adc616bad7937da689c7409d1212955d09d274d
|
refs/heads/master
| 2021-01-23T13:17:33.934321
| 2015-05-20T10:46:40
| 2015-05-20T10:46:40
| 8,638,618
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,237
|
r
|
Creating corridor weight layer_AW.R
|
# Build a 100m-resolution corridor weight layer for the Lower Hunter (LH)
# study area from a land-use resistance surface, masked by existing woody
# vegetation and urban areas.
rm(list=ls())
library(raster)
library(rgdal)
library(maptools)
computer <- "//654cw-20990/Amy"
LH.zonation.dir <- paste0(computer,"/GIS_data/Hunter/zonation/corridor/")
# import region mask for LHSA (all non-NA cells set to 0)
LH.mask <- raster(paste0(computer,"/GIS_data/Hunter/mask files/LH.mask.tif"))
LH.mask[!is.na(LH.mask)] <- 0
# LHSA urban clipping mask
LH.urban.clipping.mask <- raster(paste0(computer,"/GIS_data/Hunter/mask files/LH.urban.clipping.mask.tif"))
LH.urban.clipping.mask[!is.na(LH.urban.clipping.mask)] <- 0
# Woody vegetation masks at both working resolutions
WoodyVeg100.mask <- raster(paste0(computer,"/GIS_data/Hunter/mask files/LH.woody.mask.100m.tif"))
WoodyVeg100.mask[!is.na(WoodyVeg100.mask)] <- 0
WoodyVeg25.mask <- raster(paste0(computer,"/GIS_data/Hunter/mask files/LH.woody.mask.25m.tif"))
WoodyVeg25.mask[!is.na(WoodyVeg25.mask)] <- 0
# Land-use resistance surface from the GAP CLoSR dataset
gap.resistance <- raster(paste0(LH.zonation.dir,"LH_GAP_CLoSR_OEH/luse_4fin.tif"))
# read in a shape and mask file file
# mask <- raster('C:/Users/hkujala/work/Mining offsets/LH.urban.clipping.mask.tif')
#
# # creating a corridor domain layer from LU resistance + gap distance
# # read files
# gap.resistance <- raster('C:/Users/hkujala/work/Corridor work/LH_GAP_CLoSR_OEH/luse_4fin.tif')
# woody.patches <- readShapePoly('C:/Users/hkujala/work/Corridor work/LH_GAP_CLoSR_OEH/Patch.shp')
# remove dispersal barriers: recode the 65535 barrier code to the maximum
# ordinary resistance (125)
par(mfrow=c(1,1))
plot(gap.resistance)
gap.resistance[which(gap.resistance[]==65535)] <- 125
# remove existing woody patches (inverse mask keeps only the matrix)
plot(gap.resistance)
# gap.res.project <- projectRaster(gap.resistance, WoodyVeg.mask, method='ngb')
gap.res.matrix <- mask(gap.resistance, WoodyVeg25.mask, inverse=T)
plot(gap.res.matrix)
# invert the resistance values to create an increasing weight
# (resistance 1..125 -> weight 125..1)
inv.gap.res.matrix <- 126-gap.res.matrix
plot(inv.gap.res.matrix)
# aggregate resolution from 25m to 100m
inv.gap.res.matrix.100 <- aggregate(inv.gap.res.matrix, fact=4, fun='mean')
plot(inv.gap.res.matrix.100)
# project and snap to match all other layers
inv.gap.res.matrix.100.proj <- projectRaster(inv.gap.res.matrix.100, LH.urban.clipping.mask, method='ngb')
plot(inv.gap.res.matrix.100.proj)
# clip to cut out buffers and built-up areas and woody patches at the 100m resolution scale
inv.gap.res.matrix.100.proj.clipped <- mask(mask(inv.gap.res.matrix.100.proj, LH.urban.clipping.mask),WoodyVeg100.mask,inverse=T)
plot(inv.gap.res.matrix.100.proj.clipped)
# save the layer (woody patches left as NA)
writeRaster(inv.gap.res.matrix.100.proj.clipped, paste0(LH.zonation.dir,'resistanceNA.tif'), overwrite=T)
# set the woody patch values to zero
inv.gap.res.matrix.100.proj.clipped.patch0 <- inv.gap.res.matrix.100.proj.clipped
inv.gap.res.matrix.100.proj.clipped.patch0[is.na(inv.gap.res.matrix.100.proj.clipped.patch0)] <- 0
inv.gap.res.matrix.100.proj.clipped.patch0 <- mask(inv.gap.res.matrix.100.proj.clipped.patch0, LH.urban.clipping.mask)
writeRaster(inv.gap.res.matrix.100.proj.clipped.patch0,paste0(LH.zonation.dir,"resistance0.tif"),overwrite=T)
# third variant: woody patches coded 101 instead of 0
inv.gap.res.matrix.100.proj.clipped.patch101 <- inv.gap.res.matrix.100.proj.clipped.patch0
inv.gap.res.matrix.100.proj.clipped.patch101[inv.gap.res.matrix.100.proj.clipped.patch0 == 0] <- 101
writeRaster(inv.gap.res.matrix.100.proj.clipped.patch101,paste0(LH.zonation.dir,"resistance.tif"),overwrite=T)
|
85ae2874cd30b217c29b63ef9cf19adda57142bd
|
587008b3a4dcafb2c9cc2f83033c6b3bbdb9a6e5
|
/dataset/nba_stats.R
|
9a0492f85d109cf8fe9c82d66870aed2563937e6
|
[] |
no_license
|
matisdpz/programmation-r
|
5ca5d0b1543b7f91cb50fa65dafeb64c0bc9b715
|
e15d58a0f180b67c93b6fa363fa31dfc6f96c06a
|
refs/heads/master
| 2023-05-30T21:04:23.270204
| 2021-06-07T12:07:55
| 2021-06-07T12:07:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,395
|
r
|
nba_stats.R
|
# Etude statistique sur les tirs tentés sur la saison 2014-2015
#
# REWRITE(review): the original file was a paste of interactive console
# output and contained pervasive syntax errors (`>` prompts left in,
# `lenght`/`colname`/`srt`/`nlevel` typos, `=` used as the comparison
# operator, `;` inside `[ ]`, mismatched braces, `dec = ";"` colliding
# with `sep = ";"`). This version keeps each statement's evident intent.
# Column names are inferred from the statements themselves -- TODO
# confirm against nba.csv.
df <- read.csv(file = "nba.csv", sep = ";", header = TRUE, dec = ",")
# Dimensions, noms de colonnes et structure
nrow(df)
ncol(df)
colnames(df)
str(df)
# Conversion des variables qualitatives en facteurs
df$Period <- as.factor(df$Period)
df$PTS_TYPE <- as.factor(df$PTS_TYPE)
df$SHOOTER <- as.factor(df$SHOOTER)
length(levels(df$Period))
length(levels(df$PTS_TYPE))
length(levels(df$SHOOTER))
summary(df)
sd(df$SHOT_DIST, na.rm = TRUE)
sd(df$SHOT_CLOCK, na.rm = TRUE)
# combien de tirs manqués/réussis
table(df$SHOT_RESULT)
# les quartiles (probs attend des probabilités, pas un nombre de classes)
quantile(df$SHOT_CLOCK, probs = seq(0, 1, 0.25), na.rm = TRUE)
# les déciles
quantile(df$CLOSE_DIST, probs = seq(0, 1, 0.1), na.rm = TRUE)
# nombre de matches différents
liste_game <- unique(df$GAME_ID)
length(liste_game)
# nombre de joueurs différents
nlevels(df$SHOOTER)
# conversion de SHOT_DIST (pieds) en mètres (1 ft = 0.3048 m)
df$SHOT_DIST_METRE <- df$SHOT_DIST * 0.3048
# nombre de points qu'a rapporté la tentative (0, 2 ou 3)
# (as.numeric(as.character(...)) car PTS_TYPE est un facteur)
df$PTS_MARQUES <- ifelse(df$SHOT_RESULT == "made",
                         as.numeric(as.character(df$PTS_TYPE)), 0)
# On supprime la variable GAME_RESULT car elle n'est pas utile
df$GAME_RESULT <- NULL
# création d'un objet sans la première colonne GAME_ID
df2 <- df[, -1]
# Les 100 tirs (réussis ou manqués) les plus lointains
# (decreasing = TRUE : "les plus loin" = distances décroissantes)
rang <- order(df$SHOT_DIST, decreasing = TRUE)
df3 <- df[rang, ]
df3 <- df3[1:100, ]
# Les 100 tirs réussis les plus lointains
df_made <- df[df$SHOT_RESULT == "made", ]
df4 <- df_made[order(df_made$SHOT_DIST, decreasing = TRUE), ]
df4 <- df4[1:100, ]
# Combien de tirs à 3 points a réussi Kobe Bryant ?
df_kobe <- df[df$SHOT_RESULT == "made" &
                df$PTS_TYPE == 3 &
                df$SHOOTER == "Kobe Bryant", ]
dim(df_kobe)
# Le TOP 5 des joueurs ayant marqué le plus de points dans la saison
df_total <- aggregate(PTS_MARQUES ~ SHOOTER, data = df, FUN = sum)
df_total_tri <- df_total[order(df_total$PTS_MARQUES, decreasing = TRUE), ]
df_top5 <- df_total_tri[1:5, ]
# Des graphiques adaptés selon le type de variable
# boxplot pour le numérique, barplot pour les facteurs
build_graph <- function(une_colonne, nom_colonne) {
  if (is.numeric(une_colonne)) {
    print(boxplot(une_colonne, main = nom_colonne))
  } else if (is.factor(une_colonne)) {
    tri <- table(une_colonne)
    print(barplot(tri, main = nom_colonne))
  }
}
# on déroule la fonction sur chaque colonne du data frame
for (colonne in colnames(df)) {
  build_graph(une_colonne = df[[colonne]], nom_colonne = colonne)
}
|
503eea4d4c1d641f83512fa02d67737b13df0ab1
|
a3b4006dd6b77ff0818c751d9c6ae6d32c8a0377
|
/tests/testthat/test-xml-namespaces.R
|
a5bbb4800a8c131c78768f56958732885109b7e5
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
tidyverse/readxl
|
98a61dbb111848100783d5c977becee6cf3cd749
|
3aa8c2ddf9f1d8921f2a8b42ae0bdfa69a22ed9b
|
refs/heads/main
| 2023-07-20T16:12:41.510109
| 2023-07-07T02:58:27
| 2023-07-07T02:58:27
| 32,161,666
| 432
| 130
|
NOASSERTION
| 2023-02-08T23:07:26
| 2015-03-13T14:50:20
|
C++
|
UTF-8
|
R
| false
| false
| 757
|
r
|
test-xml-namespaces.R
|
## #268, #202, #80
## what is special about nonstandard-xml-ns-prefix.xlsx?
## note `ns:id="rId3"` vs `r:id="rId3"`
## `ns:id` has been seen in xlsx written by 3rd party tools
## `r:id` is typical of files written by Excel
# <workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
# <bookViews>
# <workbookView/>
# </bookViews>
# <sheets>
# <sheet xmlns:ns="http://schemas.openxmlformats.org/officeDocument/2006/relationships" name="Sheet1" sheetId="1" ns:id="rId3"/>
# </sheets>
# <definedNames/>
# </workbook>
test_that("XML namespace prefixes are stripped", {
  # The fixture declares its relationship id as `ns:id` instead of the
  # Excel-typical `r:id`; readxl must still resolve the sheet.
  expected <- tibble::tibble(
    a = c(1, 2),
    b = c(3, 4)
  )
  actual <- read_excel(test_sheet("nonstandard-xml-ns-prefix.xlsx"))
  expect_identical(actual, expected)
})
|
85c39804e80aec3ca4e0880173ed6ececa28fadc
|
f80be39f96264e74ea19de9ceac7921804c24c26
|
/R/copulaWrappers.R
|
69ecbb5b75cc9b0ab56ed2557bd84072e105ef65
|
[] |
no_license
|
DianaCarrera/VinecopulaedasExtra
|
f0f8f0d43d6da4e4ca9e695661a140a0573a26f4
|
85913df8feb314eeaa89c874fb2df91b6468b7da
|
refs/heads/master
| 2021-04-29T22:12:21.124752
| 2018-02-19T16:17:05
| 2018-02-19T16:17:05
| 121,633,160
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
copulaWrappers.R
|
# Numerically-safe wrapper around copula:::dnormalCopula: clamps the
# pseudo-observations into the open unit interval and the correlation
# parameter into (-1, 1) before evaluating the density.
dNormalCopulaWrapper <- function (u, copula) {
  tol <- .Machine$double.eps^0.5
  u[u < tol] <- tol
  u[u > 1 - tol] <- 1 - tol
  rho <- copula@parameters
  copula@parameters <- max(min(rho, 1 - tol), -(1 - tol))
  copula:::dnormalCopula(u, copula)
}
# Numerically-safe wrapper around copula:::dMatClayton: clamps the
# pseudo-observations into the open unit interval before evaluating.
dMatClaytonWrapper <- function (u, copula, log=FALSE, ...) {
  tol <- .Machine$double.eps^0.5
  u[u < tol] <- tol
  u[u > 1 - tol] <- 1 - tol
  copula:::dMatClayton(u, copula, log)
}
|
da6cf8f95c1912abcc06f0d218e510c445783be4
|
bd8a7c215d851e6b3c44165baec15e3f13efb665
|
/man/es_file_present.Rd
|
f87a06f92dd2329ad021390be1da206b4247d536
|
[] |
no_license
|
mYstar/easyshiny
|
dfe36d11f97d390cb3e7e5548f64d6939b9de36a
|
9987d571a65ecdb6004cfa112ad80f027694b0fd
|
refs/heads/master
| 2020-04-12T15:02:55.283045
| 2019-06-19T08:19:46
| 2019-06-19T08:19:46
| 162,569,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 518
|
rd
|
es_file_present.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/es_filechecks.R
\name{es_file_present}
\alias{es_file_present}
\title{Check for File}
\usage{
es_file_present(filesets, setnumber, filename)
}
\arguments{
\item{filesets}{the fileset to search in}
\item{setnumber}{the number of the set to search}
\item{filename}{the name of the desired file}
}
\value{
a boolean value (\code{TRUE} if the file exists, else \code{FALSE})
}
\description{
Checks for the existence of a file in a fileset.
}
|
471f684cc54074690a11fd673a9f988863ae4011
|
4a00f3e59dfbebff23885165097e2946b891fb6e
|
/tests/testthat/test_rob_summary.R
|
770a81fd8b9b01adfe3a3b491b23ef7b6272440b
|
[
"MIT"
] |
permissive
|
rdboyes/robvis
|
bcd5b423574698b46a740f608f7b6f852e5583fc
|
c7d672cf56fec1953c2bba395baa414562f04834
|
refs/heads/master
| 2023-03-03T23:48:21.601915
| 2021-02-17T20:58:41
| 2021-02-17T20:58:41
| 309,497,302
| 0
| 1
|
NOASSERTION
| 2021-01-27T22:05:12
| 2020-11-02T21:16:53
|
R
|
UTF-8
|
R
| false
| false
| 3,312
|
r
|
test_rob_summary.R
|
context("Check summary plots")
test_that("ROB - Summary", {
vdiffr::expect_doppelganger("ROB2 - Basic", rob_summary(data_rob2, "ROB2"))
vdiffr::expect_doppelganger("ROB2 - Overall", rob_summary(data_rob2[1:6], "ROB2", overall = FALSE))
vdiffr::expect_doppelganger("ROB2 - Colour - cochrane", rob_summary(data_rob2, "ROB2", colour = "cochrane"))
vdiffr::expect_doppelganger("ROB2 - Colour - colourblind", rob_summary(data_rob2, "ROB2", colour = "colourblind"))
vdiffr::expect_doppelganger("ROB2 - Colour - custom", rob_summary(data_rob2, "ROB2", colour = c("#f442c8", "#bef441", "#000000", "#bef441")))
vdiffr::expect_doppelganger("ROB1 - Basic", rob_summary(data_rob1, "Generic"))
vdiffr::expect_doppelganger("ROB1 - Overall", rob_summary(data_rob1[1:8], "Generic", overall = FALSE))
vdiffr::expect_doppelganger("ROB1 - Colour - cochrane", rob_summary(data_rob1, "Generic", colour = "cochrane"))
vdiffr::expect_doppelganger("ROB1 - Colour - colourblind", rob_summary(data_rob1, "Generic", colour = "colourblind"))
vdiffr::expect_doppelganger("ROB1 - Colour - custom", rob_summary(data_rob1, "Generic", colour = c("#f442c8", "#bef441", "#000000", "#bef441", "#4EA1F7")))
vdiffr::expect_doppelganger("ROB1 - Judgement Labels", rob_summary(data_rob1, "Generic", judgement_labels = c("Test1","Test2","Test3","Test4","NI")))
vdiffr::expect_doppelganger("ROB1 - ROBINS-I Judgement Labels", rob_summary(data_robins, "Generic", judgement_labels = c("Test1","Test2","Test3","Test4","NI"), overall = TRUE))
vdiffr::expect_doppelganger("ROBINS-I - Basic", rob_summary(data_robins, "ROBINS-I"))
vdiffr::expect_doppelganger("ROBINS-I - Overall", rob_summary(data_robins[1:8], "ROBINS-I", overall = FALSE))
vdiffr::expect_doppelganger("ROBINS-I - Colour - cochrane", rob_summary(data_robins, "ROBINS-I", colour = "cochrane"))
vdiffr::expect_doppelganger("ROBINS-I - Colour - colourblind", rob_summary(data_robins, "ROBINS-I", colour = "colourblind"))
vdiffr::expect_doppelganger("ROBINS-I - Colour - custom", rob_summary(data_robins, "ROBINS-I", colour = c("#f442c8", "#bef441", "#000000", "#bef441", "#4EA1F7")))
vdiffr::expect_doppelganger("QUIPS - Basic", rob_summary(data_quips, "QUIPS"))
vdiffr::expect_doppelganger("QUIPS - Overall", rob_summary(data_quips[1:7], "QUIPS", overall = FALSE))
vdiffr::expect_doppelganger("QUIPS - Colour - cochrane", rob_summary(data_quips, "QUIPS", colour = "cochrane"))
vdiffr::expect_doppelganger("QUIPS - Colour - colourblind", rob_summary(data_quips, "QUIPS", colour = "colourblind"))
vdiffr::expect_doppelganger("QUIPS - Colour - custom", rob_summary(data_quips, "QUIPS", colour = c("#f442c8", "#bef441", "#000000", "#bef441", "#4EA1F7")))
vdiffr::expect_doppelganger("QUADAS - Basic", rob_summary(data_quadas, "QUADAS-2"))
vdiffr::expect_doppelganger("QUADAS - Overall", rob_summary(data_quadas[1:5], "QUADAS-2", overall = FALSE))
vdiffr::expect_doppelganger("QUADAS - Colour - cochrane", rob_summary(data_quadas, "QUADAS-2", colour = "cochrane"))
vdiffr::expect_doppelganger("QUADAS - Colour - colourblind", rob_summary(data_quadas, "QUADAS-2", colour = "colourblind"))
vdiffr::expect_doppelganger("QUADAS - Colour - custom", rob_summary(data_quadas, "QUADAS-2", colour = c("#f442c8", "#bef441", "#000000", "#bef441")))
})
|
91095fe1ee958f61e618fa4cb973c5a0a13e93ed
|
fb014eedac7b6198cd989cdbda15a44d86182478
|
/palouse_climate.R
|
92871525b379458fdc2cbcabc29d0f8c1c89b009
|
[] |
no_license
|
erichseamon/palouse_climate
|
c6eeb81813d93590060f36da99f57f86fc2d5fec
|
003dc0372ec13238ea13641d01b3022b4128e9e7
|
refs/heads/master
| 2020-03-19T00:37:45.593426
| 2018-05-30T20:29:00
| 2018-05-30T20:29:00
| 135,494,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,374
|
r
|
palouse_climate.R
|
# Palouse climate summaries: combine the per-scenario "*_summary" files and
# compute monthly means of the variables pr, pdsi, tmmx and pet for several
# year windows.
library(plyr)
library(data.table)  # fix: data.table() is called below but was never attached

setwd("/dmine/data/USDA/agmesh-scenarios/palouse/summaries")
files <- list.files(pattern = '\\_summary')
tables <- lapply(files, read.csv, header = TRUE)
combined.df <- do.call(rbind , tables)

# Subset `df` to [year_min, year_max], derive a numeric month and a
# "year.month" key, and return the monthly means of the four climate
# variables together with the prepared subset (as a data.table).
# Replaces four copy-pasted pipelines from the original script.
monthly_means <- function(df, year_min, year_max = year_min) {
  sub <- subset(df, year >= year_min & year <= year_max)
  sub$month <- trimws(sub$month)
  # month abbreviations in the files are lower case ("jan", "feb", ...)
  sub$numeric_month <- match(sub$month, tolower(month.abb))
  sub <- transform(sub, monthyear = paste(year, numeric_month, sep = "."))
  agg <- function(v) aggregate(sub[[v]], by = list(sub$monthyear), FUN = "mean")
  list(pr = agg("pr"), pdsi = agg("pdsi"), tmmx = agg("tmmx"),
       pet = agg("pet"), data = data.table(sub))
}

# 2007-2015
res_0715 <- monthly_means(combined.df, 2007, 2015)
combined3.pr <- res_0715$pr
combined3.pdsi <- res_0715$pdsi
combined3.tmmx <- res_0715$tmmx
combined3.pet <- res_0715$pet

# just 2011
res_2011 <- monthly_means(combined.df, 2011)
combined_2011.pr <- res_2011$pr
combined_2011.pdsi <- res_2011$pdsi
combined_2011.tmmx <- res_2011$tmmx
combined_2011.pet <- res_2011$pet

# just 2015
# fix: the original subset here used year >= 2007 & year <= 2015, which
# contradicted both this comment and the combined_2015.* variable names.
res_2015 <- monthly_means(combined.df, 2015)
combined_2015.pr <- res_2015$pr
combined_2015.pdsi <- res_2015$pdsi
combined_2015.tmmx <- res_2015$tmmx
combined_2015.pet <- res_2015$pet

# just 2009
res_2009 <- monthly_means(combined.df, 2009)
combined_2009.pr <- res_2009$pr
combined_2009.pdsi <- res_2009$pdsi
combined_2009.tmmx <- res_2009$tmmx
combined_2009.pet <- res_2009$pet
# Preserve the scratch variable the original script left in the workspace
# (its final value came from the last subset processed).
combined2.df <- res_2009$data
|
72a65f0da5f977a0c87a26b4072f1f5b57c597af
|
bc698e4121e87ed44191ccc2b4dd49651d04c294
|
/VTR.R
|
2c6fa1bf7a634642177bcfcf3fb4a5e2632197db
|
[] |
no_license
|
oztalha/2015-11-01-Elections-Turkey
|
9cdb91462aaf483f37228ba5b121bd124a1a5595
|
ddaf4340ba2a46fe64e0f57860d7822fd6b680f2
|
refs/heads/master
| 2021-01-10T05:22:08.172119
| 2015-11-19T06:59:27
| 2015-11-19T06:59:27
| 45,372,055
| 15
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,219
|
r
|
VTR.R
|
# Given a candidate aggregate estimate Bb, compute the implied aggregate Bw
# and the unit-level estimates (bb, bw) as the projection of each unit onto
# its feasible line t = x*bb + (1-x)*bw; units whose projection leaves the
# unit square are snapped to the nearer of the two supplied corner points
# (p1x, p1y) / (p2x, p2y).
# `n` (unit weights) is accepted for call-site symmetry with merfun() but is
# not used here.
merres <- function(Bb, n, x, t, X, T, p1x, p2x, p1y, p2y) {
  # Aggregate accounting identity: T = X*Bb + (1-X)*Bw
  Bw <- (T - X * Bb) / (1 - X)
  denom <- x^2 + (1 - x)^2
  bb <- (x * t - Bw * x * (1 - x) + Bb * (1 - x)^2) / denom
  bb[x == 0] <- 0
  bw <- (t - x * bb) / (1 - x)
  bw[x == 1] <- 0
  # Units whose projected (bb, bw) falls outside [0,1]x[0,1]
  outside <- !(bb >= 0 & bb <= 1 & bw >= 0 & bw <= 1)
  # Squared distances from (Bb, Bw) to the two candidate corner points
  corner1_sq <- (Bb - p1x)^2 + (Bw - p1y)^2
  corner2_sq <- (Bb - p2x)^2 + (Bw - p2y)^2
  nearest_x <- ifelse(corner1_sq < corner2_sq, p1x, p2x)
  bb[outside] <- nearest_x[outside]
  # Recompute bw so the unit identity t = x*bb + (1-x)*bw still holds
  bw <- (t - x * bb) / (1 - x)
  bw[x == 1] <- 0
  list(Bb = Bb, Bw = Bw, bb = bb, bw = bw)
}
# Objective function for optimize() in merill(): the total size-weighted
# squared distance between the candidate aggregate point (Bb, Bw) and each
# unit's feasible line.  Units whose projection leaves the unit square are
# charged the squared distance to the nearer of the two supplied corner
# points instead.
merfun <- function(Bb, n, x, t1, X, T, p1x, p2x, p1y, p2y) {
  # Aggregate accounting identity: T = X*Bb + (1-X)*Bw
  Bw <- (T - X * Bb) / (1 - X)
  denom <- x^2 + (1 - x)^2
  bb <- (x * t1 - Bw * x * (1 - x) + Bb * (1 - x)^2) / denom
  bb[x == 0] <- 0
  bw <- (t1 - x * bb) / (1 - x)
  bw[x == 1] <- 0
  outside <- !(bb >= 0 & bb <= 1 & bw >= 0 & bw <= 1)
  # Squared point-to-line distance for each unit
  d <- (t1 - x * Bb - (1 - x) * Bw)^2 / denom
  corner1_sq <- (Bb - p1x)^2 + (Bw - p1y)^2
  corner2_sq <- (Bb - p2x)^2 + (Bw - p2y)^2
  nearest_sq <- ifelse(corner1_sq < corner2_sq, corner1_sq, corner2_sq)
  nearest_x <- ifelse(corner1_sq < corner2_sq, p1x, p2x)
  bb[outside] <- nearest_x[outside]
  d[outside] <- nearest_sq[outside]
  # bb/bw are corrected here exactly as in merres(); they do not feed the
  # returned sum, which depends only on d.
  bw <- (t1 - x * bb) / (1 - x)
  bw[x == 1] <- 0
  # Size-weighted total over units with a non-zero x share
  sum((d * n)[x > 0])
}
# Estimate unit-level quantities by choosing the aggregate Bb in its feasible
# interval [P1X, P2X] that minimises merfun(), then projecting each unit with
# merres().
# n: unit weights; x: unit-level shares; t: unit-level outcomes.
# Returns the list produced by merres() (Bb, Bw, bb, bw).
merill <- function(n, x, t) {
  # Weighted aggregate shares/outcomes
  wtx <- x * n
  wtt <- t * n
  T <- sum(wtt) / sum(n)
  X <- sum(wtx) / sum(n)
  # Deterministic bounds for the aggregate Bb
  P1X <- max(0, (X - (1 - T)) / X)
  P2X <- min(1, T / X)
  # Per-unit bounds / corner coordinates fed to merfun()/merres()
  # NOTE(review): p1y is the min(1, ...) bound while p2y is the max(0, ...)
  # bound -- the naming looks swapped relative to p1x/p2x.  Behavior is
  # preserved here; confirm the intended pairing of corners.
  p1x <- ifelse(x > 0, apply(cbind(0, (x - (1 - t)) / x), 1, max), 0)
  p2x <- ifelse(x > 0, apply(cbind(1, t / x), 1, min), 0)
  p2y <- ifelse(x < 1, apply(cbind(0, (t - x) / (1 - x)), 1, max), 0)
  p1y <- ifelse(x < 1, apply(cbind(1, t / (1 - x)), 1, min), 0)
  opt <- optimize(merfun, c(P1X, P2X), n = n, x = x, t1 = t, X = X, T = T,
                  p1x = p1x, p2x = p2x, p1y = p1y, p2y = p2y)
  # fix: return the result directly.  The original ended with
  # `res <- merres(...)`, i.e. an assignment, whose value is returned
  # *invisibly* -- `merill(...)` at top level printed nothing.
  merres(opt$minimum, n = n, x = x, t = t, X = X, T = T,
         p1x = p1x, p2x = p2x, p1y = p1y, p2y = p2y)
}
# Choose the (column of X, column of T) pair with the largest weighted
# cross-correlation.  Returns the *positions* of the chosen pair in the
# original (pre-drop) column sets, plus the weighted correlation matrix and
# its maximum.
bestpair<-function(n,X,T)
{
# Keep the full inputs: columns may be dropped below, but the returned
# indices must refer to the original column sets.
origX<-X
origT<-T
# NOTE(review): mapply() pairs each column of X with the corresponding
# *element* of n, so this yields one weighted mean per column only if n is a
# list/data frame of per-column weight vectors (or weights get recycled) --
# confirm the expected shape of n at the call sites.
WMX<-mapply(FUN=weighted.mean,X,n)
# Drop the column whose weighted mean is (near) zero; an empty category
# would degrade the correlation matrix.  Only a single pass is made.
if(min(WMX)<0.00001) {
#print(names(X)[which(WMX == min(WMX))])
X<-X[-which(WMX == min(WMX))]
WMX<-WMX[-which(WMX == min(WMX))]
}
WMT<-mapply(FUN=weighted.mean,T,n)
if(min(WMT)<0.00001) {
#print(names(T)[which(WMT == min(WMT))])
T<-T[-which(WMT == min(WMT))]
WMT<-WMT[-which(WMT == min(WMT))]
}
# Correlation of X columns vs T columns, scaled on both sides by the
# weighted column means (larger categories weigh more).
wcor<-diag(WMX)%*%cor(X,T)%*%diag(WMT)
newwcor<-wcor
#newwcor<-3*wcor-rowSums(wcor)
#newwcor<-t(t(newwcor)-colSums(wcor))
maxcor<-max(newwcor)
# Convert which.max()'s linear (column-major) index into a (row, col) pair;
# a remainder of 0 means "last row of the previous column".
maxcol<-which.max(newwcor) %/% nrow(newwcor)
maxrow<-which.max(newwcor) %% nrow(newwcor)
if (maxrow>0) maxcol<-maxcol+1
if (maxrow==0) maxrow<-nrow(newwcor)
#cat(maxrow, maxcol, which.max(newwcor), "\n")
# Map the winning row/col back to names, then to positions in the original
# column sets; the anchored regex forces an exact name match.
maxrow<-names(X)[maxrow]
maxcol<-names(T)[maxcol]
#cat(maxrow, maxcol, "\n")
maxrow<-grep(paste("^",maxrow,"$", sep=""), names(origX))
maxcol<-grep(paste("^",maxcol,"$", sep=""), names(origT))
#cat(maxrow, maxcol, "\n")
list("maxcol"=maxcol, "maxrow"=maxrow, "newwcor"=newwcor, "maxcor"=maxcor)
}
# Apply one transfer step: remove a per-unit fraction `bb` of source column
# `maxrow`'s share from newX, subtract the explained fraction from target
# column `maxcol` of newT, renormalise both tables row-wise, shrink the
# remaining mass newN accordingly, and accumulate the absolute flow in
# totalbb[unit, maxrow, maxcol].
newdata<-function(newN,newX,newT,maxrow, maxcol, bb,totalbb){
# Fraction of each unit's total mass moved in this step
tempbb<-newX[maxrow]*bb
# Absolute moved mass per unit; remaining mass shrinks by the same amount
nbb<-newN*tempbb
newN<-newN*(1-tempbb)
newX[maxrow]<-newX[maxrow]*(1-bb)
# Renormalise rows of newX to sum to 1; rows with zero total give a
# non-finite coefficient and are left untouched
coefX<-1/rowSums(newX)
newX[is.finite(coefX),]<-newX[is.finite(coefX),]*coefX[is.finite(coefX)]
# Subtract the explained fraction from the target column, then renormalise
newT[maxcol]<-newT[maxcol]-tempbb
coefT<-1/rowSums(newT)
newT[is.finite(coefT),]<-newT[is.finite(coefT),]*coefT[is.finite(coefT)]
# Accumulate the absolute flow source -> target (nbb's first column)
totalbb[,maxrow,maxcol]<-totalbb[,maxrow,maxcol]+nbb[,1]
list("newN"=newN,"newX"=newX,"newT"=newT,"totalbb"=totalbb,"nbb"=nbb)
}
# Iteratively estimate a transition matrix between the categories of myX
# (source shares, rows = units) and myT (target shares), weighted by myN:
# repeatedly pick the best-correlated (source, target) pair (bestpair), fit
# its transfer rate (merill), and peel that mass off (newdata) until the
# remaining mass fraction drops below `stopat`.
# Returns Bb (aggregate source -> target rates) and bb (per-unit rates).
multirate<-function(myN,myX,myT,stopat){
# NOTE(review): `err` is assigned but never used afterwards, and the second
# check overwrites the first, so this row-sum validation has no effect --
# confirm whether it was meant to stop() on malformed input.
err<-1
if(isTRUE(all.equal(rowSums(myX),rep(1,nrow(myX)), 0.001))) err<-0
if(isTRUE(all.equal(rowSums(myT),rep(1,nrow(myT)), 0.001))) err<-0
# Accumulator of absolute flows: [unit, source column, target column]
totalbb<-array(0, dim=c(nrow(myN),length(myX),length(myT)))
newX<-myX; newT<-myT; newN<-myN
i<-0
# Iterate while more than `stopat` of the original mass remains unexplained
while (sum(newN)/sum(myN)>stopat) {
i<-i+1
#cat (i, "\n")
best<-bestpair(newN,newX,newT)
# Fit the transfer rate for the chosen pair (see merill())
res<-merill(newN,newX[best$maxrow],newT[best$maxcol])
newdt<-newdata(newN,newX,newT,best$maxrow,best$maxcol,res$bb,totalbb)
# Progress line: iteration, chosen pair names, estimated aggregate rate
cat (i, names(newX[best$maxrow]), names(newT[best$maxcol]), sum(newdt$nbb)/sum(myX[best$maxrow]*myN), "\n")
newX<-newdt$newX; newT<-newdt$newT; newN<-newdt$newN ; totalbb<-newdt$totalbb
#cat (sum(myN), sum(newN), dim(totalbb), "\n")
}
# Absolute source totals per unit, used to turn accumulated absolute flows
# into per-unit rates
absmyX<-myX*t(myN)
tempabsx<-unlist(absmyX, use.names = FALSE)
absX<-matrix(tempabsx,ncol=length(myX))
finalbb<-newdt$totalbb
for (i in 1:length(myT)){
finalbb[,,i]<-newdt$totalbb[,,i]/absX
}
dimnames(finalbb)<-list(NULL,names(myX),names(myT))
dimnames(newdt$totalbb)<-list(NULL,names(myX),names(myT))
# Aggregate rates: total flow i -> j over total mass in source category i
transitions<-array(0,dim=c(length(myX),length(myT)))
for (i in 1:length(myX)){
for (j in 1:length(myT)){
transitions[i,j]<-sum(newdt$totalbb[,i,j])/sum((myX[i]*myN))
}
}
dimnames(transitions)<-list(names(myX),names(myT))
list("Bb"=transitions, "bb"=finalbb)
}
|
175bc6b31e81a028ffbbeba9c26fa7be11316eb2
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Linear_Algebra_And_Its_Applications_by_David_C._Lay/CH1/EX1.21/Ex1.21.R
|
79b528a687834409fc20667a3a9d5326ca8820eb
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 906
|
r
|
Ex1.21.R
|
#Chapter 1 - Linear Equations In Linear Algebra
#Linear independence of vectors
#Page No.42 / 1-30
#Prob 1
#1.7.1
# NOTE: the original script cleared the console (cat("\014")) and deleted
# every object in the workspace (rm(list = ls(all = TRUE))).  Wiping the
# caller's workspace is a destructive side effect, so both lines were removed.

print('given vectors u, v and w are')
# Column vectors built directly; the original built a 1x3 matrix and
# transposed it, which yields the same 3x1 matrix.
u <- matrix(c(5, 0, 0), ncol = 1)
print(u)
v <- matrix(c(7, 2, -6), ncol = 1)
print(v)
w <- matrix(c(9, 4, -8), ncol = 1)
print(w)

print('the augmented matrix is')
# Columns are u, v, w and the zero right-hand side of the homogeneous system
a <- matrix(c(5, 7, 9, 0,
              0, 2, 4, 0,
              0, -6, -8, 0),
            nrow = 3, ncol = 4, byrow = TRUE)
print(a)

print('R3=R3+3*R2')
# One row-reduction step clears the -6 in column 2 of row 3
a[3, ] <- a[3, ] + 3 * a[2, ]
print(a)

# Every column of the coefficient part now has a pivot
print('there are no free variables')
print('hence, the homogeneous equation has only trivial solution and the vectors are linearly independent')
|
8ad4624457a35c133dbeed392d343b7609f71e66
|
dc108bf7f57d87f8d56c9587d382313d4ccf9be0
|
/model-files/verification/hwysummary_marinvsmtc_v2.R
|
58a8018307775d97d9c7e1ed7587630a9ad8b37c
|
[] |
no_license
|
BayAreaMetro/travel-model-two
|
ac7066ab72e4c54c2b575b055259a51183b6a2eb
|
ced0a27a9a850f9ae59adf030d506c34d4863884
|
refs/heads/master
| 2023-08-13T22:34:16.576626
| 2023-04-10T18:33:34
| 2023-04-10T18:33:34
| 24,194,131
| 10
| 12
| null | 2023-04-12T22:17:39
| 2014-09-18T15:41:03
|
Java
|
UTF-8
|
R
| false
| false
| 13,226
|
r
|
hwysummary_marinvsmtc_v2.R
|
#Marin vs MTC highway assignment summary script
#Khademul Haque, khademul.haque@rsginc.com, August 2018

# Libraries
library(foreign)
library(dplyr)
library(tidyverse)
library(ggplot2)

## User Inputs
# Directories (absolute project root; all other paths hang off it)
WD <- "E:/projects/clients/marin/HighwaySummary"
DataDir_marin <- file.path(WD, "data/assign/marin")
DataDir_mtc <- file.path(WD, "data/assign/mtc")
# fix: the original built OutputDir from the undefined variable WORKING_DIR,
# which aborts the script with "object 'WORKING_DIR' not found"
OutputDir <- file.path(WD, "data/JPEG")

## Data read
# Loaded-network link tables for the five assignment periods, both models.
# (as.is = TRUE keeps character columns as character; was `T` before.)
read_load <- function(data_dir, period) {
  read.dbf(file.path(data_dir, paste0("load", period, ".dbf")), as.is = TRUE)
}
marin_am <- read_load(DataDir_marin, "AM")
marin_ea <- read_load(DataDir_marin, "EA")
marin_md <- read_load(DataDir_marin, "MD")
marin_pm <- read_load(DataDir_marin, "PM")
marin_ev <- read_load(DataDir_marin, "EV")
mtc_am <- read_load(DataDir_mtc, "AM")
mtc_ea <- read_load(DataDir_mtc, "EA")
mtc_md <- read_load(DataDir_mtc, "MD")
mtc_pm <- read_load(DataDir_mtc, "PM")
mtc_ev <- read_load(DataDir_mtc, "EV")
# Sum the ten vehicle-class volume columns (V1_1..V10_1) into one total for a
# single time period, attach a linkID key (OLD_A concatenated with OLD_B),
# and optionally add MAZ-to-MAZ volume (present on the Marin network only) or
# keep the node ID columns.  Replaces ten near-identical pipelines.
period_volume <- function(df, vol_name, include_maz = FALSE, keep_nodes = FALSE) {
  vol <- rowSums(df[, paste0("V", 1:10, "_1")])
  if (include_maz) {
    vol <- vol + df$MAZMAZVOL
  }
  out <- data.frame(OLD_A = df$OLD_A, OLD_B = df$OLD_B,
                    stringsAsFactors = FALSE)
  out[[vol_name]] <- vol
  # same result as str_c() for non-missing node IDs
  out$linkID <- paste0(out$OLD_A, out$OLD_B)
  if (!keep_nodes) {
    out$OLD_A <- NULL
    out$OLD_B <- NULL
  }
  out
}

# Marin per-period totals (the EA table keeps the node IDs because the daily
# table, built from it, carries them forward)
marin_ea2 <- period_volume(marin_ea, "VolEA_tot", include_maz = TRUE, keep_nodes = TRUE)
marin_am2 <- period_volume(marin_am, "VolAM_tot", include_maz = TRUE)
marin_md2 <- period_volume(marin_md, "VolMD_tot", include_maz = TRUE)
marin_pm2 <- period_volume(marin_pm, "VolPM_tot", include_maz = TRUE)
marin_ev2 <- period_volume(marin_ev, "VolEV_tot", include_maz = TRUE)

# Daily Marin total = sum of the five period totals, joined on linkID
marin_daily <- marin_ea2 %>%
  left_join(marin_am2, by = "linkID") %>%
  left_join(marin_md2, by = "linkID") %>%
  left_join(marin_pm2, by = "linkID") %>%
  left_join(marin_ev2, by = "linkID") %>%
  mutate(vol_daily = VolEA_tot + VolAM_tot + VolMD_tot + VolPM_tot + VolEV_tot)

# MTC per-period totals (no MAZ volume column on the MTC network)
mtc_ea2 <- period_volume(mtc_ea, "VolEA_tot_mtc")
mtc_am2 <- period_volume(mtc_am, "VolAM_tot_mtc")
mtc_md2 <- period_volume(mtc_md, "VolMD_tot_mtc")
mtc_pm2 <- period_volume(mtc_pm, "VolPM_tot_mtc")
mtc_ev2 <- period_volume(mtc_ev, "VolEV_tot_mtc")

# Daily MTC total
mtc_daily <- mtc_ea2 %>%
  left_join(mtc_am2, by = "linkID") %>%
  left_join(mtc_md2, by = "linkID") %>%
  left_join(mtc_pm2, by = "linkID") %>%
  left_join(mtc_ev2, by = "linkID") %>%
  mutate(vol_daily_mtc = VolEA_tot_mtc + VolAM_tot_mtc + VolMD_tot_mtc + VolPM_tot_mtc + VolEV_tot_mtc)
# Draw scatterplot
# One helper replaces six copy-pasted plotting sections (Daily/AM/EA/MD/PM/EV).
# `dat` must carry the Marin volume in column x and the MTC volume in column y;
# the plot shows the points, the OLS fit, a 45-degree reference line and an
# R^2 annotation, and is saved as assign_summary_<period>.jpeg in out_dir.
save_comparison_plot <- function(dat, period_label, out_dir) {
  # Annotation positions scaled off the data range
  x_pos <- round(max(dat$x) * 0.40)
  x_pos1 <- round(max(dat$x) * 0.75)
  y_pos <- round(max(dat$y) * 0.80)
  max_lim <- round(max(dat$x, dat$y)) + 1000
  p <- ggplot(data = dat) +
    geom_point(mapping = aes(x = x, y = y)) +
    geom_smooth(mapping = aes(x = x, y = y), method = "lm", formula = y ~ x) +
    geom_abline(intercept = 0, slope = 1, linetype = 2) +
    geom_text(x = x_pos1, y = 0, label = "- - - - : 45 Deg Line", parse = FALSE, color = "black") +
    scale_x_continuous(limits = c(0, max_lim)) +
    scale_y_continuous(limits = c(0, max_lim)) +
    xlab("Marin") +
    ylab("MTC") +
    ggtitle(paste0("MTC vs Marin (", period_label, " Volume Comparison)")) +
    theme(axis.text.x = element_text(size = 20),
          axis.title = element_text(size = 20),
          axis.text.y = element_text(size = 20),
          legend.text = element_text(size = 15),
          legend.title = element_text(size = 20))
  # R^2 annotation from the same linear fit geom_smooth() displays
  m <- lm(y ~ x, data = dat)
  eq <- substitute(italic(r)^2~"="~r2,
                   list(r2 = format(summary(m)$r.squared, digits = 3)))
  # dftext's x/y are placeholders; the geom_text() x/y arguments override them
  dftext <- data.frame(x = 70, y = 50, eq = as.character(as.expression(eq)))
  p <- p + geom_text(aes(label = eq, colour = "darkred"), data = dftext,
                     parse = TRUE, x = x_pos, y = y_pos, show.legend = FALSE)
  # fix: pass the plot explicitly.  The original relied on ggsave()'s default
  # last_plot(), which is only set when a plot is *printed* -- reliable in
  # interactive use, but not when the script is source()d, and never inside
  # a function.
  ggsave(file = paste(out_dir, paste("assign_summary_", period_label, ".jpeg", sep = ""), sep = "/"),
         plot = p, width = 12, height = 10)
}

## Daily
assign_final <- marin_daily %>%
  left_join(mtc_daily, by = "linkID") %>%
  select(OLD_A, OLD_B, vol_daily, linkID, vol_daily_mtc)
colnames(assign_final) <- c("OLD_A", "OLD_B", "x", "linkID", "y")
save_comparison_plot(assign_final, "Daily", OutputDir)

## AM
assign_final_am <- marin_am2 %>%
  left_join(mtc_am2, by = "linkID") %>%
  select(VolAM_tot, linkID, VolAM_tot_mtc)
colnames(assign_final_am) <- c("x", "linkID", "y")
save_comparison_plot(assign_final_am, "AM", OutputDir)

## EA
assign_final_ea <- marin_ea2 %>%
  left_join(mtc_ea2, by = "linkID") %>%
  select(VolEA_tot, linkID, VolEA_tot_mtc)
colnames(assign_final_ea) <- c("x", "linkID", "y")
save_comparison_plot(assign_final_ea, "EA", OutputDir)

## MD
assign_final_md <- marin_md2 %>%
  left_join(mtc_md2, by = "linkID") %>%
  select(VolMD_tot, linkID, VolMD_tot_mtc)
colnames(assign_final_md) <- c("x", "linkID", "y")
save_comparison_plot(assign_final_md, "MD", OutputDir)

## PM
assign_final_pm <- marin_pm2 %>%
  left_join(mtc_pm2, by = "linkID") %>%
  select(VolPM_tot, linkID, VolPM_tot_mtc)
colnames(assign_final_pm) <- c("x", "linkID", "y")
save_comparison_plot(assign_final_pm, "PM", OutputDir)

## EV
assign_final_ev <- marin_ev2 %>%
  left_join(mtc_ev2, by = "linkID") %>%
  select(VolEV_tot, linkID, VolEV_tot_mtc)
colnames(assign_final_ev) <- c("x", "linkID", "y")
save_comparison_plot(assign_final_ev, "EV", OutputDir)
|
0a422ba6d8a6e8ab2579d7697c9a7cba2a4cdb13
|
24101c3cbac3bdaa84ecb9a93ff9c47528169f2f
|
/07_Metanalysis_depth_effect.R
|
50951dc35fecd9a788cc15cf2abe30032082ee71
|
[] |
no_license
|
jordipages-repo/seagrass_Cstocks_pub
|
4fb5dafd2a7a70769fb15dd53b636c99277c2086
|
7640f03c041770e4e93917d27e60a0bc88d4d626
|
refs/heads/main
| 2023-04-13T15:39:44.596829
| 2022-10-18T09:25:30
| 2022-10-18T09:25:30
| 446,396,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,292
|
r
|
07_Metanalysis_depth_effect.R
|
# # # # # #
# Analysing the Forqurean's data set provided by Hilary Kennedy
# Jordi F. Pagès
# 15-10-2019
# University of Barcelona
# # # # # #
# # # # # # # # #
# LIBRARIES ----
# # # # # # # # #
library(tidyverse)
library(tidylog)
library(stringr)
library(forcats)
library(RColorBrewer)
library(metafor)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Loading the main data set and checking for errors ----
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
forqurean <- read_csv(file = "Data_forqurean.csv")
glimpse(forqurean)
# The sourced script presumably creates `cstocks` (used below); the two tidy
# objects it also creates are not needed here and are removed.
source("01_DataImport&Corrections_CarbonReview.R")
rm(list = c("cstocks_tidy", "cstocks_tidy20Stocks"))
# We will only keep cores with more than 3 depths
# NOTE(review): left_join() has no `key` argument -- "key" falls into `...`
# and is silently ignored, so this is a natural join on ALL columns shared
# between forqurean and cstocks, not necessarily just CoreID.  Replace with
# an explicit by = ... after confirming which columns the frames share.
forqurean_enough <- forqurean %>%
  left_join(cstocks, key = "CoreID") %>% # This left_join is to add info about the study source from where we took core info
  group_by(CoreID) %>%
  summarise(n = n(),
            Species = first(Species),
            Source = first(Source)) %>%
  filter(n>3)
# And join with original data set to recover all columns (because by summarising we lost some columns), and we also join
# with cstocks to get info about the study source
# NOTE(review): same silently-ignored `key =` issue as above.
forqureanOK <- forqurean_enough %>%
  left_join(forqurean, key = "CoreID") %>%
  # 0.01 presumably converts %OC (a percentage) to a fraction -- confirm units
  mutate(Carbon_density = 0.01*Organic_carbon*Dry_bulk_density)
# # # # # # # # # # # # # # # # # # # # # # # #
# Checking if all cores have lower %C as depth increases ----
# # # # # # # # # # # # # # # # # # # # # # # #
# We'll first nest data to then use purrr
# One row per core; the `data` list-column holds that core's depth slices.
by_coreID <- forqureanOK %>%
  group_by(CoreID) %>%
  nest()
# Model fitting function (just a linear model) for organic carbon
# Fits %OC ~ slice depth for one core's slices; a negative slope means
# organic carbon declines with depth.
core_model_organic_carbon <- function(df){
  lm(Organic_carbon ~ Depth_centre_slice, data = df)
}
# Model fitting function (just a linear model) for dry bulk density
core_model_bulkd <- function(df){
  lm(Dry_bulk_density ~ Depth_centre_slice, data = df)
}
# Model fitting function (just a linear model) for carbon density
core_model_carbon_density <- function(df){
  lm(Carbon_density ~ Depth_centre_slice, data = df)
}
# We now apply the function (linear model) to each data frame in each row of the nested data frame
by_coreID <- by_coreID %>%
  mutate(model_organic_carbon = map(data, core_model_organic_carbon),
         model_bulkd = map(data, core_model_bulkd),
         model_carbon_density = map(data, core_model_carbon_density))
# Now we want the coefficients. We use broom:tidy
# Tidy the three per-core model lists into coefficient columns.  unnest()
# makes the duplicated broom column names unique by suffixing 1/2, and the
# rename() below depends on that positional scheme (fragile if broom or
# tidyr change their naming).
by_coreID <- by_coreID %>%
  mutate(coefs_organic_carbon = map(model_organic_carbon, broom::tidy),
         coefs_bulkd = map(model_bulkd, broom::tidy),
         coefs_carbon_density = map(model_carbon_density, broom::tidy)) %>%
  # fix: TRUE instead of T (T is an ordinary, reassignable binding)
  unnest(c(coefs_organic_carbon, coefs_bulkd, coefs_carbon_density), .drop = TRUE) %>%
  rename(term_organic_carbon = term,
         estimate_organic_carbon = estimate,
         std.error_organic_carbon = std.error,
         statistic_organic_carbon = statistic,
         p.value_organic_carbon = p.value,
         term_bulkd = term1,
         estimate_bulkd = estimate1,
         std.error_bulkd = std.error1,
         statistic_bulkd = statistic1,
         p.value_bulkd = p.value1,
         term_carbon_density = term2,
         estimate_carbon_density = estimate2,
         std.error_carbon_density = std.error2,
         statistic_carbon_density = statistic2,
         p.value_carbon_density = p.value2)
# We only want the Depth_centre_slice coeffs
# Keep only the slope rows (drop intercepts) for all three responses.
all_coefs <- by_coreID %>%
  select(-data, -model_organic_carbon, -model_bulkd, -model_carbon_density) %>%
  filter(term_organic_carbon == "Depth_centre_slice" & term_bulkd == "Depth_centre_slice" & term_carbon_density == "Depth_centre_slice") %>%
  print(n = Inf)
# # # # # # # # # # # # # # # # # # # # # # # #
# Meta-analysis to check overall ----
# significance of slopes of % organic carbon #
# vs depth and dry bulk density vs depth #
# # # # # # # # # # # # # # # # # # # # # # # #
# we need the slopes, which can directly be used as effect sizes (Koricheva book)
# and also the variance, so we calculate it from the std.errors
# fix: left_join() has no `key` argument -- the original's key = "CoreID" was
# silently ignored and dplyr fell back to a natural join.  At this point
# CoreID is the only column all_coefs shares with forqurean_enough, so making
# the key explicit with by = "CoreID" preserves behavior while removing the
# silently-ignored argument.
all_coefs <- all_coefs %>%
  left_join(forqurean_enough, by = "CoreID") %>%
  # slope sampling variance approximated as SE^2 * n (slices per core, from
  # forqurean_enough)
  mutate(variance_organic_carbon = (std.error_organic_carbon^2)*n,
         variance_bulkd = (std.error_bulkd^2)*n,
         variance_carbon_density = (std.error_carbon_density^2)*n,
         Species = factor(Species),
         Source = ifelse(is.na(Source), "not found", Source))
# # # # # # # # # # # # # # # # # # #
# ORGANIC CARBON META-ANALYSIS ----
# # # # # # # # # # # # # # # # # # #
# Effect size: per-core slope of organic carbon vs depth, with its sampling
# variance.  Each model's console output is pasted as comments below its call.
# Fixed Effects Meta-analysis for organic carbon
organic_carbon_meta <- rma(yi = estimate_organic_carbon,
                           vi = variance_organic_carbon,
                           method = "FE",
                           data = all_coefs)
summary(organic_carbon_meta)
# Fixed-Effects Model (k = 254)
#
# logLik deviance AIC BIC AICc
# 686.0280 173.6426 -1370.0560 -1366.5186 -1370.0401
#
# I^2 (total heterogeneity / total variability): 0.00%
# H^2 (total variability / sampling variability): 0.69
#
# Test for Heterogeneity:
# Q(df = 253) = 173.6426, p-val = 1.0000
#
# Model Results:
#
# estimate se zval pval ci.lb ci.ub
# -0.0007 0.0005 -1.5919 0.1114 -0.0017 0.0002
#
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Test for heterogeneity validates this fixed effects model, because p-value is high.
# However, we have reasons to think there might be study level heterogeneity. Let's check it.
# Random Effects Meta-analysis for organic carbon
organic_carbon_meta2 <- rma(yi = estimate_organic_carbon,
                            vi = variance_organic_carbon,
                            method = "REML",
                            test = "knha",
                            data = all_coefs)
summary(organic_carbon_meta2)
# Random-Effects Model (k = 254; tau^2 estimator: REML)
#
# logLik deviance AIC BIC AICc
# 685.5885 -1371.1769 -1367.1769 -1360.1101 -1367.1289
#
# tau^2 (estimated amount of total heterogeneity): 0.0000 (SE = 0.0000)
# tau (square root of estimated tau^2 value): 0.0027
# I^2 (total heterogeneity / total variability): 11.44%
# H^2 (total variability / sampling variability): 1.13
#
# Test for Heterogeneity:
# Q(df = 253) = 173.6426, p-val = 1.0000
#
# Model Results:
#
# estimate se tval pval ci.lb ci.ub
# -0.0017 0.0005 -3.4322 0.0007 -0.0027 -0.0007 ***
#
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Adding species as a moderator
organic_carbon_meta3 <- rma(yi = estimate_organic_carbon,
                            vi = variance_organic_carbon,
                            mods = ~Species,
                            method = "REML",
                            test = "knha",
                            data = all_coefs)
summary(organic_carbon_meta3)
# Species is not a significant moderator.
# Adding study as a moderator, to see if we need to include it as a random effect to deal with NON INDEPENDENCE OF STUDIES WITHIN PAPERS
organic_carbon_meta4 <- rma.mv(yi = estimate_organic_carbon,
                               V = variance_organic_carbon,
                               mods = ~Source,
                               method = "REML",
                               data = all_coefs)
summary(organic_carbon_meta4)
# Source is significant P = 0.0503
# Let's add it as a random effect.
# Adding a random effect (Source), to account for all those studies coming from the same paper. We first need to take NA's out.
organic_carbon_meta5 <- rma.mv(yi = estimate_organic_carbon,
                               V = variance_organic_carbon,
                               random = ~ 1|Source,
                               method = "REML",
                               data = all_coefs)
summary(organic_carbon_meta5)
# Multivariate Meta-Analysis Model (k = 254; method: REML)
#
# logLik Deviance AIC BIC AICc
# 687.7859 -1375.5717 -1371.5717 -1364.5049 -1371.5237
#
# Variance Components:
#
# estim sqrt nlvls fixed factor
# sigma^2 0.0000 0.0020 17 no Source
#
# Test for Heterogeneity:
# Q(df = 253) = 173.6426, p-val = 1.0000
#
# Model Results:
#
# estimate se zval pval ci.lb ci.ub
# -0.0017 0.0009 -1.9844 0.0472 -0.0034 -0.0000 *
#
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Now let's include moderator = Species
organic_carbon_meta6 <- rma.mv(yi = estimate_organic_carbon,
                               V = variance_organic_carbon,
                               mods = ~Species,
                               random = ~ 1|Source,
                               method = "REML",
                               data = all_coefs)
summary(organic_carbon_meta6)
# Species not significant moderator
# Thus, the best model, is the one with Source as random effects, but without species as moderator.
# That's organic_carbon_meta5
# Funnel plots
# Publication-bias diagnostics; trimfill() augments the fixed-effects model
# with imputed "missing" studies before plotting.
funnel(organic_carbon_meta5)
funnel(trimfill(organic_carbon_meta))
# # # # # # # # # # # # # # # # # #
# BULK DENSITY META-ANALYSIS ----
# # # # # # # # # # # # # # # # # #
# Same model-selection workflow as for organic carbon:
# FE -> RE -> RE + Species moderator -> Source as moderator ->
# Source as random intercept -> Source random + Species moderator.
# Fixed effects meta-analysis for BULK DENSITY
bulkd_meta <- rma(yi = estimate_bulkd,
vi = variance_bulkd,
method = "FE",
data = all_coefs)
summary(bulkd_meta)
# Fixed-Effects Model (k = 254)
#
# logLik deviance AIC BIC AICc
# 864.2459 209.1145 -1726.4918 -1722.9545 -1726.4760
#
# I^2 (total heterogeneity / total variability): 0.00%
# H^2 (total variability / sampling variability): 0.83
#
# Test for Heterogeneity:
# Q(df = 253) = 209.1145, p-val = 0.9797
#
# Model Results:
#
# estimate se zval pval ci.lb ci.ub
# 0.0026 0.0003 7.8872 <.0001 0.0020 0.0033 ***
#
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Test for heterogeneity validates this fixed effects model, because p-value is high.
# However, we have reasons to think there might be study level heterogeneity. Let's check it.
# Random Effects Meta-analysis for bulk density (comment fixed: this block
# models bulk density, not organic carbon)
bulkd_meta2 <- rma(yi = estimate_bulkd,
vi = variance_bulkd,
method = "REML",
test = "knha",
data = all_coefs)
summary(bulkd_meta2)
# Random-Effects Model (k = 254; tau^2 estimator: REML)
#
# logLik deviance AIC BIC AICc
# 870.7110 -1741.4221 -1737.4221 -1730.3553 -1737.3741
#
# tau^2 (estimated amount of total heterogeneity): 0.0000 (SE = 0.0000)
# tau (square root of estimated tau^2 value): 0.0030
# I^2 (total heterogeneity / total variability): 24.09%
# H^2 (total variability / sampling variability): 1.32
#
# Test for Heterogeneity:
# Q(df = 253) = 209.1145, p-val = 0.9797
#
# Model Results:
#
# estimate se tval pval ci.lb ci.ub
# 0.0033 0.0003 9.7829 <.0001 0.0027 0.0040 ***
# There is some heterogeneity, 24%
# Adding species as a moderator
bulkd_meta3 <- rma(yi = estimate_bulkd,
vi = variance_bulkd,
mods = ~Species,
method = "REML",
test = "knha",
data = all_coefs)
summary(bulkd_meta3)
# Species appears to be significant here.
# Adding study as a moderator, to see if we need to include it as a random effect to deal with NON INDEPENDENCE OF STUDIES WITHIN PAPERS
bulkd_meta4 <- rma.mv(yi = estimate_bulkd,
V = variance_bulkd,
mods = ~Source,
method = "REML",
data = all_coefs)
summary(bulkd_meta4)
# Source is significant P = 0.0001
# Let's add it as a random effect.
# Adding a random effect (Source), to account for all those studies coming from the same paper. We first need to take NA's out.
bulkd_meta5 <- rma.mv(yi = estimate_bulkd,
V = variance_bulkd,
random = ~ 1|Source,
method = "REML",
data = all_coefs)
summary(bulkd_meta5)
# Multivariate Meta-Analysis Model (k = 254; method: REML)
#
# logLik Deviance AIC BIC AICc
# 866.0125 -1732.0250 -1728.0250 -1720.9582 -1727.9770
#
# Variance Components:
#
# estim sqrt nlvls fixed factor
# sigma^2 0.0000 0.0026 17 no Source
#
# Test for Heterogeneity:
# Q(df = 253) = 209.1145, p-val = 0.9797
#
# Model Results:
#
# estimate se zval pval ci.lb ci.ub
# 0.0037 0.0008 4.4554 <.0001 0.0021 0.0053 ***
#
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Now let's include moderator = Species
bulkd_meta6 <- rma.mv(yi = estimate_bulkd,
V = variance_bulkd,
mods = ~Species,
random = ~ 1|Source,
method = "REML",
data = all_coefs)
summary(bulkd_meta6)
# Species has to be included.
# bulkd_meta6 is the best-selected model.
# Funnel plots
# NOTE(review): the funnel below is drawn for bulkd_meta5 although
# bulkd_meta6 (with Species) was declared best above — confirm intended.
funnel(bulkd_meta5)
funnel(trimfill(bulkd_meta))
# # # # # # # # # # # # # # # # # #
# CARBON DENSITY META-ANALYSIS ----
# # # # # # # # # # # # # # # # # #
# Same model-selection workflow as for the other two responses:
# FE -> RE -> RE + Species moderator -> Source as moderator ->
# Source as random intercept -> Source random + Species moderator.
# Fixed effects meta-analysis for CARBON DENSITY
carbon_density_meta <- rma(yi = estimate_carbon_density,
vi = variance_carbon_density,
method = "FE",
data = all_coefs)
summary(carbon_density_meta)
# Fixed-Effects Model (k = 254)
#
# logLik deviance AIC BIC AICc
# 1868.2117 131.5190 -3734.4234 -3730.8860 -3734.4075
#
# I^2 (total heterogeneity / total variability): 0.00%
# H^2 (total variability / sampling variability): 0.52
#
# Test for Heterogeneity:
# Q(df = 253) = 131.5190, p-val = 1.0000
#
# Model Results:
#
# estimate se zval pval ci.lb ci.ub
# 0.0000 0.0000 0.5161 0.6058 -0.0000 0.0000
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Test for heterogeneity validates this fixed effects model, because p-value is high.
# However, we have reasons to think there might be study level heterogeneity. Let's check it.
# Random Effects Meta-analysis for carbon density (comment fixed: this block
# models carbon density, not organic carbon)
carbon_density_meta2 <- rma(yi = estimate_carbon_density,
vi = variance_carbon_density,
method = "REML",
test = "knha",
data = all_coefs)
summary(carbon_density_meta2)
# Random-Effects Model (k = 254; tau^2 estimator: REML)
#
# logLik deviance AIC BIC AICc
# 1860.1405 -3720.2811 -3716.2811 -3709.2143 -3716.2331
#
# tau^2 (estimated amount of total heterogeneity): 0.0000 (SE = 0.0000)
# tau (square root of estimated tau^2 value): 0.0000
# I^2 (total heterogeneity / total variability): 5.53%
# H^2 (total variability / sampling variability): 1.06
#
# Test for Heterogeneity:
# Q(df = 253) = 131.5190, p-val = 1.0000
#
# Model Results:
#
# estimate se tval pval ci.lb ci.ub
# -0.0000 0.0000 -0.3498 0.7267 -0.0000 0.0000
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Adding species as a moderator
carbon_density_meta3 <- rma(yi = estimate_carbon_density,
vi = variance_carbon_density,
mods = ~Species,
method = "REML",
test = "knha",
data = all_coefs)
summary(carbon_density_meta3)
# Species is a significant moderator.
# Adding study as a moderator, to see if we need to include it as a random effect to deal with NON INDEPENDENCE OF STUDIES WITHIN PAPERS
carbon_density_meta4 <- rma.mv(yi = estimate_carbon_density,
V = variance_carbon_density,
mods = ~Source,
method = "REML",
data = all_coefs)
summary(carbon_density_meta4)
# Source is significant P = 0.0274
# Let's add it as a random effect.
# Adding a random effect (Source), to account for all those studies coming from the same paper. We first need to take NA's out.
carbon_density_meta5 <- rma.mv(yi = estimate_carbon_density,
V = variance_carbon_density,
random = ~ 1|Source,
method = "REML",
data = all_coefs)
summary(carbon_density_meta5)
# Multivariate Meta-Analysis Model (k = 254; method: REML)
#
# logLik Deviance AIC BIC AICc
# 1861.6324 -3723.2649 -3719.2649 -3712.1981 -3719.2169
#
# Variance Components:
#
# estim sqrt nlvls fixed factor
# sigma^2 0.0000 0.0000 17 no Source
#
# Test for Heterogeneity:
# Q(df = 253) = 131.5190, p-val = 1.0000
#
# Model Results:
#
# estimate se zval pval ci.lb ci.ub
# -0.0000 0.0000 -0.3695 0.7117 -0.0000 0.0000
#
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# Now let's include moderator = Species
carbon_density_meta6 <- rma.mv(yi = estimate_carbon_density,
V = variance_carbon_density,
mods = ~Species,
random = ~ 1|Source,
method = "REML",
data = all_coefs)
summary(carbon_density_meta6)
# Species not significant moderator
# Thus, the best model, is the one with Source as random effects, but without species as moderator.
# That's carbon_density_meta5
# Funnel plots
funnel(carbon_density_meta5)
funnel(trimfill(carbon_density_meta))
# Data to be plotted
# Assemble one single-row tibble per response variable holding the pooled
# estimate of the selected model, its 95% CI bounds, and a significance
# label used to annotate the forest plot.
# NOTE: the model objects' $beta is a 1x1 matrix, so tibble() stores it as a
# matrix column; names<-() then renames the positionally-created columns.
data_organic_carbon <- tibble("Organic carbon",
organic_carbon_meta5$beta,
organic_carbon_meta5$ci.lb,
organic_carbon_meta5$ci.ub,
"*")
names(data_organic_carbon) <- c("variable",
"estimate",
"lower",
"upper",
"label")
data_bulk <- tibble("Dry bulk density",
bulkd_meta5$beta,
bulkd_meta5$ci.lb,
bulkd_meta5$ci.ub,
"***")
names(data_bulk) <- c("variable",
"estimate",
"lower",
"upper",
"label")
data_carbon_density <- tibble("Carbon density",
carbon_density_meta5$beta,
carbon_density_meta5$ci.lb,
carbon_density_meta5$ci.ub,
" ")
names(data_carbon_density) <- c("variable",
"estimate",
"lower",
"upper",
"label")
# Combine the three per-variable summaries and draw a forest-style plot of
# the pooled estimates (points) with their 95% CIs (error bars), flipped so
# variables run down the y axis.
data_meta <- rbind(data_organic_carbon, data_bulk, data_carbon_density)
# Nice forest plot for both carbon and bulk density
# FIX: constant appearance settings (point size, error-bar width) are now set
# outside aes(). Wrapping a constant in aes() maps the literal value through
# a scale instead of using it directly, so aes(size = 1.2) did NOT draw
# points at size 1.2, and aes(width = 0.1) was not applied as intended.
ggplot(data_meta, aes(x = variable, y = estimate, ymax = upper, ymin = lower)) +
geom_point(size = 1.2) +
geom_errorbar(width = 0.1) +
geom_text(aes(label = label), size = 8, position = position_nudge(x = 0.1)) +
coord_flip() +
geom_hline(aes(yintercept = 0), lty = 2, size = 1) + # zero-effect reference line
xlab("") +
ylab("Change along core depth") +
theme_bw() +
theme(legend.position = "none",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
text = element_text(size = 14))
# ggsave(filename = "Figs/Final_Figures_March2021/Meta-analysis.pdf")
|
4302044590b2ec0aa595f91cbe874c6f581d443c
|
43292259e34c3738d1775d5d55bc0b83f027607c
|
/material original/Problemas_y_Talleres/problemas_MATIII/inf_gestion_old/prob9.R
|
c9d9be97114b37ff1fafb619fcca71eae3b02b16
|
[] |
no_license
|
fenixin15/curso-estadistica-inferencial
|
12543ddac6fe43b41e713753b677d17088a84d2d
|
f655b56aff2f1ef69cd46301f202c05b258ee93e
|
refs/heads/master
| 2020-08-21T18:42:07.514819
| 2019-10-19T10:58:30
| 2019-10-19T10:58:30
| 216,220,287
| 1
| 0
| null | 2019-10-19T14:36:36
| 2019-10-19T14:36:36
| null |
ISO-8859-1
|
R
| false
| false
| 1,076
|
r
|
prob9.R
|
# Exercise 9: descriptive statistics and percentiles for 50 test scores.
x <- c(4, 4, 2, 10, 1, 9, 5, 3, 4, 5, 6, 6, 7, 6, 8, 7, 6, 8, 7, 6, 5, 4,
       4, 4, 5, 6, 6, 7, 5, 6, 6, 7, 5, 6, 6, 7, 5, 6, 4, 3, 2, 6, 6, 7,
       7, 8, 8, 9, 8, 7)
x
table(x)           # frequency table of the scores
mean(x)
sd(x)
sort(x)
length(x)          # n = 50
length(x) * 20 / 100   # 20% of the sample size = 10 observations
# BUG FIX: the original used length[x] (subscripting the function `length`
# with square brackets), which raises "object of type 'closure' is not
# subsettable"; length(x) is the intended call.
sort(x)[length(x) - length(x) * 20 / 100]
sort(x)[40]        # same value, with the index written out explicitly
## As we can see, an exact value for the 20% cut is not obtained this way.
## If we treat the data as grouped in intervals [0.5,1.5), [1.5,2.5), ...
## the 80th percentile is computed below.
frec <- table(x)
length(x) * 80 / 100   # position of the 80th percentile (40)
which(cumsum(frec) >= length(x) * 80 / 100)   # first class reaching 80%
## The 80th percentile (grouped-data interpolation formula):
6.5 + 1 * (40 - 33) / 9
## Graphs
hist(x)
barplot(table(x), main = "Resultados prueba")
## More graphs
histograma <- hist(x)
histograma[]       # a histogram is a list of components (breaks, counts, ...)
## As shown, `histograma` is an object that can itself be plotted
plot(histograma, main = "Es un plot del objeto histograma")
class(histograma)
# Replace $counts with the cumulative counts to draw a cumulative histogram
histograma$counts <- cumsum(histograma$counts)
# The frequency-polygon approximation is obtained with a density overlay
hist(x, prob = TRUE)
lines(density(x))
|
82942a664a99ed34a3ec3b0670d89d5950a73fdc
|
0299ee5e21a9d468d0c20345621b5c0b1e3ceebf
|
/Week 2 HW CSDA 5330 Alex Cline.R
|
8cb03b009d6588b390da6a9e9aadb74d679e0e1c
|
[] |
no_license
|
AlexCCline/Apriori
|
67a6a0a541f556343f1634e385c7440e6c78e20d
|
f665c3db77b8a1ef2ac043568b3f1cc18a9bceb3
|
refs/heads/master
| 2022-11-19T00:02:56.528926
| 2020-07-08T01:57:45
| 2020-07-08T01:57:45
| 277,664,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,569
|
r
|
Week 2 HW CSDA 5330 Alex Cline.R
|
# Week 2 HW (CSDA 5330): association-rule mining on Sam's Club basket data
# with arules/arulesViz, plus static and interactive rule visualizations.
library(arulesViz)
library(plotly)
library(arules)
# CSV: long format, column 1 = transaction id, column 2 = item description
SAMCLUB.df <- read.csv("SAMCLUB.csv")
# 3a summary of results
summary(SAMCLUB.df)
# Convert long-format rows into arules binary transactions
# (one itemset per transaction id)
SAMCLUB.binary <- as(split(SAMCLUB.df[, 2], SAMCLUB.df[, 1]), "transactions")
inspect(SAMCLUB.binary)
# SAMCLUB rules: mine with low support/confidence thresholds
SAMCLUB.rules <- apriori(SAMCLUB.binary,
                         parameter = list(supp = 0.001, conf = 0.1, target = "rules"))
# 3b static and interactive plots for SAMCLUB rules supp 0.001 conf 0.1
library(yaml)
library(colorspace)
plot(SAMCLUB.rules, control = list(col = sequential_hcl(100)))
plot(SAMCLUB.rules, col = sequential_hcl(100))
plot(SAMCLUB.rules, method = "two-key plot")
# interactive plot
plot(SAMCLUB.rules, method = "graph", engine = "htmlwidget")
# 3c orange juice filter (rows whose primary description is orange juice)
SAMCLUB.filter <- SAMCLUB.df[SAMCLUB.df$Primary_Description == "ORANGE JUICE", ]
SAMCLUB.filter
# orange juice rules: restrict the consequent (rhs) to ORANGE JUICE
ORANGEJUICErules <- apriori(SAMCLUB.binary,
                            parameter = list(support = 0.001, confidence = 0.1),
                            appearance = list(default = "lhs", rhs = "ORANGE JUICE"))
# 3d orange juice visualization
plot(ORANGEJUICErules, method = "two-key plot")
plot(ORANGEJUICErules, method = "graph", engine = "htmlwidget")
# 3e and 3f conf>0.5 rules and static and interactive plots
Cg0.5rules <- SAMCLUB.rules[quality(SAMCLUB.rules)$confidence > 0.5]
plot(Cg0.5rules)
plot(Cg0.5rules, method = "two-key plot")
plot(Cg0.5rules, method = "graph", engine = "htmlwidget")
# BUG FIX: the original wrote the rule table back to "SAMCLUB.csv",
# clobbering the input data file read at the top of this script; write to a
# separate output file instead. (inspect() is used for its invisible
# data.frame return value; the file is tab-separated despite the extension.)
write.table(inspect(SAMCLUB.rules), "SAMCLUB_rules.csv", sep = "\t")
|
4b9bd678ca457f8f07a5b800fc17c1b1d7f97213
|
309c47cad0e7c2f795efe301277869b2bf192cfa
|
/_resampling/02_resampling_sPlot_within_the_PC1-PC2_environmental_space.R
|
7724e071b44e1b5dd5bdfeb698df610f8e8c9d61
|
[
"CC-BY-4.0"
] |
permissive
|
sPlotOpen/sPlotOpen_Code
|
9d19e0242f2082c1ce47b10fdc4743840861c769
|
cf5d2c396c4beeb94064f877a42fa6bef985f983
|
refs/heads/master
| 2023-02-01T09:59:08.621448
| 2020-12-18T13:01:20
| 2020-12-18T13:01:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,881
|
r
|
02_resampling_sPlot_within_the_PC1-PC2_environmental_space.R
|
#
# 01 February 2017
#
# Resampling of sPlot relevés within the PC1-PC2 environmental space:
# this section loads the sPlot v2.1 header table, drops relevés without
# coordinates, and converts the remainder to a spatial data frame.
library(spdep)
library(raster)
library(maptools)
library(fBasics)
library(devtools)
library(parallelsugar)
library(colorRamps)
library(Rcpp)
library(bigmemory)
library(RcppArmadillo)
library(RcppParallel)
#
data(wrld_simpl)
#
# Loading the header data from the sPlot database v2.1
#
load("sPlot_header_20161124.RData")
ls() # header
dim(header) # 1121244 relevés and 51 variables
length(which(is.na(header$Longitude))) # 558 relevés without longitude coordinates
length(which(is.na(header$Latitude))) # 558 relevés without latitude coordinates
posit <- which(is.na(header$Longitude)&is.na(header$Latitude))
plot_data <- header[-posit, ]
dim(plot_data) # 1120686 relevés and 51 variables
rm(header)
#
# Make plot_data as a spatial dataframe
#
# WGS84 lon/lat coordinate reference system used throughout the script
CRSlonlat <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0")
coords <- cbind(plot_data$Longitude, plot_data$Latitude)
coords <- SpatialPoints(coords, proj4string=CRSlonlat)
plot_data <- SpatialPointsDataFrame(coords, plot_data, proj4string=CRSlonlat)
class(plot_data)
#
# Compute the initial sampling effort across the geographical space per spatial unit of 2.5 arc minute
#
rgeo <- raster(nrows=360, ncols=720, xmn=-180, xmx=180, ymn=-90, ymx=90) # raster at half a degree resolution (cf. 30 arc minute resolution)
rgeo <- disaggregate(rgeo, fact=12) # raster at 2.5 arc minute resolution which is about 5 km at the equator (25 km2 is the approximate area of a spatial unit at the equator)
init_seff_rgeo <- rasterize(plot_data@data[, c("POINT_X", "POINT_Y")], rgeo, fun="count")
sum(getValues(init_seff_rgeo), na.rm=TRUE) # 1120686 relevés
#
# Remove plots for which location accuracy is above 2821 m (cf. radius of a buffer circle of 25 km2 around the plot location)
#
# From here on, plot_data is handled as a plain data frame again
plot_data <- plot_data@data
buffer <- round(sqrt(25/pi)*1000)
posit <- which(plot_data[, "Location uncertainty (m)"]>buffer) # Be careful, there are 261 plots without location uncertainty but with coordinates that we will keep in the end
length(posit) # 280224 relevés for which location accuracy exceeds a radius of 2821 m
plot_data <- plot_data[-posit, ]
dim(plot_data) # 840462 relevés and 51 variables
#
# Remove plots from wetlands
#
# Pure wetlands = flagged as wetland with no other habitat class assigned
posit <- which(is.na(plot_data$Grassland)&is.na(plot_data$Forest)&is.na(plot_data$Shrubland)&is.na(plot_data$Sparse.vegetation)&plot_data$Wetland==1)
length(posit) # 8323 relevés that are pure wetlands
plot_data <- plot_data[-posit, ]
dim(plot_data) # 832139 relevés and 51 variables
#
# Remove plots from anthropogenic vegetation types
#
posit <- which(plot_data$Naturalness==3)
length(posit) # 30942 relevés from anthropogenic vegetation types
plot_data <- plot_data[-posit, ]
dim(plot_data) # 801197 relevés and 51 variables
#
# Import DT2 which is the full dataset (free of non-vascular plants) with species composition
#
load("DT2_20161025.RData")
dim(DT2) # 22195966 species occurrences and 7 variables
#
# Match it with the plot_data dataframe
#
# Keep only relevés present in BOTH tables (species data AND filtered header)
length(unique(DT2$PlotObservationID)) # 1117369 relevés
length(unique(DT2$PlotObservationID))-length(unique(plot_data$PlotObservationID)) # 316172 relevés in DT2 but missing from plot_data
posit <- match(DT2$PlotObservationID, plot_data$PlotObservationID)
any(is.na(posit)) # TRUE: some relevés (n = 316172) in DT2 are not in plot_data
length(DT2$PlotObservationID[is.na(posit)]) # 6168698 rows in DT2 corresponding to the species lists of the relevés missing from plot_data
DT2 <- DT2[is.finite(posit), ]
length(unique(DT2$PlotObservationID)) # 799400 relevés
posit <- match(plot_data$PlotObservationID, DT2$PlotObservationID)
any(is.na(posit)) # TRUE: some relevés (n = 1797) in plot_data are not in DT2 (cf. plots with only non-vascular plants?)
length(plot_data$PlotObservationID[is.na(posit)]) # 1797 relevés in plot_data are not in DT2 (cf. plots with only non-vascular plants?)
plot_data <- plot_data[is.finite(posit), ]
length(unique(plot_data$PlotObservationID)) # 799400 relevés which matches with DT2
#
save(plot_data, file="plot_data.RData")
#
# Make plot_data as a spatial dataframe again
#
coords <- cbind(plot_data$Longitude, plot_data$Latitude)
coords <- SpatialPoints(coords, proj4string=CRSlonlat)
plot_data <- SpatialPointsDataFrame(coords, plot_data, proj4string=CRSlonlat)
class(plot_data)
#
# Check for relevés with identical spatial coordinates (just for information)
#
coordID <- paste(plot_data@data$Longitude, plot_data@data$Latitude, sep=":")
length(coordID) # 799400 relevés
length(unique(coordID)) # 509977 relevés with unique coordinates (about 64% of the relevés have unique coordinates)
#
# Plot the global sampling effort per spatial unit of 2.5 arc minute
#
# The same map is then re-drawn at coarser resolutions (0.5, 1, 2 degrees)
# by successively aggregating the effort raster; plots use a log colour
# scale with back-transformed legend labels.
seff_rgeo <- rasterize(plot_data@data[, c("POINT_X", "POINT_Y")], rgeo, fun="count")
sum(getValues(seff_rgeo), na.rm=TRUE) # 799400 relevés
tiff(filename="Sampling_effort_at_2.5_arc_minute.tiff", width=20, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(seff_rgeo), legend=FALSE, asp=0, col=rev(divPalette(n=100, name="Spectral")), xlab="Longitude", ylab="Latitude")
plot(log(seff_rgeo), legend.only=TRUE, col=rev(divPalette(n=100, name="Spectral")), legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5), labels=round(exp(seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5))), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
plot(wrld_simpl, add=T, border="darkgrey", lwd=0.1)
title(main="Number (log-scale) of plots \nper 2.5 arc-minute spatial unit")
dev.off()
#
# Plot the global sampling effort per spatial unit of 0.5 degree
#
seff_rgeo <- aggregate(seff_rgeo, fact=12, fun=sum)
sum(getValues(seff_rgeo), na.rm=TRUE) # 799400 relevés
tiff(filename="Sampling_effort_at_0.5_degree.tiff", width=20, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(seff_rgeo), legend=FALSE, asp=0, col=rev(divPalette(n=100, name="Spectral")), xlab="Longitude", ylab="Latitude")
plot(log(seff_rgeo), legend.only=TRUE, col=rev(divPalette(n=100, name="Spectral")), legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5), labels=round(exp(seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5))), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
plot(wrld_simpl, add=T, border="darkgrey", lwd=0.1)
title(main="Number (log-scale) of plots \nper 30 arc-minute spatial unit")
dev.off()
#
# Plot the global sampling effort per spatial unit of 1 degree
#
seff_rgeo <- aggregate(seff_rgeo, fact=2, fun=sum)
sum(getValues(seff_rgeo), na.rm=TRUE) # 799400 relevés
tiff(filename="Sampling_effort_at_1_degree.tiff", width=20, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(seff_rgeo), legend=FALSE, asp=0, col=rev(divPalette(n=100, name="Spectral")), xlab="Longitude", ylab="Latitude")
plot(log(seff_rgeo), legend.only=TRUE, col=rev(divPalette(n=100, name="Spectral")), legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5), labels=round(exp(seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5))), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
plot(wrld_simpl, add=T, border="darkgrey", lwd=0.1)
title(main="Number (log-scale) of plots \nper 1 degree spatial unit")
dev.off()
#
# Plot the global sampling effort per spatial unit of 2 degrees
#
seff_rgeo <- aggregate(seff_rgeo, fact=2, fun=sum)
sum(getValues(seff_rgeo), na.rm=TRUE) # 799400 relevés
tiff(filename="Sampling_effort_at_2_degrees.tiff", width=20, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(seff_rgeo), legend=FALSE, asp=0, col=rev(divPalette(n=100, name="Spectral")), xlab="Longitude", ylab="Latitude")
plot(log(seff_rgeo), legend.only=TRUE, col=rev(divPalette(n=100, name="Spectral")), legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5), labels=round(exp(seq(log(minValue(seff_rgeo)), log(maxValue(seff_rgeo)), length.out=5))), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
plot(wrld_simpl, add=T, border="darkgrey", lwd=0.1)
title(main="Number (log-scale) of plots \nper 2 degrees spatial unit")
dev.off()
#
# Plot the difference with the initial sampling effort per spatial unit of 2 degrees
#
# init_seff_rgeo (pre-filtering effort) minus seff_rgeo (post-filtering)
# shows where relevés were lost to the quality filters above.
init_seff_rgeo <- aggregate(init_seff_rgeo, fact=48, fun=sum)
sum(getValues(init_seff_rgeo), na.rm=TRUE) # 1120686 relevés
diff_seff_rgeo <- init_seff_rgeo-seff_rgeo
sum(getValues(diff_seff_rgeo), na.rm=TRUE) # 316728 relevés lost after data filtering
tiff(filename="Plot_loss_at_2_degrees.tiff", width=20, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(diff_seff_rgeo+1), legend=FALSE, asp=0, col=c("grey", rev(divPalette(n=99, name="Spectral"))), xlab="Longitude", ylab="Latitude")
plot(log(diff_seff_rgeo+1), legend.only=TRUE, col=c("grey", rev(divPalette(n=99, name="Spectral"))), legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(0, log(maxValue(diff_seff_rgeo)+1), length.out=5), labels=round(seq(0, exp(log(maxValue(diff_seff_rgeo)+1)), length.out=5)), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
plot(wrld_simpl, add=T, border="darkgrey", lwd=0.1)
title(main="Number (log-scale) of plots \nper 2 degrees spatial unit")
dev.off()
#
# Compute the global sampling effort across the bivariate (PC1-PC2) environmental space (not the geographical space)
#
# Each relevé is assigned its PC1/PC2 scores extracted from pre-computed
# principal-component rasters; the PC1-PC2 plane is then binned into a
# 100x100 grid and effort counted per environmental cell.
load("PC1_r.RData")
load("PC2_r.RData")
plot_data@data$cellID <- cellFromXY(rgeo, cbind(plot_data@data$POINT_X, plot_data@data$POINT_Y))
plot_data@data$pc1_val <- extract(PC1_r, coordinates(plot_data))
plot_data@data$pc2_val <- extract(PC2_r, coordinates(plot_data))
load("pca3.RData")
res <- 100 # Setting the number of bins per PCA axis to 100
reco <- raster(nrows=res, ncols=res, xmn=min(pca3$x[, 1]), xmx=max(pca3$x[, 1]), ymn=min(pca3$x[, 2]), ymx=max(pca3$x[, 2]))
PC1_PC2_r <- rasterize(pca3$x[, 1:2], reco, fun="count") # Compute the density of geographic grid cells across the entire bivariate (PC1-PC2) environmental space
sPlot_reco <- rasterize(plot_data@data[, c("pc1_val", "pc2_val")], reco, fun="count") # Compute the sampling effort (number of vegetation plots) per environmental unit (cell) across the entire bivariate (PC1-PC2) environmental space
temp1 <- getValues(PC1_PC2_r)
temp1[!is.na(temp1)] <- 0 # Put zero values for the empty cells (cf. there is no existing terrestrial grid cell available on Earth for the focal PC1-PC2 grid cell condition)
temp2 <- getValues(sPlot_reco)
temp2[which(temp1==0&is.na(temp2))] <- 0 # Put zero values for the empty cells (cf. there is no vegeteation plots available for those environmental conditions: gaps)
sPlot_reco <- setValues(reco, temp2)
#
# Plot the number of 2.5 arc-minute cells for each cell of the PC1-PC2 space
#
tiff(filename="Global_availability_PC1-PC2.tiff", width=12, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(PC1_PC2_r), asp=0, col=rev(divPalette(n=100, name="RdBu")), xlab="PC1 (cold and seasonal to hot and stable)", ylab="PC2 (dry to wet)", legend=FALSE)
plot(log(PC1_PC2_r), asp=0, col=rev(divPalette(n=100, name="RdBu")), legend.only=TRUE, legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(log(minValue(PC1_PC2_r)), log(maxValue(PC1_PC2_r)), length.out=5), labels=round(seq(exp(log(minValue(PC1_PC2_r))), exp(log(maxValue(PC1_PC2_r))), length.out=5)), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
title(main="Number of 2.5 arc-minute spatial units \nper environmental cell (log scale)")
dev.off()
#
# Plot the number of sPlot relevés for each cell of the PC1-PC2 space
#
tiff(filename="Sampling_effort_PC1-PC2.tiff", width=12, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
plot(log(sPlot_reco+1), asp=0, col=c("grey", rev(divPalette(n=99, name="RdBu"))), xlab="PC1 (cold and seasonal to hot and stable)", ylab="PC2 (dry to wet)", legend=FALSE)
plot(log(sPlot_reco+1), asp=0, col=c("grey", rev(divPalette(n=99, name="RdBu"))), legend.only=TRUE, legend.width=1, legend.shrink=0.75, axis.args=list(at=seq(0, log(maxValue(sPlot_reco)+1), length.out=5), labels=round(seq(0, exp(log(maxValue(sPlot_reco)+1)), length.out=5)), cex.axis=0.6), legend.args=list(text="N", side=3, font=2, line=0, cex=0.8))
title(main="Number of sPlot relevés \nper environmental cell (log scale)")
dev.off()
#
# Plot for each cell of the PC1-PC2 space the ratio between the relative proportion of sPlot relevés and the relative proportion of spatial units available worldwide
#
tiff(filename="Sampling_effort_ratio_PC1-PC2.tiff", width=12, height=12, res=300, unit="cm")
par(mar=c(4, 4, 4, 1))
ratio_reco <- (sPlot_reco/max(getValues(sPlot_reco), na.rm=T))/(PC1_PC2_r/max(getValues(PC1_PC2_r), na.rm=T))
plot(log(ratio_reco+1), asp=0, col=c("grey", rev(divPalette(n=99, name="Spectral"))), xlab="PC1 cold and seasonal to hot and stable)", ylab="PC2 (dry to wet)")
title(main="Oversampled (>0.69) versus \nundersampled (<0.69) PC1-PC2 cells")
dev.off()
#
# Run a sensitivity analysis to define the most appropriate resolution of the bivariate (PC1-PC2) environmental space
#
# For each candidate resolution we record, across the PC1-PC2 grid:
#   ncell_disp - cells occupied by at least one terrestrial grid cell
#   ncell_samp - cells occupied by at least one sPlot relevé
#   seff_*     - median/mean/max/min sampling effort per occupied cell
#   nbrel_sel  - relevés retained if oversampled cells are capped at the median effort
res <- seq(10, 500, 10)
n_res <- length(res)
# Preallocate result vectors (growing them with c() inside the loop is O(n^2))
ncell_disp <- numeric(n_res)
ncell_samp <- numeric(n_res)
seff_med <- numeric(n_res)
seff_mean <- numeric(n_res)
seff_max <- numeric(n_res)
seff_min <- numeric(n_res)
nbrel_sel <- numeric(n_res)
for (i in seq_along(res)) {
print(paste(i, "of", n_res, sep=" "))
r <- raster(nrows=res[i], ncols=res[i], xmn=min(pca3$x[, 1]), xmx=max(pca3$x[, 1]), ymn=min(pca3$x[, 2]), ymx=max(pca3$x[, 2]))
temp <- rasterize(pca3$x[, 1:2], r, fun="count")
ncell_disp[i] <- length(which(getValues(temp) > 0))
temp <- rasterize(plot_data@data[, c("pc1_val", "pc2_val")], r, fun="count")
temp <- getValues(temp)
temp <- na.omit(temp)
# BUG FIX: the original read length(which(temp)>0); which() requires a
# logical argument, so that call errors on the numeric counts. The
# intended quantity is the number of cells containing >0 relevés.
ncell_samp[i] <- length(which(temp > 0))
seff_med[i] <- median(temp)
seff_mean[i] <- mean(temp)
seff_max[i] <- max(temp)
seff_min[i] <- min(temp)
# BUG FIX: temp is a vector, so the original temp[which(...), ]
# (matrix-style indexing) fails with "incorrect number of dimensions".
nbrel_sel[i] <- sum(temp > median(temp)) * median(temp) + sum(temp[temp <= median(temp)])
}
plot(res, seff_med)
plot(res, seff_max)
plot(res, seff_mean)
plot(res, nbrel_sel)
plot(res, ncell_samp/ncell_disp, ylim=c(0, 1))
#
# Resample sPlot within the PC1-PC2 environmental space to get an environmentally-balanced subset
#
# For each oversampled environmental cell (effort > global median), run the
# heterogeneity-constrained random (HCR) selection `repet` times on the
# Bray-Curtis distance matrix of its relevés, and record which relevés would
# be dropped in each repetition.
plot_data <- plot_data@data
save(plot_data, file="plot_data.RData") # Save the latest version of plot_data
Sys.setenv("PKG_CXXFLAGS"="-fopenmp") # Set environment variables for other processes called from within R
sourceCpp("/_functions_TH/bray.part.OpenMP.cpp") # Source C++ functions written by Tarek Hattab from folder "_functions_TH"
sourceCpp("/_functions_TH/bray.part.C_RcppParallel.cpp") # Source C++ functions written by Tarek Hattab from folder "_functions_TH"
sourceCpp("/_functions_TH/hcr.C.cpp") # Source C++ functions written by Tarek Hattab from folder "_functions_TH"
sourceCpp("/_functions_TH/cast_binary.cpp") # Source C++ functions written by Tarek Hattab from folder "_functions_TH"
# File-backed Bray-Curtis distance matrix for very large communities.
# NOTE(review): the backing-file names use the loop variable `i` from the
# enclosing environment (global state), so this function only works when
# called inside the loop below.
BigBrayPart <- function(bigMat) {
zeros <- big.matrix(nrow=nrow(bigMat), ncol=nrow(bigMat), init=0, type=typeof(bigMat), shared=FALSE, backingfile=paste("BrayMatrix_",i,sep=""), backingpath=getwd(), descriptorfile=paste("BrayMatrix_",i,".desc",sep=""))
bray_distance_OpenMP(bigMat@address, zeros@address)
return(zeros)
}
res <- 100 # Set the resolution of the environmental space based on the sensitivity analysis
r <- raster(nrows=res, ncols=res, xmn=min(pca3$x[, 1]), xmx=max(pca3$x[, 1]), ymn=min(pca3$x[, 2]), ymx=max(pca3$x[, 2])) # Prepare the environmental space restricted to sPlot relevés only (not the entire environmental space available at a global extent)
pca_sPlot_r <- rasterize(plot_data[, c("pc1_val", "pc2_val")], r, fun="count")
cutoff <- median(values(pca_sPlot_r), na.rm=TRUE) # Compute the cutoff value above which relevés have to be resampled for a given cell
tempZoneOut <- coordinates(pca_sPlot_r)[which(values(pca_sPlot_r)>cutoff), ] # Select only the coordinates of the environmental cells for which the total number of sPlot relevés available exceeds the cutoff value
repet <- 100 # Set the number of repetitions for the HCR function
sp_data <- DT2[, c(1, 2, 7)] # Prepare the species data table that will be used by the HCR approach
names(sp_data) <- c("plot_id", "sp_name", "rel_cov")
save(sp_data, file="sp_data.RData") # Save the latest version of sp_data
# NOTE(review): each list element is seeded with NA, so every element keeps a
# leading NA in front of the accumulated plot IDs — confirm downstream code
# strips it.
plotToRemove <- as.list(rep(NA, repet)) # Prepare an empty object to store the IDs of the relevés to be removed
for (i in 1:nrow(tempZoneOut)) {
print("--------")
print(paste(i, "out of", nrow(tempZoneOut), "cells", sep=" "))
# Progress map: all oversampled cells, current cell highlighted in red
plot(pca_sPlot_r, asp=0, xlab="PC1 (cold and seasonal to hot and stable)", ylab="PC2 (dry to wet)")
points(tempZoneOut[, 1], tempZoneOut[, 2], cex=0.5)
points(tempZoneOut[i, 1], tempZoneOut[i, 2], col="red", pch=19)
# Relevés whose PC scores fall inside the current environmental cell
sel.plot <- which(plot_data$pc1_val > tempZoneOut[i, 1]-(res(r)[1]/2) &
plot_data$pc1_val < tempZoneOut[i, 1]+(res(r)[1]/2) &
plot_data$pc2_val > tempZoneOut[i, 2]-(res(r)[2]/2) &
plot_data$pc2_val < tempZoneOut[i, 2]+(res(r)[2]/2))
print(paste("This cell contains", length(sel.plot), "relevés", sep=" "))
# NOTE(review): elsewhere in this script the ID column is
# "PlotObservationID"; confirm that a "PlotID" column actually exists in
# plot_data, otherwise this selects an undefined column.
idZoneOut <- plot_data[sel.plot, "PlotID"]
sel.comm <- sp_data[which(sp_data$plot_id%in%idZoneOut), c("plot_id", "sp_name", "rel_cov")]
sel.comm <- na.omit(sel.comm)
# Recode species names to consecutive integer codes for the C++ cast routine
sel.comm [, 2]<- factor(sel.comm[, 2], labels=seq(1:length(unique(sel.comm[, 2]))))
sel.comm [, 2]<- as.numeric(sel.comm[, 2])
comm.data <- castC(iD=sel.comm[, 1], sp=sel.comm[, 2], cov=sel.comm[, 3])
rowNames <- comm.data[, 1]
comm.data <- comm.data[, -1]
print(paste("The total number of species is", dim(comm.data)[[2]], sep=" "))
gc()
# Large communities go through the file-backed OpenMP distance routine to
# avoid holding the full n x n matrix in RAM
if (nrow(comm.data) > 20000) {
bigComMatrix <- as.big.matrix(comm.data,shared=FALSE, backingfile=paste("Matrix_",i,sep=""), backingpath=getwd(), descriptorfile=paste("Matrix_", i, ".desc", sep="")) ; brayBalDist <- BigBrayPart(bigComMatrix)
} else {
brayBalDist <- bray_distance_RcppParallel(comm.data); brayBalDist <- as.big.matrix(brayBalDist)
}
# HCR selection repeated `repet` times; non-selected relevés are flagged
# for removal in the corresponding repetition
for (j in 1:repet) {
selectedPlot <- HcrCPP(brayBalDist@address, nout=cutoff, nsampl=1000)
selectedPlot <- rowNames[selectedPlot]
selectedPlotIndex <- which(idZoneOut%in%selectedPlot)
plotToRemove[[j]] <- c(plotToRemove[[j]], idZoneOut[-selectedPlotIndex])
}
# Checkpoint after every cell so a crashed run can be resumed from cell i
output <- list(i, plotToRemove)
save(output, file="plotToRemove.RData")
}
#
#
|
4962732affecddcfdd6ebc4d93b7a4a1597c57b0
|
f1530c8481b0ef3cc06402dacaf42426f16050df
|
/scripts/EDA.R
|
a0ef73ca3b6cf5e0a0ae912ddfad35f40145595d
|
[
"MIT"
] |
permissive
|
hadleyd2/group_09-1
|
4a749dc0751f2a2a4cac03d1e9ca6d5df720eb32
|
f486d6669e9b928aa3897618d79b1ee4c2c4045e
|
refs/heads/master
| 2021-02-18T18:50:00.838873
| 2020-04-07T23:58:54
| 2020-04-07T23:58:54
| 245,224,513
| 0
| 1
|
MIT
| 2020-03-23T16:55:34
| 2020-03-05T17:19:38
|
R
|
UTF-8
|
R
| false
| false
| 4,989
|
r
|
EDA.R
|
# author: Kristina Wright
# date: 2020-03-06
# edited by: Daniel Hadley
# edit date: 2020-03-13
# The string below is parsed by docopt to build the command-line interface;
# its "Usage:" section defines the two required arguments.
"This script creates price density plots (density vs. listing price per night),
a correllogram (longitude, price, minimum stay, review number, host listings number),
and a violin plot (price vs. district) for exploratory data analysis and saves them as
seperate png files from cleaned data. This script takes in clean data CSV file path and
image directory path where plots will be exported as the variable arguments.
Usage: scripts/EDA.R --path_clean=<path_clean> --path_image=<path_image>" -> doc
## Load Libraries ####
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(corrplot)) # used to make correlogram plot
suppressPackageStartupMessages(library(gridExtra)) #used to arrange plots with grid.arrange()
suppressPackageStartupMessages(library(docopt))
suppressPackageStartupMessages(library(glue))
opt <- docopt(doc) # This is where the "Usage" gets converted into code that allows you to use commandline arguments
## Main Function Call ####
# saves plot images
# Runs the full EDA: loads the clean data, renders the three plots, and
# writes each one as a PNG into path_image.
# @param path_clean path to the clean listings CSV
# @param path_image directory where the PNG files are written
main <- function(path_clean, path_image){
clean.dat <- load_clean(path_clean)
# NOTE(review): density_plot()'s return value is discarded here; ggsave()
# saves ggplot2's last *displayed* plot, so consider print(density_plot(...))
# or ggsave(plot = density_plot(clean.dat), ...) — confirm this saves the
# intended figure.
density_plot(clean.dat)
ggsave('density_plot.png', width = 8, height = 5, path = path_image)
# corrplot draws with base graphics, so it needs an explicit png()/dev.off()
png(glue(path_image, "/correlogram.png"), width = 5, height = 5, units = "in", res = 200)
correlogram(clean.dat)
dev.off()
mean.price <- mean_price(clean.dat)
violin_plot(clean.dat, mean.price)
ggsave('violin_plot.png', width = 8, height = 5, path = path_image)
print(glue("The exploratory data analysis has been successfully completed on {path_clean}! Plot images have been saved to {path_image}."))
}
## Define Functions Used in main() ####
#' Load clean data
#'
#' Reads the processed/clean listings CSV into a tibble.
#'
#' @param path_clean Full path to the clean data file.
#' @return A tibble containing the clean listings data.
#' @examples
#' load_clean("https://raw.githubusercontent.com/STAT547-UBC-2019-20/group_09/master/data/clean_listings.csv")
load_clean <- function(path_clean) {
  # col_types = cols() suppresses readr's column-type guessing output
  read_csv(path_clean, col_types = cols())
}
#' Density plot
#'
#' Build a price density plot (density vs. listing price per night) for
#' all listings.
#'
#' @param df data frame of clean listings data
#' @examples
#' density_plot(clean.dat)
density_plot <- function(df) {
  ggplot(df, aes(x = price)) +
    geom_density() +
    theme_bw(14) +
    theme(plot.title = element_text(size = 14)) +
    ggtitle(label = "Price Density for All Listings") +
    # format the price axis with a euro suffix
    scale_x_continuous("Listing Price per Night",
                       labels = scales::dollar_format(suffix = "\u20AC", prefix = '')) +
    ylab("Density")
}
#' Correlogram
#'
#' Draw a correlogram of the numeric listing variables (latitude,
#' longitude, price, minimum stay, review count, host listing count)
#' on the active graphics device.
#'
#' @param df data frame of clean listings data
#' @example
#' correlogram(clean.dat)
correlogram <- function(df){
  # correlation matrix of the numeric columns of interest
  corr_matrix <- df %>%
    select(latitude, longitude, price, min_stay, reviews, host_listings) %>%
    cor()
  corrplot(corr_matrix,
           type = "upper",
           method = "color",       # colour-scale cells
           tl.srt = 45,            # angle the axis labels for readability
           addCoef.col = "black",  # overlay the correlation coefficients
           diag = FALSE,
           title = "Correlation between Relevant Variables",
           mar = c(0, 0, 1, 0))    # leave room above the plot for the title
}
#' Mean price
#'
#' Calculate the mean listing price in each district, sorted from most to
#' least expensive. Used by the violin plot to order the x-axis.
#'
#' @param df data frame of clean listings data
#' @return a tibble with columns `district` and `mean`, sorted by
#'   descending mean price
#' @example
#' mean_price(clean.dat)
mean_price <- function(df){
  df %>%
    # calculate the mean price for each district for plot ordering
    group_by(district) %>%
    summarize(mean = mean(price)) %>%
    arrange(desc(mean)) # mean price in descending order
}
#' Violin plot
#'
#' Create a violin plot of price (log10 scale) vs. district, with
#' districts ordered by descending mean price.
#'
#' @param df data frame of clean listings data
#' @param mean.price tibble of per-district mean prices from mean_price(),
#'   already sorted in descending order
#' @example
#' violin_plot(clean.dat, mean.price)
violin_plot <- function(df, mean.price){
  df%>%
    filter(price != 0) %>% # remove price = 0 (log10 scale cannot show them)
    # factor district by descending mean price so violins are ordered
    mutate(district = factor(district, levels = unique(mean.price$district))) %>%
    ggplot(aes(district, price)) +
    geom_violin(stat = "ydensity") +
    scale_y_log10() + # change to log10 scale since density of price is skewed
    ylab(paste("Price (", "\u20AC", ")", sep='')) +
    xlab("District") +
    ggtitle("Distribution of Price for Each Barcelona District") +
    theme_bw(14) +
    theme(plot.title = element_text(size = 14),
          axis.text.x = element_text(angle = 60, hjust = 1))
}
### tests
main(opt$path_clean, opt$path_image)
|
ef48e46afa7ea85a87ded7c97a4d5171bb757b13
|
f349f6b265cda79de039508796618d0747f2250b
|
/assignment2_example.R
|
8a559f8859f38e04a5b9e2880acd617e630a0968
|
[] |
no_license
|
tlpgalvao/ProgrammingAssignment2
|
7daeaf22ee3671030bc066e802e915dcf1314c29
|
fe3267700d403c431ff915acc85503d2454903fd
|
refs/heads/master
| 2021-05-04T06:56:20.857302
| 2016-10-11T22:27:59
| 2016-10-11T22:27:59
| 70,531,536
| 0
| 0
| null | 2016-10-10T21:46:55
| 2016-10-10T21:46:54
| null |
UTF-8
|
R
| false
| false
| 1,712
|
r
|
assignment2_example.R
|
# Create a special "vector" object that can cache its mean.
#
# Returns a list of four accessor closures sharing the environment that
# holds the stored vector `x` and the cached mean:
#   set(y)     -- replace the stored vector and invalidate the cache
#   get()      -- return the stored vector
#   setmean(v) -- store a computed mean in the cache
#   getmean()  -- return the cached mean (NULL until one has been set)
makeVector <- function(x = numeric()) {
  # cache starts empty; `<<-` inside the closures below writes back here
  cached_mean <- NULL
  list(
    set = function(y) {
      # replacing the data invalidates any previously cached mean
      x <<- y
      cached_mean <<- NULL
    },
    get = function() x,
    setmean = function(mean) cached_mean <<- mean,
    getmean = function() cached_mean
  )
}
# Return the mean of the data held in `x` (an object built by makeVector),
# computing it only when no cached value exists. Extra arguments are
# forwarded to mean() (e.g. na.rm = TRUE).
cachemean <- function(x, ...) {
  cached <- x$getmean()
  if (!is.null(cached)) {
    # cache hit: report it and return without recomputing
    message("getting cached data")
    return(cached)
  }
  # cache miss: compute, store for next time, and return
  fresh <- mean(x$get(), ...)
  x$setmean(fresh)
  fresh
}
|
20f5cd4cd36ecd2be75307c6abda355e7699b503
|
74fbc04141004ee21aa1fdcb903c083ed3c46e03
|
/final code.R
|
e0ea15be26864293c7617c7f21687fe584af8785
|
[] |
no_license
|
moose9200/Housing_Regression_Price_Analysis_R
|
52dca0573e0b75138fc800f26f3dac5e603e83be
|
59e1641e6287a54393a987a55069d056290ea07f
|
refs/heads/master
| 2020-12-02T21:25:09.509588
| 2017-07-06T07:34:21
| 2017-07-06T07:34:21
| 96,312,775
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,879
|
r
|
final code.R
|
rm(list=ls())
library(ggplot2)
#set current working directory
setwd("C:/Users/Moose/Desktop/housemarket kaggale")
train = read.csv("train.csv")
#####################################################################
mis=sapply(train, function(x) sum(is.na(x))) # missing values by column
mis= as.data.frame(mis)
cn = row.names(mis)
mis = cbind(mis,cn)
row.names(mis)=NULL
mis =mis[order(-mis$mis),]
mis$mis = mis$mis/nrow(train)*100
#bar plot missing values percentage vs Column name
ggplot(data = mis[1:40,],
aes(x=reorder(cn,-mis),
y = mis)) + geom_bar(stat = "identity", fill = "grey") +
xlab("Parameter") + ggtitle("Missing_Data_Percentage (Train)") + theme_bw()
#scatter plot missing values percentage vs Column name
ggplot(mis[1:20,], aes_string(x = "mis", y = "cn")) +
geom_point(size = 2, shape = 23)
###########################################################################
# Look for columns having more than 40% of missing values and remove them
train = train[-c(25,11,8,161,162,163)]
mis = mis[-c(1,2,3,4,5,6),]
#############################################################################
#missing value tratment
mis_col=train[colSums(is.na(train)) > 0]
library(DMwR)
mis_col =knnImputation(mis_col)
write.csv(mis_col,"knn_imputed.csv", row.names = F)
knn_imputed = read.csv("knn_imputed.csv")
#########################################################################
train=train[!colSums(is.na(train)) > 0] # select columns with non missing values
train =cbind(train,knn_imputed) #bind non missing values with imputed missing values by knn
sum(is.na(train))
##########################################################################
#devide categorical and numeric data into different data set
train_num = as.data.frame(Filter(is.numeric, train))
train_cat = as.data.frame(Filter(is.factor, train))
train_cat[,1]=NULL
# Outlier treatment: winsorize every numeric column of a data frame.
# Values below the 5th percentile are raised to it and values above the
# 95th percentile are lowered to it; non-numeric columns pass through
# unchanged. NAs are ignored when computing the percentiles.
capping <- function(x) {
  numeric_cols <- which(sapply(x, is.numeric))
  for (col in numeric_cols) {
    bounds <- unname(quantile(x[, col], c(0.05, 0.95), na.rm = TRUE))
    # clamp the column into [5th percentile, 95th percentile]
    x[, col] <- pmin(pmax(x[, col], bounds[1]), bounds[2])
  }
  x
}
# Replacing extreme values with percentiles
without_outliers_num= capping(train_num)
train = NULL
knn_imputed = NULL
library(clusterSim) #normlization
# Min-max scaling: linearly map `object` onto [0, 1] using its own
# minimum and maximum (vectorized over the whole input).
normalize <- function(object) {
  lo <- min(object)
  span <- max(object) - lo
  (object - lo) / span
}
without_outliers_nor_num= normalize(without_outliers_num)[,-225]
without_outliers_nor_num=cbind(without_outliers_nor_num,without_outliers_num$price_doc)
without_outliers_num= NULL
names(without_outliers_nor_num)[270]="price_doc"
# load the library
library(mlbench)
library(rpart)
library(caret)
p = cbind(without_outliers_nor_num,train_cat)
fit = rpart(price_doc ~ ., data = p, method = "anova")
#variable Importance
gbmImp <- varImp(fit, scale = FALSE)
gbmImp
#Choose Important columns
imp_col = subset(gbmImp, Overall > 0)
#Subsetting only thoese column from dataset which are important
q = p[,c(row.names(imp_col))]
#target variable has been excluded by gbm so we have to bind our target variable again
q = cbind.data.frame(q,p$price_doc)
names(q)[18] = "price_doc"
sub_area = dummy(q$ sub_area )
q = cbind(q,sub_area )
which( colnames(q)=="sub_area" )
q[17] = NULL
q=sapply(q,function(x) as.numeric(x))
which( colnames(q)=="price_doc" )
q= as.data.frame(q)
write.csv(q, "trainxgb.csv", row.names = FALSE)
#Model BUilding
train = q[sample(nrow(q), 22000, replace = F), ]
test = q[!(1:nrow(q)) %in% as.numeric(row.names(train)), ]
library(data.table)
train = as.data.table(train)
test = as.data.table(test)
library(xgboost)
library(caret)
which( colnames(q)=="sub_area" )
# Model Building ----
# Train an xgboost regression model on the train/test split above, then
# apply the same preprocessing pipeline to the held-out test file and
# write out predictions.
dtrain = xgb.DMatrix(data = as.matrix(train[,-c('price_doc'),with=F]), label = train$price_doc)
dtest = xgb.DMatrix(data = as.matrix(test[,-c('price_doc'),with=F]), label = test$price_doc)
params <- list(booster = "gbtree", objective = "reg:linear",
               eta=0.3, gamma=0, max_depth=5, min_child_weight=1, subsample=1, colsample_bytree=1)
xgb1 <- xgb.train (params = params, data = dtrain, nrounds = 82, watchlist = list(val=dtest,train=dtrain),
                   print_every_n = 10, early_stop_round = 10, maximize = F , eval_metric = "error")
xgbpred <- predict (xgb1,dtest)
# NOTE(review): the model objective is regression ("reg:linear"), but the
# next two lines threshold the prediction at 0.5 and build a confusion
# matrix against `test$final_status`, a column never created in this
# script -- this looks copy-pasted from a classification project; confirm.
xgbpred <- ifelse (xgbpred > 0.5,1,0)
confusionMatrix (xgbpred, test$final_status)
# Prediction on the test data set
test_file = read.csv("test.csv")
# back up the id column so it can be combined with the final result
id = as.data.frame(test_file[1])
# keep only the important columns calculated earlier (stored in imp_col)
test_file = test_file[,c(row.names(imp_col))]
sapply(test_file, function(x) sum(is.na(x)))
test_file = knnImputation(test_file)
sum(is.na(test_file))
# split numeric/categorical, winsorize and normalize as done for training
test_file_num = as.data.frame(Filter(is.numeric, test_file))
test_file_cat = as.data.frame(Filter(is.factor, test_file))
test_without_outliers_num= capping(test_file_num)
test_without_outliers_num_nor= normalize(test_without_outliers_num)
test_data= cbind(test_without_outliers_num_nor,test_file_cat)
# one-hot encode sub_area and drop the original factor column
sub_area = dummy(test_data$ sub_area )
test_data = cbind(test_data,sub_area )
which( colnames(test_data)=="sub_area" )
test_data[17] = NULL
test_data=sapply(test_data,function(x) as.numeric(x))
test_data = as.data.frame(test_data)
write.csv(test_data, "test_xgb.csv", row.names = FALSE)
# NOTE(review): `fit1` is never defined in this script (the rpart model is
# named `fit` and the xgboost model `xgb1`); this line will fail as
# written -- confirm which model was intended.
test_prediction = predict(fit1, test_data)
test_prediction = cbind(id,test_prediction)
names(test_prediction)[2] = "price_doc"
write.csv(test_prediction, "Predicted_result.csv", row.names = FALSE)
|
19b6cbc5a148534392f1d8e520a11cd9e0edd633
|
2a4c61d88ed173ceb6cfae08c951e4c414cdafba
|
/R/BPM.R
|
6d39ac37be00cf7a15e4f8ceb81447d476dca3c1
|
[
"MIT"
] |
permissive
|
VanAndelInstitute/cardiocyte
|
cc7ea01b19523c8179842c991c9888f188fddfca
|
feb74a83d597e10da0e1fe68e92ac47d5d1d3b1a
|
refs/heads/master
| 2021-07-23T08:10:46.448452
| 2020-07-09T20:33:13
| 2020-07-09T20:33:13
| 194,689,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
BPM.R
|
#' bpm
#'
#' Calculate the BPM of the transient (beats per minute).
#'
#' @param x vector of trace data
#' @param framerate the framerate (frames per second)
#' @param ... additional parameters to pass to find_peaks
#' @return The BPM of the transient
#' @export
#' @importFrom baseline baseline
#' @importFrom DescTools AUC
#' @examples
#' data(ca_flux)
#' dat <- bpm(ca_flux$Mean1)
bpm <- function(x, framerate, ...) {
  # peaks per frame, scaled by the frame rate to beats per minute
  peaks_per_frame <- length(find_peaks(x, ...)) / length(x)
  peaks_per_frame * framerate * 60
}
|
f77eac47cc7690b36845a7b234ef7ebb63b8df57
|
f146b737d5caf14c047123f999fcf418fa57ed7e
|
/man/autoInitConv.Rd
|
291257f1214383b8b87b3bfd9d667d8911f49d32
|
[] |
no_license
|
Kiki1017/FluHMM
|
a935b5010576e7e48968a5a1487b5d6003cd9209
|
4ddc2900cdede6a6e193712925e82f30fba633ac
|
refs/heads/master
| 2021-06-22T09:00:21.874467
| 2017-08-10T07:51:50
| 2017-08-10T07:51:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,977
|
rd
|
autoInitConv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoUpdate.R
\name{autoInitConv}
\alias{autoInitConv}
\title{Run MCMC iterations until initial convergence}
\usage{
autoInitConv(x, iter = 5000, maxit = 95000, Rhat = 1.1)
}
\arguments{
\item{x}{An object of class `FluHMM', from which to generate posterior samples.}
\item{iter}{Number of iterations to run.}
\item{maxit}{Maximum number of iterations to run before giving up.}
\item{Rhat}{Gelman-Rubin diagnostic cutoff value to determine convergence
(only for the sigma[1] parameter).}
}
\value{
None. The function mutates its argument `x' directly.
}
\description{
This function is not normally called by the user; it is called from \code{\link{FluHMM}} provided
\code{initConv=TRUE} (the default). It generates posterior samples from the model repeatedly
until convergence is reached for the sigma[1] parameter (this is called "initial convergence").
}
\details{
The sigma[1] parameter (standard deviation of the pre-epidemic phase) is of primary
importance in the model, since the pre-epidemic phase comes first and its correct identification
is the basis on which to estimate the other phases. If convergence for sigma[1] has been reached,
the other parameters in the model are very likely to have converged too, with the exception of
beta[2] and beta[3] (slopes of the epidemic growth and epidemic decline phase); the latter mix
more slowly and may necessitate a longer, and possibly thinned, MCMC chain.
Therefore "initial convergence" is defined as convergence for the sigma[1] parameter. Unless
this is achieved, it is inadvisable to use the posterior samples for any inference at all. Only
_after_ this has been achieved can a new posterior sample be generated (using \code{\link{update}}).
Then convergence for all parameters is checked again and, if not achieved, a new sample is generated
from scratch or the current one is further extended.
}
|
c5985f89b610e8ea13a54deb1c27186792db9db8
|
4d967585341ab6e5260aa57e4da93c83c1d7707a
|
/man/splitTable 2.Rd
|
8d2649d2d15a03984a30a4e7ec2b57762cf374da
|
[
"MIT"
] |
permissive
|
arnevpladsen/CARMA
|
34f081ba7b8b9b92693f8e18c1055842d33e4abc
|
ff8f36bf15955bcb5d6b30d4d41d2a1f6c887bae
|
refs/heads/master
| 2021-12-03T06:54:21.799686
| 2021-08-10T13:56:41
| 2021-08-10T13:56:41
| 233,879,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,373
|
rd
|
splitTable 2.Rd
|
\name{splitTable}
\alias{splitTable}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Split a CARMA score table
}
\description{
This function will not be called directly by the user. The function is used by the \code{normalizeCARMA} function to split a CARMA score table into a list containing sample-wise CARMA scores.
}
\usage{
splitTable(norm.res, carma.res.reg)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{norm.res}{
a data frame containing normalized regional carma scores for all samples.
}
\item{carma.res.reg}{
a list of length corresponding to the number of samples, each containing raw regional carma scores for a given sample.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list of length corresponding to the number of samples, each containing normalized regional carma scores for a given sample.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
c87ad16977faed6ed89c75eef965a20c3cb5002b
|
d143e583889c388ea98e2486f365b441260116a7
|
/R/AnalyseDE.R
|
f8cf195dbdd2f8270599dc503d48cb3dce86adab
|
[] |
no_license
|
hobrien/LabNotes
|
8c69355af679af100e7e14ad612660ebdf5ba262
|
557c06ca384526aa164760208a443b19a7309693
|
refs/heads/master
| 2021-01-24T09:21:14.425123
| 2017-09-06T13:19:45
| 2017-09-06T13:19:45
| 49,435,493
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,793
|
r
|
AnalyseDE.R
|
library(tidyverse)
library(GO.db)
#library(DOSE)
library(org.Hs.eg.db)
#library(topGO)
#library(GSEABase)
#library(clusterProfiler)
source("~/BTSync/FetalRNAseq/LabNotes//R/FormatGGplot.R")
# Load the per-timepoint male-vs-female differential expression tables and
# stack them into one long data frame.
#
# Returns a tibble with columns:
#   Id             -- Ensembl gene id with the version suffix stripped
#   log2FoldChange -- male vs female log2 fold change
#   sig            -- 1 if padj < 0.1, 0 otherwise (NA if padj is NA)
#   week           -- post-conception week of the analysis window; the
#                     15-16 and 17-19 windows are coded by their midpoints
#                     (15.5 and 18), as in the original per-file version.
GetFoldChanges <- function() {
  base_dir <- "~/BTSync/FetalRNAseq/Counts"
  run_dirs <- c(
    "MvsF_12_HDBR_FDR_0.1_edgeR",
    "MvsF_13_HDBR_excl_16491_FDR_0.1_edgeR",
    "MvsF_14_HDBR_FDR_0.1_edgeR",
    "MvsF_15_17_HDBR_excl_15641_FDR_0.1_edgeR",
    "MvsF_17_20_HDBR_excl_18432_FDR_0.1_edgeR"
  )
  weeks <- c(12, 13, 14, 15.5, 18)

  # Read one results table, keep Id / fold change / significance flag and
  # tag the rows with their week. `week_value` is deliberately not a data
  # column name, so mutate() resolves it from the function environment.
  read_one <- function(run_dir, week_value) {
    read_delim(file.path(base_dir, run_dir, "tables", "MalevsFemale.complete.txt"),
               "\t", escape_double = FALSE, trim_ws = TRUE) %>%
      mutate(sig = ifelse(padj < 0.1, 1, 0)) %>%
      dplyr::select(Id, log2FoldChange, sig) %>%
      mutate(week = week_value)
  }

  map2(run_dirs, weeks, read_one) %>%
    bind_rows() %>%
    # strip Ensembl version suffixes so Ids match across tables
    mutate(Id = gsub('\\..*', '', Id))
}
# Plot per-gene log2 fold change trajectories across developmental weeks.
# One grey line per gene; red points mark timepoints where the gene was
# significant (sig == 1) at FDR 0.1.
#
# Args:
#   genes:    long data frame from GetFoldChanges() (possibly filtered),
#             with columns Id, log2FoldChange, sig, week
#   set_name: label for the plot title
PlotFC <- function(genes, set_name) {
  plot <- ggplot(genes, aes(x=week, y=log2FoldChange, group=Id)) +
    geom_line(alpha=.1) +
    # highlight significant gene/timepoint combinations
    geom_point(colour='red', size=1, alpha=.25, data=filter(genes, sig==1)) +
    # weeks 15.5 and 18 are midpoints of the 15-16 and 17-19 windows
    scale_x_continuous(breaks=c(12,13,14,15.5,18), labels=c('12', '13', '14', '15-16', '17-19')) +
    tufte_theme() +
    ylab("<--- Female-biased Male-biased --->") +
    xlab("Post Conception Week") +
    theme(axis.title.y=element_text(size=9)) +
    ggtitle(paste("Log fold differences in", set_name))
  plot
}
# Same as PlotFC(), but lines are coloured by chromosome type (ChrType,
# e.g. autosomal / X / Y) and significant points are drawn in black.
#
# Args:
#   genes:    long data frame with columns Id, log2FoldChange, sig, week,
#             ChrType (see GetCounts()/GetFoldChanges())
#   set_name: label for the plot title
PlotFC_by_ChrType <- function(genes, set_name) {
  plot <- ggplot(genes, aes(x=week, y=log2FoldChange, group=Id)) +
    geom_line(alpha=1, aes(colour=ChrType)) +
    geom_point(colour='black', size=1, alpha=.25, data=filter(genes, sig==1)) +
    scale_x_continuous(breaks=c(12,13,14,15.5,18), labels=c('12', '13', '14', '15-16', '17-19')) +
    # pick three distinguishable colours from the Set1 palette
    scale_colour_manual(values=brewer.pal(6, "Set1")[c(1,2,4)]) +
    tufte_theme() +
    ylab("<--- Female-biased Male-biased --->") +
    xlab("Post Conception Week") +
    theme(axis.title.y=element_text(size=9)) +
    ggtitle(paste("Log fold differences in", set_name))
  plot
}
min=12
max=20
# Build the sample design ("target") table for a differential expression
# run: join sample metadata onto the design file, then filter by post
# conception week (PCW), RIN quality, brain bank and an exclusion list.
#
# Args:
#   min, max:   PCW window; samples with min <= PCW < max are kept.
#               NOTE(review): these names shadow base::min/max inside the
#               function body -- works here, but renaming would be safer.
#   RIN_cutoff: minimum RIN to keep (NA disables the filter).
#   exclude:    vector of sample labels to drop. The default NA makes the
#               length check below pass, but filtering against NA keeps
#               every row, so the default is effectively a no-op.
#   BrainBank:  'HDBR' (default) or 'HDBRexpression'; selects the sample
#               info file, the join direction and HDBR-specific filters.
#   varInt:     name of the column to coerce to a factor (the variable of
#               interest for the DE model, default 'Sex').
#
# Returns a plain data.frame with one row per retained sample.
GetTarget <- function(min, max, RIN_cutoff=0, exclude=NA, BrainBank='HDBR', varInt='Sex') {
  PCW_cutoff <- c(min, max)
  targetFile <- "~/BTSync/FetalRNAseq/LabNotes/MvsFmac.txt"
  SequencingCentreFile <- "~/BTSync/FetalRNAseq/LabNotes/SampleProgress.txt"
  # the two brain banks keep their sample metadata in different files
  if (BrainBank == 'HDBRexpression'){
    SampleInfoFile <- "~/BTSync/FetalRNAseq/LabNotes/HDBRsample_info.txt"
  } else {
    SampleInfoFile <- "~/BTSync/FetalRNAseq/LabNotes/sample_info.txt"
  }
  target <- read_tsv(targetFile) # the design/target file
  sample_info <- read_tsv(SampleInfoFile, col_types=cols(BrainBankID='c', Sex='c', PCW='n', RIN='n'))
  if ( BrainBank == 'HDBRexpression') {
    target <- right_join(target, sample_info, by = c("label" = "BrainBankID"))
  } else {
    target <- left_join(target, dplyr::select(sample_info, BrainBankID, Sex, PCW, RIN), by = c("label" = "BrainBankID"))
    # sequencing centre is only tracked for the non-HDBRexpression samples
    SequencingCentre <- read.delim(SequencingCentreFile)
    target <- left_join(target, dplyr::select(SequencingCentre, sample, Centre), by = c("label" = "sample"))
  }
  target <- arrange(target, Sex)
  if (!is.na(RIN_cutoff)) {
    target <- filter(target, RIN >= RIN_cutoff)
  }
  # keep samples inside the [min, max) PCW window
  if (!is.null(PCW_cutoff)) {
    target <- filter(target, PCW >= PCW_cutoff[1] & PCW < PCW_cutoff[2])
  }
  if (length(exclude) > 0) {
    target <- filter(target, !label %in% exclude)
  }
  # HDBR proper excludes replicate samples, whose labels contain 'A'
  if (BrainBank == 'HDBR') {
    target <- filter(target, ! grepl('A', label))
  }
  # convert PCW+0 samples to PCW-1
  if (BrainBank != 'HDBRexpression') {
    # these samples already carry the correct week, so the (now disabled)
    # adjustment below deliberately skipped them
    newer_samples = c('11875',
                      '12107',
                      '12545',
                      '12546',
                      '12994',
                      '13142',
                      '12993',
                      '13008') #these are already the correct week
    #target$PCW <- ifelse(target$PCW %% 1 == 0 & ! target$label %in% newer_samples,
    #                     target$PCW -1, floor(target$PCW))
  }
  target$PCW <- floor(target$PCW)
  target <- droplevels(target)
  # treat 'unknown' sex as missing
  target <- mutate(target, Sex = factor(ifelse(Sex == 'unknown', NA, Sex)))
  target <- as.data.frame(target)
  # the variable of interest must be a factor for the DE model
  target[,varInt] <- as.factor(target[,varInt])
  target
}
my_db <- src_mysql("FetalRNAseq", host="localhost", user="root")
# Look up the chromosome and gene symbol for each gene name in `Ids` from
# the local FetalRNAseq MySQL database (Gencode annotation, via the
# module-level `my_db` connection).
#
# Args:
#   Ids: character vector of gene names to look up.
#
# Returns a tibble with columns Chr and GeneID (empty but correctly typed
# when `Ids` is empty).
#
# NOTE(review): the query is built by string concatenation, which is only
# safe for trusted inputs; consider parameterized queries otherwise.
GetGeneIDs <- function(Ids) {
  # one query per Id; results are collected into a list first and combined
  # once at the end (avoids the O(n^2) cost of growing a tibble in a loop)
  fetch_one <- function(Id) {
    query <- paste0(
      "SELECT DISTINCT GencodeGTF.seqid AS 'Chr', ",
      "GencodeFeatures.value AS 'GeneID' ",
      "FROM GencodeFeatures, GencodeGTF ",
      "WHERE GencodeGTF.id = GencodeFeatures.id ",
      "AND GencodeFeatures.feature = 'gene_name' ",
      "AND GencodeFeatures.id IN (",
      "SELECT GencodeGTF.id FROM GencodeGTF, GencodeFeatures ",
      "WHERE GencodeGTF.id = GencodeFeatures.id ",
      "AND GencodeFeatures.Value = '", Id, "')"
    )
    collect(tbl(my_db, sql(query)))
  }
  results <- lapply(Ids, fetch_one)
  if (length(results) == 0) {
    # preserve the original empty-input return value
    return(tibble(Chr = character(), GeneID = character()))
  }
  bind_rows(results)
}
# Load the full 12-20 week male-vs-female results table and annotate each
# gene with its chromosome type.
#
# Returns the complete results tibble with two extra columns:
#   Chr     -- sex chromosome name from SexChrGenes.txt (NA for autosomes)
#   ChrType -- Chr, or 'autosomal' where Chr is NA
# Ensembl version suffixes are stripped from Id.
GetCounts <- function(){
  MalevsFemale_complete <- read_delim("~/BTSync/FetalRNAseq/Counts/MvsF_12_20_HDBR_excl_15641_18432_16491_FDR_0.1_edgeR/tables/MalevsFemale.complete.txt",
                                      "\t", escape_double = FALSE, trim_ws = TRUE)
  # lookup table of genes located on the sex chromosomes
  SexChrGenes <- read_delim("/Users/heo3/BTSync/FetalRNAseq/LabNotes/SexChrGenes.txt",
                            delim='\t',
                            col_names=c('Id', 'Chr'),
                            col_types=cols(Id='c', Chr='c')
  )
  MalevsFemale_complete <- MalevsFemale_complete %>%
    full_join(SexChrGenes) %>%
    mutate(ChrType = ifelse(is.na(Chr), 'autosomal', Chr), Id = gsub('\\..*', '', Id))
  MalevsFemale_complete
}
# Assemble the single-cell marker gene sets used for enrichment analysis.
#
# Combines the Pollen et al. sets with two Darmanis et al. clusters,
# maps gene symbols to Ensembl ids, and appends a "Background" set made
# of every gene with a fold-change estimate in GetCounts().
#
# Returns a tibble with columns gene_name, set, reference, ENSEMBL.
GetGeneSets <- function() {
  gene_sets <- read_delim("~/BTSync/FetalRNAseq/GSEA/MSigDB/PollenEtAl.txt",
                          "\t", escape_double = FALSE, col_names=c('gene_name', 'set', 'reference'),
                          trim_ws = TRUE) %>%
    bind_rows(read_delim("~/BTSync/FetalRNAseq/GSEA/MSigDB/DarmanisEtAl2.txt",
                         "\t", escape_double = FALSE, col_names=c('gene_name', 'set', 'reference'),
                         trim_ws = TRUE)) %>%
    # keep only the Darmanis clusters of interest plus all Pollen sets
    filter(set %in% c('cluster7', 'cluster9') | reference == 'PollenEtAl2014')
  # alternative gene-set files tried previously, kept for reference:
  #bind_rows(read_delim("~/BTSync/FetalRNAseq/GSEA/MSigDB/DarmanisEtAl.txt",
  #                     "\t", escape_double = FALSE, col_names=c('gene_name', 'set', 'reference'),
  #                     trim_ws = TRUE)) %>%
  #bind_rows(read_delim("~/BTSync/FetalRNAseq/GSEA/MSigDB/DarmanisReanalysis.txt",
  #                     "\t", escape_double = FALSE, col_names=c('gene_name', 'set', 'reference'),
  #                     trim_ws = TRUE)) %>%
  #bind_rows(read_delim("~/BTSync/FetalRNAseq/GSEA/MSigDB/CampEtAl.txt",
  #                     "\t", escape_double = FALSE, col_names=c('gene_name', 'set', 'reference'),
  #                     trim_ws = TRUE)) %>%
  #bind_rows(read_delim("~/BTSync/FetalRNAseq/GSEA/MSigDB/Pollen15.txt",
  #                     "\t", escape_double = FALSE, col_names=c('gene_name', 'set', 'reference'),
  #                     trim_ws = TRUE))
  # add Ensembl ids for each gene symbol
  gene_sets <- full_join(gene_sets,
                         bitr(gene_sets$gene_name, fromType="SYMBOL", toType="ENSEMBL", OrgDb="org.Hs.eg.db"),
                         by=c('gene_name' = 'SYMBOL')
  )
  # background set: every gene with a non-missing fold change estimate
  BG <- GetCounts() %>%
    filter(! is.na(FC)) %>%
    dplyr::select(Id) %>%
    mutate(set="Background", reference = NA)
  gene_sets <- bitr(BG$Id, fromType="ENSEMBL", toType="SYMBOL", OrgDb="org.Hs.eg.db") %>%
    full_join(BG, by=c('ENSEMBL' = 'Id')) %>%
    mutate(gene_name = SYMBOL) %>%
    dplyr::select(gene_name, set, reference, ENSEMBL) %>%
    bind_rows(gene_sets)
  gene_sets
}
# Plot normalised counts for one gene across post conception weeks,
# coloured by sex, with a smoothed trend per sex.
#
# Args:
#   geneID:   gene symbol; mapped to an Ensembl id via bitr().
#   fileName: results data frame whose "norm.<label>" columns hold the
#             normalised counts per sample.
#   id:       optional sample label to highlight in orange; the default is
#             a sentinel string that matches no sample.
# Relies on the module-level `target` data frame (see GetTarget()) for
# per-sample PCW and Sex via left_join().
PlotExpression<-function(geneID, fileName, id="A value that hopefully isn't in the dataset") {
  Ensembl_Id <- bitr(geneID, fromType="SYMBOL", toType="ENSEMBL", OrgDb="org.Hs.eg.db")[,2]
  # reshape the per-sample "norm.<label>" columns into long form and
  # attach sample metadata
  data <- filter(fileName, Id == Ensembl_Id) %>%
    dplyr::select(starts_with('norm')) %>%
    gather() %>%
    separate(key, into=c('norm', 'label'), sep='[.]') %>%
    dplyr::select(label, value) %>%
    left_join(target)
  plot<- ggplot(subset(data, label != id), aes(x=PCW, y=value, colour=Sex)) +
    geom_jitter(height = 0, width=.1, alpha=.75) +
    # draw the highlighted sample separately, in orange
    geom_point(data=subset(data, label==id), colour='orange') +
    geom_smooth() +
    ylab("normalised counts") +
    tufte_theme() +
    scale_colour_brewer(type = "qual", palette = 6) +
    ggtitle(geneID) +
    theme(legend.position=c(0.1,.9)) +
    theme(plot.background=element_blank())
  plot
}
# Plot per-sample normalised counts for one gene at a single timepoint,
# grouped by sex, with horizontal bars marking the Male/Female group means.
#
# Args:
#   geneID:   gene symbol, used only for the plot title.
#   fileName: results data frame with per-sample "norm.<label>" columns
#             plus `Male` and `Female` group-mean columns.
# Relies on the module-level `target` data frame for per-sample Sex.
PlotTimepoint<-function(geneID, fileName) {
  # long-format per-sample counts joined to sample metadata
  data <- fileName %>%
    dplyr::select(starts_with('norm')) %>%
    gather() %>%
    separate(key, into=c('norm', 'label'), sep='[.]') %>%
    dplyr::select(label, value) %>%
    left_join(target)
  # group means, reshaped so `key` is Male/Female and `value` is the mean
  mean <- fileName %>%
    dplyr::select(Male, Female) %>%
    gather()
  plot<- ggplot(data, aes(x=Sex, y=value, colour=Sex)) +
    # zero-height errorbars render the group means as horizontal bars
    geom_errorbar(aes(x=key, ymin=value, ymax=value), colour='black', size=1, width=.5, data=mean) +
    geom_jitter(height = 0, width=.1, alpha=.75) +
    ylab("normalised counts") +
    xlab('') +
    tufte_theme() +
    scale_colour_brewer(type = "qual", palette = 6) +
    ggtitle(geneID) +
    theme(plot.background=element_blank())
  plot
}
|
08f140b419aaceeb65943687a091c9ca08bd794c
|
95dd00f2a52b8a8ed2e99ffd392b00efbfd3612a
|
/inclass_0420.R
|
5d5ff00a13b16ed6cc0959cda30a89c99b33e394
|
[] |
no_license
|
Yueyang-Li-Elfa/DataAnalyticsSpring2020_Lab_YueyangLi
|
321f920a983a164690e6020db8ea9eb2c9a1ec09
|
35ac5ee65708a6ed3be16d3999aa5b4880f6ca98
|
refs/heads/master
| 2020-12-19T19:18:51.658251
| 2020-05-05T02:59:00
| 2020-05-05T02:59:00
| 235,826,702
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,587
|
r
|
inclass_0420.R
|
# In-class practice
# Apr 20
## LOWESS example using the Cars dataset
data(cars)
str(cars) # 50 * 2
# plot: speed vs distance
plot(speed ~ dist, data = cars)
# We can see a positive relationship between them
# the documentation for the lowess function
?lowess
# use the lowess() function: return values
lowess(cars$speed ~ cars$dist)
# combine with lines() function to overlay the smoother on the scatterplot
lines(lowess(cars$speed ~ cars$dist, f = 2/3), col = "blue") # default f value
# change the f value (the smoother span: fraction of points used per fit)
# larger values give more smoothness
lines(lowess(cars$speed ~ cars$dist, f = 0.8), col = "red")
lines(lowess(cars$speed ~ cars$dist, f = 0.9), col = "green")
lines(lowess(cars$speed ~ cars$dist, f = 0.1), col = 5)
lines(lowess(cars$speed ~ cars$dist, f = 0.01), col = 6) # almost connect each point, overfit
## Linear Discriminant Analysis example using Iris dataset
library(MASS)
names(iris)
dim(iris)
head(iris)
# train test split (50/50, seeded for reproducibility)
set.seed(100)
index <- sample(1:nrow(iris), nrow(iris)/2)
iris_train <- iris[index, ]
iris_test <- iris[-index, ]
# the documentation of lda() function
?lda
# use lda() function to fit the model
fit1 <- lda(Species ~ ., data = iris_train)
# predict on training data
predict1 <- predict(fit1, iris_train)
predict1_class <- predict1$class
# confusion matrix
table1 <- table(predict1_class, iris_train$Species)
table1
# training accuracy: correct classifications / total
sum(diag(table1)) / sum(table1)
# predict on testing data
predict2 <- predict(fit1, iris_test)
predict2_class <- predict2$class
# confusion matrix
table2 <- table(predict2_class, iris_test$Species)
table2
# test accuracy
sum(diag(table2)) / sum(table2)
|
daea0e897aa20f2e90e86a085f87e4646fdb4d56
|
f24d543bd6186ba4570a8969f204d22da9e3764f
|
/R/functions/normalize_height.R
|
4afb516c35b47e305a8d6b569872159a3151eb40
|
[] |
no_license
|
robintrayler/collagen_demineralization
|
10b9b29fc1b715874818a5a33ae11799e6cda875
|
d7af30b23b625f6cb771556ef66a9e275a4b66dd
|
refs/heads/main
| 2023-08-23T15:35:44.397224
| 2023-08-05T01:39:25
| 2023-08-05T01:39:25
| 411,031,242
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
normalize_height.R
|
# Normalize a spectrum to the height of a local absorbance maximum.
#
# Args:
#   data:     data frame with `wavenumber` and `absorbance` columns
#   position: wavenumber at the centre of the search window
#   width:    half-width of the search window
#
# Returns `data` with `absorbance` divided by the maximum absorbance
# found within [position - width, position + width], so that the local
# peak has height 1.
normalize_height <- function(data,
                             position,
                             width) {
  # tallest absorbance inside the wavenumber window
  peak_height <- data |>
    filter(between(wavenumber,
                   left = position - width,
                   right = position + width)) |>
    pull(absorbance) |>
    max()
  # rescale the whole trace by the local peak height
  data |>
    mutate(absorbance = absorbance / peak_height)
}
|
31879b521a348f6be51ff307bfa59dc9fd19a16c
|
0563103cc766f0cb981ccb8103594357e277dc74
|
/man/point_Hexp.Rd
|
bb4ade6711eed0f228994084a1f4c93e5ae59cb0
|
[] |
no_license
|
pblischak/polyfreqs
|
a140631ae572753ca6123180474f2b067508c27f
|
b03ec79562842f259ff89b368b6f8a1bf983c6bc
|
refs/heads/master
| 2021-01-21T03:33:59.306182
| 2016-12-16T22:05:25
| 2016-12-16T22:05:25
| 34,615,921
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,159
|
rd
|
point_Hexp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/point_Hexp.R
\name{point_Hexp}
\alias{point_Hexp}
\title{Estimation of expected heterozygosity}
\usage{
point_Hexp(p_samp, genotypes, ploidy)
}
\arguments{
\item{p_samp}{A posterior sample of allele frequencies from \code{\link{polyfreqs}}.}
\item{genotypes}{Matrix of genotypes sampled during MCMC.}
\item{ploidy}{The ploidy level of individuals in the population (must be >= 2).}
}
\value{
Returns the per locus estimates of expected heterozygosity (\code{per_locus_Hexp})
}
\description{
\emph{INTERNAL}: Estimates a posterior distribution for the per locus expected heterozygosity using the unbiased estimator of Hardy (2015) and the posterior samples of allele frequencies calculated by \code{\link{polyfreqs}}.
}
\details{
Posterior distributions for the per locus expected heterozygosity are automatically calculated and returned by the \code{\link{polyfreqs}} function.
}
\references{
Hardy, OJ. 2015. Population genetics of autopolyploids under a mixed mating model and the estimation of selfing rate. \emph{Molecular Ecology Resources}, doi: 10.1111/1755-0998.12431.
}
|
0f0ccce1aa44a4943d4d3ca39ae90e0b2a106320
|
354b7cc2a9f9ac132090f2afff0fa112b8254f1c
|
/man/ia_files.Rd
|
9c90cbd12e84ffd8a421f201b91f31e8ae2f3909
|
[] |
no_license
|
cran/internetarchive
|
ea833746e939c66bc447e1fb020a41a91b9f4f38
|
7e9bf3d100440ba3ed79c3f42aba27d5003f53dd
|
refs/heads/master
| 2021-01-15T15:31:34.753794
| 2016-12-08T19:38:21
| 2016-12-08T19:38:21
| 32,833,711
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 667
|
rd
|
ia_files.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ia_files.R
\name{ia_files}
\alias{ia_files}
\title{Access the list of files associated with an Internet Archive item}
\usage{
ia_files(items)
}
\arguments{
\item{items}{A list describing an Internet Archive items returned from
the API.}
}
\value{
A list containing the files as a list of character vectors.
}
\description{
Access the list of files associated with an Internet Archive item
}
\examples{
\dontrun{
ats_query <- c("publisher" = "american tract society")
ids <- ia_search(ats_query, num_results = 3)
items <- ia_get_items(ids)
files <- ia_files(items)
files
}
}
|
60a8031b4a6994889bc2bcabbe4ad2c092440fd6
|
6b18cafb5a709929ac6f653ee93c367ea1bf417b
|
/R/display.r
|
95ca823e99f2e94e86bf20e47cacad2a7b36a48d
|
[] |
no_license
|
skranz/bbsvg
|
9ef19d08183959252ad2420a6bbd2af7279f8ff0
|
a7f5dba91507aead66c783be2a08482d7abf95de
|
refs/heads/master
| 2022-10-14T15:26:34.231135
| 2022-10-06T14:47:04
| 2022-10-06T14:47:04
| 92,750,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
display.r
|
# Resolve the display value for a geom: the literal string "whisker" is
# replaced by a whisker template placeholder keyed on the geom's id; any
# other value is passed through unchanged.
init.geom.display <- function(geom, display) {
  if (!identical(display, "whisker")) {
    return(display)
  }
  sprintf("{{display_%s}}", geom$id)
}
|
c80e1b06668769579db8173924532f7018bf1ddc
|
baebdc762e7cf14157645eaae0e2bb642dfc1708
|
/inst/tests/test-stat-smooth.r
|
1e4b64bf3e67c76b70e2c992a08061944ba335ff
|
[] |
no_license
|
AmeliaMN/gg2v
|
d6bcab7d1b2da8cfbfac4cbce9b07f037c0aad6f
|
7c8a03bf4d352a4463d58c94d71446a10959139f
|
refs/heads/master
| 2021-01-18T05:04:47.553697
| 2013-07-16T22:20:54
| 2013-07-16T22:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
test-stat-smooth.r
|
context("stat-smooth")
set.seed(1014)
library(ggplot2)
df <- data.frame(
x = 1:10,
y = (-5:4) ^ 2 + rnorm(10),
z = factor(rep(1:2, each = 5)))
base <- ggplot(df, aes(x, y)) + geom_point()
base + geom_line(stat = "smooth")
save_spec("stat/smooth.js")
base + geom_line(stat = "smooth", method = "lm", formula = y ~ poly(x, 2))
save_spec("stat/smooth-lm.js")
base + geom_line(aes(group = z))
save_spec("stat/smooth-grouped.js")
base + geom_line(aes(colour = z))
save_spec("stat/smooth-colour.js")
|
311aa0443637fea1f6217aff5c3cb3c3d53972b1
|
14c80ea0be9edea6bda1114a5bcf55cec002309c
|
/Plotting/PlotDistributionFits.R
|
a59b5a2685b5fe0269e0f70535fa84e7b237272d
|
[] |
no_license
|
fahmidah/bridge.collapses
|
3c0b7bae9a854096d2848c4e690ac746480a5ca3
|
3710a254a67575f77e96a4e447fd50eebe14c187
|
refs/heads/master
| 2022-02-26T17:24:53.110648
| 2019-10-28T14:35:07
| 2019-10-28T14:35:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,528
|
r
|
PlotDistributionFits.R
|
# Plot panel of
PlotDistributionFits <- function(BridgesDataFrame,ls.df, TYPE = "USGS", SAVE = FALSE, TEXT = TRUE, ONLY = "LP3", ANY=NULL, labelsType = "yearsState",
SCALE = "LINEAR", LEGEND = "NONE", outputType = "PRINT", SIZE = c(4,13),
ANNOTATE_LOC = "BR",HURR_LAB="*",AREA_RATIO=TRUE){
# SETUP
colorsPDists <- colorsP$Dists
linesPDists <- linesP$Dists
limits <- c(0,5000)
ylab <- expression(paste("Cumulative density function of Q, ",F[Q]))
# LABELS
if (labelsType == "long"){
BridgesDataFrame$LABEL <- paste("c.",
substr(BridgesDataFrame[,"YR_BLT_EST"],1,4),
", f.",
substr(BridgesDataFrame[,"YR_FAIL"],1,4),
" - ",
sapply(1:nrow(BridgesDataFrame), function(i) df.States[df.States$STFIPS==BridgesDataFrame[i,"STFIPS"],"STATE_CODE"]),
sep="")
}
if (labelsType == "yearsState"){
BridgesDataFrame$LABEL <- paste(substr(BridgesDataFrame[,"YR_BLT_EST"],1,4),
" - ",
substr(BridgesDataFrame[,"YR_FAIL"],1,4),
" - ",
sapply(1:nrow(BridgesDataFrame), function(i) df.States[df.States$STFIPS==BridgesDataFrame[i,"STFIPS"],"STATE_CODE"]),
sep="")
}
BridgesDataFrame$BOOL_WAS_HURRICANE <- nchar(BridgesDataFrame$COMMENT_LINKED_HURRICANE)!=0
BridgesDataFrame$HURRICANE <- labelsP$Hurr[2]
BridgesDataFrame[BridgesDataFrame$BOOL_WAS_HURRICANE, "HURRICANE"] <- labelsP$Hurr[1]
BridgesDataFrame$HURRICANE <- factor(BridgesDataFrame$HURRICANE, levels = labelsP$Hurr, labels = labelsP$Hurr)
labs <- character(nrow(BridgesDataFrame))
if (HURR_LAB=="*"){
rows <- BridgesDataFrame$BOOL_WAS_HURRICANE & !is.na(BridgesDataFrame$BOOL_WAS_HURRICANE)
labs[rows] <- "H"
}
if (HURR_LAB=="COLOR"){
labelHurrCol <- sapply(limits,function(i) ifelse(BridgesDataFrame[BridgesDataFrame$LABEL == i,"HURRICANE"]=="YES",
colorsP$Hurr[1],
"#000000"))
}
else{
labelHurrCol <- "black"
}
if (AREA_RATIO==TRUE){
# >= 1.2
rows <- BridgesDataFrame$AREA_RATIO_NHD >= 1.2
labs[rows] <- paste(labs[rows],"+",sep="")
# >= 1.4
rows <- BridgesDataFrame$AREA_RATIO_NHD >= 1.4
labs[rows] <- paste(labs[rows],"+",sep="")
# <= 0.8
rows <- BridgesDataFrame$AREA_RATIO_NHD <= 0.8
labs[rows] <- paste(labs[rows],"-",sep="")
# <= 0.6
rows <- BridgesDataFrame$AREA_RATIO_NHD <= 0.6
labs[rows] <- paste(labs[rows]," -",sep="")
}
labs[!labs==""] <- paste("^'",labs[!labs==""],"'",sep = "")
BridgesDataFrame$LABEL <- paste(BridgesDataFrame$LABEL,labs,sep="")
p <- list()
# PANEL 1: USGS
if ("USGS" %in% TYPE){
df <- ls.df[["USGS"]][["df"]]
df$ECDF <- (df$GEV)
df.melt <- melt(df,id.vars = "Q", variable.name = "GROUP", value.name = "F_Q")
df.melt$GROUP <- factor(as.character(df.melt$GROUP), levels = labelsP$Dists[c("ECDF","LP3","GEV")], labels = labelsP$Dists[c("ECDF","LP3","GEV")])
colorsPDists["ECDF"] <- colorsP$Data["USGS"]
p1 <- ggplot(data=df.melt) +
stat_ecdf(data = subset(df.melt,GROUP=="ECDF"),aes(x=Q, color=GROUP, linetype=GROUP)) +
geom_line(data = subset(df.melt,GROUP!="ECDF"),aes(x=Q,y=F_Q,group=GROUP,color=GROUP,linetype=GROUP))
if (TEXT==TRUE & ANNOTATE_LOC=="BR"){
p1 <- p1 +
annotate("text",
x = 0.99*max(df$Q),
y = c(0.08,0.04),
label = c(paste("LP3: A2 = ",signif(ls.df[["USGS"]][["AD"]]$LP3,2)," (p = ",signif(ls.df[["USGS"]][["AD"]]$LP3p,2),")",sep=""),
paste("GEV: A2 = ",signif(ls.df[["USGS"]][["AD"]]$GEV,2)," (p = ",signif(ls.df[["USGS"]][["AD"]]$GEVp,2),")",sep="")),
size = 1*textP$annotate[outputType],
hjust = 1)#,
# parse = TRUE)
}
p1 <- p1 +
labs(x = "Annual Peak Flow, Q [cfs], USGS",y=ylab,title=BridgesDataFrame$LABEL) +
# xlim(limits)+
scale_color_manual(values = colorsPDists[c("ECDF","LP3","GEV")], name = legendsP$Dists, labels = c("USGS-ECDF","USGS-LP3","USGS-GEV")) +
scale_linetype_manual(values = linesPDists[c("ECDF","LP3","GEV")]) +
guides(color = guide_legend(order = 1,
override.aes = list(linetype=linesPDists[c("ECDF","LP3","GEV")]
)),
linetype = FALSE) +
theme(panel.background = element_rect(fill = "white"),
legend.key = element_rect(fill = "white") ,
panel.grid.major.y = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.minor.x = element_blank(),
axis.ticks.length=unit(-0.08, "cm"), axis.ticks.margin=unit(0.16, "cm"),
axis.text.x = element_text(color = "black", size = textP$reg[outputType]),
axis.title.x = element_text(color = "black", size = textP$sub[outputType]),
axis.text.y = element_text(color = "black", size = textP$reg[outputType]),
axis.title.y = element_text(color = "black", size = textP$sub[outputType]),
legend.text = element_text(color = "black", size = textP$sub[outputType]),
legend.title = element_text(color = "black", size = textP$sub[outputType]),
plot.title = element_text(color = "black", size = textP$head[outputType])
)
p[[1]] <- p1
}
# PANEL 2: VICG
if ("VICg" %in% TYPE){
df <- ls.df[["VICg"]][["df"]]
df$ECDF2 <- (df$GEV)
df.melt <- melt(df,id.vars = "Q", variable.name = "GROUP", value.name = "F_Q")
df.melt$GROUP <- factor(as.character(df.melt$GROUP), levels = c("ECDF2","LP3","GEV"), labels = c("ECDF2","LP3","GEV"))
colorsPDists["ECDF2"] <- colorsP$Data["VICG"]
linesPDists["ECDF2"] <- linesP$Dists["ECDF"]
p2 <- ggplot(data=df.melt) +
stat_ecdf(data = subset(df.melt,GROUP=="ECDF2"),aes(x=Q, color=GROUP, linetype=GROUP)) +
geom_line(data = subset(df.melt,GROUP!="ECDF2"),aes(x=Q,y=F_Q,group=GROUP,color=GROUP,linetype=GROUP))
if (TEXT==TRUE & ANNOTATE_LOC=="BR"){
p2 <- p2 +
annotate("text",
x = 0.99*max(df$Q),
y = c(0.08,0.04),
label = c(paste("LP3: A2 = ",signif(ls.df[["VICg"]][["AD"]]$LP3,2)," (p = ",signif(ls.df[["VICg"]][["AD"]]$LP3p,2),")",sep=""),
paste("GEV: A2 = ",signif(ls.df[["VICg"]][["AD"]]$GEV,2)," (p = ",signif(ls.df[["VICg"]][["AD"]]$GEVp,2),")",sep="")),
size = 1*textP$annotate[outputType],
hjust = 1)#,
# parse = TRUE)
}
p2 <- p2 +
labs(x = "Annual Maximum Daily Flow, Q [cfs], Daymet-VIC-Gauge",y=ylab,title=BridgesDataFrame$LABEL) +
# xlim(limits)+
scale_color_manual(values = colorsPDists[c("ECDF2","LP3","GEV")], name = legendsP$Dists, labels = c("VIC-GAUGE-ECDF","VIC-GAUGE-LP3","VIC-GAUGE-GEV")) +
scale_linetype_manual(values = linesPDists[c("ECDF2","LP3","GEV")]) +
guides(color = guide_legend(order = 1,
override.aes = list(linetype=linesPDists[c("ECDF2","LP3","GEV")]
)),
linetype = FALSE) +
theme(panel.background = element_rect(fill = "white"),
legend.key = element_rect(fill = "white") ,
panel.grid.major.y = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.minor.x = element_blank(),
axis.ticks.length=unit(-0.08, "cm"), axis.ticks.margin=unit(0.16, "cm"),
axis.text.x = element_text(color = "black", size = textP$reg[outputType]),
axis.title.x = element_text(color = "black", size = textP$sub[outputType]),
axis.text.y = element_text(color = "black", size = textP$reg[outputType]),
axis.title.y = element_text(color = "black", size = textP$sub[outputType]),
legend.text = element_text(color = "black", size = textP$sub[outputType]),
legend.title = element_text(color = "black", size = textP$sub[outputType]),
plot.title = element_text(color = "black", size = textP$head[outputType])
)
p[[length(p)+1]] <- p2
}
# PANEL 3: VICB
if ("VICb" %in% TYPE){
df <- ls.df[["VICb"]][["df"]]
df$ECDF3 <- (df$GEV)
df.melt <- melt(df,id.vars = "Q", variable.name = "GROUP", value.name = "F_Q")
df.melt$GROUP <- factor(as.character(df.melt$GROUP), levels = c("ECDF3","LP3","GEV"), labels = c("ECDF3","LP3","GEV"))
colorsPDists["ECDF3"] <- colorsP$Data["VICB"]
linesPDists["ECDF3"] <- linesP$Dists["ECDF"]
p3 <- ggplot(data=df.melt) +
stat_ecdf(data = subset(df.melt,GROUP=="ECDF3"),aes(x=Q, color=GROUP, linetype=GROUP)) +
geom_line(data = subset(df.melt,GROUP!="ECDF3"),aes(x=Q,y=F_Q,group=GROUP,color=GROUP,linetype=GROUP))
if (TEXT==TRUE & ANNOTATE_LOC=="BR"){
p3 <- p3 +
annotate("text",
x = 0.99*max(df$Q),
y = c(0.08,0.04),
label = c(paste("LP3: A2 = ",signif(ls.df[["VICb"]][["AD"]]$LP3,2)," (p = ",signif(ls.df[["VICb"]][["AD"]]$LP3p,2),")",sep=""),
paste("GEV: A2 = ",signif(ls.df[["VICb"]][["AD"]]$GEV,2)," (p = ",signif(ls.df[["VICb"]][["AD"]]$GEVp,2),")",sep="")),
size = 0.9*textP$annotate[outputType],
hjust = 1)#,
# parse = TRUE)
}
p3 <- p3 +
labs(x = "Annual Maximum Daily Flow, Q [cfs], Daymet-VIC-Bridge",y=ylab,title=BridgesDataFrame$LABEL) +
# xlim(limits)+
scale_color_manual(values = colorsPDists[c("ECDF3","LP3","GEV")], name = legendsP$Dists, labels = c("VIC-BRIDGE-ECDF","VIC-BRIDGE-LP3","VIC-BRIDGE-GEV")) +
scale_linetype_manual(values = linesPDists[c("ECDF3","LP3","GEV")]) +
guides(color = guide_legend(order = 1,
override.aes = list(linetype=linesPDists[c("ECDF3","LP3","GEV")]
)),
linetype = FALSE) +
theme(panel.background = element_rect(fill = "white"),
legend.key = element_rect(fill = "white") ,
panel.grid.major.y = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.minor.x = element_blank(),
axis.ticks.length=unit(-0.08, "cm"), axis.ticks.margin=unit(0.16, "cm"),
axis.text.x = element_text(color = "black", size = textP$reg[outputType]),
axis.title.x = element_text(color = "black", size = textP$sub[outputType]),
axis.text.y = element_text(color = "black", size = textP$reg[outputType]),
axis.title.y = element_text(color = "black", size = textP$sub[outputType]),
legend.text = element_text(color = "black", size = textP$sub[outputType]),
legend.title = element_text(color = "black", size = textP$sub[outputType]),
plot.title = element_text(color = "black", size = textP$head[outputType])
)
p[[length(p)+1]] <- p3
}
# ARRANGE GRID
grp <- lapply(p, function(i) ggplot_gtable(ggplot_build(i)) )
if (length(p) == 3){
maxHeight = unit.pmax(grp[[1]]$heights[2:3], grp[[2]]$heights[2:3], grp[[3]]$heights[2:3])
for (i in length(grp)){
grp[[i]]$heights[2:3] <- maxHeight
}
grid.arrange(grp[[1]],grp[[2]],grp[[3]],ncol = 3)
if (SAVE == TRUE){
pdf(file = paste("Distributions","pdf",sep="."), height = SIZE[1], width = SIZE[2])
grid.arrange(grp[[1]],grp[[2]],grp[[3]], ncol = 3)
dev.off()
}
}
if (length(p) == 2){
maxHeight = unit.pmax(grp[[1]]$heights[2:3], grp[[2]]$heights[2:3])
for (i in length(grp)){
grp[[i]]$heights[2:3] <- maxHeight
}
grid.arrange(grp[[1]],grp[[2]],ncol = 2)
if (SAVE == TRUE){
pdf(file = paste("Distributions","pdf",sep="."), height = SIZE[1], width = SIZE[2])
grid.arrange(grp[[1]],grp[[2]], ncol = 2)
dev.off()
}
}
if (length(p) == 1){
p[[1]]
if (SAVE == TRUE){
ggsave(filename = paste("Distributions","pdf",sep="."), height = SIZE[1], width = SIZE[2])
}
}
return(p)
}
|
9d7c047c504408ea48d0841ce9f4944bd9438464
|
b437086cb1c76af82e408ae60119e8119e7d0ddf
|
/make_stuff/make_2/main.r
|
4ca65664b31003c3ed144e8c7cb4963ab9ed0dc7
|
[] |
no_license
|
Zyoto/Demo
|
ea2eb76a007b8321eefdf30e3ce385be55a36cf5
|
501476aeb0ef2d66b074ccdd54daf3e3cc66a787
|
refs/heads/master
| 2016-09-03T03:18:06.165419
| 2013-09-27T16:51:35
| 2013-09-27T16:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
main.r
|
p = P
q = Q
paq = p&q
poq = p+q
piq = p->q
pbq = p<->q
drawtable(paq)
dRawtable(poq)
drAwtable(piq)
draWtable(pbq)
p = R
q = S
drawtable(paq)
dRawtable(poq)
drAwtable(piq)
draWtable(pbq)
dma = -(P&Q)<->(-P+-Q)
drawtable(dma)
isanything(dma)
dmb = -(P+Q)<->(-P&-Q)
drawtable(dmb)
isanything(dmb)
dmc = -(-P&-Q)<->(P+Q)
drawtable(dmc)
isanything(dmc)
dmd = -(-P+-Q)<->(P&Q)
drawtable(dmd)
isanything(dmd)
a = P&(P->Q)&(Q->R)
Implication(a: R)
p = P
equivalence(p, P)
fundamentals
expressions
operators
truth tables
architecture
compiler tools
http://flex.sourceforge.net/
|
89105d52a46ecd062952b62b90096d1dbfbefbe7
|
a9bc25c88e84546a5347f5d31dae56dd10ddd09e
|
/Aula03/Rmd_distprob_fonte/Goodcoin.R
|
bc2f3fe00bf390da9e128e60375486ce75233f2b
|
[] |
no_license
|
yadevi/2020
|
7215d15d5ee2bc85515869cb9fef11d07ea335ad
|
599708bbe5ca8c4be3a76b82d55d32c5f3c70232
|
refs/heads/master
| 2022-07-05T11:04:07.781681
| 2020-05-13T18:06:24
| 2020-05-13T18:06:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,834
|
r
|
Goodcoin.R
|
# Simulacao
# rodando este script np RStudio, observe em Plots
# suppress warnings
options(warn=-1)
cat ("\nDado um valor a receber em moedas de R$1.00, metade da quantia\n")
cat ("eh oferecida em moedas com um balanceamento de referencia,\n")
cat ("e metade em moedas falsas, conhecidas por balanceamento distinto.\n")
cat ("Seu desafio eh distinguir os dois conjuntos atraves de experimentos.\n")
valor_a_receber <- readline(prompt="Numero de moedas (inteiro, default=10000): ")
if (valor_a_receber=="") {valor_a_receber <- 10000}
valor_a_receber <- as.integer(valor_a_receber)
cat ("\nPara testar se a moeda eh verdadeira, joga-se\n")
cat ("cara ou coroa certo numero de vezes cada moeda (um experimento).\n")
jogadasporvez <- readline(prompt="Numero de lancamentos por experimento (numero inteiro, default=15): ")
if (jogadasporvez=="") {jogadasporvez <- 15}
jogadasporvez <- as.integer(jogadasporvez)
cat ("\nQual a proporcao maxima de moedas verdadeiras\n")
cat ("que voce aceita perder, i.e. alfa = probabilidade do\n")
cat ("erro do tipo I ou de falso-positivo).\n")
cat ("(numero entre 0 e 1).\n")
alfa <-readline(prompt="alfa (default=0.05): ")
if (alfa=="") {alfa <- 0.05}
alfa <- as.numeric(alfa)
# para sair coroa H1
cat ("\nAs moedas verdadeiras tem balanceamento de referencia (H0).\n")
cat ("(caso queira moedas balanceadas, escolha o valor igual a 0.5)\n")
cat ("Qual a probabilidade de sortear coroa para uma moeda verdadeira?\n")
cat ("(número entre 0 e 1).\n")
prob_coroa_H0 <-readline(prompt="P[coroa|H0] (default=0.5): ")
if (prob_coroa_H0=="") {prob_coroa_H0 <- 0.5}
prob_coroa_H0 <- as.numeric(prob_coroa_H0)
# para sair coroa H1
cat ("\nAs moedas falsas tem outro balanceamento.\n")
cat ("(para simular, forneça uma probabilidade diferente de",prob_coroa_H0,"\n")
cat (" ou deixe em branco para simular somente a moeda verdadeira)\n")
cat ("Qual a probabilidade de sortear coroa para uma moeda falsa?\n")
cat ("(número entre 0 e 1).\n")
prob_coroa_H1 <-readline(prompt=paste("P[coroa|H1] (default=",prob_coroa_H0,"): ",sep=""))
if (prob_coroa_H1=="") {prob_coroa_H1 <- prob_coroa_H0}
prob_coroa_H1 <- as.numeric(prob_coroa_H1)
# define o valor de alfa efetivo
# (o maximo que nao ultrapasse
# o valor de alfa solicitado acima)
limiarinf <- -1
limiarsup <- jogadasporvez+1
dt_contagem <- data.frame(0:jogadasporvez,0,0)
names(dt_contagem) <- c("coroas","verdadeira","falsa")
alfa2 <- alfa
alfa_teorico <- 0
if (prob_coroa_H1 == prob_coroa_H0 )
{
alfa2 <- alfa/2
}
d_prob <- dbinom(0:jogadasporvez,jogadasporvez,prob_coroa_H0)
# upper tail
if (prob_coroa_H1 > prob_coroa_H0 | prob_coroa_H1 == prob_coroa_H0)
{
i <- jogadasporvez+1
while(sum(d_prob[i:(jogadasporvez+1)]) < alfa2 ){i <- i-1}
i <- i+1
limiarsup <- i-1
alfa_teorico <- alfa_teorico + sum(d_prob[i:(jogadasporvez+1)])
# media.maiorint <- ceiling(jogadasporvez*prob_coroa_H0)
# testebin <- binom.test(media.maiorint, jogadasporvez, prob_coroa_H0,
# alternative="less", conf.level=1-alfa2)
# limiarsup <- round(testebin$conf.int[2]*jogadasporvez)
# alfa_teorico <- alfa_teorico + 1-pbinom(limiarsup,jogadasporvez,prob_coroa_H0)
if (prob_coroa_H1 != prob_coroa_H0)
{
limiarinfgrf <- limiarinf-0.5
}
limiarsupgrf <- limiarsup-0.5
}
# lower tail
if (prob_coroa_H1 < prob_coroa_H0 | prob_coroa_H1 == prob_coroa_H0)
{
i <- 1
while(sum(d_prob[1:i]) < alfa2 ){i <- i+1}
i <- i-1
limiarinf <- i-1
alfa_teorico <- alfa_teorico + sum(d_prob[1:i])
# media.menorint <- floor(jogadasporvez*prob_coroa_H0)
# testebin <- binom.test(media.menorint, jogadasporvez, prob_coroa_H0,
# alternative="greater", conf.level=1-alfa2)
# limiarinf <- round(testebin$conf.int[1]*jogadasporvez)
# alfa_teorico <- alfa_teorico + pbinom(limiarinf,jogadasporvez,prob_coroa_H0)
limiarinfgrf <- limiarinf+0.5
if (prob_coroa_H1 != prob_coroa_H0)
{
limiarsupgrf <- limiarsup+0.5
}
}
alfa_teorico <- round(alfa_teorico*100,1)
prob_acm <- limiarinf+limiarsup
cat ("\nRegra decisoria:\n")
cat ("Valor para alfa mais próximo do alfa solicitado = ", alfa_teorico,"%\n")
cat ("Em",jogadasporvez,"lancamentos:\n")
if (prob_coroa_H1 == prob_coroa_H0)
{
cat ("Rejeitar H0 se no. de coroas <= ",limiarinf," ou no. de coroas >= ",limiarsup,"\n")
cat ("Nao rejeitar H0 se no. de coroas >= ",limiarinf+1," e coroas <= ",limiarsup-1,"\n")
} else
{
if (prob_coroa_H1 > prob_coroa_H0) # H0 a esquerda
{
cat ("Rejeitar H0 se no. de coroas >= ",limiarsup,"\n")
cat ("Nao rejeitar H0 se no. de coroas <= ",limiarsup-1,"\n")
}
if (prob_coroa_H1 < prob_coroa_H0) # H0 a direita
{
cat ("Rejeitar H0 se no. de coroas <= ",limiarinf,"\n")
cat ("Nao rejeitar H0 se no. de coroas >= ",limiarinf+1,"\n")
}
}
cat ("(este eh o Intervalo de Confianca",round(100-alfa_teorico,2),"% centrado em H0: p[H0]=",prob_coroa_H0,")\n")
cat ("\nIniciando a simulacao\n")
# limpando as jogadas anteriores
jogadaboa <- rbinom(1, jogadasporvez, prob_coroa_H0)
moedaboa <- c(jogadaboa)
divide <- 1
if (prob_coroa_H1 != prob_coroa_H0)
{
jogadaruim <- rbinom(1, jogadasporvez, prob_coroa_H1)
moedaruim <- c(jogadaruim)
divide <- 2
}
# com que frequencia exibe o grafico (100 graficos)
exibegrf <- valor_a_receber/divide/100;
# loop, testando as moedas
moedasboasfora <- 0
moedasboasdentro <- 0
if (prob_coroa_H1 != prob_coroa_H0)
{
moedasruinsfora <- 0
moedasruinsdentro <- 0
}
hachura <- 500/jogadasporvez
if (hachura < 10) {hachura <- 10}
if (hachura > 35) {hachura <- 35}
for (i in 1:(valor_a_receber/divide))
{
# jogada de moeda boa
jogadaboa <- rbinom(1, jogadasporvez, prob_coroa_H0)
# cat ("boa: ",jogadaboa,"\n")
if (jogadaboa <= limiarinf || jogadaboa >= limiarsup) {moedasboasfora <- moedasboasfora+1}
else {moedasboasdentro <- moedasboasdentro+1}
if (prob_coroa_H1 != prob_coroa_H0)
{
# jogada de moeda desbalanceada
jogadaruim <- rbinom(1, jogadasporvez, prob_coroa_H1)
# cat ("ruim: ",jogadaruim,"\n")
if (jogadaruim > limiarinf && jogadaruim < limiarsup) {moedasruinsdentro <- moedasruinsdentro+1}
else {moedasruinsfora <- moedasruinsfora+1}
}
moedaboa <- c(moedaboa,jogadaboa)
if (prob_coroa_H1 != prob_coroa_H0)
{
moedaruim <- c(moedaruim,jogadaruim)
}
erro_1 = round(moedasboasfora/(moedasboasfora+moedasboasdentro)*100, digits=2)
if (prob_coroa_H1 != prob_coroa_H0)
{
erro_2 = round(moedasruinsdentro/(moedasruinsdentro+moedasruinsfora)*100, digits=2)
}
# exibe 50 graficos ao longo da simulacao,
# as primeiras 100 moedas, para mostrar o inicio
# e a rodada final, quando terminou
if ( (i %% exibegrf) == 0 || i <= 100 || i == (valor_a_receber/divide) )
{
if (prob_coroa_H1 != prob_coroa_H0)
{
titulo <- paste("Aceitou R$",moedasboasdentro," bons e R$", moedasruinsdentro, " falsos (beta=",erro_2,"%), ",jogadasporvez," lances/moeda", sep="")
subtitulo <- paste("Jogou fora: R$",moedasboasfora," bons (alfa=",erro_1,"%) e R$", moedasruinsfora, " falsos, alfa (teoria)=",alfa_teorico,"%", sep="")
}
else
{
titulo <- paste("Aceitou R$",moedasboasdentro," ",jogadasporvez," lances/moeda", sep="")
subtitulo <- paste("Jogou fora R$",moedasboasfora," (alfa=",erro_1,"%), alfa (teoria)=",alfa_teorico,"%", sep="")
}
moedaboa_table <- table(moedaboa)
numcoroas_b <- names(table(moedaboa))
numcoroas_b <- as.numeric(numcoroas_b)-0.15
ocorrencias_b <- as.numeric(paste(table(moedaboa)))
if (prob_coroa_H1 != prob_coroa_H0)
{
moedaboa_ruim <- table(moedaruim)
numcoroas_r <- names(table(moedaruim))
numcoroas_r <- as.numeric(numcoroas_r)+0.15
ocorrencias_r <- as.numeric(paste(table(moedaruim)))
}
else
{
ocorrencias_r <- c()
}
maxy = max(c(ocorrencias_b,ocorrencias_r))
plot (numcoroas_b, ocorrencias_b, xlab="coroas (sucessos)", ylab="Contagem",
main=titulo, cex.main=0.9, sub=subtitulo,
lwd=3, col="#1965B0", axes = FALSE,
xlim=c(0,jogadasporvez), ylim=c(0,maxy), type = "h")
posleg <<- ""
if (prob_coroa_H1 <= 0.5) {posleg <<- "topright"} else {posleg <<- "topleft"}
txtH0 <- paste("p[H0]=",prob_coroa_H0,sep="")
txtH1 <- paste("p[H1]=",prob_coroa_H1,sep="")
if(prob_coroa_H1 == prob_coroa_H0)
{
legend (posleg,c(txtH0,"alfa"),
lwd=c(3,15),
col=c("#1965B0","#1965B055"),
box.lwd=0)
} else
{
legend (posleg,c(txtH0,txtH1,"alfa","beta"),
lwd=c(3,3,15,15),
col=c("#1965B0","#ac4d12","#1965B055","#ac4d1255"),
box.lwd=0)
}
ticks <- seq(0:jogadasporvez)
ticks <- ticks - 1
axis(side = 1, at = ticks)
axis(side = 2)
if (prob_coroa_H1 != prob_coroa_H0)
{
lines(numcoroas_r, ocorrencias_r, lwd=3, col="#ac4d12", type="h")
}
lines(c(limiarinfgrf,limiarinfgrf),c(0,maxy),lwd=1,lty=2)
lines(c(limiarsupgrf,limiarsupgrf),c(0,maxy),lwd=1,lty=2)
# pontos Ho
points(numcoroas_b[numcoroas_b>limiarinfgrf & numcoroas_b<limiarsupgrf],
ocorrencias_b[numcoroas_b>limiarinfgrf & numcoroas_b<limiarsupgrf],
col="#1965B0", bg="#1965B0", pch=21)
if (prob_coroa_H1 != prob_coroa_H0)
{
# erro II
points(numcoroas_r[numcoroas_r>limiarinfgrf & numcoroas_r<limiarsupgrf],
ocorrencias_r[numcoroas_r>limiarinfgrf & numcoroas_r<limiarsupgrf],
col="#ac4d12", bg="#ac4d12", pch=21)
lines (numcoroas_r[numcoroas_r>limiarinfgrf & numcoroas_r<limiarsupgrf],
ocorrencias_r[numcoroas_r>limiarinfgrf & numcoroas_r<limiarsupgrf],
col="#ac4d1255", bg="#ac4d1255", lwd=hachura, type="h")
# nao rejeita Ho erroneamente
# x <- numcoroas_r[numcoroas_r>limiarinfgrf & numcoroas_r<limiarsupgrf]
# y <- ocorrencias_r[numcoroas_r>limiarinfgrf & numcoroas_r<limiarsupgrf]
# x <- c(min(x), x, max(x))
# y <- c(0, y, 0)
# polygon(x, y, col="#ac4d1288", border="#ac4d1288")
# pontos Ha
}
# erro I
points(numcoroas_b[numcoroas_b<limiarinfgrf | numcoroas_b>limiarsupgrf],
ocorrencias_b[numcoroas_b<limiarinfgrf | numcoroas_b>limiarsupgrf],
col="#1965B0", bg="#1965B0", pch=4)
lines(numcoroas_b[numcoroas_b<limiarinfgrf | numcoroas_b>limiarsupgrf],
ocorrencias_b[numcoroas_b<limiarinfgrf | numcoroas_b>limiarsupgrf],
col="#1965B055", bg="#1965B055", lwd=hachura, type="h")
# cauda inferior
# x <- numcoroas_b[numcoroas_b<limiarinfgrf]
# y <- ocorrencias_b[numcoroas_b<limiarinfgrf]
# x <- c(min(x), x, max(x))
# y <- c(0, y, 0)
# polygon(x, y, col="#1965B088", border="#1965B088")
# cauda superior
# x <- numcoroas_b[numcoroas_b>limiarsupgrf]
# y <- ocorrencias_b[numcoroas_b>limiarsupgrf]
# x <- c(min(x), x, max(x))
# y <- c(0, y, 0)
# polygon(x, y, col="#1965B088", border="#1965B088")
if (prob_coroa_H1 != prob_coroa_H0)
{
points(numcoroas_r[numcoroas_r<limiarinfgrf | numcoroas_r>limiarsupgrf],
ocorrencias_r[numcoroas_r<limiarinfgrf | numcoroas_r>limiarsupgrf],
col="#ac4d12", bg="#ac4d12", pch=4)
}
# pausa para ver o grafico
Sys.sleep(0.2)
}
}
cat ("\nTerminado\n")
# exibe a contagem das moedas
cat ("Distribuicoes\n")
cat ("- moedas verdadeiras:\n")
ocorrencias_v <- table(moedaboa)
print (ocorrencias_v)
if (prob_coroa_H1 != prob_coroa_H0)
{
cat ("- moedas falsas:\n")
ocorrencias_f <- table(moedaruim)
print (ocorrencias_f)
}
cat ("\n*** Regras Decisorias ***\n")
cat ("Em",jogadasporvez,"lancamentos:\n")
if (prob_coroa_H1 == prob_coroa_H0)
{
cat ("Rejeitar H0 se no. de coroas <= ",limiarinf," ou no. de coroas >= ",limiarsup,"\n")
} else
{
if (prob_coroa_H1 > prob_coroa_H0) # H0 a esquerda
{
cat ("Rejeitar H0 se no. de coroas >= ",limiarsup,"\n")
}
if (prob_coroa_H1 < prob_coroa_H0) # H0 a direita
{
cat ("Rejeitar H0 se no. de coroas <= ",limiarinf,"\n")
}
}
poder <- 0
if (prob_coroa_H1 != prob_coroa_H0)
{
poder <- 100-erro_2
cat ("Poder=",poder,"%:\n")
}
if (poder >= 90)
{
sufixo <- "Aceitar"
} else # falta poder
{
sufixo <- "Nao rejeitar"
}
if (prob_coroa_H1 == prob_coroa_H0)
{
cat (sufixo,"H0 se no. de coroas >= ",limiarinf+1," e coroas <= ",limiarsup-1,"\n")
} else
{
if (prob_coroa_H1 > prob_coroa_H0) # H0 a esquerda
{
cat (sufixo,"H0 se no. de coroas <= ",limiarsup-1,"\n")
}
if (prob_coroa_H1 < prob_coroa_H0) # H0 a direita
{
cat (sufixo,"H0 se no. de coroas >= ",limiarinf+1,"\n")
}
}
cat ("(este eh o Intervalo de Confianca",round(100-alfa_teorico,2),"% centrado em H0: p[H0]=",prob_coroa_H0,")\n")
# enable warnings
options(warn=0)
|
bd9af2ded3a1de40b226169a32c9df8074668364
|
eb8642cd2853dfbd0d425ae2c80224d45c82a890
|
/week4 assignment/R code for week4 assignment.R
|
47e4e19a0499a7509df08e436e02e7afc9633fda
|
[] |
no_license
|
vinod-desireddy/Coursera-Reproducible-Research
|
7e6fc3952d4a1d8598315ba0ae3e1a80776b3945
|
4b204fd56b4bbc117a593d3f756238e44ac6649d
|
refs/heads/master
| 2022-07-16T16:50:57.697699
| 2020-05-18T15:36:13
| 2020-05-18T15:36:13
| 264,501,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,175
|
r
|
R code for week4 assignment.R
|
#https://rpubs.com/vinod-desireddy/616194
url = 'https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2'
download.file(url, 'data', 'curl')
data = read.csv('data.csv')
str(data)
library(dplyr)
library(lubridate)
data1 = data %>%
select(EVTYPE, MAG, FATALITIES, INJURIES,
PROPDMG, PROPDMGEXP, CROPDMG, CROPDMGEXP)
unique(data1$PROPDMGEXP)
table(data1$PROPDMGEXP)
data1[!tolower(data1$PROPDMGEXP) %in% c('k','m','b'), 'PROPDMGEXP'] = 0
data1$PROPDMGEXP = gsub('k', 1000, x = tolower(data1$PROPDMGEXP))
data1$PROPDMGEXP = gsub('m', 1000000, x = tolower(data1$PROPDMGEXP))
data1$PROPDMGEXP = gsub('b', 1000000000, x = tolower(data1$PROPDMGEXP))
data1$PROPDMGEXP = as.numeric(data1$PROPDMGEXP)
data1$propdmgexp = data1$PROPDMG * data1$PROPDMGEXP
data1$PROPDMG = NULL
data1$PROPDMGEXP = NULL
unique(data1$CROPDMGEXP)
table(data1$CROPDMGEXP)
data1[!tolower(data1$CROPDMGEXP) %in% c('k','m','b'), 'CROPDMGEXP'] = 0
data1$CROPDMGEXP = gsub('k', 1000, x = tolower(data1$CROPDMGEXP))
data1$CROPDMGEXP = gsub('m', 1000000, x = tolower(data1$CROPDMGEXP))
data1$CROPDMGEXP = gsub('b', 1000000000, x = tolower(data1$CROPDMGEXP))
data1$CROPDMGEXP = as.numeric(data1$CROPDMGEXP)
data1$cropdmgexp = data1$CROPDMG * data1$CROPDMGEXP
data1$CROPDMG = NULL
data1$CROPDMGEXP = NULL
q1 = data1 %>% group_by(EVTYPE) %>%
summarise(total_property_damage = sum(propdmgexp, na.rm = T),
total_crop_damage = sum(cropdmgexp, na.rm = T)) %>%
mutate(total_damage = total_property_damage + total_crop_damage)
library(tidyr)
q2 = gather(q1, key = 'damagetype', value = 'damagevalue', -EVTYPE)
q3 = q2 %>% group_by(damagetype) %>% summarise(damagevalue = max(damagevalue))
q3 = as.data.frame(q3)
q4 = q2[(q2$damagetype %in% q3[,'damagetype']) & (q2$damagevalue %in% q3[,'damagevalue']),]
q4$damagevalue = round(q4$damagevalue/10^9, 1)
q4 = as.data.frame(q4)
rm(q1, q2, q3)
library(ggplot2)
ggplot(data = q4, aes(x = damagetype, y = damagevalue))+
geom_point() +
geom_text(aes(label = paste(EVTYPE, damagevalue, 'Billion USD', sep = '-')),
vjust = 1.3) +
xlab('type of damage') +
ylab('damage value in billions USD') +
ggtitle('Event with max damage in property, crops, property+crops')
r1 = data1 %>% group_by(EVTYPE) %>%
summarise(total_fatalities = sum(FATALITIES, na.rm = T),
total_injuries = sum(INJURIES, na.rm = T)) %>%
mutate(total_health_damage = total_fatalities + total_injuries)
r2 = gather(r1, key = 'damagetype', value = 'damagevalue', -EVTYPE)
r3 = r2 %>% group_by(damagetype) %>% summarise(damagevalue = max(damagevalue))
r3 = as.data.frame(r3)
r4 = r2[(r2$damagetype %in% r3[,'damagetype']) & (r2$damagevalue %in% r3[,'damagevalue']),]
r4 = as.data.frame(r4)
r4 = arrange(r4, damagevalue)
rm(r1, r2, r3)
ggplot(data = r4, aes(x = damagetype, y = damagevalue))+
geom_point() +
geom_text(aes(label = paste(EVTYPE, damagevalue, sep = '-')),
vjust = 1.3) +
xlab('type of health damage') +
ylab('No of persons affected') +
ggtitle('Event with max damage in health, fatalities, fatalities+health')
|
cc67f9fc9e76f8113a2de57d07130a56c442acb9
|
7906d9809924c2d9d6f7652aca712144e095cf35
|
/man/relatedness.Rd
|
b0279dc1d6bc2025cc57e0ff2ba5602df6da9561
|
[
"MIT"
] |
permissive
|
rdinnager/slimrlang
|
999d8f5af0162dbcc8c42ff93b4dc5ae2f0cb7fc
|
375a2667b0b3ab4fa03786fb69c5572f6367d36b
|
refs/heads/master
| 2022-10-19T04:02:43.013658
| 2020-06-14T10:31:45
| 2020-06-14T10:31:45
| 262,492,066
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,688
|
rd
|
relatedness.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{relatedness}
\alias{relatedness}
\alias{Individual$relatedness}
\alias{.I$relatedness}
\title{SLiM method relatedness}
\usage{
relatedness(individuals)
}
\arguments{
\item{individuals}{An object of type Individual object. See details for
description.}
}
\value{
An object of type float.
}
\description{
Documentation for SLiM function \code{relatedness}, which is a method of the
SLiM class \code{Individual}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Returns a vector containing the degrees of relatedness between the
receiver and each of the individuals in individuals. The relatedness between A
and B is always 1.0 if A and B are actually the same individual; this facility
works even if SLiM’s optional pedigree tracking is turned off (in which case
all other relatedness values will be 0.0). Otherwise, if pedigree tracking is
turned on with initializeSLiMOptions(keepPedigrees=T), this method will use
the pedigree information described in section 23.6.1 to construct a relatedness
estimate. More specifically, if information about the grandparental generation
is available, then each grandparent shared by A and B contributes 0.125 towards
the total relatedness, for a maximum value of 0.5 with four shared grandparents.
If grandparental information in unavailable, then if parental information is
available it is used, with each parent shared by A and B contributing 0.25,
again for a maximum of 0.5. If even parental information is unavailable, then
the relatedness is assumed to be 0.0. Again, however, if A and B are the same
individual, the relatedness will be 1.0 in all cases. Note that this relatedness
is simply pedigree-based relatedness. This does not necessarily correspond
to genetic relatedness, because of the effects of factors like assortment and
recombination.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016–2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
|
8c46841235c7e93d07b184dd6a025c221c89e3ec
|
75b9ad06c919b6ef8e4df9df2188a85fbb69f6d7
|
/02122020-KMEANS/02122020-KMeans-R.R
|
feba694edd1f9263d713fe4dc92f81307557ed3d
|
[] |
no_license
|
jcms2665/LCF-UNAM
|
5218e188cbc52b90568b4ac2ba0523544f799d3d
|
c3ec6c3b4d7d6563aa11aaf58f50ea8f0a167777
|
refs/heads/main
| 2023-02-09T02:41:40.616777
| 2021-01-03T07:43:02
| 2021-01-03T07:43:02
| 310,419,641
| 1
| 14
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,039
|
r
|
02122020-KMeans-R.R
|
#--------------------------------------------------------------------------
# Created:  02-12-2020
# Author:   Julio C.
# Contact:  jcms2665@gmail.com
# Purpose:  Cluster analysis: k-means (see NOTE at step 3 — kmodes is used)
# Data:     Latinobarometro_2018_Esp_R_v20190303.Rds
# Github:   https://github.com/jcms2665/LCF-UNAM/tree/main/02122020-KMEANS
#--------------------------------------------------------------------------
# CONTENTS
# 0. Working environment
# 1. Load data
# 2. Data preparation
# 3. Clustering algorithm with 3 groups
# 4. Attach group labels to the original data
# 5. Interpretation
#--------------------------------------------------------------------------
#0. Working environment
rm(list=ls())
graphics.off()
library(foreign)
library(ggplot2)
library(psych)
library(dplyr)
# NOTE(review): psych is loaded twice (also three lines above); harmless but redundant.
library(psych)
library(tidyr)
library(htmltools)
library(klaR)
#1. Load data (path is machine-specific; adjust setwd() before sourcing)
setwd("D:/OneDrive - El Colegio de México A.C/5. Proyectos/2020/18. LCF/2 Noviembre/30112020-FA-Ordinal")
latino <- readRDS("Latinobarometro_2018_Esp_R_v20190303.Rds")
# Variables:
# P15STGBSC.A --- Trust in the Armed Forces
# P15STGBSC.B --- Trust in the Police
# P15STGBSC.C --- Trust in the Church
# P15STGBSC.D --- Trust in Congress
# P15STGBSC.E --- Trust in the Government
# Response codes:
# 1.- A lot of trust
# 2.- Some trust
# 3.- Little trust
# 4.- No trust
# -1-.- Does not know
# -2-.- No answer
# -4-.- Not asked
#2. Data preparation
# Keep three countries: Argentina (32), Bolivia (68), Brazil (76)
dat<-latino%>%filter(as.numeric(IDENPA)==32 | as.numeric(IDENPA)==68 | as.numeric(IDENPA)==76)
# Keep only the variables needed for the analysis
var<-c("REEDAD","SEXO","IDENPA","P15STGBSC.A","P15STGBSC.B","P15STGBSC.C","P15STGBSC.D", "P15STGBSC.E")
dat1<-dat[,var]
names(dat1)<-c("Edad","Sexo","País","Fuerzas Armadas","Policía","Iglesia","Congreso","Gobierno")
# Recode invalid responses (negative codes) to NA, then drop incomplete rows
dat1[dat1 <=0] <- NA
dat1<-dat1%>%drop_na()
# Label demographic variables (unit of analysis: individual respondents)
dat1$Edad<-factor(dat1$Edad,levels = c(1,2,3,4), labels = c("16-25 años","26-40 años","41-60 años","60 y más"))
dat1$Sexo<-factor(dat1$Sexo,levels = c(1,2), labels = c("Hombre","Mujer"))
dat1$País<-factor(dat1$País,levels = c(32,68,76), labels = c("Argentina","Bolivia","Brasil"))
View(dat1)
# Keep only the five trust items (columns 4-8) for the clustering
dat<-dat1[,4:8]
View(dat)
#3. Clustering algorithm with 3 groups
# NOTE(review): despite the k-means title, klaR::kmodes (k-modes, for
# categorical data) is called here; confirm which method is intended.
fit <-kmodes(dat, 3)
#4. Attach cluster membership to the original data
dat.grupos <- data.frame(dat1, fit$cluster)
#5. Interpretation: row percentages of each cluster by age, sex and country
View(dat.grupos)
table(dat.grupos$fit.cluster,dat.grupos$Edad)%>%prop.table(1)%>%`*`(100)%>%round(1)
table(dat.grupos$fit.cluster,dat.grupos$Sexo)%>%prop.table(1)%>%`*`(100)%>%round(1)
table(dat.grupos$fit.cluster,dat.grupos$País)%>%prop.table(1)%>%`*`(100)%>%round(1)
|
5e69ef6ece0da7e521c49f91e1e18eef4c1b009b
|
501bdb50a57af61e43ed72b14d74865bcdec052c
|
/sentiExExJunc.R
|
4b03a526955ada67a535059aa30af42b3d1f4187
|
[] |
no_license
|
gughi/eQTLPipeline
|
cf790a2772e0cc1a6f3f7b15e5553ab3816ff37d
|
5c98367321035e6cdc09f15e564ef1e86fd1e4ff
|
refs/heads/master
| 2021-03-24T11:05:02.336066
| 2016-06-01T11:41:36
| 2016-06-01T11:41:36
| 18,880,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,310
|
r
|
sentiExExJunc.R
|
#######################
### Sentinalisation ###
#######################
# Runs LD-based sentinalisation of exon-exon-junction eQTL results in
# parallel (20 workers) for two brain regions, PUTM and SNIG. The two halves
# of this script are identical except for the region name; consider
# refactoring into a function taking the region as a parameter.
rm(list=ls())
library(devtools)
library(doParallel)
library(foreach)
load_all()
# Caprica
setwd("/home/seb/projectsR/eQTLPipeline/")
# Apollo
# setwd("/home/guelfi/eQTLPipeline/")
cl <- makeCluster(20)
clusterExport(cl,c("LDsentExonExonJunc","read.table.rows"))
registerDoParallel(cl)
# getDoParWorkers()
# ## path where get the unsentinalised eQTLs
pathUnsentinalised <- "data/results/genic/exonExonJunc/resMatrixEQTL/PUTM/"
# One junction per file in the results directory; list them via the shell
junctions <- read.delim(pipe(paste0("ls ",pathUnsentinalised)),header=F)
## load the residual corrected expression
load("data/expr/normalisedCounts/genic/exonExonJunc/resids.PUTM.rda")
load("data/general/sampleInfo.rda")
# Map CEL file names to subject IDs so resids rows match the genotype data
IDs <- sampleInfo[which(sampleInfo$A.CEL_file %in% as.character(rownames(resids))),"U.SD_No"]
IDs <- gsub("/","_",IDs)
rownames(resids) <- IDs
# NOTE(review): `indID` is never created in this script; rm() will warn
# "object 'indID' not found" — presumably left over from an earlier version.
rm(indID,IDs)
pathFinalSentinalised <-"data/results/genic/exonExonJunc/resMatrixEQTL/sentinalised/"
dir.create(file.path(pathFinalSentinalised),showWarnings=FALSE)
pathFinalSentinalised <-"data/results/genic/exonExonJunc/resMatrixEQTL/sentinalised/PUTM/"
dir.create(file.path(pathFinalSentinalised),showWarnings=FALSE)
# NOTE(review): tmp/PUTM is created here but tmpFolder="tmp/" is passed
# below — confirm which directory LDsentExonExonJunc actually writes to.
dir.create(file.path("tmp", "PUTM"),showWarnings=FALSE)
# load the genetic PCs
my.covTMP <- read.table.rows(paste0("/home/seb/plinkOutput/eigenvec"), keepRows=rownames(resids), sep=" ",header=F)
## load mapping file
load("data/expr/rawCounts/genic/fullExExJun.rda")
rm(map,expr)
# Timestamps bracket the parallel run for a rough wall-clock measure
Sys.time()
foreach(i=1:nrow(junctions))%dopar%LDsentExonExonJunc(resids=resids,
regID=junctions[i,1],
mapExon=mapExon,
pathFinalSentinalised=pathFinalSentinalised,
pathUnsentinalised=pathUnsentinalised,
FDRthr=0.10,
my.covTMP=my.covTMP,
snpLocation="/home/seb/eQTL/snps/byGene/",
tmpFolder="tmp/")
Sys.time()
stopCluster(cl)
# ---- Second half: identical pipeline for the SNIG region -----------------
rm(list=ls())
# Caprica
setwd("/home/seb/projectsR/eQTLPipeline/")
# Apollo
# setwd("/home/guelfi/eQTLPipeline/")
cl <- makeCluster(20)
clusterExport(cl,c("LDsentExonExonJunc","read.table.rows"))
registerDoParallel(cl)
# getDoParWorkers()
# ## path where get the unsentinalised eQTLs
pathUnsentinalised <- "data/results/genic/exonExonJunc/resMatrixEQTL/SNIG/"
junctions <- read.delim(pipe(paste0("ls ",pathUnsentinalised)),header=F)
## load the residual corrected expression
load("data/expr/normalisedCounts/genic/exonExonJunc/resids.SNIG.rda")
load("data/general/sampleInfo.rda")
IDs <- sampleInfo[which(sampleInfo$A.CEL_file %in% as.character(rownames(resids))),"U.SD_No"]
IDs <- gsub("/","_",IDs)
rownames(resids) <- IDs
# NOTE(review): same undefined `indID` as in the PUTM half above.
rm(indID,IDs)
pathFinalSentinalised <-"data/results/genic/exonExonJunc/resMatrixEQTL/sentinalised/"
dir.create(file.path(pathFinalSentinalised),showWarnings=FALSE)
pathFinalSentinalised <-"data/results/genic/exonExonJunc/resMatrixEQTL/sentinalised/SNIG/"
dir.create(file.path(pathFinalSentinalised),showWarnings=FALSE)
dir.create(file.path("tmp", "SNIG"),showWarnings=FALSE)
# load the genetic PCs
my.covTMP <- read.table.rows(paste0("/home/seb/plinkOutput/eigenvec"), keepRows=rownames(resids), sep=" ",header=F)
## load mapping file
load("data/expr/rawCounts/genic/fullExExJun.rda")
rm(map,expr)
Sys.time()
foreach(i=1:nrow(junctions))%dopar%LDsentExonExonJunc(resids=resids,
regID=junctions[i,1],
mapExon=mapExon,
pathFinalSentinalised=pathFinalSentinalised,
pathUnsentinalised=pathUnsentinalised,
FDRthr=0.10,
my.covTMP=my.covTMP,
snpLocation="/home/seb/eQTL/snps/byGene/",
tmpFolder="tmp/")
Sys.time()
stopCluster(cl)
|
4ea17201fb6b292289066828af5f272cb70039e5
|
49b5cb2296130f97d75033bda87ecf97871b008e
|
/cachematrix.R
|
36516ecdee457257d114cb75db883d1e6129b0ea
|
[] |
no_license
|
vinaymiriyala/ProgrammingAssignment2
|
1b7836c9621bd7c13b8497d6b6f3bc98847a6f60
|
f6d8594f3105a455b2e33dfa81ea7af6e2044937
|
refs/heads/master
| 2020-02-26T13:22:16.727761
| 2014-10-26T06:31:33
| 2014-10-26T06:31:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,568
|
r
|
cachematrix.R
|
## The following functions can be used to solve the inverse of a matrix. The inverse of a matrix will be
## returned readily if it has already been cached before, instead of being computed again.
## This saves time when doing time consuming computations.
## The first function, makeCacheMatrix creates a special matrix,
## which is really a list containing a function to
## set the value of the matrix
## get the value of the matrix
## set the inverse of the matrix
## get the inverse of the matrix
# Build a "cache-aware matrix": a list of four accessor closures sharing a
# matrix `x` and a cached inverse. The cached inverse is invalidated (set to
# NULL) whenever a new matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setinverse = function(inv) cached_inverse <<- inv,
    # Return the cached inverse, or NULL if none has been stored yet.
    getinverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix
## above. If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache. If the inverse has not yet been calculated
## it computes the inverse and caches the value with the setinverse function.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" 'x' (as built by
  ## makeCacheMatrix). If an inverse is already cached it is returned
  ## directly; otherwise it is computed with solve(), cached, and returned.
  ## Extra arguments in ... are forwarded to solve().
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
  } else {
    message("getting cached data")
    inv
  }
}
|
3d430b52d54c2975f2b6babffa55edc1a6074b77
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.xray/man/get_service_graph.Rd
|
f4e8d1ffaa71ae5cfa014ff9501c77bb9a22e2ed
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,357
|
rd
|
get_service_graph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.xray_operations.R
\name{get_service_graph}
\alias{get_service_graph}
\title{Retrieves a document that describes services that process incoming requests, and downstream services that they call as a result}
\usage{
get_service_graph(StartTime, EndTime, GroupName = NULL,
GroupARN = NULL, NextToken = NULL)
}
\arguments{
\item{StartTime}{[required] The start of the time frame for which to generate a graph.}
\item{EndTime}{[required] The end of the time frame for which to generate a graph.}
\item{GroupName}{The name of a group to generate a graph based on.}
\item{GroupARN}{The ARN of a group to generate a graph based on.}
\item{NextToken}{Pagination token. Not used.}
}
\description{
Retrieves a document that describes services that process incoming requests, and downstream services that they call as a result. Root services process incoming requests and make calls to downstream services. Root services are applications that use the AWS X-Ray SDK. Downstream services can be other applications, AWS resources, HTTP web APIs, or SQL databases.
}
\section{Accepted Parameters}{
\preformatted{get_service_graph(
StartTime = as.POSIXct("2015-01-01"),
EndTime = as.POSIXct("2015-01-01"),
GroupName = "string",
GroupARN = "string",
NextToken = "string"
)
}
}
|
030c1a580884f38de6cf0c810952f72ca0100f06
|
906467a7822ddefdfa585ba2c43d0e0db956667a
|
/man/getLogspaceNames.Rd
|
cdf442063d0144f02067cf295430373bc37c1898
|
[] |
no_license
|
chiphogg/gppois
|
47f0958f9cd18af94a6f89415257be858c9119cd
|
85137bfd7f659b0ac094d2e0e05a9cdfa045adfb
|
refs/heads/master
| 2020-05-18T12:57:26.688557
| 2012-07-20T15:05:03
| 2012-07-20T15:05:03
| 4,561,808
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
rd
|
getLogspaceNames.Rd
|
\name{getLogspaceNames}
\alias{getLogspaceNames}
\title{Names of "scale"-type parameters}
\arguments{
\item{\dots}{Not used.}
}
\value{
Names of parameters to be optimized in logspace.
}
\description{
A character vector of names, indicating which parameters
are considered to be \dQuote{scale} parameters. (Read
\dQuote{Optimization mode} section of
\code{\link{getParams.Covariance}} to see what this
means.)
}
\seealso{
\code{\link{CovarianceSE}}
}
|
adad165d097fbda30cb5ac4629f43faaa603b843
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/pareto/man/PDEcls.Rd
|
4af059c468b3ebcd728bc5fbb19112052f1d51e9
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
rd
|
PDEcls.Rd
|
\name{PDEcls}
\alias{PDEcls}
\title{PDE class}
\description{
This function plots PDE densities for each class.}
\usage{
PDEcls(data, cls, show_legend=FALSE)
}
\arguments{
\item{data}{numeric matrix of data to be plotted.}
\item{cls}{numeric vector. class number for each dataset. }
\item{show_legend}{logical. Whether to show the legend. FALSE by default.}
}
\references{Matlab dbt - Pareto}
\author{Friederike Matz }
|
6989bce07324cff2a97f1c51b5e919735fe14bad
|
bbc167551a93d2a6d7ea43f00fc901ff967a8c62
|
/man/funcEnrich.GSEA.Rd
|
bf320206aa8e17ef812296cfe8944ac83fbfef77
|
[
"Apache-2.0"
] |
permissive
|
jyyulab/NetBID
|
0c4c212cddd0180b96506e741350e6b7cfcacfce
|
86d62097eda88a6185b494491efdd8b49902e0c3
|
refs/heads/master
| 2023-04-30T04:16:40.752026
| 2023-02-28T02:57:04
| 2023-02-28T02:57:04
| 118,371,500
| 34
| 10
|
Apache-2.0
| 2022-08-23T13:44:58
| 2018-01-21T20:33:57
|
R
|
UTF-8
|
R
| false
| true
| 3,432
|
rd
|
funcEnrich.GSEA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{funcEnrich.GSEA}
\alias{funcEnrich.GSEA}
\title{Gene Set Enrichment Analysis by GSEA}
\usage{
funcEnrich.GSEA(
rank_profile = NULL,
use_gs = NULL,
gs2gene = NULL,
min_gs_size = 5,
max_gs_size = 500,
Pv_adj = "fdr",
Pv_thre = 0.1,
test_strategy = "GSEA",
nperm = 1000,
use_seed = 999
)
}
\arguments{
\item{rank_profile}{a named vector of numerics, the differential values (DE or DA) calculated from a sample comparison (e.g. "G4 vs. Others").
Names of the vector must be gene names.
For the DA, user could use 'processDriverProfile()' to convert the DA profile into gene-name based profile.
The differential values can be "logFC" or "t-statistics".}
\item{use_gs}{a vector of characters, the names of gene sets.
If \code{gs2gene} is NULL, \code{all_gs2gene} will be used. The \code{use_gs} must be the subset of \code{names(all_gs2gene)}.
If "all", all the gene sets in \code{gs2gene} will be used.
If user input his own \code{gs2gene} list, \code{use_gs} will be set to "all" as default.
Default is c("H", "CP:BIOCARTA", "CP:REACTOME", "CP:KEGG").}
\item{gs2gene}{list, a list contains elements of gene sets.
The name of the element is gene set, each element contains a vector of genes in that gene set.
If NULL, will use \code{all_gs2gene}, which is created by function \code{gs.preload}. Default is NULL.}
\item{min_gs_size}{numeric, the minimum size of gene set to analysis. Default is 5.}
\item{max_gs_size}{numeric, the maximum size of gene set to analysis. Default is 500.}
\item{Pv_adj}{character, method to adjust P-value. Default is "fdr".
For details, please check \code{p.adjust.methods}.}
\item{Pv_thre}{numeric, threshold for the adjusted P-values. Default is 0.1.}
\item{test_strategy}{choose from "KS" and "GSEA". Default is "GSEA".
If "KS", will perform a Kolmogorov-Smirnov test to get the significance value.}
\item{nperm}{numeric, number of random permutations. Default is 1000.
This function only do gene-label based permutation reshuffling.}
\item{use_seed}{integer, the random seed. Default is 999.}
}
\value{
Return a data.frame, contains gene sets with significant enrichment statistics.
Column details are as follows (test_strategy=GSEA),
\item{#Name}{Name of the enriched gene set}
\item{Total_item}{Size in the profile}
\item{Num_item}{Number of genes in the gene set (filtered by the profile list)}
\item{Ori_P}{Original P-value from GSEA Test}
\item{Adj_P}{Adjusted P-value}
\item{ES}{Enrichment Score}
\item{NES}{normalized Enrichment Score}
}
\description{
\code{funcEnrich.GSEA} performs gene set enrichment analysis to the input gene list, by using the GSEA Test.
}
\examples{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
ms_tab <- analysis.par$final_ms_tab
## get significant gene set by driver's DA profile
DA_profile <- processDriverProfile(Driver_name=ms_tab$gene_label,
Driver_profile=ms_tab$logFC.G4.Vs.others_DA,
choose_strategy='absmax',
return_type ='gene_statistics')
res1 <- funcEnrich.GSEA(rank_profile=DA_profile,
use_gs=c('H'),
Pv_thre=0.1,Pv_adj = 'none')
\dontrun{
}
}
|
75279125772b208642b551eee3f5f82f7efd5c28
|
f434d9eabe9d0864063bf0e7daca622ba17db850
|
/malta.R
|
83d3059a64b7e008a6ae25451d74e2014921aed2
|
[] |
no_license
|
edwindj/eu_maps
|
2dfc2e726551634d43792a9a7f884caf1c87d8a1
|
0c6b5c9f9eb533702125c235065f6eda1cc7c1f0
|
refs/heads/master
| 2021-01-19T22:17:22.233810
| 2019-04-12T06:49:01
| 2019-04-12T06:49:01
| 88,789,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,455
|
r
|
malta.R
|
# Load the European country map (CRS 3035, ETRS89 / LAEA Europe) and
# harmonise CBS StatLine country codes/names before reshaping.
library(sf)
library(dplyr)
europe <- st_read("data/europe_map.geojson") %>%
st_set_crs(3035)
# fix two country codes...
country_code <- c( "FRA" = "FR"
, "NOR" = "NO"
, "GBR" = "GB" # coded in StatLine with gb instead of uk
)
# Locate the rows whose ISO3 code needs a corrected StatLine code
i <- match(names(country_code), europe$ADM0_A3)
europe$statcode <- as.character(europe$statcode)
# NOTE(review): `=` used for assignment here; `<-` is used elsewhere in
# this file — stylistic inconsistency only.
europe$statcode[i] = unname(country_code)
countries <- read.csv("data-raw/countries.csv", stringsAsFactors = FALSE)
# Pull the official StatLine country names for the corrected codes
i <- match(country_code, countries$statcode)
europe$statnaam <- as.character(europe$statnaam)
europe$statnaam[match(country_code, europe$statcode)] <- countries$statnaam[i]
# remove three letter code
europe <-
europe %>%
select(statcode, statnaam, statname)
# Scale each feature's geometry about its own centroid by the per-row
# factor in `s` (default 1 = unchanged), preserving the original CRS.
# NOTE(review): the parameter name `sf` shadows the sf package name;
# assumes `s` has one entry per row of `sf` — confirm for shorter vectors.
scale_region <- function(sf, s=rep(1, nrow(sf))){
crs <- st_crs(sf)
center <- st_centroid(sf)
# Translate each geometry so its centroid sits at the origin; sfc
# arithmetic drops the CRS, which is why it is saved and restored.
geom <- sf$geometry - center$geometry
for (r in seq_len(nrow(sf))){
geom[r] <- geom[r] * s[r]
}
# Translate the scaled geometries back to their original centroids
sf$geometry <- geom + center$geometry
st_crs(sf) <- crs
sf
}
# rescale Malta: enlarge only Malta by linear factor sqrt(5), i.e. 5x the
# area, so it stays visible on a Europe-wide map; all other countries
# keep factor 1.
s <- ifelse(europe$statcode == "MT", sqrt(5), 1)
europe_MT <- scale_region(europe, s)
#mapview::mapview(europe_MT)
# Write the adjusted map as GeoJSON (integer coordinates) and shapefile
st_write(europe_MT, "data/europe_MT.geojson", delete_dsn = TRUE, layer_options = "COORDINATE_PRECISION=0")
st_write(europe_MT, "data/europe_MT.shp", delete_dsn = TRUE)
# Also export the attribute table (no geometry) as CSV
europe_MT %>%
st_set_geometry(NULL) %>%
write.csv(file = "data-raw/countries2.csv", row.names = FALSE, na = "")
|
a6b0cfd886d1d32db05af8f0d8d4836eb71beefe
|
7c326a26ffdd5f56d0dbdaf2eff8ceef11c35ae5
|
/First R Codes/DataManagement.R
|
4833c870b08e2578e591b62e8105c544397458b0
|
[] |
no_license
|
NiteshTiwari/Forecasting_in_R
|
ea562a09054deff3938693235ce8bf030468de41
|
c6ced0278fba78659fa088d9a4bcb4bf43207cc7
|
refs/heads/master
| 2021-06-03T23:37:55.368271
| 2016-09-14T12:52:31
| 2016-09-14T12:52:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,960
|
r
|
DataManagement.R
|
# Tutorial script: basic R data management (reading data, creating and
# recoding variables, numeric/character/statistical functions).
# NOTE(review): read.dta() requires the foreign package, which is not
# loaded in this file — confirm library(foreign) is called beforehand.
#Opens a Stata data file. Since Stata files automatically have headers, no need to specify that.
mydata<-read.dta("Y:/okofm/ElectoralRulesJuly2015/dataSTATA/DataExpTaxEmpPC.dta")
#Note that I call the dataset I open "mydata". Thus, I can refer to it by
#writing its name, e.g. mydata$Fire selects the Fire column.
#Some examples for doing basic computations
mydata$sum<-mydata$Fire+mydata$Police
mydata$mean<-(mydata$Fire+mydata$Police)/2
#In the next example, I attach the data such that I don't have to refer to the dataset every time.
#NOTE(review): attach()/detach() is generally discouraged in scripts.
attach(mydata)
mydata$sum<-Fire+Police
mydata$mean<-(Fire+Police)/2
detach(mydata)
#Another way of creating new variables
mydata<-transform(mydata,
sum=Fire+Police,
mean=(Fire+Police)/2
)
#RECODING VARIABLES
#Create 2 categories depending on spending on Fire
mydata$firecat<-ifelse(mydata$Fire>5478000,c("High Spending"),c("Low Spending"))
#another example, create 3 fire spending categories
attach(mydata)
mydata$firecat[Fire>5702000]<-"High Spending"
mydata$firecat[Fire>728600 & Fire<=5702000]<-"Intermediate Spending"
mydata$firecat[Fire<=728600]<-"Low Spending"
detach(mydata)
#RENAMING VARIABLES
#NOTE(review): install.packages() inside a script reinstalls on every run;
#usually done once interactively instead.
install.packages("reshape")
library(reshape)
mydata<-rename(mydata,c(Fire="fire"))
#AN EXAMPLE USING LOGICAL OPERATORS
x<-c(1:10)
x[(x>8)|(x<5)]
#NUMERIC FUNCTIONS
abs(5.78)
sqrt(5.78)
ceiling(5.78)
floor(5.78)
trunc(5.78)
round(5.78,digits=1)
signif(5.78,digits=1)
cos(7.78)
log(5.58)
log10(5.58)
exp(5.58)
#CHARACTER FUNCTIONS
x<-"abcdef"
substr(x,2,4)
grep("A",c("b","A","c","A"),fixed=TRUE) #this function searches for patterns
sub("p","o","Hellp World",ignore.case=FALSE) #Find a pattern and replace it
strsplit("abc","") #splits the elements of character vector
strsplit("abc","b")
paste("Today is", date())
paste("x",1:3,sep="") #concatenate strings after using sep to separate them
paste("x",1:3,sep="M")
toupper(x)
tolower(x)
#STATISTICAL PROBABILITY FUNCTIONS
#Normal density function
x<-pretty(c(-3,3),30)
y<-dnorm(x)
plot(x,y,type='l',xlab="Normal Deviate",ylab="Density",yaxs="i")
pnorm(1.96) #cumulative normal probability
qnorm(.6) #normal quantile. value at the p percentile of normal distribution
x<-rnorm(50,m=50,sd=10) #50 normal deviates with mean m and standard deviation sd
dbinom(0:5,10,.5) #probability of 0 to 5 heads of fair coin out of 10 flips
pbinom(5,10,.5) #probability of 5 or less heads of fair coin out of 10 flips
#Similar statistical probability functions exist for poisson, uniform, etc.
#Other statistical functions
mean(x)
mean(x,trim=.05,na.rm=TRUE) #trimmed mean: drop NAs and trim 5 percent from
# each end of the sorted values
sd(x)
median(x)
quantile(x,.3)
range(x)
sum(x)
min(x)
max(x)
scale(x,center=TRUE,scale=TRUE) #column center or standardize a matrix
#Other useful functions
seq(1,10,2) #generate a sequence
indices<-seq(1,10,2)
rep(1:3,2) #repeat x n times
cut(x,5) #divide continuous variable in factor with 5 levels
###CONTROL STRUCTURES
#we will write a function to get the transpose of a matrix
#it's a poor alternative to built-in t() function, but good practice
# Transpose a matrix (teaching re-implementation of base t()).
#
# Args:
#   x: a matrix. Non-matrix input triggers a warning and returns NA.
# Returns:
#   A numeric matrix with dimensions ncol(x) x nrow(x), or NA_real_ when
#   x is not a matrix.
mytrans <- function(x) {
  if (!is.matrix(x)) {
    warning("argument is not a matrix: returning NA")
    return(NA_real_)
  }
  # Preallocate the full result (double, matching the original which
  # initialised with 1); cells are overwritten below.
  y <- matrix(NA_real_, nrow = ncol(x), ncol = nrow(x))
  # seq_len() instead of 1:nrow(x)/1:ncol(x): 1:0 yields c(1, 0) and made
  # the original fail with a subscript error on zero-row/zero-column input.
  for (i in seq_len(nrow(x))) {
    for (j in seq_len(ncol(x))) {
      y[j, i] <- x[i, j]
    }
  }
  return(y)
}
# Exercise the transpose helper on a 5x2 matrix; tz should be 2x5.
z<-matrix(1:10,nrow=5,ncol=2)
tz<-mytrans(z)
###USER-WRITTEN FUNCTIONS
#function example -get measures of central tendency and spread for a numeric
#vector x. The user has a choice of measures and whether the results are printed.
# Central tendency and spread of a numeric vector.
#
# Args:
#   x:     numeric vector to summarise.
#   npar:  if TRUE (default) use nonparametric measures (median and MAD);
#          otherwise use the mean and standard deviation.
#   print: if TRUE (default), print the chosen measures to the console.
# Returns:
#   A list with components `center` and `spread`.
mysummary <- function(x, npar = TRUE, print = TRUE) {
  if (!npar) {
    center <- mean(x)
    spread <- sd(x)
  } else {
    center <- median(x)
    spread <- mad(x)
  }
  # && (scalar, short-circuiting) is the correct operator inside if();
  # the original used the vectorized & form, which R 4.3+ errors on for
  # length > 1 operands.
  if (print && !npar) {
    cat("Mean=", center, "\n", "SD=", spread, "\n")
  } else if (print && npar) {
    cat("Median=", center, "\n", "MAD=", spread, "\n")
  }
  result <- list(center = center, spread = spread)
  return(result)
}
#invoking the function with the default (nonparametric) and parametric modes
set.seed(1234)
x<-rpois(500,4)
y<-mysummary(x)
y<-mysummary(x,npar=FALSE,print=FALSE)
y<-mysummary(x,npar=FALSE)
###SORTING DATA
#sorting examples using the mtcars dataset
attach(mtcars)
#sort by mpg
newdata<-mtcars[order(mpg),]
#sort by mpg and cyl
newdata<-mtcars[order(mpg,cyl),]
#sort by mpg (ascending) and cyl (descending)
newdata<-mtcars[order(mpg,-cyl),]
detach(mtcars)
###AGGREGATING DATA
# aggregate data frame mtcars by cyl and vs, returning means for numeric variables
attach(mtcars)
aggdata<-aggregate(mtcars,by=list(cyl,vs),FUN=mean,na.rm=TRUE)
print(aggdata)
detach(mtcars)
###Reshaping data
#Transpose (t() coerces the data frame to a matrix)
mtcars
t(mtcars)
###CREATING A DATA FRAME FROM SCRATCH
z<- data.frame(cbind(c(1,1,2,2),c(1,2,1,2),c(5,3,6,2),c(6,5,1,4)))
#change the names of the columns
names(z)<-c("id","time","x1","x2")
#the reshape package
#example of melt function: long format keyed by id and time, then
#cross-tabulated means per subject and per time point with cast()
library(reshape)
mdata<-melt(z,id=c("id","time"))
subjmeans<-cast(mdata,id~variable, mean)
timemeans<-cast(mdata,time~variable,mean)
###SUBSETTING DATA
#selecting (keeping) variables
myvars<-c("state_id","year","fire")
newdata<-mydata[myvars]
#select 1st and 5th thru 10th variables
newdata<-mydata[c(1,5:10)]
#exclude 3rd variable
newdata<-newdata[c(-3)]
#delete variables by assigning NULL
newdata$idchanged <- newdata$typecode <- NULL
####selecting observations
#first 5 observations
smalldata<-mydata[1:5,]
#based on variable values
newdata2<-mydata[which(mydata$year>=1980 & mydata$fire>=5000000),]
#selection using the subset function
newdata3<-subset(mydata, year>=1980 & (fire>=5000000 | fire<4000000),
select=state_id:fire)
newdata4<-subset(mydata, year>=1980 & (fire>=5000000 | fire<4000000),
select=c(state_id,fire,Police))
###Random samples
#take a random sample of size 50 from a dataset mydata
#sample without replacement
mysample<-mydata[sample(1:nrow(mydata),50,replace=FALSE),]
|
0f76641da1e98b2a368725b43325df8e5a56e8e4
|
eb5756859c948b31c31839d94b36acf2a730a2df
|
/man/is_connection.Rd
|
7cbf65049c07fe4c8bf83fc5e3cdc0af7b30d453
|
[] |
no_license
|
cran/assertive.files
|
9073bf416e95bf74b104c2d94ac278b71797ac54
|
ce4c9f1d5291b89197fadb2370bf1f71bb6984ff
|
refs/heads/master
| 2021-01-21T14:08:40.275117
| 2016-05-10T10:30:32
| 2016-05-10T10:30:32
| 48,076,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,517
|
rd
|
is_connection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assert-is-connection.R, R/is-connection.R
\name{assert_is_bzfile_connection}
\alias{assert_is_bzfile_connection}
\alias{assert_is_connection}
\alias{assert_is_fifo_connection}
\alias{assert_is_file_connection}
\alias{assert_is_gzfile_connection}
\alias{assert_is_incomplete_connection}
\alias{assert_is_open_connection}
\alias{assert_is_pipe_connection}
\alias{assert_is_readable_connection}
\alias{assert_is_socket_connection}
\alias{assert_is_stderr}
\alias{assert_is_stdin}
\alias{assert_is_stdout}
\alias{assert_is_terminal_connection}
\alias{assert_is_text_connection}
\alias{assert_is_unz_connection}
\alias{assert_is_url_connection}
\alias{assert_is_writable_connection}
\alias{assert_is_xzfile_connection}
\alias{is_bzfile_connection}
\alias{is_connection}
\alias{is_fifo_connection}
\alias{is_file_connection}
\alias{is_gzfile_connection}
\alias{is_incomplete_connection}
\alias{is_open_connection}
\alias{is_pipe_connection}
\alias{is_readable_connection}
\alias{is_socket_connection}
\alias{is_stderr}
\alias{is_stdin}
\alias{is_stdout}
\alias{is_terminal_connection}
\alias{is_text_connection}
\alias{is_unz_connection}
\alias{is_url_connection}
\alias{is_writable_connection}
\alias{is_xzfile_connection}
\title{Is the input a connection?}
\usage{
assert_is_bzfile_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_connection(x, severity = getOption("assertive.severity", "stop"))
assert_is_fifo_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_file_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_gzfile_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_incomplete_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_open_connection(x, rw = "",
severity = getOption("assertive.severity", "stop"))
assert_is_pipe_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_readable_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_socket_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_stderr(x, severity = getOption("assertive.severity", "stop"))
assert_is_stdin(x, severity = getOption("assertive.severity", "stop"))
assert_is_stdout(x, severity = getOption("assertive.severity", "stop"))
assert_is_terminal_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_text_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_unz_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_url_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_writable_connection(x, severity = getOption("assertive.severity",
"stop"))
assert_is_xzfile_connection(x, severity = getOption("assertive.severity",
"stop"))
is_bzfile_connection(x, .xname = get_name_in_parent(x))
is_connection(x, .xname = get_name_in_parent(x))
is_fifo_connection(x, .xname = get_name_in_parent(x))
is_file_connection(x, .xname = get_name_in_parent(x))
is_gzfile_connection(x, .xname = get_name_in_parent(x))
is_incomplete_connection(x, .xname = get_name_in_parent(x))
is_open_connection(x, rw = "", .xname = get_name_in_parent(x))
is_pipe_connection(x, .xname = get_name_in_parent(x))
is_readable_connection(x, .xname = get_name_in_parent(x))
is_socket_connection(x, .xname = get_name_in_parent(x))
is_stderr(x, .xname = get_name_in_parent(x))
is_stdin(x, .xname = get_name_in_parent(x))
is_stdout(x, .xname = get_name_in_parent(x))
is_terminal_connection(x, .xname = get_name_in_parent(x))
is_text_connection(x, .xname = get_name_in_parent(x))
is_unz_connection(x, .xname = get_name_in_parent(x))
is_url_connection(x, .xname = get_name_in_parent(x))
is_writable_connection(x, .xname = get_name_in_parent(x))
is_xzfile_connection(x, .xname = get_name_in_parent(x))
}
\arguments{
\item{x}{Input to check.}
\item{severity}{How severe should the consequences of the assertion be?
Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.}
\item{rw}{Read-write status of connection. Passed to \code{isOpen}.}
\item{.xname}{Not intended to be used directly.}
}
\value{
\code{is_connection} checks for objects of class "connection".
\code{is_open_connection} and \code{is_incomplete_connection} wrap
\code{isOpen} and \code{isIncomplete} respectively, providing more
information on failure.
\code{is_readable_connection} and \code{is_writable_connection} tell you
whether the connection is readable from or writable to.
\code{is_bzfile_connection}, \code{is_fifo_connection},
\code{is_file_connection}, \code{is_pipe_connection},
\code{is_socket_connection}, \code{is_stderr}, \code{is_stdin},
\code{is_stdout}, \code{is_text_connection}, \code{is_unz_connection},
\code{is_url_connection} and \code{is_xzfile_connection} give more
specific tests on the type of connection.
The \code{assert_*} functions return nothing but throw an error if the
corresponding \code{is_*} function returns \code{FALSE}.
}
\description{
Various checks to see if the input is a (particular type of/open/incomplete)
connection.
}
\note{
\code{is_incomplete_connection} will return false for closed
connections, regardless of whether or not the connection ends with a newline
character.
(\code{isIncomplete} throws an error for closed connections.)
}
\examples{
assert_is_terminal_connection(stdin())
assert_is_readable_connection(stdin())
assert_is_open_connection(stdin())
assert_is_stdin(stdin())
# Next line is usually true but, e.g., devtools::run_examples overrides it
assertive.base::dont_stop(assert_is_terminal_connection(stdout()))
assert_is_writable_connection(stdout())
assert_is_open_connection(stdout())
assert_is_stdout(stdout())
assert_is_terminal_connection(stderr())
assert_is_writable_connection(stderr())
assert_is_open_connection(stderr())
assert_is_stderr(stderr())
tcon <- textConnection("txt", "w", local = TRUE)
assert_is_text_connection(tcon)
assert_is_open_connection(tcon)
cat("this has no final newline character", file = tcon)
assert_is_incomplete_connection(tcon)
close(tcon)
# These examples should fail.
assertive.base::dont_stop({
assert_is_connection("not a connection")
assert_is_readable_connection(stdout())
assert_is_writable_connection(stdin())
})
\dontrun{
fcon <- file()
close(fcon)
assert_is_open_connection(fcon)
}
}
\seealso{
\code{\link[base]{isOpen}}.
}
|
552901b3d2051d5a13f4fa424fde0e4f73f0b36b
|
e0733e54c3a9078e046663ad84ca5e7488489efd
|
/man/climateChangeMats.Rd
|
6f6b79ab7e11554f05db264ea8943f148f595e03
|
[] |
no_license
|
npp97-field/hadsstR
|
7afde0ff9e00bf48020d6b1043e7846c4a088d98
|
f218bfc992b96e413a6186fac00cd4f063564b2b
|
refs/heads/master
| 2020-12-28T23:16:19.584989
| 2015-04-24T14:23:44
| 2015-04-24T14:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
rd
|
climateChangeMats.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{climateChangeMats}
\alias{climateChangeMats}
\title{Climate Change matrices for 1960-2009}
\format{A \code{hadsstMats} object with a variety of matrices
\describe{
See \code{\link{getClimateChange}} for details of parts of object
}}
\source{
\url{http://github.com/jebyrnes/hadsstR}
}
\usage{
climateChangeMats
}
\description{
A hadsstMats object for processed data from 1960-2009
}
\examples{
data(climateChangeMats)
pal <- colorRampPalette(c("blue","white", "red"))
library(lattice)
latLonGrid <- expand.grid(lon = climateChangeMats$lon, lat = climateChangeMats$lat)
with(climateChangeMats, image(lon, lat, averageMat, col=pal(80)))
}
\keyword{datasets}
|
122c844ef88d667f0f5c146ffdb86f794cafc509
|
7f7d496444333e6e7fb457af654ad07aeb0516dd
|
/mypgm01.R
|
71a47d4304545549dd0ed6839236f3024cf98ba8
|
[] |
no_license
|
navnathsatre/DS_R
|
c0b07710e9511354fccb2f17952b502c3173169b
|
06c2f4def40e3ff3b7c0e1a1f8c90e4a21fe5b39
|
refs/heads/main
| 2023-01-30T02:30:50.419891
| 2020-12-17T11:32:01
| 2020-12-17T11:32:01
| 320,799,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
mypgm01.R
|
# Beginner practice script: arithmetic, assignment, and vectors.
# NOTE(review): two lines below (`exp1`, first `This.Year`) reference
# objects that do not exist yet and will error when the script is sourced;
# they appear to be deliberate teaching examples.
getwd()
#setwd("F:\\R.mypgm1")
# my first pgm in R
2+5
5^6
exp(2)
var1<-5
2>6
3*5+6
# NOTE(review): `exp1` is undefined — presumably exp(1) was meant.
exp1
exp(1)
sqrt(2.6)
pi
pi
x<-5
x<-3
y<-6
z<-8
x+y+z
# R is case sensitive language
6*pi-688
2*pi*6378 # circumference of earth at equator (km)
# NOTE(review): `This.Year` is used before it is assigned on the next line.
This.Year
This.Year<-2020
nath <- c(1,2,3,4)
nath
vec <- c(10,20,30,40)
vec
# Character vectors; mixing numbers and strings coerces everything to character
vec1<-c("R","Python","java")
vec1<-c('c',"C++","C#")
var2<-c(10,"S",25,'java')
var2
nath[3]
nath[]
getwd()
9331de255d9cb81a85606c3d92b1a453267da337
|
92a5c49b416ad9bc6f6166d65abfb31e8bc64d01
|
/src/11-Three_data_features.R
|
34c94a2099570a84b912516178c0758b7aec175e
|
[
"MIT"
] |
permissive
|
chunjie-sam-liu/Immune-checkpoint-blockade
|
e87f131b97581008d4368e2a07f297d3410958bf
|
e1daf00b2b3661df3b415b4531c13594252a6235
|
refs/heads/master
| 2022-03-26T04:53:41.337724
| 2019-12-11T00:29:24
| 2019-12-11T00:29:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,490
|
r
|
11-Three_data_features.R
|
# use the all enriched functions' genes as features (GO, KEGG)
# BUG FIX: the original line was a bare `library()`, which attaches nothing
# (it only lists installed packages). Attach what this script actually uses
# without a namespace prefix: the pipe (%>%, from dplyr/magrittr),
# randomForest(), and roc().
library(dplyr)
library(randomForest)
library(pROC)
#GO interaction-------------------------------------------------------------------------------------------------------
# Load per-cohort GO enrichment results for UP-regulated genes and keep only
# strongly significant terms (adjusted p <= 0.01).
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_PD1/up_enrichGO.txt",
header = T,as.is = TRUE,sep = "\t",quote = "")%>%
dplyr::filter(p.adjust <= 0.01)->GO_melanoma_PD1_up
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_CTLA4/Second_Response_standard/up_enrichGO.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->GO_melanoma_CTLA4_up
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/gastric_cancer/up_enrichGO.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->GO_gastric_PD1_up
# GO terms enriched (up) in all three cohorts.
intersect(GO_melanoma_PD1_up$Description,GO_melanoma_CTLA4_up$Description) %>% intersect(GO_gastric_PD1_up$Description)->up_Intersection
# Same procedure for the DOWN-regulated gene enrichments.
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_PD1/down_enrichGO.txt",
header = T,as.is = TRUE,sep = "\t",quote = "")%>%
dplyr::filter(p.adjust <= 0.01)->GO_melanoma_PD1_down
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_CTLA4/Second_Response_standard/down_enrichGO.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->GO_melanoma_CTLA4_down
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/gastric_cancer/down_enrichGO.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->GO_gastric_PD1_down
intersect(GO_melanoma_PD1_down$Description,GO_melanoma_CTLA4_down$Description) %>% intersect(GO_gastric_PD1_down$Description)->down_Intersection
#KEGG interaction-----------------------
# Same significance filter and three-way intersection for KEGG pathways.
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_PD1/up_enrichKEGG.txt",
header = T,as.is = TRUE,sep = "\t",quote = "")%>%
dplyr::filter(p.adjust <= 0.01)->KEGG_melanoma_PD1_up
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_CTLA4/Second_Response_standard/up_enrichKEGG.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->KEGG_melanoma_CTLA4_up
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/gastric_cancer/up_enrichKEGG.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->KEGG_gastric_PD1_up
intersect(KEGG_melanoma_PD1_up$Description,KEGG_melanoma_CTLA4_up$Description) %>% intersect(KEGG_gastric_PD1_up$Description)->KEGG_up_Intersection
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_PD1/down_enrichKEGG.txt",
header = T,as.is = TRUE,sep = "\t",quote = "")%>%
dplyr::filter(p.adjust <= 0.01)->KEGG_melanoma_PD1_down
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_CTLA4/Second_Response_standard/down_enrichKEGG.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->KEGG_melanoma_CTLA4_down
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/gastric_cancer/down_enrichKEGG.txt",
header = T,as.is = TRUE,sep = "\t")%>%
dplyr::filter(p.adjust <= 0.01)->KEGG_gastric_PD1_down
intersect(KEGG_melanoma_PD1_down$Description,KEGG_melanoma_CTLA4_down$Description) %>% intersect(KEGG_gastric_PD1_down$Description)->KEGG_down_Intersection
#0
# NOTE(review): the "#0" above presumably records that the down-regulated
# KEGG intersection was empty -- confirm against the data.
#get all genes
# Collect the unique Entrez gene IDs behind a set of enriched GO terms.
#
# enrich_result:  data frame of enrichGO output with `Description` and
#                 `geneID` ("/"-separated Entrez IDs) columns.
# GO_Description: character vector of GO term descriptions to keep.
#
# Returns a character vector of unique Entrez IDs, in first-appearance order
# (character(0) when no description matches) -- same result as the original.
fn_all_pathway_genes_GO <- function(enrich_result, GO_Description){
  keep <- enrich_result$Description %in% GO_Description
  # strsplit() is already vectorised; the original sapply() wrapper around it
  # was redundant. as.character() keeps the empty-match case a character(0).
  unique(as.character(unlist(
    strsplit(enrich_result$geneID[keep], "/", fixed = TRUE)
  )))
}
# Entrez-ID -> gene-symbol lookup table (NCBI export).
# BUG FIX: the original piped into as.data.frame(stringsAsFactors), passing
# the undefined name `stringsAsFactors` positionally (as row.names), which
# errors when forced. The named argument `stringsAsFactors = FALSE` -- the
# form used everywhere else in this script -- was intended.
readxl::read_excel("/data/liull/reference/All_EntrezID_Symbl_NCBI.xlsx",col_names = TRUE)%>%as.data.frame(stringsAsFactors = FALSE)->EntrezID_Symbl
# Collect the unique gene symbols behind a set of enriched KEGG pathways.
#
# enrich_result:    data frame of enrichKEGG output with `Description` and
#                   `geneID` ("/"-separated Entrez IDs) columns.
# KEGG_Description: character vector of pathway descriptions to keep.
# id_map:           Entrez-ID-to-symbol table with `GeneID` and `Symbol`
#                   columns. Defaults to the script-level `EntrezID_Symbl`
#                   for backward compatibility, but can now be injected
#                   (which also makes the function testable in isolation).
#
# Returns a character vector of symbols, ordered by Entrez ID (merge() sorts
# on the key by default, exactly as the original did). IDs absent from
# id_map are silently dropped (inner-join semantics, unchanged).
fn_all_pathway_genes_KEGG <- function(enrich_result, KEGG_Description,
                                      id_map = EntrezID_Symbl){
  keep <- enrich_result$Description %in% KEGG_Description
  # strsplit() is vectorised; the original sapply() wrapper was redundant.
  entrez_ids <- unique(as.character(unlist(
    strsplit(enrich_result$geneID[keep], "/", fixed = TRUE)
  )))
  mapped <- merge(data.frame(EntrezID = entrez_ids),
                  id_map, by.x = "EntrezID", by.y = "GeneID")
  mapped$Symbol
}
# Build the 40 model features: for every enriched term shared by the three
# cohorts, take the union over cohorts of the genes annotated to that term.
# The original spelled each feature out by hand (~100 duplicated lines); the
# loops below produce the same list, with the same element names in the same
# order. NOTE: downstream code drops the response column with
# train_features_score[, -41], i.e. it relies on exactly 40 features here.

# Feature name -> GO term description (order defines feature/column order).
go_descriptions <- c(
  T_cell_activation = "T cell activation",
  regulation_of_T_cell_activation = "regulation of T cell activation",
  positive_regulation_of_T_cell_activation = "positive regulation of T cell activation",
  T_cell_costimulation = "T cell costimulation",
  lymphocyte_costimulation = "lymphocyte costimulation",
  lymphocyte_differentiation = "lymphocyte differentiation",
  regulation_of_lymphocyte_activation = "regulation of lymphocyte activation",
  positive_regulation_of_lymphocyte_activation = "positive regulation of lymphocyte activation",
  lymphocyte_proliferation = "lymphocyte proliferation",
  regulation_of_lymphocyte_proliferation = "regulation of lymphocyte proliferation",
  leukocyte_cell_cell_adhesion = "leukocyte cell-cell adhesion",
  leukocyte_differentiation = "leukocyte differentiation",
  leukocyte_proliferation = "leukocyte proliferation",
  positive_regulation_of_leukocyte_activation = "positive regulation of leukocyte activation",
  positive_regulation_of_leukocyte_cell_cell_adhesion = "positive regulation of leukocyte cell-cell adhesion",
  regulation_of_leukocyte_proliferation = "regulation of leukocyte proliferation",
  regulation_of_leukocyte_cell_cell_adhesion = "regulation of leukocyte cell-cell adhesion",
  interferon_gamma_production = "interferon-gamma production",
  regulation_of_interferon_gamma_production = "regulation of interferon-gamma production",
  mononuclear_cell_proliferation = "mononuclear cell proliferation",
  regulation_of_mononuclear_cell_proliferation = "regulation of mononuclear cell proliferation",
  MHC_class_II_protein_complex = "MHC class II protein complex",
  regulation_of_cell_cell_adhesion = "regulation of cell-cell adhesion",
  positive_regulation_of_cell_adhesion = "positive regulation of cell adhesion",
  positive_regulation_of_cell_cell_adhesion = "positive regulation of cell-cell adhesion",
  antigen_receptor_mediated_signaling_pathway = "antigen receptor-mediated signaling pathway",
  cellular_defense_response = "cellular defense response",
  immune_response_activating_cell_surface_receptor_signaling_pathway = "immune response-activating cell surface receptor signaling pathway",
  immune_response_regulating_cell_surface_receptor_signaling_pathway = "immune response-regulating cell surface receptor signaling pathway",
  immunological_synapse = "immunological synapse",
  positive_regulation_of_cell_activation = "positive regulation of cell activation",
  regulation_of_immune_effector_process = "regulation of immune effector process",
  response_to_lipopolysaccharide = "response to lipopolysaccharide",
  response_to_molecule_of_bacterial_origin = "response to molecule of bacterial origin",
  side_of_membrane = "side of membrane"
)
# Feature name -> KEGG pathway description.
kegg_descriptions <- c(
  Hematopoietic_cell_lineage = "Hematopoietic cell lineage",
  Staphylococcus_aureus_infection = "Staphylococcus aureus infection",
  Intestinal_immune_network_for_IgA_production = "Intestinal immune network for IgA production",
  CAMs = "Cell adhesion molecules (CAMs)",
  Th17_cell_differentiation = "Th17 cell differentiation"
)

# Union of one term's genes across the three cohorts.
union3 <- function(a, b, c) union(union(a, b), c)

gene_set <- list()
for (nm in names(go_descriptions)) {
  term <- go_descriptions[[nm]]
  gene_set[[nm]] <- union3(
    fn_all_pathway_genes_GO(GO_melanoma_PD1_up, term),
    fn_all_pathway_genes_GO(GO_melanoma_CTLA4_up, term),
    fn_all_pathway_genes_GO(GO_gastric_PD1_up, term)
  )
}
for (nm in names(kegg_descriptions)) {
  term <- kegg_descriptions[[nm]]
  gene_set[[nm]] <- union3(
    fn_all_pathway_genes_KEGG(KEGG_melanoma_PD1_up, term),
    fn_all_pathway_genes_KEGG(KEGG_melanoma_CTLA4_up, term),
    fn_all_pathway_genes_KEGG(KEGG_gastric_PD1_up, term)
  )
}
# Score each sample on each gene-set feature as the mean expression of the
# set's genes.
#
# expr:      expression data frame/matrix with genes in rows (rownames are
#            gene identifiers) and samples in columns.
# gene_list: named list; each element is a character vector of gene
#            identifiers. Every listed gene must be present in rownames(expr)
#            (missing genes error, as in the original). Names are assumed
#            unique -- the original looked each name up with which(), which
#            is redundant under that assumption.
#
# Returns a samples x features numeric matrix (rownames = colnames(expr),
# colnames = names(gene_list)).
fn_get_feature_score <- function(expr, gene_list){
  samples <- colnames(expr)
  score_frame <- matrix(
    nrow = length(samples), ncol = length(gene_list),
    dimnames = list(samples, names(gene_list))
  )
  # Transpose so samples are rows and genes are columns.
  t_expr <- as.data.frame(t(expr), stringsAsFactors = FALSE)
  for (i in seq_along(gene_list)) {
    feature_gene <- as.character(unlist(gene_list[[i]]))
    # rowMeans() replaces the per-row apply(..., 1, mean) of the original and
    # the deprecated bare-vector dplyr::select(feature_gene).
    score_frame[, i] <- rowMeans(t_expr[, feature_gene, drop = FALSE])
  }
  score_frame
}
#train in melanoma PD1------------------------------
# Training cohort: melanoma, anti-PD1, RNA-Seq, pre-treatment biopsies;
# samples with non-evaluable ("NE") response are dropped.
readxl::read_xlsx("/data/liucj/data/immune-checkpoint-blockade/all_metadata_available.xlsx",sheet = "SRA")%>%
dplyr::filter(Cancer == "melanoma")%>%
dplyr::filter(Anti_target =="anti-PD1")%>%
dplyr::filter(Library_strategy == "RNA-Seq")%>%
dplyr::filter(Biopsy_Time == "pre-treatment")%>%
dplyr::select(Run,SRA_Study,Response)%>%
dplyr::filter(Response != "NE")->metadata
# Collapse RECIST classes: PD/SD -> NR (non-responder), PR/CR -> R (responder).
metadata$Response %>%
gsub("PD","NR",.)%>% gsub("SD","NR",.)%>%
gsub("PR","R",.)%>%gsub("CR","R",.)->metadata$Response
# Expression matrix (log2 CPM, gene symbols in rows), restricted to this
# cohort's runs and to genes that appear in any feature gene set.
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_PD1_pretreatment_Symbol_log2CPM_expr.txt",
header = T)%>%
dplyr::select(metadata$Run)%>%
tibble::rownames_to_column()%>%
dplyr::filter(rowname %in% unique(as.character(unlist(gene_set))))%>%
tibble::column_to_rownames()->melanoma_PD1_log2CPM
# scale(melanoma_PD1_log2CPM,center = TRUE,scale = TRUE)%>%
#   as.data.frame(stringsAsFactors=FALSE)->scaled_melanoma_PD1_log2CPM
# Per-sample feature scores joined with the response label
# (metadata columns 1 and 3 are Run and Response).
fn_get_feature_score(melanoma_PD1_log2CPM,gene_set)%>%
as.data.frame(stringsAsFactors=FALSE)%>%
tibble::rownames_to_column()%>%
merge(metadata[,c(1,3)],by.x="rowname",by.y="Run")%>%
tibble::column_to_rownames()-> train_features_score
train_features_score$Response=as.factor(train_features_score$Response)
#test in melanoma CTLA4------------------------------
# Test cohort: melanoma, anti-CTLA4, RNA-Seq, pre-treatment; run SRR3083584
# is excluded (reason not recorded here -- presumably a known bad sample).
readxl::read_xlsx("/data/liucj/data/immune-checkpoint-blockade/all_metadata_available.xlsx",sheet = "dbGAP")%>%
dplyr::filter(Cancer == "melanoma")%>%
dplyr::filter(Anti_target =="anti-CTLA4")%>%
dplyr::filter(Library_strategy == "RNA-Seq")%>%
dplyr::filter(Biopsy_Time == "pre-treatment")%>%
dplyr::select(Run,SRA_Study,Second_Response_standard)%>%
dplyr::filter(Run != "SRR3083584")->metadata
# In this cohort "long-survival" is treated as responder.
metadata$Second_Response_standard %>%
gsub("long-survival","R",.)->metadata$Second_Response_standard
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/melanoma_CTLA4_pretreatment_Symbol_log2CPM_expr.txt",
header = T)%>%
dplyr::select(metadata$Run)%>%
tibble::rownames_to_column()%>%
dplyr::filter(rowname %in% unique(as.character(unlist(gene_set))))%>%
tibble::column_to_rownames()->melanoma_CTLA4_log2CPM
# scale(melanoma_CTLA4_log2CPM,center = TRUE,scale = TRUE)%>%
#   as.data.frame(stringsAsFactors=FALSE)->scaled_melanoma_CTLA4_log2CPM
# Feature scores + label for the test cohort, same construction as training.
fn_get_feature_score(melanoma_CTLA4_log2CPM,gene_set)%>%
as.data.frame(stringsAsFactors=FALSE)%>%
tibble::rownames_to_column()%>%
merge(metadata[,c(1,3)],by.x="rowname",by.y="Run")%>%
tibble::column_to_rownames()-> test_features_score
test_features_score$Second_Response_standard=as.factor(test_features_score$Second_Response_standard)
#randomforest----------
# Undersampling loop: the training set is class-imbalanced, so 1000 random
# forests are trained, each on a random subsample of 26 non-responders plus
# all responders, and each is scored by AUC on the CTLA4 test cohort.
ROC=c()
NR_1000_id=list()
for (i in 1:1000) {
which(train_features_score$Response[]=="NR") %>% sample(26)-> NR_id
which(train_features_score$Response[]=="R") -> R_id
# Column 41 is the Response label; columns 1-40 are the feature scores.
randomForest(x=train_features_score[c(NR_id,R_id),-41], y=train_features_score[c(NR_id,R_id),]$Response, ntree = 500,type="classification",
importance=T )->features_rf
pred_test=predict(features_rf,test_features_score[,-41])
test_roc <- roc(test_features_score$Second_Response_standard,as.numeric(pred_test))
ROC[i] <- test_roc$auc[1]
NR_1000_id[[i]] <- NR_id
}
# Recover the undersample that produced the best test AUC.
# NOTE(review): `features_rf` used below is the model from the LAST loop
# iteration, not the best-AUC one -- the best NR_id is recovered but the
# model is never retrained with it. Also `[[` errors if several iterations
# tie for max(ROC). Both look like latent bugs; confirm intent.
NR_1000_id[[which(ROC[]==max(ROC))]]->NR_id
#gastric cancer
# Independent validation cohort: gastric cancer, anti-PD1, pre-treatment.
readxl::read_xlsx("/data/liucj/data/immune-checkpoint-blockade/all_metadata_available.xlsx",sheet = "SRA")%>%
dplyr::filter(Cancer == "gastric cancer")%>%
dplyr::filter(Anti_target =="anti-PD1")%>%
dplyr::filter(Library_strategy == "RNA-Seq")%>%
dplyr::filter(Biopsy_Time == "pre-treatment")%>%
dplyr::select(Run,SRA_Study,Response)->metadata
metadata$Response %>%
gsub("PD","NR",.)%>% gsub("SD","NR",.)%>%
gsub("PR","R",.)%>%gsub("CR","R",.)->metadata$Response
read.table("/data/liull/immune-checkpoint-blockade/New_batch_effect_pipeline/gastric_cancer_PD1_pretreatment_Symbol_log2CPM_expr.txt",
header = T)%>%
dplyr::select(metadata$Run)%>%
tibble::rownames_to_column()%>%
dplyr::filter(rowname %in% unique(as.character(unlist(gene_set))))%>%
tibble::column_to_rownames()->gastric_cancer_PD1_log2CPM
# scale(gastric_cancer_PD1_log2CPM,center = TRUE,scale = TRUE)%>%
#   as.data.frame(stringsAsFactors=FALSE)->scaled_gastric_cancer_PD1_log2CPM
fn_get_feature_score(gastric_cancer_PD1_log2CPM,gene_set)%>%
as.data.frame(stringsAsFactors=FALSE)%>%
tibble::rownames_to_column()%>%
merge(metadata[,c(1,3)],by.x="rowname",by.y="Run")%>%
tibble::column_to_rownames()-> gastric_cancer_features_score
gastric_cancer_features_score$Response=as.factor(gastric_cancer_features_score$Response)
# Apply the final model to the validation cohort and compute its AUC.
pred_gastric=predict(features_rf,gastric_cancer_features_score[,-41])
test_roc <- roc(gastric_cancer_features_score$Response,as.numeric(pred_gastric))
|
9064c4c8d4b8080fed5c65738b857d6bef6cdc09
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spotifyr/examples/get_albums.Rd.R
|
5103590a8e76b4391c03c36558a910d9ceed1dbc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 246
|
r
|
get_albums.Rd.R
|
library(spotifyr)
### Name: get_albums
### Title: Get Albums
### Aliases: get_albums
### Keywords: albums
### ** Examples
## Not run:
##D artists <- get_artists('radiohead')
##D albums <- get_albums(artists$artist_uri[1])
## End(Not run)
|
3a20a754bee62d62e13dba005e79116ede7ed4bb
|
07305c42feb0b90dda0769a820e3a9495f1d7eb3
|
/tests/testthat/helper-dummy-vars.R
|
9c80c3511c9e5a459efe28a4da2219468adb6844
|
[
"MIT"
] |
permissive
|
cderv/proxyconfig
|
46080c5984a2df07ee71670a8f62c1d681e053f8
|
b2f89ab5fa2a463960700a0d106ab3935a31a629
|
refs/heads/master
| 2020-03-09T08:01:59.853232
| 2019-10-06T17:40:16
| 2019-10-06T17:40:16
| 128,679,659
| 15
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
helper-dummy-vars.R
|
# Shared fixtures for the proxy-configuration tests: a dummy proxy URL plus
# the corresponding proxy environment variables in lower case, upper case,
# and both combined (upper-case entries first).
dummy_proxy_url <- "http://user:pwd@proxy.info:5656"
dummy_env_var_lc <- c(http_proxy = dummy_proxy_url,
                      https_proxy = dummy_proxy_url,
                      no_proxy = ".dummy.domain")
# Base setNames() does what purrr::set_names() did here, without the
# third-party dependency in a test helper.
dummy_env_var_uc <- setNames(dummy_env_var_lc, toupper(names(dummy_env_var_lc)))
dummy_env_var <- c(dummy_env_var_uc, dummy_env_var_lc)
|
e0c7f996f0e87bab0455e7f1ca8275ca0755ca4e
|
e9a5a9e952a9ccac535efe64b96cc730b844677b
|
/man/XLC.Rd
|
75c4ba761f8e1e2207ffb539d2ad2209aa662b81
|
[] |
no_license
|
miraisolutions/xlconnect
|
323c22258439616a4d4e0d66ddc62204094196c9
|
ae73bfd5a368484abc36638e302b167bce79049e
|
refs/heads/master
| 2023-09-04T05:27:42.744196
| 2023-08-30T07:10:44
| 2023-08-30T07:10:44
| 8,108,907
| 114
| 35
| null | 2023-08-30T07:10:46
| 2013-02-09T11:17:42
|
R
|
UTF-8
|
R
| false
| false
| 4,660
|
rd
|
XLC.Rd
|
\name{XLC}
\alias{XLC}
\title{
XLConnect Constants
}
\description{
List structure defining several constants used across \pkg{XLConnect}.
}
\format{
The format is:
\preformatted{
List of 90
$ ERROR.WARN : chr "WARN"
$ ERROR.STOP : chr "STOP"
$ DATA_TYPE.BOOLEAN : chr "BOOLEAN"
$ DATA_TYPE.NUMERIC : chr "NUMERIC"
$ DATA_TYPE.STRING : chr "STRING"
$ DATA_TYPE.DATETIME : chr "DATETIME"
$ STYLE_ACTION.XLCONNECT : chr "XLCONNECT"
$ STYLE_ACTION.NONE : chr "NONE"
$ STYLE_ACTION.PREDEFINED : chr "PREDEFINED"
$ STYLE_ACTION.NAME_PREFIX : chr "STYLE_NAME_PREFIX"
$ STYLE_ACTION.DATA_FORMAT_ONLY: chr "DATA_FORMAT_ONLY"
$ BORDER.DASHED : num 3
$ BORDER.DASH_DOT : num 9
$ BORDER.DASH_DOT_DOT : num 11
$ BORDER.DOTTED : num 7
$ BORDER.DOUBLE : num 6
$ BORDER.HAIR : num 4
$ BORDER.MEDIUM : num 2
$ BORDER.MEDIUM_DASHED : num 8
$ BORDER.MEDIUM_DASH_DOT : num 10
$ BORDER.MEDIUM_DASH_DOT_DOT : num 12
$ BORDER.NONE : num 0
$ BORDER.SLANTED_DASH_DOT : num 13
$ BORDER.THICK : num 5
$ BORDER.THIN : num 1
$ COLOR.BLACK : num 8
$ COLOR.WHITE : num 9
$ COLOR.RED : num 10
$ COLOR.BRIGHT_GREEN : num 11
$ COLOR.BLUE : num 12
$ COLOR.YELLOW : num 13
$ COLOR.PINK : num 14
$ COLOR.TURQUOISE : num 15
$ COLOR.DARK_RED : num 16
$ COLOR.GREEN : num 17
$ COLOR.DARK_BLUE : num 18
$ COLOR.DARK_YELLOW : num 19
$ COLOR.VIOLET : num 20
$ COLOR.TEAL : num 21
$ COLOR.GREY_25_PERCENT : num 22
$ COLOR.GREY_50_PERCENT : num 23
$ COLOR.CORNFLOWER_BLUE : num 24
$ COLOR.MAROON : num 25
$ COLOR.LEMON_CHIFFON : num 26
$ COLOR.ORCHID : num 28
$ COLOR.CORAL : num 29
$ COLOR.ROYAL_BLUE : num 30
$ COLOR.LIGHT_CORNFLOWER_BLUE : num 31
$ COLOR.SKY_BLUE : num 40
$ COLOR.LIGHT_TURQUOISE : num 41
$ COLOR.LIGHT_GREEN : num 42
$ COLOR.LIGHT_YELLOW : num 43
$ COLOR.PALE_BLUE : num 44
$ COLOR.ROSE : num 45
$ COLOR.LAVENDER : num 46
$ COLOR.TAN : num 47
$ COLOR.LIGHT_BLUE : num 48
$ COLOR.AQUA : num 49
$ COLOR.LIME : num 50
$ COLOR.GOLD : num 51
$ COLOR.LIGHT_ORANGE : num 52
$ COLOR.ORANGE : num 53
$ COLOR.BLUE_GREY : num 54
$ COLOR.GREY_40_PERCENT : num 55
$ COLOR.DARK_TEAL : num 56
$ COLOR.SEA_GREEN : num 57
$ COLOR.DARK_GREEN : num 58
$ COLOR.OLIVE_GREEN : num 59
$ COLOR.BROWN : num 60
$ COLOR.PLUM : num 61
$ COLOR.INDIGO : num 62
$ COLOR.GREY_80_PERCENT : num 63
$ COLOR.AUTOMATIC : num 64
$ FILL.NO_FILL : num 0
$ FILL.SOLID_FOREGROUND : num 1
$ FILL.FINE_DOTS : num 2
$ FILL.ALT_BARS : num 3
$ FILL.SPARSE_DOTS : num 4
$ FILL.THICK_HORZ_BANDS : num 5
$ FILL.THICK_VERT_BANDS : num 6
$ FILL.THICK_BACKWARD_DIAG : num 7
$ FILL.THICK_FORWARD_DIAG : num 8
$ FILL.BIG_SPOTS : num 9
$ FILL.BRICKS : num 10
$ FILL.THIN_HORZ_BANDS : num 11
$ FILL.THIN_VERT_BANDS : num 12
$ FILL.THIN_BACKWARD_DIAG : num 13
$ FILL.THIN_FORWARD_DIAG : num 14
$ FILL.SQUARES : num 15
$ FILL.DIAMONDS : num 16
}
}
\author{
Martin Studer\cr
Mirai Solutions GmbH \url{https://mirai-solutions.ch}
}
\details{
The \code{XLC} list structure defines several constants used throughout \pkg{XLConnect}. The general convention for
enumeration types is to address corresponding constants via \var{XLC$"<ENUM_TYPE>.<VALUE>"} where \var{<ENUM_TYPE>} specifies
the name of the enumeration and \var{<VALUE>} specifies a corresponding enumeration value. An example is \code{XLC$"COLOR.RED"} where
"COLOR" is the enumeration type and "RED" is the corresponding color enumeration value.
}
\keyword{datasets}
\keyword{list}
\keyword{utilities}
|
26c84cb5a456edea6b95cfce655793e8277f75f1
|
5fea5696e21e120581c12a086a6cf1e01d08e20a
|
/bagofword.R
|
a474bffe0825bdaa8245dc58531dc790e7c0cc8c
|
[] |
no_license
|
lamelia/titanic
|
696eb87e1f99d197e2d44edf45c26a42d14019f5
|
49373413b40db9734a10c97ee88278e180fc0959
|
refs/heads/master
| 2021-01-10T08:03:57.296767
| 2016-03-07T15:31:37
| 2016-03-07T15:31:37
| 51,232,672
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,054
|
r
|
bagofword.R
|
# Titanic survival prediction: bag-of-words features built from passenger
# names (tm) combined with elastic-net logistic regression (glmnet).

# Read the combined passenger table (train rows 1-891, test rows 892-1309).
all_complete <- read.csv("all_complete_titanic.csv")
#text mining
# library() instead of require(): require() returns FALSE on failure instead
# of erroring, which would let the script fail later with a confusing message.
library(tm)
# build a corpus
all_complete.corpus <- Corpus(VectorSource(all_complete$Name))
# make each letter lowercase
all_complete.corpus <- tm_map(all_complete.corpus, content_transformer(tolower))
# remove punctuation
all_complete.corpus <- tm_map(all_complete.corpus, removePunctuation)
# remove generic and custom stopwords
# my_stopwords <- c(stopwords('english'), 'prolife', 'prochoice')
# cT.corpus <- tm_map(cT.corpus, removeWords, my_stopwords)
# build a term-document matrix
all_complete.dtm <- TermDocumentMatrix(all_complete.corpus)
# inspect the document-term matrix
all_complete.dtm
# inspect most popular words
findFreqTerms(all_complete.dtm, lowfreq=30)
# Drop very rare terms, then transpose to a documents x terms table.
all_complete.dtm2 <- removeSparseTerms(all_complete.dtm, sparse=0.999)
bag.df <- t(as.data.frame(inspect(all_complete.dtm2)))
#divide bag.df to train and test
bag_train <- bag.df[1:891,]
bag_test <- bag.df[892:1309,]
#divide all to train and test dataset
titanic_train <- all_complete[1:891,]
titanic_test <- all_complete[892:1309,]
# Drop columns not used as predictors (by position: 1, 2, 5, 9, 10).
cT_train <- titanic_train[c(-1,-2,-5,-9,-10)]
cT_test <- titanic_test[c(-1,-2,-5,-9,-10)]
#factor Pclass,new_embarked for cT_train and cT_test
cT_train$Pclass <- factor(cT_train$Pclass)
cT_train$new_Embarked <- factor(cT_train$new_Embarked)
cT_test$Pclass <- factor(cT_test$Pclass)
cT_test$new_Embarked <- factor(cT_test$new_Embarked)
str(cT_train)
str(cT_test)
#merge data train/test with bag.df
c_train <- cbind(cT_train,bag_train)
c_test <- cbind(cT_test,bag_test)
#glmnet
library(boot)
library(glmnet)
set.seed(123)
# Model formula: all predictors plus hand-picked interaction terms.
f <- as.formula(Survived~.+log(Age)+Pclass*SibSp+Pclass*Sex+Parch*Pclass+SibSp*Parch+Parch*Sex+log(Age)+Age*Sex*Pclass+Age*Parch*Pclass+Age*SibSp*Pclass+Age*Fare*Pclass+Sex*Fare*Pclass)
#transform data to matrices required by glmnet
x <- model.matrix(f,c_train)
y <- as.matrix(c_train$Survived,ncol=1)
# Lasso (alpha = 1) logistic regression, 20-fold CV on misclassification.
cv.fit <- cv.glmnet(x,y,alpha=1,family="binomial", type.measure = "class",nfolds = 20)
cv.fit$cvm
min(cv.fit$cvm)
#apply model to test data (c_test)
# Dummy response so model.matrix() can evaluate the formula on test data.
c_test$Survived <- 0
xtest <- model.matrix(f,c_test)
glm.pred <- rep(0,418)
glm.prob <- predict(cv.fit,newx = xtest,type="class", s="lambda.min")
# NOTE(review): with type="class" predict() returns class labels, so this
# `> 0.5` comparison is character-wise; it only works because the labels are
# "0"/"1" -- confirm, or switch to type="response" with a real threshold.
glm.pred[glm.prob > 0.5]=1
#create new data frame containing only passenger id and survival prediction
Survived <- c(glm.pred)
# BUG FIX: the original indexed `all$PassengerId` -- `all` is the base R
# function, not the data set read above, so that line errored at run time.
# Use the data frame that was actually loaded.
PassengerId <- all_complete$PassengerId[892:1309]
prediction <- data.frame(PassengerId,Survived)
#next export to csv file
write.csv(prediction,file="prediction8.csv")
#3rd submission
#sparse=0.996, alpha=0.5, fold=20
#error=16.16 aplha=0.5 sparse=0.995, fold=20
#error=16.04 aplha=0.5 sparse=0.995, fold=30
#4th submission
#error=15.93 aplha=0.5 sparse=0.995, fold=40
#6th submission and 7th submission
#error=16.9 alpha=0.5 sparese=0.999 fold=20 -> best 80% correctly predicted
#error=18.6 alpha=0 sparese=0.999 fold=20
#8th submission
#error=17.39 alpa=1 sparse=0.999 fold=20 -> best 0.80861 correctly predicted
|
69eb6add882b3b1f065359b764dd330bb461b80b
|
4419dcaad86d41cca6ad026a6a6c72e408fa62eb
|
/R/bind-iterations.R
|
0615620235f3b733ea358db33aacc34596cfa90d
|
[
"MIT"
] |
permissive
|
poissonconsulting/mcmcr
|
c122a92676e7b1228eedb7edaebe43df823fdeb8
|
ca88071369472483e7d73914493b99fd9bda9bd5
|
refs/heads/main
| 2023-06-24T09:43:04.640793
| 2023-06-13T00:19:29
| 2023-06-13T00:19:29
| 70,411,531
| 15
| 3
|
NOASSERTION
| 2022-06-21T03:07:27
| 2016-10-09T15:18:09
|
HTML
|
UTF-8
|
R
| false
| false
| 1,388
|
r
|
bind-iterations.R
|
# Re-export the `bind_iterations` generic from the universals package so
# callers can use it without attaching universals directly.
#' @export
universals::bind_iterations
# S3 method: append the iterations of one mcarray onto another.
# Both inputs must agree on parameter dimensions and number of chains;
# the combined object keeps the "mcarray" class.
#' @export
bind_iterations.mcarray <- function(x, x2, ...) {
  chk_s3_class(x2, "mcarray")
  same_pdims <- identical(pdims(x), pdims(x2))
  if (!same_pdims) {
    abort_chk("`x` and `x2` must have the same parameter dimensions")
  }
  same_chains <- identical(nchains(x), nchains(x2))
  if (!same_chains) {
    abort_chk("`x` and `x2` must have the same number of chains")
  }
  # The iterations are bound along the second-to-last dimension here.
  iter_dim <- ndims(x) - 1
  bound <- abind(x, x2, along = iter_dim)
  set_class(bound, "mcarray")
}
# S3 method: append the iterations of one mcmcarray onto another.
# Validation mirrors the mcarray method; the bind happens along
# dimension 2, with dimnames dropped.
#' @export
bind_iterations.mcmcarray <- function(x, x2, ...) {
  chk_s3_class(x2, "mcmcarray")
  # Shared validation: both objects must match on the given property.
  check_same <- function(lhs, rhs, what) {
    if (!identical(lhs, rhs)) {
      abort_chk(paste0("`x` and `x2` must have the same ", what))
    }
  }
  check_same(pdims(x), pdims(x2), "parameter dimensions")
  check_same(nchains(x), nchains(x2), "number of chains")
  bound <- abind(x, x2, along = 2, dimnames = FALSE)
  set_class(bound, "mcmcarray")
}
# S3 method: append the iterations of one mcmcr object onto another.
# Parameters are sorted into a common order first, then each parameter's
# arrays are bound pairwise by recursing into the bind_iterations generic.
#' @export
bind_iterations.mcmcr <- function(x, x2, ...) {
  chk_s3_class(x2, "mcmcr")
  # Align parameter order before the pairwise comparisons and bind.
  x <- sort(x)
  x2 <- sort(x2)
  if (!identical(pars(x), pars(x2))) {
    abort_chk("`x` and `x2` must have the same parameters")
  }
  if (!identical(pdims(x), pdims(x2))) {
    abort_chk("`x` and `x2` must have the same parameter dimensions")
  }
  if (!identical(nchains(x), nchains(x2))) {
    abort_chk("`x` and `x2` must have the same number of chains")
  }
  # Map() is mapply(..., SIMPLIFY = FALSE): bind parameter by parameter.
  bound <- Map(bind_iterations, x, x2)
  set_class(bound, "mcmcr")
}
|
eae2f5a6da8fd88f34b9e8cb3a972271b12a0c7f
|
0a9051c89c5087c8152ef06601ec3200a10a4a2d
|
/src/eRNA.target.correlation.xyplot.R
|
e17e852849c979eb59a70a927b3233280c671880
|
[] |
no_license
|
sterding/BRAINcode
|
cfd79ab37f43f8db9126cc276d92e1d86d465275
|
7b4c5e816ff1cf9af86041326b71cf3f3e2e4bf6
|
refs/heads/master
| 2021-07-12T20:52:30.163069
| 2021-05-24T14:50:51
| 2021-05-24T14:50:51
| 1,508,241
| 11
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 564
|
r
|
eRNA.target.correlation.xyplot.R
|
# Scatter plot of eRNA expression vs. its target gene, with a fitted
# regression line and Pearson/Spearman correlations shown in the legend.
# Usage: Rscript eRNA.target.correlation.xyplot.R <inputfile> <output-stem>
args<-commandArgs(TRUE)
inputfile=args[1]
outputfile=args[2]
# Input: table whose first column is the feature ID and remaining columns
# are samples; after transposing, the two columns are eRNA and target gene.
df=read.table(inputfile, header=T)
rownames(df)=df[,1]; df=df[,-1]; df=t(df); colnames(df)=c("eRNA","target_gene"); df=as.data.frame(df)
# Log-transform with a 0.005 pseudocount to avoid log(0).
df1=log(0.005+df)
reg1 <- lm(target_gene ~ eRNA, data=df1)
pdf(paste(outputfile,"pdf",sep='.'))
par(cex=.8)
plot(target_gene ~ eRNA, data=df1, main=outputfile)
abline(reg1)
# Pearson is computed on the log scale (df1); Spearman on the raw values
# (df) -- it is rank-based, so the monotone log would not change it anyway.
legend("topleft",c(paste("Pearson's r:",round(cor(df1$target_gene,df1$eRNA, method="pearson"),3)), paste("Spearman's rho:",round(cor(df$target_gene,df$eRNA, method="spearman"),3))))
dev.off()
|
36e42bfcacbeafea88a223728426bbc80a2ed919
|
b73ba9d91f872931cbf88d50999411c0bb7c211e
|
/man/prepare-metadata.Rd
|
14583f251f51c6e3f470fd75f9d165fb2825269c
|
[
"MIT"
] |
permissive
|
weecology/portalcasting
|
73347ce66f8c1e5c080a1f1029ec17026c912588
|
a35a77214d41dbdaa50bb39452b5fe49c3763a83
|
refs/heads/main
| 2023-08-20T12:48:59.392495
| 2023-05-23T01:16:33
| 2023-05-23T01:16:33
| 129,144,321
| 8
| 12
|
NOASSERTION
| 2023-05-23T01:16:34
| 2018-04-11T19:34:03
|
R
|
UTF-8
|
R
| false
| true
| 1,649
|
rd
|
prepare-metadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_metadata.R
\name{prepare metadata}
\alias{prepare metadata}
\alias{prep-metadata}
\alias{metadata}
\alias{prepare_metadata}
\title{Prepare a Model-Running Metadata List}
\usage{
prepare_metadata(
main = ".",
datasets = prefab_datasets(),
new_datasets_controls = NULL
)
}
\arguments{
\item{main}{\code{character} value of the name of the main component of the directory tree.}
\item{datasets}{\code{character} vector of name(s) of dataset(s) to include.}
\item{new_datasets_controls}{\code{list} of controls for any new datasets (not in the prefab datasets) listed in \code{datasets} that are to be added to the control list and file.}
}
\value{
\code{list} of forecasting metadata, which is also saved out as a YAML file (\code{.yaml}) if desired.
}
\description{
Sets up the metadata list used for forecasting, in particular the matching of time period across the datasets, according to the \code{\link{directory_settings}}.
}
\examples{
\dontrun{
main1 <- file.path(tempdir(), "metadata")
create_dir(main = main1)
fill_resources(main = main1)
fill_forecasts(main = main1)
fill_fits(main = main1)
fill_models(main = main1)
prepare_newmoons(main = main1)
prepare_rodents(main = main1)
prepare_covariates(main = main1)
prepare_metadata(main = main1)
unlink(main1, recursive = TRUE)
}
}
\seealso{
Content preparation functions:
\code{\link{directory filling}},
\code{\link{prepare covariates}},
\code{\link{prepare models}},
\code{\link{prepare newmoons}},
\code{\link{prepare rodents}}
}
\concept{content-prep}
|
aa58ea06c68820bb1010dca1ea13ae50fab10ab0
|
cc541c98fa97f871643e4c83784180404368cc94
|
/tests/testthat.R
|
a06512ae5a97bcf62ef72023a62561cd12e94c77
|
[] |
no_license
|
ojessen/ojUtils
|
34e81a8511b96039b5ebb24e08f1d191eefe4b37
|
ce748448fdd71f4d9a459a94ea2b912da75e79a4
|
refs/heads/master
| 2018-11-06T06:50:41.609782
| 2018-09-10T19:02:49
| 2018-09-10T19:02:49
| 21,038,018
| 0
| 1
| null | 2014-06-20T14:55:59
| 2014-06-20T13:01:13
|
C++
|
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
# Standard testthat entry point: load the package and run its test suite.
library(testthat)
library(ojUtils)
test_check("ojUtils")
|
eb04e0574d312a88378c0ebb427fab8b5b8d5cca
|
45d0314e631abe9743a594424b9f76db540cc3dc
|
/man/add_heroku_key.Rd
|
f2ccdf675d20ce2621be0d728a613df71de114fb
|
[] |
no_license
|
isabella232/circleci
|
ab93b3c34359c311058b426e40fdd1455b4e9132
|
9756f339d11af3885b83dcbba478dbc9a36ee174
|
refs/heads/master
| 2022-04-12T21:07:02.652984
| 2016-08-09T14:42:10
| 2016-08-09T14:42:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
rd
|
add_heroku_key.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/endpoints.R
\name{add_heroku_key}
\alias{add_heroku_key}
\title{Add Heroku key to Circle CI}
\usage{
add_heroku_key(...)
}
\arguments{
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, via \code{\link{circleHTTP}}.}
}
\value{
Something...
}
\description{
Add Heroku key to Circle CI
}
\details{
Add Heroku key to Circle CI
}
|
772a43406ff4fc9a24506b1a463d7e66353849ac
|
bb1e610719504eadec848afaeea6dcb0069bedc4
|
/EPL_League create features.R
|
18272ec15c70d0b8ae9474143acd9e20b00f1091
|
[] |
no_license
|
batemansogq/R_cde_bkup
|
c8a5a4ef3a5bf8024f631c79bf0063c485d08946
|
8b4a9f51ee28c067e869df538bdf3634d09b142b
|
refs/heads/master
| 2020-12-07T23:02:32.150473
| 2020-03-17T10:54:40
| 2020-03-17T10:54:40
| 67,341,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,081
|
r
|
EPL_League create features.R
|
#############################################################
# create the additional features
#############################################################
# Read in the source data produced by earlier stages: rounds/schedule,
# ladder and match results are ";"-separated; the team-name reference
# lookup uses ",".
setwd("E:/R/Football")
df_data_rds <- read.csv2( file="df_data_rds.csv", header = TRUE, sep=";")
df_data_lad <- read.csv2( file="df_data_lad.csv", header = TRUE, sep=";")
df_data_res <- read.csv2( file="df_data_res.csv", header = TRUE, sep=";")
ref_team <- read.csv2("FB_Ref_data_teams.csv", header =TRUE, sep=",")
# Load packages.  NOTE(review): require() returns FALSE rather than
# erroring when a package is missing; library() would fail fast.
x <- c("dplyr", "plyr", "zoo")
lapply(x, require, character.only=TRUE)
#==================================================================================
# group the table
#==================================================================================
# Bucket each ladder position into a table group:
#   pos 1-4 -> 1, 5-10 -> 2, 11-16 -> 3, 17+ -> 4.
# findInterval() counts how many of the cut points (5, 11, 17) each
# position has reached, so adding 1 yields the group; this replaces the
# original row-by-row if/else loop with one vectorised call.
df_data_lad$tab_grp <- findInterval(df_data_lad$pos, c(5, 11, 17)) + 1
#==================================================================================
# form H
#==================================================================================
# Attach the results-file team name for the home team (ref_team maps the
# ladder spelling onto the results spelling).
df_data_rds <- df_data_rds %>% left_join(ref_team, c("hm_tm" = "Ladder")) %>% select(-Rounds)
# Rename the joined column (position 6) to the home-team lookup key.
colnames(df_data_rds)[6]="Hm_Res_lk"
# Same lookup for the away team.
df_data_rds <- df_data_rds %>% left_join(ref_team, c("aw_tm" = "Ladder")) %>% select(-Rounds)
colnames(df_data_rds)[7]="Aw_Res_lk"
# Merge the schedule with match results, keyed on home team and date.
df_home <- df_data_rds %>% left_join(df_data_res, c("Hm_Res_lk"="HomeTeam", "dt"="Date"))
#remove 2009 season detail
#df_home <- subset(df_home, !is.na(df_home$aw_tm))
# Encode the full-time result from the home side's view:
# home win = 1, draw = 0, away win = -1 (plyr::mapvalues).
df_home$Res <-as.numeric(as.character(mapvalues(df_home$FTR, from=c("H", "D", "A"), to=c(1,0,-1))))
# Rolling 3-match form statistics for each home team.
#
# Uses the global `df_home` built above.  For each team, computes
# right-aligned 3-match rolling means (zoo::rollmean) of every match
# statistic and a rolling 3-match sum (zoo::rollsum) of the result column,
# back-fills the first two matches with the 1-match value / 2-match mean
# (partial sums for the result), drops the raw statistic columns and tags
# the rows with an "H" home flag.
# Returns: a data frame with team, date, the F3_* form columns, Frm3, H_A.
hm_form_3 <- function() {
  # Distinct list of teams to process.
  team_list <- df_home %>% distinct(hm_tm)
  # Accumulate each team's block in a list and bind once at the end;
  # growing a data frame with rbind() inside the loop is quadratic, and
  # the original's empty accumulator had mismatched columns.
  pieces <- vector("list", nrow(team_list))
  for (tm_idx in seq_len(nrow(team_list))) {
    # Per-team match history ordered by date, with rolling form columns.
    df_frm <- df_home %>%
      select(hm_tm, dt, FTHG, FTAG, HTHG, HTAG, HS, AS, HST, AST, HF, AF, HC,
             AC, HY, AY, HR, AR, Res) %>%
      dplyr::filter(hm_tm == team_list[tm_idx, 1]) %>%
      arrange(dt) %>%
      mutate(F3_FTHG = rollmean(FTHG, 3, align = "right", fill = 0),
             F3_FTAG = rollmean(FTAG, 3, align = "right", fill = 0),
             F3_HTHG = rollmean(HTHG, 3, align = "right", fill = 0),
             F3_HTAG = rollmean(HTAG, 3, align = "right", fill = 0),
             F3_HS = rollmean(HS, 3, align = "right", fill = 0),
             F3_AS = rollmean(AS, 3, align = "right", fill = 0),
             F3_HST = rollmean(HST, 3, align = "right", fill = 0),
             F3_AST = rollmean(AST, 3, align = "right", fill = 0),
             F3_HF = rollmean(HF, 3, align = "right", fill = 0),
             F3_AF = rollmean(AF, 3, align = "right", fill = 0),
             F3_HC = rollmean(HC, 3, align = "right", fill = 0),
             F3_AC = rollmean(AC, 3, align = "right", fill = 0),
             F3_HY = rollmean(HY, 3, align = "right", fill = 0),
             F3_AY = rollmean(AY, 3, align = "right", fill = 0),
             F3_HR = rollmean(HR, 3, align = "right", fill = 0),
             F3_AR = rollmean(AR, 3, align = "right", fill = 0),
             Frm3 = rollsum(Res, 3, align = "right", fill = 0))
    # The rolling window zero-fills rows 1-2; back-fill them.  Columns 3:18
    # hold the raw stats and columns 20:35 the matching F3_ versions
    # (column 19 is Res, 36 is Frm3).  Renamed from the original's inner
    # `i`, which shadowed the outer loop index.
    for (stat in 1:16) {
      df_frm[1, (stat + 19)] <- df_frm[1, (stat + 2)]
      df_frm[2, (stat + 19)] <- mean(df_frm[1:2, (stat + 2)])
    }
    # Frm3 is a rolling sum, so back-fill with the partial sums.
    df_frm$Frm3[1] <- df_frm$Res[1]
    df_frm$Frm3[2] <- sum(df_frm$Res[1:2])
    pieces[[tm_idx]] <- df_frm
  }
  frm <- do.call(rbind, pieces)
  # Drop the raw statistic columns, keeping team, date and form columns.
  frm <- frm[, -3:-19]
  # Flag these rows as home-side form.
  frm$H_A <- "H"
  frm
}
# Compute the 3-match form for every home team...
df_frm <- hm_form_3()
# ...and join it back onto the home data, keyed on team and date.
df_home <- df_home %>% left_join(df_frm, c("hm_tm"="hm_tm", "dt"="dt"))
# Drop the duplicate column (position 8) introduced by the merge.
df_home <- df_home[,-8]
#==================================================================================
# form AW
#==================================================================================
# Merge the schedule with match results, keyed on away team and date.
df_away <- df_data_rds %>% left_join(df_data_res, c("Aw_Res_lk" = "AwayTeam", "dt"="Date"))
#remove 2009 season detail
#df_away <- subset(df_away, !is.na(df_away$aw_tm))
# Encode the result from the away side's view: away win = 1, draw = 0,
# home win = -1 (sign flipped relative to df_home$Res).
df_away$Res <-as.numeric(as.character(mapvalues(df_away$FTR, from=c("H", "D", "A"), to=c(-1,0,1))))
# Rolling 3-match form statistics for each away team (mirror of hm_form_3).
#
# Uses the global `df_away` built above.  Computes right-aligned 3-match
# rolling means of every match statistic and a rolling 3-match sum of the
# result, back-fills the first two matches, drops the raw statistic
# columns and tags rows with an "A" away flag.
# Returns: a data frame with team, date, the F3_* form columns, Frm3, H_A.
aw_form_3 <- function() {
  # Distinct list of teams to process.
  team_list <- df_away %>% distinct(aw_tm)
  # Accumulate per-team blocks in a list; one bind at the end avoids the
  # original's quadratic rbind() growth and mismatched empty accumulator.
  pieces <- vector("list", nrow(team_list))
  for (tm_idx in seq_len(nrow(team_list))) {
    # Per-team match history ordered by date, with rolling form columns.
    df_frm <- df_away %>%
      select(aw_tm, dt, FTHG, FTAG, HTHG, HTAG, HS, AS, HST, AST, HF, AF, HC,
             AC, HY, AY, HR, AR, Res) %>%
      dplyr::filter(aw_tm == team_list[tm_idx, 1]) %>%
      arrange(dt) %>%
      mutate(F3_FTHG = rollmean(FTHG, 3, align = "right", fill = 0),
             F3_FTAG = rollmean(FTAG, 3, align = "right", fill = 0),
             F3_HTHG = rollmean(HTHG, 3, align = "right", fill = 0),
             F3_HTAG = rollmean(HTAG, 3, align = "right", fill = 0),
             F3_HS = rollmean(HS, 3, align = "right", fill = 0),
             F3_AS = rollmean(AS, 3, align = "right", fill = 0),
             F3_HST = rollmean(HST, 3, align = "right", fill = 0),
             F3_AST = rollmean(AST, 3, align = "right", fill = 0),
             F3_HF = rollmean(HF, 3, align = "right", fill = 0),
             F3_AF = rollmean(AF, 3, align = "right", fill = 0),
             F3_HC = rollmean(HC, 3, align = "right", fill = 0),
             F3_AC = rollmean(AC, 3, align = "right", fill = 0),
             F3_HY = rollmean(HY, 3, align = "right", fill = 0),
             F3_AY = rollmean(AY, 3, align = "right", fill = 0),
             F3_HR = rollmean(HR, 3, align = "right", fill = 0),
             F3_AR = rollmean(AR, 3, align = "right", fill = 0),
             Frm3 = rollsum(Res, 3, align = "right", fill = 0))
    # Back-fill rows 1-2 of the zero-filled rolling columns: columns 3:18
    # hold the raw stats, 20:35 the F3_ versions (19 is Res, 36 is Frm3).
    # Renamed from the original's inner `i`, which shadowed the outer index.
    for (stat in 1:16) {
      df_frm[1, (stat + 19)] <- df_frm[1, (stat + 2)]
      df_frm[2, (stat + 19)] <- mean(df_frm[1:2, (stat + 2)])
    }
    # Frm3 is a rolling sum, so back-fill with the partial sums.
    df_frm$Frm3[1] <- df_frm$Res[1]
    df_frm$Frm3[2] <- sum(df_frm$Res[1:2])
    pieces[[tm_idx]] <- df_frm
  }
  aw_frm <- do.call(rbind, pieces)
  # Drop the raw statistic columns, keeping team, date and form columns.
  aw_frm <- aw_frm[, -3:-19]
  # Flag these rows as away-side form.
  aw_frm$H_A <- "A"
  aw_frm
}
# Compute the 3-match form for every away team and join it back onto the
# away data, keyed on team and date.
df_frm <- aw_form_3()
df_away <- df_away %>% left_join(df_frm, c("aw_tm" = "aw_tm", "dt"="dt"))
# Drop the duplicate column (position 8) introduced by the merge.
df_away <- df_away[,-8 ]
#==================================================================================
# Season form
#==================================================================================
# Join the ladder to the schedule for home fixtures, keyed on team, round
# and season.
df_Stg_H <- df_data_lad %>% left_join(df_data_rds, c("tm" = "hm_tm", "Rd"="i", "Year"="Season"))
# Keep only rows that matched a fixture (i.e. this team played at home).
df_Stg_H <- subset(df_Stg_H, !is.na(df_Stg_H$dt))
# Column 11 holds the opposing side after the join.
colnames(df_Stg_H)[11] <- "Opp.Team"
# Join on the match/form detail to complete the home view.
df_Stg_H <- df_Stg_H %>% left_join(df_home, c("tm" = "hm_tm", "Rd"="i", "dt"="dt"))
# Drop duplicated columns (positions 13:18) from the merge.
df_Stg_H <- df_Stg_H[,c( -13:-18 ) ]
# Repeat the same staging for away fixtures.
df_Stg_A <- df_data_lad %>% left_join(df_data_rds, c("tm" = "aw_tm", "Rd"="i", "Year"="Season"))
df_Stg_A <- subset(df_Stg_A, !is.na(df_Stg_A$dt))
colnames(df_Stg_A)[11] <- "Opp.Team"
df_Stg_A <- df_Stg_A %>% left_join(df_away, c("tm" = "aw_tm", "Rd"="i", "dt"="dt"))
df_Stg_A <- df_Stg_A[,c( -13:-18 ) ]
# Stack home and away rows into one complete team-round listing.
df_full <- rbind(df_Stg_H, df_Stg_A)
# Season-to-date form for every (team, season, round) combination.
#
# Uses the global `df_full` built above.  For round r of each team-season
# the SE_* columns hold the mean of each match statistic over rounds 1..r
# (right-aligned rolling window of width r, so only the r-th row is
# meaningful) and SE_FRM the running sum of results; that r-th row is
# kept.  Unlike the 3-match helpers, the raw statistic columns are
# retained here and trimmed by the caller.
season_form <- function() {
  # Distinct (team, season) pairs to process.
  team_seasons <- df_full %>% distinct(tm, Year)
  # One slot per (team-season, round); a single bind at the end replaces
  # the original's quadratic rbind() growth and mismatched accumulator.
  pieces <- vector("list", nrow(team_seasons) * 38)
  for (ts in seq_len(nrow(team_seasons))) {
    # One team-season, ordered by round.
    df_sea_frm <- df_full %>%
      # Using Rd for ease; arguably should be date-based ordering.
      select(tm, Rd, Year, FTHG, FTAG, HTHG, HTAG, HS, AS, HST, AST, HF, AF, HC,
             AC, HY, AY, HR, AR, Res) %>%
      dplyr::filter(tm == team_seasons[ts, 1], Year == team_seasons[ts, 2]) %>%
      arrange(Rd)
    # The original reused `i` for this inner loop, shadowing the outer
    # index; renamed to `rd` for clarity.
    for (rd in 1:38) {
      # A right-aligned window of width rd over the first rd rounds
      # collapses to the season-to-date mean (sum for SE_FRM) in row rd.
      df_rd_frm <- df_sea_frm %>% filter(Rd <= rd) %>%
        mutate(SE_FTHG = rollmean(FTHG, rd, align = "right", fill = 0),
               SE_FTAG = rollmean(FTAG, rd, align = "right", fill = 0),
               SE_HTHG = rollmean(HTHG, rd, align = "right", fill = 0),
               SE_HTAG = rollmean(HTAG, rd, align = "right", fill = 0),
               SE_HS = rollmean(HS, rd, align = "right", fill = 0),
               SE_AS = rollmean(AS, rd, align = "right", fill = 0),
               SE_HST = rollmean(HST, rd, align = "right", fill = 0),
               SE_AST = rollmean(AST, rd, align = "right", fill = 0),
               SE_HF = rollmean(HF, rd, align = "right", fill = 0),
               SE_AF = rollmean(AF, rd, align = "right", fill = 0),
               SE_HC = rollmean(HC, rd, align = "right", fill = 0),
               SE_AC = rollmean(AC, rd, align = "right", fill = 0),
               SE_HY = rollmean(HY, rd, align = "right", fill = 0),
               SE_AY = rollmean(AY, rd, align = "right", fill = 0),
               SE_HR = rollmean(HR, rd, align = "right", fill = 0),
               SE_AR = rollmean(AR, rd, align = "right", fill = 0),
               SE_FRM = rollsum(Res, rd, align = "right", fill = 0))
      # NOTE(review): as in the original, if a team has fewer than rd rows
      # (e.g. a missing round) this selects an all-NA row.
      pieces[[(ts - 1) * 38 + rd]] <- df_rd_frm[rd, ]
    }
  }
  do.call(rbind, pieces)
}
# Compute the season-to-date form for every team-round...
df_sea <- season_form()
# ...keep only the identifiers plus SE_ columns (drop raw stats 4:20)...
df_sea <- df_sea[,c(-4:-20)]
# ...and join back onto the full team-round data set.
df_full <- df_full %>% left_join(df_sea, c("tm" = "tm", "Year"="Year", "Rd"="Rd"))
# Snapshot for manual inspection.
write.csv(df_full, file="E://R/Football/df_full.csv", row.names = FALSE)
#==================================================================================
# make model hm and away detail
#==================================================================================
# Re-order so the identifier columns (1, 3, 11:12) lead the frame.
df_full <- cbind(df_full[,c(1,3,11:12)],df_full[,c(-1,-3,-11:-12)] )
# Current-round detail, including the home/away indicator.
head(df_full[, c(1:4, 31:39, 58)])
# Detail from previous rounds (candidate lag features).
head(df_full[,c(1:30, 41:57, 59:75)])
## refactor for updated df
# Build the one-round-lagged feature set per team-season:
# Build a one-round-lagged view of df_full (a global): within each
# (team, season), every statistic row is shifted down one round so round r
# carries the values observed in round r - 1; round 1 becomes all-NA.
# Lagged columns are prefixed "Lg_".
# Returns: a data frame of 5 identifier columns plus 59 Lg_ columns.
lag_set <- function() {
  # Distinct (team, season) pairs to lag independently.
  team_seasons <- df_full %>% distinct(tm, Year)
  pieces <- vector("list", nrow(team_seasons))
  for (ts in seq_len(nrow(team_seasons))) {
    # One team-season (identifier + stat columns only), ordered by round.
    df_lag_frm <- df_full[, c(1:30, 41:57, 59:75)] %>%
      dplyr::filter(tm == team_seasons[ts, 1], Year == team_seasons[ts, 2]) %>%
      arrange(Rd)
    # Stats block without the identifier columns; drop the final round
    # (row 38) so that prepending an NA row shifts everything down by one.
    # NOTE(review): assumes a full 38-round season, as the original did.
    df_lag_data <- df_lag_frm[-38, c(5:10, 12:64)]
    # All-NA first row sized to the actual column count.  (The original
    # built a hard-coded 61-element vector via seq(0, 60, 1) against
    # these 59 columns, a length mismatch.)
    lag_rw <- rep(NA, ncol(df_lag_data))
    df_lag_data <- rbind(lag_rw, df_lag_data)
    # Re-attach the identifier columns for this team-season.
    pieces[[ts]] <- cbind(df_lag_frm[, c(1:4, 11)], df_lag_data)
  }
  # Bind once at the end; growing with rbind() in the loop is quadratic.
  df_lag_res <- do.call(rbind, pieces)
  # Prefix every lagged stat column.
  colnames(df_lag_res)[6:64] <- paste0("Lg_", names(df_lag_res[, 6:64]))
  df_lag_res
}
# Build the lagged feature set (one row per team-round, stats shifted back
# one round within each team-season).
df_lag <- lag_set()
# Combine current-round identifier columns with the lagged features.
# NOTE(review): plyr::join's `by` expects plain common column names (unlike
# dplyr); the named vector works here only because names equal values.
df_lag_full <- df_full[, c(1:4,11, 31:39, 58)] %>%
  join(df_lag[,c(-3, -4)], c( "Rd"="Rd", "tm" = "tm", "Year"="Year"))
##########################################
# start here
# sort the betting stuff out, relate to team, not H & A
##########################################
# Snapshot for manual inspection.
write.csv(df_lag_full, file="E://R/Football/df_lag_full.csv", row.names = FALSE)
# Mirror of the lagged features for the opposing team (drop columns 3 and
# 6:14 first), with every remaining column prefixed "Opp_".
df_lag_opp <- df_lag_full[,c(-3,-6:-14)]
colnames(df_lag_opp) <- paste0("Opp_", names(df_lag_opp))
# Join each row's opponent features on (opponent, season, round).
df_model <- df_lag_full %>% left_join(df_lag_opp, c("Opp.Team" = "Opp_tm", "Year"="Opp_Year", "Rd"="Opp_Rd"))
# Round 1 has no lagged history (all NA) -- drop it for modelling.
df_model<- subset(df_model, Rd != 1)
# Attach the match outcome as the modelling target, keyed on team and date.
df_model <- df_full %>% select(tm, dt, Res) %>% inner_join(df_model, c("tm"="tm", "dt"="dt"))
# Export the final model data set.
write.csv(df_model, file="E://R/Football/df_model.csv", row.names = FALSE)
|
eea6f8cb653363b835a6fdb2bdf38cd24c7e2c2f
|
78651b8c0ee5e2de85d786d2c3879268592bb281
|
/Estudio-enfermedades-autoinmunes/v4/GenerateHeatMaps.R
|
cbce8fb2acc343248e286a3f61c378f8e96ee490
|
[] |
no_license
|
kdis-lab/IMIBIC-projects
|
3a42b34f7b5ea6119dd312939a85c300cf169cd2
|
bd4e222fa61f089a576ba1f5e6d82056cf876e78
|
refs/heads/master
| 2020-11-23T22:41:21.153444
| 2019-12-16T12:57:31
| 2019-12-16T12:57:31
| 227,834,349
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,025
|
r
|
GenerateHeatMaps.R
|
# Generate annotated heatmaps for hierarchical clustering configurations
# that reach a minimum AUC, one image file per qualifying configuration.
library(pheatmap)
library(Cairo)
# NOTE(review): absolute path -- adjust for your environment.
setwd("/media/oscar/DATA/OneDrive - Universidad de Córdoba/workspace/IMIBIC/")
# Render a clustered heatmap of `data` to an image file via Cairo/pheatmap.
#
# data:     data frame whose LAST column holds the class label ("N"/"S",
#           shown as a red/green annotation track); the remaining columns
#           are the features, plotted as heatmap rows (matrix transposed).
# imgName:  output file name stem; `format` is appended as the extension
#           and passed to Cairo as the image type.
# smplDist: distance measure for both row and column clustering.
# clstDist: agglomeration method passed to pheatmap.
# viewOpt:  accepted for backward compatibility but currently unused.
# rowV/colV: whether to cluster the columns/rows of the transposed matrix.
# border:   draw grey cell borders when TRUE.
# Returns the value of dev.off(); called for its file side effect.
PlotHeatMap <- function(data, imgName = "HeatMap", format = "png",
                        smplDist = "euclidean", clstDist = "ward.D",
                        viewOpt = "detail", rowV = TRUE, colV = TRUE,
                        border = TRUE) {
  # Split the class column (last) from the feature columns.
  classes <- data[ncol(data)]
  data <- data[-ncol(data)]
  # Truncate over-long feature names and align row names between the
  # feature matrix and the class annotation.
  colnames(data) <- substr(colnames(data), 1, 18)
  rownames(data) <- seq_len(nrow(data))   # seq_len is safe for 0 rows
  rownames(classes) <- rownames(data)
  imgName <- paste0(imgName, ".", format)
  # Size the device at 18 px per sample/feature plus margins, at least
  # 630 px wide, converted to inches at 72 dpi.
  myW <- max(nrow(data) * 18 + 150, 630)
  w <- round(myW / 72, 2)
  myH <- ncol(data) * 18 + 150
  h <- round(myH / 72, 2)
  border.col <- if (border) "grey60" else NA
  Cairo(file = imgName, unit = "in", dpi = 72, width = w, height = h,
        type = format, bg = "white")
  # Annotation colours for the clinical class track.
  ann_colors <- list(Clinico = c(N = "red", S = "green"))
  pheatmap(t(data), fontsize = 8, fontsize_row = 8,
           clustering_distance_rows = smplDist,
           clustering_distance_cols = smplDist,
           clustering_method = clstDist,
           border_color = border.col,
           cluster_rows = colV, cluster_cols = rowV,
           annotation_col = classes, annotation_colors = ann_colors)
  dev.off()
}
# Distance measure used in clustering rows: "correlation" (Pearson) or any
# distance supported by dist() -- "euclidean", "maximum", "manhattan",
# "canberra", "binary", "minkowski"; anything else is assumed to be a
# precomputed distance matrix.
clustering_distance_rows <- "euclidean"
# Distance measure used in clustering columns.
clustering_distance_cols <- "euclidean"
# Agglomeration method: one of "ward.D", "ward.D2", "single", "complete",
# "average" (= UPGMA), "mcquitty" (= WPGMA), "median" (= WPGMC) or
# "centroid" (= UPGMC).
clustering_method <- "ward.D"
# Whether values should be centered/scaled by "row", "column" or "none".
# NOTE(review): scaleOpt is assigned but never referenced below -- confirm
# whether it should be forwarded to pheatmap (scale = ...).
scaleOpt <- "row"
# Dataset stems: cell type (Linfo/Mono/Neutro) x clinical variable.
datasetNames <- c("LinfoAnt-preproc", "MonoAnt-preproc", "NeutroAnt-preproc",
                  "LinfoCMIT-preproc", "MonoCMIT-preproc", "NeutroCMIT-preproc",
                  "LinfoComplicObstet-preproc", "MonoComplicObstet-preproc", "NeutroComplicObstet-preproc",
                  "LinfodsDNA-preproc", "MonodsDNA-preproc", "NeutrodsDNA-preproc",
                  "LinfoHta-preproc", "MonoHta-preproc", "NeutroHta-preproc",
                  "LinfoTrombosis-preproc", "MonoTrombosis-preproc", "NeutroTrombosis-preproc")
# Minimum AUC a clustering configuration must reach to be plotted.
threshold <- 0.80
# For each dataset: load the data and its clustering configurations, then
# plot a heatmap for every configuration whose AUC reaches the threshold.
for(datasetName in datasetNames){
  datasetPath <- paste("datasets/enfermedades-autoinmunes/v4/with0s/",
                       datasetName, sep = "")
  data <- read.csv(paste(datasetPath,".csv", sep=""))
  # Load the CSV with cluster configurations (one row per configuration).
  pathOutput <- paste("reports/enfermedades-autoinmunes/v4/with0s/",
                      datasetName, "v2/HierarchicalClusterer/notAllSignificantFactors", sep = "")
  clusters <- read.csv(paste(pathOutput,".csv",sep=""))
  # Guard against empty files: 1:nrow(clusters) would run on 0 rows.
  if(nrow(clusters)!=0){
    for(i in 1:nrow(clusters)){
      auc <- clusters[i,"AUC"]
      if(auc >= threshold){
        # "Atts" is a space-separated list of attribute names; map them
        # onto column positions in the data.
        atts <- clusters[i,"Atts"]
        items <- unlist(strsplit(as.character(atts), " "))
        indexes <- match(items, colnames(data))
        # Keep the class column (last) alongside the selected attributes.
        indexes <- c(indexes,ncol(data))
        newData <- data [,indexes]
        imgName <- paste(pathOutput,i,sep = "")
        PlotHeatMap(data = newData, imgName = imgName, smplDist = clustering_distance_rows,
                    clstDist = clustering_method)
      }
    }
  }
}
|
fb950ff3eb0edee26567a0d826cd6c66060c08df
|
fabdac62fcb0951d98d71a8ec0604900479bfba8
|
/tests/testthat/test-docs.R
|
5a1cdc66631c3febd1ac20ab0224a32fa6c273f3
|
[
"MIT"
] |
permissive
|
r-lib/generics
|
bec3bcb8b7f55fbda1e22998b562d067e6cb268a
|
387397154bb8488a0b6dfbcbba91b781d598ee0e
|
refs/heads/main
| 2022-07-11T20:36:04.652473
| 2022-07-05T21:25:22
| 2022-07-05T21:25:22
| 137,095,400
| 53
| 18
|
NOASSERTION
| 2022-07-05T21:23:47
| 2018-06-12T15:55:26
|
R
|
UTF-8
|
R
| false
| false
| 1,296
|
r
|
test-docs.R
|
# Tests for the Rd-generation helper `methods_rd()`.  Each test loads one
# or more fixture packages via local_load_all() (a test helper defined
# elsewhere; presumably load_all with automatic cleanup -- confirm) and
# snapshots the rendered methods listing.
test_that("generics methods can be reexported and extended", {
  local_load_all("testGenericsExtension")
  expect_snapshot(methods_rd("tidy"))
})
test_that("multiple packages have multiple headers", {
  local_load_all("testMultiMethod")
  local_load_all("testMultiPackage")
  expect_snapshot(methods_rd("multi_method"))
})
test_that("S4 bullets print with no issues", {
  local_load_all("testS4Docs")
  expect_snapshot(methods_rd("multi_method"))
})
test_that("S4 and S3 packages can intermingle", {
  local_load_all("testS4Docs")
  local_load_all("testMultiMethod")
  expect_snapshot(methods_rd("multi_method"))
})
test_that("multiple methods but same rdname are comma separated", {
  local_load_all("testSameRd")
  expect_snapshot(methods_rd("same_rd_name"))
})
test_that("single method is correctly itemized", {
  local_load_all("testSingleMethod")
  expect_snapshot(methods_rd("single_method"))
})
test_that("multiple methods are correctly itemized", {
  local_load_all("testMultiMethod")
  expect_snapshot(methods_rd("multi_method"))
})
# The no-method and unknown-generic cases have exact expectations rather
# than snapshots.
test_that("no methods case returns default output", {
  expect_equal(
    methods_rd("methods_rd"),
    "No methods found in currently loaded packages."
  )
})
test_that("nonexistant generic is an error", {
  expect_error(methods_rd("xyz"))
})
|
d956e3f39ff3460d2204e5b92e80d6c2c7432691
|
77f742348fd6b898343aa54adc849e1026be345e
|
/code/BEdaily2_grid_membership_usa_state.R
|
23982997ddb282acb7294bf211cc9e50dfc7f9f3
|
[] |
no_license
|
jshrader/data_weather
|
07df2d30b85436b52a7b09b8386e6507abef3257
|
8937b766cc967cdea4d4f41dd1b6e01108f16dec
|
refs/heads/master
| 2022-12-25T08:36:40.399001
| 2022-12-14T17:16:16
| 2022-12-14T17:16:16
| 431,595,709
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,297
|
r
|
BEdaily2_grid_membership_usa_state.R
|
## R code to find the state membership of data from Berkeley Earth.
##
## Code by Jeff Shrader
## 2021-05-20
##
## To do:
##
## Sources and citations:
# Preliminaries -----------------------------------------------------------
rm(list = ls())
packages <- c('sp','rgeos','grid','ggplot2','sf','reticulate',
'stringr','rgdal','maptools','raster','maptools',
'parallel','colorRamps','lubridate','data.table',
'tictoc','pbmcapply','haven')
new_packages <- packages[!(packages %in% installed.packages()[,'Package'])]
if(length(new_packages)) install.packages(new_packages)
# Attach every package named in `packages` (defined earlier in this script).
lapply(packages, library, character.only = TRUE)
# Toggles and Switches -----------------------------------------------------
# If you want to create graphs to check that your output files make sense, set
# this to TRUE. Otherwise FALSE.
check_file <- TRUE
# Define Directories -------------------------------------------------------
# Many of these might be the same thing depending on your environment
# Project directory (top level)
base_dir <- "~/Dropbox/research/projects/active/elites/"
# data directory
data_dir <- '/media/jgs/datadrive/data/weather/berkeley_earth/'
# Place where shapefiles are stored
map_dir <- paste0("/home/jgs/Dropbox/research/data/maps/usa/state/cb_2014_us_state_500k/")
# Place where you want maps (for debugging and data checking) to go
graph_dir <- paste0(base_dir, "output/graph/data_creation/")
# Place where output data should go
out_dir <- paste0(base_dir, "data/")
# Import Shapefiles -----------------------------------------------------
setwd(map_dir)
# Import the Shapefile for your geographic level
shp <- shapefile(paste0(map_dir,"cb_2014_us_state_500k.shp"))
# Generate a data.frame from the polygons
poly <- as.data.table(as.data.frame(shp@data, xy=TRUE))
names(poly) <- tolower(names(poly))
# data.table in-place assignment: give each polygon a sequential integer ID.
poly[,ID:=1:.N]
# Load data --------------------------------------------------------------
# Load a single, representative file (which for nicely formatted ERA data is any one of the)
# so we can determine grid membership
#file <- paste0(data_dir,"Complete_TAVG_LatLong1.nc")
file <- paste0(data_dir,"daily/Complete_TMAX_Daily_LatLong1_1920.nc")
# The file is a brick in reality (one slice per month) but we just need
# one of the slices to find spatial membership.
# We will take a layer from one of the post 1880 years because that is the
# period on which we will focus. It shouldn't matter, but I just want to be
# safe in case something weird happens with missing values.
data <- raster(paste0(file), varname=c("temperature"))
clim <- brick(paste0(file), varname=c("climatology"))
#data <- brick(paste0(file))
crsData <- crs(data) # Define System of Data
# We expect this to be 1 by 1 for the base data and 0.25 by 0.25 for the CONUS data
resData <- res(data) # Define resolution of data
# Project Shape File into Data System
## If want to change coordinates: extent(rasterdata) <- c(36,37,-3,-2)
## Assign Projection: WGS84 (World Geodetic System - standard for cartography.
## 84 is the latest version which is from 1984) - Use same as shape file
projection(data) <- shp@proj4string
# Find spatial membership for each point ---------------------------------
# One row per (polygon, overlapping grid cell): polygon ID, cell number, the
# raster value for that cell, and the coverage weight (not normalized).
cell_member <- as.data.table(suppressWarnings(raster::extract(data, shp, cellnumbers=TRUE, df=TRUE, small=TRUE, weights=TRUE, normalizeWeights=FALSE)))
cell_member <- cell_member[!is.na(cell)]
# Sanity check: verify the extracted cell numbers line up with the raster.
# Join the polygon-extracted rows to the full raster table on cell number and
# confirm the value pulled via the shapefile (column 3 of cell_member) equals
# the value read straight from the raster (last merged column).
# Fixes the original, which compared m[, 3] with itself (always TRUE) and
# silently discarded the result when the script was sourced.
if (check_file == TRUE) {
  temp <- as.data.table(as.data.frame(data, xy = TRUE))
  temp[, cell := 1:.N]
  m <- merge(cell_member, temp, by = c("cell"))
  # print() so the TRUE/FALSE verdict is visible even under source().
  print(identical(m[[3]], m[[ncol(m)]]))
  rm(m)
  rm(temp)
}
# Check that all locations received at least a few grid points. Small locations like
# Washington DC can slip through the cracks.
if(identical(length(unique(poly$ID)), length(unique(cell_member$ID)))==FALSE){
warning("Some geographic units were not matched with any grid points. This might be benign (for example, due to CONUS data and a complete USA shapefile), but make sure.")
}
# Count of matched grid points
cell_member[, count:=.N, by=ID]
few_cells <- unique(cell_member[count<3][, .(ID,count)])
look_into <- merge(poly[ID%in%few_cells$ID], few_cells, by=c("ID"), all=TRUE)
## These locations only had a few grid points (1 or 2). Maybe make sure that
# you don't need to densify your grid or use something like centroid matching.
look_into
# Check that the weather values make sense (warmer closer to equator, eg)
# Column 3 is the raster layer value pulled by raster::extract above.
names(cell_member)[3] <- "weather"
centroid <- as.data.frame(gCentroid(shp, byid=TRUE))
names(centroid) <- c("c_lon","c_lat")
centroid_with_id <- as.data.table(cbind(shp@data[, "GEOID"],centroid))
names(centroid_with_id)[1] <- "geoid"
test <- merge(centroid_with_id, poly, by=c("geoid"))
test <- merge(test, cell_member, by=c("ID"))
# If this is the level of temperature, the slope coef should be negative.
# If it is another variable, all bets are off.
summary(lm(test$weather ~ test$c_lat))
# Look at unmatched locations
test[, .(mean(weather, na.rm=TRUE), mean(count)), by=c("name")][is.na(V1)]
test_c <- test[, mean(weather, na.rm=TRUE), by=c("name", "c_lon","c_lat")]
test_c <- test_c[!is.nan(V1)]
# Visual check: mean value at each centroid, colored yellow (low) to red (high).
ggplot(data=test_c, aes(x=c_lon,y=c_lat, color=V1)) + geom_point() +
geom_text(aes(label=name),hjust=0, vjust=0) +
scale_color_gradient(low = "yellow", high = "red") + theme_void(base_size=20)
ggsave(filename=paste0(graph_dir, "temperature_centroid_match_check_tmax_daily.pdf"))
# Drop weather: we just want the spatial membership
cell_member <- cell_member[, weather:=NULL][
, count := NULL]
## Merge with admin boundaries to get county membership of each grid cell
## This is what needs to be saved to merge with each weather file.
mem <- merge(cell_member, poly, by=c("ID"))
# To check that you have grid points in each member, you can use
# cell_member_admin[, .N, by=.(NAME_3)][1:141]
# Save the membership file for use when calculating averages for the other
# locations.
fname <- paste0(data_dir,"daily_1by1_grid_membership.rds")
saveRDS(object=mem, file=fname)
# Make a dataset that contains the names of locations, IDs, and other info
locs <- merge(centroid_with_id, poly, by=c("geoid"))
fname <- paste0(data_dir,"location_names.dta")
write_dta(data=locs, path=fname)
# EOF
|
4714c878f136784ae35974e64014ec287b31dd2e
|
330224d3b01919ef0cdab07a4cbb2271951a9d67
|
/R/Clayton.Markov.MLE.R
|
45289a73c6f88445439bd03973fd1a5010848ab6
|
[] |
no_license
|
cran/Copula.Markov
|
28923cdffa68a2ea69b02007080204bf5f02878b
|
f27c0753c1d586d5c12f1579ddba6754d84f64b4
|
refs/heads/master
| 2021-12-08T22:36:21.425421
| 2021-11-29T04:40:13
| 2021-11-29T04:40:13
| 32,750,024
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,624
|
r
|
Clayton.Markov.MLE.R
|
# Clayton.Markov.MLE
# Maximum-likelihood estimation for a stationary Markov chain whose serial
# dependence follows a Clayton copula with N(mu, sigma^2) marginals, plus a
# Shewhart-style control chart.
# Arguments:
#   Y      - numeric time series.
#   k      - multiplier for the control limits (mu.hat +/- k * sigma.hat).
#   D      - half-width of the uniform perturbation used to redraw the alpha
#            starting value when Newton-Raphson restarts.
#   plot   - if TRUE, draw the control chart (center line, LCL, UCL).
#   GOF    - if TRUE, draw an empirical-vs-fitted CDF plot.
#   method - "nlm" (default; numerical optimization) or "Newton" (hand-coded
#            Newton-Raphson with analytic derivatives).
# Returns a list: estimates of mu/sigma/alpha (with SEs and 95% CIs under
# "nlm"; NA under "Newton"), control limits, out-of-control indices,
# gradient/Hessian diagnostics, Cramer-von Mises and Kolmogorov-Smirnov
# statistics, and the maximized log-likelihood.
Clayton.Markov.MLE <-
function(Y,k=3,D=1,plot=TRUE,GOF=FALSE, method = "nlm"){
n=length(Y) ##sample size##
# Negative log-likelihood with sigma and alpha on the log scale so that an
# unconstrained optimizer keeps them positive. C_11 is the Clayton copula
# density d^2 C(u,v) / du dv.
log.L = function(par){
mu = par[1]
sigma = exp(par[2])
alpha = exp(par[3])
g_yt = dnorm(Y, mean = mu, sd = sigma)
G_yt = pnorm(Y, mean = mu, sd = sigma)
C_11 = function(u,v){
(1+alpha)*(u^(-alpha)+v^(-alpha)-1)^(-1/alpha-2)*v^(-alpha-1)*u^(-alpha-1)
}
res = sum( log(g_yt) ) + sum( log( C_11( G_yt[-n], G_yt[-1] ) ) )
return(-res)
}
if (method == "Newton"){
# Hand-coded Newton-Raphson with analytic score and Hessian. Throughout,
# Y[1:n-1] parses as Y[(1:n)-1]; the zero index is silently dropped, so it
# selects the first n-1 observations (lags) while Y[2:n] gives the leads.
# All derivative functions below return the MEAN (divided by n) of the
# per-observation contributions.
G=function(y,mu,sigma){pnorm((y-mu)/sigma)} #G function
g=function(y,mu,sigma){dnorm((y-mu)/sigma)} #g function
##############Log-likelihood function #################
L_function=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
A=U_t_1^(-alpha)+U_t^(-alpha)-1
Z=log(1+alpha)-(1+alpha)*log(U_t_1)-(1+alpha)*log(U_t)-(1/alpha+2)*log(A)
ZZ=log(g(Y[1:n],mu,sigma)/sigma)
return((sum(Z)+sum(ZZ))/n)
}
##############dL/dmu############################
dL_dmu=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
A1=u_t_1/U_t_1+u_t/U_t
A2=(u_t_1*U_t_1^(-(alpha+1))+u_t*U_t^(-(alpha+1)))/(U_t_1^(-alpha)+U_t^(-alpha)-1)
A=(alpha+1)/sigma*A1-(2*alpha+1)/sigma*A2
A3=(Y[1:n]-mu)/sigma^2
return((sum(A)+sum(A3))/n)
}
############dL/dsigma###########################
dL_dsigma=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
B1=(Y[1:n-1]-mu)/sigma^2*u_t_1/U_t_1
B2=(Y[2:n]-mu)/sigma^2*u_t/U_t
B3=(Y[1:n-1]-mu)/sigma^2*U_t_1^(-(alpha+1))*u_t_1
B4=(Y[2:n]-mu)/sigma^2*U_t^(-(alpha+1))*u_t
B5=(U_t_1^(-alpha)+U_t^(-alpha)-1)
B=(alpha+1)*B1+(alpha+1)*B2-(2*alpha+1)*(B3+B4)/B5
B6=(Y[1:n]-mu)^2/sigma^3-1/sigma
return((sum(B)+sum(B6))/n)
}
############dL/dalpha#############################
dL_dalpha=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
E1=log(U_t_1*U_t)
E2=log( U_t_1^(-alpha)+U_t^(-alpha)-1 )/alpha^2
E3=U_t_1^(-alpha)*log(U_t_1)+U_t^(-alpha)*log(U_t)
E5=(U_t_1^(-alpha)+U_t^(-alpha)-1)
E=1/(1+alpha)-E1+E2+(2+1/alpha)*E3/E5
return(sum(E)/n)
}
############F function##############################
# Score vector: gradient of the mean log-likelihood in (mu, sigma, alpha).
F=function(mu,sigma,alpha){
c(dL_dmu(mu,sigma,alpha),dL_dsigma(mu,sigma,alpha),dL_dalpha(mu,sigma,alpha))
}
############d^2L/dmu^2#################################
d2L_dmu2=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
H1=((Y[1:n-1]-mu)/sigma^2*u_t_1*U_t_1+u_t_1^2/sigma)/U_t_1^2
H2=((Y[2:n]-mu)/sigma^2*u_t*U_t+u_t^2/sigma)/U_t^2
H3=U_t_1^(-(2+alpha))*u_t_1^2
H4=(Y[1:n-1]-mu)/sigma^2*u_t_1*U_t_1^(-(1+alpha))
H5=U_t^(-(2+alpha))*u_t^2
H6=(Y[2:n]-mu)/sigma^2*u_t*U_t^(-(1+alpha))
H7=(U_t_1^(-alpha)+U_t^(-alpha)-1)
H8=U_t_1^(-(1+alpha))*u_t_1+U_t^(-(1+alpha))*u_t
H=(alpha+1)/sigma*(H1+H2)-(2*alpha+1)/sigma*(((alpha+1)/sigma*H3+H4+(alpha+1)/sigma*H5+H6)*H7-alpha/sigma*H8^2)/H7^2
return((sum(H)+(-n/sigma^2))/n)
}
############d^2L/dsigma^2#################################
# Placeholder initialization for the scratch terms assigned inside
# d2L_dsigma2 below.
J=J1=J2=J3=J4=J5=J6=J7=J8=J9=J10=J11=J12=J13=J14=J15=J16=c()
d2L_dsigma2=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
J1=(Y[1:n-1]-mu)/sigma^3*u_t_1/U_t_1
J2=(Y[1:n-1]-mu)^2/sigma^2+(Y[1:n-1]-mu)/sigma*u_t_1/U_t_1
J4=(Y[2:n]-mu)/sigma^3*u_t/U_t
J5=(Y[2:n]-mu)^2/sigma^2+(Y[2:n]-mu)/sigma*u_t/U_t
J7=(Y[1:n-1]-mu)/sigma^3*U_t_1^(-(alpha+1))*u_t_1
J8=(Y[1:n-1]-mu)/sigma*u_t_1/U_t_1
J9=(Y[1:n-1]-mu)^2/sigma^2
J10=(Y[2:n]-mu)/sigma^3*U_t^(-(alpha+1))*u_t
J11=(Y[2:n]-mu)/sigma*u_t/U_t
J12=(Y[2:n]-mu)^2/sigma^2
J13=(U_t_1^(-alpha)+U_t^(-alpha)-1)
J15=(Y[1:n-1]-mu)/sigma^2*U_t_1^(-(alpha+1))*u_t_1
J16=(Y[2:n]-mu)/sigma^2*U_t^(-(alpha+1))*u_t
J=(alpha+1)*(J1*(-2+J2)+J4*(-2+J5))-(2*alpha+1)*( (J7*(-2+(alpha+1)*J8+J9)+J10*(-2+(alpha+1)*J11+J12))*J13-alpha*(J15+J16)^2 )/J13^2
J14=-3*(Y[1:n]-mu)^2/sigma^4+1/sigma^2
return((sum(J)+sum(J14))/n)
}
############d^2L/dalpha^2#################################
d2L_dalpha2=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
K1=(U_t_1^(-alpha)+U_t^(-alpha)-1)
K2=U_t_1^(-alpha)*log(U_t_1)+U_t^(-alpha)*log(U_t)
K4=U_t_1^(-alpha)*log(U_t_1)^2+U_t^(-alpha)*log(U_t)^2
K=-1/(1+alpha)^2-2/alpha^3*log(K1)-2/alpha^2*K2/K1+(1/alpha+2)*( K2^2/K1^2-K4/K1 )
return(sum(K)/n)
}
############d^2L/dmudsigma#################################
d2L_dmudsigma=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
L1=-(1+alpha)/sigma^2*(u_t_1/U_t_1+u_t/U_t)
L2=((Y[1:n-1]-mu)^2/sigma^3*u_t_1*U_t_1+(Y[1:n-1]-mu)/sigma^2*(u_t_1)^2)/U_t_1^2
L3=((Y[2:n]-mu)^2/sigma^3*u_t*U_t+(Y[2:n]-mu)/sigma^2*(u_t)^2)/U_t^2
L4=(U_t_1^(-1-alpha)*u_t_1+U_t^(-1-alpha)*u_t)/(U_t_1^(-alpha)+U_t^(-alpha)-1)
L5=1/(U_t_1^(-alpha)+U_t^(-alpha)-1)^2
L6=(alpha+1)*U_t_1^(-alpha-2)*(Y[1:n-1]-mu)/sigma^2*u_t_1^2
L7=U_t_1^(-alpha-1)*u_t_1*(Y[1:n-1]-mu)^2/sigma^3
L8=(alpha+1)*U_t^(-alpha-2)*(Y[2:n]-mu)/sigma^2*u_t^2
L9=U_t^(-alpha-1)*u_t*(Y[2:n]-mu)^2/sigma^3
L10=(U_t_1^(-alpha)+U_t^(-alpha)-1)
L11=U_t_1^(-1-alpha)*u_t_1+U_t^(-1-alpha)*u_t
L12=(Y[1:n-1]-mu)/sigma^2*U_t_1^(-1-alpha)*u_t_1+(Y[2:n]-mu)/sigma^2*U_t^(-1-alpha)*u_t
L=L1+(alpha+1)/sigma*(L2+L3)+(2*alpha+1)/sigma^2*L4-(2*alpha+1)/sigma*L5*( (L6+L7+L8+L9)*L10-alpha*L11*L12 )
LL=-2*(Y[1:n]-mu)/sigma^3
return((sum(L)+sum(LL))/n)
}
############d^2L/dmudalpha#################################
d2L_dmudalpha=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
M1=(u_t_1/U_t_1+u_t/U_t)/sigma
M2=(U_t_1^(-(alpha+1))*u_t_1+U_t^(-(alpha+1))*u_t)/(U_t_1^(-alpha)+U_t^(-alpha)-1)*2/sigma
M3=-U_t_1^(-(1+alpha))*log(U_t_1)*u_t_1-U_t^(-(1+alpha))*log(U_t)*u_t
M4=(U_t_1^(-alpha)+U_t^(-alpha)-1)
M5=U_t_1^(-(alpha+1))*u_t_1+U_t^(-(alpha+1))*u_t
M6=-U_t_1^(-alpha)*log(U_t_1)-U_t^(-alpha)*log(U_t)
M=M1-M2-(2*alpha+1)/sigma*( M3*M4-M5*M6 )/M4^2
return(sum(M)/n)
}
############d^2L/dsigmadalpha#################################
d2L_dsigmadalpha=function(mu,sigma,alpha){
U_t_1=G(Y[1:n-1],mu,sigma);U_t=G(Y[2:n],mu,sigma)
u_t_1=g(Y[1:n-1],mu,sigma);u_t=g(Y[2:n],mu,sigma)
O1=(Y[1:n-1]-mu)/sigma^2*u_t_1/U_t_1+(Y[2:n]-mu)/sigma^2*u_t/U_t
O3=(Y[1:n-1]-mu)/sigma^2*U_t_1^(-(1+alpha))*u_t_1
O4=(Y[2:n]-mu)/sigma^2*U_t^(-(1+alpha))*u_t
O5=(U_t_1^(-alpha)+U_t^(-alpha)-1)
O6=U_t_1^(-alpha)*log(U_t_1)+U_t^(-alpha)*log(U_t)
O=O1-2*(O3+O4)/O5-(2*alpha+1)*( (-log(U_t_1)*O3-log(U_t)*O4)*O5+(O3+O4)*O6 )/O5^2
return(sum(O)/n)
}
#############Jacobian function######################################
# Symmetric 3x3 Hessian of the mean log-likelihood, assembled from the
# second-derivative pieces above.
Ja=function(mu,sigma,alpha){
AA=matrix( c(d2L_dmu2(mu,sigma,alpha),d2L_dmudsigma(mu,sigma,alpha),d2L_dmudalpha(mu,sigma,alpha),d2L_dmudsigma(mu,sigma,alpha),d2L_dsigma2(mu,sigma,alpha),d2L_dsigmadalpha(mu,sigma,alpha),d2L_dmudalpha(mu,sigma,alpha),d2L_dsigmadalpha(mu,sigma,alpha),d2L_dalpha2(mu,sigma,alpha)),3,3 )
return(AA)
}
###############Multivariate Newton Raphson#####################
# X holds the iterate history (one row per iteration). Start from the moment
# estimates, with alpha obtained from the sample Kendall's tau through the
# Clayton relation alpha = 2*tau/(1-tau).
X=matrix(,1,3)
tau=cor(Y[1:n-1],Y[2:n],method="kendall")
alpha_est=-2*tau/(tau-1)
X[1,]=c(mean(Y),sd(Y),alpha_est) #initial value
i=2
Ran.num=1
iter.num = 0
# Iterate X[i] = X[i-1] - H^{-1} score. Restart from a perturbed alpha
# (uniform on +/- D) whenever the step produces NaN, diverges, converges to a
# negative sigma, or converges too far (> 5) from the tau-based alpha; give
# up after 100 restarts or 10000 total iterations.
repeat{
Z=X
X=matrix(,i,3)
X[1:i-1,]=Z[1:i-1,]
##
Aa=Ja(X[i-1,1],X[i-1,2],X[i-1,3])
# Explicit 3x3 inverse via the adjugate / determinant.
Ainv11=Aa[2,2]*Aa[3,3]-Aa[3,2]*Aa[2,3]
Ainv12=Aa[1,2]*Aa[3,3]-Aa[3,2]*Aa[1,3]
Ainv13=Aa[1,2]*Aa[2,3]-Aa[2,2]*Aa[1,3]
Ainv21=Aa[2,1]*Aa[3,3]-Aa[3,1]*Aa[2,3]
Ainv22=Aa[1,1]*Aa[3,3]-Aa[3,1]*Aa[1,3]
Ainv23=Aa[1,1]*Aa[2,3]-Aa[1,3]*Aa[2,1]
Ainv31=Aa[2,1]*Aa[3,2]-Aa[3,1]*Aa[2,2]
Ainv32=Aa[1,1]*Aa[3,2]-Aa[1,2]*Aa[3,1]
Ainv33=Aa[1,1]*Aa[2,2]-Aa[1,2]*Aa[2,1]
Ainv=matrix(c(Ainv11,-Ainv21,Ainv31,-Ainv12,Ainv22,-Ainv32,Ainv13,-Ainv23,Ainv33),3,3)/det(Aa)
##
X[i,]=X[i-1,]-Ainv%*%F(X[i-1,1],X[i-1,2],X[i-1,3])
if(1*is.nan(X)[i,1]==1){
X=matrix(,2,3)
X[1,]=c(mean(Y),sd(Y),alpha_est+runif(1,-D,D)) #initial value
Ran.num=Ran.num+1
i=1
}else if(abs(X[i,1]-X[i-1,1])<0.0001&abs(X[i,2]-X[i-1,2])<0.0001&abs(X[i,3]-X[i-1,3])<0.0001&abs(X[i,3]-alpha_est)>5){
X=matrix(,2,3)
X[1,]=c(mean(Y),sd(Y),alpha_est+runif(1,-D,D)) #initial value
Ran.num=Ran.num+1
i=1
}else if(abs(X[i,1]-X[i-1,1])<0.0001&abs(X[i,2]-X[i-1,2])<0.0001&abs(X[i,3]-X[i-1,3])<0.0001&X[i,2]>0&abs(X[i,3]-alpha_est)<5){break
}else if(abs(X[i,1]-X[i-1,1])<0.0001&abs(X[i,2]-X[i-1,2])<0.0001&abs(X[i,3]-X[i-1,3])<0.0001&X[i,2]<0){
X=matrix(,2,3)
X[1,]=c(mean(Y),sd(Y),alpha_est+runif(1,-D,D)) #initial value
Ran.num=Ran.num+1
i=1
}else if(abs(X[i,1]-X[i-1,1])>10^10&abs(X[i,2]-X[i-1,2])>10^10&abs(X[i,3]-X[i-1,3])>10^10){
X=matrix(,2,3)
X[1,]=c(mean(Y),sd(Y),alpha_est+runif(1,-D,D)) #initial value
Ran.num=Ran.num+1
i=1
}
if(Ran.num>=100){break}
i=i+1
iter.num = iter.num + 1
if(iter.num>10000){stop("iteration fail")}
}
mle.res=X[length(X[,1]),]
# NOTE(review): the loop above restarts up to 100 times (break at
# Ran.num>=100), but this fallback to the moment estimates already triggers
# after 10 restarts -- confirm the intended threshold.
if(Ran.num>=10){ mle.res=c(mean(Y),sd(Y),alpha_est) }
mu.hat=mle.res[1]
sigma.hat=mle.res[2]
alpha.hat=mle.res[3]
# Diagnostics at the optimum. NOTE(review): the n^3 scaling of the score is
# not explained here -- confirm against the accompanying paper.
Gradient=n^3*F(mu.hat,sigma.hat,alpha.hat)
Hessian=n*Ja(mu.hat,sigma.hat,alpha.hat)
# The Newton branch does not compute standard errors or confidence limits.
SE_mu = SE_sigma = SE_alpha = lower_mu = upper_mu = lower_sigma = upper_sigma =
lower_alpha = upper_alpha = NA
}
if (method == "nlm"){
# Default branch: minimize the negative log-likelihood with nlm() on
# (mu, log sigma, log alpha). SEs are delta-method transforms of the inverse
# Hessian; CIs for sigma and alpha are symmetric on the log scale.
tau_0 = cor(Y[-n],Y[-1],method="kendall")
initial = c(mean(Y),log(sd(Y)), log(ifelse(tau_0 < 0, 1, 2*tau_0/(1-tau_0))+1))
mle.res = nlm(f = log.L, p = initial, hessian = TRUE)
mu.hat=mle.res$estimate[1]
sigma.hat=exp(mle.res$estimate[2])
alpha.hat=exp(mle.res$estimate[3])
Gradient=-mle.res$gradient
Hessian=-mle.res$hessian
inverse_Hessian=solve(Hessian,tol=10^(-50))
SE_mu = sqrt(-inverse_Hessian[1,1])
SE_sigma = sqrt(-inverse_Hessian[2,2])*sigma.hat
SE_alpha = sqrt(-inverse_Hessian[3,3])*alpha.hat
lower_mu = mu.hat-1.96*SE_mu
upper_mu = mu.hat+1.96*SE_mu
lower_sigma=sigma.hat*exp(-1.96*SE_sigma/sigma.hat)
upper_sigma=sigma.hat*exp(1.96*SE_sigma/sigma.hat)
lower_alpha=alpha.hat*exp(-1.96*SE_alpha/alpha.hat)
upper_alpha=alpha.hat*exp(1.96*SE_alpha/alpha.hat)
}
# Control limits at k estimated standard deviations around the estimated mean.
UCL=mu.hat+k*sigma.hat
LCL=mu.hat-k*sigma.hat
CL = c(Center = mu.hat, Lower = LCL, Upper = UCL)
result.mu = c(estimate = mu.hat, SE = SE_mu, Lower = lower_mu, Upper = upper_mu)
result.sigma = c(estimate = sigma.hat, SE = SE_sigma, Lower = lower_sigma, Upper = upper_sigma)
result.alpha = c(estimate = alpha.hat, SE = SE_alpha, Lower = lower_alpha, Upper = upper_alpha)
####### Plot Control Chart #######
if(plot==TRUE){
Min=min(min(Y),LCL)
Max=max(max(Y),UCL)
ts.plot(Y,type="b",ylab="Y",ylim=c(Min,Max))
abline(h=mu.hat)
abline(h=UCL,lty="dotted",lwd=2)
abline(h=LCL,lty="dotted",lwd=2)
text(0,LCL+(mu.hat-LCL)*0.1,"LCL")
text(0,UCL-(UCL-mu.hat)*0.1,"UCL")
}
# Indices of observations falling outside the control limits.
out_control=which( (Y<LCL)|(UCL<Y) )
if(length(out_control)==0){out_control="NONE"}
### Goodness-of-fit ###
# Cramer-von Mises and Kolmogorov-Smirnov distances between the empirical
# CDF and the fitted normal marginal CDF.
F_par=pnorm( (sort(Y)-mu.hat)/sigma.hat )
F_emp=1:n/n
CM.test=sum( (F_emp-F_par)^2 )
KS.test=max( abs( F_emp-F_par ) )
if(GOF==TRUE){
plot(F_emp,F_par,xlab="F_empirical",ylab="F_parametric",xlim=c(0,1),ylim=c(0,1))
lines(x = c(0,1), y = c(0,1))
}
return(
list(mu=result.mu, sigma = result.sigma, alpha = result.alpha,
Control_Limit = CL, out_of_control=out_control,
Gradient=Gradient,Hessian=Hessian,Eigenvalue_Hessian=eigen(Hessian)$value,
CM.test=CM.test, KS.test = KS.test, log_likelihood = -log.L(c(mu.hat, log(sigma.hat), log(alpha.hat))) )
)
}
|
8be25edd35997b2a30b06e1063cf0fc29da54ddb
|
13b3e061b3f603c4a9ddaf0e37ae60fdc7e39855
|
/src/investmentscleanup.R
|
1c7cc6a5ab0ec71e52d061db7b8821fbbfa0fac6
|
[] |
no_license
|
akma327/VCNetworks
|
a14705683d8c49869c29ca0dd353d2453f8ca871
|
1ae323e18cbf562039a46b7dba76ab50641f9b8b
|
refs/heads/master
| 2020-06-11T04:52:03.384461
| 2016-12-09T05:40:18
| 2016-12-09T05:40:18
| 76,002,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,630
|
r
|
investmentscleanup.R
|
# Load the raw Crunchbase-style investments table (one row per investment).
investments_start <- read.csv("../data/investments.csv", header = TRUE, stringsAsFactors = FALSE)
library(plyr)
#Remove investments without investor name and companies that are listed as investors
investments <- investments_start[investments_start$investor_name != "",]
investors <- unique(investments$investor_name)
companies <- unique(investments$company_name)
# Entities appearing as both a company and an investor would break the
# bipartite structure, so drop their company-side rows.
overlap <- companies[companies %in% investors]
investments <- investments[!(investments$company_name %in% overlap),]
#Formulate SNAP graph data. (bipartite investors -> companies)
#This graph has multiple edges, meaning an investor has invested more than once.
#writes out to investmentsBipartite.txt
companies <- unique(investments$company_name)
investors <- unique(investments$investor_name)
# Node id space: companies take ids 1..C, investors take C+1..C+I.
everything <- c(companies, investors)
rangecompanies <- c(1:length(companies))
rangeinvestors <- c(1:length(investors)) + length(companies)
investorEdgeIds <- match(investments$investor_name, everything)
companyEdgeIds <- match(investments$company_name, everything)
# One edge per investment row (so repeat investments give parallel edges).
bipartiteGraph <- data.frame(investors = investorEdgeIds, companies = companyEdgeIds)
write.table(bipartiteGraph, file = "../data/investmentsBipartite.txt", sep = "\t",row.names = FALSE)
# For company x, return every unordered pair of distinct investors that
# co-invested in it, as a data.frame with columns X1 and X2 (investor ids).
# Returns NA when the company has fewer than two distinct investors.
FindCooperatingInvestors = function(x, bipartiteGraph) {
  coinvestors <- unique(bipartiteGraph$investors[bipartiteGraph$companies == x])
  if (length(coinvestors) <= 1) {
    return(NA)
  }
  # combn() yields one pair per column; transpose so each row is one pair.
  data.frame(t(combn(coinvestors, 2)))
}
# Fold the bipartite graph onto investors: one edge per pair of investors who
# co-invested in the same company. NA entries (single-investor companies)
# are dropped after flattening.
investorEdges <- lapply(rangecompanies,FindCooperatingInvestors, bipartiteGraph)
df <- ldply(investorEdges, data.frame)
investorEdgesDF <- df[!is.na(df$X1), c("X1","X2")]
write.table(investorEdgesDF, file = "../data/investorsFolded.txt", sep = "\t", row.names = FALSE)
#Write txt file that writes company name or investor name to nodeId companyIDs
nameTable <- data.frame(id = c(1:length(everything)), name = everything)
write.table(nameTable, file = "../data/companyIDs.txt", sep = "\t",row.names = FALSE)
#Generate timeseries data of bipartite and folded graph at each time
investments_time <- investments[investments$funded_quarter != "",]
funded_quarters <- sort(unique(investments_time$funded_quarter))
for(quarter in funded_quarters){
# Assumes quarters look like "YYYY-Q#"; character 7 onward being "1" picks
# the first quarter of each year -- TODO confirm against the data format.
if(substr(quarter,7,8) == "1"){
# Cumulative snapshot: everything funded up to and including this quarter.
investments_history <- investments_time[investments_time$funded_quarter <= quarter,]
investorEdgeIds <- match(investments_history$investor_name, everything)
companyEdgeIds <- match(investments_history$company_name, everything)
bipartiteGraph <- data.frame(investors = investorEdgeIds, companies = companyEdgeIds)
filename <- paste("../data/timeseriesBipartite/investmentsBipartite",quarter,".txt",sep="")
write.table(bipartiteGraph, file = filename, sep = "\t",row.names = FALSE)
investorEdges <- lapply(rangecompanies,FindCooperatingInvestors, bipartiteGraph)
df <- ldply(investorEdges, data.frame)
if(ncol(df) > 1){
investorEdgesDF <- df[!is.na(df$X1), c("X1","X2")]
filename <- paste("../data/timeseriesFolded/investmentsFolded",quarter,".txt",sep="")
write.table(investorEdgesDF, file = filename, sep = "\t", row.names = FALSE)
}
}
}
#Generate folded graph that is unidirectional investor1 -> investor2 if investor 2 invests after investor 1
investorEdgeIds <- match(investments_time$investor_name, everything)
companyEdgeIds <- match(investments_time$company_name, everything)
bipartiteGraph <- data.frame(investors = investorEdgeIds, companies = companyEdgeIds, time = investments_time$funded_month)
write.table(bipartiteGraph, file = "../data/investmentsBipartiteTime.txt", sep = "\t",row.names = FALSE)
# Time-ordered version of the fold: for company x, sort its investment rows
# by time and pair investors so that X1 invested no later than X2. Unlike
# FindCooperatingInvestors, rows are NOT deduplicated, so repeat investments
# produce repeat pairs. Returns NA when the company has fewer than two rows.
FindCooperatingInvestorsTime = function(x, bipartiteGraph){
  company_rows <- bipartiteGraph[bipartiteGraph$companies == x, c("investors", "time")]
  ordered_investors <- company_rows$investors[order(company_rows$time)]
  if (length(ordered_investors) <= 1) {
    return(NA)
  }
  # combn() preserves input order, so earlier investors land in column X1.
  data.frame(t(combn(ordered_investors, 2)))
}
# Fold with time ordering (X1 invested no later than X2) and write out the
# directed co-investment edge list.
investorEdges <- lapply(rangecompanies,FindCooperatingInvestorsTime, bipartiteGraph)
df <- ldply(investorEdges, data.frame)
investorEdgesDF <- df[!is.na(df$X1), c("X1","X2")]
write.table(investorEdgesDF, file = "../data/investorsFoldedTime.txt", sep = "\t", row.names = FALSE)
|
f7e6461924fed4bae484462ad112086af4f27c9a
|
18ba9ff84fc08d91bf675e86d3a596ffe777da93
|
/src/assignments/sba2/code/get_intersect.R
|
7160ea836b78b904a8b56c0f60bff7dce7659345
|
[] |
no_license
|
supersubscript/compbio
|
0656e2da1ac77d845b87da9ff026a022443b166b
|
e705b317078ea3cb3582ef80ddd71d5072a22965
|
refs/heads/master
| 2021-01-17T18:45:56.893352
| 2019-08-01T16:26:33
| 2019-08-01T16:26:33
| 80,166,886
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,711
|
r
|
get_intersect.R
|
# Compare binding-site residue lists for three muscarinic-receptor structures
# (3UON, 4MQS, 4MQT with ligands 2CU and IXO) from three sources: the PDB key
# files, this author's own within-5-Angstrom extraction, and the paper.
setwd("/home/henrik/compbio/src/assignments/sba2/code")
mol1.data.mine = as.character(read.table("../data/3uon_within_5.dat.unique")[, 1])
mol1.data.key = as.character(read.table("../data/3uon_key_binding_sites.txt", header = F)[, 1])
# Drop the last two entries of the 3UON key list -- presumably trailing
# non-residue tokens in that file; TODO confirm against the raw file.
mol1.data.key = mol1.data.key[-c((length(mol1.data.key) - 1):length(mol1.data.key))]
mol2.data.mine = as.character(read.table("../data/4mqs_within_5.dat.unique")[, 1])
mol2.data.key = as.character(read.table("../data/4mqs_key_binding_sites.txt", header = F)[, 1])
mol3.2cu.data.mine = as.character(read.table("../data/4mqt_within_5_2CU.dat.unique")[, 1])
mol3.2cu.data.key = as.character(read.table("../data/4mqt_2CU_key_binding_sites.txt", header = F)[, 1])
mol3.ixo.data.mine = as.character(read.table("../data/4mqt_within_5_IXO.dat.unique")[, 1])
mol3.ixo.data.key = as.character(read.table("../data/4mqt_IXO_key_binding_sites.txt", header = F)[, 1])
# Residues highlighted as important in the reference paper, per structure.
mol1.paper.important = c("ASP103",
"ASN404",
"PHE181")
mol2.paper.important = c("ASP103",
"ASN404")
mol3.cu.paper.important = c("ASN410",
"TRP422",
"TYR426",
"TYR80",
"ASN419",
"TYR177",
"GLU172")
mol3.ixo.paper.important = c("ASP103",
"ASN404")
# Twelve residue lists in the fixed order expected by the column names set
# further below (PDB key / own extraction / paper, per structure).
all.cases = list(
mol1.data.key,
mol1.data.mine,
mol1.paper.important,
mol2.data.key,
mol2.data.mine,
mol2.paper.important,
mol3.2cu.data.key,
mol3.2cu.data.mine,
mol3.cu.paper.important,
mol3.ixo.data.key,
mol3.ixo.data.mine,
mol3.ixo.paper.important
)
# Sorted union of every residue mentioned anywhere; one heatmap row each.
all.aa = sort(unique(
c(
mol1.data.key,
mol1.data.mine,
mol2.data.key,
mol2.data.mine,
mol3.2cu.data.key,
mol3.2cu.data.mine,
mol3.ixo.data.key,
mol3.ixo.data.mine,
mol1.paper.important,
mol3.ixo.paper.important,
mol3.cu.paper.important,
mol2.paper.important
)
))
# Build the residue-by-case membership matrix: entry (i, j) is 1 when residue
# all.aa[i] appears in binding-site list all.cases[[j]], else 0.
# Vectorized with %in% instead of the original O(residues x cases) scalar
# loop that grew out.data one cbind() at a time.
membership = vapply(all.cases,
                    function(case) as.numeric(all.aa %in% case),
                    numeric(length(all.aa)))
out.data = data.frame(aa = all.aa, membership)
# Residue names become the row labels; drop the helper column so only the
# twelve 0/1 indicator columns remain.
rownames(out.data) = out.data[, 1]
out.data = out.data[, -1]
# Column labels match the order of all.cases above.
colnames(out.data) = c(
"3UON PDB",
"3UON OWN",
"3UON PAPER",
"4MQS PDB",
"4MQS OWN",
"4MQS PAPER",
"4MQT.2CU PDB",
"4MQT.2CU OWN",
"4MQT.2CU PAPER",
"4MQT.IXO PDB",
"4MQT.IXO OWN",
"4MQT.IXO PAPER"
)
library(RColorBrewer)
library(scales) #imports alpha
library(stats)
palette(brewer.pal(n = 8, name = "Set1"))
line.size = 2
par(mar=c(8,10,2,4))
# Heatmap of the membership matrix: residues on the x axis, the twelve site
# lists on the y axis (reversed so the first column appears on top).
x = data.matrix(out.data)
image(x[, ncol(x):1], xaxt = "n", yaxt = "n", col = 0:5)
axis(2, at = seq(0, 1, length.out = ncol(x)), labels = rev(colnames(x)), las = 2)
axis(1, at = seq(0, 1, length.out = nrow(x)), labels = rownames(x), las = 2)
# Solid lines separate the four structures (every third row); dashed lines
# separate the three sources within each structure.
abline(h = mean(seq(0, 1, length.out = ncol(x))[9:10]), lwd = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[6:7]), lwd = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[3:4]), lwd = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[2:3]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[1:2]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[4:5]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[5:6]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[7:8]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[8:9]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[10:11]), lwd = 1, lty = 2)
abline(h = mean(seq(0, 1, length.out = ncol(x))[11:12]), lwd = 1, lty = 2)
|
ef4e6b8a9b79ccbc35815f8aa229159fe6465008
|
744394478c9cb10d7d762003a5f1c0ba41174fbb
|
/tests/testthat/test_request_time_bounds.R
|
e274119588738376b835eeb30d8ca94cf2f9144c
|
[] |
no_license
|
jjvanderwal/climates
|
8dc06d66a50123fdd08e47b916e45a70e0b87848
|
51952d2c3c69018a975e3e93e1d41d2f10a03cdc
|
refs/heads/master
| 2016-09-06T11:34:13.984914
| 2015-11-24T14:44:51
| 2015-11-24T14:44:51
| 15,891,417
| 5
| 4
| null | 2015-11-24T14:44:51
| 2014-01-14T04:03:59
|
R
|
UTF-8
|
R
| false
| false
| 862
|
r
|
test_request_time_bounds.R
|
library(climates)
context("Test request_time_bounds function")
test_that("daymet time stuff is handled correctly.", {
  # Minimal stand-in for an ncdf4 handle: request_time_bounds() only reads
  # the time dimension's units string and values.
  ncdf4_handle <- list()
  # daymet time units format.
  ncdf4_handle$dim$time$units <- "days since 1980-01-01 00:00:00 UTC"
  # Silly 0.5 time dimension is a real problem, so reproduce it here.
  ncdf4_handle$dim$time$vals <- sequence(365 * 2) - 0.5
  time_out <- request_time_bounds(ncdf4_handle, "1980", "1981")
  # expect_equal() replaces the deprecated expect_that(..., equals(...)).
  expect_equal(time_out$origin[["month"]], 1)
  expect_equal(time_out$origin[["year"]], 1980)
})
test_that("bcca time is handled correctly.", {
  # BCCA counts days since 1950-01-01; the 20454-day offset starts the
  # series in 2006, again with half-day-centered values.
  ncdf4_handle <- list()
  ncdf4_handle$dim$time$units <- "days since 1950-01-01 00:00:00"
  ncdf4_handle$dim$time$vals <- sequence(365 * 2) - 0.5 + 20454
  time_out <- request_time_bounds(ncdf4_handle, "2006", "2007")
  # expect_equal() replaces the deprecated expect_that(..., equals(...)).
  expect_equal(time_out$origin[["month"]], 1)
  expect_equal(time_out$origin[["year"]], 1950)
})
|
0854f5808b3f96d9ab3341f8cad4a36e319f3035
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RNeXML/examples/nexml_publish.Rd.R
|
a477600b21f66d5c407b8a88ec699b662cd1f3cd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 315
|
r
|
nexml_publish.Rd.R
|
library(RNeXML)
### Name: nexml_publish
### Title: publish nexml files to the web and receive a DOI
### Aliases: nexml_publish
### ** Examples
## Not run:
##D data(bird.orders)
##D birds <- add_trees(bird.orders)
##D doi <- nexml_publish(birds, visibility = "public", repository="figshare")
## End(Not run)
|
923e6737462a9dfb82163dd779450a81cf127667
|
3241454d7786ac05bf7e711a03fcd22085cc3c0c
|
/R_Chicago_Crime_NetworkAnalysis/R_and_Rmd_Files/ChicagoCrime_ExploreBipartiteSplitCrimes_KP.R
|
ded85abbf1522a92556bf0cd4f3bd96322de7655
|
[] |
no_license
|
karipalmier/DePaul_Projects
|
d20df481ce6b2be62d1541ffa9f241353db309cb
|
b8718d9a5a7a3e198a3af3a5407d5206293f8584
|
refs/heads/master
| 2021-04-12T09:54:21.911225
| 2019-03-29T04:42:23
| 2019-03-29T04:42:23
| 126,392,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,409
|
r
|
ChicagoCrime_ExploreBipartiteSplitCrimes_KP.R
|
######################################################################################################
#
# ChicagoCrime_ExploreBipartiteSplitCrimes.R
#
# The file performs analysis on the bipartite crime projections of the
# violent and non-violent Feb 2017 Chicago Crime datasets. The first step is to
# create the bipartite projections of both the violent and non-violent graph data.
# Weighted degree, assortativity,
# transitivity, and modularity analyses were then performed on the giant components
# of the both networks.
#
# Created By Date
# ---------- ----
# Kari Palmier 6/2/2018
#
######################################################################################################
library("igraph")
library("ggplot2")
library("GGally")
# Must load other packages first
library("sand")
library("intergraph")
base_path = "C:\\DePaulCoursework\\Spring CSC 495\\Project\\"
# Helper scripts with CUG / QAP test wrappers used elsewhere in the project.
source(paste(base_path, "mycugtest.R", sep=""))
source(paste(base_path, "myqaptest.R", sep=""))
dir.create(file.path(base_path, "R_Output"), showWarnings = FALSE)
base_out_path = paste(base_path, "R_Output\\", sep = "")
dir.create(file.path(base_out_path, "Crimes"), showWarnings = FALSE)
output_path = paste(base_out_path, "Crimes\\", sep = "")
path = paste(base_path, "Graph_Data\\", sep = "")
setwd(path)
# All print() output below is diverted to Graph_Summaries.txt via sink().
out_file_name = paste(output_path, "Graph_Summaries.txt", sep = '')
outFile = file(out_file_name, open="wt")
sink(file = outFile, append = TRUE)
# Summarize data
print("Original Violent Graph Summary:")
violent_gr = read.graph("feb_violent.graphml", format = "graphml")
print(summary(violent_gr))
print("", quote=FALSE)
print("", quote=FALSE)
print("Original NonViolent Graph Summary:")
nonviolent_gr = read.graph("feb_nonviolent.graphml", format = "graphml")
print(summary(nonviolent_gr))
print("", quote=FALSE)
print("", quote=FALSE)
# Projections
# Bipartite projection onto one node type. NOTE(review): `which` is passed
# the string "FALSE" where igraph documents "false"/"true"/"both" -- confirm
# this selects the intended (type == FALSE) projection.
print("Violent Projection Graph Summaries:")
violent_comm = bipartite.projection(violent_gr, which = "FALSE")
print(summary(violent_comm))
print("", quote=FALSE)
print("", quote=FALSE)
print("NonViolent Projection Graph Summaries:")
nonviolent_comm = bipartite.projection(nonviolent_gr, which = "FALSE")
print(summary(nonviolent_comm))
print("", quote=FALSE)
print("", quote=FALSE)
# Drop the demographic vertex attributes from both projections so the two
# graphs carry a matching attribute set. The original repeated
# delete_vertex_attr() ten times per graph; loop over one shared list instead.
demog_attrs = c("Prop.Occupied", "Prop.Rented", "Prop.Vacant", "Prop.Owned",
                "Median.Age", "Total.Population", "Prop.African", "Prop.White",
                "Prop.Asian", "Community.Area")
print("Violent Projections After Vertex Removal:")
violent_filter = violent_comm
for (attr_name in demog_attrs) {
  violent_filter = delete_vertex_attr(violent_filter, attr_name)
}
print(summary(violent_filter))
print("", quote=FALSE)
print("", quote=FALSE)
print("NonViolent Projections After Vertex Removal:")
nonviolent_filter = nonviolent_comm
for (attr_name in demog_attrs) {
  nonviolent_filter = delete_vertex_attr(nonviolent_filter, attr_name)
}
print(summary(nonviolent_filter))
print("", quote=FALSE)
print("", quote=FALSE)
print("", quote=FALSE)
# Calculate graph densities
violent_density = edge_density(violent_filter)
print(paste("Violent Graph Density:", violent_density))
print("", quote=FALSE)
nonviolent_density = edge_density(nonviolent_filter)
print(paste("NonViolent Graph Density:", nonviolent_density))
print("", quote=FALSE)
print("", quote=FALSE)
# See if there are any components
print("Violent Graph Components:")
violent_decomp = decompose(violent_filter)
print(violent_decomp)
print("", quote=FALSE)
print("NonViolent Graph Components:")
nonviolent_decomp = decompose(nonviolent_filter)
print(nonviolent_decomp)
print("", quote=FALSE)
print("", quote=FALSE)
print("Violent Edge Weight Summary:")
print(summary(E(violent_filter)$weight))
print("", quote=FALSE)
print("NonViolent Edge Weight Summary:")
print(summary(E(nonviolent_filter)$weight))
print("", quote=FALSE)
print("", quote=FALSE)
violent_wdeg = graph.strength(violent_filter)
nonviolent_wdeg = graph.strength(nonviolent_filter)
print("Violent Weighted Degree Summary:")
print(summary(violent_wdeg))
print("", quote=FALSE)
print("NonViolent Weighted Degree Summary:")
print(summary(nonviolent_wdeg))
print("", quote=FALSE)
print("", quote=FALSE)
# Plot graphs
temp_jpg = paste(output_path, "Violent_Graph.jpg", sep = "")
jpeg(file = temp_jpg)
plot(violent_filter, layout = layout_with_kk)
dev.off()
temp_jpg = paste(output_path, "Nonviolent_Graph.jpg", sep = "")
jpeg(file = temp_jpg)
plot(nonviolent_filter, layout = layout_with_kk)
dev.off()
sink()
close(outFile)
closeAllConnections()
###################### Edge Weights ##############################################################
# Histogram of edge weights in the violent-crime projection.
violent_hist_jpg = paste0(output_path, "Violent_Hist_EdgeWeight.jpg")
jpeg(file = violent_hist_jpg)
p = ggplot(data = data.frame(weights = E(violent_filter)$weight), aes(x=weights))
p = p + geom_histogram(bins = 50)
p = p + ggtitle("Violent Community Edge Weights Histogram")
p = p + labs(x = "Edge Weights", y = "Count")
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(p)
dev.off()
# Histogram of edge weights in the non-violent-crime projection.
nonviolent_hist_jpg = paste0(output_path, "Nonviolent_Hist_EdgeWeight.jpg")
jpeg(file = nonviolent_hist_jpg)
p = ggplot(data = data.frame(weights = E(nonviolent_filter)$weight), aes(x=weights))
p = p + geom_histogram(bins = 50)
p = p + ggtitle("Non-Violent Community Edge Weights Histogram")
p = p + labs(x = "Edge Weights", y = "Count")
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(p)
dev.off()
# Dodged histogram comparing both edge-weight distributions in one panel.
violent_w = E(violent_filter)$weight
nonviolent_w = E(nonviolent_filter)$weight
weight_df = rbind(data.frame(type = rep("VIOLENT", length(violent_w)),
                             weight = violent_w),
                  data.frame(type = rep("NONVIOLENT", length(nonviolent_w)),
                             weight = nonviolent_w))
combined_ew_jpg = paste0(output_path, "Combined_Hist_EdgeW.jpg")
jpeg(file = combined_ew_jpg)
p = ggplot(data = weight_df, aes(x = weight, fill = type))
p = p + geom_histogram(position = "dodge", bins = 30)
p = p + ggtitle("Edge Weight By Crime Type")
p = p + labs(x = "Edge Weight", y = "Count")
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(p)
dev.off()
# Combined Edge Weight Log-Log Distribution
#
# Bin a vector of edge weights into `num_bins` equal-width bins and return a
# data frame of the non-empty bins with columns:
#   tabulate.edge_bins. (count), edge (bin lower bound), type (crime label).
# Column names are deliberately kept identical to the original inline code so
# the aes() mapping in the plot below still resolves.
# Fix: include.lowest = TRUE keeps the minimum weight in the first bin; with
# cut()'s default left-open intervals the minimum was returned as NA and
# silently dropped from the counts.
bin_edge_weights = function(weights, num_bins, type_label) {
  min_edge = min(weights)
  max_edge = max(weights)
  edge_breaks = seq(min_edge, max_edge, (max_edge - min_edge) / num_bins)
  edge_bins = cut(weights, breaks = edge_breaks, labels = FALSE,
                  include.lowest = TRUE)
  tab = data.frame(tabulate(edge_bins))
  tab$edge <- edge_breaks[1:(length(edge_breaks) - 1)]
  tab = tab[tab$tabulate.edge_bins. > 0, ]
  tab$type = rep(type_label, length(tab$edge))
  tab
}
v_tab_edge = bin_edge_weights(E(violent_filter)$weight, 30, "VIOLENT")
nv_tab_edge = bin_edge_weights(E(nonviolent_filter)$weight, 30, "NONVIOLENT")
log_weight_df = rbind(v_tab_edge, nv_tab_edge)
# Axis breaks: 5 evenly spaced ticks over the observed ranges. The axes are
# log10-scaled below, so these tick values are linear in the data.
x_num_breaks = 5
x_min_edge = min(log_weight_df$edge)
x_max_edge = max(log_weight_df$edge)
x_step_edge = (x_max_edge - x_min_edge) / x_num_breaks
y_num_breaks = 5
y_min_edge = min(log_weight_df$tabulate.edge_bins.)
y_max_edge = max(log_weight_df$tabulate.edge_bins.)
y_step_edge = (y_max_edge - y_min_edge) / y_num_breaks
temp_jpg = paste(output_path, "Combined_Log_EdgeW_Filtered.jpg", sep = "")
jpeg(file = temp_jpg)
g_combined_log = ggplot(log_weight_df, aes(x=edge, y=tabulate.edge_bins., color = type)) +
  geom_point() + geom_line() +
  scale_x_log10(name="Edge Weights", breaks = seq(x_min_edge, x_max_edge, x_step_edge)) +
  scale_y_log10("Frequency", breaks = seq(y_min_edge, y_max_edge, y_step_edge)) +
  ggtitle("Filtered Edge Weight By Crime Type") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(g_combined_log)
dev.off()
###################### Weighted Degree #############################################################
# Violent weighted-degree histogram; the degree is also stored on each vertex
# so it is persisted when the graph is written out later.
violent_wdeg_jpg = paste0(output_path, "Violent_Hist_WDegree.jpg")
jpeg(file = violent_wdeg_jpg)
p = ggplot(data = data.frame(WDegree = violent_wdeg), aes(x=WDegree))
p = p + geom_histogram(bins = 30)
p = p + ggtitle("Violent Community Weighted Degree Histogram")
p = p + labs(x = "Weighted Degree", y = "Count")
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(p)
V(violent_filter)$wdegree = violent_wdeg
dev.off()
# Non-violent weighted-degree histogram, same treatment.
nonviolent_wdeg_jpg = paste0(output_path, "Nonviolent_Hist_WDegree.jpg")
jpeg(file = nonviolent_wdeg_jpg)
p = ggplot(data = data.frame(WDegree = nonviolent_wdeg), aes(x=WDegree))
p = p + geom_histogram(bins = 30)
p = p + ggtitle("Non-Violent Community Weighted Degree Histogram")
p = p + labs(x = "Weighted Degree", y = "Count")
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(p)
V(nonviolent_filter)$wdegree = nonviolent_wdeg
dev.off()
# Dodged histogram comparing the two weighted-degree distributions.
degree_df = rbind(data.frame(type = rep("VIOLENT", length(violent_wdeg)),
                             degree = violent_wdeg),
                  data.frame(type = rep("NONVIOLENT", length(nonviolent_wdeg)),
                             degree = nonviolent_wdeg))
combined_wdeg_jpg = paste0(output_path, "Combined_Hist_WDegree.jpg")
jpeg(file = combined_wdeg_jpg)
p = ggplot(data = degree_df, aes(x = degree, fill = type))
p = p + geom_histogram(position = "dodge", bins = 30)
p = p + ggtitle("Weighted Degree By Crime Type")
p = p + labs(x = "Weighted Degree", y = "Count")
p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(p)
dev.off()
# Combined Weighted Degree Log-Log Distribution
# Bin the violent weighted degrees into 30 equal-width bins.
v_num_wdeg_bins = 30
v_min_wdeg = min(violent_wdeg)
v_max_wdeg = max(violent_wdeg)
v_step_wdeg = (v_max_wdeg - v_min_wdeg) / v_num_wdeg_bins
v_wdeg_breaks = seq(v_min_wdeg, v_max_wdeg, v_step_wdeg)
# Fix: include.lowest = TRUE keeps the minimum degree in the first bin; with
# cut()'s default left-open intervals the minimum was returned as NA and
# silently excluded from the tabulated counts.
wdeg_bins = cut(violent_wdeg, breaks = v_wdeg_breaks, labels=FALSE, include.lowest = TRUE)
v_tab_wdeg = data.frame(tabulate(wdeg_bins))
v_tab_wdeg$wdeg <- v_wdeg_breaks[1:(length(v_wdeg_breaks)-1)]
v_tab_wdeg = v_tab_wdeg[v_tab_wdeg$tabulate.wdeg_bins.>0,]
v_tab_wdeg$type = rep("VIOLENT", length(v_tab_wdeg$wdeg))
# Same binning for the non-violent weighted degrees.
nv_num_wdeg_bins = 30
nv_min_wdeg = min(nonviolent_wdeg)
nv_max_wdeg = max(nonviolent_wdeg)
nv_step_wdeg = (nv_max_wdeg - nv_min_wdeg) / nv_num_wdeg_bins
nv_wdeg_breaks = seq(nv_min_wdeg, nv_max_wdeg, nv_step_wdeg)
wdeg_bins = cut(nonviolent_wdeg, breaks = nv_wdeg_breaks, labels=FALSE, include.lowest = TRUE)
nv_tab_wdeg = data.frame(tabulate(wdeg_bins))
nv_tab_wdeg$wdeg <- nv_wdeg_breaks[1:(length(nv_wdeg_breaks)-1)]
nv_tab_wdeg = nv_tab_wdeg[nv_tab_wdeg$tabulate.wdeg_bins.>0,]
nv_tab_wdeg$type = rep("NONVIOLENT", length(nv_tab_wdeg$wdeg))
log_weight_df = rbind(v_tab_wdeg,nv_tab_wdeg)
# Axis breaks: 5 evenly spaced ticks across the combined range (the axes are
# log10-scaled below, so tick values stay linear in the data).
x_num_breaks = 5
x_min_wdeg = min(log_weight_df$wdeg)
x_max_wdeg = max(log_weight_df$wdeg)
x_step_wdeg = (x_max_wdeg - x_min_wdeg) / x_num_breaks
y_num_breaks = 5
y_min_wdeg = min(log_weight_df$tabulate.wdeg_bins.)
y_max_wdeg = max(log_weight_df$tabulate.wdeg_bins.)
y_step_wdeg = (y_max_wdeg - y_min_wdeg) / y_num_breaks
temp_jpg = paste(output_path, "Combined_Log_WDeg_Init.jpg", sep = "")
jpeg(file = temp_jpg)
g_combined_log = ggplot(log_weight_df, aes(x=wdeg, y=tabulate.wdeg_bins., color = type)) +
  geom_point() + geom_line() +
  scale_x_log10(name="Weight Degree", breaks = seq(x_min_wdeg, x_max_wdeg, x_step_wdeg)) +
  scale_y_log10("Frequency", breaks = seq(y_min_wdeg, y_max_wdeg, y_step_wdeg)) +
  ggtitle("Weighted Degree By Crime Type") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(g_combined_log)
dev.off()
###################### Transitivity #################################################################
# Transitivity (clustering coefficient) analysis for the violent-crime graph,
# with CUG and QAP significance tests. mycugtest()/myqaptest() are project
# helpers defined elsewhere in this file; all printed output goes to a text
# sink under the Transitivity subdirectory.
dir.create(file.path(output_path, "Transitivity"), showWarnings = FALSE)
trans_path = paste(output_path, "Transitivity\\", sep = "")
out_file_name = paste(trans_path, "Violent_Transitivity.txt", sep = '')
outFile = file(out_file_name, open="wt")
sink(file = outFile, append = TRUE)
# Fix: igraph's local transitivity is NaN for vertices with degree < 2, which
# previously made the whole mean NaN; na.rm = TRUE drops those vertices.
v_local_trans = mean(transitivity(violent_filter, type = "local"), na.rm = TRUE)
print(paste("Mean Local Transitivity:", v_local_trans))
temp_cug = mycugtest(violent_filter, transitivity, cmode = "edges", directed = FALSE, type = "local")
print("Local Transitivity CUG Test Results:")
print.cug.test(temp_cug)
print("", quote=FALSE)
tmp_cug = paste(trans_path, "Violent_Local_Transitivity_CUG.jpg", sep = '')
jpeg(file = tmp_cug)
plot.cug.test(temp_cug)
dev.off()
temp_qap = myqaptest(violent_filter,transitivity, directed = FALSE, type = "local")
print("Local Transitivity QAP Test Results:")
print(summary.qaptest(temp_qap))
print("", quote=FALSE)
tmp_qap = paste(trans_path, "Violent_Local_Transitivity_QAP.jpg", sep = '')
jpeg(file = tmp_qap)
plot.qaptest(temp_qap)
dev.off()
# Global transitivity: single ratio of closed triangles to connected triples.
v_global_trans = transitivity(violent_filter, type = "global")
print(paste("Global Transitivity:", v_global_trans))
temp_cug = mycugtest(violent_filter, transitivity, cmode = "edges", directed = FALSE, type = "global")
print("Global Transitivity CUG Test Results:")
print.cug.test(temp_cug)
print("", quote=FALSE)
tmp_cug = paste(trans_path, "Violent_Global_Transitivity_CUG.jpg", sep = '')
jpeg(file = tmp_cug)
plot.cug.test(temp_cug)
dev.off()
temp_qap = myqaptest(violent_filter, transitivity, directed = FALSE, type = "global")
print("Global Transitivity QAP Test Results:")
print(summary.qaptest(temp_qap))
print("", quote=FALSE)
tmp_qap = paste(trans_path, "Violent_Global_Transitivity_QAP.jpg", sep = '')
jpeg(file = tmp_qap)
plot.qaptest(temp_qap)
dev.off()
# Release the text sink and its connection.
sink()
close(outFile)
closeAllConnections()
# Same transitivity analysis for the non-violent-crime graph; output is
# captured into its own text file under the Transitivity subdirectory.
out_file_name = paste(trans_path, "NonViolent_Transitivity.txt", sep = '')
outFile = file(out_file_name, open="wt")
sink(file = outFile, append = TRUE)
# Fix: na.rm = TRUE drops the NaN values igraph returns for vertices with
# degree < 2, which previously made the mean NaN. Also renamed the
# copy-pasted v_* locals to nv_* for clarity.
nv_local_trans = mean(transitivity(nonviolent_filter, type = "local"), na.rm = TRUE)
print(paste("Mean Local Transitivity:", nv_local_trans))
temp_cug = mycugtest(nonviolent_filter, transitivity, cmode = "edges", directed = FALSE, type = "local")
print("Local Transitivity CUG Test Results:")
print.cug.test(temp_cug)
print("", quote=FALSE)
tmp_cug = paste(trans_path, "NonViolent_Local_Transitivity_CUG.jpg", sep = '')
jpeg(file = tmp_cug)
plot.cug.test(temp_cug)
dev.off()
temp_qap = myqaptest(nonviolent_filter,transitivity, directed = FALSE, type = "local")
print("Local Transitivity QAP Test Results:")
print(summary.qaptest(temp_qap))
print("", quote=FALSE)
tmp_qap = paste(trans_path, "NonViolent_Local_Transitivity_QAP.jpg", sep = '')
jpeg(file = tmp_qap)
plot.qaptest(temp_qap)
dev.off()
# Global transitivity: single ratio of closed triangles to connected triples.
nv_global_trans = transitivity(nonviolent_filter, type = "global")
print(paste("Global Transitivity:", nv_global_trans))
temp_cug = mycugtest(nonviolent_filter, transitivity, cmode = "edges", directed = FALSE, type = "global")
print("Global Transitivity CUG Test Results:")
print.cug.test(temp_cug)
print("", quote=FALSE)
tmp_cug = paste(trans_path, "NonViolent_Global_Transitivity_CUG.jpg", sep = '')
jpeg(file = tmp_cug)
plot.cug.test(temp_cug)
dev.off()
temp_qap = myqaptest(nonviolent_filter, transitivity, directed = FALSE, type = "global")
print("Global Transitivity QAP Test Results:")
print(summary.qaptest(temp_qap))
print("", quote=FALSE)
tmp_qap = paste(trans_path, "NonViolent_Global_Transitivity_QAP.jpg", sep = '')
jpeg(file = tmp_qap)
plot.qaptest(temp_qap)
dev.off()
# Release the text sink and its connection.
sink()
close(outFile)
closeAllConnections()
###################### Community Detection #####################################################
# Fixed seed so the stochastic algorithms (spinglass in particular) are
# reproducible; the call order below must stay stable for that to hold.
set.seed(20170423)
mod_file_name = paste0(output_path, "Graph_Modularities.txt")
outFile = file(mod_file_name, open="wt")
sink(file = outFile, append = TRUE)
# Violent community detection: run seven algorithms and record each
# membership vector as a vertex attribute on the graph.
comm.sg = cluster_spinglass(violent_filter)
V(violent_filter)$comm_sg = comm.sg$membership
comm.lv = cluster_louvain(violent_filter)
V(violent_filter)$comm_lv = comm.lv$membership
comm.le = cluster_leading_eigen(violent_filter)
V(violent_filter)$comm_le = comm.le$membership
comm.fg = cluster_fast_greedy(violent_filter)
V(violent_filter)$comm_fg = comm.fg$membership
comm.bt = cluster_edge_betweenness(violent_filter, weights = NULL)
V(violent_filter)$comm_bt = comm.bt$membership
comm.wt5 = cluster_walktrap(violent_filter, steps = 2)
V(violent_filter)$comm_wt5 = comm.wt5$membership
comm.wt10 = cluster_walktrap(violent_filter, steps = 3)
V(violent_filter)$comm_wt10 = comm.wt10$membership
# Modularity score and cluster count per algorithm, in a fixed order.
violent_comms = list(comm.sg, comm.lv, comm.le, comm.fg, comm.bt, comm.wt5, comm.wt10)
comm.mod = lapply(violent_comms, modularity)
comm.len = lapply(violent_comms, length)
print(comm.mod)
print(comm.len)
algorithms = c("SG", "LV", "LE", "FG", "BT", "WT2", "WT3")
comm_df = data.frame(Modularity = as.vector(comm.mod, mode = "numeric"),
                     Length = as.vector(comm.len, mode = "numeric"), Algorithm = algorithms)
# Bar chart of the number of clusters found by each algorithm.
cluster_jpg = paste0(output_path, "Violent_Cluster_Sizes.jpg")
jpeg(file = cluster_jpg)
p = ggplot(data = comm_df, aes(x = Algorithm, y = Length, fill = Algorithm)) +
  geom_bar(stat = "identity") +
  geom_text(aes(label=round(Length, 4)), vjust=0) +
  ggtitle("Number of Community Clusters By Algorithm") +
  labs(x = "Community Detection Algorithm", y = "Number of Clusters (Length)")
print(p)
dev.off()
# NonViolent community detection: same seven algorithms, memberships stored
# as vertex attributes on the non-violent graph.
commNV.sg = cluster_spinglass(nonviolent_filter)
V(nonviolent_filter)$comm_sg = commNV.sg$membership
commNV.lv = cluster_louvain(nonviolent_filter)
V(nonviolent_filter)$comm_lv = commNV.lv$membership
commNV.le = cluster_leading_eigen(nonviolent_filter)
V(nonviolent_filter)$comm_le = commNV.le$membership
commNV.fg = cluster_fast_greedy(nonviolent_filter)
V(nonviolent_filter)$comm_fg = commNV.fg$membership
commNV.bt = cluster_edge_betweenness(nonviolent_filter, weights = NULL)
V(nonviolent_filter)$comm_bt = commNV.bt$membership
commNV.wt5 = cluster_walktrap(nonviolent_filter, steps = 2)
V(nonviolent_filter)$comm_wt5 = commNV.wt5$membership
commNV.wt10 = cluster_walktrap(nonviolent_filter, steps = 3)
V(nonviolent_filter)$comm_wt10 = commNV.wt10$membership
# Modularity score and cluster count per algorithm, in a fixed order.
nonviolent_comms = list(commNV.sg, commNV.lv, commNV.le, commNV.fg, commNV.bt, commNV.wt5, commNV.wt10)
commNV.mod = lapply(nonviolent_comms, modularity)
commNV.len = lapply(nonviolent_comms, length)
print(commNV.mod)
print(commNV.len)
algorithms = c("SG", "LV", "LE", "FG", "BT", "WT2", "WT3")
comm_df = data.frame(Modularity = as.vector(commNV.mod, mode = "numeric"),
                     Length = as.vector(commNV.len, mode = "numeric"), Algorithm = algorithms)
# Bar chart of the number of clusters found by each algorithm.
cluster_jpg = paste0(output_path, "NonViolent_Cluster_Sizes.jpg")
jpeg(file = cluster_jpg)
p = ggplot(data = comm_df, aes(x = Algorithm, y = Length, fill = Algorithm)) +
  geom_bar(stat = "identity") +
  geom_text(aes(label=round(Length, 4)), vjust=0) +
  ggtitle("Number of Community Clusters By Algorithm") +
  labs(x = "Community Detection Algorithm", y = "Number of Clusters (Length)")
print(p)
dev.off()
# Release the modularity text sink and its connection.
sink()
close(outFile)
closeAllConnections()
# Save updated files
# Persist both filtered graphs — including the wdegree and community
# membership vertex attributes added above — as GraphML.
out_file_violent = paste0(path, "feb_violent_crimes.graphml")
write_graph(violent_filter, out_file_violent, format = "graphml")
out_file_nonviolent = paste0(path, "feb_nonviolent_crimes.graphml")
write_graph(nonviolent_filter, out_file_nonviolent, format = "graphml")
# NOTE(review): the lines below are not R code — they are dataset-viewer page
# residue accidentally appended to the script. Commented out so the file parses.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.