blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6604bd798c523d15564c8ce65b8a76b779bee978
|
7afbb148ec11b3105aaead6bdd900f847e49eb18
|
/tests/testthat/test-stringsAsFactors.R
|
6a3ca57d2505b0393493bbf32bffea235959da18
|
[
"MIT"
] |
permissive
|
tidymodels/recipes
|
88135cc131b4ff538a670d956cf6622fa8440639
|
eb12d1818397ad8780fdfd13ea14d0839fbb44bd
|
refs/heads/main
| 2023-08-15T18:12:46.038289
| 2023-08-11T12:32:05
| 2023-08-11T12:32:05
| 76,614,863
| 383
| 123
|
NOASSERTION
| 2023-08-26T13:43:51
| 2016-12-16T02:40:24
|
R
|
UTF-8
|
R
| false
| false
| 1,356
|
r
|
test-stringsAsFactors.R
|
library(testthat)
library(recipes)
# Build a small simulated data set with one numeric column, one unordered
# factor, and one ordered factor (used as the recipe training data below).
n <- 20
set.seed(752)
as_fact <- data.frame(
numbers = rnorm(n),
fact = factor(sample(letters[1:3], n, replace = TRUE)),
ord = factor(sample(LETTERS[22:26], n, replace = TRUE),
ordered = TRUE
)
)
# A copy of the same data where both factor columns are plain character
# vectors, to exercise the strings_as_factors prep() option.
as_str <- as_fact
as_str$fact <- as.character(as_str$fact)
as_str$ord <- as.character(as_str$ord)
test_that("strings_as_factors = FALSE", {
# With strings_as_factors = FALSE at prep() time, baking should leave
# factor columns as factors and string columns as strings.
rec1 <- recipe(~., data = as_fact) %>%
step_center(numbers)
rec1 <- prep(rec1,
training = as_fact,
strings_as_factors = FALSE, verbose = FALSE
)
rec1_as_fact <- bake(rec1, new_data = as_fact)
# Baking character data against factor training data emits a condition
# that is pinned via snapshot rather than silently ignored.
expect_snapshot(rec1_as_str <- bake(rec1, new_data = as_str))
expect_equal(as_fact$fact, rec1_as_fact$fact)
expect_equal(as_fact$ord, rec1_as_fact$ord)
# Strings stay strings: baked output matches the character input columns.
expect_equal(as_str$fact, rec1_as_str$fact)
expect_equal(as_str$ord, rec1_as_str$ord)
})
test_that("strings_as_factors = TRUE", {
# With strings_as_factors = TRUE at prep() time, character columns in new
# data should be converted back to the factor levels seen in training.
rec2 <- recipe(~., data = as_fact) %>%
step_center(numbers)
rec2 <- prep(rec2,
training = as_fact,
strings_as_factors = TRUE, verbose = FALSE
)
rec2_as_fact <- bake(rec2, new_data = as_fact)
# Baking character data emits a condition pinned via snapshot.
expect_snapshot(rec2_as_str <- bake(rec2, new_data = as_str))
expect_equal(as_fact$fact, rec2_as_fact$fact)
expect_equal(as_fact$ord, rec2_as_fact$ord)
# Strings become factors: baked output matches the original factor columns.
expect_equal(as_fact$fact, rec2_as_str$fact)
expect_equal(as_fact$ord, rec2_as_str$ord)
})
|
c00bbdf80b57623ede3a38899e7aeacfbbda3f8c
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/2161_2/rinput.R
|
90268212ecbae1bb085e738eea85f3c3360e93bd
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick-format tree, remove its root, and write the unrooted tree
# back out for downstream codeml processing.
library(ape)
testtree <- read.tree("2161_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2161_2_unrooted.txt")
|
5755827f3d0d4a28000eaca07c16ffb170c11ad1
|
008f618a0c800605f6cdc664aa40c36abc211a67
|
/R code/Introductory tutorials/test.R
|
ba20b397b3bfa8e8bfaab22feab32155e47023a8
|
[] |
no_license
|
ThomasKraft/Dartmouth-EEES-Modeling-Group
|
2b4e509ffec61650ea4f3ee295ea533123c23cfc
|
7144974d4b4ec896cb46e3b96aa7531dd90009bb
|
refs/heads/master
| 2021-01-10T15:15:04.661372
| 2015-11-20T19:06:43
| 2015-11-20T19:06:43
| 45,926,068
| 1
| 1
| null | 2015-11-20T19:06:43
| 2015-11-10T16:49:26
|
R
|
UTF-8
|
R
| false
| false
| 478
|
r
|
test.R
|
### Test to help everyone get started!
#new code
install.packages("ggplot2")
install.packages("RCurl")
install.packages("foreign")
install.packages("dplyr")
library(ggplot2)
library(RCurl)
library(foreign)
# dplyr provides %>%, group_by() and summarize() used at the end of this
# script; without it the pipeline fails with "could not find function '%>%'".
library(dplyr)
# Fetch the serotiny data set straight from the group's GitHub repository.
url1 <- getURL("https://raw.githubusercontent.com/ThomasKraft/Dartmouth-EEES-Modeling-Group/master/Data/serotiny.csv")
cdat <- read.csv(textConnection(url1))
str(cdat) #if you don't get any errors then this is a success!
# Mean serotiny proportion per provenance/site combination.
cdat %>%
  group_by(PROV, SITE) %>%
  summarize(prop.serot=mean(SEROT))
|
f3c1f90ae4befad8de8aa1b97886780209589899
|
d9b8cc600e0bc75224f170988dff4891e10d492a
|
/cachematrix.R
|
3b79fe099984a6aee0c7b178f9bd3b48f0d0d8de
|
[] |
no_license
|
skom2308/ProgrammingAssignment2
|
20e9794116277386e7d5bfeea8e7dd77a787d144
|
c705a826d15171f6411ffc5c68499c1d27b6ffde
|
refs/heads/master
| 2020-12-11T01:52:17.021994
| 2015-05-09T18:26:55
| 2015-05-09T18:26:55
| 35,337,630
| 0
| 0
| null | 2015-05-09T17:15:05
| 2015-05-09T17:15:04
| null |
UTF-8
|
R
| false
| false
| 1,237
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## The first function, makeCacheMatrix creates a special "marix", which is really a list containing a function to:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse matrix
## 4. get the value of the inverse matrix
## Sample usage:
## x <- matrix(c(1,3,2,4,3,2,5,4,8),3,3)
## x1<-makeCacheMatrix(x)
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## The following function calculates the mean of the special "matrix" created with the above function
## Sample Usage:
## cacheSolve(x1)
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
|
b3c961c3cdeb5cba51a59efe0684f2620e33415d
|
4f27aabc6080cc95e79dfb3d20730a6b0d1552ed
|
/plot2.R
|
caedfe95b0b1ccb58a1dc0c4d40345bdb64d07ba
|
[] |
no_license
|
eminnett/exploratory-data-analysis-2
|
a3a401acfd2ff6db8f9165b442cb3eb2251151ea
|
aceac74fced6a044426374e48c71e797ecf72b3b
|
refs/heads/master
| 2020-12-03T03:51:17.532266
| 2015-11-22T21:19:22
| 2015-11-22T21:19:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,693
|
r
|
plot2.R
|
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system to make
# a plot answering this question.
library(dplyr)
# This function assumes https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip has been downloaded and expanded into a sibling folder called 'data'.
plot2 <- function(){
  # Locate the expanded NEI data relative to the working directory.
  data_location <- paste(getwd(), "/data/exdata-data-NEI_data", sep='')
  # Read the summary emissions data and wrap it as a dplyr table.
  summary_scc_pm25 <- tbl_df(readRDS(paste(data_location, "/summarySCC_PM25.rds", sep=''), refhook = NULL))
  # Restrict to Baltimore City, group by year, and total the emissions.
  baltimore_pm25_by_year_totals <- summary_scc_pm25 %>%
    filter(fips == "24510") %>%
    group_by(year) %>%
    summarise(sum(Emissions))
  # Give the summarised columns meaningful names for plotting.
  colnames(baltimore_pm25_by_year_totals) <- c("Year", "Total.Emissions")
  # Open the PNG device, draw the scatter plot of yearly totals, then close
  # the device to flush the file to disk.
  png(filename = "plot2.png")
  plot(baltimore_pm25_by_year_totals$Year,
       baltimore_pm25_by_year_totals$Total.Emissions,
       main = "Total PM25 Emissions over Time for Baltimore City, Maryland",
       ylab = "Total PM25 Emissions",
       xlab = "Year",
       col = "red",
       pch = 19)
  dev.off()
}
|
c9a514fadc4d5505068a79006a84de4f78f43e65
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/weibulltools/examples/kaplan_method.Rd.R
|
5b1ea8d5380be3e8eb9048af591690c3f04be541
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 805
|
r
|
kaplan_method.Rd.R
|
library(weibulltools)
### Name: kaplan_method
### Title: Estimation of Failure Probabilities using Kaplan-Meier
### Aliases: kaplan_method
### ** Examples
# Example 1
# Censored sample: observation times with a status vector
# (state: 1 = failure observed, 0 = right-censored) and unit identifiers.
obs <- seq(10000, 100000, 10000)
state <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
uic <- c("3435", "1203", "958X", "XX71", "abcd", "tz46",
"fl29", "AX23","Uy12", "kl1a")
df_kap <- kaplan_method(x = obs, event = state, id = uic)
# Example 2
# Complete sample: 23 observed failures, no censoring (all states are 1).
df <- data.frame(obs = c(10000, 10000, 20000, 20000, 30000,
30000, 30000, 30000, 40000, 50000,
50000, 60000, 70000, 70000, 70000,
70000, 80000, 80000, 80000, 80000,
90000, 90000, 100000),
state = rep(1, 23))
df_kap2 <- kaplan_method(x = df$obs, event = df$state)
|
c308b8352e4847236b9cd72c0d484d9462fef61b
|
87fdffc82b36e9c907506b60cbbb979a264b1fad
|
/EDAProject2/plot2.r
|
376c52798f8cd29687261f852977516cc15e185d
|
[] |
no_license
|
Whitchurch/datasciencecoursera
|
5aacc924b93b413bf3743f93e7eba5e74b0c747a
|
aa99702d13fc33de9f9bd1981d428c60164ad33a
|
refs/heads/master
| 2020-11-29T06:45:55.222830
| 2020-06-05T17:23:20
| 2020-06-05T17:23:20
| 230,049,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,912
|
r
|
plot2.r
|
#load dplyr
library(dplyr)
library(tidyr)
#Function to read and return the files
#Function to read and return the files
# Derives the path of a data file from the path of the script currently open
# in the RStudio editor (requires running interactively inside RStudio),
# then reads it with readRDS(). Returns the deserialized object on success;
# on a missing file it returns the printed advisory string instead.
readfiles <- function(filenametoreplace, filenametosubstitute)
{
  currentpath <- rstudioapi::getSourceEditorContext()$path
  currentpath <- gsub(filenametoreplace,filenametosubstitute, currentpath)
  if(file.exists(currentpath))
  {
    print("The file exists")
    readRDS(currentpath)
  }else{
    # Name the file that was actually requested; the previous message always
    # said "Source_Classification_Code.rds", which was misleading when this
    # function was called to read summarySCC_PM25.rds.
    print(paste0("Check if the file: ", filenametosubstitute,
                 " is in the same folder as ", filenametoreplace))
  }
}
# Load the source classification table and the PM2.5 summary data.
SCC <- readfiles("plot2.r","Source_Classification_Code.rds")
NEI <- readfiles("plot2.r","summarySCC_PM25.rds")
#Subset only the data for Baltimore City based on its fips code, and sum all its PM2.5 pollutants
baltimoreonly <- NULL
baltimoreonly <- group_by(NEI, year)
baltimoreonly <- group_by(baltimoreonly, fips, add = TRUE)
baltimoreonly <- summarize_at(baltimoreonly,.vars = c("Emissions") ,.funs = sum)%>%filter(fips == "24510")
print(baltimoreonly)
#Prepare the data to be fed into a boxplot
baltimoreonly <- select(baltimoreonly,-fips) #drop fips as we don't need it for display purposes
names(baltimoreonly) # verify that fips column is dropped
#convert the data into a one dimensional array with attributes for display in barplot
length(baltimoreonly$year)
arrayForBarplot <- array(baltimoreonly$Emissions, dim = length(baltimoreonly$year))
dimnames(arrayForBarplot) <- list(baltimoreonly$year)
#Plot the data
options(scipen = 999) #deactivate scientific notations
# Write plot2.png next to this script (path derived via rstudioapi, so this
# only works when run inside RStudio).
currentpath <- rstudioapi::getSourceEditorContext()$path
path1 = gsub("plot2.r","plot2.png", currentpath)
png(path1,width = 480, height = 480)
barplot(arrayForBarplot, xlab = "Years", ylab = "Total emission in (Tons)", main = "(Baltimore City) 1999 - 2008: PM 2.5 emission")
# Overlay a trend line and points on top of the bars.
lines(arrayForBarplot, col = "green")
points(arrayForBarplot, pch = 16, col = "green")
legend("topright",legend = c("Trend"),lty = 1,col ="green", bty = "n")
dev.off()
|
b9b17e0f43f5d14d34104b6336bd6bb233231555
|
5a531782c95a3a258058d90210171895ab0abc43
|
/man/plot_close.Rd
|
8e0f166263252499caffad08be84b0e03ad12435
|
[
"MIT"
] |
permissive
|
colin-fraser/questradeR
|
8d74b9e6a21053a625208882382bce93f161645a
|
d5f73b99ce2dd8a75faa9076fada9aac4dc0da46
|
refs/heads/master
| 2023-03-04T05:04:09.371340
| 2020-12-23T17:44:38
| 2020-12-23T17:44:38
| 322,346,721
| 0
| 0
|
NOASSERTION
| 2021-02-16T16:01:50
| 2020-12-17T16:10:43
|
R
|
UTF-8
|
R
| false
| true
| 486
|
rd
|
plot_close.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extra.R
\name{plot_close}
\alias{plot_close}
\title{Plot close prices from candle df}
\usage{
plot_close(candle_df, yaxis = c("price", "return"), facet = FALSE)
}
\arguments{
\item{candle_df}{data frame returned from candles()}
\item{yaxis}{'price' for raw prices, 'return' for returns since the start}
\item{facet}{facet each symbol?}
}
\value{
a ggplot
}
\description{
Plot close prices from candle df
}
|
a49e0f48f4dbd5b1ad84a441bff19de5f9546e3b
|
97d7c3a59494aaefd41e97197d1896c1ca267096
|
/R/StripRepetitions.R
|
f7cc17eb7c681e1e709021d0d28b96d38808954a
|
[] |
no_license
|
cran/TauP.R
|
052e5b006ffa792116ea66507bef6a34391be699
|
d520d562eb5f6b0fd268727babad86f8f87b5902
|
refs/heads/master
| 2021-06-02T15:03:44.398055
| 2018-08-31T07:09:07
| 2018-08-31T07:09:07
| 17,693,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
StripRepetitions.R
|
# Strip a trailing run of digits from a seismic phase name.
#
# Returns a two-element list: the phase name without its trailing digits,
# and the digits interpreted as a repetition count (0 when the name has no
# trailing digits). Warnings from as.numeric() on non-digit characters are
# suppressed for the duration of the call and the previous warn option is
# restored before returning.
StripRepetitions <-
function(phase)
{
  chars <- strsplit(phase, '')[[1]]
  stripped <- chars
  reps <- NULL
  old_warn <- getOption('warn')
  options(warn = -1)
  # as.numeric() yields NA for non-digit characters, which is how digit
  # positions are detected below.
  if (!is.na(as.numeric(chars[length(chars)]))) {
    digit_run <- NULL
    pos <- length(chars)
    finished <- FALSE
    # Walk backwards from the end, collecting consecutive digits.
    while (!finished) {
      if (!is.na(as.numeric(chars[pos]))) {
        digit_run <- c(chars[pos], digit_run)
        pos <- pos - 1
        if (pos == 0) {
          finished <- TRUE
        }
      } else {
        finished <- TRUE
      }
    }
    reps <- as.numeric(paste(digit_run, collapse = ''))
    stripped <- paste(stripped[1:pos], collapse = '')
  } else {
    reps <- 0
  }
  options(warn = old_warn)
  return(list(paste(stripped, collapse = ''), reps))
}
|
b6efd6e2f026008b16d7782ff3b2105d7527fe91
|
0f188217a4a2b3955ebb33ec753c0f27bab62ec9
|
/R/logprimedown.R
|
7bfa9ea41fbee9bc0736813a557c7e8c348be6c4
|
[] |
no_license
|
Monneret/MarginalCausality
|
f15ace3619494eef9502b0498649ce3d07622f0a
|
4cd442f52284cfa302e1f5e17d7dbc15294c22ea
|
refs/heads/master
| 2021-01-12T16:24:30.354511
| 2016-10-05T09:39:50
| 2016-10-05T09:39:50
| 70,046,341
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
logprimedown.R
|
#' Compute the gradient of the log-likelihood for the downstream model.
#'
#' @export
#' @param wt Observational data, matrix of dimension N*2, where N stands for the number of replicates.
#' The first column corresponds to the knocked-out gene.
#' @param ko Interventional data, matrix of dimension N*2, where N stands for the number of replicates.
#' The first column corresponds to the knocked-out gene.
#' @param par Set of parameters, such that the first corresponds to alpha,
#' the second and third to the means, and the last two to the log of the standard deviations.
#' @return Named numeric vector: the gradient of the log-likelihood for the
#' downstream model with respect to (muX, muG, sigmaX, sigmaG, alpha).
#' @examples
#' wt <- rnorm(n=10,mean=-2,sd=0.8)
#' wt <- cbind(wt,2*wt+rnorm(n=10,mean=1,sd=1.7))
#' ko <- rnorm(n=10,mean=0,sd=0.01)
#' ko <- cbind(ko,2*ko+rnorm(n=10,mean=1,sd=1.7))
#' par <- c(-2,1,0.3,-4,10)
#' logprime.down(wt,ko,par)
logprime.down <- function(wt,ko,par) {
  # Unpack parameters; standard deviations are optimized on the log scale,
  # so exponentiate to recover sigma.
  alpha <- par[1]; mu <- par[2:3]; sigma <- exp(par[4:5])
  # Partial derivative w.r.t. the downstream mean mu[2]: residuals from both
  # the wild-type and knock-out samples, scaled by the downstream variance.
  pmuX <- sum(wt[,2]-mu[2]-alpha*wt[,1])/sigma[2]^2+
    sum(ko[,2]-mu[2]-alpha*ko[,1])/(sigma[2]^2)
  # Partial derivative w.r.t. the upstream mean mu[1] (wild-type only).
  pmuG <- sum(wt[,1]-mu[1])/sigma[1]^2
  # Partial derivatives w.r.t. the downstream and upstream standard deviations.
  psigmaX <- sum(-1/sigma[2]+(wt[,2]-mu[2]-alpha*wt[,1])^2/sigma[2]^3)+
    sum(-1/sigma[2]+((ko[,2]-mu[2]-alpha*ko[,1])^2)/sigma[2]^3)
  psigmaG <- sum(-1/sigma[1]+(wt[,1]-mu[1])^2/sigma[1]^3)
  # Partial derivative w.r.t. the regression coefficient alpha.
  palpha <- sum(wt[,1]*(wt[,2]-mu[2]-alpha*wt[,1]))/sigma[2]^2+
    sum(ko[,1]*(ko[,2]-mu[2]-alpha*ko[,1])/sigma[2]^2)
  return(c(pmuX=pmuX,pmuG=pmuG,psigmaX=psigmaX,psigmaG=psigmaG,palpha=palpha))
}
|
c7e20b1af682db06192854a055aeeccc05b0612c
|
df10b7fb39449e70b92d28c8ce791d9fcd772c8a
|
/make_clembets.R
|
7579ee3b8e8b9d8504aeb1ad0ddcd2ac743f5cea
|
[] |
no_license
|
adndebanane/zdsbets
|
3848c7a39090ff7644ab20f03d13af2e3a59baa8
|
95a0d149384468eb9e7652e1ffd53881f336a4a6
|
refs/heads/master
| 2020-04-17T10:12:48.460050
| 2017-06-08T07:52:04
| 2017-06-08T07:52:04
| 67,291,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,054
|
r
|
make_clembets.R
|
# Load season results and every player's bets, then derive this round's
# combined predictions (weighted by each player's historical weight).
load("results20162017.rda")
cur.season <- season20162017
bets <- read.table(file = "00-datasets/20162017/players_bets.csv", header = TRUE, sep = ",",
stringsAsFactors = F)
load("players.rda")
##List of games to play with
list.betted.games <- unique(bets$idmatch)
nb.betted.games <- length(list.betted.games)
nb.played.games <- length(cur.season$idmatch)
# Games that have bets but no recorded result yet.
tobeplayed.games <- list.betted.games[!(list.betted.games %in% cur.season$idmatch)]
nb.tobeplayed.games <- length(tobeplayed.games)
mybets <- data.frame(idmatch = tobeplayed.games,
random.bets = rep(NA, nb.tobeplayed.games),
deterministic.bets = rep(NA, nb.tobeplayed.games),
votes = rep(NA, nb.tobeplayed.games))
##Let's play!
# seq_len()/seq_along() instead of 1:n so the loops are skipped entirely
# (rather than run with indices c(1, 0)) when there is nothing to predict.
for(igame in seq_len(nb.tobeplayed.games)){
bets.games <- subset(bets, idmatch == tobeplayed.games[igame])
vote.1 <- 0
vote.N <- 0
vote.2 <- 0
# Accumulate each player's weighted vote: 1 = home win, N = draw, 2 = away win.
for(ibet in seq_along(bets.games$bet)){
cur.player <- bets.games$player[ibet]
cur.weight <- players$weights[players$player == cur.player]
if(bets.games$bet[ibet] == 1){
vote.1 <- vote.1 + cur.weight
} else if(bets.games$bet[ibet] == 2){
vote.2 <- vote.2 + cur.weight
} else{
vote.N <- vote.N + cur.weight
}
}
# random.bets samples an outcome proportionally to the vote weights;
# deterministic.bets takes the plurality outcome; votes records the winning
# share as a percentage of the total player weight.
weight.distribution <- c(vote.1, vote.N, vote.2) / sum(c(vote.1, vote.N, vote.2))
mybets$random.bets[igame] <- sample(c(1, "N", 2), 1, prob = weight.distribution)
mybets$deterministic.bets[igame] <- c(1, "N", 2)[which.max(c(vote.1, vote.N, vote.2))]
mybets$votes[igame] <- max(c(vote.1, vote.N, vote.2))/sum(players$weights)*100
}
print(mybets)
oldbets <- mybets
# Append this round's bets to the running CSV (no header row on append).
write.table(oldbets, file = "00-datasets/20162017/clembets.csv", sep = ",", row.names = FALSE,
col.names = FALSE, append = TRUE)
library(knitr)
df2print <- mybets[, c("idmatch", "deterministic.bets", "votes")]
names(df2print) <- c("IdMatch", "Prono", "%Poids")
kable(df2print[, ], format = "markdown", row.names = F)
|
de7c821fd150c971c8cc6c6a0c1cc03d0390e08e
|
1febe6b946637c6b7a946976d281a8352899e2dc
|
/man/anyOf.Rd
|
17e9024fbc49bb73a2f0aa87313cd0a9177cab92
|
[
"CC0-1.0"
] |
permissive
|
rmflight/vRbalExpressions
|
136b446889e9641c196a2c2de55b18f6663c210c
|
19042e1254d0092157112b2ddcb70ba7ca5ddfdd
|
refs/heads/master
| 2021-01-15T12:26:03.255398
| 2013-08-27T20:11:18
| 2013-08-27T20:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
rd
|
anyOf.Rd
|
\name{anyOf}
\alias{anyOf}
\title{Any given character}
\usage{
anyOf(value)
}
\description{
Any given character
}
|
8c3e1e0c5593853cdda8a2a91a88f3bb8e8abdaa
|
272c346ee4a331272851866cf2395c0ae8d6e508
|
/RaghuInterview (1).R
|
915239e10e42cc3eb07775f3edae15d43e234495
|
[] |
no_license
|
SRV17/Analysis-on-LAPD-Crime-Data
|
f9e770b61bf661c533d2213a81e227f8436b74d4
|
64c10a7704390d227d4ea0b5d242e8ece517cd96
|
refs/heads/master
| 2021-01-22T22:58:01.262464
| 2017-03-28T22:02:53
| 2017-03-28T22:02:53
| 85,598,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,900
|
r
|
RaghuInterview (1).R
|
library(lubridate)
#install.packages("lubridate")
library(stringr)
#install.packages("stringr")
library(dplyr)
#install.packages("dplyr")
library(tidyr)
#install.packages("tidyr")
library(ggplot2)
#Load Data
# NOTE(review): hard-coded absolute Windows path — the script only runs on
# the original author's machine as-is.
RawData <- as.data.frame(read.csv("C:/users/Shwetha Hara/Downloads/Crimes_2012-2015.csv", stringsAsFactors = FALSE))
#Rename Columns
names(RawData) <- c("DateReported", "ReportNumber", "DateOfOccurence", "TimeOfOccurence", "AreaCode", "AreaName", "Road", "CrimeCode", "CrimeDescription", "CaseStatusCode", "CaseStatus", "Location", "CrossStreet", "LatLong")
#Changing variables to appropriate data types
RawData$ReportNumber <- as.character(RawData$ReportNumber)
RawData$AreaCode <- as.factor(RawData$AreaCode)
RawData$AreaName <- as.factor(RawData$AreaName)
RawData$Road <- as.character(RawData$Road)
RawData$CrimeCode <- as.character(RawData$CrimeCode)
RawData$CaseStatusCode <- as.factor(RawData$CaseStatusCode)
RawData$CaseStatus <- as.factor(RawData$CaseStatus)
View(RawData)
#Formatting the column TimeOfOccurence
# Zero-pad to 4 digits (e.g. 930 -> "0930") then reformat as "HH:MM".
RawData$TimeOfOccurence <- sprintf("%04d",as.numeric(RawData$TimeOfOccurence))
RawData$TimeOfOccurence <- format(strptime(RawData$TimeOfOccurence, format = "%H%M", tz = "America/Los_Angeles"), "%H:%M")
#Creating separate Latitude and Longitude columns
# Extract the "(lat, lon)" pair from the free-text LatLong field, then split it.
RawData$LatLong <- str_extract(RawData$LatLong, "[-]*\\d+.\\d+[,]\\s[-]*\\d+.\\d+")
RawData <- RawData %>% separate(LatLong, c("Latitude", "Longitude"), sep = "[,]\\s", extra = "drop", remove = TRUE)
#Separate Time
# Note: Hour/Min are character columns (e.g. "07") after separate().
RawData <- RawData %>% separate(TimeOfOccurence, c("Hour", "Min"), sep = ":", extra = "drop", remove = FALSE)
RawData <- RawData %>% distinct
View(RawData)
#date
RawData$DateOfOccurence <- mdy(RawData$DateOfOccurence)
#Creating a month column
RawData$MonthOfOccurence <- month(RawData$DateOfOccurence, label = TRUE)
#Creating new column for the day of week
RawData$DayOfWeek <- wday(RawData$DateOfOccurence, label = TRUE)
# Bucket free-text crime descriptions into broad categories; the first
# matching pattern wins, so the order of the nested ifelse() calls matters.
RawData$crime_category <- ifelse(grepl("THEFT*|PICKPOCKET|STOLEN*", RawData$CrimeDescription), "THEFT",
ifelse(grepl("BURGLARY*", RawData$CrimeDescription), "BURGLARY",
ifelse(grepl("HOMICIDE*", RawData$CrimeDescription), "HOMICIDE",
ifelse(grepl("ROBBERY*",RawData$CrimeDescription), "ROBBERY",
ifelse(grepl("ASSAULT*",RawData$CrimeDescription), "ASSAULT",
ifelse(grepl("VANDALISM*", RawData$CrimeDescription), "VANDALISM",
ifelse(grepl("TRAFFIC*", RawData$CrimeDescription), "TRAFFIC_RELATED",
ifelse(grepl("SEXUAL*", RawData$CrimeDescription),"SEXUAL","OTHER"))))))))
#shifts
# Categorize an hour of the day (0-23) into one of four shift buckets.
# The hour is coerced to numeric first: the Hour column produced by
# separate() above is character (e.g. "07"), and comparing strings with
# >= / < is lexicographic, which mis-bucketed most hours.
timeoftheday <- function(hour) {
  hour <- as.numeric(hour)
  if (hour >= 0 && hour < 7) { return (as.factor("12AM - 7AM"))}
  # Was `hour < 11`, which silently dropped hour 11 into the 7PM-12AM
  # bucket even though the label says this bucket runs to 12PM.
  else if (hour >= 7 && hour < 12) { return (as.factor("7AM-12PM"))}
  else if (hour >= 12 && hour < 19) { return (as.factor("12PM-7PM"))}
  else return (as.factor("7PM-12AM"))
}
# Apply the shift bucketing to every record's hour.
RawData$TimeOftheDay <- sapply(RawData$Hour,timeoftheday)
View(RawData)
#Hourly Distribution
ggplot(RawData, aes(x = Hour)) +
geom_histogram(binwidth = 1, aes(fill = ..count..))+
scale_fill_gradient("Count", low = "green", high = "red")
#Weekly Distribution
ggplot(RawData, aes(x = DayOfWeek)) +
geom_bar(aes(fill = ..count..))+
scale_fill_gradient("Count", low = "green", high = "red")
#Hourly Dist through each day of the week
# Same hourly histogram, faceted into one row per weekday.
ggplot(RawData, aes(x = Hour)) +
geom_histogram(binwidth = 1, aes(fill = ..count..))+
scale_fill_gradient("Count", low = "green", high = "red") +
facet_wrap(~DayOfWeek, nrow = 7)
# All records are from Los Angeles, so tag the state as a constant.
RawData$state <- "CA"
|
ee0a2b90cbc254594b40a43f1e8fc017bd53ad77
|
781615c49118f65be9286f1339067a88c8111b46
|
/R/psmc2history.R
|
169de95cdaa17e475af24008ed2cd3eb82a02331
|
[
"MIT"
] |
permissive
|
P2C2M/P2C2M_PSMC
|
b0512c611cc5e18e96dc406a51287562c2abe77f
|
0f872cbaa867cf43a0d7ad1cd8ebcf5f0487e4c1
|
refs/heads/master
| 2022-09-04T12:17:08.115460
| 2020-05-19T12:51:59
| 2020-05-19T12:51:59
| 265,243,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,734
|
r
|
psmc2history.R
|
##### Function for converting psmc output file to history file #####
### Testing ###
# file_name = "French_upper.psmc" # name of psmc output file
# n_iterations = 20
# u = 1.1e-08
psmc2history <- function(file_name, n_iterations, gen, u, from_file, save_output){ # file_name = name of PSMC output file or list of lines from output, n_iterations = which iteration of the PSMC to pull values from, gen = generation time (years), u = mutation rate (subs/site/gen), from_file = whether PSMC information should be read from a file, save_output = whether to save history to a file. Note: don't use save_output with from_file = FALSE
# Step 1: obtain the raw PSMC output lines and construct the psmc S4 object
# (psmc() is a constructor defined elsewhere in this package).
if (from_file == TRUE){
psmc_file <- file(file_name, "r+")
psmc_lines <- suppressWarnings(readLines(psmc_file)) # read in psmc output file
close(psmc_file)
psmc_obj <- psmc(name = file_name, gen = gen, u = u, n_iterations = n_iterations) # create psmc class object
psmc_obj@truncated <- unlist(strsplit(psmc_obj@name, ".psmc")[[1]])
} else{
psmc_lines <- file_name
psmc_obj <- psmc(gen = gen, u = u, n_iterations = n_iterations) # create psmc class object
}
# Step 2: parse the "MM" metadata lines for the interval pattern and the
# number of free lambda parameters.
psmc_meta <- psmc_lines[which(lapply(psmc_lines, function(x) strsplit(x, "\t")[[1]][1]) == "MM")][1:5] # get metadata lines
psmc_meta <- lapply(psmc_meta, function(x) strsplit(x, "\t")[[1]][2]) # remove "MM"
psmc_obj@pattern <- unlist(strsplit(unlist(strsplit(psmc_meta[[2]][1], ","))[1], ":")[[1]][2]) # get pattern of atomic intervals
psmc_obj@n_free_lambdas <- as.numeric(unlist(strsplit(unlist(strsplit(psmc_meta[[2]][1], ","))[3], ":")[[1]][2])) # get number of free lambda variables
#n_iterations <- as.numeric(unlist(strsplit(unlist(strsplit(psmc_meta[[3]][1], ","))[1], ":")[[1]][2])) # get number of iterations
# Step 3: extract the requested iteration (iterations are delimited by "//"
# lines) and pull theta, rho, and the per-interval lambda estimates from it.
iteration <- psmc_lines[(which(psmc_lines == "//")[psmc_obj@n_iterations] + 1): (which(psmc_lines == "//")[psmc_obj@n_iterations + 1] - 1)] # get lines from last iteration
psmc_obj@theta <- as.numeric(strsplit(iteration[6], "\t")[[1]][2]) # get theta
psmc_obj@rho <- as.numeric(strsplit(iteration[6], "\t")[[1]][3]) # get rho
curve_lines <- iteration[9:(length(iteration) -1)] # get lines with PSMC values
curve_values <- lapply(curve_lines, function(x) as.list(unlist(strsplit(x, "\t")))) # split lines by \t
curve_df <- as.data.frame(do.call("rbind", curve_values)) # combine PSMC values into df
lambda_rows <- lapply(unique(curve_df[,4]), function(x) which(curve_df[,4] == x)[1]) # get indices of start times for each atomic interval
lambda_df <- curve_df[unlist(lambda_rows),] # get df with start times for each atomic interval
psmc_obj@lk0 <- as.numeric(unlist(lambda_df[1,4])) # get lambda at time 0
psmc_obj@N0 <- as.numeric((psmc_obj@theta / 4 / psmc_obj@u) * psmc_obj@lk0) # get Ne at time 0
# Step 4: helpers to rescale PSMC's relative times/lambdas into absolute
# times and effective population sizes. Passing u = 0 returns the raw
# (unscaled) values, used for the base_* slots below.
calc_time <- function(t_k, N0, lk0, u){ # t_k = time at interval k, N0 = Ne at time 0, lk0, lambda value at time 0, u = mutation rate
if (u <= 0){
time <- t_k
} else{
time <- (as.numeric(t_k) * (2 * N0)) / lk0 # calculate time of interval
}
return(time)
}
calc_ne <- function(lk, N0, lk0, u){ # lk = lambda at time interval k, N0 = Ne at time 0, lk0 = lambda value at time 0, u = mutation rate
if (u <= 0){
ne <- lk
} else{
ne <- (as.numeric(lk) * N0) / lk0 # calculate Ne of interval
}
return(ne)
}
psmc_obj@base_times <- as.numeric(sapply(seq(1, psmc_obj@n_free_lambdas), function(x) calc_time(lambda_df[x,3], psmc_obj@N0, psmc_obj@lk0, 0)))
psmc_obj@base_nes <- as.numeric(sapply(seq(1, psmc_obj@n_free_lambdas), function(x) calc_ne(lambda_df[x,4], psmc_obj@N0, psmc_obj@lk0, 0)))
psmc_obj@times <- as.numeric(sapply(seq(1, psmc_obj@n_free_lambdas), function(x) calc_time(lambda_df[x,3], psmc_obj@N0, psmc_obj@lk0, psmc_obj@u))) # calculate times
psmc_obj@nes <- as.numeric(sapply(seq(1, psmc_obj@n_free_lambdas), function(x) calc_ne(lambda_df[x,4], psmc_obj@N0, psmc_obj@lk0, psmc_obj@u))) # calculate Ne values
# Step 5: optionally write a .history file with a T/R/N parameter header
# followed by one "H time<TAB>ne" line per interval.
if (save_output == TRUE){
param_lines <- c(sprintf("T %s", psmc_obj@theta), sprintf("R %s", psmc_obj@rho), sprintf("N %s", psmc_obj@n_free_lambdas)) # lines for head of file
est_lines <- mapply(function(x,y) sprintf("H %s\t%s", x, y), psmc_obj@times, psmc_obj@nes, SIMPLIFY = TRUE) # create interval lines
hist_lines <- append(param_lines, est_lines, after = length(param_lines)) # combine lines
new_file_name <- paste0(psmc_obj@truncated, sprintf("_n%s.history", n_iterations)) # create new file name
history <- file(new_file_name, "w+") # create history file
writeLines(hist_lines, history) # write history lines
close(history)
}
return(psmc_obj)
}
|
3d96dcb740a73a92d1faaf12a6be4e8184a9c409
|
fbb65f366b801846471ecd30c5a3c03f29618027
|
/man/ExampleAnswers.Rd
|
eab4ee4e7a656a376903544e5ae5549e103c6742
|
[] |
no_license
|
ReneMayer/questionnaire
|
55cb8cc13cf31e6725aeb0dd256a5049d722e9ec
|
a5ff66455285572e317818fe775f5ae5bea60347
|
refs/heads/master
| 2021-01-21T13:08:39.552796
| 2014-04-11T05:02:12
| 2014-04-11T05:02:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
rd
|
ExampleAnswers.Rd
|
\docType{data}
\name{ExampleAnswers}
\alias{ExampleAnswers}
\title{norm: items aggregated within scales}
\format{a \code{data.frame} instance, 1 row per item (115 total).}
\source{
simulated data set
}
\description{
This data set contains example responses to a launched
questionnaire object. The data would be the output from
the function calls questionnaire(), launch() and the
responses assumed to be stored in an object with
get.answers().
}
\keyword{datasets}
|
cf43bd97d45a621ba9c327f8d4df0561d08606cc
|
983c9935444c604a1634eb98b8ab3d4e3fc188cf
|
/pca/in_class_pca.r
|
5dc367ee86771b1e02b74961ea82acb274a66be0
|
[] |
no_license
|
37chandler/applied-data-analytics
|
8929b36e617ea40ea2228cd12c4809b87230599d
|
d771c42e29aeb13d7ad2ffe042b45eff75f1fdf7
|
refs/heads/master
| 2021-01-25T17:10:36.974393
| 2019-11-20T22:07:31
| 2019-11-20T22:07:31
| 203,245,280
| 0
| 2
| null | 2019-09-02T20:03:25
| 2019-08-19T20:26:28
|
HTML
|
UTF-8
|
R
| false
| false
| 3,156
|
r
|
in_class_pca.r
|
# Here's a PCA on some Wedge Data. Give
# this a look. Can you explain what's going
# on here?
library(dplyr)
library(ggplot2)
library(reshape2)
library(scales)
d <- readr::read_tsv("owner_level_top_prod_sales.txt")
# Owner 3 is excluded (presumably a house/staff account — confirm with the data's docs).
d <- d %>%
filter(owner != 3)
# To make our code run a bit faster and get rid of some extreme
# values, let's total up the amount spent on the top products and
# cut down our data.
# NOTE(review): columns 2:1000 assume exactly 999 product columns after the
# owner column — confirm against the input file's layout.
total.spend <- data.frame(owner=d$owner,
spend=rowSums(d[,2:1000]))
ggplot(total.spend,
aes(x=spend)) +
geom_density() +
scale_x_log10(label=dollar)
quantile(total.spend$spend,
prob=0:10/10)
# Let's cutoff at $15,000
mean(total.spend$spend < 15000)
# Keep only owners whose total spend is under the cutoff.
d <- d %>%
filter(owner %in%
(total.spend %>%
filter(spend < 15000) %>%
pull(owner)))
# Run PCA on the spend columns (owner column dropped).
pca1 <- prcomp(d[,-1])
# Build a scree-style plot: per-component standard deviation plus the
# cumulative share of variance explained.
for.plot <- data.frame(sd=pca1$sdev)
for.plot <- for.plot %>%
mutate(eigs=sd^2) %>%
mutate(cume.var = cumsum(eigs/sum(eigs)),
id=1:n())
names(for.plot) <- c("Standard Deviation","eigs",
"Cumulative Variance","id")
for.plot <- melt(for.plot,
id.vars = "id")
ggplot(for.plot %>% filter(variable != "eigs"),
aes(x=id,y=value)) +
geom_line() +
facet_grid(variable ~ .,
scales="free") +
theme_bw() +
labs(y="Variance",
x="Component Number")
# Cap how many loadings we print per component.
if (ncol(d) > 31){
max.col <- 20
} else {
max.col <- ncol(d)
}
# Inspect the largest positive and negative loadings on the first four PCs.
sort(pca1$rotation[,1],decreasing = T)[1:max.col]
sort(pca1$rotation[,1],decreasing = F)[1:max.col]
sort(pca1$rotation[,2],decreasing = T)[1:(max.col/2)]
sort(pca1$rotation[,2],decreasing = F)[1:(max.col/2)]
sort(pca1$rotation[,3],decreasing = T)[1:(max.col/2)]
sort(pca1$rotation[,3],decreasing = F)[1:(max.col/2)]
sort(pca1$rotation[,4],decreasing = T)[1:(max.col/2)]
sort(pca1$rotation[,4],decreasing = F)[1:(max.col/2)]
# Let's build derived variables from these components.
# first, let's illustrate the idea.
pc1.loadings <- pca1$rotation[,1] # loadings on first PC
# Owner 19682 spent a lot ($35519.11), Owner 49219 spent very little ($3.08).
# Let's look at their scores on PCA 1
as.numeric(d[d$owner=="19682",2:1000]) %*% pc1.loadings
# %*% is matrix multiplication in R
as.numeric(d[d$owner=="49219",2:1000]) %*% pc1.loadings
# we can do this en masse and add columns to d
# based on the PCs
# Project every owner onto the first num.pcs components, storing each score
# vector as a new score_PC<i> column.
num.pcs <- 5
for(i in 1:num.pcs) {
col.name <- paste0("score_PC",i)
d[,col.name] <- as.matrix(d[,2:1000]) %*% pca1$rotation[,i]
}
ggplot(d %>% sample_frac(0.1),
aes(x=score_PC3,y=score_PC4)) +
geom_point(alpha=0.2) +
theme_minimal()
# Interesting, some crazy outlier there. Let's look at them
d %>%
filter(score_PC4 > 400) %>%
select(owner,score_PC4) %>%
arrange(as.numeric(score_PC4))
# Drill into one flagged owner's largest line items.
d %>%
filter(owner==50028) %>%
melt(id.vars="owner") %>%
arrange(value) %>%
tail(n=20)
# this person spent a *ton* on deli stuff. Seems weird
# we could remove some of these extreme values
# (I wouldn't call them "outliers") and maybe
# get better a better PCA. right now we may be pulling off
# just a handful of people with each dimension.
|
761bbddc3e6ecb3c86c456030b0886dfceb3e321
|
e1cbbf8791b0ac6d40f6d5b397785560105441d9
|
/man/partri.Rd
|
b295705447fa22195fe6c7f89e0d7fc36a875123
|
[] |
no_license
|
wasquith/lmomco
|
96a783dc88b67017a315e51da3326dfc8af0c831
|
8d7cc8497702536f162d7114a4b0a4ad88f72048
|
refs/heads/master
| 2023-09-02T07:48:53.169644
| 2023-08-30T02:40:09
| 2023-08-30T02:40:09
| 108,880,810
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,236
|
rd
|
partri.Rd
|
\name{partri}
\alias{partri}
\title{Estimate the Parameters of the Asymmetric Triangular Distribution}
\description{
This function estimates the parameters of the Asymmetric Triangular distribution given the L-moments of the data in an L-moment object such as that returned by \code{\link{lmoms}}. The relations between distribution parameters and L-moments are seen under \code{\link{lmomtri}}.
The estimation by the \code{partri} function is built around simultaneous numerical optimization of an objective function defined as
\deqn{\epsilon = \biggl(\frac{\lambda_1 - \hat\lambda_1}{\hat\lambda_1}\biggr)^2 + \biggl(\frac{\lambda_2 - \hat\lambda_2}{\hat\lambda_2}\biggr)^2 + \biggl(\frac{\tau_3 - \hat\tau_3}{1}\biggr)^2}
for estimation of the three parameters (\eqn{\nu}, minimum; \eqn{\omega}, mode; and \eqn{\psi}, maximum) from the sample L-moments (\eqn{\hat\lambda_1}, \eqn{\hat\lambda_2}, \eqn{\hat\tau_3}). The divisions shown in the objective function are used for scale removal to help make each L-moment order somewhat similar in its relative contribution to the solution. The coefficient of L-variation is not used because the distribution implementation by the \pkg{lmomco} package supports the entire real number line and the loss of definition of \eqn{\tau_2} at \eqn{x = 0}, in particular, causes untidiness in coding.
The function is designed to support both left- or right-hand right triangular shapes because of (1) \code{paracheck} argument availability in \code{\link{lmomtri}}, (2) the sorting of the numerical estimates if the mode is not compatible with either of the limits, and (3) the snapping of \eqn{\nu = \omega \equiv (\nu^\star + \omega^\star)/2} when \eqn{\hat\tau_3 > 0.142857} or \eqn{\psi = \omega \equiv (\psi^\star + \omega^\star)/2} when \eqn{\hat\tau_3 < 0.142857} where the \eqn{\star} versions are the optimized values if the \eqn{\tau_3} is very near to its numerical bounds.
}
\usage{
partri(lmom, checklmom=TRUE, ...)
}
\arguments{
\item{lmom}{An L-moment object created by \code{\link{lmoms}} or \code{\link{vec2lmom}}.}
\item{checklmom}{Should the \code{lmom} be checked for validity using the \code{\link{are.lmom.valid}} function. Normally this should be left as the default and it is very unlikely that the L-moments will not be viable (particularly in the \eqn{\tau_4} and \eqn{\tau_3} inequality). However, for some circumstances or large simulation exercises then one might want to bypass this check.}
\item{...}{Other arguments to pass.}
}
\value{
An \R \code{list} is returned.
\item{type}{The type of distribution: \code{tri}.}
\item{para}{The parameters of the distribution.}
\item{obj.val}{The value of the objective function, which is the error of the optimization.}
\item{source}{The source of the parameters: \dQuote{partri}.}
}
\author{W.H. Asquith}
\seealso{\code{\link{lmomtri}},
\code{\link{cdftri}}, \code{\link{pdftri}}, \code{\link{quatri}}
}
\examples{
lmr <- lmomtri(vec2par(c(10,90,100), type="tri"))
partri(lmr)
partri(lmomtri(vec2par(c(-11, 67,67), type="tri")))$para
partri(lmomtri(vec2par(c(-11,-11,67), type="tri")))$para
}
\keyword{distribution (parameters)}
\keyword{Distribution: Asymmetric Triangular}
\keyword{Distribution: Triangular}
|
abc5b3a233d4b4e5d5b2446c9aaa1877480c4253
|
50eda1d64a7a0dc2c12ecaf1149868e91498fd64
|
/R/AllGenerics.R
|
35c94f66933d2e45adbcb61792580af1fafac747
|
[
"MIT"
] |
permissive
|
hliu2016/bcbioRNASeq
|
19af39216ed640b77a44866429650c7eff9e9b77
|
ebca75ae614d4f757dd164d33760e35f44e3eff5
|
refs/heads/master
| 2021-05-06T10:20:07.691114
| 2017-12-11T23:57:36
| 2017-12-11T23:57:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,854
|
r
|
AllGenerics.R
|
#' S4 Generics
#'
#' Generic declarations for the package; methods are defined elsewhere.
#'
#' @rdname AllGenerics
#' @name AllGenerics
#' @keywords internal
#'
#' @param object Object.
#' @param x Object.
#' @param i An integer or numeric scalar.
#' @param value Value to assign.
#' @param withDimnames A `logical`, indicating whether dimnames should be
#'   applied to extracted assay elements.
#' @param ... *Additional arguments (for the S4 generic definition).*
#'
#' @return No value.
NULL

#' @rdname alphaSummary
#' @export
setGeneric("alphaSummary", function(object, ...) standardGeneric("alphaSummary"))

#' @rdname meltLog10
#' @export
setGeneric("meltLog10", function(object, ...) standardGeneric("meltLog10"))

#' @rdname plot53Bias
#' @export
setGeneric("plot53Bias", function(object, ...) standardGeneric("plot53Bias"))

#' @rdname plotCorrelationHeatmap
#' @export
setGeneric("plotCorrelationHeatmap", function(object, ...) standardGeneric("plotCorrelationHeatmap"))

#' @rdname plotCountDensity
#' @export
setGeneric("plotCountDensity", function(object, ...) standardGeneric("plotCountDensity"))

#' @rdname plotCountsPerGene
#' @export
setGeneric("plotCountsPerGene", function(object, ...) standardGeneric("plotCountsPerGene"))

#' @rdname plotDEGHeatmap
#' @export
setGeneric("plotDEGHeatmap", function(object, counts, ...) standardGeneric("plotDEGHeatmap"))

#' @rdname plotExonicMappingRate
#' @export
setGeneric("plotExonicMappingRate", function(object, ...) standardGeneric("plotExonicMappingRate"))

#' @rdname plotGenderMarkers
#' @export
setGeneric("plotGenderMarkers", function(object, ...) standardGeneric("plotGenderMarkers"))

#' @rdname plotHeatmap
#' @export
setGeneric("plotHeatmap", function(object, ...) standardGeneric("plotHeatmap"))

#' @rdname plotGeneSaturation
#' @export
setGeneric("plotGeneSaturation", function(object, counts, ...) standardGeneric("plotGeneSaturation"))

#' @rdname plotGenesDetected
#' @export
setGeneric("plotGenesDetected", function(object, counts, ...) standardGeneric("plotGenesDetected"))

#' @rdname plotIntronicMappingRate
#' @export
setGeneric("plotIntronicMappingRate", function(object, ...) standardGeneric("plotIntronicMappingRate"))

#' @rdname plotMappedReads
#' @export
setGeneric("plotMappedReads", function(object, ...) standardGeneric("plotMappedReads"))

#' @rdname plotMappingRate
#' @export
setGeneric("plotMappingRate", function(object, ...) standardGeneric("plotMappingRate"))

#' @rdname plotMeanSD
#' @export
setGeneric("plotMeanSD", function(object, ...) standardGeneric("plotMeanSD"))

#' @rdname plotPCACovariates
#' @export
setGeneric("plotPCACovariates", function(object, ...) standardGeneric("plotPCACovariates"))

#' @rdname plotRRNAMappingRate
#' @export
setGeneric("plotRRNAMappingRate", function(object, ...) standardGeneric("plotRRNAMappingRate"))

#' @rdname plotTotalReads
#' @export
setGeneric("plotTotalReads", function(object, ...) standardGeneric("plotTotalReads"))

#' @rdname plotVolcano
#' @export
setGeneric("plotVolcano", function(object, ...) standardGeneric("plotVolcano"))

#' @rdname prepareRNASeqTemplate
#' @export
setGeneric("prepareRNASeqTemplate", function(object, ...) standardGeneric("prepareRNASeqTemplate"))

#' @rdname resultsTables
#' @export
setGeneric("resultsTables", function(object, ...) standardGeneric("resultsTables"))

#' @rdname tmm
#' @export
setGeneric("tmm", function(object) standardGeneric("tmm"))

#' @rdname topTables
#' @inheritParams AllGenerics
#' @export
setGeneric("topTables", function(object, ...) standardGeneric("topTables"))

#' @rdname tpm
#' @export
setGeneric("tpm", function(object) standardGeneric("tpm"))
|
b77bdc9515e68ddb5a924a735026f3ba5ab7574d
|
5cf9bcc6478d95b3a8d15c398cea4f77c00c9307
|
/Loading data.R
|
611a717f2d3ba69c2e91a1803886cd41e0b79b7c
|
[] |
no_license
|
cianlarkin/Final-Project
|
e4f10b9439f8d0ce0ad83cb60a4b58568e9b61d8
|
75cc55a5dd6cc623fe0ddc75070defeb42114333
|
refs/heads/main
| 2023-04-20T15:59:16.419155
| 2021-05-16T13:33:49
| 2021-05-16T13:33:49
| 367,888,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,072
|
r
|
Loading data.R
|
library(tidyverse)
# Read one of the league CSV exports.
#
# Every file loaded by this script shares the same layout (comma-separated
# with a header row). A single helper removes the 17-fold repetition of the
# same read.csv() call; header = TRUE and sep = "," are read.csv()'s defaults
# but are spelled out so behaviour matches the original calls exactly.
load_league_csv <- function(path) {
  read.csv(path, header = TRUE, sep = ",")
}
#LOADING DATA
# NOTE(review): the "*PlayersIn.csv" files read below are only created by the
# write.csv() export step at the bottom of this script (after manual sorting),
# so on a fresh checkout the export step must be run first -- confirm the
# intended workflow.
PremierLeague2020 <- load_league_csv("english_premier_league_2020.csv")
PremierLeague2020PlayersIn <- load_league_csv("Prem2020PlayersIn.csv")
PremierLeague2019 <- load_league_csv("english_premier_league_2019.csv")
PremierLeague2019PlayersIn <- load_league_csv("Prem2019PlayersIn.csv")
PremierLeague2018 <- load_league_csv("english_premier_league_2018.csv")
PremierLeague2018PlayersIn <- load_league_csv("Prem2018PlayersIn.csv")
PremierLeague2017 <- load_league_csv("english_premier_league_2017.csv")
PremierLeague2016 <- load_league_csv("english_premier_league_2016.csv")
PremierLeague2015 <- load_league_csv("english_premier_league_2015.csv")
Championship2020 <- load_league_csv("english_championship_2020.csv")
Championship2020PlayersIn <- load_league_csv("Champ2020PlayersIn.csv")
Championship2019 <- load_league_csv("english_championship_2019.csv")
Championship2019PlayersIn <- load_league_csv("Champ2019PlayersIn.csv")
Championship2018 <- load_league_csv("english_championship_2018.csv")
Championship2018PlayersIn <- load_league_csv("Champ2018PlayersIn.csv")
Championship2017 <- load_league_csv("english_championship_2017.csv")
Championship2016 <- load_league_csv("english_championship_2016.csv")
Championship2015 <- load_league_csv("english_championship_2015.csv")
#EXPORTING DATASETS SO THEY CAN BE SORTED BY PLAYERS IN
write.csv(PremierLeague2020, "Prem2020PlayersIn.csv")
write.csv(PremierLeague2019, "Prem2019PlayersIn.csv")
write.csv(PremierLeague2018, "Prem2018PlayersIn.csv")
write.csv(Championship2020, "Champ2020PlayersIn.csv")
write.csv(Championship2019, "Champ2019PlayersIn.csv")
write.csv(Championship2018, "Champ2018PlayersIn.csv")
|
06d86120b3953e7a94d3486ddeec84e8cce7774b
|
d62b2953237ecf2d42e15582a8eec47443fd8ff1
|
/R/calculate_coverage.R
|
c583d68912bcd5a2ca77d5b704dc522ca6c517b2
|
[] |
no_license
|
MHi-C/MHiC
|
2b9c440c5bd3792c4db1cfba9fb7d7401efee472
|
06a1e7176b106e33570c59b17c113d4f118bff5f
|
refs/heads/master
| 2020-06-25T18:59:54.351246
| 2019-10-01T02:51:25
| 2019-10-01T02:51:25
| 199,396,268
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,716
|
r
|
calculate_coverage.R
|
# calculate_coverage: per-bin coverage from a table of pairwise interactions.
#
# `interactions` must have columns int1, int2 (bin identifiers) and
# `frequencies` (interaction counts). Coverage of a bin is the sum of
# frequencies over every row in which the bin appears as either endpoint,
# normalized by the grand total across all bins.
#
# Returns: if flag == TRUE, `interactions` with two added columns
# (coverage_source, coverage_target: the relative coverage of int1 and int2
# per row); if flag == FALSE, the named vector of relative coverages.
#
# Requires data.table to be attached by the caller (not loaded here).
calculate_coverage<-function(interactions,flag = TRUE){
#
##########################################################
# Very large inputs are processed in chunks of 1e8 rows; partial sums per
# chunk are combined with a second grouped sum. Presumably this bounds peak
# memory for the grouped aggregation -- TODO confirm rationale.
if(nrow(interactions)>1e8){
t <- ceiling(nrow(interactions)/1e8)
IList <- list()
IList[[1]] <- interactions[1:1e8,]
for(i in 2:t){
IList[[i]] <- interactions[(((i-1)*1e8)+1):min((i*1e8),nrow(interactions)),]
}
dtList <- lapply(IList, data.table)
# Per-chunk sums of frequencies keyed by each endpoint (column V1 holds sums).
covAs <- lapply(dtList, function(x) x[,sum(frequencies), by=int1])
covBs <- lapply(dtList, function(x) x[,sum(frequencies), by=int2])
covAm <- do.call(rbind, covAs)
covBm <- do.call(rbind, covBs)
# Collapse the per-chunk partial sums into one total per bin.
covA <- covAm[,sum(V1),by=int1]
covB <- covBm[,sum(V1),by=int2]
}
##########################################################
# Small inputs: a single grouped sum per endpoint suffices.
else{
binned_interactions=data.table(interactions)
covA <- binned_interactions[,sum(frequencies),by=int1]
covB <- binned_interactions[,sum(frequencies),by=int2]
}
##########################################################
# Merge the two endpoint totals on the bin id (covB's first column is renamed
# to int1 so both tables merge on the same name).
# NOTE(review): data.table::setkey() normally takes unquoted column names,
# e.g. setkey(covA, int1); the key='int1' form here is non-standard -- confirm
# it keys on the intended column.
covA <- setkey(covA,key='int1')
setnames(covB, 1,'int1')
covB <- setkey(covB,key='int1')
# Full outer join; bins appearing on only one side get 0 for the other side.
cov=merge(covA,covB,all.x=TRUE,all.y=TRUE,by='int1')
cov$V1.x[is.na(cov$V1.x)]=0
cov$V1.y[is.na(cov$V1.y)]=0
cov$coverage=cov$V1.x+cov$V1.y
coverage=cov$coverage
names(coverage)=cov$int1
# Normalize to relative coverage (fractions summing to 1).
sumcov <- sum(coverage)
relative_coverage <- coverage/sumcov
names(relative_coverage)=names(coverage)
# Annotate each interaction row with the relative coverage of its endpoints
# (lookup by name into the named vector).
interactions$coverage_source <- relative_coverage[interactions$int1]
interactions$coverage_target <- relative_coverage[interactions$int2]
##########################################################
if(flag){return(interactions)}
else{return(relative_coverage)}
}
|
51da781e0f748ac21349807fb5f6fa737825d50e
|
f3d1e8504b45792eb5efea527e253dccd338c2fe
|
/crossfit_prep/experimentation.R
|
908217190b059ca3b579d45088057bb82a15e640
|
[] |
no_license
|
vbernardes/crossfit-2015-eda
|
eb0e604cba2e4ed455df9560fad72cd005eff07c
|
a9aceeafd2739253189a6108a6614f4c6b763d39
|
refs/heads/master
| 2020-05-25T12:50:24.691173
| 2019-05-21T15:49:52
| 2019-05-23T19:37:26
| 187,806,820
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,017
|
r
|
experimentation.R
|
# Exploratory analysis of CrossFit 2015 Open data: load athlete profiles and
# leaderboard results, join them, and probe the score/rank relationship.
library(data.world)
library(tidyverse)
library(GGally)
athletes <- read.csv('bgadoci-crossfit-data/data/athletes.csv')
leaderboard <- read.csv('bgadoci-crossfit-data/data/leaderboard_15.csv')
#Using an inner join, match on athlete ids that are in both dataframes
cf <- leaderboard %>%
inner_join(athletes, by="athlete_id")
##########################################
filter(cf, stage == 1 & rank == 1)
str(cf)
###########
# Simple plots to get started
# Age histogram
ggplot(data = cf,
aes(x = age)) +
geom_histogram(binwidth = 1) +
coord_cartesian(xlim = c(15, 54)) +
scale_x_continuous(breaks = seq(15, 54, 5))
# Male height histogram
ggplot(data = subset(cf, gender == 'Male'),
aes(x = height)) +
geom_histogram(binwidth = 1) +
scale_x_continuous(limits = c(55, quantile(cf$height, .99, na.rm = T)),
breaks = seq(55, 100, 5))
# Male weight histogram
ggplot(data = subset(cf, gender == 'Male'),
aes(x = weight)) +
geom_histogram(binwidth = 5) +
scale_x_continuous(limits = c(100, 300))
# Male Rx score vs. height
ggplot(data = subset(cf, gender == 'Male' & scaled == 'false'),
aes(x = height, y = score)) +
geom_jitter(alpha = 1/25) +
scale_x_continuous(limits = c(55, quantile(cf$height, .99, na.rm = T)))
# Score vs. rank (should be highly correlated)
ggplot(data = cf[cf$stage == 3,],
aes(x = score, y = rank)) +
geom_jitter(aes(color = gender))
# what the hell is up with the above plot?
# It's not scale vs. Rx, I've checked it:
# Score vs. rank 2
ggplot(data = cf[cf$stage == 4 & cf$scaled == 'false',],
aes(x = score, y = rank)) +
geom_point(aes(color = gender))
# zooming in:
ggplot(data = cf[cf$stage == 4 & cf$scaled == 'false',],
aes(x = score, y = rank)) +
geom_jitter(aes(color = gender)) +
scale_y_continuous(limits = c(70000, 90000))
# Maybe it's age? Let's see:
# NOTE(review): `male.open15.4` (no dot after "open") is not defined anywhere
# in this file; a similarly named `male.open.15.4` is defined further down.
# This script appears to have been run interactively out of order -- confirm
# which object was intended.
ggplot(data = male.open15.4,
aes(x = score, y = rank)) +
geom_point(aes(color = age))
# Damn. It's not age either.
# Let's try another approach at scaled vs. Rx
ggplot(data = male.open15.4,
aes(x = score, y = rank)) +
geom_point(aes(color = scaled))
# GODAMMIT! I had tested it before!
# WHY?!:
ggplot(data = subset(cf, stage == 4 & scaled == 'false' & gender == 'Male'),
aes(x = score, y = rank)) +
geom_point(aes(color = gender))
# NOTE(review): `cf <- cf` is a no-op (likely a leftover from an edit).
cf <- cf
# Boolean convenience flag: TRUE when the athlete performed the Rx version.
cf$rx <- cf$scaled == 'false'
ggplot(data = subset(cf, stage == 4 & gender == 'Male' & rx),
aes(x = score, y = rank)) +
geom_jitter(aes(color = rx), height = 5000, alpha = 1/10) +
scale_x_continuous(limits = c(90, 110))
########################################
# Count distinct Latin American athletes in the joined table.
latin_america <- subset(cf, region == 'Latin America')
length(unique(latin_america$athlete_id))
#######################################
# ggpairs:
#pairs <- ggpairs(cf,
# columns = c('stage', 'rank', 'score', 'scaled', 'gender', 'age', 'height', 'weight'))
#######################################
# Male athletes' results for workout 15.4.
male.open.15.4 <- subset(cf, division == 'Male' & stage == '4')
# Score vs. age:
ggplot(male.open.15.4[male.open.15.4$category == 'Rx',],
aes(x = age, y = score)) +
geom_jitter(alpha = 1/10)
# Rank vs. age (first 10000):
ggplot(male.open.15.4[male.open.15.4$category == 'Rx' & male.open.15.4$rank <= 10000,],
aes(x = age, y = rank)) +
geom_jitter()
# Score vs. age and let's add affiliate (top 100):
ggplot(male.open.15.4[male.open.15.4$category == 'Rx' & male.open.15.4$rank <= 100,],
aes(x = age, y = score)) +
geom_jitter(aes(color = affiliate))
# Score vs. age vs. region (top 1000):
ggplot(male.open.15.4[male.open.15.4$category == 'Rx' & male.open.15.4$rank <= 1000,],
aes(x = age, y = score)) +
geom_jitter(aes(color = region)) +
scale_color_brewer() +
theme_dark()
########################################
# All Rx results (both divisions) for 15.4.
open.15.4.rx <- subset(cf, stage == '4' & category == 'Rx')
# Score vs. age vs. division:
ggplot(open.15.4.rx,
aes(x = age, y = score)) +
geom_jitter(aes(color = division), alpha = 1/50) +
scale_y_continuous(limits = c(0, 170))
# other approach:
# Score vs. age vs. division:
# NOTE(review): `age_bucket` is used here but only created three lines below;
# another sign this script was run interactively out of order.
ggplot(open.15.4.rx,
aes(x = age, y = score)) +
facet_wrap(~division) +
geom_jitter(alpha = 1/20, aes(color = age_bucket)) +
scale_y_continuous(limits = c(0, 170))
### Let's do age buckets:
# 5-year age bins from 15 to 55.
cf$age_bucket <- cut(cf$age, c(seq(15, 55, 5)))
########################################
# Let's take a look at some body measurements
# Score vs. height (exclude outliers)
ggplot(subset(open.15.4.rx, division == 'Male'),
aes(x = height, y = score)) +
geom_jitter(alpha = 1/10) +
geom_smooth() +
scale_x_continuous(limits = c(quantile(open.15.4.rx$height, .05, na.rm = T),
quantile(open.15.4.rx$height, .98, na.rm = T)))
########################################
# Let's try to see the light at the end of the tunnel with benchmarks
male.open.15.5.rx <- subset(cf,
stage == '5' & division == 'Male' & category == 'Rx')
# Score vs. Fran
ggplot(male.open.15.5.rx,
aes(x = fran, y = score)) +
geom_jitter(alpha = .8, aes(color = age_bucket)) +
scale_color_brewer() +
scale_x_continuous(limits = c(quantile(male.open.15.5.rx$fran, .02, na.rm = T),
quantile(male.open.15.5.rx$fran, .98, na.rm = T))) +
scale_y_continuous(limits = c(0, 1500))
cor.test(male.open.15.5.rx$fran, male.open.15.5.rx$score)
###
# Workout 15.1 (male Rx): relate score to strength benchmarks.
male.open.15.1.rx <- subset(cf,
stage == '1' & division == 'Male' & category == 'Rx')
# Score vs. snatch and deadlift
ggplot(male.open.15.1.rx,
aes(x = deadlift, y = score)) +
geom_jitter(alpha = 1/15) +
scale_x_continuous(limits = c(quantile(male.open.15.1.rx$deadlift, .02, na.rm = T),
quantile(male.open.15.1.rx$deadlift, .98, na.rm = T)))
ggplot(male.open.15.1.rx,
aes(x = snatch, y = score)) +
geom_jitter(alpha = 1/15) +
scale_x_continuous(limits = c(quantile(male.open.15.1.rx$snatch, .02, na.rm = T),
quantile(male.open.15.1.rx$snatch, .98, na.rm = T)))
cor.test(male.open.15.1.rx$snatch, male.open.15.1.rx$score)
### Let's subset only people that have a snatch benchmark
# Stage '1.1' is the 15.1a lift portion of workout 15.1.
male.open.15.1a.rx <- subset(cf,
stage == '1.1' &
division == 'Male' &
category == 'Rx')
ggplot(male.open.15.1a.rx,
aes(x = snatch, y = score)) +
geom_jitter(alpha = 1/10) +
scale_x_continuous(limits = c(quantile(male.open.15.1a.rx$snatch, .01, na.rm = T),
quantile(male.open.15.1a.rx$snatch, .99, na.rm = T)))
cor.test(male.open.15.1a.rx$snatch, male.open.15.1a.rx$score)
ggplot(male.open.15.1a.rx,
aes(x = deadlift, y = score)) +
geom_jitter(alpha = 1/15) +
scale_x_continuous(limits = c(quantile(male.open.15.1a.rx$deadlift, .01, na.rm = T),
quantile(male.open.15.1a.rx$deadlift, .99, na.rm = T)))
ggplot(male.open.15.1a.rx,
aes(x = grace, y = score)) +
geom_jitter(alpha = 1/20) +
scale_x_continuous(limits = c(quantile(male.open.15.1a.rx$grace, .01, na.rm = T),
quantile(male.open.15.1a.rx$grace, .99, na.rm = T)))
cor.test(male.open.15.1a.rx$grace, male.open.15.1a.rx$score)
### Top 10000 for 15.1a:
grace.15.1a <- subset(male.open.15.1a.rx,
!is.na(grace) & rank <= 10000)
ggplot(grace.15.1a,
aes(x = grace, y = score)) +
geom_jitter(aes(color = age_bucket)) +
scale_color_brewer() +
scale_x_continuous(limits = c(quantile(grace.15.1a$grace, .01, na.rm = T),
quantile(grace.15.1a$grace, .99, na.rm = T)))
#############
# 15.2:
male.open.15.2.rx <- subset(cf,
stage == '2' &
division == 'Male' &
category == 'Rx' &
!is.na(pullups))
ggplot(male.open.15.2.rx,
aes(x = pullups, y = score)) +
geom_jitter(alpha = 1/10) +
scale_x_continuous(limits = c(quantile(male.open.15.2.rx$pullups, .01, na.rm = T),
quantile(male.open.15.2.rx$pullups, .99, na.rm = T)))
##############
# General pullups vs. score on 15.2
ggplot(subset(cf, !is.na(pullups) & stage == '2'),
aes(x = pullups, y = score)) +
geom_jitter(alpha = 1/10, aes(color = division)) +
scale_x_continuous(limits = c(quantile(cf$pullups, .01, na.rm = T),
quantile(cf$pullups, .99, na.rm = T)))
############################
# Try to get overall rank
# Overall rank per athlete = rank of the sum of per-stage ranks (lower sum =
# better), computed separately per division.
overallrank_male <- cf[cf$category == 'Rx' & cf$division == 'Male', ] %>%
group_by(athlete_id) %>%
summarise(name = first(name),
point_total = sum(rank))
overallrank_male$overall_rank <- rank(overallrank_male$point_total)
overallrank_male$overall_rank <- as.integer(overallrank_male$overall_rank)
overallrank_female <- cf[cf$category == 'Rx' & cf$division == 'Female', ] %>%
group_by(athlete_id) %>%
summarise(name = first(name),
point_total = sum(rank))
overallrank_female$overall_rank <- rank(overallrank_female$point_total)
overallrank_female$overall_rank <- as.integer(overallrank_female$overall_rank)
# Join rank on main table
# Both joins add an overall_rank column; the two suffixed columns are then
# coalesced (an athlete appears in only one division's ranking).
cf_rank <- cf %>%
left_join(overallrank_male[, c('athlete_id', 'overall_rank')], by='athlete_id')
cf_rank <- cf_rank %>%
left_join(overallrank_female[, c('athlete_id', 'overall_rank')], by='athlete_id')
cf_rank$overall_rank.x <- ifelse(!is.na(cf_rank$overall_rank.y),
cf_rank$overall_rank.y,
cf_rank$overall_rank.x)
cf_rank$overall_rank.y <- NULL
names(cf_rank)[names(cf_rank) == 'overall_rank.x'] <- 'overall_rank'
###################################
# One row per athlete (first occurrence kept).
unique_athletes <- cf[!duplicated(cf$athlete_id),]
# let's try to get something vs. overall rank
# NOTE(review): this plot uses `cf`, but overall_rank was added to `cf_rank`,
# not `cf` -- presumably `cf_rank` was intended; confirm.
ggplot(subset(cf, category == 'Rx' & stage == 1),
aes(x = score, y = overall_rank)) +
geom_jitter(alpha = 1/20)
###################################
# 15.2 male Rx: score vs. age with a mean-score trend line.
ggplot(male.open.15.2.rx,
aes(x = age, y = score)) +
geom_jitter(alpha = 1/10, color = 'orange') +
geom_line(stat = 'summary', fun.y = mean, color = 'blue')
###################################
# 15.5 male Rx: score vs. weight rounded to 5-lb bins.
ggplot(male.open.15.5.rx,
aes(x = 5*round(weight/5), y = score)) +
#scale_color_gradient(low = "red", high = "blue") +
scale_color_brewer() +
scale_x_continuous(limits = c(quantile(unique_athletes$weight, .01, na.rm = T),
quantile(unique_athletes$weight, .99, na.rm = T))) +
scale_y_continuous(limits = c(250, 1500)) +
geom_jitter(alpha = 1/3, aes(color = age_bucket)) +
geom_line(stat = 'summary', fun.y = mean, color = 'blue')
####### Elite athletes
elite <- subset(cf, rank <= 100 & category == 'Rx')
ggplot(subset(elite, stage == 2),
aes(x = fran, y = score)) +
geom_jitter(aes(color = division)) +
scale_x_continuous(limits = c(100, 300))
######## Top teams
# ???
###### How long
# Freq. table of how_long
sort(table(athletes$howlong), decreasing = T)
# NOTE(review): `valid_how_long` is used here but defined three lines below --
# out-of-order interactive execution again.
ggplot(athletes[athletes$howlong %in% valid_how_long,], aes(x = howlong)) +
geom_bar() +
coord_flip()
# Keep only single-valued howlong entries (multi-selections are excluded).
valid_how_long <- c('1-2 years|', '2-4 years|', '6-12 months|', 'Less than 6 months|', '4+ years|')
unique_ath_temp <- unique_athletes %>%
left_join(athletes[, c('athlete_id', 'howlong')], by = 'athlete_id')
ggplot(subset(unique_ath_temp, howlong %in% valid_how_long),
aes(x = howlong, y = overall_rank)) +
geom_jitter(alpha = 1/25)
# Attach experience (howlong) to the main table, normalize invalid values to
# NA, and impose an ordered factor from least to most experience.
cf_howlong <- cf %>%
left_join(athletes[, c('athlete_id', 'howlong')], by = 'athlete_id')
cf_howlong[!(cf_howlong$howlong %in% valid_how_long), ]$howlong <- NA
cf_howlong$howlong <- factor(cf_howlong$howlong)
levels(cf_howlong$howlong) <- ordered(c('Less than 6 months|', '6-12 months|', '1-2 years|', '2-4 years|', '4+ years|'))
# Female Rx 15.3: bottom-10 scores per filthy50 benchmark value.
ggplot(subset(cf_howlong,
category == 'Rx' &
stage == 3 &
division == 'Female' &
!is.na(howlong)) %>%
group_by(filthy50) %>%
top_n(-10, score),
aes(x = filthy50, y = score)) +
geom_point(alpha = 1, aes(size = weight, color = age)) +
#scale_y_continuous(limits = c(0, 55000)) +
#scale_color_brewer(palette = 'Reds') +
facet_wrap(~stage, scales = 'free') +
scale_x_continuous(limits = c(50, 1000)) +
scale_color_gradient(low = 'red', high = 'blue')
# NOTE(review): `open.15.3` is never defined in this file -- presumably it was
# created in an earlier interactive session; confirm before re-running.
top10.15.3.byage <- subset(open.15.3, division == 'Female') %>%
group_by(age) %>%
top_n(10, score)
####################################
# Rx vs scaled
ggplot(cf,
aes(x = score)) +
geom_histogram(binwidth = 5,
aes(fill = category)) +
facet_wrap(c('stage', 'division'),
scales = 'free')
####################################
# 15.3: Rx vs scaled
ggplot(subset(cf, stage == 3),
aes(x = score)) +
geom_histogram(binwidth = 10, aes(fill = category)) +
facet_wrap(~division, scales = 'free')
###################################
# Strength vs. results on workouts
# Use each athlete's 15.1a (lift) score as a "strength" covariate for every
# other workout by joining it back on athlete_id as score1.1.
cf_strength <- as.data.frame(cf[cf$stage == 1.1, c('score', 'athlete_id')])
names(cf_strength)[ names(cf_strength) == 'score'] <- 'score1.1'
cf_str <- cf
cf_str <- left_join(cf_str, cf_strength, by = 'athlete_id')
ggplot(subset(cf_str, category == 'Rx'),
aes(x = score1.1, y = score)) +
geom_jitter(alpha = 1/250, aes(color = division)) +
geom_smooth() +
facet_wrap(c('stage', 'division'), scales = 'free') +
scale_x_continuous(limits = c(quantile(cf_str$score1.1, .05, na.rm = T),
quantile(cf_str$score1.1, .98, na.rm = T)))
# General correlation between strength and all workouts
cor.test(cf_str$score1.1, cf_str$score)
str.male.open.15.5.rx <- subset(cf_str, division == 'Male' &
stage == 5 &
category == 'Rx')
cor.test(str.male.open.15.5.rx$score1.1, str.male.open.15.5.rx$score)
cor.test(str.male.open.15.5.rx$weight, str.male.open.15.5.rx$score)
cor.test(str.male.open.15.5.rx$height, str.male.open.15.5.rx$score)
##### Overall Rank by Scores
ggplot(subset(cf_rank, category == 'Rx'),
aes(x = score, y = overall_rank)) +
geom_jitter(alpha = 1/150, aes(color = division)) +
geom_smooth() +
facet_wrap(c('division', 'stage'), scales = 'free')
##### Overall Rank by Strength
# Relate the 15.1a lift score to overall rank (top ~75k only).
cf_rank_str <- subset(cf_rank, stage == 1.1 & category == 'Rx' & overall_rank < 75000)
ggplot(cf_rank_str,
aes(x = score, y = overall_rank)) +
geom_jitter(alpha = 1/150, aes(color = division)) +
geom_smooth() +
facet_wrap(~division)
male_cf_rank_str <- subset(cf_rank_str, division == 'Male')
cor.test(male_cf_rank_str$score, male_cf_rank_str$overall_rank)
####### How long
ggplot(subset(unique_athletes, !is.na(howlong)),
aes(x=howlong)) +
geom_bar()
######## 15.1a -> weight
# NOTE(review): `open.15.1a.rx` is not defined in this file (only the male
# subset `male.open.15.1a.rx` is) -- presumably created interactively; confirm.
ggplot(data = open.15.1a.rx,
aes(x = 5*round(weight/5), y = score)) +
geom_line(stat = 'summary',
fun.y = mean,
aes(color = division)) +
scale_color_brewer(palette = 'Set1') +
scale_x_continuous(limits = c(quantile(unique_athletes$weight, .01, na.rm = T),
quantile(unique_athletes$weight, .99, na.rm = T)))
cor.test(open.15.1a.rx$weight, open.15.1a.rx$score)
########## Age vs. overall rank
ggplot(subset(cf_rank, category == 'Rx'),
aes(x = age, y = overall_rank)) +
geom_jitter(alpha = 1/250, aes(color = division)) +
facet_wrap(~division) +
geom_smooth() +
scale_x_continuous(limits = c(quantile(unique_athletes$age, .03, na.rm = T),
quantile(unique_athletes$age, .97, na.rm = T)))
######## 15.5 Top 20
open.15.5.rx <- subset(cf, stage == '5' & category == 'Rx')
ggplot(subset(cf, category == 'Rx' & rank <= 100),
aes(x = age, y = score)) +
geom_jitter(aes(size = weight, color = division)) +
facet_wrap(~stage, scales = 'free')
######### Weight vs. deadlift
# (Plot actually shows weight vs. overall_rank with separate Rx/Scaled trends;
# the deadlift correlation follows below.)
ggplot(unique_athletes,
aes(x = weight, y = overall_rank)) +
geom_point(alpha = 1/2, aes(color = howlong)) +
scale_color_brewer() +
scale_x_continuous(limits = c(0, 400)) +
scale_y_continuous(limits = c(0, 700)) +
geom_smooth(data = subset(unique_athletes, category == 'Rx'),
color = 'red') +
geom_smooth(data = subset(unique_athletes, category == 'Scaled'),
color = 'blue') +
facet_wrap(~division, scales = 'free')
cor.test(unique_athletes[unique_athletes$division == 'Male',]$weight,
unique_athletes[unique_athletes$division == 'Male',]$deadlift)
######### 15.4: weight vs height vs score
ggplot(subset(open.15.4.rx,
division == 'Male' &
weight >= 100 &
weight <= 400),
aes(x = height, y = score)) +
geom_jitter(alpha = 1/20, aes(color = weight)) +
scale_color_gradient(low = 'red', high = 'green') +
geom_smooth() +
scale_x_continuous(limits = c(61, 80))
#### Athlete weight vs height
ggplot(subset(unique_athletes,
category == 'Rx' &
height >= 60 &
height <= 80),
aes(x = weight, y = height)) +
geom_jitter() +
geom_smooth() +
facet_wrap(~division, scales = 'free') +
scale_x_continuous(limits = c(quantile(unique_athletes$weight, .01, na.rm = T),
quantile(unique_athletes$weight, .99, na.rm = T)))
ggplot(subset(unique_athletes,
category == 'Rx' &
height >= 60 &
height <= 80),
aes(x = weight, y = height)) +
geom_boxplot() +
facet_wrap(~division, scales = 'free') +
scale_x_continuous(limits = c(quantile(unique_athletes$weight, .01, na.rm = T),
quantile(unique_athletes$weight, .99, na.rm = T)))
########
# 15.4: mean height per 10-point score bin, by division.
ggplot(open.15.4.rx,
aes(y = height, x = 10*round(score/10))) +
geom_line(stat = 'summary',
fun.y = mean,
aes(color = division)) +
scale_color_brewer(palette = 'Set1') +
scale_y_continuous(limits = c(62, 82))
|
6735295ef6815bf92364d60bf628ed21c1971630
|
ef6681b93278cbba78b89d5679c88517cdf70305
|
/init_script.R
|
a71f0a149916b303e7d9faa78f08f6529cd9b18b
|
[] |
no_license
|
lehmkudc/mtg-database
|
966f48f28023b828a738c81b065356ea934e59e3
|
43cb3a51df48cac8f2ba7e55ad1fd55ec1ad5ea2
|
refs/heads/master
| 2020-03-09T06:39:04.878364
| 2018-09-01T19:08:31
| 2018-09-01T19:08:31
| 128,599,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
init_script.R
|
# Entry-point script: loads dependencies and data for the MTG collection
# dashboard, then launches it.
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- generally
# discouraged in scripts; confirm the clean-slate behaviour is intentional.
rm( list = ls() )
options(stringsAsFactors = FALSE)
library(magrittr)
library(shiny)
library(RMySQL)
library(rhandsontable)
library(rlist)
library(rjson)
# Blank 20-row entry grid used as the dashboard's starting table (QTY and the
# other scalars are recycled to 20 rows by data.frame()).
empty <- data.frame( QTY=as.integer(0), CardName=rep('',20),
SetName = rep('',20), CNumber=rep('',20),
Notes = rep('',20), Mult=rep(1,20),
Price = rep(0,20), Fresh=rep('2010-01-01',20) )
# Reference data for card/set name lookups.
name_source <- readLines('mtg-database/data_prep/card_names.txt' )
set_source <- read.csv( 'mtg-database/data_prep/set_names.csv' )
set_source <- set_source$SetName
# Saved binder state (rlist serialization).
binders <- list.load( 'mtg-database/binders.rdata')
source( 'mtg-database/transactions.R' )
# NOTE(review): hard-coded absolute Windows path -- this will only work on one
# machine; consider an environment variable or relative path.
source( 'C:/Users/Dustin/Desktop/config.R')
source( 'mtg-database/dashboard_app.R')
# kill_connections() is defined in one of the sourced files (closes stale DB
# connections before launching).
kill_connections()
DF <- empty
dashboard()
|
5d85693bdfd2890e1aede80d9c118d087d4c5037
|
efe3bdc6afd1f111ece86b830fc92cc4bec8910e
|
/man/age_df_countries.Rd
|
218631be170a17d080dcd29d010e6d4bd8b7c6fc
|
[
"MIT"
] |
permissive
|
Bisaloo/contactdata
|
cd2b35db31ae2d32b52721bc13e01cc80a5e4484
|
444ba7569703863092ed488f8f4b572a6453c5e6
|
refs/heads/main
| 2023-04-11T15:34:34.543076
| 2023-03-22T11:19:38
| 2023-03-22T11:19:48
| 293,047,665
| 6
| 2
|
NOASSERTION
| 2023-09-05T11:32:09
| 2020-09-05T09:52:13
|
R
|
UTF-8
|
R
| false
| true
| 866
|
rd
|
age_df_countries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/age_countries.R
\name{age_df_countries}
\alias{age_df_countries}
\title{Get a data.frame (in long format) of population by age for multiple countries}
\usage{
age_df_countries(countries)
}
\arguments{
\item{countries}{A character string or a vector of character containing the
names of the countries for which to return contact data}
}
\value{
A data.frame (in long format) with 3 columns:
\itemize{
\item \code{country}: the country name
\item \code{age}: the age group
\item \code{population}: the number of people in this age group
}
}
\description{
Get a data.frame (in long format) of population by age for multiple countries
}
\examples{
age_df_countries(c("Austria", "Belgium"))
}
\references{
\url{https://www.census.gov/programs-surveys/international-programs/about/idb.html}
}
|
b7449fda2f83bbeb3dcbcf58a362104ec8577865
|
49e2a0b28e9398f788ad8e481fbbe4c223e6a196
|
/projektMOW/man/winequality-white.csv.Rd
|
7ec14cdf72c4c367d4d1afb432c308b20ec4d222
|
[] |
no_license
|
eatrunner/MOW
|
850e5d7d1e2f0f0c031c7a764eebf0fd6c870ef7
|
d80b306bf9d6d24b26c81c93a2fa9bb34cf77f97
|
refs/heads/master
| 2021-09-04T22:18:50.641509
| 2018-01-22T16:47:44
| 2018-01-22T16:47:44
| 112,388,294
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,253
|
rd
|
winequality-white.csv.Rd
|
\name{winequality-white.csv}
\alias{winequality-white.csv}
\docType{data}
\title{
White wine dataset.
}
\description{
Dataset was created, using white wine samples.
The inputs include objective tests (e.g. PH values)
and the output is based on sensory data
(median of at least 3 evaluations made by wine experts).
}
\usage{data("winequality-white.csv")}
\format{
A data frame with 4898 observations on the following 12 variables.
\describe{
\item{\code{fixedAcidity}}{fixed acidity}
\item{\code{volatileAcidity}}{volatile acidity}
\item{\code{citricAcid}}{citric acid}
\item{\code{residualSugar}}{residual sugar}
\item{\code{chlorides}}{chlorides}
\item{\code{freeSulfurDioxide}}{free sulfur dioxide}
\item{\code{totalSulfurDioxide}}{total sulfur dioxide}
\item{\code{density}}{density}
\item{\code{pH}}{pH}
\item{\code{sulphates}}{sulphates}
\item{\code{alcohol}}{alcohol}
\item{\code{quality}}{quality (score between 0 and 10)}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
}
\keyword{datasets}
|
417bf497f97e170476bd036ac294c4d527da6af9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Mqrcm/examples/predict.iMqr.Rd.R
|
3190ddc5632051f3d292a1daf9b3253e5c2de629
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,317
|
r
|
predict.iMqr.Rd.R
|
library(Mqrcm)
### Name: predict.iMqr
### Title: Prediction After M-Quantile Regression Coefficients Modeling
### Aliases: predict.iMqr
### Keywords: methods
### ** Examples
# using simulated data
n <- 250
x <- runif(n)
y <- rlogis(n, 1 + x, 1 + x)
# true quantile function: Q(p | x) = beta0(p) + beta1(p)*x, with
# beta0(p) = beta1(p) = 1 + log(p/(1 - p))
model <- iMqr(y ~ x, formula.p = ~ I(log(p)) + I(log(1 - p)))
# (fit asymmetric logistic distribution)
# predict beta(0.25), beta(0.5), beta(0.75)
predict(model, type = "beta", p = c(0.25,0.5, 0.75))
# predict the CDF and the PDF at new values of x and y
predict(model, type = "CDF", newdata = data.frame(x = c(.1,.2,.3), y = c(1,2,3)))
# computes the quantile function at new x, for p = (0.25,0.5,0.75)
predict(model, type = "QF", p = c(0.25,0.5,0.75), newdata = data.frame(x = c(.1,.2,.3)))
# simulate data from the fitted model
ysim <- predict(model, type = "sim") # 'newdata' can be supplied
# NOTE: data are generated using the fitted M-quantile function as if
# it was a quantile function. This means that the simulated data will
# have quantiles (and not M-quantiles) described by the fitted model.
# There is no easy way to generate data with a desired M-quantile function.
|
13624921f5b9ba81a60e428fa8883022ff97117b
|
9d1166d3ef82256211f215979fa60bef1b4b511f
|
/R/server.R
|
2c205d36bcd6f0ef23f7eed2521ef94ebdfcdb09
|
[] |
no_license
|
antonioricardojr/vereador
|
cfccbcf467ab72bae09b41440913a422727c447c
|
e10fb41b24e9f0c32da9958a6044936903bce250
|
refs/heads/master
| 2020-09-14T07:25:53.182817
| 2016-09-12T22:17:51
| 2016-09-12T22:17:51
| 66,152,429
| 1
| 0
| null | 2016-08-28T22:12:15
| 2016-08-20T15:00:13
|
HTML
|
UTF-8
|
R
| false
| false
| 5,227
|
r
|
server.R
|
library(dplyr, warn.conflicts = F)
library(stringi, warn.conflicts = F)
library(RPostgreSQL)
library(lubridate, warn.conflicts = F)
library(purrr, warn.conflicts = F)
library(futile.logger)
source("data_access.R")
source("vereadores_logic.R")
camara_db = start_camara_db()
#* @get /ementas/contagem
get_theme_count = function(count_by = "tema", apenas_legislacao = FALSE){
#' Conta as ementas por mês.
#' TODO: retornamos apenas a partir de 2013.
traducao = list("tema" = "main_theme",
"situacao" = "situation",
"tipo" = "tipo_ato",
"tipo_detalhado" = "ementa_type")
count_by = traducao[[count_by]]
if (is.null(count_by)) {
stop("count_by não suportado")
}
apenas_legislacao = as.logical(apenas_legislacao)
if (is.na(apenas_legislacao)) {
stop("valor não suportado para apenas_legislacao")
}
t1 = proc.time()
answer = get_sumario_no_tempo(camara_db, count_by, apenas_legislacao = apenas_legislacao)
flog.info(sprintf("GET contagem demorou %gs", (proc.time() - t1)[[3]]))
names(answer)[2] = "count_by"
return(answer)
}
#* @get /ementas/radial
get_weekly_radialinfo = function(){
#' Retorno dia, min, media, máx, aprovados
ementas = get_ementas_all(camara_db) %>%
mutate(weekly = floor(yday(published_date) / 7)) %>%
sumariza_no_tempo("situation", "weekly")
answer = ementas %>%
group_by(time) %>%
summarise(
min = min(count),
max = max(count),
media = mean(count)
)
aprovados = ementas %>%
filter(situation == "APROVADO") %>%
group_by(time) %>%
summarise(aprovados = sum(count))
return(left_join(answer, aprovados))
}
#* @get /vereadores
get_vereador = function(id = NA, ano_eleicao = 2012){
id = as.numeric(id)
ano_eleicao = as.numeric(ano_eleicao)
vereador = get_vereadores(camara_db, id, ano_eleicao)
return(vereador)
}
#* @get /vereadores/ementas
get_vereador_ementas = function(id_candidato = NA, ano_eleicao = 2012){
checa_id(id_candidato)
ano_eleicao = as.numeric(ano_eleicao)
ementas_vereador = get_ementas_por_vereador(camara_db, id_candidato = id_candidato, ano_eleicao)
if (NROW(ementas_vereador) != 0) {
ementas_vereador = ementas_vereador %>%
select(
sequencial_candidato,
nome_urna_candidato,
document_number,
process_number,
ementa_type,
published_date,
approval_date,
title,
source,
proponents,
situation,
main_theme,
tipo_ato
)
}
return(ementas_vereador)
}
#* @get /vereadores/ementas/sumario
get_sumario_vereador = function(id_candidato = NA, ano_eleicao = 2012, apenas_legislacao = FALSE){
ano_eleicao = as.numeric(ano_eleicao)
if (is.na(ano_eleicao)) {
stop("informe o ano em que o vereador foi eleito")
}
t1 = proc.time()
ementas_vereador = get_ementas_por_vereador(camara_db, id_candidato, ano_eleicao, apenas_legislacao)
if (NROW(ementas_vereador) == 0)
return(data.frame())
flog.info(sprintf("GET /vereadores/ementas/sumario demorou %gs", (proc.time() - t1)[[3]]))
return(
list(
"situation" = sumario2json_format(ementas_vereador, "situation"),
"tipo" = sumario2json_format(ementas_vereador, "tipo_ato"),
"tema" = sumario2json_format(ementas_vereador, "main_theme")
)
)
}
#* @get /relevancia/ementas
get_relevacia_propostas = function(ano = 2012){
relevancia_propostas = get_relevancia_ementas(camara_db, ano)
return(relevancia_propostas)
}
#* @get /relevancia/vereadores
get_relevacia_vereadores = function(ano_eleicao = 2012){
relevancia_vereadores = get_relevancia_vereadores(camara_db, ano_eleicao)
return(relevancia_vereadores)
}
sumario2json_format = function(ementas, campo) {
df = ementas %>%
count_(c("sequencial_candidato", campo))
nomes = ementas %>%
select(sequencial_candidato, nome_urna_candidato) %>%
unique()
x1 = unique(unlist(ementas[, "sequencial_candidato"]))
x2 = unique(unlist(ementas[, campo]))
df =
left_join(
expand.grid(x1, x2,
stringsAsFactors = F),
df,
by = c("Var1" = "sequencial_candidato", "Var2" = campo)
) %>%
mutate(n = ifelse(is.na(n), 0, n))
names(df) = c("sequencial_candidato", "count_by", "n")
df = df %>%
left_join(nomes, by = "sequencial_candidato")
projson = df %>%
split(.$sequencial_candidato) %>%
map(~list("values" = .[,c("count_by", "n")],
"total" = sum(.$n),
"nome" = .$nome_urna_candidato[1],
"id" = .$sequencial_candidato[1]))
names(projson) = NULL
return(projson)
}
checa_id = function(id_candidato) {
if (is.na(id_candidato) | id_candidato == '') {
stop("é necessário informar o id sequencial do candidato segundo o TSE")
}
}
|
77611d5e777a3efc840a1017bf70048320f7dce0
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/mnis/tests/testthat/test_extra.R
|
3933cce0bb75b5b053c5ae5232a8323c4c3e518a
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
test_extra.R
|
library(mnis)
context("mnis_extra")
test_that("mnis_extra returns expected format", {
skip_on_cran()
xmnise <- mnis_extra(4019)
expect_length(xmnise, 188)
expect_type(xmnise, "list")
expect_true(nrow(xmnise)==1)
expect_true(tibble::is_tibble(xmnise))
})
|
4bba16aa9127ac229b367e2c498000241917beb2
|
304b2c50ecbc07dfb68521690381ea908dda638f
|
/master.R
|
c5a1eaddbe01e1bbbebf83e26b005544623dd184
|
[] |
no_license
|
haachicanoy/climate-risk-profiles
|
73af330cc3dcc7bf1111fbc262db56d8ee30dd67
|
4e8d883c06c6cd6177383c883dac0c9c97e17df4
|
refs/heads/master
| 2023-03-09T21:49:28.791901
| 2021-02-24T18:45:40
| 2021-02-24T18:45:40
| 299,702,287
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,876
|
r
|
master.R
|
# -------------------------------------------------- #
# Climate Risk Profiles -- Master code
# A. Esquivel, H. Achicanoy, and J. Ramirez-Villegas
# Alliance Bioversity-CIAT, 2021
# -------------------------------------------------- #
# R options and load packages
options(warn = -1, scipen = 999)
suppressMessages(if(!require(pacman)){install.packages('pacman'); library(pacman)})
suppressMessages(pacman::p_load(tidyverse, raster,
terra, sp, compiler))
root <- 'D:/OneDrive - CGIAR/PARM toolkit/notebook'
# Establish working directory
setwd(root)
# Import functions
if(!dir.exists('./scripts')){
# Download updated repository
download.file(url = 'https://github.com/haachicanoy/climate-risk-profiles/archive/master.zip', destfile = "./crp.zip")
# Unzip the .zip file
unzip(zipfile = "crp.zip")
dir.create('./scripts', recursive = T)
file.copy2 <- Vectorize(FUN = file.copy, vectorize.args = c('from','to'))
file.copy2(from = list.files(path = './climate-risk-profiles-master/', pattern = '*.R$', full.names = T),
to = paste0('./scripts/',list.files(path = './climate-risk-profiles-master/', pattern = '*.R$', full.names = F)))
unlink('./climate-risk-profiles-master', recursive = T)
file.remove('./crp.zip')
source('./scripts/00_functions.R')
source('./scripts/_get_soil_data.R')
} else {
source('./scripts/00_functions.R')
source('./scripts/_get_soil_data.R')
}
# Object created from shiny app
inputs <- list(country = 'Burkina Faso',
county = 'Sud-Ouest',
iso3c = 'BFA',
adm_lvl = 1,
seasons = 'All year',
m_seasons = NULL,
n_wtts = NULL,
big_cnt = T,
ncores = 2)
## Step 1. Setting up the study region
# Load country shapefile
shp <- raster::shapefile(paste0(root,'/data/shps/',inputs$iso3c,'.shp'))
# Load a reference raster to obtain geographical coordinates
ref <- raster::raster(paste0(root,'/data/rasters/tmplt.tif'))
# Crop the reference raster and fit it to the shapefile region
ref <- ref %>% raster::crop(x = ., y = raster::extent(shp)) %>% raster::mask(x = ., mask = shp)
# Get the geographical coordinates and their id
crd <- ref %>% raster::rasterToPoints() %>% base::as.data.frame() %>% dplyr::select(x, y)
crd$id <- raster::cellFromXY(object = ref, xy = crd[,c('x','y')])
crd <- crd %>% dplyr::select(id, x, y)
## Step 2. Get soil data
# This function requires to be connected to the dapadfs storage cluster
get_soil(crd = crd, root_depth = 60, outfile = './soilcp_data.fst')
## Step 3. Get daily climate data. FIX
# This function requires to be connected to the dapadfs storage cluster
get_observational_data(country = inputs$country, county = inputs$county, iso3 = inputs$iso3c, adm_lvl = inputs$iso3c)
## Step 4. Calculate indices. FIX
calc_indices(country = inputs$country, county = inputs$county, iso3c = inputs$iso3c, adm_lvl = inputs$adm_lvl,
seasons = NULL, # Seasons manually defined
n_ssns = 2, # 2-seasons automatically defined
n_wtts = 100, # 100-wettest days
big_cnt = TRUE,
ncores = 10)
## Step 5. Do graphs
## Step 5.1. Climatology
do_climatology(country = 'Kenya',
county = 'Vihiga',
seasons = TRUE, # Climatology without any season (manual or automatic)
manual = NULL, # Seasons defined manually e.g. list(s1 = c(11:12,1:4)
auto = list(n = 2))
## Step 5.2. Time series plots
time_series_plot(country = inputs$country, county = inputs$county)
## Step 5.3. Maps
do_maps(country = inputs$country, county = inputs$county)
## Step 5.4. Elevation map
do_alt_map(country = inputs$country, county = inputs$county)
## Step 5.6. Country maps
do_country_maps(country = inputs$country)
|
fba20d5f361099c1c4f474c8f422677f55e96f1f
|
ec6ec7b40992d2db8b3d6fed6a04980d32ffeae6
|
/test/support/pfacat.R
|
49b5db2d16712a7ac118f1996972c40b316f9a7f
|
[
"MIT"
] |
permissive
|
jimmy05/eps
|
49b66c75420a1b5c971ac35104317b84ae29e6cf
|
fe476bca27ede37ffb70603bc6f78fe2a6995911
|
refs/heads/master
| 2020-04-19T03:18:29.459096
| 2019-02-04T18:11:22
| 2019-02-04T18:11:22
| 167,926,964
| 0
| 0
|
MIT
| 2019-01-28T08:35:48
| 2019-01-28T08:35:47
| null |
UTF-8
|
R
| false
| false
| 198
|
r
|
pfacat.R
|
library(aurelius)
x <- 1:4
weekday <- c("Sunday", "Sunday", "Monday", "Monday")
y <- c(12, 14, 22, 24)
data <- data.frame(x=x, weekday=weekday, y=y)
model <- lm(y ~ ., data)
write_pfa(pfa(model))
|
e93b5acab45fcabca2e61c4a49fe191fe222ae86
|
a6185dbfaa3349971f10972df80379a11fde2159
|
/preprocessData.R
|
45c04e19a889034adebed185448212c1510b0690
|
[
"MIT"
] |
permissive
|
sarikayamehmet/CryptoPortfolio
|
37d6add39df49175b011dc486496570bbb0a15ef
|
b88937bc56926d129531de8f72b7328a81b2a690
|
refs/heads/master
| 2020-05-18T07:39:32.829256
| 2019-11-12T06:32:18
| 2019-11-12T06:32:18
| 184,271,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,754
|
r
|
preprocessData.R
|
library(highcharter)
library(lubridate) # ymd
library(dplyr) # group_by
library(tidyr) #complete
#Read market data
df <- read.csv("marketPrices.csv")
df$T <- as.Date(df$T)
#Calculate the period-over-period percentage change at each point in time
percentageCalc <- function(marketName){
marketData <- df[df$MarketName==marketName,]
#Open, Closed, High, Low, Volume, BaseVolume
data <- marketData[,c("O","C","H","L","V","BV")]
data$Mean <- (data$H + data$L)/2
newdf <- (tail(data, -1) - head(data, -1))/data[-nrow(data),]
newdf$T <- tail(marketData$T, -1)
newdf$MarketName <- marketName
newdf$Reward <- newdf$Mean*100
return(newdf)
}
list_of_dataframes <- lapply(unique(df$MarketName), function(x) percentageCalc(x))
prepData <- bind_rows(list_of_dataframes, .id = "ID")
prepData$T <- datetime_to_timestamp(ymd(prepData$T)) # its easy to convert to a date
series_asset <- prepData %>%
arrange(T) %>%
group_by(name = MarketName) %>% # we create a series for every category
do(data = list.parse2(data_frame(.$T, .$Reward))) %>% # a line chart need IN ORDER the x and y
list.parse3() # then convert every row in a list preserving names
highchart() %>%
hc_chart(type = "line") %>%
hc_xAxis(type = 'datetime',labels = list(format = '{Reward:%Y %m %d}')) %>%
hc_add_series_list(series_asset) %>%
hc_title(text="Yillara gore tahmin",align="center") %>%
hc_subtitle(text="2014-2019 icin degerler",align="center") %>%
hc_add_theme(hc_theme_elementary())
#Simplify data to quick test
# We will only use ETH, LTC, XRP, XLM, USDT for BTC market as Multi-armed bandit approach
baseMarkets <- c("BTC-ETH", "BTC-LTC", "BTC-XRP", "BTC-XLM", "USDT-BTC")
baseData <- prepData[prepData$MarketName %in% baseMarkets,]
# ETH -> "2015-08-15"
# LTC -> "2014-03-10"
# XRP -> "2014-12-23"
# XLM -> "2015-11-19"
# USDT -> "2015-12-15"
# So we take "2015-12-15" as min date to arbitrage test
baseData <- baseData[baseData$T >= "2015-12-15",]
#Fill missing
USDData <- baseData[baseData$MarketName=="USDT-BTC",] %>%
mutate(T = as.Date(T)) %>%
complete(T = seq.Date(min(T), max(T), by="day")) %>%
#group_by(ID, MarketName) %>%
fill(ID,MarketName,O,C,H,L,V,BV,Mean,Reward)
XLMData <- baseData[baseData$MarketName=="BTC-XLM",] %>%
mutate(T = as.Date(T)) %>%
complete(T = seq.Date(min(T), max(T), by="day")) %>%
#group_by(ID, MarketName) %>%
fill(ID,MarketName,O,C,H,L,V,BV,Mean,Reward)
#Merge all in a bundle
trainData <- baseData[baseData$MarketName!="USDT-BTC",]
trainData <- trainData[trainData$MarketName!="BTC-XLM",]
trainData <- rbind(trainData, XLMData, USDData)
#1235 day for each asset
write.csv(trainData, file = "trainData.csv", row.names = F)
|
61ed1b2ab28cd2cf041d3f52f9c839e5ec5b3d0e
|
0a230e74f7343a500cf667c725a798d42ec12539
|
/man/post_html_from_Rmd_addin.Rd
|
9f6add1647384c418276b87e49df830936833083
|
[
"MIT"
] |
permissive
|
kbttik/posteR
|
2793a0495a226e254a598756e8cc6a4098fdc86b
|
fedbf6d12ad924be9421a0bc12e010be6065eae0
|
refs/heads/master
| 2022-12-11T15:00:10.896933
| 2020-09-10T08:56:31
| 2020-09-10T08:56:31
| 285,782,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 445
|
rd
|
post_html_from_Rmd_addin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_html.R
\name{post_html_from_Rmd_addin}
\alias{post_html_from_Rmd_addin}
\title{htmlレポート転送用アドイン関数}
\usage{
post_html_from_Rmd_addin()
}
\value{
\code{post_html_from_Rmd()} returns the URL of the html page.
}
\description{
RstudioのAddinsで関数の起動ができるようにする。開いているRmdファイルを対象にする
}
|
d40234a2555633409d35f8475d27ba8f56374f30
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/markovchain/examples/markovchainListFit.Rd.R
|
4c369a840e9ba946fd131c3ec4bcb8c6d1d0e2d5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
markovchainListFit.Rd.R
|
library(markovchain)
### Name: markovchainListFit
### Title: markovchainListFit
### Aliases: markovchainListFit
### ** Examples
# using holson dataset
data(holson)
# fitting a single markovchain
singleMc <- markovchainFit(data = holson[,2:12])
# fitting a markovchainList
mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList")
|
c2174639b6ca17c79a314ee1c91174039a639252
|
8615a79ededec2faaf0fee8ddf047280d3f9cad9
|
/R/PredictoR.xgboost.R
|
f702b80a2be16d73af3edf4614a9ed505079339d
|
[
"Apache-2.0"
] |
permissive
|
htssouza/predictoR
|
5923720ce35b8d57216a14ad2b2c2910e9c3e501
|
dd920ca1edda92816859300caf100d0dc7987ffc
|
refs/heads/master
| 2021-01-11T15:28:16.498822
| 2018-01-08T23:33:00
| 2018-01-08T23:33:00
| 80,353,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,341
|
r
|
PredictoR.xgboost.R
|
################################################################################
# PredictoR.xgboost
################################################################################
################################################################################
# External dependencies
################################################################################
library(data.table)
library(logging)
################################################################################
# Functions
################################################################################
PredictoR.BuildXGBData <- function(x, object, withLabel) {
loginfo("PredictoR.BuildXGBData: begin")
library(xgboost)
y <- matrix(nrow=nrow(x), ncol=nrow(object$params$featuresMetadata))
colIndex <- 1
for(feature in object$params$featuresMetadata[, feature]) {
col <- x[, get(feature)]
colClass <- class(col)
if(colClass == "character") {
stop("Character is not a valid type for xgboost")
} else {
y[, colIndex] <- as.numeric(col)
}
colIndex <- (colIndex + 1)
}
if (withLabel) {
y <- xgb.DMatrix (as.matrix(y), label=as.numeric(as.character(x[, get(object$params$responseColName)])), missing=NaN)
} else {
y <- xgb.DMatrix (as.matrix(y), missing=NaN)
}
loginfo("PredictoR.BuildXGBData: end")
return (y)
}
PredictoR.Fit.xgboost <- function(object, modelMetadata, dataWithLabel) {
loginfo("PredictoR.Fit.xgboost: begin")
library(xgboost)
if (! is.null(modelMetadata$num_class)) {
fit <- xgboost(dataWithLabel,
objective=modelMetadata$objective,
nrounds=modelMetadata$nrounds,
num_class=modelMetadata$num_class)
} else {
fit <- xgboost(dataWithLabel,
objective=modelMetadata$objective,
nrounds=modelMetadata$nrounds)
}
loginfo("PredictoR.Fit.xgboost: end")
return (fit)
}
PredictoR.PredictModel.xgboost <- function(object, modelMetadata, fit, dataWithoutLabel) {
loginfo("PredictoR.PredictModel.xgboost: begin")
library(xgboost)
if (! ("xgb.DMatrix" %in% class(dataWithoutLabel))) {
dataWithoutLabel <- PredictoR.BuildXGBData(dataWithoutLabel, object, FALSE)
}
y <- predict(fit, dataWithoutLabel)
loginfo("PredictoR.PredictModel.xgboost: end")
return (y)
}
|
53d9acf7b4606051f6d4954dbf2643aaf7338b68
|
510bc25ad2b6e67e4a3c13043cacd4424b75552e
|
/R/print_commodity.r
|
5194f81a55dcc2ca4700a5ad4747fa46dec3b334
|
[] |
no_license
|
orangeluc/energyRt
|
ff7423a2010d8edc3915034c396f079662ea4315
|
c72d1a528a95ef8fada215e0abef45d523383758
|
refs/heads/master
| 2020-04-24T06:04:06.819280
| 2019-02-20T13:26:26
| 2019-02-20T13:26:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,100
|
r
|
print_commodity.r
|
#---------------------------------------------------------------------------------------------------------
#! print.commodity < -function(x) : print commodity
#---------------------------------------------------------------------------------------------------------
print.commodity <- function(x) {
# print commodity
if_print_data_frame <- function(x, sl) {
if(nrow(slot(x,sl)) != 0) {
cat('\n', sl, '\n')
print(slot(x, sl))
cat('\n')
}
}
cat('Name: ', x@name, '\n')
if (x@type != '') cat('type: ', x@type, '\n')
if (x@description != '') cat('description: ', x@description, '\n')
if (x@origin != '') cat('Region of origin: ',x@origin, '\n')
if (x@color != '') cat('color: ', x@color, '\n')
if (length(x@source) != 0) {
cat('source:\n')
print(x@source)
}
if (length(x@other) != 0) {
cat('other:\n')
print(x@other)
}
g <- getClass("commodity")
zz <- names(g@slots)[sapply(names(g@slots), function(z) g@slots[[z]] ==
"data.frame")]
for(i in zz) if_print_data_frame(x, i)
}
|
f1d6b151bc587ad152c892c9967559e7e68d68d0
|
ae0d624abcfa7033b97fdef60642594d5448e29e
|
/Plot2.R
|
0dcb9f5b1106464815a434bab74ec11aa1800c5b
|
[] |
no_license
|
kcbaskar/ExData_Plotting1
|
709b70999326fe724bda47f7f7160aca54c46c10
|
64be72db0ae2410bd53caba815bd825884429ee5
|
refs/heads/master
| 2021-01-09T07:03:33.392057
| 2015-01-11T21:00:17
| 2015-01-11T21:00:17
| 29,097,005
| 0
| 0
| null | 2015-01-11T16:02:47
| 2015-01-11T16:02:46
| null |
UTF-8
|
R
| false
| false
| 306
|
r
|
Plot2.R
|
# Load data
source('load_data.R')
#open png file
png(filename="plot2.png", width=480, height=480)
#plot 2 now
plot(data$DateTime,
data$Global_active_power,
type="l",
col="black",
xlab="",
ylab="Global Active Power (kilowatts)",
main="")
#close the png file
dev.off()
|
d08976b6c912e73488c352f91b8cdd0f38bb0c91
|
0e5804e1c61ebfe908e0b6a116538ba21587c1c7
|
/cachematrix.R
|
613d299cfe05d27825f8945f70649257a8645b84
|
[] |
no_license
|
kpokk/ProgrammingAssignment2
|
13ce29d2148ebbd7274a299c87bb8ba4817004fd
|
74429888579597c4c246be280e8367a0d47a6221
|
refs/heads/master
| 2021-01-24T14:47:24.216664
| 2015-09-25T14:37:34
| 2015-09-25T14:37:34
| 43,149,079
| 0
| 0
| null | 2015-09-25T14:26:12
| 2015-09-25T14:26:12
| null |
UTF-8
|
R
| false
| false
| 1,221
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Functions makeCacheMatrix and cacheSolve are used to create a
## special object that stores a matrix and cache's its inverse
## Write a short comment describing this function
## Function makeCacheMatrix makes list that has functions to
## get and set a value of a matrix and its inverse.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setInverse <- function(solve) i <<- solve
getInverse <- function() i
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Write a short comment describing this function
## Function cacheSolve returns (and if necessary calculates)
## inverse of a matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getInverse()
if(!is.null(i)) {
message("getting cached data")
return (i)
}
data <- x$get()
i <- solve(data, ...)
x$setInverse(i)
i
}
|
0b7743dca02e0c42e66de25cba0ad02cf9f829de
|
08457a9bcd1fa7bf10d1f22116ce42f8696dad32
|
/R/grd.box.ts.R
|
eaf593b1c6d071354b59457361d41b3e833ec654
|
[] |
no_license
|
ogutu/clim.pact
|
8a249acc5d4e3c68c0293fca0f3ec6effbfef910
|
dd4a01bb38f0f8d92fcdc443ab38ecec16f95523
|
refs/heads/master
| 2021-01-18T16:30:56.801697
| 2011-11-22T00:00:00
| 2011-11-22T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,846
|
r
|
grd.box.ts.R
|
grd.box.ts <- function(x,lon,lat,lev=NULL,what="abs",greenwich=TRUE,mon=NULL,
col="grey10",lwd=1,lty=1,pch=".",add=FALSE,
filter=NULL,type="l",main=NULL,sub=NULL,xlab=NULL,ylab=NULL,
xlim=NULL,ylim=NULL) {
library(akima)
# library(ts)
if ((class(x)[1]!="field") & (class(x)[2]!="monthly.field.object") &
(class(x)[2]!="daily.field.object") ) stop("Need a field.object")
n.dims <- length(dim(x$dat))
if ( (n.dims==4) & (is.null(lev)) ) stop("For 4D objects, the level must be given")
if (greenwich) {
x$lon[x$lon > 180] <- x$lon[x$lon > 180]-360
x.srt <- order(x$lon)
x$lon <- x$lon[x.srt]
#print(n.dims); print(dim(x$dat)); print(length(x.srt))
if (n.dims==3) x$dat <- x$dat[,,x.srt] else
if (n.dims==4) x$dat <- x$dat[,,,x.srt]
}
daysayear <- 365.25
cmon<-c('Jan','Feb','Mar','Apr','May','Jun',
'Jul','Aug','Sep','Oct','Nov','Dec')
descr <- "Interpolated value"
date <- " "
if (!is.null(mon)) {
im <- x$mm== mon
if (n.dims==3) x$dat <- x$dat[im,,] else
if (n.dims==4) x$dat <- x$dat[im,,,]
x$yy <- x$yy[im]
x$mm <- x$mm[im]
x$dd <- x$dd[im]
x$id.t <- x$id.t[im]
date <- cmon[mon]
}
dx <- x$lon[2] - x$lon[1]
dy <- x$lat[2] - x$lat[1]
x.keep <- (x$lon - 3*dx <= lon) & (x$lon + 3*dx >= lon)
y.keep <- (x$lat - 3*dy <= lat) & (x$lat + 3*dx >= lat)
n.dims <- length(dim(x$dat))
if (n.dims==4) {
if (length(x$lev)>1) {
dz <- x$lev[2] - x$lev[1]
z.keep <- (1:length(x$lev))[(x$lev >= lev)][1]
x$lev <- x$lev[z.keep]
} else if (length(x$lev)==1){
z.keep <- 1
x$lev <- x$lev[z.keep]
#print(dim(x$dat))
}
}
x$lon <- x$lon[x.keep]
x$lat <- x$lat[y.keep]
if (n.dims==3) x$dat <- x$dat[,y.keep,x.keep] else
if (n.dims==4) x$dat <- x$dat[,z.keep,y.keep,x.keep]
if (sum(!is.finite(x$dat))>0) x$dat[!is.finite(x$dat)] <- 0
lat.x<-rep(x$lat,length(x$lon))
lon.x<-sort(rep(x$lon,length(x$lat)))
nt <- length(x$yy)
y <- rep(NA,nt)
for (it in 1:nt) {
if (n.dims==3) Z.in<-as.matrix(x$dat[it,,]) else
if (n.dims==4) Z.in<-as.matrix(x$dat[it,z.keep,,])
Z.out<-interp(lat.x,lon.x,Z.in,lat,lon)
y[it] <- Z.out$z
}
# print("time unit")
if (!is.null(attributes(x$tim)$unit)) {
attr(x$tim,"units") <- attributes(x$tim)$unit
}
#print(attributes(x$tim)$units)
#print(attributes(x$tim)$unit)
#print(summary(y))
tunit <- attributes(x$tim)$units
if (!is.null(tunit)) tunit <- lower.case(substr(tunit,1,3)) else {
tunit <- attributes(x$tim)$units
if (!is.null(tunit)) tunit <- lower.case(substr(tunit,1,3)) else
if (min(diff(x$mm))==1) tunit <- "mon" else
tunit <- "day"
}
if (tunit== "mon") {
clim <- y
for (im in 1:12) {
ii <- mod((1:nt)-1,12)+1 == im
clim[ii] <- mean(y[ii],na.rm=TRUE)
}
} else {
ac.mod<-matrix(rep(NA,nt*6),nt,6)
if (tunit=="day") jtime <- x$tim
if (tunit=="hou") jtime <- x$tim/24
if (!is.null(x$attributes$daysayear)) daysayear <- x$attributes$daysayear else
daysayear <- 365.25
ac.mod[,1]<-cos(2*pi*jtime/daysayear); ac.mod[,2]<-sin(2*pi*jtime/daysayear)
ac.mod[,3]<-cos(4*pi*jtime/daysayear); ac.mod[,4]<-sin(4*pi*jtime/daysayear)
ac.mod[,5]<-cos(6*pi*jtime/daysayear); ac.mod[,6]<-sin(6*pi*jtime/daysayear)
ac.fit<-lm(y ~ ac.mod); clim <- ac.fit$fit
}
#print("what?")
ts <- switch(lower.case(substr(what,1,3)),
"ano"=y - clim,
"cli"=clim,
"abs"=y)
descr <- switch(lower.case(substr(what,1,3)),
"ano"="anomaly",
"cli"="climatological",
"abs"="absolute value")
if (!is.null(filter)) ts <- filter(ts,filter)
if (is.null(main)) main <- x$v.name
if (is.null(sub)) sub <- paste("Interpolated at ",lon,"E, ",lat,"N ",date,sep="")
if (is.null(xlab)) xlab <- "Time"
if (is.null(ylab)) ylab <- attributes(x$dat)$unit
#print(summary(ts)); print("plot")
if (!add) {
plot(x$yy+x$mm/12+x$dd/daysayear,ts,type=type,pch=pch,xlim=xlim,ylim=ylim,
main=main,sub=sub,xlab=xlab,ylab=ylab,col=col,lwd=lwd,lty=lty)
points(x$yy+x$mm/12+x$dd/daysayear,ts,pch=pch,col=col)
} else {
if (type!='p') lines(x$yy+x$mm/12+x$dd/daysayear,ts,type=type,col=col,lwd=lwd,lty=lty)
points(x$yy+x$mm/12+x$dd/daysayear,ts,pch=pch,col=col)
}
grid()
#print("plotted")
dd.rng <- range(x$dd)
if (is.null(attr(x$tim,"units"))) attr(x$tim,"units") <- "unknown"
if ( (tunit=="mon") |
((dd.rng[2]-dd.rng[1]<4) & (x$mm[2]-x$mm[1]>0)) ) {
# print("Monthly")
results <- station.obj(ts,yy=x$yy,mm=x$mm,obs.name=x$v.name,
unit=x$attributes$unit,ele=NA,
station=NA,lat=round(lat,4),lon=round(lon,4),alt=NA,
location="interpolated",wmo.no=NA,
start=min(x$yy),yy0=attr(x$tim,"time_origin"),country=NA,
ref="grd.box.ts.R (clim.pact)")
} else {
attr(x$tim,"daysayear") <- daysayear
results <- station.obj.dm(t2m=ts,precip=rep(NA,length(ts)),
dd=x$dd,mm=x$mm,yy=x$yy,
obs.name=x$v.name,unit=x$attributes$unit,ele=NA,
station=NA,lat=round(lat,4),lon=round(lon,4),alt=NA,
location="interpolated",wmo.no=NA,
start=min(x$yy),yy0=attr(x$tim,"time_origin"),country=NA,
ref="grd.box.ts.R (clim.pact)")
}
# print("exit grd.box.ts()")
invisible(results)
}
|
7a8a3043ba9b8ac52d3feb1f4e70773feb6f994e
|
f0b1bc9d37d67113311f9b09dd559b9abbec166f
|
/inst/tests/tInfo1.R
|
d5d5f2bc6cabd4ae479fc4007ac02123ef261c36
|
[] |
no_license
|
GAMS-dev/gdxrrw-miro
|
b0a8f28c730eaa02fb63887a6832d861f48914c5
|
91486406f60986429b385cf37b2741648ac5b2e2
|
refs/heads/master
| 2023-04-03T17:53:31.082850
| 2023-03-15T17:24:04
| 2023-03-15T17:24:04
| 219,473,354
| 2
| 1
| null | 2021-01-28T09:19:28
| 2019-11-04T10:19:39
|
C
|
UTF-8
|
R
| false
| false
| 1,335
|
r
|
tInfo1.R
|
### Test gdxInfo
# We get a list of symbol names from the transport data
# Verifies that gdxInfo(returnList=TRUE) reports the expected symbol
# inventory (sets, parameters, variables, equations, aliases) for the
# classic GAMS "trnsport" model stored in trnsport.gdx.
# require() (not library()) is deliberate here: the test wants to emit a
# readable stop() message rather than a package-loading error.
if (! require(gdxrrwMIRO)) stop ("gdxrrw package is not available")
if (0 == igdx(silent=TRUE)) stop ("the gdx shared library has not been loaded")
# chkSame.R provides the chkSameVec() comparison helper used below.
source ("chkSame.R")
# The tryCatch returns TRUE on success, FALSE (after printing the
# condition) on any failure, so callers can collect a pass/fail flag.
tryCatch({
fn <- "trnsport.gdx"
# dump=FALSE suppresses the human-readable listing; returnList=TRUE makes
# gdxInfo return an R list we can inspect programmatically.
s <- gdxInfo (fn, dump=FALSE, returnList=TRUE)
if (!is.list(s))
stop ("Expected gdxInfo output to be in list form")
if (10 != length(s))
stop ("Expected gdxInfo output to have length 10")
if (12 != s$symCount)
stop ("gdxInfo: expected trnsport symCount==12")
if (5 != s$uelCount)
stop ("gdxInfo: expected trnsport uelCount==5")
# Symbol-by-symbol checks against the known contents of trnsport.gdx.
if (! chkSameVec("sets", c("i","j"), s$sets))
stop ("gdxInfo: s$sets for trnsport is bogus")
if (! chkSameVec("parameters", c("a","b", "d", "f", "c"), s$parameters))
stop ("gdxInfo: s$parameters for trnsport is bogus")
if (! chkSameVec("variables", c("x","z"), s$variables))
stop ("gdxInfo: s$variables for trnsport is bogus")
if (! chkSameVec("equations", c("cost","supply", "demand"), s$equations))
stop ("gdxInfo: s$equations for trnsport is bogus")
# The model declares no aliases, so an empty character vector is expected.
if (! chkSameVec("aliases", character(0), s$aliases))
stop ("gdxInfo: s$aliases for trnsport is bogus")
print ("Successfully completed gdxInfo test 1")
TRUE
}
, error = function(ex) { print(ex) ; FALSE }
)
|
29ad1933366a2945371bd90bcbefa4e9ce20f605
|
04ad3ce63344d5a050f76111db01eb9e7d47e05c
|
/NNforMNIST_ori.r
|
19b6e59bf13372fd9c078097d4a70cc6917392d2
|
[] |
no_license
|
Saw-Aung/Making-Neural-network-with-R-Programming-language
|
bd40ca9e4ebea735bb13678d77350203058fa49c
|
1af20c8f7e9fd4bc0b52b3f418340c4a8525f7d2
|
refs/heads/main
| 2023-05-12T05:18:53.955111
| 2021-06-04T07:01:43
| 2021-06-04T07:01:43
| 373,751,134
| 0
| 0
| null | null | null | null |
ISO-8859-7
|
R
| false
| false
| 3,469
|
r
|
NNforMNIST_ori.r
|
# Two-layer (784-100-10) sigmoid neural network trained on MNIST CSVs with
# plain stochastic gradient descent, sweeping the learning rate.
# NOTE(review): rm(list=ls()) / setwd() in a script are anti-patterns;
# kept for compatibility with the original workflow.
rm(list=ls())
setwd("c:/usr/doc/")
## Load training and test data (MNIST CSV: label in column 1, 784 pixels after)
dt.tr <- read.csv("mnist_train.csv",header=F)
res.tr <- dt.tr[,1]; let.tr <- dt.tr[,2:(28*28+1)]
ns.tr <- 60000
dt.te <- read.csv("mnist_test.csv",header=F)
res.te <- dt.te[,1]; let.te <- dt.te[,2:(28*28+1)]
ns.te <- 10000
## Rescale grey levels from 0-255 to the range 0.01-1.00
let.tr <- let.tr/255*0.99 + 0.01
let.te <- let.te/255*0.99 + 0.01
## Sweep the learning rate alp over 0.3, 0.4, ..., 1.0
for(ii in 3:10){alp <- ii/10
## number of input / hidden / output nodes
nn.inp <- 784; nn.hid <- 100; nn.out <- 10
## input -> hidden weight matrix, random normal init
w.ih <- array(rnorm(nn.inp*nn.hid, mean=0, sd=0.3),c(nn.inp,nn.hid))
## hidden -> output weight matrix
w.ho <- array(rnorm(nn.hid*nn.out, mean=0, sd=0.3),c(nn.hid,nn.out))
## input matrices and one-hot encoded target matrices
inp <- array(0,c(ns.tr,nn.inp)); inp <- as.matrix(let.tr)
out <- array(0,c(ns.tr,nn.out)); for(n in 1:ns.tr) out[n,res.tr[n]+1] <- 1
inp.te <- array(0,c(ns.te,nn.inp)); inp.te <- as.matrix(let.te)
out.te <- array(0,c(ns.te,nn.out)); for(n in 1:ns.te) out.te[n,res.te[n]+1] <- 1
## sigmoid activation function
sig <-function(x) return(1/(1+exp(-x)))
## BUG FIX: the original reassigned alp <- 0.2 here, silently overriding the
## learning-rate sweep set up by the enclosing for(ii ...) loop.
for(it in 1:10){ err <- 0
for(n in 1:ns.tr){
## forward pass: hidden- and output-layer activations
nd.hid <- sig(t(w.ih) %*% inp[n,])
nd.out <- sig(t(w.ho) %*% nd.hid)
## backpropagated errors for the output and hidden layers
er.out <- out[n,] - nd.out
er.hid <- w.ho %*% er.out
## gradient-descent updates of both weight matrices
w.ih <- w.ih + alp*t((er.hid*nd.hid*(1-nd.hid)) %*% inp[n,])
w.ho <- w.ho + alp*t((er.out*nd.out*(1-nd.out)) %*% t(nd.hid))
## accumulate mean absolute output error
err <- err + sum(abs(er.out))/nn.out
}
err <- err/ns.tr
cat("IT=",it," err=",err,"\n")
## hit rate on the training data: count predictions per confusion cell
res <- array(0,c(ns.tr,1)); hit <- array(0,c(10,10))
for(n in 1:ns.tr){
nd.hid <- sig(t(w.ih) %*% inp[n,])
nd.out <- sig(t(w.ho) %*% nd.hid)
res[n] <- which.max(nd.out)-1
hit[res.tr[n]+1,res[n]+1] <- hit[res.tr[n]+1,res[n]+1] + 1
}
print(hit)
rt <- 0; for(n in 1:10) rt <- rt + hit[n,n]
rt <- rt*100/ns.tr; cat("³t: Hit rate[%]: ",rt,"\n")
## hit rate on the test data
res <- array(0,c(ns.te,1)); hit <- array(0,c(10,10))
for(n in 1:ns.te){
nd.hid <- sig(t(w.ih) %*% inp.te[n,])
nd.out <- sig(t(w.ho) %*% nd.hid)
res[n] <- which.max(nd.out)-1
hit[res.te[n]+1,res[n]+1] <- hit[res.te[n]+1,res[n]+1] + 1
}
print(hit)
rt <- 0; for(n in 1:10) rt <- rt + hit[n,n]
rt <- rt*100/ns.te; cat("test: Hit rate[%]: ",rt,"\n")
}
## BUG FIX: close the learning-rate sweep; the original file never closed
## the for(ii ...) brace, so the script failed to parse at end of file.
}
#########################################
## Build image matrices for drawing each digit with ggplot2.
## NOTE(review): this section references `let` and `res`, which are never
## defined above (the training script defines let.tr/let.te and overwrites
## `res` inside the sweep loop) -- presumably leftovers from an earlier
## version of the script; confirm before running.
library(ggplot2)
# mn[i, , ] holds digit i as a 28x28 image, filled row-by-row from the
# flattened pixel vector, with the vertical axis reversed (j in 28:1).
mn <- array(0,c(100,28,28))
for(i in 1:100){ nn <- 0
for(j in 28:1){
for(k in 1:28){
nn <- nn+1; mn[i,k,j] <- let[i,nn]
}}}
# Pause between plots (NOTE(review): T should be spelled TRUE).
par(ask=T)
for(n in 1:100){
# gr: one row per pixel -- columns are x coordinate, y coordinate, grey level.
gr <- array(0,c(28*28,3))
nn <- 0
for(i in 1:28){
for(j in 1:28){nn <- nn +1
gr[nn,1] <- i; gr[nn,2] <- j; gr[nn,3] <- mn[n,i,j]
}}
gr <- data.frame(gr); num <- as.character(res[n])
# Scatter the pixels (inverted greys) and overlay the predicted label.
gm <- ggplot(data=gr, aes(x=gr[,1], y=gr[,2], color=255-gr[,3]))+geom_point(size=5) +
annotate("text", x=2, y=2, label=num,size=10)
print(gm)
} ## end of drawing
|
aabe44c304131117f163462113065ad6306ceb2f
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609960684-test.R
|
667e5457a4a72674c013e6ec158ee7fe0f781e88
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
r
|
1609960684-test.R
|
# libFuzzer/valgrind-derived regression input: feed a fixed integer vector
# (plus an empty y) to diffrprojects:::dist_mat_absolute and print the
# structure of the result, so crashes or memory errors reproduce.
testlist <- list(x = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 65535L, -1L, -10726L, 805306367L, -1L, -16777216L ), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
4f76ff827492198117438042070c09eb3cb7fb1c
|
f8485998852a260211eabfcb54a51fcb0878581f
|
/man/Ocgrtools.Rd
|
af143bed5e565ed180bfcdcff375319606221f89
|
[] |
no_license
|
aposacka/Ocgrtools
|
1ee047998e9f2831cb0f201add5fa163692dfb64
|
ff2f998bdad77fb71e93dbd5a77f9a221c55f188
|
refs/heads/master
| 2021-06-11T10:44:36.448973
| 2017-02-23T06:36:03
| 2017-02-23T06:36:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 452
|
rd
|
Ocgrtools.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Ocgrtools-package.r
\docType{package}
\name{Ocgrtools}
\alias{Ocgrtools}
\alias{Ocgrtools-package}
\title{Ocgrtools: Visualize oceanographic datasets}
\description{
Readily plot depth profiles of ocean properties and utilize a dataset of various
properties collected as a part of an expedition in the subarctic NE Pacific
(LineP cruise) that is contained in the package
}
|
5893d4d3218677aa52d553d09b077befcac3962e
|
6006b35ac1272e6808f813171747783019a6b0a1
|
/Question # 5.R
|
0fbebf61fa3bafc1bed28f6bd66aa6e8ebc2a9fc
|
[] |
no_license
|
Ory-Data-Science/r-dataframes-assignment-dylan-ryan-daniel
|
1532c2c378fd0559a85536f9bf1137c77ef4f2f1
|
43ab16b0dfa101d226d79bb2790a59005cd86b55
|
refs/heads/master
| 2021-07-20T00:57:12.579203
| 2017-10-25T17:49:56
| 2017-10-25T17:49:56
| 108,028,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
Question # 5.R
|
# Compute the mean shrub volume (length * width * height), summarized by
# site and by experiment, respectively.
# BUG FIX: the pipe (%>%) and the verbs below require dplyr, which the
# original script never attached.
library(dplyr)
shrub_data <- read.csv("shrub-volume-experiment.csv")
shrub_data %>%
  mutate(volume = length * width * height) %>%
  group_by(site) %>%
  summarize(mean_volume = mean(volume))
shrub_data %>%
  mutate(volume = length * width * height) %>%
  group_by(experiment) %>%
  summarize(mean_volume = mean(volume))
|
0c1b15cac6b5dbd507f011cb30ef18b4d41ea6a0
|
b609b0a398c7d2b1ae74b05704600ef872d673d3
|
/R/poissonDisc.R
|
07b3aa7e32f36cb5433cd413bb3b274e4e8b6d80
|
[] |
no_license
|
anyanyany/WAE
|
e96f63ddbce2a2faac3946b296dbf003cd6395d9
|
8287c6b504d7ed1120e9b3a7b6f742b850560eb3
|
refs/heads/master
| 2016-09-14T05:54:09.119468
| 2016-06-03T15:30:56
| 2016-06-03T15:30:56
| 58,619,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,281
|
r
|
poissonDisc.R
|
# Sampling domain bounds: all points are generated inside [A, B]^dim.
A = -100
B = 100
# Scratch coordinate lists used by the plotting/collection helpers below.
x = list()
y = list()
plotPoints = function(accuracy)
{
  # Generate a 2-D Poisson-disc sample with minimum spacing `accuracy`
  # and scatter-plot the resulting points (side effect only).
  plot(A:B, A:B, type="n", ylim=c(A,B))
  grid = generatePoints(accuracy, 2)
  x = list()
  y = list()
  # Walk the (n x n x 2) occupancy grid; c(-1, -1) marks an empty cell.
  for(i in c(1:length(grid[1,,1])))
  {
    for(j in c(1:length(grid[1,,1])))
    {
      if(all(grid[i, j,] == c(-1, -1)) == FALSE)
      {
        point = grid[i, j, ]
        x[[length(x) + 1]] <- point[1]
        y[[length(y) + 1]] <- point[2]
      }
    }
  }
  # NOTE(review): plot() is handed two lists here -- presumably relying on
  # implicit coercion; unlist(x)/unlist(y) would be the explicit form.
  plot(x, y, type="p")
}
GetPointsPoissonDisc2D = function(accuracy)
{
  # Generate a 2-D Poisson-disc sample and flatten the occupancy grid
  # into a list of points.
  #
  # accuracy: minimum allowed distance between any two sampled points.
  # Returns: list of numeric length-2 vectors, one per occupied grid cell.
  grid = generatePoints(accuracy, 2)
  points = list()
  # grid is an (n x n x 2) array; a cell equal to c(-1, -1) is empty.
  n = length(grid[1, , 1])
  # BUG FIX: the original collected x/y into lists and then re-assembled
  # them with for(i in 1:length(x)), which errors when no occupied cell
  # exists (1:0 counts down to index 0). seq_len() handles the empty case.
  for (i in seq_len(n))
  {
    for (j in seq_len(n))
    {
      if (all(grid[i, j, ] == c(-1, -1)) == FALSE)
      {
        points[[length(points) + 1]] <- grid[i, j, ]
      }
    }
  }
  return (points)
}
GetPointsPoissonDisc5D = function(accuracy)
{
  # Generate a 5-D Poisson-disc sample and flatten the occupancy grid
  # (an (n x n x n x n x n x 5) array; a -1-filled cell is empty) into a
  # list of numeric length-5 points.
  grid = generatePoints(accuracy, 5)
  x = list()
  y = list()
  z = list()
  u = list()
  v = list()
  for(i in c(1:length(grid[1,,1,1,1,1])))
  {
    for(j in c(1:length(grid[1,,1,1,1,1])))
    {
      for(k in c(1:length(grid[1,,1,1,1,1])))
      {
        for(l in c(1:length(grid[1,,1,1,1,1])))
        {
          for(m in c(1:length(grid[1,,1,1,1,1])))
          {
            if(all(grid[i, j, k, l, m, ] == c(-1, -1, -1, -1, -1)) == FALSE)
            {
              point = grid[i, j, k, l, m, ]
              x[[length(x) + 1]] <- point[1]
              y[[length(y) + 1]] <- point[2]
              z[[length(z) + 1]] <- point[3]
              u[[length(u) + 1]] <- point[4]
              v[[length(v) + 1]] <- point[5]
            }
          }
        }
      }
    }
  }
  points=list();
  # NOTE(review): 1:length(x) errors when no occupied cell was found
  # (1:0 counts down to index 0); seq_along(x) would be safe for empty grids.
  for(i in c(1:length(x)))
  {
    points[[i]]=c(x[[i]],y[[i]],z[[i]],u[[i]],v[[i]]);
  }
  return (points);
}
pointToGridCell = function(point, cellSize)
{
  # Map a point in [A, B]^d to its 1-based grid-cell index along each axis.
  # NOTE: relies on the global lower bound A defined at the top of this file.
  # floor() and arithmetic are vectorized, so the original element-wise
  # loop is unnecessary; as.vector() keeps the result a plain vector.
  as.vector(floor((point - A) / cellSize) + 1)
}
getGridVector = function(grid, position, dim)
{
  # Read the length-`dim` vector stored at grid cell `position`, i.e.
  # grid[position[1], ..., position[d], k] for k = 1..dim.
  values = seq_len(dim)
  for (k in seq_len(dim)) {
    cell = c(position, k)
    values[k] = grid[t(cell)]   # matrix-style indexing into the array
  }
  values
}
setGridVector = function(grid, position, dim, input)
{
  # Return a copy of `grid` with `input` written into cell `position`:
  # grid[position..., k] <- input[k] for k = 1..dim. Because of R's
  # copy-on-modify semantics the caller's grid is untouched unless it
  # reassigns the return value.
  for (k in seq_len(dim)) {
    grid[t(c(position, k))] <- input[k]
  }
  grid
}
generatePoints = function(minDist, dim)
{
  # Run Poisson-disc sampling over [A, B]^dim with minimum spacing minDist.
  # Returns the occupancy grid: an array with dim+1 axes whose last axis
  # holds the stored point coordinates (-1-filled cells are empty).
  # Cell size chosen so each cell can hold at most one sample.
  cellSize = minDist / sqrt(dim)
  cellNum = round((B - A) / cellSize)
  grid = array(rep(-1, dim), dim = c(rep(cellNum + 1, dim), dim))
  activeList = list()
  # Seed with one uniformly random point.
  initialPoint = runif(dim, A, B)
  position = pointToGridCell(initialPoint, cellSize)
  # BUG FIX: the original called setGridVector(grid, position, dim, position)
  # and discarded the result -- under copy-on-modify the seed point was never
  # stored in the grid -- and it stored the CELL INDEX instead of the point's
  # coordinates, which the later distance checks compare against.
  grid = setGridVector(grid, position, dim, initialPoint)
  activeList[[length(activeList) + 1]] <- initialPoint
  grid = poissonDisc(minDist, cellSize, cellNum+1, grid, activeList, dim)
  return (grid)
}
poissonDisc = function(minDist, cellSize, cellNum, grid, activeList, dim)
{
  # Bridson-style Poisson-disc sampling: repeatedly pop a random active
  # point, try k=30 candidate neighbours at distance [minDist, 2*minDist),
  # keep those far enough from all stored points, and return the filled grid.
  # Until active list is not empty
  while(length(activeList) > 0 )
  {
    # Randomize current active point
    # NOTE(review): round(runif(1, 1, n)) picks the endpoints 1 and n with
    # half the probability of interior indices; sample.int(n, 1) is uniform.
    listLength = length(activeList)
    index = round(runif(1, 1, listLength))
    activePoint = activeList[[index]]
    activeList[[index]] = NULL
    isStillActive = FALSE
    # Select k neighbours for point
    for(i in c(1:30))
    {
      # Get new coordinates
      nPoint = nearPoint(activePoint, minDist)
      nPointCell = pointToGridCell(nPoint, cellSize)
      delta = ceiling(minDist / cellSize) # number of cells to look up
      position = c(1:dim)
      # recursiveInsert checks all cells within +-delta of nPointCell for
      # a stored point closer than minDist; TRUE means nPoint is acceptable.
      isOk = recursiveInsert(grid, cellNum, minDist, dim, nPointCell, nPoint, delta, position, 1)
      if(isOk)
      {
        isStillActive = TRUE
        grid = setGridVector(grid, nPointCell, dim, nPoint)
        activeList[[length(activeList) + 1]] <- nPoint
      }
    }
    # A point that produced at least one new sample stays active for later.
    if(isStillActive)
      activeList[[length(activeList) + 1]] <- activePoint
  }
  return (grid)
}
recursiveInsert = function(grid, cellNum, minDist, dim, nPointCell, nPoint, delta, position, recursion)
{
  # Recursively enumerate every grid cell whose index lies within +-delta
  # of nPointCell on each axis (clipped to [1, cellNum]); `position` is the
  # partially-built cell index, one axis per recursion level.
  # Returns TRUE when nPoint is at least minDist away from every stored
  # point in that neighbourhood, FALSE otherwise.
  if(recursion > dim)
  {
    # We can try to insert value
    # A rep(-1, dim) cell is empty and poses no conflict.
    if(all(getGridVector(grid, position, dim) == rep(-1, dim)) == TRUE)
      return (TRUE)
    if(getDistance(nPoint, getGridVector(grid, position, dim)) < minDist)
      return (FALSE)
    return (TRUE)
  }
  # isGood stays FALSE only if the entire +-delta range falls outside the
  # grid on this axis (no valid cell was visited).
  isGood = FALSE
  for(i in c((nPointCell[recursion] - delta) : (nPointCell[recursion] + delta)))
  {
    if(i < 1 || i > cellNum) next()
    isGood = TRUE
    position[recursion] = i
    result = recursiveInsert(grid, cellNum, minDist, dim, nPointCell, nPoint, delta, position, recursion+1)
    if(!result) return (FALSE)  # early out on the first conflict
  }
  return (isGood)
}
nearPoint = function(point, minDist)
{
  # Draw a random point at distance [minDist, 2*minDist) from `point`
  # using n-sphere spherical coordinates, then clamp every coordinate to
  # the global domain [A, B].
  # For algorithm explanation look here:
  # https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates
  dim = length(point)
  # A single runif(dim) draw: rand[1] sizes the radius, rand[2..dim] the angles.
  rand = runif(dim, 0, 1)
  radius = minDist * (rand[1] + 1)
  candidate = numeric(dim)
  sinProd = 1                         # running product of sin(theta_1..theta_i)
  for (i in seq_len(dim - 1)) {
    theta = 2 * pi * rand[i + 1]
    candidate[i] = point[i] + radius * cos(theta) * sinProd
    sinProd = sinProd * sin(theta)
  }
  candidate[dim] = point[dim] + radius * sinProd
  # Clamp into the sampling domain (vectorized form of the original loop).
  pmin(B, pmax(candidate, A))
}
attr(nearPoint, "comment") <- "Returns new point lying not further than minDist from point"
getDistance = function(point1, point2)
{
  # Euclidean distance between two same-length numeric vectors.
  sqrt(sum((point1 - point2) ^ 2))
}
|
4026713414dcb05c0ac55d9a72372eba50b763ea
|
051eb2a9b907e5a1e5aa834b93972862cc0703c6
|
/04_CalculateSpearmans.R
|
9a814998935719fa86e5757caff32676837cf0dc
|
[] |
no_license
|
ddiannae/tcga-rawcounts-comparison
|
69b12241e2e9a4f1998bce21644161abbc770d05
|
9afc6c11d37a5254c8041ce78f770147379bcfd8
|
refs/heads/master
| 2020-03-30T14:37:39.501434
| 2018-10-02T22:46:52
| 2018-10-02T22:46:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,495
|
r
|
04_CalculateSpearmans.R
|
# NOTE(review): a hard-coded setwd() makes this script machine-specific;
# consider relative paths or here::here().
setwd("~/Workspace/rnapipeline/tcga-rawcounts-comparison")
# Load the four expression objects (normal/tumor x legacy/new pipeline)
# produced by the earlier scripts in this series; each provides $targets
# (sample metadata) and $Counts (gene x sample matrix).
load(file = "cancerRawCleanLegacy.RData")
load(file = "healthyRawCleanLegacy.RData")
load(file = "cancerRawClean.RData")
load(file = "healthyRawClean.RData")
### Define Spearmans Function
# Spearman rank correlation between two expression vectors, plus the
# classical S statistic and a two-sided p-value from the t approximation.
#
# newGenes:    numeric vector of counts from the new pipeline.
# legacyGenes: numeric vector of counts from the legacy pipeline (same length).
# Returns c(rho, S, p) where
#   rho = Spearman correlation (Pearson correlation of average-tied ranks),
#   S   = (n^3 - n) * (1 - rho) / 6          (no-ties formulation),
#   p   = two-sided p-value of t = rho * sqrt((n - 2) / (1 - rho^2)).
spearmanCorr <- function(newGenes, legacyGenes){
  new.rank <- rank(newGenes, ties.method = 'average')
  legacy.rank <- rank(legacyGenes, ties.method = 'average')
  n <- length(new.rank)
  rho <- cor(x = new.rank, y = legacy.rank, method = 'pearson')
  s <- (n^3 - n) * (1 - rho) / 6
  t <- rho * sqrt((n - 2) / (1 - rho^2))
  # BUG FIX: the original computed 2 * (1 - pt(t, df)), which yields p > 1
  # whenever the correlation is negative; a two-sided p-value must use |t|.
  p <- 2 * pt(-abs(t), df = n - 2)
  return(c(rho, s, p))
}
#### First Normal Data
# Align the legacy and new sample tables by Case so the i-th row of each
# refers to the same patient, then correlate each sample pair.
normal.targets <- normal$targets
normal.targets.legacy <- normal.legacy$targets
head(normal.targets)
head(normal.targets.legacy)
normal.targets <- normal.targets[order(normal.targets$Case), ]
normal.targets.legacy <- normal.targets.legacy[order(normal.targets.legacy$Case), ]
## Do we have them all?
# Sanity check: both tables must have the same dimensions and, row by row,
# the same Case identifiers (t(unique(t(...))) collapses equal columns).
size <- unique(rbind(dim(normal.targets), dim(normal.targets.legacy))[1])
cases <- cbind(as.character(normal.targets$Case), as.character(normal.targets.legacy$Case))
cases <- t(unique(t(cases)))
stopifnot(dim(cases) == c(size[1], 1))
normal.df <- data.frame(normal$Counts)
normal.df.legacy <- data.frame(normal.legacy$Counts)
head(normal.df)
head(normal.df.legacy)
# NOTE(review): growing a data.frame with rbind() inside a loop is O(n^2);
# lapply + do.call(rbind, ...) would be the idiomatic form.
results.df <- data.frame()
for (i in 1:nrow(normal.targets)) {
results.df <- rbind(results.df,
spearmanCorr(normal.df[, normal.targets[i, "ID"]],
normal.df.legacy[, normal.targets.legacy[i, "ID"]]))
}
colnames(results.df) <- c("rho", "S", "p")
library(ggplot2)
# Basic histogram
# Per-case Spearman rho, sorted ascending, plotted and written to disk.
df <- data.frame(normal.targets$Case, results.df$rho)
colnames(df) <- c("Case", "Rho")
df <- df[order(df$Rho), ]
df
ggplot(data=df, aes(x=Case, y=Rho, group=1)) +
geom_line(linetype = "dashed")+
geom_point()
mean(results.df$rho)
write.table(df, file="normal_spearman.txt", quote=F, sep="\t")
## Now Tumor Data
# Same pipeline repeated for the tumor samples.
tumor.targets <- tumor$targets
tumor.targets.legacy <- tumor.legacy$targets
head(tumor.targets)
head(tumor.targets.legacy)
tumor.targets <- tumor.targets[order(tumor.targets$Case), ]
tumor.targets.legacy <- tumor.targets.legacy[order(tumor.targets.legacy$Case), ]
## Do we have them all?
size <- unique(rbind(dim(tumor.targets), dim(tumor.targets.legacy))[1])
cases <- cbind(as.character(tumor.targets$Case), as.character(tumor.targets.legacy$Case))
cases <- t(unique(t(cases)))
stopifnot(dim(cases) == c(size[1], 1))
tumor.df <- data.frame(tumor$Counts)
tumor.df.legacy <- data.frame(tumor.legacy$Counts)
head(tumor.df)
head(tumor.df.legacy)
results.tumor.df <- data.frame()
for (i in 1:nrow(tumor.targets)) {
results.tumor.df <- rbind(results.tumor.df,
spearmanCorr(tumor.df[, tumor.targets[i, "ID"]],
tumor.df.legacy[, tumor.targets.legacy[i, "ID"]]))
}
colnames(results.tumor.df) <- c("rho", "S", "p")
df <- data.frame(tumor.targets$Case, results.tumor.df$rho)
colnames(df) <- c("Case", "Rho")
df <- df[order(df$Rho), ]
df
ggplot(data=df, aes(x=Case, y=Rho, group=1)) +
geom_line(linetype = "dashed")+
geom_point()
mean(results.tumor.df$rho)
write.table(df, file="cancer_spearman.txt", quote=F, sep="\t")
## Listo, con 18547 genes
|
1cbff098d34d4f590761456e436b07c21ac6a4c4
|
57d4fbdc1a124c16794952be5c4ff02e0c9c1d97
|
/man/plot.haplo.score.Rd
|
3fa0ab2ae7ff57a2d137f04097b67f476545de20
|
[] |
no_license
|
cran/haplo.score
|
60e30209f513758169fb88d088af8ac06a3f6a71
|
69ff52bf74f28e5141a6234d092e92e539ae0584
|
refs/heads/master
| 2021-01-10T19:19:50.096127
| 2002-09-13T00:00:00
| 2002-09-13T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,965
|
rd
|
plot.haplo.score.Rd
|
%
% Copyright 2001 Mayo Foundation for Medical Education and Research.
%
% This program is free software; you can redistribute it and/or
% modify it under the terms of the GNU General Public License
% as published by the Free Software Foundation; either version 2
% of the License, or (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software
% Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
% 02111-1307, USA.
%
%
\name{plot.haplo.score}
\alias{plot.haplo.score}
\title{
Plot Haplotype Frequencies versus Haplotype Score Statistics
}
\description{
Method function to plot a class of type haplo.score
}
\usage{
plot.haplo.score(x, ...)
}
\arguments{
\item{x}{The object returned from haplo.score (which has class haplo.score).}
\item{...}{Optional arguments}
}
\value{
Nothing is returned.
}
\section{Side Effects}{
}
\details{
This is a plot method function used to plot haplotype frequencies on
the x-axis and haplotype-specific scores on the y-axis. Because
haplo.score is a class, the generic plot function
can be used, which in turn calls this plot.haplo.score function.
}
\section{References}{
Schaid DJ, Rowland CM, Tines DE, Jacobson RM, Poland GA.
Score tests for association of traits with haplotypes when
linkage phase is ambiguous. Submitted to Amer J Hum Genet.
}
\seealso{
haplo.score
}
\examples{
\dontrun{
save <- haplo.score(y, geno, trait.type = "gaussian")
# Example illustrating generic plot function:
plot(save)
# Example illustrating specific method plot function:
plot.haplo.score(save)
}
}
\keyword{hplot}
% docclass is function
% Converted by Sd2Rd version 1.21.
|
90653ee3bd821ff357e177ffe08ddad5d81faf12
|
486e749753ee0cb1f984d64c39370fb20ca95e27
|
/R files/Data Preprocessing Script.R
|
57485e459dae1919a99afe0efe3ffb656886f8c5
|
[] |
no_license
|
54heart/OceanCleanup
|
44ee6b1276df481a4f2170ea8327318af7491e8e
|
f097cf69f135f019f4d85b538640566297374d89
|
refs/heads/master
| 2021-01-09T15:51:24.747340
| 2020-03-03T05:04:36
| 2020-03-03T05:04:36
| 242,361,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
Data Preprocessing Script.R
|
# Exploratory preprocessing for the Ocean Cleanup dataset: load the raw
# Excel export, import the pre-cleaned CSV, coerce count columns to
# integer, and draw a few diagnostic plots.
library(readxl)
raw_data <- read_excel("~/Downloads/Data_Level5_BAH_OceanCleanup.xlsx")
library(plotly)
library(dplyr)
library(tidyverse)
library(ggplot2)
# BUG FIX: the original `plot(x = raw_data$State, y = )` had an empty
# `y =` argument, which is a parse error; plot the State distribution alone.
plot(x = raw_data$State)
# df = raw_data[, -1] # remove the ID column
# df = unique(df) # get unique rows
# write.csv(df, "~/Documents/GitHub/OceanCleanup/Cleaned/clean6.csv", row.names=FALSE) # export csv
# import cleaned data
# df <- read_csv("~/Documents/GitHub/OceanCleanup/Cleaned/clean3.csv",
#                col_types = cols(Adults = col_integer(),
#                                 Children = col_integer(), `Group Name` = col_character(),
#                                 People = col_integer()))
clean6 <- read_csv("~/Documents/GitHub/OceanCleanup/Cleaned/clean6.csv",
col_types = cols(`Group Name` = col_character()))
df <- clean6
### convert data type
df[, 11:60] <- sapply(df[, 11:60], as.integer)
sapply(df, class) # view the data type
# BUG FIX: select() picks columns; subsetting rows by year needs filter().
df %>%
  filter(Year >= 2019)
#
df %>%
  select(Zone, Country)
# BUG FIX: the original left a dangling `df %>%` here, which would have
# swallowed the following boxplot() call as its right-hand side.
# Plots
boxplot(df$`Total Items Collected`)
# NOTE(review): barplot() expects a numeric height vector; passing State
# (likely character) as the first argument will error -- confirm intent.
barplot(df$State, df$`Total Items Collected`)
ggplot(data = df) +
  geom_bar(mapping = aes(x = Zone))
|
e1a46a076bac9b556c0b66855c2253e2f97dbc9a
|
37e2a536296418edb66bbef82a65b02831b86ea7
|
/Weekly Best Practice Figures/3D Fix Cars.R
|
89497dd12aa978291b34053a8e7c07e9706c9138
|
[] |
no_license
|
frankmuzio/datavisualization
|
54bdc0da361a34969424c241955557b0a9725f3d
|
bbc182bf08c05c400a83af53a2eb095c0588f3b5
|
refs/heads/main
| 2023-04-14T03:45:22.503198
| 2021-04-21T12:13:48
| 2021-04-21T12:13:48
| 335,643,100
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,684
|
r
|
3D Fix Cars.R
|
### Fixing a 3D plot####
## March 24, 2021
# Frank Muzio
#------------------------------
# Four 2-D scatter panels of the mtcars variables, arranged in a 2x2 grid
# with ggarrange; each panel shares the same minimal black-axis theme.
library(ggplot2)
library(ggpubr)

head(mtcars)

# Shared minimal theme: blank grid/background, black axis lines.
blank_theme <- theme(panel.grid.major = element_blank(),
                     panel.grid.minor = element_blank(),
                     panel.background = element_blank(),
                     axis.line = element_line(colour = "black"))

p1 <- ggplot(mtcars, aes(x = mpg, y = qsec))
p2 <- ggplot(mtcars, aes(x = mpg, y = hp))
p3 <- ggplot(mtcars, aes(x = qsec, y = hp))
p4 <- ggplot(mtcars, aes(x = disp, y = hp))

a <- p1 + geom_point(color = "steelblue2") +
  labs(x= "Gas Mileage (mpg)", y= "0-60mph (sec)") +
  blank_theme
b <- p2 + geom_point(color = "steelblue2") +
  labs(x= "Gas Mileage (mpg)", y= "Horsepower") +
  blank_theme
c <- p3 + geom_point(color = "steelblue2") +
  labs(x= "0-60mph (sec)", y= "Horsepower") +
  blank_theme
d <- p4 + geom_point(color = "steelblue2") +
  labs(x= "Engine Displacement", y= "Horsepower") +
  blank_theme

full <- ggarrange(a,b,c,d, nrow=2, ncol=2)
print(full)
# annotate_figure(full, top = text_grob("MTCars Data", color = "Black", size = 14))
print(a)
|
ab0b57df5815afcf778b34e602fda48be50b446d
|
3b0d1151da42d9ebf2baa30983dcb273dc2ab180
|
/pRRophetic/man/predictionAccuracyByCv.Rd
|
4983851ccef63193b8bcbe883cfd76e72dbe8e2e
|
[] |
no_license
|
SiYangming/pRRophetic2
|
d0c127737fceae7b548ff8a270959335ca091750
|
f3ad0a78f1b1afec1b314a4354ad570300861edd
|
refs/heads/master
| 2023-03-19T04:08:28.036196
| 2017-05-12T14:45:59
| 2017-05-12T14:45:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,585
|
rd
|
predictionAccuracyByCv.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/compute_phenotype_function.R
\name{predictionAccuracyByCv}
\alias{predictionAccuracyByCv}
\title{Cross validation on training dataset}
\usage{
predictionAccuracyByCv(trainingExprData, trainingPtype, testExprData = -1,
cvFold = -1, powerTransformPhenotype = TRUE, batchCorrect = "eb",
removeLowVaryingGenes = 0.2, minNumSamples = 10, selection = 1)
}
\arguments{
\item{trainingExprData}{The training data. A matrix of expression levels, rows contain genes and columns contain samples, "rownames()" must be specified and must contain the same type of gene ids as "testExprData"}
\item{trainingPtype}{The known phenotype for "trainingExprData". A numeric vector which MUST be the same length as the number of columns of "trainingExprData".}
\item{testExprData}{The test data where the phenotype will be estimated. It is a matrix of expression levels, rows contain genes and columns contain samples, "rownames()" must be specified and must contain the same type of gene ids as "trainingExprData".}
\item{cvFold}{Specify the "fold" required for cross validation. "-1" will do leave one out cross validation (LOOCV)}
\item{powerTransformPhenotype}{Should the phenotype be power transformed before we fit the regression model? Default to TRUE, set to FALSE if the phenotype is already known to be highly normal.}
\item{batchCorrect}{How should training and test data matrices be homogenized. Choices are "eb" (default) for ComBat, "qn" for quantiles normalization or "none" for no homogenization.}
\item{removeLowVaryingGenes}{What proportion of low varying genes should be removed? 20 percent by default}
\item{minNumSamples}{How many training and test samples are required. Print an error if below this threshold}
\item{selection}{How should duplicate gene ids be handled. Default is -1 which asks the user. 1 to summarize duplicates by their mean or 2 to discard all duplicates.}
\item{printOutput}{Set to FALSE to suppress output}
}
\value{
An object of class "pRRopheticCv", which is a list with two members, "cvPtype" and "realPtype", which correspond to the cross-validation predicted phenotype and the user-provided measured phenotype respectively.
}
\description{
This function does cross validation on a training set to estimate prediction accuracy.
If the actual test set is provided, the two datasets can be subsetted and homogenized before the
cross validation analysis is performed. This may improve the estimate of prediction accuracy.
}
\keyword{phenotype}
\keyword{predict}
|
f534b43f1d133d13536601514ed9fa035600ef4f
|
66e5fcdae6a48a6ca4e5290a0fde540d3fa3e992
|
/src/kosovo-population-animation-v2.r
|
433af0f5b7ac23766873a2a653aa83dd4c00f3f5
|
[] |
no_license
|
gentrexha/kosovo-population-animation
|
7bee7b4dc304301b7914d4654bfa7ed763441c2d
|
2e8878e05069c08951d14694afe62853304440a4
|
refs/heads/master
| 2021-07-19T08:17:49.237768
| 2020-08-07T14:51:20
| 2020-08-07T14:51:20
| 203,448,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,458
|
r
|
kosovo-population-animation-v2.r
|
library(readr)
library(dplyr)
library(rgdal)
library(ggplot2)
library(maps)
library(ggthemes)
library(gganimate)
library(hablar)
library(mapproj)
library(scales)
library(RColorBrewer)
library(ggmap)
library(transformr)
library(magick)
# set wd
# NOTE(review): absolute setwd() makes the script machine-specific.
setwd("C:/Projects/Personal/kosovo-covid-data/src")
# Load data
# NOTE(review): despite the ks_pop name, the CSV loaded here is COVID case
# data per municipality (columns used below: id, date, new), not population.
url_csv <- "../data/covid-komunat.csv"
ks_pop <- read_csv(url_csv)
# Set map
shapefile <- readOGR(dsn = path.expand("../data/kosovo-shapefile"),
"XK_EA_2018", use_iconv = TRUE, encoding = "UTF-8")
# Next the shapefile has to be converted to a dataframe for
# use in ggplot2
shapefile_df <- fortify(shapefile, name = "XK_NAME")
# Add id to ks_pop, right?
# Merge polygon vertices with the per-municipality data, then restore the
# polygon drawing order required by geom_polygon.
merged_df <- merge(shapefile_df, ks_pop, by = "id", all.x = TRUE)
final_df <- merged_df[order(merged_df$order), ]
# aggregate data to get mean latitude and mean longitude for
# each state
cnames <- aggregate(cbind(long, lat) ~ komuna, data = final_df,
FUN = function(x) mean(range(x)))
# new cpalette
#getPalette = colorRampPalette(brewer.pal(9, "Greens"))
# basic plot
# NOTE(review): title says "Population in Kosovo in 1948" but the data
# plotted is the `new` column filtered to date 2020-07-08 -- the labels
# look like leftovers from the population-animation version of this script.
# Also, `komuna_me_e` is not a column of cnames (which has komuna/long/lat);
# confirm where that label column is supposed to come from.
ggplot() + geom_polygon(data = final_df[which(final_df$date == "2020-07-08"), ],
aes(x = long, y = lat, group = group, fill = new),
color = "black", size = 0.25) +
coord_map() +
labs(title = "Population in Kosovo in 1948") +
scale_fill_distiller(name = "Population", palette = "Greens",
direction = 1, breaks = pretty_breaks(n = 7), limits = c(min(final_df$new,
na.rm = TRUE), max(final_df$new, na.rm = TRUE))) +
theme_nothing(legend = TRUE) + geom_text(data = cnames, aes(long,
lat, label = komuna_me_e), size = 3, fontface = "bold")
# Final plots
# NOTE(review): this loop reads final_df$year and final_df$population,
# columns the covid CSV above does not appear to provide (it has date/new);
# presumably copied from the population animation -- verify before running.
for (year in c(1948:2018)) {
int_plot <- ggplot() + geom_polygon(data = final_df[which(final_df$year ==
year), ], aes(x = long, y = lat, group = group, fill = population),
color = "black", size = 0.25) + coord_map() + labs(title = paste("Population of Kosovo in ",
toString(year))) + scale_fill_distiller(name = "Population",
palette = "Greens", direction = 1, breaks = pretty_breaks(n = 7),
limits = c(min(final_df$population, na.rm = TRUE), max(final_df$population,
na.rm = TRUE))) + theme_nothing(legend = TRUE) +
geom_text(data = cnames, aes(long, lat, label = komuna_me_e),
size = 3, fontface = "bold")
ggsave(sprintf("animation-v2/pop_%s.png", toString(year)),
plot = int_plot)
}
|
d39cc0a0075693d2e3ab42d01971461cce05c6d2
|
a87b9e6961a0e99bb8a0f51d2b117cf6bfec014a
|
/r/image_processing_aux/VIs_Calculation.R
|
f78fdd258a92afb5e1d69c51a447808e1e814792
|
[] |
no_license
|
Lorenagzp/Scripts
|
9e12785c512f5de70370ea9e75b8f89c02ea5c38
|
5d7007dd59287cf98e9fa3067af23f31fbe1837b
|
refs/heads/master
| 2021-07-07T18:30:07.610185
| 2020-07-14T17:29:34
| 2020-07-14T17:29:34
| 141,351,391
| 0
| 1
| null | 2019-10-16T21:20:35
| 2018-07-17T22:22:16
|
Python
|
UTF-8
|
R
| false
| false
| 12,449
|
r
|
VIs_Calculation.R
|
#####################################################################
# Title: ENVI_Indices
# Name: Francisco Manuel Rodriguez Huerta
# Date: 9 Abril 2014
# Description: Compute Indices
#####################################################################
#Load required libraries
require(raster)
require(rgdal)
### move to the working directory ###
# NOTE(review): rm(list = ls()) and setwd() in a script are anti-patterns;
# kept for compatibility with the original workflow.
rm(list = ls())
work.dir <- "I:\\CIMMYT\\YQ_Variability\\Data\\Images_PAexp\\1200m\\Resampled" ### Change the input directory
setwd(work.dir)
getwd()
#set filename
filename<-"H140507_PA_1m_62bands.dat" ### check file name
#create rasterstack object
# Build a RasterStack with one layer per band of the hyperspectral image.
# NOTE(review): stack(filename) would load all bands in one call instead of
# growing the stack band-by-band in a loop.
r1 <- raster(filename, band=1)
rs <- stack(r1)
index <- r1@file@nbands
rm(r1)
for (i in 2:index){
r1 <- raster(filename, band=i)
rs <- stack(rs,r1)
rm(r1)
}
show(rs)
#numbers of rows and columns
nr <- nrow(rs)
nc <- ncol(rs)
nlayers(rs)
#get values
# Pull the whole image as a (pixels x bands) matrix; /10000 converts the
# stored integer values to reflectance fractions.
R <- getValuesBlock(rs, row=1, nrows=nr, col=1, ncols=nc)/10000
dim(R)
#read identifier of bands and wavelengths
#setwd("H:\\CIMMYT\\YQ_Variability\\Data\\Images_PAexp\\")
#getwd()
# bands62.csv maps band index (col 1) to wavelength in nm (col 2).
bands <- read.table("bands62.csv",sep=",",header=TRUE)
setwd("I:\\CIMMYT\\YQ_Variability\\Data\\Images_PAexp\\1200m\\VIs\\140507") ### Output folder to VI maps
getwd()
#Important bands
# For each wavelength of interest, pick the column of R whose band centre
# (bands[,2], nm) is closest to the target wavelength.
R400 <- R[,bands[which.min(abs(bands[,2]-400)),1]]
R445 <- R[,bands[which.min(abs(bands[,2]-445)),1]]
R450 <- R[,bands[which.min(abs(bands[,2]-450)),1]]
R470 <- R[,bands[which.min(abs(bands[,2]-470)),1]]
R500 <- R[,bands[which.min(abs(bands[,2]-500)),1]]
R510 <- R[,bands[which.min(abs(bands[,2]-510)),1]]
R513 <- R[,bands[which.min(abs(bands[,2]-513)),1]]
R515 <- R[,bands[which.min(abs(bands[,2]-515)),1]]
R530 <- R[,bands[which.min(abs(bands[,2]-530)),1]]
R550 <- R[,bands[which.min(abs(bands[,2]-550)),1]]
R570 <- R[,bands[which.min(abs(bands[,2]-570)),1]]
R635 <- R[,bands[which.min(abs(bands[,2]-635)),1]]
R670 <- R[,bands[which.min(abs(bands[,2]-670)),1]]
R675 <- R[,bands[which.min(abs(bands[,2]-675)),1]]
R680 <- R[,bands[which.min(abs(bands[,2]-680)),1]]
R700 <- R[,bands[which.min(abs(bands[,2]-700)),1]]
R710 <- R[,bands[which.min(abs(bands[,2]-710)),1]]
R720 <- R[,bands[which.min(abs(bands[,2]-720)),1]]
R740 <- R[,bands[which.min(abs(bands[,2]-740)),1]]
R746 <- R[,bands[which.min(abs(bands[,2]-746)),1]]
R750 <- R[,bands[which.min(abs(bands[,2]-750)),1]]
R760 <- R[,bands[which.min(abs(bands[,2]-760)),1]]
R770 <- R[,bands[which.min(abs(bands[,2]-770)),1]]
R800 <- R[,bands[which.min(abs(bands[,2]-800)),1]]
# Quick visual sanity check of two reflectance distributions.
hist(R500)
hist(R800)
###################
### Get indices ###
###################
## Date token from the image file name (e.g. "H140507"); used as the file
## name prefix of every exported map.
date <- strsplit(filename, split = "_")[[1]][1]

## Template raster for the output maps: same grid and extent as the input
## image.  It is filled and geo-referenced by save_vi() for each index.
Rastout <- raster(nrows = nr, ncols = nc)
Rastout@extent <- rs@extent

## save_vi(): write a per-pixel vector of index values to <date>_<name>.tif
## in the current output folder.  Replaces the four lines of boilerplate the
## original script repeated for every single index.
save_vi <- function(values, name) {
  Rastout[] <- values
  proj4string(Rastout) <- CRS("+proj=utm +zone=12 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")
  writeRaster(Rastout, filename = paste(date, paste0(name, ".tif"), sep = "_"),
              format = "GTiff")
}

## Structural indices
NDVI <- (R800-R670)/(R800+R670)
save_vi(NDVI, "NDVI")
hist(NDVI)
RDVI <- (R800-R670)/sqrt(R800+R670)
save_vi(RDVI, "RDVI")
OSAVI <- (1+0.16)*(R800-R670)/(R800+R670+0.16)
save_vi(OSAVI, "OSAVI")
SR <- R800/R670
save_vi(SR, "SR")
MSR <- (SR-1)/sqrt(SR)+1
save_vi(MSR, "MSR")
MTVI1 <- 1.2*(1.2*(R800-R550)-2.5*(R670-R550))
save_vi(MTVI1, "MTVI1")
MCARI1 <- 1.2*(2.5*(R800-R670)-1.3*(R800-R550))
save_vi(MCARI1, "MCARI1")
## Shared denominator of the "improved" (2) variants below.
deno <- sqrt((2*R800+1)^2-(6*R800-5*sqrt(R670))-0.5)
MCARI2 <- (1.5/1.2)*MCARI1/deno
save_vi(MCARI2, "MCARI2")
MTVI2 <- (1.5/1.2)*MTVI1/deno
save_vi(MTVI2, "MTVI2")

## Chlorophyll indices
TVI <- 0.5*(120*(R750-R550)-200*(R670-R550))
save_vi(TVI, "TVI")
MCARI <- ((R700-R670)-0.2*(R700-R550))*(R700/R670)
save_vi(MCARI, "MCARI")
TCARI <- 3*MCARI
save_vi(TCARI, "TCARI")
TCARI_OSAVI <- TCARI/OSAVI
save_vi(TCARI_OSAVI, "TCARI_OSAVI")
MCARI_OSAVI <- MCARI/OSAVI
save_vi(MCARI_OSAVI, "MCARI_OSAVI")
GM1 <- R750/R550
save_vi(GM1, "GM1")
GM2 <- R750/R700
save_vi(GM2, "GM2")

## Red edge ratios (ZM)
CI <- R750/R710
save_vi(CI, "CI")
R750_R700 <- R750/R700
save_vi(R750_R700, "R750_R700")
R750_R670 <- R750/R670
save_vi(R750_R670, "R750_R670")
R710_R700 <- R710/R700
save_vi(R710_R700, "R710_R700")
R710_R670 <- R710/R670
save_vi(R710_R670, "R710_R670")

## RGB indices
R700_R670 <- R700/R670
save_vi(R700_R670, "R700_R670")
G <- R550/R670
save_vi(G, "G")

## Indices PDF
CAR <- R515/R570
save_vi(CAR, "CAR")
PRI <- (R570-R530)/(R570+R530)
save_vi(PRI, "PRI")
PRIn <- PRI*R670/(RDVI*R700)
save_vi(PRIn, "PRIn")
SIPI <- (R800-R445)/(R800+R680)
save_vi(SIPI, "SIPI")
RARS <- R746/R513
save_vi(RARS, "RARS")
PSSRa <- R800/R680
save_vi(PSSRa, "PSSRa")
PSSRb <- R800/R635
save_vi(PSSRb, "PSSRb")
PSSRc <- R800/R470
save_vi(PSSRc, "PSSRc")
PSNDc <- (R800-R470)/(R800+R470)
save_vi(PSNDc, "PSNDc")
RNIRxCRI550 <- (1/R510)-(1/R550)*R770
save_vi(RNIRxCRI550, "RNIRxCRI550")
RNIRxCRI700 <- (1/R510)-(1/R700)*R770
save_vi(RNIRxCRI700, "RNIRxCRI700")
PRI515 <- (R515-R530)/(R515+R530)
save_vi(PRI515, "PRI515")
PRIxCI <- PRI*((R760/R700)-1)
save_vi(PRIxCI, "PRIxCI")
PSRI <- (R680-R500)/R750
save_vi(PSRI, "PSRI")
VOG <- R740/R720
## BUG FIX: the original filled the VOG map with PSRI values
## (Rastout[] <- PSRI), so VOG.tif was a duplicate of PSRI.tif.
save_vi(VOG, "VOG")
BGI1 <- R400/R550
save_vi(BGI1, "BGI1")
BGI2 <- R450/R550
save_vi(BGI2, "BGI2")
EVI <- 2.5*(R800-R670)/(R800+6*R670-7.5*R400+1)
save_vi(EVI, "EVI")
|
cab831026d4c64bb8fb103f36e23a5af3ccab7d5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/climatol/examples/dd2m.Rd.R
|
2da327a2446c3f76391c289ab5ea8ce09ab27057
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
dd2m.Rd.R
|
library(climatol)
### Name: dd2m
### Title: Compute monthly data from daily series
### Aliases: dd2m
### Keywords: datagen

### ** Examples

## Run the example inside a throw-away directory so the generated files do
## not pollute the user's workspace.
tmp_dir <- tempdir()
prev_dir <- setwd(tmp_dir)
## Write the bundled test data in the file layout dd2m() expects.
data(Ttest)
write(dat, 'Ttest_1981-2000.dat')
write.table(est.c, 'Ttest_1981-2000.est', row.names = FALSE, col.names = FALSE)
rm(dat, est.c)  # free the loaded objects from the workspace
## Aggregate the daily series 1981-2000 into monthly values.
dd2m('Ttest', 1981, 2000)
## Restore the previous working directory.
setwd(prev_dir)
## Input and output files can be found in this directory:
print(tmp_dir)
|
3637df55a1b7f5b03b1b6cc8a16e2e165aa9ea3a
|
337e914fb4383becb3694d1bb0b453b6a1b01dd2
|
/Shiny_server/ui.R
|
1788cde892b6fd6925611bfb3fc1d886539c3253
|
[] |
no_license
|
cguillamet/Shiny-App-Cancer
|
0c14db5f55a449b08b498c2fda01809396680133
|
121351f23c572713ddb950396232a7095b9686a7
|
refs/heads/master
| 2023-04-17T20:39:39.080814
| 2021-04-26T15:45:38
| 2021-04-26T15:45:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,759
|
r
|
ui.R
|
# Shiny UI for the "Cáncer en TDF" (cancer in Tierra del Fuego) dashboard.
library(shiny)
library(ggplot2)
library(dplyr)
library(shinydashboard)
# Case records; FECDIAG is the diagnosis date (year in the first 4 chars).
datos <- read.csv("base2015_2020.csv")
datos$año <- substr(datos$FECDIAG, start = 1, stop = 4)
# Keep only complete years 2015-2019.
datos <- filter(datos, año %in% c("2015", "2016", "2017", "2018", "2019"))
# Cancer types with more than 2% of all cases (drives the radio buttons).
datos2 <- datos %>%
  group_by(TOP..cat.) %>%
  tally() %>%
  mutate(porc = round(n/sum(n),2)) %>%
  filter(porc > 0.02)
# Counts per type/year and per type/year/sex.
# NOTE(review): `tipo` and `tipo2` are not referenced in the UI below --
# presumably consumed by server.R; confirm before removing.
tipo <- datos %>%
  group_by(TOP..cat., año) %>%
  tally() %>% rename(tipo = "TOP..cat.")
tipo2 <- datos %>%
  group_by(TOP..cat., año, SEXO..desc.) %>%
  tally() %>% rename(tipo = "TOP..cat.", sexo = "SEXO..desc.")
# Use a fluid Bootstrap layout
ui <- dashboardPage(
  skin = "green",
  dashboardHeader(title = "Cáncer en TDF"),
  dashboardSidebar(sidebarMenu(
    # Multi-select of cancer types for the main cases plot.
    menuItem("Casos en TDF", icon = icon("bar-chart-o"),
             selectInput("tipo", "Tipo de cáncer:",
                         choices=sort(unique(datos$TOP..cat.)),
                         multiple = TRUE),
             helpText("Para eliminar un tipo utilice la tecla",
                      br(), # line break
                      "'delete' de su teclado")),
    # Single-choice selector restricted to the common (>2%) types.
    menuItem("Características", icon = icon("bar-chart-o"),
             radioButtons("carac", "Tipo de cáncer:",
                          choices=sort(unique(datos2$TOP..cat.)))))),
  dashboardBody(
    # Inline CSS tweaks for the header logo font and body background.
    tags$head(tags$style(HTML(
      '.main-header .logo{
      font-family: "Calibri", sans-serif;
      font-size: 24px;}
      .body{
      background-color: green;
      }'
    ))),
    # Plot slots rendered by server.R.
    fluidRow(
      box(plotOutput("cancerPlot"), width = 12),
      box(plotOutput("cplot")),
      box(plotOutput("edadplot"))
    )
  )
)
|
f360e10052dd89e2c0356af06be6ca14092c97e1
|
c252ef51fd38429f60f1674bfbf9bcdefe1484e5
|
/plot6.R
|
da08aaa18d2d4e9ee3fb67a5d5eaa42d777799cd
|
[] |
no_license
|
albiondervishi/Exploratory-Data-Analysis-Course-Project-2
|
b777eb91fdb1b790741a153f6656efa4b5ae298f
|
475515497284dce72c4ef9017513a9c5388ff12f
|
refs/heads/master
| 2016-09-06T09:12:05.414120
| 2015-02-22T19:00:38
| 2015-02-22T19:00:38
| 31,154,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,395
|
r
|
plot6.R
|
# install.packages("ggplot2")
# install.packages("plyr")
library(plyr)
library(ggplot2)
NEI <- readRDS("./exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exdata-data-NEI_data/Source_Classification_Code.rds")
# subset the data to contain only LA informations
losangelesdata <- subset(NEI, NEI$fips == "06037")
temp<-subset(SCC,SCC.Level.One=="Mobile Sources")
losangelesmobile <- losangelesdata[losangelesdata$SCC %in% temp$SCC,]
# using plyr pocket to make possible conversion of data
pm24motor<-ddply(losangelesmobile, .(year), summarise, totalEmissions = sum(Emissions))
# subset the data to contain only baltimore informations
baltimore<- subset(NEI, NEI$fips == "24510")
temp<-subset(SCC,SCC.Level.One=="Mobile Sources")
baltimoremobile <- baltimore[baltimore$SCC %in% temp$SCC,]
# using plyr pocket to make possible conversion of data
pm25motor<-ddply(baltimoremobile, .(year), summarise, totalEmissions = sum(Emissions))
# add the city factor to the file
pm24motor$location <- "Los Angeles County"
pm25motor$location <- "Baltimore"
#bind the data in one file
data7 <- rbind(pm24motor, pm25motor)
# creating plot
png("plot6.png", width = 640, height = 640)
ggplot(data=data7, aes(x=year, y=totalEmissions, fill = location)) + geom_bar(stat="identity", position=position_dodge()) + ggtitle("MotorVehicle Emissions between Baltimore and Los Angeles")
#close the plot
dev.off()
|
e5b961bc44d0c707ec527032a1fe07011865132f
|
df8133c5f01ffd0ed4d3fe816f681b8242058b1c
|
/man/arguments.Rd
|
0a80bfd71976354d16ef742edba46dc1788ec1f9
|
[] |
no_license
|
rauschenberger/semisup
|
ea6c7c54e0356bb879eaa944ddc69ea215d83595
|
0c92f0fb28047183bdb11f83f1769027128f55cd
|
refs/heads/master
| 2021-06-20T00:34:44.182268
| 2020-05-08T12:42:24
| 2020-05-08T12:42:24
| 113,054,516
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,462
|
rd
|
arguments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgname.R
\name{arguments}
\alias{arguments}
\title{Documentation}
\arguments{
\item{y}{\strong{observations:}
numeric vector of length \code{n}}
\item{Y}{\strong{observations:}
numeric vector of length \code{n},
or numeric matrix with \code{n} rows (samples)
and \code{q} columns (variables)}
\item{z}{\strong{class labels:}
integer vector of length \code{n},
with entries \code{0}, \code{1} and \code{NA}}
\item{Z}{\strong{class labels:}
numeric vector of length \code{n},
or numeric matrix with \code{n} rows (samples)
and \code{p} columns (variables),
with entries \code{0} and \code{NA}}
\item{dist}{distributional assumption\strong{:}
character \code{"norm"} (Gaussian),
\code{"nbinom"} (negative bionomial),
or \code{"zinb"} (zero-inflated negative binomial)}
\item{phi}{dispersion parameters\strong{:}
numeric vector of length \code{q},
or \code{NULL}}
\item{pi}{zero-inflation parameter(s)\strong{:}
numeric vector of length \code{q},
or \code{NULL}}
\item{gamma}{offset\strong{:}
numeric vector of length \code{n},
or \code{NULL}}
\item{test}{resampling procedure\strong{:}
character \code{"perm"} (permutation) or
\code{"boot"} (parametric bootstrap),
or \code{NULL}}
\item{iter}{(maximum) number of resampling iterations \strong{:}
positive integer, or \code{NULL}}
\item{kind}{resampling accuracy\strong{:}
numeric between \code{0} and \code{1}, or \code{NULL}\strong{;}
all \code{p}-values above \code{kind} are approximate}
\item{starts}{restarts of the \code{EM} algorithm\strong{:}
positive integer (defaults to \code{1})}
\item{it.em}{(maximum) number of iterations in the \code{EM} algorithm\strong{:}
positive integer (defaults to \code{100})}
\item{epsilon}{convergence criterion for the \code{EM} algorithm\strong{:}
non-negative numeric (defaults to \code{1e-04})}
\item{debug}{verification of arguments\strong{:}
\code{TRUE} or \code{FALSE}}
\item{pass}{parameters for parametric bootstrap algorithm}
\item{...}{settings \code{EM} algorithm\strong{:}
\code{starts}, \code{it.em} and \code{epsilon}
(see \code{\link{arguments}})}
}
\description{
This page lists and describes all arguments
of the R package \code{\link{semisup}}.
}
\seealso{
Use \code{\link{mixtura}} for model fitting,
and \code{\link{scrutor}} for hypothesis testing.
All other functions of the R package \code{\link{semisup}}
are \code{\link{internal}}.
}
\keyword{internal}
|
4bd5fc1a5a5c12195466e8aa436cfd4c4de7313b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/uptimeRobot/tests/test-account.details.R
|
38d0fa5c84d2203fcfc0b21bf6780f236d83d9a2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,089
|
r
|
test-account.details.R
|
test_that("uptimerobot.account.details", {
skip_on_cran()
api.key <- Sys.getenv("KEY", "")
account.details.list <- uptimerobot.account.details(api.key)
# Output is a list
expect_is(account.details.list, "list")
# Output list has 5 elements
expect_equal(length(account.details.list), 5)
# Elements are named as expected
expect_identical(names(account.details.list), c("monitorLimit", "monitorInterval", "upMonitors", "downMonitors", "pausedMonitors"))
account.details.vector <- uptimerobot.account.details(api.key, unlist = TRUE)
# Output is a vector
expect_is(account.details.vector, "integer")
# Output vector has 5 rows
expect_equal(length(account.details.vector), 5)
# Error in case of invalid api.key
expect_error(uptimerobot.account.details("blahblah"), "apiKey not mentioned or in a wrong format")
# Error in case the api.key is NULL or empty
expect_error(uptimerobot.account.details("", "api.key cannot be empty or NULL"))
# Clean the environment
rm(list = ls())
})
|
09b7b83993eb7996364849a76a8e3f11910f9f6d
|
78d4bb19db2d2baaeeca5c3ea13106bd83410904
|
/R/trainer.R
|
2ca224ccb51be3ae196b60f5993529eefd9e19a7
|
[] |
no_license
|
IshmaelBelghazi/bigoptim
|
77964dcbc8a14aed1a53d82bd67e6fbea3449686
|
720e80b0c2b935a2d0c814c159b8b971ad7d7e73
|
refs/heads/master
| 2016-09-05T16:34:10.796934
| 2015-12-09T14:45:15
| 2015-12-09T14:45:15
| 37,093,936
| 12
| 4
| null | 2015-09-03T17:13:12
| 2015-06-08T21:30:13
|
C
|
UTF-8
|
R
| false
| false
| 10,613
|
r
|
trainer.R
|
##' @title Stochastic Average Gradient
##' @param X Matrix, possibly sparse, of features.
##' @param y Matrix of targets.
##' @param lambda Scalar. L2 regularization parameter.
##' @param maxiter Maximum number of iterations.
##' @param w Matrix of weights.
##' @param alpha constant step-size. Used only when fit_alg == "constant"
##' @param stepSizeType scalar default is 1 to use 1/L, set to 2 to use 2/(L + n*myu). Only used when fit_alg="linesearch"
##' @param Li Scalar or Matrix. Initial individual Lipschitz approximation.
##' @param Lmax Initial global Lipschitz approximation.
##' @param increasing Boolean. TRUE allows for both increase and decrease of Lipschitz coefficients. FALSE allows only decrease.
##' @param d Initial approximation of cost function gradient.
##' @param g Initial approximation of individual losses gradient.
##' @param covered Matrix of covered samples.
##' @param standardize Boolean. Scales the data if TRUE.
##' @param tol Real. Minimal required approximate gradient norm before convergence.
##' @param family One of "binomial", "gaussian", "exponential" or "poisson"
##' @param fit_alg One of "constant", "linesearch" (default), or "adaptive"
##' @param monitor Boolean. If TRUE returns matrix of weights after each effective pass through the dataset.
##' @param user_loss_function User supplied R or C loss and gradient functions
##' @param ... Any other pass-through parameters.
##' @export
##' @return object of class SAG_fit
##' @useDynLib bigoptim, .registration=TRUE
sag_fit <- function(X, y, lambda=0, maxiter=NULL, w=NULL, alpha=NULL,
                    stepSizeType=1, Li=NULL, Lmax=NULL, increasing=TRUE,
                    d=NULL, g=NULL, covered=NULL, standardize=FALSE,
                    tol=1e-3, family="binomial", fit_alg="constant",
                    monitor=FALSE, user_loss_function=NULL, ...) {
  ## Checking for sparsity
  sparse <- is.sparse(X)
  ##,-------------------
  ##| Data preprocessing
  ##`-------------------
  if (standardize && !sparse) {
    X <- scale(X)
  }
  ##,------------------------------
  ##| Initializing common variables
  ##`------------------------------
  if (is.null(maxiter)) {
    ## Monitoring stores one weight vector per pass, which is unbounded here.
    if (monitor) stop("monitoring not allowed with unbounded maximum iterations")
    maxiter <- .Machine$integer.max
  }
  ## Initializing weights
  if (is.null(w)) {
    w <- matrix(0, nrow=NCOL(X), ncol=1)
  }
  ## Initializing loss derivatives
  if (is.null(d)) {
    d <- matrix(0, nrow=NCOL(X), ncol=1)
  }
  ## Initializing sum of loss derivatives
  if (is.null(g)) {
    g <- matrix(0, nrow=NROW(X), ncol=1)
  }
  ## Initializing covered values tracker (which samples have been visited)
  if (is.null(covered)) {
    covered <- matrix(0L, nrow=NROW(X), ncol=1)
  }
  ## Validate the user-supplied compiled loss/gradient specification.
  if (family == "c_shared") {
    if (length(user_loss_function$lib_file_path) == 0) {
      stop("unspecified shared lib file path")
    } else {
      if (!file.exists(user_loss_function$lib_file_path))
        stop("misspecified shared lib file path.")
    }
    if (length(user_loss_function$loss_name) == 0)
      stop("unspecified loss function name")
    if (length(user_loss_function$grad_name) == 0) {
      stop("unspecified grad function name")
    }
  }
  ##,-----------------
  ##| Setting model id
  ##`-----------------
  family_id <- switch(family,
                      gaussian=0,
                      binomial=1,
                      exponential=2,
                      poisson=3,
                      c_shared=4,
                      R=5,
                      stop("unrecognized model"))
  ##,-------------------
  ##| Setting fit_alg id
  ##`-------------------
  ## BUG FIX: the default branch previously said "unrecognized model",
  ## which pointed users at the wrong argument.
  fit_alg_id <- switch(fit_alg,
                       constant=0,
                       linesearch=1,
                       adaptive=2,
                       stop("unrecognized fit algorithm"))
  ##,------------------------
  ##| Fit algorithm selection
  ##`------------------------
  switch(fit_alg,
         constant={
           if (is.null(alpha)) {
             ## Default step size from the standard Lipschitz bound for
             ## logistic loss plus the L2 term.
             Lmax <- 0.25 * max(Matrix::rowSums(X^2)) + lambda
             alpha <- 1/Lmax  ## 1/(16 * Lmax)
           }
         },
         linesearch={
           if (is.null(Lmax)) {
             ## TODO(Ishmael): Confusion between Lmax and stepSize
             Li <- 1
           }
         },
         adaptive={
           if (is.null(Lmax)) {
             ## Initial guess of overall Lipschitz Constant
             Lmax <- 1
           }
           if (is.null(Li)) {
             ## Initial guess of Lipschitz constant of each function
             Li <- matrix(1, nrow=NROW(X), ncol=1)
           }
         },
         stop("unrecognized fit algorithm"))
  ## Run the compiled SAG solver (column-major access expects t(X)).
  sag_fit <- .Call("C_sag_fit", w, Matrix::t(X), y, lambda,
                   alpha, as.integer(stepSizeType), Li, Lmax,
                   as.integer(increasing),
                   d, g, covered, tol,
                   as.integer(maxiter),
                   as.integer(family_id),
                   as.integer(fit_alg_id),
                   user_loss_function,
                   as.integer(sparse),
                   as.integer(monitor))
  ##,---------------------------
  ##| Structuring SAG_fit object
  ##`---------------------------
  sag_fit$input <- list(maxiter=maxiter, family=family, lambda=lambda, tol=tol, alpha=alpha, fit_alg=fit_alg)
  class(sag_fit) <- "SAG_fit"
  sag_fit
}
##' @title Stochastic Average Gradient with warm-starting
##' @param X Matrix, possibly sparse, of features.
##' @param y Matrix of targets.
##' @param lambdas Vector. Vector of L2 regularization parameters.
##' @param maxiter Maximum number of iterations.
##' @param w Matrix of weights.
##' @param alpha constant step-size. Used only when fit_alg == "constant"
##' @param stepSizeType scalar default is 1 to use 1/L, set to 2 to use 2/(L + n*myu). Only used when fit_alg="linesearch"
##' @param Li Scalar or Matrix. Initial individual Lipschitz approximation.
##' @param Lmax Initial global Lipschitz approximation.
##' @param increasing Boolean. TRUE allows increase of Lipschitz coefficient. FALSE allows only decrease.
##' @param d Initial approximation of cost function gradient.
##' @param g Initial approximation of individual losses gradient.
##' @param covered Matrix of covered samples.
##' @param standardize Boolean. Scales the data if TRUE.
##' @param tol Real. Minimal required approximate gradient norm before convergence.
##' @param family One of "binomial", "gaussian", "exponential" or "poisson"
##' @param fit_alg One of "constant", "linesearch" (default), or "adaptive".
##' @param user_loss_function User supplied R or C loss and gradient functions
##' @param ... Any other pass-through parameters.
##' @export
##' @return object of class SAG
##' @useDynLib bigoptim, .registration=TRUE
sag <- function(X, y, lambdas, maxiter=NULL, w=NULL, alpha=NULL,
                stepSizeType=1, Li=NULL, Lmax=NULL, increasing=TRUE,
                d=NULL, g=NULL, covered=NULL, standardize=FALSE,
                tol=1e-3, family="binomial", fit_alg="constant", user_loss_function=NULL,
                ...) {
  ## Warm-starting runs from the strongest to the weakest regularization.
  lambdas <- sort(lambdas, decreasing=TRUE)
  ## Checking for sparsity
  sparse <- is.sparse(X)
  ##,-------------------
  ##| Data preprocessing
  ##`-------------------
  if (standardize && !sparse) {
    X <- scale(X)
  }
  ##,------------------------------
  ##| Initializing common variables
  ##`------------------------------
  if (is.null(maxiter)) {
    maxiter <- .Machine$integer.max
  }
  ## Initializing weights
  if (is.null(w)) {
    w <- matrix(0, nrow=NCOL(X), ncol=1)
  }
  ## Initializing loss derivatives
  if (is.null(d)) {
    d <- matrix(0, nrow=NCOL(X), ncol=1)
  }
  ## Initializing sum of loss derivatives
  if (is.null(g)) {
    g <- matrix(0, nrow=NROW(X), ncol=1)
  }
  ## Initializing covered values tracker (which samples have been visited)
  if (is.null(covered)) {
    covered <- matrix(0L, nrow=NROW(X), ncol=1)
  }
  ## Validate the user-supplied compiled loss/gradient specification.
  if (family == "c_shared") {
    if (length(user_loss_function$lib_file_path) == 0) {
      stop("unspecified shared lib file path")
    } else {
      if (!file.exists(user_loss_function$lib_file_path))
        stop("misspecified shared lib file path.")
    }
    if (length(user_loss_function$loss_name) == 0)
      stop("unspecified loss function name")
    if (length(user_loss_function$grad_name) == 0) {
      stop("unspecified grad function name")
    }
  }
  ##,-----------------
  ##| Setting model id
  ##`-----------------
  ## NOTE(review): unlike sag_fit(), no pure-R family (id 5) is accepted
  ## here -- confirm whether that asymmetry is intentional.
  family_id <- switch(family,
                      gaussian=0,
                      binomial=1,
                      exponential=2,
                      poisson=3,
                      c_shared=4,
                      stop("unrecognized model"))
  ##,-------------------
  ##| Setting fit_alg id
  ##`-------------------
  ## BUG FIX: the default branch previously said "unrecognized model",
  ## which pointed users at the wrong argument.
  fit_alg_id <- switch(fit_alg,
                       constant=0,
                       linesearch=1,
                       adaptive=2,
                       stop("unrecognized fit algorithm"))
  ##,------------------------
  ##| Fit algorithm selection
  ##`------------------------
  switch(fit_alg,
         constant={
           if (is.null(alpha)) {
             ## TODO(Ishmael): Lmax depends on lambda for warmstarting
             Lmax <- 0.25 * max(Matrix::rowSums(X^2)) + lambdas[1]
             alpha <- 1/Lmax  ## 1/(16 * Lmax)
           }
         },
         linesearch={
           if (is.null(Lmax)) {
             Li <- 1
           }
         },
         adaptive={
           if (is.null(Lmax)) {
             ## Initial guess of overall Lipschitz Constant
             Lmax <- 1
           }
           if (is.null(Li)) {
             ## Initial guess of Lipschitz constant of each function
             Li <- matrix(1, nrow=NROW(X), ncol=1)
           }
         },
         stop("unrecognized fit algorithm"))
  ## NOTE(review): `increasing` is passed uncoerced here while sag_fit()
  ## passes as.integer(increasing) -- confirm the C entry point accepts a
  ## logical before unifying.
  sag_fits <- .Call("C_sag", w, Matrix::t(X), y, lambdas,
                    alpha, as.integer(stepSizeType), Li, Lmax,
                    increasing, d, g, covered, tol,
                    as.integer(maxiter),
                    as.integer(family_id),
                    as.integer(fit_alg_id),
                    user_loss_function,
                    as.integer(sparse))
  ##,---------------------------
  ##| Structuring SAG_fit object
  ##`---------------------------
  sag_fits$input <- list(maxiter=maxiter,
                         family=family,
                         lambdas=lambdas,
                         tol=tol,
                         alpha=alpha,
                         stepSizeType=stepSizeType,
                         fit_alg=fit_alg)
  class(sag_fits) <- "SAG"
  sag_fits
}
|
8982a3acc05a483e3bc3d1943eeac3541bf38e92
|
b7a86e371a0c3ec4516ecc471c9bb96a1c77c071
|
/R/04_sensitivity_nimble_bym2.R
|
d96ab913bc3584e1db6ea62631fd7b44c2ae0fd5
|
[] |
no_license
|
sophie-a-lee/spatial_smooth_framework
|
7a81d4333be83d4f7b3b635d853c5c65db3c0be2
|
c43ea298bbf220a0f6890e8d4efe66cdbb736c59
|
refs/heads/main
| 2023-04-18T19:30:02.164885
| 2022-09-06T14:42:33
| 2022-09-06T14:42:33
| 496,954,346
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,534
|
r
|
04_sensitivity_nimble_bym2.R
|
###################################################################
####                                                           ####
####  Sensitivity study 1: a distance-based spatial structure  ####
####            NIMBLE CAR model comparison                    ####
####                                                           ####
###################################################################
#### Load packages, data and functions ####
source("00_load_data_functions.R")
#### Load simulated data ####
## NOTE: this code will only work if 01_simstudy1_simulation.R has been run
df_sim_lonlat <- read_rds("data/df_sim_lonlat.rds")
#### Load spatial smooth model ####
## NOTE: this code will only work if 01_simstudy1_simulation.R and
## 02_simstudy1_run_models.R has been run
model_list <- read_rds("output/sim_study1/smooth_model_list.rds")
#### Load INLA BYM2 model ####
inla_model_list <- read_rds("output/sim_study1/inla_model_list.rds")
#### Fit BYM2 model using NIMBLE ####
## Create nb object from shapefile (use <- for assignment, not =)
row.names(shp) <- shp$municip_code_ibge
nb.south <- poly2nb(shp)
# Convert to WinBUGS object
nb.WB <- nb2WB(nb.south)
# Calculate scale parameter (needed for BYM2 scaling and model fit)
nb.mat <- nb2mat(nb.south, style = "B")
colnames(nb.mat) <- rownames(nb.mat)
## Structure matrix of the ICAR component: negative adjacency with the
## number of neighbours on the diagonal.
nb.scale <- -nb.mat
diag(nb.scale) <- abs(apply(nb.scale, 1, sum))
# solve(nb.scale) # this should not work since by definition the matrix is singular
Q = inla.scale.model(nb.scale,
                     constr = list(A = matrix(1, nrow = 1,
                                              ncol = nrow(nb.scale)), e = 0))
## Geometric mean of the marginal variances -> BYM2 scaling factor.
scale = exp((1/nrow(nb.scale)) * sum(log(1/diag(Q))))
#### Fit BYM2 model with NIMBLE for comparison ####
bym2_results <- lapply(df_sim_lonlat, BYM2_nimble_fit)
## Save model results
# write_rds(bym2_results, file = "output/simstudy1/bym2_sens_models.rds")
#### Extract intercept estimates from simulations ####
# One intercept summary per simulated dataset, for each of the three models.
b_est_smooth <- lapply(model_list, b_extract_smooth)
b_est_bym2 <- lapply(bym2_results, b_extract_bym2)
b_est_inla <- lapply(inla_model_list, b_extract_inla)
# Combine estimates and add known phi values
# (simulations were generated over phi = 0, 0.1, ..., 1 in that order).
b_table_smooth <- reduce(b_est_smooth, rbind) %>%
  # Add true phi value
  mutate(phi = seq(0, 1, by = .1))
b_table_bym2 <- reduce(b_est_bym2, rbind) %>%
  # Add true phi value
  mutate(phi = seq(0, 1, by = .1))
b_table_inla <- reduce(b_est_inla, rbind) %>%
  # Add true phi value
  mutate(phi = seq(0, 1, by = .1))
## Combine intercept estimates into one dataset
b_est_full <- full_join(b_table_smooth, b_table_bym2, by = "phi",
                        suffix = c(".smooth", ".bym2")) %>%
  full_join(., b_table_inla, by = "phi")
#### Plot intercept estimates for smooth & BYM2 models ####
# Point + 95% CI per model; the +-0.01 x-shifts keep the three intervals
# from overlapping at each true phi value.
b_line_full <- ggplot(data = b_est_full) +
  # Plot smooth estimates + 95% CI
  geom_point(aes(x = phi, y = b_est.smooth)) +
  geom_linerange(aes(ymin = b_lq.smooth, ymax = b_uq.smooth, x = phi), lwd = 1) +
  # Add INLA estimates + 95% CI
  geom_point(aes(x = (phi + .01), y = b_est_inla.mean), col = "blue") +
  geom_linerange(aes(ymin = b_lq_inla.0.025quant,
                     ymax = b_uq_inla.0.975quant, x = (phi + .01)), lwd = 1,
                 col = "blue") +
  # Plot BYM2 NIMBLE estimates + 95% CI
  geom_point(aes(x = (phi - .01), y = b_est.bym2), col = "magenta") +
  geom_linerange(aes(ymin = b_lq.bym2,
                     ymax = b_uq.bym2,
                     # Shift to avoid overlap
                     x = (phi - .01)), lwd = 1, col = "magenta") +
  # Add reference line with true value (b = 0)
  geom_hline(yintercept = 0, linetype = "dashed") +
  scale_x_continuous(name = expression(phi), breaks = seq(0, 1, by = .1)) +
  scale_y_continuous(name = "Intercept estimate") +
  theme_bw()
ggsave(b_line_full, filename = "output/sim_study1/bym2_nimble_b_comp_full.png")
#### Extract estimates for phi/mixing parameters ####
n <- nrow(shp)
## Spatial smooth model
smooth_model_vars <- lapply(model_list, extract_vars_2re)
## BYM2 nimble model
bym2_model_vars <- lapply(bym2_results, phi_extract_bym2)
## INLA model
phi_inla <- NA
phi_inla_lq <- NA
phi_inla_uq <- NA
for(i in 1: length(inla_model_list)) {
phi_inla[i] <- inla_model_list[[i]]$summary.hyperpar[2,1]
phi_inla_lq[i] <- inla_model_list[[i]]$summary.hyperpar[2, 3]
phi_inla_uq[i] <- inla_model_list[[i]]$summary.hyperpar[2, 5]
}
inla_phi_est <- data.table(true_phi = seq(0, 1, by = .1),
inla_phi = phi_inla,
inla_phi_lq = phi_inla_lq,
inla_phi_uq = phi_inla_uq)
## Combine the estimated phi from each model and calculate mean and 95% CI
smooth_phi_est <- reduce(smooth_model_vars, rbind) %>%
# Add true phi value
mutate(phi = rep(seq(0, 1, by = .1),
each = nrow(smooth_model_vars[[1]]))) %>%
group_by(phi) %>%
summarise(phi_est = mean(propn_var),
phi_lq = quantile(propn_var, .025),
phi_uq = quantile(propn_var, .975)) %>%
ungroup()
bym2_phi_est <- reduce(bym2_model_vars, rbind) %>%
# Add true phi value
mutate(phi = seq(0, 1, by = .1))
#### Plot mean & 95% CI phi estimates for all 3 models ####
phi_comp_plot_full <- ggplot() +
geom_point(data = smooth_phi_est,
aes(x = phi, y = phi_est)) +
geom_linerange(data = smooth_phi_est,
aes(ymin = phi_lq, ymax = phi_uq, x = phi), lwd = 1) +
geom_point(data = bym2_phi_est,
# Shift to avoid overlap
aes(x = (phi - .01), y = phi_est), col = "magenta") +
geom_linerange(data = bym2_phi_est,
aes(ymin = phi_lq, ymax = phi_uq,
x = (phi - .01)), lwd = 1, col = "magenta") +
geom_point(data = inla_phi_est,
# Shift to avoid overlap
aes(x = (true_phi + .01), y = inla_phi), col = "blue") +
geom_linerange(data = inla_phi_est,
aes(ymin = inla_phi_lq, ymax = inla_phi_uq,
x = (true_phi + .01)), lwd = 1, col = "blue") +
# Add reference line with simulation value
geom_abline(intercept = 0, slope = 1, linetype = "dashed") +
labs(x = expression("True" ~ phi), y = expression("Estimated"~ phi)) +
expand_limits(x = c(0, 1), y = c(0, 1)) +
theme_bw()
ggsave(phi_comp_plot_full, filename = "output/sim_study1/bym2_nimble_phi_comp_full.png")
## Mean absolute error
## BYM2 NIMBLE
# Extract predicted values
n <- nrow(df_sim_lonlat[[1]])
mu_est <- lapply(bym2_results, lambda_pred_2re)
# Calculate the absolute error for each model
for(i in 1:length(df_sim_lonlat)) {
df_sim_lonlat[[i]] <- df_sim_lonlat[[i]] %>%
mutate(lambda_est = (mu_est[[i]]/E),
absolute_error = abs(lambda - lambda_est))
}
# Calculate mean absolute error
mae_bym2<- lapply(df_sim_lonlat, function(x) mean(x$absolute_error))
# Extract WAIC
waic_bym2 <- lapply(bym2_results, function(x) x$WAIC$WAIC)
## Import other comparison statistics
## NOTE: this code will only work if 01_simstudy1_simulation.R,
## 02_simstudy1_run_models.R and 03_simstudy1_model_output have been run
inla_smooth_comp <- fread(file = "output/sim_study1/model_comp.csv")
# Combine stats for smooth INLA and BYM2 models
model_comp <- data.table(inla_smooth_comp,
bym2_mae = round(unlist(mae_bym2), 2),
bym2_waic = round(unlist(waic_bym2), 2),
bym2_phi = round(bym2_phi_est$phi_est, 3))
fwrite(model_comp, file = "output/sim_study1/bym2_nimble_comp_full.csv")
|
3f487be18c54d6af5c5f7344defc7c4c1924b2c3
|
338f244da7ed149b69d7365102eabbb6db30c674
|
/man/check_pedig_parent.Rd
|
99f6aaa1861e391299fd70ae62602bec0b10e87e
|
[
"MIT"
] |
permissive
|
fbzwsqualitasag/qpdt
|
4103572a71a149b4cff79643228dbf0c164501de
|
736d6e45d1240affbf9d9f3cfaaf6a957bf562a6
|
refs/heads/main
| 2023-04-06T22:04:40.170526
| 2021-04-01T06:24:44
| 2021-04-01T06:24:44
| 344,161,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,916
|
rd
|
check_pedig_parent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_pedigree_parent.R
\name{check_pedig_parent}
\alias{check_pedig_parent}
\title{Check Properties of Parents in a Pedigree}
\usage{
check_pedig_parent(
ps_pedig_path,
ps_delim = "|",
ps_id_col = "#IDTier",
ps_sire_col = "IDVater",
ps_dam_col = "IDMutter",
ps_bd_col = "Birthdate",
ps_sex_col = "Geschlecht",
pcol_types = NULL,
ptbl_pedigree = NULL,
pn_bd_tol = 0,
pl_wrong_sex = list(sire = "F", dam = "M")
)
}
\arguments{
\item{ps_pedig_path}{path to the pedigree input file}
\item{ps_delim}{column delimiting character}
\item{ps_id_col}{column title for animal IDs}
\item{ps_sire_col}{column title for sire IDs}
\item{ps_dam_col}{column title for dam IDs}
\item{ps_bd_col}{column title for birthdates}
\item{ps_sex_col}{column title for sex}
\item{pcol_types}{column types of pedigree in ps_pedig_path}
\item{ptbl_pedigree}{tibble containing pedigree information}
\item{pn_bd_tol}{minimal tolerance for age difference between animal and parents (in days)}
\item{pl_wrong_sex}{list with characters denoting the wrong sex}
}
\description{
Some descriptive statistics about the pedigree are collected. The main
check consists of the comparison of the birthdates of animals to the
birthdates of their parents. The check of birthdates can be parametrized
by a minimal tolerance of the difference using the argument \code{pn_bd_tol}.
The last check lists all animals that have the same IDs as one of their
parents.
}
\details{
The comparison of the birthdates is done via a join of the parent birthdates
to a tibble that consists of only animals, their birthdates and their parents.
The comparison is done for sires and dams in two separate steps.
}
\examples{
\dontrun{
check_pedig_parent(ps_pedig_path = system.file('extdata',
'PopReport_SN_ohne_20210115.csv_adaptfin2.csv',
package = 'qpdt'))
}
}
|
8a3382ebcceea2b744f59d18ea6496509582d081
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/BayesCR/R/UtilitariosSim.r
|
2c178cdb45b1606bd6e739c22776ef981152f033
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,894
|
r
|
UtilitariosSim.r
|
# CDF of the Pearson type VII distribution with location mu, squared scale
# sigma2, degrees of freedom nu and shape delta. After standardising by the
# effective scale sqrt(sigma2 * delta / nu) it reduces to the Student-t CDF.
PearsonVII <- function(y, mu, sigma2, nu, delta) {
  scale2 <- sigma2 * (delta / nu)      # effective squared scale
  std <- (y - mu) / sqrt(scale2)       # standardised observations
  pt(std, df = nu)
}
# CDF of the slash distribution via its normal scale-mixture representation:
# P(Y <= y) = int_0^1 nu u^(nu-1) Phi(z sqrt(u)) du, with z = (y - mu)/sigma.
# Each element is evaluated by one-dimensional numerical quadrature.
AcumSlash <- function(y, mu, sigma2, nu) {
  std <- (y - mu) / sqrt(sigma2)
  out <- numeric(length(std))
  for (i in seq_along(std)) {
    mixture_cdf <- function(u) nu * u^(nu - 1) * pnorm(std[i] * sqrt(u))
    out[i] <- integrate(mixture_cdf, 0, 1)$value
  }
  out
}
# CDF of the contaminated normal distribution: a two-component mixture of
# N(mu, sigma2 / gama) with weight eta and N(mu, sigma2) with weight 1 - eta.
# The parameter vector is nu = c(eta, gama).
AcumNormalC <- function(y, mu, sigma2, nu) {
  eta <- nu[1]
  gama <- nu[2]
  eta * pnorm(y, mu, sqrt(sigma2 / gama)) +
    (1 - eta) * pnorm(y, mu, sqrt(sigma2))
}
# Density of the Pearson type VII distribution: a Student-t density after
# standardising by the effective scale sqrt(sigma2 * delta / nu), divided by
# that scale (change-of-variable Jacobian).
dPearsonVII <- function(y, mu, sigma2, nu, delta) {
  scale2 <- sigma2 * (delta / nu)
  std <- (y - mu) / sqrt(scale2)
  dt(std, df = nu) / sqrt(scale2)
}
# Density of the slash distribution via its normal scale-mixture form:
# f(y) = int_0^1 nu u^(nu - 1/2) phi(z sqrt(u)) / sigma du, z = (y - mu)/sigma.
# Evaluated elementwise by numerical quadrature on (0, 1).
dSlash <- function(y, mu, sigma2, nu) {
  std <- (y - mu) / sqrt(sigma2)
  out <- numeric(length(std))
  for (i in seq_along(std)) {
    mixture_pdf <- function(u) nu * u^(nu - 0.5) * dnorm(std[i] * sqrt(u)) / sqrt(sigma2)
    out[i] <- integrate(mixture_pdf, 0, 1)$value
  }
  out
}
# Density of the contaminated normal distribution: mixture of
# N(mu, sigma2 / gama) (weight eta) and N(mu, sigma2) (weight 1 - eta),
# with nu = c(eta, gama).
dNormalC <- function(y, mu, sigma2, nu) {
  eta <- nu[1]
  gama <- nu[2]
  eta * dnorm(y, mu, sqrt(sigma2 / gama)) +
    (1 - eta) * dnorm(y, mu, sqrt(sigma2))
}
# Log-likelihood of a censored contaminated-normal model.
# auxf   : squared standardised residuals (square-rooted below)
# cc     : censoring indicator (0 = observed, 1 = censored)
# cens   : censoring type code, "1" or "2"
# sigma2 : scale parameter; nu = c(eta, gama) mixture parameters
# Observed points contribute the log density, censored points the log CDF.
# NOTE(review): the cens == "1" and cens == "2" branches are byte-identical;
# presumably one of them was meant to handle the other censoring direction
# (e.g. 1 - AcumNormalC) — confirm against the sampler that calls this.
# NOTE(review): if cens is neither "1" nor "2", ver1 is never assigned and
# the return errors.
verosCN <- function(auxf,cc,cens,sigma2,nu)
{
auxf1 <- sqrt(auxf)
if (cens=="1")
{
ver1 <-(sum(log(dNormalC(auxf1[cc==0],0,1,nu)/sqrt(sigma2)))+ sum(log(AcumNormalC(auxf1[cc==1],0,1,nu))))
}
if (cens=="2")
{
ver1 <-(sum(log(dNormalC(auxf1[cc==0],0,1,nu)/sqrt(sigma2)))+ sum(log(AcumNormalC(auxf1[cc==1],0,1,nu))))
}
return(ver1)
}
# Gelman-Rubin Rhat for each of the p parameters stored in `param`.
# The chains of each parameter are stacked in its column; the reshape below
# (column-major) splits every parameter column into n.chains columns of
# `efect` post-burn-in draws each, which are then passed to Rhat().
#
# param    : matrix (or coercible) of draws, one column per parameter
# n.iter   : total iterations per chain; burnin : discarded iterations
# n.chains : number of chains; n.thin : thinning interval
# Returns a p x 1 matrix of Rhat values.
Rhat1 <- function(param, n.iter, burnin, n.chains, n.thin) {
  param <- as.matrix(param)
  efect <- (n.iter - burnin) / n.thin   # effective draws per chain
  p <- ncol(param)
  mat <- matrix(param, nrow = efect, ncol = n.chains * p)
  rhat <- matrix(0, nrow = p, ncol = 1)
  for (i in seq_len(p)) {
    # BUG FIX: the column indices were hard-coded as 2*(i-1)+1 and 2*i,
    # i.e. they silently assumed n.chains == 2. Generalised so parameter i
    # spans its n.chains columns (identical result when n.chains == 2).
    first <- n.chains * (i - 1) + 1
    last <- n.chains * i
    # drop = FALSE keeps a one-chain selection as a matrix for Rhat().
    rhat[i, 1] <- Rhat(mat[, first:last, drop = FALSE])
  }
  rhat
}
# Gelman-Rubin potential scale reduction factor for one parameter, with the
# Brooks-Gelman degrees-of-freedom correction. `mat` holds one chain per
# column and one draw per row.
Rhat <- function(mat) {
  m <- ncol(mat)                       # number of chains
  n <- nrow(mat)                       # draws per chain
  chain_means <- apply(mat, 2, mean)
  # Between-chain variance component.
  B <- n * sum((chain_means - mean(mat))^2) / (m - 1)
  chain_vars <- apply(mat, 2, var)
  # Within-chain variance component.
  W <- mean(chain_vars)
  s2hat <- (n - 1) / n * W + B / n
  Vhat <- s2hat + B / (m * n)
  covWB <- n / m * (cov(chain_vars, chain_means^2) -
                      2 * mean(chain_means) * cov(chain_vars, chain_means))
  varV <- (n - 1)^2 / n^2 * var(chain_vars) / m +
    (m + 1)^2 / m^2 / n^2 * 2 * B^2 / (m - 1) +
    2 * (m - 1) * (n - 1) / m / n^2 * covWB
  df <- 2 * Vhat^2 / varV
  sqrt((df + 3) * Vhat / (df + 1) / W)
}
# Highest posterior density (shortest) 100(1-alpha)% interval from a sample.
# Among all windows of the sorted sample that exclude a proportion alpha,
# the one with the smallest width is returned.
hpd <- function(x, alpha) {
  n <- length(x)
  m <- max(1, ceiling(alpha * n))      # number of candidate window starts
  sorted <- sort(x)
  lower_cands <- sorted[1:m]
  upper_cands <- sorted[(n - m + 1):n]
  best <- order(upper_cands - lower_cands)[1]
  structure(c(lower_cands[best], upper_cands[best]),
            names = c("Lower Bound", "Upper Bound"))
}
# Metropolis-Hastings update for the Student-t degrees of freedom `nu`.
# last   : current value of nu
# U      : latent mixing/scale variables of the t representation
# lambda : rate used by the hierarchical prior branch
# prior  : one of "Jeffreys", "Exp", "Unif", "Hierar"
# hyper  : rate of the exponential prior (prior == "Exp" only)
# The proposal is a normal truncated to [2.1, 100], centred at a one-step
# Newton update of the log full conditional (gradient/Hessian obtained via
# deriv() on the analytic expression).
# NOTE(review): rtrunc/dtrunc are presumably from the `truncdist` package,
# loaded elsewhere — confirm.
MHnu<-function(last,U,lambda,prior="Jeffreys",hyper)
{
n <- length(U)
if(prior=="Jeffreys")
{
# Log full conditional of nu under the Jeffreys prior.
gJeffreys <- function(nu,U)
{
n<-length(U)
ff<- log(sqrt(nu/(nu+3))*sqrt(trigamma(nu/2)-trigamma((nu+1)/2)-2*(nu+3)/((nu)*(nu+1)^2)))+0.5*n*nu*log(nu/2)+(0.5*nu)*sum(log(U)-U)-n*log(gamma(nu/2))
return(ff)
}
# Symbolic gradient/Hessian of the prior-plus-normalising part (Fonseca1)
# and of the U-dependent part (Fonseca2).
Fonseca1 <-deriv(~log(sqrt(nu/(nu+3))*sqrt(trigamma(nu/2)-trigamma((nu+1)/2)-2*(nu+3)/((nu)*(nu+1)^2))) +0.5*n*nu*log(nu/2)-n*log(gamma(nu/2)),c("nu"),function(nu){},hessian=TRUE)
Fonseca2 <- deriv(~(0.5*nu)*(log(U)-U),c("nu"),function(U,nu){},hessian=TRUE)
aux1 <- Fonseca1(last)
aux2 <- Fonseca2(U,last)
q1 <- attr(aux1,"gradient")[1]+sum(attr(aux2,"gradient"))
q2 <- attr(aux1,"hessian")[1]+sum(attr(aux2,"hessian"))
# One Newton step gives the proposal mean; -1/q2 the proposal variance
# (floored at 0.001 in case the Hessian is not negative).
aw <- last-q1/q2
bw <- max(0.001,-1/q2)
cand <- rtrunc(1, spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))
# MH ratio: target ratio times proposal-density ratio.
alfa <- (exp(gJeffreys(cand,U))/exp(gJeffreys(last,U)))*(dtrunc(last,spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))/dtrunc(cand,spec="norm", a=2.1, b=100, mean = aw, sd = sqrt(bw)))
}
if(prior=="Exp")
{
# Log full conditional of nu under an Exponential(hyper) prior.
gExp <- function(nu,U,hyper)
{
n<-length(U)
ff<- (-hyper*nu) + 0.5*n*nu*log(nu/2)+(0.5*nu)*sum(log(U)-U)-n*log(gamma(nu/2))
return(ff)
}
Fonseca1<- deriv(~ (-hyper*nu) + 0.5*n*nu*log(nu/2)-n*log(gamma(nu/2)),c("nu"),function(nu){},hessian=TRUE)
Fonseca2<- deriv(~(0.5*nu)*(log(U)-U),c("nu"),function(U,nu){},hessian=TRUE)
aux1<-Fonseca1(last)
aux2<-Fonseca2(U,last)
q1<-attr(aux1,"gradient")[1]+sum(attr(aux2,"gradient"))
q2<-attr(aux1,"hessian")[1]+sum(attr(aux2,"hessian"))
aw<- last-q1/q2
bw<- max(0.001,-1/q2)
cand <- rtrunc(1, spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))
alfa<-(exp(gExp(cand,U,hyper))/exp(gExp(last,U,hyper)))*(dtrunc(last,spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))/dtrunc(cand,spec="norm", a=2.1, b=100, mean = aw, sd = sqrt(bw)))
}
if(prior=="Unif")
{
# Log full conditional of nu under a flat (uniform) prior.
gUnif <- function(nu,U)
{
n<-length(U)
ff<- 0.5*n*nu*log(nu/2)+(0.5*nu)*sum(log(U)-U)-n*log(gamma(nu/2))
return(ff)
}
Fonseca1<- deriv(~ 0.5*n*nu*log(nu/2)-n*log(gamma(nu/2)),c("nu"),function(nu){},hessian=TRUE)
Fonseca2<- deriv(~(0.5*nu)*(log(U)-U),c("nu"),function(U,nu){},hessian=TRUE)
aux1<-Fonseca1(last)
aux2<-Fonseca2(U,last)
q1<-attr(aux1,"gradient")[1]+sum(attr(aux2,"gradient"))
q2<-attr(aux1,"hessian")[1]+sum(attr(aux2,"hessian"))
aw<- last-q1/q2
bw<- max(0.001,-1/q2)
cand <- rtrunc(1, spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))
alfa<-(exp(gUnif(cand,U))/exp(gUnif(last,U)))*(dtrunc(last,spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))/dtrunc(cand,spec="norm", a=2.1, b=100, mean = aw, sd = sqrt(bw)))
}
if(prior=="Hierar")
{
# Log full conditional of nu under a hierarchical Exponential(lambda) prior.
gHierar <- function(nu,U)
{
n<-length(U)
ff<- (-lambda*nu) + 0.5*n*nu*log(nu/2)+(0.5*nu)*sum(log(U)-U)-n*log(gamma(nu/2))
return(ff)
}
Fonseca1<- deriv(~ (-lambda*nu) + 0.5*n*nu*log(nu/2)-n*log(gamma(nu/2)),c("nu"),function(nu){},hessian=TRUE)
Fonseca2<- deriv(~(0.5*nu)*(log(U)-U),c("nu"),function(U,nu){},hessian=TRUE)
aux1<- Fonseca1(last)
aux2<-Fonseca2(U,last)
q1<-attr(aux1,"gradient")[1]+sum(attr(aux2,"gradient"))
q2<-attr(aux1,"hessian")[1]+sum(attr(aux2,"hessian"))
aw<- last-q1/q2
bw<- max(0.001,-1/q2)
cand <- rtrunc(1, spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))
alfa<-(exp( gHierar(cand,U))/exp( gHierar(last,U)))*(dtrunc(last,spec="norm",a=2.1, b=100, mean = aw, sd = sqrt(bw))/dtrunc(cand,spec="norm", a=2.1, b=100, mean = aw, sd = sqrt(bw)))
}
# Accept/reject; if prior is not one of the four options, alfa is undefined
# and this line errors.
ifelse(runif(1) < min(alfa, 1), last <- cand, last<-last)
return(last)
}
# Metropolis-Hastings update for the contamination proportion `last` (rho)
# of the contaminated-normal model, using a Beta(s0, s1) prior expressed on
# the odds scale r = rho / (1 - rho) and a log-normal random walk proposal.
# NOTE(review): `ver` is the censored log-likelihood evaluated once; it does
# not depend on r, so it appears in both g() evaluations and cancels in the
# acceptance ratio — confirm whether the likelihood was meant to be
# re-evaluated at the candidate.
# NOTE(review): on acceptance the function returns `cand`, which lives on
# the odds scale, while `last` entered on the probability scale — confirm a
# back-transformation cand/(1+cand) is not missing.
MHrhoCN <-function(last,U,cc,sigma2,nu,s0,s1,cens)
{
last1 <- last/(1-last)
desv <- .001
cand <- rlnorm(1,meanlog=log(last1), sdlog=sqrt(desv))
ver <- verosCN(U,cc,cens=cens,sigma2,nu)
# Unnormalised posterior of the odds r: Beta prior density times the
# likelihood times the Jacobian 1/(1+r)^2 of the odds transform.
g <- function(r,ver)
{
r1 <- r/(1+r)
ff<- (r1)^(s0-1)*(1-r1)^(s1-1)*ver*(1/(1+r)^2)
return(ff)
}
# MH ratio with the log-normal proposal correction (factors cand / last1).
alfa <- g(cand,ver)*cand/(g(last1,ver)*last1)
# NOTE(review): ifelse() used purely for its assignment side effect.
ifelse(runif(1) < min(alfa, 1), last <- cand, last<-last)
return(last)
}
# Console progress indicator for the sampler: draws a fresh text progress
# bar from start.iter to end.iter, advances it to the current iteration j,
# then rewinds the cursor. Chains 1 and 3 (cad) use "+" as the bar glyph,
# the others "*". Extra arguments are accepted and ignored.
BayCens <- function(start.iter, end.iter, j, cad, ...) {
  bar_glyph <- if (cad == 1 || cad == 3) "+" else "*"
  pb <- txtProgressBar(start.iter, end.iter, initial = start.iter,
                       style = 3, width = 10, char = bar_glyph)
  Sys.sleep(0.5)
  setTxtProgressBar(pb, j)
  cat("\r")
  cat("\r")
}
|
36d574471b63f06db020b56dec5f125887308875
|
d01ec57d0f07840b1844fa2e10701904fec1a693
|
/step1.R
|
96215a400b013a052f35e2c1a6b73327f4fc1578
|
[] |
no_license
|
OpiDataScientists/textmining
|
81e693264407d0af95507c2d4cd7507660eefeaf
|
5e7eec1307e623da86e35292ab51ef0dea6b16be
|
refs/heads/master
| 2021-05-02T03:00:38.207865
| 2018-03-01T09:58:20
| 2018-03-01T09:58:20
| 120,890,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,697
|
r
|
step1.R
|
library(XML)
# Parse a trademark XML export and pull out application fields by XPath.
xml = xmlTreeParse("FR_BCKST66_TO_2017-34_0015.xml", useInternalNodes=TRUE,encoding = "UTF-8") # the Transaction tag must first be removed and replaced
app_number=xpathApply(xml, "//ApplicationNumber", xmlValue)
app_d=xpathApply(xml, "//ApplicationDate", xmlValue)
app_filling_place=xpathApply(xml, "//FilingPlace", xmlValue)
app_des=xpathApply(xml, "//GoodsServicesDetails", xmlValue)
descri=xpathApply(xml, "//ClassDescriptionDetails", xmlValue)
desc=xpathApply(xml, "//GoodsServicesDescription", xmlValue)
# Persist only the two fields used below, then clear the workspace to free
# the parsed XML tree.
save(app_number, file="app_number.RData")
save(app_des, file="app_des.RData")
# NOTE(review): rm(list=ls()) wipes the whole global environment — fine for
# an interactive script, hostile to anything sourcing this file.
rm(list=ls())
gc()
load("app_number.RData")
load("app_des.RData")
# Flatten the xpathApply lists into one-column data frames and join them.
app_number=cbind(c(app_number))
app_number=as.data.frame(app_number)
app_des=cbind(c(app_des))
app_des=as.data.frame(app_des)
data=cbind(app_number,app_des)
colnames(data)=c("application numb","descrip")
save(data, file="data.RData")
rm(list=ls())
gc()
load("data.RData")
###########################################
# Strip accents and quote-like marks from text: quote-like characters are
# first blanked out, the text is transliterated to plain ASCII, and any
# marks introduced by the transliteration are then removed.
Unaccent <- function(text) {
  text <- gsub("['`^~\"]", " ", text)
  text <- iconv(text, to = "ASCII//TRANSLIT//IGNORE")
  gsub("['`^~\"]", "", text)
}
# Build a document-term matrix from the application descriptions and
# experiment with different tokenisations.
colnames(data)=c("doc_id","text")
data$text=as.character(data$text)
data$text=Unaccent(data$text)
data$doc_id=as.character(data$doc_id)
library(tm)
ds <- DataframeSource(data)
dsc<-Corpus(ds)
dtm<- DocumentTermMatrix(dsc,control = list(removePunctuation = TRUE,removeNumbers = TRUE,stopwords = TRUE)) # we lose UTF-8 here
print(Terms(dtm))
library(stringi)
stri_enc_mark(Terms(dtm))
all(stri_enc_isutf8(Terms(dtm)))
print(Terms(dtm))
# Custom whitespace tokenizer.
scanner <- function(x) strsplit(x," ")
ap.tdm <- TermDocumentMatrix(dsc,control=list(tokenize=scanner))
findFreqTerms(ap.tdm, lowfreq=30)
library(tidytext)
#### we lose UTF-8 here ####
dat<- tidy(dtm,encoding = "UTF-8")
# Drop a few French stop words not caught by tm's list.
prep=c("pour","non","les","des","notamment")
dat =dat[ ! dat$term %in% prep, ]
# Re-tokenise on commas/semicolons instead of whitespace.
scanner <- function(x) strsplit(x,c(",",";"))
dtm2 <- DocumentTermMatrix(dsc,control=list(tokenize=scanner))
newd <- tidy(dtm2)
View(dtm)
library(tm)
# NOTE(review): `test` is not defined anywhere in this script — confirm the
# intended data frame (probably `data`).
ds <- DataframeSource(test)
dsc<-Corpus(ds)
dtm<- DocumentTermMatrix(dsc)
library(tidytext)
dat<- tidy(dtm)
scanner <- function(x) strsplit(x," ")
dtm2 <- DocumentTermMatrix(dsc,control=list(tokenize=scanner))
newd <- tidy(dtm2)
##%######################################################%##
# #
#### part 2 - manual ####
# #
##%######################################################%##
load("data.RData")
colnames(data)=c("doc_id","text")
data$text=as.character(data$text)
data$doc_id=as.character(data$doc_id)
# Manual two-level split: first on commas, then on semicolons, keeping the
# originating doc_id alongside each fragment.
s=strsplit(as.character(data$text),',')
x=data.frame(t1=unlist(s),t2=rep(data$doc_id, sapply(s, FUN=length)))
ss=strsplit(as.character(x$t1),';')
xx=data.frame(t1=unlist(ss),t2=rep(x$t2, sapply(ss, FUN=length)))
library(tm)
ds <- DataframeSource(data)
dsc<-Corpus(ds)
dtm<- DocumentTermMatrix(dsc,control = list(removePunctuation = TRUE,removeNumbers = TRUE,stopwords = TRUE)) # we lose UTF-8 here
print(Terms(dtm))
library(stringi)
stri_enc_mark(Terms(dtm))
all(stri_enc_isutf8(Terms(dtm)))
print(Terms(dtm))
scanner <- function(x) strsplit(x," ")
ap.tdm <- TermDocumentMatrix(dsc,control=list(tokenize=scanner))
findFreqTerms(ap.tdm, lowfreq=30)
library(tidytext)
#### we lose UTF-8 here ####
dat<- tidy(dtm,encoding = "UTF-8")
prep=c("pour","non","les","des","notamment")
dat =dat[ ! dat$term %in% prep, ]
##%######################################################%##
# #
#### s ####
# #
##%######################################################%##
# Small encoding experiment on a hand-written accented string.
text <- "mécanique , chérie bébé elle est magnifique mécanique éléve syb orazhhha zkkk zzz kk kk bébé chérie , mécanique soléil soléil platoaaàzaiiea"
text <- iconv(text, from="ASCII//TRANSLIT//IGNORE", to="UTF-8")
Encoding(text)
# [1] "unknown"
Encoding(text) <- "UTF-8"
# [1] "UTF-8"
ap.corpus <- Corpus(DataframeSource(data.frame(text)))
ap.corpus <- tm_map(ap.corpus, removePunctuation)
ap.corpus <- tm_map(ap.corpus, content_transformer(tolower))
content(ap.corpus[[1]])
ap.tdm <- TermDocumentMatrix(ap.corpus)
ap.m <- as.matrix(ap.tdm)
ap.v <- sort(rowSums(ap.m),decreasing=TRUE)
ap.d <- data.frame(word = names(ap.v),freq=ap.v)
print(table(ap.d$freq))
# 1 2 3
# 62 5 1
print(findFreqTerms(ap.tdm, lowfreq=2))
# NOTE(review): `tdm` is not defined in this script (only ap.tdm / dtm) —
# this line will error as written.
plot(tdm, terms = findFreqTerms(tdm, lowfreq = 6)[1:25], corThreshold = 0.5)
|
fd6fd01c4c215fce39d9a47aff032dd7aa56406b
|
b5d37b88584dda24636216703bef79ef270e4ba6
|
/drawingSimple3TipTrees.R
|
ba78193d0f4b836bb33214d9e59cc783517238da
|
[] |
no_license
|
NikVetr/dissertation_work
|
7aadd278a30dcf20a6d1783e7fadf417037c13d1
|
6f0f4ba5bed36c351eca135ce4c4f45195266d87
|
refs/heads/master
| 2022-01-16T19:46:32.809960
| 2022-01-06T01:05:08
| 2022-01-06T01:05:08
| 216,916,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,805
|
r
|
drawingSimple3TipTrees.R
|
library(latex2exp)
library(pBrackets)
# (x, y) anchor points for the roots of the five trees drawn below.
# NOTE(review): the first assignment is dead code — it is immediately
# overwritten by the second.
rootStates <- rbind(c(30,100), c(100,70), c(30,1), c(100,1), c(150,1))
rootStates <- rbind(c(20,5), c(70,5), c(100,5), c(170,5), c(210,5))
# Empty canvas wide enough to hold all five trees side by side.
plot(0:270, 0:270*.5, col = 0, axes = F, xlab = "", ylab = "")
text2 <- function(label, coordinates, cex, font) {text(label = label, x = coordinates[1], y = coordinates[2], cex = cex, font = font)}
par(mar = c(1,1,1,1))
# All branches are drawn at +/- (ang/2) degrees from vertical.
ang <- 60
angh <- ang / 2
angr <- angh / 180 * pi
liwd <- 5
#tree1
# Branch lengths shared by all five depictions of the same unrooted tree.
v1 <- 50
v2 <- 30
v3 <- 35
v4 <- 40
E <- rootStates[1,]
A <- E + c(-v1 * sin(angr), v1 * cos(angr))
D <- E + c(v4 * sin(angr), v4 * cos(angr))
B <- D + c(-v2 * sin(angr), v2 * cos(angr))
C <- D + c(v3 * sin(angr), v3 * cos(angr))
lines(rbind(E, A), lwd = liwd)
lines(rbind(E, D), lwd = liwd)
lines(rbind(D, B), lwd = liwd)
lines(rbind(D, C), lwd = liwd)
text2("A", A + c(0, 5), cex = 1.3, font = 4)
text2("B", B + c(0, 5), cex = 1.3, font = 4)
text2("C", C + c(0, 5), cex = 1.3, font = 4)
text2("D", D + c(3, -3), cex = 1.3, font = 4)
text2("E", E + c(0, -5), cex = 1.3, font = 4)
text2(TeX("v_1"), E + c(-v1 * sin(angr), v1 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_2"), D + c(-v2 * sin(angr), v2 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_3"), D + c(v3 * sin(angr), v3 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
# NOTE(review): the v_4 label is positioned using v1 in the offset below —
# harmless only if intentional; confirm it should not use v4.
text2(TeX("v_4"), E + c(v1 * sin(angr), v1 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
#tree2, reroot on A
# plot(0:200, 0:200, col = 0, axes = F, xlab = "", ylab = "")
A <- rootStates[2,]
E <- A + c(0, v1)
D <- E + c(0, v4)
B <- D + c(-v2 * sin(angr), v2 * cos(angr))
C <- D + c(v3 * sin(angr), v3 * cos(angr))
lines(rbind(A, E), lwd = liwd)
lines(rbind(E, D), lwd = liwd)
lines(rbind(D, B), lwd = liwd)
lines(rbind(D, C), lwd = liwd)
text2("A", A + c(0, -5), cex = 1.3, font = 4)
text2("B", B + c(0, 5), cex = 1.3, font = 4)
text2("C", C + c(0, 5), cex = 1.3, font = 4)
text2("D", D + c(4, -2), cex = 1.3, font = 4)
text2("E", E + c(4, 0), cex = 1.3, font = 4)
text2(TeX("v_1"), A + c(0, v1) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_2"), D + c(-v2 * sin(angr), v2 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_3"), D + c(v3 * sin(angr), v3 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
text2(TeX("v_4"), E + c(0, v4) / 2 + c(-4, -3), cex = 1.1, font = 3)
#tree2, reroot on B
# plot(0:200, 0:200, col = 0, axes = F, xlab = "", ylab = "")
B <- rootStates[3,]
D <- B + c(0, v2)
E <- D + c(v4 * sin(angr), v4 * cos(angr))
A <- E + c(v1 * sin(angr), v1 * cos(angr))
C <- D + c(-v3 * sin(angr), v3 * cos(angr))
lines(rbind(A, E), lwd = liwd)
lines(rbind(E, D), lwd = liwd)
lines(rbind(D, B), lwd = liwd)
lines(rbind(D, C), lwd = liwd)
text2("A", A + c(0, 5), cex = 1.3, font = 4)
text2("B", B + c(0, -5), cex = 1.3, font = 4)
text2("C", C + c(0, 5), cex = 1.3, font = 4)
text2("D", D + c(4, -2), cex = 1.3, font = 4)
text2("E", E + c(4, 0), cex = 1.3, font = 4)
text2(TeX("v_1"), E + c(v1 * sin(angr), v1 * cos(angr)) / 2 + c(4, -4), cex = 1.1, font = 3)
text2(TeX("v_2"), B + c(0, v2) / 2 + c(-5, 0), cex = 1.1, font = 3)
text2(TeX("v_3"), D + c(-v3 * sin(angr), v3 * cos(angr)) / 2 + c(-4, -4), cex = 1.1, font = 3)
text2(TeX("v_4"), D + c(v4 * sin(angr), v4 * cos(angr)) / 2 + c(4, -4), cex = 1.1, font = 3)
#tree4, reroot along v4
v1 <- 50
v2 <- 30
v3 <- 35
v4 <- 40
# The new root R sits a distance `subBL` along branch v4.
subBL <- v4 / 2
R <- rootStates[4,]
E <- R + c(-subBL * sin(angr), subBL * cos(angr))
A <- E + c(-v1 * sin(angr), v1 * cos(angr))
D <- R + c((v4-subBL) * sin(angr), (v4-subBL) * cos(angr))
B <- D + c(-v2 * sin(angr), v2 * cos(angr))
C <- D + c(v3 * sin(angr), v3 * cos(angr))
# plot(0:200, 0:200, col = 0, axes = F, xlab = "", ylab = "")
lines(rbind(R, E), lwd = liwd)
lines(rbind(E, A), lwd = liwd)
lines(rbind(R, D), lwd = liwd)
lines(rbind(D, B), lwd = liwd)
lines(rbind(D, C), lwd = liwd)
text2("A", A + c(0, 5), cex = 1.3, font = 4)
text2("B", B + c(0, 5), cex = 1.3, font = 4)
text2("C", C + c(0, 5), cex = 1.3, font = 4)
text2("D", D + c(3, -3), cex = 1.3, font = 4)
text2("E", E + c(-5, 0), cex = 1.3, font = 4)
text2(TeX("v_1"), E + c(-v1 * sin(angr), v1 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_2"), D + c(-v2 * sin(angr), v2 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_3"), D + c(v3 * sin(angr), v3 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
text2(TeX("r"), R + c(-subBL * sin(angr), subBL * cos(angr)) / 2 + c(-4, -1), cex = 1.1, font = 3)
text2(TeX("v_4-r"), R + c((v4-subBL) * sin(angr), (v4-subBL) * cos(angr)) / 2 + c(5, -3), cex = 1.1, font = 3)
#tree5, reroot along v3
v1 <- 50
v2 <- 30
v3 <- 35
v4 <- 40
subBL <- v3 / 2
R <- rootStates[5,]
D <- R + c((v3-subBL) * sin(angr), (v3-subBL) * cos(angr))
C <- R + c(-subBL * sin(angr), subBL * cos(angr))
E <- D + c(v4 * sin(angr), v4 * cos(angr))
B <- D + c(-v2 * sin(angr), v2 * cos(angr))
A <- E + c(v1 * sin(angr), v1 * cos(angr))
# plot(0:200, 0:200, col = 0, axes = F, xlab = "", ylab = "")
lines(rbind(R, C), lwd = liwd)
lines(rbind(R, D), lwd = liwd)
lines(rbind(D, E), lwd = liwd)
lines(rbind(D, B), lwd = liwd)
lines(rbind(E, A), lwd = liwd)
text2("A", A + c(0, 5), cex = 1.3, font = 4)
text2("B", B + c(0, 5), cex = 1.3, font = 4)
text2("C", C + c(0, 5), cex = 1.3, font = 4)
text2("D", D + c(3, -3), cex = 1.3, font = 4)
text2("E", E + c(-5, 0), cex = 1.3, font = 4)
text2(TeX("v_1"), E + c(v1 * sin(angr), v1 * cos(angr)) / 2 + c(-4, 3), cex = 1.1, font = 3)
text2(TeX("v_2"), D + c(-v2 * sin(angr), v2 * cos(angr)) / 2 + c(4, 3), cex = 1.1, font = 3)
# NOTE(review): the v_4 label below is positioned using v3 in the offset,
# and the final label uses (v4-subBL) while subBL was set from v3 —
# confirm the intended branch variables.
text2(TeX("v_4"), D + c(v3 * sin(angr), v3 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
text2(TeX("r"), R + c(-subBL * sin(angr), subBL * cos(angr)) / 2 + c(-4, -1), cex = 1.1, font = 3)
text2(TeX("v_3-r"), R + c((v4-subBL) * sin(angr), (v4-subBL) * cos(angr)) / 2 + c(5, -3), cex = 1.1, font = 3)
## let's try to show the pruning algorithm ##
# Three panels: the full 3-tip tree, the tree after pruning cherry (B, C)
# into D' (plus the contrast B - C), and the fully decomposed pair of
# independent contrasts.
rootStates <- rbind(c(20,5), c(110,5), c(200,5))
plot(0:270, 0:270*.5, col = 0, axes = F, xlab = "", ylab = "")
par(mar = c(1,1,1,1))
ang <- 60
angh <- ang / 2
angr <- angh / 180 * pi
liwd <- 5
#tree1
v1 <- 50
v2 <- 30
v3 <- 35
v4 <- 40
E <- rootStates[1,]
A <- E + c(-v1 * sin(angr), v1 * cos(angr))
D <- E + c(v4 * sin(angr), v4 * cos(angr))
B <- D + c(-v2 * sin(angr), v2 * cos(angr))
C <- D + c(v3 * sin(angr), v3 * cos(angr))
lines(rbind(E, A), lwd = liwd)
lines(rbind(E, D), lwd = liwd)
lines(rbind(D, B), lwd = liwd)
lines(rbind(D, C), lwd = liwd)
text2("A", A + c(0, 5), cex = 1.3, font = 4)
text2("B", B + c(0, 5), cex = 1.3, font = 4)
text2("C", C + c(0, 5), cex = 1.3, font = 4)
text2("D", D + c(3, -3), cex = 1.3, font = 4)
text2("E", E + c(0, -5), cex = 1.3, font = 4)
text2(TeX("v_1"), E + c(-v1 * sin(angr), v1 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_2"), D + c(-v2 * sin(angr), v2 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_3"), D + c(v3 * sin(angr), v3 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
text2(TeX("v_4"), E + c(v1 * sin(angr), v1 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
arrows(x0 = 60, y0 = 35, x1 = 75, y1 = 35, lwd = 3)
#tree2
v1 <- 50
v2 <- 30
v3 <- 35
v4 <- 40
# After pruning the (B, C) cherry, D's branch is extended by the harmonic
# contribution v2*v3/(v2+v3) (dotted segment up to D').
v4c <- v4 + v2*v3/(v2+v3)
E <- rootStates[2,]
A <- E + c(-v1 * sin(angr), v1 * cos(angr))
D <- E + c(v4 * sin(angr), v4 * cos(angr))
Dp <- E + c(v4c * sin(angr), v4c * cos(angr))
lines(rbind(E, A), lwd = liwd)
lines(rbind(E, D), lwd = liwd)
lines(rbind(D, Dp), lwd = liwd/2.4, lty = 3)
text2("A", A + c(0, 5), cex = 1.3, font = 4)
text2("D'", Dp + c(0, 5), cex = 1.3, font = 4)
text2("E", E + c(0, -5), cex = 1.3, font = 4)
text2(TeX("v_1"), E + c(-v1 * sin(angr), v1 * cos(angr)) / 2 + c(-4, -3), cex = 1.1, font = 3)
text2(TeX("v_4"), E + c(v1 * sin(angr), v1 * cos(angr)) / 2 + c(4, -3), cex = 1.1, font = 3)
text2(TeX("$\\frac{v_2v_3}{v_2+v_3}"), D + c(v2*v3/(v2+v3) * sin(angr), v2*v3/(v2+v3) * cos(angr)) / 2 + c(-10, 3), cex = 1.1, font = 3)
# Hand-drawn "+" between the panel elements.
lines(rbind(c(140, 35),c(148,35)), lwd = 2); lines(rbind(c(144, 31),c(144,39)), lwd = 2)
# The pruned contrast: a single branch of length v2 + v3 joining B and C.
lines(rbind(c(160, 5),c(160, v2+v3+5)), lwd = liwd)
text2("C", c(160, v2+v3+5) + c(0, 5), cex = 1.3, font = 4)
text2("B", c(160, 5) + c(0, -5), cex = 1.3, font = 4)
text2(TeX("v_2+v_3"), c(160, 5) + c(0, v2+v3) / 2 + c(9, 0), cex = 1.1, font = 3)
arrows(x0 = 180, y0 = 35, x1 = 195, y1 = 35, lwd = 3)
#full decomposition
lines(rbind(c(140, 35),c(148,35)), lwd = 2); lines(rbind(c(144, 31),c(144,39)), lwd = 2)
# Fully decomposed: contrast A vs D' with total length v1 + v4 + v2v3/(v2+v3).
lines(rbind(c(225, 5),c(225, 5+v1+v4+v2*v3/(v2+v3))), lwd = liwd)
text2("D'", c(225, 5+v1+v4+v2*v3/(v2+v3)) + c(0, 5), cex = 1.3, font = 4)
text2("A", c(225, 5) + c(0, -5), cex = 1.3, font = 4)
text2(TeX("v_1+v_4+$\\frac{v_2v_3}{v_2+v_3}"), c(225, 5) + c(0, 5+v1+v4+v2*v3/(v2+v3)) / 2 + c(-17, 0), cex = 1.1, font = 3)
lines(rbind(c(250, 5),c(250, v2+v3)), lwd = liwd)
text2("C", c(250, v2+v3) + c(0, 5), cex = 1.3, font = 4)
text2("B", c(250, 5) + c(0, -5), cex = 1.3, font = 4)
text2(TeX("v_2+v_3"), c(250, 5) + c(0, v2+v3) / 2 + c(9, 0), cex = 1.1, font = 3)
lines(rbind(c(234, 35),c(242,35)), lwd = 2); lines(rbind(c(238, 31),c(238,39)), lwd = 3)
# Draw `mobject` as a bracketed matrix on the current plot.
#   mobject     : matrix of strings / numbers / plotmath expressions
#   size        : c(width, height) in user coordinates
#   location    : c(x, y) of the lower-left corner
#   grid        : draw interior cell-dividing lines
#   rownames/colnames : draw dimnames (skipped when mobject has none)
#   title/title.label : draw a bold title centred above the matrix
plotMatrix <- function(mobject, size, location, lwd = 2, grid = T, font = 1, cex = 1, rownames = T, colnames = T, title = T, title.label = "Matrix Object"){
  nr <- nrow(mobject)
  nc <- ncol(mobject)
  # Left and right square brackets.
  lines(rbind(location, location + c(0, size[2])), lwd = lwd)
  lines(rbind(location, location + c(size[1]/8, 0)), lwd = lwd)
  lines(rbind(location + c(0, size[2]), location + c(size[1]/8, size[2])), lwd = lwd)
  lines(rbind(location + c(size[1], 0), location + size), lwd = lwd)
  lines(rbind(location + size, location + size - c(size[1]/8, 0)), lwd = lwd)
  lines(rbind(location + c(size[1], 0), location + c(size[1], 0) - c(size[1]/8, 0)), lwd = lwd)
  if (grid) {
    # BUG FIX: seq_len() guards the 1-row / 1-column case, where the old
    # 1:(n-1) evaluated to 1:0 and drew spurious grid lines.
    for (i in seq_len(nr - 1)) {
      lines(rbind(location + c(0, i*size[2]/nr), location + c(size[1], i*size[2]/nr)))
    }
    for (j in seq_len(nc - 1)) {
      lines(rbind(location + c(j*size[1]/nc, 0), location + c(j*size[1]/nc, size[2])))
    }
  }
  # Coerce plain (non-expression, non-character) cells to character so
  # text() renders them; inherits() replaces the fragile class() == "..."
  # comparison. BUG FIX: dimnames are preserved through the coercion
  # (matrix() used to drop them).
  if (!inherits(mobject[1, 1], "expression") && !inherits(mobject[1, 1], "character")) {
    dn <- dimnames(mobject)
    mobject <- matrix(as.character(mobject), nrow = nr, ncol = nc)
    dimnames(mobject) <- dn
  }
  # Cell contents, centred in each cell.
  for (i in seq_len(nr)) {
    for (j in seq_len(nc)) {
      text(labels = mobject[i, j], x = location[1] + (j - 1/2)*size[1]/nc,
           y = location[2] + size[2] - (i - 1/2)*size[2]/nr, font = font, cex = cex)
    }
  }
  if (title) {
    text(title.label, x = location[1] + size[1]/2,
         y = location[2] + size[2] + strheight(title.label, font = 2, cex = 1.5)/1.5,
         cex = 1.5, font = 2)
  }
  # Dimname labels: skipped when absent so text() is never handed NULL.
  if (rownames && !is.null(rownames(mobject))) {
    # BUG FIX: vertical spacing divided by ncol instead of nrow, which was
    # wrong for non-square matrices.
    for (i in seq_len(nr)) {
      text(rownames(mobject)[i],
           x = location[1] - strwidth(rownames(mobject)[i])/2 - size[1]/(nc*6),
           y = location[2] + size[2] - (i - 1/2)*size[2]/nr)
    }
  }
  if (colnames && !is.null(colnames(mobject))) {
    # BUG FIX: the loop bound and horizontal spacing used nrow instead of
    # ncol, which was wrong for non-square matrices.
    for (j in seq_len(nc)) {
      text(colnames(mobject)[j],
           x = location[1] + (j - 1/2)*size[1]/nc,
           y = location[2] - strheight(colnames(mobject)[j])/2 - size[2]/(nr*6))
    }
  }
}
# Matrix-algebra companion to the pruning figure: show how the precision
# structure (P, x) is reduced to independent contrasts (Q, y) step by step.
Q <- matrix("", nrow = 2, ncol = 2)
plotMatrix(mobject = Q, size = c(25,20), location = c(1,110), title.label = "Q")
y <- matrix("", nrow = 2, ncol = 1)
plotMatrix(mobject = y, size = c(25,20), location = c(31,110), title.label = "y")
# Tree covariance matrix P for tips A, B, C and the trait vector x.
P <- matrix(c(TeX("v_1"), 0, 0, 0, TeX("v_4+v_2"), TeX("v_4"), 0, TeX("v_4"), TeX("v_4+v_3")), nrow = 3, ncol = 3); rownames(P) <- colnames(P) <- LETTERS[1:3]
plotMatrix(mobject = P, size = c(40,20), location = c(1,80), title.label = "P")
x <- matrix(c(TeX("x_A"),TeX("x_B"),TeX("x_C")), nrow = 3, ncol = 1)
plotMatrix(mobject = x, size = c(10,20), location = c(46,80), title.label = "x")
arrows(x0 = 60, y0 = 105, x1 = 75, y1 = 105, lwd = 3)
# Middle panel: the first contrast B - C with variance v2 + v3 peeled off.
Q <- matrix(c(TeX("v_2+v_3"), 0, 0, ""), nrow = 2, ncol = 2)
plotMatrix(mobject = Q, size = c(30,20), location = c(80,110), title.label = "Q")
y <- matrix(c(TeX("x_B-x_C"), ""), nrow = 2, ncol = 1)
plotMatrix(mobject = y, size = c(20,20), location = c(116,110), title.label = "y")
P <- matrix(c(TeX("v_1"), 0, 0, 0, TeX("v_4+v_2"), TeX("v_4"), 0, TeX("v_4"), TeX("v_4+v_3")), nrow = 3, ncol = 3); rownames(P) <- colnames(P) <- LETTERS[1:3]
plotMatrix(mobject = P, size = c(40,20), location = c(80,80), title.label = "P")
x <- matrix(c(TeX("x_A"),TeX("x_B"),TeX("x_C")), nrow = 3, ncol = 1)
plotMatrix(mobject = x, size = c(10,20), location = c(126,80), title.label = "x")
arrows(x0 = 140, y0 = 105, x1 = 155, y1 = 105, lwd = 3)
# Right panel: both independent contrasts with their variances.
Q <- matrix(c(TeX("v_2+v_3"), 0, 0, TeX("v_1+v_4+$\\frac{v_2v_3}{v_2+v_3}")), nrow = 2, ncol = 2)
plotMatrix(mobject = Q, size = c(60,50), location = c(160,80), title.label = "Q")
y <- matrix(c(TeX("x_B-x_C"), TeX("x_a - $\\frac{v_3x_b+v_2x_c}{v_2+v_3}")), nrow = 2, ncol = 1)
plotMatrix(mobject = y, size = c(30,50), location = c(230,80), title.label = "y")
|
6987967f27614e1f599baecfe2f42e4d02656e18
|
f883fb9434f63f7ef62a154832e27de407969630
|
/dashboard/config_utils.R
|
0fc040b63b8a4fe235ba48b5766a50d3c40ad603
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rowanlimb/Clew
|
06756069829a51d0c785b36da364293e2ee15df7
|
b34e0c5364c9be7059f5c58cd8bb51a0893700ac
|
refs/heads/master
| 2021-01-23T02:10:42.881213
| 2017-03-23T16:31:37
| 2017-03-23T16:31:37
| 85,972,007
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,992
|
r
|
config_utils.R
|
#Copyright (c) 2017 BT Plc (www.btplc.com)
#
#You should have received a copy of the MIT license along with Clew.
#If not, see https://opensource.org/licenses/MIT
# Namespace environment for the config-validation helpers.
config_utils <- new.env()
#Functions for checking validity of config.yml
#Eventually this script will be generated by a build script
#so that it can be built based on the known default config from the git repo
#At the moment we do not check validity of values. The config library will fail to load if
#a key does not have a value, so just checking that all the required key names are in the
#loaded config object will suffice
#Returns the number of expected keys missing from config.yml, else 0.
#If config.yml does not exist, returns -1.
config_utils$checkConfig <- function() {
  if (!file.exists("config.yml")) return(-1)
  config.names <- c('mr_jobhist_host_list', 'mr_jobhist_port_list', 'num_clusters', 'cluster_name_list', 'yarn_rm_host_list', 'yarn_rm_port_list', 'mysql_option_group', 'mysql_latency_table_list', 'hive_currentlatency_alert_threshold', 'hive_currentlatency_alert_colour', 'hive_currentlatency_warn_threshold', 'hive_currentlatency_warn_colour', 'hive_currentlatency_ok_colour', 'hive_maxlatency_alert_threshold', 'hive_maxlatency_alert_colour', 'hive_maxlatency_warn_threshold', 'hive_maxlatency_warn_colour', 'hive_maxlatency_ok_colour', 'yarn_apps_pending_alert_threshold', 'yarn_apps_pending_warn_threshold', 'yarn_memreserved_alert_threshold', 'yarn_memreserved_warn_threshold', 'yarn_containers_pending_alert_threshold', 'yarn_containers_pending_warn_threshold', 'yarn_unhealthynodes_alert_threshold', 'yarn_unhealthynodes_warn_threshold', 'yarn_lostnodes_alert_threshold', 'yarn_lostnodes_warn_threshold', 'yarn_alert_colour', 'yarn_warn_colour', 'yarn_ok_colour', 'dashboard_title')
  # Count missing keys directly rather than growing a list in a loop
  # (the old code appended to `errlist` with c() on every miss).
  missing <- vapply(config.names, function(key) is.null(config::get(key)), logical(1))
  sum(missing)
}
|
b3bc67b67b5d34b2d147842a4f6040355d7af098
|
9f039abf455336e8f8b56f576999e9514c8bad5b
|
/plot3.R
|
14bcb4a2a903372ead5833fd5e350a3cd472e31a
|
[] |
no_license
|
jordicuadrado/ExData_Plotting1
|
d5ccdcd9aba1646498576d9ad3ebc4cfd12f09b1
|
2314165bb96d061156451a8ca2f686f693270cf1
|
refs/heads/master
| 2021-01-14T12:47:15.087707
| 2015-10-11T17:10:53
| 2015-10-11T17:10:53
| 43,952,338
| 0
| 0
| null | 2015-10-09T11:50:13
| 2015-10-09T11:50:13
| null |
UTF-8
|
R
| false
| false
| 2,945
|
r
|
plot3.R
|
# Project 1 from coursera's Data Science Specialization: 4 - Exploratory Data Analysis course.
# Reconstruct Plot 3: Global Active Power (kilowatts) over the period of of time of two days of the three submeters (1: kitchen, 2: laundry room, 3: HVAC) in the same graph.
# The code in this file is intended to be easily used in RStudio. One can construct a function to generate the PNG directly from the R terminal.
# Uses "household_power_consumption.txt" (133MB) -- the file must be present
# in the working directory before sourcing this script.
#setwd("~/Data Science Specialization/4 - Exploratory Data Analysis/Project 1/ExData_Plotting1")
archivoEntrada <- "household_power_consumption.txt"
handlerArchivo <- file(archivoEntrada) #opens the file handler to work directly with the "file stream"
# Instead of read.table(archivoEntrada, header=TRUE, sep=";", na="?") we use a
# regexp so only rows for the two target dates (1/2/2007 and 2/2/2007) are
# loaded, saving memory. Because grep drops the header row, column names must
# be supplied explicitly via col.names.
power2days <- read.table(text = grep("^[1,2]/2/2007", readLines(handlerArchivo), value = TRUE), col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), header=TRUE, sep=";", na="?") #regex (grep) used to efficiently open and load the relevant data to memory
close(handlerArchivo)
# Convert the separate Date and Time columns into a single POSIXct Datetime
# column -- more convenient for plotting a time axis.
datetime <- paste(as.Date(power2days$Date, "%d/%m/%Y"), power2days$Time)
power2days$Datetime <- as.POSIXct(datetime) #Also we can use something like power2days$dateTime <- strptime(paste(power2days$Date, power2days$Time), "%d/%m/%Y %H:%M:%S")
#construct the plot
#here, we are using png() instead of dev.copy() used in plot1-2.R because the legend doesn't fit in the box: http://stackoverflow.com/questions/9400194/legend-truncated-when-saving-as-pdf-using-saveplot
png("plot3.png", width=480, height=480, units="px")
with(power2days, { #with is needed to define the dataframe to be used in the following instructions
  plot(Sub_metering_1 ~ Datetime, ylab = "Energy sub metering", xlab = "", type="l") #type L defines the graph as a line chart
  lines(Sub_metering_2 ~ Datetime, col = "Red") #lines() is needed to draw in the same area of the graph: https://stat.ethz.ch/R-manual/R-devel/library/graphics/html/lines.html
  lines(Sub_metering_3 ~ Datetime, col = "Blue")
})
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("Black", "Red", "Blue"), lty = 1, lwd = 3) #https://stat.ethz.ch/R-manual/R-devel/library/graphics/html/legend.html lty, lwd: the line types and widths for lines appearing in the legend. One of these two must be specified for line drawing.
#closing the graphics device
dev.off()
|
22d96175d472aa11bb75c24c4dd54b3c37fca039
|
a156e30406427ce722bb61916d5d0aa2e0b2d71b
|
/R/match_thrC.R
|
6bf7d14d040eec9f425ec804de2bab90e6245d75
|
[] |
no_license
|
krmclaughlin/RCPS
|
c0fbde59528cc3d9519a32a61077836fb187c3de
|
9244d8bc1f7150e8c9e0804720e96b536fd7e23f
|
refs/heads/main
| 2023-07-09T10:22:07.014209
| 2021-08-13T03:45:31
| 2021-08-13T03:45:31
| 395,510,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,276
|
r
|
match_thrC.R
|
## Generate documentation using roxygen
#' Generate recruitment chain based on utilities.
#'
#' Generates a recruitment chain based on the given utilities \code{U} and \code{V}. Uses the multiple matching process I developed. This is the matching algorithm for the UNRANKED process (i.e., if a person recruits three peers, we do not make assumptions about those peers' relative utilities.)
#'
#' @param U Matrix of utilities from recruiters to peers. It has dimension \eqn{n_r \times (n_p + n_c)}. Currently implemented for \eqn{n_c=3}.
#' @param V Matrix of utilities from peers to recruiters. It has dimension \eqn{n_p \times (n_r + 1)}.
#' @param reslocal Logical: use additional information about the underlying network? Makes
#'   computation faster. If TRUE, assumes that \code{netstage} takes three values: 1 indicates a
#'   recruitment tie (and thus a tie in the underlying network); 0 indicates no recruitment tie,
#'   but a tie does exist in the underlying network; NA indicates no tie in the underlying network,
#'   and thus no possibility of recruitment.
#' @param siml Logical: is this a simulation (errors when < 3 peers).
#'
#' @return An annotated adjacency matrix with the generated recruitment chain. It has dimension \eqn{(n_r+n_c) \times (n_p+1)} where \eqn{n_r} is the number of recruiters, \eqn{n_p} is the number of peers, and \eqn{n_c} is the number of coupons.
#'
#' @examples
#' U <- matrix(rnorm(50), nrow=5)
#' V <- matrix(rnorm(42), nrow=7)
#' recfromUV3urR(U, V)
recfromUV3urR <- function(U, V, reslocal = FALSE, siml = FALSE) {
  nrec <- nrow(U)
  npeer <- nrow(V)
  # In simulation mode there must be at least 3 peers (one per coupon).
  # Fixed: the scalar `if` condition now uses `&&` instead of the
  # elementwise `&`.
  if (isTRUE(siml) && npeer < 3) {
    stop("Not enough peers")
  }
  # Dimension-consistency checks between U and V.
  stopifnot(nrec == ncol(V) - 1)
  stopifnot(npeer == ncol(U) - 3)
  # Convert each row of utilities into within-row ranks (1 = lowest),
  # preserving NAs. seq_len() is safe even if a dimension is zero.
  Urank <- matrix(nrow = nrec, ncol = npeer + 3)
  Vrank <- matrix(nrow = npeer, ncol = nrec + 1)
  for (i in seq_len(nrec)) {
    Urank[i, ] <- rank(U[i, ], na.last = "keep")
  }
  for (j in seq_len(npeer)) {
    Vrank[j, ] <- rank(V[j, ], na.last = "keep")
  }
  ## Call the compiled C++ routine to generate the matching from the ranks.
  recruitreturn <- recfromUV3urC(Urank, Vrank, reslocal)
  utiladj <- recruitreturn$utiladj
  rownames(utiladj) <- rownames(U)
  colnames(utiladj) <- rownames(V)
  utiladj
}
|
eba87a73a734c26550613f1deeae2af04cbd7809
|
5989a671b320818c74d546ea3ca119ca57d82984
|
/man/SESOI_upper_independent_func.Rd
|
461e4ef4932a9ee9a15e745d5218a6f66b603c71
|
[
"MIT"
] |
permissive
|
mladenjovanovic/bmbstats
|
5ea699af6884bbc6a3dbc936a4e1ac5c04494b79
|
ab62109bad903b65000eeac5539c36774f5675f3
|
refs/heads/master
| 2022-11-23T08:47:16.514748
| 2020-08-03T07:22:28
| 2020-08-03T07:22:28
| 274,726,285
| 4
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 932
|
rd
|
SESOI_upper_independent_func.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_independent_groups.R
\name{SESOI_upper_independent_func}
\alias{SESOI_upper_independent_func}
\title{SESOI upper threshold for \code{\link{compare_independent_groups}}}
\usage{
SESOI_upper_independent_func(group_a, group_b, na.rm = FALSE)
}
\arguments{
\item{group_a}{Numeric vector. This group represents baseline/control, observed variable, Pre-test in the paired design, or "practical" measure}
\item{group_b}{Numeric vector. This group represents experimental, predicted variable, Post-test in the paired design, or "criterion" measure}
\item{na.rm}{Should NAs be removed? Default is \code{FALSE}}
}
\value{
Pooled SD of \code{group_a} and \code{group_b}
multiplied by 0.2 (Cohen's trivial)
}
\description{
SESOI upper threshold for \code{\link{compare_independent_groups}}
}
\examples{
SESOI_upper_independent_func(rnorm(20), rnorm(10))
}
|
b4f51493de4672f8b8741ebd9f2944f84f3b217f
|
7712fbde5f04f94655680a20131abfd77131f5b0
|
/q431/helloworld.R
|
b1a32c24740919a8f269587da8488e82244b11d7
|
[] |
no_license
|
cielavenir/codeiq_problems
|
5bc7fd972c56ff0f9e534555c3b955b3e76664ce
|
ac111010aef44e2ac0eb69bf33ed6307544720c6
|
refs/heads/master
| 2020-05-21T03:24:14.739998
| 2019-01-11T03:56:58
| 2019-01-11T03:56:58
| 12,939,073
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
helloworld.R
|
#!/usr/bin/Rscript
# Obfuscated "Hello World": derives the powers of two from the digit-free
# seed length(c(TRUE)) == 1, then assembles each character's ASCII code as a
# sum of those powers and prints it.
one <- length(c(TRUE))
two <- one + one
four <- two + two
eight <- four + four
sixteen <- eight + eight
thirtytwo <- sixteen + sixteen
sixtyfour <- thirtytwo + thirtytwo
# ASCII codes for "Hello World\n", each expressed as a sum of powers of two.
ascii <- c(
  sixtyfour + eight,                                # 'H'
  sixtyfour + thirtytwo + four + one,               # 'e'
  sixtyfour + thirtytwo + eight + four,             # 'l'
  sixtyfour + thirtytwo + eight + four,             # 'l'
  sixtyfour + thirtytwo + eight + four + two + one, # 'o'
  thirtytwo,                                        # ' '
  sixtyfour + sixteen + four + two + one,           # 'W'
  sixtyfour + thirtytwo + eight + four + two + one, # 'o'
  sixtyfour + thirtytwo + sixteen + two,            # 'r'
  sixtyfour + thirtytwo + eight + four,             # 'l'
  sixtyfour + thirtytwo + four,                     # 'd'
  eight + two                                      # newline
)
for (code in ascii) cat(intToUtf8(code))
|
14a87e5af984d588de102ca01ef60055d62eeec8
|
d7dbc2ffe4da4d8b132ea2d92f7233ca8326bbfb
|
/man/naver_keyword.Rd
|
91ebc7ae46a0e09804f18daa2393e90c79affff9
|
[] |
no_license
|
statkclee/suggestK
|
4853fff110adfa0201f14e504bfac4b910146e19
|
7eb8fe4ac9e37b7196155ba2d49d6632c949b5d1
|
refs/heads/master
| 2020-03-15T15:32:43.117836
| 2018-05-05T14:39:05
| 2018-05-05T14:39:05
| 132,214,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 779
|
rd
|
naver_keyword.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/naver_keyword.R
\name{naver_keyword}
\alias{naver_keyword}
\title{Naver Suggest Search Terms - Search Level 2}
\usage{
naver_keyword(keyword)
}
\arguments{
\item{keyword}{search terms, keywords which supports Korean as well.}
}
\description{
The function aims to crawl the second level related search terms from the NAVER search engine.
The only argument for this function is "Search Terms".
This function calls the naver_keyword_R1 function in order to get the related search terms from the initial search term.
It iterates crawling the individual search terms until it consumes all the level 1 search terms.
}
\examples{
naver_keyword("korea")
}
\keyword{naver,}
\keyword{search}
\keyword{terms}
|
342968ebfede18424d4fea9860cb6de948600609
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RobustGaSP/man/Sample.rgasp.Rd
|
3511110df5419b367ecfc7e0427d680b8d02219f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,626
|
rd
|
Sample.rgasp.Rd
|
\name{Sample}
\alias{Sample}
\alias{Sample.rgasp}
\alias{Sample.rgasp-class}
\alias{Sample,rgasp-method}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Sample for Robust GaSP model
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Function to sample Robust GaSP after the Robust GaSP model has been constructed.
}
\usage{
\S4method{Sample}{rgasp}(object, testing_input, num_sample=1,
testing_trend= matrix(1,dim(testing_input)[1],1),
r0=NA, rr0=NA, sample_data=T,
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ an object of class \code{rgasp}.}
\item{testing_input}{a matrix containing the inputs where the \code{rgasp} is to sample.}
\item{num_sample}{number of samples one wants. }
\item{testing_trend}{a matrix of mean/trend for prediction.}
\item{r0}{
the distance between input and testing input. If the value is \code{NA}, it will be calculated later. It can also be specified by the user. If specified by user, it is either a \code{matrix} or \code{list}. The default value is \code{NA}.
}
\item{rr0}{
the distance between testing input and testing input. If the value is \code{NA}, it will be calculated later. It can also be specified by the user. If specified by user, it is either a \code{matrix} or \code{list}. The default value is \code{NA}.
}
\item{sample_data}{
a boolean value. If \code{T}, samples are drawn for the data (including the noise). Otherwise, samples are drawn for the mean of the data only.
}
\item{...}{Extra arguments to be passed to the function (not implemented yet).}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%Provide here some details.
%}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
The returned value is a \code{matrix} where each column is a sample on the prespecified inputs.
}
\references{
Mengyang Gu. (2016). Robust Uncertainty Quantification and Scalable Computation for Computer Models with Massive Output. Ph.D. thesis. Duke University.
}
\author{
%% ~~who you are~~
\packageAuthor{RobustGaSP}
Maintainer: \packageMaintainer{RobustGaSP}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#------------------------
# a 1 dimensional example
#------------------------
###########1dim higdon.1.data
p1 = 1 ###dimensional of the inputs
dim_inputs1 <- p1
n1 = 15 ###sample size or number of training computer runs you have
num_obs1 <- n1
input1 = 10*matrix(runif(num_obs1*dim_inputs1), num_obs1,dim_inputs1) ##uniform
#####lhs is better
#library(lhs)
#input1 = 10*maximinLHS(n=num_obs1, k=dim_inputs1) ##maximin lhd sample
output1 = matrix(0,num_obs1,1)
for(i in 1:num_obs1){
output1[i]=higdon.1.data (input1[i])
}
m1<- rgasp(design = input1, response = output1, lower_bound=FALSE)
#####locations to samples
testing_input1 = seq(0,10,1/50)
testing_input1=as.matrix(testing_input1)
#####draw 10 samples
m1_sample=Sample(m1,testing_input1,num_sample=10)
#####plot these samples
matplot(testing_input1,m1_sample, type='l',xlab='input',ylab='output')
lines(input1,output1,type='p')
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
\keyword{internal}
|
4e92de774fdda81e71fae40878085085a1a7aec2
|
01a458f4aecf432967314c418251dc64ca73f95c
|
/session_data_prep.r
|
6783aedd2eb33c9dda30fe4592a4e0a9b5b05e98
|
[] |
no_license
|
vdpappu/kaggle-Airbnb
|
0e0c8d70c270ade0a607089a62c55c4bc63ff641
|
997934c34a47cbf9f03d7459d9db81839de34b1e
|
refs/heads/master
| 2021-01-15T10:42:01.305875
| 2017-08-07T15:43:02
| 2017-08-07T15:43:02
| 99,591,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,819
|
r
|
session_data_prep.r
|
# Prepare Airbnb session data: aggregate total session time per user and
# pivot action_detail / action_type / device_type counts into wide,
# one-row-per-user tables, then merge them into one feature table.
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged --
# they clobber the caller's workspace and hard-code a machine-specific path.
rm(list=ls())
library(readr)
library(data.table)
library(dplyr)
library(reshape)   # provides cast(); superseded by reshape2/tidyr, kept as-is
library(reshape2)
setwd("/Users/vdpappu/Documents/Kaggle/In-Progress/AIR_BNB")
df_sessions <- read_csv("./input/sessions.csv")
# Total seconds elapsed per user across all of their sessions.
user_sess <- subset(df_sessions,select=c("user_id","secs_elapsed"))
df_sessionTime <- user_sess %>%
  group_by(user_id) %>%
  summarise(total_time = sum(secs_elapsed,na.rm=TRUE))
rm(user_sess)
# Wide table of action_detail counts: one row per user, one column per detail.
temp_action_detail <- subset(df_sessions,select=c("user_id","action_detail"))
temp_action_detail <- temp_action_detail[complete.cases(temp_action_detail),]
temp_action_detail$count <- 1
df_actionDetails <- cast(temp_action_detail,user_id~action_detail)
#write.csv(df_actionDetails,"./input/df_actionDetails.csv")
# Wide table of action_type counts per user.
temp_actionType <- subset(df_sessions,select=c("user_id","action_type"))
temp_actionType <- temp_actionType[complete.cases(temp_actionType),]
temp_actionType$count <- 1
df_actionType <- cast(temp_actionType,user_id~action_type)
names(df_actionType)[2] <- 'unknown_action'
rm(temp_actionType)
# Quick sanity check: print the number of distinct values in each column.
for(i in 1:ncol(df_actionType))
{
  print(paste(names(df_actionType)[i],length(unique(df_actionType[,i])),sep=" : "))
}
df_actionType$booking_response <- NULL
# Wide table of device_type counts per user; rare device columns are dropped.
temp_device_type <- subset(df_sessions,select=c(user_id,device_type))
temp_device_type <- temp_device_type[complete.cases(temp_device_type),]
temp_device_type$count <- 1
df_deviceType <- cast(temp_device_type,user_id~device_type)
names(df_deviceType)[2] <- 'unknown_device'
myvars <- names(df_deviceType) %in% c('Blackberry','Opera Phone','iPodtouch','Windows Phone')
df_deviceType <- df_deviceType[!myvars]
# Merge the per-user feature tables on user_id.
# NOTE(review): df_sessions_md1 is built but never written out; the commented
# write.csv below writes df_sessions_md instead -- verify which is intended.
df_sessions_md <- merge(df_actionType,df_deviceType,by="user_id")
df_sessions_md1 <- merge(df_sessions_md,df_actionDetails,by="user_id")
#write.csv(df_sessions_md,"df_sessions_new.csv",row.names=FALSE)
|
eac49badf9ad5cf3c3bfa2320d2ca8f3c39213b5
|
c73d44c98456527625aeb0c4d23d25d417c3e72e
|
/plot2.R
|
3b6376f2e7cdd8229165d53e3918915da8e1a9e3
|
[] |
no_license
|
rgrivasp71/ExploratoryDataAnalysis
|
47d7c00570c4da6d09aa98be65516677d752f5f7
|
5dadc471f55f3f000ac5dfae391f957152db08d4
|
refs/heads/master
| 2022-11-28T06:17:21.432046
| 2020-08-10T13:52:50
| 2020-08-10T13:52:50
| 282,776,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
plot2.R
|
# Plot 2: Global Active Power (kW) over 2007-02-01 and 2007-02-02,
# written to plot2.png (480 x 480 px).
#opening the data set
# NOTE(review): missing values in this dataset are coded "?"; without
# na.strings = "?" the power column may be read as non-numeric -- verify.
data<-read.table("/cloud/project/household_power_consumption.txt",sep = ";", header = TRUE)
#formating date type columns
data$Date<-as.Date(data$Date, "%d/%m/%Y")
# subsetting dates 2007-02-01 and 2007-02-02
newdata<-subset(data, Date=="2007-02-01"|Date=="2007-02-02")
#Creating a new variable with the date and time
DateAndTime<-paste(as.character(newdata$Date), newdata$Time)
#POSIXlt format to DateAndTime variable
DateAndTime<-strptime(DateAndTime, "%Y-%m-%d %H:%M:%S" )
#plot2: line chart of global active power against time, saved to PNG
png(filename = "plot2.png",width = 480, height = 480)
plot(DateAndTime, newdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
13c73a41cc9f0a5c9c399559f6cbe12c96bbc79a
|
785c2a9c0a772048e3bc48e6ee07046ab902ff71
|
/R/setup.R
|
879ee5583bc4f29e28c75ab74a4654ec28640abe
|
[
"MIT"
] |
permissive
|
bartongroup/MG_GlycoTreg
|
c4fa57e58ff176061bc0fff33af36a258657ad33
|
1dc5eddcce62907542c90d00063b259b4dcc127f
|
refs/heads/master
| 2023-04-14T20:49:24.494690
| 2023-02-17T07:56:57
| 2023-02-17T07:56:57
| 186,639,498
| 1
| 0
| null | 2021-04-29T06:50:27
| 2019-05-14T14:28:01
|
R
|
UTF-8
|
R
| false
| false
| 840
|
r
|
setup.R
|
# Format a number with thousands separators, e.g. 1234567 -> "1,234,567".
N <- function(n) prettyNum(n, big.mark = ",")

# Root directory of the project tree.
topDir <- "../glycotreg/"

# Public HTML root used for file downloads.
public_html <- "http://www.compbio.dundee.ac.uk/user/mgierlinski/glycotreg/"

# Named sub-directories of the project tree (relative to the root).
subDirs <- list(
  top = "",
  fastq = "fastq/",
  qc = "qc/",
  multiqc = "multiqc/",
  genome = "genome/",
  starmap = "starmap/",
  bam = "bam/",
  bedgraph = "bedgraph/",
  readcount = "readcount/",
  salmon = "salmon/",
  download = "download/",
  data = "data/"
)

# Prefix every configured sub-directory with the given root directory,
# returning a named list of full paths.
makeDirs <- function(topDir) {
  lapply(subDirs, function(sub) paste0(topDir, sub))
}

# Full paths for all project directories.
dirs <- makeDirs(topDir)

# Reference genome, transcript and annotation files (Ensembl GRCm38).
genomeFile <- paste0(dirs$genome, "Mus_musculus.GRCm38.dna_rm.primary_assembly.fa")
transFile <- paste0(dirs$genome, "Mus_musculus.GRCm38.cds.all.fa")
gtfFile <- paste0(dirs$genome, "Mus_musculus.GRCm38.93.gtf")
|
4e460cadbcce9f84e95b0e77e57c761dc18cec67
|
14b4098e27706f1cbae4e558391c7c8f2a1ba61e
|
/man/vecYears.Rd
|
1b4a5a34c5d1a16a5ad8d9845549215da8b5eed7
|
[] |
no_license
|
cran/qualypsoss
|
b0f1a247ae39f21c21d6537e320d97522afd8c98
|
f69214faa637b89596081521c3fb135e17c5440d
|
refs/heads/master
| 2022-09-19T01:29:12.112426
| 2022-08-31T10:50:02
| 2022-08-31T10:50:02
| 236,877,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 439
|
rd
|
vecYears.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/description-data.r
\docType{data}
\name{vecYears}
\alias{vecYears}
\title{vecYears gives the years corr. to Y, i.e. from 1971 to 2099}
\format{
vectors of length 129
}
\usage{
data(vecYears)
}
\description{
vecYears gives the years corr. to Y, i.e. from 1971 to 2099
}
\author{
Guillaume Evin \email{guillaume.evin@inrae.fr}
}
\keyword{data}
|
7d98c7e7f9d993c92f4b202c37f53dd67ce23144
|
557cda9a1cb3fd04da7ef15c9adec69bb3df9888
|
/man/profresp.Rd
|
595ffeae4b9943eb95f2de6bdb0919cc67123a7a
|
[] |
no_license
|
cran/SDAResources
|
7e4cb27a87fa4e8e334f641c419fcc6e912e33a2
|
addafccfb82d962f234606fc6fcb2386fc8f60f3
|
refs/heads/master
| 2023-08-22T23:38:51.589732
| 2021-10-22T08:20:13
| 2021-10-22T08:20:13
| 368,240,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,764
|
rd
|
profresp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/profresp.R
\docType{data}
\name{profresp}
\alias{profresp}
\title{profresp data}
\format{
This data frame contains the following columns:
\describe{
\item{prof_cat:}{Level of professionalism
1 = novice
2 = average
3 = professional}
\item{panelnum:}{Number of panels respondent has belonged to. A response
between 1 and 6 means that the person has belonged to that
number of panels; 7 means 7 or more.}
\item{survnum_cat:}{How many Internet surveys have you completed before this
one?
1 = This is my first one
2 = 1-5
3 = 6-10
4 = 11-15
5 = 16-20
6 = 21-30
7 = More than 30}
\item{panelq1:}{Are you a member of any online survey panels besides this one?
1 = yes
2 = no}
\item{panelq2:}{To how many other online panels do you belong?
1 = None
2 = 1 other panel
3 = 2 others
4 = 3 others
5 = 4 others
6 = 5 others
7 = 6 others or more.
This question has
a missing value if panelq1 = 2. If you want to estimate how
many panels a respondent belongs to, create a new variable
numpanel that equals panelq2 if panelq2 is not missing and
equals 1 if panelq1 = 2.}
\item{age4cat:}{Age category
1 = 18 to 34
2 = 35 to 49
3 = 50 to 64
4 = 65 and over}
\item{edu3cat:}{Education category
1 = high school or less
2 = some college or associates' degree
3 = college graduate or higher}
\item{gender:}{
1 = male
2 = female}
\item{non_white:}{1 = race is non-white
0 = race is white}
\item{motive:}{Which best describes your main reason for joining on-line
survey panels?
1 = I want my voice to be heard
2 = Completing surveys is fun
3 = To earn money
4 = Other (Please specify)}
\item{freq_q1:}{During the PAST 12 MONTHS, how many times have you
seen a doctor or other health care professional about your
own health? Response is number between 0 and 999.}
\item{freq_q2:}{During the PAST MONTH, how many days have you felt
you did not get enough rest or sleep?}
\item{freq_q3:}{During the PAST MONTH, how many times have you eaten
in restaurants? Please include both full-service and fast food restaurants.}
\item{freq_q4:}{During the PAST MONTH, how many times have you
shopped in a grocery store? If you shopped at more than one
grocery store on a single trip, please count them separately.}
\item{freq_q5:}{During the PAST 2 YEARS, how many overnight trips have you taken?}
}
}
\usage{
data(profresp)
}
\description{
The data described in Zhang et al. (2020) were downloaded from \href{https://www.openicpsr.org/openicpsr/project/109021/version/V1/view}{https://www.openicpsr.org/openicpsr/project/109021/version/V1/view}
on January 22, 2020, from file survey4.rds.
}
\details{
The data set \emph{profresp} contains selected variables from the set of 2,407
respondents who completed the survey and provided information on the demographic variables and the information
needed to calculate "professional respondent" status. The full data set survey4.rds contains
numerous additional questions about behavior that are not included here, as well as the data
from the partially completed surveys. The website also contains data for three other online
panel surveys. Because profresp is a subset of the full data, statistics calculated from
it may differ from those in Zhang et al. (2020).
Missing values are denoted by NA.
}
\references{
Zhang et al. (2020). Professional respondents in
opt-in online panels: What do we really know? \emph{Social Science Computer Review 38 (6)},
703–719.
Lohr (2021), Sampling: Design and Analysis, 3rd Edition. Boca Raton, FL: CRC Press.
Lu and Lohr (2021), R Companion for \emph{Sampling: Design and Analysis, 3rd Edition}, 1st Edition. Boca Raton, FL: CRC Press.
}
\keyword{datasets}
|
d154a13d12b55253c06fbb908af04541313c7146
|
ace27a97e2012c2c3c275e78f3394fc5ca11b736
|
/R/IRR.R
|
8985bc5b73d3d4daf1f7803b4983e8e37a66b5e1
|
[] |
no_license
|
pem725/MRES
|
6a27ed4884d2ea31524307b0703ee0d097097f14
|
01b0d92577a344614b04a357a820357082896e84
|
refs/heads/master
| 2016-09-05T10:40:03.284385
| 2013-09-16T15:42:34
| 2013-09-16T15:42:34
| 4,679,282
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,946
|
r
|
IRR.R
|
## This is the beginning code to test Bob Holt's agreement indices
## Latest Revision Date: 11/04/2011
## generate example data
## Example ratings: 10 objects rated by 4 raters, split into 2 groups of 5.
dat <- data.frame(id=1:10,rater1=c(0,1,2,3,4,5,6,7,8,9),rater2=c(1,2,3,4,5,6,7,8,9,9),rater3=c(9,8,7,6,5,4,3,2,1,0),rater4=c(3,2,5,6,9,8,1,2,4,0),group=gl(2,5))
## Long format: one row per (object, rater) pair.
## Fixed: the original reshaped `IRRdatEx`, which is undefined anywhere in
## this file; `dat` (built on the line above and used as dat.l below) is the
## intended input.
dat.l <- reshape(dat,varying=2:5,timevar="rater",v.names="rating",sep="",direction="long")
# Sensitivity (omega-squared-style) index for ratings.
#
# For each rater, fits a one-way ANOVA of the ratings on the model formula
# (within that rater's rows only) and reports the share of the mean squares
# attributable to the model term:
#   MeanSq(term) / (MeanSq(term) + MeanSq(residuals))
#
# Arguments:
#   x     - model formula, e.g. rating ~ group
#   rater - name (string) of the rater column in `data`
#   data  - data frame in long format (one row per object x rater)
#
# Returns a data frame with one row per rater and columns
# c("rater", "OmegaSqrd").
omegasq <- function(x, rater = NULL, data = NULL) {
  rater_col <- data[[rater]]
  raters <- unique(rater_col)
  # One ANOVA per rater. vapply() guarantees a numeric result; the original
  # row-wise `out[i, ] <- c(raters[i], ...)` silently coerced both columns
  # to character whenever the rater identifiers were non-numeric.
  omega <- vapply(raters, function(r) {
    anova_tab <- summary(aov(x, data = data[rater_col == r, , drop = FALSE]))[[1]]
    # Column 3 of the summary.aov table is "Mean Sq".
    anova_tab[[3]][1] / sum(anova_tab[[3]])
  }, numeric(1))
  data.frame(rater = raters, OmegaSqrd = as.numeric(omega))
}
## run code above with this line:
omegasq(rating~group,rater="rater",data=dat.l)
# Systematic (Holt's t) differences index for ratings: intended as a t-style
# comparison of each rater's ratings against the per-object group averages.
# NOTE(review): this function appears unfinished and will not run as written:
#   * `x` is overwritten with a plain vector, so `x$rater` and `x$group`
#     below will fail (atomic vectors have no `$` components);
#   * `grpmeans` is computed but never used -- presumably the intended
#     source of the per-object averages;
#   * `out` is initialised but never filled in or returned;
#   * the body ends in an assignment, so `num/den` is returned invisibly.
sysdiff <- function(x,obj,rater,data){
  # systematic (holt's t) differences formula for ratings
  # consists of a t-test between the rater and group averages
  # where...
  # x is the rating
  # obj is the variable that contains the object being rated
  # rater is the variable that contains the rater variable
  # data is the data.frame in long format
  x <- data[,match(x,names(data))]
  obj <- data[,match(obj,names(data))]
  rater <- data[,match(rater,names(data))]
  out <- data.frame(rater=unique(rater),t.Test=NA)
  grpmeans <- aggregate(x,list(obj),mean)
  num <- mean(x$rater - x$group)
  den <- var(x$rater - x$group)
  t.out <- num/den
}
# Within-group agreement index (Holt's rwg):
#   rwg = 1 - observed variance / variance expected under a uniform
#         distribution over all possible scale values.
#
# Arguments:
#   x     - name (string) of the ratings column in `data`
#   scale - vector of all possible rating values
#   group - optional name (string) of a conditioning column in `data`
#   data  - data frame holding the ratings
#
# Returns a single rwg value, or (when `group` is given) a data frame with
# one row per group level and columns c(group, "rwg").
rwg <- function(x, scale = NULL, group = NULL, data = NULL) {
  agreement <- function(ratings) {
    # var(rep(scale, 1000)) approximates the variance of a uniform
    # distribution over the scale values.
    1 - (var(ratings) / var(rep(scale, 1000)))
  }
  ratings <- data[[x]]
  if (is.null(group)) {
    return(agreement(ratings))
  }
  grouping <- data[[group]]
  levels_seen <- unique(grouping)
  result <- data.frame(group = levels_seen, rwg = NA)
  for (k in seq_along(levels_seen)) {
    result[k, 2] <- agreement(ratings[grouping == levels_seen[k]])
  }
  names(result) <- c(group, "rwg")
  result
}
## Example calls: overall agreement, then agreement conditioned on `group`.
rwg("rating",0:9,data=dat.l)
rwg("rating",0:9,"group",data=dat.l)
# NOTE(review): placeholder -- consistency formula not yet implemented
# (the body is empty, so the function returns NULL).
consist <- function(x){
  # consistency formula for ratings
}
# NOTE(review): placeholder -- congruency formula not yet implemented.
congruency <- function(x){
  # congruency formula for ratings
}
|
6a15b86fe7eebaa5f81741682176dee5343c028b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ssh.utils/examples/ps.grep.remote.Rd.R
|
f68eb56f1c3b4da8ae8e018e159f6af1014dc965
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
ps.grep.remote.Rd.R
|
# Auto-extracted example code for ssh.utils::ps.grep.remote. The example
# itself is wrapped in "Not run" guards, so nothing executes beyond loading
# the (third-party) ssh.utils package.
library(ssh.utils)
### Name: ps.grep.remote
### Title: Checks for processes running on a local or remote machine.
### Aliases: ps.grep.remote
### ** Examples
## Not run:
##D # Check if Eclipse is running.
##D ps.grep.remote("Eclipse", remote = "")
##D # [1] TRUE
## End(Not run)
|
a43133fd133eaca2cf07a517dbfe336d8fa1a0e1
|
c1d359cdf0281885744cdcd85d41a21e91218b43
|
/man/controls.Rd
|
3151dad3474f472726ebd2b1c324b788ef460ca7
|
[
"MIT"
] |
permissive
|
zwdzwd/sesame
|
20b2d29578661487db53432c8991d3c4478aa2c1
|
62fe6ef99a02e7f94b121fb601c3f368b8a4c1a8
|
refs/heads/master
| 2023-08-08T01:45:02.112492
| 2023-07-26T13:23:03
| 2023-07-26T13:23:03
| 122,086,019
| 37
| 26
|
MIT
| 2023-01-05T16:02:38
| 2018-02-19T16:00:34
|
R
|
UTF-8
|
R
| false
| true
| 465
|
rd
|
controls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SigDFMethods.R
\name{controls}
\alias{controls}
\title{get the controls attributes}
\usage{
controls(sdf, verbose = FALSE)
}
\arguments{
\item{sdf}{a \code{SigDF}}
\item{verbose}{print more messages}
}
\value{
the controls data frame
}
\description{
get the controls attributes
}
\examples{
sesameDataCache() # if not done yet
sdf <- sesameDataGet('EPIC.1.SigDF')
head(controls(sdf))
}
|
80787b7569fe587ce32a380f0607930fb4866809
|
78c6fb7a90dfd4ce5240032408811660358dfbd7
|
/R/search-better.r
|
7aff16b30124d28234526ad9f20a512456aa24fe
|
[] |
no_license
|
haleyjeppson/tourr
|
b335178bcb9c155be194afbe4d20ee662581578c
|
272a2f88d9981b4bc7d5c86e2f8cde24183037e5
|
refs/heads/master
| 2021-12-07T17:54:33.348101
| 2021-05-30T00:39:47
| 2021-05-30T00:39:47
| 167,255,934
| 0
| 0
| null | 2019-01-23T21:20:03
| 2019-01-23T21:20:03
| null |
UTF-8
|
R
| false
| false
| 6,595
|
r
|
search-better.r
|
#' Generate a basis in the neighbourhood of the current one,
#' e.g. as a proposal step for simulated annealing.
#' @keywords internal
basis_nearby <- function(current, alpha = 0.5, method = "linear") {
  method <- match.arg(method, c("linear", "geodesic"))
  proposal <- basis_random(nrow(current), ncol(current))
  if (method == "linear") {
    # Blend the two bases and re-orthonormalise the result.
    orthonormalise((1 - alpha) * current + alpha * proposal)
  } else {
    # Step a fraction alpha along the geodesic towards the proposal.
    step_fraction(geodesic_info(current, proposal), alpha)
  }
}
#' Check whether the current and target bases have the same orientation,
#' flipping any target column that points away from the matching current
#' column (a column and its negation span the same direction, so flipping
#' avoids spurious 180-degree rotations during interpolation).
#' @keywords internal
correct_orientation <- function(current, target) {
  # Fixed: the original looped `for (i in ncol(current))`, which visits only
  # the single value ncol(current) and therefore checked just the LAST
  # column. seq_len() iterates over every column.
  for (i in seq_len(ncol(current))) {
    # det() of the 1x1 cross product is simply the columns' inner product.
    if (det(t(current[, i]) %*% target[, i]) < 0) {
      target[, i] <- -target[, i]
    }
  }
  return(target)
}
#' Search for a better projection near the current projection.
#'
#' @param current starting projection
#' @param alpha the angle used to search the target basis from the current basis
#' @param index index function
#' @param tries the counter of the outer loop of the optimiser
#' @param max.tries maximum number of iterations before giving up
#' @param ... other arguments being passed into the \code{search_better()}
#' @param method whether the nearby bases are found by a linear/geodesic formulation
#' @param cur_index the index value of the current basis
#' @keywords optimize
#' @importFrom utils tail globalVariables
#' @export
#' @examples
#' animate_xy(flea[, 1:6], guided_tour(holes(), search_f = search_better))
search_better <- function(current, alpha = 0.5, index, tries, max.tries = Inf,...,
                          method = "linear", cur_index = NA) {
  # Score the starting basis unless the caller already supplied its index.
  if (is.na(cur_index)) cur_index <- index(current)
  if (cur_index == 0) {
    warning("cur_index is zero!")
  }
  cat("Old", cur_index, "\n")
  try <- 1
  while (try < max.tries) {
    # Draw a candidate basis near the current one and score it.
    new_basis <- basis_nearby(current, alpha, method)
    new_index <- index(new_basis)
    # Log this attempt into the `record` tibble kept in a caller's
    # environment. NOTE(review): parent.frame(n = 4) hard-codes the call
    # depth of the optimiser's caller -- fragile if the call chain changes.
    rcd_env <- parent.frame(n = 4)
    rcd_env[["record"]] <- dplyr::add_row(
      rcd_env[["record"]],
      basis = list(new_basis),
      index_val = new_index,
      info = "random_search",
      tries = tries,
      loop = try,
      method = "search_better",
      alpha = round(alpha, 4)
    )
    # Greedy acceptance: take the first candidate that beats the current
    # index value.
    if (new_index > cur_index) {
      cat("New", new_index, "try", try, "\n")
      # Re-label the accepted attempt, then align the accepted basis'
      # orientation with the current basis before returning it.
      nr <- nrow(rcd_env[["record"]])
      rcd_env[["record"]][nr, "info"] <- "new_basis"
      new_basis <- correct_orientation(current, new_basis)
      rcd_env[["record"]][[nr, "basis"]] <- list(new_basis)
      return(list(target = new_basis, alpha = alpha))
    }
    try <- try + 1
  }
  # No improvement within max.tries: print the final projection and give up.
  cat("No better bases found after ", max.tries, " tries. Giving up.\n",
    sep = ""
  )
  cat("Final projection: \n")
  if (ncol(current) == 1) {
    for (i in 1:length(current)) {
      cat(sprintf("%.3f", current[i]), " ")
    }
    cat("\n")
  }
  else {
    for (i in 1:nrow(current)) {
      for (j in 1:ncol(current)) {
        cat(sprintf("%.3f", current[i, j]), " ")
      }
      cat("\n")
    }
  }
  # Number the recorded attempts before returning NULL (no target found).
  rcd_env[["record"]] <- dplyr::mutate(
    rcd_env[["record"]],
    id = dplyr::row_number()
  )
  NULL
}
#' Search for a better projection using simulated annealing
#'
#' Given an initial \eqn{t0}, the cooling scheme updates temperature at \deqn{T = t0 /\log(i + 1)}
#' The candidate basis is sampled via \deqn{B_j = (1 - \alpha) * B_i + \alpha * B} where alpha defines the neighbourhood, \eqn{B_i} is the current basis, B is a randomly generated basis
#' The acceptance probability is calculated as \deqn{prob = \exp{-abs(I(B_i) - I(B_j))/ T}}
#' For more information, see
#' \url{https://projecteuclid.org/download/pdf_1/euclid.ss/1177011077}
#'
#' @param current starting projection
#' @param alpha the angle used to search the target basis from the current basis
#' @param index index function
#' @param tries the counter of the outer loop of the optimiser
#' @param max.tries maximum number of iteration before giving up
#' @param method whether the nearby bases are found by a linear/ geodesic formulation
#' @param cur_index the index value of the current basis
#' @param t0 initial decrease in temperature
#' @param ... other arguments being passed into the \code{search_better_random()}
#'
#' @keywords optimize
#' @export
#' @examples
#' animate_xy(flea[, 1:6], guided_tour(holes(), search_f = search_better_random))
search_better_random <- function(current, alpha = 0.5, index, tries,
                                 max.tries = Inf, method = "linear", cur_index = NA, t0 = 0.01,
                                 ...) {
  ## Simulated-annealing variant of search_better(): improvements are always
  ## accepted; non-improvements are accepted with probability
  ## exp(-|I(cur) - I(new)| / T), T = t0 / log(try + 1).
  if (is.na(cur_index)) cur_index <- index(current)
  if (cur_index == 0) {
    warning("cur_index is zero!")
  }
  cat("Old", cur_index, "\n")
  try <- 1
  while (try < max.tries) {
    new_basis <- basis_nearby(current, alpha, method)
    new_index <- index(new_basis)
    ## Cooling schedule: temperature decays logarithmically in the try count.
    temperature <- t0 / log(try + 1)
    ## NOTE(review): record lives four frames up the call stack -- this only
    ## works through the expected optimiser call chain.
    rcd_env <- parent.frame(n = 4)
    rcd_env[["record"]] <- dplyr::add_row(
      rcd_env[["record"]],
      basis = list(new_basis),
      index_val = new_index,
      info = "random_search",
      tries = tries,
      loop = try,
      method = "search_better_random",
      alpha = round(alpha, 4)
    )
    if (new_index > cur_index) {
      cat("New", new_index, "try", try, "\n")
      cat("Accept \n")
      nr <- nrow(rcd_env[["record"]])
      rcd_env[["record"]][nr, "info"] <- "new_basis"
      new_basis <- correct_orientation(current, new_basis)
      rcd_env[["record"]][[nr, "basis"]] <- list(new_basis)
      return(list(target = new_basis, alpha = alpha))
    }
    else {
      ## Metropolis-style acceptance of a worse basis.
      prob <- min(exp(-abs(cur_index - new_index) / temperature), 1)
      rand <- stats::runif(1)
      if (prob > rand) {
        cat("New", new_index, "try", try, "\n")
        cat("Accept with probability, prob =", prob, "\n")
        nr <- nrow(rcd_env[["record"]])
        rcd_env[["record"]][nr, "info"] <- "new_basis"
        ## NOTE(review): unlike the improvement branch above, this branch
        ## neither orientation-corrects new_basis nor rewrites the logged
        ## basis -- confirm whether that asymmetry is intentional.
        rcd_env[["record"]] <- dplyr::mutate(
          rcd_env[["record"]],
          id = dplyr::row_number()
        )
        return(list(target = new_basis, alpha = alpha))
      }
    }
    try <- try + 1
  }
  ## Give up: print the final projection and renumber the record.
  cat("No better bases found after ", max.tries, " tries. Giving up.\n",
    sep = ""
  )
  cat("Final projection: \n")
  if (ncol(current) == 1) {
    for (i in 1:length(current)) {
      cat(sprintf("%.3f", current[i]), " ")
    }
    cat("\n")
  }
  else {
    for (i in 1:nrow(current)) {
      for (j in 1:ncol(current)) {
        cat(sprintf("%.3f", current[i, j]), " ")
      }
      cat("\n")
    }
  }
  ## NOTE(review): rcd_env is only defined inside the while loop; with
  ## max.tries <= 1 the loop never runs and this line errors -- verify.
  rcd_env[["record"]] <- dplyr::mutate(
    rcd_env[["record"]],
    id = dplyr::row_number()
  )
  NULL
}
# globalVariables(c("t0","tries", "info", "runif"))
|
43088bfe2e603569af4804f598626be9963897d8
|
1fbd0362678e5198511a6f9f970d57fa71123039
|
/figures/supplementary/sfigure_5/sfigure_5_panels_A_and_B_nucleosome_density_diff.R
|
a3d5f4b9fe57ecb86aebdbbf3d9943fd67236a78
|
[] |
no_license
|
jbelsky/2015_genes_and_dev_belsky
|
179ba4628b340414427e729b937e7e91c85e710f
|
cba346bb78e01f766a7f31eb796c027bd0fe897b
|
refs/heads/master
| 2021-01-10T19:38:49.741327
| 2018-10-06T20:34:42
| 2018-10-06T20:34:42
| 34,973,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,951
|
r
|
sfigure_5_panels_A_and_B_nucleosome_density_diff.R
|
#############################################
## Analysis/plotting script for supplementary figure 5, panels A and B:
## nucleosome density differences (G1 vs G2) around OriDB ACS sites,
## split by whether Mcm ChIP-seq signal sits left or right of the ACS.
## NOTE(review): relies on helper functions (get_oridb_idx,
## convert_csv_to_matrix, average_matrices, set_chromatin_schematic,
## get_mod_peaks, plot_nucleosome, plot_mcm) and hard-coded data paths
## defined/sourced elsewhere -- this file is not standalone.
# Set the Filenames
feature.fn = paste("/data/data2/jab112/2014_mnase_manuscript/datasets/jab112_yeast_feature_files/replication_origins/",
		   "oridb_acs_feature_file_curated_798_sites_timing_whitehouse_raw_oem_acs_seq.csv", sep = ""
		  )
oridb_subnuc_density.fn = paste("/data/data2/jab112/2014_mnase_manuscript/figures/figure_2/figure_2_datasets/",
				"oridb_acs_feature_file_curated_798_sites_left_win_50bp_right_win_150bp.csv",
				sep = ""
			       )
# Get the mcm single density
mcm_datasets.fn.v = paste("/data/data2/jab112/2014_mnase_manuscript/figures/figure_2/figure_2_datasets/",
			  "mcm_chip_seq_dm", c(82, 282), "_signal_around_oridb_acs_feature_file_curated_798_sites_win_500bp.csv",
			  sep = ""
			 )
# Set the work dir
work_dir = "/data/data2/jab112/2014_mnase_manuscript/figures/figure_4/figure_4_datasets/"
# Set the file name types
file_name_type.v = c("g1_dm243", "g1_dm354",
		     "g2_dm242", "g2_dm356"
		    )
# Set the file name footer
footer = "nuc_150_175_density_signal_around_oridb_acs_feature_file_curated_798_sites_win_500bp.csv"
# Load the peak file
load("/data/data2/jab112/2014_mnase_manuscript/figures/figure_5/figure_5_datasets/orc_and_mcm_peak_mat.l")
# Set the x_win
## NOTE(review): x_win is not referenced later in this script -- possibly
## dead, or read by a sourced helper via the global environment; confirm.
x_win = 400
#############################################
# Data Processing
# Load the feature file name
feature.df = read.csv(feature.fn)
# Get the plot idx
oridb_idx.l = get_oridb_idx(oridb_subnuc_density.fn)
# Get the idx
oridb_subset_idx = sort(unlist(oridb_idx.l[c("g1", "g2")]))
# Subset on the oridb_subset_idx
feature.df = feature.df[oridb_subset_idx,]
# Get the mcm density
mcm.m = average_matrices(convert_csv_to_matrix(mcm_datasets.fn.v[1]),
			 convert_csv_to_matrix(mcm_datasets.fn.v[2])
			)[oridb_subset_idx,]
# Get the Mcm sum in a 100 bp window around the left (-90) and right (160) nucleosomes
left_mcm_sig.v = apply(mcm.m[,as.character(-190:10)], 1, sum)
right_mcm_sig.v = apply(mcm.m[,as.character(60:260)], 1, sum)
# Get the left and right mcm indices
## Classify each origin by which side carries more Mcm signal.
left_idx = which(left_mcm_sig.v > right_mcm_sig.v)
right_idx = which(left_mcm_sig.v < right_mcm_sig.v)
#############################################
# Nucleosome Data Processing
# Create the storage matrix list
mat.l = vector("list", 2)
names(mat.l) = c("g1", "g2")
# Create the dataset matrix
## Each cell-cycle stage averages two replicate datasets
## (file_name_type.v pairs: G1 = dm243/dm354, G2 = dm242/dm356).
for(i in 1:2){
	# Get the output density matrix
	mat.l[[i]] = average_matrices(convert_csv_to_matrix(paste(work_dir, file_name_type.v[2*i - 1], "_", footer, sep = "")),
				      convert_csv_to_matrix(paste(work_dir, file_name_type.v[2*i], "_", footer, sep = ""))
				     )
	# Subset on the G1 and G2 idx
	mat.l[[i]] = mat.l[[i]][oridb_subset_idx,]
}
# Plotting
## Screen layout: two columns (left-Mcm origins, right-Mcm origins), each
## with a schematic strip on top and a density plot below.
nuc_scr.m = matrix(c(0, 0.5, 0.7, 0.9,
		     0, 0.5, 0, 0.7,
		     0.5, 1, 0.7, 0.9,
		     0.5, 1, 0, 0.7
		    ), ncol = 4, byrow = T
		  )
# Split the screen
nuc_scr.s = split.screen(nuc_scr.m)
# Open the screen
screen(nuc_scr.s[1])
# Set the mar
par(mar = c(0, 4.1, 0, 2.1))
# Set the plot area
set_chromatin_schematic(x_start = -400, x_end = 400, y_start = 0, y_end = 2)
# Get the nucleosome peaks
nuc_peaks.df = get_mod_peaks(apply(mat.l[[1]][left_idx,], 2, mean), x_mid = 0, peak_width = 150,
			     min_thresh = 0.5
			    )
# Make the nucleosome
plot_nucleosome(nuc_peaks.df[2:3,], y_max = 1.5, yh = 0.25, y0 = 1.5)
plot_nucleosome(nuc_peaks.df[2:3,], y_max = 1.5, yh = 0.25, y0 = 0.5)
# Add the Mcm to the G1
mcm_pos.v = nuc_peaks.df$pos[2] + 65 + c(17, 51)
plot_mcm(mcm_pos.v[1], x_w = 17, y0 = 1.5, yh = 0.27, obj_col = "purple")
plot_mcm(mcm_pos.v[2], x_w = 17, y0 = 1.5, yh = 0.27, obj_col = "purple")
# Add the text
text(x = -250, y = c(1.5, 0.5), labels = c("G1", "G2"), cex = 1.25)
# Open the screen
screen(nuc_scr.s[2])
par(mar = c(5.1, 4.1, 0.85, 2.1))
# Make the nucleosome density plot
plot(0, 0, type = "n",
     xlim = c(-400, 400), xaxs = "i", xaxt = "n",
     ylim = c(0, 2), yaxt = "n",
     xlab = "Relative distance from ACS (bp)",
     ylab = "Average nucleosome density"
    )
axis(1, at = seq(-400, 400, 200))
axis(2, at = 0:2)
axis(2, at = c(0.5, 1.5), labels = F)
# Set the legend
legend("topleft", legend = "G1", lwd = 2, col = "red", bty = "n")
legend("topright", legend = "G2", lwd = 2, col = "darkgreen", bty = "n")
lines(-500:500, apply(mat.l[[1]][left_idx,], 2, mean), col = "red")
lines(-500:500, apply(mat.l[[2]][left_idx,], 2, mean), col = "darkgreen")
# Open the screen
## Right-hand column: same pair of panels for origins whose Mcm signal
## lies to the right of the ACS.
screen(nuc_scr.s[3])
# Set the mar
par(mar = c(0, 4.1, 0, 2.1))
# Set the plot area
set_chromatin_schematic(x_start = -400, x_end = 400, y_start = 0, y_end = 2)
# Get the nucleosome peaks
nuc_peaks.df = get_mod_peaks(apply(mat.l[[1]][right_idx,], 2, mean), x_mid = 0, peak_width = 150,
			     min_thresh = 0.5
			    )
# Make the nucleosome
plot_nucleosome(nuc_peaks.df[2:3,], y_max = 1.5, yh = 0.25, y0 = 1.5)
plot_nucleosome(nuc_peaks.df[2:3,], y_max = 1.5, yh = 0.25, y0 = 0.5)
# Add the Mcm to the G1
mcm_pos.v = nuc_peaks.df$pos[3] - 65 - c(17, 51)
plot_mcm(mcm_pos.v[1], x_w = 17, y0 = 1.5, yh = 0.27, obj_col = "purple")
plot_mcm(mcm_pos.v[2], x_w = 17, y0 = 1.5, yh = 0.27, obj_col = "purple")
# Add the text
text(x = -250, y = c(1.5, 0.5), labels = c("G1", "G2"), cex = 1.25)
# Open the screen
screen(nuc_scr.s[4])
par(mar = c(5.1, 4.1, 0.85, 2.1))
# Make the nucleosome density plot
plot(0, 0, type = "n",
     xlim = c(-400, 400), xaxs = "i", xaxt = "n",
     ylim = c(0, 2), yaxt = "n",
     xlab = "Relative distance from ACS (bp)",
     ylab = "Average nucleosome density"
    )
axis(1, at = seq(-400, 400, 200))
axis(2, at = 0:2)
axis(2, at = c(0.5, 1.5), labels = F)
# Set the legend
legend("topleft", legend = "G1", lwd = 2, col = "red", bty = "n")
legend("topright", legend = "G2", lwd = 2, col = "darkgreen", bty = "n")
lines(-500:500, apply(mat.l[[1]][right_idx,], 2, mean), col = "red")
lines(-500:500, apply(mat.l[[2]][right_idx,], 2, mean), col = "darkgreen")
|
874847bffdc60f86d096a04d54d064d268b207b4
|
321ee8033d9154f903fb70b381b940a1d1c6e9b9
|
/Storable-Goods-Demand.R
|
70ea163c605dd7b41487747996b5c47dfd9adb00
|
[] |
no_license
|
Allisterh/Choice-Model-Consumer-Stockpiling-Behaviour
|
2a34067125c5a5840420106423bd7e1fdbb68a24
|
a0f60180893222cbb54256db572f710cc198ac6a
|
refs/heads/master
| 2022-01-22T08:03:52.444462
| 2018-02-25T08:01:15
| 2018-02-25T08:01:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,897
|
r
|
Storable-Goods-Demand.R
|
# Parameters
## Model primitives for the storable-goods dynamic demand model.
## Note: `n` has K+1 entries -- one per pack size plus a final 0 for the
## no-purchase option (see BellmanOperator, which loops k in 1:(K+1)).
settings = list(K = 2, # number of pack sizes
                n = c(2, 6, 0), # number of units in size k
                I = 20, # maximum inventory
                T = 200000, # periods of simulations
                T0 = 200, # burn-in period
                tol = 1e-8, # convergence tolerance
                iter_max = 800000 # maximum number of iterations
                )
## Consumer preference parameters.
param = list(beta = 0.99, # discount factor
             alpha = 4, # price sensitivity
             delta = 10, # consumption utility
             c = 0.05 # inventory holding cost
             )
## Price process: each pack size has a normal and a promotion price;
## the level (normal vs promotion) is drawn i.i.d. with `prob`.
price = list(price.norm = c(2, 5), # normal price
             price.prom = c(1.2, 3), # promotion price
             prob = c(0.84, 0.16), # probability of normal and promotion price, respectively
             L = 2 # number of price levels
             )
ValueFunctionIteration <- function (settings, param, price){
  ## Solve the consumer's dynamic program by value function iteration:
  ## repeatedly apply the Bellman operator until the sup-norm change
  ## falls below settings$tol or settings$iter_max sweeps are reached.
  ## Prints the elapsed time, plots the convergence trace, and returns
  ## the converged value and policy from BellmanOperator().
  value_prev <- matrix(0, nrow = settings$I + 1, ncol = price$L)
  iter_trace <- vector()
  diff_trace <- vector()
  # start above tolerance so the loop runs at least once
  gap <- settings$tol + 1
  sweep <- 1
  t_start <- Sys.time()
  while (gap >= settings$tol && sweep <= settings$iter_max) {
    iter_trace[sweep] <- sweep
    # one application of the Bellman operator
    value_new <- BellmanOperator(value_prev, settings, param, price)$value
    # sup-norm distance between successive iterates
    gap <- max(abs(value_new - value_prev))
    diff_trace[sweep] <- gap
    value_prev <- value_new
    sweep <- sweep + 1
  }
  t_end <- Sys.time()
  print(t_end - t_start)
  # convergence diagnostics: sup-norm difference per iteration
  plot(iter_trace, diff_trace, type = "l")
  # one final application to recover value and policy at the fixed point
  BellmanOperator(value_prev, settings, param, price)
}
# One application of the Bellman operator for the storable-goods model.
#
# Arguments:
#   value_0  (I+1) x L matrix of continuation values, rows = inventory
#            states 0..I, columns = price levels (1 = normal, 2 = promotion)
#   settings list with K (pack sizes), n (units per pack, length K+1 with a
#            trailing 0 for the no-purchase option), I (max inventory)
#   param    list with beta (discount), alpha (price sensitivity),
#            delta (consumption utility), c (holding cost)
#   price    list with price.norm, price.prom (length-K price vectors),
#            prob (level probabilities), L (number of levels)
# Returns: list(value = updated value matrix, choice = argmax choice matrix).
BellmanOperator <- function(value_0, settings, param, price){
  # choice-specific values: inventory state x price level x choice k
  v_choice <- array(0, dim = c(settings$I + 1, price$L, settings$K + 1))
  value <- matrix(0, nrow = settings$I + 1, ncol = price$L)
  choice <- matrix(0, nrow = settings$I + 1, ncol = price$L)
  inventory <- 0:settings$I
  # expected continuation value, integrating over next-period price levels
  Ev <- value_0 %*% price$prob
  # price paid under choice k at each level; the outside option costs 0
  # (simplified from the original cbind(t(...), 0)[k] construction)
  norm_price_k <- c(price$price.norm, 0)
  prom_price_k <- c(price$price.prom, 0)
  for (k in seq_len(settings$K + 1)) {
    # inventory after buying pack k
    i_plus_n <- inventory + settings$n[k]
    # end-of-period inventory after consuming one unit, clamped to [0, I]
    # (vectorised; replaces the original element-wise clamping loop)
    i_prime <- pmin(pmax(i_plus_n - 1, 0), settings$I)
    # consumption utility (only if a unit is available) minus holding cost
    u <- param$delta * (i_plus_n > 0) - param$c * i_prime
    v_choice[, 1, k] <- u - param$alpha * norm_price_k[k] + param$beta * Ev[i_prime + 1]
    v_choice[, 2, k] <- u - param$alpha * prom_price_k[k] + param$beta * Ev[i_prime + 1]
  }
  # maximise over choices for every (inventory, price level) state
  for (j in seq_len(settings$I + 1)) {
    value[j, 1] <- max(v_choice[j, 1, ])
    value[j, 2] <- max(v_choice[j, 2, ])
    choice[j, 1] <- which.max(v_choice[j, 1, ])
    choice[j, 2] <- which.max(v_choice[j, 2, ])
  }
  list(value = value,
       choice = choice)
}
# Solve the model and inspect the optimal purchase policy by state.
results <- ValueFunctionIteration(settings, param, price)
results$choice
|
35b62c60109af323cabb3f8f4b8e2f2699cf5cc8
|
c8cde1ea449bbd1c26454124a96e97df954ed319
|
/R/e06-engineAct.R
|
31e36ddf73127944a8c2c64e6d3434748b1f6342
|
[] |
no_license
|
cran/Umpire
|
b5785399ca942ecdf80cb369aa973b0df38230e0
|
46c5342920eda3414b38f3a1c46658db11a4c4fb
|
refs/heads/master
| 2021-07-03T21:09:38.015302
| 2020-11-10T20:10:06
| 2020-11-10T20:10:06
| 96,940,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,799
|
r
|
e06-engineAct.R
|
###############################################################
# An ENGINE WITH ACTIVITY allows for the possibility that some
# components (or genes) in an expression engine (or tissue) might
# be transcriptionally inactive. Thus, the true biological signal
# described previously should really be viewed as a mixture
# S_gi = z_g * delta_0 + (1 - z_g) * T_gi
# where
# delta_0 = a point mass at zero
# T_gi = a random variable supported on the positive real line
# z_g ~ Binom(pi) defines the activity state (1 = on, 0 = off)
# Note that we typically work not with T_gi but with its logarithm
# to some appropriate base. That is, the multivariate normal or
# independent normal blocks used to construct engines should be
# applied on the logarithmic scale.
# S4 class: an Engine whose components may be transcriptionally inactive.
# `active` is a logical flag per component (TRUE = on); `base` is the
# logarithm base used by rand() to back-transform simulated log-scale
# signals (base == 0 means stay on the Engine's own scale).
setClass("EngineWithActivity",
         contains = "Engine",
         slots = c(active="logical",
                   base="numeric"))
## Constructor: builds an EngineWithActivity from a list of components.
## `active` may be a single probability, in which case each component's
## activity state is drawn independently via rbinom, or a logical vector
## with one flag per component.
EngineWithActivity <- function(active, components, base=2) {
  engine <- Engine(components)
  if (length(active) == 1) {
    active <- rbinom(nComponents(engine), 1, active) == 1
  }
  new("EngineWithActivity", engine, active = active, base = base)
}
# Validity: `active` must have one flag per component and `base` must be
# non-negative. Returns TRUE when valid, otherwise a character vector of
# problem descriptions (the S4 validity convention).
setValidity("EngineWithActivity", function(object) {
  problems <- NULL
  if (length(object@active) != nComponents(object)) {
    problems <- c(problems, "number of object components not equal length of active")
  }
  if (any(object@base < 0)) {
    problems <- c(problems, "base is negative")
  }
  if (is.null(problems)) TRUE else problems
})
# The 'rand' method for an EngineWithActivity is a little bit
# tricky, since we do two things at once. First, we use the
# 'base' slot to exponentiate the random variables generated by
# the underlying Engine on the log scale. We treat base = 0 as
# a special case, which means that we should continue to work on
# the scale of the Engine. Second, we mask any inactive component
# by replacing the generated values with 0.
#
# Note that this is terribly inefficient if we only have a single
# homogeneous population, since we generate a certain amount of
# data only to throw it away. The power comes when we allow
# cancer disregulation to turn a block on or off, when the
# underlying data reappears.
setMethod("rand", "EngineWithActivity", function(object, n, ...) {
  # draw from the underlying Engine (log scale)
  signal <- callNextMethod()
  if (object@base > 0) {
    # base == 0 is the special "stay on the Engine's scale" case;
    # otherwise exponentiate back from the log scale
    signal <- object@base^signal
  }
  # expand the per-block activity flags to one flag per row, then
  # zero out the signal of every inactive gene-block
  rows.per.block <- unlist(lapply(object@components, nrow))
  activity.mask <- rep(object@active, times = rows.per.block)
  signal * activity.mask
})
setMethod("summary", "EngineWithActivity", function(object, ...) {
  callNextMethod()
  # Bug fix: the original printed the *count* of active genes under a
  # "Fraction of active genes" label and omitted the trailing newline,
  # gluing the line onto subsequent output. Label now matches the value.
  cat("Number of active genes:", sum(object@active), "\n")
})
|
428675c6a24651d5122a1861fabc9bb99ca8efcc
|
7bf73de2e26caf7765125fc351c4b9a7c68d9fe7
|
/man/R/sin_cerosDF.R
|
959adaff018b50c4431fe0eef10d26a8d714072e
|
[
"MIT"
] |
permissive
|
mariosandovalmx/tlamatini
|
dcd816b451aff9eba66f4e105a982383fdea78f7
|
5b9fb29713bb6e3f88a9096a00faf27514485401
|
refs/heads/master
| 2023-07-06T19:56:11.124336
| 2023-07-03T18:48:02
| 2023-07-03T18:48:02
| 411,068,930
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
sin_cerosDF.R
|
#' Remove all zeros from a data frame, keeping only complete observations
#'
#' Replaces every zero in the data frame with NA and keeps only the rows
#' with complete observations, printing the resulting data frame.
#' @param dataframe A data frame of variables.
#'
#' @return The data frame with zero-containing rows removed (invisibly,
#'   via \code{print}).
#' @export
#'
#' @examples
#' df<-sin_cerosDF(iris)
#' df
#' @encoding UTF-8
#' @importFrom stats complete.cases
sin_cerosDF <- function(dataframe){
  # mark zeros as missing, then keep only fully observed rows
  con.na <- dataframe
  con.na[con.na == 0] <- NA
  completos <- con.na[complete.cases(con.na), ]
  insight::print_color("Ceros removidos exitosamente.", "green")
  print(completos)
}
|
dcc7d21e2e5d90d7ded3cd952c50e6d78490b470
|
a0448d4b83075c557d179b7a9e4ba870a6998a20
|
/R/returnlevelplot.R
|
99f061b9deba2ce4f558096f7853836597d938fb
|
[] |
no_license
|
cran/distrMod
|
920b64674d2f1f25d1659096e05472aaee0bd8a2
|
8c7d22f2fcabc2b2a04980091307132611e7d511
|
refs/heads/master
| 2022-11-27T17:04:46.155165
| 2022-11-16T02:50:18
| 2022-11-16T02:50:18
| 17,695,551
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,647
|
r
|
returnlevelplot.R
|
################################################################
# return level - Plot functions in package distrMod
################################################################
setMethod("returnlevelplot", signature(x = "ANY",
                             y = "UnivariateDistribution"),
    function(x, ### observations
             y, ### distribution
             n = length(x), ### number of points to be plotted
             withIdLine = TRUE, ### shall line y=x be plotted in
             withConf = TRUE, ### shall confidence lines be plotted
             withConf.pw = withConf, ### shall pointwise confidence lines be plotted
             withConf.sim = withConf, ### shall simultaneous confidence lines be plotted
             plot.it = TRUE, ### shall be plotted at all (inherited from stats::qqplot)
             datax = FALSE, ### as in qqnorm
             MaxOrPOT = c("Max","POT"), ### used for block maxima or points over threshold
             npy = 365, ### number of observations per year
             threshold = if(is(y,"GPareto")) NA else 0,
             xlab = deparse(substitute(x)), ## x-label
             ylab = deparse(substitute(y)), ## y-label
             main = "",
             ..., ## further parameters
             width = 10, ## width (in inches) of the graphics device opened
             height = 5.5, ## height (in inches) of the graphics device opened}
             withSweave = getdistrOption("withSweave"), ## logical: if \code{TRUE}
             ## (for working with \command{Sweave}) no extra device is opened and height/width are not set
             mfColRow = TRUE, ## shall we use panel partition mfrow=c(1,1)?
             n.CI = n, ## number of points to be used for CI
             with.lab = FALSE, ## shall observation labels be plotted in
             lab.pts = NULL, ## observation labels to be used
             which.lbs = NULL, ## which observations shall be labelled
             which.Order = NULL, ## which of the ordered (remaining) observations shall be labelled
             which.nonlbs = NULL, ## which of the non-labelled observations shall be plotted
             attr.pre = FALSE, ## do indices refer to order pre or post ordering
             order.traf = NULL, ## an optional trafo; by which the observations are ordered (as order(trafo(obs))
             col.IdL = "red", ## color for the identity line
             lty.IdL = 2, ## line type for the identity line
             lwd.IdL = 2, ## line width for the identity line
             alpha.CI = .95, ## confidence level
             exact.pCI = (n<100), ## shall pointwise CIs be determined with exact Binomial distribution?
             exact.sCI = (n<100), ## shall simultaneous CIs be determined with exact kolmogorov distribution?
             nosym.pCI = FALSE, ## shall we use (shortest) asymmetric CIs?
             col.pCI = "orange", ## color for the pointwise CI
             lty.pCI = 3, ## line type for the pointwise CI
             lwd.pCI = 2, ## line width for the pointwise CI
             pch.pCI = par("pch"),## symbol for points (for discrete mass points) in pointwise CI
             cex.pCI = par("cex"),## magnification factor for points (for discrete mass points) in pointwise CI
             col.sCI = "tomato2", ## color for the simultaneous CI
             lty.sCI = 4, ## line type for the simultaneous CI
             lwd.sCI = 2, ## line width for the simultaneous CI
             pch.sCI = par("pch"),## symbol for points (for discrete mass points) in simultaneous CI
             cex.sCI = par("cex"),## magnification factor for points (for discrete mass points) in simultaneous CI
             added.points.CI = TRUE, ## should the CIs be drawn through additional points?
             cex.pch = par("cex"),## magnification factor for the plotted symbols (for backward compatibility only, cex.pts in the sequel)
             col.pch = par("col"),## color for the plotted symbols (for backward compatibility only, col.pts in the sequel)
             cex.pts = 1, ## magnification factor for labelled shown observations
             col.pts = par("col"),## color for labelled shown observations
             pch.pts = 19, ## symbol for labelled shown observations
             cex.npts = 1, ## magnification factor for non-labelled shown observations
             col.npts = grey(.5), ## color for non-labelled shown observations
             pch.npts = 20, ## symbol for non-labelled shown observations
             cex.lbs = par("cex"),## magnification factor for the plotted observation labels
             col.lbs = par("col"),## color for the plotted observation labels
             adj.lbs = par("adj"),## adj parameter for the plotted observation labels
             alpha.trsp = NA, ## alpha transparency to be added afterwards
             jit.fac = 0, ## jittering factor used for discrete distributions
             jit.tol = .Machine$double.eps, ## tolerance for jittering: if distance
                  #is smaller than jit.tol, points are considered replicates
             check.NotInSupport = TRUE, ## shall we check if all x lie in support(y)
             col.NotInSupport = "red", ## if preceding check TRUE color of x if not in support(y)
             with.legend = TRUE, ## shall a legend be plotted
             legend.bg = "white", ## background for the legend
             legend.pos = "topleft", ## position for the legend
             legend.cex = 0.8, ## magnification factor for the legend
             legend.pref = "", ## prefix for legend text
             legend.postf = "", ## postfix for legend text
             legend.alpha = alpha.CI, ## nominal level of CI
             debug = FALSE, ## shall additional debug output be printed out?
             withSubst = TRUE
    ){ ## return value as in stats::qqplot
    ## Overall flow: (1) capture the call and build a plotInfo record;
    ## (2) transform observations/quantiles to return-level coordinates
    ## via p2rl(); (3) resolve per-point graphical attributes and labels;
    ## (4) optionally subsample, plot points, identity curve and
    ## pointwise/simultaneous confidence bands; (5) return plotInfo
    ## invisibly. Relies on distrMod-internal helpers (.presubs,
    ## .makeLenAndOrder, .labelprep, .confqq, ...).
    mc <- match.call(call = sys.call(sys.parent(1)))
    dots <- match.call(call = sys.call(sys.parent(1)),
                       expand.dots = FALSE)$"..."
    args0 <- list(x = x, y = y, n = n, withIdLine = withIdLine,
                  withConf = withConf, withConf.pw = withConf.pw,
                  withConf.sim = withConf.sim, plot.it = plot.it, datax = datax,
                  xlab = xlab, ylab = ylab, width = width, height = height,
                  withSweave = withSweave, mfColRow = mfColRow,
                  n.CI = n.CI, with.lab = with.lab, lab.pts = lab.pts,
                  which.lbs = which.lbs, which.Order = which.Order,
                  order.traf = order.traf, col.IdL = col.IdL, lty.IdL = lty.IdL,
                  lwd.IdL = lwd.IdL, alpha.CI = alpha.CI, exact.pCI = exact.pCI,
                  exact.sCI = exact.sCI, nosym.pCI = nosym.pCI, col.pCI = col.pCI,
                  lty.pCI = lty.pCI, lwd.pCI = lwd.pCI, pch.pCI = pch.pCI,
                  cex.pCI = cex.pCI, col.sCI = col.sCI, lty.sCI = lty.sCI,
                  lwd.sCI = lwd.sCI, pch.sCI = pch.sCI, cex.sCI = cex.sCI,
                  added.points.CI = added.points.CI, cex.pch = cex.pch,
                  col.pch = col.pch, cex.lbs = cex.lbs, col.lbs = col.lbs,
                  adj.lbs = adj.lbs, alpha.trsp = alpha.trsp, jit.fac = jit.fac,
                  jit.tol = jit.tol, check.NotInSupport = check.NotInSupport,
                  col.NotInSupport = col.NotInSupport, with.legend = with.legend,
                  legend.bg = legend.bg, legend.pos = legend.pos,
                  legend.cex = legend.cex, legend.pref = legend.pref,
                  legend.postf = legend.postf, legend.alpha = legend.alpha,
                  debug = debug, withSubst = withSubst)
    plotInfo <- list(call = mc, dots=dots, args=args0)
    ## resolve block-maxima vs peaks-over-threshold mode; drives p2rl() below
    MaxOrPOT <- match.arg(MaxOrPOT)
    xcc <- as.character(deparse(mc$x))
    ## title/label substitution hook: %C = class, %A = date, %D = data name
    .mpresubs <- if(withSubst){
                   function(inx)
                    .presubs(inx, c("%C", "%A", "%D" ),
                          c(as.character(class(x)[1]),
                            as.character(date()),
                            xcc))
               }else function(inx)inx
    if(missing(xlab)){mc$xlab <- paste(gettext("Return level of"), xcc)}
    if(missing(ylab)){mc$ylab <- gettext("Return period (years)")}
    if(missing(main)) mc$main <- gettext("Return level plot")
    mcl <- as.list(mc)[-1]
    ## strip arguments that must not be forwarded to the base plot call
    mcl$datax <- NULL
    mcl$MaxOrPOT <- NULL
    mcl$npy <- NULL
    mcl$withSweave <- NULL
    mcl$mfColRow <- NULL
    mcl$type <-NULL
    mcl$debug <- NULL
    mcl$added.points.CI <- NULL
    if(is.null(mcl$datax)) datax <- FALSE
    force(x)
    ## for GPareto distributions, shift by the (possibly implicit) threshold
    thresh0 <- threshold
    if(is(y,"GPareto")){
       if(is.na(threshold)) thresh0 <- location(y)
       y <- y - thresh0
       x <- x + thresh0
    }
    ## jitter replicated observations (within jit.tol) to separate them visually
    rank0x <- rank(x)
    xj <- sort(x)
    if(any(.isReplicated(x, jit.tol))&&jit.fac>0)
       xj[.isReplicated(x, jit.tol)] <- jitter(x[.isReplicated(x, jit.tol)], factor=jit.fac)
    rank1x <- rank(xj)[rank0x]
    ind.x <- order(xj)
    xj <- sort(xj)
    ## quantile -> return period: -1/log F(q) for block maxima,
    ## 1/((1-F(q)) * npy) for peaks over threshold
    p2rl <- function(pp){
        pp <- p(y)(pp)
        return(if(MaxOrPOT=="Max") -1/log(pp) else 1/(1-pp)/npy)
    }
    pp <- ppoints(length(xj))
    yc.o <- q.l(y)(pp)
    ycl <- p2rl(yc.o)
    ### extend range somewhat
#    pyn <- p(y)(10^(seq(-1, 3.75 + log10(npy), by = 0.1)))
    xyall <- force(sort(unique(c(yc.o,x,
               q.l(y)(c(seq(0.01, 0.09, by = 0.01),(1:9)/10,
                        0.95, 0.99, 0.995, 0.999))
               ))))
    rxyall <- (max(xyall)-min(xyall))*0.6
    rxymean <- (max(xyall)+min(xyall))/2
    xyallc <- seq(from=rxymean-rxyall,to=rxymean+rxyall, length.out=400)
#    print(xyallc)
    pxyall <- p(y)(xyallc)
#    print(pxyall)
    pxyallc <- p2rl(xyallc)
    ## keep only grid points strictly inside the distribution's range
    xyallc <- xyallc[pxyall>0.00001 & pxyall<0.99999]
    pxyallc <- pxyallc[pxyall>0.00001 & pxyall<0.99999]
#    print(cbind(pxyallc,xyallc))
    if("support" %in% names(getSlots(class(y))))
       ycl <- sort(jitter(ycl, factor=jit.fac))
#-------------------------------------------------------------------------------
    ## per-observation graphical attributes (colors/sizes/labels), optionally
    ## alpha-blended, then split into labelled vs non-labelled points
    alp.v <- .makeLenAndOrder(alpha.trsp,ind.x)
    alp.t <- function(x,a1) if(is.na(x)) x else addAlphTrsp2col(x,a1)
    alp.f <- if(length(alpha.trsp)==1L && is.na(alpha.trsp))
                function(x,a) x else function(x,a) mapply(x,alp.t,a1=a)
    if(missing(cex.lbs)) cex0.lbs <- par("cex")
    cex0.lbs <- .makeLenAndOrder(cex.lbs,ind.x)
    if(missing(adj.lbs)) adj0.lbs <- par("adj")
    adj0.lbs <- .makeLenAndOrder(adj.lbs,ind.x)
    if(missing(col.lbs)) col0.lbs <- par("col")
    col0.lbs <- alp.f(.makeLenAndOrder(col.lbs,ind.x),alp.v)
    if(missing(lab.pts)||is.null(lab.pts)) lab0.pts <- ind.x else
        lab0.pts <- .makeLenAndOrder(lab.pts,ind.x)
    lbprep <- .labelprep(x = x, y = yc.o[rank1x], lab.pts = lab0.pts,
                         col.lbs = col0.lbs, cex.lbs = cex0.lbs,
                         adj.lbs = adj0.lbs, which.lbs = which.lbs,
                         which.Order = which.Order, order.traf = order.traf,
                         which.nonlbs = which.nonlbs)
    n.ns <- length(lbprep$ns)
    n.s <- length(lbprep$ord)
    shown <- c(lbprep$ord,lbprep$ns)
    xs <- x[shown]
    ycs <- (ycl[rank1x])[shown]
    ordx <- order(xs)
    xso <- xs[ordx]
    ycso <- ycs[ordx]
    if(missing(cex.pch)) cex.pch <- par("cex")
    if(missing(col.pch)) col.pch <- par("col")
    if(missing(cex.pts)) cex.pts <- if(missing(cex.pch)) 1 else cex.pch
    if(missing(col.pts)) col.pts <- if(missing(col.pch)) par("col") else col.pch
    if(missing(pch.pts)) pch.pts <- 19
    if(missing(cex.npts)) cex.npts <- 1
    if(missing(col.npts)) col.npts <- par("col")
    if(missing(pch.npts)) pch.npts <- 20
    if(with.lab) lab.pts <- lbprep$lab.pts
    ## attr.pre decides whether user-supplied per-point attributes refer to
    ## the original observation order (TRUE) or to the shown/ordered subset
    if(attr.pre){
       if(with.lab){
          col.lbs <- lbprep$col.lbs
          cex.lbs <- lbprep$cex.lbs
          adj.lbs <- lbprep$adj.lbs
       }
       cex.pts <- .makeLenAndOrder(cex.pts,ind.x)
       col.pts <- alp.f(.makeLenAndOrder(col.pts,ind.x),alp.v)
       pch.pts <- .makeLenAndOrder(pch.pts,ind.x)
       cex.pts <- cex.pts[shown]
       col.pts <- col.pts[shown]
       pch.pts <- pch.pts[shown]
    }else{
       ind.s <- 1:n.s
       ind.ns <- 1:n.ns
       if(with.lab){
          if(missing(lab.pts)||is.null(lab.pts)) lab.pts <- ind.ns else
              lab.pts <- .makeLenAndOrder(lab.pts,ind.ns)
          if(missing(cex.lbs)) cex.lbs <- par("cex")
          cex.lbs <- (.makeLenAndOrder(cex.lbs,ind.s))
          if(missing(adj.lbs)) adj.lbs <- par("adj")
          adj.lbs <- (.makeLenAndOrder(adj.lbs,ind.s))
          if(missing(col.lbs)) col.lbs <- par("col")
          col.lbs <- (alp.f(.makeLenAndOrder(col.lbs,ind.s),alp.v[lbprep$ord]))
       }
       cex.pts <- .makeLenAndOrder(cex.pts,ind.s)
       col.pts <- alp.f(.makeLenAndOrder(col.pts,ind.s),alp.v[lbprep$ord])
       pch.pts <- .makeLenAndOrder(pch.pts,ind.s)
       cex.npts <- .makeLenAndOrder(cex.npts,ind.ns)
       col.npts <- alp.f(.makeLenAndOrder(col.npts,ind.ns),alp.v[lbprep$ns])
       pch.npts <- .makeLenAndOrder(pch.npts,ind.ns)
       col.pts <- c(col.pts,col.npts)
       cex.pts <- c(cex.pts,cex.npts)
       pch.pts <- c(pch.pts,pch.npts)
    }
    cex.pts <- cex.pts[ordx]
    col.pts <- col.pts[ordx]
    pch.pts <- pch.pts[ordx]
#-------------------------------------------------------------------------------
    ## flag observations outside the support/range/gaps of y in a warning color
    if(check.NotInSupport){
       xo <- xso #x[ord.x]
       nInSupp <- which(xo < q.l(y)(0))
       nInSupp <- unique(sort(c(nInSupp,which( xo > q.l(y)(1)))))
       if("support" %in% names(getSlots(class(y))))
          nInSupp <- unique(sort(c(nInSupp,which( ! xo %in% support(y)))))
       if("gaps" %in% names(getSlots(class(y))))
          nInSupp <- unique(sort(c(nInSupp,which( .inGaps(xo,gaps(y))))))
       if(length(nInSupp)){
#          col.pch[nInSupp] <- col.NotInSupport
          col.pts[nInSupp] <- col.NotInSupport
          if(with.lab)
#             col.lbs[ord.x[nInSupp]] <- col.NotInSupport
              col.lbs[nInSupp] <- col.NotInSupport
       }
    }
    ## if fewer than all points are requested, subsample (labels disabled)
    if(n < length(x)){
       with.lab <- FALSE
       nos <- length(shown)
       idx <- sample(1:nos,size=n,replace=FALSE)
       cex.pts <- cex.pts[idx]
       col.pts <- col.pts[idx]
       pch.pts <- pch.pts[idx]
       xso <- xso[idx]
       ycso <- ycso[idx]
    }
    mcl <- .deleteItemsMCL(mcl)
    mcl$pch <- pch.pts
    mcl$cex <- cex.pts
    mcl$col <- col.pts
    mc$xlab <- .mpresubs(mcl$xlab)
    mc$ylab <- .mpresubs(mcl$ylab)
    if (!withSweave){
         devNew(width = width, height = height)
    }
    opar <- par("mfrow", no.readonly = TRUE)
    if(mfColRow) on.exit(do.call(par, list(mfrow=opar, no.readonly = TRUE)))
    if(mfColRow) opar1 <- par(mfrow = c(1,1), no.readonly = TRUE)
    ret <- list(x=xj,y=ycl)
    ## main scatter: empty frame over the extended grid, then the points;
    ## the return-period axis is drawn on a log scale
    if(plot.it){
       xallc1 <- sort(c(xj,xyallc))
       yallc1 <- sort(c(ycl,pxyallc))
       mcl$x <- mcl$y <- NULL
       logs <- if(datax) "y" else "x"
       if(!is.null(mcl$log)){
           if(grepl("y", eval(mcl$log))) logs <- "xy"
           if(grepl("x",eval(mcl$log)))
              warning("The x axis is logarithmic anyway.")
           mcl$log <- NULL
       }
       if(datax){
          mcl$xlab <- mc$xlab
          mcl$ylab <- mc$ylab
          plotInfo$plotArgs <- c(list(x=xallc1, y=yallc1, log=logs, type="n"),mcl)
          plotInfo$pointArgs <- c(list(x=xso, y=ycso), mcl)
       }else{
          mcl$ylab <- mc$xlab
          mcl$xlab <- mc$ylab
          plotInfo$plotArgs <- c(list(x=yallc1, y=xallc1, log=logs,type="n"),mcl)
          plotInfo$pointArgs <- c(list(x=ycso, y=xso), mcl)
       }
       do.call(plot, plotInfo$plotArgs)
       plotInfo$usr <- par("usr")
       do.call(points, plotInfo$pointArgs)
    }
    ## optional per-observation text labels
    if(with.lab&& plot.it){
       lbprep$y0 <- p2rl(lbprep$y0)
       xlb0 <- if(datax) lbprep$x0 else lbprep$y0
       ylb0 <- if(datax) lbprep$y0 else lbprep$x0
       plotInfo$textArgs <- list(x = xlb0, y = ylb0, labels = lbprep$lab,
            cex = lbprep$cex, col = lbprep$col, adj = adj.lbs)
       text(x = xlb0, y = ylb0, labels = lbprep$lab,
            cex = lbprep$cex, col = lbprep$col, adj = adj.lbs)
    }
    ## theoretical return-level curve and confidence bands
    if(withIdLine){
       if(plot.it){
          if(datax){
             plotInfo$IdLineArgs <- list(xyallc,pxyallc,col=col.IdL,lty=lty.IdL,lwd=lwd.IdL)
             lines(xyallc,pxyallc,col=col.IdL,lty=lty.IdL,lwd=lwd.IdL)
          }else{
             plotInfo$IdLineArgs <- list(pxyallc,xyallc,col=col.IdL,lty=lty.IdL,lwd=lwd.IdL)
             lines(pxyallc,xyallc,col=col.IdL,lty=lty.IdL,lwd=lwd.IdL)
          }
       }
       qqb <- NULL
       if(#is(y,"AbscontDistribution")&&
          withConf){
          ## build a grid of quantiles for the CI computation, topping it up
          ## with support points / random draws until n.CI points are reached
          if(added.points.CI){
             xy <- unique(sort(c(x,xj,xyallc,yc.o)))
          }else{
             xy <- unique(sort(c(x,xj,yc.o)))
          }
          xy <- xy[!.NotInSupport(xy,y)]
          lxy <- length(xy)
          if(is(y,"DiscreteDistribution")){
             n0 <- min(n.CI, length(support(y)))
             n1 <- max(n0-lxy,0)
             if (n1 >0 ){
                 notyetInXY <- setdiff(support(y), xy)
                 xy0 <- sample(notyetInXY, n1)
                 xy <- sort(unique(c(xy,xy0)))
             }
          }else{
             if(lxy < n.CI){
                n1 <- (n.CI-lxy)%/%3
                xy0 <- seq(min(xy),max(xy),length=n1)
                xy1 <- r(y)(n.CI-lxy-n1)
                xy <- sort(unique(c(xy,xy0,xy1)))
             }
          }
          #qqb <- qqbounds(sort(unique(xy)),y,alpha.CI,n,withConf.pw, withConf.sim,
          #                exact.sCI,exact.pCI,nosym.pCI, debug = debug)
          #qqb$crit <- p2rl(qqb$crit)
          if(plot.it){
             qqb <- .confqq(xy, y, datax, withConf.pw, withConf.sim, alpha.CI,
                            col.pCI, lty.pCI, lwd.pCI, pch.pCI, cex.pCI,
                            col.sCI, lty.sCI, lwd.sCI, pch.sCI, cex.sCI,
                            n, exact.sCI = exact.sCI, exact.pCI = exact.pCI,
                            nosym.pCI = nosym.pCI, with.legend = with.legend,
                            legend.bg = legend.bg, legend.pos = legend.pos,
                            legend.cex = legend.cex, legend.pref = legend.pref,
                            legend.postf = legend.postf, legend.alpha = legend.alpha,
                            qqb0=NULL, transf0=p2rl, debug = debug)
          }
       }}
    plotInfo <- c(plotInfo, ret=ret,qqb=qqb)
    class(plotInfo) <- c("plotInfo","DiagnInfo")
    return(invisible(plotInfo))
    })
## into distrMod
## returnlevelplot diagnostic for empirical data x against a parametric model
## family y: captures the user's original call (so default axis labels show
## the expressions as typed), swaps y for its model distribution, and
## re-dispatches to the ("ANY","UnivariateDistribution") method.
## Returns a "plotInfo"/"DiagnInfo" object invisibly.
setMethod("returnlevelplot", signature(x = "ANY",
y = "ProbFamily"), function(x, y,
n = length(x), withIdLine = TRUE, withConf = TRUE,
withConf.pw = withConf, withConf.sim = withConf,
plot.it = TRUE, xlab = deparse(substitute(x)),
ylab = deparse(substitute(y)), ...){
## recover the call as typed by the user (one frame up because of S4 dispatch)
mc <- match.call(call = sys.call(sys.parent(1)))
## same call with ... left unexpanded, to record the extra plot arguments
mc1 <- match.call(call = sys.call(sys.parent(1)), expand.dots=FALSE)
## deparsed x and y expressions, used to build the default axis labels
mcx <- as.character(deparse(mc$x))
mcy <- as.character(deparse(mc$y))
dots <- mc1$"..."
## snapshot of the effective argument values, stored in the returned plotInfo
args0 <- list(x = x, y = y,
n = if(!missing(n)) n else length(x),
withIdLine = withIdLine, withConf = withConf,
withConf.pw = if(!missing(withConf.pw)) withConf.pw else if(!missing(withConf)) withConf else NULL,
withConf.sim = if(!missing(withConf.sim)) withConf.sim else if(!missing(withConf)) withConf else NULL,
plot.it = plot.it, xlab = xlab, ylab = ylab)
plotInfo <- list(call=mc, dots=dots, args=args0)
## default labels only when the user did not supply any
if(missing(xlab)) mc$xlab <- paste(gettext("Return Level of"), mcx)
if(missing(ylab)) mc$ylab <- paste(gettext("Return Period at"), mcy)
mcl <- as.list(mc)[-1]
## replace the family by its model distribution before re-dispatching
mcl$y <- yD <- y@distribution
if(!is(yD,"UnivariateDistribution"))
stop("Not yet implemented.")
## delegate the actual plotting to the distribution method
retv <- do.call(getMethod("returnlevelplot", signature(x="ANY", y="UnivariateDistribution")),
args=mcl)
## drop the delegate's own bookkeeping entries; keep only the graphics info
retv$call <- retv$dots <- retv$args <- NULL
plotInfo <- c(plotInfo,retv)
class(plotInfo) <- c("plotInfo","DiagnInfo")
return(invisible(plotInfo))
})
## returnlevelplot diagnostic for data x against a fitted model described by
## an "Estimate" object y: reconstructs the parametric family the estimate
## came from, shifts it to the fitted parameter value, and re-dispatches to
## the ("ANY","ProbFamily") method. Returns a "plotInfo"/"DiagnInfo" object
## invisibly.
setMethod("returnlevelplot", signature(x = "ANY",
y = "Estimate"), function(x, y,
n = length(x), withIdLine = TRUE, withConf = TRUE,
withConf.pw = withConf, withConf.sim = withConf,
plot.it = TRUE, xlab = deparse(substitute(x)),
ylab = deparse(substitute(y)), ...){
## recover the call as typed by the user (one frame up because of S4 dispatch)
mc <- match.call(call = sys.call(sys.parent(1)))
mc1 <- match.call(call = sys.call(sys.parent(1)), expand.dots=FALSE)
## deparsed x and y expressions, used for default labels
mcx <- as.character(deparse(mc$x))
mcy <- as.character(deparse(mc$y))
dots <- mc1$"..."
## snapshot of the effective argument values, stored in the returned plotInfo
args0 <- list(x = x, y = y,
n = if(!missing(n)) n else length(x),
withIdLine = withIdLine, withConf = withConf,
withConf.pw = if(!missing(withConf.pw)) withConf.pw else if(!missing(withConf)) withConf else NULL,
withConf.sim = if(!missing(withConf.sim)) withConf.sim else if(!missing(withConf)) withConf else NULL,
plot.it = plot.it, xlab = xlab, ylab = ylab)
plotInfo <- list(call=mc, dots=dots, args=args0)
if(missing(xlab)) mc$xlab <- paste(gettext("Return Level of"), mcx)
mcl <- as.list(mc)[-1]
## rebuild the parameter object from the fitted estimate
param <- ParamFamParameter(main=untransformed.estimate(y), nuisance=nuisance(y),
fixed=fixed(y))
## recover the ParamFamily object from the call that produced the estimate
es.call <- y@estimate.call
nm.call <- names(es.call)
PFam <- NULL
if("ParamFamily" %in% nm.call)
PFam <- eval(as.list(es.call)[["ParamFamily"]])
if(is.null(PFam))
stop("There is no object of class 'ProbFamily' in the call of 'x'")
## shift the family to the fitted parameter and re-dispatch
PFam0 <- modifyModel(PFam, param)
mcl$y <- PFam0
if(missing(ylab)) mcl$ylab <- paste(gettext("Return Period at fitted"), name(PFam0), "\n -- fit by ", mcy)
retv <- do.call(getMethod("returnlevelplot", signature(x="ANY", y="ProbFamily")),
args=mcl)
## drop the delegate's own bookkeeping entries; keep only the graphics info
retv$call <- retv$dots <- retv$args <- NULL
plotInfo <- c(plotInfo,retv)
class(plotInfo) <- c("plotInfo","DiagnInfo")
return(invisible(plotInfo))
})
|
4cf6a6f907d8d8f92bde01d2641734f06d077524
|
1018cb5b9b71e476a2e26a13b874145de5eb2304
|
/SQL in R.R
|
76c23fe178f838bc4adc892e4d27d1d0c23ed14c
|
[] |
no_license
|
lilatsopelakou/Rental-Car-SQL
|
71c987eb404fe7e2493236e67e50f761b39f171e
|
6a5adfc30d66cad5104fecf26cee67d22cbad38a
|
refs/heads/master
| 2020-04-05T16:02:52.908988
| 2018-11-10T15:44:55
| 2018-11-10T15:44:55
| 156,994,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
SQL in R.R
|
library(RMySQL)
# Load the customer CSV into R.
# Fixed: the original read into `customer` while all later code used the
# undefined name `customer_csv`.
customer_csv <- read.csv(file = "C:/Users/User/Desktop/Assignment_1_Customers.csv",
                         header = TRUE, sep = ",", stringsAsFactors = FALSE)
# Connect to the database.
# SECURITY NOTE: credentials are hard-coded; prefer environment variables or
# a config file outside version control.
mydb <- dbConnect(MySQL(), user = 'root', password = 'Lt2091992!',
                  dbname = 'dmbi_assignment1', host = '127.0.0.1')
# Query the existing table to learn its column names.
# Fixed: this dbSendQuery() was commented out, leaving `rs1` undefined for
# the dbFetch() below.
rs1 <- dbSendQuery(mydb, "SELECT * FROM customer")
data1 <- dbFetch(rs1, n = -1)
dbClearResult(rs1)  # release the result set before issuing further queries
# Make the CSV's column names identical to the table's column names on the DB
names(customer_csv) <- names(data1)
# Populate the table from the CSV
dbWriteTable(mydb, name = "Customer", value = customer_csv, overwrite = FALSE,
             append = TRUE, row.names = FALSE)
# Check that the table is filled in
rs2 <- dbSendQuery(mydb, "SELECT * FROM customer")
data2 <- dbFetch(rs2, n = -1)
head(data2)
str(data2)
dbClearResult(rs2)  # rs2 is the only open result set at this point
dbDisconnect(mydb)
|
93cd2efe2281d48c6e99854653219e17e8cc2429
|
cf145b6637e0b8f031c775ccb57b374c7e4ba14f
|
/man/cforest_control.Rd
|
1bada8f48b9033999907253e54e95d68aa02b1ce
|
[] |
no_license
|
cran/party
|
67e36fa4051a5961e255c2aadc3918abfe71cc66
|
5d781906fc192ea41d12b5dbc2cfd563fb30769c
|
refs/heads/master
| 2023-04-10T06:32:27.376616
| 2023-03-17T11:10:09
| 2023-03-17T11:10:09
| 17,698,357
| 4
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,886
|
rd
|
cforest_control.Rd
|
\name{Control Forest Hyper Parameters}
\alias{cforest_control}
\alias{cforest_classical}
\alias{cforest_unbiased}
\title{ Control for Conditional Tree Forests }
\description{
Various parameters that control aspects of the `cforest' fit via
its `control' argument.
}
\usage{
cforest_unbiased(\dots)
cforest_classical(\dots)
cforest_control(teststat = "max",
testtype = "Teststatistic",
mincriterion = qnorm(0.9),
savesplitstats = FALSE,
ntree = 500, mtry = 5, replace = TRUE,
fraction = 0.632, trace = FALSE, \dots)
}
\arguments{
\item{teststat}{ a character specifying the type of the test statistic
to be applied. }
\item{testtype}{ a character specifying how to compute the distribution of
the test statistic. }
\item{mincriterion}{ the value of the test statistic (for \code{testtype == "Teststatistic"}),
or 1 - p-value (for other values of \code{testtype}) that
must be exceeded in order to implement a split. }
\item{mtry}{ number of input variables randomly sampled as candidates
at each node for random forest like algorithms. Bagging, as special case
of a random forest without random input variable sampling, can
be performed by setting \code{mtry} either equal to \code{NULL} or
manually equal to the number of input variables.}
\item{savesplitstats}{ a logical determining whether the process of standardized
two-sample statistics for split point estimate
is saved for each primary split.}
\item{ntree}{ number of trees to grow in a forest.}
\item{replace}{ a logical indicating whether sampling of observations is
done with or without replacement.}
\item{fraction}{ fraction of number of observations to draw without
replacement (only relevant if \code{replace = FALSE}).}
\item{trace}{ a logical indicating if a progress bar shall be printed
while the forest grows.}
\item{\dots}{ additional arguments to be passed to
\code{\link{ctree_control}}.}
}
\details{
All three functions return an object of class \code{\link{ForestControl-class}}
defining hyper parameters to be specified via the \code{control} argument
of \code{\link{cforest}}.
The arguments \code{teststat}, \code{testtype} and \code{mincriterion}
determine how the global null hypothesis of independence between all input
variables and the response is tested (see \code{\link{ctree}}). The
argument \code{nresample} is the number of Monte-Carlo replications to be
used when \code{testtype = "MonteCarlo"}.
A split is established when the sum of the weights in both daughter nodes
is larger than \code{minsplit}, this avoids pathological splits at the
borders. When \code{stump = TRUE}, a tree with at most two terminal nodes
is computed.
The \code{mtry} argument regulates a random selection of \code{mtry} input
variables in each node. Note that here \code{mtry} is fixed to the value 5 by
default for merely technical reasons, while in \code{\link[randomForest]{randomForest}}
the default values for classification and regression vary with the number of input
variables. Make sure that \code{mtry} is defined properly before using \code{cforest}.
It might be informative to look at scatterplots of input variables against
the standardized two-sample split statistics, those are available when
\code{savesplitstats = TRUE}. Each node is then associated with a vector
whose length is determined by the number of observations in the learning
sample and thus much more memory is required.
The number of trees \code{ntree} can be increased for large numbers of input variables.
Function \code{cforest_unbiased} returns the settings suggested
for the construction of unbiased random forests (\code{teststat = "quad", testtype = "Univ",
replace = FALSE}) by Strobl et al. (2007)
and is the default since version 0.9-90.
Hyper parameter settings mimicking the behaviour of
\code{\link[randomForest]{randomForest}} are available in
\code{cforest_classical} which have been used as default up to
version 0.9-14.
Please note that \code{\link{cforest}}, in contrast to
\code{\link[randomForest]{randomForest}}, doesn't grow trees of
maximal depth. To grow large trees, set \code{mincriterion = 0}.
}
\value{
An object of class \code{\link{ForestControl-class}}.
}
\references{
Carolin Strobl, Anne-Laure Boulesteix, Achim Zeileis and Torsten Hothorn (2007).
Bias in Random Forest Variable Importance Measures: Illustrations, Sources and
a Solution. \emph{BMC Bioinformatics}, \bold{8}, 25. DOI: 10.1186/1471-2105-8-25
}
\keyword{misc}
|
d4460a67d63c51a54de61211e7fcd263986479a2
|
9c80e6fb2840bc832983d506b53a92d60c7e8e79
|
/Stat/R/Example-9-1-1.r
|
b6b9dbf83371a9763800e36c9ce8a6f74f204bd2
|
[] |
no_license
|
AppliedStat/class
|
fd08dd570705634d42b3412e08679ce37aec9389
|
22da5f30b13a2726efab7e137dbe17d31323529b
|
refs/heads/master
| 2023-07-23T17:04:08.986749
| 2023-07-15T04:14:18
| 2023-07-15T04:14:18
| 212,270,114
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,344
|
r
|
Example-9-1-1.r
|
# ======================================================
# Example 9.1.1 on Page 425
# Randomness check for a sequence of digits:
#   H0: the digits are random  vs  H1: they are not random.
# ------------------------------------------------------
data <- c(5, 8, 3, 1, 9, 4, 6, 7, 9, 2, 6, 3, 0,
          8, 7, 5, 1, 3, 6, 2, 1, 9, 5, 4, 8, 0,
          3, 7, 1, 4, 6, 0, 4, 3, 8, 2, 7, 3, 9,
          8, 5, 6, 1, 8, 7, 0, 3, 5, 2, 5, 2)
dist <- diff(data)   # successive differences between neighbouring digits
length(data)
# Category "SAME" (difference of zero)
sum(dist == 0)                                                  # exact comparison: risky with floats
sum(dist^2 < 0.001)                                             # tolerance-based: safer
# Category "one away" (difference of 1, or 9 due to wrap-around)
sum(abs(dist) == 1 | abs(dist) == 9)                            # exact comparison: risky
sum(abs(abs(dist) - 1) < 0.001 | abs(abs(dist) - 9) < 0.001)    # safer
# Category "other"
sum((abs(dist) - 1) >= 0.001)                                   # wrong: also counts the "9" cases
sum((abs(dist) - 1) >= 0.001 & abs(abs(dist) - 9) >= 0.001)     # correct
# ------------------------------------------------------
# Chi-square goodness-of-fit statistic computed by hand
y1 <- 0; y2 <- 8; y3 <- 42              # observed counts per category
p10 <- 1/10; p20 <- 2/10; p30 <- 7/10   # hypothesised category probabilities
n <- y1 + y2 + y3
Q2 <- (y1 - n*p10)^2 / (n*p10) + (y2 - n*p20)^2 / (n*p20) + (y3 - n*p30)^2 / (n*p30)
# chi-square critical value at level 0.05 with df = 2
qchisq(1 - 0.05, df = 2)
# Q2 exceeds the critical value, so H0 is rejected
# ------------------------------------------------------
# Same test via the built-in R function
chisq.test(x = c(0, 8, 42), p = c(1/10, 2/10, 7/10))
# ------------------------------------------------------
# Note: generic observed/expected formulation of the same statistic
O <- c(y1, y2, y3)
E <- n * c(p10, p20, p30)
sum((O - E)^2 / E)
|
d5f0380cf9fc00e6de6060314bdc2adbadb13c38
|
62ba22745ba13b3afaface326d217480980bbaed
|
/R/umlca.r
|
d0015d4837dcefc26f76227740877dab9be627b5
|
[] |
no_license
|
cran/iv
|
6fa670f0c8de03330352b7e1442f2a0d7292ff48
|
fbf5f635935a9997e895843150ac16e57a82b655
|
refs/heads/master
| 2016-09-16T14:16:12.453192
| 2011-10-26T00:00:00
| 2011-10-26T00:00:00
| 17,718,922
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
umlca.r
|
#need more attributes, col etc
# umlmerge: combine several connector objects into one composite object of
# class "umlca" ("extend" is a project helper defined elsewhere).
# The composite records its number of parts (n) and takes col/fill from the
# first part; the first part gets a back-reference to the composite in $m.
umlmerge = function (...)
{ m = extend (list (...), "umlca")
# NOTE(review): `con` is a copy of each list element, so `con$m = 0` does not
# modify the elements of m unless they are environments/reference objects --
# verify whether this line has any effect.
for (con in m) con$m = 0
m$n = length (m)
m$col = m [[1]]$col
m$fill = m [[1]]$fill
m [[1]]$m = m
invisible (m)
}
# S3 plot method for composite "umlca" objects.
# Gathers the source vertex (v1) of every part and draws a north-facing
# composite arrow towards the target vertex (v2) of the first part.
# No input validation; all composite arrows are assumed north-facing.
plot.umlca <- function(ca, ...) {
  sources <- lapply(seq_len(ca$n), function(k) ca[[k]]$v1)
  target <- ca[[1]]$v2
  .umlca.north(sources, target, fill = ca$fill)
}
# Draw a north-facing composite arrow: a vertical stub drops from each source
# vertex to a horizontal "bus" placed midway, and a single vertical segment
# with an arrowhead continues from the bus to the target.
# Assumes every vertex has x, y and h (height) attributes.
.umlca.north <- function(sources, target, ...) {
  sxs <- as.numeric(unlist(lapply(sources, function(v) v$x)))
  sys <- as.numeric(unlist(lapply(sources, function(v) v$y - v$h / 2)))
  z1 <- target$y + target$h / 2   # top edge of the target box
  z2 <- (z1 + min(sys)) / 2       # height of the horizontal bus
  segments(min(sxs), z2, max(sxs), z2)   # the bus itself
  segments(target$x, z2, target$x, z1)   # bus -> target
  segments(sxs, sys, sxs, z2)            # sources -> bus
  .arrowhead(target$x, z2, target$x, z1, ...)
}
|
565fecff94dcd5036bb186147fcbe97229c992db
|
d7cfe7ae0d92f3071640bbb2a19aaf0ca567b154
|
/ch04-flowingdata_subscribers/book.R
|
2a7aa3cd4221cc08b6980ded1b414273ec83ed8f
|
[
"MIT"
] |
permissive
|
yuanyangwu/note-on-visualize-this
|
1b414c698ed8cb35845967b5fa9e98ce75b9bbcd
|
e090ecfcbfd70759ba5278883436e1586e42a6b0
|
refs/heads/master
| 2020-04-06T04:30:56.186268
| 2014-02-18T13:33:47
| 2014-02-18T13:33:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
book.R
|
# Plot daily FlowingData subscriber counts (Visualize This, ch. 4 exercise).
# NOTE(review): hard-coded absolute path -- adjust before running elsewhere.
setwd("C:/Users/wuf2/Documents/GitHub/note-on-visualize-this/ch04-flowingdata_subscribers")
subscribers <- read.csv("flowingdata_subscribers.csv", sep=",", header=TRUE)
# scatter plot of the subscriber counts
plot(subscribers$Subscribers, type="p", ylim=c(0, 30000))
# same data as vertical bars ("high-density" plot) with axis labels ...
plot(subscribers$Subscribers, type="h", ylim=c(0, 30000),
xlab="Day", ylab="Subscribers")
# ... and filled points layered on top of the bars
points(subscribers$Subscribers, pch=19, col="black")
|
3a0f0320e2f28b2d9343695238f5abb3eaf96f71
|
126443f109bff3a83ee287262c76c7cb4e6d811e
|
/rmse_count.R
|
24577c4b0c43d16554c28d790b2c3255cb88d3a8
|
[] |
no_license
|
sumeetkr13/SlopeOne
|
33e9cb616f1530e8ff40dd51a4baae89e344fea4
|
448c445a3a00ec9193bf2db462652ea774d626f9
|
refs/heads/master
| 2020-03-11T06:50:23.746524
| 2016-04-04T14:41:30
| 2016-04-04T14:41:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
rmse_count.R
|
#
# author vedraiyani
#
# Compute the RMSE of predicted vs. actual ratings on the MovieLens u2 test
# set. Relies on a predict_rating(user, item) function defined elsewhere.
#
# @return numeric vector: c(RMSE, RMSE as a percentage of the 1..5 rating
#         scale, number of test ratings)
#
rmse_count <- function() {
  # read the tab-separated test set (columns: user, item, rating, timestamp);
  # arguments named for clarity (was positional: header = FALSE, sep = "\t")
  data <- read.csv("./ml-100k/u2.test", header = FALSE, sep = "\t")
  count <- nrow(data)  # was length(data[, 1])
  sum <- 0
  # seq_len() is safe when the file is empty (1:0 would iterate twice)
  for (i in seq_len(count)) {
    predictedRating <- predict_rating(data[i, 1], data[i, 2])
    sum <- sum + (data[i, 3] - predictedRating)^2
  }
  error <- sqrt(sum / count)
  return(c(error, error * 100 / 5, count))
}
|
ce0355b78c797a881099082ab18475b95df7e038
|
6921b02c3c01f24641ebea68151743b6c5bdde2b
|
/download_data.R
|
8f7aa13b257dd16115dde9d7eb35830daacb432f
|
[] |
no_license
|
dexter11235813/UCSD-bus-route
|
50b53e7e35923cb706ca854e476c91f36f3f9ef0
|
5cfabf3b474a815b766baca332a0cb873e1f23bf
|
refs/heads/master
| 2021-01-12T16:14:26.134976
| 2016-10-27T04:12:56
| 2016-10-27T04:12:56
| 71,954,318
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,297
|
r
|
download_data.R
|
library(rvest)
library(stringr)
source("~/UCSD-bus\ map/get_text.R")
route_id = c('2092','312','314','1263','1264','1114','1113','3442','3440','3159','1098','2399','1434','313','3849')
route = route_id[8]
##### Data Scraping
# Poll the UCSD bus vehicle-location page for one route every 5 seconds and
# append the raw page text to Route<id>.txt. Runs until interrupted.
#   route_id: a single route identifier (character scalar).
get_data = function(route_id)
{
  url <- paste0("https://ucsdbus.com/Route/", route_id[1], "/Vehicles")
  i <- 0
  while (TRUE) {                           # was while(T)
    data <- read_html(url) %>% html_text()
    print(paste0("getting information ", i))   # fixed typo "infromation"
    i <- i + 1
    # Fixed: the output filename used the *global* `route` instead of the
    # function argument, so calling get_data with another id would still
    # write to the globally-selected route's file.
    write(data, paste0("Route", route_id[1], ".txt"), append = TRUE)
    Sys.sleep(5)                           # be polite to the server
  }
}
get_data(route)
######### Data Processing
# Parse the scraped raw text back into a data frame, one row per snapshot,
# using the project-level prep() helper (sourced from get_text.R).
dat <- read.table(paste0("Route", route, ".txt"), stringsAsFactors = FALSE, fill = TRUE)
# Build all rows first and bind once: avoids the O(n^2) rbind-in-a-loop of
# the original, and also fixes the single-row case (the old `for (i in 2:nrow)`
# iterated backwards over c(2, 1) when nrow(dat) == 1).
record <- do.call(rbind, lapply(seq_len(nrow(dat)), function(i) prep(dat[i, ])))
record$Latitude <- NULL
write.csv(record, file = paste0("Route", route, ".csv"))
##### Data Plotting
library(ggmap)
library(ggplot2)
# re-read the processed route file; the first column is the row index added
# by write.csv, so drop it
data = read.csv(paste0("Route",route,".csv"))
data = data[,-1]
temp = data[,c("Name","Longitude","Latitude")]
temp$Name = as.factor(temp$Name)
# NOTE(review): longitudes are negated here -- presumably the scraper stored
# them as positive magnitudes for a western-hemisphere location; confirm.
temp$Longitude = -temp$Longitude
# base map centred on the median observed position
map = get_map(location = c(lon = median(temp$Longitude),lat = median(temp$Latitude)),zoom = 14)
# one coloured point per bus-stop name; the line layer is left disabled
map_plot = ggmap(map) +
geom_point(data = temp,aes(x = Longitude,y = Latitude,colour = Name)) #+
#geom_line(data = temp,aes(x = Longitude,y = Latitude,colour = Name))
map_plot
|
4e4a46c1f266c6797fb0e826975a23829dbd84d8
|
086b3d93a0d22a0beadea74150404a7919a28e66
|
/QE_Functions/priming/M_constraint_prim.R
|
0535e7f6699c82429d0af0abe0529d5ad6dc0e25
|
[] |
no_license
|
mingkaijiang/QEframework
|
443a9b0c662f44843c8395f0090be8c78363565d
|
16a3860877bf8c4815b4ad0bce6e460ab3aec36a
|
refs/heads/master
| 2021-06-24T05:13:25.654618
| 2019-04-26T04:03:36
| 2019-04-26T04:03:36
| 118,394,762
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,417
|
r
|
M_constraint_prim.R
|
### Function for nutrient N constraint in medium term
# considering priming effect on slow SOM
#
# Arguments:
#   df     : vector of scenario values passed element-wise to soil_coef_prim()
#   a      : data frame of allocation coefficients and plant N:C ratios
#            (columns af, ar, aw, ariz, nfl, nr, nw are used below)
#   C_pass : passive SOM pool size in g C m-2
#   C_slow : slow SOM pool size in g C m-2
#            NOTE(review): not referenced below -- the slow pool is recomputed
#            from NPP inside fPC; confirm whether this argument is intentional.
#   Nin_L  : fixed external N input in g N m-2 yr-1
#
# Globals assumed in scope: Texture, ncp, ncs, leachn, soil_coef_prim().
# Returns a data frame with columns NPP (kg C m-2 yr-1) and ariz.
M_constraint_prim <- function(df, a, C_pass, C_slow, Nin_L) {
# passed are df and a, the allocation and plant N:C ratios
# parameters :
# Nin is fixed N inputs (N deposition and fixation) in g m-2 yr-1 (could vary fixation)
# nleach is the rate of n leaching of the mineral pool (per year)
# Tsoil is effective soil temperature for decomposition
# Texture is the fine soil fraction
# ligfl and ligrl are the lignin:C fractions in the foliage and root litter
# Cpass is the passive pool size in g C m-2
# ncp is the NC ratio of the passive pool in g N g-1 C
# set up stuffs
len <- length(df)
ans <- c()
# burial fractions
pas <- 0.996 - (0.85-0.68*Texture)
psa <- 0.42
ppa <- 0.45
pap <- 0.004
psp <- 0.03
# solve, for each scenario, the NPP at which N supply balances N demand
for (i in 1:len) {
# fPC(NPP) is the N-balance residual; its root is the equilibrium NPP
fPC <- function(NPP) {
# passive and slow pool burial
pass <- soil_coef_prim(df[i], a[i,], NPP)
# again, exclude exudation from root allocation
omega_ap <- a[i,]$af*pass$omega_af_pass + (a[i,]$ar-a[i,]$ar*a[i,]$ariz)*pass$omega_ar_pass + a[i,]$aw*pass$omega_aw_pass
omega_as <- a[i,]$af*pass$omega_af_slow + (a[i,]$ar-a[i,]$ar*a[i,]$ariz)*pass$omega_ar_slow + a[i,]$aw*pass$omega_aw_slow
# Calculate C slow based on exudation and new decomposition values
C_slow_new <- omega_as*NPP/pass$decomp_slow/(1-pass$qq_slow)*1000.0
# equation for N constraint with passive, slow, wood, and leaching
Npass <- (1-pass$qq_pass) * pass$decomp_pass * C_pass * ncp
Nslow <- (1-pass$qq_slow) * pass$decomp_slow * C_slow_new * ncs
U0 <- Nin_L + Npass + Nslow
nwood <- a[i,]$aw*a[i,]$nw
nburial <- omega_ap*ncp + omega_as*ncs
nleach <- leachn / (1-leachn) * (a[i,]$nfl*a[i,]$af + a[i,]$nr*a[i,]$ar
+ a[i,]$nw*a[i,]$aw)
NPP_NC <- U0 / (nleach + nwood + nburial)
# returned in kg C m-2 yr-1
out <- NPP_NC*10^-3 - NPP
}
ans[i] <- uniroot(fPC,interval=c(0.1,20), trace=T)$root
}
out <- data.frame(ans, a$ariz)
colnames(out) <- c("NPP", "ariz")
return(out)
}
|
2a707132045cc89c8b671eade47ff6903a5573db
|
bcfc8846696469235da486d23f9151600fb2d81b
|
/tests/testthat/test_methods.R
|
9dfafa4db27aa02bc6bd41fe535346157a563aa9
|
[
"Apache-2.0"
] |
permissive
|
jdyen/growmodr
|
6e187b5d56d03e21a03be91440e24bf05846f3a8
|
93ed6108d4d78b66946fc95730e56ec9b6e8cf07
|
refs/heads/rename-package
| 2021-01-16T07:25:39.208954
| 2017-08-11T05:49:46
| 2017-08-11T05:49:46
| 99,996,129
| 3
| 0
| null | 2017-08-11T05:55:40
| 2017-08-11T05:51:48
|
C++
|
UTF-8
|
R
| false
| false
| 5,366
|
r
|
test_methods.R
|
# test methods for growmod S3 class
# Fixture setup: fit a small collection of growth models on simulated data --
# single models with/without blocks and predictors, multi-model fits, and
# leave-one-out cross-validated versions -- for use by the method tests below.
# ITER/CHAINS are kept tiny so the fixtures are fast; the resulting sampler
# warnings are expected and suppressed (SW), and sampler console output is
# swallowed by capture.output().
library(growmod)
SEED <- 12345
set.seed(SEED)
ITER <- 10
CHAINS <- 2
SW <- suppressWarnings
data_test <- growmod_sim()
capture.output(
SW(mod1 <- growmod(size ~ (index | block / predictors),
data = data_test,
model = 'hillslope',
n_iter = ITER,
n_chains = CHAINS)),
SW(mod2 <- growmod(size ~ (index | block),
data = data_test,
model = 'hillslope',
n_iter = ITER,
n_chains = CHAINS)),
SW(mod3 <- growmod(size ~ (index | block / predictors),
data = data_test,
model = 'hillslope',
n_iter = ITER,
n_chains = CHAINS)),
SW(mod4 <- growmod(size ~ index,
data = data_test,
model = 'hillslope',
n_iter = ITER,
n_chains = CHAINS)),
SW(mod_multi <- growmod(size ~ (index | block / predictors),
data = data_test,
model = c('hillslope',
'power2'),
n_iter = ITER,
n_chains = CHAINS)),
SW(mod_multi_noblock <- growmod(size ~ index,
data = data_test,
model = c('hillslope',
'hillslope_log'),
n_iter = ITER,
n_chains = CHAINS)),
SW(mod_cv <- validate(mod1, n_cv = 'loo')),
SW(mod_cv_multi <- validate(mod_multi, n_cv = 'loo'))
)
context("methods for growmod objects")
# fitted()/residuals() should mirror the stored fitted values and the
# data-minus-fitted differences, for single fits, multi-model fits (lists),
# and cross-validation objects.
test_that("growmod extractor methods work correctly", {
expect_equal(fitted(mod1), mod1$fitted)
expect_equal(residuals(mod1), c(mod1$data_set$size_data - mod1$fitted))
expect_equal(fitted(mod2), mod2$fitted)
expect_equal(residuals(mod2), c(mod2$data_set$size_data - mod2$fitted))
expect_equal(fitted(mod3), mod3$fitted)
expect_equal(residuals(mod3), c(mod3$data_set$size_data - mod3$fitted))
expect_equivalent(fitted(mod_multi), lapply(mod_multi, function(x) x$fitted))
expect_equivalent(residuals(mod_multi), lapply(mod_multi,
function(x) c(x$data_set$size_data - x$fitted)))
expect_equal(fitted(mod_cv), mod_cv$size_pred)
expect_equal(residuals(mod_cv), c(mod_cv$size_real - mod_cv$size_pred))
expect_equivalent(fitted(mod_cv_multi), lapply(mod_cv_multi,
function(x) x$size_pred))
expect_equivalent(residuals(mod_cv_multi), lapply(mod_cv_multi,
function(x) c(x$size_real - x$size_pred)))
})
# print()/summary() should mention the fitted model name(s); compare()
# should return objects of the expected length for fits and CV objects.
test_that("print, summary and compare methods work correctly", {
expect_output(print(mod1), "hillslope")
expect_output(print(mod2), "hillslope")
expect_output(print(mod3), "hillslope")
expect_output(print(mod_multi), "power2")
expect_output(summary(mod1), "summary statistics")
expect_output(summary(mod2), "summary statistics")
expect_output(summary(mod3), "summary statistics")
expect_output(summary(mod4), "summary statistics")
expect_output(summary(mod_multi), "models were fitted")
expect_length(compare(mod1, mod2, mod3), 15)
expect_length(compare(mod_multi), 10)
expect_output(print(mod_cv), "hillslope")
expect_output(print(mod_cv_multi), "power2")
expect_output(summary(mod_cv), "model was validated")
expect_output(summary(mod_cv_multi), "models were validated")
expect_length(compare(mod_cv, mod_cv), 6)
expect_length(compare(mod_cv_multi), 6)
})
# compare() should reject mixed/invalid inputs (objects with a growmod-like
# class but the wrong structure, or mixing named x with additional models of
# a different type), and return outputs of known length for valid inputs,
# including multi-model objects whose names have been stripped.
test_that("compare methods error with incorrect inputs", {
expect_error(compare(x = mod1, mod2))
mod_test <- seq(1, 10, 1)
class(mod_test) <- 'growmod'
expect_error(compare(x = mod_test))
expect_length(compare(x = mod1), 5)
expect_error(compare(x = mod_cv, mod_cv))
mod_test <- seq(1, 10, 1)
class(mod_test) <- 'growmod_cv'
expect_error(compare(x = mod_test))
expect_length(compare(x = mod_cv), 3)
expect_length(compare(x = mod_multi), 10)
expect_error(compare(x = mod_multi, mod_multi))
mod_test <- seq(1, 10, 1)
class(mod_test) <- 'growmod_multi'
expect_error(compare(x = mod_test))
expect_length(compare(mod_multi, mod_multi), 20)
expect_length(compare(x = mod_cv_multi), 6)
expect_length(compare(mod_cv_multi, mod_cv_multi), 12)
expect_error(compare(x = mod_cv_multi, mod_cv_multi))
mod_test <- seq(1, 10, 1)
class(mod_test) <- 'growmod_cv_multi'
expect_error(compare(x = mod_test))
mod_multi2 <- mod_multi
names(mod_multi2) <- NULL
# Fixed: this assertion compared mod_multi instead of the just-created
# unnamed copy mod_multi2 (mirroring the mod_cv_multi2 check below),
# leaving the unnamed case untested.
expect_length(compare(x = mod_multi2), 10)
mod_cv_multi2 <- mod_cv_multi
names(mod_cv_multi2) <- NULL
expect_length(compare(x = mod_cv_multi2), 6)
})
# plot() should run silently for valid fits and CV objects, and warn when a
# parameter summary contains non-finite bounds or for multi-model objects.
test_that("plot methods work correctly", {
expect_silent(plot(mod1))
expect_silent(plot(mod2))
expect_silent(plot(mod3))
expect_silent(plot(mod4))
expect_silent(plot(mod_multi_noblock))
mod4a <- mod4
# corrupt one upper CI bound to force the non-finite-bounds warning path
mod4a$stan_summary[grep(paste0('plot\\[1'), rownames(mod4a$stan_summary))[1], '97.5%'] <- Inf
expect_warning(plot(mod4a))
expect_warning(plot(mod_multi))
expect_warning(plot(mod_multi, group_blocks = FALSE))
expect_silent(plot(mod_cv))
expect_silent(plot(mod_cv_multi))
})
|
01d62a517e88eab2b64f9725eee20c127a7851e0
|
fd4e0630829d4fb77f483b8257bd31e060aaaa0e
|
/Exp2_Choice_Modelling.R
|
c7be33b92432be1242a2c33004f03fa01b37ba20
|
[] |
no_license
|
carpio-ucv/incentives-repo
|
f5598a29dcd1402a7322f0853279a15318c0804d
|
01a1106c0d19227d3a3c4de3ce2e1c996b47823c
|
refs/heads/master
| 2021-01-10T13:30:48.377914
| 2016-10-16T22:30:51
| 2016-10-16T22:30:51
| 49,686,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,986
|
r
|
Exp2_Choice_Modelling.R
|
##libraries
library("prefmod", lib.loc="~/R/win-library/3.2")
##DATA SETTING
# NOTE(review): hard-coded user-specific path -- adjust before re-running.
setwd("C:/Users/K56CA/Dropbox/Big Data/Robust Methods R")# change for your machine
exp <- read.csv("bltexp2.csv") # change for your machine
# build a paired-comparison design matrix for 5 items, with the experimental
# condition as a categorical subject covariate
exp2 <- llbt.design(exp, nitems = 5, cat.scovs = "cond")
gr <- factor(exp2$cond)
# rename the item columns to the gift levels they represent
names(exp2)[5:9] <- c("GIFT50", "GIFT75", "GIFT25", "GIFT0", "GIFT100")
###GENERAL MODEL GNM
# main-effects log-linear Bradley-Terry model (no condition interaction);
# the mu:cond strata are eliminated as nuisance parameters
res <- gnm(formula = y ~ GIFT100 + GIFT75 + GIFT50 + GIFT25 + GIFT0
, eliminate = mu:cond, family = poisson, data = exp2)
summary(res)
# goodness-of-fit p-value from the residual deviance
1-pchisq(res$deviance, df= res$df)
#MODEL INCLUDING GROUPS INTERACTION
# same model with item-by-condition interactions, allowing item worths to
# differ across experimental groups
resgr <- gnm(formula = y ~ GIFT100 + GIFT75 + GIFT50 + GIFT25 + GIFT0 +
(GIFT100 + GIFT75 + GIFT50 + GIFT25 + GIFT0):cond,
eliminate = mu:cond, family = poisson, data = exp2)
summary(resgr)
# goodness-of-fit p-value from the residual deviance
1-pchisq(resgr$deviance, df= resgr$df)
##ANOVA
# Likelihood-ratio comparison of the stage-1 main-effects model against its
# group-interaction extension.
# Fixed: was anova(res, resgr2) -- resgr2 is the *stage-2* model defined much
# later in this script, so the original line errors when the script is run
# top-to-bottom; the stage-1 interaction model is resgr.
anova(res,resgr, test = "Chisq")
##COEFFICIENTS res-MODEL 1
# fix the reference item's coefficient at 0 for identifiability, then map
# coefficients to worth parameters (normalised exp(2*lambda))
res$coefficients[5] <- 0
worth <- round(exp(2 * res$coefficients[1:5])/(sum(exp(2 *
res$coefficients[1:5]))), digits = 5)
print(worth)
##gesgr coefficients
# per-group coefficients: baseline + the group-specific interaction terms
coefg2 <- resgr$coefficients [1:5] + resgr$coefficients [c(6,8,10,12,14)]
coefg2
coefg3 <- resgr$coefficients [1:5] + resgr$coefficients [c(7,9,11,13,15)]
coefg3
##WORTH 1 (CONTROL NO AWARENESS)
resgr$coefficients[c(5)] <- 0
worth1 <- round(exp(2 * resgr$coefficients[1:5])/(sum(exp(2 *
resgr$coefficients[1:5]))), digits = 5)
print(worth1)
##WORTH 2 (NO RECIPROCITY)
coefg2[c(5)] <- 0
worth2 <- round(exp(2 * coefg2[1:5])/(sum(exp(2 * coefg2[1:5]))), digits = 5)
print(worth2)
##WORTH 3 (RECIPROCITY)
coefg3[c(5)] <- 0
worth3 <- round(exp(2 * coefg3[1:5])/(sum(exp(2 *coefg3[1:5]))), digits = 5)
print(worth3)
#PLOT WORTH- pi PARAMETER- FIGURE 7
# one column of worths per experimental condition
matrixplot <- matrix(cbind(worth1, worth2, worth3), nrow=5, ncol=3)
stores<- c("Give-100%", "Give-75%", "Give-50%","Give-25%", "Give-0%")
dimnames(matrixplot)= list(stores, c("No Awareness", "NO-Reciprocity", "Reciprocity"))
matrixplot
plot.wmat(matrixplot, ylab = "Worth Parameters", main = NULL,
psymb = c(16, 15, 17, 18, 19), ylim= c(0, 0.45))
##OTHERS
###MODEL 1-LLBTPC
# alternative fit via prefmod's llbtPC.fit wrapper on the reduced design
exp2ch <- exp2[,c("y", "mu", "g0", "g1", "GIFT100", "GIFT75",
"GIFT50", "GIFT25", "GIFT0", "cond")]
items <- c("Give-100", "Give-75", "Give-50","Give-25", "Give-0")
respc <- llbtPC.fit(exp2ch, nitems = 5, formel = ~gr, elim = ~1,
undec = FALSE)
summary(respc)
pis<-llbt.worth(respc)
pis
################
# STAGE 2 #####
################
##DATA SETTING
# NOTE(review): another hard-coded path -- adjust before re-running.
setwd("C:/Users/K56CA/Dropbox/Robust Methods R")
exp <- read.csv("bltexp2B.csv")
# stage-2 design: 3 items (certainty/uncertainty conditions)
exp2b <- llbt.design(exp, nitems = 3, cov.sel = "cond")
gr <- factor(exp2b$cond)
names(exp2b)[5:7] <- c("Certainty_Both", "Uncertainty_Partner",
"Uncertainty_Both")
### GENERAL MODEL GNM
# main-effects model, eliminating the mu:gr nuisance strata
res2 <- gnm(formula = y ~ Uncertainty_Partner + Uncertainty_Both +
Certainty_Both , family = poisson, eliminate = mu:gr
, data = exp2b)
summary(res2)
# goodness-of-fit p-value
1-pchisq(res2$deviance, df= res2$df)
#MODEL INCLUDING GROUPS INTERACTION
resgr2 <- gnm(formula = y ~ Uncertainty_Partner + Uncertainty_Both +
Certainty_Both+(Uncertainty_Partner +
Uncertainty_Both + Certainty_Both):gr,
eliminate = mu:gr, family = poisson, data = exp2b)
summary(resgr2)
1-pchisq(resgr2$deviance, df= resgr2$df)
##ANOVA
anova(res2,resgr2, test = "Chisq")
##gesgr coefficients
# per-group coefficients: baseline + group-specific interaction terms
coef2g2 <- resgr2$coefficients [1:3] + resgr2$coefficients [c(4,6,8)]
coef2g2
coef2g3 <- resgr2$coefficients [1:3] + resgr2$coefficients [c(5,7,9)]
coef2g3
##WORTH parameters model 1
# fix the reference item at 0, then normalise exp(2*lambda) to worths
# (this overwrites the stage-1 object named worth1)
res2$coefficients[c(3)] <- 0
worth1 <- round(exp(2 * res2$coefficients[1:3])/(sum(exp(2 *
res2$coefficients[1:3]))), digits = 5)
print(worth1)
#PLOT WORTH- pi PARAMETER - MODEL 1-DEF
matrixplot <- matrix(worth1, nrow=3, ncol=1)
stores<- c("Uncertainty_Partner",
"Uncertainty_Both", "Certainty_Both")
dimnames(matrixplot)= list(stores, c("Loyalty Programmes"))
# NOTE(review): stage 1 used plot.wmat() while this uses plotworth();
# confirm both helpers are available in the installed prefmod version.
plotworth(matrixplot, ylab = "Worth Parameters", main = NULL,
pcol = c("black", "gray", "black"),
psymb = c(16, 17, 15), ylim= c(0, 0.6))
########## just in case ####
##WORTH 2 (NO RECIPROCITY)
coef2g2[c(3)] <- 0
worth2 <- round(exp(2 * coef2g2[1:3])/(sum(exp(2 * coef2g2[1:3]))), digits = 5)
print(worth2)
##WORTH 3 (RECIPROCITY)
coef2g3[c(3)] <- 0
worth3 <- round(exp(2 * coef2g3[1:3])/(sum(exp(2 *coef2g3[1:3]))), digits = 5)
print(worth3)
#PLOT WORTH- pi PARAMETER
matrixplot <- matrix(cbind(worth1, worth2, worth3), nrow=3, ncol=3)
stores<- c("GIFT10", "GIFT7525", "GIFT50")
dimnames(matrixplot)= list(stores, c("No Awareness", "NO-Reciprocity", "Reciprocity"))
matrixplot
plotworth(matrixplot, ylab = "Worth Parameters", main = NULL,
pcol = c("black", "gray", "black"),
psymb = c(16, 15, 17), ylim= c(0, 0.6))
|
ffc4c1da43fcd70bd327b1068a577c9f6cdf5754
|
991d72b16c087afb9835502757fa69f38e5ce79a
|
/R/bal.tab.designmatch.R
|
bb5a54b650927d5cc506fe32a1719a29ddf844bd
|
[] |
no_license
|
ngreifer/cobalt
|
a1862b212efb254a55a8913a814d4971aaa43ea2
|
42c1ac803a8bae3916833d669f193a7f06c4d89e
|
refs/heads/master
| 2023-08-03T18:58:45.744235
| 2023-07-28T03:44:14
| 2023-07-28T03:44:14
| 63,369,821
| 63
| 13
| null | 2022-10-13T07:20:51
| 2016-07-14T21:07:03
|
R
|
UTF-8
|
R
| false
| false
| 2,013
|
r
|
bal.tab.designmatch.R
|
# `designmatch` output carries no S3 class of its own (see @details); the
# generic detects it structurally, and this method's behavior is identical to
# the Match method, so the function object is aliased rather than duplicated.
#' @title Balance Statistics for `designmatch` Objects
#' @description Generates balance statistics for output objects from \pkg{designmatch}.
#'
#' @inheritParams bal.tab.Match
#' @param x the output of a call to \pkgfun{designmatch}{bmatch} or related wrapper functions from the \pkg{designmatch} package.
#' @param s.d.denom `character`; how the denominator for standardized mean differences should be calculated, if requested. See [col_w_smd()] for allowable options. Abbreviations allowed. If not specified, will be set to `"treated"`.
#'
#' @inherit bal.tab.Match return
#'
#' @details `bal.tab()` generates a list of balance summaries for the object given, and functions similarly to \pkgfun{designmatch}{meantab}. Note that output objects from \pkg{designmatch} do not have their own class; `bal.tab()` first checks whether the object meets the criteria to be treated as a `designmatch` object before dispatching the correct method. Renaming or removing items from the output object can create unintended consequences.
#'
#' The input to `bal.tab.designmatch()` must include either both `formula` and `data` or both `covs` and `treat`. Using the `covs` + `treat` input mirrors how \pkgfun{designmatch}{meantab} is used (note that to see identical results to `meantab()`, `s.d.denom` must be set to `"pooled"`).
#'
#' @inherit bal.tab.Match seealso
#'
#' @examplesIf (requireNamespace("designmatch", quietly = TRUE) && FALSE)
#' data("lalonde", package = "cobalt")
#'
#' library(designmatch)
#' covariates <- as.matrix(lalonde[c("age", "educ", "re74", "re75")])
#' treat <- lalonde$treat
#' dmout <- bmatch(treat,
#'                 total_groups = sum(treat == 1),
#'                 mom = list(covs = covariates,
#'                            tols = absstddif(covariates,
#'                                             treat, .05))
#' )
#'
#' ## Using treat and covs
#' bal.tab(dmout, treat = treat, covs = covariates)
#' @exportS3Method bal.tab designmatch
bal.tab.designmatch <- bal.tab.Match
|
0051fa87f133f7b15aa5f878f8d2a2484a00190c
|
b74ccf99aeb3fdb371705237fbb0978ce9affb44
|
/run_analysis.R
|
012c9e47499e5f9c8f7b3cbdb72cd476760b516c
|
[] |
no_license
|
sawasakakazuma/GettingAndCleaningData_Pre-trial
|
b51a9a2513f1ff26c72197a94ed7eca9d9081d04
|
51c663875feb613da637ab38ab3a14268991bf4e
|
refs/heads/master
| 2021-01-18T04:04:03.690883
| 2014-11-24T01:23:13
| 2014-11-24T01:23:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 874
|
r
|
run_analysis.R
|
# Merge the UCI HAR train/test measurement sets, keep the mean/std features,
# and write per-subject, per-activity column averages to ./output.txt.
train <- read.table("./UCI HAR Dataset/train/X_train.txt")
test <- read.table("./UCI HAR Dataset/test/X_test.txt")
concat <- rbind(train, test)
# Feature names come from the second column of features.txt.
nameTable <- read.table("./UCI HAR Dataset/features.txt")
names(concat) <- nameTable[, 2]
# NOTE(review): "()" matches the empty string, so this pattern is effectively
# "mean|std" and also keeps meanFreq() columns -- confirm that is intended
# before escaping the parentheses.
selected <- concat[, grep("(mean()|std())", names(concat))]
# Subject ids and activity labels, stacked in the same train-then-test order.
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
selected_add <- cbind(rbind(subject_train, subject_test), rbind(y_train, y_test), selected)
names(selected_add) <- c("subject", "y", names(selected))
# Average every feature within each subject x activity combination; rows 1-2
# of the colMeans result are the grouping keys themselves, so keep 3:81.
splitted <- split(selected_add, list(selected_add$subject, selected_add$y))
output <- sapply(splitted, colMeans)[3:81, ]
# Fixed: the original used the partially-matched argument name `row.name`.
write.table(output, "./output.txt", row.names = FALSE)
|
632584b00569a6f435d8a8a3bd79f15fbcd6bf19
|
fd932417cdd3bc27033a0d4dbec0782e51efb599
|
/dna_methylation/correlation.R
|
fa0df2b743b4589e264793a82c5f7bf1c1d31be2
|
[] |
no_license
|
usegalaxy-eu/temporary-tools
|
2282b851f70f30236ee5c902d455cdb1d51ebfcc
|
aa603c1def53c7402fdcee9273eaa31b75e66f9e
|
refs/heads/master
| 2023-08-09T18:23:56.031577
| 2023-07-24T23:58:17
| 2023-07-24T23:58:17
| 156,679,078
| 0
| 9
| null | 2023-07-24T23:58:19
| 2018-11-08T09:05:23
|
Python
|
UTF-8
|
R
| false
| false
| 59
|
r
|
correlation.R
|
/data/0/bgruening_tools/galaxytools/methtools/correlation.R
|
5c2606547bc05cc0cf756293ee7f71e3d10678e9
|
8ad548bd9153ba2464925beb148a72a5dea7548b
|
/man/chemometrics.Rd
|
5a26c18248711de2eb7807e2cf8b139facfe76d3
|
[] |
no_license
|
cran/qut
|
df53d55ec51b9cbb480792b935003fac071d35ce
|
985776919059aa2586361d4da915213bd5917a4c
|
refs/heads/master
| 2021-07-25T02:59:46.209559
| 2021-01-19T09:00:02
| 2021-01-19T09:00:02
| 48,086,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
rd
|
chemometrics.Rd
|
\name{chemometrics}
\alias{chemometrics}
\docType{data}
\title{
Chemometrics data set from Sardy (2008)
}
\description{
Fuel octane level measurements for N = 434 samples, each described by P = 351 spectrometer measurements.
}
\usage{data(chemometrics)}
\format{
A data frame with 434 observations on the following 2 variables.
\describe{
\item{\code{y}}{a numeric vector}
\item{\code{x}}{a matrix with 351 columns}
}
}
\references{
S. Sardy. On the practice of rescaling covariates. International Statistical Review. 2008
}
\examples{
data(chemometrics)
}
|
7b712e0b03a8620c49682abae4a243af1fbfbc70
|
79f1746216379b959f7d12c6467f77ac14e1d16b
|
/Shiny/BeersAndBreweries/server.R
|
12f48cc4d90c43eae802fa1e2c87476e3402afe1
|
[] |
no_license
|
DHLaurel/DHLaurel.github.io
|
e5cb9cb774498ffb7f0001c6d8b436d8c637dd0f
|
32cc5fbaba3ac0cbb103b88d6d5a7adfef2290ac
|
refs/heads/master
| 2023-04-14T12:09:18.445668
| 2023-04-09T04:06:08
| 2023-04-09T04:06:08
| 147,439,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,944
|
r
|
server.R
|
#
# The skeleton code for the below was largely generated by ChatGPT. Thanks ChatGPT!
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(dplyr)
library(mice) # multivariate imputation (predictive mean matching)
# Globals: brewery lookup table, loaded once at app start and shared by all
# sessions. Fixed: use `<-` rather than `=` for top-level assignment.
brew_dat <- read.csv("./Breweries.csv")
#
# # Define server logic required to draw a histogram
# function(input, output, session) {
#
# # output$distPlot <- renderPlot({
# #
# # # generate bins based on input$bins from ui.R
# # x <- faithful[, 2]
# # bins <- seq(min(x), max(x), length.out = input$bins + 1)
# #
# # # draw the histogram with the specified number of bins
# # hist(x, breaks = bins, col = 'darkgray', border = 'white',
# # xlab = 'Waiting time to next eruption (in mins)',
# # main = 'Histogram of waiting times')
# #
# # })
#
# output$distPlot <- renderPlot({
#
# # generate bins based on input$bins from ui.R
# x <- faithful[, 2]
# bins <- seq(min(x), max(x), length.out = input$bins + 1)
#
# # draw the histogram with the specified number of bins
# hist(x, breaks = bins, col = 'darkgray', border = 'white',
# xlab = 'Waiting time to next eruption (in mins)',
# main = 'Histogram of waiting times')
#
# })
# }
# Shiny server: joins the uploaded beer CSV to the bundled brewery table and
# renders IBU/ABV distributions, an IBU-vs-ABV scatter, a style bar chart,
# and an imputed IPA-classification scatter, all filterable by state.
function(input, output) {
  # Merged beer + brewery data, recomputed whenever a new file is uploaded.
  data <- reactive({
    req(input$data_file)
    beer_dat <- read.csv(input$data_file$datapath)
    merge(x = brew_dat, y = beer_dat, by.x = "Brew_ID", by.y = "Brewery_id")
  })
  # Restrict rows to the selected state; "" or "All" means no filtering.
  # Must be called from a reactive context (it reads input$State).
  filter_by_state <- function(d) {
    if (input$State != "" && input$State != "All") {
      d <- d %>% filter(State == input$State)
    }
    d
  }
  # IBU distribution: histogram or boxplot, per input$plot_type.
  output$ibu_plot <- renderPlot({
    plot_data <- filter_by_state(data())
    if (input$plot_type == "hist") {
      ggplot(plot_data, aes(x = IBU)) +
        geom_histogram(binwidth = 5) +
        labs(x = "IBU", y = "Count")
    } else {
      ggplot(plot_data, aes(x = "IBU", y = IBU)) +
        geom_boxplot() +
        labs(x = "", y = "IBU")
    }
  })
  # ABV distribution: histogram (shown in percent) or boxplot (raw fraction).
  output$abv_plot <- renderPlot({
    plot_data <- filter_by_state(data())
    if (input$plot_type == "hist") {
      ggplot(plot_data, aes(x = ABV * 100)) +
        geom_histogram(binwidth = 0.5) +
        labs(x = "ABV (%)", y = "Count")
    } else {
      ggplot(plot_data, aes(x = "ABV", y = ABV)) +
        geom_boxplot() +
        labs(x = "", y = "ABV")
    }
  })
  # Scatter of IBU vs. ABV with an optional linear-regression overlay.
  output$ibu_abv_plot <- renderPlot({
    plot_data <- filter_by_state(data())
    plot <- ggplot(plot_data, aes(x = IBU, y = ABV)) + geom_point()
    if (input$show_lm) {
      plot <- plot + geom_smooth(method = "lm", se = FALSE)
    }
    plot
  })
  # Bar chart of beer-style counts.
  output$additional_plot <- renderPlot({
    plot_data <- filter_by_state(data())
    ggplot(plot_data, aes(x = factor(Style))) +
      geom_bar() +
      labs(x = "Beer Style", y = "Count") +
      theme(axis.text.x = element_text(angle = 90, hjust = 1))
  })
  # IPA vs. non-IPA ales on an IBU/ABV scatter, after imputing missing values.
  # Fixed: removed a dead block that filtered plot_data and then discarded it.
  # NOTE(review): mice() is stochastic and runs on every render; consider
  # caching the imputation in a reactive and setting a seed.
  output$final_plot <- renderPlot({
    beer_brew_whole <- data()
    # Impute ABV first (fewer missing rows), then IBU, via predictive mean matching.
    beer_brew_whole$ABV <- complete(mice(beer_brew_whole, method = "pmm"))$ABV
    beer_brew_whole$IBU <- complete(mice(beer_brew_whole, method = "pmm"))$IBU
    # Keep ales and IPAs, drop anything labelled a lager.
    bb_ales <- beer_brew_whole[grepl("Ale", beer_brew_whole$Style, ignore.case = TRUE) |
                                 grepl("IPA", beer_brew_whole$Style, ignore.case = TRUE), ]
    bb_ales <- bb_ales[!grepl("Lager", bb_ales$Style, ignore.case = TRUE), ]
    bb_ales$IsIPA <- ifelse(grepl("IPA", bb_ales$Style, ignore.case = TRUE),
                            "IPA", "Not an IPA")
    plot_data <- filter_by_state(bb_ales)
    ggplot(plot_data, aes(x = IBU, y = ABV * 100.0, color = IsIPA)) +
      geom_point(position = "jitter", alpha = 0.7) +
      labs(title = "IPA Prediction by \nABV and IBU", color = "", y = "ABV (%)") +
      theme(axis.title = element_text(size = 12)) +
      xlim(0, 150) + ylim(2, 10)
  })
}
|
39d33ff62f0507398802aa8b9c29afaedd8ba62e
|
be76c59c9cf0196af18969a47ec00e8a54f5ade9
|
/strategies/team9.r
|
7f0d9ac7c254dced8ca645bca306c1333189a19e
|
[] |
no_license
|
happy369300/R-project-1
|
0dfb4ac9d33ef5f4bdb9444f83f6c1e43f8aa919
|
56028608a847d3cd5351080a845208a430201ec5
|
refs/heads/master
| 2021-01-13T09:21:13.828344
| 2016-10-04T03:28:06
| 2016-10-04T03:28:06
| 69,930,889
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,980
|
r
|
team9.r
|
#params <-list(series=1:10,lookback1=26,lookback2=12,lookback3=40, #params for pairs trading
#spreadPercentage=0.8,maxPositionSize=70, #params for limit order
#lookbackB=11,sdParam=1.2,seriesB=1:10, #params for bbands
#lookbackM=37,seriesM=c(2,3,9), #params for macd
#lookbackR=14,threshold=11,seriesR=1:10) #params for rsi
# This is a multi-indicator strategy created by team9, it consist of
#pairs trading, bbands, macd, rsi and simple limit with position sizing strategies
maxRows <- 1100 # used to initialize a matrix to store closing prices
# Main strategy entry point, called once per trading day by the backtesting
# framework. Combines five sub-strategies:
#   * pairs trading on series pairs 1&2, 3&4 and 9&10 (spread vs. its quartiles)
#   * Bollinger bands, MACD and RSI signals (via the TTR package)
#   * symmetric buy/sell limit orders sized by each series' daily range
# Position sizes are scaled by mean(series 7) / mean(series i) to balance
# exposure across series.
#
# Args:
#   store      - per-run state (day counter + closing-price matrix); NULL on day 1
#   newRowList - list of today's OHLC rows, one element per series
#   currentPos - current positions (not used by this strategy)
#   params     - lookbacks, thresholds and series selections (see file header)
#
# Returns: list(store, marketOrders, limitOrders1/limitPrices1 (buys),
#          limitOrders2/limitPrices2 (sells)).
getOrders <- function(store, newRowList, currentPos, params) { #major function of strategies
allzero <- rep(0,length(newRowList)) #initialize the variables that would be used later
pos <- allzero #market order position of pairs trading strategy
posbuy<- allzero #market order position of bbands strategy
possell<- allzero
RSIsell<-allzero #market order position of RSI strategy
RSIbuy<-allzero
posMb<-allzero #market order position of MACD strategy
posMs<-allzero
if (is.null(store)) #initialize data storage
store <- initStore(newRowList,params$series)
else
store <- updateStore(store, newRowList, params$series)
#Pairs Trading(works on pairs 1&2,3&4,9&10)
#position
cl7<- newRowList[[7]]$Close #input all close price data of series 7
clv7<-as.vector(cl7)#convert the data into vector format(clv7 means close price of series 7)
# NOTE(review): cl7 is only today's row, so clv7[1:store$iter] pads with NA
# and mean(..., na.rm = TRUE) reduces to today's close -- confirm whether a
# running mean over store$cl[,7] was intended instead.
mean7ToDay<-mean(clv7[1:store$iter],na.rm = TRUE)#calculate the mean of all current known data
#of series 7 everyday (no over looking)
#pairs 1&2 begin
if (store$iter > params$lookback1) {
pairs1<-cbind(store$cl[1:store$iter,1],store$cl[1:store$iter,2])#combine the data of two series
x=pairs1[,1] #close price of series 1
y=pairs1[,2] #close price of series 2
data <- data.frame(a=y,b=x) #convert the data structure to data frame
fit <- lm(a~b+0,data=data) #fit a no-intercept linear model between the two series
beta1<-coef(fit)[1] #hedge-ratio coefficient
spread1 <- y - beta1*x #calculate the spread
# summary() gives min/1st-quartile/median/mean/3rd-quartile/max of the spread;
# indices [2] and [5] below are the first and third quartiles.
test1=summary(spread1) #store the summary value of all the past spread
cl1 <- newRowList[[params$series[1]]]$Close
clv1<-as.vector(cl1)#convert the close price data of the series 1 into vector form
meanToDay1<-mean(clv1[1:store$iter],na.rm = TRUE)
#calculate the mean of all current known data of the series 1 everyday (no over looking)
#and ignore the NA data
ratio1To7 <-mean7ToDay/meanToDay1
#calculate the ratio between the series 1 and series 7
#using their everyday updated mean of all previous close price
#to set the size of its market order to balance among 10 series
ratio1To7<-floor(ratio1To7)#take the integer part of the result as the size of market order
cl2 <- newRowList[[params$series[2]]]$Close
clv2<-as.vector(cl2)#convert the close price data of the series 2 into vector form
meanToDay2<-mean(clv2[1:store$iter],na.rm = TRUE)
#calculate the mean of all current known data of the series 2 everyday (no over looking)
#and ignore the NA data
#calculate the ratio between the series 2 and series 7
#using their everyday updated mean of all previous close price
#to set the size of its market order to balance among 10 series
ratio2To7 <-mean7ToDay/meanToDay2
#take the integer part of the result as the size of market order
ratio2To7<-floor(ratio2To7)
#using the lowest beta between 1&2 pairs in data1 as critical value of future stop loss
#the if condition (beta1>0.027) means if the difference between this pair did not go too far
#which could result in risks, the pairs trading between 1&2 can be conducted
if(beta1>0.027){
if(spread1[store$iter]>test1[5]&&spread1[store$iter]<test1[6]){
pos[params$series[1]] <- ratio1To7
pos[params$series[2]] <- -ratio2To7
}else if(spread1[store$iter]<test1[2]&&spread1[store$iter]>test1[1]){
pos[params$series[1]] <- -ratio1To7
pos[params$series[2]] <- ratio2To7
}
}
}#pairs1&2 ends
#pairs 3&4 (same logic as pairs 1&2, different lookback and beta threshold)
if (store$iter > params$lookback2) {
pairs2<-cbind(store$cl[1:store$iter,3],store$cl[1:store$iter,4])
x2=pairs2[,1]
y2=pairs2[,2]
data2<- data.frame(c=y2,d=x2)
fit2<- lm(c~d+0,data=data2)
beta2<-coef(fit2)[1]
spread2 <- y2 - beta2 * x2
test2=summary(spread2)
cl3 <- newRowList[[params$series[3]]]$Close
clv3<-as.vector(cl3)
meanToDay3<-mean(clv3[1:store$iter],na.rm = TRUE)
ratio3To7 <-floor(mean7ToDay/meanToDay3)
cl4 <- newRowList[[params$series[4]]]$Close
clv4<-as.vector(cl4)
meanToDay4<-mean(clv4[1:store$iter],na.rm = TRUE)
ratio4To7 <-floor(mean7ToDay/meanToDay4)
#using the lowest beta between 3&4 pairs in data1 as critical value of future stop loss
#the if condition (beta2>0.099) means if the difference between this pair did not go too far
#which could result in risks, the pairs trading between 3&4 can be conducted
if(beta2>0.099){
if(spread2[store$iter]>test2[5]&&spread2[store$iter]<test2[6]){
pos[params$series[3]] <- ratio3To7
pos[params$series[4]] <- -ratio4To7
}else if(spread2[store$iter]<test2[2]&&spread2[store$iter]>test2[1]){
pos[params$series[3]] <- -ratio3To7
pos[params$series[4]] <- ratio4To7
}
}
}#pairs 3&4 ends
#pairs 9&10 (same logic again; beta threshold 0.14 acts as the stop-loss gate)
if (store$iter > params$lookback3) {
pairs9<-cbind(store$cl[1:store$iter,9],store$cl[1:store$iter,10])
x9=pairs9[,1]
y10=pairs9[,2]
data3 <- data.frame(a=y10,b=x9)
fit3 <- lm(a~b+0,data=data3)
beta3<-coef(fit3)[1]
spread3 <- y10 - beta3*x9
test3=summary(spread3)
cl9 <- newRowList[[params$series[9]]]$Close
clv9<-as.vector(cl9)
meanToDay9<-mean(clv9[1:store$iter],na.rm = TRUE)
ratio9To7 <-floor(mean7ToDay/meanToDay9)
cl10 <- newRowList[[params$series[10]]]$Close
clv10<-as.vector(cl10)
meanToDay10<-mean(clv10[1:store$iter],na.rm = TRUE)
ratio10To7 <-floor(mean7ToDay/meanToDay10)
if(beta3>0.14){
if(spread3[store$iter]>test3[5]&&spread3[store$iter]<test3[6]){
pos[params$series[9]] <- ratio9To7
pos[params$series[10]] <- -ratio10To7
}else if(spread3[store$iter]<test3[2]&&spread3[store$iter]>test3[1]){
pos[params$series[9]] <- -ratio9To7
pos[params$series[10]] <- ratio10To7
}
}
}#pairs9&10 ends
#Pairs Trading (works on pairs 1&2,3&4,9&10) end
#bbands(works on all ten series): buy below the lower band, sell above the upper
if (store$iter > params$lookbackB) {
startIndex <- store$iter - params$lookbackB
for (i in 1:length(params$series)) {
cl <- newRowList[[params$series[i]]]$Close
clv<-as.vector(cl)
meanToDay<-mean(clv[1:store$iter],na.rm = TRUE)
ratioTo7 <-mean7ToDay/meanToDay#
ratioTo7<-floor(ratioTo7)#
bbands <-
last(BBands(store$cl[startIndex:store$iter,i],n=params$lookbackB,sd=params$sdParam))[c("dn","up")]
if (cl < bbands["dn"]) {
posbuy[params$series[i]] <- ratioTo7
}
else if (cl > bbands["up"]) {
possell[params$series[i]] <- -ratioTo7
}
}
}
#bbands (works on all ten series) end
#macd (works on series 2,3,9): contrarian -- short when macd above signal, long below
if (store$iter > params$lookbackM) {
startIndex <- store$iter - params$lookbackM
for (i in 1:length(params$seriesM)) {
cl <- newRowList[[params$seriesM[i]]]$Close
clv<-as.vector(cl)
meanToDay<-mean(clv[1:store$iter],na.rm = TRUE)
ratioTo7 <-mean7ToDay/meanToDay
ratioTo7<-floor(ratioTo7)
xdata <- matrix(store$cl[startIndex:store$iter,i])
MACD <- last(MACD(xdata,12,26,9,maType="EMA"))[c("macd","signal")]
if (MACD["macd"] > MACD["signal"]){
posMs[params$seriesM[i]] <- -ratioTo7 #short
}
else if (MACD["macd"] < MACD["signal"]) {
posMb[params$seriesM[i]] <- ratioTo7 #long
}
}
}
#macd (works on series 2,3,9) end
#rsi (works on all ten series): sell when overbought, buy when oversold
if (store$iter > params$lookbackR) {
startIndex <- store$iter - params$lookbackR
for (i in 1:length(params$seriesR)) {
cl <- newRowList[[params$seriesR[i]]]$Close
clv<-as.vector(cl)
meanToDay<-mean(clv[1:store$iter],na.rm = TRUE)
ratioTo7 <-mean7ToDay/meanToDay
ratioTo7<-floor(ratioTo7)
xdata <- matrix(store$cl[startIndex:store$iter,i])
rsi <- last(RSI(xdata,n=params$lookbackR))
if (rsi > (50+ params$threshold)){
RSIsell[params$seriesR[i]]<- -ratioTo7
}
if (rsi < (50-params$threshold)){
RSIbuy[params$seriesR[i]] <- ratioTo7
}
}
}
#RSI (works on all ten series) ends
#add all market orders from pairs trading, bbands, macd and rsi strategies together
marketOrders <-pos+posbuy+possell+posMb+posMs+RSIbuy+RSIsell
#cat("marketOrders",marketOrders,"\n")
#Limit orders with position sizing (works on series 1,2,3,6,7,8,9,10):
#size inversely proportional to today's high-low range; series 4 and 5 excluded
ranges <- sapply(1:length(newRowList),function(i) newRowList[[i]]$High - newRowList[[i]]$Low)
positions <- round(params$maxPositionSize/(ranges+1))
positions[4]<-0
positions[5]<-0
spread <- sapply(1:length(newRowList),function(i) params$spreadPercentage * (newRowList[[i]]$High -newRowList[[i]]$Low))
limitOrders1 <- positions # BUY LIMIT ORDERS
limitPrices1 <- sapply(1:length(newRowList),function(i) newRowList[[i]]$Close - spread[i]/2)
limitOrders2 <- -positions # SELL LIMIT ORDERS
limitPrices2 <- sapply(1:length(newRowList),function(i) newRowList[[i]]$Close + spread[i]/2)
#Limit orders with position sizing (works on series 1,2,3,6,7,8,9,10) end
return(list(store=store,marketOrders=marketOrders,
limitOrders1=limitOrders1,
limitPrices1=limitPrices1,
limitOrders2=limitOrders2,
limitPrices2=limitPrices2))
}
initClStore <- function(newRowList, series) {
  # Closing-price store: rows are trading days (up to maxRows), one column
  # per tracked series; seed row 1 with today's closes.
  price_matrix <- matrix(0, nrow = maxRows, ncol = length(series))
  updateClStore(price_matrix, newRowList, series, iter = 1)
}
# Record today's closing price for every tracked series in row `iter` of the
# closing-price matrix, and return the updated matrix.
#
# Args:
#   clStore    - numeric matrix (day x series) of closing prices
#   newRowList - list of today's rows; each element carries a $Close value
#   series     - integer indices into newRowList, one per store column
#   iter       - 1-based row (trading day) to write
updateClStore <- function(clStore, newRowList, series, iter) {
  # seq_along() is safe for empty `series`; the original 1:length(series)
  # would iterate over c(1, 0) and fail on zero-length input.
  for (i in seq_along(series)) {
    clStore[iter, i] <- as.numeric(newRowList[[series[i]]]$Close)
  }
  clStore
}
initStore <- function(newRowList, series) {
  # Day-1 state: iteration counter plus the seeded closing-price matrix.
  list(iter = 1, cl = initClStore(newRowList, series))
}
updateStore <- function(store, newRowList, series) {
  # Advance to the next trading day and record its closing prices.
  next_day <- store$iter + 1
  store$iter <- next_day
  store$cl <- updateClStore(store$cl, newRowList, series, next_day)
  store
}
|
a7b02b03cc63c558d584b1e4f5967c00375c4092
|
4a589cb2ff5ccc42eeec1741d2407b18f0346192
|
/R/parscensims.R
|
db80157ddb5a6c45a7b82cbc182ad4fed9d39bc1
|
[] |
no_license
|
openfields/Ambcitsci
|
633f636fdd56b4d8392e3a539ace555f8ed0c565
|
0d8385a59b56a8437f365355a8243c4ab8cfd8c8
|
refs/heads/master
| 2020-04-15T14:05:02.179703
| 2017-01-03T21:16:42
| 2017-01-03T21:16:42
| 58,056,418
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
parscensims.R
|
# Scenario simulations: scenarios 201-361, 500 stochastic iterations each.
# Each iteration runs popsim() for 50 years over 2 stages, varying only the
# road-crossing guard schedules taken from the `scenarios` table.
# Load the model functions and the scenario definitions.
source('./R/popsim.r')
source('./R/bprob.r')
source('./R/fecund.r')
source('./R/surv.r')
source('./R/migrate.r')
load('./data/scenarios.Rdata')
# One result container per scenario: a 50-year x 2-stage x 500-iteration array.
my_fun <- function(){
  list(d=array(dim=c(50,2,500), data=NA))
}
# replicate() with simplify = TRUE unwraps the length-1 lists, so out[[i]]
# is the result array for scenario i. Fixed: was right-assign `-> out`.
out <- replicate(361, my_fun(), simplify=TRUE)
# Loop through scenarios 201-361, 500 iterations each.
system.time(for(i in 201:361){
  # Guard counts per migration night for this scenario:
  # adult in-migration (nights 1-3), adult out-migration (nights 2-4),
  # and metamorph out-migration (nights 5-7).
  gds_ai <- c(scenarios$night1_adult_in_migtation[i],scenarios$night2_adult_inout_migtation[i],scenarios$night3_adult_inout_migtation[i])
  gds_ao <- c(scenarios$night2_adult_inout_migtation[i],scenarios$night3_adult_inout_migtation[i],scenarios$night4_adult_out_migtation[i])
  gd_m <- c(scenarios$night5_meta_out_migration[i], scenarios$night6_meta_out_migration[i], scenarios$night7_meta_out_migration[i])
  # 500 stochastic replicates for this scenario.
  for(j in seq_len(500)){
    out[[i]][,,j] <- popsim(nyears=50, init_pop=c(100,100), dphi_sa = .9986, pr_b= .4230, nb_dphi = .9992, a_in_night_prob = c(0.333,0.333,0.334),
                            a_rmort = 0.0407, a_in_num_guards = gds_ai, guard_limit = 10, a_p_dphi = 0.9994, p_days = 14, fec = 165, lmort = 0.0523,
                            a_out_night_prob = c(0.333,0.333,0.334), rmort2 = 0.0407, a_out_nguard = gds_ao, met_night_prob = c(0.333,0.333,0.334),
                            rmort_met = 0.0407, ng_met = gd_m)
  } #j
  # Checkpoint the accumulated results every 100 scenarios.
  if(i %% 100 == 0) {save(out, file='./data/outdata.Rdata')}
}) #i
# Fixed: persist the final results -- the in-loop checkpoint only fires at
# i = 300, so scenarios 301-361 were never saved by the original script.
save(out, file='./data/outdata.Rdata')
|
ae866749ac3edf96246bbed73785bbbed2ff8440
|
1820722c3c8a37ee2b052db65d658085ab786630
|
/scripts-libro/Code/Berkeley.R
|
1615678a38ae50ab577a87ed22a1e8f2a5afb251
|
[] |
no_license
|
cristianhernan/austral-mcd-aid
|
8f0efcb76fc90bf24d92f210ae7fa3636899636c
|
99d49f1562624de3525abeaba8783f942d8f910d
|
refs/heads/main
| 2023-06-24T05:33:16.219278
| 2021-07-22T01:14:21
| 2021-07-22T01:14:21
| 376,146,006
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 1,471
|
r
|
Berkeley.R
|
library(readxl) # Allows reading xlsx files
# Berkeley admissions data: chi-squared tests of the association between sex
# and admission, first overall and then within each department (the classic
# Simpson's paradox example).
Berkeley <- read_excel("C:/.../Berkeley.xlsx") # Import the working data set
# Run a chi-squared test of independence between sex and admission for the
# given vectors, relabelling the admission columns as NO/SI, and print the
# result (mirrors the repeated per-department blocks of the original script).
test_admission <- function(sexo, admision) {
  tab <- table(sexo, admision)
  colnames(tab) <- c("NO", "SI")
  print(chisq.test(tab))
}
# Overall association, ignoring department.
test_admission(Berkeley$Sexo, Berkeley$Admisión)
# Association within each department A-F. Fixed: replaced attach() plus six
# copy-pasted blocks with explicit column access and a loop.
for (dpto in c("A", "B", "C", "D", "E", "F")) {
  en_dpto <- Berkeley$Departamento == dpto
  test_admission(Berkeley$Sexo[en_dpto], Berkeley$Admisión[en_dpto])
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.