blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e8e2b713d45d369d3b3806af2bd0f3a0d34c60bd | 6f99deb75cc9525c81805353fd531568740027d2 | /ADAS_NLME3.R | d9cbf6b533e7deef7922d8d0130b88c16cde530f | [] | no_license | hilanov/rcodes | a7b443f33532d1de0c8dd8c994da364fdfd14e35 | 63a57bf362a70d57de163eb500514b304e441c1a | refs/heads/master | 2021-06-29T12:37:29.258574 | 2017-09-13T09:37:24 | 2017-09-13T09:37:24 | 100,592,096 | 0 | 0 | null | null | null | null | SHIFT_JIS | R | false | false | 8,696 | r | ADAS_NLME3.R | setwd("C:/Users/T.Hirano/Documents/【PJ】/Eisai_ADAS")
# packageの読み込み
library(nlme)
library(MASS)
library(ggplot2)
library(magrittr)
library(dplyr)
library(reshape2)
library(minpack.lm)
library(lattice)
# データの読み込み
adef<-read.csv(file="C:/Users/T.Hirano/Documents/【PJ】/Eisai_ADAS/ADEF_SMALL.csv")
# 同時点で記録のあるオブザベーションを削除
adef2 <- dplyr::distinct(adef, SUBJID, month, .keep_all=T)
# monthとCHGがNAであるオブザベーションを削除
adef3<-adef2[is.na(adef2$month)!=T & is.na(adef2$CHG)!=T,]
# 必要なデータだけ残す
adef3<-subset(adef3,select=c("SUBJID","month","CHG"))
# プロット
ggplot(adef3, aes(x=month,y=CHG,group=SUBJID,colour=SUBJID))+
geom_line()+guides(colour=F)+geom_hline(yintercept=0)+
scale_y_continuous(limits = c(-30, 35))
###############################################
### Non-linear Fitting by a kinetics model
###############################################
## モデル式
# パラメータは正規分布
MDLFUNK <- function(a,k, k1, xx) a*exp(-k*xx)-(a+70)*exp(-k1*xx)+70
# パラメータは対数正規分布
# MDLFUNK <- function(k, k1, xx) 70*(1-exp(-exp(k)*xx))-70/(exp(k)-exp(k1))*(exp(-exp(k1)*xx)-exp(-exp(k)*xx))
## 時間
t<-sort(unique(adef3$month))
## パラメータ初期値
# パラメータは正規分布
kstart<-.02
k1start<-.004
astart<-20
# パラメータは対数正規分布
# kstart<-log(.003)
# k1start<-log(24.4)
## SUBJIDの上5桁で群を作成
adef3$GRP<-as.numeric(substr(adef3$SUBJID,1,5))
(adef3.grp <- groupedData(CHG ~ month | SUBJID,
data=adef3))
## nls用に平均値のデータを作成
adef3.mean<-tapply(adef3.grp$CHG,adef3.grp$month,mean)
adef3.mean<-data.frame(month=as.numeric(names(adef3.mean)),CHG=adef3.mean)
row.names(adef3.mean)<-NULL
## 平均値のデータをプロット
plot(adef3.mean,type="b",col="blue")
abline(h=0,v=0)
## nlsによるフィッティング(平均値)
# (nls.out <- nls(CHG~MDLFUNK(k, k1, month), data=adef3.mean, start=list(k=kstart, k1=k1start)))
(nls.out <- nls(CHG~MDLFUNK(a, k, k1, month),
data=adef3.mean, start=list(a=astart, k=kstart, k1=k1start),
control=list(minFactor=.0001,maxiter=500)))
summary(nls.out)
predict.c <- predict(nls.out)
plot(adef3.mean, ann=F,xlim=c(min(adef3.mean$month),max(adef3.mean$month)),
ylim=c(min(adef3.mean$CHG),max(adef3.mean$CHG))); par(new=T)
plot(adef3.mean$month, predict.c, type="l", xlim=c(min(adef3.mean$month),max(adef3.mean$month)),
ylim=c(min(adef3.mean$CHG),max(adef3.mean$CHG)),
col="blue")
abline(h=0,v=0)
## NLME
ADAS.nlme <- nlme(
CHG ~ MDLFUNK(a, k, k1, month),
data=adef3.grp,
fixed = a + k + k1 ~ 1,
random = a + k + k1 ~ 1,
start=coef(as.list(nls.out)), control=list(minScale=.0001),verbose=T)
summary(ADAS.nlme)
## 観測値 vs 予測値のプロット
plot(ADAS.nlme,CHG~fitted(.),abline=c(0,1))
## 個別データに対するフィッティングのプロット
ADAS.pred<-augPred(ADAS.nlme,level=0:1)
# ADAS.pred$GRP<-as.numeric(substr(ADAS.pred$.groups,1,5))
ADAS.pred.id<-data.frame(.groups=unique(ADAS.pred$.groups))
ADAS.pred.id$NUM<-1:nrow(ADAS.pred.id)
ADAS.pred.id$GRP<-ceiling(ADAS.pred.id$NUM/8)
ADAS.pred.id2<-ADAS.pred$.groups
ADAS.pred<-inner_join(ADAS.pred , ADAS.pred.id ,by=".groups")
NMBR<-unique(ADAS.pred$GRP)
# plot() on an augPred object dispatches to a lattice/trellis method, and
# trellis objects are only auto-printed at top level -- inside a for loop
# nothing is rendered unless the object is print()ed explicitly. That is why
# the original loop appeared to produce no plots and was followed by 116
# hand-written top-level plot() calls as a workaround. Wrapping the call in
# print() renders one page per group and makes the workaround unnecessary.
for (i in seq_along(NMBR)) {
  print(plot(ADAS.pred[ADAS.pred$GRP == NMBR[i], ]))
}
|
c1ae6f03348781a3a9b378fcccf2bddf59edfa54 | 32a6385438661229a0aacf72e4537ba3a772724d | /cachematrix.R | 2aaaae55e7d2673a8f33cd2fc378fe15e3df140c | [] | no_license | PetaBosley/ProgrammingAssignment2 | 35d3d888a37f4e95deb2b95e364faf46138c63ba | 882279d6bd143bf8f018dc8379b79e8b13505010 | refs/heads/master | 2021-01-09T06:43:32.767156 | 2015-06-21T10:41:05 | 2015-06-21T10:41:05 | 37,596,336 | 0 | 0 | null | 2015-06-17T13:24:33 | 2015-06-17T13:24:32 | null | UTF-8 | R | false | false | 2,220 | r | cachematrix.R | ## Programming Assignment 2: Caching the Inverse of a Matrix
##
## The assignment consists of 2 function; makeCacheMatrix and cacheSolve (each shown and
## commented below in this file).
## The function code was unit tested in the R console by setting up a matrix, assigned to x
## (see below)
##
## x <- matrix(c(1,2,3,4), 2, 2)
## > x
## [,1] [,2]
## [1,] 1 3
## [2,] 2 4
##
## The functions were then executed and assigned to z (see below)
## checking for the expected, inverted result when z was printed to the console.
##
## > z <- cacheSolve(makeCacheMatrix(x))
##
## > z
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## makeCacheMatrix creates an object of class "matrix" that can cache its inverse.
## Assume that the matrix is square and invertible.
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of four accessors:
##   set(y)     - replace the stored matrix and invalidate the cached inverse
##   get()      - return the stored matrix
##   setinv(v)  - store a computed inverse in the cache
##   getinv()   - return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates any cached inverse
    },
    get = function() x,
    setinv = function(solve) cached_inverse <<- solve,
    getinv = function() cached_inverse
  )
}
## cacheSolve takes the created matrix object and calculates the inverse
## if the inverse has been calculated, that will be retrieved from the cache.
##
## Return the inverse of the matrix wrapped in 'x' (an object built by
## makeCacheMatrix). On a cache hit the stored inverse is returned directly
## (with a "getting cached data" message); on a miss the inverse is computed
## with solve(), stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    ## cache miss: compute, memoize, return
    result <- solve(x$get(), ...)
    x$setinv(result)
    return(result)
  }
  message("getting cached data")
  cached
}
|
0f5eadff117d1b3026be5c87f90da1e1e046ae7e | 7f18fef2f10c6ba6164e647f89870e4ad3b83874 | /man/spip_binary_path.Rd | e1b4bede252d2c2e810e0f5c603c4c448b8c6141 | [] | no_license | cran/CKMRpop | 9d7ba4a9674f706dac6ceca16fd19d5f0abd82d0 | fdcb234aaa37ca5605e686b4d2c84b86381da20d | refs/heads/master | 2023-06-16T15:03:50.829457 | 2021-07-17T09:00:06 | 2021-07-17T09:00:06 | 379,974,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 353 | rd | spip_binary_path.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spip_binary_path.R
\name{spip_binary_path}
\alias{spip_binary_path}
\title{return the path where spip should be in the R system paths}
\usage{
spip_binary_path()
}
\description{
It expects it to be in the R package directory after external installation.
}
\keyword{internal}
|
e8a587b3ef2e56684b77fa80d89a1436b7755660 | f43ff1e09138649558c2e90a75bd2d4f3cbbdbb6 | /source/Windows/R-Portable-Win/library/shiny/examples/03_reactivity/app.R | 393ebfe61973af405c6134cd2ad5e8670f455793 | [
"MIT",
"CC-BY-3.0",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"GPL-1.0-or-later",
"OFL-1.1",
"GPL-2.0-or-later",
"CC-BY-4.0",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"BSD-3-Clause"
] | permissive | romanhaa/Cerebro | 5b2d9371403c52f60341894f84cd0f6a006cc930 | 946ed178c986027d60af6013e63d1fc51ae8b371 | refs/heads/master | 2022-12-02T15:49:57.705873 | 2021-11-20T11:47:12 | 2021-11-21T17:09:37 | 164,686,297 | 87 | 23 | MIT | 2022-11-10T18:21:44 | 2019-01-08T16:09:59 | HTML | UTF-8 | R | false | false | 3,025 | r | app.R | library(shiny)
# Define UI for dataset viewer app ----
ui <- fluidPage(
# App title ----
titlePanel("Reactivity"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Text for providing a caption ----
# Note: Changes made to the caption in the textInput control
# are updated in the output area immediately as you type
textInput(inputId = "caption",
label = "Caption:",
value = "Data Summary"),
# Input: Selector for choosing dataset ----
selectInput(inputId = "dataset",
label = "Choose a dataset:",
choices = c("rock", "pressure", "cars")),
# Input: Numeric entry for number of obs to view ----
numericInput(inputId = "obs",
label = "Number of observations to view:",
value = 10)
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Formatted text for caption ----
h3(textOutput("caption", container = span)),
# Output: Verbatim text for data summary ----
verbatimTextOutput("summary"),
# Output: HTML table with requested number of observations ----
tableOutput("view")
)
)
)
# Define server logic to summarize and view selected dataset ----
server <- function(input, output) {
# Return the requested dataset ----
# By declaring datasetInput as a reactive expression we ensure
# that:
#
# 1. It is only called when the inputs it depends on changes
# 2. The computation and result are shared by all the callers,
# i.e. it only executes a single time
datasetInput <- reactive({
switch(input$dataset,
"rock" = rock,
"pressure" = pressure,
"cars" = cars)
})
# Create caption ----
# The output$caption is computed based on a reactive expression
# that returns input$caption. When the user changes the
# "caption" field:
#
# 1. This function is automatically called to recompute the output
# 2. New caption is pushed back to the browser for re-display
#
# Note that because the data-oriented reactive expressions
# below don't depend on input$caption, those expressions are
# NOT called when input$caption changes
output$caption <- renderText({
input$caption
})
# Generate a summary of the dataset ----
# The output$summary depends on the datasetInput reactive
# expression, so will be re-executed whenever datasetInput is
# invalidated, i.e. whenever the input$dataset changes
output$summary <- renderPrint({
dataset <- datasetInput()
summary(dataset)
})
# Show the first "n" observations ----
# The output$view depends on both the databaseInput reactive
# expression and input$obs, so it will be re-executed whenever
# input$dataset or input$obs is changed
output$view <- renderTable({
head(datasetInput(), n = input$obs)
})
}
# Create Shiny app ----
shinyApp(ui, server)
|
e56afb49d61e8cab0904a24c345785b096b58c9a | 4a070bd30cebc29fb5a17633d0cc96e27a580fab | /imhr/dist/R/bias_behavioral.R | d39e9899eb49eaa6ef05ff2691527005c1bd756b | [
"MIT",
"Python-2.0"
] | permissive | risoms/mdl | cf4d4b1da554701358508d816058743365819664 | 730ba3f3758412786a34b5309465a82c8f5b5d5d | refs/heads/master | 2021-06-25T06:36:09.959012 | 2020-12-15T03:04:33 | 2020-12-15T03:04:33 | 181,785,493 | 2 | 0 | MIT | 2020-06-12T14:37:06 | 2019-04-17T00:05:25 | Python | UTF-8 | R | false | false | 6,385 | r | bias_behavioral.R | rm(list=ls())
#******************************************************************** Imports
#install.packages("jtools")
library(parallel); library(tidyverse); library(itrak);
#**************************************************************** Definitions
suppressPackageStartupMessages(library(tidyverse))
data_path <- "~/Desktop/R33-analysis-master/output"
load_path <- paste(data_path, "1_process/data/behavioral", sep = "/")
tlbs_path <- paste(data_path, "2_tlbs/behavioral", sep = "/")
summary_path <- paste(data_path, "3_bias", sep = "/")
n_cores <- 4
n_trial <- 198
load_directory <- paste(load_path, list.files(load_path), sep = "/")
#*********************************************************************************************
#**************************************************************************************** TLBS
#run dotprobe
process_dotprobe <- function(path, save_path, n_trial){
df <- read_csv(path, na = c(".", ""), guess_max = 999999,
col_types = cols_only(participant = col_character(), #participant
TrialNum = col_double(),
type = col_character(),
event = col_character(),
trialType = col_character(),
Key_Resp.rt = col_number(),
Key_Resp.acc = col_double(), #Key_Resp.acc
LEmotion = col_character(), #LStim
REmotion = col_character(), #RStim
DotLoc = col_character() #DotStim
))
#convert acc to logicial
df$Key_Resp.acc <- as.logical(as.integer(df$Key_Resp.acc))
#rename variables
#colnames(df)[colnames(df) == 'TrialNum'] <- 'trial'
colnames(df)[colnames(df) == 'Key_Resp.rt'] <- 'RT'
# separate trial metadata from sample-measurement data
itrak_data <- df %>%
select(id = participant, trial = TrialNum, event = event, trialType, RT = RT)
#get summary of trial level info
trial_info <- df %>%
##add new variables
mutate(dot_location = gsub(".$", "", DotLoc),
emo_location = ifelse(LEmotion == "Neutral", "Right", "Left"),
congruent = dot_location == emo_location) %>%
##select variables
select(id = participant, trial = TrialNum, dot_location, trialType,
emo_location, congruent, accurate = Key_Resp.acc) %>% distinct()
# get RT
RT <- df %>%
select(id = participant, trial = TrialNum, RT = RT, trialType) %>%
##get unique trials
distinct(trial, .keep_all = TRUE) %>%
##select variables
select(id, trial, RT)
# set rt as decimal value
RT$RT <- RT$RT / 1000
# assign missing values to incorrect trials
RT$RT[!trial_info$accurate] <- NA
# assign missing values to outliers
## first eliminate trials with RTs less than 0.2 s or more than 1.5 s
## then eliminate trials more than 3 MADs from the median
RT$RT[is_outlier(RT$RT, abs_lim = c(.2, 1.5), mad_lim = 3)] <- NA
# join RT data to trial data
trial_data <- left_join(trial_info, RT, by = c("id", "trial")) %>%
mutate(TLBS = get_tlbs(RT, congruent))
#write trial data
write_csv(trial_data,paste(save_path, "/", trial_data$id[1], ".csv", sep = ""))
df
}
#************************************************************* Run Analysis
cl <- makeCluster(n_cores)
clusterEvalQ(cl, library(tidyverse))
clusterEvalQ(cl, library(itrak))
tlbs <- parLapply(cl, load_directory, process_dotprobe, save_path = tlbs_path, n_trial = n_trial)
stopCluster(cl)
#*********************************************************************************************
#*************************************************************************** Gaze Bias Summary
## Summarize dot-probe attention-bias measures from one per-subject TLBS csv.
## file_name: path to a trial-level csv produced upstream (columns id, trial,
##            trialType, congruent, RT, TLBS, initial_bias, final_bias,
##            total_bias are read; others are dropped by cols_only()).
## tlbs_folder: NOTE(review) -- this argument is never used in the body;
##              presumably a leftover from an earlier version. TODO confirm.
## groupby: list of column names passed to group_by(.dots = ...) so the same
##          function can nest by subject or by subject x trialType.
## Returns a grouped summary with bias score (itrak::get_bs), valid-trial
## count, toward/away proportions and means, and TLBS variability (sd).
df_bias <- function(file_name, tlbs_folder, groupby){
read_csv(file_name, col_types = cols_only(id = col_character(),
trial = col_double(),
trialType = col_character(),
congruent = col_logical(),
RT = col_double(),
TLBS = col_double(),
initial_bias = col_double(),
final_bias = col_double(),
total_bias = col_double())) %>%
#mutate trial as integer
mutate(trial = as.integer(trial)) %>%
#group by nested value
group_by(.dots = groupby) %>%
#calculate values
summarize(dp_bias = get_bs(RT, congruent),
n_dp_valid = sum(!is.na(RT)),
pct_dp_toward = sum(!is.na(TLBS) & TLBS > 0) / sum(!is.na(RT)),
mean_dp_toward = mean(TLBS[!is.na(TLBS) & TLBS > 0]),
mean_dp_away = -1 * mean(TLBS[!is.na(TLBS) & TLBS < 0]),
var_dp_bias = sd(TLBS, na.rm = TRUE)
)
}
#*************************************************************** nested by subject
groupby=list('id')
tlbs_directory <- paste(tlbs_path, list.files(tlbs_path), sep = "/")
subject_bias_summary <- map(tlbs_directory, df_bias, groupby=groupby) %>%
reduce(bind_rows) %>% ungroup() %>%
mutate_if(is.numeric, function(x) ifelse(is.nan(x), 0, x))
#identify whether nested by subject-level or trialType
subject_bias_summary$nested <- 'subject'
#create blank trialType for later merging
subject_bias_summary$trialType <- '.'
#*************************************************************** nested by subject, trialtype
groupby=list('id','trialType')
tlbs_directory <- paste(tlbs_path, list.files(tlbs_path), sep = "/")
trial_bias_summary <- map(tlbs_directory, df_bias, groupby=groupby) %>%
reduce(bind_rows) %>% ungroup() %>%
mutate_if(is.numeric, function(x) ifelse(is.nan(x), 0, x))
#drop any NA values
trial_bias_summary <- trial_bias_summary[!is.na(trial_bias_summary$trialType),]
#identify whether nested by subject-level or trialType
trial_bias_summary$nested <- 'trialType'
#************************************************************** Combine Tables, export to csv
bias_summary <- rbind(subject_bias_summary, trial_bias_summary)
write_csv(bias_summary, paste(summary_path, "/behavioral_bias", ".csv", sep = ""))
|
c9124bed3e6cf8c78f59e40ed0d52556ed219709 | 28bc54e930ed1512fa42bccada8d6f039e43b0a6 | /MachineLearningR/Rsvm/svm.R | e5be0502a1395cfe4ba06894e0bec67614e35133 | [] | no_license | sksom/Machine-Learning-R-Projects | 1f349ed14694152f245c6feaaabf2fa6fb22d8bc | 3bb9c676dc900952e7fc0d808141e43b386d5657 | refs/heads/master | 2020-03-19T02:56:05.358008 | 2018-06-01T07:09:53 | 2018-06-01T07:09:53 | 135,676,599 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,661 | r | svm.R | # SVM
# data
data("iris")
str(iris)
library(ggplot2)
qplot(Petal.Length,Petal.Width,data = iris,
color=Species)
# svm library
library(e1071)
mymodel <- svm(Species~.,data=iris)
summary(mymodel) # SVM-Type: C-classification , if response variable would have been
# continuous then this would have been regression
plot(mymodel, data=iris,
Petal.Width~Petal.Length,
slice = list(Sepal.Width=3, Sepal.Length=4))
# confusion matrix and misclassification error
pred <- predict(mymodel,iris)
tab <- table(Predicted=pred,Actual=iris$Species)
tab
# accuracy can be predicted as (50+48+48)/(150), and error rate as 1-accuracy
1- sum(diag(tab))/sum(tab)
# our svm used radial kernel, we can also use linear kernel, or polynomial, or sigmoid
mymodel <- svm(Species~.,data=iris,
kernel="sigmoid")
summary(mymodel) # SVM-Type: C-classification , if response variable would have been
# continuous then this would have been regression
plot(mymodel, data=iris,
Petal.Width~Petal.Length,
slice = list(Sepal.Width=3, Sepal.Length=4))
# confusion matrix and misclassification error
pred <- predict(mymodel,iris)
tab <- table(Predicted=pred,Actual=iris$Species)
tab
# accuracy can be predicted as (50+48+48)/(150), and error rate as 1-accuracy
1- sum(diag(tab))/sum(tab)
# best is radial kernel, worst is sigmoid kernel
# fine tune our svm
mymodel <- svm(Species~.,data=iris)
summary(mymodel) # SVM-Type: C-classification , if response variable would have been
# continuous then this would have been regression
plot(mymodel, data=iris,
Petal.Width~Petal.Length,
slice = list(Sepal.Width=3, Sepal.Length=4))
# TUNING OCCURS HERE
# epsilon: 0,0.1,0.2,0.3......1
# cost: 2^2,2^3,2^4,......2^9 Default is 1
set.seed(1234)
tmodel <- tune(svm, Species~.,data=iris,
ranges = list(epsilon=seq(0,1,0.1),cost=2^(2:7))) # don't do this for large
# data set because we can see the combinations value is 88 for this, time-consuming
plot(tmodel) # darker regions means better performance, i.e i can change cost to upto
# 2^7 and re run
summary(tmodel)
# choose best model
mymodel <- tmodel$best.model
summary(mymodel) # generally for classification type problems radial kernel is best
plot(mymodel, data=iris,
Petal.Width~Petal.Length,
slice = list(Sepal.Width=3, Sepal.Length=4))
# confusion matrix and misclassification error
pred <- predict(mymodel,iris)
tab <- table(Predicted=pred,Actual=iris$Species)
tab
# accuracy can be predicted as (50+48+48)/(150), and error rate as 1-accuracy
1- sum(diag(tab))/sum(tab)
# much better
|
a895d8add7c82b0b409e92df2465bcfe8dd54a4a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/backports/examples/strrep.Rd.R | dce055404fba36cbc86eefe6c6f558b8789a54fa | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 325 | r | strrep.Rd.R | library(backports)
### Name: strrep
### Title: Backport of strrep for R < 3.3.0
### Aliases: strrep
### Keywords: internal
### ** Examples
# get function from namespace instead of possibly getting
# implementation shipped with recent R versions:
bp_strrep = getFromNamespace("strrep", "backports")
bp_strrep("-", 10)
|
24e790fe543e59401cdf8366541703d0b21e56a5 | c8d1e22aa304ab606e3d8e1c7ba85e00f18a3ff7 | /vis_analysis/LVIS_functions.R | 3dc41328c8333a8796831eceaf50ebf7c1b62837 | [] | no_license | khl0798/LVIS_pipeline | 5d527debf67723be95b8d4c673133b8bc8b1a76f | 300ec340eb01d3a3a552702fc2472f696ed67281 | refs/heads/master | 2023-07-18T00:35:23.318820 | 2021-08-30T22:32:02 | 2021-08-30T22:32:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,887 | r | LVIS_functions.R | ###the starting point is all_P1_collected_IS
###based on the sample_ori column, we subset all VIS from a certain expt.
###we standardize sample_ori in the form Px_celltype_time, make sure boost is transformed.
###
library(ggplot2);
## Convert one sample's per-VIS read counts into a 21-slot composition:
## the relative abundance of the top 20 integration sites plus a final
## "rest" slot holding the combined share of all remaining sites.
## tmp: data frame with an nReads column (nReads is used to count abundance).
## Returns a numeric vector of length 21 summing to 1.
count_VIS_freq <- function(tmp){
  reads <- tmp$nReads
  n_pad <- 21 - length(reads)
  if (n_pad > 0) {
    # pad with zeros so samples with <21 sites still yield 21 slots
    reads <- c(reads, rep(0, n_pad))
  }
  sorted <- sort(reads, decreasing = TRUE)
  slices <- c(sorted[1:20], sum(sorted[21:length(sorted)]))
  slices / sum(slices)
}
#use dataframe of all_collected_VIS as input
get_top20_composition_and_nVIS_all_samples <- function(all_P1_collected_IS_rename){
print('remember to get rid of the VISs with spike');
all_expts<-unique(all_P1_collected_IS_rename$sample_ori);
nVIS<-matrix(0,length(all_expts),1);
top20_IS_freq_vs_expts<-matrix(0,21,length(all_expts));
colnames(top20_IS_freq_vs_expts)<-all_expts;
rownames(nVIS)<-all_expts;
colnames(nVIS)<-'nVIS';
rownames(top20_IS_freq_vs_expts)<-c(c(1:20),'rest');
for (e in all_expts){
tmp<-all_P1_collected_IS_rename[which(all_P1_collected_IS_rename$sample_ori==e),];
nVIS[e,1]<-dim(tmp)[1];
x<-count_VIS_freq(tmp);
top20_IS_freq_vs_expts[,e]<-x;
}
res<-list();
res[['num_VIS']]<-nVIS;
res[['top20_IS_freq']]<-top20_IS_freq_vs_expts;
return(res);
}
## Compute clonal-diversity summaries for one sample.
## tmp: numeric vector of per-VIS abundances (zeros allowed; only positive
##      entries contribute to the diversity measures; an all-zero input
##      errors, as in the original implementation).
## Returns a list with:
##   sample_OCI     - oligoclonality index: 1 - 2*AUC of the normalized
##                    cumulative-share curve (Gini-style; 0 even, 1 monoclonal)
##   sample_entropy - Shannon entropy (base 2) of abundance shares;
##                    disadvantage: based only on proportions, not on the
##                    exact number of species
##   sample_Chao    - Chao1 estimate of the total number of species
##   sample_UC50    - how many top species are needed to cover half the reads
##   sample_nVIS    - length of the input vector (including zero entries)
get_samples_diversity_measures <- function(tmp){
  pos <- tmp[tmp > 0]

  ## OCI via the trapezoid rule on the normalized cumulative curve.
  ## Computed directly in base R; this replaces pracma::trapz and removes
  ## the library(pracma) call that was executed on every invocation.
  y <- as.numeric(cumsum(sort(pos)))
  y <- c(0, y / y[length(y)])
  x <- seq(0, 1, 1 / (length(y) - 1))
  auc <- sum(diff(x) * (y[-1] + y[-length(y)]) / 2)
  sample_OCI <- 1 - 2 * auc

  ## Shannon entropy (bits) of the abundance distribution.
  p <- as.numeric(pos) / sum(pos)
  sample_entropy <- -sum(p * log2(p))

  ## Chao1: observed richness plus a singleton/doubleton correction.
  D <- length(pos)
  ny <- table(pos)
  f1 <- sum(ny == 1)
  f2 <- sum(ny == 2)
  sample_Chao <- D + f1 * (f1 - 1) / (2 * (f2 + 1))

  ## UC50: first index (largest clones first) where cumulative share > 0.5.
  cp <- cumsum(sort(as.numeric(pos), decreasing = TRUE) / sum(pos))
  sample_UC50 <- which(cp > 0.5)[1]

  list(sample_OCI = sample_OCI,
       sample_entropy = sample_entropy,
       sample_Chao = sample_Chao,
       sample_UC50 = sample_UC50,
       sample_nVIS = length(tmp))
}
## Apply get_samples_diversity_measures() to every column (sample) of a
## VIS-by-sample abundance matrix. Returns a 4 x n_samples numeric matrix
## with rows sample_OCI, sample_entropy, sample_Chao, sample_UC50 and
## columns named after the input columns.
get_diversity_measures_all_samples <- function(counts_nR_IS_expts_Filter){
  expts <- colnames(counts_nR_IS_expts_Filter)
  metrics <- c('sample_OCI', 'sample_entropy', 'sample_Chao', 'sample_UC50')
  diversity <- matrix(0, length(metrics), length(expts),
                      dimnames = list(metrics, expts))
  for (e in expts) {
    m <- get_samples_diversity_measures(counts_nR_IS_expts_Filter[, e])
    diversity[, e] <- c(m$sample_OCI, m$sample_entropy,
                        m$sample_Chao, m$sample_UC50)
  }
  diversity
}
## Reshape the top-20 composition matrix and per-sample VIS counts into the
## two long data frames that draw_pie_charts() expects.
## top20_IS_freq: 21 x n_samples matrix (rows = VIS rank slots, columns =
##                sample_ori labels of the form "Px_celltype_time").
## num_VIS: per-sample VIS counts, same column order as top20_IS_freq.
## Returns list(df = per-slice composition rows, dt = per-sample nVIS labels).
## NOTE: reshape2::colsplit() inside transform() yields columns named
## expt.patients / expt.Celltype / expt.time -- the "bulk" -> "TNC" rename
## below relies on that naming.
prepare_pie_charts <- function(top20_IS_freq,num_VIS){
library(reshape2);
# melt to long form: one row per (VIS rank slot, sample) pair
data<-melt(top20_IS_freq);
colnames(data)<-c("VIS_label","sample_ori","composition");
# split "Px_celltype_time" sample labels into patient/celltype/time columns
df = transform(data, expt=colsplit(sample_ori, "_", names = c('patients','Celltype','time')));
#df$expt.Celltype<-as.character(df$expt$Celltype);
#colnames(df)<-c("VIS","expt","composition","patients","time","Celltype");
# rename the "bulk" cell-type label to "TNC" for display
iz<-which(df$expt.Celltype=='bulk');
df$expt.Celltype[iz]<-'TNC';
#df$expt.Celltype<-factor(df$expt.Celltype,levels=c('CD3','CD14CD15','CD19','CD56','TNC'));
#df<-cbind(df,patient_time=paste0(df$patients,":",df$time))
#df$patient_time<-factor(df$patient_time,levels=c('P1:30mo','P2:24mo','P3:24mo','P4:18mo','P5:18mo','P6:18mo','P7:12mo','P8:12mo','P10:6mo','P11:16wks'));
# second frame: one row per sample carrying the VIS count for labeling
all_expts<-colnames(top20_IS_freq);
dat_text <- data.frame(
sample_ori=all_expts,
nVIS = num_VIS#
)
dt = transform(dat_text, expt=colsplit(sample_ori, "_", names = c('patients','Celltype','time')));
#dt$expt.Celltype<-as.character(dt$expt.Celltype);
#colnames(dt)<-c("sample_ori","nVIS",'expt.patients', 'expt.time','expt.Celltype');
iz<-which(dt$expt.Celltype=='bulk');
dt$expt.Celltype[iz]<-'TNC';
#dt$expt.Celltype<-factor(dt$expt.Celltype,levels=c('CD3','CD14CD15','CD19','CD56','TNC'));
res<-list();
res[['df']]<-df;
res[['dt']]<-dt;
print('modify the columns of df, and generate the required factors before using the function draw_pie_charts()');
return(res);
}
## Draw a faceted grid of VIS-composition pie charts (stacked bars in polar
## coordinates), one pie per (cell type, time point) facet, with the sample's
## VIS count printed at the pie center. df/dt come from prepare_pie_charts()
## (after the caller has set up the required factor levels).
## Returns the ggplot object; caller is responsible for printing/saving it.
draw_pie_charts <- function(df,dt){
library(ggplot2);
library(scales);
# 20 hue-palette colors for the top-20 slots + grey for the "rest" slot
color_p<-hue_pal()(21);
color_p[21]<-"grey";
p<- ggplot(df, aes(x="", y=composition, color=VIS_label, fill=VIS_label)) +
geom_bar(width = 1, stat = "identity") +
#coord_polar("y", start=0) + facet_grid(expt.time ~ expt.Celltype ) +
coord_polar("y", start=0) + facet_grid(expt.Celltype ~ expt.time ) +
theme(axis.text = element_blank(),
axis.ticks = element_blank(),
panel.grid = element_blank(),
strip.text.x = element_text(size = 12),
strip.text.y = element_text(size = 12)) + scale_fill_manual(values=color_p) + scale_color_manual(values=color_p);
# overlay the per-sample VIS count at the center of each pie
p <- p + geom_text(aes(x=0, y=0,label = as.character(nVIS)),data=dt,inherit.aes=FALSE, parse=FALSE,size=5);
return(p);
#ggsave("all_patients_last_samples_composition.pdf",plot = last_plot(), device = NULL, path = NULL,scale = 1, width = 10, height = 14, dpi = 300);
}
#this function is unnecessary. the use of scientific=FALSE in the format() function fix the problem.
## Sanity-check that the start/end coordinate columns do not stringify in
## scientific notation (e.g. "1e+06"), which would corrupt downstream
## "chr:start-end" region strings. Prints "start ok"/"end ok", or the
## offending row indices otherwise.
validate_coordinates <- function(all_P5_collected_IS){
  start_chr <- as.character(all_P5_collected_IS$start)
  end_chr <- as.character(all_P5_collected_IS$end)
  ## length(grep(...)) == 0 replaces pracma::isempty(), removing the
  ## original's implicit dependency on pracma being attached.
  bad_start <- grep("e", start_chr)
  if (length(bad_start) == 0) {
    print("start ok")
  } else {
    print(paste0("check start:", as.character(bad_start)))
  }
  bad_end <- grep("e", end_chr)
  if (length(bad_end) == 0) {
    print("end ok")
  } else {
    print(paste0("check end:", as.character(bad_end)))
  }
}
## Deduplicate and genomically sort a patient's collected VIS table, and map
## every original row back to its unique, sorted integration site.
## all_P1_collected_IS: collection of VIS-caller output rows from one patient,
##   with columns seqnames/start/end/strand (plus per-sample metadata).
## Returns list(u_all_IS_sort = unique sorted sites with an 'ind' index
## column, all_collected_IS_mapped = input rows plus a u_ind column pointing
## into u_all_IS_sort). Depends on the bedr package (and bedtools on PATH).
preprocess_all_VIS_collection<- function(all_P1_collected_IS){
#all_P1_collected_IS is the collection of output from the same patient
#the format here is to make sure the coordinate wouldn't be interpreted as a number..
# "chr.start.end.strand" keys; scientific=FALSE prevents coordinates like
# 1e+06 from corrupting the key strings (see validate_coordinates above)
all_IS_P1<-paste0(all_P1_collected_IS$seqnames,'.',format(all_P1_collected_IS$start,trim=TRUE,scientific=FALSE),'.',format(all_P1_collected_IS$end,trim=TRUE,scientific=FALSE),'.',all_P1_collected_IS$strand);
all_IS_P1_aux<-paste0(all_P1_collected_IS$seqnames,':',format(all_P1_collected_IS$start,trim=TRUE,scientific=FALSE),'-',format(all_P1_collected_IS$end,trim=TRUE,scientific=FALSE));
#20152
u_all_IS_P1<-unique(all_IS_P1);#strand specific...
# split the dotted keys back into a 4-column table (chr, start, end, strand)
u_all_IS_P1<-read.table(text = u_all_IS_P1, sep = ".");
library(bedr);
x<-paste0(u_all_IS_P1[,1],":",u_all_IS_P1[,2],'-',u_all_IS_P1[,3]);
is.x.valid <- is.valid.region(x); #get rid of other chr
x <- x[is.x.valid];
u_all_IS_P1<-u_all_IS_P1[is.x.valid,];
dim(u_all_IS_P1)
# pad to BED-like 6 columns (tmp1/tmp2 are placeholder name/score fields)
u_all_IS_P1<-cbind(u_all_IS_P1[,c(1:3)],tmp1='.',tmp2='.',strand=u_all_IS_P1[,4]);
colnames(u_all_IS_P1)<-c("chr","st","ed","tmp1","tmp2","strand");
#rownames(u_all_IS_P1)<-paste0(u_all_IS_P1$chr,':',u_all_IS_P1$st,'-',u_all_IS_P1$ed);
#x<-rownames(u_all_IS_P1);
# sort regions genomically, then reorder the table to match the sort order
x<-paste0(u_all_IS_P1$chr,':',u_all_IS_P1$st,'-',u_all_IS_P1$ed);
x.sort <- bedr.sort.region(x);
i_map<-match(x.sort,x);
u_all_IS_P1<-u_all_IS_P1[i_map,];
u_all_IS_P1[,1]<-as.character(u_all_IS_P1[,1]);
# for each original row, find its index among the unique sorted sites
# (NA for rows whose region was dropped as invalid above)
map_back<-match(all_IS_P1,paste0(u_all_IS_P1[,1],'.',u_all_IS_P1[,2],'.',u_all_IS_P1[,3],'.',u_all_IS_P1[,6]));
#max(map_back[!is.na(map_back)]);
#how original collected IS go to the u_all_IS_P1
u_all_IS_P1<-cbind(u_all_IS_P1[,c(1:3)],ind=c(1:dim(u_all_IS_P1)[1]),aux=1,strand=u_all_IS_P1[,6]);
all_P1_collected_IS_mapped<-u_all_IS_P1[map_back,];
all_P1_collected_IS_mapped<-cbind(all_P1_collected_IS,u_ind=all_P1_collected_IS_mapped$ind);
res<-list();
res[['u_all_IS_sort']]<-u_all_IS_P1;
res[['all_collected_IS_mapped']]<-all_P1_collected_IS_mapped;
#back to the input, use the ind col to back to the unique
return(res);
}
# Merge adjacent unique VIS with bedtools merge (strand-specific, d = 0) and
# build a lookup table from each unique-site row index to its merged-site id.
# u_all_IS_P1: 6-column bed-style frame from preprocess_all_VIS_collection()
#   (col 4 = 'ind' original row index, col 5 = 'aux' constant 1, col 6 = strand).
# Returns list(u_VIS_merge = merged table with 'eff_ind' id column,
#              map = data.frame(ori_index, new_index)).
merge_all_preprocessed_VIS_collection <-function(u_all_IS_P1){
###we can merge adjacinet VIS..with d=8? d=4? d=0?
#currently, we use d=0 in avoid VIS sites getting too big
#the Lance pipeline tested for d=5,6,7,8 to merge in in the raw read level
#it is different from here..
#-s, enforcing strandedness...
#sum for 5th col -> count how many sites are merged
#distict for column 6, as -s are used, it's not possible to merge sites in opposite strands together
#collapse for column 4...put index in delimited table..
u_all_IS_P1.merge<-bedr(
engine = "bedtools",
input = list(i = u_all_IS_P1),
method = "merge",
params = "-s -d 0 -c 5,4,6 -o sum,collapse,distinct"
);
u_all_IS_P1.merge<-cbind(u_all_IS_P1.merge,eff_ind=c(1:dim(u_all_IS_P1.merge)[1]));
#note that column ind refer to how many sites in the input list that merged list covers
#aux stores the label those sites (indexed by row # in u_all_IS_P1. the aux information is then used to generate th
#output, u_VIS_merg and map
U<-u_all_IS_P1.merge$aux;
V<-u_all_IS_P1.merge$eff_ind;
# Vectorized expansion of the comma-collapsed index lists; replaces the
# original O(n^2) c()-growing loop.  as.character() guards against the
# column coming back as a factor.
idx_list <- strsplit(as.character(U), ",", fixed = TRUE);
U_final <- as.numeric(unlist(idx_list));
V_final <- rep(V, lengths(idx_list));
map<-cbind(ori_index=U_final,new_index=V_final);
rownames(map)<-U_final;
res<-list();
res[['u_VIS_merge']]<-u_all_IS_P1.merge;
res[['map']]<-data.frame(map);
return(res);
}
# Pipeline wrapper: deduplicate/sort the collected VIS, merge overlapping
# sites with bedtools, and attach the merged-site id to every input row.
# all_P1_collected_IS: raw collected sites (seqnames/start/end/strand, ...).
# Returns list(X = input with extra columns u_ind (unique-site row) and
# u_merge_ind (merged-site id), u_VIS_merge = the merged-site table).
get_merged_ID_from_all_VIS_collection <- function(all_P1_collected_IS){
#in the preprocess functions, a simple unique is performed and sites mapped to the non-canonical chr are removed
#the result is u_all_IS_P1, an additional column is added to the input frame, it maps every site in the input to the u_all_IS. NA will be used
#if the site cannot be mapped, because it is found in one of the non-canonical chr,
res1<-preprocess_all_VIS_collection(all_P1_collected_IS);
u_all_IS_P1<-res1[['u_all_IS_sort']];
all_P1_collected_IS_mapped<-res1[['all_collected_IS_mapped']];
#NB, essentially, all sites in the overlapping unique sites were merged using bedtools.
#u_VIS_merge stores the merged list, the mapping between the merged list and the input list is
#defined in the array map
res2<-merge_all_preprocessed_VIS_collection(u_all_IS_P1);
u_VIS_merge<-res2$u_VIS_merge;
u_VIS_to_merged_VIS<-res2$map;
#head(all_P1_collected_IS)
#head(all_IS_P1_collected_mapped)
#iz<-head(all_IS_P1_collected_mapped)$u_ind;
#the unique VIS index to the merged index..
#u_VIS_to_merged_VIS[iz,]
#after 2 steps mapping, the original list is mapped all_P1_collected_IS got a new merged id...listed in u_VIS_merge
#(shown in data.frame X);
# two-step lookup: original row -> unique-site row (u_ind) -> merged id
X<-cbind(all_P1_collected_IS_mapped,u_merge_ind=u_VIS_to_merged_VIS[all_P1_collected_IS_mapped$u_ind,]$new_index);
#X[order(X$nReads,decreasing = TRUE),];
res<-list();
res[['X']]<-X;
res[['u_VIS_merge']]<-u_VIS_merge;
return(res);
}
# Build a (merged VIS x sample) read-count matrix.
# X: data frame with columns sample_ori, u_merge_ind and nReads (the 'X'
#    output of get_merged_ID_from_all_VIS_collection()).
# Returns a matrix with one row per merged VIS id (1..max(u_merge_ind)) and
# one column per sample; entry = total nReads of that site in that sample
# (0 when the site is absent from the sample).
generate_u_VIS_merge_samples_matrix <- function(X){
all_expts<-unique(X$sample_ori);
n<-max(X$u_merge_ind)
uID<-matrix(0,n,length(all_expts));
rownames(uID)<-c(1:n);
colnames(uID)<-all_expts;
for (e in all_expts){
X2<-X[which(X$sample_ori==e),];
# tapply sums reads per merged id in one pass; replaces the original
# per-site c()-growing loop and silently skips NA merged ids (which
# previously crashed the matrix assignment)
sums <- tapply(X2$nReads, X2$u_merge_ind, sum);
uID[as.numeric(names(sums)),e]<-sums;
}
return(uID);
}
# Same as generate_u_VIS_merge_samples_matrix(), but aggregates the
# nUniqueReads column instead of nReads.
# X: data frame with columns sample_ori, u_merge_ind and nUniqueReads.
# Returns a (merged VIS x sample) matrix of unique-read counts.
generate_u_VIS_merge_samples_matrix_unique <- function(X){
all_expts<-unique(X$sample_ori);
n<-max(X$u_merge_ind)
uID<-matrix(0,n,length(all_expts));
rownames(uID)<-c(1:n);
colnames(uID)<-all_expts;
for (e in all_expts){
X2<-X[which(X$sample_ori==e),];
# tapply replaces the original per-site c()-growing loop (kept
# consistent with generate_u_VIS_merge_samples_matrix)
sums <- tapply(X2$nUniqueReads, X2$u_merge_ind, sum);
uID[as.numeric(names(sums)),e]<-sums;
}
return(uID);
}
# Per-sample noise filter on a (VIS x sample) count matrix.
# For each column, counts are sorted and the cutoff is placed at the first
# count jump past the 'remove' fraction of cumulative read mass; all counts
# below the cutoff are zeroed.
# counts_nR_IS_expts: numeric matrix with sample column names.
# remove: fraction (0..1) of total reads to treat as noise.
filter_vis<-function(counts_nR_IS_expts,remove){
all_expts<-colnames(counts_nR_IS_expts);
counts_nR_IS_expts_filter<-counts_nR_IS_expts;
for (e in all_expts){
x<-counts_nR_IS_expts[,e];
r<-sort(as.vector(x[x>0]));
cdf<-cumsum(r)/sum(r);
aux<-c(0,diff(r));
i_pick<-which((cdf>remove)&(aux>0))[1];
# guard: when no count jump lies past the removal fraction, i_pick is
# NA and the original code crashed on x[x < NA] <- 0; leave the column
# unfiltered instead
if (!is.na(i_pick)) {
cut<-r[i_pick];
x[x<cut]<-0;
}
counts_nR_IS_expts_filter[,e]<-x;
}
return(counts_nR_IS_expts_filter);
}
##since counts_IS_expts are compiled in a strand-specific way, the venn here is strand-specific
# For each requested sample, collect the row names (VIS ids) that have a
# positive count in that sample's column; returns a named list suitable as
# venn-diagram input.  Strand-specific, because counts_IS_expts itself was
# compiled strand-specifically.
prepare_venn_diagram_samples <- function(counts_IS_expts, pick_samples){
  members <- lapply(pick_samples, function(smp) {
    rownames(counts_IS_expts)[which(counts_IS_expts[, smp] > 0)]
  })
  names(members) <- pick_samples
  return(members)
}
# Sum VIS counts per cell type across samples.
# Sample column names must follow the "patients_type_time" convention; the
# middle token is matched against the fixed cell-type list uT.
# Returns a data frame with one row per VIS and one column per cell type.
collect_IS_per_celltypes <- function(counts_IS_expts){
library(dplyr)
library(tidyr)
# parse "patients_type_time" out of the sample column names
X<-colnames(counts_IS_expts)
X<-data.frame(X);
X<-X %>% separate(X,c("patients","type","time"),"_");
rownames(X)<-colnames(counts_IS_expts);
# fixed cell-type vocabulary; types absent from the data yield all-zero
# columns (rowSums over zero columns)
uT<-c('bulk','CD3','CD19','CD14CD15','CD56','CD34');
#unique(X$type);
# first type initializes the result frame, the loop appends the rest
i<-1;
tt<-uT[i];
iz<-which(X$type==tt);
r<-rowSums(as.matrix(counts_IS_expts[,rownames(X)[iz]]));
all_r<-data.frame(r);
for (tt in uT[2:length(uT)]){
iz<-which(X$type==tt);
# as.matrix keeps rowSums working when a type has a single sample
r<-rowSums(as.matrix(counts_IS_expts[,rownames(X)[iz]]));
all_r<-cbind(all_r,r);
}
colnames(all_r)<-uT;
return(all_r);
}
# Collapse each merged VIS interval to its 1bp midpoint, then compute
# windowed genomic densities with circlize::genomicDensity() -- overall and
# separately per strand.
# u_VIS_merge: merged-site table (cols 1-3 = chr/start/end, col 6 = strand).
# win_size: window size passed to genomicDensity().
# Returns list(overall=, pos=, neg=) of genomicDensity outputs.
get_genomicDensity<-function(u_VIS_merge,win_size){
tmp<-u_VIS_merge[,c(1,2,3,6)];
colnames(tmp)<-c('chr','start','end','strand');
d<-(tmp$end-tmp$start)/2;
# BUGFIX: the original assigned tmp$start first and then computed
# floor(tmp$start + d) + 1 from the *new* start, shifting the end
# coordinate by ~d instead of placing it at midpoint + 1.
mid <- floor(tmp$start + d);
tmp$start <- mid;
tmp$end <- mid + 1;
VIS_list_clean_overall<-tmp;
VIS_list_clean_pos<-VIS_list_clean_overall[which(VIS_list_clean_overall$strand=='+'),];
VIS_list_clean_neg<-VIS_list_clean_overall[which(VIS_list_clean_overall$strand=='-'),];
library(circlize);
VIS1_overall<-genomicDensity(VIS_list_clean_overall, window.size = win_size);
VIS1_pos<-genomicDensity(VIS_list_clean_pos, window.size = win_size);
VIS1_neg<-genomicDensity(VIS_list_clean_neg, window.size = win_size);
VIS_density<-list();
VIS_density[['overall']]<-VIS1_overall;
VIS_density[['pos']]<-VIS1_pos;
VIS_density[['neg']]<-VIS1_neg;
return(VIS_density);
}
#VIS1, 2 are bed files like VIS1_before$overall
# Per-chromosome Pearson correlation of two genomicDensity outputs.
# Returns a 24x1 matrix (chr1..chr22, chrX, chrY) of correlations of the
# 'pct' density columns.
# NOTE(review): intersect(iz1, iz2) intersects *row indices* of the two
# frames, which is only meaningful if VIS1 and VIS2 share an identical
# window layout (same window size over the same genome) -- TODO confirm.
cor_genomicDensity <- function(VIS1,VIS2){
all_chr<-c(paste0('chr',as.character(1:22)),'chrX','chrY');
all_cc<-matrix(0,length(all_chr),1);
rownames(all_cc)<-all_chr;
colnames(all_cc)<-'correlation';
for (cc in all_chr){
iz1<-which(VIS1$chr==cc);
iz2<-which(VIS2$chr==cc);
iz<-intersect(iz1,iz2);
x<-VIS1$pct[iz];
y<-VIS2$pct[iz];
all_cc[cc,1]<-cor(x,y);
}
return(all_cc);
}
#input a few bed files as a list. for each file, we have chr, start, end storing the VIS
#row names not matter..
#trace("circos.genomicDensity",edit=TRUE)
#max:4e-5
#https://stackoverflow.com/questions/53600926/how-do-you-add-track-label-in-r-circlize
# Draw a circos plot: one genomic-density track per element of VIS_list,
# plus gene labels from 'bed' (4th column = label text) on the inside.
# VIS_list: named list of bed-like frames (chr/start/end) of VIS midpoints.
# win_size: density window size.  bed: label track input.
# Side effect only: draws on the current device, then resets circos state.
draw_circos<-function(VIS_list,win_size,bed){
library(circlize);
#bed = generateRandomBed(nr = 50, fun = function(k) sample(letters, k, replace = TRUE))
# zero gaps between the 24 chromosomes except a 5-degree opening after chrY
gap<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0);gap[24]<-5;
circos.par(gap.after=gap);
circos.initializeWithIdeogram(plotType = NULL);
circos.genomicIdeogram()
n<-length(VIS_list);
track_names<-track_names<-names(VIS_list);
for (i in 1:n){
circos.genomicDensity(VIS_list[[i]], col = c("#600000"), track.height = 0.1,window.size=win_size,ylim.force = FALSE);
#circos.text(sector.index="chr1",track.index = 2*i,get.cell.meta.data("cell.xlim")-mean(get.cell.meta.data("cell.xlim"))/2,
#get.cell.meta.data("cell.ylim"), labels = track_names[i],facing = "clockwise", niceFacing = TRUE, adj = c(0,0),cex=0.5)
}
circos.genomicLabels(bed, labels.column = 4, side = "inside", labels.side="inside",cex=1,
col = as.numeric(factor(bed[[1]])), line_col = as.numeric(factor(bed[[1]])))
circos.clear();
}
#use for plotting the hotspots..
# Draw a circos rainfall plot (inter-site distances) to visualize VIS
# hotspots.  VIS_list: bed-like frame(s) of VIS coordinates accepted by
# circlize::circos.genomicRainfall().  Side effect only: draws on the
# current device, then resets circos state.
draw_circos_rainfall<-function(VIS_list){
library(circlize);
# zero gaps between the 24 chromosomes except a 5-degree opening after chrY
gap<-rep(0,24);gap[24]<-5;
circos.par(gap.after=gap);
circos.initializeWithIdeogram();
# BUGFIX: the function name was misspelled 'circos.genoimcRainfall',
# which always failed with "could not find function"
circos.genomicRainfall(VIS_list);
#circos.genomicDensity(VIS_list[[i]], col = c("#600000"), track.height = 0.1,window.size=win_size,ylim.force = FALSE);
circos.clear();
}
# Stacked-bar plot of the fractional composition of selected VIS across the
# chosen samples, labelled "mergedId:GeneName", with the remaining mass
# collapsed into a grey "rest" bar segment.
# pick_VIS defaults to the VIS present in *all* pick_samples.
# Returns list(ggplot_obj=, pick_VIS_composition=).
# NOTE(review): writes/reads 'tmp.txt' in the working directory; the
# round-trip through read.table also normalizes the frame (e.g. column name
# checking), so replacing it is not behavior-neutral -- left as-is.
draw_composition_stackbars<-function(u_VIS_merge_homer_annot,counts_IS_expts,pick_samples,pick_VIS){
if (missing(pick_VIS)){
# default: intersection of VIS observed in every picked sample
aux<-prepare_venn_diagram_samples(counts_IS_expts,pick_samples);
common_VIS<-aux[[1]];
for (i in 1:length(aux)){
common_VIS<-intersect(common_VIS,aux[[i]]);
}
pick_VIS<-common_VIS;
}
# normalize each sample column to fractions of its total reads
X<-counts_IS_expts[,pick_samples];
Z<-sweep(X,2,colSums(X),`/`);
write.table(Z[pick_VIS,],file="tmp.txt");
data<-read.table('tmp.txt',header=TRUE);
# everything not in pick_VIS becomes a single "rest" row per sample
rest<-t(data.frame(1-colSums(data)));
rownames(rest)<-"rest";
# relabel rows as "mergedId:GeneName" using the homer annotation table
tmp<-u_VIS_merge_homer_annot[as.numeric(rownames(data)),];
rownames(data)<-paste0(rownames(tmp),':',tmp$Gene.Name);
data<-rbind(data,rest);
df<-melt(cbind(VIS=rownames(data),data));
df$VIS<-factor(df$VIS,levels=rownames(data));
library(scales)
# one colour per VIS; the last ("rest") drawn in grey
n<-dim(data)[1];
color_p<-hue_pal()(n);
color_p[n]<-"grey";
p <- ggplot(df, aes(x = variable, y = value, fill = VIS)) +
geom_bar(stat = "identity") +
ylab("Fraction") +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.text=element_text(size=12)) +
scale_fill_manual(values=color_p) + scale_color_manual(values=color_p);
res<-list();
res[['ggplot_obj']]<-p;
res[['pick_VIS_composition']]<-data;
return(res);
}
# Replicate the vector x as the columns of a matrix with n identical columns.
rep.col <- function(x, n) {
  matrix(x, nrow = length(x), ncol = n)
}
# Get upper triangle of the correlation matrix
# Return the correlation matrix with its strictly lower triangle blanked
# out (NA), keeping the diagonal and upper triangle.
get_upper_tri <- function(cormat){
  masked <- cormat
  masked[lower.tri(masked)] <- NA
  masked
}
# Reorder the rows/columns of a correlation matrix by hierarchical
# clustering, using (1 - correlation)/2 as the pairwise distance.
reorder_cormat <- function(cormat){
  cor_dist <- as.dist((1 - cormat) / 2)
  dendro <- hclust(cor_dist)
  cormat[dendro$order, dendro$order]
}
# Append simplified hg19 region annotation columns to a homer-annotated
# VIS table, keyed on the (normalized) homer 'Annotation' strings.
# M: lookup table indexed by annotation string; when missing, it is
#   expected to be defined by loading hg19.basic.annotation.update.Rdata
#   from the working directory (NOTE(review): assumes that .Rdata file
#   contains an object named M -- confirm).
# NOTE(review): the hard-coded column index 22 assumes the input has
# exactly 19 columns before the 3 appended ones -- verify on new inputs.
simplify_homer_annotation <- function(u_VIS_merge_homer_annot,M){
print("load hg19 annotation");
if (missing(M)){
load("./hg19.basic.annotation.update.Rdata");
}
x<-u_VIS_merge_homer_annot$Annotation;
# normalize homer's "3' UTR"/"5' UTR" labels so they match M's row names
x<-gsub("3' UTR","3_UTR",x);
x<-gsub("5' UTR","5_UTR",x);
# pull columns 8, 9 and 5 of the lookup (simplified annotation fields)
X<-M[x,c(8,9,5)];
u_VIS_merge_homer_annot_modify<-cbind(u_VIS_merge_homer_annot,X);
colnames(u_VIS_merge_homer_annot_modify)[22]<-"near.gene.strand";
return(u_VIS_merge_homer_annot_modify);
}
# Frequency table of nearest-gene names over all non-intergenic VIS,
# sorted from most- to least-frequently hit gene.
get_geneFreq <- function(u_VIS_merge_homer_annot_modify){
  print('we drop sites located at the intergenic region')
  keep <- which(u_VIS_merge_homer_annot_modify$region_anot != 'Intergenic')
  gl <- u_VIS_merge_homer_annot_modify$Gene.Name[keep]
  geneFreq <- table(gl)
  geneFreq[order(geneFreq, decreasing = TRUE)]
}
# Draw a word cloud of the most frequently hit genes and save it as a PDF.
# geneFreq: table of gene hit counts (output of get_geneFreq()).
# display_cumsum: fraction of the cumulative hit mass covered by the
#   displayed top genes.  BUGFIX: this argument was previously ignored
#   (.25 was hard-coded below).
# all_max: kept for interface compatibility; currently unused
#   (e.g. 261 is the max of P2, i.e. max over all patients).
# out_file: path of the output PDF.
# NOTE(review): font sizing relies on a global 'scale_factor' that must be
# defined in the calling environment -- TODO make it a parameter.
generate_GeneCloud <- function(geneFreq,display_cumsum,all_max,out_file){
library(wordcloud)
set.seed(1234) # for reproducibility
# geneFreq is a table, so data.frame() expands it into (gene, gl, Freq);
# keep columns 1 and 3 and rename them
df <- data.frame(gene = names(geneFreq),freq=geneFreq);
df<-df[,c(1,3)];
colnames(df)<-c("gene","freq");
df$gene<-as.character(df$gene);
cf<-cumsum(df$freq)/sum(df$freq);
# BUGFIX: use the display_cumsum argument instead of the hard-coded .25;
# fall back to all genes when the threshold is never exceeded
top_idx <- which(cf > display_cumsum)[1];
if (is.na(top_idx)) top_idx <- nrow(df);
df2<-df[1:top_idx,];
top_n_gene<-dim(df2)[1];
minf<-max(df[top_n_gene,2]-1,1);#avoid minf=0
max_font<-df2[1,2]/sum(df$freq);
max_font<-round(max_font*scale_factor,2);
min_font<-df2[top_n_gene,2]/sum(df$freq);
min_font<-round(min_font*scale_factor,2);
dev.new(width=5, height=5, unit="in")
wordcloud(words = df2$gene, freq = df2$freq,min.freq=minf,max.words=top_n_gene, random.order=FALSE,rot.per=0,fixed.asp=TRUE,colors=brewer.pal(8, "Dark2"),scale=c(max_font,min_font))
dev.copy2pdf(file=out_file, out.type= "cairo" );
}
# Gene-set enrichment (fgsea) on the gene hit-frequency ranking.
# geneFreq: named numeric ranking (gene name -> score), e.g. output of
#   get_geneFreq().  m_list: named list of gene sets (see notes below).
# Returns the fgsea result as a data frame, rownamed by pathway.
run_fgsea_from_geneFreq<-function(geneFreq,m_list){
#m_list can be generated by msigdb
#H hallmark gene sets are coherently expressed signatures derived by aggregating many MSigDB gene sets to represent well-defined biological states or processes.
#C1 positional gene sets for each human chromosome and cytogenetic band.
#C2 curated gene sets from online pathway databases, publications in PubMed, and knowledge of domain experts.
#C3 motif gene sets based on conserved cis-regulatory motifs from a comparative analysis of the human, mouse, rat, and dog genomes.
#C4 computational gene sets defined by mining large collections of cancer-oriented microarray data.
#C5 GO gene sets consist of genes annotated by the same GO terms.
#C6 oncogenic gene sets defined directly from microarray gene expression data from cancer gene perturbations.
#C7 immunologic gene sets defined directly from microarray gene expression data from immunologic studies.
#library(msigdbr);
#m_df = msigdbr(species = "Homo sapiens", category = "C5");
#m_list = m_df %>% split(x = .$gene_symbol, f = .$gs_name)
library(fgsea);
#library(annotables);
# set-size limits 15..1000, 10k permutations for the empirical p-values
fgseaRes <- fgsea(m_list,geneFreq,minSize=15, maxSize=1000, nperm=10000);
fgseaRes<-data.frame(fgseaRes);
#fgseaRes<-fgseaRes[order(fgseaRes$ES,decreasing=TRUE),]
rownames(fgseaRes)<-fgseaRes$pathway;
return(fgseaRes);
}
# Score density windows as VIS hotspots under a Poisson null: each window's
# expected count is lambda = nVIS * window / genome_size; p-values are
# Bonferroni- and BH-adjusted, and the result is bedr-sorted.
# X: one element of get_genomicDensity() output.  nVIS: total VIS count.
# NOTE(review): uses X$value for the per-window density, whereas
# cor_genomicDensity() reads X$pct from the same structure -- confirm which
# column circlize::genomicDensity actually returns.
get_hotspots_thres<-function(X,nVIS){
#X is the output from get_genomicDensity
window=X$end[1]-X$start[1]+1;
# approximate human genome size (bp)
genome_size=3.235e9;
n<-genome_size/window;
lambda<-nVIS/n;
# convert density back to a per-window count for the Poisson tail test
count=X$value*window;
all_Pvalue<-1-ppois(count,lambda);
X$P<-all_Pvalue;
X$P_Bonferroni<-X$P*dim(X)[1];
X$P_Bonferroni[X$P_Bonferroni>1]<-1;
X<-X[order(X$P),];
library(sgof);
# Benjamini-Hochberg adjusted p-values (input must be sorted ascending)
res<-BH(X$P);
X$P.adjust<-res$Adjusted.pvalues;
rownames(X)<-paste0(X[,1],":",X[,2],"-",X[,3]);
library(bedr);
# return rows in genome-coordinate order
sort.regions <- bedr.sort.region(rownames(X));
Xout<-X[sort.regions,];
return(Xout);
}
# Split "chr:start-end" region strings into a data frame with character
# columns chr, start and end.  Pure base R -- the previous version loaded
# dplyr and tidyr just for two string splits.
convert_bed_format<-function(P1_hotspots.sort){
region <- as.character(P1_hotspots.sort)
# split off the chromosome, then split the coordinate pair
chr_rest <- strsplit(region, ":", fixed = TRUE)
chr <- vapply(chr_rest, `[`, character(1), 1)
coords <- strsplit(vapply(chr_rest, `[`, character(1), 2), "-", fixed = TRUE)
df <- data.frame(
chr = chr,
start = vapply(coords, `[`, character(1), 1),
end = vapply(coords, `[`, character(1), 2),
stringsAsFactors = FALSE
)
return(df)
}
# Build a ggplot heatmap of a correlation matrix: clustered with
# reorder_cormat(), masked to one triangle with get_upper_tri(), with the
# rounded values printed in each cell.
# orient: 'upper' or 'lower' -- selects the axis mapping and legend
#   placement (the two variants were previously ~25 lines of duplicated
#   code; only the aes() and legend.position differed).
# Returns the ggplot object.
generate_cormat_heatmap<-function(cormat,orient){
library(reshape2)
cormat <- reorder_cormat(cormat);
upper_tri <- get_upper_tri(cormat)
# Melt the correlation matrix; value2 holds the 2-decimal cell labels
melted_cormat <- melt(upper_tri, na.rm = TRUE)
melted_cormat$value2<-round(melted_cormat$value*100)/100;
if (orient=='upper'){
fill_map <- aes(Var1, Var2, fill = value)
text_map <- aes(Var1, Var2, label = value2)
legend_pos <- c(0.8, 0.2)
} else if (orient=='lower'){
fill_map <- aes(Var2, Var1, fill = value)
text_map <- aes(Var2, Var1, label = value2)
legend_pos <- c(0.6, 0.7)
} else {
stop("orient must be 'upper' or 'lower'", call. = FALSE)
}
# Create a ggheatmap
ggheatmap <- ggplot(melted_cormat, fill_map)+
geom_tile(color = "white")+
scale_fill_gradient2(low = "white", high = "red", mid = "orange",
midpoint = .7, limit = c(0.15,1), space = "Lab",
name="Pearson\nCorrelation") +
theme_minimal()+ # minimal theme
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 14, hjust = 1))+
coord_fixed()
# add the value labels and strip the remaining chart furniture
ggheatmap<-ggheatmap +
geom_text(text_map, color = "black", size = 5) +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
legend.justification = c(1, 0),
legend.position = legend_pos,
legend.direction = "horizontal")+
guides(fill = guide_colorbar(barwidth = 7, barheight = 1,
title.position = "top", title.hjust = 0.5));
return(ggheatmap);
}
# For each hotspot, find the nearest super-enhancer (SE) region via
# 'bedtools closest' and compute the minimum endpoint distance (0 when the
# regions actually overlap, per bedr::in.region).
# overlap_hotspots, all_SE_bed: character vectors of "chr:start-end".
# Returns one row per unique hotspot with the bedtools output plus 'minD'.
# NOTE(review): writes hotspots.bed / SE.bed / res_out into the working
# directory and requires bedtools on the PATH.
run_bedtools_closest<-function(overlap_hotspots,all_SE_bed){
aux1<-convert_bed_format(overlap_hotspots);
aux2<-convert_bed_format(all_SE_bed);
write.table(aux1,file='hotspots.bed',sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE);
write.table(aux2,file="SE.bed",sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE);
cmd<-paste0("bedtools closest -a hotspots.bed -b SE.bed > res_out");
system(cmd);
#bedtools closest -a vis.bed -b SE.bed > res_out
res<-read.table('res_out');
tmp<-paste0(res[,1],":",res[,2],"-",res[,3]);
# min over all four endpoint-to-endpoint distances
d1<-abs(res[,2]-res[,5]);
d2<-abs(res[,2]-res[,6]);
d3<-abs(res[,3]-res[,5]);
d4<-abs(res[,3]-res[,6]);
out<-cbind(res,minD=pmin(d1,d2,d3,d4));
# overlapping regions get distance 0
is.overlap <- in.region(tmp,all_SE_bed);
out[which(is.overlap),]$minD<-0;
#res<-cbind(res,overlap=is.overlap);
# bedtools may report several equally-close SEs; keep the first per hotspot
iz<-which(!duplicated(tmp));
out<-out[iz,];
rownames(out)<-paste0(out[,1],":",out[,2],"-",out[,3]);
return(out);
}
|
bf3f7fa8080e98167afb28df957baf386e4d98f1 | afe83dd189d8c419bc2eb25ad5a86b4531a499ab | /NetworkAnalysis.R | 8a3665ff05702f8349efffcfb9bb929e6be22832 | [] | no_license | dnbarron/BigStudy | aefcb508e5ba47ead794ea25d3885c9bfe401770 | 0bd52230146df518ba936a6f4a458c7f61c1354f | refs/heads/master | 2020-04-08T23:02:20.610724 | 2013-10-03T11:04:53 | 2013-10-03T11:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,350 | r | NetworkAnalysis.R | clique.number(g1) # largest clique has 8 members
# --- Interactive igraph network-analysis script (part 1) ----
# NOTE(review): relies on objects built earlier in the original file and
# not visible here (g1, deg, knows, advice, advice.noiso, int_vertex_colors,
# deg.know, ...); run the preceding section first.
cls <- closeness(g1, mode='in')
is.connected(g1) # Graph is connected
clusters(g1,mode="strong")
no.clusters(g1) # 1 cluster!
cbs <- cohesive.blocks(g1)
x11(width=10,height=6)
plot.bgraph(cbs)
ebc <- edge.betweenness.community(g1)
community.to.membership(g1,ebc$merges, steps=70)
power.law.fit(deg) # 1.44
recpr <- reciprocity(g1) # 0.069 Very low, but prob because
##### Focus on know network
### Lood at strength of ties
# map the 5-point 'Strength' edge attribute onto three colour bands
edge.colors <- c('gray','springgreen','lightskyblue','navy','red')
int_edge_colors <- get.edge.attribute(knows,"Strength")
int_edge_colors[int_edge_colors == 1] <- edge.colors[1]
int_edge_colors[int_edge_colors == 2] <- edge.colors[1]
int_edge_colors[int_edge_colors == 3] <- edge.colors[3]
int_edge_colors[int_edge_colors == 4] <- edge.colors[5]
int_edge_colors[int_edge_colors == 5] <- edge.colors[5]
# Weak ties grey
# Medium ties blue
# Strong ties red
x11(width=12,height=10)
plot(knows, layout=layout.kamada.kawai,edge.arrow.size=.3,vertex.size=5,
vertex.label=deg.know,vertex.color=int_vertex_colors,edge.color=int_edge_colors)
dev.off()
# Look at advice network
tkplot(advice.noiso, layout=layout.kamada.kawai,edge.arrow.size=.3,vertex.size=5,
vertex.label=deg.know,vertex.color=int_vertex_colors)
# walktrap community detection on the advice network
bet.adv <- betweenness(advice,directed=FALSE)
wtrp.adv <- walktrap.community(advice.noiso)
memb.adv <- community.to.membership(advice.noiso,wtrp.adv$merges,steps=12)
modularity(advice.noiso, memb.adv$membership)
# colour the four largest communities; everything else grey
vertex.colors <- c('springgreen','lightskyblue','navy','red','grey')
com.vertex.colors <- memb.adv$membership
com.vertex.colors[com.vertex.colors>3] <- vertex.colors[5]
com.vertex.colors[com.vertex.colors==0] <- vertex.colors[1]
com.vertex.colors[com.vertex.colors==1] <- vertex.colors[2]
com.vertex.colors[com.vertex.colors==2] <- vertex.colors[3]
com.vertex.colors[com.vertex.colors==3] <- vertex.colors[4]
brdg.colors <- rep('grey',199)
# NOTE(review): the next line is an incomplete expression (and the name
# differs from 'brdg.colors' above) -- it errors if executed as-is
brdg.color[]
tkplot(advice.noiso, layout=layout.kamada.kawai,edge.arrow.size=.3,vertex.size=5,
vertex.label=NA,vertex.color=com.vertex.colors)
# inspect members of the first communities, then plot only those vertices
ix <- order(memb.adv$membership)
com.df <- data.frame(Name=get.vertex.attribute(advice.noiso,"name"),Community=memb.adv$membership)[ix,]
com.df[1:16,]
rem <- as.numeric(rownames(com.df[17:62,])) -1
advice.com <- delete.vertices(advice.noiso,rem)
# hand-assigned community colours for the 16 retained vertices
com.vertex.colors.sub <- c(1,3,2,1,0,2,2,3,2,3,2,2,2,0,0,1)
com.vertex.colors.sub[com.vertex.colors.sub==0] <- vertex.colors[1]
com.vertex.colors.sub[com.vertex.colors.sub==1] <- vertex.colors[2]
com.vertex.colors.sub[com.vertex.colors.sub==2] <- vertex.colors[3]
com.vertex.colors.sub[com.vertex.colors.sub==3] <- vertex.colors[4]
plot(advice.com, layout=layout.kamada.kawai,edge.arrow.size=1,vertex.size=5,
vertex.label=NA,vertex.color=com.vertex.colors.sub)
## fastgreedy community structure
fg.adv <- fastgreedy.community(as.undirected(advice.noiso))
fg.adv <- community.to.membership(advice.noiso,fg.adv$merges,steps=12)
modularity(advice.noiso, fg.adv$membership)
fg.adv$membership
wtrp.adv$membership
### edge betweenness community
eb.adv <- edge.betweenness.community(advice.noiso,directed=FALSE)
eb.adv.mem <- community.to.membership(advice.noiso, eb.adv$merges, steps=9)
modularity(advice.noiso, eb.adv.mem$membership)
table(eb.adv.mem$membership)
eb.adv$bridges
eb.adv$removed.edges
eb.adv$edge.betweenness
# highlight the five highest-betweenness (bridge) edges in red/thick
brdg.colors <- rep('grey',199)
brdg.colors[eb.adv$removed.edges[1:5]] <- 'red'
# NOTE(review): duplicate of the previous line
brdg.colors[eb.adv$removed.edges[1:5]] <- 'red'
brdg.wd <- rep(1,199)
brdg.wd[eb.adv$removed.edges[1:5]] <- 5
tkplot(advice.noiso, layout=layout.kamada.kawai,edge.arrow.size=1,vertex.size=5,
edge.color=brdg.colors,
edge.width=brdg.wd)
##### Network of people interviewed only
V.notint <- V(knows)[V(knows)$Interviewed==1]
net.interview <- delete.vertices(knows,10:(vcount(knows)-1))
summary(net.interview)
# Graph density: fraction of possible (loop-free) edges actually present.
# Directed graphs have n(n-1) possible edges; undirected have n(n-1)/2.
dens <- function(g){
  n_edges <- ecount(g)
  n_nodes <- vcount(g)
  max_edges <- n_nodes^2 - n_nodes
  if (!is.directed(g)) {
    max_edges <- max_edges / 2
  }
  n_edges / max_edges
}
dens(net.interview)
# --- Dyad census of the four networks, summarized in one table ----
# NOTE(review): knows.int / advice.int / leader.int / infl.int and the
# *.noiso graphs come from an earlier part of the original script.
know.dy <- dyad.census(knows.int)
adv.dy <- dyad.census(advice.int)
lead.dy <- dyad.census(leader.int)
inf.dy <- dyad.census(infl.int)
# rows = networks, columns = mutual / asymmetric / null dyads + edge total
dy.op <- matrix(cbind(know.dy$mut,know.dy$asym,know.dy$null,ecount(knows.int),
adv.dy$mut,adv.dy$asym,adv.dy$null,ecount(advice.int),
lead.dy$mut,lead.dy$asym,lead.dy$null,ecount(leader.int),
inf.dy$mut,inf.dy$asym,inf.dy$null,ecount(infl.int)),ncol=4,byrow=TRUE)
row.names(dy.op) <- c('Knows','Advice','Leader','Influence')
colnames(dy.op) <- c('Mutual','Asymmetric','Null','Total')
dy.op
# export one PNG per network
png('Knows.png', width=1500,height=1500,res=200, antialias='cleartype')
plot(knows.noiso, layout=layout.kamada.kawai,edge.arrow.size=.3,vertex.size=5,
vertex.label=NA,vertex.color=int_vertex_colors)
dev.off()
png('Advice.png', width=1500,height=1500,res=200, antialias='cleartype')
plot(advice, layout=layout.fruchterman.reingold.grid,edge.arrow.size=.3,
vertex.label=NA,vertex.color=int_vertex_colors,vertex.size=3)
dev.off()
png('Leader.png', width=1500,height=1500,res=200, antialias='cleartype')
plot(leader.noiso, layout=layout.fruchterman.reingold.grid,edge.arrow.size=.3,
vertex.label=NA,vertex.color=int_vertex_colors,vertex.size=3)
dev.off()
476c8fb0679c16bf4f1c8c83d089835b4e42aa70 | e1e7d0aab8da6a423762dec4ca0b43b706b19867 | /getting and cleanning data/R获取网页数据.R | 982ebc790f919dc3ed81d98a1ffbc5f133dd823f | [] | no_license | qingaidexin/Data-science | 397eace49afdbd7cfde09e6af6c1b937e74c8817 | 8cd34a0ea8f602350fa172c052e65198ea091bdb | refs/heads/master | 2021-01-19T05:03:04.126142 | 2016-06-05T10:06:54 | 2016-06-05T10:06:54 | 60,453,267 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 2,712 | r | R获取网页数据.R | R语言:网页抓取之不同提取方法解析
接上篇,用R获取网页数据之后的处理
(一)XML解析网页并提取数据
当获取表格数据时,可以用readHTMLTable来获取数据,很方便。当数据不是表格化的时,则常用xmlTreeParse(xmlParse)和getNodeSet配合来获取相应的数据。xmlTreeParse 来抓取页面数据,并且形成树。getNodeSet来对树结构数据,根据XPath语法来选取特定的节点集。下面举个实际例子来讲解一下这两个最重要函数的应用。
library(XML)
URL = 'http://www.w3school.com.cn/example/xmle/books.xml'
doc <- xmlParse(URL);##解析网页##还有其他参数,如encoding##
##(1)选取属于bookstore子元素的第一个book元素
getNodeSet(doc,'/bookstore/book[1]') ##此方法不常用
##获取根节点
top <- xmlRoot(doc)
##获取某个节点下数据
top[2]
##使用XPath语句查询
##(2)先筛选符合条件的节点
###注意下面的单引号和双引号可能在R里会导致没有执行结果的问题###
Node <- getNodeSet(top, path = "//title[@lang='en']")
##然后取出其中元素
sapply(Node, xmlValue)
##(3)或者直接取出数据##取出title里面lang = ‘en’的内容##
xpathSApply(top, path = "//title[@lang='en']", xmlValue)
推荐阅读
http://www.52ij.com/jishu/XML/12424.html
http://cran.r-project.org/web/packages/XML/XML.pdf R语言的XML包的帮助文档
(二)正则表达式提取数据(其实就是字符串匹配)
URL = 'http://movie.**.com/top250?format=text'
##获取网页源码,存储起来##
web <- readLines(URL, encoding="UTF-8")
# 找到包含电影名称的行编号以及电影名称
nameRow <- grep('<span class=\"title\">',web)
name <- web[nameRow]
##正则匹配提取电影名称##位置和长度##>move.name<
gregout <- gregexpr('>\\w+', name)
len = length(gregout)
move.name = matrix(0, nrow = len, ncol = 1)
for(i in 1:len){
move.name[i,1] = substring(name[i], gregout[[i]]+1, (gregout[[i]]+attr(gregout[[i]], 'match.length'))[[1]]-1)
}
##去掉空缺值##
move.name <- move.name[nchar(move.name)>1]
辅助工具
自动生成正则表达式,但是不支持中文。
http://www.txt2re.com/index-java.php3
支持编程语言非常多的一个测试正则表达式的工具
RegexBuddy
推荐阅读
http://www.tuicool.com/articles/vEziEj 一个详细的例子
http://developer.51cto.com/art/201305/393692.htm R语言的字符串处理函数
http://www.jb51.net/tools/zhengze.html正则帮助文档
优缺点比较:
XML方法有时候会解析错误,但是简单易用。
正则选择方法需要懂点正则语法,写起来比较费劲,重在灵活。 |
5a2507d1d7c536a465198c28398abe127ba244b7 | c1a17e577be11c1deb13d5a7e588083df15d071f | /data_visualisation_correlation_plot.R | 6cb06242b75d5237044f770170b3924fc1191797 | [] | no_license | divyaimale29/analyticswithr | 2c0152f6b326fa76a56c9f502dc5b704a770857f | a81b13294a756671399787e7fb170042fb19d429 | refs/heads/main | 2023-06-22T11:40:56.661615 | 2021-07-23T14:16:50 | 2021-07-23T14:16:50 | 354,501,983 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,222 | r | data_visualisation_correlation_plot.R | #Correlation Plot in R----
#Use cor( ) function to produce correlations in R and
#we use function corrgram( ) to plot correlograms in R.
# mtcars dataset is natively available in R
head(mtcars)
# Correlation matrix from mtcars
# with mpg, cyl, and disp as rows
# and hp, drat, and wt as columns
x <- mtcars[1:3]
y <- mtcars[4:6]
z<-cor(x, y)
z
#We use function corrplot( ) in package "corrplot" to plot correlograms.
install.packages("corrplot")
library(corrplot)
corrplot(z, type = "upper",
tl.col = "black", tl.srt = 45)
#We use function corrgram( ) in package "corrgram" to plot correlograms.
install.packages("corrgram")
library(corrgram)
# First
corrgram(mtcars, order=TRUE, lower.panel=panel.shade,
upper.panel=panel.pie, text.panel=panel.txt,
main="Car Milage Data in PC2/PC1 Order")
# Second
corrgram(mtcars, order=TRUE, lower.panel=panel.ellipse,
upper.panel=panel.pts, text.panel=panel.txt,
diag.panel=panel.minmax, main="Car Milage Data in PC2/PC1 Order")
# Third
corrgram(mtcars, order=NULL, lower.panel=panel.shade,
upper.panel=NULL, text.panel=panel.txt,
main="Car Milage Data (unsorted)")
|
f3868fd6a67611fc837ef2f1eb31661e3764150e | 4e9687855d4dd20390576bbb65e6b489b189edd2 | /man/empLogit.Rd | a262832703820ed98e1fb500d7326cda1833f0cc | [] | no_license | cran/binomTools | 55e3f724f382f37f2ede1dd35709ed149c86dbdf | 9b9cfae2db9b41dcd0826d9fdf7fc4abda8b629c | refs/heads/master | 2021-01-20T05:08:29.491513 | 2011-08-03T00:00:00 | 2011-08-03T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,116 | rd | empLogit.Rd | \name{empLogit}
\alias{empLogit}
\title{
Calculates the empirical logit transform
}
\description{
The empirical logit transform allows for a tolerence such that
infinity is not returned when the argument is zero or one.
}
\usage{
empLogit(x, eps = 1e-3)
}
\arguments{
\item{x}{
numerical vector for which the empirical logit transform is desired
}
\item{eps}{
numerical scalar; a tolerance to prevent infinite values
}
}
\value{
the empirical logit transform of \code{x}
}
\author{
Rune Haubo B Christensen
}
\examples{
## The function is currently defined as
## function (x, eps = 1e-3) log((eps + x)/(1 - x + eps))
## Lifted from example(predict.glm):
ldose <- rep(0:5, 2)
numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
sex <- factor(rep(c("M", "F"), c(6, 6)))
SF <- cbind(numdead, numalive=20-numdead)
## budworm.lg <- glm(SF ~ sex*ldose, family=binomial)
## summary(budworm.lg)
empLogit(numdead/20)
## Possible usage:
## Explorative interaction plot:
interaction.plot(ldose, sex, empLogit(numdead/20))
}
\keyword{utilities}
|
1774c5a010b7362aaa333482ade9a4ab45b52742 | 06dde96ba7b9b2bc67a7cf5bf2d1fbf6c6607e03 | /projet_data_rf.R | 5f5c4b11cfe0ab4a091b951dcbb0cb8489148672 | [] | no_license | arlacroix/Data-science-LDATS2310 | c23b38dfa5d3d5f251936810f87379db218e34cb | 6eefac4bdca5f3a7fa13289be9070627ada6b54d | refs/heads/main | 2023-01-19T13:39:09.750621 | 2020-12-01T09:39:17 | 2020-12-01T09:39:17 | 310,041,833 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,006 | r | projet_data_rf.R | # Random forest code
library(caret)
library(rpart)
library(dismo)
base <- read.table("/Users/lacroixarthur/OneDrive - UCL/Master Actu 2/Data science for finance/projet/DBtrain.csv", sep=",", header=TRUE)
base <- read.table("/Users/lacroixarthur/OneDrive - UCL/Master Actu 2/Data science for finance /projet/DBtrain.csv", sep=",", header=TRUE)
base$Gender <-as.factor(base$Gender)
base$Area <- as.factor(base$Area)
base$Power <- as.factor(base$Power)
base$Leasing <- as.factor(base$Leasing)
base$Fract <- as.factor(base$Fract)
base$Contract <- as.factor(base$Contract)
# Ici, nous n'avons pas de Exposure nulles, si jamais nous en avons, nous devons les supprimer de cette façon :
# base<-base[is.na(base$Exposure)==0,]
# base<-base[base$Exposure>0,]
# range(base$Exposure)
set.seed(2)
inValidation = sample(nrow(base), 0.9*nrow(base), replace = FALSE)
training.set = base[inValidation,]
validation.set = base[-inValidation,]
M = 100 #number of samples
nr = nrow(base) #size of the dataset
size = nr #size of the sample
lambda3 <- rep(0,nrow(validation.set))
lambda3M <- matrix(0 , M,nrow(validation.set))
ageM <- matrix(0 , M,nrow(validation.set))
dstar = 2;
listcovariates=c("DriverAge","Gender","Area","Power",
"CarAge","Contract", "Leasing", "Fract" )
# random forest (non parametric)
for (ct in c(1:M))
{
#non parametrique
tmp <-sample(nr, size, replace = TRUE, prob = NULL)
basetmp <-training.set[tmp,]
rndFact <-sample(6, dstar, replace = FALSE, prob = NULL)
equation=paste("cbind(Exposure,Nbclaims)~",listcovariates[rndFact[1]])
for (j in c(2:dstar)){
equation=paste(equation,listcovariates[rndFact[j]],sep="+")
}
d.tree <-rpart( equation,data=basetmp, method="poisson",
parms=list(shrink=1),control=rpart.control(cp=0, xval = 10))
inValidation = createDataPartition(base$Nbclaims, p=0.1, list=FALSE)
validation.set = base[inValidation,]
training.set = base[-inValidation,]
# if (!is.null(d.tree$csplit))
# {
# plot(d.tree,main = "Regression tree, small d")
# text(d.tree) #equivalent to labels(d.tree)
# }
lambda3<-lambda3+predict(d.tree,validation.set)
lambda3M[ct,]<-lambda3
ageM[ct,] <- validation.set$DriverAge
#readline("Press <return to continue")
}
lambda3 <- lambda3/M
# NbClaims <- lambda3 * (test$Exposure)
# test <- data.frame(test,NbClaims)
#
#
# NbExposure <- lambda3
# test <- data.frame(test,NbExposure)
valid.predrf <- validation.set$Exposure*predict(d.tree, newdata=validation.set)
pred.errorrf <- 1/nrow(validation.set)*2*(sum(valid.predrf)-sum(validation.set$Nbclaims) +sum(log((validation.set$Nbclaims/valid.predrf)^(validation.set$Nbclaims))))
#We can then take an optimal cp such that xerror is minimal.
#We would have to redo the loop and take the optimal cp for each newly grown tree. We see very little
#difference between the two forests, so it is not worth trying to optimise cp.
# for (ct in c(1:M))
# {
# #non parametrique
# cp.opt <- d.tree$cptable[which.min(d.tree$cptable[,"xerror"]),"CP"]
# tmp <-sample(nr, size, replace = TRUE, prob = NULL)
# basetmp <-training.set[tmp,]
#
# rndFact <-sample(6, dstar, replace = FALSE, prob = NULL)
#
# equation=paste("cbind(Exposure,Nbclaims)~",listcovariates[rndFact[1]])
# for (j in c(2:dstar)){
# equation=paste(equation,listcovariates[rndFact[j]],sep="+")
#
# }
# d.tree <-rpart(equation,data=basetmp, method="poisson",
# parms=list(shrink=1),control=rpart.control(cp=0, xval=10))
#
# cp.opt <- d.tree$cptable[which.min(d.tree$cptable[,"xerror"]),"CP"]
#
# d.tree_opt <-rpart( equation,data=basetmp, method="poisson",
# parms=list(shrink=1),control=rpart.control(cp=cp.opt))
# inValidation = createDataPartition(base$Nbclaims, p=0.1, list=FALSE)
# validation.set = base[inValidation,]
# training.set = base[-inValidation,]
# # if (!is.null(d.tree$csplit))
# # {
# # plot(d.tree,main = "Regression tree, small d")
# # text(d.tree) #equivalent to labels(d.tree)
# # }
#
# lambda3<-lambda3+predict(d.tree,validation.set)
# lambda3M[ct,]<-lambda3
# ageM[ct,] <- validation.set$DriverAge
#
# #readline("Press <return to continue")
# }
# Poisson deviance of the (last) tree via calc.deviance: mean and total.
# NOTE(review): like valid.predrf above, this scores only the final d.tree,
# not the bagged ensemble mean -- confirm intended.
pred_rf <- predict(d.tree, newdata = validation.set) * validation.set$Exposure
dev_mean_rf <- calc.deviance(validation.set$Nbclaims, pred_rf, family = "poisson")
dev_rf2 <- calc.deviance(validation.set$Nbclaims, pred_rf, family = "poisson", calc.mean = FALSE)
# Poisson deviance for the optimised forest: very little difference, and a much longer computation time.
# pred_rf_opt <- predict(d.tree_opt, newdata = validation.set) * validation.set$Exposure
# dev_mean_rf_opt <- calc.deviance(validation.set$Nbclaims, pred_rf_opt, family = "poisson")
# dev_rf_opt2 <- calc.deviance(validation.set$Nbclaims, pred_rf_opt, family = "poisson", calc.mean = FALSE)
|
d70a593645111b05bd443cbc7c330848fbd835ed | b1fcc8be40ae0a7c9a01ae3d1d7ad55bc1e270b5 | /R/aaa.R | fc20d648691622037297cb8f7588e438970bef0f | [
"BSD-2-Clause",
"MIT"
] | permissive | coolbutuseless/lz4lite | 4a196f3665046f18cb9448a8786c25a8a939131f | 47337a3c6e27abf3cc457cb6af5592bbd0855c6f | refs/heads/master | 2022-12-19T01:27:48.489464 | 2020-09-26T00:18:11 | 2020-09-26T00:18:11 | 272,675,271 | 21 | 3 | null | null | null | null | UTF-8 | R | false | false | 49 | r | aaa.R | #' @useDynLib lz4lite, .registration=TRUE
NULL
|
da53a366d961729ae4ff27f17d8e21ea6ed43452 | 29585dff702209dd446c0ab52ceea046c58e384e | /clhs/R/clhs-raster.R | 37b0f5990facf737c24ca09887dfea2bc9d8fa30 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 327 | r | clhs-raster.R | clhs.Raster <- function(
x, # data
...
){
spdf <- rasterToPoints(x, spatial = TRUE)
spl <- clhs(x = spdf, ...)
if (is(spl, "cLHS_result"))
spl$initial_object <- x
else {
if (ncol(spl) < 2)
spl <- raster(spl)
else
spl <- stack(spl)
}
spl
}
setMethod("clhs", "Raster", clhs.Raster)
|
b38f641ee71442eff637535f6f46b491f0524acf | 7a7d03bac0d7a0c2ddd99d0d4633bf15b8eca1a4 | /tests/testthat/test_checkMatrix.r | b13228037912d0d6efe390b57f5039cb2f84c40b | [] | no_license | kerschke/checkmate | cd9b74c5c27ab7d28f53813a43dafd8906b71b29 | b6bc370cac53c67e6fecaa715b1b11eee943d6c4 | refs/heads/master | 2021-01-15T23:07:01.387628 | 2015-03-19T23:18:32 | 2015-03-19T23:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,754 | r | test_checkMatrix.r | context("checkMatrix")
test_that("checkMatrix", {
myobj = matrix(1:9, 3)
expect_succ(Matrix, myobj)
myobj = TRUE
expect_fail(Matrix, myobj)
x = matrix(1:9, 3)
expect_true(testMatrix(x))
expect_true(testMatrix(matrix(nrow=0, ncol=0)))
expect_false(testMatrix(NULL))
x[2,2] = NA
expect_true(testMatrix(x))
expect_false(testMatrix(x, any.missing = FALSE))
xl = matrix(TRUE)
xi = matrix(1L)
xr = matrix(1.)
xs = matrix("a")
xc = matrix(1+1i)
expect_true(testMatrix(xl, "logical"))
expect_true(testMatrix(xi, "integer"))
expect_true(testMatrix(xr, "double"))
expect_true(testMatrix(xr, "numeric"))
expect_true(testMatrix(xc, "complex"))
expect_true(testMatrix(xs, "character"))
expect_false(testMatrix(xs, "logical"))
expect_false(testMatrix(xs, "integer"))
expect_false(testMatrix(xs, "double"))
expect_false(testMatrix(xs, "numeric"))
expect_false(testMatrix(xs, "complex"))
expect_false(testMatrix(xl, "character"))
expect_true(testMatrix(x, min.rows = 1, min.cols = 1))
expect_true(testMatrix(x, nrows = 3, ncols = 3))
expect_false(testMatrix(x, min.rows = 5))
expect_false(testMatrix(x, min.cols = 5))
expect_false(testMatrix(x, nrows = 5))
expect_false(testMatrix(x, ncols = 5))
expect_false(testMatrix(x, row.names = "named"))
expect_false(testMatrix(x, col.names = "named"))
rownames(x) = letters[1:3]; colnames(x) = NULL
expect_true(testMatrix(x, row.names = "named"))
expect_false(testMatrix(x, col.names = "named"))
colnames(x) = letters[1:3]; rownames(x) = NULL
expect_false(testMatrix(x, row.names = "named"))
expect_true(testMatrix(x, col.names = "named"))
colnames(x) = rownames(x) = letters[1:3]
expect_true(testMatrix(x, row.names = "named"))
expect_true(testMatrix(x, col.names = "named"))
expect_true(testMatrix(x, mode = "integer"))
expect_true(testMatrix(x, mode = "numeric"))
expect_false(testMatrix(x, mode = "double"))
expect_error(assertMatrix(iris), "matrix")
expect_true(testMatrix(matrix(ncol = 0, nrow = 0), row.names = "named"))
expect_true(testMatrix(matrix(ncol = 0, nrow = 0), col.names = "named"))
expect_error(assertMatrix(matrix(), min.len = 99), "99")
})
test_that("dimension arugments are checked", {
x = matrix(1)
expect_error(checkMatrix(x, min.rows = 1.2), "count")
expect_error(checkMatrix(x, min.rows = NA_integer_), "missing")
expect_error(checkMatrix(x, min.rows = -1), ">= 0")
})
test_that("dimesions are reported correctly", {
x = matrix(1:42, ncol = 1)
expect_true(grepl(42, checkMatrix(x, nrows = 43)))
expect_true(grepl(42, checkMatrix(x, min.rows = 43)))
x = t(x)
expect_true(grepl(42, checkMatrix(x, ncols = 43)))
expect_true(grepl(42, checkMatrix(x, min.cols = 43)))
})
|
bf91c1ae3f11ec287ae7de9ffc09a1019441ef75 | e409b6660d248ed8809dcdf1c6404fd19b133cdc | /HW/variofit2.R | fa034657658cb011d9761067f0ed04746360d6bb | [] | no_license | nmmarquez/stat517 | fef818c822d39929831f27f4b150b29d1e9ccb2c | 4c808d17e828faa40f4838efe7e2c472b3ad8b0b | refs/heads/master | 2020-04-11T13:13:54.321486 | 2018-12-14T15:57:58 | 2018-12-14T15:57:58 | 161,808,464 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,459 | r | variofit2.R | variofit2 <- function (vario, ini.cov.pars, cov.model, fix.nugget = FALSE,
nugget = 0, fix.kappa = TRUE, kappa = 0.5, simul.number = NULL,
max.dist = vario$max.dist, weights, minimisation.function,
limits = pars.limits(), messages, ...)
{
call.fc <- match.call()
if (missing(messages))
messages.screen <- as.logical(ifelse(is.null(getOption("geoR.messages")),
TRUE, getOption("geoR.messages")))
else messages.screen <- messages
if (length(class(vario)) == 0 || all(class(vario) != "variogram"))
warning("object vario should preferably be of the geoR's class \"variogram\"")
if (!missing(ini.cov.pars)) {
if (any(class(ini.cov.pars) == "eyefit"))
cov.model <- ini.cov.pars[[1]]$cov.model
if (any(class(ini.cov.pars) == "variomodel"))
cov.model <- ini.cov.pars$cov.model
}
if (missing(cov.model))
cov.model <- "matern"
cov.model <- match.arg(cov.model, choices = .geoR.cov.models)
if (cov.model == "stable")
cov.model <- "powered.exponential"
if (cov.model == "powered.exponential")
if (limits$kappa["upper"] > 2)
limits$kappa["upper"] <- 2
if (missing(weights)) {
if (vario$output.type == "cloud")
weights <- "equal"
else weights <- "npairs"
}
else weights <- match.arg(weights, choices = c("npairs",
"equal", "cressie"))
if (messages.screen) {
cat(paste("variofit: covariance model used is", cov.model,
"\n"))
cat(paste("variofit: weights used:", weights, "\n"))
}
if (missing(minimisation.function))
minimisation.function <- "optim"
if (any(cov.model == c("linear", "power")) & minimisation.function ==
"nls") {
cat("warning: minimisation function nls can not be used with given cov.model.\n changing for \"optim\".\n")
minimisation.function <- "optim"
}
if (minimisation.function == "nls" & weights != "equal") {
warning("variofit: minimisation function nls can only be used with weights=\"equal\".\n changing for \"optim\".\n")
minimisation.function <- "optim"
}
if (is.matrix(vario$v) & is.null(simul.number))
stop("object in vario$v is a matrix. This function works for only 1 empirical variogram at once\n")
if (!is.null(simul.number))
vario$v <- vario$v[, simul.number]
if (mode(max.dist) != "numeric" || length(max.dist) > 1)
stop("a single numerical value must be provided in the argument max.dist")
if (max.dist == vario$max.dist)
XY <- list(u = vario$u, v = vario$v, n = vario$n)
else XY <- list(u = vario$u[vario$u <= max.dist], v = vario$v[vario$u <=
max.dist], n = vario$n[vario$u <= max.dist])
if (cov.model == "pure.nugget") {
minimisation.function <- "not used"
message <- "correlation function does not require numerical minimisation"
if (weights == "equal")
lm.wei <- rep(1, length(XY$u))
else lm.wei <- XY$n
if (cov.model == "pure.nugget") {
if (fix.nugget) {
temp <- lm((XY$v - nugget) ~ 1, weights = lm.wei)
cov.pars <- c(temp$coef, 0)
}
else {
temp <- lm(XY$v ~ 1, weights = lm.wei)
nugget <- temp$coef
cov.pars <- c(0, 0)
}
}
value <- sum((temp$residuals)^2)
}
else {
if (messages.screen)
cat(paste("variofit: minimisation function used:",
minimisation.function, "\n"))
umax <- max(vario$u)
vmax <- max(vario$v)
if (missing(ini.cov.pars)) {
ini.cov.pars <- as.matrix(expand.grid(c(vmax/2, 3 *
vmax/4, vmax), seq(0, 0.8 * umax, len = 6)))
if (!fix.nugget)
nugget <- unique(c(nugget, vmax/10, vmax/4, vmax/2))
if (!fix.kappa)
kappa <- unique(c(kappa, 0.25, 0.5, 1, 1.5, 2))
if (messages.screen)
warning("initial values not provided - running the default search")
}
else {
if (any(class(ini.cov.pars) == "eyefit")) {
init <- nugget <- kappa <- NULL
for (i in 1:length(ini.cov.pars)) {
init <- drop(rbind(init, ini.cov.pars[[i]]$cov.pars))
nugget <- c(nugget, ini.cov.pars[[i]]$nugget)
if (cov.model == "gneiting.matern")
kappa <- drop(rbind(kappa, ini.cov.pars[[i]]$kappa))
else kappa <- c(kappa, ini.cov.pars[[i]]$kappa)
}
ini.cov.pars <- init
}
if (any(class(ini.cov.pars) == "variomodel")) {
nugget <- ini.cov.pars$nugget
kappa <- ini.cov.pars$kappa
ini.cov.pars <- ini.cov.pars$cov.pars
}
}
if (is.matrix(ini.cov.pars) | is.data.frame(ini.cov.pars)) {
ini.cov.pars <- as.matrix(ini.cov.pars)
if (nrow(ini.cov.pars) == 1)
ini.cov.pars <- as.vector(ini.cov.pars)
else {
if (ncol(ini.cov.pars) != 2)
stop("\nini.cov.pars must be a matrix or data.frame with 2 components: \ninitial values for sigmasq (partial sill) and phi (range parameter)\n")
}
}
else if (length(ini.cov.pars) > 2)
stop("\nini.cov.pars must provide initial values for sigmasq and phi\n")
if (is.matrix(ini.cov.pars) | (length(nugget) > 1) |
(length(kappa) > 1)) {
if (messages.screen)
cat("variofit: searching for best initial value ...")
ini.temp <- matrix(ini.cov.pars, ncol = 2)
grid.ini <- as.matrix(expand.grid(sigmasq = unique(ini.temp[,
1]), phi = unique(ini.temp[, 2]), tausq = unique(nugget),
kappa = unique(kappa)))
v.loss <- function(parms, u, v, n, cov.model, weights) {
sigmasq <- parms[1]
phi <- parms[2]
if (cov.model == "power")
phi <- 2 * exp(phi)/(1 + exp(phi))
tausq <- parms[3]
kappa <- parms[4]
if (cov.model == "power")
v.mod <- tausq + cov.spatial(u, cov.pars = c(sigmasq,
phi), cov.model = "power", kappa = kappa)
else v.mod <- (sigmasq + tausq) - cov.spatial(u,
cov.pars = c(sigmasq, phi), cov.model = cov.model,
kappa = kappa)
if (weights == "equal")
loss <- sum((v - v.mod)^2)
if (weights == "npairs")
loss <- sum(n * (v - v.mod)^2)
if (weights == "cressie")
loss <- sum((n/(v.mod^2)) * (v - v.mod)^2)
return(loss)
}
grid.loss <- apply(grid.ini, 1, v.loss, u = XY$u,
v = XY$v, n = XY$n, cov.model = cov.model, weights = weights)
ini.temp <- grid.ini[which(grid.loss == min(grid.loss))[1],
, drop = FALSE]
if (is.R())
rownames(ini.temp) <- "initial.value"
if (messages.screen) {
cat(" selected values:\n")
print(rbind(round(ini.temp, digits = 2), status = ifelse(c(FALSE,
FALSE, fix.nugget, fix.kappa), "fix", "est")))
cat(paste("loss value:", min(grid.loss), "\n"))
}
names(ini.temp) <- NULL
ini.cov.pars <- ini.temp[1:2]
nugget <- ini.temp[3]
kappa <- ini.temp[4]
grid.ini <- NULL
}
if (ini.cov.pars[1] > 2 * vmax)
warning("unreasonable initial value for sigmasq (too high)")
if (ini.cov.pars[1] + nugget > 3 * vmax)
warning("unreasonable initial value for sigmasq + nugget (too high)")
if (vario$output.type != "cloud") {
if (ini.cov.pars[1] + nugget < 0.3 * vmax)
warning("unreasonable initial value for sigmasq + nugget (too low)")
}
if (nugget > 2 * vmax)
warning("unreasonable initial value for nugget (too high)")
if (ini.cov.pars[2] > 1.5 * umax)
warning("unreasonable initial value for phi (too high)")
if (!fix.kappa) {
if (cov.model == "powered.exponential")
Tkappa.ini <- log(kappa/(2 - kappa))
else Tkappa.ini <- log(kappa)
}
if (minimisation.function == "nls") {
if (ini.cov.pars[2] == 0)
ini.cov.pars <- max(XY$u)/10
if (kappa == 0)
kappa <- 0.5
if (cov.model == "power")
Tphi.ini <- log(ini.cov.pars[2]/(2 - ini.cov.pars[2]))
else Tphi.ini <- log(ini.cov.pars[2])
XY$cov.model <- cov.model
if (fix.nugget) {
XY$nugget <- as.vector(nugget)
if (fix.kappa) {
XY$kappa <- as.vector(kappa)
res <- nls((v - nugget) ~ matrix((1 - cov.spatial(u,
cov.pars = c(1, exp(Tphi)), cov.model = cov.model,
kappa = kappa)), ncol = 1), start = list(Tphi = Tphi.ini),
data = XY, algorithm = "plinear", ...)
}
else {
if (cov.model == "powered.exponential")
res <- nls((v - nugget) ~ matrix((1 - cov.spatial(u,
cov.pars = c(1, exp(Tphi)), cov.model = cov.model,
kappa = (2 * exp(Tkappa)/(1 + exp(Tkappa))))),
ncol = 1), start = list(Tphi = Tphi.ini,
Tkappa = Tkappa.ini), data = XY, algorithm = "plinear",
...)
else res <- nls((v - nugget) ~ matrix((1 -
cov.spatial(u, cov.pars = c(1, exp(Tphi)),
cov.model = cov.model, kappa = exp(Tkappa))),
ncol = 1), start = list(Tphi = Tphi.ini,
Tkappa = Tkappa.ini), data = XY, algorithm = "plinear",
...)
kappa <- exp(coef(res)["Tkappa"])
names(kappa) <- NULL
}
cov.pars <- coef(res)[c(".lin", "Tphi")]
names(cov.pars) <- NULL
}
else {
if (fix.kappa) {
XY$kappa <- kappa
res <- nls(v ~ cbind(1, (1 - cov.spatial(u,
cov.pars = c(1, exp(Tphi)), cov.model = cov.model,
kappa = kappa))), start = list(Tphi = Tphi.ini),
algorithm = "plinear", data = XY, ...)
}
else {
if (cov.model == "powered.exponential")
res <- nls(v ~ cbind(1, (1 - cov.spatial(u,
cov.pars = c(1, exp(Tphi)), cov.model = cov.model,
kappa = (2 * exp(Tkappa)/(1 + exp(Tkappa)))))),
start = list(Tphi = Tphi.ini, Tkappa = Tkappa.ini),
algorithm = "plinear", data = XY, ...)
else res <- nls(v ~ cbind(1, (1 - cov.spatial(u,
cov.pars = c(1, exp(Tphi)), cov.model = cov.model,
kappa = exp(Tkappa)))), start = list(Tphi = Tphi.ini,
Tkappa = Tkappa.ini), algorithm = "plinear",
data = XY, ...)
kappa <- exp(coef(res)["Tkappa"])
names(kappa) <- NULL
}
nugget <- coef(res)[".lin1"]
names(nugget) <- NULL
cov.pars <- coef(res)[c(".lin2", "Tphi")]
names(cov.pars) <- NULL
}
if (cov.model == "power")
cov.pars[2] <- 2 * exp(cov.pars[2])/(1 + exp(cov.pars[2]))
else cov.pars[2] <- exp(cov.pars[2])
if (nugget < 0 | cov.pars[1] < 0) {
warning("\nvariofit: negative variance parameter found using the default option \"nls\".\n Try another minimisation function and/or fix some of the parameters.\n")
temp <- c(sigmasq = cov.pars[1], phi = cov.pars[2],
tausq = nugget, kappa = kappa)
print(rbind(round(temp, digits = 4), status = ifelse(c(FALSE,
FALSE, fix.nugget, fix.kappa), "fix", "est")))
return(invisible())
}
value <- sum(resid(res)^2)
message <- "nls does not provides convergence message"
}
if (minimisation.function == "nlm" | minimisation.function ==
"optim") {
.global.list <- list(u = XY$u, v = XY$v, n = XY$n,
fix.nugget = fix.nugget, nugget = nugget, fix.kappa = fix.kappa,
kappa = kappa, cov.model = cov.model, m.f = minimisation.function,
weights = weights)
ini <- ini.cov.pars
if (cov.model == "power")
ini[2] <- log(ini[2]/(2 - ini[2]))
if (cov.model == "linear")
ini <- ini[1]
if (fix.nugget == FALSE)
ini <- c(ini, nugget)
if (!fix.kappa)
ini <- c(ini, Tkappa.ini)
names(ini) <- NULL
if (minimisation.function == "nlm") {
result <- nlm(.loss.vario, ini, g.l = .global.list,
...)
result$par <- result$estimate
result$value <- result$minimum
result$convergence <- result$code
if (!is.null(get(".temp.theta", pos = 1)))
result$par <- get(".temp.theta", pos = 1)
}
else {
lower.l <- sapply(limits, function(x) x[1])
upper.l <- sapply(limits, function(x) x[2])
if (fix.kappa == FALSE) {
if (fix.nugget) {
lower <- lower.l[c("sigmasq.lower", "phi.lower",
"kappa.lower")]
upper <- upper.l[c("sigmasq.upper", "phi.upper",
"kappa.upper")]
}
else {
lower <- lower.l[c("sigmasq.lower", "phi.lower",
"tausq.rel.lower", "kappa.lower")]
upper <- upper.l[c("sigmasq.upper", "phi.upper",
"tausq.rel.upper", "kappa.upper")]
}
}
else {
if (cov.model == "power") {
if (fix.nugget) {
lower <- lower.l[c("sigmasq.lower", "phi.lower")]
upper <- upper.l[c("sigmasq.upper", "phi.upper")]
}
else {
lower <- lower.l[c("sigmasq.lower", "phi.lower",
"tausq.rel.lower")]
upper <- upper.l[c("sigmasq.upper", "phi.upper",
"tausq.rel.upper")]
}
}
else {
lower <- lower.l["phi.lower"]
upper <- upper.l["phi.upper"]
}
}
result <- optim(ini, .loss.vario, method = "L-BFGS-B",
hessian = TRUE, lower = lower, upper = upper,
g.l = .global.list, ...)
}
value <- result$value
message <- paste(minimisation.function, "convergence code:",
result$convergence)
if (cov.model == "linear")
result$par <- c(result$par[1], 1, result$par[-1])
cov.pars <- as.vector(result$par[1:2])
if (cov.model == "power")
cov.pars[2] <- 2 * exp(cov.pars[2])/(1 + exp(cov.pars[2]))
if (!fix.kappa) {
if (fix.nugget)
kappa <- result$par[3]
else {
nugget <- result$par[3]
kappa <- result$par[4]
}
if (.global.list$cov.model == "powered.exponential")
kappa <- 2 * (exp(kappa))/(1 + exp(kappa))
else kappa <- exp(kappa)
}
else if (!fix.nugget)
nugget <- result$par[3]
}
}
estimation <- list(nugget = nugget, cov.pars = cov.pars,
cov.model = cov.model, kappa = kappa, value = value,
trend = vario$trend, beta.ols = vario$beta.ols, practicalRange = practicalRange(cov.model = cov.model,
phi = cov.pars[2], kappa = kappa), max.dist = max.dist,
minimisation.function = minimisation.function)
estimation$weights <- weights
if (weights == "equal")
estimation$method <- "OLS"
else estimation$method <- "WLS"
estimation$fix.nugget <- fix.nugget
estimation$fix.kappa <- fix.kappa
estimation$lambda <- vario$lambda
estimation$message <- message
estimation$call <- call.fc
estimation$result <- result
oldClass(estimation) <- c("variomodel", "variofit")
estimation$hessian <- result$hessian
return(estimation)
}
|
9417736688227ab966a9987bab0ea7ca3fcff4af | d84b3c61348544eb3e3ce4332ac853e53d8b244a | /man/modplot.Rd | 396267a9e615d6b5d7f0b2fd0c657328e77a5643 | [] | no_license | kbroman/FuzzyForest | 07d5a0cdbeee7109d0615c1db81198ed4b2a9def | de48c0af127205dea77defae8d4f9e5b4cf41e5b | refs/heads/master | 2021-01-14T11:47:44.084730 | 2015-06-23T03:16:35 | 2015-06-23T03:16:35 | 44,036,898 | 1 | 0 | null | 2015-10-11T02:53:19 | 2015-10-11T02:53:18 | null | UTF-8 | R | false | false | 860 | rd | modplot.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fuzzy_forest_obj.R
\name{modplot}
\alias{modplot}
\title{Plots relative importance of modules.}
\usage{
modplot(object, main = NULL, xlab = NULL, ylab = NULL,
module_labels = NULL)
}
\arguments{
\item{object}{A fuzzy_forest object.}
\item{main}{Title of plot.}
\item{xlab}{Title for the x axis.}
\item{ylab}{Title for the y axis.}
\item{module_labels}{Labels for the modules. A data.frame
or character matrix whose first column gives
the current name of each module and whose second column gives
the new name to be assigned to that module.}
}
\description{
The plot is designed
to depict the size of each module and what percentage of selected
features fall into each module. In particular, it is easy to
determine which module is over-represented in the group of selected
features.
}
|
223ed2664db21ff1cd59a6dff770e5bfc3b7b5e4 | d9f816c245d4fb6b31a8bd6a0e578d7a416b0e3e | /r.R | c165e545723926c1a784bbddbc45ca0d75b8a8c1 | [] | no_license | juancarlosgeo16/calse-de-r | 33fa817520247a90d05dace1c51e3fdbc2b762b4 | 024d02b3daf1fe04ec0955641d5e3336a9c3f2d9 | refs/heads/master | 2020-07-25T01:46:13.932169 | 2019-10-11T06:01:32 | 2019-10-11T06:01:32 | 208,118,745 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,320 | r | r.R | #directorio de trabajo
getwd()
#para cambiar de directorio de trabajo, asignar una nueva carpeta de trabajo
setwd()
#asignar una varIABLE
a <- "jalados"
b <- "8"
TRUE -> c
d <- "progra1"
#para ver las variables que asigne
ls()
#para ver los archivos ded trabajo
dir()
history()
#eliminar una variable , en este caso la (a)
rm (a)
#eliminar todo
rm(list = ls())
#para instalar dependencias de la librerias que no se pueden instalar
install.packages("sf", dependencies = T)
rm(a)
#para llamar librerias
library("")
require("")
# tipos de datos
# para asignar vectores en r
#separados por comas
c()
#para saber que tipo de dato es
class()
#los buleanos son 0 y 1
#cuando se transforma el buleano en numerico
#coercion de datos : cuando se forza a convertirte en otro tipo de datos
#para ver datos que no se repiten
vector1 = c(1,2,3,3,4)
factor(vector1)
#creando data frame
#creando previamente los vectores
df = data.frame(var1 = v4, var2=v2)
#similar al data frame
library(dplyr)
tip = tibble(var1 = v4, var2=v2)
d_vino = read.delim("copiar url")
view () #para ver el archivo
print() # para visualizarlo
nrow() y ncol() #para las filas y columnas
str()
glimpse()
d_vino ["pH"]
d_vino[ filas, columnas]
colnames()#para saber el nombre de las columnas
|
065be8383f6a3269cc806ef9635387cddf62bb73 | 80839b1cbe3fe08470bdcac417d4a480bebcf8cf | /R_DataStats/Ch7.R | 94753b7157b8a95929ba5b9ac384141e894d90a2 | [] | no_license | akhagan/Misc_help | e6fb0023541eb6886ac8c8347364070abd02e9e5 | 9851ee049ae3530af9304aae7cdf6b8f3792665c | refs/heads/master | 2020-03-22T15:49:50.425573 | 2019-02-13T19:07:48 | 2019-02-13T19:07:48 | 140,281,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,353 | r | Ch7.R | library(tidyverse)
ggplot(diamonds) +
geom_histogram(mapping = aes(x = z), binwidth = 0.5)
ggplot(data = diamonds, mapping = aes(x = carat)) +
geom_histogram(binwidth = 0.01)+
coord_cartesian(xlim = c(0, 1))
nycflights13::flights %>%
mutate(
cancelled = is.na(dep_time),
sched_hour = sched_dep_time %/% 100,
sched_min = sched_dep_time %% 100,
sched_dep_time = sched_hour + sched_min / 60
) %>%
ggplot(mapping = aes(x=sched_dep_time, y=..density..)) +
geom_freqpoly(mapping = aes(colour = cancelled), binwidth = 1/4)
#Use geom_tile() together with dplyr to explore how average flight
#delays vary by destination and month of year. What makes the plot
#difficult to read? How could you improve it?
ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
geom_bin2d(mapping = aes(group= cut_number(carat,20)))
ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
geom_bin2d(mapping = aes(group= cut_width(carat,0.1)))
ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
geom_bin2d(mapping = aes(group= cut_number(price,100)))
ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
geom_bin2d(mapping = aes(group= cut_width(price,100)))
ggplot(data = diamonds, mapping = aes(x = price, y = ..density..)) +
geom_freqpoly(mapping = aes(colour = cut), binwidth = 500)
|
8593700a10e8259167892ce2e70ef5a6acd985ee | 1fad3cb4dab5b9c89d006ab1085d0376183ac9fa | /Mok-et-al_2018_Developmental-Cell_scRNAseq-analysis-R.R | b51527dd60045cbe4df0d0338d31d63e3a48c4ab | [] | no_license | kasperlab/Mok_et_al_2018_Developmental_Cell | 0bdadfd317933ff3edc854757a51d07f2836a51c | a7a907840ba061b9a3d447afbc8a1199916c0530 | refs/heads/master | 2020-04-13T16:31:49.735980 | 2018-12-27T18:29:28 | 2018-12-27T18:29:28 | 163,323,262 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,935 | r | Mok-et-al_2018_Developmental-Cell_scRNAseq-analysis-R.R | # Load source script (RaceID3_StemID2_class.R), available on https://github.com/dgrun/RaceID3_StemID2
source("~/RaceID3_StemID2_class_GO.R")
# Load required packages
source("https://bioconductor.org/biocLite.R")
biocLite("biomaRt")
install.packages("XML")
install.packages(c("tsne","pheatmap","MASS","cluster","mclust","flexmix","lattice","fpc","RColorBrewer","permute","amap","locfit"))
install.packages("gProfileR")
install.packages("randomForest")
install.packages (c("vegan", "Rtsne", "scran", "DESeq2", "Go.db"))
install.packages("caTools")
library("biomaRt")
require(colorspace)
# Load .csv file (available on GEO (GSE122026))
prdata <- read.csv("~/GSE122026_E15.0_Mesenchymal_cells_scRNAseq_Final.csv", sep = "\t")
# Remove spike-in genes and mitochondrial genes
prdata <- prdata[grep("ERCC-",rownames(prdata),invert=TRUE),]
prdata <- prdata[grep("_chrM",rownames(prdata),invert=TRUE),]
# RaceID3 parameters
# Master switch kept from the original template; the block below sets every
# RaceID3/StemID2 parameter referenced downstream.
params <- TRUE
if ( params ){
# run identification
init <- TRUE
celltypeid <- TRUE
s <- "Run"
# normalisation
do.downsample <- FALSE   # was F (unsafe single-letter alias)
dsn <- 1
# filtering thresholds (used by filterdata below)
mintotal <- 6000
minexpr <- 4
minnumber <- 2
maxexpr <- Inf
# clustering (used by clustexp below)
metric <- "manhattan"
cln <- 0
do.gap <- FALSE
sat <- TRUE
clustnr <- 30
bootnr <- 50
SE.method <- "Tibs2001SEmax"
SE.factor <- .25
B.gap <- 50
rseed <- 17000
rseed.tsne <- 1337
# outlier detection (note: the findoutliers call below hard-codes its own values)
outminc <- 25
probthr <- 1e-6
outlg <- 3
outdistquant <- .95
qmark <- .95
genegroups <- list(APO="APO",Collagen="COL",Keratin="KRT",Ribo="RPL|RPS",Mito="MTRNR",C1Q="C1Q(A|B)")
genes <- c("Gene name")   # placeholder: genes to plot on expression t-SNE maps
FUNcluster <- "hclust"
old <- FALSE
comp <- TRUE
# StemID2 / lineage inference
lineagetree <- TRUE
bootcells <- FALSE
cthr <- 10
cellproj <- TRUE
pdiback <- TRUE
pdishuf <- 100
pthr <- .01
monocle <- FALSE
lineagegenes <- TRUE
ksom <- 50
locreg <- FALSE
alpha <- .25
nbsom <- 25
minexprbr <- 5
minnumberbr <- 2
cellorder <- "dist" # "som" "tspc" "entr" "tspg" "dist"
logscale <- FALSE
cty <- list(ENT=c(3,2,1))
lineagenetwork <- TRUE
lgenes <- c("Hmgn2","Atf4","Sox9","Fos","Jun")
netk <- 1
netgenes <- "TF"
netcthr <- .5
corsig <- FALSE
markerid <- FALSE
stringent <- FALSE
mlist <- list()
nset <- 2
markermincount <- 10
clmedian <- TRUE
minsize <- 2
# output folder naming: "d" if downsampled, "n" otherwise
wd <- getwd()
norm <- "n"
if (do.downsample) {
norm <- "d"
}
}
# containers filled during the analysis
cdiff <- list()
sco <- list()
entr <- list()
## RaceID3
## Create a directory where all the plots etc. will be saved; the folder name
## encodes the key filtering/clustering parameters of this run.
dir.create(paste(wd,"/RaceID3",s,"_",norm,mintotal,"_out",outlg,"_cln",cln,"minexp",minexpr,sep=""),showWarnings= FALSE)
# NOTE: setwd() changes the working directory for the rest of the session;
# all subsequent relative output paths land in this run folder.
setwd(paste(wd,"/RaceID3",s,"_",norm,mintotal,"_out",outlg,"_cln",cln,"minexp",minexpr,sep=""))
## Initialize SCseq object with transcript counts
sc <- SCseq(prdata)
## Filter expression data
sc <- filterdata(sc, mintotal=mintotal, minexpr=minexpr, minnumber=minnumber, maxexpr=maxexpr, downsample=do.downsample, sfn=FALSE, hkn=FALSE, dsn=dsn, rseed=rseed, CGenes=NULL, FGenes=NULL, ccor=.4)
# --- Cell-cycle / proliferation correction -----------------------------------
# Query Ensembl (via biomaRt) for GO annotations of the filtered genes and
# remove principal components dominated by cell-cycle / proliferation genes.
library(biomaRt)  # was require(); library() fails loudly if the package is missing
mart <- useMart(biomart = "ensembl", dataset = "mmusculus_gene_ensembl")
# Strip the "__chr*" suffix from rownames before querying Ensembl.
g <- sub("__chr.+","",rownames(sc@fdata))
k <- getBM(attributes = c("external_gene_name", "go_id","name_1006"),filters="external_gene_name",values=g,mart=mart)
# Gene sets annotated "cell cycle" / "cell proliferation", mapped back to the
# suffixed rownames used in sc@fdata.
gCC <- name2id( k$external_gene_name[k$name_1006 == "cell cycle"],rownames(sc@fdata))
gCP <- name2id( k$external_gene_name[k$name_1006 == "cell proliferation"],rownames(sc@fdata))
vset <- list(gCC,gCP)
x <- CCcorrect(sc@fdata,vset=vset,CGenes=NULL,ccor=.4,nComp=NULL,pvalue=.05,quant=.01,mode="pca")
# Number of principal components that have been removed
x$n
# Loadings of the first principal component that has been removed
y <- x$pca$rotation[,x$n[1]]
# Genes from vset are either enriched in the head or the tail of this list
tail(y[order(y,decreasing=TRUE)],10)
# Reassign the corrected expression matrix to sc@fdata
sc@fdata <- x$xcor
## K-means clustering
sc <- clustexp(sc,clustnr=clustnr,bootnr=bootnr,metric=metric,do.gap=do.gap,sat=sat,SE.method=SE.method,SE.factor=SE.factor,B.gap=B.gap,cln=cln,rseed=rseed,FUNcluster=FUNcluster,FSelect=FALSE)
## Compute t-SNE map
fast <- FALSE
sc <- comptsne(sc,rseed=15555,sammonmap=FALSE,initial_cmd=FALSE,fast=fast,perplexity=30)
# Detect outliers and redefine clusters.
# NOTE(review): outminc=5, probthr=1e-7 are hard-coded here and differ from
# the parameter-block values (outminc=25, probthr=1e-6) -- confirm intended.
sc <- findoutliers(sc, outminc=5,outlg=3,probthr=1e-7,thr=2**-(1:40),outdistquant=.95)
# Reassign clusters based on random forest
sc <- rfcorrect(sc,rfseed=12345,final=TRUE,nbfactor=5)
# Cell breakdown for final clusters: one row per cell, ordered by cluster id.
dir.create("clust_txt",showWarnings=FALSE)  # was showWarnings=F (unsafe alias)
x <- data.frame(CELLID=names(sc@cpart),cluster=sc@cpart)
write.table(x[order(x$cluster,decreasing=FALSE),],"clust_txt/cell_clust.xls",row.names=FALSE,col.names=TRUE,sep="\t",quote=FALSE)
## Diagnostic plots
## Each figure is opened via plot.2.file() and closed with dev.off().
# Gap statistics (only if gap-based cluster-number estimation was used)
if ( do.gap ){
fname <- paste("gap_clust_cells",s,sep="_")
plot.2.file(fname)
plotgap(sc)
dev.off()
}
# Cluster-quality diagnostics require more than one k-means cluster.
if ( length(unique(sc@cluster$kpart)) > 1 ){
# Silhouette of k-means clusters
fname <- paste("silhouette",s,sep="_")
plot.2.file(fname)
plotsilhouette(sc)
dev.off()
# Jaccard's similarity of k-means clusters
fname <- paste("jaccard",s,sep="_")
plot.2.file(fname)
plotjaccard(sc)
dev.off()
# Saturation-based diagnostics (only if sat = TRUE)
if ( sat ){
fname <- paste("saturation",s,sep="_")
plot.2.file(fname)
plotsaturation(sc)
dev.off()
fname <- paste("dispersion",s,sep="_")
plot.2.file(fname)
plotsaturation(sc,disp=TRUE)
dev.off()
}
}
# Barchart of outlier probabilities
fname <- paste("outlierprob",s,sep="_")
plot.2.file(fname)
plotoutlierprobs(sc)
dev.off()
# Regression of background model
fname <- paste("var_mean_cell_orig",s,sep="_")
plot.2.file(fname)
plotbackground(sc)
dev.off()
# Dependence of outlier number on probability threshold (probthr)
fname <- paste("par_sensitivity",s,sep="_")
plot.2.file(fname)
plotsensitivity(sc)
dev.off()
# Highlight final clusters in t-SNE map
fname <- paste("tsne_map_cells_c",s,"final_v2",sep="_")
plot.2.file(fname)
plottsne3(sc,final=TRUE,ax=FALSE,text=TRUE)  # was ax=F,text=T (unsafe aliases)
dev.off()
# Highlight k-means clusters in t-SNE map
fname <- paste("tsne_map_cells_c",s,"orig",sep="_")
plot.2.file(fname)
plottsne3(sc,final=FALSE,ax=TRUE)
dev.off()
# Highlight cell groups (experiments) in t-SNE map; the group label is the
# second "_"-separated token of each cell name.
fname <- paste("tsne_map_cells_types_symbol",s,sep="_")
plot.2.file(fname)
types <- vapply(names(sc@ndata), function(x) strsplit(x,"_")[[1]][2], character(1))  # vapply: type-stable sapply
plotsymbolstsne3(sc,types=types,ax=FALSE,seed=6) ## seed = 8 for dev_order figure ##
dev.off()
## Extensions: transcriptome entropy per cell and per cluster
# Genes passing the expression filter, and the final cluster partition.
cdata <- sc@ndata[apply(sc@ndata >= minexpr,1,sum) >= minnumber,]
part <- sc@cpart
set.seed(rseed)
# Downsample every cell to the smallest total transcript count (presumably so
# entropies are comparable across cells), unless data were already downsampled.
minn <- min(apply(sc@expdata[,names(sc@cpart)],2,function(x) sum(round(x,0))))
dsd <- if ( do.downsample ) sc@ndata else downsample(sc@expdata[,names(sc@cpart)],n=minn,dsn=dsn)
# Per-cell entropy of the transcript distribution.
probs <- t(t(dsd)/apply(dsd,2,sum))
entr[[s]] <- -apply(probs*log(probs),2,sum)
# Per-cluster entropy, computed from the cluster-mean expression profile.
probs <- list()
for ( i in 1:max(part) ) probs[[i]] <- if ( sum( part == i ) == 1 ) dsd[,part == i]/sum(dsd[,part == i]) else apply(dsd[,part == i],1,mean)/sum(apply(dsd[,part == i],1,mean))
ent <- c()
for ( p in probs ) ent <- append(ent,-sum(p[p>0]*log(p[p>0])))
# Barplot of per-cluster entropy.
fname <- paste("barplot_entropy",s,sep="_")
plot.2.file(fname)
plot(1:length(ent),ent,axes=FALSE,ylim=c(min(ent,na.rm=TRUE) - .1, max(ent,na.rm=TRUE) + .1),xlab="Cluster",ylab="Entropy",cex=0)
rect(1:length(ent) - .4,min(ent) - .1,1:length(ent) + .4,ent,col="grey")
axis(2)
axis(1,at=1:length(ent),cex.axis=.75)
dev.off()
# Per-cell quantities to visualise on the t-SNE map and per cluster:
# entropy, transcript counts, gene counts and log2 transcript counts.
tsd <- sc@tsne
co <- sc@fcol
l <- list(entr=entr[[s]],number_t=apply(sc@expdata[,names(sc@cpart)],2,sum),number_g=apply(sc@expdata[,names(sc@cpart)],2,function(x) sum(x>1)),number_t_log=log2(apply(sc@expdata[,names(sc@cpart)],2,sum)))
nl <- list(entr="Entropy",number_t="Number of transcripts",number_g="Number of genes",number_t_log="log2 Number of transcripts")
for ( i in 1:length(l) ){
mi <- min(l[[i]],na.rm=TRUE)
ma <- max(l[[i]],na.rm=TRUE)
if ( mi < ma ){
# Map the quantity onto a 100-step colour ramp and draw it on the t-SNE
# map, with a colour-scale legend in a second layout panel.
ColorRamp <- colorRampPalette(rev(brewer.pal(n = 7,name = "RdYlBu")))(100)
ColorLevels <- seq(mi, ma, length=length(ColorRamp))
v <- round((l[[i]] - mi)/(ma - mi)*99 + 1,0)
fname <- paste("tsne_map",names(l)[i],s,sep="_")
plot.2.file(fname)
layout(matrix(data=c(1,3,2,4), nrow=2, ncol=2), widths=c(5,1,5,1), heights=c(5,1,1,1))
par(mar = c(3,5,2.5,2))
plot(tsd,xlab="Dim 1",ylab="Dim 2",main=paste("tsne: ",s," (",nl[[i]],")",sep=""),pch=20,cex=0,col="grey")
for ( k in 1:length(v) ){
points(tsd[k,1],tsd[k,2],col=ColorRamp[v[k]],pch=20,cex=1.5)
}
par(mar = c(3,2.5,2.5,2))
image(1, ColorLevels,
matrix(data=ColorLevels, ncol=length(ColorLevels),nrow=1),
col=ColorRamp,
xlab="",ylab="",
xaxt="n")
layout(1)
dev.off()
# Per-cluster boxplots of the same quantity, with Wilcoxon tests of each
# cluster against all cells (stars: * for .001 <= p < .05, ** for p < .001).
ll <- list()
pv <- c()
qv <- c()
ll[["all"]] <- l[[i]]
for ( k in 1:max(part) ){
ll[[paste("cl",k,sep=".")]] <- l[[i]][part == k]
# Wilcoxon test only when both groups have more than 5 cells, else NA.
wi <- if (min(length(ll[["all"]]),length(ll[[paste("cl",k,sep=".")]])) > 5 ) wilcox.test(ll[["all"]],ll[[paste("cl",k,sep=".")]])$p.value else NA
pv <- append(pv,wi)
qv <- append(qv, quantile(ll[[paste("cl",k,sep=".")]],.95))
}
fname <- paste("boxplot_cell_clusters",names(l)[i],s,sep="_")
plot.2.file(fname)
boxplot(ll,xlab="Cluster",main=s,ylab=nl[[i]],ylim=c(min(l[[i]]),max(l[[i]]) + ( max(l[[i]]) - min(l[[i]]) )*.1),names=sub("cl\\.","",names(ll)),cex.axis=.5,cex=.5,pch=20)
f <- !is.na(pv) & pv < .05 & pv >= 0.001
if ( sum(f) > 0 ) text((2:(max(part) + 1))[f],max(l[[i]]) + ( max(l[[i]]) - min(l[[i]]) )*.05,rep("*",max(part))[f])
f <- !is.na(pv) & pv < .001
if ( sum(f) > 0 ) text((2:(max(part) + 1))[f],max(l[[i]]) + ( max(l[[i]]) - min(l[[i]]) )*.05,rep("**",max(part))[f])
abline(a=median(l[[i]]),b=0,col="red",lty=2)
dev.off()
}
}
# Top-10 marker genes per cluster: z-score of the cluster median expression
# against the qmark (95th percentile) profile of all other cells, scaled by
# their MAD.
for ( i in 1:max(part) ){
x <- if ( sum(part != i) == 1 ) cdata[,part != i] else apply(cdata[,part != i],1,quantile,probs=qmark)
#x <- if ( sum(part != i) == 1 ) cdata[,part != i] else apply(cdata[,part != i],1,max)
y <- if ( sum(part == i) == 1 ) cdata[,part == i] else apply(cdata[,part == i],1,median)
sd <- if ( sum(part != i) == 1 ) x else apply(cdata[,part != i],1,mad)
z <- (y - x)/sd
# Keep the 10 highest z-scores, cap at 100 for plotting, and strip the
# "__chr*" suffix from gene names.
n <- head(z[order(z,decreasing=TRUE)],10)
n[n>100] <- 100
names(n) <- sub("\\_\\_chr\\w+","",names(n))
fname <- paste("cluster_marker_genes",s,"clust",i,sep="_")
dir.create("marker",showWarnings= FALSE)
plot.2.file(paste("marker",fname,sep="/"))
b <- barplot(n,cex.names=.5,main=paste(s,": Cluster ",i,sep=""),ylab="z-score",ylim=c(min(0,n)-( max(n,0) - min(0,n) )/2,max(0,n)),names.arg=FALSE)
text(b,min(0,n) - ( max(0,n) - min(0,n) )/3,names(n),srt=90)
dev.off()
}
# Create tSNEs in separate folder
# One expression t-SNE per gene of interest (`genes`, defined earlier); output
# goes to "exp/". Errors from plotexptsne2() are deliberately swallowed
# (handler returns 0) so a single failing gene does not abort the loop --
# note that this also hides genuine problems.
dir.create("exp", showWarnings = FALSE)
for ( g in genes ) {
# NOTE(review): `F` is the reassignable shorthand for FALSE; prefer FALSE.
facs <- F
ax <- FALSE
# fname <- paste("tsne_map",s,g,"log","noaxis",sep="_")
# plot.2.file(paste("exp",fname,sep="/"))
# tryCatch(plotexptsne2(sc,g,logsc=TRUE,n=paste(g,"log",sep="_"),ax=ax,facs=facs),error=function(err) 0 )
# dev.off()
fname <- paste("tsne_map",s,g,"noaxis",sep="_")
plot.2.file(paste("exp",fname,sep="/"))
tryCatch(plotexptsne2(sc,g,ax=ax,facs=facs),error=function(err) 0)
dev.off()
}
## StemID2
## Build the StemID2 lineage tree from the RaceID object `sc` (created earlier
## in the script) and write the standard diagnostic plots via
## plot.2.file()/dev.off().
# Initialization
ltr <- Ltree(sc)
# Computation of entropy
ltr <- compentropy(ltr)
# Computation of the projections for all cells
ltr <- projcells(ltr,cthr=2,nmode=FALSE)
# Computation of the projections for all cells after randomization
# (the fixed rseed keeps the randomization reproducible)
ltr <- projback(ltr,pdishuf=2000,nmode=FALSE,fast=FALSE,rseed=17000)
# Assembly of the lineage tree
ltr <- lineagetree(ltr,pthr=0.05,nmode=FALSE,fast=FALSE)
# Determination of significant differentiation trajectories
ltr <- comppvalue(ltr,pethr=0.05,nmode=FALSE,fast=FALSE)
## Diagnostic plots
# Histogram of ratio between cell-to-cell distances in the embedded and the input space
fname <- paste("stemid_distanceratio",s,sep="_")
plot.2.file(fname)
plotdistanceratio(ltr)
dev.off()
# t-SNE map of the clusters with more than cthr cells including a minimum spanning tree for the cluster medoids
fname <- paste("stemid_map",s,sep="_")
plot.2.file(fname)
plotmap(ltr)
dev.off()
# Visualization of the projections in t-SNE space overlayed with a minimum spanning tree connecting the cluster medoids
fname <- paste("stemid_mapprojections",s,sep="_")
plot.2.file(fname)
plotmapprojections(ltr)
dev.off()
# Lineage tree showing the projections of all cells in t-SNE space
fname <- paste("stemid_tree_projections",s,sep="_")
plot.2.file(fname)
plottree(ltr,showCells=TRUE,nmode=FALSE,scthr=.3)
dev.off()
# lineage tree without showing the projections of all cells
fname <- paste("stemid_tree",s,sep="_")
plot.2.file(fname)
plottree(ltr,showCells=FALSE,nmode=FALSE,scthr=.3)
dev.off()
# Heatmap of the enrichment p-values for all inter-cluster links
fname <- paste("stemid_link_enrich",s,sep="_")
plot.2.file(fname)
plotlinkpv(ltr)
dev.off()
# Heatmap of the link score for all inter-cluster links
fname <- paste("stemid_link_score",s,sep="_")
plot.2.file(fname)
plotlinkscore(ltr)
dev.off()
# Heatmap showing the fold enrichment (or depletion) for significantly enriched or depleted links
fname <- paste("stemid_projenrichment",s,sep="_")
plot.2.file(fname)
projenrichment(ltr)
dev.off()
## Computing the StemID2 score
x <- compscore(ltr,nn=1,scthr=0)
# Plotting the StemID2 score
fname <- paste("stemid_score",s,sep="_")
plot.2.file(fname)
plotscore(ltr,nn=1,scthr=0)
dev.off()
# Retrieve cells from branch in pseudo-temporal order as inferred by the projection coordinates
# NOTE(review): the branch c(4,5,6,7,8) is hard-coded; it must correspond to a
# chain of clusters that actually exists in this particular tree.
n <- cellsfromtree(ltr,c(4,5,6,7,8))
# Filter out lowly expressed genes
fs <- filterset(ltr@sc@ndata,n=n$f,minexpr=3,minnumber=1)
# Compute self organizing map (SOM) of co-expressed genes
s1d <- getsom(fs,nb=1000,k=5,locreg=TRUE,alpha=.5)
ps <- procsom(s1d,corthr=.85,minsom=3)
# Coloring scheme for clusters (vector with colors)
fcol <- ltr@sc@fcol
y <- ltr@sc@cpart[n$f]
# Plot average z-score for all modules derived from the SOM
fname <- paste("stemid_SOM_zscore_modules",s,sep="_")
plot.2.file(fname)
plotheatmap(ps$nodes.z,xpart=y,xcol=fcol,ypart=unique(ps$nodes),xgrid=FALSE,ygrid=TRUE,xlab=FALSE)
dev.off()
# Plot z-score profile of each module
fname <- paste("stemid_SOM_zscore_genes",s,sep="_")
plot.2.file(fname)
plotheatmap(ps$all.z,xpart=y,xcol=fcol,ypart=ps$nodes,xgrid=FALSE,ygrid=TRUE,xlab=FALSE)
dev.off()
# Plot normalized expression profile of each module
fname <- paste("stemid_SOM_norm_genes",s,sep="_")
plot.2.file(fname)
plotheatmap(ps$all.e,xpart=y,xcol=fcol,ypart=ps$nodes,xgrid=FALSE,ygrid=TRUE,xlab=FALSE)
dev.off()
# Plot binarized expression profile of each module (z-score < -1, -1 < z-score < 1, z-score > 1)
fname <- paste("stemid_SOM_binary_genes",s,sep="_")
plot.2.file(fname)
plotheatmap(ps$all.b,xpart=y,xcol=fcol,ypart=ps$nodes,xgrid=FALSE,ygrid=TRUE,xlab=FALSE)
dev.off()
# Extract all genes from one module (2) of the SOM
stemnode<-2
g <- names(ps$nodes)[ps$nodes == stemnode]
# plot average expression profile of these genes along the trajectory
plotexpression(fs,y,g,n$f,k=25,col=fcol,name=paste("node",stemnode,sep=" "),cluster=FALSE,locreg=TRUE,alpha=.5,types=NULL)
# Plot expression of a single gene
# NOTE(review): "Gene" is a placeholder name -- replace with a real gene id.
plotexpression(fs,y,"Gene",n$f,k=25,col=fcol,cluster=FALSE,locreg=TRUE,alpha=.5,types=NULL)
# Plot average expression profile of these genes along the trajectory, highlighting batch origin
# (batch labels obtained by stripping the trailing "_<digits>" from cell names)
stemnode<-37
g <- names(ps$nodes)[ps$nodes == stemnode]
plotexpression(fs,y,g,n$f,k=25,col=fcol,name=paste("node",stemnode,sep=" "),cluster=FALSE,locreg=TRUE,alpha=.5,types=sub("\\_\\d+","",n$f))
|
6066952f00b2897967702e7c9241cbffbd9d2886 | 230982b9ff8911f71ad4dc36c68b6e3639f4f485 | /VertNet2013/Tues.Acquiring&Exploring_species.r | 60619167d7e6e7f86c20ff57e84a0c463eda725c | [] | no_license | mkoo/BITW | aeb8b5d09c36744389979ea8a3b448e790db8c97 | bf645cfc5c1fbeeb129c3d6fbf290a47ae2e9d9e | refs/heads/master | 2020-05-17T12:38:58.209027 | 2018-11-09T23:04:17 | 2018-11-09T23:04:17 | 16,720,884 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 10,894 | r | Tues.Acquiring&Exploring_species.r | #############################################
###VertNet Biodiversity Informatics Training Workshop
###June 2013
#Acquiring and Visualizing occurrence records using 3 different packages
#in geographic and environmental space
#using dismo, rgdal, maptools packages
#
###updated MSK, MH 20130524
###for R.3.0.1
#
##^ Package Versions
##^ dismo 0.8-11
##^ maptools 0.8-23
##^ rgdal 0.8-8
##^ sp 1.0-9
##^ scales 0.2.3
##^ rvertnet 0.0-5
##^ rgbif 0.2.0
##^ ggplot2 0.9.3.1
#################################################
# Session setup. NOTE(review): setwd()/install.packages() inside a script are
# workshop conveniences; in reusable code prefer project-relative paths and
# documented dependencies.
getwd()
setwd("~/Desktop/BITW-Workshop")
install.packages(c('dismo','maptools', 'rgdal', 'sp')); #if you have not already installed these packages
library(dismo)
library(rgdal)
library(maptools)
gpclibPermit()
library(sp)
#Acquire species occurrence records (here we're using GBIF with {dismo}, yesterday you used the website):
# NOTE(review): geo=T uses the reassignable shorthand T; prefer TRUE.
species1<-gbif("Chiromantis", "xerampelina", geo=T);
species2<-gbif("Agama", "atra", geo=T);
#check out values
colnames(species1);
head(species1);
head(species2);
#Make some Tallies
table(species1$country);
#Subsetting on coordinates
#with geo=T, only saved georeferenced records, but just in case...
# (drop records whose lat/lon are exactly 0, a common placeholder for missing)
species1_xy<-subset(species1,lat !=0 & lon !=0);
dim(species1_xy)
species2_xy<-subset(species2, lat !=0 & lon !=0);
dim(species2_xy)
#Set coordinates to a Spatial data frame
# (sp's coordinates<- promotes the data.frame to a SpatialPointsDataFrame)
coordinates(species1_xy) = c("lon", "lat")
coordinates(species2_xy) = c("lon", "lat")
#Map out to check for spatial integrity, or at least make sure records are on the right continent!
#for quick viewing we'll use the wrld_simpl dataset (from maptools package).
data(wrld_simpl);
plot(wrld_simpl, axes=TRUE, col='light green')
points(species1_xy, col='orange', pch=20, cex=0.75); #outliers in other continents? #why are they occurring there?
#Using zoom() to get a closer look-- click once on map to start bounding box, and second click to close
zoom(wrld_simpl)
points(species1_xy, col='orange', pch=20, cex=0.75)
#######
#Subsetting by country
#First tally records by country
table(species1_xy@data$country) #Note that for spatial data frames we have to specify the 'slot' of the object
#let's pick those in South Africa for now
sp1_SA = subset(species1_xy, country=="South Africa")
sp2_SA = subset(species2_xy, country=="South Africa")
# {dismo} has a few functions to display Google Maps:
points.SA <- as.data.frame(sp1_SA)
points.SA$x <- sp1_SA@coords[,"lon"]
points.SA$y <- sp1_SA@coords[,"lat"]
sp.map <- gmap(points.SA, type="terrain") #
# Google Maps are in Web Mercator projection, so
# project the points to that using Mercator()
# this will allow proper mapping of points on basemaps
sp1_SA.merc <- Mercator(points.SA[,c("x","y")])
plot(sp.map)
points(sp1_SA.merc, pch=20, col="red")
###
#Export to CSV for snapshot of data (which one to export? why?):
# NOTE(review): write.csv() fixes sep="," (the argument is ignored/disallowed)
# and returns NULL, so this assignment stores nothing useful.
species1_gbifresults = write.csv(species1_xy, "gbifresults_xy.csv", sep=",")
#####################
# Using {rVertnet} and {rgbif}
#Check out DarwinCore field and datatypes before exploring data on VertNet portal
#first of all, what formats are the data in?
# Install/load the VertNet and GBIF API client packages.
install.packages(c('rvertnet','rgbif')); #if you have not already installed these packages
library("rvertnet")
library("rgbif")
#{rvertnet}
# Bundled lookup tables describing DarwinCore field names and data types.
data(dcnames)
head(dcnames) #sneak peek
dcnames #see them all
data(dc) #shows data type
head(dc)
#
#run taxon queries one by one
vertoccurrencecount(t="Megaceryle maxima", grp="bird") #how many?
vertproviders(t="Megaceryle maxima", grp="bird") # which collections have the most?
vertoccurrencecount(t="Alcedo semitorquata", grp="bird")
vertproviders(t="Alcedo semitorquata", grp="bird")
vertoccurrencecount(t="Halcyon albiventris", grp="bird")
vertproviders(t="Halcyon albiventris", grp="bird")
vertlocations(l="Africa", t="Alcedo semitorquata", grp="bird", d="1900-1950") # locations and number of historic records (d=YYYY or range)
vertlocations(l="Africa", t="Alcedo semitorquata", grp="bird", d="1950-2012") # locations and number of more recent records
#map of all three bird specimens:
# NOTE(review): the doubled "splist <- splist <-" is a harmless but accidental
# duplicated assignment; a single assignment is intended.
splist <- splist <- c("Alcedo semitorquata", "Halcyon albiventris", "Megaceryle maxima")
out <- lapply(splist, function(x) vertoccurrence(t=x, grp="bird", num=500))
# NOTE(review): the next line overwrites the three-species result just built,
# so only Alcedo semitorquata ends up being mapped.
out <- vertoccurrence(t="Alcedo semitorquata", grp="bird")
vertmap(out) #this rVertnet command takes care of a lot of things behind the curtain, including basemaps, NA values, data.frame
#now using {rgbif}
library(rgbif)
#under the hood taxon concepts being used in GBIF
taxoncount(scientificname="Halcyon albiventris")
hkey=taxonsearch(scientificname="Halcyon albiventris")
taxonget(hkey)
#density check: this is a powerful way to see the spatial make-up of records without mapping them. This retrieves a data.frame of total occurrence counts by one-degree cell for a taxon, country, dataset, provider, etc
head(densitylist(taxonconceptKey = 5228322)) #for Halcyon albiventris rank of species
#and here are the top counts by species list
density_spplist(taxonconceptKey = 5228322, spplist = "great", listcount = "counts")[1:10, ]
d_out=densitylist(taxonconceptKey = 5228322)
gbifmap(d_out)
#grab and view data
Hal_alb=occurrencelist(scientificname='Halcyon albiventris', coordinatestatus=TRUE, latlongdf=TRUE, removeZeros=TRUE)
dim(Hal_alb)
Hal_alb
gbifmap(Hal_alb)
#We may want to check all records by country
density_spplist(originisocountrycode = "ZA", spplist = "great")[1:10] # Top ten list of species data for South Africa
d_ZA <- densitylist(originisocountrycode = "ZA")
gbifmap(d_ZA) # on world map
gbifmap(d_ZA, region="South Africa") # country only-- review the top ten list or the entire list of species for South Africa and see if this density map makes sense? What other issues are there?
#How does this compare with the point data?
#####################
#If you are interested in other ways to create maps in R, there are lots of great tools out there for the job,
#some a bit involved, but worth the effort.
#Check out ggplot, or use CartoDB's R package (https://github.com/Vizzuality/cartodb-r) with your own data uploaded on
#Cartodb, cloud-based postgres/postgis
#
#More on mapping species richness later...
#############################
#Mapping in environmental space using Worldclim.org climate models
#We will use worldclim bioclimatic variables to explore the species' environmental niche
bc5 <- getData('worldclim', var='bio', res=5) # If slow or server not responding we have a local version for copying
plot(bc5,1)
#interactive zooming... double click
zoom(raster(bc5,1))
#do this again if you like to see climate model-- at coarse resolution
#or use your own datasets:
#...
#create a raster stack ()
#to make a stack of the current climate data-- it's the same as the above fetch routine
files = list.files(path="wc5", pattern='*.bil', full.names=TRUE)
library(raster)
s = stack(files)
plot(s)
plot(s, 12)
#extract raster stack values to sample sites
# NOTE(review): sp1uniques/sp2uniques are only created further down in the
# duplicate-removal section of this script; run that section first or these
# two lines fail with "object not found".
sp1_swd<-extract(bc5, sp1uniques[, c('lon','lat')])
sp2_swd<-extract(bc5, sp2uniques[, c('lon','lat')])
#examine values
#graph values
sp1_data<-data.frame(sp1_swd)
sp2_data<-data.frame(sp2_swd)
pairs(sp1_data[,1:11], cex=0.1, fig=TRUE) #how are these bioclim variables related?
pairs(sp1_data[,12:19], cex=0.1, fig=TRUE)
#fast plot in environmental space
plot(sp1_data$bio1, sp1_data$bio12, axes=TRUE)
#write other ways to view the values
# NOTE(review): fill-in-the-blank exercise placeholder; errors if run literally.
plot(...)
# kmeans clustering-to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean.
sp2_dataclean <- na.omit(sp2_data) # First remove the NAs from the data frame
k <- kmeans(sp2_dataclean, 2)
# NOTE(review): africa_shp is never defined in this script; it must be loaded
# beforehand for this plot to work.
plot(africa_shp, axes=TRUE)
points(species2$lon, species2$lat, col=k$cluster)
#reduce variability via PCA
sp1_pca<-prcomp(na.omit(sp1_swd))
plot(sp1_pca)
sp1_pca
sp1_pca$rotation # loadings
sp1_pca$x # PCs (aka scores)
screeplot(sp1_pca, npcs = 5, type = "lines");
#play with the plotting...
plot(sp1_pca$x[,1], sp1_pca$x[,2], xlab='Principal component 1', ylab='Principal component 2', main='PCA of Species 1')
# NOTE(review): sp2_pca is never computed above (only sp1_pca); create it with
# prcomp(na.omit(sp2_swd)) before running the next line.
plot(sp2_pca$x[,1], sp2_pca$x[,2], xlab='Principal component 1', ylab='Principal component 2', main='PCA of Species 2')
#But prcomp() is pretty barebones so try some better packages.
#to inspect the variables a little more closely we can delve a little into some other packages that specializes in multivariate stats (e.g.factomineR):
#FactoMineR (http://factominer.free.fr/)
install.packages("FactoMineR")
library(FactoMineR)
# NOTE(review): this overwrites the prcomp result in sp1_pca with a
# FactoMineR::PCA object, which has a different structure ($ind, $var, $eig).
sp1_pca<-PCA(na.omit(sp1_swd))
sp1_pca #Results of PCA and more-- explore them.
#A few I use often
sp1_pca$ind$contrib
sp1_pca$var$contrib
plot.PCA(sp1_pca)
barplot(sp1_pca$eig[,1],main="Eigenvalues",names.arg=1:nrow(sp1_pca$eig))
dimdesc(sp1_pca) #describe the dimensions
#Online Tutorial (http://factominer.free.fr/docs/code_tutorial.r)
dev.off()
sp1.pca =PCA(sp1_swd[,1:19], scale.unit=TRUE, ncp=2, quanti.sup=c(11: 12), graph=T, axes = c(1,2))
sp2.pca =PCA(sp2_swd[,1:19], scale.unit=TRUE, ncp=5, quanti.sup=c(11: 12), graph=T)
#Correlation of variables is important consideration in modeling
#How would you use this for variable selection?
cor(sp1_data, use = "na.or.complete", method = "pearson")
######################
#{Dismo} is ultimately about conducting species distribution modeling
#There are several {dismo} tools for prepping data, data cleaning, etc
#See Hijmans and Elith, "Species Distribution Modeling"
#http://cran.r-project.org/web/packages/dismo/vignettes/sdm.pdf or available from us
#We have been doing a simplified version
#Start with 2.2 Data Cleaning Chapter, p 8
#here we will do a simple find and remove duplicate records
dups1<-duplicated(species1[, c('species', 'lon','lat')])
sum(dups1)
sp1uniques<-species1[!dups1,];
dups2<-duplicated(species2[, c('species', 'lon','lat')])
sum(dups2)
sp2uniques<-species2[!dups2,];
#Make use of properly georeferenced localities by filtering on "coordinateuncertaintyinmeters" ; we are going to skip georeferencing in this tutorial as it the pdf is out of data, but here's one suggestion to filter on coordinate uncertainties:
sp2clean=subset(sp2uniques, !is.na(coordUncertaintyM) & coordUncertaintyM<=5000, select=c(species,lon,lat))
#
#For setting up your training and testing samples, consider:
#Brenning, A. 2012. sperrorest: Spatial Error Estimation and Variable Importance. R package, available online from CRAN.
#http://cran.r-project.org/web/packages/sperrorest/index.html
#http://cran.r-project.org/web/packages/sperrorest/sperrorest.pdf
#
#Other ways to contrast/compare niches-- looking at niche similarity with phyloclim package
#http://cran.r-project.org/web/packages/phyloclim/phyloclim.pdf
|
93cb18cc0fae420f853286ec24fbd16de6ef9dbb | b38fcb2c8ef24eed88787351ae6a01c3e4310120 | /Day 02. Pictogram/codigo_02.R | afec4a455e0f02f8b22e612df9ce5bb9f1a0543a | [] | no_license | JMZarco/30DayChartChallenge2021 | ee24a79ae86b41a2f8bdf01f318451ccbe92b092 | b5b39376749868a4cec68d324427fc122bd71de8 | refs/heads/main | 2023-06-07T10:46:11.673414 | 2021-07-07T20:44:52 | 2021-07-07T20:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,076 | r | codigo_02.R | # Obtención de los datos
# devtools::install_github("moldach/vapoRwave")
# DOC: https://github.com/moldach/vapoRwave
# (the fonts that are required are listed there)
# Libraries
library(waffle)
library(hrbrthemes)
library(extrafont)
library(tidyverse)
library(vapoRwave)
library(ggtext) # I think this one ended up unused
# DATA FROM INEGI - Housing (2020 census)
# https://www.inegi.org.mx/contenidos/programas/ccpv/2020/tabulados/cpv2020_b_eum_16_vivienda.xlsx
# Download the file
curl::curl_download("https://www.inegi.org.mx/contenidos/programas/ccpv/2020/tabulados/cpv2020_b_eum_16_vivienda.xlsx",
                    "vivienda_nac.xlsx")
# Read the file: keep the "video game console" appliance row, compute the
# percentage of dwellings per state that own one, sort descending, and split
# the "NN StateName" identifier into state code (CVE_EDO) and name (ENT).
viv = readxl::read_xlsx("vivienda_nac.xlsx", sheet = "18", skip = 7) %>%
  filter(!is.na(`...1`)) %>%
  rename(aparatos = `...2`,
         viviendas_disponen = "Disponen...4",
         viviendas_no_disponen = "No disponen...5",
         no_especificado = "No especificado...6",
         ent = "...1") %>%
  filter(aparatos == "Consola de videojuegos") %>%
  mutate(pctje_edo = 100*viviendas_disponen/(viviendas_disponen + viviendas_no_disponen + no_especificado)) %>%
  select(ent, pctje_edo) %>%
  arrange(-pctje_edo) %>%
  # tag the first whitespace so separate() can split code from name
  mutate(ent = str_replace(ent, pattern = "\\s", replacement = "@")) %>%
  separate(col = ent, into = c("CVE_EDO", "ENT"), sep = "@")
# I don't think I ended up using this -_-
# (lists the Font Awesome fonts extrafont knows about, for geom_pictogram)
extrafont::loadfonts(quiet = TRUE)
extrafont::fonttable() %>%
  as_tibble() %>%
  filter(grepl("Awesom", FamilyName)) %>%
  select(afmfile, FullName, FamilyName, FontName)
# For Mexico City (first row of viv = highest percentage) ----
# Build a two-row table: % of dwellings with a console vs. without, rounded,
# with the Spanish display labels used in the legend.
viv_video_edo = viv %>%
  filter(ENT == viv$ENT[1]) %>%
  mutate(no_videojuego = 100 - pctje_edo) %>%
  pivot_longer(cols = c("pctje_edo", "no_videojuego")) %>%
  mutate(value = round(value,0)) %>%
  mutate(name = str_replace_all(name, c("pctje_edo" = "Hogares con videojuegos",
                                        "no_videojuego" = "Sin Videojuegos"
  ))) %>%
  mutate(name = factor(name))
# Plot: proportional pictogram (10-row icon grid) -- gamepad icons for
# dwellings with a console, house icons for the rest -- vaporwave styling.
viv_video_edo %>%
  ggplot(aes(label = name,
             values = value,
             color = name)) +
  geom_pictogram(n_rows = 10,
                 make_proportional = TRUE) +
  scale_label_pictogram(
    name = NULL,
    values = c(`Hogares con videojuegos` = "gamepad",
               `Sin Videojuegos` = "home"
    )) +
  scale_color_manual(
    name = NULL,
    values = c(`Hogares con videojuegos` = "#F8B660",
               `Sin Videojuegos` = "#4A354F")) +
  coord_equal() +
  new_retro() +
  theme_enhance_waffle() +
  theme(panel.background = element_rect(fill = "transparent",
                                        color = "yellow"),
        axis.ticks = element_blank(),
        panel.grid.major = element_line(color = "#FF6B58",
                                        linetype = 2,
                                        size = 0.1),
        panel.grid.minor = element_line(color = "#FF6B58",
                                        linetype = 2,
                                        size = 0.1)
  ) +
  theme(legend.key.height = unit(2.25, "line")) +
  theme(legend.text = element_text(size = 10, hjust = 0, vjust = 0.75)) +
  theme(legend.position = "bottom") +
  guides(fill = guide_legend(title.position = "top",
                             title.hjust = 0.5)) +
  labs(caption = "Datos del Censo 2020. Viviendas. INEGI, 2020.
De cada 100 viviendas, el 21% reportó contar con al menos una consola de videojuegos
siendo la entidad con el porcentaje más alto. En contraste, Chiapas solo reportó un 2%,
siendo el porcentaje más bajo. @JuvenalCamposF, #30DayChartChallenge",
       title = "Viviendas con consolas de videojuegos",
       subtitle = "Ciudad de Mexico. Mexico. 2020") +
  theme(plot.caption = element_text(hjust = 1,
                                    family = "Windows Command Prompt",
                                    color = "#F8B660"))
# Save the plot (transparent background) ----
ggsave("grafica_2_transparente.png",
       device = "png",
       height = 9,
       width = 7,
       bg = "transparent")
|
63261100fba288f25c1f99a3e39cf517d9263e07 | f084db3890abf3f2c1c0b6e28b61ff448468db6b | /bin/prioritizeSNVs.R | a5c1639c7f83963c65b085fbacabb5ba8fcaa824 | [
"MIT"
] | permissive | yocra3/nf-core-cnvcalling | 73423b90cf756c840de9b9c466b6ac08c0b990fa | d8a7e27f5edc0a8f6a19fdab69980965aa5ec070 | refs/heads/master | 2023-08-23T08:57:08.248518 | 2021-02-02T10:54:52 | 2021-02-02T10:54:52 | 269,351,899 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,345 | r | prioritizeSNVs.R | #! /opt/conda/envs/nf-core-cnvcalling/bin/Rscript
#'#################################################################################
#'#################################################################################
#' Prioritize SNVs using annovar data
#' Remove SNVs that fulfill these criteria:
#' - Common SNVs: present with AF > 0.001 in any population
#' - Segmental Duplications: Remove variants present in segmental Duplications
#' - Low read depth: read depth < 10. Important: read depth should be encoded in OtherInfo2 -> 7th column (2nd extra column) of annovar input
#' - Frequent in our cohort: More than 2 occurences in our cohort
#' Select SNVs:
#' - In exonic or splicing positions
#' - Highly deleterous (frameshift, non-sense) or non-synonimous
#' Generate Excel tables. For each source (but clinVar), two tables are generated:
#' - Dominant model (Heterozygous variants)
#' - Recessive model (double heterozygous and homozygous)
#' - clinVar: Variants indentified in clinVar as pathogenic or likely pathogenic
#' - OMIM: Genes present in OMIM
#' - Candidate genes: Genes with high pLI (pLI > 0.8) or high pREC (pREC > 0.8)
#' - Remaining genes: Variants in other genes
#' A txt with all variants passing selection will be generated (to be used for ORVAL)
#'#################################################################################
#'#################################################################################
## Capture arguments
# Positional CLI arguments: 1) annovar multianno table, 2) OMIM genemap file,
# 3) gene-symbol <-> OMIM-ID mapping, 4) cohort VCF for internal frequencies,
# 5) output prefix. No validation is performed, so missing arguments surface
# later as NA file paths.
args <- commandArgs(trailingOnly=TRUE)
annovarFile <- args[1]
omimGenes <- args[2]
omimMap <- args[3]
cohortVCF <- args[4]
outPrefix <- args[5]
### Parameters (move to a config file?)
AF_threshold <- 0.001 # max population allele frequency to keep a SNV
min_readDepth <- 10 # minimum read depth (annovar Otherinfo2)
max_internal_freq <- 2 # max occurrences in our cohort
min_pLI <- 0.8 # pLI cutoff for "intolerant" candidate genes
min_pREC <- 0.8 # pREC cutoff for "intolerant" candidate genes
# Load libraries
library(VariantAnnotation)
library(openxlsx)
library(tidyverse)
## Load SNVs file and modify it:
### Create column with SNV name matching vcfs
### Add a column with genotypes (homogeneize between mosaics and germinal SNVs)
#### Consider Homozygous for AF > 0.8
# NOTE(review): Otherinfo1 is compared both to the string "0/1" and to the
# number 0.8, i.e. the column mixes genotype strings and mosaic allele
# fractions; confirm the coercion behaves as intended for both encodings.
ini <- read_tsv(annovarFile, na = ".") %>%
  mutate(ID = paste0(Chr, ":", Start, "_", Ref, "/", Alt),
         genotype = ifelse(Otherinfo1 == "0/1" | Otherinfo1 < 0.8,
                           "Heterozygous", "Homozygous")
  )
## Load internal freq SNVs (cohort VCF, used to drop recurrent artefacts)
comSNVs <- readVcf(cohortVCF)
snpSum <- snpSummary(comSNVs)
# Drop SNVs carried by more than max_internal_freq cohort samples (sum of the
# genotype-count columns g00/g01/g11).
if (nrow(snpSum) > 1){
  selSNPs <- snpSum %>%
    filter(g00 + g01 + g11 > max_internal_freq) %>%
    rownames()
  ini.snp <- filter(ini, !ID %in% selSNPs)
} else {
  ini.snp <- ini
}
## Segmental Duplications: keep only variants outside segmental duplications
ini.segDup <- subset(ini.snp, is.na(genomicSuperDups))
## Low read depths (annovar's 2nd extra column carries the read depth)
ini.depth <- subset(ini.segDup, Otherinfo2 > min_readDepth)
## Common in Population: drop SNVs at/above the AF threshold in any population
af_cols <- colnames(ini)[grep("AF", colnames(ini))] ### Columns with AF information
af_cols <- af_cols[-grep("Bayes", af_cols)] ## Remove Bayes measures
ini.com <- ini.depth %>%
  filter_at(af_cols, all_vars(is.na(.) | . < AF_threshold))
## Select exonic or splicing Variants
ini.exon <- filter(ini.com, Func.refGene %in% c("exonic", "splicing"))
## Discard synonymous variants (and "unknown" exonic consequences)
ini.del <- filter(ini.exon, !( !is.na(ExonicFunc.refGene) & ExonicFunc.refGene %in% c("synonymous SNV", "unknown")))
## Create table for ORVAL (chr, pos, alleles, genotype of every kept variant)
orval <- ini.del %>%
  select(Chr, Start, Ref, Alt, genotype)
write.table(orval, file = paste0(outPrefix, ".selVariants.txt"), quote = FALSE,
            row.names = FALSE)
# OMIM ####
## Create table with annovar gene ID, OMIM ID and OMIM phenotype
omim_match <- read_tsv(omimMap) %>%
  mutate(Gene.refGene = `Approved symbol`,
         OMIM_ID = `OMIM ID(supplied by OMIM)`) %>%
  dplyr::select(Gene.refGene, OMIM_ID)
omim <- read_tsv(omimGenes, skip = 3) %>%
  mutate(OMIM_ID = as.character(`MIM Number`),
         OMIM_Phenotype = Phenotypes) %>%
  dplyr::select(OMIM_ID, OMIM_Phenotype) %>%
  filter(!is.na(OMIM_ID) & !is.na(OMIM_Phenotype)) %>%
  inner_join(omim_match, by = "OMIM_ID")
## Add OMIM columns to all variants annotation and define categories
# prior_tab assigns each variant to exactly one report bucket, in priority
# order: clinVar (likely) pathogenic > OMIM gene > intolerant candidate gene
# (high pLI or high pREC) > everything else.
vars.annot <- left_join(ini.del, omim, by = "Gene.refGene") %>%
  mutate(pLI_flag = !is.na(pLi.refGene) & pLi.refGene > min_pLI,
         misZ_flag = !is.na(pRec.refGene) & pRec.refGene > min_pREC,
         cand_flag = pLI_flag | misZ_flag,
         clinVar_flag = !is.na(CLNSIG) & grepl("Pathogenic|Likely_pathogenic", CLNSIG),
         prior_tab = ifelse(clinVar_flag, "clinVar",
                            ifelse(!is.na(OMIM_Phenotype), "OMIM",
                                   ifelse(cand_flag, "Candidate genes", "Other genes"))))
# clinVar table ####
clinvar <- filter(vars.annot, prior_tab == "clinVar")
ini.omim <- filter(vars.annot, prior_tab == "OMIM")
## Variants per gene
#' Genes carrying at least `n` of the supplied variants.
#'
#' @param df Variant table with one row per variant and a `Gene.refGene`
#'   column (as produced by the annovar/vars.annot pipeline above).
#' @param n Minimum number of variants a gene must carry (default 2, i.e.
#'   potential compound heterozygotes).
#' @return Character vector of gene symbols meeting the threshold.
getGenesMultiVariants <- function(df, n = 2){
  # Copy the threshold into a distinctly named local before entering the
  # pipeline: inside filter() a bare `n` is resolved through the data mask
  # first (a column named `n` would silently shadow the argument), and `n`
  # also collides visually with dplyr's row counter n(). Qualifying the
  # counter as dplyr::n() removes the remaining ambiguity.
  min_variants <- n
  df %>%
    dplyr::select(Gene.refGene) %>%
    group_by(Gene.refGene) %>%
    summarize(count = dplyr::n()) %>%
    filter(count >= min_variants) %>%
    pull(Gene.refGene)
}
# OMIM bucket: dominant model = single heterozygous variant in a gene;
# recessive model = homozygous, or heterozygous in a gene carrying >= 2
# variants (potential compound heterozygote).
omim_genes <- getGenesMultiVariants(ini.omim)
omim.dom <- filter(ini.omim, genotype == "Heterozygous" & !Gene.refGene %in% omim_genes)
omim.rec <- filter(ini.omim, (genotype == "Heterozygous" & Gene.refGene %in% omim_genes) |
                     genotype == "Homozygous")
# Candidate genes ####
# Same dominant/recessive split for intolerant (high pLI/pREC) genes.
ini.cand <- filter(vars.annot, prior_tab == "Candidate genes")
cand_genes <- getGenesMultiVariants(ini.cand)
cand.dom <- filter(ini.cand, genotype == "Heterozygous" & !Gene.refGene %in% cand_genes)
cand.rec <- filter(ini.cand, (genotype == "Heterozygous" & Gene.refGene %in% cand_genes) |
                     genotype == "Homozygous")
# Remaining genes
ini.rest <- filter(vars.annot, prior_tab == "Other genes")
rest_genes <- getGenesMultiVariants(ini.rest)
rest.dom <- filter(ini.rest, genotype == "Heterozygous" & !Gene.refGene %in% rest_genes)
rest.rec <- filter(ini.rest, (genotype == "Heterozygous" & Gene.refGene %in% rest_genes) |
                     genotype == "Homozygous")
## Create variant selection log: SNV count (and % of the input) remaining
## after each filtering step, plus the size of each report bucket.
sumTable <- data.frame(Description = c(
  "Initial Number SNVs",
  paste("SNVs with internal Freq <", max_internal_freq),
  "SNVs not in Segmental Duplications",
  paste("SNVs with read depth >", min_readDepth),
  "SNVs in exonic or splicing positions",
  "Splicing, Frameshift, non-sense or non-synonymous SNVs",
  "Pathogenic or likely pathogenic in clinVar",
  "SNVs in OMIM genes",
  "SNVs in intolerant genes",
  "SNVs in remaining genes"),
  Number = c(nrow(ini), nrow(ini.snp), nrow(ini.segDup), nrow(ini.depth),
             nrow(ini.exon), nrow(ini.del), nrow(clinvar),
             nrow(ini.omim), nrow(ini.cand), nrow(ini.rest)))
sumTable$Proportion <- round(sumTable$Number/nrow(ini)*100, 2)
write.table(sumTable, file = paste0(outPrefix, ".log"), quote = FALSE,
            row.names = FALSE)
# Excel workbook: one sheet per bucket/model plus the summary, matching the
# sheetName vector positionally.
write.xlsx(list(clinvar, omim.dom, omim.rec, cand.dom, cand.rec,
                rest.dom, rest.rec, sumTable),
           file = paste0(outPrefix, ".xlsx"),
           rowNames = FALSE,
           colNames = TRUE,
           sheetName = c("ClinVar Pathogenic",
                         "OMIM genes - Dominant",
                         "OMIM genes - Recessive",
                         "Intolerant genes - Dominant",
                         "Intolerant genes - Recessive",
                         "Other genes - Dominant",
                         "Other genes - Recessive",
                         "Prioritization summary"))
|
289d4c243ce053564cf01648820dcde54e2e51f0 | 824703319b5958aaa6f1f7099239e4eed86089ae | /plot-area.R | 556e4e618ec70b2fcad93bd4df74bbc54ba9538a | [] | no_license | sebdalgarno/hg-logging-history | f7ca79576e31fdee186e7f8c58091860cfb28372 | 22edd69943ba04a5f38e8bdda34b9f185227a7dd | refs/heads/master | 2021-01-19T08:35:14.151638 | 2017-07-13T22:00:06 | 2017-07-13T22:00:06 | 87,649,551 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,786 | r | plot-area.R | source('header.R')
# Select the "final" data sub-directory and load datasets via project helpers
# defined in header.R (set_sub()/load_datas() -- see header.R for details).
set_sub("final")
load_datas()
# Harvest polygons, tagged with a Type and a plain Year column.
poly <- log17Poly %>%
  mutate(Type = "poly",
         Year = YearHarv)
road <- log17Road
# Roads buffered by 15 (map units) so they have an area like the polygons.
road15 <- st_buffer(road, 15) %>%
  mutate(Type = "road")
comb <- rbind(select(poly, Year, ID, Type), select(road15, Year, ID, Type))
set_sub("logarea")
# Orange-to-yellow colour ramp used for the bar charts below.
pals <- c(rgb(227, 94, 0, maxColorValue = 255), rgb(255, 255, 0, maxColorValue = 255))
get_color <- colorRampPalette(pals)
labs <- c(1900, 2016)
# Clean years: 2017 -> NA and dropped (presumably incomplete -- TODO confirm),
# the typo 88 -> 1988, and 0 dropped.
comb %<>% mutate(Year = replace(Year, Year == 2017, NA),
                 Year = replace(Year, Year == 88, 1988)) %>%
  filter(!is.na(Year)) %>%
  filter(Year != 0)
# Total area in hectares (st_area is m^2) per harvest year.
comb %<>% mutate(AreaHa = as.numeric(st_area(.))/10000) %>%
  group_by(Year) %>%
  summarise(SumAreaHa = sum(AreaHa))
comb %<>% mutate(Color = get_color(116))
# Same cleaning/summary applied to the buffered roads on their own.
road15 %<>% mutate(Year = replace(Year, Year == 2017, NA),
                   Year = replace(Year, Year == 88, 1988)) %>%
  filter(!is.na(Year)) %>%
  filter(Year != 0)
road15 %<>% mutate(AreaHa = as.numeric(st_area(.))/10000) %>%
  group_by(Year) %>%
  summarise(SumAreaHa = sum(AreaHa))
road15 %<>% mutate(Color = get_color(116))
# Drop geometry and export the yearly summary as CSV.
combcsv <- comb
st_geometry(combcsv) <- NULL
write_csv(combcsv, 'input/data/new/output/csv/log17-area-summary.csv')
# Manually updated hectares table used for the movie frames below.
newha <- read_csv('input/movie_drafts/updated_movie_hectares_070817.csv')
newha %<>% select(Year = `First Harvest Year`, SumAreaHa = Total)
newha %<>% mutate(Year = replace(Year, Year == 2017, NA)) %>%
  filter(!is.na(Year))
newha %<>% mutate(Color = get_color(116))
# Render one frame of the logging-history animation: yearly harvested area as
# bars with a degree-6 polynomial trend, white crosshairs marking year `n`,
# written to disk as "<n>.png".
#
# Args:
#   data: data frame with Year, SumAreaHa and Color (bar fill/outline) columns.
#   n:    the year to highlight. The default `i` resolves to the caller's loop
#         index at call time and is kept for backward compatibility with the
#         frame loop below.
logplot <- function(data = comb, n = i) {
  # Fit the degree-6 trend once and flag the highlighted year.
  data %<>% mutate(Point = ifelse(Year == n, n, NA),
                   lovals = predict(lm(SumAreaHa ~ poly(Year, 6),.)))
  val <- filter(data, Year == n)
  value <- val$lovals
  p <- ggplot(data) +
    geom_bar(aes(x = Year, y = SumAreaHa, color = Color, fill = Color),
             stat = "identity", width = 1) +
    scale_color_identity() +
    scale_fill_identity() +
    scale_y_continuous(expand = c(0,0), limits = c(0, 7000),
                       position = "right", breaks = seq(1000, 7000, 1000),
                       labels = c("1000", rep("", 3), "5000", rep("", 2))) +
    scale_x_continuous(labels = labs, expand = c(0,0), breaks = c(1908, 2009)) +
    labs(x = "", y = "") +
    theme(panel.background = element_rect(fill='black', colour='black'),
          panel.grid = element_blank()) +
    geom_smooth(aes(x = Year, y = SumAreaHa), method = "lm", formula = y ~ poly(x, 6),
                se = FALSE, colour = "red") +
    theme(axis.text = element_blank(),
          axis.ticks = element_blank(),
          plot.background = element_rect(fill = "transparent",colour = NA)) +
    geom_point(data = data, aes(Point, lovals), color = "white",
               fill = "white", size = 2, pch = 21) +
    geom_hline(yintercept = value, color = "white", size = 0.25) +
    geom_vline(xintercept = n, color = "white", size = 0.25)
  # Fixes: name the file after the function's own argument `n` (the original
  # read the global loop variable `i`, which only worked because the loop
  # passes n = i), and pass plot = p explicitly -- `p` is never printed here,
  # so ggsave()'s default last_plot() would save the last *displayed* plot
  # rather than the one just built.
  ggsave(paste0(n, ".png"), plot = p, bg = "transparent", width = 6,
         height = 3.5, dpi = 300, path = "input/data/output/plots/new-Poly6")
  # save_plot(paste0(n, "-area-logged-6"), plot = p, width = 6, height = 4, csv = F)
  # p
}
# Render every animation frame (one PNG per year) from the corrected table.
for (i in 1901:2016) {
  logplot(data = newha, n = i)
}
# One-off static overview of the harvest polygons.
# NOTE(review): SumArea and Color are not created for `poly` in this script,
# so they must come from log17Poly itself -- confirm this plot still runs.
ggplot(poly) +
  geom_bar(aes(x = YearHarv, y = SumArea, color = Color, fill = Color),
           stat = "identity", width = 1) +
  scale_color_identity() +
  scale_fill_identity() +
  scale_x_continuous(expand = c(0,0)) +
  scale_y_continuous(expand = c(0,0), limits = c(0, 7000)) +
  theme_dark() +
  labs(x = "", y = "") +
  theme(panel.background = element_rect(fill='black', colour='black'),
        panel.grid = element_blank()) +
  geom_smooth(aes(x = YearHarv, y = SumArea), method = "loess", se = F, colour = "red") +
  theme(axis.text = element_blank(), axis.ticks = element_blank())
|
3f1dcd38373343f225ad14755525acce1a6d5e16 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615837784-test.R | 999a9e1af05ecac07075538fac92476bd8e7c96b | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,183 | r | 1615837784-test.R | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(2.76275013262294e-242, -2.71589731135932e-284, 4.50329327293703e-20, 5.0107318811678e+173, 2.95741352140887e-174, 2.39856896797372e-167, 1.52716539195969e+202, -1.77363920813042e+194, 4.81258711601651e-74, 416.419964068629, 8.89287946381353e-298, 4.08534971175406e-86, 9.23217661253899e-35, 1.25089395940605e-164, 3.00649143404508e+211, 1.88125625093574e+20, -9.02609666744009e-305, 5.39704635845907e+157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
813999108f0f21c303fddcef3e24479e2d27ccb8 | 17a56a3b7228c541ee8e48011379ef76e4bd9a20 | /R/utils.R | 3b1d3a6b41a6ba0fd6298887067d2e999ed2e9c9 | [] | no_license | euctrl-pru/reproducible-ans-performance-paper | 02b54c7202a2d64d00a0da6ceae325bbe95fcd4b | 2af5c2a0a57a851b6f84552cd7308fac616765e3 | refs/heads/master | 2022-03-26T13:59:00.294535 | 2019-11-25T15:33:54 | 2019-11-25T15:33:54 | 171,104,518 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,308 | r | utils.R | library(dplyr)
library(stringr)
# copied from trrrj::ddm2dd
# Convert "degrees decimal minutes" (DdM) strings, e.g. "N 51° 28.65'",
# to signed decimal degrees.
#
# value: character vector of DdM strings (optional leading N/S/W/E
#        hemisphere letter, integer degrees, "°", decimal minutes, "'").
# Returns a double vector; S and W hemispheres are negative.
# NOTE(review): str_subset() silently drops elements that do not match the
# pattern, so the result can be shorter than the input vector.
ddm2dd <- function(value) {
  # RegEx for DdM (eventually w/ spaces):
  # ([N|S|W|E]\s*)?(\d+)°\s*(\d+(?:\.\d+)?)'
  pattern <- "([N|S|W|E]\\s*)?(\\d+)°\\s*(\\d+(?:\\.\\d+)?)'"
  # pieces: col 2 = hemisphere letter, col 3 = degrees, col 4 = minutes
  pieces <- value %>% stringr::str_subset(pattern) %>% stringr::str_match(pattern)
  dd <- as.double(pieces[, 3]) + as.double(pieces[, 4]) / 60
  # flip sign for southern/western hemispheres
  dd <- ifelse(stringr::str_detect(pieces[, 2], "S|W"), -dd, dd)
  # constrain longitude to (-180, 180]: ((lon - 180) %% 360) - 180
  # constrain latitude to (-90, 90]: ((lat - 90) %% 180) - 90
  dd <- ifelse(
    stringr::str_detect(pieces[, 2], "W|E"), # if not lon, assume lat
    ( (dd - 180) %% 360) - 180,
    ( (dd - 90) %% 180) - 90)
  dd
}
# copied from trrrj::dms2dd
# Convert "degrees minutes seconds" (DMS) strings, e.g. 51°28'39"N,
# to signed decimal degrees.
#
# value: character vector of DMS strings (degrees "°", minutes "'",
#        decimal seconds '"', optional trailing N/S/W/E letter).
# Returns a double vector; S and W hemispheres are negative.
# NOTE(review): as in ddm2dd(), str_subset() drops non-matching elements,
# so the result can be shorter than the input vector.
dms2dd <- function(value) {
  # RegEx for DMS (eventually w/ spaces):
  # (\d+)°\s*(\d+)'\s*(\d+(?:\.\d+)?)\"\s*([N|S|W|E]?)
  pattern <- "(\\d+)°\\s*(\\d+)'\\s*(\\d+(?:\\.\\d+)?)\"\\s*([N|S|W|E]?)"
  # pieces: col 2 = degrees, col 3 = minutes, col 4 = seconds, col 5 = hemisphere
  pieces <- value %>% stringr::str_subset(pattern) %>% stringr::str_match(pattern)
  dd <- ( (as.double(pieces[, 4]) / 60) + as.double(pieces[, 3])) / 60 + as.double(pieces[, 2])
  # flip sign for southern/western hemispheres
  dd <- ifelse(stringr::str_detect(pieces[, 5], "S|W"), -dd, dd)
  # constrain longitude to (-180, 180]: ((lon - 180) %% 360) - 180
  # constrain latitude to (-90, 90]: ((lat - 90) %% 180) - 90
  dd <- ifelse(
    stringr::str_detect(pieces[, 5], "W|E"), # if not lon, assume lat
    ( (dd - 180) %% 360) - 180,
    ( (dd - 90) %% 180) - 90)
  dd
}
# Extract position reports within `distance` NM of the airport reference
# point and write them to a new CSV under data/.
# input file is named like `<icao airport>_pos_rt_<date>.csv`,
# i.e. `lszh_pos_rt_2017-08-02.csv`
# output file is named like `<icao airport>_pos_rt_<distance>NM_<date>.csv`
#
# rawdatafile: path to the input CSV (must contain a distance_arp column, m)
# arp:         airport reference point (lon/lat)
#              NOTE(review): `arp` is never used in the body — the filter
#              relies on the precomputed distance_arp column; confirm intent.
# distance:    radius in nautical miles
# NOTE(review): the stop() message is missing a space after "file".
extract_airport_distance <- function(rawdatafile, arp, distance) {
  if(!fs::file_exists(rawdatafile)) {
    stop(paste0("Error: file", rawdatafile, " does not exist."))
  }
  # split "lszh_pos_rt_2017-08-02" into c("lszh", "pos", "rt", "2017-08-02")
  file_parts <- rawdatafile %>%
    fs::path_file() %>%
    fs::path_ext_remove() %>%
    stringr::str_split("_") %>% `[[`(1)
  apt_lower <- file_parts[1]
  infix <- stringr::str_c(file_parts[2], file_parts[3], sep = "_")
  data_date <- file_parts %>% dplyr::last()
  # assemble the output name: <apt>_pos_rt_<distance>NM_<date>.csv
  file <- stringr::str_glue(
    apt_lower,
    infix,
    str_glue(distance, "NM"),
    data_date,
    .sep = "_") %>%
    fs::path_ext_set("csv")
  file <- fs::path_join(c(here::here(), "data", file))
  # explicit column spec so types do not depend on readr's guessing
  cols <- cols(
    flight_id = col_character(),
    timestamp = col_datetime(format = ""),
    latitude = col_double(),
    longitude = col_double(),
    altitude = col_double(),
    speed_gnd = col_double(),
    track_gnd = col_double(),
    vert_speed = col_double(),
    on_ground = col_logical(),
    distance = col_double(),
    distance_arp = col_double()
  )
  # nautical miles -> metres (distance_arp is in metres)
  d <- distance * 1852
  rawdatafile %>%
    readr::read_csv(col_types = cols) %>%
    dplyr::filter(distance_arp <= d) %>%
    readr::write_csv(file)
}
# extract arrivals at airport, `apt`. Use `prefix` for output filename
# input file is like `cpr_fr24_flights_2017-08-02.csv`
# Filters rows whose `ades` (destination aerodrome) equals `apt` and writes
# them to data/<apt>_<prefix>_<date>.csv (date taken from the input name).
extract_arrival_flights <- function(rawdatafile, apt, prefix = "flt_rt") {
  if(!fs::file_exists(rawdatafile)) {
    stop(paste0("Error: file", rawdatafile, " does not exist."))
  }
  # last "_"-separated token of the (doubly) extension-stripped file name
  data_date <- rawdatafile %>%
    fs::path_file() %>%
    fs::path_ext_remove() %>%
    fs::path_ext_remove() %>% # remove second extension (if present)
    stringr::str_split("_") %>% `[[`(1) %>% last()
  apt_lower <- apt %>% tolower()
  file <- stringr::str_glue(apt_lower, prefix, data_date, .sep = "_") %>%
    fs::path_ext_set("csv")
  file <- fs::path(here::here("data"), file)
  apt_upper <- apt %>% toupper()
  # column names are lower-cased before filtering on `ades`
  rawdatafile %>%
    readr::read_csv() %>%
    dplyr::rename_all(tolower) %>%
    dplyr::filter(ades == apt_upper) %>%
    readr::write_csv(file)
}
#' extract positions for airport
#'
#' Reads the bzipped reference trajectory file in chunks, keeps only the
#' position reports of the flights in `ids`, adds `distance_arp` (metres
#' from the airport reference point) and writes the result to
#' data/<apt>_<prefix>_<date>_<postfix>.csv
#'
#' @param rawdatafile reference trajectory bzipped file
#' @param apt airport, i.e. "egll"
#' @param arp airport reference point (lon/lat), i.e. `c(-0.461389, 51.4775)`
#' @param ids flight ids to keep
#' @param prefix string for filename, default "pos_rt".
#' The default will result in filename
#' egll_pos_rt_<date>.csv where <date> is extracted from the date portion
#' in the `rawdatafile` name
#' @param postfix string for filename, default "raw".
#' @return a data frame
#'
extract_arrival_positions <- function(rawdatafile, apt, arp, ids, prefix = "pos_rt", postfix = "raw") {
  # the compressed file is expected alongside the plain name
  pp <- paste0(rawdatafile, ".bz2")
  if(!fs::file_exists(pp)) {
    stop(paste0("Error: file", pp, " does not exist."))
  }
  data_date <- rawdatafile %>%
    fs::path_file() %>%
    fs::path_ext_remove() %>%
    stringr::str_split("_") %>% `[[`(1) %>% dplyr::last()
  apt_lower <- apt %>% tolower()
  # NOTE(review): apt_upper is computed but never used below.
  apt_upper <- apt %>% toupper()
  file <- stringr::str_glue(apt_lower, prefix, data_date, postfix, .sep = "_") %>%
    fs::path_ext_set("csv")
  file <- here::here("data", file)
  # filter by flight ID
  filter1 <- function(x, pos, ids) {
    x %>%
      dplyr::filter(FLIGHT_ID %in% ids)
  }
  # pre-bind `ids` so the callback matches read_csv_chunked's signature
  partial1 <- purrr::partial(filter1, ids = ids)
  # get all arrivals
  read_csv_chunked(
    # use connection pipe to unzip on-the-fly
    bzfile(pp),
    DataFrameCallback$new(partial1),
    chunk_size = 5000) %>%
    rename_all(tolower) %>%
    rename(timestamp = time,
           longitude = lon, latitude = lat,
           altitude = alt) %>%
    group_by(flight_id) %>%
    arrange(timestamp) %>%
    ungroup() %>%
    # great-circle distance (m) from the ARP; cbind builds the lon/lat matrix
    mutate(distance_arp = geosphere::distGeo(
      arp,
      cbind(.data$longitude, .data$latitude))) %>%
    select(flight_id,
           timestamp, latitude, longitude, altitude,
           speed_gnd, track_gnd,
           vert_speed, on_ground,
           distance,
           distance_arp) %>%
    write_csv(file)
}
# smooth trajectories
# Rolling-median smooth of longitude/latitude/altitude per flight; drops
# trajectories with fewer than 3 points or fewer than 3 non-NA altitudes.
# Reads <...>_raw.csv and writes the smoothed result to the same name
# without the "_raw" suffix.
#
# rawdatafile: input CSV (output of extract_arrival_positions)
# size:        rolling window width — NOTE(review): zoo::rollmedian requires
#              an odd k; confirm callers never pass an even size.
smooth_arrival_trajectories <- function(rawdatafile, size = 5) {
  if(!fs::file_exists(rawdatafile)) {
    stop(paste0("Error: file", rawdatafile, " does not exist."))
  }
  file <- stringr::str_remove(rawdatafile, "_raw")
  # NOTE(review): `c` shadows base::c inside this function body.
  c <- cols(
    flight_id = col_character(),
    timestamp = col_datetime(format = ""),
    latitude = col_double(),
    longitude = col_double(),
    altitude = col_double(),
    speed_gnd = col_double(),
    track_gnd = col_double(),
    vert_speed = col_double(),
    on_ground = col_logical(),
    distance = col_double(),
    distance_arp = col_double()
  )
  # RT trajectories
  vroom(rawdatafile, col_types = c) %>%
    dplyr::group_by(flight_id) %>%
    dplyr::arrange(timestamp) %>%
    dplyr::mutate(n = n()) %>%
    # filter out trajectories with too few points
    dplyr::filter(n >= 3) %>%
    # filter out trajectories with too few non NA altitudes
    dplyr::mutate(n_not_na = sum(!is.na(altitude))) %>%
    dplyr::filter(n_not_na >= 3) %>%
    # fill NA with neaby ones
    # tidyr::fill(longitude, latitude, altitude, .direction = "downup") %>%
    # per-flight rolling median; "extend" repeats edge values at both ends
    dplyr::mutate_at(.vars = c("longitude", "latitude", "altitude"),
                     .funs = ~ zoo::rollmedian(.x,
                                               k = size,
                                               fill = c("extend", "extend", "extend"))) %>%
    # dplyr::mutate_at(.vars = c("longitude", "latitude", "altitude"),
    #                 .funs = ~ zoo::rollmean(.x,
    #                                         k = size,
    #                                         fill = c("extend", "extend", "extend"))) %>%
    # dplyr::mutate_at(.vars = c("longitude", "latitude"),
    #                 .funs = ~ pracma::movavg(.x,
    #                                         n = size,
    #                                         type = "w")) %>%
    dplyr::ungroup() %>%
    dplyr::select(-n, -n_not_na) %>%
    readr::write_csv(file)
}
# Keep only the rows of `x` whose position (columns LON/LAT, decimal
# degrees) lies within `distance` nautical miles of the reference point
# `arp` (lon/lat vector; defaults to London Heathrow's ARP).
# Adds/overwrites a `distance_arp` column (metres) as a side product.
filter_distance_from <- function(x, arp = c(-0.461389, 51.4775), distance = 40) {
  max_dist_m <- distance * 1852 # nautical miles -> metres
  # cbind() builds the two-column lon/lat matrix distGeo() expects
  x <- dplyr::mutate(
    x,
    distance_arp = geosphere::distGeo(arp, cbind(.data$LON, .data$LAT))
  )
  dplyr::filter(x, distance_arp <= max_dist_m)
}
# Plot one or more flight tracks as 2D paths (longitude vs latitude),
# coloured and grouped by callsign. Returns the ggplot object.
plot_flight <- function(flt) {
  track_aes <- ggplot2::aes(
    x = longitude,
    y = latitude,
    colour = callsign,
    group = callsign
  )
  ggplot2::ggplot(data = flt) +
    ggplot2::geom_path(
      mapping = track_aes,
      size = 1.5,
      alpha = .3,
      lineend = "round"
    )
}
# Build and save a three-panel diagnostic plot for one arrival at EGLL:
# (top) altitude vs time, (middle) distance from the ARP vs time — both
# overlaid with detected level segments — and (bottom) the 2D track with
# runways, holding-stack boxes and VOR labels.
#
# data_pos:   positions data frame (flight_id, timestamp, lon/lat,
#             altitude, distance_arp, callsign, ...)
# flight_ids: vector of flight ids; idx selects which one to plot
# Depends on globals defined elsewhere in the file/session: egll_vor,
# hss_sf, rw_sf, rw. Writes figures/generated/holding_<idx>.png.
# Nothing is saved when the flight has no position reports.
save_egll_holding_plot <- function(data_pos, flight_ids, idx) {
  id <- flight_ids[idx]
  flt <- data_pos %>%
    dplyr::filter(flight_id == id)
  # per-stack label offsets so VOR names do not overlap the points
  stacks <- egll_vor %>%
    dplyr::mutate(hjust = 0, vjust = 0) %>%
    dplyr::mutate(
      hjust = ifelse(id == "BNN", -0.2, hjust),
      vjust = ifelse(id == "BNN", 1.5, vjust)) %>%
    dplyr::mutate(
      hjust = ifelse(id == "BIG", 1.2, hjust),
      vjust = ifelse(id == "BIG", -0.7, vjust)) %>%
    dplyr::mutate(
      hjust = ifelse(id == "LAM", 1.2, hjust),
      vjust = ifelse(id == "LAM", 1.5, vjust)) %>%
    dplyr::mutate(
      hjust = ifelse(id == "OCL", -0.2, hjust),
      vjust = ifelse(id == "OCL", -0.7, vjust))
  # reusable layers: VOR points, VOR labels, holding-stack boxes
  gvor <- ggplot2::geom_point(data = egll_vor,
                              ggplot2::aes(x = longitude,
                                           y = latitude),
                              colour = "blue",
                              size = 2)
  gvorl <- ggplot2::geom_text(data = stacks,
                              ggplot2::aes(x = longitude,
                                           y = latitude,
                                           label = id,
                                           hjust = hjust,
                                           vjust = vjust))
  gbox <- ggplot2::geom_sf(data = hss_sf, alpha = 0)
  if(nrow(flt) > 0) {
    # indexes of the peaks
    peaks <- pracma::findpeaks(flt$distance_arp, nups = 5)[,2]
    valleys <- pracma::findpeaks(-flt$distance_arp, nups = 5)[,2]
    minmax <- c(valleys, peaks)
    # mark local extrema of the ARP distance (holding turns show as wiggles)
    flt <- flt %>%
      dplyr::mutate(is_peak = ifelse(
        dplyr::row_number() %in% minmax,
        TRUE,
        FALSE))
    # minimum track duration (secs)
    min_track_duration <- 60 * 1
    # minimum track altitude, (lowest track is at 7000 ft), keep a margin
    min_track_altitude <- 6500 %>%
      units::set_units("ft") %>%
      units::set_units("m") %>%
      as.numeric()
    # maximum track altitude, (highiest track is at ??? ft), keep a margin
    max_track_altitude <- 250000 %>%
      units::set_units("ft") %>%
      units::set_units("m") %>%
      as.numeric()
    # candidate level segments within the holding altitude band
    s <- trrrj::extract_segment(flt) %>%
      dplyr::mutate(duration = as.duration(end_ts - beg_ts), flight_id = id) %>%
      dplyr::filter(duration >= lubridate::as.duration(min_track_duration)) %>%
      dplyr::filter(beg_alt >= min_track_altitude, beg_alt < max_track_altitude) %>%
      mutate(segment_number = dplyr::row_number())
    g <- ggplot2::ggplot(flt)
    # gd: distance-vs-time panel (extrema highlighted in yellow)
    gd <- g +
      ggplot2::geom_line(ggplot2::aes(x = timestamp, y = distance_arp)) +
      ggplot2::geom_point(data = (flt %>% dplyr::filter(is_peak == TRUE)),
                          ggplot2::aes(x = timestamp, y = distance_arp), colour = "yellow")
    # gv: altitude-vs-time panel
    gv <- g + ggplot2::geom_line(ggplot2::aes(x = timestamp, y = altitude))
    # g2d: map panel with the flown track
    g2d <- ggplot2::ggplot(data = flt) +
      ggplot2::geom_path(
        mapping = ggplot2::aes(x = longitude,
                               y = latitude,
                               colour = callsign,
                               group = callsign),
        size = 1.5, alpha = .3, lineend = "round")
    # keep holding segment: no luck, see 33
    # h <- flt %>% left_join(s, by = "flight_id") %>%
    #  filter(timestamp >= beg_ts, timestamp <= end_ts)
    # gh <- geom_path(data = h,
    #                mapping = aes(x = longitude,
    #                              y = latitude,
    #                              group = callsign),
    #                size = 1.5, alpha = .3, lineend = "round", colour = "blue")
    # g2d + gh
    # g2d <- plot_flight_2d(flt)
    # overlay the detected segments (on the altitude panel and as a rug
    # at y = 0 on the distance panel) when any were found
    if (nrow(s) > 0) {
      gs <- ggplot2::geom_segment(
        data = s,
        ggplot2::aes(x = beg_ts, y = beg_alt, xend = end_ts, yend = end_alt),
        colour = "blue",
        size = 2)
      gs0 <- ggplot2::geom_segment(
        data = s,
        ggplot2::aes(x = beg_ts, y = 0, xend = end_ts, yend = 0),
        colour = "blue",
        size = 3)
      g0 <- gv + gs
      g1 <- gd + gs0
    } else {
      g0 <- gv
      g1 <- gd
    }
    # runway layers; NOTE(review): grw is built but never added to a plot
    grw_sf <- ggplot2::geom_sf(data = rw_sf, size = 1.2)
    grw <- ggplot2::geom_segment(
      data = rw,
      ggplot2::aes(x = longitude, y = latitude, xend = longitude2, yend = latitude2),
      size = 1.2)
    g2d <- g2d + grw_sf + gbox + gvor + gvorl
    # stack the three panels vertically (patchwork) and write the PNG
    patch <- g0 + g1 + g2d + patchwork::plot_layout(ncol = 1, heights = c(1, 1, 3))
    fl <- stringr::str_glue("figures", "generated", "holding_{idx}.png", idx = idx, .sep = "/")
    ggplot2::ggsave(fl, plot = patch, device = "png")
  }
}
# utility to plot and manually edit a box around a holding pattern
# in order to manually define the holding stacks areas
library(leaflet)
library(mapview)
library(mapedit)
# Interactively draw/edit shapes over a single flight's track (mapedit),
# e.g. to digitise a bounding polygon around a holding pattern.
# `data` is a positions data frame; `id` selects one flight_id.
edit_flight <- function(data, id) {
  one_flight <- filter(data, flight_id == id)
  # promote to an sf point layer in WGS84 (EPSG:4326) so mapview can render it
  flight_sf <- sf::st_set_crs(
    sf::st_as_sf(one_flight, coords = c("longitude", "latitude")),
    4326
  )
  editMap(mapview(flight_sf), targetLayerID = "flight")
}
# example of edit holding area
# 1. create a polygon, it will be ib ppp once done
# ppp <- edit_flight(4)
# 2. make it a tribble (and assign the relevant `id`, i.e. "LAM")
# ppp %>%
# `[[`(1) %>%
# st_geometry() %>%
# st_coordinates() %>%
# as_tibble() %>%
# rename(longitude = X, latitude = Y) %>%
# mutate(id = "LAM") %>%
# select(-L1, -L2) %>%
# datapasta::dpasta()
# Add a `bearing` column: the initial bearing (degrees, normalised to
# [0, 360)) from each position in `df` towards the airport reference
# point `arp` (a list/data frame with `longitude` and `latitude`).
add_bearing <- function(df, arp) {
  dplyr::mutate(
    df,
    # geosphere::bearing() returns (-180, 180]; "+ 360 %% 360" maps to [0, 360)
    bearing = (geosphere::bearing(
      cbind(longitude, latitude),
      cbind(arp$longitude, arp$latitude)
    ) + 360) %% 360
  )
}
# Classify each row's `bearing` (degrees in [0, 360)) into a compass
# quadrant factor with levels I/II/III/IV:
#   I = [0, 90), II = [90, 180), III = [180, 270), IV = [270, 360).
# Bearings outside that range (or NA) become NA, as before.
add_quadrant <- function(df) {
  quadrant_labels <- c("I", "II", "III", "IV")
  dplyr::mutate(
    df,
    # cut() with right = FALSE reproduces the half-open [lo, hi) intervals
    quadrant = cut(
      bearing,
      breaks = c(0, 90, 180, 270, 360),
      labels = quadrant_labels,
      right = FALSE
    )
  )
}
# Return the rows belonging to the n-th group of a grouped data frame `x`.
# tidyr::nest() packs each group's rows into a `data` list-column (one row
# per group, in group order); slice(n) picks the group, unnest() expands it.
nth_group <- function(x, n) {
  by_group <- tidyr::nest(x)
  chosen <- dplyr::slice(by_group, n)
  tidyr::unnest(chosen, data)
}
# Convenience accessors for the first/second group of a grouped data frame.
first_group <- function(x) nth_group(x, n = 1)
# NOTE(review): the historical name `secondt_group` is a typo; it is kept
# for backward compatibility and aliased by the corrected `second_group`.
secondt_group <- function(x) nth_group(x, n = 2)
second_group <- secondt_group
######################################################
# TODO: make it work _correctly_ (author-flagged as work in progress)
# pt/th1/th2: named lon/lat vectors (elements 'longitude', 'latitude');
#             name1/name2 are the runway designators of th1/th2.
# return: length-2 CHARACTER vector c(runway_name, distance) — the numeric
#         distance is coerced to character by c().
# Projects `pt` onto the great circle through the two thresholds (n-vector
# arithmetic via nvctr/pracma) and reports the closer threshold.
# NOTE(review): |n1 - n2| * R is the CHORD length, not the great-circle
# arc; fine for picking the nearer threshold, approximate as a distance.
cross_track_from_runway <- function(pt, th1, th2, name1, name2) {
  r_Earth <- 6371e3 # mean Earth radius (m)
  # n-vectors (unit normals to the ellipsoid surface) of both thresholds and pt
  n_ET1_E <- nvctr::lat_lon2n_E(nvctr::rad(th1['latitude']), nvctr::rad(th1['longitude']))
  n_ET2_E <- nvctr::lat_lon2n_E(nvctr::rad(th2['latitude']), nvctr::rad(th2['longitude']))
  n_EP_E <- nvctr::lat_lon2n_E(nvctr::rad(pt['latitude']), nvctr::rad(pt['longitude']))
  # unit normal to the great circle between n_EA1_E and n_EA2_E
  c_E <- nvctr::unit(pracma::cross(n_ET1_E, n_ET2_E))
  # position of normal on Earth
  n_EC_E <- r_Earth * c_E
  # find the intersection point between the great circle arcs
  # n_ET1_E -- n_ET1_E and n_EP_E -- c_E
  # This is the intersection point on the great circle n_ET1_E -- n_ET1_E
  n_ED_E <- nvctr::unit(pracma::cross(
    pracma::cross(n_ET1_E, n_ET2_E),
    pracma::cross(n_EP_E, n_EC_E)))
  # there are 2 intersections: n_ED_E and its antipodal n_EF_E = -n_ED_E
  n_EF_E <- -n_ED_E
  # Select the one that has the minimum distance from the 2 thresholds
  d_d_t1 <- base::norm(n_ET1_E - n_ED_E, type = "2") * r_Earth
  d_d_t2 <- base::norm(n_ET2_E - n_ED_E, type = "2") * r_Earth
  d_f_t1 <- base::norm(n_ET1_E - n_EF_E, type = "2") * r_Earth
  d_f_t2 <- base::norm(n_ET2_E - n_EF_E, type = "2") * r_Earth
  d <- c(d_d_t1, d_d_t2, d_f_t1, d_f_t2)
  rwy <- c(name1, name2, name1, name2)
  idx <- which.min(d)
  c(rwy[idx], d[idx])
}
# Pick the closer of two runway candidates. `opt1`/`opt2` are length-2
# character vectors c(runway_name, distance) as returned by
# cross_track_from_runway(); returns the name with the smaller distance.
rwy_for_point <- function(opt1, opt2) {
  candidate_names <- c(opt1[1], opt2[1])
  candidate_dists <- as.numeric(c(opt1[2], opt2[2]))
  candidate_names[which.min(candidate_dists)]
}
# Guess which Heathrow runway (northern pair 09L/27R or southern pair
# 09R/27L) a position is closest to, using the cross-track distance from
# each runway's great circle (threshold coordinates hard-coded for EGLL).
guess_egll_rwy <- function(longitude, latitude) {
  pt <- c(longitude = longitude, latitude = latitude)
  # northern runway: thresholds 09L / 27R
  northern <- cross_track_from_runway(
    pt,
    th1 = c(latitude = 51.47750, longitude = -0.4850000),
    th2 = c(latitude = 51.47767, longitude = -0.4333333),
    name1 = "09L", name2 = "27R"
  )
  # southern runway: thresholds 09R / 27L
  southern <- cross_track_from_runway(
    pt,
    th1 = c(latitude = 51.46483, longitude = -0.4823333),
    th2 = c(latitude = 51.46500, longitude = -0.4340000),
    name1 = "09R", name2 = "27L"
  )
  rwy_for_point(northern, southern)
}
##############################################
|
f2c2596e7cb4f721b2a79f4260325da8890b9219 | 0f30a25454dc75a201378d16d37de75431cea79b | /R/wm_ranks.R | a6f49aea95e796ab8b4c92bda13d21185761e193 | [
"MIT"
] | permissive | ropensci/worrms | 489388c0121d7f1df7e1bdf9c5fac46da40925c1 | beb0c3e13ce0a0accdb33c0a91ea14a1ccedadcf | refs/heads/master | 2023-07-02T18:31:28.937341 | 2023-06-19T13:34:08 | 2023-06-19T13:34:08 | 77,186,597 | 19 | 9 | NOASSERTION | 2023-06-02T20:50:11 | 2016-12-23T01:03:45 | R | UTF-8 | R | false | false | 1,154 | r | wm_ranks.R | #' Get taxonomic ranks by their identifier
#'
#' @export
#' @name wm_ranks
#' @param rank_id (numeric/integer) a rank identifier. length==1
#' @param rank_name (character) a rank name. length==1
#' @param id an AphiaID. length==1
#' @param offset (integer) record to start at. default: 1
#' @template curl
#' @return A tibble/data.frame
#' @examples \dontrun{
#' wm_ranks_id(220)
#' wm_ranks_id(180)
#' wm_ranks_id(180, id = 4)
#'
#' wm_ranks_name("genus")
#' wm_ranks_name("genus", id = 4)
#' }
#' @export
#' @rdname wm_ranks
# GET WoRMS taxon ranks for a numeric rank identifier, optionally scoped
# to one AphiaID.
# NOTE(review): `offset` is accepted and documented but never forwarded to
# the request — confirm whether AphiaTaxonRanksByID supports paging.
wm_ranks_id <- function(rank_id, id = NULL, offset = 1, ...) {
  # validate scalar numeric inputs before hitting the web service
  assert(rank_id, c("numeric", "integer"))
  assert(id, c("numeric", "integer"))
  assert_len(rank_id, 1)
  assert_len(id, 1)
  # cc() presumably drops NULL entries so AphiaID is only sent when given
  wm_GET(file.path(wm_base(), "AphiaTaxonRanksByID", rank_id),
    query = cc(list(AphiaID = id)), ...)
}
#' @export
#' @rdname wm_ranks
# GET WoRMS taxon ranks for a rank name (e.g. "genus"), optionally scoped
# to one AphiaID.
# NOTE(review): as in wm_ranks_id(), `offset` is accepted but never
# forwarded to the request — confirm intended behaviour.
wm_ranks_name <- function(rank_name, id = NULL, offset = 1, ...) {
  # validate scalar inputs before hitting the web service
  assert(rank_name, c("character"))
  assert(id, c("numeric", "integer"))
  assert_len(rank_name, 1)
  assert_len(id, 1)
  wm_GET(file.path(wm_base(), "AphiaTaxonRanksByName", rank_name),
    query = cc(list(AphiaID = id)), ...)
}
|
5b700243211ab00d9fc9fa4f9437b47a55bef7ba | adc038da9d4a940852573f5862b07c249d911190 | /Codigos/3_Selección_Matriz_De_Pesos.R | f275a875627b11ca2e0da9d196193ba60f7a4c14 | [] | no_license | mrruizf/DISEASE_MAPPING | afad5d7efe7ad30eb9adb103d319270c6cf7a93f | a11d9a307b348c9bd16b97efb489f90535796824 | refs/heads/master | 2022-04-28T18:07:05.818714 | 2020-04-29T21:35:29 | 2020-04-29T21:35:29 | 260,025,648 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 3,007 | r | 3_Selección_Matriz_De_Pesos.R | library(maptools)
library(spdep) ## contiguity matrices and spatial weights
library(adespatial)
library(xtable)
########################################################################
###################### SPATIAL WEIGHT MATRICES #########################
# Load the municipality polygons.
# NOTE(review): maptools::readShapePoly is deprecated upstream — confirm
# the maptools version in use still provides it.
MPIOS<-readShapePoly("G:/Shapefiles/Epidemiologia/EPIDEMIOLOGIA")
## neighbour structures
# Rook contiguity (shared edges only) up to 6th-order lags.
# NOTE(review): "towwer" looks like a misspelling of "tower"/rook; the
# names are kept as-is because later code refers to them.
towwer <- poly2nb(MPIOS, queen=F) # rook contiguity
towwer6<-nblag(towwer,6)
towwer<-nb2listw(towwer, style="B", glist = NULL)
towwer2<-nb2listw(towwer6[[2]], style="B", glist = NULL)
towwer3 <-nb2listw(towwer6[[3]], style="B", glist = NULL)
towwer4 <-nb2listw(towwer6[[4]], style="B", glist = NULL)
towwer5 <-nb2listw(towwer6[[5]], style="B", glist = NULL)
towwer6 <-nb2listw(towwer6[[6]], style="B", glist = NULL)
# Queen contiguity (shared edges or vertices) up to 6th-order lags.
queen <- poly2nb(MPIOS, queen=T) # queen contiguity
queen6 <- nblag(queen,6)
queen<-nb2listw(queen, style="B", glist = NULL)
queen2<-nb2listw(queen6[[2]], style="B", glist = NULL)
queen3 <-nb2listw(queen6[[3]], style="B", glist = NULL)
queen4 <-nb2listw(queen6[[4]], style="B", glist = NULL)
queen5 <-nb2listw(queen6[[5]], style="B", glist = NULL)
queen6 <-nb2listw(queen6[[6]], style="B", glist = NULL)
# k-nearest-neighbour weights for k = 1..6, based on polygon centroids.
coordenadas<-coordinates(MPIOS) # k-nearest neighbours
k1 <- knn2nb(knearneigh(coordenadas))
k1 <-nb2listw(k1, style="B", glist = NULL)
k2 <- knn2nb(knearneigh(coordenadas,2))
k2 <-nb2listw(k2, style="B", glist = NULL)
k3 <- knn2nb(knearneigh(coordenadas,3))
k3 <-nb2listw(k3, style="B", glist = NULL)
k4 <- knn2nb(knearneigh(coordenadas,4))
k4 <-nb2listw(k4, style="B", glist = NULL)
k5 <- knn2nb(knearneigh(coordenadas,5))
k5 <-nb2listw(k5, style="B", glist = NULL)
k6 <- knn2nb(knearneigh(coordenadas,6))
k6 <-nb2listw(k6, style="B", glist = NULL)
# Gabriel graph neighbours (symmetrised) + diagnostic plot
gabrielnb=graph2nb(gabrielneigh(coordenadas),sym=TRUE)
gabriel <-nb2listw(gabrielnb, style="B", glist = NULL)
x11()
plot(MPIOS,border="gray")
plot(gabrielnb,coordenadas,add=T,col="red")
title(main="Gráfica de Gabriel")
# Delaunay triangulation neighbours + diagnostic plot
trinb=tri2nb(coordenadas)
delaunay <-nb2listw(trinb, style="B", glist = NULL)
X11()
plot(MPIOS,border="gray")
plot(trinb,coordenadas,add=T,col="blue")
title(main="Triangulación Delaunay")
########################################################################
################# SELECTION OF THE WEIGHT MATRIX #######################
# Candidate weight matrices: queen/rook lags 1-6, kNN 1-6, Gabriel, Delaunay.
Pesos.list<-list(reina1=queen,reina2=queen2,reina3=queen3,reina4=queen4,reina5=queen5,reina6=queen6,
                 torre1=towwer,torre2=towwer2,torre3=towwer3,torre4=towwer4,torre5=towwer5,torre6=towwer6,
                 kvecinos1=k1,kvecinos2=k2,kvecinos3=k3,kvecinos4=k4,kvecinos5=k5,kvecinos6=k6,gabriel=gabriel,delaunay=delaunay)
class(Pesos.list)
nbw <- length(Pesos.list)
# family-wise error probability over nbw candidate tests at alpha = 0.05
1 - (1 - 0.05)^(nbw)
# Data-driven selection of the best weight matrix via Moran eigenvector
# maps (adespatial). NOTE(review): `datos` is not defined in this script —
# it must be loaded elsewhere before this point.
W_sel <- listw.select(datos$mala_total, Pesos.list, MEM.autocor = "all",
                      p.adjust = TRUE, nperm = 50)
W_sel$candidates
W_sel$best.id
xtable(W_sel$candidates, digits = 6)
W_sel$best$MEM.select
# Keep the selected spatial weight matrix for downstream modelling
Best.S.W.M<-Pesos.list[W_sel$best.id]
466f26a8b33ad7405f87cb1e2a2f4bce035f8612 | f76dcb5082462f759a63ff46d41c4acc2dbe5a93 | /tests/testthat/test-write.R | 439730289ecad9abe9fbd0b373b4c35b6edea596 | [
"MIT"
] | permissive | poissonconsulting/readwritesqlite | ba7ae2d6c814eb4880f94ee3e0ee77793a12436a | db961138ad98b957b70b3e4f257ab8c9c317b8e2 | refs/heads/main | 2022-11-02T22:30:43.073748 | 2022-10-16T23:09:15 | 2022-10-16T23:09:15 | 158,617,726 | 39 | 1 | NOASSERTION | 2022-09-28T12:29:35 | 2018-11-21T23:26:36 | R | UTF-8 | R | false | false | 27,897 | r | test-write.R | test_that("rws_write.data.frame checks reserved table names", {
conn <- local_conn()
local <- data.frame(x = as.character(1:3))
expect_error(
rws_write(local, x_name = "readwritesqlite_log", conn = conn),
"'readwritesqlite_log' is a reserved table"
)
expect_error(
rws_write(local, x_name = "readwritesqlite_LOG", conn = conn),
"'readwritesqlite_LOG' is a reserved table"
)
expect_error(
rws_write(local, x_name = "readwritesqlite_meta", conn = conn),
"'readwritesqlite_meta' is a reserved table"
)
expect_error(
rws_write(local, x_name = "READwritesqlite_meta", conn = conn),
"'READwritesqlite_meta' is a reserved table"
)
})
# Writing to a table that does not exist (with the default exists = TRUE)
# is an error rather than a silent create.
test_that("rws_write.data.frame checks table exists", {
  conn <- local_conn()
  local <- data.frame(x = as.character(1:3))
  expect_error(
    rws_write(local, conn = conn),
    "^Table 'local' does not exist[.]$"
  )
})

# Rows are appended to a pre-created table; the return value is the
# table name that was written.
test_that("rws_write.data.frame writes to existing table", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  expect_identical(rws_write(local, conn = conn), "local")
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, local)
})

# exists = FALSE asserts the table is new, so a pre-existing table errors.
test_that("rws_write.data.frame errors if exists = FALSE and already exists", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  expect_error(rws_write(local, exists = FALSE, conn = conn), "^Table 'local' already exists[.]$")
})

# exists = FALSE creates the table when it is absent.
test_that("rws_write.data.frame creates table", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  expect_identical(rws_write(local, exists = FALSE, conn = conn), "local")
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, local)
})

# SQLite table names are case-insensitive ("local" == "LOCAL") while a
# backtick-quoted "`LOCAL`" is a distinct table; writes by object name or
# x_name must land accordingly.
test_that("rws_write.data.frame handling of case", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  DBI::dbCreateTable(conn, "`LOCAL`", local)
  expect_identical(rws_write(local, conn = conn), "local")
  expect_identical(rws_write(local, x_name = "LOCAL", conn = conn), "LOCAL")
  LOCAL <- local
  expect_identical(rws_write(LOCAL, conn = conn), "LOCAL")
  expect_identical(rws_write(LOCAL, x_name = "`LOCAL`", conn = conn), "`LOCAL`")
  remote <- DBI::dbReadTable(conn, "LOCAL")
  expect_identical(remote, rbind(local, local, local))
  REMOTE <- DBI::dbReadTable(conn, "`LOCAL`")
  expect_identical(REMOTE, LOCAL)
})

# A double-quoted table name ('"local"') is distinct from the bare name;
# both can be listed and written independently.
test_that("rws_write.data.frame deals with \" quoted table names", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  locals <- data.frame(y = 1:2)
  DBI::dbCreateTable(conn, "local", local)
  DBI::dbCreateTable(conn, '"local"', locals)
  expect_identical(rws_list_tables(conn), sort(c("\"local\"", "local")))
  expect_identical(rws_write(local, conn = conn), "local")
  expect_identical(rws_write(locals, x_name = "\"local\"", conn = conn), "\"local\"")
  remotes <- DBI::dbReadTable(conn, "\"local\"")
  expect_identical(remotes, locals)
})

# Same as above for square-bracket quoting ("[local]").
test_that("rws_write.data.frame deals with [ quoted table names", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  locals <- data.frame(y = 1:2)
  DBI::dbCreateTable(conn, "local", local)
  DBI::dbCreateTable(conn, "[local]", locals)
  expect_identical(rws_list_tables(conn), sort(c("[local]", "local")))
  expect_identical(rws_write(local, conn = conn), "local")
  expect_identical(rws_write(locals, x_name = "[local]", conn = conn), "[local]")
  remotes <- as.data.frame(rws_read_table("[local]", conn = conn))
  expect_identical(remotes, locals)
})

# Same as above for backtick quoting ("`local`").
test_that("rws_write.data.frame deals with backtick quoted table names", {
  conn <- local_conn()
  local <- data.frame(x = 1:3, select = 1:3)
  locals <- data.frame(y = 1:2)
  DBI::dbCreateTable(conn, "local", local)
  DBI::dbCreateTable(conn, "`local`", locals)
  expect_identical(rws_list_tables(conn), sort(c("`local`", "local")))
  expect_identical(rws_write(local, conn = conn), "local")
  expect_identical(rws_write(locals, x_name = "`local`", conn = conn), "`local`")
  remotes <- DBI::dbReadTable(conn, "`local`")
  expect_identical(remotes, locals)
})
# Columns are matched by name, not position; unknown columns error when
# strict = TRUE and only warn (and are dropped) when strict = FALSE.
test_that("rws_write.data.frame corrects column order", {
  conn <- local_conn()
  local <- data.frame(x = 4:6, select = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  expect_identical(rws_write(local, conn = conn), "local")
  expect_identical(rws_write(local[2:1], x_name = "local", conn = conn), "local")
  expect_error(
    rws_write(local[c(1, 1, 2)], x_name = "local", conn = conn),
    "^The following column in data 'local' is unrecognised: 'x.1'[.]$"
  )
  expect_warning(
    rws_write(local[c(1, 1, 2)], x_name = "local", conn = conn, strict = FALSE),
    "^The following column in data 'local' is unrecognised: 'x.1'"
  )
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, rbind(local, local, local))
})

# A data column absent from the target table: error when strict,
# warning (column dropped) when strict = FALSE.
test_that("rws_write.data.frame warns for extra columns", {
  conn <- local_conn()
  local <- data.frame(x = 4:6, y = 1:3)
  DBI::dbCreateTable(conn, "local", local["x"])
  expect_error(
    rws_write(local, conn = conn),
    "^The following column in data 'local' is unrecognised: 'y'[.]$"
  )
  expect_warning(
    rws_write(local, conn = conn, strict = FALSE),
    "^The following column in data 'local' is unrecognised: 'y'[.]$"
  )
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, local["x"])
})

# Column name matching ignores case (X/SELECT match x/seLect).
test_that("rws_write.data.frame is case insensitive", {
  conn <- local_conn()
  local <- data.frame(x = as.character(1:3), seLect = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  colnames(local) <- toupper(colnames(local))
  expect_identical(rws_write(local, conn = conn), "local")
})

# Quoted/special column names survive a write/read round trip even though
# the raw SQLite table stores sanitised names; the meta table records them.
test_that("rws_write.data.frame deals with quoted column names", {
  conn <- local_conn()
  local <- tibble::tibble(x = factor(1:3), `[x]` = factor(2:4), `"x"` = factor(3:5))
  expect_identical(rws_write(local, conn = conn, exists = FALSE), "local")
  meta <- rws_read_meta(conn)
  expect_identical(meta$ColumnMeta, sort(c("\"x\"", "[x]", "X")))
  expect_identical(
    DBI::dbReadTable(conn, "local"),
    data.frame(
      x = as.character(1:3),
      X.x. = as.character(2:4),
      X.x..1 = as.character(3:5),
      stringsAsFactors = FALSE
    )
  )
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local)
})

# delete = TRUE wipes existing rows before writing, so only the last
# write's rows remain.
test_that("rws_write.data.frame can delete", {
  conn <- local_conn()
  local <- data.frame(x = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  rws_write(local, conn = conn)
  rws_write(local, delete = TRUE, conn = conn)
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, local)
})

# commit = FALSE rolls everything back: no rows and no meta/log tables.
test_that("rws_write.data.frame can not commit", {
  conn <- local_conn()
  local <- data.frame(x = 1:3)
  DBI::dbCreateTable(conn, "local", local)
  rws_write(local, commit = FALSE, conn = conn)
  remote <- DBI::dbReadTable(conn, "local")
  expect_equal(local[integer(0), , drop = FALSE], remote)
  expect_false(DBI::dbExistsTable(conn, "readwritesqlite_meta"))
  expect_false(DBI::dbExistsTable(conn, "readwritesqlite_log"))
})
# ---- rws_write on lists and environments ----

test_that("rws_write.list errors with none data frames", {
  conn <- local_conn()
  y <- list(x = 1)
  expect_error(rws_write(y, conn = conn), "^List `y` includes objects which are not data frames[.]$")
})
test_that("rws_write.environment issues warning with no data frames", {
  conn <- local_conn()
  y <- new.env()
  assign("x", 1, envir = y)
  expect_warning(rws_write(y, conn = conn), "^Environment 'y' has no data frames[.]$")
})
test_that("rws_write.list requires named list", {
  conn <- local_conn()
  y <- list(data.frame(x = 1:3))
  expect_error(rws_write(y, conn = conn), "^`x` must be named[.]$",
    class = "chk_error"
  )
})
test_that("rws_write writes list with 1 data frame", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3))
  DBI::dbCreateTable(conn, "local", y$local)
  expect_identical(rws_write(y, conn = conn), "local")
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, y$local)
})
test_that("rws_write writes list with 2 data frame", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3), local2 = data.frame(y = 1:4))
  DBI::dbCreateTable(conn, "local", y$local)
  DBI::dbCreateTable(conn, "local2", y$local2)
  expect_identical(rws_write(y, conn = conn), c("local", "local2"))
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, y$local)
  remote2 <- DBI::dbReadTable(conn, "local2")
  expect_identical(remote2, y$local2)
})
test_that("rws_write writes list with 2 identically named data frames", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3), LOCAL = data.frame(x = 1:4))
  DBI::dbCreateTable(conn, "LOCAL", y$local)
  # Table names differing only by case hit the same SQLite table, so with
  # unique = FALSE both data frames append to it.
  expect_identical(rws_write(y, conn = conn, unique = FALSE), c("local", "LOCAL"))
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, rbind(y$local, y$LOCAL))
})
# ---- unique/all/strict argument checking, environment input, commit ----

test_that("rws_write errors if list with 2 identically named data frames and complete = TRUE", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3), LOCAL = data.frame(x = 1:4))
  DBI::dbCreateTable(conn, "LOCAL", y$local)
  expect_error(
    rws_write(y, unique = TRUE, conn = conn),
    "^The following table name is duplicated: 'local'; but unique = TRUE[.]$"
  )
})
test_that("rws_write errors if complete = TRUE and not all data frames", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3))
  DBI::dbCreateTable(conn, "LOCAL", y$local)
  DBI::dbCreateTable(conn, "LOCAL2", y$local)
  expect_error(
    rws_write(y, all = TRUE, conn = conn),
    "^The following table name is not represented: 'LOCAL2'; but all = TRUE and exists != FALSE[.]$"
  )
})
test_that("rws_write errors if strict = TRUE and exists = TRUE and extra data frames", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3), local2 = data.frame(y = 1:2))
  DBI::dbCreateTable(conn, "LOCAL", y$local)
  # Same condition is an error under strict = TRUE and a warning otherwise.
  expect_error(
    rws_write(y, conn = conn),
    "^The following data frame in 'y' is unrecognised: 'local2'; but exists = TRUE[.]$"
  )
  expect_warning(
    rws_write(y, strict = FALSE, conn = conn),
    "^The following data frame in 'y' is unrecognised: 'local2'; but exists = TRUE[.]$"
  )
})
test_that("rws_write writes environment", {
  conn <- local_conn()
  local <- data.frame(x = 1:3)
  z <- 1
  # Only the data frames in the environment are written; `z` is ignored.
  expect_identical(rws_write(environment(), conn = conn, exists = FALSE), "local")
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, local)
})
test_that("rws_write not commits", {
  conn <- local_conn()
  y <- list(local = data.frame(x = 1:3), LOCAL = data.frame(x = 1:4))
  # commit = FALSE rolls everything back, so no tables exist afterwards.
  expect_identical(rws_write(y, exists = NA, commit = FALSE, unique = FALSE, conn = conn), c("local", "LOCAL"))
  expect_identical(DBI::dbListTables(conn), character(0))
  expect_identical(rws_write(y, exists = NA, commit = TRUE, unique = FALSE, conn = conn), c("local", "LOCAL"))
  expect_identical(DBI::dbListTables(conn), c("local", "readwritesqlite_init", "readwritesqlite_log", "readwritesqlite_meta"))
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, rbind(y$local, y$LOCAL))
})
# ---- replace = TRUE under PRIMARY KEY / UNIQUE / FOREIGN KEY constraints ----

test_that("replace rows PRIMARY KEY constraints", {
  conn <- local_conn()
  DBI::dbExecute(conn, "CREATE TABLE local (
    x INTEGER PRIMARY KEY NOT NULL,
    y INTEGER)")
  local <- data.frame(x = 1:3, y = 2:4)
  expect_identical(rws_write(local, conn = conn), "local")
  local$x <- c(1:2, 4L)
  local$y <- local$y + 10L
  # Plain write now collides on the key; replace = TRUE upserts instead.
  expect_error(rws_write(local, conn = conn), "UNIQUE constraint failed: local.x")
  expect_identical(rws_write(local, replace = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::tibble(x = 1:4, y = c(12L, 13L, 4L, 14L)))
  # Replacing a second time is idempotent.
  expect_identical(rws_write(local, replace = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::tibble(x = 1:4, y = c(12L, 13L, 4L, 14L)))
  expect_error(rws_write(local, conn = conn), "UNIQUE constraint failed: local.x")
  expect_identical(rws_write(local, delete = TRUE, replace = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::as_tibble(local))
  remote$x[1] <- 5L
  expect_identical(rws_write(local, delete = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::as_tibble(local))
  expect_identical(
    sort(DBI::dbListTables(conn)),
    c("local", "readwritesqlite_init", "readwritesqlite_log", "readwritesqlite_meta")
  )
})
test_that("replace rows UNIQUE constraints in unique key", {
  conn <- local_conn()
  DBI::dbExecute(conn, "CREATE TABLE local (
    x INTEGER UNIQUE NOT NULL,
    y INTEGER)")
  local <- data.frame(x = 1:3, y = 2:4)
  expect_identical(rws_write(local, conn = conn), "local")
  local$x <- c(1:2, 4L)
  local$y <- local$y + 10L
  expect_error(rws_write(local, conn = conn), "UNIQUE constraint failed: local.x")
  expect_identical(rws_write(local, replace = TRUE, conn = conn), "local")
  # With a UNIQUE key (no rowid key) the row order is not guaranteed,
  # so only the sorted values are compared.
  remote <- rws_read_table("local", conn = conn)
  expect_identical(sort(remote$x), 1:4)
  expect_identical(sort(remote$y), c(4L, 12L, 13L, 14L))
  expect_identical(rws_write(local, replace = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(sort(remote$x), 1:4)
  expect_identical(sort(remote$y), c(4L, 12L, 13L, 14L))
  expect_error(rws_write(local, conn = conn), "UNIQUE constraint failed: local.x")
  expect_identical(rws_write(local, delete = TRUE, replace = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::as_tibble(local))
  remote$x[1] <- 5L
  expect_identical(rws_write(local, delete = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::as_tibble(local))
})
test_that("replace rows with FOREIGN key", {
  conn <- local_conn()
  DBI::dbExecute(conn, "CREATE TABLE local (
    x INTEGER PRIMARY KEY NOT NULL)")
  DBI::dbExecute(conn, "CREATE TABLE local2 (
    x INTEGER NOT NULL PRIMARY KEY,
    y INTEGER NOT NULL,
    FOREIGN KEY (x) REFERENCES local (x))")
  local <- data.frame(x = 1:4)
  expect_identical(rws_write(local, conn = conn), "local")
  local2 <- data.frame(x = c(1:2, 4L))
  local2$y <- local2$x + 10L
  expect_identical(rws_write(local2, conn = conn), "local2")
  expect_error(rws_write(local2, conn = conn), "UNIQUE constraint failed: local2.x")
  rws_write(local2, conn = conn, replace = TRUE)
})
# ---- foreign key enforcement and strict environment handling ----

test_that("foreign keys switched on one data frame at a time", {
  conn <- local_conn()
  DBI::dbExecute(conn, "CREATE TABLE local (
    x INTEGER PRIMARY KEY NOT NULL)")
  DBI::dbExecute(conn, "CREATE TABLE local2 (
    x INTEGER NOT NULL,
    FOREIGN KEY (x) REFERENCES local (x))")
  y <- list(local = data.frame(x = 1:4), local2 = data.frame(x = 1:3))
  # Writing the child table before the parent violates the foreign key.
  expect_error(
    rws_write(y$local2, x_name = "local2", conn = conn),
    "FOREIGN KEY constraint failed"
  )
  expect_identical(rws_write(y$local, x_name = "local", conn = conn), "local")
  expect_identical(rws_write(y$local2, x_name = "local2", conn = conn), "local2")
})
test_that("foreign keys switched off for two data frame", {
  conn <- local_conn()
  DBI::dbExecute(conn, "CREATE TABLE local (
    x INTEGER PRIMARY KEY NOT NULL)")
  DBI::dbExecute(conn, "CREATE TABLE local2 (
    x INTEGER NOT NULL,
    FOREIGN KEY (x) REFERENCES local (x))")
  expect_false(foreign_keys(TRUE, conn))
  # Child listed before parent still succeeds because enforcement is
  # deferred for a multi-table write; the pragma is restored afterwards.
  y <- list(local2 = data.frame(x = 1:3), local = data.frame(x = 1:4))
  expect_identical(rws_write(y, conn = conn), c("local2", "local"))
  expect_true(foreign_keys(TRUE, conn))
})
test_that("foreign keys pick up foreign key violation for two data frames", {
  conn <- local_conn()
  DBI::dbExecute(conn, "CREATE TABLE local (
    x INTEGER PRIMARY KEY NOT NULL)")
  DBI::dbExecute(conn, "CREATE TABLE local2 (
    x INTEGER NOT NULL,
    FOREIGN KEY (x) REFERENCES local (x))")
  expect_false(foreign_keys(FALSE, conn))
  expect_false(defer_foreign_keys(TRUE, conn))
  # Parent values 2:3 do not cover child value 1, so the write must fail
  # and the pragmas must be restored to their prior (off) state.
  y <- list(local2 = data.frame(x = 1:3), local = data.frame(x = 2:3))
  expect_error(rws_write(y, conn = conn), "FOREIGN KEY constraint failed")
  expect_false(foreign_keys(TRUE, conn))
  expect_false(defer_foreign_keys(TRUE, conn))
})
test_that("strict environment with extra data frame and extra column", {
  conn <- local_conn()
  env <- new.env()
  local <- data.frame(x = 1:2, z = 2:3)
  assign("local", local, envir = env)
  assign("local2", local, envir = env)
  assign("not", 1, envir = env)
  DBI::dbCreateTable(conn, "local", local[1])
  expect_error(
    rws_write(env, conn = conn),
    "^The following data frame in 'x' is unrecognised: 'local2'; but exists = TRUE[.]$"
  )
  expect_warning(
    rws_write(env, strict = FALSE, conn = conn),
    "^The following data frame in 'x' is unrecognised: 'local2'; but exists = TRUE[.]$"
  )
  expect_warning(
    rws_write(env, strict = FALSE, conn = conn),
    "^The following column in data 'local' is unrecognised: 'z'[.]$"
  )
  remote <- DBI::dbReadTable(conn, "local")
  expect_identical(remote, rbind(local[1], local[1]))
  expect_identical(rws_list_tables(conn), "local")
})
# ---- sf (simple features) round trips ----

test_that("sf data frames with single geometry passed back", {
  conn <- local_conn()
  local <- readwritesqlite:::rws_data_sf
  DBI::dbCreateTable(conn, "local", local)
  expect_identical(rws_write(local, conn = conn), "local")
  # The init table records which column holds the geometry.
  init <- DBI::dbReadTable(conn, "readwritesqlite_init")
  expect_identical(init, data.frame(
    TableInit = "LOCAL",
    IsInit = 1L, SFInit = "GEOMETRY",
    stringsAsFactors = FALSE
  ))
  remote <- rws_read_table("local", conn = conn)
  expect_identical(class(remote), c("sf", "tbl_df", "tbl", "data.frame"))
  expect_identical(colnames(remote), colnames(local))
  expect_identical(nrow(remote), 3L)
  expect_identical(remote$logical, local$logical)
  expect_identical(remote$date, local$date)
  expect_identical(remote$posixct, local$posixct)
  expect_identical(remote$units, local$units)
  expect_identical(remote$factor, local$factor)
  expect_identical(remote$ordered, local$ordered)
  expect_equivalent(remote$geometry, local$geometry)
})
test_that("sf data frames with two geometries and correct one passed back", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- tibble::as_tibble(local)
  local <- local["geometry"]
  colnames(local) <- "first"
  local$second <- local$first
  local <- sf::st_sf(local, sf_column_name = "second")
  DBI::dbCreateTable(conn, "local", local)
  expect_identical(rws_write(local, conn = conn), "local")
  init <- DBI::dbReadTable(conn, "readwritesqlite_init")
  expect_identical(init, data.frame(
    TableInit = "LOCAL", IsInit = 1L, SFInit = "SECOND",
    stringsAsFactors = FALSE
  ))
  remote <- rws_read_table("local", conn = conn)
  expect_identical(class(remote), c("sf", "tbl_df", "tbl", "data.frame"))
  expect_identical(colnames(remote), colnames(local))
  expect_identical(nrow(remote), 3L)
  expect_equivalent(remote$first, local$first)
  expect_equivalent(remote$second, local$second)
})
# NOTE(review): the body of this test is byte-identical to the previous test
# and never actually changes the sf_column after creation -- confirm intent.
test_that("sf can change sf_column", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- tibble::as_tibble(local)
  local <- local["geometry"]
  colnames(local) <- "first"
  local$second <- local$first
  local <- sf::st_sf(local, sf_column_name = "second")
  DBI::dbCreateTable(conn, "local", local)
  expect_identical(rws_write(local, conn = conn), "local")
  init <- DBI::dbReadTable(conn, "readwritesqlite_init")
  expect_identical(init, data.frame(
    TableInit = "LOCAL", IsInit = 1L, SFInit = "SECOND",
    stringsAsFactors = FALSE
  ))
  remote <- rws_read_table("local", conn = conn)
  expect_identical(class(remote), c("sf", "tbl_df", "tbl", "data.frame"))
  expect_identical(colnames(remote), colnames(local))
  expect_identical(nrow(remote), 3L)
  expect_equivalent(remote$first, local$first)
  expect_equivalent(remote$second, local$second)
})
test_that("sf data frames with two geometries and lots of other stuff and correct one passed back", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- tibble::as_tibble(local)
  local$second <- local$geometry
  local <- sf::st_sf(local, sf_column_name = "second")
  expect_identical(rws_write(local, exists = NA, conn = conn), "local")
  init <- DBI::dbReadTable(conn, "readwritesqlite_init")
  expect_identical(init, data.frame(
    TableInit = "LOCAL", IsInit = 1L, SFInit = "SECOND",
    stringsAsFactors = FALSE
  ))
  remote <- rws_read_table("local", conn = conn)
  expect_identical(class(remote), c("sf", "tbl_df", "tbl", "data.frame"))
  expect_identical(colnames(remote), colnames(local))
  expect_identical(nrow(remote), 3L)
  expect_identical(remote$logical, local$logical)
  expect_identical(remote$date, local$date)
  expect_identical(remote$posixct, local$posixct)
  expect_identical(remote$units, local$units)
  expect_identical(remote$factor, local$factor)
  expect_identical(remote$ordered, local$ordered)
  expect_equivalent(remote$geometry, local$geometry)
  expect_equivalent(remote$second, local$second)
})
# ---- initialization with zero-row input and meta data protection ----

test_that("initialized even with no rows of data", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- tibble::as_tibble(local)
  local$second <- local$geometry
  local <- sf::st_sf(local, sf_column_name = "second")
  local <- local[integer(0), ]
  # Even a zero-row write records the sf column in the init table.
  expect_identical(rws_write(local, exists = NA, conn = conn), "local")
  init <- DBI::dbReadTable(conn, "readwritesqlite_init")
  expect_identical(init, data.frame(
    TableInit = "LOCAL", IsInit = 1L, SFInit = "SECOND",
    stringsAsFactors = FALSE
  ))
  remote <- rws_read_table("local", conn = conn)
  expect_identical(class(remote), c("sf", "tbl_df", "tbl", "data.frame"))
  expect_identical(colnames(remote), colnames(local))
  expect_identical(nrow(remote), 0L)
  expect_identical(remote$logical, local$logical)
  expect_identical(remote$date, local$date)
  expect_identical(remote$posixct, local$posixct)
  expect_identical(remote$units, local$units)
  expect_identical(remote$factor, local$factor)
  expect_identical(remote$ordered, local$ordered)
  expect_equivalent(remote$geometry, local$geometry)
  expect_equivalent(remote$second, local$second)
})
test_that("initialized meta with no rows of data and not overwritten unless delete = TRUE", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- local["date"]
  local <- tibble::as_tibble(local)
  local <- local[integer(0), ]
  expect_identical(rws_write(local, exists = NA, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local)
  # Character input now conflicts with the stored Date meta data...
  local[] <- lapply(local, as.character)
  expect_error(
    rws_write(local, conn = conn),
    "^Column 'date' in table 'local' has 'No' meta data for the input data but 'class: Date' for the existing data[.]$"
  )
  local <- data.frame(date = "2000-01-01", stringsAsFactors = FALSE)
  expect_error(
    rws_write(local, conn = conn),
    "^Column 'date' in table 'local' has 'No' meta data for the input data but 'class: Date' for the existing data[.]$"
  )
  # ...unless delete = TRUE, which resets the meta data along with the rows.
  expect_identical(rws_write(local, delete = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, tibble::as_tibble(local))
})
test_that("initialized with no rows of data and no metadata and not overwritten unless delete = TRUE", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- local["date"]
  local <- tibble::as_tibble(local)
  local[] <- lapply(local, as.character)
  local <- local[integer(0), ]
  expect_identical(rws_write(local, exists = NA, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local)
  local2 <- as.data.frame(readwritesqlite:::rws_data_sf)
  local2 <- local2["date"]
  local2 <- tibble::as_tibble(local2)
  local2 <- local2[integer(0), ]
  expect_error(
    rws_write(local2, conn = conn, x_name = "local"),
    "^Column 'date' in table 'local' has 'class: Date' meta data for the input data but 'No' for the existing data[.]$"
  )
  expect_identical(rws_write(local2, delete = TRUE, conn = conn, x_name = "local"), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local2)
})
# NOTE(review): this test is an exact duplicate (name and body) of the
# previous one -- presumably a copy/paste slip; confirm and remove one.
test_that("initialized with no rows of data and no metadata and not overwritten unless delete = TRUE", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local <- local["date"]
  local <- tibble::as_tibble(local)
  local[] <- lapply(local, as.character)
  local <- local[integer(0), ]
  expect_identical(rws_write(local, exists = NA, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local)
  local2 <- as.data.frame(readwritesqlite:::rws_data_sf)
  local2 <- local2["date"]
  local2 <- tibble::as_tibble(local2)
  local2 <- local2[integer(0), ]
  expect_error(
    rws_write(local2, conn = conn, x_name = "local"),
    "^Column 'date' in table 'local' has 'class: Date' meta data for the input data but 'No' for the existing data[.]$"
  )
  expect_identical(rws_write(local2, delete = TRUE, conn = conn, x_name = "local"), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local2)
})
# ---- writing meta-inconsistent data with meta = FALSE, then delete reset ----

test_that("meta then inconsistent data then error meta but delete reset", {
  conn <- local_conn()
  local <- as.data.frame(readwritesqlite:::rws_data_sf)
  local$geometry <- NULL
  attr(local, "sf_column") <- NULL
  attr(local, "agr") <- NULL
  local <- tibble::as_tibble(local)
  expect_identical(rws_write(local, exists = NA, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local)
  # A one-row frame of all-"garbage" strings conflicts with the stored meta.
  local2 <- local
  local2[] <- lapply(local2, function(x) {
    return("garbage")
  })
  local2 <- local2[1, ]
  expect_error(
    rws_write(local2, conn = conn, x_name = "local"),
    "^Column 'logical' in table 'local' has 'No' meta data for the input data but 'class: logical' for the existing data[.]$"
  )
  # meta = FALSE forces the write through despite the mismatch; the mixed
  # column types then trigger a coercion warning on read.
  expect_identical(rws_write(local2, conn = conn, meta = FALSE, x_name = "local"), "local")
  expect_warning(remote <- rws_read_table("local", conn = conn), "Column `logical`: mixed type, first seen values of type integer, coercing other values of type string")
  expect_identical(remote, tibble::tibble(
    logical = c(TRUE, FALSE, NA, FALSE),
    date = as.Date(c("2000-01-01", "2001-02-03", NA, "1970-01-01")),
    factor = factor(c("x", "y", NA, NA), levels = c("x", "y")),
    ordered = ordered(c("x", "y", NA, NA), levels = c("y", "x")),
    posixct = as.POSIXct(c("2001-01-02 03:04:05", "2006-07-08 09:10:11", NA, "1969-12-31 16:00:00"),
      tz = "Etc/GMT+8"
    ),
    units = units::as_units(c(10, 11.5, NA, 0), "m")
  ))
  # Reading with meta = FALSE returns the raw stored representation.
  expect_warning(remote2 <- rws_read_table("local", meta = FALSE, conn = conn), "Column `logical`: mixed type, first seen values of type integer, coercing other values of type string")
  expect_identical(remote2, tibble::tibble(
    logical = c(1L, 0L, NA, 0L),
    date = c(10957, 11356, NA, 0),
    factor = c("x", "y", NA, "garbage"),
    ordered = c("x", "y", NA, "garbage"),
    posixct = c(978433445, 1152378611, NA, 0),
    units = c(10, 11.5, NA, 0)
  ))
  # delete = TRUE restores the original consistent state.
  expect_identical(rws_write(local, delete = TRUE, conn = conn), "local")
  remote <- rws_read_table("local", conn = conn)
  expect_identical(remote, local)
})
|
cb0274784d654d28268036562ce3ce89e932b5c5 | b14061976e8881dc12af608abde631abd86400c9 | /function/CHOPseq.BamScannerChr.R | 03112562512f5dd36ab2ecc337b09027a41f887f | [] | no_license | zhezhangsh/CHOPseq | 38a41b20ae320f87ccff6abf5746761bdebf4e8a | 5ff7a248deca09fb6d542876bee29640f2215dd5 | refs/heads/master | 2021-01-15T15:50:43.520019 | 2019-09-10T16:30:53 | 2019-09-10T16:30:53 | 55,070,772 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,224 | r | CHOPseq.BamScannerChr.R | # A wrapper of the Rsamtools:bamScanner function to read all tags on given chromosome(s) and specified SAM fields
# Read all tags on given chromosome(s) for the specified SAM fields from a
# BAM file (a wrapper around Rsamtools::scanBam).
#
# Args:
#   bamFile character, path and name of the BAM file.
#   fields  character vector of SAM field names, or one preset name:
#           'DEFAULT' = c('rname', 'strand', 'pos', 'qwidth', 'mapq')
#           'DETAIL'  = c('qname', 'rname', 'strand', 'pos', 'qwidth', 'mapq', 'cigar', 'seq')
#           'ALL'     = every field listed by scanBamWhat()
#   chr     character vector of chromosome names; NA (default) reads the whole file.
#   start   unused placeholder (whole chromosomes are always read).
#   end     unused placeholder.
#   output  'data.frame' (default) or 'GRanges'.
#   by.chr  boolean; if TRUE, return a list with one element per chromosome.
# Returns: the tags as a data.frame, a GRanges object, or a list of either.
CHOPseq.BamScannerChr <- function(bamFile, fields='DEFAULT', chr=NA, start=NA, end=NA, output='data.frame', by.chr=FALSE) {
  library(Rsamtools);
  library(chipseq);

  # ---- resolve the requested SAM fields ----
  # identical() is used instead of == so that a multi-element custom `fields`
  # vector falls through to the intersect() branch instead of erroring in if().
  if (identical(fields, 'DEFAULT')) fields <- c('rname', 'strand', 'pos', 'qwidth', 'mapq')
  else if (identical(fields, 'DETAIL')) fields <- c('qname', 'rname', 'strand', 'pos', 'qwidth', 'mapq', 'cigar', 'seq')
  else if (identical(fields, 'ALL')) fields <- scanBamWhat()
  else fields <- intersect(fields, scanBamWhat());
  if (length(fields) < 1) fields <- c('rname', 'strand', 'pos', 'qwidth', 'mapq'); # fall back to the default set if no valid field name was given
  # BUG FIX: this comparison was spelled output=='GRangs', so the fields
  # required to build a GRanges object were never forced in.
  if (output == 'GRanges') fields <- union(fields, c('rname', 'pos', 'qwidth', 'strand'));
  # order the selected fields as they appear in the SAM format
  ind <- seq_along(scanBamWhat());
  names(ind) <- scanBamWhat();
  fields <- fields[order(ind[fields])];

  # format the scanBam() output (a list of fields) as a data frame,
  # converting factor/DNAStringSet/PhredQuality fields to character
  format2data.frame <- function(tags) {
    tags <- lapply(tags, function(field) if (class(field) == 'factor') as.vector(field)
                   else if (class(field) == 'DNAStringSet') as.character(field)
                   else if (class(field) == 'PhredQuality') as.character(field)
                   else field);
    as.data.frame(tags);
  }

  # convert tags from the list format to a GRanges object; `len` holds the
  # (named) chromosome length(s) used to set seqlengths()
  format2GRanges <- function(tags, len) {
    library(GenomicRanges);
    gr <- GRanges(seqnames=as.vector(tags[['rname']]), ranges=IRanges(start=tags[['pos']], width=tags[['qwidth']]), strand=(tags[['strand']]));
    if (length(len) == 1) len <- as.numeric(len);
    seqlengths(gr) <- len;
    # keep every field beyond the positional defaults as a metadata column;
    # names reserved by GRanges must not become metadata
    meta <- setdiff(names(tags), c("rname", "pos", "qwidth", "seqnames", "ranges", "strand", "seqlengths", "start", "end", "width", "element"));
    if (length(meta) > 0) {
      meta <- lapply(tags[meta], function(field) if (class(field) == 'factor') as.vector(field)
                     else if (class(field) == 'DNAStringSet') as.character(field)
                     else if (class(field) == 'PhredQuality') as.character(field)
                     else field);
      elementMetadata(gr) <- as.data.frame(meta);
    }
    gr;
  }

  # chromosome names and lengths from the BAM file header
  bam.chr.len <- scanBamHeader(bamFile)[[1]][[1]];

  if (identical(NA, chr)) {
    # ---- no chromosome specified: read the entire BAM file ----
    read.chr <- names(bam.chr.len);
    names(read.chr) <- read.chr;
    param <- ScanBamParam(what=fields);
    taken <- system.time(tags <- scanBam(bamFile, param=param)[[1]])[3];
    if (output == "GRanges") tags <- format2GRanges(tags, bam.chr.len)
    else tags <- format2data.frame(tags);
  }
  else {
    # ---- read only the given chromosome(s) ----
    # match a given chromosome name to the names used in the BAM file,
    # ignoring case and an optional 'chr' prefix; NA if there is no match
    matchName <- function(chr.name, bam.chr.names) {
      if (chr.name %in% bam.chr.names) name <- chr.name
      else name <- bam.chr.names[sub('CHR', '', toupper(bam.chr.names)) == sub('CHR', '', toupper(chr.name))];
      if (length(name) == 0) name <- NA;
      name;
    }
    read.chr <- sapply(chr, function(chr, bam) matchName(chr, bam), bam=names(bam.chr.len));
    read.chr <- read.chr[!is.na(read.chr)];
    # terminate if no valid chromosome name was given
    if (length(read.chr) < 1) {
      print(as.character(chr));
      stop("None valid chromosome names were given");
    }
    ranges <- RangesList(lapply(read.chr, function(chr, len) IRanges(start=1, end=len[chr]), len=bam.chr.len));
    names(ranges) <- read.chr;
    param <- ScanBamParam(which=ranges, what=fields);
    taken <- system.time(tags <- scanBam(bamFile, param=param))[3];
    names(tags) <- names(read.chr);
    # if the BAM file names differ from the given names, report the names
    # given as parameters in the output
    if (!identical(as.character(read.chr), names(read.chr)))
      tags <- lapply(seq_along(tags), function(i, tags) {tags[[i]]$rname <- rep(names(tags)[i], length(tags[[i]]$rname)); tags[[i]];}, tags=tags);
    if (output == 'GRanges') tags <- lapply(seq_along(tags), function(i, tags, len) format2GRanges(tags[[i]], len[i]), tags=tags, len=bam.chr.len[read.chr])
    else tags <- lapply(tags, format2data.frame);
    # collapse the per-chromosome list unless by.chr was requested
    if (!by.chr) if (output == "GRanges") {
      seqlevels(tags[[1]]) <- sapply(tags, seqlevels);
      tags <- do.call('c', tags);
    }
    else tags <- do.call(rbind, tags);
  }
  print(paste('Retrieved tags from', length(read.chr), 'chromosome(s),', round(taken), 'seconds used.'));
  tags;
}
|
743c768878d77726edca3bde8595142e3dc038c7 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/SCPME/man/RIDGEc.Rd | d0a31294c7981fb06f951e8177b169785f198f7c | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 486 | rd | RIDGEc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{RIDGEc}
\alias{RIDGEc}
\title{Ridge-penalized precision matrix estimation (c++)}
\usage{
RIDGEc(S, lam)
}
\arguments{
\item{S}{sample covariance matrix (denominator n).}
\item{lam}{tuning parameter for ridge penalty.}
}
\value{
estimated Omega
}
\description{
Ridge penalized matrix estimation via closed-form solution. Augmented from Adam Rothman's STAT 8931 code.
}
\keyword{internal}
|
987bc55f11ec3db7a7d8c25a13c30ba9ad782e98 | b9f9cf0f038f7d5581224db3b5fe2de3d9b50b30 | /build_ld_ref/utils/cal_ld.R | fbc0a2bc9e82b4d1383559237727f13579de0419 | [] | no_license | zhenin/HDL | b9c92b2282de71b518c29a5d670f4c8a4e60f6aa | d308812f7c0f4f2cbbdcbad825d26c850df9a950 | refs/heads/master | 2022-07-29T09:20:08.875063 | 2022-07-08T07:43:38 | 2022-07-08T07:43:38 | 209,699,095 | 74 | 27 | null | 2021-04-16T01:45:21 | 2019-09-20T03:39:03 | R | UTF-8 | R | false | false | 4,291 | r | cal_ld.R | #!/usr/bin/env Rscript
suppressWarnings(suppressMessages(require(dplyr)))
suppressWarnings(suppressMessages(require(tidyr)))
suppressWarnings(suppressMessages(require(argparser)))
suppressWarnings(suppressMessages(require(data.table)))
# Build the plink command line that computes the r LD statistic for one
# segment of a chromosome (SNPs `from.snp` through `to.snp`), writing the
# results to '<ldprefix>_chr<chrom>.<i.segment>_'.
ld.cmd <- function(bfile, ldprefix, chrom, i.segment, from.snp, to.snp,
                   bandwidth=500, ld.window.kb=100000, plink.path='plink'){
  out.prefix <- paste0(ldprefix, '_chr', chrom, '.', i.segment, '_')
  sprintf('%s --silent --bfile %s --r --ld-window %s --ld-window-kb %d --from %s --to %s --out %s',
          plink.path, bfile, bandwidth + 1, as.integer(ld.window.kb),
          from.snp, to.snp, out.prefix)
}
# Write the rows of `bim` whose base-pair position lies between the positions
# of `from.snp` and `to.snp` (inclusive) to
# '<ldprefix>_chr<chrom>.<i.segment>_.bim', space-separated, without header,
# quotes or row names.
# NOTE(review): assumes `bim` holds a single chromosome and that `from.snp`
# and `to.snp` each match exactly one row of `bim` -- confirm with the caller.
write.bim <- function(chrom, i.segment, from.snp, to.snp, ldprefix, bim){
# look up the base-pair positions of the segment's first and last SNP
from.pos <- filter(bim, snp==from.snp)$pos
to.pos <- filter(bim, snp==to.snp)$pos
bim %>%
filter(pos<=to.pos, pos>=from.pos) %>%
write.table(., file=paste0(ldprefix, '_chr', chrom, '.', i.segment, '_.bim'),
sep=' ', quote=F, row.names=F, col.names=F)
}
# Load the per-chromosome segment bookkeeping produced by the chromosome
# splitting step, write one .bim file per segment of `chrom`, and return a
# data frame describing each segment.
#
# Args:
#   ldprefix path prefix of the LD reference files.
#   chrom    chromosome to process (character).
#   bim.file path to the plink .bim file (may cover several chromosomes).
# Returns a data.frame with columns chrom, from.snp, to.snp, nsnps, i.segment.
# Side effect: writes '<ldprefix>_chr<chrom>.<i>_.bim' for every segment.
# Stops with an informative error if the bookkeeping files or the requested
# chromosome are missing.
load.seg.split.bim <- function(ldprefix, chrom, bim.file){
  nsnps.list.file <- paste0(ldprefix, '_snp_counter_array.RData')
  snps.name.list.file <- paste0(ldprefix, '_snp_list_array.RData')
  if(!file.exists(nsnps.list.file)){
    stop(paste0('[file missing] : ', nsnps.list.file,
                '. Please check the prefix of LD reference or run the first (chromosomes spliting) step correctly.'))
  }
  if(!file.exists(snps.name.list.file)){
    stop(paste0('[file missing] : ', snps.name.list.file,
                '. Please check the prefix of LD reference or run the first (chromosomes spliting) step correctly.'))
  }
  # loads `nsnps.list` and `snps.name.list` into this function's frame
  load(nsnps.list.file)
  load(snps.name.list.file)
  idx <- which(names(nsnps.list)==chrom)
  # BUG FIX: which() returns integer(0) -- never 0 -- when the chromosome is
  # absent, so the original `if(idx == 0)` crashed if() with a zero-length
  # condition instead of raising the intended error message.
  if(length(idx) == 0){
    stop(paste0('[chromosome missing] : ', chrom,
                '. You should `cat` all the required .bim files into a single .bim file, and re-run the first step.'))
  }
  bim <- fread(bim.file, header=F,
               col.names=c('chr', 'snp', 'gpos',
                           'pos', 'ref', 'alt')) %>%
    mutate(chr=as.character(chr)) %>%
    filter(chr==chrom)
  # SNPs of `chrom` occupy positions s..e of the flat snps.name.list vector:
  # e = total count up to and including this chromosome, s = e - its own count + 1
  e <- do.call('sum', nsnps.list[1:idx])
  s <- e - sum(nsnps.list[[chrom]]) + 1
  snps <- snps.name.list[s:e]
  from.snps <- c()
  to.snps <- c()
  s <- 1
  segs <- nsnps.list[[chrom]]
  for(i.segment in seq_along(segs)){
    e <- s + segs[i.segment] - 1
    from.snps <- c(from.snps, snps[s])
    to.snps <- c(to.snps, snps[e])
    write.bim(chrom, i.segment, snps[s], snps[e], ldprefix, bim) # write the segment .bim
    s <- e + 1
  }
  return(data.frame(chrom=chrom,
                    from.snp=from.snps,
                    to.snp=to.snps,
                    nsnps=nsnps.list[[chrom]],
                    i.segment=seq_along(from.snps)))
}
# ---- command-line interface ----
args <- arg_parser('Calculate LD.') %>%
add_argument('bfile', help='path to the plink bfile', type='character') %>%
add_argument('ldprefix', help='ld_ref_path/ld_ref_name', type='character') %>%
add_argument('chrom', help='chromsome', type='character') %>%
add_argument('plink_path', help='path to plink software', type='character') %>%
add_argument('--print-cmd', help='print plink ld calculation commands', flag=TRUE) %>%
add_argument('--bandwidth', help='bandwith (# of SNPs) for LD calculation, default=500',
type='numeric', default=500) %>%
add_argument('--ld-window',
help='window size (kb) for LD calculation, default=1000000 (whole segment)',
type='numeric', default=1000000) %>%
parse_args()
bfile <- args$bfile
ldprefix <- args$ldprefix
chrom <- args$chrom
plink.path <- args$plink_path
# plink expects whole numbers for both window parameters
bandwidth <- round(args$bandwidth, 0)
ld.window.kb <- round(args$ld_window, 0)
print.cmd <- args$print_cmd
# NOTE(review): bim.file is computed here but the call below rebuilds the
# same path inline -- one of the two is redundant.
bim.file <- paste0(bfile, '.bim')
# load segment info & split bim
seg.info <- load.seg.split.bim(ldprefix, chrom, paste0(bfile, '.bim'))
# calculate LD: build one plink command per segment
cmds <- seg.info %>%
rowwise(.) %>%
mutate(cmd=ld.cmd(bfile, ldprefix, chrom, i.segment, from.snp, to.snp, bandwidth, ld.window.kb, plink.path)) %>%
ungroup() %>%
# mutate(cmd=paste0(cmd, ' ; gzip -f ', ldprefix, '.ld')) %>%
select(cmd) %>%
pull()
if(print.cmd){
# print ld cmd (dry run)
cat(paste0(paste0(cmds, collapse='\n'), '\n'))
}else{
# run ld cmd
for(cmd in cmds){
system(cmd)
}
}
405c1c65ad9e2f87eee51309d8ede59688095752 | 5adde4b27d8aafe58598e0986c660d5c88bc089a | /plot2.R | 7baede3b7ecf13cef107a96467b762c4fca0ca2c | [] | no_license | jgoodbody/ExData_Plotting1 | 03b439789a9e97e2644c601ffe45bb6690dec67b | 8cf6f7efe5812b7ea476e033e315daf6d146072e | refs/heads/master | 2021-01-15T09:38:34.942026 | 2015-04-12T23:12:20 | 2015-04-12T23:12:20 | 33,827,395 | 0 | 0 | null | 2015-04-12T17:59:32 | 2015-04-12T17:59:32 | null | UTF-8 | R | false | false | 647 | r | plot2.R | ##Read in data
data <- read.csv("household_power_consumption.txt",header=TRUE,sep = ";")
##Create POSIXt class "Time" column
data$Time <- strptime(paste(data$Date,data$Time),format="%d/%m/%Y %H:%M:%S", tz = "")
##Create Date class "Date" column
data$Date <- as.Date(data$Date,"%d/%m/%Y")
##Subset data for Feb 1, 2007 - Feb 2, 2007
data2 <- subset(data, data$Date >= as.Date("2007-02-01"))
data3 <- subset(data2, data2$Date <= as.Date("2007-02-02"))
##Create PNG file and line graph
png(file="plot2.png")
plot(x=data3$Time,y=as.numeric(as.character(data3$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off() |
1aae8ce03ab073d7fa0a738a0c3b7034e872047a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/geosphere/examples/area.Rd.R | ef12efdcffa92e3a46e0474b605ceeba80485a05 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 853 | r | area.Rd.R | library(geosphere)
### Name: areaPolygon
### Title: Area of a longitude/latitude polygon
### Aliases: areaPolygon areaPolygon,matrix-method
###   areaPolygon,data.frame-method areaPolygon,SpatialPolygons-method
### Keywords: spatial

### ** Examples

# A closed polygon (first vertex repeated at the end), given as lon/lat pairs
p <- rbind(c(-180,-20), c(-140,55), c(10, 0), c(-140,-60), c(-180,-20))
areaPolygon(p)

# Be careful with very large polygons, as they may not be what they seem!
# For example, if you wanted a polygon to compute the area equal to about 1/4 of the ellipsoid
# this won't work:
b <- matrix(c(-180, 0, 90, 90, 0, 0, -180, 0), ncol=2, byrow=TRUE)
areaPolygon(b)
# Because the shortest path between (-180,0) and (0,0) is
# over one of the poles, not along the equator!
# Inserting a point along the equator fixes that
b <- matrix(c(-180, 0, 0, 0, -90,0, -180, 0), ncol=2, byrow=TRUE)
areaPolygon(b)
|
c7a228d55a66467320ed5fc8021379b0f6f519e5 | fb76292e6104e86390654c95e8fb38febb562f7a | /test.R | 63bf4ff12309d433dbd4263bee41519febf7ef95 | [] | no_license | daspromi/dataScienceCourse | 673e41f64854ce47a8cecf7d1dd1ac07aea465bc | 23f2072403600bb25131bbe533f6d38f86a088c8 | refs/heads/master | 2020-09-01T22:29:04.324963 | 2019-11-01T23:02:46 | 2019-11-01T23:02:46 | 219,074,923 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | test.R | # Load directory
setwd("~/Documents/Projects/dataScienceCourse") |
341d81f51935f11352d8d751044f283dc51f4140 | a4801f5f15cfe478585286cd1986ca04bcc65eef | /tests/testthat/test-updateAdequacyPatch.R | b7f122e21a70f1dc45b6b822bf9f818c9d7e609f | [] | no_license | rte-antares-rpackage/antaresEditObject | acfa8ad126149fb6a38943919e55567a5af155f8 | 452b09e9b98d4425d6ee2474b9bbd06548e846d2 | refs/heads/master | 2023-08-10T21:01:21.414683 | 2023-07-13T13:06:02 | 2023-07-13T13:06:02 | 96,431,226 | 10 | 16 | null | 2023-09-08T09:31:25 | 2017-07-06T13:07:19 | R | UTF-8 | R | false | false | 1,152 | r | test-updateAdequacyPatch.R |
# Regression test: the adequacy-patch parameters of an Antares study can be
# toggled through updateAdequacySettings() and are reflected in the cached
# simulation options.
context("Function updateAdequacySettings")

sapply(studies, function(study) {

  # Copy the template study into a working directory and point antaresRead at it.
  setup_study(study, sourcedir)
  opts <- antaresRead::setSimulationPath(studyPath, "input")

  # The adequacy patch only exists for Antares >= 8.5, so older studies are skipped.
  if (opts$antaresVersion >= 850){
    test_that("Update an adequacy parameter", {

      updateAdequacySettings(include_adq_patch = TRUE)
      updateAdequacySettings(set_to_null_ntc_from_physical_out_to_physical_in_for_first_step = FALSE)
      updateAdequacySettings(set_to_null_ntc_between_physical_out_for_first_step = FALSE)
      updateAdequacySettings(check_csr_cost_function = TRUE)

      # Each setter above must show up in the "antares" option cache.
      expect_true(getOption("antares")$parameters$`adequacy patch`$`include-adq-patch`)
      expect_false(getOption("antares")$parameters$`adequacy patch`$`set-to-null-ntc-from-physical-out-to-physical-in-for-first-step`)
      expect_false(getOption("antares")$parameters$`adequacy patch`$`set-to-null-ntc-between-physical-out-for-first-step`)
      expect_true(getOption("antares")$parameters$`adequacy patch`$`check-csr-cost-function`)
    })

    # remove temporary study
    unlink(x = file.path(pathstd, "test_case"), recursive = TRUE)
  }
})
29df396cc8f23b39d4e2adc2ba161719dbac9d3b | a5bbcb2b8c60e803c0bc6c5f3b6acd6f76f608cd | /man/tables.Rd | 300417e24526541f187ecaf573b53a45032c47cc | [] | no_license | DataXujing/shinyBS | fdfaf0784b40c3693e43ade945bec22efa411bd1 | 6bfa2a44b6d05cebd251d7470b039878510fce3d | refs/heads/master | 2021-07-05T21:12:42.048441 | 2015-01-23T15:12:03 | 2015-01-23T15:12:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,640 | rd | tables.Rd | \name{Table Highlighting}
\alias{highlightRows}
\alias{highlightCells}
\title{
Table Highlighting
}
\description{
Functions to style table elements based on their contents.
}
\usage{
highlightRows(session, id, column, min = NULL, max = NULL, regex = NULL,
class = NULL, style = NULL, reset = NULL)
highlightCells(session, id, min = NULL, max = NULL, regex = NULL, column = NULL,
class = NULL, style = NULL, reset = NULL, skip.row.names = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{session}{
The \code{session} object passed to function given to \code{shinyServer}
}
\item{id}{
The id of the table you want to highlight
}
\item{column}{
The column to do matching on, can be either the index or the column name
}
\item{min}{
The minimum value of a cell that will match (inclusive)
}
\item{max}{
The maximum value of a cell that will match (inclusive)
}
\item{regex}{
A regular expression to base the match on.
}
\item{class}{
The name of the class to apply to matches
}
\item{style}{
CSS style attributes to apply to matches
}
\item{reset}{
Should preexisting styles be removed?
}
\item{skip.row.names}{
Should the first column of the table be ignored because it contains row names?
}
}
\details{
\code{highlightCells()} is used for highlighting individual cells and \code{highlightRows()} is used for highlighting entire rows. Both functions use the same arguments with \code{highlightRows()} having an additional column argument for indicating the column to search. You can supply the column argument with the value that appears in the top row or with a numeric index value for the column.\cr
Cells can be styled with the class or style arguments. class can be one of eight predefined classes (success, warning, error, info, good, bad, neutral, disabled) or any class you have defined. If you use the style argument its contents will be directly inserted into the style attribute of matching td and tr elements.
}
\references{
\href{http://getbootstrap.com/2.3.2/base-css.html}{Tables for Twitter Bootstrap 2.3.2}
}
\author{
Eric Bailey
}
\note{
Run \code{bsDemo()} for a live example of table highlighting.
}
\examples{
\dontrun{
# Apply the 'warning' class to cells in table 'htTable' that contain a value
# between 5 and 10
highlightCells(session, 'htTable', class = "warning", min = 5, max = 10)
# Apply the 'info' class to rows in table 'htTable' where the text in the
# first column contains an 'i'.
highlightRows(session, 'htable', column = NULL, class = "info", regex = "i")
}
} |
d522e3d5f411cff35de4fb63326adf832bd21bb0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/imputeTS/examples/na.seadec.Rd.R | 9db582096d1fd6d5b46a0f355b3b32abcc87ff93 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 488 | r | na.seadec.Rd.R | library(imputeTS)
### Name: na.seadec
### Title: Seasonally Decomposed Missing Value Imputation
### Aliases: na.seadec

### ** Examples

# Example 1: Perform seasonal imputation using algorithm = "interpolation"
na.seadec(tsAirgap, algorithm = "interpolation")

# Example 2: Perform seasonal imputation using algorithm = "mean"
na.seadec(tsAirgap, algorithm = "mean")

# Example 3: Same as example 1, just written with pipe operator
tsAirgap %>% na.seadec(algorithm = "interpolation")
|
c2cab70635e2c877a296ddbde30f60b3c187edc2 | 0c50324834ad198598e86151fe26dc46ba7b6bb1 | /Chapter 1 Graph Showing Overlap.R | 4b3ef7d59c5dad33aeff7db92eff00faa9686eb6 | [] | no_license | JessicaWilkes97/Project-R-Code | 2ff9af2119c56b3d3c9f5a502de7450f68387c42 | 2188e8949a90222d89898c1e2d70a88e5287d306 | refs/heads/master | 2020-05-18T15:21:08.652562 | 2019-05-01T23:45:36 | 2019-05-01T23:45:36 | 184,495,975 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,857 | r | Chapter 1 Graph Showing Overlap.R | nondiseased<-c(13.3,
11.1,
16.7,
12.6,
7.4,
5.5,
32.1,
27.2,
6.6,
9.8,
10.5,
7.8,
9.1,
12.3,
12,
42.1,
5.9,
9.2,
7.3,
6.8,
10.7,
15.7,
8,
6.8,
47.35,
17.9,
96.2,
108.9,
16.6,
9.5,
179,
12.1,
35.6,
15,
12.6,
5.9,
10.1,
8.5,
11.4,
54.65,
9.7,
11.2,
35.7,
22.5,
21.2,
5.6,
9.4,
12,
9.8,
17.2,
10.6)
# CA 125 values for the 90 individuals in the diseased group (plotted in black
# below as "Individuals with pancreatic cancer"). Same values as the original
# one-entry-per-line listing, reformatted ten per row for readability.
diseased <- c(
  79.1,  31.4,  15,    77.8,  25.7,  11.7,  8.25,  14.95,  8.7,   14.1,
  123.9, 12.1,  99.1,  18.6,  10.5,  6.6,   74,    43.9,   45.7,  13,
  7.3,   8.6,   17.2,  15.4,  14.3,  93.1,  66.3,  26.7,   32.4,  9.9,
  30.3,  11.2,  202,   35.7,  9.2,   103.6, 21.4,  8.1,    29.9,  17.5,
  30.8,  57.3,  6.5,   33.8,  53.6,  17.2,  94.2,  33.5,   3.7,   11.7,
  19.9,  38.7,  27.3,  20.1,  86.1,  844,   36.9,  6.9,    27.7,  9.9,
  38.6,  142.6, 12.5,  11.6,  21.2,  13.2,  19.2,  1024,   14.1,  34.8,
  35.3,  35,    15.5,  12.1,  31.6,  184.8, 24.8,  10.4,   34.5,  19.4,
  22.2,  53.9,  15.4,  17.3,  36.8,  49.8,  26.5667, 9.7,  19.2,  14.2
)
# Widen the right margin so the legend can sit outside the plotting region.
par(mar = c(5, 5, 1, 10))

# Scatter of log CA 125 values, one point per patient (diseased group, black).
plot(log(diseased), ylim = c(min(log(diseased)), max(log(diseased))),
     xlab = "Patients", ylab = "log CA 125 value",
     cex = 0.6, pch = 19, cex.lab = 1.3, cex.axis = 1.3)
# Overlay the non-diseased individuals in red for comparison.
points(log(nondiseased), col = "red", cex = 0.6, pch = 19)

legend("topright",
       legend = c("Individuals with\npancreatic cancer",
                  "Individuals without\npancreatic cancer"),
       col = c("black", "red"),  # entries
       pt.cex = c(1, 1),         # point expansion
       pch = 19,                 # plot symbol
       ncol = 1,
       inset = c(-0.7, 0),       # negative inset pushes the legend into the margin
       cex = 1,
       bty = 'n',
       xpd = TRUE,               # allow drawing outside the plot region
       y.intersp = 2)

# Horizontal reference line at log CA 125 = 3.95 (candidate decision threshold).
abline(a = 3.95, b = 0)
# FIX: removed a stray interactive help call (`?log`) left over from debugging;
# it opens a help page and has no place in a script.
|
d3222d9dd72e8e5849a9c82ea3eea34e1c7ca294 | 6c5eccf8518cfaecdc2a7ac47c060d78a622c789 | /run_analysis.R | 5bd99bc6d768852bb941e4b59a66af40c991a55f | [] | no_license | sueyic/GettingAndCleaningDataCourseProject | 15d89afd436c052acab00a8bda0fe53c4369995f | b9f6a0d5f0f5c3736fdc4cf917589c9db5f7d0e6 | refs/heads/master | 2016-09-06T12:50:09.378332 | 2014-12-29T03:03:06 | 2014-12-29T03:03:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,796 | r | run_analysis.R | ### Submission for course project for "Getting and Cleaning Data":
### http://class.coursera.org/getdata-016/
### Description: Prepare tidy data.
### Author: sueyic@gmail.com
library(plyr)
library(reshape2)
library(stringr)
library(Hmisc)
library(dplyr)
### (1) Merge the training and the test sets to create one data set.

### Sanity check -- verify that X_test.txt and X_train.txt have same number of columns.
testNumCols <- ncol(read.table("data/test/X_test.txt", nrow=1)) # Returns 561
trainNumCols <- ncol(read.table("data/train/X_train.txt", nrow=1)) # Returns 561
print(paste("Num cols in X_test.txt:", testNumCols,
            ", Num cols in X_train.txt:", trainNumCols))
if (testNumCols != trainNumCols) {
  # FIX: the old message referenced a variable that does not exist (testNumRows).
  print(paste("Failed assertion: (testNumCols != trainNumCols)"))
  quit(status=1)
}

# Build raw_train from subject_train.txt, X_train.txt (measures), and
# y_train.txt (activities).
subject_train <- read.table("data/train/subject_train.txt", col.names=c("subject"))
y_train <- read.table("data/train/y_train.txt", col.names=c("activity"))
X_train <- read.table("data/train/X_train.txt")
raw_train <- cbind(subject_train, y_train, X_train)

# Build raw_test from subject_test.txt, X_test.txt (measures), and
# y_test.txt (activities).
subject_test <- read.table("data/test/subject_test.txt", col.names=c("subject"))
y_test <- read.table("data/test/y_test.txt", col.names=c("activity"))
X_test <- read.table("data/test/X_test.txt")
raw_test <- cbind(subject_test, y_test, X_test)

# Create the merged data set from raw_train and raw_test.
raw <- rbind(raw_train, raw_test)

### (2) Extract only the measurements on the mean and standard deviation for each
# measurement.
features <- read.table("data/features.txt", col.names=c("colIndex", "featureRawName"))

baseNames <- c("tBodyAcc-XYZ", "tGravityAcc-XYZ", "tBodyAccJerk-XYZ", "tBodyGyro-XYZ",
               "tBodyGyroJerk-XYZ", "tBodyAccMag", "tGravityAccMag", "tBodyAccJerkMag",
               "tBodyGyroMag", "tBodyGyroJerkMag", "fBodyAcc-XYZ", "fBodyAccJerk-XYZ",
               "fBodyGyro-XYZ", "fBodyAccMag", "fBodyAccJerkMag", "fBodyGyroMag",
               "fBodyGyroJerkMag")

# For "-XYZ" base names, keep the per-axis mean/std features. Note that the two
# unconditional appends at the bottom of the loop also add names such as
# "tBodyAcc-XYZ-mean()"; those never match a real feature and are silently
# dropped by the %in% filter below.
featureNamesToKeep <- NULL
for (baseName in baseNames) {
  if (grepl("-XYZ$", baseName)) {
    pattern <- str_extract(baseName, "^[^-]+")
    featureNamesToKeep <- append(featureNamesToKeep, paste(pattern, "-mean()-X", sep=""))
    featureNamesToKeep <- append(featureNamesToKeep, paste(pattern, "-std()-X", sep=""))
    featureNamesToKeep <- append(featureNamesToKeep, paste(pattern, "-mean()-Y", sep=""))
    featureNamesToKeep <- append(featureNamesToKeep, paste(pattern, "-std()-Y", sep=""))
    featureNamesToKeep <- append(featureNamesToKeep, paste(pattern, "-mean()-Z", sep=""))
    featureNamesToKeep <- append(featureNamesToKeep, paste(pattern, "-std()-Z", sep=""))
  }
  featureNamesToKeep <- append(featureNamesToKeep, paste(baseName, "-mean()", sep=""))
  featureNamesToKeep <- append(featureNamesToKeep, paste(baseName, "-std()", sep=""))
}

filteredFeatures <- features[features$featureRawName %in% featureNamesToKeep,]

# This prints "Number of measurements to keep: 60"
print(paste("Number of measurements to keep:", nrow(filteredFeatures)))

# Create raw2, which only has the measurements to keep (read.table named the
# measurement columns V<index>, hence the "V" prefix here).
raw2 <- raw[, c("subject", "activity",
                ldply(filteredFeatures[,1], function(x) { paste("V", x, sep="") } )[,1])]

### (3) Uses descriptive activity names to name the activities in the data set.
raw3 <- mutate(raw2,
               activity=factor(raw2$activity,
                               labels= c("walking", "walkingUpstairs", "walkingDownstairs",
                                         "sitting", "standing", "laying")))
### (4) Appropriately labels the data set with descriptive variable names.
# Here we will rename the features.
featureRename <- function(x) {
# "-" is an invalid character of a column name in R, so we will replace it.
newName <- gsub("\\-", "", x)
newName <- gsub("mean\\(\\)", "Mean", newName)
newName <- gsub("std\\(\\)", "Std", newName)
newName
}
# Apply the renaming to every kept feature and install the cleaned column names.
newFeatureNames <- vapply(filteredFeatures[,2], featureRename, "")
names(raw3) <- c("subject", "activity", newFeatureNames)

# raw3 is tidy. But to make the next step easier, we will melt the measure variables into
# a "variable" column.
tidy <- melt(raw3, id=c("subject", "activity"), measure.vars=newFeatureNames)

### (5) From the data set in step 4, creates a second, independent tidy data set with the
# average of each variable for each activity and each subject.
grouped <- group_by(tidy, subject, activity, variable)
tidySummarized <- summarise(grouped, mean=mean(value))

# Write tidySummarized to data/tidySummarized.txt
write.table(tidySummarized, "data/tidySummarized.txt")

### The end.
|
f088d3e12da2fe9fea69ea8447d794b069937e4f | c9b151232ad188a38469473ec765c0f7a1defe7c | /R/cshift.R | 6c5a22e65825d850ef6ec4cd8f231af5673c74a1 | [] | no_license | obreschkow/cooltools | 3b2c46ac539962153c3a9aa8fbeaeee185455015 | 3b212d077537220aec5b8162f04ed85f7f0af996 | refs/heads/main | 2023-08-15T02:14:07.742064 | 2023-07-24T08:47:36 | 2023-07-24T08:47:36 | 184,692,943 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,418 | r | cshift.R | #' Circularly shift each dimension of an array
#'
#' @description Circulates each dimension of an array, analogous to
#' \code{\link[pracma]{circshift}}, generalised here to arrays of arbitrary
#' rank (the previous implementation supported ranks 1-5 only; results for
#' those ranks are unchanged).
#'
#' @param x vector or array
#' @param s scalar, if x is a vector, or a vector of length matching the rank of x, if x is an array
#'
#' @return Returns a vector or array of the same shape as x.
#'
#' @author Danail Obreschkow
#'
#' @export
cshift = function(x,s) {

  if (is.null(x)) return(x)

  if (is.vector(x) && length(s) == 1) {

    # plain vector: shift by s places, wrapping around the end
    n = length(x)
    s = s%%n
    x = x[(1:n-s-1)%%n+1]

  } else if (is.array(x)) {

    n = dim(x)
    if (length(n)!=length(s)) stop("Length of s must be equal to the number of dimensions of x.")
    s = s%%n

    # Build one circularly shifted index vector per dimension: element k of
    # dimension d is taken from position (k-s[d]-1) mod n[d] + 1, exactly as in
    # the former rank-specific branches, but now for any number of dimensions.
    idx = lapply(seq_along(n), function(d) (seq_len(n[d])-s[d]-1)%%n[d]+1)
    x = do.call(`[`, c(list(x), idx))

  }

  return(x)
}
|
88f852e0c0bcf10589795f5955930b27dd7f09f1 | bba339cdacd372bf37fa8f0e979c674c6ccf1dae | /tmp-app.R | 135bb5f585a306745042d5467ee63883760ea492 | [] | no_license | anguswg-ucsb/groundwater-dash | 6ccfa9d65f7bc54b8d7543cf7197cb488f3f277c | f6c48e29e875e7c846f6acbfa32909f84351668f | refs/heads/main | 2023-03-27T13:36:03.576976 | 2021-03-28T16:04:50 | 2021-03-28T16:04:50 | 326,004,629 | 1 | 1 | null | 2021-01-04T18:52:02 | 2021-01-01T15:27:47 | R | UTF-8 | R | false | false | 6,045 | r | tmp-app.R | # one piece of an answer to this StackOverflow question
# http://stackoverflow.com/questions/31814037/integrating-time-series-graphs-and-leaflet-maps-using-r-shiny
# for this we'll use Kyle Walker's rpubs example
# http://rpubs.com/walkerke/leaflet_choropleth
# combined with data from Diego Valle's crime in Mexico project
# https://github.com/diegovalle/mxmortalitydb
# we'll also build on the shiny example included in leaflet
# https://github.com/rstudio/leaflet/blob/master/inst/examples/shiny.R
# Return the most recent measurement record for a single well.
#
# Args:
#   az_time: data frame of well measurements; must contain the columns
#            wellid, date, source, dtw, min_dtw, max_dtw, measurement_dist.
#   wellid:  id of the well to look up (compared against the wellid column;
#            !! forces the function argument rather than the column).
#
# Returns a one-row data frame with the latest depth-to-water statistics.
well_stats = function(az_time, wellid) {
  stats = az_time %>%
    filter(wellid == !!wellid) %>%
    arrange(desc(date)) %>%
    # FIX: was slice(n = 1); recent dplyr rejects a named `n` inside slice()
    # ("`n` must not be named, did you mean slice_head(n = 1)?").
    slice(1) %>%
    select(wellid, source, dtw, min_dtw, max_dtw, measurement_dist)
  # Return the result visibly (a bare assignment as the last expression
  # returned it invisibly before).
  stats
}
# Build an interactive stacked-bar chart of withdrawals by sector, split by
# source, for the county containing the given well.
#
# Args:
#   az_time:      well time-series data; needs `wellid` and `county` columns.
#   sector_total: per-county withdrawal totals with `county`, `sector`,
#                 `withdrawal` and `source` columns.
#   wellid:       id of the well whose county should be charted.
#
# Returns a plotly object (ggplotly(...) is the last evaluated expression).
withdrawals_plot <- function(az_time, sector_total, wellid){
  # ADD WELL ID TO SECTOR TOTALS DF
  # Look up the county of the selected well; !! forces the function argument
  # (not the wellid column) on the right-hand side of the comparison.
  tmp1 <- az_time %>%
    filter(wellid == !!wellid) %>%
    select(county, wellid)

  # Keep only that county's sector totals.
  subset4 <- sector_total %>%
    filter(county == tmp1$county[1])

  subset4$wellid <- tmp1$wellid[match(subset4$county, tmp1$county)]

  # position = "fill" turns the bars into within-sector proportions (0-100%).
  gg1 = ggplot(subset4, aes(x = sector, y = withdrawal, fill = source)) +
    geom_col(position = "fill", col = "black", alpha = 0.7) +
    scale_y_continuous(labels = scales::percent) +
    labs(title = "SECTOR WITHDRAWALS",
         x = "SECTOR",
         y = "PERCENTAGE OF WITHDRAWALS",
         fill = "") +
    scale_fill_manual(values = c('green4', 'dodgerblue3')) +
    theme_bw() +
    theme(plot.title =element_text(size = 16, hjust = 0.5, vjust = 2),
          axis.text = element_text(size =10),
          axis.title = element_text(size = 10, vjust = 1),
          legend.title = element_text(size = 12),
          plot.caption = element_text(hjust = 0, face = "bold", size = 12))

  # Convert to plotly; only the y value is shown in the tooltip.
  ggplotly(gg1, tooltip = c('y'))

  # Leftover experiment with custom hover styling, kept for reference:
  # style(hoverlabel = label) %>%
  # layout(font = font,
  #        yaxis = list(fixedrange = TRUE)) %>%
  # config(displayModeBar = FALSE))
}
# NOTE(review): the statements below look like interactive scratch/debug code.
# `subset4` only exists inside withdrawals_plot(), and `sectors_total` is not
# defined anywhere in this file (presumably a typo for `sector_total`) --
# TODO confirm before running this script top to bottom.
subset4[which(subset4$withdrawal>0.00),]
withdrawals_plot(az_time, sector_total, 5301)

county_well <- sectors_total %>%
  filter(county == "Apache")

# Raw (unstacked) variant of the sector withdrawals chart.
ggplot(subset4, aes(x = sector, y = withdrawal)) +
  geom_col(aes(fill = source), alpha = 0.7) +
  labs(title = "SECTOR WITHDRAWALS",
       x = "SECTOR",
       y = "WITHDRAWALS",
       fill = "") +
  scale_fill_manual(values = c('green4', 'dodgerblue3')) +
  theme_bw() +
  theme(plot.title =element_text(size = 16, hjust = 0.5, vjust = 2),
        axis.text = element_text(face = "bold", size =10),
        axis.title = element_text(size = 12),
        legend.title = element_text(size = 12),
        plot.caption = element_text(hjust = 0, face = "bold", size = 12))

# Quick spot-check of the stats helper (column 3 of the result is dtw).
stats = well_stats(az_time, 100)
stats[3]
# Shared helpers (today_pts, basemap, make_graph, make_table, ...) live in helper.R.
source('helper.R')
today = today_pts(az_time)   # latest observation per well, feeds the map markers
basemap <- basemap(today)    # leaflet basemap, built once at startup
# Dashboard layout: county search, map and statistics table on the left;
# per-well depth graph, sector-withdrawals chart and value boxes on the right.
ui <- dashboardPage(
  dashboardHeader(title = "ARIZONA GROUNDWATER WELLS"),
  dashboardSidebar(disable = TRUE),
  dashboardBody(
    fluidRow(
      column(width = 7,
        # County search box backed by the fips/name lookup built from `today`.
        autocomplete_input("auto", "Search for a County:",
          value = "",
          max_options = 5,
          structure(today$fips, names = today$name)),
        box(width = NULL, solidHeader = TRUE,
          leafletOutput("groundMap", height = 650)),
        box(width = NULL, title = "Statistics",
          solidHeader = TRUE,
          DTOutput('groundTable'))
      ),
      column(width = 4,
        box(width = NULL, status = "primary",
          title = "DEPTH TO WATER",
          solidHeader = TRUE,
          dygraphOutput('groundGraph')),
        # infoBox("Total cases", icon = icon("credit-card"), fill = TRUE),
        box(width = NULL, status = "primary",
          title = "SECTOR WITHDRAWALS",
          solidHeader = TRUE,
          plotlyOutput('groundPlot')),
        valueBoxOutput("depthValue"),
        valueBoxOutput("minValue"),
        # NOTE(review): trailing comma after the last argument relies on
        # tolerant dots handling -- confirm it parses under the shiny version in use.
        valueBoxOutput("maxValue"),
      )
    )
  )
)
# Server logic. `wellid` is an ordinary (non-reactive) variable held in the
# server environment; the marker-click observer overwrites it with <<- and then
# re-assigns the render functions so they pick up the new id.
server <- function(input, output, session) {

  # Global variables initialized: start on the well with the greatest depth to water.
  wellid <- today$wellid[which.max(today$dtw)]
  v <- reactiveValues(msg = "Arizona Dept. of Water Resources")

  output$groundMap <- renderLeaflet({ basemap })
  output$groundGraph <- renderDygraph({ make_graph(az_time, wellid) })
  output$groundGraph2 <- renderDygraph({ make_graph2(az_time, wellid) })
  output$groundPlot <- renderPlotly({ withdrawals_plot(az_time, sector_total, wellid) })
  output$groundTable <- renderDT({ make_table(az_time, wellid) })
  output$groundMessage <- renderText(v$msg)

  # Value boxes: well_stats() columns 3/4/5 are dtw, min_dtw and max_dtw.
  output$depthValue <- renderValueBox({
    valueBox(
      paste0((well_stats(az_time, wellid)[3])),
      subtitle = "DEPTH",
      icon = icon("user"),
      color = "blue") })
  output$minValue <- renderValueBox({
    valueBox(
      paste0((well_stats(az_time, wellid)[4])),
      subtitle = "MINMUMUM DEPTH",   # NOTE(review): typo in displayed label ("MINIMUM"); user-facing string left unchanged here
      icon = icon("user"),
      color = "green") })
  output$maxValue <- renderValueBox({
    valueBox(
      paste0((well_stats(az_time, wellid)[5])),
      subtitle = "MAXIMUM DEPTH",
      icon = icon("user"),
      color = "red") })

  observeEvent(input$groundMap_marker_mouseout, {
    v$msg <- "Mouse is over: "
  })

  # Clicking a map marker selects that well and refreshes the outputs.
  observeEvent(input$groundMap_marker_click, {
    wellid <<- input$groundMap_marker_click$id
    print(wellid)
    output$groundGraph <- renderDygraph({ make_graph(az_time, wellid) })
    # output$groundTable <- renderDT({ make_table(az_time, wellid) })
    output$groundGraph2 <- renderDygraph({ make_graph2(az_time, wellid) })
    output$groundPlot <- renderPlotly({ withdrawals_plot(az_time, sector_total, wellid) })
    output$groundTable <- renderDT({ make_table(az_time, wellid) })
    output$groundMessage <- renderText(v$msg)
    # NOTE(review): only the depth box is re-rendered on click; minValue and
    # maxValue keep showing the previous well -- confirm whether intentional.
    output$depthValue <- renderValueBox({
      valueBox(
        paste0((well_stats(az_time, wellid)[3])),
        subtitle = "DEPTH",
        icon = icon("user"),
        color = "blue") })
    leafletProxy('groundMap')
  })
}

shinyApp( ui, server )
|
09d695b5ba9d0d2acddeb288a26d4de7db218b52 | de1a7bf7c408b0b0d8c9c353a70b978c5be3d460 | /R/data.R | 4cfd204e7aff0685ee2c7b9f315b7cd573bbc5a3 | [] | no_license | cran/gWQSRS | a82cad571cdd7550450f750db4e0250648825cc0 | 220e8345eb45f0c8a66443c87c7f6b318366e4e1 | refs/heads/master | 2020-12-22T00:07:29.396933 | 2020-03-03T10:40:02 | 2020-03-03T10:40:02 | 236,609,742 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,117 | r | data.R | #' Exposure concentrations of 34 PCB (simulated dataset)
#'
#' We created the `wqs_data` dataset to show how to use this function. These data reflect
#' 34 exposure concentrations simulated from a distribution of PCB exposures measured in
#' subjects participating in the NHANES study (2001-2002). Additionally, an end-point
#' measure, simulated from a distribution of leukocyte telomere length (LTL), a biomarker
#' of chronic disease, is provided as well (variable name: y), as well as simulated
#' covariates, e.g. sex, and a dichotomous outcome variable (variable name: disease_state).
#' This dataset can thus be used to test the `gWQS` package by analyzing the mixed effect
#' of the 34 simulated PCBs on the continuous or binary outcomes, with adjustments for
#' covariates.
#'
#' \describe{
#' \item{y}{continuous outcome, biomarker of chronic disease}
#' \item{disease_state}{dichotomous outcome, state of disease}
#' \item{sex}{covariate, gender of the subject}
#' \item{log_LBX}{34 exposure concentrations of PCB exposures}
#' ...
#' }
#'
#' @format A data frame with 500 rows and 37 variables
"wqs_data"
|
3a9a2a4fd1ec704181670e59ee9cef7f6bbba759 | 8fb0bb53eeba3ad9dbfb6a6b50a2e340cbeae3e7 | /Misc/ISLR_Scripts/Chapter9.R | 3508223c908131960e929ee2a04165464951e1d6 | [] | no_license | jpsuen/ITEC621_Class_Files | 02cf19de7faf348cc1e63433e6b4fd3b8b3e4189 | 3ae2ff686d44c6e868d9f8867d314d375e6b1907 | refs/heads/master | 2021-01-11T01:16:22.336630 | 2017-07-11T12:50:25 | 2017-07-11T12:50:25 | 70,730,672 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,586 | r | Chapter9.R | ### Chapter 9 Lab: Support Vector Machines
# 9.1 Support Vector Classifier -- need the "e1071" package
install.packages("e1071") # Contains several machine learning methods
library(e1071)

# Let's generate some observations in two classes, X and Y
set.seed(1)
x=matrix(rnorm(20*2), ncol=2)
y=c(rep(-1,10), rep(1,10))
x[y==1,]=x[y==1,] + 1
plot(x, col=(3-y)) # Note that the randomly generated data is not separable by a straight line
dat=data.frame(x=x, y=as.factor(y)) # We need to encode the response as a factor variable

# We now use the svm() function to fit a support vector classifier
svmfit=svm(y~., data=dat, kernel="linear", cost=10,scale=FALSE)
# the kernel="linear" argument is used to fit a support vector classifier
# the scale=FALSE tells SVM NOT to standardize the variables
# In some cases we may want to standardize the data and use scale=TRUE
plot(svmfit, dat) # Let's plot the SVC fit
# svmfit is the fitted model output and the input data
# Note: the jagged line is really a straight line
# Also, note that the support vectors are marked as crosses -- to find out which ones they are:
svmfit$index
summary(svmfit) # To get some basic information on the model
# e.g., 7 support vectors, cost=10, 2 classes, 4 support vectors in 1 and 3 in the other

# Let's try a different cost, e.g., 0.1
svmfit=svm(y~., data=dat, kernel="linear", cost=0.1,scale=FALSE)
plot(svmfit, dat)
svmfit$index
summary(svmfit)

# Let's do cross validation with the tune() function available in the "e1071" library
set.seed(1)
# This is how we can test various cost values
tune.out=tune(svm,y~.,data=dat,kernel="linear",ranges=list(cost=c(0.001, 0.01, 0.1, 1,5,10,100)))
summary(tune.out) # This will display the cross validation errors for each model
# Note: the cross validation was done with 10-fold
# Best performance is with cost=0.1, but you can list the best model with these 2 commands:
bestmod=tune.out$best.model
summary(bestmod)

# We can use the predict() function to predict the class label of test observations
xtest=matrix(rnorm(20*2), ncol=2)
ytest=sample(c(-1,1), 20, rep=TRUE)
xtest[ytest==1,]=xtest[ytest==1,] + 1
testdat=data.frame(x=xtest, y=as.factor(ytest))
ypred=predict(bestmod,testdat) # Using the best model to predict with the test data
table(predict=ypred, truth=testdat$y)

# Let's try to predict with different cost values
svmfit=svm(y~., data=dat, kernel="linear", cost=.01,scale=FALSE)
ypred=predict(svmfit,testdat)
table(predict=ypred, truth=testdat$y)

# Now let's try data that is separable by a straight line
x[y==1,]=x[y==1,]+0.5
plot(x, col=(y+5)/2, pch=19)
dat=data.frame(x=x,y=as.factor(y))
svmfit=svm(y~., data=dat, kernel="linear", cost=1e5)
summary(svmfit)
plot(svmfit, dat)
svmfit=svm(y~., data=dat, kernel="linear", cost=1)
summary(svmfit)
plot(svmfit,dat)

# 9.2 Support Vector Machine
# We proceed just like with support vector classifier, but using kernel="polynomial"
# or kernel="radial" depending on the desired fit method
# Let's generate some random data
set.seed(1)
x=matrix(rnorm(200*2), ncol=2)
x[1:100,]=x[1:100,]+2
x[101:150,]=x[101:150,]-2
y=c(rep(1,150),rep(2,50))
dat=data.frame(x=x,y=as.factor(y))
plot(x, col=y)
# Obviously, the separating boundary is not linear
# Separate 1/2 of the data for the training set
train=sample(200,100)
svmfit=svm(y~., data=dat[train,], kernel="radial", gamma=1, cost=1)
plot(svmfit, dat[train,])
summary(svmfit)
# Given the large number of training errors in the fitted model,
# let's try a larger cost (at the expense of a more irregular decision boundary)
svmfit=svm(y~., data=dat[train,], kernel="radial",gamma=1,cost=1e5)
plot(svmfit,dat[train,])
# Let's inspect the cross validation errors for a few costs and gamma values
set.seed(1)
tune.out=tune(svm, y~., data=dat[train,], kernel="radial", ranges=list(cost=c(0.1,1,10,100,1000),gamma=c(0.5,1,2,3,4)))
summary(tune.out)
# Best model is with cost=1 and gamma=2
# Now let's predict on the test set (i.e., -train)
table(true=dat[-train,"y"], pred=predict(tune.out$best.model,newx=dat[-train,]))

# 9.3 ROC Curves
# We use the "ROCR" package
install.packages("ROCR")
library(ROCR)
# Helper that draws an ROC curve (true-positive rate against false-positive
# rate) for a vector of decision values `pred` and true labels `truth`.
# Extra arguments (col=, add=, main=, ...) are forwarded to plot().
rocplot = function(pred, truth, ...) {
  roc_pred <- prediction(pred, truth)
  roc_perf <- performance(roc_pred, "tpr", "fpr")
  plot(roc_perf, ...)
}
# Note: use decision.values=T to obtain fitted values
# Positive fitted values mean the observations are assigned to one class;
# negative fitted values are assigned to the other class
svmfit.opt=svm(y~., data=dat[train,], kernel="radial",gamma=2, cost=1,decision.values=T)
# Now, the predict() function will output the fitted values
fitted=attributes(predict(svmfit.opt,dat[train,],decision.values=TRUE))$decision.values
par(mfrow=c(1,2))
rocplot(fitted,dat[train,"y"],main="Training Data")
# Let's increase the gamma value to 50
svmfit.flex=svm(y~., data=dat[train,], kernel="radial",gamma=50, cost=1, decision.values=T)
fitted=attributes(predict(svmfit.flex,dat[train,],decision.values=T))$decision.values
rocplot(fitted,dat[train,"y"],add=T,col="red")
# This gives a better model (hugging the corner more closely)

# Let's now try the ROC curves on the test data
fitted=attributes(predict(svmfit.opt,dat[-train,],decision.values=T))$decision.values
rocplot(fitted,dat[-train,"y"],main="Test Data")
fitted=attributes(predict(svmfit.flex,dat[-train,],decision.values=T))$decision.values
rocplot(fitted,dat[-train,"y"],add=T,col="red")
# Gamma=2 (svmfit.opt) does better with the test data

# 9.4 SVM with Multiple Classes
# svm() will use a one-vs-one approach for multiple classes
# Let's generate some data
set.seed(1)
x=rbind(x, matrix(rnorm(50*2), ncol=2))
y=c(y, rep(0,50))
x[y==0,2]=x[y==0,2]+2
dat=data.frame(x=x, y=as.factor(y))
par(mfrow=c(1,1))
plot(x,col=(y+1))
svmfit=svm(y~., data=dat, kernel="radial", cost=10, gamma=1)
plot(svmfit, dat)

# 9.4 Application to Gene Expression Data
library(ISLR)
names(Khan) # the Khan data set has xtrain, xtest, ytrain and ytest data sets already prepared
dim(Khan$xtrain)
dim(Khan$xtest)
length(Khan$ytrain)
length(Khan$ytest)
table(Khan$ytrain)
table(Khan$ytest)
dat=data.frame(x=Khan$xtrain, y=as.factor(Khan$ytrain))
# Let's try a linear kernel
out=svm(y~., data=dat, kernel="linear",cost=10)
summary(out)
table(out$fitted, dat$y)
# Note that there are no training errors (e.g., data is separable by a straight line)
# Now let's try it in the test set
dat.te=data.frame(x=Khan$xtest, y=as.factor(Khan$ytest))
pred.te=predict(out, newdata=dat.te)
table(pred.te, dat.te$y)
# Now there are 2 test set errors
4f66058711634dd0fa2945b00df0521a098bf4f1 | 5cc48f7a950a49fb17dabeb8d18476980a5e36af | /analyses/input/Fig5-FT_Competition_Index.R | 80d7f4d1db18ba8f3d6370f0f8a199509a773ee1 | [] | no_license | alicelinder/senior-moment | 3a8f7758fdb0aae33da77c63909fc4faaee7ed33 | dfd269ac59acae915a29219220f61d1fdb28573a | refs/heads/master | 2021-01-11T18:21:11.987326 | 2017-03-10T09:21:28 | 2017-03-10T09:21:28 | 69,621,509 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,254 | r | Fig5-FT_Competition_Index.R | # Functional traits and competitiveness at each site and across latitudes
## by Alice Linder
### Jan. 31, 2017

# NOTE(review): rm(list = ls()) plus a user-specific setwd() makes this
# script non-portable; it must be run from the project data/ directory.
rm(list = ls())
#options(stringsAsFactors=FALSE)
setwd("~/Library/Mobile Documents/com~apple~CloudDocs/GitHub/senior-moment/data")

# LIBRARIES HERE
library(dplyr)
library(devtools)
library(ggplot2)
library(ggfortify) #install.packages("ggfortify")

# Load pre-computed objects (convex-hull volumes, basal areas, climate data);
# these files are expected to define chvols.focal, ba.chvols, clim.focal, etc.
load("CHVols.centroid.Rdata")
load("CHVols.RData")
load("BA-CHVols.RData")
load("Clim.Focal.RData")

# Keep only the columns needed from the climate data.
clim.focal <- subset(clim.focal, select = c("Individual", "sp", "distance.to.climatic.centroid"))
#x <- subset(chvols.focal, sp == "ACEPEN")

# Site code is the second "_"-separated token of the individual ID.
clim.focal$site <- unlist(
  lapply(strsplit(clim.focal$Individual, "_"),
         function(x) x[[2]]))
clim.focal$relative.vol <- NA
hf <- chvols.focal[chvols.focal$site == "HF",]
# NOTE(review): this indexes clim.focal columns by site codes, which looks
# unintended, and `acepen` is overwritten further below -- verify.
acepen <- clim.focal[chvols.focal$site]
#clim.focal[which(c(clim.focal$sp == "SORAME" & clim.focal$site == "SH")),]$relative.vol <- chvols.focal[which(c(chvols.focal$site == "SH" & chvols.focal$sp == "FAGGRA")),]$relative.vol

# Drop species excluded from the analysis, then save the cleaned climate data.
clim.focal <- clim.focal[-which(clim.focal$sp == "HAMVIR"),]
clim.focal <- clim.focal[-which(clim.focal$sp == "SORAME"),]
save(clim.focal, file = "CHVol.Clim.RData")
clim.vol <- merge(ba.chvols, clim.focal, by = c("Individual"))
ba.chvols <- ba.chvols[-which(ba.chvols$sp == "SORAME"),]

# Plot setup: one color per species (4-color palette recycled).
myspecieslist <- unique(ba.chvols$sp)
mycolors <- rep(c("#1B9E77", "#D95F02", "#7570B3", "#E7298A"), 10) # need 6 really!
head(ba.chvols)

# plot in base package: empty frame, then per-species point overlays.
plot(ba.chvols$relative.vol, ba.chvols$relative.BA, type="n", ylab="Relative Basal Area", xlab="Relative Convex Hull Volume")
?plot
for (i in c(1:length(myspecieslist))){
  subby <- subset(ba.chvols, sp==myspecieslist[i])
  points(subby$relative.vol, subby$relative.BA, col=mycolors[i], pch="O")
}
# Example per-species regression; the abline() coefficients below were
# presumably read off fits like this one -- confirm.
lm(ba.chvols$relative.BA[ba.chvols$sp == "FAGGRA"] ~ ba.chvols$relative.vol[ba.chvols$sp == "FAGGRA"])
# ACEPEN
abline(0.0254792, -0.0002011, col = "#1B9E77", lwd = 2)
# BETPAP
abline(0.255235, 0.007697, col = "#D95F02", lwd = 2)
#CORALT
abline(-0.0001574, 0.0000892 , col = "#7570B3", lwd = 2)
#FAGGRA
abline(0.083677, -0.007733, col = "#E7298A", lwd = 2)
legend('topright', legend=c("A. pensylvanicum", "B. papyrifera", "C. alternifolia", "F. grandifola"),
       lty=1, col=mycolors, bty='n', cex=.75)

# Rebuild ba.chvols from the focal-centroid data.
load("Focal-Centroid.RData")
focal.centroid$Site <- unlist(
  lapply(strsplit(focal.centroid$Individual, "_"),
         function(x) x[[2]]))
names(focal.centroid)[names(focal.centroid) == "Site"] <- "site"
# NOTE(review): `Site` was just renamed to `site`, so focal.centroid$Site is
# NULL here and the grouping in this aggregate() call is broken -- verify.
comp.index.mean <- aggregate(focal.centroid$relative.BA, list(Species = focal.centroid$sp, Site = focal.centroid$Site), FUN = mean, na.rm=TRUE)
ba.chvols <- merge(chvols.focal, focal.centroid, by = c("sp", "site"))
ba.chvols <- ba.chvols[-which(ba.chvols$sp == "HAMVIR"),]
ba.chvols <- ba.chvols[-which(ba.chvols$sp == "SORAME"),]
save(ba.chvols, file = "BA-CHVols.RData")

# Species-wise linear fits of competitiveness (relative basal area) on volume.
summary(lm1 <- lm(relative.BA ~ vol, data = ba.chvols[ba.chvols$sp == "ACEPEN",]))
summary(lm1 <- lm(relative.BA ~ vol, data = ba.chvols[ba.chvols$sp == "BETPAP",]))
summary(lm1 <- lm(relative.BA ~ vol, data = ba.chvols[ba.chvols$sp == "CORALT",]))
summary(lm1 <- lm(relative.BA ~ vol, data = ba.chvols[ba.chvols$sp == "FAGGRA",]))
# NOTE(review): HAMVIR and SORAME rows were removed above, so these two fits
# run on empty data frames.
summary(lm1 <- lm(relative.BA ~ vol, data = ba.chvols[ba.chvols$sp == "HAMVIR",]))
summary(lm1 <- lm(relative.BA ~ vol, data = ba.chvols[ba.chvols$sp == "SORAME",]))

# Re-assemble the four focal species and log-transform both axes.
acepen <- ba.chvols[ba.chvols$sp == "ACEPEN",]
betpap <- ba.chvols[ba.chvols$sp == "BETPAP",]
coralt <- ba.chvols[ba.chvols$sp == "CORALT",]
faggra <- ba.chvols[ba.chvols$sp == "FAGGRA",]
ba.chvols.focal <- rbind(acepen, betpap, coralt, faggra)
ba.chvols.focal$vol <- log(ba.chvols.focal$vol)
ba.chvols.focal$relative.BA <- log(ba.chvols.focal$relative.BA)
#lme1 <- lmer(relative.BA ~ vol + (relative.BA | sp), data = ba.chvols.focal)
# NOTE(review): lme1 is never created (the lmer() call above is commented
# out), so this ranef() call errors; it would also require lme4.
ranef(lme1)

# Faceted per-species scatterplots with linear trend lines.
ggplot(ba.chvols,
       aes(vol,relative.BA, color = sp)) +
  geom_point() +
  geom_smooth(method="lm", se=F) +
  facet_wrap(~sp, ncol = 3, scales = "free") +
  xlab("Convex Hull Volume") +
  ylab("Relative Basal Area")

# Principal components analysis of functional traits to plot against competitiveness index
tree.traits <- read.csv("tree-traits.csv")
# remove Distance, bottom.angle, top.angle, bottom.m, Top.m
tree.traits <- tree.traits[,-13:-17]
# remove DBH.1-DBH.5, notes, route, and date sampled
tree.traits <- tree.traits[,-14:-21]
# calculate stem density
tree.traits$Stem.density = tree.traits$Stem.mass/tree.traits$Stem.volume
# calculate SLA (specific leaf area)
tree.traits$SLA = tree.traits$Leaf.area / tree.traits$Dry.mass
# calculate C:N ratio
tree.traits$c.n = tree.traits$X.C / tree.traits$X.N
# choose traits
tr <- c("SLA", "Stem.density", "DBH", "c.n")
# Keep only rows with complete data for the chosen traits.
traits <- tree.traits[complete.cases(tree.traits[,tr]),]
# apply skewness transformation, center and scale the variables prior to the application of PCA
# log transform
log.traits <- log(traits[tr])
# apply PCA - scale. = TRUE is highly advisable, but default is FALSE.
# NOTE(review): prcomp's argument is spelled `scale.`; `scale` only works
# via partial matching.
traits.pca <- prcomp(log.traits,
                     center = TRUE,
                     scale = TRUE)
# view standard deviation and PC via print method and summary method
print(traits.pca)
summary(traits.pca)
autoplot(prcomp(log.traits))
# view on graph -- HOW TO DO THIS?
|
4b6479acec9fe11b43c1d19e661a767c632295fe | 56453f61cb60eff27911795855020932bdf3df2c | /simulation/plot_reliability_sim.R | 1e1d419c922a34afec8c7afe42f27a76368af5eb | [] | no_license | yuxuanzhao2295/Matrix-Completion-with-Quantified-Uncertainty-through-Low-Rank-Gaussian-Copula | 17c6a9154f8112c5b699f24aa55880895ab5efcb | 92b96d3cdd87d791e3f29d053434d1acc616687f | refs/heads/master | 2022-12-30T13:35:28.257117 | 2020-10-22T03:47:15 | 2020-10-22T03:47:15 | 290,662,737 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,631 | r | plot_reliability_sim.R | library(ggplot2)
library(reshape2)
library(gridExtra)
library(grid)
# simulation plot Figure 1
errReliability_allsim_cont = array(0, dim=c(20,100,2,4),
dimnames = list(NULL, Quantile = 1:100, Setting = c('LowRank','HighRank'), Method = c('LRGC','MI+softImpute','MI+GLRM-l2','MI-PCA')))
errReliability_allsim_ord = array(0, dim=c(20,100,2,4),
dimnames = list(NULL, Quantile = 1:100, Setting = c('HighSNR','LowSNR'), Method = c('LRGC','MI+softImpute','MI+GLRM-BvS', 'MI-PCA')))
errReliability_allsim_bin = array(0, dim=c(20,100,2,4),
dimnames = list(NULL, Quantile = 1:100, Setting = c('HighSNR','LowSNR'), Method = c('LRGC','MI+softImpute', 'MI+GLRM-logistic', 'MI+GLRM-hinge')))
setwd("/Users/yuxuan/Documents/GitHub/Matrix-Completion-with-Quantified-Uncertainty-through-Low-Rank-Gaussian-Copula")
load("simulation/ResReliability_LRGC_softImpute_SimBinary.RData")
load("simulation/ResReliability_LRGC_softImpute_SimContinuous.RData")
load("simulation/ResReliability_LRGC_softImpute_SimOrdinal.RData")
load("simulation/Rebuttal_MIPCA.RData")
load("GLRM_julia/Res_sim_GLRM.RData")
load("GLRM_julia/errReliability_bin_GLRM_hinge.RData")
errReliability_allsim_cont[,,,1:2] = errReliability_cont
errReliability_allsim_cont[,,,3] = Res_sim_MMMF$errReliability_cont_MMMF
errReliability_allsim_cont[,,,4] = res_MIPCA_rebuttal$continuous[,,,2]
errReliability_allsim_ord[,,,1:2] = errReliability_ord
errReliability_allsim_ord[,,,3] = Res_sim_MMMF$errReliability_ord_MMMF
errReliability_allsim_ord[,,,4] = res_MIPCA_rebuttal$ord[,,,2]
errReliability_allsim_bin[,,,1:2] = errReliability_bin
errReliability_allsim_bin[,,,3] = Res_sim_MMMF$errReliability_bin_MMMF
errReliability_allsim_bin[,,,4] = errReliability_bin_MMMF_hinge
p1 = plot_error_quantile_twofactor(err = errReliability_allsim_cont, title = 'Continuous',xlab = 'Percentage of entries selected',ylab = 'NRMSE', errorbar = TRUE,
ylims=c(0,0.8), colorvals = c("brown1", "deepskyblue", "deeppink",'darkolivegreen3'), shapevals = c(15,18,3,4))
p2 = plot_error_quantile_twofactor(err = errReliability_allsim_ord, title = '1-5 Ordinal',xlab = 'Percentage of entries selected',ylab = 'MAE', errorbar = TRUE,
ylims=c(0,1), colorvals = c("brown1", "deepskyblue", "darkorange",'darkolivegreen3'), shapevals = c(15,18,17,4))
p3 = plot_error_quantile_twofactor(err = errReliability_allsim_bin, title = 'Binary',xlab = 'Percentage of entries selected',ylab = 'MAE', errorbar = TRUE,
ylims=c(0,0.25), colorvals = c("brown1", "deepskyblue", "cornsilk4","mediumpurple"), shapevals = c(15,18,16,11))
mylegend = produce_legend(names = c("LRGC", "MI+softImpute", "MI+GLRM-l2", "MI-PCA", "MI+GLRM-BvS", "MI+GLRM-logistic", "MI+GLRM-hinge"),
colorvals = c("brown1", "deepskyblue", "deeppink",'darkolivegreen3', 'darkorange', 'cornsilk4', 'mediumpurple'),
shapevals = c(15,18,3,4,17,16,11))
grid.arrange(arrangeGrob(p1,p2,p3,nrow=1),
mylegend, nrow=2,heights=c(9, 1))
# movielens result figure 2
errReliability_allmovielens = array(0, dim=c(5,100,2,3),
dimnames = list(NULL, Quantile = 1:100, Metric = c('MAE','RMSE'), Method = c('LRGC','MI+softImpute','MI+GLRM-l2')))
load("GLRM_julia/Res_movielens_GLRM.RData")
load("movielsn1m/resReliability_softimpute_movielens.RData")
#load("movielsn1m/ResLRGC_movielens.RData") # corrupted data
errReliability_allmovielens[,,,1] = ResLRGC_movielens$errReliability
errReliability_allmovielens[,,,2] = resReliability_softimpute_movielens
errReliability_allmovielens[,,,3] = res_MMMF$errReliability
p4 = plot_error_quantile_onefactor(err = errReliability_allmovielens[,,1,], title = '',xlab = '',ylab = 'MAE', errorbar = TRUE,
ylims=c(0,1), colorvals = c("brown1", "deepskyblue", "darkorange"), shapevals = c(15,18,17))
# plot from history data, to be updated
load("/Users/yuxuan/Downloads/Uncertainty_movie50obs.RData")
load("/Users/yuxuan/Jupyter_notebook/Uncertainty_movie50obs_MMMF.RData")
err = array(0,dim = c(5,100,3),dimnames = list(NULL,Quantile=1:100,Method=c('LRGC','10fold-softImpute','10fold-MMMF-BvS')))
err[,,1] = error_quantile_movie50$EM[,,1]
err[,,2] = error_quantile_movie50$soft[,,1]
err[,,3] = info_q[,,1]
p4 = plot_error_quantile_onefactor(err = err, title = '',xlab = '',ylab = 'MAE', errorbar = TRUE,
ylims=c(0,0.75), colorvals = c("brown1", "deepskyblue", "darkorange"), shapevals = c(15,18,17))
err = array(0,dim = c(5,100,3),dimnames = list(NULL,Quantile=1:100,Method=c('LRGC','10fold-softImpute','10fold-MMMF-BvS')))
err[,,1] = sqrt(error_quantile_movie50$EM[,,2])
err[,,2] = sqrt(error_quantile_movie50$soft[,,2])
err[,,3] = info_q[,,2]
p5 = plot_error_quantile_onefactor(err = err, title = '',xlab = '',ylab = 'RMSE', errorbar = TRUE,
ylims=c(0,1), colorvals = c("brown1", "deepskyblue", "darkorange"), shapevals = c(15,18,17))
mylegend = produce_legend(names = c("LRGC", "MI+softImpute", "MI+GLRM-BvS"),
colorvals = c("brown1", "deepskyblue", 'darkorange'),
shapevals = c(15,18,17), position = "right")
grid.arrange(arrangeGrob(p4,p5,nrow=1),
mylegend, ncol=2,widths=c(3, 1),
top = textGrob("Movielens 1M",gp=gpar(fontsize=10, font=2)),
bottom = textGrob("Percentages of entries selected",gp=gpar(fontsize=10, font=2)))
|
ea1d7df0dd558d364b0841d936a40b901384070c | fa6bf9f5bee1627b6158a5e693c8c15969c019ee | /Simulation & Risk/HW 2/Keyur - Code & Outputs/02-code_drywell.R | 12a97007f2c6fa558b35a524540f3b6bedc74307 | [] | no_license | gclark422/spring1-orange5 | 59eb61880226000b9f9a6244a1e653319f0d3e3a | 13586f04939ac66f5edf0db4cf34e66be2a81634 | refs/heads/master | 2020-12-06T23:02:53.183034 | 2020-02-22T03:27:17 | 2020-02-22T03:27:17 | 232,574,947 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,716 | r | 02-code_drywell.R | # Loading the libraries
library(tidyverse)
library(readxl)
library(lubridate)
library(scales)
library(xlsx)
# Setting options for the R session
set.seed(12345)
n_sims <- 100000
options(tibble.width = Inf)
windowsFonts("tw_cen_mt" = windowsFont("Tw Cen MT"))
ggplot2::theme_set(
ggplot2::theme_minimal(base_family = "tw_cen_mt")
)
# Defining a function to draw simulations from triangular distribution
rtri <- function(n, min, max, avg) {
u <- runif(n)
cut_off <- (avg - min) / (max - min)
sim1 <- min + sqrt((max - min) * (avg - min) * u)
sim2 <- max - sqrt((max - min) * (max - avg) * (1 - u))
ifelse(u < cut_off, sim1, sim2)
}
# 1. Since there is no production taking place, the only transactions taking
# place are costs in Year 0.
# 2. First one is Seismic Cost (Normal)
# 3. Second one is Lease Cost (Normal)
# 4. Third one is Professional Overhead Cost (Triangle)
# 5. Fourth one is Drilling Costs
drilling_cost <- readr::read_csv("Drilling Cost Simulations.csv") %>%
dplyr::pull(value)
seismic_cost <- rnorm(n = n_sims, mean = 600, sd = 50) * 960
lease_cost <- rnorm(n = n_sims, mean = 3, sd = 0.35) * 43000
professional_overhead <- rtri(n = n_sims, min = 172000, max = 279500, avg = 215000)
tot_cost <- seismic_cost + lease_cost + professional_overhead + drilling_cost
dry_df <- data.frame(tot_cost)
xlsx::write.xlsx2(dry_df, "Simulated Cost of Dry Well.xlsx", sheetName = "Simulations", row.names = FALSE)
# Histogram plot of the simulations of dry well costs
ggplot2::ggplot(dry_df, ggplot2::aes(x = tot_cost)) +
ggplot2::geom_histogram(colour = "white", fill = "#01B8AA") +
ggplot2::geom_hline(yintercept = 0) +
ggplot2::scale_x_continuous(labels = scales::dollar_format(scale = 10 ^ (-6), suffix = "M", accuracy = 0.1)) +
ggplot2::scale_y_continuous(labels = scales::comma_format()) +
ggplot2::labs(x = "Cost of Single Dry Well", y = "Frequency")
ggplot2::ggsave("Simulated Cost of Dry Well.png", device = "png")
# Calculating the descriptive statistics of NPV
funs <- c("Minimum" = min,
"Maximum" = max,
"5th Percentile" = function(x) quantile(x, probs = 0.05),
"First Quartile" = function(x) quantile(x, probs = 0.25),
"Median" = median,
"Third Quartile" = function(x) quantile(x, probs = 0.75),
"95th Percentile" = function(x) quantile(x, probs = 0.95),
"Mean" = mean,
"Standard Deviation" = sd)
purrr::map_dbl(funs, ~.x(tot_cost)) %>%
tibble::enframe(name = "Descriptive Statistic", value = "Cost of Dry Well") %>%
as.data.frame(stringsAsFactors = FALSE) %>%
xlsx::write.xlsx2("Simulated Cost of Dry Well.xlsx", sheetName = "Descriptive Statistics", append = TRUE, row.names = FALSE) |
6a79767183e529e1db039a30acdce931a6d64671 | f324a32f5664544c1956ffdc1dbad61a18751711 | /man/response.surface.lsd.Rd | 6e9d3fff86270613632bc17018bbd4f4546b378c | [] | no_license | cran/LSDsensitivity | 90994ab1b15d66b2871ef7e2aa3349df6b80608c | 5d0c283229b28a57a9ed8306f8f1b64aae9c9ac8 | refs/heads/master | 2022-07-11T11:12:45.433337 | 2022-07-03T20:50:02 | 2022-07-03T20:50:02 | 112,502,818 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,820 | rd | response.surface.lsd.Rd | \name{response.surface.lsd}
\alias{response.surface.lsd}
\title{
Generate the meta-model 3D response surface data
}
\description{
This function produces a data object for the three-dimensional graphical representations of the meta-model response surfaces for a set of factors (parameters), including the confidence interval for the surfaces.
}
\usage{
response.surface.lsd( data, model, sa, gridSz = 25, defPos = 2,
factor1 = 0, factor2 = 0, factor3 = 0 )
}
\arguments{
\item{data}{
an object created by a previous call to \code{\link{read.doe.lsd}} which contains all the required experimental data for the analysis.
}
\item{model}{
an object created by a previous call to \code{\link{kriging.model.lsd}} or \code{\link{polynomial.model.lsd}} which contains the meta-model estimated hyper-parameters.
}
\item{sa}{
an object created by a previous call to \code{\link{sobol.decomposition.lsd}} which contains the estimated total and conditional variances for all the meta-model factors.
}
\item{gridSz}{
integer: the number of divisions in the 3D wire frame grid. The default is 25.
}
\item{defPos}{
1, 2, 3: the position of the default/calibration configuration on the 3 plot sequence. The default is 2 (center position).
}
\item{factor1}{
integer: the index of the first most-important factor: 0 = automatic selection (according to the Sobol index, the default); any other number = the selected factor index, according to DoE factor order.
}
\item{factor2}{
integer: the index of the second most-important factor: 0 = automatic selection (according to the Sobol index, the default); any other number = the selected factor index, according to DoE factor order.
}
\item{factor3}{
integer: the index of the third most-important factor: 0 = automatic selection (according to the Sobol index, the default); any other number = the selected factor index, according to DoE factor order.
}
}
\details{
This function produces data for three different wire frame 3D plots. In the 3 plots, the x-y plane is defined by the 2 most-important factors (calculated or set by the user in \code{\link{sobol.decomposition.lsd}}) and the z axis represents the response variable chosen. The three different plots show the response surface for three values of the third most-important factor: the minimum, the default/calibration and the maximum. The order in which the three response surfaces are shown is defined by \code{defPos}.
The automatically selected most-important factors can be overridden with any factors chosen by the user through the use of the arguments \code{factor1}, \code{factor2} and \code{factor3}. This way, the response surfaces can be represented for a combination of any 3 factors (parameters) in the model.
}
\value{
The function returns an object/list of class \code{response} containing three similar objects, one for each 3D plot, each of them comprised of:
\item{calib}{the predicted meta-model response values on each point of the 3D grid.}
\item{factor}{the predicted values for each individual factor.}
\item{default}{the predicted values for the default/calibration configuration.}
}
\author{
\packageAuthor{LSDsensitivity}
}
\note{
See the note in \link[LSDsensitivity]{LSDsensitivity-package} for step-by-step instructions on how to perform the complete sensitivity analysis process using LSD and R.
}
\seealso{
\code{\link{read.doe.lsd}()},
\code{\link{kriging.model.lsd}()},
\code{\link{polynomial.model.lsd}()},
\code{\link{sobol.decomposition.lsd}()}
}
\examples{
# get the example directory name
path <- system.file( "extdata/sobol", package = "LSDsensitivity" )
# Steps to use this function:
# 1. define the variables you want to use in the analysis
# 2. load data from a LSD simulation saved results using read.doe.lsd
# 3. fit a Kriging (or polynomial) meta-model using kriging.model.lsd
# 4. identify the most influential factors applying sobol.decomposition.lsd
# 5. calculate the response surface for the selected factors using model.limits.lsd
# 6. plot the response surface
lsdVars <- c( "var1", "var2", "var3" ) # the definition of existing variables
dataSet <- read.doe.lsd( path, # data files folder
"Sim3", # data files base name (same as .lsd file)
"var3", # variable name to perform the sensitivity analysis
does = 2, # number of experiments (data + external validation)
saveVars = lsdVars ) # LSD variables to keep in dataset
model <- kriging.model.lsd( dataSet ) # estimate best Kriging meta-model
SA <- sobol.decomposition.lsd( dataSet, model ) # find Sobol indexes
resp <- response.surface.lsd( dataSet, # LSD experimental data set
model, # estimated meta-model
SA ) # Sobol sensitivity analysis results
theta3d <- 310 # horizontal view angle
phi3d <- 30 # vertical view angle
grid3d <- 25
for( i in 1 : 3 ) { # do for each top factor
# plot 3D grid charts
zMat <- matrix( resp$calib[[ i ]]$mean, grid3d, grid3d, byrow = TRUE )
zlim <- range( zMat, na.rm = TRUE )
vt <- persp( resp$grid[[ 1 ]], resp$grid[[ 2 ]], zMat, col = "gray90",
xlab = colnames( dataSet$doe )[ SA$topEffect[ 1 ] ], zlim = zlim,
ylab = colnames( dataSet$doe )[ SA$topEffect[ 2 ] ], zlab = dataSet$saVarName,
theta = theta3d, phi = phi3d, ticktype = "detailed" )
}
}
\keyword{ models }
\keyword{ design }
|
2ad7566741b561f06e119aa95741babc34f20450 | 719f490f2c7b2a82c212cca364d145e1a2ce9d0a | /R_Woodcock/merge_script_bydate.R | 1aed2e5983f2eb6d66eda21c1b05accce1a1f5b7 | [] | no_license | eeliz1/Woodcock | a262bae4b2e9e2a60ce308aefcf595f0fbba4dd8 | b29244a527e8a8a992049040eee92350a7583402 | refs/heads/master | 2020-09-20T07:49:05.102012 | 2018-02-19T18:48:10 | 2018-02-19T18:48:10 | 67,633,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | merge_script_bydate.R | #Example Code for Lisa
library(plyr)
#Set working directory
setwd("C:/Users/Elisa/Documents/Woodcock/R_Woodcock/Veg_csv")
#this tells you what files are in the working directory folder then slices the list
filenames=list.files()
x = filenames[9:10]
x
#This combines all thefiles you created together into a object called
comb.files=ldply(x, read.csv)
str(comb.files)
#this writes all the combined files into a single file called comb_woodcock
write.csv(comb.files,
"C:/Users/Elisa/Documents/Woodcock/R_Woodcock/Veg_csv/date/171368602_Jan12_2016_all.csv")
|
e2cd2138a64b076c3fee9cdb03b659d22929ed4d | f45333719c9d6f1b55c95be0f9dc03dea9b6c086 | /man/load_lna.Rd | b0b3c09dea1847560a42d43a1d0e3d9023fc83ba | [] | no_license | fintzij/stemr | 7c95bde20622142c7f62aad49cfb764a46342b47 | 185375e0933331da49bdc53563ce61338de4c450 | refs/heads/master | 2022-05-19T18:29:05.622681 | 2022-03-16T17:16:52 | 2022-03-16T17:16:52 | 52,254,628 | 8 | 6 | null | 2022-03-16T01:35:15 | 2016-02-22T07:16:45 | R | UTF-8 | R | false | true | 997 | rd | load_lna.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_lna.R
\name{load_lna}
\alias{load_lna}
\title{Construct and compile the functions for proposing an LNA path, with
integration of the LNA ODEs accomplished using the Boost odeint library.}
\usage{
load_lna(lna_rates, compile_lna, messages, atol, rtol, stepper)
}
\arguments{
\item{lna_rates}{list containing the LNA rate functions, derivatives, and
parameter codes}
\item{compile_lna}{if TRUE, code will be generated and compiled. If a
character string for the name of a file that does not yet exist, code will
be generated but not compiled. If the name of a file that exists in the
current working directory, the code in the file will be compiled.}
\item{messages}{logical: should progress messages be printed?}
}
\value{
list containing the LNA pointers and calling code
}
\description{
Construct and compile the functions for proposing an LNA path, with
integration of the LNA ODEs accomplished using the Boost odeint library.
}
|
acfefd12eb6000a7c3736f1a576c10e1713bde78 | f464a388b87c7c9d0af4e25d693c7fb9879ebd29 | /PH125.6 Data Wrangling/R code/gather.R | 6f44995cce565f0d7cd2b1809fac4fa952f97147 | [] | no_license | alvarobarbera/HarvardX-PH125-DataScience | 5e57e8d5d36bc57992fbf9a6477a51465ef5fc02 | 1a152f47b20131d71bc61ac94282867843f5a3ae | refs/heads/master | 2022-07-12T09:21:19.496692 | 2020-05-15T08:37:51 | 2020-05-15T08:37:51 | 260,189,883 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 649 | r | gather.R | library(tidyverse)
library(dslabs)
path <- system.file("extdata", package="dslabs")
filename <- file.path(path, "fertility-two-countries-example.csv")
wide_data <- read_csv(filename)
wide_data
# gather
# converting wide into tidy
new_tidy_data <- wide_data %>% gather(year, fertility, '1960':'2015')
new_tidy_data
# note year is stored as character, to convert it
new_tidy_data <- wide_data %>% gather(year, fertility, '1960':'2015', convert=TRUE)
# or
new_tidy_data <- new_tidy_data %>% mutate(year=as.numeric(year))
class(new_tidy_data$year)
# now we can plot
new_tidy_data %>% ggplot(aes(year, fertility, color=country)) + geom_point()
|
008cfae03b34f03690fdf441fe20696917f86b83 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.networking/man/route53recoveryreadiness_get_resource_set.Rd | 025b6494f5e9a0e318f553d08a8941ce4651db62 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 687 | rd | route53recoveryreadiness_get_resource_set.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53recoveryreadiness_operations.R
\name{route53recoveryreadiness_get_resource_set}
\alias{route53recoveryreadiness_get_resource_set}
\title{Displays the details about a resource set, including a list of the
resources in the set}
\usage{
route53recoveryreadiness_get_resource_set(ResourceSetName)
}
\arguments{
\item{ResourceSetName}{[required] Name of a resource set.}
}
\description{
Displays the details about a resource set, including a list of the resources in the set.
See \url{https://www.paws-r-sdk.com/docs/route53recoveryreadiness_get_resource_set/} for full documentation.
}
\keyword{internal}
|
d6c924d3e63c134a59002d31d1c2f95878ab174d | 3cbe75bc17f544c62a3ba1032997289c11f80223 | /R/data.R | 7b7cbe9cae7cbba2b71f18cf8725add608673d0f | [] | no_license | Fjellrev/nngeo | 352dfc5f3a10dc46962a5345a85cd632be1ea737 | dcee9c4826b2ef8180422f6429f5a293a414c525 | refs/heads/master | 2023-06-29T23:23:30.766756 | 2021-07-15T20:37:53 | 2021-07-15T20:37:53 | 391,885,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,684 | r | data.R | #' Point layer of towns in Israel
#'
#' A \code{sf} POINT layer of towns in Israel, based on a subset from the \code{maps::world.cities} dataset.
#'
#' @format A \code{sf} POINT layer with 193 features and 4 attributes:
#' \describe{
#' \item{name}{Town name}
#' \item{country.etc}{Country name}
#' \item{pop}{Population size}
#' \item{capital}{Is it a capital?}
#' }
#' @examples
#' plot(towns)
"towns"
#' Point layer of the three largest cities in Israel
#'
#' A \code{sf} POINT layer of the three largest cities in Israel: Jerusalem, Tel-Aviv and Haifa.
#'
#' @format A \code{sf} POINT layer with 3 features and 1 attribute:
#' \describe{
#' \item{name}{Town name}
#' }
#' @examples
#' plot(cities)
"cities"
#' Polygonal layer of water bodies in Israel
#'
#' A \code{sf} POLYGON layer of the four large water bodies in Israel:
#' \itemize{
#' \item{Mediterranean Sea}
#' \item{Red Sea}
#' \item{Sea of Galilee}
#' \item{Dead Sea}
#' }
#'
#' @format A \code{sf} POLYGON layer with 4 features and 1 attribute:
#' \describe{
#' \item{name}{Water body name}
#' }
#' @examples
#' plot(water)
"water"
#' Sample network dataset: lines
#'
#' An \code{sf} object based on the \code{edge_table} sample dataset from pgRouting 2.6 tutorial
#'
#' @format An \code{sf} object
#' @references
#' \url{https://docs.pgrouting.org/2.6/en/sampledata.html}
#' @examples
#' plot(line)
"line"
#' Sample network dataset: points
#'
#' An \code{sf} object based on the \code{pointsOfInterest} sample dataset from pgRouting 2.6 tutorial
#'
#' @format An \code{sf} object
#' @references
#' \url{https://docs.pgrouting.org/2.6/en/sampledata.html}
#' @examples
#' plot(pnt)
"pnt"
|
6f1113bcb6b89fdd8b7662815cc8b683e707c166 | 5e85df6e3edead3eca4a2a4730f1705d1228c23d | /unsorted_code/image.plot.g.R | fae61c468a6419f7ac88f6684efae11fdf89cfa0 | [
"MIT"
] | permissive | giorgioarcara/R-code-Misc | 125ff2a20531b2fbbc9536554042003b4e121766 | decb68d1120e43df8fed29859062b6a8bc752d1d | refs/heads/master | 2022-11-06T14:44:02.618731 | 2022-10-26T07:34:09 | 2022-10-26T07:34:09 | 100,048,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,988 | r | image.plot.g.R | ### modificata da funzione in package "fields"
# eeg.axis=FALSE, se TRUE allora prende gli argomenti di eeg.axis.args, per disegnare un asse delle x per eeg.
# eeg.axis.args, argomenti da passare alla funzione axis, per diseganre degli assi. Servirà basarsi sulla funzione msectopoints.
image.plot.g=function (..., add = FALSE, nlevel = 64, horizontal = FALSE,
legend.shrink = 0.9, legend.width = 1.2, legend.mar = ifelse(horizontal,
3.1, 5.1), legend.lab = NULL, graphics.reset = FALSE,
bigplot = NULL, smallplot = NULL, legend.only = FALSE, col = tim.colors(nlevel),
lab.breaks = NULL, axis.args = NULL, legend.args = NULL,
midpoint = FALSE, border = NA, lwd.poly = 1, eeg.axis=FALSE, eeg.axis.args=NULL)
{
old.par <- par(no.readonly = TRUE)
info <- image.plot.info(...)
if (add) {
big.plot <- old.par$plt
}
if (legend.only) {
graphics.reset <- TRUE
}
if (is.null(legend.mar)) {
legend.mar <- ifelse(horizontal, 3.1, 5.1)
}
temp <- image.plot.plt(add = add, legend.shrink = legend.shrink,
legend.width = legend.width, legend.mar = legend.mar,
horizontal = horizontal, bigplot = bigplot, smallplot = smallplot)
smallplot <- temp$smallplot
bigplot <- temp$bigplot
if (!legend.only) {
if (!add) {
par(plt = bigplot)
}
if (!info$poly.grid) {
image(..., add = add, col = col)
}
else {
poly.image(..., add = add, col = col, midpoint = midpoint,
border = border, lwd.poly = lwd.poly)
}
big.par <- par(no.readonly = TRUE)
}
##################################
## PARTE AGGIUNTA DA ME #######
if (eeg.axis=="TRUE"){
do.call("axis", eeg.axis.args)
}
################################
####################################
if ((smallplot[2] < smallplot[1]) | (smallplot[4] < smallplot[3])) {
par(old.par)
stop("plot region too small to add legend\n")
}
ix <- 1
minz <- info$zlim[1]
maxz <- info$zlim[2]
binwidth <- (maxz - minz)/nlevel
midpoints <- seq(minz + binwidth/2, maxz - binwidth/2, by = binwidth)
iy <- midpoints
iz <- matrix(iy, nrow = 1, ncol = length(iy))
breaks <- list(...)$breaks
par(new = TRUE, pty = "m", plt = smallplot, err = -1)
if (!is.null(breaks) & !is.null(lab.breaks)) {
axis.args <- c(list(side = ifelse(horizontal, 1, 4),
mgp = c(3, 1, 0), las = ifelse(horizontal, 0, 2),
at = breaks, labels = lab.breaks), axis.args)
}
else {
axis.args <- c(list(side = ifelse(horizontal, 1, 4),
mgp = c(3, 1, 0), las = ifelse(horizontal, 0, 2)),
axis.args)
}
if (!horizontal) {
if (is.null(breaks)) {
image(ix, iy, iz, xaxt = "n", yaxt = "n", xlab = "",
ylab = "", col = col)
}
else {
image(ix, iy, iz, xaxt = "n", yaxt = "n", xlab = "",
ylab = "", col = col, breaks = breaks)
}
}
else {
if (is.null(breaks)) {
image(iy, ix, t(iz), xaxt = "n", yaxt = "n", xlab = "",
ylab = "", col = col)
}
else {
image(iy, ix, t(iz), xaxt = "n", yaxt = "n", xlab = "",
ylab = "", col = col, breaks = breaks)
}
}
do.call("axis", axis.args)
box()
if (!is.null(legend.lab)) {
legend.args <- list(text = legend.lab, side = ifelse(horizontal,
1, 4), line = legend.mar - 2)
}
if (!is.null(legend.args)) {
do.call(mtext, legend.args)
}
mfg.save <- par()$mfg
if (graphics.reset | add) {
par(old.par)
par(mfg = mfg.save, new = FALSE)
invisible()
}
else {
par(big.par)
par(plt = big.par$plt, xpd = FALSE)
par(mfg = mfg.save, new = FALSE)
invisible()
}
} |
8bd09443dda7636f747ba22a7ab4173457a8beeb | 3deac26e5cb861cb58cbab395c5467d2a6c9888f | /FluxTeam/PFT3_Peru_R_scripts/LICOR_7500_Directory_Loop.R | b2ecf73c3176b62f1a9b1f599d28291486f5a552 | [] | no_license | EnquistLab/PFTC3_Peru | 88b09f0ee99642a2c5ab3c72fc6e21ec1ed0b5c4 | 829e225f5afb584338ecd672621c6d3e390346e2 | refs/heads/master | 2021-03-22T02:56:52.831251 | 2020-04-05T15:26:36 | 2020-04-05T15:26:36 | 109,146,843 | 4 | 7 | null | 2018-08-27T21:21:12 | 2017-11-01T15:12:16 | R | UTF-8 | R | false | false | 1,912 | r | LICOR_7500_Directory_Loop.R | source("LICOR_7500_Flux_Analysis.R")
source("GAS_HOUND_ANALYSIS.R")
readline("Please set the working directory to the folder \n that contains the LiCOR files to be analyzed. \n Do so with the upcoming prompt. \n Note that you must choose a(any) file in the \n folder that you want to set as the working directory. \n Please press 'return' to continue.")
setwd(dirname(file.choose()))
directory <- dir()
stats.df <- c()
#Check for soil respiration data
soil.names <- grep("soil", dir(), value=TRUE)
if(length(soil.names) == 0)
{
photo.names <- grep("[^resp].txt", grep("[^_a]\\.txt", dir(), value = TRUE), value = TRUE)
ambient.names <- grep("a.txt", dir(), value = TRUE)
resp.names <- grep("resp.txt", dir(), value = TRUE)
for (i in 1:length(photo.names))
{
stats.df <- rbind(stats.df, nee.fit(photo.names[i]))
}
if (length(resp.names) > 0)
{
print("Performing ecosystem respiration fits")
for (i in 1:length(resp.names))
{
stats.df <- rbind(stats.df, nee.fit(resp.names[i]))
}
}
stats.df <- as.data.frame(stats.df)
names.vec <- c("tstart", "tfinish", "time", "camb", "tav", "pav", "wav", "nee_lm", "nee_exp", "LM rsqd", "non-linear sigma", "aic_lm", "aic_nlm")
for(i in 1:length(names.vec))
{
names(stats.df)[i] <- names.vec[i]
}
write.csv(stats.df, file = paste(paste(strsplit(getwd(), "/")[[1]][length(strsplit(getwd(), "/")[[1]])], "summary", sep = " "), ".csv", sep = ""))
}else
{
for (i in 1:length(soil.names))
{
stats.df <- rbind(stats.df, soil_r_fit(soil.names[i]))
}
stats.df <- as.data.frame(stats.df)
names.vec <- c("tstart", "tfinish", "time", "tav", "pav", "wav", "nee_lm", "LM rsqd", "aic_lm")
for(i in 1:length(names.vec))
{
names(stats.df)[i] <- names.vec[i]
}
write.csv(stats.df, file = paste(paste(strsplit(getwd(), "/")[[1]][length(strsplit(getwd(), "/")[[1]])], "summary", sep = " "), ".csv", sep = ""))
}
|
a90fd77f87b15a6285d181621f3c6f8202778db0 | 87c22c0890768c7731f944009ace4de422738965 | /week6exercise_distributions.R | df2ef436f2e137f9f4518e6ca7dc64e5a05c3143 | [] | no_license | erichseamon/computational-analysis | bc2d23367237750761f2652d448d02a0a0b4e6ea | aca4509cb166325d574dea067139e1f2fc58911a | refs/heads/master | 2020-04-23T20:37:06.715201 | 2015-08-11T19:43:42 | 2015-08-11T19:43:42 | 40,561,552 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,727 | r | week6exercise_distributions.R | #---------------------------------------------------------------------------------------------------------------------#
# TITLE: week6exercise_distributions.R
#
# COURSE: Computational Data Analysis (FOR504)
# Assignment #3
#
# AUTHOR: Erich Seamon
#
# INSTITUITON: College of Natural Resources
# University of Idaho
#
# DATE: 23 Sept. 2014
#
# COMMENTS: This file contains code to test the fit of dataset to varying distributions
#
#
#---------------Setting the working directory and clearing the workspace----------------------------------------------#
setwd("X:/Dropbox/ES Research/ES Classwork/FOR504/data") # Set working directory
rm(list=ls()) # Clear workspace (remove varialbes)
graphics.off() # Close current graphing windows
cat("\14") # Clear command prompt
library(MASS)
library(scatterplot3d)
#-------Loading the dataset to use for analysis ----------------------------------------------------------------------#
week6data = read.csv("Week6Data.csv", # Read in random seedling data
header=TRUE,
skip=1,
col.names=c("var1", "var2", "var3", "var4"),)
#-------Creating variables for each column in dataset-----------------------------------------------------------------#
var1 = week6data[,1] # [site] The first column, sites, in seedlingdata
var2 = week6data[,2] # [class] The second column, class, in seedlingdata
var3 = week6data[,3] # [Spp] The third column, spp, in seedlingdata
var4 = week6data[,4] # [Ht] The fourth column, ht (height), in seedlingdata
#----- var1: fit a normal distribution by maximum likelihood and overlay it on a histogram -----
#graphics.off()
#set.seed(1) # Used to repeat functions that generate random data
#n = 1000 # Number of random numbers to use.
# NOTE(review): this setwd() repeats the one at the top of the script (path differs only in case).
setwd("X:/Dropbox/ES Research/ES Classwork/FOR504/Data") #----set the working directory
mu = mean(var1) # sample mean, shown in the plot title
sigma = sd(var1) # sample standard deviation, shown in the plot title
#mu = 100 # Set the mean.
#sigma = 2 # Set the standard deviation.
#randNorm = rnorm(n, mu, sigma) # Use rnorm function to generate random numbers.
muExp = mean(var1)
#muExp = 10 # Set the mean.
#randExp = sort(rexp(n = n,rate = 1/muExp))
b = 150 # Set the scale parameter, b.
c = 1.5 # Set the shape parameter, c.
#randWbl = sort(rweibull(n = n, scale = b,shape = c))
lambda = mean(var1)
#lambda = 10 # Set the mean.
#randPois = sort(rpois(n = n, lambda = lambda))
fit = fitdistr(var1,"normal") # Fit normal distribution to data (MASS::fitdistr, MLE).
muHat = fit$estimate[1] # MLE mean
sigmaHat = fit$estimate[2] # MLE standard deviation
nHat = dnorm(sort(var1),muHat,sigmaHat) # fitted density on sorted x so lines() draws a smooth curve
win.graph()
hist(var1, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var1 normal - ", "mu = ", mu, "; sigma = ", sigma))
lines(sort(var1),nHat,lwd=2)
text(min(var1),max(nHat), pos = 4,
paste("Fit: mu = ",round(muHat*100)/100))
text(min(var1),0.9*max(nHat), pos = 4,
paste("sigma = ", round(sigmaHat*100)/100))
# The exponential / Weibull / Poisson fits below are disabled for var1.
#fit = fitdistr(i,"exponential")
#muHat = 1/fit$estimate[1] # The parameter for the exponential distirubtion
# is the rate; the mean of an exponential distribution is 1 / rate.
#nHat = dexp(i,fit$estimate[1])
#win.graph()
#hist(i,breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x",
#main = paste("mu = ", muExp))
#lines(i,nHat,lwd=2)
#text(max(i),max(nHat), pos = 2,
#paste("Fit: mu = ",round(muHat*100)/100))
#fit = fitdistr(i, "weibull")
#bHat = fit$estimate[2] # Estiamted scale parameter
#cHat = fit$estimate[1] # Estimated shape parameter
#nHat = dweibull(i, cHat, bHat)
#win.graph()
#hist(i, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x")
#main = paste("b = ", b,"; c = ", c))
#lines(i,nHat,lwd=2)
#text(max(i),max(nHat), pos = 2,
#paste("Fit: b = ", round(bHat*100)/100, "; c = ", round(cHat*100)/100))
#fit = fitdistr(i, "Poisson")
#lambdaHat = fit$estimate
#nHat = dpois(i,lambdaHat)
#win.graph()
#hist(i, breaks = c(min(i):max(i)), freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x",
#main = paste("lambda = ", lambda))
#points(i,nHat,lwd=2)
#text(min(i),max(nHat), pos = 4,
#paste("Fit: lambda = ", round(lambdaHat*100)/100))
#----- var2: fit and plot candidate distributions (normal, exponential, Weibull) -----
mu = mean(var2)    # sample mean, shown in the plot title
sigma = sd(var2)   # sample standard deviation, shown in the plot title
#mu = 100 # Set the mean.
#sigma = 2 # Set the standard deviation.
#randNorm = rnorm(n, mu, sigma) # Use rnorm function to generate random numbers.
muExp = mean(var2) # mean shown in the exponential plot title
#muExp = 10 # Set the mean.
#randExp = sort(rexp(n = n,rate = 1/muExp))
b = 150 # Scale parameter b, shown in the Weibull plot title.
c = 1.5 # Shape parameter c, shown in the Weibull plot title.
        # NOTE: 'c' is only a plain variable here; calls c(...) still resolve to base::c.
#randWbl = sort(rweibull(n = n, scale = b,shape = c))
lambda = mean(var2)
#lambda = 10 # Set the mean.
#randPois = sort(rpois(n = n, lambda = lambda))

## Normal fit (maximum likelihood via MASS::fitdistr)
fit = fitdistr(var2, "normal")
muHat = fit$estimate[1]    # MLE mean
sigmaHat = fit$estimate[2] # MLE standard deviation
nHat = dnorm(sort(var2), muHat, sigmaHat)
win.graph()
hist(var2, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var2 normal - ","mu = ", mu, "; sigma = ", sigma))
lines(sort(var2), nHat, lwd=2)
text(min(var2), max(nHat), pos = 4,
paste("Fit: mu = ",round(muHat*100)/100))
# BUG FIX: was min(var1) (copy/paste from the var1 section); label now anchors to var2.
text(min(var2), 0.9*max(nHat), pos = 4,
paste("sigma = ", round(sigmaHat*100)/100))

## Exponential fit
fit = fitdistr(var2, "exponential")
muHat = 1/fit$estimate[1] # fitdistr returns the rate; mean of an exponential = 1/rate
# Evaluate the density on sorted x so lines() draws a monotone curve, not a zigzag.
nHat = dexp(sort(var2), fit$estimate[1])
win.graph()
hist(var2, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var2 exp - ","mu = ", muExp))
lines(sort(var2), nHat, lwd=2)
# BUG FIX: was text(var2, ...), which stamped the label at every data point.
text(max(var2), max(nHat), pos = 2,
paste("Fit: mu = ",round(muHat*100)/100))

## Weibull fit
fit = fitdistr(var2, "weibull")
bHat = fit$estimate[2] # Estimated scale parameter
cHat = fit$estimate[1] # Estimated shape parameter
nHat = dweibull(sort(var2), cHat, bHat) # sorted x for a clean curve (see above)
win.graph()
hist(var2, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var2 weibull - ","b = ", b,"; c = ", c))
lines(sort(var2), nHat, lwd=2)
text(max(var2), max(nHat), pos = 2,
paste("Fit: b = ", round(bHat*100)/100, "; c = ", round(cHat*100)/100))

## Poisson fit (left disabled)
#fit = fitdistr(i, "Poisson")
#lambdaHat = fit$estimate
#nHat = dpois(i,lambdaHat)
#win.graph()
#hist(i, breaks = c(min(i):max(i)), freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x",
#main = paste("lambda = ", lambda))
#points(i,nHat,lwd=2)
#text(min(i),max(nHat), pos = 4,
#paste("Fit: lambda = ", round(lambdaHat*100)/100))
#----- var3: fit and plot candidate distributions (normal, exponential, Weibull) -----
# NOTE: the original had this parameter block duplicated verbatim; one copy removed.
mu = mean(var3)    # sample mean, shown in the plot title
sigma = sd(var3)   # sample standard deviation, shown in the plot title
#mu = 100 # Set the mean.
#sigma = 2 # Set the standard deviation.
#randNorm = rnorm(n, mu, sigma) # Use rnorm function to generate random numbers.
muExp = mean(var3) # mean shown in the exponential plot title
#muExp = 10 # Set the mean.
#randExp = sort(rexp(n = n,rate = 1/muExp))
b = 150 # Scale parameter b, shown in the Weibull plot title.
c = 1.5 # Shape parameter c, shown in the Weibull plot title.
#randWbl = sort(rweibull(n = n, scale = b,shape = c))
lambda = mean(var3)
#lambda = 10 # Set the mean.
#randPois = sort(rpois(n = n, lambda = lambda))

## Normal fit (maximum likelihood via MASS::fitdistr)
fit = fitdistr(var3,"normal") # Fit normal distribution to data.
muHat = fit$estimate[1] # MLE mean
sigmaHat = fit$estimate[2] # MLE standard deviation
nHat = dnorm(sort(var3),muHat,sigmaHat)
win.graph()
hist(var3, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var3 normal - ","mu = ", mu, "; sigma = ", sigma))
lines(sort(var3),nHat,lwd=2)
text(min(var3),max(nHat), pos = 4,
paste("Fit: mu = ",round(muHat*100)/100))
text(min(var3),0.9*max(nHat), pos = 4,
paste("sigma = ", round(sigmaHat*100)/100))

## Exponential fit
fit = fitdistr(var3,"exponential")
muHat = 1/fit$estimate[1] # fitdistr returns the rate; mean of an exponential = 1/rate
# Evaluate the density on sorted x so lines() draws a monotone curve, not a zigzag.
nHat = dexp(sort(var3),fit$estimate[1])
win.graph()
hist(var3,breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var3 exp - ","mu = ", muExp))
lines(sort(var3),nHat,lwd=2)
text(max(var3),max(nHat), pos = 2,
paste("Fit: mu = ",round(muHat*100)/100))

## Weibull fit
fit = fitdistr(var3, "weibull")
bHat = fit$estimate[2] # Estimated scale parameter
cHat = fit$estimate[1] # Estimated shape parameter
nHat = dweibull(sort(var3), cHat, bHat) # sorted x for a clean curve (see above)
win.graph()
hist(var3, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var3 weibull - ","b = ", b,"; c = ", c))
lines(sort(var3),nHat,lwd=2)
text(max(var3),max(nHat), pos = 2,
paste("Fit: b = ", round(bHat*100)/100, "; c = ", round(cHat*100)/100))

## Poisson fit (left disabled)
#fit = fitdistr(i, "Poisson")
#lambdaHat = fit$estimate
#nHat = dpois(i,lambdaHat)
#win.graph()
#hist(i, breaks = c(min(i):max(i)), freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x",
#main = paste("lambda = ", lambda))
#points(i,nHat,lwd=2)
#text(min(i),max(nHat), pos = 4,
#paste("Fit: lambda = ", round(lambdaHat*100)/100))
#----- var4: fit a normal distribution by maximum likelihood and overlay it on a histogram -----
mu = mean(var4) # sample mean, shown in the plot title
sigma = sd(var4) # sample standard deviation, shown in the plot title
#mu = 100 # Set the mean.
#sigma = 2 # Set the standard deviation.
#randNorm = rnorm(n, mu, sigma) # Use rnorm function to generate random numbers.
muExp = mean(var4)
#muExp = 10 # Set the mean.
#randExp = sort(rexp(n = n,rate = 1/muExp))
#b = 150 # Set the scale parameter, b.
#c = 1.5 # Set the shape parameter, c.
#randWbl = sort(rweibull(n = n, scale = b,shape = c))
lambda = mean(var4)
#lambda = 10 # Set the mean.
#randPois = sort(rpois(n = n, lambda = lambda))
fit = fitdistr(var4,"normal") # Fit normal distribution to data (MASS::fitdistr, MLE).
muHat = fit$estimate[1] # MLE mean
sigmaHat = fit$estimate[2] # MLE standard deviation
nHat = dnorm(sort(var4),muHat,sigmaHat) # fitted density on sorted x so lines() draws a smooth curve
win.graph()
hist(var4, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
ylab = "Probability density",
xlab = "x",
main = paste("var4 normal - ","mu = ", mu, "; sigma = ", sigma))
lines(sort(var4),nHat,lwd=2)
text(min(var4),max(nHat), pos = 4,
paste("Fit: mu = ",round(muHat*100)/100))
text(min(var4),0.9*max(nHat), pos = 4,
paste("sigma = ", round(sigmaHat*100)/100))
# The exponential / Weibull / Poisson fits below are disabled for var4.
#fit = fitdistr(i,"exponential")
#muHat = 1/fit$estimate[1] # The parameter for the exponential distirubtion
# is the rate; the mean of an exponential distribution is 1 / rate.
#nHat = dexp(i,fit$estimate[1])
#win.graph()
#hist(i,breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x",
#main = paste("mu = ", muExp))
#lines(i,nHat,lwd=2)
#text(max(i),max(nHat), pos = 2,
#paste("Fit: mu = ",round(muHat*100)/100))
#fit = fitdistr(i, "weibull")
#bHat = fit$estimate[2] # Estiamted scale parameter
#cHat = fit$estimate[1] # Estimated shape parameter
#nHat = dweibull(i, cHat, bHat)
#win.graph()
#hist(i, breaks=50, freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x")
#main = paste("b = ", b,"; c = ", c))
#lines(i,nHat,lwd=2)
#text(max(i),max(nHat), pos = 2,
#paste("Fit: b = ", round(bHat*100)/100, "; c = ", round(cHat*100)/100))
#fit = fitdistr(i, "Poisson")
#lambdaHat = fit$estimate
#nHat = dpois(i,lambdaHat)
#win.graph()
#hist(i, breaks = c(min(i):max(i)), freq = FALSE, col=rgb(0.75,0.75,0.75),
#ylab = "Probability density",
#xlab = "x",
#main = paste("lambda = ", lambda))
#points(i,nHat,lwd=2)
#text(min(i),max(nHat), pos = 4,
#paste("Fit: lambda = ", round(lambdaHat*100)/100))
#------------manual coded---------------------------#
|
64f1b78384463988741615fc1d245bbe7c321bba | 5a3c52c867009743f7c46a59132c0f7cb12eb643 | /man/plot_box_mut_dep.Rd | 30e3e0072a42afd8825ca07034f06dd70a10da79 | [
"MIT"
] | permissive | sbenfatto/PARIS | 23b004b12396aa08c53b29d072845cf8590c47bc | 179b67a58b2855b411d06877a43624595b71babb | refs/heads/master | 2023-03-17T22:40:38.992952 | 2021-03-12T15:37:37 | 2021-03-12T15:37:37 | 346,786,819 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 447 | rd | plot_box_mut_dep.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots_utils.R
\name{plot_box_mut_dep}
\alias{plot_box_mut_dep}
\title{Plot boxplot dependecy vs mutation}
\usage{
plot_box_mut_dep(depgene, mutgene)
}
\arguments{
\item{depgene}{Dependency score gene name}
\item{mutgene}{Mutation gene name (mutational status)}
}
\value{
Boxplot of the dependency score by mutational status, with jittered points overlaid
}
\description{
Plot a boxplot of dependency score versus mutational status
}
|
33d020c4d9139c21430f0c30f7ad32cd440d8544 | e26a361b9556305efbdc59cb7554e54cc67e8f80 | /man/configuration_setting_generation.Rd | 70ca5208f634c2ee918d8a587b8e6374bbbf83d3 | [] | no_license | CHuanSite/PJD | 1e9b16b6212428a2069d856efa33cedc57133e66 | f477b613d7a67adf4de50427a80992cd71ab080d | refs/heads/master | 2023-08-26T20:34:17.948459 | 2021-10-21T19:12:54 | 2021-10-21T19:12:54 | 266,581,079 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 981 | rd | configuration_setting_generation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simFunction.R
\name{configuration_setting_generation}
\alias{configuration_setting_generation}
\title{Configuration for simulated data generation}
\usage{
configuration_setting_generation(
featureNum = 50,
DataNum = c(100, 100, 100, 100),
commonlySharedNum = 2,
partiallySharedNum = c(2, 2, 2, 2),
individualSharedNum = c(2, 2, 2, 2),
noiseVariance = c(1, 1, 1, 1)
)
}
\arguments{
\item{featureNum}{number of features}
\item{DataNum}{number of data}
\item{commonlySharedNum}{number of common component}
\item{partiallySharedNum}{number of partial shared component}
\item{individualSharedNum}{number of individual component}
\item{noiseVariance}{variance of noise}
}
\description{
generate the configuration for the data
}
\examples{
configuration_setting_generation()
}
\keyword{configure}
\keyword{data}
\keyword{for}
\keyword{generation}
\keyword{of}
\keyword{setting}
\keyword{the}
|
38518c6e2d16a0f3afe75dfa83cf496ceb4cf23d | 694bec950bee64845eaf3235331e083a2aa84f97 | /man/gg_color_hue.Rd | 14be48c5d578641bd2dfa2f11d53b1f768d7c2a5 | [] | no_license | AdolfoRojas/webr_Adolfo | d8aeeb27bbeae1376af605a4c8e4d58af7cffd84 | b5a027ce46a305fe4b1ed34bbc156b174e02b59e | refs/heads/master | 2022-08-01T04:20:29.537774 | 2020-05-19T20:11:53 | 2020-05-19T20:11:53 | 265,335,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 263 | rd | gg_color_hue.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PieDonut.R
\name{gg_color_hue}
\alias{gg_color_hue}
\title{Make default palette}
\usage{
gg_color_hue(n)
}
\arguments{
\item{n}{number of colors}
}
\description{
Make default palette
}
|
aef301fed187105e35b78fb1464441b616f59cff | 29585dff702209dd446c0ab52ceea046c58e384e | /SMIR/R/R2CV.R | b9f4d378448b2184eb042835a4df4b4f121bd15c | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 258 | r | R2CV.R | R2CV <- function(model){
if (!any(class(model)=="lm")) stop("model must be class ``lm'' or ``glm''")
yhat <- fitted(model)
ehat <- residuals(model)
y <- yhat + ehat
hi <- influence(model)$hat
yhatcve <- (yhat-hi*y)/(1-hi)
cor(y,yhatcve)^2
}
|
f1374ebb05e7c2ca39328fda8f446e724d509d5e | 8273f47fe706f9ddf034875de1a94a326b43d567 | /intermediateR/Tidyverse.R | 70d55d8aa34e1318848579fe1893a1c53d987d8a | [
"MIT"
] | permissive | Jokendo-collab/intermediateRcourse | 91b82aab5c9f325c3a269dfbc1474191f3f29636 | 9f81fb7a288cb9605400229eafe91ea145eb778f | refs/heads/main | 2023-01-28T00:15:37.417563 | 2020-12-02T11:12:48 | 2020-12-02T11:12:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,800 | r | Tidyverse.R | # ------------------------------------------------------------------------
# Data wrangling with the Tidyverse
# Code used to illustrate key functions and operations from the
# core Tidyverse packages.
# Data are based on waterbird counts conducted in KwaZulu-Natal at
# 60 sites over several successive months (missions). Each site falls
# within a wetland cluster and has a protection status (full, partial,
# non-protected). Habitat and weather measurements were taken after
# each point count.
# ------------------------------------------------------------------------
# Install and load libraries ----------------------------------------------
# The original installed tidyverse twice and set the working directory twice;
# one call of each is enough.
install.packages("tidyverse")
#install.packages("here", dependencies = TRUE)
library(tidyverse)

# Import data by setting your own working directory -----------------------
setwd("C:/Users/01466539/Desktop/R Learning/R immediate workshop/Tidyverse")

# Base-R import: returns a data.frame.
# (Removed dead statement `counts1 = read.csv`, which assigned the read.csv
# function itself, not the data.)
counts1 <- read.csv("bird_counts.csv")
str(counts1)
head(counts1)

# readr import: returns a tibble. read_csv() is attached by library(tidyverse),
# so the namespaced and bare calls below are equivalent.
counts <- readr::read_csv("bird_counts.csv")
counts <- read_csv("bird_counts.csv")
# Tibble ------------------------------------------------------------------
# Tibbles print the first rows with column types, so head() is unnecessary.
counts # Say goodbye to head()!
print(counts, n = 20)
print(counts, width = Inf)
# Tidy column names -------------------------------------------------------
counts <- dplyr::rename_all(counts, tolower)
counts
###select columns
# Select & reorder --------------------------------------------------------
counts <- dplyr::select(counts, site,protection,year,month,abundance,richness,
everything())
names(counts)
# Drop humidity and every column whose name matches "sl_" or "aq_".
olina1 <- dplyr::select(counts, -matches("sl_|aq_"),-humidity)
names(olina1)
# Split site variable -----------------------------------------------------
# "mission_cluster_site" codes are split into three columns on "_".
counts <- tidyr::separate(counts, site, c("mission","cluster","site"), sep = "_")
counts
# Arrange(Putting them in order) -----------------------------------------------------------------
counts <- dplyr::arrange(counts, mission, cluster,site)
counts
# Mutate ------------------------------------------------------------------
# Shift year by 10, then convert air_temp from Fahrenheit to Celsius.
counts <- dplyr::mutate(counts, year = year + 10)
counts
counts <- dplyr::mutate(counts, air_temp = (air_temp - 32)/1.8)
counts
# What about the "$"?
# Filter ------------------------------------------------------------------
counts[["ph"]]
counts <- dplyr::filter(counts,!is.na(ph))
counts
# All together ------------------------------------------------------------
# The same pipeline as above, re-run from the raw file as one piped chain.
counts <- read_csv("bird_counts.csv") %>%
rename_all(tolower) %>%
select(site,protection,year,month,abundance,richness,everything()) %>%
select(-matches("sl_|aq_"), -humidity) %>%
separate(site, c("mission","cluster","site"), sep = "_") %>%
arrange(mission, cluster, site) %>%
mutate(year = year + 10,air_temp = (air_temp - 32)/1.8) %>%
filter(!is.na(ph))
counts # Success!
# Mutate multiple columns -------------------------------------------------
counts %>%
select(ph:sal)
# NOTE(review): mutate_at()/vars() are superseded in modern dplyr by
# across(), e.g. mutate(across(ph:sal, scale)).
counts %>%
mutate_at(vars(ph:sal),scale) %>%
select(ph:sal)
## Base R ##
# NOTE(review): columns 16:20 are selected by position — fragile if column
# order changes upstream.
as.data.frame(apply(counts[,16:20], 2, function(x) scale (x)))
# Group and summarise -----------------------------------------------------
counts %>%
group_by(cluster) %>%
summarise(mean(abundance),sd(abundance))
# Group, summarise and plot -----------------------------------------------
# Mean richness per cluster/year with +1 SD error bars, as a dodged bar chart.
counts %>%
mutate(year = as.factor(year)) %>%
group_by(cluster,year) %>%
summarise(mean_rich = mean(richness),sd_rich = sd(richness)) %>%
ggplot(aes(x = cluster, y = mean_rich, fill = year))+
geom_bar(stat = "identity",position = position_dodge())+
geom_errorbar(aes(ymin=mean_rich, ymax=mean_rich + sd_rich),
width=.2,position=position_dodge(.9))
# Reorder factor levels ---------------------------------------------------
# fct_reorder orders the cluster axis by mean abundance rather than alphabetically.
counts %>%
group_by(cluster) %>%
summarise(mean_abun = mean(abundance)) %>%
ggplot(aes(x = fct_reorder(cluster,mean_abun), y = mean_abun))+
geom_bar(stat = "identity")
## Forcats ##
#fct_reorder(cluster,mean_abun)
## Base R ##
#counts$cluster <-factor(counts$cluster, levels = counts[order(counts$mean_abun),"cluster"])
# Filter and summarise ----------------------------------------------------
counts %>%
filter(protection == "FP" & air_temp < 30) %>%
group_by(cluster, year) %>%
summarise(total = sum(abundance), max = max(abundance),n_counts = n()) %>%
filter(total > 500)
|
d145df0b248f7ff2cfacf847e2fd2293f2a09861 | 2492c8dee590ff43db2189ddca8c68ac32d49bdf | /TSExperiment/R/scratch.R | c1981c9a8d4ed8b348a9409b8e6f124b2e56a628 | [
"MIT"
] | permissive | dfreestone/TSLibrary | 1848fd0f7cc601bee816dd377742b0901ab3045f | 5b864d23e2c26da1745fc039b5c0358dbda7e69a | refs/heads/master | 2023-03-08T12:07:22.163911 | 2020-04-08T21:21:52 | 2020-04-08T21:21:52 | 83,917,970 | 0 | 0 | MIT | 2022-12-12T10:27:35 | 2017-03-04T19:10:41 | HTML | UTF-8 | R | false | false | 2,182 | r | scratch.R | # TSExperiment::CreateActiveExperiment('the_data_trail',
# '~/Dropbox (William Paterson)/lab/manuscripts/the_data_trail/R',
# '~/Desktop/xx_conditions.txt',
# '~/Desktop/xx_protocol.mpc',
# '~/Desktop/xx_eventcodes.txt')
#
#
#
# name <- 'test_experiment'
# location <- '~/Dropbox (William Paterson)/lab/experiments/experiments'
#
# conditionsFile <- '~/Desktop/xx_conditions.txt'
# protocolFile <- '~/Desktop/xx_protocol.mpc'
# eventcodesFile <- '~/Desktop/xx_eventcodes.txt'
#
# CreateActiveExperiment(name, location, conditionsFile, protocolFile, eventcodesFile)
#
# library(magrittr)
# library(tidyverse)
#
# data_path <- '~/Dropbox (William Paterson)/lab/experiments/experiments/t_train_minimum_cv/data/mpc/'
# df <- Sys.glob(file.path(data_path, "*")) %>%
# TSLib::mpc_load_files() %>%
# TSLib::mpc_tidy()
#
# df %<>%
# slice(6:26)
#
# # the conditions file is from a different experiment, but it doesn't matter here...
# df %<>%
# mutate(subject = 'M097')
#
# # Any column that's not a number should be a factor
# df %<>%
# mutate_if(~!is.numeric(.), factor) %>%
# select(-variable)
#
# lst <- df %>%
# select_if(is.factor) %>%
# map(function(x){list(name = as.character(unique(x)),
# code = as.numeric(unique(x)))})
#
# old_json <- jsonlite::read_json('~/Dropbox (William Paterson)/lab/manuscripts/the_data_trail/R/xx_the_data_trail/data/xx_dataset.json',
# simplifyVector = TRUE)
# old_json[['data']] <- NULL
# json <- append(old_json,
# list(data = df,
# data_factors = lst))
#
# jsonlite::write_json(json,
# path = '~/Desktop/test.json',
# dataframe = 'columns',
# factor = 'integer',
# pretty = TRUE)
#
# d2 <- jsonlite::read_json(path = '~/Desktop/test.json',
# simplifyVector = TRUE)
#
# data.in <- d2$data %>%
# as_tibble()
#
# # TODO(David): Put the factors back...
#
#
|
54f38ad94a2cf65bbccf3cf35d1120d4ec4b339f | 6c789da6d723702920a44001cc09620d42093d10 | /R/glossary.R | 6d66576980d5c28a44da17ee904a522d3c3d2e7c | [
"MIT",
"CC0-1.0",
"CC-BY-4.0",
"CC-BY-3.0",
"LicenseRef-scancode-public-domain"
] | permissive | YaoqiLi/traitdataform | 97b36d181b0a77df43139674886aee70034ef739 | 9db404002325418c54332cc8e0af96afafe4c3d5 | refs/heads/master | 2020-03-25T07:26:01.502080 | 2018-07-21T17:40:37 | 2018-07-21T17:40:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 149 | r | glossary.R | #' @export
"glossary"
glossary <- read.csv("https://raw.githubusercontent.com/EcologicalTraitData/TraitDataStandard/master/TraitDataStandard.csv")
|
7d9d67408c84da6c1eeb50efca1a84c46ce82023 | 6d51cb6e3ab9d6909479f30a4ea1664cc3a0f5b1 | /man/LSLM2RGB.Rd | e11bd2a3a6a9fb9dff963c50384fc492924bac9c | [] | no_license | playwar/colorscience | d7c51c7f8bb41591725436ff5cd2594f4124762b | 784f5467f4aad5dc5295ab469b78811417751304 | refs/heads/master | 2022-03-01T14:20:03.852206 | 2019-10-29T17:40:02 | 2019-10-29T17:40:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 754 | rd | LSLM2RGB.Rd | \name{LSLM2RGB}
\alias{LSLM2RGB}
\title{Convert LSLM coordinates to RGB}
\description{\code{LSLM2RGB} Converts LSLM coordinates to RGB.
}
\usage{LSLM2RGB(LSLMmatrix) }
\arguments{
\item{LSLMmatrix}{ LSLM coordinates}
}
\value{
RGB coordinates
}
\source{
Francoise Vienot, Hans Brettel,John D. Mollon, 1999
Digital Video Colourmaps for Checking the Legibility of Displays by Dichromats
Color Research & Application
John Wiley & Sons, Inc.
}
\references{
Francoise Vienot, Hans Brettel,John D. Mollon, 1999
Digital Video Colourmaps for Checking the Legibility of Displays by Dichromats
Color Research & Application
John Wiley & Sons, Inc.
}
\author{Jose Gama}
\examples{
LSLM<-c(-0.4186083, 0.007563981, 0.4918533)
LSLM2RGB(LSLM)
}
\keyword{datasets}
|
01be6c9d7cec6248fbaf7f9a88cf1c53c0516355 | e7bd469eccdbf11cded79bfd9d4cf457f87fbba4 | /R/HGmultc.r | 9c5f16787eb164dc90641f88e81d980adde83fc3 | [] | no_license | parsifal9/ptools | ac1c4268ec8e7d728e3a21c8df764799dd8c2702 | baeeb606b7e2f9076155de0b7885b6e45daee94c | refs/heads/master | 2023-08-29T17:05:39.639233 | 2023-08-06T08:43:11 | 2023-08-06T08:43:11 | 299,174,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,184 | r | HGmultc.r | HGmultc<-function (X, y, weights = rep(1, nrow(X)), initb=NULL, sparsity.prior = "NG",
lambda = 1, bbess = 1e+07, kbess = 0, adj.prior = TRUE, control = HGcontrol())
{
if (max(y) == 1) {
cat("Error: class labels must be 1,2,...,G", "\n")
return()
}
tol0 <- control$tolerance
tol1 <- tol0
eps0 <- control$epsilon
eps1 <- 1e-4 # changed from eps0
tolc <- control$tolc
HGIcexpb <- match.prior(sparsity.prior)
n <- nrow(X)
p <- ncol(X)
X <- cbind(rep(1, n), X)
p <- p + 1
G <- max(y)
G1 <- G
ff <- 1
if (adj.prior)
ff <- (G - 1)/G
weights <- abs(weights)
nn <- sum(weights > 0)
weights <- (weights/sum(weights)) * nn
if(is.null(initb))bhat <- HGIinitialisew(X, y, eps1, lambda, weights)
else bhat<-initb
S <- bhat != 0
S[1,]<-TRUE # fix so constant always present even if not in b0
R <- HGIprob(X, bhat, S)
Q1 <- HGIQfunwb(y, bhat, bhat, S, R, weights, bbess, kbess,
ff, HGIcexpb)
beta <- bhat
j <- 1
repeat {
Q0 <- Q1
repeat {
delta <- vector("list", G1)
for (g in 1:G1) {
delta[[g]] <- HGIdirectionwb(X, y, beta, bhat,
S, R, g, weights, bbess, kbess, ff, HGIcexpb)
}
alpha <- 1
nc <- 0
repeat {
btmp <- beta
for (g in 1:G1) {
tt <- S[, g]
btmp[tt, g] <- btmp[tt, g] + alpha * delta[[g]]
}
R <- HGIprob(X, btmp, S)
Q2 <- HGIQfunwb(y, btmp, bhat, S, R, weights,
bbess, kbess, ff, HGIcexpb)
if (Q2 > Q1 - tolc)
break
nc <- nc + 1
#cat(Q1,Q2,nc,tolc,"\n")
if (nc > 100)
stop("Error in line search\n No upward step could be found")
alpha <- alpha/2
}
beta <- btmp
if (abs(Q2 - Q1) < tolc)
break
Q1 <- Q2
if (alpha * max(abs(unlist(delta))) < tol1)
break
}
j <- j + 1
if (exists("Qlog"))
Qlog <<- c(Qlog, Q1)
S <- (abs(beta) > eps0)
S[1, ] <- TRUE
bhat <- beta
if (abs(Q1 - Q0) < tol0)
break
R <- HGIprob(X, bhat, S)
Q1 <- HGIQfunwb(y, bhat, bhat, S, R, weights, bbess,
kbess, ff, HGIcexpb)
if (j > control$maxit) {
cat("Iteration Limit exceeded.\n")
break()
}
}
#class <- apply(R, 1, which.max)
class<-max.col(R)
zz <- list(beta = bhat, S = S, P = R, class = class)
class(zz) <- c("HGmultc", "HG")
zz
}
# HGIQfunwb.old: superseded per-class loop implementation of the penalised
# Q objective; kept for reference.  See the vectorised HGIQfunwb below.
HGIQfunwb.old <- function (y, beta, bhat, S, R, weights, bbess, kbess, ff,
HGIcexpb)
{
n <- nrow(R)
# NOTE(review): G and Rt are computed but never used below.
G <- max(y)
#Rt <- 1 - apply(R, 1, sum)
Rt<-1-rowSums(R)
# Weighted log-likelihood of the observed classes, floored to avoid log(0).
Qf <- sum(weights * log(pmax(1e-100, R[cbind(1:n, y)])))
# Exclude the intercept row from the penalty.
S[1, ] <- FALSE
for (g in 1:ncol(S)) {
bssi <- HGIcexpb(bbess, kbess, bhat[S[, g], g])
bssi <- bssi^-0.5
Qf <- Qf - ff * 0.5 * sum((beta[S[, g], g] / bssi)^2, na.rm = TRUE)#added ff
}
Qf
}
# HGIQfunwb: penalised weighted log-likelihood ("Q" objective) maximised by
# HGmultc.  Likelihood part: sum of case weights times log fitted probability
# of each observed class (floored at 1e-100 to avoid log(0)).  Penalty part:
# ridge-type term whose per-coefficient scales come from the prior function
# HGIcexpb evaluated at the current estimate bhat, applied to beta.  The
# intercept row (row 1 of S) is excluded from the penalty.
HGIQfunwb <- function (y, beta, bhat, S, R, weights, bbess, kbess, ff,
HGIcexpb)
{
n <- nrow(R)
# NOTE(review): G and Rt are computed but never used below.
G <- max(y)
#Rt <- 1 - apply(R, 1, sum)
Rt<-1-rowSums(R)
Qf <- sum(weights * log(pmax(1e-100, R[cbind(1:n, y)])))
S[1, ] <- FALSE
#
# Vectorised penalty over all active, non-zero coefficients.
bhat<-as.vector(bhat*S) # need S here for equivalence
beta<-as.vector(beta)
ind<-(bhat!=0)
bhat<-bhat[ind]
bssi<-HGIcexpb(bbess,kbess,bhat)
bssi<-bssi^-0.5
Qf<-Qf-ff*0.5*sum((beta[ind]/bssi)^2,na.rm=T)
Qf
}
# HGIdirectionwb: search direction for class g's active coefficients.
# Solves a ridge-regularised weighted least-squares system in the scaled
# coordinates bssi; when the number of active predictors pg >= n it switches
# to the equivalent dual n x n system via the matrix inversion lemma,
# (I + A'A)^{-1} B = B - A' (I + A A')^{-1} A B, to keep the solve small.
# Returns NULL when class g has no active predictors.
HGIdirectionwb <- function (X, y, beta, bhat, S, R, g, weights, bbess, kbess,
ff, HGIcexpb)
{
tt <- S[, g]
# Per-coefficient prior scales at the current estimate.
bssi <- HGIcexpb(bbess, kbess, bhat[tt, g])
bssi <- bssi^-0.5
pg <- sum(tt)
if (pg == 0)
return(NULL)
n <- nrow(X)
# The intercept gets a fixed scale kappa and no shrinkage target.
kappa <- 10
bssi[1] <- kappa
beta[tt, g][1] <- 0
B <- X[, tt, drop = FALSE] * matrix(bssi, nrow = n,
ncol = pg, byrow = TRUE) / ff^0.5 #added ff^0.5 here
# A: design rows weighted by sqrt of the binomial variance w * p * (1 - p).
A <- sqrt(weights * R[, g] * (1 - R[, g])) * B
# B becomes the penalised gradient for class g.
B <- crossprod(B, weights * ((y == g) - R[, g])) - ff * beta[tt, g] / bssi #added ff
if (pg < n) {
# Primal solve: (A'A + I) Z = B.
A <- crossprod(A)
diag(A) <- diag(A) + 1
Z <- solve(A, B)
delta <- bssi * Z
}
else {
# Dual solve (pg >= n): work with the n x n matrix A A' + I instead.
M <- crossprod(t(A))
diag(M) <- diag(M) + 1
Z <- solve(M, A %*% B)
delta <- bssi * (B - crossprod(A, Z))
}
delta
}
# HGIinitialisew: ridge-regression starting values for the coefficient matrix.
# Builds an n x G target matrix with -log(eps) at each case's observed class
# and log(eps) elsewhere, applies case weights, and solves a ridge system
# (primal when p <= n, dual otherwise).  The result is centred across classes
# and rescaled so each class column has maximum |beta| of 25.
HGIinitialisew <- function (X, y, eps, lambda, weights)
{
n <- nrow(X)
p <- ncol(X)
G <- max(y)
R <- matrix(log(eps), nrow = n, ncol = G)
R[cbind(1:n, y)[y <= G, ]] <- -log(eps)
# Apply case weights to targets and (square-root) to the design.
R <- sweep(R, 1, weights, FUN = "*")
X <- sweep(X, 1, sqrt(weights), FUN = "*")
if (p <= n) {
# Primal ridge solve: (X'X + lambda I) beta = X'R.
A <- crossprod(X)
diag(A) <- diag(A) + lambda
B <- crossprod(X, R)
beta <- solve(A, B)
}
else {
# Dual form of the ridge solve for p > n.
A <- crossprod(t(X))
B <- A %*% R
diag(A) <- diag(A) + lambda
beta <- crossprod(X, R - solve(A, B))/lambda
}
# Centre each predictor's coefficients across the G classes, then rescale
# each class column so its largest absolute coefficient is 25.
beta <- t(scale(t(beta), center = TRUE, scale = FALSE))
beta <- sweep(beta, 2, apply(abs(beta), 2, max), "/")
25 * beta
}
#HGIcexpb <- function(b, k, beta)
#{
#b scale parameter
# k shape parameter
# beta conditioning values for E{v-2 | beta}
# returns conditional expected value for expanded hg model
#
# gi <- beta^2 / (2 * b)
# y <- 2 * sqrt(gi)
# e <- beta * beta
# e <- 1 / e
# e <- e * y * besselK(y, 1.5 - k) / besselK(y, 0.5 - k)
# e
#}
# HGIprob.old: superseded loop-based computation of the fitted class
# probabilities (softmax over the per-class linear predictors); kept for
# reference — see the vectorised HGIprob below.  cupcap() (defined elsewhere)
# is applied to the linear predictor with limit 100 — presumably a clamp to
# keep exp() from overflowing; confirm in its definition.
HGIprob.old <- function (X, beta, S)
{
R <- matrix(0, nrow = nrow(X), ncol = ncol(beta))
for (g in 1:ncol(beta)) {
tt <- S[, g]
R[, g] <- exp(cupcap(X[, tt, drop = FALSE] %*%
beta[tt, g, drop = FALSE], 100))
}
# Normalise each row so the class probabilities sum to 1.
RG <- 1 / (apply(R, 1, sum))
R <- sweep(R, 1, RG, "*")
R
}
HGIprob <- function (X, beta, S)
{
# Fitted class probabilities: softmax of the linear predictors X %*% (beta * S),
# where the logical mask S zeroes out inactive coefficients.  cupcap() (defined
# elsewhere in the package) is applied to the linear predictor with limit 100 —
# presumably clamping it so exp() cannot overflow; confirm in its definition.
#
# Changes: removed a dead `R <- matrix(0, ...)` allocation that was
# immediately overwritten, and a leftover commented-out browser() call.
eta <- exp(cupcap(X %*% (beta * S), 100))
eta / rowSums(eta)  # normalise each row to sum to 1
}
|
cd0c39108953d9da06b8a41c2138435e6c2ded42 | 47afb630e472f82d4da2411319bbb2a85e5bd580 | /server.R | 88bd3cf4acbd264f6cec944c36e34c5f56292444 | [] | no_license | petbadger/ufo_sightings | b54b2bd955153396097a5019a7857519a10b4bc4 | 2034cae5e60f747622af5fb85a565f35453e7b0f | refs/heads/master | 2020-03-29T03:42:48.462992 | 2017-06-18T05:18:29 | 2017-06-18T05:18:29 | 94,650,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,706 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
reactive({ input$update})
scrub <- reactive({
data <- scrubbed %>% select(lat, long, country, city, shape, duration_descr, state, comments, duration, year, month, day, seconds, minutes)
if( input$theDuration != "All" ){
data <- data %>% filter(duration_descr == input$theDuration)
}
if( input$theShape2 != "All" ){
data <- data %>% filter(shape == input$theShape2)
}
if( input$theCountry != "All" ){
data <- data %>% filter(country == input$theCountry)
}
if( input$theState != "All" ){
data <- data %>% filter(state == input$theState)
}
if( length(input$sightYear) == 0 ){
#removed the one NA
data <- data %>% filter(year == 2014)
}else{
data <- data %>% filter(year %in% input$sightYear)
}
#get total obs available once filtered
nobs_avail_txt <- nrow(data)
#important not to use nrow(data) in the render* because it will grab the result nrow of reactive scrub()
output$nobs_avail <- renderText({ paste("Observations available:", formatC(nobs_avail_txt, format = "d", big.mark = ",") ) })
#if ( !is.na(input$obs) ){ #this checks that obs is no na, but it would never be that since it's not an option in the slider.
# data <- data[1:input$obs,]
#}
#The problem with this is that it it will force the creation of empty rows if nrow's is less than that of nobs.
#This results in blank lat longs.
#Instead, we need to check how many records there are. If nrow is less than nobs, then don't limit the vector
if ( nrow(data) > input$obs ){
data <- data[1:input$obs,]
}
validate(need(nrow(data) > 0, message = "Whoops. Filtered to empty data set." ) )
check <- data
data
})
#for zoom button
lastZoomButtonValue <- NULL
output$mapPlot <- renderLeaflet({
map <- leaflet(data = scrub() ) %>% addTiles() %>%
addMarkers(~long, ~lat, icon=ufoIcon, popup=~paste("<p style='color: #000000; background-color: #ffffff'>",
"<b>Shape: </b>", scrub()$shape,"</br>",
"<b>Date: </b>", scrub()$month, scrub()$day, scrub()$year, "</br>",
"<b>Duration: </b>", scrub()$duration, "</br>",
"<b>City: </b>", scrub()$city, "<b> State: </b>", scrub()$state,"<b> Country: </b>", scrub()$country, "</br>",
"<b>Comment: </b>", scrub()$comments,"</br></p>"),
#options = popupOptions(syles=WMS)
label=~htmlEscape(shape), clusterOptions = markerClusterOptions())
rezoom <- "first"
# If zoom button was clicked this time, and store the value, and rezoom
if (!identical(lastZoomButtonValue, input$zoomButton)) {
lastZoomButtonValue <<- input$zoomButton
rezoom <- "always"
}
map <- map %>% mapOptions(zoomToLimits = rezoom)
map
})
#Wordcloud
# terms <- reactive({
# Change when the "update" button is pressed...
# input$update
# ...but not for anything else
# isolate({
# withProgress({
# setProgress(message = "Processing corpus...")
# getTermMatrix(input$selection)
# })
# })
# })
# Make the wordcloud drawing predictable during a session
#wordcloud_rep <- repeatable(wordcloud)
# Word cloud over the filtered data.  scrub() is isolate()d, so the plot only
# re-renders when input$buildWordcloud changes, not when the filters do.
output$plot <- renderPlot({
input$buildWordcloud
#https://stackoverflow.com/questions/26711423/how-can-i-convert-an-r-data-frame-with-a-single-column-into-a-corpus-for-tm-such
#Import your data into a "Source", your "Source" into a "Corpus", and then make a TDM out of your "Corpus":
corp <- Corpus(DataframeSource(as.data.frame( isolate(scrub() ) ))) #isolate scrub?
# Standard tm cleaning pipeline: whitespace, case, punctuation, stop words,
# stemming.  NOTE(review): recent tm versions require bare functions such as
# `tolower` to be wrapped in content_transformer() -- confirm against the tm
# version in use.
corp <- tm_map(corp, stripWhitespace)
corp <- tm_map(corp, tolower)
corp <- tm_map(corp, removePunctuation)
corp <- tm_map(corp, removeWords, stopwords("english"))
corp <- tm_map(corp, stemDocument)
corp <- tm_map(corp, PlainTextDocument) #put the corpus back into the correct data type
wordcloud(corp, scale=c(10,1), max.words=30, random.order=FALSE,
rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
})
# Wire each of the four reset buttons to its side panel.  lapply() gives each
# observer its own closure over `pair`, so the bindings do not leak into one
# another; input[[name]] is equivalent to input$name for dynamic ids.
invisible(lapply(
list(c("reset_input", "side-panel-1"),
c("reset_input2", "side-panel-2"),
c("reset_input3", "side-panel-3"),
c("reset_input4", "side-panel-4")),
function(pair) {
observeEvent(input[[pair[[1]]]], {
reset(pair[[2]])
})
}
))
# Show the first "n" observations
# Interactive DT table over the filtered sightings; the JS callback hides the
# per-column filter inputs that filter="top" would otherwise render.
output$dataview <- DT::renderDataTable({
datatable(
scrub() %>% select(year, month, day, country, state, shape, seconds, minutes, comments),filter="top",
selection = "none",
# BUG FIX: removed a stray leading comma before `callback`, which passed an
# empty (missing) argument into datatable().
options = list(lengthMenu = c(5, 10, 25), pageLength = 5, sDom = '<"top">flrt<"bottom">ip'), style = 'bootstrap',
callback=JS("
//hide column filters for the first two columns
$.each([0,1,2,3,4,5,6,7,8], function(i, v) {
$('input.form-control').eq(v).hide()
});" ))
})
#https://stackoverflow.com/questions/35624413/remove-search-option-but-leave-search-columns-option
#http://legacy.datatables.net/usage/options
#https://rstudio.github.io/DT/options.html
# Keep the state drop-down in sync with the selected country: whenever
# input$theCountry changes, repopulate "theState" choices via liststates().
observe({
x <- input$theCountry
# Can also set the label and select items
updateSelectInput(session, "theState",
#label = paste("Select input label", length(x)),
choices = liststates(x)
#selected = tail(x, 1)
)
})
# output$validDateRange <- renderText({
# make sure end date later than start date
# validate(
# need( as.POSIXct(input$sightDateRange[2]) > as.POSIXct(input$sightDateRange[1]), "end date is earlier than start date"
# )
# )
# paste("<i>Date range is ",
# difftime(as.POSIXct(input$sightDateRange[2]), as.POSIXct(input$sightDateRange[1]), units="days"),
# "days apart.</i>")
# })
#output$noData <- renderText({
# paste("If no data appears, try changing the filters")
#})
})
|
4b8d64f4715104221371c38da004c03e6f77ba5d | b74900922dcfdad29854da52e0ae91f95ebd0cac | /fsm_elligible.R | ef28493f8ddbd0d9f7efda80758a4bbbb998492c | [] | no_license | uk-gov-mirror/openglasgow.fsm_cg_match | 6c53b082043791c879a3b1252c469440a711beb1 | 57f69cf31171c0fa9a53ba9fe4d5164b8bec3188 | refs/heads/master | 2021-09-13T13:08:24.443742 | 2018-04-30T09:52:23 | 2018-04-30T09:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,413 | r | fsm_elligible.R | ### Add FSM criteria to matched cases
# Composite key: housing-benefit claim id + household member number.
matched_final = matched_final %>% mutate(hb_id_member = paste(hb_id, hb_member))
### 1. get our ids from incomes
cohort_income_ids = education_norm$income %>% filter(claim_id %in% matched_final$hb_id) %>% select(claim_id) %>% unique() %>% .$claim_id
### 2. on JSA or income suppport or child tax credit
cohort_support_ids = education_norm$incomes %>% filter(claim_id %in% cohort_income_ids & (inc_code %in% c("JSI", "CTX", "INS"))) %>% select(claim_id) %>% unique() %>% .$claim_id
### 3. on WTX with less than £6420 or is it wtx less than that / year?
# NOTE(review): this step reads from `extract$income` while steps 1-2 read
# from `education_norm` -- confirm the two sources are intentionally different.
cohort_wtx_ids = extract$income %>%
filter(claim_id %in% cohort_income_ids) %>%
group_by(claim_id, inc_code) %>%
summarize(n=n(), amt=sum(weekly_income_calculated)) %>%
filter(inc_code=="WTX") %>%
# Weekly amounts are annualised (x52) before applying the £6,420 threshold.
mutate(yearly_sum=amt*52) %>%
filter(yearly_sum<=6420) %>% select(claim_id) %>% unique() %>% .$claim_id
# FSM-eligible dependants: on a qualifying benefit OR under the WTX threshold.
cohort_deps_fsm_eligible = cohort %>% filter(dwp_claim_id %in% c(cohort_support_ids, cohort_wtx_ids))
### 4. For EMA - remove children under 16
# NOTE(review): 16*365 days ignores leap years; ages near the boundary may be
# off by a few days.
cohort_over = cohort %>% filter(dep_dob <= Sys.Date()-(16*365))
### 5. No earned income
cohort_ema_no_earned_ids = extract$income %>% filter(claim_id %in% cohort_over$dwp_claim_id & !(inc_code %in% c("EAR"))) %>% select(claim_id) %>% unique() %>% .$claim_id
### 6. WTX ids
cohort_ema_wtx_ids = extract$income %>% filter(claim_id %in% cohort_over$dwp_claim_id & inc_code %in% c("WTX")) %>% select(claim_id) %>% unique() %>% .$claim_id
### 7. WTX - all income for the houses on some kind of wtx
# Per-claim totals annualised, then joined back onto the raw income rows.
cohort_ema_wtx = extract$income %>%
filter(claim_id %in% cohort_ema_wtx_ids) %>%
group_by(claim_id) %>%
summarise(count=n(), amt = sum(weekly_income_calculated)) %>%
mutate(yearly_sum = amt*52) %>%
merge(extract$income, ., by="claim_id")
### Try something new = add income information to our cohort document
# Join annual household income (weekly * 52) and the comma-separated list of
# income codes onto the cohort, then derive one logical flag per benefit code
# plus the age/income/birth-window cut-offs used by the FSM and EMA rules.
cohort_merged = left_join(cohort, (extract$income %>% group_by(claim_id) %>% summarize(yearly_household_income = sum(weekly_income_calculated)*52)), by=c("dwp_claim_id" = "claim_id")) %>%
left_join(., (extract$income %>% group_by(claim_id) %>% summarize(income_codes = paste(inc_code, collapse=","))), by=c("dwp_claim_id" = "claim_id")) %>%
mutate(WTX_eligible = as.vector(!is.na(str_match(income_codes, "WTX"))),
JSA_elligible = as.vector(!is.na(str_match(income_codes, "JSI"))),
CTX_eligible = as.vector(!is.na(str_match(income_codes, "CTX"))),
IS_eligible = as.vector(!is.na(str_match(income_codes, "INS"))),
earned_income = as.vector(!is.na(str_match(income_codes, "EAR"))),
under_16 = dep_dob <= Sys.Date()-(16*365),
income_under_6420 = yearly_household_income <= 6420,
income_under_26_5k = yearly_household_income <= 26500,
born_between_1mar1997_28feb_2001 = (dep_dob >= as.Date('1997-03-1') & dep_dob <= as.Date('2001-02-28')))
### Add information to our matched sheet
# Income Support or Jobseekers Allowance
# Working Tax Credit with a pre tax household income of £6420 or lower
# Child Tax Credit with a pre tax household income of 16105
# Universal Credit (less than £500 per month)
#1. Build income criteria
incomes_household = education_norm$incomes %>%
group_by(claim_id) %>%
summarize(income_codes = paste(inc_code, collapse=","), yearly_household_income=sum(weekly_income_calculated)*52) %>%
# BUG FIX: the str_match() patterns were lower case ("wtx", "jsi", ...) while
# inc_code values are upper case everywhere else in this script, so every
# flag evaluated to FALSE.  Patterns are now upper case.
# BUG FIX: IS_eligible was assigned twice (INS, then ESA) and the second
# assignment silently overwrote the first; the ESA match now has its own
# ESA_eligible column and IS_eligible keeps the INS meaning.
mutate(WTX_elligible = as.vector(!is.na(str_match(income_codes, "WTX"))),
JSA_elligible = as.vector(!is.na(str_match(income_codes, "JSI"))),
CTX_eligible = as.vector(!is.na(str_match(income_codes, "CTX"))),
IS_eligible = as.vector(!is.na(str_match(income_codes, "INS"))),
ESA_eligible = as.vector(!is.na(str_match(income_codes, "ESA"))),
earned_income = as.vector(!is.na(str_match(income_codes, "EAR"))),
claim_id = as.character(claim_id))
# Join the household flags onto the matched cases, rebuild the date of birth
# from its year/month/day parts, and derive the FSM/EMA cut-off flags.
matched_final = left_join(matched_final, incomes_household, by=c("hb_id" = "claim_id")) %>%
mutate(hb_dob = as.Date(paste(HB_dob_y, HB_dob_m, HB_dob_d, sep="-"))) %>%
mutate(under_16 = hb_dob <= Sys.Date()-(16*365),
income_under_6420 = yearly_household_income <= 6420,
income_under_26_5k = yearly_household_income <= 26500,
born_between_1mar1997_28feb_2001 = (hb_dob >= as.Date('1997-03-1') & hb_dob <= as.Date('2001-02-28')))
### *** Mel analysis mark 2 ***
## Bring matched final over from "run.R"
|
a4df16d67c023a5080b2668f4bb703d30731360b | 7329459bb72ddd723bc58358e80b5f0db3db730c | /R/rglmb.R | c44f76d5fb1e7bbd5474bbe7b82e83bfdf80b3f3 | [] | no_license | knygren/glmbayes | 6f2411da073f3d6bfcb727e8d02d4888cacb8fef | 3c25c08c1f4ac71a0e67d47341fb0cf39497d5f8 | refs/heads/master | 2021-01-17T10:49:54.755257 | 2020-08-29T21:24:08 | 2020-08-29T21:24:08 | 18,466,002 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,963 | r | rglmb.R | #' The Bayesian Generalized Linear Model Distribution
#'
#' \code{rglmb} is used to generate iid samples for Bayesian Generalized Linear Models.
#' The model is specified by providing a data vector, a design matrix,
#' the family (determining the likelihood function) and the pfamily (determining the
#' prior distribution).
#' @param y a vector of observations of length \code{m}.
#' @param x for \code{rglmb} a design matrix of dimension \code{m * p} and for \code{print.rglmb} the object to be printed.
#' @inheritParams glmb
#' @return \code{rglmb} returns a object of class \code{"rglmb"}. The function \code{summary}
#' (i.e., \code{\link{summary.rglmb}}) can be used to obtain or print a summary of the results.
#' The generic accessor functions \code{\link{coefficients}}, \code{\link{fitted.values}},
#' \code{\link{residuals}}, and \code{\link{extractAIC}} can be used to extract
#' various useful features of the value returned by \code{\link{rglmb}}.
#' An object of class \code{"rglmb"} is a list containing at least the following components:
#' \item{coefficients}{a matrix of dimension \code{n} by \code{length(mu)} with one sample in each row}
#' \item{coef.mode}{a vector of \code{length(mu)} with the estimated posterior mode coefficients}
#' \item{dispersion}{Either a constant provided as part of the call, or a vector of length \code{n} with one sample in each row.}
#' \item{Prior}{A list with the priors specified for the model in question. Items in the
#' list may vary based on the type of prior}
#' \item{prior.weights}{a vector of weights specified or implied by the model}
#' \item{y}{a vector with the dependent variable}
#' \item{x}{a matrix with the implied design matrix for the model}
#' \item{famfunc}{Family functions used during estimation process}
#' \item{iters}{an \code{n} by \code{1} matrix giving the number of candidates generated before acceptance for each sample.}
#' \item{Envelope}{the envelope that was used during sampling}
#'
#' @author The \R implementation of \code{rglmb} has been written by Kjell Nygren and
#' was built to be a Bayesian version of the \code{glm} function but with a more minimalistic interface
#' than the \code{glmb} function. It also borrows some of its structure from other random generating function
#' like \code{\link{rnorm}} and hence the \code{r} prefix.
#'
#' @family modelfuns
#' @seealso \code{\link[stats]{lm}} and \code{\link[stats]{glm}} for classical modeling functions.
#'
#' \code{\link{family}} for documentation of family functions used to specify priors.
#' \code{\link{pfamily}} for documentation of pfamily functions used to specify priors.
#'
#' \code{\link{Prior_Setup}}, \code{\link{Prior_Check}} for functions used to initialize and to check priors,
#'
#' \code{\link{summary.glmb}}, \code{\link{predict.glmb}}, \code{\link{residuals.glmb}}, \code{\link{simulate.glmb}},
#' \code{\link{extractAIC.glmb}}, \code{\link{dummy.coef.glmb}} and methods(class="glmb") for \code{glmb}
#' and the methods and generic functions for classes \code{glm} and \code{lm} from which class \code{glmb} inherits.
#'
#' @references
#' Dobson, A. J. (1990)
#' \emph{An Introduction to Generalized Linear Models.}
#' London: Chapman and Hall.
#'
#' Hastie, T. J. and Pregibon, D. (1992)
#' \emph{Generalized linear models.}
#' Chapter 6 of \emph{Statistical Models in S}
#' eds J. M. Chambers and T. J. Hastie, Wadsworth & Brooks/Cole.
#' McCullagh P. and Nelder, J. A. (1989)
#' \emph{Generalized Linear Models.}
#' London: Chapman and Hall.
#'
#' Nygren, K.N. and Nygren, L.M (2006)
#' Likelihood Subgradient Densities. \emph{Journal of the American Statistical Association}.
#' vol.101, no.475, pp 1144-1156.
#' doi: \href{https://doi.org/10.1198/016214506000000357}{10.1198/016214506000000357}.
#'
#' Raiffa, Howard and Schlaifer, R (1961)
#' \emph{Applied Statistical Decision Theory.}
#' Boston: Clinton Press, Inc.
#'
#' Venables, W. N. and Ripley, B. D. (2002)
#' \emph{Modern Applied Statistics with S.}
#' New York: Springer.
#'
#' @example inst/examples/Ex_rglmb.R
#' @export
#' @rdname rglmb
#' @order 1
rglmb <- function(n = 1, y, x, family = gaussian(), pfamily, offset = NULL, weights = 1) {
  ## Unpack the components of the prior-family (pfamily) specification.
  pf         <- pfamily$pfamily
  okfamilies <- pfamily$okfamilies
  plinks     <- pfamily$plinks
  prior_list <- pfamily$prior_list
  simfun     <- pfamily$simfun

  ## Normalise `family` the same way stats::glm() does: a character name is
  ## looked up in the caller's frame, and a generator function is invoked.
  if (is.character(family)) {
    family <- get(family, mode = "function", envir = parent.frame())
  }
  if (is.function(family)) {
    family <- family()
  }
  if (is.null(family$family)) {
    print(family)
    stop("'family' not recognized")
  }

  ## Guard clause: the family must be one the pfamily supports.
  if (!family$family %in% okfamilies) {
    stop(gettextf("family \"%s\" not available for current pfamily; available families are %s",
                  family$family , paste(sQuote(okfamilies), collapse = ", ")),
         domain = NA)
  }
  ## ...and the link must be valid for this pfamily/family combination.
  oklinks <- plinks(family)
  if (!family$link %in% oklinks) {
    stop(gettextf("link \"%s\" not available for selected pfamily/family combination; available links are %s",
                  family$link , paste(sQuote(oklinks), collapse = ", ")), domain = NA)
  }

  ## Delegate the actual sampling to the pfamily's simulation function and
  ## attach the pfamily to the result for downstream methods.
  outlist <- simfun(n = n, y = y, x = x, prior_list = prior_list, offset = offset, weights = weights, family = family)
  outlist$pfamily <- pfamily
  outlist
}
#' @rdname rglmb
#' @order 2
#' @method print rglmb
#' @export
print.rglmb <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  # Echo the call that produced the object.
  call_text <- paste(deparse(x$call), sep = "\n", collapse = "\n")
  cat("\nCall: ", call_text, "\n\n", sep = "")
  if (length(coef(x)) == 0) {
    cat("No coefficients\n\n")
  } else {
    # Print the matrix of simulated coefficients, formatted but unquoted.
    cat("Simulated Coefficients")
    cat(":\n")
    formatted <- format(x$coefficients, digits = digits)
    print.default(formatted, print.gap = 2, quote = FALSE)
  }
}
|
a61a08ae63d56c1bb3560367eaaaf902bbdd8940 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Tentrup/ltl2aig-comp/load_full_3_comp2_REAL.sat/load_full_3_comp2_REAL.sat.R | 81b09305e1505a850d15dbb2f9400efaa31bf114 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 79 | r | load_full_3_comp2_REAL.sat.R | dfdcdd28c4dbdeecdd58c1eba350b79d load_full_3_comp2_REAL.sat.qdimacs 14689 45460 |
7486baec308d752d45f671377d3bf10b30a9950b | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /segmenTier/man/tsd.Rd | 9d6cbdf4fd934c52d3784df71df61c964a0ed2df | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 450 | rd | tsd.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segment.R
\docType{data}
\name{tsd}
\alias{tsd}
\title{Transcriptome time-series from budding yeast.}
\description{
Transcriptome time-series data from a region encompassing
four genes and a regulatory upstream non-coding RNA in budding yeast.
The data set is described in more detail in the publication
Machne, Murray & Stadler (2017) <doi:10.1038/s41598-017-12401-8>.
}
|
e0233bbd4cf4da3f47b299cf5dc33423181eb1d4 | a3568335e5a29070953d512b4241febc5462d9ef | /run_analysis.R | d401db0690a27cc53d18e64704f245a6d7b91b3c | [] | no_license | vifalcon/Getting-and-Cleaning-Data-Course-Project | d055470b62d1a45f638b00b9c7bcadc7bbaffd5b | 4b98dcf959b4518abaf1b03383666eef0f1aa1ba | refs/heads/main | 2023-01-14T16:29:55.605098 | 2020-11-23T23:51:19 | 2020-11-23T23:51:19 | 315,224,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,021 | r | run_analysis.R | test<-read.table("./UCI HAR Dataset/test/X_test.txt")
# Load the raw UCI HAR measurement matrices (the matching `test` set is read
# just above this line).
train<-read.table("./UCI HAR Dataset/train/X_train.txt")
#pull data from test set and training set
subject_test<-read.table("./UCI HAR Dataset/test/subject_test.txt")
subject_train<-read.table("./UCI HAR Dataset/train/subject_train.txt")
#pull subject id numbers
features<-read.table("./UCI HAR Dataset/features.txt")
#pull names for data from features
activities_test<-read.table("./UCI HAR Dataset/test/y_test.txt")
activities_train<-read.table("./UCI HAR Dataset/train/y_train.txt")
#activities numbered 1-6 that correspond with data
mergedset<-rbind(test,train)
#merge test set and training set into one data frame
names(mergedset)<-features[,2]
#match names of features to data
library(dplyr)
# Keep only mean()/std() measurements; drop the trailing angle(...) columns,
# which contain "mean" in their names but hold angles.
# NOTE(review): matches() is given a length-2 vector here -- older tidyselect
# versions accept only a single regex; confirm against the installed version.
culledset<-mergedset %>% select(matches(c("mean()", "std()"))) %>% select(-("angle(tBodyAccMean,gravity)":"angle(Z,gravityMean)"))
#these columns are the mean and standard deviation measurements for each measurement
#the last 6 columns include mean in their name but contain angles so they were excluded
#names of the columns are already in place from naming the mergedset cols with features
subjects<-rbind(subject_test,subject_train)
activities<-rbind(activities_test, activities_train)
#consolidate subject and activity data
# Map the 1-6 activity codes onto descriptive labels.
activitieslist<-c("Walking", "Walking Upstairs", "Walking Downstairs",
"Sitting", "Standing", "Laying")
activity<-activitieslist[activities[,1]]
culledsetmod<-cbind(subjects, activity, culledset)
names(culledsetmod)[1]<-"subjectID"
names(culledsetmod)<-gsub("-", "", names(culledsetmod))
names(culledsetmod)<-gsub("\\(", "", names(culledsetmod))
names(culledsetmod)<-gsub(")", "", names(culledsetmod))
#match subjects and activities with data, renames columns appropriately
#remove tricky characters
# Tidy output: one row per subject/activity, each measurement averaged.
tidydataset <- culledsetmod %>% group_by(subjectID, activity) %>% summarize_all(mean)
write.table(tidydataset, "./courseprojectdata.txt", row.names = FALSE)
#create text file containing the data set |
47e46ad4dab3dfc87e67605ba7ab53f715cbc431 | 3abfaa6cf0b52c5ff44e9c0aa7d842f73e2d876e | /scripts/buoys-validator/resources/r/ww3nctab2df.R | ca50f141e341da78c3c226aebbb5d7e80ab8d0d3 | [] | no_license | rchailan/mirmidon-toolbox | 0c272c4f7b7fd3db1d2a2a975b8a4e86876bb677 | d116b1e0c8c7faaa4dd99144ea4965a1fa64fd03 | refs/heads/master | 2021-05-29T01:03:44.384387 | 2015-04-23T17:10:45 | 2015-04-23T17:10:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 501 | r | ww3nctab2df.R | library(RNetCDF)
# Read a WaveWatch III station-table NetCDF file into a data frame with one
# hourly row per time step and one significant-wave-height (Hs) column per
# buoy, in the order the stations are stored in the file.
#
# Args:
#   file    : path to the NetCDF file.
#   station : kept for backward compatibility; currently unused.
#   start   : first timestamp of the hourly series (previously hard-coded to
#             "2011-01-01"); anything as.POSIXct() accepts, interpreted in GMT.
ww3nctab2df <- function (file, station = NULL, start = "2011-01-01") {
  nc.file <- open.nc(file)
  # BUG FIX: the NetCDF handle was never released; close it even on error.
  on.exit(close.nc(nc.file), add = TRUE)
  nc <- read.nc(nc.file)
  # Hourly time axis, one entry per stored time step (column order preserved:
  # date first, then one Hs column per station).
  t <- seq(from = as.POSIXct(start, tz = "GMT"),
           length.out = length(nc$time), by = "hour")
  df <- data.frame(date = t,
                   hs.espi = nc$hs[1, ],
                   hs.sete = nc$hs[2, ],
                   hs.leucate = nc$hs[3, ],
                   hs.banyuls = nc$hs[4, ],
                   hs.meteofranc = nc$hs[5, ])
  return(df)
}
339899911d12980d04c2ff5f5979e8ac5d45c51a | a006d51a2e2e09c48c9449c3cb5488bf35e41c87 | /association.R | 755c3f6d4dbe32f69a49cdf1c79913a6ee0b2b4b | [] | no_license | faith-hplc/eQTL-peptide | ee9931b525660ef7092a1b252463c548f4715069 | 3a54c37ef0fbf16ec69210548485f89bd8e69466 | refs/heads/main | 2023-06-12T12:45:20.623556 | 2021-07-04T07:38:49 | 2021-07-04T07:38:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,531 | r | association.R | library("rMVP")
# transform format of marker
# Convert the pruned VCF genotypes into rMVP's file-backed binary format;
# kinship and PCs are loaded from their own files below, so they are skipped.
# Full-featured function (Recommended)
MVP.Data(fileVCF="prune_ok.vcf",
#filePhe="Phenotype.txt",
fileKin=FALSE,
filePC=FALSE,
out="mvp"
)
# Only convert genotypes
#MVP.Data.VCF2MVP("myVCF.vcf", out='mvp') # the genotype data should be fully imputed before using this function
# transform format of K matrix
# read from file
MVP.Data.Kin("kinship.txt", out="mvp", priority='memory', sep='\t')
# calculate from mvp_geno_file
#MVP.Data.Kin(TRUE, mvp_prefix='mvp', out='mvp')
# transform format of Q matrix
# read from file
MVP.Data.PC("admixture.9.Q", out='mvp', sep='\t')
# calculate from mvp_geno_file
#MVP.Data.PC(TRUE, mvp_prefix='mvp', perc=1, pcs.keep=5)
# Input data
# Genotypes and kinship stay on disk as big.matrix objects; only the PC
# covariates are materialised in memory.
genotype <- attach.big.matrix("mvp.geno.desc")
phenotype <- read.table("zzz_phenotype_ok.txt",head=TRUE)
map <- read.table("mvp.geno.map" , head = TRUE)
Kinship <- attach.big.matrix("mvp.kin.desc")
Covariates_PC <- bigmemory::as.matrix(attach.big.matrix("mvp.pc.desc"))
#GWAS
# One GWAS per phenotype column (column 1 holds the sample IDs), fitting GLM,
# MLM and FarmCPU with the same PC covariates.  gc() frees the large per-trait
# temporaries between iterations.
for(i in 2:ncol(phenotype)){
imMVP <- MVP(
phe=phenotype[, c(1, i)],
geno=genotype,
map=map,
K=Kinship,
CV.GLM=Covariates_PC,
CV.MLM=Covariates_PC,
CV.FarmCPU=Covariates_PC,
#nPC.GLM=5,
#nPC.MLM=3,
#nPC.FarmCPU=3,
priority="speed",
ncpus=10,
vc.method="BRENT",
maxLoop=10,
method.bin="static",
#permutation.threshold=TRUE,
#permutation.rep=100,
threshold=0.05,
method=c("GLM", "MLM", "FarmCPU")
)
gc()
}
|
9c68f3d0acaeedb22dece1dc80ce3e0deb45f466 | 3228240f4a9b7aff3053fcbbcd95b93b1f3319b4 | /man/opendss.Rd | 12ec31620b9d762daf65bae0edd99eb9480f0641 | [
"MIT"
] | permissive | cameronbracken/DSS-Rip | 2528612fe7dbf3636c10ca143ff5eb0cb82916a1 | 58acfe05bd9d5d693c5b082b38ee791c1e6d63e3 | refs/heads/master | 2021-01-16T20:47:38.731272 | 2014-03-26T17:56:10 | 2014-03-26T17:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 205 | rd | opendss.Rd | \name{opendss}
\alias{opendss}
\title{opendss}
\usage{
opendss(filename)
}
\value{
stuff
}
\description{
Short description
}
\details{
Long Description
}
\note{
NOTE
}
\author{
Evan Heisman
}
|
156c2419e5dbeae557d9f3815333f286d9f71f72 | b76ed9b0617327c56d7e4387e8f249610b169c97 | /sesame_street_audience_code.R | 7671065e114ce7ef5eee1338782cd2d863e48b6a | [] | no_license | ngoyle1/DKC-Analytics-Help-Code | 35c31bdd20110d211356ffaa0d725145e389df94 | 716837d4bbd756e842749ad4ee8fc5d59fb5b0ad | refs/heads/master | 2021-01-23T05:04:17.347787 | 2017-05-30T19:43:17 | 2017-05-30T19:43:17 | 92,951,649 | 0 | 0 | null | 2017-05-31T13:59:53 | 2017-05-31T13:59:52 | null | UTF-8 | R | false | false | 355 | r | sesame_street_audience_code.R | remove(list = ls())
# Global options: keep strings as characters and suppress scientific notation.
# NOTE(review): the absolute setwd()/source() paths below make this script
# machine-specific; consider a project-relative setup.
options(stringsAsFactors = FALSE)
options(scipen = 999)
setwd("/Users/harrocyranka/Desktop/")
# Load helper scripts: Twitter analysis, audience estimation, package loading.
source("/Users/harrocyranka/Desktop/code/twitter_info_analysis_3.R")
source("/Users/harrocyranka/Desktop/code/estimate_audience.R")
source("/Users/harrocyranka/Desktop/code/script_to_load_packages.R")
# Estimate the Twitter audience for the @sesamestreet account.
p <- estimate_audience("sesamestreet")
98e70a32c15435f63295b789c65fdd4941e6f164 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/indelmiss/man/plot.indelmiss.Rd | 11ebc1281be8ee1b9865d5422b83af315e844220 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,279 | rd | plot.indelmiss.Rd | \name{plot.indelmiss}
\alias{plot.indelmiss}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot parameter estimates from the model fit
}
\description{
Plotting command for use on an object of class "indelmiss". Will draw two plots: one with the estimates for the rates and the other for the "missingness" parameter. plot.indelmiss() calls plotp and plotrates.
}
\usage{
\method{plot}{indelmiss}(x, model = NULL, ci = TRUE, cil = 95, ...)
}
\arguments{
\item{x}{An object of class "indelmiss".}
\item{model}{One of "M1", "M2", "M3", or "M4". }
\item{ci}{TRUE plots confidence intervals around the estimates.}
\item{cil}{Confidence interval level.}
\item{\dots}{Any further commands to plot.}
}
\author{
Utkarsh J. Dang and G. Brian Golding
\email{udang@binghamton.edu}}
\seealso{
See also \code{\link{indelrates}}, \code{\link{plotrates}} and \code{\link{plotp}}.
}
\examples{
indel <- indelrates(datasource = "simulation", seed = 1, taxa = 5,
mu = 1, nu = 5, phyl = 5000, pmiss = 0, toi = 1,
bgtype="ancestornodes", bg = c(7, 9),
zerocorrection = TRUE, rootprob="maxlik",
modelnames = c("M1", "M2", "M3", "M4"),
optmethod = "nlminb")
print(indel)
plot(indel, model="M4")
} |
8b16b5fcf1aca0f8c93506ae186c335ec66867dd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mice/examples/make.blocks.Rd.R | b97d52bc04d99a84a5e556908e0fcf7123ba9425 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 179 | r | make.blocks.Rd.R | library(mice)
### Name: make.blocks
### Title: Creates a 'blocks' argument
### Aliases: make.blocks
### ** Examples
make.blocks(nhanes)
make.blocks(c("age", "sex", "edu"))
|
ab158990c7aaaa1a9fd17a3a45ea2ea46f8ec3af | a84dbc7957827272acf07ce0875414d7d5763fe7 | /MLToolkit/Funcoes.R | fbd3b9304ed529473cf57ae5856cf7b4406b7d74 | [
"MIT"
] | permissive | jpcaico/Machine-Learning-Toolkit | 1d22ab88aebde519eeb7f415934dcda0c67ab4ca | d043957d56b61a02019f7e6e05f0d1a081c417b5 | refs/heads/master | 2020-06-04T22:19:24.153229 | 2019-07-02T04:29:16 | 2019-07-02T04:29:16 | 192,213,146 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,780 | r | Funcoes.R | #Sys.setlocale(locale = "portuguese")
####### gerais #################
# Interactive lollipop chart of variable importance for a classification model
# (caret::varImp), sorted ascending.
# NOTE(review): the ggplotly(p) result is discarded and plotly::style() is
# applied to the ggplot object `p` itself -- confirm this renders as intended.
graficarImportanciaClassificacao <- function(modelo){
data = data.frame(x = rownames(varImp(modelo)$importance),
y = varImp(modelo)$importance)
# Classification varImp can return one column per class; keep the first two.
data = data[c(1,2)]
colnames(data) = c("x", "y")
# Sort ascending and freeze the order as factor levels for the plot axis.
data = arrange(data, y)
data = mutate(data, x = factor(x, x))
p <- ggplot(data = data, aes(x = x, y = y)) +
geom_segment(aes(
x = x,
xend = x,
y = 0,
yend = y
),
color = "cyan",
size = 1) +
geom_point(color = "magenta",
size = 4,
alpha = 0.7) +
theme_light() +
#coord_filp() +
theme_bw() +
xlab("Variável") +
ylab("Importância (%)") +
ggtitle("Importância das variáveis") + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18),
plot.title = element_text(size=20, face = "bold")) + theme(axis.text.x = element_text(angle = 45))
ggplotly(p)
style(p, text = paste0("X: ", data$x ,"\n" ,"Y: ", data$y ))
}
# Interactive lollipop chart of variable importance for a regression model
# (caret::varImp), sorted ascending.
# NOTE(review): near-duplicate of graficarImportanciaClassificacao above --
# a shared helper would avoid the divergence; also, ggplotly(p) is discarded
# and style() is applied to the ggplot object directly.
graficarImportancia <- function(modelo){
data = data.frame(x = rownames(varImp(modelo)$importance),
y = varImp(modelo)$importance)
colnames(data) = c("x", "y")
# Sort ascending and freeze the order as factor levels for the plot axis.
data = arrange(data, y)
data = mutate(data, x = factor(x, x))
p <- ggplot(data = data, aes(x = x, y = y)) +
geom_segment(aes(
x = x,
xend = x,
y = 0,
yend = y
),
color = "cyan",
size = 1) +
geom_point(color = "magenta",
size = 4,
alpha = 0.7) +
theme_light() +
# coord_flip() +
theme_bw() +
xlab("Variável") +
ylab("Importância (%)") +
ggtitle("Importância das Variáveis") + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18),
plot.title = element_text(size=20, face = "bold")) + theme(axis.text.x = element_text(angle = 45))
ggplotly(p)
style(p, text = paste0("X: ", data$x ,"\n" ,"Y: ", data$y ))
}
##### graficos classificacao ###########
# Heat-map of a caret confusionMatrix object `m`: one tile per
# reference/prediction pair, labelled with the count and the within-reference
# percentage; fill intensity is log(Freq).
# NOTE(review): log(Freq) is -Inf for empty cells -- confirm ggplot renders
# those tiles acceptably for sparse matrices.
ggplotConfusionMatrix <- function(m){
# mytitle <- paste0('Matriz de Confusão Treino ',"Acurácia Geral: ", percent_format()(m$overall[1]),
# "Kappa Geral: ", percent_format()(m$overall[2]))
#
mytitle <- 'Matriz de Confusão Teste '
data_c <- mutate(group_by(as.data.frame(m$table), Reference ), percentage =
percent(Freq/sum(Freq)))
ggplot(data = data_c,
aes(x = Reference, y = Prediction)) +
geom_tile(aes(fill = log(Freq)), colour = "white") +
scale_fill_gradient(low = "white", high = "green") +
xlab("Referência") +
ylab("Classificação") +
geom_text(aes(x = Reference, y = Prediction, label = paste0(Freq,' (', percentage, ')') )) +
theme(legend.position = "none") +
ggtitle(mytitle) + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=20),
plot.title = element_text(size=20, face = "bold"))
}
# Decision-boundary plot: contour of the classifier's grid predictions
# (`predgrid` over `grid`) with the observed points from `teste_plot`
# coloured by their true class `y`.
# NOTE(review): the title says "Treino" but the points come from a parameter
# named teste_plot -- confirm which data set is intended.
graficarLimite <- function(grid, X,Y, y, teste_plot, predgrid){
ggplot(data=grid) + stat_contour(aes(x=grid[,X], y=grid[,Y], z=predgrid),
bins=2) +
geom_point(data=teste_plot, aes(x=teste_plot[,X], y=teste_plot[,Y], colour=as.factor(teste_plot[,y])),
size=4, alpha = 0.7)+
theme_bw() + xlab(X) + ylab(Y) + ggtitle("Limite de Decisão - Treino") + labs(title = "Limite de Decisão - Treino") +
theme(legend.title = element_blank()) + theme(legend.position ="bottom") + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=20),
plot.title = element_text(size=20, face = "bold"))
}
# Decision-region plot: every grid point coloured by its predicted class
# (`predgrid`), giving a filled view of the classifier's regions.
graficarSuperficie <- function(grid, X, Y, predgrid){
ggplot(data=grid) +
geom_point(aes(x=grid[,X], y=grid[,Y], colour=as.factor(predgrid))) +
theme_bw() + xlab(X) + ylab(Y) + ggtitle("Região de Decisão") + labs(title = "Região de Decisão")+
theme(legend.title = element_blank()) + theme(legend.position ="bottom") +
theme(
axis.text=element_text(size=12),
axis.title=element_text(size=20),
plot.title = element_text(size=20, face = "bold"))
}
###### graficos regressao #########
# One-row data frame with the model's coefficient point estimates (the
# "Estimate" row of summary()), rounded to 3 decimal places.
gerarTabelaCoeficientes <- function(modelo) {
  coefs <- summary(modelo)$coefficients
  # Transposing puts one coefficient per column; row 1 holds the estimates.
  tabela <- as.data.frame(t(coefs))
  round(tabela[1, ], 3)
}
# Interactive residual plot on the training set: fitted line in cyan, grey
# segments from each observation to its fitted value, and points sized and
# coloured by absolute residual (green = small, red = large).
graficarTreinoResidual <- function(modelo, data, X, Y){
residuals <- residuals(modelo)
predicted <- predict(modelo, newdata = data)
p <-
ggplot(data, aes(x = data[, X], y = data[, Y])) +
geom_line(aes(x = data[, X], y = predicted), color =
'cyan') +
geom_segment(aes(xend = data[, X], yend = predicted), alpha = .2) +
geom_point(aes(
color = abs(residuals),
size = abs(residuals)
)) +
scale_color_continuous(low = "green", high = "red") +
guides(color = FALSE, size = FALSE) +
# Hollow circles mark the fitted values themselves.
geom_point(aes(y = predicted), shape = 1) +
theme_bw() +
xlab(X) +
ylab(Y) +
ggtitle('Conjunto de Treino - Resíduos') + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18), plot.title = element_text(size=20, face = "bold" ))
ggplotly(p)
}
# Scatter plot of the test set for multiple-regression exploration.
# NOTE(review): ggplotly(p) is discarded and plotly::style() is applied to
# the ggplot object directly -- confirm this renders as intended.
graficarTesteMultiplo <- function(teste, X, Y){
p<- ggplot() + geom_point(aes(x = teste[, X], y = teste[, Y]),
colour = 'magenta') +
xlab(X) +
ylab(Y) +
theme_bw() + ggtitle('Gráfico de Dispersão') + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18),
plot.title = element_text(size=20, face = "bold"))
ggplotly(p)
style(p, text = paste0("X: ", teste[, X] ,"\n" ,"Y: ", teste[, Y] ))
}
# Test-set plot: test points in magenta, the regression line fitted on the
# training set in cyan, and dashed segments from each test point to its
# prediction.
# NOTE(review): ggplotly(p) is discarded and plotly::style() is applied to
# the ggplot object directly; the hover text also pairs test X values with
# predictions computed on `treino` -- confirm both are intended.
graficarTeste <- function(modelo, teste, treino, X, Y){
p<- ggplot() + geom_point(aes(x = teste[, X], y = teste[, Y]),
colour = 'magenta') +
geom_line(aes(
x = treino[, X],
y = predict(modelo, newdata = treino)
),
colour = 'cyan',
size = 1) +
geom_segment(
aes(x = teste[, X], y = teste[, Y]),
xend = teste[, X],
yend = predict(modelo, newdata = teste),
linetype = "dashed",
alpha=0.5
) +
xlab(X) +
ylab(Y) +
theme_bw() + ggtitle('Conjunto de Teste') + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18),
plot.title = element_text(size=20, face = "bold"))
ggplotly(p)
style(p, text = paste0("Pontos (Real):","\n", "X: ", teste[, X] ,"\n" ,"Y: ", teste[, Y],
"\n", "Linha (Predito):", "\n",
'X: ', teste[,X] ,"\n", 'Y: ',predict(modelo,newdata=treino)))
}
# Training-set plot: observed points in magenta with the fitted regression
# line in cyan.  Returns a (static) ggplot object, unlike the plotly-based
# siblings above.
graficarTreino <- function(modelo, treino, X, Y){
ggplot() +
geom_point(aes(x = treino[, X], y = treino[, Y]),
colour = 'magenta') +
geom_line(aes(
x = treino[, X],
y = predict(modelo, newdata = treino)
),
colour = 'cyan',
size = 1) +
xlab(X) +
ylab(Y) +
theme_bw() + ggtitle('Conjunto de Treino') + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18),
plot.title = element_text(size=20, face = "bold"))
}
# Faceted view of each predictor's relation to the response: predicted values
# in cyan, observed values in magenta, with segments joining them; one facet
# per variable in `iv`, each with its own x scale.
graficarMultiplos <- function(treino_resid, x, Y, predicted, iv){
ggplot(treino_resid, aes(x = x, y = treino_resid[, Y])) +
geom_point(aes(y = predicted), color = 'cyan', size = 0.5) +
geom_segment(aes(xend = x, yend = predicted), alpha = .4) +
geom_point(aes(y = treino_resid[, Y]), color = 'magenta') +
facet_grid( ~ iv, scales = 'free') + theme_bw() +
ylab(Y) + xlab('Variável') + ggtitle('Relação das Variáveis') + theme(
axis.text=element_text(size=12),
axis.title=element_text(size=18),
plot.title = element_text(size=20, face = "bold"))
}
|
d0e51d098f754ac088f80eb92439fc954d8d97ab | 11311aea4618398dc7a217241809e6fc48fca8e0 | /man/progress_phyllotaxis_update.Rd | ca671320f919d138eca30a3e93343755d7bea08c | [] | no_license | suharoschi/switchboard | 81a8a6b4f3dd9698ed207cd5ec122a0c7db7edd4 | 9fc25eb9e0facba3d8da971dff742bc9701aec0b | refs/heads/main | 2023-08-06T18:44:19.906946 | 2021-10-08T19:07:35 | 2021-10-08T19:07:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 521 | rd | progress_phyllotaxis_update.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/progress_phyllotaxis.R
\name{progress_phyllotaxis_update}
\alias{progress_phyllotaxis_update}
\title{Helper function that updates the canvas items of a progress_phyllotaxis widget}
\usage{
progress_phyllotaxis_update(
.switchboard,
eavesdrop,
maximum = 100,
size = 1,
updates = 100,
closeAtMaximum = FALSE,
...
)
}
\description{
Helper function that updates the canvas items of a progress_phyllotaxis widget.
}
\keyword{internal}
|
2af164583badd8a39d791e3ecea8ee52da2181ae | ec13d4854010cd663c736856def72cfc50fc0282 | /R/logitcalcLam.R | 57c259ef18120f0648f4ff04c0712a14152d78d9 | [] | no_license | dankessler/standGL | 66cae7f30b3f5be67fbfc7bbc480c6d3edb572fa | 615f2e2eb5e4fbca4fd979536da197899e440242 | refs/heads/master | 2021-05-27T14:26:27.912958 | 2011-02-23T00:00:00 | 2011-02-23T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | logitcalcLam.R | logitcalcLam <- function(y, Us, index, nlam = 20, min.lam = 0.05, is.pen = rep(1,length(unique(index)))){
# Per-group squared norm of U_i' (y - 0.5), normalized by group size.
# (y - 0.5 is the logistic-loss residual at an all-zero coefficient vector.)
# Assumes Us[[i]] and is.pen[i] are ordered to match unique(index) --
# TODO confirm against callers.
groups <- unique(index)
fitNorm <- rep(0,length(groups))
for(i in 1:length(groups)){
# unpenalized groups (is.pen == 0) keep fitNorm = 0 and never set lam.max
if(is.pen[i] != 0){
newInd <- which(index == groups[i])
fitNorm[i] <- sum((Us[[i]] %*% (t(Us[[i]]) %*% (y - 0.5)))^2)/length(newInd)
}
}
# largest candidate lambda, taken from the strongest penalized group signal
lam.max <- sqrt(max(fitNorm))
lam.min <- min.lam * lam.max
lam.list <- nlam:1
# nlam values equally spaced on the log scale, returned in decreasing order:
# exp(nlam*scale + shift) = lam.max down to exp(1*scale + shift) = lam.min
scale <- (log(lam.max)-log(lam.min))/(nlam - 1)
shift <- log(lam.min) - scale
lam.list <- exp(lam.list * scale + shift)
return(lam.list)
}
|
6e65c108d17d1c89090e2392d26498736f0baa1a | 66a965ce41523533820e85fa2765ee8acb05b6ae | /input_for_2calc.R | eca04e3496664f563a32012efb44739ed9317a4e | [
"CC-BY-4.0"
] | permissive | biodiversity-exploratories-synthesis/Synthesis_dataset_functions_grassland | 61f46e4549e4984528c6c2e24ea527cfae28d521 | 4e1779046d5b9623d6d3cd3aa6a54debe02f2ca2 | refs/heads/master | 2023-08-06T03:40:37.095985 | 2023-08-02T12:52:12 | 2023-08-02T12:52:12 | 181,692,075 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,706 | r | input_for_2calc.R | minimult <- raw_functions[, c("beta_Glucosidase", "N_Acetyl_beta_Glucosaminidase", "Xylosidase", "Urease", "DEA",
"Potential.nitrification2011", "Potential.nitrification2014","nifH","amoA_AOB.2011",
"amoA_AOA.2011", "amoA_AOB.2016", "amoA_AOA.2016", "nxrA_NS", "16S_NB", "P_loss2011",
"P_leaching_risk2015", "PRI", "SoilOrganicC", "Soil.C.stock", "Phosphatase")]
# correlation across the years
y <- cor(minimult, use="pairwise.complete.obs")
corrplot::corrplot(y, type="lower",addCoef.col = "black",method="color",diag=F, tl.srt=1, tl.col="black", mar=c(0,0,0,0), number.cex=0.6)
# take mean of multi-year-measurements
# (`:=` modifies minimult by reference; presumably raw_functions, and hence
# minimult, is a data.table -- confirm)
minimult[, "amoA_AOB" := apply(minimult[,c("amoA_AOB.2011", "amoA_AOB.2016")],1, mean)]
minimult[, "amoA_AOA" := apply(minimult[,c("amoA_AOA.2011", "amoA_AOA.2016")],1, mean)]
minimult[, "Potential.nitrification" := apply(minimult[, c("Potential.nitrification2014", "Potential.nitrification2011")], 1, mean)]
# drop the per-year columns now that the means exist
minimult[, c("amoA_AOB.2011", "amoA_AOB.2016", "amoA_AOA.2011", "amoA_AOA.2016", "Potential.nitrification2014", "Potential.nitrification2011") := NULL]
# NOTE(review): the initial column selection includes "P_loss2011" and
# "P_leaching_risk2015" but NOT "P_loss2015", so the next two lines reference a
# column that does not exist in minimult -- confirm whether
# "P_leaching_risk2015" was the intended second column here.
minimult[, "P_loss" := apply(minimult[,c("P_loss2011", "P_loss2015")],1, mean)]
minimult[, c("P_loss2011", "P_loss2015") := NULL]
# correlations
M <- cor(minimult, use="pairwise.complete.obs")
corrplot::corrplot(M,type="lower",addCoef.col = "black",method="color",diag=F, tl.srt=1, tl.col="black", mar=c(0,0,0,0),
number.cex=0.6, order = "hclust")
corrplot::corrplot(M, type = "upper", tl.col="black", tl.srt=40, order = "hclust")
# zero out correlations weaker than |0.6| before plotting
highM <- M
highM[M < 0.6 & M > -0.6] <- 0
corrplot::corrplot(highM, type = "upper", tl.col="black", tl.srt=40)
library(igraph)
par(mfrow=c(2,2))
# correlation networks at increasing |r| thresholds (0.4 to 0.7); `t` is a
# plain threshold variable here (it shadows base::t() only as a value --
# calls to t() still resolve to the function)
t <- 0.4
highM <- M
highM[M < t & M > -t] <- 0
network <- graph_from_adjacency_matrix(highM, weighted=T, mode="undirected", diag=F)
plot(network)
t <- 0.5
highM <- M
highM[M < t & M > -t] <- 0
network <- graph_from_adjacency_matrix(highM, weighted=T, mode="undirected", diag=F)
plot(network)
t <- 0.6
highM <- M
highM[M < t & M > -t] <- 0
network <- graph_from_adjacency_matrix(highM, weighted=T, mode="undirected", diag=F)
plot(network)
t <- 0.7
highM <- M
highM[M < t & M > -t] <- 0
network <- graph_from_adjacency_matrix(highM, weighted=T, mode="undirected", diag=F)
plot(network)
# pca
pca_minimult <- prcomp(na.omit(minimult), scale=T)
# plot(pca_minimult) # 1 very clear component, maybe 4 (cumul to 72%)
summary(pca_minimult)
library(ggfortify)
# biplot of the first two PCs with loading arrows
autoplot(pca_minimult, data = na.omit(minimult), loadings = TRUE, loadings.colour = 'black',
loadings.label = TRUE, loadings.label.size = 3, colour = "lightgray")
|
b907688def6f0072664b0990d1df6acb515d346a | a815407aeafc5224b546fbe905af757f40bed1d3 | /R/list_platforms.R | 9ddd131482bb0878840e54252f068d6a2b0d03c4 | [] | no_license | iaradsouza1/MicroarrayMethods | a51de58e2e8a527af3bd3f50e32eedc593024360 | e717ec81551daacebb9dfe804a761c29371139bb | refs/heads/master | 2021-11-24T09:03:44.411844 | 2021-10-29T17:27:44 | 2021-10-29T17:27:44 | 166,096,496 | 0 | 1 | null | 2020-06-25T23:34:12 | 2019-01-16T19:17:58 | R | UTF-8 | R | false | false | 1,325 | r | list_platforms.R | #' List microarray platforms
#'
#' List microarray platforms annotation available for organisms
#'
#' @param dataset ENSEMBL dataset name. A dataset name can be retrieved with list_datasets() function.
#' @param db One of the following ENSEMBL databases: "main" for ENSEMBL_MART_ENSEMBL database (the main database in ENSEMBL, provides information for the majority of species)
#' and "mouse" for ENSEMBL_MART_MOUSE database (which provides specific information of mouse strains)
#'
#' @return
#' @export
#'
#' @examples
list_platforms <- function(dataset, db = "main") {
  # Look up the microarray-platform attributes (Affymetrix / Agilent /
  # Illumina) available for an ENSEMBL dataset.
  #   dataset - ENSEMBL dataset name (see list_datasets())
  #   db      - "main" (ENSEMBL_MART_ENSEMBL) or "mouse" (ENSEMBL_MART_MOUSE)
  # Returns a data frame with the attribute name and description columns.
  #
  # The two original branches were identical except for the mart name, so the
  # mart is resolved once here; any other `db` value hits the unnamed default
  # and raises the original error.
  mart_name <- switch(db,
    main = "ENSEMBL_MART_ENSEMBL",
    mouse = "ENSEMBL_MART_MOUSE",
    stop("Provide an ensembl value: \n 'main': corresponding to ENSEMBL_MART_ENSEMBL database or 'mouse': corresponding to ENSEMBL_MART_MOUSE database")
  )
  ensembl <- biomaRt::useDataset(dataset = dataset, mart = biomaRt::useMart(mart_name))
  att <- biomaRt::listAttributes(ensembl)
  # keep only attributes whose description mentions a microarray vendor
  idx <- grep("affy|agilent|illumina", att$description, ignore.case = TRUE)
  att[idx, c(1, 2)]
}
|
8afa187ac0dfc153af8f6978102cb01093c62a7c | f3e02380300825ce90eaf94546fa2ab823943580 | /man/blink.Rd | b51576446d0e6739e9fe1340700c705165c69946 | [] | no_license | kirillseva/blink | 0ddbec67454049134afaf8129c0bb55fcb436837 | 5219221d2678405ea445a8f52bc47f1536ccd6c5 | refs/heads/master | 2021-01-10T08:26:39.067314 | 2015-12-17T22:17:34 | 2015-12-17T22:17:34 | 45,405,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | rd | blink.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/blink-package.R
\docType{package}
\name{blink}
\alias{blink}
\alias{blink-package}
\title{Describe the package.}
\description{
Describe the package.
}
|
f12ff04e076be6be1e2823a946ef7a8e51fe0330 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/spagmix/examples/rpoispoly.Rd.R | 14bdca272f7186cde9252f6f6727c306848e7e70 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 955 | r | rpoispoly.Rd.R | library(spagmix)
### Name: rpoispoly
### Title: Generate a Poisson point pattern in a polygonal window
### Aliases: rpoispoly
### ** Examples
# means of five bivariate-normal mixture components, one (x, y) per column
mn <- cbind(c(0.25,0.8),c(0.31,0.82),c(0.43,0.64),c(0.63,0.62),c(0.21,0.26))
# 2x2 covariance matrix for each component
v1 <- matrix(c(0.0023,-0.0009,-0.0009,0.002),2)
v2 <- matrix(c(0.0016,0.0015,0.0015,0.004),2)
v3 <- matrix(c(0.0007,0.0004,0.0004,0.0011),2)
v4 <- matrix(c(0.0023,-0.0041,-0.0041,0.0099),2)
v5 <- matrix(c(0.0013,0.0011,0.0011,0.0014),2)
# stack v1..v5 into a 2 x 2 x 5 array of covariance matrices
vr <- array(NA,dim=c(2,2,5))
for(i in 1:5) vr[,,i] <- get(paste("v",i,sep=""))
# Gaussian-mixture intensity surface over the toy window
intens <- sgmix(mean=mn,vcv=vr,window=toywin,p0=0.1,int=500)
# simulate Poisson point patterns from the same intensity three ways
aa <- rpoispp(intens) # Default spatstat function
bb <- rpoispoly(intens) # No polygon supplied; just uses pixel union
cc <- rpoispoly(intens,w=toywin) # Original irregular polygon
# 2x2 panel: intensity (log scale) plus the three simulated patterns
par(mfrow=c(2,2))
plot(intens,log=TRUE)
plot(aa,main=paste("aa\nn =",npoints(aa)))
plot(bb,main=paste("bb\nn =",npoints(bb)))
plot(cc,main=paste("cc\nn =",npoints(cc)))
|
13d3e5e75484415776cf048721707da892845c07 | 5f6d248fa755b953f40ac5bad5401d3985ef37ca | /Graphing/Old/Graphs_differences.r | 10394e6ee4380441dd8ebab8312b7d8b0618806e | [
"CC0-1.0"
] | permissive | LizEve/SquamateLikelihoodRatios | 411dc42882a16c333794292fdafac346fd52a571 | cc90c66832ca6492b52000b7b1fc4fb8fc7994d3 | refs/heads/main | 2023-08-13T12:00:57.835196 | 2021-10-11T21:43:44 | 2021-10-11T21:43:44 | 343,491,251 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,255 | r | Graphs_differences.r | rm(list=ls())
library(dplyr)
library(ggplot2)
library(tidyr)
library(ggpubr)
library("viridis")
# set dataset
#dataset <- "SinghalOG"
dataset <- "Streicher"
setwd(paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset, sep=''))
# Read in data
# loads the per-locus support tables (mLGL: dGLS, mLBF: Bayes factors)
load(paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset,"/Calcs_",dataset,".RData", sep=''))
# Colors
# https://www.w3schools.com/colors/colors_picker.asp
color_S <- "orange"
color_TP <- "springgreen4"
color_AI <- "#2BB07FFF"
color_SA <- "#38598CFF"
color_SI <- "yellow4" # 8b8b00
# Add column to name type of support
# (TvS != 'a' is effectively always TRUE for numeric TvS, so this just tags
# every row with the measure name)
mLGL <- mutate(mLGL, supportType = case_when(TvS != 'a' ~ "dGLS"))
mLBF <- mutate(mLBF, supportType = case_when(TvS != 'a' ~ "BF"))
# Get total number of loci
loci <- length(mLGL$Locus)
# Change from 2ln(BF) to ln(BF) by dividing all comparisons by 2 7-10 - not including average calcs
#names(mLBF[7:10])
#mLBF[7:10] <- mLBF[7:10]/2
# for singhal OG
# NOTE(review): dataset is set to "Streicher" above, yet the active rescale is
# the Singhal-OG variant (column 4 only) while the Streicher variant
# (columns 7:10) is commented out -- confirm the toggle matches the dataset.
names(mLBF[4])
mLBF[4] <- mLBF[4]/2
# Want to stack datasets so GL and BF columns have both comparisons for each hypothesis.
# Pull one pairwise-support column out of `df`, rename it to `h`, tag every
# row with the source comparison `h1`, and multiply by `s1` to flip the
# direction when needed (e.g. stored AIvSA but SAvAI is wanted).
#   df       - per-locus support table
#   h        - new name for the extracted support column
#   h1       - name of the source column in `df`
#   s1       - +1 to keep the stored direction, -1 to reverse it
#   keepCols - indices of the locus / support-type columns to carry along
reformatdf <- function(df, h, h1, s1, keepCols) {
  src <- match(h1, names(df))
  out <- df[, c(keepCols, src)]
  names(out)[names(out) == h1] <- h
  out$Hypothesis <- rep(h1, nrow(out))
  # flip the sign of the support values when the caller asked for the
  # opposite comparison direction
  out[[h]] <- out[[h]] * s1
  return(out)
}
# reformat
AI.g <- bind_rows(reformatdf(mLGL,"AI","AIvSA",1,c(1,13)),
reformatdf(mLGL,"AI","AIvSI",1,c(1,13)))
SA.g <- bind_rows(reformatdf(mLGL,"SA","AIvSA",-1,c(1,13)),
reformatdf(mLGL,"SA","SAvSI",1,c(1,13)))
SI.g <- bind_rows(reformatdf(mLGL,"SI","AIvSI",-1,c(1,13)),
reformatdf(mLGL,"SI","SAvSI",-1,c(1,13)))
AI.b <- bind_rows(reformatdf(mLBF,"AI","AIvSA",1,c(1,13)),
reformatdf(mLBF,"AI","AIvSI",1,c(1,13)))
SA.b <- bind_rows(reformatdf(mLBF,"SA","AIvSA",-1,c(1,13)),
reformatdf(mLBF,"SA","SAvSI",1,c(1,13)))
SI.b <- bind_rows(reformatdf(mLBF,"SI","AIvSI",-1,c(1,13)),
reformatdf(mLBF,"SI","SAvSI",-1,c(1,13)))
TS.g <- reformatdf(mLGL,"TS","TvS",1,c(1,13)) %>% select(-SIvAvg)
TS.b <- reformatdf(mLBF,"TS","TvS",1,c(1,13)) %>% select(-SIvAvg)
# Singhal OG
#TS.g <- reformatdf(mLGL,"TS","TvS",1,c(1,3))
#TS.b <- reformatdf(mLBF,"TS","TvS",1,c(1,3))
# MERGE
AI.a <- left_join(AI.g, AI.b, by=c("Locus","Hypothesis"), suffix = c(".g", ".b"))
SA.a <- left_join(SA.g, SA.b, by=c("Locus","Hypothesis"), suffix = c(".g", ".b"))
SI.a <- left_join(SI.g, SI.b, by=c("Locus","Hypothesis"), suffix = c(".g", ".b"))
TS.a <- left_join(TS.g, TS.b, by=c("Locus","Hypothesis"), suffix = c(".g", ".b"))
rm(AI.g,AI.b,SA.b,SA.g,SI.b,SI.g,TS.b,TS.g)
library(xlsx)
fname <- paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset,"/",dataset,"_TvSLoci.xlsx",sep='')
#write.xlsx2(as.data.frame(TS.a), file=fname, sheetName="loci", row.names=FALSE)
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Quantiles ---------------------------------------------- Scatter Quantiles --------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Calcs------------------------------------------------------------------------------------------------------------------------------------------------------
# Adding two rows that give the difference in percentiles and rank between bf and gl
# This will give the same rank number if the value is identicle in two cells -
# hint - decile scatter is pointless, deleted from script
# Rank difference needs to be calulated differently because the total rank spots for df and gl are different
# Normalizing rank by calculating max rank by hypothesis - rank.g/max(rank.g) - using these fractions to compare bf and gl
# Now rank difference is the percent difference? lets just call it normalized difference
AI.p <- AI.a %>%
group_by(Hypothesis) %>%
mutate(per.g = ntile(desc(AI.g),100)) %>%
mutate(rank.g = dense_rank(desc(AI.g))) %>%
mutate(per.b = ntile(desc(AI.b),100)) %>%
mutate(rank.b =dense_rank(desc(AI.b))) %>%
mutate(per.diff = abs(per.g-per.b)) %>%
mutate(rank.diff = abs((rank.g/max(rank.g))-(rank.b/max(rank.b)))*100)
SA.p <- SA.a %>%
group_by(Hypothesis) %>%
mutate(per.g = ntile(desc(SA.g),100)) %>%
mutate(rank.g =dense_rank(desc(SA.g))) %>%
mutate(per.b = ntile(desc(SA.b),100)) %>%
mutate(rank.b =dense_rank(desc(SA.b))) %>%
mutate(per.diff = abs(per.g-per.b)) %>%
mutate(rank.diff = abs((rank.g/max(rank.g))-(rank.b/max(rank.b)))*100)
SI.p <- SI.a %>%
group_by(Hypothesis) %>%
mutate(per.g = ntile(desc(SI.g),100)) %>%
mutate(rank.g = dense_rank(desc(SI.g))) %>%
mutate(per.b = ntile(desc(SI.b),100)) %>%
mutate(rank.b = dense_rank(desc(SI.b))) %>%
mutate(per.diff = abs(per.g-per.b)) %>%
mutate(rank.diff = abs((rank.g/max(rank.g))-(rank.b/max(rank.b)))*100)
TS.p <- TS.a %>%
group_by(Hypothesis) %>%
mutate(per.g = ntile(desc(TS.g),100)) %>%
mutate(rank.g =dense_rank(desc(TS.g))) %>%
mutate(per.b = ntile(desc(TS.b),100)) %>%
mutate(rank.b =dense_rank(desc(TS.b))) %>%
mutate(per.diff = abs(per.g-per.b)) %>%
mutate(rank.diff = abs((rank.g/max(rank.g))-(rank.b/max(rank.b)))*100)
# Graph Function ------------------------------------------------------------------------------------------------------------------------------------------------------
# Scatter of per-locus dGLS (x) vs ln(BF) (y) support, colored by the
# disagreement measure between the two.
#   df           - data frame holding columns "<l>.g", "<l>.b" and "<l>.diff"
#   x.tic, y.tic - numeric vectors used both as axis limits and tick breaks
#   legtit       - legend title
#   l            - column prefix selecting which measure to draw
#                  (e.g. "rank" or "per")
# NOTE(review): a second SC() with a different signature is defined later in
# this script and masks this one once the script is sourced past that point.
SC <- function(df,x.tic,y.tic,legtit,l){
# defining columns here to avoid repeating this over and over
xcol <- match(paste(l,".g",sep = ''),names(df))
ycol <- match(paste(l,".b",sep = ''),names(df))
CC <- match(paste(l,".diff",sep = ''),names(df))
scat <- ggplot(df, aes(x=df[[xcol]],y=df[[ycol]])) +
geom_point(alpha=1, aes(color=df[[CC]]), size=1) + theme_bw() + theme(panel.border = element_blank()) +
theme_classic() +
theme(
axis.text = element_text(size=6, color="black"),
text = element_text(size=10),
panel.border = element_blank(),
panel.background = element_rect(fill = "transparent"), # bg of the panel
plot.background = element_rect(fill = "transparent", color = NA), # bg of the plot
panel.grid = element_blank(), # get rid of major grid
plot.title = element_text(hjust = 0.5)
) +
coord_cartesian(ylim=y.tic,xlim = x.tic) +
scale_y_continuous(breaks = y.tic) +
scale_x_continuous(breaks = x.tic) +
labs(x=paste('dGLS ',l, sep=''),y=paste('ln(BF) ',l,sep=''),color=legtit) +
guides(colour = guide_legend(override.aes = list(size=2))) +
# viridis color scale fixed to [0, 100], reversed so large differences are dark
scale_color_viridis(limits=c(0,100),direction = -1,option='D')
#scale_color_gradientn(colors=heat.colors(2),limits=c(0,100))
#scale_color_gradient2(low="blue", high="red",midpoint=mid)
return(scat)
}
# All Rank #------------------------------------------------------------------------------------------------------------------------------------------------------
# BF vs dGLS colored by normalized difference in rank between dGLS and BF
# Each comparison has a graph. AIvSA, AIvSI, SAvSI, TvS
# Get scale for x and y axes
df1 <- AI.p[AI.p$Hypothesis=='AIvSA',]
df2 <- AI.p[AI.p$Hypothesis=='AIvSI',]
df3 <- SA.p[SA.p$Hypothesis=='SAvSI',]
df4 <- TS.p
max(c(df2$rank.b,df2$rank.g))
max(c(df3$rank.b,df3$rank.g))
max(c(df4$rank.b,df4$rank.g))
# Tic marks
t.rx <- seq(0,3000,500)
t.ry <- seq(0,3000,500)
ts.rx <- seq(0,5000,500)
ts.ry <- seq(0,5000,500)
#values,tick marks,column for values to color, legend title, rank/decile for axis labels
p1.a <- SC(df1,t.rx,t.ry,"normalized \n difference \n in rank","rank") + ggtitle(df1$Hypothesis)
p2.a <- SC(df2,t.rx,t.ry,"normalized \n difference \n in rank","rank") + ggtitle(df2$Hypothesis)
p3.a <- SC(df3,t.rx,t.ry,"normalized \n difference \n in rank","rank") + ggtitle(df3$Hypothesis)
p4.a <- SC(df4,ts.rx,ts.ry,"normalized \n difference \n in rank","rank") + ggtitle(df4$Hypothesis)
p.a <- ggarrange(p1.a, p2.a, p3.a, p4.a, ncol=2, nrow=2, align="h")
#quartz()
p.a
#ggsave(paste(dataset,"_scatter_rank_diff.pdf",sep=""), plot=p.a,width = 9, height = 6, units = "in", device = 'pdf',bg = "transparent")
#ggsave(paste(dataset,"_scatter_rank_diff.pdf",sep=""), plot=p4.a,width = 9, height = 6, units = "in", device = 'pdf',bg = "transparent")
# All Percentiles #------------------------------------------------------------------------------------------------------------------------------------------------------
# BF vs dGLS colored by normalized difference in rank between dGLS and BF
# Each comparison has a graph. AIvSA, AIvSI, SAvSI, TvS
# Tic marks
t.rx <- seq(0,100,20)
t.ry <- seq(0,100,20)
ts.rx <- seq(0,100,20)
ts.ry <- seq(0,100,20)
#values,tick marks,column for values to color, legend title, per/decile for axis labels
r1.a <- SC(df1,t.rx,t.ry,"difference in \n percentile","per") + ggtitle(df1$Hypothesis)
r2.a <- SC(df2,t.rx,t.ry,"difference in \n percentile","per") + ggtitle(df2$Hypothesis)
r3.a <- SC(df3,t.rx,t.ry,"difference in \n percentile","per") + ggtitle(df3$Hypothesis)
r4.a <- SC(df4,ts.rx,ts.ry,"difference in \n percentile","per") + ggtitle(df4$Hypothesis)
r.a <- ggarrange(r1.a, r2.a, r3.a, r4.a, ncol=2, nrow=2, align="h")
#quartz()
r.a
#ggsave(paste(dataset,"_scatter_percentile_diff.pdf",sep=""), plot=r.a,width = 9, height = 6, units = "in", device = 'pdf',bg = "transparent")
#ggsave(paste(dataset,"_scatter_percentile_diff.pdf",sep=""), plot=r4.a,width = 9, height = 6, units = "in", device = 'pdf',bg = "transparent")
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compare hypotheses ------------------------------------ Loci across hypotheses --------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Compare hypotheses to see if same loci give same support across hypotheses. ie, if it supports AI in AIvSI will it support it in AIvSA
# Graph Function------------------------------------------------------------------------------------------------------------------------------------------------------
# Scatter used for the "same locus across hypotheses" panels: y.val against
# x.val, points colored by a categorical support code, legend suppressed.
#   df           - data frame supplying the color column
#   x.val, y.val - vectors of support values to plot, aligned with df rows
#   x.tic, y.tic - shared axis limits / tick breaks
#   cc           - color vector passed to scale_color_manual
#   x.y          - length-3 character vector: x label, y label, and the name
#                  of the df column holding the support code used for color
SL <- function(df,x.val,y.val,x.tic,y.tic,cc,x.y){
scat <- ggplot(df, aes(x=x.val,y=y.val)) +
geom_point(alpha=1, aes(color=as.factor(df[[x.y[3]]])), size=0.5) + theme_bw() + theme(panel.border = element_blank()) +
theme_classic() +
theme(
axis.text = element_text(size=10, color="black"),
text = element_text(size=12),
panel.border = element_blank(),
panel.background = element_rect(fill = "transparent"), # bg of the panel
plot.background = element_rect(fill = "transparent", color = NA), # bg of the plot
panel.grid = element_blank(), # get rid of major grid
plot.title = element_text(hjust = 0.5),
legend.position = 'none'
) +
coord_cartesian(ylim=y.tic,xlim = x.tic) +
scale_y_continuous(breaks = y.tic) +
scale_x_continuous(breaks = x.tic) +
labs(x=x.y[1],y=x.y[2]) +
scale_color_manual(values=cc)
return(scat)
}
# dGLS-------------------------------------------------------- Loci across hypotheses ----------------------------------------------------------------------------------------------
# Set up hypotheses with correct directionality and add codes for one/both/neither with support for hypoth
GL <- mLGL %>%
select(Locus,TvS,AIvSA,AIvSI,SAvSI) %>%
mutate(SAvAI=AIvSA*-1) %>%
mutate(SIvAI=AIvSI*-1) %>%
mutate(SIvSA=SAvSI*-1) %>%
mutate(AI = case_when(AIvSI < 0.5 & AIvSA < 0.5 ~ 0,
AIvSI >= 0.5 & AIvSA >= 0.5 ~ 2,
AIvSI >= 0.5 & AIvSA < 0.5 ~ 1,
AIvSI < 0.5 & AIvSA >= 0.5 ~ 1)) %>%
mutate(SA = case_when(SAvSI < 0.5 & SAvAI < 0.5 ~ 0,
SAvSI >= 0.5 & SAvAI >= 0.5 ~ 2,
SAvSI >= 0.5 & SAvAI < 0.5 ~ 1,
SAvSI < 0.5 & SAvAI >= 0.5 ~ 1)) %>%
mutate(SI = case_when(SIvSA < 0.5 & SIvAI < 0.5 ~ 0,
SIvSA >= 0.5 & SIvAI >= 0.5 ~ 2,
SIvSA >= 0.5 & SIvAI < 0.5 ~ 1,
SIvSA < 0.5 & SIvAI >= 0.5 ~ 1))
# Graph
df <- GL
max(c(abs(df$AIvSA),abs(df$AIvSI),abs(df$SAvSI)))
lower <- 170 # 45
upper <- 170
x.t <- seq(-lower,upper,50) # 15
y.t <- seq(-lower,upper,50)
df <- GL
x.v1 <- GL$AIvSA
y.v1 <- GL$AIvSI
x.y1 <- c("AIvSA","AIvSI","AI")
cc1 <- c('grey56','#1e7b59','#46d29f')
quartz()
ai.g <- SL(df,x.v1,y.v1,x.t,y.t,cc1,x.y1) + geom_hline(yintercept=c(0.5),color=c("black"), linetype="dashed", size=0.25) +
geom_vline(xintercept=c(0.5),color=c("black"), linetype="dashed", size=0.25)
x.v2 <- GL$SAvAI
y.v2 <- GL$SAvSI
x.y2 <- c("SAvAI","SAvSI","SA")
cc2 <- c('grey56','#243a5b','#4975b6')
sa.g <- SL(df,x.v2,y.v2,x.t,y.t,cc2,x.y2) + geom_hline(yintercept=c(0.5),color=c("black"), linetype="dashed", size=0.25) +
geom_vline(xintercept=c(0.5),color=c("black"), linetype="dashed", size=0.25)
x.v3 <- GL$SIvAI
y.v3 <- GL$SIvSA
x.y3 <- c("SIvAI","SIvSA","SI")
cc3 <- c('grey56','#4d4d00','#cccc00')
si.g <- SL(df,x.v3,y.v3,x.t,y.t,cc3,x.y3) + geom_hline(yintercept=c(0.5),color=c("black"), linetype="dashed", size=0.25) +
geom_vline(xintercept=c(0.5),color=c("black"), linetype="dashed", size=0.25)
g <- annotate_figure(ggarrange(ai.g,sa.g,si.g, ncol=1, nrow=3, align="v",labels="auto"),
top = text_grob("dGLS", color = "black", size = 16))
g
# BF --------------------------------------------------------- Loci across hypotheses ---------------------------------------------------------------------------------------------
# Set up hypotheses with correct directionality and add codes for one/both/neither with support for hypoth
BF <- mLBF %>%
select(Locus,TvS,AIvSA,AIvSI,SAvSI) %>%
mutate(SAvAI=AIvSA*-1) %>%
mutate(SIvAI=AIvSI*-1) %>%
mutate(SIvSA=SAvSI*-1) %>%
mutate(AI = case_when(AIvSI < 5 & AIvSA < 5 ~ 0,
AIvSI >= 5 & AIvSA >= 5 ~ 2,
AIvSI >= 5 & AIvSA < 5 ~ 1,
AIvSI < 5 & AIvSA >= 5 ~ 1)) %>%
mutate(SA = case_when(SAvSI < 5 & SAvAI < 5 ~ 0,
SAvSI >= 5 & SAvAI >= 5 ~ 2,
SAvSI >= 5 & SAvAI < 5 ~ 1,
SAvSI < 5 & SAvAI >= 5 ~ 1)) %>%
mutate(SI = case_when(SIvSA < 5 & SIvAI < 5 ~ 0,
SIvSA >= 5 & SIvAI >= 5 ~ 2,
SIvSA >= 5 & SIvAI < 5 ~ 1,
SIvSA < 5 & SIvAI >= 5 ~ 1))
# Graph
max(c(abs(BF$AIvSA),abs(BF$AIvSI),abs(BF$SAvSI)))
lower <- 90
upper <- 90
x.t <- seq(-lower,upper,20)
y.t <- seq(-lower,upper,20)
df2 <- BF
x.v4 <- BF$AIvSA
y.v4 <- BF$AIvSI
x.y4 <- c("AIvSA","AIvSI","AI")
cc4 <- c('grey56','#1e7b59','#46d29f')
#quartz()
ai.b <- SL(df2,x.v4,y.v4,x.t,y.t,cc4,x.y4) + geom_hline(yintercept=c(5),color=c("black"), linetype="dashed", size=0.25) +
geom_vline(xintercept=c(5),color=c("black"), linetype="dashed", size=0.25)
ai.b
x.v5 <- BF$SAvAI
y.v5 <- BF$SAvSI
x.y5 <- c("SAvAI","SAvSI","SA")
cc5 <- c('grey56','#243a5b','#4975b6')
sa.b <- SL(df2,x.v5,y.v5,x.t,y.t,cc5,x.y5) + geom_hline(yintercept=c(5),color=c("black"), linetype="dashed", size=0.25) +
geom_vline(xintercept=c(5),color=c("black"), linetype="dashed", size=0.25)
x.v6 <- BF$SIvAI
y.v6 <- BF$SIvSA
x.y6 <- c("SIvAI","SIvSA","SI")
cc6 <- c('grey56','#4d4d00','#cccc00')
si.b <- SL(df2,x.v6,y.v6,x.t,y.t,cc6,x.y6) + geom_hline(yintercept=c(5),color=c("black"), linetype="dashed", size=0.25) +
geom_vline(xintercept=c(5),color=c("black"), linetype="dashed", size=0.25)
b <- annotate_figure(ggarrange(ai.b,sa.b,si.b, ncol=1, nrow=3, align="v",labels=c("d","e","f")),
top = text_grob("ln(BF)", color = "black", size = 16))
b
f <- ggarrange(g,b, ncol=2, nrow=1, align="v")
f
#ggsave(paste(dataset,"_scatter_loci_compare.pdf",sep=""), plot=f,width = 9, height = 12, units = "in", device = 'pdf',bg = "transparent")
#ggsave(paste(dataset,"_scatter_loci_comparezoom.pdf",sep=""), plot=f,width = 9, height = 12, units = "in", device = 'pdf',bg = "transparent")
#------------------------------------------------------------------------------------------------------------------------------------------------------
# CSV files ------------------------------------ CSV files --------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Smash data ----------------------------------------- Significant disagree CSV------------------------------------------------------------------------------------------------------------
# Look at "significance" for dGLS and BF. Record which loci conflict
# Create dataframe with all metadata, locus names, one col for hypothesis, one call for bf, and one gl
keep <- c(1,seq(14,22))
B <- bind_rows(reformatdf(mLBF,"BF","AIvSA",1,keep),
reformatdf(mLBF,"BF","AIvSI",1,keep),
reformatdf(mLBF,"BF","SAvSI",1,keep),
reformatdf(mLBF,"BF","TvS",1,keep))
G <- bind_rows(reformatdf(mLGL,"GL","AIvSA",1,keep),
reformatdf(mLGL,"GL","AIvSI",1,keep),
reformatdf(mLGL,"GL","SAvSI",1,keep),
reformatdf(mLGL,"GL","TvS",1,keep))
# for SinghalOG
#keep <- c(1,seq(5,13))
#B <- reformatdf(mLBF,"BF","TvS",1,keep)
#G <- reformatdf(mLGL,"GL","TvS",1,keep)
metaData <- c("MEAN_COL_SCORE","Sequences","Columns","Dist_Pat","Pars_Info","Sing_Sites","Cons_Sites" ,"Chi2_Fail","Gaps_Ambig")
all <- left_join(B, G, by=c("Locus","Hypothesis",metaData), suffix = c(".g", ".b"))
# Classify discordance ----------------------------------------- Significant disagree CSV------------------------------------------------------------------------------------------------------------
# neu.s both neutral in same direction
# neu.o both neutral in opposite directions
# sig.s both sig in same direction
# sig.o both sig in opposite directions
# neuG.sigB.s dgls neutral, bf significant - both same direction
# neuB.sigG.s bf neutral, dgls significant - both same direction
# neuG.sigB.o dgls neutral, bf significant - in opposite directions
# neuB.sigG.o bf neutral, dgls significant - in opposite directions
# can cluster by same or opposite direction if needed.
a <- all %>% mutate(sig = case_when(between(BF,0,5) & between(GL,0,0.5) ~ 'neu.s',
between(BF,-5,0) & between(GL,-0.5,0) ~ 'neu.s',
between(BF,0,5) & between(GL,-0.5,0) ~ 'neu.o',
between(BF,-5,0) & between(GL,0,0.5) ~ 'neu.o',
BF >= 5 & GL >= 0.5 ~ 'sig.s',
BF <= -5 & GL <= -0.5 ~ 'sig.s',
BF <= -5 & GL >= 0.5 ~ 'sig.o',
BF >= 5 & GL <= -0.5 ~ 'sig.o',
BF <= -5 & between(GL,-0.5,0) ~ 'neuG.sigB.s',
BF >= 5 & between(GL,0,0.5) ~ 'neuG.sigB.s',
GL <= -0.5 & between(BF,-5,0) ~ 'neuB.sigG.s',
GL >= 0.5 & between(BF,0,5) ~ 'neuB.sigG.s',
BF <= -5 & between(GL,0,0.5) ~ 'neuG.sigB.o',
BF >= 5 & between(GL,-0.5,0) ~ 'neuG.sigB.o',
GL <= -0.5 & between(BF,0,5) ~ 'neuB.sigG.o',
GL >= 0.5 & between(BF,-5,0) ~ 'neuB.sigG.o'))
# Classify discordance ----------------------------------------- Rank/Percentile disagree CSV------------------------------------------------------------------------------------------------------------
# calculate percentage for whole dataset
# add normalization to rank difference calculation
p <- a %>%
group_by(Hypothesis) %>%
mutate(per.g = ntile(desc(GL),100)) %>%
mutate(rank.g = dense_rank(desc(GL))) %>%
mutate(per.b = ntile(desc(BF),100)) %>%
mutate(rank.b = dense_rank(desc(BF))) %>%
mutate(per.diff = (per.g-per.b)) %>%
mutate(rank.diff = ((rank.g/max(rank.g))-(rank.b/max(rank.b)))*100)
# Rank diff - g - b. positive - higher G. negative - higher B
# same - within 50% of rank/percentile
# rank.b - b is more than 50% greater than g in terms of rank
# rank.g - g is more than 50% greater than b in terms of rank
# per.b - b is more than 50 percentiles greater than g
# per.g - g is more than 50 percentiles greater than b
# Add columns for whether normalized b and g rank or percentile difference is larger than half the total ranks
p <- p %>% mutate(rank = case_when(rank.diff >= 50 ~ 'rank.g',
rank.diff <= -50 ~ 'rank.b',
between(rank.diff,-50,50) ~ 'same')) %>%
mutate(per = case_when(per.diff >= 50 ~ 'per.g',
per.diff <= -50 ~ 'per.b',
between(per.diff,-50,50) ~ 'same'))
# Summarize number of loci for each type of discordance
s.sum <- bind_rows(
p %>% group_by(sig) %>% dplyr::summarise(Hypothesis='ALL',n.loci=n()),
p %>% group_by(sig,Hypothesis) %>% dplyr::summarise(n.loci=n()) %>%
arrange(sig,Hypothesis)) %>%
mutate(prop.loci= case_when(Hypothesis == "ALL" ~ round((n.loci/(loci*4))*100,digits=2),
Hypothesis != "ALL" ~ round((n.loci/loci)*100,digits = 2)))
r.sum <- bind_rows(
p %>% group_by(rank) %>% summarise(Hypothesis='ALL',n.loci=n()),
p %>% group_by(rank,Hypothesis) %>% summarise(n.loci=n()) %>%
arrange(rank,Hypothesis)) %>%
mutate(prop.loci= case_when(Hypothesis == "ALL" ~ round((n.loci/(loci*4))*100,digits=2),
Hypothesis != "ALL" ~ round((n.loci/loci)*100,digits = 2)))
p.sum <- bind_rows(
p %>% group_by(per) %>% summarise(Hypothesis='ALL',n.loci=n()),
p %>% group_by(per,Hypothesis) %>% summarise(n.loci=n()) %>%
arrange(per,Hypothesis)) %>%
mutate(prop.loci= case_when(Hypothesis == "ALL" ~ round((n.loci/(loci*4))*100,digits=2),
Hypothesis != "ALL" ~ round((n.loci/loci)*100,digits = 2)))
# Summarize ----------------------------------------- Disagree CSV------------------------------------------------------------------------------------------------------------
# List loci and the number of hypotheses they have issues across. ie issue for all hypotheses or only one.
y1 <- p %>%
filter(!sig %in% c('neu.s','sig.s')) %>%
group_by(Locus) %>%
summarize(n.h.sig=n_distinct(Hypothesis))
y2 <- p %>%
filter(!rank %in% c('same')) %>%
group_by(Locus) %>%
summarize(n.h.rank=n_distinct(Hypothesis))
y3 <- p %>%
filter(!per %in% c('same')) %>%
group_by(Locus) %>%
summarize(n.h.per=n_distinct(Hypothesis))
y <- full_join(full_join(y1,y2,by='Locus'),y3,by='Locus')
# Output ----------------------------------------- Disagree CSV------------------------------------------------------------------------------------------------------------
# Want to out put list of loci - y, and summary tables - r,p,s.sum
library(xlsx)
fname <- paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset,"/",dataset,"_disagreeingLociSummary.xlsx",sep='')
x <- full_join(p,y,by="Locus") %>% filter(rank.diff<=-10| rank.diff>=10)
x$rank.diff.abs <- abs(x$rank.diff)
x$Locus.2 <- x$Locus
options(java.parameters = "-Xmx1024m")
library(XLConnect)
write.xlsx2(as.data.frame(x), file=fname, sheetName="all", row.names=FALSE)
write.xlsx2(as.data.frame(y), file=fname, sheetName="loci", append=TRUE,row.names=FALSE)
write.xlsx2(as.data.frame(s.sum), file=fname, sheetName="sig", append=TRUE, row.names=FALSE)
write.xlsx2(as.data.frame(r.sum), file=fname, sheetName="rank", append=TRUE, row.names=FALSE)
write.xlsx2(as.data.frame(p.sum), file=fname, sheetName="per", append=TRUE, row.names=FALSE)
# old
#write.csv(z,paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset,"/",dataset,"_disagreeingLoci.csv",sep=''))
#write.csv(y,paste("/Users/ChatNoir/Projects/Squam/Graphs/",dataset,"/",dataset,"_disLociSummary.csv",sep=''))
#------------------------------------------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------- Disagree Scatters --------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Graph Function ------------------------------------------------------------------------------------------------------------------------------------------------------
# Scatter-plot helper with a viridis colour scale.
#   df           data frame handed to ggplot()
#   xcol, ycol   numeric vectors for the axes (same length as nrow(df);
#                these are values, not column names)
#   c            vector mapped to point colour (shadows base c(), but calls
#                like c(min(c),max(c)) still resolve the *function* c)
#   x.tic, y.tic axis break positions, also used as the cartesian limits
#   xl, yl       axis labels
# Returns the ggplot object; callers typically add labs(color=...) on top.
SC <- function(df,xcol,ycol,c,x.tic,y.tic,xl,yl){
scat <- ggplot(df, aes(x=xcol,y=ycol)) +
# NOTE(review): theme_classic() is a *complete* theme, so the preceding
# theme_bw() + theme(panel.border=...) are fully overridden (dead code).
geom_point(alpha=1, size=2, aes(color=c)) + theme_bw() + theme(panel.border = element_blank()) +
theme_classic() +
theme(
axis.text = element_text(size=6, color="black"),
text = element_text(size=10),
panel.border = element_blank(),
panel.background = element_rect(fill = "transparent"), # bg of the panel
plot.background = element_rect(fill = "transparent", color = NA), # bg of the plot
panel.grid = element_blank(), # get rid of major grid
plot.title = element_text(hjust = 0.5)
) +
coord_cartesian(ylim=y.tic,xlim = x.tic) +
scale_y_continuous(breaks = y.tic) +
scale_x_continuous(breaks = x.tic) +
# "abs val of x" is only a default legend title; callers override it.
labs(x=xl,y=yl,color="abs val of x") +
# Colour limits span the observed range of c so the legend matches the data.
scale_color_viridis(limits=c(min(c),max(c)),direction = 1,option='D')
#guides(colour = guide_legend(override.aes = list(size=2))) +
#scale_color_viridis(limits=c(-100,100),direction = -1,option='D')
#scale_color_gradientn(colors=heat.colors(2),limits=c(0,100))
#scale_color_gradient2(low="blue", high="red",midpoint=mid)
return(scat)
}
# Graph BF GL Scatters------------------------------------------------------------------------------------------------------------------------------------------------------
all <- p
# Rank
x <- abs(all$rank.diff)
c <- all$rank.diff
max(x)
min(x)
ts.rx <- seq(0,100,20)
xlab <- "normalized rank difference"
# Bf and GL #####
xlab <- "ln(BF)"
x <- all$BF
max(x)
min(x)
ts.rx <- seq(-100,60,20)
ylab <- "GL"
y <- all$GL
max(y)
min(y)
ts.ry <- seq(-80,30,10)
c <- abs(all$rank.diff)
r <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab) + labs(color="abs rank diff")
c <- all$rank.diff
r2 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab) + labs(color="rank diff")
z <- ggarrange(r,r2, ncol=1, nrow=2,align="v")
z
#ggsave(paste(dataset,"_rank_bfgl.pdf",sep=""), plot=z ,width = 4, height = 7, units = "in", device = 'pdf',bg = "transparent")
# Graph diff Histogram ---------------------------------------------- Histogram --------------------------------------------------------------------------------------------------------
# Histogram helper: distribution of `xval` with a dashed reference line at 0.
#   df    data frame handed to ggplot() (xval is a vector, not a column name)
#   xval  numeric vector to histogram
#   cc    fill colour
#   hbin  number of bins
#   xlab  x-axis label
# Returns the ggplot object (last expression); callers add
# coord_cartesian()/scale_* layers afterwards.
H <- function(df,xval,cc,hbin,xlab){
ggplot(data=df, aes(x=xval)) +
geom_histogram(bins=hbin, alpha=1, position="dodge", fill=cc, color="grey",size=0.1) +
#geom_histogram(breaks=brx, alpha=1, position="dodge", fill=cc, color="grey",size=0.1)+
#geom_histogram(binwidth = max(abs(xval))*0.01, alpha=1, position="dodge",color="grey", fill=cc, size=0.1)+
theme_classic() +
theme(plot.title = element_text(hjust = 0.5, size=10),
axis.text = element_text(size=6, color="black"),
text = element_text(size=8),
legend.title = element_text(size = 8),
legend.text = element_text(size = 8)) +
labs(y="Number of Loci",x=xlab) +
#coord_cartesian(xlim = x.tic) +
#coord_cartesian(ylim=ytic, xlim = xtic) +
#scale_y_continuous() +
#scale_x_continuous(breaks = xtic) +
# geom_vline(xintercept=lines.g,color=c("black"), linetype="dashed", size=0.5) +
# Dashed vertical line at x = 0 marks "no difference".
geom_vline(xintercept=c(0),color=c("black"), linetype="dashed", size=0.2)
}
#quartz()
h.bin <- 30
hy <- "TvS"
#hy <- "All"
#hy <- "AIvSA"
#all <- p[p$Hypothesis == "AIvSA",]
all <- p[p$Hypothesis == "TvS",]
#all <- p
# Set input data
x.val <- all$rank.diff
df <- all
cc <- "#2BB07FFF"
# Get max min for graph to set x axis values
max(abs(x.val))
m <- 100
x.tic <- seq(-m,m,20)
H(df,x.val,cc,h.bin,'dGLS values') +
coord_cartesian(xlim = x.tic) +
scale_x_continuous(breaks = x.tic)
y.tic <- seq(0,900,100)
rh <- H(df,x.val,cc,h.bin,'Difference in rank. - BF higher + GL higher') +
coord_cartesian(xlim = x.tic, ylim=y.tic) +
scale_x_continuous(breaks = x.tic) +
scale_y_continuous(breaks = y.tic)
rh
x.val <- all$per.diff
df <- all
cc <- "#2BB07FFF"
# Get max min for graph to set x axis values
#max(abs(x.val))
#m <- 70
#x.tic <- seq(-m,m,10)
H(df,x.val,cc,h.bin,'dGLS values') +
coord_cartesian(xlim = x.tic) +
scale_x_continuous(breaks = x.tic)
#y.tic <- seq(0,40,5)
ph <- H(df,x.val,cc,h.bin,'Difference in percentile. - BF higher + GL higher') +
coord_cartesian(xlim = x.tic, ylim=y.tic) +
scale_x_continuous(breaks = x.tic) +
scale_y_continuous(breaks = y.tic)
ph
title <- paste(dataset," - ",hy,sep="")
h <- annotate_figure(ggarrange(rh,ph, ncol=1, nrow=2,align="v"),
top = text_grob(title, color = "black", size = 16))
h
#ggsave(paste(dataset,"_rank_hist_",hy,".pdf",sep=""), plot=h ,width = 5, height = 4, units = "in", device = 'pdf',bg = "transparent")
# Graph Metadata Scatters------------------------------------------------------------------------------------------------------------------------------------------------------
#all <- p[p$Hypothesis == "AIvSA",]
all <- p[p$Hypothesis == "TvS",]
#all <- p
ylab <- "num taxa"
y <- all$GL
max(y)
min(y)
ts.ry <- seq(200,290,30)
gl <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
gl
ylab <- "alignment score"
y <- all$MEAN_COL_SCORE
ts.ry <- seq(0,1,0.1)
g1 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g1
ylab <- "num taxa"
y <- all$Sequences
max(y)
min(y)
ts.ry <- seq(200,290,30)
g2 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g2
ylab <- "alignment length"
y <- all$Columns
max(y)
min(y)
ts.ry <- seq(300,2100,300)
g3 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g3
names(all)
ylab <- "PI"
y <- all$Pars_Info
max(y)
min(y)
ts.ry <- seq(100,1500,100)
g4 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g4
names(all)
ylab <- "Singleton Sites"
y <- all$Sing_Sites
max(y)
min(y)
ts.ry <- seq(0,200,50)
g5 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g5
names(all)
ylab <- "Cons_Sites"
y <- all$Cons_Sites
max(y)
min(y)
ts.ry <- seq(0,1200,100)
g6 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g6
ylab <- "Chi2_Fail"
y <- all$Chi2_Fail
max(y)
min(y)
ts.ry <- seq(0,250,50)
g7 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g7
ylab <- "Gaps"
y <- all$Gaps_Ambig
max(y)
min(y)
ts.ry <- seq(0,6,1)
g8 <- SC(all,x,y,c,ts.rx,ts.ry,xlab,ylab)
g8
g <- ggarrange(g1,g2,g3,g4,g5,g6,g7,g8, ncol=2, nrow=4,align="v")
g
ggsave(paste(dataset,"_rank_metaData.pdf",sep=""), plot=g ,width = 6, height = 10, units = "in", device = 'pdf',bg = "transparent")
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Testin ------------------------------------ Testing --------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Mean Mode Variance ------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Per-hypothesis summary statistics for the BF and GL support columns.
bf <- all %>% group_by(Hypothesis) %>%
summarise(Mean=mean(BF), Median=median(BF), Std=sd(BF), Mad=mad(BF), IQR=IQR(BF), Max=max(BF), Min=min(BF))
gl <- all %>% group_by(Hypothesis) %>%
summarise(Mean=mean(GL), Median=median(GL), Std=sd(GL), Mad=mad(GL), IQR=IQR(GL), Max=max(GL), Min=min(GL))
# copy and paste into metadata excel sheet for now.
#Outliers based on IQR------------------------------------------------------------------------------------------------------------------------------------------------------
# https://stackoverflow.com/questions/12866189/calculating-the-outliers-in-r
# https://www.r-bloggers.com/combined-outlier-detection-with-dplyr-and-ruler/
# "Extreme" outliers = more than 3 * IQR beyond the quartiles.
# NOTE(review): the thresholds are computed once from the *pooled* BF values
# (not per Hypothesis), and the same BF-derived thresholds are reused for the
# GL outlier count below -- confirm that is intended.
data <- all$BF
lowerq = quantile(data)[2]
upperq = quantile(data)[4]
iqr = upperq - lowerq
extreme.threshold.upper = (iqr * 3) + upperq
extreme.threshold.lower = lowerq - (iqr * 3)
# Per hypothesis: count of loci that are extreme BF outliers (bo == TRUE).
x <- all %>% group_by(Hypothesis) %>% mutate(bo = case_when(BF <= extreme.threshold.lower ~ TRUE,
BF >= extreme.threshold.upper ~ TRUE,
between(BF,extreme.threshold.lower,extreme.threshold.upper) ~ FALSE)) %>%
count(bo)
# Per hypothesis: GL outlier count -- see the threshold caveat above.
y <- all %>% group_by(Hypothesis) %>% mutate(go = case_when(GL <= extreme.threshold.lower ~ TRUE,
GL >= extreme.threshold.upper ~ TRUE,
between(GL,extreme.threshold.lower,extreme.threshold.upper) ~ FALSE)) %>%
count(go)
|
92df2313fbca6e6469a2e4e72929bad72cdab5e5 | 6332c12ee7806f68d629cd0d191c53d028a2a56f | /posteriordb_check/phaseI_check/check_LBFGS.R | 7c1a40dec1ce75c2f563125bbed1750c81da0cbf | [
"BSD-3-Clause"
] | permissive | SteveBronder/Pathfinder | 57f5ce27de94b22855703511ad7e4378d3edeef6 | 06ad87e70c4b29645bf22752a32ccb85973ead5a | refs/heads/master | 2023-07-04T21:39:12.778909 | 2021-08-05T02:59:52 | 2021-08-05T02:59:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,734 | r | check_LBFGS.R | rm(list = ls())
# --- Environment setup -------------------------------------------------------
# NOTE(review): a relative setwd() makes the script runnable only from its
# parent directory.
setwd("./posteriordb") # set working dir to cloned package
library(rstan)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
## install the beta release version of R package posterior
# install.packages("posterior", repos = c("https://mc-stan.org/r-packages/", getOption("repos")))
# check the dataset in posteriordb #
library(posteriordb)
library(posterior)
library(ggplot2)
library(dplyr)
# Helper functions (printf(), lp__ interval utilities, ...) are defined here.
source("../utils/sim_pf.R")
source("../utils/lp_utils.R")
set.seed(123)
pd <- pdb_local() # Posterior database connection
pn <- posterior_names(pd)
L_pn = length(pn)
# parameters settings #
alpha = 0.01 # tail probability used for the lp__ posterior interval
L = 1000 # iteration for Stan phase I sampling
N1 = 1000 # maximum iteration for L-BFGS
M = 20 # No. of chains
width = 860; height = 740 # the size of the plot
mc.cores = parallel::detectCores() - 2
factr_tol = 1e2 # optim() L-BFGS-B 'factr' convergence tolerance
init_bound = 2.0 # initials drawn from Unif(-init_bound, init_bound)
# get model names with reference posterior samples#
# Keep only posteriors for which reference draws can be fetched; record the
# index of each usable model in model_record.
N_models = 0
model_record = c()
for(l in 1:L_pn){
modelname <- pn[l]
# pick model
po <- posterior(modelname, pdb = pd)
# get reference posterior samples
# `gsd` itself is unused here -- only whether the fetch succeeds matters;
# the handler flips the flag via <<- in the enclosing frame.
skip_to_next <- FALSE
tryCatch(gsd <- reference_posterior_draws(po),
error = function(e) { skip_to_next <<- TRUE})
if(skip_to_next) {
# print("Error in obtaining reference posterior for this posterior.")
next }
N_models = N_models + 1
model_record = c(model_record, l)
printf("model %d: %s", l, modelname)
}
# Auto-prints the number of usable models when run interactively.
N_models
# preallocate results #
lp_LBFGS_n_fn <- array(data = NA, dim = c(M, length(model_record)))
lp_LBFGS_n_gr <- array(data = NA, dim = c(M, length(model_record)))
lp_INV <- array(data = NA, dim = c(2, length(model_record)))
lp_mean <- c()
initial_ls <- list()
#i = which(model_record == 24);i
for(i in 1:length(model_record)){
modelname <- pn[model_record[i]]
printf("\n model %d: %s", model_record[i], modelname)
# pick model
po <- posterior(modelname, pdb = pd)
# get reference posterior samples
gsd <- reference_posterior_draws(po)
# compile the model
sc <- stan_code(po)
model <- stan_model(model_code = sc)
data <- get_data(po)
# obtain posterior interval of lp__
if(modelname == "eight_schools-eight_schools_noncentered"){
INV <- lp_Int_q_posteriordb_8school_noncen(po, alpha)
} else if (modelname == "gp_pois_regr-gp_pois_regr") {
INV <- lp_Int_q_posteriordb_gp_pois_regr(po, alpha)
} else {INV <- lp_Int_q_posteriordb(po, alpha)}
lp_INV[, i] = INV[c(1, 2)]
lp_mean[i] = INV[3]
posterior <- to_posterior(model, data)
D <- get_num_upars(posterior)
fn <- function(theta) -log_prob(posterior, theta, adjust_transform = TRUE,
gradient = TRUE)[1]
gr <- function(theta) -grad_log_prob(posterior, theta, adjust_transform = TRUE)
# choose the size of history in L-BFGS
lmm = 6;
cat("No. pars:", D," lmm in L-BFGS: ", lmm, "\n")
set.seed(1234)
m = 1
initial_ls[[i]] = list()
while (m <= M){
cat(m, "\t")
# run M initials and check the optim algorithm
init <- runif(D, -init_bound, init_bound)
initial_ls[[i]][[m]] <- init
restart <- FALSE
tryCatch(lp_old <- -fn(init),
error = function(e) { restart <<- TRUE})
tryCatch(test_uncon <- constrain_pars(posterior, init),
error = function(e) { restart <<- TRUE})
if(restart){
print("Error in initialization.")
next
}
if((lp_old >= lp_INV[1, i] && lp_old <= lp_INV[2, i])){
break_opt <- FALSE
tryCatch(z <- optim(par = init,
fn = fn, # negate for maximization
gr = gr,
method = "L-BFGS-B",
control = list(maxit = N1, factr = factr_tol,
lmm = lmm #, ndeps = 1e-8 #,
#trace = 6, REPORT = 1
)),
error = function(e) { break_opt <<- TRUE})
if(break_opt) {
print("Error in obtaining optimization path.")
next
}
print("initialized in target region")
lp_LBFGS_n_fn[m, i] = 1
lp_LBFGS_n_gr[m, i] = 1
m = m + 1
next
} else if(lp_old > lp_INV[2, i] && is.finite(lp_old)){
break_opt <- FALSE
tryCatch(z <- optim(par = init,
fn = fn, # negate for maximization
gr = gr,
method = "L-BFGS-B",
control = list(maxit = N1, factr = factr_tol,
lmm = lmm #, ndeps = 1e-8 #,
#trace = 6, REPORT = 1
)),
error = function(e) { break_opt <<- TRUE})
if(break_opt) {
print("Error in obtaining optimization path.")
next
}
print("initial lp__ pass the target region")
lp_LBFGS_n_fn[m, i] = 1
lp_LBFGS_n_gr[m, i] = 1
m = m + 1
next
} else if(is.infinite(lp_old)){
print("initial lp__ is Inf")
next
}
# run optimization
for (n in 1:N1) {
break_opt <- FALSE
tryCatch(z <- optim(par = init,
fn = fn, # negate for maximization
gr = gr,
method = "L-BFGS-B",
control = list(maxit = n, factr = factr_tol,
lmm = lmm #, ndeps = 1e-8 #,
#trace = 6, REPORT = 1
)),
error = function(e) { break_opt <<- TRUE})
if(break_opt) {
print("Error in obtaining optimization path.")
break
}
lp_up <- -fn(z$par)
if( (lp_up >= lp_INV[1, i] && lp_up <= lp_INV[2, i]) ){
# once reach the target region, record the number of calls to fn and gr
lp_LBFGS_n_fn[m, i] = z$counts[1]
lp_LBFGS_n_gr[m, i] = z$counts[2]
m = m + 1
break
} else if(lp_up > lp_INV[2, i]){
print("optimization pass the target region")
lp_LBFGS_n_fn[m, i] = z$counts[1]
lp_LBFGS_n_gr[m, i] = z$counts[2]
m = m + 1
break
} else if(lp_old == lp_up){
lp_LBFGS_n_fn[m, i] = -z$counts[1]
lp_LBFGS_n_gr[m, i] = -z$counts[1]
print("converge to a local minimum.")
m = m + 1
break
} else {
lp_old = lp_up
}
}
}
}
save(file = "../results/lp_posteriordb_LBFGS_h6.RData",
list = c("lp_LBFGS_n_fn", "lp_LBFGS_n_gr", "initial_ls", "lp_INV",
"lp_mean", "model_record"))
# check the output #
load(file = "../results/lp_posteriordb_LBFGS_h6.RData")
## check the distribution of number of iterations ##
n_grfn <- colMeans(abs(lp_LBFGS_n_gr))
mean(n_grfn) # 41.67
median(n_grfn) # 31.05
sd(n_grfn) # 53.80
sd(abs(lp_LBFGS_n_gr)) # 56.02
jpeg(filename = paste0("../pics/hist_LBFGS_grfn_counts.jpeg"),
width = width, height = height, units = "px", pointsize = 12)
hist(abs(lp_LBFGS_n_gr), breaks = 100,
main = "", ylab = "", axes = TRUE,
xlab = "iterations")
dev.off()
table(model_record[as.integer(which(abs(lp_LBFGS_n_gr)>200) / M - 0.5 / M) + 1])
#6 27 41
#20 1 3
# boxplot of counts
df <- data.frame(n_counts = c(abs(lp_LBFGS_n_gr)),
model = rep(pn[model_record], each = M),
not_reach_target =
rep(apply(lp_LBFGS_n_gr, 2,
f <- function(x){as.numeric(any(x < 0))}),
each = M))
jpeg(filename = paste0("../pics/box_LBFGS_gr_counts_log.jpeg"),
width = width*1.3, height = height*2, units = "px", pointsize = 12)
p_box_iter <- df %>% mutate( type=ifelse(not_reach_target == 1, "Highlighted","Normal")) %>%
ggplot( aes(y = model,
x = n_counts, fill=type, alpha=type)) +
geom_boxplot() +
scale_x_log10() + ylab("") + xlab("No. calls to fn and gr") +
theme_grey(base_size = 26) +
scale_fill_manual(values=c("red", "white")) +
theme(legend.position = "none") +
scale_alpha_manual(values=c(1,0.1))
print(p_box_iter)
dev.off()
jpeg(filename = paste0("../pics/hist_LBFGS_grfn_counts_log.jpeg"),
width = width, height = height, units = "px", pointsize = 12)
p_hist <- ggplot(df, aes(x = n_counts)) +
geom_histogram(bins = 100, color = "black", fill = "gray") +
theme_bw(base_size = 26) +
scale_x_log10(breaks=c(1, 10, 100, 300), labels = c("1", "10", "100", "300"))+
theme(axis.text.y = element_blank(),
axis.ticks.y = element_blank(), axis.title.y = element_blank())+
xlab("No. calls to fn and gr")
print(p_hist)
dev.off()
|
c631225fe9385f96a9e77aa6d990145efb50abc7 | 2c8d53ef85044379957a5d16ad530ce12e74378e | /rapid/objects.R | 25dc332871a91d91a6690c6973c50328ea7f8efc | [] | no_license | antoine-lizee/rAPId | 333acb7f1bcad9b436f936f695fc480faa0d86fc | 40bba8ad7d2ef3ebda852f0134f7b6d581b23353 | refs/heads/master | 2021-01-25T08:28:04.251418 | 2015-07-15T23:12:40 | 2015-07-15T23:12:40 | 17,385,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,865 | r | objects.R |
library(proto)
# Helpers for proto programming -------------------------------------------
newWithDefaults <- function(protoName, protoArgs) {
# This function creates a constructor for a proto trait, but checking for missing values when these are required.
# The required values are specified by setting their default Value as "NULL".
# The only significant disadvantage of using this function is the lack of auto-completion when creating new objects,
# because of the use of the ellipsis in the returned function. Could be fixed by using the formals()<- setter. TODO
return(function(., ...) {
# Collect the arguments passed to .$new()
newArgs <- list(...)
newArgNames <- names(newArgs)
# Loop through the arguments of the specification
for (protoArgName in names(protoArgs)) {
# If the specified argument is not passed to .$new()...
if (!protoArgName %in% newArgNames) {
if (is.null(protoArgs[[protoArgName]])) {
# ...throw an error when it's compulsory (because specified as "NULL")
stop("PROTO: You created an instance of the Trait '", protoName, "' without specifying the needed parameter '", protoArgName, "' .")
} else {
# ...add it to the arguments
newArgs[[protoArgName]] <- protoArgs[[protoArgName]]
}
}
}
# Call proto(., ...) with the modified version of the arguments
# protoCall <- as.call(c( list(quote(proto), quote(.)), args ))
# print(protoCall)
# eval(protoCall)
eval(as.call(c(list(quote(.$proto)), newArgs)))
})
}
# Argument helpers --------------------------------------------------------
checkWithinValues <- function(., value) {
values <- .$expectedValues
if (isAnNA(values) || isAnNA(value) || value %in% values) {
return(value)
}
stop(simpleError(
buildArgErrorMessage(., "Wrong value for ")
))
}
checkWithinRange <- function(., value) {
rethrowWithMessage({
min <- .$expectedValues$min
max <- .$expectedValues$max
isInRange <- value >= .$expectedValues$min && value <= .$expectedValues$max
}, buildApiDevErrorMessage(., "Wrong range specification."))
if (isInRange %in% FALSE) { #NA passes
stop(simpleError(buildArgErrorMessage(., "Value is not in range for ")))
}
return(value)
}
defaultArgumentCheck <- function(., value) {
# This is the default argument check function. If there is expected values specified,
# it checks that the passed argument is wihin these.
# It also does some basic type checks.
# TODO: support lambda & other functions.
switch(.$expectedValueType,
"string" = checkWithinValues(., value),
"integer"= {
value = throwWithMessage(toInt(value), buildArgErrorMessage(., "Wrong value type for "))
if (is.list(.$expectedValues)) {
checkWithinRange(., value)
} else {
checkWithinValues(., value)
}
},
"any" = checkWithinValues(., value))
}
# Misc Helpers ------------------------------------------------------------
isAnNA <- function(x) {
length(x) == 1 && is.na(x)
}
buildApiDevErrorMessage <- function(., msg) {
paste0("API_DEV::ARG: "
, msg
, .$name
, ifelse(is.na(.$description), "", paste0(" the ", .$description))
, ". Please check your argument definitions for the action '"
, .$rapid.action
, "'."
)
}
buildArgErrorMessage <- function(., msg) {
paste0("ARGS: "
, msg
, .$name
, ifelse(is.na(.$description), "", paste0(" the ", .$description))
, ". Please refer to the embedded reference available with an OPTIONS call to '"
, .$rapid.action
, "'."
)
}
throwWithMessage <- function(expr, errorMsg) {
# This is for a custom error
tryCatch(expr = expr, error = function(e) stop(simpleError(errorMsg)))
}
rethrowWithMessage <- function(expr, errorMsg) {
# This is for a native error for which we customize the output
tryCatch(expr = expr, error = function(e) {
e$message <- paste0(errorMsg, "\nOriginal error msg:\n", e$message)
stop(e)
})
}
toInt <- function(string) {
if (string == "") {
return(NA)
}
res <- strtoi(string)
if (is.na(res)) {
stop()
} else {
return(res)
}
}
# Argument definition -----------------------------------------------------
Argument <- proto(new = newWithDefaults(protoName = "Argument",
protoArgs = list(name = NULL
, description = NULL
, expectedValueType = "any"
, expectedValues = NA
, defaultValue = "<none>"))
, check = defaultArgumentCheck
, funEnvir = FALSE)
# Backup Argument definition ----------------------------------------------
ArgumentBckp <- proto(new = function(.
, name = NULL
, description = NULL
, expectedValueType = NA
, expectedValues = NA
, defaultValue = "<none>"
, ...) {
for (protoArg in c("name", "description")) {
if (eval(parse(text = paste0("missing(", protoArg, ")")))) {
stop("PROTO: You created an instance of the Trait 'Argument' without specifying the needed parameter '", protoArg, "'.")
}
}
proto(.
, name = name
, description = description
, expectedValueType = expectedValueType
, expectedValues = expectedValues
, default = default
, ...)}
, check = defaultArgumentCheck)
|
9f02796c2e6eccb5da63eec341f9a77631bbf796 | 62f106061017995cfda81866a4c6e4a3c953b13c | /CleaningDataCourseProject/run_analysis.R | b1ef8e1efed4a4d1438e3097801bd9cf3938ac43 | [] | no_license | mohitr/datasciencecoursera | e40bba87fa76f87434f0155808b624152ef1415d | cb43b31abebffbc79d4b3359c2ec2da1f4d9c4fb | refs/heads/master | 2016-09-05T15:31:36.489456 | 2014-06-23T16:12:33 | 2014-06-23T16:12:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,633 | r | run_analysis.R | # read features
# Merge the UCI HAR train/test splits, keep mean/std features, and build a
# per-subject / per-activity average table.
# NOTE(review): the X_*.txt files separate columns with runs of spaces;
# read.csv(sep=" ") treats *every* space as a delimiter, so verify the parsed
# column count matches length(features) (read.table's default whitespace
# splitting is the usual choice here).
features <- read.csv("features.txt", sep=" ", header=FALSE)
features <- as.character(features[,2])
# read activity labels
activity_name <- read.csv("activity_labels.txt", sep=" ", header=FALSE)
colnames(activity_name) <- c("activity_id", "activity_name")
# reading test data
test_x <- read.csv("test/X_test.txt", sep=" ", header=FALSE)
test_y <- read.csv("test/y_test.txt", header=FALSE)
test_subject <- read.csv("test/subject_test.txt", header=FALSE)
# reading training data
train_x <- read.csv("train/X_train.txt", sep=" ", header=FALSE)
train_y <- read.csv("train/y_train.txt", header=FALSE)
train_subject <- read.csv("train/subject_train.txt", header=FALSE)
# Stack training before test so the *_x / *_y / *_subject rows stay aligned.
combined_x <- rbind(train_x, test_x)
combined_y <- rbind(train_y, test_y)
combined_subject <- rbind(train_subject, test_subject)
# assign names to features
colnames(combined_x) <- features
colnames(combined_y) <- c("activity_id")
colnames(combined_subject) <- c("subject_id")
# merge activity labels and values
# NOTE(review): merge() sorts the result by activity_id, so the rows of
# labelled_combined_y may no longer line up with combined_x /
# combined_subject -- confirm before trusting the cbind() below.
labelled_combined_y <- merge(combined_y, activity_name)
# mean and std features
# NOTE: this substring match also keeps meanFreq()-style variables.
mean_std_features <- regexpr("mean", features) != -1 | regexpr("std", features) != -1
combined_x <- combined_x[, which(mean_std_features)]
# adding activity_name and subject_id to same data set
combined_x <- cbind(combined_subject, labelled_combined_y$activity_name, combined_x )
# aggregating based on subject_id and activity_name
# (cbind.data.frame appears to keep the literal column name
# "labelled_combined_y$activity_name", which is what the [[ ]] lookup uses
# here -- verify; the grouping columns come out named Group.1 / Group.2.)
aggregate_data <- aggregate(combined_x[, 3:ncol(combined_x)], by=list(combined_x[['subject_id']], combined_x[['labelled_combined_y$activity_name']]), FUN=mean)
# writing text file (write.csv emits CSV-formatted content plus a row-name
# column, despite the .txt extension)
write.csv(aggregate_data, "tidy_data.txt") |
ae7313243675da9710085d9a72dc33dc4beebc39 | 3e931bb105cc62e35589d071b787a8635ec55552 | /man/summary.qif.Rd | b966a2f41cc8d69710cf1c1066a9e3b6beaaa234 | [] | no_license | umich-biostatistics/qif | 2b31a13bc48a52f5e107c45395b1c355eebfe89e | 59fac882807ffe9b38fe1573b6aa53e516c92341 | refs/heads/master | 2020-06-14T04:16:38.780344 | 2019-07-18T15:51:27 | 2019-07-18T15:51:27 | 194,895,446 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 639 | rd | summary.qif.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qif.r
\name{summary.qif}
\alias{summary.qif}
\title{Summary of a qif Object}
\usage{
\method{summary}{qif}(object, correlation = TRUE, ...)
}
\arguments{
\item{object}{an object for which a summary is desired.}
\item{correlation}{logical; whether to include correlation (defaults to \code{TRUE}).}
\item{...}{additional arguments to be passed to \code{summary}.}
}
\value{
The \code{summary.qif} object.
}
\description{
Produce a summary of a \code{qif} object.
}
\seealso{
\code{\link[qif]{qif}}
}
\author{
Zhichang Jiang, Alberta Health Services, and Peter X.K. Song, University
of Michigan.
}
|
ba3e9352ddae4d2a5b515aa272dcfd5693846639 | c8063310239560f8d85e7554a4a9c75f1dec01f3 | /assignment_10/test/160204.r | 298ee7c8c111e39fc8356bb6e9d9540354b577b0 | [] | no_license | bismayswain/endsem | e93e6fed0605751a0e1dc5acfc895c1c2511fadb | 0392277b8cfce3d7d5ee71a69f3e9c33473eac45 | refs/heads/master | 2020-03-13T08:13:50.144947 | 2019-04-18T22:42:49 | 2019-04-18T22:42:49 | 131,039,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,860 | r | 160204.r | ######################### Part-1
# Part 1: draw 50,000 Exponential(rate = 0.2) variates (mean = 1/0.2 = 5, and
# for an exponential sd = mean = 5) and split them into 500 consecutive
# samples of 100 values each (value[[i]]).
num_samples <- 50000
data <- rexp(num_samples, 0.2)
#print(data)
value <- list()
for (i in 1: 500)
{
# print(data[10:20])
# Sample i covers observations (i-1)*100 + 1 ... i*100.
start=i*100-99
end=i*100
value[[i]] = c(data[start:end])
#print(value[[i]])
}
####################################### Part-2
# (empty loop left over from debugging -- it has no effect)
for (i in 1:500)
{
#str(value[[i]])
#print(value[[i]])
}
# Parts 2/3: for the first 5 samples, build an empirical pmf of the rounded
# values (pdata), plot it, then accumulate it into an empirical cdf (cdata).
for (j in 1:5)
{
pdata <- rep(0, 20);
for(i in 1:100)
{
val=round(value[[j]][i], 0)
#print(val)
if(val <= 20)
{
# NOTE(review): when val == 0 (any draw below 0.5 rounds to 0) this
# indexes pdata[0], which R silently ignores, so those observations are
# dropped; bins are also shifted one slot relative to the x axis below
# (pdata[k] is plotted at x = k - 1).
pdata[val] = pdata[val] + 1/ 100;
}
}
xcols <- c(0:19)
#str(pdata)
#str(xcols)
plot(xcols, pdata, "l", xlab="X", ylab="f(X)")
#################################### Part-3
# Running sum of the pmf gives the empirical cdf.
cdata <- rep(0, 20)
cdata[1] <- pdata[1]
for(i in 2:20){
cdata[i] = cdata[i-1] + pdata[i]
}
plot(xcols, cdata, "o", col="blue", xlab="X", ylab="F(X)");
print(paste(" sample ",j," #########################"))
print(mean(value[[j]]))
print(sd(value[[j]]))
}
###################################### Part-4
# Part 4: per-sample mean and sd across all 500 samples -- the sampling
# distribution of the mean (approx. Normal with mean 5 and se 5/sqrt(100)).
mean_sample <- rep(0,500)
dev_sample <- rep(0,500)
for (i in 1:500)
{
mean_sample[i] <- mean(value[[i]])
dev_sample[i] <-sd(value[[i]])
}
# NOTE(review): this tabulates the *raw* data, not the sample means, so the
# axis label "Value of mean of samples" on the next plot is misleading.
tab <- table(round(data))
#str(tab)
plot(tab, "h", xlab="Value of mean of samples", ylab="Frequency")
# Empirical pmf/cdf of the rounded sample means (same index-0 caveat as above).
pdata <- rep(0, 30);
for(i in 1:500){
val=round(mean_sample[i], 0);
if(val <= 30){
pdata[val] = pdata[val] + 1/ 500;
}
}
xcols <- c(0:29)
#str(pdata)
#str(xcols)
plot(xcols, pdata, "l", xlab="X", ylab="f(X)")
cdata <- rep(0, 30)
cdata[1] <- pdata[1]
for(i in 2:30){
cdata[i] = cdata[i-1] + pdata[i]
}
plot(xcols, cdata, "o", col="blue", xlab="X", ylab="F(X)");
############################ Part-5
# Overall mean / sd of the sampling distribution of the mean.
mean_overall <- mean(mean_sample)
dev_overall <- sd(mean_sample)
############################ Part-6
print(mean_overall)
print(dev_overall)
|
02acc6d36156c975bba826c7a70cdad687269795 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PWFSLSmoke/examples/monitor_performance.Rd.R | 602464838e41f1ed115a3dd748e6362c554b5375 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,038 | r | monitor_performance.Rd.R | library(PWFSLSmoke)
### Name: monitor_performance
### Title: Calculate Monitor Prediction Performance
### Aliases: monitor_performance
### Keywords: ws_monitor
### ** Examples
## Not run:
##D # If daily avg data were the prediciton and Spokane were
##D # the observed, which WA State monitors had skill?
##D wa <- airnow_load(2017) %>% monitor_subset(stateCodes='WA')
##D wa_dailyAvg <- monitor_dailyStatistic(wa, mean)
##D Spokane_dailyAvg <- monitor_subset(wa_dailyAvg, monitorIDs='530630021_01')
##D threshold <- AQI$breaks_24[4] # Unhealthy
##D performanceMetrics <- monitor_performance(wa_dailyAvg,
##D Spokane_dailyAvg,
##D threshold, threshold)
##D monitorIDs <- rownames(performanceMetrics)
##D mask <- performanceMetrics$heidikeSkill &
##D !is.na(performanceMetrics$heidikeSkill)
##D skillfulIDs <- monitorIDs[mask]
##D skillful <- monitor_subset(wa_dailyAvg, monitorIDs=skillfulIDs)
##D monitorLeaflet(skillful)
## End(Not run)
|
dbe4766bc2d95bd90c57f3fc5d7487f547b2208d | 6ad95fc7067d6ab32bee6f1a6ad730b984d12579 | /Akhil/R Codes/Basics of Linear Algebra- T S Blyth and Robertson/Chapter 8/EXR_8_15R.R | 8b3b20c8b56fe00c54675c5b31f563ba3183e043 | [] | no_license | sahridhaya/BitPlease | d881f95c404ae9a433c79a8f6519ea7eaba9950a | 04e1c38944f0ee24eb1377ced87e6550253fbb2e | refs/heads/master | 2020-04-21T11:36:10.914922 | 2020-04-06T09:02:47 | 2020-04-06T09:02:47 | 169,531,342 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 408 | r | EXR_8_15R.R | # Exercise 15 Chapter 8 Page no.: 142
# Determinant of complex matrix
# Include "Matrix" , "matlib" and "complexplus" library
# 3x3 complex matrix filled row by row (byrow = TRUE).  Off-diagonal entries
# satisfy A[j,i] == Conj(A[i,j]) with a zero diagonal, so A is Hermitian and
# its determinant is real.
A <- matrix(c(0, complex(real = 1,imaginary = 1), complex(real = 1,imaginary = 2),complex(real = 1,imaginary = -1),0,complex(real = 2,imaginary = -3),complex(real = 1,imaginary = -2),complex(real = 2,imaginary = 3),0), nrow = 3,ncol = 3, byrow = TRUE)
# Det() is complexplus's determinant, used here for complex-valued input.
x <- Det(A)
print(x) |
eacec629b236fff546b9b1e3d2891b46df18a12e | 12ba52d9a0820f63ccc9006742da8e1f4c892639 | /regex_pattern_extraction.R | 7cda3ae95bcf0fd889cfe5e9ffaeb9d036573ec0 | [] | no_license | ankitccuser187/task | 71bc03cf9da4b22c35b5d9addbd02b62598ca3fc | 077f8b759c8007ea78a26b1579118b3f04d04e23 | refs/heads/master | 2021-01-10T13:54:47.065911 | 2016-01-11T08:49:09 | 2016-01-11T08:49:09 | 49,272,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,924 | r | regex_pattern_extraction.R |
#social networking
pattern_1<-"facebook|twiter|plus.google|instagram|tinder"
#multimedia hosting website
pattern_2<-"tube\\.|vimeo|motion|daily|metacafe|videos\\.|movie.*\\.|mp4
|video\\.|movies\\/|movie[a-z]*\\.|video[a-z]*\\.|maza|film|pagalworld\\."
#some pattern for online shoping
pattern_3<-"kart\\.|deal\\.|jabong|myntra|zovi|amazon|baba|
shop\\.|americanswan|bazzar|paytm|
freecharge|yepmee|[E-e]bay|naaptol"
#online cloud storage website
pattern_4<-"multi[a-z]*\\.|[a-z]*host|[a-z]*file[a-z]*\\.|[a-z]*cloud[a-z]*\\.|
upload.*\\.|drive|dropbox"
#music hosting website
pattern_5<-"mp3|song|dl|bolly|music|ganna|tune"
#adult website
pattern_6<-"xnxx\\.|xvdieos\\.|adult|hotgirl|porn|xxx.*\\.|sex|erowapi"
#advertisment website
pattern_7<-"ad.*\\.com|ads.*\\.|market[a-z]*\\."
#netowrking and ISP
pattern_8<-"online.*\\.|track[a-z]*\\.|i2e1.in"
#microsoft/yahoo/google
pattern_9<-"microsoft|bing|google|yahoo"
#movie booking and streaming
pattern_10<-"pvrcinemas|bookmyshow|imdb|rotten"
#travel and resturant
pattern_11<-"trip|adviser|makenytrip|yatra|goibibo|airindia|spicejet|indigo
dominos|pizza|zomato|foodpanda|justeat|khana|burp"
#online magzines/news
pattern_12<-"India|news|today|poltics|sarita|jagran"
#introducing new wed detail column
refined_t$web_detail<-NA
for(i in 1:nrow(refined_t)){
message_1<-refined_t$Website[i]
message_2<-refined_t$Referer[i]
message_3<-refined_t$Referer.page[i]
if(grepl(pattern_1,message_1,ignore.case = TRUE)|
grepl(pattern_1,message_2,ignore.case = TRUE)|
grepl(pattern_1,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"Social networking"
}
if(grepl(pattern_6,message_1,ignore.case = TRUE)|
grepl(pattern_6,message_2,ignore.case = TRUE)|
grepl(pattern_6,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"Adult Content"
}
if(grepl(pattern_2,message_1,ignore.case = TRUE)|
grepl(pattern_2,message_2,ignore.case = TRUE)|
grepl(pattern_2,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"multimedia hosting"
}
if(grepl(pattern_5,message_1,ignore.case = TRUE)|
grepl(pattern_5,message_2,ignore.case = TRUE)|
grepl(pattern_5,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"music hosting"
}
if(grepl(pattern_3,message_1,ignore.case = TRUE)|
grepl(pattern_3,message_2,ignore.case = TRUE)|
grepl(pattern_3,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"eCommerce"
}
if(grepl(pattern_4,message_1,ignore.case = TRUE)|
grepl(pattern_4,message_2,ignore.case = TRUE)|
grepl(pattern_4,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"online cloud"
}
if(grepl(pattern_7,message_1,ignore.case = TRUE)|
grepl(pattern_7,message_2,ignore.case = TRUE)){
refined_t$web_detail[i]<-"ad content"
}
if(grepl(pattern_8,message_1,ignore.case = TRUE)|
grepl(pattern_8,message_2,ignore.case = TRUE)){
refined_t$web_detail[i]<-"Networking/ISP"
}
if(grepl(pattern_9,message_1,ignore.case = TRUE)|
grepl(pattern_9,message_2,ignore.case = TRUE)){
refined_t$web_detail[i]<-"microsoft/google/bing"
}
if(grepl(pattern_10,message_1,ignore.case = TRUE)|
grepl(pattern_10,message_2,ignore.case = TRUE)){
refined_t$web_detail[i]<-"moview review/ticket booking"
}
if(grepl(pattern_11,message_1,ignore.case = TRUE)|
grepl(pattern_11,message_2,ignore.case = TRUE)|
grepl(pattern_11,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"Travel and Resturants"
}
if(grepl(pattern_11,message_1,ignore.case = TRUE)|
grepl(pattern_11,message_2,ignore.case = TRUE)|
grepl(pattern_11,message_3,ignore.case = TRUE)){
refined_t$web_detail[i]<-"News/magzines"
}
}
source('~/i2ei/user_profile.R')
|
0340be058b0045faa4454343e3120c81ecdb76e5 | a80156b781a5d450eed498f0dbd5fbdfd36405da | /Concrete_NN.R | 8cf3e96fe6b81eab276a0e0028655da0d4143474 | [] | no_license | shreyas-95/R-codes | 257e31833bb0598fad23042e661a35d9d7e7e5a8 | c4561577ad81515b4f9f4209a740a131821e430f | refs/heads/main | 2023-01-13T19:36:05.852583 | 2020-11-17T06:31:06 | 2020-11-17T06:31:06 | 301,051,431 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,538 | r | Concrete_NN.R | # Prepare a model for strength of concrete data using Neural Networks.
# Load the packages used below (neuralnet does the actual model fitting).
library(neuralnet)
library(nnet)
library(NeuralNetTools)
library(plyr)
library(skimr)

# Read the concrete data set (chosen interactively) and inspect it.
concrete <- read.csv(file.choose())
View(concrete)
attach(concrete)
skim(concrete)
summary(concrete)
str(concrete)

# Exploratory histograms with density overlays: target (strength) and slag.
hist(concrete$strength, prob = TRUE, breaks = 50)
lines(density(concrete$strength))
hist(concrete$slag, prob = TRUE, breaks = 30)
lines(density(concrete$slag))

# Min-max scale every column to [0, 1]; networks train better on
# normalised inputs.
min_max <- function(v) {
  (v - min(v)) / (max(v) - min(v))
}
concrete_norm <- as.data.frame(lapply(concrete, FUN = min_max))
summary(concrete_norm)
View(concrete_norm)

# Reproducible 70/30 split into training and test sets.
set.seed(1234)
ind <- sample(2, nrow(concrete_norm), replace = TRUE, prob = c(0.7, 0.3))
train <- concrete_norm[ind == 1, ]
test <- concrete_norm[ind == 2, ]

# Baseline network (default single hidden neuron) on the training set.
concrete_model <- neuralnet(
  strength ~ cement + slag + ash + water + superplastic + coarseagg + fineagg + age,
  data = train)
str(concrete_model)
concrete_model
plot(concrete_model, rep = "best")

# Hold-out evaluation: push the eight predictor columns of the test set
# through the fitted network with compute().
set.seed(12323)
model_results <- compute(concrete_model, test[1:8])
str(model_results)
predicted_strength <- model_results$net.result
cor(predicted_strength, test$strength)  # ~0.84
plot(predicted_strength, test$strength)

# Predictions are on the [0, 1] scale; map back to the original strength
# units with the inverse of the min-max transform.
str_max <- max(concrete$strength)
str_min <- min(concrete$strength)
inv_min_max <- function(v, lo, hi) {
  (hi - lo) * v + lo
}
Actualarea_pred <- inv_min_max(predicted_strength, str_min, str_max)
head(Actualarea_pred)

# Larger network: 5 hidden neurons, fitted on the full normalised data.
# More neurons reduce the SSE at the cost of more training steps.
model <- neuralnet(
  strength ~ cement + slag + ash + water + superplastic + coarseagg + fineagg + age,
  data = concrete_norm, hidden = 5)
plot(model)
model_res <- compute(model, test[1:8])
predict <- model_res$net.result  # (shadows base::predict within this script)
cor(predict, test$strength)  # ~0.95
plot(predict, test$strength)

# Same architecture using the "." formula shorthand for all predictors.
model1 <- neuralnet(strength ~ ., data = concrete_norm, hidden = 5)
plot(model1)
model1_res <- compute(model1, test[1:8])
predict1 <- model1_res$net.result
cor(predict1, test$strength)
plot(predict1, test$strength)
|
1a7abec049428a3a7895d1c3de68a9eacc8f5343 | 4c27d5a2a61ac4fd036669e38daf407e8645ee66 | /ch_06_v2/ch_06_03_models.R | e68c689ee36134649ab33b27a7a53f8056c1f4e2 | [] | no_license | michaelfrancenelson/Spatial_Ecology_Fletcher_Fortin | da27fd9441752dcba6fa8b37802b7dc6a7e811f9 | 048d1890dc2f07a643a7826fa7cf0cbca17dfbc7 | refs/heads/master | 2020-12-05T14:40:41.452798 | 2020-04-27T12:24:52 | 2020-04-27T12:24:52 | 232,141,202 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,976 | r | ch_06_03_models.R | ## ---- model_1 ----
# Model 1: logistic regression of varied thrush presence/absence (VATH)
# on elevation only.
fit_elev <- glm(VATH ~ elevation, family = "binomial", data = thrush_sf)
summary(fit_elev)
## ---- model_2 ----
# Model 2: additive terrain model — elevation, slope and aspect.
fit_terrain <- glm(VATH ~ elevation + slope + aspect,
                   family = "binomial", data = thrush_sf)
summary(fit_terrain)
## ---- model_3 ----
# Model 3: quadratic elevation model; I() protects the squared term
# inside the formula.
fit_elev_poly <- glm(VATH ~ elevation + I(elevation^2),
                     family = "binomial", data = thrush_sf)
summary(fit_elev_poly)
## ---- model_1_center ----
# Model 1 refit using the `_cen` predictor (elevation centred elsewhere
# in the file).
fit_elev_cen <- glm(VATH ~ elevation_cen,
                    family = "binomial", data = thrush_sf)
summary(fit_elev_cen)
## ---- model_2_center ----
# Model 2 refit with the centred terrain predictors.
fit_terrain_cen <- glm(VATH ~ elevation_cen + slope_cen + aspect_cen,
                       family = "binomial", data = thrush_sf)
summary(fit_terrain_cen)
## ---- model_3_center ----
# Model 3 refit with centred elevation; centring reduces the correlation
# between the linear and quadratic terms.
fit_elev_poly_cen <- glm(VATH ~ elevation_cen + I(elevation_cen^2),
                         family = "binomial", data = thrush_sf)
summary(fit_elev_poly_cen)
## ---- model_diagnostics_1 ----
# Diagnostics for the elevation-only model.
# (The original chunk contained leftover template code referencing
# undefined objects `d` and `fit`, three identical copy-pasted ggplot
# calls, a dangling expression, and an unmatched closing brace that
# prevented the file from parsing; only the working thrush_sf-based
# diagnostics are kept.)

# Base-R diagnostic plots (of limited use for a binomial GLM, but shown
# for completeness).
plot(fit_elev)

# Attach fitted probabilities and response-scale residuals to the data.
thrush_sf$predicted = predict(fit_elev, type = "response")
thrush_sf$residuals = residuals(fit_elev, type = "response")

# Observed 0/1 response vs centred elevation: segments run from each
# observation to its fitted value; filled points are coloured by residual,
# open points mark the fitted probabilities.
ggplot(thrush_sf, aes(x = elevation_cen, y = VATH)) +
  geom_segment(aes(xend = elevation_cen, yend = predicted), alpha = .2) +
  geom_point(aes(color = residuals)) +
  scale_color_gradient2(low = "blue", mid = "white", high = "red") +
  guides(color = FALSE) +
  geom_point(aes(y = predicted), shape = 1) +
  theme_bw()
## ---- model_1_diagnostics ----
{
  # Residual diagnostics for the centred elevation-only model: copy the
  # data and attach fitted probabilities and response-scale residuals.
  dat_fit_1 <- data.frame(thrush_sf)
  dat_fit_1$predicted <- predict(fit_elev_cen, type = "response")
  dat_fit_1$residuals <- residuals(fit_elev_cen, type = "response")

  # Residuals against the raw predictor ...
  plot(residuals ~ elevation, data = dat_fit_1)
  # ... and against the fitted probabilities.
  plot(residuals ~ predicted, data = dat_fit_1)
}
## ---- model_3_diagnostics ----
{
  # Residual diagnostics for the centred quadratic-elevation model.
  dat_fit_3 <- data.frame(thrush_sf)
  dat_fit_3$predicted <- predict(fit_elev_poly_cen, type = "response")
  dat_fit_3$residuals <- residuals(fit_elev_poly_cen, type = "response")

  # Residuals against the raw predictor ...
  plot(residuals ~ elevation, data = dat_fit_3)
  # ... and against the fitted probabilities (small solid points).
  plot(residuals ~ predicted, data = dat_fit_3, pch = 16, cex = 0.1)
}
## ---- model_1_diagnostics_fancy ----
# Experimental/scratch diagnostics for model 1, deliberately disabled with
# `if (FALSE)` and kept for reference. Depends on `dat_fit_1` created in
# the model_1_diagnostics chunk above.
# NOTE(review): if re-enabled, `plot(..., type = )` below passes a missing
# value for `type` and will error — supply a plot type or drop the argument.
if (FALSE)
{
  # lowess smooths of residuals vs elevation: per response class, then overall.
  with(subset(dat_fit_1, VATH == 0), lines(lowess(elevation, residuals)))
  with(subset(dat_fit_1, VATH == 1), lines(lowess(elevation, residuals)))
  with(dat_fit_1, lines(lowess(elevation, residuals)))
  plot(residuals ~ predicted, data = dat_fit_1, type = )
  # with(subset(dat_fit_1, VATH == 0), lines(lowess(predicted, residuals)))
  # with(subset(dat_fit_1, VATH == 1), lines(lowess(predicted, residuals)))
  with(dat_fit_1, lines(lowess(predicted, residuals)))
  # Fitted probabilities against elevation.
  plot(predicted ~ elevation, data = dat_fit_1)
  # ggplot versions: residuals vs elevation, then residuals vs fitted values.
  ggplot(dat_fit_1, aes(x = elevation, y = residuals)) +
    # geom_segment(aes(xend = elevation, yend = predicted), alpha = .2) +
    geom_point(aes(color = residuals)) +
    scale_color_gradient2(low = "blue", mid = "white", high = "red") +
    guides(color = FALSE) +
    geom_point(aes(y = predicted), shape = 1) +
    theme_bw()
  # Steps 3 and 4: plot the results
  ggplot(dat_fit_1, aes(x = predicted, y = residuals)) +
    # geom_segment(aes(xend = predicted, yend = predicted), alpha = .2) +
    geom_point(aes(color = residuals)) +
    scale_color_gradient2(low = "blue", mid = "white", high = "red") +
    guides(color = FALSE) +
    # geom_point(aes(y = predicted), shape = 1) +
    theme_bw()
}
## ---- model_3_diagnostics_fancy ----
# Experimental diagnostics, deliberately disabled with `if (FALSE)`.
# NOTE(review): the ggplot below uses `dat_fit_1` although the chunk name
# refers to model 3 — confirm whether `dat_fit_3` was intended.
if (FALSE)
{
  ggplot(dat_fit_1, aes(x = elevation, y = VATH)) +
    geom_segment(aes(xend = elevation, yend = predicted), alpha = .2) +
    geom_point(aes(color = residuals)) +
    scale_color_gradient2(low = "blue", mid = "white", high = "red") +
    guides(color = FALSE) +
    geom_point(aes(y = predicted), shape = 1) +
    theme_bw()
  plot(residuals ~ predicted, data = dat_fit_3)
}
# ---- model_selection ----
{
  # Compare the three centred models by AIC (lower is better).
  AIC(fit_elev_cen, fit_terrain_cen, fit_elev_poly_cen)
}
# ---- model_plots ----
{
  # Evenly spaced grid of 25 centred-elevation values spanning the
  # observed range, used as prediction points.
  newdata = data.frame(
    elevation_cen =
      seq(
        min(thrush_sf$elevation_cen),
        max(thrush_sf$elevation_cen),
        length = 25))
  # Predict over the grid on the link (logit) scale with standard errors
  # (se = T); link-scale predictions +/- SE can later be back-transformed
  # to confidence bands on the probability scale.
  glm.pred = predict(
    fit_elev_cen,
    newdata = newdata,
    type = "link",
    se = T)
}
ce45f7a8c1960be371f2c3e309779a346b76b7c9 | 08f3b72fabbab22bfbd90eb6b3984dc85cb971d8 | /man/colnames_transect.Rd | a9885916784ed608e805e4f4678bbd924721f98f | [
"MIT"
] | permissive | benmack/lucas | 8408801bbfcdcb7efd95d3fb8fb706b043218641 | a4c3376455653bf0a307456b48a91831107b7883 | refs/heads/master | 2021-01-20T10:32:18.399915 | 2019-04-07T10:01:12 | 2019-04-07T10:01:12 | 66,085,066 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 385 | rd | colnames_transect.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colnames_transect.R
\name{colnames_transect}
\alias{colnames_transect}
\title{Get column names of the transect attributes}
\usage{
colnames_transect(x)
}
\arguments{
\item{x}{an LMD data frame}
}
\value{
character vector with the transect names
}
\description{
Get column names of the transect attributes
}
|
3ffe879e7cc36f6bf7414101f6f11e5e528ddc6e | c309b0cfa10655c0554a1ec26101445b5a7d6156 | /man/ff_sup_lower_hierarchy.Rd | 00e321b1d7377a4c92074ed064d9fa63e01592fc | [
"MIT"
] | permissive | FanWangEcon/REconTools | 155ceef764a8bcabbb5db0b9796be986fc567ef4 | 2b695a2f6d47458f1f336f230548126bf7201f9f | refs/heads/master | 2022-01-26T04:08:49.039538 | 2022-01-18T05:12:27 | 2022-01-18T05:12:27 | 232,354,596 | 5 | 4 | null | null | null | null | UTF-8 | R | false | true | 719 | rd | ff_sup_lower_hierarchy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ff_sup_inout.R
\name{ff_sup_lower_hierarchy}
\alias{ff_sup_lower_hierarchy}
\title{Increase the RMD Hierarchy of Existing Files}
\usage{
ff_sup_lower_hierarchy(st_file, it_hierachy_lower_rmd = 1)
}
\arguments{
\item{st_file}{string; the text contents of an R4Econ RMD file.}

\item{it_hierachy_lower_rmd}{integer; how many pound-sign (#) levels to add to each heading.}
}
\value{
modified texts
}
\description{
Modify Rmd files by increasing the number of pound signs in headings. This is
needed for RMD files converted from mlx, where the top-level file hierarchy can
only be set to two pounds but should be three for M4Econ.
}
\author{
Fan Wang, \url{http://fanwangecon.github.io}
}
|
714d3bf906f6ace9bfbb1b93c29ef00d1f96735e | d2aac74ea9b5f68360a601dc9ff6494105bfba36 | /Ch05/5-8.R | edffeddbba889b60d93d61d3f7844c23c58aef4a | [] | no_license | ligr00vefe/R-2020 | f61bc50adfcec99b1ced34d95884072a247b24b4 | bb72711b6fb3dd1b46dd4cd55ee7b5982f125903 | refs/heads/master | 2023-01-03T06:45:03.478169 | 2020-10-28T01:56:37 | 2020-10-28T01:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | 5-8.R | # 날짜 : 2020/08/05
# Author  : Kwon Ki-min (권기민)
# Content : text visualisation — build a word cloud from the nouns of a
#           text file, using KoNLP for Korean morphological analysis.

# One-time package setup: KoNLP needs a JDK and is installed from GitHub.
install.packages("multilinguer")
library(multilinguer)
install_jdk()
install.packages(c('stringr', 'hash', 'tau', 'Sejong', 'RSQLite', 'devtools'), type = "binary")
install.packages("remotes")
remotes::install_github('haven-jeon/KoNLP', upgrade = "never", INSTALL_opts=c("--no-multiarch"))
library(KoNLP)
library(dplyr)
library(stringr)
# Load the NIA dictionary used for the morphological (noun) analysis.
useNIADic()
# Read the raw text, one line per vector element.
data_song <- readLines('../file/song.txt')
View(data_song)
# Replace special (non-word) characters with spaces.
data_song <- str_replace_all(data_song, '\\W', ' ')
View(data_song)
# Extract nouns from each line.
data_noun <- extractNoun(data_song)
View(data_song)  # NOTE(review): probably meant View(data_noun) — confirm
# Tally noun frequencies.
word_count <- table(unlist(data_noun))
View(word_count)
# Convert the frequency table to a data frame (keep words as strings).
df_word <- as.data.frame(word_count, stringsAsFactors = F)
View(df_word)
# Keep only words of two or more characters, dropping missing values.
df_word <- df_word %>% filter(nchar(Var1) >= 2) %>% filter(!is.na(Var1))
View(df_word)
# Render the word cloud.
install.packages('wordcloud')
library(wordcloud)
library(RColorBrewer)
pal <- brewer.pal(8, 'Dark2')
set.seed(1)  # fixed seed so the cloud layout is reproducible
wordcloud(words = df_word$Var1,
          freq = df_word$Freq,
          min.freq = 9,
          max.words = 1000,
          random.order = F,
          rot.per = 0.7,
          scale = c(4, 0.3),
          colors = pal)
|
50e9babde25eed55f00aa5bd7de6fbf907999c24 | c06ff03ea688fd22f7968eed274763a7475ae0a4 | /shinycoreci-apps-master/shinycoreci-apps-master/037-date-and-date-range/tests/shinytests/mytest.R | 7aa329f861b392ea920e42481dd04f2db620d9c6 | [] | no_license | RohanYashraj/R-Tutorials-Code | a6b670a7c580e270461c401eb2fb6b52c5accd1a | 1c5e4fa9103912a40c84bca65fbeeb8c9f94e428 | refs/heads/main | 2023-04-06T16:41:42.867165 | 2021-02-04T05:41:36 | 2021-02-04T05:41:36 | 335,850,407 | 1 | 1 | null | 2021-04-13T17:40:14 | 2021-02-04T05:42:29 | HTML | UTF-8 | R | false | false | 450 | r | mytest.R | app <- ShinyDriver$new("../../", seed = 100, shinyOptions = list(display.mode = "normal"))
# Recorded shinytest script (the ShinyDriver `app` is created at the top
# of this file). Each app$snapshot() captures app state for comparison
# against the stored expected results.
app$snapshotInit("mytest")
app$snapshot()  # baseline snapshot before any input changes
# Exercise the two single-date inputs.
app$setInputs(date = "2020-03-13")
app$setInputs(date2 = "2020-01-12")
# Exercise the two date-range inputs (each range is set twice).
app$setInputs(dateRange = c("2019-12-01", "2020-01-10"))
app$setInputs(dateRange = c("2019-12-01", "2020-04-24"))
app$setInputs(dateRange2 = c("2020-01-02", "2020-01-11"))
app$setInputs(dateRange2 = c("2020-01-02", "2020-01-17"))
app$snapshot()  # final snapshot after all inputs are set
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.