blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7d623138505de4f1eff432b1466f4ea633c63fe | f1de1e557c0013509bfa93ebec936e81549232fe | /Chapter 4/Test_Error.R | 34c538b420a00377679208011fc718ede3f611cd | [] | no_license | BassJohn09/Introduction-to-Statistical-Learning---Solution | f79036df36e4e741b2d2df79cec35f0a4b84cf55 | 499587a5bb836f3d6486b294d7144f983636979b | refs/heads/master | 2020-04-24T10:40:52.559098 | 2019-02-26T23:06:32 | 2019-02-26T23:06:32 | 171,902,010 | 0 | 0 | null | 2019-02-26T23:06:33 | 2019-02-21T16:00:22 | Jupyter Notebook | UTF-8 | R | false | false | 1,002 | r | Test_Error.R |
# Scenarios
# KNN Classification
# Compares test error of KNN vs. logistic regression on the ISLR Caravan data.
require(ISLR)
library(class)
#Since data has different magnitudes, scaling is necessary
# Column 86 is the response (Purchase); all other columns are standardized.
standard.X <- scale(Caravan[,-86])
standard.Y <- Caravan[,86]
# K- Nearest Neighbors
# Hold out n randomly chosen rows as the test set.
# NOTE(review): no set.seed() call, so the split is not reproducible.
n <- 1000
test.vector <- sample(1:nrow(Caravan),n, replace = FALSE)
training.X <- standard.X[-c(test.vector),]
test.X <- standard.X[test.vector,]
training.Y <- standard.Y[-c(test.vector)]
test.Y <- standard.Y[test.vector]
# KNN with k = 3 neighbors; misclassification and accuracy rates follow.
KNN.pred <- knn(training.X,test.X,training.Y,k=3)
mean(test.Y != KNN.pred)
mean(test.Y == KNN.pred)
# Confusion matrix; off-diagonal cells are the errors.
KNN.cross <- table(KNN.pred,test.Y)
Error.knn <- (KNN.cross[1,2]+KNN.cross[2,1])/n
# Logistic Regression
# Fit on the complement of the test set (unscaled predictors are fine for glm).
glm.fit <- glm(Purchase ~ .,data = Caravan, family = "binomial",subset = -test.vector)
summary(glm.fit)
glm.probs <- predict(glm.fit,newdata = Caravan[test.vector,],type = "response")
glm.probs[1:5] # Close to 0.5
# Threshold posterior probabilities at 0.5.
# NOTE(review): if no prediction exceeds 0.5, the table has a single row and
# the [2,1] indexing below would fail — worth confirming on this data.
glm.pred <- ifelse(glm.probs > 0.5,"Yes","No")
logit.cross <- table(glm.pred,test.Y)
Error.logreg <- (logit.cross[1,2]+logit.cross[2,1])/n
|
993af56288f019832cbd5f39c61f457f56b3b31a | a1241d111c801c927dc800722e82efd2329c1474 | /man/draw_person.Rd | c0c1b37552229f1395f9c15e6340914721dd2bd3 | [] | no_license | zkzofn/GEMINI | 819203296a8e6181aac2e8dfae868ee7a3e6c69b | 90cea036dc9fe851032c53dd3e85fb922aac7f6f | refs/heads/master | 2020-05-16T15:58:49.945475 | 2019-03-28T01:54:38 | 2019-03-28T01:54:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 258 | rd | draw_person.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/draw_person.R
\name{draw_person}
\alias{draw_person}
\title{Draw person}
\usage{
draw_person()
}
\description{
This function draws a graph from the person RDS data.
}
\keyword{gemini}
|
b3e2c66f3146b4b832a6e47d543e4ed0c586c7fe | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SDT/examples/summary.sdi.Rd.R | e3e529df887e40608dc7ac8f6d2d9c2d870bb1d0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 435 | r | summary.sdi.Rd.R | library(SDT)
### Name: summary.sdi
### Title: Summary Method for Objects of Class sdi
### Aliases: summary.sdi
### Keywords: methods univar
### ** Examples
## attach dataset to search path (for using variable names)
## NOTE(review): attach() is discouraged outside of examples; prefer
## with(learning_motivation, ...) to avoid polluting the search path.
attach(learning_motivation)
## original and adjusted index summary
## First call skips the adjusted index; second computes both.
summary(sdi(intrinsic, identified, introjected, external, compute.adjusted = FALSE))
summary(sdi(intrinsic, identified, introjected, external))
8aa744a88cb00eda74ee42aa1e0cece764dc5720 | d42265adacbce1ff851fdb64c0ff4ed19af77a53 | /code/07-residuals-analysis.R | 803fe33dc10bee82385837a9ab5aece0db9f65ab | [] | no_license | Gui-go/human_capital_x_production_in_sc | fbb21bd4977c79fe60ec8bb508599d07a50a17d1 | 3f1d2af2ce3d01ffb0a42e8a45c9b4f41e61efb3 | refs/heads/main | 2023-02-27T07:52:32.949853 | 2021-02-05T22:26:02 | 2021-02-05T22:26:02 | 314,433,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,360 | r | 07-residuals-analysis.R | # R-script 07-residualas-analysis.R
# Setup -------------------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the global environment of whoever
# sources this script; generally better left out of shared scripts.
rm(list = ls())
gc()
options(stringsAsFactors = F)
ggplot2::theme_set(ggplot2::theme_minimal())
options(scipen = 666)
# Packages ----------------------------------------------------------------
# Install-if-missing pattern. NOTE(review): when a package is missing,
# install.packages() runs but the library is NOT loaded in the same pass;
# the script must be re-run after installation.
if(!require(readr)){install.packages("readr")}
if(!require(plyr)){install.packages("plyr")}
if(!require(dplyr)){install.packages("dplyr")}
if(!require(ggplot2)){install.packages("ggplot2")}
if(!require(janitor)){install.packages("janitor")}
if(!require(sf)){install.packages("sf")}
if(!require(sp)){install.packages("sp")}
if(!require(st)){install.packages("st")}
if(!require(leaflet)){install.packages("leaflet")}
if(!require(mongolite)){install.packages("mongolite")}
if(!require(readxl)){install.packages("readxl")}
if(!require(janitor)){install.packages("janitor")}
if(!require(spdep)){install.packages("spdep")}
if(!require(vroom)){install.packages("vroom")}
if(!require(jtools)){install.packages("jtools")}
# Functions ---------------------------------------------------------------
# Append per-feature centroid coordinates as two new columns on an sf object.
#
# polygonx: an sf object with a `geometry` column of polygons.
# names:    column names for the two centroid coordinates, in order.
# Returns the input object with the two coordinate columns bound on.
#
# NOTE(review): st_centroid() yields (X, Y) = (lon, lat) pairs, so with the
# default `names` the first column labelled "centlat" actually holds the
# X coordinate — confirm the intended labelling against downstream use.
cent_as_cols <- function(polygonx, names = c("centlat", "centlng")) {
  coord_mat <- do.call(rbind, st_centroid(polygonx$geometry))
  coord_tbl <- tibble::as_tibble(coord_mat)
  coord_tbl <- stats::setNames(coord_tbl, c(names[1], names[2]))
  dplyr::bind_cols(polygonx, coord_tbl)
}
# Min-max rescale a numeric vector to [0, 1].
#
# x: numeric vector; may contain NA.
# Returns a vector of the SAME length as `x`, with NA preserved in place.
#
# Fix: the original did `x <- x[!is.na(x)]`, silently shortening the output
# when NAs were present — assigning the result back into a data-frame column
# (as done below with reg1$residuals) would then fail. Using na.rm = TRUE in
# range() keeps the length while still ignoring NAs for the min/max.
normalize <- function(x) {
  rng <- range(x, na.rm = TRUE)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Data --------------------------------------------------------------------
# Population with completed higher education per municipality (IBGE table
# 1554), restricted to Santa Catarina (UF code 42).
suppressMessages(
  df <- readr::read_csv("data/clean/tabela1554.csv") %>%
    janitor::clean_names() %>%
    dplyr::mutate(
      uf = substr(cod, 1, 2),            # first 2 digits of the code = state
      cd_mun = as.character(cod)
    ) %>%
    dplyr::rename('pop_sup_comp'='superior_completo') %>%
    dplyr::filter(uf%in%c('42')) %>%     # keep Santa Catarina only
    dplyr::group_by(cd_mun) %>%
    dplyr::summarise(
      pop_sup_comp = pop_sup_comp,
      municipio = municipio
    ) %>%
    dplyr::select(cd_mun, municipio, pop_sup_comp)
)
# Exports (COMEX foreign-trade data by municipality)
# http://www.mdic.gov.br/index.php/comercio-exterior/estatisticas-de-comercio-exterior/base-de-dados-do-comercio-exterior-brasileiro-arquivos-para-download
suppressMessages(
  exp_comex <- vroom::vroom(file = "data/clean/EXP_COMPLETA_MUN.csv") %>%
    suppressMessages() %>%
    janitor::clean_names() %>%
    dplyr::filter(co_ano>=2010) %>%      # years 2010 onward
    dplyr::filter(sg_uf_mun%in%c('SC')) %>%
    dplyr::mutate(exp_fob=if_else(is.na(vl_fob), 0, vl_fob)) %>%  # NA FOB -> 0
    # dplyr::mutate("sh2" = substr(sh4, 1, 2)) %>%
    # dplyr::filter(sh2=='69') %>%
    dplyr::group_by(co_mun) %>%
    dplyr::summarise(exp_fob = sum(exp_fob)) %>%
    dplyr::mutate(cd_mun=as.character(co_mun)) %>%
    dplyr::select(cd_mun, exp_fob)
)
# Spatial data of the regional divisions in SC (IBGE API services):
# maps municipality codes to microregion codes/names.
suppressMessages(
  loc <- readr::read_csv("data/localizacoes_ibge_sc.csv") %>%
    dplyr::mutate(
      cd_mun = as.character(cd_mun),
      cd_micro = as.character(cd_micro)
    )
)
# Shapefile of SC municipalities (IBGE Geosciences, territorial division)
sc_shp <- sf::st_read("data/raw/sc_municipios/") %>%
  janitor::clean_names() %>%
  st_set_crs(4326)                       # WGS84
# plot(sc_shp['cd_mun'])
# Join --------------------------------------------------------------------
# Dissolve municipality polygons into microregions and attach centroids.
micro_shp <- dplyr::left_join(sc_shp, loc) %>%
  na.omit() %>%
  group_by(cd_micro) %>%
  summarise() %>%
  cent_as_cols(.) %>%
  st_set_crs(4326)
# Aggregate exports and education counts to the microregion level.
data <- dplyr::left_join(exp_comex, df, by=c("cd_mun")) %>%
  dplyr::left_join(., loc, by = "cd_mun") %>%
  # stats::na.omit(.) %>%
  dplyr::group_by(cd_micro) %>%
  dplyr::summarise(
    nm_micro = first(nm_micro),
    pop_sup_comp = sum(pop_sup_comp, na.rm = T),
    log_pop_sup_comp = log(sum(pop_sup_comp, na.rm = T)),
    exp = sum(exp_fob, na.rm = T),
    log_exp = log(sum(exp_fob, na.rm = T))
  ) %>%
  dplyr::left_join(., micro_shp, by = "cd_micro") %>%
  sf::st_as_sf()
# Quick visual checks: levels and log-log relationship.
plot(data$exp, data$pop_sup_comp)
plot(data$log_exp, data$log_pop_sup_comp)
# Model -------------------------------------------------------------------
# OLS of log exports on log population with higher education.
reg1 <- lm(log_exp ~ log_pop_sup_comp, data = data)
summary(reg1)
jtools::summ(reg1, digits = 5)
# Keep raw and [0, 1]-normalized residuals for spatial diagnostics.
data$reg_res <- reg1$residuals
data$reg_res_norm <- normalize(reg1$residuals)
hist(data$reg_res_norm, 30)
# Queen-contiguity neighbors and row-standardized weights for Moran's I
# test of residual spatial autocorrelation.
nb <- spdep::poly2nb(data, queen=TRUE)
lw <- nb2listw(nb, style="W", zero.policy=TRUE)
spdep::moran.test(data$reg_res,lw)
hist(data$reg_res, 30)
plot(data['reg_res'])
plot(data['reg_res_norm'])
# Normality check of residuals.
shapiro.test(data$reg_res)
# Spatial Model -----------------------------------------------------------
# install.packages("spgwr")
# library(spgwr)
# Geographically Weighted Regression at the microregion centroids.
# NOTE(review): coords are cbind(centlng, centlat); given how cent_as_cols()
# labels the centroid columns, verify the (x, y) = (lon, lat) ordering.
dd <- sp::SpatialPointsDataFrame(
  data=data.frame(
    log_exp = data$log_exp,
    log_pop_sup_comp = data$log_pop_sup_comp
  ),
  coords=cbind(data$centlng, data$centlat)
)
# Adaptive bandwidth selected by cross-validation, then the GWR fit.
GWRbandwidth <- spgwr::gwr.sel(log_exp ~ log_pop_sup_comp, data=dd, adapt=T)
gwr.model <- spgwr::gwr(
  log_exp ~ log_pop_sup_comp, data = dd,
  adapt=GWRbandwidth, hatmatrix=TRUE, se.fit=TRUE
)
# Local coefficients back onto the microregion polygons for mapping.
results <- as.data.frame(gwr.model$SDF)
results$cd_micro <- data$cd_micro
dd2 <- SpatialPointsDataFrame(
  data=results,
  coords=cbind(data$centlng, data$centlat)
) %>% sf::st_as_sf(.) %>% st_set_crs(4326)
shp_ff <- dplyr::left_join(micro_shp, results, by = 'cd_micro')
# class(shp_ff)
plot(shp_ff['log_pop_sup_comp'])
|
79d3cd08c769ef147fe46df5996574c0bc9f5aad | c57b7c0b78074544d19257e052c4d5849ee87936 | /Data Manipulation.R | 1a5f3b8206273aae8346ff27f1195ac8458b20d7 | [] | no_license | Lakshhmi/R-Course | af5a8751eac761dcc22dee5eaf0be8f08f2d55fb | ac247341c749308e06c04068236818dd862d5266 | refs/heads/master | 2020-03-17T19:42:48.745175 | 2018-11-08T15:45:27 | 2018-11-08T15:45:27 | 133,874,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,669 | r | Data Manipulation.R | # Data Manipulation
# dplyr/tidyr walkthrough on the nycflights13 data.
install.packages('dplyr')
install.packages('nycflights13')
library(dplyr)
library(nycflights13)
head(flights)
summary(flights)
# Dplyr built in functions
# filter() and slice()
# arrange()
# select() and rename()
# distinct()
# mutate() and transmute()
# summarise()
# file:///C:/Lakshmi/R-Course/R Programming/Data Manipulation.R
#filter() - selects a subset of rows matching all conditions
head(filter(flights,month==11,day==3,carrier=='AA'))
#slice() - selects rows by position
slice(flights,1:10)
#arrange() - like filter, but orders rows by the given columns
head(arrange(flights,year,month,day,air_time))
head(arrange(flights,year,month,day,desc(air_time)))
#select() - keeps only the named columns
head(select(flights,carrier))
head(select(flights,carrier,arr_time))
head(select(flights,carrier,arr_time,month))
#rename() - quickly renames a column (new_name = old_name)
head(rename(flights,airlines_carrier=carrier))
#distinct() - selects distinct/unique values in a column or table
distinct(select(flights,carrier))
#mutate() - adds new columns computed from existing ones
head(mutate(flights,new_col=arr_delay-dep_delay))
head(mutate(flights,new_col=arr_delay*dep_delay))
#transmute() - like mutate() but keeps only the new columns
head(transmute(flights,new_col=arr_delay*dep_delay))
#summarise() - collapses the table to aggregate values
summarise(flights,avg_air_time=mean(air_time,na.rm = TRUE))
summarise(flights,total_time=sum(air_time,na.rm = TRUE))
#sample_n() - returns a random number of rows
sample_n(flights,10)
#sample_frac() - returns a random fraction of rows
sample_frac(flights,0.1)
sample_frac(flights,0.5)
install.packages('tidyr',repos = 'http://cran.us.r-project.org')
library(tidyr)
library(data.table)
#Let's create some fake data that needs to be cleaned using tidyr
comp <- c(1,1,1,2,2,2,3,3,3)
yr <- c(1998,1999,2000,1998,1999,2000,1998,1999,2000)
q1 <- runif(9, min=0, max=100)
q2 <- runif(9, min=0, max=100)
q3 <- runif(9, min=0, max=100)
q4 <- runif(9, min=0, max=100)
df <- data.frame(comp=comp,year=yr,Qtr1 = q1,Qtr2 = q2,Qtr3 = q3,Qtr4 = q4)
df
#gather() and spread() - wide <-> long reshaping
#NOTE(review): gather()/spread() are superseded by pivot_longer()/pivot_wider().
# Using Pipe Operator
head(df %>% gather(Quarter,Revenue,Qtr1:Qtr4))
# With just the function
head(gather(df,Quarter,Revenue,Qtr1:Qtr4))
stocks <- data.frame(
  time = as.Date('2009-01-01') + 0:9,
  X = rnorm(10, 0, 1),
  Y = rnorm(10, 0, 2),
  Z = rnorm(10, 0, 4)
)
stocks
stocksm <- stocks %>% gather(stock, price, -time)
stocksm %>% spread(stock, price)
stocksm %>% spread(time, price)
# Separate and Unite
#separate() - splits one column into several on a separator
df <- data.frame(x = c(NA, "a.x", "b.y", "c.z"))
df
df %>% separate(x, c("ABC", "XYZ"))
#unite() - combines several columns into one
#NOTE(review): unite_() is deprecated; modern code uses unite().
head(mtcars)
unite_(mtcars, "vs.am", c("vs","am"),sep = '.')
mtcars %>%
unite(vs_am, vs, am) %>%
separate(vs_am, c("vs", "am")) |
c947861bd4cb9fd72fddc23e87c7f8821efe3601 | 32ff5ccab138160fee9fca0ad39d550573f7b575 | /Quora_Functions.R | e8c637fcfe88b939879919d937b1931e0b7666fc | [] | no_license | mrbraveheart71/Quora | 48df310547f3469e22494e88a5a2bb3c7a8fb23b | 421a91749a4ea66f4787e5112c53409746d523eb | refs/heads/master | 2021-01-19T11:48:27.455552 | 2017-05-06T23:43:29 | 2017-05-06T23:43:29 | 87,995,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,237 | r | Quora_Functions.R |
# Binary cross-entropy (log loss) averaged over observations.
#
# act:  vector of 0/1 ground-truth labels.
# pred: vector of predicted probabilities for class 1.
# Predictions are clamped to [1e-15, 1 - 1e-15] so log() stays finite.
MultiLogLoss <- function(act, pred) {
  clamped <- pmax(pmin(pred, 1 - 1e-15), 1e-15)
  per_obs <- act * log(clamped) + (1 - act) * log(1 - clamped)
  -sum(per_obs) / NROW(act)
}
# Inverse-frequency weight for a word: rare words get (slightly) higher
# weight, and words seen fewer than min_count times get weight 0.
#
# count:     word occurrence count(s); may be a vector.
# eps:       smoothing constant added to the count.
# min_count: counts below this threshold yield a 0 weight.
#
# Fix/generalization: the original used a scalar `if`, which errors (R >= 4.2)
# or misbehaves on vector input; ifelse() vectorizes while returning the
# identical values for scalars.
getWordWeights <- function(count, eps = 5000, min_count = 2) {
  ifelse(count < min_count, 0, 1 / (count + eps))
}
# Fraction of n-grams shared between the two questions (Dice coefficient:
# 2*|shared| / (|t1| + |t2|)). Relies on the globals `words.google` (frequency-
# ordered stopword list) and the tokenizers/SnowballC packages being loaded.
#
# question1/question2: raw question strings.
# n, n_min:            n-gram size range passed to tokenize_ngrams().
# googleDictMin:       how many of the top Google words to drop as stopwords.
# stemming:            if TRUE, stem words before building n-grams.
# Returns a value in [0, 1]; 0 when both questions tokenize to nothing.
nGramHitRate <- function(question1,question2,n=1,n_min=1,googleDictMin=500,stemming=TRUE) {
  if (stemming==TRUE) {
    # Re-join the stems so tokenize_ngrams below sees a single string.
    question1 <- paste0(tokenize_word_stems(question1,language="en", simplify=TRUE),collapse=" ")
    question2 <- paste0(tokenize_word_stems(question2,language="en", simplify=TRUE),collapse=" ")
  }
  t1 <- unlist(tokenize_ngrams(question1,n=n, n_min=n_min,stopwords=words.google[1:googleDictMin]))
  t2 <- unlist(tokenize_ngrams(question2,n=n, n_min=n_min,stopwords=words.google[1:googleDictMin]))
  sharedPhrases <- intersect(t1,t2)
  bothLengths <- length(t1)+length(t2)
  hitRate <- 2*length(sharedPhrases)/bothLengths
  # 0/0 -> NaN when both token lists are empty; map that (and Inf) to 0.
  hitRate <- ifelse(is.infinite(hitRate) | is.nan(hitRate),0,hitRate)
  hitRate
}

# Overlap score rewarding LONG shared phrases: each shared phrase contributes
# (number of words)^2, normalized by the combined stopword-free word counts.
# Stems both questions first; uses the caller-supplied stopword vector.
sentenceOverlap <- function(question1,question2,n=20,stopWords,stemming=TRUE) {
  # Do stemming first
  question1 <- paste0(tokenize_word_stems(question1,language="en", simplify=TRUE),collapse=" ")
  question2 <- paste0(tokenize_word_stems(question2,language="en", simplify=TRUE),collapse=" ")
  t1 <- tokenize_ngrams(question1,n=n, n_min=1,stopwords=stopWords,simplify=TRUE)
  q1Length <- length(tokenize_words(question1,stopwords = stopWords,simplify=TRUE))
  t2 <- tokenize_ngrams(question2,n=n, n_min=1,stopwords=stopWords,simplify =TRUE)
  q2Length <- length(tokenize_words(question2,stopwords = stopWords,simplify=TRUE))
  overlap <- intersect(t1,t2)
  if (length(overlap>0)) {
    # Squared word-count of each shared phrase; tstrsplit counts its words.
    overlapSum <- sum(sapply(overlap,function(x) length(tstrsplit(x," "))^2))
  } else {
    overlapSum <- 0
  }
  # Earlier scoring variants kept for reference:
  #ret <- tanh(overlapSum/(q1Length+q2Length))
  #ret <- overlapSum
  ret <- overlapSum/(q1Length+q2Length)
}
# Dice overlap of skip n-grams between two questions: 2*|shared|/(|t1|+|t2|).
#
# question1/question2: raw question strings.
# n: n-gram size; k: maximum skip distance (tokenizers::tokenize_skip_ngrams).
# Returns a value in [0, 1]; 0 when both questions produce no skip-grams.
#
# Fix: the original guarded only is.infinite(), so when BOTH token lists are
# empty the 0/0 division returned NaN — the sibling nGramHitRate handles the
# same case with an is.nan() check; this makes the two consistent.
nGramSkipHitRate <- function(question1,question2,n=2,k=1) {
  t1 <- tokenize_skip_ngrams(question1,n=n, k=k, simplify = TRUE)
  t2 <- tokenize_skip_ngrams(question2,n=n, k=k, simplify = TRUE)
  sharedPhrases <- intersect(t1,t2)
  bothLengths <- length(t1)+length(t2)
  hitRate <- 2*length(sharedPhrases)/bothLengths
  hitRate <- ifelse(is.infinite(hitRate) | is.nan(hitRate),0,hitRate)
  hitRate
}
# Dice overlap of STEMMED unigrams between the two questions.
# NOTE(review): the n and n_min parameters are accepted but ignored — the
# tokenize_ngrams calls hard-code n=1, n_min=1; confirm whether that is
# intended before relying on the parameters.
nGramHitRateStem <- function(question1,question2,n=1,n_min=1,googleDictMin=500) {
  t1 <- tokenize_ngrams(question1,n=1, n_min=1,stopwords=words.google[1:googleDictMin], simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=1, n_min=1,stopwords=words.google[1:googleDictMin], simplify = TRUE)
  # Stem each token (SnowballC); as.vector drops sapply's names.
  t1 <- as.vector(sapply(t1,function(x) wordStem(x,language = "english")))
  t2 <- as.vector(sapply(t2,function(x) wordStem(x,language = "english")))
  sharedPhrases <- intersect(t1,t2)
  bothLengths <- length(t1)+length(t2)
  hitRate <- 2*length(sharedPhrases)/bothLengths
  # Guard against 0/0 (both empty) and division by zero.
  hitRate <- ifelse(is.infinite(hitRate) | is.nan(hitRate),0,hitRate)
  hitRate
}

# Raw COUNT of distinct n-grams shared between the two questions
# (un-normalized companion to nGramHitRate).
nGramHitCount <- function(question1,question2,n=1,n_min=1,googleDictMin=500) {
  t1 <- unlist(tokenize_ngrams(question1,n=n, n_min=n_min,stopwords=words.google[1:googleDictMin]))
  t2 <- unlist(tokenize_ngrams(question2,n=n, n_min=n_min,stopwords=words.google[1:googleDictMin]))
  sharedPhrases <- intersect(t1,t2)
  hitRate <- length(sharedPhrases)
}
# Count of words shared by the LAST n non-stopwords of each question.
# Splits on spaces only (punctuation is kept attached to words, hence the
# "Punct" suffix). Depends on the global `words.google` frequency list.
lastNWordsPunct <- function(question1,question2,n=2, googleDictMin=500) {
  t1 <- unlist(strsplit(tolower(question1), " "))
  t2 <- unlist(strsplit(tolower(question2), " "))
  # remove stop words (setdiff also deduplicates)
  t1 <- setdiff(t1,words.google[1:googleDictMin])
  t2 <- setdiff(t2,words.google[1:googleDictMin])
  t1.start <- max(1,length(t1)-n+1)
  t2.start <- max(1,length(t2)-n+1)
  hitRate <- length(intersect(t1[t1.start:length(t1)],t2[t2.start:length(t2)]))
  hitRate
}

# Number of distinct non-stopword unigrams shared by the two questions.
sharedWords <- function(question1,question2,googleDictMin=500) {
  t1 <- unlist(tokenize_ngrams(question1,n=1, n_min=1,stopwords=words.google[1:googleDictMin]))
  t2 <- unlist(tokenize_ngrams(question2,n=1, n_min=1,stopwords=words.google[1:googleDictMin]))
  sharedPhrases <- intersect(t1,t2)
  length(sharedPhrases)
}

# "Soft" word overlap: each question's vocabulary is expanded to the `approx`
# nearest words in the global word-vector space `wordVecSpace` (wordVectors
# package), then the expanded sets are compared. Returns overlap normalized
# by the mean expanded-vocabulary size.
sharedApproximateWords <- function(question1,question2,googleDictMin=500, approx=3) {
  t1 <- unlist(tokenize_ngrams(question1,n=1, n_min=1,stopwords=words.google[1:googleDictMin]))
  t1 <- unique(as.vector(sapply(t1, function(x) closest_to(wordVecSpace,x,approx)$word)))
  t2 <- unlist(tokenize_ngrams(question2,n=1, n_min=1,stopwords=words.google[1:googleDictMin]))
  t2 <- unique(as.vector(sapply(t2, function(x) closest_to(wordVecSpace,x,approx)$word)))
  sharedPhrases <- intersect(t1,t2)
  length(sharedPhrases)/(0.5*length(t1)+0.5*length(t2))
}
# Corpus frequency (as % of all word occurrences) of a question's LAST word.
# Depends on the global named vector `word.count` (word -> occurrence count).
# NOTE(review): the `stopwords` parameter is never used — the tokenize call
# reads the GLOBAL `stopWords` instead (case mismatch). Callers passing a
# custom stopword list will silently have it ignored; confirm and fix.
lastWordCount <- function(question,stopwords=NULL) {
  t <- unlist(tokenize_ngrams(question,n=1, n_min=1,stopwords=stopWords))
  if (length(t)==0) 0 else {
    # earlier variant: bucketed raw count
    #word.count[t[length(t)]] %/% 100
    word.count[t[length(t)]]/sum(word.count)*100
  }
}

# Mean corpus frequency of the question's last TWO words, halved
# (same % scale as lastWordCount). 0 when fewer than two tokens remain.
# NOTE(review): same issue as lastWordCount — the `stopwords` parameter is
# shadowed by the global `stopWords`.
lastTwoWordCount <- function(question, stopwords=NULL) {
  t <- unlist(tokenize_ngrams(question,n=1, n_min=1,stopwords=stopWords))
  if (length(t)<2) 0 else {
    # earlier variant: bucketed raw count
    #word.count[t[length(t)]] %/% 100
    word1.count <- word.count[t[length(t)]]
    word2.count <- word.count[t[length(t)-1]]
    # na.rm: a word missing from word.count yields NA for that lookup
    mean(c(word1.count,word2.count),na.rm=TRUE)/sum(word.count)*100/2
  }
}
# Number of distinct words shared by the last n non-stopwords of each
# question. Tokenization is a plain lowercase space-split; setdiff() both
# removes the caller-supplied stopwords and deduplicates.
#
# Returns 0 when either question has no tokens left after stopword removal.
sharedWordsLastN <- function(question1, question2, n = 1, stopWords) {
  clean_tokens <- function(q) {
    setdiff(unlist(strsplit(tolower(q), " ")), stopWords)
  }
  w1 <- clean_tokens(question1)
  w2 <- clean_tokens(question2)
  if (length(w1) == 0 || length(w2) == 0) {
    return(0)
  }
  # Index windows covering the last min(n, length) tokens of each question.
  i1 <- max(length(w1) - n + 1, 1):length(w1)
  i2 <- max(length(w2) - n + 1, 1):length(w2)
  length(intersect(w1[i1], w2[i2]))
}
# For one question pair: n-grams present in BOTH questions, each paired with
# the pair's is_duplicate label. Returns a 2-column matrix or NULL when there
# is no overlap (callers filter the NULLs out with list.clean).
getNGramMatch <- function(question1,question2,is_duplicate,n=2) {
  t1 <- tokenize_ngrams(question1,n=n,n_min=n,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=n,n_min=n,simplify = TRUE)
  sharedPhrases <- intersect(t1,t2)
  if (length(sharedPhrases) > 0) {
    ret <- cbind(sharedPhrases,is_duplicate)
  } else {
    ret <- NULL
  }
  ret
}

# Thin wrapper over the tokenizers package's internal C routine for building
# n-grams from pre-split token lists (bypasses its exported tokenizer).
generate_ngrams_batch <- function(documents_list, ngram_min, ngram_max, stopwords = character(), ngram_delim = " ") {
  .Call('tokenizers_generate_ngrams_batch', PACKAGE = 'tokenizers', documents_list, ngram_min, ngram_max, stopwords, ngram_delim)
}

# Punctuation-preserving variant of getNGramMatch: tokens come from a plain
# lowercase space-split instead of tokenize_ngrams' normalization.
getNGramMatchPunct <- function(question1,question2,is_duplicate,n=2) {
  t1 <- strsplit(tolower(question1), " ")
  t2 <- strsplit(tolower(question2), " ")
  t1 <- unlist(generate_ngrams_batch(t1,ngram_min =n,ngram_max=n))
  t2 <- unlist(generate_ngrams_batch(t2,ngram_min =n,ngram_max=n))
  sharedPhrases <- intersect(t1,t2)
  if (length(sharedPhrases) > 0) {
    ret <- cbind(sharedPhrases,is_duplicate)
  } else {
    ret <- NULL
  }
  ret
}

# Complement of getNGramMatch: n-grams appearing in exactly ONE of the two
# questions (symmetric difference), labeled with is_duplicate; NULL if none.
getNGramMiss <- function(question1,question2,is_duplicate,n=2) {
  t1 <- tokenize_ngrams(question1,n=n,n_min=n,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=n,n_min=n,simplify = TRUE)
  sharedPhrases <- union(setdiff(t1,t2),setdiff(t2,t1))
  if (length(sharedPhrases) > 0) {
    ret <- cbind(sharedPhrases,is_duplicate)
  } else {
    ret <- NULL
  }
  ret
}
# For one question pair: every UNORDERED n-word combination of the words the
# two questions share (word order within a phrase is canonicalized by
# sorting), paired with the is_duplicate label. Returns a 2-column matrix
# ("sharedPhrases", "is_duplicate") or NULL when fewer than n shared words.
getNWordMatch <- function(question1,question2,is_duplicate,n=2, stemming=FALSE) {
  t1 <- tokenize_ngrams(question1,n=1,n_min=1,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=1,n_min=1,simplify = TRUE)
  if (stemming==TRUE) {
    t1 <- wordStem(t1)
    t2 <- wordStem(t2)
  }
  sharedWords <- intersect(t1,t2)
  if (length(sharedWords) > (n-1)) {
    # All n-word combinations, sorted within each row so that the same word
    # set always maps to the same key string.
    sharedPhrases <- t(combn(sharedWords,n))
    sharedPhrases <- t(apply(sharedPhrases,1,function(x) x[order(x)]))
    sharedPhrases <- apply(sharedPhrases,1,function(x) paste0(x,collapse=" "))
    ret <- cbind(sharedPhrases,is_duplicate)
    colnames(ret)[1] <- "sharedPhrases"
  } else {
    ret <- NULL
  }
  ret
}
# For one question pair: canonical n-word combination keys appearing in
# exactly ONE of the two questions (symmetric difference), each paired with
# the is_duplicate label. Returns a 2-column matrix ("sharedPhrases",
# "is_duplicate") or NULL when either side yields no combinations or the
# symmetric difference is empty.
#
# Fixes over the original:
#  - the condition was `length(sharedPhrases1>0) & length(...)`, i.e. the
#    length of a comparison vector — it only worked by accident; replaced
#    with explicit `length(x) > 0` checks joined by short-circuit `&&`.
#  - the duplicated combination-building code is factored into a helper.
getNWordMiss <- function(question1, question2, is_duplicate, n = 2,
                         stemming = FALSE, stopwords = FALSE) {
  # Translate the logical flag into an actual stopword set (global stopWords).
  st <- ""
  if (stopwords == TRUE) st <- stopWords
  # Build all sorted n-word combination keys for one question, or NULL when
  # the question has fewer than n tokens.
  phrases_for <- function(question) {
    tokens <- tokenize_ngrams(question, n = 1, n_min = 1, simplify = TRUE, stopwords = st)
    if (stemming == TRUE) {
      tokens <- wordStem(tokens)
    }
    if (length(tokens) <= (n - 1)) {
      return(NULL)
    }
    combos <- t(combn(tokens, n))
    # Sort words inside each combination so the key is order-independent.
    combos <- t(apply(combos, 1, function(x) x[order(x)]))
    apply(combos, 1, function(x) paste0(x, collapse = " "))
  }
  sharedPhrases1 <- phrases_for(question1)
  sharedPhrases2 <- phrases_for(question2)
  if (length(sharedPhrases1) > 0 && length(sharedPhrases2) > 0) {
    sharedPhrases <- union(setdiff(sharedPhrases1, sharedPhrases2),
                           setdiff(sharedPhrases2, sharedPhrases1))
    if (length(sharedPhrases) > 0) {
      ret <- cbind(sharedPhrases, is_duplicate)
      colnames(ret)[1] <- "sharedPhrases"
      return(ret)
    }
  }
  NULL
}
# Look up the duplicate-probability of every CROSS-question word pair (one
# word from each question, key sorted alphabetically) in nTupleTable, and
# return the TopN probabilities/counts after filtering by minimum count.
# nTupleTable is a keyed data.table with columns sharedPhrases/prob/count.
# NOTE(review): this returns vectors of length TopN (`[1:TopN]`), while the
# sibling *ProbTopN functions below return a single element (`[TopN]`) —
# confirm which convention downstream feature code expects.
WordCrossProbTopN <- function(question1,question2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=TRUE, stopwords=stopWords) {
  t1 <- tokenize_ngrams(question1,n=1,n_min=1,simplify = TRUE, stopwords = stopwords)
  t2 <- tokenize_ngrams(question2,n=1,n_min=1,simplify = TRUE, stopwords = stopwords)
  # Cartesian product: one word from each question per row.
  sharedWords <- expand.grid(t1,t2)
  if (length(sharedWords) > 0) {
    # Canonicalize each pair by sorting, then join into a single key string.
    sharedWords <- t(apply(sharedWords,1,function(x) x[order(x)]))
    sharedWords <- apply(sharedWords,1,function(x) paste0(x,collapse=" "))
    matches <- nTupleTable[sharedWords]
    matches <- matches[!is.na(matches$count)]
    matches <- matches[order(prob,decreasing=decreasing)]
    #idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases & nTupleTable$count > countMin)
    ret.prob <-matches[matches$count > countMin]$prob[1:TopN]
    ret.count <-matches[matches$count > countMin]$count[1:TopN]
  } else {
    ret.prob <- NA
    ret.count <- NA
  }
  list(prob=ret.prob,count=ret.count)
}

# Training-time counterpart of WordCrossProbTopN: emits every canonical
# cross-question word-pair key with the pair's is_duplicate label, or NULL
# when either question tokenizes to nothing.
getCrossMatch <- function(question1,question2,is_duplicate,stopwords) {
  t1 <- tokenize_ngrams(question1,n=1,n_min=1,simplify = TRUE, stopwords = stopwords)
  t2 <- tokenize_ngrams(question2,n=1,n_min=1,simplify = TRUE, stopwords = stopwords)
  sharedWords <- expand.grid(t1,t2)
  if (length(sharedWords) > 0) {
    sharedWords <- t(apply(sharedWords,1,function(x) x[order(x)]))
    sharedWords <- apply(sharedWords,1,function(x) paste0(x,collapse=" "))
    ret <- cbind(sharedWords,is_duplicate)
    colnames(ret)[1] <- "sharedPhrases"
  } else {
    ret <- NULL
  }
  ret
}
# Build the cross-question word-pair duplicate-probability table: for every
# training pair, emit all canonical cross-question word-pair keys (via
# getCrossMatch), then aggregate mean is_duplicate and occurrence count per
# key. Returns a keyed data.table with columns sharedPhrases/prob/count.
#
# Fix: the original passed `stopwords` as a vectorized mapply argument, so it
# was recycled element-wise across rows — each getCrossMatch call received a
# single element (the raw logical flag), which tokenize_ngrams cannot use as
# a stopword set. The flag is now mapped to the global stopWords vector
# (matching getNWordMiss's convention) and passed via MoreArgs so every call
# sees the full vector.
nCrossQuestionDiagnostics <- function(train, stopwords = TRUE) {
  st <- if (isTRUE(stopwords)) stopWords else character(0)
  word.tuples <- mapply(getCrossMatch, train$question1, train$question2,
                        train$is_duplicate, MoreArgs = list(stopwords = st),
                        USE.NAMES = FALSE, SIMPLIFY = TRUE)
  # Drop pairs that produced no keys, then stack into one table.
  word.tuples <- list.clean(word.tuples, fun = is.null, recursive = FALSE)
  word.tuples <- list.rbind(word.tuples)
  word.tuples <- as.data.table(word.tuples)
  word.tuples$is_duplicate <- as.numeric(word.tuples$is_duplicate)
  setkeyv(word.tuples, c("sharedPhrases"))
  word.tuples[, j = list(prob = mean(is_duplicate), count = length(is_duplicate)),
              by = list(sharedPhrases)]
}
# Tuple generation, words in both questions
# Build the n-word-combination duplicate-probability table over the training
# set: collect keys per pair (matched combinations, or missed ones when
# match=FALSE), then aggregate mean is_duplicate and count per key.
# Scalar arguments (n/stemming/stopwords) are safely recycled by mapply.
nWordDiagnostics <- function(train, n=2, match=TRUE, stemming=FALSE, stopwords=FALSE) {
  if (match==TRUE) {
    word.tuples <- mapply(getNWordMatch, train$question1,train$question2,train$is_duplicate,n,stemming,USE.NAMES=FALSE, SIMPLIFY=TRUE)
  } else {
    word.tuples <- mapply(getNWordMiss, train$question1,train$question2,train$is_duplicate,n,stemming,stopwords,USE.NAMES=FALSE, SIMPLIFY=TRUE)
  }
  # Drop NULLs (pairs without keys), stack, and aggregate per key.
  word.tuples <- list.clean(word.tuples, fun = is.null, recursive = FALSE)
  word.tuples <- list.rbind(word.tuples)
  word.tuples <- as.data.table(word.tuples)
  word.tuples$is_duplicate <- as.numeric(word.tuples$is_duplicate)
  setkeyv(word.tuples,c("sharedPhrases"))
  tuple.mean.duplicate <- word.tuples[,j=list(prob=mean(is_duplicate),count=length(is_duplicate)),by=list(sharedPhrases)]
  tuple.mean.duplicate
}
# Feature lookup: n-word combinations appearing in exactly ONE question are
# looked up in nTupleTable (keyed data.table: sharedPhrases/prob/count); the
# TopN-th probability/count after sorting by prob is returned (NA if none).
# NOTE(review): `if (length(t1) >1 )` permits combn() to error when
# 1 < length(t1) < n for n > 2; and `length(sharedPhrases1>0)` is the length
# of a comparison vector — it works here only because if() treats any
# non-zero length as TRUE. Both worth tightening.
WordMissProbTopN <- function(question1,question2,n=2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=FALSE, stemming=FALSE) {
  t1 <- tokenize_ngrams(question1,n=1,n_min=1,simplify = TRUE)
  if (stemming==TRUE) {
    t1 <- wordStem(t1)
  }
  sharedPhrases1 <- NULL
  sharedPhrases2 <- NULL
  if (length(t1) >1 ) {
    # All n-word combinations of question 1, canonicalized by sorting.
    sharedPhrases1 <- t(combn(t1,n))
    sharedPhrases1 <- t(apply(sharedPhrases1,1,function(x) x[order(x)]))
    sharedPhrases1 <- apply(sharedPhrases1,1,function(x) paste0(x,collapse=" "))
  }
  t2 <- tokenize_ngrams(question2,n=1,n_min=1,simplify = TRUE)
  if (stemming==TRUE) {
    t2 <- wordStem(t2)
  }
  if (length(t2) >1) {
    sharedPhrases2 <- t(combn(t2,n))
    sharedPhrases2 <- t(apply(sharedPhrases2,1,function(x) x[order(x)]))
    sharedPhrases2 <- apply(sharedPhrases2,1,function(x) paste0(x,collapse=" "))
  }
  ret.prob <- NA
  ret.count <- NA
  if (length(sharedPhrases1>0) & length(sharedPhrases2>0)) {
    # Symmetric difference: keys present in exactly one question.
    sharedPhrases <- union(setdiff(sharedPhrases1,sharedPhrases2),setdiff(sharedPhrases2,sharedPhrases1))
    if (length(sharedPhrases)>0) {
      matches <- nTupleTable[sharedPhrases]
      matches <- matches[!is.na(matches$count)]
      matches <- matches[order(prob,decreasing=decreasing)]
      ret.prob <-matches[matches$count > countMin]$prob[TopN]
      ret.count <-matches[matches$count > countMin]$count[TopN]
    }
  }
  list(prob=ret.prob,count=ret.count)
}

# Single-word variant of WordMissProbTopN: looks up individual words present
# in exactly one question and returns the TopN-th prob/count (NA if none).
WordMiss1ProbTopN <- function(question1,question2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=FALSE, stemming=FALSE) {
  t1 <- tokenize_ngrams(question1,n=1,n_min=1,simplify = TRUE)
  if (stemming==TRUE) {
    t1 <- wordStem(t1)
  }
  t2 <- tokenize_ngrams(question2,n=1,n_min=1,simplify = TRUE)
  if (stemming==TRUE) {
    t2 <- wordStem(t2)
  }
  sharedPhrases <- union(setdiff(t1,t2),setdiff(t2,t1))
  ret.prob <- NA
  ret.count <- NA
  if (length(sharedPhrases)>0) {
    matches <- nTupleTable[sharedPhrases]
    matches <- matches[!is.na(matches$count)]
    matches <- matches[order(prob,decreasing=decreasing)]
    ret.prob <-matches[matches$count > countMin]$prob[TopN]
    ret.count <-matches[matches$count > countMin]$count[TopN]
  }
  list(prob=ret.prob,count=ret.count)
}

# Matched-combination counterpart of WordMissProbTopN: n-word combinations of
# the words SHARED by both questions are looked up in nTupleTable; returns
# the TopN-th prob/count after sorting by prob (NA when < n shared words).
WordMatchProbTopN <- function(question1,question2,n=2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=TRUE, stemming=FALSE) {
  t1 <- tokenize_ngrams(question1,n=1,n_min=1,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=1,n_min=1,simplify = TRUE)
  if (stemming==TRUE) {
    t1 <- wordStem(t1)
    t2 <- wordStem(t2)
  }
  sharedWords <- intersect(t1,t2)
  if (length(sharedWords) > (n-1)) {
    sharedPhrases <- t(combn(sharedWords,n))
    sharedPhrases <- t(apply(sharedPhrases,1,function(x) x[order(x)]))
    sharedPhrases <- apply(sharedPhrases,1,function(x) paste0(x,collapse=" "))
    matches <- nTupleTable[sharedPhrases]
    matches <- matches[!is.na(matches$count)]
    matches <- matches[order(prob,decreasing=decreasing)]
    #idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases & nTupleTable$count > countMin)
    ret.prob <-matches[matches$count > countMin]$prob[TopN]
    ret.count <-matches[matches$count > countMin]$count[TopN]
  } else {
    ret.prob <- NA
    ret.count <- NA
  }
  list(prob=ret.prob,count=ret.count)
}
# Tuple generation, phrases in both questions
# Build the n-gram duplicate-probability table over the training set: collect
# n-gram keys per pair (matched n-grams, punctuation-preserving variant, or
# missed n-grams), then aggregate mean is_duplicate and count per key.
nTupleDiagnostics <- function(n=2, train, tokenize=TRUE, match=TRUE, order=FALSE) {
  if (match==TRUE) {
    if (tokenize == TRUE) {
      word.tuples <- mapply(getNGramMatch, train$question1,train$question2,train$is_duplicate,n,USE.NAMES=FALSE, SIMPLIFY=TRUE)
    } else {
      word.tuples <- mapply(getNGramMatchPunct, train$question1,train$question2,train$is_duplicate,n,USE.NAMES=FALSE, SIMPLIFY=TRUE)
    }
  } else {
    word.tuples <- mapply(getNGramMiss, train$question1,train$question2,train$is_duplicate,n,USE.NAMES=FALSE, SIMPLIFY=TRUE)
  }
  # Drop NULLs (pairs without keys), stack, and aggregate per key.
  word.tuples <- list.clean(word.tuples, fun = is.null, recursive = FALSE)
  word.tuples <- list.rbind(word.tuples)
  word.tuples <- as.data.table(word.tuples)
  word.tuples$is_duplicate <- as.numeric(word.tuples$is_duplicate)
  setkeyv(word.tuples,c("sharedPhrases"))
  tuple.mean.duplicate <- word.tuples[,j=list(prob=mean(is_duplicate),count=length(is_duplicate)),by=list(sharedPhrases)]
  tuple.mean.duplicate
}

# Count-weighted mean duplicate probability over the space-split n-grams
# shared by both questions, plus the total supporting count.
TupleProbPunct <- function(question1,question2,n=2,nTupleTable=NULL) {
  t1 <- strsplit(tolower(question1), " ")
  t2 <- strsplit(tolower(question2), " ")
  t1 <- unlist(generate_ngrams_batch(t1,ngram_min =n,ngram_max=n))
  t2 <- unlist(generate_ngrams_batch(t2,ngram_min =n,ngram_max=n))
  sharedPhrases <- intersect(t1,t2)
  idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases)
  # Weighted mean of per-phrase probabilities, weighted by occurrence count.
  ret.prob <-sum(nTupleTable$prob[idx] * nTupleTable$count[idx])/sum(nTupleTable$count[idx])
  ret.count <-sum(nTupleTable$count[idx])
  list(prob=ret.prob,count=ret.count)
}

# Same as TupleProbPunct but with tokenize_ngrams tokenization.
TupleProb <- function(question1,question2,n=2,nTupleTable=NULL) {
  t1 <- tokenize_ngrams(question1,n=n,n_min=n,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=n,n_min=n,simplify = TRUE)
  sharedPhrases <- intersect(t1,t2)
  idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases)
  ret.prob <-sum(nTupleTable$prob[idx] * nTupleTable$count[idx])/sum(nTupleTable$count[idx])
  ret.count <-sum(nTupleTable$count[idx])
  list(prob=ret.prob,count=ret.count)
}

# TopN-th duplicate probability (and its count) among the n-grams shared by
# both questions, after filtering by minimum count and sorting by prob.
TupleProbTopN <- function(question1,question2,n=2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=TRUE) {
  t1 <- tokenize_ngrams(question1,n=n,n_min=n,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=n,n_min=n,simplify = TRUE)
  sharedPhrases <- intersect(t1,t2)
  matches <- nTupleTable[sharedPhrases]
  matches <- matches[!is.na(matches$count)]
  matches <- matches[order(prob,decreasing=decreasing)]
  #idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases & nTupleTable$count > countMin)
  ret.prob <-matches[matches$count > countMin]$prob[TopN]
  ret.count <-matches[matches$count > countMin]$count[TopN]
  list(prob=ret.prob,count=ret.count)
}

# Variant of TupleProbTopN over the symmetric difference: n-grams appearing
# in exactly one of the two questions.
TupleProbTopNMiss <- function(question1,question2,n=2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=TRUE) {
  t1 <- tokenize_ngrams(question1,n=n,n_min=n,simplify = TRUE)
  t2 <- tokenize_ngrams(question2,n=n,n_min=n,simplify = TRUE)
  sharedPhrases <- union(setdiff(t1,t2),setdiff(t2,t1))
  matches <- nTupleTable[sharedPhrases]
  matches <- matches[!is.na(matches$count)]
  matches <- matches[order(prob,decreasing=decreasing)]
  #idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases & nTupleTable$count > countMin)
  ret.prob <-matches[matches$count > countMin]$prob[TopN]
  ret.count <-matches[matches$count > countMin]$count[TopN]
  list(prob=ret.prob,count=ret.count)
}
calcAndSaveNGramTables <- function(tupleSample, fileName = "Quora NGrams Train") {
  # Build every n-gram / word-pair diagnostic table on the rows of the global
  # data.table `train` selected by `tupleSample`, then save them to `fileName`.
  #
  # NOTE(review): depends on globals `train`, `nTupleDiagnostics()`,
  # `nWordDiagnostics()`, `nCrossQuestionDiagnostics()`, and also saves a
  # global `sample` object -- confirm `sample` is a data object here and not
  # base::sample, which is what save() would pick up otherwise.
  print("nTuple1")
  nTuple1 <- nTupleDiagnostics(1, train[tupleSample])
  print("nTuple1Miss")
  nTuple1Miss <- nTupleDiagnostics(1, train[tupleSample], tokenize = TRUE, match = FALSE)
  print("nTuple2")
  nTuple2 <- nTupleDiagnostics(2, train[tupleSample])
  print("nTuple2Punct")
  nTuple2Punct <- nTupleDiagnostics(2, train[tupleSample], tokenize = FALSE)
  print("nTuple2Miss")
  nTuple2Miss <- nTupleDiagnostics(2, train[tupleSample], tokenize = TRUE, match = FALSE)
  print("nWordMatch2")
  nWordMatch2 <- nWordDiagnostics(train[tupleSample])
  print("nWordMatch2Stem")
  nWordMatch2Stem <- nWordDiagnostics(train[tupleSample], match = TRUE, stemming = TRUE)
  print("nWordMatch3")
  nWordMatch3 <- nWordDiagnostics(train[tupleSample], n = 3)
  print("nWordMatch3Stem")
  nWordMatch3Stem <- nWordDiagnostics(train[tupleSample], n = 3, match = TRUE, stemming = TRUE)
  print("nWordMiss2")
  nWordMiss2 <- nWordDiagnostics(train[tupleSample], 2, match = FALSE)
  print("nWordMiss2Stem")
  nWordMiss2Stem <- nWordDiagnostics(train[tupleSample], 2, match = FALSE, stemming = TRUE)
  print("nCrossQuestion")
  nCrossQuestion <- nCrossQuestionDiagnostics(train[tupleSample])
  print("nTuple3")
  nTuple3 <- nTupleDiagnostics(3, train[tupleSample])
  print("nTuple4")
  nTuple4 <- nTupleDiagnostics(4, train[tupleSample])
  print("nTuple5")
  nTuple5 <- nTupleDiagnostics(5, train[tupleSample])
  # Fixes vs. the original save list: "nTuple2Miss" was listed twice, and
  # nTuple2Punct / nCrossQuestion were computed above but never saved.
  save("nTuple1", "nTuple1Miss", "nTuple2", "nTuple2Punct", "nTuple2Miss",
       "nTuple3", "nTuple4", "nTuple5",
       "sample", "tupleSample",
       "nWordMatch2", "nWordMatch2Stem", "nWordMatch3", "nWordMatch3Stem",
       "nWordMiss2", "nWordMiss2Stem", "nCrossQuestion",
       file = fileName)
}
traintestAddColumn <- function(sample,train) {
# Add the engineered similarity columns to data.table `train`, one row at a
# time, for the row indices in `sample`. Columns are added by reference via
# data.table `:=`, and the (modified) table is also returned.
# NOTE(review): depends on globals `nGramHitRate()`, `stopWords`, and the
# lookup tables nTuple1Miss / nTuple2Miss / nWordMatch2 / nWordMiss2.
j <- 1
ptm <- proc.time()
for (i in sample) {
# Progress heartbeat every 100 processed rows.
if (j %% 100 == 0) {
print(j)
print(proc.time() - ptm)
}
train[i,nGramHitRate11_0 := nGramHitRate(question1,question2,1,1,0)]
train[i,nGramHitRate22_0 := nGramHitRate(question1,question2,2,2,0)]
# # #
t1 <- unlist(strsplit(tolower(train$question1[i]), " "))
t2 <- unlist(strsplit(tolower(train$question2[i]), " "))
#train[i,firstWordEqual := ifelse(t1[1]==t2[1],1,0)]
# remove stop words
t1 <- setdiff(t1,stopWords)
t2 <- setdiff(t2,stopWords)
sharedWords <- intersect(t1,t2)
bothLengths <- length(t1)+length(t2)
# Dice-style overlap: 2*|shared| / (|t1| + |t2|).
train[i,nGramHitRate := 2*length(sharedWords)/bothLengths]
# NOTE(review): when both questions are all stop words, 0/0 gives NaN, and
# is.infinite(NaN) is FALSE -- so NaN passes through here; confirm whether
# is.finite() / is.nan() was intended instead.
train[i,nGramHitRate := ifelse(is.infinite(nGramHitRate),0,nGramHitRate)]
# Now N Tuples Max
# ret <- TupleProbTopN(train$question1[i],train$question2[i],n=1,nTupleTable=nTuple1,countMin=0,TopN=1, decreasing=TRUE)
# train[i,Tuple1ProbMax:= ret$prob]
# train[i,Tuple1CountMax:= ret$count]
#
# Lowest-probability unigram/bigram present in only one question.
ret <- TupleProbTopNMiss(train$question1[i],train$question2[i],n=1,nTupleTable=nTuple1Miss,countMin = 0, TopN = 1, decreasing=FALSE )
train[i,Tuple1ProbTop1Min:= ret$prob]
train[i,Tuple1CountTop1Min:= ret$count]
ret <- TupleProbTopNMiss(train$question1[i],train$question2[i],n=2,nTupleTable=nTuple2Miss,countMin = 0, TopN = 1, decreasing=FALSE )
train[i,Tuple2ProbTop1Min:= ret$prob]
train[i,Tuple2CountTop1Min:= ret$count]
# Highest-probability word pair shared by both questions.
ret <- WordMatchProbTopN(train$question1[i],train$question2[i],n=2,nTupleTable=nWordMatch2,countMin = 0, TopN = 1, decreasing=TRUE )
train[i,WordMatch2ProbTop1:= ret$prob]
train[i,WordMatch2CountTop1:= ret$count]
# ret <- WordMatchProbTopN(train$question1[i],train$question2[i],n=3,nTupleTable=nWordMatch3,countMin = 0, TopN = 1, decreasing=TRUE )
# train[i,WordMatch3ProbTop1:= ret$prob]
# train[i,WordMatch3CountTop1:= ret$count]
#
# Lowest- and highest-probability word pairs present in only one question.
ret <- WordMissProbTopN(train$question1[i],train$question2[i],n=2,nTupleTable=nWordMiss2,countMin = 0, TopN = 1, decreasing=FALSE )
train[i,WordMiss2ProbTop1:= ret$prob]
train[i,WordMiss2CountTop1:= ret$count]
ret <- WordMissProbTopN(train$question1[i],train$question2[i],n=2,nTupleTable=nWordMiss2,countMin = 0, TopN = 1, decreasing=TRUE )
train[i,WordMiss2ProbTop1Max:= ret$prob]
train[i,WordMiss2CountTop1Max:= ret$count]
# ret <- WordMissProbTopN(train$question1[i],train$question2[i],n=2,nTupleTable=nWordMiss2,countMin = 0, TopN = 2, decreasing=FALSE )
# train[i,WordMiss2ProbTop2:= ret$prob]
# train[i,WordMiss2CountTop2:= ret$count]
#
# ret <- WordMatchProbTopN(train$question1[i],train$question2[i],n=2,
# nTupleTable=nWordMatch2Stem,countMin = 0, TopN = 1, decreasing=TRUE, stemming=TRUE )
# train[i,WordMatch2ProbTop1Stem:= ret$prob]
# train[i,WordMatch2CountTop1Stem:= ret$count]
#
j = j+1
}
train
}
WordMissProbTopNCheck <- function(question1,question2,n=2,nTupleTable=NULL,TopN=1, decreasing=FALSE, stemming=FALSE) {
  # For each question, build all unordered n-word combinations (sorted and
  # joined with spaces); look up the combinations that occur in exactly one
  # question in `nTupleTable`; sort by probability and return the mean prob
  # and total count of the top `upper = min(TopN, available)` rows.
  #
  # Returns list(prob=, count=); both NA when either question yields no
  # combinations or nothing matches the table.
  #
  # NOTE(review): assumes nTupleTable is a data.table keyed by phrase with
  # columns `prob` and `count`; combn() requires at least n tokens, which is
  # only guarded for n = 2 (length > 1) -- same as the original.
  pair_keys <- function(question) {
    words <- tokenize_ngrams(question, n = 1, n_min = 1, simplify = TRUE)
    if (stemming == TRUE) {
      words <- wordStem(words)
    }
    if (length(words) < 2) {
      return(NULL)
    }
    combos <- t(combn(words, n))
    # Sort the words within each combination so order does not matter.
    combos <- t(apply(combos, 1, function(x) x[order(x)]))
    apply(combos, 1, function(x) paste0(x, collapse = " "))
  }
  keys1 <- pair_keys(question1)
  keys2 <- pair_keys(question2)
  ret.prob <- NA
  ret.count <- NA
  # FIX: the original wrote length(x > 0), which equals length(x); make the
  # intended non-empty check explicit and scalar-safe with &&.
  if (length(keys1) > 0 && length(keys2) > 0) {
    # Symmetric difference: combinations present in only one question.
    sharedPhrases <- union(setdiff(keys1, keys2), setdiff(keys2, keys1))
    if (length(sharedPhrases) > 0) {
      matches <- nTupleTable[sharedPhrases]
      matches <- matches[!is.na(matches$count)]
      matches <- matches[order(prob, decreasing = decreasing)]
      upper <- min(TopN, nrow(matches))
      # FIX: guard upper == 0; the original indexed prob[1:0] (= c(1, 0)),
      # which only returned NA by accident.
      if (upper > 0) {
        #ret.prob <-sum(matches$prob[1:upper]*matches$count[1:upper])/sum(matches$count[1:upper])
        ret.prob <- mean(matches$prob[seq_len(upper)])
        ret.count <- sum(matches$count[seq_len(upper)])
      }
    }
  }
  list(prob = ret.prob, count = ret.count)
}
WordMissProbAllTopN <- function(question1,question2,n=2,nTupleTable=NULL,countMin=0,TopN=1, decreasing=FALSE, stemming=FALSE) {
  # Like WordMissProbTopNCheck, but returns the full vectors of the first
  # TopN probabilities and counts (after filtering by count > countMin and
  # sorting by probability) instead of aggregating them.
  #
  # Returns list(prob=, count=): length-TopN vectors, padded with NA when
  # fewer than TopN rows qualify (callers rely on the fixed length), or
  # scalar NA when either question yields no combinations.
  #
  # NOTE(review): assumes nTupleTable is a data.table keyed by phrase with
  # columns `prob` and `count`; combn() needs at least n tokens, which is
  # only guarded for n = 2 (length > 1) -- same as the original.
  word_combos <- function(question) {
    words <- tokenize_ngrams(question, n = 1, n_min = 1, simplify = TRUE)
    if (stemming == TRUE) {
      words <- wordStem(words)
    }
    if (length(words) < 2) {
      return(NULL)
    }
    combos <- t(combn(words, n))
    # Sort words within each combination so order does not matter.
    combos <- t(apply(combos, 1, function(x) x[order(x)]))
    apply(combos, 1, function(x) paste0(x, collapse = " "))
  }
  keys1 <- word_combos(question1)
  keys2 <- word_combos(question2)
  ret.prob <- NA
  ret.count <- NA
  # FIX: the original wrote length(x > 0), which equals length(x); make the
  # intended non-empty check explicit and scalar-safe with &&.
  if (length(keys1) > 0 && length(keys2) > 0) {
    # Symmetric difference: combinations present in only one question.
    sharedPhrases <- union(setdiff(keys1, keys2), setdiff(keys2, keys1))
    if (length(sharedPhrases) > 0) {
      matches <- nTupleTable[sharedPhrases]
      matches <- matches[!is.na(matches$count)]
      matches <- matches[order(prob, decreasing = decreasing)]
      #idx <- which(nTupleTable$sharedPhrases %in% sharedPhrases & nTupleTable$count > countMin)
      # Filter once instead of twice; 1:TopN deliberately pads with NA.
      filtered <- matches[matches$count > countMin]
      ret.prob <- filtered$prob[1:TopN]
      ret.count <- filtered$count[1:TopN]
    }
  }
  list(prob = ret.prob, count = ret.count)
}
|
b4539e83ff659cbd084a158d22da3ee0302c5c81 | 4df547967c830abe8126644cb78d2d416320066d | /man/availableMetrics.Rd | cdefcfaf6279df58830393ad67ca86d407dd97b7 | [] | no_license | josefansinger/sanapiwrapper | 80aa222c7eb1760596b9b7b0d914324182bdbf13 | 18204670e3c35860a57165d65b3c4d649a1faaab | refs/heads/master | 2022-06-20T00:20:32.228421 | 2020-05-15T19:45:27 | 2020-05-15T19:45:27 | 263,316,785 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 463 | rd | availableMetrics.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sanapiwrapper.R
\name{availableMetrics}
\alias{availableMetrics}
\title{Get the available Santiment metrics for a specific project.}
\usage{
availableMetrics(slug)
}
\arguments{
\item{slug}{project}
}
\value{
vector of available metrics
}
\description{
Returns all available timeseries metrics (note not the histogram metrics) for a slug.
}
\examples{
availableMetrics('ethereum')
}
|
0fab8a2f6bc6369eca964f009d5498c002caaa4c | e6f1a90627518802d31beb663a18718759ba6463 | /login.R | 0b8a193e5b6614b3a64368035d6376b48bfe4e15 | [] | no_license | IntersysConsulting/R_Miracle_Foundation | 7a9ce933d8a084fcd281af3b324f266e9b9d9842 | 4c75274fe10497490ba1780b67b32290edffca57 | refs/heads/master | 2021-03-16T08:43:29.161934 | 2014-10-29T21:24:38 | 2014-10-29T21:24:38 | 25,941,447 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 359 | r | login.R | install.packages ("RForcecom")
# Point install.packages() at the Revolution Analytics CRAN mirror.
options(repos=structure(c(CRAN="http://cran.revolutionanalytics.com")))
library(RForcecom)
# SECURITY(review): hard-coded Salesforce credentials committed to source
# control -- these should be rotated immediately and loaded from environment
# variables (Sys.getenv()) or a config file outside the repository.
username <- "jimg@miraclefoundation.org.test"
password <- "Miracle2468NQOYQ9MUOB8jWV3bFGFm5WR0"
# Sandbox instance URL and API version for the Salesforce session.
instanceURL <- "https://cs14.salesforce.com/"
apiVersion <- "27.0"
# Outer parentheses print the session object after assignment.
(session <- rforcecom.login(username, password, instanceURL, apiVersion))
|
d4b8c1d714e032662e4c283252895081ac72e4fd | fbb4db1d64c5aace6cb921f5933a57df0da68ddf | /Model Finding Similar Features/Real Data/tuning_funs.R | 99fd6c0ee55b101a1e0816491a6a80a72e489453 | [] | no_license | bommert/phd-thesis | 0516255dfdb5b1428150b29eaf985554444da677 | 25a65101799fcba0211acc948f85395ca48b2479 | refs/heads/master | 2023-01-11T18:59:26.381428 | 2020-11-06T11:33:04 | 2020-11-06T11:33:04 | 306,611,652 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,133 | r | tuning_funs.R | eps.constr = function(acc, stab, p.acc = 0.025, p.stab = 0.1) {
# Epsilon-constraint selection over (accuracy, stability) candidate pairs:
# 1) drop candidates with missing stability,
# 2) keep those within p.acc of the best accuracy,
# 3) among them, keep those within p.stab of the best stability,
# 4) tie-break by exact best accuracy, then exact best stability.
# Returns a 0/1 vector marking the selected candidate(s).
cho = rep(TRUE, length(acc))
cho[is.na(stab)] = FALSE
best.acc = max(acc[cho])
cho[cho] = acc[cho] >= best.acc - p.acc
best.stab = max(stab[cho])
cho[cho] = stab[cho] >= best.stab - p.stab
best.acc.end = max(acc[cho])
cho = (acc == best.acc.end) & cho
best.stab.end = max(stab[cho])
# NA-stab positions are already FALSE in cho, so NA & FALSE stays FALSE here.
cho = (stab == best.stab.end) & cho
res = as.numeric(cho)
return(res)
}
removeDuplicates = function(res, by.cols) {
# For every combination of by.cols that occurs more than once in the
# data.table `res`, keep exactly one of its rows (chosen at random) and
# drop the rest. The result therefore depends on the current RNG state.
dup = res[, .N, by = by.cols]
dup = dup[N > 1, ]
if (nrow(dup) > 0) {
dup = cbind(dup, dup.id = 1:nrow(dup))
res2 = cbind(res, id = 1:nrow(res))
# Join the duplicated key combinations back to their row ids in res.
all = merge(dup, res2)
all.split = split(all, all$dup.id)
rem = lapply(all.split, function(part) {
ids = part$id
# N > 1 guarantees length(ids) >= 2, so sample() always receives a
# vector here (the sample(n) scalar-expansion footgun cannot trigger).
sample(ids, length(ids) - 1)
})
rem = unlist(rem)
res = res[-rem, ]
}
return(res)
}
tuning.single = function(by.cols, select.cols) {
# Single-criterion tuning: per group (by.cols), keep the configuration(s)
# with maximal tuning accuracy, then break remaining ties at random.
# NOTE(review): reads the data.table `result` from the global environment.
result.part = result[algo.name == "tuning", ]
best = result.part[, list(acc.tune = max(acc.tune)), by = by.cols]
# Inner join on by.cols + acc.tune keeps only the best rows per group.
res = merge(best, result.part)
res = subset(res, select = c(select.cols, "maxSuppSize"))
res = removeDuplicates(res, by.cols)
cbind(res, measure = "acc")
}
tuning.multi = function(measure, by.cols, select.cols) {
# Bi-criteria tuning: per group, mark the configuration chosen by the
# epsilon-constraint rule on (accuracy, <measure>) and keep those rows.
# NOTE(review): reads the data.table `result` from the global environment.
mt = paste0(measure, ".tune")
tmp = result[algo.name == "tuning", ]
# ec is 1 for the selected configuration within each group, 0 otherwise.
tmp[, ec := eps.constr(acc.tune, get(mt)), by = by.cols]
best = tmp[, list(ec = max(ec, na.rm = TRUE)), by = by.cols]
# Joining on by.cols + ec keeps only the selected (ec == 1) rows.
res = merge(best, tmp)
res = subset(res, select = c(select.cols, "maxSuppSize"))
res = removeDuplicates(res, by.cols)
cbind(res, measure = measure)
}
tuning.stab.l0 = function(by.cols, select.cols) {
# Tuning for the L0 stability-selection algorithm: per group, keep the
# configuration(s) with maximal tuning accuracy (ties broken at random).
# NOTE(review): reads the data.table `result` from the global environment.
result.part = result[algo.name == "stabilitySelL0",]
best = result.part[, list(acc.tune = max(acc.tune, na.rm = TRUE)), by = by.cols]
res = merge(best, result.part)
res = subset(res, select = c(select.cols, "cutoff", "pfer"))
res = removeDuplicates(res, by.cols)
cbind(res, measure = "stabilitySelectionL0")
}
truth = function(select.cols) {
# Ground-truth rows, tagged with measure = "truth" for later row-binding.
# NOTE(review): reads the data.table `result` from the global environment.
res = result[algo.name == "truth", select.cols, with = FALSE]
cbind(res, measure = "truth")
}
|
fe37599167ade14c931c7058371c133e3dd7d05c | d08e69198fbd60086aa35d765c7675006d06cf3f | /R/plot.ellipse.R | a3769f852c0095c42ce2f8e73fe1a215c8f573b3 | [] | no_license | villardon/MultBiplotR | 7d2e1b3b25fb5a1971b52fa2674df714f14176ca | 9ac841d0402e0fb4ac93dbff078170188b25b291 | refs/heads/master | 2023-01-22T12:37:03.318282 | 2021-05-31T09:18:20 | 2021-05-31T09:18:20 | 97,450,677 | 3 | 2 | null | 2023-01-13T13:34:51 | 2017-07-17T08:02:54 | R | UTF-8 | R | false | false | 522 | r | plot.ellipse.R | plot.ellipse <- function(x, add=TRUE, labeled= FALSE , center=FALSE, centerlabel="Center", initial=FALSE, ...){
# Draw the ellipse outline: add to the current plot, or open a new one.
if (add)
points(x$ellipse, type="l", ...)
else
plot(x$ellipse, type="l", ...)
# Optionally label the first ellipse point with the confidence level.
if (labeled) text(x$ellipse[1,1], x$ellipse[1,2], labels=x$confidence, pos=4, ... )
# Optionally mark and label the ellipse centre.
if (center) {
points(x$center[1], x$center[2], pch=16, cex=1.2, ...)
text(x$center[1], x$center[2], labels=centerlabel, cex=1.2, pos=4, ...)
}
# Optionally overlay the raw data points the ellipse was fitted to.
if (initial) points(x$data[,1], x$data[,2], pch=16, cex=0.8, ...)
}
|
041eb0d66dfa0da0e2a5f516bd3e985ef0c83a3f | 9e90a923b49e53e8d402d85d66bee9d02f6a483f | /R/zzz.R | 65e03c042fcadf2ef29233843b91669190a417f8 | [] | no_license | thijsjanzen/pirouette | 7e66c8276a11a18a6184f27dab6cb8ee06abfa28 | 463a2bd61ee9d5ada7c249d803e99391de264b79 | refs/heads/master | 2020-05-25T16:21:22.467232 | 2019-05-07T12:46:27 | 2019-05-07T12:46:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 264 | r | zzz.R | .onLoad <- function(libname, pkgname){
# Attach the whole babette tool chain when this package is loaded, hiding
# the individual startup messages.
# NOTE(review): calling library() inside .onLoad() attaches packages to the
# user's search path as a side effect; CRAN policy prefers declaring these
# in Depends or importing them -- confirm this is intentional.
suppressPackageStartupMessages(
lapply(
c("beautier", "beastier", "tracerer", "mauricer", "babette", "mcbette"),
library,
character.only = TRUE,
warn.conflicts = FALSE
)
)
# Load hooks should not return anything visibly.
invisible()
}
|
c680d720dbe70212bcdff999e225951e5d45c78b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SLOPE/tests/test_slope.R | 0748fe40d59dc2c59cccf1e83d8c80cabb95e648 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 776 | r | test_slope.R | test_that("SLOPE accepts manually specified noise level", {
prob = random_problem(20, 10, sigma=2)
result = SLOPE(prob$X, prob$y, sigma=2)
expect_equal(result$sigma, 2)
})
# With n (100) comfortably above p (50), SLOPE can estimate the noise level
# from the data; the true sigma used in the simulation is 2.
test_that("SLOPE estimates sigma from data (when n-p is large)", {
prob = random_problem(100, 50, amplitude=6, sigma=2)
result = SLOPE(prob$X, prob$y)
sigma.hat = result$sigma
# A single (non-iterative) estimate, loosely bracketing the true sigma = 2.
expect_equal(length(sigma.hat), 1)
expect_true(1.5 < sigma.hat && sigma.hat < 2.5)
})
test_that("SLOPE iteratively estimates sigma from data (when n-p is small)", {
skip("Known failure")
prob = random_problem(100, 100, amplitude=6, sigma=2)
result = SLOPE(prob$X, prob$y)
sigma.hat = result$sigma
expect_true(length(sigma.hat) > 1)
expect_true(1.5 < tail(sigma.hat,1) && tail(sigma.hat,1) < 2.5)
}) |
3fdabad45e50265772af2f63de6d93aa25569d7d | da2aded3040e88b9b7e375a5c79bf1a4b90b0556 | /R/validate_method.R | 590eaadf43f23806d7d56070bcca6c89c4746d2c | [] | no_license | PolMine/polmineR.misc | f1c3cca7651dbd91fd7fce92103d11a5e8270be1 | 03dfef42f3242ce3120a45f283a61b89ebb903d4 | refs/heads/master | 2023-06-09T01:20:29.818197 | 2022-11-17T13:27:23 | 2022-11-17T13:27:23 | 54,013,177 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 931 | r | validate_method.R | setGeneric("validate", function(.Object, ...) standardGeneric("validate"))
# NULL bindings so R CMD check does not flag data.table NSE column names.
mismatches <- variable <- NULL
# Interactive validation of a count table: for every nonzero (partition,
# variable) cell, display the matching partition with the candidate feature
# highlighted and ask the reviewer to type the number of false hits
# ("" = none, "exit" = stop early, digits = mismatch count).
setMethod("validate", "data.table", function(.Object, bundle, ...){
if ("TOTAL" %in% colnames(.Object)){
.Object[, "TOTAL" := as.integer(.Object[["TOTAL"]]), with = TRUE]
}
# Long format: one row per (partition, variable) with its count.
DT <- data.table::melt.data.table(.Object, id.vars = "partition")
setnames(DT, old="value", new="count")
DT <- subset(DT, count > 0)
DT[, "mismatches" := NA, with = TRUE]
# NOTE(review): 1:nrow(DT) misbehaves when DT has zero rows (yields c(1, 0));
# seq_len(nrow(DT)) would be the safe form.
for (i in c(1:nrow(DT))){
# Show the partition with the candidate feature highlighted (polmineR).
partitionToInspect <- as(bundle[[ DT[i, partition] ]], "plprPartition")
read(
partitionToInspect,
highlight = list(yellow = c(as.character(DT[i, variable]))),
meta = "text_name", cqp = TRUE
)
input <- readline(">> ")
if (input == "exit") break
if (input == ""){
# NOTE(review): `DT[i, mismatches] <- 0` relies on data.frame-style
# replacement with an unquoted column name -- confirm it updates DT as
# intended (`DT[i, mismatches := 0]` would be the usual data.table idiom).
DT[i, mismatches] <- 0
} else if(grepl("^[0-9]+$", input)){
DT[i, mismatches] <- as.integer(input)
}
}
})
|
a64781e9c797019156e79ae36fed93ea2e8c538f | f32c9ffd0118ae743eae83312587802302cb8728 | /R/allPackage.R | ef9387d6e8281af89c309ba13a862d1abb89c6a5 | [] | no_license | qiime/qiime.io | 2b7a28cd4c110882b43ec4237d10a12051b5497a | 95a309575a370e6fe48e40f88a9bdf233fb31007 | refs/heads/master | 2020-04-06T04:51:33.902087 | 2013-10-16T16:04:13 | 2013-10-16T16:04:13 | 10,043,792 | 1 | 0 | null | 2013-10-17T19:16:37 | 2013-05-14T00:03:45 | R | UTF-8 | R | false | false | 791 | r | allPackage.R | ###############################################
#' Support for loading and interacting with QIIME files in R.
#'
#' There are already several ecology and phylogenetic
#' packages available in R, including
#' adephylo, vegan, ade4, picante, ape, phangorn, phylobase,
#' phyloseq and OTUbase packages.
#' This package is
#' intended to be a standard interface to QIIME output files,
#' especially legacy QIIME formats.
#' See the biom package for help importing files in biom-format.
#'
#'
#' @name qiime.io-package
#' @author Dan Knights <dan.knights@@gmail.com>,
#' Paul J. McMurdie <mcmurdie@@stanford.edu>,
#' and the QIIME devel team.
#' @references \url{http://qiime.github.io/qiime.io/}
#' @docType package
#' @keywords package
NA
###############################################
|
48088dac588a2c6b2c9326cef259c56e422f6788 | f4bbaaf60d7784a23fc934a8f2f42af9ac07b3a7 | /testCode_Backup/testOutlierDetection.R | 1aa378c48604289f11a20e4dcf59f89cfbae4d20 | [] | no_license | praster1/Unit-root-test-of-Non-stationary-time-series | 6e8fca8f47b8c71a48ab9e1022a4e10ac494ed51 | e47b62ac7dcefde7e4bcb16b29ccd12960ee3eb8 | refs/heads/master | 2020-04-02T01:54:59.554348 | 2019-04-17T18:04:10 | 2019-04-17T18:04:10 | 153,881,587 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 489 | r | testOutlierDetection.R | require(forecast)
require(tsoutliers)
dataLen = 500
### Simulated process. NOTE(review): the original label said "ARMA(1,1)",
### but X below follows X[i] = X[i-1] + Z[i] + Z[i-1], i.e. an integrated
### MA process (ARIMA(0,1,1)-like random walk), not a stationary ARMA(1,1).
# purely random process with mean 0 and standard deviation 1.5
Z <- rnorm(dataLen, mean = 0, sd = 1.5)
# Build X recursively from the white noise Z.
X <- rnorm(1)
for (i in 2:length(Z))
{
X[i] <- X[i-1] + Z[i] + Z[i-1]
}
# Inject artificial anomalies: a level shift at t = 100, a spike at t = 255...
X[100:length(X)] = X[100:length(X)] - 100
X[255] = 50
# ...and a noisy shifted window. NOTE(review): rnorm() draws dataLen (500)
# values but only the first 26 are used for indices 300:325 (recycling).
X[300:325] = X[300:325] + rnorm(dataLen, mean = -50, sd = 3.5)
plot(X, type="l")
# auto.arima(X)
# Detect the injected outliers with tsoutliers::tso().
outliers = tso(as.ts(X))
outliers
plot(outliers) |
7adf82fed72cd3959c664faa5bd0fd4948244616 | 7bf8ba22baa77854c9039cb2e4754d056ab7ecc6 | /Mikdat-hw3.R | 0b0954a68eaee9aec670e00fe18ae6db103446b8 | [] | no_license | teamwilf/Quant-I | 228580e813915229faaf1ee32d1fd9d35af18f6a | 8802bd55466029f16ceaab1472a8c15f6a54aa2c | refs/heads/master | 2021-07-07T19:53:22.329656 | 2017-10-04T14:13:35 | 2017-10-04T14:13:35 | 105,075,955 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 899 | r | Mikdat-hw3.R | #Question 2
# Question 2: x and eps are jointly normal with correlation 0.5, so the
# regression of y on x alone suffers from correlated-error structure.
install.packages("mvtnorm")
# NOTE(review): machine-specific lib.loc; drop it for portability.
library("mvtnorm", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library")
set.seed(135467)
# 2x2 covariance matrix with off-diagonal 0.5.
smat<-matrix(c(1,.5,.5,1),2)
data<-rmvnorm(100,sigma =smat)
x<-data[,1]
eps<-data[,2]
y<-3+7*x+eps
reg<-lm(y~x)
# OLS residuals are orthogonal to x by construction (normal equations),
# so this regression should show a ~zero slope.
regtest<-lm(x~reg$resid)
summary.lm(regtest)
summary.lm(lm(x~eps))
#Question 3
# NOTE(review): rm(list = ls()) wipes the entire workspace -- avoid in
# scripts others may source.
rm(list = ls())
set.seed(135467)
smat<-matrix(c(1,.5,.5,1),2)
data<-rmvnorm(100,sigma =smat)
x1 <-data[,1]
x2 <-data[,2]
epsilon <- rnorm(100, mean=1, sd=1)
a <- 3
b <- 2
c <- 2
y <- a + b*x1 + c*x2 + epsilon
lm(y~x1+x2)
# Frisch-Waugh-Lovell: the coefficient on x1 equals the slope of the
# (y ~ x2) residuals regressed on the (x1 ~ x2) residuals.
residuals1 <- residuals(lm(y~x2))
residuals2 <- residuals(lm(x1~x2))
lm(residuals1~residuals2)
#Question 4
# OLS by hand: beta-hat = (X'X)^{-1} X'y (no intercept column in X).
Y <- matrix(y,ncol=1)
X <- matrix(c(x1,x2),ncol=2)
solve(t(X)%*%X) %*% t(X) %*% Y
#Question 5, part a
# lm() without intercept should reproduce the matrix computation above.
lm(y~x1+x2-1)
#Question 5, part b
# Centering all variables and fitting without intercept recovers the slopes.
ycen <- y - mean(y)
x1cen <- x1 - mean(x1)
x2cen <- x2 - mean(x2)
lm(ycen~x1cen+x2cen-1)
|
4328a72abd95f499e6984b890daf5cc88292a5b1 | 7798ce0d75135ed0d0c664b79128e7fc3feedef6 | /2_RProgramming/week2/Programming Assignment 1 INSTRU.R | a9df36afca45b1bb352fb276b0e1ad96bbe1cdc4 | [] | no_license | artickavenger/datasciencecoursera | 7bcdd67e2175bcc7de967f8f6c9b8b69e332edf3 | 19df07f0d625413a30060af60f01f616ad41c64e | refs/heads/master | 2022-12-20T08:06:51.955067 | 2020-10-21T10:58:30 | 2020-10-21T10:58:30 | 297,189,939 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,159 | r | Programming Assignment 1 INSTRU.R | # Programming Assignment 1 INSTRU
pollutantmean <- function(directory, pollutant, id = 1:332){
  # Mean of one pollutant column across the monitor files selected by `id`,
  # ignoring missing observations.
  #
  # directory: path containing one CSV per monitor; the alphabetical file
  #            order must correspond to the monitor ids (as in the course
  #            data set, where files are named 001.csv ... 332.csv).
  # pollutant: name of the column to average (e.g. "sulfate", "nitrate").
  # id:        integer vector of monitor numbers (positions in the file list).
  #
  # Returns a single numeric value (NaN when no non-missing values exist).
  files_full <- list.files(directory, full.names = TRUE)
  # Read all requested files and stack them in one step; avoids growing a
  # data frame with rbind() inside a loop (quadratic copying).
  dat <- do.call(rbind, lapply(files_full[id], read.csv))
  mean(dat[[pollutant]], na.rm = TRUE)
}
pollutantmean("specdata", "sulfate")
complete <- function(directory, id = 1:332){
  # Count complete cases (rows with no NA in any column) per monitor file.
  #
  # directory: path containing one CSV per monitor (alphabetical order must
  #            correspond to the monitor ids).
  # id:        integer vector of monitor numbers (positions in the file list).
  #
  # Returns a data.frame with columns `id` and `nobs`, one row per element
  # of `id`, in the order given.
  files_full <- list.files(directory, full.names = TRUE)
  # vapply instead of rbind-in-a-loop: preallocated and type-stable
  # (sum() over a logical vector returns an integer).
  nobs <- vapply(
    id,
    function(i) sum(complete.cases(read.csv(files_full[i]))),
    integer(1)
  )
  data.frame(id = id, nobs = nobs)
}
complete("specdata", c(2, 4, 8, 10, 12))
corr <- function(directory, threshold = 0){
  # Correlation between sulfate and nitrate for every monitor file in
  # `directory` that has more than `threshold` complete (sulfate, nitrate)
  # observation pairs.
  #
  # Returns a numeric vector of correlations, possibly empty (numeric(0)).
  files_full <- list.files(directory, full.names = TRUE)
  # seq_along() instead of 1:length(): safe when the directory is empty
  # (1:0 would iterate over c(1, 0) and crash read.csv).
  cors <- lapply(seq_along(files_full), function(i) {
    tmp <- read.csv(files_full[i])
    # Rows where both measurements are present.
    ok <- !is.na(tmp$sulfate) & !is.na(tmp$nitrate)
    if (sum(ok) > threshold) {
      cor(tmp$sulfate[ok], tmp$nitrate[ok])
    } else {
      NULL  # dropped by unlist() below
    }
  })
  out <- unlist(cors)
  # unlist() of all-NULL is NULL; preserve the documented numeric(0) result.
  if (is.null(out)) numeric(0) else out
}
cr <- corr("specdata", 150)
head(cr)
summary(cr)
cr <- corr("specdata", 400)
head(cr)
summary(cr)
cr <- corr("specdata")
head(cr)
summary(cr)
|
95f514366357c4f3b6fefd381e0f84f97ed588b0 | c395f9c5c3253cfa5b1b09d68afc5e1ce36b5a51 | /dpylr.R | e595ef7965658d5a6ec25ee6d7a8f80d5e3f7417 | [] | no_license | RohanZutshi/analytics | 8a945b9d4a7df100135d8072870b5142db772ac2 | a26d2a6b3e51b14f3691395cb6c3dcc363fcca83 | refs/heads/master | 2020-04-06T23:06:03.524954 | 2018-11-17T06:19:24 | 2018-11-17T06:19:24 | 157,858,035 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,480 | r | dpylr.R | install.packages('dplyr')
# Teaching notes: build a simulated class roster, then demonstrate dplyr
# verbs on it and on mtcars. Outer parentheses print after assignment.
(rollno = 1:30)
(sname = paste('student', 1:30, sep = " "))
(gender = sample(c('M', "F")))
set.seed(1234)  # to replicate the same pattern
(gender = sample(c('M', 'F'), size = 30, replace = T))
table(gender)
(gender = sample(c('M', 'F'), size = 30, replace = T, prob = c(.7, .3)))
(marks1 = floor(rnorm(30, mean = 50, sd = 10)))
(marks2 = ceiling(rnorm(30, 40, 5)))
(course = sample(c('bba', 'mba'), size = 30, replace = T, prob = c(.5, .5)))
rollno; sname; course
df1 = data.frame(rollno, sname, gender, marks1, marks2, course, stringsAsFactors = F)
df1$gender = factor(df1$gender)
# FIX: the original assigned factor(df1$course) to df1$gender here,
# clobbering the gender column; the intent was to factor the course column.
df1$course = factor(df1$course)
library(dplyr)
df1
# Group means per gender.
# FIX: the original was missing the closing parenthesis on summarise(),
# which made the script unparseable from this point on.
df1 %>% group_by(gender) %>% summarise(meanM1 = mean(marks1), meanM2 = mean(marks2))
summarise(mtcars, avg = mean(mpg))
summarise(df1, meanM1 = mean(marks1))
mean(mtcars$mpg)
names(mtcars)
# One-, two- and three-way contingency tables.
table(mtcars$am)
table(mtcars$cyl)
table(mtcars$gear)
table(mtcars$carb)
table(mtcars$cyl, mtcars$am)
table(mtcars$cyl, mtcars$am, mtcars$vs)
xtabs(~cyl+am+vs, data = mtcars)
mtcars %>% group_by(cyl) %>% summarise(mean(mpg))
mtcars %>% group_by(gear, cyl) %>% summarise(meanMPG = mean(mpg)) %>% arrange(meanMPG)
# FIX: the original had a dangling `arrange(desc(meanMPG))` with no data
# argument (runtime error); show it as a complete pipeline instead.
mtcars %>% group_by(gear, cyl) %>% summarise(meanMPG = mean(mpg)) %>% arrange(desc(meanMPG))
mtcars %>% select(mpg, wt) %>% head(n = 7)
mtcars %>% select(mpg, wt) %>% slice(c(1, 5, 6))
seq(1, 32, 2)
mtcars %>% select(mpg, wt) %>% slice(seq(1, 32, 2))  # alternate rows
mtcars %>% filter(mpg > 25) %>% select(mpg, wt, hp, am)
mtcars %>% filter(mpg > 25 & hp < 60) %>% select(mpg, wt, hp, am)
mtcars %>% sample_n(3)
mtcars %>% sample_frac(.3)
|
1148a15a0b6fe328d99bb7ded592f8c002752a47 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/wordspace/examples/eval_similarity_correlation.Rd.R | edbb6bfef813cd81c5e0aa0af488857eb4176542 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 345 | r | eval_similarity_correlation.Rd.R | library(wordspace)
### Name: eval.similarity.correlation
### Title: Evaluate DSM on Correlation with Similarity Ratings (wordspace)
### Aliases: eval.similarity.correlation
### ** Examples
eval.similarity.correlation(RG65, DSM_Vectors)
## Not run:
##D plot(eval.similarity.correlation(RG65, DSM_Vectors, details=TRUE))
## End(Not run)
|
300a1aa201790b4e50c88fd8aafa8834b57d7ffd | dd0474b22e9ac1dc5aa420aea55277d44753d47e | /Analysis.R | 73062c546817e811bd279de439eb1c4be55df9a9 | [] | no_license | yatsenkotetyana/HR_Yatsenko | 73019ca858e531e214dd8906035510650b1e7fce | 151a6346b5b56911cd475c8284b7a65081f36893 | refs/heads/master | 2021-01-13T12:58:05.405703 | 2017-01-12T20:25:36 | 2017-01-12T20:25:36 | 78,783,437 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 27,857 | r | Analysis.R | # Install packages if we need it
if (!require("corrplot")) install.packages("corrplot")
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("caret")) install.packages("caret")
if (!require("e1071")) install.packages("e1071")
if (!require("car")) install.packages("car")
if (!require("nortets")) install.packages("nortest")
if (!require("pastecs")) install.packages("pastecs")
if (!require("stats")) install.packages("stats")
if (!require("MASS")) install.packages("MASS")
if (!require("leaps")) install.packages("leaps")
if (!require("gridExtra")) install.packages("gridExtra")
# Load librarys
library(ggplot2)
library(caret)
library(corrplot)
library(e1071)
library(car)
library(nortest)
library(pastecs)
library(stats)
library(MASS)
library(leaps)
library(gridExtra)
# Clear global environment
rm(list = ls())
# Read database
dataset <- read.csv("HR_comma_sep.csv")
# Correlation before forming factors
# "Factor" not correlation too
# Интересно еще воспользоваться функцией cor.test(), но она работает,
# только для векторов, а не для матриц,
# Различие между этими
# двумя функциями заключается в том, что cor() позволяет вычислить только сам
# коэффициент корреляции, тогда как cor.test() выполняет еще и оценку
# статистической значимости коэффициента, проверяя нулевую гипотезу о равенстве его нулю.
num.cols <- sapply(dataset,is.numeric)
cor.data <- cor(dataset[,num.cols])
# visualisation of corrolation with corrlot
par(mfrow=c(1,1))
corrplot(cor.data, method = "pie")
# shapiro.test - работает для выборок <= 5000
# ad.test() - тест Андерсона-Дарлинга;
# cvm.test() - тест Крамера фон Мизеса;
# lillie.test() - тест Колмогорова-Смирнова в модификации Лиллиефорса;
# sf.test() - тест Шапиро-Франсия (см. Thode, 2002). тоже для выборки <=5000
# Тесты ad & cvm показывает, распределение ненормальное
# а lillie похоже провалили
ad.test(dataset$time_spend_company)
cvm.test(dataset$time_spend_company)
lillie.test(dataset$time_spend_company)
# Если basic=TRUE (по умолчанию), то вычисляется число значений, число нулей и
# пропущенных значений, минимум, максимум, размах и сумма. Если
# desc=TRUE (тоже по умолчанию), то вычисляются медиана, среднее
# арифметическое, стандартная ошибка среднего, 95% доверительный
# интервал для среднего, дисперсия, стандартное отклонение и коэффициент вариации.
# Если norm=TRUE (не по умолчанию), вычисляются статистики нормального распределения, включая
# асимметрию и эксцесс (и их достоверность), и проводится тест Шапиро-Уилка (Shapiro-Wilk test <=5000)
# на нормальность распределения. Опция p используется при вычислении доверительного интервала для
# среднего арифметического (по умолчанию она равна 0.95).
stat.desc(dataset$time_spend_company, basic=TRUE, desc=TRUE, norm=FALSE, p=0.95)
# Counting Variance Inflation Factor
# VIF - orto, VIF - from 1 to 2 all OK
# У нас все значений < 2, значит мультиколлинеарности нет
# Я видела только примеры для lm, а не для glm
# Поэтому я провожу проверку здесь
vif(lm(formula = left ~ . ,data = dataset))
# Агрегировать данных по by, посчитали среднее и дисперсия для примера
aggregate(dataset[,num.cols], by=list(dataset$left, dataset$salary), FUN=mean, na.rm=TRUE)
aggregate(dataset[,num.cols], by=list(dataset$left, dataset$salary), FUN=sd, na.rm=TRUE)
# Form factors
dataset$left <- dataset$left
dataset$promotion_last_5years <- as.factor(dataset$promotion_last_5years)
dataset$Work_accident <- as.factor(dataset$Work_accident)
dataset$salary <- ordered(dataset$salary, c("low","medium" ,"high"))
# Summary
summary(dataset)
# Structure
str(dataset)
# After form factors
# check correlation
# only for numeric
num.cols <- sapply(dataset,is.numeric)
cor.data <- cor(dataset[,num.cols])
# visualisation of corrolation with corrlot
corrplot(cor.data, method = "color")
# Counting Variance Inflation Factor
# VIF - orto, VIF - from 1 to 2 all OK
# У нас все значений < 2, значит мультиколлинеарности нет
# Но я видела только примеры для lm, а не для glm
# МОжет біть кто-то найдет пример и для glm
# Заметно, что слегка подросли значения, но
vif(glm(formula = left ~ . ,family = binomial,data = dataset))
RG <- round(rgamma(length(dataset$time_spend_company),mean(dataset$time_spend_company),
sd(dataset$time_spend_company)),0)
# Проверка кор. шашей переменной time_spend_company с rgamma распределенем
# Получается, что несмотря та то что они внешне похожи.
# Корелляции между ними нет
cor.test(RG,dataset$time_spend_company)
par(mfrow=c(1,2))
# Построение гистограмм и сглаживания для rgamma и time_spend_company
hist(RG,xlab="rgamma",main="Comparison rgamma and time_spend_company",probability=TRUE,breaks=19,ylim=c(0,1))
lines(density(RG,bw=0.2),ylim=c(0,1))
hist(dataset$time_spend_company,xlab="time_spend_company", main=" ", probability=TRUE,breaks=19,ylim=c(0,1))
lines(density(dataset$time_spend_company,bw=0.2),ylim=c(0,1))
par(mfrow=c(1,1))
# Просто для примера попарный график, который Игорь сбразывал в файл
featurePlot(x = dataset[,c("satisfaction_level",
"average_montly_hours",
"time_spend_company")],
y = factor(dataset$left),plot = "pairs",
## Add a key at the top
auto.key = list(columns = 2))
# В самом ggplot несколько графиков можно поместить на один слайд просто только как фасет
# Т.е. факторную переменные для образования нескольких графиков
ggplot(dataset, aes(x = average_montly_hours,color=left,fill=left))+ geom_histogram()+facet_wrap(~left,nrow=2)
# Гистограммы, фактор left
g1<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(left), fill = factor(left)))+ geom_histogram()
g2<-ggplot(dataset, aes(x = last_evaluation, colour = factor(left), fill = factor(left)))+ geom_histogram()
g3<-ggplot(dataset, aes(x = time_spend_company, colour = factor(left), fill = factor(left)))+ geom_histogram()
g4<-ggplot(dataset, aes(x = time_spend_company, colour = factor(salary), fill = factor(salary)))+ geom_histogram()
# Поэтому надо воспользоваться другим пакетом gridExtra
grid.arrange(g1,g2,g3,g4, nrow=2, ncol=2)
# Плотности, фактор left
g5<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(left)))+ geom_density()
g6<-ggplot(dataset, aes(x = last_evaluation, colour = factor(left)))+ geom_density()
g7<-ggplot(dataset, aes(x = time_spend_company, colour = factor(left)))+ geom_density()
grid.arrange(g5,g6,g7, nrow=3, ncol=1)
# Плотности, фактор salary
g8<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(salary)))+geom_density()
g9<-ggplot(dataset, aes(x = last_evaluation, colour = factor(salary)))+ geom_density()
g10<-ggplot(dataset, aes(x = time_spend_company, colour = factor(salary)))+ geom_density()
grid.arrange(g8,g9,g10, nrow=3, ncol=1)
# Split the dataset into train and test sets; caret::createDataPartition
# returns row indices (caTools could instead provide a TRUE/FALSE split vector)
set.seed(123)
split = createDataPartition(y=dataset$left, p=0.75, list=FALSE)
training <- dataset[split, ]
testing <- dataset[-split,]
names(training)
# Logistic regression
# Full model, binomial family
model_Log_Reg_b <- glm(left ~ satisfaction_level +
last_evaluation +
number_project +
average_montly_hours +
time_spend_company +
Work_accident +
promotion_last_5years +
salary +
sales,
data=training,family = binomial(link="logit"))
# Full model, quasi-binomial family (used below for the overdispersion check)
model_Log_Reg_qb <- glm(left ~ satisfaction_level +
last_evaluation +
number_project +
average_montly_hours +
time_spend_company +
Work_accident +
promotion_last_5years +
salary +
sales,
data=training,family = quasibinomial)
# Reduced model, binomial family
model_Log_Reg_b_r <- glm(left ~ satisfaction_level+
last_evaluation +
number_project +
average_montly_hours +
time_spend_company,
data=training,family = binomial(link="logit"))
summary(model_Log_Reg_b)
summary(model_Log_Reg_qb)
summary(model_Log_Reg_b_r)
# A non-significant chi-square statistic indicates that the reduced model
# fits the data as well as the full model
anova(model_Log_Reg_b_r,model_Log_Reg_b,test="Chisq")
# Overdispersion occurs when the observed variance of the response exceeds
# the variance expected under the binomial model. It can distort the
# standard-error estimates and invalidate significance tests. With
# overdispersion, logistic regression can still be fitted with glm(), but
# the quasibinomial family should be used instead of the binomial one.
# Here p = 0.99, so overdispersion is not a problem in this case and the
# binomial family can be used.
pchisq(summary(model_Log_Reg_qb)$dispersion * model_Log_Reg_qb$df.residual,
model_Log_Reg_qb$df.residual, lower = F)
# Log odds ratios are hard to interpret, so the coefficients are
# exponentiated to express them as odds ratios; coefficients are usually
# much easier to interpret when stated on the original scale of the
# response.
exp(coef(model_Log_Reg_b))
# Test for absence of autocorrelation in the residuals.
# A low p-value indicates that the residuals are autocorrelated. Note that
# durbinWatsonTest (like the test itself) is geared towards time series,
# so interpreting lag-1 autocorrelation here is rather questionable.
durbinWatsonTest(model_Log_Reg_b)
# Component+residual plots: test for a linear relation between the model
# residuals (plus each component's contribution) and the predictors.
# Green line = smoothed fit, red line = linear trend. The set of plots shows
# which components behave non-linearly and need a transformation (e.g. a
# logarithm). If a non-linear dependence is revealed, the predictor can be
# raised to a power that linearises it; the required power (and whether a
# transformation is needed at all) is given by the Box-Tidwell procedure
# from the car package.
crPlots(model_Log_Reg_b)
# Note: boxTidwell apparently only works for strictly positive variables!
# Index axis used for the observation-wise diagnostic plots below
ax<-seq(1,nrow(training),by=1)
# Regression diagnostics
# hatvalues(): leverage of each observation; in multiple regression it
# measures the distance from the centroid (point of means) of the
# predictor space. Observations with large hat values have unusual X
# values: high leverage, but not necessarily high influence.
c1<-ggplot(training, aes(x=ax, y = hatvalues(model_Log_Reg_b) )) + geom_jitter()
# Unusual observations typically have large residuals, but not necessarily:
# high-leverage observations can have small residuals because they pull the
# fitted line towards themselves. Leverage measures how far an
# observation's predictor values lie from those of the other observations;
# for extreme, isolated points the lack of neighbours means the fitted
# model will pass close to that particular observation.
# rstudent(): studentized residuals e_i of the fitted model
c2<-ggplot(training, aes(x=ax, y = rstudent(model_Log_Reg_b) )) + geom_jitter()
# cooks.distance(): Cook's distance estimates the effect of deleting the
# observation under consideration and is used to detect outliers.
# This matters because outliers can tilt the fitted "hyperplane".
# Outliers can also be detected before model building and removed with
# dedicated methods.
c3<-ggplot(training, aes(x=ax, y = cooks.distance(model_Log_Reg_b) )) + geom_jitter()
# residuals(): generic accessor returning the model residuals e_i from
# objects produced by modelling functions.
c4<-ggplot(training, aes(x=ax, y = residuals(model_Log_Reg_b) )) + geom_jitter()
grid.arrange(c1,c2,c3, c4,nrow=2, ncol=2)
# fitted() returns the y-hat values associated with the data used to fit
# the model, on the response scale (after the inverse link function is
# applied); predict() returns the fitted values before the inverse link is
# applied. In practical terms: to compare the fit against the original
# data, use fitted().
c5<-ggplot(training, aes(x=fitted(model_Log_Reg_b), y = residuals(model_Log_Reg_b) )) + geom_jitter()
c6<-ggplot(training, aes(x=fitted(model_Log_Reg_b), y = cooks.distance(model_Log_Reg_b) )) + geom_jitter()
grid.arrange(c5,c6,nrow=2, ncol=1)
# Combined influence diagram: leverage on the horizontal axis, studentized
# residuals on the vertical axis, and symbol size proportional to Cook's
# distance. Diagnostic plots are usually most useful when the response
# takes many values; when it takes a limited number of values (as in
# logistic regression) they are less informative.
influencePlot(model_Log_Reg_b)
# Spread-level plot: examines the possible dependence of spread on level
# (extended to the studentized residuals of linear models). This tests that
# the variance of the residuals does not vary by predictable amounts, i.e.
# that it is random, which is easily checked by plotting fitted values
# against residuals: for all fitted values (along the x axis) the variation
# in the residuals should look random, with no apparent pattern. A pattern
# (e.g. towards the right of the graph) may be evidence of
# heteroscedasticity and should be investigated further.
# Heteroscedasticity means non-homogeneous observations, i.e. a
# non-constant variance of the random error of the regression model.
# It makes the least-squares estimates inefficient and the classical
# covariance estimate of the OLS parameters biased and inconsistent, so
# statistical conclusions about estimate quality can be inadequate --
# hence testing for it is a necessary step when building regression
# models. Homoscedasticity is the opposite: homogeneous observations with
# a constant error variance, which is a prerequisite for applying the
# method of least squares.
spreadLevelPlot(model_Log_Reg_b)
# Added-variable plots: show the observations most influential on the
# response (not only at high but also at low values of the response).
# A benefit of these plots is that they show in which direction the
# expected value of the response would move if a given observation were
# removed.
avPlots(model_Log_Reg_b, id.n=2, id.cex=0.7)
# Plots the residuals versus each term in the mean function and versus the
# fitted values, and computes a curvature test for each plot by adding a
# quadratic term and testing it to be zero (Tukey's test for nonadditivity
# when plotting against fitted values). Non-linear patterns here indicate
# a non-linear relationship between predictors and outcome that the model
# fails to capture; equally spread residuals around a horizontal line
# without distinct patterns indicate no unmodelled non-linearity.
residualPlots(model_Log_Reg_b)
# Backward elimination via MASS::stepAIC; the original study kept only
# variables with a significance level < 5% in the final model. stepAIC
# itself uses the Akaike Information Criterion, not p-values: the goal is
# the model with the smallest AIC found by removing (or adding) variables
# within the scope.
model1.stepAIC <- stepAIC(model_Log_Reg_b,direction="backward")
# leaps::regsubsets can be used for best subsets, forward selection and
# backward elimination, whichever approach suits the application. For each
# number of predictors it identifies the best combination of variables to
# include, i.e. the single best variable, the best pair, and so on.
leaps <- regsubsets(left ~ satisfaction_level +
last_evaluation +
number_project +
average_montly_hours +
time_spend_company +
Work_accident +
promotion_last_5years +
salary +
sales, data=training,
nbest = 1, # 1 best model for each number of predictors
method = "exhaustive",
nvmax = NULL # NULL for no limit on number of variables
)
# An asterisk marks the variables belonging to the best model of each size
summary(leaps)
par(mfrow=c(1,1))
# Graphical representation of the same results (adjusted R^2 scale)
plot(leaps,scale="adjr2")
# Confidence intervals (2.5% & 97.5%) for the coefficient estimates,
# exponentiated so every coefficient is expressed as an odds ratio
# (profile-likelihood based confint from MASS)
exp(confint(model_Log_Reg_b,level=0.95,method="Wald"))
# Standard Wald computation; the result differs slightly
exp(confint.default(model_Log_Reg_b,level=0.95))
# Logistic regression with interactions among the numeric predictors
model_Log_Reg_b_int <- glm(left ~ satisfaction_level*last_evaluation*
number_project*average_montly_hours*
time_spend_company +
Work_accident +
promotion_last_5years +
salary +
sales,
data=training,family = binomial)
# NOTE(review): this re-prints the model WITHOUT interactions; probably
# summary(model_Log_Reg_b_int) was intended here -- confirm
summary(model_Log_Reg_b)
# A non-significant chi-square statistic indicates that the reduced
# (no-interaction) model fits the data as well as the full model
anova(model_Log_Reg_b,model_Log_Reg_b_int,test="Chisq")
# Logistic regression with polynomial terms for the numeric predictors
model_Log_Reg <- glm(left~ poly(satisfaction_level, 10) +
poly(last_evaluation, 10) +
poly(number_project, 5) +
poly(average_montly_hours, 10) +
poly(time_spend_company, 5) +
Work_accident +
promotion_last_5years +
sales +
salary,
data=training,family = binomial )
summary(model_Log_Reg)
# A non-significant chi-square statistic indicates that the reduced
# (linear-term) model fits the data as well as the polynomial model
anova(model_Log_Reg_b,model_Log_Reg,test="Chisq")
|
2b254c77fde12a94eaa13476d2ae28d2a328547f | b959bed2a2da2b025fc0a6ce9fcd6766ba9bffe6 | /man/summary.class_item.Rd | a012b4ac6afd993c65b7e463e473fd074f725702 | [] | no_license | cran/MultiLCIRT | 074ac46148da08008180f775cd890c92f0907a14 | 2463a23604281b7516fe4001dec575925a32bb98 | refs/heads/master | 2021-01-17T08:58:16.283841 | 2017-06-06T16:10:33 | 2017-06-06T16:10:33 | 17,681,036 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 835 | rd | summary.class_item.Rd | \name{summary.class_item}
\alias{summary.class_item}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Print the output of class_item object}
\description{Given the output from class_item, it is written in a readable form}
\usage{
\method{summary}{class_item}(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{output from class_item}
\item{...}{further arguments passed to or from other methods}
}
%\details{
%%% ~~ If necessary, more details than the description above ~~
%}
\value{
\item{table}{summary of all the results}
}
\author{Francesco Bartolucci - University of Perugia (IT)}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
|
4c52c9e0adcbca3239b103056ab2683137823cc3 | 3a2a402f434403db37d4f1ad5456185cc5c4f7d7 | /R/refit.R | c4e14695174998b3f85804487a865245dc222359 | [] | no_license | zdk123/pulsar | 445090f2814f02a6dfc461fc46737583c29b5996 | 9b78ec8dc01f9bf49497e4ecda9335a40d462e18 | refs/heads/master | 2023-02-18T14:26:15.152281 | 2023-01-26T02:25:45 | 2023-01-26T02:25:45 | 40,200,366 | 11 | 8 | null | 2023-02-07T17:19:28 | 2015-08-04T17:59:52 | R | UTF-8 | R | false | false | 3,067 | r | refit.R | #' Refit pulsar model
#'
#' Run the supplied graphical model function on the whole dataset and refit with the selected lambda(s)
#'
#' @param obj a fitted \code{pulsar} or \code{batch.pulsar} object
#' @param criterion a character vector of criteria for refitting on full data. An optimal index must be defined for each criterion or a message will displayed. If missing (no argument is supplied), try to refit for all pre-specified criteria.
#' @details The \code{refit} call is evaluated in the environment specified by the \code{pulsar} or \code{batch.pulsar} object, so if any variables were used for arguments to the original call, unless they are purposefully updated, should not be altered. For example, if the variable for the original data is reassigned, the output of \code{refit} will not be on the original dataset.
#' @return a \code{pulsar.refit} S3 object with members:
#' \itemize{
#' \item est: the raw output from the graphical model function, \code{fun}, applied to the full dataset.
#' \item refit: a named list of adjacency matrices, for each optimal criterion in \code{obj} or specified in the \code{criterion} argument.
#' \item fun: the original function used to estimate the graphical model along the lambda path.
#'}
#' @examples
#'
#' ## Generate the data with huge:
#' \dontrun{
#' library(huge)
#' set.seed(10010)
#' p <- 40 ; n <- 1200
#' dat <- huge.generator(n, p, "hub", verbose=FALSE, v=.1, u=.3)
#' lams <- getLamPath(getMaxCov(dat$data), .01, len=20)
#'
#' ## Run pulsar with huge
#' hugeargs <- list(lambda=lams, verbose=FALSE)
#' out.p <- pulsar(dat$data, fun=huge::huge, fargs=hugeargs,
#' rep.num=20, criterion='stars')
#'
#' fit <- refit(out.p)
#' }
#' @seealso \code{\link{pulsar}} \code{\link{batch.pulsar}}
#' @export
refit <- function(obj, criterion) {
UseMethod("refit")
}
#' @export
#' S3 method for `pulsar` objects: delegates directly to the internal
#' workhorse, forwarding `criterion` as-is (including the missing case).
refit.pulsar <- function(obj, criterion) .refit.pulsar(obj, criterion)
#' Workhorse for \code{refit}: re-estimate the model on the full dataset
#' (or reuse a stored estimate) and extract the adjacency matrix at the
#' optimal lambda index for each requested criterion.
#' @keywords internal
.refit.pulsar <- function(obj, criterion) {
# Result container: $est holds the full-path fit, $refit the selected graphs
est <- vector('list', 2)
names(est) <- c('est', 'refit')
# Recover the original call's arguments, evaluated in the call's
# environment -- so the original data/fun/fargs variables must be intact
fin <- getArgs(getCall(obj), getEnvir(obj))
## call est function on original dataset
if (length(obj$est)) {
# A full-data estimate is already stored on the pulsar object; reuse it
est$est <- obj$est
} else {
# Otherwise run the graphical model function on the full dataset
est$est <- do.call(eval(fin$fun), c(fin$fargs, list(fin$data)))
}
# Default: refit for every criterion requested in the original call
if (missing(criterion)) criterion <- eval(fin$criterion)
est$refit <- vector('list', length(criterion))
names(est$refit) <- criterion
for (crit in criterion) {
# Optimal index along the lambda path, as selected for this criterion
optind <- obj[[crit]]$opt.index
if (!is.null(optind)) {
est$refit[[crit]] <- est$est$path[[optind]]
} else {
# In R, assigning NULL removes the named entry from the list
est$refit[[crit]] <- NULL
if (crit %in% names(obj)) {
message(paste('No optimal index selected for', crit, 'criterion', sep=" "))
} else
# Criterion not present on the object at all
warning(paste('Unknown criterion', crit, sep=" "), call.=FALSE)
}
}
## TODO: if fun is null, get formal arg of obj
est$fun <- obj$call$fun
if (is.null(est$fun))
# Fall back to the default `fun` formal of pulsar/batch.pulsar
est$fun <- formals(class(obj))$fun
structure(est, class='pulsar.refit')
}
|
fc2cafccab97293432da6fa5e75ff8c66e4f1970 | 763de830f49f3ef8462eb3097b36535e4836cf63 | /InterLab_study/analysis_InterLab/2_normalize.R | dbe86a6d73ef884bb474e27e58f12af8c1c4f200 | [
"MIT"
] | permissive | Akmazad/batch_effects_workflow_code | ba44d64d8d10435696857ad4851231af55d42fbd | 696eb609a55ba9ece68b732e616d7ebeaa660373 | refs/heads/master | 2023-05-01T18:06:06.099583 | 2020-11-20T14:47:22 | 2020-11-20T14:47:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,498 | r | 2_normalize.R | #!/usr/bin/env Rscript --vanilla
# Normalisation step of the InterLab analysis: read transition-level
# OpenSWATH output, median-centre the fragment-level intensities per
# sample, and write the result back in OpenSWATH format.
# Load the libraries
library(proBatch)
library(readr)
library(dplyr)
library(tidyr)
source('transition_fragment_conversion.R')
# Load the data (only the essential columns, to keep memory usage down)
essential_columns = c('peptide_group_label','filename','aggr_Fragment_Annotation', 'aggr_Peak_Area')
cols_to_get <- rep(list(col_guess()), length(essential_columns))
names(cols_to_get) <- essential_columns
cols_to_get2 = do.call(cols_only, cols_to_get)
proteome = read_delim("data_InterLab/1_original_data/all_sites_global_q_0.01_applied_to_local_global.txt",
delim = "\t", escape_double = FALSE, trim_ws = TRUE,
col_types = cols_to_get2)
# Transform the transition-level matrix to fragment level
fragmentome <- proteome %>%
transition_to_fragment()
# Log-transform intensities before normalisation
fragmentome_log = fragmentome %>%
log_transform_df(measure_col = 'Ion_intensity')
# Normalize the samples (median centering per sample/file)
fragmentome_centered = normalize_sample_medians_df(fragmentome_log,
sample_id_col = 'filename',
measure_col = 'Ion_intensity')
# Exponentiate (reverse the log-transformation)
fragmentome_centered_un_log = fragmentome_centered %>%
unlog_df(measure_col = 'Ion_intensity')
print(names(fragmentome_centered_un_log))
# Convert the fragment-level data frame back to OpenSWATH format
proteome_median_centered = fragment_df_to_openSWATH(fragmentome_centered_un_log,
fragment_intensity_column = 'Ion_intensity',
fragment_annotation_column = 'Ion_ID',
id_column = 'ions',
fragment_united_column = 'aggr_Fragment_Annotation_new',
fragment_united_int_column = 'aggr_Peak_Area_new',
un_log = NULL,
intensities_to_exclude = setdiff(names(fragmentome_centered_un_log),
names(fragmentome)))
# Rename: keep the original columns under *_old and promote the normalised
# *_new columns to the canonical OpenSWATH names
old_names <- c("aggr_Peak_Area", "aggr_Fragment_Annotation",
"aggr_Peak_Area_new", "aggr_Fragment_Annotation_new")
new_names <- c("aggr_Peak_Area_old", "aggr_Fragment_Annotation_old",
"aggr_Peak_Area", "aggr_Fragment_Annotation")
# Re-read the supporting (annotation) columns from the original file
supporting_info_cols = c("transition_group_id", "Sequence", "FullPeptideName", "RT",
"assay_rt", "Intensity", "ProteinName", "m_score", "run_id",
"peak_group_rank", "Charge", "decoy", 'peptide_group_label','filename')
cols_to_get <- rep(list(col_guess()), length(supporting_info_cols))
names(cols_to_get) <- supporting_info_cols
cols_to_get2 = do.call(cols_only, cols_to_get)
proteome_supporting_info = read_delim('data_InterLab/1_original_data/all_sites_global_q_0.01_applied_to_local_global.txt',
delim = "\t", escape_double = FALSE, trim_ws = TRUE,
col_types = cols_to_get2)
# Attach the supporting info back to the normalised intensities
proteome_median_centered = proteome_median_centered %>%
rename_at(vars(old_names), ~ new_names) %>%
merge(proteome_supporting_info, by = c('peptide_group_label','filename'))
# Save the new data frame
write_delim(proteome_median_centered,
path = "data_InterLab/2_interim_data/all_sites_global_q_001_applied_to_local_global_medianCentered.tsv",
delim = '\t')
|
deea11aa75a1011c0bd81ac4ae2c056fb071ea56 | 270ded734f3164c2b8996d87a0a4ba9723409fd8 | /R/data.R | b976ad59520da627dcaddde4eda59921ea79bfd8 | [] | no_license | cran/epos | 786602338b9da96c3b19a4233cba27cde658df18 | 21c8f0a2d46ffa61436eb31be9b8288efe33da54 | refs/heads/master | 2021-06-17T18:44:04.996756 | 2021-02-20T00:10:14 | 2021-02-20T00:10:14 | 152,093,581 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,719 | r | data.R | #' List drug terms with their frequency co-occurring with terms from the EpSO
#' ontology in publications since 2015 from the BioASQ 2020 corpus.
#'
#' @source The text mining workflows for data generation are described in
#' Mueller, Bernd and Hagelstein, Alexandra (2016)
#' <doi:10.4126/FRL01-006408558>, Mueller, Bernd et al. (2017)
#' <doi:10.1007/978-3-319-58694-6_22>, and Mueller, Bernd and Rebholz-Schuhmann,
#' Dietrich (2020) <doi:10.1007/978-3-030-43887-6_52>. The source data set for
#' generating the data co-occurrence lists is the BioASQ 2020 corpus filtered
#' for documents published since the year 2015. The source ontology for the
#' creation of the dictionary is the Epilepsy and Seizure
#' Ontology (EpSO) from https://bioportal.bioontology.org/ontologies/EPSO
#' @format A named list of drug term frequencies
#' @examples
#' utils::data(rawDrugNamesCoOcEpSO, package="epos")
"rawDrugNamesCoOcEpSO"
#' List drug terms with their frequency co-occurring with terms from the ESSO
#' ontology in publications since 2015 from the BioASQ 2020 corpus.
#'
#' @source The text mining workflows for data generation are described in
#' Mueller, Bernd and Hagelstein, Alexandra (2016)
#' <doi:10.4126/FRL01-006408558>, Mueller, Bernd et al. (2017)
#' <doi:10.1007/978-3-319-58694-6_22>, and Mueller, Bernd and Rebholz-Schuhmann,
#' Dietrich (2020) <doi:10.1007/978-3-030-43887-6_52>. The source data set for
#' generating the data co-occurrence lists is the BioASQ 2020 corpus filtered
#' for documents published since the year 2015. The source ontology for the
#' creation of the dictionary is Epilepsy Syndrome Seizure
#' Ontology (ESSO) from https://bioportal.bioontology.org/ontologies/ESSO
#' @examples
#' utils::data(rawDrugNamesCoOcESSO, package="epos")
"rawDrugNamesCoOcESSO"
#' List drug terms with their frequency co-occurring with terms from the EPILONT
#' ontology in publications since 2015 from the BioASQ 2020 corpus.
#'
#' @source The text mining workflows for data generation are described in
#' Mueller, Bernd and Hagelstein, Alexandra (2016)
#' <doi:10.4126/FRL01-006408558>, Mueller, Bernd et al. (2017)
#' <doi:10.1007/978-3-319-58694-6_22>, and Mueller, Bernd and Rebholz-Schuhmann,
#' Dietrich (2020) <doi:10.1007/978-3-030-43887-6_52>. The source data set for
#' generating the data co-occurrence lists is the BioASQ 2020 corpus filtered
#' for documents published since the year 2015. The source ontology for the
#' creation of the dictionary is the Epilepsy Ontology
#' (EPILONT) from https://bioportal.bioontology.org/ontologies/EPILONT
#' @format A named list of drug term frequencies
#' @examples
#' utils::data(rawDrugNamesCoOcEPILONT, package="epos")
"rawDrugNamesCoOcEPILONT"
#' List drug terms with their frequency co-occurring with terms from the EPISEM
#' ontology in publications since 2015 from the BioASQ 2020 corpus.
#'
#' @source The text mining workflows for data generation are described in
#' Mueller, Bernd and Hagelstein, Alexandra (2016)
#' <doi:10.4126/FRL01-006408558>, Mueller, Bernd et al. (2017)
#' <doi:10.1007/978-3-319-58694-6_22>, and Mueller, Bernd and Rebholz-Schuhmann,
#' Dietrich (2020) <doi:10.1007/978-3-030-43887-6_52>. The source data set for
#' generating the data co-occurrence lists is the BioASQ 2020 corpus filtered
#' for documents published since the year 2015. The source ontology for the
#' creation of the dictionary is the Epilepsy Semiology Ontology (EPISEM) from
#' https://bioportal.bioontology.org/ontologies/EPISEM
#' @format A named list of drug term frequencies
#' @examples
#' utils::data(rawDrugNamesCoOcEPISEM, package="epos")
"rawDrugNamesCoOcEPISEM"
#' List drug terms with their frequency co-occurring with terms from the FENICS
#' ontology in publications since 2015 from the BioASQ 2020 corpus.
#'
#' @source The text mining workflows for data generation are described in
#' Mueller, Bernd and Hagelstein, Alexandra (2016)
#' <doi:10.4126/FRL01-006408558>, Mueller, Bernd et al. (2017)
#' <doi:10.1007/978-3-319-58694-6_22>, and Mueller, Bernd and Rebholz-Schuhmann,
#' Dietrich (2020) <doi:10.1007/978-3-030-43887-6_52>. The source data set for
#' generating the data co-occurrence lists is the BioASQ 2020 corpus filtered
#' for documents published since the year 2015. The source ontology for the
#' creation of the dictionary is the Functional Epilepsy Nomenclature for Ion
#' Channels (FENICS) from https://bioportal.bioontology.org/ontologies/FENICS
#' @format A named list of drug term frequencies
#' @examples
#' utils::data(rawDrugNamesCoOcFENICS, package="epos")
"rawDrugNamesCoOcFENICS" |
91d9e4805e3ed8eda0f526a1299039e26f77f94f | 72cfd71c7d584530657782c4a0a23cc22950dff7 | /man/demo_sim_SEG.Rd | 5ecd909a73fa58f3b8556efad883fa683d48fb3e | [] | no_license | juvelas/PVA | 6ae2b957705d9d317b71b05ae72a76f598f0d3ab | 795f571908caedb73f675de4526d35be9f7b2355 | refs/heads/master | 2023-02-28T06:28:47.192149 | 2021-01-23T00:36:18 | 2021-01-23T00:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,096 | rd | demo_sim_SEG.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demo_sim_SEG.R
\name{demo_sim_SEG}
\alias{demo_sim_SEG}
\title{Stochastic Exponential Growth model demo}
\usage{
demo_sim_SEG()
}
\value{
Nothing is returned; the command is run for its side effect of
launching a shiny app. You will have to close the shiny window or hit the
Stop button in the console to get the console prompt back.
}
\description{
A Shiny demo of the SEG model. The user can set the mean log growth rate,
the variance of the log growth rate, the initial population size, and the length of simulation.
The number of replicate simulations can also be changed.
}
\details{
Four graphs are produced. The left plots are time series of abundance, plotted on a linear
scale (top) and a logarithmic scale (bottom). The right plots are histograms of the final
abundance and final log abundance across the replicate simulations.
The simulations update immediately when you change any parameter value. To
see a new set of stochastic simulations without changing any parameters, hit
the "Run again" button.
}
|
d3d0e54bab2e2232d362697ffa0ccfd8624d053a | 2496529c830072687f31657ef826d1aa695af025 | /man/summary.coxph.relsurv.Rd | 1d33678a2d22508fbb8778acc3219c083d8d477d | [] | no_license | damjanm/mstate-1 | 3d4823c2f08367704db2d69ff421db46a0743a7c | f497395b5d41a3cd1b988a05e867e4ec323db056 | refs/heads/master | 2023-07-16T09:52:47.986603 | 2023-06-26T09:25:21 | 2023-06-26T09:25:21 | 300,222,747 | 0 | 0 | null | 2020-10-01T09:31:00 | 2020-10-01T09:30:59 | null | UTF-8 | R | false | true | 909 | rd | summary.coxph.relsurv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relsurv.summary.coxph.relsurv.R
\name{summary.coxph.relsurv}
\alias{summary.coxph.relsurv}
\title{Summary method for coxph.relsurv object}
\usage{
\method{summary}{coxph.relsurv}(object, conf.int = 0.95, scale = 1, ...)
}
\arguments{
\item{object}{The result of a coxph.relsurv fit}
\item{conf.int}{Level for computation of the confidence intervals. If set to FALSE, no confidence intervals are printed}
\item{scale}{Vector of scale factors for the coefficients, defaults to 1. The printed coefficients, se, and confidence intervals will be associated with one scale unit}
\item{...}{For future methods}
}
\value{
An object of class summary.coxph.relsurv
}
\description{
Produces a summary of a fitted coxph.relsurv model.
}
\seealso{
\code{\link{coxph.relsurv}}
}
\author{
Damjan Manevski \email{damjan.manevski@mf.uni-lj.si}
}
|
6e96bbfe73c0b9a5f890dcf1dd165f4999e91ae5 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws/man/securityhub.Rd | 5a900965f8ad9a5734bb07a8ccb7d784ea08f038 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 10,084 | rd | securityhub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{securityhub}
\alias{securityhub}
\title{AWS SecurityHub}
\usage{
securityhub(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Security Hub provides you with a comprehensive view of the security
state of your AWS environment and resources. It also provides you with
the readiness status of your environment based on controls from
supported security standards. Security Hub collects security data from
AWS accounts, services, and integrated third-party products and helps
you analyze security trends in your environment to identify the highest
priority security issues. For more information about Security Hub, see
the \emph{\href{https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html}{AWS Security Hub User Guide}}
.
When you use operations in the Security Hub API, the requests are
executed only in the AWS Region that is currently active or in the
specific AWS Region that you specify in your request. Any configuration
or settings change that results from the operation is applied only to
that Region. To make the same change in other Regions, execute the same
command for each Region to apply the change to.
For example, if your Region is set to \code{us-west-2}, when you use
\code{\link[=securityhub_create_members]{create_members}} to add a member account
to Security Hub, the association of the member account with the master
account is created only in the \code{us-west-2} Region. Security Hub must be
enabled for the member account in the same Region that the invitation
was sent from.
The following throttling limits apply to using Security Hub API
operations.
\itemize{
\item \code{\link[=securityhub_batch_enable_standards]{batch_enable_standards}} -
\code{RateLimit} of 1 request per second, \code{BurstLimit} of 1 request per
second.
\item \code{\link[=securityhub_get_findings]{get_findings}} - \code{RateLimit} of 3
requests per second. \code{BurstLimit} of 6 requests per second.
\item \code{\link[=securityhub_update_findings]{update_findings}} - \code{RateLimit} of 1
request per second. \code{BurstLimit} of 5 requests per second.
\item \code{\link[=securityhub_update_standards_control]{update_standards_control}} -
\code{RateLimit} of 1 request per second, \code{BurstLimit} of 5 requests per
second.
\item All other operations - \code{RateLimit} of 10 requests per second.
\code{BurstLimit} of 30 requests per second.
}
}
\section{Service syntax}{
\preformatted{svc <- securityhub(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=securityhub_accept_invitation]{accept_invitation} \tab Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from\cr
\link[=securityhub_batch_disable_standards]{batch_disable_standards} \tab Disables the standards specified by the provided StandardsSubscriptionArns\cr
\link[=securityhub_batch_enable_standards]{batch_enable_standards} \tab Enables the standards specified by the provided StandardsArn\cr
\link[=securityhub_batch_import_findings]{batch_import_findings} \tab Imports security findings generated from an integrated third-party product into Security Hub\cr
\link[=securityhub_batch_update_findings]{batch_update_findings} \tab Used by Security Hub customers to update information about their investigation into a finding\cr
\link[=securityhub_create_action_target]{create_action_target} \tab Creates a custom action target in Security Hub\cr
\link[=securityhub_create_insight]{create_insight} \tab Creates a custom insight in Security Hub\cr
\link[=securityhub_create_members]{create_members} \tab Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account\cr
\link[=securityhub_decline_invitations]{decline_invitations} \tab Declines invitations to become a member account\cr
\link[=securityhub_delete_action_target]{delete_action_target} \tab Deletes a custom action target from Security Hub\cr
\link[=securityhub_delete_insight]{delete_insight} \tab Deletes the insight specified by the InsightArn\cr
\link[=securityhub_delete_invitations]{delete_invitations} \tab Deletes invitations received by the AWS account to become a member account\cr
\link[=securityhub_delete_members]{delete_members} \tab Deletes the specified member accounts from Security Hub\cr
\link[=securityhub_describe_action_targets]{describe_action_targets} \tab Returns a list of the custom action targets in Security Hub in your account\cr
\link[=securityhub_describe_hub]{describe_hub} \tab Returns details about the Hub resource in your account, including the HubArn and the time when you enabled Security Hub\cr
\link[=securityhub_describe_organization_configuration]{describe_organization_configuration} \tab Returns information about the Organizations configuration for Security Hub\cr
\link[=securityhub_describe_products]{describe_products} \tab Returns information about the available products that you can subscribe to and integrate with Security Hub in order to consolidate findings\cr
\link[=securityhub_describe_standards]{describe_standards} \tab Returns a list of the available standards in Security Hub\cr
\link[=securityhub_describe_standards_controls]{describe_standards_controls} \tab Returns a list of security standards controls\cr
\link[=securityhub_disable_import_findings_for_product]{disable_import_findings_for_product} \tab Disables the integration of the specified product with Security Hub\cr
\link[=securityhub_disable_organization_admin_account]{disable_organization_admin_account} \tab Disables a Security Hub administrator account\cr
\link[=securityhub_disable_security_hub]{disable_security_hub} \tab Disables Security Hub in your account only in the current Region\cr
\link[=securityhub_disassociate_from_master_account]{disassociate_from_master_account} \tab Disassociates the current Security Hub member account from the associated master account\cr
\link[=securityhub_disassociate_members]{disassociate_members} \tab Disassociates the specified member accounts from the associated master account\cr
\link[=securityhub_enable_import_findings_for_product]{enable_import_findings_for_product} \tab Enables the integration of a partner product with Security Hub\cr
\link[=securityhub_enable_organization_admin_account]{enable_organization_admin_account} \tab Designates the Security Hub administrator account for an organization\cr
\link[=securityhub_enable_security_hub]{enable_security_hub} \tab Enables Security Hub for your account in the current Region or the Region you specify in the request\cr
\link[=securityhub_get_enabled_standards]{get_enabled_standards} \tab Returns a list of the standards that are currently enabled\cr
\link[=securityhub_get_findings]{get_findings} \tab Returns a list of findings that match the specified criteria\cr
\link[=securityhub_get_insight_results]{get_insight_results} \tab Lists the results of the Security Hub insight specified by the insight ARN\cr
\link[=securityhub_get_insights]{get_insights} \tab Lists and describes insights for the specified insight ARNs\cr
\link[=securityhub_get_invitations_count]{get_invitations_count} \tab Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation\cr
\link[=securityhub_get_master_account]{get_master_account} \tab Provides the details for the Security Hub master account for the current member account\cr
\link[=securityhub_get_members]{get_members} \tab Returns the details for the Security Hub member accounts for the specified account IDs\cr
\link[=securityhub_invite_members]{invite_members} \tab Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from\cr
\link[=securityhub_list_enabled_products_for_import]{list_enabled_products_for_import} \tab Lists all findings-generating solutions (products) that you are subscribed to receive findings from in Security Hub\cr
\link[=securityhub_list_invitations]{list_invitations} \tab Lists all Security Hub membership invitations that were sent to the current AWS account\cr
\link[=securityhub_list_members]{list_members} \tab Lists details about all member accounts for the current Security Hub master account\cr
\link[=securityhub_list_organization_admin_accounts]{list_organization_admin_accounts} \tab Lists the Security Hub administrator accounts\cr
\link[=securityhub_list_tags_for_resource]{list_tags_for_resource} \tab Returns a list of tags associated with a resource\cr
\link[=securityhub_tag_resource]{tag_resource} \tab Adds one or more tags to a resource\cr
\link[=securityhub_untag_resource]{untag_resource} \tab Removes one or more tags from a resource\cr
\link[=securityhub_update_action_target]{update_action_target} \tab Updates the name and description of a custom action target in Security Hub\cr
\link[=securityhub_update_findings]{update_findings} \tab UpdateFindings is deprecated\cr
\link[=securityhub_update_insight]{update_insight} \tab Updates the Security Hub insight identified by the specified insight ARN\cr
\link[=securityhub_update_organization_configuration]{update_organization_configuration} \tab Used to update the configuration related to Organizations\cr
\link[=securityhub_update_security_hub_configuration]{update_security_hub_configuration} \tab Updates configuration options for Security Hub\cr
\link[=securityhub_update_standards_control]{update_standards_control} \tab Used to control whether an individual security standard control is enabled or disabled
}
}
\examples{
\dontrun{
svc <- securityhub()
svc$accept_invitation(
Foo = 123
)
}
}
|
8100b2a5421fc714d2c293eefc56169007dbe5b2 | a893f063e4fb685c6d959882b74f35cfc17686ee | /solutions/manu_colour_palette.R | 86bc84aac797ee4ff393387b5052b39b3cc9fef3 | [] | no_license | jmarshallnz/intro_to_r | e8ebe29fe4df3d32f9848e79eb75ead3a46bce4c | 35065a9255915f5f9eec4248972560fcbe7ff991 | refs/heads/main | 2023-06-16T10:09:57.485226 | 2021-06-21T04:13:12 | 2021-06-27T23:29:42 | 332,910,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 492 | r | manu_colour_palette.R |
# Scatterplot of penguin flipper length vs body mass, coloured by species,
# using a colour palette from the Manu package.
library(tidyverse)
library(palmerpenguins)
library(Manu) # from: https://g-thomson.github.io/Manu/
# install the Manu package using:
# remotes::install_github("G-Thomson/Manu")
# NOTE(review): my_colours is defined and printed but never used below --
# the plot takes its palette from get_pal("Kaka") instead.
my_colours = c(Adelie = "darkgreen",
Chinstrap = "#7f2b3c",
Gentoo = "orange")
my_colours
# One point per penguin; species mapped to colour, then overridden with
# the "Kaka" palette supplied by Manu::get_pal().
ggplot(data = penguins) +
geom_point(
mapping = aes(
x = flipper_length_mm,
y = body_mass_g,
col = species
)
) +
scale_colour_manual(values = get_pal("Kaka"))
|
941cb5cec17391422ad665defa511b9ae6ddb42c | 5397b2f52030662f0e55f23f82e45faa165b8346 | /R/j_example_xlsx.R | df442d6902b29f1631f98a7ef6611612ab8168a6 | [
"MIT"
] | permissive | data-science-made-easy/james-old | 8569dcc8ce74c68bcbb81106127da4b903103fcd | 201cc8e527123a62a00d27cd45d365c463fc1411 | refs/heads/master | 2023-01-12T21:16:23.231628 | 2020-11-19T13:46:17 | 2020-11-19T13:46:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 913 | r | j_example_xlsx.R | #' Create example xlsx file
#'
#' You can use this example to import, analyse, and visualise your own data
#'
#' @param file_name where your xlsx-file should be stored (default temporary xlsx-file)
#' @param multiple_tabs creates an xlsx-file with multiple tabs if TRUE (default), including a meta tab. If FALSE, it creates an xlsx-file with only one tab
#'
#' @return Path to your example xlsx
#'
#' @seealso \code{\link{j_import}} to import the example data
#'
#' @export
j_example_xlsx <- function(file_name = paste0(tempfile(), ".xlsx"), multiple_tabs = TRUE) {
  # Select which bundled example workbook to copy, based on the layout asked for.
  example_file <- if (multiple_tabs) {
    "james-example-multiple-tabs.xlsx"
  } else {
    "james-example-one-tab.xlsx"
  }
  # Copy the packaged example to the requested destination.
  copied <- file.copy(
    from = system.file("extdata", example_file, package = "james"),
    to = file_name
  )
  # Return the destination path on success, NULL otherwise (same contract
  # as the original implementation).
  if (copied) file_name else NULL
}
8e56a2755e77e1c4e09282c7e534cf5605a333f9 | 9d8990e66af63d2e691d01fd66b1c28f314750ea | /IE544_PROJECT_Q1.r | f16b68f0b5bfd1f4259d330377d3dcb4b53ac716 | [] | no_license | buraksimsekkk/IE544-Project | 6333c97ca472e475acbbd242bb68faf73cbcbd17 | 1dd731627f4e54d977939d5a82d77d874d6853d6 | refs/heads/main | 2023-05-28T08:49:53.722563 | 2021-06-13T13:28:49 | 2021-06-13T13:28:49 | 374,346,175 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,915 | r | IE544_PROJECT_Q1.r | library(tibble)
library(bnlearn)
library(dplyr)
require(ggplot2)
data<-readRDS("amz_train.rds")
data$epoc = as.Date(as.character(data$epoc))
data_df <- data.frame(year = as.numeric(format(data$epoc, format = "%Y")),
month = as.numeric(format(data$epoc, format = "%m")),
day = as.numeric(format(data$epoc, format = "%d")))
data = data[,-2]
data = add_column(data, d = data_df[,1], .after = 1)
data = add_column(data, d = data_df[,2], .after = 2)
data = add_column(data, d = data_df[,3], .after = 3)
colnames(data)[2] = "year"
colnames(data)[3] = "month"
colnames(data)[4] = "day"
###a-) Number of Products, number of sellers (overall and by day)
length(unique(data$pid))
unique(data$pid)
length(unique(data$sid))
no_of_sellers = list()
for (i in c(1:9)){
df = data %>% filter(data$pid==unique(data$pid)[i])
count = length(unique(df$sid))
no_of_sellers = append(no_of_sellers,count)
}
no_of_sellers
barplot(unlist(no_of_sellers),names.arg=c(1:9),xlab="Products",ylab="No_of_sellers",col="blue")
month_day = paste(data$month,data$day)
data = data[,-c(3,4)]
#data
data = add_column(data, month_day = month_day, .after = 1)
#data
product_count_byday = data %>% group_by(month_day) %>% summarise(product_count_byday = length(unique(pid)))
seller_count_byday = data %>% group_by(month_day) %>% summarise(seller_count_byday = length(unique(sid)))
barplot(seller_count_byday$seller_count_byday,names.arg=seller_count_byday$month_day,xlab="Date",ylab="No_of_sellers",col="green")
barplot(product_count_byday$product_count_byday,names.arg=product_count_byday$month_day,xlab="Date",ylab="No_of_products",col="red")
##b-) Average, max, min price, buy-box price, shipping cost by product
avg = data.frame( data %>% group_by(pid,month_day) %>% summarise(average = mean(price)))
max = data.frame(data %>% group_by(pid,month_day) %>% summarise(max = max(price)))
min = data.frame(data %>% group_by(pid,month_day) %>% summarise(min = min(price)))
df = data%>% filter(data$bbox=="success")
bbox_price = data.frame(df %>% group_by(pid,month_day) %>% summarise(bbox_price = mean(bbox_price)))
for (i in c(1:9)){
avg = data.frame( data %>% group_by(pid,month_day) %>% summarise(average = mean(price)))
max = data.frame(data %>% group_by(pid,month_day) %>% summarise(max = max(price)))
min = data.frame(data %>% group_by(pid,month_day) %>% summarise(min = min(price)))
df=data%>% filter(data$bbox=="success")
bbox_price = data.frame(df %>% group_by(pid,month_day) %>% summarise(bbox_price = mean(bbox_price)))
max=max[max$pid==unique(data$pid)[i],]
avg=avg[avg$pid==unique(data$pid)[i],]
min=min[min$pid==unique(data$pid)[i],]
bbox_price=bbox_price[bbox_price$pid==unique(data$pid)[i],]
if (i==2){
bbox_price=rbind(bbox_price,data.frame(pid=unique(data$pid)[i],month_day='8 31',bbox_price=0))
bbox_price=rbind(bbox_price,data.frame(pid=unique(data$pid)[i],month_day='9 1',bbox_price=0))
bbox_price=rbind(bbox_price,data.frame(pid=unique(data$pid)[i],month_day='9 2',bbox_price=0))
}
df = data.frame(avg$month_day,avg$average,max$max,min$min,bbox_price$bbox_price)
g = ggplot(df, aes(avg$month_day,group=1))
g = g + geom_line(aes(y=avg$average), colour="red")
g = g + geom_line(aes(y=max$max), colour="green")
g = g + geom_line(aes(y=min$min), colour="blue")
g = g + geom_line(aes(y=bbox_price$bbox_price), colour="purple")
print(g)
}
###############
# shipping cost by product eklenmeli
##############
##c-) Seller ratings, positive feedbacks and counts, product ratings and counts by product
data %>% group_by(pid) %>% summarise(average_sid_rating = mean(sid_rating))
data %>% group_by(pid) %>% summarise(average_sid_pos_fb = mean(sid_pos_fb))
data %>% group_by(pid) %>% summarise(average_sid_rating_cnt = mean(sid_rating_cnt))
data %>% group_by(pid) %>% summarise(average_pid_rating = mean(pid_rating))
data %>% group_by(pid) %>% summarise(average_pid_rating_cnt = mean(pid_rating_cnt))
##d-) Percentage of buy-box successes when Amazon is the seller against every other seller (total and by product and week)
##by product
a = data %>% filter(sid=="amazon", box=="success") %>% group_by(pid) %>% summarise(No_of_Amazon_Win = length((sid)))
a = a %>% add_row(pid="B00MVVI1FC" , No_of_Amazon_Win=0, .before = 5)
b = data %>% filter(sid=="amazon")%>% group_by(pid) %>% summarise(is_Amazon_seller = length((sid)))
winper = a[,2]/b[,2]
winper = winper %>% add_column(a[,1], .before = 1) %>% add_row(pid="B00AMFLZLG" , No_of_Amazon_Win=NA, .before = 3)
##total
a = data %>% filter(sid=="amazon", box=="success") %>% group_by(pid) %>% summarise(No_of_Amazon_Win = length((sid)))
colSums(a[,2])
b = data %>% filter(sid=="amazon")%>% group_by(pid) %>% summarise(is_Amazon_seller = length((sid)))
colSums(b[,2])
winpertotal = colSums(a[,2])/colSums(b[,2])
winper = winper %>% add_row(pid="total", No_of_Amazon_Win=winpertotal)
###############
#Weekly ratio eklenmeli
##############
##e-) Prime, FBA, Page and Rank information by seller and by product
##is_Prime
data %>% filter(bbox=="success", is_prime=="yes") %>% group_by(pid) %>% summarise(two_condition = length((is_prime)))
data %>% filter(bbox=="success", is_prime=="yes") %>% group_by(sid) %>% summarise(two_condition = length((is_prime)))
##is_fba
data %>% filter(bbox=="success", is_fba=="yes") %>% group_by(pid) %>% summarise(two_condition = length((is_fba)))
data %>% filter(bbox=="success", is_fba=="yes") %>% group_by(sid) %>% summarise(two_condition = length((is_fba)))
##Rank
data %>% filter(bbox=="success") %>% group_by(rank) %>% summarise(no_of_bbox_success = length((rank)))
data %>% filter(bbox=="success") %>% group_by(pid,rank) %>% summarise(no_of_bbox_success = length((rank)))
data %>% filter(bbox=="success") %>% group_by(sid,rank) %>% summarise(no_of_bbox_success = length((rank)))
##Page
data %>% filter(bbox=="success") %>% group_by(page) %>% summarise(no_of_bbox_success = length((page)))
data %>% filter(bbox=="success") %>% group_by(pid,page) %>% summarise(no_of_bbox_success = length((page)))
data %>% filter(bbox=="success") %>% group_by(sid,page) %>% summarise(no_of_bbox_success = length((page)))
##g-) What are the scales on which the data in each column is measured on?
str(data)
summary(data)
boxplot(data$price)
boxplot(data$sid_rating)
boxplot(data$sid_pos_fb)
boxplot(data$sid_rating_cnt)
boxplot(data$shipping)
boxplot(data$pid_rating)
boxplot(data$pid_rating_cnt)
aa=data %>% filter(bbox=="success") %>% group_by(pid) %>% summarise(result = mean((sid_rating_cnt)))
bb=data %>% group_by(pid) %>% summarise(average_sid_rating_cnt = mean(sid_rating_cnt))
aa
bb
data %>% filter(bbox=="failure") %>% group_by(page) %>% summarise(no_of_bbox_success = length((page)))
data %>% filter(bbox=="success") %>% group_by(page) %>% summarise(no_of_bbox_success = length((page)))
|
41474212ec7ef1a0c97a8c4d4cf8b43b372643af | c75c5b232956e2c4c042ccb7193ed128262408a5 | /aggregate boiler analysis_341 Bloor.R | d35ab87392cd9d5064345e71c5d26fa5c70dea07 | [] | no_license | cchenyq/Boiler-Data-Analysis | 3b56bd0c655d0418b99e21e982c9982e7ee717e4 | d7ec3a6a3114ef098fd538be330856db2b642d6e | refs/heads/master | 2020-03-27T17:01:26.489501 | 2018-08-31T06:11:42 | 2018-08-31T06:11:42 | 146,823,818 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,358 | r | aggregate boiler analysis_341 Bloor.R | library(dplyr)
library(tidyverse)
library(lubridate)
library(ggplot2)
get_daily_rt <- function(df, num_days) { # num_days is the # of days in the month
daily_rt <- vector()
for (i in 1:num_days) {
day <- subset(df, (day(df$Timestamp) == i))
if (nrow(day) > 1) {
t <- day[,1]
status <- day[,2]
sum_rt <- 0
for (i in 2:length(status)) {
start <- t[i-1] #previous index
end <- t[i] # current index
time.interval <- start %--% end
time.duration <- as.duration(time.interval) #subtracting the start time from the end time
if (status[i] == "Off" & (as.numeric(time.duration) / 60) < 6000) {
sum_rt <- sum_rt + (as.numeric(time.duration) / 60)
}
}
} else if (nrow(day) < 1) {
sum_rt <- 0
}
daily_rt <- append(daily_rt, sum_rt, after = length(daily_rt))
}
return(daily_rt) #returns a vector of the runtime of the specific boiler for each day
}
get_daily_gas <- function(df, num_days) {
  # Daily total gas consumption for a one-month data frame with columns
  # Timestamp and Gas; num_days is the number of days in the month.
  # Rows containing NA are dropped before summing, as in the original.
  vapply(
    seq_len(num_days),
    function(d) {
      readings <- na.omit(subset(df, (day(df$Timestamp) == d)))
      sum(readings$Gas)
    },
    numeric(1)
  )
}
get_avg_status <- function(v) {
  # Treat every stage as a pseudo boiler: it counts as "on" for the month
  # (returns 1) when it ran on at least 15 of the days, where v holds one
  # daily-runtime value per day; otherwise returns 0.
  active_days <- length(v[v > 0])
  if (active_days >= 15) 1 else 0
}
## Load the hourly gas meter log and keep only the 2017/2018 records.
gas_data <- read.table("hrlygas_341 Bloor.txt", header = TRUE, sep = "\t") #insert text file name here
colnames(gas_data) <- c("Timestamp", "Gas")
gas_data <- mutate(gas_data, Timestamp = dmy_hms(gas_data$Timestamp)) #reformatting the timestamp to make it easier to subset the data by date
usable <- filter(gas_data, (year(gas_data$Timestamp) == 2017 | year(gas_data$Timestamp) == 2018))
gas_jan <- filter(usable, (month(usable$Timestamp) == 1))
gas_feb <- filter(usable, (month(usable$Timestamp) == 2))
gas_mar <- filter(usable, (month(usable$Timestamp) == 3))
gas_apr <- filter(usable, (month(usable$Timestamp) == 4))
gas_dec <- filter(usable, (month(usable$Timestamp) == 12))
#boiler 1
b1_data <- read.table("cycle times_B1_341 Bloor.txt", header = TRUE, sep = "\t")
colnames(b1_data) <- c("Timestamp","Status")
b1_data <- mutate(b1_data, Timestamp = dmy_hms(b1_data$Timestamp))
b1_jan <- filter(b1_data, (month(b1_data$Timestamp) == 1))
b1_feb <- filter(b1_data, (month(b1_data$Timestamp) == 2))
b1_mar <- filter(b1_data, (month(b1_data$Timestamp) == 3))
b1_apr <- filter(b1_data, (month(b1_data$Timestamp) == 4))
b1_dec <- filter(b1_data, (month(b1_data$Timestamp) == 12))
#boiler 2
b2_data <- read.table("cycle times_B2_341 Bloor.txt", header = TRUE, sep = "\t")
colnames(b2_data) <- c("Timestamp","Status")
# NOTE(review): boiler 2 timestamps are parsed with ymd_hms while boiler 1
# uses dmy_hms -- presumably the two logs use different date formats; verify.
b2_data <- mutate(b2_data, Timestamp = ymd_hms(b2_data$Timestamp))
b2_jan <- filter(b2_data, (month(b2_data$Timestamp) == 1))
b2_feb <- filter(b2_data, (month(b2_data$Timestamp) == 2))
b2_mar <- filter(b2_data, (month(b2_data$Timestamp) == 3))
b2_apr <- filter(b2_data, (month(b2_data$Timestamp) == 4))
b2_dec <- filter(b2_data, (month(b2_data$Timestamp) == 12))
## Daily runtime and gas totals for Dec-Apr (month lengths 31,31,28,31,30).
b1_rt <- mapply(get_daily_rt, list(b1_dec, b1_jan, b1_feb, b1_mar, b1_apr), num_days = c(31,31,28,31,30))
b2_rt <- mapply(get_daily_rt, list(b2_dec, b2_jan, b2_feb, b2_mar, b2_apr), num_days = c(31,31,28,31,30))
daily_gas <- mapply(get_daily_gas, list(gas_dec, gas_jan, gas_feb, gas_mar, gas_apr), num_days = c(31,31,28,31,30))
avg_daily_gas <- sapply(daily_gas, mean)
total_runtime <- mapply(sum, b1_rt, b2_rt, SIMPLIFY = FALSE)
avg_daily_runtime <- mapply(function (x,y) x/y, total_runtime, c(31,31,28,31,30))
## A boiler counts as operating in a month when it ran on >= 15 days.
avg_status_b1 <- sapply(b1_rt, get_avg_status)
avg_status_b2 <- sapply(b2_rt, get_avg_status)
avg_numOn <- avg_status_b1 + avg_status_b2
## 1440 = minutes per day; the site has 2 boilers in total.
OBU <- 100*(avg_daily_runtime / (avg_numOn*1440)) #operating boiler utilization
TCU <- 100*(avg_daily_runtime / (2*1440)) #total capacity utilization
## Assemble the monthly summary (OAT column holds outdoor air temperatures).
avg <- data.frame(c("Dec","Jan","Feb","Mar","Apr"), c(-3.9,-4.4,-0.8,1.0, 4.5), avg_daily_gas, avg_daily_runtime, OBU, TCU)
colnames(avg) <- c("Month", "OAT", "Average daily gas consumption", "Total average daily boiler runtime", "Average OBU", "Average TCU")
write.csv(avg, file = "341 Bloor - Daily Runtime, Gas, OBU, and TCU.csv", row.names = FALSE)
cf9c5b65fb2d20c521357573fdac0c74d4c3d75e | 4d4b524b7d5e1cbb1be26e391144cfa6cbce72f4 | /man/wtPlotResult.Rd | 3f4efcdc0f0b8293bd6e8f8768b7de21feeff5df | [
"MIT"
] | permissive | KWB-R/kwb.wtaq | e6afc2455e5a828e8d5c98c19868df2d13c505a8 | f05c8cb30c48051c7f4b856c40571bae08facc27 | refs/heads/master | 2022-06-28T07:47:09.950237 | 2022-06-12T08:31:16 | 2022-06-12T08:31:16 | 60,534,495 | 3 | 0 | MIT | 2022-06-12T08:29:57 | 2016-06-06T14:26:52 | Fortran | UTF-8 | R | false | false | 1,372 | rd | wtPlotResult.Rd | \name{wtPlotResult}
\alias{wtPlotResult}
\title{Plot WTAQ results}
\description{Plot WTAQ results}
\usage{wtPlotResult(wtaqResult, main = "", plottype = "w", showMeasurements = TRUE,
auto.key = NULL, asp = NA, PDF = FALSE, PNG = FALSE, pumpingWellName = "PW",
xlim = NULL, ylim = NULL, ...)}
\arguments{
\item{wtaqResult}{data frame as returned by \code{\link{wtRunConfiguration}}}
\item{main}{plot title, default: ""}
\item{plottype}{vector of plot types ("s" = superposed, "w" = one plot per well,
"d" = along distance to pump well, "t" each time along distance to well).
Default: "w" (one plot per well).}
\item{showMeasurements}{if TRUE, measurements are shown}
\item{auto.key}{given to xyplot, see there. If NULL, a default key with as many columns as
there are wells is used. Default: NULL.}
\item{asp}{aspect ratio between x and y axis. Default: NA, i.e. the aspect
ratio does not matter; set to 1 for equal scaling of both axes.}
\item{PDF}{if TRUE, a pdf file is created in tempdir() and opened in a PDF viewer}
\item{PNG}{if TRUE, all plots made with plot type "t" are saved to png files in
tempdir()/wtaqResult.}
\item{pumpingWellName}{name of pumping well in wtaqResult}
\item{xlim}{limits of the x axis. Default: NULL (determined automatically)
}
\item{ylim}{limits of the y axis. Default: NULL (determined automatically)
}
\item{\dots}{additional arguments given to xyplot}
}
\author{Hauke Sonnenberg}
|
225b4b8cc36d32df4dce6659f3c6d54553b77957 | 60a99dc425d9edca7b3dec562f5cf6367d9c61ec | /MExPosition/man/print.doact.statis.compromise.Rd | f7bdfe28ef81195c1b66d5c7dfddf1efcc79d2c1 | [] | no_license | LukeMoraglia/ExPosition1 | e7718ae848608f1dc3934513c6588f53f2c45a7f | a69da6c5b0f14ef9fd031b98c3b40b34dad5240f | refs/heads/master | 2022-12-31T17:45:10.909002 | 2020-10-22T19:45:49 | 2020-10-22T19:45:49 | 255,486,130 | 0 | 1 | null | 2020-10-22T18:08:38 | 2020-04-14T02:01:12 | R | UTF-8 | R | false | false | 444 | rd | print.doact.statis.compromise.Rd | \name{print.doact.statis.compromise}
\alias{print.doact.statis.compromise}
\title{Print the results of the Compromise for DO-ACT}
\description{S3 Class function to print results for MExPosition.}
\usage{\method{print}{doact.statis.compromise}(x,\dots)}
\arguments{
\item{x}{an object of class DO-ACT}
\item{\dots}{inherited/passed arguments for S3 print method(s).}
}
\author{Cherise R. Chin Fatt \email{cherise.chinfatt@utdallas.edu}
} |
f93951bd6103157cb4188fc21e87d70e4c55b66c | fbea782e161a7173ef98e7fa34d90743d75743e1 | /other/package_installs.R | 77c6fffec7087f7b725ce4df20343e3eb7057281 | [] | no_license | edimaudo/R-projects | c3523a0bb551b0a945f12aa8d6c34107d7dd7ced | 80af84e08fef61dfec4c87e7059f6dc50e24f868 | refs/heads/master | 2022-10-27T03:08:59.306013 | 2022-10-23T00:55:24 | 2022-10-23T00:55:24 | 94,769,351 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 116 | r | package_installs.R | install.packages ("reshape2")
install.packages ("dplyr")
install.packages ("ggplot2")
install.packages ("stringr")
|
41b9e75db883d5108d5e62df15a6ee7b569e2098 | 294a336d95509a0ce4b0d1a3ed9b77adc5a3bd13 | /man/breadth_profile.Rd | e2670caa1f9e64458a3f06504655a69ed7569677 | [] | no_license | anttonalberdi/ENMhill | 80069a2b6ad03c97912fb4d67df2bae3ab2094e3 | 2e26e0f5d67885b1f33d87ec2a15d2d02f1534f8 | refs/heads/master | 2020-08-17T20:09:25.836753 | 2019-10-23T12:48:32 | 2019-10-23T12:48:32 | 215,706,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,987 | rd | breadth_profile.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/breadth_profile.R
\name{breadth_profile}
\alias{breadth_profile}
\title{Spatial breadth profile}
\usage{
breadth_profile(raster, qvalues, relative, threshold)
}
\arguments{
\item{raster}{A RasterLayer (single projection) or RasterStack (multiple projections) object containing ENM projection(s) with suitability scores.}
\item{qvalues}{A vector of sequential orders of diversity (default from 0 to 5). qvalues=seq(from = 0, to = 5, by = (0.1))}
\item{relative}{Whether to compute absolute or relative breadths. Default=FALSE}
\item{threshold}{Suitability value(s) below which all values are converted into zeros. If a RasterStack (multiple projections) is used, the argument should contain a vector of threshold values.}
}
\value{
A vector of breadth values at different orders of diversity or a matrix containing breadth values at different orders of diversity (columns) per projection (rows).
}
\description{
Compute profiles of Hill numbers-based spatial breadths from one or multiple Environmental Niche Model projection rasters.
}
\details{
Spatial breadth profile computation based on Hill numbers
}
\examples{
data()
breadth_profile(rasters[[1]])
breadth_profile(rasters[[1]],qvalues=seq(from = 0, to = 5, by = 1))
breadth_profile(rasters[[1]],qvalues=seq(from = 0, to = 5, by = 1), relative=TRUE)
breadth_profile(rasters,qvalues=seq(from = 0, to = 5, by = 1), relative=TRUE)
}
\references{
Alberdi, A., Novella-Fernandez R., Aizpurua, O., Razgour, O. (2019). Measuring breadth and overlap of spatial projections of environmental niche models based on Hill numbers.\cr\cr
Alberdi, A., Gilbert, M.T.P. (2019). A guide to the application of Hill numbers to DNA-based diversity analyses. Molecular Ecology Resources, 19, 804-817.\cr\cr
}
\seealso{
\code{\link{breadth}}
}
\author{
Antton Alberdi, \email{anttonalberdi@gmail.com}
}
\keyword{breadth}
\keyword{diversity}
\keyword{hill}
\keyword{profile}
|
0da9c02ab18935e5ce50820fa860cb35c94c5a7f | 0a906cf8b1b7da2aea87de958e3662870df49727 | /distr6/inst/testfiles/C_EmpiricalMVCdf/libFuzzer_C_EmpiricalMVCdf/C_EmpiricalMVCdf_valgrind_files/1610037035-test.R | 2eed7c743c07c4e74454dda22cc4ea4257544abb | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 494 | r | 1610037035-test.R | testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(1.75150903897595e-260, 9.28267364693302e-215, 1.57064017100971e-307, 6.16474212895706e-270, 5.02154668707453e-304, 1.13659433913701e-259, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 7L)))
result <- do.call(distr6:::C_EmpiricalMVCdf,testlist)
str(result) |
b849af5fc63edd86fc58e9171112ca14c8f5e2ee | cc3c93f79576701be76eeafec0d3907e3f394970 | /generate_dashboard.R | 9fa8728453fdb554a9bba163d28b7582a9d82abc | [] | no_license | mkao006/cpmb_dashboard | 42ead463eacd18928c718626b50c3f8b2c27bc2d | 1f51825fa2f8ac984f1b532d2310ba50cf411df4 | refs/heads/master | 2021-01-23T03:54:32.400226 | 2014-08-19T15:16:40 | 2014-08-19T15:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,592 | r | generate_dashboard.R | ########################################################################
## Title: Script for the FAO strategic objective scorecard.
## Date: 2013-08-02
########################################################################
library(plyr)
library(reshape2)
library(data.table)
SO = "so5"
path = paste0(getwd(), "/dashboard_", SO,"/")
template = "benchmark"
version = "B"
texFileName = paste0(SO, "dashboard", template, version, ".tex")
pdfFileName = gsub("tex", "pdf", texFileName)
## Data are taken from the Statistical yearbook for consistency
library(FAOSTAT)
source("dashboard_package.R")
source("metaToDissemination.R")
## Load dissemination file
dissemination.df =
read.csv(file = paste0(path, SO, "_dissemination.csv"),
stringsAsFactors = FALSE)
dissemination.df = dissemination.df[which(dissemination.df[, paste0("SCORECARD_", version)]), ]
## Load statistical yearbook data
##
## NOTE (Michael): Need access to the SYB data base
## load("~/Dropbox/SYBproject/RegionalBooks/Data/SO.RData")
load("C:/Users/kao/Dropbox/SYBproject/RegionalBooks/Data/SO.RData")
## final.df = M49.lst$dataset
final.df = SO.df
final.df$UN_CODE = NULL
final.df$FAO_TABLE_NAME = NULL
## final.df = final.df[final.df$Area == "Territory", ]
final.df = final.df[final.df$Area == "M49world", ]
## Manually convert back zero to NA for GHG data
final.df[final.df$Year %in% c(2011:2013),
grep("GHG", colnames(final.df), value = TRUE)] = NA
## Convert data to numeric and subset countries
final.df$FAOST_CODE = as.numeric(final.df$FAOST_CODE)
## final.df = final.df[which(final.df$FAOST_CODE %in%
## na.omit(c(FAOregionProfile[!is.na(FAOregionProfile$UNSD_MACRO_REG),
## "FAOST_CODE"]))), ]
## Construct new variables
allData.df = arrange(final.df, FAOST_CODE, Year)
allData.dt = data.table(allData.df)
## allData.dt[, AQ.WAT.PROD.SH.NO := NV.AGR.TOTL.KD/AQ.WAT.WWAGR.MC.NO]
if(SO == "so4"){
tmp = read.csv(file = "./GV.VS.FPI.IN.NO.csv",
header = TRUE, stringsAsFactors = FALSE)
allData.dt = merge(allData.dt, tmp, by = "Year", all.x = TRUE)
}
## if(SO == "so5"){
## tmp = read.csv(file = "./old_manual_data/TP.FO.AID.NO.csv",
## header = TRUE, stringsAsFactors = FALSE)
## allData.dt = merge(allData.dt, tmp, by = c("FAOST_CODE", "Year"),
## all.x = TRUE)
## allData.dt[, TP.FO.AID.SHP := (TP.FO.AID.NO * 1000)/POP.TOT]
## }
allData.df = data.frame(allData.dt)
## Take only variables required to be disseminated
SO.df = subset(allData.df,
select = c("FAOST_CODE", "Year",
intersect(dissemination.df$DATA_KEY, colnames(allData.df))))
SO.df = merge(SO.df,
FAOcountryProfile[!is.na(FAOcountryProfile$FAOST_CODE),
c("FAOST_CODE", "ABBR_FAO_NAME")],
all.x = TRUE, by = "FAOST_CODE")
if(SO == "so2"){
SO.df$DT.OUT.UTUN.POP.SH.HACK = SO.df$QI.NPCPIN.CRPS.IN.NO
SO.df$DT.OUT.UTST.POP.SH.HACK = SO.df$QI.NPCPIN.CRPS.IN.NO
}
if(SO == "so5"){
SO.df$DO.OUT.ACDFD.DP.NO.HACK = runif(NROW(SO.df))
SO.df$SH.DYN.MORT.HACK = runif(NROW(SO.df))
}
## Check whether all variables are included
if(NROW(dissemination.df[which(dissemination.df[, paste0("SCORECARD_", version)]), c("DATA_KEY", "SERIES_NAME")][!dissemination.df[which(dissemination.df[, paste0("SCORECARD_", version)]), "DATA_KEY"] %in% colnames(SO.df),]) != 0L){
warning("Some variables are not available in the data base, missing values are inserted")
missVar = dissemination.df[which(dissemination.df[, paste0("SCORECARD_", version)]), c("DATA_KEY", "SERIES_NAME")][!dissemination.df[which(dissemination.df[, paste0("SCORECARD_", version)]), "DATA_KEY"] %in% colnames(SO.df),]
cat(missVar[, 1], file = paste0(path, SO, "_missvar.txt"))
SO.df[, missVar[, "DATA_KEY"]] = rep(NA, NROW(SO.df))
}
## Scale the units
source("C:/Users/kao/Dropbox/SYBproject/Packages/InternalCodes/scaleUnit.R")
thirdQuantileUnit = function(x){
q3 = quantile(x, prob = 0.75, na.rm = TRUE)
tmp = paste0("1e", as.numeric(nchar(as.character(round(q3)))) - 1)
q3unit = eval(parse(text = tmp))
q3unit
}
scale.df = data.frame(DATA_KEY = dissemination.df$DATA_KEY[dissemination.df$DATA_KEY %in%
colnames(SO.df)],
MULTIPLIER = sapply(SO.df[, dissemination.df$DATA_KEY[dissemination.df$DATA_KEY %in%
colnames(SO.df)]],
FUN = thirdQuantileUnit))
## Remove all scaling for the world
scale.df$MULTIPLIER = 1
dissemination.df = merge(dissemination.df, scale.df, all.x = TRUE)
scaleVec = 1/dissemination.df$MULTIPLIER
names(scaleVec) = dissemination.df$DATA_KEY
sSO.df = scaleUnit(df = SO.df, multiplier = scaleVec)
## Melt into ER form
mSO.df = melt(sSO.df, id.var = c("FAOST_CODE", "ABBR_FAO_NAME",
"Year"))
mSO.df$variable = as.character(mSO.df$variable)
finalScorecards.df = merge(mSO.df,
dissemination.df[, c("DATA_KEY", "SERIES_NAME", "TOPIC",
paste0("ORDER_", version),
"MULTIPLIER", "ORIG_UNIT")],
all.x = TRUE, by.x = "variable", by.y = "DATA_KEY")
index = sapply(regexpr("\\(", finalScorecards.df$SERIES_NAME),
FUN = function(x) x[[1]]) - 2
index[index == -3] = 1000000L
newQuantity = translateQuantity(finalScorecards.df$MULTIPLIER)
newQuantity[is.na(newQuantity)] = ""
trim <- function(x){
gsub("^[[:space:]]+|[[:space:]]+$", "", x)
}
newUnitName = trim(paste(newQuantity, " ", finalScorecards.df$ORIG_UNIT,
sep = ""))
finalScorecards.df$SERIES_NAME =
gsub("\\(\\)", "", paste(substr(finalScorecards.df$SERIES_NAME, 1,
index), " (", newUnitName, ")",
sep = ""))
## Sort the data and change the column name
sortedFAO.df = arrange(finalScorecards.df,
eval(parse(text = paste0("ORDER_", version))), FAOST_CODE, Year)
## Transform the text for LaTeX
sortedFAO.df[, "SERIES_NAME"] =
sanitizeToLatex(sortedFAO.df[, "SERIES_NAME"], type = "table")
## Rename the columns
colnames(sortedFAO.df)[colnames(sortedFAO.df) %in%
c("FAOST_CODE", "ABBR_FAO_NAME", "SERIES_NAME",
"TOPIC")] =
c("areaCode", "areaName", "indicatorName", "topic")
scorecard.df = sortedFAO.df[, c("areaCode", "areaName", "Year",
"variable", "indicatorName", "topic", "value")]
set.seed(587)
## mySampleCountry = sample(x = unique(scorecard.df$areaCode), size = 10)
mySampleCountry = unique(scorecard.df$areaCode)
scorecard.df$areaName = "World"
scorecardFAO(variable = unique(scorecard.df$variable),
data = scorecard.df[scorecard.df$areaCode %in%
mySampleCountry, ],
file = paste0(path, texFileName),
startYear = 2002, endYear = 2013, baselineYear = 2000,
layout = template)
## Create meta data table
texMetaFileName = paste0(SO, "dashboard", version, "Meta.tex")
pdfMetaFileName = gsub("tex", "pdf", texMetaFileName)
cat("\\documentclass{faoyearbook}\\begin{document}",
file = paste0(path, texMetaFileName), append = FALSE)
metaTable.df = dissemination.df[dissemination.df[,
paste0("SCORECARD_", version)], ]
metaTable.df[, "SERIES_NAME"] =
sanitizeToLatex(metaTable.df[, "SERIES_NAME"], type = "table")
metaTable.df[, "INFO"] =
sanitizeToLatex(metaTable.df[, "INFO"], type = "table")
for(i in 1:NROW(metaTable.df)){
cat("\\begin{metadata}{", metaTable.df[i, "SERIES_NAME"] ,"}{",
metaTable.df[i, "DATA_KEY"], "}", metaTable.df[i, "INFO"],
"\\end{metadata}", file = paste0(path, texMetaFileName), append = TRUE)
}
cat("\\end{document}", file = paste0(path, texMetaFileName),
append = TRUE)
|
6bd1364710327e8e88822e638b3a7b1e83b2c0c9 | 1df1f61b46f89c0af6f1e2f2908f3be04f855df6 | /Taller_Mod2_IntroRcompac.R | 76a2edad28bc79a8fae25dedcc675893316023b3 | [] | no_license | erichr2018/Modulo2_Taller | e41fd486a7ca1ee4d76a29686dbf0e76e23712c8 | 495f221fcc601b9a33c0931b2f748bc65228fab8 | refs/heads/master | 2020-03-29T22:47:24.891467 | 2018-09-28T20:12:35 | 2018-09-28T20:12:35 | 150,440,540 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 4,505 | r | Taller_Mod2_IntroRcompac.R | ############################################################
# Curso Redes Aplicadas 2018 .... #
############################################################
# Sesion 2: #
###########28 de septiembre 2018############################
############################################################
# Taller introduccion a R #
############################################################
## Objetivo: Familiarizar al alumno con algunas de las funciones
# de R, utiles al momento de crear un grafo de Igraph.
## Las funciones que vamos a aprender en este Modulo son:
# 1. vectores (carateres)
# 2. Listas
# 3. Matrices
# 4. DataFrame
#Vectors
v1 <- c(1, 5, 11, 32)
v3 <- c(TRUE, TRUE, FALSE)
v2 <- c("hola", "mundo")
# paste() : Esta funcion toma un numero arbitrario de argumentos y los concatena
# uno por uno en una cadena de caracteres. Cualesquiera numeros dados
# entre los argumentos son coercionados en un argumento caracter, esto es,
# los argumentos son separados por default en el resultado por un caracter
# de espacio en blanco, pero esto puede ser cambiado por los argumentos
# llamados sep = string, que cambia el espacio en blaco por string,
# posiblemtne vacio.
labs <- paste(c("X","Y"),1:10)
labs
c(labs)
labs2 <- paste(c("X", "Y"), 1:10, collapse = ",")
labs2
labs3 <- paste(c("X","Y"),1:10, sep = ", ")
labs3
labs4 <- paste(c("X","Y"),1:10, sep = "") #quita espacio en blaco
labs4
v1 <- c(1, 5, 11, 32)
v2 <- c("hola", "mundo")
v3 <- c(TRUE, TRUE, FALSE)
v4 <- c(v1, v2, v3, "boo")
v4
v <- 1:7 # lo mismo que c(1,2,3,4,5,6,7)
v <- rep(0, 77) # esto repite el cero 77 veces
v <- rep(1:3, times=2) # repite 1,2,3 dos veces
v <- rep(1:10, each=2) # repite cada elemento dos veces
v <- seq(10,20,2) # secuence: numeros entre 10 y 20, en saltos de 2
v1 <- 1:5 #1,2,3,4,5
v2 <- rep(1,5) #1,1,1,1,1
#Revisa la longitud de un vector
length(v1) #longitud de v1
length(v2) # longitud de v2
#Marices y Arreglos
m <- rep(1,20) # Un vector de 20 elementos, todos 1
m
dim(m) <- c(5,4) # Agrupa las dimesiones del conjunto a 5 X 4 , por lo tanto m es una matriz de 5X4
m # 5(renglones) x 4(columnas) c(5,4) c(renglones, columnas)
# Creando matrices por medio del comando matrix()
m <- matrix(data=1, nrow=5, ncol=4) # lo mismo que la matriz de arriba, 5x4, llena de 1s
m
m <- matrix(1,5,4) # esto hace lo mismo que la matriz de arriba (renglon anterior)
dim(m) # te devuelve renglones columnas 5x4
#Vamos a crear una matriz combinando vectores
m <- cbind(1:5, 5:1, 5:9) # matriz de 5 rengloens x 3 columnas
m
m <- rbind(1:5, 5:1, 5:9) # matriz de 3 renglones x columnas
m
# Seleccionando elementos de la matriz
m[2,3] # renglon 2 columna 3 este es 3
m[2,] # todos los elementos del renglon 2
m[,2] # todos los elementos de la columna 2 2,4,6
m <- matrix(1:10, 10, 10)
m
m[1:2, 4:6] #Submatriz: renglon 1 y 2 y columnas 4,5 y 6
m[-1,] # todos los renglones *excepto* el primero
#Algunas operaciones con matrices
m[1,]==m[,1] # Todos los elemetos del renglon 1 son equivalentes a los correspondientes elemntos de la columna1
m>3 # Una matriz logica: Verdadero para los m elementos > 3, Falso en otro caso:
m[m>3] #Selecciona solo los elementos Verdaderos, es decir, los que son mayores de 3
m
t(m) # traspuesta
m <- t(m) # Asignamos a m la traspuesta de m
m
m %*% t(m) # hace la multiplicacion MATRICIAL
m*m # hace una multiplicacion elemento a elemento
# Los arreglos o array son utilizados cuando tenemos más de dos dimensiones
a <- array(data=1:18,dim=c(3,3,2)) # 3d con dimensiones 3x3x2
a
#El anterior es un vector de 2 entradas donde cada entrada es un matriz de 3x3
a <- array(1:18, c(3,3,2)) # lo mismo de arriba
a
#LISTAS
#Las listas son colecciones de objeos. Una sola lista puede contener todas las clases de elementos
# - cadenas de caracteres, vectores numericos, matrices, y otras listas y más. Los elementos de las listas
# a menudo llamadas para acceso sencillo
v
v2
v3
l1 <- list(boo=v , foo=v2 , moo=v3 , zoo="Animals!") # una lista con cuatro componetes
l1
l2 <- list(v1, v2, v3, "Animals!")
l1
l2
l3 <- list()
l3
l4 <- NULL
|
e9a6aa5556bc0253edb921425311deffa3cad0ef | c6ffe1c30ab6f0d2d055c1dbf69a7c4d7fb5876e | /clean_data/clean_hotels/CleanDataHotel.R | 7e7294b00114e6a79a7650d119845565a74d9120 | [] | no_license | STAT133-Summer2016/finalproject-airbnb-team | eda415e5abe139520225263cbab4e431177257d2 | c91db49a87e1d554221081ddcb75415c57652472 | refs/heads/master | 2021-01-17T17:46:47.436130 | 2016-08-10T00:12:10 | 2016-08-10T00:12:10 | 63,273,451 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,788 | r | CleanDataHotel.R | ## San Francisco
sf_dbl = read_csv("sf_dbl.csv")
sf_dbl <- sf_dbl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "San Francisco") %>%
mutate(Bed = 2)
colnames(sf_dbl)[6] <- "Bed(s)"
sf_sgl <- read_csv("sf_sgl.csv")
sf_sgl <- sf_sgl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "San Francisco") %>%
mutate(Bed = 1)
colnames(sf_sgl)[6] <- "Bed(s)"
sf_final <- rbind(sf_sgl,sf_dbl)
## NYC
ny_dbl <- read_csv("ny_dbl.csv")
ny_dbl <- ny_dbl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "New York") %>%
mutate(Bed = 2)
colnames(ny_dbl)[6] <- "Bed(s)"
ny_sgl <- read_csv("ny_sgl.csv")
ny_sgl <- ny_sgl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "New York") %>%
mutate(Bed = 1)
colnames(ny_sgl)[6] <- "Bed(s)"
ny_final <- rbind(ny_sgl,ny_dbl)
## Los Angeles
la_dbl <- read_csv("la_dbl.csv")
la_dbl <- la_dbl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "Los Angeles") %>%
mutate(Bed = 2)
colnames(la_dbl)[6] <- "Bed(s)"
la_sgl <- read_csv("la_sgl.csv")
la_sgl <- la_sgl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "Los Angeles") %>%
mutate(Bed = 1)
colnames(la_sgl)[6] <- "Bed(s)"
la_final <- rbind(la_sgl,la_dbl)
## Chicago
ch_dbl <- read_csv("ch_dbl.csv")
ch_dbl <- ch_dbl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "Chicago") %>%
mutate(Bed = 2)
colnames(ch_dbl)[6] <- "Bed(s)"
ch_sgl <- read_csv("ch_sgl.csv")
ch_sgl <- ch_sgl %>%
mutate(Room.Type ="Private room") %>%
mutate(City = "Chicago") %>%
mutate(Bed = 1)
colnames(ch_sgl)[6] <- "Bed(s)"
ch_final <- rbind(ch_sgl,ch_dbl)
#aggregate data frame for all four cities
cleaned_four_hotels = rbind(la_final, ch_final, ny_final, sf_final)
write_csv(cleaned_four_hotels, "cleaned_four_hotels.csv")
|
3c83808cff7c9d5a51a46a4297efd06357208d4e | 7a16dd81e0267984acc6df921eaa135466fd7640 | /codes/countclust_ebony_nd_bl.R | 0aa84e9cc8d89ce69e0f98bcb29b07e9aa74d654 | [] | no_license | kkdey/Black_magazines | f21d9ae4e6ee9960069d7a54906ddba4fd6b463a | 66d0c0bbe134b03b7531fa60fc788010b9e15946 | refs/heads/master | 2020-03-10T04:45:18.249343 | 2018-08-02T04:52:00 | 2018-08-02T04:52:00 | 129,200,350 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,268 | r | countclust_ebony_nd_bl.R |
########### CountClust on Ebony + Black World + Negro Digest ##################
tab_ebony <- get(load("../output/table_word_frequencies_ebony.rda"))
tab_bl_nd <- get(load("../output/table_word_frequencies.rda"))
all_words_ebony <- get(load("../output/all_words_ebony.rda"))
all_words_bl_nd <- get(load("../output/all_words.rda"))
common_words <- intersect(all_words_ebony, all_words_bl_nd)
tab_pooled <- rbind(tab_ebony[,match(common_words, all_words_ebony)],
tab_bl_nd[,match(common_words, all_words_bl_nd)])
rownames(tab_pooled) <- c(paste0("Ebony_", 1961:1976),
paste0("ND_", 1961:1970), paste0("BL_", 1970:1976))
one_occur_words <- apply(tab_pooled, 2, function(x) return(sum(x[x!=0])))
tab2_pooled <- tab_pooled[, which(one_occur_words > 2)]
tab2_pooled <- tab2_pooled[, which(nchar(colnames(tab2_pooled)) > 2)]
topic_clus <- maptpx::topics(tab2_pooled, K=2, tol = 1)
save(topic_clus, file = "../output/CountClust_k_2_Ebony_BL_ND_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled, K=3, tol = 1)
save(topic_clus, file = "../output/CountClust_k_3_Ebony_BL_ND_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled, K=4, tol = 1)
save(topic_clus, file = "../output/CountClust_k_4_Ebony_BL_ND_cutoff_2.rda")
########################## K = 2 #####################################
library(CountClust)
topic_clus <- get(load("../output/CountClust_k_2_Ebony_BL_ND_cutoff_2.rda"))
omega <- topic_clus$omega
annotation <- data.frame(
sample_id = paste0("X", c(1:NROW(omega))),
tissue_label = factor(rownames(omega),
levels = rownames(omega)))
rownames(omega) <- annotation$sample_id;
StructureGGplot(omega = omega,
annotation = annotation,
palette = RColorBrewer::brewer.pal(8, "Accent"),
yaxis_label = "Years of Publication",
order_sample = TRUE,
axis_tick = list(axis_ticks_length = .1,
axis_ticks_lwd_y = .1,
axis_ticks_lwd_x = .1,
axis_label_size = 7,
axis_label_face = "bold"))
out <- ExtractTopFeatures(topic_clus$theta, top_features = 50, method = "poisson", options = "min")
driving_words <- apply(out$indices, c(1,2), function(x) return(rownames(topic_clus$theta)[x]))
###################### only Ebony articles #####################################
tab_pooled_1 <- tab_pooled[1:16,]
one_occur_words_1 <- apply(tab_pooled_1, 2, function(x) return(sum(x[x!=0])))
tab2_pooled_1 <- tab_pooled_1[, which(one_occur_words_1 > 2)]
tab2_pooled_1 <- tab2_pooled_1[, which(nchar(colnames(tab2_pooled_1)) > 2)]
tab2_pooled_1 <- tab2_pooled_1[, which(colnames(tab2_pooled_1) != "tlie")]
topic_clus <- maptpx::topics(tab2_pooled_1, K=2, tol = 1)
save(topic_clus, file = "../output/CountClust_k_2_Ebony_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled_1, K=3, tol = 1)
save(topic_clus, file = "../output/CountClust_k_3_Ebony_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled_1, K=4, tol = 1)
save(topic_clus, file = "../output/CountClust_k_4_Ebony_cutoff_2.rda")
#################### K = 2 Ebony ############################
topic_clus <- get(load("../output/CountClust_k_2_Ebony_cutoff_2.rda"))
omega <- topic_clus$omega
annotation <- data.frame(
sample_id = paste0("X", c(1:NROW(omega))),
tissue_label = factor(rownames(omega),
levels = rownames(omega)))
rownames(omega) <- annotation$sample_id;
StructureGGplot(omega = omega,
annotation = annotation,
palette = RColorBrewer::brewer.pal(8, "Accent"),
yaxis_label = "Years of Publication",
order_sample = TRUE,
axis_tick = list(axis_ticks_length = .1,
axis_ticks_lwd_y = .1,
axis_ticks_lwd_x = .1,
axis_label_size = 7,
axis_label_face = "bold"))
out <- ExtractTopFeatures(topic_clus$theta, top_features = 50, method = "poisson", options = "min")
driving_words <- apply(out$indices, c(1,2), function(x) return(rownames(topic_clus$theta)[x]))
################# Remove black, blacks, negro, negroes ##############
negro_words <- grep("negro", colnames(tab_pooled))
black_words <- match(c("black", "blacks", "blackman"), colnames(tab_pooled))
tab_pooled_filtered <- tab_pooled[, - union(negro_words, black_words)]
one_occur_words_2 <- apply(tab_pooled_filtered, 2, function(x) return(sum(x[x!=0])))
tab2_pooled_2 <- tab_pooled_filtered[, which(one_occur_words_2 > 2)]
tab2_pooled_2 <- tab2_pooled_2[, which(nchar(colnames(tab2_pooled_2)) > 2)]
topic_clus <- maptpx::topics(tab2_pooled_2, K=2, tol = 1)
save(topic_clus, file = "../output/CountClust_wo_negro_black_k_2_Ebony_BL_ND_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled_2, K=3, tol = 1)
save(topic_clus, file = "../output/CountClust_wo_negro_black_k_3_Ebony_BL_ND_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled_2, K=4, tol = 1)
save(topic_clus, file = "../output/CountClust_wo_negro_black_k_4_Ebony_BL_ND_cutoff_2.rda")
#################### K = 2 filtered ############################
topic_clus <- get(load("../output/CountClust_wo_negro_black_k_4_Ebony_BL_ND_cutoff_2.rda"))
omega <- topic_clus$omega
annotation <- data.frame(
sample_id = paste0("X", c(1:NROW(omega))),
tissue_label = factor(rownames(omega),
levels = rownames(omega)))
rownames(omega) <- annotation$sample_id;
StructureGGplot(omega = omega,
annotation = annotation,
palette = RColorBrewer::brewer.pal(8, "Accent"),
yaxis_label = "Years of Publication",
order_sample = TRUE,
axis_tick = list(axis_ticks_length = .1,
axis_ticks_lwd_y = .1,
axis_ticks_lwd_x = .1,
axis_label_size = 7,
axis_label_face = "bold"))
out <- ExtractTopFeatures(topic_clus$theta, top_features = 50, method = "poisson", options = "min")
driving_words <- apply(out$indices, c(1,2), function(x) return(rownames(topic_clus$theta)[x]))
############## only Ebony with filtered black and negro ####################
tab_pooled_3 <- tab_pooled_filtered[1:16, which(nchar(colnames(tab_pooled_filtered)) > 2)]
one_occur_words_3 <- apply(tab_pooled_3, 2, function(x) return(sum(x[x!=0])))
tab2_pooled_3 <- tab_pooled_3[, which(one_occur_words_3 > 2)]
tab2_pooled_3 <- tab2_pooled_3[, which(colnames(tab2_pooled_3) != "tlie")]
topic_clus <- maptpx::topics(tab2_pooled_3, K=2, tol = 1)
save(topic_clus, file = "../output/CountClust_wo_negro_black_k_2_Ebony_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled_3, K=3, tol = 1)
save(topic_clus, file = "../output/CountClust_wo_negro_black_k_3_Ebony_cutoff_2.rda")
topic_clus <- maptpx::topics(tab2_pooled_3, K=4, tol = 1)
save(topic_clus, file = "../output/CountClust_wo_negro_black_k_4_Ebony_cutoff_2.rda")
#################### K = 2 filtered Ebony ############################
topic_clus <- get(load("../output/CountClust_wo_negro_black_k_2_Ebony_cutoff_2.rda"))
omega <- topic_clus$omega
annotation <- data.frame(
sample_id = paste0("X", c(1:NROW(omega))),
tissue_label = factor(rownames(omega),
levels = rownames(omega)))
rownames(omega) <- annotation$sample_id;
StructureGGplot(omega = omega,
annotation = annotation,
palette = RColorBrewer::brewer.pal(8, "Accent"),
yaxis_label = "Years of Publication",
order_sample = TRUE,
axis_tick = list(axis_ticks_length = .1,
axis_ticks_lwd_y = .1,
axis_ticks_lwd_x = .1,
axis_label_size = 7,
axis_label_face = "bold"))
out <- ExtractTopFeatures(topic_clus$theta, top_features = 50, method = "poisson", options = "min")
driving_words <- apply(out$indices, c(1,2), function(x) return(rownames(topic_clus$theta)[x]))
|
98efecc56d2bee68e7a9c45b928730e493eaca94 | ffd8d6c91609c708439aa6fe49d24325db01a264 | /plot2.R | 96ce96294cdde3877ac8deed67aa2191ead071e2 | [] | no_license | ebloo/ExplDataAnalysisPrj1 | e5df590127e2a7ff62ec01e0c4f5c8a5c56889f8 | 0f9c8fc586ceeb2bef78d59f67f7004a934dc970 | refs/heads/master | 2021-01-10T12:25:23.167910 | 2015-11-09T00:04:03 | 2015-11-09T00:04:03 | 45,804,489 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 595 | r | plot2.R | ## setwd("//Msad/root/NA/NY/users/desevan/My Documents/admin/datasciencecoursera/exploratoryanalysis/prj1")
# read in the data
data <- read.table("household_power_consumption.txt",sep=";",header=TRUE)
summary(data)
# take a subset
data <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
# extract date/time
dt <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# plot
plot(dt, data$Global_active_power, type="l", col="black", xlab="", ylab="Global Active Power (kilowatts)")
# copy plot to a PNG file
dev.copy(png, file = "plot2.png", width=480, height=480)
dev.off()
|
fd107d72a38f5a6a3db4eebe7e049f1bee761f9d | de8eac546dd1a7c3fb269735bf2283ea7b221cb4 | /r/compare_idw.R | ae99765e7535f885c0cec85e6a5f7c22e3f43bb1 | [] | no_license | nronnei/Geo866-Final | 37e1ca409ebc3f8711e6bff83d8a8afe9bdb2341 | 48460b6daff031450c6554ddfde7d8b92e430f7e | refs/heads/master | 2020-06-16T13:10:18.505413 | 2017-07-07T17:18:48 | 2017-07-07T17:18:48 | 75,098,903 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 489 | r | compare_idw.R | setwd("/home/nronnei/gis/class/spatial_analysis/final_project/")
dc.r <- read.csv("./data/points/study_points_R.csv", header = T, sep = ",")
dc.p <- read.csv("./data/points/study_points_Python.csv", header = T, sep = ",")
com.x <- dc.r$x[dc.r$x == dc.p$x]
com.y <- dc.r$y[dc.r$y == dc.p$y]
com.dc <- dc.r$dc[dc.r$dc == dc.p$dc]
hist(dc.p$dc)
hist(dc.r$dc)
# Doesn't look great. Values are close, but not identical.
# Shape of the distributions is similar as well, but still not great. |
c062d259b418cebed9fbdfeb575d9f8b46e730e9 | 877459dbe42dcf958f12bce9a58c2312eb30fbb9 | /src/ConnectivityScores.R | cf7e5be385d2f6601d0675081049084a69e4eaae | [] | no_license | rhodos/cfdrug | f51b9dda38543836e4820df2c204623629fe5e46 | e70c56f3f205690dbbdcac42ab7b8dcd096207bc | refs/heads/main | 2023-03-25T23:20:46.413982 | 2021-03-08T12:59:27 | 2021-03-08T12:59:27 | 345,197,619 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,669 | r | ConnectivityScores.R |
library(foreach)
# Score a query signature against every LINCS signature block, in parallel.
#
# Args:
#   querySig:            query gene signature (format consumed by est_emp_Cs /
#                        cMap_CS / XSum_CS, defined elsewhere -- TODO confirm).
#   signatureBlockFiles: character vector of .RData block files, each holding
#                        one block of LINCS signatures (see loadLincsBlock).
#   sigFilters:          list of signature filters forwarded to loadLincsBlock
#                        (onlyGold, onlyChemPert, debugNumber).
#   nPerm:               number of permutations for the empirical null model.
#   show_progress:       show text progress bars in the worker functions.
#   annotFilter:         if TRUE, restrict results to perturbagens present in
#                        the curated LINCS compound annotation file.
#   debug:               if TRUE, process only the first block file (the
#                        loaders further subset the signatures).
#   csMetric:            'ks' (cMap-style KS score) or 'xsum' (XSum score).
#
# Returns:
#   A named list, one entry per perturbation type (pert_type), each a
#   per-perturbagen summary table (see collapsePerturbagens).
LINCS_CS_parallel = function(querySig, signatureBlockFiles, sigFilters=NULL, nPerm=50,
                             show_progress=TRUE, annotFilter=TRUE, debug=FALSE, csMetric='ks'){
  if(debug){signatureBlockFiles = signatureBlockFiles[1]}
  ### compute background distribution of scores
  # NOTE(review): the null model is estimated from the FIRST block only and
  # then reused for all blocks -- presumably blocks are comparable; confirm.
  cat('simulating null model...\n')
  RL = loadLincsBlock(signatureBlockFiles[1], sigFilters=sigFilters, debug=debug)$RL
  if(csMetric == 'ks'){
    bgDist = est_emp_Cs(querySig, nPerm, RANKED_LISTS=RL, show_progress=show_progress)
  }else if(csMetric == 'xsum'){
    # load() introduces 'zMean' (mean z-scores) into this scope
    load(DataDir('lincs_mean_zscores.RData'))
    drugData = cbind(zMean, zMean)
    rownames(drugData) = RL[,1]
    bgDist = est_emp_XSum(querySig, nPerm, drugData)
  }
  cat('done!\n')

  ### iterate over blocks of 10k signatures
  # Author TODO (kept as a runtime warning): switch %dopar% to %dorng% so the
  # permutation RNG is reproducible across parallel workers.
  warning('change this to dorng, and check')
  res_i = foreach(blockFile=signatureBlockFiles) %dopar% {
    CS_LINCS(querySig, blockFile = blockFile, bgDist=bgDist, csMetric=csMetric, debug=debug,
             sigFilters=sigFilters, nPerm=nPerm, show_progress=show_progress)
  }

  # Flatten each block's result list into a data.frame and stack them,
  # sorted by raw connectivity score (most negative first)
  dfList = lapply(res_i, function(x) assemble_df(x))
  combRes = ldply(dfList, rbind)
  resDat = combRes[order(combRes$score,decreasing=FALSE),]

  if(annotFilter){
    # load() introduces 'lincsAnnot' (curated compound metadata)
    load(DataDir('lincs_compound_metadata.RData'))
    resDat = resDat[resDat$pert_id %in% lincsAnnot$pert_id,]
  }

  # Adjust p-values (Benjamini-Hochberg) separately within each pert_type
  resDat_pert_type = split(resDat, f = resDat$pert_type)
  for(type_i in 1:length(resDat_pert_type)){
    resDat_pert_type[[type_i]]$adjPvalue = p.adjust(p = resDat_pert_type[[type_i]]$pvalue, method = "BH")
  }

  # Collapse the per-signature rows into one summary row per perturbagen
  result = lapply(resDat_pert_type, function(x) collapsePerturbagens(x))
  return(result)
}
# Compute connectivity scores for one block file of LINCS signatures.
#
# Args:
#   querySig:      query gene signature (as consumed by cMap_CS / XSum_CS).
#   blockFile:     path to one .RData signature block (see loadLincsBlock).
#   bgDist:        precomputed empirical null distribution; for 'ks' an
#                  object with a $mu field (used by pnormmix and for score
#                  normalization), for 'xsum' a plain numeric vector.
#   csMetric:      'ks' or 'xsum'.
#   sigFilters:    filters forwarded to loadLincsBlock.
#   debug:         forwarded to loadLincsBlock (signature subsetting).
#   nPerm:         unused here (the null model is precomputed); kept for
#                  interface symmetry with LINCS_CS_parallel.
#   show_progress: draw a text progress bar over the signatures.
#
# Returns:
#   An empty list when the block has no surviving signatures; otherwise a
#   list with per-signature scores CS, p-values Pval, normalized scores NCS,
#   and the signature annotation data.frame.
CS_LINCS = function(querySig, blockFile, bgDist=NULL, csMetric='ks', sigFilters=NULL, debug = FALSE,
                    nPerm=NULL, show_progress=TRUE){

  out = loadLincsBlock(blockFile, sigFilters=sigFilters, debug=debug)

  if(ncol(out$sigMat) == 0){ # check for matrix width (i.e. some non -666 sigs)
    res = vector("list")  # empty block: return an empty list
  }else if(ncol(out$sigMat) > 0){
    cat('computing connectivity scores\n')
    ns = ncol(out$RL)
    if(show_progress){
      pb = txtProgressBar(min=1,max=ns,style=3)
    }
    if(csMetric == 'xsum'){
      # load() introduces 'zMean' (mean z-score per gene) into this scope.
      # rev() reorients the value vector before it is re-labelled per
      # signature below -- TODO confirm intended orientation.
      load(DataDir('lincs_mean_zscores.RData'))
      drugDataVec = rev(zMean)
      # null mean/sd, used for z-normalization of the XSum scores below
      mu = mean(bgDist)
      stdev = sd(bgDist)
    }
    CS = rep(NA,ns)
    Pvals = rep(NA,ns)
    for (i in 1:ns){
      if(csMetric == 'ks'){
        # cMap KS-style score against this signature's ranked gene list
        CS[i]<-cMap_CS(out$RL[,i],querySig)
        Pvals[i] = pnormmix(CS[i], bgDist)  # p-value under the mixture null
      }else if(csMetric == 'xsum'){
        # re-label the z-score vector with this signature's gene ranking
        names(drugDataVec) = out$RL[,i]
        CS[i] = XSum_CS(drugDataVec, querySig)
      }
      if(show_progress){
        setTxtProgressBar(pb, i)
      }
    }
    if(show_progress){
      Sys.sleep(1)  # let the progress bar finish rendering before closing
      close(pb)
    }
    if(csMetric == 'ks'){
      # Normalize: positive scores by the largest null mean, negative scores
      # by the most negative null mean (sign is preserved).
      NCS<-rep(NA,length(CS))
      NCS[CS>=0] = CS[CS>=0]/max(bgDist$mu)
      NCS[CS<0] = -CS[CS<0]/min(bgDist$mu)
    }else if(csMetric == 'xsum'){
      # z-score against the empirical null; two-sided normal p-values
      NCS = (CS - mu) / stdev
      Pvals = 2*(1-pnorm(abs(NCS)))
    }
    names(CS) = colnames(out$RL)
    names(Pvals) = colnames(out$RL)
    names(NCS) = colnames(out$RL)
    res<-list(CS=CS, Pval=Pvals, NCS=NCS, annotations=out$annot)
  }
  return(res)
}
# Collapse per-signature scores into one summary row per perturbagen.
#
# For each pert_id: averages score/normScore across its signatures, combines
# p-values with Stouffer's method (pcomb), and records the contributing cell
# lines. Output is sorted by combined p-value and BH-adjusted.
#
# Args:
#   filteredRes: data.frame of per-signature results with columns pert_id,
#                pert_iname, cell, score, normScore, pvalue.
#
# Returns:
#   data.frame with one row per perturbagen, sorted by pvalue ascending,
#   plus an adjPvalue column (Benjamini-Hochberg).
collapsePerturbagens = function(filteredRes){
  # Summarize results for same perturbagen
  require(plyr)
  # Factor2Char (project helper) converts factor columns to character;
  # rows without a pert_id are dropped first
  filteredRes = Factor2Char(filteredRes[!is.na(filteredRes$pert_id),])
  meanScoreTable = ddply(filteredRes, .(pert_id),
                         plyr::summarize, pert_iname=paste(unique(pert_iname), collapse="|"),
                         nPert = length(score),
                         cellContributors = paste(sort(cell), collapse = "|"),
                         # sign taken from the mean normalized score, magnitude
                         # from the Stouffer-combined p-value
                         signedLogP = -sign(mean(normScore))*log10(pcomb(pvalue)),
                         score = mean(score),
                         normScore = mean(normScore),
                         pvalue = pcomb(pvalue),
                         .parallel = TRUE)
  meanScoreTable = meanScoreTable[order(meanScoreTable$pvalue,decreasing = FALSE),]
  meanScoreTable$adjPvalue = p.adjust(p = meanScoreTable$pvalue, method = "BH")
  return(meanScoreTable)
}
### Stouffers method for combining p-values
# Gauss error function, expressed through the standard normal CDF:
# erf(x) = 2 * Phi(x * sqrt(2)) - 1. Vectorized over x.
erf = function(x) {
  2 * pnorm(2 * x / sqrt(2)) - 1
}
# Inverse error function, expressed through the standard normal quantile:
# erfinv(y) = qnorm((y + 1) / 2) / sqrt(2). Vectorized over x.
erfinv = function(x) {
  prob <- (x + 1) / 2
  qnorm(prob) / sqrt(2)
}
# Stouffer's method: combine a vector of independent p-values into a single
# p-value. Each p is mapped to a z-score, the z-scores are summed and
# rescaled by sqrt(n), and the combined z is mapped back to a p-value.
pcomb = function(p) {
  zscores <- sqrt(2) * erfinv(1 - 2 * p)
  (1 - erf(sum(zscores) / sqrt(2 * length(p)))) / 2
}
# Truncate a LINCS pert_mfc_id down to its canonical pert_id by keeping only
# the first two dash-separated tokens (e.g. "BRD-K12345678-001-01-1" becomes
# "BRD-K12345678"). Returns NA for NULL input. Note: a single-token input
# yields "<token>-NA" (second token missing), matching historical behavior.
pertMfc2RegularId = function(x){
  if (is.null(x)) {
    return(NA)
  }
  tokens <- unlist(strsplit(x, split = '-'))
  paste(tokens[1:2], collapse = '-')
}
# Load one LINCS signature block file and apply signature-level filters.
#
# The .RData file is expected to provide 'sigMat' (genes x signatures
# z-score matrix) and 'allSigs' (per-signature metadata list).
#
# Args:
#   blockFile:  path to the .RData block file.
#   sigFilters: list with $onlyGold (keep is_gold signatures), $onlyChemPert
#               (keep pert_type == 'trt_cp'), and $debugNumber (max number of
#               signatures kept when debug=TRUE).
#   debug:      if TRUE, truncate to sigFilters$debugNumber signatures.
#
# Returns:
#   list(RL, allSigs, sigMat, annot) where RL has one column per signature
#   listing gene ids ranked by decreasing z-score, and annot is a data.frame
#   of signature metadata; RL and annot are NULL when no signatures survive.
loadLincsBlock = function(blockFile, sigFilters, debug=FALSE){
  cat(sprintf('loading file %s\n', blockFile))
  # load() introduces 'sigMat' and 'allSigs' into this scope
  load(file = blockFile)
  # align sig info with matrix, i.e. we have some sig info with -666
  # annotations and no data
  allSigs = allSigs[match(colnames(sigMat), names(allSigs))]
  if(ncol(sigMat) > 0){
    if(sigFilters$onlyGold){
      # keep only "gold" (high-reproducibility) signatures
      gold_status = sapply(allSigs, function(x) x$is_gold)
      allSigs = allSigs[which(gold_status)]
      sigMat = sigMat[,which(gold_status)]
    }
    if(sigFilters$onlyChemPert){
      # keep only chemical (compound) perturbations
      pert_type = sapply(allSigs, function(x) x$pert_type)
      allSigs = allSigs[which(pert_type == 'trt_cp')]
      sigMat = sigMat[,which(pert_type == 'trt_cp')]
    }
    nFilter = sigFilters$debugNumber
    if(debug && ncol(sigMat) > nFilter){
      allSigs = allSigs[1:nFilter]
      sigMat = sigMat[,1:nFilter]
    }
    if(ncol(sigMat) > 0){
      geneOrder = rownames(sigMat)  # NOTE(review): assigned but unused below
      # rank gene ids by decreasing z-score, one column per signature
      RANKED_LISTS = sapply(1:ncol(sigMat), function(x)
        rownames(sigMat)[order(sigMat[,x], decreasing = TRUE)])
      colnames(RANKED_LISTS) = colnames(sigMat)
      # one metadata row per signature (pert id collapsed via pertMfc2RegularId)
      annot = data.frame(t(sapply(allSigs, function(x) c(pertMfc2RegularId(
        x$pert_mfc_id), x$pert_iname, x$pert_idose, x$pert_time, x$cell_id, x$pert_type, x$distil_ss))))
      colnames(annot) = c("pert_id", "pert_iname", "dose", "time", "cell", "pert_type", "distil_ss")
    }else{
      RANKED_LISTS = NULL
      annot = NULL
    }
  }else{
    # yes, this is intentional. There are two ways that sigMat can be empty --
    # either there is no data to start with, or there is no data after filtering
    RANKED_LISTS = NULL
    annot = NULL
  }
  return(list(RL=RANKED_LISTS, allSigs=allSigs, sigMat=sigMat, annot=annot))
}
# Flatten a connectivity-score result list into a single data.frame.
#
# resList - list with `annotations` (data.frame, one row per signature) and
#           named vectors `CS` (raw score), `NCS` (normalized score) and
#           `Pval` (p-value), all indexed by signature id.
#
# Returns the data.frame (visibly). Previously the function's last
# expression was an assignment, so the result was returned invisibly.
assemble_df = function(resList){
data.frame(resList$annotations,
           signature_id = names(resList$CS),
           score = resList$CS,
           normScore = resList$NCS,
           pvalue = resList$Pval,
           row.names = NULL)
}
############################################################################################################
### X sum, as per Cheng, Agarwal et al 2014
############################################################################################################
# Compute XSum connectivity scores (Cheng, Agarwal et al. 2014) for a query
# signature against every compound/signature in the chosen database.
#
# querySig     - list with $UP and optionally $DOWN entrez-id vectors
# nPerm        - number of permutations for the empirical null distribution
# numDrugGenes - number of extreme drug genes considered (see XSum_CS)
# data         - which cMap fold-change matrix to use ('FC' or 'qFC')
# database     - 'cmap' or 'lincs'; anything else stops with an error
#
# Returns a list of raw scores (CS), two-sided normal p-values (Pval),
# FDR-adjusted p-values (adjP) and z-normalized scores (NCS).
#
# NOTE(review): loads .RData files via the project helper DataDir() and
# expects them to define meanFC / FC / PRL_entrez / zMean; also requires a
# registered foreach parallel backend. The dorng warnings below flag that
# worker RNG streams are not reproducible under plain %dopar%.
XSum_allCmap = function(querySig, nPerm, numDrugGenes=500, data='FC', takeAbs=FALSE, debug=FALSE, debugN=2, database='cmap'){
if(database=='cmap'){
warning('should remove other two cell types from FC matrices')
if(data == 'FC'){
# load('data/cmap/FC1309.RData')
# drugData = FC1309
load(DataDir('cmap/cell_specific_FC_cmap.RData'))
drugData = meanFC
}else if(data == 'qFC'){
load(DataDir('cmap/cell_specific_qFC_cmap.RData'))
drugData = FC
}
# template vector; refilled per compound inside the foreach loop below
drugDataVec = drugData[,1]
}else if(database=='lincs'){
load(DataDir('lincs_mean_zscores.RData'))
if(debug && debugN<=5){
load(DataDir('PRL_entrez_lincs_cell_specific_debug.RData'))
}else{
load(DataDir('PRL_entrez_lincs_cell_specific_plusCF.RData'))
}
drugData = PRL_entrez
drugDataVec = rev(zMean) # reversing so that the first entry of drugDataVec is the largest z-score,
# since PRL_entrez rank 1 corresponds to most up-regulated gene
names(drugDataVec) = as.character(PRL_entrez[,1])
}else{
stop('unexpected database type')
}
if(debug){
drugData = drugData[,1:debugN]
}
# Null distribution of XSum scores from permuted gene labels.
cat('estimating empirical distribution for XSum...\n')
print(system.time(CS_EMP <- est_emp_XSum(querySig, nPerm, drugDataVec=drugDataVec, numDrugGenes=numDrugGenes, takeAbs=takeAbs)))
cat('...done!\n')
mu = mean(CS_EMP)
stdev = sd(CS_EMP)
cat('computing connectivity scores...\n')
warning('change this to dorng, and check')
CS = foreach(i=1:ncol(drugData)) %dopar%{
if(database == 'cmap'){
drugDataVec = drugData[,i]
}else{
# lincs: same z-score values, reassigned to signature i's gene ordering
names(drugDataVec) = as.character(PRL_entrez[,i])
}
XSum_CS(drugDataVec, querySig, numDrugGenes=numDrugGenes, takeAbs=takeAbs)
}
CS = unlist(CS)
names(CS) = colnames(drugData)
cat('...done!\n')
cat('computing pvalues...\n')
# z-normalize against the empirical null, then two-sided normal p-values
NCS = (CS - mu) / stdev
Pvals = 2*(1-pnorm(abs(NCS)))
cat('...done!\n')
return(list(CS=CS,Pval=Pvals,adjP=stats::p.adjust(Pvals,method='fdr'),NCS=NCS))#, mu=mu, stdev=stdev, CS_EMP=CS_EMP))
}
# drugDataVec should be a named vector with names corresponding to entrez gene
# IDs and values either logFC or z-score
#
# querySig should be a list with at least one element named UP (containing a
# vector of entrez ids) and possibly another list called DOWN, also containing a
# vector of entrez ids
#
# not sure if takeAbs does anything useful. I made this up..
# XSum connectivity score of a query signature against one drug profile
# (Cheng, Agarwal et al. 2014).
#
# drugDataVec  - named numeric vector (entrez id -> logFC or z-score)
# querySig     - list with $UP (entrez ids) and optionally $DOWN
# numDrugGenes - how many top AND bottom drug genes count as "changed"
# takeAbs      - score on |values| and add (rather than subtract) the
#                DOWN contribution
#
# Returns the sum of values of query-UP genes among the drug's extreme
# genes, minus (or plus, when takeAbs) the analogous sum for query-DOWN.
XSum_CS = function(drugDataVec, querySig, numDrugGenes=500, takeAbs=FALSE){
  vals = if (takeAbs) abs(drugDataVec) else drugDataVec
  n = length(vals)
  rk = rank(vals)
  # genes in either extreme: the numDrugGenes largest or smallest values
  extreme = names(vals)[rk >= n - numDrugGenes + 1 | rk <= numDrugGenes]
  sum_over = function(genes){
    hits = intersect(genes, extreme)
    if (length(hits) > 0) sum(vals[hits]) else 0
  }
  up_part = sum_over(querySig$UP)
  down_part = sum_over(querySig$DOWN)
  if (takeAbs) up_part + down_part else up_part - down_part
}
# Empirical null distribution of XSum scores: permute the gene labels of
# drugDataVec nPerm times and recompute the score against querySig.
# Returns a numeric vector of length nPerm.
#
# NOTE(review): set.seed(42) only seeds the master process; %dopar% worker
# RNG streams are not controlled (hence the dorng warning below), so results
# are not reproducible across parallel backends. Also note the local
# variable `names` shadows base::names; calls like names(drugDataVec) still
# resolve to the function because R skips non-function bindings in call
# position.
est_emp_XSum = function(querySig, nPerm, drugDataVec, numDrugGenes=500, takeAbs=FALSE){
set.seed(42)
names = names(drugDataVec)
ng = length(drugDataVec)
warning('change this to dorng, and check')
EMP_CS = foreach(i = 1:nPerm) %dopar% {
names(drugDataVec) = names[sample(1:ng, ng)]
XSum_CS(drugDataVec, querySig, numDrugGenes=numDrugGenes, takeAbs=takeAbs)
}
return(unlist(EMP_CS))
}
# GSEA-style enrichment score of a gene set against a ranked gene list,
# using the cMap/KS running-sum statistic.
#
# RANKEDLIST - character vector of gene ids, best-ranked first
# REGULON    - gene ids of the query set (restricted to genes in RANKEDLIST)
# display    - if TRUE, plot the running sum
# returnRS   - if TRUE, return a list with the running sum, hit positions
#              (sorted by gene name) and the peak index; otherwise just ES
qES = function(RANKEDLIST,REGULON,display=FALSE,returnRS=FALSE){
  REGULON <- intersect(as.character(REGULON), RANKEDLIST)
  hits <- as.integer(is.element(RANKEDLIST, REGULON))
  n_total <- length(RANKEDLIST)
  n_set <- length(REGULON)
  # running fractions of hits and misses encountered so far
  p_hit <- cumsum(hits) / n_set
  p_miss <- cumsum(1 - hits) / (n_total - n_set)
  run_sum <- p_hit - p_miss
  # position of the maximal deviation (first one, if tied)
  peak <- which(abs(run_sum) == max(abs(run_sum)))[1]
  ES <- run_sum[peak]
  if (display){
    plot_col <- if (ES >= 0) "red" else "green"
    y_rng <- c(-(abs(ES) + 0.5 * abs(ES)), abs(ES) + 0.5 * abs(ES))
    plot(0:n_total, c(0, run_sum), col = plot_col, type = "l",
         xlim = c(0, n_total), ylim = y_rng, xaxs = "i", bty = "l",
         axes = FALSE, xlab = "Gene Rank Position", ylab = "Running Sum")
    par(new = TRUE)
    plot(0:n_total, rep(0, n_total + 1), col = 'gray', type = "l",
         new = FALSE, xlab = "", ylab = "", ylim = y_rng)
    axis(side = 2)
  }
  if (returnRS){
    positions <- which(hits == 1)
    names(positions) <- RANKEDLIST[positions]
    positions <- positions[order(names(positions))]
    return(list(ES = ES, RS = run_sum, POSITIONS = positions, PEAK = peak))
  }
  return(ES)
}
# Tail probability of a Gaussian mixture, used as an empirical p-value:
# for x >= 0 returns P(X >= x), for x < 0 returns P(X <= x), where
# X ~ sum_i lambda_i * N(mu_i, sigma_i). `mixture` is a normalmixEM-style
# object with components $lambda, $mu and $sigma.
pnormmix = function(x,mixture) {
  w = mixture$lambda
  tail_probs = vapply(seq_along(w), function(i) {
    if (x >= 0) {
      # upper tail of component i
      w[i] * pnorm(x, mean = mixture$mu[i], sd = mixture$sigma[i],
                   lower.tail = FALSE)
    } else {
      # lower tail of component i
      w[i] * pnorm(x, mean = mixture$mu[i], sd = mixture$sigma[i],
                   lower.tail = TRUE)
    }
  }, numeric(1))
  sum(tail_probs)
}
# Dispatch connectivity-score computation for a query signature against the
# chosen drug database: the cMap KS statistic ('ks', via CS_entrez) or the
# XSum statistic ('xsum', via XSum_allCmap). Any other metric is an error.
# Returns a data.frame with one row per compound.
computeCsOnCmap = function(querySig, nPerm, csMetric='ks', numDrugGenes=500, debug=FALSE, debugN=NULL, database='cmap'){
  csList = switch(csMetric,
    ks = CS_entrez(querySig, nPerm, debug = debug, debugN = debugN,
                   database = database),
    xsum = XSum_allCmap(querySig, nPerm, numDrugGenes = numDrugGenes,
                        data = 'qFC', takeAbs = FALSE, debug = debug,
                        debugN = debugN, database = database),
    stop('unexpected csMetric, not computing anything')
  )
  data.frame(compound = names(csList$CS), score = csList$CS,
             normScore = csList$NCS, pvalue = csList$Pval,
             adjP = csList$adjP)
}
# cMap KS-style connectivity scores of a query signature against all
# pre-ranked gene lists (PRL) in the chosen database.
#
# querySig - list with $UP and optionally $DOWN entrez-id vectors
# nPerm    - number of permutations used by est_emp_Cs for the null model
# database - 'cmap' or 'lincs'; selects which PRL .RData file is loaded
#
# Returns a list of raw scores (CS), mixture-model p-values (Pval),
# FDR-adjusted p-values (adjP) and normalized scores (NCS, scaled by the
# extreme mixture-component means).
#
# NOTE(review): the loaded .RData must define PRL_entrez; requires a
# registered foreach backend. csMetric/numDrugGenes are accepted but unused
# here -- presumably kept for signature parity with XSum_allCmap.
CS_entrez = function(querySig, nPerm, debug=FALSE, debugN=NULL, csMetric='ks', numDrugGenes=500, database='cmap'){
if(database=='cmap'){
if(debug){
load(DataDir('PRL_entrez_cmap_cell_specific_debug.RData'))
}else{
cat('loading CMAP PRL file...\n')
load(DataDir('PRL_entrez_cmap_cell_specific.RData'))
cat('...done!\n')
}
}else if(database=='lincs'){
if(debug){
load(DataDir('PRL_entrez_lincs_cell_specific_debug.RData'))
}else{
cat('loading LINCS PRL file...\n')
print(system.time(load(DataDir('PRL_entrez_lincs_cell_specific_plusCF.RData'))))
}
}
if(debug){
PRL_entrez = PRL_entrez[,1:debugN]
}
RANKED_LISTS = PRL_entrez
# Null model: Gaussian mixture fitted to scores of permuted ranked lists.
cat('simulating null model\n')
mixmdl<-est_emp_Cs(querySig, nPerm, RANKED_LISTS)
cat('done!\n')
cat('computing connectivity scores\n')
ns<-ncol(RANKED_LISTS)
# preallocations below are immediately overwritten by the foreach results
CS<-rep(NA,ns)
Pvals<-rep(NA,ns)
warning('change this to dorng, and check')
CS = foreach(i=1:ns) %dopar% {
cMap_CS(RANKED_LISTS[,i],querySig)
}
CS = unlist(CS)
warning('change this to dorng, and check')
Pvals = foreach(i=1:ns) %dopar%{
pnormmix(CS[i], mixmdl)
}
Pvals = unlist(Pvals)
names(CS)<-colnames(RANKED_LISTS)
names(Pvals)<-colnames(RANKED_LISTS)
# normalize positive/negative scores by the largest/smallest mixture means
NCS<-rep(NA,length(CS))
NCS[CS>=0]<-CS[CS>=0]/max(mixmdl$mu)
NCS[CS<0]<--CS[CS<0]/min(mixmdl$mu)
names(NCS)<-names(CS)
res<-list(CS=CS,Pval=Pvals,adjP=stats::p.adjust(Pvals,method='fdr'),NCS=NCS)
return(res)
}
# This differs from CS_entrez original function in that you input your own drug signatures
#
# querySig - list with $UP and optionally $DOWN gene-id vectors
# drugSigs - list of named numeric vectors (gene -> score); each is turned
#            into a ranked gene list by decreasing score. All vectors are
#            assumed to have the same length as drugSigs[[1]].
# nPerm    - number of permutations for the empirical null (est_emp_Cs)
#
# Returns the same structure as CS_entrez: CS, Pval, adjP, NCS.
#
# NOTE(review): RANKED_LISTS is built with array() and never given column
# names, so names(CS) <- colnames(RANKED_LISTS) leaves the outputs unnamed
# -- confirm whether the drugSigs names should be carried over. Requires a
# registered foreach backend.
CS2 = function(querySig, drugSigs, nPerm){
# Reformat drugSigs into RANKED_LISTS matrix
RANKED_LISTS = array(data='', dim=c(length(drugSigs[[1]]), length(drugSigs)))
for(i in 1:length(drugSigs)){
RANKED_LISTS[,i] = names(sort(-drugSigs[[i]]))
}
mixmdl<-est_emp_Cs(querySig, nPerm, RANKED_LISTS)
ns<-ncol(RANKED_LISTS)
# preallocations below are immediately overwritten by the foreach results
CS<-rep(NA,ns)
Pvals<-rep(NA,ns)
CS = foreach(i=1:ns) %dopar% {
cMap_CS(RANKED_LISTS[,i],querySig)
}
CS = unlist(CS)
Pvals = foreach(i=1:ns) %dopar%{
pnormmix(CS[i], mixmdl)
}
Pvals = unlist(Pvals)
names(CS)<-colnames(RANKED_LISTS)
names(Pvals)<-colnames(RANKED_LISTS)
# normalize positive/negative scores by the largest/smallest mixture means
NCS<-rep(NA,length(CS))
NCS[CS>=0]<-CS[CS>=0]/max(mixmdl$mu)
NCS[CS<0]<--CS[CS<0]/min(mixmdl$mu)
names(NCS)<-names(CS)
res<-list(CS=CS,Pval=Pvals,adjP=stats::p.adjust(Pvals,method='fdr'),NCS=NCS)
return(res)
}
# cMap connectivity score of a query signature against one ranked gene list.
#
# ranked_list - character vector of gene ids, most up-regulated first
# querySig    - list with $UP and optionally $DOWN gene-id vectors
# returnRS    - if TRUE, also return per-set enrichment scores and running
#               sums from qES()
#
# Returns the total enrichment score TES = ES_UP for a one-sided query, or
# (ES_UP - ES_DOWN)/2 for a two-sided one. With returnRS = TRUE, returns a
# list(TES, ESUP, ESDOWN, RSUP, RSDOWN); DOWN entries are NULL for a
# one-sided query.
#
# Fix: ESDOWN / RSUP / RSDOWN are initialized to NULL. Previously, calling
# with returnRS = TRUE and no querySig$DOWN crashed on the undefined
# ESDOWN/RSDOWN names when building the output list.
cMap_CS = function(ranked_list, querySig, returnRS=FALSE){
ESUP = qES(ranked_list,querySig$UP,display=FALSE,returnRS=returnRS)
twoSided = !is.null(querySig$DOWN)
# defaults so the returnRS output below is always well defined
ESDOWN = NULL
RSUP = NULL
RSDOWN = NULL
if(twoSided){
ESDOWN = qES(ranked_list,querySig$DOWN,display=FALSE,returnRS=returnRS)
}
if (returnRS){
RSUP<-ESUP$RS
ESUP<-ESUP$ES
if(twoSided){
RSDOWN<-ESDOWN$RS
ESDOWN<-ESDOWN$ES
}
}
if(!twoSided){
TES = ESUP
}else{
# average of the signed up- and down-set enrichment
TES<-(ESUP-ESDOWN)/2
}
if(returnRS){
out = list(TES=TES, ESUP=ESUP, ESDOWN=ESDOWN, RSUP=RSUP, RSDOWN=RSDOWN)
}else{
out = TES
}
return(out)
}
# Fit a Gaussian mixture null model for cMap connectivity scores: compute
# the score of the query signature against nt random permutations of the
# first ranked list, then fit normalmixEM with k = 2 (one-sided query) or
# k = 3 (two-sided query) components.
#
# querySig     - list with $UP and optionally $DOWN gene-id vectors
# nt           - number of permutations
# RANKED_LISTS - matrix of ranked gene lists; only column 1 is permuted
#
# Returns the fitted mixtools::normalmixEM model object.
#
# NOTE(review): the progress bar is created and closed but never advanced
# inside the foreach loop; the EMP_CS preallocation is immediately
# overwritten; and set.seed(42) does not control %dopar% worker RNG streams
# (hence the dorng warning), so the null model is not fully reproducible.
est_emp_Cs = function(querySig,nt,RANKED_LISTS,show_progress=TRUE){
set.seed(42)
require(mixtools)
EMP_CS = rep(NA,nt)
ng = nrow(RANKED_LISTS)
if (show_progress){
pb = txtProgressBar(min=1,max=nt,style=3)
}
warning('change this to dorng, and check')
EMP_CS = foreach(i = 1:nt) %dopar%{
cMap_CS(RANKED_LISTS[sample(1:ng,ng),1],querySig)
}
EMP_CS = unlist(EMP_CS)
if(show_progress){
Sys.sleep(1)
close(pb)
}
if(is.null(querySig$DOWN)){
mixmdl = normalmixEM(EMP_CS,k=2,verb=FALSE) # one-sided
}else{
mixmdl = normalmixEM(EMP_CS,k=3,verb=FALSE) # two-sided
}
return(mixmdl)
}
|
608c40d7cfc377e42908f5cca29e6b69a130b15a | 49f0605bea382482a028e543cac6e72e3c0335c7 | /man/nvdescstat.Rd | 1035902e92ab4b381469fe9e2262358803f2fe04 | [] | no_license | cran/cbanalysis | 2993af56c9b1b6809842132e43398207bad61940 | ce00fca11d555c4c251bb44908e0c32f2d33984d | refs/heads/master | 2021-01-19T13:44:33.718634 | 2017-09-04T17:43:49 | 2017-09-04T17:43:49 | 88,104,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,556 | rd | nvdescstat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nvdescstat.R
\name{nvdescstat}
\alias{nvdescstat}
\title{Numerical Variable Descriptive Statistics: returns a data frame of descriptive statistics for the numerical variables in a data frame.}
\usage{
nvdescstat(df)
}
\arguments{
\item{df}{- Input Data frame.}
}
\value{
Returns a data frame containing descriptive statistics for the numerical variables.
}
\description{
Returns a data frame containing following descriptive statistics for numerical variables in a data frame.
% Missing Value -Percent of missing values in the variable.
Min Value -Minimum value of the variable.
Max Value -Maximum value of the variable.
Mean -Mean of the variable.
Median -Median of the variable.
Variance -Variance of the variable.
Standard Deviation -Standard Deviation of the variable.
Lower Outlier Cutoff -This helps to detect Lower outliers.
Upper Outlier Cutoff -This helps to detect Upper outliers.
}
\examples{
numv1<-c(8000,200,323)
numv2<-c(400,533,633)
numv3<-c(100,534,734)
numv4<-c(1,25,34)
chrv6<-c("a","b","c")
numv5<-c(50,10000,34000)
chrv7<-as.factor(c("male","female","unknown"))
numv6<-c(NA,300,340)
df<-data.frame(numv1,numv2,chrv6,numv3,numv4,numv5,chrv7,numv6)
nvdescstatdf<-nvdescstat(df)
}
\author{
"Sandip Kumar Gupta", "sandip_nitj@yahoo.co.in"
}
|
ae5cb8b3efabff62005e242ca5c8a43d85bf46ed | 3f7bfee1072108a851a0c5c22dd632436327737a | /man/current.market.Rd | 58f734ae08cb3a3d4b566556496df576d3636f79 | [] | no_license | arturochian/maRketSim | 94e08c3d5adbeb00a67061b9d278dbae2df47b31 | 5e6ff01aff467d3d163390318e4471d170f8a9b8 | refs/heads/master | 2021-01-15T22:18:37.551423 | 2014-01-26T16:51:13 | 2014-01-26T16:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 554 | rd | current.market.Rd | \name{current.market}
\alias{current.market}
\title{
Find current market object
}
\description{
Function to return the market object at a given time from a history.market object. If no market exists at that exact time point, it interpolates by taking the most recent preceding market and returning a market object with those characteristics but the requested time.
}
\usage{
current.market(hist.mkt, t, ...)
}
\arguments{
\item{hist.mkt}{
a history.market object
}
\item{t}{
which time to extract
}
\item{\dots}{
Pass-alongs
}
}
\value{
A market object
}
|
558f9faf939c1251d0465c0af6697a2c49168aa8 | 81f6f50ab82bdd997b7da53f83771ac13323c2f7 | /plots/plot4.R | b81c8f3f8a7ed1b5fbb5178e49b4af6a3e6b04f3 | [] | no_license | zawhtetwai/ExData_Plotting1 | 04c739fbf206533c080692ddd2dedb6fc69b1dd7 | 507f56077d58796853c81a4177156b70f444f24f | refs/heads/master | 2021-01-13T14:38:45.648770 | 2014-11-08T19:06:11 | 2014-11-08T19:06:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 948 | r | plot4.R | source("load_data.R")
data <- load_data()
png("plot4.png", width=480, height=480)
data$DateTime <- paste(data$Date, data$Time, sep=" ")
data$DateTime <- strptime(data$DateTime, format ="%Y-%m-%d %H:%M:%S")
par(mfrow=c(2,2), mar=c(5,4,2,2),oma=c(1.5,0.5,1,0.5))
plot(data$DateTime, data$Global_active_power, type="s", xlab="", ylab="Global Active Power")
plot(data$DateTime, data$Voltage, type="s", xlab="datetime", ylab="Voltage")
plot(data$DateTime, data$Sub_metering_1, type="s", col="black", xlab="", ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_2, type="s", col="red")
lines(data$DateTime, data$Sub_metering_3, type="s", col="blue")
legend("topright", border = "white", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty="n", lty=c(1,1,1), lwd=c(1,1,1), col=c("black", "red", "blue"))
plot(data$DateTime, data$Global_reactive_power, type="s", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
1748ae2b1e33e01d818306f8995f7ec1bf060aa0 | 626e2140772938f91faf332daf1409af31f08454 | /code/Manuscript/Section2.R | 96004374c94ba489562e07cbe08ccd8505d4dde1 | [] | no_license | iLukegogogo/BreastCancerMetaPotenial | 56461d0a425c93b6ce0355a188f6723b9d3c1d59 | 79d39bc35372d997d755c9e927d404a8c9bed71e | refs/heads/master | 2023-02-08T14:42:44.665538 | 2021-01-02T03:24:16 | 2021-01-02T03:24:16 | 276,227,478 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,198 | r | Section2.R | require(CePa)
require(dplyr)
require(ComplexHeatmap)
require(RColorBrewer)
require(circlize)
require(segmented)
source('client-side/code/Manuscript/ggplot.style.R')
load('client-side/output/DE.breast.cancer.R.output/DE.breast.cancer.RData')
##############################################################################
#---------------- Figure 2 --------------------------------------------------#
##############################################################################
#######################################################
# Fig 2a: motivation to use piecewise linear regression to remove liver speicfic genes (LuminalB subtype)
######################################################
# For genes shared by both DE results, compare log2FC of liver-vs-primary
# (x) against metastasis-vs-primary (y) and fit a segmented (piecewise
# linear) regression with one breakpoint (initial guess psi = 2).
c.gene <- intersect(rownames(de.res.liver.vs.breast.lumb),rownames(de.res.metastasis.liver.vs.breast.lumb))
x <- de.res.liver.vs.breast.lumb[c.gene,'log2FoldChange']
y <- de.res.metastasis.liver.vs.breast.lumb[c.gene,'log2FoldChange']
lin.mod <- lm(y~x)
segmented.mod <- segmented(lin.mod, seg.Z = ~x, psi=2)
tmp <- summary(segmented.mod)
# estimated breakpoint; drawn as the dashed vertical line below
psi <- tmp$psi[1,'Est.']
df <- data.frame(x=de.res.liver.vs.breast.lumb[c.gene,'log2FoldChange'],y=de.res.metastasis.liver.vs.breast.lumb[c.gene,'log2FoldChange'],fitted = fitted(segmented.mod))
rownames(df) <- c.gene
ggplot(df,aes(x=x,y=y)) + geom_point(size=2.5) + ggplot.style + ylab('log2FC (MET.vs.PRI)') + xlab('log2FC (LIVER.vs.PRI)') + xlim(c(-15,22)) + ylim(c(-15,22)) + geom_abline(intercept = 0,slope=1) + geom_vline(xintercept =psi,linetype=2,size=2) + geom_line(aes(x=x,y=fitted),col='red',lwd=3.5)
ggsave(filename = '~/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/lumb.log2FC.MP.and.normal.pdf',width = 20,height=20)
#######################################################
# Fig 2b: motivation to use piecewise linear regression to remove liver speicfic genes (GSE58708 dataset)
######################################################
# Same analysis on the independent SRP043470 (GSE58708) validation dataset.
load('client-side/output/validation.of.confounding.R.output/validation.of.confounding.RData')
c.gene <- intersect(rownames(SRP043470.de.res.liver.vs.breast),rownames(SRP043470.de.res.metastasis.liver.vs.breast))
x <- SRP043470.de.res.liver.vs.breast[c.gene,'log2FoldChange']
y <- SRP043470.de.res.metastasis.liver.vs.breast[c.gene,'log2FoldChange']
lin.mod <- lm(y~x)
segmented.mod <- segmented(lin.mod, seg.Z = ~x, psi=2)
tmp <- summary(segmented.mod)
psi <- tmp$psi[1,'Est.']
df <- data.frame(x=SRP043470.de.res.liver.vs.breast[c.gene,'log2FoldChange'],y=SRP043470.de.res.metastasis.liver.vs.breast[c.gene,'log2FoldChange'],fitted = fitted(segmented.mod))
rownames(df) <- c.gene
ggplot(df,aes(x=x,y=y)) + geom_point(size=2.5) + ggplot.style + ylab('log2FC (MET.vs.PRI)') + xlab('log2FC (LIVER.vs.PRI)') + xlim(c(-15,22)) + ylim(c(-15,22)) + geom_abline(intercept = 0,slope=1) + geom_vline(xintercept =psi,linetype=2,size=2) + geom_line(aes(x=x,y=fitted),col='red',lwd=3.5)
ggsave(filename = '~/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/SRP043470.log2FC.MP.and.normal.pdf',width = 20,height=20)
#######################################################
# Fig 2c: DEBoost pipeline
######################################################
##############################################################################
#---------------- Figure S2 --------------------------------------------------#
##############################################################################
#######################################################
# Fig S2a: motivation to use piecewise linear regression to remove liver speicfic genes (Her2 subtype)
######################################################
# Same analysis for the Her2 subtype.
c.gene <- intersect(rownames(de.res.liver.vs.breast.her2),rownames(de.res.metastasis.liver.vs.breast.her2))
x <- de.res.liver.vs.breast.her2[c.gene,'log2FoldChange']
y <- de.res.metastasis.liver.vs.breast.her2[c.gene,'log2FoldChange']
lin.mod <- lm(y~x)
segmented.mod <- segmented(lin.mod, seg.Z = ~x, psi=2)
tmp <- summary(segmented.mod)
psi <- tmp$psi[1,'Est.']
df <- data.frame(x=de.res.liver.vs.breast.her2[c.gene,'log2FoldChange'],y=de.res.metastasis.liver.vs.breast.her2[c.gene,'log2FoldChange'],fitted = fitted(segmented.mod))
rownames(df) <- c.gene
ggplot(df,aes(x=x,y=y)) + geom_point(size=2.5) + ggplot.style + ylab('log2FC (MET.vs.PRI)') + xlab('log2FC (LIVER.vs.PRI)') + xlim(c(-15,22)) + ylim(c(-15,22)) + geom_abline(intercept = 0,slope=1) + geom_vline(xintercept =psi,linetype=2,size=2) + geom_line(aes(x=x,y=fitted),col='red',lwd=3.5)
ggsave(filename = '~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/her2.log2FC.MP.and.normal.pdf',width = 20,height=20)
#######################################################
# Fig S2b: motivation to use piecewise linear regression to remove liver speicfic genes (Basal-like subtype)
######################################################
# Same analysis for the Basal-like subtype.
# NOTE(review): this ggsave writes into .../section.Figure/Section1/ while
# all other panels go to Section2/ -- confirm the output directory.
c.gene <- intersect(rownames(de.res.liver.vs.breast.basal),rownames(de.res.metastasis.liver.vs.breast.basal))
x <- de.res.liver.vs.breast.basal[c.gene,'log2FoldChange']
y <- de.res.metastasis.liver.vs.breast.basal[c.gene,'log2FoldChange']
lin.mod <- lm(y~x)
segmented.mod <- segmented(lin.mod, seg.Z = ~x, psi=2)
tmp <- summary(segmented.mod)
psi <- tmp$psi[1,'Est.']
df <- data.frame(x=de.res.liver.vs.breast.basal[c.gene,'log2FoldChange'],y=de.res.metastasis.liver.vs.breast.basal[c.gene,'log2FoldChange'],fitted = fitted(segmented.mod))
rownames(df) <- c.gene
ggplot(df,aes(x=x,y=y)) + geom_point(size=2.5) + ggplot.style + ylab('log2FC (MET.vs.PRI)') + xlab('log2FC (LIVER.vs.PRI)') + xlim(c(-15,22)) + ylim(c(-15,22)) + geom_abline(intercept = 0,slope=1) + geom_vline(xintercept =psi,linetype=2,size=2) + geom_line(aes(x=x,y=fitted),col='red',lwd=3.5)
ggsave(filename = '~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section1/basal.log2FC.MP.and.normal.pdf',width = 20,height=20)
####################################################### Trash #######################################################
#######################################################
# FigS2a: Bar plot to show subtype-sepcificity of DE genes
######################################################
# up.gene <- c(lumb.up.gene,her2.up.gene,basal.up.gene)
# dn.gene <- c(lumb.dn.gene,her2.dn.gene,basal.dn.gene)
#
# up.gene.freq.df <- table(up.gene) %>% as.data.frame
# dn.gene.freq.df <- table(dn.gene) %>% as.data.frame
# up.gene.freq.df$color <- 'up'
# dn.gene.freq.df$color <- 'dn'
# colnames(up.gene.freq.df)[1] <- 'gene'
# colnames(dn.gene.freq.df)[1] <- 'gene'
#
# ggplot(rbind(up.gene.freq.df,dn.gene.freq.df)) +
# geom_bar( aes( x=factor(Freq),fill=color),position= 'dodge') +
# ggplot.style +
# xlab('Number of subtypes') +
# scale_fill_manual(values=c('up'='red','dn'='blue'))
#
# ggsave(filename = '~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/DE.gene.subtype.specificity.pdf',width = 20,height=20)
# subtype.sample <- intersect(pure.TCGA.breast.cancer.polyA.Her2.sample,colnames(TCGA.breast.cancer.log2.fpkm.matrix))
# her2.eg.up.gene.KM.plot <- draw.KM.plot(her2.eg.up.gene)
# her2.eg.dn.gene.KM.plot <- draw.KM.plot(her2.eg.dn.gene)
#
# pdf(file ='~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/her2.eg.up.gene.KM.plot.pdf',width=20,height=15)
# print(her2.eg.up.gene.KM.plot[[1]])
# dev.off()
# her2.survival.rs.up[her2.eg.up.gene,]
#
# pdf(file ='~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/her2.eg.dn.gene.KM.plot.pdf',width=20,height=15)
# print(her2.eg.dn.gene.KM.plot[[1]])
# dev.off()
# her2.survival.rs.dn[her2.eg.dn.gene,]
#
#
#
# subtype.sample <- intersect(pure.TCGA.breast.cancer.polyA.LumB.sample,colnames(TCGA.breast.cancer.log2.fpkm.matrix))
# lumb.eg.up.gene.KM.plot <- draw.KM.plot(lumb.eg.up.gene)
# lumb.eg.dn.gene.KM.plot <- draw.KM.plot(lumb.eg.dn.gene)
#
# pdf(file ='~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/lumb.eg.up.gene.KM.plot.pdf',width=20,height=15)
# print(lumb.eg.up.gene.KM.plot[[1]])
# dev.off()
# lumb.survival.rs.up[lumb.eg.up.gene,]
#
#
# pdf(file ='~/OneDrive/OneDrive - Michigan State University/Project/BreastCancerMetaPotenial/Manuscript/Manuscript/section.Figure/Section2/lumb.eg.dn.gene.KM.plot.pdf',width=20,height=15)
# print(lumb.eg.dn.gene.KM.plot[[1]])
# dev.off()
# lumb.survival.rs.dn[lumb.eg.dn.gene,]
#
#
#
#
#
#
#
# tmp <- basal.survival.rs.up[basal.survival.rs.up$p.value < 0.05 ,]
# tmp <- tmp[order(tmp$p.value),]
# sum(tmp$effect.size > 0) / nrow(tmp)
# basal.eg.up.gene <- 'ENSG00000103253'
#
#
# tmp <- her2.survival.rs.up[her2.survival.rs.up$p.value < 0.05 ,]
# tmp <- tmp[order(tmp$p.value),]
# sum(tmp$effect.size > 0) / nrow(tmp)
# her2.eg.up.gene <- 'ENSG00000156453'
#
#
# tmp <- lumb.survival.rs.up[lumb.survival.rs.up$p.value < 0.05 ,]
# tmp <- tmp[order(tmp$p.value),]
# sum(tmp$effect.size > 0) / nrow(tmp)
# lumb.eg.up.gene <- 'ENSG00000132613'
#
#
# tmp <- basal.survival.rs.dn[basal.survival.rs.dn$p.value < 0.05 ,]
# tmp <- tmp[order(tmp$p.value),]
# sum(tmp$effect.size < 0) / nrow(tmp)
# basal.eg.dn.gene <- 'ENSG00000160307'
#
#
# tmp <- her2.survival.rs.dn[her2.survival.rs.dn$p.value < 0.05 ,]
# tmp <- tmp[order(tmp$p.value),]
# sum(tmp$effect.size < 0) / nrow(tmp)
# her2.eg.dn.gene <- 'ENSG00000182326'
#
#
# tmp <- lumb.survival.rs.dn[lumb.survival.rs.dn$p.value < 0.05 ,]
# tmp <- tmp[order(tmp$p.value),]
# sum(tmp$effect.size < 0) / nrow(tmp)
# lumb.eg.dn.gene <- 'ENSG00000188001'
|
94e4a6fcf1efa5c136ce06f1414c455adc00d3f3 | 4344aa4529953e5261e834af33fdf17d229cc844 | /input/gcamdata/man/module_gcamusa_L142.Building.Rd | 80300df1c06e0e9eb194d6ea46ce31f004b6aa9e | [
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JGCRI/gcam-core | a20c01106fd40847ed0a803969633861795c00b7 | 912f1b00086be6c18224e2777f1b4bf1c8a1dc5d | refs/heads/master | 2023-08-07T18:28:19.251044 | 2023-06-05T20:22:04 | 2023-06-05T20:22:04 | 50,672,978 | 238 | 145 | NOASSERTION | 2023-07-31T16:39:21 | 2016-01-29T15:57:28 | R | UTF-8 | R | false | true | 951 | rd | module_gcamusa_L142.Building.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zgcamusa_L142.Building.R
\name{module_gcamusa_L142.Building}
\alias{module_gcamusa_L142.Building}
\title{module_gcamusa_L142.Building}
\usage{
module_gcamusa_L142.Building(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L142.in_EJ_state_bld_F}. The corresponding file in the
original data system was \code{LA142.Building.R} (gcam-usa level1).
}
\description{
Downscaling each state and sector's shares of USA building energy use by fuel
}
\details{
Scaled national-level building energy consumption by portion of total US building energy use by fuel for each state and sector from the SEDS table.
}
\author{
KD September 2017
}
|
d5b3ab9f525a654e472d52e27d998b8c818fb921 | d509d8563acbd99b2956df36876c80d80b004e7d | /2021-1-FunctionalDataAnalysis/Lecture5.R | c591fdb2a8d55000c7de7f226b26f7526130fe75 | [] | no_license | jbeen2/Today-I-Learned | a3ccd59180f1f2036e3948bb30ecaea612749a7f | a28ff8b16c018411bdc8a5b3836ea25bffc7e2f1 | refs/heads/main | 2023-06-15T12:51:00.188228 | 2021-07-10T18:33:36 | 2021-07-10T18:33:36 | 303,149,155 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 7,291 | r | Lecture5.R | ##### Lecture 5 Part 1 #####
library(fda)
library(refund)
set.seed(2016)
par(mfrow=c(2,2),mar=c(2,2,1,1))
gaus2<-function(x){exp(-x^2/2)/sqrt(2*pi)}
gaus4<-function(x){(1/2)*(3-x^2)*gaus2(x)}
ep2<-function(x){(3/4)*(1-x^2)*(abs(x)<1)}
ep4<-function(x){(15/8)*(1-(7/3)*x^2)*ep2(x)}
xlim=4
ylimh=1.4
yliml=-0.25
fnt = 1.3
par(mfrow=c(2,2),mar=c(2,4,1,1), cex=fnt)
curve(gaus2,from=-xlim,to=xlim,xlab="",ylab="Gaussian - 2nd",ylim=c(yliml,ylimh))
abline(h = 0,lty=2)
curve(gaus4,from=-xlim,to=xlim,xlab="",ylab="Gaussian - 4th",ylim=c(yliml,ylimh))
abline(h = 0,lty=2)
curve(ep2,from=-xlim,to=xlim,xlab="",ylab="Epanechnikov - 2nd",ylim=c(yliml,ylimh))
abline(h = 0,lty=2)
curve(ep4,from=-xlim,to=xlim,xlab="",ylab="Epanechnikov - 4th",ylim=c(yliml,ylimh))
abline(h = 0,lty=2)
N = 100; M = 5
T = matrix(runif(N*M),nrow=N,ncol=M)
T = apply(T,1,"sort"); T = t(T)
mu <-function(t){
.5*dnorm(t,mean = 1/3,sd=1/18)+
.1*dnorm(t,mean = 2/3,sd=1/18)}
par(mar=c(2,2,1,1))
curve(mu,from=0,to=1)
library(MASS)
C<-function(d){exp(-abs(d))}
Y<-matrix(0,nrow=N,ncol=M)
for(n in 1:N){
tms<-T[n,]
Sig<-C(outer(tms,tms,FUN="-"))
mu_v<-mu(tms)
Y[n,] = mvrnorm(1,mu_v,Sig)
}
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
h = 1/10; P<-1; grd_n<-50
K<-function(d){(1/sqrt(2*pi))*exp(-d^2/2)}
grd<-seq(0,1,length=grd_n)
mu_hat = numeric(grd_n)
y = c(Y); t_obs = c(T)
for(i in 1:grd_n){
t = grd[i]
x = matrix(0,nrow=N*M,ncol=P+1)
for(j in 0:P){
x[,j+1] = (t-t_obs)^j
}
W = diag(K((t-t_obs)/h))
beta_hat = solve(t(x)%*%W%*%x,t(x)%*%W%*%y)
mu_hat[i] = beta_hat[1]
}
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,mu_hat,type="l",lwd=1.5,col="red")
newdata=data.frame(t_obs = grd)
fit1<-loess(y~t_obs,span=1/2)
pred1 <- predict(fit1,newdata=newdata,
degree=1,family="gaussian")
fit2<-loess(y~t_obs,span=1/10)
pred2 <- predict(fit2,newdata=newdata,
degree=1,family="gaussian")
fit3<-loess(y~t_obs,span=1/15)
pred3 <- predict(fit3,newdata=newdata,
degree=1,family="gaussian")
fit4<-loess(y~t_obs,span=1/20)
pred4 <- predict(fit4,newdata=newdata,
degree=1,family="gaussian")
par(mar=c(2,2,1,1))
plot(t_obs,y,xlab="",ylab="")
points(grd,pred1,type="l",col=2,lwd=2)
points(grd,pred2,type="l",col=3,lwd=2)
points(grd,pred3,type="l",col=4,lwd=2)
points(grd,pred4,type="l",col=5,lwd=2)
##### Lecture 5 Part 2 #####
library(fda)
library(refund)
set.seed(2016)
N = 100; M = 5
T = matrix(runif(N*M),nrow=N,ncol=M)
T = apply(T,1,"sort"); T = t(T)
mu <-function(t){
.5*dnorm(t,mean = 1/3,sd=1/18)+
.1*dnorm(t,mean = 2/3,sd=1/18)}
library(MASS)
C<-function(d){exp(-abs(d))}
Y<-matrix(0,nrow=N,ncol=M)
for(n in 1:N){
tms<-T[n,]
Sig<-C(outer(tms,tms,FUN="-"))
mu_v<-mu(tms)
Y[n,] = mvrnorm(1,mu_v,Sig)}
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
y<-c(Y); t<-c(T)
K<-exp(-outer(t,t,FUN="-")^2)
lambda1<-1
alpha1<-solve(lambda1*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
grd<-seq(0,1,length=50)
K_fit<-exp(-outer(grd,t,FUN="-")^2)
muhat<-K_fit%*%alpha1
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,muhat,type="l",lwd=2)
y<-c(Y); t<-c(T)
K<-exp(-outer(t,t,FUN="-")^2)
lambda1<-1
alpha1<-solve(lambda1*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda2<-.01
alpha2<-solve(lambda2*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda3<-.0001
alpha3<-solve(lambda3*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda4<-.000001
alpha4<-solve(lambda4*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
grd<-seq(0,1,length=50)
K_fit<-exp(-outer(grd,t,FUN="-")^2)
muhat1<-K_fit%*%alpha1
muhat2<-K_fit%*%alpha2
muhat3<-K_fit%*%alpha3
muhat4<-K_fit%*%alpha4
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,muhat1,type="l",lwd=2,col=1)
points(grd,muhat2,type="l",lwd=2,col=2)
points(grd,muhat3,type="l",lwd=2,col=3)
points(grd,muhat4,type="l",lwd=2,col=4)
leg<-paste("lambda=",c(1,.01,.0001,.000001))
legend("topright",legend=leg,col=1:4)
y<-c(Y); t<-c(T)
K<-exp(-abs(outer(t,t,FUN="-")))
lambda1<-1
alpha1<-solve(lambda1*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda2<-.5
alpha2<-solve(lambda2*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda3<-.1
alpha3<-solve(lambda3*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda4<-.05
alpha4<-solve(lambda4*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
grd<-seq(0,1,length=100)
K_fit<-exp(-abs(outer(grd,t,FUN="-")))
muhat1<-K_fit%*%alpha1
muhat2<-K_fit%*%alpha2
muhat3<-K_fit%*%alpha3
muhat4<-K_fit%*%alpha4
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,muhat1,type="l",lwd=2,col=1)
points(grd,muhat2,type="l",lwd=2,col=2)
points(grd,muhat3,type="l",lwd=2,col=3)
points(grd,muhat4,type="l",lwd=2,col=4)
leg<-paste("lambda=",c(1,.5,.1,.05))
legend("topright",legend=leg,col=1:4)
y<-c(Y); t<-c(T)
myfun<-function(t,s){
if(t<=s){cosh(1-s)*cosh(t)
}else{cosh(1-t)*cosh(s)}
}
myfun<-Vectorize(myfun)
K<-outer(t,t,FUN=myfun)
lambda1<-1
alpha1<-solve(lambda1*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda2<-.5
alpha2<-solve(lambda2*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda3<-.1
alpha3<-solve(lambda3*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda4<-.05
alpha4<-solve(lambda4*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
grd<-seq(0,1,length=100)
K_fit<-outer(grd,t,FUN=myfun)
muhat1<-K_fit%*%alpha1
muhat2<-K_fit%*%alpha2
muhat3<-K_fit%*%alpha3
muhat4<-K_fit%*%alpha4
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,muhat1,type="l",lwd=2,col=1)
points(grd,muhat2,type="l",lwd=2,col=2)
points(grd,muhat3,type="l",lwd=2,col=3)
points(grd,muhat4,type="l",lwd=2,col=4)
leg<-paste("lambda=",c(1,.5,.1,.05))
legend("topright",legend=leg,col=1:4)
y<-c(Y); t<-c(T)
myfun<-function(t,s){
(1+sqrt(3)*abs(t-s))*exp(-sqrt(3)*abs(t-s))
}
myfun<-Vectorize(myfun)
K<-outer(t,t,FUN=myfun)
lambda1<-1
alpha1<-solve(lambda1*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda2<-.5
alpha2<-solve(lambda2*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda3<-.1
alpha3<-solve(lambda3*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda4<-.05
alpha4<-solve(lambda4*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
grd<-seq(0,1,length=100)
K_fit<-outer(grd,t,FUN=myfun)
muhat1<-K_fit%*%alpha1
muhat2<-K_fit%*%alpha2
muhat3<-K_fit%*%alpha3
muhat4<-K_fit%*%alpha4
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,muhat1,type="l",lwd=2,col=1)
points(grd,muhat2,type="l",lwd=2,col=2)
points(grd,muhat3,type="l",lwd=2,col=3)
points(grd,muhat4,type="l",lwd=2,col=4)
leg<-paste("lambda=",c(1,.5,.1,.05))
legend("topright",legend=leg,col=1:4)
y<-c(Y); t<-c(T)
myfun<-function(t,s){
exp(sin(pi*abs(t-s))^2)
}
myfun<-Vectorize(myfun)
K<-outer(t,t,FUN=myfun)
lambda1<-1
alpha1<-solve(lambda1*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda2<-.5
alpha2<-solve(lambda2*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda3<-.1
alpha3<-solve(lambda3*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
lambda4<-.05
alpha4<-solve(lambda4*K + t(K)%*%K,t(K)%*%y,tol=10^{-23})
grd<-seq(0,1,length=100)
K_fit<-outer(grd,t,FUN=myfun)
muhat1<-K_fit%*%alpha1
muhat2<-K_fit%*%alpha2
muhat3<-K_fit%*%alpha3
muhat4<-K_fit%*%alpha4
par(mar=c(2,2,1,1))
plot(c(T),c(Y),xlab="",ylab="")
points(grd,muhat1,type="l",lwd=2,col=1)
points(grd,muhat2,type="l",lwd=2,col=2)
points(grd,muhat3,type="l",lwd=2,col=3)
points(grd,muhat4,type="l",lwd=2,col=4)
leg<-paste("lambda=",c(1,.5,.1,.05))
legend("topright",legend=leg,col=1:4)
|
f632d7739bde50509d829a0fb5283b6c1bd8f9b2 | dfe5f07a4e176a847e291bb681471a3d51e75222 | /exp_cens_I.R | fa718ddc506fd3ec473d80fcab94bde020da201d | [] | no_license | Bousfiha/ProjetSpeGit | bd1fb6322f8cde06c110254de0883488318414e6 | ba60f34b57a72478016e6d162906dea6148fe821 | refs/heads/master | 2016-09-01T05:47:04.772429 | 2015-06-11T06:38:58 | 2015-06-11T06:38:58 | 36,016,276 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,199 | r | exp_cens_I.R | ##Loi exponentielle
##Données censurées de type I
rm(list=ls())
lambda = 1
TailleEch = 100
NbIt = 1000
table = matrix(0,81,2)
Var_2 = table
P=seq(from=0.1,to=0.9,by=0.01)
for ( p in 1:81) {
tc = -log(1-P[p])/lambda
for ( j in 1:NbIt) {
R = 0
##Simulation de la loi
exp = rexp(TailleEch,lambda)
expOrd = exp[order(exp)]
for ( i in 1:TailleEch ) {
if ( exp[i] <= tc)
R = R + 1
}
if ( R == P[p]*TailleEch) {
##Estimation du paramètre lambda
#LambdaSimu = -(1/tc)*log(1-R/TailleEch)
LambdaSimu = R/ (sum(expOrd[1:R])+(TailleEch-R)*tc)
Var_2[p,1] = table[p,1] + LambdaSimu^2
table[p,1] = table[p,1] + LambdaSimu
table[p,2] = table[p,2] + 1
}
}
}
Var = Var_2[,1]/table[,2]- (table[,1]/table[,2])^2
table[,1] = table[,1]/table[,2]
plot(P,Var,pch=20,xlab="R",ylab="Variance",col="blue",font.axis=2)
title("Evolution de la variance en fonction de R",font.main=2)
#plot(P,table[,1],ylim=c(0.9,1.1),pch=20,xlab="R",ylab="Moyenne du Lambda estimé",col="blue",font.axis=2)
#title("Evolution de l'espérance du lambda estimé en fonction de R",font.main=2)
#abline(h=lambda,col="red",font.main=2)
|
ca6118c53805a5636ed7c3570ebcb23825e0c389 | 7fc453391224956da9ce2867d9bd54530a66aa43 | /R/distr_to_xml.R | b3f27dcbd33161c3fc53a6089849ab1422fae8e0 | [] | no_license | cran/beautier | 880277272f6cf48b4eca9c28db68e4a42c4ccc3a | 439683e296d755698c3861b447106556e540aa9f | refs/heads/master | 2022-08-30T18:21:31.772630 | 2022-08-11T09:40:07 | 2022-08-11T09:40:07 | 127,175,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,158 | r | distr_to_xml.R | #' Internal function
#'
#' Converts a distribution to XML
#' @inheritParams default_params_doc
#' @param distr a distribution,
#' as created by \code{\link{create_distr}})
#' @return the distribution as XML text
#' @examples
#' check_empty_beautier_folder()
#'
#' distr_to_xml(create_uniform_distr(id = 1))
#'
#' check_empty_beautier_folder()
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  beautier::check_beauti_options(beauti_options)
  if (!beautier::is_id(distr$id)) {
    stop("distribution must have an ID")
  }
  # Select the type-specific converter; uniform is the final fall-through
  # and is asserted, matching the original dispatch order.
  converter <- if (beautier::is_beta_distr(distr)) {
    beautier::distr_to_xml_beta
  } else if (beautier::is_exp_distr(distr)) {
    beautier::distr_to_xml_exp
  } else if (beautier::is_gamma_distr(distr)) {
    beautier::gamma_distr_to_xml
  } else if (beautier::is_inv_gamma_distr(distr)) {
    beautier::distr_to_xml_inv_gamma
  } else if (beautier::is_laplace_distr(distr)) {
    beautier::distr_to_xml_laplace
  } else if (beautier::is_log_normal_distr(distr)) {
    beautier::distr_to_xml_log_normal
  } else if (beautier::is_normal_distr(distr)) {
    beautier::distr_to_xml_normal
  } else if (beautier::is_one_div_x_distr(distr)) {
    beautier::distr_to_xml_one_div_x
  } else if (beautier::is_poisson_distr(distr)) {
    beautier::distr_to_xml_poisson
  } else {
    testit::assert(beautier::is_uniform_distr(distr))
    beautier::distr_to_xml_uniform
  }
  # Every converter accepts (distribution, beauti_options); the distribution
  # is passed positionally because the gamma converter names it gamma_distr.
  text <- converter(distr, beauti_options = beauti_options)
  testit::assert(beautier::is_xml(text))
  text
}
#' Internal function
#'
#' Converts a beta distribution to XML
#' @inheritParams default_params_doc
#' @param distr a beta distribution,
#' as created by \code{\link{create_beta_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_beta <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_beta_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Opening tag, indented alpha and beta parameter XML, closing tag.
  c(
    paste0("<Beta id=\"Beta.", id, "\" name=\"distr\">"),
    beautier::indent(beautier::parameter_to_xml(distr$alpha)),
    beautier::indent(beautier::parameter_to_xml(distr$beta)),
    "</Beta>"
  )
}
#' Internal function
#'
#' Converts an exponential distribution to XML
#' @inheritParams default_params_doc
#' @param distr an exponential distribution,
#' as created by \code{\link{create_exp_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_exp <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_exp_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Opening tag, indented mean parameter XML, closing tag.
  c(
    paste0("<Exponential id=\"Exponential.", id, "\" name=\"distr\">"),
    beautier::indent(beautier::parameter_to_xml(distr$mean)),
    "</Exponential>"
  )
}
#' Internal function
#'
#' Converts an inverse-gamma distribution to XML
#' @inheritParams default_params_doc
#' @param distr an inverse-gamma distribution,
#' as created by \code{\link{create_inv_gamma_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_inv_gamma <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_inv_gamma_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Opening tag, indented alpha and beta parameter XML, closing tag.
  c(
    paste0("<InverseGamma id=\"InverseGamma.", id, "\" name=\"distr\">"),
    beautier::indent(beautier::parameter_to_xml(distr$alpha)),
    beautier::indent(beautier::parameter_to_xml(distr$beta)),
    "</InverseGamma>"
  )
}
#' Internal function
#'
#' Converts a Laplace distribution to XML
#' @inheritParams default_params_doc
#' @param distr a Laplace distribution
#' as created by \code{\link{create_laplace_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_laplace <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_laplace_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Opening tag, indented mu and scale parameter XML, closing tag.
  c(
    paste0("<LaplaceDistribution id=\"LaplaceDistribution.", id, "\" name=\"distr\">"),
    beautier::indent(beautier::parameter_to_xml(distr$mu)),
    beautier::indent(beautier::parameter_to_xml(distr$scale)),
    "</LaplaceDistribution>"
  )
}
#' Internal function
#'
#' Converts a log-normal distribution to XML
#' @inheritParams default_params_doc
#' @param distr a log-normal distribution,
#' as created by \code{\link{create_log_normal_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_log_normal <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_log_normal_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Opening tag, indented m and s parameter XML, closing tag.
  c(
    paste0("<LogNormal id=\"LogNormalDistributionModel.", id, "\" name=\"distr\">"),
    beautier::indent(beautier::parameter_to_xml(distr$m)),
    beautier::indent(beautier::parameter_to_xml(distr$s)),
    "</LogNormal>"
  )
}
#' Internal function
#'
#' Converts a normal distribution to XML
#' @inheritParams default_params_doc
#' @param distr a normal distribution,
#' as created by \code{\link{create_normal_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_normal <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_normal_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Opening tag, indented mean and sigma parameter XML, closing tag.
  c(
    paste0("<Normal id=\"Normal.", id, "\" name=\"distr\">"),
    beautier::indent(beautier::parameter_to_xml(distr$mean)),
    beautier::indent(beautier::parameter_to_xml(distr$sigma)),
    "</Normal>"
  )
}
#' Internal function
#'
#' Converts a 1/x distribution to XML
#' @inheritParams default_params_doc
#' @param distr a 1/x distribution,
#' as created by \code{\link{create_one_div_x_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_one_div_x <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_one_div_x_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  # Self-closing tag: a OneOnX distribution carries no parameters.
  paste0("<OneOnX id=\"OneOnX.", id, "\" name=\"distr\"/>") # nolint this is no absolute path
}
#' Internal function
#'
#' Converts a Poisson distribution to XML
#' @inheritParams default_params_doc
#' @param distr a Poisson distribution,
#' as created by \code{\link{create_poisson_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_poisson <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_poisson_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  text <- NULL
  # Opening tag carries an explicit spec, unlike the other converters.
  text <- c(text, paste0("<distr ",
    "id=\"Poisson.", id, "\" ",
    "spec=\"beast.math.distributions.Poisson\">"))
  # FIX: removed a stray trailing comma after parameter_to_xml(...), which
  # passed an empty second argument to indent() and only worked because
  # empty arguments fall back to the parameter's default value.
  text <- c(text,
    beautier::indent(
      beautier::parameter_to_xml(distr$lambda)
    )
  )
  text <- c(text, "</distr>")
  text
}
#' Internal function
#'
#' Converts a uniform distribution to XML
#' @inheritParams default_params_doc
#' @param distr a uniform distribution,
#' as created by \code{\link{create_uniform_distr}})
#' @return the distribution as XML text
#' @author Richèl J.C. Bilderbeek
#' @export
distr_to_xml_uniform <- function(
  distr,
  beauti_options = create_beauti_options()
) {
  testit::assert(beautier::is_uniform_distr(distr))
  beautier::check_beauti_options(beauti_options)
  id <- distr$id
  testit::assert(beautier::is_id(id))
  upper <- distr$upper
  # The upper bound becomes an optional attribute: absent when NA,
  # the literal "Infinity" when infinite, the value itself otherwise.
  # (NA must be tested first: is.infinite(NA) would not be TRUE/FALSE.)
  upper_attr <- if (beautier::is_one_na(upper)) {
    ""
  } else if (is.infinite(upper)) {
    " upper=\"Infinity\""
  } else {
    paste0(" upper=\"", upper, "\"")
  }
  paste0("<Uniform id=\"Uniform.", id, "\" name=\"distr\"", upper_attr, "/>") # nolint this is no absolute path
}
|
fb6662b8a8ca96a3302d1133e959d2bb19af4b85 | c66e8641380b3217feeacbe1ed60c754142dcce6 | /plants/03_output.R | a60fde1f24029c2868dc299724836cb861033219 | [
"Apache-2.0"
] | permissive | bcgov/invasive-species-indicator | 1a7354fe73c97e743c6a2b363adb85d3fe833f5b | c2c8cb46adece82965ae7040c271b39d599555c3 | refs/heads/master | 2021-06-17T20:09:25.889827 | 2020-12-16T23:05:10 | 2020-12-16T23:05:10 | 68,943,845 | 1 | 2 | Apache-2.0 | 2020-04-15T01:12:10 | 2016-09-22T17:10:01 | R | UTF-8 | R | false | false | 4,643 | r | 03_output.R | # Copyright 2016 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# @knitr plants-graphs
## Setup: plotting and data-manipulation packages, project helpers, and the
## output directory. NOTE(review): require() is used for loading; library()
## would fail loudly when a package is missing -- confirm before changing.
require(ggplot2) #plotting
require(extrafont) #plot font
require(envreportutils) #theme_soe
require(grid) #
require(dplyr) #data munging
source("fun.R") #functions from fun.R
dir.create("out", showWarnings = FALSE)
## Transforming shapefile contents for use in ggplot2
## (bgc_simp and summary are expected to exist already -- presumably created
## by an earlier script in this series; verify before running standalone.)
bgc_simp@data$id <- rownames(bgc_simp@data)
biozones.data.points <- fortify(bgc_simp, region = "id")
biozones.data.points <- left_join(biozones.data.points, bgc_simp@data, by = "id")
biozones.data.df <- left_join(biozones.data.points, summary, by = c("MAP_LAB" = "BGC"))
## Replace 0 with NA for displaying zeros differently on the map. If doing this,
## add na.value = "grey90" (or whatever) to scale_fill_gradient call
# biozones.data.points$n_spp_s[biozones.data.points$n_spp_s == 0] <- NA
# biozones.data.points$n_sts_s[biozones.data.points$n_sts_s == 0] <- NA
## Plotting plant sites map
## Shared theme: no axes or grid, legend placed inside the panel.
map_theme <- theme(axis.title = element_blank(),
                   axis.text = element_blank(),
                   axis.ticks = element_blank(),
                   panel.grid = element_blank(),
                   legend.position = c(.2, .2),
                   legend.text = element_text(size = 11),
                   legend.title = element_text(size = 11,
                                               face = "bold"),
                   plot.margin = unit(c(0,0,0,0),"mm"),
                   text = element_text(family = "Verdana"))
## Choropleth of known invasive plant site counts per biogeoclimatic zone.
sitesmap <- ggplot(biozones.data.points) +
  aes(long, lat, group = group, fill = n_sts_s) +
  geom_polygon() +
  coord_fixed() +
  theme_minimal() +
  scale_fill_gradient(name = "Number of Known Invasive\nPlant Locations",
                      low = "#f7fcb9", high = "#004529",
                      guide = guide_colourbar(reverse = FALSE,
                                              barwidth = 1.5, barheight = 7,
                                              title.position = "bottom")) +
  map_theme
#plot(sitesmap)
## Choropleth of established invasive species counts per zone.
sppmap <- ggplot(biozones.data.points) +
  aes(long, lat, group = group, fill = n_spp_s) +
  geom_polygon() +
  coord_fixed() +
  theme_minimal() +
  scale_fill_gradient(name = "Number of Established\nInvasive Plant Species",
                      low = "#f7fcb9", high = "#004529",
                      guide = guide_colourbar(reverse = FALSE,
                                              barwidth = 1.5, barheight = 7,
                                              title.position = "bottom")) +
  map_theme
#plot(sppmap)
## @knitr stop
## OUTPUTS ----------------------------------------------------------------
## invasive plant choropleth of number of occurrences and number of species by BGC map
# png(filename = "./out/plant.viz.png", width = 836, height = 430, units = "px")
# multiplot(sitesmap, sppmap, cols = 2, widths = c(1, 1))
# dev.off()
# png_retina(filename = "./out/plant.viz.png", width = 836, height = 430, units = "px")
# multiplot(sitesmap, sppmap, cols = 2, widths = c(1, 1))
# dev.off()
## Write each map as a retina-resolution PNG.
png_retina(filename = "./out/plant.viz.sites.png", width = 500, height = 500, units = "px")
plot(sitesmap)
dev.off()
png_retina(filename = "./out/plant.viz.spp.png", width = 500, height = 500, units = "px")
plot(sppmap)
dev.off()
## Downscale the PNGs with magick for web use. NOTE(review): the files are
## written in JPEG format but with a ".png" path -- confirm this is intended.
library(magick)
(plantsites <- image_read("./out/plant.viz.sites.png"))
(plantsitessmall <- image_resize(plantsites, "1000x1000"))
image_write(plantsitessmall,
            path = "./out/plant.viz.sites.small.png",
            format = "jpg")
(plantspp <- image_read("./out/plant.viz.spp.png"))
(plantsppsmall <- image_resize(plantspp, "1000x1000"))
image_write(plantsppsmall,
            path = "./out/plant.viz.spp.small.png",
            format = "jpg")
## resulting filesize too big
# svg_px(file = "./out/plant.viz.svg", width = 900, height = 430)
# multiplot(sitesmap, sppmap, cols = 2, widths = c(1, 1))
# dev.off()
## Crunching some more numbers for the indicator page
## number of unique species and frequency
speciesfreq <- table(species.long$species_id)
speciesfreq <- as.data.frame(speciesfreq)
|
fb9ee7545fd11e4d1b5cbc5a2666d331e8c162f2 | cde7a60b95e3a91bff43df40a6d0a8d16d48aeb5 | /plot4.R | 8c832a1ebd681fb3578ca7150cd00edf945d56c6 | [] | no_license | eborrero/ExData_Plotting1 | f4e9dbda6ed188b91d0add82fdf8ffa34504fccd | e25b3ba0d4868630cbc9974084f0dc7a7a191517 | refs/heads/master | 2021-01-17T06:31:17.224750 | 2016-02-14T08:01:50 | 2016-02-14T08:01:50 | 51,681,986 | 0 | 0 | null | 2016-02-14T05:57:24 | 2016-02-14T05:57:23 | null | UTF-8 | R | false | false | 2,322 | r | plot4.R | ## Make sure the working directory is the same directory ad the R files and the .txt data file
##read the file (all rows; the two target dates are filtered below).
##NOTE(review): missing values are coded "?" in this dataset, which turns the
##numeric columns into factors/characters -- hence the as.numeric(as.character())
##conversions before plotting; confirm against the raw file.
data <- read.csv("household_power_consumption.txt", header = TRUE, sep=";")
##separate the file by the first date, Feb. 1st
dataFebFirst <-data[data$Date=="1/2/2007",]
##separate the file by the second date, Feb. 2nd
dataFebSec <-data[data$Date=="2/2/2007",]
##rbind the two data frames together
consumptionData <- rbind(dataFebFirst,dataFebSec)
##Combine Date and Time into one POSIXlt column; per ?strptime, paste() joins
##the two fields before parsing with the day/month/year format
consumptionData$dateAndTime <- strptime(paste(consumptionData$Date, consumptionData$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
##prepare the graphical display (or in this case, saving as png with 480 width and height)
png("plot4.png", width = 480, height = 480)
##setting the parameter to have a 2x2 plot, which will populate by row first
par(mfrow = c(2,2))
##Top-left panel: Global Active Power over time (same as plot2.R)
plot(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Global_active_power)), type="l", ylab = "Global Active Power", xlab = "")
##Top-right panel: Voltage over time with appropriate labels
plot(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Voltage)), type="l", ylab = "Voltage", xlab = "datetime")
##Bottom-left panel: the three sub-metering series (same as plot3.R);
##type="n" draws an empty frame so all three series share the same axes
plot(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Sub_metering_1)), type="n", ylab = "Energy sub metering", xlab = "")
points(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Sub_metering_1)), type ="l",col = "black")
points(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Sub_metering_2)), type ="l",col = "red")
points(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Sub_metering_3)), type ="l",col = "blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black","red","blue"))
##Bottom-right panel: Global_reactive_power over time with appropriate labels
plot(consumptionData$dateAndTime, as.numeric(as.character(consumptionData$Global_reactive_power)), type="l", ylab = "Global_reactive_power", xlab = "datetime")
##required for saving as png: closes the device and flushes the file
dev.off()
3c2c8c35369025448d4fb0c0613a0437729dd594 | 186dd33c855dc643aeef3953a373a6d704588cf0 | /R/io.R | 15ca95309e4882a9f0ff7cac3fb5ee79edbcff1b | [
"BSD-2-Clause"
] | permissive | cran/pubprint | 8432d36fc9d0aa8397bbb41d6fae1f189d967f80 | 864a5a20de759dcd50bee84ad44639601a5aadc6 | refs/heads/master | 2021-01-21T14:04:33.481164 | 2016-05-24T10:28:06 | 2016-05-24T10:28:06 | 50,608,880 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,702 | r | io.R | #############################################################################
# io.R
#############################################################################
#' @include style.R
NULL
#' Initialises a pubprint object
#'
#' \code{pubprint} returns an empty pubprint S3 object.
#'
#' This function initialises an empty pubprint S3 object and returns it.
#' This is mandatory for using the pull and push functions of the pubprint
#' package.
#'
#' @seealso See \code{\link{pubprint-package}} for package documentation.
#'
#' @examples
#' ppo <- pubprint()
#' ppo
#'
#' @export
pubprint <- function()
{
    # Build a fresh pubprint object: an empty unnamed register ("pipe")
    # plus an empty named register ("memory").
    structure(list(pipe = list(), memory = list()), class = "pubprint")
}
#' Adds another item to an object
#'
#' \code{push<-} is a generic function and is used to put another item onto a
#' stack or pipe.
#'
#' There is no default method, so see a specific \code{push<-} method (such as
#' \code{push<-.pubprint}) for further information.
#'
#' @param x an object used to select a method.
#'
#' @param ... further arguments passed to or from other methods.
#'
#' @param value an item pushed to \code{x}.
#'
#' @return The updated object.
#'
#' @export
`push<-` <- function(x, ..., value) UseMethod("push<-")
#' Adds another item to a pubprint object
#'
#' \code{push<-.pubprint} adds the given item to named memory or pipe of a
#' pubprint object.
#'
#' No further details.
#'
#' @param x a pubprint object to which \code{value} is added.
#'
#' @param item numeric or character. If \code{item} is a numeric, \code{value}
#' is added to pipe. If \code{item} is a character, \code{value} is added to
#' named memory. A warning is thrown, if an existing item is overwritten.
#'
#' @param add logical, indicating if \code{value} is added to an existing item.
#' If \code{item} is specified, \code{value} is added to this item, else
#' argument \code{n} is used.
#'
#' @param n numeric. If \code{item} is missing and \code{add} is true,
#' \code{n} indicates to which pipe position (backwards) \code{value} is added.
#' Therefore, \code{n = 1} adds the item to the last pipe item, \code{n = 2}
#' to the second last item and so on.
#'
#' @param ... further arguments passed to or from other methods.
#'
#' @param value an item pushed to \code{x}.
#'
#' @return The updated pubprint object.
#'
#' @seealso See \code{\link{push<-}} for the generic function,
#' \code{\link{pull}} to extract items again.
#'
#' @examples
#' ppo <- pubprint()
#' push(ppo) <- t.test(1:100, 2:101)
#' push(ppo, add = TRUE) <- .8123 # add d value to last pipe item
#' push(ppo, item = "i1") <- t.test(1:30, 2:31)
#'
#' pull(ppo)
#' pull(ppo, item = "i1")
#'
#' @export
`push<-.pubprint` <- function(x,
                              item,
                              add = FALSE,
                              n = 1,
                              ...,
                              value)
{
    # Every stored entry is wrapped in a one-element list so that several
    # values can later be appended to the same pipe/memory slot.
    wrapped <- list(value)
    if (add)
    {
        # Append to an existing slot.
        if (missing(item))
        {
            # n counts backwards from the end of the pipe (n = 1 -> last item).
            pos <- length(x$pipe) + 1 - n
            x$pipe[[pos]] <- c(x$pipe[[pos]], wrapped)
        }
        else if (is.numeric(item))
            x$pipe[[item]] <- c(x$pipe[[item]], wrapped)
        else
            x$memory[[item]] <- c(x$memory[[item]], wrapped)
    }
    else if (missing(item))
    {
        # No target given: start a new entry at the end of the pipe.
        x$pipe <- c(x$pipe, list(wrapped))
    }
    else if (is.numeric(item))
    {
        # Overwrite the given pipe position.
        x$pipe[[item]] <- wrapped
    }
    else
    {
        # Named memory; warn before clobbering an existing entry.
        if (!is.null(x$memory[[item]]))
            warning("overwriting item")
        x$memory[[item]] <- wrapped
    }
    x
}
#' Pulls an item from an object
#'
#' \code{pull} is a generic function and is used to pull an item from a stack
#' or pipe.
#'
#' There is no default method, so see a specific \code{pull} method (such as
#' \code{pull.pubprint}) for further information.
#'
#' @param x an object used to select a method.
#'
#' @param ... further arguments passed to or from other methods.
#'
#' @return The updated object.
#'
#' @export
pull <- function(x, ...) UseMethod("pull")
#' Pulls an item from a pubprint object
#'
#' \code{pull.pubprint} is used to pull an item from the pipe or the named
#' memory of a pubprint object.
#'
#' No further details.
#'
#' @param x a pubprint object
#'
#' @param item the item to pull. If item is numeric, pipe and if it is a
#' character, named memory is chosen.
#'
#' @param remove either a logical, \code{"pipe"} or \code{"memory"}. If
#' \code{remove} is \code{TRUE}, every returned item is removed from pipe or
#' memory. If it is \code{"pipe"} (or \code{"memory"}), only accessed pipe (or
#' memory) items will be removed.
#'
#' @param ... further arguments passed to \code{\link{pprint}} or the internal
#' style functions.
#'
#' @return The updated object.
#'
#' @seealso See \code{\link{pull}} for the generic function,
#' \code{\link{push<-}} to put items to pipe or named memory.
#'
#' @examples
#' ppo <- pubprint()
#' push(ppo) <- t.test(1:100, 2:101)
#' push(ppo, add = TRUE) <- .8123 # add d value to last pipe item
#' push(ppo, item = "i1") <- t.test(1:30, 2:31)
#'
#' pull(ppo)
#' pull(ppo, item = "i1")
#' pull(ppo, item = "i1", remove = TRUE) # removes item as well
#'
#' @export
pull.pubprint <- function(x,
                          item = 1,
                          remove = pp_opts$get("removeItems"),
                          ...)
{
    # Name of the caller's variable: this function updates the caller's
    # pubprint object in place via assign() below (replacement-style NSE),
    # so x must be passed as a plain symbol, not an expression.
    objName <- deparse(substitute(x))
    if (is.numeric(item))
    {
        # Numeric item: take it from the pipe (unnamed register).
        if (!length(x$pipe) || length(x$pipe) < item)
            stop("subscript out of bounds")
        ret <- x$pipe[[item]]
        # remove may be TRUE/FALSE or the strings "pipe"/"memory";
        # pipe items are dropped for TRUE or "pipe".
        if ((is.logical(remove) && remove) || "pipe" == remove)
            x$pipe <- x$pipe[-item]
    }
    else
    {
        # Character item: take it from the named memory register.
        if (!length(x$memory) || ! item %in% names(x$memory))
            stop("item \"", item, "\" not available")
        ret <- x$memory[[item]]
        if ((is.logical(remove) && remove) || "memory" == remove)
            x$memory <- x$memory[item != names(x$memory)]
    }
    # Format the extracted item for publication before returning it.
    ret <- pprint(ret, ...)
    # Side effect: write the (possibly shrunk) object back into the caller.
    assign(objName, x, envir = parent.frame())
    return(ret)
}
#' Prints a pubprint object
#'
#' Prints the contents of a pubprint object
#'
#' Prints contents of named memory and pipe of a pubprint object.
#'
#' @param x object of class \code{pubprint}.
#'
#' @param ... further arguments. Ignored.
#'
#' @examples
#' ppo <- pubprint()
#' push(ppo) <- t.test(1:10)
#' print(ppo)
#'
#' @export
print.pubprint <- function(x, ...)
{
    # Helper: print one register's items, or "empty" when it holds none.
    show_register <- function(header, items)
    {
        cat(header)
        if (length(items) > 0)
            print(lapply(items, pprint, format = "object"))
        else
            cat("empty\n")
    }
    show_register("Values in unnamed register (pipe):\n", x$pipe)
    cat("\n")
    show_register("Values in named register (memory):\n", x$memory)
}
#' Prints an object in a publishable manner
#'
#' \code{pprint} formats the output of the given object in a specified way
#'
#' This function calls internal style functions to convert the output of the
#' object into the specified publication style. It offers options to put a
#' math mode and surrounding characters around the (concatenated) output.
#'
#' If argument \code{format} is missing, a function tries to determine a
#' default format specifier. Can be specified to simple return the input
#' object (\code{"object"}). It is possible to set it to any internal style
#' function, the selected style supports.
#'
#' @param x object which output should be printed. Can be a list to deliver
#' additional information to internal style functions.
#'
#' @param format optional format specifier. Character vector, see details.
#'
#' @param ... optional arguments passed to internal style functions. See their
#' help files for more information.
#'
#' @param concat logical, whether returned result is in a single character or
#' a character vector with parts of the statistical output.
#'
#' @param mmode logical indicating if the returned result should be set in
#' math mode (depends on output format).
#'
#' @param separator character string specifying the surrounding characters.
#'
#' @param toClip logcial, whether returned result should be printed to
#' clipboard (see \code{\link{toClipboard}}).
#'
#' @return Simply the unmodified object \code{x} in a list if \code{format} is
#' \code{"object"}, else a character vector.
#'
#' @seealso See \code{\link{pp_opts_style}} for setting publication style and
#' \code{\link{pp_opts_out}} for setting output format.
#'
#' @examples
#' pprint(t.test(1:30))
#' pprint(t.test(1:30, 2:31))
#' pprint(t.test(1:30, 2:31), format = "object")
#' pprint(t.test(1:30, 2:31), mmode = FALSE, separator = NULL)
#' pprint(list(t.test(1:30), .843))
#'
#' @export
pprint <- function(x,
                   format,
                   ...,
                   concat = TRUE,
                   mmode = pp_opts$get("mmode"),
                   separator = pp_opts$get("separator"),
                   toClip = FALSE)
{
    # Normalise the input: internally everything is a list of result parts.
    if ("list" != class(x)[1])
        x <- list(x)
    # No explicit format: derive one from the first element.
    if (missing(format))
        format <- utils.get.format(x[[1]])
    # format == "object" the whole list will be returned unformatted
    if ("object" != format)
    {
        # Dispatch to the style function registered under this format name.
        x <- pp_opts_style$get(format)(x, ...)
        if (concat)
            x <- out.concat(x)
        # Wrap in math-mode markers (depends on the configured output format).
        x <- out.math(x, mmode = mmode)
        if (!is.null(separator))
        {
            # "brackets"/"delimiter" name configured separator sets; any
            # other value is used directly as the surrounding characters.
            if (separator %in% c("brackets", "delimiter"))
                x <- out.bracket(x, brackets=pp_opts$get(separator), inmmode=FALSE)
            else
                x <- out.bracket(x, brackets=separator, inmmode=FALSE)
        }
    }
    # Optionally copy the final text to the system clipboard.
    if (toClip)
        toClipboard(x)
    return(x)
}
#' Pastes text to clipboard
#'
#' Text is written to clipboard, allowing easy pasting to other software. This
#' function supports only pasting of character vectors because
#' \code{\link[base]{writeLines}} is used. Supported operating systems are
#' BSD/Linux (the \code{xclip} command has to be installed), Mac OS
#' (\code{pbcopy} has to be installed) and Microsoft Windows.
#'
#' @param x character vector that should be pasted to clipboard.
#'
#' @examples
#' toClipboard("This is a little test.")
#'
#' @export
toClipboard <- function(x)
{
    # Writes x to the system clipboard via the platform's facility:
    # pbcopy on macOS, xclip on other unix-alikes (must be installed), and
    # the "clipboard" pseudo-connection on Windows.
    if ("unix" == .Platform$OS.type)
    {
        cmd <- if ("Darwin" == Sys.info()[["sysname"]]) "pbcopy" else "xclip -i"
        con <- pipe(cmd, open = "w")
        # FIX: close the pipe even when writeLines() fails; the original
        # only closed it after a successful write, leaking the connection
        # on error.
        on.exit(if (isOpen(con)) close(con), add = TRUE)
    }
    else
    {
        # On Windows, writeLines() opens and closes the clipboard itself,
        # so there is nothing to clean up.
        con <- "clipboard"
    }
    writeLines(x, con = con)
    invisible(NULL)
}
|
a0fc9f0682e1e8b3ea9899f541ff6dc1582a2b6b | c4547314bb5e40b6386968ef995b1a4149c1de8c | /R/options.R | 8b008f087563c1dd15f73a9fabdd977b5f49ecdc | [] | no_license | cran/colorSpec | a23ea51692949e43fce61e7ead9ba10b39668c58 | 7b6120a30cad781b413e6145a7d5b73c10991a64 | refs/heads/master | 2022-05-10T19:23:30.384707 | 2022-05-04T01:40:02 | 2022-05-04T01:40:02 | 58,973,975 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,069 | r | options.R |
# g.* are global variables for colorSpec that are "unlocked" in .onAttach()
# g.options is a list with private copies of the colorSpec.* options managed by the base package
# these copies are used because g.loglevel and g.word[] are derived variables,
# and we want to see whether the user has changed a colorSpec.* option
g.options <- list(loglevel = 'WARN', # must be character
logformat = "%t{%H:%M:%OS3} %l %n::%f(). %m", # must be character
stoponerror = TRUE # must be logical
)
# g.loglevel is an integer derived from g.options$loglevel which is a string
# it is managed in logging.R
g.loglevel <- 0L # 0L is invalid
# g.word[] is an array derived from g.options$logformat, and managed in logging.R
g.word <- character(0) # character(0) is invalid
# cs.options()
# retrieve and assign items in g.options
#
# ... items with a valid name are assigned
# items with no name must be character and if valid are returned returned
cs.options <- function(...)
    {
    # helper returning all base options whose name starts with "colorSpec."
    myfun = function() { .Options[ grepl( "^colorSpec[.]", names(.Options) ) ] }
    theList = list(...) #; print(theList)
    n = length(theList)
    # no arguments: pure query, just return the current colorSpec.* options
    if( n==0 ) return( do.call(myfun,list()) )
    namevec = names(theList) #; print( namevec )
    if( is.null(namevec) )
        {
        cat( sprintf( "WARN cs.options() All of the %d arguments are unnamed, and ignored\n", n ), file=stderr() )
        return( do.call(myfun,list()) )
        }
    # extract just the args with names and ignore the rest
    mask = nzchar( namevec )
    idx.unnamed = which( ! mask )
    if( 0 < length(idx.unnamed) )
        {
        mess = paste( idx.unnamed, sep=', ', collapse=' ' )
        mess = sprintf( "WARN cs.options() Arguments indexed '%s' are unnamed, and ignored.", mess )
        cat( mess, '\n', file=stderr() )
        }
    theList = theList[ mask ] #; print(theList)
    namevec = names( theList ) #; print( namevec )
    # extract just the args with names that partially match the full names, and ignore the rest
    # (pmatch() implements R's usual partial matching; an ambiguous or
    # unmatched prefix yields NA and is reported below)
    fullname = names(g.options)
    #fullname = c( "loglevel", "logformat", "stoponerror" )
    idx = pmatch( namevec, fullname )
    idx.na = which( is.na(idx) )
    if( 0 < length(idx.na) )
        {
        mess = paste( namevec[idx.na], collapse=' ' )
        mess = sprintf( "WARN cs.options() Arguments named '%s' cannot be matched, and are ignored.", mess )
        cat( mess, '\n', file=stderr() )
        # every argument failed to match: nothing to assign, return early
        if( length(idx.na) == length(idx) ) return( do.call(myfun,list()) )
        }
    mask = ! is.na(idx)
    #print( idx ) ; print( mask )
    theList = theList[ mask ] # ; print(theList)
    idx = idx[ mask ] # this must have positive length
    names(theList) = sprintf( "colorSpec.%s", fullname[idx] ) # prepend
    #print( theList )
    # finally ready to assign global options
    options( theList )
    # validate the base options, then refresh the private copies in g.options
    checkBaseOptions()
    updatePrivateOptions()
    return( do.call(myfun,list()) )
    }
# updatePrivateOptions copy values from public base package .Options to private copies in g.options
# and update derived globals (g.word and g.loglevel) if necessary
updatePrivateOptions <- function()
    {
    # loglevel: the derived global g.loglevel must track the string option,
    # so route the change through setLogLevel() (managed in logging.R)
    if( ! identical( g.options$loglevel, .Options$colorSpec.loglevel ) )
        {
        setLogLevel( .Options$colorSpec.loglevel )
        }
    # logformat: likewise, g.word[] is derived from the format string
    if( ! identical( g.options$logformat, .Options$colorSpec.logformat ) )
        {
        setLogFormat( .Options$colorSpec.logformat )
        }
    if( is.logical(.Options$colorSpec.stoponerror) )
        {
        # stoponerror has no derived state; copy it directly when changed
        if( ! identical( g.options$stoponerror, .Options$colorSpec.stoponerror ) )
            {
            assignPrivateOption( 'stoponerror', .Options$colorSpec.stoponerror )
            }
        }
    else
        {
        mess = sprintf( "WARN updatePrivateOptions() colorSpec.stoponerror='%s' is not logical - ignored.",
                        as.character(.Options$colorSpec.stoponerror) )
        cat( mess, '\n', file=stderr() )
        }
    return(TRUE)
    }
# name    one of 'loglevel', 'logformat', 'stoponerror'
# value   an appropriate value for that option
# Assigns into the package-global list g.options (unlocked in .onAttach()),
# using superassignment so the namespace binding itself is updated.
assignPrivateOption <- function( name, value )
    {
    g.options[[ name ]] <<- value
    # colorSpec::g.options$stoponerror = .Options$colorSpec.stoponerror does not work, does not understand the namespace 'colorSpec'
    # this one works, but takes 3 lines
    #opts = g.options
    #opts[[ name ]] = value
    #assign( "g.options", opts, envir=asNamespace('colorSpec') )
    # test assignment - should be an assert()
    if( ! identical( g.options[[name]], value ) ) cat( "ERROR assignPrivateOption() failed.\n", file=stderr() )
    # cat( name, sprintf( '%s\n', as.character(g.options[[name]]) ) )
    }
# checkBaseOptions()
#
# Verify that every option named in g.options has a corresponding public
# base-R option "colorSpec.<name>" that is assigned and has the right type
# ('stoponerror' must be logical, everything else must be character).
# Problems are reported on stderr.
checkBaseOptions <- function()
{
    for( optname in names(g.options) )
    {
        fullkey  = sprintf( "colorSpec.%s", optname )
        optvalue = getOption( fullkey )

        if( is.null(optvalue) )
        {
            mess = sprintf( "ERROR checkBaseOptions() Option '%s' is unassigned.", fullkey )
            cat( mess, '\n', file=stderr() )
            next
        }

        # 'stoponerror' is the only logical-valued option; the rest are strings
        validtype = if( optname == 'stoponerror' ) is.logical(optvalue) else is.character(optvalue)

        if( ! validtype )
        {
            mess = sprintf( "ERROR checkBaseOptions() Value of option %s is '%s', which has the wrong type.", fullkey, as.character(optvalue) )
            cat( mess, '\n', file=stderr() )
        }
    }
}
2f42493e2279ed1391ecea76d170ca7aebbc176c | 72636b048d08f566acbb28d8d7f24a0adbdfa646 | /data/get-city-locations.R | 8365537a49b217ea574c01c4f4a09b5031da04ea | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | OxShef/oxshef_charts | 3a0bffec81eb9b612d92aa3e0514fa9dadf624a7 | 36a9cf91bd462b55e3a27f8f665ec745c545df27 | refs/heads/master | 2021-04-25T16:49:05.989536 | 2018-12-07T11:14:13 | 2018-12-07T11:14:13 | 108,114,095 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 355 | r | get-city-locations.R | download.file(
url = "https://simplemaps.com/static/data/world-cities/basic/simplemaps_worldcities_basicv1.4.zip",
destfile = "data/simple-maps_cities.zip"
)
# extract the downloaded archive, load the world-cities table,
# then delete the zip to reclaim the disk space
unzip(zipfile = "data/simple-maps_cities.zip", exdir = "data/simple-maps_cities")
simple_maps_cities <- read_csv("data/simple-maps_cities/worldcities.csv")
unlink("data/simple-maps_cities.zip")
4cb82b847f29b8b45be704f3df34d0814c621f45 | 3cde4b8f4c03c7513a59a349c988c3b1cefd1a18 | /rabbit/deseq2.R | 27c7f3008b71c2c2966b84c22034924a7a143815 | [] | no_license | x-nm/R | 7ce75d351c71d125cd3d27145d42c449902d8347 | 7ac7a6381422301195bd0235a83bc2320c4efee1 | refs/heads/master | 2021-01-13T14:19:41.633295 | 2016-11-04T03:33:18 | 2016-11-04T03:33:18 | 72,810,872 | 1 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 1,493 | r | deseq2.R | # 2016.05.19 by xnm
# Prepare independent pairwise raw-count files for DESeq2.
# NOTE: with more fluent R these intermediate files could be avoided.
# working directory holding the extracted raw data
# (the path contains non-ASCII characters from the original machine)
setwd("D:\\Files\\Study\\½»´ó\\¿ÎÌâ\\16.1.13\\extract_data_from_raw\\data")
# CountData <- read.table("raw_count_aorta_sample.txt",
# header=T,
# row.names = 1)
# colData <- read.table("aorta_sample.txt",header = T,row.names = 1)
# The combined sample file had a different column order than expected,
# so instead read the four per-group raw-count files and bind them below.
r_1 <-read.table("../extracted_raw_data/raw_count_JW.txt",
header = T,
row.names = 1)
r_2 <-read.table("../extracted_raw_data/raw_count_NZW.txt",
header = T,
row.names = 1)
r_3 <-read.table("../extracted_raw_data/raw_count_NZW_HC.txt",
header = T,
row.names = 1)
r_4 <-read.table("../extracted_raw_data/raw_count_WHHL.txt",
header = T,
row.names = 1)
# pairwise comparisons: columns of the two groups side by side
JW_WHHL <-cbind(r_1,r_4)
NZW_HC <- cbind(r_2,r_3)
write.table(JW_WHHL,"aorta_JW_WHHL_raw_count",quote = F,sep = "\t")
write.table(NZW_HC,"aorta_NZW_HC_raw_count",quote = F,sep = "\t")
WHHL_HC <- cbind(r_3,r_4)
write.table(WHHL_HC,"aorta_WHHL_HC_raw_count",quote = F,sep = "\t")
JW_NZW <-cbind(r_1, r_2)
write.table(JW_NZW,"aorta_JW_NZW_raw_count",quote = F,sep = "\t")
# downstream DESeq2 construction (left as a template):
# dds <- DESeqDataSetFromMatrix(countData = countData_bind,
# colData = colData,
# design = ~ condition)
|
6256ad82c443764f47ab35a85a40660cdc85b7e5 | d4ded9c7242ddad1a769f86a3072398ed62bb5ff | /003-model-output/003-seizure-rct-code.R | 81a22c7bb3256554ab5f509701b9af0633f962a6 | [
"CC-BY-4.0"
] | permissive | niebert/knitr-tutorial | a1a6fe5a37f348871041460afc352b4b763d19bf | dddb8e8737aaad749eed598eb9ab6e825370d230 | refs/heads/master | 2020-05-18T11:21:31.442593 | 2018-08-09T15:40:33 | 2018-08-09T15:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,736 | r | 003-seizure-rct-code.R | #####################################
# R Source code for seizure data
# Created: May 27, 2015
# Updated:
# NOTES: depends on dplyr/magrittr, data.table, lme4, geepack and texreg
#        being loaded by the calling document.
#####################################
## ---- import-data ----
# load the epil seizure data set (provided by an attached package)
data("epil")
DT <- epil %>% as.data.table
# one row per subject, used to represent the baseline observation
DT.base <- DT %>% distinct(subject)
# encode baseline as "period 0" with the baseline count as the outcome
DT.base[,`:=`(period=0,y=base)]
DT.epil <- rbind(DT,DT.base)
setkey(DT.epil,subject, period)
# post = 1 for on-treatment visits; tj = observation window in weeks
# (8 weeks for the baseline period, 2 weeks per follow-up visit)
DT.epil[,`:=`(post=as.numeric(period>0), tj=ifelse(period==0,8,2))]
## ---- fit-models ----
# Poisson models of seizure counts with a log(observation-time) offset:
# plain GLM, random-intercept GLMM, and GEEs under four working
# correlation structures.
fit.glm <- glm(y ~ trt*post,
data = DT.epil, family = "poisson", offset = log(tj))
fit.glmm <- lme4::glmer(y ~ trt*post + (1|subject),
data = DT.epil, family = "poisson", offset = log(tj))
fit.gee.ind <- geepack::geeglm(y ~ trt*post, id = subject, offset = log(tj),
data = DT.epil, family = "poisson", corstr = "independence")
fit.gee.ex <- geepack::geeglm(y ~ trt*post, id = subject, offset = log(tj),
data = DT.epil, family = "poisson", corstr = "exchangeable")
fit.gee.unst <- geepack::geeglm(y ~ trt*post, id = subject, offset = log(tj),
data = DT.epil, family = "poisson", corstr = "unstructured")
fit.gee.ar1 <- geepack::geeglm(y ~ trt*post, id = subject, offset = log(tj),
data = DT.epil, family = "poisson", corstr = "ar1")
# side-by-side LaTeX table comparing all six fits
texreg::texreg(list(fit.glm,fit.glmm,fit.gee.ind,fit.gee.ex,fit.gee.unst,fit.gee.ar1),
custom.model.names = c("GLM","GLMM","GEE Ind","GEE Ex.","GEE Unst.","GEE AR1"),
custom.coef.names = c("$\\beta_0$", "$\\beta_1$","$\\beta_2$","$\\beta_3$"),
fontsize = "scriptsize", caption = "Comparing model estimates", single.row = T,
bold = 0.05, float.pos = 'H')
|
960f5e2a2cb42adb2600aa6cd84cae4981a6536e | 628ca710b26797ed36bb86ed21d7e3f40c41a599 | /project_01_new_product_recommendation/01_product_recommendation.R | 8a32c4ef35ca5a35ccf7b0f69ee413217cd0edf4 | [] | no_license | dexters-workshop/ds4b_101_bus_analysis_with_R | ecbc23d5777518d1fe6da9b6083de61a9a54cfc1 | 1ebe7e178b95664834d5103e641be831a3c97ebd | refs/heads/master | 2022-11-02T12:37:55.644870 | 2020-05-22T03:39:04 | 2020-05-22T03:39:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 853 | r | 01_product_recommendation.R | # DS4B 101-R ----
# PRODUCT RECOMMENDATION FUNCTIONS
# 1.0 LIBRARIES ----
library(tidyverse)
library(tidyquant)
library(parsnip)
library(plotly)
source("00_scripts/separate_bikes_and_outlier_detection.R")
bike_orderlines_tbl <- read_rds("00_data/bike_sales/data_wrangled/bike_orderlines.rds")
models_tbl <- read_rds("00_models/parsnip_models_tbl.rds")
# 2.0 BIKE FEATURES ----
get_bike_features <- function() {
}
get_bike_features()
plot_bike_features <- function(interactive = TRUE) {
# DATA MANIPULATION
# VISUALIZATION
}
plot_bike_features()
plot_bike_features(interactive = FALSE)
# 3.0 SAVE FUNCTIONS ----
function_names <- c("get_bike_features", "plot_bike_features")
dump(function_names, file = "00_scripts/plot_product_recommendation.R")
|
fa7bc30cdce0ed339c186152656cbb892a37def9 | 36ddd48ff64db62913b0fd1aaf20f7b0925610d2 | /man/correlate_mb.Rd | 25c37c1e0bb924f60497df55ee840b58f90ff95c | [
"BSD-2-Clause"
] | permissive | danlooo/coabundance | 0c1916e12441af5ed910efa53a25335fe2095462 | cef529a43e2ffd227e987ed3fc359797783c07f7 | refs/heads/main | 2023-02-13T06:14:58.882246 | 2021-01-12T09:40:13 | 2021-01-12T09:40:13 | 322,286,656 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 818 | rd | correlate_mb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlate.R
\name{correlate_mb}
\alias{correlate_mb}
\title{Coabundance analysis using mb as implemented in SpiecEasi}
\usage{
correlate_mb(
data,
pulsar.params = list(thresh = 0.05, subsample.ratio = 0.8, ncores =
getOption("mc.cores"), rep.num = 20, seed = 1337),
...
)
}
\arguments{
\item{data}{integer matrix of abundance count data. One sample per row and one taxon per column}
\item{pulsar.params}{list of options passed to \link[SpiecEasi]{pulsar.params}}
\item{...}{further options passed to \link[SpiecEasi]{spiec.easi}}
}
\description{
This is a wrapper arround function \code{SpiecEasi::spiec.easi} with argument \code{method = "mb"}.
}
\references{
\insertRef{spiec_easi}{coabundance}
\insertRef{mb}{coabundance}
}
|
75473aac15c0a91a16be4fe13731d1c9a97e4948 | 5e90fe6a6db2611e73d08dca0245dab7d4039667 | /man/resample_partition.Rd | b1f890d5cc0e8c594afdfba7403a6b7133d1ad3e | [] | no_license | crazybilly/modelr | 4d5a719d4e0090c3ea9843eb6b820e291544907c | 71502aeeb74b7deaab3c2b033e12cdcacb8aa3d3 | refs/heads/master | 2021-01-21T00:02:19.878566 | 2016-09-09T13:25:53 | 2016-09-09T13:25:53 | 67,800,592 | 0 | 0 | null | 2016-09-09T13:19:44 | 2016-09-09T13:19:43 | null | UTF-8 | R | false | true | 650 | rd | resample_partition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partition.R
\name{resample_partition}
\alias{resample_partition}
\title{Generate an exclusive partitioning of a data frame}
\usage{
resample_partition(data, p)
}
\arguments{
\item{data}{A data frame}
\item{p}{A named numeric vector giving where the value is the probability
that an observation will be assigned to that group.}
}
\description{
Generate an exclusive partitioning of a data frame
}
\examples{
ex <- resample_partition(mtcars, c(test = 0.3, train = 0.7))
mod <- lm(mpg ~ wt, data = ex$train)
rmse(mod, ex$test)
rmse(mod, ex$train)
}
|
ded5d80ffd74e249cfdfcca9170b8e00e3fdfecb | d922758e6c9ac51cdbcfe25ff26a114d1635250c | /man/make.DemandPoints.Rd | 66ecd78d5cffa15329827e6717921a38f9cd605e | [] | no_license | jeffreyhanson/raptr | c7fa50617b080a72f8fe97c9534a664cc02d91b9 | 059a1abc9c2e2f071ce2a7d3596bfd1189441d92 | refs/heads/master | 2023-08-31T22:13:12.446845 | 2023-03-14T03:26:50 | 2023-03-14T03:26:50 | 44,244,406 | 5 | 0 | null | 2023-08-21T23:56:38 | 2015-10-14T11:51:44 | R | UTF-8 | R | false | true | 2,977 | rd | make.DemandPoints.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DemandPoints.R
\name{make.DemandPoints}
\alias{make.DemandPoints}
\title{Generate demand points for RAP}
\usage{
make.DemandPoints(
points,
n = 100L,
quantile = 0.5,
kernel.method = c("ks", "hypervolume")[1],
...
)
}
\arguments{
\item{points}{\code{\link[base:matrix]{base::matrix()}} object containing points.}
\item{n}{\code{integer} number of demand points to use for each attribute
space for each species. Defaults to \code{100L}.}
\item{quantile}{\code{numeric} quantile to generate demand points within. If
0 then demand points are generated across the full range of values the
\code{points} intersect. Defaults to \code{0.5}.}
\item{kernel.method}{\code{character} name of kernel method to use to
generate demand points. Defaults to \code{'ks'}.}
\item{...}{arguments passed to kernel density estimating functions}
}
\value{
A new \code{\link[=DemandPoints]{DemandPoints()}} object.
}
\description{
This function generates demand points to characterize a distribution of
points.
}
\details{
Broadly speaking, demand points are generated by fitting a kernel
to the input \code{points}. A shape is then fit to the extent of
the kernel, and then points are randomly generated inside the shape. The
demand points are generated as random points inside the shape. The weights
for each demand point are calculated from the estimated density of input points
at the demand point. By supplying 'ks' as an argument to \code{method} in
\code{kernel.method}, the shape is defined using a minimum convex polygon
\code{\link[adehabitatHR:mcp]{adehabitatHR::mcp()}} and \code{\link[ks:kde]{ks::kde()}} is used to fit
the kernel. Note this can only be used when the data is low-dimensional (d
< 3). By supplying \code{"hypervolume"} as an argument to \code{method},
the \code{\link[hypervolume:hypervolume]{hypervolume::hypervolume()}} function is used to create the
demand points. This method can be used for hyper-dimensional data
(\eqn{d << 3}).
}
\examples{
\dontrun{
# set random number generator seed
set.seed(500)
# load data
cs_spp <- terra::rast(
system.file("extdata", "cs_spp.tif", package = "raptr")
)
cs_space <- terra::rast(
system.file("extdata", "cs_space.tif", package = "raptr")
)
# generate species points
species.points <- randomPoints(cs_spp[[1]], n = 100, prob = TRUE)
env.points <- as.matrix(terra::extract(cs_space, species.points))
# generate demand points for a 1d space using ks
dps1 <- make.DemandPoints(points = env.points[, 1], kernel.method = "ks")
# print object
print(dps1)
# generate demand points for a 2d space using hypervolume
dps2 <- make.DemandPoints(
points = env.points,
kernel.method = "hypervolume",
samples.per.point = 50,
verbose = FALSE
)
# print object
print(dps2)
}
}
\seealso{
\code{\link[hypervolume:hypervolume]{hypervolume::hypervolume()}}, \code{\link[ks:kde]{ks::kde()}},
\code{\link[adehabitatHR:mcp]{adehabitatHR::mcp()}}.
}
|
2f76f9e334cf302c8f81e1c6d0cb9082892badb1 | 384c3dbc571be91c6f743d1427dec00f13e0d8ae | /r/kernels/mkgiitr-titanic-survival-prediction/script/titanic-survival-prediction.r | fff25887dae04c84d3706f10e157d4b2b1fbc3c8 | [] | no_license | helenaK/trustworthy-titanic | b9acdd8ca94f2fa3f7eb965596eed4a62821b21e | ade0e487820cf38974561da2403ebe0da9de8bc6 | refs/heads/master | 2022-12-09T20:56:30.700809 | 2020-09-10T14:22:24 | 2020-09-10T14:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,168 | r | titanic-survival-prediction.r |
## Importing packages
# This R environment comes with all of CRAN and many other helpful packages preinstalled.
# You can see which packages are installed by checking out the kaggle/rstats docker image:
# https://github.com/kaggle/docker-rstats
library(tidyverse) # metapackage with lots of helpful functions
## Running code
# In a notebook, you can run a single code cell by clicking in the cell and then hitting
# the blue arrow to the left, or by clicking in the cell and pressing Shift+Enter. In a script,
# you can run code by highlighting the code you want to run and then clicking the blue arrow
# at the bottom of this window.
## Reading in files
# You can access files from datasets you've added to this kernel in the "../input/" directory.
# You can see the files added to this kernel by running the code below.
list.files(path = "../input")
## Saving data
# If you save any files or images, these will be put in the "output" directory. You
# can see the output directory by committing and running your kernel (using the
# Commit & Run button) and then checking out the compiled version of your kernel.
#Loading library
library(tidyverse)
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
library('dplyr') # data manipulation
library('mice') # imputation
library('randomForest') # classification algorithm
#Checking the files loaded into kernel
list.files(path = "../input")
#Checking the working directory
getwd()
#loading the data files
train <- read.csv("../input/train.csv",stringsAsFactors=F )
test <- read.csv("../input/test.csv",stringsAsFactors=F)
full <- bind_rows(train,test)
#Checking the structure of data
str(full)
# var_summ(): one-variable descriptive summary.
#
# For a numeric variable returns the variable type, count, number of
# missing values, mean, standard deviation, min and max. Because the
# type label is a string, c() coerces the whole result to character.
# For any other variable only the count and number of missing values
# are returned (as integers).
#
# x: a vector (typically one column of a data frame)
var_summ <- function(x){
  # BUG FIX: is.numeric() covers both double ("numeric") and integer
  # vectors; the original class(x)=="numeric" test wrongly sent integer
  # columns down the categorical branch.
  if(is.numeric(x)){
    var_type <- class(x)
    n        <- length(x)
    n_miss   <- sum(is.na(x))
    mean     <- mean(x, na.rm = TRUE)
    std      <- sd(x, na.rm = TRUE)
    min      <- min(x, na.rm = TRUE)
    max      <- max(x, na.rm = TRUE)
    return(c(var_type = var_type, n = n, n_miss = n_miss,
             mean = mean, std = std, min = min, max = max))
  }
  else{
    n      <- length(x)
    n_miss <- sum(is.na(x))
    return(c(n = n, n_miss = n_miss))
  }
}
# logical index of the numeric columns in `full`
num_var <- sapply(full,is.numeric)
# descriptive stats for the numeric columns (one row per variable);
# note apply() coerces the sub-frame to a matrix before calling var_summ
num_data <- t(data.frame(apply(full[num_var],2,var_summ)))
num_data
# counts / missing counts for the non-numeric columns
cat_data <- data.frame(apply(full[!num_var],2,var_summ))
cat_data
# ---- Feature engineering on passenger names and cabins ----

# Extract the passenger's title from the name: everything between the
# comma and the first period (e.g. "Braund, Mr. Owen" -> "Mr").
full$Title <- gsub('(.*, )|(\\..*)', '', full$Name)

# title counts broken down by gender
table(full$Sex,full$Title)

# collapse the rarely occurring titles into a single "rare_title" level
rare_title <- c("Capt","Col","Don","Dona","Dr","Jonkheer","Lady","Major","Mlle","Mme","Ms","Rev","Sir","the Countess")
full$Title[full$Title %in% rare_title] <- "rare_title"

# title counts by sex after collapsing
table(full$Sex,full$Title)

# Finally, grab surname from passenger name (text before the first comma/period)
full$Surname <- sapply(full$Name,
                       function(x) strsplit(x, split = '[,.]')[[1]][1])

# family size = passenger + siblings/spouses + parents/children
full$Fsize <- full$Parch+full$SibSp+1

# family identifier: surname combined with family size
full$Family <- paste(full$Surname, full$Fsize, sep='_')

# survival by family size (training rows only, i.e. rows 1:891)
ggplot(full[1:891,], aes(x = Fsize, fill = factor(Survived))) +
  geom_bar(stat='count', position='dodge') +
  scale_x_continuous(breaks=c(1:11)) +
  labs(x = 'Family Size') +
  theme_few()

# discretize family size into three levels
full$FsizeD[full$Fsize==1] <- "Singleton"
full$FsizeD[full$Fsize<5 & full$Fsize>1] <- "Small"
full$FsizeD[full$Fsize>4] <- "Large"

# survival counts by discretized family size.
# BUG FIX: the original wrote full$Survive, which only resolved to the
# Survived column through data.frame '$' partial matching; spell it out.
table(full$Survived,full$FsizeD)

# ---- Deck variable from the cabin code ----
# the first character of the cabin string is the deck letter
full$Cabin[1:28]
strsplit(full$Cabin[2],NULL)[[1]][1]
full$Deck <- factor(sapply(full$Cabin,function(x) strsplit(x,NULL)[[1]][1]))
table(full$Deck)
# ---- Missing value treatment: Embarked ----
unique(full$Embarked)
table(full$Embarked)
# passengers 62 and 830 have an empty/missing embarkation port
full[c(62,830),]
# drop the two rows with missing embarkation before plotting
embarked_value <- full %>% filter(PassengerId !=62 & PassengerId !=830)
# Use ggplot2 to visualize embarkment, passenger class, & median fare
ggplot(embarked_value,aes(x=Embarked,y=Fare, fill=factor(Pclass)))+
geom_boxplot()+
geom_hline(aes(yintercept=80), colour='red', linetype='dashed', lwd=2)+
scale_y_continuous(labels=dollar_format())+
theme_few()
# Since their fare was $80 for 1st class, they most likely embarked from 'C'
full$Embarked[c(62,830)] <- 'C'
# ---- Missing value treatment: Fare ----
# row 1044 is the only passenger with a missing fare
full[is.na(full$Fare),]
full[1044,]
# checking the fare distribution for class = 3 & embarked = 'S'
# (passenger 1044's group); dashed line marks the median
ggplot(full[full$Pclass==3 & full$Embarked=='S',],aes(x=Fare))+
geom_density(fill='#99d6ff',alpha=0.4)+
geom_vline(aes(xintercept=median(Fare,na.rm=T)),colour='red',linetype='dashed',lwd=1)+
scale_x_continuous(labels=dollar_format())+
theme_few()
# Replace missing fare value with median fare for class/embarkment
full$Fare[1044] <- median(full[full$Pclass==3 & full$Embarked=='S',]$Fare,na.rm=T)
# ---- Missing value treatment: Age (predictive imputation via mice) ----
# number of missing age values
sum(is.na(full$Age))
# convert id-like/categorical columns to factors before imputation
factor_vars <- c('PassengerId','Pclass','Sex','Embarked','Title','Surname','Family','FsizeD')
full[factor_vars] <- lapply(full[factor_vars],function(x) as.factor(x))
# Set a random seed so the imputation is reproducible
set.seed(129)
# Perform mice imputation (random-forest method), excluding certain
# less-than-useful variables:
mice_mod <- mice(full[, !names(full) %in% c('PassengerId','Name','Ticket','Cabin','Family','Surname','Survived')], method='rf')
# materialize the completed (imputed) data set
mice_output <- complete(mice_mod)
# Compare age distributions before and after imputation
par(mfrow=c(1,2))
hist(full$Age, freq=F, main='Age: Original Data',
col='darkgreen', ylim=c(0,0.04))
hist(mice_output$Age, freq=F, main='Age: MICE Output',
col='lightgreen', ylim=c(0,0.04))
# adopt the imputed ages; the count of missing values should now be 0
full$Age <- mice_output$Age
sum(is.na(full$Age))
# ---- Derived variables: Child and Mother ----
# Child = under 18 at the time of sailing
full$Child[full$Age < 18] <- 'Child'
full$Child[full$Age >= 18] <- 'Adult'
# Show counts
table(full$Child, full$Survived)
# Mother = adult female travelling with children and not titled 'Miss'
full$Mother <- 'Not Mother'
full$Mother[full$Sex == 'female' & full$Parch > 0 & full$Age > 18 & full$Title != 'Miss'] <- 'Mother'
# Show counts
table(full$Mother, full$Survived)
# Finish by factorizing our two new factor variables
full$Child <- factor(full$Child)
full$Mother <- factor(full$Mother)
# ---- Modeling with random forest ----
# Split the data back into a train set and a test set
# (rows 1:891 came from train.csv, 892:1309 from test.csv)
train <- full[1:891,]
test <- full[892:1309,]
# set seed for a reproducible forest
set.seed(101)
# build the model using random forest
rf_model <- randomForest(factor(Survived)~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked+Title+FsizeD+Child+Mother,data=train)
rf_model
# OOB error of about 16.6% was observed on the original run
# apply the model to the test data set to predict survival
prediction <- predict(rf_model,test)
# assemble the submission data frame
solution <- data.frame(PassengerID=test$PassengerId,Survived=prediction)
table(solution$Survived)
# write the submission to a csv file
write.csv(solution,'rf_model_solution.csv',row.names=F)
|
a4c2a38b295a317d33a5370004f0956cf01c1f98 | e85887c76341d45a3829fc552d6c53536f32b719 | /R/trialr-package.R | 8bb78e423ba39f3185ac11a09012ed2d15cdc701 | [] | no_license | brockk/trialr | fa8fd43ca43dc79911677ba42c8e50d88a2fc03d | 15fd90d3a779a61454baedcd517e2ce8bb301f92 | refs/heads/master | 2023-03-16T21:41:49.580277 | 2023-03-11T07:36:58 | 2023-03-11T07:36:58 | 69,753,350 | 39 | 13 | null | 2023-09-10T19:11:18 | 2016-10-01T17:39:28 | TeX | UTF-8 | R | false | false | 535 | r | trialr-package.R | #' The 'trialr' package.
#'
#' @description
#' trialr collects in one place Bayesian clinical trial designs and methods.
#' Models are implemented in Stan and helper functions are provided in R.
#'
#' @docType package
#' @name trialr-package
#' @aliases trialr
#' @useDynLib trialr, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @import rstantools
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2018).
#' RStan: the R interface to Stan. R package version 2.18.2.
#' https://mc-stan.org/
#'
NULL
|
c0b3b069639fedf7d8f4baef49dd04371a1a29e0 | a8c7bc56108f6dc3261d49bb361e93dc60ef5e9f | /plot1.R | a29f6fd285167f8a70b3b483f11e0891d672e4ac | [] | no_license | Zoeyihui/ExData_Plotting1 | d9dd2372b7832f65a909250015938daecb3adb85 | 02e56ef8f3602a83f44c88cb22396966d639882a | refs/heads/master | 2021-01-23T23:14:06.406865 | 2015-03-12T08:17:27 | 2015-03-12T08:17:27 | 31,900,574 | 0 | 0 | null | 2015-03-12T08:17:27 | 2015-03-09T13:45:35 | null | UTF-8 | R | false | false | 778 | r | plot1.R | setwd("E:/Data/exploratory analysis/exdata-data-household_power_consumption/ExData_Plotting1")
# load the full power-consumption file (semicolon separated)
housepowercon<- read.table("household_power_consumption.txt",header=TRUE,sep=";")
# parse the Date column and keep only 2007-02-01 and 2007-02-02
date<-as.Date(housepowercon$Date,"%d/%m/%Y")
date1<- housepowercon[mapply(identical,"2007-02-01",as.character(date)),]
date2<- housepowercon[mapply(identical,"2007-02-02",as.character(date)),]
data<-rbind(date1,date2)
# save the two-day subset so the later plotting scripts can reuse it
write.csv(data,file="E:/Data/exploratory analysis/exdata-data-household_power_consumption/ExData_Plotting1/twoday_data.csv",row.names=F,quote=F)
# plot 1: histogram of global active power, written to a 480x480 PNG
png(file="plot1.png", width = 480, height = 480, units = "px")
hist(as.numeric(as.character(data$Global_active_power)),col="red",main="Global Active Power",xlab="Global Active Power(kilowatts)")
dev.off() |
250f5aae5ee3a905ff4786b12eeaf803b2e00d68 | 2734f403602b049277a786816a33fc82901f8388 | /회귀분석 실습 2.R | dbe007e953b083edd756616d247c8703435438ea | [] | no_license | youjin2github/-_R | b9799eb7b1e7f0ecdebb1b33ec8f50b22f3ec00e | 7fd1775ca3e3fc5510f003177bb9e85249f135fd | refs/heads/main | 2023-08-01T06:48:50.924295 | 2021-08-30T04:31:29 | 2021-08-30T04:31:29 | 401,216,069 | 1 | 0 | null | null | null | null | UHC | R | false | false | 2,234 | r | 회귀분석 실습 2.R | # 중회귀분석 연습
# Multiple regression practice on the mtcars data set
mtcars
# Analysis goal: quantify the effect of displacement (disp), horsepower (hp)
# and weight (wt) on fuel efficiency (mpg)
cars <- mtcars[,c("mpg","disp","hp","wt")]
# scatterplot matrix of all four variables
pairs(cars)
plot(cars)
# Displacement vs. mpg: scatter, lowess smoother, least-squares line
plot(cars$disp,cars$mpg)
lines(lowess(cars$mpg~cars$disp,f=0.8))
abline(lm(cars$mpg~cars$disp))
# same relationship using a smoothing spline instead of lowess
plot(cars$disp,cars$mpg)
lines(smooth.spline(cars$disp,cars$mpg,spar=0.9))
abline(lm(cars$mpg~cars$disp))
# Horsepower vs. mpg
plot(cars$hp,cars$mpg)
lines(lowess(cars$mpg~cars$hp,f=0.8))
abline(lm(cars$mpg~cars$hp))
# Weight vs. mpg
plot(cars$wt,cars$mpg)
lines(lowess(cars$mpg~cars$wt,f=0.8))
abline(lm(cars$mpg~cars$wt))
# derived (squared) variables to capture the apparent nonlinear relationships
cars$disp2 <- cars$disp^2
cars$hp2 <- cars$hp^2
cars$wt2 <- cars$wt^2
# correlation matrix and a formal test for mpg vs. hp
cor(cars)
cor.test(cars$mpg,cars$hp)
# Multiple regression: full linear model, then drop disp
lm1 <- lm(mpg~disp+hp+wt,data=cars)
summary(lm1)
lm2 <- lm(mpg~hp+wt,data=cars)
summary(lm2)
# Residual diagnostics for lm2 (the accessor calls below re-extract the
# same fitted values / residuals as the direct slot reads)
pred <- lm2$fitted.values
resid <- lm2$residuals
pred <- predict(lm2)
resid <- residuals(lm2)
# residual plots: residuals vs. fitted values and vs. each predictor
plot(pred,resid)
abline(h=0)
lines(lowess(resid~pred))
plot(cars$disp,resid)
abline(h=0)
lines(lowess(resid~cars$disp,f=0.8))
plot(cars$hp,resid)
abline(h=0)
lines(lowess(resid~cars$hp,f=0.9))
plot(cars$wt,resid)
abline(h=0)
lines(lowess(resid~cars$wt,f=0.9))
# Models including the squared terms for the nonlinear relationships
lm3 <- lm(mpg~disp+disp2+hp+hp2+wt+wt2,data=cars)
summary(lm3)
lm4 <- lm(mpg~hp+hp2+wt+wt2,data=cars)
summary(lm4)
# residual diagnostics for lm4
pred <- predict(lm4 )
resid <- residuals(lm4 )
# residual plots
plot(pred,resid)
abline(h=0)
lines(lowess(resid~pred,f=0.9))
plot(cars$disp,resid)
abline(h=0)
lines(lowess(resid~cars$disp,f=0.8))
plot(cars$hp,resid)
abline(h=0)
lines(lowess(resid~cars$hp,f=0.9))
plot(cars$wt,resid)
abline(h=0)
lines(lowess(resid~cars$wt,f=0.9))
# Normality checks on the lm4 residuals
hist(resid)
library(moments)
skewness(resid)
agostino.test(resid)
kurtosis(resid)
anscombe.test(resid)
cars
# normal Q-Q plot with interactive point labelling
qq<-qqnorm(resid)
qqline(resid)
identify(qq$x,qq$y,labels=names(qq$y))
shapiro.test(resid)
|
b73f604b6f68552197613d0f7652dd5c2178bb74 | ae79c3753f00b467c4bb0097f17033b84afffe7f | /man/module_water_batch_unlimited_water_supply_xml.Rd | c5d8c7978e68e69d85b42648a8ce12e621114e58 | [
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JGCRI/gcamdata | c3fe3584984ca9115c498bb54cb5afabdc4eaa24 | 17f8d84d4cd08aed2418872f181544fb52cf81dc | refs/heads/main | 2023-06-07T19:44:37.473767 | 2023-03-15T14:39:09 | 2023-03-15T14:39:09 | 72,801,482 | 44 | 29 | NOASSERTION | 2023-05-31T22:52:33 | 2016-11-04T01:09:57 | R | UTF-8 | R | false | true | 853 | rd | module_water_batch_unlimited_water_supply_xml.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_batch_unlimited_water_supply_xml.R
\name{module_water_batch_unlimited_water_supply_xml}
\alias{module_water_batch_unlimited_water_supply_xml}
\title{module_water_batch_unlimited_water_supply_xml}
\usage{
module_water_batch_unlimited_water_supply_xml(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{unlimited_water_supply.xml}. The corresponding file in the
original data system was \code{batch_unlimited_water_supply.xml.R} (water XML).
}
\description{
Construct XML data structure for \code{unlimited_water_supply.xml}.
}
|
67eeccd7f865bdda0238a303be20f3f3b5162b0a | 4ae7e10a0a6bb332d65db0ae3a57de3593c7d95b | /Stat/cw2/2.R | 43153d47c2bac61d5b962aab8d3171f154a2d201 | [] | no_license | Doxxer/SPbAU-Spring-2014 | 1c591aa01059f6ca18e6ce5207ac8b4709ada9db | 70ace3e545d3abef1df717e1ab3b003df1ac8973 | refs/heads/master | 2020-04-15T21:42:54.597776 | 2014-06-20T16:29:10 | 2014-06-20T16:29:10 | 16,694,268 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 244 | r | 2.R | # Строим доверительный интервал для доли, normal approximation interval
# expected answer: (0.2747415, 0.5824014)
# 90% normal-approximation CI for the sample proportion w = 12/28, n = 28
n = 28
alpha = 0.1
w = 12/28
ans = w + qnorm(c(alpha/2, 1-alpha/2)) * sqrt(w*(1-w))/sqrt(n)
print(ans) |
212acdaaaafa8303965ccfc5b83b3f3914715484 | 3366ac6055037e16738938a277a30377050fc876 | /scripts/prepareData.R | a92e2600e6448e5af7f3e950df7488dee284a340 | [] | no_license | ilsley/Ciona16 | 6721996f07731e0d6811518fff536cf3c4b1f25c | 0e6f0139aec59ac4ee777ebf332598fdbfa6ec31 | refs/heads/master | 2018-10-31T17:44:27.659770 | 2017-10-04T00:38:29 | 2017-10-04T00:38:29 | 105,616,311 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,430 | r | prepareData.R | library(SingleCellExperiment)
# read the HiSeq / MiSeq count matrices (genes x samples)
hiSeqCountData = read.csv("data-raw/hiSeqCountData.csv", row.names = 1, header = TRUE)
miSeqCountData = read.csv("data-raw/miSeqCountData.csv", row.names = 1, header = TRUE)
# per-cell annotation, every column treated as a factor
cellInfo = read.csv("data-raw/phenoData.csv", row.names = 1, header= TRUE,colClasses = "factor")
# order cells by embryo, then by cell type
cellInfo = cellInfo[order(cellInfo$Embryo,cellInfo$CellType),]
sampleNames <- rownames(cellInfo)
# gene id -> gene name lookup table, dropping rows without an id
geneNames <- read.csv("data-raw/cionaGeneNames.csv",header=FALSE)
colnames(geneNames) <- c("GeneID","GeneName")
geneNames <- geneNames[!is.na(geneNames$GeneID),]
rownames(geneNames)=geneNames$GeneID
# build the two SingleCellExperiment objects, columns ordered like cellInfo
ciona16_sce <- SingleCellExperiment(assays=SimpleList(counts=as.matrix(hiSeqCountData[,sampleNames])),colData=cellInfo)
ciona16MiSeq_sce <- SingleCellExperiment(assays=SimpleList(counts=as.matrix(miSeqCountData[,sampleNames])),colData=cellInfo)
# addDetails(): annotate a SingleCellExperiment in place and return it.
# Flags ERCC spike-ins, uses per-cell total counts as size factors,
# stores normalized counts and an arcsine-sqrt ("phicounts") transform,
# and attaches the gene-name lookup as rowData.
addDetails <- function(sce) {
isSpike(sce, "ERCC") <- grepl("^ERCC-", rownames(sce))
sizeFactors(sce) <- colSums(assay(sce))
# divide each column (cell) of the counts by its size factor
normcounts(sce) <- sweep(counts(sce),MARGIN=2,STATS = sizeFactors(sce),FUN = "/")
assay(sce, i="phicounts") <- 2*asin(sqrt(normcounts(sce)))
# left-join gene names onto the gene ids of this object
rowData=merge(data.frame("GeneID"=rownames(sce)),geneNames,by="GeneID",all.x=TRUE)
rowData(sce)=rowData
sce
}
ciona16_sce <- addDetails(ciona16_sce)
ciona16MiSeq_sce <- addDetails(ciona16MiSeq_sce)
# serialize both objects for the package's data/ directory
save(ciona16_sce,file="data/ciona16_sce.rda")
save(ciona16MiSeq_sce,file="data/ciona16MiSeq_sce.rda")
|
4b3ef250c7d4447473cbec7b4ce47f5d7198f687 | eefcd8a80f3cebe5fc36fff145bea7c985d01d52 | /paper_expansion_test.R | 1a3e2ff809754d911e29285d05b53f56cbfe7f82 | [] | no_license | bbbales2/ising | 0202f3ecf9cdfe0152c42ecf45f8814afd92e4b7 | be7b24cf13b84db369f82676402ef96460bc276a | refs/heads/master | 2021-01-01T17:08:08.930806 | 2018-05-16T23:32:37 | 2018-05-16T23:32:37 | 98,006,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,464 | r | paper_expansion_test.R | library(MASS)
# Setup for driving CASM Monte Carlo runs and fitting cluster-expansion
# coefficients (library(MASS) is loaded on the preceding line).
source("casm_helper.R")
source("ising_helpers.R")
require(Rcpp)
library(argparse)
library(reshape2)
library(tidyverse)
library(stats)
library(parallel)
library(gtools)
library(ggplot2)
library(rstan)
# CASM project directory and supercell edge length (N x N lattice)
path = "/home/bbales2/casm/invent1"
N = 30
# effective cluster interactions: start from zero, then randomize the
# cluster indices we intend to fit (nonZero == keep here)
ecis = rep(0, length(getECIs(path)))
nonZero = c(3, 4, 5, 6, 7, 14)
keep = c(3, 4, 5, 6, 7, 14)
ts = c(2.0)
ecis[nonZero[1]] = rnorm(1, 0.30, 0.1)
ecis[nonZero[2:length(nonZero)]] = rnorm(length(nonZero) - 1, 0.1, 0.25)
# chemical potential grid used by the MC sweeps
mus = seq(-4, 4, 0.5)
# makeECIs(): draw a fresh random ECI vector supported only on `keep`.
# NOTE(review): reads `ecis` from the enclosing environment and returns
# the modified copy; the global itself is not changed.
makeECIs = function() {
ecis[keep] = rnorm(length(keep), 0.1, 0.25)
ecis[-keep] = 0
ecis
}
# smu(): summarize one chemical-potential group of MC correlation samples.
#
#   corrs  data frame of sampled correlation functions ("corr*" columns)
#   Tfrac  temperature fraction used to scale the fluctuation term
#
# Returns a list with
#   g     - samples as a (correlation x sample) matrix
#   Eg    - sample mean of each correlation function
#   Egrad - -N^2 / Tfrac * sample covariance of the correlations
# Depends on the global supercell edge length N.
smu = function(corrs, Tfrac) {
  n.sites = N * N
  samples = as.matrix(select(corrs, starts_with("corr")))
  corr.names = colnames(samples)

  deriv = -cov(samples, samples) * n.sites / Tfrac
  dimnames(deriv) = list(corr.names, corr.names)

  avg = colMeans(samples)
  names(avg) = corr.names

  list(g = t(samples), Eg = avg, Egrad = deriv)
}
# runSimulation(path, g, mu = NULL)
#
# Configure and run a CASM Monte Carlo calculation, then summarize the
# sampled correlations with smu(), one summary per chemical potential.
#   path  CASM project directory
#   g     ECI vector to install before running
#   mu    if given, run at that single chemical potential; otherwise
#         sweep the grid from -4 to 4 in steps of 0.5
# The first 500 MC passes of each group are discarded as burn-in.
# Uses the global supercell size N.
runSimulation = function(path, g, mu = NULL) {
setSupercell(path, N)
setECIs(path, g)
if(!is.null(mu)) {
setChemicalPotentials(path, mu, mu, 0)
} else {
setChemicalPotentials(path, -4, 4, 0.5)
}
runMC(path)
corrs = getCorrs(path)
Tfrac = getTemperatureFraction(path)
# group samples by chemical potential, drop burn-in, summarize each group
corrs %>%
group_by(mu) %>%
filter(mci > 500) %>%
do(out = smu(., Tfrac)) %>% pull(out)
}
# Numerically stable log(sum(exp(x))): shift by max(x) so the largest
# term becomes exp(0) = 1, avoiding overflow for large inputs.
# (Adapted from https://gist.github.com/doobwa/941125)
log_sum_exp = function(x) {
  shift = max(x)
  shifted.total = sum(exp(x - shift))
  shift + log(shifted.total)
}
#lambda = 0.0005
# GgradG2(path, g, data, mu = NULL)
#
# Run one simulation with trial ECIs g and compute, per chemical
# potential, a log-likelihood term for the observed correlations `data`
# together with its first and second derivatives w.r.t. the kept ECIs.
#
#   path  CASM project directory
#   g     full ECI vector (only indices in the global `keep` are active)
#   data  list of smu() summaries of the reference simulation, one per
#         chemical potential in the global `mus`
#   mu    if non-NULL, restrict the computation to that single mu
#
# Returns a list (one element per processed mu), each containing:
#   lp     log-likelihood term
#   Eg     mean correlations of the trial simulation
#   Egrad  gradient of lp w.r.t. g (nonzero only on `keep`)
#   Egrad2 diagonal second-derivative estimates (nonzero only on `keep`)
#
# Indices 14-18 correspond to composite (product) correlation terms, so
# their derivatives are expanded via the product rule in the branches
# below; all other indices use the plain fluctuation formula.
GgradG2 = function(path, g, data, mu = NULL) {
a = runSimulation(path, g, mu)
Tfrac = getTemperatureFraction(path)
out = list()
# when a single mu is requested, runSimulation returned a 1-element list
if(!is.null(mu)) {
idxs = which(mus == mu)
} else {
idxs = 1:length(a)
}
for(i in idxs) {
grad = rep(0, length(keep))
grad2 = rep(0, length(keep))
# ai indexes the simulation output; i indexes `data`/`mus`
if(!is.null(mu)) {
ai = 1
} else {
ai = i
}
out[[ai]] = list()
# log-likelihood: -<data corr> . g / T minus a sample estimate of log Z
out[[ai]]$lp = (-data[[i]]$Eg[keep] %*% g[keep] / Tfrac - log_sum_exp(-g[keep] %*% a[[ai]]$g[keep,] / Tfrac))[1, 1]
for(j in 1:length(keep)) {
k = keep[j]
if(k == 14) {
grad[[j]] = -3 * (data[[i]]$Eg[3] * data[[i]]$Eg[2] - a[[ai]]$Eg[3] * a[[ai]]$Eg[2]) / Tfrac
grad2[[j]] = (-1 / (Tfrac^2)) * var(3 * a[[ai]]$g[3,] * a[[ai]]$g[2,]) * N * N
} else if(k == 15) {
grad[[j]] = -(data[[i]]$Eg[4] * data[[i]]$Eg[2] - a[[ai]]$Eg[4] * a[[ai]]$Eg[2]) / Tfrac -
2 * (data[[i]]$Eg[3] * data[[i]]$Eg[2] - a[[ai]]$Eg[3] * a[[ai]]$Eg[2]) / Tfrac
grad2[[j]] = (-1 / (Tfrac^2)) * var(2 * a[[ai]]$g[3,] * a[[ai]]$g[2,] + a[[ai]]$g[4,] * a[[ai]]$g[2,]) * N * N
} else if(k == 16) {
grad[[j]] = -3 * (data[[i]]$Eg[4] * data[[i]]$Eg[2] - a[[ai]]$Eg[4] * a[[ai]]$Eg[2]) / Tfrac
grad2[[j]] = (-1 / (Tfrac^2)) * var(3 * a[[ai]]$g[4,] * a[[ai]]$g[2,]) * N * N
} else if(k == 17) {
grad[[j]] = -(data[[i]]$Eg[3] * data[[i]]$Eg[2] - a[[ai]]$Eg[3] * a[[ai]]$Eg[2]) / Tfrac -
(data[[i]]$Eg[4] * data[[i]]$Eg[2] - a[[ai]]$Eg[4] * a[[ai]]$Eg[2]) / Tfrac -
(data[[i]]$Eg[5] * data[[i]]$Eg[2] - a[[ai]]$Eg[5] * a[[ai]]$Eg[2]) / Tfrac
grad2[[j]] = (-1 / (Tfrac^2)) * var(a[[ai]]$g[3,] * a[[ai]]$g[2,] +
a[[ai]]$g[4,] * a[[ai]]$g[2,] +
a[[ai]]$g[5,] * a[[ai]]$g[2,]) * N * N
} else if(k == 18) {
grad[[j]] = -3 * (data[[i]]$Eg[5] * data[[i]]$Eg[2] - a[[ai]]$Eg[5] * a[[ai]]$Eg[2]) / Tfrac
grad2[[j]] = (-1 / (Tfrac^2)) * var(3 * a[[ai]]$g[5,] * a[[ai]]$g[2,]) * N * N
} else {
# plain (non-composite) correlation function
grad[[j]] = -(data[[i]]$Eg[k] - a[[ai]]$Eg[k]) / Tfrac
grad2[[j]] = (-1 / (Tfrac^2)) * var(a[[ai]]$g[k,]) * N * N
}
# disabled L1 regularization experiment:
#if(k > 10) {
# grad[[j]] = grad[[j]] + ifelse(g[k] > 0.0, -lambda, lambda)
#}
}
# scatter the kept-index derivatives back into full-length vectors
out[[ai]]$Eg = a[[ai]]$Eg
out[[ai]]$Egrad = rep(0, length(g))
out[[ai]]$Egrad2 = rep(0, length(g))
out[[ai]]$Egrad[keep] = grad
out[[ai]]$Egrad2[keep] = grad2
}
out
}
GgradG = function(path, g, mu = NULL) {
# Total log-likelihood, gradient and curvature accumulated over the whole
# temperature schedule `ts`, using the pre-computed observations in the global
# `data` list (one entry per temperature) and GgradG2() defined above.
# NOTE(review): depends on globals `ts` and `data` -- confirm they are set up
# by the driver section below before calling.
out = list(lp = 0, Egrad = rep(0, length(g)), Egrad2 = rep(0, length(g)))
# seq_along() instead of 1:length(ts): safe even if the schedule is empty
# (1:0 would iterate over c(1, 0)).
for(i in seq_along(ts)) {
setTemperatureFraction(path, ts[i])
a = GgradG2(path, g, data[[i]], mu)
# Sum each component over the per-mu results. Base lapply/`[[` replaces the
# original purrr::map lambdas, so this function no longer needs purrr.
out$lp = out$lp + Reduce(`+`, lapply(a, `[[`, "lp"))
out$Egrad = out$Egrad + Reduce(`+`, lapply(a, `[[`, "Egrad"))
out$Egrad2 = out$Egrad2 + Reduce(`+`, lapply(a, `[[`, "Egrad2"))
}
out
}
# --- Driver: run one simulation per temperature and plot selected terms. ---
# Relies on `path`, `N`, `ecis`, `mus` and the tidyverse/purrr being set up
# earlier in the file.
setSupercell(path, N)
setECIs(path, ecis)
# Temperature fractions to sweep.
ts = seq(0.5, 5, length = 15)
# Sys.time() calls bracket the loop to time the simulations interactively.
Sys.time()
data = list()
for(i in 1:length(ts)) {
setTemperatureFraction(path, ts[i])
data[[i]] = runSimulation(path, ecis)
}
Sys.time()
# Plot expectations 2, 3 and 14 of element 8 (presumably the 8th mu slot --
# confirm against `mus`) against temperature.
# NOTE(review): `as.tibble` is deprecated in favour of `as_tibble`.
map(data, ~ .[[8]]$Eg[c(2, 3, 14)]) %>%
do.call(rbind, .) %>% as.tibble %>%
mutate(corr1_2 = corr1 * corr2,
corr1_1_1 = corr1 * corr1 * corr1) %>%
#select(corr1_2, capprox, corr1_1_1, corr13) %>%
mutate(t = ts) %>%
gather(which, value, -t) %>%
ggplot(aes(t, value)) +
geom_point(aes(shape = which, colour = which), alpha = 0.75)
# Same quantities for every mu slot, plotted against mu and faceted by
# temperature.
map(1:length(data), function(j) {
map(1:length(mus), ~ data[[j]][[.]]$Eg[c(2, 3, 14)]) %>%
do.call(rbind, .) %>%
as.tibble %>%
mutate(mu = mus,
t = ts[j])
}) %>%
bind_rows %>%
mutate(corr1_2 = corr1 * corr2,
corr1_1_1 = corr1 * corr1 * corr1) %>%
gather(which, value, -mu, -t) %>%
ggplot(aes(mu, value)) +
geom_point(aes(shape = which, colour = which), alpha = 0.75) +
facet_wrap( ~ t)
|
2dcf7a2400cca9fdb430b93c87733b69f1bb0ff1 | 2a274b4036db38176ad4bcd7eada506f5280000a | /15-imp-results-convert.R | 18175ddd110bdbfebad41ae25ae36a066e4780ce | [] | no_license | philliphungerford/ndarc-point-fentanyl | d5e1f999c0e831ea10b66bd26ddd6408b54382b6 | 2bc8f0e3f2ac4100763506e62c4dde3eefdbe19a | refs/heads/master | 2022-11-28T17:52:37.828219 | 2020-08-04T01:28:39 | 2020-08-04T01:28:39 | 281,807,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,366 | r | 15-imp-results-convert.R | ################################################################################
# PROGRAM: 15-imp-results-convert.R
# PURPOSE: Convert the imputed model results (side effects, bivariates, harms) to tables
# WRITTEN BY: Phillip Hungerford
# DATE: 07/07/2020
##############################################################################
# Setup
library(dplyr)
library(data.table) # for setDT & rbindlist
source("functions/rowname_convert.R")
##############################################################################
# Create function to convert
results_convert <- function(variable = "overdose", data_dir){
    # PURPOSE: collapse the list of model summaries saved in <data_dir>.RData
    #          (object `results`, one table per model, with the row names
    #          promoted to a column `rn` by rowname_converter) into a single
    #          table of "estimate (2.5-97.5)" strings and formatted p values.
    # REQUIRES: rowname_converter() (sourced from functions/rowname_convert.R)
    #           and data.table's rbindlist().
    # ARGS:
    #   variable - label for the output columns, e.g. "side_effect".
    #   data_dir - path prefix of the .RData input and the .csv output.
    # RETURNS: a table with columns rn, <variable>, <variable>_p; also written
    #          to <data_dir>.csv as a side effect.
    # NOTE(review): assumes the summaries carry columns `estimate`, `2.5 %`,
    # `97.5 %` and `p.value` -- confirm against the upstream model scripts.
    #---------------------------------------------------------------------------
    # 1. Load the results; fail loudly if the expected object is absent instead
    #    of erroring obscurely further down.
    loaded <- load(paste0(data_dir, ".RData"))
    if (!"results" %in% loaded) {
        stop("'", data_dir, ".RData' does not contain an object named 'results'")
    }
    #---------------------------------------------------------------------------
    # 2. turn the rownames into a column variable on every element
    results <- lapply(results, rowname_converter)
    #---------------------------------------------------------------------------
    # 3. convert the list of dfs to one big data.table
    results_combined <- rbindlist(results)
    #---------------------------------------------------------------------------
    # 4. drop the intercept and time rows in a single filter (was two passes)
    results_combined <- results_combined[!(rn %in% c("(Intercept)", "time")), ]
    #---------------------------------------------------------------------------
    # 5. build the "estimate (lower-upper)" display string
    results_combined$conf <- paste0(
        format(round(results_combined$estimate,2),nsmall=2)," (",
        format(round(results_combined$`2.5 %`,2),nsmall=2),"-",
        format(round(results_combined$`97.5 %`,2),nsmall=2),")")
    #---------------------------------------------------------------------------
    # 6. format p value the same way
    results_combined$p.value <- format(round(results_combined$p.value,2), nsmall=2)
    #---------------------------------------------------------------------------
    # 7. Rename to outcome-specific names to avoid confusion when merging later
    names(results_combined)[names(results_combined) == "conf"] <- variable
    names(results_combined)[names(results_combined) == "p.value"] <- paste0(variable, "_p")
    #---------------------------------------------------------------------------
    # 8. Extract the predictor names, confidence intervals and p values,
    #    selected by name rather than by brittle positional index (was c(1,9,6))
    final <- results_combined[, c("rn", variable, paste0(variable, "_p")), with = FALSE]
    #---------------------------------------------------------------------------
    # 9. save output next to the input
    write.csv(x = final, paste0(data_dir, '.csv'))
    return(final)
}
##############################################################################
## RUN
# Convert each imputed-model result set to a formatted CSV table; the trailing
# notes record that no coefficient reached significance in any of the three.
se_results <- results_convert(variable = "side_effect", data_dir = "output/results/12-side-effects-imp") # nothing is sig
bv_results <- results_convert(variable = "bivariates", data_dir = "output/results/10-bivariate-imp") # nothing is sig
hm_results <- results_convert(variable = "harms", data_dir = "output/results/13-harms-imp") # nothing is sig
##############################################################################
################################# END ########################################
##############################################################################
|
e96f139c4e2cfce5bc8823d625c4c7b53c8d0302 | 37b7887265487badd542d592d1377db1712f39f3 | /man/write.table0.Rd | 54e002c0453a89d84158bb8db61ef40a770aa2b8 | [] | no_license | jkruppa/dataTools | 1d1f8a7b59ffdf529ff0fdbc48106f450b13ed11 | d00c41dbdd359e124d4de826ce7a05b4aa641e43 | refs/heads/master | 2020-09-22T20:07:41.356365 | 2017-11-29T12:31:20 | 2017-11-29T12:31:20 | 66,249,464 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 403 | rd | write.table0.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataIO.r
\name{write.table0}
\alias{write.table0}
\title{Small wrapper for write.table}
\usage{
write.table0(...)
}
\arguments{
\item{...}{}
}
\description{
Small wrapper for write.table
}
\details{
Small wrapper for write.table with the options col.names = FALSE, row.names = FALSE, quote = FALSE
}
\author{
Jochen Kruppa
}
|
46c8e0c31273ce463d8a42593af6b016efab395e | c03754bd79a6f9baf6bd55e74483bed1a2dbe8a7 | /R/superCell.R | 457cabde30d1da32072fd269e7f3f69587da74fd | [] | no_license | julienide/Atoms | 675ee51c5803bef9868838b1a229dc9d037bce9b | d805bd893a152bd4edc350a2fb18de0ed2d19a3c | refs/heads/master | 2021-04-27T06:57:27.989087 | 2018-06-19T09:09:53 | 2018-06-19T09:09:53 | 122,623,126 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,766 | r | superCell.R | #' superCell
#'
#'
#' @name superCell
#' @export
superCell <- function(x, ...) {
  ## S3 generic: dispatch on the class of `x` (see superCell.Atoms below).
  UseMethod("superCell")
}
#' @rdname superCell
#' @export
superCell.Atoms <- function(x, aInds = 0L, bInds = 0L, cInds = 0L,
                            current = NULL){
  ## Build a supercell: replicate the atoms of `x` over the grid of integer
  ## cell translations expand.grid(aInds, bInds, cInds); the default 0L keeps
  ## the original cell. `current` selects which configuration(s) to use
  ## (presumably trajectory frames -- confirm against the `[.Atoms` method).
  if(is.null(current))
    current <- current(x)
  if(!is.integer(aInds))
    stop("'aInds' must be an integer vector")
  if(!is.integer(bInds))
    stop("'bInds' must be an integer vector")
  if(!is.integer(cInds))
    stop("'cInds' must be an integer vector")
  if(!is.integer(current))
    stop("'current' must be an integer vector")
  ## Keep only the selected configuration(s); coordinates are rebuilt below.
  Obj <- x[, current]
  ## One row of `abc` per image cell; `ncell` images in total.
  abc <- expand.grid(aInds, bInds, cInds)
  ncell <- nrow(abc)
  ## Fractional coordinates of the natm original atoms.
  ## NOTE(review): fractional(), natom() and cell() are package accessors --
  ## see their definitions for the exact conventions.
  frac <- as.matrix(fractional(x))
  natm <- natom(x)
  ## Tile the fractional coordinates ncell times and add each image's integer
  ## translation. Rows are the three fractional components; columns are atoms,
  ## with atoms varying fastest and image cells slowest.
  coords <- rbind(
    rep(frac[, 1L], ncell) + abc[rep(1:ncell, each = natm), 1L],
    rep(frac[, 2L], ncell) + abc[rep(1:ncell, each = natm), 2L],
    rep(frac[, 3L], ncell) + abc[rep(1:ncell, each = natm), 3L])
  ## Convert fractional -> Cartesian with the (original, unscaled) cell matrix.
  coords <- cell(x)%*%coords
  ## Store coordinates as one-column matrices, one row per replicated atom.
  Obj@x <- matrix(coords[1L, ], ncol = 1L)
  Obj@y <- matrix(coords[2L, ], ncol = 1L)
  Obj@z <- matrix(coords[3L, ], ncol = 1L)
  rm(coords)
  ## Scale the a/b/c cell parameters by the number of replicas per direction.
  Obj@a <- x@a*length(aInds)
  Obj@b <- x@b*length(bInds)
  Obj@c <- x@c*length(cInds)
  ## Replicate the per-atom table once per image cell.
  Obj@atoms <- Obj@atoms[rep(1:natm, ncell), ]
  ## Shift topology tables (rows of atom indices) by natm per image so that
  ## bonds/angles/dihedrals/impropers point at the copies in their own cell.
  duplicateTopo <- function(x, ncell, natm){
    if(nrow(x)){
      x <- x[rep(1:nrow(x), ncell), ] +
        rep(seq.int(0L, (ncell - 1L)*natm, natm), each = nrow(x))
      rownames(x) <- NULL
    }
    return(x)
  }
  Obj@bonds     <- duplicateTopo(Obj@bonds    , ncell, natm)
  Obj@angles    <- duplicateTopo(Obj@angles   , ncell, natm)
  Obj@dihedrals <- duplicateTopo(Obj@dihedrals, ncell, natm)
  Obj@impropers <- duplicateTopo(Obj@impropers, ncell, natm)
  ## Record this call on the returned object.
  Obj@call <- match.call()
  return(Obj)
}
|
ab50e1e5d1be817ab7766b9f429259f1e2805a4e | f29f603c38cd2ab4a3d350adc3be560aa3a99285 | /R/rowWeightedMedians.R | b3b99b53a0c7fd6dd6b02d5d4322a9b7d836fe38 | [] | no_license | const-ae/matrixStats | 03ff8cdbfc6439e4ff640cce9b7cd4ef7f0b91a0 | f84d13431889596b543fbdbf4d2af10b44cbb62c | refs/heads/master | 2023-06-27T19:59:17.105135 | 2020-09-25T22:23:30 | 2020-09-25T22:23:30 | 266,979,193 | 1 | 0 | null | 2020-05-26T07:52:24 | 2020-05-26T07:52:23 | null | UTF-8 | R | false | false | 4,532 | r | rowWeightedMedians.R | #' Calculates the weighted medians for each row (column) in a matrix
#'
#' Calculates the weighted medians for each row (column) in a matrix.
#'
#' The implementations of these methods are optimized for both speed and
#' memory. If no weights are given, the corresponding
#' \code{\link{rowMedians}}()/\code{colMedians()} is used.
#'
#' @param x A \code{\link[base]{numeric}} NxK \code{\link[base]{matrix}}.
#'
#' @param w A \code{\link[base]{numeric}} \code{\link[base]{vector}} of length
#' K (N).
#'
#' @param rows,cols A \code{\link[base]{vector}} indicating subset of rows
#' (and/or columns) to operate over. If \code{\link[base]{NULL}}, no subsetting
#' is done.
#'
#' @param na.rm If \code{\link[base:logical]{TRUE}}, missing values are
#' excluded from the calculation, otherwise not.
#'
#' @param ... Additional arguments passed to \code{\link{weightedMedian}}().
#'
#' @return Returns a \code{\link[base]{numeric}} \code{\link[base]{vector}} of
#' length N (K).
#'
#' @example incl/rowWeightedMedians.R
#'
#' @author Henrik Bengtsson
#'
#' @seealso Internally, \code{\link{weightedMedian}}() is used.
#' See \code{\link{rowMedians}}() and \code{colMedians()} for non-weighted
#' medians.
#'
#' @keywords array iteration robust univar
#' @export
rowWeightedMedians <- function(x, w = NULL, rows = NULL, cols = NULL,
                               na.rm = FALSE, ...) {
  ## ------------------------------------------------------------------------
  ## Argument validation
  ## ------------------------------------------------------------------------
  ## 'x' must be a plain matrix; anything else (e.g. a data.frame) is defunct.
  if (!is.matrix(x)) {
    .Defunct(msg = sprintf("Argument 'x' is of class %s, but should be a matrix. The use of a %s is not supported, the correctness of the result is not guaranteed. Please update your code accordingly.", sQuote(class(x)[1]), sQuote(class(x)[1]))) #nolint
  }
  ## 'w' (when given) is one non-negative numeric weight per column of the
  ## original, unsubsetted matrix.
  use_w <- !is.null(w)
  if (use_w) {
    n_col <- ncol(x)
    if (length(w) != n_col) {
      stop("The length of argument 'w' is does not match the number of column in 'x': ", length(w), " != ", n_col) #nolint
    }
    if (!is.numeric(w)) {
      stop("Argument 'w' is not numeric: ", mode(w))
    }
    if (any(!is.na(w) & w < 0)) {
      stop("Argument 'w' has negative weights.")
    }
  }
  ## ------------------------------------------------------------------------
  ## Subsetting: rows, then columns (the weights follow the column subset).
  ## ------------------------------------------------------------------------
  if (!is.null(rows)) {
    x <- x[rows, , drop = FALSE]
  }
  if (!is.null(cols)) {
    x <- x[, cols, drop = FALSE]
    if (use_w) {
      w <- w[cols]
    }
  }
  ## ------------------------------------------------------------------------
  ## Compute
  ## ------------------------------------------------------------------------
  if (!use_w) {
    ## Without weights, delegate to the fast unweighted implementation.
    return(rowMedians(x, na.rm = na.rm))
  }
  if (nrow(x) == 0L) {
    return(double(0L))
  }
  ## One weighted median per row; apply() keeps row names on the result and
  ## forwards 'w', 'na.rm' and '...' straight to weightedMedian().
  apply(x, MARGIN = 1L, FUN = weightedMedian, w = w, na.rm = na.rm, ...)
}
#' @rdname rowWeightedMedians
#' @export
colWeightedMedians <- function(x, w = NULL, rows = NULL, cols = NULL,
                               na.rm = FALSE, ...) {
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'x': must be a plain matrix; other classes (e.g. data.frame)
  # are defunct.
  if (!is.matrix(x)) {
    .Defunct(msg = sprintf("Argument 'x' is of class %s, but should be a matrix. The use of a %s is not supported, the correctness of the result is not guaranteed. Please update your code accordingly.", sQuote(class(x)[1]), sQuote(class(x)[1]))) #nolint
  }
  # Argument 'w': one non-negative numeric weight per row of the *original*
  # matrix -- its length is validated before the 'rows' subset is applied.
  has_weights <- !is.null(w)
  if (has_weights) {
    n <- nrow(x)
    if (length(w) != n) {
      stop("The length of argument 'w' is does not match the number of rows in 'x': ", length(w), " != ", n) #nolint
    }
    if (!is.numeric(w)) {
      stop("Argument 'w' is not numeric: ", mode(w))
    }
    if (any(!is.na(w) & w < 0)) {
      stop("Argument 'w' has negative weights.")
    }
  }
  # Apply subset on x
  if (!is.null(rows) && !is.null(cols)) x <- x[rows, cols, drop = FALSE]
  else if (!is.null(rows)) x <- x[rows, , drop = FALSE]
  else if (!is.null(cols)) x <- x[, cols, drop = FALSE]
  # Apply subset on w (the weights follow the row subset)
  if (!is.null(w) && !is.null(rows)) w <- w[rows]
  if (has_weights) {
    # Zero columns: nothing to compute.
    m <- ncol(x)
    if (m == 0L)
      return(double(0L))
    # One weighted median per column; apply() keeps column names on the result.
    res <- apply(x, MARGIN = 2L, FUN = function(x) {
      weightedMedian(x, w = w, na.rm = na.rm, ...)
    })
    w <- NULL # Not needed anymore
  } else {
    # No weights: delegate to the fast unweighted colMedians().
    res <- colMedians(x, na.rm = na.rm)
  }
  res
}
|
c44bcfef208847fb396a90f3ece139e6a6ec2cb8 | d6aba2b5d8c3a9f2c6bffb994e3d54101872a12d | /2准备模型数据-种植方案数据获取与分析流程.R | 2a0b1f9d8c00ce7c22db960b1ec67ce1e0b02aed | [] | no_license | wellionx/iCAN | c73539ef53836d6eaa89307702d047571b52536d | d9a69e246fa302cd621406db743f26b97d519a11 | refs/heads/master | 2021-09-17T03:29:25.665921 | 2018-06-27T08:04:58 | 2018-06-27T08:04:58 | 112,999,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,092 | r | 2准备模型数据-种植方案数据获取与分析流程.R | load(file = "D:\\Data\\datav\\Weather of 30 years clean data to2017.rda")
#pick up the station we need
# two stations in Xinjiang
library(data.table)
# NOTE: each line below overwrites DT with a different single-station subset of
# the 30-year weather table DT30ys; run only the line for the station wanted.
DT <- DT30ys[stationID %in% c("51133")] # tacheng
DT <- DT30ys[stationID %in% c("51076")] # alatai
DT <- DT30ys[stationID %in% c("54254")] # kaiyuan
DT <- DT30ys[stationID %in% c("54260")] # xifeng
DT <- DT30ys[stationID %in% c("53446")] # Neimeng
DT <- DT30ys[stationID %in% c("53336")] # Neimeng, Urad banner of Bayannur
DT <- DT30ys[stationID %in% c("54134")] # Neimeng, Jifeng grain trade - Urad banner of Bayannur
# China Seed International: four Heilongjiang prefectures
DT <- DT30ys[stationID %in% c("50658")] # Baiquan county
DT <- DT30ys[stationID %in% c("50742")] # Fuyu county
DT <- DT30ys[stationID %in% c("50778")] # Tongjiang city
DT <- DT30ys[stationID %in% c("50788")] # Fujin city
# Qapqal (Cha) county
DT <- DT30ys[stationID %in% c("51238")] # Qapqal (Cha) county
DT <- DT30ys[stationID %in% c("54416")] # Miyun
# Huatugula town, Tongliao
DT <- DT30ys[stationID %in% c("54135")]
wth <- DT
# Build a proper Date column from the year/month/day fields.
wth$Date <- as.Date(paste(wth$Year, wth$Month, wth$Day, sep='-'))
# EVP_A plus EVP_B
# Treat missing evaporation readings as zero before summing the two gauges
# into a single EVP column. NOTE(review): this silently converts "no reading"
# into "no evaporation" -- confirm that is intended for these stations.
wth$EVP_A[is.na(wth$EVP_A)] <- 0
wth$EVP_B[is.na(wth$EVP_B)] <- 0
wth$EVP <- wth$EVP_A + wth$EVP_B
head(wth)
####挑选出太阳辐射(日照时长),最高温,最低温,相对湿度,降水和蒸散量####
library(plyr)
# wth2 <- ddply(wth, .(Date, stationID), summarise,
#
# Year = Year,
# solarhr = round(SSD/10,1),
# Tmax = round(TEM_H/10,1),
# Tmin = round(TEM_L/10,1),
# humidity = RHU_M,
# rainfall = round(PRE_C/10,1),
# ET = round(EVP/10,1))
wth2 <- wth[,list(Year = Year,
solarhr = round(SSD/10,1),
Tmax = round(TEM_H/10,1),
Tmin = round(TEM_L/10,1),
humidity = RHU_M,
rainfall = round(PRE_C/10,1),
ET = round(EVP/10,1)),
by=.(stationID,Date)]
setkey(wth2,Date) #order the Date
# get the Day of Year
doyFromDate <- function(date) {
  # Day of year (1-366): go through character first so Dates and factors are
  # both handled, then parse and take the "%j" field as a number.
  parsed <- as.Date(as.character(date))
  as.numeric(format(parsed, "%j"))
}
wth2$DOY <- doyFromDate(wth2$Date)
head(wth2)
# drop the stationID
wth2$stationID <- NULL
#check missing data ####
library(mice)
anyNA(wth2)
md.pattern(wth2)
#if there is missing value, use KNN to impute the missing
#if not missing value ,skip this process
#KNN插值
library(DMwR)
im <- wth2[,-1]
knnOutput <- knnImputation(im) # 使用KNN插值.
anyNA(knnOutput)
wth2_im <- cbind(wth2[,1],knnOutput)
#data with missing values
wth_final <- wth2_im[,c("Year", "DOY", "solarhr", "Tmax", "Tmin",
"humidity" , "rainfall" , "ET")]
#data output without missing values
wth_final <- wth2[,c("Year", "DOY", "solarhr", "Tmax", "Tmin",
"humidity" , "rainfall" , "ET")]
#rename the data, and save to txt files for wth data
#save files to the desktop
write.table(wth_final,file = "C:/Users/Wei Li/Desktop/wth file TLhuatugula for crop model20180625.txt",sep = "\t",row.names = FALSE,quote = FALSE)
|
675132b19ae8eaed72fbd7acaf96a75afe8c2ce9 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.developer.tools/man/codebuild_stop_build_batch.Rd | 21fb73585992f1abdeaf0515ef88a49b8eaa58b2 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 485 | rd | codebuild_stop_build_batch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codebuild_operations.R
\name{codebuild_stop_build_batch}
\alias{codebuild_stop_build_batch}
\title{Stops a running batch build}
\usage{
codebuild_stop_build_batch(id)
}
\arguments{
\item{id}{[required] The identifier of the batch build to stop.}
}
\description{
Stops a running batch build.
See \url{https://www.paws-r-sdk.com/docs/codebuild_stop_build_batch/} for full documentation.
}
\keyword{internal}
|
42ff20dfaf10f0bb3c1eb4a7cfbb1a3b08454b92 | 1407f90078724f562ec58c34f7878c909044de0c | /modified_rainbow/man/fboxplot.Rd | 4d7704440c6164b8db1edd37e904785796fb3ec8 | [] | no_license | Chris7462/Modified-Rainbow | 3a91d16e55c4de043128153b497bf174e6aa0ca6 | 19d6f714a244d70241a364a27455c2e19d4ee051 | refs/heads/master | 2021-01-19T00:40:25.907579 | 2017-04-04T14:39:42 | 2017-04-04T14:39:42 | 87,197,743 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,094 | rd | fboxplot.Rd | \name{fboxplot}
\alias{fboxplot}
\title{Functional bagplot and functional HDR boxplot}
\description{
Compute bivariate bagplot, functional bagplot and bivariate HDR boxplot, functional HDR boxplot.
}
\usage{
fboxplot(data, plot.type = c("functional", "bivariate"),
type = c("bag", "hdr"), alpha = c(0.05, 0.5), projmethod = c("PCAproj","rapca"),
factor = 1.96, na.rm = TRUE, xlab = data$xname, ylab = data$yname,
shadecols = gray((9:1)/10), pointcol = 1, plotlegend = TRUE,
legendpos = "topright", ncol = 2, ...)
}
\arguments{
\item{data}{An object of class \code{\link[rainbow]{fds}} or \code{fts}.}
\item{plot.type}{Version of boxplot. When \code{plot.type="functional"}, a functional plot is provided. When \code{plot.type="bivariate"}, a square bivariate plot is provided.}
\item{type}{Type of boxplot. When \code{type="bag"}, a bagplot is provided. When \code{type="hdr"}, a HDR boxplot is provided.}
\item{alpha}{Coverage probability for the functional HDR boxplot. \eqn{\alpha}{alpha} are the coverage percentages of the outliers and the central region.}
\item{factor}{When \code{type="bag"}, the outer region of a bagplot is the convex hull obtained by inflating the inner region by the bagplot factor.}
\item{na.rm}{Remove missing values.}
\item{xlab}{A title for the x axis.}
\item{ylab}{A title for the y axis.}
\item{shadecols}{Colors for shaded regions.}
\item{pointcol}{Color for outliers and mode.}
\item{plotlegend}{Add a legend to the graph.}
\item{legendpos}{Legend position. By default, it is the top right corner.}
\item{ncol}{Number of columns in the legend.}
\item{projmethod}{Method used for projection.}
\item{...}{Other arguments.}
}
\details{
The functional curves are first projected into a finite dimensional subspace.
For simplicity, we choose the subspace as \eqn{R^2}. Based on Tukey (1974)'s halfspace bagplot and
Hyndman (1996)'s HDR boxplot, we order each data point in \eqn{R^2} by data depth and data density.
Outliers are those that have either lowest depth or lowest density.
}
\value{
Function produces a plot.
}
\references{
J. W. Tukey (1974) "Mathematics and the picturing of data", \emph{Proceedings of the International Congress of Mathematicians}, \bold{2}, 523-532, Canadian Mathematical Congress, Montreal.
P. Rousseeuw, I. Ruts and J. Tukey (1999) "The bagplot: A bivariate boxplot", \emph{The American Statistician}, \bold{53}(4), 382-387.
R. J. Hyndman (1996) "Computing and graphing highest density regions", \emph{The American Statistician}, \bold{50}(2), 120-126.
R. J. Hyndman and H. L. Shang. (2010) "Rainbow plots, bagplots, and boxplots for functional data", \emph{Journal of Computational and Graphical Statistics}, \bold{19}(1), 29-45.
}
\author{Rob J Hyndman, Han Lin Shang. Please, report bugs and suggestions to hanlin.shang@anu.edu.au}
\seealso{
\code{\link[rainbow]{SVDplot}}
}
\examples{
fboxplot(data = ElNino, plot.type = "functional", type = "bag", projmethod="PCAproj")
fboxplot(data = ElNino, plot.type = "bivariate", type = "bag", projmethod="PCAproj")
}
\keyword{multivariate}
|
944a2268fc61a33a97aa3baf765721b7eee4084f | b437adefdb097c34f01f2470790ef8c6fe3648df | /scripts/Niche_Models/old/bioclim_pca.R | 9bfbb16af70cdaf8c9f47f1d470722439b66fd06 | [] | no_license | kaiyaprovost/GDM_pipeline | 607887afed2f6faddb2584eebb9eb7ff0120fea1 | 05e8f5d0a46781d727b60fe913c94137b9b35824 | refs/heads/master | 2022-09-20T22:55:39.707325 | 2022-08-31T18:24:19 | 2022-08-31T18:24:19 | 237,048,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,074 | r | bioclim_pca.R | ## pcas of bioclim variables plus elevation
library(raster)
folder = "/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/enm_layers/bio_2-5m_bil/"
worldclim = list.files(folder,pattern="bil$",full.names = T)
clim = raster::stack(worldclim)
## clip to SW
#ext = raster::extent(c(-125,-99, 22, 44))
#ext = raster::extent(c(-118,-98, 21, 36))
ext = raster::extent(c(-125,-69,10,55))
elev=raster::raster("/Users/kprovost/Documents/ENMs/OLD/NASA_TOPOGRAPHY_SRTM_RAMP2_TOPO_2000-02-11_gs_3600x1800.asc")
clim = raster::crop(clim, ext)
elev = raster::crop(elev, ext)
plot(clim[[1]])
plot(elev)
bigclim = raster::raster("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/enm_layers/ENMS_multilayer4.tif")
# longs=c(-101.00,-109.03965)
# lats = c(25.67,32.0526078)
# x = coordinates((cbind(longs,lats)))
# y = extract(clim[[c("bio1","bio12")]],x)
# png("precip_barplot.png",height=280)
# par(mar=c(2,4,0,0))
# barplot(y[,2],horiz=F,las=1,col=c("lightblue","blue"),ylim=c(0,310),names=c("Coahuila","New Mexico"),ylab="Precipitation (mm)")
# dev.off()
# png("temp_barplot.png",height=280)
# par(mar=c(2,4,0,0))
# barplot(y[,1]/10,horiz=F,las=1,col=c("red","pink"),names=c("Coahuila","New Mexico"),ylim=c(0,21),ylab="Temperature (°C)")
# dev.off()
shapefile = raster::shapefile("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/deserts_north_america_wwf_SONCHI.shp")
plot(shapefile,add=F)
clim = raster::crop(clim,shapefile)
plot(clim[[1]])
plot(shapefile,add=T)
elev = raster::crop(elev,shapefile)
plot(elev)
plot(shapefile,add=T)
clim2 = clim
#r.polys <- rasterize(shapefile, clim2, field = 1, fun = "mean",
# update = F, updateValue = "NA")
r.polys <- rasterize(shapefile, climpca, field = 1, fun = "mean",
update = F, updateValue = "NA")
plot(r.polys)
clim2[is.na(r.polys),] = NA
plot(clim2[[1]])
plot(clim2[[c("bio1","bio4","bio12","bio15")]])
elev2 = elev
r.polys <- rasterize(shapefile, elev2, field = 1, fun = "mean",
update = F, updateValue = "NA")
plot(r.polys)
elev2[is.na(r.polys),] = NA
plot(elev2)
data = values(clim2)
data2 = unique(data)
data2 = data2[complete.cases(data2),]
data3 = data2
data3[,c(1:3,12:19)] = data3[,c(1:3,12:19)]/10
dataelev = values(elev2)
dataelev2 = unique(dataelev)
dataelev2 = dataelev2[complete.cases(dataelev2)]
climr = resample(clim2[[1]],elev2)
climp = resample(clim2[[4]],elev2)
## correlate values
dat1 = values(climr)
dat2 = values(climp)
dat = cbind(dataelev,dat1,dat2)
dat=dat[complete.cases(dat),]
par(mfrow=c(1,2))
plot(dat[,1],dat[,2])
abline(lm(dat[,2]~dat[,1]),col="red")
plot(dat[,1],dat[,3])
abline(lm(dat[,3]~dat[,1]),col="blue")
sds=matrixStats::colSds(data3)
means=colMeans(data3)
mins = sapply(1:ncol(data3),FUN = function(x){min(data3[,x])})
maxs = sapply(1:ncol(data3),FUN = function(x){max(data3[,x])})
summaries=rbind(sds,means,mins,maxs)
summaries
pca = prcomp(data2,center = T,scale. = T,rank. = 3)
#newdata = pca$x
pca
abs(pca$rotation)
corrplot::corrplot(abs(t(pca$rotation)),is.corr=F,cl.lim=c(min(abs(pca$rotation)),
max(abs(pca$rotation))),
method="number")
summary(pca)
# r=3,g=1,b=2
corrplot::corrplot(abs(t(pca$rotation)),is.corr=F,cl.lim=c(min(abs(pca$rotation)),
max(abs(pca$rotation))),
method="ellipse",
col=c(rev(viridis::viridis(50)),viridis::viridis(50)))
pred <- predict(pca, newdata=values(clim))
pred2 = pred
pred2[,1] = scales::rescale(pred[,1],to=c(0,1))
pred2[,2] = scales::rescale(pred[,2],to=c(0,1))
pred2[,3] = scales::rescale(pred[,3],to=c(0,1))
pred3 <- predict(pca, newdata=data)
pred4 = pred3
pred4[,1] = scales::rescale(pred3[,1],to=c(0,1))
pred4[,2] = scales::rescale(pred3[,2],to=c(0,1))
pred4[,3] = scales::rescale(pred3[,3],to=c(0,1))
climpca = clim[[1:3]]
values(climpca) = pred2
names(climpca)
writeRaster(climpca,"Deserts_Bioclim_PCA_SONCHI.tif",format="GTiff")
climpca = stack("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER1_REVIEW/Deserts_Bioclim_PCA_SONCHI.tif")
wcdshape=shapefile("/Users/kprovost/Dropbox (AMNH)/Dissertation/western_continental_divide/condivl020.shp")
watersheds=shapefile("/Users/kprovost/Downloads/wri_basins/wribasin.shp")
plot(watersheds,add=F,lwd=2)
library(raster)
library(rgdal)
ext_user=extent(c(-126,-90,18,40))
path <- readOGR(dsn = "/Users/kprovost/Downloads/WCD_from_Ed_1236_4iouz24kv_ConDivides.gpx", layer = "tracks")
waypoints <- readOGR(dsn = "/Users/kprovost/Downloads/WCD_from_Ed_1236_4iouz24kv_ConDivides.gpx", layer = "track_points")
#shapefile("USA_States")->us #need US state shape
#shapefile("Mexico_States")->mex #and Mexico... I can send these too
#crop(bind(us, mex), ext_user)->NorAmer
#plot(NorAmer)
#plot(crop(path, ext_user), add = T, lwd=2, col = "red")
tif=stack("/Users/kprovost/Downloads/Deserts_Bioclim_PrecPCA_From_GDM.tif")
shp=shapefile("/Users/kprovost/Downloads/deserts_north_america_wwf_SONCHI.shp")
tif = crop(tif,extent(shp))
png("outline_desert_latlong.png",bg=rgb(0,0,0,0))
plot(tif[[1]],col=rgb(0,0,0,0))
plot(shp,add=T,col=rgb(0,0,0,0))
dev.off()
png("just_outline_of_deserts_color_6nov2020.png",bg=rgb(0,0,0,0))
par(mar=c(0,0,0,0))
plot(shp,col=rgb(0,0,0,0),border=c("green","blue"),lwd=3)
plot(path,add=T,col=rgb(0.3,0,0),lwd=3,lty=1)
plot(path,add=T,col="red",lwd=3,lty=2)
dev.off()
path=crop(path,extent(climpca))
#png("Deserts_Bioclim_PCA_CONTINENT.png")
png("Deserts_Bioclim_PCA_SONCHI.png")
plotRGB(climpca,scale=1,r=2,g=1,b=3,colNA="white") #312 is best previously
plot(shapefile,add=T,lty=1)
plot(path,add=T,lwd=3,lty=3)
#points(x,col="black",cex=2,pch=4,lwd=5)
dev.off()
png("Deserts_Bioclim_PCA_SONCHI_ONLY.png")
climpca[is.na(r.polys),] = NA
plotRGB(climpca,scale=1,r=2,g=1,b=3,colNA="white") #312 is best previously
plot(shapefile,add=T,lty=1)
plot(path,add=T,lwd=3,lty=3)
dev.off()
climpca2 = clim[[1:3]]
values(climpca2) = pred4
names(climpca2)
plotRGB(climpca2,scale=1,r=3,g=1,b=2) #1
plot(shapefile,add=T)
plotRGB(climpca,scale=1,r=1,g=1,b=1,colNA="magenta") #1
plotRGB(climpca,scale=1,r=2,g=2,b=2,colNA="magenta") #1
plotRGB(climpca,scale=1,r=3,g=3,b=3,colNA="magenta") #1
#par(mfrow=c(1,3))
#plotRGB(climpca2,scale=1,r=1,g=1,b=1,colNA="magenta") #1
#plotRGB(climpca2,scale=1,r=2,g=2,b=2,colNA="magenta") #1
#plotRGB(climpca2,scale=1,r=3,g=3,b=3,colNA="magenta") #1
plot(climpca2)
temp = clim[[c(1,10+5,10+6,10+7,11-8)]]
names(temp) = toupper(names(temp))
prec = clim[[c(12-8,13-8,14-8,18-8,19-8)]]
names(prec) = toupper(names(prec))
temp_pca = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/temperature_pca.csv",row.names = 1)
temp_pca = temp_pca[1:5,]
prec_pca = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/precipitation_pca.csv",row.names = 1)
prec_pca = prec_pca[1:5,]
temp_val = values(temp)
temp_val = as.data.frame(scale(temp_val, center = TRUE, scale = TRUE))
corrplot::corrplot(cor(temp_val,use="pairwise.complete.obs"),method="number")
temp_pca_val = temp_val
# Project every (scaled) climate cell onto the saved PCA loadings. The old
# row-by-row loop computed colSums(t(row) * loadings) for each cell with a
# progress print every 100 rows; that is exactly one matrix product, so do it
# in a single vectorized step. NA cells propagate to NA scores as before.
temp_scores = as.matrix(temp_pca_val) %*% as.matrix(temp_pca)
colnames(temp_scores) = colnames(temp_pca_val) # keep original names, as the loop did
temp_pca_val = as.data.frame(temp_scores)
temp_pca_ras = temp
values(temp_pca_ras) = as.matrix(temp_pca_val)
writeRaster(temp_pca_ras,"Deserts_Bioclim_TempPCA_From_GDM.asc",format="GTiff")
temp_pca_val[,1] = scales::rescale(temp_pca_val[,1])
temp_pca_val[,2] = scales::rescale(temp_pca_val[,2])
temp_pca_val[,3] = scales::rescale(temp_pca_val[,3])
values(temp_pca_ras) = as.matrix(temp_pca_val)
png("Deserts_Bioclim_TempPCA_From_GDM.png")
plotRGB(temp_pca_ras,scale=1,r=3,g=1,b=2) #1
dev.off()
prec_val = values(prec)
prec_val = as.data.frame(scale(prec_val, center = TRUE, scale = TRUE))
prec_pca_val = prec_val
# Same projection for precipitation: replace the per-row colSums loop (with
# its progress print) by a single matrix product; NA cells stay NA.
prec_scores = as.matrix(prec_pca_val) %*% as.matrix(prec_pca)
colnames(prec_scores) = colnames(prec_pca_val) # keep original names, as the loop did
prec_pca_val = as.data.frame(prec_scores)
prec_pca_ras = prec
values(prec_pca_ras) = as.matrix(prec_pca_val)
writeRaster(prec_pca_ras,"Deserts_Bioclim_PrecPCA_From_GDM.asc",format="GTiff")
prec_pca_val[,1] = scales::rescale(prec_pca_val[,1])
prec_pca_val[,2] = scales::rescale(prec_pca_val[,2])
prec_pca_val[,3] = scales::rescale(prec_pca_val[,3])
values(prec_pca_ras) = as.matrix(prec_pca_val)
png("Deserts_Bioclim_precPCA_From_GDM.png")
plotRGB(prec_pca_ras,scale=1,r=3,g=1,b=2) #1
dev.off()
prec_pca_val = stack("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/Deserts_Bioclim_PrecPCA_From_GDM.tif")
temp_pca_val = stack("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/Deserts_Bioclim_TempPCA_From_GDM.tif")
png("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/PrincipalComponents_TempAndPrec.png",
width=776,height=460)
par(mfrow=c(2,3))
plot(temp_pca_val[[1]],main="PC1T")
plot(temp_pca_val[[2]],main="PC2T")
plot(temp_pca_val[[3]],main="PC3T")
plot(prec_pca_val[[1]],main="PC1P")
plot(prec_pca_val[[2]],main="PC2P")
plot(prec_pca_val[[3]],main="PC3P")
dev.off()
## DO THIS BUT FOR THE TRANSLOCATED ONES
folder = "/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/enm_layers/bio_2-5m_bil/"
worldclim = list.files(folder,pattern="bil$",full.names = T)
clim = raster::stack(worldclim)
## clip to SW
ext = raster::extent(c(-125,-99, 22, 44))
#ext = raster::extent(c(-118,-98, 21, 36))
clim = raster::crop(clim, ext)
plot(clim[[1]])
shapefile = raster::shapefile("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/deserts_north_america_wwf.shp")
plot(shapefile,add=T)
clim = raster::crop(clim,shapefile)
plot(clim[[1]])
plot(shapefile,add=T)
clim2 = clim
r.polys <- rasterize(shapefile, clim2, field = 1, fun = "mean",
update = F, updateValue = "NA")
plot(r.polys)
clim2[is.na(r.polys),] = NA
plot(clim2[[1]])
data = values(clim2)
data2 = unique(data)
data2 = data2[complete.cases(data2),]
data3 = data2
data3[,c(1:3,12:19)] = data3[,c(1:3,12:19)]/10
sds=matrixStats::colSds(data3)
means=colMeans(data3)
mins = sapply(1:ncol(data3),FUN = function(x){min(data3[,x])})
maxs = sapply(1:ncol(data3),FUN = function(x){max(data3[,x])})
summaries=rbind(sds,means,mins,maxs)
pca = prcomp(data2,center = T,scale. = T,rank. = 3)
#newdata = pca$x
pca
abs(pca$rotation)
corrplot::corrplot(abs(t(pca$rotation)),is.corr=F,cl.lim=c(min(abs(pca$rotation)),
max(abs(pca$rotation))),
method="number")
summary(pca)
# r=3,g=1,b=2
pred <- predict(pca, newdata=data)
pred2 = pred
pred2[,1] = scales::rescale(pred[,1])
pred2[,2] = scales::rescale(pred[,2])
pred2[,3] = scales::rescale(pred[,3])
climpca = clim[[1:3]]
values(climpca) = pred2
names(climpca)
png("Deserts_Bioclim_PCA.png")
plotRGB(climpca,scale=1,r=3,g=1,b=2) #1
plot(shapefile,add=T)
dev.off()
plotRGB(climpca,scale=1,r=1,g=1,b=1,colNA="magenta") #1
plotRGB(climpca,scale=1,r=2,g=2,b=2,colNA="magenta") #1
plotRGB(climpca,scale=1,r=3,g=3,b=3,colNA="magenta") #1
temp = clim[[c(1,10+5,10+6,10+7,11-8)]]
names(temp) = toupper(names(temp))
prec = clim[[c(12-8,13-8,14-8,18-8,19-8)]]
names(prec) = toupper(names(prec))
temp_pca = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/temperature_pca.csv",row.names = 1)
temp_pca = temp_pca[1:5,]
prec_pca = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/precipitation_pca.csv",row.names = 1)
prec_pca = prec_pca[1:5,]
temp_val = values(temp)
temp_val = as.data.frame(scale(temp_val, center = TRUE, scale = TRUE))
corrplot::corrplot(cor(temp_val,use="pairwise.complete.obs"),method="number")
temp_pca_val = temp_val
for (i in 1:nrow(temp_pca_val)) {
if (i %% 100 == 0) {
print(paste(i/100,"of",nrow(temp_pca_val)/100))
}
row = t(temp_pca_val[i,])
newrow = colSums(row * temp_pca)
temp_pca_val[i,] = newrow
}
temp_pca_ras = temp
values(temp_pca_ras) = as.matrix(temp_pca_val)
writeRaster(temp_pca_ras,"Deserts_Bioclim_TempPCA_From_GDM.asc",format="GTiff")
temp_pca_val[,1] = scales::rescale(temp_pca_val[,1])
temp_pca_val[,2] = scales::rescale(temp_pca_val[,2])
temp_pca_val[,3] = scales::rescale(temp_pca_val[,3])
values(temp_pca_ras) = as.matrix(temp_pca_val)
png("Deserts_Bioclim_TempPCA_From_GDM.png")
plotRGB(temp_pca_ras,scale=1,r=3,g=1,b=2) #1
dev.off()
prec_val = values(prec)
prec_val = as.data.frame(scale(prec_val, center = TRUE, scale = TRUE))
prec_pca_val = prec_val
for (i in 1:nrow(prec_pca_val)) {
if (i %% 100 == 0) {
print(paste(i/100,"of",nrow(prec_pca_val)/100))
}
row = t(prec_pca_val[i,])
newrow = colSums(row * prec_pca)
prec_pca_val[i,] = newrow
}
prec_pca_ras = prec
values(prec_pca_ras) = as.matrix(prec_pca_val)
writeRaster(prec_pca_ras,"Deserts_Bioclim_PrecPCA_From_GDM.asc",format="GTiff")
prec_pca_val[,1] = scales::rescale(prec_pca_val[,1])
prec_pca_val[,2] = scales::rescale(prec_pca_val[,2])
prec_pca_val[,3] = scales::rescale(prec_pca_val[,3])
values(prec_pca_ras) = as.matrix(prec_pca_val)
png("Deserts_Bioclim_precPCA_From_GDM.png")
plotRGB(prec_pca_ras,scale=1,r=3,g=1,b=2) #1
dev.off()
prec_pca_val = stack("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/Deserts_Bioclim_PrecPCA_From_GDM.tif")
temp_pca_val = stack("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/Deserts_Bioclim_TempPCA_From_GDM.tif")
png("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/PrincipalComponents_TempAndPrec.png",
width=776,height=460)
par(mfrow=c(2,3))
plot(temp_pca_val[[1]],main="PC1T")
plot(temp_pca_val[[2]],main="PC2T")
plot(temp_pca_val[[3]],main="PC3T")
plot(prec_pca_val[[1]],main="PC1P")
plot(prec_pca_val[[2]],main="PC2P")
plot(prec_pca_val[[3]],main="PC3P")
dev.off()
|
246cdff8f086c03775ae75cfa963e472f5ea1048 | 5cfcd5e4dc4068737571a376cd8ee414810f7289 | /E_expression_analysis/5_WGCNA.R | 131c96a93ccf6964f6c3b97132d0a3596b6e1571 | [] | no_license | RJEGR/Small-RNASeq-data-analysis | e42926cf3e6d28c117cb5bcd4a41d371bdbd528b | 1b0ea5a3f302f942e881c624497b2ed09da0978c | refs/heads/master | 2023-09-04T12:34:50.444113 | 2023-08-25T00:05:32 | 2023-08-25T00:05:32 | 77,959,541 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,291 | r | 5_WGCNA.R |
# LETS CLUSTER MIRS TO OVERVIEW PANEL OF RESPONSE TO ACIDIFICATION AND DEVELOPMENTAL STAGES:
# EX. https://doi.org/10.1038/s41598-020-75945-2 (LOOK GENES HUBS SEACH M&M)
library(WGCNA)
library(flashClust)
library(tidyverse)
rm(list = ls());
if(!is.null(dev.list())) dev.off()
path <- "~/Documents/MIRNA_HALIOTIS/SHORTSTACKS/ShortStack_20230315_out/"
count_f <- list.files(path = path, pattern = "Counts.txt", full.names = T)
count <- read_tsv(count_f)
which_cols <- count %>% select_if(is.double) %>% colnames()
datExpr <- count %>% select(all_of(which_cols))
datExpr <- as(datExpr, "matrix")
rownames(datExpr) <- count$Name
.datExpr <- datExpr
datExpr <- log2(datExpr+1)
datExpr <- t(datExpr) # log2(count+1) #
str(datExpr)
cat("\n:::::\n")
gsg = goodSamplesGenes(datExpr, verbose = 3)
gsg$allOK
if (!gsg$allOK) {
if (sum(!gsg$goodGenes)>0)
printFlush(paste("Removing genes:", paste(names(datExpr)[!gsg$goodGenes], collapse= ", ")));
if (sum(!gsg$goodSamples)>0)
printFlush(paste("Removing samples:", paste(rownames(datExpr)[!gsg$goodSamples], collapse=", ")))
datExpr= datExpr[gsg$goodSamples, gsg$goodGenes]
}
# Simulated-05-NetworkConstruction.pdf (Horvath and Langfelder, 2011)
# When you have a lot of genes use the following code
k=softConnectivity(datE=datExpr,power=6)
# Plot a histogram of k and a scale free topology plot
sizeGrWindow(10,5)
par(mfrow=c(1,2))
hist(k)
scaleFreePlot(k, main="Check scale free topology\n")
# Debemos señalar que no es necesario que una red satisfaga una topología libre de escala; La topología libre de escala puede no satisfacerse si los datos están compuestos por grupos de muestras globalmente muy distintos (por ejemplo, diferentes tipos de tejidos).
# DEBIDO A LA CANTIDAD DE DATOS MASIVOS. NO PODEMOS GENERAR: Average linkage hierachical clustering with adjacency-based dissimilarity
# 1) Detect max power ----
max_power <- 30
powers = c(c(1:10), seq(from = 10, to = max_power, by=1))
#powers = unique(powers)
allowWGCNAThreads()
cor_method = "cor" # by default WGCNA::cor(method = 'pearson') is used, "bicor"
corOptionsList = list(use ='p') # maxPOutliers = 0.05, blocksize = 20000
sft = pickSoftThreshold(datExpr,
powerVector = powers,
corFnc = cor_method,
corOptions = corOptionsList,
verbose = 5,
networkType = "signed")
saveRDS(sft, file = paste0(path, 'SoftThreshold_',cor_method, '.rds'))
sft <- read_rds(paste0(path, 'SoftThreshold_',cor_method, '.rds'))
soft_values <- abs(sign(sft$fitIndices[,3])*sft$fitIndices[,2])
soft_values <- round(soft_values, digits = 2)
hist(soft_values)
power_pct <- quantile(soft_values, probs = 0.95)
softPower <- sft$fitIndices[,1][which(soft_values >= power_pct)]
meanK <- sft$fitIndices[softPower,5]
hist(sft$fitIndices[,5])
softPower <- min(softPower)
cat("\nsoftPower value", softPower, '\n')
title1 = 'Scale Free Topology Model Fit,signed R^2'
title2 = 'Mean Connectivity'
caption = paste0("Lowest power for which the scale free topology index reaches the ", power_pct*100, " %")
sft$fitIndices %>%
mutate(scale = -sign(slope)*SFT.R.sq) %>%
select(Power, mean.k., scale) %>% pivot_longer(-Power) %>%
mutate(name = ifelse(name %in% 'scale', title1, title2)) %>%
ggplot(aes(y = Power, x = value)) +
geom_text(aes(label = Power), size = 2) +
geom_abline(slope = 0, intercept = softPower, linetype="dashed", alpha=0.5) +
# geom_vline(xintercept = min(meanK), linetype="dashed", alpha=0.5) +
labs(y = 'Soft Threshold (power)', x = '',
caption = caption) +
facet_grid(~name, scales = 'free_x', switch = "x") +
# scale_x_continuous(position = "top") +
theme_light(base_family = "GillSans",base_size = 16) -> psave
ggsave(psave, path = path, filename = 'SoftThreshold.png', width = 7, height = 4)
# The soft thresholding,
# is a value used to power the correlation of the genes to that threshold.
# The assumption on that by raising the correlation to a power will reduce the noise of
# the correlations in the adjacency matrix
# 2) Construct a gene co-expression matrix and generate modules ----
# Using Blockwise construction
# Call the network topology analysis function
cor_method = 'cor'
filename <- paste0(path, 'SoftThreshold_',cor_method, '.rds')
sft <- readRDS(filename)
soft_values <- abs(sign(sft$fitIndices[,3])*sft$fitIndices[,2])
soft_values <- round(soft_values, digits = 2)
power_pct <- quantile(soft_values, probs = 0.95)
softPower <- sft$fitIndices[,1][which(soft_values >= power_pct)]
softPower <- min(softPower)
allowWGCNAThreads()
wd <- paste0(path, Sys.Date())
system(paste0('mkdir ', wd))
setwd(wd)
# RUN:
bwnet <- blockwiseModules(datExpr,
maxBlockSize = 5000,
power = 5, # softPower,
TOMType = "signed",
networkType = "signed",
minModuleSize = 30,
corType = "bicor",
reassignThreshold = 0,
mergeCutHeight = 0.25,
numericLabels = TRUE,
saveTOMs = TRUE,
saveTOMFileBase = "TOM-blockwise",
verbose = 3)
saveRDS(bwnet, "bwnet.rds")
# 3) Output exploratory ----
bwnet <- readRDS(paste0(path, "2023-06-26/bwnet.rds"))
bwmodules = labels2colors(bwnet$colors)
names(bwmodules) <- names(bwnet$colors)
table(bwmodules)
# (TEST) Plot the dendrogram and the module colors underneath for block 1
# i <- 8
#
# plotDendroAndColors(bwnet$dendrograms[[i]], bwmodules[bwnet$blockGenes[[i]]],
# "Module colors", main = "Gene dendrogram and module colors in block i",
# dendroLabels = FALSE, hang = 0.03,
# addGuide = TRUE, guideHang = 0.05)
reads <- rowSums(.datExpr)
Total <- sum(reads)
# Sanity check:
identical(names(colSums(datExpr)), names(bwmodules))
data.frame(reads, bwmodules) %>%
as_tibble() %>%
group_by(bwmodules) %>%
summarise(n = n(), reads = sum(reads)) %>%
dplyr::rename('module' = 'bwmodules') -> stats
# Prep binary datTraits
datTraits <- gsub(".clean.newid.subset", "", rownames(datExpr))
HR11076 <- grepl('HR11076', datTraits)
HR1108 <- grepl('HR1108', datTraits)
HR2476 <- grepl('HR2476', datTraits)
HR248 <- grepl('HR248', datTraits)
datTraits <- data.frame(HR11076, HR1108, HR2476, HR248)
datTraits <- 1*datTraits
rownames(datTraits) <- rownames(datExpr)
# Recalculate MEs with color labels
MEs0 = moduleEigengenes(datExpr, bwmodules)$eigengenes
MEs = orderMEs(MEs0)
names(MEs) <- str_replace_all(names(MEs), '^ME', '')
moduleTraitCor = cor(MEs, datTraits, use= "p")
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nrow(datTraits))
moduleTraitCor %>% as_tibble(rownames = 'module') %>%
pivot_longer(-module, values_to = 'moduleTraitCor') -> df1
moduleTraitPvalue %>% as_tibble(rownames = 'module') %>%
pivot_longer(-module, values_to = 'corPvalueStudent') %>%
right_join(df1) -> df1
hclust <- hclust(dist(moduleTraitCor), "complete")
# # BASED ON THE dendogram from the heatmap, WE REALIZE THAN "tan" (2n clade), "magenta" (2n clade), "greenyellow" (1st clade) are within same cluster than grey module in the dendo.
# plot(hclust)
# SPLIT CLUSTERS BY MIRS/PIRS/SIRS
SRNAS <- read_rds(paste0(path, "KNOWN_CLUSTERS_MIRS_PIRS.rds"))
str(which_pirs <- SRNAS %>% filter(grepl("piR", KnownRNAs)) %>% distinct(Name) %>% pull())
str(which_mirs <- SRNAS %>% filter(!grepl("piR", KnownRNAs)) %>% distinct(Name) %>% pull())
bwmodules %>%
as_tibble(., rownames = 'Name') %>%
mutate(biotype = ifelse(Name %in% which_mirs, 'miR',
ifelse(Name %in% which_pirs, 'piR', 'siRs'))) %>%
dplyr::rename('module' = 'value') -> bwModuleCol
# ADD COUNT INFO:
# reads <- rowSums(datExpr)
# Total <- sum(reads)
bwModuleCol <- data.frame(reads) %>%
as_tibble(rownames = "Name") %>%
right_join(bwModuleCol)
# bwModuleCol %>% group_by(module) %>% count(sort = T) %>% left_join(stats)
bwModuleCol %>%
group_by(module, biotype) %>%
summarise(reads = sum(reads), n = n()) %>%
arrange(desc(n)) -> bwModuleDF
bwModuleDF %>%
mutate(module = factor(module, levels = hclust$labels[hclust$order])) -> bwModuleDF
bwModuleDF %>%
group_by(module) %>%
# ungroup() %>%
mutate(cluster_frac = n / sum(n),
reads_frac = reads / sum(reads)) -> bwModuleDF
# bwModuleDF %>% tally(reads_frac)
df1 %>%
mutate(star = ifelse(corPvalueStudent <.001, "***",
ifelse(corPvalueStudent <.01, "**",
ifelse(corPvalueStudent <.05, "*", "")))) -> df1
df1 %>%
mutate(facet = ifelse(name %in% c('HR11076', 'HR2476'), 'Low pH', 'Control')) %>%
mutate(moduleTraitCor = round(moduleTraitCor, 2)) %>%
mutate(star = ifelse(star != '', paste0(moduleTraitCor, '(', star,')'), moduleTraitCor)) %>%
# mutate(star = ifelse(star != '', paste0(moduleTraitCor, '(', star,')'), '')) %>%
ggplot(aes(y = module, x = name, fill = moduleTraitCor)) +
geom_tile(color = 'white', size = 0.7, width = 1) +
# geom_raster() +
geom_text(aes(label = star), vjust = 0.5, hjust = 0.5, size= 4, family = "GillSans") +
ggsci::scale_fill_gsea(name = "", reverse = T, na.value = "white") +
# scale_fill_viridis_c(name = "Membership", na.value = "white") +
ggh4x::scale_y_dendrogram(hclust = hclust) +
labs(x = '', y = 'Module') +
guides(fill = guide_colorbar(barwidth = unit(3.5, "in"),
barheight = unit(0.1, "in"), label.position = "top",
alignd = 0.5,
ticks.colour = "black", ticks.linewidth = 0.5,
frame.colour = "black", frame.linewidth = 0.5,
label.theme = element_text(size = 10))) +
theme_classic(base_size = 12, base_family = "GillSans") +
theme(legend.position = "top",
strip.background = element_rect(fill = 'white', color = 'white')) +
facet_wrap(~ facet, scales = 'free_x') -> p1
p1 <- p1 + theme(
axis.line.x = element_blank(),
axis.line.y = element_blank(),
axis.text.y = element_text(hjust = 1.2),
axis.ticks.length = unit(5, "pt"))
p1 <- p1 + theme(panel.spacing.x = unit(-0.5, "mm"))
bwModuleDF %>%
ggplot(aes(y = module, fill = biotype)) +
# facet_wrap(biotype ~ ., nrow = 1, scales = "free_x") +
scale_x_continuous("Frac. of reads", labels = scales::percent) +
geom_col(aes(x = reads_frac), width = 0.95, position = position_stack(reverse = TRUE)) +
# geom_col(aes(x = reads_frac), width = 0.95, fill = "grey")
scale_fill_manual(name = '', values = c("#303960", "#647687", "#E7DFD5")) + # grey90
theme_classic(base_size = 12, base_family = "GillSans") +
theme(legend.position = "top",
strip.background = element_rect(fill = 'white', color = 'white'),
axis.title.y = element_blank(),
axis.text.y= element_blank(),
axis.ticks.y =element_blank(),
axis.line.y = element_blank(),
axis.line.x = element_blank(),
axis.ticks.length = unit(5, "pt")) -> p2
library(patchwork)
# p1 + plot_spacer() + p2 + plot_layout(widths = c(5,-0.5, 10)) #& theme(plot.margin = 0)
ps <- p1 + p2 + plot_layout(widths = c(6, 5)) + labs(caption = '* corPvalueStudent < 0.05 ')
# ps
ggsave(ps, filename = 'WGCNA.png',
path = path, width = 8, height = 4.5, device = png, dpi = 300)
bwModuleDF %>%
ungroup() %>%
mutate(x = n / sum(n), y = reads / sum(reads)) %>%
ggplot(aes(x, y, color = biotype)) +
# facet_wrap(~module) +
theme_classic(base_size = 12, base_family = "GillSans") +
scale_color_manual(name = '', values = c("#303960", "#647687", "grey90")) +
geom_point()
bwModuleDF %>% view()
df1 %>% filter(star != "") %>% View()
|
64a56d7ebc81379221e81cd2dfd78d26c62bba09 | 8532b2aff57e42dabb8170f6aee416e801d08c13 | /Places/PlacesHelper.R | 25f59c4f35bba9c216e332b61ad49146516e1b44 | [] | no_license | sztal/MA | b5295472cf37b9b740676dc1ddbca0b05da5b0aa | 818ec13170e3d7e8e7257cc796f822356bc571e0 | refs/heads/master | 2016-09-05T11:14:56.150329 | 2015-11-09T02:12:08 | 2015-11-09T02:12:08 | 27,833,877 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,596 | r | PlacesHelper.R | # This is a file with helper funtions for network datasets derivation
getNames <- function(P) {
# This function gets a list of all unique place names from a place dataset
N = vector(mode="character", length=dim(P)[1]*dim(P)[2])
for(i in 1:dim(P)[1]) {
for(j in 1:dim(P)[2]) {
ind = (i-1)*dim(P)[2] + j
name = as.character(P[i, j])
N[ind] = name
}
}
N = N[!(N == "" | is.na(N) | is.null(N))]
N = tolower(N)
N = unique(N)
return(N)
}
# This function transform all columns in a place dataset to character vector and then to lower cases
toLower <- function(P) {
for(i in 1:dim(P)[2]) {
P[,i] = as.character(P[,i])
P[,i] = tolower(P[,i])
}
return(P)
}
# This function get all palces from a place dataset as a vector - it can be used to compute distributions of place names
getNames <- function(P) {
Names = c()
for(i in 1:dim(P)[1]) {
n = as.character(P[i, ])
n = n[!(n=="" | is.na(n) | is.null(n))]
Names = append(Names, n)
}
Names = sort(Names)
return(Names)
}
# This function give a proposition of approximate mathing of names
approxMatch <- function(p1, N) {
len = length(p1)
match = vector(mode="character", length=len)
for(i in 1:len) {
if(p1[i] != "") {
prop = agrep(p1[i], N)[1]
match[i] = N[prop]
}
else match[i] = NA
}
DF = data.frame(p1, match=match)
return(DF)
}
# Function changing factor columns to character columns
toCharacter <- function(P) {
for(i in 1:dim(P)[2]) {
if(class(P[,i]) == "factor") P[,i] = as.character(P[,i])
}
return(P)
}
# Get a list of unique places
uniquePlaces <- function(P) {
P = toCharacter(P)
if(class(P[,1]) != "character") P = P[, 2:dim(P)[2]]
places = vector(mod="character", length=0)
for(i in 1:dim(P)[1]) {
for(j in 1:dim(P)[2]) {
if(P[i,j] != "" & !is.na(P[i,j]) & !is.null(P[i,j])) {
places = append(places, P[i,j])
}
}
}
return(list(places = places, unique = unique(places)))
}
# This function takes a final place dataset and tranforms it to a place matrix
getPlaceMatrix <- function(P) {
P = toCharacter(P)
N = uniquePlaces(P)$unique # gets unique place names
M = matrix(0, nrow = dim(P)[1], ncol = length(N)) # initialize a place matrix
colnames(M) = N
return(M)
}
# This function takes a final place dataset and gives a people-place adjacency matrix
getPeoplePlaceMatrix <- function(P) {
require(dplyr)
P = toCharacter(P)
P = arrange(P, id)
ids = P$id
P = P[, -which(names(P) == "id")]
AM = getPlaceMatrix(P) # gets a place matrix
for(i in 1:dim(P)[1]) {
for(j in 1:dim(P)[2]) {
if(P[i,j] != "" & !is.na(P[i,j]) & !is.null(P[i,j])) {
place = P[i,j]
AM[i, place] = 1 # double indications are not considered
}
}
}
rownames(AM) = ids
return(AM)
}
# Get the dominant type of cluster for the places and the entropy of the cluster type distribution; moreover it gets proportions of each cluster in places
getDominantClusters <- function(Pdat, D, P) {
# Pdat is the dataset of place profiles; P is a final place dataset; D is a respondents dataset
source("HelperFunctionsMisc/ComputingMisc.R")
require(dplyr)
P = arrange(P, id)
D = arrange(D, id)
if(!all.equal(P$id, D$id)) stop("ids in P and D are not equal")
places = rownames(Pdat)
dcluster = vector(mode="character", length=dim(Pdat)[1]) # dominant clusters
ent = vector(mode="numeric", length=dim(Pdat)[1]) # absolute entropies
r_ent = vector(mode="numeric", length=dim(Pdat)[1]) # vector of relative entropies
cluster1 = vector(mode="numeric", length=dim(Pdat)[1]) # vector of proportions of the cluster 1 - Wolne Zawody
cluster2 = vector(mode="numeric", length=dim(Pdat)[1]) # vector of proportions of the cluster 2 - Studenci
cluster3 = vector(mode="numeric", length=dim(Pdat)[1]) # vector of proportions of the cluster 3 - Kulturalnie wycofani
for(place in places) {
cluster_dist = vector(mode="character", length=0)
for(id in D$id) {
if(place %in% P[P$id==id, ]) {
cluster_type = as.character(D[D$id==id, "cluster"])
cluster_dist = append(cluster_dist, cluster_type)
}
}
mode = domin(cluster_dist) # mode of the cluster type distribution
H = entropy(cluster_dist, rel = FALSE) # absolute entropy of the distribution
Hr = entropy(cluster_dist, rel = TRUE) # relative entropy of the distribution
### cluster 1 - Wolne Zawody
clust1 = table(cluster_dist)[3]/sum(table(cluster_dist))
### cluster 2 - Studenci
clust2 = table(cluster_dist)[2]/sum(table(cluster_dist))
### cluster 3 - Kulturalnie Wycofani
clust3 = table(cluster_dist)[1]/sum(table(cluster_dist))
index = which(places == place) # get the index of the place
dcluster[index] = mode # assign a dominant cluster to a place
ent[index] = H # assign absolute entropy to a place
r_ent[index] = Hr # assing relative entropy to a place
cluster1[index] = clust1 # assign cluster 1 proportion to a place
cluster2[index] = clust2 # assign cluster 2 proportion to a place
cluster3[index] = clust3 # assign cluster 3 proportion to a place
}
Pdat$dcluster = as.factor(dcluster)
Pdat$ent = ent
Pdat$r_ent = r_ent
Pdat$cluster1 = cluster1
Pdat$cluster2 = cluster2
Pdat$cluster3 = cluster3
return(Pdat)
}
# This function computes average profiles (for numerical variables) of places
profilePlaces <- function(Pdat, D, P) {
# Pdat is the dataset of place profiles; P is a final place dataset; D is a respondents dataset
require(dplyr)
P = arrange(P, id)
D = arrange(D, id)
ids = D$id # get a set of ids
if(!all.equal(P$id, D$id)) stop("ids in P and D are not equal")
places = rownames(Pdat)
numvars = which(sapply(D, class) == "numeric")
D = D[, numvars] # restrict the respondents dataset to numeric variables only
D = cbind(id = ids, D) # add ids again
PM = matrix(0, nrow=dim(Pdat)[1], ncol=dim(D)[2]-1)
colnames(PM) = names(D)[-1] # variables names without id
rownames(PM) = rownames(Pdat) # sets up a matrix of average place profiles
for(place in places) {
M = matrix(0, nrow=0, ncol=dim(D)[2]-1)
colnames(M) = names(D)[-1] # a matrix of respondents profiles
for(id in ids) {
if(place %in% P[P$id==id, ]) {
row = D[D$id==id, -1]
M = rbind(M, row)
}
}
avg_profile = apply(M, 2, mean, na.rm=TRUE)
PM[place, ] = avg_profile
}
Pdat = cbind(Pdat, PM)
return(Pdat)
}
# This function combines dominant clusters and average profiles
getFullPlaceInfo <- function(Pdat, D, P) {
Q = getDominantClusters(Pdat, D, P)
W = profilePlaces(Pdat, D, P)
Pdat = cbind(Q, W[,-1])
return(Pdat)
}
# This function computes various social space homogeneity indices based on place entropy distributions for individuals. These indices are:
# 1) sum of entropies; 2) average entropy; 3) maximal entropy; 4) minimal entropy
heterogeneityCoefs <- function(Pdat, D, P, threshold=0) {
source("HelperFunctionsMisc/ComputingMisc.R")
require(dplyr)
D = arrange(D, id)
P = arrange(P, id)
P = toCharacter(P)
places = rownames(Pdat)
# initialize vectors of entropy measures
ent_total = vector(mode="numeric", length=dim(D)[1])
ent_avg = vector(mode="numeric", length=dim(D)[1])
ent_wgh = vector(mode="numeric", length=dim(D)[1])
ent_max = vector(mode="numeric", length=dim(D)[1])
ent_min = vector(mode="numeric", length=dim(D)[1])
for(id in D$id) {
# initialize a vector for storing place entropies of an individual
ent_vec = vector(mode="numeric", length=0)
# initialize a vector for storing weights of places for an individual
# places are wieghted in regard to square root of popularity
# this is due to the fact that popular places should have higheer weights, but at the same time they should not completely dominate other places
weights = vector(mode="numeric", length=0)
for(place in places) {
if(Pdat[place, "popularity"] < threshold) next
if(place %in% P[P$id==id, ]) {
ent_vec = append(ent_vec, Pdat[place, "ent"])
weights = append(weights, sqrt(Pdat[place, 1]))
}
index = which(D$id == id) # for indexing entropy values on ent_ vectors
if(length(ent_vec) > 0) {
ent_total[index] = sum(ent_vec, na.rm=TRUE)
ent_avg[index] = mean(ent_vec, na.rm=TRUE)
ent_wgh[index] = as.numeric(t(ent_vec %*% weights)) / sum(weights)
ent_max[index] = max(ent_vec, na.rm=TRUE)
ent_min[index] = min(ent_vec, na.rm=TRUE)
}
else {
ent_total[index] = NA
ent_avg[index] = NA
ent_wgh[index] = NA
ent_max[index] = NA
ent_min[index] = NA
}
}
}
E = data.frame(ent_total, ent_avg, ent_wgh, ent_max, ent_min)
return(E)
}
# Real entropy measure; it is computed as entropy of the distribution of all (unique) persons (in terms of their cluster assignments) that are met through places that are being visited by the respondent
realEntropy <- function(D, AM, rel=FALSE) {
source("HelperFunctionsMisc/ComputingMisc.R")
require(dplyr)
ids = as.numeric(rownames(AM))
ids = sort(ids)
D = arrange(D, id)
places = colnames(AM)
AM = AM[as.character(ids), ]
# initialize a vector storing real entropy values
ent = vector(mode="numeric", length=dim(D)[1])
for(id in ids) {
char.id = as.character(id)
index = which(ids == id) # to index new values properly on the ent vector
placevec = places[AM[char.id, ] == 1]
distentropy = 0 # initialize distribution entropy
if(length(placevec) > 0) {
personvec = apply(as.matrix(AM[, placevec]), 1, sum)
personvec[personvec > 1] = 1 # to includ only unique persons
persons.id = ids[personvec == 1]
clusterdist = D[D$id %in% persons.id, "cluster"]
distentropy = entropy(clusterdist, rel=rel)
ent[index] = distentropy
}
else {
distentropy = NA
ent[index] = distentropy
}
}
return(ent)
}
|
54f0968708c332e1486c46f2a77838e7c61327df | 0918eb5ea18915bc59bb64357ab06eff86ad3ee3 | /R/add_R2_from_gds.R | 108f07e4fb9b7a5c005647b1e4ff0b5340e7d429 | [
"Apache-2.0"
] | permissive | raonyguimaraes/POEMColoc | fd9ddfea8dfa6e788453b29c321ea5fbd27e0c74 | ac38733626eab2fca852187ac42248532126b018 | refs/heads/master | 2022-10-03T10:55:46.213718 | 2020-05-19T20:22:09 | 2020-05-19T20:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,770 | r | add_R2_from_gds.R | #' Compute R2 from gds input
#'
#' @param dataset_list
#' list of datasets formatted according to the requirements of coloc.abf with the following differences
#' pos, and chr are additionally required.
#' snp is ignored as it is determined by pos.
#' For top SNP datasets, R2 and MAF and gds_file and subset are optional, if supplied as arguments to the function.
#' For full summary statistic datasets, MAF is required.
#' imputation class either all or top
#' top_pos giving the position of the top p-value
#' @param dataset_R2_condition
#' indicates whether gds is to be used to get the MAF and R2 for the dataset. A string containing gds means it is.
#' @param window_size
#' if NULL, window size is determined to cover all positions in dataset_list
#' if specified, covers all positions in dataset list plus window_size around top SNP dataset
#' @param get_dosage_fn
#' a function taking gds file connection and returning
#' a matrix with the genotype or imputed dosage at each position. The function
#' should not perform any filtering or reordering of variants. rows must correspond
#' to individuals and columns to positions.
#' @return
#' a list of datasets, in which any dataset with dataset_R2_condition containing gds string now has R2 in addition to gds_file
#' @examples
#' gds_file <- system.file("extdata", "example.gds", package = "POEMColoc")
#' gds_file2 <- system.file("extdata", "example2.gds", package = "POEMColoc")
#' subset <- system.file("extdata", "subset.ped", package = "POEMColoc")
#' subset2 <- system.file("extdata", "subset2.ped", package = "POEMColoc")
#' dataset_top_SNP <- list(pos = 2, N= 10000, s =0.5, type="cc", pvalues = 10^-9, chr = "Z", imputation_class = "top", top_pos=2, snp="2")
#' dataset_top_SNP2 <- list(pos = 4, N= 10000, s =0.5, type="cc", pvalues = 10^-9, chr = "Z", imputation_class = "top", top_pos=4, snp="4")
#' dataset_full <- list(pos = c(1, 2, 3, 4, 5), MAF = c(0.14, 0.15, 0.25, 0.2, 0.4), N=1000, type ="quant", pvalues = c(2 * 10^-8, 4 * 10^-8, 2 * 10^-4, 0.6, 0.03), chr= "Z", top_pos = 1, imputation_class = "all", snp = as.character(1:5))
#' # Example in which gds file is not used
#' add_R2_from_gds(list(dataset_top_SNP, dataset_top_SNP, dataset_full), c('R2 from function', 'R2 from function', 'none'), 1)
#' # Example in which there is a single gds file and no subsets
#' add_R2_from_gds(list(c(dataset_top_SNP, list(gds_file = gds_file)), c(dataset_top_SNP2, list(gds_file = gds_file)), dataset_full),
#' c('gds from dataset', 'gds from function', 'none'), 1)
#' # Example with multiple gds files
#' add_R2_from_gds(list(c(dataset_top_SNP, list(gds_file = gds_file)), c(dataset_top_SNP2, list(gds_file = gds_file2)), dataset_full),
#' c('gds from dataset', 'gds from function', 'none'), 1)
#' # Example with one gds file and multiple subsets. Note it only selects the variants once.
#' add_R2_from_gds(list(c(dataset_top_SNP, list(gds_file = gds_file, subset = subset)),
#' c(dataset_top_SNP2, list(gds_file = gds_file, subset = subset2)), dataset_full),
#' c('gds from dataset', 'gds from function', 'none'), 1)
#' # Example with single subset and single gds file. Note in this case it only selects the subset from the gds.
#' add_R2_from_gds(list(c(dataset_top_SNP, list(gds_file = gds_file, subset = subset)),
#' c(dataset_top_SNP2, list(gds_file = gds_file, subset = subset)), dataset_full),
#' c('gds from dataset', 'gds from function', 'none'), 1)
#' #Example in which the gds file does not have one of the needed positions
#' dataset_full2 <- list(pos = 1:7, MAF = c(0.14, 0.15, 0.25, 0.2, 0.4, 0.2, 0.4), N=1000, type ="quant", pvalues = c(2 * 10^-8, 4 * 10^-8, 2 * 10^-4, 0.6, 0.03, 0.1, 0.2), chr= "Z", top_pos = 1, imputation_class = "all", snp = as.character(1:7))
#' add_R2_from_gds(list(c(dataset_top_SNP, list(gds_file = gds_file, subset = subset)),
#' c(dataset_top_SNP2, list(gds_file = gds_file, subset = subset)),
#' c(dataset_top_SNP2, list(gds_file = gds_file, subset = subset2)), dataset_full, dataset_full2),
#' c('gds from dataset', 'gds from function', 'gds from function', 'none', 'none'), 1)
add_R2_from_gds <- function(dataset_list, dataset_R2_condition, window_size, get_dosage_fn = get_dosage_alt) {
pos <- get_start_and_end_pos(dataset_list, window_size)
has_gds <- grepl("gds", dataset_R2_condition)
if (any(has_gds)) {
dataset_list_by_gds <- split(dataset_list[has_gds], sapply(dataset_list[has_gds], function(x) x$gds_file))
R2_from_gds <- vector("list", length(dataset_list_by_gds))
names(R2_from_gds) <- names(dataset_list_by_gds)
for (i in seq_along(dataset_list_by_gds)) {
# if single or no subset use that
null_subset <- sapply(dataset_list_by_gds[[i]], function(x) is.null(x$subset))
unique_subset <- unique(unlist(lapply(dataset_list_by_gds[[i]], function(x) x$subset)))
if ((!any(null_subset)) & (length(unique_subset) == 1)) {
subset <- unique_subset
} else {
subset <- NULL
}
current_geno_matrix <- getSNP(gds_file = names(dataset_list_by_gds)[i], chr = dataset_list_by_gds[[i]][[1]]$chr,
subset = subset, start = pos$start, end = pos$end, get_dosage_fn = get_dosage_fn)
subset_top_snp <- sapply(dataset_list_by_gds[[i]], function(x) ifelse(is.null(x$subset), NA, x$subset))
pos_top_snp <- sapply(dataset_list_by_gds[[i]], function(x) x$top_pos)
combos <- unique(data.frame(subset = subset_top_snp, pos = pos_top_snp, stringsAsFactors = FALSE))
R2_from_gds[[i]] <- list(R2 = vector("list", length(combos)), index = combos)
for (j in seq_len(nrow(combos))) {
if (!is.na(combos$subset[j])) {
subset <- scan(combos$subset[j], what = "character", sep="\n")
geno_matrix <- current_geno_matrix$genotype[current_geno_matrix$id %in% subset,,drop=FALSE]
} else {
geno_matrix <- current_geno_matrix$genotype
}
R2_from_gds[[i]]$R2[[j]] <- compute_R2_MAF_from_geno_df(list(genotype = geno_matrix, pos = current_geno_matrix$pos),
list(top_pos = combos$pos[j]))
}
}
}
# Note: this could be sped up further by looking for same top position and subset and only computing once.
for (i in which(has_gds)) {
fileR2 = R2_from_gds[[dataset_list[[i]]$gds_file]]
if (is.null(dataset_list[[i]]$subset)) {
index <- which(is.na(fileR2$index$subset) & fileR2$index$pos == dataset_list[[i]]$top_pos)
} else {
index <- which(fileR2$index$subset == dataset_list[[i]]$subset & fileR2$index$pos == dataset_list[[i]]$top_pos)
}
R2_MAF = fileR2$R2[[index]]
dataset_list[[i]]$R2 <- R2_MAF$R2
dataset_list[[i]]$MAF <- R2_MAF$MAF
}
return(dataset_list)
}
|
872433359821f51c7e32563fde55f64a4fca1f91 | 3f2f9819395da80010e0b1b21e7c6f261b12ae82 | /Melphalan/Simulation/NONMEM_Melphalan_Simulation_Model.R | c69770b54b54ab5c6ebeded7280f042030d6d0b9 | [] | no_license | wojjy001/melphalan-app | 84ffbbc9e4718dbb52816d877023c1e36acb7769 | 0b1d0a65769572756a1774e4bf9e28e49bf5e2c0 | refs/heads/master | 2021-03-27T10:30:11.024709 | 2016-10-19T06:18:48 | 2016-10-19T06:18:48 | 64,433,526 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,569 | r | NONMEM_Melphalan_Simulation_Model.R | #Script for simulating a population from a PK/PD model for melphalan
#------------------------------------------------------------------------------------
# Visual predictive check (VPC) for a melphalan PK/PD (neutropenia) model:
# overlays observed data on percentile summaries of NONMEM-simulated replicates.
#Remove all current objects in the workspace
rm(list=ls(all=TRUE))
#Load package libraries
library(R2HTML)
library(ggplot2)
library(doBy)
library(stringr)
library(Hmisc)    # cut2() used below for quantile time-binning
library(grid)
library(plyr)     # ddply()/rename() used for per-replicate summaries
library(reshape2)
#Use custom ggplot2 theme
theme_bw2 <- theme_set(theme_bw(base_size = 16))
theme_bw2 <- theme_update(plot.margin = unit(c(1.1,1.1,3,1.1), "lines"),
axis.title.x=element_text(size = 16, vjust = 0),
axis.title.y=element_text(size = 16, vjust = 0, angle = 90),
strip.text.x=element_text(size = 14),
strip.text.y=element_text(size = 14, angle = 90))
#------------------------------------------------------------------------------------
#Set working directory
setwd("/Volumes/Prosecutor/Melphalan/Simulation/")
#Source functions_utility
source("functions_utility.R")
#------------------------------------------------------------------------------------
#Read the original data
# "." encodes missing values; rows flagged "#" in the notes column are excluded
ORG.data <- read.csv("PKPD_OSU11055_Neutropenia_lnplus4.csv", stringsAsFactors=F, na.strings=".")
ORG.data <- subset(ORG.data, X.Note != "#")
ORG.data <- ORG.data[,-1] #Delete the "Notes" column
#Run name
runname <- "119pt_PKPD_Neutropenia_INPUTdelay_sim100"
#Process the fit file
#processSIMdata(paste(runname,".ctl",sep=""))
#Read the simulated data
SIM.data <- read.csv(paste(runname,".nm7/",runname,".fit.csv",sep=""), stringsAsFactors=F)
#Change working directory
# NOTE(review): master.dir is not defined in this script -- presumably created
# by functions_utility.R; confirm before running.
setwd(paste(master.dir,"/",runname,".nm7",sep=""))
#------------------------------------------------------------------------------------
#Subset PK and PD data
# DVID == 2 flags PD (neutrophil) observations; everything else is PK
ORG.data$TIME <- as.numeric(ORG.data$TIME)
ORG.data$DV <- as.numeric(ORG.data$DV)
ORG.PK.data <- subset(ORG.data, DVID != 2)
ORG.PD.data <- subset(ORG.data, DVID == 2)
#Bin time - ORG.PK.data
# cut2(g=10, levels.mean=T) builds 10 quantile bins labelled by the bin mean;
# as.numeric(paste(...)) turns those factor labels back into numbers
ORG.PK.data$TIMEBIN <- cut2(ORG.PK.data$TIME, g=10, levels.mean=T)
ORG.PK.data$TIMEBIN <- as.numeric(paste(ORG.PK.data$TIMEBIN))
#Bin time - ORG.PD.data
ORG.PD.data$TIMEBIN <- cut2(ORG.PD.data$TIME, g=10, levels.mean=T)
ORG.PD.data$TIMEBIN <- as.numeric(paste(ORG.PD.data$TIMEBIN))
#------------------------------------------------------------------------------------
#Bin time - SIM.data
#Subset PK and PD data
SIM.PK.data <- subset(SIM.data, DVID != 2)
SIM.PD.data <- subset(SIM.data, DVID == 2)
#Bin time - SIM.PK.data (same quantile binning as for the observed data)
SIM.PK.data$TIMEBIN <- cut2(SIM.PK.data$TIME, g=10, levels.mean=T)
SIM.PK.data$TIMEBIN <- as.numeric(paste(SIM.PK.data$TIMEBIN))
#Bin time - SIM.PD.data
SIM.PD.data$TIMEBIN <- cut2(SIM.PD.data$TIME, g=10, levels.mean=T)
SIM.PD.data$TIMEBIN <- as.numeric(paste(SIM.PD.data$TIMEBIN))
#------------------------------------------------------------------------------------
#Plot - PK data
# Percentile helper functions, passed by name as summary functions to
# ggplot2's stat_summary() when drawing prediction-interval lines/ribbons.
# Bounds of the 90% interval (5th and 95th percentiles):
CI90lo <- function(x) {
  quantile(x, probs = 0.05)
}
CI90hi <- function(x) {
  quantile(x, probs = 0.95)
}
# Bounds of the 95% interval (2.5th and 97.5th percentiles):
CI95lo <- function(x) {
  quantile(x, probs = 0.025)
}
CI95hi <- function(x) {
  quantile(x, probs = 0.975)
}
# Per-replicate (SIM) and per-time-bin summaries of the simulated PK data:
# median plus 5th/95th percentiles of DV within each bin of each replicate.
sim.data.bystudy.median <- ddply(SIM.PK.data, .(SIM,TIMEBIN), function(df) median(df$DV))
sim.data.bystudy.median <- rename(sim.data.bystudy.median, c("V1"="medianS"))
sim.data.bystudy.loCI <- ddply(SIM.PK.data, .(SIM,TIMEBIN), function(df) CI90lo(df$DV))
sim.data.bystudy.loCI <- rename(sim.data.bystudy.loCI, c("5%"="loCI90S"))
sim.data.bystudy.hiCI <- ddply(SIM.PK.data, .(SIM,TIMEBIN), function(df) CI90hi(df$DV))
sim.data.bystudy.hiCI <- rename(sim.data.bystudy.hiCI, c("95%"="hiCI90S"))
# assumes the three ddply results are row-aligned (same SIM/TIMEBIN order),
# which holds because each ddply groups the same data the same way
sim.data.bystudy <- data.frame(sim.data.bystudy.median, "loCI90S"=sim.data.bystudy.loCI$loCI90S, "hiCI90S"=sim.data.bystudy.hiCI$hiCI90S)
#Generate a plot of the SIM.data
# VPC layers: blue open circles = observations; shaded ribbons = 95% CI across
# replicates of each simulated percentile; red solid/dashed lines = observed
# median and 5th/95th percentiles per time bin.
# NOTE(review): fun.y/fun.ymin/fun.ymax were renamed fun/fun.min/fun.max in
# ggplot2 3.3.0 -- this code targets an older ggplot2; confirm installed version.
plotobj1 <- NULL
plotobj1 <- ggplot(ORG.PK.data)
plotobj1 <- plotobj1 + geom_point(aes(x = TIME, y = DV), colour = "blue", shape = 1, size = 2)
#Median simulated with confidence band
plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = medianS), data = sim.data.bystudy, geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", alpha = 0.3, fill = "red")
#plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = medianS), data = sim.data.bystudy, fun.y = median, geom = "line", colour = "black", size = 1)
#Lower 90% CI simulated with confidence band
plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = loCI90S), data = sim.data.bystudy, geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", alpha = 0.3, fill = "blue")
#plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = loCI90S), data = sim.data.bystudy, fun.y = median, geom = "line", colour = "black", linetype = "dashed", size = 1)
#Upper 90% CI simulated with confidence band
plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = hiCI90S), data = sim.data.bystudy, geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", alpha = 0.3, fill = "blue")
#plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = hiCI90S), data = sim.data.bystudy, fun.y = median, geom = "line", colour = "black", linetype = "dashed", size = 1)
# Observed-data percentile lines (computed from ORG.PK.data inherited above)
plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = DV), fun.y = median, geom = "line", colour = "red", size = 1)
plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = DV), fun.y = "CI90lo", geom = "line", colour = "red", linetype = "dashed", size = 1)
plotobj1 <- plotobj1 + stat_summary(aes(x = TIMEBIN, y = DV), fun.y = "CI90hi", geom = "line", colour = "red", linetype = "dashed", size = 1)
plotobj1 <- plotobj1 + scale_y_log10("Melphalan Concentration (mg/L)\n")
plotobj1 <- plotobj1 + scale_x_continuous("\nTime (hours)", breaks = c(0,2,4,6,8))
print(plotobj1)
#------------------------------------------------------------------------------------
#Plot - PD data
# Same VPC construction as the PK plot above, but on the neutrophil (PD)
# observations, and on a linear rather than log y axis.
sim.data.bystudy.median <- ddply(SIM.PD.data, .(SIM,TIMEBIN), function(df) median(df$DV))
sim.data.bystudy.median <- rename(sim.data.bystudy.median, c("V1"="medianS"))
sim.data.bystudy.loCI <- ddply(SIM.PD.data, .(SIM,TIMEBIN), function(df) CI90lo(df$DV))
sim.data.bystudy.loCI <- rename(sim.data.bystudy.loCI, c("5%"="loCI90S"))
sim.data.bystudy.hiCI <- ddply(SIM.PD.data, .(SIM,TIMEBIN), function(df) CI90hi(df$DV))
sim.data.bystudy.hiCI <- rename(sim.data.bystudy.hiCI, c("95%"="hiCI90S"))
sim.data.bystudy <- data.frame(sim.data.bystudy.median, "loCI90S"=sim.data.bystudy.loCI$loCI90S, "hiCI90S"=sim.data.bystudy.hiCI$hiCI90S)
#Generate a plot of the SIM.data
plotobj2 <- NULL
plotobj2 <- ggplot(ORG.PD.data)
plotobj2 <- plotobj2 + geom_point(aes(x = TIME, y = DV), colour = "blue", shape = 1, size = 2)
#Median simulated with confidence band
plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = medianS), data = sim.data.bystudy, geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", alpha = 0.3, fill = "red")
#plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = medianS), data = sim.data.bystudy, fun.y = median, geom = "line", colour = "black", size = 1)
#Lower 90% CI simulated with confidence band
plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = loCI90S), data = sim.data.bystudy, geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", alpha = 0.3, fill = "blue")
#plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = loCI90S), data = sim.data.bystudy, fun.y = median, geom = "line", colour = "black", linetype = "dashed", size = 1)
#Upper 90% CI simulated with confidence band
plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = hiCI90S), data = sim.data.bystudy, geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", alpha = 0.3, fill = "blue")
#plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = hiCI90S), data = sim.data.bystudy, fun.y = median, geom = "line", colour = "black", linetype = "dashed", size = 1)
# Observed-data percentile lines
plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = DV), fun.y = median, geom = "line", colour = "red", size = 1)
plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = DV), fun.y = "CI90lo", geom = "line", colour = "red", linetype = "dashed", size = 1)
plotobj2 <- plotobj2 + stat_summary(aes(x = TIMEBIN, y = DV), fun.y = "CI90hi", geom = "line", colour = "red", linetype = "dashed", size = 1)
plotobj2 <- plotobj2 + scale_y_continuous()
plotobj2 <- plotobj2 + ylab(expression(paste("Neutrophils (", 10^9, "/L)")))
plotobj2 <- plotobj2 + scale_x_continuous("\nTime (hours)")
print(plotobj2)
|
47243e023348dcfa486857c0646507ae8363a278 | d37b8ff5c3b3c211a215b0497b09eb29a1b384ed | /man/BIRCH-get_microclusters.Rd | 7dc0571ca8a0489b419e5eef7d22ac6542500af3 | [] | no_license | Dennis1989/stream | 6d610478ac40db092f80e1f182ae675d0eb2c10e | 87d586251337b42f838a2d3920c3e532dd9996ba | refs/heads/master | 2020-03-06T17:13:54.979940 | 2018-03-27T13:29:13 | 2018-03-27T13:29:13 | 126,986,355 | 0 | 0 | null | 2018-03-27T12:56:32 | 2018-03-27T12:56:32 | null | UTF-8 | R | false | true | 267 | rd | BIRCH-get_microclusters.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DSC_BIRCH.R
\name{BIRCH-get_microclusters}
\alias{BIRCH-get_microclusters}
\title{Centroids of micro clusters}
\description{
This function returns all micro clusters of a given CF-Tree.
}
|
cb20074e2db5abac96efdc37f11c8803dbcbee30 | e55cd127187b8ea6aea6848f8b75fce2ea242fea | /head/2scripts/00A.BasicSelectionModes.R | 76976d2be799c4ab7debbc04c7c59aa8b3b290b6 | [] | no_license | mitoclub/GeneticHandicap | 3059d5f14d7eae8acced691025bb3e0c93338e2b | d7d0e7a5e15ec5f4a4cc0335de93101c461b5356 | refs/heads/master | 2022-11-07T22:30:32.347008 | 2020-06-26T15:51:13 | 2020-06-26T15:51:13 | 272,410,609 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 624 | r | 00A.BasicSelectionModes.R | ############################## SELECTION SCHEMES:
# see "Multiplicative versus additive selection in relation to genome evolution: a simulation study" 2001
rm(list=ls(all=TRUE))
pdf("../../body/4figures/00A.BasicSelectionModes.R.pdf", height = 14, width = 14)
# additive
NumOfMuts = seq(0,10000, by = 1)
SelCoeff = 0.0001;
AdditiveFitness = 1-NumOfMuts*SelCoeff
MultiplicativeFitness = (1-SelCoeff)^NumOfMuts
plot(NumOfMuts,AdditiveFitness, ylim = c(0.5,1), pch = '.', col = 'blue', ylab = 'FITNESS')
par(new=TRUE)
plot(NumOfMuts,MultiplicativeFitness, ylim = c(0.5,1), pch = '.', col = 'red', ylab = 'FITNESS')
dev.off() |
3bf128a9de409f6e57b89a63fa794320dcc3024f | 45ed40d811fdc426a97e5efe61210a4d6fd3b786 | /Simulacion.R | 047f3d1d4d48178b5cabad15f6f5007d00027694 | [] | no_license | rschifini/Monte-Carlo-completando-album-de-figuritas | 4fa4b3be2a9e092ab60a80bc67614b10d605a2d4 | a1e4e73ab15ba416b7eb8ca873daadac2ad91d30 | refs/heads/master | 2021-05-07T19:22:53.635738 | 2017-10-30T16:29:50 | 2017-10-30T16:29:50 | 108,866,063 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 702 | r | Simulacion.R | figuritas = 200
SobresXAmigo = c()
Grupo = c(1:10,seq(20,100,5))
for(amigos in Grupo){
resultados = c()
for(k in 1:1000){
albumLLeno = FALSE
sobres = 0
album = rep(0,200)
while (!albumLLeno){
# Se abren tantos sobres como amigos
paquete = floor(runif(amigos * 5,1,figuritas+1))
sobres = sobres + amigos
# Agregar las cartas al pool
for(j in paquete){
album[j] = album[j] + 1
}
# Si todas estan repetida al menos la cantidad de amigos entonces finalizar
if(all(album >= amigos)){
albumLLeno = T
}
}
resultados = c(resultados,sobres)
}
SobresXAmigo = c(SobresXAmigo, mean(resultados)/amigos)
}
|
9054570c54650eea3695f020799e88c327fda8f7 | a1d6673d62231ea7b82f695b2f4e2616d3bf5abf | /install_scripts/hpc_r_packages.r | 1694b517f2073e1b5a967975c61b669ba17f45ee | [
"MIT"
] | permissive | coatless/stat490uiuc | 16f6ea925dae94fdc9879f217692c9cca59ac3f5 | f62fd9cb02fc3908bbdfadc1a73e3290d6ddd8ca | refs/heads/master | 2021-01-19T21:54:23.478814 | 2016-01-21T22:23:52 | 2016-01-21T22:23:52 | 29,217,236 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 587 | r | hpc_r_packages.r | # Install rmr2 dependencies
install.packages(c('RJSONIO', 'itertools', 'digest', 'Rcpp', 'functional', 'httr', 'plyr', 'stringr', 'reshape2', 'caTools', 'rJava'), repos="http://cran.us.r-project.org", INSTALL_opts=c('--byte-compile') )
# Install plyrmr dependencies
install.packages(c('dplyr', 'R.methodsS3', 'Hmisc'), repos="http://cran.us.r-project.org", INSTALL_opts=c('--byte-compile')
# Installs some wonderful HPC Packages
install.packages(c("bigmemory","foreach","iterators","doMC","doSNOW","itertools"), repos="http://cran.us.r-project.org", INSTALL_opts=c('--byte-compile') )
|
45eef5ee14b3804f5e0fb422fc23b02fa9b158ab | 7f82b6d5c3a88decef6b25549ba8e101945d0124 | /ITS_analysis/ITS_analysis_20site.R | 596e34f3db4761acb3022074fb8fee63f14b41c2 | [] | no_license | ShadeLab/apple_replant | 09e3cb0cad0b0e11e753d38ca3437f22c995e4ad | 656245dead6f50eb1ea78bd9499fca5cac7134be | refs/heads/master | 2022-01-13T06:21:01.849461 | 2019-05-02T20:52:12 | 2019-05-02T20:52:12 | 112,532,108 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 13,333 | r | ITS_analysis_20site.R | #################################### Analysis ITS Sequences of 20 Sites only #####################################
# Date: May 30th 2018
# By : AF. Bintarti
# INSTALL PACKAGES
install.packages(c('vegan', 'tidyverse'))
install.packages('reshape')
source("https://bioconductor.org/biocLite.R")
biocLite()
library(vegan)
library(dplyr)
library(tidyr)
library(ggplot2)
library(reshape)
# SET THE WORKING DIRECTORY
setwd('/Users/arifinabintarti/Documents/Parent/apple_replant/ITS_analysis/')
wd <- print(getwd())
otuITS <- read.table('single_rare45its.txt', sep='\t', header=T, row.names = 1)
map <- read.table('clean_map_data.csv', sep=',', header=TRUE)
head(otuITS)
dim(otuITS)
taxonomy <- otuITS[,'taxonomy']
taxonomy
otuITS <- otuITS[,-46]
set.seed(13)
head(sort(colSums(otuITS, na.rm = FALSE, dims = 1), decreasing = FALSE))
sort(rowSums(otuITS, na.rm = FALSE, dims = 1), decreasing = FALSE)
dim(otuITS)
otuITS
# CALCULATE THE ALPHA DIVERSITY (SHANNON, RICHNESS, PIELOU)
otuITS_rare_PA <- 1*(otuITS>0)
sITS <- specnumber(otuITS, MARGIN = 2)
hITS <- diversity(t(otuITS), index = 'shannon')
pielouITS <- hITS/log(sITS)
map20 <- map[31:75,]
map.div <- map20
map.div$Richness <- sITS
map.div$Shannon <- hITS
map.div$Pielou <- pielouITS
names(map.div)
map.alphaITS <- melt(map.div, id.vars=c('site_name', 'cultivar', 'rootstock'), measure.vars=c('Richness', 'Pielou'))
# GGPLOT OF THE ALPHA DIVERSITY (BASED ON 'site_name', 'cultivar', 'rootstock')
ggplot(map.alphaITS, aes(y=value, x=cultivar)) +
facet_wrap(~variable, scales = 'free_y') +
geom_boxplot(aes(color=cultivar))+
geom_point(aes(color=cultivar))+
theme(axis.text.x=element_text(angle=90, hjust=1))
ggplot(map.alphaITS, aes(y=value, x=site_name)) +
facet_wrap(~variable, scales = 'free_y') +
geom_boxplot(aes(color=site_name))+
geom_point(aes(color=site_name))+
theme(legend.position="bottom",axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.text=element_text(size=25),
strip.text.x = element_text(size=30,colour = "black", face = "bold"),
legend.text=element_text(size=20),legend.title = element_text(size = 14), legend.spacing.x = unit(1.0, 'cm'),
plot.title = element_text(size = rel(2)),axis.title=element_text(size=18,face="bold"))
# CALCULATE THE BETA DIVERSITY (PCA PLOT)
# dissimilarity indices for community ecologist to make a distance structure (Bray-Curtis distance between samples)
otu_distITS <- vegdist(t(otuITS), method='bray')
# CMD/classical multidimensional scaling (MDS) of a data matrix. Also known as principal coordinates analysis
otu_pcoaITS <- cmdscale(otu_distITS, eig=T)
# environmental/soil covariates used for the envfit vectors
# (columns 12-23 and 25-37 of the map -- TODO confirm column meanings)
env <- map20[,c(12:23, 25:37)]
# scores of PC1 and PC2
ax1ITS.scores=otu_pcoaITS$points[,1]
ax2ITS.scores=otu_pcoaITS$points[,2]
# fit environmental vectors onto the ordination (permutation test per vector)
env_fitITS <- envfit(otu_pcoaITS, env, na.rm=TRUE)
# proportion of total variance explained by each axis (for axis labels)
ax1ITS <- otu_pcoaITS$eig[1]/sum(otu_pcoaITS$eig)
ax2ITS <- otu_pcoaITS$eig[2]/sum(otu_pcoaITS$eig)
map2=cbind(map20,ax1ITS.scores,ax2ITS.scores)
# PCoA plot with fit of environmental vectors
pcoa_plot <- plot(ax1ITS.scores, ax2ITS.scores, xlab=paste("PCoA1: ",round(ax1ITS,3)*100,"% var. explained", sep=""), ylab=paste("PCoA2: ",round(ax2ITS,3)*100,"% var. explained", sep=""))
plot(env_fitITS, p.max=0.05, col="red1")
###GGPLOT PCoA with Env_fit###
spp.scrsITS <- as.data.frame(scores(env_fitITS, display = "vectors"))
spp.scrsITS <- cbind(spp.scrsITS, env = rownames(spp.scrsITS))
#only significant pvalues
#shortcutting ef$vectors
A_ITS <- as.list(env_fitITS$vectors)
#creating the dataframe
pvals_ITS <- as.data.frame(A_ITS$pvals)
# scale arrows by sqrt(r2) so length reflects correlation strength
arrows_ITS <- as.data.frame(A_ITS$arrows*sqrt(A_ITS$r))
C_ITS <- cbind(arrows_ITS, pvals_ITS)
#subset to vectors significant at p < 0.05
Cred_ITS<-subset(C_ITS,pvals_ITS<0.05)
Cred_ITS <- cbind(Cred_ITS, env = rownames(Cred_ITS))
ggplot(data = map2, aes(x=ax1ITS.scores, y=ax2ITS.scores, size=10)) +
geom_point(aes(color = site_name), size=4)+
scale_x_continuous(name=paste("PCoA1: ",round(ax1ITS,3)*100,"% var. explained", sep=""))+
scale_y_continuous(name=paste("PCoA2: ",round(ax2ITS,3)*100,"% var. explained", sep=""))+
scale_size(guide="none")+
geom_segment(data=Cred_ITS,aes(x=0,xend=Dim1,y=0,yend=Dim2),
arrow = arrow(length = unit(0.25, "cm")),size=0.5,colour="grey") +
geom_text(data=Cred_ITS,aes(x=Dim1,y=Dim2, label=env),size=5)+
coord_fixed()+
theme(legend.position="right",plot.title = element_text(size = rel(2), face="bold"),axis.text=element_text(size=22), axis.title=element_text(size=22,face="bold"),
legend.text=element_text(size=20),legend.title = element_text(size = 14), legend.spacing.x = unit(1.0, 'cm'))+
ggtitle("ITS PCoA (Bray-Curtis)-Environmental fit")
# PCoA plot with symbol control in ggplot2
# Three versions of the same ordination, coloured by different metadata.
# NOTE(review): the titles say "16S rRNA" but this script analyses ITS data;
# the strings look copy-pasted from the companion 16S script -- verify.
# plot by site_name
fig <- ggplot(data=map2, aes(x=ax1ITS.scores, y=ax2ITS.scores, size=8))+
geom_point(aes(color=site_name)) +
scale_x_continuous(name=paste("PCoA1: ",round(ax1ITS,3)*100,"% var. explained", sep=""))+
scale_y_continuous(name=paste("PCoA2: ",round(ax2ITS,3)*100,"% var. explained", sep=""))+
scale_size(guide="none")+
theme_bw(base_size=12)+
ggtitle("16S rRNA PCoA (Bray-Curtis)")
fig
# plot by rootstock
fig <- ggplot(data=map2, aes(x=ax1ITS.scores, y=ax2ITS.scores, size=8))+
geom_point(aes(color=rootstock)) +
scale_x_continuous(name=paste("PCoA1: ",round(ax1ITS,3)*100,"% var. explained", sep=""))+
scale_y_continuous(name=paste("PCoA2: ",round(ax2ITS,3)*100,"% var. explained", sep=""))+
scale_size(guide="none")+
theme_bw(base_size=12)+
ggtitle("16S rRNA PCoA (Bray-Curtis)")
fig
# plot by cultivar
fig <- ggplot(data=map2, aes(x=ax1ITS.scores, y=ax2ITS.scores, size=8))+
geom_point(aes(color=cultivar)) +
scale_x_continuous(name=paste("PCoA1: ",round(ax1ITS,3)*100,"% var. explained", sep=""))+
scale_y_continuous(name=paste("PCoA2: ",round(ax2ITS,3)*100,"% var. explained", sep=""))+
scale_size(guide="none")+
theme_bw(base_size=12)+
ggtitle("16S rRNA PCoA (Bray-Curtis)")
fig
# PERMANOVA
# tests whether community composition differs by each grouping factor
adonis(otu_distITS~map2$site_name)
adonis(otu_distITS~map2$rootstock)
adonis(otu_distITS~map2$cultivar)
# MAKE A BARPLOT USING PHYLOSEQ
# INSTALL PACKAGES
source('http://bioconductor.org/biocLite.R')
biocLite('phyloseq')
library(phyloseq)
# ADD THE TAXONOMY W/O PREFIX
# WRITE.CSV
write.csv(taxonomy, file = "its_taxonomy.csv")
# READ .CSV
# NOTE(review): writes "its_taxonomy.csv" but reads "its_taxon.csv" --
# presumably the file was renamed/cleaned by hand in between; verify.
tax_its = read.csv("its_taxon.csv", sep=',', header=T)
tax_its
# ADD OTU TABLE
dim(otuITS)
dim(tax_its)
# align taxonomy rows with OTU table rows (assumes identical row order)
rownames(tax_its) <- rownames(otuITS)
tax_its
class(otuITS)
class(tax_its)
OTU_its = otu_table(otuITS, taxa_are_rows = TRUE)
TAX_its = tax_table(as.matrix(tax_its))
# IMPORT OTU TABLE
otu_phyl <- import_biom("/Users/arifinabintarti/Documents/Parent/apple_replant/16S analysis/single_rare20.biom")
otu_phyl
# ADD MAP
rownames(map20) <- map20$sample_code
phyloseq_map <- sample_data(map20)
otu_map_its <- merge_phyloseq(OTU_its,TAX_its,phyloseq_map)
otu_map_its
# CHANGE THE RANK NAMES
rank_names(otu_map_its)
# NOTE(review): `otu_map` is not defined anywhere in this script (it exists
# in the companion 16S script); this line and the one three lines below
# likely intended `otu_map_its` -- verify before running.
colnames(tax_table(otu_map)) <- c(k = "Kingdom", p = "Phylum", c = "Class", o = "Order", f = "Family", g = "Genus", s = "Species")
# MERGE SAMPLES BY SITE_NAMES
site_phyl <- merge_samples(otu_map_its, "site_name")
sample_data(site_phyl)$site_name <- levels(sample_data(otu_map)$site_name)
# MERGE TAXA BY RANK
site_phylum <- tax_glom(site_phyl, taxrank = "Phylum", NArm=FALSE)
# convert counts to percent relative abundance within each sample
site_RA <- transform_sample_counts(site_phylum, function(x) 100 * x/sum(x))
# MAKE A BAR PLOT
plot_bar(site_RA, "site_name", fill="Phylum")+
theme(axis.text.y=element_text(size=22),axis.text.x=element_text(size=22,angle=49,hjust =1, face = "bold"), axis.title=element_text(size=22,face="bold"),
legend.text=element_text(size=16),legend.title = element_text(size = 14), legend.spacing.x = unit(1.0, 'cm'))
otu_table(site_RA)
otu_table(site_phylum)
# total reads per phylum
# NOTE(review): tapply forwards NArm=FALSE to sum(), which has no NArm
# argument -- sum() silently treats FALSE as a 0 summand; probably na.rm
# was intended.
phylum.sum = tapply(taxa_sums(site_phyl), tax_table(site_phyl)[, "Phylum"], sum, NArm=FALSE)
top5phyla = names(sort(phylum.sum, TRUE))[1:5]
GP1 = prune_taxa((tax_table(site_phyl)[, "Phylum"] %in% top5phyla), site_phyl)
GP1
GP1_RA <- transform_sample_counts(GP1, function(x) 100 * x/sum(x))
otu_table(GP1_RA)
phylum.sum = tapply(taxa_sums(site_phylum), tax_table(site_phylum)[, "Phylum"], sum, NArm=FALSE)
library("RColorBrewer")
library("ggplot2")
library("plyr")
library("vegan")
library("reshape2")
library("ape")
library("phyloseq")
library("knitr")
library("xtable")
library("colorspace")
library("ggrepel")
#otu abundances and frequencies required for generating the basic plots
otu_map_its.ra = transform_sample_counts(otu_map_its, function(x) x/sum(x))
otu_table(otu_map_its.ra)
otu_map_its.ra
phylum.sum = tapply(taxa_sums(site_phyl), tax_table(site_phyl)[, "Phylum"], sum, NArm=FALSE)
top12phyla = names(sort(phylum.sum, TRUE))[1:12]
# Compute per-OTU mean relative abundance and occupancy from a
# relative-abundance-transformed phyloseq object, labelling each OTU with
# its phylum (rare phyla lumped into "Other").
#
# Args:
#   normalized: phyloseq object whose otu_table holds relative abundances
#               (taxa as rows, samples as columns).
#   n_samples:  number of samples used to scale occupancy. Defaults to 45
#               (this study's sample count), preserving the original
#               hard-coded behaviour while allowing reuse on other datasets.
#   keephyla:   phyla kept as distinct levels; anything else becomes "Other".
#
# Returns: data.frame with columns abundance (natural log of the per-OTU
#          mean relative abundance), frequency (fraction of samples the OTU
#          occurs in) and phyla (factor).
abund_val <- function(normalized, n_samples = 45,
                      keephyla = c("Ascomycota", "Basidiomycota", "Mortierellomycota",
                                   "Chytridiomycota", "Glomeromycota", "Rozellomycota",
                                   "Entomophthoromycota", "Mucoromycota", "Kickxellomycota",
                                   "Calcarisporiellomycota", "Blastocladiomycota")) {
  otu.abun <- apply(otu_table(normalized), 1, mean)
  otu.freq <- rowSums(otu_table(normalized) != 0) / n_samples
  # phylum per OTU as character; the original also called levels<- on this
  # character vector, which is a no-op and has been dropped
  phyla <- as.character(data.frame(tax_table(normalized))$Phylum)
  phyla[!(phyla %in% keephyla)] <- "Other"
  phyla <- as.factor(phyla)
  cbind.data.frame(abundance = log(otu.abun), frequency = otu.freq, phyla)
}
# Per-OTU mean relative abundance / occupancy table for the scatterplot
abund_all <- abund_val(otu_map_its.ra)
# Use color brewer to pick a color scheme for the phyla
brew = brewer.pal(12, "Paired")
# Create a scatterplot of OTUs showing their average relative abundance and occupancy
ggplot(abund_all, aes(x=abundance,y=frequency,color=phyla)) +
geom_point(size=3) + xlab("Mean relative abundance (log10 scale)") +
ylab("Mean occupancy (n=45)") + scale_colour_brewer(palette="Paired")+
labs(title="Fungal abundancy vs. occupancy plots")+ xlim(-14.6, -2)+
theme(plot.title = element_text(size = rel(2)),axis.text=element_text(size=22), axis.title=element_text(size=22,face="bold"),
legend.text=element_text(size=18),legend.title = element_text(size = 14), legend.spacing.x = unit(1.0, 'cm'))
######## Occupancy-Abundancy #########
# Cumulative Occ_Abund: occupancy vs. summed relative abundance per OTU
otuITS_PA <- 1*((otuITS>0)==1)
otuITS_PA <- otuITS_PA[rowSums(otuITS_PA)>0,]
# occupancy = fraction of samples each OTU is present in
Occ_otuITS <- rowSums(otuITS_PA)/ncol(otuITS_PA)
abs_otuITS <- otuITS[rowSums(otuITS)>0,]
# per-sample relative abundance (each column sums to 1)
otuITS_rel <- decostand(abs_otuITS, method="total", MARGIN=2)
com_abund_otuITS <- rowSums(otuITS_rel)
#color code for the top most abundant OTUs
color_top <- com_abund_otuITS
color_top[] <- 'black'
otuITS_top_abund <- log10(com_abund_otuITS)[order((com_abund_otuITS), decreasing=TRUE)][1:10] #top 10 most abundant OTUs
color_top[names(color_top) %in% names(otuITS_top_abund)] <- 'red'
plot(log10(com_abund_otuITS), Occ_otuITS, col=color_top, pch=20, ylab='Occupancy (n=45)', xlab='log(sum relative abundace per OTU)\n (n=20838 OTUs)',
main='Fungal occupancy-abundance\n plot (comulative)')
# Mean Occ_Abund: occupancy vs. mean relative abundance per OTU
otuITS_PA <- otuITS_PA[rowSums(otuITS_PA)>0,]
Mean_otuITS_PA <- apply(otuITS_PA, 1, mean)
Mean_Occ <- rowSums(otuITS_PA)/ncol(otuITS_PA)
Mean_abund <- apply(otuITS_rel, 1, mean)
#order(Mean_abund[names(Mean_abund) %in% unique(selected_otus_switch$otu)])
#Creating df for plotting with ggplot and adding color code for the shared and unique OTUs
df_occ <- data.frame(otu=names(Occ_otuITS), occ=Occ_otuITS)
df_abun <- data.frame(otu=names(Mean_abund), abun=log10(Mean_abund))
otu_col <- data.frame(otu=names(color_top), col=color_top)
occ_abun <- left_join(df_abun, df_occ, by='otu')
occ_abun <- left_join(occ_abun, otu_col, by='otu')
# NOTE(review): cbind.data.frame has no `by` argument -- this creates a
# literal column named "by" filled with 'otu'. Also, `phyla` is only
# defined LATER in this script (below) or carried over from a previous
# session; running top-to-bottom this line fails. Verify intent.
occ_abun <- cbind.data.frame(occ_abun, phyla, by='otu')
occ_abun$unique <- 'unique'
#occ_abun$unique[occ_abun$otu %in% misc_occ_abun$otu] <- 'shared' #run after the miscanthus block bellow
#####################################
#Figure 3A - Occupancy abundance plot
#####################################
setEPS()
postscript('fungal_occ_abund.eps', width = 4.5, height = 4)
brew = brewer.pal(12, "Paired")
ggplot(data=occ_abun, aes(x=abun, y=occ, color=phyla)) +
geom_point(size=4, pch=20)+
scale_colour_brewer(palette="Paired")+
labs(x="log(mean relative abundace per OTU)\n (n=2952 OTUs)", y= "Mean occupancy (n=45)", title='Fungal abundancy vs. occupancy plots') +
geom_text_repel(data=occ_abun[occ_abun$col=='red',], aes(label=otu), box.padding = unit(0.45, "lines"), show.legend = FALSE) +
geom_hline(aes(yintercept=.4), linetype='dashed', size=1.5) +
geom_vline(aes(xintercept=-2.5), linetype='dashed', size=1.5) +
theme(plot.title = element_text(size = rel(2)),axis.text=element_text(size=22), axis.title=element_text(size=22,face="bold"))+
scale_y_continuous(breaks=seq(0,1,.2)) +
scale_x_continuous(breaks=seq(-10,2,2))
dev.cur()
dev.off()
abund_all
# Build the phylum label vector used above (duplicates abund_val's internal
# logic; the second lump line is redundant, and levels<- on a character
# vector is a no-op)
phyla = as.vector(data.frame(tax_table(otu_map_its.ra))$Phylum)
phyla
levels(phyla) = c(levels(phyla),"Other")
keephyla = c("Ascomycota","Basidiomycota","Mortierellomycota", "Chytridiomycota", "Glomeromycota", "Rozellomycota", "Entomophthoromycota", "Mucoromycota", "Kickxellomycota", "Calcarisporiellomycota", "Blastocladiomycota")
phyla[!(phyla %in% keephyla)] = "Other"
phyla[!(phyla %in% keephyla)] = "Other"
phyla = as.vector(phyla)
phyla=as.factor(phyla)
occ_abun <- cbind.data.frame(occ_abun, phyla, by='otu')
occ_abun
|
bea6f01077451396cabab253bdad8b435e6467ee | 34b429c98a64f8a9c43908cc57d6d622d4df1ffc | /man/getters.Rd | f1953998a30a136a5d792762b7bcc3bd1fca0334 | [] | no_license | mkoohafkan/wqpr-clone | f55212370ff548fab586fc28c3d91fda408f1a28 | 96f17b980125420a0d170dd9d145509c31f60e30 | refs/heads/master | 2023-04-18T03:02:17.881632 | 2021-04-27T16:37:29 | 2021-04-27T16:37:29 | 362,178,533 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,954 | rd | getters.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getters.r
\name{getters}
\alias{getters}
\alias{get_contact_id}
\alias{get_station_id}
\alias{get_event_id}
\alias{get_event_type_id}
\alias{get_reason_id}
\alias{get_summary_id}
\alias{get_result_id}
\alias{get_reading_type_id}
\alias{get_sonde_id}
\alias{get_location_id}
\alias{get_action_id}
\alias{get_instrument_id}
\alias{get_solution_id}
\title{Getters}
\usage{
get_contact_id(new.data, program, database)
get_station_id(new.data, program, database)
get_event_id(new.data, program, database)
get_event_type_id(new.data, program, database)
get_reason_id(new.data, program, database)
get_summary_id(new.data, program, database)
get_result_id(new.data, program, database)
get_reading_type_id(new.data, program, database)
get_sonde_id(new.data, program, database)
get_location_id(new.data, program, database)
get_action_id(new.data, program, database)
get_instrument_id(new.data, program, database)
get_solution_id(new.data, program, database)
}
\arguments{
\item{new.data}{A dataframe.}
\item{program}{The program name.}
\item{database}{The database name.}
}
\value{
A vector of IDs.
}
\description{
internal functions for getting IDs based on other available fields.
}
\section{Functions}{
\itemize{
\item \code{get_contact_id}: Get Contact ID
\item \code{get_station_id}: Get Station ID
\item \code{get_event_id}: Get Event ID
\item \code{get_event_type_id}: Get Event Type ID
\item \code{get_reason_id}: Get Reason ID
\item \code{get_summary_id}: Get Summary ID
\item \code{get_result_id}: Get Result ID
\item \code{get_reading_type_id}: Get Reading Type ID
\item \code{get_sonde_id}: Get Sonde ID
\item \code{get_location_id}: Get Location ID
\item \code{get_action_id}: Get Action Type ID
\item \code{get_instrument_id}: Get Verification Instrument Type ID
\item \code{get_solution_id}: Get Standard Solution Type ID
}}
\keyword{internal}
|
a33f47dc101b3eae407f283e39fd389fb63ef693 | 9b9e21fea61870f3458bec92ee25a5a9f10345c3 | /man/summary.Rd | fdab10cfec1ee799d7b8083924bd138dd380c1da | [] | no_license | brendo1001/GSIF | dd46bc744309a970ef5622f1af423e179bf1d3d7 | 12ed85244a1ca46212033f0ecc16f8cd0303ea64 | refs/heads/master | 2021-01-14T09:18:33.661687 | 2014-01-15T00:00:00 | 2014-01-15T00:00:00 | 18,876,903 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,382 | rd | summary.Rd | \name{summary-methods}
\alias{summary}
\alias{summary,SpatialPredictions-method}
\alias{show,SpatialPredictions-method}
\title{Summarize an object of class \code{"SpatialPredictions"}}
\description{Derives a statistical summary for an object of class \code{"SpatialPredictions"}.}
\usage{
\S4method{summary}{SpatialPredictions}(object)
}
\arguments{
\item{object}{object of class \code{"SpatialPredictions"}}
}
\value{
The summary returns a data.frame with the following columns:
\describe{
\item{\code{"variable"}}{variable name}
\item{\code{"minium"}}{lowest value observed}
\item{\code{"maximum"}}{largest value observed}
\item{\code{"npoints"}}{number of observations}
\item{\code{"area"}}{total area covered by the predictions}
\item{\code{"area.units"}}{area units either square-m or square-arcdegrees}
\item{\code{"covariates"}}{list of covariates used}
\item{\code{"family"}}{GLM family (if applicable)}
\item{\code{"RMSE"}}{RMSE derived using cross-validation}
\item{\code{"tvar"}}{variance percent explained by the model using the cross-validation}
\item{\code{"npixels"}}{total number of produced pixels}
\item{\code{"breaks"}}{breaks based on the half RMSE}
\item{\code{"bonds"}}{lower and upper boundaries for effective classes}
\item{\code{"Bytes"}}{effective bytes produced (see \href{http://dx.doi.org/10.1016/j.jag.2012.02.005}{Hengl et al (2012)} for more details)}
\item{\code{"compress"}}{compression algorithm used}
}
}
\details{The function creates a summary table with standard column names. These tell us what is the summary accuracy of the spatial predictions and what are the effective bytes of information produced.}
\author{ Tomislav Hengl }
\references{
\itemize{
\item Hengl, T., Nikolic, M., MacMillan, R.A., (2013) \href{http://dx.doi.org/10.1016/j.jag.2012.02.005}{Mapping efficiency and information content}. International Journal of Applied Earth Observation and Geoinformation, special issue Spatial Statistics Conference, 22: 127--138.
}
}
\seealso{ \code{plotKML::SpatialPredictions-class} }
\examples{
## load observations:
library(sp)
library(rgdal)
library(gstat)
demo(meuse, echo=FALSE)
## fit a model:
omm <- fit.gstatModel(meuse, om~dist,
fit.family=gaussian(link="log"), meuse.grid)
show(omm@regModel)
## produce SpatialPredictions:
om.rk <- predict(omm, predictionLocations = meuse.grid)
x = summary(om.rk)
str(x)
}
|
13b6a892dc7070e2fcdb792139ad6b183148f41b | 3cfaa91742c7d723ce0f87a6ace95a392e872941 | /functions/munge_functions.R | f4f13c44ae7a566ca791b093af5481aaaab21cfd | [] | no_license | jbayham/weather_risk_resource_orders | 1e26cccd9ff5854481b472ed12333512de476ae9 | 5fe40759ee4194f3a5a86926e1105fdc3b66ca0e | refs/heads/master | 2020-11-30T10:50:13.476728 | 2020-01-28T17:19:09 | 2020-01-28T17:19:09 | 230,381,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 180 | r | munge_functions.R | #Function to remove outliers (identified by 3 sd away)
# TRUE for values within `thres` standard deviations of the mean (i.e. NOT
# a z-score outlier), elementwise over `x`. NAs in `x` yield NA results.
isnt_out_z <- function(x, thres = 3, na.rm = TRUE) {
  center <- mean(x, na.rm = na.rm)
  spread <- sd(x, na.rm = na.rm)
  abs(x - center) <= thres * spread
}
|
6c4df71782f1eb69b168265605d189110b7ee4a1 | f250f747e450ca7b1d4ee0b8be200403eb2d0059 | /Rcode_and_Graphs/plot4.R | fe8503d258d4266db3d11064893529d2216f9b1d | [] | no_license | miguel19c/ExData_Plotting1 | e680af838d1726c0609690692be9117bc6ce6e34 | e01568432acbac197d08c34cb7e4674bd2be6ab7 | refs/heads/master | 2021-03-13T21:51:32.056757 | 2017-05-17T18:37:25 | 2017-05-17T18:37:25 | 91,510,281 | 0 | 0 | null | 2017-05-16T22:33:23 | 2017-05-16T22:33:22 | null | UTF-8 | R | false | false | 1,604 | r | plot4.R | #Loading the dataset
setwd("C:/Users/0016/Dropbox/Exploratory_Data_Analysis")
data.p1=read.table('household_power_consumption.txt',header=T,sep=';',stringsAsFactors = FALSE)
dates=data.p1$Date
times=data.p1$Time
x=paste(dates,times)
date.time=strptime(x,"%d/%m/%Y %H:%M:%S")
#taking the original dataframe without date and time
data.aux=data.p1[,3:9]
#merge the data/time class with the data.aux dataframe
data=cbind(date.time,data.aux)
#filtering data with dates 2007-02-01 and 2007-02-02.
data.project=data[grep('2007-02-01|2007-02-02',data$date.time),]
#Delete incomplete cases
data.project=data.project[complete.cases(data.project),]
#Data time format to data.time column
data.project$date.time=as.POSIXct(data.project$date.time)
#plot4
par(mfrow=c(2,2),mar=c(4,4,2,1), oma=c(0,0,2,0))
#plot1
plot(data.project$Global_active_power~data.project$date.time,type='l',xlab='',ylab='Global Active Power')
#plot2
plot(data.project$Voltage~data.project$date.time,type='l',xlab='datetime',ylab='voltage')
#plot3
plot(data.project$Sub_metering_1~data.project$date.time,type='l',col='black',ylab='Energy Sub metering',xlab='')
lines(data.project$Sub_metering_2~data.project$date.time,col='red')
lines(data.project$Sub_metering_3~data.project$date.time,col='blue')
legend('topright',legend=names(data.project)[6:8],col=c('black','red','blue'),lty=1,bty='n',lwd=2)
#plot4
plot(data.project$Global_reactive_power~data.project$date.time,type='l',xlab='datetime',ylab='Global_reactive_power')
dev.copy(png,"plot4.png", width=480, height=480)
dev.off() |
66cb505d2bba915a0e7a83454e075832772fd0fc | fe2fbba0a0b961918be0a57b7c5aa4bfd0a0c0b0 | /R/divideAndConquer.R | 101a34ba036e6542efe85076726157048f00da76 | [] | no_license | tbemsi/consensusClustering | 8411cb2912b722fb774f2236a5c538000d03aae8 | 9fc2bd26f50581bd520d52fbf8b8ed8f4731f152 | refs/heads/master | 2023-05-27T11:01:34.615854 | 2021-06-10T14:18:08 | 2021-06-10T14:18:08 | 367,302,767 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,507 | r | divideAndConquer.R | divideAndConquer <- function(sampleID,
                             dataframe,
                             numberOfSweeps,
                             BurnIn,
                             seed = 1234){
  # Fit a profile-regression clustering model on the rows of `dataframe`
  # whose sampleGroup equals `sampleID`, then use the fitted cluster
  # parameters to predict cluster membership for EVERY row of `dataframe`
  # (training rows included). profRegr() writes its MCMC output files
  # under PremiumOutput/.
  # numberOfSweeps/BurnIn map to profRegr's nSweeps/nBurn; `seed` is the
  # RNG seed forwarded to the sampler.
  train <- dataframe %>% filter(sampleGroup == sampleID)
  # covNames: every column except the first (assumed to be an id -- TODO
  # confirm) and the last (sampleGroup).
  runInfoObjTrain <- profRegr(yModel="Bernoulli",
                              xModel="Discrete",
                              nSweeps=numberOfSweeps,
                              nClusInit=100,
                              nBurn=BurnIn,
                              seed = seed,
                              data=train,
                              output=paste0("PremiumOutput/newtrainOutput_",sampleID),
                              covNames = names(train)[2:(length(names(train)) - 1)],
                              reportBurnIn = TRUE,
                              excludeY = TRUE)
  # Read back the sampled cluster allocations written by profRegr().
  zTrain <- fread(paste0("PremiumOutput/newtrainOutput_",sampleID,"_z.txt"), header = FALSE)
  zMatrixTrain <- as.matrix(zTrain)
  # Posterior similarity matrix -> point-estimate clustering via maxpear.
  trainPSM <- makePSM(zMatrixTrain)
  train$Clusters <- maxpear(trainPSM)$cl
  # Summarise the fitted clusters, then label the full data set from the
  # training clusters' parameters and mixture weights.
  clusterParamsTrain <- clusterParams(train)
  predictedClustersForTest <- clusterPredictions(dataWithLabels = train,
                                                 dataWithoutLabels = dataframe,
                                                 trainingParams = clusterParamsTrain$ClusterParams,
                                                 trainMixWeights = clusterParamsTrain$mixtureWeights)
  predictedClustersForTest
}
|
7e964ca34df81a28b2dd06542b036cbb957be353 | a32d9125f203e83d526828b862203bb710f18323 | /R/util.R | 69f0103d4e91b33548e45fc777e8c76348140f23 | [] | no_license | hrbrmstr/porc | 2332dbd03eaf1e33e12350fd53012a947ba0ee89 | e9937564e7f72938ab3aab450bb6af9e65939e82 | refs/heads/master | 2021-08-28T16:24:09.742487 | 2017-12-12T18:34:23 | 2017-12-12T18:34:23 | 113,615,400 | 10 | 4 | null | null | null | null | UTF-8 | R | false | false | 452 | r | util.R | is_blank <- function(x) x == ""
# String / NA predicate helpers used while parsing rule text.
not_blank <- function(x) {
  x != ""
}
# TRUE when the string contains a literal ":".
has_colon <- function(x) {
  grepl(":", x, fixed = TRUE)
}
# TRUE when the string contains no literal ":".
no_colon <- function(x) {
  !grepl(":", x, fixed = TRUE)
}
# TRUE for non-missing elements.
not_na <- function(x) {
  !is.na(x)
}
#' Helper to class a Snort rules data frame properly
#'
#' Stamps the tibble + `snort_rule_df` class vector onto `rules` so that
#' downstream S3 methods dispatch correctly.
#'
#' @param rules a Snort rules data frame read in with [read_rules()].
#' @export
as_rule_df <- function(rules) {
  structure(rules, class = c("tbl_df", "tbl", "snort_rule_df", "data.frame"))
}
|
cfab48639f502d55299dfa217cfae7faa29d294c | 3fce42b76523b546e2d8253bce9fcc8c8790d023 | /tests/testthat.R | ee25192d971232b64dc331400f6e4dd28204b82b | [] | no_license | KrystynaGrzesiak/gslope | e962997fbad21345ee1628ffebdccc6657f79919 | 83c45a6c78a0263d08cfecc083d297569af8bc13 | refs/heads/master | 2022-12-27T02:21:27.861557 | 2020-10-03T13:59:03 | 2020-10-03T13:59:03 | 256,288,231 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
library(gslope)
# Discover and run every test under tests/testthat/ for the gslope package.
test_check("gslope")
|
c41e394a91d473ffb44266f669860699fd0a9868 | b733d3f7e67a62c34d4889c561d2388da835d451 | /man/tidyeval-data.Rd | 5888b26b810b2e259cc99c427916530a1915d523 | [] | no_license | cran/MazamaCoreUtils | 7c3c4c71d2667b4512f203ca5ba7c67df773dc9d | 15f2b32ed32835229b1df8cf74243d745ea7fd16 | refs/heads/master | 2023-09-05T17:48:57.276030 | 2023-08-29T21:50:02 | 2023-08-29T23:30:40 | 154,902,175 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 284 | rd | tidyeval-data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-tidyeval-data.R
\name{tidyeval-data}
\alias{tidyeval-data}
\alias{.data}
\title{Data pronoun for tidy evaluation}
\description{
See \code{rlang::\link[rlang]{.data}} for details.
}
\keyword{internal}
|
d868e6ebb0850ab1b0a8df98855c4f3e3c689df3 | e70edad7de0cb8c66d211afc5623cbec935301d4 | /Code/Old/State County Analysis/County Regression.R | 021415a8d2006f668f7db0d994a0fb1838d989ea | [] | no_license | man44duke/Thesis | c8420528384131bd3ad6e4990bbc422e5da52c14 | 7633202e1ac2d0d3b817a2ae2fd1948eb78ee5dd | refs/heads/master | 2021-01-09T16:32:46.943538 | 2020-03-15T14:27:36 | 2020-03-15T14:27:36 | 242,359,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,546 | r | County Regression.R | library(knitr)
# County-level HMDA regressions: models of total mortgage denial rates and
# rate spreads on county demographics, loan characteristics and FRED
# county indicators.
library(leaps)
library(MASS)
#############################
# Select Data
###########################
# (One-off outlier removal, kept commented for reference.)
#county_data_fred <- county_data_fred[-c(672, 941, 1578, 1318),]
#save(county_data_fred, file = "county_data_fred.RData")
load("RData/County/county_data_fred.RData")
data <- county_data_fred
############################
##################################
# Total denials
##################################
# Stepwise (AIC, both directions) variable selection for loan_term_total,
# then a refit on just the selected columns.
dataNA <-na.omit(data)
linear.fit <- lm(loan_term_total ~ . - county_code, data = dataNA)
stepwise <- stepAIC(linear.fit, direction = "both", trace = FALSE, )
step.coefs <- names(stepwise$coefficients)[-1]
# Strip any backticks around the selected coefficient names so they match
# the raw column names when passed to subset(select = ...).
coefs <- gsub("`", "", step.coefs)
coefs <- c("loan_term_total" , coefs)
data.subset <- subset(data, select = coefs)
regress <- lm(loan_term_total ~ ., data.subset)
# Denial-rate models of increasing size. NOTE(review): each assignment
# below overwrites the previous one, so only the last (largest)
# specification survives for summary()/plot(). The "+ +" in the largest
# formula is a stray duplicated plus (harmless: parsed as unary plus).
regressDenial <- lm(percent_denied_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total, data = data)
regressDenial <- lm(percent_denied_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total +
                      percent_manufactured_total + loan_term_total, data = data)
regressDenial <- lm(percent_denied_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total
                    + origination_charges_total + loan_term_total + purchaser_fannie_total + purchaser_freddie_total + purchaser_ginnie_total + purchaser_private_total + percent_manufactured_total, data = data)
regressDenial <- lm(percent_denied_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total
                    + percent_manufactured_total+ loan_term_total + origination_charges_total + purchaser_fannie_total + purchaser_freddie_total + purchaser_ginnie_total + purchaser_private_total
                    + Per.Capita.Personal.Income.by.County..Dollars. + Bachelor.s.Degree.or.Higher..5.year.estimate..by.County..Percent. + Estimated.Percent.of.People.of.All.Ages.In.Poverty.by.County..Percent. + + Median.Age.of.the.Population.by.County..Years.of.Age. + Equifax.Subprime.Credit.Population.by.County..Percent.
                    + White.to.Non.White.Racial.Dissimilarity.Index.by.County..Percent., data = data)
summary(regressDenial)
plot(regressDenial)
############################
#Rate Spread
###########################
# Same four nested specifications with rate_spread_total as the response;
# again only the last assignment is kept for summary()/plot().
regressSpread <- lm(rate_spread_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total, data = data)
regressSpread <- lm(rate_spread_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total +
                      percent_manufactured_total + loan_term_total, data = data)
regressSpread <- lm(rate_spread_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total
                    + origination_charges_total + loan_term_total + purchaser_fannie_total + purchaser_freddie_total + purchaser_ginnie_total + purchaser_private_total + percent_manufactured_total, data = data)
regressSpread <- lm(rate_spread_total ~ percent_population_black + average_income_total + loan_amount_total + percent_female_total + debt_to_income_ratio + loan_to_value_ratio_total
                    + percent_manufactured_total+ loan_term_total + origination_charges_total + purchaser_fannie_total + purchaser_freddie_total + purchaser_ginnie_total + purchaser_private_total
                    + Per.Capita.Personal.Income.by.County..Dollars. + Bachelor.s.Degree.or.Higher..5.year.estimate..by.County..Percent. + Estimated.Percent.of.People.of.All.Ages.In.Poverty.by.County..Percent. + + Median.Age.of.the.Population.by.County..Years.of.Age. + Equifax.Subprime.Credit.Population.by.County..Percent.
                    + White.to.Non.White.Racial.Dissimilarity.Index.by.County..Percent., data = data)
summary(regressSpread)
plot(regressSpread)
###################################
##################################
#City Predictions
#################################
#New York = 36061, Durham = 37065
# Decompose a fitted model's prediction for one county into per-variable
# contributions (county value * coefficient); returns a one-row frame.
# NOTE(review): this uses `regressFred`, which is not defined anywhere in
# this file (only regressDenial/regressSpread are) -- presumably fitted in
# another script; confirm before running.
city_predictions <- function(code){
  county <- county_data_fred[which(county_data_fred$county_code == code),]
  vars <- names(regressFred$coefficients)
  # Replace the "(Intercept)" slot with county_code so the column lookup
  # below succeeds; that column is then overwritten with 1 so the
  # intercept contributes coefficient * 1.
  vars[1] <- "county_code"
  coefs <- regressFred$coefficients
  county.vars <- county[vars]
  county.vars$county_code[1] <- 1
  predicts <- predict(regressFred, type = "terms", newdata = county) # NOTE(review): computed but unused
  prediction = county.vars * coefs
  return(prediction)
}
predictions <- city_predictions( 37065)
write.csv(predictions, file = "CSV/Durham_coefs_NoTerm.csv" )
sum(predictions)
############################################################
######################################################
#Testing
###################################################
# Scratch models; the first `testing` assignment is immediately
# overwritten by the second, so summary() reports the subprime model only.
testing <- lm(interest_rate_total ~ loan_term_total + origination_charges_total, data = county_data_fred)
testing <- lm(percent_denied_total ~ Equifax.Subprime.Credit.Population.by.County..Percent., data = county_data_fred)
summary(testing)
durham <- county_data_fred[which(county_data_fred$county_code == 37065),]
################################################
|
d986e469beba4758490c0f08cff7e09d3704819b | 2f85eb97602cdb2db24d454abaea4ffed7942a33 | /run_analysis.R | 72d9cb4a50e02dd943435331d20ad4e6a9c47239 | [] | no_license | gsouza2001/datacleaningassign | 381519a1199695a608d08d9173823b496e176ab9 | 98a1a23dedf1de913165b83f32cd292de0cd0406 | refs/heads/master | 2021-01-11T14:14:18.050197 | 2017-02-07T15:41:43 | 2017-02-07T15:41:43 | 81,222,674 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,788 | r | run_analysis.R | # This code is for Programming Assignment in Week 4
# First, separately read all the raw data files: feature names, activity
# labels, and the test/train measurement (X), activity (y) and subject files.
names.measur <- read.table("./ucidata/features.txt",header = FALSE,colClasses = c("integer","character"))
activity.names <- read.table("./ucidata/activity_labels.txt",header = FALSE,colClasses = c("integer","character"))
values.test <- read.table("./ucidata/test/X_test.txt",header = FALSE)
activity.test <- read.table("./ucidata/test/y_test.txt",header = FALSE)
subject.test <- read.table("./ucidata/test/subject_test.txt",header = FALSE)
values.train <- read.table("./ucidata/train/X_train.txt",header = FALSE)
activity.train <- read.table("./ucidata/train/y_train.txt",header = FALSE)
subject.train <- read.table("./ucidata/train/subject_train.txt",header = FALSE)
# Now we merge test and train data sets together (test rows first, so the
# three combined tables stay row-aligned with each other).
values <- rbind(values.test,values.train)
activity <- rbind(activity.test,activity.train)
subject <- rbind(subject.test,subject.train)
# Now we keep only the variables of interest (-mean() and -std()) for each
# measurement. We also create better names for the variables by using only
# lower-case letters and removing symbols such as "-" and "()".
varToKeep <- grep("mean\\(\\)|std\\(\\)",names.measur$V2)
namesToKeep <- names.measur[varToKeep,2]
namesToKeep <- tolower(gsub("-|\\(|\\)","",namesToKeep))
values <- values[,varToKeep]
names(values) <- namesToKeep
# Now, we use descriptive names for the activities (plyr::join matches the
# activity codes against activity.names), and add two columns to `values`:
# one with the activity label and one with the subject id.
library(plyr)
activity <- join(activity,activity.names)
values$activity <- activity[,2]
values$subject <- subject$V1
# Now, we create an independent, tidy data set with the average of each of
# the 66 measurements by (subject, activity).
# Split `values` by the subject x activity interaction and average each
# group's measurement columns; `group_means` is a 66 x n-groups matrix
# whose column names look like "1.LAYING".
factors <- interaction(as.factor(values$subject), as.factor(values$activity))
groups <- split(values, factors)
group_means <- sapply(groups, function(x) colMeans(x[, 1:66]))
# Recover subject (s) and activity (a) from the "<subject>.<activity>"
# column names.
splitnames <- strsplit(colnames(group_means), "\\.")
s <- as.integer(vapply(splitnames, function(x) x[1], character(1)))
a <- vapply(splitnames, function(x) x[2], character(1))
# Build the tidy frame directly. The original grew a matrix starting from
# cbind(s, a) and then cbind()-ed each numeric row inside a loop, which
# coerced all 66 averages to character (and was quadratic). data.frame()
# with t(group_means) keeps the averages numeric; check.names = FALSE
# preserves the original measurement names as column names.
dat <- data.frame(subject = s, activity = a, t(group_means),
                  check.names = FALSE)
write.table(dat, "dat.txt", row.names = FALSE)
|
35c3ed2d7a01adacaa83e7b65178d13ad969bd78 | 497360e57330ad027b78852ecfbe9ec7b1fb1b66 | /plots/step1_read_count.R | bdd64ae3c365cdfc93375d79b0cc0b29bce88897 | [] | no_license | YutingPKU/Bulk_RNA-seq_Process | c05d2af1eb3e8b23971461ade09e8089b7456841 | 28146c158da68b78ac7a241d83f199c06b9b877b | refs/heads/master | 2022-12-12T21:10:09.948317 | 2020-08-27T14:26:05 | 2020-08-27T14:26:05 | 290,795,313 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,590 | r | step1_read_count.R | ######## reads counting step for all samples
library(GenomicAlignments)
library(Rsamtools)
#library(GenomicFeatures)
#library(refGenome)
#library(BiocParallel)
#library(DESeq2)
library(data.table)
library(BiocParallel)
## step 1: defining gene models
## The hg19 exons-by-gene model was built once from the UCSC GTF and
## cached as an RData file; the construction code is kept below.
#print("definning gene models")
#setwd("/lustre/user/liclab/publicData/igenomes/Homo_sapiens/UCSC/hg19/Annotation/Archives/archive-2014-06-02-13-47-56/Genes/")
#txdb <- makeTxDbFromGFF("genes.gtf", format="gtf")
#eByg <- exonsBy(txdb, by=c("gene"))
#ghs <- genes(txdb, columns = c("TXNAME","GENEID"))
#save(ghs, file = "/lustre/user/liclab/liuyt/denglab/backup/public/UCSC.hg19.genes.GRanges.RData")
load("/lustre/user/liclab/liuyt/denglab/backup/public/UCSC.hg19.genes.exonbygene.GRanges.RData")
#save(eByg, file = "/lustre/user/liclab/liuyt/denglab/backup/public/UCSC.hg19.genes.exonbygene.GRanges.RData")
## step 2: locating aligned BAM files (earlier batches kept commented out)
#indir = "/lustre/user/liclab/liuyt/denglab/star/bamfiles/RNA-Seq_180718/"
#indir = "/lustre/user/liclab/liuyt/denglab/star/bamfiles/RNA-Seq_181017/"
indir = "/lustre/user/liclab/liuyt/denglab/star/bamfiles/RNA-Seq_200104/"
filenames = list.files(indir, pattern = "bam",recursive = TRUE, full.names = T)
file.exists(filenames)
bamfiles = BamFileList(filenames)
seqinfo(bamfiles[1])
## step 3: reads counting, parallel over 6 workers
registered()
register(MulticoreParam(workers = 6,RNGseed = 77394650, timeout = 144000000, log = F ))
registered()
print("start counting reads:")
# Paired-end fragment counting against exons-by-gene, union mode,
# unstranded.
se <- summarizeOverlaps(features=eByg, reads=bamfiles,
                        mode="Union",
                        singleEnd=FALSE,
                        ignore.strand=TRUE,
                        fragments=TRUE )
# Sample IDs: first "_"-separated token of each BAM file name.
vec <- lapply(filenames, FUN=function(chr){
  list <- unlist(strsplit(basename(chr), "_"))[1]
})
vec <- unlist(vec)
sampleid = vec
#sampleid = c()
# NOTE(review): sampleData is built below but never attached to `se`
# (the colData(se) assignment further down is commented out).
sampleData = cbind(id= sampleid)
#type = substr(vec,1, nchar(vec)-1),
#replicate = substr(vec,nchar(vec), nchar(vec)),
#batch = )
sampleData = data.frame(sampleData)
#sampleData$type = as.factor(sampleData$type)
#sampleData$replicate = as.factor(sampleData$replicate)
#colData(se) = DataFrame(sampleData)
#table = assay(se)
#table = data.frame(table)
#rownames(table) = mcols(my_gr)[,1]
#colnames(table) = colnames(se)
#write.table(table,"/lustre/user/liclab/liuyt/monkey-brain/human-brain/RNA-Seq/results/expression-mat.txt", sep = '\t', row.names = T, col.names = T, quote = F)
saveRDS(se, "/lustre/user/liclab/liuyt/denglab/star/results/D200104.summarizeOverlaps.exonbygene.rds")
print("Counting step is done!")
|
3bd9e41e56aaf419622fb47dbb255a1a5edf72ba | bdb413b1d969f5619842b8d561af7e0183de5e02 | /study4/1nocirROC.R | 9a1203149cb70e7dda26cd95fecd05e8b9c4e361 | [] | no_license | FocusPaka/NIMEB | d5645c31e32440dac5e5de8809798867d30e0627 | d26065eeb5e5584adf3510f7adcbe73d43409f16 | refs/heads/main | 2023-04-02T12:15:53.347608 | 2021-04-08T21:44:22 | 2021-04-08T21:44:22 | 356,049,157 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,842 | r | 1nocirROC.R | #多个replicates=2,3,4,5,6,7的情况(循环)
rm(list=ls())
source("functions.R")
source("DEmethods_replicates.R")
# Generate the simulated count data.
data_primal <- read.table("Grimmond_lengths.txt",
                          sep="\t", header=TRUE, stringsAsFactors=FALSE)
library('limma')
# ------------------------------------
# simulate data from empirical distribution of counts
# ------------------------------------
nreps <- c(2,2)
pUp <- 1
# Two simulated datasets with different DE fractions / fold changes, drawn
# from the empirical count (EB) and transcript-length distributions.
xx_new1 <- generateDataset2(commonTags=15000, uniqueTags=c(1000,800),
                            empiricalDist = data_primal$EB,
                            lengthDist = data_primal$transcript_length,
                            pDifferential=0.6, foldDifference=4, pUp=pUp,
                            libLimits=c(.9,1.2)*1e6,nreps = nreps)
xx_new2 <- generateDataset2(commonTags = 10000, uniqueTags=c(2000,1000),
                            libLimits=c(.9,1.2)*1e6,
                            empiricalDist = data_primal$EB,
                            lengthDist = data_primal$transcript_length,
                            pDifferential = 0.4, pUp=.1,
                            foldDifference= 8,nreps = nreps)
# Stack the two datasets; gene indices coming from xx_new2 are offset by
# nrow(xx_new1$DATA).
xx_new <- list()
xx_new$DATA <- rbind(xx_new1$DATA, xx_new2$DATA)
xx_new$commonInd <- union(xx_new1$commonInd,xx_new2$commonInd+nrow(xx_new1$DATA))
xx_new$differentialInd <- union(xx_new1$differentialInd,xx_new2$differentialInd +
                                  nrow(xx_new1$DATA))
xx_new$group <- xx_new1$group
xx_new$length <- c(xx_new1$length, xx_new2$length)
# Keep genes with at least one count and mean count above 2.
k <- which(rowSums(xx_new$DATA) > 0 & rowMeans(xx_new$DATA) > 2)
xx <- takeSubset(xx_new, k)
k1 <- which(rowSums(xx_new1$DATA) > 0 & rowMeans(xx_new1$DATA) > 2)
xx_1 <- takeSubset(xx_new1, k1)
ci_1 <- xx_1$commonInd # common genes (including the fold-changed ones)
dii_1 <- intersect(ci_1,xx_1$differentialInd) # fold-change (differential) genes
comm_1 <- setdiff(ci_1,dii_1) # genes with identical expression levels
diff_1 <- xx_1$differentialInd # differentially expressed genes
ndiff_1 <- length(diff_1)
#################
k2 <- which(rowSums(xx_new2$DATA) > 0 & rowMeans(xx_new2$DATA) > 2)
xx_2 <- takeSubset(xx_new2, k2)
ci_2 <- xx_2$commonInd # common genes (including the fold-changed ones)
dii_2 <- intersect(ci_2,xx_2$differentialInd) # fold-change (differential) genes
comm_2 <- setdiff(ci_2,dii_2) # genes with identical expression levels
diff_2 <- xx_2$differentialInd # differentially expressed genes
ndiff_2 <- length(diff_2)
#########################################
ci <- xx$commonInd # common genes (including the fold-changed ones)
dii <- intersect(ci,xx$differentialInd) # fold-change (differential) genes
comm <- setdiff(ci,dii) # genes with identical expression levels
diff <- xx$differentialInd # differentially expressed genes
ndiff <- length(diff)
# Training set for the MEB model: 500 non-DE genes from each sub-dataset
# (indices of the second one offset by nrow(xx_1$DATA)).
id <- c(sample(comm_1, 500, replace=FALSE),sample(comm_2, 500, replace = FALSE)+nrow(xx_1$DATA))
x_train <- xx$DATA[id,]
countsTable <- xx$DATA
condsAB <- xx$group
genelength <- c(xx_1$length,xx_2$length)
# Run the DE methods (NIMEB/MEB, edgeR, HTN, DESeq, NOISeq) on the counts.
res <- runDEs(countsTable=countsTable, condsAB=condsAB, run_MEB=TRUE, train_id=id,
              gamma=seq(1e-07, 2e-04, 5e-06), nu=0.01, reject_rate=0.1,
              run_Marioni0=FALSE, run_Marioni=FALSE,run_edgeR0=FALSE,
              run_edgeR=TRUE,run_cloonan=FALSE,genelength=genelength, run_HTN=TRUE,
              run_DESeq=TRUE, run_NOISeq=TRUE)
#----MEB method----#
pred_comm <- predict(res$MEBmodel,xx$DATA[comm,]) # non-DE genes
x_test <- xx$DATA[diff,] # all DE genes
pred_test <- predict(res$MEBmodel, x_test)
pred_all <- predict(res$MEBmodel, xx$DATA) # the full data set
#----MEB AUC-------#
# Per-gene signed score: decision function minus rho; a positive value
# means the gene is predicted non-DE (see the count below).
check <- numeric(nrow(xx$DATA))
for(m in 1:nrow(xx$DATA)){
  check[m] <- decision_function(xx$DATA[m,], model=res$MEBmodel,gamma=res$gamma)-(res$MEBmodel)$rho
}
sum(check>0) #no.TRUE non-DE genes
summary(predict(res$MEBmodel,xx$DATA))
library(pROC)
check_ord_MEB <- order(check)
# Ground-truth labels: 1 = differentially expressed, 0 = not.
category <- numeric(nrow(xx$DATA))
category[diff] <- 1
roc_temp_MEB <- roc(category[check_ord_MEB], (-check)[check_ord_MEB],smooth = T)
roc_obj_MEB <- auc(roc_temp_MEB)
roc_obj_MEB
roc(category, check)
#################################################
# ROC curves for all methods overlaid on one panel.
oldpar=par(mar=c(3,2.6,1,0.2),mgp=c(1.7,0.5,0))
plot(roc_temp_MEB,col="red",legacy.axes=T,grid=TRUE, asp=NA)
par(pty="s")
#---LibSize method---#
libsize.pvalues <- Poisson.model.new(countMatrix = countsTable, group1 = which(condsAB == 1),
                                     group2 = which(condsAB == 2), calcFactor = FALSE)
libsize_roc <- roc(category, libsize.pvalues$stats$pval,smooth = T)
libsize_auc <- auc(libsize_roc)
plot(libsize_roc,col="green",add=T,legacy.axes=T)
#edgeR
roc_temp_edgeR <- roc(category, res$pfull[,4],smooth = T)
auc(roc_temp_edgeR)
plot(roc_temp_edgeR,col="yellow",add=T,legacy.axes=T)
#HTN
roc_temp_HTN <- roc(category, res$pfull[,6],smooth=T)
auc(roc_temp_HTN)
plot(roc_temp_HTN,col="blue",add=T,legacy.axes=T)
#DESeq
roc_temp_DESeq <- roc(category, res$pfull[,7],smooth=T)
auc(roc_temp_DESeq)
plot(roc_temp_DESeq,col="black",add=T,legacy.axes=T)
#NOISeq
roc_temp_NOISeq <- roc(category, res$pfull[,8],smooth=T)
auc(roc_temp_NOISeq)
plot(roc_temp_NOISeq,col="orange",add=T,legacy.axes=T)
title("pUp=0.5")
# NOTE(review): "DESseq" in the legend is a typo for "DESeq" (left as-is
# here since it is a runtime string that changes the rendered plot).
legend("bottomright",c("NIMEB","HTN","edgeR","Library Size","DESseq","NOISeq"),
       col = c("red","blue","yellow","green","black","orange"),
       lwd=1, cex=0.8)
|
dbc552c76e7b31afbcc819a91145472487b937a7 | febc9fe1ca5814fc96cba4594c08057bcb6a5c9b | /examples/06-predict-implementation.R | 736933e208ae07ee4e9f4fa5d3b4bdeccba48aee | [] | no_license | MaryleneH/2019-useR-workshop-design-for-humans | 4cd9ef1af1698759dcf359df340c5d9dac98d723 | 66a9d0dce0f862c1be8f79c022b0f77a16a56466 | refs/heads/master | 2020-06-17T17:53:16.463349 | 2019-07-09T08:47:43 | 2019-07-09T08:47:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 684 | r | 06-predict-implementation.R | library(rsample)
library(tibble)
library(dplyr)
library(hardhat)
# Worked example: fit a hardhat-based logistic_regression() on a training
# split of `admittance`, inspect the stored blueprint, then call the
# low-level predict helpers on a hand-built predictor matrix.
set.seed(123)
split <- initial_split(admittance)
train <- training(split)
test <- testing(split)
model <- logistic_regression(admit ~ gre + gpa, train)
model
# outcome levels info recorded on the model's hardhat blueprint
model$blueprint$ptypes$predictors
model$blueprint$ptypes$outcomes
levels(model$blueprint$ptypes$outcomes[[1]])
# convert to low level matrix: predictors plus an explicit intercept column
test_matrix <- test %>%
  select(gre, gpa) %>%
  # hardhat::add_intercept_column()
  add_intercept_column() %>%
  as.matrix()
head(test_matrix)
# class prob
predict_logistic_regression_prob(model, test_matrix)
# hard class prediction
predict_logistic_regression_class(model, test_matrix)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.