blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6ed9bc3b190de6a63010d8ed338a5446d33ad24f | c7825ebaf6170b33d232afec734edb83b23cc2d1 | /origin_datas/TCGA/0_datadeal.R | 4e675ad31253b433202a70fb36aa7415fb03756d | [] | no_license | yancunlibmchh/TNBC_hypoxia | 79e82d9c210439729a8735eb94b440d875130ac1 | 8d2959cce129dca0f8ec0f5608a3aa75257e892c | refs/heads/master | 2023-06-25T14:50:24.412497 | 2021-07-30T09:19:37 | 2021-07-30T09:19:37 | 390,946,198 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 2,819 | r | 0_datadeal.R | source("http://bioconductor.org/biocLite.R")
options(BioC_mirror="http://mirrors.ustc.edu.cn/bioc/")
biocLite("genefu")
biocLite("amap")
biocLite("breastCancerMAINZ")
biocLite("breastCancerTRANSBIG")
install.packages('R.utils')
install.packages('xtable')
install.packages('caret')
library(amap)
library(genefu)
library(xtable)
library(caret)
library(breastCancerMAINZ)
library(breastCancerTRANSBIG)
library(data.table)
library(R.utils)
setwd("E:/0_test/01_TNBC/0_data")
mRNA_exp_count <- fread("TCGA-BRCA_mRNA_counts.txt.gz",header = T,quote = "",sep="\t")
mRNA_exp_count <- as.data.frame(mRNA_exp_count)
miRNA_exp_count <- fread("TCGA-BRCA_counts.txt.gz",header = T,quote = "",sep="\t")
miRNA_exp_count <- as.data.frame(miRNA_exp_count)
sample_info <- fread("BRCA_survival.txt.gz",header = T,quote = "",sep="\t")
length(intersect(as.matrix(sample_info)[,1],colnames(mRNA_exp_count)))
sample_info_more <- fread("TCGA-BRCA_clinical.csv",header = T,quote = "",sep=",")
length(intersect(as.matrix(sample_info_more)[,1],substr(colnames(mRNA_exp_count),1,12)))
####### genefu pam50 #######
exp_mRNA_count <- mRNA_exp_count[,-1]
rownames(exp_mRNA_count) <- mRNA_exp_count[,1]
labels_sample <- as.numeric(substr(colnames(exp_mRNA_count),14,15))
samples_norm <- colnames(exp_mRNA_count)[which(labels_sample>=10)]
samples_dis <- colnames(exp_mRNA_count)[which(labels_sample<10)]
IDlist <- as.matrix(read.table("gene_ENSG2Symbol2ID.txt",header = F,quote = "",sep="\t"))
interID <- intersect(IDlist[,1],rownames(exp_mRNA_count))
rownames(IDlist) <- IDlist[,1]
EntrezGene.ID <- IDlist[interID,3]
ind_NA <- which(is.na(EntrezGene.ID))
interID_noNA <- interID[-ind_NA]
EntrezGene.ID_noNA <- EntrezGene.ID[-ind_NA]
ID_probe <- as.matrix(read.table("gene_probe.txt",header = F,quote = "",sep="\t",fill = T))
interID_probe <- intersect(interID_noNA,ID_probe[,4])
ID_probe_useful <- ID_probe[which(ID_probe[,4]%in%interID_probe),]
rownames(ID_probe_useful) <- ID_probe_useful[,4]
probes <- ID_probe_useful[interID_probe,1]
EntrezGene.ID_noNA_probe <- as.numeric(as.character(EntrezGene.ID_noNA[interID_probe]))
ddata <- t(exp_mRNA_count[interID_probe,samples_dis])
dannot <- data.frame(probe=probes,Gene.symbol=interID_probe,EntrezGene.ID=EntrezGene.ID_noNA_probe)
colnames(ddata) <- dannot$probe
PAM50Preds<-molecular.subtyping(sbt.model = "pam50",data=ddata,
annot=dannot,do.mapping=TRUE)
sample_class <- data.frame(samples=names(PAM50Preds$subtype),class=as.character(PAM50Preds$subtype))
sample_class <- rbind(as.matrix(sample_class),cbind(samples_norm,"norm"))
write.table(sample_class,"sample_class.txt",quote=F,row.names=F,col.names=T,sep = "\t")
setwd("E:\\0_test\\01_TNBC")
save.image(file = "allInfo.RData")
load("allInfo.RData")
|
aaf7f30422db84c6b0664bfd7c61b63680287f53 | 013276f4e5be344d9ad7f21bfa2257338b43aa1b | /tests/testthat.R | 4ed0b3e5469738d7765c5aca1d699ecbdf1e98b8 | [
"MIT"
] | permissive | jimsforks/taskdesignr | a9e934ce85257cea22566cf589fd64aa5db59aed | 01940d020b23658232dfd6d4c2791c444833f8a2 | refs/heads/master | 2023-02-23T06:55:07.138703 | 2021-01-31T00:53:47 | 2021-01-31T00:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 66 | r | testthat.R | library(testthat)
library(taskdesignr)
test_check("taskdesignr")
|
db5278cbb09d456598e0cc9434bcbe4161dc8c0d | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/DiffusionRjgqd/R/JGQD.dic.R | 7c910d0f1c521b6da7d493e13ea3f46b823a928c | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,330 | r | JGQD.dic.R | JGQD.dic <-function(model.list,type='col')
{
M=matrix(0,6,length(model.list))
for(i in 1:length(model.list))
{
M[,i]=unlist(model.list[[i]]$model.info)[1:6]
}
whDIC = which(as.numeric(M[4,])==min(as.numeric(M[4,])))
probs=as.numeric(M[4,whDIC])-as.numeric(M[4,])
M[c(3,4,5),]=format(round(as.numeric(M[c(3,4,5),]),2),nsmall=2)
wh=which(as.numeric(M[4,])==min(as.numeric(M[4,])))
M[4,wh]=paste0(' [=] ',format(as.numeric(M[4,wh]),nsmall=2))
rownames(M)=c('Elapsed time :'
,'Time Homogeneous :'
,'p :'
,'DIC :'
,'pD (effective) :'
,'N :')
mtags=rep(0,length(model.list))
for(i in 1:length(model.list))
{
mtags[i] = model.list[[i]]$model.info$Tag
}
colnames(M) = mtags
if(all(is.na(mtags)))
{
colnames(M)=paste('Model',1:length(model.list))
#warning('Some model tags are NULL!')
}
if(type=='row')
{
return(data.frame(M))
}
if(type=='col')
{
M=t(M)
colnames(M)=c('Elapsed_Time'
,'Time_Homogeneous'
,'p'
,'DIC'
,'pD'
,'N')
return(data.frame(M))
}
}
|
1e992465a200bacb988b9f50678c2e55002a55d3 | b068f48befd889ffe4a5e1f7e64ad60177a7348e | /R/legend.R | 0fa86e777849daea195466b6c929474a217cccd6 | [] | no_license | MaStatLab/BGCR | 0495d5e5d260bb5edb31db810fb2371edb32c2c0 | 7ab3427166f9000cef8604b044aed68676c07bc9 | refs/heads/master | 2020-06-17T14:24:24.504230 | 2019-11-24T07:48:47 | 2019-11-24T07:48:47 | 195,950,203 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,930 | r | legend.R |
##################################################################################################
##################################################################################################
####----function to plot the legend of the PMAP plot----####
legend_col = function(col, lev){
opar = par
n = length(col)
bx = par("usr")
box_cx = c(bx[2] + (bx[2] - bx[1]) / 1000, bx[2] + (bx[2] - bx[1]) / 1000 + (bx[2] - bx[1]) / 50)
box_sy = (bx[4] - bx[3]) / n
box_cy = c(bx[3], bx[3])
xx = rep(box_cx, each = 2)
par(xpd = TRUE)
for(i in 1:n){
yy = c(box_cy[1] + (box_sy * (i - 1)), box_cy[1] + (box_sy * (i)),
box_cy[1] + (box_sy * (i)), box_cy[1] + (box_sy * (i - 1)))
polygon(xx, yy, col = col[i], border = col[i])
}
par(new = TRUE)
plot(0, 0, type = "n",
ylim = c(min(lev), max(lev)),
yaxt = "n", ylab = "",
xaxt = "n", xlab = "",
frame.plot = FALSE)
axis(side = 4, las = 2, tick =FALSE, line = 0.5, cex.axis=0.7)
par = opar
}
##################################################################################################
##################################################################################################
legend_col_horiz = function(col, lev){
opar = par
n = length(col)
by = par("usr")
box_cy = c(by[3] , by[4])
box_sx = (by[2] - by[1]) / n
box_cx = c(by[1], by[1])
yy = rep(box_cy, each = 2)
#par(xpd = TRUE)
for(i in 1:n){
xx = c(box_cx[1] + (box_sx * (i - 1)), box_cx[1] + (box_sx * (i)),
box_cx[1] + (box_sx * (i)), box_cx[1] + (box_sx * (i - 1)))
polygon(xx, yy, col = col[i], border = col[i])
}
par(new = TRUE)
plot(0, 0, type = "n",
xlim = c(min(lev), max(lev)),
xaxt = "n", ylab = "",
yaxt = "n", xlab = "",
frame.plot = FALSE)
axis(side = 1, las = 1, tick =FALSE, line = 0.5, cex.axis=1)
par = opar
}
|
3866d8ee055f7fa78c1586dcff608883f74637c2 | 4a4d70802bc721f2750e4443723a53c2c37c3225 | /gif_animations/emissions_animation/gif_emissions.R | 380b0d44dc8c85cbb8344f8d921c2136659fa3c2 | [] | no_license | juangordyn/Jamsnot_Vis | 4f02b0207e06460c262db689e5caec1d69e3c002 | 7964e243e935b5cf46f37154d2dac0f3b30453d6 | refs/heads/main | 2023-08-02T16:39:04.374969 | 2021-09-30T02:39:21 | 2021-09-30T02:39:21 | 411,896,918 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,915 | r | gif_emissions.R | library('shiny')
library('ggplot2')
library('plotly')
library('gganimate')
library('gifski')
library('ggimage')
library('dplyr')
transport_types <- c('Car', 'Bus', 'Tram', 'Train')
text_annotations <- c('184 gCO2/km', '18 gCO2/km ', '13 gCO2/km', '12 gCO2/km')
images <- c('car_64.png', 'bus_64.png', 'tram_64.png', 'train_64.png')
emissions_df_1 <- data.frame(transport = transport_types,
emissions = c(184, 18, 13, 12), frame = rep('b', 4), image = images, label = text_annotations, text_height = c(203, 35, 32, 30 ))
emissions_df_2 <- data.frame(transport = transport_types,
emissions = c(0, 0, 0, 0), frame = rep('a', 4) , image = images)
emissions_df <- bind_rows(emissions_df_2,emissions_df_1)
print(emissions_df)
emissions_df$transport <- factor(emissions_df$transport, levels = transport_types)
key = row.names(emissions_df)
key_list = list()
for(i in (1:length(key))){
key_list[[i]] <- c(key[i], transport_types)
}
plot_transport_emission <- ggplot(emissions_df, aes(x = transport, y = emissions, fill = transport, group = 1)) + geom_bar(stat = 'identity', colour = 'black') +
scale_fill_manual(values = c("#F64F4C", "orange", "yellow", "#76B756")) +
scale_y_continuous(breaks=seq(0,250, 25)) +
geom_text(aes(label = label, y = text_height)) + geom_image(aes(image = image), size = 0.075) +
theme(plot.title = element_text(face ='bold'), legend.position='none',panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_text(face = "bold.italic", color = "black", size = 12)) +
labs(x = '' , y='Emissions per km (gCO2/km) per person') +
ggtitle('Gas emissions by type of transport') +
transition_states(frame, transition_length = 1, state_length = 0, wrap = FALSE) +
ease_aes('sine-in-out')
anim_save("emissions_animation.gif", animate(plot_transport_emission, duration = 15, end_pause=65))
|
fd742f638c3109281b000faac61812b1f8dd98a9 | a27ddbf3ffafe36488324c1e4a85a542ea526aed | /hw2/jwc/starter.R | 5a459a992a1f676a5f5bb80d2eacd1d0cf1697d6 | [] | no_license | wclark3/digital_marketing | f7845976f7360ab9ecacee85f318ecfee1d6dffb | a30cda7968646ca3a0f0fbf4ebcdf47ab5f36b13 | refs/heads/master | 2021-01-17T09:06:38.850769 | 2016-05-25T04:11:14 | 2016-05-25T04:11:14 | 55,690,156 | 0 | 1 | null | 2016-04-26T02:29:32 | 2016-04-07T11:44:39 | R | UTF-8 | R | false | false | 1,564 | r | starter.R | # Digital and Algorithmic Marketing
# Homework #2
# Instructor: Sanjog Misra
# Topic: Recommendation Systems
# if needed: install.packages("recommenderlab)
library(recommenderlab)
# A Real Dataset
data(MovieLense) # Not sure why there is an e at the end
MovieLense
# Use the Movielens data to answer the following questions
# Please upload a pdf file to chalk. Code is not needed.
# Hints: To evalaute timing you can use the system.time command.
# For example, system.time(r <- Recommender(data, method = "RANDOM") )
# will tell you how gold raining a RANDOM CF algorithm takes.
# To assess predictive accuracy you can use the function calcPredictionAccuracy
# See ?calcPredictionAccuracy for examples
# or the recommenderlab vignette:
# https://cran.r-project.org/web/packages/recommenderlab/vignettes/recommenderlab.pdf
# Finally, to examine overlap you may use the following function
rec.overlap = function(recA,recB,i)
{
length(intersect(recA@items[[i]],recB@items[[i]]))
}
# Example
# Split the data
# Hint: You may want to look at the Reccomderlab
# vignette for better ways of doing this.
train <- MovieLense[1:300]
test <- MovieLense[301:350]
# Training the recommender
r1 <- Recommender(train, method = "RANDOM")
r2 <- Recommender(train, method = "POPULAR")
# Predict for the test data
rec1 = predict(r1,test,n=150)
rec2 = predict(r2,test,n=150)
# How many reccomendations overlap for user 12 in the test sample
# You can do this for all the users in test
rec.overlap(rec1,rec2,i=12)
|
7f697f49877ee0206b958f2cd529389e703561e9 | ce19633f0f8f4dc4fed29d2f211d90f8443d7536 | /R_19_01/zad1_19_01.R | ff3a86ff251433717c0d11ef43de38b374b17172 | [] | no_license | adrianstodolski/DataScienceCDV | 152653e732c432fd58ed42d5888c98acfb0f40bf | 4b3e70e59c28bdce153b4dd4aefdc29958c6dcbb | refs/heads/master | 2020-09-22T22:40:31.588745 | 2020-01-25T17:24:05 | 2020-01-25T17:24:05 | 225,336,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,870 | r | zad1_19_01.R | library(ggplot2) # Bibliotek z której będziemy korzystać przy tworzeniu wykresu
data_ap <- AirPassengers #
plot(data_ap) # widzimy, ze dane maja trend i sezonowosc
class(data_ap)
str(data_ap) # struktura danych
# sprawdzenie danych -> tylko time series
start(data_ap)
end(data_ap)
frequency(data_ap)
# naniesienie trendu na wykres
plot(data_ap, ylab = "Liczba pasażerów")
abline(lm(data_ap ~ time(data_ap)))
# analiza danych
decompose(data_ap, type = "multiplicative") # rozkladanie danych na skladowe
# w rezultacie otrzymamy kilka komponentow
# Przedstawiamy dekompozycje na wykresie
ap_deco <- decompose(data_ap, type = "multiplicative")
plot(ap_deco)
#######################
Prognozowanie przyszlosci
cycle(data_ap)
# dane po miesiacach
boxplot(data_ap ~ cycle(data_ap))
# do tej pory dane maja trend
plot(data_ap, ylab = "Liczba pasażerów")
# dalsz analiza - wyrownujemy dane
plot(log(data_ap))
# usunmy trend (funkcja diff) - stosujemy tyle razy az uzyskamy stala srednia
plot(diff(log(data_ap)))
# Przechodzimy do tworzenia modelu
# model ARIMA
# Jest to zintegrowania(I) regresji(AR) i sredniej kraczacej(MA)
# aby uzyc model mamy 3 parametry
# AR -> p
# I -> d
# MA -> q
acf(diff(log(data_ap)))
pacf(diff(log(data_ap)))
plot(diff(log(data_ap)))
# Tworzymy model
mod <- arima(log(data_ap), c(0,1,1), seasonal = list(order = c(0,1,1), period = 12))
# Prognozujemy dane na 5 lat
mod_pred <- predict(mod, n.ahead = 5*12)
summary(mod)
pred1 <- 2.718^mod_pred$pred
ts.plot(data_ap,pred1,lty=c(1,3)) #lty 1,3 typ lini : 1 oraz 3
ap_szkoleniowe <- ts(data_ap, frequency = 12, start = c(1949,1), end =c(1959,12))
model_1 <- arima(log(ap_szkoleniowe), c(0,1,1), seasonal = list(order=c(0,1,1), period = 12))
ap_pred <- predict(model_1, n.ahead = 1*12)
ap_pred <- 2.718^ap_pred$pred
ap_pred
ap_pred=round(ap_pred,digit=0)
#Porównajmy
data_ap
ap_pred
|
5276ad24fc4d6100a7256575894dd57256acb9ab | b46a1f0322e9f5ae34fa8e6192b73bf6c0339aef | /man/importance.Rd | 3ec05d0cf8ecb42d3574d58944824d5e6cec3c34 | [] | no_license | cran/labdsv | 33f478ed6469edad170b78577d4aafd04d49b7f7 | 8a8565f635143f62df5d2bbae3debbd275d5d45e | refs/heads/master | 2023-04-14T09:05:46.429809 | 2023-04-10T08:30:02 | 2023-04-10T08:30:02 | 17,696,960 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,174 | rd | importance.Rd | \name{importance}
\alias{importance}
\alias{importance.default}
\title{Importance Table}
\description{For a classified set of vegetation samples,
a importance table lists for each species
the average or typical abundance of each species in each class.
}
\usage{importance(comm,clustering,minval=0,digits=2,show=minval,
sort=FALSE,typical=TRUE,spcord,dots=TRUE)
}
\arguments{
\item{comm}{a data.frame of species abundances with samples as rows and
species as columns}
\item{clustering}{a vector of (integer) class memberships, or an object of
class \sQuote{clustering}, class \sQuote{partana},
of class \code{\link[cluster]{partition}}}
\item{minval}{the minimum importance a species must have in at least one class
to be included in the output}
\item{digits}{the number of digits to report in the table}
\item{show}{the minimum value a species must have to print a value}
\item{sort}{a switch to control interactive re-ordering}
\item{typical}{a switch to control how mean abundance is calculated.
Typical=TRUE divides the sum of species abundance by the number of plots in which
it occurs; typical=FALSE divides by the number of plots in the type}
\item{spcord}{a vector of integers to specify the order in which species
should be listed in the table}
\item{dots}{a switch to control substituting dots for small values}
}
\value{a data.frame with species as rows, classes as columns, with
average abundance of species in classes.}
\note{Importance tables are often used in vegetation classification to
calculate or present characteristic species for specific classes or types.
Importance may be combined with \code{\link[labdsv]{const}},
\code{\link[labdsv]{concov}} and \code{\link[labdsv]{vegtab}} to achieve a
vegetation table-oriented analysis.}
\author{
David W. Roberts
\email{droberts@montana.edu}
}
\seealso{\code{\link[labdsv]{const}}, \code{\link[labdsv]{vegtab}},
\code{\link[labdsv]{concov}}
}
\examples{
data(bryceveg) # returns a data.frame called bryceveg
data(brycesite)
class <- cut(brycesite$elev,10,labels=FALSE)
importance(bryceveg,class,minval=0.25)
}
\keyword{multivariate}
|
20f29c54446f1670b593c612a682fd0a14fa9e31 | 7a281ec4d5cc823b79ebe590a0a2f15be9564df4 | /Anime _Shubhasree_Sarkar.R | 736fa8b2d5fcb875a5244184e0a9e17ddf4b2dab | [] | no_license | shubhasree1234/Anime-Rating-Prediction | 2d3750cec259f7bd4cfc93ae265a2694c8ff432c | 330b8de3930a5f60d1a935056cb5d05c820cd71e | refs/heads/main | 2023-03-26T14:51:53.062995 | 2021-03-23T14:52:32 | 2021-03-23T14:52:32 | 350,737,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 55,645 | r | Anime _Shubhasree_Sarkar.R | #Anime Rating Prediction#
#*************************************************************************************************************************#
#Step 1-import the libraries
library(boot) #
library(car)
library(QuantPsyc)
library(lmtest)
library(sandwich)
library(vars)
library(nortest)
library(MASS)
library(Hmisc)
library(caTools) #used for splitting the data into test and train
library(ggplot2) #used for data visualization
library(Metrics)
library(psych)
#*************************************************************************************************************************#
#Step 2- Data Extraction or Import
#set directory to extract the data
setwd("E:\\IVY COURSE\\R STUDIO\\Internship")
#use of na.strings in order to convert blank cells to 'NA'
Anime<- read.csv(file ="Anime_Final.csv", header = TRUE, fill = T, na.strings = c("","[]"))
#get the table containing the dataset
View(Anime)
#names of the attributes as well as the overall dataset
names(Anime)
dim(Anime) #check for the dimension of the dataset
nrow(Anime) #no of rows
ncol(Anime) #no of columns
#Result- The dataset is comprised of 7029 rows and 16 columns
#*************************************************************************************************************************#
#Step 3-check for Data Sanity
str(Anime) #check for the overall structure of the data (types of variables)
#Result- From the above mentioned code, we could derive that
#8 columns are of Character datatype, while rest 7 are integer and one column(Rating) is Numeric
summary(Anime) #check for the summarized output in terms of metrics
describe(Anime) #check for detailed description of the variables
#--------------------------------------------------------------------------------------------------------------------------#
#Convert all of the character datatypes to the factor datatypes in the dataset
Anime$mediaType<-as.factor(Anime$mediaType)
Anime$ongoing<-as.factor(Anime$ongoing)
Anime$sznOfRelease<-as.factor(Anime$sznOfRelease)
Anime$description<-as.factor(Anime$description)
Anime$studios<-as.factor(Anime$studios)
Anime$tags<-as.factor(Anime$tags)
Anime$contentWarn<-as.factor(Anime$contentWarn)
#--------------------------------------------------------------------------------------------------------------------------#
#check for data sanity once again
str(Anime)
#Results - [title(7029 levels),mediaType(9 levels),ongoing(2 levels),
# sznOfRelease(5 levels),description(3923 levels), studios(532 levels),
# tags(4869 levels),contentWarn(131 levels)]
summary(Anime)
describe(Anime)
#Nature of Variables-
#Quantitative -eps,duration,watched,watching,wantWatch,dropped,rating,votes
#Categorical- mediaType,ongoing,sznOfRelease,
#Qualitative- title,description,studios,tags,contentWarn
#*************************************************************************************************************************#
#Step 4-Data Pre-processing
#the different steps of data preprocessing includes-
# 1. Missing Value treatment, 2.Duplicate treatment 3.Outlier treatment
#--------------------------------------------------------------------------------------------------------------------------#
#4.1-Missing value treatment
#check for missing values
sapply(Anime, function(x) sum(is.na(x)))
# ##Result- From the output we can witness the missing values,
# mediaType(30),duration(1696),sznOfRelease(4916)
# description(3083), studios(2239),tags(229)
# contentWarn(6242), watched(87)
#-------------------------------------------------------------------------------------------------------------------------#
# check for percentage wise missing values
library(dplyr)
Missing_values <- data.frame(sapply(Anime, function(y) round((sum(length(which(is.na(y))))/nrow(Anime))*100.00,2)))
Missing_values
#Filter the missing values >30%
Missing_values %>% filter(Missing_values > 30.00)
# #Result- Missing values greater than 30% are for the following categories
# sznOfRelease- 69.94%
# description - 43.86%
# studios- 31.85%
# contentWarn - 88.80%
# for the above cases we will remove them from further calculations
#Now for the categorical variables, we will use mode and for numeric variables we will use median
#[For the above mentioned columns , as we are going to drop them eventually, hence, it's better
# to explore them a bit before dropping ]
#--------------------------------------------------------------------------------------------------------------------------#
#Exploratory data Analysis
#Importing libraries for data visualization and exploration
library(tidyverse)
library(magrittr)
library(sqldf)
#--------------------------------------------------------------------------------------------------------------------------#
#For the two types of variables , the following graphs or plots are being chosen
#Histograms ~ Continuous Variables()
#Barplot~ Categorical(sznOfRelease)
#Exploration of data using SQL for the Qualitative variables(description,studios,contentWarn)
#--------------------------------------------------------------------------------------------------------------------------#
#1. Barplot of Season Of Release
counts_sznofRelease<- table(Anime$sznOfRelease)
barplot(counts_sznofRelease,
main = "Barplot of Seasons Of Release" ,
xlab = "Seasons Of Release" ,
ylab = "Frequency",
col = c("blue","green","yellow","orange","red","lightgreen","gray","violet"))
#Results- Spring has the highest no. of Releases followed by Fall
#With respect to sznOfRelease, how was Rating distributed
boxplot(rating~sznOfRelease,
data = Anime,
xlab = "Rating",
ylab = "sznOfRelease",
main = "Rating vs sznOfRelease",
horizontal = TRUE)
#Results- For the Spring and Summer, there are presence of Outliers, but mostly skewed towards higher rating
#--------------------------------------------------------------------------------------------------------------------------#
#2. Details on Studios
#2.1- Top 5 studios in terms of maximum nos. of Anime
viz_1 = data.frame(sqldf('select studios,COUNT(studios) from Anime GROUP BY studios ORDER BY COUNT(studios) desc limit (5)'))
viz_1 <- table(viz_1)
barplot(viz_1 ,
main = "Barplot of Studios" ,
xlab = "Studios" ,
ylab = "Frequency",
col = c("blue","green","yellow","orange","red","lightgreen","gray","violet"))
#Results- Sunrise Studios contributed mostly for the Anime Shows
#--------------------------------------------------------------------------------------------------------------------------#
#3. Details on Top 5 content warnings
sqldf('select contentwarn,COUNT(contentwarn) from Anime GROUP BY contentwarn ORDER BY COUNT(contentwarn) desc limit (5)')
#Results- The above analysis states Violence being used the most as Contents for shows
#--------------------------------------------------------------------------------------------------------------------------#
## Dropping the redundant variables - sznOfRelease,description,studios & contentwarn using subset function
Anime<- subset(Anime, select =-c(sznOfRelease,description,studios,contentWarn))
View(Anime)
names(Anime)
#--------------------------------------------------------------------------------------------------------------------------#
#Replacing the missing values
#First Step- Categorical Variables ~ replacement using Mode Function
#Second Step- Continuous Variables ~ replacement using Median Function
#--------------------------------------------------------------------------------------------------------------------------#
#First Step-Replacement of categorical variables(tags,mediaType)
# Create the function for mode
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
# Calculate the mode using the user function.
Anime$mediaType[is.na(Anime$mediaType)] <- getmode(Anime$mediaType[!is.na(Anime$mediaType)]) # Mode imputation
Anime$tags[is.na(Anime$tags)] <- getmode(Anime$tags[!is.na(Anime$tags)]) # Mode imputation
A <- getmode(Anime$mediaType)
E <- getmode(Anime$tags)
print(A)
print(E)
View(Anime)
#Result- Media type which has 30 missing values, and for tags that has 229 missing values, those are being replaced by the mode function
#-------------------------------------------------------------------------------------------------------------------------#
#Second Step-Replacement of continuous variables (duration,watched)
#replace missing values with median
for(i in c(4,11)){
Anime[is.na(Anime[,i]),i]<-round(median(Anime[,i],na.rm = TRUE))
}
Anime$watched[is.na(Anime$watched)]<- round(median(Anime$watched,na.rm=TRUE))
#check the output
print(str(Anime$watched))
summary(Anime)
#Result- Duration which has 1696 missing values, and for watched that has 87 missing values, those are being replaced by the median function
#--------------------------------------------------------------------------------------------------------------------------#
#4.2-Duplicate values treatment
#check for the unique values
lapply(Anime, function(x) length(unique(x)))
#Result- Tags and Title have too many of the unique values, hence, for further ease in modelling, they should be dropped
#Please note that we will backup the data with a different name which we will use to explore the respective attributes present in them
#--------------------------------------------------------------------------------------------------------------------------#
#back up the data
Anime1<-Anime
nrow(Anime1)
#--------------------------------------------------------------------------------------------------------------------------#
#4.3-Outlier treatment
#Boxplot view~ Continuous variables)
#Rating
# identify outliers in r boxplot and save it
boxplot(Anime$rating, main = "boxplot for Rating")
#quantile view
quantile(Anime$rating, seq(0,1, by= 0.005))
#Result- From the Boxplot visualization and also from the quantile processing, it can be confirmed,
# that the rating data being continuous, has no outliers present
#--------------------------------------------------------------------------------------------------------------------------#
#1.Eps-
# identify outliers in r boxplot and save it
boxplot(Anime$eps, main = "Boxplot of Episodes",xlab = "No. of Episodes", horizontal = TRUE)
quantile(Anime$eps, seq(0,1, by= 0.001))
quantile(Anime$eps, c(0.999,0.999301))
#Result- Here, we will be considering the value of 99.9301% which is 399.6171, rounded off to 400, to replace the extreme outliers
#check for details
length(Anime$eps)
summary(Anime$eps)
#setting the bench mark
eps1 <- as.numeric(round(quantile(Anime$eps,0.999301)))
eps1
#Winsorizing
Anime$eps = (ifelse(Anime$eps > eps1,eps1,Anime$eps))
#Final visualization
summary(Anime$eps)
boxplot(Anime$eps,main = "Boxplot of Episodes",xlab = "No. of Episodes", horizontal = TRUE)
#-------------------------------------------------------------------------------------------------------------------------#
#2.Duration-
# identify outliers in r boxplot and save it
boxplot(Anime$duration,main = "Boxplot of duration",xlab = "Total Duration in minutes", horizontal = TRUE)
quantile(Anime$duration, seq(0,1, by= 0.001))
quantile(Anime$duration, c(0.999,0.999701))
#Result- Here, we will be considering the value of 99.9701% which is 150.000, to replace the extreme outliers
#check for details
length(Anime$duration)
summary(Anime$duration)
#setting the bench mark
dur1 <- as.numeric(round(quantile(Anime$duration,0.999701)))
dur1
#Winsorizing
Anime$duration= (ifelse(Anime$duration > dur1,dur1,Anime$duration))
#Final visualization
summary(Anime$duration)
boxplot(Anime$duration,main = "Boxplot of duration",xlab = "Total Duration in minutes", horizontal = TRUE)
#-------------------------------------------------------------------------------------------------------------------------#
#3.Watched-
# identify outliers in r boxplot and save it
#--- Upper-tail outlier treatment (winsorizing) --------------------------------
# Each popularity count below contains a handful of extreme upper-tail
# outliers. Every variable is capped ("winsorized") at a variable-specific
# quantile (all > 99.9%), chosen interactively so that only the 4-5 most
# extreme observations per variable are replaced by the cap.
# Refactor: the identical inspect/cap/re-inspect sequence was repeated five
# times; the cap computation is extracted into a small helper.

# Rounded value of x at quantile probability `prob`; used as the winsorizing
# cap for the upper tail of a count variable. na.rm = TRUE guards against any
# residual NA values.
upper_cap <- function(x, prob) {
  as.numeric(round(quantile(x, prob, na.rm = TRUE)))
}

#3. watched --------------------------------------------------------------------
boxplot(Anime$watched, main = "Boxplot of Watched",xlab = "No. of People Watched",horizontal = TRUE)
quantile(Anime$watched, seq(0, 1, by = 0.001))  # inspect the extreme tail
wat1 <- upper_cap(Anime$watched, 0.999464)      # cap ~= 100019; 4 obs above it
wat1
Anime$watched <- pmin(Anime$watched, wat1)      # winsorize (was ifelse(x > cap, cap, x))
summary(Anime$watched)
boxplot(Anime$watched, main = "Boxplot of Watched",xlab = "No. of People Watched",horizontal = TRUE)
#-------------------------------------------------------------------------------------------------------------------------#
#4. watching -------------------------------------------------------------------
boxplot(Anime$watching,main = "Boxplot of Watching",xlab = "No. of People Watching", horizontal = TRUE)
quantile(Anime$watching, seq(0, 1, by = 0.001))
watchn1 <- upper_cap(Anime$watching, 0.9995)    # cap ~= 20022; 4 obs above it
watchn1
Anime$watching <- pmin(Anime$watching, watchn1)
summary(Anime$watching)
boxplot(Anime$watching,main = "Boxplot of Watching",xlab = "No. of People Watching", horizontal = TRUE)
#-------------------------------------------------------------------------------------------------------------------------#
#5. wantWatch ------------------------------------------------------------------
boxplot(Anime$wantWatch,main = "Boxplot of Want to Watch",xlab = "No. of People want to watch", horizontal = TRUE)
quantile(Anime$wantWatch, seq(0, 1, by = 0.001))
wantw1 <- upper_cap(Anime$wantWatch, 0.99929)   # cap ~= 20003; 5 obs above it
wantw1
Anime$wantWatch <- pmin(Anime$wantWatch, wantw1)
summary(Anime$wantWatch)
boxplot(Anime$wantWatch, main = "Boxplot of Want to Watch",xlab = "No. of People want to watch",horizontal = TRUE)
#-------------------------------------------------------------------------------------------------------------------------#
#6. dropped --------------------------------------------------------------------
boxplot(Anime$dropped, main = "Boxplot of Dropped",xlab = "No. of People dropped out",horizontal = TRUE)
quantile(Anime$dropped, seq(0, 1, by = 0.001))
drp <- upper_cap(Anime$dropped, 0.999381)       # cap ~= 4999; 5 obs above it
drp
Anime$dropped <- pmin(Anime$dropped, drp)
summary(Anime$dropped)
boxplot(Anime$dropped, main = "Boxplot of Dropped",xlab = "No. of People dropped out", horizontal = TRUE)
#-------------------------------------------------------------------------------------------------------------------------#
#7. votes ----------------------------------------------------------------------
boxplot(Anime$votes, main = "Boxplot of Votes",xlab = "No. of People Voted", horizontal = TRUE)
quantile(Anime$votes, seq(0, 1, by = 0.001))
vote <- upper_cap(Anime$votes, 0.999454)        # cap ~= 80022; 4 obs above it
vote
Anime$votes <- pmin(Anime$votes, vote)
summary(Anime$votes)
boxplot(Anime$votes,main = "Boxplot of Votes",xlab = "No. of People Voted", horizontal = TRUE)
#Note- All the outliers are being treated with respect to their quantiles > 99.9%
#Note- All the outliers are being treated with respect to their quantiles > 99.9%
## Select a subset of Continuous and Categorical variables
Cont_Var<- subset(Anime1,select = c(eps,duration,watching,watched,wantWatch,dropped,rating,votes))
Cont_Var
Cat_Var<- subset(Anime1,select = c(mediaType,ongoing))
Cat_Var
#*************************************************************************************************************************#
#Step 5- Exploratory Data Analysis (graphical)
#5.1 Univariate analysis:
#    Histograms for continuous variables, barplots for categorical ones;
#    qualitative fields (description, studios, contentWarn) are explored
#    separately with SQL.
#*************************************************************************************************************************#
##1. Barplot of media types
media_freq <- table(Anime1$mediaType)
media_palette <- c("blue", "green", "yellow", "orange",
                   "red", "lightgreen", "gray", "violet")
barplot(media_freq,
        col = media_palette,
        main = "Barplot of MediaTypes",
        xlab = "Media types",
        ylab = "Frequency")
#Results- TV is the most used medium or platform to watch Anime, while TV special is the least
#-------------------------------------------------------------------------------------------------------------------------#
##2. Histogram of episode counts
hist(Anime1$eps,
     breaks = sqrt(nrow(Anime1)),
     xlim = c(0, 150),
     col = "blue",
     main = "Histogram view of Episodes",
     xlab = "Episodes_distribution")
#Results- Majority of the shows has episodes between 0-20
#-------------------------------------------------------------------------------------------------------------------------#
##3. Histogram of duration (quick look first, then a formatted version)
hist(Anime1$duration, col = "green", xlab = "Duration_distribution")
hist(Anime1$duration,
     breaks = sqrt(nrow(Anime1)),
     xlim = c(0, 140),
     col = "green",
     main = "Histogram view of Duration",
     xlab = "Duration_distribution")
#Results- Majority of the shows has duration between 0-8 minutes;
#         the distribution is right skewed
#-------------------------------------------------------------------------------------------------------------------------#
##4. Barplot of the ongoing flag
ongoing_freq <- table(Anime1$ongoing)
barplot(ongoing_freq,
        col = c("blue", "green"),
        main = "Barplot of ongoing shows",
        xlab = "Ongoing Shows",
        ylab = "Frequency")
#Result- Most of the Shows are not live ones, and hence are not ongoing presently
#-------------------------------------------------------------------------------------------------------------------------#
##5. Details on Tags
#5.1- Top 5 tags by number of Anime titles carrying each tag.
# NOTE(review): this query reads from `Anime` while the rest of this section
# uses the backup `Anime1` -- confirm which data frame is intended here.
sqldf('select tags,COUNT(tags) from Anime GROUP BY tags ORDER BY COUNT(tags) desc limit (5)')
t <- table(Anime1$tags) # frequency of values in tags
plot(( sort(t, decreasing=TRUE)[1:5] ), type="h")
#Results- Vocaloid is the tag used most often across the shows
#-------------------------------------------------------------------------------------------------------------------------#
##6. Top 5 shows by total "watched" count
sqldf('select title, SUM(watched) from Anime1 GROUP BY title ORDER BY SUM(watched) desc limit (5)')
#Results- Death Note has been the most watched show
# Histogram of watched (quick look, then a formatted version).
# NOTE(review): xlim = c(0,140) almost certainly clips nearly all watched
# counts (values run into the tens of thousands) -- confirm the intended
# range. The "watched_distributionn" label typo is a runtime string and is
# left untouched here.
hist(Anime1$watched,xlab = "watched_distribution", main = "Histogram view of watched",col = "green")
hist(Anime1$watched,
xlab = "watched_distributionn",
main = "Histogram view of watched",
col = "green",
xlim = c(0,140),
breaks = sqrt(nrow(Anime1)))
#-------------------------------------------------------------------------------------------------------------------------#
##7. Top 5 shows currently being watched (sum of "watching")
sqldf('select title, SUM(watching) from Anime1 GROUP BY title ORDER BY SUM(watching) desc limit (5)')
#Results- Naruto Shippuden is the most watched show at present
hist(Anime1$watching,xlab = "watching_distribution", main = "Histogram view of watching shows",col = "blue")
#-------------------------------------------------------------------------------------------------------------------------#
##8. Top 5 shows people are willing to watch (sum of "wantWatch")
sqldf('select title, SUM(wantWatch) from Anime1 GROUP BY title ORDER BY SUM(wantWatch) desc limit (5)')
#Results- Steins;Gate is the show most people are willing to watch
hist(Anime1$wantWatch,xlab = "wantwatch_distribution", main = "Histogram view of wantwatch shows",col = "red")
#-------------------------------------------------------------------------------------------------------------------------#
##9. Top 5 shows people dropped before completing (sum of "dropped")
sqldf('select title, SUM(dropped) from Anime1 GROUP BY title ORDER BY SUM(dropped) desc limit (5)')
#Results- Naruto Shippuden is the show most people have dropped before completion
hist(Anime1$dropped,xlab = "dropped_distribution", main = "Histogram view of dropped shows",col = "red")
#-------------------------------------------------------------------------------------------------------------------------#
##10. Histogram of Ratings with a smoothed density overlay
# BUG FIX: lines() was previously passed as an ARGUMENT to hist(); it must be
# called AFTER hist() so the density curve is drawn on the finished plot.
# freq = FALSE puts the histogram on the density scale so the overlaid
# density curve is directly comparable.
hist(Anime1$rating,
     xlab = "Rating",
     main = "Histogram of Rating",
     freq = FALSE,
     breaks = sqrt(nrow(Anime1)))
lines(density(Anime1$rating, adjust = 2), lty = "dotted", col = "darkgreen", lwd = 2)
#Result- The rating distribution appears bimodal (the original comment said
#        "Binomial", presumably a typo for bimodal)
#-------------------------------------------------------------------------------------------------------------------------#
##11. Top 5 shows by total vote count
sqldf('select title, SUM(votes) from Anime1 GROUP BY title ORDER BY SUM(votes) desc limit (5)')
#Results- Death Note is the most voted show by people
hist(Anime1$votes,xlab = "votes_distribution", main = "Histogram view of votes of shows",col = "yellow")
#*************************************************************************************************************************#
#5.2 Bivariate Analysis:
#*************************************************************************************************************************#
#Scatterplots - Continuous vs Continuous variables
#Boxplots     - Continuous vs Categorical variables
#*************************************************************************************************************************#
##1. Rating by mediaType
# NOTE(review): in boxplot(rating ~ mediaType) the grouping variable
# (mediaType) appears on the x-axis and rating on the y-axis, so the
# xlab/ylab strings below look swapped -- confirm intended labelling.
boxplot(rating~mediaType,
data = Anime1,
xlab = "Rating",
ylab = "mediaType",
main = "Rating vs mediaType")
#Results- For the Movie, OVA and TV types outliers are present and the
#         distributions are left skewed; the data is generally skewed
#-------------------------------------------------------------------------------------------------------------------------#
##2. Rating vs eps (scatter)
plot(x= Anime1$rating, y= Anime1$eps,
xlab = "Rating",
ylab = "eps",
main = "Rating vs eps")
#Results- Shows with 0-15 episodes have ratings between 1.5-3.5 on average
#-------------------------------------------------------------------------------------------------------------------------#
##3. Rating vs duration (scatter)
plot(x= Anime1$rating, y= Anime1$duration,
xlab = "Rating",
ylab = "duration",
main = "Rating vs duration")
#Results- Shorter shows tend to have higher ratings, but many long shows are
#         also rated well, so no clear interpretation can be drawn
#-------------------------------------------------------------------------------------------------------------------------#
##4. Rating by ongoing flag
boxplot(rating~ongoing,
data = Anime1,
xlab = "Rating",
ylab = "ongoing",
main = "Rating vs ongoing")
#Results- Ongoing shows have outliers and are left skewed;
#         non-ongoing shows look approximately normal
#-------------------------------------------------------------------------------------------------------------------------#
##5. Rating vs tags
# NOTE(review): plot() against a character vector errors; this only works if
# tags was converted to a factor earlier -- confirm.
plot(x= Anime1$rating, y= Anime1$tags,
xlab = "Rating",
ylab = "tags",
main = "Rating vs tags")
#Result- No interpretation can be drawn from this variable
#-------------------------------------------------------------------------------------------------------------------------#
##6. Rating vs watched (scatter)
plot(x= Anime1$rating, y= Anime1$watched,
xlab = "Rating",
ylab = "watched",
main = "Rating vs watched")
#Result- Most highly rated shows are watched less, with few exceptions
#-------------------------------------------------------------------------------------------------------------------------#
##7. Rating vs watching (scatter)
plot(x= Anime1$rating, y= Anime1$watching,
xlab = "Rating",
ylab = "watching",
main = "Rating vs watching")
#Result- Most highly rated shows are currently watched less, with few exceptions
#-------------------------------------------------------------------------------------------------------------------------#
##8. Rating vs wantWatch (scatter)
plot(x= Anime1$rating, y= Anime1$wantWatch,
xlab = "Rating",
ylab = "wantWatch",
main = "Rating vs wantWatch")
#Result- Most highly rated shows are on the watchlist of 0-10000 users
#-------------------------------------------------------------------------------------------------------------------------#
##9. Rating vs dropped (scatter)
plot(x= Anime1$rating, y= Anime1$dropped,
xlab = "Rating",
ylab = "dropped",
main = "Rating vs dropped")
#Result- Most highly rated shows are dropped less, relative to others
#-------------------------------------------------------------------------------------------------------------------------#
##10. Rating vs votes (scatter)
plot(x= Anime1$rating, y= Anime1$votes,
xlab = "Rating",
ylab = "votes",
main = "Rating vs votes")
#Result- Most highly rated shows receive fewer votes; a gradual upward trend
#        in votes is visible for a few shows
#-------------------------------------------------------------------------------------------------------------------------#
##11. Rating by ongoing flag
# NOTE(review): this boxplot is an exact duplicate of ##4 above -- consider
# removing one of the two.
boxplot(rating~ongoing,
data = Anime1,
xlab = "Rating",
ylab = "ongoing",
main = "Rating vs ongoing")
#*************************************************************************************************************************#
#Step 6- Exploratory Data Analysis (statistical tests)
#6.1 Correlation tests
# Pearson's r measures the linear dependence between two continuous
# variables; it is a parametric test (assumes both variables are roughly
# normal). A p-value < 0.05 means the correlation is significant.
# Computed here with cor.test(); the matrix version uses cor().
#*************************************************************************************************************************#
##1. rating vs eps
Eps_Rate <- cor.test(Anime$rating, Anime$eps,
method = "pearson")
Eps_Rate
#Result- p < 2.2e-16 (significant at alpha = 0.05);
#        r = 0.111 -> very weak positive correlation
#-------------------------------------------------------------------------------------------------------------------------#
##2. rating vs duration
Dur_Rate <- cor.test(Anime$rating, Anime$duration,
method = "pearson")
Dur_Rate
#Result- p < 2.2e-16 (significant); r = 0.284 -> weak positive correlation
#-------------------------------------------------------------------------------------------------------------------------#
##3. rating vs watched
Wat_Rate <- cor.test(Anime$rating, Anime$watched,
method = "pearson")
Wat_Rate
#Result- p < 2.2e-16 (significant); r = 0.427 -> moderate positive correlation
#-------------------------------------------------------------------------------------------------------------------------#
##4. rating vs watching
Watchn_Rate <- cor.test(Anime$rating, Anime$watching,
method = "pearson")
Watchn_Rate
#Result- p < 2.2e-16 (significant); r = 0.331 -> weak positive correlation
#-------------------------------------------------------------------------------------------------------------------------#
##5. rating vs wantWatch
Want_Watch_Rate <- cor.test(Anime$rating, Anime$wantWatch,
method = "pearson")
Want_Watch_Rate
#Result- p < 2.2e-16 (significant); r = 0.549 -> moderate positive correlation
#-------------------------------------------------------------------------------------------------------------------------#
##6. rating vs dropped
Drop_Rate <- cor.test(Anime$rating, Anime$dropped,
method = "pearson")
Drop_Rate
#Result- p < 2.2e-16 (significant); r = 0.329 -> weak positive correlation
#-------------------------------------------------------------------------------------------------------------------------#
##7. rating vs votes
Vote_Rate <- cor.test(Anime$rating, Anime$votes,
method = "pearson")
Vote_Rate
#Result- p < 2.2e-16 (significant); r = 0.415 -> moderate positive correlation
## Full correlation matrix plotted with corrplot
#install.packages("corrplot")
library(corrplot)
Anime_Cor = cor(Anime[,c("eps","duration","watched","watching","wantWatch","dropped","rating","votes")])
corrplot(Anime_Cor, method = "number")
# watched, wantWatch and votes have |r| > 0.40 with rating, so they will be
# log-transformed later; eps and duration show little correlation and are
# dropped below.
# NOTE(review): this drop is PERMANENT -- any later code that still
# references Anime$eps or Anime$duration will error.
Anime<- subset(Anime, select =-c(eps,duration))
View(Anime)
#*************************************************************************************************************************#
#6.2 ANOVA tests
# ANOVA (Analysis of Variance) tests for differences in the mean of a
# continuous outcome across the levels of categorical predictors.
# NOTE(review): tags has a very large number of distinct levels, so its term
# consumes many degrees of freedom -- confirm it belongs in this model.
Anov <- aov(rating ~ mediaType+ongoing+tags, data = Anime)
summary(Anov)
#Result- p < 2.2e-16 for the terms (below alpha = 0.05), i.e. rating differs
#        significantly across the levels of these categorical variables; the
#        F statistics exceed their critical values, so the effects are
#        significant at the 0.05 level.
#*************************************************************************************************************************#
#6.3 Check for non-linearity (on the backed-up data)
pairs.panels(Cont_Var,col ="red")
pairs.panels(Cat_Var,col ="red")
View(Anime)
# Log-transform the count variables most correlated with rating (>0.40)
# to reduce their heavy right skew before modelling.
Anime_Cor_1 = cor(Anime[,c("watched","wantWatch","watching", "dropped", "rating","votes")])
corrplot(Anime_Cor_1, method = "number")
Anime$watched<-log(Anime$watched)
Anime$wantWatch<-log(Anime$wantWatch)
Anime$votes<-log(Anime$votes)
View(Anime)
hist(Anime$wantWatch)
# Replace -Inf (produced by log(0)) with 1.
# NOTE(review): replacing log(0) with 1 corresponds to a raw count of e
# (~2.72); replacing with 0 (raw count of 1) may be more natural -- confirm.
Anime$watched[Anime$watched == -Inf]<- 1
Anime$wantWatch[Anime$wantWatch == -Inf]<- 1
# BUG FIX: votes was also log-transformed above but its -Inf values were
# never replaced, which would break cor()/lm() downstream.
Anime$votes[Anime$votes == -Inf] <- 1
# BUG FIX: eps and duration were dropped from Anime earlier (weak
# correlation), so selecting them here raised an error; select only the
# columns that still exist.
Cont1_Var <- subset(Anime, select = c(watching, watched, wantWatch, dropped, rating, votes))
Cont1_Var
pairs.panels(Cont1_Var,col ="red")
#*************************************************************************************************************************#
# Task 2
#Step 7- Model Development
# Keep an untouched copy of the modelling data.
data<- Anime
#7.1 Split the data set into train (70%) and test (30%) sets
set.seed(123)
library(caTools)
# BUG FIX: sample.split() expects the OUTCOME VECTOR, not the whole data
# frame. Passing the data frame produced a split vector of length
# ncol(Anime) that subset() then silently recycled across rows, yielding a
# meaningless split. Splitting on the outcome also preserves its
# distribution across train/test.
# NOTE(review): the name `sample` shadows base::sample -- kept for
# compatibility with the subset() calls below.
sample<- sample.split(Anime$rating, SplitRatio = 0.70)
#-------------------------------------------------------------------------------------------------------------------------#
#7.2 Define train & test data
Anime_train<- subset(Anime, sample == TRUE)
Anime_test<- subset(Anime, sample == FALSE)
#-------------------------------------------------------------------------------------------------------------------------#
#7.3 Row counts of the respective datasets
nrow(Anime_train)
nrow(Anime_test)
names(Anime)
#-------------------------------------------------------------------------------------------------------------------------#
#7.4 Create linear regression models (iterative backward refinement)
# Baseline model with all remaining predictors.
# NOTE(review): `.` includes tags, a very high-cardinality variable -- this
# fit may be slow and produce a huge coefficient table.
Anime_fit<- lm(rating ~., data = Anime_train)
## Step 1 - explicit full model
# BUG FIX: eps and duration were dropped from Anime earlier in the script, so
# they can no longer appear in the formula (lm() errored on the missing
# columns); the remaining predictors are kept.
Anime_fit_1 <- lm(rating ~ mediaType + ongoing +
tags + watched+ watching+ wantWatch +
dropped + votes, data = Anime_train)
summary(Anime_fit_1)
#-------------------------------------------------------------------------------------------------------------------------#
## Step 2 - removing tags
Anime_fit_1 <- lm(rating ~ mediaType + ongoing +
watched + wantWatch + watching +
dropped + votes, data = Anime_train)
summary(Anime_fit_1)
#-------------------------------------------------------------------------------------------------------------------------#
## Step 3 - keep only selected mediaType levels as indicator terms
Anime_fit_1 <- lm(rating ~ I(mediaType== "Movie") + I(mediaType== "Music Video") + I(mediaType== "TV Special")+
I(ongoing== "Yes") + watched + wantWatch + watching +
dropped + votes,
data = Anime_train)
summary(Anime_fit_1)
#-------------------------------------------------------------------------------------------------------------------------#
## Step 4 - removing votes
Anime_fit_1 <- lm(rating ~ I(mediaType== "Movie") + I(mediaType== "Music Video") + I(mediaType== "TV Special")+
I(ongoing== "Yes") + watched + wantWatch + watching +
dropped,
data = Anime_train)
summary(Anime_fit_1)
#-------------------------------------------------------------------------------------------------------------------------#
# Check VIF values for multicollinearity
vif(Anime_fit_1)
#Results- watched and wantWatch have high VIF, hence dropped from the model
#-------------------------------------------------------------------------------------------------------------------------#
## Step 5 - removing wantWatch (high VIF, correlation > .40)
Anime_fit <- lm(rating ~ I(mediaType== "Movie") +
I(ongoing== "Yes") + watched + watching +
dropped,
data = Anime_train)
summary(Anime_fit)
#Result- p value of watching is >0.05, hence to be removed
crPlots(Anime_fit)
#-------------------------------------------------------------------------------------------------------------------------#
## Step 6 - removing watching
# BUG FIX: eps and duration removed from this formula as well -- the columns
# no longer exist in Anime_train, so the original call errored.
Anime_fit <- lm(rating ~ I(mediaType== "Movie") + I(mediaType== "Music Video") +I(mediaType== "Other") +
I(mediaType== "OVA") + I(mediaType== "Web")+
I(ongoing== "Yes") +
wantWatch + dropped ,
data = Anime_train)
summary(Anime_fit)
# NOTE(review): abline() on a multi-predictor lm only uses the first two
# coefficients and warns -- confirm this overlay is wanted.
abline(Anime_fit,lwd = 3, col = "red")
ggplot(Anime_train, aes(rating, dropped) ) +
geom_point() +
stat_smooth()
# Quadratic variant of the model.
# BUG FIX: I(duration^2) removed (column no longer exists).
# NOTE(review): squaring a logical indicator is a no-op (TRUE^2 == 1), so the
# I((...)^2) terms on the indicators are identical to the linear terms.
Anime_fit_2 <- lm(rating ~ I((mediaType== "Movie")^2) + I((mediaType== "Music Video")^2) +I((mediaType== "Other")^2) +
I((mediaType== "OVA")^2) + I((mediaType== "Web")^2)+
I((ongoing== "Yes")^2) +
I(wantWatch^2) + I(dropped^2), data = Anime_train)
summary(Anime_fit_2)
#*************************************************************************************************************************#
#About the results
#*************************************************************************************************************************#
#The coefficient table indicates a weak negative relationship between Rating
#and mediaType(Movie, Music Video, Other, Web) and dropped, and a weak
#positive relationship between Rating and mediaType(OVA), ongoing(Yes) and
#wantWatch.
#Residual standard error (RSE).
#The RSE (model sigma) is the prediction error: roughly the average
#difference between the observed outcome values and the model's predictions.
#The lower the RSE, the better the model fits the data.
#Dividing the RSE by the mean of the outcome gives the prediction error rate,
#which should be as small as possible.
# NOTE(review): the RSE figures quoted in this section (0.6784, 0.5969,
# 0.5939, 0.5915) are mutually inconsistent -- re-read the value from
# summary(Anime_fit) and use a single figure throughout.
mean(Anime_train$rating) #3.049757
Error_Rate <-0.5969/mean(Anime_train$rating)
Error_Rate
#Error rate = RSE / mean(rating) ~= 0.19 (about 19%), which is low.
#-------------------------------------------------------------------------------------------------------------------------#
#R-squared
#R2 ranges from 0 to 1 and is the proportion of variation in the outcome
#explained by the predictors. In simple regression it is the squared Pearson
#correlation between outcome and predictor; in multiple regression it is the
#squared correlation between observed and predicted values. Higher is better.
#Result- R2 = 0.5764, i.e. a moderately adequate fit.
#-------------------------------------------------------------------------------------------------------------------------#
#Adjusted R squared
#Adjusted R2 penalises R2 for the number of predictors in the model, so it is
#the fairer measure for multiple regression. A value near 1 means most of the
#outcome's variability is explained; near 0 means little is explained.
#Result- Adjusted R2 = 0.5756, i.e. a moderately adequate fit.
#-------------------------------------------------------------------------------------------------------------------------#
#F-Statistic:
#The F-statistic tests the overall significance of the model: whether at
#least one predictor has a non-zero coefficient. A large F corresponds to a
#statistically significant p-value (p < 0.05).
#Results- F = 706.9 with p < 2.2e-16, which is highly significant.
#**************************************************************************************************************************#
#7.5 Estimate the model performance
#Display the residuals against the standard diagnostic measures
#(residuals-vs-fitted, Q-Q, scale-location, leverage).
# NOTE(review): par(mfrow = c(2,2)) is never reset afterwards; consider
# saving and restoring the old par settings.
par(mfrow=c(2,2))
plot(Anime_fit)
#Step 8- Check model validation using correlation plots
#Result- Several variables are highly correlated, which affects model accuracy
#8.2 Get the predicted (fitted) values
# NOTE(review): the bare fitted() call prints the full fitted vector to the
# console -- probably only the assignment on the next line is needed.
fitted(Anime_fit)
Anime_train$pred <- fitted(Anime_fit)
#Plot actual vs predicted for the linear model
plot(Anime_train$pred,Anime_train$rating, main= "Actual vs predictive for train data")
abline(a=0, b=1)
# Same plot for the quadratic model (Anime_fit_2)
Anime_train$pred_1<- fitted(Anime_fit_2)
plot(Anime_train$pred_1,Anime_train$rating, main= "Actual vs predictive for train data")
abline(a=0, b=1)
#Result- Ratings in the 1-4 range concentrate around predicted values of 3-4
#Result- For the rating values between range of 1-4, they are highly concentrated towards the predicted values of 3-4
#**************************************************************************************************************************#
#Step 9- Check respective metrics- MAPE, MdAPE, RMSE
#9.1-checking MAPE
#MAPE- One of the most common metrics used to measure the forecasting accuracy of a model is MAPE,
# which stands for mean absolute percentage error.
attach(Anime_train)
mape(Anime_train$rating,Anime_train$pred)
#Results - The MAPE value is 0.1857621 or 18.57% which means the average difference between
# the predicted value and the actual value is 18.57%, which is acceptable
Mean_Accuracy_train = (1-mape(Anime_train$rating,Anime_train$pred))*100
Mean_Accuracy_train
# In other words the mean accuracy thus is (100-MAPE)= (100-18.57)% = 81.42379
#-------------------------------------------------------------------------------------------------------------------------#
#9.2-checking Median APE
#MdAPE- The Median Absolute Percentage Error (MdAPE) is found by ordering the absolute percentage error (APE) from the smallest to the largest, and using its middle value
# (or the average of the middle two values if N is an even number) as the median
#MdAPE is more resilient to outliers than MAPE
#MdAPE is less intuitive, for example an MdAPE of 8% does not mean that the average absolute percentage error is 8%.
# Instead it means that half of the absolute percentage errors are less than 8% and half are over 8%
median(abs((Anime_train$rating-Anime_train$pred)/Anime_train$rating))
#Results- The MdAPE value is 0.123073 or 12.30%, which means half of the absolute percentage errors are less than 15.38%
# while the rest is over 12.30%
Median_Accuracy_train = (1-median(abs((Anime_train$rating-Anime_train$pred)/Anime_train$rating)))*100
Median_Accuracy_train
#Thus Median Accuracy is 87.92226
#-------------------------------------------------------------------------------------------------------------------------#
#9.3-checking RMSE
#Root Mean Square Error-It is the standard deviation of the residuals (prediction errors).
# RMSE is a measure of how spread out these residuals are.
# It shows how concentrated the data is around the line of best fit.
rmse(Anime_train$rating,Anime_train$pred)
#Result- RMSE value is0.5908401,thus >0.5 reflects the poor ability of the model to accurately predict the data, which is observed from this dataset
#-------------------------------------------------------------------------------------------------------------------------#
#9.4 Residuals
#A residual is the vertical distance between a data point and the regression
#line (positive above the line, negative below). Studentized residuals for
#the final model:
Anime_train$res<-studres(Anime_fit)
Anime_train$res
# Install-on-demand: only hit CRAN when the package is actually missing, so
# re-running the script does not attempt a network install every time.
if (!requireNamespace("gvlma", quietly = TRUE)) install.packages("gvlma")
library(gvlma)
# gvlma(): global validation of linear model assumptions.
gvmodel<- gvlma(Anime_fit)
summary(gvmodel)
if (!requireNamespace("olsrr", quietly = TRUE)) install.packages("olsrr")
library(olsrr)
# Residual diagnostics for the quadratic model (Anime_fit_2).
ols_plot_resid_qq(Anime_fit_2)
ols_plot_resid_fit(Anime_fit_2)
ols_plot_resid_hist(Anime_fit_2)
ols_test_normality(Anime_fit_2)
##################################### Checking of Assumption ############################################
#Step 10- Check for Assumptions
#Residuals should be uncorrelated (no autocorrelation). The Durbin-Watson
#statistic should be close to 2; values below 1 or above 3 are a concern,
#and a high p-value is expected under the null of uncorrelated residuals.
#10.1 Check for multicollinearity
#Multicollinearity occurs when independent variables in a regression model
#are correlated with each other; they should be independent.
#VIF (Variance Inflation Factor) measures the impact of collinearity:
#1 = not correlated; 1-5 = moderately correlated; >5 = highly correlated.
vif(Anime_fit)
#Conclusion- VIF values are acceptable (< 5) for all terms in the final
# model, so multicollinearity is not a concern.
# NOTE(review): the original conclusion mentioned eps and duration, which
# were dropped from the data earlier and are not in Anime_fit -- re-read the
# vif() output and update this conclusion.
#**************************************************************************************************************************#
#10.2 Check for autocorrelation
#Autocorrelation is correlation between values of the same variable across
#different observations.
#H0: the residuals are not correlated (independent)
#H1: the residuals are correlated
#The Durbin-Watson statistic lies between 0 and 4: 2.0 means no
#autocorrelation; <2 indicates positive and >2 negative autocorrelation.
durbinWatsonTest(Anime_fit)
#Conclusion- D-W statistic = 1.062099 (< 2), so positive autocorrelation exists
#**************************************************************************************************************************#
#10.3 Check for heteroscedasticity
#Heteroscedasticity is a systematic change in the spread of the residuals
#over the range of fitted values, often driven by outliers.
#Breusch-Pagan test: tests whether the error variance depends on the
#predictors (assumes normally distributed errors). If p < 0.05, the null of
#homoskedasticity is rejected and heteroskedasticity is assumed.
bptest(Anime_fit)
#Result- BP test p < 2.2e-16 (< 0.05): the null of homogeneous error
#        variance is rejected, so heteroscedasticity exists
#Cook-Weisberg (NCV) test --- car package
#Tests constant error variance against the alternative that the variance
#changes with the level of the response; p should exceed 0.05.
ncvTest(Anime_fit)
#Result- NCV test p = 0.00084771 (< 0.05): heteroscedasticity exists
#**************************************************************************************************************************#
#10.4 -Test for normality
#Normality- A normality test is used to determine whether sample data has been drawn from a normally distributed population (within some tolerance).
#Null hypothesis - The errors should be normally distributed
# expected- the p value should be more than 0.05
#Anderson-Darling Test -The Anderson-Darling test is a statistical test of whether a given
# sample of data is drawn from a given probability distribution.
# The test rejects the hypothesis of normality when the p-value is less than or equal to 0.05.
# Failing the normality test allows you to state with 95% confidence the data does not fit the normal distribution.
Anime_resids <- Anime_fit$residuals
ad.test(Anime_resids)
#Conclusion- P value is 8.232e-11, which is <0.05, hence means that with 95% confidence the data does not fit the normal distribution
#**************************************************************************************************************************#
#Step 11- Check for the Residuals and their Statistics
#distribution of residuals
#11.1-Histogram
hist(Anime_resids)
#Result- The residuals show skewness towards left for the residuals
#-------------------------------------------------------------------------------------------------------------------------#
#11.2-Quantile- Quantile plot~ Q-Q plot, is a graphical tool to help us assess if a set of data plausibly came from some theoretical distribution such as a Normal, exponential or Uniform distribution.
# Also, it helps to determine if two data sets come from populations with a common distribution.
# In other words,it is a scatterplot created by plotting two sets of quantiles against one another
qqnorm(Anime_resids)
qqline(Anime_resids)
#Result- If both sets of quantiles came from the same distribution, we should see the points forming a line that's roughly straight.
# Similarly here as well, the plot is a straight line
# As we can see from this plot the errors follow the straight line well so we can say this assumption is met.
####Testing the model on test data ###
#Test the model on the remaining dataset.
Anime_test$pred<- predict(Anime_fit, Anime_test)
attach(Anime_test)
plot(pred,rating, main= "Actual vs predictive for test data")
abline(a=0, b=1)
#Result - Most of variations can be seen between 2-4 values of the variables
#-------------------------------------------------------------------------------------------------------------------------#
#Check for Mean APE
mape(Anime_test$rating,Anime_test$pred)
mean(abs((rating-pred)/rating))
#Result- The MAPE value is 18.56% which means
# the average difference between the predicted value and the actual value is 22.16%.
Mean_Accuracy_test = (1-mape(Anime_test$rating,Anime_test$pred))*100
Mean_Accuracy_test
## The mean accuracy thus is 81.43577
#-------------------------------------------------------------------------------------------------------------------------#
#Check for Median APE
median(abs((Anime_test$rating-Anime_test$pred)/Anime_test$rating))
#Result- The MdAPE value is 0.1204413 or 12.04%, which means half of the absolute percentage errors are less than 15.38%
# while the rest is over 12.04%
Median_Accuracy_test = (1-median(abs((Anime_test$rating-Anime_test$pred)/Anime_test$rating)))*100
Median_Accuracy_test
#Thus Median Accuracy is 87.95587
#-------------------------------------------------------------------------------------------------------------------------#
#Check for RMSE
rmse(Anime_test$rating,Anime_test$pred)
#Result- RMSE value is 0.5930023,thus =0.5 reflects the poor ability of the model to accurately predict the data, which is observed from this dataset
#**************************************************************************************************************************#
#check for residuals
Anime_test$res<- Anime_test$rating-Anime_test$pred
#-------------------------------------------------------------------------------------------------------------------------#
#Plot the residuals
plot(Anime_test$res)
#Results- Residuals are homogeneously scattered forming a band
#-------------------------------------------------------------------------------------------------------------------------#
hist(Anime_test$res)
#Results- The residuals show normal distribution of the data
#-------------------------------------------------------------------------------------------------------------------------#
qqnorm(Anime_test$res)
qqline(Anime_test$res)
#Results- If both sets of quantiles came from the same distribution, we should see the points forming a line that's roughly straight.
# Similarly here as well, the plot is a straight line
#As we can see from this plot the errors follow the straight line well so we can say this assumption is met.
#**************************************************************************************************************************#
#Conclusion- Thus from the Metrics values like RSE which is considerably low, but Rsquare and adjusted R square value
# state that the predictor variables are not highly significant as per the model is concerned and both having value ~ 44%
#The assumptions show the dataset being highly skewed, having multiple correlations and collinearity,and deviates from Normal Distribution as per the oevrall frequency being concerned,
# even after treatment of outliers, missing values
#The MAPE and MdAPE values are approximately 22% and 15% which means the Accuracy being at a considerable range of 75-80%
# Business Recommendation
#The overall attributes used in the model as we have seen, had weak to moderate correlation with the target variable ie., Rating
#Hence, it is difficult to provide business recommendation from that point of View.
#But from the overall seasons of Release, as we have observed, that Shows with Ratings Greater than 4, had their Release around Fall and Spring
#From the Media Types, Shows with rating >4, are being released on TV the most |
b5a8ad56c5489187c49ca7d3677656231ee3c6d4 | f29a2a607de04e1c051f36cd964e6896dd4d4876 | /capitulo03/scriptR/Problema-10.R | d0a0581b74a3e14183639363b3efb9d8b7f1a83e | [] | no_license | mmejiap/Simulacion-y-modelamiento | 6905dd6a24c7fdd112141c424012beca2d6fd0da | d97f0ac5e156bfa4da157894f063a1ffcfd1fdd6 | refs/heads/master | 2020-07-19T17:52:46.576040 | 2019-10-09T23:12:06 | 2019-10-09T23:12:06 | 206,489,902 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 227 | r | Problema-10.R | rm(list = ls())
# Problema 10: Monte Carlo estimate of Cov(U, exp(U)) for U ~ Uniform(0, 1),
# tracked as a running estimate while the sample size grows from 1 to k.
# Analytically Cov(U, e^U) = 1 - (e - 1)/2, approximately 0.1409.
k <- 1000
n <- seq_len(k)
u <- runif(k, 0, 1)
# Running sample covariance over the first n draws: E[U e^U] - E[U] E[e^U].
# FIX: renamed from `cov` (which masked stats::cov) and switched the
# top-level `=` assignment to the conventional `<-`.
cov_est <- cumsum(u * exp(u)) / n - (cumsum(u) / n) * (cumsum(exp(u)) / n)
plot(cov_est, type = 'l', xlab = 'n', ylab = 'valor', main = 'Problema 10')
print(paste("Covarianza -> ", cov_est[length(u)]))
211a954a77439fa2d851d7e4279537fb96d37660 | bb35e7e2662ce999c707c176e3784e0ba610c0e1 | /man/tile_merge.Rd | 0a8616ad5fac4368936b35bdc36858d73d193b43 | [] | no_license | kapitzas/WorldClimTiles | a447816c44210c65a7099bff3e992244f92ea149 | c84f06070dfeb45e91a35583979c36f0dde9f055 | refs/heads/master | 2021-07-21T09:48:33.980355 | 2021-07-07T08:49:37 | 2021-07-07T08:49:37 | 176,862,630 | 4 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,743 | rd | tile_merge.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tile_merge.R
\name{tile_merge}
\alias{tile_merge}
\title{Merge worldclim data.}
\usage{
tile_merge(tiles, fac)
}
\arguments{
\item{tiles}{List with 0.5 arc min WorldClim tile stacks (or single rasters) (i.e. produced by \code{tile_get}). Names and number of variables need to match between stacks.}
\item{fac}{Factor by which resolution is changed, see details.}
}
\value{
RasterStack or Raster containing merged WorldClim variable(s).
}
\description{
This function merges tiles downloaded from WorldClim into a single raster.
}
\details{
\code{raster::merge} is used for merging, at default settings. That means that in case of overlaps the cell values of the object that comes first in the sequence are retained. Raster resolution can be changed to speed up merging of very high resolution data. For example, when data are SRTM tiles, \code{fac = 10} will resample to a resolution of 0.5 arcmin (bilinear), which corresponds to the resolution of WorldClim data.
}
\examples{
boundary <- getData("GADM", country = "FRA", level = 0) #Downloads France boundaries
tilenames <- tile_name(boundary, name = 'worldclim') #Determines names of the worldclim tiles covering France
srtmtilenames <- tile_name(boundary, name = 'srtm') #Determines names of the srtm tiles covering France
wctiles <- tile_get(tiles = tilenames, name = 'worldclim', var = "bio") #downloads WorldClim tiles covering France
srtmtiles <- tile_get(tiles = srtmtilenames, name = 'srtm') #downloads SRTM tiles covering France
wcmerged <- tile_merge(wctiles)
srtmmerged <- tile_merge(srtmtiles)
srtmmerged <- tile_merge(srtmtiles, fac = 10)
}
\author{
Simon Kapitza \email{simon.statecology@gmail.com}
}
|
553f1af82fa2ebe6ef51b223e2132759904ea152 | b1217b56f950ba7425c6a406041585fb97547fff | /tests/testthat/test-cached-post.R | c6a0616b89884035634ed421e14817a78e66f6d9 | [] | no_license | cran/httpcache | fcc2df0579135531729ccd1a84d1153ea8387dfc | 162cc3b635a0627d29e938965cd3c77de8341066 | refs/heads/master | 2021-05-04T11:23:06.272418 | 2021-01-10T22:10:02 | 2021-01-10T22:10:02 | 52,335,838 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,082 | r | test-cached-post.R | context("cachedPOST")
# Tests for cachedPOST(): POSTs made through cachedPOST() are stored in the
# query cache and replayed without a network request, keyed on the URL plus
# "?POST" and, when a body is supplied, an MD5 digest of that body.
# Relies on httptest-style fixtures (with_fake_http, without_internet,
# expect_POST/expect_GET, expect_no_request) and this package's helpers
# (public, clearCache, cacheKeys, uncached).
public({
    clearCache()
    # Making the requests populates the cache with one key per distinct
    # (URL, body) pair. `a` and `b` are assigned with `<<-` so the later
    # test blocks can compare cached replays against these responses.
    test_that("Cache gets set on cachedPOST", {
        expect_length(cacheKeys(), 0)
        with_fake_http({
            expect_POST(a <<- cachedPOST("https://app.crunch.io/api/"),
                "https://app.crunch.io/api/")
            expect_POST(b <<- cachedPOST("https://app.crunch.io/api/",
                body='{"user":"me"}'),
                'https://app.crunch.io/api/ {"user":"me"}')
        })
        expect_length(cacheKeys(), 2)
        # The second key embeds the MD5 digest of the request body.
        expect_true(setequal(cacheKeys(),
            c("https://app.crunch.io/api/?POST",
            "https://app.crunch.io/api/?POST&BODY=aec2de8a85873530777f26424e086337")))
        expect_identical(a$url, "https://app.crunch.io/api/")
        expect_identical(content(b), list(user="me"))
    })
    without_internet({
        # A cache hit must not touch the network at all.
        test_that("When the cache is set, can read from it even with no connection", {
            ## Now read from cache
            expect_no_request(
                expect_identical(cachedPOST("https://app.crunch.io/api/"), a)
            )
            expect_no_request(
                expect_identical(cachedPOST("https://app.crunch.io/api/",
                    body='{"user":"me"}'), b)
            )
        })
        # uncached() forces the request through, bypassing stored entries.
        test_that("But uncached() prevents reading from the cache", {
            uncached({
                expect_POST(cachedPOST("https://app.crunch.io/api/"),
                    "https://app.crunch.io/api/")
                expect_POST(cachedPOST("https://app.crunch.io/api/",
                    body='{"user":"me"}'),
                    'https://app.crunch.io/api/ {"user":"me"}')
            })
        })
        # The POST cache is keyed separately from GETs...
        test_that("GETs don't read from cachedPOST cache", {
            expect_GET(uncached(GET("https://app.crunch.io/api/")),
                "https://app.crunch.io/api/")
        })
        # ...and a different body hashes to a different cache key.
        test_that("And POSTs with different payloads don't read the wrong cache", {
            expect_POST(cachedPOST("https://app.crunch.io/api/", body="wrong"),
                "https://app.crunch.io/api/ wrong")
        })
    })
})
58d5b258836c57512e706fb12192fc054ead49a7 | 4fc01b0aa267a461f9a64e74fc67873aab4d3007 | /run_analysis.R | a89ee54cb3108536f806385d05841025331c129d | [] | no_license | trponeill/tidy_data | 34757b9cc04cf4dfc1f2e45a184245d70a97c1a1 | 2dae955c1df503197ef076d20f35abd7327adbb4 | refs/heads/master | 2016-09-06T05:19:52.568762 | 2014-09-21T15:19:47 | 2014-09-21T15:19:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,565 | r | run_analysis.R |
###run_analysis.R
# Build a tidy summary of the UCI HAR (Samsung smartphone) dataset:
#  1. read the raw test/train measurement, activity and subject files,
#  2. combine the test and train partitions,
#  3. keep only the mean/std features and attach descriptive activity names,
#  4. expand the abbreviated column names,
#  5. average every variable by activity and subject, write tidy_data.txt.
# Expects the unzipped "UCI HAR Dataset" folder in the working directory.
library(plyr)
library(dplyr)
library(data.table)
# Read the raw files (whitespace-separated, no headers)
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE, sep="")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE, sep="")
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE, sep="")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt", header = FALSE, sep="")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE, sep="")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE, sep="")
features <- read.table("./UCI HAR Dataset/features.txt", header = FALSE, sep="")
activity <- read.table("./UCI HAR Dataset/activity_labels.txt", header = FALSE, sep="")
# Combine the test and train partitions row-wise
Xdata <- rbind(X_test, X_train)
ydata <- rbind(y_test, y_train)
subjectData <- rbind(subject_test, subject_train)
# Attach meaningful column names
colnames(Xdata) <- features$V2                       # feature names from features.txt
colnames(activity) <- c("activityID", "activityType")
colnames(ydata) <- "activityID"
colnames(subjectData) <- "subjectID"
# Keep only the mean/std features. Note contains() matches
# case-insensitively, so "meanFreq" and "...Mean" columns are kept too.
XdataSubset1 <- select(Xdata, contains("mean"))
XdataSubset2 <- select(Xdata, contains("std"))
XdataSubset <- cbind(XdataSubset1, XdataSubset2)
# Attach descriptive activity names (plyr::join), then drop the numeric ID
ydata <- arrange(join(ydata, activity), activityID)
ydata$activityID <- NULL
# One combined data set: activity, subject, selected features
DataSet <- cbind(ydata, subjectData, XdataSubset)
# Expand abbreviated column names.
# FIX: use a local `col_labels` instead of `names`, which masked base::names.
col_labels <- names(DataSet)
col_labels <- gsub("mean", "Mean", col_labels)
col_labels <- gsub("std", "StdDev", col_labels)
col_labels <- gsub("\\()", "", col_labels)
col_labels <- gsub("^(t)", "Time", col_labels)
col_labels <- gsub("tBody", "TimeBody", col_labels)
col_labels <- gsub("^(f)", "Freq", col_labels)
col_labels <- gsub("BodyBody", "Body", col_labels)
setnames(DataSet, col_labels)
# Average every variable by activity and subject.
# (summarise_each()/funs() are deprecated in modern dplyr but kept for
# compatibility with the dplyr version this script was written against.)
tidyDataSet <- tbl_df(DataSet)
tidyDataSet <- group_by(tidyDataSet, activityType, subjectID)
tidyDataSet <- summarise_each(tidyDataSet, funs(mean))
# Write the tidy data set.
# FIX: write.table()'s argument is `row.names`; the original `row.name`
# only worked through partial argument matching.
write.table(tidyDataSet, file = "./tidy_data.txt", row.names = FALSE)
|
16b5d36297abef1135ef8c7fc0cd7c58697f1f3a | 4d12d0314f6f156bcae0dc82c3c03dcc7cba813b | /plot3.R | 086e21cd0e5fdbef0eb16ede260cb218108b62df | [] | no_license | BreizhZut/ExData_Plotting1 | 8a7d5c4c9c9d9278da6f44aa8b76c56e91ddfddd | 1779532e40679379b0d040c009ab60445cec43c0 | refs/heads/master | 2021-01-17T08:53:50.610284 | 2016-05-09T06:03:13 | 2016-05-09T06:03:13 | 58,339,801 | 0 | 0 | null | 2016-05-09T01:22:56 | 2016-05-09T01:22:56 | null | UTF-8 | R | false | false | 1,762 | r | plot3.R | ## We need package lubridate to convert character into time
library(lubridate)
library(dplyr)
## write the file to read into a variable
fileHCG <- "household_power_consumption.txt"
## test for existance
if(!file.exists(fileHCG)){
stop(paste("File ",fileHCG,"is not found in working directory"))
}
## Let's read the header first
headHCG <- read.table(fileHCG,header=TRUE,sep=";",nrows=1)
## From previous test (check README.md for details) we know the
## they are 2880 rows with dates 2007-02-01 and 2007-02-02 (as d/m/yyyy)
## they were written in the file in sequence starting at row 66637
dataHCG <- read.table(fileHCG,header=TRUE,sep=";",skip=66636,nrows=2880,col.names = names(headHCG))
## combine date and time into a new data.frame of class time
datetime <- data.frame(datetime=dmy_hms(paste(dataHCG$Date,dataHCG$Time)))
## Add datetime to the orignal data.frame as the first column
dataHCG <- tbl_df(bind_cols(datetime,dataHCG))
## Clean up temporary variables
rm(headHCG)
rm(datetime)
## Plot Energy sub metering as a function of time
## Open png device
png(filename="plot3.png")
## Make the plot
## Initialise plot with Sub_metering_1
with(dataHCG,
plot(datetime,Sub_metering_1,type="l",
xlab="",ylab ="Energy sub metering"
)
)
## Add Sub_metering_2 in red
with(dataHCG,
points(datetime,Sub_metering_2,
type="l",col="red"
)
)
## Add Sub_metering_3 in blue
with(dataHCG,
points(datetime,Sub_metering_3,
type="l",col="blue"
)
)
## Add legend
## Since I don't feel like type I use the grep command to generate the labels
legend("topright",
legend=grep("Sub_metering",names(dataHCG),value = TRUE),
col=c("black","red","blue"),lty=c(1,1,1)
)
## Close png device
dev.off() |
be259481c7f3b7002a9ab0ef5b200638c8a44185 | 0ea92a0e1eace26312972c5d2be22ae49b59c99c | /R/modeling/json/om_nhd_model_utils.R | 0b285c8378da1afeda38343b253f0986c4e3e276 | [] | no_license | HARPgroup/vahydro | 852425ccf9271ebe2ff95fb6b48d8b7eb23e473f | 73ea02207eee96f574f24db95fad6a2f6a496e69 | refs/heads/master | 2023-09-04T00:11:29.732366 | 2023-09-01T18:13:46 | 2023-09-01T18:13:46 | 147,414,383 | 0 | 0 | null | 2023-09-08T13:52:26 | 2018-09-04T20:37:13 | R | UTF-8 | R | false | false | 7,302 | r | om_nhd_model_utils.R |
# Find the flowlines immediately upstream of `comid` in `nhd_network`:
# every record whose tonode matches the fromnode of the given comid.
# Returns the matching rows as a data frame (empty for a headwater reach).
# NOTE(review): `comid` is pasted directly into the SQL text, so it is
# assumed to be a trusted numeric NHD identifier.
nhd_next_up <- function (comid, nhd_network) {
  upstream_query <- paste(
      "select * from nhd_network 
      where tonode in (
      select fromnode from nhd_network 
      where comid = ", comid, 
      ")"
    )
  sqldf(upstream_query)
}
# Resolve a watershed's local drainage area in square miles from whichever
# area field `wshed_info` carries, checked in priority order:
#   1. local_area_sqmi  (already square miles)
#   2. area_sqmi        (already square miles)
#   3. areasqkm         (square km, converted at 0.386102 sq mi per sq km)
# Returns the numeric area, or FALSE (after a message) when none of the
# recognized area fields is present.
om_handle_wshed_area <- function(wshed_info) {
  info_fields <- names(wshed_info)
  if ("local_area_sqmi" %in% info_fields) {
    return(wshed_info$local_area_sqmi)
  }
  if ("area_sqmi" %in% info_fields) {
    return(wshed_info$area_sqmi)
  }
  if ("areasqkm" %in% info_fields) {
    return(wshed_info$areasqkm * 0.386102)
  }
  message("Cannot process. Watershed must have either local_area_sqmi or areasqkm")
  FALSE
}
# Build an OM "watershed container" property list for one model watershed.
#
# wshed_info: a list describing the watershed. Must contain either `name`
#   or `comid` (used to synthesize a name of the form "nhd_<comid>"), an
#   area field accepted by om_handle_wshed_area(), and `rchres_id`
#   (e.g. "RCHRES_R001") used to wire the inflow linkage path.
#
# Returns a nested property list (constants, equations, a ModelLinkage and
# a read ModelBroadcast), or FALSE with a message when required fields are
# missing.
#
# NOTE(review): `run_mode` and `flow_mode` are free variables resolved in
# the calling/global environment -- confirm they are defined before use.
om_watershed_container <- function(wshed_info) {
  if (!("name" %in% names(wshed_info))) {
    if ("comid" %in% names(wshed_info)) {
      wshed_info$name <- paste0('nhd_', wshed_info$comid)
    } else {
      message("Error: watershed info must have 'name' field")
      return(FALSE)
    }
  }
  wshed_props <- list(
    name = wshed_info$name,
    object_class = 'ModelObject'
  )
  area_sqmi <- om_handle_wshed_area(wshed_info)
  # FIX: om_handle_wshed_area() signals failure by returning FALSE;
  # propagate that instead of embedding FALSE as a drainage-area value.
  if (identical(area_sqmi, FALSE)) {
    return(FALSE)
  }
  if ("rchres_id" %in% names(wshed_info)) {
    # FIX: `[[` extracts the bare value; `[` returned a one-element list.
    rchres_id <- wshed_info[['rchres_id']]
  } else {
    message("Error: You must include 'rchres_id' (ex: RCHRES_R001) to add container. ")
    return(FALSE)
  }
  wshed_props[['drainage_area_sqmi']] <- list(
    name = 'drainage_area_sqmi',
    object_class = 'Constant',
    value = area_sqmi
  )
  wshed_props[['run_mode']] <- list(
    name = 'run_mode',
    object_class = 'Constant',
    value = run_mode
  )
  wshed_props[['flow_mode']] <- list(
    name = 'flow_mode',
    object_class = 'Constant',
    value = flow_mode
  )
  # inflow volume pulled from the HSPF reach state variable
  wshed_props[['IVOLin']] <- list(
    name = 'IVOLin',
    object_class = 'ModelLinkage',
    right_path = paste0('/STATE/', rchres_id, '/HYDR/IVOL'),
    link_type = 2
  )
  # Per the original author: a stopgap unit-area flow, only valid for
  # headwater segments until DSN 10 is in place.
  wshed_props[['Runit']] <- list(
    name = 'Runit',
    object_class = 'Equation',
    value = 'IVOLin / drainage_area_sqmi'
  )
  # pull local & upstream model inputs broadcast by child objects
  wshed_props[['read_from_children']] <- list(
    name = 'read_from_children',
    object_class = 'ModelBroadcast',
    broadcast_type = 'read',
    broadcast_channel = 'hydroObject',
    broadcast_hub = 'self',
    broadcast_params = list(
      list("Qtrib", "Qtrib"),
      list("trib_area_sqmi", "trib_area_sqmi"),
      list("child_wd_mgd", "wd_mgd")
    )
  )
  wshed_props
}
# Assemble the OM property list for a water-use facility.
# `facility_info` must carry a `name`; otherwise FALSE is returned after a
# message. The result is a ModelObject with a single outbound broadcast
# that publishes withdrawal (wd_mgd) and discharge (as ps_nextdown_mgd)
# to the parent watershed object.
om_facility_model <- function(facility_info) {
  if (!("name" %in% names(facility_info))) {
    message("Error: Facility info must have 'name' field")
    return(FALSE)
  }
  send_to_parent <- list(
    name = 'send_to_parent',
    object_class = 'ModelBroadcast',
    broadcast_type = 'send',
    broadcast_channel = 'hydroObject',
    broadcast_hub = 'parent',
    broadcast_params = list(
      list("wd_mgd", "wd_mgd"),
      list("discharge_mgd", "ps_nextdown_mgd")
    )
  )
  list(
    name = facility_info$name,
    object_class = 'ModelObject',
    send_to_parent = send_to_parent
  )
}
# Assemble a "nestable" watershed ModelObject: local drainage area, a read
# broadcast collecting tributary flow/area/withdrawal from child objects,
# simple routing equations (Qlocal/Qin/Qout), the cumulative drainage
# area, and a send broadcast publishing Qout and drainage area upward.
# `wshed_info` must supply `name` (or `comid`, from which "nhd_<comid>" is
# derived) plus an area field understood by om_handle_wshed_area().
# Returns the property list, or FALSE with a message when no name can be
# determined.
# NOTE(review): sibling Equation entries mix `value` and `equation` as the
# expression key (Qlocal uses `value`; Qin/Qout/drainage use `equation`);
# preserved verbatim -- confirm which key the model loader expects.
om_nestable_watershed <- function(wshed_info) {
  info_fields <- names(wshed_info)
  if (!("name" %in% info_fields)) {
    if (!("comid" %in% info_fields)) {
      message("Error: watershed info must have 'name' field")
      return(FALSE)
    }
    wshed_info$name <- paste0('nhd_', wshed_info$comid)
  }
  area_sqmi <- om_handle_wshed_area(wshed_info)
  list(
    name = wshed_info$name,
    object_class = 'ModelObject',
    local_area_sqmi = list(
      name = 'local_area_sqmi',
      object_class = 'Equation',
      value = area_sqmi
    ),
    # pull local & upstream model inputs broadcast by child objects
    read_from_children = list(
      name = 'read_from_children',
      object_class = 'ModelBroadcast',
      broadcast_type = 'read',
      broadcast_channel = 'hydroObject',
      broadcast_hub = 'self',
      broadcast_params = list(
        list("Qtrib", "Qtrib"),
        list("trib_area_sqmi", "trib_area_sqmi"),
        list("child_wd_mgd", "wd_mgd")
      )
    ),
    # simulated flows
    Qlocal = list(
      name = 'Qlocal',
      object_class = 'Equation',
      value = 'local_area_sqmi * Runit'
    ),
    Qin = list(
      name = 'Qin',
      object_class = 'Equation',
      equation = 'Qlocal + Qtrib'
    ),
    Qout = list(
      name = 'Qout',
      object_class = 'Equation',
      equation = 'Qin * 1.0'
    ),
    # secondary properties
    drainage_area_sqmi = list(
      name = 'drainage_area_sqmi',
      object_class = 'Equation',
      equation = 'local_area_sqmi + trib_area_sqmi'
    ),
    # publish results to the parent object
    send_to_parent = list(
      name = 'send_to_parent',
      object_class = 'ModelBroadcast',
      broadcast_type = 'send',
      broadcast_channel = 'hydroObject',
      broadcast_hub = 'parent',
      broadcast_params = list(
        list("Qout", "Qtrib"),
        list("drainage_area_sqmi", "trib_area_sqmi")
      )
    )
  )
}
# Recursively attach an OM model node for `wshed_info` (one NHD flowline,
# needing `comid` and `areasqkm`) into `json_network` under the key
# "nhd_<comid>", then recurse over every upstream tributary found via
# nhd_next_up(), nesting each tributary's node inside this one.
# Returns the augmented `json_network`.
# NOTE(review): Equation entries mix `value` and `equation` keys exactly
# as in the sibling builders; preserved verbatim -- confirm against the
# loader.
nhd_model_network <- function (wshed_info, nhd_network, json_network) {
  comid <- wshed_info$comid
  wshed_name <- paste0('nhd_', comid)
  node <- list(
    name = wshed_name,
    object_class = 'ModelObject',
    # base attribute: local drainage area, km^2 -> mi^2, as an expression
    # string built exactly as the original (note paste's default " " sep)
    local_area_sqmi = list(
      name = 'local_area_sqmi',
      object_class = 'Equation',
      value = paste(wshed_info$areasqkm, ' * 0.386102')
    ),
    # read upstream model inputs broadcast by child nodes
    read_from_children = list(
      name = 'read_from_children',
      object_class = 'ModelBroadcast',
      broadcast_type = 'read',
      broadcast_channel = 'hydroObject',
      broadcast_hub = 'self',
      broadcast_params = list(
        list("Qtrib", "Qtrib"),
        list("trib_area_sqmi", "trib_area_sqmi")
      )
    ),
    # simulated flows
    Qlocal = list(
      name = 'Qlocal',
      object_class = 'Equation',
      value = 'local_area_sqmi * Runit'
    ),
    Qin = list(
      name = 'Qin',
      object_class = 'Equation',
      equation = 'Qlocal + Qtrib'
    ),
    Qout = list(
      name = 'Qout',
      object_class = 'Equation',
      equation = 'Qin * 1.0'
    ),
    # secondary properties
    drainage_area_sqmi = list(
      name = 'drainage_area_sqmi',
      object_class = 'Equation',
      equation = 'local_area_sqmi + trib_area_sqmi'
    ),
    # publish outflow and cumulative area to the parent node
    send_to_parent = list(
      name = 'send_to_parent',
      object_class = 'ModelBroadcast',
      broadcast_type = 'send',
      broadcast_channel = 'hydroObject',
      broadcast_hub = 'parent',
      broadcast_params = list(
        list("Qout", "Qtrib"),
        list("drainage_area_sqmi", "trib_area_sqmi")
      )
    )
  )
  json_network[[wshed_name]] <- node
  # Recurse over each immediate upstream tributary, nesting its subtree
  # inside this node (seq_len handles the zero-tributary case cleanly).
  upstream <- nhd_next_up(comid, nhd_network)
  for (row in seq_len(nrow(upstream))) {
    json_network[[wshed_name]] <- nhd_model_network(
      upstream[row, ], nhd_network, json_network[[wshed_name]]
    )
  }
  json_network
}
ef0843618f694157bffbd9a0a60d7c7bfc6020b4 | 444936c27b6a76d40c100bdfdbcd6917ec136840 | /scripts/seeWhatHappens.r | 609e428d2134f3343480e37fb4dc26d8ae67901e | [] | no_license | pickledplum/mpg | bb1c09b2393006513d6120a01284ccde059e182e | 8b0f376ba7088164bb2273dbc6083dd9659e6979 | refs/heads/master | 2016-09-10T20:10:54.655702 | 2014-03-07T23:51:23 | 2014-03-07T23:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,153 | r | seeWhatHappens.r | #' Mining indicators
#'
library(RSQLite)
library(xts)
# Optional plotting dependencies: flag whether ggplot2 + gridExtra are
# available for grid plotting.
# FIX: the original tested for the misspelled package "ggplto2", so the
# flag could never become TRUE; also match against the package names
# (rownames of installed.packages()) rather than every matrix cell, and
# load with library() instead of require().
have_GridExtra <- FALSE
if( "ggplot2" %in% rownames(installed.packages())
    && "gridExtra" %in% rownames(installed.packages()) ){
  library(ggplot2)
  library(gridExtra)
  have_GridExtra <- TRUE
} else{
  logger.warn("No ggplot2 or gridExtra packages re installed.  No funcy grid plotting...")
}
source("../dummy/getUniverse.r")
source("../dummy/getTSeries.r")
source("logger.r")
source("advanceMonths.r")
source("getCompoundedReturn.r")
# Plot colors for the quantile bins, ordered from the lowest to the highest
# bin; seeWhatHappens() indexes this vector by bin number, so it must have
# at least `nbins` entries.
pallet <- c("blue","dark green","yellow","dark orange","red")
# seeWhatHappens: for each month where both series overlap, rank companies
# by the factor value (controlVar), split them into `nbins` quantile bins,
# compound each company's returns (totalR) forward over the horizons in
# `periods`, and plot each bin's mean return relative to the cross-bin mean.
#
# Args (assumptions inferred from usage -- confirm against callers):
#   controlVar - xts with one column per company; monthly factor values
#   totalR     - xts with one column per company; per-period total returns
#   periods    - integer vector of forward horizons in months, ascending
#   nbins      - number of quantile bins (pallet must have >= nbins colors)
#   factorName - label used in the plot titles
#
# Side effects: one plot per processed month (with a 1-second pause) and
# logging via the sourced logger helpers; invisibly returns NULL (the value
# of the final for loop).
#
# NOTE(review): depends on helpers sourced at the top of this file
# (is.empty, advanceMonths, getCompoundedReturn, logger.*) and the global
# `pallet`. The locals `factor` and `range` shadow base R functions of the
# same names within this function.
seeWhatHappens <- function( controlVar, totalR, periods, nbins, factorName ){
  # browser()
  n_periods <- length(periods)
  # Months (as "YYYY-MM" strings) present in both series.
  valid_months <- as.character(intersect(substr(index(controlVar),1,7), substr(index(totalR),1,7)))
  oldest_this_year_month <- min(valid_months)
  tokens <- as.integer(strsplit(oldest_this_year_month, "-")[[1]])
  # NOTE(review): oldest_yy/oldest_mm are assigned but never used below.
  oldest_yy <- tokens[1]
  oldest_mm <- tokens[2]
  latest_this_year_month <- max(valid_months)
  tokens <- as.integer(strsplit(latest_this_year_month, "-")[[1]])
  latest_yy <- tokens[1]
  latest_mm <- tokens[2]
  logger.debug(paste("Valid months: min,max,# - ", paste(oldest_this_year_month, latest_this_year_month, n_periods),sep=","))
  # iterate from the most recent to the oldest
  for( this_year_month in valid_months ){
    logger.info("Processing ", this_year_month, "...")
    # NOTE(review): yy/mm appear unused in the remainder of the loop.
    yy <- rep(0,n_periods)
    mm <- rep(0,n_periods)
    # Factor observations for this month, keyed by company ID.
    factor <- as.vector(controlVar[this_year_month])
    #logger.debug("factor: ", paste(factor, collapse=","))
    names(factor) <- colnames(controlVar)
    #logger.debug("Names(factor): ", paste(names(factor), collapse=","))
    # Get rid of NAs
    factor <- factor[!is.na(factor)]
    if( is.empty(factor) ){
      logger.warn("No factor data. Skipping...")
      next
    }
    #logger.debug("factor on ", this_year_month, ":", paste(factor,collapse=","))
    # Sort in the ascending order
    sorted_index <- order(factor, decreasing=FALSE)
    sorted_pbk <- factor[sorted_index]
    #logger.debug("Sorted index: ", sorted_index)
    # collect points to the companies data in each bin
    range <- length(sorted_pbk)
    if( range < nbins ){
      logger.warn("Not enough data. Bin size is ", nbins, " but there is only ", range, " non-null data points. Skipping...")
      next
    }
    # Partition the sorted company IDs into nbins equal-width bins of h
    # companies each (a remainder of up to nbins-1 companies at the top
    # of the ordering is left unassigned).
    percentile_notations <- vector("numeric", nbins)
    h <- as.integer(range/nbins)
    bin_ids <- list()
    sorted_ids <- names(factor)[sorted_index]
    for(i in 1:nbins){
      begin <- (i-1) * h + 1
      end <- begin + h - 1
      bin_ids[[i]] <- sorted_ids[begin:end]
      percentile_notations[i] <- as.integer(h*(i-1)/range*100.)
      #logger.debug("Bin ", percentile_notations[i], " percentile: ", bin_ids[[i]])
    }
    # Will store the returns for the future periods
    dailyR <- list()
    # Extract the year and the month of this iteration
    tokens <- as.integer(strsplit(this_year_month, "-")[[1]])
    yy0 <- tokens[1]
    mm0 <- tokens[2]
    # Get the price for each company for this month
    # Keep carrying the company IDs attached to the values
    # so that we can filter & sort by IDs later.
    if( is.empty(totalR[ paste(yy0,mm0,sep="-")] )){
      logger.warn("No return data on the reference month. Skipping...")
      next
    }
    r0 <- as.vector(totalR[ paste(yy0,mm0,sep="-")][1,sorted_index])
    names(r0) <- sorted_ids
    #logger.debug("Return on (", this_year_month, ")")
    #print(r0)
    # Gather incremental returns by months
    # Example: if periods = c(1,3,6,12) then,
    # dailyR[[1]] contains returns from the reference month to the next month period. 0,1,2,3
    # dailyR[[2]] contains rturns from the first period to the second period. 4,5,6
    # dailyR[[3]] contains 7,8,9,10,11,12
    are_we_done = FALSE
    previous_period <- this_year_month
    for(period in seq(1,n_periods)) {
      next_period <- advanceMonths(this_year_month, periods[period]-1)
      logger.debug("Looking into ", next_period)
      if( !is.empty((totalR[next_period])) ){
        dailyR[[period]] <- totalR[paste(previous_period, next_period, sep="/")]
        #logger.debug("For period from ", previous_period, " to the end of ", next_period, ": ")
        #print(dailyR[[period]])
      } else{
        # put dummy in the slot: a one-row all-NA xts dated at the first
        # of the missing month, so downstream indexing stays shape-safe
        dummy <- t(data.frame(rep(NA, ncol(totalR))))
        rownames(dummy) <- paste(next_period, "-01", sep="")
        colnames(dummy) <- colnames(totalR)
        dummy <- as.xts(dummy, by=as.Date(index(dummy)))
        dailyR[[period]] <- dummy
        # check if there is really no more data beyond that point
        tokens <- as.integer(strsplit(next_period, "-")[[1]])
        next_yy <- tokens[1]
        next_mm <- tokens[2]
        if( next_yy > latest_yy ) {
          # obvious, this is done
          are_we_done <- TRUE
        } else if( next_yy == latest_yy ) {
          if( next_mm > latest_mm ){
            # sure, went beyond
            are_we_done <- TRUE
          }
        } else {
          # No, it's just missing data for this period
          logger.warn("Skipping this time period in the future due to the lack of data: ", next_period)
          next
        }
      }
      #browser()
      previous_period <- next_period
    }
    # We need 12 months in the future to do this marching, or we are done.
    if( are_we_done ){
      logger.warn("No more ", periods[n_periods], " months in the future. We are done.")
      break
    }
    # Report how many stocks in each bin
    num_companies_in_each_bin <- vector("numeric", nbins)
    for( bin in 1:nbins ){
      num_companies_in_each_bin[bin] <- length(bin_ids[[bin]])
    }
    logger.info("Number of stocks in bins in ascending order of percentile: ",
                paste(num_companies_in_each_bin, collapse=","))
    # Compute compounded return upto the period
    #
    compoundedR <- list()
    # NOTE(review): projectedPrice is created but never populated or read.
    projectedPrice <- list()
    for( period in 1:n_periods ){
      #ndays <- as.integer(periods[period]*(260/12))
      compoundedR_this_bin <- list()
      for( bin in 1:nbins ){
        #logger.debug(as.integer(h*(bin-1)/range*100.), " percentile")
        r <- rep(NA, length(bin_ids[[bin]]))
        names(r) <- bin_ids[[bin]]
        for( i in 1:length(bin_ids[[bin]]) ){
          company <- bin_ids[[bin]][i]
          returns <- as.vector(dailyR[[period]][,company])
          #logger.debug("Visiting ", company)
          ##############################################
          # ToDO: Revisit by Sachko, 3/7/2014
          #
          # Not quite sure what to do with
          # "annualization". Here's the code where
          # total returns are compounded upto this
          # month period. Do I need to divide
          # the daily return by 260, or divide the
          # M-month compounded R by M/12?
          #
          #
          ##############################################
          r[i] <- getCompoundedReturn( 1., returns ) #/ 12. * periods[period]
        }
        if( is.empty(r[!is.na(r)]) ){
          logger.warn("No non-NA returns for this bin in period: ", bin, ", ", period)
        }
        # Compounded upon the previous time period.
        if(period>1){
          r <- r * compoundedR[[period-1]][[bin]]
        }
        compoundedR_this_bin[[bin]] = r
        #logger.debug("Compounded return for bin=", bin, ": ")
        #print(r)
      }
      compoundedR[[period]] <- compoundedR_this_bin
    }
    #print(compoundedR)
    # average out across the companies within a bin
    #
    # Example: The resulting matrix has the dimensions of nbins by nperiods.
    #          1mon   3mon    6mon   12mon
    # 0-20      r11    r12     r13     r14
    # 21-40
    # 41-60
    # 61-80
    # 81-100
    all_returns <- matrix(NA, nrow=nbins, ncol=n_periods)
    colnames(all_returns) <- periods
    for( bin in 1:nbins) {
      for( period in 1:n_periods) {
        bin_r <- compoundedR[[period]][[bin]] #/ max(periods) * periods[period]
        all_returns[bin,period] <- mean(bin_r)
      }
    }
    # Demean each period column: rel_returns is each bin's return relative
    # to the average across bins for that horizon.
    period_mean <- vector("numeric", length=n_periods)
    rel_returns <- matrix(NA, nrow=nbins, ncol=n_periods)
    colnames(rel_returns) <- periods
    for ( period in 1:n_periods ) {
      period_mean[period] <- mean(all_returns[,period], na.rm=TRUE)
      rel_returns[,period] <- all_returns[,period] - period_mean[period]
    }
    #print(rel_returns)
    rel_returns <- rel_returns * 100. # to percent
    if(TRUE) {
      y <- rel_returns[!is.na(rel_returns)]
      if( !is.empty(y) ) {
        percentiles <- seq(0, 100-as.integer(100/nbins), as.integer(100/nbins))
        # Fixed y-axis of +/- 5% (data-driven limits left commented out).
        y_min <- -5 #floor(min(y))
        y_max <- 5 #max(ceiling(max(y)), y_min)
        logger.debug("y min,max: ", y_min, ",", y_max)
        plot(x=1,y=1, type="n", xlim=c(0,max(periods)), ylim=c(y_min,y_max), axes=FALSE,
             xlab="Months in Future", ylab="Relative Return (%)")
        axis(side=1, at=c(0,periods) )
        axis(side=2, at=seq(y_min,y_max,1) )
        abline(0,0, col="black")
        for(i in 1:nbins){
          points(c(0,periods), c(0,rel_returns[i,]), col=pallet[i], pch="*")
          lines(c(0,periods), c(0,rel_returns[i,]), col=pallet[i], pch="*")
        }
        legend("bottom", paste(percentiles, "%"), col=pallet[1:nbins],
               lty=1:nbins, pch="*", ncol=5)
        title(main=paste(this_year_month, " - ", factorName, " vs Returns"))
        # Delay for view
        Sys.sleep(1)
      } else{
        logger.warn("All Y values are NAs, skipping plotting for ", this_year_month, " ...")
      }
    }
  }
}
ef9eb2e05b095544f9eeb483315efa32aa0c80ba | 8e96ff3f9d4f3d274fa4908471456e277ee80618 | /data/ramldb/Step3_add_sst_data.R | e15b104f73a459f50e22fb4e127bd954536c5a4f | [] | no_license | cfree14/sst_recruitment | 1e96d7e9718a93949f17ed0773d0e2702f4dcc4d | 134104d76152f1869209e0eb883036274da156a1 | refs/heads/master | 2020-04-14T08:45:49.817785 | 2019-01-12T00:15:25 | 2019-01-12T00:15:25 | 163,742,595 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,860 | r | Step3_add_sst_data.R |
# Use PT data
# Use data based on STOCKID and not ASSESSID
# Setup
################################################################################
# Clear workspace
rm(list = ls())
# Packages
library(plyr)
library(dplyr)
library(reshape2)
# Directories
datadir <- "data/ramldb/data"
plotdir <- "data/ramldb/figures"
sstdir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/data/sst/data/averages"
bndrydir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/data/stock_boundaries/data"
# Read potential RAM data
load(file.path(datadir, "ramldb_stock_recruit_data_trimmed.Rdata"))
data_orig <- data
stocks_orig <- stocks
# Read SST data
cobe <- read.csv(paste(sstdir, "ramldb_sst_yearly_cobe.csv", sep="/"), as.is=T)
# Read stock boundary key
key <- read.csv(file.path(bndrydir, "ramldb_v3.8_stock_boundary_table_v2.csv"), as.is=T)
# Read centroids key
centroids <- read.csv(file.path(bndrydir, "ramldb_v3.8_stock_boundary_centroids_areas_fixed.csv"), as.is=T)
# Build assessid, stockid, lme key
key1 <- key %>%
select(assessid, stockid) %>%
left_join(select(centroids, assessid, lme_name), by="assessid")
# Build data
################################################################################
# Format SST data
sst <- cobe %>%
left_join(select(key1, assessid, stockid, lme_name), by="assessid") %>%
select(stockid, lme_name, year, sst_c)
# Add SST data
data1 <- data_orig %>%
# Add SST data
left_join(sst, by=c("stockid", "year")) %>%
select(stockid, lme_name, year, ssb, r, sst_c) %>%
filter(!is.na(sst_c)) %>%
# Add order/family data
left_join(select(stocks_orig, stockid, order, family), by="stockid") %>%
# Arrange columns
select(stockid, lme_name, order, family, everything())
# Check sample size
check <- data1 %>%
group_by(stockid) %>%
summarize(nyr=n()) %>%
filter(nyr>=20)
# Final data
stocks <- filter(stocks_orig, stockid %in% check$stockid)
# Standardize data
data <- data1 %>%
# Reduce to final stocks
filter(stockid %in% check$stockid) %>%
# Standardize SSB, R, and SST
group_by(stockid) %>%
mutate(r_sd=r/sd(r),
ssb_sd=ssb/sd(ssb),
# r_sd=exp(log(r) - mean(log(r))),
# ssb_sd=exp(log(ssb) - mean(log(ssb))),
sst_sd=sst_c-mean(sst_c))
# Confirm standardization
aggregate(data$r_sd, by=list(data$stockid), sd) # SD=1
aggregate(data$ssb_sd, by=list(data$stockid), sd) # SD=1
aggregate(data$sst_sd, by=list(data$stockid), mean) # Mean=0
# Plot
par(mfrow=c(1,2))
plot(r ~ ssb, data, subset=stockid=="CODGB")
plot(r_sd ~ ssb_sd, data, subset=stockid=="CODGB")
# Complete
freeR::complete(data)
freeR::complete(stocks)
# Build data
################################################################################
# Export
save(data, stocks, file=file.path(datadir, "ramldb_stock_recruit_data_trimmed_sst.Rdata"))
|
6867ca3046c30b41fac458e401d42ebef07d18ac | e4343700d1efe9c5f573be6f26204d4cd5ca17c8 | /inst/applications/boardpack2/server.R | b4c3d7017d1e32c51409804d4c69e9460e81a1e1 | [] | no_license | jchenpku/Rtraining | bfe2bb6382dda9402e9530eeb1741a5ff046ca26 | 8e116878aa6ec7ff6ed232a17d87655bb83befa2 | refs/heads/master | 2021-06-14T09:32:27.721070 | 2017-04-20T10:32:53 | 2017-04-20T10:32:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,792 | r | server.R | ############# Prep ################
library(shiny)
library(knitr)
if(!require(devtools)) install.packages("devtools",repos = "https://cran.rstudio.com")
if(!require(Rtraining)) devtools::install_github("stephlocke/Rtraining")
library("RSQLite")
db <- dbConnect(SQLite(), dbname="Boardpack.sqlite")
if(!dbExistsTable(db,"Comments")){
emptyComments<-data.frame(Field=NA_character_,
Comment=NA_character_,
Date=NA_character_)
dbWriteTable(db,name = "Comments" ,value=emptyComments)
dbSendQuery(db,"DELETE FROM Comments")
}
shinyServer(function(input, output, session) {
############# Preview Functionality ################
boardpreview<- reactive( readr::read_file(
knit2html(input = "MyBoardPack.Rmd",output = "MyBoardPack.html"))
)
output$knitDoc <- renderUI({
HTML(boardpreview() )
})
############# Generate Functionality ################
output$report = downloadHandler(
filename <- paste0("MyBoardPack",format(Sys.Date(),"%Y%m%d"),".pdf"),
content <- function(file) {
out <- rmarkdown::render("MyBoardPack.Rmd", "pdf_document")
file.rename(out, file) # move pdf to file for downloading
},
contentType = 'application/pdf'
)
############# DB Functionality ################
comments<- reactive({
time<-Sys.time()
data <- data.frame(
Field= c("TableD1","ChartD1","TableD2","ChartD2"),
Comment=c(input$TableD1,input$ChartD1,
input$TableD2,input$ChartD2),
Date=format(time))
data
})
observeEvent(input$save,{
dbWriteTable(db,name = "Comments" ,value=comments(), append=TRUE)
})
output$previouscomments<-renderTable({
input$save
dbReadTable(db,"Comments")
})
}) |
5c938d65b142e248a7a3492ec6e109535e9516ee | a52bfb54bda7604534487c2744642da30f634b3b | /FPU68882/SRCS/pml-2.03/doc/trlr.r | e692c2e3e9a8668d7c6d1793b2449ab155c09bdf | [] | no_license | Kochise/atari-docs | 292ac844dd951065d7d68d91725d013455b8d3c3 | 51bbffce0b9930c1d349219b9a4c1d60e53f813f | refs/heads/master | 2021-01-22T19:49:13.763992 | 2017-03-29T18:18:13 | 2017-03-29T18:18:13 | 85,242,308 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 376 | r | trlr.r | .bp
.ce
REFERENCES
.sp 3
.nf
P. A. Fox, A. D. Hall, and N. L. Schryer,
The PORT Mathematical Subroutine Library,
ACM Transactions on Mathematical Software,
Vol 4, No. 2, June 1978, pp 104-126.
.sp 3
Brian Ford,
Parameterization of the Environment for Transportable Numerical Software,
ACM Transactions on Mathematical Software,
Vol 4, No. 2, June 1978, pp 100-103.
.bp
.xp
|
1eeb02d91740148d1648aa7e93c55430a5eddb5a | 72352674e0c2b9a3e1e13e324e484da7e6e23c07 | /tests/testthat/test-thumbnail.R | 23cf7a637c9c7b3e8497a6ecacc687cc659f785b | [
"Apache-2.0"
] | permissive | karigar/rgee | 85ad501dc40362cc4d6494eac0fdfd0f94d5ae87 | bea78daa7195e86d2cb6e90dabb5062d4e6e1d76 | refs/heads/master | 2022-11-12T06:34:17.280941 | 2020-07-05T04:25:55 | 2020-07-05T09:05:34 | 277,981,630 | 0 | 0 | null | 2020-07-08T03:36:34 | 2020-07-08T03:36:33 | null | UTF-8 | R | false | false | 4,278 | r | test-thumbnail.R | context("rgee: ee_as_thumbnail test")
# Pre-checking ------------------------------------------------------
# Google credentials were loaded in the system?
skip_if_no_credentials <- function(user) {
ee_path <- path.expand(sprintf("~/.config/earthengine/%s", user))
credentials <- list.files(
path = ee_path,
pattern = "@gmail.com|credentials|GCS_AUTH_FILE.json"
)
if (length(credentials) != 3) {
skip("All google credentials were not found")
}
}
# Neccesary Python packages were loaded?
skip_if_no_pypkg <- function() {
have_ee <- reticulate::py_module_available("ee")
have_numpy <- reticulate::py_module_available("numpy")
if (isFALSE(have_ee)) {
skip("ee not available for testing")
}
if (isFALSE(have_numpy)) {
skip("numpy not available for testing")
}
}
# Init Earth Engine just if it is necessary
init_rgee <- function() {
tryCatch(
expr = ee$Image()$getInfo(),
error = function(e) {
ee_Initialize(
email = 'data.colec.fbf@gmail.com',
drive = TRUE,
gcs = TRUE
)
}
)
}
user <- "data.colec.fbf"
skip_if_no_credentials(user)
skip_if_no_pypkg()
init_rgee()
# -------------------------------------------------------------------------
library(raster)
library(rgee)
library(sf)
### 1. Data
dem_palette <- c(
"#008435", "#1CAC17", "#48D00C", "#B3E34B", "#F4E467",
"#F4C84E", "#D59F3C", "#A36D2D", "#C6A889", "#FFFFFF"
)
nc <- st_read(system.file("shp/arequipa.shp", package = "rgee"))
sheds <- ee$FeatureCollection("USGS/WBD/2017/HUC06")$
filterBounds(ee$Geometry$Rectangle(-127.18, 19.39, -62.75, 51.29))$
map(function(feature) {
num <- ee$Number$parse(feature$get("areasqkm"))
return(feature$set("areasqkm", num))
})
image <- ee$Image("CGIAR/SRTM90_V4")
region <- nc$geometry[[1]] %>%
st_bbox() %>%
st_as_sfc() %>%
st_set_crs(4326) %>%
sf_as_ee() %>%
ee$FeatureCollection$geometry()
# just one band -----------------------------------------------------------
test_that("ee_as_thumbnail full parameters", {
arequipa_dem <- ee_as_thumbnail(image = image,
region = region,
raster = TRUE,
vizparams = list(min = 0, max = 5000))
arequipa_dem <- arequipa_dem * 5000
expect_equal(max(getValues(arequipa_dem)), 5000)
})
test_that("ee_as_thumbnail min-max", {
# JPEG images
mysheds <- ee$Feature(sheds$first())$geometry()
shed_dem <- ee_as_thumbnail(
image = image,
region = mysheds$bounds(),
vizparams = list(
min = 0,
max = 500
)
)
expect_equal(max(shed_dem[[1]]), 0.4470588, tolerance = .002)
})
# RGB band -----------------------------------------------------------
test_that("ee_as_thumbnail palette, min-max", {
# PNG images
arequipa_dem <- ee_as_thumbnail(
image = image,
region = region,
vizparams = list(palette = dem_palette, min = 0, max = 5000)
)
arequipa_dem <- arequipa_dem * 5000
expect_equal(max(arequipa_dem[[1]]), 5000, tolerance = 1)
})
# RGB band -----------------------------------------------------------
test_that("ee_as_thumbnail region", {
# PNG images
image_clip <- image$clip(region)
arequipa_dem <- ee_as_thumbnail(
image = image_clip,
region = region,
raster = TRUE,
vizparams = list(
palette = dem_palette,
min = 0,
max = 5000
)
)
arequipa_dem <- arequipa_dem * 5000
expect_equal(mean(arequipa_dem[1:10,1:10,3]), 1638.17, tolerance = 1)
})
# error -----------------------------------------------------------
test_that("ee_as_thumbnail error 01", {
# PNG images
expect_error(ee_as_thumbnail("ee$Image", region))
})
test_that("ee_as_thumbnail error 02", {
# PNG images
expect_error(ee_as_thumbnail(image, "ee$Geometry"))
})
test_that("ee_as_thumbnail error 03", {
# PNG images
expect_error(ee_as_thumbnail(image, region$centroid()$buffer(100)))
})
# large image -----------------------------------------------------------
test_that("ee_as_thumbnail large image", {
# PNG images
region <- ee$Geometry$Point(-72.403,-16.08)$buffer(100)$bounds()
arequipa_dem <- ee_as_thumbnail(
image = image,
region = region,
dimensions = 3000L
)
expect_s3_class(arequipa_dem, "stars")
})
|
a8d35e7b3c61cc035b7eaf1b76b0d3b1ab8d4b72 | 760ec696df1c77a8aa9b82608d6d4515f21fd0e3 | /Testing to see if historical change in earnings affects future stock prices (Public).R | b5e3edf94b845b11233c560b1b2785263dc38b16 | [] | no_license | gabistreche/Multi_Factor | 8e91b6df52c9a791a2adc31574eed1498dca0a87 | e385babdae53868b0ad582996ef6cdc03feb7f57 | refs/heads/master | 2023-03-18T22:13:05.932184 | 2021-03-08T05:40:54 | 2021-03-08T05:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,035 | r | Testing to see if historical change in earnings affects future stock prices (Public).R | #trying to see if change in earnings trend is predictive of future stock
#prices and
library(PerformanceAnalytics)
library(quantmod)
library(tidyverse)
library(Quandl)
library(ggthemes)
library(gridExtra)
library(gtable)
library(grid)
library(TTR)
#create custome ggplot theme
#from http://joeystanley.com/blog/custom-themes-in-ggplot2
theme_joey <- function () {
theme_bw(base_size=12, base_family="Avenir") %+replace%
theme(
panel.background = element_blank(),
plot.background = element_rect(fill="gray96", colour=NA),
legend.background = element_rect(fill="transparent", colour=NA),
legend.key = element_rect(fill="transparent", colour=NA)
)
}
#choose start and end dates
start <- as.Date("1900-01-01")
end <- Sys.Date()
#set api key
Quandl.api_key("apikeyhere")
Yale <- Quandl("YALE/SPCOMP", api_key="apikeyhere", type = "xts")
#create 12 month SMA of Earnings as a leading indicator
SP_Earnings <- na.omit(Yale$Earnings)
SP_Earnings$SMA12 <- SMA(SP_Earnings, n = 12)
SP_Earnings$Year.on.Year.Change <- ROC(SP_Earnings[,1], n = 12)
colnames(SP_Earnings) <- c("S&P 500 Earnings",
"12-Month Moving Average Earnings",
"Year on Year Change")
#calculate the price return, dividend yield, monthly equivalent, and total return
SP_Earnings$Change_in_trend <- Return.calculate(SP_Earnings$`Year on Year Change`)
SP500 <- merge(Yale$`S&P Composite`, Yale$Dividend)
SP500$Price_Return <- Return.calculate(SP500$S.P.Composite)
SP500$Dividend_Yield <- SP500$Dividend/SP500$S.P.Composite
SP500$Dividend_Yield_Monthly <- SP500$Dividend_Yield/12
SP500$Total_Return <- SP500$Price_Return + SP500$Dividend_Yield_Monthly
SP500 <- na.omit(SP500)
#check to make sure it looks correct
head(SP500)
table.AnnualizedReturns(SP500[,c(3,5,6)])
#create a date time index that will work with other functions in
#performance analytics package and match length of current
#data from Yale Shiller. I chose 28th of the month since it
#will be at the end of the month and don't have to worry about February
#dates <- seq(as.Date("1871-02-28"),length=1778,by="months")
#SP500_xts <- cbind(SP500$Total_Return, dates)
#SP500_data <- as.xts(SP500_xts$Total_Return, order.by = dates)
#colnames(SP500_data) <- "SP500 TR"
#SP500_data$rolling_12month_returns <- rollapply(SP500_data[,1],
#FUN = Return.annualized,
#width = 12,
#scale = 12)
#SP500_data$rolling_24month_returns <- rollapply(SP500_data[,1],
#FUN = Return.annualized,
#width = 24,
#scale = 12)
#SP500_data$rolling_36month_returns <- rollapply(SP500_data[,1],
#FUN = Return.annualized,
#width = 36,
#scale = 12)
#check to make sure the data looks correct
#autoplot(SP500_data[,c(2,3,4)], facets = FALSE)
#lag the total return SP500 performance to correlate with the current month
#earnings and the next 12, 24, 36 month returns
#SP500_data$rolling_12month_returns_lag <- lag.xts(SP500_data$rolling_12month_returns, k = -12)
#SP500_data$rolling_24month_returns_lag <- lag.xts(SP500_data$rolling_24month_returns, k = -24)
#SP500_data$rolling_36month_returns_lag <- lag.xts(SP500_data$rolling_36month_returns, k = -36)
head(SP500)
SP500$rolling_12month_returns <- rollapply(SP500$Total_Return,
FUN = Return.annualized,
width = 12,
scale = 12)
SP500$rolling_24month_returns <- rollapply(SP500$Total_Return,
FUN = Return.annualized,
width = 24,
scale = 12)
SP500$rolling_36month_returns <- rollapply(SP500$Total_Return,
FUN = Return.annualized,
width = 36,
scale = 12)
#lag the total return SP500 performance to correlate with the current month
#earnings and the next 12, 24, 36 month returns
SP500$rolling_12month_returns_lag <- lag.xts(SP500$rolling_12month_returns, k = -12)
SP500$rolling_24month_returns_lag <- lag.xts(SP500$rolling_24month_returns, k = -24)
SP500$rolling_36month_returns_lag <- lag.xts(SP500$rolling_36month_returns, k = -36)
tail(SP500)
Test_data <- merge(SP500, SP_Earnings)
Test_data_na <- na.omit(Test_data)
tail(Test_data_na, 5)
#the chart of correlation seems to indicate that change in earnings on a calendar basis
#is not correlated with future returns
chart.Correlation(Test_data_na[,c(10,11,12,15,16)])
#create a trading strategy to see if getting in and out depending on earnings
#can work
#1st signal and strategy is if the change in trend is negative get out
signal <- ifelse(Test_data_na$Change_in_trend > 0, 1, 0)
#add the signal
Test_data_na$signal <- signal
#calculate the returns based on the price data times the signals. must add to signal three months to buy on the day
#after the signal
Test_data_na$signal_lagged <- lag(signal,3)
Test_data_na$portfolio.return_no_costs_1 <- (Test_data_na$Total_Return*Test_data_na$signal_lagged)
tail(Test_data_na, 30)
ncol(Test_data_na)
table.AnnualizedReturns(Test_data_na[,c(6,19)])
#second signal is to see if change in trend is greater than -.05 otherwise get out
signal2 <- ifelse(Test_data_na$Change_in_trend > -.05, 1, 0)
#add the signal
Test_data_na$signal2 <- signal2
#calculate the returns based on the price data times the signal. must add to signal three months to buy on the day
#after the signal
Test_data_na$signal_lagged2 <- lag(signal2,3)
Test_data_na$portfolio.return_no_costs_2 <- (Test_data_na$Total_Return*Test_data_na$signal_lagged2)
tail(Test_data_na, 30)
table.AnnualizedReturns(Test_data_na[,c(6,22)])
#Third signal to see if simple year on year change is greater than -.05 otherwise get out
signal3 <- ifelse(Test_data_na$Year.on.Year.Change > -.05, 1, 0)
#add the signal
Test_data_na$signal3 <- signal3
#calculate the returns based on the price data times the signal. must add to signal three months to buy on the day
#after the signal
Test_data_na$signal_lagged3 <- lag(signal3,3)
Test_data_na$portfolio.return_no_costs_3 <- (Test_data_na$Total_Return*Test_data_na$signal_lagged3)
tail(Test_data_na, 30)
table.AnnualizedReturns(Test_data_na[,c(6,25)])
chart.CumReturns(Test_data_na[,c(6,19,22,25)], legend.loc = "topleft")
#based on a few different signals, even ignoring transaction costs, one
#cannot use earnings, change in earnings, or change in earnings trend
#to time the market and get superior risk adjusted performance
|
d54befc0de8be0f7cc64b85bbfbe5f18fb0ffd05 | 11c18a4332b419d124e331ef1ba0de2191cdb794 | /man/gg_colors.Rd | 78b5d3354d8c1d40ee8a425db7f11db58ec62423 | [] | no_license | MarkKurzeja/rankr | fca771cd35a073e676ba592cd36f9af4017d44fd | 1f0b2fe36ea33d73fab08c25f443ab611ea33112 | refs/heads/master | 2020-12-31T04:42:31.731017 | 2016-04-28T20:40:37 | 2016-04-28T20:40:37 | 57,325,361 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 434 | rd | gg_colors.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rankr_main.R
\name{gg_colors}
\alias{gg_colors}
\title{ggplot2 Color Defaults}
\usage{
gg_colors(n)
}
\arguments{
\item{n}{number of colors}
}
\description{
The colors from the great ggplot2 package by Hadley Wickhim which are
evenly spaced hues aroudn the hcl colour wheel
}
\examples{
scales::show_col(gg_colors(4))
}
\keyword{colors}
\keyword{ggplot2}
|
8e9a8ab0c13bddfdf618cbf02a7eb0bbaddc39d9 | 17bd236260351ebc9cf1bff367c0e63dfb3961d0 | /man/Plot.CA.Rd | e05aeba42e2059b1b73f1415287ef55afe35574b | [] | no_license | LeoDonatoNunes/MVar.pt | 7b68119d79c052ba57025b22e51026cce68ae657 | 1a95f718564b438bfd65f43b06f977bdde61d775 | refs/heads/master | 2021-01-17T13:04:15.307181 | 2015-10-06T22:14:02 | 2015-10-06T22:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,319 | rd | Plot.CA.Rd | \name{Plot.CA}
\alias{Plot.CA}
\title{Graficos da analise de correspondencia (CA) simples e multipla}
\description{Graficos da analise de correspondencia (CA) simples e multipla.}
\usage{Plot.CA(AC, Titles = matrix(NA,1,3), Color = "s", LinLab = NULL)}
\arguments{
\item{AC}{Dados da funcao CA.}
\item{Titles}{Titulos para os graficos.}
\item{Color}{"s" para graficos coloridos - default,\cr
"n" para graficos em preto e branco.}
\item{LinLab}{Vetor com o rotulo para as linhas, para dados de frequencia.\cr
Se nao informado retorna o padrao dos dados.}
}
\value{Retorna varios graficos.}
\author{
Paulo Cesar Ossani
Marcelo Angelo Cirillo
}
\seealso{\code{\link{CA}}}
\examples{
data(DataFreq) # conjunto de dados de frequencia
Dat <- DataFreq[,2:ncol(DataFreq)]
rownames(Dat) <- as.character(t(DataFreq[1:nrow(DataFreq),1]))
Resp <- CA(Dat, "f") # realiza CA
Titles = c("Observacoes", "Variaveis", "Observacoes/Variaveis")
Plot.CA(Resp, Titles, Color = "s")
data(DataQuali) # Conjunto de dados qualitativos
Dat <- DataQuali[,2:ncol(DataQuali)]
Resp <- CA(Dat, "c", "b") # realiza CA
Titles = c("","Graficos das Variaveis")
Plot.CA(Resp, Titles, Color = "s")
}
\keyword{Analise de correspondencia}
\keyword{CA}
|
7e507b990844b86a224a22c3a072ef8ec27bc3ed | c1d359cdf0281885744cdcd85d41a21e91218b43 | /man/sdf_read_table.Rd | e09819998016df15448f43a11f4c6653d0ce8c37 | [
"MIT"
] | permissive | zwdzwd/sesame | 20b2d29578661487db53432c8991d3c4478aa2c1 | 62fe6ef99a02e7f94b121fb601c3f368b8a4c1a8 | refs/heads/master | 2023-08-08T01:45:02.112492 | 2023-07-26T13:23:03 | 2023-07-26T13:23:03 | 122,086,019 | 37 | 26 | MIT | 2023-01-05T16:02:38 | 2018-02-19T16:00:34 | R | UTF-8 | R | false | true | 700 | rd | sdf_read_table.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SigDFMethods.R
\name{sdf_read_table}
\alias{sdf_read_table}
\title{read a table file to SigDF}
\usage{
sdf_read_table(fname, platform = NULL, verbose = FALSE, ...)
}
\arguments{
\item{fname}{file name}
\item{platform}{array platform (will infer if not given)}
\item{verbose}{print more information}
\item{...}{additional argument to read.table}
}
\value{
read table file to SigDF
}
\description{
read a table file to SigDF
}
\examples{
sesameDataCache() # if not done yet
sdf <- sesameDataGet('EPIC.1.SigDF')
fname <- sprintf("\%s/sigdf.txt", tempdir())
sdf_write_table(sdf, file=fname)
sdf2 <- sdf_read_table(fname)
}
|
1a42b19b350ad802331fe27550c4dc8374fbf751 | 7f27db9c3a8e1eeda456dc64f11338466c6a2a98 | /man/set_methods.Rd | 37503f7fbbd581d0ec5b4ef7a0ce417aa6ac5da0 | [
"MIT"
] | permissive | ropensci/EML | 4c228654f2fbcc5846121255dbb3dc19ba1c61df | b7871cca2b996a33aa1f534e8446f0730e706d4d | refs/heads/master | 2023-05-24T01:50:33.364460 | 2022-06-06T22:10:05 | 2022-06-06T22:10:05 | 10,894,022 | 75 | 43 | NOASSERTION | 2022-06-06T22:10:07 | 2013-06-23T23:20:03 | R | UTF-8 | R | false | true | 1,338 | rd | set_methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_methods.R
\name{set_methods}
\alias{set_methods}
\title{set_methods}
\usage{
set_methods(
methods_file,
instrumentation = character(),
software = NULL,
sampling_file = NULL,
sampling_coverage = NULL,
sampling_citation = NULL,
qualityControl_file = NULL
)
}
\arguments{
\item{methods_file}{Path to a file (markdown or .docx) containing a description of the methods used}
\item{instrumentation}{optional, text describing instrumentation used in methods}
\item{software}{optional, an EML software node describing software used in methods}
\item{sampling_file}{optional, Path to a file (.md or .docx) describing sampling method}
\item{sampling_coverage}{optional, coverage node for methods, e.g. set_coverage()}
\item{sampling_citation}{optional, a citation element describing the sampling protocol}
\item{qualityControl_file}{optional, path to a file (.md or .docx) describing quality control methods}
}
\value{
A methods object
}
\description{
set_methods
}
\examples{
\donttest{
f <- system.file("examples/hf205-methods.md", package = "EML")
set_methods(methods_file = f)
## Can also import from methods written in a .docx MS Word file.
f <- system.file("examples/hf205-methods.docx", package = "EML")
set_methods(methods_file = f)
}
}
|
21d5d342a987c56714ed355953ff2156d05600d0 | 323611254fe6aac77f4c519b0c9c757ddb66fb7d | /univarb.R | 96315694428a0b30b57bd5986b8bb05ff125a177 | [] | no_license | MattHealey/Sudi | 976f25e491eafed459e3252391900ec867b6018b | cd280fd3c8060aee150c7d13cbdaf9b6e953e246 | refs/heads/master | 2021-01-16T21:37:06.830794 | 2016-07-10T12:02:17 | 2016-07-10T12:02:17 | 62,262,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,254 | r | univarb.R | options(scipen = 999)
#source("1 - clean.R")
library(multcomp)
library(effects)
library(lubridate)
library(margins)
#row.has.na <- apply(usudi, 1, function(x){any(is.na(x))}); sum(row.has.na);magesudi <- usudi[!row.has.na,]; rm(row.has.na)
usudi$bwr[usudi$bwr==0] <- NA
contrasts(usudi$yod) <- contr.treatment(levels(usudi$yod), base=which(levels(usudi$yod) == "2014"))
contrasts(usudi$dhb) <- contr.treatment(levels(usudi$dhb), base=which(levels(usudi$dhb) == "Southern"))
contrasts(usudi$eth) <- contr.treatment(levels(usudi$eth), base=which(levels(usudi$eth) == "European_or_other"))
contrasts(usudi$sex) <- contr.treatment(levels(usudi$sex), base=which(levels(usudi$sex) == "F"))
contrasts(usudi$dep) <- contr.treatment(levels(usudi$dep), base=which(levels(usudi$dep) == "01_08"))
## Maternal age
## create factor for mage
# Compute age at a given date from a date of birth.
#
# @param dob     Date(s) of birth.
# @param age.day Date(s) at which the age is evaluated.
# @param units   Time unit understood by lubridate::duration()
#                (default "years").
# @param floor   If TRUE (default), truncate to whole units and return an
#                integer; otherwise return the fractional age as a double.
# @return Integer (floor = TRUE) or numeric vector of ages, same length as
#         the inputs.
age <- function(dob, age.day, units = "years", floor = TRUE) {
  # Elapsed time expressed in the requested unit. Note lubridate duration
  # semantics: a "year" is a fixed-length span, not a calendar year.
  calc.age <- interval(dob, age.day) / duration(num = 1, units = units)
  # The parameter `floor` shadows base::floor, so call the base function
  # explicitly rather than relying on R's function-vs-variable lookup.
  if (floor) {
    as.integer(base::floor(calc.age))
  } else {
    calc.age
  }
}
# Maternal age in completed years at the child's birth
# (mdob = mother's date of birth, dob = child's date of birth).
usudi$mage <- age(dob = usudi$mdob, age.day = usudi$dob)
#magesudi$mage <- age(dob = magesudi$mdob, age.day = magesudi$dob)
# Cross-tabulate single-year maternal age against SUDI status, keeping NAs
# visible (most missing maternal ages fall in the SUDI cases -- see the
# pasted output below).
addmargins(table(usudi$mage, usudi$sudi, exclude = NULL))
# 0 1 <NA> Sum
#11 1 0 0 1
#12 5 0 0 5
#13 51 1 0 52
#14 346 1 0 347
#15 1469 4 0 1473
#16 4727 10 0 4737
#17 10123 16 0 10139
#18 15782 28 0 15810
#19 21320 50 0 21370
#20 24198 21 0 24219
#21 25881 48 0 25929
#22 28255 48 0 28303
#23 30101 42 0 30143
#24 31627 34 0 31661
#25 33389 34 0 33423
#26 35970 27 0 35997
#27 38673 28 0 38701
#28 42016 26 0 42042
#29 44883 13 0 44896
#30 46152 16 0 46168
#31 48188 19 0 48207
#32 47798 11 0 47809
#33 45252 17 0 45269
#34 42327 20 0 42347
#35 38152 16 0 38168
#36 32664 16 0 32680
#37 27024 8 0 27032
#38 21543 5 0 21548
#39 16741 8 0 16749
#40 11980 4 0 11984
#41 7994 0 0 7994
#42 5029 0 0 5029
#43 2885 2 0 2887
#44 1544 1 0 1545
#45 781 1 0 782
#46 360 0 0 360
#47 157 0 0 157
#48 86 0 0 86
#49 30 0 0 30
#50 18 0 0 18
#51 17 0 0 17
#52 5 0 0 5
#53 4 0 0 4
#54 1 0 0 1
#55 3 0 0 3
#56 2 0 0 2
#57 1 0 0 1
#64 1 0 0 1
#65 2 0 0 2
#70 1 0 0 1
#<NA> 8 158 0 166
#Sum 785567 733 0 786300
# Bin numeric ages into labelled, equal-width intervals.
#
# Produces a factor with levels "lower-(lower+by-1)", ...,
# "(upper-by)-(upper-1)" plus an open-ended top category "upper+".
# Values are floored first and intervals are left-closed (right = FALSE),
# so e.g. 19.9 falls into "15-19" when by = 5.
#
# @param x          Numeric vector of ages.
# @param lower      Lower bound of the first interval (default 0).
# @param upper      Start of the open-ended top interval (no default).
# @param by         Interval width (default 5).
# @param sep        Separator used inside interval labels (default "-").
# @param above.char Suffix for the open-ended top label (default "+").
# @return Factor of the same length as x; values below `lower` become NA.
age.cat <- function(x, lower = 0, upper, by = 5,
                    sep = "-", above.char = "+") {
  # Labels for the closed bins, plus the open-ended "upper+" label.
  labs <- c(paste(seq(lower, upper - by, by = by),
                  seq(lower + by - 1, upper - 1, by = by),
                  sep = sep),
            paste0(upper, above.char))
  cut(floor(x), breaks = c(seq(lower, upper, by = by), Inf),
      right = FALSE, labels = labs)
}
addmargins(table(age.cat(usudi$mage, lower = 10,upper = 80), usudi$sudi, exclude = NULL))
# 0 1 <NA> Sum
#10-14 403 2 0 405
#15-19 53421 108 0 53529
#20-24 140062 193 0 140255
#25-29 194931 128 0 195059
#30-34 229717 83 0 229800
#35-39 136124 53 0 136177
#40-44 29432 7 0 29439
#45-49 1414 1 0 1415
#50-54 45 0 0 45
#55-59 6 0 0 6
#60-64 1 0 0 1
#65-69 2 0 0 2
#70-74 1 0 0 1
#75-79 0 0 0 0
#80+ 0 0 0 0
#<NA> 8 158 0 166
#Sum 785567 733 0 786300
# Store the banded maternal age and collapse the sparse tails into
# "under20" and "over40" analysis categories.
usudi$magecat <- age.cat(usudi$mage, lower = 10,upper = 80)
levels(usudi$magecat)
#[1] "10-14" "15-19" "20-24" "25-29" "30-34" "35-39" "40-44" "45-49" "50-54" "55-59" "60-64" "65-69" "70-74" "75-79" "80+"
levels(usudi$magecat) <- list("under20" = c("10-14","15-19"),
                              "20-24" = "20-24",
                              "25-29" = "25-29",
                              "30-34" = "30-34",
                              "35-39" = "35-39",
                              "over40" = c("40-44","45-49","50-54","55-59","60-64","65-69","70-74","75-79","80+"))
# NOTE(review): this creates a new object `sudi` (same name as the outcome
# column) that is never used below -- the rest of the script keeps working
# on `usudi`. Possibly intended `usudi <- droplevels(usudi)`; TODO confirm.
sudi <- droplevels(usudi)
levels(usudi$magecat)
# Final check of the collapsed maternal-age categories against SUDI status.
addmargins(table(usudi$magecat, usudi$sudi, exclude = NULL))
# 0 1 <NA> Sum
#>20 53824 110 0 53934
#20-24 140062 193 0 140255
#25-29 194931 128 0 195059
#30-34 229717 83 0 229800
#35-39 136124 53 0 136177
#<40 30901 8 0 30909
#<NA> 8 158 0 166
#Sum 785567 733 0 786300
# Use "over40" as the reference category for maternal age, consistent with
# the other explicitly chosen baselines at the top of the script.
contrasts(usudi$magecat) <- contr.treatment(levels(usudi$magecat), base=which(levels(usudi$magecat) == "over40"))
# Drop the helper functions now that the derived variables exist.
rm(age,age.cat)
# Univariable log-binomial models: binomial family with a log link, so
# each exponentiated coefficient is a relative risk of SUDI versus the
# covariate's reference level.
s1 <- glm(sudi ~ yod, data = usudi, family = binomial(link = "log"))      # year of death
s2 <- glm(sudi ~ bw, data = usudi, family = binomial(link = "log"))       # birth weight (categorical)
s3 <- glm(sudi ~ eth, data = usudi, family = binomial(link = "log"))      # ethnicity
s4 <- glm(sudi ~ sex, data = usudi, family = binomial(link = "log"))      # sex
s5 <- glm(sudi ~ dhb, data = usudi, family = binomial(link = "log"))      # district health board
s6 <- glm(sudi ~ dep, data = usudi, family = binomial(link = "log"))      # deprivation
s7 <- glm(sudi ~ magecat, data = usudi, family = binomial(link = "log"))  # maternal age (banded)
s8 <- glm(sudi ~ bwr, data = usudi, family = binomial(link = "log"))      # birth-weight ratio (continuous)
s9 <- glm(sudi ~ mage, data = usudi, family = binomial(link = "log"))     # maternal age (continuous)
library(doBy)
# Rank the candidate models by AIC. NOTE(review): s8 and s9 are excluded
# here -- presumably because bwr/mage contain NAs, so those models are fit
# on different samples and their AICs are not comparable; TODO confirm.
orderBy(~ AIC, AIC(s1,s2,s3,s4,s5,s6,s7))
###
## Year of Death
#
# Counts by year and SUDI status, the univariable fit (s1, coefficients
# relative to 2014), an intercept-free refit (s1b, one coefficient per
# year for per-level predicted risk), and a likelihood-ratio test of the
# overall year effect.
addmargins(table(usudi$yod, usudi$sudi, exclude = NULL))
summary(s1 <- glm(sudi ~ yod, data = usudi, family = binomial(link = "log")))
s1b <- glm(sudi ~ 0 + yod, data = usudi, family = binomial(link = "log"))
anova(s1, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 786299 11695
#yod 12 20.823 786287 11674 0.05304 .
# Tukey all-pairwise comparisons between years, effect and marginal-effect
# plots, then Wald 95% CIs exponentiated to the relative-risk scale.
year.ht <- glht(s1, linfct = mcp(yod = "Tukey"))
summary(year.ht)
plot(year.ht)
plot(allEffects(s1))
plot(margins(s1b, type = "link")[[1]])
plot(margins(s1, type = "link")[[1]])
m <- s1
se <- sqrt(diag(vcov(m)))  # standard errors from the coefficient covariance
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)  # back-transform from the log link: relative risks with 95% CI
#Est LL UL
#(Intercept) 0.0006884563 0.0005050495 0.0009384666
#yod2002 1.4519065953 0.9648031975 2.1849355049
#yod2003 1.6042566267 1.0782473193 2.3868729171
#yod2004 1.5407978031 1.0355920731 2.2924643127
#yod2005 1.2651144223 0.8364064852 1.9135606069
#yod2006 1.7167325587 1.1654455220 2.5287931717
#yod2007 1.4548079293 0.9813302382 2.1567317799
#yod2008 1.4497370441 0.9779094532 2.1492148277
#yod2009 1.4499473158 0.9757329670 2.1546337881
#yod2010 1.3965287039 0.9386195084 2.0778306900
#yod2011 1.2659600051 0.8412309169 1.9051305681
#yod2012 0.9392033882 0.6060188657 1.4555702047
#yod2013 1.0737210826 0.6998363295 1.6473522659
###
## Birth Weight
#
# Counts by birth-weight band; then refit after excluding "Unknown" and
# setting "over4500" as the reference category.
# NOTE(review): this overwrites the earlier s2 (fit on all of usudi) with
# a fit on the `foo` subset, so the AIC comparison above used a different
# model object than the one analysed below; TODO confirm this is intended.
addmargins(table(usudi$bw, usudi$sudi, exclude = NULL))
foo <- usudi[usudi$bw != "Unknown",]; foo <- droplevels(foo)
contrasts(foo$bw) <- contr.treatment(levels(foo$bw), base=which(levels(foo$bw) == "over4500"))
summary(s2 <- glm(sudi ~ bw, data = foo, family = binomial(link = "log")))
s2b <- glm(sudi ~ 0 + bw, data = foo, family = binomial(link = "log"))
anova(s2, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 785565 11596
#bw 8 279.75 785557 11316 < 0.00000000000000022 ***
# Tukey pairwise comparisons between birth-weight bands, effect and
# marginal-effect plots, then exponentiated Wald 95% CIs (relative risks
# versus the over4500 g reference).
bw.ht <- glht(s2, linfct = mcp(bw = "Tukey"))
summary(bw.ht)
plot(bw.ht)
plot(allEffects(s2))
plot(margins(s2b, type = "link")[[1]])
plot(margins(s2, type = "link")[[1]])
m <- s2
se <- sqrt(diag(vcov(m)))  # standard errors from the coefficient covariance
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)  # back-transform from the log link: relative risks with 95% CI
# Est LL UL
#(Intercept) 0.000095225 0.0000238706 0.0003798732
#bwunder1000 16.842732491 3.4073282582 83.2551536765
#bw1000-1499 31.475104241 7.1709458784 138.1522331592
#bw1500-1999 35.643423701 8.5624717940 148.3746380380
#bw2000-2499 28.418252763 7.0007755510 115.3582319861
#bw2500-2999 17.313116111 4.3058396852 69.6133649608
#bw3000-3499 9.880542360 2.4628593436 39.6389333340
#bw3500-3999 5.525348281 1.3707497435 22.2720987346
#bw4000-4499 4.277268915 1.0360875570 17.6578024183
###
## Ethnicity
#
# Counts by ethnicity; refit after excluding "Unknown", with
# "European_or_other" re-set as the reference on the subset.
# NOTE(review): as with birth weight, s3 is overwritten with a fit on the
# `foo` subset, differing from the s3 used in the AIC comparison above.
addmargins(table(usudi$eth, usudi$sudi, exclude = NULL))
foo <- usudi[usudi$eth != "Unknown",]; foo <- droplevels(foo)
contrasts(foo$eth) <- contr.treatment(levels(foo$eth), base=which(levels(foo$eth) == "European_or_other"))
summary(s3 <- glm(sudi ~ eth, data = foo, family = binomial(link = "log")))
s3b <- glm(sudi ~ 0 + eth, data = foo, family = binomial(link = "log"))
anova(s3, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 785716 11680
#eth 2 541.64 785714 11138 < 0.00000000000000022 ***
eth.ht <- glht(s3, linfct = mcp(eth = "Tukey"))
summary(eth.ht)
plot(eth.ht)
plot(allEffects(s3))
plot(margins(s3b)[[1]])
plot(margins(s3)[[1]])
m <- s3
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
# Est LL UL
#(Intercept) 0.0003089326 0.0002626802 0.000363329
#ethMaori 7.0387997870 5.8527777958 8.465160334
#ethPacific 3.4457815638 2.6524246786 4.476436478
###
## Sex
#
addmargins(table(usudi$sex, usudi$sudi, exclude = NULL))
summary(s4 <- glm(sudi ~ sex, data = usudi, family = binomial(link = "log")))
s4b <- glm(sudi ~ 0 + sex, data = usudi, family = binomial(link = "log"))
anova(s4, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 786299 11695
#sex 1 11.028 786298 11684 0.0008975 ***
sex.ht <- glht(s4, linfct = mcp(sex = "Tukey"))
summary(sex.ht)
plot(sex.ht)
plot(allEffects(s4))
plot(margins(s4b)[[1]])
m <- s4
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
# Est LL UL
#(Intercept) 0.0008151365 0.0007295571 0.0009107545
#sexM 1.2798616806 1.1056158159 1.4815688216
###
## Location
#
addmargins(table(usudi$dhb, usudi$sudi, exclude = NULL))
foo <- usudi[usudi$dhb != "Unknown",]; foo <- droplevels(foo)
summary(s5 <- glm(sudi ~ dhb, data = foo, family = binomial(link = "log")))
s5b <- glm(sudi ~ 0 + dhb, data = foo, family = binomial(link = "log"))
anova(s5, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 785343 11693
#dhb 3 40.704 785340 11652 0.000000007555 ***
dhb.ht <- glht(s5, linfct = mcp(dhb = "Tukey"))
summary(dhb.ht)
plot(dhb.ht)
plot(allEffects(s5))
plot(margins(s5)[[1]])
plot(margins(s5b)[[1]])
m <- s5
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
# Est LL UL
#(Intercept) 0.0009417619 0.0008408861 0.001054739
#dhbMidland 1.3098096206 1.0927743020 1.569950208
#dhbCentral 1.0529486731 0.8658995271 1.280403642
#dhbSouthern 0.6041930829 0.4778598717 0.763925374
###
## Deprivation
#
addmargins(table(usudi$dep, usudi$sudi, exclude = NULL))
foo <- usudi[usudi$dep != "Unknown",]; foo <- droplevels(foo)
summary(s6 <- glm(sudi ~ dep, data = foo, family = binomial(link = "log")))
s6b <- glm(sudi ~ 0 + dep, data = foo, family = binomial(link = "log"))
anova(s6, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 785292 11679
#dep 1 206.68 785291 11472 < 0.00000000000000022 ***
dep.ht <- glht(s6, linfct = mcp(dep = "Tukey"))
summary(dep.ht)
plot(dep.ht)
plot(allEffects(s6))
m <- s6
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
# Est LL UL
#(Intercept) 0.0006116709 0.0005509314 0.0006791068
#dep09-10 2.9457046786 2.5482970779 3.4050880992
###
## Maternal age - categorical
addmargins(table(usudi$magecat, usudi$sudi, exclude = NULL))
summary(s7 <- glm(sudi ~ magecat, data = usudi, family = binomial(link = "log")))
s7b <- glm(sudi ~ 0 + magecat, data = usudi, family = binomial(link = "log"))
anova(s7, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 786133 9453.2
#magecat 5 241.35 786128 9211.8 < 0.00000000000000022 ***
magec.ht <- glht(s7, linfct = mcp(magecat = "Tukey"))
summary(magec.ht)
plot(magec.ht)
plot(allEffects(s7))
plot(margins(s7)[[1]])
plot(margins(s7b)[[1]])
m <- s7
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
# Est LL UL
#(Intercept) 0.0002588243 0.0001294476 0.0005175067
#magecatunder20 7.8799783067 3.8448787357 16.1498092353
#magecat20-24 5.3165992300 2.6215406609 10.7822959962
#magecat25-29 2.5353559692 1.2412824514 5.1785392462
#magecat30-34 1.3954781331 0.6755309491 2.8827091087
#magecat35-39 1.5037203419 0.7150663625 3.1621888332
###
## Birth weight - numeric
#
# Log-binomial model of SUDI risk on raw (numeric) birth weight; compare
# with the categorical birth-weight model s2 above.
summary(s8 <- glm(sudi ~ bwr, data = usudi, family = binomial(link = "log")))
anova(s8, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 785565 11596
#bwr 1 239.59 785564 11356 < 0.00000000000000022 ***
plot(allEffects(s8))
m <- s8
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI (exp() converts to risk ratios per unit bwr)
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
# BUGFIX: the x-axis label said "Maternal Age" (copy-pasted from the mage
# section below) although the plotted variable is birth weight (bwr).
# NOTE(review): the y variable is the SUDI indicator, so the "Probability of
# survival" label (from the tutorial this was adapted from) is questionable
# too -- confirm the intended wording before publication.
plot(usudi$bwr,usudi$sudi,xlab="Birth Weight",ylab="Probability of survival") # outcome (0 or 1) against birth weight
g <- glm(sudi~bwr,family=binomial,usudi) # logistic (logit-link) fit used only for the overlay curve
curve(predict(g,data.frame(bwr=x),type="resp"),add=TRUE) # predicted probability curve
points(usudi$bwr,fitted(g),pch=20) # optional: fitted values drawn as points; pch= changes the dot type
###
## Maternal age - numeric
#
# Log-binomial model of SUDI risk on maternal age as a continuous covariate.
summary(s9 <- glm(sudi ~ mage, data = usudi, family = binomial(link = "log")))
anova(s9, test = "Chisq")
#Analysis of Deviance Table
#Model: binomial, link: log
#Response: sudi
#Terms added sequentially (first to last)
#Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#NULL 786133 9453.2
#mage 1 226.93 786132 9226.2 < 0.00000000000000022 ***
plot(allEffects(s9))
m <- s9
se <- sqrt(diag(vcov(m)))
# table of estimates with 95% CI (exp() -> risk ratio per year of age)
(tab <- cbind(Est = coef(m), LL = coef(m) - 1.96 * se, UL = coef(m) + 1.96 *se))
exp(tab)
plot(usudi$mage,usudi$sudi,xlab="Maternal Age",ylab="Probability of survival") # plot with body size on x-axis and survival (0 or 1) on y-axis
g=glm(sudi~mage,family=binomial,usudi) # run a logistic regression model (in this case, generalized linear model with logit link). see ?glm
curve(predict(g,data.frame(mage=x),type="resp"),add=TRUE) # draws a curve based on prediction from logistic regression model
points(usudi$mage,fitted(g),pch=20) # optional: you could skip this draws an invisible set of points of body size survival based on a 'fit' to glm model. pch= changes type of dots.
library(doBy)
# Rank all single-predictor models by AIC (orderBy() comes from doBy).
orderBy(~ AIC, AIC(s1,s2,s3,s4,s5,s6,s7,s8,s9))
# Maternal age by outcome group.
# BUGFIX: the original second argument read `sudi$mage`, but `sudi` is a
# column of `usudi`, not a data frame; both groups must come from `usudi`.
t.test(usudi$mage[usudi$sudi != 1], usudi$mage[usudi$sudi == 1])
#Welch Two Sample t-test
#
#data: usudi$mage[usudi$sudi != 1] and usudi$mage[usudi$sudi == 1]
#t = 15.01, df = 574.84, p-value < 2.2e-16
#alternative hypothesis: true difference in means is not equal to 0
#95 percent confidence interval:
# 3.357628 4.368594
#sample estimates:
# mean of x mean of y
#29.22485 25.36174
# Residual diagnostics for s1, coloured by outcome, with per-group and
# overall lowess smoothers.
# BUGFIX: bare `sudi` and the undefined `Y` were used as index masks; the
# outcome vector is usudi$sudi.
# NOTE(review): this indexing assumes predict(s1)/residuals(s1) have the
# same length as usudi$sudi (i.e. no rows were dropped when fitting s1) --
# confirm against how s1 was fitted.
plot(predict(s1),residuals(s1),col=c("blue","red")[1+usudi$sudi])
lines(lowess(predict(s1)[usudi$sudi==0],residuals(s1)[usudi$sudi==0]),col="blue")
lines(lowess(predict(s1)[usudi$sudi==1],residuals(s1)[usudi$sudi==1]),col="red")
lines(lowess(predict(s1),residuals(s1)),col="black",lwd=2)
abline(h=0,lty=2,col="grey")
# Repeated train/test-split estimation of out-of-sample AUC after backward
# stepwise selection.
k <- 20 ##20% of the dataset as testdata
N <- 100 ##100 Permutations
permu <- paste0("Permut_",1:N)
AUC_Results <- matrix(NA, 1, N, dimnames=list("AUC",permu))
n <- ncol(usudi)
numrows <- nrow(usudi)
learnDataSize <- round(numrows*(1-0.01*k))
testDataSize <- numrows-learnDataSize
##loop
# NOTE(review): `Dataset` and `yourFormula` are template placeholders that
# are never defined in this file -- substitute the real data frame and model
# formula before running. `roc.auc()` is not defined here either (presumably
# from the `verification` package -- confirm which package supplies it).
for (j in 1:N){
  cat("calculating",((j/N)*100),"% \n")
  learnIndex <- sample(nrow(Dataset))[1:learnDataSize]
  learnData <- Dataset[learnIndex,]
  testData <- Dataset[-learnIndex,]
  mg <- glm(formula = yourFormula,
            family = binomial(link = "logit"), data = learnData)
  bestmod_cv <- step(mg, direction = "backward", trace = 0)
  predicted_cv <- predict(bestmod_cv, newdata = testData, type = "response")
  observed_cv <- testData[,"Y"]
  # BUGFIX: the original called roc.auc(observed_k, predicted_k); neither
  # name exists -- the vectors computed above are observed_cv/predicted_cv.
  AUC_result <- roc.auc(observed_cv, predicted_cv)
  AUC_Results[1,j] <- AUC_result$A
}
# Self-contained train/test ROC demonstration on simulated data.
set.seed(0)
head(tab <- data.frame(Y=as.numeric(runif(100)>0.5), X=rnorm(100)))
subs <- sample(c(1:nrow(tab)), round(nrow(tab)*0.66), replace=F) #the 66% of data you want in one sample
tab1 <- tab[subs, ] #the one sample
tab2 <- tab[!c(1:nrow(tab)) %in% subs, ] #the other sample, which are the data that do not fall in the first sample
rlog1 <- glm(Y~X,family=binomial,data=tab1)
summary(rlog1)
# Out-of-sample predicted probabilities for the held-out 34%.
tab2$pred <-predict(rlog1, newdata=tab2, type="response")
hist(tab2$pred)
library(ROCR) #allows you to make easily ROC's which allows the assessment of your prediction
pred <- prediction(tab2$pred, tab2$Y)
perf <- performance(pred,"tpr","fpr")
# ROC curve; the red diagonal is the no-skill reference line.
plot(perf); abline(0, 1, col="red")
# NOTE(review): lrm() comes from the `rms` package, which is not loaded
# anywhere in this chunk -- this line fails without library(rms).
slrm <- lrm(sudi ~ bw, data = usudi)
|
18d8bb799d504b4d71358a3e5d02a12959eca954 | 94df6050f2a262da23f62dd678ccc4366b7657fc | /plot-car.R | 73a507a2ece28a815707cdd6d402550ac5f96fc6 | [] | no_license | EkenePhDAVHV/phd-autonomous-cars-frank | 29cc2fc608db53d4d060422022dc5019cf6360f0 | 1daed3425bfad99dac31543fbeb7950e25aa2878 | refs/heads/main | 2023-04-29T06:02:59.444072 | 2021-05-23T11:04:07 | 2021-05-23T11:04:07 | 357,157,735 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 526 | r | plot-car.R | library("ggplot2")
library("dplyr")
# Exploratory plots of per-car speed data from the AVHV traffic-lights run.
d <- read.csv("AVHV_Main/output/AVHV_Traffic_Lights/av.csv")
d$car_name <- as.factor(d$car_name)
# Speed per car, coloured by stopping time and sized by reaction time.
ggplot(d, aes(x=car_name, y=speed)) + geom_point(aes(col=stopping_time, size=reaction_time))
# Time-ordered speed traces for two individual vehicles.
# BUGFIX: use seq_len(nrow(y)) instead of 1:nrow(y); the latter yields
# c(1, 0) (and a length error on assignment) when the filter matches no rows.
y <- d %>% filter(car_name == "GentleCar29")
y$id <- seq_len(nrow(y))
ggplot(y, aes(x=id, y=speed)) + geom_point(aes(col=stopping_time, size=reaction_time))
y <- d %>% filter(car_name == "GentleCar60")
y$id <- seq_len(nrow(y))
ggplot(y, aes(x=id, y=speed)) + geom_point(aes(col=stopping_time, size=reaction_time))
|
3bff5fc50a34205c53b723ef8ccbcf82c81e46f3 | 8a5830837c7181be89ff732663b5e3e6ec500941 | /R/wls.R | d79eb3cadf634b6d0e3f73afcefc2ab8d2a89d7c | [] | no_license | svmiller/stevemisc | 282c68d00557537c3be10804e782c64ca757d2e0 | 5bf1d006908463010866f8fec09300272a3538a0 | refs/heads/master | 2023-05-11T07:46:56.713314 | 2023-05-05T13:03:12 | 2023-05-05T13:03:12 | 95,028,314 | 11 | 4 | null | 2023-01-12T15:55:12 | 2017-06-21T17:18:49 | R | UTF-8 | R | false | false | 1,643 | r | wls.R | #' Get Weighted Least Squares of Your OLS Model
#'
#' @description \code{wls()} takes an OLS model and re-estimates it using a weighted least squares
#' approach. Weighted least squares is often a "textbook" approach to dealing with the presence of
#' heteroskedastic standard errors, for which the weighted least squares estimates are compared
#' to the OLS estimates of uncertainty to check for consistency or potential inferential implications.
#'
#' @details The function *should* be robust to potential model specification oddities (e.g. polynomials
#' and fixed effects). It also should perform nicely in the presence of missing data, if and only
#' if `na.action = na.exclude` is supplied first to the offending OLS model supplied to the function
#' for a weighted least squares re-estimation.
#'
#' @return \code{wls()} returns a new model object that is a weighted least squares re-estimation
#' of the OLS model supplied to it.
#'
#' @author Steven V. Miller
#'
#' @param mod a fitted OLS model
#'
#' @examples
#'
#' M1 <- lm(mpg ~ ., data=mtcars)
#' M2 <- wls(M1)
#'
#' summary(M2)
wls <- function(mod) {
  # Weighted least squares re-estimation of an OLS model (see roxygen above).
  # mod: a fitted `lm` object. Returns the model re-fitted with weights
  # 1 / E[|resid|]^2, the textbook WLS response to heteroskedasticity.
  if (!identical(class(mod), "lm")) {
    stop("Weighted Least Squares only makes sense in the context of OLS, and this model/object does not appear to be OLS. As of right now, this function works for just those OLS models generated by the lm() function in base R.")
  }
  resid <- resid(mod)
  fitted <- fitted(mod)
  # Auxiliary regression: absolute residuals on fitted values. na.exclude
  # keeps NA padding so fitted(WM) stays aligned row-for-row with the data.
  WM <- lm(abs(resid) ~ fitted, na.action=na.exclude)
  # Weights are the inverse squared predicted absolute residuals, so
  # observations with larger expected error variance get less weight.
  # (Computed once; the original evaluated this same expression twice.)
  wts <- 1/(fitted(WM)^2)
  # Re-fit the original specification with those weights. The weights are
  # also attached as a `wts` column so update() can resolve the name in the
  # data. NOTE(review): with a `~ .` formula, that extra column would also
  # enter the refit as a predictor -- known limitation of this approach.
  A <- eval(mod$call$data)
  A$wts <- wts
  WLSM <- update(mod, ~., data=A, weights = wts)
  return(WLSM)
}
|
15bf498ff0c633a88f463d3bf82990d68020f4e6 | 9b8c5fba0371b4663f5891bc1ba2225181b59692 | /Scripts/0_CV_Cutoff.R | 016856d01820429d6c57568c066ac6c65be0ea94 | [] | no_license | mpark-bioinfo/HF11 | 30ac03b32e8329791d13fb8b0ad9ab3177906290 | 25d1f9b4b10bd2ce91dd385612acbbfbb1b85d37 | refs/heads/master | 2021-01-21T04:44:21.166060 | 2016-06-15T21:06:22 | 2016-06-15T21:06:22 | 54,639,616 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,765 | r | 0_CV_Cutoff.R | #!/usr/bin/env Rscript
# Meeyoung Park, 04/07/2016
# Command: Rscript <filename> <outdir>
# Input: "_imputed.csv"
# arguments: cut-off, input_file, output_dir
# Process: Extract >= CV cutoff
# Output: "_CV.csv"
#library(dply)
args = commandArgs(TRUE)
print(args[2])
# Define CV function
# NOTE: commandArgs() returns character strings, so `cutoff` is character
# here; it must be converted with as.numeric() wherever it is compared
# against numeric CV values.
cutoff <- args[1]
# Read input file
lipid_data = read.csv(args[2], header = TRUE, sep = ",")
#lipid_data = read.csv("../Normalization/SCNPos/SCNPos_imputed.csv", header = TRUE, sep = ",")
# Exclude internal standards
# NOTE(review): if grep() finds no "IS" rows, is_lipid_idx is integer(0) and
# lipid_data[-integer(0), ] selects ZERO rows (not all rows) -- confirm the
# input always contains internal-standard rows.
is_lipid_idx <- grep("IS", lipid_data$Sample)
LipidData <- lipid_data[-is_lipid_idx, ]
# Calculate CV (coefficient of variation, %) per feature across the pooled
# replicate columns.
# NOTE(review): assumes the replicate intensity columns start at column 50
# of the input file -- confirm against the "_imputed.csv" format.
TestPool <- LipidData[, 50:length(LipidData)]
sd_test <- data.frame(apply(TestPool,1, sd))
colnames(sd_test) <- 'SD'
mean_test <- data.frame(rowMeans(TestPool))
colnames(mean_test) <- 'Mean'
RSD <- data.frame(as.numeric(sd_test$SD)/as.numeric(mean_test$Mean) * 100)
colnames(RSD) <- 'CV';
# Summarise how many features fall at or under 10/20/30% CV.
# sum(..., na.rm = TRUE) counts matches directly and returns 0 (rather than
# the NA produced by table(...)["TRUE"]) when nothing passes.
count.10 <- sum(RSD$CV <= 10, na.rm = TRUE)
count.20 <- sum(RSD$CV <= 20, na.rm = TRUE)
count.30 <- sum(RSD$CV <= 30, na.rm = TRUE)
print(dim(RSD)[1])
print(count.10)
print(count.20)
print(count.30)
percentage10 <- count.10/dim(RSD)[1]*100
percentage20 <- count.20/dim(RSD)[1]*100
percentage30 <- count.30/dim(RSD)[1]*100
print(percentage10)
print(percentage20)
print(percentage30)
# Keep only the features whose CV is at or below the requested cutoff.
# BUGFIX: `cutoff` is a character command-line argument; comparing a numeric
# vector against a character coerces the numbers to strings and compares
# lexicographically (e.g. 5 <= "30" is FALSE). Convert explicitly.
CV_idx <- which(as.numeric(RSD$CV) <= as.numeric(cutoff))
FinalLipidData <- LipidData[CV_idx, ]
print(dim(FinalLipidData))
# Output file name: <outdir><tissue>_CV<cutoff>.csv
# BUGFIX: the tissue name must be derived from the input file path (args[2]);
# the original split args[1], which is the cutoff value, not a path.
name_temp <- strsplit(args[2], "/")
tmp <- strsplit(name_temp[[1]][length(name_temp[[1]])], "\\.") #Check the position of file name
tissue_name <- strsplit(tmp[[1]][1], "\\_")
new_out <- paste(args[3], tissue_name[[1]][1],"_CV",args[1], ".csv", sep="")
write.csv(FinalLipidData, new_out, row.names=FALSE)
6fadbfe62ab85dd839653647b31882268225f2d3 | 32c187ff42b70be2dfe109328056d001f5c8b245 | /webscraping.R | f8400e59eac608d0a3e1ed062ceb3a195ecb43f9 | [] | no_license | npowell759/epl-data-analysis | ea36950d1b4a4d67f1e0b5c0a87d51ac192c0f8e | 6e1238a2706086b71001d59960e3b4433f3ad13e | refs/heads/master | 2023-03-13T09:02:59.627981 | 2021-03-05T22:43:55 | 2021-03-05T22:43:55 | 344,944,218 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,495 | r | webscraping.R | library(rvest)
library(dplyr)
# Scrape 2019/20 Premier League transfer spending from Transfermarkt and
# write a per-club summary to CSV. read_html()/html_nodes()/html_text()
# come from rvest (loaded at the top of this file).
link <- "https://www.transfermarkt.co.uk/premier-league/transfers/wettbewerb/GB1/plus/?saison_id=2019"
page <- read_html(link)
# NOTE(review): the [21:40] slice assumes the 20 club names are the 21st to
# 40th "h2 a" nodes on the page -- fragile against site layout changes.
team <- page %>% html_nodes("h2 a") %>% html_text() %>% .[21:40]
# Strip the "Expenditure: £...m" decoration and keep the numeric value.
expenditure <- page %>% html_nodes(".transfer-einnahmen-ausgaben.redtext") %>% html_text() %>%
  gsub("\n\t\tExpenditure: £", "", .) %>% gsub("m\t\t\t\t\t\t", "", .) %>% as.numeric(.)
income <- page %>% html_nodes(".transfer-einnahmen-ausgaben.greentext") %>% html_text() %>%
  gsub("\n\t\t\t\t\t\tIncome: £", "", .) %>% gsub("m\t\t\t\t\t\t\t", "", .) %>%
  gsub("Th.\t\t\t\t\t\t\t", "", .) %>% as.numeric(.)
# Values above 250 are assumed to be thousands ("Th.") rather than millions
# and are rescaled -- heuristic tied to plausible club income; confirm.
income[income>250] <- income[income>250] / 1000
income <- round(income, digits = 2)
net_spend <- income - expenditure
# Largest single incoming/outgoing transfer fee per club.
largest_in <- page %>% html_nodes(".responsive-table:nth-child(2) tr:nth-child(1) .rechts a") %>% html_text() %>%
  gsub("£", "", .) %>% gsub("m", "", .) %>% as.numeric(.)
largest_out <- page %>% html_nodes(".responsive-table:nth-child(4) tr:nth-child(1) .rechts a") %>% html_text() %>%
  gsub("£", "", .) %>% gsub("m", "", .) %>% gsub("Loan fee:", "", .) %>% gsub("Th.", "", .) %>% as.numeric(.)
# Same thousands-vs-millions heuristic as for income (threshold 140).
largest_out[largest_out > 140] <- largest_out[largest_out > 140] / 1000
largest_out <- round(largest_out, digits = 2)
transferspending <- data.frame(team, expenditure, income, net_spend, largest_in, largest_out, stringsAsFactors = FALSE)
write.csv(transferspending, "transferdata.csv", fileEncoding="Windows-1252", row.names=FALSE)
6ca97acc5a6c4b4a11e39877c30207e06723b453 | a9392f6fa8733c02e536354c8983c3c437202993 | /decisionTreeTraffic.R | 4c02fd99728cebb359e22864e822c998723e518e | [] | no_license | abhinav-sunderrajan/Work | 363a07da0b17c2288ce996b24ca1414ba1c715d8 | cd9b19f6ef3960284127538ce1baa27b3bbba671 | refs/heads/master | 2021-01-10T14:49:38.617861 | 2016-01-18T03:39:27 | 2016-01-18T03:39:27 | 48,419,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,776 | r | decisionTreeTraffic.R | library(RPostgreSQL)
library(ggplot2)
require(gridExtra)
library("plot3D")
require(tree)
library(caret)
library(partykit)
library("reshape2")
library(rpart)
library(doParallel)
set.seed(2123)
# Connect to the simulation-output database.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before sharing this script.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, host='172.25.187.111', port='5432', dbname='abhinav',
user='abhinav', password='qwert$$123')
# Analysis window: 120 s starting at t = 4500 s.
times1=c(4500)
times2=times1+120
# Probe-vehicle penetration rates to evaluate.
penetrations=c(0.05,0.1,0.2,0.3,0.4,0.5,1.0)
# Road-segmentation methods compared; rSquareMat accumulates the R^2 of the
# density/speed fit for each (method, penetration) pair.
method=c("Uniform","P.I.E","Decision Tree")
rSquareMat=matrix(0,length(method),length(penetrations))
rownames(rSquareMat)=method
colnames(rSquareMat)=penetrations
#PIE
# Pre-computed segment boundaries (metres along the road) for the P.I.E
# segmentation method.
pieBreaks=c(0.0,583.9831796057117, 1973.3525390927846, 2367.5948374569775,
2489.872751163762, 3261.271616457807, 4071.903598144841, 4834.836586015808,
5531.1844928841365, 5743.1141418953775, 5965.289275894134, 6207.740716730275,
6670.222224060167, 7025.154292404613, 7658.39571005447, 8040.834612543026,
8554.283981491059, 8807.936731242416, 9591.841494223643, 10148.238120947959,
11286.20075545962, 11637.04476230792, 12438.227750939186, 13328.802132638793)
par(mfrow=c(1,1))
#Function for different segmentation methods
rSquare <- function(distance_split, time1, time2, penetration, splits) {
  # Fit the regression density ~ log(speed) over all (segment, 1-second
  # time bin) observations and return its R^2.
  #   distance_split: named list of per-segment data frames with columns
  #                   time_stamp, agent_id and speed
  #   time1, time2:   observation window [time1, time2] in seconds
  #   penetration:    probe-vehicle fraction; counts are scaled by 1/penetration
  #   splits:         segment boundaries; segment i spans splits[i]..splits[i+1]
  all_speed <- numeric()
  all_density <- numeric()
  time_breaks <- seq(time1, time2, by = 1)
  time_labels <- seq(time1, (time2 - 1), by = 1)
  seg_idx <- 1
  for (seg_name in names(distance_split)) {
    seg_data <- distance_split[[seg_name]]
    bins <- split(seg_data, cut(seg_data$time_stamp, breaks = time_breaks, labels = time_labels))
    seg_len <- splits[seg_idx + 1] - splits[seg_idx]
    # Vehicles per km (scaled up by the inverse penetration rate) and the
    # mean speed inside each one-second bin.
    dens <- vapply(bins, function(b) {
      length(unique(b$agent_id)) * (1 / penetration) * 1000.0 / seg_len
    }, numeric(1))
    spd <- vapply(bins, function(b) mean(b$speed), numeric(1))
    all_density <- c(all_density, unname(dens))
    all_speed <- c(all_speed, unname(spd))
    seg_idx <- seg_idx + 1
  }
  # Drop bins with missing values (empty bins give NaN mean speed), then
  # non-positive speeds so that log() below is defined.
  keep <- complete.cases(all_speed, all_density)
  all_speed <- all_speed[keep]
  all_density <- all_density[keep]
  positive <- all_speed > 0
  all_speed <- all_speed[positive]
  all_density <- all_density[positive]
  fit <- lm(all_density ~ log(all_speed))
  # Diagnostic scatter plot, produced only for the single reference
  # configuration (full penetration, window 4500-4620).
  if ((time1 == 4500 & time2 == 4620) & penetration == 1.0) {
    plot(all_speed, all_density, xlab = "", ylab = "", cex.axis = 1.50, cex.lab = 1.5, xlim = c(0, 20), ylim = c(0, 250))
    points(all_speed, predict(fit), col = "red")
    mtext("Speed (m/s)", 1, 2.5, cex = 1.5)
    mtext("Density (veh/km)", 2, 2.5, cex = 1.5)
  }
  summary(fit)$r.squared
}
sdSplits <- function(distance_split, time1, time2, penetration, splits) {
  # For each road segment, compute the standard deviation of vehicle speed
  # inside each 60-second window of [time1, time2].
  #   distance_split: named list of per-segment data frames with columns
  #                   time_stamp and speed
  #   time1, time2:   observation window in seconds (windows are 60 s wide)
  #   penetration, splits: unused here; kept so the signature stays parallel
  #                   to rSquare() for existing callers.
  # Returns a named list (one entry per segment) of per-window speed SDs.
  # (The original also accumulated density/speed vectors that were never
  # returned; that dead computation has been removed.)
  sd_speed_list <- list()
  time_breaks <- seq(time1, time2, by = 60)
  time_labels <- seq(time1, (time2 - 60), by = 60)
  for (seg_name in names(distance_split)) {
    seg_data <- distance_split[[seg_name]]
    bins <- split(seg_data, cut(seg_data$time_stamp, breaks = time_breaks, labels = time_labels))
    # sd() of a single observation is NA, which is propagated as-is.
    sd_speed_list[[seg_name]] <- unname(vapply(bins, function(b) sd(b$speed), numeric(1)))
  }
  sd_speed_list
}
# Main experiment: for each time window and penetration rate, sample probe
# vehicles, segment the road three ways (P.I.E, decision tree, uniform) and
# accumulate the R^2 of the density/speed fit per method.
for(i in 1:1){
query<-paste("select distance_along_road as distance,speed,time_stamp,agent_id from semsim_output WHERE iteration_count=47 AND time_stamp >=",times1[i], " AND time_stamp <=",times2[i]," AND distance_along_road<=13000", sep="")
data<-dbGetQuery(con,query)
set.seed(i)
for(penetration in penetrations){
# Sample the probe-vehicle subset for this penetration rate.
samples=sample(unique(data$agent_id),length(unique(data$agent_id))*penetration,replace=FALSE)
training <- subset(data, agent_id %in% samples)
#Decision tree rpart
# Regression tree of speed on distance; its split points define the
# data-driven segmentation.
modelFit <- rpart(speed ~ distance,data=training,control=rpart.control(maxdepth=14))
#rpart1a <- as.party(modelFit)
#plot(rpart1a)
index=1
decisonTreeSplit=numeric()
for(split in modelFit$splits[,4]){
decisonTreeSplit[index]=split
index=index+1
}
decisonTreeSplit<-sort(decisonTreeSplit)
decisonTreeSplit<-c(0.0,decisonTreeSplit)
#CUTS PIE
# NOTE(review): `pieBreaks[1:length(pieBreaks)-1]` relies on operator
# precedence -- it is (1:length(pieBreaks))-1, i.e. indices 0..n-1; the 0
# is silently dropped so this yields the first n-1 elements. Works, but
# pieBreaks[seq_len(length(pieBreaks)-1)] would be clearer.
cutPIE<-cut(training$distance,breaks = pieBreaks,labels = pieBreaks[1:length(pieBreaks)-1])
distance_split_pie<-split(training,cutPIE)
statPIE=rSquare(distance_split_pie,times1[i],times2[i],penetration,pieBreaks)
rSquareMat["P.I.E",toString(penetration)]=rSquareMat["P.I.E",toString(penetration)]+statPIE
#CUTS DECISION TREE
cutDT<-cut(training$distance,breaks = decisonTreeSplit,labels = decisonTreeSplit[1:length(decisonTreeSplit)-1])
distance_split_dt<-split(training,cutDT)
statDT=rSquare(distance_split_dt,times1[i],times2[i],penetration,decisonTreeSplit)
rSquareMat["Decision Tree",toString(penetration)]=rSquareMat["Decision Tree",toString(penetration)]+statDT
#Uniform cuts
# Fixed 1 km segments over the 13 km stretch.
segment_length=1000
segment_breaks=seq(0, 13000, by = segment_length)
c1<-cut(training$distance,breaks = segment_breaks,labels = seq(0, (13000-segment_length), by = segment_length))
distance_split<-split(training,c1)
statUniform=rSquare(distance_split,times1[i],times2[i],penetration,segment_breaks)
rSquareMat["Uniform",toString(penetration)]=rSquareMat["Uniform",toString(penetration)]+statUniform
print(paste(times1[i],times2[i],penetration,statPIE,statDT,statUniform,sep=" "))
}
}
# NOTE(review): dividing by 5.0 suggests averaging over 5 windows/runs, but
# the outer loop above runs a single iteration (1:1) -- confirm intended.
rSquareMat=rSquareMat/5.0
#SD Split
# Per-segment speed dispersion for the full-penetration reference run.
query<-paste("select distance_along_road as distance,speed,time_stamp,agent_id from semsim_output WHERE iteration_count=50 AND time_stamp >=",4500, " AND time_stamp <=",4620," AND distance_along_road<=13000", sep="")
training<-dbGetQuery(con,query)
#Decision tree rpart
modelFit <- rpart(speed ~ distance,data=training,control=rpart.control(maxdepth=14))
#rpart1a <- as.party(modelFit)
#plot(rpart1a)
index=1
decisonTreeSplit=numeric()
for(split in modelFit$splits[,4]){
decisonTreeSplit[index]=split
index=index+1
}
decisonTreeSplit<-sort(decisonTreeSplit)
decisonTreeSplit<-c(0.0,decisonTreeSplit)
cutDT<-cut(training$distance,breaks = decisonTreeSplit,labels = decisonTreeSplit[1:length(decisonTreeSplit)-1])
distance_split_dt<-split(training,cutDT)
sdInSplits=sdSplits(distance_split_dt,4500,4620,1.0,decisonTreeSplit)
# Mean per-window SD for each decision-tree segment.
avgSd=sapply(sdInSplits,mean)
#Close Database connection
dbDisconnect(con)
|
3c584db7fc3b9388c04acf39ce5fa6865be1df0f | effd148f74a6e0e7e365953b0ee00918c429f67e | /README.rd | 8f1ef61b7c4dc21613c185a7938f9ab122255416 | [] | no_license | vijaygithub45/DotNetCoreWebapp | bfa6cc73f213bbe42fa3d48d54b013c511355d55 | f885cf63cf373b6a82cf097a11dbe1cff4c543a8 | refs/heads/main | 2023-04-08T10:02:31.917299 | 2021-04-19T18:20:49 | 2021-04-19T18:20:49 | 356,908,330 | 0 | 0 | null | 2021-04-14T11:12:13 | 2021-04-11T15:41:32 | null | UTF-8 | R | false | false | 19 | rd | README.rd | THIS IS TEST REPO
|
ae2633d7e0f7703e40f4cc8dd2a5ebf6967c8f3f | f8ce1034cef41685ab2387fa42084ac1ee5c96cf | /chapter13/asggdend.R | 35294bfe7e26a204de1a8b4b5767370241dad31e | [] | no_license | AnguillaJaponica/RProgramming | ab7e15d99153ebe585745289490e6e3df118a35c | ae1401920b5a52795cffc95bd83922f15761e531 | refs/heads/master | 2020-11-28T10:07:02.579925 | 2019-12-23T15:28:23 | 2019-12-23T15:28:23 | 229,777,900 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 584 | r | asggdend.R | library(ggplot2)
# Configure the dendrogram appearance (dendextend-style chained set() calls):
# shrink labels, hang leaves, colour/size branches for k = 3 clusters, and
# style the nodes.
dend2 <- dend %>%
set("labels_cex", 0.5) %>%
hang.dendrogram(hang_height=0.8) %>%
set("branches_k_color", value=c("red", "blue", "green"), k=3) %>%
set("branches_lwd", 0.5) %>%
set("branches_lty", 1) %>%
set("nodes_pch", 19) %>%
set("nodes_cex", 1) %>%
set("nodes_col", "black")
# Convert to an object that the ggplot2 package can draw
ggd <- as.ggdend(dend2)
# Draw with the ggplot2 package
ggplot(ggd, offset_labels = -0.1)
ec55ebfefeb36b9b130139f6bcf3eaa117c16090 | ed633d145dfa8b32511b3cb13ba95b822e2559c8 | /doc/gpw.R | 36734ae7d5a13a7d8923d631430b78b96eb697fd | [] | no_license | wendellopes/rvswf | 51a09f034e330fbb7fd58816c3de2b7f7fdba9dc | ee243c3e57c711c3259a76051a88cc670dfe9c4b | refs/heads/master | 2020-05-19T19:38:18.987560 | 2016-09-11T22:57:37 | 2016-09-11T22:57:37 | 19,242,694 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,917 | r | gpw.R | lambda=.5e-6
# Check that the partial-wave expansion (PWE) of a generic plane wave
# reproduces the wave-field definition (WFD) of E and H at a random point.
# NOTE(review): `lambda` (the wavelength) is defined just above this chunk.
ko=2*pi/lambda
lmax<-40
#-------------------------------------------------------------------------------
# Random evaluation point inside a cube of side 2*lambda.
NP=200
z<-seq(-lambda,lambda,lambda/(NP-1))
#-------------------------------------------------------------------------------
xo<-sample(z,1) # between 1:NP
yo<-sample(z,1) # between 1:NP
zo<-sample(z,1) # between 1:NP
#-------------------------------------------------------------------------------
# Compute the fields - u (polarization) and k (propagation) must be orthogonal
#-------------------------------------------------------------------------------
ux<-sample(-10:10,1)
uy<-0
uz<-sample(-10:10,1)
#-------------------------------------------------------------------------------
kx<-0
ky<-sample(-10:10,1)
kz<-0
#-------------------------------------------------------------------------------
# Vector norms; Conj() keeps this correct if the components become complex.
# NOTE(review): sample(-10:10, 1) can draw 0 for every component, making the
# norm zero and the normalisation below divide by zero -- confirm acceptable
# for this ad-hoc test script.
u<-sqrt(Conj(ux)*ux+Conj(uy)*uy+Conj(uz)*uz)
k<-sqrt(Conj(kx)*kx+Conj(ky)*ky+Conj(kz)*kz)
#-------------------------------------------------------------------------------
# NORMALIZATION
#-------------------------------------------------------------------------------
ux<-ux/u
uy<-uy/u
uz<-uz/u
# Helicity (spherical) components of the polarization vector.
um<-(ux-1i*uy)/sqrt(2)
up<-(ux+1i*uy)/sqrt(2)
#-------------------------------------------------------------------------------
# NORMALIZATION
#-------------------------------------------------------------------------------
kx<-kx/k
ky<-ky/k
kz<-kz/k
km<-(kx-1i*ky)/sqrt(2)
kp<-(kx+1i*ky)/sqrt(2)
#-------------------------------------------------------------------------------
# ELECTRIC FIELD
#-------------------------------------------------------------------------------
# Plane-wave phase factor at the evaluation point.
E0<-exp(1i*ko*(kx*xo+ky*yo+kz*zo))
#-------------------------------------------------------------------------------
Em<-E0*um
Ez<-E0*uz
Ep<-E0*up
#-------------------------------------------------------------------------------
# MAGNETIC FIELD
#-------------------------------------------------------------------------------
Hm<-1i*E0*(km*uz-kz*um)
Hz<-1i*E0*(kp*um-km*up)
Hp<-1i*E0*(kz*up-kp*uz)
#-------------------------------------------------------------------------------
# Vector spherical wave functions and plane-wave beam-shape coefficients
# (vswf.hmp/vswf.gpw come from the rvswf package this vignette belongs to).
v<-vswf.hmp(ko,xo,yo,zo,lmax)
G<-vswf.gpw(ko*kx,ko*ky,ko*kz,ux,uy,uz,lmax)
#-------------------------------------------------------------------------------
# Reconstruct the fields from the partial-wave expansion.
Em.pwe<-sum(G$GTE*v$M.m-G$GTM*v$N.m)
Ez.pwe<-sum(G$GTE*v$M.z-G$GTM*v$N.z)
Ep.pwe<-sum(G$GTE*v$M.p-G$GTM*v$N.p)
Hm.pwe<-sum(G$GTM*v$M.m+G$GTE*v$N.m)
Hz.pwe<-sum(G$GTM*v$M.z+G$GTE*v$N.z)
Hp.pwe<-sum(G$GTM*v$M.p+G$GTE*v$N.p)
#-------------------------------------------------------------------------------
H.wfd<-c(Hm,Hz,Hp)
E.wfd<-c(Em,Ez,Ep)
#-------------------------------------------------------------------------------
# The two prints should agree: closed-form fields vs their expansion.
E.pwe<-c(Em.pwe,Ez.pwe,Ep.pwe)
H.pwe<-c(Hm.pwe,Hz.pwe,Hp.pwe)
print(cbind(E.pwe,H.pwe))
#-------------------------------------------------------------------------------
print(cbind(E.wfd,H.wfd))
print(cbind(E.pwe,H.pwe))
#-------------------------------------------------------------------------------
1279a7c547940f0bccc2a2f45341231f7e963111 | 95b6831a180d9224004ef7b16f73b77874eb7739 | /man/wcs.Rd | 018951c28b99fe2a1c35f55d948101d505d1f4c8 | [] | no_license | Ketua76/SOWAS | 13154b3df7abcfd604d4baafdc894b33b89f0bfa | 8a1ec6928f7551f4700c8412014fc4ea3a777566 | refs/heads/master | 2020-05-22T14:09:36.497283 | 2012-11-01T15:58:08 | 2012-11-01T15:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,797 | rd | wcs.Rd | \name{wcs}
\alias{wcs}
\title{Wavelet Sample Cross Spectrum}
\usage{
wcs(ts1, ts2, s0 = 1, noctave = 5, nvoice = 10,
w0 = 2 * pi, sw = 0, tw = 0, swabs = 0, markt = -999,
marks = -999, logscale = FALSE, phase = TRUE,
plot = TRUE, units = "", device = "screen",
file = "wcsp", split = FALSE, color = TRUE,
pwidth = 10, pheight = 7, labsc = 1, labtext = "")
}
\arguments{
\item{ts1}{first time series object to be transformed}
\item{ts2}{second time series object to be transformed}
\item{s0}{lowest calculated scale in units of the time
series}
\item{noctave}{number of octaves}
\item{nvoice}{number of voices per octave}
\item{w0}{time/frequency resolution omega_0}
\item{sw}{length of smoothing window in scale direction
is 2*sw*nvoice+1}
\item{tw}{length of smoothing window in time direction is
2*s*tw+1}
\item{swabs}{length of smoothing window in scale
direction at scale s is 2*swabs+1}
\item{markt}{vector of times to be marked by vertical
dotted lines; when set to -999 (default), no lines are
plotted.}
\item{marks}{vector of scales to be marked by horizontal
dotted lines; when set to -999 (default), no lines are
plotted.}
\item{logscale}{when TRUE, the contours are plotted in
logarithmic scale}
\item{phase}{TRUE when phase calculation desired}
\item{plot}{TRUE when graphical output desired}
\item{units}{character string giving units of the data
sets. Default: ""}
\item{device}{"screen" or "ps"}
\item{file}{character string giving filename of graphical
output without extension}
\item{split}{when TRUE, modulus and phase are splitted
into two files; default: FALSE}
\item{color}{TRUE (default): color plot, FALSE: gray
scale}
\item{pwidth}{width of plot in cm}
\item{pheight}{height of plot in cm}
\item{labsc}{scale of labels, default: 1, for two-column
manuscripts: 1.5, for presentations: >2}
\item{labtext}{puts a label in upper left corner of the
plot}
}
\value{
  \item{modulus}{matrix of modulus of the wavelet sample cross spectrum,
    of dimension [length(intersection of ts1 and ts2)]x[nvoice*noctave+1]}
  \item{phase}{matrix of phase of the wavelet sample cross spectrum,
    same dimension as modulus}
  \item{s0}{lowest calculated scale in units of the time series}
  \item{noctave}{number of octaves}
  \item{nvoice}{number of voices per octave}
  \item{w0}{time/frequency resolution omega_0}
  \item{time}{vector of times of length(intersection of ts1 and ts2)}
  \item{scales}{vector of scales of length nvoice*noctave+1}
  \item{critval}{not used}
  \item{at}{not used}
  \item{kernel}{not used}
}
\description{
This function estimates the wavelet cross spectrum of two
time series objects with the Morlet wavelet.
}
\details{
\strong{WARNING!} It is better not to use this function because
it is in general easily misinterpreted! A peak in the
wavelet cross sample spectrum appears in the three cases,
that either the first processes exhibits a peak, or the
second process or both. But it does not tell, what case
is observed. \strong{So in general, a peak in the wavelet
cross sample spectrum does not imply that the two
underlying processes are related in any way.} The
function returns an object of type "wt", that might be
directly plotted by the plot function.
}
\examples{
##
data(nao)
data(nino3)
# wcs mimics peaks of coherent power, where in reality are non to be
# found, as wco shows (see FAQs on my homepage)
# Thus, never use wcs! :-)
wcsp.nao.nino3 <- wcs(nao,nino3,s0=0.5,noctave=5,nvoice=10)
wcoh.nao.nino3 <- wco(nao,nino3,s0=0.5,noctave=5,nvoice=10,sw=0.5,arealsiglevel=0)
}
\author{
D. Maraun
}
\references{
D. Maraun and J. Kurths, Nonlin. Proc. Geophys. 11:
505-514, 2004
}
\seealso{
\code{\link{cwt.ts}}, \code{\link{wsp}},
\code{\link{wco}}
}
\keyword{cross}
\keyword{spectrum}
\keyword{wavelet}
|
797ddfae0b10dcdc28f87cb11fc1efe4fb10f57f | ae7fed84e88e9626f8387cf5dc92b638be3d90fa | /Postwork1_equipo8.R | 4f57e77ab8e220eefb0601ec41cc51053b5e3608 | [] | no_license | samanthamartinez-chem/Postworks-equipo-8 | 4744f4207fcd5392b8b1bed76def686b5f76e7c1 | e51f999d5cd57577ca4154e5f732c7114f3c3fff | refs/heads/main | 2023-02-28T15:02:23.353289 | 2021-02-05T03:18:39 | 2021-02-05T03:18:39 | 336,146,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,824 | r | Postwork1_equipo8.R | ### ###
## POSTWORK 1 ##
### TEAM 8 ###
# Analysis of data on teams from the Spanish football league ("La Liga").
#
# 1) Import soccer data for the 2019/2020 season of the Spanish first
#    division.
url <- "https://www.football-data.co.uk/mmz4281/1920/SP1.csv"
datos_futbol <- read.csv(url)
# 2) Build frequency tables for the number of goals scored by home teams
#    (column FTHG) and by away teams (column FTAG).
(tabla_goles_locales <- as.data.frame(table(Goles = datos_futbol$FTHG))) # home
(tabla_goles_visitantes <- as.data.frame(table(Goles = datos_futbol$FTAG))) # away
# Raw goal columns.
# BUG FIX: these two lines previously referenced an undefined object
# `LE1920`; the data frame created above is called `datos_futbol`.
datos_futbol$FTHG # Goals scored by the teams playing at home
datos_futbol$FTAG # Goals scored by the teams playing away
# 3) Consult the documentation of table(): it builds contingency
#    (frequency) tables.
?table
#### Part 2 ####
# Build relative frequency tables to estimate probabilities.
# The away-team table has no "6 goals" row, so append one with frequency 0
# to make the two tables comparable ("6" has to be added as a factor level).
x <- data.frame(Goles = factor(6), Freq = c(0))
# New data frame for away teams that includes the 6-goal row.
(tabla_goles_visitantes <- rbind.data.frame(tabla_goles_visitantes, x))
# Join home and away frequencies into a single data frame keyed by "Goles".
# (A redundant unassigned merge() call that only printed the result has
# been removed.)
tabla_frecuencias <- merge(tabla_goles_locales, tabla_goles_visitantes, by = "Goles")
colnames(tabla_frecuencias) <- c("Goles", "Locales", "Visitantes")
# FREQUENCY TABLE FOR BOTH TEAMS
tabla_frecuencias
#### THE PROBABILITIES ####
# 1) Marginal probability (in percent) that the home team scores "x" goals
#    (x = 0, 1, 2, ...): goals-at-home counts divided by the total number
#    of matches, i.e. the number of rows.
(probmar_locales <- (table(datos_futbol$FTHG)/dim(datos_futbol)[1])*100)
# 2) Marginal probability (in percent) that the away team scores "y" goals
#    (y = 0, 1, 2, ...).
(probmar_visitantes <- (table(datos_futbol$FTAG)/dim(datos_futbol)[1])*100)
# 3) Joint probability (in percent) that the home team scores "x" goals and
#    the away team scores "y" goals (x = 0, 1, 2, ..., y = 0, 1, 2, ...).
(prob_conjunta <- (table(datos_futbol$FTHG, datos_futbol$FTAG)/dim(datos_futbol)[1])*100)
449f6eef6294e157b9a296ee6ecccc24da2f7f34 | 7c76704087a176e740cc6fbe10682e9c963b129b | /R/dockerfile.R | 812aac460e6fc93725272bddf6e3319cc6ffc61f | [
"MIT"
] | permissive | uribo/dockerfiler | d20c0fc352daa39f4ecf7f84c613e170c8683173 | 2f11b0de7889a2cf60024dc52c90d10859a95075 | refs/heads/master | 2021-03-30T20:42:24.112672 | 2018-02-28T17:55:32 | 2018-02-28T17:55:32 | 124,981,971 | 1 | 0 | null | 2018-03-13T02:43:09 | 2018-03-13T02:43:09 | null | UTF-8 | R | false | false | 4,145 | r | dockerfile.R | #' A Dockerfile template
#' R6 class representing a Dockerfile. Each method appends (or, for the
#' editing helpers, rewrites) one Dockerfile instruction held in the
#' character vector `Dockerfile`, and `write()` serialises it to disk.
#'
#' @return A dockerfile template
#'
#' @importFrom R6 R6Class
#' @export
#'
#' @examples
#' my_dock <- Dockerfile$new()
Dockerfile <- R6::R6Class(
  "Dockerfile",
  public = list(
    # Character vector holding the Dockerfile, one instruction per element.
    Dockerfile = character(),
    # Initialise the Dockerfile with a FROM line (and optional build-stage
    # alias via AS).
    # BUG FIX: the FROM argument was previously ignored and the base image
    # was hard-coded to "rocker/r-base"; it is now passed through.
    initialize = function(FROM = "rocker/r-base", AS = NULL){
      self$Dockerfile <- create_dockerfile(FROM, AS)
    },
    # Append a RUN instruction executing `cmd`.
    RUN = function(cmd){
      self$Dockerfile <- c(self$Dockerfile, add_run(cmd))
    },
    # Append an ADD instruction copying `from` to `to`.
    ADD = function(from, to, force = TRUE){
      self$Dockerfile <- c(self$Dockerfile, add_add(from, to, force))
    },
    # Append a COPY instruction copying `from` to `to`.
    COPY = function(from, to, force = TRUE){
      self$Dockerfile <- c(self$Dockerfile, add_copy(from, to, force))
    },
    # Append a WORKDIR instruction.
    WORKDIR = function(where){
      self$Dockerfile <- c(self$Dockerfile, add_workdir(where))
    },
    # Append an EXPOSE instruction for `port`.
    EXPOSE = function(port){
      self$Dockerfile <- c(self$Dockerfile, add_expose(port))
    },
    # Append a VOLUME instruction.
    VOLUME = function(volume){
      self$Dockerfile <- c(self$Dockerfile, add_volume(volume))
    },
    # Append a CMD instruction.
    CMD = function(cmd){
      self$Dockerfile <- c(self$Dockerfile, add_cmd(cmd))
    },
    # Append a LABEL key/value pair.
    LABEL = function(key, value){
      self$Dockerfile <- c(self$Dockerfile, add_label(key, value))
    },
    # Append an ENV key/value pair.
    ENV = function(key, value){
      self$Dockerfile <- c(self$Dockerfile, add_env(key, value))
    },
    # Append an ENTRYPOINT instruction.
    ENTRYPOINT = function(cmd){
      self$Dockerfile <- c(self$Dockerfile, add_entrypoint(cmd))
    },
    # Append a USER instruction.
    USER = function(user){
      self$Dockerfile <- c(self$Dockerfile, add_user(user))
    },
    # Append an ARG instruction; with `ahead = TRUE` it is prepended so it
    # can precede FROM (Docker allows ARG before FROM).
    ARG = function(arg, ahead = FALSE){
      if (ahead) {
        self$Dockerfile <- c(add_arg(arg), self$Dockerfile)
      } else {
        self$Dockerfile <- c(self$Dockerfile, add_arg(arg))
      }
    },
    # Append an ONBUILD instruction.
    ONBUILD = function(cmd){
      self$Dockerfile <- c(self$Dockerfile, add_onbuild(cmd))
    },
    # Append a STOPSIGNAL instruction.
    STOPSIGNAL = function(signal){
      self$Dockerfile <- c(self$Dockerfile, add_stopsignal(signal))
    },
    # Append a HEALTHCHECK instruction.
    HEALTHCHECK = function(check){
      self$Dockerfile <- c(self$Dockerfile, add_healthcheck(check))
    },
    # Append a SHELL instruction.
    SHELL = function(shell){
      self$Dockerfile <- c(self$Dockerfile, add_shell(shell))
    },
    # Append a (deprecated in Docker, still supported here) MAINTAINER line.
    MAINTAINER = function(name, email){
      self$Dockerfile <- c(self$Dockerfile, add_maintainer(name, email))
    },
    # Print the Dockerfile to the console, one instruction per line.
    print = function(){
      cat(self$Dockerfile, sep = '\n')
    },
    # Write the Dockerfile to disk (default filename: "Dockerfile").
    write = function(as = "Dockerfile"){
      base::write(self$Dockerfile, file = as)
    },
    # Swap the instructions at positions `a` and `b`.
    switch_cmd = function(a, b){
      self$Dockerfile <- switch_them(self$Dockerfile, a, b)
    },
    # Remove the instruction at position `where`.
    remove_cmd = function(where){
      self$Dockerfile <- remove_from(self$Dockerfile, where)
    }
  ))
|
033076a2d38f46b3d4faee121caafada4578a0d6 | 9d587824ee032d24c700fe0514c382de04e2bf01 | /Pexp.R | 2cab6a10cde493d2756f50c0296d42c72c8dc1dc | [] | no_license | Kishor-Kc/ProjectK | 518ffad9fe67a7ab4e1f763191ff677bb6dae004 | 0c80b31bdb7f9ad1948a97bfa9e8c5f5700b3398 | refs/heads/master | 2021-06-28T03:55:51.119083 | 2021-03-16T16:09:44 | 2021-03-16T16:09:44 | 218,608,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,996 | r | Pexp.R | library(Rmisc)
library(httr)
library(RCurl)
library(ggplot2)
library(dplyr)
library(Hmisc)
library(data.table)
library(tidyverse)
library(Rmisc)
library(RCurl)
library(reshape2)
library(ggpubr)
library(naniar)
## --- P3 experiment: load results from the Eimeria_Lab GitHub repository ---
## All inputs are CSVs fetched over HTTP with RCurl::getURL(); the spurious
## "X" row-number column written by an earlier write.csv is dropped each time.
#P3
P3_complete <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_Weight%26Oocyst_complete.csv"
P3_complete <- read.csv(text = getURL(P3_complete))
P3_complete$X <- NULL
## P3_a: primary-infection rows; P3_b: challenge-infection rows (na.omit
## keeps only rows complete for the selected columns).
P3_a <-select(P3_complete, EH_ID, labels, OPG, wloss, dpi, batch, primary)
P3_a <-na.omit(P3_a)
P3_b <- select(P3_complete, EH_ID, labels, OPG, wloss, dpi, batch, challenge, infHistory)
P3_b <- na.omit(P3_b)
# for gene expression
P3_design <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experimental_design/P3_112019_Eim_design.csv"
P3_design <- read.csv(text = getURL(P3_design))
P3_gene <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_CEWE_RTqPCR.csv"
P3_gene <- read.csv(text = getURL(P3_gene))
P3_gene$X <-NULL
P3_Delta <-"https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_CEWE_qPCR.csv"
P3_Delta <- read.csv(text = getURL(P3_Delta))
P3_Delta$X <- NULL
P3_DeltaX <- merge(P3_Delta, P3_design)
P3_Comp <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_COMPLETE.csv"
P3_Comp <- read.csv(text = getURL(P3_Comp))
P3_Comp$X <- NULL
P3_geneX <- select(P3_Comp,"CXCR3", "IL.12", "IRG6", "EH_ID")
P3_geneX <- na.omit(P3_geneX)
Comp1 <- select(P3_Comp,Eim_MC,EH_ID)
Comp1 <- na.omit(Comp1)
Comp2 <- merge(P3_DeltaX, Comp1)
Comp2 <- distinct(Comp2)
compX <- merge(P3_gene, P3_Delta, by = "EH_ID")
#boxplot for gene expression
## Reshape the three gene-expression columns (assumed to sit at positions
## 2:4 of `complete` -- TODO confirm against the CSV column order) into long
## format with one row per mouse x target gene; values go into "NE".
complete <- merge(compX, Comp1)
Comp.long <- reshape(complete,
                     direction = "long",
                     varying = list(names(complete)[2:4]),
                     v.names = "NE",
                     times = c("CXCR3", "IRG6", "IL.12"),
                     timevar = "Target",
                     idvar = "EH_ID")
P3_GE <- select(P3_Comp, EH_ID, challenge)
P3_GE <-na.omit(P3_GE)
P3_GET <- merge(P3_GE, Comp.long)
## Violin + jitter plot of normalised expression per challenge strain,
## facetted by target gene.
ggplot(P3_GET, aes(x = challenge, y = NE, color = challenge)) +
  geom_violin() +
  geom_jitter() +
  facet_wrap(~Target) +
  ggtitle("P3 gene expression") + xlab("Eimeria") + ylab("normalised gene expression")
OPGx <- select(P3_Comp,"OPG","dpi","challenge", "EH_ID")
OPGx <- na.omit(OPGx)
OPG1 <- select(P3_Comp, "OPG", "dpi", "primary", "EH_ID")
OPG1 <- na.omit(OPG1)
# fix problem with 0340,0335
## NOTE(review): the second variable is named LM_O335 with a letter "O",
## not a zero, while the filter uses "LM_0335"; both objects are unused
## below -- confirm whether this inspection code is still needed.
LM_0340 <- subset(P3_b, P3_b$EH_ID == "LM_0340")
LM_O335 <- subset(P3_b, P3_b$EH_ID == "LM_0335")
## OPG over dpi per mouse: challenge infections, primary infections, and
## by full infection history.
ggplot(P3_b, aes(x = dpi, y = OPG, group = EH_ID, col = challenge)) +
  geom_smooth()+
  geom_line()+
  theme_bw()+
  facet_grid(~challenge)
ggplot(P3_a, aes(x = dpi, y = OPG, group = EH_ID, col = primary)) +
  geom_smooth()+
  geom_line()+
  theme_bw()+
  facet_grid(~primary)
ggplot(P3_Comp, aes(x = dpi, y = OPG, group = EH_ID, col = infHistory)) +
  geom_point()+
  geom_line()+
  theme_bw()+
  facet_wrap(~infHistory)
# for p4 experiment
## --- P4 experiment: load the combined results table and plot oocyst
## shedding (OPG) and weight loss for primary (P4a) and challenge (P4b)
## infections. ---
P4_complete <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P4_082020_Eim_COMPLETE.csv"
P4_complete <- read.csv(text = getURL(P4_complete))
P4_1 <- select(P4_complete, label, dpi, infHistory, wloss, primary, challenge, OPG)
## Primary-infection subset: keep label/primary and label/OPG rows that are
## complete, then merge them back with the dpi/wloss columns by label.
P4_2 <- select(P4_1, label, primary)
P4_2 <- na.omit(P4_2)
P4_3 <- select(P4_1, label, OPG)
P4_3 <- na.omit(P4_3)
P4_4 <- select(P4_1,label, dpi, wloss)
P4_5 <- merge(P4_2, P4_3)
P4_6 <- merge(P4_5, P4_4)
#plot OOcyst and weightloss for P4a
ggplot(P4_6, aes(x = dpi, y = OPG, color = primary)) +
  geom_point()+
  theme_bw()+
  facet_grid(~primary)
ggplot(P4_6, aes(x = dpi, y = wloss, col = primary)) +
  geom_smooth()+
  geom_line()+
  theme_bw()+
  facet_grid(~primary)
# plot OOcyst and weightloss for P4b
## Same construction as above but keyed on the challenge strain.
P4_7 <- select(P4_1, label, challenge)
P4_7 <- na.omit(P4_7)
P4_8 <- select(P4_1, label, OPG)
P4_8 <- na.omit(P4_8)
P4_9 <- select(P4_1,label, dpi, wloss)
P4_10 <- merge(P4_7, P4_8)
P4_11 <- merge(P4_10, P4_9)
ggplot(P4_11, aes(x = dpi, y = OPG, color = challenge)) +
  geom_point()+
  theme_bw()+
  facet_grid(~challenge)
ggplot(P4_11, aes(x = dpi, y = wloss, col = challenge)) +
  geom_smooth()+
  geom_line()+
  theme_bw()+
  facet_grid(~challenge)
#add infHis
## Attach the infection-history label and plot OPG facetted by it.
P4_12 <- select(P4_1, label, infHistory, challenge)
P4_12 <- na.omit(P4_12)
P4_13 <- merge(P4_11, P4_12)
ggplot(P4_13, aes(x = dpi, y = OPG, col = infHistory)) +
  geom_point()+
  geom_line()+
  theme_bw()+
  facet_wrap(~infHistory)
# let make P3 + P4 together
## --- Combine P3 and P4 data and plot weight loss for primary and
## secondary (challenge) infections. ---
# for primary wloss
P3_w1 <- select(P3_Comp,label, EH_ID, dpi, wloss,primary)
P3_w1 <- na.omit(P3_w1)
P4_w1 <- select(P4_complete,label, EH_ID, dpi, wloss, primary)
P4_w1 <- na.omit(P4_w1)
Pr_wloss<- rbind(P3_w1, P4_w1)
## NOTE(review): row 195 is dropped by hard-coded index, presumably an
## outlier -- fragile if the upstream CSVs change; verify which observation
## this removes.
Pr_wloss <- Pr_wloss[-c(195),]
ggplot(Pr_wloss, aes(x = dpi, y = wloss, group = primary, col = primary)) +
  geom_smooth(se = FALSE)+
  geom_jitter()+
  theme_bw() +
  ggtitle("Weightloss during primary infection ") + ylab("Weightloss in percentage") + xlab("Day post infection (dpi)") +
  scale_color_manual(breaks = c("E139", "E64", "E88","Eflab","UNI"),
                     values=c("red", "blue", "darkgreen","orange","violet"))
ggplot(Pr_wloss, aes(x = dpi, y = wloss, group = primary, col = primary)) +
  geom_smooth(se = FALSE)+
  geom_jitter()+
  theme_bw() +
  facet_grid(~primary) +
  ggtitle("Weightloss during primary infection ") + ylab("Weightloss in percentage") + xlab("Day post infection (dpi)")
# sec wloss
P3_w2 <- select(P3_Comp,label, EH_ID, dpi, wloss,challenge)
P3_w2 <- na.omit(P3_w2)
P4_w2 <- select(P4_complete,label, EH_ID, dpi, wloss, challenge)
P4_w2 <- na.omit(P4_w2)
Ch_wloss <- rbind(P3_w2, P4_w2)
Ch_wloss <- distinct(Ch_wloss)
ggplot(Ch_wloss, aes(x = dpi, y = wloss, group = challenge, col = challenge )) +
  geom_smooth(se = FALSE)+
  geom_jitter()+
  theme_bw() +
  ggtitle("Weightloss during secondary infection ") + ylab("Weightloss in percentage") + xlab("Day post infection (dpi)") +
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
ggplot(Ch_wloss, aes(x = dpi, y = wloss, group = challenge, col = challenge)) +
  geom_smooth()+
  geom_jitter()+
  facet_grid(~challenge)+
  ggtitle("Weightloss during secondary infection ") + ylab("Weightloss in percentage") + xlab("Day post infection (dpi)") +
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
## Load the design tables to recover each mouse's primary strain.
## NOTE(review): P4_d1 actually loads the *P3* design file despite its
## name -- presumably intentional (P3 mice re-used), but worth confirming.
P4_d1 <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experimental_design/P3_112019_Eim_design.csv"
P4_d1 <- read.csv(text = getURL(P4_d1))
P4_d2 <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experimental_design/P4a_082020_Eim_design.csv"
P4_d2 <- read.csv(text = getURL(P4_d2))
P4_d11 <- select(P4_d1, EH_ID, primary)
P4_d21 <- select(P4_d2, EH_ID, primary)
P4_D <- rbind(P4_d11, P4_d21)
P4_WW <- merge(Ch_wloss, P4_D)
## Challenge weight loss coloured by the mouse's *primary* strain, and the
## reverse view facetted by primary strain.
ggplot(P4_WW, aes(x = dpi, y = wloss, group = EH_ID, col = primary)) +
  geom_point()+
  geom_line()+
  theme_bw()+
  facet_grid(~challenge)
ggplot(P4_WW, aes(x = dpi, y = wloss, group = challenge, col = challenge)) +
  geom_smooth(se = FALSE)+
  geom_jitter()+
  theme_bw()+
  facet_grid(~primary) +
  ggtitle("Weightloss during secondary infection ") + ylab("Weightloss in percentage") + xlab("Day post infection (dpi)")+
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
# Ococyst plot
## --- Oocyst shedding (OPG) across both experiments; each subset gets a
## `batch` tag (P3a/P3b/P4a/P4b) so plots can be facetted by batch. ---
#for primary
P4_O1 <- select(P4_complete,OPG, dpi, primary, EH_ID)
P4_O1 <- na.omit(P4_O1)
P4_O1$batch <- c("P4a")
P3_O1 <- select(P3_a, OPG, dpi, primary, EH_ID)
P3_O1$batch <- c("P3a")
## NOTE(review): OPG_a duplicates Pr_OPG below and appears unused.
OPG_a <- rbind(P4_O1, P3_O1)
P3_O2 <- select(P3_b, OPG, dpi, challenge, EH_ID)
P3_O2$batch <- c("P3b")
P4_O2 <- select(P4_complete,OPG, dpi, challenge, EH_ID)
P4_O2 <- na.omit(P4_O2)
P4_O2$batch <- c("P4b")
Pr_OPG <- rbind(P4_O1, P3_O1)
Ch_OPG <- rbind(P4_O2, P3_O2)
## Attach infection history to the challenge OPG data.
Inf_His1 <- select(P3_Comp, EH_ID, challenge, infHistory)
Inf_His1 <- na.omit(Inf_His1)
P3_OPGIH <- merge(Inf_His1,P3_O2)
Inf_His2 <- select(P4_complete, EH_ID, challenge, infHistory)
Inf_His2 <- na.omit(Inf_His2)
P4_OPGIH <- merge(Inf_His2,P4_O2)
P_OPGIH <- rbind(P3_OPGIH, P4_OPGIH)
P_OPGIH <-na.omit(P_OPGIH)
## NOTE(review): `cols` is defined but not used in the plots below.
cols <- c("8" = "red", "4" = "blue", "6" = "darkgreen", "10" = "orange","12"="pink")
ggplot(Pr_OPG, aes(x = dpi, y = OPG, col = primary)) +
  geom_smooth()+
  geom_point(size=2)+
  theme_bw()+
  facet_grid(~primary) +
  ylim(0,5000000) +
  ggtitle("Oocyst shedding for primary infection ") + ylab("Oocysts per gram (OPG)") + xlab("Day post infection (dpi)") +
  scale_color_manual(breaks = c("E139", "E64", "E88","Eflab","UNI"),
                     values=c("red", "blue", "darkgreen","orange","violet"))
ggplot(P_OPGIH, aes(x = dpi, y = OPG, col = challenge)) +
  geom_smooth()+
  geom_point(size=2)+
  theme_bw()+
  facet_grid(~challenge) +
  ylim(0,3000000) +
  ggtitle("Oocyst shedding for secondary infection ") + ylab("Oocysts per gram (OPG)") + xlab("Day post infection (dpi)")+
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
ggplot(P_OPGIH, aes(x = dpi, y = OPG, col = challenge)) +
  geom_smooth()+
  geom_point(size=2)+
  theme_bw()+
  facet_grid(~batch) +
  ylim(0,3000000) +
  ggtitle("Oocyst shedding for secondary infection ") + ylab("Oocysts per gram (OPG)") + xlab("Day post infection (dpi)") +
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
ggplot(P_OPGIH, aes(x = dpi, y = OPG, group = challenge, col = batch)) +
  geom_point()+
  geom_line()+
  theme_bw()+
  facet_wrap(~infHistory) +
  ggtitle("Oocyst shedding regarding infection history") + ylab("Oocysts per gram (OPG)") + xlab("Day post infection (dpi)")
#wlos for sec
## Weight loss during challenge, facetted by batch.
P_wlossCH <- merge(P_OPGIH,Ch_wloss)
ggplot(P_wlossCH, aes(x = dpi, y = wloss, group = challenge, col = challenge)) +
  geom_smooth()+
  geom_jitter()+
  facet_grid(~batch)+
  ggtitle("Weightloss during secondary infection ") + ylab("Weightloss in percentage") + xlab("Day post infection (dpi)") +
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
# Delta Eim_MC
## --- qPCR infection intensity (delta) and melting-curve detection
## (Eim_MC), harmonised across P3 and P4 and recoded to
## "infected"/"uninfected". ---
P4_Delta <- select( P4_complete, EH_ID, delta, Eim_MC, infHistory,challenge)
P4_Delta <- na.omit(P4_Delta)
P4_Delta <- distinct(P4_Delta)
## P4 encodes Eim_MC as TRUE/FALSE; recode to infected/uninfected.
## NOTE(review): the recoding happens *before* as.character(); this relies
## on implicit coercion when comparing to "TRUE"/"FALSE" -- confirm.
P4_Delta$Eim_MC[P4_Delta$Eim_MC == "TRUE"] <- "infected"
P4_Delta$Eim_MC[P4_Delta$Eim_MC == "FALSE"] <- "uninfected"
P4_Delta$Eim_MC <- as.character(P4_Delta$Eim_MC)
## P3 encodes Eim_MC as "pos"/"neg"; recode likewise.
P3_Delta <- select(P3_Comp, EH_ID, delta, Eim_MC, infHistory,challenge)
P3_Delta <- na.omit(P3_Delta)
P3_Delta$Eim_MC <- as.character(P3_Delta$Eim_MC)
P3_Delta$Eim_MC[P3_Delta$Eim_MC == "pos"] <- "infected"
P3_Delta$Eim_MC[P3_Delta$Eim_MC == "neg"] <- "uninfected"
P_Delta <- rbind(P3_Delta, P4_Delta)
P_Delta <- distinct(P_Delta)
## Manual corrections for three mice whose melting-curve call is being
## overridden; the originals are removed below by hard-coded row index and
## the corrected copies appended.
# change LM_0354 uninfected to infected
LM_0354 <- subset(P_Delta, P_Delta$EH_ID == "LM_0354")
LM_0354$Eim_MC[LM_0354$Eim_MC == "uninfected"] <- "infected"
LM337_D <- subset(P_Delta, P_Delta$EH_ID == "LM_0337")
LM337_D$Eim_MC[LM337_D$Eim_MC == "infected"] <- "uninfected"
LM346_D <- subset(P_Delta, P_Delta$EH_ID == "LM_0346")
LM346_D$Eim_MC[LM346_D$Eim_MC == "infected"] <- "uninfected"
## NOTE(review): dropping rows 9/13/19 by position is fragile -- these
## indices shift whenever the upstream data change; verify they remove the
## three uncorrected rows above.
P_Delta <- P_Delta[-c(9),]
P_Delta1 <- P_Delta[-c(13),]
P_Delta2 <- P_Delta1[-c(19),]
P_Deltax <- rbind(P_Delta2, LM_0354, LM337_D, LM346_D)
ggplot(subset(P_Deltax,!is.na(P_Deltax$delta)), aes(x = infHistory, y = delta, color = Eim_MC)) +
  geom_point()+
  theme_bw()+
  facet_grid(~challenge)
ggplot(P_Deltax, aes(x = Eim_MC, y = delta, color = challenge)) +
  geom_violin()+
  geom_smooth() +
  geom_jitter()+
  theme_bw()
ggplot(P_Deltax, aes(x = Eim_MC, y = delta, color = challenge)) +
  geom_smooth() +
  geom_jitter()+
  ggtitle("Distribution of infection intensity") + xlab("Infection presence") + ylab("Infection intensity") +
  scale_color_manual(breaks = c("E64", "E88", "UNI"),
                     values=c("blue", "darkgreen","violet"))
#try for the average intensity
## Mean delta per challenge strain (E64 vs E88).
AVG <- subset(P_Deltax, P_Deltax$challenge== "E64")
AVG_64 <- mean(AVG[,"delta"], na.rm = TRUE)
AVG1 <- subset(P_Deltax, P_Deltax$challenge== "E88")
AVG1 <- distinct(AVG1)
AVG_88 <- mean(AVG1[,"delta"], na.rm = TRUE)
# INFy ELISA result
## --- IFN-gamma ELISA (cecum wall, "CEWE") merged with qPCR intensity. ---
P3_IFN <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_CEWE_ELISA.csv"
P3_IFN <- read.csv(text = getURL(P3_IFN))
P3_IFN$X <- NULL
P3_E <- merge(P3_IFN,P_Deltax)
## Rename the P3 column to match P4's "IFNy_CEWE" before stacking.
names(P3_E)[names(P3_E) == "IFNy"] <- "IFNy_CEWE"
P3_EE <- select(P3_E, EH_ID, infHistory, delta, challenge, IFNy_CEWE, Eim_MC)
P4_E <- select(P4_complete, EH_ID, IFNy_CEWE)
P4_E <- na.omit(P4_E)
P4_EE <- merge(P_Deltax, P4_E)
P_E <- rbind(P3_EE, P4_EE)
P_E <- distinct(P_E)
## Linear fit of IFNy against infection intensity, with ggpubr::stat_cor
## annotating R^2 and p-value.
## NOTE(review): scale_color_manual uses breaks "INF"/"UNI" but Eim_MC
## holds "infected"/"uninfected", so this scale likely has no effect --
## confirm and align the labels.
ggplot(P_E, aes(y = IFNy_CEWE, x = delta, color= Eim_MC)) +
  geom_smooth(method="lm")+
  geom_jitter()+
  theme_bw()+
  facet_wrap(~Eim_MC) +
  stat_cor(aes(label = paste(..rr.label.., ..p.label.., sep = "~`,`~")),label.x = -10, label.y = 750) +
  ggtitle("Distribution of IFNγ") + ylab("IFNγ from cecum wall") + xlab("Infection intensity") +
  scale_color_manual(breaks = c("INF", "UNI"),
                     values=c("blue","violet"))
P_Inf <- subset(P_E, P_E$Eim_MC == "infected")
P_Inf <- distinct(P_Inf)
ggplot(P_Inf, aes(x=challenge, y= IFNy_CEWE, color= challenge,main = "Infected" ))+
  geom_violin()+
  geom_jitter()
ggplot(P_Inf, aes(x=delta, y= IFNy_CEWE, color = challenge))+
  geom_smooth()+
  geom_jitter() +
  facet_wrap(~challenge) +
  ggtitle("Presence of IFNγ in infected mice") + ylab("IFNγ from cecum wall") + xlab("Infection intensity")+
  scale_color_manual(breaks = c("E64", "E88"),
                     values=c("blue", "darkgreen"))
f49dd5464733c311fc47d3aa57e8bed8fdfe4f02 | fafd26bd65bb0afcc6777ce18726dd8b0a5bb1c8 | /man/Bus_delays.Rd | 34cb02d1b8f45a6383ff06efe2cb44c59d3fc53a | [] | no_license | dtkaplan/SDSdata | 50a3814af475c4793e03ec0200abd5d1cec738e9 | 5f8c9ec18bff5f1a4565cc72f745c4943fd372d9 | refs/heads/master | 2022-06-27T13:33:10.639497 | 2022-06-23T20:26:56 | 2022-06-23T20:26:56 | 133,568,261 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,804 | rd | Bus_delays.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bus_delays.R
\docType{data}
\name{Bus_delays}
\alias{Bus_delays}
\title{School bus delays in New York City}
\format{
A data.frame object with one row for each of 238,266 bus-delay
incidents in New York City.
\itemize{
\item \code{breakdown_id}: A unique (almost) id number for each incident
\item \code{year} the school year of the incident
\item \code{route_name} the name of the bus route
\item \code{delay_duration} how long (minutes) the delay lasted
\item \code{pre_k}: whether the bus was for Pre-kindergarten students. If not, it was for elementary
school students. (Older students ride the city bus in NYC.)
\item \code{reason} an explanation of the cause of the delay
\item \code{boro} which of the jurisdictions of the NYC area the delay occurred in.
\item \code{n_students} how many student passengers were on the bus when the delay occurred.
\item \code{company} the name of the company operating the bus
\item \code{date} the date of the incident
}
}
\source{
The data themselves were scraped from the New York City OpenData site,
\url{https://data.cityofnewyork.us/Transportation/Bus-Breakdown-and-Delays/ez4e-fazm} on Dec. 13, 2018.
The data was brought to the author's attention on a blog written by Nils Ross \url{https://datascienceplus.com/nyc-bus-delays/}. Much
of the code used to clean the data was copied from that blog post.
}
\usage{
data(Bus_delays)
}
\description{
School bus delays in New York City
}
\details{
The data have been lightly cleaned from their original form as a CSV file. 305 rows from the original
data were removed: those with a number of passengers greater than 48. (These were presumably
data-entry errors since a common legal limit for bus passengers is 48.)
}
\keyword{datasets}
|
9e0a61fe0a509f8504eeaea54cf075dbe95460c5 | c6f9ccacec1e33bd5b132469ea65f7eae8ea1956 | /man/subsampleDetections.Rd | 85fd51aa55b66de82402ef0e080e29a7ab51ad70 | [] | no_license | EricArcher/banter | a2301d738c0ad2572cace2acab16f3058b8f54fb | fa97b2d7ceec5aba51fc9fcce5e9b53a75d352d6 | refs/heads/master | 2023-03-21T07:39:56.132570 | 2023-03-14T00:29:58 | 2023-03-14T00:29:58 | 127,483,576 | 7 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,731 | rd | subsampleDetections.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsampleDetections.R
\name{subsampleDetections}
\alias{subsampleDetections}
\title{Subsample Detections}
\usage{
subsampleDetections(data, n)
}
\arguments{
\item{data}{a detector data.frame or list of detector data.frames.}
\item{n}{a value giving the number (\code{n} >= 1) or
fraction (\code{n} between 0 and 1) of detections per event per detector
to select. Detections are randomly selected without replacement.
If \code{n} is greater than the number of detections in an event, all
detections for that event will be retained.}
}
\value{
a detector data.frame or list of detector data.frames with
no more than \code{n} detections per event per detector.
}
\description{
Extract a random subsample of detections for each event and
detector.
}
\examples{
data(train.data)
# initial number of detections per event per detector
sapply(train.data$detectors, function(x) table(x$event.id))
# randomly select half of the detections in each detector
detect.half <- subsampleDetections(train.data$detectors, 0.5)
sapply(detect.half, function(x) table(x$event.id))
# select 20 detections
detect.20 <- subsampleDetections(train.data$detectors, 20)
sapply(detect.20, function(x) table(x$event.id))
# select 10 detections fro 'ec' detector
ec.10 <- subsampleDetections(train.data$detectors$ec, 10)
table(ec.10$event.id)
}
\references{
Rankin, S., Archer, F., Keating, J. L., Oswald, J. N.,
Oswald, M. , Curtis, A. and Barlow, J. (2017), Acoustic classification
of dolphins in the California Current using whistles, echolocation clicks,
and burst pulses. Marine Mammal Science 33:520-540. doi:10.1111/mms.12381
}
\author{
Eric Archer \email{eric.archer@noaa.gov}
}
|
d0e1a042890b9495844e8b2b5d63415e5dec69ef | 265cd044ba3011947844692e0d2d6d071735a64e | /EDMhelper/R/plot_smap.R | 4bf1a8dea4f0326b84be784bb51bd3bc41534d0f | [] | no_license | canankarakoc/r_package_EDMhelper | 82656008887af66af7dd5ada5d58b96cf6e7aba0 | 188d2863130a70068b86fcad01cec23792ba2a78 | refs/heads/master | 2022-04-08T04:42:57.249681 | 2020-03-02T20:05:46 | 2020-03-02T20:05:46 | 183,004,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,570 | r | plot_smap.R | #' plot_smap
#'
#' Plot prediction skill vs. theta for the results of an s-map.
#' @param smap_out Output from s-map function (or a list of outputs).
#' @param predtype Type of prediction metric to use. Can be "rho", "rmse", or "mae".
#' @param pname Optional name for the plot.
#' @keywords rEDM, s-map
#' @return A ggplot object.
#' @import ggplot2
#' @export
plot_smap<-function(smap_out, predtype="rho", pname=NA) {
if(sum(grep("const_p_val", names(smap_out)))==0) {
dm<-dim(smap_out[[1]])
lng<-length(smap_out)
if(!is.na(pname[1])) {
nms<-pname
} else {
nms<-names(smap_out)
}
smap_out<-do.call("rbind", smap_out)
rownames(smap_out)<-NULL
smap_out$variable<-rep(nms, each=dm[1])
} else {
smap_out$variable<-pname
}
variable<-smap_out$variable
theta<-smap_out$theta
nice_theme <-theme_bw()+
theme(axis.text= element_text(size=12),
axis.title = element_text(size=14,face="bold"),
legend.text = element_text(size=12),
legend.title = element_text(size=12,face="bold"),
plot.title = element_text(size=14, face="bold"),
strip.text = element_text(size=14, face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
predtype<-smap_out[,predtype]
plot_ccm <- ggplot(smap_out, aes(x=theta, y=predtype))+
geom_line(color="darkred")+
facet_wrap(~variable)+
labs(y="Prediction skill", x="Nonlinearity, Theta")+
nice_theme
return(plot_ccm)
}
|
c442df5b6b8ef64b4c7e68a9cddab231ae5a8af3 | 883a4a0c1eae84485e1d38e1635fcae6ecca1772 | /nCompiler/tests/testthat/test-tensorOperations_accessors.R | 2ee7e7b8aa0e297ca8459e0bb4895586892662df | [
"BSD-3-Clause"
] | permissive | nimble-dev/nCompiler | 6d3a64d55d1ee3df07775e156064bb9b3d2e7df2 | 392aabaf28806827c7aa7b0b47f535456878bd69 | refs/heads/master | 2022-10-28T13:58:45.873095 | 2022-10-05T20:14:58 | 2022-10-05T20:14:58 | 174,240,931 | 56 | 7 | BSD-3-Clause | 2022-05-07T00:25:21 | 2019-03-07T00:15:35 | R | UTF-8 | R | false | false | 11,877 | r | test-tensorOperations_accessors.R | context("tensorOperations: special read/write operators")
library(nCompiler)
library(testthat)
library(Matrix)
#
# test the ability to read/write using nCompiler C++ implementations of diag(),
# and related accessor functions
#
#
# generate test data
#
## Fixed seed so the randomly generated fixtures below are reproducible
## across test runs.
set.seed(2022)
# diagonal matrix creation data
nr <- 10
nc <- 3
## xv has min(nr, nc) = 3 entries; xv_nr has nr = 10 entries (used where a
## diagonal of length nrow is required).
xv <- runif(n = min(nr, nc))
xv_nr <- runif(n = nr)
# random dense matrices
## NOTE(review): Y is not used in this section; presumably exercised by
## tests later in the file.
X <- matrix(runif(n = 100), nrow = nr)
Y <- matrix(runif(n = 100), nrow = nr)
# random sparse matrix
## Zero out 90% of X's entries, convert to a Matrix-package sparse matrix,
## then force a few diagonal entries to 3 so the diagonal is non-trivial.
Xsp <- X
Xsp[sample(x = length(Xsp), size = .9 * length(Xsp))] <- 0
Xsp <- Matrix(Xsp, sparse = TRUE)
diag(Xsp)[sample(x = nrow(Xsp), size = .1 * nrow(Xsp))] <- 3
#
# diag as a creation operator
#
# Documenting many of R's behaviors for diag(). There are a few other cases
# where x may be either a scalar or a vector, expanding upon the ideas
# documented here.
#
# FN. ARGS. NROW NCOL DIAG EIGENIZED
#
# (x, nrow, ncol) nrow ncol x (x, nrow, ncol)
# (nrow, ncol) nrow ncol 1 (1, nrow, ncol)
# (x) x x 1 (1, x, x)
# (x) length(x) length(x) x (x, length(x), length(x))
# (x, nrow) nrow nrow x (x, nrow, nrow)
# (x, ncol) 1 ncol x (x, 1, ncol)
# (nrow) nrow nrow 1 (1, nrow, nrow)
# (ncol) --- --- - Error, stop processing!
## --- Dense diag() as a creation operator ---
## Each small R function below is the *source* handed to nFunction(), which
## nCompiler translates to C++; the bodies therefore stay in nCompiler's
## supported subset (explicit local + return()). One nFunction per
## combination of argument types (scalar vs vector x, nrow/ncol present or
## absent).
diagXRC <- function(x, nrow, ncol) {
  ans <- diag(x = x, nrow = nrow, ncol = ncol)
  return(ans)
}
diagXR <- function(x, nrow) {
  ans <- diag(x = x, nrow = nrow)
  return(ans)
}
diagXC <- function(x, ncol) {
  ans <- diag(x = x, ncol = ncol)
  return(ans)
}
diagRC <- function(nrow, ncol) {
  ans <- diag(nrow = nrow, ncol = ncol)
  return(ans)
}
diagX <- function(x) {
  ans <- diag(x = x)
  return(ans)
}
diagR <- function(nrow) {
  ans <- diag(nrow = nrow)
  return(ans)
}
diagC <- function(ncol) {
  ans <- diag(ncol = ncol)
  return(ans)
}
## Typed nFunction wrappers; the suffix "v" marks versions whose x is
## declared 'numericVector' rather than scalar 'double'/'integer'.
nDiagXRCv <- nFunction(
  fun = diagXRC,
  argTypes = list(x = 'numericVector', nrow = 'integer', ncol = 'integer'),
  returnType = 'nMatrix'
)
nDiagXRC <- nFunction(
  fun = diagXRC,
  argTypes = list(x = 'double', nrow = 'integer', ncol = 'integer'),
  returnType = 'nMatrix'
)
nDiagXRv <- nFunction(
  fun = diagXR,
  argTypes = list(x = 'numericVector', nrow = 'integer'),
  returnType = 'nMatrix'
)
nDiagXR <- nFunction(
  fun = diagXR,
  argTypes = list(x = 'double', nrow = 'integer'),
  returnType = 'nMatrix'
)
nDiagXCv <- nFunction(
  fun = diagXC,
  argTypes = list(x = 'numericVector', ncol = 'integer'),
  returnType = 'nMatrix'
)
nDiagXC <- nFunction(
  fun = diagXC,
  argTypes = list(x = 'double', ncol = 'integer'),
  returnType = 'nMatrix'
)
nDiagRC <- nFunction(
  fun = diagRC,
  argTypes = list(nrow = 'integer', ncol = 'integer'),
  returnType = 'nMatrix'
)
nDiagXv <- nFunction(
  fun = diagX,
  argTypes = list(x = 'numericVector'),
  returnType = 'nMatrix'
)
nDiagX <- nFunction(
  fun = diagX,
  argTypes = list(x = 'integer'),
  returnType = 'nMatrix'
)
nDiagR <- nFunction(
  fun = diagR,
  argTypes = list(nrow = 'integer'),
  returnType = 'nMatrix'
)
nDiagC <- nFunction(
  fun = diagC,
  argTypes = list(ncol = 'integer'),
  returnType = 'nMatrix'
)
## Compile everything except nDiagC, which is expected to fail below
## (mirrors R, where diag(ncol = n) alone is an error).
cDiagXRCv <- nCompile(nDiagXRCv)
cDiagXRC <- nCompile(nDiagXRC)
cDiagXRv <- nCompile(nDiagXRv)
cDiagXR <- nCompile(nDiagXR)
cDiagXCv <- nCompile(nDiagXCv)
cDiagXC <- nCompile(nDiagXC)
cDiagRC <- nCompile(nDiagRC)
cDiagXv <- nCompile(nDiagXv)
cDiagX <- nCompile(nDiagX)
cDiagR <- nCompile(nDiagR)
#
# creation usage tests
#
## Compiled results are compared against base R's diag(); type-mismatched
## calls (scalar into a 'numericVector' arg and vice versa) must error.
## NOTE(review): cDiagXCv is declared with x = 'numericVector' yet is
## called with scalar x = 3 and expected to succeed, while cDiagXRCv with
## scalar x is expected to error -- confirm this asymmetry is intended.
expect_error(nCompile(nDiagC)) # don't support behavior R doesn't support
expect_identical(diag(x = xv, nrow = nr, ncol = nc),
                 cDiagXRCv(x = xv, nrow = nr, ncol = nc))
expect_error(cDiagXRCv(x = 4, nrow = nr, ncol = nc))
expect_error(cDiagXRCv(x = runif(nr), nrow = nr, ncol = nc))
expect_identical(cDiagXRC(x = 3, nrow = nr, ncol = nc),
                 diag(x = 3, nrow = nr, ncol = nc))
expect_identical(cDiagXRv(x = xv_nr, nrow = nr), diag(x = xv_nr, nrow = nr))
expect_error(cDiagXRv(x = xv, nrow = nr))
expect_identical(cDiagXR(x = 3, nrow = nr), diag(x = 3, nrow = nr))
expect_identical(cDiagXCv(x = 3, ncol = nc), diag(x = 3, ncol = nc))
expect_identical(cDiagXC(x = 3, ncol = nc), diag(x = 3, ncol = nc))
expect_identical(cDiagRC(nrow = nr, ncol = nc), diag(nrow = nr, ncol = nc))
expect_identical(cDiagXv(x = xv), diag(x = xv))
expect_identical(cDiagX(x = 3), diag(x = 3))
expect_identical(cDiagR(nrow = nr), diag(nrow = nr))
#
# sparse creation tests
#
# Template functions for the sparse (Diagonal) creation variants. NOTE: these
# bodies are handed to nFunction() for transpilation, so they deliberately
# keep the "assign then return(ans)" form rather than an implicit return.
diagXRC <- function(x, nrow, ncol) {
  ans <- Diagonal(x = x, nrow = nrow, ncol = ncol)
  return(ans)
}
diagXR <- function(x, nrow) {
  ans <- Diagonal(x = x, nrow = nrow)
  return(ans)
}
diagXC <- function(x, ncol) {
  ans <- Diagonal(x = x, ncol = ncol)
  return(ans)
}
diagRC <- function(nrow, ncol) {
  ans <- Diagonal(nrow = nrow, ncol = ncol)
  return(ans)
}
diagX <- function(x) {
  ans <- Diagonal(x = x)
  return(ans)
}
diagR <- function(nrow) {
  ans <- Diagonal(nrow = nrow)
  return(ans)
}
diagC <- function(ncol) {
  ans <- Diagonal(ncol = ncol)
  return(ans)
}
# Wrap each template in an nFunction with explicit argument/return types;
# all sparse variants return an 'nSparseMatrix'.
nSpDiagXRCv <- nFunction(
  fun = diagXRC,
  argTypes = list(x = 'numericVector', nrow = 'integer', ncol = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagXRC <- nFunction(
  fun = diagXRC,
  argTypes = list(x = 'double', nrow = 'integer', ncol = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagXRv <- nFunction(
  fun = diagXR,
  argTypes = list(x = 'numericVector', nrow = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagXR <- nFunction(
  fun = diagXR,
  argTypes = list(x = 'double', nrow = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagXCv <- nFunction(
  fun = diagXC,
  argTypes = list(x = 'numericVector', ncol = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagXC <- nFunction(
  fun = diagXC,
  argTypes = list(x = 'double', ncol = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagRC <- nFunction(
  fun = diagRC,
  argTypes = list(nrow = 'integer', ncol = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagXv <- nFunction(
  fun = diagX,
  argTypes = list(x = 'numericVector'),
  returnType = 'nSparseMatrix'
)
nSpDiagX <- nFunction(
  fun = diagX,
  argTypes = list(x = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagR <- nFunction(
  fun = diagR,
  argTypes = list(nrow = 'integer'),
  returnType = 'nSparseMatrix'
)
nSpDiagC <- nFunction(
  fun = diagC,
  argTypes = list(ncol = 'integer'),
  returnType = 'nSparseMatrix'
)
# Compile the sparse creation variants (same naming scheme as the dense ones).
cSpDiagXRCv <- nCompile(nSpDiagXRCv)
cSpDiagXRC <- nCompile(nSpDiagXRC)
cSpDiagXRv <- nCompile(nSpDiagXRv)
cSpDiagXR <- nCompile(nSpDiagXR)
cSpDiagXCv <- nCompile(nSpDiagXCv)
cSpDiagXC <- nCompile(nSpDiagXC)
cSpDiagRC <- nCompile(nSpDiagRC)
cSpDiagXv <- nCompile(nSpDiagXv)
cSpDiagX <- nCompile(nSpDiagX)
cSpDiagR <- nCompile(nSpDiagR)
expect_error(nCompile(nSpDiagC)) # don't support behavior R doesn't support
# Each compiled sparse result must equal base diag() coerced to a dgCMatrix
# (compressed sparse column) matrix.
expect_identical(as(diag(x = xv, nrow = nr, ncol = nc), 'dgCMatrix'),
                 cSpDiagXRCv(x = xv, nrow = nr, ncol = nc))
expect_error(cSpDiagXRCv(x = 4, nrow = nr, ncol = nc))
expect_error(cSpDiagXRCv(x = runif(nr), nrow = nr, ncol = nc))
expect_identical(cSpDiagXRC(x = 3, nrow = nr, ncol = nc),
                 as(diag(x = 3, nrow = nr, ncol = nc), 'dgCMatrix'))
expect_identical(cSpDiagXRv(x = xv_nr, nrow = nr),
                 as(diag(x = xv_nr, nrow = nr), 'dgCMatrix'))
expect_error(cSpDiagXRv(x = xv, nrow = nr))
expect_identical(cSpDiagXR(x = 3, nrow = nr),
                 as(diag(x = 3, nrow = nr), 'dgCMatrix'))
expect_identical(cSpDiagXCv(x = 3, ncol = nc),
                 as(diag(x = 3, ncol = nc), 'dgCMatrix'))
expect_identical(cSpDiagXC(x = 3, ncol = nc),
                 as(diag(x = 3, ncol = nc), 'dgCMatrix'))
expect_identical(cSpDiagRC(nrow = nr, ncol = nc),
                 as(diag(nrow = nr, ncol = nc), 'dgCMatrix'))
expect_identical(cSpDiagXv(x = xv),
                 as(diag(x = xv), 'dgCMatrix'))
expect_identical(cSpDiagX(x = 3),
                 as(diag(x = 3), 'dgCMatrix'))
expect_identical(cSpDiagR(nrow = nr),
                 as(diag(nrow = nr), 'dgCMatrix'))
#
# diag as accessor operator
#
# Templates that *read* the diagonal of a matrix argument (and of a tensor
# expression, x + y). Bodies kept in nFunction-friendly explicit-return form.
diagExprAccessor <- function(x, y) {
  ans <- diag(x + y)
  return(ans)
}
diagAccessor <- function(x) {
  ans <- diag(x)
  return(ans)
}
nDiagAccessor <- nFunction(
  fun = diagAccessor,
  argTypes = list(x = 'nMatrix'),
  returnType = 'numericVector'
)
nDiagExprAccessor <- nFunction(
  fun = diagExprAccessor,
  argTypes = list(x = 'nMatrix', y = 'nMatrix'),
  returnType = 'numericVector'
)
nDiagAccessorSp <- nFunction(
  fun = diagAccessor,
  argTypes = list(x = 'nSparseMatrix'),
  returnType = 'numericVector'
)
cDiagAccessor <- nCompile(nDiagAccessor)
cDiagAccessorSp <- nCompile(nDiagAccessorSp)
cDiagExprAccessor <- nCompile(nDiagExprAccessor)
# dense accessor
expect_identical(as.numeric(cDiagAccessor(X)), diag(X))
# dense accessor of tensor expression objects
expect_identical(as.numeric(cDiagExprAccessor(X, Y)), diag(X + Y))
# sparse accessor
expect_identical(as.numeric(cDiagAccessorSp(Xsp)), diag(Xsp))
#
# diag as assignment operator
#
# Templates that *write* the diagonal of a matrix argument: from an
# expression (y + z), a vector, or a scalar constant. The modified matrix is
# returned so results can be compared against base R's diag<- behavior.
diagExprAssignment <- function(x, y, z) {
  diag(x) <- y + z
  return(x)
}
diagAssignment <- function(x, y) {
  diag(x) <- y
  return(x)
}
# Dense and sparse nFunction wrappers; the "v" suffix again marks the
# vector-valued right-hand side, no suffix means a scalar ('double').
nDiagExprAssignment <- nFunction(
  fun = diagExprAssignment,
  argTypes = list(x = 'nMatrix', y = 'numericVector', z = 'numericVector'),
  returnType = 'nMatrix'
)
nSpDiagExprAssignment <- nFunction(
  fun = diagExprAssignment,
  argTypes = list(x = 'nSparseMatrix', y = 'numericVector', z = 'numericVector'),
  returnType = 'nSparseMatrix'
)
nDiagAssignmentv <- nFunction(
  fun = diagAssignment,
  argTypes = list(x = 'nMatrix', y = 'numericVector'),
  returnType = 'nMatrix'
)
nDiagAssignment <- nFunction(
  fun = diagAssignment,
  argTypes = list(x = 'nMatrix', y = 'double'),
  returnType = 'nMatrix'
)
nSpDiagAssignmentv <- nFunction(
  fun = diagAssignment,
  argTypes = list(x = 'nSparseMatrix', y = 'numericVector'),
  returnType = 'nSparseMatrix'
)
nSpDiagAssignment <- nFunction(
  fun = diagAssignment,
  argTypes = list(x = 'nSparseMatrix', y = 'double'),
  returnType = 'nSparseMatrix'
)
cDiagExprAssignment <- nCompile(nDiagExprAssignment)
cDiagAssignmentv <- nCompile(nDiagAssignmentv)
cDiagAssignment <- nCompile(nDiagAssignment)
cSpDiagAssignmentv <- nCompile(nSpDiagAssignmentv)
cSpDiagAssignment <- nCompile(nSpDiagAssignment)
cSpDiagExprAssignment <- nCompile(nSpDiagExprAssignment)
# Each case builds the expected result by applying diag<- in R to a copy (X1)
# and runs the compiled function on a second copy (X2), so the compiled call
# never sees a pre-modified input.
# dense assignment via an expression
X1 <- X
X2 <- X
diag(X1) <- diag(Y) + diag(X)
expect_identical(X1, cDiagExprAssignment(x = X2, y = diag(Y), z = diag(X)))
# dense assignment to vector
X1 <- X
X2 <- X
diag(X1) <- 1:nrow(X)
expect_identical(X1, cDiagAssignmentv(x = X2, y = 1:nrow(X)))
# dense assignment to constant
X1 <- X
X2 <- X
diag(X1) <- pi
expect_identical(X1, cDiagAssignment(x = X2, y = pi))
# sparse assignment to vector
X1 <- Xsp
X2 <- Xsp
diag(X1) = 1:nrow(X1)
expect_identical(X1, cSpDiagAssignmentv(x = X2, y = 1:nrow(X1)))
# sparse assignment to constant
X1 <- Xsp
X2 <- Xsp
diag(X1) = pi
expect_identical(X1, cSpDiagAssignment(x = X2, y = pi))
# sparse assignment via an expression
X1 <- Xsp
X2 <- Xsp
diag(X1) <- diag(Y) + diag(X)
expect_identical(X1, cSpDiagExprAssignment(x = X2, y = diag(Y), z = diag(X)))
#
# usage in composition
#
# Nested calls: diag(x = scalar) creates an identity matrix, and the outer
# diag() reads its diagonal back, so both templates should yield rep(1, x).
# This exercises diag-as-creator composed with diag-as-accessor (dense and
# sparse) inside a single compiled function.
d2 <- function(x) {
  return(diag(diag(x = x)))
}
d2Sp <- function(x) {
  return(diag(Diagonal(x = x)))
}
nD2 <- nFunction(
  fun = d2,
  argTypes = list(x = 'integer'),
  returnType = 'numericVector'
)
nD2Sp <- nFunction(
  fun = d2Sp,
  argTypes = list(x = 'integer'),
  returnType = 'numericVector'
)
cD2 <- nCompile(nD2)
cD2Sp <- nCompile(nD2Sp)
expect_equivalent(cD2(x = 5), rep(1,5))
expect_equivalent(cD2Sp(x = 5), rep(1,5))
|
bae52c7ea499c6f2fff087e052bbb691e306cbb4 | bf48bca6d68872cd14b5f26ada56ed23c056beee | /Project2/Problem 4/ActortoID.R | 7b78f901abd3733718358ecb5c652ea57cf16647 | [] | no_license | coastline1201/ee232e | 7186be918c49f0733628e247e848866ae7ceebed | b49cbad655d570dd41e2b62a4b550ca3d0032a70 | refs/heads/master | 2021-01-10T13:14:52.716399 | 2016-01-22T05:56:41 | 2016-01-22T05:56:41 | 50,161,011 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,266 | r | ActortoID.R | # This scripted is used to make a hash table of Actorname -> ActorID
# The second part is converting the three new movie’s actor name list to actorID
# Invert the ID -> name hash tables (hActorID / hActressID, built elsewhere)
# into name -> ID tables. Actor IDs are positive integers, actress IDs are
# negative, so the two numeric namespaces cannot collide.
hIDActor = hash()
len = length(hActorID)
for (i in 1:len)
{
  # progress indicator: report every 1000 entries
  if(i%%1000==0)
  {
    print(i)
  }
  # key: actor name stored under ID "i"; value: the numeric ID itself
  hIDActor[hActorID[[paste(i)]]] = i
}
hIDActress = hash()
len = length(hActressID)
for (i in 1:len)
{
  if(i%%1000==0)
  {
    print(i)
  }
  # actress names are stored under negative ID keys, hence paste(-i)
  hIDActress[hActressID[[paste(-i)]]] = -i
}
# Convert each new movie's actor-name list (Node1/Node2/Node3) to numeric
# IDs. The three original copy-pasted loops are factored into one helper.
#
# Args:
#   name_vec: character vector of actor/actress names.
# Returns: numeric vector of IDs (actors positive, actresses negative).
#   Names found in neither table are dropped; as in the original loops, a
#   name present in both tables resolves to the actress ID.
lookup_actor_ids <- function(name_vec) {
  ids <- vapply(name_vec, function(nm) {
    if (has.key(nm, hIDActress)) {
      as.numeric(hIDActress[[nm]])
    } else if (has.key(nm, hIDActor)) {
      as.numeric(hIDActor[[nm]])
    } else {
      0  # sentinel: 0 is never a valid ID (IDs are +/- i with i >= 1)
    }
  }, numeric(1), USE.NAMES = FALSE)
  ids[ids != 0]
}
Node1Act <- lookup_actor_ids(Node1)
Node2Act <- lookup_actor_ids(Node2)
Node3Act <- lookup_actor_ids(Node3)
0ff5140a92d44c1ce72b2742306fd8edc70b8be6 | c983ab9e5564104b93c1301c1d7c153b343a333f | /Chapter 11/Ex11_2_2.R | f082b37e25fb0e45706aec225da7651e262ed658 | [] | no_license | SaksheePhade/R-Programming | e638863451f88d7012091e9a34b4fe72f2822919 | b5b87a79e3dee2bbb2dd91eacb7abbcdbf446489 | refs/heads/master | 2021-03-06T10:37:18.710337 | 2020-11-06T16:25:58 | 2020-11-06T16:25:58 | 246,195,251 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,724 | r | Ex11_2_2.R | # Page no 549
# Multiple linear regression of birth weight (grams) on gestation length
# (weeks) and a 0/1 smoking indicator (smoke), n = 100 cases.
case_no <- c(1:100)
grams <- c(3147 ,2977 ,3119 ,3487 ,4111 ,3572 ,3487 ,3147 ,
           3345 ,2665 ,1559 ,3799 ,2750 ,3487 ,3317 ,3544 ,
           3459 ,2807 ,3856 ,3260 ,2183 ,3204 ,3005 ,3090 ,
           3430 ,3119 ,3912 ,3572 ,3884 ,3090 ,2977 ,3799 ,
           4054 ,3430 ,3459 ,3827 ,3147 ,3289 ,3629 ,3657 ,
           3175 ,3232 ,3175 ,3657 ,3600 ,3572 ,709 ,624 ,
           2778 ,3572 ,3232 ,3317 ,2863 ,3175 ,3317 ,3714 ,
           2240 ,3345 ,3119 ,2920 ,3430 ,3232 ,3430 ,4139 ,
           3714 ,1446 ,3147 ,2580 ,3374 ,3941 ,2070 ,3345 ,
           3600 ,3232 ,3657 ,3487 ,2948 ,2722 ,3771 ,3799 ,
           1871 ,3260 ,3969 ,3771 ,3600 ,2693 ,3062 ,2693 ,
           3033 ,3856 ,4111 ,3799 ,3147 ,2920 ,4054 ,2296 ,
           3402 ,1871 ,4167 ,3402)
weeks <- c(40, 41, 38, 38, 39, 41, 40, 41, 38,34, 34, 38, 38,
           40, 38, 43, 45, 37,40, 40, 42, 38, 36, 40, 39, 40, 39,
           40, 41, 38, 42, 37, 40, 38, 41, 39, 44, 38, 36, 36,
           41, 43, 36, 40, 39, 40, 25, 25, 36, 35, 38, 40, 37,
           37, 40, 34, 36, 39, 39, 37, 41, 35, 38, 39, 39, 28,
           39, 31, 37, 40, 37, 40, 40, 41, 38, 39, 38, 40, 40,
           45, 33, 39, 38, 40, 40, 35, 45, 36, 41, 42, 40, 39,
           38, 36, 40, 36, 38, 33, 41, 37)
smoke <- c(rep(0, 8) ,1 ,rep(0, 6),1 ,rep(0, 4),1 ,0 ,0 ,
           1 ,rep(0, 9),1 ,0 ,0 ,1 ,0 ,0 ,0 ,1 ,1 ,0 ,1 ,rep(0, 6),
           rep (0 ,15) ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 , rep(0 ,10) ,
           1, rep (0 ,11) ,1 ,0 ,1)
data <- data.frame(grams, weeks, smoke)
data
# Fit the regression. The data frame is referenced explicitly via `data =`
# rather than relying on the free-standing global vectors, so the model is
# self-contained (the fitted values are identical either way).
Regression.eqn <- lm(grams ~ weeks + smoke, data = data)
Regression.eqn
summary(Regression.eqn)
# ANOVA decomposition of the fitted model. Binding the result to the name
# `aov` shadows stats::aov only as a data object; R still finds the function
# when `aov(...)` is called, but the name is kept for compatibility.
aov <- aov(Regression.eqn)
aov
summary(aov)
074af1558943a1c7048a3540a1e679de5783207f | c2f9615579153ab57d4e3c80a12fca36988e60fd | /R-script/chap15_(3) 다중공선성(Multicolinearity).R | b246c79eadea4cff1d4d3c70c8a191addbd1d98e | [] | no_license | Elly-bang/R | a0e78a07d01431dd7b774f27a32006035f7646a0 | 859c87a330b6ec9bba8f881a751318e42029f446 | refs/heads/master | 2022-11-25T13:35:17.854021 | 2020-07-13T01:03:37 | 2020-07-13T01:03:37 | 277,995,594 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,711 | r | chap15_(3) 다중공선성(Multicolinearity).R | ###################################
# 4. Multicollinearity
###################################
# - Strong correlation among the independent variables makes the regression
#   coefficients (and hence the model) unreliable.
# - Example: using both date of birth and age as independent variables.
# - Remedy: drop one of the strongly correlated independent variables.
# (1) Check for a multicollinearity problem (VIF from the car package)
library(car)
fit <- lm(formula=Sepal.Length ~ Sepal.Width+Petal.Length+Petal.Width, data=iris)
vif(fit)
# Sepal.Width Petal.Length  Petal.Width 
#     1.270815    15.097572    14.234335 
sqrt(vif(fit))>2 # sqrt(VIF) >= 2 suggests a multicollinearity problem
# Sepal.Width Petal.Length  Petal.Width 
#       FALSE         TRUE         TRUE 
# (2) Correlation coefficients between the iris variables
cor(iris[,-5]) # pairwise correlations (Species excluded)
# 
#              Sepal.Length Sepal.Width Petal.Length Petal.Width
# Sepal.Length    1.0000000  -0.1175698    0.8717538   0.8179411
# Sepal.Width    -0.1175698   1.0000000   -0.4284401  -0.3661259
# Petal.Length    0.8717538  -0.4284401    1.0000000   0.9628654 (high correlation)
# Petal.Width     0.8179411  -0.3661259    0.9628654   1.0000000
# Petal.Length + Petal.Width are highly correlated with each other
# -> drop the offending variable (revise the model) <- Petal.Width
# (3) Split the data into training and test sets (70% / 30%).
nrow(iris) # 150 observations in total
x <- sample(seq_len(nrow(iris)), size = 0.7 * nrow(iris)) # draw 70% of the row indices
train <- iris[x, ]  # training rows
test <- iris[-x, ]  # remaining 30%, held out for validation
dim(train) # 105 5
dim(test)  # 45 5
# (4) Refit the regression after removing Petal.Width (the collinear predictor)
result.model<- lm(formula=Sepal.Length ~ Sepal.Width + Petal.Length, data=train)
result.model
# Call:
# lm(formula = Sepal.Length ~ Sepal.Width + Petal.Length, data = train)
# 
# Coefficients:
#  (Intercept)   Sepal.Width  Petal.Length  
#       2.2739        0.5983        0.4622  
# NOTE: the original script called library(iris) here; iris is a built-in
# *dataset* (from the datasets package, attached by default), not a package,
# so that call always errors. It has been removed.
iris_model <- lm(formula = Sepal.Length~Sepal.Width+ Petal.Length, data=train )
summary(iris_model)
# (5) Model predictions on the test set
y_pred <- predict(iris_model,test)
y_pred
length(y_pred) #45
y_true <- test$Sepal.Length
# (6) Model evaluation: MSE and correlation
# MSE (depends on the response scale)
Error <- y_true - y_pred
mse <- mean(Error**2)
cat('MSE=',mse) # e.g. MSE= 0.09154192
# correlation coefficient r (scale-free)
r <- cor(y_true,y_pred)
cat('r=',r) # e.g. r= 0.9317594
y_pred[1:10]
# Visual comparison of observed vs predicted values.
# (The original passed label='...' to plot()/points(); `label` is not a
# graphical parameter, so those arguments are dropped and the series are
# identified via the legend instead.)
plot(y_true, col='blue', type='l')
lines(y_pred, col='red')
# add a legend -- the original line had a syntax error: legend=c,('y true',...)
legend("topleft", legend=c('y true','y pred'),
       col=c('blue','red'),pch='-')
|
c6fa38de210a26157a97fc03ee6f0ca346d815ed | 2bfd239fd6449e13a5ca753e1c0d0f40407bcfaa | /plot2.R | 6c56cd4454c21286a0da371e1697332c089ea8b7 | [] | no_license | heber7/Course-Project-2 | 8d136871ae657da03f9dec33be16863cab1010c7 | daf4d381f07e0fc46166314d430c8df1c7899b21 | refs/heads/master | 2021-01-10T10:39:58.172418 | 2015-05-24T12:30:58 | 2015-05-24T12:30:58 | 36,170,480 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 503 | r | plot2.R |
#File Required:
# Full NEI PM2.5 emissions summary (EPA National Emissions Inventory);
# the .rds file must be present in the working directory.
NEI <- readRDS("summarySCC_PM25.rds")
#-----------------------------------#
## Question 2 - total emission from PM2.5: decrease in Baltimore City from 1999 to 2008
# fips "24510" is the Baltimore City code used by this dataset; total the
# emissions per recorded year.
Baltimore <- NEI[NEI$fips == "24510", ]
BaltimoreCity <- tapply(Baltimore$Emissions, Baltimore$year, sum)
plot(BaltimoreCity, type = "o", pch = 18, cex=2, col = "purple", ylab = "Emissions",
     xlab = "Year", main = "Emissions: Baltimore City")
# copy the on-screen plot to a 480x480 PNG, then close the PNG device
dev.copy(png,file="plot2.png",width = 480, height = 480)
dev.off()
4d4d5e852250bc93b647fe3bed4e178a31c98347 | 67c56336d7d1236fa57700af759fe75e9454ed44 | /util/exechunk.R | c2fff39372bed211d8b3c5332631a15825b4fa25 | [] | no_license | actongender/geam-report | b9fbec753a0fca1a4854d60d0abf00a55d3ca65b | 163f5b88dacf1ead8cb0397f1ad0e795ffbcb1eb | refs/heads/master | 2023-04-28T06:14:36.966608 | 2021-05-21T06:51:58 | 2021-05-21T06:51:58 | 275,156,339 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 757 | r | exechunk.R | #' @title Check several conditions in relation to variable
#'
#' @description Checks whether the knitr chunk for a given survey variable
#' should be executed: returns FALSE when the variable is listed in the
#' global \code{skipsubquestion} vector, otherwise delegates to
#' \code{var_exists()} using the column names of the supplied data frame
#' (falling back to the global \code{df.geam} when no data frame is given).
#'
#' @param variable String. Name of the variable of the corresponding code
#' @param data Data frame. Usually df.geam. When NULL, the global
#'   \code{df.geam} is used if it exists.
#'
#' @return logical: TRUE when the chunk should be executed
#'
exechunk <- function(variable, data=NULL){
  # Explicitly skipped sub-questions are never executed.
  if (variable %in% skipsubquestion){
    return(FALSE)
  }
  # Resolve the column names to search: an explicitly supplied data frame
  # wins, then the global df.geam, then an empty fallback. (This replaces
  # the original vectorized `&` in a scalar condition and drops the dead
  # `exec <- TRUE` assignment.)
  if (!is.null(data)){
    cnames <- names(data)
  } else if (exists("df.geam")){
    cnames <- names(df.geam)
  } else {
    cnames <- ""
  }
  var_exists(variable, cnames, data)
}
b2bb025ea37d9d6fe4a92174bd1974fb5e8c1575 | 799475c2ac41f6af3cec0ab5b9ab5136356b884a | /example_LHDdata_multi.R | d84fe8b8d2f95893389d46475ad3ba5775f7de4a | [] | no_license | jtkgithub/trendtests | 0f0cf9551140228b88eba7888e1c972aeb6f0a59 | b51c1e2692af509212fca9ee70b6baae80fc3d6b | refs/heads/master | 2020-04-10T07:10:29.540410 | 2018-12-12T22:05:59 | 2018-12-12T22:05:59 | 160,874,332 | 0 | 0 | null | null | null | null | ISO-8859-15 | R | false | false | 3,726 | r | example_LHDdata_multi.R | ### Examples of how to run the multiple process tests
# Clear the workspace (NOTE: rm(list=ls()) wipes the caller's session too;
# kept here because this file is a stand-alone example script)
rm(list=ls())
# Load the tests (defines the *_multi trend-test functions called below)
source("trendtests_multi.R")
## Read in data
# The following data are from:
# Kumar and Klefsjö (1992), Reliability analysis of hydraulic systems of LHD machines
# using the power law process model, Reliability Engineering and System Safety 35: 217-224.
# The data are reported as times between events for machine no 1, 3, 9, 11, 17 and 20.
#
# Every machine is prepared identically, so the repeated pattern is factored
# into one helper: cumulate the inter-event times into event times, censor
# at the last event time, and drop the censoring time from the event list.
prep_lhd <- function(xtimes) {
  ftimes <- cumsum(xtimes) # times to event
  tau <- tail(ftimes, 1)   # use the last event time as censoring time
  list(ftimes = ftimes[ftimes < tau], tau = tau)
}
LHD1_xtimes <- c(327, 125, 7, 6, 107, 277, 54, 332, 510, 110, 10, 9, 85,
                 27, 59, 16, 8, 34, 21, 152, 158, 44, 18) # Times between events
LHD1_prep <- prep_lhd(LHD1_xtimes)
LHD1_ftimes <- LHD1_prep$ftimes
LHD1_tau <- LHD1_prep$tau
LHD3_xtimes <- c(637, 40, 397, 36, 54, 53, 97, 63, 216, 118, 125, 25, 4,
                 101, 184, 167, 81, 46, 18, 32, 219, 405, 20, 248, 140)
LHD3_prep <- prep_lhd(LHD3_xtimes)
LHD3_ftimes <- LHD3_prep$ftimes
LHD3_tau <- LHD3_prep$tau
LHD9_xtimes <- c(278, 261, 990, 191, 107, 32, 51, 10, 132, 176, 247, 165, 454, 142,
                 38, 249, 212, 204, 182, 116, 30, 24, 32, 38, 10, 311, 61)
LHD9_prep <- prep_lhd(LHD9_xtimes)
LHD9_ftimes <- LHD9_prep$ftimes
LHD9_tau <- LHD9_prep$tau
LHD11_xtimes <- c(353, 96, 49, 211, 82, 175, 79, 117, 26, 4, 5, 60, 39, 35, 258,
                  97, 59, 3, 37, 8, 245, 79, 49, 31, 259, 283, 150, 24)
LHD11_prep <- prep_lhd(LHD11_xtimes)
LHD11_ftimes <- LHD11_prep$ftimes
LHD11_tau <- LHD11_prep$tau
LHD17_xtimes <- c(401, 36, 18, 159, 341, 171, 24, 350, 72, 303, 34, 45, 324, 2, 70, 57,
                  103, 11, 5, 3, 144, 80, 53, 84, 218, 122)
LHD17_prep <- prep_lhd(LHD17_xtimes)
LHD17_ftimes <- LHD17_prep$ftimes
LHD17_tau <- LHD17_prep$tau
LHD20_xtimes <- c(231, 20, 361, 260, 176, 16, 101, 293, 5, 119, 9, 80, 112, 10,
                  162, 90, 176, 370, 90, 15, 315, 32, 266)
LHD20_prep <- prep_lhd(LHD20_xtimes)
LHD20_ftimes <- LHD20_prep$ftimes
LHD20_tau <- LHD20_prep$tau
# Number of processes considered
npros <- 6
# Vector of all censoring times
ttauvec <- c(LHD1_tau,LHD3_tau,LHD9_tau,LHD11_tau,LHD17_tau,LHD20_tau)
# List of the data set up in the required format
# (tvec = per-process event times, tauvec = censoring times, m = # processes)
SBdatalist <- list(tvec=list(LHD1_ftimes,LHD3_ftimes,LHD9_ftimes,LHD11_ftimes,LHD17_ftimes,LHD20_ftimes),
                   tauvec=ttauvec,m=npros)
## Nelson-Aalen plot of the data
nrisk <- npros:1 # number of processes still observed, by ascending tau
taus <- sort(ttauvec)
etimes <- sort(c(LHD1_ftimes,LHD3_ftimes,LHD9_ftimes,LHD11_ftimes,LHD17_ftimes,LHD20_ftimes))
# Number at risk at each event time = number of processes whose censoring
# time strictly exceeds that event time (vectorized; replaces the original
# 1:length(...) loop).
nrisks <- vapply(etimes, function(tt) as.numeric(nrisk[taus > tt][1]), numeric(1))
# Nelson-Aalen estimate of the mean cumulative number of events
NAest <- cumsum(1/nrisks)
plot(etimes,NAest,type="s",xlab="Time (hours)",ylab="Estimated mean cumulative number",
     lwd=2,col="blue", main="Nelson-Aalen plot")
points(etimes,NAest,col="blue")
# Reference line from the first event time to the final estimate; a roughly
# straight plot suggests the absence of a trend.
lines(c(head(etimes,1),tail(etimes,1)),c(0,tail(NAest,1)),lty=3,lwd=1,col="red")
## Run the trend tests
# All functions below come from the sourced trendtests_multi.R.
LRtest_multi(SBdatalist,weights="sqrtNCV",sigma="s")
maxtau <- max(ttauvec) # Used to define a in each process in ELR below to correspond to maxtau/2
ELRtest_multi(SBdatalist,weights="sqrtNCV",sigma="s",avec=(1-0.5*ttauvec/maxtau))
# CvM/AD variant 1 uses simulated p-values (Npsim replicates); variant 2 does not
CvMtest_multi1(SBdatalist,weights="tau",sigma="s",Npsim=10000)
CvMtest_multi2(SBdatalist,weights="tau",sigma="s")
ADtest_multi1(SBdatalist,weights="tau",sigma="s",Npsim=10000)
ADtest_multi2(SBdatalist,weights="tau",sigma="s")
LinRanktest_multi(SBdatalist)
GLtest_multi(SBdatalist)
|
ba487215c62a94cf72b185eca6e80d4a8c98e9eb | d18d09d4184ac140224078017b9cf7ef8f3ae3c1 | /R/trump_mein_kampf.R | 6f1f0c9794c272f2770f60b18f48737db00484c2 | [
"CC0-1.0"
] | permissive | inh2102/TrumpTweetToMeinKampf | 3839a4a58215a75d2232550a2604553f4d5c6ea3 | a74100d0a4adfd9067cea9bb4b21e332ab4ae376 | refs/heads/master | 2022-11-27T15:58:51.258186 | 2020-07-27T23:45:05 | 2020-07-27T23:45:05 | 282,809,040 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,701 | r | trump_mein_kampf.R | ### Just how similar is a given Trump tweet to a line from Hitler's Mein Kampf? Let's find out...
### This script uses three packages: tidyverse (notation), quanteda (text pre-processing and cosine similarity calculation), and rtweet (importing and processing Twitter API data). You'll need to un-comment the install.packages() calls if you haven't installed these packages.
#install.packages(tidyverse)
#install.packages(quanteda)
#install.packages(rtweet)
library(tidyverse,quietly=T); library(quanteda,quietly=T); library(rtweet,quietly=T)
# As of 7/26/20, Trump has produced 43,137 original tweets (excluding retweets). Let's read those in along with all 10,317 lines of Mein Kampf:
trump <- read_csv("all_trump_original_tweets_072620.csv",col_types = cols())
fileName <- 'MeinKampf.txt'
# Read the whole text file as one string and split it into sentences at
# ". ", "! " or "? ".
MeinKampf <- readChar(fileName, file.info(fileName)$size) %>% strsplit("\\.\\s|\\!\\s|\\?\\s")
# Reshape the sentence list into a one-column tibble; the generated column
# name below comes from data.frame()'s deparsed expression.
MeinKampf <- tibble(data.frame(matrix(unlist(MeinKampf), nrow=10317, byrow=T),stringsAsFactors=FALSE))
df <- MeinKampf %>% rename(text = matrix.unlist.MeinKampf...nrow...10317..byrow...T.)
# Mein Kampf text pre-processing:
# build a corpus, tokenize with 1- to 3-grams, and construct a stemmed,
# lower-cased document-feature matrix with English stopwords removed.
hitlercorp <- corpus(df$text)
hitlerdfm <- tokens(hitlercorp) %>% tokens_ngrams(n=1:3) %>% dfm(tolower=TRUE,remove_url=TRUE,stem=TRUE,remove_punct=TRUE,remove=c(stopwords("english")))
# Here we define the function:
# Interactively picks one Trump tweet (by index), builds a 1- to 10-gram
# document-feature matrix from it, and prints the 5 lines of Mein Kampf with
# the highest cosine similarity. If the user answers anything but "yes", the
# function recurses to let them pick another tweet.
select_trump_tweet <- function() {
  q1 <- readline(prompt="Which Trump tweet would you like to view? (A number from 1-43137) ")
  cat(plain_tweets(trump$text)[as.numeric(q1)])
  q2 <- readline(prompt="Would you like to use this tweet (Yes or No)? ")
  # case-insensitive confirmation
  if (as.character(tolower(q2))=="yes") {
    # Pre-process the single selected tweet the same way as the Mein Kampf
    # corpus, plus Twitter-specific noise terms (t.co, rt, amp, ...).
    trumpcorp <- corpus(plain_tweets(trump$text)[as.numeric(q1)])
    trumpdfm <- tokens(trumpcorp) %>% tokens_ngrams(n=1:10) %>% dfm(tolower=TRUE,remove_url=TRUE,stem=TRUE,remove_punct=TRUE,remove=c(stopwords("english"), "t.co", "https", "rt", "amp", "http", "t.c", "can", "~","RT","realdonaldtrump"))
    cat("\n\nTrump tweet selected!\n\n")
    cat("Searching lines from Mein Kampf...\n\n")
    # cosine similarity between every Mein Kampf line and the selected tweet
    trump_hitler <- as.data.frame(textstat_simil(hitlerdfm, trumpdfm, margin = "documents",method="cosine"))
    cat("Done! Top 5 matching results:\n\n")
    # sort by descending similarity; document1 names look like "textNNN"
    trump_hitler <- as_tibble(trump_hitler[order(-trump_hitler$cosine),])
    for (tweet in trump_hitler$document1[1:5]) {
      # strip the "text" prefix to recover the Mein Kampf line number
      cat(paste(which(trump_hitler$document1==tweet),df$text[as.numeric(substr(tweet,5,nchar(tweet)))],"\n\n"))
    }
  }
  else {
    # user declined: prompt again (recursive retry)
    select_trump_tweet()
  }
}
### Before running the function, if you'd like to view 10 random Trump tweets, you can do so here:
sample_n(plain_tweets(trump),10)
## Time to turn a Trump tweet into Mein Kampf. Can you tell the difference?
select_trump_tweet()
|
58e79398f631b5980fdd50efa311ca291b43cd1a | 83bfc2ffa4b4e28c1c6ea877c204931980a3e99d | /R_code/simulation_fixed_effect/simulation_tranfromation_fixed_effect_full.R | 1c6885f38e859db2d6be86cc237011e47696248c | [] | no_license | wal615/prime_project | 0d555626292a713d94700e565363681e2e2e514e | 8a85b47ecbcaf4419ca33588fd607019226bf3ca | refs/heads/master | 2022-07-04T20:58:33.789355 | 2020-05-05T20:13:16 | 2020-05-05T20:13:16 | 111,431,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,541 | r | simulation_tranfromation_fixed_effect_full.R | # the following simulation is trying to compare the differnce between the proposed method
# and the proposed method on fixed effects.
# (setwd() ties the script to one machine's layout; kept as-is for this
# project-local analysis script)
setwd("~/dev/projects/Chen_environmental_study/")
# Yang_REML.R supplies std_fn, the *_tran transformation functions and
# compare_corr_GCTA used below.
source("./R_code/Yang_REML.R")
library(sas7bdat)
library(MASS)
library(tidyverse)
library(foreach)
library(doRNG)
library(doParallel)
# PCB exposure data; keep columns 2:35 (34 congeners) as a numeric matrix.
a=read.sas7bdat("~/dev/projects/Chen_environmental_study/R_code/pcbs1000nomiss.sas7bdat")
a=data.matrix(a[,2:35], rownames.force = NA)
## Transformation with full data: apply each candidate transformation
## (identity, rank, normal-quantile, log, sqrt, and three categorization
## grain sizes) to the same matrix.
b <- a
b_null_full <- std_fn(b, ncol(b), tran_FUN = null_tran, list(tran = "null", subset = "full"))
b_rank_full <- std_fn(b, ncol(b), tran_FUN = rank_tran, list(tran = "rank", subset = "full"))
b_quantile_full <- std_fn(b, ncol(b), tran_FUN = norm_quantile_tran, list(tran = "norm_quan", subset = "full"))
b_log_full <- std_fn(b, ncol(b), tran_FUN = log_tran, list(tran = "log", subset = "full"))
b_sqrt_full <- std_fn(b, ncol(b), tran_FUN = sqrt_tran, list(tran = "sqrt", subset = "full"))
b_cat_10_full <- std_fn(b, ncol(b), tran_FUN = categorized_tran, by = 0.1, list(tran = "cate", by = "0.1", subset = "full"))
b_cat_5_full <- std_fn(b, ncol(b), tran_FUN = categorized_tran, by = 0.2, list(tran = "cate", by = "0.2", subset = "full"))
b_cat_2_full <- std_fn(b, ncol(b), tran_FUN = categorized_tran, by = 0.5, list(tran = "cate", by = "0.5", subset = "full"))
data_list_fixed_full <- list(b_null_full = b_null_full,
                             b_rank_full = b_rank_full,
                             b_quantile_full = b_quantile_full,
                             b_log_full = b_log_full,
                             b_sqrt_full = b_sqrt_full,
                             b_cat_10_full = b_cat_10_full,
                             b_cat_5_full = b_cat_5_full,
                             b_cat_2_full = b_cat_2_full)
interaction_list <- as.list(rep(1,length(data_list_fixed_full)))
interaction_m_list <- as.list(rep(1,length(data_list_fixed_full)))
# Run compare_corr_GCTA once per transformed data set (elementwise over the
# three parallel lists), with shared settings passed through MoreArgs.
result_list <- mapply(FUN = compare_corr_GCTA,
                      b = data_list_fixed_full,
                      interaction = interaction_list,
                      interaction_m = interaction_m_list,
                      MoreArgs = list(brep = 3, nrep = 100, seed = 123, cores = 3,
                                      interm_result = TRUE,
                                      interm_result_path = "~/dev/projects/Chen_environmental_study/result/inter_result/"),
                      SIMPLIFY = FALSE)
save(result_list, file = "./result/simulation_fixed_full_8tran")
|
2fc3683167d56ccac9cde141fa85ee197dd38939 | db8a43ce4e4d58a57a0a2bb29b63acf6c30b5092 | /R/modelmetrics.R | 800e2bba79e91cde7e05246c74e7be4167ff6de4 | [] | no_license | zhaoxiaohe/MachineShop | ca6fa7d6e7f00ac7d6f8522d50faeec2f4735b2d | 85b1ff6a9d7df425d041289856861e75ce596621 | refs/heads/master | 2020-04-11T06:30:08.059577 | 2018-12-13T00:45:43 | 2018-12-13T00:45:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,977 | r | modelmetrics.R | #' Model Performance Metrics
#'
#' Compute measures of model performance.
#'
#' @rdname modelmetrics
#'
#' @param x observed responses or class containing observed and predicted
#' responses.
#' @param y predicted responses.
#' @param metrics function, one or more function names, or list of named
#' functions to include in the calculation of performance metrics.
#' @param cutoff threshold above which probabilities are classified as success
#' for binary responses.
#' @param times numeric vector of follow-up times at which survival events
#' were predicted.
#' @param na.rm logical indicating whether to remove observed or predicted
#' responses that are \code{NA} when calculating model metrics.
#' @param ... arguments passed from the \code{Resamples} method to the others.
#'
#' @return \code{ModelMetrics} class object for the \code{Resamples} method;
#' a named numeric vector of metric values for the other methods.
#'
#' @seealso \code{\link{response}}, \code{\link{predict}},
#' \code{\link{resample}}, \code{\link{metrics}}
#'
modelmetrics <- function(x, ...) {
  UseMethod("modelmetrics")
}
#' @rdname modelmetrics
#'
#' @examples
#' res <- resample(Species ~ ., data = iris, model = GBMModel)
#' (metrics <- modelmetrics(res))
#' summary(metrics)
#' plot(metrics)
#'
modelmetrics.Resamples <- function(x, ..., na.rm = TRUE) {
  control <- x@control
  if (na.rm) x <- na.omit(x)
  args <- list(...)
  # survival follow-up times come from the resampling control object
  args$times <- control@surv_times
  # Compute the metrics separately for every (Model, Resample) cell by
  # dispatching on the cell's observed/predicted responses; empty cells
  # yield NA.
  metrics_by <- by(x, x[c("Model", "Resample")], function(x) {
    if (nrow(x)) {
      do.call(modelmetrics, c(list(x$Observed, x$Predicted), args))
    } else {
      NA
    }
  }, simplify = FALSE)
  # Stack the per-resample metric vectors into one matrix per model
  # (rows = resamples, columns = metrics).
  metrics_list <- tapply(metrics_by,
                         rep(dimnames(metrics_by)$Model, dim(metrics_by)[2]),
                         function(metrics) do.call(rbind, metrics),
                         simplify = FALSE)
  # With several models, bind the matrices into a 3-d array (third dimension
  # indexes the model); with one model keep the plain matrix.
  metrics <- if (length(metrics_list) > 1) {
    abind(metrics_list, along = 3)
  } else {
    metrics_list[[1]]
  }
  dimnames(metrics)[[1]] <- dimnames(metrics_by)$Resample
  ModelMetrics(metrics)
}
#' @rdname modelmetrics
#'
modelmetrics.factor <- function(x, y, metrics =
                                  c("Accuracy" = MachineShop::accuracy,
                                    "Kappa" = MachineShop::kappa2,
                                    "ROCAUC" = MachineShop::roc_auc,
                                    "Sensitivity" = MachineShop::sensitivity,
                                    "Specificity" = MachineShop::specificity,
                                    "Brier" = MachineShop::brier),
                                cutoff = 0.5, ...) {
  # Normalize `metrics` (function, function names, or named list) to one
  # callable, then evaluate it on the observed/predicted pair at `cutoff`.
  list2function(metrics)(x, y, cutoff = cutoff)
}
#' @rdname modelmetrics
#'
modelmetrics.matrix <- function(x, y, metrics =
                                  c("R2" = MachineShop::r2,
                                    "RMSE" = MachineShop::rmse,
                                    "MAE" = MachineShop::mae), ...) {
  # Normalize `metrics` to a single callable and apply it to the
  # observed/predicted matrices.
  list2function(metrics)(x, y)
}
#' @rdname modelmetrics
#'
modelmetrics.numeric <- function(x, y, metrics =
                                   c("R2" = MachineShop::r2,
                                     "RMSE" = MachineShop::rmse,
                                     "MAE" = MachineShop::mae), ...) {
  # Normalize `metrics` to a single callable and apply it to the
  # observed/predicted vectors.
  list2function(metrics)(x, y)
}
#' @rdname modelmetrics
#'
#' @examples
#' ## Survival response example
#' library(survival)
#' library(MASS)
#'
#' fo <- Surv(time, status != 2) ~ sex + age + year + thickness + ulcer
#' gbmfit <- fit(fo, data = Melanoma, model = GBMModel)
#'
#' obs <- response(fo, data = Melanoma)
#' pred <- predict(gbmfit, newdata = Melanoma, type = "prob")
#' modelmetrics(obs, pred)
#'
modelmetrics.Surv <- function(x, y, metrics =
                                c("CIndex" = MachineShop::cindex,
                                  "ROC" = MachineShop::roc_auc,
                                  "Brier" = MachineShop::brier),
                              times = numeric(), ...) {
  # Normalize `metrics` to a single callable and evaluate it at the
  # requested follow-up times.
  list2function(metrics)(x, y, times = times)
}
|
d6751778777e66d757173fbd6037b864c3d79dbc | d0216cc7b6bc2c1694e746a8c176d23405520642 | /PGMs/Package_Trial/T3/expMRF/R/SPGM.select.R | 8f4a68f7e0d575ea9dab1af8b20b25299175c3bb | [] | no_license | zhandong/XFam | f329947e856afb608eaa3103e02380f982d52bcb | a62925691fa7a5e499d7ad5e76fe903f4cf42914 | refs/heads/master | 2021-01-19T20:27:37.451797 | 2014-03-27T14:28:57 | 2014-03-27T14:28:57 | 5,893,492 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 618 | r | SPGM.select.R | SPGM.select <-
function(X, R, R0=0, N=100,beta=0.05, lmin = 0.01, nlams=20, lambda.path=NULL ,parallel=TRUE,nCpus=4){
  # Neighborhood selection for a Sublinear Poisson Graphical Model (SPGM):
  # sublinearly truncate the counts, then delegate to LPGM.select().
  #
  # Args:
  #   X: data matrix of counts.
  #   R: upper truncation threshold for the sublinear transform (must be >= 0).
  #   R0: lower truncation threshold (default 0).
  #   N, beta, lmin, nlams, lambda.path, parallel, nCpus: forwarded unchanged
  #     to LPGM.select().
  # Returns: the fitted object from LPGM.select() with the user's call
  #   recorded, or NULL (with a printed message) when R is negative.
  require('huge')
  require('glmnet')

  if (R < 0){
    cat("ERROR: Truncating threshold R should be positive. \n")
    return(NULL)
  }

  # Generate the matrix with values sublinearly truncated between R (upper
  # bound) and R0 (lower bound), rounded back to integer counts.
  # (The dead copy `Xorig <- X` in the original was never used and is removed.)
  X <- round(Bsublin(X, R, R0))

  ghat <- LPGM.select(X, method="SPGM", N=N, beta=beta, lmin=lmin, nlams=nlams, lambda.path=lambda.path, parallel=parallel, nCpus=nCpus)
  if(!is.null(ghat)){
    # Record the call so print/summary methods can display it.
    ghat$call = match.call()
  }
  return(ghat)
}
|
a3b2704f7213773d676d6e6c4a81ac61b429a0b0 | 97fbb93b69089b926d299a5d9d44f67888ca4143 | /man/dailyprecipNCEP.Rd | 45609718b146d3472a03892f7ce6d8a067d2e9b9 | [] | no_license | ilyamaclean/microclima | 814001c6b36329c477e3497422c21fb2dfd5cf09 | 8d9900b31c2ff1c58a07e07f71647fde08068e2e | refs/heads/master | 2023-08-17T22:27:03.898069 | 2023-08-04T12:30:03 | 2023-08-04T12:30:03 | 139,837,968 | 30 | 14 | null | 2023-04-30T18:44:14 | 2018-07-05T11:12:32 | R | UTF-8 | R | false | true | 973 | rd | dailyprecipNCEP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datatools.R
\name{dailyprecipNCEP}
\alias{dailyprecipNCEP}
\title{Obtain daily precipitation from NCEP}
\usage{
dailyprecipNCEP(lat, long, tme, reanalysis2 = FALSE)
}
\arguments{
\item{lat}{the latitude of the location for which data are required}
\item{long}{the longitude of the location for which data are required}
\item{tme}{a POSIXlt object covering the duration for which data are required.
Intervals should be daily.
Hourly data are returned irrespective of the time interval of \code{tme}.}
\item{reanalysis2}{Logical. Should data be obtained from the Reanalysis II dataset (\code{TRUE}) or
from Reanalysis I (the default; use for data prior to 1979)?}
}
\value{
a vector of daily precipitations (mm / day) for the period covered by tme
}
\description{
Obtain daily precipitation from NCEP
}
\examples{
tme <- as.POSIXlt(c(1:15) * 24 * 3600, origin = "2015-01-15", tz = 'UTC')
dailyprecipNCEP(50, -5, tme)
}
|
c403acac63911df9bf6598fb25dd8c819b2d9270 | 07c8f492710be978cffd39acec53328bba44e5b8 | /auxFunctions/R/adjust_elements_dataframe.R | bf6afde8434f4aad2eb4692898e6ee424c8104b3 | [] | no_license | contaratoandressa/pegasus | 543957c304c16eaba3a5e3a724a14a16b2455b0c | 277c336d8c52c75e902bfdb6f279b070102c5c1c | refs/heads/master | 2021-10-02T11:04:20.729333 | 2021-09-19T00:29:13 | 2021-09-19T00:29:13 | 191,631,400 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 973 | r | adjust_elements_dataframe.R | #####################################################################
# function that reads a dataframe and adjusts values in factor format
#####################################################################
# NOTE(review): rm(list = ls()) in a sourced script wipes the caller's
# workspace; consider removing from reusable code.
rm(list = ls())
# example: a factor column mixing case and digits
dataset <- data.frame(x1 = as.factor(c('teste','Teste0','0TESTE')), x2 = c(1,2,3))
# function
#' Clean a character/factor column of a data frame
#'
#' Coerces column \code{col} to character, transliterates accented
#' characters and upper-cases the values, then replaces every entry that
#' contains a non-digit character with \code{name}. Purely numeric entries
#' (e.g. "123") are left unchanged.
#'
#' @param data dataframe
#' @param col specific column (character scalar naming the column)
#' @param name replacement value for entries containing non-digit characters
#' @return \code{data} with the adjusted column
adjust_dataframe <- function(data = dataset, col = 'x1', name = 'teste'){
  # Coerce to character (the column may arrive as a factor).
  data[[col]] <- as.character(data[[col]])
  # Transliterate accented vowels / cedilla, then upper-case.
  data[[col]] <- toupper(chartr("ÁÉÍÓÚÀÈÌÒÙÃÕÂÊÔÇáéíóúàèìòùãõâêôç","AEIOUAEIOUAOAEOCaeiouaeiouaoaeoc",data[[col]]))
  # Bug fix: the original used grep("[^:digit:]", data[col]), which both
  # misspells the POSIX class [[:digit:]] and greps the one-column
  # data.frame (collapsed to a single string) instead of the vector, so no
  # element was ever replaced.  grepl() on the column vector applies the
  # intended per-element test.
  data[[col]][grepl("[^[:digit:]]", data[[col]])] <- name
  return(data)
}
# Demo invocation using the defaults (the global `dataset` defined above).
adjust_dataframe()
#####################################################################
# end
#####################################################################
|
dcc15de49f6a666827b267a08747970a5a87b599 | dc5673ce188e1702e1870bbcdbdec6da34ab55b5 | /R_repository/acousaR/R/LF2PGNAPES.R | 4b74ce3d28460613c493751176838f85af5bee8e | [] | no_license | saschafassler/acousa | 56c04d442e35ae15f19bf0032f89c8ec8e19e5a9 | a7d30e66c4aff802643a49d72a41558a4a5efa0c | refs/heads/master | 2021-01-10T04:03:18.302152 | 2015-12-10T14:51:57 | 2015-12-10T14:51:57 | 43,646,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,953 | r | LF2PGNAPES.R | ### LF2PGNAPES
### Expands the PGNAPES Biology table with length-only records so that its
### per-trawl length frequencies match the acousa catch-LF table.
# load Biology file
biology <- read.csv(file="D:/Sascha/Projects/WKEVAL/Dutch HERAS data/PGNAPES format/2014TRI2HERAS_Biology.csv",header=TRUE)
# load file with LF data (of fish with just length measurements)
# (no header: row 1 holds station numbers, remaining rows numbers-at-length)
lf_data <- read.csv(file="D:/Sascha/Projects/WKEVAL/Dutch HERAS data/acousa format/catchLF_HERAS_2014_HER.csv",header=FALSE)
lf_data[is.na(lf_data)] <- 0
# list relevant trawl numbers
trawls <- unique(biology$Station)
# NOTE(review): stations at positions 7 and 8 are dropped for the 2012
# survey only -- confirm this exception still applies.
if(unique(biology$Year)==2012)
  trawls <- trawls[-c(7,8)]
# table of aged biological samples: numbers at length
lf_biology <- t(table(biology[c("Station","Length")]))
# vector of measured lengths (shadows base::lengths; local use only)
lengths <- as.numeric(row.names(lf_biology))
# creating table of additional records (with only length observations) per station
for(t in 1: length(trawls)){
  station <- trawls[t]
  Nl_recs <- lf_data[-1,lf_data[1,]==station] - as.numeric(lf_biology[,colnames(lf_biology)==station]) #numbers at length of required records to be added to biological samples
  reclength <- sum(Nl_recs) #additional records to be added by station
  # empty data.frame with the same columns as `biology`
  station_recs <- data.frame(matrix(ncol = length(colnames(biology)), nrow = reclength))
  colnames(station_recs) <- paste(colnames(biology))
  # fill additional records table
  station_recs$Country <- unique(biology$Country)
  station_recs$Vessel <- unique(biology$Vessel)
  station_recs$Cruise <- unique(biology$Cruise)
  station_recs$Station <- station
  station_recs$StType <- unique(biology$StType)
  station_recs$Year <- unique(biology$Year)
  station_recs$Species <- unique(biology$Species)
  # one row per missing fish, repeating each length Nl_recs times
  station_recs$Length <- rep(lengths,Nl_recs)
  # record numbers continue from the station's current maximum
  station_recs$Recnr <- seq(max(biology$Recnr[which(biology$Station==station)])+1,length=reclength)
  biology <- rbind(biology,station_recs)
}
write.csv(biology, file="D:/Sascha/Projects/WKEVAL/Dutch HERAS data/PGNAPES format/2014TRI2HERAS_BiologyALL.csv")
|
15c0372454555051c12ba3ccde39a3838f2602b7 | 165ce4507ff54ad5fdd35666af4ebc767faa1231 | /man/schema_meta.Rd | 0c472f208696a8b06a371c796d53734d9fdbe3f5 | [
"MIT"
] | permissive | paulhibbing/AGread | 7c99fada4ef1a4c9e17b9a06ae4631e7e82b5f8a | 28e9a4946d45cfa0464a10de3ff19f5543a3e6be | refs/heads/master | 2022-10-21T12:20:40.233431 | 2022-09-07T03:04:26 | 2022-09-07T03:04:26 | 128,490,460 | 18 | 4 | NOASSERTION | 2021-01-29T21:18:09 | 2018-04-07T02:47:45 | R | UTF-8 | R | false | true | 364 | rd | schema_meta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_packet_set_SENSOR_SCHEMA.R
\name{schema_meta}
\alias{schema_meta}
\title{Parse metadata from a SENSOR_SCHEMA packet}
\usage{
schema_meta(payload)
}
\arguments{
\item{payload}{raw. the packet payload}
}
\description{
Parse metadata from a SENSOR_SCHEMA packet
}
\keyword{internal}
|
59ee03baab0eb9d7f8e5b42afc8b46fe751ebcb3 | f991c879a03a6e5a81141ae59a4fca3a708a55b9 | /R/sccosmomcma.R | 8d07ad73a1de6d80c2402253ed8a3dc64e50ca12 | [
"MIT"
] | permissive | SC-COSMO/sccosmomcma | d1469077b9182d803320ca7ca89b4851a5e81a0f | 0e051ae94a2ff0641a9fee09d00097205ea19748 | refs/heads/main | 2023-04-14T03:12:50.174840 | 2021-10-11T22:52:25 | 2021-10-11T22:52:25 | 371,769,010 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | sccosmomcma.R | #' \code{sccosmo} package
#'
#' The Stanford-CIDE COronavirus Simulation MOdel (SC-COSMO)
#'
#' See the description of the package on the README on
#' \href{https://github.com/feralaes/sccosmo#readme}{GitHub}
#'
#' @docType package
#' @name sccosmo
#' @importFrom dplyr %>% filter
#' @importFrom rlang .data
# NOTE(review): the roxygen @name above says "sccosmo" while the repo is
# "sccosmomcma" -- confirm which package name is intended.
# Package-level documentation stub; roxygen attaches the block above to NULL.
NULL
# Declare non-standard-evaluation names so R CMD check does not flag them as
# undefined globals (currently empty; the example name is kept commented).
globalVariables(c(#"df_pop_state_cty_age_us_mex_bra"#
))
28f43672b5774165805635d49034b466b67279a6 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#7.s#37.asp/ctrl.e#1.a#3.E#130.A#48.c#.w#7.s#37.asp.R | 35be74f35e97bedd923bc56cf5b9a4ae0ff654d0 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | ctrl.e#1.a#3.E#130.A#48.c#.w#7.s#37.asp.R | 5ce6f43acd9f5390f7ac98f844906a9a ctrl.e#1.a#3.E#130.A#48.c#.w#7.s#37.asp.qdimacs 7457 21838 |
008f6963f6778c68b560357d8c252353906ec9d3 | 0f981f1c09c115bcf6676e561d3d424cc0b2443f | /atacAlign.R | 08f740384f686eede2aae148cf704dc54ca8fcd2 | [] | no_license | timplab/fragillis | b1b44f84f182adb65132ff7383fa29b166d34308 | 595c4226541bf8b40c3cb931281a3056414937ea | refs/heads/master | 2021-07-17T10:48:01.764770 | 2018-12-02T20:01:38 | 2018-12-02T20:01:38 | 101,221,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,901 | r | atacAlign.R | library(tidyverse)
## ATAC-seq alignment & peak-calling pipeline for the fragillis project.
## The sample sheet comes from Google Sheets; the heavy lifting is shelled
## out to Trimmomatic / bowtie2 / samtools / picard / deepTools / MACS2.
library(googlesheets)
#Code dir is ~/Code/
gs_auth(token = "~/googlesheets_token.rds")
fullsheet=gs_title("Fragillis Data")
# NOTE(review): dataloc is assumed to carry columns ill.r1, ill.r2, label,
# bam and condition -- confirm against the "180608_dat" worksheet.
dataloc=gs_read(fullsheet, ws="180608_dat")
workdir="/atium/Data/NGS/Aligned/180806_fragillis"
outdir="~/Dropbox/timplab_data/fragillis/180806_revision"
if (!dir.exists(workdir)) {
    dir.create(workdir)
}
if (!dir.exists(outdir)) {
    dir.create(outdir)
}
setwd(workdir)
# Stage 1 (currently disabled): adapter trimming, bowtie2 alignment, indexing.
if (FALSE) {
    ##Distribute which set to 3 different computers by changing the starting index
    for (i in seq(1, dim(dataloc)[1], 3)) {
        system(paste0("java -jar ~/Code/Trimmomatic-0.38/trimmomatic-0.38.jar PE -phred33 -threads 6 ",
                      dataloc$ill.r1[i], " ", dataloc$ill.r2[i],
                      " /tmp/read1.trim.fq.gz /tmp/read1.unpaired.fq.gz",
                      " /tmp/read2.trim.fq.gz /tmp/read2.unpaired.fq.gz",
                      " ILLUMINACLIP:/home/timp/Code/Trimmomatic-0.38/adapters/NexteraPE-PE.fa:2:30:10",
                      " LEADING:20 TRAILING:20 SLIDINGWINDOW:4:20 MINLEN:40"))
        system(paste0("bowtie2 -X 2000 -p 10 -t -x /mithril/Data/NGS/Reference/human38/GRCH38 ",
                      "-1 /tmp/read1.trim.fq.gz -2 /tmp/read2.trim.fq.gz ",
                      "2> ", workdir, "/", dataloc$label[i], "_bowtie2.log ",
                      "| samtools view -bS - | samtools sort - -o ", dataloc$label[i], ".bam"))
        ##started at 8:44AM
        system(paste0("samtools index ", dataloc$label[i], ".bam"))
    }
}
# Nuclear chromosomes only (chr1-22, X, Y); used to drop chrM reads below.
nucchr=paste(c(paste0("chr", 1:22), "chrX", "chrY"), collapse=" ")
# Stage 2: remove mitochondrial reads, then PCR duplicates; re-index.
if (TRUE) {
    ##remove chrM
    for (i in 1:dim(dataloc)[1]) {
        #system(paste0("samtools idxstats ", dataloc$bam[i], " | cut -f 1 | grep -v chrM | xargs samtools view -b ",
        #              dataloc$bam[i], " >", dataloc$label[i], "noM.bam"))
        system(paste0("samtools view -b ", dataloc$bam[i], " ", nucchr, " >", dataloc$label[i], "noM.bam"), wait=F)
    }
    ##remove dups
    for (i in 1:dim(dataloc)[1]) {
        system(paste0("java -Xmx8G -jar ~/Code/picard/picard.jar MarkDuplicates ",
                      "REMOVE_DUPLICATES=true INPUT=", dataloc$label[i], "noM.bam",
                      " OUTPUT=", dataloc$label[i], "nodup.bam",
                      " METRICS_FILE=", dataloc$label[i], "_duplicate_metrics.txt"))
    }
    # NOTE(review): wait=F launches the index jobs asynchronously; they can
    # race with the coverage steps below when the stages run back-to-back.
    for (i in 1:dim(dataloc)[1]) {
        system(paste0("samtools index ", dataloc$label[i], "nodup.bam"), wait=F)
    }
}
##Generate and share coverage plots
if (TRUE) {
    # Per-sample bigwig coverage tracks (100 bp bins).
    for (i in 1:dim(dataloc)[1]) {
        system(paste0("/bin/bash -c ", shQuote(paste0("source activate fragillis; bamCoverage -b ",
                                                      dataloc$label[i], "nodup.bam -bs 100 -o ",
                                                      dataloc$label[i], ".bw"))))
    }
    # Per-condition merged BAMs and their coverage tracks.
    for (cond in unique(dataloc$condition)) {
        just.these=dataloc %>%
            filter(condition==cond)
        system(paste0("samtools merge -r ", cond, ".merged.bam ", paste(paste0(just.these$label, "nodup.bam"), collapse=" ")), wait=F)
        system(paste0("samtools index ", cond, ".merged.bam"))
        system(paste0("/bin/bash -c ", shQuote(paste0("source activate fragillis; bamCoverage -b ",
                                                      cond, ".merged.bam -bs 100 -o ", cond, ".bw"))))
    }
}
# Stage 4: MACS2 peak calling -- pooled, per condition, and per sample
# (broad and sharp peaks each time).
if (TRUE) {
    ##Merge bam and call peaks on it
    system(paste0("samtools merge merged.bam ", paste(paste0(dataloc$label, "nodup.bam"), collapse=" ")))
    ##call peaks
    system(paste0("/bin/bash -c ", shQuote(paste0("source activate py27.fragillis; macs2 callpeak ",
                                                  "--nomodel -g hs -t merged.bam --broad -n broadmerged --keep-dup all"))),
           wait=F)
    system(paste0("/bin/bash -c ", shQuote(paste0("source activate py27.fragillis; macs2 callpeak ",
                                                  "--nomodel -g hs -t merged.bam -n sharpmerged --keep-dup all"))),
           wait=F)
    ##per condition
    for (cond in unique(dataloc$condition)) {
        just.these=dataloc %>%
            filter(condition==cond)
        system(paste0("/bin/bash -c ", shQuote(paste0("source activate py27.fragillis; macs2 callpeak ",
                                                      "--nomodel -g hs -t ", cond, ".merged.bam --broad -n ",
                                                      cond, ".broadmerged", " --keep-dup all"))),
               wait=F)
        system(paste0("/bin/bash -c ", shQuote(paste0("source activate py27.fragillis; macs2 callpeak ",
                                                      "--nomodel -g hs -t ", cond, ".merged.bam -n ",
                                                      cond, ".sharpmerged", " --keep-dup all"))),
               wait=F)
    }
    ##individ
    for (i in 1:dim(dataloc)[1]) {
        system(paste0("/bin/bash -c ", shQuote(paste0("source activate py27.fragillis; macs2 callpeak ",
                                                      "--nomodel -g hs -t ", dataloc$label[i], "nodup.bam --broad -n ",
                                                      dataloc$label[i], ".broadmerged", " --keep-dup all"))),
               wait=F)
        system(paste0("/bin/bash -c ", shQuote(paste0("source activate py27.fragillis; macs2 callpeak ",
                                                      "--nomodel -g hs -t ", dataloc$label[i], "nodup.bam -n ",
                                                      dataloc$label[i], ".sharpmerged", " --keep-dup all"))),
               wait=F)
    }
    ##Denny et al command
    ##macs2 callpeak --nomodle --broad --keep-dup all
    ##Jawara command
    ##macs2 callpeak --nomodel -t ${cleaned} -n ${peakout} --nolambda --keep-dup all --shift -75 --extsize 150 -p 0.1 -B -SPMR
}
|
c7c9cb08a0f2aa3c00494e6d03a7060d00a476f2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/h2o/examples/h2o.init.Rd.R | ccde17e9c53c2bfe5830fc1376f825a1f1fc0cd3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | r | h2o.init.Rd.R | library(h2o)
### Name: h2o.init
### Title: Initialize and Connect to H2O
### Aliases: h2o.init
### ** Examples
## Not run:
##D # Try to connect to a local H2O instance that is already running.
##D # If not found, start a local H2O instance from R with the default settings.
##D h2o.init()
##D
##D # Try to connect to a local H2O instance.
##D # If not found, raise an error.
##D h2o.init(startH2O = FALSE)
##D
##D # Try to connect to a local H2O instance that is already running.
##D # If not found, start a local H2O instance from R with 5 gigabytes of memory.
##D h2o.init(max_mem_size = "5g")
##D
##D # Try to connect to a local H2O instance that is already running.
##D # If not found, start a local H2O instance from R that uses 5 gigabytes of memory.
##D h2o.init(max_mem_size = "5g")
## End(Not run)
|
6f1785f215fd469d2d18b672fb507e6e5b2bb7e3 | 99e681afa2d7fead8d48034b9536d1360bff04af | /Vaja3/Vaja3.R | b4081e3a47c0d2516f9b842176e26c31864527de | [] | no_license | janpristovnik/Financni_praktikum | 9def6419494c9e1fa5dd8b17f13bf415bad537c8 | dbed1ea09412b7eb39111ec73d6e779d3fc2a679 | refs/heads/master | 2021-05-15T09:08:00.548115 | 2017-12-22T15:43:51 | 2017-12-22T15:43:51 | 108,002,774 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,097 | r | Vaja3.R | library(combinat)
library(Rlab)
# Exercise 1
# 1.a -- hand-built table of five simulated price paths
S_0 <- 50
u <- 1.05
d <- 0.95
U <- 5
R <- 0.03
T <- 3
# Five sample paths of the asset price at times 0..5 (rows = scenarios).
S0 <-c(50.00, 50.00, 50.00, 50.00, 50.00)
S1 <-c(52.50, 52.50, 47.50, 47.50, 52.50)
S2 <-c(48.88, 55.12, 49.88, 45.12, 49.88)
S3 <-c(52.37, 57.88, 47.38, 47.38, 52.37)
S4 <-c(49.75, 60.78, 45.01, 49.75, 54.99)
S5 <-c(52.24, 63.81, 42.76, 52.24, 57.74)
Izplacilo_X <-c(0, 0, 0, 0, 0)
Izplacilo_Y <-c(0, 0, 0, 0, 0)
tabela1 <- data.frame(S0, S1, S2, S3, S4, S5, Izplacilo_X, Izplacilo_Y)
# Payoff X: (max after time T) minus (max up to time T), floored at zero.
for (i in 1:5){
  l = max( max(tabela1[i,(T+1):(U+1)]) - max(tabela1[i, 1:T]), 0)
  tabela1$Izplacilo_X[i] <- l
}
# Payoff Y: the same with minima.
for (i in 1:5){
  k = max( min(tabela1[i,(T+1):(U+1)]) - min(tabela1[i, 1:T]), 0)
  tabela1$Izplacilo_Y[i] <- k
}
#1.b
# Payoff of the "partial lookback" contract: compares the extreme of the
# price path after time T with the extreme of the path up to time T.
#
# vrsta - numeric vector of prices S_0..S_U along one path
# T     - index separating the observation window from the exercise window
# type  - "call" (compares maxima) or "put" (compares minima)
#
# Returns the payoff, floored at zero.
izplacilo <- function(vrsta, T, type = c("call", "put") ) {
  n <- length(vrsta)
  zacetek <- vrsta[1:T]       # prices up to and including time T
  konec <- vrsta[(T + 1):n]   # prices after time T
  if (type == "call") {
    razlika <- max(konec) - max(zacetek)
  } else {
    razlika <- min(konec) - min(zacetek)
  }
  max(razlika, 0)
}
#2.naloga
#a.primer
# Exact binomial (CRR) price of the payoff defined by izplacilo():
# enumerates all 2^U up/down paths, weights each path's payoff by its
# risk-neutral probability, and discounts the expectation over U periods.
# NOTE: hcube() comes from the combinat package.
binomski <- function(S0,u,d,U,R,T,type){
  # risk-neutral probability of an up move
  q = (1+R-d)/(u-d)
  razpleti <- hcube(rep(2,U), translation = -1) # state tree: 1 means up, 0 means down
  # per-step multiplicative factors for every path
  razpleti_1 <- d**(1-razpleti) * u**(razpleti)
  k <- rowSums(razpleti) # for each path, how many of the moves were up
  vektor_verjetnosti_koncnih_stanj <- q^k *(1-q)^(U-k)
  # cumulative products turn step factors into full price-relative paths
  razpleti_1 <- t(apply( razpleti_1, 1, cumprod))
  vrednosti <- cbind(S0, S0*razpleti_1)
  izplacila <- apply(vrednosti, 1, function(x) izplacilo(x,T,type))
  # probability-weighted expected payoff, discounted over U periods
  E <- sum(izplacila * vektor_verjetnosti_koncnih_stanj)
  return (E/(1+R)^U)
}
#2.naloga b primer
# Monte Carlo estimate of the discounted risk-neutral expected payoff of the
# lookback-style option priced exactly by binomski().
#
# S0   - initial asset price
# u, d - up / down move factors of the binomial tree
# U    - number of periods
# R    - per-period risk-free interest rate
# T    - split index handed through to izplacilo()
# type - "call" or "put" (see izplacilo)
# N    - number of simulated price paths
#
# Returns the discounted average simulated payoff (a premium estimate).
monte <- function(S0, u, d, U, R, T, type, N){
  # risk-neutral up-move probability; paths are sampled directly under it
  q <- (1+R-d)/(u-d)
  # N x U matrix of moves: 1 = up, 0 = down
  stanja <- matrix(rbinom(U*N,1,q),N,U)
  # per-step factors, then cumulative products give each simulated path
  stanja_1 <- d**(1-stanja) * u**(stanja)
  stanja_1 <- t(apply(stanja_1, 1, cumprod))
  # full price paths, prepending the initial price
  vrednosti <- cbind(S0, S0*stanja_1)
  izplacila <- apply(vrednosti, 1, function(x) izplacilo(x,T,type))
  # Because paths are drawn from the risk-neutral measure, the plain mean
  # estimates the expectation.  (The original also computed per-path
  # probabilities q^k (1-q)^(U-k) but never used them; removed as dead code.)
  E <- sum(izplacila)/ length(izplacila)
  return (E/(1+R)^U)
}
# Single simulated premium value at each sample size.
monte(60,1.05,0.95,15,0.01,8,"put",10)
monte(60,1.05,0.95,15,0.01,8,"put",100)
monte(60,1.05,0.95,15,0.01,8,"put",1000)
# Exercise 3
# a) repeat each Monte Carlo estimate M times to visualise its sampling spread
N1 <- c()
N2 <- c()
N3 <- c()
M <- 100
for (i in c(1:M)){
  N1 <- c(N1,monte(60,1.05,0.95,15,0.01,8,"put",10) )
  N2 <- c(N2,monte(60,1.05,0.95,15,0.01,8,"put",100) )
  N3 <- c(N3,monte(60,1.05,0.95,15,0.01,8,"put",1000) )
}
cena_binomske <- binomski(60,1.05,0.95,15,0.01,8,"put") #premium from the exact binomial model
# Common x-axis limits for all three histograms.
# NOTE(review): these assignments shadow base::min / base::max for the
# remainder of the script.
min <- floor(min(c(N1,N2,N3)))
max <- ceiling(max(c(N1,N2,N3)))
#histogram N1
pov.N1 <- mean(N1) #mean of the N1 estimates
odklon.N1 <- sqrt(var(N1)) #standard deviation of the N1 estimates
x1_odklon_desno <- cena_binomske + odklon.N1
x1_odklon_levo <- cena_binomske - odklon.N1
histogram1 <-hist(N1,breaks = 20,
                  main = "Monte Carlo: N=10",
                  xlab = "Premija",
                  xlim = c(min, max),
                  col ="yellow")
abline(v= pov.N1, col = "green")
abline (v = cena_binomske, col = "red", lty = "dashed")
# arrows mark +/- one standard deviation around the binomial price
arrows(x0 = cena_binomske, y0 = 0, x1= x1_odklon_desno, col= "green", length = 0.1 )
arrows(x0 = cena_binomske, y0 = 0, x1= x1_odklon_levo, col= "green", length = 0.1 )
legend('topright',
       legend = c('Monte Carlo', 'Analiza modela'),
       col = c('green', 'red'),
       cex=0.8,
       lty=c("solid","dashed"))
#histogram N2
pov.N2 <- mean(N2) #mean of the N2 estimates
odklon.N2 <- sqrt(var(N2)) #standard deviation of the N2 estimates
x2_odklon_desno <- cena_binomske + odklon.N2
x2_odklon_levo <- cena_binomske - odklon.N2
histogram2 <-hist(N2,breaks = 20,
                  main = "Monte Carlo: N=100",
                  xlab = "Premija",
                  xlim = c(min, max),
                  col ="yellow")
abline(v= pov.N2, col = "green")
abline (v = cena_binomske, col = "red", lty = "dashed")
arrows(x0 = cena_binomske, y0 = 0, x1= x2_odklon_desno, col= "green", length = 0.1 )
arrows(x0 = cena_binomske, y0 = 0, x1= x2_odklon_levo, col= "green", length = 0.1 )
legend('topright',
       legend = c('Monte Carlo', 'Analiza modela'),
       col = c('green', 'red'),
       cex=0.8,
       lty=c("solid","dashed"))
#histogram N3
pov.N3 <- mean(N3) #mean of the N3 estimates
odklon.N3 <- sqrt(var(N3)) #standard deviation of the N3 estimates
x3_odklon_desno <- cena_binomske + odklon.N3
x3_odklon_levo <- cena_binomske - odklon.N3
histogram3 <-hist(N3,breaks = 20,
                  main = "Monte Carlo: N=1000",
                  xlab = "Premija",
                  xlim = c(min, max),
                  col ="yellow")
abline(v= pov.N3, col = "green")
abline (v = cena_binomske, col = "red", lty = "dashed")
arrows(x0 = cena_binomske, y0 = 0, x1= x3_odklon_desno, col= "green", length = 0.1 )
arrows(x0 = cena_binomske, y0 = 0, x1= x3_odklon_levo, col= "green", length = 0.1 )
legend('topright',
       legend = c('Monte Carlo', 'Analiza modela'),
       col = c('green', 'red'),
       cex=0.8,
       lty=c("solid","dashed"))
22e55a61b99ee150427b4b6a387fbbff4be73ee6 | 444d2cd04daef3b6ad2d16d288c05ef908481ce7 | /Labs5/Labs5-2.R | 5f8b2c2be7fa74ed283c545a354aa5062af260d2 | [] | no_license | danielreyes9756/ME-LABS | 3b25664feaf43d2ceafa6ffc088f47305c822e54 | 5fc20d1075e928e4ad2102d7d06687d7c720993a | refs/heads/master | 2021-01-05T07:34:09.420346 | 2020-02-16T17:43:53 | 2020-02-16T17:43:53 | 240,934,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 568 | r | Labs5-2.R | setwd("C:/Users/danie/Desktop")
# Paired measurements read with a decimal comma (Spanish-locale file).
Datos <- read.table("herramienta.txt", dec = ",", header = TRUE)
# Times without the tool
SinHerramientas <- Datos$Sin.herramienta
hist(SinHerramientas, labels=TRUE,col="green", density=10, angle = 45, border = "brown")
# Times with the tool
ConHerramientas <- Datos$Con.herramienta
hist(ConHerramientas, col="yellow", labels=TRUE, density=10, angle = 45, border = "blue")
# Different workers (independent samples): one-sided two-sample t-test,
# H1: mean time without the tool > mean time with the tool.
t.test(x=SinHerramientas, y=ConHerramientas, alternative="greater",mu=0)
# Same workers (paired samples): one-sided paired t-test.
t.test(x=SinHerramientas, y=ConHerramientas, alternative="greater",mu=0, paired = TRUE)
|
4ff79138ca6e48716b5b2bbcc1244aed5fdc7fbc | d1bf4396480318ae9d7a027bb0001b7d792d70d3 | /include/phrank_ml_f.R | 47f7bf2b8c11224ee2b314692003c52617760a8f | [] | no_license | mbask/phrank | 50ba961710285299323e44cc624318e4809b2c0d | b76c92e3019f661fe92994504411b9996cc1147a | refs/heads/master | 2021-01-01T05:16:01.086393 | 2016-04-20T08:54:29 | 2016-04-20T08:54:29 | 56,670,500 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,681 | r | phrank_ml_f.R | #' Extract response variable (y) from a formula
#'
#' @param ml_formula
#'
#' @return a character vector
#' @export
#'
#' @examples
get_response_variable <- function(ml_formula) {
  # terms() resolves the formula; the "response" attribute is the position
  # of the response inside the "variables" call object (offset by one,
  # because the first element of that call is the symbol `list`).
  tt <- terms(ml_formula)
  resp_idx <- attr(tt, "response") + 1
  as.character(attr(tt, "variables")[[resp_idx]])
}
#' Wrapper around caret::createDataPartition
#'
#' Draws a single 70/30 train/test split, stratified on the
#' \code{genus_id} column of \code{dataset}.
#'
#' Pre-defined arguments:
#' p = .7,
#' list = FALSE,
#' times = 1
#'
#' @param dataset a data.frame containing a \code{genus_id} column used for
#'   stratified sampling
#'
#' @return A list or matrix of row position integers corresponding to the training data
#' @export
#' @importFrom caret createDataPartition
#'
#' @examples
create_data_partition <- function(dataset) {
  # With list = FALSE this returns a one-column matrix of row indices.
  caret::createDataPartition(
    dataset$genus_id,
    p = .7,
    list = FALSE,
    times = 1)
}
#' Subset dataset by extracting train_index rows
#'
#' @param train_index integer (or logical) vector of row positions to keep,
#'   e.g. the index matrix returned by \code{create_data_partition}
#' @param dataset a data.frame
#'
#' @return the training subset of \code{dataset}, always a data.frame
#' @export
#'
#' @examples
filter_train_set <- function(train_index, dataset) {
  # drop = FALSE keeps the result a data.frame even for one-column input
  # (base R would otherwise silently collapse it to a bare vector, which
  # breaks the documented "filtered dataset" contract).
  dataset[train_index, , drop = FALSE]
}
#' Subset dataset by filtering out train_index rows
#'
#' @param train_index integer vector of row positions to exclude (the
#'   training rows); the remaining rows form the test set
#' @param dataset a data.frame
#'
#' @return the test subset of \code{dataset}, always a data.frame
#' @export
#'
#' @examples
filter_test_set <- function(train_index, dataset) {
  # drop = FALSE keeps the result a data.frame even for one-column input
  # (base R would otherwise silently collapse it to a bare vector, which
  # breaks the documented "filtered dataset" contract).
  dataset[-train_index, , drop = FALSE]
}
#' Wrapper around e1071::tune
#'
#' Pre-defined arguments:
#' method = svm
#' kernel = "radial"
#' ranges = list(
#'   gamma = seq(0, 1, by = 0.01))
#'
#' @param train_set
#' @param svm_formula
#'
#' @return an object of class "tune"
#' @export
#' @importFrom e1071 tune
#'
#' @examples
# tune_svm_model <- function(train_set, svm_formula) {
# tune(
# method = svm,
# train.x = svm_formula,
# ranges = list(
# cost = 2^(0:14),
# gamma = 10^(-8:0)),
# data = train_set)
# }
# Grid-tunes gamma (0 to 1 in steps of 0.01) for a radial-kernel SVM via
# e1071::tune and returns the resulting "tune" object.
# NOTE(review): cost tuning (2^(0:14)) is currently disabled -- see the
# commented range below and the earlier implementation kept above.
tune_svm_model <- function(train_set, svm_formula) {
  tune(
    method = svm,
    train.x = svm_formula,
    ranges = list(
      #cost = 2^(0:14),
      gamma = seq(0, 1, by = 0.01)),
    kernel = "radial",
    data = train_set)
}
#' Wrapper around e1071::svm
#'
#' @param train_set
#' @param svm_tune_model an object of class "tune"
#' @param svm_formula
#'
#' @return An object of class "svm"
#' @export
#' @importFrom e1071 svm
#'
#' @examples
# perform_svm_on_train_set <- function(train_set, svm_tune_model, svm_formula) {
# svm(
# formula = svm_formula,
# cost = svm_tune_model$best.parameters$cost,
# gamma = svm_tune_model$best.parameters$gamma,
# data = train_set)
# }
#' Wrapper around e1071::predict.svm
#'
#' Predicts on \code{test_set} after dropping the response column.
#'
#' @param test_set data.frame of test observations (the response column is
#'   removed before prediction)
#' @param svm_model the object returned by \code{tune_svm_model}; its
#'   \code{$best.model} (an e1071 "svm" fit) is used for prediction
#' @param response_var_name a character vector of the response variable of the svm_model
#'
#' @return A vector of predicted values
#' @export
#' @importFrom e1071 predict.svm
#'
#' @examples
predict_svm_on_test_set <- function(test_set, svm_model, response_var_name) {
  predict(
    svm_model$best.model,
    test_set[, colnames(test_set) != response_var_name])
}
#' Cross tabulation between predicted and observed classes
#'
#' A thin wrapper around \code{base::table}: rows are the predictions
#' (dimension "pred"), columns the observed values (dimension "true").
#'
#' @param prediction_set vector or factor of predicted classes
#' @param real_set data.frame holding the observed classes
#' @param response_var_name name of the response column in \code{real_set}
#'
#' @return a contingency \code{table} with dimnames "pred" and "true"
#' @export
#'
#' @examples
get_confusion_matrix <- function(prediction_set, real_set, response_var_name) {
  # unlist() flattens whatever `[` returns to a plain vector of labels
  observed <- unlist(real_set[, response_var_name])
  table(
    pred = prediction_set,
    true = observed)
}
#' Get accuracy of a classification model from its confusion matrix
#'
#' Accuracy is the share of observations on the diagonal (prediction equals
#' truth) over the total count of elements in the matrix.
#'
#' @param confusion_matrix a square numeric matrix or table (rows =
#'   predicted class, columns = observed class)
#'
#' @return a single number in [0, 1]
#' @export
#'
#' @examples
get_model_accuracy <- function(confusion_matrix) {
  n_total <- sum(confusion_matrix)
  sum(diag(confusion_matrix)) / n_total
}
# Accuracy accessor for fitted randomForest models (delegates to
# get_model_accuracy on the model's OOB confusion matrix).
get_model_accuracy.randomForest <- function(model) {
  stopifnot("randomForest" %in% class(model))
  # NOTE(review): randomForest's $confusion carries an extra "class.error"
  # column which is included in the denominator here, slightly deflating
  # the reported accuracy -- confirm whether it should be dropped first.
  get_model_accuracy(model$confusion)
}
# Accuracy accessor for contingency tables such as the output of
# get_confusion_matrix(); converts to a matrix and delegates.
get_model_accuracy.table <- function(model) {
  stopifnot("table" %in% class(model))
  get_model_accuracy(as.matrix(model))
}
#' Optimal tree size for a classification random forest
#'
#' Fits a forest with default settings and returns the number of trees at
#' which the cumulative out-of-bag error estimate is smallest.
#'
#' @param train_set training data.frame
#' @param ml_formula model formula
#'
#' @return integer: the tree count minimising the OOB error
#' @export
#' @importFrom randomForest randomForest
#'
#' @examples
tune_rf_model <- function(train_set, ml_formula) {
  foo <- randomForest::randomForest(
    formula = ml_formula,
    data = train_set)
  # Column 1 of err.rate holds the overall OOB error after 1..ntree trees.
  which.min(foo$err.rate[,1])
}
#' Fit a random forest with a fixed tree count
#'
#' @param test_set data.frame used to fit the forest
#' @param tree_size number of trees to grow (\code{ntree}), e.g. the value
#'   returned by \code{tune_rf_model}
#' @param ml_formula model formula
#'
#' @return a fitted \code{randomForest} object
#' @export
#' @importFrom randomForest randomForest
#'
#' @examples
perform_rf_on_test_set <- function(test_set, tree_size, ml_formula) {
  # Bug fix: the original body ignored all three arguments and read the
  # globals `ml_l$ml_formula` and `ml_l$train_set_l$EVI` instead, so the
  # function could only ever refit one hard-coded data set.
  randomForest(
    formula = ml_formula,
    data = test_set,
    ntree = tree_size)
}
|
c82d3ad50c1a6250d0c63c5f53adabddc63b82aa | af5fdc8473585759439e661c55a83628f1a87359 | /man/hSearch.Rd | dba132732ab0bee67db693bcc7795c65cb768dca | [] | no_license | ljzhao007/mnspc | f3790be940d0d15c49565eea1eb9c1f62726cc6a | 25313d150768afcb692bbf0d0eda20ceef8dce6d | refs/heads/master | 2021-03-28T18:27:59.900273 | 2011-02-21T00:00:00 | 2011-02-21T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,293 | rd | hSearch.Rd | \name{hSearch}
\alias{hSearch}
\title{Searching for \eqn{h} when \eqn{k} is given.}
\description{
Given a value of \eqn{k}, this procedure searches for the decision interval \eqn{h} to achieve
a pre-specified in-control average run length (ARL).
}
\usage{
hSearch(f0, arl0, k, Lh, Uh, eh1=0.5, eh2=0.001, istart=0, iter=10000, print.hHistory=TRUE)
}
\arguments{
\item{f0}{The estimated in-control distribution of the categorized response of the process.}
\item{arl0}{The desired in-control ARL.}
\item{k}{The allowance constant.}
\item{Lh}{Lower bound of the search interval. See details.}
\item{Uh}{Upper bound of the search interval. See details.}
\item{eh1}{Stopping threshold 1. See details.}
\item{eh2}{Stopping threshold 2. See details.}
\item{istart}{Steady state start value.}
\item{iter}{Number of replications used to compute ARLs throughout the procedure.}
\item{print.hHistory}{If \code{TRUE}, the search history will be printed (recommended.)}
}
\details{
If there is no prior information about the search interval boundaries \code{Lh} and \code{Uh}, they
can be omitted. The procedure will first select appropriate decision interval bounds and then search for \eqn{h}. Choosing a very large \code{Uh} may slow down the algorithm substantially and choosing a \code{Uh} that is too small may result in the true \eqn{h} being excluded from the search interval.
\code{istart=50} is a good approximation to the steady state start and will not
substantially slow down the procedure. Setting \code{istart} to more than 100 is generally not recommended.
The stopping rules \code{eh1} and \code{eh2} are explained on pgs. 668-669 of Qiu (2008). \code{iter} should
be chosen based on desired precision and speed, although speed is generally
not an issue here.
This functions calls \code{getARL}.
}
\value{
A list containing the following:
\item{h}{The searched value of \eqn{h}}
\item{arl0.actual}{The actual in-control ARL.}
}
\examples{
ic.dist<-rep(0.125,8)
hSearch(f0=ic.dist, arl0=200, k=0.003)
hSearch(f0=ic.dist, arl0=200, k=0.967, eh1=0.2, Lh=8, Uh=12, istart=100, iter=5000)
}
\references{
Qiu, P. (2008) Distribution-free multivariate process control based on log-linear modeling. \emph{IIE
Transactions}, 40, 664-677.
}
\keyword{decision}
|
6af9eff8d7f752236163ec8b0a0b20a74eb49299 | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/1,2,4-triazole-3-car.R | 165ae26840718f95437c711fd6cc85862832201a | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 276 | r | 1,2,4-triazole-3-car.R | library("knitr")
library("rgl")
# Alternative pipeline kept for reference: knit to markdown, convert to
# HTML, then pandoc to PDF.
#knit("1,2,4-triazole-3-car.Rmd")
#markdownToHTML('1,2,4-triazole-3-car.md', '1,2,4-triazole-3-car.html', options=c("use_xhml"))
#system("pandoc -s 1,2,4-triazole-3-car.html -o 1,2,4-triazole-3-car.pdf")
# Render the report straight to HTML.
knit2html('1,2,4-triazole-3-car.Rmd')
|
a542b1658526610c92e311ba451d32d3d64b13de | 6336fdd99e037619e88b610934dfcca54e063c0f | /man/dmrs.ex.Rd | b2b8955e4e300d5f0dc7102c0e1ca17fbaa94943 | [
"MIT"
] | permissive | kdkorthauer/dmrseq | 62012f3dfd88e1f3f8acf9b3063198ac0e2531fe | e64d8669f04f0f653ec61d46f8f7071ce9e329ca | refs/heads/master | 2023-08-21T16:04:47.550295 | 2021-11-16T00:56:46 | 2021-11-16T00:56:46 | 97,278,089 | 56 | 17 | MIT | 2023-08-04T16:01:36 | 2017-07-14T22:48:24 | R | UTF-8 | R | false | true | 1,413 | rd | dmrs.ex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\docType{data}
\name{dmrs.ex}
\alias{dmrs.ex}
\title{dmrs.ex: Example results of DMRs}
\format{a data.frame that contains the results of the inference. The
data.frame contains one row for each candidate region, and
11 columns, in the following order: 1. chr = 
chromosome, 2. start =
start basepair position of the region, 3. end = end basepair position
of the region,
4. indexStart = the index of the region's first CpG,
5. indexEnd = the index of the region's last CpG,
6. L = the number of CpGs contained in the region,
7. area = the sum of the smoothed beta values, 
8. beta = the coefficient value for the condition difference,
9. stat = the test statistic for the condition difference,
10. pval = the permutation p-value for the significance of the test
statistic, and
11. qval = the q-value for the test statistic (adjustment
for multiple comparisons to control false discovery rate).}
\source{
Obtained from running the examples in \code{\link{dmrseq}}.
A script which executes these steps
and constructs the \code{dmrs.ex}
object may be found in \file{inst/scripts/get_dmrs.ex.R}
}
\usage{
data(dmrs.ex)
}
\description{
Example output from \code{dmrseq} function run on the
example dataset \code{BS.chr21}.
}
\examples{
data(dmrs.ex)
}
\keyword{datasets}
|
57bcb5dc9efa382f12642e3f07e659bd22938aec | 280578d8d46d88881ad69c8126034a06e08090a8 | /data_table_dplyr.R | e7f2cf8e4debe71e5eeec4fa0213af096a2e3188 | [
"MIT"
] | permissive | kamilsieklucki/usecases | 27aee0ad4e80607800fd4b160d2c1cd1ef9245e8 | 46b8082c7fc26c52afcc3e2e31bbe3e7dc51d864 | refs/heads/master | 2021-07-09T08:03:27.550685 | 2020-07-30T11:34:16 | 2020-07-30T11:34:16 | 167,319,679 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,718 | r | data_table_dplyr.R | #### https://atrebas.github.io/post/2019-03-03-datatable-dplyr/ ####
library(dplyr)
library(data.table)
# Two parallel copies of mtcars: `df` for the dplyr examples and
# `dt` (a data.table) for the equivalent data.table expressions.
df <- mtcars
dt <- as.data.table(mtcars)
# Slice ----
df %>% slice(5:7)
dt[5:7]
dt[5:7, ]
df %>% slice(-(1:2))
dt[!1:2]
dt[-(1:2), ]
# Filter ----
df %>% filter(mpg > 20, cyl == 4)
dt[mpg > 20 & cyl == 4]
# Distinct ----
df %>% distinct()
df %>% distinct_at(vars(cyl, gear))
unique(dt)
unique(dt, by = c("cyl", "gear")) # all columns are kept, so this is not the intended result
unique(dt[, .(cyl, gear)])
# Sort ----
df %>% arrange(desc(mpg), disp)
dt[order(-mpg, disp)]
# Select ----
df %>% select(mpg, cyl)
df %>% select(-(mpg:cyl))
dt[, .(mpg, cyl)]
dt[, c("mpg", "cyl")]
dt[, !c("mpg", "cyl")]
dt[, -c("mpg", "cyl")]
# removing the variables entirely from the data set (by reference)
dt[, c("mpg", "cyl") := NULL]
x <- c("mpg", "cyl")
df %>% select(!!x)
df %>% select(-!!x)
dt[, ..x]
dt[, !..x]
dt[, -..x]
# removing the variables named in `x` entirely from the data set
dt[, (x) := NULL]
# Summarise ----
df %>% summarise(suma_mpg = sum(mpg), srednia_mpg = mean(mpg))
dt[, .(suma_mpg = sum(mpg), srednia_mpg = mean(mpg))]
# Mutate ----
# DT modifies in place -- no assignment to a new variable is needed to keep the result
df2 <- df %>% mutate(cyl_2 = cyl ^ 2, sqrt_disp = sqrt(disp))
df2
dt[, ':='(cyl_2 = cyl^2, sqrt_disp = sqrt(disp))]
dt[, c("cyl_2", "sqrt_disp") := .(cyl^2, sqrt(disp))]
dt
# Transmute ----
df %>% transmute(cyl_2 = cyl ^ 2)
dt[, .(cyl_2 = cyl ^ 2)]
# Change values in column ----
df %>% mutate(cyl = replace(cyl, cyl == 4, 0L))
dt[cyl == 4, cyl := 0L]
# group by ----
df %>% group_by(cyl, gear) %>% summarise(suma = sum(disp))
dt[, .(suma = sum(disp)), keyby = .(cyl, gear)] # or `by` -> The sole difference between by and keyby is that keyby orders the results and creates a key that will allow faster subsetting
# count ----
df %>% count(cyl)
dt[, .N, keyby = cyl]
# add_count ----
df %>% add_count(cyl)
dt[, n := .N, by = cyl]
# To further manipulate columns, dplyr includes nine functions: the _all, _at, and _if versions of summarise(), mutate(), and transmute().
# With data.table, we use .SD, which is a data.table containing the Subset of Data for each group, excluding the column(s) used in by. So, DT[, .SD] is DT itself and in the expression DT[, .SD, by = V4], .SD contains all the DT columns (except V4) for each values in V4 (see DT[, print(.SD), by = V4]). .SDcols allows to select the columns included in .SD.
# summarise_all ----
df %>% summarise_all(max)
dt[, lapply(.SD, max)]
# summarise_at ----
df %>% group_by(am) %>% summarise_at(vars(cyl, gear), list(min, max))
dt[, c(lapply(.SD, min), lapply(.SD, max)), .SDcols = c("cyl", "gear"), keyby = am]
# summarise_if ----
summarise_if(df, nchar(names(df)) == 2, mean)
cols <- names(dt)[nchar(names(dt)) == 2]
dt[, lapply(.SD, mean),
   .SDcols = cols]
# mutate_all ----
mutate_all(df, as.integer)
dt[, lapply(.SD, as.integer)]
# the remaining _at / _if variants follow the same pattern
# arrange ----
df <- df %>% arrange(cyl)
setkey(dt, cyl)
setindex(dt, cyl)
setorder(dt, mpg)
# In data.table, set*() functions modify objects by reference, making these operations fast and memory-efficient. In case this is not a desired behaviour, users can use copy(). The corresponding expressions in dplyr will be less memory-efficient.
# set(*) ----
# rename ----
setnames(dt, old = "mpg", new = "test")
setnames(dt, old = "test", new = "mpg")
# reorder ----
setcolorder(dt, c("carb", "wt", "disp"))
# Advanced
# Get row number of first (and last) observation by group ----
dt[, .I[c(1, .N)], by = cyl]
# per-group sums, then sums with a grand-total level via rollup ----
dt[,
   .(SumV2 = sum(mpg)),
   keyby = c("cyl", "gear")]
rollup(dt,
       .(SumV2 = sum(mpg)),
       by = c("cyl", "gear"))
# Read and rbind several files ----
rbindlist(lapply(c("DT.csv", "DT.csv"), fread))
# JOINS ----
x <- data.table(Id = c("A", "B", "C", "C"),
                X1 = c(1L, 3L, 5L, 7L),
                XY = c("x2", "x4", "x6", "x8"),
                key = "Id")
y <- data.table(Id = c("A", "B", "B", "D"),
                Y1 = c(1L, 3L, 5L, 7L),
                XY = c("y1", "y3", "y5", "y7"),
                key = "Id")
# left join x <- y
y[x, on = "Id"]
left_join(x, y, by = "Id")
# right join x -> y
x[y, on = "Id"]
right_join(x, y, by = "Id")
# inner join
x[y, on = "Id", nomatch = 0]
inner_join(x, y, by = "Id")
# full join
merge(x, y, all = TRUE, by = "Id")
full_join(x, y, by = "Id")
# semi_join
unique(x[y$Id, on = "Id", nomatch = 0])
semi_join(x, y, by = "Id")
# anti_join
x[!y, on = "Id"]
anti_join(x, y, by = "Id")
# Non-equi joins ----
z <- data.table(ID = "C", Z1 = 5:9, Z2 = paste0("z", 5:9))
x[z, on = .(Id == ID, X1 <= Z1)]
|
a3ec8bb46cbacdb0561ecab6d44ed7523895d57c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/zendeskR/examples/getAllOrganizations.Rd.R | b6506acbd5358a219625951c55898e2e40059d72 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 292 | r | getAllOrganizations.Rd.R | library(zendeskR)
### Name: getAllOrganizations
### Title: getAllOrganizations
### Aliases: getAllOrganizations
### Keywords: getAllOrganizations
### ** Examples
## Not run:
##D ## This requires Zendesk authentication
##D organizations <- getAllOrganizations()
## End(Not run)
|
1a6738f9cce4e9081fe2faac95b7ef3607841824 | da41dfabbd285a1a1400d59cbdb7b923bc6d5a35 | /algorithm.R | e49809875a09e7041cc1ab43ece6fabecb7d709d | [] | no_license | ferwiner2/FactoryLocation | 4e56f2b576b5c22f3341dfcbed2c308d3d4fb5d4 | 00b9b5cacc193a0b504d72587c3a175c3c0decff | refs/heads/master | 2021-01-17T23:45:09.582802 | 2016-05-21T01:27:10 | 2016-05-21T01:27:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,268 | r | algorithm.R | install.packages("GA")
library("GA")
# Demand/source points; columns 1-3 are consumed below as x, y and weight.
myData <- read.csv(file = "sources.csv")
# fitness function
# euc.dist: Euclidean distance between two coordinate vectors.
euc.dist <- function(x1, x2) {
  delta <- x1 - x2
  sqrt(sum(delta * delta))
}
# processRow: weighted distance from the candidate point (x, y) to one
# source, where row[1:2] are the source coordinates and row[3] its weight.
processRow <- function(x, y, row) {
  row[3] * euc.dist(c(row[1], row[2]), c(x, y))
}
# fitnessFunction1: total weighted distance from candidate location `x`
# (a length-2 vector) to every source in the global `myData` table.
fitnessFunction1 <- function(x) {
  per_source <- apply(myData, 1, function(row) processRow(x[1], x[2], row))
  sum(per_source)
}
# myClip: clamp the values of x into [a, b]. Values at or above b become b,
# then values at or below a become a -- the lower bound is applied last,
# matching the precedence of the original nested ifelse() form.
myClip <- function(x, a, b) {
  clipped <- x
  clipped[which(x >= b)] <- b
  clipped[which(x <= a)] <- a
  clipped
}
# this is gareal_raMutation changed to implement
# gaussian mutation from https://en.wikipedia.org/wiki/Mutation_(genetic_algorithm)
# Picks one gene of the selected parent uniformly at random, adds N(0, 1)
# noise, and clips the result into that gene's [min, max] bounds via myClip().
gareal_gaussianMutation <- function(object, parent, ...) {
  chromosome <- as.vector(object@population[parent, ])
  gene <- sample.int(length(chromosome), size = 1)
  perturbed <- chromosome[gene] + rnorm(1)
  chromosome[gene] <- myClip(perturbed, object@min[gene], object@max[gene])
  return(chromosome)
}
# Accumulates every generation's population via the monitor callback below.
solutions <- c()
GA <- ga(type="real-valued",
         # negate: ga() maximises its fitness, while we want to minimise cost
         fitness = function(x) -fitnessFunction1(x),
         seed = 0,
         min = c(0,0),
         max = c(10, 10),
         popSize = 20,
         maxiter = 100,
         run = 20,
         selection = function(x) gareal_tourSelection(x, k = 2),
         pcrossover = 0,
         mutation = gareal_gaussianMutation,
         monitor = function(object) solutions <<- append(solutions, object@population))
|
1285888265953c3069a99dd0f454aaac0ac67937 | 3b497149a45c9609654d5cbd1da982cbeda530e0 | /R/plateaddr.R | bdebbc7ff803e3224d28a8fc5b77683c4ea96c40 | [] | no_license | aushev/vautils | f5800a3788bc36b1722884bf7379dcb46327136d | ce770312ecada45305a4e9713a12b8275f1ae12c | refs/heads/master | 2023-08-22T10:30:15.931140 | 2023-08-21T14:36:55 | 2023-08-21T14:36:55 | 78,473,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,538 | r | plateaddr.R | # welltrue('A1a1') = 1
# welltrue('D12h8') = 3072 = 4*12*8*8
# Convert a nested well address such as "D12h8" (big row "D", big column 12,
# small row "h", small column 8) into a 1-based linear well number, assuming
# big rows of 12 big columns that each contain an 8x8 sub-grid of wells.
# NOTE(review): relies on chopLeft() (defined elsewhere in this package) to
# drop the already-parsed prefix. The regex '([a-zA-Z])+' captures only the
# last letter of a run, so this assumes single-letter row codes -- TODO
# confirm multi-letter big rows are never used.
welltrue <- function(wellName){
  # leading letters: big-plate row code
  bigRow <- gsub('([a-zA-Z])+.*','\\1',wellName);
  wellName <- chopLeft(wellName,nchar(bigRow));
  # next digits: big-plate column number
  bigCol <- gsub('(\\d+).*','\\1',wellName);
  wellName <- chopLeft(wellName,nchar(bigCol));
  # next letters: sub-grid row code
  smlRow <- gsub('([a-zA-Z]+).*','\\1',wellName);
  wellName <- chopLeft(wellName,nchar(smlRow));
  # remainder: sub-grid column number
  smlCol <- wellName;
  # convert each part to a 0-based index (the small column stays 1-based)
  bigRowN <- match(tolower(bigRow),letters) - 1L;
  bigColN <- as.numeric(bigCol) - 1L;
  smlRowN <- match(tolower(smlRow),letters) - 1L;
  smlColN <- as.numeric(smlCol);
  return(bigRowN*12*8*8+bigColN*8*8+smlRowN*8+smlColN);
}
# Inverse of welltrue(): turn a linear well number into its nested address
# (e.g. 3072 -> "D12h8"). `base` is the number of the first well; the default
# 1L makes this the 1-based counterpart of oaWellNumToAddr() below.
wellNum2Let <- function(wellNum, base=1L){
  idx <- wellNum - base
  # 768 wells per big row (12 big columns x an 8x8 sub-grid)
  bigRow <- LETTERS[idx %/% 768L + 1L]
  idx <- idx %% 768L
  bigCol <- idx %/% 64L + 1L          # 64 wells per big column
  idx <- idx %% 64L
  smlRow <- letters[idx %/% 8L + 1L]  # 8 wells per sub-grid row
  smlCol <- idx %% 8L + 1L
  paste0(bigRow, bigCol, smlRow, smlCol)
}
# 0-based variant of wellNum2Let(): well number 0 maps to "A1a1".
oaWellNumToAddr <- function(wellNum){
  remaining <- wellNum
  bigRow <- LETTERS[remaining %/% 768L + 1L]  # 768 wells per big row
  remaining <- remaining %% 768L
  bigCol <- remaining %/% 64L + 1L            # 64 wells per big column
  remaining <- remaining %% 64L
  smlRow <- letters[remaining %/% 8L + 1L]    # 8 wells per sub-grid row
  smlCol <- remaining %% 8L + 1L
  paste0(bigRow, bigCol, smlRow, smlCol)
}
b1ab0327bb5029af41193f857315b58c5de21a2c | 59fc6c331e0a4aa604860b899cba2a9331311439 | /man/dsumlogchisq.Rd | 64591874849579ea010ad600e4dde652294fd669 | [] | no_license | cran/sadists | 222c55c3cbb6fc42d049d63a76d1d18449bd1df1 | 7941938a1576fa5cc308c2e5b7d6a0236f03d663 | refs/heads/master | 2023-09-01T10:47:45.370954 | 2023-08-21T18:30:02 | 2023-08-21T19:31:01 | 31,738,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,535 | rd | dsumlogchisq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sumlogchisq.r
\name{sumlogchisq}
\alias{sumlogchisq}
\alias{dsumlogchisq}
\alias{psumlogchisq}
\alias{qsumlogchisq}
\alias{rsumlogchisq}
\title{The sum of the logs of (non-central) chi-squares distribution.}
\usage{
dsumlogchisq(x, wts, df, ncp=0, log = FALSE, order.max=6)
psumlogchisq(q, wts, df, ncp=0, lower.tail = TRUE, log.p = FALSE, order.max=6)
qsumlogchisq(p, wts, df, ncp=0, lower.tail = TRUE, log.p = FALSE, order.max=6)
rsumlogchisq(n, wts, df, ncp=0)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{wts}{the vector of weights.
This is recycled against the \code{df, ncp}, but not against the \code{x,q,p,n}.}
\item{df}{the vector of degrees of freedom.
This is recycled against the \code{wts, ncp}, but not against the \code{x,q,p,n}.}
\item{ncp}{the vector of non-centrality parameters.
This is recycled against the \code{wts, df}, but not against the \code{x,q,p,n}.}
\item{log}{logical; if TRUE, densities \eqn{f} are given
as \eqn{\mbox{log}(f)}{log(f)}.}
\item{order.max}{the order to use in the approximate density,
distribution, and quantile computations, via the Gram-Charlier,
Edeworth, or Cornish-Fisher expansion.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.}
\item{log.p}{logical; if TRUE, probabilities p are given
as \eqn{\mbox{log}(p)}{log(p)}.}
\item{lower.tail}{logical; if TRUE (default), probabilities are
\eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}{P[X > x]}.}
}
\value{
\code{dsumlogchisq} gives the density, \code{psumlogchisq} gives the
distribution function, \code{qsumlogchisq} gives the quantile function,
and \code{rsumlogchisq} generates random deviates.
Invalid arguments will result in return value \code{NaN} with a warning.
}
\description{
Density, distribution function, quantile function and random
generation for the distribution of the weighted sum of logs of
non-central chi-squares.
}
\details{
Let \eqn{X_i \sim \chi^2\left(\delta_i, \nu_i\right)}{X_i ~ chi^2(delta_i, v_i)}
be independently distributed non-central chi-squares, where \eqn{\nu_i}{v_i}
are the degrees of freedom, and \eqn{\delta_i}{delta_i} are the
non-centrality parameters.
Let \eqn{w_i} be given constants. Suppose
\deqn{Y = \sum_i w_i \log X_i.}{Y = sum w_i log(X_i).}
Then \eqn{Y}{Y} follows a weighted sum of log of chi-squares distribution.
}
\note{
The PDF, CDF, and quantile function are approximated, via
the Edgeworth or Cornish Fisher approximations, which may
not be terribly accurate in the tails of the distribution.
You are warned.
The distribution parameters are \emph{not} recycled
with respect to the \code{x, p, q} or \code{n} parameters,
for, respectively, the density, distribution, quantile
and generation functions. This is for simplicity of
implementation and performance. It is, however, in contrast
to the usual R idiom for dpqr functions.
}
\examples{
wts <- c(1,-3,4)
df <- c(100,20,10)
ncp <- c(5,3,1)
rvs <- rsumlogchisq(128, wts, df, ncp)
dvs <- dsumlogchisq(rvs, wts, df, ncp)
qvs <- psumlogchisq(rvs, wts, df, ncp)
pvs <- qsumlogchisq(ppoints(length(rvs)), wts, df, ncp)
}
\references{
Pav, Steven. Moments of the log non-central chi-square distribution.
\url{https://arxiv.org/abs/1503.06266}
}
\seealso{
The product of chi-squares to a power,
\code{\link{dprodchisqpow}},
\code{\link{pprodchisqpow}},
\code{\link{qprodchisqpow}},
\code{\link{rprodchisqpow}}.
}
\author{
Steven E. Pav \email{shabbychef@gmail.com}
}
\keyword{distribution}
|
587c9826471bc45db329ccc3afa3b5eb15b11bbe | 9ea3df2940ea1796776f00c333a5d8597cf52263 | /main.R | 93f3ccfb805d7bcc2d92cd453e753ac0c337168f | [] | no_license | enw860/CSC458_Project | 5b168a6c0c96092fde5f479e9c35a351b27e4248 | 4a59647674b4d91657735a4ead1d244faf161019 | refs/heads/master | 2020-04-05T20:13:34.796370 | 2019-01-08T16:17:20 | 2019-01-08T16:17:20 | 157,170,233 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,480 | r | main.R | library(grid)
library(gridExtra)
library(ggplot2)
library(varhandle)
library(rlist)
################## begin default settings #####################
# default setting (set your own absolute path)
# NOTE(review): system("pwd") only works on POSIX shells; on Windows set
# root_dir manually (or use getwd()).
root_dir <- system("pwd", intern = TRUE)
data_dir <- paste(root_dir, "data/", sep="/")
img_dir <- paste(root_dir, "plots/", sep="/")
# loading sources
source(paste(root_dir, "analysis_perflow/analysis_perflow.R", sep="/"))
source(paste(root_dir, "analysis_perpack/analysis_perpack.R", sep="/"))
source(paste(root_dir, "analysis_RTT/analysis_RTT.R", sep="/"))
source(paste(root_dir, "util/util.R", sep="/"))
################## end default settings #####################
######################### start control box ############
# Stage/plot toggles; bool_switch() is presumably defined in util/util.R
# (sourced above) -- TODO confirm.
run_perpacket <- bool_switch(1)
run_perflow <- bool_switch(1)
run_rtt <- bool_switch(1)
plot_scatter <- bool_switch(0)
plot_ecdf <- bool_switch(0)
plot_cdf <- bool_switch(1)
######################### end control box ##############
################# start perpacket analysis ##########################
if(run_perpacket){
  # Load data
  print("Loading packets detail")
  all_packets <- read.table(set_filePath(data_dir, "result.tsv"), sep = "\t" , header = TRUE, stringsAsFactors = FALSE)
  # cast header length into number (non-numeric entries become NA)
  all_packets$ip.hdr_len <- suppressWarnings(as.integer(all_packets$ip.hdr_len))
  all_packets$ip.len <- suppressWarnings(as.integer(all_packets$ip.len))
  # calculate UDP header length: each of the four UDP header fields is 2
  # bytes; the rep(1, ...) vectors mark field presence and are NA'd out where
  # a field is missing, so udp.hdr_size below is 8 for complete UDP headers
  # and NA for non-UDP packets.
  udp_header_length <- rep(1, nrow(all_packets))
  udp_header_length[is.na(all_packets$udp.length)] <- NA
  udp_header_dstport <- rep(1, nrow(all_packets))
  udp_header_dstport[is.na(all_packets$udp.dstport)] <- NA
  udp_header_srcport <- rep(1, nrow(all_packets))
  udp_header_srcport[is.na(all_packets$udp.srcport)] <- NA
  udp_header_checksum <- rep(1, nrow(all_packets))
  udp_header_checksum[is.na(all_packets$udp.checksum)] <- NA
  all_packets$udp.hdr_size <- 2 * (udp_header_length + udp_header_dstport + udp_header_srcport + udp_header_checksum)
  # IP rows: IPv4 packets have a numeric ip.len, IPv6 packets a non-empty ipv6.addr
  ip_filter <- !is.na(all_packets$ip.len) | (all_packets$ipv6.addr!="")
  # parsing data into groups (helpers sourced from analysis_perpack.R -- TODO confirm)
  IP_packets <- get_IP_packets(all_packets, ip_filter)
  non_IP_packets <- get_non_IP_packets(all_packets, ip_filter)
  # plot per-packet analysis bullet point 1 | GENERATE: ./plots/packets_precentage_statistics.png
  print("Start ploting overall table statistics > GENERATE: ./plots/packets_precentage_statistics.png")
  plot_precentage_tables(all_packets, img_dir)
  # plot per-packet analysis bullet point 2 | GENERATE: ./plots/packlen_total_cdf.png & ./plots/packlen_cdfs.png
  print("Start ploting package length cdf > GENERATE: ./plots/packlen_total_cdf.png & ./plots/packlen_cdfs.png")
  plot_total_packlen_cdf(all_packets, IP_packets, non_IP_packets, img_dir)
  # plot per-packet analysis bullet point 2 | GENERATE: ./plots/header_length_cdfs.png
  print("Start ploting package header length cdf > GENERATE: ./plots/header_length_cdfs.png")
  plot_total_headerlen_cdf(IP_packets, img_dir)
}
################# end perpacket analysis ##########################
################# start perflow analysis ##########################
if(run_perflow){
  # loading data (~ 3 mins)
  print("Loading all tcp packets data")
  tcps <- read.table(set_filePath(data_dir, "tcp_result.tsv"), header=TRUE, sep="\t")
  # keep only rows with a parsed TCP header length
  tcps <- tcps[!is.na(tcps$tcp.hdr_len), c(1:ncol(tcps))]
  # unfactor() (varhandle) unwraps the factor column before the numeric cast
  tcps$ip.hdr_len <- suppressWarnings(as.numeric(unfactor(tcps$ip.hdr_len)))
  print("Loading all udp packets data")
  udps <- read.table(set_filePath(data_dir, "udp_result.tsv"), header=TRUE, sep="\t")
  udps <- udps[!is.na(udps$udp.length), c(1:ncol(udps))]
  udps$ip.hdr_len <- suppressWarnings(as.numeric(unfactor(udps$ip.hdr_len)))
  # convert tcp time and rebase it so the capture starts at t = 0
  tcps$frame.time <- convert_time(tcps$frame.time)
  tcps$frame.time <- tcps$frame.time - tcps$frame.time[1]
  # convert udp time (same rebasing as above)
  udps$frame.time <- convert_time(udps$frame.time)
  udps$frame.time <- udps$frame.time - udps$frame.time[1]
  # analysis data (~ 8 mins); analysis_* helpers come from the sourced scripts
  print("Analysis TCP flows (~ 5 mins)")
  overall_tcp_flows <- analysis_overall_tcp_flows(tcps)
  print("Analysis UDP flows (~ 3 mins)")
  overall_udp_flows <- analysis_overall_udp_flows(udps)
  # per flow bullet point 1 | GENERATE: ./plots/flows_statistics.png
  print("Start ploting TCP and UDP flows summary table > GENERATE: ./plots/flows_statistics.png")
  flows_statistics(img_dir ,overall_tcp_flows, overall_udp_flows)
  # per flow bullet point 2 | GENERATE: ./plots/durations_cdfs.png
  print("Start ploting TCP and UDP duration cdf > GENERATE: ./plots/durations_cdfs.png")
  duration_cdfs(img_dir ,overall_tcp_flows, overall_udp_flows)
  # per flow bullet point 3 | GENERATE: ./plots/byteSum_cdfs.png
  print("Start ploting TCP and UDP byte sum cdf > GENERATE: ./plots/byteSum_cdfs.png")
  byteSum_cdfs(img_dir, overall_tcp_flows, overall_udp_flows)
  # per flow bullet point 3 | GENERATE: ./plots/packetCount_cdfs.png
  print("Start ploting TCP and UDP packet count cdf > GENERATE: ./plots/packetCount_cdfs.png ")
  packetCount_cdfs(img_dir ,overall_tcp_flows, overall_udp_flows)
  # per flow bullet point 3 | GENERATE: ./plots/overhead_cdfs.png
  print("Start ploting TCP overhead rate cdf > GENERATE: ./plots/overhead_cdfs.png")
  overhead_cdfs(img_dir, overall_tcp_flows)
  # per flow bullet point 4 | GENERATE: ./plots/inter_arriving.png
  print("Start ploting TCP inter arriving time cdf > GENERATE: ./plots/inter_arriving.png")
  inter_packet_arrival_cdfs(img_dir ,overall_tcp_flows, overall_udp_flows)
  # per flow bullet point 5 | GENERATE: ./plots/TCP_exitState_statistics.png
  print("Start ploting TCP flows exit state table > GENERATE: ./plots/TCP_exitState_statistics.png")
  exit_state_statistics(img_dir, overall_tcp_flows)
}
################# end perflow analysis ##########################
################# start RTT analysis ##########################
# NOTE(review): this section reads `tcps` and `overall_tcp_flows`, both
# created inside the per-flow section above, so run_rtt only works when
# run_perflow was enabled in the same session.
if(run_rtt){
  # get top three TCP connection with largest package count
  # (column 1 of overall_tcp_flows is assumed to identify the stream -- TODO confirm)
  three_longest_packnum_stream <- head(overall_tcp_flows[order(overall_tcp_flows$packets, decreasing = TRUE), 1], 3)
  print(paste(c("Top three TCP connection with largest package count: ", three_longest_packnum_stream), collapse=" "))
  # get top three TCP connection with largest Bytes sum
  three_largest_bytesum_stream <- head(overall_tcp_flows[order(overall_tcp_flows$total_bytes, decreasing = TRUE), 1], 3)
  print(paste(c("Top three TCP connection with largest Bytes sum: ", three_largest_bytesum_stream), collapse=" "))
  # get top three TCP connection with longest duration
  three_largest_duration_stream <- head(overall_tcp_flows[order(overall_tcp_flows$duration, decreasing = TRUE), 1], 3)
  print(paste(c("Top three TCP connection with longest duration: ", three_largest_duration_stream), collapse=" "))
  # analysis and draw Sample RTT and estimate RTT on the union of the three
  # top-3 lists (deduplicated and sorted)
  analysis_streams <- sort(unique(c(three_longest_packnum_stream, three_largest_bytesum_stream, three_largest_duration_stream)))
  # plot analysis for all streams in analysis streams
  print(paste(c("So we need analysis TCP connection: ", analysis_streams), collapse=" "))
  analysis_all_stream(img_dir, tcps, analysis_streams)
  # plot analysis for median streams for top three TCP connections
  print("Analysis 3 hosts TCP connection over time")
  analysis_median_host_stream(img_dir, tcps, overall_tcp_flows, 3)
}
################# end RTT analysis ##########################
b6a9e142732a56074b4b6f6bf96ddd3b1aee6a4c | 6819738573656579a81afe8b292177d5dd81bdbf | /MovieLens_Project.R | 9f80ea733828efcb60a120d387fb75a98bbdb819 | [] | no_license | rishabhdodeja/MovieLens_Linear | 0f73ff46ddee5ea19a99170ad1f3df83bea270f6 | 62ef5a16a559c53d134c62aa566ef0c1e7959443 | refs/heads/master | 2022-11-24T14:37:09.009032 | 2020-07-22T11:45:52 | 2020-07-22T11:45:52 | 281,660,577 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,258 | r | MovieLens_Project.R | # ---
# title: "MovieLens"
# author: "Rishabh Singh Dodeja"
# date: "July 17, 2020"
# ---
# This R Script will perform the following task:
# 1. Download Movie Lens 10M dataset and required libraries (if needed)
# 2. Split the data set into edx(90%) and validation(10%)
# 3. Filtering and cleaning the data into final test_set and train_set from validation and edx respectively
# 4. Build a Linear Model with predictors : movie mean rating, user effect, genre, and release year
# 5. Run simulations to regularize the bias terms
# 6. Calculate RMSE for final Regularized Linear Model
# 7. Generate csv file containing entries from validation dataset with following columns:
#    "userId", "movieId", "rating", "predicted ratings"
################################
# Create edx set, validation set
################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as its separator; rewrite it to tabs so fread can parse it
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>% 
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(ratings, movies, test_index, temp, movielens, removed)
################################
# Data Filtering and cleaning
################################
# Note: this process removes features/columns that are not required by our Predictor Model
# we will only be taking userId, genre, and release year into account.
# This selection is based on insights gained from data exploration, explained in Rmd and report.
# We will make train set from edx set, our model will be trained on this set
# (the release year is the 4-digit "(YYYY)" suffix at the end of each title)
train_set <- edx %>% select(userId, movieId, rating, title,genres) %>% mutate(year = as.numeric(str_sub(title,-5,-2)))
# We will make test_set for validation set, our model will be tested/validated on this set
test_set <- validation %>% select(userId, movieId, rating, title,genres) %>% mutate(year = as.numeric(str_sub(title,-5,-2)))
# remove rest of the objects/datasets to free up disk space
rm(edx,validation)
################################
# Model Evaluation
################################
# Mean Absolute Error (MAE): average absolute deviation between the true
# and predicted ratings.
MAE <- function(true_ratings, predicted_ratings){
  errors <- true_ratings - predicted_ratings
  mean(abs(errors))
}
# Mean Squared Error (MSE): average squared deviation between the true
# and predicted ratings.
MSE <- function(true_ratings, predicted_ratings){
  errors <- true_ratings - predicted_ratings
  mean(errors * errors)
}
# Root Mean Squared Error (RMSE): square root of the MSE; this is the
# project's target metric.
RMSE <- function(true_ratings, predicted_ratings){
  errors <- true_ratings - predicted_ratings
  sqrt(mean(errors * errors))
}
# Create Results Table; the first row records the project's target RMSE
result <- tibble(Method = "Project Goal", RMSE = 0.8649, MSE = NA, MAE = NA);
################################
# Modelling and Results
################################
#LINEAR MODEL#
#Calculating Movie Mean Ratings (baseline: predict each movie's mean rating)
print("evaluating Movie-Mean-Model...")
#calculating movie_mean from train_set
movie_mean = train_set %>% group_by(movieId) %>% summarise(mu=mean(rating))
#Join Predicted values with test_set
y_hat_mu = test_set %>% left_join(movie_mean,by="movieId") %>% .$mu
# Update the Results table
result <- bind_rows(result, 
                    tibble(Method = "Movie Mean", 
                           RMSE = RMSE(test_set$rating, y_hat_mu),
                           MSE = MSE(test_set$rating, y_hat_mu),
                           MAE = MAE(test_set$rating, y_hat_mu)));
print(result)
#Calculating User Effect Bias terms (bu = mean residual rating per user)
print("Calculating User-Effect and evaluating new model...")
user_effect = train_set %>% left_join(movie_mean,by="movieId") %>% group_by(userId) %>% summarise(bu = mean(rating-mu));
# Join predicted values with test_set
y_hat_bu = test_set %>% left_join(movie_mean, by="movieId") %>% left_join(user_effect,by="userId") %>% mutate(pred=mu+bu) %>% .$pred;
result <- bind_rows(result, 
                    tibble(Method = "Movie Mean + bu", 
                           RMSE = RMSE(test_set$rating, y_hat_bu),
                           MSE = MSE(test_set$rating, y_hat_bu),
                           MAE = MAE(test_set$rating, y_hat_bu)))
# Show the RMSE improvement
print(result)
#Calculating Genre-Effect bias terms (bg = mean residual per genres string)
print ("Calculating Genre-effect and including in evaluation...")
#calculating genre effect from train_set
genre_effect = train_set %>% left_join(movie_mean,by="movieId") %>% left_join(user_effect,by="userId") %>% group_by(genres) %>% summarise(bg = mean(rating-mu-bu));
#Joining predicted values with test_set
y_hat_bg = test_set %>% left_join(movie_mean, by="movieId") %>% left_join(user_effect,by="userId") %>% left_join(genre_effect,by="genres") %>% mutate(pred=mu+bu +bg) %>% .$pred;
result <- bind_rows(result, 
                    tibble(Method = "Movie Mean + bu + bg", 
                           RMSE = RMSE(test_set$rating, y_hat_bg),
                           MSE = MSE(test_set$rating, y_hat_bg),
                           MAE = MAE(test_set$rating, y_hat_bg)))
# Show the RMSE improvement
print(result)
#Calculating Release-Year Effect bias terms
print("Calculating Release-Year Effect and evaluating...");
# Release-year effect (by): mean residual rating per release year after
# removing the movie mean (mu), user effect (bu) and genre effect (bg).
year_effect = train_set %>% left_join(movie_mean,by="movieId") %>% left_join(user_effect,by="userId") %>% left_join(genre_effect,by="genres") %>% group_by(year) %>% summarise(by = mean(rating-mu-bu-bg));
# Join predictions with test_set.
# FIX: the year join previously used the misspelled argument `be="year"`;
# that argument fell into `...` and the join only worked through dplyr's
# natural-join fallback on the shared "year" column. Join explicitly by "year".
y_hat_by = test_set %>% left_join(movie_mean, by="movieId") %>% left_join(user_effect,by="userId") %>% left_join(genre_effect,by="genres") %>% left_join(year_effect,by="year") %>% mutate(pred=mu+bu+bg+by) %>% .$pred;
#update results table
result = bind_rows(result, 
                   tibble(Method = "Movie Mean + bu + bg + by", 
                          RMSE = RMSE(test_set$rating, y_hat_by),
                          MSE = MSE(test_set$rating, y_hat_by),
                          MAE = MAE(test_set$rating, y_hat_by)));
# Show the RMSE improvement
print(result)
#REGULARIZATION#
# regularization: re-fit all bias terms on `trainset` with penalty `lambda`
# -- each effect is shrunk by using sum(residual)/(n() + lambda) instead of
# a plain mean -- and return the RMSE of the resulting predictions on
# `testset`. The per-movie mean itself is left unregularized, matching the
# final-model code further below.
#
# Args:
#   lambda   : non-negative penalty strength.
#   trainset : ratings used to fit the movie/user/genre/year effects.
#   testset  : ratings scored to produce the returned RMSE.
regularization <- function(lambda, trainset, testset){
  # Movie Mean
  movie_mean = trainset %>% group_by(movieId) %>% summarise(mu=mean(rating));
  # User effect (bu)
  user_effect = trainset %>% left_join(movie_mean,by="movieId") %>% group_by(userId) %>% summarise(bu = sum(rating-mu)/(n()+lambda));
  # Genre effect (bg)
  genre_effect = trainset %>% left_join(movie_mean,by="movieId") %>% left_join(user_effect,by="userId") %>%
    group_by(genres) %>% summarise(bg = sum(rating-mu-bu)/(n()+lambda));
  # Year effect (by)
  year_effect = trainset %>% left_join(movie_mean,by="movieId") %>% left_join(user_effect,by="userId") %>%
    left_join(genre_effect,by="genres") %>% group_by(year) %>% summarise(by = sum(rating-mu-bu-bg)/(n()+lambda));
  # Prediction: mu + bu + bg + by
  # FIX: the year join previously used the misspelled argument `be="year"`
  # (it fell into `...` and relied on dplyr's natural-join fallback on the
  # shared "year" column); join explicitly by "year".
  predicted_ratings = testset %>% left_join(movie_mean, by="movieId") %>% left_join(user_effect,by="userId") %>%
    left_join(genre_effect,by="genres") %>% left_join(year_effect,by="year") %>% mutate(pred=mu+bu+bg+by) %>% .$pred;
  return(RMSE(testset$rating,predicted_ratings));
}
#Running Regularization Simulation to get optimal value of lambda
print("Now running Regularization to tune lambda...")
# Define a set of lambdas to tune
lambdas = seq(0, 10, 0.25)
# Tune lambda: re-fits the whole model once per lambda value (slow).
# NOTE(review): lambda is tuned against test_set, i.e. the final hold-out
# (validation) data -- this leaks hold-out information into model selection.
rmses = sapply(lambdas, 
               regularization, 
               trainset = train_set, 
               testset = test_set)
# Plot the lambda vs RMSE
tibble(Lambda = lambdas, RMSE = rmses) %>%
  ggplot(aes(x = Lambda, y = RMSE)) +
  geom_point() +
  ggtitle("Regularization")
#picking lambda with lowest RMSE
lambda = lambdas[which.min(rmses)];
#Evaluating for the optimal lambda
print("Regularization done. Evaluating Final Model...")
# Movie Mean (unregularized, consistent with regularization() above)
movie_mean = train_set %>% group_by(movieId) %>% summarise(mu=mean(rating));
# User effect (bu), shrunk by lambda
user_effect = train_set %>% left_join(movie_mean,by="movieId") %>% group_by(userId) %>% summarise(bu = sum(rating-mu)/(n()+lambda));
# Genre effect (bg)
genre_effect = train_set %>% left_join(movie_mean,by="movieId") %>% left_join(user_effect,by="userId") %>%
  group_by(genres) %>% summarise(bg = sum(rating-mu-bu)/(n()+lambda));
# Year effect (by)
year_effect = train_set %>% left_join(movie_mean,by="movieId") %>% left_join(user_effect,by="userId") %>%
  left_join(genre_effect,by="genres") %>% group_by(year) %>% summarise(by = sum(rating-mu-bu-bg)/(n()+lambda));
# Prediction: mu + bu + bg + by
# FIX: the year join previously used the misspelled argument `be="year"` (it
# fell into `...` and relied on dplyr's natural-join fallback); join explicitly.
y_hat_reg = test_set %>% left_join(movie_mean, by="movieId") %>% left_join(user_effect,by="userId") %>%
  left_join(genre_effect,by="genres") %>% left_join(year_effect,by="year") %>% mutate(pred=mu+bu+bg+by) %>% .$pred;
# Final results table
result <- bind_rows(result, 
                    tibble(Method = "Regularized (mu + bu + bg + by)", 
                           RMSE = RMSE(test_set$rating, y_hat_reg),
                           MSE = MSE(test_set$rating, y_hat_reg),
                           MAE = MAE(test_set$rating, y_hat_reg)));
# Display Final RMSE table
print(result)
print(paste("RMSE for Final Model is:", RMSE(test_set$rating, y_hat_reg)))
################################
# Save Predictions
################################
predictions = test_set %>% select(movieId,userId,rating) %>% mutate(predictedRatings = y_hat_reg)
write.csv(predictions, "predictions.csv",row.names=FALSE) |
3018adde5f6371f7e1f7e0ebb653f0a4719ebb16 | 6ae01e2f3d6134c2b008cca6a174951414ceaa05 | /cachematrix.R | 0634437a9c4ae01e340d9a468ac3bd182a73aa75 | [] | no_license | markcovello-home/ProgrammingAssignment2 | ae03f877ce10adba2e1e13fbf485eb372ec3f0ee | 2403a8d181755c624d8ed429db39bf238986508b | refs/heads/master | 2020-12-26T02:33:07.934878 | 2015-04-25T05:49:46 | 2015-04-25T05:49:46 | 34,495,827 | 0 | 0 | null | 2015-04-24T03:26:52 | 2015-04-24T03:26:51 | null | UTF-8 | R | false | false | 2,582 | r | cachematrix.R | ## {assignment root)/source/cachematrix.R code to read/write a matrix from cache
## Not sure I get most of this, but what is sure is we must:
## 1. Test to see if the matrix is in the solve cache
## 2. if matrix is not in cache, calculate inverse and cache it
## 3. If the matrix is in cache then just return it
## two functions accomplish this makeCacheMatrix() makes a, "special," matrix
## that chacheSolve() can use to either calculate or extract from cache
## the inverse of the matrix that was the argument originally passed to makeCacheMatrix()
## makeCacheMatrix() takes an invertible matrix and returns a, "special matrix,"
## (in fact it's a list and it looks a lot like a list of instructions)
## that chacheSolve() can use to return the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
## initialize the value of the inverse to NULL,
## matrixinverse is the token to identify the inverse (solution) in this function
matrixinverse <- NULL
## This simple function just puts it's argument into the cache(parent env)
## th token that holds the cached object in the parent env is x
set <- function(y) {
x <<- y
##
matrixinverse <<- NULL
}
## gets the value of the inverse; x is the token foe the cached inverse
get <- function() x
#calculates the inverse of non-singular matrix via the solve function
setinverse <- function(solve) matrixinverse <<- solve
# gets the inverse
getinverse <- function() matrixinverse
## passes the value of the function makeCacheMatrix
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## chacheSolve() takes the special matrix from makeCacheMatrix() and returns either the
## previously calculated cached inverse of the original input matrix
## or calculates its inverse directly
cacheSolve<- function(x, ...) {
## load the cache into matrixinverse
matrixinverse <- x$getinverse()
#if the cache is not null just return it and we're done
if(!is.null(matrixinverse)) {
message("getting cached data - Inverse of the matrix")
return(matrixinverse)
}
#otherwise , calculate the inverse and send it back with a message saying it was calculated.
data <- x$get()
message("not cached - Calculating Inverse")
matrixinverse <- solve(data, ...)
x$setinverse(matrixinverse)
matrixinverse
}
|
66acf7a5ecb18fc01899115b22e379a0763b54d0 | d1e2a6d642015a3eb37162808648ff32bfaa0bce | /data/processed/process_train_val_history.R | fa81c3e83495942cc93056e58cc9e826037762bb | [] | no_license | TeamMacLean/effector_class_ms | a889cbab122a9d8fe0566cbabd414da197104ec7 | 34f3f71ebf64e0e4863fe2e4b809e2dc798be7c8 | refs/heads/master | 2023-06-04T16:06:46.205768 | 2020-06-17T08:35:49 | 2020-06-17T08:35:49 | 261,469,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,675 | r | process_train_val_history.R | # Load raw data ----
bacteria_cnn_lstm_train <- data.table::fread("data/raw/bacteria_hist_cnn_lstm_train.csv")
bacteria_cnn_lstm_val <- data.table::fread("data/raw/bacteria_hist_cnn_lstm_val.csv")
bacteria_cnn_gru_train <- data.table::fread("data/raw/bacteria_hist_cnn_gru_train.csv")
bacteria_cnn_gru_val <- data.table::fread("data/raw/bacteria_hist_cnn_gru_val.csv")
bacteria_lstm_emb_train <- data.table::fread("data/raw/bacteria_hist_lstm_emb_train.csv")
bacteria_lstm_emb_val <- data.table::fread("data/raw/bacteria_hist_lstm_emb_val.csv")
bacteria_gru_emb_train <- data.table::fread("data/raw/bacteria_hist_gru_emb_train.csv")
bacteria_gru_emb_val <- data.table::fread("data/raw/bacteria_hist_gru_emb_val.csv")
fungi_cnn_lstm_train <- data.table::fread("data/raw/fungi_hist_cnn_lstm_train.csv")
fungi_cnn_lstm_val <- data.table::fread("data/raw/fungi_hist_cnn_lstm_val.csv")
fungi_cnn_gru_train <- data.table::fread("data/raw/fungi_hist_cnn_gru_train.csv")
fungi_cnn_gru_val <- data.table::fread("data/raw/fungi_hist_cnn_gru_val.csv")
fungi_lstm_emb_train <- data.table::fread("data/raw/fungi_hist_lstm_emb_train.csv")
fungi_lstm_emb_val <- data.table::fread("data/raw/fungi_hist_lstm_emb_val.csv")
fungi_gru_emb_train <- data.table::fread("data/raw/fungi_hist_gru_emb_train.csv")
fungi_gru_emb_val <- data.table::fread("data/raw/fungi_hist_gru_emb_val.csv")
oomycete_cnn_lstm_train <- data.table::fread("data/raw/oomycete_hist_cnn_lstm_train.csv")
oomycete_cnn_lstm_val <- data.table::fread("data/raw/oomycete_hist_cnn_lstm_val.csv")
oomycete_cnn_gru_train <- data.table::fread("data/raw/oomycete_hist_cnn_gru_train.csv")
oomycete_cnn_gru_val <- data.table::fread("data/raw/oomycete_hist_cnn_gru_val.csv")
oomycete_lstm_emb_train <- data.table::fread("data/raw/oomycete_hist_lstm_emb_train.csv")
oomycete_lstm_emb_val <- data.table::fread("data/raw/oomycete_hist_lstm_emb_val.csv")
oomycete_gru_emb_train <- data.table::fread("data/raw/oomycete_hist_gru_emb_train.csv")
oomycete_gru_emb_val <- data.table::fread("data/raw/oomycete_hist_gru_emb_val.csv")
# Bind results together ----
bacteria_train_data <- dplyr::bind_rows(
bacteria_cnn_lstm_train %>% mutate(model = "CNN-LSTM"),
bacteria_cnn_gru_train %>% mutate(model = "CNN-GRU"),
bacteria_lstm_emb_train %>% mutate(model = "LSTM-Emb"),
bacteria_gru_emb_train %>% mutate(model = "GRU-Emb")
) %>%
dplyr::mutate(epochs = V1 + 1) %>%
dplyr::select(model, epochs, acc, loss)
bacteria_val_data <- dplyr::bind_rows(
bacteria_cnn_lstm_val %>% mutate(model = "CNN-LSTM"),
bacteria_cnn_gru_val %>% mutate(model = "CNN-GRU"),
bacteria_lstm_emb_val %>% mutate(model = "LSTM-Emb"),
bacteria_gru_emb_val %>% mutate(model = "GRU-Emb")
) %>%
dplyr::mutate(epochs = V1 + 1) %>%
dplyr::select(model, epochs, acc, loss)
fungi_train_data <- dplyr::bind_rows(
fungi_cnn_lstm_train %>% mutate(model = "CNN-LSTM"),
fungi_cnn_gru_train %>% mutate(model = "CNN-GRU"),
fungi_lstm_emb_train %>% mutate(model = "LSTM-Emb"),
fungi_gru_emb_train %>% mutate(model = "GRU-Emb")
) %>%
dplyr::mutate(epochs = V1 + 1) %>%
dplyr::select(model, epochs, acc, loss)
fungi_val_data <- dplyr::bind_rows(
fungi_cnn_lstm_val %>% mutate(model = "CNN-LSTM"),
fungi_cnn_gru_val %>% mutate(model = "CNN-GRU"),
fungi_lstm_emb_val %>% mutate(model = "LSTM-Emb"),
fungi_gru_emb_val %>% mutate(model = "GRU-Emb")
) %>%
dplyr::mutate(epochs = V1 + 1) %>%
dplyr::select(model, epochs, acc, loss)
oomycete_train_data <- dplyr::bind_rows(
oomycete_cnn_lstm_train %>% mutate(model = "CNN-LSTM"),
oomycete_cnn_gru_train %>% mutate(model = "CNN-GRU"),
oomycete_lstm_emb_train %>% mutate(model = "LSTM-Emb"),
oomycete_gru_emb_train %>% mutate(model = "GRU-Emb")
) %>%
dplyr::mutate(epochs = V1 + 1) %>%
dplyr::select(model, epochs, acc, loss)
oomycete_val_data <- dplyr::bind_rows(
oomycete_cnn_lstm_val %>% mutate(model = "CNN-LSTM"),
oomycete_cnn_gru_val %>% mutate(model = "CNN-GRU"),
oomycete_lstm_emb_val %>% mutate(model = "LSTM-Emb"),
oomycete_gru_emb_val %>% mutate(model = "GRU-Emb")
) %>%
dplyr::mutate(epochs = V1 + 1) %>%
dplyr::select(model, epochs, acc, loss)
# Save data ----
bacteria_train_data %>% data.table::fwrite("data/processed/bacteria_hist_train.csv")
bacteria_val_data %>% data.table::fwrite("data/processed/bacteria_hist_val.csv")
fungi_train_data %>% data.table::fwrite("data/processed/fungi_hist_train.csv")
fungi_val_data %>% data.table::fwrite("data/processed/fungi_hist_val.csv")
oomycete_train_data %>% data.table::fwrite("data/processed/oomycete_hist_train.csv")
oomycete_val_data %>% data.table::fwrite("data/processed/oomycete_hist_val.csv") |
ec5f19cada22685cd9475e887e4197e32f605d5e | 94bd617b377a58ebd0fbbd0d70a0a43362a1108d | /R/seed_generators.R | 0ce3e3aa6cd74fce1ba66efd8acc61fdd4ba7f77 | [
"MIT"
] | permissive | Maschette/jasmines | 36585e07b95676a66c05154ee3e753e37116ba77 | 1218da72c52ca7c93d2a0a72bf03fd715a6fc294 | refs/heads/master | 2020-11-24T10:38:04.772951 | 2019-12-16T01:03:50 | 2019-12-16T01:03:50 | 228,111,310 | 0 | 0 | null | 2019-12-15T01:06:02 | 2019-12-15T01:06:01 | null | UTF-8 | R | false | false | 3,868 | r | seed_generators.R | #' Seed shaped like a teardrop
#'
#' @param n number of dot points
#' @param m exponent
#'
#' @return tibble
#' @export
seed_teardrop <- function(n = 100, m = 3) {
t <- seq(0, 2*pi, length.out = n)
tibble::tibble(
x = sin(t) * (sin(t/2))^m,
y = cos(t),
id = 1
)
}
#' Seed shaped like a heart
#'
#' @param n number of dot points
#'
#' @return tibble
#' @export
seed_heart <- function(n = 100) {
t <- seq(0, 2*pi, length.out = n)
tibble::tibble(
x = (16*sin(t)^3) / 17,
y = (13*cos(t) - 5*cos(2*t) - 2*cos(3*t) - cos(4*t)) / 17,
id = 1
)
}
#' Seed shaped like concentric rings
#'
#' @param points Total number of interior points
#' @param rings How many rings to spread across?
#' @param size Radius of the outermost ring
#' @export
seed_disc <- function(points = 1000, rings = 10, size = 1) {
radius <- size * (1:rings)/rings
circumference <- 2 * pi * radius
proportion <- circumference / sum(circumference)
counts <- round(points * proportion)
unfold <- function(radius, grain, id) {
theta <- seq(0, 2*pi, length.out = grain + 1)
theta <- theta[-1]
return(tibble::tibble(
x = radius * cos(theta),
y = radius * sin(theta),
id = id
))
}
seed <- purrr::pmap_dfr(
.l = list(radius, counts, 1:length(radius)),
.f = unfold
)
return(seed)
}
#' Seed shaped from text
#'
#' @param text the message
#'
#' @return a tibble
#' @export
seed_text <- function(text) {
char_map <- make_dotted_letters()
char_map <- dplyr::filter(char_map, value == 1)
char_set <- stringr::str_split(text, "", simplify = TRUE)
dots <- purrr::map_dfr(
.x = char_set,
.f = ~ dplyr::filter(char_map, char == .x),
.id = "char_ind"
) %>%
dplyr::mutate(
char_ind = as.numeric(char_ind),
x = x + 6 * char_ind,
id = 1:nrow(.)
) %>%
dplyr::mutate( # to match scale used in bridges
x = x/10,
y = y/10
) %>%
dplyr::select(char, char_ind, x, y, id)
return(dots)
}
#' Seed from a set of random sticks
#'
#' @param n how many sticks
#' @param grain how many points along each stick
#'
#' @return a tibble with columns x, y and id
#' @export
seed_sticks <- function(n = 10, grain = 1000) {
make_stick <- function(id, grain) {
return(tibble::tibble(
x = seq(stats::runif(1), stats::runif(1), length.out = grain),
y = seq(stats::runif(1), stats::runif(1), length.out = grain),
id = id
))
}
points <- purrr::map_dfr(1:n, make_stick, grain = grain)
return(points)
}
#' Seed with evenly spaced rows
#'
#' @param n how many rows
#' @param grain how many points along each row
#' @param vertical flip the x/y co-ords to produce columns?
#'
#' @return a tibble with columns x, y and id
#' @export
seed_rows <- function(n = 10, grain = 1000, vertical = FALSE) {
make_row <- function(id, grain, vertical = FALSE) {
if(!vertical) {
return(tibble::tibble(
x = seq(0, 1, length.out = grain),
y = id/(n+1),
id = id
))
} else{
return(tibble::tibble(
x = id/(n+1),
y = seq(1, 0, length.out = grain),
id = id
))
}
}
points <- purrr::map_dfr(1:n, make_row, grain = grain, vertical = vertical)
return(points)
}
#' Seed with a random set of bubbles
#'
#' @param n how many bubbles
#' @param grain how many points along each stick
#'
#' @return a tibble with columns x, y and id
#' @export
seed_bubbles <- function(n = 2, grain = 1000) {
make_bubble <- function(id, grain) {
radius <- stats::runif(1)
origin_x <- stats::runif(1)
origin_y <- stats::runif(1)
th <- seq(0, 2*pi, length.out = grain)
return(tibble::tibble(
x = radius * cos(th) + origin_x,
y = radius * sin(th) + origin_y,
id = id
))
}
points <- purrr::map_dfr(1:n, make_bubble, grain = grain)
return(points)
}
|
924220bde9e48d6b8d037eed2a0862da81d11a7d | 1da4a08a9e5a33ae868187de57c6ac324b627fce | /03.phylogenetic-stats.r | 43fd902269d678c07483833d28ec99bf804af98b | [] | no_license | basil-yakimov/meadow-structure | a211e35401661e6983cae69bd125f6bc3ca863b3 | eaa420636b5acdd0c081a1c2f6da55dc1a3a1ed7 | refs/heads/master | 2020-07-22T19:09:26.136503 | 2019-11-15T04:50:24 | 2019-11-15T04:50:24 | 207,300,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,137 | r | 03.phylogenetic-stats.r | load("clean.data/meadows-2018.rda")
load("clean.data/meadows-phylo-ses-ao.rda")
meta <- meta2
plot.year <- function(y , ind )
{
dd <- meta[[ind]][meta$year == y]
ss <- factor(meta$site[meta$year == y], levels = paste0(c("a", "f1", "f2", "m"), "-", substr(y, 3, 4)))
boxplot(dd ~ ss, col = c("tomato", "steelblue", "skyblue", "limegreen"), ylab = ind)
}
plot.year(2014, "mpd.z")
op <- par(mfrow = c(2, 5), mar = c(2, 4, 0.5, 0.5))
plot.year(2014, "mpd.z")
plot.year(2015, "mpd.z")
plot.year(2016, "mpd.z")
plot.year(2017, "mpd.z")
plot.year(2018, "mpd.z")
plot.year(2014, "mpd.a.z")
plot.year(2015, "mpd.a.z")
plot.year(2016, "mpd.a.z")
plot.year(2017, "mpd.a.z")
plot.year(2018, "mpd.a.z")
par(op)
#---#
df <- data.frame(mpd.z = meta$mpd.z, mpd.a.z = meta$mpd.a.z, mntd.z = meta$mntd.z, mntd.a.z = meta$mntd.a.z,
year = meta$year,
site = sapply(strsplit(as.character(meta$site), "-"), function(x) x[[1]]),
id = paste0(sapply(strsplit(as.character(meta$site), "-"), function(x) x[[1]]), "-", c(rep(1:50, 2), rep(1:65, 15))))
df$numsite <- as.numeric(df$site)
dfa <- data.frame(mpd.z = tapply(df$mpd.z, df$id, mean),
mpd.a.z = unname(tapply(df$mpd.a.z, df$id, mean)),
mntd.z = unname(tapply(df$mntd.z, df$id, mean)),
mntd.a.z = unname(tapply(df$mntd.a.z, df$id, mean)))
dfa$site <- sapply(strsplit(as.character(rownames(dfa)), "-"), function(x) x[[1]])
dfa$site <- factor(dfa$site)
#---------------------------------------------------------#
fit <- lm(mpd.z ~ year + id, data = df)
( an <- anova(fit) )
eta <- an[, 2] / sum(an[, 2])
names(eta) <- row.names(an)
eta * 100
fit <- lm(mpd.a.z ~ year + id, data = df)
( an <- anova(fit) )
eta <- an[, 2] / sum(an[, 2])
names(eta) <- row.names(an)
eta * 100
fit <- lm(mntd.z ~ year + id, data = df)
( an <- anova(fit) )
eta <- an[, 2] / sum(an[, 2])
names(eta) <- row.names(an)
eta * 100
fit <- lm(mntd.a.z ~ year + id, data = df)
( an <- anova(fit) )
eta <- an[, 2] / sum(an[, 2])
names(eta) <- row.names(an)
eta * 100
#---------------------------------------------------------#
col4 <- c("tomato", "steelblue", "skyblue", "limegreen")
op <- par(mfcol = c(2, 2))
boxplot(mpd.z ~ site, dfa, col = col4, ylab = "SES MPD")
boxplot(mpd.a.z ~ site, dfa, col = col4, ylab = expression("SES "*MPD[a]))
boxplot(mntd.z ~ site, dfa, col = col4, ylab = "SES MNTD")
boxplot(mntd.a.z ~ site, dfa, col = col4, ylab = expression("SES "*MNTD[a]))
par(op)
#---------------------------------------------------------#
library(multcompView)
library(reshape)
cld <- function(res)
{
require(multcompView)
require(reshape)
a <- melt(res$p.value)
a.cc <- na.omit(a)
a.pvals <- a.cc[, 3]
names(a.pvals) <- paste(a.cc[, 2], a.cc[, 1], sep="-")
multcompLetters(a.pvals)$Letters
}
fit <- lm(mpd.z ~ site, dfa)
anova(fit)
res <- pairwise.t.test(dfa$mpd.z, dfa$site)
abc <- cld(res)
pp <- tapply(dfa$mpd.z, dfa$site, function(x) t.test(x)$p.value)
abc[pp < 0.05] <- paste0(abc[pp < 0.05], "(*)")
boxplot(mpd.z ~ site, dfa, col = col4, ylab = "SES MPD", ylim = c(-.5, .5), axes = F, cex.lab = 1)
axis(1, at = 1:4, cex.axis = 1, labels = c("поле", "залежь 1", "залежь 2", "луг"))
axis(2, cex.axis = 1)
box()
text(x = 1:4, y = 0.45, labels = abc, cex = 1)
#---#
fit <- lm(mpd.a.z ~ site, dfa)
anova(fit)
res <- pairwise.t.test(dfa$mpd.a.z, dfa$site)
abc <- cld(res)
pp <- tapply(dfa$mpd.a.z, dfa$site, function(x) t.test(x)$p.value)
abc[pp < 0.05] <- paste0(abc[pp < 0.05], "(*)")
boxplot(mpd.a.z ~ site, dfa, col = col4, ylab = expression("SES "*MPD[a]), ylim = c(-.5, .5), axes = F, cex.lab = 1)
axis(1, at = 1:4, cex.axis = 1, labels = c("поле", "залежь 1", "залежь 2", "луг"))
axis(2, cex.axis = 1)
box()
text(x = 1:4, y = 0.45, labels = abc, cex = 1)
#________________________________________________________________________________________
boxplotm <- function(x, y, i){
res <- pairwise.t.test(x, y)
abc <- cld(res)
pp <- tapply(x, y, function(x) t.test(x)$p.value)
abc[pp < 0.05] <- paste0(abc[pp < 0.05], "(*)")
col4 <- c("tomato", "steelblue", "skyblue", "limegreen")
ylab <- c("SES MPD", expression("SES "*MPD[a]), "SES MNTD", expression("SES "*MNTD[a]))
boxplot(x ~ y, col = col4, ylab = ylab[i], ylim = c(min(x), max(x)+0.21), axes = F, cex.lab = 1)
axis(1, at = 1:4, cex.axis = 1, labels = c("поле", "залежь 1", "залежь 2", "луг"))
axis(2, cex.axis = 1)
box()
text(x = 1:4, y = max(x) + 0.15, labels = abc, cex = 1)
}
png("figures/ses-phylo-angiosperm-only.png", width = 1000, height = 1000)
op <- par(mfcol = c(2,2), mar = c(4, 4.1, 1, 1), cex = 1.5)
boxplotm(dfa$mpd.z, dfa$site, 1)
boxplotm(dfa$mpd.a.z, dfa$site, 2)
boxplotm(dfa$mntd.z, dfa$site, 3)
boxplotm(dfa$mntd.a.z, dfa$site, 4)
par(op)
dev.off()
#________________________________________________________________________________________
load("clean.data/meadows-phylo-ses.rda")
df <- data.frame(mpd.z = meta$mpd.z, mpd.a.z = meta$mpd.a.z, mntd.z = meta$mntd.z, mntd.a.z = meta$mntd.a.z,
year = meta$year,
site = sapply(strsplit(as.character(meta$site), "-"), function(x) x[[1]]),
id = paste0(sapply(strsplit(as.character(meta$site), "-"), function(x) x[[1]]), "-", c(rep(1:50, 2), rep(1:65, 15))))
df$numsite <- as.numeric(df$site)
dfa <- data.frame(mpd.z = tapply(df$mpd.z, df$id, mean),
mpd.a.z = unname(tapply(df$mpd.a.z, df$id, mean)),
mntd.z = unname(tapply(df$mntd.z, df$id, mean)),
mntd.a.z = unname(tapply(df$mntd.a.z, df$id, mean)))
dfa$site <- sapply(strsplit(as.character(rownames(dfa)), "-"), function(x) x[[1]])
dfa$site <- factor(dfa$site)
png("figures/ses-phylo-all-species.png", width = 1000, height = 1000)
op <- par(mfcol = c(2,2), mar = c(4, 4.1, 1, 1), cex = 1.5)
boxplotm(dfa$mpd.z, dfa$site, 1)
boxplotm(dfa$mpd.a.z, dfa$site, 2)
boxplotm(dfa$mntd.z, dfa$site, 3)
boxplotm(dfa$mntd.a.z, dfa$site, 4)
par(op)
dev.off()
|
fb4fa36f17d8ccf6ab778504403df54d64745e0d | 524b38b3eba37f9ccfb169b7fdb92286a5bbfd36 | /tests/testthat/testcorrmatrix.R | 6d96b52585a66138dbb9086b069ec911ecc29089 | [] | no_license | lago1970/jmv | b623a4bad1191cdf864c6c3a3ab0c450eaaa3b68 | 41247ccd0a9ea985f218494b1fc810e669ec6ecb | refs/heads/master | 2020-03-08T05:57:33.352626 | 2018-04-02T01:35:47 | 2018-04-02T01:39:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 481 | r | testcorrmatrix.R | context('corrmatrix')
test_that('corrmatrix works', {
x <- as.factor(c(NA,rep(c("a", "b"), 6)))
y <- c(8,51,2,74,1,91,5,25,1,59,5,32,7) # breaks equality of variance
z <- c(2,NA,NaN,3,-1,-2,1,1,-2,2,-2,-3,3)
w <- c(0,4,19,5,9,15,1,4,19,10,13,7,5)
data <- data.frame(x = x, y = y, z = z, w = w)
# expect_error(jmv::corrMatrix(data, c("x","y","z","w")), "'x' must be a numeric vector", fixed=TRUE)
corrmatrix <- jmv::corrMatrix(data, c("y","w"))
})
|
309b95ba135a3122aa693e0335301ee53b268548 | cba10b84d2cc708dd66148a4511451d77a92a7c5 | /man/selShapes.Rd | 48fa73a54a538716679125bb54c82deef3dc6240 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | r4ss/r4ss | 03e626ae535ab959ff8109a1de37e3e8b44fe7ad | 0ef80c1a57e4a05e6172338ddcb0cda49530fa93 | refs/heads/main | 2023-08-17T08:36:58.041402 | 2023-08-15T21:42:05 | 2023-08-15T21:42:05 | 19,840,143 | 35 | 57 | null | 2023-07-24T20:28:49 | 2014-05-16T00:51:48 | R | UTF-8 | R | false | true | 430 | rd | selShapes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selShapes.R
\name{selShapes}
\alias{selShapes}
\title{Launch a shiny app that displays various selectivity curves}
\usage{
selShapes()
}
\description{
This app is hosted at https://connect.fisheries.noaa.gov/ss3-helper/
}
\author{
Allan C. Hicks, Andrea M. Havron, Ian G. Taylor, Kathryn L. Doering
inspired by tcl/tk code written by Tommy Garrison
}
|
d761806ac566c82816c1b264eedee845728b14e1 | 2fb65d442efadbc3a1db41fcf25fed8958c4e04f | /man/readable.files.Rd | 04998efee8bcdfb96c5e9f9c38ad9947ffe0160d | [
"MIT"
] | permissive | dfsp-spirit/freesurferformats | 8f507d8b82aff7c34b12e9182893007064e373b9 | 6cf9572f46608b7bb53887edd10dfed10e16e13d | refs/heads/master | 2023-07-25T00:28:09.021237 | 2023-07-19T07:29:07 | 2023-07-19T07:29:07 | 203,574,524 | 22 | 3 | NOASSERTION | 2023-07-19T07:29:09 | 2019-08-21T11:57:16 | R | UTF-8 | R | false | true | 919 | rd | readable.files.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{readable.files}
\alias{readable.files}
\title{Find files with the given base name and extensions that exist.}
\usage{
readable.files(
filepath,
precedence = c(".mgh", ".mgz"),
error_if_none = TRUE,
return_all = FALSE
)
}
\arguments{
\item{filepath}{character string, path to a file without extension}
\item{precedence}{vector of character strings, the file extensions to check. Must include the dot (if you expect one).}
\item{error_if_none}{logical, whether to raise an error if none of the files exist}
\item{return_all}{logical, whether to return all readable files instead of just the first one}
}
\value{
character string, the path to the first existing file (or `NULL` if none of them exists).
}
\description{
Note that in the current implementation, the case of the filepath and the extension must match.
}
|
893b0b5266100f3bfbef330bf8bc7ec4b720a6af | 07a57621be185d493c6026cdfb972a5cf0376c09 | /DecompMex/R/9_Infection_notHom.R | ef05e9872def399dfe8e7e539fb545670b7517b6 | [] | no_license | timriffe/DecompMex | fe47b21c8b6796eeb3f6f3b8e87bddc02d617d1c | b92b5aa927cc52d6c1be6b741eaa7bf1ebdd208f | refs/heads/master | 2020-04-07T03:59:40.373849 | 2018-08-07T14:05:23 | 2018-08-07T14:05:23 | 42,098,790 | 0 | 1 | null | 2018-08-07T13:56:13 | 2015-09-08T08:07:42 | TeX | UTF-8 | R | false | false | 882 | r | 9_Infection_notHom.R | # test of female homicide spike 2009
head(data)
# g7 is homicide
load('Data/Counts&Rates_1990-2015Mex.RData')
head(Data_rates)
dim(Data_Counts)
dim(data)
data <- data[with(data, order(state,year,sex,age)), ]
Data_Counts <- Data_Counts[with(Data_Counts, order(state,year,sex,age)), ]
Data_rates <- Data_rates[with(Data_rates, order(state,year,sex,age)), ]
Data_rates$Exp <- Data_rates$Pop
Data_rates[,standage := sum(Exp), by=list(year,sex,age)]
strate <- function(rate,standardage,age,from=0,to=14){
keep <- age >= from & age <= to
sum(rate[keep] * standardage[keep]) / sum(standardage[keep])
}
setnames(Data_rates,as.character(1:12),letters[1:12])
asdr <- Data_rates[ ,list(asdr = strate(a,standage,age,from=50,to=59)),by=list(state,year,sex)]
mat <- acast(asdr[sex == 1, ], state ~ year, value.var = "asdr")
matplot(1990:2015, t(mat), type = 'l', lty = 1)
abline(v=2009)
|
5d21301d8e44aa3629aed550ca794ca43ad2927c | e244706f842fd980ad5887c953817d63f0a329dc | /gapminder_practice.R | c1aabf455bda018e3e838efdaee8b3d53de39609 | [] | no_license | cavfan96/tidyverse_with_gapminder | 15ce1c5be1047aecd5c4b27219dffd83b63bba2e | 5c915703fd1903e8678528c8b7c1536d0a79a9aa | refs/heads/master | 2022-12-07T21:13:37.976236 | 2020-08-23T17:19:43 | 2020-08-23T17:19:43 | 289,731,013 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,905 | r | gapminder_practice.R | # tidyverse
library(gapminder)
library(dplyr)
gapminder
## Filter Verb
# filters show a subset of the data
# filter for just the year 2007 using the pipe %>%
# piping simply is creating a df on the fly without
# changing your original data frame unless you assign it
# as a new dataframe
gapminder %>%
filter(year == 2007)
# filter for the US - need quotes around text
gapminder %>%
filter(country == "United States")
# filter by two conditions
gapminder %>%
filter(country == "United States", year == 2007)
## Arrange Verb
# Sorts the data
gapminder %>%
arrange(gdpPercap)
gapminder %>%
arrange(desc(gdpPercap))
# combining verbs to find richest countries in 2007 descending order
gapminder %>%
filter(year == 2007) %>%
arrange(desc(gdpPercap))
## Mutate Verb
# changes the data value in a new value, doesn't effect the original data
gapminder %>%
mutate(pop = pop / 1000000)
# add a variable
gapminder %>%
mutate(gdp = gdpPercap * pop) %>%
arrange(desc(gdp))
# Create, filter, and sort the data by combining verbs
gapminder %>%
mutate(gdp = gdpPercap * pop) %>%
filter(year == 2007) %>%
arrange(desc(gdp))
## Data Visualization
## Data, Aesthetic, Layer
library(ggplot2)
# create a new dataframe for 2007
gapminder_2007 <- gapminder %>%
filter(year == 2007)
gapminder_2007
# ggplots start with the df and then an aesthetic
# then you add a layer (scatterplot, bar chart, etc.)
ggplot(gapminder_2007, aes(x = gdpPercap,
y = lifeExp)) +
geom_point()
# use a logscale if there's several orders of magnitude in one section
# of the x-axis or y-axis
ggplot(gapminder_2007, aes(x = gdpPercap,
y = lifeExp)) +
geom_point() +
scale_x_log10()
# group the data by continent with color
ggplot(gapminder_2007, aes(x = gdpPercap,
y = lifeExp, color = continent)) +
geom_point() +
scale_x_log10()
# group the data by continent and size of population as aesthetics #3 and #4
# on top of aesthetics #1 (x) and #2 (y)
ggplot(gapminder_2007, aes(x = gdpPercap,
y = lifeExp,
color = continent,
size = pop)) +
geom_point() +
scale_x_log10()
## Faceting
# Faceting explores categorical variables into subplots.
ggplot(gapminder_2007, aes(x = gdpPercap,
y = lifeExp)) +
geom_point() +
scale_x_log10() +
facet_wrap(~ continent)
## Summarize
# finding powerful data points using summarize and its
# functionality from the dplyr package
# mean, min, max, sum and median are available functions
# you can do as many of these functions as you wish
gapminder %>%
summarize(meanLifeExp = mean(lifeExp))
gapminder %>%
filter(year == 2007) %>%
summarize(meanLifeExp = mean(lifeExp))
gapminder %>%
filter(year == 2007) %>%
summarize(meanLifeExp = mean(lifeExp),
totalPop = sum(pop))
## group_By
# group_by replaces filter and does the summarize by a "group"
gapminder %>%
group_by(year) %>%
summarize(meanLifeExp = mean(lifeExp),
totalPop = sum(pop))
# group_by is now done after filtering and does the summarize by a "group"
gapminder %>%
filter(year == 2007) %>%
group_by(continent) %>%
summarize(meanLifeExp = mean(lifeExp),
totalPop = sum(pop))
# group_by is now done after filtering and does the summarize by a "group"
gapminder %>%
group_by(year, continent) %>%
summarize(meanLifeExp = mean(lifeExp),
totalPop = sum(pop))
### Dynamic viz
by_year <- gapminder %>%
group_by(year) %>%
summarize(totalPop = sum(pop),
meanLifeExp = mean(lifeExp))
# plot these summaries now
ggplot(by_year, aes(x = year, y = totalPop)) +
geom_point()
# add y = 0 to the graph
ggplot(by_year, aes(x = year, y = totalPop)) +
geom_point() +
expand_limits(y = 0)
# group by year, continent into new data frame
by_year_continent <- gapminder %>%
group_by(year, continent) %>%
summarize(totalPop = sum(pop),
meanLifeExp = mean(lifeExp))
ggplot(by_year_continent, aes(x = year,
y = totalPop,
color = continent)) +
geom_point() +
expand_limits(y = 0)
### Line Plots
ggplot(by_year_continent, aes(x = year,
y = meanLifeExp,
color = continent)) +
geom_line() +
expand_limits(y = 0)
### Bar Plots
# In a bar plot, the categorical goes on the x-axis
# and the height of the bar is determined by the y-axis quantitative
# variable
by_continent <- gapminder %>%
group_by(continent) %>%
summarize(totalPop = sum(pop),
meanLifeExp = mean(lifeExp))
ggplot(by_continent, aes(x = continent,
y = meanLifeExp)) +
geom_col()
### Histogram
# Investigate one variable distribution at a time in "bins"
# Uses only one axis in the aesthetic
ggplot(gapminder_2007, aes(x = lifeExp)) +
geom_histogram()
# change the histogram bin width
# you can also change the # of bins, using bins = <insert #>
ggplot(gapminder_2007, aes(x = lifeExp)) +
geom_histogram(binwidth = 5)
### Box Plots
# Shows the importance of a variable distribution for comparison
# box plots have 2 aesthetics where x = categorical and y = measurable
# variable you want to compare
ggplot(gapminder_2007, aes(x = continent, y = lifeExp)) +
geom_boxplot()
# add a label using labs and title
ggplot(gapminder_2007, aes(x = lifeExp)) +
geom_histogram(binwidth = 5) +
labs(
title = paste(
"Life Expectancy Across Continents"
)
)
# done anotehr way
ggplot(gapminder_2007, aes(x = lifeExp)) +
geom_histogram(binwidth = 5) +
labs(title = "Life Expectancy Across Continents")
# using ggtitle
ggplot(gapminder_2007, aes(x = lifeExp)) +
geom_histogram(binwidth = 5) +
ggtitle("Life Expectancy Across Continents")
|
e5740c2a4b827baa6100263a18a3640728862c02 | 73139b16df978f4cd71e5d3a4e8820c62c9bdc67 | /Analyze Data with R/Visualizing Carbon Dioxide Levels.R | 71de12fb49e85e692a770f1a4db18c2ca773ee95 | [] | no_license | oplt/Codecademy-Projects | 9bbbc07f8c5957082baf1437ddfb4f5e3dd91cce | 09a41bfb623d5d4859bfaaa8e7bc7d6461a9f7d4 | refs/heads/main | 2023-02-05T04:53:20.299992 | 2020-12-25T09:15:31 | 2020-12-25T09:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,166 | r | Visualizing Carbon Dioxide Levels.R | ---
title: "Visualizing Carbon Dioxide Levels"
output: html_notebook
---
# load libraries and data
library(readr)
library(dplyr)
library(ggplot2)
noaa_data <- read_csv("carbon_dioxide_levels.csv")
head(noaa_data)
options(scipen=10000) #removes scientific notation
#Create NOAA Visualization here:
noaa_viz <- ggplot(data = noaa_data, aes(x = Age_yrBP, y = CO2_ppmv))
+ geom_line()
+ labs(title="Carbon Dioxide Levels From 8,000 to 136 Years BP", subtitle="From World Data Center for Paleoclimatology and NOAA Paleoclimatology Program", x="Years Before Today (0=1950)", y = "Carbon Dioxide Level (Parts Per Million)")
+ scale_x_reverse(lim=c(800000,0))
#Create IAC Visualization
iac_data <- read_csv("yearly_co2.csv")
head(iac_data)
millennia_max <- max(noaa_data$CO2_ppmv)
iac_viz <- ggplot(data=iac_data, aes(x=year, y=data_mean_global))
+ geom_line()
+ labs(title = 'Carbon Dioxide Levels over Time', subtitle= 'From Institute for Atmospheric and Climate Science (IAC)', x="Year", y="Carbon Dioxide Level (Parts Per Million)")
+ geom_hline(aes(yintercept=millennia_max, linetype='Historical CO2 Peak before 1950'))
|
f659042ce4b54ed5826cf32c8b8e394ccba0a173 | 56146ed60db3ede66970748bd15757f901057092 | /03_buffer_analysis.R | 1c9ab373b6d0df6a6fa7c14d1d746e41bdfd8396 | [] | no_license | annepaulus/FAWKES-III | d00ee66733e78b918aeb9fa8b378ed4e9cffb1fb | 70095573faa183abbf84dd21c4269dcc0811321c | refs/heads/master | 2021-05-24T08:07:30.214570 | 2020-01-31T09:51:38 | 2020-01-31T09:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 40,098 | r | 03_buffer_analysis.R | ############################################################################
### 4. overlay of SPAs with ACTs
############################################################################
###############################################################################
### Buffer stuffer
###############################################################################
# subset shapefile of 10 SPAs
#subset<-Natura2000_shape_all[1:10,]
subset<-Natura2000_shape_all
#plot(subset, col="red", add=TRUE) # only subset for now
#fun with outer buffers
# Build the ring ("doughnut") of width d around polygon x: buffer the polygon
# outward by d map units, then erase the polygon itself so only the
# surrounding band remains.
outerBuffer <- function(x, d) {
  ring <- buffer(x, width = d, dissolve = FALSE)
  erase(ring, x)
}
# per-SPA result containers filled by the loops below
buffer10<-list()
area<-list()
perimeter<-list()
# outer10km<-outerBuffer(x=subset,d=10000)
myBuffer<-list()
pb <- txtProgressBar(min = 0, max = length(subset), style = 3) # produces a progress bar
# build a 10 km outer ring around every SPA polygon
# (d is in map units; assumes a metric projected CRS -- TODO confirm)
for(i in 1:length(subset)){
myBuffer[[i]]<-outerBuffer(x=subset[i,],d=10000)
setTxtProgressBar(pb, i)
}
close(pb)
# cache the rings: buffering is slow, reload with readRDS() on re-runs
saveRDS(myBuffer, file = "myBuffer.rds")
#plot(ACT)
plot(myBuffer[[4]], col="red")
# NOTE(review): `total` is not defined in this script -- presumably the number
# of SPAs (length(myBuffer)); confirm before running standalone.
pb <- txtProgressBar(min = 0, max = total, style = 3) # produces a progress bar
for (i in 1:length(myBuffer)){
subset10 <- myBuffer[[i]]
# takes each shapefile, if it is a SPA
# extract ACT raster cells inside the ring; weights give the covered fraction
# of each cell, normalized to sum to 1 across the ring
buffer10[[i]]<-extract(ACT,subset10,weights=TRUE,normalizeWeights=TRUE)
# gArea: m^2 -> km^2; gLength: m -> km (assumes metric CRS -- TODO confirm)
area[[i]]<-gArea(subset10)/1e+06
perimeter[[i]]<-gLength(subset10)/1e+03
setTxtProgressBar(pb, i)
}
close(pb)
#cleanup
# Drop raster-NA cells from each extraction and renormalize the remaining cell
# weights; rings with no usable ACT data become NA. area_red scales the ring
# area by the share of weight that had valid data.
ACT_red<-list()
area_red<-list()
for (i in 1:length(buffer10)){
# case 1: extraction returned nothing at all for this ring
if(is.null(buffer10[[i]][[1]])==TRUE) {
ACT_red[[i]]<-NA
area_red[[i]]<-NA
}else{
# case 2: every extracted cell value is NA (ring fully outside the raster)
if(isTRUE(unique(is.na(as.data.frame(buffer10[[i]])$value)))==TRUE){
ACT_red[[i]]<-NA
area_red[[i]]<-NA
}else{
# case 3: keep the non-NA rows and rescale their weights to sum to 1 again
f.correct<-1/sum(as.data.frame(buffer10[[i]])[!is.na(as.data.frame(buffer10[[i]])[,1]),2])
reduced.na<-as.data.frame(buffer10[[i]])[!is.na(as.data.frame(buffer10[[i]])[,1]),]
reduced.na[,2]<-reduced.na[,2]*f.correct
ACT_red[[i]]<-reduced.na
# effective area = ring area x share of weight with valid ACT data
area_red[[i]]<-area[[i]]*sum(as.data.frame(buffer10[[i]])[!is.na(as.data.frame(buffer10[[i]])[,1]),2])
}
}
}
# create a dataframe for presenting the distribution of SPA-area per ACT
# one row per ring: SITECODE, effective area, proportion of each of the
# 17 ACT classes (cols 3..19), and the number of contributing raster cells
results <- as.data.frame(matrix(NA,length(ACT_red),20))
colnames(results) <- c("SITECODE","area",paste("ACT",c(1:17),sep=""),"count")
for(k in 1:length(ACT_red)){
# NOTE(review): `overlapSPA` is defined elsewhere; it must be ordered exactly
# like the buffers above, otherwise site codes and rows do not line up.
results[k,1]<-as.character(overlapSPA)[k]
results[k,2]<-area_red[[k]]
# NOTE(review): is.na() on a data.frame returns a matrix; this test relies on
# the NA-marker case being a scalar NA. Works, but fragile.
if(is.na(ACT_red[[k]])==TRUE){
results[k,3:19]<-NA
} else{
# summed weight and cell count per ACT class value present in this ring
test.sum<-cbind(aggregate(weight~value,sum,data=ACT_red[[k]]),table(ACT_red[[k]]$value))
# ACT class v goes to column v+2 (columns 3..19 map to ACT1..ACT17)
results[k,test.sum$value+2]<-test.sum$weight
results[k,20]<-sum(test.sum$Freq)
}
print(k)
}
results[is.na(results)]<-0
#results2 <- results[which(results$count>5),]
saveRDS(results, file = "results_buffer.rds")
###############################################################################
# DATA CLEANING #
###############################################################################
# # get rid of NAs and compute class proportions
#
# ACT_red<-list()
# area_red<-list()
#
# for (i in 1:total){
# if(is.null(ACT_ext[[i]][[1]])==TRUE) {
# ACT_red[[i]]<-NA
# area_red[[i]]<-NA
# }else{
# if(isTRUE(unique(is.na(as.data.frame(ACT_ext[[i]])$value)))==TRUE){
# ACT_red[[i]]<-NA
# area_red[[i]]<-NA
# }else{
# f.correct<-1/sum(as.data.frame(ACT_ext[[i]])[!is.na(as.data.frame(ACT_ext[[i]])[,1]),2])
# reduced.na<-as.data.frame(ACT_ext[[i]])[!is.na(as.data.frame(ACT_ext[[i]])[,1]),]
# reduced.na[,2]<-reduced.na[,2]*f.correct
# ACT_red[[i]]<-reduced.na
# area_red[[i]]<-area[[i]]*sum(as.data.frame(ACT_ext[[i]])[!is.na(as.data.frame(ACT_ext[[i]])[,1]),2])
# }
# }
# }
#
# # create a dataframe for presenting the distribution of SPA-area per ACT
# results <- as.data.frame(matrix(NA,length(ACT_red),20))
# colnames(results) <- c("SITECODE","area",paste("ACT",c(1:17),sep=""),"count")
#
# for(k in 1:length(ACT_red)){
# results[k,1]<-as.character(overlapSPA)[k]
# results[k,2]<-area_red[[k]]
#
# if(is.na(ACT_red[[k]])==TRUE){
# results[k,3:19]<-NA
# } else{
# test.sum<-cbind(aggregate(weight~value,sum,data=ACT_red[[k]]),table(ACT_red[[k]]$value))
# results[k,test.sum$value+2]<-test.sum$weight
# results[k,20]<-sum(test.sum$Freq)
#
# }
# print(k)
# }
#
#
#
# results[is.na(results)]<-0
# results2 <- results[which(results$count>5),]
############################################################################
### relate conservation status with ACT via bird species ###
############################################################################
# `mydata` (per-site bird records incl. CONSERVATION status) and `bird`
# (species traits: migration, habitat preference) are created elsewhere.
birds<-(unique(mydata$SPECIESNAME))
length(unique(mydata$SPECIESCODE)) # 530 bird species
#for each species, link the ACTs (1 to 17) with the Conservation Status (for all sites
tabFinal <- merge(mydata,results, by="SITECODE")
tabFinal<-tabFinal[!duplicated(tabFinal), ]
tabFinal[is.na(tabFinal)] <- 0
#remove rows completely 0
#test<-apply(tabFinal[,7:23], MARGIN=1,FUN=sum)
#zero<-which(test==0)
#tabFinal<-tabFinal[-zero,]
#CStatus ordered factor
# conservation status as ordered factor A > B > C (A = best status)
CStatus<-droplevels(as.factor(tabFinal$CONSERVATION))
CStatus <- factor(CStatus,levels = c("A","B","C"),ordered = TRUE)
tabFinal$CONSERVATION<-CStatus
#tabFinal<-tabFinal[-which(tabFinal$SPECIESNAME=="--NULL--"),]
tabFinal$SPECIESNAME<-droplevels(tabFinal$SPECIESNAME)
# bird<-bird[1:511,1:3]
# colnames(bird)<-c("SPECIESCODE","migration","preference")
#bird <- bird[which(bird$migration=="mainly resident"),]
tabFinal_buffer <- merge(tabFinal,bird, by="SPECIESCODE")
#table(bird$SPECIESCODE)
#which(table(bird$SPECIESCODE)>1)
#save.image(file = "FAWKES.RData")
# NOTE(review): this saves tabFinal (WITHOUT the merged bird traits) under the
# "_buffer" name while tabFinal_buffer holds the merged table -- confirm which
# object downstream scripts expect from this file.
saveRDS(tabFinal, file = "tabFinal_buffer.rds")
############################################################################
###
############################################################################
# Aggregate the 17 raw ACT-class proportions into coarse land-change
# trajectories. Resulting columns: 1-7 site/species info, 8-11 the four
# summary trajectories (Conversion, Intens, De_intens, Stabil), 12-19 the
# nine detailed trajectories (Stabil is shared between both groupings).
dat_buffer <- data.frame(tabFinal_buffer[,c(1:6,24)],
Conversion = rowSums(tabFinal_buffer[,18:22]),
Intens = rowSums(tabFinal_buffer[,c(7:10,17)]),
De_intens = rowSums(tabFinal_buffer[,11:16]),
Stabil = c(tabFinal_buffer[,23]),
Int_crop = rowSums(tabFinal_buffer[,7:10]),
Ext_crop = rowSums(tabFinal_buffer[,11:13]),
Ext_pasture = rowSums(tabFinal_buffer[,14:16]),
Int_wood = c(tabFinal_buffer[,17]),
Cropland_loss = rowSums(tabFinal_buffer[,20:21]),
Forest_gain = c(tabFinal_buffer[,18]),
Forest_loss = c(tabFinal_buffer[,19]),
Urban = c(tabFinal_buffer[,22])
)
# NOTE(review): merged BEFORE the ACT_dom columns are added below, so
# tab_buffer lacks ACT_dom4/ACT_dom9 -- confirm that is intended.
tab_buffer <- merge(dat_buffer,bird, by="SPECIESCODE")
# dominant coarse trajectory per row (max.col on the one-row slice cols 8:11)
for (i in 1:length(dat_buffer$SPECIESCODE)){
dat_buffer$ACT_dom4[i]<-colnames(dat_buffer[i,8:11])[max.col(dat_buffer[i,8:11])]
}
# dominant detailed trajectory per row (cols 11:19, Stabil..Urban)
for (i in 1:length(dat_buffer$SPECIESCODE)){
dat_buffer$ACT_dom9[i]<-colnames(dat_buffer[i,11:19])[max.col(dat_buffer[i,11:19])]
}
saveRDS(dat_buffer, file = "dat_buffer.rds")
saveRDS(tab_buffer, file = "tab_buffer.rds")
#
#
# bird2<-reshape(aggregate (dat$ACT_dom, list(dat$SPECIESCODE,dat$ACT_dom), FUN=length), v.names="x", timevar="Group.2", idvar="Group.1", direction="wide")
# colnames(bird2)<-c("SPECIESCODE","Conversion", "De_intens" ,"Intens" ,"Stabil")
# bird<-merge(bird,bird2, by="SPECIESCODE")
#
# colnames(bird)<-c("SPECIESCODE", "migration" ,"preference","Conversion", "De_intens" ,"Intens" ,"Stabil")
#
# for (i in 1:length(bird$SPECIESCODE)){
#
# bird$ACT_dom_dom[i]<-colnames(bird[i,4:7])[max.col(bird[i,4:7])]
#
# }
#########################################
#### Ordered logistic regression analysis for habitat Conservation status (ABC) against levers trajectories (4 summarized)
#### This is using the extracted values from Levers in 10km buffers around all SPAs
#### inspiration for code found at https://stats.idre.ucla.edu/r/dae/ordinal-logistic-regression/
#### Authors: MB, TV ...
#########################################
#### re-calculate actual area, unit unknown ...
# absolute area per trajectory = ring area x trajectory proportion
dat_buffer$cc_area<-dat_buffer$area*dat_buffer$Conversion
dat_buffer$int_area<-dat_buffer$area*dat_buffer$Intens
dat_buffer$ext_area<-dat_buffer$area*dat_buffer$De_intens
dat_buffer$stab_area<-dat_buffer$area*dat_buffer$Stabil
#### create new dataframe, each species in one row, holding odds ratio (OR),
#### p-value and 95% CI bounds for each of the 12 land-change predictors.
#### Column names and order are unchanged from the original; the row count is
#### now derived from the data instead of the hard-coded 486, and the
#### placeholders are NA instead of meaningless 1..486 sequence numbers
#### (which previously survived for species that were skipped by the loop).
species_codes <- unique(droplevels(dat_buffer$SPECIESCODE))
or_predictors <- c("int", "ext", "cc", "stab",
                   "Int_crop", "Ext_crop", "Ext_pasture", "Int_wood",
                   "Cropland_loss", "Forest_gain", "Forest_loss", "Urban")
# four statistics per predictor, in the original column order
stat_names <- as.vector(vapply(or_predictors,
                               function(p) paste0(c("OR_", "p_", "LCI_OR_", "UCI_OR_"), p),
                               character(4)))
stat_matrix <- matrix(NA_real_, nrow = length(species_codes),
                      ncol = length(stat_names),
                      dimnames = list(NULL, stat_names))
bird_OR_results <- data.frame(SPECIESCODE = species_codes, stat_matrix,
                              stringsAsFactors = FALSE)
#### loop running individual models for all bird species
#### For every species we fit one proportional-odds logistic regression
#### (MASS::polr) per land-change predictor:
####   CONSERVATION (ordered C < B < A) ~ predictor + area
#### and store the odds ratio, its two-sided p-value and the
#### profile-likelihood 95% CI in bird_OR_results.

#### Fit a single polr model and return c(OR, p, lower CI, upper CI) for the
#### predictor coefficient. Returns all NA when the fit or the CI computation
#### fails -- the original copy-pasted code silently reused the stale ctable/ci
#### of the previous successful model in that case.
fit_one_predictor <- function(dat, predictor) {
  failed <- rep(NA_real_, 4)
  fml <- as.formula(paste0(
    "factor(CONSERVATION, levels = c('C', 'B', 'A'), ordered = TRUE) ~ ",
    predictor, " + area"))
  m <- tryCatch(polr(fml, data = dat, Hess = TRUE),
                error = function(e) {cat("ERROR :", conditionMessage(e), "\n"); NULL})
  if (is.null(m)) return(failed)
  ctable <- coef(summary(m))
  # two-sided p-values from the t statistics (same normal approximation as before)
  p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
  ci <- tryCatch(confint(m),
                 error = function(e) {cat("ERROR :", conditionMessage(e), "\n"); NULL})
  if (is.null(ci)) return(failed)
  res <- exp(cbind(OR = coef(m), ci))  # exponentiate to the odds-ratio scale
  c(res[1, 1], p[1], res[1, 2], res[1, 3])
}

#### predictors in the order their result columns appear in bird_OR_results
#### (OR_int..UCI_OR_int come from the Intens model, OR_ext from De_intens,
#### OR_cc from Conversion, OR_stab from Stabil, then the detailed classes)
model_terms <- c("Intens", "De_intens", "Conversion", "Stabil",
                 "Int_crop", "Ext_crop", "Ext_pasture", "Int_wood",
                 "Cropland_loss", "Forest_gain", "Forest_loss", "Urban")

for (i in seq_along(bird_OR_results$SPECIESCODE)) {
  one_bird <- dat_buffer[dat_buffer$SPECIESCODE == paste(bird_OR_results$SPECIESCODE[i]), ]
  # drop SPAs where all nine detailed trajectory shares (cols 11:19,
  # Stabil..Urban) are zero, i.e. no Levers data in the ring
  one_bird <- one_bird[(rowSums(one_bird[, 11:19]) == 0) <= 0, ]

  # skip all species that occur in less than 5 SPAs
  if (nrow(one_bird) < 5) {
    # blank ALL 48 result columns (the original only blanked columns 2:9,
    # leaving placeholder values in the remaining ones)
    bird_OR_results[i, 2:49] <- NA
  } else {
    for (j in seq_along(model_terms)) {
      first <- 4 * (j - 1) + 2  # OR, p, LCI, UCI occupy columns first..first+3
      bird_OR_results[i, first:(first + 3)] <- fit_one_predictor(one_bird, model_terms[j])
    }
  }
  print(i)
}

saveRDS(bird_OR_results, file = "bird_OR_results_buffer.rds")
bird_OR_results<-readRDS(file = "bird_OR_results_buffer.rds")
#### results summary & plot
# summary() on a logical vector yields counts of FALSE/TRUE/NA: the TRUE count
# is the number of species with a significant (p<0.05) negative (OR<1) or
# positive (OR>1) association for each predictor.
int_neg<-summary((bird_OR_results$OR_int<1 & bird_OR_results$p_int<0.05))
int_pos<-summary((bird_OR_results$OR_int>1 & bird_OR_results$p_int<0.05))
ext_neg<-summary((bird_OR_results$OR_ext<1 & bird_OR_results$p_ext<0.05))
ext_pos<-summary((bird_OR_results$OR_ext>1 & bird_OR_results$p_ext<0.05))
cc_neg<-summary((bird_OR_results$OR_cc<1 & bird_OR_results$p_cc<0.05))
cc_pos<-summary((bird_OR_results$OR_cc>1 & bird_OR_results$p_cc<0.05))
stab_neg<-summary((bird_OR_results$OR_stab<1 & bird_OR_results$p_stab<0.05))
stab_pos<-summary((bird_OR_results$OR_stab>1 & bird_OR_results$p_stab<0.05))
Int_crop_neg<-summary((bird_OR_results$OR_Int_crop<1 & bird_OR_results$p_Int_crop<0.05))
Int_crop_pos<-summary((bird_OR_results$OR_Int_crop>1 & bird_OR_results$p_Int_crop<0.05))
Ext_crop_neg<-summary((bird_OR_results$OR_Ext_crop<1 & bird_OR_results$p_Ext_crop<0.05))
Ext_crop_pos<-summary((bird_OR_results$OR_Ext_crop>1 & bird_OR_results$p_Ext_crop<0.05))
Ext_pasture_neg<-summary((bird_OR_results$OR_Ext_pasture<1 & bird_OR_results$p_Ext_pasture<0.05))
Ext_pasture_pos<-summary((bird_OR_results$OR_Ext_pasture>1 & bird_OR_results$p_Ext_pasture<0.05))
Int_wood_neg<-summary((bird_OR_results$OR_Int_wood<1 & bird_OR_results$p_Int_wood<0.05))
Int_wood_pos<-summary((bird_OR_results$OR_Int_wood>1 & bird_OR_results$p_Int_wood<0.05))
Cropland_loss_neg<-summary((bird_OR_results$OR_Cropland_loss<1 & bird_OR_results$p_Cropland_loss<0.05))
Cropland_loss_pos<-summary((bird_OR_results$OR_Cropland_loss>1 & bird_OR_results$p_Cropland_loss<0.05))
Forest_gain_neg<-summary((bird_OR_results$OR_Forest_gain<1 & bird_OR_results$p_Forest_gain<0.05))
Forest_gain_pos<-summary((bird_OR_results$OR_Forest_gain>1 & bird_OR_results$p_Forest_gain<0.05))
Forest_loss_neg<-summary((bird_OR_results$OR_Forest_loss<1 & bird_OR_results$p_Forest_loss<0.05))
Forest_loss_pos<-summary((bird_OR_results$OR_Forest_loss>1 & bird_OR_results$p_Forest_loss<0.05))
Urban_neg<-summary((bird_OR_results$OR_Urban<1 & bird_OR_results$p_Urban<0.05))
Urban_pos<-summary((bird_OR_results$OR_Urban>1 & bird_OR_results$p_Urban<0.05))
# NOTE(review): the first rbind (four summary trajectories) is dead code --
# it is overwritten by the next line, so only the detailed classes are plotted.
res_summary<-rbind(int_neg,int_pos,ext_neg,ext_pos,cc_neg,cc_pos,stab_neg,stab_pos)
res_summary<-rbind(Int_crop_neg,Int_crop_pos,Ext_crop_neg,Ext_crop_pos,Ext_pasture_neg,Ext_pasture_pos,Int_wood_neg,Int_wood_pos, Cropland_loss_neg,Cropland_loss_pos,Forest_gain_neg,Forest_gain_pos,Forest_loss_neg,Forest_loss_pos,Urban_neg,Urban_pos,stab_neg,stab_pos)
# drop the FALSE-count column; column 2 (TRUE counts) is plotted
res_summary<-res_summary[,-1]
barplot(as.numeric(res_summary[,2]),names.arg=c(rownames(res_summary)), col=c("red","green"), las=2)
# attach species traits, then repeat the significance summary per habitat
# preference group, writing one barplot PNG per group
bird_OR_results<-merge(bird_OR_results,bird, by = "SPECIESCODE")
bird_pref<-unique(bird_OR_results$preference)
for (i in 1:length(bird_pref)){
sub_bird<-bird_OR_results[bird_OR_results$preference==bird_pref[i],]
ext_neg<-summary((sub_bird$OR_ext<1 & sub_bird$p_ext<0.05))
ext_pos<-summary((sub_bird$OR_ext>1 & sub_bird$p_ext<0.05))
cc_neg<-summary((sub_bird$OR_cc<1 & sub_bird$p_cc<0.05))
cc_pos<-summary((sub_bird$OR_cc>1 & sub_bird$p_cc<0.05))
stab_neg<-summary((sub_bird$OR_stab<1 & sub_bird$p_stab<0.05))
stab_pos<-summary((sub_bird$OR_stab>1 & sub_bird$p_stab<0.05))
Int_crop_neg<-summary((sub_bird$OR_Int_crop<1 & sub_bird$p_Int_crop<0.05))
Int_crop_pos<-summary((sub_bird$OR_Int_crop>1 & sub_bird$p_Int_crop<0.05))
Ext_crop_neg<-summary((sub_bird$OR_Ext_crop<1 & sub_bird$p_Ext_crop<0.05))
Ext_crop_pos<-summary((sub_bird$OR_Ext_crop>1 & sub_bird$p_Ext_crop<0.05))
Ext_pasture_neg<-summary((sub_bird$OR_Ext_pasture<1 & sub_bird$p_Ext_pasture<0.05))
Ext_pasture_pos<-summary((sub_bird$OR_Ext_pasture>1 & sub_bird$p_Ext_pasture<0.05))
Int_wood_neg<-summary((sub_bird$OR_Int_wood<1 & sub_bird$p_Int_wood<0.05))
Int_wood_pos<-summary((sub_bird$OR_Int_wood>1 & sub_bird$p_Int_wood<0.05))
Cropland_loss_neg<-summary((sub_bird$OR_Cropland_loss<1 & sub_bird$p_Cropland_loss<0.05))
Cropland_loss_pos<-summary((sub_bird$OR_Cropland_loss>1 & sub_bird$p_Cropland_loss<0.05))
Forest_gain_neg<-summary((sub_bird$OR_Forest_gain<1 & sub_bird$p_Forest_gain<0.05))
Forest_gain_pos<-summary((sub_bird$OR_Forest_gain>1 & sub_bird$p_Forest_gain<0.05))
Forest_loss_neg<-summary((sub_bird$OR_Forest_loss<1 & sub_bird$p_Forest_loss<0.05))
Forest_loss_pos<-summary((sub_bird$OR_Forest_loss>1 & sub_bird$p_Forest_loss<0.05))
Urban_neg<-summary((sub_bird$OR_Urban<1 & sub_bird$p_Urban<0.05))
Urban_pos<-summary((sub_bird$OR_Urban>1 & sub_bird$p_Urban<0.05))
# NOTE(review): int_neg/int_pos here are stale values from the GLOBAL summary
# above (never recomputed for this subset) -- harmless only because this rbind
# is immediately overwritten by the next line.
res_summary<-rbind(int_neg,int_pos,ext_neg,ext_pos,cc_neg,cc_pos,stab_neg,stab_pos)
res_summary<-rbind(Int_crop_neg,Int_crop_pos,Ext_crop_neg,Ext_crop_pos,Ext_pasture_neg,Ext_pasture_pos,Int_wood_neg,Int_wood_pos, Cropland_loss_neg,Cropland_loss_pos,Forest_gain_neg,Forest_gain_pos,Forest_loss_neg,Forest_loss_pos,Urban_neg,Urban_pos,stab_neg,stab_pos)
res_summary<-res_summary[,-1]
# NOTE(review): stripping [[:punct:]] also removes the "." of ".png", so the
# written file has no extension ("... outside SPAs png") -- likely unintended.
png(gsub("[[:punct:]]", " ", paste(bird_pref[i],"outside SPAs.png")))
par(mar=c(10,3,3,3))
barplot(as.numeric(res_summary[,2]),names.arg=c(rownames(res_summary)), col=c("red","green"), las=2, main=paste(bird_pref[i],"outside SPAs"))
dev.off()
}
##### Guy-inspired plot
# One OR-with-CI dot plot per predictor. pcols indexes the p-value columns of
# bird_OR_results (ext, cc, stab, Int_crop .. Urban); relative offsets give the
# OR (pcols-1), lower CI (pcols+1) and upper CI (pcols+2) columns.
pcols<-c(15,19,23,27,31,35,39,43,47)
plot_list = list()
for (i in 1:length(pcols)){
# keep only species whose predictor p-value is significant
sub<-bird_OR_results[bird_OR_results[pcols[i]]<0.05,]
#sub<-sub[sub[pcols[i]+2]<10000,]
#sub<-sub[sub[pcols[i]-1]>0.00001,]
# order species by odds ratio and freeze that order as factor levels
sub<-sub[order(sub[pcols[i]-1]),]
sub$SPECIESCODE<-factor(sub$SPECIESCODE,levels=sub$SPECIESCODE)
sub$SPECIESCODE<-droplevels(sub$SPECIESCODE)
p = ggplot(sub[sub[pcols[i]-1]<10,], aes_string(x = colnames(sub[1]), y = colnames(sub[pcols[i]-1]))) +
geom_point(size = 4) +
geom_hline(yintercept=1)+
ylim(0,25)+
geom_errorbar(aes_string(ymax = colnames(sub[pcols[i]+1]), ymin = colnames(sub[pcols[i]+2])))
# plot_list[[i]] = p
ggsave(p, file=paste0("plot_", colnames(sub[pcols[i]-1]),".png"))
}
############################################################################
### Derichlet-Regression
############################################################################
### aggregation of data on either site or species level (both offer alternative ways to interpret the data)
### Aggregate on site level. We'll keep 1 entry per site giving us the proportion of Levers trajectories ("int","ext","cc","stab") within each site as well as the proportion of conservation classes of ALL birds within each site (ABC) + area of site and levers pixel count
# NOTE(review): `tab` is built elsewhere (not in this script); its columns
# 6-11 are assumed to be area, count and the four summary trajectories.
sites<-unique(tab$SITECODE)
mat_sites<-as.data.frame(matrix(NA,length(sites), 9))
colnames(mat_sites)<-c("A","B","C","int","ext","cc","stab","area", "count")
for(j in 1:length(sites)){
sub<-subset(tab,tab$SITECODE==sites[j])
# share of A/B/C conservation records among all birds of this site
prop.Cstat<-table(sub$CONSERVATION)/sum(table(sub$CONSERVATION))
mat_sites[j,1:3]<-prop.Cstat
# trajectory proportions, area and pixel count are per-site constants,
# so the first row of the subset suffices
mat_sites[j,4:7]<-sub[1,8:11]
mat_sites[j,8]<-sub[1,6]
mat_sites[j,9]<-sub[1,7] # count
}
### Aggregate on species level. We'll keep 1 entry per species giving us the proportion of Levers trajectories ("int","ext","cc","stab") the species encounters within all sites. This is done as a two-step aggregation: first for each site the species occurs in the majority trajectory is selected and second, these majority trajectories are summarized as proportions across all sites the species occurs in. In addition we have the proportion of conservation classes of that species within all sites (ABC).
species<-unique(tab$SPECIESCODE)
mat_bird<-as.data.frame(matrix(NA,length(species), 10))
colnames(mat_bird)<-c("A","B","C","int","ext","cc","stab", "SPECIESCODE","migration","preference")
for(j in 1:length(species)){
sub<-subset(tab,tab$SPECIESCODE==species[j])
# majority (largest-share) trajectory per site the species occurs in
lu_agg<-factor()
levels(lu_agg)<-c("Conversion","Stabil","De_intens","Intens")
for(k in 1:length(sub$SPECIESCODE)){
lu_agg[k]<-names(which.max(sub[k,8:11]))
}
mat_bird[j,4:7]<-table(lu_agg)/sum(table(lu_agg))
prop.Cstat<-table(sub$CONSERVATION)/sum(table(sub$CONSERVATION))
mat_bird[j,1:3]<-prop.Cstat
# NOTE(review): this OVERWRITES the majority-trajectory proportions computed
# just above with the first site's raw trajectory shares -- one of the two
# assignments is presumably a leftover; confirm which is intended.
mat_bird[j,4:7]<-sub[1,8:11]
mat_bird[j,8]<-as.character(sub[1,1])
mat_bird[j,9]<-as.character(sub[1,12])
mat_bird[j,10]<-as.character(sub[1,13])
}
#remove all birds that have no levers data at all (~50)
mat_bird<-mat_bird[rowSums((mat_bird[,4:7])==0)<=0,]
### Site-based Drichlet analysis
# DR_data/DirichReg come from the DirichletReg package: model the (A,B,C)
# conservation-status composition per site as a Dirichlet response.
ABC_status <- DR_data(mat_sites[, 1:3])
plot(ABC_status, cex = 0.5, a2d = list(colored = FALSE, c.grid = FALSE))
# NOTE(review): `each = 39` hard-codes the number of sites for the point
# colours -- should presumably be nrow(mat_sites); verify.
plot(rep(mat_sites$int, 3), as.numeric(ABC_status), pch = 21,cex=0.2, bg = rep(c("#E495A5", "#86B875", "#7DB0DD"), each = 39), xlab = "intensity", ylab = "Proportion",ylim = 0:1)
first_model <- DirichReg(ABC_status ~ mat_sites$int+mat_sites$cc+mat_sites$ext)
summary(first_model)
# refit with intensification only, then overlay fitted curves for A/B/C
first_model <- DirichReg(ABC_status ~ mat_sites$int)
plot(rep(mat_sites$int, 3), as.numeric(ABC_status), pch = 21,cex=0.2, bg = rep(c("#E495A5", "#86B875", "#7DB0DD"), each = 39), xlab = "intensity", ylab = "Proportion",ylim = 0:1)
Xnew <- data.frame(int = seq(min(mat_sites$int), max(mat_sites$int),length.out = 100))
for (i in 1:3) lines(cbind(Xnew, predict(first_model, Xnew)[, i]), col = c("#E495A5", "#86B875", "#7DB0DD")[i], lwd = 2)
#
pre<-predict(first_model)
plot(pre[,1]~mat_sites$int)
plot(pre[,2]~mat_sites$int)
plot(pre[,3]~mat_sites$int)
boxplot(mat_sites[,1:3])
### Species-based Drichlet analysis
# dominant conservation class per species (max.col on the one-row slice)
for (i in 1:length(mat_bird$SPECIESCODE)){
mat_bird$ABC_dom[i]<-colnames(mat_bird[i,1:3])[max.col(mat_bird[i,1:3])]
}
# dominant trajectory per record of `tab`
for (i in 1:length(tab$SPECIESCODE)){
tab$ACT_dom[i]<-colnames(tab[i,8:11])[max.col(tab[i,8:11])]
}
# NOTE(review): `dat` is created elsewhere (inside-SPA analogue of dat_buffer)
dat$cc_area<-dat$area*dat$Conversion
dat$int_area<-dat$area*dat$Intens
dat$ext_area<-dat$area*dat$De_intens
dat$stab_area<-dat$area*dat$Stabil
# worked single-species example: A030 (black stork per EU species codes --
# TODO confirm)
one_bird<-dat[dat$SPECIESCODE=="A030",]
#head(one_bird)
# first two characters of SITECODE encode the member state
one_bird$country<-factor(substr((one_bird$SITECODE),1,2))
#lapply(one_bird[, c("CONSERVATION","cc_area")], table)
one_bird<-(one_bird[(rowSums((one_bird[,8:10]))==0)<=0,])
m<-polr(formula=CONSERVATION~ACT_dom+area, data=one_bird,Hess=TRUE)
ctable <- coef(summary(m))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
ctable <- cbind(ctable, "p value" = p)
ci <- confint(m)
confint.default(m)
exp(coef(m))
exp(cbind(OR = coef(m), ci))
summary(factor(one_bird$ACT_dom))
ggplot(one_bird, aes(x = CONSERVATION, y = int_area)) +
geom_boxplot(size = .75) +
geom_jitter(alpha = .5) +
facet_grid(ACT_dom~country, margins = TRUE) +
theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
# empirical log-odds of being at/above each status level, used to eyeball the
# proportional-odds assumption
sf <- function(y) {
c('Y>=1' = qlogis(mean(y >= 1)),
'Y>=2' = qlogis(mean(y >= 2)),
'Y>=3' = qlogis(mean(y >= 3)))
}
# NOTE(review): summary(formula, fun=) requires Hmisc::summary.formula to be
# loaded -- confirm Hmisc is attached elsewhere.
s <- with(one_bird, summary(as.numeric(CONSERVATION) ~ ACT_dom + area, fun=sf))
glm(I(as.numeric(CONSERVATION) >= 2) ~ ACT_dom, family="binomial", data = one_bird)
glm(I(as.numeric(CONSERVATION) >= 3) ~ ACT_dom, family="binomial", data = one_bird)
s[, 4] <- s[, 4] - s[, 3]
s[, 3] <- s[, 3] - s[, 3]
plot(s, which=1:3, pch=1:3, xlab='logit', main=' ', xlim=range(s[,3:4]))
# hard-coded species-code lists for farmland and forest indicator birds;
# the trend text files are assumed to be in EXACTLY the same row order
# (cbind matches by position, not by code) -- verify when data change.
farmland_birds <- c("A247","A110", "A255", "A257", "A025", "A133", "A243","A366", "A031","A348", "A377", "A376", "A379",
"A382", "A096","A244","A245", "A251", "A338", "A339", "A341","A156","A242", "A383","A260","A278","A356",
"A112","A357", "A275","A276", "A361","A210", "A352", "A351", "A309", "A128", "A232", "A142")
trends_farmland <- read.delim("trends_farmland.txt", header=FALSE)
trends_farmland <- cbind(farmland_birds, trends_farmland)
colnames(trends_farmland) <- c("Speciescode", "Speciesname", "Baseyear", "Trend")
farmland_trends <- as.vector(trends_farmland$Trend)
# NOTE(review): "A332" appears twice in this vector -- probably a typo for
# another code; check against the source list.
forest_birds <- c("A086", "A256", "A263", "A104", "A365", "A335", "A334", "A373", "A207", "A454", "A238", "A240",
"A236", "A542", "A321", "A332", "A342", "A344", "A328", "A327", "A326", "A325",
"A274", "A313", "A315", "A314", "A234", "A372", "A318", "A317", "A362", "A332", "A165", "A287")
trends_forest <- read.delim("trends_forest.txt", header=FALSE)
trends_forest <- cbind(forest_birds, trends_forest)
colnames(trends_forest) <- c("Speciescode", "Speciesname", "Baseyear", "Trend")
forest_trends <- as.vector(trends_forest$Trend)
trends_all <- read.delim("trends_all.txt", header=FALSE)
colnames(trends_all) <- c("SPECIESCODE", "Speciesname", "Baseyear", "Trend")
# stack farmland + forest trends and join onto the species-level table
trends_farmland$type<-"farmland"
trends_forest$type<-"forest"
trends<-rbind(trends_farmland,trends_forest)
colnames(trends)[1]<-"SPECIESCODE"
mat_bird$SPECIESCODE<-factor(mat_bird$SPECIESCODE)
mat_bird_trends<-merge(mat_bird,trends,by="SPECIESCODE")
# mixed model: share of class C vs. trajectories and population trend
# (lme from nlme -- assumed attached elsewhere)
summary(lme(C~int+cc+Trend, random= ~ 1|SPECIESCODE,data=mat_bird_trends))
boxplot(A~Trend, data=mat_bird_trends)
############################################################################
### 6. correlations between Conservation status and ACTs
############################################################################
###Describ. Stat.
# trajectory shares overall and split by conservation status
boxplot(dat[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(tabFinal[,7:23],col=c("red","red","red","red","lightgreen","lightgreen",
"lightgreen","lightgreen","lightgreen","lightgreen","red",
"darkred","darkred","darkred","darkred","darkred",
"green"))
par(mfrow=c(3,1))
boxplot(subset(dat,dat$CONSERVATION=="A")[,c(8:11)],col="green")
boxplot(subset(dat,dat$CONSERVATION=="B")[,c(8:11)],col="yellow")
boxplot(subset(dat,dat$CONSERVATION=="C")[,c(8:11)],col="red")
##################### models for subgroup of birds #######################
prey <- subset(tab, tab$preference=="birds of prey")
open_c <- subset(tab, tab$preference=="open land carnivores")
open_h <- subset(tab, tab$preference=="open land herbivores" )
coastal <- subset(tab, tab$preference=="coastal species")
lakes <- subset(tab, tab$preference=="wetland/lakes")
wood_c <- subset(tab, tab$preference=="woodland carnivores" )
wood_h <- subset(tab, tab$preference=="woodland herbivores")
par(mfrow=c(3,1))
# NOTE(review): subsets open_h using open_c's CONSERVATION column -- almost
# certainly a copy-paste slip; rows only align by accident, if at all.
boxplot(subset(open_h,open_c$CONSERVATION=="A")[,c(8:11)],col="green")
boxplot(subset(open_h,open_c$CONSERVATION=="B")[,c(8:11)],col="yellow")
boxplot(subset(open_h,open_c$CONSERVATION=="C")[,c(8:11)],col="red")
par(mfrow=c(1,1))
boxplot(prey[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(open_c[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(open_h[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(wood_c[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(wood_h[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(lakes[,8:11],col=c("darkred","red","lightgreen","green"))
boxplot(coastal[,8:11],col=c("darkred","red","lightgreen","green"))
# non-parallel cumulative-logit model (VGAM::vglm) for one subgroup.
# NOTE(review): `pom_prey` is fitted on `coastal`, not `prey` -- the data
# argument was apparently swapped per run; the name is misleading.
pom_prey <- vglm(factor(CONSERVATION,levels = c("A","B","C"),ordered = TRUE)~Conversion+Intens+De_intens+area,data=coastal, family = cumulative(parallel=FALSE))
pom.ll <- logLik(pom_prey)
pom0 <- vglm(factor(CONSERVATION,levels = c("A","B","C"),ordered = TRUE)~1,data=coastal, family = cumulative(parallel=FALSE))
p0.ll <- logLik(pom0)
# R^2 McFadden
pom.mcfad <- as.vector(1 - (pom.ll/p0.ll))
pom.mcfad
# prey: 0.00369, open_c: 0.0087, open_h: 0.004609, wood_c: 0.00709, wood_h: -5.73 ???, lakes: 0.01467, coastal: 0.00853
# counts > 5: prey: 0.0104, open_c: 0.0076, open_h: 0.0157, wood_c: 0.0131, wood_h: -2,09 ???, lakes: 0.00613, coastal: 0.0059
# counts > 5 & "mainly resident": prey: 0.01333, open_c: 0.0074, open_h: 0.02249, wood_c: 0.0188, wood_h: -2,2 ???, lakes: 0.00817, coastal: 0.00269
# "mainly resident": prey: 0.01046, open_c: 0.0076, open_h: 0.0157, wood_c: 0.01314, wood_h: 0.2263, lakes: 0.0061, coastal: 0.005969
# genus name = first word of the species name (result `name` is unused below)
a<-strsplit(as.character(dat$SPECIESNAME), " ")
name<-c()
for(i in 1:length(a)){
name[i] <-a[[i]][1]
}
# null and full cumulative-logit models on the complete inside-SPA table
pom0 <- vglm(factor(CONSERVATION,levels = c("A","B","C"),ordered = TRUE)~1,data=dat, family = cumulative(parallel=FALSE))
pom <- vglm(factor(CONSERVATION,levels = c("A","B","C"),ordered = TRUE)~Conversion+Intens+De_intens+area,data=dat, family = cumulative(parallel=FALSE))
# interactive help lookup left in the script
?vglm
#plotvglm(pom)
summary(pom)
pom.ll <- logLik(pom)
pom.ll
p0.ll <- logLik(pom0)
p0.ll
# R^2 McFadden
pom.mcfad <- as.vector(1 - (pom.ll/p0.ll))
pom.mcfad
# R^2 Cox&Snell
N <- length(dat[, 1]) # number of cases (original German comment was mojibake)
pom.cox <- as.vector(1 - exp((2/N) * (p0.ll - pom.ll)))
pom.cox
# R^2 Nagelkerke
pom.nagel <- as.vector((1 - exp((2/N) * (p0.ll - pom.ll)))/(1 - exp(p0.ll)^(2/N)))
pom.nagel
#Functions for PseudoR2s
# Pseudo-R-squared measures for binomial GLMs.
#
# Either supply a fitted binary-logistic `model` (class "glm"), or supply
# the observed 0/1 responses `obs` together with fitted probabilities
# `pred`, in which case a logit GLM is refitted internally.
#
# Returns a list with the Cox-Snell, Nagelkerke, McFadden, Tjur and
# squared-Pearson pseudo-R2 values.
RsqGLM <- function(obs = NULL, pred = NULL, model = NULL) {
  # version 1.2 (3 Jan 2015); vectorised if() conditions fixed
  model.provided <- !is.null(model)
  if (model.provided) {
    if (!inherits(model, "glm")) stop("'model' must be of class 'glm'.")
    if (!is.null(pred)) message("Argument 'pred' ignored in favour of 'model'.")
    if (!is.null(obs)) message("Argument 'obs' ignored in favour of 'model'.")
    obs <- model$y
    pred <- model$fitted.values
  } else { # if model not provided
    if (is.null(obs) || is.null(pred)) stop("You must provide either 'obs' and 'pred', or a 'model' object of class 'glm'")
    if (length(obs) != length(pred)) stop("'obs' and 'pred' must be of the same length (and in the same order).")
    # `obs` and `pred` are vectors, so the checks must be collapsed with
    # any(); the original fed length > 1 conditions straight to if(),
    # which is an error in R >= 4.2.
    if (any(!(obs %in% c(0, 1))) || any(pred < 0) || any(pred > 1)) stop("Sorry, 'obs' and 'pred' options currently only implemented for binomial GLMs (binary response variable with values 0 or 1) with logit link.")
    logit <- log(pred / (1 - pred))
    model <- glm(obs ~ logit, family = "binomial")
  }
  null.mod <- glm(obs ~ 1, family = family(model))
  loglike.M <- as.numeric(logLik(model))
  loglike.0 <- as.numeric(logLik(null.mod))
  N <- length(obs)
  # based on Nagelkerke 1991:
  CoxSnell <- 1 - exp(-(2 / N) * (loglike.M - loglike.0))
  Nagelkerke <- CoxSnell / (1 - exp((2 * N ^ (-1)) * loglike.0))
  # based on Allison 2014:
  McFadden <- 1 - (loglike.M / loglike.0)
  Tjur <- mean(pred[obs == 1]) - mean(pred[obs == 0])
  sqPearson <- cor(obs, pred) ^ 2
  list(CoxSnell = CoxSnell, Nagelkerke = Nagelkerke, McFadden = McFadden,
       Tjur = Tjur, sqPearson = sqPearson)
}
|
0050413a4eaac06037edad5bcb81196c32005068 | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /DUC-Dataset/Summary_p100_R/D092.AP900626-0010.html.R | 7667b154b46ee1a28d75ca4552cb77fddf78db6d | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 989 | r | D092.AP900626-0010.html.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:113">
</head>
<body bgcolor="white">
<a href="#0" id="0">After the June 4, 1989, military crackdown on the democracy movement, Fang and Li obtained refuge in the U.S. Embassy.</a>
<a href="#1" id="1">Americans who rushed to donate money to help victims of the Armenian earthquake 18 months ago are responding more slowly to last week's killer earthquake in Iran.</a>
<a href="#2" id="2">Iran is believede-scale structure in the universe.</a>
<a href="#3" id="3">Colenso said, however, that contributions will ``surge upward'' as news spreads of the earthquake's destruction.</a>
<a href="#4" id="4">AmeriCares, a private agency based in New Canaan, Conn., brought the first shipment of supplies from the United States into Iran on Sunday.</a>
<a href="#5" id="5">AWARDS: National Award for Science and Technology, 1978; Chinese Academy of Sciences Award 1982; New York Academy of Sciences Award, 1988.</a>
</body>
</html> |
f2e302bc6a9d4b7d0723ffe78ee0aec530bedc41 | 0cd778de37c5ce532f22bfe1363ea3820cf43582 | /Taller Integración Numérica/Taller Integrales.R | b6d705a7270b447b98e6d9ed201e8ea2f8dfb890 | [
"CC0-1.0"
] | permissive | GabrielGomez9898/Analisis-Numerico | 873b413e481781152ff12f74f3464b8c6c9d6df0 | b1bd6b5ccfd1d0102863371fbf09d4544a328105 | refs/heads/master | 2020-12-26T07:45:44.406075 | 2020-05-25T03:46:38 | 2020-05-25T03:46:38 | 237,437,384 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,194 | r | Taller Integrales.R | #TALLER INTEGRALES
rm(list=ls())
#PUNTO A -----------------------------------------------------------------------------
# Punto A: single-interval trapezoidal rule for f(x) = sin(2x) on [a, b].
f <- function(x) {
  sin(2 * x)
}
# Trapezoid approximation: (b - a) * (f(a) + f(b)) / 2.
trapecio <- function(a, b) {
  width <- b - a
  width * ((f(a) + f(b)) / 2)
}
trapecio(0, 2)
#PUNTO B -----------------------------------------------------------------------------
x<-c(0.1,0.2,0.3,0.4,0.5)
y<-c(1.8,2.6,3.0,2.8,1.9)
mat <- matrix(c(0,0,0,0,0,0), nrow=1,ncol = 5, byrow=TRUE)
options(digits = 16)
vec<-c(0)
for(i in 1:length(x)){
for(j in 0:length(x)){
vec[j+1]=x[i]^j
}
mat<-rbind(mat,c(vec))
}
mat <- mat[1:length(x)+1,]
coef_pol <- (solve(mat,y)) #el solve solo sirve para matrices cuadradas
cat("Resultado Polinomio: f(x)", coef_pol[5],"x^4 +",coef_pol[4],"X^3 +",coef_pol[3],"X^2 +",coef_pol[2],"X +",coef_pol[1])
# Degree-4 interpolating polynomial fitted above, evaluated from the
# solved coefficient vector coef_pol: f(x) = c5*x^4 + c4*x^3 + ... + c1.
f<-function(x){
return( coef_pol[5]*x^4 +coef_pol[4]*x^3 +coef_pol[3]*x^2 +coef_pol[2]*x +coef_pol[1])
}
# Simpson-style weighted sum of f over the sample points in vector_x:
# weight 1 at both endpoints, alternating 4, 2, 4, ... in between.
# NOTE(review): the result is returned WITHOUT the usual h/3 factor of
# composite Simpson's rule (here h = 0.1) -- confirm whether the caller
# accounts for the step size, otherwise the value is 3/h times too large.
simpson <- function(vector_x){
#h=0.1
# `cont` indexes the current sample; `const` alternates the 4/2 weights.
cont=1
secuencia_simpson=0
cont=1
const=4
for(i in 1:length(vector_x)){
# First point: weight 1.
if(cont==1){
secuencia_simpson=secuencia_simpson +f(vector_x[cont])
}
# Interior points: weights alternate 4 and 2.
if(cont > 1 && cont < length(vector_x)){
if(const==4){
secuencia_simpson=secuencia_simpson + 4*f(vector_x[cont])
const=2
}else{
secuencia_simpson=secuencia_simpson + 2*f(vector_x[cont])
const=4
}
}
# Last point: weight 1.
if(cont==length(vector_x)){
secuencia_simpson=secuencia_simpson +f(vector_x[cont])
}
cont=cont+1
}
resultado=secuencia_simpson
cat("Valor de la integral: ",resultado)
return(resultado)
}
valorAprox=simpson(x)
integrate(f,1.6,2)
valorReal=27.44311111111179
errorRelativo= abs((valorReal-valorAprox)/valorAprox)
cat("Error relativo: ",errorRelativo)
#REGLA TRAPECIO
trapecio<- function (a,b){
h = b-a
trap = h*((f(b)+f(a))/2)
return(trap)
}
trapecio (1.6,2)
#PUNTO C -------------------------------------------------------------------------------
#Aplicar metodo de Simpson para solucionar la integral
options(digits = 16)
# Integrand: sqrt(1 + cos(x)^2), the arc-length element of sin(x).
f <- function(x) {
  sqrt(1 + cos(x) * cos(x))
}
# Composite Simpson's rule for f on [a, b] with n subintervals.
#
# n should be even for the classic 1, 4, 2, ..., 4, 1 weight pattern to
# be a valid Simpson rule. Fixes w.r.t. the original:
#   * the nodes now start at `a` (the original always built them from 0,
#     which was only correct for a = 0);
#   * the weighted sum is built vectorised instead of with manual
#     counter/flag bookkeeping;
#   * the value is returned invisibly (the original returned cat()'s
#     NULL), so callers can store the result; printing is unchanged.
simpson <- function(a, b, n) {
  h <- (b - a) / n
  xn <- a + h * (0:n)                              # n + 1 equally spaced nodes
  w <- c(1, rep(c(4, 2), length.out = n - 1), 1)   # Simpson weights
  resultado <- (b - a) / (3 * n) * sum(w * f(xn))
  cat("Valor de la integral: ", resultado)
  invisible(resultado)
}
simpson(0, 2, 4)
#PUNTO D ------------------------------------------------------------------------------
#Utilizar cuadratura de Gauss para aproximar la integral
options(digits = 16)
# Original integrand f(t) = t * exp(t), integrated over [1, 2].
f <- function(t) {
  t * exp(t)
}
# Two-point Gauss-Legendre quadrature for the integral of f over [1, 2].
#
# The nodes on [-1, 1] are x = +/- sqrt(1/3), both with weight 1; the
# substitution t = (x + 1)/2 + 1 maps [-1, 1] onto [1, 2] and carries the
# Jacobian (b - a)/2 = 1/2.
#
# Fixes w.r.t. the original:
#   * the integrand is now evaluated at BOTH nodes +xk and -xk (the loop
#     previously evaluated f_nueva(xk) twice);
#   * missing parentheses meant t*exp(t) was never actually computed
#     after the substitution -- f_nueva now simply calls f at the
#     substituted argument;
#   * the Jacobian factor 1/2 was missing.
# Prints the approximation (as before) and returns it invisibly.
cuadratura_gauss <- function() {
  xk <- sqrt(1 / 3)          # Gauss-Legendre nodes: +xk and -xk
  nodes <- c(xk, -xk)
  ck <- c(1, 1)              # both 2-point Gauss-Legendre weights equal 1
  # f after the change of variable t = (x + 1)/2 + 1.
  f_nueva <- function(x) {
    f((x + 1) / 2 + 1)
  }
  sumatoria_gauss <- sum(ck * f_nueva(nodes))
  resultado <- 0.5 * sumatoria_gauss   # Jacobian of the change of variable
  cat("Solucion de integral por cuadrantes de Gauss: ", resultado)
  invisible(resultado)
}
cuadratura_gauss()
#PUNTO E ------------------------------------------------------------------------------
options(digits = 16)
# Two-point Gauss-Legendre quadrature for the integral of t*exp(t) over
# [1, 2], computed as the sum of the quadrature over [1, 1.5] plus the
# quadrature over [1.5, 2].
# NOTE(review): each accumulation loop evaluates f_nueva only at +xk
# (never at -xk), and the substituted integrand appears to be missing the
# parentheses needed to actually compute t*exp(t) -- verify the printed
# numbers before reusing them.
cuadratura_gauss_partida<-function(){
m=2 # to obtain a rule exact for polynomials of degree <= 2
xk = sqrt(1/3) # nodes x_k: roots of the degree-2 Legendre polynomial
# Weights Ck: a degree-two rule needs only 2 points, i.e. 2 Xn and 2 Ck.
c1=(2*(1-(xk)^2))/(m^2*(xk)^2)
c2=(2*(1-(-xk)^2))/(m^2*(-xk)^2)
ck<-c(c1,c2)
# Change of variable t -> x from [1, 1.5] to [-1, 1] using the formula
# (t - a)/(b - a); solving gives t = (0.5x + 0.5)/2 + 1, which is
# substituted into the original integrand to obtain the new function:
f_nueva<- function(x){
return(((0.5*x+0.5)/2)+1*exp((0.5*x+0.5)/2)+1)
}
cont=1
sumatoria_gauss=0
for (i in 1:length(ck)) {
sumatoria_gauss=sumatoria_gauss+(ck[cont]*f_nueva(xk))
cont=cont+1
}
# The factor 1/2 is the Jacobian of the change of variable.
integral_1=sumatoria_gauss*(1/2)
cat("integral 1: ",integral_1)
# Change of variable t -> x from [1.5, 2] to [-1, 1] using the formula
# (t - a)/(b - a); solving gives t = (0.5x + 0.5)/2 + 1.5, substituted
# into the original integrand to obtain the new function:
f_nueva<- function(x){
return(((0.5*x+0.5)/2)+1.5*exp((0.5*x+0.5)/2)+1.5)
}
cont=1
sumatoria_gauss=0
for (i in 1:length(ck)) {
sumatoria_gauss=sumatoria_gauss+(ck[cont]*f_nueva(xk))
cont=cont+1
}
integral_2=sumatoria_gauss*(1/2)
cat("integral 2: ",integral_2)
cat("\n")
cat("Sumatoria de las dos particiones de integrales: ",integral_1+integral_2)
}
cuadratura_gauss_partida()
|
5c08a6cd5fbc3c91c3a2bc35e7c413b9c14bb5df | c0afe781ba59fd9bfe43ccf4819bf854fe0f683d | /주요 변수 양상.R | 20c108e9edffb7bb911e82fbc26d87c0265c5105 | [] | no_license | CHO-MINJUNG/BigContest2020 | 340d0605dbd2ded30aea620fd049a4be7278cb28 | 49faeeee3f3493bc0778d875091850ff4e47cda3 | refs/heads/master | 2022-12-11T04:12:17.914406 | 2020-09-15T17:24:35 | 2020-09-15T17:24:35 | 288,389,040 | 0 | 0 | null | null | null | null | UHC | R | false | false | 4,271 | r | 주요 변수 양상.R | library(fpp2)
data = read.csv('C:/Users/chomjung/OneDrive - 명지대학교/빅콘테스트2020/1차_최종_데이터/1차_최종_최근_팀타자.csv', stringsAsFactors =TRUE, header = TRUE, encoding="UTF-8")
NCdata= subset(data, 팀코드=='NC')
SKdata= subset(data, 팀코드=='SK')
KTdata= subset(data, 팀코드=='KT')
OBdata= subset(data, 팀코드=='OB')
WOdata= subset(data, 팀코드=='WO')
HHdata= subset(data, 팀코드=='HH')
SSdata= subset(data, 팀코드=='SS')
LGdata= subset(data, 팀코드=='LG')
HTdata= subset(data, 팀코드=='HT')
LTdata= subset(data, 팀코드=='LT')
# NC
NCdata <- ts(NCdata$잔루,start = 2016,end = 2019,frequency = 20)
NCdata = stl(NCdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(NCdata)
ft = max(0,1-var(NCdata$time.series[,3])/(var(NCdata$time.series[,2]+NCdata$time.series[,3]))); ft
fs = max(0,1-var(NCdata$time.series[,3])/(var(NCdata$time.series[,1]+NCdata$time.series[,3]))); fs
# SK: STL decomposition of the 2루타 (doubles) series.
# The column name starts with a digit, so it is not a syntactic R name
# and must be quoted with backticks; the original `SKdata$2루타` is a
# parse error.
# NOTE(review): read.csv() with the default check.names = TRUE would have
# renamed this column to "X2루타" on import -- confirm against the CSV.
SKdata <- ts(SKdata$`2루타`, start = 2016, end = 2019, frequency = 20)
SKdata = stl(SKdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(SKdata)
# Strength statistics computed from the decomposition components
# (assumes stl's time.series columns are seasonal, trend, remainder --
# confirm): ft uses trend + remainder, fs uses seasonal + remainder.
ft = max(0,1-var(SKdata$time.series[,3])/(var(SKdata$time.series[,2]+SKdata$time.series[,3]))); ft
fs = max(0,1-var(SKdata$time.series[,3])/(var(SKdata$time.series[,1]+SKdata$time.series[,3]))); fs
# KT
KTdata <- ts(KTdata$단타,start = 2016,end = 2019,frequency = 20)
KTdata = stl(KTdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(KTdata)
ft = max(0,1-var(KTdata$time.series[,3])/(var(KTdata$time.series[,2]+KTdata$time.series[,3]))); ft
fs = max(0,1-var(KTdata$time.series[,3])/(var(KTdata$time.series[,1]+KTdata$time.series[,3]))); fs
# OB
OBdata <- ts(OBdata$단타,start = 2016,end = 2019,frequency = 20)
OBdata = stl(OBdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(OBdata)
ft = max(0,1-var(OBdata$time.series[,3])/(var(OBdata$time.series[,2]+OBdata$time.series[,3]))); ft
fs = max(0,1-var(OBdata$time.series[,3])/(var(OBdata$time.series[,1]+OBdata$time.series[,3]))); fs
# WO
WOdata <- ts(WOdata$단타,start = 2016,end = 2019,frequency = 20)
WOdata = stl(WOdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(WOdata)
ft = max(0,1-var(WOdata$time.series[,3])/(var(WOdata$time.series[,2]+WOdata$time.series[,3]))); ft
fs = max(0,1-var(WOdata$time.series[,3])/(var(WOdata$time.series[,1]+WOdata$time.series[,3]))); fs
# HH
HHdata <- ts(HHdata$단타,start = 2016,end = 2019,frequency = 20)
HHdata = stl(HHdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(HHdata)
ft = max(0,1-var(HHdata$time.series[,3])/(var(HHdata$time.series[,2]+HHdata$time.series[,3]))); ft
fs = max(0,1-var(HHdata$time.series[,3])/(var(HHdata$time.series[,1]+HHdata$time.series[,3]))); fs
# SS
SSdata <- ts(SSdata$득점권타수,start = 2016,end = 2019,frequency = 20)
SSdata = stl(SSdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(SSdata)
ft = max(0,1-var(SSdata$time.series[,3])/(var(SSdata$time.series[,2]+SSdata$time.series[,3]))); ft
fs = max(0,1-var(SSdata$time.series[,3])/(var(SSdata$time.series[,1]+SSdata$time.series[,3]))); fs
# LG
LGdata <- ts(LGdata$득점권타수,start = 2016,end = 2019,frequency = 20)
LGdata = stl(LGdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(LGdata)
ft = max(0,1-var(LGdata$time.series[,3])/(var(LGdata$time.series[,2]+LGdata$time.series[,3]))); ft
fs = max(0,1-var(LGdata$time.series[,3])/(var(LGdata$time.series[,1]+LGdata$time.series[,3]))); fs
# HT
HTdata <- ts(HTdata$득점권타수,start = 2016,end = 2019,frequency = 20)
HTdata = stl(HTdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(HTdata)
ft = max(0,1-var(HTdata$time.series[,3])/(var(HTdata$time.series[,2]+HTdata$time.series[,3]))); ft
fs = max(0,1-var(HTdata$time.series[,3])/(var(HTdata$time.series[,1]+HTdata$time.series[,3]))); fs
# LT
LTdata <- ts(LTdata$득점권타수,start = 2016,end = 2019,frequency = 20)
LTdata = stl(LTdata, t.window=13, s.window="periodic", robust=TRUE)
autoplot(LTdata)
ft = max(0,1-var(LTdata$time.series[,3])/(var(LTdata$time.series[,2]+LTdata$time.series[,3]))); ft
fs = max(0,1-var(LTdata$time.series[,3])/(var(LTdata$time.series[,1]+LTdata$time.series[,3]))); fs
|
cdfa1da78ef8bf525427ad3c0096193350c8f715 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Rlab/examples/skew.Rd.R | a4d19e696d3005563c5a9ee173b736c4a54aad4c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 230 | r | skew.Rd.R | library(Rlab)
### Name: skew
### Title: 3rd moment skewness ratio
### Aliases: skew
### Keywords: univar
### ** Examples
set.seed(1)
x <- rexp(100)
# Get skewness coefficient estimate for exponential distribution
skew(x)
|
894b0b5d4ddf9b0be388c069ec51b97a1ecf7f4e | b9e54258e540f0a0447045729bb4eecb0e490426 | /Bölüm 17 - Veri Modelleme Aşamaları ve Veri Ön İşleme/17.13 - Cold Deck Imputation.R | 79dd04abf145c29d2f591b8b326e40423a4697fd | [] | no_license | sudedanisman/RUdemy | b36b67b9e875206a5424f33cc784fd13506f8d8d | 28a9814706873f5d2e5985e4ba795354144d52c4 | refs/heads/master | 2023-01-30T01:54:26.321218 | 2020-12-14T11:36:00 | 2020-12-14T11:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 564 | r | 17.13 - Cold Deck Imputation.R | ## Cold Deck Imputatiion
df <- student_placement_data_with_NA
View(df)
index <- which(is.na(df$percentage.in.Algorithms))
index
length(index)
# certifications
# workshops
index[1]
names(df)
df[c("certifications" , "workshops")][index[1] , ]
d <- subset(df ,
select = c("percentage.in.Algorithms" , "certifications" , "workshops"),
subset = ( certifications == "python" & workshops == "data science" ) )
ort <- mean(d$percentage.in.Algorithms , na.rm = T)
ort
df$percentage.in.Algorithms[index[1]] <- ort
df$percentage.in.Algorithms[31]
|
c95286baafd08433dbf140fc779b22e57d450bfa | e4c44366d606a27749d0d230e316ef46cf41d6af | /ImmoDaten/Immo_SAR_benchmark.R | eee9e05d53fb9904166cf0fdb9836164d5c88460 | [] | no_license | JohannesJacob/APA-SatelliteImages | fe53a34b0301a05a9618f8f206743bce10104242 | 8487188ae1d3e2e8cb4da5ee5d2df4a96bc14853 | refs/heads/master | 2020-04-05T12:09:02.217440 | 2017-08-31T13:40:12 | 2017-08-31T13:40:12 | 95,221,425 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,738 | r | Immo_SAR_benchmark.R | # Spatial Auto-Regression on Immo-Data
library(HSAR)
df_street <- read.csv("Immo_streetCoordinates.csv")
df_street_noNA <- df_street[complete.cases(df_street),]
# Convert data frame to a spatial object
library(sp)
spdf_street <- SpatialPointsDataFrame(coords = df_street_noNA[, c("lng", "lat")],
proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"),
data = df_street_noNA)
coords <- coordinates(spdf_street)
# extract the land parcel level spatial weights matrix
library(spdep)
nb.1.5 <- dnearneigh(coords, d1 = 0, d2 = 1.5 * max_1nn, row.names = IDs)
knn.10 <- knearneigh(coords, k = 10)
knn.10 <- knn2nb(knn.10)
# to a weights matrix
dist.1.5 <- nbdists(nb.1.5,spdf_street) # or knn.10
#dist.1.5 <- lapply(dist.25,function(x) exp(-0.5 * (x / 2500)^2))
mat.1.5 <- nb2mat(nb.1.5,glist=dist.1.5,style="W")
W <- as(mat.1.5,"dgCMatrix")
## run the sar() function
res.formula <- price ~.
model <- lm(formula=res.formula,data=df_street_noNA)
betas= coef(lm(formula=res.formula, data=df_street_noNA))
# minus NA coefficients
betas= coef(lm(formula=res.formula,
data=df_street_noNA[, -c(1:5,81, 92,118:121,128,130,134,137:140,143,146,149,152,155:158,
126, 162,165,168,171,174,177:180,186,189,192:199)]))
pars=list(rho = 0.5, sigma2e = 2.0, betas = betas)
res <- sar(res.formula,data=df_street_noNA[, -c(1:5,81,92,118:121,128,130,134,137:140,143,146,149,152,155:158,
126, 162,165,168,171,174,177:180,186,189,192:199)],
W=W, burnin=5000, Nsim=10000, thinning=0, parameters.start=pars)#changed thinning to zero
summary(res)
|
aad332a282e3fa84545887bf82eed7bcf4859241 | 3c4298be0c3b00e13690f11616097207e3215612 | /man/reverse.Rd | 97deecef5baeff681bbcfe9b90b9dfd8dd1e2dbe | [] | no_license | cran/quest | 4ab4799f3d5a4f1fa980927ad7d7d7022e2aa2b6 | 0724147528574656cba132d423346d59514e5780 | refs/heads/master | 2023-08-03T09:11:28.856628 | 2021-09-10T10:20:02 | 2021-09-10T10:20:02 | 405,129,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,082 | rd | reverse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quest_functions.R
\name{reverse}
\alias{reverse}
\title{Reverse Code a Numeric Vector}
\usage{
reverse(x, mini, maxi)
}
\arguments{
\item{x}{numeric vector.}
\item{mini}{numeric vector of length 1 specifying the minimum numeric value.}
\item{maxi}{numeric vector of length 1 specifying the maximum numeric value.}
}
\value{
numeric vector that correlates exactly -1 with \code{x}.
}
\description{
\code{reverse} reverse codes a numeric vector based on minimum and maximum
values. For example, say numerical values of response options can range from
1 to 4. The function will change 1 to 4, 2 to 3, 3 to 2, and 4 to 1. If there
are an odd number of response options, the middle in the sequence will be
unchanged.
}
\examples{
x <- psych::bfi[[1]]
head(x, n = 15)
y <- reverse(x = psych::bfi[[1]], min = 1, max = 6)
head(y, n = 15)
cor(x, y, use = "complete.obs")
}
\seealso{
\code{\link{reverses}}
\code{\link[psych]{reverse.code}}
\code{\link[car]{recode}}
}
|
23e4412d116d4c31ad20df8b9f790485707517d0 | 2ab96d6236c3e25ceb0a128e53bf025c413f04e8 | /Week7/dataSketch.R | fcfa8dfe53b706a0554cc53bcca0c74c32318c75 | [] | no_license | miamiww/ComputationalMedia | 0eb66b206fb353ecc7497a8bb9ef7d0e41362a1b | f925983eb1a075cdc39444f17706b23c9a6eea10 | refs/heads/master | 2021-01-23T07:09:13.441410 | 2017-12-11T19:32:32 | 2017-12-11T19:32:32 | 102,499,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 63 | r | dataSketch.R |
airData <- read.csv("PRSA-adapted-aparrish.csv")
str(airData)
|
bb9a7a903ab23aa43386d04ea6eef133a606fba4 | b39f4a7acf1766383efbeafb4c10ef00cfdaafbe | /man/load_bps.Rd | 4b7a7672991be58b42ae1198c0108feebce222de | [] | no_license | hmorzaria/atlantisom | 6e7e465a20f3673d3dbc843f164fe211faddc6fb | b4b8bd8694c2537e70c748e62ce5c457113b796f | refs/heads/master | 2020-04-02T02:24:05.672801 | 2015-12-12T00:06:03 | 2015-12-12T00:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,639 | rd | load_bps.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_bps.R
\name{load_bps}
\alias{load_bps}
\title{Extracts the names of the epibenthic biomasspools from the initial conditions file.}
\usage{
load_bps(dir = getwd(), fgs, file_init)
}
\arguments{
\item{dir}{The directory of the atlantis model output, where the
default is \code{getwd()}.}
\item{fgs}{A data frame created by \code{\link{load_fgs}} that
reads in the csv file containing functional group names, usually
\code{"functionalGroups.csv"}.}
\item{file_init}{A character value giving the file name of the initial conditions
file. The file should be located in your current working directory or the
folder you specify in \code{dir}.
The argument can also be specified as the full path name, just as long as
argument \code{dir} is specified as \code{NULL}.
Usually the file is named \code{"init[...].nc".}, but it must end in \code{.nc}.}
}
\value{
A \code{vector} of biomass pools.
}
\description{
Use \code{fgs} \code{data.frame} as read in by \code{\link{load_fgs}}
to get the biomass pool information.
}
\examples{
testscenario <- "INIT_VMPA_Jan2015"
d <- system.file("extdata", testscenario, package = "atlantisom")
file <- "functionalGroups.csv"
fgs <- load_fgs(d, file)
bps <- load_bps(dir = d, fgs = fgs, file_init = "INIT_VMPA_Jan2015.nc")
}
\author{
Alexander Keth
}
\seealso{
\code{\link{load_fgs}}
Other load functions: \code{\link{load_biolprm}},
\code{\link{load_boxarea}}, \code{\link{load_box}},
\code{\link{load_diet_comp}}, \code{\link{load_fgs}},
\code{\link{load_meta}}, \code{\link{load_nc}},
\code{\link{load_runprm}}
}
|
c484a9096223b0d739fbf5ce172808b2770d183d | d5ecf34f545c6dfffb920cdaad3b8c89bf23af74 | /shiny/personnasIdentificateur/loadData.R | 58f65e7128a73fa15e7d1ab16599692273769b07 | [
"LPPL-1.3c"
] | permissive | davebulaval/Actulab_COOP | 8b69bc082e28909354eef4c9aea75c5d605edc4e | 4a6c652ac0d378ff4163aa10a1da5fa853839c6c | refs/heads/master | 2021-08-30T08:09:21.487975 | 2017-12-16T23:17:17 | 2017-12-16T23:17:17 | 109,923,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 717 | r | loadData.R | # Shapefile
FSA.shape <- rgdal::readOGR(dsn = "shapefileFSA",
layer = "grta000a11a_f")
FSA.shapeQC <- subset(FSA.shape, substr(FSA.shape$RTACIDU, 1, 1) %in% c("G", "H", "J"))
# Import dataset
library(readr)
dataSetPredict <- read_csv("DatasetModif/dataSetPredict.csv",
col_types = cols(X1 = col_skip()))
probColocation <- read_csv("DatasetModif/probColocation.csv",
col_types = cols(X1 = col_skip()))
dataEducation <- read_csv("DatasetModif/dataEducation.csv",
col_types = cols(X1 = col_skip()))
dataEmploi <- read_csv("DatasetModif/dataEmploi.csv",
col_types = cols(X1 = col_skip()))
|
b680603f66715eb99e54a5f32534680f31618384 | 582436aa42fd1930c663c0b63ca440f1b96111ab | /3_Experiments_Stabilising_AHHA_SPACE/2_Iterative_Stabilisation/Scripts/Iterative_Stabilisation.R | 5d7597a52f9139ff1a5f5b1e7cc052ed3e14f4c3 | [] | no_license | JamesCollerton/MSc_Thesis | 1543f2c5e498b536d90e7e5eb787f491cb317867 | 9ff52287102005f86dabc1230c1768564d6ac4d0 | refs/heads/master | 2020-05-25T10:57:45.341264 | 2017-03-26T10:06:23 | 2017-03-26T10:06:23 | 42,585,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,810 | r | Iterative_Stabilisation.R | # - James Collerton
# - Student Number 46114
# - Source Code for MSc Thesis
# Stabilising the SPACE model by iteration. This script requires some manual
# work as it is quicker to manually alter this script and use the results with
# the Eureqa desktop application than to try and automate the whole pipeline
# using the Eureqa API. Also, the Eureqa API is fairly limited in what it can
# be applied to as it is limited in the number of variables/ amount of data
# it can take in.
###############################################################################
# Global Variables and settings.
# Setting the current directory to where we have stored our data.
setwd("~/Documents/Programming/Git_Folders/MSc-Thesis-Code/Submission/3_Experiments_Stabilising_AHHA_SPACE/2_Iterative_Stabilisation/Data")
# Later this will be filled with the new, stable components.
new_data = list()
# Read these in either for comparison or to form part of the model.
# We need unstable_df and data as the same thing, as one will be overwritten
# throughout the program, whilst the other will not.
unstable_df = read.csv("unstable_data.txt")
comparison_data = read.csv("Training_Data.txt")
data = read.csv("unstable_data.txt")
# Initial conditions.
N = 200
Q = data$Q[1]
initial_scouts = data$S[1]
total_passive = N - initial_scouts
# Indicators for the data vectors.
T_ind = 1
S_ind = 2
P0_ind = 3
P1_ind = 4
P2_ind = 5
A1_ind = 6
A2_ind = 7
R1_ind = 8
R2_ind = 9
###############################################################################
# Initialising
# Initial conditions. These have been hardcoded for the single trial, but
# could easily be changed to data$S[1], data$A1[1] etc. to allow flexibility.
S = initial_scouts
A_one = 0
A_two = 0
R_one = 0
R_two = 0
P_zero = total_passive
P_one = 0
P_two = 0
# Initialising empty vectors to store the results in.
S_vec = c()
A_one_vec = c()
A_two_vec =c()
R_one_vec = c()
R_two_vec = c()
P_zero_vec = c()
P_one_vec = c()
P_two_vec = c()
###############################################################################
# Differential functions
# Eureqa-fitted rate of change of the scout count S. The fitted
# expression is a quartic polynomial in t alone; S, Q and N are accepted
# only to keep a uniform signature across the dX/dt functions.
dS <- function(S, t, Q, N) {
  1.06669377213794e-9*t^3 - 0.0130383120110502 -
    0.000168544171939409*t - 6.65198441305169e-13*t^4 -
    2.20232908623747e-7*t^2
}
# Eureqa-fitted rate equations for the remaining SPACE model components.
# Logical comparisons such as (R2 > 0) evaluate to 0/1 and act as
# indicator (switch) terms inside the fitted expressions; unused
# parameters are kept so every dX/dt function shares a uniform interface
# with the simulation loop below.
# Rate of change of assessors at site one.
dA_one <- function(S, P1, P2, R2, t, Q, N){
result = 0.00231744609341487*S + 0.942990934721937*R2*
(
(R2 > 0) * 0.00157400238903202 +
(R2 <= 0) * cos(0.855282848706793*S)
) -
0.0434850619886643 - 2.33968166891488e-5*S^2 -
0.00563945347163448*sin(5.19729925857688 + 0.364203873387924*S)
return(result)
}
# Rate of change of assessors at site two.
dA_two <- function(S, P1, P2, A1, t, Q, N){
result = 0.000282667076235126*A1 + 0.0950374171430637/S +
0.000297132846101472*A1*P2 + 0.030200635796381*
(10.6088331903667 < S) - 0.0249260226030619 -
0.000370254067916185*P2 - 0.0042652733456114*P2*
(10.6088331903667 < S) - 7.45572148229618e-6*S*P2*A1^2
return(result)
}
# Rate of change of recruiters at site one.
dR_one <- function(S, A1, A2, P1, P2, t, Q, N){
result = 0.0266282174935782 + 0.009329126372239*S +
0.0231123108033275*(P1 > 0) - 6.99557347486849e-5*t -
0.00535770880365551*A1 - 0.000343308507165349*S*A1 -
0.000231772856346287*S^2
return(result)
}
# Rate of change of recruiters at site two.
dR_two <- function(S, A1, A2, P1, P2, R1, t, Q, N){
result = 0.00410346545735221*S - 0.0110700090049996 -
9.0219126132179e-5*
(
(S <= 6.52877096324257) * (-75.5959114648096) +
(S > 6.52877096324257) * S
)*
(
(0.425348476983269 < P1) * 1.92739381263045 +
(0.425348476983269 >= P1) * (S - 9.6421558706767*P1)
)
return(result)
}
# Rate of change of the passive population at site one.
dP_one <- function(S, P2, t, Q, N){
result = 0.00011585819289194*P2*
(72.3159612664517 <= P2)*cos(-0.0123745627255911*t)
return(result)
}
# Rate of change of the passive population at site two.
dP_two <- function(S, t, Q, N){
result = 0.00235796547198259*t*exp(-0.255561049906936*S) -
cos(S)*exp(-0.377989973882601*S)
return(result)
}
###############################################################################
# Updating functions.
# Each setter overwrites one column of `data` with the stabilised
# trajectory produced by the current simulation run, so that the next
# Eureqa fit is declared on top of the already-stable components (e.g.
# stabilise the scouts first, then build the next function on them).
# Data frames are copied on modification, so the caller's object only
# changes through the returned value.
update_S <- function(S_vec, data) {
  data$S <- S_vec
  data
}
update_P2 <- function(P2_vec, data) {
  data$P2 <- P2_vec
  data
}
update_P1 <- function(P1_vec, data) {
  data$P1 <- P1_vec
  data
}
update_A1 <- function(A1_vec, data) {
  data$A1 <- A1_vec
  data
}
update_A2 <- function(A2_vec, data) {
  data$A2 <- A2_vec
  data
}
update_R1 <- function(R1_vec, data) {
  data$R1 <- R1_vec
  data
}
update_R2 <- function(R2_vec, data) {
  data$R2 <- R2_vec
  data
}
# This updates the training data file. For example, lets say that at the start
# we have the variable S, and we define a function dS/dt = g(t, Q, N), then
# we know that each time we run the simulation the S values won't change. We then
# update the training data with these new stable S values, and build the next term
# on top of that. So dA1/dt = h(S, t, Q, N).
# Write the accumulated stabilised runs back to disk as the next round of
# training data. `new_data` is a list of data frames (one per run), each
# containing the columns S, A1, A2, P0, P1, P2, R1, R2, Q, t, N; the runs
# are stacked row-wise and saved as new_data.txt without row names.
#
# Improvement: the original grew eleven vectors with c() inside a loop
# (quadratic in total rows); a single rbind over the list is equivalent
# here because every run is derived from the same frame and shares the
# same columns.
# NOTE(review): the hard-coded setwd() mutates global state for the whole
# session; kept for compatibility with the rest of the pipeline.
write_new_file <- function(new_data){
  setwd("~/Documents/Programming/Git_Folders/MSc-Thesis-Code/Submission/3_Experiments_Stabilising_AHHA_SPACE/2_Iterative_Stabilisation/Data")
  stacked <- do.call(rbind, new_data)
  write_df <- stacked[, c("S", "A1", "A2", "P0", "P1", "P2", "R1", "R2", "Q", "t", "N")]
  write.csv(write_df, "new_data.txt", row.names = FALSE)
}
###############################################################################
# Running model with the underlying data included
t = 1
while(t <= nrow(data)){
S_vec = c(S_vec, S)
A_one_vec = c(A_one_vec, A_one)
A_two_vec = c(A_two_vec, A_two)
R_one_vec = c(R_one_vec, R_one)
R_two_vec = c(R_two_vec, R_two)
P_one_vec = c(P_one_vec, P_one)
P_two_vec = c(P_two_vec, P_two)
S = S + dS(S, t, Q, N)
A_one = A_one + dA_one(S, P_one, P_two, R_two, t, Q, N)
A_two = A_two + dA_two(S, P_one, P_two, A_one, t, Q, N)
R_one = R_one + dR_one(S, A_one, A_two, P_one, P_two, t, Q, N)
R_two = R_two + dR_two(S, A_one, A_two, P_one, P_two, R_one, t, Q, N)
P_one = P_one + dP_one(S, P_two, t, Q, N)
P_two = P_two + dP_two(S, t, Q, N)
t = t + 1
}
# Plotting the results
# Plot one model component next to its residuals.
#   vec           - stabilised model trajectory (drawn dodgerblue)
#   compare_data  - underlying training data (drawn firebrick1)
#   unstable_data - unstabilised model output (drawn lightgreen)
#   y_label       - y-axis label; leg_x / leg_y position the legend.
# Left panel: the three series; right panel: compare_data - vec.
# Fix: the legend previously paired "Underlying" with dodgerblue and
# "Stabilised" with firebrick1 -- the reverse of the drawn colours.
plot_result <- function(vec, compare_data, unstable_data, y_label, leg_x, leg_y){
  par(mfrow = c(1, 2))
  plot(compare_data, type = "l", col = "firebrick1", xlab = "Time",
       ylab = y_label, lwd = 2)
  lines(vec, col = "dodgerblue", lwd = 2)
  lines(unstable_data, col = "lightgreen", lwd = 2)
  legend(leg_x, leg_y,
         c("Underlying", "Stabilised", "Unstabilised"),
         lty = c(1, 1, 1),
         lwd = c(2, 2, 2),
         col = c("firebrick1", "dodgerblue", "lightgreen"),
         cex = 0.8)
  plot(compare_data - vec, type = "l", col = "red", lwd = 2.5,
       xlab = "Time", ylab = "Difference in Functions")
}
# Calculating the R squared value.
# Coefficient of determination R^2 = 1 - SS_res / SS_tot.
# Fix: SS_tot must be taken about the mean of the OBSERVED values; the
# original centred it on mean(estimate), which biases the statistic
# (the variable was even named obs_mean, suggesting this intent).
R_squared <- function(estimate, real_values){
  obs_mean <- mean(real_values)
  SS_res <- sum((estimate - real_values)^2)
  SS_tot <- sum((real_values - obs_mean)^2)
  1 - SS_res / SS_tot
}
# Mean squared error
# Mean squared error between an estimated and an observed series.
MSE <- function(estimate, real_values) {
  mean((real_values - estimate)^2)
}
# Mean Absolute Error
# Mean absolute error between an estimated and an observed series.
MAE <- function(estimate, real_values) {
  mean(abs(real_values - estimate))
}
# Creating vector of the R squared values.
# Named vector of R^2 values, one per simulated component, against the
# underlying training data. Reads the *_vec simulation results and
# comparison_data from the enclosing (global) environment.
create_Rsq_vec <- function(){
c(S_rsq = R_squared(S_vec, comparison_data$S),
A1_rsq = R_squared(A_one_vec, comparison_data$A1),
A2_rsq = R_squared(A_two_vec, comparison_data$A2),
R1_rsq = R_squared(R_one_vec, comparison_data$R1),
R2_rsq = R_squared(R_two_vec, comparison_data$R2),
P1_rsq = R_squared(P_one_vec, comparison_data$P1),
P2_rsq = R_squared(P_two_vec, comparison_data$P2))
}
# Creating vector of the MAE values.
# Same pattern for the mean absolute errors.
create_MAE_vec <- function(){
c(S_MAE = MAE(S_vec, comparison_data$S),
A1_MAE = MAE(A_one_vec, comparison_data$A1),
A2_MAE = MAE(A_two_vec, comparison_data$A2),
R1_MAE = MAE(R_one_vec, comparison_data$R1),
R2_MAE = MAE(R_two_vec, comparison_data$R2),
P1_MAE = MAE(P_one_vec, comparison_data$P1),
P2_MAE = MAE(P_two_vec, comparison_data$P2))
}
# Creating vector of the MSE values.
# Same pattern for the mean squared errors.
create_MSE_vec <- function(){
c(S_MSE = MSE(S_vec, comparison_data$S),
A1_MSE = MSE(A_one_vec, comparison_data$A1),
A2_MSE = MSE(A_two_vec, comparison_data$A2),
R1_MSE = MSE(R_one_vec, comparison_data$R1),
R2_MSE = MSE(R_two_vec, comparison_data$R2),
P1_MSE = MSE(P_one_vec, comparison_data$P1),
P2_MSE = MSE(P_two_vec, comparison_data$P2))
}
###############################################################################
# Running the plotting of the results.
plot_result(S_vec, comparison_data$S, unstable_df$S, "Number of Scouts", 0, 14)
plot_result(A_one_vec, comparison_data$A1, unstable_df$A1, "Number of Assessors (Site One)", 0, 5.6)
plot_result(A_two_vec, comparison_data$A2, unstable_df$A2, "Number of Assessors (Site Two)", 0, 4.6)
plot_result(R_one_vec, comparison_data$R1, unstable_df$R1, "Number of Recruiters (Site One)", 0, 10.4)
plot_result(R_two_vec, comparison_data$R2, unstable_df$R2, "Number of Recruiters (Site Two)", 0, 27.5)
plot_result(P_one_vec, comparison_data$P1, unstable_df$P1, "Number of Passive (Site One)", 0, 1.5)
plot_result(P_two_vec, comparison_data$P2, unstable_df$P2, "Number of Passive (Site Two)", 0, 133)
###############################################################################
# Updating the values and preparing the new data vector to be written.
data = update_S(S_vec, data)
data = update_P2(P_two_vec, data)
data = update_P1(P_one_vec, data)
data = update_A1(A_one_vec, data)
data = update_A2(A_two_vec, data)
data = update_R1(R_one_vec, data)
data = update_R2(R_two_vec, data)
new_data[[length(new_data) + 1]] <- data
###############################################################################
# Calculating the R squared values, MAE and MSE.
create_Rsq_vec()
create_MAE_vec()
create_MSE_vec()
###############################################################################
###############################################################################
# Write new files
# write_new_file(new_data)
###############################################################################
|
6329639c8d75827b81a5ec9074c0c979b0b76151 | 6fec9d9c8a3718bd3e45a1db4d854732ef875a07 | /LeadWorkerStudy/lead_data_strunc.R | fc13ca0c6d3530dd8ba843f52b8281e6111191d2 | [] | no_license | bikram12345k/senstP | 35884caf87724e129f5b3e9138ba88f38f66ef16 | 2b30a3d1a9c9ec5aadb4402f6ef8fe0bba71bbcd | refs/heads/master | 2021-07-13T16:31:31.738108 | 2021-03-11T18:50:03 | 2021-03-11T18:50:03 | 239,898,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,902 | r | lead_data_strunc.R | library(senstrat)
library(sensitivitymw)
library(sensitivitymv)
library(struncatedP)
library(partitions)
library(foreach)
library(doSNOW)
# Lead-worker study: sensitivity analysis of five ordered comparisons.
# For each sensitivity parameter Gamma in gamSeq, compute the upper-bound
# p-value of each comparison and store them as one row of res_original.
lead <- read.csv('lead_absorption1.csv')
# Grid of sensitivity parameters (Gamma = 1 is the randomization inference).
gamSeq = seq(1, 5, .05)
# One row per Gamma, one column per comparison (5 comparisons below).
res_original = matrix(NA, length(gamSeq), 5)
for(gamma in gamSeq){
pvals <- c()
## Exposed child vs control child
# Matched pairs: assumes rows 1:33 are exposed children and rows 34:66 their
# matched controls -- TODO confirm against the data layout.
pvals <- c(pvals, senmw(cbind(lead$lead[1:33], lead$lead[(33+1):(33+33)]),
gamma=gamma, m1=7, m2=8, m=8)$pval)
## High and Medium vs Low exposure
# Within exposed-group workers, score outcomes with the U788 rank score and
# run a two-sample sensitivity test (one stratum).
who <- lead$exposed_gp==1
st = rep(1, sum(who))
z = lead[who, 'job_exposure'] %in% c("h","m")
sc = rscores(lead$lead[who], z, st, method="U788")
pvals <- c(pvals, sen2sample(sc, z, alternative="greater", gamma=gamma )$pval)
## High vs Medium exposure
who <- lead$exposed_gp==1 & lead$job_exposure != "l"
z = lead[who, 'job_exposure'] == "h"
st = rep(1, sum(who))
sc = rscores(lead$lead[who], z, st, method="U788")
pvals <- c(pvals, sen2sample(sc, z, alternative="greater", gamma=gamma )$pval)
## Good vs Moderate or poor hygiene
# Among high-exposure workers only.
who <- lead$exposed_gp==1 & lead$job_exposure == "h"
st = rep(1, sum(who))
z = lead[who, 'hygiene'] != "g"
sc = rscores(lead$lead[who], z, st, method="U788")
pvals <- c(pvals, sen2sample(sc, z, alternative="greater", gamma=gamma )$pval)
## Moderate vs poor hygiene
who <- lead$exposed_gp==1 & lead$job_exposure == "h" & lead$hygiene != 'g'
st = rep(1, sum(who))
z = lead[who, 'hygiene'] == "p"
sc = rscores(lead$lead[who], z, st, method="U788")
pvals <- c(pvals, sen2sample(sc, z, alternative="greater", gamma=gamma )$pval)
#pvals
# Store this Gamma's five p-values in the matching row.
res_original[which(gamma==gamSeq),] = pvals
n = length(pvals)
}
#round(res, 6)
rownames(res_original) = gamSeq
#round(res_original, 6)
# Build a truncated weight function for p-values: below .5 the input is
# rescaled from the window (trunc, .5) onto (something f understands) and
# passed through f; at or above .5 the weight is 0.
foo <- function(f, trunc = .15) {
  width <- .5 - trunc
  function(x) ifelse(x < .5, f((x - trunc) / width), 0)
}
## Table 4
# Combine the five component p-values at each Gamma with five methods:
#  col 1: Simes-type bound  n * min_i p_(i) / i  (n = 5);
#  col 2: n * min(p) -- labelled 'Holm' below although the formula is the
#         Bonferroni bound -- NOTE(review): confirm intended label;
#  col 3: Fisher's chi-square combination of all five p-values;
#  col 4: truncated product method at truncation point .2;
#  col 5: smooth-truncated variant (getstruncatedPval1 / xiVp are assumed to
#         come from the struncatedP package -- TODO confirm).
tab = round(t(apply(res_original[,c(1,2,3,4,5)], 1, function(p) c(
(5)*min(tail(sort(p), 5)/(1:(5))),
sort(p)[1]*(5),
pchisq(-2*log(prod(tail(sort(p), 5))), 2*(5), lower.tail=FALSE),
truncatedP(p, trunc=.2),
getstruncatedPval1(p, xi=foo(xiVp, trunc=.2), trunc=.2, N=20))
)), 5)
rownames(tab) = gamSeq
colnames(tab) = c('Simes', 'Holm', 'Fisher', 'truncatedP', 'struncatedP')
tab
##########################################################################
### Supplementary functions
rscores <- function(y, z, st = NULL, tau = 0,
                    method = c("wilcoxon", "cs1", "cs2", "savage", "cs4", "sh", "U545", "U788")) {
  # Stratified rank scores: within each stratum of `st`, rank the (optionally
  # tau-adjusted) outcomes and map the ranks through score(). Singleton
  # strata receive score 0.
  #
  # y      : outcome vector.
  # z      : treatment indicator, same length as y (used only when tau != 0).
  # st     : stratum labels; NULL means a single stratum.
  # tau    : hypothesized treatment effect removed from treated outcomes.
  # method : score family forwarded to score().
  #
  # Returns a numeric vector of scores aligned with y.
  method <- match.arg(method)
  stopifnot(length(tau) == 1)
  stopifnot(length(y) == length(z))
  if (is.null(st)) {
    st <- rep(1, length(y))
  }
  stopifnot(length(st) == length(y))
  if (tau != 0) {
    y <- y - z * tau
  }
  sc <- rep(NA, length(y))
  for (stratum in unique(st)) {
    members <- st == stratum
    y_in <- y[members]
    n_in <- length(y_in)
    if (n_in == 1) {
      sc[members] <- 0
    } else if (n_in >= 2) {
      sc[members] <- score(rank(y_in), n_in, method = method)
    }
  }
  sc
}
score <- function(j, Ns, method=c("W", "wilcoxon", "cs1", "cs2", "savage", "cs4", "sh", "U545", "U788"), a=5){
  # Rank-score transform used by rscores().
  #
  # j      : rank(s) within a stratum (scalar or vector).
  # Ns     : stratum size.
  # method : score family; defaults to "W" (raw ranks).
  # a      : order parameter for the "cs1"/"cs2"/"sh" families.
  #
  # Returns a numeric vector of scores, one per element of j.
  method = match.arg(method)
  sj <- switch(method, W = j, wilcoxon = j/(Ns+1),
               cs1 = sapply(j, function(j) prod( seq(j/(Ns+1), (j+a-2)/(Ns+1), by=1/(Ns+1)))),
               cs2 = (j/(Ns+1))^(a-1),
               savage = sapply(j, function(j) sum(1/(1-1:j/(Ns+1)))),
               cs4 = -log(1-j/(Ns+1)),
               # BUG FIX: previously `by=1/(Ns+1)` sat OUTSIDE seq() (so it was
               # silently multiplied into prod() as an extra factor) and the upper
               # endpoint read (j-1)/Ns+1, i.e. (j-1)/Ns + 1, instead of
               # (j-1)/(Ns+1). Construction now mirrors the cs1 branch.
               sh = sapply(j, function(j) ifelse(j < a, 0,
                    prod(seq((j-a+1)/(Ns+1), (j-1)/(Ns+1), by=1/(Ns+1))))),
               U545 = sapply(j, function(j) ifelse(j<5, 0, (Ns*choose(Ns, 5)^(-1))*sum( choose((j-1), (4-1):(5-1))*choose((Ns-j), (5-4):(5-5)) ))),
               U788 = sapply(j, function(j) ifelse(j<8, 0, (Ns*choose(Ns, 8)^(-1))*sum( choose((j-1), (7-1):(8-1))*choose((Ns-j), (8-7):(8-8)) ))) )
  sj
}
|
72d12635eed75cc355cbcbc2164cedb0bd5db104 | b1de6b75f59c9d003c922b8c796fb04115224bfc | /man/dice.Rd | c30dec3ae737856655887847ef4a0d5575bb1723 | [] | no_license | muschellij2/msseg | 4e282048d785907c3558ec1f1c9ae371c299e2e0 | ed85b43b4395d87500d15c66fbc5738b0c516868 | refs/heads/master | 2020-05-21T20:57:52.329193 | 2019-01-29T15:34:34 | 2019-01-29T15:34:34 | 62,831,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 427 | rd | dice.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dice.R
\name{dice}
\alias{dice}
\title{Calculate Dice Coefficient}
\usage{
dice(prediction.obj)
}
\arguments{
\item{prediction.obj}{object of class \code{\link{prediction-class}} from
\code{\link{prediction}}}
}
\value{
Object of class \code{\link{performance}}
}
\description{
Calculate Dice Coefficient/Similarity Index from a prediction object
}
|
056dcecbc2f260c47714e7f9c85e1ec2ac7adf8b | 592b4e685540c531e2d2130048de1cc3cdf6503b | /man/is.memo.Rd | 6657667211f422622bf1860b26df4b9286478b46 | [] | no_license | rwetherall/memofunc | 1a202d176dc1de62751ce06c3c2a11a2ec835c76 | 0dd0ee722f6454f9025701df1065b55ed2c6b75a | refs/heads/master | 2021-06-23T11:35:13.089942 | 2021-02-23T09:38:02 | 2021-02-23T09:38:02 | 178,967,249 | 0 | 0 | null | 2021-02-18T00:19:33 | 2019-04-02T00:18:49 | R | UTF-8 | R | false | true | 339 | rd | is.memo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/memo.R
\name{is.memo}
\alias{is.memo}
\title{Is Memo}
\usage{
is.memo(f)
}
\arguments{
\item{f}{function, memo or otherwise}
}
\value{
\code{TRUE} if memo function, \code{FALSE} otherwise
}
\description{
Checks whether the passed function is a memo function.
}
|
80f7248a2b1aff5b8e38e348f60c9bce45def717 | 333d0bdae713f70f99ec2e4a13af83bd036e60e6 | /Trade_report.R | 10346f618c4f66d6695f42ae1a76d49f750ce84e | [
"Apache-2.0"
] | permissive | minhsphuc12/Work | a21a8f3204aa25e2f2edce8ed02b408a7f802576 | aeae49dd4e6c4529b10bdac3ebd37b5d575fd451 | refs/heads/master | 2021-01-10T06:43:07.912223 | 2015-11-27T08:01:20 | 2015-11-27T08:01:20 | 46,967,025 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,895 | r | Trade_report.R | #Load packages
library(rio)
library(data.table)
# TRADE REPORT
# Monthly trade/NAV snapshot analysis, segmented by customer care group
# (MASS / MG / VIP), comparing the current month with prior months.
# Read Data
# Snapshot workbooks: T0 = current month (Oct 2015), T1-T4 = the four
# preceding months, T12 = the same month one year earlier (Oct 2014).
data_pathT0=file.path(getwd(),'VNDIRECT/Trade_Report/Trade201510.xlsx')
data_pathT1=file.path(getwd(),'VNDIRECT/Trade_Report/Trade201509.xlsx')
data_pathT2=file.path(getwd(),'VNDIRECT/Trade_Report/Trade201508.xlsx')
data_pathT3=file.path(getwd(),'VNDIRECT/Trade_Report/Trade201507.xlsx')
data_pathT4=file.path(getwd(),'VNDIRECT/Trade_Report/Trade201506.xlsx')
data_pathT12=file.path(getwd(),'VNDIRECT/Trade_Report/Trade201410.xlsx')
TradeT0=import(data_pathT0)
TradeT1=import(data_pathT1)
TradeT2=import(data_pathT2)
TradeT3=import(data_pathT3)
TradeT4=import(data_pathT4)
TradeT12=import(data_pathT12)
# Reference date for the first-margin-debt (FIRST_DEBT) 30-day window below.
DATE_CHECK = "2015-10-30 UTC"
# Convert Data Dates: NO NEED
# Derive per-account flags on every monthly snapshot.

# STATUS: an account counts as CLOSE when its trade value and both NAV
# figures (opening and closing) are all zero; otherwise ACTIVE.
flag_status <- function(d) {
  ifelse((d$GTGD == 0) & (d$NAV_END == 0) & (d$NAV_INIT == 0), "CLOSE", "ACTIVE")
}
# CAREBY: customer care segment from the leading digit of the account type
# (1 = MASS, 2 = MG, 3 = VIP, anything else = OTHER).
flag_careby <- function(d) {
  lead_digit <- substring(d$ACTYPE, 1, 1)
  ifelse(lead_digit == 1, "MASS",
         ifelse(lead_digit == 2, "MG",
                ifelse(lead_digit == 3, "VIP", "OTHER")))
}
# FIRST_DEBT: "YES" when the first margin deal falls within the 30 days up to
# (and including) DATE_CHECK.
flag_first_debt <- function(d) {
  day_gap <- as.numeric(difftime(as.Date(d$FIRST_DAY_OF_DEAL, "%d/%m/%Y"),
                                 as.Date(DATE_CHECK), units = "days"))
  ifelse(day_gap > -30 & day_gap <= 0, "YES", "NO")
}

TradeT0$STATUS <- flag_status(TradeT0)
TradeT1$STATUS <- flag_status(TradeT1)
TradeT2$STATUS <- flag_status(TradeT2)
TradeT3$STATUS <- flag_status(TradeT3)
TradeT4$STATUS <- flag_status(TradeT4)
TradeT12$STATUS <- flag_status(TradeT12)

TradeT0$CAREBY <- flag_careby(TradeT0)
TradeT1$CAREBY <- flag_careby(TradeT1)
TradeT2$CAREBY <- flag_careby(TradeT2)
TradeT3$CAREBY <- flag_careby(TradeT3)
TradeT4$CAREBY <- flag_careby(TradeT4)
TradeT12$CAREBY <- flag_careby(TradeT12)

TradeT0$FIRST_DEBT <- flag_first_debt(TradeT0)
TradeT1$FIRST_DEBT <- flag_first_debt(TradeT1)
TradeT2$FIRST_DEBT <- flag_first_debt(TradeT2)
TradeT3$FIRST_DEBT <- flag_first_debt(TradeT3)
TradeT4$FIRST_DEBT <- flag_first_debt(TradeT4)
TradeT12$FIRST_DEBT <- flag_first_debt(TradeT12)
# Per-month totals by care group. Every vector below is ordered
# c(T0, T-1, T-2, T-12), matching the four snapshots used in the report.
.report_months <- list(TradeT0, TradeT1, TradeT2, TradeT12)
# Sum one column over the accounts of a single care group, for each month.
.care_total <- function(care_level, col) {
  unname(sapply(.report_months,
                function(d) sum(subset(d, CAREBY == care_level)[[col]])))
}
Total_NAV_MASS = .care_total("MASS", "NAV_END")
Total_NAV_MG = .care_total("MG", "NAV_END")
Total_NAV_VIP = .care_total("VIP", "NAV_END")

Total_TRADE_MASS = .care_total("MASS", "GTGD")
Total_TRADE_MG = .care_total("MG", "GTGD")
Total_TRADE_VIP = .care_total("VIP", "GTGD")

Total_TRADE_FEE_MASS = .care_total("MASS", "FEE")
Total_TRADE_FEE_MG = .care_total("MG", "FEE")
Total_TRADE_FEE_VIP = .care_total("VIP", "FEE")

Total_INTEREST_MASS = .care_total("MASS", "INTEREST")
Total_INTEREST_MG = .care_total("MG", "INTEREST")
Total_INTEREST_VIP = .care_total("VIP", "INTEREST")

# "Representative" customer fee: median over fee-paying accounts only.
.care_median_fee <- function(care_level) {
  unname(sapply(.report_months, function(d) {
    median(subset(subset(d, CAREBY == care_level), FEE > 0)$FEE)
  }))
}
Median_FEE_MASS = .care_median_fee("MASS")
Median_FEE_MG = .care_median_fee("MG")
Median_FEE_VIP = .care_median_fee("VIP")
# Account counts by care group; each vector is ordered c(T0, T-1, T-2, T-12).
.count_months <- list(TradeT0, TradeT1, TradeT2, TradeT12)
# Count accounts of one care group satisfying a per-row predicate
# (NA predicate values do not count, matching the original !is.na() filter).
.care_count <- function(care_level, pred) {
  unname(sapply(.count_months, function(d) {
    grp <- subset(d, CAREBY == care_level)
    sum(pred(grp), na.rm = TRUE)
  }))
}
# All non-closed accounts.
Account_ALL_MASS = .care_count("MASS", function(g) g$STATUS != "CLOSE")
Account_ALL_MG = .care_count("MG", function(g) g$STATUS != "CLOSE")
Account_ALL_VIP = .care_count("VIP", function(g) g$STATUS != "CLOSE")
# Accounts that traded during the month.
Account_TRADE_MASS = .care_count("MASS", function(g) g$GTGD > 0)
Account_TRADE_MG = .care_count("MG", function(g) g$GTGD > 0)
Account_TRADE_VIP = .care_count("VIP", function(g) g$GTGD > 0)
# Accounts that paid margin interest (used the credit product).
Account_INTEREST_MASS = .care_count("MASS", function(g) g$INTEREST > 0)
Account_INTEREST_MG = .care_count("MG", function(g) g$INTEREST > 0)
Account_INTEREST_VIP = .care_count("VIP", function(g) g$INTEREST > 0)
# Accounts whose first margin deal fell within the FIRST_DEBT window.
Account_FIRSTDEBT_MASS = .care_count("MASS", function(g) g$FIRST_DEBT == "YES")
Account_FIRSTDEBT_MG = .care_count("MG", function(g) g$FIRST_DEBT == "YES")
Account_FIRSTDEBT_VIP = .care_count("VIP", function(g) g$FIRST_DEBT == "YES")
# Shares of the (non-closed) account base.
PERCENTAGE_TRADE_MASS = Account_TRADE_MASS / Account_ALL_MASS
PERCENTAGE_TRADE_MG = Account_TRADE_MG / Account_ALL_MG
PERCENTAGE_TRADE_VIP = Account_TRADE_VIP / Account_ALL_VIP
PERCENTAGE_INTEREST_MASS = Account_INTEREST_MASS / Account_ALL_MASS
PERCENTAGE_INTEREST_MG = Account_INTEREST_MG / Account_ALL_MG
PERCENTAGE_INTEREST_VIP = Account_INTEREST_VIP / Account_ALL_VIP
PERCENTAGE_FIRSTDEBT_MASS = Account_FIRSTDEBT_MASS / Account_ALL_MASS
PERCENTAGE_FIRSTDEBT_MG = Account_FIRSTDEBT_MG / Account_ALL_MG
PERCENTAGE_FIRSTDEBT_VIP = Account_FIRSTDEBT_VIP / Account_ALL_VIP
# Proportion of accounts that fully withdrew ("rut trang") their NAV, by care
# group. mergeT0 joins the three most recent monthly snapshots on account
# number: unsuffixed columns = T0 (current), .x = T-1, .y = T-2.
mergeT0 = merge (TradeT1, TradeT2, by = "AFACCTNO")
mergeT0 = merge (TradeT0, mergeT0, by = "AFACCTNO")
# CHURN: NAV two months ago was at least 500k and more than 20x both the
# current and previous month's NAV, i.e. the balance was effectively emptied.
mergeT0 $ CHURN = ifelse((mergeT0$NAV_END.y > 20*mergeT0$NAV_END) &
                        ((mergeT0$NAV_END.y > 20*mergeT0$NAV_END.x) & (mergeT0$NAV_END.y >=500000))
                      ,"CHURN","RETAIN")
ACCOUNT_CHURN_T0_MASS = length (subset(subset (mergeT0, CAREBY == "MASS"), CHURN == "CHURN")$CHURN)
ACCOUNT_CHURN_T0_MG = length (subset(subset (mergeT0, CAREBY == "MG"), CHURN == "CHURN")$CHURN)
ACCOUNT_CHURN_T0_VIP = length (subset(subset (mergeT0, CAREBY == "VIP"), CHURN == "CHURN")$CHURN)
# BUG FIX: the MG and VIP ratios previously divided the MASS churn count by
# the MG/VIP account totals; each group now uses its own churn count.
# (Denominator index [3] = the T-2 account base, as in the original.)
PERCENTAGE_CHURN_T0_MASS = ACCOUNT_CHURN_T0_MASS / Account_ALL_MASS [3]
PERCENTAGE_CHURN_T0_MG = ACCOUNT_CHURN_T0_MG / Account_ALL_MG [3]
PERCENTAGE_CHURN_T0_VIP = ACCOUNT_CHURN_T0_VIP / Account_ALL_VIP [3]
# Top-decile ("top 10% by NAV") analysis, per care group and month.
.decile_months <- list(TradeT0, TradeT1, TradeT2, TradeT12)
# Lowest NAV admitted to the top decile, for each of the four months.
.p90_nav <- function(care_level) {
  unname(sapply(.decile_months, function(d) {
    quantile(subset(d, CAREBY == care_level)$NAV_END, 0.9, na.rm = TRUE)
  }))
}
TOP_90PERCENTILE_MASS = .p90_nav("MASS")
TOP_90PERCENTILE_MG = .p90_nav("MG")
TOP_90PERCENTILE_VIP = .p90_nav("VIP")
# Column totals contributed by the top decile of each month.
.top_decile_sum <- function(care_level, cutoffs, col) {
  unname(mapply(function(d, cutoff) {
    grp <- subset(d, CAREBY == care_level)
    sum(subset(grp, NAV_END >= cutoff)[[col]])
  }, .decile_months, cutoffs))
}
TOP_NAV_MASS = .top_decile_sum("MASS", TOP_90PERCENTILE_MASS, "NAV_END")
TOP_FEE_MASS = .top_decile_sum("MASS", TOP_90PERCENTILE_MASS, "FEE")
TOP_INTEREST_MASS = .top_decile_sum("MASS", TOP_90PERCENTILE_MASS, "INTEREST")
TOP_NAV_MG = .top_decile_sum("MG", TOP_90PERCENTILE_MG, "NAV_END")
TOP_FEE_MG = .top_decile_sum("MG", TOP_90PERCENTILE_MG, "FEE")
TOP_INTEREST_MG = .top_decile_sum("MG", TOP_90PERCENTILE_MG, "INTEREST")
TOP_NAV_VIP = .top_decile_sum("VIP", TOP_90PERCENTILE_VIP, "NAV_END")
TOP_FEE_VIP = .top_decile_sum("VIP", TOP_90PERCENTILE_VIP, "FEE")
TOP_INTEREST_VIP = .top_decile_sum("VIP", TOP_90PERCENTILE_VIP, "INTEREST")
# Share of each group's total held by its top decile.
PERCENTAGE_TOP_NAV_MASS = TOP_NAV_MASS / Total_NAV_MASS
PERCENTAGE_TOP_FEE_MASS = TOP_FEE_MASS / Total_TRADE_FEE_MASS
PERCENTAGE_TOP_INTEREST_MASS = TOP_INTEREST_MASS / Total_INTEREST_MASS
PERCENTAGE_TOP_NAV_MG = TOP_NAV_MG / Total_NAV_MG
PERCENTAGE_TOP_FEE_MG = TOP_FEE_MG / Total_TRADE_FEE_MG
PERCENTAGE_TOP_INTEREST_MG = TOP_INTEREST_MG / Total_INTEREST_MG
PERCENTAGE_TOP_NAV_VIP = TOP_NAV_VIP / Total_NAV_VIP
PERCENTAGE_TOP_FEE_VIP = TOP_FEE_VIP / Total_TRADE_FEE_VIP
PERCENTAGE_TOP_INTEREST_VIP = TOP_INTEREST_VIP / Total_INTEREST_VIP
# Full-NAV-withdrawal (churn) counts inside the top decile, keyed on the
# T-2 NAV (NAV_END.y) against the T-2 percentile cutoff (index [3]).
.top_churn_count <- function(care_level, cutoff) {
  churned <- subset(subset(mergeT0, CAREBY == care_level), CHURN == "CHURN")
  length(subset(churned, NAV_END.y >= cutoff)$CHURN)
}
TOP_CHURN_T0_MASS = .top_churn_count("MASS", TOP_90PERCENTILE_MASS[3])
TOP_CHURN_T0_MG = .top_churn_count("MG", TOP_90PERCENTILE_MG[3])
TOP_CHURN_T0_VIP = .top_churn_count("VIP", TOP_90PERCENTILE_VIP[3])
PERCENTAGE_TOP_CHURN_T0_MASS = TOP_CHURN_T0_MASS / Account_ALL_MASS [3]
PERCENTAGE_TOP_CHURN_T0_MG = TOP_CHURN_T0_MG / Account_ALL_MG[3]
PERCENTAGE_TOP_CHURN_T0_VIP = TOP_CHURN_T0_VIP / Account_ALL_VIP[3]
# Assemble the report table: one indicator per 3-row group (MASS/MG/VIP),
# with the four monthly values (T_0, T_1, T_2, T_12) as character columns.
output = data.frame()
# Seed with a blank 6-column row so subsequent rbind() of 6-element
# character vectors lines up with the column names below.
output [1,1] = ""
output [1,2] = ""
output [1,3] = ""
output [1,4] = ""
output [1,5] = ""
output [1,6] = ""
colnames (output) = c('Indicators','CAREBY','T_0','T_1','T_2','T_12')
output = rbind (output,c('Tong NAV','MASS',Total_NAV_MASS))
output = rbind (output,c('','MG',Total_NAV_MG))
output = rbind (output,c('','VIP',Total_NAV_VIP))
output = rbind (output,c('Tong so TK','MASS',Account_ALL_MASS))
output = rbind (output,c('','MG',Account_ALL_MG))
output = rbind (output,c('','VIP',Account_ALL_VIP))
output = rbind (output,c('Tong so TK co GD trong thang','MASS',Account_TRADE_MASS))
output = rbind (output,c('','MG',Account_TRADE_MG))
output = rbind (output,c('','VIP',Account_TRADE_VIP))
output = rbind (output,c('Ti le TK co GD trong thang','MASS',PERCENTAGE_TRADE_MASS))
output = rbind (output,c('','MG',PERCENTAGE_TRADE_MG))
output = rbind (output,c('','VIP',PERCENTAGE_TRADE_VIP))
output = rbind (output,c('Tong gia tri GD trong thang','MASS',Total_TRADE_MASS))
output = rbind (output,c('','MG',Total_TRADE_MG))
output = rbind (output,c('','VIP',Total_TRADE_VIP))
output = rbind (output,c('Tong phi GD trong thang','MASS',Total_TRADE_FEE_MASS))
output = rbind (output,c('','MG',Total_TRADE_FEE_MG))
output = rbind (output,c('','VIP',Total_TRADE_FEE_VIP))
output = rbind (output,c('Phi GD cua KH dai dien','MASS',Median_FEE_MASS))
output = rbind (output,c('','MG',Median_FEE_MG))
output = rbind (output,c('','VIP',Median_FEE_VIP))
# NOTE(review): 'STPC' below is likely a typo for 'SPTC' (the spelling used
# in every other label) -- confirm before changing the report label.
output = rbind (output,c('Ti le su dung STPC','MASS',PERCENTAGE_INTEREST_MASS))
output = rbind (output,c('','MG',PERCENTAGE_INTEREST_MG))
output = rbind (output,c('','VIP',PERCENTAGE_INTEREST_VIP))
output = rbind (output,c('Ti le su dung SPTC lan dau','MASS',PERCENTAGE_FIRSTDEBT_MASS))
output = rbind (output,c('','MG',PERCENTAGE_FIRSTDEBT_MG))
output = rbind (output,c('','VIP',PERCENTAGE_FIRSTDEBT_VIP))
output = rbind (output,c('Doanh thu tu SPTC','MASS',Total_INTEREST_MASS))
output = rbind (output,c('','MG',Total_INTEREST_MG))
output = rbind (output,c('','VIP',Total_INTEREST_VIP))
# Churn ratios are single scalars (one month), so pad with three empty
# strings to keep the 6-column row shape.
output = rbind (output,c('Ti le KH rut trang NAV','MASS',PERCENTAGE_CHURN_T0_MASS,c("","","")))
output = rbind (output,c('','MG',PERCENTAGE_CHURN_T0_MG,c("","","")))
output = rbind (output,c('','VIP',PERCENTAGE_CHURN_T0_VIP,c("","","")))
output = rbind (output,c('Muc thap nhat top 10% NAV','MASS',TOP_90PERCENTILE_MASS))
output = rbind (output,c('','MG',TOP_90PERCENTILE_MG))
output = rbind (output,c('','VIP',TOP_90PERCENTILE_VIP))
output = rbind (output,c('Ti le NAV top 10% / Tong NAV','MASS',PERCENTAGE_TOP_NAV_MASS))
output = rbind (output,c('','MG',PERCENTAGE_TOP_NAV_MG))
output = rbind (output,c('','VIP',PERCENTAGE_TOP_NAV_VIP))
output = rbind (output,c('Tong phi GD top','MASS',TOP_FEE_MASS))
output = rbind (output,c('','MG',TOP_FEE_MG))
output = rbind (output,c('','VIP',TOP_FEE_VIP))
output = rbind (output,c('Ti le phi GD top','MASS',PERCENTAGE_TOP_FEE_MASS))
output = rbind (output,c('','MG',PERCENTAGE_TOP_FEE_MG))
output = rbind (output,c('','VIP',PERCENTAGE_TOP_FEE_VIP))
output = rbind (output,c('Doanh thu SPTC top','MASS',TOP_INTEREST_MASS))
output = rbind (output,c('','MG',TOP_INTEREST_MG))
output = rbind (output,c('','VIP',TOP_INTEREST_VIP))
output = rbind (output,c('Ti le doanh thu SPTC top','MASS',PERCENTAGE_TOP_INTEREST_MASS))
output = rbind (output,c('','MG',PERCENTAGE_TOP_INTEREST_MG))
output = rbind (output,c('','VIP',PERCENTAGE_TOP_INTEREST_VIP))
output = rbind (output,c('Ti le rut trang NAV top','MASS',PERCENTAGE_TOP_CHURN_T0_MASS,c("","","")))
output = rbind (output,c('','MG',PERCENTAGE_TOP_CHURN_T0_MG,c("","","")))
output = rbind (output,c('','VIP',PERCENTAGE_TOP_CHURN_T0_VIP,c("","","")))
#output = rbind (output,c('','',))
#WriteToFile
write.table(output,file = 'sampletrade.csv',row.names = FALSE, sep = "")
# Segmentation thresholds: minimum asset value (NAV), minimum traded value,
# and the cutoff for very large traders ("MVC").
Threshold_Asset <- 1e5
Threshold_Trade <- 1e5
Threshold_MVC <- 5e9

# Current-period membership flags ("Y"/"N") per customer.
# NOTE(review): MVC is derived from GTGD (traded value), same as TRADE but
# with a higher cutoff — confirm this is intended rather than a NAV-based rule.
mergeT0$CUSTOMER <- ifelse(mergeT0$STATUS == "ACTIVE", 'Y', 'N')
mergeT0$ASSET <- ifelse(mergeT0$NAV_END > Threshold_Asset, 'Y', 'N')
mergeT0$TRADE <- ifelse(mergeT0$GTGD > Threshold_Trade, 'Y', 'N')
mergeT0$MVC <- ifelse(mergeT0$GTGD > Threshold_MVC, 'Y', 'N')

# Entry flags: customer crossed a threshold upward between the previous
# period (".x"-suffixed columns from the merge) and the current one.
mergeT0$ENTER_CUSTOMER <- ifelse(mergeT0$STATUS.x == "CLOSE" & mergeT0$STATUS == "ACTIVE", 'Y', 'N')
mergeT0$ENTER_ASSET <- ifelse(mergeT0$NAV_END.x <= Threshold_Asset & mergeT0$NAV_END > Threshold_Asset, 'Y', 'N')
mergeT0$ENTER_TRADE <- ifelse(mergeT0$GTGD.x <= Threshold_Trade & mergeT0$GTGD > Threshold_Trade, 'Y', 'N')
mergeT0$ENTER_MVC <- ifelse(mergeT0$GTGD.x <= Threshold_MVC & mergeT0$GTGD > Threshold_MVC, 'Y', 'N')

# Churn flags: customer crossed a threshold downward between periods.
mergeT0$CHURN_CUSTOMER <- ifelse(mergeT0$STATUS.x == "ACTIVE" & mergeT0$STATUS == "CLOSE", 'Y', 'N')
mergeT0$CHURN_ASSET <- ifelse(mergeT0$NAV_END.x > Threshold_Asset & mergeT0$NAV_END <= Threshold_Asset, 'Y', 'N')
mergeT0$CHURN_TRADE <- ifelse(mergeT0$GTGD.x > Threshold_Trade & mergeT0$GTGD <= Threshold_Trade, 'Y', 'N')
mergeT0$CHURN_MVC <- ifelse(mergeT0$GTGD.x > Threshold_MVC & mergeT0$GTGD <= Threshold_MVC, 'Y', 'N')

# Cross-tabulate every flag against the account manager (CAREBY) —
# inspected interactively.
table(mergeT0$CAREBY, mergeT0$CUSTOMER)
table(mergeT0$CAREBY, mergeT0$ASSET)
table(mergeT0$CAREBY, mergeT0$TRADE)
table(mergeT0$CAREBY, mergeT0$MVC)
table(mergeT0$CAREBY, mergeT0$CHURN_CUSTOMER)
table(mergeT0$CAREBY, mergeT0$CHURN_ASSET)
table(mergeT0$CAREBY, mergeT0$CHURN_TRADE)
table(mergeT0$CAREBY, mergeT0$CHURN_MVC)
table(mergeT0$CAREBY, mergeT0$ENTER_CUSTOMER)
table(mergeT0$CAREBY, mergeT0$ENTER_ASSET)
table(mergeT0$CAREBY, mergeT0$ENTER_TRADE)
table(mergeT0$CAREBY, mergeT0$ENTER_MVC)

# Sanity checks on column lengths / active-customer count.
length(mergeT0$CAREBY)
length(mergeT0$ASSET)
nrow(subset(mergeT0, STATUS == "ACTIVE"))

# Aggregate churn counts (downward crossings). subset() drops rows whose
# condition evaluates to NA, matching the original nested-subset behavior.
CHURN_CUSTOMER <- nrow(subset(mergeT0, STATUS.x == "ACTIVE" & STATUS == "CLOSE"))
CHURN_ASSET <- nrow(subset(mergeT0, NAV_END.x > Threshold_Asset & NAV_END <= Threshold_Asset))
CHURN_TRADE <- nrow(subset(mergeT0, GTGD.x > Threshold_Trade & GTGD <= Threshold_Trade))
CHURN_MVC <- nrow(subset(mergeT0, GTGD.x > Threshold_MVC & GTGD <= Threshold_MVC))

# Aggregate entry counts (upward crossings).
ENTER_CUSTOMER <- nrow(subset(mergeT0, STATUS.x == "CLOSE" & STATUS == "ACTIVE"))
ENTER_ASSET <- nrow(subset(mergeT0, NAV_END.x <= Threshold_Asset & NAV_END > Threshold_Asset))
ENTER_TRADE <- nrow(subset(mergeT0, GTGD.x <= Threshold_Trade & GTGD > Threshold_Trade))
ENTER_MVC <- nrow(subset(mergeT0, GTGD.x <= Threshold_MVC & GTGD > Threshold_MVC))

# Current group sizes. sum(cond, na.rm = TRUE) counts TRUE rows, which is
# equivalent to nrow(subset(...)) since subset() also drops NA conditions.
GROUP_CUSTOMER <- sum(mergeT0$STATUS == "ACTIVE", na.rm = TRUE)
GROUP_ASSET <- sum(mergeT0$NAV_END > Threshold_Asset, na.rm = TRUE)
GROUP_TRADE <- sum(mergeT0$GTGD > Threshold_Trade, na.rm = TRUE)
GROUP_MVC <- sum(mergeT0$GTGD > Threshold_MVC, na.rm = TRUE)
|
21cfb311c19c4a8cb4a528239bb3047dda719b8e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PBD/tests/test-test_PBD.R | 57546b5ed399db56b12b300bad4ff57b7a9c9dae | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 552 | r | test-test_PBD.R | context("test_PBD")
# Regression test for protracted birth-death (PBD) maximum-likelihood fitting.
# Verifies that pbd_ML with conditioning mode 2 reproduces the mode-1 result
# when the clade-size bounds are left at their defaults (n_l = 2, n_u = Inf),
# and that a large finite upper bound (n_u = 1000) matches n_u = Inf.
test_that("PBD works", {
  # Smoke-run the simulator; the result is discarded, so this only checks
  # that the call completes without error.
  pbd_sim(c(0.2,1,0.2,0.1,0.1),15)
  # Trivial sanity assertion — presumably a placeholder left from test
  # scaffolding; it exercises nothing in the PBD package.
  expect_equal(2 * 2, 4)
  # Baseline fit: conditioning on survival (cond = 1), extinction rates
  # constrained equal (exteq = 1).
  res1 <- pbd_ML(
    brts = 1:10,
    initparsopt = c(4.62,4.34,0.03),
    exteq = 1,
    cond = 1)
  # Same fit under cond = 2 with the default clade-size window [2, Inf).
  res2 <- pbd_ML(
    brts = 1:10,
    initparsopt = c(4.62,4.34,0.03),
    exteq = 1,
    cond = 2,
    n_l = 2,
    n_u = Inf)
  # As above but with a finite upper bound large enough to be effectively
  # unbounded for a 10-tip tree.
  res2a <- pbd_ML(
    brts = 1:10,
    initparsopt = c(4.62,4.34,0.03),
    exteq = 1,
    cond = 2,
    n_l = 2,
    n_u = 1000)
  # cond = 1 and cond = 2 (unbounded) should agree exactly, and the
  # finite bound should not change the result.
  expect_equal(res1, res2)
  expect_equal(res2, res2a)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.