blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8429b685cb65e15de53de345f848f61a579bb723
|
9e4df408b72687493cc23144408868a975971f68
|
/SMS_r_prog/r_prog_less_frequently_used/plot_m2_uncertanty.r
|
2e4bba3a4f63b9a8e54a1b521ad1bea287546ad5
|
[
"MIT"
] |
permissive
|
ices-eg/wg_WGSAM
|
7402ed21ae3e4a5437da2a6edf98125d0d0e47a9
|
54181317b0aa2cae2b4815c6d520ece6b3a9f177
|
refs/heads/master
| 2023-05-12T01:38:30.580056
| 2023-05-04T15:42:28
| 2023-05-04T15:42:28
| 111,518,540
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
plot_m2_uncertanty.r
|
#############################################################################
# confidence interval of M2 calculated from mean and standard deviations
#
# Script-level dependencies defined elsewhere in the SMS project (not in this
# file): Read.SMS.std(), SMS.control (S4 object with @no.species slot),
# sp.names, and lattice's xyplot(). NOTE(review): assumes lattice is already
# attached -- confirm against the project's startup scripts.
tmp<-Read.SMS.std()
# quick sanity check of the age-2 M2 standard deviations
tst<-subset(tmp,name=='M2_sd2')
summary(tst)
# for a 3-species setup drop the species/prey/predator id columns
if (SMS.control@no.species==3) tmp<-subset(tmp,select=c(-species,-prey,-predator))
# plot the result
X11()
# Plot M2 with an approximate 95% confidence band (mean +/- 2 sd) for one age
# class, one panel per species. Reads `tmp` and `sp.names` from the enclosing
# environment.
#   M2age: age class to plot (default 0)
plotConfM2<-function(M2age=0) {
var.name<-c("M2_sd0","M2_sd1","M2_sd2")
a<-subset(tmp,name %in% var.name & age==M2age)
a$Species=sp.names[a$species]
# lower/upper bounds: value +/- 2 standard deviations (~95% CI)
a$minus<-a$value-2*a$std
a$plus<-a$value+2*a$std
# print() is required for lattice plots drawn inside a function
print(xyplot(minus+ value+plus~year|Species, type='l', ylab='Predation mortality, M2', xlab=NULL,
lwd=c(2,2.5,2),col=c(4,1,4),lty=c(2,1,2),scales = "free", data=a))
}
#plotConfM2(0)
plotConfM2(1)
plotConfM2(2)
|
9091a886c6e2144f922e8659789e2b1c552f5f6a
|
ad6f91c7fcbbb1e102edf4bff6c415d856757d72
|
/man/plus-ggmultiplot-ANY-method.Rd
|
798f29ac02466e6036f24fe21dfcbb93f23040a8
|
[] |
no_license
|
inambioinfo/ggfortify
|
dfb4c3b4c63ae796e50c660148ebb3c226972048
|
107768f6258699692275ab52fc72a99aa5b6cd25
|
refs/heads/master
| 2020-05-25T07:31:07.083432
| 2019-03-20T03:31:20
| 2019-03-20T03:31:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 428
|
rd
|
plus-ggmultiplot-ANY-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotlib.R
\docType{methods}
\name{+,ggmultiplot,ANY-method}
\alias{+,ggmultiplot,ANY-method}
\title{Generic add operator for \code{ggmultiplot}}
\usage{
\S4method{+}{ggmultiplot,ANY}(e1, e2)
}
\arguments{
\item{e1}{first argument}
\item{e2}{second argument}
}
\value{
\code{ggmultiplot}
}
\description{
Generic add operator for \code{ggmultiplot}
}
|
f54f868636478b7d061f6ce199a940d4f0a6dc12
|
1873bcdf1e78e2370f440d129d5d19f115095640
|
/R/add_padding.R
|
d9dc6193313ec769e7a08349b7f2f3a19600ff03
|
[] |
no_license
|
tylermorganwall/rayshader
|
bdc6314d093a7b874aec43576a975909f17a668d
|
81f95cf24973049f84f4a4250daa9b0b4659281d
|
refs/heads/master
| 2023-08-10T21:34:51.566012
| 2023-08-01T12:15:39
| 2023-08-01T12:15:39
| 133,241,343
| 1,939
| 219
| null | 2023-07-31T09:29:09
| 2018-05-13T13:51:00
|
R
|
UTF-8
|
R
| false
| false
| 2,575
|
r
|
add_padding.R
|
#'@title add_padding
#'
#'@description Pads a matrix with a one-cell border that replicates the edge values.
#'
#'@param heightmap A two-dimensional matrix, where each entry in the matrix is the elevation at that point. All points are assumed to be evenly spaced.
#'@return Hillshade with edges padded
#'@keywords internal
add_padding = function(heightmap) {
  n_row <- nrow(heightmap)
  n_col <- ncol(heightmap)
  rows <- seq_len(n_row) + 1  # interior row indices of the padded matrix
  cols <- seq_len(n_col) + 1  # interior column indices of the padded matrix
  padded <- matrix(0, nrow = n_row + 2, ncol = n_col + 2)
  # interior: the original matrix
  padded[rows, cols] <- heightmap
  # edges: replicate the nearest row/column of the original
  padded[rows, 1] <- heightmap[, 1]
  padded[1, cols] <- heightmap[1, ]
  padded[rows, n_col + 2] <- heightmap[, n_col]
  padded[n_row + 2, cols] <- heightmap[n_row, ]
  # corners: copy adjacent, already-filled border cells
  padded[1, 1] <- padded[1, 2]
  padded[1, n_col + 2] <- padded[1, n_col + 1]
  padded[n_row + 2, 1] <- padded[n_row + 1, 2]
  padded[n_row + 2, n_col + 2] <- padded[n_row + 1, n_col + 2]
  padded
}
#'@title add_multi_padding
#'
#'@description Adds `pad` layers of edge-replicating padding to the matrix.
#'
#'@param heightmap A two-dimensional matrix, where each entry in the matrix is the elevation at that point. All points are assumed to be evenly spaced.
#'@param pad Number of padding entries
#'@return Hillshade with edges padded
#'@keywords internal
add_multi_padding = function(heightmap, pad = 1) {
  n_row <- nrow(heightmap)
  n_col <- ncol(heightmap)
  # index helpers into the padded matrix
  rows   <- seq_len(n_row) + pad        # interior rows
  cols   <- seq_len(n_col) + pad        # interior columns
  top    <- seq_len(pad)                # padding rows above
  bottom <- n_row + pad + seq_len(pad)  # padding rows below
  left   <- seq_len(pad)                # padding columns to the left
  right  <- n_col + pad + seq_len(pad)  # padding columns to the right
  padded <- matrix(0, nrow = n_row + 2 * pad, ncol = n_col + 2 * pad)
  # interior: the original matrix
  padded[rows, cols] <- heightmap
  # side bands: column-wise recycling fills every pad column with the edge column
  padded[rows, left]  <- heightmap[, 1]
  padded[rows, right] <- heightmap[, n_col]
  # top/bottom bands: each pad row repeats the first/last original row
  padded[top, cols]    <- matrix(heightmap[1, ], nrow = pad, ncol = n_col, byrow = TRUE)
  padded[bottom, cols] <- matrix(heightmap[n_row, ], nrow = pad, ncol = n_col, byrow = TRUE)
  # corner blocks: constant value from the nearest original corner
  padded[top, left]     <- heightmap[1, 1]
  padded[top, right]    <- heightmap[1, n_col]
  padded[bottom, left]  <- heightmap[n_row, 1]
  padded[bottom, right] <- heightmap[n_row, n_col]
  padded
}
#'@title trim_padding
#'
#'@description Removes `pad` rows/columns from every side of the matrix.
#'
#'@param heightmap A two-dimensional matrix, where each entry in the matrix is the elevation at that point. All points are assumed to be evenly spaced.
#'@param pad Number of padding entries
#'@return Hillshade with edges trimmed
#'@keywords internal
trim_padding = function(heightmap, pad = 1) {
  keep_rows <- (pad + 1):(nrow(heightmap) - pad)
  keep_cols <- (pad + 1):(ncol(heightmap) - pad)
  # like the original, this drops to a vector when only one row/column remains
  heightmap[keep_rows, keep_cols]
}
|
ba5ed1d30fea344be3bc30a8866619a072ff4f0e
|
e760785c239161c0e760eefc5420399eeadb0a72
|
/R Codes/Functions for CI charts.R
|
36e60b673e0191f4e7be76df5044938ea76777bc
|
[] |
no_license
|
tkakar/DataPrep
|
4474a75452ac7c446762af89d493fa8bca852aeb
|
6c80cfda8b6bb57385f75bbec95613f4c6f7e823
|
refs/heads/main
| 2023-02-20T18:34:20.292190
| 2021-01-19T13:00:51
| 2021-01-19T13:00:51
| 330,507,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,712
|
r
|
Functions for CI charts.R
|
library(boot)
library(ggplot2)
library (dplyr)
library(Hmisc)
library(readr)
library(magrittr)
library(ggrepel)
library(scales)
library(bootES)
# library(psych)
# T2Time_bkp <- T2Time
# T2Time[T2Time== "null"] <- NA
# Run a Wilcoxon test of column `y` between the two levels of column `group`
# and print the full test report.
#   data:   data frame holding both columns
#   y:      name (string) of the numeric outcome column
#   group:  name (string) of the two-level grouping column
#   yRange: unused; kept for interface compatibility
#   paired: forwarded to wilcox.test()
# Returns the htest object (invisibly, via print()).
fullReport <- function(data, y, group, yRange=0, paired=TRUE){
  # copy the string-named columns to fixed names so the formula can use them
  data[["group_"]] <- data[[group]]
  data[["y_"]] <- data[[y]]
  test_result <- wilcox.test(y_ ~ group_, data, conf.int = TRUE, paired = paired)
  print(test_result)
}
# Line chart of individual participant trajectories, coloured by condition.
#   df:     data frame to plot
#   X, Y:   x and y aesthetics (captured by aes() below)
#   group:  grouping/colour variable; expected to contain "Visual"/"Table"
#   title:  chart title, also used for the output file name
#   xlabel, ylabel: axis labels
#   ystat:  unused; kept for interface compatibility
# Saves "<title>.pdf" and returns the ggsave() result.
plotParticipants <- function(df, X, Y, group, title,xlabel, ylabel, ystat){
  group.colors <- c("Visual" = "#998EC3", "Table"= "#F1A340")
  p <- ggplot(df) +
    aes(x = X , y=Y, colour = group, group=group) +
    ggtitle(title) +
    # geom_jitter(position = position_jitter(width = 0.2, height = 0))+
    geom_line(position=position_jitter(width = 0.2, height = 0)) +
    scale_color_manual(values= group.colors, name = group)+
    labs(x = xlabel, y = ylabel) +
    theme_bw() +
    theme(legend.title = element_blank()) +
    # Fix: this theme() call was a dangling statement (missing "+"), so the
    # title was never centred; it is now part of the plot chain.
    theme(plot.title = element_text(hjust = 0.5))
  # Fix: pass the plot explicitly -- inside a function a bare `p` is not
  # auto-printed, so ggsave()'s default last_plot() could save a stale plot.
  ggsave(paste( title,".pdf" ,sep=""), plot = p, width =7, height = 3, dpi = 120)
}
# Bootstrap statistic helper for boot(): the mean of the resampled values.
#   D: full data vector
#   d: index vector chosen by boot() for this replicate
mean.fun <- function(D, d) {
  mean(D[d])
}
# Print a Cohen's d effect size (Visual vs. Table contrast) with its bootstrap
# confidence interval, formatted as "d=<estimate>~[<lower>,<upper>]".
#   data:  data frame containing the measurement and grouping columns
#   attr:  name (string) of the numeric column to compare
#   group: name (string) of the grouping column; must contain the levels
#          "Visual" and "Table" for the contrast below
# NOTE(review): relies on the bootES package; `b$t0` is the point estimate and
# `b$bounds` the CI endpoints -- confirm against the installed bootES version.
reportES <- function(data, attr, group) {
# if(group=="Visual"){
b <- bootES(data,
data.col=attr,
group.col=group,
contrast=c(Visual=1,Table=-1),
effect.type="cohens.d"
)
cat( "d=",round( b$t0, 2), "~",
"[", round( b$bounds[1], 2), ",",
round( b$bounds[2], 2), "]",
sep="")
# }
}
######## Confidence Interval ############
# Point-and-CI chart: for each level of column `x`, plot the mean of column `y`
# with a bootstrapped BCa confidence interval, then save the chart as a PDF.
#   data:   data frame
#   y, x:   column names (strings) for the value and the grouping variable
#   yRange: passed to expand_limits() (but see the override note below)
#   xRange, gap: unused; kept for interface compatibility
#   cTitle: prefix for the output file name
# NOTE(review): group_by_() is deprecated dplyr; group_by() with .data[[x]] is
# the modern equivalent -- confirm the dplyr version before changing.
ciplot <- function(data, y, x, yRange, xRange, gap=0, cTitle) {
group.colors <- c("Visual" = "#998EC3", "Table"= "#F1A340")
# copy string-named columns to fixed names so non-NSE code can reach them
data['x_'] <- data[x]
data['y_'] <- data[y]
data[['x_']] <- factor(data[['x_']])
groups <- group_by_(data, 'x_')
# print (y)debugonce
# yRange <- c(min(data[y]), max(data[y]))
# So far the only way to enable string as param, ###### change na.omit if problem occurse
# per group: mean plus upper/lower BCa bounds from 1000 bootstrap resamples
groupedData <- dplyr::summarize(groups,
mean=mean(y_),
UCI= boot.ci(na.omit(boot(y_, statistic = mean.fun, R=1000, sim="ordinary")))$bca[,5],
LCI= boot.ci(na.omit(boot(y_, statistic = mean.fun, R=1000, sim="ordinary")))$bca[,4])
# print(UCI)
df <- data.frame(
trt = factor(groupedData[[1]]),
resp = groupedData[["mean"]],
group = factor(groupedData[[1]]),
upper = c(groupedData[["UCI"]]),
lower = c(groupedData[["LCI"]])
)
print (df)
p <- ggplot(df, aes(trt, resp, color = group))
p <- p + scale_color_manual(values=group.colors, name = y)
# NOTE(review): these theme() tweaks are later replaced wholesale by
# theme_bw() (a complete theme) further down -- confirm that is intended.
p <- p + theme(axis.title=element_text(size=20), axis.text=element_text(size=20))
p <- p + geom_pointrange(aes(ymin = lower, ymax = upper))
p <- p + expand_limits(y = yRange)
# p <- p + ylim(0, 30) ##### uncomment when want similar scale/gap for Time information
# p <- p + scale_y_continuous(limits=c(1, 7), breaks=c(1:7)) #### uncomment for EAse and Conf, and comment expand_limit line
# NOTE(review): the hard-coded 0-10 limits below override expand_limits(yRange)
p <- p + scale_y_continuous(limits=c(0, 10), breaks=seq(0,10,by=2))
# p <- p + scale_y_continuous(limits=c(0, 6), breaks=seq(0,6,by=1))
# p <- p + scale_x_discrete(expand=c(0,2))
p <- p + ylab("")
p <- p + xlab("")
p <- p + geom_errorbar(aes(ymin = lower, ymax = upper, color=group), width = 0.2, size=1)
# flip so the groups run down the y axis
p <- p + coord_flip()
p <- p + theme_bw()
p <- p + theme(plot.title=element_text(hjust=0))
p <- p + theme(panel.border=element_blank())
p <- p + theme(panel.grid.minor=element_blank())
p <- p + theme(axis.ticks=element_blank())
p <- p + theme(axis.text.y = element_blank())
p <- p+ theme(axis.text.x = element_text(size = 14, colour = "black" ))
p <- p + theme(legend.position = "none")
p
# NOTE(review): ggsave() without a plot argument falls back to last_plot();
# inside a function the bare `p` above does not render -- consider plot = p.
ggsave(paste( cTitle, y,".pdf" ,sep=""), width =5, height = 1, dpi = 120)
}
######## Confidence Interval with %s ############
# Same as ciplot() but the y axis is formatted as percentages (scales::percent)
# and no fixed y limits are imposed. See ciplot() for parameter meanings.
# NOTE(review): this is a near-duplicate of ciplot(); the two could share a
# helper, and group_by_() is deprecated dplyr -- confirm before refactoring.
ciplotPercentage <- function(data, y, x, yRange, xRange, gap=0, cTitle) {
# data <- ddply(data, c('group'))
# data %>% arrange(desc(group))
group.colors <- c("Visual" = "#998EC3", "Table"= "#F1A340")
# copy string-named columns to fixed names so non-NSE code can reach them
data['x_'] <- data[x]
data['y_'] <- data[y]
data[['x_']] <- factor(data[['x_']])
groups <- group_by_(data, 'x_')
# print (y)debugonce
# yRange <- c(min(data[y]), max(data[y]))
# So far the only way to enable string as param, ###### change na.omit if problem occurse
# per group: mean plus upper/lower BCa bounds from 1000 bootstrap resamples
groupedData <- dplyr::summarize(groups,
mean=mean(y_),
UCI= boot.ci(na.omit(boot(y_, statistic = mean.fun, R=1000, sim="ordinary")))$bca[,5],
LCI= boot.ci(na.omit(boot(y_, statistic = mean.fun, R=1000, sim="ordinary")))$bca[,4])
# print(UCI)
df <- data.frame(
trt = factor(groupedData[[1]]),
resp = groupedData[["mean"]],
group = factor(groupedData[[1]]),
upper = c(groupedData[["UCI"]]),
lower = c(groupedData[["LCI"]])
)
print (df)
p <- ggplot(df, aes(trt, resp, color = group))
p <- p + scale_color_manual(values=group.colors, name = y)
# NOTE(review): replaced wholesale by theme_bw() below -- confirm intended.
p <- p + theme(axis.title=element_text(size=20), axis.text=element_text(size=18))
p <- p + geom_pointrange(aes(ymin = lower, ymax = upper))
p <- p + expand_limits(y = yRange)
p <- p + ylab("")
p <- p + xlab("")
# format tick labels as percentages (requires scales::percent in scope)
p <- p + scale_y_continuous(labels =percent)
p <- p + geom_errorbar(aes(ymin = lower, ymax = upper, color=group), width = 0.2, size=1)
p <- p + coord_flip()
p <- p + theme_bw()
p <- p + theme(plot.title=element_text(hjust=0))
p <- p + theme(panel.border=element_blank())
p <- p + theme(panel.grid.minor=element_blank())
p <- p + theme(axis.ticks=element_blank())
p <- p + theme(axis.text.y = element_blank())
p <- p+ theme(axis.text.x = element_text(size = 14, colour = "black" ))
p <-p + theme(legend.position = "none")
# p <- p + scale_color_discrete()
p
# NOTE(review): ggsave() without a plot argument relies on last_plot() --
# inside a function the bare `p` above does not render; consider plot = p.
ggsave(paste( cTitle, y,".pdf" ,sep=""), width =5, height = 1, dpi = 120)
}
# Slope chart of individual measurements: one point+line per Y (participant),
# positioned by `group` on the x axis and X (the value) on the y axis.
#   df:     data frame to plot
#   X, Y, group: aesthetics captured by aes() below
#   title:  chart title, also used for the output file name
#   xlabel, ylabel: axis labels
#   ystat:  unused; kept for interface compatibility
# Saves "<title>.pdf" via ggsave()'s default last_plot() (behaviour unchanged).
plotSlopeParticipants <- function(df, X, Y, group, title,xlabel, ylabel, ystat){
  palette <- c("Visual" = "#998EC3", "Table"= "#F1A340")
  # identical positional mapping shared by the point and line layers
  slope_aes <- aes(x = as.factor(group), y = X, group = Y, color = group)
  p <- ggplot(df)
  p <- p + ggtitle(title)
  p <- p + geom_point(slope_aes, size = 3)
  p <- p + geom_line(slope_aes, size = 1)
  p <- p + scale_color_manual(values = palette, name = group)
  p <- p + labs(x = xlabel, y = ylabel)
  p <- p + theme_bw()
  p <- p + theme(plot.title = element_text(hjust = 0.5))
  p
  ggsave(paste( title, ".pdf" ,sep=""), width =4, height = 7, dpi = 120)
}
######## Notes - Alpha is used for transparency - different color shades for different values of a variable
### flag if we need facets or not
# Slope/line chart of per-worker accuracy (Acc) across the two Conditions,
# with "workerId - Acc" labels repelled at both ends of each line.
#   df:         data frame with Condition, Acc, workerId, Vis and diff columns
#   flag:       "facet" to facet by Vis; any other value for a single panel
#   chartTitle: plot title; combined with `flag` for the output file name
# Saves "<chartTitle><flag>.pdf" and returns the ggsave() result.
plotLineSlope <- function(df, flag, chartTitle) {
  group.colors <- c("VisualFirst" = "#998EC3", "TableFirst"= "#F1A340")
  # diff == 100 is the sentinel for excluded participants
  df <- df[df$diff !=100,]
  p <- ggplot(data = df,
       aes(x = Condition, y = Acc, group = workerId, color=Vis)) +
    # geom_jitter(width=0, height=1)+
    geom_line(aes(color = Vis, alpha=1,
          linetype=diff>0), size = 1)
  # Fix: was `ifelse(flag=='facet', p <- p + facet_grid(.~Vis), p)` -- ifelse()
  # is a vectorized function and was abused for its side effect; a plain `if`
  # is the correct scalar construct and behaves identically here.
  if (flag == 'facet') p <- p + facet_grid(.~Vis)
  # geom_point(aes(color = Vis, y = jitter(Acc, 5)), size = 4) #can add shape=diff to change shape of points+
  p <- p + geom_text_repel(data = df %>% filter(Condition == "Table"),
            aes(label = paste0(workerId, " - ", Acc)) ,
            hjust = 1,
            fontface = "bold",
            size = 4,
            direction="y",
            nudge_x = -0.5,
            # vjust=3,
            # box.padding = 0.7,
            segment.size = 0.2) +
    geom_text_repel(data = df %>% filter(Condition == "Visual"),
            aes(label = paste0(workerId, " - ", Acc)) ,
            hjust = 0,
            fontface = "bold",
            size = 4,
            direction="y",
            nudge_x = 0.5,
            segment.size = 0.2) +
    # move the x axis labels up top
    scale_x_discrete(position = "top") +
    theme_bw() +
    theme(panel.border = element_blank()) +
    theme(axis.title.y = element_blank()) +
    theme(axis.text.y = element_blank()) +
    theme(panel.grid.major.y = element_blank()) +
    theme(panel.grid.minor.y = element_blank()) +
    theme(axis.title.x = element_blank()) +
    theme(panel.grid.major.x = element_blank()) +
    theme(axis.text.x.top = element_text(size=12)) +
    theme(axis.ticks = element_blank()) +
    theme(panel.spacing = unit(2, "lines")) +
    theme(plot.title = element_text(size=14, face = "bold", hjust = 0.5)) +
    theme(plot.subtitle = element_text(hjust = 0.5)) +
    labs(title = chartTitle) +
    scale_color_manual(values= group.colors, name = "Table 1st or 2nd") +
    scale_alpha_continuous(guide = "none")
  # Fix: pass the plot explicitly -- inside a function the bare `p` did not
  # render, so ggsave()'s default last_plot() could save a stale plot.
  ggsave(paste( chartTitle,flag, ".pdf" ,sep=""), plot = p, width =4, height = 7, dpi = 120)
}
############# Barcharts as slope graphs
# Horizontal bar chart of each worker's accuracy difference.
#   df:        data frame with workerId and diff columns (diff == 100 = excluded)
#   plotTitle: used only for the output file name (the title itself is blanked)
# Saves "<plotTitle>.pdf" and returns the ggsave() result.
plotBarSlopes <- function(df, plotTitle){
  df <- df[df$diff !=100,]
  # positive diff is labelled as a win for the Visual condition
  df$Cond <- ifelse(df$diff > 0, "Visual Wins","Table Wins")
  # print(df$Cond)
  group.colors <- c("Visual Wins" = "#998EC3", "Table Wins"= "#F1A340")
  p <- ggplot(df,aes(x=workerId, y=diff,fill=Cond)) + geom_bar(stat = "identity", width=0.4, position=position_dodge()) + coord_flip()+
    ggtitle(plotTitle) + theme_bw() + theme(aspect.ratio = 2/1.3) +
    theme( axis.title.y=element_blank()) +
    theme( axis.title.x=element_blank()) +
    theme( plot.title=element_blank()) +
    scale_fill_manual(values=group.colors)+
    theme(axis.text=element_text(size=9, color = "black"))+
    theme(legend.position = "none")
  # Fix: `p` was built but never handed to ggsave(), which silently fell back
  # to last_plot() (whatever was most recently displayed). Save `p` explicitly.
  ggsave(paste( plotTitle, ".pdf" ,sep=""), plot = p, width =3, height = 5, dpi = 120)
}
# Older bar-chart variant: bars coloured by layout order (Vis) rather than by
# which condition won. Kept for reference; behaviour unchanged.
#   df:        data frame with workerId, diff, Vis columns (diff == 100 = excluded)
#   plotTitle: chart title, also used for the output file name
# Relies on ggsave()'s default plot = last_plot(), as the original did.
plotBarSlopesOld <- function(df, plotTitle){
  order.palette <- c("VisualFirst" = "#998EC3", "TableFirst"= "#F1A340")
  kept <- df[df$diff != 100, ]
  plt <- ggplot(kept, aes(x = workerId, y = diff, fill = Vis))
  plt <- plt + geom_bar(stat = "identity", width = 0.4, position = position_dodge())
  plt <- plt + coord_flip()
  plt <- plt + ggtitle(plotTitle)
  plt <- plt + theme_bw()
  plt <- plt + theme(aspect.ratio = 2/1.3)
  plt <- plt + theme(axis.title.y = element_blank())
  plt <- plt + scale_fill_manual(values = order.palette, name = c("Layout Order"))
  plt <- plt + theme(plot.title = element_text(size = 12, hjust = -0.2), axis.text = element_text(size = 12))
  plt <- plt + theme(legend.position = "none")
  ggsave(paste( plotTitle, ".pdf" ,sep=""), width =3, height = 5, dpi = 120)
}
|
73cd679df3ff5ba79dd1861819162ce67079b9e7
|
6609b80e83b49152fcd5172690ac6c8d77866cb4
|
/plot2.R
|
8379999c01ad8dcb22e14c54560cc1f570431ae0
|
[] |
no_license
|
disadone/ExData_Plotting1
|
7b88819555769b838345deb0c360de0f1673c685
|
9371fdd4eca55197d9a832cd161736c653db8f4b
|
refs/heads/master
| 2021-01-17T21:58:16.983560
| 2015-09-11T05:52:08
| 2015-09-11T05:52:08
| 42,284,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 929
|
r
|
plot2.R
|
# read data -- Global Active Power vs. time over two days, saved to plot2.png
# Use English day/month names regardless of the system locale
Sys.setlocale("LC_TIME","English")
fUrl<-"exdata-data-household_power_consumption/household_power_consumption.txt"
# Read a small sample first to learn column names and classes, then read only
# the rows for the two target days (skip/nrows avoids loading ~2M rows).
td<-read.table(fUrl,nrows=10,sep=";",header=TRUE,na.strings = "?")
coltd<-colnames(td)
classes<-sapply(td,class)
td<-read.table(fUrl,sep=";",skip=66637,nrows=2881,na.strings = "?",colClasses=classes)
colnames(td)<-coltd
# Combine Date + Time into a single POSIX timestamp column `dt`
datetime<-paste(as.character(td$Date),as.character(td$Time))
datetime<-strptime(datetime,"%d/%m/%Y %H:%M:%S")
td$Date<-as.Date(td$Date,"%d/%m/%Y")
td<-td[,3:ncol(td)]
td<-data.frame(dt=datetime,td)
daterange<-c(
as.POSIXlt(min(td$dt)),as.POSIXlt(max(td$dt)))
rm(datetime)
library(datasets)
cexSet<-0.95
#draw2
png(file = "plot2.png",width=480,height=480,bg="transparent")
# Fix: reference td$ columns directly instead of attach()/detach(), which can
# mask or be masked by same-named objects in the global environment.
plot(td$dt,td$Global_active_power,cex=cexSet,xaxt="n",type="l",xlab="",ylab="Global Active Power (kilowatts)")
# Fix: `format=` was accidentally placed inside seq(), where seq.POSIXt's
# `...` silently swallowed and ignored it; it belongs to axis.POSIXct().
# "%a" (abbreviated weekday) gives the Thu/Fri/Sat labels expected for this
# two-day window -- NOTE(review): confirm against the reference figure.
axis.POSIXct(1,at=seq(daterange[1],daterange[2],by="day"),format="%a")
dev.off()
|
51aa69a02a5e8053859325d5b58b7197789452ed
|
ad4c53a6246c9308faaee7672156fc56d1ae9ad6
|
/OGHHdash20190110.R
|
263171948c7b96e21349114008fe689459ff9ec8
|
[] |
no_license
|
PeaceCorps-ICPI/PEPFAR_analyses
|
7f0689cf07c9d98f671d042aaaac2d23158404dd
|
6a81241dc59a8fae9809887be44fa6bc2180f7ee
|
refs/heads/master
| 2020-04-16T16:21:35.531021
| 2019-09-13T14:16:51
| 2019-09-13T14:16:51
| 165,733,449
| 0
| 1
| null | 2019-04-16T19:43:40
| 2019-01-14T20:54:13
|
R
|
UTF-8
|
R
| false
| false
| 3,897
|
r
|
OGHHdash20190110.R
|
#Date updated: 01/10/2019
#Purpose: To subset OU X IM Genie Extract for OGHH dashboard
#Software: R
# ---------------------------------------------------------------------------
# Pipeline overview: read a PEPFAR "Genie" OU x IM extract, trim columns,
# drop all-empty rows, derive a cleaned FY2018 APR column, reshape to long
# format, append the prior FY15-16 dataset, and export CSVs for Tableau.
# ---------------------------------------------------------------------------
memory.limit(size = 900000)
getwd()
#Load Packages
#B.Installation directory - please change username to your username. This will direct R libraries/packages to install and run smoothly with RStudio.
.libPaths(c("C:/Users/knoykhovich/R", .libPaths()))
library(tidyverse)
library(ICPIutilities)
#read genie file with ICPI Utilities functions
df1 <- read_msd("GenieOUxIM_PC_20190110.txt")
colnames(df1)
#drop unnecessary columns
df2 <- df1 %>%
select(operatingunit, countryname, primepartner:implementingmechanismname, indicator:standardizeddisaggregate,
categoryoptioncomboname:agesemifine, sex, otherdisaggregate:coarsedisaggregate, fy2017_targets, fy2017q2, fy2017q4:fy2017apr, fy2018_targets, fy2018q2, fy2018q4:fy2019_targets)
#filter out zeroes and NAs
# (starts_with() is case-insensitive by default, so "FY" matches the
# lowercase fy20xx value columns)
df3 <- df2 %>%
filter_at(vars(starts_with("FY")), any_vars(!is.na(.) & .!=0))
#remove exit without graduation from program status
df4 <- filter(df3, standardizeddisaggregate != "ProgramStatus" | categoryoptioncomboname != "Exited without Graduation")
# Create new column called "calcfy2018apr" to pull Q4 values from Age/Sex disaggs and TransferExit disaggs into new calcfy2018aapr column, all other values take from fy2018apr and put into new "calcfy2018apr" column.
# NOTE(review): the first two OVC_SERV branches are subsumed by the fourth
# (any OVC_SERV row gets fy2018q4 regardless of disaggregate) -- confirm
# whether the disaggregate-specific branches were meant to differ.
df5 <- mutate(df4, calcfy2018aprclean = if_else(indicator == "OVC_SERV" & standardizeddisaggregate == "Age/Sex", fy2018q4,
if_else(indicator == "OVC_SERV" & standardizeddisaggregate == "TransferExit", fy2018q4,
if_else(indicator == "OVC_HIVSTAT" | indicator == "OVC_HIVSTAT_POS" | indicator == "OVC_HIVSTAT_NEG", fy2018q4,
if_else(indicator == "OVC_SERV", fy2018q4, fy2018apr)))))
#adjust fy2017 apr OVC SERV total N to fall under Total Numerator, instead of Program Status
df6 <- df5 %>%
mutate(standardizeddisaggregate = case_when(
indicator %in% c('OVC_SERV') ~ ' TotalNumerator',
TRUE~standardizeddisaggregate
))
#reshape from wide to long, putting all results and targets in one colum, removes rows with missing values
# NOTE(review): gather() reads df5, so the df6 adjustment above is never used
# downstream -- verify whether this line should read df6 instead.
longdf <- gather(df5, key = "period", "value", fy2017_targets:calcfy2018aprclean, na.rm=TRUE)
#adjust fy2017apr OVC SERV Total N to fall under Total Numerator, not Program Status
#dfalternate <- longdf %>%
# mutate(standardizeddisaggregate = case_when(
# indicator == "OVC_SERV" & period == "fy2017apr" ~ 'Total Numerator',
# TRUE~standardizeddisaggregate))
#export dataset to csv for reading into Tableau
write.csv(longdf, "GenieOUxIMclean_20190110.csv", row.names = FALSE)
#run checks of number of columns
ncol(df5)
ncol(longdf)
names(df5)
names(longdf)
#import prior dataset with Fy15-16 data
MSDv1 <- read_msd("MSDmergedFY15toFy18.txt")
ncol(MSDv1)
names(MSDv1)
unique(MSDv1$period)
#drop fy2017 targets, Fy2017q2, Fy2017q4, fy2017apr, fy2018 targets, fy2018q2, fy2019 targets from previous file
MSDv2 <- filter(MSDv1, period == "FY2015Q2" | period =="FY2015Q4" | period =="FY2015APR" | period =="FY2016_TARGETS" | period
=="FY2016Q2" | period =="FY2016Q4" | period =="FY2016APR" )
unique(MSDv2$period)
#merge previous dataset with FY15-FY16 data with current dataset
mergedf <- rbind(longdf, MSDv2)
#subset data without NULL values
finaldf <- select(filter(mergedf, value != "NULL"), c(operatingunit:value))
str(finaldf)
unique(finaldf$period)
#export dataset to csv for reading into Tableau
write.csv(finaldf, "MergedFile20190211.csv", row.names = FALSE)
#import funding data
Funds <- read_csv("FundingData.csv")
colnames(Funds)
#reshape from wide to long, putting all dollars in one column, and post name in another
longfund <- gather(Funds, key = "post", "amount", Botswana:Zambia)
write.csv(longfund, "FundingDataFY17to19.csv", row.names = FALSE)
|
9135537b93aec5eaa5aa9f592989d2751239c2eb
|
0230df8e6fb3aa652b8dbe5a8229231afe62b036
|
/man/getCategoryUrl.Rd
|
13e868c1c39192e6e459e54e9831880c1bf3ecca
|
[
"MIT"
] |
permissive
|
jongokko/N2H4
|
0426aeeb1fc83796105df6a948d40cffd6698a54
|
887019d063f52b23c9a54b1db013ae409b41d434
|
refs/heads/master
| 2021-06-08T13:08:41.189658
| 2016-11-07T15:17:24
| 2016-11-07T15:17:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 624
|
rd
|
getCategoryUrl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCategoryUrl.R
\name{getCategoryUrl}
\alias{getCategoryUrl}
\title{Get Category Url}
\usage{
getCategoryUrl()
}
\arguments{
\item{select}{from 1 to 7 numeric values which mean categories.}
}
\value{
Get data.frame(cate_name, cate_sub, sid1, sid2).
}
\description{
Get naver news sub_category names and urls recently.
Main category URLs are written manually as below.
cate_code<-c(100,101,102,103,104,105)
There are 7 categories in Naver news.
1: Politics, 2: Economics, 3: Social, 4: Living / Culture, 5: World, 6: IT / science, 7: Opinion
}
|
236ea0c89a384820b2e04c37aa58ecd67a9872e2
|
65ad8d10519f7d5ef05e9b99cb5f03986e904fdd
|
/R/2017_09_22_PEEC_Manuscript_Final_R.R
|
5b8093a9465af62a1d5ccff7f0d91966ad0f8697
|
[] |
no_license
|
adsteen/PEEC_MXSHS
|
63328f08c3f57e39e8ca7e12c56b6e84940862e2
|
d0c752a6567b548700abb38fa42d12e32ea7cc48
|
refs/heads/master
| 2021-01-25T00:39:18.966724
| 2018-02-28T15:49:19
| 2018-02-28T15:49:19
| 123,304,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,202
|
r
|
2017_09_22_PEEC_Manuscript_Final_R.R
|
rm(list=ls())
######################################################################################
######################################################################################
# Load required packages
library(plyr) # 'split-apply-combine'
library(lubridate) # Deals with dates
library(ggplot2) # plotting
library(reshape2)
library(gridExtra)
library(cowplot)
library(scales)
# Use a function I wrote
source("R/lm_stats.R") # start from working directory
#source("../R/lm_stats.R") # for Drew
#source("R/process_LM_PEEC_compiled.R")
####Could not figure out how to process the data using the process_LM_PEEC_compiled.R file########
# Set graphical theme
theme_set(theme_bw() + theme(text=element_text(size=9)))
#load data
thirteen <- read.csv ("data/MSXUT_v2_2013_updated.csv")
fifteen <- read.csv ("data/MSXUT_2015_compiled.csv")
tnmine <- read.csv ("data/2015_TN_compiled_fixed.csv")
sixteen <- read.csv ("data/2016_12_12_PEEC_2016_data.csv")
all_data_list <- list(thirteen=thirteen, fifteen=fifteen, tnmine=tnmine, sixteen=sixteen)
raw_df <- ldply(all_data_list, identity)
#raw_df <- rename(raw_df, c(".id" = "location"))
#add substrate names and set unlabled reps to A
raw_df$substrate <- factor(raw_df$substrate, labels=c("Arg-AP", "Gly-AP", "Leu-AP", "Pyr-AP", "Trypsin"))
raw_df$rep[raw_df$rep==""] <- "A"
times <- paste("2015_06_01", raw_df$time)
raw_df$Rtime <- ymd_hm(times) #makes time into time object
raw_df <- ddply(raw_df, c("site"), mutate, elapsed=as.numeric(Rtime-min(Rtime))/3600)
attr(raw_df$elapsed, "units") <- "hours"
#generate plot of raw data
p_raw <- ggplot(raw_df, aes(elapsed, y=RFU, shape=treatment, colour=site, fill=treatment)) +
geom_point() +
geom_smooth(method="lm", se=TRUE) +
facet_grid(when ~ fluorimeter ~ substrate, scales = "free") +
theme(text=element_text(size=9), axis.text.x=element_text(angle=-70, hjust=0))
# print(p_raw)
# Calculate the slopes
uncalibrated_slopes <- ddply(raw_df, c(".id", "fluorimeter", "substrate", "when", "treatment", "site", "volume", "rep"),
function(x) lm_stats(xvar="elapsed", yvar="RFU", d=x))
# Export the slopes to a .csv file
# write.csv(uncalibrated_slopes, "data/PEEC_TN_final_data_sheets/2016_12_12_PEEC_and_TN_uncalibrated_slopes.csv")
# Plot the slopes
p_slopes <- ggplot(uncalibrated_slopes, aes(x=substrate, y=slope, colour=site, shape=treatment)) +
geom_pointrange(aes(ymin=slope-slope.se, ymax=slope+slope.se), position=position_jitter(width=0.1)) +
facet_grid(when ~ fluorimeter, scales = "free_y") +
theme_bw()+
theme(text=element_text(size=9), axis.text.x=element_text(angle=-70, hjust=0)) +
ggtitle("Uncalibrated Slopes")
# print(p_slopes)
#load calibration data
thirteen_calib <- read.csv("data/2013_all_calib_data.csv")
fifteen_calib <- read.csv("data/2016_12_13_PEEC_2015_calib_nM.csv")
tnmine_calib <- read.csv("data/2015_TN_compiled_calib_fixed.csv")
sixteen_calib <-read.csv("data/2016_12_12_2016_peec_calib.csv")
#bind calibration data
all_calib_list <- list(thirteen_calib=thirteen_calib, fifteen_calib=fifteen_calib,
tnmine_calib=tnmine_calib, sixteen_calib=sixteen_calib)
all_sites_df_calib <- do.call(rbind.fill, all_calib_list)
#look at calibration data
p_calib <- ggplot(all_sites_df_calib, aes(x=conc.AMC.nM, y=RFU, colour= site)) +
geom_point() + #scatterplot
geom_smooth(method="lm", se=TRUE) +
facet_wrap(when~fluorimeter , scales="free") +
theme(text=element_text(size=9), axis.text.x=element_text(angle=90, hjust=0)) +
theme_bw()+
ggtitle("Total Calibration for AMC")
#print(p_calib)
#ggsave("plots/PEEC and TN Final/for_pub_2016_12_14_peec_and_tn_calibration_curve.tiff", height=2.5, width=7.5, units="in", dpi=300, compression='lzw')
# Calculate a slope of fluorescence vs concentration for each unique fluorimeter and site and year
calib_slopes <- ddply(all_sites_df_calib, c("site", "fluorimeter", "when", "fluorophore"), function(x) lm_stats(xvar="conc.AMC.nM", yvar="RFU", d=x))
# Merge the calibration slopes into the data of uncalibrated slopes of RFU vs time
calib_slopes_short <- plyr::rename(calib_slopes[ , c("site", "fluorimeter", "when", "slope")], c("slope" = "calib.slope"))
#write.csv(calib_slopes_short, "data/2016_12_19_calib_slopes_short_2.csv")
# Merge calibration slopes into main data frame of uncalibrate slopes
slopes_merged <- merge(uncalibrated_slopes, calib_slopes_short, by=c("site", "fluorimeter", "when"), all=TRUE)
# Calculate the calibrated slopes
slopes_merged$v0 <- slopes_merged$slope / slopes_merged$calib.slope / 1000 # CHANGING FROM NM TO uM
# Set units correctly
attr(slopes_merged$v0, "units") <- "umol per liter per hour"
# Pull out slopes for the saturation curves
# Fix: `.id="thirteen"` used `=` (named-argument syntax), so the condition was
# swallowed by subset()'s `...` and every row of slopes_merged was returned.
# `==` filters to the 2013 saturation-curve rows as the comment intends.
slopes_sat_curve <- subset(slopes_merged, .id == "thirteen")
# Eliminate saturation curve slopes from the main slopes data frame
# (keep only the 40-volume incubations from 2013, drop the other volumes)
slopes <- subset(slopes_merged, .id!="thirteen" | (.id == "thirteen" & volume == 40))
# Calculate average and standard deviation of slopes
slopes_summ <- ddply(slopes_merged, c("site", "fluorimeter", "when", "substrate", "treatment"), summarise,
v0.mean = mean(v0, na.rm=TRUE),
v0.sd = sd(v0, na.rm=TRUE))
#Cast
live_v0 <- slopes_summ[slopes_summ$treatment == "live", ]
live_v0$substrate <- revalue(live_v0$substrate, c("Arg-AP"="ArgAP", "Leu-AP"="LeuAP", "Trypsin"="Trypsin", "Gly-AP"="GlyAP", "Pyr-AP"="PyrAP"))
# remove NA values
live_v0 <- live_v0[!is.na(live_v0$v0.mean), ]
# Calculate median, geometric mean, and IQR; make fig S1
library(EnvStats)
gmean.v0s <- live_v0$v0.mean[live_v0$v0.mean > 0]
g.mean <- geoMean(gmean.v0s); print(g.mean)
v0.IQR <- quantile(gmean.v0s, c(0.25, 0.5, 0.75)); print(v0.IQR)
p_dist <- ggplot(live_v0, aes(x=v0.mean)) +
geom_density() +
geom_rug(sides="t") +
scale_x_log10() +
annotation_logticks(sides="b") +
xlab(expression(paste(log[10], " ", v[0], ", ", mu, "M ", hr^{-1})))
print(p_dist)
# ggsave("plots/S1_v0_distribution.png", height=3, width=4, units="in", dpi=300)
v0_c <- dcast(live_v0, site+fluorimeter+when~substrate, value.var= 'v0.mean')
v0_c_sd <- dcast(live_v0, site+fluorimeter+when~substrate, value.var="v0.sd")
colnames(v0_c_sd)[4:ncol(v0_c_sd)] <- paste0(colnames(v0_c_sd)[4:ncol(v0_c_sd)], ".sd") # add .sd to the st dev data frame names, to prevent confusion in the merge
#merge data into one "wide" frame
v0_wide <- merge(v0_c, v0_c_sd, by=c("site", "fluorimeter", "when"))
# make a boxplot of live v0 data
p_boxes <- ggplot(live_v0, aes(x=substrate, y=log10(v0.mean))) + # trying log v0 for kicks
geom_boxplot(outlier.shape=NA) +
geom_jitter(alpha=0.3) +
xlab("Substrate") +
ylab(expression(paste("log" [10], v[0], ", ", mu, "M ", hr^{-1}))) # see demo(plotmath)
print(p_boxes)
# ggsave("plots/v0_boxplot.tiff", height=2.5, width=7.08/2, units="in", dpi=300, compression="lzw")
# ANOVA with post-hoc testing of rates by substrate
# BUT WE NEED PAIRED ANOVA
# # Try it with un-transformed rates
# bad_aov <- aov(v0.mean ~ substrate, data=live_v0)
# # plot(bad_aov) # pretty bad!
#
# live_v0$log10.v0 <- log10(live_v0$v0.mean)
# live_v0_aov <- subset(live_v0, !is.na(log10.v0))
# v0_mod <- aov(log10.v0 ~ substrate, data=live_v0) # p<0.001, n=192
# # plot(v0_mod) # plots look fine I guess
# summary(v0_mod)
# v0_Tukey <- TukeyHSD(v0_mod)
# print(v0_Tukey) ## NOTES ON TUKEY:
#
# # Pyr < ARG
# # Pyr < GLY
# # Pyr < Leu
# # Pyr < TRP
# #######hydrolysis rates
# font size shared by ee_scatterplot() and the explicit plots below
fs <- 8
# Scatterplot of one enzyme's v0 (column `y.var` of v0_wide) against LeuAP on
# log-log axes, with error bars built from the matching ".sd" columns.
#   y.var:      column name (string) in v0_wide to place on the y axis
#   y.var.name: text intended for the y-axis label
# Reads `v0_wide` and `fs` from the enclosing environment; returns the ggplot.
ee_scatterplot <- function(y.var, y.var.name) {
# Create strings for errorbars
y.err.min <- paste0(y.var, "-", y.var, ".sd")
y.err.max <- paste0(y.var, "+", y.var, ".sd")
# Create string for axis label
# NOTE(review): eval() is not a plotmath function, so the axis label may render
# the literal text "eval(y.var.name)" instead of the substituted name; the
# trailing sep="" inside expression() is also inert -- confirm on a test plot.
y.ax.label <- expression(paste("log" [10], " ", eval(y.var.name), ", ", mu, "M L ", hr^{-1}), sep="")
# Make the plot
# NOTE(review): aes_string() is deprecated in current ggplot2; aes(.data[[y.var]])
# is the modern replacement -- confirm the pinned ggplot2 version before changing.
p <- ggplot(v0_wide, aes_string(x="LeuAP", y=y.var)) +
geom_point() +
geom_errorbar(aes_string(ymin=y.err.min, ymax=y.err.max)) +
geom_errorbarh(aes(xmin=LeuAP-LeuAP.sd, xmax=LeuAP+LeuAP.sd)) +
geom_smooth(colour="black", method="lm", se=TRUE) +
scale_x_log10() +
scale_y_log10() +
xlab(expression(paste("log" [10], " LeuAP", ", ", mu, "M L ", hr^{-1}))) +
ylab(y.ax.label) +
theme(axis.text.x=element_text(angle=-70, hjust=0), text=element_text(size=fs), axis.text=element_text(size=fs))
p
}
# Pairwise rate comparisons of each substrate against LeuAP.
# NOTE(review): the hand-written chains below duplicate what ee_scatterplot()
# does; they are kept because their labels/comments were tuned individually.
p_arg_leu <- ee_scatterplot("ArgAP", "ArgAP")
print(p_arg_leu)
#for arg vs leu
hyd_rate_comp_arg_leu <- ggplot(v0_wide, aes(x=LeuAP, y=ArgAP)) +
#geom_pointrange(aes(ymin=ArgAP-ArgAP.sd, ymax=ArgAP+ArgAP.sd)) +
geom_point() +
geom_errorbar(aes(ymin=ArgAP-ArgAP.sd, ymax=ArgAP+ArgAP.sd)) +
geom_errorbarh(aes(xmin=LeuAP-LeuAP.sd, xmax=LeuAP+LeuAP.sd)) +
geom_smooth(colour="black", method="lm", se=TRUE) +
scale_x_log10() +
scale_y_log10() +
ylab(expression(paste("log" [10], " ArgAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
xlab(expression(paste("log" [10], " LeuAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
theme(axis.text.x=element_text(angle=-70, hjust=0), text=element_text(size=fs), axis.text=element_text(size=fs))
print(hyd_rate_comp_arg_leu)
#ggsave("plots/PEEC and TN Final/for_pub_2016_12_21_peec_and_tn_arg_vs_leu.tiff", hyd_rate_comp_arg_leu, height=2.5, width=7.08, units="in", dpi=300, compression= 'lzw')
#for Gly vs leu
hyd_rate_comp_Gly_leu <- ggplot(v0_wide, aes(x=LeuAP, y=GlyAP)) +
#geom_pointrange(aes(ymin=GlyAP-GlyAP.sd, ymax=GlyAP+GlyAP.sd)) +
geom_point() +
geom_errorbar(aes(ymin=GlyAP-GlyAP.sd, ymax=GlyAP+GlyAP.sd)) +
geom_errorbarh(aes(xmin=LeuAP-LeuAP.sd, xmax=LeuAP+LeuAP.sd)) +
geom_smooth(colour="black", method="lm", se=TRUE) +
scale_x_log10() +
scale_y_log10() +
ylab(expression(paste("log" [10], " GlyAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
xlab(expression(paste("log" [10], " LeuAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
theme(axis.text.x=element_text(angle=-70, hjust=0), text=element_text(size=fs), axis.text=element_text(size=fs))
#facet_wrap(~when, scales="free_y") +
print(hyd_rate_comp_Gly_leu)
#ggsave("plots/PEEC and TN Final/for_pub_2016_12_21_peec_and_tn_gly_vs_leu.tiff", hyd_rate_comp_Gly_leu, height=2.5, width=7.08, units="in", dpi=300, compression= 'lzw')
#for GlyGlyArg vs. Leu
# Trypsin activity (GlyGlyArg substrate) vs. LeuAP.
hyd_rate_comp_GlyGlyArg_leu <- ggplot(v0_wide, aes(x=LeuAP, y=Trypsin)) +
geom_point() +
#geom_pointrange(aes(ymin=GlyGlyArgAP-GlyGlyArgAP.sd, ymax=GlyGlyArgAP+GlyGlyArgAP.sd)) +
geom_errorbar(aes(ymin=Trypsin-Trypsin.sd, ymax=Trypsin+Trypsin.sd)) +
geom_errorbarh(aes(xmin=LeuAP-LeuAP.sd, xmax=LeuAP+LeuAP.sd)) +
geom_smooth(colour="black", method="lm", se=TRUE) +
scale_x_log10() +
scale_y_log10() +
ylab(expression(paste("log" [10], " Trypsin", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
xlab(expression(paste("log" [10], " LeuAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
theme(axis.text.x=element_text(angle=-70, hjust=0), text=element_text(size=fs), axis.text=element_text(size=fs))
print(hyd_rate_comp_GlyGlyArg_leu)
#ggsave("plots/PEEC and TN Final/2016_03_11_glyglyarg_vs_leu.tiff", hyd_rate_comp_GlyGlyArg_leu, height=2.5, width=7.08, units="in", dpi=300, compression= 'lzw')
#for Pyr vs. Leu
hyd_rate_comp_Pyr_leu <- ggplot(v0_wide, aes(x=LeuAP, y=PyrAP)) +
geom_point() +
#geom_pointrange(aes(ymin=GlyGlyArgAP-GlyGlyArgAP.sd, ymax=GlyGlyArgAP+GlyGlyArgAP.sd)) +
geom_errorbar(aes(ymin=PyrAP-PyrAP.sd, ymax=PyrAP+PyrAP.sd)) +
geom_errorbarh(aes(xmin=LeuAP-LeuAP.sd, xmax=LeuAP+LeuAP.sd)) +
geom_smooth(colour="black", method="lm", se=TRUE) +
scale_x_log10() +
scale_y_log10() +
ylab(expression(paste("log" [10], " PyrAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
xlab(expression(paste("log" [10], " LeuAP", ", ", mu, "M L ", hr^{-1}))) + # see demo(plotmath)
theme(axis.text.x=element_text(angle=-70, hjust=0), text=element_text(size=fs), axis.text=element_text(size=fs))
print(hyd_rate_comp_Pyr_leu)
#create a figure that has the above 3 plots in one
#going to try using the package gridExtra package cowplot
# library("gridExtra")
# library("cowplot")
# 2x2 composite of the four substrate-vs-LeuAP panels (plot_grid is cowplot).
p_all_hydrolysis <- plot_grid(hyd_rate_comp_arg_leu, hyd_rate_comp_Gly_leu, hyd_rate_comp_GlyGlyArg_leu,
hyd_rate_comp_Pyr_leu, NULL, labels=c("A", "B", "C", "D"), nrow=2, ncol=2)
#save_plot("plots/PEEC and TN Final/2017_03_27_all_hydrolysis_comp_v3.tiff", p_all_hydrolysis, ncol = 2, nrow = 2)
print(p_all_hydrolysis)
#same plot without pyrleu comp
p_all_hydrolysis_2 <- plot_grid(hyd_rate_comp_arg_leu, hyd_rate_comp_Gly_leu, hyd_rate_comp_GlyGlyArg_leu,
NULL, labels=c("A", "B", "C", ""), nrow=2, ncol=2)
#save_plot("plots/PEEC and TN Final/2017_08_22_all_hydrolysis_comp_v4.tiff", p_all_hydrolysis_2, ncol = 2, nrow = 2)
# NOTE(review): this prints the 4-panel p_all_hydrolysis again; it probably
# was meant to print p_all_hydrolysis_2 built just above -- confirm intent.
print(p_all_hydrolysis)
# save_plot("plots/PEEC and TN Final/2017_03_27_all_hydrolysis_comp_v2.tiff", p_all_hydrolysis,
# base_height=2.5, base_width=7.08, units="in", dpi=400, compression= 'lzw')
save_plot("plots/scatter_plots.tiff", p_all_hydrolysis,
base_height=5, base_width=6, units="in", dpi=400, compression= 'lzw')
# Do proper linear model analysis
# #Arg vs. Leu
# mArgLeu <- lm(ArgAP ~ LeuAP, data=v0_wide)
# summary(mArgLeu)
# # plot(mArgLeu)
# Log-log regressions of each substrate rate on LeuAP.
m_log_ArgLeu <- lm(log10(ArgAP) ~ log10(LeuAP), data=v0_wide) ## Added log to normalize the plots
summary(m_log_ArgLeu)
#plot(m_log_ArgLeu)
# #Gly vs. Leu
# mGlyLeu <- lm(GlyAP ~ LeuAP, data=v0_wide)
# summary(mGlyLeu)
# plot(mGlyLeu) # plot function: returns homoscedasticity tests, Hmm, this lm looks pretty abysmal
m_log_GlyLeu <- lm(log10(GlyAP) ~ log10(LeuAP), data=v0_wide)
summary(m_log_GlyLeu)
#plot(m_log_GlyLeu)
#GGR vs. Leu
# mGGRLeu <- lm(Trypsin ~ LeuAP, data=v0_wide)
# summary(mGGRLeu)
# plot(mGGRLeu)
m_log_GGRLeu <- lm(log10(Trypsin) ~ log10(LeuAP), data=v0_wide) ## Added log to normalize the plots
summary(m_log_GGRLeu)
# plot(m_log_GGRLeu)
#pyr vs. leu
mPyrLeu <- lm(log10(PyrAP) ~ log10(LeuAP), data=v0_wide)
summary(mPyrLeu)
#interquartile range calculations for trypsin to leu-AMC
#mGGRLeu <- lm(Trypsin ~ LeuAP, data=v0_wide)
# min(v0_wide$LeuAP)
# max(v0_wide$LeuAP)
# quantile(v0_wide$LeuAP, c(0.25, 0.5, 0.75))
#
# min(v0_wide$Trypsin)
# max(v0_wide$Trypsin)
# quantile(v0_wide$Trypsin, c(0.25, 0.5, 0.75))
# Per-sample ratio of trypsin activity to LeuAP activity, plotted as a density.
v0_wide$tryp.ratio <- v0_wide$Trypsin / v0_wide$LeuAP
ggplot(v0_wide, aes(x=tryp.ratio)) +
#geom_histogram(bins=10) +
geom_density() +
geom_rug(sides="t") +
scale_x_log10() +
annotation_logticks() +
xlab(expression(paste(v[0], ",trypsin ", "/ ", v[0], ",LeuAP")))
ggsave("plots/S2_tryp_ratio_distribution.tiff", height=3, width=3, units="in", dpi=300, compression="lzw")
#ratio_df <- subset(v0_wide, select = c("site", "fluorimeter", "when", "LeuAP", "Trypsin", "PyrAP"))
# Substrate/LeuAP rate ratios, melted to long form for comparison of their
# coefficients of variation (cvequality package).
v0_wide$pyr.ratio <- v0_wide$PyrAP / v0_wide$LeuAP
v0_wide$arg.ratio <- v0_wide$ArgAP / v0_wide$LeuAP
v0_wide$gly.ratio <- v0_wide$GlyAP / v0_wide$LeuAP
ratio_df <- melt(v0_wide[ , c("site", "fluorimeter", "when", "tryp.ratio", "pyr.ratio", "arg.ratio", "gly.ratio")], id.vars=c("site", "fluorimeter", "when"), value.name="ratio")
# Drop non-positive and missing ratios (log scale below).
ratio_df <- ratio_df[ratio_df$ratio > 0 & !is.na(ratio_df$ratio), ]
library(cvequality)
ratio_df_no_pyr <- subset(ratio_df, variable != "pyr.ratio")
ratio_df_no_pyr$log.ratio <- log10(ratio_df_no_pyr$ratio)
#with(ratio_df_no_pyr, asymptotic_test(log.ratio, variable))
#asymptotic_test(ratio_df_no_pyr$log.ratio, ratio_df_no_pyr$variable)
p_ratio_dist <- ggplot(ratio_df, aes(x=ratio, fill=variable)) +
geom_density(alpha=0.3) +
scale_x_log10()
print(p_ratio_dist)
# ggsave("PEEC_Final/Final_plots/PEEC and TN Final/supplemental/S2_tryp_ratio_distribution.png", height=3, width=3, units="in", dpi=300)
# ggplot(ratio_df, aes(x=variable, y=log10(ratio))) +
# geom_boxplot() +
# geom_point(position=position_jitter(width=0.3))
# ratio_df$tryp.ratio <- ratio_df$Trypsin / ratio_df$LeuAP
# ggplot(ratio_df, aes(x=tryp.ratio)) +
# #geom_histogram(bins=10) +
# geom_density() +
# geom_rug(sides="t") +
# scale_x_log10() +
# annotation_logticks() +
# xlab(expression(paste(v[0], ",trypsin ", "/ ", v[0], ",LeuAP")))
# ggsave("PEEC_Final/Final_plots/PEEC and TN Final/supplemental/S2_ratio_distribution.png", height=3, width=3, units="in", dpi=300)
ratios_asymptotic_test <- with(ratio_df, mslr_test(nr=1e4, ratio, variable))
# NOTE(review): after melt(), ratio_df has columns variable/ratio only; the
# three $tryp.ratio accesses below return NULL, so geoMean/max/min will fail
# or warn -- these probably were meant to run on v0_wide$tryp.ratio.
geoMean(ratio_df$tryp.ratio)
max(ratio_df$tryp.ratio)
min(ratio_df$tryp.ratio)
quantile(ratio_df$ratio, c(0.25, 0.5, 0.75))
# NOTE(review): df.LeuGGR is not defined anywhere visible in this file; the
# block below errors unless it is created elsewhere -- confirm.
m2 <- lm(LeuAP ~ Trypsin, data=df.LeuGGR)
summary(m2)
den <- density(df.LeuGGR$LeuAP)
den2 <- density(df.LeuGGR$Trypsin)
plot(den)
plot(den2)
quantile(df.LeuGGR)
# NOTE(review): mGGRLeu is only defined in commented-out code above, and no
# v0_mean column exists; this plot cannot run as written.
p_density <- ggplot(mGGRLeu, aes(x=v0_mean)) +
geom_density()
print(p_density)
#####################################
#finish this
#make a density plot of trypsin to leu
v0_trypsin_leu <- data.frame(v0_wide$site, v0_wide$fluorimeter, v0_wide$when, v0_wide$Trypsin, v0_wide$LeuAP, v0_wide$Trypsin.sd, v0_wide$LeuAP.sd)
names(v0_trypsin_leu) <- c("site", "fluorimeter", "when", "Trypsin", "LeuAP", "Trypsin.sd", "LeuAP.sd")
# NOTE(review): v0_trypsin_leu has no v0_mean column either -- unfinished code.
p_density <- ggplot(v0_trypsin_leu, aes(x=v0_mean)) +
geom_density()
print(p_density)
#Mapping
#install.packages("ggmap")
#install.packages("RColorBrewer")
#install.packages("ggrepel")
library(ggplot2)
library(ggrepel)
library(ggmap)
library(RColorBrewer)
#Generate Large map
#big_map <- get_googlemap(center=c(-83.921761, 35.959802), zoom=10, maptype = "terrain")
# Regional overview map covering both TN and PA sites.
big_map <- get_googlemap(center= c(-80.365531, 38.365531), zoom=6, maptype = "terrain")
p_big <- ggmap(big_map) +
#geom_polygon(data=map_data("state"), aes(x=long, y=lat, group=group), col="black", fill=NA) +
#coord_map(xlim = c(-80, -68), ylim=c(32.5, 42.5)) +
theme(text=element_text(size=8))
print(p_big)
#Add a detailed pennsylvania map
PAbase <- get_googlemap(center=c(-74.9149, 41.17067), zoom=13, maptype = "terrain")
p_PAbase <- ggmap(PAbase) +
theme(text=element_text(size=9))
print(p_PAbase)
# Load data
coords <- read.csv("data/2015_06_03_peec_sample_sites.csv")
coords_2 <- read.csv("data/2015_06_01_sampling_sites.csv")
# NOTE(review): ggmap() expects a map raster (e.g. big_map), but p_big is
# already a ggplot object -- this likely warns/misbehaves; confirm it should
# be ggmap(big_map) here.
p_whole_both <- ggmap(p_big) +
geom_point(data=coords, aes(x=long, y=lat), colour="black") +
geom_point(data=coords_2, aes(x=long, y=lat), colour="red")
print(p_whole_both)
#ggsave("plots/PEEC and TN Final/p_whole_both.tiff", p_whole_both, height=2.5, width=7.08, units="in", dpi=300, compression='lzw')
#Generate a detailed TN map
# NOTE(review): zoom= is not a geom_point() aesthetic/argument; it is silently
# ignored by ggplot2 -- zoom belongs in get_googlemap().
p_TN_map <- ggmap(big_map) +
geom_point(data=coords, aes(x=long, y=lat), colour="red", zoom=8) +
ggrepel::geom_label_repel(data=coords, aes(x=long, y=lat, label=site.init), size=3, colour="black", fill="white")
print(p_TN_map)
#ggsave("plots/PEEC and TN Final/TN_map_two.tiff", p_TN_map, height=2.5, width=7.08, units="in", dpi=300, compression= 'lzw')
p_PA_map <- ggmap(PAbase) +
geom_point(data=coords_2, aes(x=long, y=lat), colour="red", zoom=8) +
ggrepel::geom_label_repel(data=coords_2, aes(x=long, y=lat, label=site.init), size=3, colour="black", fill="white")
print(p_PA_map)
#ggsave("plots/PEEC and TN Final/PA_map_final.tiff", p_PA_map, height=2.5, width=7.08, units="in", dpi=300, compression= 'lzw')
#create a figure that has the above 2 maps in one plot
#load gridExtra and cowplot packages
library("gridExtra")
library("cowplot")
#Make a plot with gridextra that has the TN and PA map with lables, this will be the base for the less detailed map
bottom_row_map <- plot_grid(p_TN_map, p_PA_map, labels = c('TN', 'PA'), ncol = 1, align = 'h', rel_widths = c(1, 1))
print(bottom_row_map)
#Generate a plot that has all three maps in one image
p_maps_cow_top <- plot_grid(p_whole_both, bottom_row_map,
NULL, labels=c('A', ''), ncol=1, rel_heights = c(1,1))
print(p_maps_cow_top)
#save_plot("plots/PEEC and TN Final/2017_08_28_finished_map.tiff", p_maps_cow_top, ncol = 2, nrow = 2)
# Side-by-side composite of the two detail maps (third slot intentionally empty).
p_all_maps <- plot_grid(p_TN_map, p_PA_map, NULL,
                        nrow=1, ncol=2)
print(p_all_maps)
# NOTE(review): the save_plot() call below had only its first line commented
# out, leaving the trailing argument line as a bare, syntactically invalid
# top-level expression that broke sourcing this file. The whole call is now
# commented out; uncomment both lines together to save the figure.
# save_plot("plots/PEEC and TN Final/2017_03_27_all_maps.tiff", p_all_maps,
#           base_height=2.5, base_width=7.08, units="in", dpi=400, compression= 'lzw')
|
3f633827c45a6cad3c92d89e20a5c8deec3b6bc8
|
dd1b092a182ee6be842493115e459afdb2211a70
|
/r/数据挖掘/交叉树/随机森林.R
|
be35c01fd7790194992e8833308cf579db81e7de
|
[] |
no_license
|
qingfengliu/statics_use
|
e8db1721b73d8567ca5947d52f3c81bd9d8dfd83
|
ba89e6d657f0fdc0d4246865709ffddc443ad3fc
|
refs/heads/master
| 2022-12-10T19:41:32.965452
| 2022-12-01T02:04:09
| 2022-12-01T02:04:09
| 207,328,002
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,460
|
r
|
随机森林.R
|
######################### Random forest
library("randomForest")
# Load the mail-shot marketing data set (tab-delimited, with header) and drop
# the first (ID) column. NOTE(review): hard-coded Windows path; the string is
# left untouched as it is runtime input.
MailShot<-read.table(file="D:\\书籍资料整理\\《R语言数据挖掘(第2版)》R代码和案例数据\\邮件营销数据.txt",header=TRUE)
MailShot<-MailShot[,-1]
set.seed(12345)
# proximity = compute observation proximities
# randomForest(output ~ inputs, data=<data.frame>, mtry=k, ntree=M, importance=TRUE)
# mtry (k): number of input variables tried in each random subset. For a
# classification forest the default is sqrt(p); for a regression forest it
# is p/3.
# ntree (M): number of trees in the forest, default 500.
# importance=TRUE: compute variable-importance measures.
# The randomForest return value is a list, including:
#   predicted : per-observation out-of-bag (OOB) prediction (majority vote, or
#               mean for regression).
#   confusion : confusion matrix based on OOB observations.
#   votes     : (classification) fraction of trees voting for each class.
#   oob.times : how many times each observation was out-of-bag, i.e. left out
#               of the bootstrap sample; this affects the OOB error estimate.
#   err.rate  : overall OOB error rate plus per-class error rates.
#   importance: variable-importance matrix (see package docs for details).
(rFM<-randomForest(MAILSHOT~.,data=MailShot,importance=TRUE,proximity=TRUE))
# Here the OOB error rate is about 42% (NO ~33%, YES ~54%) -- a poor model.
# Per-observation class-vote fractions
head(rFM$votes)
# Number of times each observation was out-of-bag
head(rFM$oob.times)
DrawL<-par()
par(mfrow=c(2,1),mar=c(5,5,3,1))
# Equivalent to plotting err.rate: OOB error vs. number of trees.
plot(rFM,main="随机森林的OOB错判率和决策树棵树")
# Black line = overall error, red = NO error, green = YES error. The curves
# stabilise after ~380 trees, so ntree could be set to 380 here.
# Detect observations near the decision boundary.
plot(margin(rFM),type="h",main="边界点探测",xlab="观测序列",ylab="比率差")
# margin = (fraction of trees voting for the correct class) minus (largest
# fraction voting for another class): positive = correctly classified,
# negative = misclassified, near zero = close to the boundary.
# NOTE(review): par(DrawL) restores the full par() list and emits warnings for
# read-only parameters; par(mfrow=c(1,1)) would be quieter.
par(DrawL)
# Predict on the full (training) data set
Fit<-predict(rFM,MailShot)
# Confusion matrix
ConfM5<-table(MailShot$MAILSHOT,Fit)
# Misclassification rate (on training data -- optimistic, ~0.03)
(E5<-(sum(ConfM5)-sum(diag(ConfM5)))/sum(ConfM5))
# Number of terminal nodes per tree:
# treesize(forest, terminal=TRUE)  -> leaf count only
# treesize(forest, terminal=FALSE) -> all nodes
head(treesize(rFM))
# Inspect part of the first tree; labelVar=TRUE labels the best split variable
# at each node (see the randomForest manual for the column meanings).
head(getTree(rfobj=rFM,k=1,labelVar=TRUE))
# Variable-importance measures.
# Bar chart (column 3 = mean decrease in accuracy)
barplot(rFM$importance[,3],main="输入变量重要性测度(预测精度变化)指标柱形图")
box()
# Dot chart
importance(rFM,type=1)
varImpPlot(x=rFM, sort=TRUE, n.var=nrow(rFM$importance),main="输入变量重要性测度散点图")
|
276a29e2146873c3d591f03bfa260a59fabdd1a0
|
6c2ef39649935905e25d7f1ce37ad4a34a5e29fb
|
/confidence_interval_pipeline_pruned/scripts/confidence_intervals_mega_3.R
|
871d1fe1df59d253e97a458d9737db8b66f345d4
|
[] |
no_license
|
jzou1115/CFW_code
|
41538be44f5be753b2af25f90662b8a7a4d37ef7
|
5dcc45270f81cad7a7aee2dc18011da4f93867b6
|
refs/heads/master
| 2023-08-23T00:03:31.697078
| 2021-09-24T03:57:56
| 2021-09-24T03:57:56
| 337,891,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 605
|
r
|
confidence_intervals_mega_3.R
|
# Snakemake-driven script: evaluates QTL confidence intervals from genome scans.
# evaluate.confidence.intervals.logP() is defined in scripts/outbred.pruned.R.
library(parallel)
library(graphics)
source("scripts/outbred.pruned.R")
source("scripts/emma.R")
# Input/output directories supplied by the Snakemake rule parameters.
scandir <- snakemake@params[["scan_dir"]]
ci_out <- snakemake@params[["ci_out"]]
evaluate.confidence.intervals.logP( qtl.dir = ci_out,
scan.dir =scandir, pdf.file="CI.pdf",
quantiles=c(0.25,0.5,0.75,0.9,0.95),
dist.thresh=5.0e6,
CI.file =paste0(ci_out,"/CI_pruned.txt"), #output file
fine.dir = NULL,
mc.cores=1)
|
22a0dcb0beb92b31e8724de7a3f5b83b31bf979c
|
f7aed4a1d8fc64c3b84c8f2fb8280c124521cc24
|
/R/us.R
|
0556bb90ea02979e2f05dbee6b8a74655ac08386
|
[
"MIT"
] |
permissive
|
loganstundal/spatialhelpers
|
17fd80e05d3d549a700a14b4e1daec5a73ff3f13
|
e1a7b26e648128b816b80913d5747186164c77b8
|
refs/heads/main
| 2023-06-27T18:02:30.402164
| 2021-08-04T00:10:41
| 2021-08-04T00:10:41
| 388,148,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 475
|
r
|
us.R
|
#' A simple feature collection of 48 contiguous states
#'
#' @description A dataset containing geometry for 48 contiguous US states.
#'
#' @format A data frame with 48 rows and 3 variables:
#' \describe{
#' \item{fips}{Unique state FIPS code}
#' \item{name}{State name}
#' \item{geometry}{Spatial geometry}
#' }
#' @source \url{https://www.census.gov/geographies/mapping-files/time-series/geo/carto-boundary-file.html}
#'
#' @examples
#' data(us)
#' head(us)
"us"
|
8f096551f390f73ccddd3f3c5de34e6a56d41418
|
a20921ccfe5a96a2362c732ccc9b09be0918dd67
|
/server.R
|
d7ac998555bf2a4b4b76fa24f0af785b3a8a8f39
|
[] |
no_license
|
HF-Research/ht2
|
897e309a6cd66c9bd13759781fc8067cd2fcde34
|
2b4ee1d6191ed3d28e7ab0bb59e31a79ee980bbb
|
refs/heads/master
| 2022-04-28T06:47:16.202058
| 2022-03-24T13:56:19
| 2022-03-24T13:56:19
| 152,376,713
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,913
|
r
|
server.R
|
# Top-level Shiny server: pulls in per-tab server modules via source() and
# wires up the IE warning and bookmarking logic below.
shinyServer(function(input, output, session) {
# source("global.R")
# session$onSessionEnded(stopApp)
# Each sourced file is evaluated in this function's environment (local=TRUE)
# so the modules can see input/output/session; $value unwraps source()'s
# return list.
source(file.path("server", "main_server.R"),
encoding = "UTF-8",
local = TRUE)$value
source(file.path("server", "chd_server.R"), local = TRUE)$value
# source(file.path("server", "risk_predict_server.R"), local = TRUE)$value
source(file.path("server", "about_server.R"), local = TRUE)$value
source(file.path("server", "about_chd_server.R"), local = TRUE)$value
# POPUP IE WARNING --------------------------------------------------------
# input$check is set client-side; the string "TRUE" flags an IE browser.
observeEvent(label = "IEwarning", input$check, {
if (input$check == "TRUE") {
showModal(
modalDialog(
title = "HjerteTal does not work with Internet Explorer",
easyClose = TRUE,
fade = TRUE,
tags$p(
"Please choose Chrome / Firefox / Safari / Edge"
)
)
)
}
})
# BOOKMARKING -------------------------------------------------------------
# Input ids that must never be bookmarked (DT/plotly/leaflet internal state,
# download buttons, table selection bookkeeping, etc.).
# NOTE(review): the id "data_vis_tabs_chd" was previously broken across two
# source lines, producing a string containing an embedded newline and spaces
# that could never match the real input id; it is repaired here. A few ids
# appear twice in the original list; duplicates are harmless to
# setBookmarkExclude() and are preserved as-is.
always_exclude <- c(
  "check",
  # "agCHD", "agCVD", "year",
  "rate_count", "rate_count_chd", "about_selection_chd", "data_vis_tabs_chd",
  "downloadMapsFemale_bttn", "downloadMapsMale_bttn", "data_vis_tabs", "download_line",
  "table_rates_rows_selected", "table_counts_rows_selected", "table_rates_rows_current", "table_rates_rows_all",
  "table_rates_search", "table_counts_cell_clicked", "table_rates_cell_clicked", "table_rates_rows_all",
  "table_counts_rows_current", "download_bar", "map_male_zoom", "map_male_zoom",
  "map_male_center", "downloadMapsMale_bttn", "download_line", "map_female_center",
  "map_female_bounds", "map_male_bounds", "plotly_hover-A", "plotly_afterplot-A",
  "table_counts_chd_rows_all", "table_counts_chd_cell_clicked", "table_counts_chd_rows_selected", "table_counts_chd_state",
  "table_counts_chd_search", "table_counts_chd_rows_current", "table_counts_chd_rows_all", "table_faq_state",
  "table_diag_search", "table_diag_rows_all", "table_faq_rows_current", "table_diag_rows_selected",
  "table_diag_cell_clicked", "table_faq_cell_clicked", "table_faq_chd_rows_current", "table_diag_chd_search",
  "table_faq_chd_cell_clicked", "table_diag_chd_rows_current", "table_edu_cell_clicked", "table_edu_rows_current",
  "table_ethnicity_state", "table_med_state", "table_opr_rows_selected", "table_opr_rows_selected",
  "table_opr_state", "table_ethnicity_rows_current", "table_ethnicity_rows_selected", "table_pop_rows_current",
  "table_pop_rows_selected", "table_edu_state", "table_pop_cell_clicked", "table_pop_rows_all",
  "table_ethnicity_cell_clicked", "table_opr_rows_current", "table_edu_rows_all", "table_faq_cell_clicked",
  "table_med_rows_selected", "table_edu_search", "table_ethnicity_rows_all", "table_pop_search",
  "table_pop_state", "table_ethnicity_search", "table_opr_search", "table_opr_cell_clicked",
  "table_med_search", "table_edu_rows_selected", "table_med_rows_current", "table_diag_chd_cell_clicked",
  "table_med_rows_all", "table_opr_rows_all", "table_med_cell_clicked", "table_counts_search",
  "table_counts_rows_all", "table_counts_state", "table_rates_state", "map_female_zoom",
  ".clientValue-default-plotlyCrosstalkOpts", "table_faq_rows_all", "table_faq_rows_selected", "table_faq_search",
  "table_diag_rows_current", "table_diag_state", "table_faq_chd_state", "table_diag_chd_rows_selected",
  "table_diag_chd_state", "table_diag_chd_rows_all", "table_faq_chd_rows_selected", "table_faq_chd_rows_all",
  "table_faq_chd_search", "table_diag_row_last_clicked", "table_counts_chd_row_last_clicked", "table_faq_chd_row_last_clicked",
  "table_faq_chd_row_last_clicked", "table_rates_row_last_clicked", "table_counts_row_last_clicked", "table_diag_chd_row_last_clicked",
  "table_faq_row_last_clicked", "table_med_row_last_clicked", "table_edu_row_last_clicked", "table_ethnicity_row_last_clicked",
  "table_counts_columns_selected", "table_rates_cells_selected", "table_counts_cells_selected", "table_rates_columns_selected",
  "table_counts_chd_cells_selected", "table_counts_chd_columns_selected", "table_pop_row_last_clicked", "map_male_shape_mouseover",
  "map_male_shape_mouseout", "map_female_shape_mouseout", "map_female_shape_mouseover", "table_opr_row_last_clicked",
  "plotly_relayout-A", "table_faq_columns_selected", "table_faq_cells_selected", "table_updates_rows_selected",
  "table_updates_search", "table_updates_state", "table_updates_rows_all", "table_updates_columns_selected",
  "table_updates_rows_current", "table_updates_cell_clicked", "table_updates_cells_selected", "table_diag_cells_selected",
  "table_diag_cells_selected", "table_diag_columns_selected", "table_opr_columns_selected", "table_opr_cells_selected",
  "table_med_columns_selected", "table_med_cells_selected", "table_edu_cells_selected", "table_edu_columns_selected",
  "table_ethnicity_cells_selected", "table_ethnicity_columns_selected", "table_pop_columns_selected", "table_pop_cells_selected"
)
# Reactive wrapper around the active top-level tab id.
barChange <- reactive({input$bar})
# Everything below only fires when barChange() is invalidated (i.e. when
# input$bar changes)
# NOTE(review): if input$bar matches none of the four branches,
# bookmarkingWhitelist is never assigned and setdiff() below errors.
observeEvent(barChange(),label = "BkmrkExclude", {
if (input$bar == "cvd") {
bookmarkingWhitelist <- c("bar", "varCVD", "oCVD", "agCVD", "year")
} else if (input$bar == "chd") {
bookmarkingWhitelist <- c("bar", "oCHD", "var_chd", "agCHD")
} else if (input$bar == "helpCVD") {
bookmarkingWhitelist <- c("bar", "about_selection")
} else if (input$bar == "helpCHD") {
bookmarkingWhitelist <- c("bar")
}
# browser()
# Exclude every input that is not whitelisted for the current tab, plus the
# static always_exclude list defined above.
toExclude <- setdiff(names(input), bookmarkingWhitelist)
toExclude <- c(always_exclude, toExclude)
setBookmarkExclude(toExclude)
})
toListen <- reactive(label = "bkmrkListen", {
# Put any events that are to be bookmarked in here. These events should NOT
# be in the always_exclude() list
req(input$varCVD) # This stops multiple bookmark setting during initialization
list(input$bar,
input$oCVD,
input$varCVD,
input$agCVD,
input$year,
input$oCHD,
input$agCHD,
input$var_chd,
input$about_selection)
})
# Re-bookmark whenever any of the watched inputs changes, and push the new
# bookmark URL into the browser's query string.
observeEvent(toListen(), label = "doBookmark", {
session$doBookmark()
})
onBookmarked(function(url) {
updateQueryString(url)
})
})
|
217e791264ea68a27f324eee0ae84ddc54080502
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/purrr/examples/array-coercion.Rd.R
|
838906b322fe2f4935cd260ed0a219e23787d9f5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
r
|
array-coercion.Rd.R
|
# Runnable examples for purrr's array_branch()/array_tree() (extracted from
# the package's Rd file). NOTE(review): the %>% pipe used below is
# re-exported by purrr -- presumably from magrittr; confirm for older purrr.
library(purrr)
### Name: array-coercion
### Title: Coerce array to list
### Aliases: array-coercion array_branch array_tree
### ** Examples
# We create an array with 3 dimensions
x <- array(1:12, c(2, 2, 3))
# A full margin for such an array would be the vector 1:3. This is
# the default if you don't specify a margin
# Creating a branch along the full margin is equivalent to
# as.list(array) and produces a list of size length(x):
array_branch(x) %>% str()
# A branch along the first dimension yields a list of length 2
# with each element containing a 2x3 array:
array_branch(x, 1) %>% str()
# A branch along the first and third dimensions yields a list of
# length 2x3 whose elements contain a vector of length 2:
array_branch(x, c(1, 3)) %>% str()
# Creating a tree from the full margin creates a list of lists of
# lists:
array_tree(x) %>% str()
# The ordering and the depth of the tree are controlled by the
# margin argument:
array_tree(x, c(3, 1)) %>% str()
|
a013e1c7bf1f121e62e17d897a88f6e1b5813386
|
a88b145a4665ceaaaa3b4ebc8c6bcc2ce18b56f5
|
/occupations/occupations.R
|
4c6bde061da816e895efe5ad791f83eeb93eb366
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
bbartholdy/hitchr
|
3a2267bc5e8aa69bcdb9bee375c9e45f30dbb906
|
8f2a84806dbc59298057a956a41c5bb37ef7bb02
|
refs/heads/main
| 2023-09-03T07:59:58.337777
| 2021-11-20T14:58:17
| 2021-11-20T14:58:17
| 244,217,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 906
|
r
|
occupations.R
|
# occupations
# Build the package's occupation lookup tables from TSV files and register
# each as package data with usethis::use_data(). The class is forced back to
# plain "data.frame" to strip readr's tbl_df classes before saving.
# NOTE(review): two of the original calls used the triple-colon operator
# (readr:::read_tsv), which reaches into a namespace's internal symbol table;
# read_tsv is exported, so :: is the correct operator. T/F were also replaced
# with TRUE/FALSE.
human_occupations <- readr::read_tsv("occupations/human_occupations.tsv",
                                     col_names = FALSE)
class(human_occupations) <- "data.frame"
usethis::use_data(human_occupations, overwrite = TRUE)
vogon_occupations <- readr::read_tsv("occupations/vogon_occupations.tsv",
                                     col_names = FALSE)
class(vogon_occupations) <- "data.frame"
usethis::use_data(vogon_occupations, overwrite = TRUE)
dentrassi_occupations <- readr::read_tsv("occupations/dentrassi_occupations.tsv",
                                         col_names = FALSE)
class(dentrassi_occupations) <- "data.frame"
usethis::use_data(dentrassi_occupations, overwrite = TRUE)
occupations <- function(n, race){
  # Sample n random occupations for the given race.
  #
  # n    : number of rows to draw (with replacement)
  # race : one of "human", "vogon", "dentrassi" (selects the lookup table)
  #
  # NOTE(review): the original ignored `race` and always sampled the
  # Dentrassi table; it now dispatches on race, defaulting unknown values to
  # the Dentrassi table to preserve the old behaviour for existing callers.
  pool <- switch(race,
    human = hitchr::human_occupations,
    vogon = hitchr::vogon_occupations,
    hitchr::dentrassi_occupations
  )
  pool[sample(seq_len(nrow(pool)), size = n, replace = TRUE), ]
}
|
d7a8f316888c2eec3f3ff6f9e3ef47158b2f2485
|
1a0d039d56e335e631a023ccf11b6d90ab827e4c
|
/assignments/assignment-02.R
|
b16aa623e9ad6cee456712af9a0812f461e37bb4
|
[
"MIT"
] |
permissive
|
Weiming-Hu/PSU-2019FALL-GEOG365-GISIntroR
|
c3289160d5a8b2b26ce5a2f3e4fe5455dc0afc21
|
375bed99566eceba1fa952ecc090daa78b0731b7
|
refs/heads/master
| 2022-06-23T11:28:05.981292
| 2019-10-22T15:59:10
| 2019-10-22T15:59:10
| 205,169,804
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,155
|
r
|
assignment-02.R
|
# This is the Assignment 2 for GEOG 365.
#
# This script contains tasks you will need to complete and instructions for how
# to prepare your submission. Read through this document carefully.
#
# Directly make changes to this R script and submit the finished R script
# following the name convention:
#
# GEOG365_Assignment2_LastName_FirstName.R
#
# Keep in mind the good practices for writing readable code!
#
# You have been given a CSV file, 2018.csv. Make sure you have the file.
#
# 1. Write the code to read the CSV files into a data frame in R. Print the
# dimension of the data frame.
#
##################
# Your code here #
##################
# 2. There are some negative values in the column 'OutdoorTemperature' and
# there are some missing values represented as '--.-' in the column. Change
# the missing values and the negative values to NAs. Make sure the results
# are numeric. Print the summary information of outdoor temperature using
# the 'summary' function.
#
##################
# Your code here #
##################
# 3. Convert the column 'Time' from character to POSIXct. Print the range
# of the column 'Time' using the function 'range()'.
#
##################
# Your code here #
##################
# 4. Use the function 'cor' to compute the correlation between
#
# 1) outdoor temperature (x) and solar radiance (y);
# 2) outdoor humidity (x) and solar radiance (y);
#
# Which pair has the stronger correlation? 1 or 2.
#
# If you get NA, try to read the documentation for 'cor' or try to find how
# to deal with NAs when using the function 'cor'.
#
##################
# Your code here #
##################
# 5. Plot a scatter plot with the package 'ggplot2'. The x variable is
# outdoor humidity and the y variable is solar radiance. Also associate
# the color of points to the value of outdoor temperature so we can relate
# point colors to temperature.
#
##################
# Your code here #
##################
# 6. Based on the figure generated from task 5, associate the color of
# points to the value of outdoor temperature by adding the argument
# 'color = ?' to the 'aes' mapping function.
#
# Use the function 'scale_color_viridis_c()' with the argument
# "option = 'inferno'" to change the default color scheme.
#
##################
# Your code here #
##################
# 7. Based on the figure you generated from task 6, add a regression
# line between x and y. You can use the function 'geom_smooth' and the
# following arguments to the function:
#
# 1) data = ?
# 2) mapping = ?
# 3) formula = y ~ poly(x, 2) # We are fitting a polynomial line
# 4) method = 'gam'
#
##################
# Your code here #
##################
# 8. Generate a line plot of the solar radiance between Sep. 15
# and Sep. 18 with the package 'ggplot2'. Try to make changes
# to the following figure components:
#
# 1) Change x axis name to 'Local Time'
# 2) Remove y axis name
# 3) Add title 'Solar Radiance' to the title
# 4) Add subtitle 'Time period Sep. 15 - 18'
# 5) Change the color of the line to 'darkorange'
#
##################
# Your code here #
##################
|
e4d026575fb6979b4d01773b8feec4f8fc380343
|
97cee412458510c609cf59ef5216f786172ae4c3
|
/man/makeCrit.Rd
|
4ad4a2aaf7d28a38581689638a4da2c1850ac0ea
|
[] |
no_license
|
georgeshirreff/multiNe
|
d6cb043b5c403ab44074f81933733341e55fbece
|
22d6cdddc8da854eb285f3bb8fd245ae604b0313
|
refs/heads/master
| 2021-01-18T23:20:19.474721
| 2016-03-23T22:36:49
| 2016-03-23T22:36:49
| 32,545,410
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 597
|
rd
|
makeCrit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LDNe.R
\name{makeCrit}
\alias{makeCrit}
\title{Removing alleles below critical frequency}
\usage{
makeCrit(genObj, crit = 0.01)
}
\arguments{
\item{genObj}{genind object}
\item{crit}{the lowest allele frequency allowed to occur. Alleles at a lower frequency are excluded.}
}
\description{
Takes a genind object, removes alleles below a certain allele frequency, and returns a list of genind objects, one object per locus
}
\examples{
library(adegenet)
}
\author{
Christine Ewers-Saucedo <ewers.christine@gmail.com>
}
|
10264e5a1bcf6bc576181f9436d2149eb02b34a1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/gsEasy/R/RcppExports.R
|
c8a1b0c7416f5e39f3b8736537bf5114aeb8ceda
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,233
|
r
|
RcppExports.R
|
#' Calculate the 'enrichment score' for gene-set amongst ranking/correlations
#'
#' @param N Integer value: total number of ranked genes (must equal \code{length(r)}).
#' @param S Ranks of gene set: integer positions into \code{r}, or character
#'   names matching \code{names(r)}.
#' @param p Weighting of ranking/correlations
#' @param r Rank/correlation scores
#' @return Numeric value - enrichment score
es <- function(N, S, p=1, r=N:1/N) {
  stopifnot(N == length(r))
  # NOTE(review): this guard was `all(S) %in% names(r) | is.integer(S)`, which
  # errors for character S (all() is not defined for character vectors) and
  # otherwise degenerated to a constant; the intended membership test
  # all(S %in% names(r)) is restored here.
  stopifnot(all(S %in% names(r)) | is.integer(S))
  # Convert gene names to 0-based sorted positions for the C routine.
  .Call(
    "R_es",
    sort((if (is.character(S)) match(S, names(r)) else S)-1),
    p,
    r,
    PACKAGE="gsEasy"
  )
}
#' Gene set enrichment test
#'
#' @param S Ranks of gene set
#' @param N Integer value. Only required if \code{r} is not specified.
#' @param r Rank/correlation scores. If \code{S} is \code{character}, then \code{r} must be named by gene or be a character vector itself of the gene names (necessarily containing \code{S}) in rank order.
#' @param p Weighting of ranking/correlations, see Subramanian et. al 2005.
#' @param min_its Minimum number of null permutations to compare.
#' @param max_its Maximum number of null permutations to compare.
#' @param significance_threshold Maximum p-value of significant result.
#' @param log_dismiss Threshold log probability of returning a significant result, below which function returns current p-value.
#' @return Numeric value - p-value of enrichment.
#' @examples
#' gset(S=1:5 * 2, N=1000)
#' gset(S=letters[1:3], r=letters)
#' @export
#' @importFrom Rcpp evalCpp
#' @importFrom stats setNames
#' @useDynLib gsEasy
gset <- function(
S,
N=NULL,
r=NULL,
p=1,
min_its=2e2,
max_its=1e5,
significance_threshold=0.05,
log_dismiss=-10
) {
# Exactly one of N and r may be omitted: N defaults to length(r), and r
# defaults to a uniform descending ranking of N genes.
if (is.null(N) & is.null(r))
stop("Must specify either N or r!")
if (is.null(N))
N <- length(r)
if (is.null(r))
r <- (N:1)/N
# A character r is treated as gene names in rank order and converted to
# descending scores named by gene; a numeric r is sorted descending.
r_sorted <- if (is.character(r)) setNames(nm=r, (length(r):1/length(r))) else sort(decreasing=TRUE, r)
stopifnot(is.vector(S) & (all(S %in% names(r_sorted)) | is.numeric(S)))
# Gene names/positions are converted to 0-based sorted indices for the
# compiled permutation test.
.Call(
"R_gset",
N,
sort((if (is.character(S)) match(S, names(r_sorted)) else as.integer(S))-1),
p,
r_sorted,
min_its,
max_its,
significance_threshold,
log_dismiss
)
}
#' Create list of gene sets defined by ontological annotation
#'
#' @param ontology \code{ontology_index} object.
#' @param gene Character vector of genes.
#' @param term Character vector of term IDs annotated to corresponding genes (parallel to \code{gene}).
#' @param min_genes Minimum number of genes in gene sets.
#' @param max_genes Maximum number of genes in gene sets.
#' @return List of character vectors of gene names, named by term ID.
#' @export
#' @importFrom ontologyIndex get_ancestors
get_ontological_gene_sets <- function(
ontology,
gene,
term,
min_genes=1,
max_genes=500
) {
# Per-gene annotation, propagated up the ontology to all ancestor terms.
gene.anno <- lapply(split(term, gene), get_ancestors, ontology=ontology)
# Invert the gene->terms map into a term->genes map: repeat each gene name
# once per annotated term, then split by term ID.
genes.by.term <- lapply(FUN=as.character, X=split(unlist(mapply(SIMPLIFY=FALSE, FUN=rep, names(gene.anno), sapply(gene.anno, length))), unlist(gene.anno)))
# Keep only gene sets within the requested size bounds.
Filter(x=genes.by.term, f=function(x) length(x) <= max_genes & length(x) >= min_genes)
}
#' Create list of gene sets defined by GO term annotation
#'
#' Note, this function takes several minutes to execute.
#'
#' @param GO_annotation_file File path of annotation file, which should contain a column of genes and a column of terms. Can be downloaded from at http://geneontology.org/gene-associations/gene_association.goa_human.gz.
#' @param GO_file File path of gene ontology.
#' @param min_genes Minimum number of genes in gene sets.
#' @param max_genes Maximum number of genes in gene sets.
#' @param verbose Print progress.
#' @return List of character vectors of term IDs.
#' @export
#' @importFrom ontologyIndex get_ontology
#' @importFrom utils read.table
get_GO_gene_sets <- function(
GO_annotation_file,
GO_file="http://purl.obolibrary.org/obo/go.obo",
min_genes=15,
max_genes=500,
verbose=TRUE
) {
if (verbose) cat("reading ontology file...\n")
# NOTE(review): this parse result is unused; get_ontology() is called again
# below when building the gene sets -- the second call could reuse `go`.
go <- get_ontology(GO_file, qualifier="GO")
if (verbose) cat("reading annotation file...\n")
# GAF format: comment lines start with '!'; column 3 = gene symbol,
# column 5 = GO term ID.
anno.df <- read.table(GO_annotation_file, sep="\t", quote="", stringsAsFactors=FALSE, comment.char="!", skipNul=TRUE)
if (verbose) cat("creating gene set list...\n")
get_ontological_gene_sets(ontology=get_ontology(GO_file, "GO"), term=anno.df[,5], gene=anno.df[,3], min_genes=min_genes, max_genes=max_genes)
}
|
11a813235a24fcdc8822c4d48597db736168a9bc
|
9892ea75d937243c4f664d9fbedea94d2a5f0d51
|
/plot1.R
|
d53ea7eeaab92672390aa229d193a27fe5246ba5
|
[] |
no_license
|
IotaiusXXIII/ExData_Plotting1
|
5037ad893b36c6661ca2b1f83dca4b1c872d96f9
|
156ddffa9c82a8ddb274cec105a83f6863e92895
|
refs/heads/master
| 2021-01-14T13:21:23.112227
| 2014-05-11T10:36:38
| 2014-05-11T10:36:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,179
|
r
|
plot1.R
|
# plot1.R: histogram of Global_active_power for 2007-02-01..2007-02-02 from
# the UCI household power consumption data set.
# NOTE(review): hard-coded setwd() to the author's machine is kept for
# behaviour parity but should be removed for portability.
setwd("C:\\Users\\iNejc\\Desktop\\predavanja_ostala\\coursera\\Data_science_specialization_John_Hopkins_University\\4Exploratory_data_analysis\\project_1")
################################## READING DATA ###########################################
start_day <- strptime("1/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S")# day when we start analysis
finish_day <- strptime("3/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S")# day when we finish analysis (exclusive)
# Read the first data row to learn the file's start timestamp, then compute
# how many one-minute rows to skip and to read so only the target window is
# loaded.
first_row <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", nrows = 1)#reading just first row
beg <- strptime(paste(first_row$Date, first_row$Time), format = "%d/%m/%Y %H:%M:%S")#beginning in dataset
start <- as.numeric(difftime(start_day, beg, units = "mins"))#minutes from beginning to start date
finish <- as.numeric(difftime(finish_day, start_day, units = "mins"))#minutes from start day to finish day
data <- read.table("household_power_consumption.txt", header = T, sep = ";",
skip = start, nrows = finish)#read data
col_names <- read.table("household_power_consumption.txt", header = F, sep = ";", nrows = 1)#read just column names
# Flatten the 1-row data.frame of header fields into a character vector.
# (Replaces the original grow-by-append for loop.)
names <- vapply(col_names, as.character, character(1))
colnames(data) <- names#setting column names
#################################### ARRANGING DATA ####################################
for(i in 3:ncol(data)){
#All measurement columns were read as factors/characters, so convert columns
#3..ncol to numeric; the first two columns are the date and time.
data[,names(data)[i]] <- as.numeric(as.character(data[,names(data)[i]]))
}
data$date <- as.POSIXct(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
Sys.setlocale("LC_ALL", "C")#internationalization of script
############################### GRAPH 1 ################################################
png("plot1.png", width = 480, height = 480, units = "px")
# The original passed bg="white" to hist(), which is not a hist/plot argument
# and was silently ignored; it has been dropped. White is the png default.
hist(data$Global_active_power, col = "red",
main = "Global active power", xlab = "Global active power (kilowatts)")
dev.off()
|
031146a64286121a8d3b50949cf78ca947f0b094
|
4f285a60cbee46613fa4d61dfb37aaf3f67ed422
|
/app.R
|
66ee6a591998fc3f03dffb9282528a3ddb64a5d3
|
[] |
no_license
|
suhasxavier/DataPreProcessApps
|
ed6b6eb73ec73b8c9c5ae47dd0872e796d43f5df
|
e68eaf3ecc5b5c46262de86804d1e7b8e6a124ca
|
refs/heads/master
| 2020-07-27T23:11:14.236577
| 2016-11-10T21:34:48
| 2016-11-10T21:34:48
| 73,422,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,635
|
r
|
app.R
|
library(shiny)
library(shinydashboard)
library(data.table)
library(shinyjs)
# Dashboard UI: header (logo placeholder currently disabled), empty sidebar,
# and a body with one tab per data-preparation task.  Only "Data Merge" and
# "DeDuplication" are wired up in server(); "Pattern Matching" and
# "Geo Coding" are placeholders.  All inner controls are uiOutput slots
# rendered dynamically by the server as the user progresses.
ui=dashboardPage(
  dashboardHeader(
    #title=imageOutput("JLLImage")
  ),
  dashboardSidebar(),
  dashboardBody(
    # "Data Merge": two side-by-side boxes, one per input file.
    tabsetPanel(id="welcometab",tabPanel("Data Merge",br(),h4("This segment serves to merge and reconcile data. Please ensure the uploaded files are either a text(.txt) or a csv(.csv) file."),
                                        fluidRow(box(h3("File 1"),width=6, height=650,fileInput("file1upload","Upload File:",accept = c("text/csv","text/comma-separated-values,text/plain",".csv",".txt")),uiOutput("file1name"),uiOutput("column_sel1"),uiOutput("subset_check1"),uiOutput("pivot_check1"),uiOutput("pivot_cols1"),uiOutput("subsetbox1"),uiOutput("colsel1but1"),uiOutput("checkselbut1"),uiOutput("completeboxone"),uiOutput("mergeSubmit")),
                                                 box(h3("File 2"),width = 6,height = 650,uiOutput("file2upload"),uiOutput("column_sel2"),uiOutput("subset_check2"),uiOutput("pivot_check2"),uiOutput("pivot_cols2"),uiOutput("subsetbox2"),uiOutput("colsel2but1"),uiOutput("checkselbut2"))
                                        ,uiOutput("completeboxtwo"))),
                # "DeDuplication": single box with file picker and key-column chooser.
                tabPanel("DeDuplication",fluidPage(box(br(),h4("This segment serves to deduplicate data. Please ensure the uploaded files are either a text(.txt) or a csv(.csv) file."),uiOutput("DeDeupFileSel"),uiOutput("dedup_col"),uiOutput("dedupsel_but"),uiOutput("dedup_checkbox1"),uiOutput("deDuplic")),width=15,height=650))
                ,tabPanel("Pattern Matching"),tabPanel("Geo Coding"),shinyjs::useShinyjs()
    )
  ))
server=function(input,output)
{
  #### Data Merge Logic ----
  # Drives the "Data Merge" tab: upload two files, choose merge/output
  # columns for each, optionally subset each file on up to three
  # column == value conditions entered as a comma-separated list.
  # NOTE(review): the final "Merge Data" button is rendered but has no click
  # handler yet, so the merge itself remains to be implemented.  The
  # output_df1/output_df2 data frames are shared between handlers via
  # closure scope, which is fragile — consider reactiveValues() instead.
  data_merge_process=function(user_choice)
  {
    if(user_choice=="Data Merge")
    {
      output$JLLImage=renderImage({
        # Hard-coded local path: works only on the author's machine.
        imgf="c:/users/Suhas.Xavier/Desktop/jll.png"
        return(list(src=imgf,filetype="image/png"))},deleteFile = FALSE
      )
      # Effectively remove Shiny's upload size limit.
      options(shiny.maxRequestSize=10000000000000000000000*1024^2)
      observe({
        if(!is.null(input$file1upload))
        {
          inpfile1=input$file1upload
          file1_data=fread(inpfile1$datapath)
          file1_dat=data.frame(file1_data)
          print("file read successfully")
          gc()
          file1_cols=as.character(colnames(file1_dat))
          output$file1name=renderUI(selectInput("colmer1","* Select column(s) to merge file:",choices =file1_cols,multiple = TRUE, width=250))
          observe({
            if(!is.null(input$colmer1))
            {
              output$mergeSubmit=renderUI(actionButton("mergesub","Ok"))
              shinyjs::onclick("mergesub",{
                merge_columns=input$colmer1
                shinyjs::hide("mergesub")
                output$column_sel1=renderUI(selectInput("colsel1","* Select columns to view in output file: (Select more than 1)",choices =c("All Columns",file1_cols),multiple = TRUE, width=250))
                observe({
                  if(!is.null(input$colsel1))
                  {
                    output$colsel1but1=renderUI(actionButton("colsel1button1","Done"))
                    print(file1_dat[,merge_columns])
                    shinyjs::onclick("colsel1button1",{
                      # Either keep the full file or subset to the chosen columns.
                      if(length(input$colsel1)==1)
                      {
                        if(input$colsel1=="All Columns")
                        {
                          output_df1=file1_dat
                          subsetted_df1="FALSE"
                        }
                      }
                      else{
                        colvals1 = c(input$colsel1)
                        col_names1=colnames(file1_dat)
                        col_index1=match(colvals1,col_names1)
                        output_df1=subset(file1_dat,select=col_index1)
                        subsetted_df1="TRUE"
                        print(subsetted_df1)
                        sel_cols1=names(output_df1)
                      }
                      output$subset_check1=renderUI(checkboxInput("subcheck1","Would you like to subset this dataset with a condition?"))
                      shinyjs::hide("colsel1button1")
                      output$submitButtonVal=renderUI(actionButton("submitbut","Submit"))
                      # once check box checked
                      observe({
                        if(!is.null(input$subcheck1))
                        {
                          if(input$subcheck1=="TRUE")
                          {
                            shinyjs::hide("box1complete")
                            if(subsetted_df1=="TRUE")
                            {
                              output$subsetbox1=renderUI(flowLayout(selectInput("subsetbox1sel","Select column: (Max 3)",choices=c(sel_cols1),multiple = TRUE),textInput("subsetbox1text","Values(Separate by comma)")))
                            }
                            else
                            {
                              output$subsetbox1=renderUI(flowLayout(selectInput("subsetbox1sel","Select column: (Max 3)",choices=c(file1_cols),multiple = TRUE),textInput("subsetbox1text","Values(Separate by comma)")))
                            }
                            observe({
                              if(!is.null(input$subsetbox1sel))
                              {
                                if(!is.null(input$subsetbox1text))
                                {
                                  output$checkselbut1=renderUI(actionButton("checkselbutton1","Done"))
                                  shinyjs::onclick("checkselbutton1", {
                                    shinyjs::hide("checkselbutton1")
                                    cols_sel1=as.character(input$subsetbox1sel)
                                    vals1_sel1=as.character(input$subsetbox1text)
                                    print(cols_sel1)
                                    # FIX: strsplit() returns a list, so length(vals_sel1)
                                    # was always 1 and the 2-/3-value branches below were
                                    # unreachable; [[1]] extracts the vector of values.
                                    vals_sel1=strsplit(vals1_sel1,split = ",")[[1]]
                                    print(vals_sel1)
                                    ## pair the i-th selected column with the i-th value
                                    if(length(vals_sel1)==1)
                                    {
                                      temp_dat=output_df1[output_df1[cols_sel1]==vals_sel1,]
                                      output$completeboxone=renderUI(actionButton("box1complete","Go to next file"))
                                    }
                                    if(length(vals_sel1)==2)
                                    {
                                      print(vals_sel1[1])
                                      print(vals_sel1[2])
                                      temp_dat=output_df1[output_df1[cols_sel1][1]==vals_sel1[1] & output_df1[cols_sel1][2]==vals_sel1[2],]
                                      output$completeboxone=renderUI(actionButton("box1complete","Go to next file"))
                                    }
                                    if(length(vals_sel1)==3)
                                    {
                                      temp_dat=output_df1[output_df1[cols_sel1][1]==vals_sel1[1] & output_df1[cols_sel1][2]==vals_sel1[2] & output_df1[cols_sel1][3]==vals_sel1[3],]
                                      output$completeboxone=renderUI(actionButton("box1complete","Go to next file"))
                                    }
                                  })
                                }
                              }
                            })
                          }
                          else
                          {
                            output$completeboxone=renderUI(actionButton("box1complete","Go to next file"))
                          }
                        }
                      })
                    })
                    #start part 2
                    # FIX: the click handler must reference the actionButton id
                    # ("box1complete"), not the uiOutput slot name
                    # ("completeboxone"); with the old id the handler never fired.
                    shinyjs::onclick("box1complete",{
                      shinyjs::hide("box1complete")
                      output$file2upload=renderUI(fileInput("fileupload2","Upload File:",accept = c("text/csv","text/comma-separated-values,text/plain",".csv",".txt")))
                      observe({
                        if(!is.null(input$fileupload2))
                        {
                          inpfile2=input$fileupload2
                          file2_dat=fread(inpfile2$datapath)
                          file2_cols=as.character(colnames(file2_dat))
                          print("aaahoo")
                          output$column_sel2=renderUI(selectInput("colsel2","* Select columns to view in output file: (Select more than 1)",choices =c("All Columns",file2_cols),multiple = TRUE, width=250))
                          observe({
                            if(!is.null(input$colsel2))
                            {
                              output$colsel2but1=renderUI(actionButton("colsel2button1","Done"))
                              shinyjs::onclick("colsel2button1",{
                                # Same column-selection logic as for file 1.
                                if(length(input$colsel2)==1)
                                {
                                  if(as.character(input$colsel2)=="All Columns")
                                  {
                                    output_df2=file2_dat
                                    subsetted_df2="FALSE"
                                  }
                                }
                                else{
                                  colvals2 = c(input$colsel2)
                                  col_names2=colnames(file2_dat)
                                  col_index2=match(colvals2,col_names2)
                                  output_df2=subset(file2_dat,select=col_index2)
                                  sel_cols2=colnames(output_df2)
                                  subsetted_df2="TRUE"
                                }
                                output$subset_check2=renderUI(checkboxInput("subcheck2","Would you like to subset this dataset with a condition?"))
                                shinyjs::hide("colsel2button1")
                                # once check box checked
                                observe({
                                  if(!is.null(input$subcheck2))
                                  {
                                    if(input$subcheck2=="TRUE")
                                    {
                                      if(subsetted_df2=="TRUE")
                                      {
                                        output$subsetbox2=renderUI(flowLayout(selectInput("subsetbox2sel","Select column:",choices=c(sel_cols2),multiple = TRUE),textInput("subsetbox2text","Values(Separate by comma)")))
                                      }
                                      else
                                      {
                                        output$subsetbox2=renderUI(flowLayout(selectInput("subsetbox2sel","Select column:",choices=c(file2_cols),multiple = TRUE),textInput("subsetbox2text","Values(Separate by comma)")))
                                      }
                                      observe({
                                        if(!is.null(input$subsetbox2sel))
                                        {
                                          if(!is.null(input$subsetbox2text))
                                          {
                                            output$checkselbut2=renderUI(actionButton("checkselbutton2","Done"))
                                            shinyjs::onclick("checkselbutton2", {
                                              shinyjs::hide("checkselbutton2")
                                              cols_sel2=as.character(input$subsetbox2sel)
                                              vals2_sel2=as.character(input$subsetbox2text)
                                              # FIX: extract the split values (see file-1 handler).
                                              vals_sel2=strsplit(vals2_sel2,split = ",")[[1]]
                                              # FIX: these branches subset file 2, so they must
                                              # use output_df2 (not output_df1) and offer the
                                              # "Merge Data" button, matching the no-subset
                                              # branch below.
                                              if(length(vals_sel2)==1)
                                              {
                                                temp_dat2=output_df2[output_df2[cols_sel2]==vals_sel2,]
                                                output$completeboxtwo=renderUI(actionButton("box2complete","Merge Data"))
                                              }
                                              if(length(vals_sel2)==2)
                                              {
                                                print(vals_sel2[1])
                                                print(vals_sel2[2])
                                                temp_dat2=output_df2[output_df2[cols_sel2][1]==vals_sel2[1] & output_df2[cols_sel2][2]==vals_sel2[2],]
                                                output$completeboxtwo=renderUI(actionButton("box2complete","Merge Data"))
                                              }
                                              if(length(vals_sel2)==3)
                                              {
                                                temp_dat2=output_df2[output_df2[cols_sel2][1]==vals_sel2[1] & output_df2[cols_sel2][2]==vals_sel2[2] & output_df2[cols_sel2][3]==vals_sel2[3],]
                                                output$completeboxtwo=renderUI(actionButton("box2complete","Merge Data"))
                                              }
                                              print(temp_dat2)
                                            })
                                          }
                                        }
                                      })
                                      print("then this")
                                      print("end")
                                    }
                                    else
                                    {
                                      output$completeboxtwo=renderUI(actionButton("box2complete","Merge Data"))
                                    }
                                  }
                                })
                              })
                            }
                          })
                        }
                      })
                    })
                    #end part 2
                  }
                })
              })
            }
          })
        }
      })
    }
  }
  ### Reroute to appropriate functions
  # Dispatch to the handler for whichever tab is currently selected.
  observe({
    if(!is.null(input$welcometab))
    {
      tab_chosen=as.character(input$welcometab)
      print(tab_chosen)
      if(tab_chosen=="Data Merge")
      {
        print("test")
        data_merge_process("Data Merge")
      }
      if(tab_chosen=="DeDuplication")
      {
        print("test")
        data_dedup_process("Data DeDuplication")
      }
    }
  })
  # Drives the "DeDuplication" tab: upload a file, pick key columns, then
  # either keep every row that has a duplicate key (checkbox ticked) or keep
  # only the first occurrence of each key (checkbox unticked).
  data_dedup_process=function(user_choice)
  {
    if(user_choice=="Data DeDuplication")
    {
      output$DeDeupFileSel=renderUI(fileInput("dupfilesel","Choose a file",accept = c("text/csv","text/comma-separated-values,text/plain",".csv",".txt")))
      if(!is.null(input$dupfilesel))
      {
        inpfile3=input$dupfilesel
        file3_dat=fread(inpfile3$datapath)
        file3_data=data.frame(file3_dat)
        file3_cols=colnames(file3_data)
        output$dedup_col=renderUI(selectInput("dedupcol","Select columns to deduplicate on",choices = c(file3_cols),multiple = TRUE,width = 250))
        observe({
          if(!is.null(input$dedupcol))
          {
            output$dedupsel_but=renderUI(actionButton("dedupselbut1","Ok"))
            shinyjs::onclick("dedupselbut1",{
              shinyjs::hide("dedupselbut1")
              dedupsel_seld=c(input$dedupcol)
              output$dedup_checkbox1=renderUI(checkboxInput("dedupcheckbox","Keep all instances of duplicates"))
              observe({
                if(!is.null(input$dedupcheckbox))
                {
                  if(input$dedupcheckbox=="TRUE")
                  {
                    # Keep every row whose key occurs more than once.
                    dedupcols1=match(dedupsel_seld,colnames(file3_data))
                    de_dup_complete=file3_data[!duplicated(file3_data[dedupcols1]) | duplicated(file3_data[dedupcols1],fromLast = TRUE),]
                    print("good")
                    print(nrow(de_dup_complete))
                  }
                  # FIX: this else belonged to the inner checkbox test, not the
                  # is.null() guard; previously the "keep first occurrence"
                  # branch only ran while the checkbox input was still NULL.
                  else
                  {
                    dedupcols1=match(dedupsel_seld,colnames(file3_data))
                    print(dedupcols1)
                    output_dat_test=!duplicated(file3_data[,dedupcols1])
                    de_dup_complete=file3_data[output_dat_test,]
                    print(nrow(de_dup_complete))
                  }
                  output$deDuplic=renderUI(actionButton("dedupcomplete","Done"))
                }
              })
            })
          }
        })
      }
    }
  }
}
shinyApp(ui,server)
|
1517e33d863e64d62a1d21866fa86e8594ae76eb
|
b8f199e0f95a708197ba1f733fa0ff34fe03cd2f
|
/health_thresholds.R
|
132c3a4992b55d546d203ca33fd25a2c5e7d9839
|
[
"CC0-1.0"
] |
permissive
|
slopp/beyond-dashboard-fatigue
|
cff6541dac2f9f7237196cf03eecd24c6c974798
|
89a7efc97f61825801aa16d3803067d7c1a063e1
|
refs/heads/master
| 2022-11-18T23:42:31.984733
| 2020-07-14T02:44:10
| 2020-07-14T02:44:10
| 279,591,852
| 0
| 0
|
CC0-1.0
| 2020-07-14T13:24:56
| 2020-07-14T13:24:55
| null |
UTF-8
|
R
| false
| false
| 4,223
|
r
|
health_thresholds.R
|
library(tidyverse)
library(gt)
library(glue)
library(scales)
#
# Utility functions for the main R Markdown report
#
# Define thresholds for KPIs
# Compared against the daily KPI columns by add_threshold_columns() /
# highlight_exceedances().  Every KPI must stay ABOVE its threshold except
# churn_users, which must stay BELOW it.
health_thresholds <-
  list(
    dau = 250000, # daily active users; should be above this value
    dac = 30000, # daily active customers; should be above this value
    dac_dau = 0.1, # customer/user ratio; should be above this value
    new_users = 1000, # should be above this value
    daily_revenue = 25000, # should be above this value
    churn_users = 1000 # should be below this value
  )
#
# Utility functions for dplyr transformations
#
# Append one threshold column `t_<kpi>` per KPI so downstream gt styling can
# compare each day's value against its threshold.  Thresholds are scalars
# and are recycled down every row by mutate().
add_threshold_columns <- function(data, health_thresholds) {
  mutate(
    data,
    t_dau = health_thresholds$dau,
    t_dac = health_thresholds$dac,
    t_dac_dau = health_thresholds$dac_dau,
    t_new_users = health_thresholds$new_users,
    t_churn_users = health_thresholds$churn_users,
    t_daily_revenue = health_thresholds$daily_revenue
  )
}
#
# Utility functions for gt table styling
#
# Shade every weekend row (wday 1 = Sunday, 7 = Saturday) light gray so
# weekends stand out in the daily KPI table.
highlight_weekends <- function(data) {
  tab_style(
    data,
    style = cell_fill(color = "gray95"),
    locations = cells_body(
      columns = TRUE,
      rows = wday %in% c(1, 7)
    )
  )
}
# This is a gt-based function that uses the `health_thresholds`
# list to highlight the exceeding KPIs in a light red color
# and to attach an explanatory footnote to each highlighted cell.
# "Exceeding" here means breaching the threshold: falling BELOW it for
# dau, dac, dac_dau, new_users and daily_revenue, rising ABOVE it for
# churn_users.  Relies on the t_* columns added by add_threshold_columns();
# those helper columns are hidden from the final table at the end.
highlight_exceedances <- function(data, health_thresholds) {
  data %>%
    # One tab_style() per KPI: red fill where the value breaches its threshold.
    tab_style(
      cell_fill(color = "#FF9999"),
      locations = cells_body(
        columns = vars(dau),
        rows = dau < t_dau
      )
    ) %>%
    tab_style(
      cell_fill(color = "#FF9999"),
      locations = cells_body(
        columns = vars(dac),
        rows = dac < t_dac
      )
    ) %>%
    tab_style(
      cell_fill(color = "#FF9999"),
      locations = cells_body(
        columns = vars(dac_dau),
        rows = dac_dau < t_dac_dau
      )
    ) %>%
    tab_style(
      cell_fill(color = "#FF9999"),
      locations = cells_body(
        columns = vars(new_users),
        rows = new_users < t_new_users
      )
    ) %>%
    tab_style(
      cell_fill(color = "#FF9999"),
      locations = cells_body(
        columns = vars(churn_users),
        # churn is a "stay below" KPI, hence > rather than <
        rows = churn_users > t_churn_users
      )
    ) %>%
    tab_style(
      cell_fill(color = "#FF9999"),
      locations = cells_body(
        columns = vars(daily_revenue),
        rows = daily_revenue < t_daily_revenue
      )
    ) %>%
    # One tab_footnote() per KPI, attached to the same breaching cells,
    # quoting the configured threshold via glue()/markdown.
    tab_footnote(
      footnote = md(glue(
        "Threshold for DAU (**{comma(health_thresholds$dau)}**) exceeded.")
      ),
      locations = cells_body(
        columns = vars(dau),
        rows = dau < t_dau
      )
    ) %>%
    tab_footnote(
      footnote = md(glue(
        "Threshold for DAC (**{comma(health_thresholds$dac)}**) exceeded.")
      ),
      locations = cells_body(
        columns = vars(dac),
        rows = dac < t_dac
      )
    ) %>%
    tab_footnote(
      footnote = md(glue(
        "Threshold for DAC/DAU (**{health_thresholds$dac_dau}**) exceeded.")
      ),
      locations = cells_body(
        columns = vars(dac_dau),
        rows = dac_dau < t_dac_dau
      )
    ) %>%
    tab_footnote(
      footnote = md(glue(
        "Threshold for New Users (**{comma(health_thresholds$new_users)}**) exceeded.")
      ),
      locations = cells_body(
        columns = vars(new_users),
        rows = new_users < t_new_users
      )
    ) %>%
    tab_footnote(
      footnote = md(glue(
        "Threshold for Churned Users (**{comma(health_thresholds$churn_users)}**) exceeded.")
      ),
      locations = cells_body(
        columns = vars(churn_users),
        rows = churn_users > t_churn_users
      )
    ) %>%
    tab_footnote(
      footnote = md(glue(
        "Threshold for Daily Revenue (**{dollar(health_thresholds$daily_revenue)}**) exceeded.")
      ),
      locations = cells_body(
        columns = vars(daily_revenue),
        rows = daily_revenue < t_daily_revenue
      )
    ) %>%
    # Drop the helper threshold columns from the rendered table.
    cols_hide(columns = starts_with("t_"))
}
|
95b2450d2491fc7ec5c3b5b9b080b23069888d72
|
0668b0c606b5c52ba76389ccf8dc01490ab3cd44
|
/man/GenData.GGUM.Rd
|
c8564e9019537444dd3c26c3c9e70d9f3e433222
|
[] |
no_license
|
jorgetendeiro/GGUM
|
d404103c22aa0aae92f680999f6f5dedddaad34d
|
3f9ed904aae45148ac79dae86aefcfd7f7930435
|
refs/heads/master
| 2022-09-25T11:24:16.689358
| 2022-09-12T12:06:50
| 2022-09-12T12:06:50
| 102,503,703
| 5
| 4
| null | 2021-10-18T00:49:28
| 2017-09-05T16:15:55
|
R
|
UTF-8
|
R
| false
| true
| 3,703
|
rd
|
GenData.GGUM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Accessory.R
\name{GenData.GGUM}
\alias{GenData.GGUM}
\title{Generate data from the GUM/GGUM}
\usage{
GenData.GGUM(N, I, C, model = "GGUM", seed = 123)
}
\arguments{
\item{N}{Number of persons (rows).}
\item{I}{Number of items (columns).}
\item{C}{\eqn{C} is the number of observable response categories minus 1
(i.e., the item scores will be in the set \eqn{\{0, 1, ..., C\}}{{0, 1,
..., C}}). It should either be a vector of \eqn{I} elements or a scalar. In
the latter case, it is assumed that \eqn{C} applies to all items.}
\item{model}{A string identifying the model. Possible values are "GUM" or
"GGUM" (default).}
\item{seed}{An integer, allowing the user to control the generation process
(for replication purposes).}
}
\value{
The function returns a list with five elements: \item{alpha.gen}{The
discrimination parameters.} \item{delta.gen}{The difficulty parameters.}
\item{taus.gen}{The threshold parameters.} \item{theta.gen}{The person
parameters.} \item{data}{The (NxI) data matrix. The item scores are coded
0, 1, ..., C for an item with (C+1) observable response categories.}
}
\description{
\code{GenData.GGUM} generates all model parameters (items and
persons) and item scores.
}
\section{Details}{
The generalized graded unfolding model (GGUM; Roberts &
Laughlin, 1996; Roberts et al., 2000) is given by \deqn{P(Z_i=z|\theta_n) =
\frac{f(z) + f(M-z)}{\sum_{w=0}^C\left[f(w)+f(M-w)\right]}, }{P(Z_i =
z|t_n) = ( f(z) + f(M-z) ) / (sum( f(w) + f(M - w); w = 0, ..., C )),}
\deqn{f(w) = exp\left\{\alpha_i\left[w(\theta_n-\delta_i)-
\sum_{k=0}^w\tau_{ik}\right]\right\}, }{f(w) = exp( alpha_i ( w(t_n -
delta_i) - sum( tau_ik; k = 0, ..., w) ) ),}
where: \itemize{ \item The subscripts \eqn{i} and \eqn{n} identify the item
and person, respectively. \item \eqn{z=0,\ldots,C}{z = 0, ..., C} denotes
the observed answer response. \item \eqn{M = 2C + 1} is the number of
subjective response options minus 1. \item \eqn{\theta_n}{t_n} is the
latent trait score for person \eqn{n}. \item \eqn{\alpha_i}{alpha_i} is the
item slope (discrimination). \item \eqn{\delta_i}{delta_i} is the item
location. \item \eqn{\tau_{ik}}{tau_ik} (\eqn{k=1,\ldots,M}{k = 1, ..., M}
) are the threshold parameters. }
Parameter \eqn{\tau_{i0}}{tau_i0} is arbitrarily constrained to zero and
the threshold parameters are constrained to symmetry around zero, that is,
\eqn{\tau_{i(C+1)}=0}{tau_{i(C+1)} = 0} and
\eqn{\tau_{iz}=-\tau_{i(M-z+1)}}{tau_{iz} = -tau_{i(M-z+1)}} for
\eqn{z\not= 0}{z != 0}.
Parameters \eqn{\alpha_i}{alpha_i} are randomly uniformly drawn from the
(.5, 2) interval. Parameters \eqn{\delta_i}{delta_i} are randomly drawn
from the standard normal distribution bounded between \eqn{-2} and 2. The
threshold parameters are generated following the same procedure of Roberts,
Donoghue, and Laughlin (2002). Finally, the person parameters are randomly
drawn from the standard normal distribution.
If \code{model = "GUM"} the data based on the GUM (Roberts and Laughlin,
1996) model are generated. The GUM is a constrained version of the GGUM,
where all discrimination parameters are equal to 1 and the item thresholds
are shared by all items.
}
\examples{
gen1 <- GenData.GGUM(500, 10, 5, seed = 456)
gen1$data # Retrieve the data.
gen1$alpha.gen # The discrimination parameters.
# Generate data based on items varying in the number of observable response categories:
gen2 <- GenData.GGUM(500, 5, c(5, 5, 5, 4, 4), seed = 789)
}
\author{
Jorge N. Tendeiro, \email{tendeiro@hiroshima-u.ac.jp}
}
|
73525fd9ac0aae4733fa4cfd9d525f7771cfb7b2
|
e189d2945876e7b372d3081f4c3b4195cf443982
|
/man/competitions_list.Rd
|
96d24eec9e70ce3c4394c755d624c4db5192b49a
|
[
"Apache-2.0"
] |
permissive
|
Cdk29/fastai
|
1f7a50662ed6204846975395927fce750ff65198
|
974677ad9d63fd4fa642a62583a5ae8b1610947b
|
refs/heads/master
| 2023-04-14T09:00:08.682659
| 2021-04-30T12:18:58
| 2021-04-30T12:18:58
| 324,944,638
| 0
| 1
|
Apache-2.0
| 2021-04-21T08:59:47
| 2020-12-28T07:38:23
| null |
UTF-8
|
R
| false
| true
| 645
|
rd
|
competitions_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kaggle_wrappers.R
\name{competitions_list}
\alias{competitions_list}
\title{Competitions list}
\usage{
competitions_list(
group = NULL,
category = NULL,
sort_by = NULL,
page = 1,
search = NULL
)
}
\arguments{
\item{group}{group to filter result to}
\item{category}{category to filter result to}
\item{sort_by}{how to sort the result, see valid_competition_sort_by for options}
\item{page}{the page to return (default is 1)}
\item{search}{a search term to use (default is empty string)}
}
\value{
list of competitions
}
\description{
Competitions list
}
|
a9fb4ff8a71bd3bf35137b77695d394050caea80
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/logOfGamma/tests/testthat.R
|
1b8a9a64763cf17f86ab911a70a7a0046879fb12
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat runner: executes all tests under tests/testthat for the
# logOfGamma package (invoked by R CMD check).
library(testthat)
library(logOfGamma)
test_check("logOfGamma")
|
22519a4615cc519ec7022be4b7a17580de6f1a3a
|
e8254864430201549bacfeaa9f44bceda2a0389d
|
/plot1.R
|
775ee89e81ebb833a928e5fa71087b5c680fac32
|
[] |
no_license
|
mcm45/ExData_Plotting1
|
c6855f5c83fdb655d8d85ed3f108898663f3dd27
|
6474cd537cb8f063f7d663c72ba31864f1865e2a
|
refs/heads/master
| 2021-01-09T06:58:16.119244
| 2016-09-01T23:22:37
| 2016-09-01T23:22:37
| 67,170,846
| 0
| 0
| null | 2016-09-01T22:30:00
| 2016-09-01T22:29:59
| null |
UTF-8
|
R
| false
| false
| 612
|
r
|
plot1.R
|
#Exploratory Data Project 1 - Plot 1
#reading in data for 1-2 Feb 2007 and drawing a red histogram of
#Global Active Power into plot1.png
#NOTE(review): the path below is relative to the author's home layout;
#adjust before running elsewhere.
file <- "./Documents/Coursera/exdata_data_household_power_consumption/household_power_consumption.txt"
epc <- read.table(file, header=TRUE, sep=";")
epc2 <- subset(epc, Date %in% c("1/2/2007","2/2/2007")) #subset two days
# FIX: go through as.character() first.  If the column was read as a factor
# (the default in R < 4.0; the file encodes missing values as "?"),
# as.numeric() on the factor would silently return level indices instead of
# the measured values.  as.character() is a no-op when the column is already
# character, so this is backward compatible.
GlobalActivePower <- as.numeric(as.character(epc2$Global_active_power)) #convert format to numeric
png("plot1.png", width=480, height=480) #initialize device
hist(GlobalActivePower, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off() #close device so the png file is written
|
f313b5488419a49c1a3b68bbf5779d09517dc098
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.end.user.computing/man/appstream_disable_user.Rd
|
7f052e3757ee06173805ed698e23749d2ba48e3b
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 773
|
rd
|
appstream_disable_user.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_disable_user}
\alias{appstream_disable_user}
\title{Disables the specified user in the user pool}
\usage{
appstream_disable_user(UserName, AuthenticationType)
}
\arguments{
\item{UserName}{[required] The email address of the user.
Users' email addresses are case-sensitive.}
\item{AuthenticationType}{[required] The authentication type for the user. You must specify USERPOOL.}
}
\description{
Disables the specified user in the user pool. Users can't sign in to AppStream 2.0 until they are re-enabled. This action does not delete the user.
See \url{https://www.paws-r-sdk.com/docs/appstream_disable_user/} for full documentation.
}
\keyword{internal}
|
bdcb5f123b7bd3b51c878c4a0cf87bfe757edfd3
|
e84638b670fe5d999a952b8d50c0cbcd23063e95
|
/cleanPhrase.R
|
2433f375df124bfc4ba15e30ae959f0a70a66bab
|
[] |
no_license
|
gmuldoon/DS-Coursera-Capstone
|
e9187bec4aace7a53ca7a4b8970a22c98b2b848c
|
d5c3350ced22a8b66d480e1ae105c222465bfbbd
|
refs/heads/master
| 2021-01-13T03:58:21.888601
| 2017-01-05T18:00:01
| 2017-01-05T18:00:01
| 78,135,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,577
|
r
|
cleanPhrase.R
|
library(shinythemes)
library(shiny)
library(tm)
library(stringr)
library(markdown)
library(stylo)
# Pre-computed n-gram lookup tables built offline; nextWordPrediction() below
# indexes them by columns unigram/bigram/trigram(/quadgram) to back off from
# 4-gram to 2-gram matches.  NOTE(review): paths are relative to the app's
# working directory — confirm deployment layout.
quadData = readRDS(file="quadgramData.rdata")
triData = readRDS(file="trigramData.rdata")
biData = readRDS(file="bigramData.rdata")
# Normalise raw user text for n-gram lookup.  Steps, in order: lower-case,
# strip punctuation, strip digits, replace any remaining non-alphanumeric
# characters with spaces, collapse repeated whitespace.
textCleaner = function(text){
  txt = tolower(text)
  txt = removePunctuation(txt)
  txt = removeNumbers(txt)
  txt = str_replace_all(txt, "[^[:alnum:]]", " ")
  stripWhitespace(txt)
}
# Clean the input phrase and tokenise it into a word vector with stylo,
# preserving case (textCleaner has already lower-cased everything).
cleanPhrase = function(text){
  txt.to.words.ext(
    textCleaner(text),
    language = "English.all",
    preserve.case = TRUE
  )
}
# Predict the next word from the last one to three words of the input.
# Backoff strategy: try the quadgram table on the last 3 words, fall back to
# the trigram table on the last 2, then to the bigram table on the last 1.
# wordCount: number of words in textIn; textIn: character vector of words.
# quadgrams/trigrams/bigrams default to the globally loaded lookup tables
# but can be supplied explicitly (e.g. for testing).
# The prediction is printed (as before) and now also returned invisibly so
# callers can capture it.
nextWordPrediction = function(wordCount, textIn,
                              quadgrams = quadData,
                              trigrams = triData,
                              bigrams = biData){
  # Pad/trim to exactly 3 context words; NA marks "no word available" and
  # never matches a table row, which triggers the backoff below.
  if (wordCount >= 3) {
    textIn = textIn[(wordCount-2):wordCount]
  } else if (wordCount == 2) {
    textIn = c(NA, textIn)
  } else {
    textIn = c(NA, NA, textIn)
  }
  wordPredict = as.character(quadgrams[quadgrams$unigram==textIn[1] &
                                         quadgrams$bigram==textIn[2] &
                                         quadgrams$trigram==textIn[3],][1,]$quadgram)
  if(is.na(wordPredict)) {
    wordPredict = as.character(trigrams[trigrams$unigram==textIn[2] &
                                          trigrams$bigram==textIn[3],][1,]$trigram)
    if(is.na(wordPredict)) {
      wordPredict = as.character(bigrams[bigrams$unigram==textIn[3],][1,]$bigram)
    }
  }
  cat(wordPredict)
  invisible(wordPredict)
}
|
93be7c4dc0df2ef86091bbb0cc860e074251dedf
|
b16d10d9a8641b0ec6ab426835e154584cf6dad2
|
/templates/E1oM20agnat.R
|
78056c43ef7e29f2423bf5eccb2907490549cffa
|
[] |
no_license
|
Racha711/GCAP2
|
8ef3bfcb0b772c06f033ab7830f6e5e792d68bb9
|
6a4a6686dede2ce07cfb6f927a21f2b4587bfbd3
|
refs/heads/main
| 2023-05-25T17:55:28.542809
| 2021-04-25T18:02:02
| 2021-04-25T18:02:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,033
|
r
|
E1oM20agnat.R
|
E1oM20agnat.R GISS Model E coupled version larissa 01/22/2009
E1oM20agnat: replace this section by a description of what distinguishes this run ?
Use as many lines as you need. Look carefully at all the possible ?
choices, particularly the lines containing '?'. In some cases, you ?
will have to pick the appropriate choice to make this rundeck work ?
The final rundeck should contain no '?'
Check and modify the rest of the description below: ?
modelE1 (3.0) 4x5 hor. grid with 20 lyrs, top at .1 mb (+ 3 rad.lyrs) ?
atmospheric composition from year 1880 ? 1979 ?
ocean: coupled to GISS ocean model (Russell - Schmidt) ?
uses turbulence scheme (no dry conv), simple strat.drag (no grav.wave drag) ?
time steps: dynamics 7.5 min leap frog; physics 30 min.; radiation 2.5 hrs ?
filters: U,V in E-W direction (after every dynamics time step) ?
sea level pressure (after every physics time step) ?
Preprocessor Options
#define TRACERS_ON ! include tracers code
#define CHECK_OCEAN ! needed to compile aux/file CMPE002
#define TRACERS_OCEAN ! GISS Ocean tracers activated
#define TRACERS_AGE_OCEAN ! Natassa's Ocean Age tracer code
End Preprocessor Options
Object modules: (in order of decreasing priority)
Atm72x46 ! horizontal resolution is 72x46 -> 4x5deg
AtmL20 STRAT_DUM ! vertical resolution is 20 layers -> 0.1mb
RES_5x4_L13 ! ocean horiz res 4x5deg, 13 vert layers
MODEL_COM GEOM_B IORSF ! model variables and geometry
TRIDIAG ! tridiagonal matrix solver
MODELE ! Main and model overhead
! parameter database
ALLOC_DRV ! domain decomposition, allocate global distributed arrays
ATMDYN_COM ATMDYN MOMEN2ND ! atmospheric dynamics
ATM_UTILS ! utilities for some atmospheric quantities
QUS_COM QUSDEF QUS_DRV ! advection of tracers
TQUS_DRV ! advection of Q
CLOUDS2 CLOUDS2_DRV CLOUDS_COM ! clouds modules
SURFACE SURFACE_LANDICE FLUXES ! surface calculation and fluxes
GHY_COM GHY_DRV GHY GHY_H ! land surface and soils
VEG_DRV VEG_COM VEGETATION ! vegetation
PBL_COM PBL_DRV PBL ! atmospheric pbl
ATURB ! turbulence in whole atmosphere
LAKES_COM LAKES ! lake modules
SEAICE SEAICE_DRV ! seaice modules
LANDICE LANDICE_COM LANDICE_DRV ! land ice modules
ICEDYN_DRV ICEDYN ! ice dynamics modules
ODIAG_COM OCEAN_COM OSTRAITS_COM OGEOM ! dynamic ocean modules
OCNDYN OCNDYN2 OTIDELL ! dynamic ocean routines
OCN_Interp OCN_Int_LATLON ! dynamic ocean routines
OSTRAITS OCNGM OCNKPP ! dynamic ocean routines
OCEANR_DIM AFLUXES OFLUXES
ODIAG_PRT ! ocean diagnostic print out
OCNFUNTAB ! ocean function look up table
SNOW_DRV SNOW ! snow model
RAD_COM RAD_DRV RADIATION ! radiation modules
RAD_UTILS ALBEDO READ_AERO ! radiation and albedo
DIAG_COM DIAG DEFACC DIAG_PRT ! diagnostics
DIAG_ZONAL GCDIAGb ! grid-dependent code for lat-circle diags
DIAG_RES_M ! diagnostics (resolution dependent)
FFT72 OFFT72E ! utilities
POUT ! post-processing output
SparseCommunicator_mod ! sparse gather/scatter module
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!! tracer part !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
TRACER_COM TRACERS_DRV ! configurable tracer code
TRACERS ! generic tracer code
TRDIAG_COM TRACER_PRT ! tracer diagnostic printout
OCN_TRACER
OCN_TRACER_COM
!!!!!!!!!!!!!!!!!!!!!!!!!!! tracer part !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Components:
tracers MPI_Support shared
Data input files:
! AIC=AIC.RES_M20A.D771201 ! initial conditions (atm.) needs GIC,OIC ISTART=2
! GIC=GIC.E046D3M20A.1DEC1955 ! initial conditions (ground) and 300 year spin-up
! OIC=OIC4X5LD.Z12.gas1.CLEV94.DEC01 ! ocean initial conditions
! AIC=1JAN2012.rsfE051oM20A ! full IC (GIC,OIC not needed) ISTART=8 (spun up 380 yrs)
AIC=AIC/1JAN2600.rsfE13AoM20 ! istart=5
OFTAB=OFTABLE_NEW ! ocean function table
KBASIN=KB4X513.OCN.gas1.nc ! ocean basin designations
TOPO_OC=OZ72X46N_gas.1_nocasp.nc ! ocean bdy.cond
OSTRAITS=OSTRAITS_72x46.nml ! parameterized straits info
CDN=CD4X500S.ext.nc
! VEG=V72X46.1.cor2.ext
VEG=V72X46.1.cor2_no_crops.ext.nc
CROPS=CROPS2007_72X46N.cor4_nocasp.nc ! veg. fractions, crops history
SOIL=S4X50093.ext.nc
TOPO=Z72X46N_gas.1_nocasp.nc ! bdy.cond
REG=REG4X5 ! special regions-diag
RVR=RD_modelE_M.nc ! river direction file
NAMERVR=RD_modelE_M.names.txt ! named river outlets
RADN1=sgpgxg.table8 ! rad.tables and history files
RADN2=LWTables33k_lowH2O_CO2_O3_planck_1-800 ! rad.tables and history files
RADN4=LWCorrTables33k ! rad.tables and history files
RADN5=H2Ocont_MT_CKD ! Mlawer/Tobin_Clough/Kneizys/Davies H2O continuum table
! other available H2O continuum tables:
! RADN5=H2Ocont_Ma_2000
! RADN5=H2Ocont_Roberts
! RADN5=H2Ocont_Ma_2008
RADN3=miescatpar.abcdv2
TAero_PRE=dec2003_PRE_Koch_kg_m2_ChinSEA_Liao_1850 ! pre-industr trop. aerosols
TAero_SUI=sep2003_SUI_Koch_kg_m2_72x46x9_1875-1990 ! industrial sulfates
TAero_OCI=sep2003_OCI_Koch_kg_m2_72x46x9_1875-1990 ! industrial organic carbons
TAero_BCI=sep2003_BCI_Koch_kg_m2_72x46x9_1875-1990 ! industrial black carbons
RH_QG_Mie=oct2003.relhum.nr.Q633G633.table
RADN7=STRATAER.VOL.1850-1999.Apr02
RADN8=cloud.epsilon4.72x46
RADN9=solar.lean02.ann.uvflux_hdr ! need KSOLAR=2
RADNE=topcld.trscat8
ISCCP=ISCCP.tautables
#include "rad_72x46_input_files"
GHG=GHG.Mar2004.txt
dH2O=dH2O_by_CH4_monthly
TOP_INDEX=top_index_72x46_a.ij.ext.nc
MSU_wts=MSU_SSU_RSS_weights.txt
GLMELT=GLMELT_4X5.OCN.nc ! glacial melt distribution
Label and Namelist:
E1oM20agnat (coupled version, 4x5, 20 lyrs, 1880 atm; use up to 72 (or 80) columns and ??
up to 60 (or 52) columns here to describe your run)?<- col 53 to 72 -> 80 ->
DTFIX=300
&&PARAMETERS
! parameters set for coupled ocean runs:
KOCEAN=1 ! ocn is prognostic
OTIDE = 0 ! Ocean tides are not used
! parameters usually not changed when switching to coupled ocean:
! drag params if grav.wave drag is not used and top is at .01mb
X_SDRAG=.002,.0002 ! used above P(P)_sdrag mb (and in top layer)
C_SDRAG=.0002 ! constant SDRAG above PTOP=150mb
P_sdrag=1. ! linear SDRAG only above 1mb (except near poles)
PP_sdrag=1. ! linear SDRAG above PP_sdrag mb near poles
P_CSDRAG=1. ! increase CSDRAG above P_CSDRAG to approach lin. drag
Wc_JDRAG=30. ! crit.wind speed for J-drag (Judith/Jim)
ANG_SDRAG=1 ! conserve ang. mom.
PTLISO=15. ! press(mb) above which rad. assumes isothermal layers
xCDpbl=1.
cond_scheme=2 ! more elaborate conduction scheme (GHY, Nancy Kiang)
U00a=.55 ! above 850mb w/o MC region; tune this first to get 30-35% high clouds
U00b=1.00 ! below 850mb and MC regions; then tune this to get rad.balance
! U00a,U00b replace the U00 parameters below - U00ice/U00wtrX are kept only for the _E1 version
U00ice=.59 ! U00ice up => nethtz0 down (alb down); goals: nethtz0=0,plan.alb=30%
U00wtrX=1.39 ! U00wtrX+.01=>nethtz0+.7 for global annual mean
!?1979 U00wtrX=1.38
! HRMAX=500. ! not needed unless do_blU00=1, HRMAX up => nethtz0 down (alb up)
CO2X=1.
H2OstratX=1.
H2ObyCH4=1. ! activates strat.H2O generated by CH4
KSIALB=0 ! 6-band albedo (Hansen) (=1 A.Lacis orig. 6-band alb)
KSOLAR=2
! parameters that control the atmospheric/boundary conditions
! if set to 0, the current (day/) year is used: transient run
crops_yr=1880 ! if -1, crops in VEG-file is used ? 1979
s0_yr=1880 !? 1979
s0_day=182
ghg_yr=1880 !? 1979
ghg_day=182
volc_yr=1880 !? or -1 to get mean volc.aerosols
volc_day=182
aero_yr=1880 !? 1979
od_cdncx=0. ! don't include 1st indirect effect
cc_cdncx=0.0036 ! include 2nd indirect effect
albsn_yr=1880 !? 1979
dalbsnX=.015 ! should be .024
o3_yr=-1880 !? 1979
! parameters that control the Shapiro filter
DT_XUfilter=450. ! Shapiro filter on U in E-W direction; usually same as DT (below)
DT_XVfilter=450. ! Shapiro filter on V in E-W direction; usually same as DT (below)
DT_YVfilter=0. ! Shapiro filter on V in N-S direction
DT_YUfilter=0. ! Shapiro filter on U in N-S direction
! parameters that may have to be changed in emergencies:
DTsrc=1800.
DT=450.
NIsurf=1 ! increase as layer 1 gets thinner
! parameters that affect at most diagn. output:
Ndisk=480 ! use =48 except on halem
SUBDD=' ' ! no sub-daily frequency diags
NSUBDD=0 ! saving sub-daily diags every NSUBDD*DTsrc/3600. hour(s)
KCOPY=2 ! saving acc + rsf
isccp_diags=1 ! use =0 to save cpu time if isccp-diags are not essential
nda5d=13 ! use =1 to get more accurate energy cons. diag (increases CPU time)
nda5s=13 ! use =1 to get more accurate energy cons. diag (increases CPU time)
ndaa=13
nda5k=13
nda4=48 ! to get daily energy history use nda4=24*3600/DTsrc
&&END_PARAMETERS
&INPUTZ
YEARI=1901,MONTHI=1,DATEI=1,HOURI=0, ! from default: IYEAR1=YEARI
YEARE=2000,MONTHE=1,DATEE=2,HOURE=0, KDIAG=13*0,
ISTART=5,IRANDI=0, YEARE=1901,MONTHE=1,DATEE=1,HOURE=1,IWRITE=1,JWRITE=1,
/
! Instructions for related rundeck types
! ======================================
! the "frozen (or 'slush') version" of 2006 paper E1oM20 -> EofzM20
! ------------------------------------------- =======
! replace in "Object modules" the 4 files
! CLOUDS2 PBL ATURB RADIATION RAD_DRV by:
! CLOUDS2_E1 PBL_E1 ATURB_E1 RADIATION_E1 RAD_DRV_E1
! replace in "Data input files:" RADN2 by:
! RADN2=radfil33k ! RADN4 and RADN5 are not used
! set in &&PARAMETERS : variable_lk=0 ! lake fractions are fixed in time
! river_fac=1.04
! wsn_max=0. ! do not restrict snow depth
! glmelt_on=2 ! skip annual adjustment of glacial melt
! glmelt_fac_nh=2.91
! glmelt_fac_sh=1.98
! oBottom_drag=0
! oCoastal_drag=0
|
4e2652e3dcfa48dada7f9008826cf637feb6ef5e
|
dcca5a4064cc431e64726e1b51f3be22b7295d8b
|
/server.R
|
39715be7473b7d657ff225d0a952c5379c3a39a7
|
[] |
no_license
|
mklienz/dude-wmb
|
b9166e5d904d20cbed76c93e6fa6ac69c6d49933
|
f3e80d2f03741d1448ebf6f43f9c65917cc6f796
|
refs/heads/master
| 2021-03-24T22:02:46.528741
| 2020-04-28T04:37:17
| 2020-04-28T04:41:07
| 247,567,977
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,721
|
r
|
server.R
|
# Shiny server for the AT (Auckland Transport) real-time transit app.
# Responsibilities:
#   * user location (free-text OSM search + map click), shared across tabs
#   * periodic refresh of the AT realtime API feed
#   * "live buses" tab: route selection table + leaflet map of vehicles
#   * "multi boards" tab: departure boards for up to 4 stops, with walk times
#   * bookmarking/restoring app state via shiny's bookmark hooks
#   * "stop locations" tab: browsable map of all stops
# Depends on app-global data prepared elsewhere (df_stops, df_trips_full,
# df_routes_in_stops, route_destinations, df_route_geoms, colours, phols, ...)
# and helpers (osm_search_nz, call_at_feed, get_active_vehicles2, ...).
server <- function(input, output, session) {
  ##### SIDEBAR
  ##### USER LOCATION VALUE
  # Set up null location (zero-row data frame == "no location known yet")
  location <- reactiveVal(value = data.frame(lat = c(), lng = c()))
  # Update location to search location when it changes
  observeEvent(search_location(), {
    location(search_location())
  })
  # Update location to map click location when it changes
  observeEvent(input$user_map_click, {
    new_location = as.data.frame(input$user_map_click) %>%
      dplyr::select(lat, lng)
    location(new_location)
  })
  # Handling search location (empty query -> empty location)
  search_location <- eventReactive(input$user_location_search, {
    if (input$user_location == "") {
      location = data.frame(lat = c(), lng = c())
    } else {
      location = osm_search_nz(input$user_location)
    }
    return (location)
  }, ignoreNULL = FALSE)
  # Create messages to send if the location searched for is not found.
  # Isolated from the location definition so the existing location stays intact.
  location_validation <- eventReactive(input$user_location_search, {
    validate(need(
      nrow(search_location()) > 0,
      'Location could not be found, try again'
    ))
  })
  output$loc_validation <- renderText({
    loc_valid = location_validation()
  })
  ##### Location map
  # Generate initial default map
  # Centered on Queen St Farmers
  output$user_map <- renderLeaflet({
    leaflet() %>%
      addProviderTiles(provider = 'CartoDB.Voyager') %>%
      setView(lng = 174.7646424,
              lat = -36.8486696,
              zoom = 14)
  })
  # Observer to set view and marker to a location found by user search
  observe({
    proxy <- leafletProxy('user_map')
    df = location()
    if (nrow(df) > 0) {
      proxy %>%
        clearMarkers() %>%
        addMarkers(data = df,
                   lng = ~ lng,
                   lat = ~ lat) %>%
        setView(lng = df$lng,
                lat = df$lat,
                zoom = 15)
    }
  })
  # Observer to update location search UI to inform user that the click location is being used
  observeEvent(input$user_map_click, {
    updateSearchInput(session = session,
                      inputId = 'user_location',
                      value = 'Using map location')
  }, ignoreInit = TRUE)
  #### REFRESHING API DATA
  # Timestamp of the most recent feed refresh (NZ time).
  refresh_time <-
    reactiveVal(value = lubridate::with_tz(Sys.time(), tz = "Pacific/Auckland"))
  # Realtime feed; re-fetched on manual refresh and on the chosen interval.
  api_response <- reactive({
    waiter_show(html = tagList(
      spin_solar(),
      h4('Getting data from AT', style = 'color: #283D51;')
    ),
    color = waiter::transparent(0.7))
    input$refresh_now
    if (as.numeric(input$refresh_interval) > 0) {
      invalidateLater(as.numeric(input$refresh_interval) * 1000)
    }
    refresh_time(lubridate::with_tz(Sys.time(), tz = "Pacific/Auckland"))
    response = call_at_feed()
    waiter_hide()
    return(response)
  })
  # Enable/disable the "send routes" button depending on selection state.
  observe({
    if (is.null(input$chosen_routes) ||
        input$chosen_routes == '' || length(route_list()) == 0) {
      shinyjs::disable('send_routes')
      updateActionButton(session,
                         inputId = 'send_routes',
                         label = 'Select some routes first')
    } else {
      shinyjs::enable('send_routes')
      updateActionButton(session,
                         inputId = 'send_routes',
                         label = 'Show me where these vehicles are')
    }
  })
  # Carry the chosen routes over to the live-buses tab and switch to it.
  observeEvent(input$send_routes, {
    updateSelectizeInput(
      session,
      inputId = 'selected_routes',
      label = "Select routes that you're interested in:",
      choices = full_route_names,
      selected = input$chosen_routes
    )
    updateTabItems(session,
                   inputId = 'tabs',
                   selected = 'live-buses')
  })
  # Unique route/destination combinations for the selected routes.
  filtered_routes_table <- reactive({
    df <- route_destinations %>%
      dplyr::filter(route_short_name %in% !!input$selected_routes) %>%
      dplyr::arrange(route_short_name, trip_headsign) %>%
      dplyr::select(route_short_name, trip_headsign, route_type) %>%
      dplyr::distinct()
    return (df)
  })
  # All vehicles currently active in the feed, labelled "route to headsign".
  active_vehicles <- reactive({
    df = get_active_vehicles2(api_response(),
                              df_last_stop_in_route,
                              route_destinations,
                              refresh_time()) %>%
      dplyr::mutate(route_to = paste(route_short_name, 'to', trip_headsign))
    return(df)
  })
  # Selected routes annotated with their active-vehicle counts.
  active_routes_table <- reactive({
    df = append_active_vehicles(active_vehicles(), filtered_routes_table()) %>%
      dplyr::mutate(route_to = paste(route_short_name, 'to', trip_headsign))
  })
  output$table <- DT::renderDT({
    active_routes_table() %>%
      dplyr::select(route_to, n)
  },
  rownames = FALSE,
  colnames = c('Route/Destination' = 'route_to', 'Num Active' = 'n'),
  options = list(dom = 't', scrollX = TRUE))
  # Route/destination rows the user has selected in the table.
  route_filters <- reactive({
    df = active_routes_table()[input$table_rows_selected, ] %>%
      dplyr::select(route_short_name, trip_headsign, route_to)
    return (df)
  })
  # Active vehicles restricted to the selected route/destinations,
  # each route_to assigned a display colour from the app palette.
  filtered_vehicles <- reactive({
    df = active_vehicles() %>%
      dplyr::filter(route_to %in% route_filters()$route_to) %>%
      dplyr::select(
        route_id,
        route_short_name,
        route_type,
        trip_headsign,
        delay,
        latitude,
        longitude
      ) %>%
      dplyr::mutate(route_to = as.character(paste(
        route_short_name, 'to', trip_headsign
      )))
    if (nrow(df) > 0) {
      colour_routes = data.frame(route_to = unique(df$route_to),
                                 colour = colours[1:length(unique(df$route_to))]) %>%
        dplyr::mutate_if(is.factor, as.character)
      df = df %>%
        dplyr::left_join(colour_routes, by = 'route_to')
    } else {
      df$colour = character(0)
    }
    return(df)
  })
  # Geometry (polyline) for each displayed route, keeping its colour.
  filtered_route_geoms <- reactive({
    df = filtered_vehicles() %>%
      dplyr::select(route_id, route_to, colour) %>%
      dplyr::distinct() %>%
      dplyr::left_join(df_route_geoms %>% dplyr::select(route_id, Shape__Len, geometry),
                       by = 'route_id')
    return(df)
  })
  # Build base map
  output$routes_map <- renderLeaflet({
    leaflet() %>%
      addProviderTiles(provider = 'CartoDB.Voyager') %>%
      setView(lng = 174.7646424,
              lat = -36.8486696,
              zoom = 15)
  })
  # Plotting stops for all of the routes requires that we collect the required stops
  stops_in_selected_routes <- reactive({
    df = df_routes_in_stops %>%
      dplyr::filter(route_to %in% filtered_vehicles()$route_to) %>%
      dplyr::left_join(df_stops, by = 'stop_name') %>%
      dplyr::left_join(filtered_vehicles() %>% dplyr::select(route_to, colour),
                       by = 'route_to') %>%
      dplyr::distinct()
  })
  # Clear previous layers whenever the table selection changes.
  observeEvent(input$table_rows_selected, {
    proxy <- leafletProxy('routes_map')
    proxy %>%
      clearMarkers() %>%
      clearShapes() %>%
      clearControls()
  }, ignoreNULL = FALSE)
  # Add stops, route polylines, vehicle markers and legend to the map.
  observeEvent(input$table_rows_selected, {
    proxy <- leafletProxy('routes_map')
    df = stops_in_selected_routes()
    proxy %>%
      addCircleMarkers(
        lng = df$stop_lon,
        lat = df$stop_lat,
        popup = df$stop_name,
        stroke = FALSE,
        # radius = 10,
        fillOpacity = 0.5,
        fillColor = df$colour,
        group = df$route_to
      )
    # One polyline per route_to (take the longest shape variant).
    df3 = filtered_route_geoms() %>%
      st_as_sf() %>%
      group_by(route_to) %>%
      summarise(Shape__Len = max(Shape__Len),
                colour = first(colour))
    if (nrow(df3) > 0) {
      proxy %>%
        addPolylines(
          data = df3$geometry,
          group = df3$route_to,
          color = df3$colour
        )
    }
    # Vehicle markers with an ahead/behind-schedule popup (delay is seconds).
    df2 = filtered_vehicles() %>%
      dplyr::mutate(route_type = dplyr::if_else(route_type == 3, 'bus', 'train')) %>%
      dplyr::mutate(pop_up_msg = paste(sep = '<br/>', route_to, dplyr::if_else(
        delay > 0,
        paste('Behind by',
              as.character(round(abs(
                delay
              ) / 60)),
              'mins'),
        paste('Ahead by',
              as.character(round(abs(
                delay
              ) / 60)),
              'mins')
      )))
    df_legend = df2 %>%
      dplyr::select(route_to, colour) %>%
      dplyr::distinct()
    icons = awesomeIcons(
      # icon = 'bus',
      icon = df2$route_type,
      library = 'fa',
      markerColor = df2$colour,
      iconColor = '#fff'
    )
    if (nrow(df2) > 0) {
      proxy %>%
        addAwesomeMarkers(
          data = df2,
          lat = ~ latitude,
          lng = ~ longitude,
          icon = icons,
          popup = ~ pop_up_msg,
          group = ~ route_to
        ) %>%
        fitBounds(
          min(df2$longitude),
          min(df2$latitude),
          max(df2$longitude),
          max(df2$latitude)
        ) %>%
        addLegend(colors = df_legend$colour, labels = df_legend$route_to, position = 'topleft')
    }
  }, ignoreNULL = FALSE)
  # Observer - add Layer Controls
  observeEvent(input$table_rows_selected, {
    proxy <- leafletProxy('routes_map')
    df = filtered_vehicles()
    routes = df %>%
      dplyr::arrange(route_to) %>%
      dplyr::pull(route_to) %>%
      unique()
    proxy %>%
      addLayersControl(
        overlayGroups = c(routes),
        position = 'topleft',
        options = layersControlOptions(collapsed = FALSE)
      )
  }, ignoreNULL = FALSE)
  ######### MULTIPLE REAL TIME BOARDS
  # Warn once the stop-selection limit (4) is reached.
  output$max_stops <- renderText({
    if (length(input$selected_stops) == 4) {
      return('Max Stops Reached')
    }
  })
  ##### BOOKMARKING
  # Values to persist across bookmark/restore, gathered per active tab.
  bookmarking_values <- reactiveValues()
  active_tab <- reactiveVal(NULL)
  observe({
    # Hide all inputs from shiny's automatic bookmarking; we save explicitly.
    toExclude <- names(input)
    setBookmarkExclude(toExclude)
    active_tab(input$tabs)
    if (input$tabs == 'multi-boards') {
      bookmarking_values$stops = input$selected_stops
      bookmarking_values$services = input$selected_services
      bookmarking_values$routes = input$chosen_routes
    } else if (input$tabs == 'live-buses') {
      bookmarking_values$routes = input$selected_routes
    }
    # Save sidebar settings based on additional options from user
    if ('Refresh Settings' %in% input$saved_sidebar_settings |
        'Refresh Settings' %in% input$saved_sidebar_settings2) {
      bookmarking_values$refresh = input$refresh_interval
    }
    if ('Your Location' %in% input$saved_sidebar_settings |
        'Your Location' %in% input$saved_sidebar_settings2) {
      bookmarking_values$lat = location()$lat
      bookmarking_values$lng = location()$lng
    }
  })
  onBookmark(function(state) {
    state$values$active = active_tab()
    if (input$tabs == 'multi-boards') {
      state$values$stops = bookmarking_values$stops
      state$values$services = bookmarking_values$services
    } else if (input$tabs == 'live-buses') {
      state$values$routes = bookmarking_values$routes
    }
    # Save sidebar settings based on additional options from user
    if ('Refresh Settings' %in% input$saved_sidebar_settings |
        'Refresh Settings' %in% input$saved_sidebar_settings2) {
      state$values$refresh = bookmarking_values$refresh
    }
    if ('Your Location' %in% input$saved_sidebar_settings |
        'Your Location' %in% input$saved_sidebar_settings2) {
      state$values$lat = bookmarking_values$lat
      state$values$lng = bookmarking_values$lng
    }
  })
  observeEvent(input$multi_boards_bookmark, {
    session$doBookmark()
  })
  observeEvent(input$live_buses_bookmark, {
    session$doBookmark()
  })
  onBookmarked(showBookmarkUrlModal)
  onRestore(function(state) {
    # Perform some defensive programming so people can't mess with your urls
    # First active tabs states - if the value not in the url, don't proceed with the restore steps
    if (state$values$active %in% c('multi-boards', 'live-buses')) {
      # Load sidebar state
      if (!is.null(state$values$refresh)) {
        if (state$values$refresh %in% c(0, 30, 60, 300)) {
          updateSelectInput(
            session,
            inputId = 'refresh_interval',
            label = 'Auto Refresh Interval',
            choices = c(
              'Off' = 0,
              '30 seconds' = 30,
              '1 minute' = 60,
              '5 minutes' = 300
            ),
            selected = state$values$refresh
          )
        }
      }
      if (state$values$active == 'multi-boards') {
        updateTabItems(session,
                       inputId = 'tabs',
                       selected = 'multi-boards')
        # Stops loading: only restore stop names that still exist.
        new_stops = dplyr::intersect(state$values$stops, full_stop_names)
        if (length(new_stops) > 0) {
          bookmarking_values$stops = new_stops
          updateSelectizeInput(
            session,
            inputId = 'selected_stops',
            label = 'Select the stops you want to see the boards for:',
            choices = full_stop_names,
            selected = new_stops,
            options = list(
              plugins = list('remove_button'),
              maxItems = 4
            )
          )
        }
        # Services loading (3 = bus, 2 = train per GTFS route_type)
        new_services = dplyr::intersect(state$values$services, c(3, 2))
        if (length(new_services) > 0) {
          updateCheckboxGroupInput(
            session,
            inputId = 'selected_services',
            label = NULL,
            choices = c('Bus' = 3, 'Train' = 2),
            selected = new_services,
            inline = TRUE
          )
        }
      } else if (state$values$active == 'live-buses') {
        updateTabItems(session,
                       inputId = 'tabs',
                       selected = 'live-buses')
        new_routes = dplyr::intersect(state$values$routes,
                                      unique(df_trips_full$route_short_name))
        if (length(new_routes) > 0) {
          updateSelectizeInput(
            session,
            inputId = 'selected_routes',
            label = "Select routes that you're interested in:",
            choices = full_route_names,
            selected = new_routes
          )
        }
      }
    }
  })
  onRestored(function(state) {
    # Restore a saved location only if it is numeric and plausibly within NZ.
    if (!is.null(state$values$lat)) {
      if (is.numeric(c(state$values$lat, state$values$lng))) {
        if (state$values$lat > -47 &
            state$values$lat < -34 &
            state$values$lng > 150 & state$values$lng < 179) {
          new_loc = data.frame(lat = c(state$values$lat),
                               lng = c(state$values$lng))
          location(new_loc)
        }
      }
    }
  })
  # Routes available at the selected stops for the selected service types.
  route_list <- reactive({
    df = get_wanted_routes(
      stops = input$selected_stops,
      services = input$selected_services,
      df_routes = df_routes_in_stops
    )
  })
  output$select_routes_ui <- renderUI({
    # browser()
    if (length(route_list()) == 0) {
      strong(id = 'no_routes_notif', 'No routes found')
    } else {
      checkboxGroupInput(
        inputId = 'chosen_routes',
        label = NULL,
        choices = '',
        inline = TRUE
      )
    }
  })
  # Keep chosen_routes consistent with the currently available route list,
  # preserving any still-valid prior selection.
  observe({
    if (length(dplyr::intersect(isolate(input$chosen_routes), route_list())) > 0) {
      update_selected = dplyr::intersect(input$chosen_routes, route_list())
    } else {
      update_selected = route_list()
    }
    updateCheckboxGroupInput(
      session = session,
      inputId = 'chosen_routes',
      label = NULL,
      choices = route_list(),
      inline = TRUE,
      selected = update_selected
    )
  })
  # Scheduled trips at the selected stops for the chosen routes.
  relevant_trips <- reactive({
    df = df_trips_full %>%
      dplyr::filter(stop_name %in% input$selected_stops) %>%
      dplyr::filter(route_short_name %in% input$chosen_routes)
    return(df)
  })
  # Reactive DF, walk times to selected stops depending on user location
  stop_walk_times <- reactive({
    df_selected_stops = df_stops %>%
      dplyr::filter(stop_name %in% input$selected_stops)
    user_location = location()
    if (nrow(user_location) > 0) {
      df = get_walking_times(df_selected_stops, user_location)
    } else {
      df = df_selected_stops %>%
        dplyr::mutate(time_to_stop = NA)
    }
    return(df)
  })
  # Departure-board rows: upcoming trips today, joined with walk times.
  board_trips <- reactive({
    # Generate current day and time
    day_of_week = tolower(weekdays(refresh_time()))
    current_time = round(hms::as_hms(refresh_time()), 0) - hms::as_hms(0)
    # Filter trips on day and time
    trips = relevant_trips() %>%
      dplyr::filter(!!as.name(day_of_week) == 1) %>%
      dplyr::filter(arrival_time > current_time) %>%
      dplyr::arrange(arrival_time)
    df_walk_times = stop_walk_times() %>%
      dplyr::select(stop_id, time_to_stop)
    # Get the board update
    df = get_board_update(api_response(), trips, current_time) %>%
      dplyr::left_join(df_walk_times,
                       by = 'stop_id') %>%
      dplyr::select(
        stop_name,
        route_short_name,
        trip_headsign,
        arrival_time,
        due,
        delay,
        time_to_stop
      ) %>%
      dplyr::rename('Time to stop (mins)' = time_to_stop)
    # Walk times are meaningless without a user location, so drop the column.
    if (nrow(location()) == 0) {
      df = df %>% dplyr::select(-'Time to stop (mins)')
    }
    return(df)
  })
  output$board_table <-
    DT::renderDataTable(
      DT::datatable(
        board_trips(),
        rownames = FALSE,
        colnames = c(
          'Stop' = 'stop_name',
          'Route' = 'route_short_name',
          'Destination' = 'trip_headsign',
          'Scheduled Time' = 'arrival_time',
          'Due (mins)' = 'due',
          'Delay (mins)' = 'delay'
        ),
        filter = 'top',
        selection = 'none',
        style = 'bootstrap',
        options = list(
          scrollX = TRUE,
          pageLength = 15,
          columnDefs = list(list(
            className = 'dt-center', targets = '_all'
          ))
        )
      )
    )
  # Text for board update time stamp, if nothing has been selected displays a help text
  last_update_text <- reactive({
    time = refresh_time()
    hour = lubridate::hour(time)
    min = lubridate::minute(time)
    string = paste0('Last updated: ',
                    ifelse(hour < 10, paste0('0', hour), hour),
                    ':',
                    ifelse(min < 10, paste0('0', min), min))
    return (string)
  })
  # Simple text output for update text
  output$board_update_time <- renderText({
    last_update_text()
  })
  observeEvent(input$find_stops_btn, {
    updateTabItems(
      session,
      inputId = 'tabs',
      selected = 'stop-locations'
    )
  })
  # Stops map: routes chosen for highlighting, each given a palette colour.
  stops_map_routes <- reactive({
    df = df_routes_in_stops %>%
      dplyr::filter(route_short_name %in% input$routes_for_stops)
    if (nrow(df) > 0) {
      select_colours = data.frame(route_short_name = unique(df$route_short_name),
                                  colour = colours[2:(length(unique(df$route_short_name)) + 1)]) %>%
        dplyr::mutate_if(is.factor, as.character)
      df = df %>%
        dplyr::left_join(select_colours, by = 'route_short_name')
    } else {
      # FIX: was `df$colours`, which created a column named "colours" while
      # every consumer reads "colour" (matching the non-empty branch above).
      df$colour = character(0)
    }
    return(df)
  })
  # Initial stops map, centred on the user's location if one is known.
  output$stops_map <- renderLeaflet({
    if (isolate(nrow(location())) > 0) {
      leaflet() %>%
        addProviderTiles(provider = 'CartoDB.Voyager') %>%
        setView(
          lng = isolate(location()$lng),
          lat = isolate(location()$lat),
          zoom = 15
        )
    } else {
      leaflet() %>%
        addProviderTiles(provider = 'CartoDB.Voyager') %>%
        setView(lng = 174.7646424,
                lat = -36.8486696,
                zoom = 15)
    }
  })
  # Move the map to center at the new location
  stops_map_center <- eventReactive(input$stop_location_search_search, {
    if (input$stop_location_search == "") {
      location = data.frame(lat = c(), lng = c())
    } else {
      location = osm_search_nz(input$stop_location_search)
    }
    return (location)
  })
  stop_location_validation <- eventReactive(input$stop_location_search_search, {
    validate(need(
      nrow(stops_map_center()) > 0,
      'Location could not be found, try again'
    ))
  })
  output$stop_loc_validation <- renderText({
    stop_loc_valid = stop_location_validation()
  })
  observeEvent(input$stop_location_search_search, {
    if (nrow(stops_map_center()) > 0) {
      proxy = leafletProxy('stops_map')
      proxy %>%
        clearPopups() %>%
        setView(lng = stops_map_center()$lng,
                lat = stops_map_center()$lat,
                zoom = 17) %>%
        addPopups(lng = stops_map_center()$lng,
                  lat = stops_map_center()$lat,
                  popup = stringr::str_to_title(input$stop_location_search))
    }
  })
  # Plot the stops currently inside the map bounds (only when zoomed in
  # far enough to keep the marker count manageable).
  observe({
    proxy = leafletProxy('stops_map')
    if (!is.null(input$stops_map_zoom)) {
      map_bounds = input$stops_map_bounds
      proxy %>%
        clearMarkers() %>%
        clearControls()
      if (input$stops_map_zoom >= 14) {
        df = df_stops %>%
          dplyr::filter(
            stop_lat > min(map_bounds$north, map_bounds$south),
            stop_lat < max(map_bounds$north, map_bounds$south),
            stop_lon > min(map_bounds$east, map_bounds$west),
            stop_lon < max(map_bounds$east, map_bounds$west)
          )
        if (nrow(stops_map_routes()) > 0) {
          # Split stops into highlighted (on chosen routes) and the rest.
          df_stops_in_routes = df %>%
            dplyr::filter(stop_name %in% stops_map_routes()$stop_name) %>%
            dplyr::left_join(stops_map_routes(), by = 'stop_name')
          df_stops_rest = df %>%
            dplyr::filter(!(stop_name %in% stops_map_routes()$stop_name))
          df_legend = stops_map_routes() %>%
            dplyr::select(route_short_name, colour) %>%
            dplyr::distinct() %>%
            dplyr::arrange(route_short_name) %>%
            rbind(c('Other', 'blue'))
          proxy %>%
            addCircleMarkers(
              data = df_stops_rest,
              lat = ~ stop_lat,
              lng = ~ stop_lon,
              popup = ~ stop_name,
              group = 'Other'
            ) %>%
            addCircleMarkers(
              data = df_stops_in_routes,
              lat = ~ stop_lat,
              lng = ~ stop_lon,
              popup = ~ stop_name,
              color = ~ colour,
              group = ~ route_short_name
            ) %>%
            addLegend(
              colors = df_legend$colour,
              labels = df_legend$route_short_name,
              position = 'topleft'
            ) %>%
            addLayersControl(
              overlayGroups = (df_legend$route_short_name),
              position = 'topleft',
              options = layersControlOptions(collapsed = FALSE)
            )
        } else {
          proxy %>%
            addCircleMarkers(
              data = df,
              lat = ~ stop_lat,
              lng = ~ stop_lon,
              popup = ~ stop_name
            )
        }
      }
    }
  })
  # Collapsible help/save panels (JS handlers defined in the UI).
  observeEvent(input$help_multi_boards_title, {
    js$collapse('help_multi_boards')
  })
  observeEvent(input$help_live_buses_title, {
    js$collapse('help_live_buses')
  })
  observeEvent(input$help_stops_title, {
    js$collapse('help_stops')
  })
  observeEvent(input$save_boards_title, {
    js$collapse('save_board_view')
  })
  observeEvent(input$save_buses_title, {
    js$collapse('save_buses_view')
  })
  waiter_hide()
}
|
5529c5f5d2c0315434268d54d624a1dd7be709f7
|
460adc8d1918d26cff8f4c00124d65193d561ce2
|
/dataTable/dataTableDemoApp.R
|
225c261afcff34ccb9030f80c6eb3729cf2e60e6
|
[
"Apache-2.0"
] |
permissive
|
paul-shannon/shinyModules
|
74ad8ee492e9cb9c9d82119b616296028369c8d3
|
faecba65fb8d77149f4b9c34441b17e67bf35f82
|
refs/heads/master
| 2022-12-06T13:55:59.807356
| 2020-09-12T19:34:08
| 2020-09-12T19:34:08
| 272,457,191
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,491
|
r
|
dataTableDemoApp.R
|
# Demo app for the dataTable shiny module: a main (selectable) table,
# a message box echoing the selection, and a subtable filtered to it.
library(shiny)
source("../messageBox/messageBoxModule.R")
source("dataTableModule.R")

# Demo data: mtcars plus a deliberately wide multi-line column to
# exercise rendering of long cell contents.
tbl.demo <- mtcars
fatLine <- paste(LETTERS, collapse="")
# Three newline-separated copies of the 26-letter line.
# FIX: the original passed a 4th argument to a 3-slot sprintf format;
# the extra argument was silently ignored.
multiFatLine <- sprintf("%s\n%s\n%s\n", fatLine, fatLine, fatLine)
tbl.demo$fatLine <- multiFatLine
#----------------------------------------------------------------------------------------------------
ui <- fluidPage(
   div(dataTableUI("table"),
       style="margin: 20px; padding: 10px; border: 3px solid black; border-radius: 10px;"),
   messageBoxUI(id="messageBox.1", title=NULL, titleSpan=0, boxSpan=10),
   div(dataTableUI("subtable"),
       style="margin: 20px; padding: 10px; border: 3px solid black; border-radius: 10px;")
   )
#----------------------------------------------------------------------------------------------------
server <- function(input, output, session){
   # rowNames: reactive character vector of rows selected in the main table.
   # (The original pre-assigned reactiveVal("none") here, which was dead code:
   # it was immediately overwritten by the callModule() return value.)
   rowNames <- callModule(dataTableServer, "table", tbl=tbl.demo,
                          selectionPolicy="multiple",
                          pageLength=10,
                          visibleRows = reactive("all"))
   # Echo the selection into the message box.
   callModule(messageBoxServer, "messageBox.1", newContent=rowNames)
   # Subtable shows only the rows selected above; no further selection allowed.
   callModule(dataTableServer, "subtable", tbl=mtcars,
              selectionPolicy="none",
              pageLength=10,
              visibleRows=rowNames)
}
#----------------------------------------------------------------------------------------------------
runApp(shinyApp(ui, server), port=9088)
|
7536546d3147978bf3a1fa71c4dd9ef18f4d4dcb
|
4952a58db47915937f150447bbb4e932d1a9a9ca
|
/s2/EZU_Monthly.R
|
1616b2dae3a76cb383434f4f0ba0b3b906157982
|
[] |
no_license
|
jzazueta/portfolioOptimization
|
9a798c3a852965c0f72ec55b4fbdb43c53eef760
|
f5016febcb063fb679d76e39a48db3670b8f6a1f
|
refs/heads/master
| 2020-04-08T16:38:39.633225
| 2012-12-12T21:15:49
| 2012-12-12T21:15:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 999
|
r
|
EZU_Monthly.R
|
## 'iShares MSCI EMU Index
## Downloads EZU monthly returns from Yahoo and tabulates the conditional
## distribution of this month's return bin given last month's bin.
library("quantmod")
getSymbols("EZU", src = "yahoo")
barChart(EZU)

EZU.returns = periodReturn(EZU, period = 'monthly')
ndays = dim(EZU.returns)[1]

# Align each month's return ("today") with the previous month's ("prev")
# by dropping the first observation from one series and the last from the other.
today.returns = EZU.returns
today.returns[1, ] = NA
today.returns = subset(today.returns, !is.na(today.returns))
prev.returns = EZU.returns
prev.returns[ndays, ] = NA
prev.returns = subset(prev.returns, !is.na(prev.returns))

corr.returns = data.frame(today.returns)
corr.returns$prev.returns = prev.returns
names(corr.returns) = c("today.returns", "prev.returns")
cor(corr.returns)

# Bin both series into identical return buckets (-100% .. +100%).
bin_breaks <- c(-1.0, -0.04, -0.02, -0.01, 0.0, 0.01, 0.02, 0.04, 1.0)
corr.returns$today_bins = cut(corr.returns$today.returns, breaks = bin_breaks)
corr.returns$prev_bins  = cut(corr.returns$prev.returns,  breaks = bin_breaks)

# Rows = previous month's bin, columns = this month's bin.
freq_table = table(corr.returns$prev_bins, corr.returns$today_bins)
freq_table
sum(freq_table)

# Conditional probabilities: normalise each column to sum to 1.
# FIX: the original computed `cond_prob / colSums(cond_prob)`, which recycles
# the column-sum vector DOWN the rows (element i divided by colsum[i %% ncol]),
# so no column was actually normalised. prop.table(x, margin = 2) divides each
# column by its own total, which is what dividing by colSums() intended.
cond_prob = prop.table(freq_table, margin = 2)
print(round(cond_prob, 2))
|
3f9ca9b2137004ac3e27819575c73216d39691a2
|
e2bcb193e8b36b5f9aeea1c50e9401de72861a7e
|
/date11232014/rplot2D.r
|
79ca47a9c63f1bdff4f74aa3d10b79026a73de86
|
[] |
no_license
|
loonuh/Overflows
|
3c5ff69f52d076a237fade8da0833cea3c49753a
|
3621af359a83e3aba6b68d5324fded9ba8d1d73d
|
refs/heads/master
| 2021-01-20T12:22:16.515175
| 2014-12-13T06:55:35
| 2014-12-13T06:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,635
|
r
|
rplot2D.r
|
# Command-line heat-map plotter for a 2-D matrix stored in a .mat file.
#
# You will need to install.packages() these: 'R.matlab', 'ggplot2',
# 'reshape', 'grid', then create an alias=Rscript <path>/rplot2D.r and use it:
#   alias <matfile name without .mat> <pdf name without .pdf> <x-lab> <y-lab> <title>
# Example, in a directory containing mu.mat:
#   rplot2D mu muplot 'Degree (k)' 'Rewiring coefficient (w)' 'Degree Density u(k,w)';evince muplot.pdf
library(R.matlab);
library(ggplot2);
library(reshape);
library(grid);

# Interpret the command-line arguments.
args <- commandArgs(TRUE)
nameIn = paste(args[1], '.mat', sep='')   # 1st argument: .mat basename
nameOut = paste(args[2], '.pdf', sep='')  # 2nd argument: output pdf basename
nameX = toString(args[3])                 # 3rd argument: x-axis title
nameY = toString(args[4])                 # 4th argument: y-axis title
nameTitle = toString(args[5])             # 5th argument: plot title

filenameread = toString(nameIn)
data <- readMat(filenameread)
# readMat() returns a named list; the MATLAB variable shares the file's
# basename, so extract it by name.
# FIX: replaces eval(parse(text = paste('data$', args[1]))) — eval/parse on
# user input is fragile (breaks on names needing backticks) and unnecessary.
M = data[[args[1]]]

# Long format (X1 = row index, X2 = column index, value = cell) for ggplot.
Melt <- melt(M)
p <- ggplot(Melt, aes(X1, X2, fill = value)) + geom_tile()
q <- p + theme(plot.background = element_blank(),
               panel.grid.major = element_blank(),
               panel.grid.minor = element_blank(),
               panel.background = element_blank()
)

xstep = 2        # You'll have to edit this for your problem
ystep = .025*2   # You'll have to edit this for your problem
xlabels <- as.character(seq(0, 20, by=xstep))
ylabels <- as.character(seq(0, 1, by=ystep))

pdf(toString(nameOut))
q <- q + coord_fixed(ratio = dim(M)[1]/dim(M)[2]) # Makes plot ~square
#q <- q + scale_fill_continuous(low = "white",high="green4") #Pick your poison
#q <- q + scale_fill_continuous(fill=rainbow(10)) #Pick your poison
q <- q + scale_fill_gradientn(colours = rev(rainbow(50))) #Pick your poison
q <- q + guides(fill = guide_colorbar(title=NULL, barwidth = 0.5, barheight = 25, color="black"))
#q <- q + guides(fill = guide_colorbar(barwidth = 0.5, barheight = 21,color="black"))
q <- q + scale_x_continuous(nameX, breaks=seq(1,21,by=2), labels=xlabels) + # You'll have to edit this for your problem
  scale_y_continuous(nameY, breaks=seq(1,41,by=2), labels=ylabels) +        # You'll have to edit this for your problem
  theme(axis.ticks.length=unit(0,"cm"), axis.ticks.margin=unit(-.5,"cm"),
        axis.title.x = element_text(vjust = -.5),
        axis.title.y = element_text(vjust = 1.5),
        plot.title = element_text(vjust = -1.25))
q + geom_tile(colour = "black") + ggtitle(nameTitle)
dev.off()
|
41bd1862475d863a5255d8d9010a933a13b33c7d
|
240e4740000bc47cca6c0505cbf771587b65fac8
|
/rplots.R
|
663dbf4c695e09828cb339c794b18e045d7f6592
|
[] |
no_license
|
NicholasHarrington-Johnson/Thesis2spline
|
a56f6dd34e4fba43c6ac22437bd8033145e23959
|
df329428832645c4c65f4d117fc32f91219dfca2
|
refs/heads/master
| 2016-09-06T19:32:17.340615
| 2015-08-22T22:21:19
| 2015-08-22T22:21:19
| 39,865,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,409
|
r
|
rplots.R
|
## Select restaurant 4 and forecast bookings h days ahead with an ARIMA
## model using public-holiday dummy regressors.
## Depends on objects defined elsewhere: tr (list of restaurant series),
## phols (public holiday data frame), plotpub(), and the forecast package.
restnum <- 4
h = 14
rest <- tr[[restnum]]

## plot restaurant and public holidays into pdf
pdf(paste("Restaurant", restnum, ".pdf"), width=16/2.54, height=10/2.54)
plotpub(rest$totpeople, rest$pubd, rest$pubi, rest$pubny, restnum)
dev.off()

## Forecast
# Log-transform (with +1 offset so zero-booking days are defined).
logpeople <- ts(log(rest$totpeople+1), start=1, frequency=7)
# Create x regressor public holiday dummies
xdums <- cbind(as.numeric(rest$pubd), as.numeric(rest$pubi), as.numeric(rest$pubny))
colnames(xdums) <- c("going down", "going up", "ny")
# Change public holiday dates to numeric
nphols <- as.numeric(as.timeDate(phols$Date))
# Create time series public holiday variable with appropriate dimensions
# Dimensions - 2011 - 2015
pholt <- as.numeric(seq(as.Date("2011-01-01"), as.Date("2015-12-31"), by="1 day"))
# FIX: was `pholt %in% phols` — comparing numeric dates against the phols
# DATA FRAME, not the numeric holiday dates computed above; `nphols` is
# clearly the intended right-hand side (as used for fpubd/fpubi/fpubny below).
ispubh <- ts(pholt %in% nphols, start=2011, frequency=365)
# Dimensions - start when y series ends
end <- tail(time(rest$totpeople), n=1)
fispubh <- window(ispubh, start=end+(1/365))
# Public Holidays with suspected decreases
fpubd <- nphols[which(phols$Holiday=="1")]
fpubd <- ts(as.numeric(pholt %in% fpubd), start=2011, frequency = 365)
# Begin at end of y series
fpubd <- window(fpubd, start=end+(1/365))
# Public Holidays with suspected increases
fpubi <- nphols[which(phols$Holiday=="2")]
fpubi <- ts(as.numeric(pholt %in% fpubi), start=2011, frequency = 365)
# Begin at end of y series
fpubi <- window(fpubi, start=end+(1/365))
# New Years Eve - suspected increases
fpubny <- nphols[which(phols$Holiday=="3")]
fpubny <- ts(as.numeric(pholt %in% fpubny), start=2011, frequency = 365)
# Begin at end of y series
fpubny <- window(fpubny, start=end+(1/365))
# Create matrix of public holidays for forecasting
xfor <- cbind(as.numeric(fpubd), as.numeric(fpubi), as.numeric(fpubny))
colnames(xfor) <- c("going down", "going up", "ny")
xny <- as.numeric(fpubny)

# Arima fit with public holidays
fit2 <- auto.arima(logpeople, xreg=xdums)
# Arima fit2 forecast; back-transform from the log(+1) scale.
fc2 <- forecast(fit2, xreg=xfor[1:h,], h=h)
fc2$mean <- exp(fc2$mean)-1
fc2$lower <- exp(fc2$lower)-1
fc2$upper <- exp(fc2$upper)-1
fc2$x <- rest$totpeople
fc2$mean <- ts(fc2$mean, start = tsp(fc2$x)[2]+1/365, frequency=365)
tsp(fc2$upper) <- tsp(fc2$lower) <- tsp(fc2$mean)

pdf("Restaurant_4ARIMAForecast.pdf", width = 16/2.54, height = 10/2.54)
plot(fc2, main="Restaurant 4: Arima model with public holidays", include=70, xlab="Year", ylab="Total people booked")
dev.off()
|
158e0e0c62f2e09bb234d1407a82098f9fb0b2f7
|
00b1c810c5e106e4f3801a7b7bee75a516fc9512
|
/2_UseCases/5_2_Plot_Benchmarks.R
|
bbb16640bcfaedc8879a3cc9a7501cfac462f4cb
|
[] |
no_license
|
nldoc/nlrx_usecase
|
901e3d42ad1c503b2da20f0c9e67f0524fa6001e
|
a4c67976eba62513c747eb269325fe814094632b
|
refs/heads/master
| 2020-04-06T09:21:28.462798
| 2019-06-13T11:56:12
| 2019-06-13T11:56:12
| 157,339,028
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,030
|
r
|
5_2_Plot_Benchmarks.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Plot Benchmark results of nlrx and RNetLogo:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Pkgs:
library(tidyverse)
library(ggthemes)

## Read rds files in benchmark folder and stack them into one tibble:
benchmarks <- list.files("3_Results/benchmark", pattern = ".rds", full.names = TRUE) %>%
  map_dfr(readRDS)

## Add names for measurement positions:
benchmarks$posname <- ifelse(benchmarks$pos == 1, "Pre simulation", ifelse(benchmarks$pos == 2, "Post simulation", "Post gc"))
benchmarks$posname <- factor(benchmarks$posname, levels = c("Pre simulation", "Post simulation", "Post gc"))

## Calculate time demand per package/system:
## keep first and last measurement, difference them, and express nlrx's
## runtime as a percentage reduction relative to RNetLogo.
time <- benchmarks %>%
  group_by(pkg, sysname) %>%
  filter(pos==max(pos) | pos==min(pos)) %>%
  mutate(diff = time - lag(time, default = first(time))) %>%
  dplyr::filter(diff > 0) %>%
  dplyr::select(pkg, sysname, diff) %>%
  spread(pkg, diff) %>%
  # FIX: funs() is deprecated (dplyr >= 0.8); mutate_at accepts a bare
  # function, which behaves identically here.
  mutate_at(c("nlrx", "RNetLogo"), as.numeric) %>%
  mutate(nlrx_percent = ((RNetLogo - nlrx) / RNetLogo) * 100)

# Register the Roboto font for Windows graphics devices.
windowsFonts(A = windowsFont("Roboto"))

# Horizontal boxplot of the runtime reduction.
pt <- ggplot(time, aes(x=0, y=nlrx_percent)) +
  geom_boxplot(fill="#268BD2") +
  coord_flip() +
  theme_classic() +
  ylab("Execution time reduction of nlrx compared to RNetLogo [%]") +
  xlab("\n") +
  ggtitle("Simulation runtime") +
  theme(axis.title.y = element_text(color="white"),
        axis.text.y = element_text(color="white"),
        axis.ticks.y = element_blank(),
        #axis.line.y = element_blank(),
        panel.grid = element_blank(),
        axis.text.x = element_text(size=14, color="black", family="A"),
        axis.title.x = element_text(size=14, color="black", family="A"),
        title = element_text(size=16, color="black", family="A"),
        strip.text = element_text(size=14, color="black", family="A"),
        strip.background = element_rect(color="black", fill="gray95"),
        plot.margin = margin(0,4,30,2, unit = "pt"))

ggsave(plot=pt, "4_Plots/nlrx_benchmarks_time.png", width=6, height=2, dpi=300)
## Postpro memory:
# Parse the total system RAM (GB) out of each benchmark metadata .txt file.
benchmarks_meta <- list.files("3_Results/benchmark", pattern = ".txt", full.names = TRUE)
systemram <- map_dfr(benchmarks_meta, function(x) {
  xl <- read_lines(x)
  ram <- tibble(systemram = as.numeric(substring(xl[grep("System RAM", xl)], 13, 14)))
})
# NOTE(review): this assumes list.files() returns the metadata files in the
# same order as the machines appear in 'benchmarks' -- confirm.
systemram$sysname <- unique(benchmarks$sysname)
# Postpro memory:
# Split free-memory measurements by package, join them side by side and
# convert to percentage of system RAM in use.
mem_nlrx <- benchmarks %>% filter(pkg=="nlrx") %>% select(posname, sysname, mem) %>% rename(mem_nlrx = mem)
mem_rnet <- benchmarks %>% filter(pkg=="RNetLogo") %>% select(posname, sysname, mem) %>% rename(mem_rnet = mem)
memplot <- mem_nlrx %>%left_join(mem_rnet) %>%
  left_join(systemram) %>%
  mutate(nlrx = ((systemram - mem_nlrx) / systemram) * 100,
         RNetLogo = ((systemram - mem_rnet) / systemram) * 100) %>%
  select(-mem_nlrx, -mem_rnet, -systemram) %>%
  gather(pkg, ram_p, nlrx, RNetLogo)
# Relabel measurement positions with line breaks for compact facet axes:
memplot$posname <- factor(memplot$posname,
                          levels=c("Pre simulation", "Post simulation", "Post gc"),
                          labels=c("Pre\nsimulation", "Post\nsimulation", "Post\ngc"))
# Boxplots of memory usage per package and measurement position:
pm <- ggplot(memplot, aes(x=posname, y=ram_p, fill=pkg)) +
  facet_wrap(~pkg, ncol=2) +
  geom_boxplot() +
  ylab("System memory in use [%]") +
  xlab("") +
  scale_fill_solarized() +
  guides(fill="none") +
  theme_classic() +
  ggtitle("Memory usage") +
  theme(axis.text = element_text(size=14, color="black", family="A"),
        axis.title = element_text(size=14, color="black", family="A"),
        title = element_text(size=16, color="black", family="A"),
        strip.text = element_text(size=14, color="black", family="A"),
        strip.background = element_rect(color="black", fill="gray95"))
ggsave(plot=pm, "4_Plots/nlrx_benchmarks_memory.png", width=7, height=4, dpi=300)
### Arrange plots:
# Stack runtime (top) and memory (bottom) panels into one combined figure.
library(gridExtra)
pall <- grid.arrange(grobs = list(pt, pm), ncol=1, heights=c(2,4))
ggsave(plot=pall, filename = "4_Plots/nlrx_benchmarks.png", width=7, height=5, dpi=300)
|
fd1308f66c162097f6a712e3c722b16347e833d9
|
aeeffab0ac605dbc66296c6b49b0148fab44001d
|
/run_analysis.R
|
520302486f6ed5ae7745e92c1db8734329755d43
|
[] |
no_license
|
Apujals/Getting-and-cleaning-data-Project-Coursera
|
9d1421cdc49d87114275a06e2c252d223330fca4
|
8026b730d613c257a4b16392be50da05e7c07afa
|
refs/heads/master
| 2020-12-30T16:27:53.308847
| 2017-05-11T15:36:24
| 2017-05-11T15:36:24
| 90,988,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,921
|
r
|
run_analysis.R
|
library(dplyr)

## Download the raw data and decompress it (skipped if already present)
nameFile <- "raw_data.zip"
if (!file.exists(nameFile)) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileUrl, nameFile)
  unzip(nameFile)
}

## Step 1.
## Merge training and test sets to create one data set.
## The UCI archive names the feature files with a capital X
## ("X_train.txt"/"X_test.txt"); the lowercase "x_train.txt" used before
## only worked on case-insensitive file systems and fails on Linux.
xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
subjtrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
train <- cbind(subjtrain, ytrain, xtrain)
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subjtest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
test <- cbind(subjtest, ytest, xtest)
alldata <- rbind(train, test)

## Step 2 & Step 4.
## Extracts only the measurements on the mean and standard deviation for each measurement.
## Appropriately labels the data set with descriptive variable names.
features <- read.table("./UCI HAR Dataset/features.txt", colClasses = "character")
names(alldata) <- c("subject", "activity", features$V2)
fwanted <- grep(".*mean.*|.*std.*", names(alldata))
datawanted <- alldata[c(1, 2, fwanted)]

## Step 3.
## Uses descriptive activity names to name the activities in the data set.
actlabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
datawanted$subject <- as.factor(datawanted$subject)
datawanted$activity <- factor(datawanted$activity, levels = actlabels$V1, labels = actlabels$V2)

## Step 5.
## From the data set in step 4, creates a second, independent tidy data set with the
## average of each variable for each activity and each subject.
tidydata <- aggregate(. ~subject + activity, datawanted, mean)
tidydata <- arrange(tidydata, subject, activity)
write.table(tidydata, "./UCI HAR Dataset/mean_tidy_data.txt", row.names = FALSE, quote = FALSE)
|
8cd442ca489e350eaac953622ef4fac408f10e49
|
a2ea354ac7e7bc199b68a3e5a6f8238db479a1b2
|
/man/pkgdown_exist.Rd
|
ca216233ce490b40038d09d2ed4a200b10e6f897
|
[
"MIT"
] |
permissive
|
robertzk/rocco
|
84c641f490f7d296bf39affef248a36d1e68c761
|
d6c6969567c023536358b3ebb044abae7ef15a0e
|
refs/heads/master
| 2020-06-03T07:04:58.543644
| 2018-08-04T17:59:17
| 2018-08-04T17:59:17
| 33,039,832
| 2
| 2
| null | 2017-05-19T18:15:53
| 2015-03-28T16:05:48
|
CSS
|
UTF-8
|
R
| false
| true
| 345
|
rd
|
pkgdown_exist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/staticdocs.R
\name{pkgdown_exist}
\alias{pkgdown_exist}
\title{Check whether pkgdown exist.}
\usage{
pkgdown_exist(directory)
}
\arguments{
\item{directory}{character. The directory of the package to check for pkgdown.}
}
\description{
Check whether pkgdown exist.
}
|
e47db8a610dc714e01e0d631134d24c09cf8f584
|
6b4fe2baa84e74af637f319ea5d887cb2fd6f9a2
|
/kevin/network-integration-kevin/deg_network_creation_140218.R
|
fe6d0ea95cc38695e2bbd76066f5f9f51cb16cd3
|
[] |
no_license
|
dznetubingen/analysis_scripts
|
1e27ca43a89e7ad6f8c222507549f72b1c4efc20
|
4fcac8a3851414c390e88b4ef4ac461887e47096
|
refs/heads/master
| 2021-06-25T10:47:40.562438
| 2021-01-04T16:02:34
| 2021-01-04T16:02:34
| 187,789,014
| 1
| 0
| null | 2020-09-03T11:37:25
| 2019-05-21T07:55:17
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 16,437
|
r
|
deg_network_creation_140218.R
|
#################################################################
## Integrative network generation based on CAGE and miRNA data ##
#################################################################
# first created on 15-02-2018
#
# Network creation based on differentially expressed protein coding and miRNA genes
# Integration of miRNA-target, TF-target and PPI information
#
#
# Currently, for sRNA-seq and CAGE-seq data, the rLog transformed and normalized values are used for correlation calculation
# Both sRNA and CAGE data were analyzed using DESeq2 with Age and Gender as additional covariates included in the model and in the
# rLog transformation
#
# load libs
library(pheatmap)
library(viridis)
library(stringr)
library(igraph)
source("~/scripts/utility_funs.R")
######################################
### Load and modify necessary data ###
######################################
# Load rLog transformed expression values
cage <- read.table("~/rimod/CAGE/analysis/CAGE_rLog_expression_values_080218.txt", sep="\t", header=T, check.names = F, row.names = 1)
mirna <- read.table("~/rimod/smallRNA/analysis/deseq_analysis_170118/rLog_expression_values_sRNA.txt", sep="\t", header=T, row.names=1)
# load phenodata
design <- read.table("~/rimod/smallRNA/smallRNA_design_file.txt", sep="\t", header=T)
# Zero-pad sample IDs to 5 characters so they match the IDs embedded in the
# expression-table column names (extracted below via substr(..., 8, 12)).
design$sample <- str_pad(design$sample, 5, side = "left", pad = "0")
# Exclude bad samples
design <- design[!is.na(design$gender),]
design <- design[!design$sample == "10166",] # bad RIN
mirna <- mirna[,substr(colnames(mirna), 8, 12) %in% design$sample]
# Load target prediction file (currently TargetScan)
targets <- read.csv("~/resources/miRNA_target_predictions/mirwalk/miRWalk_miRNA_targets_FTD.NDC.DEGs.csv")
# Target filtering
# targets <- targets[targets$TargetScan == 1,] # only consider targets also found in TargetScan
## Data formatting
# Bring data in suitable format
samples <- as.character(design$sample)
# Extract sample IDs from RNA count table headers
cage.samples <- substr(colnames(cage), 8, 12)
# Only consider mRNA samples that are available in miRNA dataset
cage.keep <- cage.samples %in% design$sample
cage <- cage[,cage.keep]
cage.samples <- cage.samples[cage.keep]
# Bring data in compatible format: reorder miRNA columns and design rows so
# all three objects share the same sample order as the CAGE table.
mir.samples <- substr(colnames(mirna), 8, 12)
mirna.order <- match(cage.samples, mir.samples)
mir.samples <- mir.samples[mirna.order]
mirna <- mirna[mirna.order]
design <- design[match(cage.samples, design$sample),]
## Load DEG files (differential expression results for both assays)
mirna.deg <- read.table("~/rimod/smallRNA/analysis/deseq_analysis_170118/DEGs_smallRNA_padj0.05.txt", sep="\t", header=T, row.names = 1)
cage.deg <- read.table("~/rimod/CAGE/analysis/DEGs_cage_deseq_rimod_frontal_shrLFC_080218.txt", sep="\t", header=T, row.names=1)
## Load PPI information
ints <- read.table("~/resources/ppi/adjusted_int_table_900_symbols.txt", sep="\t", header=T)
## Load TF-Gene mapping (currently from ORegAnno; header stored separately)
org <- read.table("~/resources/TF_interactions/oreganno/ORegAnno_Combined_2016.01.19_hs.tsv", sep="\t", fill = T, stringsAsFactors = F)
org.header <- read.table("~/resources/TF_interactions/oreganno/oreganno_header.tsv", sep="\t", header=T)
colnames(org) <- colnames(org.header)
keep <- c("NEGATIVE OUTCOME", "POSITIVE OUTCOME")
org <- org[org$Outcome %in% keep,] # only keep interactions with known outcome (neutral is probably unknown)
# Don't use miRNA-target interactions
org <- org[!grepl("hsa-miR", org$Regulatory_Element_Symbol),]
## Load TF-miRNA mapping (currently from RegNetwork)
tf.mir <- read.table("~/resources/TF_interactions/RegNetwork/human/human.source", sep="\t", stringsAsFactors = F)
colnames(tf.mir) <- c("symbol_source", "gene_id_source", "target_symbol", "gene_id_target")
tf.mir$regulation <- rep("positive", nrow(tf.mir))
### Melt everything down to three data frames
# 1: expression of genes (mRNA + miRNA)
# 2: DE analysis of genes (mRNA + miRNA)
# 3: regulatory information (TF-miR, TF-gene, miRNA-gene)
# # rLog value dataframe for all genes
# rlog.df <- mirna
# colnames(rlog.df) <- colnames(cage)
# rlog.df <- rbind(rlog.df, cage)
#
# # Regulatory interaction data frame
# reg.df <- tf.mir
# org.regs <- as.character(org$Outcome)
# org.regs[org.regs == "POSITIVE OUTCOME"] <- "positive"
# org.regs[org.regs == "NEGATIVE OUTCOME"] <- "negative"
# org.df <- data.frame(symbol_source = org$Regulatory_Element_Symbol,
#                      gene_id_source = org$Regulatory_Element_ID,
#                      target_symbol = org$Gene_Symbol,
#                      gene_id_target = org$Gene_ID,
#                      regulation = org.regs)
############################################
### data loading and formatting complete ###
############################################
###########################################################
### Regulatory correlation analysis ###
### Filter regulatory interactions based on correlation ###
###########################################################
# Thresholds used by all correlation-based edge filters below:
cor_method = "spearman"
lfc_cutoff = log2(1.5)
cor_cutoff = 0.5
pval_cutoff = 0.05
## Filter DEGs
cage.deg <- cage.deg[cage.deg$padj <= 0.01,]
cage.deg <- cage.deg[abs(cage.deg$log2FoldChange) >= lfc_cutoff,]
# Only consider genes with sufficient LFC
# NOTE(review): mirna.sig and cage.sig do not appear to be used further
# down in this script -- confirm whether they can be removed.
mirna.sig <- mirna.deg[abs(mirna.deg$log2FoldChange) >= lfc_cutoff,]
cage.sig <- cage.deg[abs(cage.deg$log2FoldChange) >= lfc_cutoff,]
## Data frame to store network in.
## The "dec" row is a placeholder that fixes the column types; it is
## dropped later via network <- network[-1,].
network <- data.frame(source = "dec",
                      target = "dec",
                      interaction = "dec",
                      intValue = 1)
degenes <- rownames(cage.deg)
#========================================================================#
### TF-gene interactions
## For every DE gene, add ORegAnno TF->target edges (in both directions:
## the gene acting as TF, or being regulated by another DE gene) whose
## expression correlation is significant at pval_cutoff.
## NOTE(review): growing 'network' via rbind() in a loop is O(n^2); fine
## for this data size but worth vectorizing if the DEG list grows.
for (deg in degenes){
  deg.exp <- as.numeric(cage[rownames(cage) == deg,])
  # acts as TF
  if (deg %in% org$Regulatory_Element_Symbol){
    deg.regs <- org[org$Regulatory_Element_Symbol == deg,]
    deg.regs <- deg.regs[deg.regs$Gene_Symbol %in% degenes,]
    if (nrow(deg.regs) > 0){
      for (i in 1:nrow(deg.regs)){
        b <- deg.regs[i,]$Gene_Symbol
        b.exp <- as.numeric(cage[rownames(cage) == b,])
        ct <- cor.test(deg.exp, b.exp, method = cor_method)
        if (ct$p.value <= pval_cutoff){
          entry <- data.frame(source = deg, target = b, interaction = deg.regs[i,]$Outcome, intValue = ct$estimate)
          network <- rbind(network, entry)
        }
      }
    }
  }
  # regulated by TF
  if(deg %in% org$Gene_Symbol){
    deg.regs <- org[org$Gene_Symbol == deg,]
    deg.regs <- deg.regs[deg.regs$Regulatory_Element_Symbol %in% degenes,]
    if (nrow(deg.regs) > 0){
      for (i in 1:nrow(deg.regs)){
        b <- deg.regs[i,]$Regulatory_Element_Symbol
        b.exp <- as.numeric(cage[rownames(cage) == b,])
        ct <- cor.test(deg.exp, b.exp, method = cor_method)
        if (ct$p.value <= pval_cutoff){
          entry <- data.frame(source = b, target = deg, interaction = deg.regs[i,]$Outcome, intValue = ct$estimate)
          network <- rbind(network, entry)
        }
      }
    }
  }
}
# Drop the "dec" placeholder row, then de-duplicate edges on (source, target):
network <- network[-1,]
netdup <- paste(network$source, network$target, sep="")
keep <- !duplicated(netdup)
network <- network[keep,]
# Remove interactions where the action does not match the correlation
# (a "POSITIVE OUTCOME" edge must have a positive correlation, and vice versa)
keep <- c()
for (i in 1:nrow(network)){
  if (network[i,]$interaction == "POSITIVE OUTCOME"){
    if (network[i,]$intValue > 0){
      keep <- c(keep, i)
    }
  }
  if (network[i,]$interaction == "NEGATIVE OUTCOME"){
    if (network[i,]$intValue < 0 ){
      keep <- c(keep, i)
    }
  }
}
network <- network[keep,]
#========================================================================#
#========================================================================#
#### miRNA gene interactions
## For every DE gene that is a predicted miRNA target, keep the
## miRNA -> gene edge only if the miRNA/gene expression correlation is
## significantly negative (consistent with miRNA-mediated repression).
for (deg in degenes){
  deg.exp <- as.numeric(cage[rownames(cage) == deg,])
  # gene is a predicted target of at least one miRNA
  if (deg %in% targets$genesymbol){
    deg.regs <- targets[targets$genesymbol == deg,]
    if (nrow(deg.regs) > 0){
      for (i in 1:nrow(deg.regs)){
        b <- deg.regs[i,]$mirnaid
        b.exp <- as.numeric(mirna[rownames(mirna) == b,])
        ct <- cor.test(deg.exp, b.exp, method = cor_method)
        # 'ct$estimate' spelled out in full: the previous 'ct$estimat'
        # only worked through R's partial '$' matching on lists.
        if (ct$p.value <= pval_cutoff && ct$estimate <= -cor_cutoff){
          entry <- data.frame(source = b, target = deg, interaction = "inhibition", intValue = ct$estimate)
          network <- rbind(network, entry)
        }
      }
    }
  }
}
# remove duplicated edges
netdup <- paste(network$source, network$target, sep="")
keep <- !duplicated(netdup)
network <- network[keep,]
#========================================================================#
#========================================================================#
### Add PPIs
## Keep only directional protein-protein interactions where both partners
## are DE genes, de-duplicate, and append them to the edge table with the
## STRING-style score rescaled from 0-1000 to 0-1.
net.ppis <- ints[ints$hgnc_symbol_a %in% degenes,]
net.ppis <- net.ppis[net.ppis$hgnc_symbol %in% degenes,]
net.ppis <- net.ppis[net.ppis$is_directional == "t",]
# remove duplicated edges
netdup <- paste(net.ppis$hgnc_symbol_a, net.ppis$hgnc_symbol)
keep <- !duplicated(netdup)
net.ppis <- net.ppis[keep,]
net.ppis <- data.frame(source = net.ppis$hgnc_symbol_a, target = net.ppis$hgnc_symbol, interaction = net.ppis$action, intValue = net.ppis$score/1000)
network <- rbind(network, net.ppis)
#========================================================================#
#############################
### iGraph analysis #########
#############################
# Convert network to igraph format.
# igraph::graph() expects an interleaved vector c(s1, t1, s2, t2, ...);
# rbind()-ing the two columns and reading the matrix column-wise via
# as.vector() produces exactly that order, replacing the previous loop
# that grew 'edges' with c() (O(n^2) copying).
edges <- as.vector(rbind(as.character(network$source),
                         as.character(network$target)))
g <- graph(edges = edges, directed = TRUE)
l <- layout_with_fr(g)
plot(g, vertex.color = "gold", vertex.size = 5, edge.arrow.size = .5, layout = l,
     vertex.label.cex = 0.6, vertex.label.color = "black")
### Collect information on vertices and add to the graph
# Collect fold changes: miRNAs (names starting "hsa-") are looked up in the
# sRNA DE table, everything else in the CAGE DE table; nodes absent from
# both keep lfc = 0.
vnames <- V(g)$name
net.lfcs <- c()
for (i in 1:length(vnames)){
  lfc <- 0
  v <- vnames[i]
  if (grepl("hsa-", v)) { # check if miRNA
    lfc <- as.numeric(mirna.deg[rownames(mirna.deg) == v,]$log2FoldChange)
  }
  else if (v %in% rownames(cage.deg)){
    lfc <- as.numeric(cage.deg[rownames(cage.deg) == v,]$log2FoldChange)
  }
  net.lfcs <- c(net.lfcs, lfc)
}
# Assign type: miRNA, TF (listed as a regulator in ORegAnno) or plain Gene
types = c()
for (i in 1:length(vnames)){
  v <- vnames[i]
  type = ""
  if (grepl("hsa-",v)){
    type = "miRNA"
  }
  else if (v %in% org$Regulatory_Element_Symbol){
    type = "TF"
  }
  else {
    type = "Gene"
  }
  types <- c(types, type)
}
# Assign to graph object
V(g)$lfc <- net.lfcs
V(g)$type <- types
### Collect information on edges and add to the graph
E(g)$intType <- as.character(network$interaction)
E(g)$intValue <- network$intValue
write_graph(g, file = "~/rimod/CAGE/analysis/cage_network_analysis/test_graph_deg_network.gml", format = "gml")
#################################
### Network Analysis ############
#################################
#
# Remove nodes with degree < 3, as they lack additional information to confirm their validity in the network context
# Detect communities (clusters) using the Multilevel (Louvain) algorithm
# Extract communities and save them (gene lists + .gml subgraphs) for further analysis
#
g2 <- delete.vertices(g, degree(g) < 3)
# Louvain clustering requires an undirected graph:
g2 <- as.undirected(g2)
l <- layout_with_fr(g2)
ebc <- cluster_louvain(g2)
V(g2)$color = membership(ebc)
plot(g2, vertex.size = 5, edge.arrow.size = .5, layout = l, vertex.label.cex = 0.6, vertex.label.color = "black")
# plot every cluster separately
# NOTE(review): 'l' was computed for g2 but is reused here to plot subgraphs
# of 'g' -- the layout/vertex counts may not match; confirm intended.
for (i in 1:length(table(membership(ebc)))){
  gsub <- delete.vertices(g, !membership(ebc) == i)
  plot(gsub, vertex.size = 4, edge.arrow.size = .5, layout = l, vertex.label.cex = 0.6, vertex.label.color = "black")
}
# Extract genes belonging to the communities
no.com <- length(table(membership(ebc))) # number of detected communities
mem <- membership(ebc)
for (i in 1:no.com) {
  com <- mem[mem == i]
  com.genes <- names(com)
  file.name <- paste("~/rimod/smallRNA/analysis/network_analysis/community_analysis/community_", i, "_multilevel.txt", sep="")
  write.table(com.genes, file.name, quote=F, row.names = F)
  # Additionally generate igraph objects, one .gml per community
  g.com <- delete.vertices(g2, !V(g2)$name %in% com.genes)
  file.name.gml <- paste("~/rimod/smallRNA/analysis/network_analysis/community_analysis/community_", i, "_multilevel.gml", sep="")
  write_graph(g.com, file.name.gml, format = "gml")
}
### ======================================================================================================================================= ###
####
## Create rLog expression table from miRNA and RNA genes that are in the network
## for non-integrated network inference (e.g. ARACNE)
################
# x_mirna <- mirna
# x_rna <- rna
# colnames(x_rna) <- colnames(x_mirna)
# x_all <- rbind(x_mirna, x_rna)
# net.genes <- c(as.character(network$source), as.character(network$target))
# x_all <- x_all[rownames(x_all) %in% net.genes,]
# write.table(x_all, "~/rimod/smallRNA/analysis/network_analysis/rLog_expression_network_genes.txt", sep="\t", col.names=NA, quote=F)
# ### Read ARACNE inferred network edges
# arac <- read.csv("~/rimod/smallRNA/analysis/network_analysis/ARACNE Inference 2--clustered default edge.csv")
# arac.ints <- as.character(arac$name)
# arac.g1 <- as.character(sapply(arac.ints, function(x){strsplit(x, split=" ")[[1]][[1]]}))
# arac.g2 <- as.character(sapply(arac.ints, function(x){strsplit(x, split=" ")[[1]][[3]]}))
# arac.gg <- data.frame(gene1 = arac.g1, gene2 = arac.g2)
#
# # Not considering directionality, find edges that are both found in the
# # integrated approach and in the ab-initio ARACNe approach
# count <- 0
# cand.edges <- c()
# genes1 <- as.character(arac.gg$gene1)
# genes2 <- as.character(arac.gg$gene2)
# for (i in 1:nrow(arac.gg)){
#   g1 <- genes1[i]
#   g2 <- genes2[i]
#
#   sub1 <- network[network$source == g1,]
#   sub2 <- network[network$target == g1,]
#
#   sub1.count <- nrow(sub1[sub1$target == g2,])
#   sub2.count <- nrow(sub2[sub2$source == g2,])
#
#   count <- count + sub1.count + sub2.count
#
#   if (sub1.count > 0 || sub2.count > 0){
#     cand.edges <- c(cand.edges, i)
#   }
# }
# Create connections for PyPanda analysis
# NOTE(review): 'network' has no 'interactionType' column (the column is
# named 'interaction'), so network$interactionType is NULL and both subsets
# below are empty data frames. Also no row ever carries the value "ppi".
# Confirm the intended column name and value before using this output.
net.ppi <- network[network$interactionType == "ppi",]
net.tf <- network[!network$interactionType == "ppi",]
net.ppi.pp <- net.ppi[,c(1,2,4)]
net.ppi.tf <- net.tf[,c(1,2,4)]
# write.table(net.ppi.pp, "~/rimod/smallRNA/analysis/network_analysis/pypanda_analysis/network_ppi_data_lfc0.5_pval0.5_pypanda.txt", sep="\t", row.names = F, quote=F)
# write.table(net.ppi.tf, "~/rimod/smallRNA/analysis/network_analysis/pypanda_analysis/network_motif_data_lfc0.5_pval0.5_pypanda.txt", sep="\t", row.names = F, quote=F)
############################
### Compare to results of Paper "Weighted Protein Interaction Network Analysis of Frontotemporal Dementia" (2017)
## Hub genes reported by that paper:
pn_hubs <- c("COPS5", "ESR1", "HSP90AB1", "STUB1", "EGFR", "FN1", "HSP90AA1", "HSPA8", "PDCD6IP", "TP53", "VCP", "APP", "FSCN1", "GNBL21",
             "HDAC1", "HSPA4", "HTT", "PIN1", "VCAM1", "YWHAZ", "CDK2", "ELAVL1", "EP300", "MCM7", "PML", "RPS3", "TCP1", "TRIM32", "TUBA1A")
verts <- V(g)$name
hub_overlap <- pn_hubs[pn_hubs %in% verts]
##### Create network containing these hub genes:
## keep every edge that touches at least one overlapping hub.
hub.rows <- network$source %in% hub_overlap | network$target %in% hub_overlap
hub.net <- network[hub.rows, ]
# graph() expects an interleaved vector c(s1, t1, s2, t2, ...); this
# replaces the previous c()-growing loop.
edges <- as.vector(rbind(as.character(hub.net$source),
                         as.character(hub.net$target)))
g <- graph(edges = edges, directed = TRUE)
l <- layout_with_fr(g)
plot(g, vertex.color = "gold", vertex.size = 5, edge.arrow.size = .5, layout = l,
     vertex.label.cex = 0.6, vertex.label.color = "black")
### Collect information on vertices and add to the graph
# Collect fold changes (miRNAs from the sRNA DE table, genes from the CAGE
# DE table; 'cage.deg' replaces the previously undefined 'rna.deg').
vnames <- V(g)$name
net.lfcs <- c()
for (i in 1:length(vnames)){
  lfc <- 0
  v <- vnames[i]
  if (grepl("hsa-", v)) { # check if miRNA
    lfc <- as.numeric(mirna.deg[rownames(mirna.deg) == v,]$log2FoldChange)
  }
  else if (v %in% rownames(cage.deg)){
    lfc <- as.numeric(cage.deg[rownames(cage.deg) == v,]$log2FoldChange)
  }
  net.lfcs <- c(net.lfcs, lfc)
}
# Assign type: miRNA, TF (regulator in the ORegAnno table, matching the
# earlier vertex-annotation block; 'org' replaces the previously undefined
# 'tf.gene'), or plain Gene.
types = c()
for (i in 1:length(vnames)){
  v <- vnames[i]
  type = ""
  if (grepl("hsa-",v)){
    type = "miRNA"
  }
  else if (v %in% org$Regulatory_Element_Symbol){
    type = "TF"
  }
  else {
    type = "Gene"
  }
  types <- c(types, type)
}
# Assign to graph object and export:
V(g)$lfc <- net.lfcs
V(g)$type <- types
write_graph(g, file = "~/rimod/smallRNA/analysis/network_analysis/hub_gene_network.gml", format = "gml")
|
e0445d1ac105cf63bee89e39e7ea969a6ac031ab
|
3e3176c252ee79419044dbbe7e5c557d3d5a7d93
|
/mortality_dispo_models/RCRI_analysis_impute_missing.R
|
7c21c62182eef24f41a7542ead82720343bf512a
|
[] |
no_license
|
slr72090/periop_MI
|
05cf97d91db3395e7db184d6124cbb6e179b49d0
|
f2126a02e81289f3273c3afeae8b9d2a4d2b3d72
|
refs/heads/master
| 2020-11-27T02:23:02.334366
| 2020-03-19T18:16:18
| 2020-03-19T18:16:18
| 229,271,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,340
|
r
|
RCRI_analysis_impute_missing.R
|
## Exploratory data analysis; Perioperative MI
## Sylvia Ranjeva
# Set the working directory to the source file directory
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("~/Desktop/perip_MI_GITHUB/mortality_dispo_models/")
## Load package scripts -----------------------------------------------
require(ggplot2)
require(foreign)
require(readstata13)
require(dplyr)
require(tidyr)
require(FactoMineR)
require(PCAmixdata)
require(factoextra)
require(ggfortify)
require(RSQLite)
require(GGally)
require(reshape2)
require(cowplot)
require(corrplot)
require(tidyverse)
require(MASS)
library(proto)
library(pROC)
library(xtable)
library(survey)
# MASS::select would otherwise mask dplyr::select after the loads above:
select <- dplyr::select
#Plotting specs
textSize = 12
save_plots = F
source("../utility_scripts/plot_themes.R")
#Data saving / workflow switches:
data_filename = "Data_objects/data_raw_with_weights_imputed.rda"
save_data = F
drop_missing = F
impute_missing = T
generate_data = F
output_tables = F
# Either rebuild the analysis data set from raw NIS files ...
if(generate_data == T){
  # Identify procedures that correspond to non-cardiac surgeries
  year_vec = c(2008:2013)
  n_procedures_vec <- rep(15,length(year_vec)) #rep(15,length(year_vec)) # Number of possible procedures per individual for this year
  n_dx_vec <- c(15, 25, 25, 25, 25, 25) # Number of possible diagnoses per individual for this year
  source("process_data.R")
  # Rename comorbidity flags, scale continuous covariates, and derive the
  # RCRI point score plus composite risk indicators:
  data_formatted <- data_all %>%
    rename(obesity = cm_obese,
           alcoholic = cm_alcohol,
           HTN = cm_htn_c,
           valve_dz = cm_valve,
           chrnlung = cm_chrnlung,
           anemia = cm_anemdef,
           PAD = cm_perivasc) %>%
    mutate(age_factor = as.factor(ntile(age,3)),
           age = as.numeric(scale(age)),
           nchronic = as.numeric(scale(nchronic)),
           invasive_mgmt = as.factor(invasive_mgmt),
           high_risk_surgery = as.factor(as.numeric(transplant == 1|thoracic_surgery == 1|vascular == 1|abdominal == 1)),
           high_risk_surgery_2 = as.factor(as.numeric(transplant == 1|thoracic_surgery == 1|vascular == 1)),
           hx_revasc = as.factor(as.numeric(prior_CABG == 1 | prior_PCI ==1))) %>%
    mutate(RCRI_pt = as.factor(as.numeric(RCRI_pt) + as.numeric(high_risk_surgery == 1))) %>%
    mutate(severe_MI = as.factor(as.numeric(cardiogenic_shock == 1 | IABP == 1))) %>%
    select(-c(prior_CABG, prior_PCI, prior_MI, CAD)) %>% #transplant,thoracic_surgery,vascular,)) %>%
    mutate(`RCRI >= 3` = as.factor(as.numeric(RCRI_pt) > 3))
  if(impute_missing){
    # Replace missing covariates with the imputed values produced by the
    # sourced script (which defines 'missing_vars' and 'complete_data'):
    source("impute_missing.R")
    data_formatted[,missing_vars] <- complete_data[,missing_vars]
  }
  data = data_formatted
  if(save_data){
    save(data_formatted, data_all, file = data_filename)
  }
}
# ... or load a previously saved version:
if(generate_data == F){
  load(data_filename)
  data <- data_formatted
  rm(data_formatted)
}
## Step 1: retain all variables ## -----------------------------------------------------------------------------------------
# Build the PCA input: scaled RCRI score plus the demographic/comorbidity
# covariates listed below.
dat_PCA <- data %>%
  mutate(RCRI_pt = as.numeric(scale(as.numeric(RCRI_pt))))
dat_PCA <- dat_PCA[, (colnames(dat_PCA) %in% c("age",
                                               "race",
                                               "gender",
                                               "obesity",
                                               "smoking",
                                               "alcoholic",
                                               "HTN",
                                               "HLD",
                                               "hx_DM",
                                               "hx_ckd",
                                               "hx_isch_heart",
                                               "hx_revasc",
                                               "PAD",
                                               "valve_dz",
                                               "hx_chf",
                                               "hx_VTE",
                                               "chrnlung",
                                               "malignancy",
                                               "cm_mets",
                                               "anemia",
                                               "hx_CVA",
                                               "high_risk_surgery"
))]
# PCAmix handles mixed data: splitmix separates quantitative (X1) from
# qualitative (X2) columns.
split.data.all <- splitmix(dat_PCA)
split.data <- split.data.all
X1 <- split.data$X.quanti
X2 <- split.data$X.quali
res.pcamix <- PCAmix(X.quanti=X1, X.quali=X2,rename.level=TRUE, ndim = 5,
                     graph=FALSE)
# Eigenvalues / variance proportions per dimension:
out.eig <- as.data.frame(res.pcamix$eig)
out.eig$dim <- c(1:nrow(out.eig))
# Individual coordinates joined back to the full data (by row index 'ind'):
out.ind <- as.data.frame(res.pcamix$ind$coord) %>%
  mutate(ind = row_number()) %>%
  left_join(data, by = "ind") %>%
  drop_na()
# Variable contributions (%) and coordinates/loadings:
out.quant <- res.pcamix$quanti$contrib.pct
out.qual <- res.pcamix$quali$contrib.pct
out.contrib <- rbind(out.quant,out.qual) %>%
  as.data.frame()
out.contrib$var <- rownames(out.contrib)
out.coord <- as.data.frame(rbind(res.pcamix$quanti.cor, res.pcamix$categ.coord))
out.coord$var <- rownames(out.coord)
out.loadings <- res.pcamix$sqload
# Flag variables with large coordinates on dim 1 or dim 2 for labeling:
out.coord$sig <- as.factor(out.coord$`dim 1` > 0.4 | out.coord$`dim 2` > 0.5 |out.coord$`dim 1` < -0.4 | out.coord$`dim 2` < -0.5 )
## Feature selection -------------------------------------------------------------------------------------------
#Select retained variables: keep those contributing > 10% to any dimension
out.contrib$max_contrib <- apply(out.contrib %>% select(-var),1,max)
retained_vars <- out.contrib %>% filter(max_contrib > 10) %>% select(var)
### Plot PCA output ## -----------------------------------------------------------------------------------------
# Axis labels report each dimension's share of explained variance.
xlabel <- paste0(" Dim 1, ", round(out.eig$Proportion[1], 2), "%")
# Index [2] for dimension 2 (previously [3], which reported dim 3's variance):
ylabel <- paste0(" Dim 2, ", round(out.eig$Proportion[2], 2), "%")
out.coord.sig <- out.coord %>% filter(sig == TRUE)
# Loading plot: arrows for all variables, labels only for the 'sig' subset.
p_loadings = ggplot()+
  #geom_point(data = out.loadings, aes(x=`dim 1`, y= `dim 2`, colour = "#DCDCDC"))+
  geom_segment(data = out.coord, aes(x=0,y=0,xend=`dim 1`,yend=`dim 2`), color = "grey",
               arrow=arrow(length=unit(0.2,"cm"))) +
  geom_text(data = out.coord.sig, aes(x=`dim 1`, y=`dim 2`, label=factor(out.coord.sig$var)), color = "black") +
  xlab(xlabel) +
  ylab(ylabel) +
  plot_themes
# Cumulative-variance scree plot (70% reference line):
p_cumvar <- ggplot(out.eig, aes(x = dim, y = Cumulative)) +
  geom_point() +
  geom_line(linetype = 2) +
  geom_hline(aes(yintercept = 70, color = "red")) +
  labs("") +
  xlab("Principal component dimension") +
  ylab("Percentage of cumulative variance") +
  plot_themes
# Eigenvalue scree plot (Kaiser criterion line at 1):
p_eig <- ggplot(out.eig, aes(x = dim, y = Eigenvalue)) +
  geom_point() +
  geom_line(linetype = 2) +
  geom_hline(aes(yintercept = 1, color = "red")) +
  labs("") +
  xlab("Principal component dimension") +
  ylab("Eigenvalue") +
  plot_themes +
  theme(legend.position = "none")
if(save_plots){
  save_plot("eigenvalues.pdf", p_eig, base_width = 8, base_height = 6)
  save_plot(paste0("loadings.pdf"), p_loadings, base_width = 8, base_height = 6)
}
# Individuals in PC space, colored by RCRI >= 3 and by year, with
# 95% data ellipses per group:
p_ind_RCRI <- qplot(data = out.ind, x = `dim 1`, y = `dim 2`, colour = `RCRI >= 3`) +
  stat_ellipse(geom = "polygon", alpha = .2, aes(fill = `RCRI >= 3`)) +
  plot_themes
p_ind_year <- qplot(data = out.ind, x = `dim 1`, y = `dim 2`, colour = year) +
  stat_ellipse(geom = "polygon", alpha = .2, aes(fill = year)) +
  plot_themes
if(save_plots){
  save_plot("RCRI_ind.pdf", p_ind_RCRI, base_width = 6, base_height =4)
  save_plot("year_ind.pdf", p_ind_year, base_width = 8, base_height =6)
  ind_plots <- plot_grid(p_ind_RCRI, p_ind_year, ncol =2)
  save_plot("ind_plots.pdf", ind_plots, base_width = 12, base_height = 4)
}
## Logit Regression analysis ## ------------------------------------------------------------------------------
# Survey design object encoding the NIS sampling structure (hospital
# clusters, strata, trend weights):
survey_df <- svydesign(ids = ~hospid, strata = ~nis_stratum, weights = ~trendwt, data = data, nest =T)
# Model 1 ("PCA"): logit on the PCA-retained covariates + outcome covariates
sub.PCA <- data[,(colnames(data) %in% c("died", "year", "race", "gender", "invasive_mgmt", "PNA", "sepsis", "Afib", "PE", "DVT", "Bleed", "NSTEMI", "severe_MI", "trendwt", unlist(retained_vars)))]
out.PCA <- glm(as.numeric(died) ~.,
               data = sub.PCA,
               weights = trendwt,
               family = "binomial")
# Model 2 ("Full"): every PCA input variable plus complication covariates
var_names_full <- c(names(X1), names(X2), "cm_mets", "PNA", "sepsis", "Afib", "PE", "DVT", "Bleed", "NSTEMI", "severe_MI")#, "nchronic")
data_full <- data %>% select(c(year, died, invasive_mgmt, trendwt, var_names_full))
out.full <- glm(died ~.,
                data = data_full,
                family = "binomial")
options(survey.lonely.psu="adjust")
# Model 3 ("RCRIplus"): survey-weighted fit ...
# NOTE(review): this svyglm fit is immediately overwritten by the unweighted
# glm below -- confirm which specification is intended to be kept.
out.RCRIplus <- svyglm(as.numeric(died) ~ age +
                         gender +
                         race +
                         year +
                         Afib +
                         sepsis +
                         PNA +
                         PE +
                         DVT +
                         Bleed +
                         hx_chf +
                         hx_CVA +
                         hx_DM +
                         hx_ckd +
                         hx_isch_heart +
                         hx_revasc +
                         high_risk_surgery +
                         #surgery_type +
                         invasive_mgmt +
                         hx_revasc +
                         NSTEMI +
                         severe_MI ,
                       design = survey_df)
# ... replaced by the unweighted glm version of the same model:
out.RCRIplus <- glm(as.numeric(died) ~ age +
                      gender +
                      race +
                      year +
                      Afib +
                      sepsis +
                      PNA +
                      PE +
                      DVT +
                      Bleed +
                      hx_chf +
                      hx_CVA +
                      hx_DM +
                      hx_ckd +
                      hx_isch_heart +
                      high_risk_surgery +
                      #surgery_type +
                      invasive_mgmt +
                      NSTEMI +
                      hx_revasc +
                      severe_MI ,
                    data = data,
                    family = "binomial")
# Model 4 ("null"): demographics + acute complications only (no RCRI terms)
out.null <- glm(as.numeric(died) ~ age +
                  gender +
                  race +
                  year +
                  Afib +
                  sepsis +
                  PNA +
                  PE +
                  DVT +
                  Bleed +
                  invasive_mgmt +
                  NSTEMI +
                  severe_MI +
                  NSTEMI*Bleed,
                data = data,
                weights = trendwt,
                family = "binomial")
# Model comparison table: parameters, log-likelihood, BIC, and delta-BIC
model_comp <- data.frame(model = c("Full", "PCA", "RCRIPlus", "null"),
                         n_par = c(out.full$rank, out.PCA$rank, out.RCRIplus$rank, out.null$rank),
                         LL = c(logLik(out.full),logLik(out.PCA), logLik(out.RCRIplus), logLik(out.null)),
                         BIC = c(BIC(out.full), BIC(out.PCA), BIC(out.RCRIplus), BIC(out.null))) %>%
  mutate(delta = BIC - min(BIC))
# Odds ratios with CIs.
# NOTE(review): the Wald (confint.default) version is overwritten by the
# profile-likelihood (confint) version on the next line -- confirm intended.
confidence_intervals = confint.default(out.RCRIplus, adjust = "bonferroni")
mod_coefs <- data.frame(apply(exp(cbind(OR = coef(out.RCRIplus),confidence_intervals)),2,round,2))
mod_coefs <- data.frame(apply(exp(cbind(OR = coef(out.RCRIplus), confint(out.RCRIplus))),2,round,2))
names(mod_coefs)<- c("OR", "2.5% CI", "97.5% CI")
if(output_tables){
  tab_coefs <- xtable(mod_coefs)
  print(tab_coefs, file="mortality_model_coefs.txt")
}
## Testing model out of sample ## -------------------------------------------
# 75/25 train/test split (seeded for reproducibility):
set.seed(364)
sample <- sample(nrow(data),floor(nrow(data)*0.75))
train <- data[sample,]
test <- data[-sample,]
library(pROC)
# Refit the RCRI-plus model on the training split:
out.RCRIplus.train <- glm(as.numeric(died) ~ age +
                            gender +
                            race +
                            year +
                            Afib +
                            sepsis +
                            PNA +
                            PE +
                            DVT +
                            Bleed +
                            hx_isch_heart +
                            PAD +
                            hx_chf +
                            hx_CVA +
                            hx_DM +
                            hx_ckd +
                            high_risk_surgery +
                            invasive_mgmt +
                            hx_revasc +
                            NSTEMI +
                            severe_MI,
                          data = train,
                          family = "binomial")
out.full.train <- glm(died ~., data = (train %>% select(c(year, died, invasive_mgmt, severe_MI, var_names_full))), family = "binomial")
# ROC curves on the held-out split.
# NOTE(review): predictions use the models fit on the FULL data
# (out.RCRIplus / out.full), not the .train refits above -- confirm
# whether the train-split fits were meant to be used here.
test_prob_RCRIplus = predict(out.RCRIplus, newdata = test, type = "response")
test_roc_RCRIplus = roc(test$died ~ test_prob_RCRIplus, plot = TRUE, print.auc = TRUE)
test_prob_full = predict(out.full, newdata = test, type = "response")
test_roc_full = roc(test$died ~ test_prob_full, plot = TRUE, print.auc = TRUE)
# DeLong test comparing the two ROC curves:
roc.test(test_roc_full, test_roc_RCRIplus)
p_roc <- ggroc(list(`Full model` = test_roc_full, `RCRI model` = test_roc_RCRIplus), linetype = 2) +
  plot_themes +
  labs(color = "Model") +
  ggtitle("ROC curves")
if(save_plots){
  save_plot("ROC_curves_mortality.pdf", p_roc, base_width = 8, base_height = 4)
}
## Summary statistics, and evaluate dependnece of ROC on percent test data #### -----------------------
# Bootstrap confidence intervals for each AUC:
AUC_RCRIplus <- ci.auc(test_roc_RCRIplus, method = "bootstrap")
AUC_full <- ci.auc(test_roc_full, method = "bootstrap")
# NOTE(review): 'compare_models' is never defined in this script, so this
# guard errors when the script is run top-to-bottom; 'deltaAUC' is also not
# provided by any library() call above -- confirm both before running.
if(compare_models){
  var_names_RCRI <- c("age", "race", "gender",
                      "hx_DM", "Afib", "sepsis",
                      "PNA", "PE", "DVT",
                      "Bleed", "hx_chf", "hx_CVA",
                      "hx_DM", "hx_ckd", "hx_isch_heart",
                      "hx_revasc", "high_risk_surgery",
                      "invasive_mgmt", "NSTEMI", "severe_MI")
  dat_AUC <- data %>% filter(!is.na(died)) %>%
    select(c(died,var_names_full)) %>%
    mutate_if(is.factor, as.character) %>%
    mutate_if(is.character, as.integer)
  mat_RCRI <- as.matrix(dat_AUC[,(names(dat_AUC) %in% var_names_RCRI)])
  mat_Full <- as.matrix(dat_AUC[,var_names_full[!(var_names_full %in% var_names_RCRI)]])[,-13]
  AUC_compare <- deltaAUC(y = dat_AUC$died,
                          x = mat_RCRI,
                          z = mat_Full)
}
#  x = dat_AUC[,(names(dat_AUC) %in% var_names_RCRI)],
#  z = dat_AUC[,var_names_full[!(var_names_full %in% var_names_RCRI)]])
# Sensitivity analysis: AUC of the RCRI-plus model as a function of the
# training fraction (50% to 100% in 5% steps).
ROC_test_size <- data.frame()
test_percent_vec <- seq(0.5,1.0,0.05)
for(i in c(1:length(test_percent_vec))){
  print(i)
  # Same seed each iteration so splits are nested/comparable:
  set.seed(364)
  sample <- sample(nrow(data),floor(nrow(data)*test_percent_vec[i]))
  this_train <- data[sample,]
  this_test <- data[-sample,]
  out.RCRIplus.train <- glm(as.numeric(died) ~ age +
                              gender +
                              race +
                              year +
                              #smoking +
                              #HLD +
                              Afib +
                              sepsis +
                              PNA +
                              PE +
                              DVT +
                              Bleed +
                              hx_isch_heart +
                              PAD +
                              hx_chf +
                              hx_CVA +
                              hx_DM +
                              hx_ckd +
                              high_risk_surgery +
                              invasive_mgmt +
                              NSTEMI +
                              severe_MI,
                            data = this_train,
                            family = "binomial")
  test_prob_RCRIplus_temp = predict(out.RCRIplus.train, newdata = this_test, type = "response")
  test_roc_RCRIplus_temp = roc(this_test$died ~ test_prob_RCRIplus_temp, plot = TRUE, print.auc = TRUE)
  # Record (training fraction, AUC) for this split:
  this_df <- data.frame(percent = test_percent_vec[i],
                        AUC = test_roc_RCRIplus_temp$auc
  )
  ROC_test_size <- rbind(ROC_test_size, this_df)
}
## Examining discharge outcomes
# Select retained variables: keep predictors whose maximum contribution (across
# the columns of `out.contrib`, computed earlier in the file) exceeds 9.
# NOTE(review): the threshold 9 is a magic number — confirm its units/derivation.
out.contrib$max_contrib <- apply(out.contrib %>% select(-var),1,max)
retained_vars <- out.contrib %>% filter(max_contrib > 9) %>% select(var)
## Prediction by number of RCRI factors ## --------------------------------------
## Predicted mortality risk stratified by RCRI count, invasive management and
## prior CAD. The commented-out filters are retained from exploratory runs.
dat_sub <- data %>% filter(!is.na(died) #&
                           #invasive_mgmt == 0 &
                           #sepsis == 0 &
                           #PNA == 0
                           #Bleed == 0
                           )
# Predicted probability of death from the RCRI-plus model fit earlier in the file.
pred.test <- predict(out.RCRIplus,
                     dat_sub,
                     type = "response")
# Build an all-numeric table with the RCRI component count and the predicted risk.
dat_sub_agg <- dat_sub %>% select(ind, died, NSTEMI, gender, invasive_mgmt, hx_chf, hx_isch_heart, hx_CVA, hx_DM, hx_ckd, high_risk_surgery) %>%
  mutate_if(is.factor, as.character) %>%
  mutate_if(is.character, as.numeric) %>%
  mutate(RCRI = hx_chf + hx_isch_heart + hx_CVA + hx_DM + hx_ckd + high_risk_surgery) %>%
  mutate(RCRI2 = RCRI) %>%
  mutate(prob = pred.test)
# Cap the plotted RCRI count at 4 (categories 4+ pooled).
dat_sub_agg[dat_sub_agg$RCRI2 > 4,]$RCRI2 <- 4
# Median (IQR) predicted risk per RCRI count x management x prior-CAD cell.
df <- dat_sub_agg %>%
  group_by(RCRI2, invasive_mgmt, hx_isch_heart) %>%
  summarise(med = median(prob),
            q1 = quantile(prob,.25),
            q2 = quantile(prob,.75))
df$invasive_mgmt = as.factor(df$invasive_mgmt)
levels(df$invasive_mgmt)<- c("No invasive management", "Invasive management")
df$hx_isch_heart = as.factor(df$hx_isch_heart)
levels(df$hx_isch_heart)<- c("No prior CAD", "Prior CAD")
# BUG FIX: after summarise(), `df` only carries the capped count `RCRI2`
# (plus the two facet variables); the original `aes(x = RCRI)` referenced a
# column that does not exist in `df` and would error at render time.
p_risk <- ggplot(df, aes(x = RCRI2, y = med)) +
  geom_point() + geom_line() +
  geom_errorbar(aes(ymin = q1, ymax = q2), linetype = 2) +
  facet_wrap(.~invasive_mgmt + hx_isch_heart, scales = "free_y") +
  xlab("RCRI") +
  ylab("Median probability (IQR)") +
  plot_themes
if(save_plots){
  save_plot("Risk_by_RCRI_and_CAD.pdf", p_risk, base_width = 8, base_height = 6)
}
## Analysis of dispo outcome ## ------------------------------------------------------------------------------
# Recode discharge-to-facility (ICF) as a 0/1 numeric outcome.
data$ICF <- as.numeric(data$ICF==1)
# PCA-informed model: outcome + clinical events + the predictors retained above.
sub.PCA.dispo <- data[,(colnames(data) %in% c("ICF", "died", "year", "race", "gender", "invasive_mgmt", "PNA", "sepsis", "Afib", "PE", "DVT", "Bleed", "NSTEMI", "severe_MI", unlist(retained_vars)))]
out.PCA.dispo <- glm(as.numeric(ICF) ~., data = sub.PCA.dispo, family = "binomial")
#summary(out.PCA)
#exp(cbind(OR = coef(out.PCA), confint(out.PCA)))
# Full predictor set for the dispo models.
# NOTE(review): X1/X2 are defined earlier in the file (outside this view) —
# this line silently redefines `var_names_full` used by later sections.
var_names_full <- c(names(X1), names(X2), "cm_mets", "PNA", "sepsis", "Afib", "PE", "DVT", "Bleed", "NSTEMI","invasive_mgmt", "severe_MI")
data_full_dispo <- data %>% select(c(year, died, ICF, invasive_mgmt, var_names_full)) # %>% select(-c(RCRI_pt, Ischemic_stroke, los, ind, age_factor, `RCRI > 3`))
# Full model with a gender x NSTEMI x prior-revascularization interaction.
out.full.dispo <- glm(ICF ~. + gender*NSTEMI*hx_revasc , data = data_full_dispo, family = "binomial")
# RCRI-plus style model for the dispo outcome (mortality included as a covariate).
out.RCRIplus.dispo <- glm(as.numeric(ICF) ~ age +
gender +
race +
year +
hx_isch_heart +
hx_chf +
hx_CVA +
hx_DM +
hx_ckd +
high_risk_surgery +
invasive_mgmt +
NSTEMI +
severe_MI +
PNA +
sepsis +
Afib +
PE +
DVT +
Bleed +
died,
data = data, family = "binomial")
# Model comparison via BIC (delta relative to the best model).
model_comp.dispo <- data.frame(model = c("Full", "PCA", "RCRIPlus"),
BIC = c(BIC(out.full.dispo),
BIC(out.PCA.dispo),
BIC(out.RCRIplus.dispo))) %>%
mutate(delta = BIC - min(BIC))
# Odds ratios with 95% CIs for the full dispo model.
mod_coefs.dispo <- data.frame(apply(exp(cbind(OR = coef(out.full.dispo), confint(out.full.dispo))),2,round,2))
names(mod_coefs.dispo)<- c("OR", "2.5% CI", "97.5% CI")
if(output_tables){
tab_coefs <- xtable(mod_coefs.dispo)
print(tab_coefs, file="dispo_model_coefs.txt")
}
## Testing model out of sample ## -------------------------------------------
## Hold-out evaluation of the disposition (ICF) models: 75/25 train/test split.
set.seed(123)
sample <- sample(nrow(data),floor(nrow(data)*0.75))
train.dispo <- data[sample,]
test.dispo <- data[-sample,]
library(pROC)
# RCRI-plus style logistic model for discharge to facility, fit on the dispo
# training split. Commented-out predictors retained from exploratory runs.
# BUG FIX: the original fit on `train`/`test` (the earlier mortality split)
# even though `train.dispo`/`test.dispo` were just created, and the ROC below
# even reused the mortality-model probabilities (`test_prob_RCRIplus`).
# All references now use the dispo split and dispo predictions consistently.
out.RCRIplus.train.dispo <- glm(as.numeric(ICF) ~ age +
                                  gender +
                                  race +
                                  year +
                                  #smoking +
                                  #HLD +
                                  Afib +
                                  sepsis +
                                  PNA +
                                  PE +
                                  DVT +
                                  Bleed +
                                  hx_isch_heart +
                                  PAD +
                                  hx_chf +
                                  hx_CVA +
                                  hx_DM +
                                  hx_ckd +
                                  high_risk_surgery +
                                  invasive_mgmt +
                                  NSTEMI +
                                  severe_MI +
                                  died,
                                data = train.dispo,
                                family = "binomial")
out.full.train.dispo <- glm(ICF ~., data = (train.dispo %>% select(c(year, died, invasive_mgmt, severe_MI, var_names_full))), family = "binomial")
# Out-of-sample probabilities and ROC curves on the dispo test split.
test_prob_RCRIplus.dispo = predict(out.RCRIplus.train.dispo, newdata = test.dispo, type = "response")
test_roc_RCRIplus.dispo = roc(test.dispo$ICF ~ test_prob_RCRIplus.dispo, plot = TRUE, print.auc = TRUE)
test_prob_full.dispo = predict(out.full.train.dispo, newdata = test.dispo, type = "response")
test_roc_full.dispo = roc(test.dispo$ICF ~ test_prob_full.dispo, plot = TRUE, print.auc = TRUE)
p_roc <- ggroc(list(`Full model` = test_roc_full.dispo, `RCRI model` = test_roc_RCRIplus.dispo), linetype = 2) +
  plot_themes +
  labs(color = "Model") +
  ggtitle("ROC curves")
if(save_plots){
  save_plot("ROC_curves_dispo.pdf", p_roc, base_width = 8, base_height = 4)
}
|
41bd57abbf2998e136354c2809d9763408b1f711
|
bd27496789711097510ed8a16b94d6fba77b683f
|
/R4CouchDB/man/cdbAddAttachment.Rd
|
b4ba8202a965f70be1d7f3d503f4cfdf6f5c9791
|
[] |
no_license
|
t9nf/R4CouchDB
|
c4d4ec6774eec61e1f6eccf0e5b4e22360f4bf9e
|
00c8d588fe20be946c75444f375a0876269dfe4f
|
refs/heads/master
| 2017-12-04T14:41:33.057494
| 2013-10-30T12:38:32
| 2013-10-30T12:38:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
rd
|
cdbAddAttachment.Rd
|
\name{cdbAddAttachment}
\alias{cdbAddAttachment}
\title{Add attachments}
\usage{
cdbAddAttachment(cdb)
}
\arguments{
\item{cdb}{The list \code{cdb} has to contain
\code{cdb$fileName},\code{cdb$serverName},
\code{cdb$DBName} and a \code{cdb$id}.}
}
\value{
\item{cdb}{The result is stored in \code{cdb$res} }
}
\description{
This function adds attachments to a database document.
}
\details{
The MIME type of the attachment is determined automatically via
\code{guessMIMEType()}. If the switch \code{cdb$attachmentsWithPath} is set
to \code{TRUE}, the attachments are saved together with their file path;
this has been the default behaviour since version 0.2.5 of R4CouchDB.
}
\author{
wactbprot
}
\keyword{misc}
|
ad0a5604941dcff34ecdf22cfe11245d2f6d1bdd
|
97c8c0aab89d975438c5b1cb23acd9b7cd7bb099
|
/Archive/heatmap_notes.R
|
9cd1328d037136046d06cfe6c3bece1654bdeac6
|
[] |
no_license
|
DanRunfola/KFW
|
4a402a66f9d47e8cb6c29dbdb5114454c485307d
|
2b35167f80de69d962a0da993a3d13df5a604043
|
refs/heads/master
| 2020-05-16T03:42:08.233479
| 2015-11-30T21:28:09
| 2015-11-30T21:28:09
| 32,172,710
| 0
| 1
| null | 2020-09-21T12:35:22
| 2015-03-13T18:13:16
|
R
|
UTF-8
|
R
| false
| false
| 1,543
|
r
|
heatmap_notes.R
|
#Making a heatmap of the results
# Work on a copy of the (sp) spatial object; build a unique row id from the
# polygon id and name so heatmap rows are labelled.
psm_PairsC <- psm_Pairs
psm_PairsC@data["subID"] <- do.call(paste,c(psm_PairsC@data["SP_ID"],psm_PairsC@data["terrai_nom"],sep="_"))
row.names(psm_PairsC@data) <- psm_PairsC@data$subID
#Subset only the NDVI record
psm_PairsC <- psm_PairsC[14:44]
#Test for negative year-on-year change:
psm_PairsD <- psm_PairsC
# For each NDVI year column (skipping the first), derive change, loss/gain
# indicator and a combined -1/0/+1 bin column.
for(i in 2:length(psm_PairsD@data))
{
year <- as.numeric(sub("NDVI","",colnames(psm_PairsD@data)[i]))
str <- sub("NDVI","Chg_",colnames(psm_PairsD@data)[i])
str_loss <- sub("NDVI","Loss_",colnames(psm_PairsD@data)[i])
str_gain <- sub("NDVI","Gain_",colnames(psm_PairsD@data)[i])
str_bin <- sub("NDVI","BinMod_",colnames(psm_PairsD@data)[i])
last_year <- paste("NDVI",round(year-1,0),sep="")
cur_year <- paste("NDVI",round(year),sep="")
# NOTE(review): the next assignment is immediately overwritten — the
# year-on-year change is replaced by the change relative to the 1995 baseline.
# Presumably an intentional switch left in these notes, but confirm: the
# "year-on-year" comment above no longer matches the computed quantity.
psm_PairsD@data[str] <- (psm_PairsD@data[cur_year] - psm_PairsD@data[last_year])
psm_PairsD@data[str] <- (psm_PairsD@data[cur_year] - psm_PairsD@data["NDVI1995"])
psm_PairsD@data[str_loss] <- 0
psm_PairsD@data[str_gain] <- 0
# +/- 0.025 NDVI change thresholds define gain/loss.
psm_PairsD@data[str_loss][psm_PairsD@data[str] < -0.025,] <- -1
psm_PairsD@data[str_gain][psm_PairsD@data[str] > 0.025,] <- 1
psm_PairsD@data[str_bin] <- psm_PairsD@data[str_loss] + psm_PairsD@data[str_gain]
}
# Keep only the raw NDVI columns for the heatmap matrix.
psm_PairsD@data <- psm_PairsD@data[grepl("NDVI",names(psm_PairsD@data))]
Pairs_matrix <- data.matrix(psm_PairsD@data)
hmcol <- brewer.pal(10,"RdYlGn")
#test_heat <- heatmap(Pairs_matrix, Rowv=NA, Colv=NA, col=hmcol, scale="none", margins=c(5,10),add.expr=abline(v=13,col="blue",lty=1,lwd=3))
4735346e0fed57882c9a69f9abd5ac77ca73c44d
|
66ae31e851638ad20305409b99df93d8ce2f8133
|
/man/grs.Rd
|
589ca90f0882199817242835f0c562681064375a
|
[] |
no_license
|
rwoldford/edmcr
|
150e1702ceb451d154223ff5e9ded10defeda9e6
|
ee322d7dcc0bf3f497576c31a87a4886bc17d8a8
|
refs/heads/main
| 2021-12-06T06:09:38.997297
| 2021-09-08T17:59:47
| 2021-09-08T17:59:47
| 142,780,936
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,597
|
rd
|
grs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grs.R
\name{grs}
\alias{grs}
\title{Guided Random Search}
\usage{
grs(D, d)
}
\arguments{
\item{D}{An nxn partial-distance matrix to be completed. D must satisfy a list of conditions (see details), with unknown entries set to NA}
\item{d}{The dimension for the resulting completion.}
}
\value{
\item{P }{The completed point configuration in dimension d}
\item{D }{The completed Euclidean distance matrix}
}
\description{
\code{grs} performs Euclidean Distance Matrix Completion using the guided random search algorithm
of Rahman & Oldford. Using this method will preserve the minimum spanning tree in the partial distance
matrix.
}
\details{
The matrix D is a partial-distance matrix, meaning some of its entries are unknown.
It must satisfy the following conditions in order to be completed:
\itemize{
\item{diag(D) = 0}
\item{If \eqn{a_{ij}} is known, \eqn{a_{ji} = a_{ij}}}
\item{If \eqn{a_{ij}} is unknown, so is \eqn{a_{ji}}}
\item{The graph of D must contain ONLY the minimum spanning tree distances}
}
}
\examples{
#D matrix containing only the minimum spanning tree
D <- matrix(c(0,3,NA,3,NA,NA,
3,0,1,NA,NA,NA,
NA,1,0,NA,NA,NA,
3,NA,NA,0,1,NA,
NA,NA,NA,1,0,1,
NA,NA,NA,NA,1,0),byrow=TRUE, nrow=6)
edmc(D, method="grs", d=3)
}
\references{
Rahman, D., & Oldford, R.W. (2016). Euclidean Distance Matrix Completion and Point Configurations from the Minimal Spanning Tree.
}
|
2eff6a203ed0b6e9c661dca9627ced07db1cd7f7
|
4c744498868d6a9e6d40035a1ebbdc52fc6107ee
|
/figure_supp generator/Figure5_supp.R
|
a71ad99b686847d217b53d1d84658755398e1209
|
[] |
no_license
|
FloBeni/2023-alternative_splicing
|
2e0d565ca35dd19a84a4ba5bded2529b3b23d072
|
84d434d3db7bccba116d2860042e1015d60811ad
|
refs/heads/main
| 2023-08-05T03:40:30.322136
| 2022-12-08T14:51:29
| 2022-12-08T14:51:29
| 575,819,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,241
|
r
|
Figure5_supp.R
|
# Shared library/path setup for the supplementary figure scripts.
source("figure_supp generator/library_path.R")
############## Supplementary Panel 5 A
# SNP density around splice sites for abundant splice variants (human, CpG classes).
data_5 = read.delim(paste("data/Data5_supp.tab",sep=""),comment.char = "#")
df = data_5[data_5$filtering == "Homo_sapiens_CpG_abundant_sv",]
# Hard-coded x positions aligning the bars with the splice-site schematic image below.
df$pos=c(2.19,2.78,1.45,3.46,4.16)
p3 = ggplot(df,aes(x=pos,y=mean_polymorphism,fill=color_group)) + geom_col(width=0.1,col="black") + theme_bw()+
geom_errorbar(aes(ymin=error_bar, ymax=error_bar_2),width=00.03,show.legend=FALSE)+ggtitle("Abundant SVs (all protein-coding genes)")+
geom_text(data=df,aes(x=pos-0.07,y=mean_polymorphism+0.004, family="serif",label=paste(round(Nb_introns_minor,3))),angle=90,vjust=0,size=6)+
theme(legend.position = "none") + xlim(c(1,5)) +labs(y=expression(paste("SNP density (",italic("per")," bp)")))+
theme(
axis.title.x = element_text(color=NA, size=NA,family="serif"),
axis.title.y = element_text(color="black",margin = margin(t = 0, r = 20, b = 0, l = 0), size=28, family="serif"),
axis.text.y = element_text(color="black", size=22, family="serif"),
axis.text.x = element_text(color=NA, size=NA, family="serif"),
title = element_text(color="black", size=20, family="serif"),
text = element_text(color="black", size=31, family="serif"),
legend.text = element_text(color="black", size=26, family="serif"),
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank() ,
panel.grid.major.y = element_line( size=.1, color="black" ) ,
) +scale_y_continuous(limits=c(0,0.185))
p3
resolution=1
# Compose the bar plot with the splice-site schematic and species silhouette (cowplot).
p = ggdraw() + draw_plot(p3, 0, 0.25, 1, .7) + draw_image(paste(path_require,"polymorphism_position_CpG.png",sep=""),0.09,-0.31,0.91,1)+
draw_image(paste(path_require,"human.png",sep=""),.85,.65,0.15,.17)
p
resolution=1
jpeg(paste(path_figure,"p29_snp_cpg_abundant_sv.jpg",sep=""), width = 3600/resolution, height = 2500/resolution,res=350/resolution)
print(p)
dev.off()
############## Supplementary Panel 5 B
# Same plot as panel A, restricted to rare splice variants.
df = data_5[data_5$filtering == "Homo_sapiens_CpG_rare_sv",]
# Hard-coded x positions aligning the bars with the splice-site schematic image below.
df$pos=c(2.19,2.78,1.45,3.46,4.16)
p3 = ggplot(df,aes(x=pos,y=mean_polymorphism,fill=color_group)) + geom_col(width=0.1,col="black") + theme_bw()+
geom_errorbar(aes(ymin=error_bar, ymax=error_bar_2),width=00.03,show.legend=FALSE)+ggtitle("Rare SVs (all protein-coding genes)")+
geom_text(data=df,aes(x=pos-0.07,y=mean_polymorphism+0.004, family="serif",label=paste(round(Nb_introns_minor,3))),angle=90,vjust=0,size=6)+
theme(legend.position = "none") + xlim(c(1,5)) +labs(y=expression(paste("SNP density (",italic("per")," bp)")))+
theme(
axis.title.x = element_text(color=NA, size=NA,family="serif"),
axis.title.y = element_text(color="black",margin = margin(t = 0, r = 20, b = 0, l = 0), size=28, family="serif"),
axis.text.y = element_text(color="black", size=22, family="serif"),
axis.text.x = element_text(color=NA, size=NA, family="serif"),
title = element_text(color="black", size=20, family="serif"),
text = element_text(color="black", size=31, family="serif"),
legend.text = element_text(color="black", size=26, family="serif"),
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank() ,
panel.grid.major.y = element_line( size=.1, color="black" ) ,
) +scale_y_continuous(limits=c(0,0.185))
p3
resolution=1
# Compose the bar plot with the splice-site schematic and species silhouette (cowplot).
p = ggdraw() + draw_plot(p3, 0, 0.25, 1, .7) + draw_image(paste(path_require,"polymorphism_position_CpG.png",sep=""),0.09,-0.31,0.91,1)+
draw_image(paste(path_require,"human.png",sep=""),.85,.65,0.15,.17)
p
resolution=1
jpeg(paste(path_figure,"p30_snp_cpg_rare_sv.jpg",sep=""), width = 3600/resolution, height = 2500/resolution,res=350/resolution)
print(p)
dev.off()
############## Supplementary Figure 5
# Stack the two rendered panels (A above B) into a single PDF using base graphics.
imgA = load.image(paste(path_figure,"p29_snp_cpg_abundant_sv.jpg",sep=""))
imgB = load.image(paste(path_figure,"p30_snp_cpg_rare_sv.jpg",sep=""))
{
pdf(file=paste(path_pannel,"Figure5_supp.pdf",sep=""), width=4, height=6)
# Two-row layout: panel A on top, panel B below.
m=matrix(c(1,2), nrow=2)
m
layout(m)
par(mar=c(1, 0, 1, 0))
plot(imgA, axes = F)
mtext("A", side=2,at=0,adj=-3, line=1, font=2, cex=1.3,las=2)
par(mar=c(1, 0, 1, 0))
plot(imgB, axes = F)
mtext("B", side=2,at=0,adj=-3, line=1, font=2, cex=1.3,las=2)
dev.off()
}
|
0d9c6c9406ad0d0769ba104fa64061e2297c657e
|
84ad2b48e39073b3730678a480df191e6317e626
|
/R/geo_atom_parse.R
|
8fedc289a8bdc994f6d5866afe1a6104c1533650
|
[
"MIT"
] |
permissive
|
RobertMyles/tidygeoRSS
|
daa3439a73bc0792031ddccaf0ee4b21e57c1009
|
94006565750fdba9e7345c9918b785d3a2282e32
|
refs/heads/master
| 2020-12-19T16:20:53.321134
| 2020-07-20T15:15:46
| 2020-07-20T15:15:46
| 235,787,769
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,599
|
r
|
geo_atom_parse.R
|
geo_atom_parse <- function(response, list, clean_tags, parse_dates) {
  # Parse an Atom feed carrying GeoRSS elements. The plain Atom fields are
  # delegated to atom_parse(); the GeoRSS columns (point, line, polygon, box,
  # elev, floor, radius) are extracted from the raw XML and converted into
  # sf geometry objects before the entries are returned as an sf data frame.
  #
  # Returns list(meta, entries) when `list` is TRUE, otherwise the joined
  # meta+entries table (invisibly NULL if the join fails, as before).
  parsed <- suppressMessages(atom_parse(response, list = TRUE, clean_tags, parse_dates))
  entries <- parsed$entries
  res <- read_xml(response)
  res_entry <- xml_find_all(res, "//*[name()='entry']")
  if (geocheck(res)) {
    # Helper: a GeoRSS "c1 c2 c1 c2 ..." coordinate string -> n x 2 numeric
    # matrix, one coordinate pair per row. Replaces two duplicated
    # token-by-token parsing loops from the original.
    coord_matrix <- function(coords) {
      vals <- as.numeric(strsplit(coords, " ", fixed = TRUE)[[1]])
      matrix(vals, ncol = 2, byrow = TRUE)
    }
    entries <- entries %>%
      mutate(
        entry_latlon = safe_run(
          res_entry, "all", "//*[name()='georss:point']"
        ) %>%
          map_if(.p = ~ !is.na(.x),
                 ~ as.numeric(strsplit(.x, " ", fixed = TRUE)[[1]])) %>%
          map_if(.p = ~ check_p(.x), st_point),
        entry_line = safe_run(res_entry, "all", "//*[name()='georss:line']") %>%
          map_if(.p = ~ !is.na(.x), coord_matrix) %>%
          map_if(.p = ~ check_p(.x), st_linestring),
        # BUG FIX: the original XPath queried 'georss:ploygon' (typo), so
        # polygon geometries were never found; the GeoRSS element is
        # 'georss:polygon'.
        entry_pgon = safe_run(
          res_entry, "all", "//*[name()='georss:polygon']"
        ) %>%
          map_if(.p = ~ !is.na(.x), ~ list(coord_matrix(.x))) %>%
          map_if(.p = ~ check_p(.x), st_polygon),
        entry_bbox = safe_run(res_entry, "all", "//*[name()='georss:box']") %>%
          map_if(.p = ~ !is.na(.x), ~ {
            # BUG FIX: the original lambda referenced an undefined `y`;
            # it must operate on the element `.x` supplied by map_if().
            vals <- as.numeric(strsplit(.x, " ", fixed = TRUE)[[1]])
            corner_a <- st_point(vals[1:2])
            corner_b <- st_point(vals[3:4])
            c(corner_a, corner_b)
          }) %>%
          map_if(.p = ~ check_p(.x), st_bbox),
        entry_elev = safe_run(res_entry, "all", "//*[name()='georss:elev']") %>%
          as.numeric(),
        entry_floor = safe_run(res_entry, "all", "//*[name()='georss:floor']") %>%
          as.numeric(),
        entry_radius = safe_run(
          res_entry, "all", "//*[name()='georss:radius']"
        ) %>% as.numeric()
      )
    meta <- clean_up(parsed$meta, "atom", clean_tags, parse_dates)
    entries <- clean_up(entries, "atom", clean_tags, parse_dates) %>%
      st_as_sf()
    if (isTRUE(list)) {
      return(list(meta = meta, entries = entries))
    } else {
      if (!has_name(meta, "feed_title")) {
        meta$feed_title <- NA_character_ # nocov
      }
      entries$feed_title <- meta$feed_title
      out <- suppressMessages(safe_join(meta, entries))
      if (is.null(out$error)) {
        out <- out$result
        if (all(is.na(out$feed_title))) out <- out %>% select(-feed_title) # nocov
        return(out)
      }
    }
  } else {
    # NOTE(review): `msg` is not defined in this function — assumed to be a
    # package-level error-message constant; confirm.
    stop(msg)
  }
}
|
8b7a6c44b19a256cd50f263a379169d5c7b44b86
|
0b6aaebb780047de5cdf9f5b5105ba3239023e9c
|
/R/datasetPreparation.R
|
7e9e70eccc6d590317c1ef6d26955adf7e12a452
|
[] |
no_license
|
jcizel/EBAResultAnalysis
|
6500d38977145ab8131ec909e881f4b71efc1f37
|
145eb016003bb34423d8301f2b2f2995594f90cf
|
refs/heads/master
| 2020-04-25T16:30:04.770120
| 2014-11-14T11:58:22
| 2014-11-14T11:58:22
| 26,629,655
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,245
|
r
|
datasetPreparation.R
|
## ------------------------------------------------------------------------------------------- ##
## SET OF FUNCTIONS THAT PREPARES A LIST OF BANKS INVOLVED IN EBA AND ECB ##
## STRESS TESTS, AND CONSTRUCTS A LOOKUP TABLE BETWEEN THE ##
## BANKS' ORIGINAL NAMES (AS USED IN THE TESTS) AND THEIR IDENTIFYING INFORMATION IN BANKSCOPE ##
## ------------------------------------------------------------------------------------------- ##
##' This function loads a table of banks that participated in EBA or ECB
##' tests. Due to a lack of consistency in naming and identification of banks
##' across different tests, the participating banks needed to be manually linked
##' across the tests. I also manually link the banks with their identifying
##' information in Bankscope.
##'
##' .. content for \details{} ..
##' @title Load a table of manually linked banks that participated in EBA and
##' ECB tests
##' @return data.table with banks that participated in EBA and ECB tests.
##' @author Janko Cizel
loadManuallyLinkedBanks <- function(path = './inst/extdata/MANUAL_CHECK.csv'){
    ## Read the manually curated lookup table that links banks across the
    ## EBA/ECB stress tests and Bankscope, sorted for stable downstream
    ## processing.
    ##
    ## Generalization: the previously hard-coded file location is now a
    ## defaulted `path` argument, so existing callers are unaffected while
    ## the table can be loaded from elsewhere (e.g. an installed package via
    ## system.file()).
    out <-
        fread(input = path,
              stringsAsFactors = FALSE)[order(SOURCE,COUNTRY,`_NAME_A_ORIG`,CONSOL)]
    return(out)
}
collapseManuallyLinkedTable <- function(dt){
# Collapse the manually linked bank table in two passes:
#  1) one row per (COUNTRY, Bankscope INDEX), condensing test presence,
#     names and codes with the package helper `.condense`;
#  2) one row per (COUNTRY, Bankscope name), further condensing across
#     consolidation codes and concatenating "INDEX (CONSOL)" pairs.
# Pass 1: aggregate to (COUNTRY, INDEX) level.
.o1 <-
dt[, {
presence <- .condense(SOURCE)
bsname <- .condense(`_NAME_B_ORIG`)
leicode <- .condense(LEI)
ebacode <- .condense(BANKCODE)
consol <- .condense(CONSOL)
# Original test names tagged with their source test, e.g. "NAME [[TEST]]".
names <- paste(
unique(paste0(`_NAME_A_ORIG`," [[",SOURCE,"]]")),
collapse = "; "
)
list(
bsname = bsname,
consol = consol,
presence = presence,
names = names,
ebacode = ebacode,
leicode = leicode)
}
, keyby = list(COUNTRY,INDEX)]
# Pass 2: aggregate to (COUNTRY, bsname) level.
.o2 <-
.o1[, {
presence <- .condense(presence)
leicode <- .condense(leicode)
ebacode <- .condense(ebacode)
names <- collapseDuplicatesWithinString(.condense(names))
# "INDEX (CONSOL)" pairs, later disambiguated by selectUniqueBSIndex().
bscode <- paste(
unique(paste0(INDEX," (",consol,")")),
collapse = "; "
)
list(presence = presence,
names = names,
bscode = bscode,
ebacode = ebacode,
leicode = leicode)
}
, keyby = list(COUNTRY,bsname)]
return(.o2)
}
##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##' @title Collapse duplicate enteries within a string
##' @param str Character vector in which individual strings are separated by
##' `sep`, which by default is "; "
##' @param sep separator of enteries within individual strings
##' @return A character vector with removed duplicated enteries.
##' @author Janko Cizel
collapseDuplicatesWithinString <- function(str,
                                           sep = "; "){
    ## Split each element of `str` on `sep`, drop duplicate entries (after
    ## whitespace-trimming with the package helper `.trim`), and re-join the
    ## surviving entries in sorted order.
    ##
    ## Improvement: vapply() replaces sapply() so the return value is always
    ## a character vector — sapply() silently returns list() for zero-length
    ## input, which breaks downstream column assignment.
    l <- strsplit(str,
                  split = sep)
    out <-
        vapply(l, function (str.vec) {
            o <- unique(.trim(str.vec))
            paste(o[order(o)],
                  collapse = sep)
        }, character(1))
    return(out)
}
##' A given bank typically has multiple (manually) matched Bankscope
##' identifiers, corresponding to different levels of accounting
##' consolidation. For a given bank, this function selects an index at the
##' higest available level of consolidation.
##'
##' TODO: Describe Bankscope consolidation concepts
##
##' @title Bankscope Index Selector
##' @param bsIndexCol Character vector in which each entry has the following
##' structure: [BS_INDEX_1] ([CONSOL_1]); [BS_INDEX_2] ([CONSOL_2]);...
##' @return Numeric vector of selected Bankscope Indices
##' @author Janko Cizel
selectUniqueBSIndex <- function(bsIndexCol){
    ## For each entry of the form "INDEX1 (CONSOL1); INDEX2 (CONSOL2); ..."
    ## pick one Bankscope index, preferring the highest consolidation level
    ## in the order C2 > C1 > U1 > U (matching the original nested search).
    ##
    ## Single-candidate entries return their numeric index directly, with a
    ## non-numeric candidate mapped to 0. If no consolidation pattern matches,
    ## NA is returned (same as the original's fall-through behaviour).
    ##
    ## Improvements over the original: the deeply nested if/else ladder is
    ## flattened into a priority loop, unreachable statements after the
    ## single-candidate branch are removed, debug cat() chatter is dropped,
    ## and vapply() guarantees a numeric return for empty input.
    l <- strsplit(bsIndexCol, split = "; ")
    out <-
        vapply(l, function (str.vec) {
            o <- unique(.trim(str.vec))
            if (length(o) == 1) {
                sel <- as.numeric(str_extract(o, "[0-9]+")[1])
                if (is.na(sel)) {
                    sel <- 0
                }
                return(sel)
            }
            ## Multiple candidates: take the first match at the highest
            ## available consolidation level.
            for (pattern in c("C2", "C1", "U1", "U")) {
                hit <- o[grepl(x = o, pattern = pattern)]
                if (length(hit) > 0) {
                    return(as.numeric(str_extract(hit, "[0-9]+")[1]))
                }
            }
            NA_real_
        }, numeric(1))
    return(out)
}
##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##' @title Get a selection of banks involved in a specified test
##' @param testName Name of the test. Currently supported: "AQR2014",
##' "EBARECAP2012", "EBATRANSPARENCY2013", "STRESSTEST2011", "STRESSTEST2014"
##'
##' @param outfile path of the output csv file. By default, no file is written.
##' @return data.table with banks involved in the querried test.
##' @author Janko Cizel
selectBanksInTest <- function (testName = 'AQR2014',
                               outfile = NULL){
    ## Return the banks that participated in the stress test `testName`,
    ## together with their Bankscope lookup information. Optionally write
    ## the result to "<outfile>.csv".
    banks <- loadManuallyLinkedBanks()
    t1 <- collapseManuallyLinkedTable(dt = banks)
    t2 <- t1[, lapply(.SD, collapseDuplicatesWithinString)]
    t2[, bscode.unique := selectUniqueBSIndex(bscode)]
    ## BUG FIX: the original filtered on an undefined variable `query`
    ## (grepl(pattern = toupper(query))), which errors at run time; the
    ## intended filter variable is the `testName` argument.
    out <-
        t2[grepl(x = presence, pattern = toupper(testName)), {
            l <- strsplit(names, split = "; ")
            ## Among a bank's tagged names ("NAME [[TEST]]"), keep the one
            ## belonging to the requested test; warn if several match.
            names <-
                sapply(l, function (str.vec) {
                    o <- unique(.trim(str.vec))
                    .o <- o[grepl(x = o, pattern = testName)]
                    if (length(.o) > 1) {
                        warning("TestName matches multiple names")
                        return(.o[[1]])
                    }
                    return(.o)
                })
            ## Strip the " [[TESTNAME]]" suffix appended during collapsing.
            names <- .trim(gsub(x = names, pattern = '(.*)\\[\\[.*\\]\\]','\\1'))
            list(bsname = bsname,
                 nameOrig = names,
                 ebacode = ebacode,
                 leicode = leicode,
                 bscode = bscode.unique)
        }]
    if (!is.null(outfile)){
        write.csv(
            x = out,
            file = paste0(outfile,".csv")
        )
    }
    return(out)
}
##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##' @title Convenience function: Get post-processed list of all banks involved
##' in any of EBA or ECB tests.
##' @param outfile path of the output csv file. By default, no file is written.
##' @return data.table with banks (containing Bankscope lookup information).
##' @author Janko Cizel
getAllBanks <- function (outfile = NULL){
    ## Build the post-processed lookup table of all banks appearing in any
    ## EBA/ECB test: load the manual links, collapse them, de-duplicate the
    ## string columns, and attach a single preferred Bankscope index.
    ## Optionally write the table to "<outfile>.csv".
    manual_links <- loadManuallyLinkedBanks()
    collapsed <- collapseManuallyLinkedTable(dt = manual_links)
    result <- collapsed[, lapply(.SD, collapseDuplicatesWithinString)]
    result[, bscode.unique := selectUniqueBSIndex(bscode)]
    if (!is.null(outfile)){
        write.csv(x = result,
                  file = paste0(outfile, ".csv"))
    }
    return(result)
}
|
2699ec5166ba1f043fdd039ae9014cceb4d5deb6
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/thiazole-5-carboxani.R
|
0d2145a5d8fc3a4b9e3c87e48b797b44e9c128f8
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
thiazole-5-carboxani.R
|
# Render the compound report from its R Markdown source.
library("knitr")
# rgl is loaded for the 3D plots produced inside the Rmd document.
library("rgl")
# Alternative manual pipeline (knit -> HTML -> PDF), kept for reference:
#knit("thiazole-5-carboxani.Rmd")
#markdownToHTML('thiazole-5-carboxani.md', 'thiazole-5-carboxani.html', options=c("use_xhml"))
#system("pandoc -s thiazole-5-carboxani.html -o thiazole-5-carboxani.pdf")
knit2html('thiazole-5-carboxani.Rmd')
|
fb051a68217182676436b3446b4ce551ac3deca1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/cIRT/R/RcppExports.R
|
aea359225a16cd532789308fd6a30d125b627b8a
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,144
|
r
|
RcppExports.R
|
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title Direct Sum of Matrices
#'
#' @description Computes the direct sum of all matrices passed in via the list.
#'
#' @details
#' Consider matrix A (mxn) and B (k x p).
#' A direct sum is a diagonal matrix A (+) B with dimensions (m + k) x (n + p).
#'
#' @param x list containing matrices
#' @return ds_matrix matrix containing the direct sum in list.
#' @author James J Balamuta
#' @examples
#'
#' x = list(matrix(0,nrow=5,ncol=3),
#' matrix(1,nrow=5,ncol=3))
#' direct_sum(x)
#'
#' x = list(matrix(rnorm(15),nrow=5,ncol=3),
#' matrix(rnorm(30),nrow=5,ncol=6),
#' matrix(rnorm(18),nrow=2,ncol=9))
#' direct_sum(x)
direct_sum <- function(x) {
    # Auto-generated Rcpp binding: forwards `x` unchanged to the compiled
    # C++ routine registered as 'cIRT_direct_sum'.
    .Call('cIRT_direct_sum', PACKAGE = 'cIRT', x)
}
#' @title Center a Matrix
#' @description Obtains the mean of each column of the matrix and subtracts it from the given matrix in a centering operation.
#' @param x A \code{matrix} with any dimensions
#' @return centered_matrix A \code{matrix} with the same dimensions of X that has been centered.
#' @details The application of this function to a matrix mimics the use of a centering matrix
#' given by: \eqn{{C_n} = {I_n} - \frac{1}{n}{11^T}}
#' @seealso \code{\link{cIRT}}
#' @author James J Balamuta
#' @examples
#' nobs = 500
#' nvars = 20
#' x = matrix(rnorm(nobs*nvars),nrow=nobs,ncol=nvars)
#' r_centered = scale(x)
#' arma_centered1 = center_matrix(x)
center_matrix <- function(x) {
    # Auto-generated Rcpp binding: forwards `x` unchanged to the compiled
    # C++ routine registered as 'cIRT_center_matrix'.
    .Call('cIRT_center_matrix', PACKAGE = 'cIRT', x)
}
#' @title Two Parameter Choice IRT Model MCMC
#' @description Performs an MCMC routine for a two parameter IRT Model using Choice Data
#' @param unique_subject_ids A \code{vector} with length N x 1 containing unique subject IDs.
#' @param subject_ids A \code{vector} with length N*K x 1 containing subject IDs.
#' @param choices_nk A \code{vector} with length N*K x 1 containing subject choices.
#' @param fixed_effects A \code{matrix} with dimensions N*K x P_1 containing fixed effect design matrix without theta.
#' @param B A V dimensional column \code{vector} relating theta_i and zeta_i.
#' @param rv_effects_design A \code{matrix} with dimensions N*K x V containing random effect variables.
#' @param gamma A \code{vector} with dimensions P x 1 containing fixed parameter estimates, where \eqn{P = P_1 + P_2}
#' @param beta A \code{vector} with dimensions \eqn{P_2} containing random parameter estimates.
#' @param zeta_rv A \code{matrix} with dimensions N x V containing random parameter estimates.
#' @param Sigma_zeta_inv A \code{matrix} with dimensions \eqn{P_2 x P_2}
#' @param Y dichotomous item responses, a \code{matrix} of dimensions n x J
#' @param theta0 latent theta, a \code{vector} of length n
#' @param a0 item discriminations, a \code{vector} of length J
#' @param b0 item locations, a \code{vector} of length J
#' @param mu_xi0 prior for item parameter means, requires a \code{vector} of dimension 2 (i.e. c(0,1))
#' @param Sig_xi0 prior for item parameter vc matrix, a \code{matrix} of dimension 2x2 (i.e. diag(2))
#' @return A \code{list} that contains:
#' \describe{
#' \item{\code{ai1}}{A \code{vector} of length J}
#' \item{\code{bi1}}{A \code{vector} of length J}
#' \item{\code{theta1}}{A \code{vector} of length N}
#' \item{\code{Z_c}}{A \code{matrix} of length NK}
#' \item{\code{Wzeta_0}}{A \code{matrix} of length NK}
#' }
#' @seealso \code{\link{cIRT}}, \code{\link{rmvnorm}}, and \code{\link{riwishart}}
#' @author Steven Culpepper and James J Balamuta
#' @examples \dontrun{
#' #Call with the following data:
#' TwoPLChoicemcmc(cogDAT,theta0,a0,b0,mu_xi0,Sig_xi0)
#' }
TwoPLChoicemcmc <- function(unique_subject_ids, subject_ids, choices_nk, fixed_effects, B, rv_effects_design, gamma, beta, zeta_rv, Sigma_zeta_inv, Y, theta0, a0, b0, mu_xi0, Sig_xi0) {
    # Auto-generated Rcpp binding: forwards all arguments, in order, to the
    # compiled 'cIRT_TwoPLChoicemcmc' MCMC routine.
    .Call('cIRT_TwoPLChoicemcmc', PACKAGE = 'cIRT', unique_subject_ids, subject_ids, choices_nk, fixed_effects, B, rv_effects_design, gamma, beta, zeta_rv, Sigma_zeta_inv, Y, theta0, a0, b0, mu_xi0, Sig_xi0)
}
#' @title Probit Hierarchical Level Model
#' @description Performs modeling procedure for a Probit Hierarchical Level Model.
#' @param unique_subject_ids A \code{vector} with length N x 1 containing unique subject IDs.
#' @param subject_ids A \code{vector} with length N*K x 1 containing subject IDs.
#' @param choices_nk A \code{vector} with length N*K x 1 containing subject choices.
#' @param fixed_effects_design A \code{matrix} with dimensions N*K x P containing fixed effect variables.
#' @param rv_effects_design A \code{matrix} with dimensions N*K x V containing random effect variables.
#' @param B_elem_plus1 A V[[1]] dimensional column \code{vector} indicating which zeta_i relate to theta_i.
#' @param gamma A \code{vector} with dimensions P_1 x 1 containing fixed parameter estimates.
#' @param beta A \code{vector} with dimensions P_2 x 1 containing random parameter estimates.
#' @param theta A \code{vector} with dimensions N x 1 containing subject understanding estimates.
#' @param zeta_rv A \code{matrix} with dimensions N x V containing random parameter estimates.
#' @param WtW A \code{field<matrix>} P x P x N contains the caching for direct sum.
#' @param Z_c A \code{vec} with dimensions N*K x 1
#' @param Wzeta_0 A \code{vec} with dimensions N*K x 1
#' @param inv_Sigma_gamma A \code{matrix} with dimensions P x P that is the prior inverse sigma matrix for gamma.
#' @param mu_gamma A \code{vector} with length P x 1 that is the prior mean vector for gamma.
#' @param Sigma_zeta_inv A \code{matrix} with dimensions V x V that is the prior inverse sigma matrix for zeta.
#' @param S0 A \code{matrix} with dimensions V x V that is the prior sigma matrix for zeta.
#' @param mu_beta A \code{vec} with dimensions P_2 x 1, that is the mean of beta.
#' @param sigma_beta_inv A \code{mat} with dimensions P_2 x P_2, that is the inverse sigma matrix of beta.
#' @return A \code{matrix} that is an inverse wishart distribution.
#' @details The function is implemented to decrease the amount of vectorizations necessary.
#' @seealso \code{\link{rwishart}} and \code{\link{TwoPLChoicemcmc}}
#' @author Steven A Culpepper, James J Balamuta
#' @return A \code{list} that contains:
#' \describe{
#' \item{\code{zeta_1}}{A \code{vector} of length N}
#' \item{\code{sigma_zeta_inv_1}}{A \code{matrix} of dimensions V x V}
#' \item{\code{gamma_1}}{A \code{vector} of length P}
#' \item{\code{beta_1}}{A \code{vector} of length V}
#' \item{\code{B}}{A \code{matrix} of length V}
#' }
probitHLM <- function(unique_subject_ids, subject_ids, choices_nk, fixed_effects_design, rv_effects_design, B_elem_plus1, gamma, beta, theta, zeta_rv, WtW, Z_c, Wzeta_0, inv_Sigma_gamma, mu_gamma, Sigma_zeta_inv, S0, mu_beta, sigma_beta_inv) {
    # Auto-generated Rcpp binding: forwards all arguments, in order, to the
    # compiled 'cIRT_probitHLM' routine.
    .Call('cIRT_probitHLM', PACKAGE = 'cIRT', unique_subject_ids, subject_ids, choices_nk, fixed_effects_design, rv_effects_design, B_elem_plus1, gamma, beta, theta, zeta_rv, WtW, Z_c, Wzeta_0, inv_Sigma_gamma, mu_gamma, Sigma_zeta_inv, S0, mu_beta, sigma_beta_inv)
}
#' @title Generic Implementation of Choice IRT MCMC
#' @description Builds a model using MCMC
#' @param subject_ids A \code{vector} that contains subject IDs for each line of data in the choice vector
#' (e.g. For 1 subject that made 5 choices, we would have the number 1 appear five times consecutively.)
#' @param fixed_effects A \code{matrix} with NK x P1 dimensions that acts as the design matrix for terms WITHOUT theta.
#' @param B_elem_plus1 A V[[1]] dimensional column \code{vector} indicating which zeta_i relate to theta_i.
#' @param rv_effects A \code{matrix} with NK x V dimensions for random effects design matrix.
#' @param trial_matrix A \code{matrix} with N x J dimensions, where J denotes the number of items presented.
#' The matrix MUST contain only 1's and 0's.
#' @param choices_nk A \code{vector} with NK length that contains the choice value e.g. 0 or 1.
#' @param chain_length An \code{int} that controls how many MCMC draws there are. (>0)
#' @param burnit An \code{int} that describes how many MCMC draws should be discarded.
#' @return A \code{list} that contains:
#' \describe{
#' \item{\code{as}}{A \code{matrix} of dimension chain_length x J}
#' \item{\code{bs}}{A \code{matrix} of dimension chain_length x J}
#' \item{\code{gs}}{A \code{matrix} of dimension chain_length x P_1}
#' \item{\code{Sigma_zeta_inv}}{An \code{array} of dimension V x V x chain_length}
#' \item{\code{betas}}{A \code{matrix} of dimension chain_length x P_2}
#' }
#' @seealso \code{\link{TwoPLChoicemcmc}}, \code{\link{probitHLM}}, \code{\link{center_matrix}}, \code{\link{rmvnorm}}, \code{\link{rwishart}}, and \code{\link{riwishart}}
#' @author Steven Culpepper, James J Balamuta
#' @examples \dontrun{
#' # Variables
#' # Y = trial matix
#' # C = KN vector of binary choices
#' # N = #of subjects
#' # J = # of items
#' # K= # of choices
#' # atrue = true item discriminations
#' # btrue = true item locations
#' # thetatrue = true thetas/latent performance
#' # gamma = fixed effects coefficients
#' # Sig = random-effects variance-covariance
#' # subid = id variable for subjects
#'
#' # Load the Package
#' library(cIRT)
#'
#' # Load the Data
#' data(trial_matrix)
#' data(choice_matrix)
#'
#' # Thurstone design matrices
#' all_nopractice = subset(all_data_trials,experiment_loop.thisN>-1)
#' hard_items = choice_matrix$hard_q_id
#' easy_items = choice_matrix$easy_q_id
#'
#' D_easy = model.matrix(~-1+factor(easy_items))
#' D_hard = -1*model.matrix(~-1+factor(hard_items))[,-c(5,10,15)]
#'
#' # Defining effect-coded contrasts
#' high_contrasts <- rbind(-1,diag(4))
#' rownames(high_contrasts) = 12:16
#' low_contrasts <- rbind(-1,diag(2))
#' rownames(low_contrasts) = 4:6
#'
#' # Creating high & low factors
#' high = factor(choice_matrix[,'high_value'])
#' low = factor(choice_matrix[,'low_value'])
#' contrasts(high) = high_contrasts
#' contrasts(low) = low_contrasts
#'
#' fixed_effects = model.matrix(~high+low)
#' fixed_effects_base = fixed_effects[,1]
#' fixed_effects_int = model.matrix(~high*low)
#'
#'
#' # Model with Thurstone D Matrix
#' system.time({
#' out_model_thurstone = cIRT(choice_matrix[,'subject_id'],
#' cbind(fixed_effects[,-1],D_easy,D_hard),
#' c(1:ncol(fixed_effects)),
#' as.matrix(fixed_effects),
#' as.matrix(trial_matrix),
#' choice_matrix[,'choose_hard_q'],
#' 20000,
#' 25000)
#' })
#'
#'
#' vlabels_thurstone = colnames(cbind(fixed_effects[,-1],D_easy,D_hard))
#' G_thurstone = t(apply(out_model_thurstone$gs0, 2, FUN = quantile,probs=c(.5,.025,.975)))
#' rownames(G_thurstone)=vlabels_thurstone
#' B_thurstone = t(apply(out_model_thurstone$beta, 2, FUN = quantile,probs=c(.5,0.025,.975)))
#' rownames(B_thurstone)=colnames(fixed_effects)
#' S_thurstone = solve(apply(out_model_thurstone$Sigma_zeta_inv, c(1,2), FUN = mean))
#' inv_sd = diag(1/sqrt(diag(solve(apply(out_model_thurstone$Sigma_zeta_inv, c(1,2), FUN = mean)))))
#' inv_sd%*%S_thurstone%*%inv_sd
#' apply(out_model_thurstone$as, 2, FUN = mean)
#' apply(out_model_thurstone$bs, 2, FUN = mean)
#' }
cIRT <- function(subject_ids, fixed_effects, B_elem_plus1, rv_effects, trial_matrix, choices_nk, burnit, chain_length = 10000L) {
    # Auto-generated Rcpp binding: forwards all arguments to the compiled
    # 'cIRT_cIRT' MCMC routine.
    # NOTE(review): the signature takes `burnit` before `chain_length`, while
    # the roxygen block documents chain_length before burnit -- positional
    # callers should double-check the argument order.
    .Call('cIRT_cIRT', PACKAGE = 'cIRT', subject_ids, fixed_effects, B_elem_plus1, rv_effects, trial_matrix, choices_nk, burnit, chain_length)
}
#' @title Generate Random Multivariate Normal Distribution
#' @description Creates a random Multivariate Normal when given number of obs, mean, and sigma.
#' @param n An \code{int}, which gives the number of observations. (> 0)
#' @param mu A \code{vector} length m that represents the means of the normals.
#' @param S A \code{matrix} with dimensions m x m that provides Sigma, the covariance matrix.
#' @return A \code{matrix} that is a Multivariate Normal distribution
#' @seealso \code{\link{TwoPLChoicemcmc}} and \code{\link{probitHLM}}
#' @author James J Balamuta
#' @examples
#' #Call with the following data:
#' rmvnorm(2, c(0,0), diag(2))
#'
rmvnorm <- function(n, mu, S) {
    # Auto-generated Rcpp binding: draws `n` multivariate-normal samples via
    # the compiled 'cIRT_rmvnorm' routine.
    .Call('cIRT_rmvnorm', PACKAGE = 'cIRT', n, mu, S)
}
#' @title Generate Random Wishart Distribution
#' @description Creates a random wishart distribution when given degrees of freedom and a sigma matrix.
#' @param df An \code{int}, which gives the degrees of freedom of the Wishart. (> 0)
#' @param S A \code{matrix} with dimensions m x m that provides Sigma, the covariance matrix.
#' @return A \code{matrix} that is a Wishart distribution, aka the sample covariance matrix of a Multivariate Normal Distribution
#' @seealso \code{\link{riwishart}} and \code{\link{probitHLM}}
#' @author James J Balamuta
#' @examples
#' #Call with the following data:
#' rwishart(3, diag(2))
#'
#' # Validation
#' set.seed(1337)
#' S = toeplitz((10:1)/10)
#' n = 10000
#' o = array(dim = c(10,10,n))
#' for(i in 1:n){
#' o[,,i] = rwishart(20, S)
#' }
#' mR = apply(o, 1:2, mean)
#' Va = 20*(S^2 + tcrossprod(diag(S)))
#' vR = apply(o, 1:2, var)
#' stopifnot(all.equal(vR, Va, tolerance = 1/16))
#'
rwishart <- function(df, S) {
    # Auto-generated Rcpp binding: draws a Wishart-distributed matrix via the
    # compiled 'cIRT_rwishart' routine.
    .Call('cIRT_rwishart', PACKAGE = 'cIRT', df, S)
}
#' @title Generate Random Inverse Wishart Distribution
#' @description Creates a random inverse wishart distribution when given degrees of freedom and a sigma matrix.
#' @param df An \code{int} that represents the degrees of freedom. (> 0)
#' @param S A \code{matrix} with dimensions m x m that provides Sigma, the covariance matrix.
#' @return A \code{matrix} that is an inverse wishart distribution.
#' @seealso \code{\link{rwishart}} and \code{\link{TwoPLChoicemcmc}}
#' @author James J Balamuta
#' @examples
#' #Call with the following data:
#' riwishart(3, diag(2))
riwishart <- function(df, S) {
    # Auto-generated Rcpp binding: draws an inverse-Wishart-distributed matrix
    # via the compiled 'cIRT_riwishart' routine.
    .Call('cIRT_riwishart', PACKAGE = 'cIRT', df, S)
}
#' @title Calculate Tabulated Total Scores
#' @description Internal function to -2LL
#' @param N An \code{int}, which gives the number of observations. (> 0)
#' @param J An \code{int}, which gives the number of items. (> 0)
#' @param Y A N by J \code{matrix} of item responses.
#' @return A vector of tabulated total scores.
#' @author Steven Andrew Culpepper
Total_Tabulate <- function(N, J, Y) {
    # Auto-generated Rcpp binding: tabulates total scores via the compiled
    # 'cIRT_Total_Tabulate' routine. Internal helper for -2LL.
    .Call('cIRT_Total_Tabulate', PACKAGE = 'cIRT', N, J, Y)
}
#' @title Generate Observed Data from choice model
#' @description Generates observed cognitive and choice data from the IRT-Thurstone model.
#' @param N An \code{integer} for the number of observations.
#' @param J An \code{integer} for the number of items.
#' @param K An \code{integer} for the number of paired comparisons.
#' @param theta A \code{vector} of latent cognitive variables.
#' @param as item discriminations, a \code{vector} of length J
#' @param bs item locations, a \code{vector} of length J
#' @param zeta A \code{matrix} with dimensions N x V containing random parameter estimates.
#' @param gamma A \code{vector} with dimensions P x 1 containing fixed parameter estimates, where \eqn{P = P_1 + P_2}
#' @param X A \code{matrix} with dimensions N*K x P_1 containing fixed effect design matrix without theta.
#' @param W A \code{matrix} with dimensions N*K x V containing random effect variables.
#' @param subject_ids A \code{vector} with length NK x 1 containing subject-choice IDs.
#' @param unique_subject_ids A \code{vector} with length N x 1 containing unique subject IDs.
#' @return A \code{list} that contains:
#' \describe{
#' \item{\code{Y}}{A \code{matrix} of dimension N by J}
#' \item{\code{C}}{A \code{vector} of length NK}
#' }
#' @author Steven Culpepper and James J Balamuta
Generate_Choice <- function(N, J, K, theta, as, bs, zeta, gamma, X, W, subject_ids, unique_subject_ids) {
    # Auto-generated Rcpp binding: simulates cognitive and choice data via the
    # compiled 'cIRT_Generate_Choice' routine.
    .Call('cIRT_Generate_Choice', PACKAGE = 'cIRT', N, J, K, theta, as, bs, zeta, gamma, X, W, subject_ids, unique_subject_ids)
}
|
91fc31ffb798398866e7df6cc58f39122ba7b209
|
9d61b282ed44c563098095f5aa344bbf2d16ee04
|
/man/stackedBarchartsForSingleTranscripts.Rd
|
0ceb8637f45b9b587475dea5e187f0ca1e17110a
|
[
"MIT"
] |
permissive
|
p-carter/cscplots
|
13d27a539a35ba0250c56882f5eaee8cb725a9e9
|
87d4da474bc02c914c06a678c6c36dcab5fd6eec
|
refs/heads/master
| 2023-07-20T11:18:06.295811
| 2021-08-23T17:29:15
| 2021-08-23T17:29:15
| 326,286,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,381
|
rd
|
stackedBarchartsForSingleTranscripts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_preferential_expression_bar_graphs.R
\name{stackedBarchartsForSingleTranscripts}
\alias{stackedBarchartsForSingleTranscripts}
\title{Print expression stacked bar graph for genes/transcripts most preferentially expressed in selected
putative CSCs.}
\usage{
stackedBarchartsForSingleTranscripts(
tExprMatr,
pCSC_group_positions,
tumour_only_positions,
this_patient_tumour_only_positions = NULL,
pCSC_group_name,
tissue_type,
family_type_name,
patient_source_name,
separate_row_for_high_expression_transcripts = NULL,
no_ribo = NULL,
manually_set_max_number_of_graph_rows = NULL,
HUGO_abbreviations_and_fullnames = NULL,
HUGOFullNameFilesDir = NULL
)
}
\arguments{
\item{tExprMatr}{Expression matrix describing a cancer single-cell population; column
names should be gene names, row names should be cancer cell names.}
\item{pCSC_group_positions}{Row number of CSCs in the CSC group in the expression matrix
tExprMatr.}
\item{tumour_only_positions}{Positions (row numbers) in the expression matrix tExprMatr
thought to be tumour cells e.g. when the population contains cells annotated or predicted
as other cell types a subset may be defined, or if all cells are thought to be malignant
cancer cells this may be all cells.}
\item{this_patient_tumour_only_positions}{Optional: Expression matrix tExprMatr positions
(row numbers) for tumour cells from a specific patient i.e. that the CSCs for the
preferential expression plots are taken from. So, the plot needs to fulfil the criteria
that the CSCs are from only one patient, but the tumour cell population is from multiple
patients.}
\item{pCSC_group_name}{Name of CSC group to print preferential expression plots for
e.g. 'pCSC_group_1_SC4_Talazoparib.LB17011', but can be anything.}
\item{tissue_type}{Name of tissue type e.g. 'CDX'; can be anything.}
\item{family_type_name}{Name of the gene group type; usually 'HUGO'.}
\item{patient_source_name}{Optional: Name of the patient sample that CSC cells are from;
just name for in the plot title, can be anything.}
\item{separate_row_for_high_expression_transcripts}{Optional: Set to 1 to print very high
expression transcripts or gene groups on a separate row in some of the bar charts; helpful
when there is a big difference between the most highly expressed and those on the same
bar chart row.}
\item{no_ribo}{Optional: Set to 1 to remove ribosomal transcripts from some of the bar charts}
\item{manually_set_max_number_of_graph_rows}{Optional: Set to manually override the
maximum number of rows shown in the plot.}
}
\value{
Highest ranked preferentially expressed genes
}
\description{
This function prints a stacked bar graph to display expression in the genes/transcripts most
preferentially expressed in a selected set of putative CSCs. This may use expression in the
form of read counts, TPM, TPM count fractions, Cells Per Million, etc. Cells Per Million or
TPM count fractions are recommended for tumour cell populations containing a mix of cell types.
Read counts are recommended for homogenous cell populations e.g. PDXs, CDXs. Each bar in the
plot generated is subdivided by the proportion of the average expression that is contributed
by each cell in the pCSC group. The bars are ordered by these average expression scores across
the pCSC group.
}
|
f3b005179420b287cb69e212e8cbb31175240879
|
f1ba45ad71dc30131794fb69ede9ac6f02351b64
|
/R/greedyRegistration.R
|
a40d39a30360e1ef95dee0146b540f026807b661
|
[] |
no_license
|
jeffduda/greedyr
|
eb2e8239ccb721a62e323b9c0a0d91d91fda9ae4
|
664aef96abbadacaddaf0e40147bceff023a8131
|
refs/heads/master
| 2021-01-12T04:16:52.351965
| 2019-05-07T18:45:54
| 2019-05-07T18:45:54
| 77,561,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 917
|
r
|
greedyRegistration.R
|
#' @title greedyRegistration
#' @description Find the transform mapping between two image spaces.
#' @param fixed target image (or vector/list of images)
#' @param moving moving image (or vector/list of images)
#' @param metric similarity metric passed to the greedy backend (default "SSD")
#' @param mode registration mode passed to the greedy backend (default "GREEDY")
#' @param parameters list of parameters obtained from greedyParameters()
#'     (NOTE(review): currently accepted but not forwarded to the backend)
#' @return antsrTransform
greedyRegistration <- function( fixed, moving, metric="SSD", mode="GREEDY", parameters=NA ) {
  fixedImg = fixed
  movingImg = moving
  # Scratch path handed to the compiled routine for any on-disk output.
  ofile = paste0( tempfile(), ".nii.gz" )
  # The backend expects multi-channel (vector) images; wrap plain images
  # in a single-channel vector image.
  if ( !fixed@isVector) {
    fixedImg = mergeChannels(list(fixed))
  }
  if ( !moving@isVector) {
    movingImg = mergeChannels(list(moving))
  }
  results = .Call("greedyregistration", fixedImg, movingImg, metric, mode, ofile, PACKAGE="greedyr")
  # A character result is a file path to a displacement field: read it back
  # and convert to a transform. Use inherits() rather than
  # class(results) == "character", which misbehaves when class() returns a
  # vector of length > 1.
  if ( inherits(results, "character") ) {
    results = antsrTransformFromDisplacementField( antsImageRead(results) )
  }
  return(results)
}
|
2c95fb029c22d2fcfdb5ea5cccef717f618cb66d
|
384c3dbc571be91c6f743d1427dec00f13e0d8ae
|
/r/kernels/pyy0715-titanic-data-analysis-with-r/script/titanic-data-analysis-with-r.r
|
214b0e1e648835a15e49faea0495429f9ffa8c31
|
[] |
no_license
|
helenaK/trustworthy-titanic
|
b9acdd8ca94f2fa3f7eb965596eed4a62821b21e
|
ade0e487820cf38974561da2403ebe0da9de8bc6
|
refs/heads/master
| 2022-12-09T20:56:30.700809
| 2020-09-10T14:22:24
| 2020-09-10T14:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,272
|
r
|
titanic-data-analysis-with-r.r
|
# ---- Libraries and data load ----
library(readr)
library(stringr) # string manipulation
library(doBy)
library(ggplot2)
library(scales)
library(RColorBrewer)
library(corrplot)
library(doBy)
library(dplyr) # data wrangling / preprocessing
library(randomForest)
library(gridExtra)
train <- read_csv('../input/train.csv')
test <- read_csv('../input/test.csv')
# Stack train and test so feature engineering is applied to both consistently.
full <- bind_rows(train, test)
full <- full %>% # Ticket and Cabin stay character: they feed derived variables later
  mutate(Survived = factor(Survived),
         Pclass = factor(Pclass, ordered = T),
         Name = factor(Name),
         Sex = factor(Sex),
         Embarked = factor(Embarked))
str(full)
head(full)
str(full)
summary(full)
# Number of distinct values per column (train only).
sapply(train, function(x) length(unique(x)))
# Count of missing values per column.
colSums(is.na(full))
# ---- Missing-value exploration ----
missing_values <- full %>% # per-column missing-value rate, as a data frame
  dplyr::summarize_all(funs(sum(is.na(.))/n()))
# tidyr::gather() stacks the columns: feature names go into the `key` column
# and the missing rates into the `value` column.
# (funs() is soft-deprecated in newer dplyr; list(~ ...) is the modern form.)
missing_values <- tidyr::gather(missing_values,
                                key = "feature", value = "missing_pct")
missing_values
missing_values %>%
  ggplot(aes(x = reorder(feature, missing_pct), y = missing_pct)) + # reorder() sorts bars by rate
  geom_bar(stat = "identity", fill = "red") + # stat = 'identity': plot the values as-is
  ggtitle("Rate of missing values in each features") +
  theme(plot.title = element_text(face = "bold", # font face
                                  hjust = 0.5, # horizontal justification = 0.5 (centered)
                                  size = 15, color = "darkblue")) +
  labs(x = "Feature names", y = "Rate") + # axis titles
  coord_flip() # swap x and y axes
# keep only the variables that actually have missing values
missing_values <- missing_values[missing_values$missing_pct > 0, ]
missing_values <- missing_values[missing_values$missing_pct > 0, ] # (duplicate of the line above; idempotent, so harmless)
missing_values %>%
  ggplot(aes(x = reorder(feature, missing_pct), y = missing_pct)) + # reorder() sorts bars by rate
  geom_bar(stat = "identity", fill = "red") + # stat = 'identity': plot the values as-is
  ggtitle("Rate of missing values in each features") +
  theme(plot.title = element_text(face = "bold", # font face
                                  hjust = 0.5, # horizontal justification = 0.5 (centered)
                                  size = 15, color = "darkblue")) +
  labs(x = "Feature names", y = "Rate") + # axis titles
  coord_flip() # swap x and y axes
# ---- Exploratory analysis: Sex ----
table(full$Sex)
full %>% group_by(Survived, Sex) %>% summarise(freq = n())
prop.table(table(full$Sex,full$Survived),1) # females survive at a clearly higher rate
# bar plot of sex counts
sex.p1 <- full %>%
  dplyr::group_by(Sex) %>%
  summarize(N = n()) %>%
  ggplot(aes(Sex, N)) +
  geom_col() +
  geom_text(aes(label = N), size = 5, vjust = 1.2, color = "#FFFFFF") +
  ggtitle("Bar plot of Sex") +
  labs(x = "Sex", y = "Count")
# survival rate by sex
sex.p2 <- full%>%
  filter(!is.na(Survived)) %>%
  ggplot(aes(factor(Sex), fill = factor(Survived))) +
  geom_bar(position = "fill") +
  scale_y_continuous(labels = percent) +
  scale_fill_brewer(palette = "Set1") + # choose the brewer palette
  # evenly spaced axis breaks: scale_x_continuous(breaks=seq())
  # hand-picked axis breaks:   scale_x_continuous(breaks=c())
  ggtitle("Survival Rate by Sex") +
  labs(x = "Sex", y = "Rate")
grid.arrange(sex.p1,sex.p2,ncol=2)
# ---- Exploratory analysis: Pclass ----
table(full$Pclass)
prop.table(table(full$Pclass,full$Survived),1) # the better the cabin class, the higher the survival rate
# bar plot of Pclass counts
pclass.p1 <- full %>%
  dplyr::group_by(Pclass) %>%
  summarize(N = n()) %>%
  ggplot(aes(Pclass, N)) +
  geom_col() +
  geom_text(aes(label = N), size = 5, vjust = 1.2, color = "#FFFFFF") +
  ggtitle("Bar plot of Pclass") +
  labs(x = "Pclass", y = "Count")
# survival rate by Pclass
pclass.p2 <- full%>%
  filter(!is.na(Survived)) %>%
  ggplot(aes(factor(Pclass), fill = factor(Survived))) +
  geom_bar(position = "fill") +
  scale_fill_brewer(palette = "Set1") +
  ggtitle("Survival Rate by Pclass") +
  labs(x = "Pclass", y = "Rate")
grid.arrange(pclass.p1,pclass.p2,ncol=2)
# ---- Exploratory analysis: Fare ----
hist(full$Fare)
# histogram of Fare
Fare.p1 <- full %>%
  ggplot(aes(Fare)) +
  geom_histogram(col = "yellow",
                 fill = "blue",
                 alpha = .5) +
  ggtitle("Histogram of passengers Fare") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5, size = 15))
# box plot of Fare by survival
Fare.p2 <- full %>%
  filter(!is.na(Survived)) %>%
  ggplot(aes(Survived, Fare)) + # x: survival, y: fare
  # draw observations as gray points, jittered so overlapping points spread out
  geom_jitter(col = "gray") +
  # boxplot with 50% transparency
  geom_boxplot(alpha = .5) +
  ggtitle("Boxplot of passengers Fare") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5, size = 15))
grid.arrange(Fare.p1,Fare.p2,ncol=2)
# ---- Exploratory analysis: Age ----
hist(full$Age)
# age distribution histogram
age.p1 <- full %>%
  ggplot(aes(Age)) + # no y mapping needed: the histogram computes counts
  # histogram settings
  geom_histogram(breaks = seq(0, 80, by = 1), # bin width
                 col = "red", # bar border color
                 fill = "green", # bar fill color
                 alpha = .5) + # 50% transparency
  # Plot title
  ggtitle("All Titanic passengers age hitogram") +
  theme(plot.title = element_text(face = "bold", # font face
                                  hjust = 0.5, # horizontal justification = 0.5 (centered)
                                  size = 15, color = "darkblue"))
# survival density by age
age.p2 <- full %>%
  filter(!is.na(Survived)) %>%
  ggplot(aes(Age, fill = Survived)) +
  geom_density(alpha = .5) + # density plot, so geom_density instead of geom_bar
  ggtitle("Titanic passengers age density plot") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5,
                                  size = 15, color = "darkblue"))
grid.arrange(age.p1,age.p2,ncol=2)
# ---- Exploratory analysis: SibSp / Parch / Embarked ----
table(full$SibSp)
train %>% group_by(Survived, SibSp) %>% summarise(freq = n())
prop.table(table(train$SibSp,train$Survived),1) # survival drops as the number of siblings/spouses grows
table(train$Parch)
train %>% group_by(Survived, Parch) %>% summarise(freq = n())
prop.table(table(train$Parch,train$Survived),1) # traveling with 1-3 parents/children raises survival
table(train$Embarked) # two missing values
train %>% group_by(Survived, Embarked) %>% summarise(freq = n())
prop.table(table(train$Embarked,train$Survived),1) # only passengers embarked at C show a higher survival rate
# ---- Imputation: Embarked and Fare ----
colSums(is.na(full))
full[is.na(full$Embarked), ] # both missing rows have Fare == 80 and Pclass == 1
embark_fare <- full[!is.na(full$Embarked), ]
ggplot(embark_fare, aes(x = Embarked, y = Fare, fill = factor(Pclass))) +
  geom_boxplot() +
  geom_hline(aes(yintercept=80), # reference line at Fare = 80
             colour='red', linetype='dashed', lwd=2) +
  scale_y_continuous()
# Fare 80 / Pclass 1 matches the first-class fare distribution at 'C'.
full$Embarked[c(62, 830)] <- 'C'
full[c(62, 830),]
full %>% filter(is.na(full$Fare)) # the row with missing Fare has Pclass 3 and Embarked S
full$Fare[1044] <- median(full[full$Pclass == '3' & full$Embarked == 'S', ]$Fare, na.rm = TRUE) # impute with the group median
full[1044,]
# ---- Feature engineering: Title, Sex recoding, family size, Deck ----
# Extract the honorific title from the Name column.
Title <- full$Name
Title <- gsub("^.*, (.*?)\\..*$", "\\1", Title) # regex: keep the text between ", " and the first "."
full$Title <- Title
unique(full$Title)
# frequency / proportion of each title
descr::CrossTable(full$Title)
# collapse the rare titles into five categories
full <- full %>%
  # use %in%, not "==": with "==" R's recycling rule gives wrong results here
  mutate(Title = ifelse(Title %in% c("Mlle", "Ms", "Lady", "Dona"), "Miss", Title), # %in% set membership
         Title = ifelse(Title == "Mme", "Mrs", Title),
         Title = ifelse(Title %in% c("Capt", "Col", "Major", "Dr", "Rev", "Don",
                                     "Sir", "the Countess", "Jonkheer"), "Officer", Title),
         Title = factor(Title))
# re-check category frequencies after the collapse
descr::CrossTable(full$Title) # reduced to 5 levels
# recode Sex as 0 (male) / 1 (female) and make it a factor
full$Sex <- ifelse(full$Sex == "male" ,0 , 1)
full$Sex <- as.factor(full$Sex)
# family size = siblings/spouses + parents/children + self
full$Fsize <- full$SibSp + full$Parch + 1
table(full$Fsize)
# survival rate by family size
Fsize.p1 <- full%>%
  filter(!is.na(Survived)) %>%
  ggplot(aes(Fsize, fill = Survived)) +
  geom_bar(position = "fill") +
  scale_y_continuous(labels = percent) +
  scale_x_continuous(breaks=c(1:11)) +
  scale_fill_brewer(palette = "Set1") + # choose the brewer palette
  # evenly spaced axis breaks: scale_x_continuous(breaks=seq())
  # hand-picked axis breaks:   scale_x_continuous(breaks=c())
  ggtitle("Survival Rate by Fsize") +
  labs(x = "Fsize", y = "Rate")
Fsize.p1
#ggplot(full[1:891,], aes(x = Fsize, fill = factor(Survived))) +
# geom_bar(stat='count', position='fill') + # position = 'dodge' vs 'fill'
# scale_x_continuous(breaks=c(1:11)) +
# labs(x = 'Family Size', y = 'Rate')
# bin family size into three categories
full$Familysize[full$Fsize == 1] <- 'single'
full$Familysize[full$Fsize < 5 & full$Fsize > 1] <- 'small'
full$Familysize[full$Fsize > 4] <- 'large'
full$Familysize <- as.factor(full$Familysize)
table(full$Familysize)
# survival rate by binned family size
# BUG FIX: labs() was on its own line (no trailing "+"), so the axis labels
# never reached the plot; chain it with "+" so they apply.
ggplot(full[1:891,], aes(x = Familysize, fill = Survived)) +
  geom_bar(position = 'fill') +
  ggtitle("Survival Rate by Familysize") +
  labs(x="Familysize", y="Rate")
# first character of Cabin identifies the deck
full$Cabin[1:28]
strsplit(full$Cabin[2], NULL)[[1]]
full$Deck<-factor(sapply(full$Cabin, function(x) strsplit(x, NULL)[[1]][1]))
full$Deck=as.character(full$Deck)
str(full)
# drop the Cabin column (superseded by Deck)
full=full[,-11]
head(full)
# unknown deck -> "U"
full$Deck[is.na(full$Deck)] <- "U"
cabin=full %>%filter(!is.na(full$Survived)& full$Deck!='U')
ggplot(cabin,aes(x=Deck, fill=factor(Survived), na.rm=TRUE)) +
  geom_bar(stat='count') +
  facet_grid(.~Pclass) +
  labs(title="Survivor split by Pclass and Deck")
# split the unknown decks by passenger class (X/Y/Z for Pclass 1/2/3)
full=full %>%
  mutate(Deck= ifelse(Pclass==1 & Deck=="U","X",
                      ifelse(Pclass==2 & Deck=="U","Y",
                             ifelse(Pclass==3 & Deck=="U","Z",Deck)))
  )
full %>% count(Deck)
# ---- Age imputation / binning and ticket-based travel groups ----
# age density by sex
age.sex <- full %>%
  ggplot(aes(Age, fill = Sex)) +
  geom_density(alpha = .5) +
  ggtitle("Titanic passengers Age density plot") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5,
                                  size = 15, color = "darkblue"))
age.sex
# age density by passenger class
age.pclass <- full %>%
  ggplot(aes(Age, fill = Pclass)) +
  geom_density(alpha = .5) +
  ggtitle("Titanic passengers Age density plot") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5,
                                  size = 15, color = "darkblue"))
age.pclass
# age density by title
age.title <- full %>%
  ggplot(aes(Age, fill = Title)) +
  geom_density(alpha = .5) +
  ggtitle("Titanic passengers Age density plot") +
  theme(plot.title = element_text(face = "bold", hjust = 0.5,
                                  size = 15, color = "darkblue"))
age.title
plot(full$Title)
# impute missing Age using the per-title median age
full=as.data.frame(full)
summaryBy(Age ~ Title, data=full, FUN=c(mean, sd, median), na.rm=TRUE) ## ddply would also work
full$Age <- ifelse((is.na(full$Age) & full$Title == 'Master'), 4, full$Age)
full$Age <- ifelse((is.na(full$Age) & full$Title == 'Miss'), 22, full$Age)
full$Age <- ifelse((is.na(full$Age) & full$Title == 'Mr'), 29, full$Age)
full$Age <- ifelse((is.na(full$Age) & full$Title == 'Mrs'), 35, full$Age)
full$Age <- ifelse((is.na(full$Age) & full$Title == 'Officer'), 48, full$Age)
hist(full$Age, freq=F, main='Age',col='lightgreen', ylim=c(0,0.05))
# bin age:
# child  : age <= 18
# adult  : 19 <= age <= 64
# senior : age >= 65
full$Age <- ifelse(full$Age <= 18, "child",
                   ifelse(full$Age > 18 & full$Age <= 64, "adult","senior"))
length(unique(full$Ticket))
head(full$Ticket)
full %>% arrange(Ticket) # same ticket but Familysize 'single': likely friends traveling together
full$TravelGroup <- NA
# every distinct ticket gets one TravelGroup id
full <- (transform(full, TravelGroup = match(Ticket, unique(Ticket))))
full <- full %>%
  group_by(TravelGroup) %>%
  mutate(GroupSize = n()) %>%
  ungroup()
full %>% arrange(Ticket) %>% head()
str(full)
# coerce the remaining categorical variables to factors
factor_vars <- c('Age','GroupSize','Deck')
full[factor_vars] <- lapply(full[factor_vars], function(x) as.factor(x))
# log-transform Fare (log1p) to reduce skew
full$Fare=log1p(full$Fare)
# drop raw/identifier columns by position (presumably PassengerId, Name,
# SibSp, Parch, Ticket, Fsize, TravelGroup -- verify if columns change)
full=full %>% select(-c(1,4,7,8,9,13,16))
str(full)
# --- Split back into train / test sets and fit the final xgboost model ---
train <-full %>% filter(is.na(Survived)==FALSE)
test <-full %>% filter(is.na(Survived)==TRUE)
train_label <- as.numeric(train$Survived)-1  # factor levels 1/2 -> labels 0/1
test_label <- test$Survived
# One-hot encode all predictors (drop the Survived column)
x_train<- model.matrix(~.-1, data = train[,-1]) %>% data.frame
x_test <- model.matrix(~.-1, data = test[,-1]) %>% data.frame
library(xgboost)
dtrain <- xgb.DMatrix(data = as.matrix(x_train), label=train_label)
dtest <- xgb.DMatrix(data = as.matrix(x_test))
set.seed(2019)
# BUGFIX: "gammma" and "subsamle" were misspelled, so xgboost silently
# ignored them and trained with the defaults (gamma = 0, subsample = 1).
param <- list(objective = "binary:logistic",
              eval_metric = "auc",
              max_depth = 6,
              eta = 0.01,
              gamma = 0,
              subsample = 0.5,
              colsample_bytree = 0.5,
              min_child_weight = 5)
# 5-fold cross-validation used once to choose nrounds (kept for reference)
# xgb_cv <- xgb.cv(params = param,
#                  data = dtrain,
#                  nrounds = 5000,
#                  nfold = 5,
#                  nthread = -1,
#                  silent = 1,
#                  print_every_n = 100,
#                  verbose = 0)
# best=xgb_cv$best_iteration # optimal number of tree
# auc=xgb_cv$evaluation_log
# auc %>% filter(test_auc_mean==max(auc[,4]))
# Final model on the full training set with the CV-selected nrounds
xgb <- xgb.train(params = param,
                 data = dtrain,
                 nrounds = 4790,
                 silent = 1,
                 print_every_n = 100,
                 verbose = 0)
# --- Hold-out validation: refit on a 70/30 split of the training data ---
library(caret)
set.seed(123)
split <- createDataPartition(y = train$Survived,p = 0.7,list = FALSE)
new_train <- train[split,]
new_test <- train[-split,]
x_label= as.numeric(new_train$Survived)-1
y_label= as.numeric(new_test$Survived)-1
new_train2 <- model.matrix(~.-1, data = new_train[,-1]) %>% data.frame
new_test2 <- model.matrix(~.-1, data = new_test[,-1]) %>% data.frame
dtrain2 <- xgb.DMatrix(data = as.matrix(new_train2), label=x_label)
dtest2 <- xgb.DMatrix(data = as.matrix(new_test2), label=y_label)
xgb2 <- xgb.train(params = param,
                  data = dtrain2,
                  nrounds = 4790,
                  silent = 1,
                  print_every_n = 100,
                  verbose = 0)
# Inspect raw predicted probabilities against the truth
set.seed(2019)
XGB_pred2 <- predict(xgb2, dtest2)
head(XGB_pred2,10)
head(new_test$Survived,10)
# Evaluate at a 0.5 probability cutoff
set.seed(2019)
XGB_pred2 <- predict(xgb2, dtest2)
XGB_pred2 <- ifelse(XGB_pred2>=0.5,1,0)
# plot ROC
library(ROCR)
library(Metrics)
pr <- prediction(XGB_pred2,new_test$Survived)
perf <- performance(pr,measure = "tpr",x.measure = "fpr")
# BUGFIX: the original line read "plot(perf) > auc(...)" -- a pasted console
# prompt that compared plot()'s NULL return against the AUC (and referenced
# the not-yet-defined XGB_pred). Split into two plain statements.
plot(perf)
auc(new_test$Survived,XGB_pred2) # ~0.81
# Evaluate at a 0.4 cutoff (slightly better AUC on this split)
set.seed(2019)
XGB_pred2 <- predict(xgb2, dtest2)
XGB_pred2 <- ifelse(XGB_pred2>=0.4,1,0)
# plot ROC (same fix as above: the "> auc(...)" console artifact removed)
pr <- prediction(XGB_pred2,new_test$Survived)
perf <- performance(pr,measure = "tpr",x.measure = "fpr")
plot(perf)
auc(new_test$Survived,XGB_pred2) # ~0.815
# Final predictions on the real test set using the 0.4 cutoff
set.seed(2019)
XGB_pred <- predict(xgb, dtest)
XGB_pred <- ifelse(XGB_pred>=0.4,1,0)
# Top-30 variable importances of the full model
xgb.importance(colnames(dtrain), model = xgb) %>%
  xgb.plot.importance(top_n = 30)
# Write the Kaggle submission file
submission_xgb <- read.csv('../input/sample_submission.csv')
submission_xgb$Survived <- XGB_pred
write.csv(submission_xgb, file='submission_xgb.csv', row.names = F)
|
e7406965208c2261593ec17b2e77d1434a5b9f2c
|
1ea4bb89856f1a7ef9eb0fc9bc98b3325a2e27f0
|
/man/assemble.data.Rd
|
8228cf6940f7dea569ab6444a585ed256fe17bf8
|
[] |
no_license
|
rxmenezes/SIM
|
f5c3b8eaf978d7cf12e83d1bb028fc7bc93fcf36
|
3642268b425e642633cdd5d85d152465732aa7b4
|
refs/heads/master
| 2021-04-21T03:12:54.246487
| 2020-03-30T16:18:09
| 2020-03-30T16:18:09
| 249,744,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,634
|
rd
|
assemble.data.Rd
|
\name{assemble.data}
\alias{assemble.data}
\title{Assemble the data to run the integrated analysis}
\description{
Assembles the dependent and independent data and annotation of the both data sets.
}
\usage{
assemble.data(dep.data,
indep.data,
dep.id = "ID",
dep.chr = "CHROMOSOME",
dep.pos = "STARTPOS",
dep.ann = NULL,
dep.symb,
indep.id = "ID",
indep.chr = "CHROMOSOME",
indep.pos = "STARTPOS",
indep.ann = NULL,
indep.symb,
overwrite = FALSE,
run.name = "analysis_results")
}
\arguments{
\item{dep.data}{The dependent data (\code{data.frame}), along with annotations.
Each row should correspond to one feature. The following columns are expected to exist,
and the column names should be inserted in the function.
\code{dep.id}: A unique identifier.
\code{dep.chr}: The number of the chromosome (1,2, ..., 22, X, Y).
\code{dep.pos}: The base pair position, relative to the chromosome.
\code{dep.symb}: Gene symbol (optional).
\code{dep.ann}: Annotation can be multiple columns.}
\item{indep.data}{\code{data.frame} The independent data, along with annotations.
Each row should correspond to one feature. The following columns are expected to exist,
and the column names should be inserted in the function.
\code{indep.id}: A unique identifier.
\code{indep.chr}: The number of the chromosome (1,2, ..., 22, X, Y).
\code{indep.pos}: The base pair position, relative to the chromosome.
\code{indep.symb}: Gene symbol (optional).
\code{indep.ann}: Annotation can be multiple columns.}
\item{dep.ann}{\code{vector} with either the names of the columns or the
column numbers in the dependent data that contain the annotation.}
\item{indep.ann}{\code{vector} with either the names of the columns or the
column numbers in the independent data that contain the annotation.}
\item{dep.id}{\code{vector} with the column name in the dependent
data that contains the ID. Will be used in the \link{sim.plot.zscore.heatmap} function.
Empty ID's will be substituted by \code{NA}.}
\item{dep.chr}{\code{vector} with column name in the dependent
data that contains the chromosome numbers.}
\item{dep.pos}{\code{vector} with the column name in the dependent data
that contains the position on the chromosome in bases.}
\item{dep.symb}{Optional, either missing or a single vector with the column name
in the dependent data that contains the symbols. Will be used in \link{sim.plot.zscore.heatmap}
as label.}
\item{indep.id}{\code{vector} with the column name in the independent
data that contains the ID. Will be used in the \link{sim.plot.zscore.heatmap} function.
Empty ID's will be substituted by \code{NA}.}
\item{indep.chr}{\code{vector} with the column name in the independent data
that contains the chromosome numbers.}
\item{indep.pos}{\code{vector} with the column name in the independent data
that contains the position on the chromosome in bases.}
\item{indep.symb}{Optional, either missing or a vector with the column name
in the dependent data that contains the Symbols. Will be used in \link{sim.plot.zscore.heatmap}
as label.}
\item{overwrite}{\code{logical}, indicate when a \code{run.name} is already present,
the results can be overwritten.}
\item{run.name}{Name of the analysis. The results will be
stored in a folder with this name in the current working directory
(use \code{getwd()} to print the current working directory).
If the missing, the default folder \code{"analysis_results"} will be generated.}
}
\details{
Based on the chromosome and probe position an absolute position is calculated according to
\eqn{chromosome number * 1e9 + probe position}. Chromosome column is converted to \code{factor} and releveled according to
the levels of the \link{chrom.table}, so the only levels allowed are \code{c(1:22, "X", "Y")}.
Currently only the human genome is supported, excluding mitochondrial DNA.
}
\value{
No values are returned. Instead, the datasets and annotation columns are stored in
separate files in the \code{data} folder in the directory specified in \code{run.name}.
If \code{assemble.data} has run successfully, the \link{integrated.analysis} function can be performed.
}
\author{Marten Boetzer, Melle Sieswerda, Renee X. de Menezes \email{R.X.Menezes@lumc.nl}}
\seealso{
\link{SIM}, \link{integrated.analysis}
}
\examples{
# Generate datasets and the samples to run the integrated analysis
set.seed(53245)
ngenes <- 100
nsamples <- 100
# generate copy number measurements
x <- matrix(rnorm(n = ngenes*nsamples), nrow = ngenes, ncol = nsamples)
# add mean shift effect for half of the samples, copy gain for 2nd half of the genes
x[ seq_len(ngenes/2), seq_len(nsamples/2)] <- x[ seq_len(ngenes/2), seq_len(nsamples/2)] + 2
# generate gene expression with normal distribution and mean equal to gene copy number
y <- rnorm(n = ngenes*nsamples, mean = matrix(x, nrow = ngenes*nsamples, ncol = 1), sd = 0.8)
y <- matrix(y, nrow = ngenes, ncol = nsamples)
samples <- paste0("S", seq_len(nsamples))
colnames(x) <- colnames(y) <- samples
# Making data objects
acgh.data <- data.frame(ID = paste0("G", seq_len(ngenes)),
CHROMOSOME = rep(1, ngenes),
STARTPOS = seq_len(ngenes)*12*10^5,
Symbol = paste0("Gene", seq_len(ngenes)),
x)
expr.data <- data.frame(ID = paste0("G", seq_len(ngenes)),
CHROMOSOME = rep(1, ngenes),
STARTPOS = seq_len(ngenes)*12*10^5,
Symbol = paste0("Gene", seq_len(ngenes)),
y)
#assemble the data
assemble.data(dep.data = acgh.data,
indep.data = expr.data,
dep.ann = colnames(acgh.data)[1:4],
indep.ann = colnames(expr.data)[1:4],
dep.id="ID",
dep.chr = "CHROMOSOME",
dep.pos = "STARTPOS",
dep.symb="Symbol",
indep.id="ID",
indep.chr = "CHROMOSOME",
indep.pos = "STARTPOS",
indep.symb="Symbol",
overwrite = TRUE,
run.name = "chr1p")
}
\keyword{manip}
|
11ffdb800549fc5f169b2cd909265286d65ca65c
|
ab74acf5ff0e3a2ad66b16f128efb8ed684b97da
|
/R/print.R
|
1da6c725a0914be08123bde233c73178a50ff619
|
[] |
no_license
|
lepennec/ggmap
|
559e3320682b5e19375276d2c6c780b87ef17d26
|
74781262cf2e8e2d829c12fc7e4848916487f4cb
|
refs/heads/master
| 2021-01-25T08:19:37.843241
| 2017-06-08T15:06:15
| 2017-06-08T15:06:15
| 93,755,505
| 3
| 2
| null | 2017-06-08T13:56:04
| 2017-06-08T13:56:04
| null |
UTF-8
|
R
| false
| false
| 825
|
r
|
print.R
|
#' Print a map
#'
#' Print a console description of a map
#'
#' @param x a ggmap raster object (with "source" and "maptype" attributes)
#' @param ... additional parameters passed on to \code{cat}
#' @usage \method{print}{ggmap}(x, ...)
#' @return Invisibly returns \code{x}.
#' @export
#' @examples
#'
#' get_map()
#' ggmap(get_map())
#'
#'
print.ggmap <- function(x, ...){
  n_row <- nrow(x)
  n_col <- ncol(x)
  src <- attr(x, "source")
  # Translate the internal source id into a human-readable name;
  # unrecognized sources are printed as-is (the default branch).
  src <- switch(src,
    google    = "Google Maps",
    stamen    = "Stamen Maps",
    osm       = "OpenStreetMap",
    cloudmade = "Cloudmade",
    naver     = "Naver Map",
    src
  )
  cat(paste0(n_row, "x", n_col, " ", attr(x, "maptype"),
    " map image from ", src, ".",
    " see ?ggmap to plot it.\n"), ...)
  invisible(x)
}
|
7a98f824a6750afe06e9b1ec93887dad6a964206
|
063933d69f6167115109ae09ddd9c07a60882ba0
|
/step 2.R
|
f118312dd69f833c3a423c5c004a3b77023d3313
|
[] |
no_license
|
maneeshvsharma/run_analysis
|
3b49f698d08c45cc80ee483ab9bbea3934bc65a4
|
a1d8fd2e2643383bf7fff0a365765c0bd26fdd48
|
refs/heads/master
| 2021-05-24T10:46:13.095066
| 2020-06-07T15:14:39
| 2020-06-07T15:14:39
| 253,525,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
step 2.R
|
# STEP 2 : Extracts only the measurements on the mean and standard deviation for each measurement.
# Keeps the subject and activity-code columns plus every column whose name
# contains "mean" or "std". Merged_Data is expected to exist from step 1.
TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))
|
a96e75bc63fcddd61e54ad728a0dcaaa5653241f
|
c11add087b282d8ccb64806c8e9ad89f467c8f20
|
/R/pars.R
|
f9cf6e64d1266ff95a8b04addf3cb17333c2212c
|
[
"MIT"
] |
permissive
|
mwpennell/tsr
|
5fb208ea841f84e4889a45baa4c9d5597e5caae5
|
01df4e33bc51333ec32270e2d9a583371c7a2f07
|
refs/heads/master
| 2021-03-22T01:09:22.553202
| 2016-06-29T22:39:00
| 2016-06-29T22:39:00
| 59,715,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,746
|
r
|
pars.R
|
#' Function for getting mass dependent paramaters
#' Allometric scalings of each model parameter with body mass M.
#' Constants from DeLong 2015
#' @export pars_mass
#'
pars_mass <- function(M){
  growth     <- 5.25 * M^(-0.2)        # intrinsic growth rate
  capacity   <- 5.83*10^8 * M^(-0.81)  # carrying capacity
  attack     <- 1.2*10^-6 * M^1        # attack rate
  mortality  <- 5.62 * M^(-0.29)       # mortality rate
  efficiency <- 2.16 * M^(-0.5)        # conversion efficiency
  list(r = growth, K = capacity, a = attack, m = mortality, e = efficiency)
}
#' Function for getting temp dependent parameters
#' Scales r, K, m and a with temperature t (deg C), standardized so each
#' parameter takes its reference value at t_stnd (default 15C).
#'
#' @export pars_temp
#'
pars_temp <- function(t, t_stnd=15){
  const <- temp_const()
  ref <- temp_stnd()
  list(r = r_temp(t, const, t_std = t_stnd, value_std = ref$r),
       K = K_temp(t, const, t_std = t_stnd, value_std = ref$K),
       m = m_temp(t, const, t_std = t_stnd, value_std = ref$m),
       a = a_temp(t, const, t_std = t_stnd, value_std = ref$a))
}
# Activation energies (eb/em/es), velocity activation energies (ev_*),
# Boltzmann's constant k (eV/K) and velocity constants used by the
# temperature-scaling helpers below.
temp_const <- function(){
  list(eb = 0.32, em = 0.65, es = 0.9,
       ev_c = 0.46, ev_r = 0.46,
       k = 8.62*10^-5,
       v_c = 1, v_r = 1)
}
# Reference parameter values at the standardization temperature.
temp_stnd <- function(){
  list(r = 2, K = 100, m = 0.6, a = 1)
}
# Arrhenius-style temperature scalings. Each *_temp function rescales
# value_std (the parameter value at reference temperature t_std, deg C)
# to temperature t via the ratio of its Boltzmann factor tfx_*.
r_temp <- function(t, x, t_std, value_std) {
  value_std / tfx_r(x, t_std) * tfx_r(x, t)
}
tfx_r <- function(x, t) {
  exp(-x$eb / x$k / (273.15 + t))
}
K_temp <- function(t, x, t_std, value_std) {
  value_std / tfx_K(x, t_std) * tfx_K(x, t)
}
tfx_K <- function(x, t) {
  exp(x$eb / x$k / (273.15 + t) - x$es / x$k / (273.15 + t))
}
m_temp <- function(t, x, t_std, value_std) {
  value_std / tfx_m(x, t_std) * tfx_m(x, t)
}
tfx_m <- function(x, t) {
  exp(-x$em / x$k / (273.15 + t))
}
a_temp <- function(t, x, t_std, value_std) {
  value_std / tfx_a(x, t_std) * tfx_a(x, t)
}
tfx_a <- function(x, t) {
  # Combined consumer/resource velocity scaling (root-sum-of-squares).
  sqrt(x$v_c^2 * exp(-2 * x$ev_c / x$k / (273.15 + t)) +
       x$v_r^2 * exp(-2 * x$ev_r / x$k / (273.15 + t)))
}
|
5821e315889fce2bb021b1cb09b7f98ae13ea50c
|
b26ccc28d443c0d3d57f2aabf0126883d273dda9
|
/Scripts/evaluateOptimizedLearnerKNN.R
|
0e2e091133e9aee2fa8163cd59fd39f98962e676
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
gkampolis/WandererID
|
33ac0b655c3e3c3bd62fe34a3251ccce9571cab6
|
2b81405d80aeae0ddbef1ead34b6170942740808
|
refs/heads/master
| 2020-08-07T23:10:30.973281
| 2019-11-29T21:50:50
| 2019-11-29T21:50:50
| 213,616,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,015
|
r
|
evaluateOptimizedLearnerKNN.R
|
# Intro Comments ----------------------------------------------------------
# Purpose: Script to evaluate k-NN learners with optimal
# hyperparameters, as determined in previous steps.
# Author: Georgios Kampolis
# For: Marine Scotland Science
# Comments: As part of RGU MSc course in Data Science
# Details: This script is called by Main.R in the root folder of the project.
# NOTE: `seed`, the three knnOptLearner* objects, `dataTask`,
# `resamplingSchemeEval` and `measuresSetEval` are all created upstream
# by Main.R before this script is sourced.
# Max Accuracy hyperparameters --------------------------------------------
set.seed(seed = seed)
## Evaluate via resampling (mlr); same scheme/measures for all three learners
knnEvalResultsMaxAccParams <- resample(
  learner = knnOptLearnerMaxAccParams,
  task = dataTask,
  resampling = resamplingSchemeEval,
  measures = measuresSetEval,
  show.info = FALSE
)
## Save results (uncompressed for faster read-back)
saveRDS(knnEvalResultsMaxAccParams, compress = FALSE,
        file = here::here(
          "ModelFiles",
          "knnOptLearnerEvalResultsMaxAccParams.rds"
        )
)
# Max Balanced Accuracy hyperparameters -----------------------------------
set.seed(seed = seed)
## Evaluate
knnEvalResultsMaxBalAccParams <- resample(
  learner = knnOptLearnerMaxBalAccParams,
  task = dataTask,
  resampling = resamplingSchemeEval,
  measures = measuresSetEval,
  show.info = FALSE
)
## Save results
saveRDS(knnEvalResultsMaxBalAccParams, compress = FALSE,
        file = here::here(
          "ModelFiles",
          "knnOptLearnerEvalResultsMaxBalAccParams.rds"
        )
)
# Min Logloss hyperparameters ---------------------------------------------
set.seed(seed = seed)
## Evaluate
knnEvalResultsMinLoglossParams <- resample(
  learner = knnOptLearnerMinLoglossParams,
  task = dataTask,
  resampling = resamplingSchemeEval,
  measures = measuresSetEval,
  show.info = FALSE
)
## Save results
saveRDS(knnEvalResultsMinLoglossParams, compress = FALSE,
        file = here::here(
          "ModelFiles",
          "knnOptLearnerEvalResultsMinLoglossParams.rds"
        )
)
# Notify ------------------------------------------------------------------
# Audible + console notification for long-running batch execution.
beepr::beep(1)
print("k-NN evaluation results are saved!")
|
2cf09d1eed0b5c779f695b4201febad699bce624
|
3d3562e615d992c00725ab86a8ab09a9d140a487
|
/R/authors.R
|
2d13e142bbd5997e2bbc659f35fbe459b5667a74
|
[
"CC0-1.0"
] |
permissive
|
bldavies/motuwp
|
6169c54adbe43389acf5add32203881722b4b92a
|
a0a7e0238b65ea010a532729d92804419a10a41f
|
refs/heads/master
| 2021-06-19T18:13:46.985079
| 2020-12-26T17:48:19
| 2020-12-26T17:48:19
| 138,146,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
r
|
authors.R
|
#' Motu working paper authors
#'
#' Data frame containing paper-author correspondences,
#' one row per (working paper, author) pair.
#'
#' @docType data
#'
#' @name authors
#'
#' @usage data(authors)
#'
#' @format Data frame with columns
#' \describe{
#'   \item{number}{Working paper number}
#'   \item{author}{Author name}
#' }
#'
#' @source \href{https://motu.nz/resources/working-papers/}{Motu Economic and Public Policy Research}
"authors"
|
b69703f9e871b23129a2827db88b9054ecb495f3
|
8a2a112e9a76b7dc751c5a75f88cfe5ec7b6f89f
|
/code/plots.R
|
d78a91e0f7a6660a2fa2fd5da0d11e070d958a8a
|
[] |
no_license
|
YameiW/Imitation-experiment
|
9c2f89e6ceacc9c4e22735d82d952f33c87b91a9
|
04f4e5d6160695828deacc5deebf6b1c97545764
|
refs/heads/master
| 2021-02-07T19:01:35.071354
| 2020-03-01T02:47:50
| 2020-03-01T02:47:50
| 244,065,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,343
|
r
|
plots.R
|
library(lme4)
library(lmerTest)
library(plyr)
library(reshape)
library(ggplot2)
library(moments) # for skewness and kurtosis
library(ggrepel)
library(ggsignif)
# Load the VOT imitation dataset and prepare variables for the mixed model.
dataset <- read.csv("dataset_1.csv")
names(dataset) <- c("word","block","id","cond","order","word_duration","vot","mem_1","mem_2")
dataset$id <- as.factor(dataset$id)
dataset$order <- as.numeric(dataset$order)
# Remainder of the word after removing the VOT, converted ms -> s.
dataset$word_rest <- dataset$word_duration-dataset$vot
dataset$word_rest=dataset$word_rest/1000
# Sum-code the two-level factors (+/- .5) so fixed effects are main effects.
contrasts(dataset$cond) <- c(.5,-.5)
contrasts(dataset$block) <- c(-.5,.5)
# Final mixed model: VOT ~ condition + block + word remainder + trial order,
# with by-word and by-participant random slopes; bobyqa aids convergence.
model.final <- lmer(vot~cond+block+word_rest+order
                    +(1+cond+block|word)+(1+block+word_rest+order|id),
                    control=lmerControl(optimizer = "bobyqa",
                                        optCtrl = list(maxfun=1e5)),
                    data=dataset,REML=FALSE)
summary(model.final)
# plot for conds: mean VOT per condition with standard-error bars
data_1 <- aggregate(dataset$vot,by=list(dataset$cond),FUN=function(x) c(mean=mean(x),sd=sd(x),n=length(x)))
data_1 <- do.call(data.frame, data_1)
# NOTE(review): these SEs are hard-coded (values look pasted from earlier
# output) instead of computed as x.sd/sqrt(x.n) like data_2 below --
# verify the numbers still match the current dataset.
data_1$se <- c(23.12673/sqrt(1990.00000),19.63133/sqrt(1905.00000))
colnames(data_1) <- c("cond","mean","sd","n","se")
data_1$cond <- revalue(data_1$cond,c("g1"="meaning","g2"="no meaning"))
data_1 <- data_1[order(data_1$cond),]
limits1 <- aes(ymin=data_1$mean-data_1$se,
               ymax=data_1$mean+data_1$se)
# (typo "Condtions" in the x-axis label left as-is: it is a runtime string)
ggplot(data_1,aes(x=cond,y=mean,fill=cond))+geom_bar(stat="identity",width=.5)+geom_errorbar(limits1,width=.2,color="black")+
  scale_y_continuous(breaks = seq(0,85,10))+
  labs(x="Condtions",y="Mean VOT (ms)",fill="Conditions")+
  theme(legend.position="none")+
  ylim(0,85)
# plot for block: mean VOT per block, with significance bracket
data_2 <- aggregate(dataset$vot,by=list(dataset$block),FUN=function(x) c(mean=mean(x),sd=sd(x),n=length(x)))
data_2 <- do.call(data.frame, data_2)
data_2$se <- data_2$x.sd/sqrt(data_2$x.n)
colnames(data_2) <- c("block","mean","sd","n","se")
data_2$block <- revalue(data_2$block,c("a"="baseline","b"="post-shadowing"))
data_2 <- data_2[order(data_2$block),]
limits2 <- aes(ymin=data_2$mean-data_2$se,
               ymax=data_2$mean+data_2$se)
# p-value annotation taken from the mixed-model block effect
anno="p=0.00313 **"
ggplot(data_2,aes(x=block,y=mean,fill=block))+geom_bar(stat="identity",width=.5)+geom_errorbar(limits2,width=.2,color="black")+
  scale_y_continuous(breaks = seq(0,85,10))+
  labs(x="Blocks",y="Mean VOT (ms)",fill="Blocks")+
  theme(legend.position="none")+
  ylim(0,85)+ geom_signif(annotation=formatC(anno, digits=1),
                          y_position=80, xmin=1, xmax=2,
                          tip_length = c(0.2, 0.1))
# plot for interaction between cond and block (grouped bars)
data_3 <- aggregate(dataset$vot,by=list(dataset$block,dataset$cond),FUN=function(x) c(mean=mean(x),sd=sd(x),n=length(x)))
data_3 <- do.call(data.frame, data_3)
data_3$se <- data_3$x.sd/sqrt(data_3$x.n)
colnames(data_3) <- c("block","cond","mean","sd","n","se")
data_3$block <- revalue(data_3$block,c("a"="baseline","b"="post-shadowing"))
data_3$cond <- revalue(data_3$cond,c("g1"="meaning","g2"="no meaning"))
ggplot(data_3, aes(x=cond, y=mean, fill=block)) +
  geom_bar(position=position_dodge(), stat="identity", colour='black') +
  geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.2,position=position_dodge(.9))+
  scale_y_continuous(breaks = seq(0,85,10))+
  ylim(0,85)+
  labs(x="Conditions",y="Mean VOT (ms)",fill="Blocks")+
  theme(legend.position="top")
# interaction plot (lines across blocks, one line per condition)
head(data_3)
ggplot(data_3)+aes(x=block, y=mean,color=cond)+geom_line(aes(group=cond))+geom_point()+
  geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.05)+
  labs(x="Blocks",y="Mean VOT (ms)",color="Conditions")+
  theme(legend.position="top")
# plot for individual variability for different conditions:
# per-participant baseline vs post-shadowing means; points above the
# dashed identity line lengthened their VOTs after shadowing.
data_4 <- dataset[,c(2,3,4,7)]
data_4 <- cast(data_4,id+cond~block,mean)
colnames(data_4) <- c("participant","cond","baseline","post_shadowing")
data_4$cond <- revalue(data_4$cond,c("g1"="meaning","g2"="no meaning"))
ggplot(data_4,aes(x=baseline, y=post_shadowing,color=cond))+ geom_point(size=2)+
  geom_abline(slope=1,intercept=0,linetype="dashed")+
  geom_text(label=data_4$participant,color="black", size=3,hjust=0, vjust=0)+
  theme(aspect.ratio = 1)+
  xlim(40,130)+
  ylim(40,130)+
  labs(x="Baseline Mean VOTs (ms)",y="Post-shadowing Mean VOTs (ms)",color="Conditions")
colnames(data_4)[1] <- "id"
data_5 <- melt(data_4,id="id")
summary(data_5)
summary(data_4)
# Relative VOT change per participant, plus distribution shape statistics
data_4$change <- (data_4$post_shadowing-data_4$baseline)/data_4$baseline
mean(data_4$change)
median(data_4$change)
skewness(data_4$change)
kurtosis(data_4$change)
data_6 <- aggregate(dataset$word_duration,by=list(dataset$id),FUN=function(x) c(mean=mean(x),sd=sd(x),n=length(x)))
summary(data_6)
# plot for individual words: per-word baseline vs post-shadowing means
data_7 <- aggregate(dataset$vot, by=list(dataset$word,dataset$block),FUN=mean)
colnames(data_7) <- c("word","block","mean VOT")
data_7 <- cast(data_7,word~block)
colnames(data_7) <- c("word","baseline","post_shadowing")
ggplot(data_7,aes(x=baseline, y=post_shadowing))+ geom_point(size=2,color="red")+
  geom_abline(slope=1,intercept=0,linetype="dashed")+
  geom_text_repel(label=data_7$word,color="black",size=3)+
  theme(aspect.ratio = 1)+
  xlim(40,130)+
  ylim(40,130)+
  labs(x="Baseline Mean VOTs (ms)",y="Post-shadowing Mean VOTs (ms)",color="Conditions")
summary(data_7)
data_7$change=(data_7$post_shadowing-data_7$baseline)/data_7$baseline
mean(data_7$change)
median(data_7$change)
skewness(data_7$change)
kurtosis(data_7$change)
hist(data_7$change)
# Same per-word comparison, now faceted by condition
data_8 <- aggregate(dataset$vot, by=list(dataset$word,dataset$block,dataset$cond),FUN=mean)
colnames(data_8) <- c("word","block","cond","vot")
data_8 <- cast(data_8,word+cond~block, mean)
data_8$cond <- revalue(data_8$cond,c("g1"="meaning","g2"="no meaning"))
colnames(data_8) <- c("word","cond","baseline","post_shadowing")
ggplot(data_8,aes(x=baseline, y=post_shadowing))+ geom_point(size=2,color="red")+
  geom_abline(slope=1,intercept=0,linetype="dashed")+
  theme(aspect.ratio = 1)+
  xlim(40,100)+
  ylim(40,100)+
  labs(x="Baseline Mean VOTs (ms)",y="Post-shadowing Mean VOTs (ms)",color="Conditions")+
  geom_text_repel(label=data_8$word,color="black",size=3)+
  facet_grid(.~cond)
data_8$change <-(data_8$post_shadowing-data_8$baseline)/data_8$baseline
mean(data_8$change)
median(data_8$change)
skewness(data_8$change)
kurtosis(data_8$change)
hist(data_8$change)
summary(data_8)
|
1a3a747544757a4ba75c8673a45cd9864310300f
|
8bbaddc6aa715fee0fc3a2d29a1edeeb78460bed
|
/Practical 3/script.R
|
ce5fc43275f4a424ca991633c1be7d23322cb922
|
[] |
no_license
|
gabrielmpp/stats_UniReading_2021
|
9545f799601b37966afc351d47e8cb3304192193
|
fa6ede1691399fd83f6215f037eb670dda3ea942
|
refs/heads/main
| 2023-04-01T01:53:13.511406
| 2021-03-28T09:41:58
| 2021-03-28T09:41:58
| 328,645,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,357
|
r
|
script.R
|
library(ggplot2)
library(dplyr)
# Forecast accuracy from a 2x2 contingency table of logical vectors:
# (hits + correct negatives) / total number of cases.
accuracy <- function(obs, fcast){
  hits          <- sum(obs & fcast)    # event observed and forecast
  false_alarms  <- sum(!obs & fcast)   # forecast but not observed
  misses        <- sum(obs & !fcast)   # observed but not forecast
  correct_negs  <- sum(!obs & !fcast)  # neither observed nor forecast
  (hits + correct_negs) / (hits + false_alarms + misses + correct_negs)
}
# Data ingestion: 24h and 72h Met Office temperature forecasts for 2012.
data24 <- read.csv('data/temps2012_24hr.csv')
data72 <- read.csv('data/temps2012_72hr.csv')
# Keep only days with a valid observation and both forecasts present.
good_days <-!(is.na(data24$obs) | is.na(data24$fcst) | is.na(data72$fcst))
data24<-data24[good_days,]
data72<-data72[good_days,]
# Plotting time series: observations (black) vs 24h (blue) and 72h (red) forecasts
plot(data24$obs, type='l', main='MetOffice temperature forecast',
     xlab='Day', ylab='Temperature (deg Celsius)')
lines(data24$fcst, col='blue')
lines(data72$fcst, col='red')
legend(x='topright', legend=c('Obs', 'Pred 24h', 'Pred 72h'),
       col=c('black', 'blue', 'red'), lty=c(1,1,1))
# Checking Bias: sum of forecast-minus-observation errors per lead time
bias24 <- data24$fcst - data24$obs
bias72 <- data72$fcst - data24$obs
sum(bias24)
sum(bias72)
# Checking sharpness: do forecast spreads match the observed spread?
sd(data24$fcst)
sd(data72$fcst)
sd(data24$obs)
hist(data24$fcst)
hist(data24$obs)
# Checking accuracy: correlation and root-mean-square error per lead time
cor(data24$obs, data24$fcst)
cor(data24$obs, data72$fcst)
rmse24 <- sqrt(mean((data24$obs-data24$fcst)^2))
rmse72 <- sqrt(mean((data24$obs-data72$fcst)^2))
# Contingency table: binary "warm day" event above the threshold,
# scored with the accuracy() helper defined above.
thres = 19.5
obs_warm = (data24$obs > thres)
f24_warm = (data24$fcst > thres)
f72_warm = (data72$fcst > thres)
acc24 = accuracy(obs_warm, f24_warm)
acc72 = accuracy(obs_warm, f72_warm)
|
26a7866b7806c2aca8a79836dc21cc7651df8b63
|
72186ca45fef2d8ffe88dcd9ab44265c293cb4c4
|
/Data/DataAlreadyUploadedToEDI/EDIProductionFiles/MakeEMLCTD/CTD_EDI_2022/Old/2019/MakeEMLCTD.R
|
6abcc3a96c2fdb513ca4bd0530883b8379d2e42c
|
[] |
no_license
|
CareyLabVT/Reservoirs
|
a4b3d025ade448cb54dd7a7c1d91fea5ff0368b6
|
2b6d62376d24e9e24d1841b83ebead8a21d57aa0
|
refs/heads/master
| 2023-08-17T10:28:24.450403
| 2023-08-10T18:23:14
| 2023-08-10T18:23:14
| 91,721,780
| 2
| 8
| null | 2023-09-11T16:08:55
| 2017-05-18T17:43:27
|
HTML
|
UTF-8
|
R
| false
| false
| 1,847
|
r
|
MakeEMLCTD.R
|
# Script to build an EML metadata package for the CTD dataset using
# EMLassemblyline, for publication to the Environmental Data Initiative (EDI).
# NOTE(review): install.packages()/install_github() run on every execution;
# consider guarding them so the script can be re-run without reinstalling.
# Install and load devtools
install.packages("devtools")
library(devtools)
# Install and load EMLassemblyline
install_github("EDIorg/EMLassemblyline")
library(EMLassemblyline)
# Import Templates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import_templates(path = "C:/Users/Owner/Dropbox/MakeEMLCTD",
                 license = "CCBY",
                 data.files = "CTD_Meta_13_18_final.csv")
#Geographic coverage (writes an empty template to be filled in by hand)
template_geographic_coverage(path = getwd(),
                             data.path = getwd(),
                             data.table = "chemistry.csv",
                             empty = TRUE,
                             write.file = TRUE)
# Define Categorical Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#template_categorical_variables(path = "C:/Users/Owner/Dropbox/MakeEMLCTD")
# NOTE(review): the "??" line below is a leftover interactive help search.
??template_geographic_coverage
# Make the EML for EDI ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE(review): "path =" and "data.table =" are left blank (missing
# arguments) -- TODO fill these in before running.
make_eml(path = ,
         dataset.title = "Time series of high-frequency profiles of depth, temperature, dissolved oxygen, conductivity, specific conductivity, chlorophyll a, turbidity, pH, oxidation-reduction potential, photosynthetic active radiation, and descent rate for Beaverdam Reservoir, Carvins Cove Reservoir, Falling Creek Reservoir, Gatewood Reservoir, and Spring Hollow Reservoir in Southwestern Virginia, USA 2013-2018",
         data.table = ,
         data.table.description = c("Reservoir CTD dataset"),
         temporal.coverage = c("2013-03-07", "2019-12-16"),
         geographic.description = "Southwestern Virginia, USA, North America",
         maintenance.description = "ongoing",
         user.domain = "EDI",
         user.id = "ccarey",
         package.id = "edi.200.7")
|
d01807d4e557177fc50a74b011ba0b4a96f64dc1
|
8ab0f0ed758c2f5459647b650b9a7c75c4d63da1
|
/Ch04/4_1_Operator.R
|
94dea4a0061943cfbeff4db88f52a7dcc475aa7e
|
[] |
no_license
|
HJCHOI910828/bigdata
|
4c67f57daf1bfe707132f6ea85f2212d41226430
|
28641c41cf39e98931e468f7d25340966a7cb7a7
|
refs/heads/master
| 2023-06-26T09:42:56.125326
| 2021-07-29T00:23:41
| 2021-07-29T00:23:41
| 390,547,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 699
|
r
|
4_1_Operator.R
|
#p.108 Arithmetic operator practice
num1 <- 100
num2 <- 20
result <- num1 + num2
result
result
result <- num1 - num2
result
result <- num1 * num2
result
result <- num1 / num2
result
result <- num1 %% num2   # modulo (remainder)
result
result <- num1^2
result
result <- num1^num2
result
#p.109 Relational (comparison) operator practice
boolean <- num1 == num2
boolean
boolean <- num1 != num2
boolean
boolean <- num1 > num2
boolean
boolean <- num1 >= num2
boolean
boolean <- num1 < num2
boolean
boolean <- num1 <= num2
boolean
#p.109 Logical operator practice
logical <- num1 >= 50 & num2 <= 10
logical
logical <- num1 >= 50 | num2 <= 10
logical
logical <- num1 >= 50
logical
logical <- !(num1 >= 50)
logical
x <- TRUE;y<- FALSE
xor(x,y)   # exclusive OR: TRUE when exactly one operand is TRUE
|
f0324825b395fe78cf6a176f98f3b11735190f80
|
150ddbd54cf97ddf83f614e956f9f7133e9778c0
|
/man/color_conv.Rd
|
85b5adfef318184f3d075d874077cc540e5e64ab
|
[
"CC-BY-4.0"
] |
permissive
|
debruine/webmorphR
|
1119fd3bdca5be4049e8793075b409b7caa61aad
|
f46a9c8e1f1b5ecd89e8ca68bb6378f83f2e41cb
|
refs/heads/master
| 2023-04-14T22:37:58.281172
| 2022-08-14T12:26:57
| 2022-08-14T12:26:57
| 357,819,230
| 6
| 4
|
CC-BY-4.0
| 2023-02-23T04:56:01
| 2021-04-14T07:47:17
|
R
|
UTF-8
|
R
| false
| true
| 2,017
|
rd
|
color_conv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color_conv.R
\name{color_conv}
\alias{color_conv}
\title{Convert colors}
\usage{
color_conv(
color,
alpha = 1,
from = c("guess", "col", "hex", "hexa", "hex3", "rgb", "rgba", "lab"),
to = c("hexa", "hex", "rgba", "rgb", "lab", "hsv")
)
}
\arguments{
\item{color}{A color in one of the input formats (see Details)}
\item{alpha}{Alpha transparency (values <=1 converted to 0-255); ignored if color has alpha already}
\item{from, to}{Input and output color spaces, see \code{Details} below.}
}
\value{
color in \code{to} format
}
\description{
Convert from common color inputs to specified output type, adding alpha transparency for output formats that support it (hexa, rgba).
}
\details{
\itemize{
\item color: one of the R colours listed in \code{\link[grDevices:colors]{grDevices::colors()}}, e.g., "red"
\item hex: hexadecimal string, e.g., "#FF0000"
\item hexa: hexadecimal string with alpha, e.g., "#FF0000FF"
\item hex3: abbreviated hexadecimal string, e.g., "#F00"
\item rgb: vector of red, green and blue values 0-255, e.g., c(255, 0, 0)
\item rgba: vector of red, green, blue and alpha values 0-255, e.g., c(255, 0, 0, 255)
\item lab: CIE-Lab color
\item hsv: vector of hue, saturation and value values (0-1), e.g., c(h=0, s = 1, v = 1)
}
}
\examples{
# different ways to input red
color_conv("red")
color_conv("#FF0000")
color_conv("#FF0000FF")
color_conv(c(255,0,0))
color_conv("rgb(255,0,0)") # you can use CSS-style text
color_conv(c(255,0,0,255))
# Lab must have names or use text format to be guessed
color_conv(c(l = 53.2, a = 80.1, b = 67.2))
color_conv("lab(53.2,80.1,67.2)")
# else, it will be guessed as rgb; fix by setting from explicitly
color_conv(c(53.2, 80.1, 67.2))
color_conv(c(53.2, 80.1, 67.2), from = "lab")
# add 50\% alpha transparency to dodgerblue
color_conv("dodgerblue", alpha = 0.5, to = "rgba")
}
\seealso{
Other color:
\code{\link{col2lab}()},
\code{\link{lab2rgb}()}
}
\concept{color}
|
8473b4454de4c1c6856126ae3130333d9a2ff201
|
0ea20d932c43531b3a75563feb5fcc0541b6c329
|
/plot1.R
|
8486b55df990e5f9b5ea968066bf9f5cc05b39ec
|
[] |
no_license
|
rleonen/ExData_Plotting1
|
1e859076dedeadbe5b67d50038dc44aa0aa8d36d
|
b49cd12501785c2a17088fb4fe8010cee90c1ad0
|
refs/heads/master
| 2021-01-18T09:57:01.854209
| 2015-03-08T15:50:35
| 2015-03-08T15:50:35
| 31,741,288
| 0
| 0
| null | 2015-03-05T22:54:40
| 2015-03-05T22:54:40
| null |
UTF-8
|
R
| false
| false
| 878
|
r
|
plot1.R
|
# This script must execute in the same directory as the data file
# 'household_power_consumption.txt'. The data file may be downloaded from
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# The chron library provides times(), used for the Time column below.
library(chron)

# Read the entire data set. Fields are ';'-separated with '.' as the decimal
# mark; missing values are coded as '?'.
# FIX: the original passed sep = c(";", "."), but read.table accepts only a
# single separator character -- the decimal mark belongs in `dec`.
hpc <- read.table("household_power_consumption.txt",
                  sep = ";",
                  dec = ".",
                  header = TRUE,
                  na.strings = "?")

# Convert hpc$Date and hpc$Time to proper date/time types.
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
hpc$Time <- times(hpc$Time)

# Subset to 2007-02-01 and 2007-02-02.
hpc2 <- hpc[hpc$Date == as.Date("2007-02-01") | hpc$Date == as.Date("2007-02-02"), ]

# Histogram of global active power for those two days.
png(file = "plot1.png")
hist(hpc2$Global_active_power,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power",
     ylim = c(0, 1200))
dev.off()
|
681e6b0fade01eaf7e3b6678a66f9c3777737520
|
ffe512523341b570fcd4c4756008ca213ed126c4
|
/R/flow_evaluate.R
|
afedfde44de8b58676fd1866ddd04d3a54913f61
|
[] |
no_license
|
ahorawzy/TFTSA
|
a13e357d675a6bf43c41b091705b87daa5af4497
|
943c97373e384f5c0b175d2fd1d349be2fea927d
|
refs/heads/master
| 2020-03-18T17:21:22.691236
| 2019-04-23T10:43:57
| 2019-04-23T10:43:57
| 135,023,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
r
|
flow_evaluate.R
|
#' Evaluate the forecast result.
#'
#' This function evaluates the forecast result in several evaluation
#' criterions: mean squared error, root mean squared error, mean absolute
#' error and an asymmetrically weighted MSE.
#'
#' @param rv real value of traffic flow, a numeric vector (e.g. a row from a
#'   dataframe);
#' @param pv forecast value, can be the return value of flow_knn function,
#'   a numeric vector of the same length as \code{rv};
#' @param method can be "mse", "rmse", "mae", "imse" or "all" (default);
#'   "all" returns a one-row data frame containing every measure.
#' @return a single numeric value, or a data frame when \code{method = "all"}.
#' @export
flow_evaluate <- function(rv, pv, method = "all") {
  n <- length(rv)
  z <- rv - pv
  mse <- 1 / n * sum(z^2)
  rmse <- sqrt(1 / n * sum(z^2))
  mae <- 1 / n * sum(abs(z))
  # Asymmetric MSE: errors with z > 0 (real value above forecast) are
  # weighted 1.5, the rest 0.5.
  imse <- 1 / n * sum(ifelse(z > 0, 1.5, 0.5) * z^2)
  # FIX: the original computed imse but had no branch returning it, so
  # method = "imse" failed with "object 'result' not found"; unknown
  # methods now raise an explicit error instead of the same failure.
  switch(method,
         all = data.frame(mse, rmse, mae, imse),
         mse = mse,
         rmse = rmse,
         mae = mae,
         imse = imse,
         stop("unknown method: ", method))
}
|
fd7d5707e4359988e0a5b349d4d85c9128bc646f
|
782deb74bdc9affb2199cf41fa52827d81ac070f
|
/run_analysis.R
|
3a47589852f4328137c2b34cc70b696268503e7d
|
[] |
no_license
|
rkeijser/Getting-and-Cleaning-Data
|
bc15daeec24db98f8f865f77b8f26a2e0a4daac7
|
5c69b450961d140653e8f145be69d3e574f094af
|
refs/heads/master
| 2021-01-10T05:13:03.187582
| 2015-09-26T12:42:25
| 2015-09-26T12:42:25
| 43,201,137
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,957
|
r
|
run_analysis.R
|
# Build a tidy data set from the UCI HAR Dataset.
# The working directory must be the main folder of the UCI HAR Dataset.

# Install dplyr only when it is missing, then load it.
if (!requireNamespace("dplyr", quietly = TRUE)) {
  install.packages("dplyr")
}
library("dplyr")

# Import the variable name of each feature-vector component.
features <- read.table(file="features.txt", header=FALSE, dec=".")
# Import activity labels (id -> activity name).
activity_labels <- read.table(file="activity_labels.txt", header=FALSE, dec=".")

# Import the training data set.
subject_id_train <- read.table(file="train/subject_train.txt", header=FALSE, dec=".")
X_train <- read.table(file="train/X_train.txt", header=FALSE, dec=".")
y_train <- read.table(file="train/y_train.txt", header=FALSE, dec=".")

# Add subject and training label columns to X_train.
colnames(X_train) <- as.factor(features[,2])
X_train$subject <- as.numeric(subject_id_train[,1])
X_train$training.label <- as.numeric(y_train[,1])

# Import the test data set.
subject_id_test <- read.table(file="test/subject_test.txt", header=FALSE, dec=".")
X_test <- read.table(file="test/X_test.txt", header=FALSE, dec=".")
y_test <- read.table(file="test/y_test.txt", header=FALSE, dec=".")

# Add subject and training label columns to X_test.
colnames(X_test) <- as.factor(features[,2])
X_test$subject <- as.numeric(subject_id_test[,1])
# FIX: the original did `X_test <- as.numeric(y_test[,1])`, replacing the
# whole test data frame with the label vector; assign it as a column instead.
X_test$training.label <- as.numeric(y_test[,1])

# Merge the training and test data sets.
DATASET <- rbind(X_train, X_test)

# Subset the merged data set on the mean and std of each measurement.
meanStdColumns <- grep("mean|std", features$V2, value = FALSE)
meanStdColumnsNames <- grep("mean|std", features$V2, value = TRUE)
DATASET.SUBSET <- DATASET[,c(meanStdColumns)]
DATASET.SUBSET$subject <- DATASET$subject
DATASET.SUBSET$training.label <- DATASET$training.label
TIDY.DATASET <- DATASET.SUBSET

# Descriptive variable names: lowercase, strip parentheses, dots for
# separators; then map numeric activity ids to descriptive names.
names(TIDY.DATASET) <- tolower(names(TIDY.DATASET))
names(TIDY.DATASET) <- gsub("\\)","", names(TIDY.DATASET))
names(TIDY.DATASET) <- gsub("\\(","", names(TIDY.DATASET))
names(TIDY.DATASET) <- gsub("-",".", names(TIDY.DATASET))
names(TIDY.DATASET) <- gsub("_",".", names(TIDY.DATASET))
# Activity names contain no digits, so sequential substitution is safe.
for (i in seq_len(nrow(activity_labels))) {
  TIDY.DATASET$training.label <- sub(i, activity_labels[i,2], TIDY.DATASET$training.label)
}
write.table(TIDY.DATASET, file = "TIDY.DATASET.txt", row.names=FALSE, sep="\t")

# Second independent data set with the average of each variable for each
# activity and each subject.
# NOTE(review): summarise_each()/funs() are deprecated in newer dplyr;
# kept for compatibility with the version this script was written against.
TIDY.DATASET2 <- TIDY.DATASET %>% group_by(subject, training.label) %>% summarise_each(funs(mean))
write.table(TIDY.DATASET2, file = "TIDY.DATASET2.txt", row.names=FALSE, sep="\t")
|
87bc28f325b51d8d31582b35cc8ae3e943604b50
|
66317f3e1ba137b5a16e339e358350587cc8ad85
|
/tests/testthat/test_calc_egfr_cystatin.R
|
07e57a75f8c8652c4230902c02c99212d69dcdae
|
[] |
no_license
|
cran/clinPK
|
2dd3c8f90cc711186003a47a229c2153f272be8f
|
935e5cd7f2e0877814a1ce7347da48fc0ffda0a6
|
refs/heads/master
| 2022-05-16T16:11:20.119203
| 2022-05-09T07:10:05
| 2022-05-09T07:10:05
| 94,758,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 462
|
r
|
test_calc_egfr_cystatin.R
|
test_that("calculate egfr based on cystatin works", {
  # Default method across the three supported output units.
  egfr_ml_min <- calc_egfr_cystatin(1.3, unit_out = "ml/min")$value
  expect_equal(round(egfr_ml_min, 1), 54.1)

  egfr_l_hr <- calc_egfr_cystatin(1.3, unit_out = "l/hr")$value
  expect_equal(round(egfr_l_hr, 2), 3.24)

  egfr_ml_hr <- calc_egfr_cystatin(1.3, unit_out = "ml/hr")$value
  expect_equal(round(egfr_ml_hr, 1), 3244.1)

  # Alternative estimation method ("larsson").
  egfr_larsson <- calc_egfr_cystatin(1.3, unit_out = "ml/min", method = "larsson")$value
  expect_equal(round(egfr_larsson, 1), 55.5)
})
|
6d2a29ed78f54b8450f3514e1f607df6668bc1cd
|
a3af6d05aa75a1508e38d3be453f0495758e39be
|
/R/zchunk_batch_Fert_USA_xml.R
|
569030fdcc982eb5e9d12c161401b97e164b35f0
|
[] |
no_license
|
shaohuizhang/gcamdata
|
541be0be6f4751cc0fc6be635db12ebe443431e2
|
d4cbefc16b42844bd92b1f5cf671089643c3eed4
|
refs/heads/master
| 2020-03-12T06:01:40.880740
| 2018-04-20T12:20:16
| 2018-04-20T12:20:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,558
|
r
|
zchunk_batch_Fert_USA_xml.R
|
#' module_gcamusa_batch_Fert_USA_xml
#'
#' Construct XML data structure for \code{Fert_USA.xml}.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{Fert_USA.xml}. The corresponding file in the
#' original data system was \code{batch_Fert_USA_xml.R} (gcamusa XML).
module_gcamusa_batch_Fert_USA_xml <- function(command, ...) {
  if (command == driver.DECLARE_INPUTS) {
    c("L2322.DeleteSubsector_USAFert",
      "L2322.FinalEnergyKeyword_USAFert",
      "L2322.SubsectorLogit_USAFert",
      "L2322.SubsectorShrwtFllt_USAFert",
      "L2322.SubsectorInterp_USAFert",
      "L2322.TechShrwt_USAFert",
      "L2322.Production_USAFert",
      "L2322.TechCoef_USAFert",
      "L2322.StubTechProd_Fert_USA",
      "L2322.StubTechCoef_Fert_USA",
      "L2322.StubTechMarket_Fert_USA")
  } else if (command == driver.DECLARE_OUTPUTS) {
    c(XML = "Fert_USA.xml")
  } else if (command == driver.MAKE) {
    all_data <- list(...)[[1]]

    # Pull each required input straight from all_data and append it to the
    # XML structure; logit tables need the dedicated add_logit_tables_xml()
    # helper. The output variable must be named Fert_USA.xml because
    # return_data() captures the object name.
    Fert_USA.xml <-
      create_xml("Fert_USA.xml") %>%
      add_xml_data(get_data(all_data, "L2322.DeleteSubsector_USAFert"), "DeleteSubsector") %>%
      add_xml_data(get_data(all_data, "L2322.FinalEnergyKeyword_USAFert"), "FinalEnergyKeyword") %>%
      add_logit_tables_xml(get_data(all_data, "L2322.SubsectorLogit_USAFert"), "SubsectorLogit") %>%
      add_xml_data(get_data(all_data, "L2322.SubsectorShrwtFllt_USAFert"), "SubsectorShrwtFllt") %>%
      add_xml_data(get_data(all_data, "L2322.SubsectorInterp_USAFert"), "SubsectorInterp") %>%
      add_xml_data(get_data(all_data, "L2322.TechShrwt_USAFert"), "TechShrwt") %>%
      add_xml_data(get_data(all_data, "L2322.Production_USAFert"), "Production") %>%
      add_xml_data(get_data(all_data, "L2322.TechCoef_USAFert"), "TechCoef") %>%
      add_xml_data(get_data(all_data, "L2322.StubTechProd_Fert_USA"), "StubTechProd") %>%
      add_xml_data(get_data(all_data, "L2322.StubTechCoef_Fert_USA"), "StubTechCoef") %>%
      add_xml_data(get_data(all_data, "L2322.StubTechMarket_Fert_USA"), "StubTechMarket") %>%
      add_precursors("L2322.DeleteSubsector_USAFert", "L2322.FinalEnergyKeyword_USAFert", "L2322.SubsectorLogit_USAFert", "L2322.SubsectorShrwtFllt_USAFert", "L2322.SubsectorInterp_USAFert", "L2322.TechShrwt_USAFert", "L2322.Production_USAFert", "L2322.TechCoef_USAFert", "L2322.StubTechProd_Fert_USA", "L2322.StubTechCoef_Fert_USA", "L2322.StubTechMarket_Fert_USA")

    return_data(Fert_USA.xml)
  } else {
    stop("Unknown command")
  }
}
|
e7085217d18e90df407eecde56c78b95bd0e5478
|
0c87569d74db9640f11e9bb98742606716bfc856
|
/man/get_tx_seq.Rd
|
e86bd42f36b6c80fe2d2ab91ebe5380e46c9d744
|
[
"MIT"
] |
permissive
|
zhiyhu/masonmd
|
442b632a1ea45f49f2d033981ab7a8da11c3be4d
|
c94fd8f41317c11af929b44602c491497babdb30
|
refs/heads/master
| 2021-10-22T11:28:39.425745
| 2019-03-10T10:20:19
| 2019-03-10T10:20:19
| 78,127,906
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 855
|
rd
|
get_tx_seq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/masonmd.R
\name{get_tx_seq}
\alias{get_tx_seq}
\title{Get coding sequence of a transcript}
\usage{
get_tx_seq(tx_pos, ncbi_build)
}
\arguments{
\item{tx_pos}{The positional information of the transcript from TxDB}
\item{ncbi_build}{NCBI_build}
}
\value{
A list containing two entries:
\itemize{
\item{\code{wt_pos}} {numeric string; the absolute positions on chromosomes for each nucleotide on the coding sequence}
\item{\code{exonJunction}} {numeric string with 0 and 1; 1 represents the position of junctions}
}
}
\description{
Get positions and junction for the coding sequence of a transcript
}
\details{
Get the absolute positions on chromosomes for each nucleotide on the coding sequence for
a given transcript; and indicate the exon-exon junctions in the coding sequence
}
|
ad069602b60f340bd34d85be7fc66a90d07d394f
|
5ae76660e0da98ca8d6633b0bec8cbca9298cab2
|
/import/bcs_offense_codes/import
|
12353a133bf7d0dd8b4b77885117bb18e1d53bae
|
[] |
no_license
|
vdorie/dojr
|
5958ceeb54043a5fe65ea7a18557a3d30f3889d3
|
f8c191fbbc24b2ad449caf178b2277b44cc02ecf
|
refs/heads/master
| 2021-01-10T23:50:23.816061
| 2018-02-09T21:21:13
| 2018-02-09T21:21:13
| 70,095,726
| 0
| 1
| null | 2017-01-26T19:22:07
| 2016-10-05T20:15:15
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,057
|
import
|
#!/usr/bin/env Rscript

# Build a deduplicated lookup table of BCS offense codes with a new_2013
# flag, and write it to the shared data directory.
raw_codes <- read.csv(file.path("input", "combined-Table 1.csv"), stringsAsFactors = FALSE)

key_cols <- c("offense_code", "summary_code", "summary_offense_type", "offense_category")
code_table <- unique(raw_codes[, key_cols])
code_table$new_2013 <- NA

# For each unique code combination, carry over before_2013 only when it maps
# to exactly one source row; ambiguous combinations get 0.
for (row in seq_len(nrow(code_table))) {
  hits <- which(raw_codes$offense_code == code_table$offense_code[row] &
                raw_codes$summary_code == code_table$summary_code[row] &
                raw_codes$summary_offense_type == code_table$summary_offense_type[row] &
                raw_codes$offense_category == code_table$offense_category[row])
  code_table$new_2013[row] <- if (length(hits) == 1L) raw_codes$before_2013[hits] else 0
}

out_dir <- file.path("..", "..", "common", "data")
write.csv(code_table, file = file.path(out_dir, "bcs_offense_codes.csv"), row.names = FALSE)
|
|
4e0eb1ae8fb61374106860de669af690d396f02c
|
9e0dfef5931245047a0f85024c935d890d032b8b
|
/analysis_script.R
|
28b2f0fcd1087cdaf76cd8d2b5209e65882f9802
|
[] |
no_license
|
caitlinhudon/underrated_ds_skills
|
294af0c39565e5f470d1e82dfc304d755ab8646f
|
45799e5586515d17d061134094d48f6cb6913552
|
refs/heads/master
| 2021-05-08T20:11:18.921942
| 2018-02-04T16:19:51
| 2018-02-04T16:19:51
| 119,598,019
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,523
|
r
|
analysis_script.R
|
# Pull replies to @beeonaposy from Twitter and explore word, bigram and
# trigram frequencies in the reply thread.
library(tidyverse)
library(rtweet)
library(ggraph)
library(igraph)
library(ggiraph)
library(tidytext)
library(wordcloud2)
library(widyr)

# NOTE(review): rtweet documents this parameter as `since_id` (snake case);
# `sinceId` may be silently ignored by the API -- confirm against the
# rtweet version in use.
tweets <- search_tweets(q = "@beeonaposy OR to:beeonaposy OR beeonaposy",
                        sinceId = 958062360811405313,
                        n = 500,
                        include_rts = FALSE)

tweets <- tweets %>%
  distinct()

# Walk the reply tree: start from the two root status ids and keep adding
# statuses that reply to anything already collected, until no new ids appear.
id <- c("958062360811405313", "958062683118624768")
diff <- 1
while (diff != 0) {
  id_next <- tweets %>%
    filter(in_reply_to_status_status_id %in% id) %>%
    pull(status_id)
  id_new <- unique(c(id, id_next))
  diff <- length(id_new) - length(id)
  id <- id_new
}

all_replies <- tweets %>%
  filter(in_reply_to_status_status_id %in% id)

################################################################################

# Strip URLs and boilerplate tokens before tokenizing.
drop_pattern <- "https://t.co/[A-Za-z\\d]+|http://[A-Za-z\\d]+|&|<|>|RT|https"
#unnest_pattern <- "([^A-Za-z_\\d#@']|'(?![A-Za-z_\\d#@]))"

# Handles and bare digits to exclude from the word counts.
handle_list <- c("drewconway", "joelgrus", "timclicks", "aaronslowey",
                 "becomingdatasci", "timsteeno")
numbers <- c(1, 2, 3, 4, 5)

# Unigrams, minus stop words, screen names, known handles and digits.
words <- all_replies %>%
  mutate(text = stringr::str_replace_all(text, drop_pattern, "")) %>%
  unnest_tokens(word,
                text,
                token = "ngrams",
                n = 1) %>%
  anti_join(stop_words) %>%
  filter(!(word %in% screen_name),
         !(word %in% handle_list),
         !(word %in% numbers))

# Count when words co-occur within the same status;
# upper = FALSE removes duplicate unordered pairs.
word_pairs <- words %>%
  pairwise_count(word, status_id, upper = FALSE)

pairs <- word_pairs %>%
  mutate(token = paste(item1, item2)) %>%
  select(token, n)

agg <- words %>%
  rename(token = word) %>%
  count(token, sort = TRUE)

combined <- rbind(agg, pairs) %>%
  arrange(desc(n))

bigrams <- all_replies %>%
  mutate(text = stringr::str_replace_all(text, drop_pattern, "")) %>%
  unnest_tokens(bigram, text, token = "ngrams", n = 2)

# FIX: the original pipeline was broken -- `filter(!(bigram ))` was an
# incomplete expression and the trailing `arrange(desc(n))` was detached
# from the pipe. Per the leftover grepl() scaffolding above it, the intent
# was to drop bigrams containing any stop word, then sort by frequency.
bi_agg <- bigrams %>%
  group_by(bigram) %>%
  summarise(n = n()) %>%
  filter(!grepl(paste(stop_words$word, collapse = "|"), bigram)) %>%
  arrange(desc(n))

trigrams <- all_replies %>%
  mutate(text = stringr::str_replace_all(text, drop_pattern, "")) %>%
  unnest_tokens(trigram, text, token = "ngrams", n = 3)

tri_agg <- trigrams %>%
  group_by(trigram) %>%
  summarise(n = n()) %>%
  arrange(desc(n))
|
fd4a98712e8abe785a6b4a257b99ce48ddab646b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sqldf/examples/read.csv.sql.Rd.R
|
a473566a7842600d9107753c8b3b492747f1fc23
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
read.csv.sql.Rd.R
|
# Auto-generated example script extracted from the sqldf package's
# read.csv.sql help page. The example itself is wrapped in "Not run"
# markers because it writes iris.csv to the working directory.
library(sqldf)
### Name: read.csv.sql
### Title: Read File Filtered by SQL
### Aliases: read.csv.sql read.csv2.sql
### Keywords: manip
### ** Examples
## Not run:
##D # might need to specify eol= too depending on your system
##D write.csv(iris, "iris.csv", quote = FALSE, row.names = FALSE)
##D iris2 <- read.csv.sql("iris.csv",
##D 	sql = "select * from file where Species = 'setosa' ")
##D
## End(Not run)
|
a4ee40b982c210c0821daca4507fabee72b33687
|
0c348467079ca79fa71eee3434ad474cae1ed0fb
|
/man/anc.Rd
|
2479ae05bca87d8b27164e3d236a41cac2485aa7
|
[
"MIT"
] |
permissive
|
josue-rodriguez/pwr2ppl
|
34e4da1a354c45afaa28104676361d46c4f7dc05
|
92f5bb1e8d79d271c86066161e5a3274c91c9014
|
refs/heads/master
| 2020-03-18T20:25:42.122758
| 2018-05-31T04:43:19
| 2018-05-31T04:43:19
| 135,215,090
| 0
| 0
| null | 2018-05-28T22:32:50
| 2018-05-28T22:32:50
| null |
UTF-8
|
R
| false
| true
| 1,653
|
rd
|
anc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anc.R
\name{anc}
\alias{anc}
\title{Compute Power for One or Two Factor ANCOVA with a single covariate
Takes means, sds, and sample sizes for each group. Alpha is .05 by default, alternative values may be entered by user}
\usage{
anc(m1.1, m2.1, m1.2, m2.2, m1.3 = NULL, m2.3 = NULL, m1.4 = NULL,
m2.4 = NULL, s1.1, s2.1, s1.2, s2.2, s1.3 = NULL, s2.3 = NULL,
s1.4 = NULL, s2.4 = NULL, r, s = NULL, alpha = 0.05, factors, n)
}
\arguments{
\item{m1.1}{Cell mean for First level of Factor A, First level of Factor B}
\item{m2.1}{Cell mean for Second level of Factor A, First level of Factor B}
\item{m1.2}{Cell mean for First level of Factor A, Second level of Factor B}
\item{m2.2}{Cell mean for Second level of Factor A, Second level of Factor B}
\item{s1.1}{Cell standard deviation for First level of Factor A, First level of Factor B}
\item{s2.1}{Cell standard deviation for Second level of Factor A, First level of Factor B}
\item{s1.2}{Cell standard deviation for First level of Factor A, Second level of Factor B}
\item{s2.2}{Cell standard deviation for Second level of Factor A, Second level of Factor B}
\item{r}{Correlation between covariate and dependent variable.}
\item{alpha}{Type I error (default is .05)}
\item{n}{Sample Size per cell}
}
\value{
Power for One or Two Factor ANCOVA with a single covariate
}
\description{
Compute Power for One or Two Factor ANCOVA with a single covariate
Takes means, sds, and sample sizes for each group. Alpha is .05 by default, alternative values may be entered by user
}
|
3e80732e6478ac082d410ec45f40a21c346a84fc
|
668f049093767f87ff43fb90946bc9c10f70f248
|
/src/Escogiendo un Hogar/usage.R
|
28637202745f1ef5c53a77a72ad39b47ff9881d6
|
[] |
no_license
|
ericbellet/AprendizajeSupervisado
|
12b9b3df056830c03b67ecd5272a9b8cdb98be2c
|
9a036d412d477712439d6029de57110b54daf866
|
refs/heads/master
| 2021-01-21T05:43:44.656678
| 2016-03-12T05:20:43
| 2016-03-12T05:20:43
| 53,052,520
| 0
| 0
| null | 2016-03-03T13:32:12
| 2016-03-03T13:32:12
| null |
ISO-8859-1
|
R
| false
| false
| 21,663
|
r
|
usage.R
|
# Load a package, installing it first when it is not already available.
# Stops with an error if the package still cannot be loaded after install.
install <- function(pkg) {
  already_loaded <- require(pkg, character.only = TRUE)
  if (!already_loaded) {
    install.packages(pkg)
    if (!require(pkg, character.only = TRUE)) {
      stop(paste("load failure:", pkg))
    }
  }
}
#Instalo automaticamente los paquetes.
install('readxl')
#install('dplyr')
install('jsonlite')
install('pROC')
install('FactoMineR')
install('pscl')
install('MASS')
library(readxl)
#library(dplyr)
library(jsonlite)
library(pROC)
library(FactoMineR)
library(pscl)
library(MASS)
#--------------------------------------API----------------------------------------
# Seleccionar google_api.R en su sistema de archivos
source(file.choose())
#source("C:/Users/EricBellet/Desktop/AprendizajeSupervisado/src/Escogiendo un Hogar/google_api.R")
df <- read_excel("C:/Users/EricBellet/Desktop/AprendizajeSupervisado/data/hogares.xlsx")
df <- na.omit(df)
df$Foto <- NULL
#Inicializamos dataframe y vectores.
dataframe <- data.frame()
Distancia <- vector()
Minutos <- vector()
ori <- vector()
destino =c("Piazzale Aldo Moro")
origen =c("Via Gatteschi")
#Realizo un ciclo para hacer peticiones al API dirección por dirección.
for (origen in df$Dirección){
#Elimino los \n
origen <- strsplit(as.character(origen), "\n")
#Utilizo mi API key
api_key = "AIzaSyD04qKCMM18-iQzY6QK1MSmmii_aVhqUPE"
api_url = get_url(origen, destino, api_key)
datos = get_data(api_url)
#Parseo los datos obtenidos.
timedistancia = parse_data(datos)
#Concateno las distancia y el tiempo que arroja el API.
Distancia <- c(Distancia,timedistancia[1])
Minutos <- c(Minutos,timedistancia[2])
ori <- c(ori, origen)
Distancia <- cbind(Distancia)
Minutos <- cbind(Minutos)
#Guardo todos los datos parseados en un dataframe.
dataframe <- cbind(ori,Distancia,Minutos)
}#endfor
#--------------------------------------FIN API----------------------------------------
#--------------------------------------MINUTOS----------------------------------------
dataframe <- as.data.frame(dataframe)
#Tranformamos todos los tiempos a minutos
enHoras <- grepl("h",dataframe$Minutos)
for (i in 1:length(enHoras)){
if (enHoras[i] == TRUE){
num <- as.numeric(unlist(strsplit(unlist(dataframe$Minutos[i]), "[^0-9]+")))
dataframe$Minutos[i] <- (num[1]*60) + num[2]
}else{
num <- as.numeric(unlist(strsplit(unlist(dataframe$Minutos[i]), "[^0-9]+")))
dataframe$Minutos[i] <- num[1]
}#endif
}#endfor
#Agrego las columnas distancia y tiempo al dataframe que se utilizara para la regresion lineal.
df$Distancia <- dataframe$Distancia
df$Minutos <- dataframe$Minutos
#Elimino las filas cuya direccion el API no encontro.
df <- df[!df$Minutos == "NA", ]
#Asigno un valor de importancia a los tiempos
df$Minutos <- as.numeric(df$Minutos)
df["ValorMinutos"] <- as.factor(ordered(cut(df$Minutos, c(-Inf,15,60,120,180,240,300,360,420,600,Inf)),labels = c("160", "100", "90", "60","50","20","10","5","1","0")))
#Realizo una transformacion a la columna valor minutos para poder utilizarla.
for (i in 1:nrow(df)){
df$ValorMinutos2[i] <- as.numeric(as.character(df["ValorMinutos"][[1]][i]))
}
#--------------------------------------FIN MINUTOS----------------------------------------
#--------------------------------------HABITACIONES----------------------------------------
#AGREGO FILAS POR CADA HABITACION DISPONIBLE CON SU CORRESPONDIENTE PRECIO.
df$`Habitaciones Disponibles`
df$Disponibles[grepl("1 singola", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("1 singole", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("1 Singola", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("1 Singola", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("Intero Appartamento", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("Intero appartamento", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("Mini Appartamento", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("intero appartamento", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("Mini Appartamento", df$`Habitaciones Disponibles`)] <- "1"
df$Disponibles[grepl("2 singola", df$`Habitaciones Disponibles`)] <- "2"
df$Disponibles[grepl("2 singole", df$`Habitaciones Disponibles`)] <- "2"
df$Disponibles[grepl("2 Singola", df$`Habitaciones Disponibles`)] <- "2"
df$Disponibles[grepl("2 Singola", df$`Habitaciones Disponibles`)] <- "2"
df$Disponibles[grepl("3 singola", df$`Habitaciones Disponibles`)] <- "3"
df$Disponibles[grepl("3 singole", df$`Habitaciones Disponibles`)] <- "3"
df$Disponibles[grepl("3 Singola", df$`Habitaciones Disponibles`)] <- "3"
df$Disponibles[grepl("3 Singola", df$`Habitaciones Disponibles`)] <- "3"
df$Disponibles[grepl("4 singola", df$`Habitaciones Disponibles`)] <- "4"
df$Disponibles[grepl("4 singole", df$`Habitaciones Disponibles`)] <- "4"
df$Disponibles[grepl("4 Singola", df$`Habitaciones Disponibles`)] <- "4"
df$Disponibles[grepl("4 Singola", df$`Habitaciones Disponibles`)] <- "4"
df$Disponibles[is.na(df$Disponibles)] <- 1
#Replico las filas que posee mas de una habitacion disponible.
df <- df[rep(seq_len(nrow(df)), df$Disponibles),]
#Asigno precio a cada habitacion.
i <- 1
while (i != (nrow(df)+1)) {
array <- na.omit(as.numeric(unlist(strsplit(unlist(df$`Precio Mensual`[i]),
"[^0-9]+"))))
if (df$Disponibles[i] == 1){
df$PrecioTotal[i] <- array[1]
i <- i + 1
}else{
for (j in 1:length(array)) {
df$PrecioTotal[i + (j-1)] <- array[j]
}
i <- i + as.numeric(df$Disponibles[i])
}
}#endwhile
#--------------------------------------FIN HABITACIONES----------------------------------------
#--------------------------------------PRECIO MENSUAL----------------------------------------
#Creo una columna donde coloco el valor si esta todo incluido o no.
df$TuttoIncluido[grepl("TUTTO INCLUSO", df$`Precio Mensual`)] <- 100
df$TuttoIncluido[grepl("Tutto incluso", df$`Precio Mensual`)] <- 100
df$TuttoIncluido[grepl("tutto incluso", df$`Precio Mensual`)] <- 100
df$TuttoIncluido[is.na(df$TuttoIncluido)] <- 0
#Columna a utilizar para la regresion lineal donde 1 es si tiene todo incluido y 0 es no
df$TodoIncluido[grepl("TUTTO INCLUSO", df$`Precio Mensual`)] <- 1
df$TodoIncluido[grepl("Tutto incluso", df$`Precio Mensual`)] <- 1
df$TodoIncluido[grepl("tutto incluso", df$`Precio Mensual`)] <- 1
df$TodoIncluido[is.na(df$TodoIncluido)] <- 0
#--------------------------------------FIN PRECIO MENSUAL----------------------------------------
#--------------------------------------DESCRIPCION----------------------------------------
#Etiqueto la columna descripcion donde separo por coma y e (solo el conector).
separador <- function(x)
splat <- unlist(strsplit(x, ", | e "))
df$Descripción2 <- lapply(df$Descripción, separador)
x <- vector()
#Genero columnas que utilizare en la regresion lineal
#Descripcion3 es una columna que genera valor segun los componente de la habitacion.
df$Descripción3 <- 0
df$Pasillo <- 0
df$Cocina <- 0
df$Cuarto <- 0
df$Bagno <- 0
df$Balcon <- 0
df$Comedor <- 0
df$Armario <- 0
df$Salon <- 0
#Obtengo cuantas y cuales habitaciones tiene el apartamento y genero valor.
for (i in 1:nrow(df)) {
for (j in 1:length(unlist(df$Descripción2[i]))) {
x[1] <- as.numeric(unlist(strsplit(unlist(df$Descripción2[i])[j],
"[^0-9]+")))
if (is.na(x) == TRUE){
x[1] <- 1
}
if (grepl("Ingresso", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Pasillo[i] <- df$Pasillo[i] +x[1]
}
if (grepl("ingresso", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Pasillo[i] <- df$Pasillo[i] +x[1]
}
if (grepl("cucina", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (20 * x[1])
df$Cocina[i] <- df$Cocina[i] +x[1]
}
if (grepl("angolo cottura", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (20 * x[1])
df$Cocina[i] <- df$Cocina[i] +x[1]
}
if (grepl("stanze", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (10 * x[1])
df$Cuarto[i] <- df$Cuarto[i] +x[1]
}
if (grepl("camere", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (10 * x[1])
df$Cuarto[i] <- df$Cuarto[i] +x[1]
}
if (grepl("camera", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (10 * x[1])
df$Cuarto[i] <- df$Cuarto[i] +x[1]
}
if (grepl("bagni", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (15 * x[1])
df$Bagno[i] <- df$Bagno[i] +x[1]
}
if (grepl("bagno", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (15 * x[1])
df$Bagno[i] <- df$Bagno[i] +x[1]
}
if (grepl("disimpegno", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
}
if (grepl("balcone", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Balcon[i] <- df$Balcon[i] +x[1]
}
if (grepl("ampiio terrazzo", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Balcon[i] <- df$Balcon[i] +x[1]
}
if (grepl("sala da pranzo", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (30 * x[1])
df$Comedor[i] <- df$Comedor[i] +x[1]
}
if (grepl("doppio soggiorno", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (20 * x[1])
df$Salon[i] <- df$Salon[i] +x[1]
}
if (grepl("salotto", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (20 * x[1])
df$Salon[i] <- df$Salon[i] +x[1]
}
if (grepl("armario", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Armario[i] <- df$Armario[i] +x[1]
}
if (grepl("ripostiglio", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Armario[i] <- df$Armario[i] +x[1]
}
if (grepl("corridoio", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (5 * x[1])
df$Pasillo[i] <- df$Pasillo[i] +x[1]
}
if (grepl("Appartamento su due livelli", unlist(df$Descripción2[i])[j]) == TRUE){
df$Descripción3[i] <- df$Descripción3[i] + (50 * x[1])
}
}#endfor
}#endfor
#MUESTRA UN WARNING QUE EN REALIDAD NO CAUSA NINGUN PROBLEMA
#--------------------------------------FIN DESCRIPCION----------------------------------------
# Categorise the listing type from the "Habitaciones Disponibles" text.
# Codes: 1 = whole flat, 2 = studio / mini flat, 3 = single room,
# 4 = bed in a shared room, 5 = double room.
# NOTE: the (pattern, code) pairs are applied in the exact order of the
# original assignment chain, so later matches overwrite earlier ones
# exactly as before.
tipo_reglas <- list(
  c("Intero Appartamento", "1"),
  c("Intero appartamento", "1"),
  c("intero appartamento", "1"),
  c("monolocale", "2"),
  c("Mini Appartamento", "2"),
  c("posto letto", "4"),
  c("doppia", "5"),
  c("doppie", "5"),
  c("singola", "3"),
  c("singole", "3"),
  c("Singola", "3"),
  c("Singole", "3")
)
for (regla in tipo_reglas) {
  df$TipoHabitacion[grepl(regla[1], df$`Habitaciones Disponibles`)] <- regla[2]
}
#------------------- Which is the best room? ---------------------------
#df$Disponibles <- NULL
#df$Descripción2 <- NULL
# Descripción3 holds the score accumulated from the room-description
# keywords parsed above; expose it under a clearer name.
df$valorInmobiliario <- df$Descripción3
#df$Descripción3 <- NULL
# A flat's value is related to the commute time, how many rooms it has,
# and whether the price is all-inclusive.
df$ValorTotal <- df$ValorMinutos2 + df$valorInmobiliario + df$TuttoIncluido
df$ValorMinutos <- NULL
#df$ValorMinutos2 <- NULL
#df$TuttoIncluido <- NULL
#df$valorInmobiliario <- NULL
range01 <- function(x){((x-min(x))/(max(x)-min(x)))*10}
# Scale the composite value onto a 0-10 range.
df$ValorTotal <- range01(df$ValorTotal)
df$TipoHabitacion <- as.numeric(df$TipoHabitacion)
df$Disponibles <- as.numeric(df$Disponibles)
# NOTE(review): this repeats the TipoHabitacion conversion two lines up;
# harmless but redundant.
df$TipoHabitacion <- as.numeric(df$TipoHabitacion)
# Split the data frame in two (women / men) based on the free-text notes.
df$Sexo <- df$Notas
# Order matters: the mixed forms ("ragazzi/e", "ragazze/i", ...) must be
# mapped to "Ambos" BEFORE the plain "ragazze"/"ragazzi" patterns run,
# otherwise those rows would be claimed by the single-gender rules.
df$Sexo[grepl("ragazzi/e", df$Sexo)] <- "Ambos"
df$Sexo[grepl("ragazze/i", df$Sexo)] <- "Ambos"
df$Sexo[grepl("ragazzi/ragazze", df$Sexo)] <- "Ambos"
df$Sexo[grepl("ragazze", df$Sexo)] <- "Femenino"
df$Sexo[grepl("ragazzi", df$Sexo)] <- "Masculino"
# Anything not classified above defaults to "Ambos" (both genders).
df$Sexo[!grepl("Ambos", df$Sexo) & !grepl("Masculino",
df$Sexo) & !grepl("Femenino", df$Sexo)] <- "Ambos"
# Men may consider "Masculino"+"Ambos" listings; women "Femenino"+"Ambos".
dfM <- df[df$Sexo == 'Masculino' | df$Sexo == 'Ambos',]
dfF <- df[df$Sexo == 'Femenino' | df$Sexo == 'Ambos',]
#----------------------------------------------------------
# Drop the helper columns from both subsets.
dfM$Sexo <- NULL
dfF$Sexo <- NULL
dfM$Notas <- NULL
dfF$Notas <- NULL
#************************************************************************************
#-------------------------- SAMPLING FOR MEN ----------------------------------------
#************************************************************************************
# Empirical sampling weight for every row of dfM: the relative frequency of
# its PrecioTotal value. Vectorised replacement for the original O(n * m)
# count loop plus the per-row asignarProb() linear scan; the exact `==`
# equality semantics of the original are preserved.
valores <- unique(dfM$PrecioTotal)
totalvalores <- nrow(dfM)
# P(value) = (# rows with that value) / (total rows), one entry per unique value.
probabilidad <- vapply(
  valores,
  function(v) sum(dfM$PrecioTotal == v) / totalvalores,
  numeric(1)
)
# One probability per row, looked up by the position of the row's value in
# `valores` (match() replaces the former asignarProb() helper).
probabilidades <- probabilidad[match(dfM$PrecioTotal, valores)]
#**********************************************************************************
#---------- Stratified train/test split for men (80/20) ---------------------------
#**********************************************************************************
set.seed(1)
sets <- sample(nrow(dfM), nrow(dfM)*0.8, prob=probabilidades, replace = FALSE)
trainingM <- dfM[sets,]
testingM <- dfM[-sets,]
#************************************************************************************
#-------------------------- SAMPLING FOR WOMEN --------------------------------------
#************************************************************************************
# Same empirical weighting as the male subset, computed on dfF. Vectorised
# replacement for the original count loop + asignarProb() scan.
valores <- unique(dfF$PrecioTotal)
totalvalores <- nrow(dfF)
# P(value) = (# rows with that value) / (total rows), one entry per unique value.
probabilidad <- vapply(
  valores,
  function(v) sum(dfF$PrecioTotal == v) / totalvalores,
  numeric(1)
)
# One probability per row of dfF, via positional lookup into `valores`.
probabilidades <- probabilidad[match(dfF$PrecioTotal, valores)]
#**********************************************************************************
#---------- Stratified train/test split for women (80/20) -------------------------
#**********************************************************************************
set.seed(1)
sets <- sample(nrow(dfF), nrow(dfF)*0.8, prob=probabilidades, replace = FALSE)
trainingF <- dfF[sets,]
testingF <- dfF[-sets,]
#---------------------------------------------------------------------------
#************************************************************************************
#---------------------- EXPLORATORY DATA ANALYSIS -----------------------------------
#************************************************************************************
# Select the numeric variables usable for linear regression and run a PCA
# on each gender subset as exploratory analysis.
Femenino <- select(dfF, Minutos, TodoIncluido, TipoHabitacion, Pasillo, Cocina, Cuarto, Bagno, Balcon, Comedor, Armario,
Salon, PrecioTotal, ValorTotal)
# NOTE(review): assigning the result to `PCA` shadows FactoMineR's PCA()
# function name; R still resolves the calls below to the function (non-
# function bindings are skipped in call position), but it is confusing.
PCA <- PCA(Femenino)
plot(PCA, choix = "var")
Masculino <- select(dfM, Minutos, TodoIncluido, TipoHabitacion, Pasillo, Cocina,Cuarto, Bagno, Balcon, Comedor, Armario,
Salon, PrecioTotal, ValorTotal)
PCA <- PCA(Masculino)
plot(PCA, choix = "var")
# Best room for men: among listings cheaper than the median price, keep the
# one(s) with the highest composite value (ties are all retained).
dfM <- dfM[order(dfM$PrecioTotal) , ]
medianaMM <- median(dfM$PrecioTotal)
mejoresHabitacionesM <- subset(dfM , PrecioTotal < medianaMM)
mejorHabitacionesM <- mejoresHabitacionesM[which(mejoresHabitacionesM$ValorTotal == max(mejoresHabitacionesM$ValorTotal)), ]
print(mejorHabitacionesM$Distrito)
print(mejorHabitacionesM$Dirección)
print(mejorHabitacionesM$Descripción)
print(mejorHabitacionesM$PrecioTotal)
print(mejorHabitacionesM$ValorTotal)
#----------------------------------------------------------------
# Best room for women: same criterion applied to the female subset.
dfF <- dfF[order(dfF$PrecioTotal) , ]
medianaMF <- median(dfF$PrecioTotal)
mejoresHabitacionesF <- subset(dfF , PrecioTotal < medianaMF)
mejorHabitacionesF <- mejoresHabitacionesF[which(mejoresHabitacionesF$ValorTotal == max(mejoresHabitacionesF$ValorTotal)), ]
print(mejorHabitacionesF$Distrito)
print(mejorHabitacionesF$Dirección)
print(mejorHabitacionesF$Descripción)
print(mejorHabitacionesF$PrecioTotal)
print(mejorHabitacionesF$ValorTotal)
#----------------------------------------------------------------
#************************************************************************************
#-------------------------- LINEAR REGRESSION ---------------------------------------
#************************************************************************************
# Variable selection: stepwise (forward, backward, both) using stepAIC()
# from the MASS package, which performs step-by-step model selection by
# exact AIC.
#------------------------------ MEN -------------------------------------------------
# Full linear model with every candidate predictor.
modeloM1 <- lm(PrecioTotal ~ Minutos + TodoIncluido + TipoHabitacion + Pasillo +
Cocina + Cuarto + Bagno + Balcon + Comedor + Armario + Salon +
ValorTotal,data = trainingM)
regresionM1 <- predict(modeloM1, newdata = testingM)
# Search for the best subset of predictors.
step <- stepAIC(modeloM1, direction="both")
step$anova # display results
# Reduced model with the predictors retained by the stepwise search.
modeloM2 <- lm(PrecioTotal ~ Minutos + TodoIncluido + TipoHabitacion + Balcon,data = trainingM)
regresionM2 <- predict(modeloM2, newdata = testingM)
# Compare full vs. reduced male models.
anova(modeloM1, modeloM2)
# Side-by-side: predictions vs. observed prices on the male hold-out set.
comparacionM <- cbind(regresionM2,testingM$PrecioTotal )
#------------------------------ WOMEN ------------------------------------------------
# Full linear model with every candidate predictor, on the female subset.
modeloF1 <- lm(PrecioTotal ~ Minutos + TodoIncluido + TipoHabitacion + Pasillo +
Cocina + Cuarto + Bagno + Balcon + Comedor + Armario + Salon +
ValorTotal,data = trainingF)
regresionF1 <- predict(modeloF1, newdata = testingF)
# Stepwise (both directions) predictor selection by exact AIC.
step <- stepAIC(modeloF1, direction="both")
step$anova # display results
# Reduced model with the predictors retained by the stepwise search.
modeloF2 <- lm(PrecioTotal ~ Minutos + TodoIncluido + TipoHabitacion + Balcon,data = trainingF)
regresionF2 <- predict(modeloF2, newdata = testingF)
# BUG FIX: the original compared the *male* models (modeloM1, modeloM2)
# here -- a copy/paste slip. Compare the two female models instead.
anova(modeloF1, modeloF2)
# Side-by-side: predictions vs. observed prices on the female hold-out set.
comparacionF <- cbind(regresionF2,testingF$PrecioTotal )
#----------------------------- END WOMEN --------------------------------------------
# Use the fitted linear model to score every listing and decide the best flat.
# Best room for men: score every listing with the reduced model.
hombre <- lm(PrecioTotal ~ Minutos + TodoIncluido + TipoHabitacion + Balcon,data = dfM)
hombre <- predict(hombre, newdata = dfM)
# NOTE(review): cbind() stores a one-column matrix in the data frame;
# it works, but a plain vector assignment would be more conventional.
dfM$PrediccionPrecio <- cbind(hombre)
dfM <- dfM[order(dfM$PrecioTotal) , ]
medianaMM <- median(dfM$PrecioTotal)
# Among listings cheaper than the median, keep the one(s) whose *predicted*
# price is highest (best value for money under the model).
mejoresHabitacionesM <- subset(dfM , PrecioTotal < medianaMM)
mejorHabitacionesM <- mejoresHabitacionesM[which(mejoresHabitacionesM$PrediccionPrecio == max(mejoresHabitacionesM$PrediccionPrecio)), ]
print(mejorHabitacionesM$Distrito)
print(mejorHabitacionesM$Dirección)
print(mejorHabitacionesM$Descripción)
print(mejorHabitacionesM$PrecioTotal)
print(mejorHabitacionesM$ValorTotal)
# Best room for women: same procedure on the female subset.
mujer <- lm(PrecioTotal ~ Minutos + TodoIncluido + TipoHabitacion + Balcon,data = dfF)
mujer <- predict(mujer, newdata = dfF)
dfF$PrediccionPrecio <- cbind(mujer)
dfF <- dfF[order(dfF$PrecioTotal) , ]
medianaMF <- median(dfF$PrecioTotal)
mejoresHabitacionesF <- subset(dfF , PrecioTotal < medianaMF)
mejorHabitacionesF <- mejoresHabitacionesF[which(mejoresHabitacionesF$PrediccionPrecio == max(mejoresHabitacionesF$PrediccionPrecio)), ]
print(mejorHabitacionesF$Distrito)
print(mejorHabitacionesF$Dirección)
print(mejorHabitacionesF$Descripción)
print(mejorHabitacionesF$PrecioTotal)
print(mejorHabitacionesF$ValorTotal)
|
2da79f5b3b6ad4c60b09ab64ae66a7511b10ea85
|
8527c725baba37bc75031f40093183462db7073f
|
/man/compare_tef_vuln.Rd
|
0aa6181d5a6f94f8435d1b4defb10ca7b7bcb9af
|
[
"MIT"
] |
permissive
|
davidski/evaluator
|
776aafae717d68b017e2db8ee3fef8fc98342e66
|
27d475bb06ecda7ba11feef3e219519f2d6ce404
|
refs/heads/main
| 2023-01-20T20:15:17.259065
| 2022-01-25T04:55:13
| 2022-01-25T04:55:19
| 57,260,994
| 142
| 47
|
NOASSERTION
| 2022-12-21T05:01:15
| 2016-04-28T01:36:16
|
R
|
UTF-8
|
R
| false
| true
| 1,083
|
rd
|
compare_tef_vuln.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/openfair.R
\name{compare_tef_vuln}
\alias{compare_tef_vuln}
\title{Calculate number of loss events which occur in a simulated period}
\usage{
compare_tef_vuln(tef, vuln, n = NULL)
}
\arguments{
\item{tef}{Threat event frequency (n).}
\item{vuln}{Vulnerability (percentage).}
\item{n}{Number of samples to generate.}
}
\value{
List containing samples (as a vector) and details (as a list).
}
\description{
Composition function for use in \code{\link{sample_lef}}. Given a count of
the number of threat events (TEF) and the level of vulnerability (as a
percentage), calculate how many of those become loss events (LEF).
}
\examples{
compare_tef_vuln(tef = 500, vuln = .25)
}
\seealso{
Other OpenFAIR helpers:
\code{\link{get_mean_control_strength}()},
\code{\link{openfair_tef_tc_diff_lm}()},
\code{\link{sample_diff}()},
\code{\link{sample_lef}()},
\code{\link{sample_lm}()},
\code{\link{sample_tc}()},
\code{\link{sample_vuln}()},
\code{\link{select_loss_opportunities}()}
}
\concept{OpenFAIR helpers}
|
40fb465ca8a6e5ac3b36da8f4cf77286c51a0a14
|
602a440feb2b51cfa46acbe455646b3275730b34
|
/plot4.R
|
642fbb8c8546af76ecf765797a5cc9047a240227
|
[] |
no_license
|
jyork13/ExData_Plotting1
|
3d11c0247405a482ae529c1d5f27023714c3b29d
|
4d43690040af1054cdd3734ef7559584c3d21cb3
|
refs/heads/master
| 2021-01-18T08:48:19.875254
| 2015-09-09T03:42:28
| 2015-09-09T03:42:28
| 42,132,656
| 0
| 0
| null | 2015-09-08T18:55:39
| 2015-09-08T18:55:38
| null |
UTF-8
|
R
| false
| false
| 5,765
|
r
|
plot4.R
|
########################################################################################################
#
# plot4.R v 0.0.2
#
# by Jim York 09/08/2015
#
# This is a project assignment for the following course:
#
# Coursera exdata-032: Exploratory Data Analysis (Johns Hopkins Bloomberg School of Public Health)
# by Roger D. Peng, PhD, Jeff Leek, PhD, Brian Caffo, PhD
#
# This program retrieves the "Individual household electric power consumption Data Set" zipped data file
# from the UC Irvine Machine Learning Repository and creates a 2 x 2 matrix of plots of various data
# plotted against the day of week for the Dates 02-01-2007 and 02-02-2007 and stores the line graph as a png file.
#
#
########################################################################################################
########################################################################################################
#
# 0) Load libraries, Retrieve dataset, unzip and read data.
#
########################################################################################################
# Load libraries ()
# packages function checks to see if package is installed and if not, installs and loads
# (got this from http://stackoverflow.com/questions/9341635/check-for-installed-packages-before-running-install-packages)
# Install-if-missing loader. Takes a *bare* (unquoted) package name via
# non-standard evaluation: `packages(lubridate)` works without quotes.
packages<-function(x){
# Recover the unquoted argument as a string from the call itself.
x<-as.character(match.call()[[2]])
# require() returns FALSE (rather than erroring) when the package is absent.
if (!require(x,character.only=TRUE)){
install.packages(pkgs=x,repos="http://cran.r-project.org")
require(x,character.only=TRUE)
}
}
packages(lubridate)
packages(dplyr)
# Initialize url and directory path and set working directory
fileurl <- 'http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
initial_working_directory <- "~/../RStudio/Data Exploration"
# NOTE(review): setwd() to a machine-specific path makes the script
# non-portable; acceptable for a course assignment, but worth flagging.
setwd(initial_working_directory)
# Get file, unzip and set working directory to directory with unzipped files
if (!file.exists("data")){dir.create("data")} #Create data directory if it doesn't exist
# Download and unzip only once; skipped on subsequent runs.
if (!file.exists("./data/Dataset.zip")){
download.file(fileurl, destfile="./data/Dataset.zip", mode='wb')
unzip("./data/Dataset.zip", exdir = "./data")
}
setwd("./data")
# Read file into data frame (semicolon-separated; values are read as
# character and coerced to numeric below)
hh_power <- read.table("household_power_consumption.txt", header = TRUE, stringsAsFactors = FALSE, sep = ";")
########################################################################################################
#
# 1) Convert data types for dates, times and numbers and filter data by dates.
#
########################################################################################################
# Create a combined date/time variable (POSIXct)
hh_power$DateTime <- dmy_hms(paste(hh_power$Date, hh_power$Time))
# Convert character date to Date using the dmy function from lubridate
hh_power$Date <- dmy(hh_power$Date)
# Filter on the two dates of interest
hh_power.f <- filter(hh_power, Date == ymd("2007-02-01") | Date == ymd("2007-02-02"))
# Convert the metering/power columns from character to numeric.
# Non-numeric entries, if any, become NA with a coercion warning.
hh_power.f$Sub_metering_1 <- as.numeric(hh_power.f$Sub_metering_1)
hh_power.f$Sub_metering_2 <- as.numeric(hh_power.f$Sub_metering_2)
hh_power.f$Sub_metering_3 <- as.numeric(hh_power.f$Sub_metering_3)
hh_power.f$Global_active_power <- as.numeric(hh_power.f$Global_active_power)
hh_power.f$Global_reactive_power <- as.numeric(hh_power.f$Global_reactive_power)
hh_power.f$Voltage <- as.numeric(hh_power.f$Voltage)
########################################################################################################
#
# 2) Create a plot function and then execute function to view on screen and verify it's what we want
#
########################################################################################################
# Function to create the four plots
create_plots <- function() {
  # Render the assignment's 2 x 2 panel of plots on the active device.
  # Same margins and layout as before.
  par(mfrow = c(2, 2), mar = c(5, 4, 2, 2), oma = c(2, 2, 2, 2))
  d <- hh_power.f
  # Panel 1 - Global Active Power over time.
  plot(d$DateTime, d$Global_active_power, type = "l",
       ylab = "Global Active Power", xlab = "")
  # Panel 2 - Voltage over time.
  plot(d$DateTime, d$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
  # Panel 3 - the three energy sub-metering series overlaid.
  plot(d$DateTime, d$Sub_metering_1, type = "l",
       ylab = "Energy sub metering", xlab = "")
  lines(d$DateTime, d$Sub_metering_2, type = "l", col = "red")
  lines(d$DateTime, d$Sub_metering_3, type = "l", col = "blue")
  legend("topright", col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = c(1, 1, 1), bty = "n", cex = 0.8)  # lty draws lines in the legend; cex shrinks the font
  # Panel 4 - Global Reactive Power over time.
  plot(d$DateTime, d$Global_reactive_power, type = "l",
       ylab = "Global_reactive_power", xlab = "datetime")
}
# Display plots on screen to verify the layout before writing the file
create_plots()
########################################################################################################
#
# 3) Open png file graphics device, create plot, then close device (saving file).
#
########################################################################################################
# Open PNG device; create 'plot4.png' in my working directory
png(filename = "plot4.png", width=480,height=480)
# Create plot and send to a file (no plot appears on screen)
create_plots()
## Close the PNG file device (flushes and saves the file)
dev.off()
|
d4604bef5bbe665f30eebf760b661f61f3caf5b2
|
ee3d47bebd1f19ff1e52bb71e22f6237097d364b
|
/PCATrafficChannelsR.R
|
3bd8279e2e037e3c71df6de57957728ea71eb956
|
[] |
no_license
|
Anakeyn/PCATrafficChannelsR
|
0f8a58d88083d7134e12ebec8290774d709f6a59
|
ac4160c279eed3a681a461973ed5363262e410e7
|
refs/heads/master
| 2020-06-01T10:49:16.252079
| 2019-06-07T16:10:10
| 2019-06-07T16:10:10
| 190,754,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,084
|
r
|
PCATrafficChannelsR.R
|
##########################################################################
# Auteur : Pierre Rouarch 2019 - Licence GPL 3
# PCATrafficChannelsR
# Analyse en Composantes principales canaux trafic Web.
# Pour illustrer notre propos nous utiliserons le jeu de données de
# l'association Networking-Morbihan
##########################################################################
#Packages et bibliothèques utiles (décommenter au besoin)
##########################################################################
#install.packages("lubridate") #si vous ne l'avez pas
#install.packages("tseries")
#install.packages("devtools")
#devtools::install_github("twitter/AnomalyDetection") #pour anomalyDetection de Twitter
#devtools::install_github('sinhrks/ggfortify') #pour ggfortify
#install.packages("XML")
#install.packages("stringi")
#install.packages("BSDA")
#install.packages("BBmisc")
#install.packages("stringi")
#install.packages("FactoMineR")
#install.packages("factoextra")
#install.packages("rcorr")
#install.packages("lubridate") #si vous ne l'avez pas
library (lubridate) #pour yday
#library(tseries) #pour ts
library(AnomalyDetection) #pour anomalydetectionVec
#library(XML) # pour xmlParse
#library(stringi) #pour stri_replace_all_fixed(x, " ", "")
library(BSDA) #pour SIGN.test
library(BBmisc) #pour which.first
#install.packages("stringi")
library(stringi) #pour stri_detect
library(ggfortify) #pour ploter autoplot type ggplot
#install.packages("tidyverse") #si vous ne l'avez pas #pour gggplot2, dplyr, tidyr, readr, purr, tibble, stringr, forcats
#install.packages("forecast") #pour ma
#Chargement des bibliothèques utiles
library(tidyverse) #pour gggplot2, dplyr, tidyr, readr, purr, tibble, stringr, forcats
library(forecast) #pour arima, ma, tsclean
library(FactoMineR) #pour ACP
library(factoextra) #compléments ACP FactoMineR
##########################################################################
# Load the input files
##########################################################################
dfPageViews <- read.csv("dfPageViews.csv", header=TRUE, sep=";")
#str(dfPageViews) #verif
# Parse the date column into Date objects.
dfPageViews$date <- as.Date(dfPageViews$date,format="%Y-%m-%d")
#str(dfPageViews) #verif
str(dfPageViews) # 72821 obs
dfPageViews$index <- 1:nrow(dfPageViews) # row id, used later to locate the "marketing articles"
# Then:
# articles metadata.
myArticles <- read.csv("myArticles.csv", header=TRUE, sep=";")
# Parse the date column into Date objects.
myArticles$date <- as.Date(myArticles$date,format="%Y-%m-%d")
str(myArticles) # verif
# File mapping each traffic source to a channel.
mySourcesChannel <- read.csv2("mySourcesChannel.csv", header=TRUE)
str(mySourcesChannel) # quick look at what we loaded
# The join key must be character (not factor) for the left join below.
mySourcesChannel$source <- as.character(mySourcesChannel$source)
##########################################################################
# Global traffic: attach the acquisition channels
##########################################################################
# Pull the `channel` variable into the main data frame via a left join
# on the traffic source.
dfPVChannel <- left_join(dfPageViews, mySourcesChannel, by="source")
# Sanity checks.
str(dfPVChannel) # 72821
head(dfPVChannel)
plyr::count(as.factor(dfPVChannel$channel)) # 5 channels
plyr::count(as.factor(dfPVChannel$pagePath)) # 1262 page types
# Prepare the data for the PCA: one row per page, one column per channel,
# cells = page-view counts.
PVDataForACP <- dfPVChannel[, c("pagePath", "channel")] %>% # keep only pagePath and channel
group_by(pagePath, channel) %>% # group by page and channel
mutate(pageviews = n()) %>% # count the page views
unique() %>% # de-duplicate
spread(key=channel, value=pageviews, fill = 0, convert = FALSE, drop = TRUE,
sep = NULL) # widen: one column per channel
# Run the PCA with FactoMineR (drop the pagePath column; variables scaled).
res.pca = PCA(PVDataForACP[, -1], scale.unit=TRUE, ncp=5, graph=F)
summary(res.pca) # summary of the decomposition
############################# Simple plots of individuals and variables.
#plot(res.pca, choix = "ind") #individus
#plot(res.pca, choix = "var") #variables
######################### ScreePlot ################################################
ScreePlot <- fviz_eig(res.pca, addlabels = TRUE, ylim = c(0, 100))
ggpubr::ggpar(ScreePlot,
title = "ScreePlot - Toutes les Pages vues",
subtitle = "Pratiquement toute l'information est contenue dans la première composante",
xlab = "Dimensions",
ylab = "Pourcentage de variance expliquée",
caption = "ScreePlot pour toutes les Pages Vues")
ggsave(filename = "PV-ScreePlot.jpg", dpi="print") # save the figure
######################### Variables plot ################################################
# Colour variables by cos2 (quality of representation).
VarPlot <- fviz_pca_var(res.pca, col.var = "cos2",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # avoid label overlap
)
ggpubr::ggpar(VarPlot,
title = "ACP - VarPlot - Toutes les Pages vues",
subtitle = "Les variables sont quasiment toutes sur la première dimension \n webmail diffère légèrement",
caption = "Diagramme des variables pour toutes les pages vues")
ggsave(filename = "PV-VarPlot.jpg", dpi="print") # save the figure
##########################################################################
# Base traffic: page views that neither are, nor landed on, a
# marketing-article page.
##########################################################################
# Article paths, extracted from the article URLs for matching below.
myArticles$pagePath <- str_split_fixed(myArticles$link, "https://www.networking-morbihan.com", 2)[,2]
patternArticlesToRemove <- unique(myArticles$pagePath)
# NOTE: paths are used as a regex alternation without escaping, exactly as
# in the original; metacharacters in a path would widen the match.
articlePattern <- paste(patternArticlesToRemove, collapse = "|")
# FIX: the original used df[-grep(...), ], which silently returns ZERO rows
# (instead of all rows) when the pattern matches nothing, because
# x[-integer(0)] selects nothing. Logical negation with grepl() is safe
# and identical whenever there is at least one match.
dfBasePageViews <- dfPageViews[!grepl(articlePattern, dfPageViews$pagePath), ]
# Then also drop rows whose *landing* page was an article.
dfBasePageViews <- dfBasePageViews[!grepl(articlePattern, dfBasePageViews$landingPagePath), ]
str(dfBasePageViews) #37614 obs.
# Attach the channel of each traffic source (left join on `source`).
dfBasePVChannel <- left_join(dfBasePageViews, mySourcesChannel, by="source")
# Sanity checks.
str(dfBasePVChannel)
head(dfBasePVChannel)
plyr::count(as.factor(dfBasePVChannel$channel))
# Prepare the data for the PCA (same layout as for the global traffic).
BasePVDataForACP <- dfBasePVChannel[, c("pagePath", "channel")] %>% # keep only pagePath and channel
group_by(pagePath, channel) %>% # group by page and channel
mutate(pageviews = n()) %>% # count the page views
unique() %>% # de-duplicate
spread(key=channel, value=pageviews, fill = 0, convert = FALSE, drop = TRUE,
sep = NULL) # widen: one column per channel
# Run the PCA.
res.pca = PCA(BasePVDataForACP[, -1], scale.unit=TRUE, ncp=5, graph=F)
summary(res.pca) # summary of the decomposition
######################### ScreePlot ################################################
ScreePlot <- fviz_eig(res.pca, addlabels = TRUE, ylim = c(0, 100))
ggpubr::ggpar(ScreePlot,
title = "ScreePlot - Pages de Base",
subtitle = "L'information est encore plus concentrée dans la première composante",
xlab = "Dimensions",
ylab = "Pourcentage de variance expliquée",
caption = "ScreePlot pour Les pages de Base")
ggsave(filename = "Base-PV-ScreePlot.jpg", dpi="print") # save the figure
######################### Variables plot ################################################
# Colour variables by cos2 (quality of representation).
VarPlot <- fviz_pca_var(res.pca, col.var = "cos2",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # avoid label overlap
)
ggpubr::ggpar(VarPlot,
title = "ACP - VarPlot - Pages de Base",
subtitle = "Les variables sont quasiment toutes sur la première dimension \n webmail s'est même rapprochée de l'axe.",
caption = "Diagramme des variables pour les pages de bases")
ggsave(filename = "Base-PV-VarPlot.jpg", dpi="print") # save the figure
##########################################################################
# Direct-marketing traffic only, i.e. traffic whose source led to a
# marketing-article landing page.
##########################################################################
# Keep only page views whose landingPagePath matches one of the article paths.
patternArticlesToKeep <- unique(myArticles$pagePath)
indexLandingPagePathToKeep <- grep(pattern = paste(patternArticlesToKeep, collapse="|"), dfPageViews$landingPagePath)
dfDMPageViews <- dfPageViews[indexLandingPagePathToKeep,]
str(dfDMPageViews) #28553 obs.
# Attach the channel of each traffic source (left join on `source`).
# FIX: the original performed this identical left_join (and a str() check)
# twice in a row; the duplicate has been removed.
dfDMPVChannel <- left_join(dfDMPageViews, mySourcesChannel, by="source")
# Sanity checks.
str(dfDMPVChannel)
head(dfDMPVChannel)
plyr::count(as.factor(dfDMPVChannel$channel))
# Prepare the data for the PCA (same layout as before).
DMPVPVDataForACP <- dfDMPVChannel[, c("pagePath", "channel")] %>% # keep only pagePath and channel
group_by(pagePath, channel) %>% # group by page and channel
mutate(pageviews = n()) %>% # count the page views
unique() %>% # de-duplicate
spread(key=channel, value=pageviews, fill = 0, convert = FALSE, drop = TRUE,
sep = NULL) # widen: one column per channel
# Run the PCA on the direct-marketing pages.
res.pca = PCA(DMPVPVDataForACP[, -1], scale.unit=TRUE, ncp=5, graph=F)
summary(res.pca) # summary of the decomposition
######################### ScreePlot ################################################
ScreePlot <- fviz_eig(res.pca, addlabels = TRUE, ylim = c(0, 100))
ggpubr::ggpar(ScreePlot,
title = "ScreePlot - Pages Direct Marketing",
subtitle = "Cette fois l'information est moins concentrée dans la première composante \n mais cela reste important",
xlab = "Dimensions",
ylab = "Pourcentage de variance expliquée",
caption = "ScreePlot pour Les pages Direct Marketing")
ggsave(filename = "DM-PV-ScreePlot.jpg", dpi="print") # save the figure
######################### Variables plot ################################################
# Colour variables by cos2 (quality of representation).
VarPlot <- fviz_pca_var(res.pca, col.var = "cos2",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # avoid label overlap
)
ggpubr::ggpar(VarPlot,
title = "ACP - VarPlot - Pages Direct MArketing",
subtitle = "Cette fois le search se détache des autres variables. Ces dernières \ncorrespondent plus aux actions Marketing ponctuelles.",
caption = "Diagramme des variables pour les pages Direct Marketing")
ggsave(filename = "DM-PV-VarPlot.jpg", dpi="print") # save the figure
##########################################################################
# MERCI pour votre attention !
##########################################################################
|
863cb5034e2036c7c0078368e5be29f548f14306
|
7218af052bcc733a0a709028a31019e0bdc60702
|
/data/make.genera.R
|
4dccf438052896e544571ba7c008b57d924070f8
|
[
"Unlicense"
] |
permissive
|
SimonGoring/GoringetalPollenRichness
|
3e82f0dd3fafbfe7d7acd0645000feb7329653cf
|
d5bd174c39e9c6154131aad859f9db34563dc6e9
|
refs/heads/master
| 2021-01-20T21:29:06.471818
| 2014-08-20T15:32:26
| 2014-08-20T15:32:26
| 11,551,672
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 560
|
r
|
make.genera.R
|
# Makes a dataset that aggregates presence and absence to the generic level.
# Align the species metadata to the columns of `presence`, so that row i of
# p.set describes column i of the presence matrix.
p.set <- pollen.set[match(colnames(presence), pollen.set$spec.code),]
# For each site (row of `presence`), sum the species-level values within
# each genus; the inner function returns one named value per genus.
# The trailing [,-1] drops the row-index column that adply prepends.
pres.ge <- adply(.data = presence,
.margins = 1,
.expand = FALSE,
.fun = function(x) {run <- aggregate(as.numeric(x) ~ p.set$genus, FUN=sum)
out <- run[,2]
names(out) = run[,1]; out},
.progress='text')[,-1]
# Persist the genus-level table alongside the other richness CSVs.
write.csv(pres.ge, 'RichnessCSVs/pres.ge.csv')
rm(p.set)
|
6a01e211e0f59043c007a15d37688e70b9660389
|
a484084de1aa0b04d33234834124d328971147e9
|
/R/app_server.R
|
41dc885c80d470fdabe14e7de620304f97fdce94
|
[
"MIT"
] |
permissive
|
sowiks2711/kolejkeRShiny
|
dc5e3908e5a17b4932a55150a000b913c8ec3949
|
5b54a55cbfbbfeb7b94c9bd38236f49fbefa1911
|
refs/heads/master
| 2020-12-10T00:49:18.353550
| 2020-02-03T20:18:22
| 2020-02-03T20:18:22
| 233,462,615
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,826
|
r
|
app_server.R
|
#' Shiny server logic for the queue-monitoring app.
#'
#' Wires the i18n-translated UI labels, the office/queue selectors, and the
#' result outputs. Data comes either from the bundled `mock_data` or live
#' from the kolejkeR API, depending on the `mock` input.
#' @import shiny
#' @import ggplot2
#' @import waffle
#' @import RColorBrewer
#' @import extrafont
app_server <- function(input, output,session) {
# Currently selected UI language code.
lang <- reactive(
input$lang
)
# Translator object, switched whenever the language input changes.
i18n_r <- reactive({
i18n$set_translation_language(input$lang)
i18n
})
# TRUE when the user asked for mock data instead of the live API.
use_mock <- reactive(
ifelse(input$mock == "TRUE", TRUE, FALSE)
)
# List the first level callModules here
# All office names known to the mock dataset.
offices <- reactive({
result <- unique(mock_data[["name"]])
result
})
# Rows for the selected office: mock rows by default, live API rows
# when mocking is disabled.
raw_data <- reactive({
result <- mock_data[mock_data$name == input$of,]
if(!use_mock()) {
result <- kolejkeR::get_raw_data(input$of)
}
result
})
# Rows of the selected office restricted to the selected queue.
queue_data <- reactive({
raw_data()[raw_data()$nazwaGrupy == input$queue,]
})
# Keep the selectors and the submit-button label in sync with the
# current language and available data.
observe({
updateSelectInput(session, "of", i18n_r()$t("Select office"), choices = offices(), selected = offices()[1])
})
observe({
updateSelectizeInput(session, "queue", i18n_r()$t("Select available queue"), choices = choices())
})
observe({
updateActionButton(session, "submit", label = i18n_r()$t("Print results"))
})
# Translated static labels for the UI.
output$of_label = renderText(i18n_r()$t("Select office"))
output$queue_label = renderText(i18n_r()$t("Select available queue"))
output$submit_label = renderText(i18n_r()$t("Print results"))
output$diagram_label = renderText(i18n_r()$t("Diagram"))
output$state_label = renderText(i18n_r()$t("Current state"))
output$table_label = renderText(i18n_r()$t("Table"))
output$predictions_label = renderText(i18n_r()$t("Predictions"))
output$lang_label = renderText(i18n_r()$t("Select language"))
output$mock_label = renderText(i18n_r()$t("Data source"))
# Queues available for the selected office; falls back to the first
# office while the selector is still empty at startup.
choices <- reactive({
office_name = input$of
if(office_name== '') {
office_name <- offices()[1]
}
#kolejkeR::get_available_queues(office_name)
unique(mock_data[mock_data$name == office_name,'nazwaGrupy'])
})
# Mutable store for the textual results. NOTE(review): `rv` is defined
# elsewhere in the package -- presumably a reactiveValues wrapper; confirm.
results <- rv(res1="", res2="", res3="")
# On submit: fetch the verbose queue descriptions and rebuild the plot.
observeEvent(input$submit,{
# Guard against a stale queue selection that no longer exists.
if (!input$queue %in% choices()) return()
results$res1 <- kolejkeR::get_current_ticket_number_verbose(input$of,input$queue, language = lang())
results$res2 <- kolejkeR::get_number_of_people_verbose(input$of,input$queue, language = lang())
results$res3 <- kolejkeR::get_waiting_time_verbose(input$of,input$queue, language = lang())
results$res4 <- raw_data()
# NOTE(review): these two locals are never used -- the same values are
# read again directly in the renderQueuePlot() call below.
service_booths <- queue_data()[['liczbaCzynnychStan']]
queuers <- queue_data()[['liczbaKlwKolejce']]
output$open_booths_vis <- renderQueuePlot(
queue_data()[['liczbaCzynnychStan']],
queue_data()[['liczbaKlwKolejce']],
input$of,
input$queue
)
})
# Expose the stored results to the UI.
output$result1 <- renderText(results$res1)
output$result2 <- renderText(results$res2)
output$result3 <- renderText(results$res3)
output$result4 <- renderDT(results$res4)
output$result5 <- renderText(i18n$t("Summary"))
}
# Build a renderPlot() showing queue occupancy as a waffle pictogram:
# one glyph per open service booth and per waiting person. Falls back to an
# empty plot with a notice when no booths are open.
renderQueuePlot <- function(service_booths, queuers, office, queue_name) {
  renderPlot({
    if (service_booths > 0) {
      counts <- c(
        `Stanowiska obslugi` = service_booths,
        `Stojący w kolejce` = queuers
      )
      waffle(
        counts,
        rows = service_booths,
        use_glyph = c("institution", "child"),
        colors = brewer.pal(n = 3, name = "Set2")[-3],
        glyph_size = 10
      ) +
        theme(legend.position = "bottom", legend.direction = "horizontal") +
        ggtitle(label = gsub("_", " ", office), subtitle = queue_name)
    } else {
      # No open booths: show a placeholder message instead of a waffle.
      ggplot() +
        ggtitle(label = "Brak otwartych stanowisk!")
    }
  })
}
|
a025b45789119237641a80823af8ad86d58e13fb
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kontchakov/SUBMITTED/Core1108_tbm_28.tex.moduleQ2.2S.000003/Core1108_tbm_28.tex.moduleQ2.2S.000003.R
|
e242aa34285b758520ce4b9b415a61b8e8f79666
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,191
|
r
|
Core1108_tbm_28.tex.moduleQ2.2S.000003.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 3827
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 3635
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 3635
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kontchakov/SUBMITTED/Core1108_tbm_28.tex.moduleQ2.2S.000003.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1375
c no.of clauses 3827
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 3635
c
c QBFLIB/Kontchakov/SUBMITTED/Core1108_tbm_28.tex.moduleQ2.2S.000003.qdimacs 1375 3827 E1 [551 552 553 554 555 556 604 605 606 607 608 609 657 658 659 660 661 662 710 711 712 713 714 715 763 764 765 766 767 768 816 817 818 819 820 821 869 870 871 872 873 874 922 923 924 925 926 927 975 976 977 978 979 980 1028 1029 1030 1031 1032 1033 1081 1082 1083 1084 1085 1086 1134 1135 1136 1137 1138 1139 1187 1188 1189 1190 1191 1192 1240 1241 1242 1243 1244 1245 1293 1294 1295 1296 1297 1298 1346 1347 1348 1349 1350 1351] 0 128 1111 3635 RED
|
c56b51b99f1d52cb8a636c3f80cb25785949e430
|
e3c84ee3a1a19330dbaa43a42cb353011c1b3501
|
/man/set_blocks_from_df.Rd
|
c096f1fd379efd75f541d347531cbf31a38ee9c4
|
[] |
no_license
|
cran/thurstonianIRT
|
d8b2b940629f164e487ec8c02ab1ca097304e529
|
468a68c67b3777ba736dd61628da02b11f1746bc
|
refs/heads/master
| 2023-09-03T02:49:27.465847
| 2023-08-22T15:50:02
| 2023-08-22T16:34:56
| 201,459,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,139
|
rd
|
set_blocks_from_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-helpers.R
\name{set_blocks_from_df}
\alias{set_blocks_from_df}
\title{Prepare blocks of items from a data frame}
\usage{
set_blocks_from_df(
data,
blocks = "block",
items = "item",
traits = "trait",
names = items,
signs = "sign"
)
}
\arguments{
\item{data}{A \code{data.frame} containing all the required columns
(see the arguments below) to specify the item blocks.}
\item{blocks}{Name of column vector denoting the block each item
corresponds to. Each block must have an equal number of items.}
\item{items}{Name of column vector denoting items to be combined into
one block. Should correspond to variables in the data.}
\item{traits}{Name of column vector denoting the traits to which each
item belongs.}
\item{names}{Optional column vector of item names in the output.
Can be used to equate parameters of items across blocks,
if the same item was used in different blocks.}
\item{signs}{Name of column vector with expected signs of the
item loadings (1 or -1).}
}
\description{
Prepare blocks of items and incorporate information
about which item belongs to which trait from a pre-existing dataframe.
This is a wrapper function for \code{\link{set_block}}, eliminating the need
to manually set each item, trait, name and sign (loading) info per block.
}
\details{
A block of items is a set of two or more items presented and answered
together by fully ranking them or selecting the most and/or least favorite
in a forced choice format. A whole test usually contains
several blocks and items may reappear in different blocks.
}
\examples{
block_info <- data.frame(
block = rep(1:4, each = 3),
items = c("i1", "i2", "i3", "i4", "i5", "i6",
"i7", "i8", "i9", "i10", "i11", "i12"),
traits = rep(c("t1", "t2", "t3"), times = 4),
signs = c(1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1)
)
blocks <- set_blocks_from_df(
data = block_info,
blocks = "block",
items = "items",
traits = "traits",
signs = "signs"
)
}
\seealso{
\code{\link{set_block}}
}
|
bad9f21d4432da8adbf685ee390f8ed4ded12b7d
|
725a33f27fce430ee481a3542aae5bb81a94dfc0
|
/man/qcMetric_MSMSScans_TopNoverRT-class.Rd
|
59092102ba1c8bd4e3bda91af295e5a6fb38e2df
|
[
"BSD-3-Clause"
] |
permissive
|
cbielow/PTXQC
|
fac47ecfa381737fa0cc36d5ffe7c772400fb24e
|
f4dc4627e199088c83fdc91a1f4c5d91f381da6c
|
refs/heads/master
| 2023-07-20T00:39:45.918617
| 2023-05-17T14:23:03
| 2023-05-17T14:23:03
| 20,481,452
| 41
| 30
|
NOASSERTION
| 2023-05-17T14:23:04
| 2014-06-04T11:53:49
|
HTML
|
UTF-8
|
R
| false
| true
| 361
|
rd
|
qcMetric_MSMSScans_TopNoverRT-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qcMetric_MSMSScans.R
\docType{class}
\name{qcMetric_MSMSScans_TopNoverRT-class}
\alias{qcMetric_MSMSScans_TopNoverRT-class}
\alias{qcMetric_MSMSScans_TopNoverRT}
\title{Metric for msmsscans.txt, showing TopN over RT.}
\description{
Metric for msmsscans.txt, showing TopN over RT.
}
|
6df4353bdba2af12606c81761d25c393dfa49a7c
|
eec9e00a19babcf7e8d0f7e7596b31fa3e95693b
|
/R/changingn-normaldist.R
|
bcbfba49c2a0538eab240549b3c3ca84efe3d250
|
[] |
no_license
|
biometry/phenologicalOverlap
|
59dc05bf39adeff1525bde6c8ecfb0f2669732a8
|
967d1ca059229de35e243df3190619794e881800
|
refs/heads/master
| 2021-08-24T06:07:10.593531
| 2017-12-08T10:09:15
| 2017-12-08T10:09:15
| 105,532,951
| 1
| 0
| null | 2017-12-08T10:09:16
| 2017-10-02T12:43:28
|
R
|
UTF-8
|
R
| false
| false
| 6,099
|
r
|
changingn-normaldist.R
|
#' Simulate abundance data and calculate overlap for decreasing sample sizes
#'
#' Produces two simulated abundance datasets and determines the overlap
#' measures given by \code{OLest} between them, using three ways of estimating
#' the underlying density: kernel density estimation ('kernel'), fitting a
#' distribution ('fitdistr'), and normalization ('normalize'). The sample size
#' starts at \code{max(ns)}; both datasets are then repeatedly thinned (via
#' \code{betadist.crossout}/\code{normaldist.crossout}) to each smaller value
#' in \code{ns} and the overlap is recomputed. The whole simulate-and-measure
#' cycle is repeated \code{repeatings} times.
#'
#' @param ns vector of whole numbers; the sample sizes for which the overlap
#'   will be determined. Applies to both datasets.
#' @param repeatings whole number; how often the whole procedure (data
#'   production and overlap calculation) is repeated.
#' @param type string; if 'beta', abundance data are drawn from a beta
#'   distribution, otherwise from a normal distribution.
#' @param OLest vector of strings; overlap estimators, passed to \code{OLE}.
#'
#' @return A nested list indexed as
#'   \code{result[[method]][[sample size]][[repetition]]}, where method is one
#'   of 'kernel', 'fitdistr', 'normalize'; each leaf holds the result returned
#'   by \code{OLE(...)$Overlap}.
#'
#' @author Florian Berger <florian_berger@ymail.com>
#'
#' @seealso \code{scale}
#'
#' @examples
#' changingn.normaldist(ns=c(300,50), repeatings = 2, type = 'normal')
#'
#' @export
changingn.normaldist <- function(ns = c(1000,500,200,100,50,20,10), repeatings = 100, type= 'beta', OLest = c('Weitzman', 'Matusita', 'Pianka', 'Morisita', 'Duration', 'WMD', 'Horn','n')){
  time.start <- Sys.time()
  calc <- 1  # running count of finished OLE calls, drives the progress print
  ns <- sort(ns, decreasing = TRUE)  # thin from the largest sample size down
  start.n <- ns[1]
  # Result container: OL[[method]][[sample-size index]][[repetition]]
  OL <- list('kernel' = rep(list(list()), length(ns)),
             'fitdistr' = rep(list(list()), length(ns)),
             'normalize' = rep(list(list()), length(ns)))
  for (dis in c('kernel', 'fitdistr', 'normalize')) {
    OL[[as.character(dis)]][[1]] <- rep(list(data.frame()), repeatings)
  }
  first1 <- TRUE
  for (i in seq_len(repeatings)) {
    # Draw two fresh abundance datasets at the maximum sample size.
    if (type == 'beta') {
      start <- as.Date('2017-01-01')
      end <- start + 250
      a <- BetaData(n = start.n, shape1 = 1, shape2 = 10, startandend = c(start, end))
      b <- BetaData(n = start.n, shape1 = 1, shape2 = 10, startandend = c(start, end))
    } else {
      a <- NormData(mean = 0, sd = 20, n = start.n)
      b <- NormData(mean = 0, sd = 20, n = start.n)
    }
    # Overlap at the full sample size, once per density-estimation method.
    for (dis in c('kernel', 'fitdistr', 'normalize')) {
      OL[[as.character(dis)]][[1]][[i]] <- OLE(a, b, distrest = dis, OLest = OLest)$Overlap
      print(paste0('finished: ', calc / (length(ns) * repeatings * 3) * 100, '%'))
      calc <- calc + 1
    }
    listing <- 2
    if (first1) {
      first <- TRUE
    }
    # Thin both datasets down through the remaining sample sizes.
    for (n in ns[-which(ns == start.n)]) {
      if (first) {
        for (dis in c('kernel', 'fitdistr', 'normalize')) {
          OL[[as.character(dis)]][[listing]] <- rep(list(data.frame()), repeatings)
        }
        first <- FALSE
      }
      red1 <- sum(a$Count) - n  # observations to drop from a
      red2 <- sum(b$Count) - n  # observations to drop from b
      if (type == 'beta') {
        a <- betadist.crossout(a, shape1 = 1, shape2 = 10, reduction = red1, DataCheck = FALSE, start = start, end = end)
        # Bug fix: b must be thinned by its own reduction (red2), not red1,
        # mirroring the normal-distribution branch below.
        b <- betadist.crossout(b, shape1 = 1, shape2 = 10, reduction = red2, DataCheck = FALSE, start = start, end = end)
      } else {
        a <- normaldist.crossout(a, mean = 0, sd = 20, reduction = red1, DataCheck = FALSE)
        b <- normaldist.crossout(b, mean = 0, sd = 20, reduction = red2, DataCheck = FALSE)
      }
      for (dis in c('kernel', 'fitdistr', 'normalize')) {
        OL[[as.character(dis)]][[listing]][[i]] <- OLE(a, b, distrest = dis, OLest = OLest)$Overlap
        print(paste0('finished: ', calc / (length(ns) * repeatings * 3) * 100, '%'))
        calc <- calc + 1
      }
      listing <- listing + 1
    }
    # After the first full pass we can estimate the total runtime.
    if (first1) {
      time.end <- Sys.time()
      print(paste0('estimated time of ending: ', Sys.time() + (repeatings - 1) * (time.end - time.start)))
      first1 <- FALSE
    }
  }
  # Name the per-size result lists by their sample size.
  for (dis in c('kernel', 'fitdistr', 'normalize')) {
    names(OL[[as.character(dis)]]) <- ns
  }
  return(OL)
}
|
5247230665a00be2c2d33a1de8e056091e6476fe
|
7032d3a245a7a6f44c21d2e11051b2687b28192a
|
/pkg/man/unsworth06a.Rd
|
29bcdd351dc85fe4c827b526f49eab35df7df48b
|
[] |
no_license
|
ajwills72/BenchmarksWM
|
e78f38b02cb79523a433b2bc99c199c063809e90
|
32488a5d4f5a33387d6adaaedf299ea0dff3bef3
|
refs/heads/master
| 2020-04-17T22:57:34.451563
| 2019-05-04T08:38:18
| 2019-05-04T08:38:18
| 167,015,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,746
|
rd
|
unsworth06a.Rd
|
\name{unsworth06a}
\alias{unsworth06a}
\docType{data}
\title{
Unsworth & Engle's (2006a) set-size effect on accuracy in serial recall
}
\description{
Individual-subject data for Unsworth & Engle's (2006a) set-size effect
on accuracy in serial recall with simple and complex span tests using
verbal materials.
}
\usage{data("unsworth06a")}
\format{
A data frame with 4935 observations on the following 5 variables.
\describe{
\item{\code{subject}}{Unique ID number for each subject.}
\item{\code{type}}{Type of test: 'simple', 'complex'}
\item{\code{test}}{Test used: 'word', 'letter', 'operation', or 'reading'}
\item{\code{size}}{Set size: 2-8.}
\item{\code{acc}}{Proportion correct}
}
}
\details{
These are the individual-subject data collected by Kane et al. (2004),
further analysed by Unsworth & Engle (2006a), listed as a benchmark
dataset in working memory by Oberauer et al. (2018, BM1.1), and
archived in Oberauer (2019). They comprise the proportion correct
scores for 235 U.S. adults aged 18-35. Each participant completed four
serial recall working memory tests, using: (a) words, (b) letters, (c)
words presented within a math operations task ('operation'), (d)
letters presented within a sentence-reading task ('reading'). In
Unsworth & Engle (2006a), tests (a) and (b) are classified as simple
recall tasks; tests (c) and (d) as complex recall tasks.
}
\source{
Oberauer, K. (2019). BenchmarksWM. https://github.com/oberauer/BenchmarksWM
}
\references{
Kane, M.J., Hambrick, D.Z., Tuholski, S.W., Wilhelm, O., Payne, T.W.,
and Engle, R.W. (2004). The generality of working memory capacity: A
latent-variable approach to verbal and visuospatial memory span and
reasoning. \emph{Journal of Experimental Psychology: General, 133}, 189-217.
Oberauer, K., Lewandowsky, S., Awh, E., Brown, G.D.A, Conway, A.,
Cowan, N., Donkin, C., Farrell, S., Hitch, G.J., Hurlstone, M.J., Ma,
W.J., Morey, C.C., Nee, D.E., Schweppe, J., Vergauwe, E., and Ward,
G. (2018). Benchmarks for models of short-term and working
memory. \emph{Psychological Bulletin, 144}, 885-958.
Unsworth, N., and Engle, R.W. (2006a). Simple and complex memory spans
and their relation to fluid abilities: Evidence from list-length
effects. \emph{Journal of Memory and Language, 54}, 68-80.
}
\examples{
## Reproducing Figure 2A of Oberauer et al. (2018).
data(unsworth06a)
library(tidyverse)
unsplot <- unsworth06a \%>\% filter(size < 8) \%>\%
group_by(type, size) \%>\%
summarise(acc=mean(acc))
unsplot \%>\% ggplot(aes(x=size, y=acc, color=type)) +
geom_point() + geom_line() + ylim(0,1)
}
\keyword{datasets}
|
2988ae7ae49166bc2472f1f5662430a494a7230c
|
d545784864e85b543cbc837863333550fbab31f3
|
/ixp/R/testing_area.R
|
5c939c9a072958589b4f872704c4e8243091a0f9
|
[] |
no_license
|
phil8192/ixp
|
64ebebc667496ead3433a6a55b2868457c65340c
|
09dda001dc2240152ffcb5047f3ea7d42be44772
|
refs/heads/master
| 2021-11-11T05:49:35.800122
| 2021-10-27T16:11:19
| 2021-10-27T16:11:19
| 219,201,400
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 823
|
r
|
testing_area.R
|
# For each dataset in `list`, compute successive differences ("time to
# increase") of two extracted feature series: MsM (feature 2) and Bandwidth
# (feature 1). A single-element input yields one MsM/Bandwidth pair; longer
# inputs yield one pair per element; an empty input returns NULL.
time_2_increase <- function(list) {
  n_elems <- length(list)
  if (n_elems == 1) {
    list(
      MsM = diff_find(extract_features(list, 2, 1)),
      Bandwidth = diff_find(extract_features(list, 1, 1))
    )
  } else if (n_elems > 1) {
    lapply(seq(n_elems), function(k) {
      list(
        MsM = diff_find(extract_features(list, 2, k)),
        Bandwidth = diff_find(extract_features(list, 1, k))
      )
    })
  }
}
# Flatten a (possibly nested) list of numbers into one unnamed vector and
# return the lagged differences between consecutive values.
diff_find <- function(list) {
  flat <- unlist(x = list, use.names = FALSE, recursive = TRUE)
  diff(flat)
}
# Pull feature `i` from every element of dataset `d` in `list`.
# The index range is derived from list[[1]] via seq() (assumes all datasets
# have the same length as the first — TODO confirm with callers).
extract_features <- function (list, i, d) {
  grab <- function(k) list[[d]][[k]][i]
  lapply(seq(list[[1]]), grab)
}
# Compute the per-element gradient (via grads) for every element of `list`.
gradients <- function(list) {
  per_element <- function(k) grads(list, k)
  lapply(seq(length(list)), per_element)
}
grads <- function(list, i){Map("/", list[[i]][2], list[[i]][1])}
|
270e717cb6b715f1abbaba3abcd157241dbfd354
|
66dd0b831434c4ad85bf58e473278883aeadbb1b
|
/analysis/findBestPredictors.R
|
296edd94cb800d54bb54349d51fbf7e4c64a454b
|
[] |
no_license
|
andrewGhazi/bayesianMPRA
|
6e394893c7cfb079ca55698339eaa77c568a6736
|
4b9823f5a05b431adac9caae83737e525ae5d93c
|
refs/heads/master
| 2021-01-20T03:34:35.703409
| 2018-09-17T07:41:10
| 2018-09-17T07:41:10
| 83,831,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,187
|
r
|
findBestPredictors.R
|
# Rank candidate predictors by the strength of their correlation with the
# transcriptional shift (TS), restricted to variants with moderate effects.
# NOTE(review): this script depends on a pre-existing global `varFuns` data
# frame and on dplyr/tidyr/purrr/ggplot2 (%>%/%<>%) already being attached —
# confirm the upstream script order before running.
varFuns %>%
  filter(abs(TS) > .33, abs(TS) < 2) %>%
  select(TS, eigen:DeepSeaDnaase, BroadK562H3k4me1:SydhK562H3k27me3b) %>%
  gather(predictor, val, -TS) %>%
  na.omit %>%
  group_by(predictor) %>%
  summarise(corr = cor(TS, val)) %>%
  arrange(desc(abs(corr)))

# Difference between a variant's mean reference-allele activity and that of
# its nearest-neighbour rows (indexed by NNs) in the global `varFuns`.
getRefNNdiffs = function(NNs, meanRefMu) {
  meanRefMu - varFuns[NNs,]$meanRefMu
}

# Same difference but against 30 randomly sampled rows of `varFuns`.
# NOTE(review): 7803 and 30 are magic numbers — presumably nrow(varFuns) and
# the neighbourhood size; verify they still match the data.
getRefRandDiffs = function(meanRefMu) {
  meanRefMu - varFuns[sample(1:7803, 30),]$meanRefMu
}

# Mutant-allele analogue of getRefNNdiffs.
getMutNNdiffs = function(NNs, meanMutMu) {
  meanMutMu - varFuns[NNs,]$meanMutMu
}

# Mutant-allele analogue of getRefRandDiffs (same magic numbers).
getMutRandDiffs = function(meanMutMu) {
  meanMutMu - varFuns[sample(1:7803, 30),]$meanMutMu
}

# Attach list-columns of neighbour/random activity differences to varFuns
# (%<>% mutates the global in place).
varFuns %<>%
  mutate(RefNNdiffs = map2(kNN, meanRefMu, getRefNNdiffs),
         RefRandDiffs = map(meanRefMu, getRefRandDiffs),
         MutNNdiffs = map2(kNN, meanMutMu, getMutNNdiffs),
         MutRandDiffs = map(meanMutMu, getMutRandDiffs))

# Long-format table of all difference values for variants with |shift| > .33.
tmp = varFuns %>%
  filter(abs(transcriptionalShift) > .33) %>%
  select(contains('iffs')) %>%
  unnest %>%
  gather(src, diffs)

# Histogram of the differences, one panel per source column.
tmp %>%
  ggplot(aes(diffs)) +
  geom_histogram(bins = 50) +
  facet_grid(src ~ .)

# Summary statistics per source column.
tmp %>%
  group_by(src) %>%
  summarise(sd = sd(diffs),
            mn = mean(diffs))
|
9b7ad308a6ec7bd8dbd7ae742933e1c653b2fbd1
|
c0b29712073ce54f3d75e864bdd1f770c688d236
|
/script/archive/Alignment20210324.R
|
4b9d8ed3ca885de2d3ac378269333e5e1f91dbeb
|
[
"MIT"
] |
permissive
|
achiral/rbioc
|
fb6f173430f974e68b5e7e3af6e3e464de9f4d78
|
1a0c5ab2d1eebe2161ba518853179aa7ae2c50a8
|
refs/heads/main
| 2023-08-31T00:43:54.542237
| 2021-10-21T03:18:10
| 2021-10-21T03:18:10
| 414,892,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,859
|
r
|
Alignment20210324.R
|
#Alignment.R
#DECIPHER
#http://www.iu.a.u-tokyo.ac.jp/~kadota/20140612_kadota.pdf
#http://www.iu.a.u-tokyo.ac.jp/~kadota/r_seq.html#about_analysis_general_alignment_multiple
#Biostrings
#http://www.iu.a.u-tokyo.ac.jp/~kadota/bioinfo_ngs_sokushu_2014/20140909_3-4_kadota.pdf
#msa
#https://bioconductor.org/packages/release/bioc/vignettes/msa/inst/doc/msa.pdf
####################################################################################################################
#if (!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
#BiocManager::install("Biostrings")
#BiocManager::install("DECIPHER")
#BiocManager::install("msa")
####################################################################################################################
# Interactive folder picker: asks RStudio (via AppleScript) for a folder and
# returns its POSIX path, or NA if nothing was chosen.
# NOTE(review): macOS-only (osascript) and writes a temp file at /tmp/R_folder.
dir.choose <- function() {
  system("osascript -e 'tell app \"RStudio\" to POSIX path of (choose folder with prompt \"Choose Folder:\")' > /tmp/R_folder",
         intern = FALSE, ignore.stderr = TRUE)
  # Read the chosen path back and delete the temp file in one shell call.
  p <- system("cat /tmp/R_folder && rm -f /tmp/R_folder", intern = TRUE)
  # length(p) is 0 when nothing was chosen; ifelse() then yields NA.
  return(ifelse(length(p), p, NA))
}
setwd(dir.choose())  # interactively choose the working directory
getwd()  # confirm the working directory
dir()  # list its contents
####################################################################################################################
in_f <- "CYP1A2m1F.fasta"  # input FASTA file name, stored in in_f
out_f <- "out.fasta"  # output file name, stored in out_f
# Load the required package
library(DECIPHER)
# Read the input file
fasta <- readAAStringSet(in_f, format="fasta")  # read the file given by in_f
fasta  # just inspecting
# Run the multiple sequence alignment (MSA)
out <- AlignSeqs(fasta)  # store the MSA result in `out`
out  # just inspecting
# Save to file
writeXStringSet(out, file=out_f, format="fasta", width=50)  # write `out` to the chosen file
####################################################################################################################
library(Biostrings)
in_f <- "ABCB1AOKES016F1-PREMIX.fasta"
out_f <- "out.fasta"
fasta <- readDNAStringSet(in_f, format="fasta")
writeXStringSet(fasta, file=out_f, format="fasta")
#hoge <- translate(fasta)
#names(hoge) <- names(fasta)
#writeXStringSet(hoge, file=out_f, format="fasta")
# NOTE(review): this writes `out` (the DECIPHER alignment from the section
# above), overwriting the `fasta` export just written — confirm the intent.
writeXStringSet(out, file=out_f, format="fasta", width=50)
####################################################################################################################
# msa package walkthrough (follows the msa vignette examples)
####################################################################################################################
library(msa)
in_f <- "sequence.txt"
# NOTE(review): mySequenceFile is computed but never used — readAAStringSet
# below reads `in_f` from the working directory, not the package example file.
mySequenceFile <- system.file("examples", in_f, package="msa")
mySequences <- readAAStringSet(in_f)
mySequences
# Default alignment (ClustalW) and various ways of printing it.
myFirstAlignment <- msa(mySequences)
myFirstAlignment
print(myFirstAlignment, show="complete")
# Pretty-printed output: full PDF, then a TeX snippet of columns 164-213.
msaPrettyPrint(myFirstAlignment, output="pdf", showNames="none", showLogo="none", askForOverwrite=FALSE, verbose=FALSE)
msaPrettyPrint(myFirstAlignment, y=c(164, 213), output="asis", showNames="none", showLogo="none", askForOverwrite=FALSE)
# Same sequences aligned with each of the three supported algorithms.
myClustalWAlignment <- msa(mySequences, "ClustalW")
myClustalWAlignment
myClustalOmegaAlignment <- msa(mySequences, "ClustalOmega")
myClustalOmegaAlignment
myMuscleAlignment <- msa(mySequences, "Muscle")
myMuscleAlignment
# Printing options on the default alignment.
print(myFirstAlignment)
print(myFirstAlignment, show="complete")
print(myFirstAlignment, showConsensus=FALSE, halfNrow=3)
print(myFirstAlignment, showNames=FALSE, show="complete")
# Mask the first 100 alignment columns and inspect the masked object.
myMaskedAlignment <- myFirstAlignment
colM <- IRanges(start=1, end=100)
colmask(myMaskedAlignment) <- colM
myMaskedAlignment
unmasked(myMaskedAlignment)
# Consensus matrices of the full and the masked alignment.
conMat <- consensusMatrix(myFirstAlignment)
dim(conMat)
conMat <- consensusMatrix(myMaskedAlignment)
conMat[, 95:104]
# Print a long string wrapped to at most `width` characters per line
# (default: the console width minus one). Each chunk is printed with cat(),
# which appends a space before the newline.
printSplitString <- function(x, width=getOption("width") - 1){
  chunk_starts <- seq(from = 1, to = nchar(x), by = width)
  for (s in chunk_starts) {
    cat(substr(x, s, s + width - 1), "\n")
  }
}
# Consensus sequences under different thresholding schemes.
printSplitString(msaConsensusSequence(myFirstAlignment))
printSplitString(msaConsensusSequence(myFirstAlignment, type="upperlower", thresh=c(40, 20)))
printSplitString(msaConsensusSequence(myMaskedAlignment, type="upperlower", thresh=c(40, 20)))
# Conservation scores using the BLOSUM62 substitution matrix.
data(BLOSUM62)
msaConservationScore(myFirstAlignment, BLOSUM62)
msaConservationScore(myFirstAlignment, BLOSUM62, gapVsGap=0, type="upperlower", thresh=c(40, 20))
msaConservationScore(myMaskedAlignment, BLOSUM62, gapVsGap=0, type="upperlower", thresh=c(40, 20))
# Hemoglobin example: align, convert to seqinr format, build a distance
# matrix and a neighbour-joining tree.
hemoSeq <- readAAStringSet(system.file("examples/HemoglobinAA.fasta", package="msa"))
hemoAln <- msa(hemoSeq)
hemoAln
hemoAln2 <- msaConvert(hemoAln, type="seqinr::alignment")
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run — consider moving installation out of the script.
install.packages("seqinr")
library(seqinr)
d <- dist.alignment(hemoAln2, "identity")
as.matrix(d)[2:5, "HBA1_Homo_sapiens", drop=FALSE]
install.packages("ape")
library(ape)
hemoTree <- nj(d)
plot(hemoTree, main="Phylogenetic Tree of Hemoglobin Alpha Sequences")
# Alternative conversions of the alignment object.
hemoAln3 <- msaConvert(hemoAln, type="bios2mds::align")
str(hemoAln3)
hemoAln4 <- as(hemoAln, "BStringSet")
hemoAln4
# msaPrettyPrint styling variations on columns 164-213.
msaPrettyPrint(myFirstAlignment, output="asis", y=c(164, 213), subset=c(1:6), showNames="none", showLogo="none",
               consensusColor="ColdHot", showLegend=FALSE, askForOverwrite=FALSE)
msaPrettyPrint(myFirstAlignment, output="asis", y=c(164, 213), subset=c(1:6), showNames="none", showLogo="top",
               logoColors="rasmol", shadingMode="similar", showLegend=FALSE, askForOverwrite=FALSE)
msaPrettyPrint(myFirstAlignment, output="asis", y=c(164, 213), showNames="none", shadingMode="similar",
               shadingColors="blues", showLogo="none", showLegend=FALSE, askForOverwrite=FALSE)
msaPrettyPrint(myFirstAlignment, output="asis", y=c(164, 213), showNames="none", shadingMode="functional",
               shadingModeArg="structure", askForOverwrite=FALSE)
msaPrettyPrint(myFirstAlignment, output="asis", y=c(164, 213),
               subset=c(1:6), showNames="none", showLogo="none",
               consensusColor="ColdHot", showLegend=FALSE,
               shadingMode="similar", askForOverwrite=FALSE,
               furtherCode=c("\\defconsensus{.}{lower}{upper}","\\showruler{1}{top}"))
## how much fits on one page depends on the length of names and the number of sequences;
## change to what suits your needs
# NOTE(review): `aln` is never defined in this script — this chunk appears to
# be copied example code; it will fail unless an alignment named `aln` exists.
chunkSize <- 300
for (start in seq(1, ncol(aln), by=chunkSize)){
  end <- min(start + chunkSize - 1, ncol(aln))
  alnPart <- DNAMultipleAlignment(subseq(unmasked(aln), start, end))
  msaPrettyPrint(x=alnPart, output="pdf", subset=NULL, file=paste0("aln_", start, "-", end, ".pdf"))
}
toBibtex(citation("msa"))
|
2d535eeb418598735594aee447af0ef4b98ddbc5
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/man/apigatewayv2_delete_route_request_parameter.Rd
|
37f269de5145042100a7ecc2b4f7ad5922edc1f0
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 739
|
rd
|
apigatewayv2_delete_route_request_parameter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_operations.R
\name{apigatewayv2_delete_route_request_parameter}
\alias{apigatewayv2_delete_route_request_parameter}
\title{Deletes a route request parameter}
\usage{
apigatewayv2_delete_route_request_parameter(
ApiId,
RequestParameterKey,
RouteId
)
}
\arguments{
\item{ApiId}{[required] The API identifier.}
\item{RequestParameterKey}{[required] The route request parameter key.}
\item{RouteId}{[required] The route ID.}
}
\description{
Deletes a route request parameter. Supported only for WebSocket APIs.
See \url{https://www.paws-r-sdk.com/docs/apigatewayv2_delete_route_request_parameter/} for full documentation.
}
\keyword{internal}
|
1c7359a30d9c7b4b2ac0e15843c2263972d33092
|
f6b0bdf61d4eb3793f8c14c720ab4fd26aabbf70
|
/New_2020/prep_data.R
|
ebd027f770d2e8c0dda47b1bdf585ffecfef6b02
|
[] |
no_license
|
RafaelSdeSouza/Medical_stat
|
6f077d8488ce6a8419ba982aa55da02a2b47c294
|
459d7fbad0886772cd09efedaba6f16811a27d6f
|
refs/heads/master
| 2021-06-24T08:47:46.511315
| 2020-11-13T13:55:16
| 2020-11-13T13:55:16
| 157,614,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,689
|
r
|
prep_data.R
|
# Prep data
# Pregnancy (tubal-reversal outcome) data: read the anonymized CSV, clean it,
# randomly pick one tube side (left/right) per patient, recode categorical
# measures, and write the modelling dataset to Outcomes.csv.
# NOTE(review): require() is used for hard dependencies — library() would fail
# loudly if a package is missing; confirm all are installed.
require(dplyr)
require(magrittr)
require(mgcv)
require(caret)
library(pROC)
require(reshape)
require(randomForest)
library(vip) # ML global interpretation
library(pdp) # ML global interpretation
library(ggplot2) # visualization pkg leveraged by above packages
require(xgboost)
require(DALEX)
source("my_style.R")
require(ggthemes)
require(kernlab)
require(forcats)
require(VGAM)
# Read outcomes and keep only the columns needed for side selection/modelling.
# NOTE(review): the two DIAMETER columns are listed twice in this select();
# dplyr keeps one copy, so the duplication is harmless but should be removed.
# NOTE(review): filter(AGE != "Yes") looks like a data-entry cleanup — some
# AGE cells presumably contain "Yes"; verify against the raw CSV.
outcomes <- read.csv("BTA-Pregnancies-anonymized.csv") %>% dplyr::select(c("OutcomeGpNumeric",
                                          "LIGATION_GROUP",
                                          "AGE",
                                          "TUBELENGTH_L_DISTAL", "TUBELENGTH_R_DISTAL",
                                          "LEFT_TUBE_LENGTH", "RIGHT_TUBE_LENGTH",
                                          "TUBELENGTH_L_PROX", "TUBELENGTH_R_PROX",
                                          "L_DIAMETER_NUMERIC","R_DIAMETER_NUMERIC",
                                          "L_DIAMETER_NUMERIC", "R_DIAMETER_NUMERIC",
                                          "L_FIBROSIS_NUMERIC", "R_FIBROSIS_NUMERIC",
                                          "ANASTOMOSIS2_NUMERIC","ANASTOMOSIS1_NUMERIC"
)) %>%
  filter(AGE != "Yes") %>% mutate(AGE = as.numeric(as.character(AGE)) ) %>%
  filter(AGE > 10) %>%
  na.omit() %>% mutate(LEFT_TUBE_LENGTH = as.numeric(as.character(LEFT_TUBE_LENGTH)) ) %>%
  mutate(TUBELENGTH_L_DISTAL = as.numeric(as.character(TUBELENGTH_L_DISTAL)) ) %>%
  droplevels()

# Pick left (1) or right (2) tube for each woman via a Bernoulli draw
# (seeded for reproducibility).
set.seed(42)
rlist <- rbinom(nrow(outcomes),1,0.5) + 1

# Paired left/right columns, in the same order as rlist indexes them.
temp1 <- outcomes[,c("LEFT_TUBE_LENGTH","RIGHT_TUBE_LENGTH")]
temp2 <- outcomes[,c( "TUBELENGTH_L_DISTAL", "TUBELENGTH_R_DISTAL")]
temp3 <- outcomes[,c("TUBELENGTH_L_PROX", "TUBELENGTH_R_PROX")]
temp4 <- outcomes[,c("ANASTOMOSIS2_NUMERIC","ANASTOMOSIS1_NUMERIC")]
temp5 <- outcomes[,c("L_FIBROSIS_NUMERIC","R_FIBROSIS_NUMERIC")]
temp6 <- outcomes[,c("L_DIAMETER_NUMERIC", "R_DIAMETER_NUMERIC")]

# One value per patient, taken from the randomly selected side.
# NOTE(review): row-wise growth with append() is O(n^2); fine at this data
# size but preallocation would be cleaner.
TL_rand <- c()
TLD_rand <- c()
TLP_rand <- c()
ANAS_rand <- c()
Fibr_rand <- c()
Diam_rand <- c()
for (i in 1:nrow(outcomes)) {
  TL_rand <- append(TL_rand,temp1[i,rlist[i]])
  TLD_rand <- append(TLD_rand,temp2[i,rlist[i]])
  TLP_rand <- append(TLP_rand,temp3[i,rlist[i]])
  ANAS_rand <- append(ANAS_rand,temp4[i,rlist[i]])
  Fibr_rand <- append(Fibr_rand,temp5[i,rlist[i]])
  Diam_rand <- append(Diam_rand,temp6[i,rlist[i]])
}

# Create the modelling dataset: selected-side features plus recoded,
# ordered factor levels for fibrosis, anastomosis location, and diameter.
outcomes2 <- outcomes %>%
  mutate(TL_rand = TL_rand) %>%
  mutate(TLD_rand = TLD_rand) %>%
  mutate(TLP_rand = TLP_rand) %>%
  mutate(ANAS_rand = ANAS_rand) %>%
  mutate(Fibr_rand = Fibr_rand) %>%
  mutate(OutcomeGpNumeric = as.factor(OutcomeGpNumeric)) %>%
  mutate(Diam_rand = Diam_rand) %>%
  dplyr::select(c("OutcomeGpNumeric",
                  "LIGATION_GROUP",
                  "AGE", "TL_rand","ANAS_rand","Fibr_rand",
                  "Diam_rand")) %>%
  mutate(Fibr_rand = recode(Fibr_rand, "0" = "None",
                            "1" = "Mild",
                            "2" = "Moderate",
                            "3" = "Severe")) %>%
  mutate(Fibr_rand = as.factor(Fibr_rand)) %>%
  mutate(Fibr_rand = factor(Fibr_rand,levels=c("None","Mild","Moderate","Severe"))) %>%
  mutate(ANAS_rand = recode(ANAS_rand, "0" = "Identical",
                            "1" = "1-SPD",
                            "2" = "2-SPD",
                            "3" = "3-SPD")) %>%
  mutate(ANAS_rand = factor(ANAS_rand,levels=c("Identical","1-SPD","2-SPD","3-SPD"))) %>%
  mutate(Diam_rand = recode(Diam_rand, "1" = "Similar","2" = "Somewhat dissimilar","3" = "Dissimilar")) %>%
  mutate(Diam_rand = factor(Diam_rand,levels=c("Similar","Somewhat dissimilar","Dissimilar")))

# Human-readable column names for the model input.
colnames(outcomes2) <- c("OutcomeGpNumeric","Sterilization_Method", "Age", "Length","Location","Fibrosis",
                         "Diameter")
#colnames(outcomes2) <- c("OutcomeGpNumeric", "Age", "Length","Location","Fibrosis",
#                         "Diameter")

# Label the outcome groups and export the final dataset.
outcomes3 <- outcomes2 %>%
  mutate(OutcomeGpNumeric = recode(OutcomeGpNumeric,
                                   "1"="Birth","2"="Ongoing","3" = "Miscarriage",
                                   "4" = "Ectopic"))
write.csv(outcomes3,"Outcomes.csv",row.names = F)
|
bfc74dac02f23dd8627cdc42ff7496d04367ae76
|
c12b98c7d6f8cbf4953cc3ed01bb74404ceb9515
|
/R/hotspot2SplitR.R
|
696d25f1214dd2abc9c1df32902f0073eb0f28ab
|
[
"MIT"
] |
permissive
|
pavlovc2/goessmoke
|
73ced5fd78f3a0ceaf72748fbe563dd5aff50f62
|
d4c0940ef6a4d7960ef37ce80ddbc5c0c87415da
|
refs/heads/master
| 2020-03-19T00:08:22.570268
| 2018-08-01T06:45:16
| 2018-08-01T06:45:16
| 135,455,902
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,974
|
r
|
hotspot2SplitR.R
|
#' Convert GOES-16 hotspot data to input format for dispersion modeling
#'
#' This function reformats csv data to the required formatting for
#' dispersion modeling, either data frame or json.
#'
#' @param in_df data.frame. Prepared GOES-16 hotspot data.
#' @param out_format string. "df" for data.frame or "json".
#' @param config_file string. Path to input config_file for HYSPLIT run
#' @return The desired output format to be input to the dispersion model
#' @note NOTE(review): this function is an unfinished draft and does not
#'   parse as written. The "df" branch has empty assignments and missing
#'   commas; the "json" branch references objects that are never defined
#'   (`disp_start`, `out_dat`, a json template) and `UUIDgenerate()` from the
#'   uuid package without a namespace. See inline notes before use.
hotspot2disp <- function(in_df, out_format, config_file) {
config <- read.csv(config_file)
if(nrow(config) != 1) {
stop("Error: Incorrect number of rows in config_file.
Expect 1 header row and one config row.")
}
if (out_format == "df") {
# TODO: Calculate SEV plume rise
# NOTE(review): assignment below has no right-hand side — parse error.
plume_height <-
# TODO Calculate range based on max distance from fire
# Create data frame
# NOTE(review): "rate =" through "fire_start_hour =" have no values;
# "range"/"division" are zero-length (c()); "lat" through "start_hour"
# are missing trailing commas; "duration" appears twice.
out_dat <- data.frame(
rate =
duration =
fire_start_day =
fire_start_hour =
pdiam = 1,
density = 1,
shape_factor = 1,
range = c(),
division = c(),
lat = in_df$lat
lon = in_df$lon
height = plume_height
duration = config$disphr
start_day = as.Date(disp_start) # TODO: Finalize timezone handling
start_hour = format(disp_start, "%H")
direction = "forward",
met_type = "hrrr")
# Return data.frame
return(out_dat)
} else if (out_format == "json") {
# TODO: Convert templatejson to internal data as in:
# http://r-pkgs.had.co.nz/data.html
# Check all required variables are defined
# NOTE(review): trailing comma after "viz_img_dir" makes c() error at
# runtime ("argument is empty").
required_vars <- c("grdlatne","grdlonne", "grdlatsw",
"grdlonsw", "grdspace", "disphr", "disp_out_dir", "disp_start",
"export_out_dir", "met_dat_index", "met_dir", "viz_data_dir",
"viz_img_dir", )
if (!all(required_vars %in% names(config))) {
# NOTE(review): condition is inverted — this selects the variables that
# ARE present; should be required_vars[!(required_vars %in% names(config))].
missing_vars <- required_vars[required_vars %in% names(config)]
stop(paste0("Error: Required variables not included", paste(missing_vars, collapse = ", ")))
}
# Assign config variables
# NOTE(review): assign() into the function environment, one local variable
# per config column.
for (col in names(config)) {
assign(col, config[[col]])
}
# Fire inputs
firecnt <- nrow(in_df)
# TODO: Parse in_df to json fire info
# NOTE(review): toJSON() called with no arguments — placeholder only.
fireinfoall <- jsonlite::toJSON()
# Run ID
trunid <- UUIDgenerate()
# Today's date
today_dat <- as.Date(Sys.time())
# Create fire info
fire_info_all <- NULL
# NOTE(review): loop extracts whole columns (not row irow) and discards
# them; the append step is still a TODO.
for (irow in 1:nrow(in_df)) {
event_id <- paste0("id", sample(1e12, 1))
start_time <- in_df$start_time
end_time <- in_df$end_time
fire_area <- in_df$area
fire_ecoregion <- "western"
fire_lat <- in_df$lat
fire_lon <- in_df$lon
pm25_tot <- in_df$pm25
pm_tot <- in_df$pm25
pm25_tot_flam <- in_df$pm25/3
pm25_tot_smold <- in_df$pm25/3
pm25_tot_resid <- in_df$pm25/3
heat_tot <- in_df$heat/3
heat_tot_flam <- in_df$heat/3
heat_tot_smold <- in_df$heat/3
heat_tot_resid <- in_df$heat/3
# Insert data into template
# Append irow to fire_info_all
}
# Find and replace variable names in document
# Insert full fire info
# NOTE(review): out_dat is never created in this branch.
return(out_dat)
}
}
|
9dbb0742964b6d04a41d2630b653d1e32492d508
|
8cf07299550c0486dd03b27bedefeb169798d61d
|
/man/asset.Rd
|
179a7bae014b4b2ea4733c813da6c54cd44cc3ec
|
[] |
no_license
|
common2016/CapitalStock
|
1283ad29de754dd485694166155f5cc2c93584da
|
27b228156776da4192aaf513da047bdbb68758ec
|
refs/heads/master
| 2023-03-04T09:53:29.639363
| 2021-02-14T11:00:32
| 2021-02-14T11:00:32
| 265,717,352
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 638
|
rd
|
asset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asset.R
\docType{data}
\name{asset}
\alias{asset}
\title{Assets}
\format{
An object of class \code{data.frame} with 2002 rows and 6 columns.
}
\usage{
asset
}
\description{
A dataset containing investment, the indices of investment and
the price indices of investment
}
\details{
A data frame with the following columns:
\describe{
\item{prv}{provinces}
\item{yr}{year}
\item{invest}{total fixed capital formation}
\item{InvestIndex}{index of fixed capital formation}
\item{InvestPrice}{price index of investment in fixed assets}
\item{depr}{depreciation}
}
}
\keyword{datasets}
|
440a5d5b28848506d9e85819af3a6a89dd8f1d80
|
0100f53e831a4ca2de4bcdfaa5d0cd9473c7f81f
|
/process/src/get_GLPF_blank_GRnums.R
|
0557f52c2f1175cffdf60a7fa9cc00554994daad
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ldecicco-USGS/GLPF_manuscript
|
01d064c299fcbcc2b43f666acdf0291e908b1507
|
73ed0b1d0fe47d739d0b42606d6bd1a018b02f1b
|
refs/heads/master
| 2021-09-23T03:53:21.118859
| 2021-09-09T17:18:44
| 2021-09-09T17:18:44
| 157,996,434
| 0
| 1
| null | 2021-09-09T17:18:45
| 2018-11-17T15:14:28
|
R
|
UTF-8
|
R
| false
| false
| 993
|
r
|
get_GLPF_blank_GRnums.R
|
# GLPF: identify blank GR numbers. Much of the work was done when separating
# QA data in the original GLPF munging on the repo that Laura developed for GLPF modeling.
#
library(dplyr)
#' Identify GLPF field-blank samples and save their GR numbers.
#'
#' Reads the GLPF QA summary ("raw/GLPF/summary_QA.rds"), flags rows marked
#' as blanks either via the free-text Comments field or via the sample-type
#' code column (code 2 = blank), and writes the matching CAGRnumber values to
#' "process/out/GLPF_blank_GRnumbers.rds".
#'
#' @return Invisibly NULL (the value of saveRDS); called for the side effect
#'   of writing the output .rds file.
get_GLPF_blank_GRnums <- function(){
  # Use GLPF summary file for defining blanks
  df <- readRDS(file.path("raw","GLPF","summary_QA.rds"))
  df$FieldExpID <- gsub(" ","",df$FieldID)
  # A sample counts as a blank if either criterion flags it:
  #   (1) "blank" appears anywhere in Comments, or
  #   (2) the sample-type column contains "2" (blank code).
  # NOTE(review): grep() matches substrings, so any value containing the
  # digit 2 (e.g. "12") would also match — confirm the column is single-digit.
  # NOTE(review): per the original author, sample RM015 carries the blank
  # type code but is reportedly not a blank — verify before relying on it.
  blankRows1 <- grep("blank", df$Comments, ignore.case = TRUE)
  blankRows2 <- grep("2", df$SampleType9regular2blank7replicate, ignore.case = TRUE)
  # Union of the two criteria. (Previous version built intermediate data
  # frames and discarded diagnostic sum() results; that dead code is removed.)
  blankRows <- unique(c(blankRows1, blankRows2))
  blankGRnums <- df[blankRows, "CAGRnumber"]
  saveRDS(data.frame(GRnumbers = blankGRnums),
          file = file.path("process","out","GLPF_blank_GRnumbers.rds"))
}
|
74f18882f0efc8812b1cf4ac85198368d4e15a0a
|
90bd757ecbf7ce9d97403345e233412c0f79a7bc
|
/README.rd
|
36562732750d65bbb5f27a58ae9993133cff064a
|
[] |
no_license
|
gtr0y/elastic-unit
|
38833bf303196737ccdba3d3f9bb5c8c5b1bedc9
|
789a7b60b6c429ef9a630600f6573bbe23b93cbf
|
refs/heads/main
| 2023-01-03T14:12:22.638291
| 2020-11-02T13:02:12
| 2020-11-02T13:02:12
| 305,785,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 115
|
rd
|
README.rd
|
This installs Elasticsearch across 8 nodes (3 master, 3 data, 2 control).
In version 1, certificate creation is manual.
|
70723427155ad6816f491837fd46ac571c7b9aeb
|
7454b821ef28f09c165ab76960d9fbd41220eee2
|
/Rscript/7- T2 Centralized_3.R
|
a1abc4800e7b9f11bd3b395dc04cfbe2afd2d8c4
|
[] |
no_license
|
galuoc87/Function
|
91852e369eb0a9b37f2a445dc2255a2c6db4323b
|
69e93afff462b975286b86788928fed9ead5da32
|
refs/heads/master
| 2021-06-30T10:53:45.569864
| 2017-09-19T05:41:35
| 2017-09-19T05:41:35
| 100,446,833
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,379
|
r
|
7- T2 Centralized_3.R
|
# Try to install package tidyverse
install.packages("tidyverse")
# Call functions in "tivyverse" every day
library(tidyverse)
#Add library: stringr
library(stringr)
#Add lubridate
library(lubridate)
# Add geosphere to calculate the Geographic distance:
library(sp)
library(geosphere)
#add the padr
install.packages("padr")
library(padr)
# Proceed with the list of customers (narrowed for the centralized delivery).
# Set the working path (machine-specific; only the last successful setwd wins).
setwd("D:/Goolge Drive/FOBRA/02. Project Documents/04. Data Analysis/Analysis BAO/Data/Master Data/") #company
setwd("D:/Google Drive/FOBRA/02. Project Documents/04. Data Analysis/Analysis BAO/Data/Master Data/") #home
# Load the shortlisted customer master and flag every row as available.
df.cus.t2.centralized <- read_csv("Customer_Master_v02.csv")
df.cus.t2.centralized <- df.cus.t2.centralized %>%
mutate(Check.avai = rep(1,times=n()))
# Keep only the join key (Dest.Code), customer Group and the availability flag.
df.cus.t2.centralized <- df.cus.t2.centralized %>%
select(Dest.Code,Group,Check.avai)
# Per-shipment totals; drop shipments with missing pallets or non-positive cost.
# NOTE(review): assumes df.transportation is already loaded earlier in the
# workflow — not defined in this script.
df.success.transportation <- df.transportation %>%
group_by(Shipment.Document)%>%
arrange(Shipment.Document) %>%
mutate(Totalfrieghtcost = sum(actual.Net.freight.cost),
Occurance = n(),
Rank = row_number()
) %>%
filter(!(is.na(actual.pallet) | Totalfrieghtcost <= 0))
# Keep only shipments to the shortlisted customers (inner-join semantics via
# the availability flag), excluding zero-pallet rows.
df.t2.cen.shipment <- df.success.transportation %>%
left_join(df.cus.t2.centralized, by = "Dest.Code") %>%
filter(!is.na(Check.avai)) %>%
filter(!(actual.pallet==0))
# Add great-circle distance (km) from origin plant to destination customer.
df.t2.cen.shipment <- df.t2.cen.shipment %>%
rowwise() %>%
mutate(Distance.GPS = distm (c(df.GPS.site$lng[match(Fiscal.Unit,df.GPS.site$`Planta Origem`)],
df.GPS.site$lat[match(Fiscal.Unit,df.GPS.site$`Planta Origem`)]),
c(df.GPS.customer$lng[match(Dest.Code,df.GPS.customer$`SAP Recebedor Mercadoria`)],
df.GPS.customer$lat[match(Dest.Code,df.GPS.customer$`SAP Recebedor Mercadoria`)]
), fun = distHaversine)/1000
)
# Consolidate multi-drop shipments: totals are attributed to the LAST drop of
# each shipment (Rank == Occurance); other drops get zeros.
df.t2.cen.shipment <- df.t2.cen.shipment %>%
arrange(Shipment.Document,Distance.GPS) %>%
group_by(Shipment.Document) %>%
mutate (Totalvolume = sum(actual.Gross.weight.Kg),
Totalvol.pallet = sum(actual.pallet),
Totalvol.trancost= sum(actual.Net.freight.cost),
Occurance = n(),
Rank =row_number()) %>%
mutate (consol.weight = ifelse(Rank ==Occurance ,Totalvolume,0 ),
consol.pallet = ifelse(Rank ==Occurance ,Totalvol.pallet,0 ),
consol.trancost= ifelse(Rank ==Occurance ,Totalvol.trancost,0 ),
Cost.of.first.leg = ifelse( Fiscal.Unit == 7032 | Fiscal.Unit == 7264,0,
df.cost.kg.1st.leg$`Cost per Kg`[match(Dest.State,df.cost.kg.1st.leg$state)]*actual.Gross.weight.Kg)
) %>%
mutate (filling.rate = ifelse(Rank ==Occurance,( Totalvolume / df.trucksize$`Capacity.(KG)`[match(Vehicle.SAP.code,df.trucksize$SAP.code)]*100),0 )) %>%
ungroup()
# Total actual transportation cost (diagnostic; value from last run noted).
sum(df.t2.cen.shipment$actual.Net.freight.cost) #9598381
# Theoretical (tariff-based) transportation cost for each consolidated drop,
# looked up by "fiscal-city-trucktype" key.
df.t2.cen.shipment <- df.t2.cen.shipment %>%
mutate(consol.fiscal = ifelse(Rank ==Occurance ,Fiscal.Unit,0 ),
consol.city = ifelse(Rank ==Occurance ,str_to_upper(Dest.City..SAP.),"0" ),
consol.truck.type = ifelse(Rank ==Occurance ,Vehicle.SAP.code,"0" )
) %>%
mutate(Lookup.current = paste(consol.fiscal,consol.city,consol.truck.type,sep="-"),
Current.convert.transcost =ifelse( consol.pallet ==0,0,
df.cost.transportation$TARIFA.FRETE[match(Lookup.current,df.cost.transportation$Look.Up)]
)
)
# Export rows whose tariff lookup failed, for manual inspection.
Check.erro.Rodo = df.t2.cen.shipment %>%
filter(is.na(Current.convert.transcost))
write.csv(df.t2.cen.shipment, file = "C:/Users/Bao/Desktop/T2basedline.csv", row.names = FALSE)
write.csv(Check.erro.Rodo, file = "C:/Users/Bao/Desktop/errorRodo.csv", row.names = FALSE)
sum(df.t2.cen.shipment$Current.convert.transcost) #15,062,304
# Keep T1 and T2 only (drop BRO flows).
df.t2.cen.shipment <- df.t2.cen.shipment %>%
filter(!(T1.T2.x == "BRO"))
sum(df.t2.cen.shipment$actual.Net.freight.cost)
sum(df.t2.cen.shipment$Current.convert.transcost)
# NOTE(review): this writes df.t2.cen.shipment_2 BEFORE it is created two
# lines below — fails on a fresh session unless _2 survives from a prior run.
write.csv(df.t2.cen.shipment_2, file = "C:/Users/Bao/Desktop/T2 consolidation.csv", row.names = FALSE)
# Optionally filter out totals below 0.8 pallet (currently disabled).
df.t2.cen.shipment_2 <- df.t2.cen.shipment
#filter((Totalvol.pallet > 0.8)) # will adjust here
sum(df.t2.cen.shipment_2$actual.Net.freight.cost) # 8044561
sum(df.t2.cen.shipment_2$Current.convert.transcost) # 10334165
# Choose a DC per state x group: the customer with the highest shipped weight
# becomes the distribution-center destination for that state/group.
df.t2.cen.shipment_3 <- df.t2.cen.shipment_2 %>%
group_by(Dest.State,Dest.City..SAP.,Group,Dest.Code) %>%
summarise(Vol.weight = sum(actual.Gross.weight.Kg),
Vol.Pallet = sum(actual.pallet)) %>%
group_by(Dest.State,Group)%>%
mutate( Max.volume = max(Vol.weight),
Availabe = ifelse(Vol.weight == Max.volume,1,0 )
)%>%
ungroup()
df.t2.cen.shipment_3 <- df.t2.cen.shipment_3 %>%
filter((Availabe ==1)) %>%
select(Dest.State,Dest.City..SAP.,Group,Dest.Code)
# Build 7-day consolidation buckets over the (gap-padded) calendar of
# transport dates.
days_group <- df.t2.cen.shipment_2 %>%
select(transport.Date) %>%
distinct(transport.Date) %>%
pad()
days_group <-
days_group %>%
mutate(day.nb = 1:nrow(days_group)) %>%
mutate(day.group = floor((7-1+day.nb)/7)) %>% #adjust
select(transport.Date, day.group)
# Attach the day-group bucket back onto the shipment rows.
df.t2.cen.shipment_2 <- df.t2.cen.shipment_2 %>%
left_join(days_group, by ="transport.Date")
# One "drop" counter per row, used to count shipments after grouping.
df.t2.cen.shipment_2 <- df.t2.cen.shipment_2 %>%
mutate(drop = rep(1,times=n()) )
# Aggregate demand per bucket x state x group.
df.t2.cen.shipment_4 <- df.t2.cen.shipment_2 %>%
group_by(day.group,Dest.State,Group) %>%
summarise(Total.shipment = sum(drop),
vol.Kg = sum(actual.Gross.weight.Kg),
vol.pallet = sum(actual.pallet),
vol.trancost= sum(actual.Net.freight.cost),
vol.trancost1= sum(Current.convert.transcost),
Cost.1st.leg = sum(Cost.of.first.leg),
Tot.Baseline = Cost.1st.leg + vol.trancost
) %>%
ungroup()
# All consolidated flows originate from plant 7032.
df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
mutate(Fiscal = rep(7032,times=n()) )
# Attach the chosen DC destination for each state/group.
df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
left_join(df.t2.cen.shipment_3, by = c("Dest.State","Group"))
# Distance (km) from the origin plant to the DC customer.
df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
rowwise() %>%
mutate(Distance.GPS = distm (c(df.GPS.site$lng[match(Fiscal,df.GPS.site$`Planta Origem`)],
df.GPS.site$lat[match(Fiscal,df.GPS.site$`Planta Origem`)]),
c(df.GPS.customer$lng[match(Dest.Code,df.GPS.customer$`SAP Recebedor Mercadoria`)],
df.GPS.customer$lat[match(Dest.Code,df.GPS.customer$`SAP Recebedor Mercadoria`)]
), fun = distHaversine)/1000
)
# New-model cost: fill full "careta" trucks (28 pallets / 27,000 kg), price
# them via the BR024 tariff, then assign a smaller truck for the remainder.
df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
mutate(Nu.Careta = floor(vol.pallet / 28),
Fillrate.Careta = ifelse(Nu.Careta >0,vol.Kg / (Nu.Careta*27000),0),
Remaining = vol.pallet - Nu.Careta *28,
RemainingKg = ifelse((vol.Kg - Nu.Careta*27000)<0,0, vol.Kg - Nu.Careta*27000),
Lookup.careta = paste(Fiscal,str_to_upper(Dest.City..SAP.),"BR024",sep="-"),
Cost.careta = (df.cost.transportation$TARIFA.FRETE[match( Lookup.careta ,df.cost.transportation$Look.Up)])*Nu.Careta
) %>%
# Map the remaining pallet count onto the smallest suitable truck size.
mutate(S.Truck.size = (Remaining>0)*(Remaining<=1)*1
+(Remaining>1)*(Remaining<=4)*4
+(Remaining>4)*(Remaining<=8)*8
+(Remaining>8)*(Remaining<=12)*12
+(Remaining>12)*(Remaining<=28)*28
#(Re.pal.state>0)*(Re.pal.state<=28)*28
+(Remaining>28)*48) # Assign the suitable truck for the remainder
df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
mutate(S.Truck.code= ifelse(S.Truck.size == 0,"0",df.trucksize$SAP.Replace.Code[match(S.Truck.size,df.trucksize$Pallets)]),
S.Truck.sizeKg = ifelse(S.Truck.size == 0,0,df.trucksize$`Capacity.(KG)`[match(S.Truck.code,df.trucksize$SAP.Replace.Code)])
) %>%
mutate(Lookup.other = paste(Fiscal,str_to_upper(Dest.City..SAP.),S.Truck.code,sep="-"),
cost.other = df.cost.transportation$TARIFA.FRETE[match(Lookup.other,df.cost.transportation$Look.Up)],
Tot.NewModel = cost.other + Cost.careta,
CostSaving = Tot.NewModel - Tot.Baseline,
new.shipment = ifelse(S.Truck.size>0,1,0),
Filling.rate.Kg = ifelse(RemainingKg == 0,0,RemainingKg/S.Truck.sizeKg)
)
# Export rows whose tariff lookups failed, for manual inspection.
Check.erro.Rodo = df.t2.cen.shipment_4 %>%
filter(is.na(Cost.careta))
write.csv(Check.erro.Rodo, file = "errorRodo1.csv", row.names = FALSE)
Check.erro.Rodo = df.t2.cen.shipment_4 %>%
filter(is.na(cost.other))
write.csv(Check.erro.Rodo, file = "errorRodo2.csv", row.names = FALSE)
# Load the first-leg cost table.
# NOTE(review): df.cost.kg.1st.leg is USED above (Cost.of.first.leg) but only
# read here — on a fresh session the earlier step fails unless this runs first.
setwd("C:/Users/Do Dinh Bao/Desktop/") #home
setwd("C:/Users/Bao/Desktop/") #company
df.cost.kg.1st.leg<- read_csv("Cost.kg.1st.leg.csv")
# Add the cost for the first leg (disabled alternative formulation).
#df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
#mutate(Cosf.1st.leg = df.cost.kg.1st.leg$`Cost per Kg`[match(Dest.State,df.cost.kg.1st.leg$state)]*vol.Kg
#)
# Summary per group x state: old vs new shipments, costs, savings, fill rates.
sumtab_T2.centralized <- df.t2.cen.shipment_4 %>%
group_by(Group,Dest.State) %>%
summarise(Old.Shipment = sum(Total.shipment),
New.Shipment = sum(Nu.Careta) + sum(new.shipment),
Old.cost = sum(Tot.Baseline),
New.cost = sum(cost.other) + sum(Cost.careta),
total.costsaving = sum(CostSaving),
total.weight = sum(vol.Kg ),
fill.rate.kg = mean(Filling.rate.Kg[Filling.rate.Kg>0]),
fill.rate.kg.careta = mean(Fillrate.Careta[Fillrate.Careta>0]),
Distance = mean(Distance.GPS),
Avaliable = ifelse(total.costsaving >0,1,0)
)
write_csv (sumtab_T2.centralized, "t2.centralized.summary.7day.csv" )
# NOTE(review): sumtab_T2.centralized has no cost.diff column (it is named
# total.costsaving) — the sum below returns 0 with a warning.
sum(sumtab_T2.centralized$cost.diff)
write.csv(df.t2.cen.shipment_4, file = "T2.centralized.at.state.7 day.csv", row.names = FALSE)
write_csv (df.t2.cen.shipment, "t2.centralized.basedline.csv" )
# Create the lookup table (group x state x availability) to filter the master
# data later.
sumtab_T2.centralized_4 <- sumtab_T2.centralized %>%
select(Group,Dest.State,Avaliable)
# Truck profile: count shipments per remainder-truck code.
# NOTE(review): this OVERWRITES sumtab_T2.centralized, which the 14-day
# section below joins on — the Avaliable column is lost from this point on.
sumtab_T2.centralized <- df.t2.cen.shipment_4 %>%
group_by(Group,Dest.State,S.Truck.code) %>%
summarise(
Shipment = n()
)
# Wide shipment profile: one column per truck code.
sumtab_T2.centralized_2 <- spread( sumtab_T2.centralized , key = S.Truck.code , value = Shipment )
sumtab_T2.centralized_2[is.na(sumtab_T2.centralized_2)] <-0
# NOTE(review): df.t2.cen.shipment_4 has Nu.Careta, not Nu.RoRo — this
# summarise errors unless Nu.RoRo exists from another run of the script.
sumtab_T2.centralized_3 <- df.t2.cen.shipment_4 %>%
group_by(Group,Dest.State) %>%
summarise(
BR012.1 = sum(Nu.RoRo)
)
sumtab_T2.centralized_2 <- sumtab_T2.centralized_2 %>%
left_join(sumtab_T2.centralized_3 )
sumtab_T2.centralized_2[is.na(sumtab_T2.centralized_2)] <-0
# NOTE(review): BR012 must exist as a spread column for this to work;
# confirm the truck-code set in the data.
sumtab_T2.centralized_2 <- sumtab_T2.centralized_2 %>%
mutate(BR012.2 =BR012+BR012.1)%>%
select(Group,Dest.State,BR300,BR063,BR065,BR020,BR024,BR012.2)
write.csv(sumtab_T2.centralized_2, file = "C:/Users/Bao/Desktop/Truck-profile.csv", row.names = FALSE)
# [Step] Keep only group/state combinations with positive saving.
# NOTE(review): Cost.rodo does not exist on df.t2.cen.shipment_4 (it is
# Cost.careta) — see note above about cross-run state.
df.t2.cen.shipment_4 <- df.t2.cen.shipment_4 %>%
group_by(Group,Dest.State) %>%
mutate( Old.cost = sum(vol.trancost1),
New.cost = sum(cost.other) + sum(Cost.rodo),
cost.diff = New.cost - Old.cost
)
df.t2.cen.shipment_5 <- df.t2.cen.shipment_4 %>%
filter(cost.diff>0)
# [Step] Attach the replacement state each state can be consolidated into.
df.t2.cen.shipment_5<- df.t2.cen.shipment_5 %>%
left_join(df.destination, by = "Dest.State")
# Rank destinations by distance within each bucket x group x replacement
# state, and accumulate remaining pallets for pairwise consolidation.
df.t2.cen.shipment_5<- df.t2.cen.shipment_5 %>%
arrange(day.group,Group,Replace.Dest.State,Distance.GPS) %>%
group_by(day.group,Group,Replace.Dest.State) %>%
mutate(Rank.Date=row_number(Distance.GPS)) %>%
mutate(Max.Date=max(Rank.Date)) %>%
mutate(Cumpallet=cumsum(Remaining)) %>% # add cum pallet
ungroup()
# Consolidated volume: pair consecutive destinations (every 2nd rank takes
# the pair's pallets; an odd trailing destination keeps its own).
df.t2.cen.shipment_5<- df.t2.cen.shipment_5 %>%
group_by(day.group) %>%
mutate(Consol.state =ifelse(Max.Date==1 | Rank.Date==2, Cumpallet,
ifelse((Rank.Date %% 2 )==0,
Cumpallet - lag(Cumpallet, n=2L, default = 0),
ifelse(
Rank.Date==Max.Date,
Cumpallet - lag(Cumpallet, n=1L, default = 0),
0
)
)
)
) %>%
ungroup()
# Fill 50-pallet RoRo trucks for the consolidated volume; track the remainder.
df.t2.cen.shipment_5<- df.t2.cen.shipment_5 %>%
mutate(Nu.RoRo.state = floor(Consol.state / 50),
Re.pal.state = Consol.state - Nu.RoRo.state *50
)
df.t2.cen.shipment_5<- df.t2.cen.shipment_5 %>%
mutate(Lookup.state = paste(Fiscal,str_to_upper(Dest.City..SAP.),"BR012",sep="-"),
Cost.state = (df.cost.transportation$TARIFA.FRETE[match(Lookup.state,df.cost.transportation$Look.Up)])*Nu.RoRo.state
) %>%
# [Step] Price the remaining state-level quantity on the smallest fitting truck.
mutate(S.Truck.size.state = ifelse( Re.pal.state ==0 ,0,
(Re.pal.state>0)*(Re.pal.state<=1.5)*1
+(Re.pal.state>1.5)*(Re.pal.state<=4.5)*4
+(Re.pal.state>4.5)*(Re.pal.state<=8.5)*8
+(Re.pal.state>8.5)*(Re.pal.state<=12.5)*12
+(Re.pal.state>12.5)*(Re.pal.state<=30)*28
#(Re.pal.state>0)*(Re.pal.state<=28)*28
+(Re.pal.state>30)*48 # Assign the suitable truck for the remainder
))
df.t2.cen.shipment_5<- df.t2.cen.shipment_5 %>%
mutate(S.Truck.code.state= ifelse(Re.pal.state ==0 ,0,
df.trucksize$SAP.Replace.Code[match(S.Truck.size.state,df.trucksize$Pallets)])) %>%
mutate(Lookup.state.other = paste(Fiscal,str_to_upper(Dest.City..SAP.),S.Truck.code.state,sep="-"),
Other.cost.state = ifelse(Re.pal.state ==0 ,0,
df.cost.transportation$TARIFA.FRETE[match(Lookup.state.other,df.cost.transportation$Look.Up)]
)
)
# Summary for the state-level consolidation scenario.
sumtab_T2.centralized_4 <- df.t2.cen.shipment_5 %>%
group_by(Group,Replace.Dest.State) %>%
summarise(Old.Shipment = sum(Total.shipment),
New.Shipment = sum(Nu.RoRo) + sum(new.shipment),
Old.cost = sum(vol.trancost1),
New.cost = sum(Cost.rodo)
+sum(Cost.state)
+sum(Other.cost.state),
cost.diff = New.cost - Old.cost,
total.weight = sum(vol.Kg ),
Distance = mean(Distance.GPS)
)
sum(sumtab_T2.centralized_4$cost.diff)
# Second approach for the negative-saving cases: extend the consolidation
# lead time (14-day buckets instead of 7).
# NOTE(review): this join relies on sumtab_T2.centralized still carrying the
# Avaliable column from the 7-day summary; the truck-profile section above
# overwrites it — run order matters.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_2 %>%
left_join(sumtab_T2.centralized)
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
filter(Avaliable == 1)
# Build 14-day buckets over the (gap-padded) calendar of transport dates.
days_group <- df.t2.cen.shipment_6 %>%
select(transport.Date) %>%
distinct(transport.Date) %>%
pad()
days_group <-
days_group %>%
mutate(day.nb = 1:nrow(days_group)) %>%
mutate(day.group = floor((14-1+day.nb)/14)) %>% #adjust
select(transport.Date, day.group)
# Attach the 14-day bucket to each shipment row.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
left_join(days_group, by ="transport.Date")
# Aggregate volumes per 14-day bucket x state x group.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
group_by(day.group,Dest.State,Group) %>%
summarise(Total.shipment = sum(drop),
vol.Kg = sum(actual.Gross.weight.Kg),
vol.pallet = sum(actual.pallet),
vol.trancost= sum(actual.Net.freight.cost),
vol.trancost1= sum(Current.convert.transcost)
) %>%
ungroup()
# All consolidated flows originate from plant 7032.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
mutate(Fiscal = rep(7032,times=n()) )
# Attach the chosen DC destination for each state/group.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
left_join(df.t2.cen.shipment_3, by = c("Dest.State","Group"))
# Distance (km) from the origin plant to the DC customer.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
rowwise() %>%
mutate(Distance.GPS = distm (c(df.GPS.site$lng[match(Fiscal,df.GPS.site$`Planta Origem`)],
df.GPS.site$lat[match(Fiscal,df.GPS.site$`Planta Origem`)]),
c(df.GPS.customer$lng[match(Dest.Code,df.GPS.customer$`SAP Recebedor Mercadoria`)],
df.GPS.customer$lat[match(Dest.Code,df.GPS.customer$`SAP Recebedor Mercadoria`)]
), fun = distHaversine)/1000
)
# New-model cost with 48-pallet Rodotrem trucks (BR012 tariff) plus a smaller
# truck for the remainder.
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
mutate(Nu.RoRo = floor(vol.pallet / 48),
Remaining = vol.pallet - Nu.RoRo *48,
Lookup.rodo = paste(Fiscal,str_to_upper(Dest.City..SAP.),"BR012",sep="-"),
Cost.rodo = (df.cost.transportation$TARIFA.FRETE[match( Lookup.rodo ,df.cost.transportation$Look.Up)])*Nu.RoRo
) %>%
# Map the remaining pallet count onto the smallest suitable truck size.
mutate(S.Truck.size = (Remaining>0)*(Remaining<=1.5)*1
+(Remaining>1.5)*(Remaining<=4.5)*4
+(Remaining>4.5)*(Remaining<=8.5)*8
+(Remaining>8.5)*(Remaining<=12.5)*12
+(Remaining>12.5)*(Remaining<=30)*28
#(Re.pal.state>0)*(Re.pal.state<=28)*28
+(Remaining>30)*48) # Assign the suitable truck for the remainder
df.t2.cen.shipment_6 <- df.t2.cen.shipment_6 %>%
mutate(S.Truck.code= df.trucksize$SAP.Replace.Code[match(S.Truck.size,df.trucksize$Pallets)]) %>%
mutate(Lookup.other = paste(Fiscal,str_to_upper(Dest.City..SAP.),S.Truck.code,sep="-"),
cost.other = df.cost.transportation$TARIFA.FRETE[match(Lookup.other,df.cost.transportation$Look.Up)],
new.shipment = ifelse(S.Truck.size>0,1,0)
)
# Summary per group x state for the 14-day scenario, then export.
sumtab_T2.centralized_5 <- df.t2.cen.shipment_6 %>%
group_by(Group,Dest.State) %>%
summarise(Old.Shipment = sum(Total.shipment),
New.Shipment = sum(Nu.RoRo) + sum(new.shipment),
Old.cost = sum(vol.trancost1),
New.cost = sum(cost.other) + sum(Cost.rodo),
cost.diff = New.cost - Old.cost,
total.weight = sum(vol.Kg ),
Distance = mean(Distance.GPS),
Avaliable = ifelse(cost.diff >0,1,0)
)
write_csv (df.t2.cen.shipment_6, "C:/Users/Bao/Desktop/t2.centralized.detail. 14day.lt.csv" )
write_csv (sumtab_T2.centralized_5, "C:/Users/Bao/Desktop/t2.centralized.summary.for 14day.lt.csv" )
|
34ecf7893ae25b4753d92572c58d0d9fb8320481
|
e54d7766968f6535fed9c3cf1f709f6dcf132844
|
/R/animal_spp_direction_frequencies.R
|
6198995b07630a15c05486a608cd2ed8da199e6c
|
[] |
no_license
|
jscamac/Alpine_Elicitation_Project
|
911ffbd5cd32279266dc9c8cd824749cb6c86787
|
fefa65ac7f58819d24ce14e42d9f4be5c0633a33
|
refs/heads/master
| 2023-05-18T19:54:44.288830
| 2021-06-10T06:54:37
| 2021-06-10T06:54:37
| 111,359,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,985
|
r
|
animal_spp_direction_frequencies.R
|
#' Summarise expert data best estimates to determine response directions for animals
#'
#' Summarise expert data best estimates for plant data response directions for animals
#'
#' @param data Dataframe derived from \code{compile_animal_data()}.
#' @param Q_IDs Character vector. The pair of questions to summarise by. For example if one was interested in changes in population abundance you would
#' use c("3A", "3B"). Changes in minimum elevation would be c("1A", "1B"), and changes in upper elevation would be c("2A","2B")
#' @details This function takes best estimates of current and future cover and determines the number of responses indicating
#' no change, positive change or negative change
#' @return A long-format data frame with one row per species x plot x
#'   direction ("Increase", "No change", "Decrease"), the number of expert
#'   responses (N, Responses) and the proportion (Responses_prop).
#' @import dplyr
#' @importFrom tidyr spread gather
#' @importFrom stats na.omit
#' @export
animal_spp_direction_frequencies <- function(data, Q_IDs) {
  data %>%
    dplyr::filter(Q_ID %in% Q_IDs) %>%
    # Keep identifying columns plus the 50th-percentile (best) estimate.
    dplyr::select(Expert_ID, Species, Species_short, SPP_ID, Plot_ID, Water_centric, Taxon, Q_TYPE, Q50th) %>%
    tidyr::spread(Q_TYPE, Q50th) %>%
    stats::na.omit() %>% # Remove experts that did not provide both answers
    # Classify each expert's future-minus-current difference.
    dplyr::mutate(Diff = Future - Current,
                  `No change` = ifelse(Diff == 0, 1, 0),
                  Decrease = ifelse(Diff < 0, 1, 0),
                  Increase = ifelse(Diff > 0, 1, 0)) %>%
    dplyr::group_by(Species, Species_short, SPP_ID, Plot_ID, Taxon, Water_centric) %>%
    # rank = proportion of experts predicting a decrease (uses the freshly
    # summarised Decrease and N, which summarise() evaluates sequentially).
    dplyr::summarise(N = dplyr::n(),
                     `No change` = sum(`No change`),
                     Decrease = sum(Decrease),
                     Increase = sum(Increase),
                     rank = sum(Decrease)/N) %>%
    tidyr::gather(Direction, Responses, -Species, -Species_short, -SPP_ID, -Plot_ID, -Taxon, -Water_centric, -N, -rank) %>%
    dplyr::mutate(Direction = factor(Direction,
                                     levels = c("Increase",
                                                "No change",
                                                "Decrease")),
                  Responses_prop = Responses/N)
}
|
01f99b7b31f0cf0cf10da166c70f95f047c7f0cd
|
79b0d43549a50d0dfc950e420e23f921a7bde12a
|
/Phylogenetic Analyses/sCoRRE_trait_ordination.R
|
a0f6748b0e6c07bacd9b864c409a3ec62ae3f38d
|
[] |
no_license
|
mavolio/scorre
|
55de4ef0c9bfd946b9257a9e876147a5d24462dd
|
d67a78da2cc627eae9ab25ee4cd8f931a5102200
|
refs/heads/master
| 2023-07-26T22:51:21.000447
| 2023-07-12T15:26:27
| 2023-07-12T15:26:27
| 226,844,825
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,389
|
r
|
sCoRRE_trait_ordination.R
|
################################################################################
## sCoRRE_trait_ordination.R: Figuring out what traits to use.
##
## Author: Kimberly Komatsu
## Date created: March 10, 2021
################################################################################
library(ecodist)
library(FD)
library(PerformanceAnalytics)
library(tidyverse)
# Set working directory (machine-specific; only the last successful setwd
# takes effect).
setwd('C:\\Users\\lapie\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared')
setwd("C:\\Users\\wilco\\Dropbox\\shared working groups\\sDiv_sCoRRE_shared\\")
setwd('C:\\Users\\mavolio2\\Dropbox\\sDiv_sCoRRE_shared')
# Our traits: determine how much data we are missing per trait.
dat<-read.csv("Trait Data\\TRY Data\\Try Continuous data\\TRY_trait_data_continuous_long.csv")
length(unique(dat$species_matched))
# Per-trait species coverage as a fraction of the 2400-species pool.
numsp<-dat%>%
group_by(species_matched, CleanTraitName)%>%
summarize(ave=mean(StdValue))%>%
group_by(CleanTraitName)%>%
summarize(n=length(ave))%>%
mutate(pct=n/2400)
# Species x trait means, wide format, with an extreme seed-number outlier cut.
play<-dat%>%
group_by(species_matched, CleanTraitName)%>%
summarize(ave=mean(StdValue))%>%
spread(CleanTraitName, ave, fill=0)%>%
filter(seed_number<3e+08)
# NOTE(review): the subset uses root_density but the plot uses
# rooting_density — confirm which column name actually exists in `play`.
with(subset(play, SRL>00&root_density>0), plot(SRL, rooting_density))
# Read in relative abundance data and map CoRRE names to TRY names.
sp_name_key <- read.csv("CoRRE data\\CoRRE data\\trait data\\corre2trykey.csv")
rel_abun_df <- read.csv("CoRRE data\\CoRRE data\\community composition\\CoRRE_RelativeAbundanceMar2021.csv") %>%
left_join(dplyr::select(sp_name_key, genus_species, species_matched), by="genus_species") %>%
drop_na(species_matched)
abund_species_vector <- unique(rel_abun_df$species_matched)
# Read gap-filled continuous traits, averaged per species.
# NOTE(review): funs() is deprecated in dplyr; list(mean) / across() is the
# modern equivalent (left unchanged here).
contTraits <- read.csv('Trait Data\\TRY Data\\Gap_Filled\\TRY_new.csv')%>%
rename(species_matched=Species)%>%
select(-X.1, -X, -Family, -Genus, -ObservationID)%>%
group_by(species_matched)%>%
summarise_all(funs(mean))%>%
ungroup()
# Rename the TRY trait codes to readable names and drop unused trait columns.
contTraitsSubset <- contTraits%>%
rename(ssd=X4, rooting_depth=X6, SLA=X11, leaf_C_mass=X13, leaf_N_mass=X14, leaf_P_mass=X15, stem_diameter=X21, seed_mass=X26, seed_length=X27, leaf_thickness=X46, LDMC=X47, leaf_dry_mass=X55, germination_rate=X95, leaf_length=X144, leaf_width=X145, leaf_CN=X146, stem_conduit_density=X169, stem_conduit_diameter=X281, seed_number=X138, SRL=X1080)%>%
select(-X18, -X50, -X78, -X163, -X223, -X224, -X237, -X282, -X289, -X3112, -X3113, -X3114, -X3120)
# Join categorical + continuous traits; drop incomplete rows and outliers.
traits <- read.csv('CoRRE data\\CoRRE data\\trait data\\sCoRRE categorical trait data - traits_complete_pre spot check_03102021.csv')%>%
full_join(contTraitsSubset) %>%
drop_na()%>%
filter(leaf_P_mass<20, stem_diameter<0.5, seed_mass<50, seed_number<10000, leaf_width<40, stem_conduit_density<1000, stem_conduit_diameter<200)
traitsOutliersRemoved <- traits %>%
filter(!leaf_type %in% c("microphyll","frond")) %>%
filter(!species_matched %in% c("Centrolepis aristata", "Centrolepis strigosa", "Acorus calamus"))
traitsScaled <- traitsOutliersRemoved %>% ## only scales continuous traits
mutate_at(vars(ssd:SRL), scale)
# Create Gower trait dissimilarity matrix (columns 15:34 = continuous traits;
# categorical traits ignored).
traitMatrix <- distance(traitsScaled[,15:34], method='gower') #ignoring all categorical traits
# Run PCoA
traitPCO <- pco(traitMatrix)
# Create matrix of first two axes from PCoA (and axes 3-4).
PCOOutMatrix <- traitPCO$vectors[,1:2]
PCOOutMatrix_3and4 <- traitPCO$vectors[,3:4]
# Create vector fitting object (for overlaying trait vectors on top of PCoA plot) -- should probably up the permutations to 1000
trait_vf <- vf(PCOOutMatrix, traitsScaled[,15:34], nperm=100)
trait_vf_3and4 <- vf(PCOOutMatrix_3and4, traitsScaled[,15:34], nperm=100)
### Plotting
# plot(PCOOutMatrix, col=1:length(traits$species_matched),
# pch=1:length(traits$species_matched), main="PCO", xlab="PCO 1", ylab="PCO 2")
plot(PCOOutMatrix, main="PCO", xlab="PCO 1", ylab="PCO 2", col="grey")
plot(trait_vf)
plot(PCOOutMatrix_3and4, main="PCO", xlab="PCO 3", ylab="PCO 4", col="grey")
plot(trait_vf_3and4)
# Plot with species names
PCO <- data.frame(traitPCO$vectors[,1:2], species_matched=traitsScaled$species_matched)
PCO_3and4 <- data.frame(traitPCO$vectors[,3:4], species_matched=traitsScaled$species_matched)
ggplot(data=PCO, aes(x=X1, y=X2, label=species_matched)) +
# geom_point() +
geom_text()
ggplot(data=PCO_3and4, aes(x=X1, y=X2, label=species_matched)) +
# geom_point() +
geom_text()
# Pairwise trait correlations with histograms.
chart.Correlation(traitsScaled[,15:34], histogram=TRUE, pch=19)
|
af1b29c8c64ae88b3ea9c9c488e6d67f8b8f3553
|
9cceeabb7422bb316d8053f7b532a714263e4407
|
/B_clustering.R
|
ac9d8e0a123c19124054dcc5d2ca44ebc4a1da00
|
[] |
no_license
|
Novabreed/andreabertoli_masterdegree_codes
|
e6a73bb16bb83774bbf1f8279f913e24cb11881f
|
935620e2e919ac3501dd328c8b4c88d1ea94b4c0
|
refs/heads/master
| 2020-03-27T21:57:46.312402
| 2018-09-03T11:01:13
| 2018-09-03T11:01:13
| 147,191,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,860
|
r
|
B_clustering.R
|
# Hierarchical clustering of samples from an expression matrix: Spearman
# correlation distance + complete linkage, rendered as a heatmap, a plain
# dendrogram and a fan phylogram (all into one PDF). Samples without a
# second replicate are highlighted in red.
# Usage: Rscript B_clustering.R input=<table> output=<pdf>
args=(commandArgs(TRUE))
input_file<-as.character(unlist(strsplit(args[1],"="))[2])
output_file<-as.character(unlist(strsplit(args[2],"="))[2])
# NOTE(review): clust.size is assigned but never used below.
clust.size=2
library(gplots)
library(ape)
library(RColorBrewer)
expdata = read.table(input_file)
expdata = as.matrix(expdata)
# Distance = 1 - Spearman correlation between samples (columns).
cormx1 = 1-cor(expdata,method="spearman")
cormx.dist1 = as.dist(cormx1)
cluster1 = hclust(cormx.dist1, method = "complete")
# Samples with no second replicate: these get color code 2 (red).
noRep<-c("berzamino_rep1", "cabernet.franc_rep1", "carignano_rep1", "chaouch.blanc_rep1", "chasselas_rep1", "garganega_rep1" , "glera_rep1", "plechistik_rep1", "raboso.piave_rep1", "sahibi.safid_rep1", "traminer_rep1", "V278_rep1", "verduzzo_rep1", "vernaccia_rep1")
# Color vector in dendrogram (plotting) order, named by sample label.
x1<-(cluster1$labels[cluster1$order] %in% noRep)
rep_col<- rep(1, length(colnames(expdata)))
rep_col[x1]<-2
rep_col<-as.integer(rep_col)
names(rep_col)<- cluster1$labels[cluster1$order]
labelColors=c("black", "red") # colLab() below applies these colors to the labels (leaves) of the plain dendrogram
colLab <- function(n) {
if (is.leaf(n)) {
a <- attributes(n)
labCol <- labelColors[rep_col[which(names(rep_col) == a$label)]]
attr(n, "nodePar") <- c(a$nodePar, lab.col = labCol)
}
n
}
# Second color vector in original label order, for heatmap.2/as.phylo.
h1<-(cluster1$labels %in% noRep)
rep_col_h1 = rep(1, length(colnames(expdata)))
rep_col_h1[h1] = 2
pdf(output_file)
# Clustering of all genes: heatmap + dendrogram + as.phylo fan plot.
heatmap.2(cormx1, main = "Spearman+Complete all genes", trace="none", dendrogram="row", Rowv=as.dendrogram(cluster1), Colv=as.dendrogram(cluster1), colRow=rep_col_h1, colCol=rep_col_h1, cexCol=0.3, cexRow=0.3, col=rev(brewer.pal(9, "RdYlGn")))
par(cex=0.25)
dendrogram = dendrapply(as.dendrogram(cluster1), colLab)
plot(dendrogram, horiz=TRUE)
par(cex=1)
plot(as.phylo(cluster1), type = "fan", cex=0.3, tip.color=rep_col_h1)
dev.off()
q()
|
425369fea3a91f6c5d7140d3106b458ef2849ab3
|
6e4f004782186082b73025cda95f31bcae76afcf
|
/R/gl.keep.loc.r
|
43334a9c64d6975b571d3baa7ec2fd7a5c1cbdc1
|
[] |
no_license
|
carlopacioni/dartR
|
319fbff40a385ca74ab7490b07857b0b027c93a8
|
06614b3a328329d00ae836b27616227152360473
|
refs/heads/master
| 2023-08-23T00:32:10.850006
| 2021-09-08T06:52:44
| 2021-09-08T06:52:44
| 262,468,788
| 0
| 0
| null | 2020-05-09T02:07:08
| 2020-05-09T02:07:07
| null |
UTF-8
|
R
| false
| false
| 4,810
|
r
|
gl.keep.loc.r
|
#' Remove all but the specified loci from a genlight \{adegenet\} object
#'
#' The script returns a genlight object with all but the specified loci deleted.
#' Loci to retain can be given by name (\code{loc.list}), by position
#' (\code{first:last}), or both; when both are given, the union of the two
#' sets is kept. If neither is given, the object is returned unchanged with
#' a warning.
#'
#' @param x -- name of the genlight object containing SNP genotypes or presence/absence data [required]
#' @param loc.list -- a list of loci to be kept [required, if loc.range not specified]
#' @param first -- first of a range of loci to be kept [required, if loc.list not specified]
#' @param last -- last of a range of loci to be kept [if not specified, last locus in the dataset]
#' @param verbose -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2 or as specified using gl.set.verbosity]
#' @return A genlight object with the reduced data
#' @export
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @examples
#' # SNP data
#'   gl2 <- gl.keep.loc(testset.gl, loc.list=c("100051468|42-A/T", "100049816-51-A/G"))
#' # Tag P/A data
#'   gs2 <- gl.keep.loc(testset.gs, loc.list=c("20134188","19249144"))

gl.keep.loc <- function(x, loc.list=NULL, first=NULL, last=NULL, verbose=NULL){

# TRAP COMMAND, SET VERSION

  funname <- match.call()[[1]]
  build <- "Jacob"
  hold <- x  # retain the input so the summary can report before/after counts

# SET VERBOSITY

  if (is.null(verbose)){
    if(!is.null(x@other$verbose)){
      verbose <- x@other$verbose
    } else {
      verbose <- 2
    }
  }

  if (verbose < 0 | verbose > 5){
    cat(paste("  Warning: Parameter 'verbose' must be an integer between 0 [silent] and 5 [full report], set to 2\n"))
    verbose <- 2
  }

# FLAG SCRIPT START

  if (verbose >= 1){
    if(verbose==5){
      cat("Starting",funname,"[ Build =",build,"]\n")
    } else {
      cat("Starting",funname,"\n")
    }
  }

# STANDARD ERROR CHECKING

  # inherits() rather than class()== : robust when the object carries >1 class
  if(!inherits(x, "genlight")) {
    stop("  Fatal Error: genlight object required!\n")
  }

  if (all(x@ploidy == 1)){
    cat("  Processing Presence/Absence (SilicoDArT) data\n")
  } else if (all(x@ploidy == 2)){
    cat("  Processing a SNP dataset\n")
  } else {
    stop ("Fatal Error: Ploidy must be universally 1 (fragment P/A data) or 2 (SNP data)")
  }

# FUNCTION SPECIFIC ERROR CHECKING

  # BUG FIX: 'flag' was previously left undefined when neither loc.list nor
  # first was supplied, causing "object 'flag' not found" further down.
  flag <- 'none'
  if (!is.null(loc.list) && !is.null(first)){
    flag <- 'both'
    if (verbose >= 2){
      cat("  Both a range of loci and a list of loci to keep has been specified\n")
    }
  } else if (!is.null(loc.list)){
    flag <- 'list'
    if (verbose >= 2){
      cat("  List of loci to keep has been specified\n")
    }
  } else if (!is.null(first)){
    flag <- 'range'
    if (verbose >= 2){
      cat("  Range of loci to keep has been specified\n")
    }
  } else {
    cat("  Warning: Need to specify either a range of loci to keep, or specific loci to keep\n")
  }

  # Silently-named loci that are absent from the dataset are dropped with a warning
  if (flag=='both' || flag=='list'){
    for (case in loc.list){
      if (!(case%in%locNames(x))){
        cat("  Warning: Listed loci",case,"not present in the dataset -- ignored\n")
        loc.list <- loc.list[!(loc.list==case)]
      }
    }
  }

  # Sanity-check the positional range. BUG FIX: these checks previously ran
  # only for flag=='range', not 'both'.
  if (flag=='range' || flag=='both'){
    # BUG FIX: default 'last' to the final locus, as documented; previously a
    # NULL 'last' caused first:last to fail.
    if (is.null(last)){
      last <- nLoc(x)
    }
    if (first <= 0){
      cat("  Warning: Lower limit to range of loci cannot be less than 1, set to 1\n")
      first <- 1
    }
    # BUG FIX: the upper-bound check previously tested 'first' instead of 'last'
    if (last > nLoc(x)){
      cat("  Warning: Upper limit to range of loci cannot be greater than the number of loci, set to",nLoc(x),"\n")
      last <- nLoc(x)
    }
    if (first > last){
      cat("  Warning: Upper limit is smaller than lower limit, reversed\n")
      tmp <- first
      first <- last
      last <- tmp
    }
  }

# DO THE JOB

  if (verbose >= 2) {
    cat("  Deleting all but the specified loci\n")
  }

  # Combine range-derived names with the explicit list, removing duplicates
  if (!is.null(first) && !is.null(loc.list)){
    list.from.range <- locNames(x)[first:last]
    loc.list <- unique(c(loc.list,list.from.range))
  } else if (!is.null(first)) {
    loc.list <- locNames(x)[first:last]
  }

  if (is.null(loc.list) || length(loc.list) == 0) {
    cat("  Warning: no loci listed to keep! Genlight object returned unchanged\n")
    x2 <- x
  } else {
    # Keep only the flagged loci, subsetting the locus metadata in step
    x2 <- x[,x$loc.names%in%loc.list]
    x2@other$loc.metrics <- x@other$loc.metrics[x$loc.names%in%loc.list,]
  }

# REPORT A SUMMARY

  if (verbose >= 3) {
    cat("  Summary of recoded dataset\n")
    cat(paste("    Original No. of loci:",nLoc(hold),"\n"))
    cat(paste("    No. of loci deleted:",nLoc(hold)-nLoc(x2),"\n"))
    cat(paste("    No. of loci retained:",nLoc(x2),"\n"))
    cat(paste("    No. of individuals:", nInd(x2),"\n"))
    cat(paste("    No. of populations: ", nPop(x2),"\n"))
  }

# ADD TO HISTORY

  nh <- length(x2@other$history)
  x2@other$history[[nh + 1]] <- match.call()

# FLAG SCRIPT END

  if (verbose >= 1) {
    cat("Completed:",funname,"\n")
  }

  return(x2)
}
|
55c1efb4671e8e50c287c493ce33b0a680e0c646
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/COUNT/examples/lbw.Rd.R
|
43707f73ffc1a3e38f23d0ad8422a48bfeb67746
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 337
|
r
|
lbw.Rd.R
|
library(COUNT)
### Name: lbw
### Title: lbw
### Aliases: lbw
### Keywords: datasets

### ** Examples

# Load the low-birth-weight dataset shipped with the COUNT package
data(lbw)

# Poisson regression: physician visits (ftv) on low-birth-weight indicator,
# smoking status and race
glmbwp <- glm(ftv ~ low + smoke + factor(race), family=poisson, data=lbw)
summary(glmbwp)
exp(coef(glmbwp))  # exponentiated coefficients (rate ratios)

# Negative binomial refit of the same model (MASS::glm.nb)
library(MASS)
glmbwnb <- glm.nb(ftv ~ low + smoke + factor(race), data=lbw)
summary(glmbwnb)
exp(coef(glmbwnb))  # exponentiated coefficients (rate ratios)
|
8564f7d40e6036d0f465ee4793b6f9073250cea8
|
135840819e7d2769d2ca228b00cc9bac6e2d37bb
|
/R/DataDistribution.R
|
6b6ad653e388c3fab7c2271e1bd269a31ec40da3
|
[
"MIT"
] |
permissive
|
biostata/adoptr
|
26d956cf7f8ba7552f59c7366a8f9f960ddadd4c
|
7fefdf6753c7538a5428d23fb0cb5c61a3642207
|
refs/heads/master
| 2023-01-08T20:37:49.207484
| 2020-10-20T19:57:42
| 2020-10-20T19:57:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,614
|
r
|
DataDistribution.R
|
#' Data distributions
#'
#' \code{DataDistribution} is an abstract class used to represent the distribution
#' of a sufficient statistic \code{x} given a sample size \code{n} and a
#' single parameter value \code{theta}.
#'
#' This abstraction layer allows the representation of t-distributions
#' (unknown variance), normal distribution (known variance), and normal
#' approximation of a binary endpoint.
#' Currently, the two implemented versions are \code{\link{Normal-class}} and
#' \code{\link{Binomial-class}}.
#'
#' The logical option \code{two_armed} allows to decide whether a one-arm or
#' a two-arm (the default) design should be computed. In the case of a two-arm
#' design all sample sizes are per group.
#'
#' @slot two_armed Logical that indicates if a two-arm design is assumed.
#'
#' @examples
#' normaldist   <- Normal(two_armed = FALSE)
#' binomialdist <- Binomial(rate_control = .25, two_armed = TRUE)
#'
#' @template DataDistributionTemplate
#'
#' @aliases DataDistribution
#' @exportClass DataDistribution
setClass("DataDistribution", representation(
    two_armed = "logical")
)

#' Probability density function
#'
#' \code{probability_density_function} evaluates the probability density
#' function of a specific distribution \code{dist} at a point \code{x}.
#'
#' @template dist
#' @template DataDistributionTemplate
#'
#' @export
setGeneric("probability_density_function", function(dist, x, n, theta, ...) standardGeneric("probability_density_function"))

#' Cumulative distribution function
#'
#' \code{cumulative_distribution_function} evaluates the cumulative distribution
#' function of a specific distribution \code{dist} at a point \code{x}.
#'
#' @template dist
#' @template DataDistributionTemplate
#'
#' @export
setGeneric("cumulative_distribution_function", function(dist, x, n, theta, ...) standardGeneric("cumulative_distribution_function"))

# show(): delegate to the class-specific print() method (which returns the
# formatted description string) and append a newline.
setMethod("show", signature(object = "DataDistribution"), function(object) {
    cat(print(object), "\n")
})
#' Normal data distribution
#'
#' Implements a normal data distribution for z-values given an observed z-value
#' and stage size.
#' Standard deviation is 1 and mean \ifelse{html}{\out{θ √n}}{\eqn{\theta\sqrt n}} where
#' \ifelse{html}{\out{θ}}{\eqn{\theta}} is the standardized effect size.
#' The option \code{two_armed} can be set to decide whether a one-arm or a
#' two-arm design should be computed.
#'
#' See \code{\link{DataDistribution-class}} for more details.
#'
#' @template DataDistributionTemplate
#'
#' @rdname NormalDataDistribution-class
#' @exportClass Normal
# NOTE(review): the two_armed slot is already declared on the inherited
# DataDistribution class; re-declaring it here is redundant but harmless.
setClass("Normal", representation(
    two_armed = "logical"
    ),
    contains = "DataDistribution")

#' @param two_armed logical indicating if a two-armed trial is regarded
#'
#' @examples
#' datadist <- Normal(two_armed = TRUE)
#'
#' @seealso see \code{\link{probability_density_function}} and
#'    \code{\link{cumulative_distribution_function}} to evaluate the pdf
#'    and the cdf, respectively.
#'
#' @rdname NormalDataDistribution-class
#' @export
Normal <- function(two_armed = TRUE) new("Normal", two_armed = two_armed)
#' @examples
#' probability_density_function(Normal(), 1, 50, .3)
#'
#' @details If the distribution is \code{\link{Normal}}, then
#' the mean is assumed to be
#' \ifelse{html}{\out{√ n theta}}{\eqn{\sqrt{n} theta}}.
#'
#' @rdname probability_density_function
#' @export
setMethod("probability_density_function", signature("Normal", "numeric", "numeric", "numeric"),
          function(dist, x, n, theta, ...) {
              # For a two-armed design the effect enters as theta / sqrt(2)
              effect <- if (dist@two_armed) theta / sqrt(2) else theta
              stats::dnorm(x, mean = sqrt(n) * effect, sd = 1)
          })
#' @examples
#' cumulative_distribution_function(Normal(), 1, 50, .3)
#'
#' @details If the distribution is \code{\link{Normal}}, then
#' the mean is assumed to be
#' \ifelse{html}{\out{√ n theta}}{\eqn{\sqrt{n} theta}}.
#'
#' @rdname cumulative_distribution_function
#' @export
setMethod("cumulative_distribution_function", signature("Normal", "numeric", "numeric", "numeric"),
          function(dist, x, n, theta, ...) {
              # For a two-armed design the effect enters as theta / sqrt(2)
              if (dist@two_armed) {
                  theta <- theta / sqrt(2)
              }
              stats::pnorm(x, mean = sqrt(n) * theta, sd = 1)
          })

#' @param probs vector of probabilities
#' @rdname NormalDataDistribution-class
#' @export
setMethod("quantile", signature("Normal"),
          function(x, probs, n, theta, ...) { # must be x to conform with generic
              # Here 'x' is the distribution object, not a data value
              if (x@two_armed) {
                  theta <- theta / sqrt(2)
              }
              stats::qnorm(probs, mean = sqrt(n) * theta, sd = 1)
          })

#' @rdname NormalDataDistribution-class
#'
#' @param object object of class \code{Normal}
#' @param nsim number of simulation runs
#' @param seed random seed
#'
#' @export
setMethod("simulate", signature("Normal", "numeric"),
          function(object, nsim, n, theta, seed = NULL, ...) {
              if (object@two_armed)
                  theta <- theta / sqrt(2)
              # Seed only set when explicitly requested, so callers can manage RNG state
              if (!is.null(seed))
                  set.seed(seed)
              stats::rnorm(nsim, mean = sqrt(n) * theta, sd = 1)
          })

# Compact one-line description, e.g. "Normal<two-armed>"; returned (not cat'ed)
# so show() above can print it.
setMethod("print", signature('Normal'), function(x, ...) {
    glue::glue(
        "{class(x)[1]}<{if (x@two_armed) 'two-armed' else 'single-armed'}>"
    )
})
#' Binomial data distribution
#'
#' Implements the normal approximation for a test on rates.
#' The reponse rate in the control group,
#' \ifelse{html}{\out{r<sub>C</sub>}}{\eqn{r_C}}, has to be specified by
#' \code{rate_control}.
#' The null hypothesis is:
#' \ifelse{html}{\out{r<sub>E</sub> ≤ r<sub>C</sub>}}{\eqn{r_E <= r_C}},
#' where \ifelse{html}{\out{r<sub>E</sub>}}{\eqn{r_E}} denotes the response rate
#' in the invervention group.
#' It is tested against the alternative
#' \ifelse{html}{\out{r<sub>E</sub> > r<sub>C</sub>}}{\eqn{r_E > r_C}}.
#' The test statistic is given as
#' \ifelse{html}{\out{X<sub>1</sub> = √n (r<sub>E</sub> - r<sub>C</sub>) / √(2 r<sub>0</sub> (1-r<sub>0</sub>))}}{\eqn{X_1 = \sqrt{n}(r_E - r_C) / \sqrt{2 r_0 (1- r_0)}}},
#' where \ifelse{html}{\out{r<sub>0</sub>}}{\eqn{r_0}} denotes the mean between
#' \ifelse{html}{\out{r<sub>E</sub>}}{\eqn{r_E}} and
#' \ifelse{html}{\out{r<sub>C</sub>}}{\eqn{r_C}} in the two-armed case,
#' and \ifelse{html}{\out{r<sub>E</sub>}}{\eqn{r_E}} in the one-armed case.#'
#' All priors have to be defined for the rate difference
#' \ifelse{html}{\out{r<sub>E</sub> - r<sub>C</sub>}}{\eqn{r_E - r_C}}.
#'
#' @slot rate_control cf. parameter 'rate_control'
#'
#' @template DataDistributionTemplate
#'
#' @rdname BinomialDataDistribution-class
#' @exportClass Binomial
setClass("Binomial", representation(
    rate_control = "numeric",
    two_armed = "logical"
    ),
    contains = "DataDistribution")

#' @param rate_control assumed response rate in control group
#' @param two_armed logical indicating if a two-armed trial is regarded
#'
#' @examples
#' datadist <- Binomial(rate_control = 0.2, two_armed = FALSE)
#'
#' @seealso see \code{\link{probability_density_function}} and
#'    \code{\link{cumulative_distribution_function}} to evaluate the pdf
#'    and the cdf, respectively.
#'
#' @rdname BinomialDataDistribution-class
#' @export
Binomial <- function(rate_control, two_armed = TRUE) {
    # The normal approximation is only valid for rates strictly inside (0, 1)
    if (any(rate_control >= 1, rate_control <= 0))
        stop("The response rate in the control group must be in (0,1)!")
    new("Binomial", rate_control = rate_control, two_armed = two_armed)
}
#' @examples
#' probability_density_function(Binomial(.2, FALSE), 1, 50, .3)
#'
#' @details If the distribution is \code{\link{Binomial}},
#' \ifelse{html}{\out{theta}}{\eqn{theta}} denotes the rate difference between
#' intervention and control group.
#' Then, the mean is assumed to be
#' \ifelse{html}{\out{√ n theta}}{\eqn{\sqrt{n} theta}}.
#'
#' @rdname probability_density_function
#' @export
setMethod("probability_density_function", signature("Binomial", "numeric", "numeric", "numeric"),
          function(dist, x, n, theta, ...) {
              rate_intervention <- theta + dist@rate_control
              if (any(rate_intervention >= 1, rate_intervention <= 0))
                  stop("The response rate in the intervention group must be in (0,1)! Probably the combination of prior and control rate is ill-defined.")
              # BUG FIX: ifelse() was previously applied to the scalar condition
              # dist@two_armed; ifelse() truncates its result to the length of
              # the condition, so for a *vector* theta in the one-armed case the
              # pooled rate silently used only theta[1]. Plain if/else preserves
              # the full vector.
              if (dist@two_armed) {
                  var_control <- dist@rate_control * (1 - dist@rate_control)
                  p_0 <- (rate_intervention + dist@rate_control) / 2
              } else {
                  var_control <- 0
                  p_0 <- rate_intervention
              }
              # sd under the alternative / sd under the pooled null
              sigma_A <- sqrt(rate_intervention * (1 - rate_intervention) + var_control)
              sigma_0 <- sqrt(2 * p_0 * (1 - p_0))
              return(stats::dnorm(x, mean = sqrt(n) * theta / sigma_0, sd = sigma_A / sigma_0))
          })
#' @examples
#' cumulative_distribution_function(Binomial(.1, TRUE), 1, 50, .3)
#'
#' @details If the distribution is \code{\link{Binomial}},
#' \ifelse{html}{\out{theta}}{\eqn{theta}} denotes the rate difference between
#' intervention and control group.
#' Then, the mean is assumed to be
#' \ifelse{html}{\out{√ n theta}}{\eqn{\sqrt{n} theta}}.
#'
#' @rdname cumulative_distribution_function
#' @export
setMethod("cumulative_distribution_function", signature("Binomial", "numeric", "numeric", "numeric"),
          function(dist, x, n, theta, ...) {
              rate_intervention <- theta + dist@rate_control
              if (any(rate_intervention >= 1, rate_intervention <= 0))
                  stop("The response rate in the intervention group must be in (0,1)! Probably the combination of prior and control rate is ill-defined.")
              # NOTE(review): ifelse() on the scalar condition dist@two_armed
              # truncates its result to length 1; for a vector theta in the
              # one-armed case p_0 therefore uses only theta[1] -- confirm
              # whether vector theta is ever passed here.
              sigma_A <- sqrt(rate_intervention * (1 - rate_intervention) +
                                  ifelse(dist@two_armed, dist@rate_control * (1 - dist@rate_control), 0))
              p_0 <- (rate_intervention + ifelse(dist@two_armed, dist@rate_control, rate_intervention)) / 2
              sigma_0 <- sqrt(2 * p_0 * (1 - p_0))
              return(stats::pnorm(x, mean = sqrt(n) * theta / sigma_0, sd = sigma_A / sigma_0))
          })

#' @param probs vector of probabilities
#' @rdname BinomialDataDistribution-class
#' @export
setMethod("quantile", signature("Binomial"),
          function(x, probs, n, theta, ...) { # must be x to conform with generic
              # Here 'x' is the distribution object, not a data value
              rate_intervention <- theta + x@rate_control
              if (any(rate_intervention >= 1, rate_intervention <= 0))
                  stop("The response rate in the intervention group must be in (0,1)! Probably the combination of prior and control rate is ill-defined.")
              # NOTE(review): same ifelse()-on-scalar-condition caveat as in the
              # cdf method above -- vector theta would be truncated to theta[1].
              sigma_A <- sqrt(rate_intervention * (1 - rate_intervention) +
                                  ifelse(x@two_armed, x@rate_control * (1 - x@rate_control), 0))
              p_0 <- (rate_intervention + ifelse(x@two_armed, x@rate_control, rate_intervention)) / 2
              sigma_0 <- sqrt(2 * p_0 * (1 - p_0))
              return(stats::qnorm(probs, mean = sqrt(n) * theta / sigma_0, sd = sigma_A / sigma_0))
          })

#' @details Note that \code{simulate} for class \code{Binomial} simulates the
#' normal approximation of the test statistic.
#'
#' @rdname BinomialDataDistribution-class
#'
#' @param object object of class \code{Binomial}
#' @param nsim number of simulation runs
#' @param seed random seed
#'
#' @export
setMethod("simulate", signature("Binomial", "numeric"),
          function(object, nsim, n, theta, seed = NULL, ...) {
              rate_intervention <- theta + object@rate_control
              if (any(rate_intervention >= 1, rate_intervention <= 0))
                  stop("The response rate in the intervention group must be in (0,1)! Probably the combination of prior and control rate is ill-defined.")
              # NOTE(review): same ifelse()-on-scalar-condition caveat as in the
              # cdf method above -- vector theta would be truncated to theta[1].
              sigma_A <- sqrt(rate_intervention * (1 - rate_intervention) +
                                  ifelse(object@two_armed, object@rate_control * (1 - object@rate_control), 0))
              p_0 <- (rate_intervention + ifelse(object@two_armed, object@rate_control, rate_intervention)) / 2
              sigma_0 <- sqrt(2 * p_0 * (1 - p_0))
              # Seed only set when explicitly requested
              if (!is.null(seed)) set.seed(seed)
              return(stats::rnorm(nsim, mean = sqrt(n) * theta / sigma_0, sd = sigma_A / sigma_0))
          })

# Compact one-line description, e.g. "Binomial<two-armed>, response rate in
# control group: 0.2"; returned (not cat'ed) so show() can print it.
setMethod("print", signature('Binomial'), function(x, ...) {
    glue::glue(
        "{class(x)[1]}<{if (x@two_armed) 'two-armed' else 'single-armed'}>",
        'response rate in control group: {x@rate_control}',
        .sep = ", "
    )
})
|
90dee597fe294e7d41eb2c3a5669154325c6789b
|
667993cf49e87c96d29eec496d71e07635e5c0d0
|
/man/corMatrix.pdSRM.Rd
|
41fa3af27246a1f85c4b2aa87096942942728cfc
|
[
"MIT"
] |
permissive
|
andrewpknight/roundRobinR
|
0f86fed2a2855cb45c8b87a3ebabe86dfc54dd4d
|
38eb7348bbe24f8b5fa0358bcfb6552f2dae3606
|
refs/heads/master
| 2023-08-25T10:48:44.301531
| 2021-10-18T23:14:48
| 2021-10-18T23:14:48
| 416,403,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 960
|
rd
|
corMatrix.pdSRM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corMatrix.pdSRM.R
\name{corMatrix.pdSRM}
\alias{corMatrix.pdSRM}
\title{Extract Correlation Matrix from a pdSRM Object}
\usage{
\method{corMatrix}{pdSRM}(object, ...)
}
\arguments{
\item{object}{an object inheriting from pdSRM}
\item{...}{some methods for this require additional arguments}
}
\value{
the correlation matrix corresponding to the positive-definite
matrix represented by object
}
\description{
This function is used internally as part of the construction of the pdSRM
object that will fit the appropriate structure for the SRM. The correlation matrix
corresponding to the positive-definite matrix represented by object is
obtained.
}
\examples{
\dontrun{
o = lme(liking ~ 1, random=list(groupId=pdBlocked(list(pdIdent(~1),
pdSRM(~-1 + a1 + a2 + a3 + a4 + p1 + p2 + p3 + p4)))),
correlation=corCompSymm(form=~1 | groupId/pdSRM_dyad_id),
data=d, na.action=na.omit)
}
}
|
bee509dc372fe7bfab9ed591a34ea8145b8b7f3e
|
e03f9a8d544daf7d91e7df928d7491408a468a90
|
/HoseFile.r
|
039704cba5a913087eb30266df6b19db1d5443f5
|
[] |
no_license
|
fruitsamples/LW8_Hosesample
|
7ad77adac825d8c76182667060ccf3bdfd656dab
|
651a13db7aa20a636ef4f901a2c0e7f38b884f4b
|
refs/heads/master
| 2021-01-10T11:06:18.683293
| 2015-11-25T21:36:01
| 2015-11-25T21:36:01
| 46,888,492
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 1,376
|
r
|
HoseFile.r
|
/*
	File:       HoseFile.r

	Contains:   resource definition for Irda Hose (Infrared).
	            NOTE(review): despite the .r extension this is a Rez resource
	            source file (compiled with the classic Mac OS Rez tool), not
	            R code.

	Written by: Chorng-Shyan Lin and Ingrid Kelly

	Copyright:  Copyright © 1999 by Apple Computer, Inc., All Rights Reserved.

	            You may incorporate this Apple sample source code into your program(s) without
	            restriction. This Apple sample source code has been provided "AS IS" and the
	            responsibility for its operation is yours. You are not permitted to redistribute
	            this Apple sample source code as "Apple sample source code" after having made
	            changes. If you're going to re-distribute the source, we require that you make
	            it clear in the source that the code was descended from Apple sample source
	            code, but that you've made changes.

	Change History (most recent first):
	            7/26/1999 Karl Groethe: updated for Metrowerks CodeWarrior Pro 2.1
*/
#include "Types.r"

// Resource type/ID under which the plug-in library info is stored.
#define kPluginResourceInfoType 'PLGN'
#define kPluginResourceInfoID -8192

// Template: a count followed by (type, subtype, library-name) records.
type 'PLGN' { // see Printing Plug-ins Manager Spec.
integer = $$Countof(LibInfo); // number of libraries
wide array LibInfo {
unsigned longint; // Type this library handles
unsigned longint; // SubType this library handles
pstring; // Library Name
align word;
};
};

// One entry: the 'hose' plug-in for IrDA, implemented by HoseIrdaLib.
resource kPluginResourceInfoType (kPluginResourceInfoID,
purgeable) {
{
'hose', '=Ird', "HoseIrdaLib",
}
};
|
f153fd03f7076a9e3bdd44b5c710abc39631b869
|
0892d6f15b571d1dd5b2fc910dd064adb702902e
|
/cachematrix.R
|
cf8639d47811f647ad1365cec49d779de5ca0d98
|
[] |
no_license
|
vdaliparthi/ProgrammingAssignment2
|
578840a2e77c6e25b4f94dcf2e8cceb28f265ab9
|
6b4865873aa5697b05f9ea153c8bd4fdbefa8212
|
refs/heads/master
| 2021-06-28T22:50:09.699105
| 2017-09-19T08:45:32
| 2017-09-19T08:45:32
| 103,614,676
| 0
| 0
| null | 2017-09-15T04:42:41
| 2017-09-15T04:42:41
| null |
UTF-8
|
R
| false
| false
| 2,351
|
r
|
cachematrix.R
|
## makeCacheMatrix builds a special "matrix" object: a list of four closures
## sharing one environment that holds the matrix and its (lazily computed)
## inverse:
##   1) setMatrix          -- store a new matrix and invalidate the cache
##   2) getMatrix          -- retrieve the stored matrix
##   3) cacheInvMatrix     -- store a computed inverse in the cache
##   4) getCachedInvMatrix -- retrieve the cached inverse (NULL if not set)
##
## cacheSolve (defined separately) consumes such an object: it returns the
## cached inverse when available and otherwise computes, caches and returns it.

## Create the caching "matrix" wrapper around x (a matrix).
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL                 # no inverse computed yet
    setMatrix <- function(new_matrix) {
        x <<- new_matrix                   # replace the stored matrix
        cached_inverse <<- NULL            # stale inverse must be discarded
    }
    getMatrix <- function() {
        x
    }
    cacheInvMatrix <- function(inv) {
        cached_inverse <<- inv             # remember the inverse for reuse
    }
    getCachedInvMatrix <- function() {
        cached_inverse
    }
    list(
        setMatrix = setMatrix,
        getMatrix = getMatrix,
        cacheInvMatrix = cacheInvMatrix,
        getCachedInvMatrix = getCachedInvMatrix
    )
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix. If the inverse has already been calculated (and the matrix
## has not changed), the cached inverse is returned without recomputation.
## Argument x is the special "matrix" returned by makeCacheMatrix; '...' is
## forwarded to solve() (e.g. tol).
cacheSolve <- function(x, ...) {
    iMat <- x$getCachedInvMatrix()         # previously cached inverse, if any
    if (!is.null(iMat)) {                  # cache hit: skip recomputation
        message("Getting Inversed Matrix from cached data")
        return(iMat)
    }
    m <- x$getMatrix()                     # cache miss: fetch the matrix
    # BUG FIX: '...' was previously accepted but never passed to solve(); also
    # return the computed inverse directly instead of relying on the cache
    # setter's return value.
    iMat <- solve(m, ...)
    x$cacheInvMatrix(iMat)                 # store for subsequent calls
    iMat
}
|
0b01210199f2de43ad1000e5d910f24448c93a88
|
449f47610706df14cb5f3a4f683797581f4ee066
|
/plot4.R
|
cb24b4014f5d406d01e4f01a1dd13b6728cc5694
|
[] |
no_license
|
Tim-Brooks/ExData_Plotting1
|
181760b10669b753c8db3fdf0b33a4eb8dc2306a
|
3463b5bf15fea91a7912989bf4d443cf4562ebb4
|
refs/heads/master
| 2022-12-09T19:41:00.114882
| 2014-07-13T22:30:52
| 2014-07-13T22:30:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,672
|
r
|
plot4.R
|
# Register a coercion so read.table's colClasses can parse dd/mm/YYYY strings
# straight into Date objects via the pseudo-class "myDate".
setAs("character","myDate", function(from) as.Date(from, format="%d/%m/%Y"))

# Read the household power consumption file (";"-separated, "?" = missing)
# and keep only the two target days: 1-2 Feb 2007.
read_file <- function() {
    df <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?",
                     colClasses=c("Date"="myDate"))
    df <- df[df$Date %in% as.Date(c("2007-02-01", "2007-02-02")),]
    df
}

# Render the 2x2 panel of time-series plots to plot4.png: global active power,
# voltage, the three sub-metering series, and global reactive power, each with
# Thu/Fri/Sat tick labels (1440 = minutes per day marks the day boundary).
create_plot <- function(data_frame) {
    png(filename="plot4.png", width=480, height=480)
    # NOTE(review): 'midpoint' is computed but never used below; the tick at
    # 1440 is hard-coded instead -- confirm whether this was the intent.
    midpoint <- sum(data_frame$Date == as.Date("2007-02-01"))
    par(mfrow=c(2, 2))
    # Panel 1 (top-left): global active power
    with(data_frame, plot.ts(Global_active_power, main="", xlab="",
                             ylab="Global Active Power", axes=F))
    axis(2)
    axis(1, at=c(0, 1440, length(data_frame$Date)), labels=c("Thu", "Fri", "Sat"))
    box()
    # Panel 2 (top-right): voltage
    with(data_frame, plot.ts(Voltage, main="", xlab="datetime", ylab="Voltage",
                             axes=F))
    axis(2)
    axis(1, at=c(0, 1440, length(data_frame$Date)), labels=c("Thu", "Fri", "Sat"))
    box()
    # Panel 3 (bottom-left): the three sub-metering series overlaid, with legend
    plot.ts(data_frame[, c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")], plot.type="single", main="", xlab="",
            ylab="Energy sub metering", axes=F, col=c("black", "red", "blue"))
    axis(2)
    axis(1, at=c(0, 1440, length(data_frame$Date)), labels=c("Thu", "Fri", "Sat"))
    box()
    legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           lwd = 1, col=c("black", "red", "blue"), bty="n")
    # Panel 4 (bottom-right): global reactive power
    with(data_frame, plot.ts(Global_reactive_power, main="", xlab="datetime",
                             ylab="Global_reactive_power", axes=F))
    axis(2)
    axis(1, at=c(0, 1440, length(data_frame$Date)), labels=c("Thu", "Fri", "Sat"))
    box()
    dev.off()
}

# Convenience entry point: read the data, then produce the plot file.
run_script <- function() {
    create_plot(read_file())
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.