blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
666da8177db93beec6f0182ca4384d09a10556fe
|
580f4e51729818f62d340b3dec0ad080f0eba8b5
|
/man/fortify.trackeRdataSummary.Rd
|
aa123cebd4da3f774344c199d0c5effa0ada085c
|
[] |
no_license
|
DrRoad/trackeR
|
77b552fab86da85f53b2c27203490686d03ed496
|
2d0a486e78c2cf073051eb667fb6c9f26fe0664f
|
refs/heads/master
| 2020-03-19T19:12:29.544263
| 2018-04-22T23:40:33
| 2018-04-22T23:40:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 622
|
rd
|
fortify.trackeRdataSummary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackeRdata_summary.R
\name{fortify.trackeRdataSummary}
\alias{fortify.trackeRdataSummary}
\title{Fortify a trackeRdataSummary object for plotting with ggplot2.}
\usage{
\method{fortify}{trackeRdataSummary}(model, data, melt = FALSE, ...)
}
\arguments{
\item{model}{The \code{\link{trackeRdata}} object.}
\item{data}{Ignored.}
\item{melt}{Logical. Should the data be melted into long format
instead of the default wide format?}
\item{...}{Currently not used.}
}
\description{
Fortify a trackeRdataSummary object for plotting with ggplot2.
}
|
b8967445d47102d26141f1aa1ad389b58d30d64c
|
c21e1626047c96e3f3ebd3cc347d1d64655aa8a0
|
/man/fastClustering.Rd
|
dfb6b30ef6f7a173761e87d2ac917eaa06c5adad
|
[] |
no_license
|
cran/sClust
|
e8917a49af7826325419127bbfcae86e0465f9fa
|
bbb200aacce30a9af93148fb26a8cf060a5b6b58
|
refs/heads/master
| 2023-07-05T06:40:26.901212
| 2021-08-23T17:50:02
| 2021-08-23T17:50:02
| 399,418,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,144
|
rd
|
fastClustering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fastClustering.R
\name{fastClustering}
\alias{fastClustering}
\title{Fast Spectral Clustering}
\usage{
fastClustering(
dataFrame,
smplPoint,
stopCriteria = 0.99,
neighbours = 7,
similarity = TRUE,
clustFunction,
...
)
}
\arguments{
\item{dataFrame}{The dataFrame.}
\item{smplPoint}{maximum of sample number for reduction.}
\item{stopCriteria}{criterion for minimizing intra-group distance and select final smplPoint.}
\item{neighbours}{number of points that will be selected for the similarity computation.}
\item{similarity}{if True, will use the similarity matrix for the clustering function.}
\item{clustFunction}{the clustering function to apply on data.}
\item{...}{additional arguments for the clustering function.}
}
\value{
returns a list containing the following elements:
\itemize{
\item{results: }{clustering results}
\item{sample: }{dataframe containing the sample used}
\item{quantLabels: }{quantization labels}
\item{clustLabels: }{results labels}
\item{kmeans: }{kmeans quantization results}
}
}
\description{
This function will sample the data before performing a classification function on the samples and then applying K nearest neighbours.
}
\examples{
### Example 1: two disks of the same size, offset horizontally
n<-100 ; r1<-1
x<-(runif(n)-0.5)*2;
y<-(runif(n)-0.5)*2
# Keep only points inside the disk of radius r1 (squared-distance test;
# the original "*2" was a typo for "^2" and selected a half-plane instead).
keep1<-which((x^2+y^2)<(r1^2))
disk1<-data.frame(x+3*r1,y)[keep1,]
disk2 <-data.frame(x-3*r1,y)[keep1,]
sameTwoDisks <- rbind(disk1,disk2)
res <- fastClustering(scale(sameTwoDisks),smplPoint = 500,
stopCriteria = 0.99, neighbours = 7, similarity = TRUE,
clustFunction = UnormalizedSC, K = 2)
plot(sameTwoDisks, col = as.factor(res$clustLabels))
### Example 2: clustering the iris measurements
res <- fastClustering(scale(iris[,-5]),smplPoint = 500,
stopCriteria = 0.99, neighbours = 7, similarity = TRUE,
clustFunction = spectralPAM, K = 3)
plot(iris, col = as.factor(res$clustLabels))
table(res$clustLabels,iris$Species)
}
\author{
Emilie Poisson Caillault and Erwan Vincent
}
|
758fc46ad76ded434943e673af862ac032e9d83c
|
0d4c1d4a347fbf9202d21aa1710a3b056711cedf
|
/vignettes/reporter-figure.R
|
1e076c8bbcd6722615e926d9ae94ded003cc9bac
|
[] |
no_license
|
armenic/reporter
|
6a5756977da13340f7bf80cd63d13d340d97d8f9
|
00dc496ca93afef4b6e05f0f24a74dc935e91123
|
refs/heads/master
| 2023-02-18T06:49:22.454126
| 2021-01-19T14:55:15
| 2021-01-19T14:55:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
reporter-figure.R
|
# Vignette code extracted by knitr ("purl") from the reporter package's
# figure vignette. The first chunk configures knitr for the rendered
# document; the second chunk was tagged eval=FALSE, so its code is kept
# commented out here.
## ----setup, include = FALSE---------------------------------------------------
# Collapse source and output in rendered chunks and prefix output with "#>".
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE, echo=TRUE----------------------------------------------------
# library(reporter)
# library(ggplot2)
#
# # Create temporary path
# tmp <- file.path(tempdir(), "example10.pdf")
#
# # Prepare data
# dat <- mtcars[order(mtcars$cyl), ]
#
# # Generate plot
# p <- ggplot(dat, aes(x=disp, y=mpg)) + geom_point()
#
# # Define plot object
# plt <- create_plot(p, height = 4, width = 8) %>%
# titles("Figure 1.0", "MTCARS Mileage By Displacement", blank_row = "none") %>%
# footnotes("* Motor Trend, 1974")
#
# # Add plot to report
# rpt <- create_report(tmp, output_type = "PDF") %>%
# set_margins(top = 1, bottom = 1) %>%
# options_fixed(font_size = 12) %>%
# page_header("Sponsor", "Study: cars") %>%
# add_content(plt) %>%
# page_footer(Sys.time(), "Confidential", "Page [pg] of [tpg]")
#
# # Write out report
# if (rmarkdown::pandoc_available("1.12.3")) {
# res <- write_report(rpt)
# }
#
# # View report
# # file.show(tmp)
|
3805e38a1eadf270d481f9ae132d5f1bd3b14545
|
12150c61edf9bb228cc80496226bba0abc3f0064
|
/Plot4.R
|
98c9919ed18f6620125ef9ab848d39b039830917
|
[] |
no_license
|
diazidx/Exploratory-Data-Analysis-Project
|
0bfa126b49566c7f2fbf25b2340d557c68293b38
|
53a3368ced52b5c72dbc5b7a947f806d805ee429
|
refs/heads/master
| 2020-05-01T09:44:38.983687
| 2019-10-16T00:14:05
| 2019-10-16T00:14:05
| 177,407,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
Plot4.R
|
## Plot 4: nationwide coal-combustion PM2.5 emissions per year (lattice).
library(lattice)

## Load the NEI emissions summary and the source classification table.
pm25 <- readRDS("summarySCC_PM25.rds")
sourceCodes <- readRDS("Source_Classification_Code.rds")

## Keep sources whose short name mentions coal, then subset the emissions
## records to those source codes.
coalSources <- sourceCodes[grepl("Coal", sourceCodes$Short.Name), ]
coalEmissions <- pm25[pm25$SCC %in% coalSources$SCC, ]

## Total the coal-related emissions for each year.
byYear <- aggregate(coalEmissions$Emissions, by = list(coalEmissions$year), FUN = sum)
colnames(byYear) <- c("year", "emissions")

## Render the yearly totals as a line plot with the lattice system.
png(filename = "plot4.png")
xyplot(emissions ~ year, data = byYear, type = "l", xlab = "Year", ylab = "Total (tons)", main = "Coal Emissions Nationwide by Year")
dev.off()
|
aec31b8f56d0f3a2f287966df2d0744dedcb8804
|
cf581ab61b20fa39bec3cb7f0e9e5c3b8c0442ef
|
/build/docker_install_rpkgs.R
|
9defdaac18c246743ee69540f461acaf53e79f8f
|
[
"MIT"
] |
permissive
|
kaybenleroll/insurance-modelling
|
52e78b46cbcc87f03e434e305404c73cad0220cb
|
b2a134a094a511db1b58141537ba1dac28618c5b
|
refs/heads/master
| 2023-04-10T08:09:09.801108
| 2021-03-05T16:10:21
| 2021-03-05T16:10:21
| 281,653,899
| 3
| 2
|
MIT
| 2022-06-07T11:41:36
| 2020-07-22T11:02:14
|
Makefile
|
UTF-8
|
R
| false
| false
| 147
|
r
|
docker_install_rpkgs.R
|
# Install the CASdatasets package (insurance datasets) from GitHub at a
# pinned commit so Docker image builds are reproducible.
remotes::install_github(
"dutangc/CASdatasets",
ref = "cc69b33959a42f24b8aaaf732e7c3d623896eeea",  # exact revision pin
subdir = "pkg",      # the package sources live in the pkg/ subdirectory
upgrade = "never"    # never upgrade dependencies implicitly during build
)
|
af8fc0a90977bd81e75e15eec2f16bee65505580
|
96cf33c736f40c3ef3854f0834b673e63515d787
|
/006_by_individual_analysis.R
|
23d85be9caa13ff662a04480f108a8194e3fcd69
|
[] |
no_license
|
bodowinter/rapid_prosody_transcription_analysis
|
859a2ba1e87859fb57f98e593621804b728d907f
|
1ed4ec0751681ccdadd2613f587101325bfc548d
|
refs/heads/master
| 2021-01-19T01:41:53.813893
| 2018-08-15T07:18:18
| 2018-08-15T07:18:18
| 37,758,524
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,573
|
r
|
006_by_individual_analysis.R
|
## Bodo Winter
## By-individual analyses
## June 22, 2015
## July 27, 2016: Finishing brushes and incorporation of spectral tilt
## July 27, 2016: Replaced ggplot2 with base graphs
##------------------------------------------------------------------
## Load in data and packages + preprocessing:
##------------------------------------------------------------------
## Load in libraries (cluster: silhouette; pvclust: bootstrapped clustering):
library(cluster)
library(pvclust)
## Load mixed models:
# NOTE(review): hard-coded absolute path — this script only runs on the
# original author's machine; consider a relative path instead.
mainDir <- '/Users/teeniematlock/Desktop/research/rapid_prosody_transcription/analysis/data'
setwd(mainDir)
load('mixed_models.RData')
## Generate a matrix with all the listeners' random effects:
allsubs <- data.frame(Subject = rownames(coef(xmdl.MeanPitch)$Listener),
MeanPitch = coef(xmdl.MeanPitch)$Listener[, 2],
MaxPitch = coef(xmdl.MaxPitch)$Listener[, 2],
Amplitude = coef(xmdl.RMS_amplitude)$Listener[, 2],
VowelDur = coef(xmdl.VowelDur)$Listener[, 2],
SyllableDur = coef(xmdl.SyllableDur)$Listener[, 2],
NSyll = coef(xmdl.NSyll)$Listener[, 2],
Range = coef(xmdl.RangeST)$Listener[, 2],
Slope = coef(xmdl.SlopeST)$Listener[, 2],
Freq = coef(xmdl.Freq)$Listener[, 2],
Vowel = coef(xmdl.Vowel)$Listener[, 2],
POS = coef(xmdl.POS)$Listener[, 2], # for content
Focused = coef(xmdl.Focused)$Listener[, 2], # for focus particle
Argument = coef(xmdl.argument)$Listener[, 2], # for last argument
SpectralEmphasis = coef(xmdl.SpectralEmphasis)$Listener[, 2],
H1A2 = coef(xmdl.H1A2)$Listener[, 2],
H1A3 = coef(xmdl.H1A3)$Listener[, 2],
Accented = coef(xmdl.Accented)$Listener[, 2]
)
## Add mean absolute change for the multi-level factors:
allsubs$AccentType <- rowMeans(abs(coef(xmdl.AccentType)$Listener[, 1:4]))
allsubs$AccentPosition <- rowMeans(abs(coef(xmdl.AccentPosition)$Listener[, c(1:3)]))
##------------------------------------------------------------------
## Correlations between random effects slopes:
##------------------------------------------------------------------
## Generate correlation matrices:
round(cor(allsubs[, -1]), 2)
round(cor(allsubs[, c('Accented', 'Focused', 'Argument', 'POS', 'AccentType', 'AccentPosition')]), 2)
round(cor(allsubs[, c('POS', 'MaxPitch', 'Amplitude', 'VowelDur', 'AccentType', 'AccentPosition')]), 2)
## Group variables according to meaningful categories:
prosodic_variables <- c('Accented', 'AccentPosition', 'AccentType')
syntactic_variables <- c('Argument', 'Focused', 'POS')
phonetic_variables <- c('MeanPitch', 'MaxPitch', 'Amplitude', 'VowelDur', 'SyllableDur',
'SpectralEmphasis', 'H1A2', 'H1A3')
## Create a data frame with means of these variables:
newsubs <- data.frame(Prosody = rowMeans(allsubs[, prosodic_variables]),
Syntax = rowMeans(allsubs[, syntactic_variables]),
Phonetics = rowMeans(allsubs[, phonetic_variables]),
Freq = allsubs$Freq)
## Group variables according to different phonetic parameters:
pitch <- c('MeanPitch', 'MaxPitch')
spectrum <- c('SpectralEmphasis', 'H1A2', 'H1A3')
duration <- c('VowelDur', 'SyllableDur')
## Create a data frame with means of these variables:
subsphon <- data.frame(Pitch = rowMeans(allsubs[, pitch]),
Spectrum = rowMeans(abs(allsubs[, spectrum])),
Duration = rowMeans(allsubs[, duration]))
## Perform correlations:
cor(newsubs)
cor(subsphon)
## Perform significance test on correlations for the 'newsubs' data frame:
cor.test(newsubs$Prosody, newsubs$Syntax)
cor.test(newsubs$Prosody, newsubs$Phonetics)
cor.test(newsubs$Prosody, newsubs$Freq)
cor.test(newsubs$Syntax, newsubs$Freq)
cor.test(newsubs$Phonetics, newsubs$Freq)
cor.test(newsubs$Syntax, newsubs$Phonetics)
## Perform Dunn-Sidak correction:
## Dunn-Sidak correction: family-wise adjusted p-value for a raw p-value P
## tested across N comparisons.
dunnsidak <- function(P, N) {
  1 - (1 - P)^N
}
dunnsidak(cor.test(newsubs$Prosody, newsubs$Syntax)$p.val, 6)
dunnsidak(cor.test(newsubs$Prosody, newsubs$Phonetics)$p.val, 6)
dunnsidak(cor.test(newsubs$Prosody, newsubs$Freq)$p.val, 6)
dunnsidak(cor.test(newsubs$Syntax, newsubs$Freq)$p.val, 6)
dunnsidak(cor.test(newsubs$Phonetics, newsubs$Freq)$p.val, 6)
dunnsidak(cor.test(newsubs$Syntax, newsubs$Phonetics)$p.val, 6)
## Perform significance test on correlations for the 'subsphon' data frame:
cor.test(subsphon$Pitch, subsphon$Spectrum)
cor.test(subsphon$Pitch, subsphon$Duration)
cor.test(subsphon$Duration, subsphon$Spectrum)
## Perform Dunn-Sidak correction:
dunnsidak(cor.test(subsphon$Pitch, subsphon$Spectrum)$p.val, 3)
dunnsidak(cor.test(subsphon$Pitch, subsphon$Duration)$p.val, 3)
dunnsidak(cor.test(subsphon$Duration, subsphon$Spectrum)$p.val, 3)
##------------------------------------------------------------------
## Cluster analysis following Levshina (2016, Ch. 5):
##------------------------------------------------------------------
## Create a hierarchical agglomerative cluster model from the by-listener
## slope matrix (column 1 of allsubs holds the subject IDs):
slope.dist <- allsubs[, -1]
rownames(slope.dist) <- allsubs$Subject
## FIX: 'euclidian' is not a valid stats::dist() method name and makes
## dist() stop with "invalid distance method"; the correct spelling is
## 'euclidean'.
slope.dist <- dist(slope.dist, method = 'euclidean')
slope.hc <- hclust(slope.dist, method = 'ward.D2')
## Test Silhouette width of different cluster solutions
## (vapply pins the return type to one double per k, unlike sapply):
asw <- vapply(2:10,
  function(x) summary(silhouette(cutree(slope.hc, k = x), slope.dist))$avg.width,
  numeric(1))
asw # three cluster solution is best
## Plot this with subject names:
quartz('', 11, 6)
plot(slope.hc, hang = -1)
rect.hclust(slope.hc, k = 3)
## Get cluster affiliations (drop the Subject column, keep Cluster):
allsubs_clust <- cbind(allsubs,
  Cluster = cutree(slope.hc, k = 3))[, -1]
allsubs_clust <- split(allsubs_clust, allsubs_clust$Cluster)
clust_sum <- lapply(allsubs_clust, FUN = colMeans)
clust_sum <- as.data.frame(clust_sum)
clust_sum <- clust_sum[-nrow(clust_sum), ]  # drop the Cluster row itself
names(clust_sum) <- paste0('Cluster', 1:3)
## How do cluster 1 and cluster 2 differ?
diffs <- clust_sum$Cluster2 - clust_sum$Cluster1
names(diffs) <- rownames(clust_sum)
# cluster 2 pays more attention to: frequency, POS, focus particle and last argument
# much less to Accented
## How do cluster 1 and cluster 3 differ?
diffs <- clust_sum$Cluster3 - clust_sum$Cluster1
names(diffs) <- rownames(clust_sum)
## What is the gender distribution for the clusters?
clust1_names <- rownames(allsubs_clust[[1]])
clust2_names <- rownames(allsubs_clust[[2]])
clust3_names <- rownames(allsubs_clust[[3]])
table(RPT[match(clust1_names, RPT$Listener), ]$ListenerGender)
table(RPT[match(clust2_names, RPT$Listener), ]$ListenerGender)
## Validate cluster solution with bootstrapped p-values:
this_df <- allsubs[, -1]
rownames(this_df) <- allsubs$Subject
set.seed(42)
## FIX: same 'euclidian' -> 'euclidean' correction as above; pvclust
## forwards method.dist to dist() for non-correlation distance names.
slope.pvc <- pvclust(t(this_df),
  method.hclust = 'ward.D2', method.dist = 'euclidean')
## Visualize this with clusters that surpass a = 0.05:
quartz('', 11, 6)
plot(slope.pvc, hang = -1)
pvrect(slope.pvc, alpha = 0.95)
|
5517ca78a11fda74d03f89c4a6aa768289314e94
|
fe442b183f49aa2f49302e31295c1bc3ed254fb5
|
/scripts/internal_use_required_packages_installer.R
|
c51132dca5762b59cbac8a5c7315a3c4ede767fb
|
[] |
no_license
|
sergiu-burlacu/book
|
fcc27ded8b6997a0603aaa383184f346929cf372
|
1f5b6713944c4082fb04c35621b78fa61c9a0eba
|
refs/heads/master
| 2023-02-12T14:55:56.223144
| 2021-01-03T21:23:18
| 2021-01-03T21:23:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
internal_use_required_packages_installer.R
|
# Scan the project for R package dependencies (including those referenced in
# knitr documents) and install any that are missing from the local library.
library(checkpoint)
found_packages <- scanForPackages(".", use.knitr = TRUE)$pkgs
# Only call install.packages() when at least one dependency is not installed.
if (length(found_packages[!found_packages %in% installed.packages()]) > 0) {
install.packages(found_packages[!found_packages %in% installed.packages()])
}
# devtools::install_github("ebenmichael/augsynth")
|
839f24a94cde0aa6ec2749e865230ce2b729b279
|
987a38b326796527b997a61af2f2be7afa80a249
|
/etch.R
|
4c9f77b5059248f7b25fb521d56757eec836a82d
|
[] |
no_license
|
zaomy/R
|
1dec13fdd674163cfe27e7c7e82040c27a1d36cd
|
3bd6998b780be31fc75a6758d9e89af238a5ea1a
|
refs/heads/master
| 2021-08-07T11:12:33.885756
| 2017-11-08T03:38:38
| 2017-11-08T03:38:38
| 109,583,386
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 346
|
r
|
etch.R
|
## Etch-rate experiment: read the data, plot it, and fit a one-way model.

## Step 1: import the whitespace-delimited data file.
etch_file <- "./etch1.dat"
etch_df <- read.delim(etch_file, header = FALSE, sep = "", skip = 0, as.is = TRUE)
colnames(etch_df) <- c("power", "etch")

## Step 2: visualize etch rate by power setting.
boxplot(etch ~ power, data = etch_df)

## Step 3: fit and summarize the linear model of etch rate on power.
etch_fit <- lm(etch ~ power, data = etch_df)
summary(etch_fit)
|
ad2ed80aab0aaf530d4f828f5a1e76b2ce2b13eb
|
53307f9c1ea403d12f8f237423167bad974c31b9
|
/plot3.R
|
4c336dd4b5be97a729376b1f6f4e3c127991e514
|
[] |
no_license
|
samuelsherls/ExData_Plotting_shers
|
85017d27cf890736c6846e3a67699c77d65c6fae
|
6183912fa927a339146664fad96a2cb0811b9347
|
refs/heads/master
| 2021-01-16T20:01:36.903784
| 2015-08-06T16:00:43
| 2015-08-06T16:00:43
| 40,281,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
r
|
plot3.R
|
# Plot 3: energy sub-metering over the first two days of February 2007,
# from the UCI individual-household electric power consumption dataset.
dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header = TRUE, sep = ";", stringsAsFactors=FALSE, dec=".")
# Keep only observations from 2007-02-01 and 2007-02-02 (dates are d/m/Y).
febdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"),]
activepowerdata <- as.numeric(febdata$Global_active_power)  # NOTE(review): unused below
submeter1 <- as.numeric(febdata$Sub_metering_1)
submeter2 <- as.numeric(febdata$Sub_metering_2)
submeter3 <- as.numeric(febdata$Sub_metering_3)
# Combine the Date and Time columns into timestamps for the x-axis.
datetime <- strptime(paste(febdata$Date, febdata$Time, sep = " " ),"%d/%m/%Y %H:%M:%S")
# Draw the three sub-metering series as step lines into a 480x480 PNG.
png("plot3.png", width= 480, height = 480)
plot(datetime,submeter1,type = "S", ylab = "Energy Submetering", xlab ="")
lines(datetime, submeter2, type ="S", col="red")
lines(datetime, submeter3, type ="S", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
81b33db27cbf2657115e8d6dd785cb2c98d10ecb
|
1879fba0c2bc1acbad17656af70575beeac7c9be
|
/data/analysis.R
|
ddea3b4d2f3492c037b8df29b3cbf00ff0a27fd2
|
[] |
no_license
|
mricha41/mricha41.github.io
|
e3cbe46c5213df0b83abf94a0bf013bcccfd8569
|
ee421d27b605dfeaf1bfdb8cd2e4fe35f18dd237
|
refs/heads/master
| 2021-06-17T22:28:40.826001
| 2021-03-27T04:42:27
| 2021-03-27T04:42:27
| 131,674,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,569
|
r
|
analysis.R
|
#######################################
#Cerberus Online Survey Data Analysis
#######################################
#######################################
#Very low counts...four responses total
#######################################
#interpret results accordingly ;)
#######################################
cerberus = read.csv("Cerberus_online_survey_data.csv")
#take a look at responses...
#except flag questions (Q2/Q4)
#and open-ended (Q8/Q9/Q10) for now
xtabs(~Q1, data=cerberus)
xtabs(~Q3, data=cerberus)
xtabs(~Q5, data=cerberus)
xtabs(~Q6, data=cerberus)
xtabs(~Q7, data=cerberus)
#there were not surprises for Q4_x...
#therefore, nothing to analyze/report there for now ;)
#zero really drags avg down...
mean(cerberus$Q3)
#Q3 has fairly diverse responses...
#and everyone seems to have varying
#degrees of feature use, so....
#create breaks for profiling
cerberus$featureSum = rowSums(
cerberus[
,
c(
"Q2_1",
"Q2_2",
"Q2_3",
"Q2_4",
"Q2_5",
"Q2_6",
"Q2_7",
"Q2_8"
)
],
na.rm = TRUE
)
xtabs(~featureSum, cerberus)
cerberus$featureCat = ifelse(
#lower use...
cerberus$featureSum < 3,
1,
ifelse(
#medium use...
cerberus$featureSum == 3,
2,
ifelse(
#higher use...
cerberus$featureSum > 3,
3,
NA
)
)
)
xtabs(~featureCat, cerberus)
cerberus$homeCat = ifelse(
#home less often...
cerberus$Q5 < 50,
1,
ifelse(
#home somewhat often...
cerberus$Q5 %in% c(50:75),
2,
ifelse(
#home quite often...
cerberus$Q5 >= 76,
3,
NA
)
)
)
xtabs(~homeCat, cerberus)
#categorize devices...
cerberus$deviceProfile = ifelse(
#cheap...
cerberus$Q1 == 1,
1,
#not cheap...;)
2
)
xtabs(~deviceProfile, cerberus)
#personas...pretty subjective
#because of the low counts!
#kind of smooshing people into
#my previously arrived-at conclusions
#but using different measurements
#NOT ideal...not really good analysis either
#more participation might have helped ;)
#at any rate - my theoretical framework
#from structured interview data (also few participants)
#is somewhat at odds with the survey data
xtabs(~featureCat+homeCat+deviceProfile, cerberus)
cerberus$persona = ifelse(
#practical busybody...cheap device, happens to also be home a lot
#meh...kind of weak since they use a lot of features
cerberus$deviceProfile == 1 & cerberus$homeCat == 3,
1,
ifelse(
#sophisticated shut-in (better device, home a lot, uses a lot of features)
#this jives well...
cerberus$deviceProfile == 2 & cerberus$homeCat == 3 & cerberus$featureCat == 2,
2,
ifelse(
#sophisticated socialite (better device, not home so much)
#oddly, they are in cat 1/2 for feature use...not ideal
cerberus$deviceProfile == 2 & cerberus$homeCat %in% c(1:2) & cerberus$featureCat %in% c(1,2),
3,
NA
)
)
)
xtabs(~persona, cerberus)
#how do personas stack up on recommendations?
xtabs(~persona+Q3, cerberus)
#how do personas stack up on visits?
xtabs(~persona+Q6, cerberus)
xtabs(~persona+Q7, cerberus)
#nothing interesting here...
#found some support for features in open-ended:
#add feature for silence when the customer wants no disturbances
#(for example pets/children going nuts when the doorbell rings...)
#support for high-end devices that include local storage...
#nobody is using the cloud feature:
xtabs(~Q2_5, cerberus)
#womp...womp...
|
b7cddd41bef3ec04f7ad116d232d77128df8ba2a
|
a8710ca51f2c3bd9fa19947b0c0c36b4062aaa33
|
/man/ReferenceMale.Rd
|
aa2f5f2c40b69096899495832d10594800c5d103
|
[] |
no_license
|
cran/ELT
|
2ba6dd97a0de6620120b93fe008a834f5e8e4f6a
|
938a633944b42d92572234a4d1c5c12c9463fc94
|
refs/heads/master
| 2021-01-17T08:48:38.579908
| 2016-04-11T09:06:26
| 2016-04-11T09:06:26
| 17,678,902
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
rd
|
ReferenceMale.Rd
|
\name{ReferenceMale}
\alias{ReferenceMale}
\docType{data}
\title{ReferenceMale used for the example.}
\description{This data corresponds to an adjusted version of the French national demographic projections INSEE 2060 for the male population.}
\usage{data(ReferenceMale)}
\examples{data(ReferenceMale)}
\keyword{datasets}
|
f4d4ee0033b75cca6c8f32f4f61d5d78ba584dfa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BatchExperiments/examples/summarizeExperiments.Rd.R
|
bfac7edbbb69c233c3d4a5f83607e8f914855cfc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 752
|
r
|
summarizeExperiments.Rd.R
|
# Runnable example extracted from the BatchExperiments documentation for
# summarizeExperiments().
library(BatchExperiments)
### Name: summarizeExperiments
### Title: Summarize selected experiments.
### Aliases: summarizeExperiments
### ** Examples
# Build a temporary registry with one static problem and two algorithms,
# each crossed over a small exhaustive parameter design, replicated twice.
reg = makeExperimentRegistry("summarizeExperiments", seed = 123, file.dir = tempfile())
p1 = addProblem(reg, "p1", static = 1)
a1 = addAlgorithm(reg, id = "a1", fun = function(static, dynamic, alpha, beta) 1)
a2 = addAlgorithm(reg, id = "a2", fun = function(static, dynamic, alpha, gamma) 2)
ad1 = makeDesign(a1, exhaustive = list(alpha = 1:2, beta = 1:2))
ad2 = makeDesign(a2, exhaustive = list(alpha = 1:2, gamma = 7:8))
addExperiments(reg, algo.designs = list(ad1, ad2), repls = 2)
# Summarize all experiments, then break the counts down by parameter values.
print(summarizeExperiments(reg))
print(summarizeExperiments(reg, show = c("prob", "algo", "alpha", "gamma")))
|
01cb591e4212460be76b35afaf1b05b85d07a67e
|
69d8086cbf6b7395c62bb65a82edfc170f441777
|
/oncosnpMasterfileParser.R
|
bdf1c95ed54f853aa3aef2d5de07706ae13efdef
|
[] |
no_license
|
flywind2/pancancer_ith
|
8fe9fc45082528992e0a32fa9e964ec29d67f197
|
144172fca33dc394a9b230c7774c7b8510b57fd2
|
refs/heads/master
| 2020-06-18T15:39:46.297774
| 2016-10-20T23:54:55
| 2016-10-20T23:54:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
r
|
oncosnpMasterfileParser.R
|
# Parse an OncoSNP master file: pair each sample's tumour file with a matched
# normal (blood preferred, solid tissue as fallback), write the pairing
# table, then split it into batch files of up to 18 rows.
masterfile = read.table("C:/Users/joseph/Documents/UCECcnvMasterFile.txt", header = TRUE, sep = "\t",
stringsAsFactors = FALSE, quote = "", comment.char = "",row.names = NULL)
samples = as.factor(masterfile[,"sample"])
for (i in 1:length(levels(samples))){
SampleID = as.character(levels(samples)[i])
print(SampleID)
# Tumour file and blood-normal file recorded for this sample.
TumourFile = masterfile[masterfile[,"sample"] == SampleID & masterfile[,"type"] == "Tumor", "filename"]
NormalFile = masterfile[masterfile[,"sample"] == SampleID & masterfile[,"type"] == "BloodN", "filename"]
output = cbind(SampleID, TumourFile, NormalFile)
# cbind() ignores zero-length vectors, so ncol != 3 means no blood normal
# was found; retry with a solid-tissue normal instead.
if(ncol(output) != 3){
NormalFile = masterfile[masterfile[,"sample"] == SampleID & masterfile[,"type"] == "SolidN", "filename"]
output = cbind(SampleID, TumourFile, NormalFile)
}
if(ncol(output) == 3){
# NOTE(review): if the very first sample yields no complete pair, 'final'
# is never initialised and the next rbind() errors — TODO confirm the
# input guarantees sample 1 always pairs.
if (i != 1) final = rbind(final, output)
else final = output
}
}
filename = "C:/Users/joseph/Documents/UCEC_oncosnp_masterfile.txt"
write.table(x = final, file = filename, append = FALSE, quote = FALSE,
col.names = TRUE, row.names = FALSE, sep = "\t")
# Re-read the pairing table and emit batch files of 18 samples each.
oncosnpbatch = read.table("C:/Users/joseph/Documents/UCEC_oncosnp_masterfile.txt", header = TRUE, sep = "\t",
stringsAsFactors = FALSE, quote = "", comment.char = "",row.names = NULL)
for (i in 1:30){
if(nrow(oncosnpbatch) < 18*i) small = nrow(oncosnpbatch) else small = 18*i
# NOTE(review): once (i-1)*18 + 1 exceeds the row count, seq() counts
# downwards and re-emits earlier rows — presumably the data never reaches
# 30*18 rows, but the loop should really stop at ceiling(nrow/18); verify.
output = oncosnpbatch[c(seq((i-1)*18 + 1, small)),]
filename = paste("C:/Users/joseph/Documents/UCEC_oncosnp_masterfile", i, ".txt", sep = "")
write.table(x = output, file = filename, append = FALSE, quote = FALSE,
col.names = TRUE, row.names = FALSE, sep = "\t")
}
|
f0fa3f0194eb7357ba4de125a39be870d20c2205
|
5babfd17883edfc39fc662d242f62dcbe6b4c898
|
/R/timestamp-package.R
|
7d01de5562ad8b9cd2102c2c4c1f026077310882
|
[] |
no_license
|
Dasonk/timestamp
|
6c673e5181f6aa7685f806db561ca2b45ccae874
|
d81a09e5da72a41f7dee969b810fcca16fd2fe0a
|
refs/heads/master
| 2021-01-23T02:34:30.319384
| 2017-06-02T17:16:02
| 2017-06-02T17:16:02
| 12,607,310
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
timestamp-package.R
|
#' Adds a timestamp to the current prompt.
#'
#' Adds a timestamp to the prompt. It will update anytime a top
#' level call occurs.
#'
#' @docType package
#' @name timestamp
#' @aliases timestamp timestamp-package package-timestamp
NULL
|
1c2357a1119bc8135dfe5051034c6bcbd9515b5d
|
5e7cfe48b3a86cf20bddff7383f0292a2c12514d
|
/plot2.R
|
3cea92d9172cda9b9e8b22be709cbb7b665548be
|
[] |
no_license
|
JongHyun-Evan-Park/Exploratory-Data-Analysis-CoursePJ2
|
f0720ddcd1302c08e0803adafaf29b901e768c5c
|
d1665cae676f88471775939f8c014281ef890a5d
|
refs/heads/main
| 2023-08-19T19:30:50.232168
| 2021-10-22T04:16:42
| 2021-10-22T04:16:42
| 419,962,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 804
|
r
|
plot2.R
|
#2 Total emissions from PM2.5 in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008
Url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# Download and unpack the NEI dataset into ./data (created on first run).
if(!file.exists("./data")){dir.create("./data")}
download.file(Url, destfile="./data/exdata_data_NEI_data")
FilePath <- "./data/exdata_data_NEI_data"
unzip(FilePath, exdir = "./data")
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")
# NOTE(review): dplyr/ggplot2 are loaded but the plot below only uses base
# graphics; SCC is likewise read but unused in this script.
library(dplyr)
library(ggplot2)
# Subset to Baltimore City and total the emissions per year.
EmBalt <- NEI[which(NEI$fips=="24510"),]
EmBaltTotal <- tapply(EmBalt$Emissions,EmBalt$year,sum)
# Render the yearly totals as a bar chart into a 480x480 PNG.
png("plot2.png",width=480,height=480)
barplot(height=EmBaltTotal, names.arg=dimnames(EmBaltTotal)[[1]],xlab = "Year", ylab="Total Emission", main="Total PM2.5 Emissions by Year in Baltimore", col= c(1:4))
dev.off()
|
9658dae79498f6fa6d6949ab0dca66afa565b2da
|
a8c5cff02ee11e446c465d7cd6ba2591fb90a3bd
|
/LoadFunctions.R
|
f723ad2c309ba6baa2b73a755e8d4f8b99a10cce
|
[] |
no_license
|
jvduijvenbode/assignmentJonas
|
f06bf07bbe0f5bb3a1ee9bf9ccc83a8bebc3acbe
|
5de394d03f94b4e31731f0c26efbbd148a0b1bf2
|
refs/heads/master
| 2016-09-05T18:32:34.939824
| 2013-12-04T14:20:56
| 2013-12-04T14:20:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
r
|
LoadFunctions.R
|
## Convert a factor-derived population string containing thousands
## separators (e.g. "1,234,567") into a single numeric value.
factortopopnum <- function(x) {
  pieces <- unlist(strsplit(x, split = ","))
  as.numeric(paste(pieces, collapse = ""))
}
# Select one or more years of the world-population table for use in this
# script.
#
# poptable: data frame with ISO.country.code and Country columns plus one
#           factor column per year named "X<year>" (e.g. "X2010") holding
#           population strings with comma separators.
# y:        year (or vector of years) to extract.
# Returns a data frame with columns ISO2, country and one numeric
# "populationX<year>" column per requested year; rows lacking a country
# code are dropped.
sel_year<-function(poptable,y){
#make year readable for dataframe (the "X" prefix matches the column names)
years<-paste0("X",as.character(y))
#select the data from the selected year
outtable<-(cbind(as.data.frame(poptable$ISO.country.code),poptable$Country))
for (year in years){
#convert the selected year factor values to numerical data
#(factortopopnum strips the comma separators element-by-element via mapply)
convertedyear<-mapply(as.character(poptable[,year]),FUN=factortopopnum)
outtable<-cbind(outtable,convertedyear)
}
names(outtable)<-c("ISO2","country",paste0("population",as.character(years)))
#remove countries with no country code
outtable<-outtable[outtable$ISO2!="",]
return(outtable)
}
## Convert Inf values created by dividing by zero (missing data) to NA.
##
## x: a data.frame (or matrix), scanned column by column.
## Returns x with every infinite entry replaced by NA.
Inf2NA <- function(x) {
  ## seq_len() is safe for zero-column input, where the original
  ## 1:ncol(x) would iterate over c(1, 0) and fail on x[, 1].
  for (i in seq_len(ncol(x))) {
    x[, i][is.infinite(x[, i])] <- NA
  }
  x
}
|
96bf3ca18e4a8eacbd2d8261354a632f4ca37081
|
01ececa7c221357eaedf85a1c2b8414fd45302a2
|
/tests/testthat/test-05-sessionPath.R
|
58b7b350cc19d128b937886028de8d5ba45cdb47
|
[] |
no_license
|
sonejilab/cellexalvrR
|
d7e30f147d3d991e6858f50b11e90a31863e759c
|
c533136f59fa906e1c173a46cc4f2e36608c0204
|
refs/heads/master
| 2023-03-31T19:05:15.669831
| 2023-03-17T07:24:40
| 2023-03-17T07:24:40
| 133,559,720
| 4
| 2
| null | 2018-05-15T18:52:17
| 2018-05-15T18:52:15
| null |
UTF-8
|
R
| false
| false
| 3,997
|
r
|
test-05-sessionPath.R
|
# Integration tests for sessionPath(): session-directory creation on disk,
# the immutability of the session name while a session is active, and the
# simulated main-server mode driven by a mainServer.pid file.
context('create sessionPath')
if ( ! expect_true( rmarkdown::pandoc_available() ,label= "pandoc is installed") ){
skip ( "Pandoc needed - but missing here")
}
prefix = './'
#prefix = 'tests/testthat'
#data = file.path(prefix, 'data/cellexalObj.RData')
#cellexalObj = loadObject( data )
# Start from a clean object and a fresh output directory.
cellexalObj = reset(cellexalObj)
datadir = file.path( prefix, 'data', 'output','sessionPath')
if ( file.exists( datadir) ) {
unlink( datadir, recursive=TRUE )
}
dir.create( datadir )
datadir <- normalizePath(datadir)
cellexalObj@outpath = datadir ## to not mess up the package
pidfile = file.path(cellexalObj@outpath, 'mainServer.pid')
if ( file.exists(pidfile)){
unlink( pidfile )
}
# A freshly generated session name is a timestamp with six
# underscore-separated numeric fields, e.g. the example below.
cellexalObj = sessionPath( cellexalObj )
#"2020_09_30_09_17_08"
seped = as.numeric(unlist(stringr::str_split (cellexalObj@usedObj$sessionName,"_")))
expect_true( length(seped) == 6)
expect_true( all( is.numeric(seped)))
expect_true( file.exists(cellexalObj@usedObj$sessionPath ),
label="session path created")
for ( f in c('png', 'tables') ) {
expect_true( file.exists(file.path(cellexalObj@usedObj$sessionPath,f) ), label=paste("session sub-path",f) )
}
defaultW <- getOption("warn")
options(warn = -1)
Sys.sleep(1) ## to make the timestamp different.
## this should not be overwritable without a renderReport!
old= cellexalObj@usedObj$sessionName
cellexalObj = sessionPath( cellexalObj, 'somethingNew' )
expect_true( cellexalObj@usedObj$sessionName == old, label="session name is not changable in session")
old= cellexalObj@usedObj$sessionName
cellexalObj = sessionPath( cellexalObj, 'somethingNew' )
expect_true( cellexalObj@usedObj$sessionName == old, label="session name is really not changable in session")
options(warn = defaultW)
context('create sessionPath - simulated server')
# Writing our own PID into mainServer.pid makes sessionPath() behave as if
# a main server process were running.
cellexalObj@usedObj$sessionPath = cellexalObj@usedObj$sessionRmdFiles = cellexalObj@usedObj$sessionName = NULL
cat( Sys.getpid() , file = pidfile )
cellexalObj = sessionPath( cellexalObj, old )
expect_true(file.exists( file.path(cellexalObj@outpath, 'mainServer.sessionName')), label='file mainServer.sessionName')
cellexalObj@usedObj$sessionPath = cellexalObj@usedObj$sessionRmdFiles = cellexalObj@usedObj$sessionName = NULL
cellexalObj = sessionPath( cellexalObj, 'something' )
expect_true(cellexalObj@usedObj$sessionName == old, label=paste("session name is read from file old =",old, "== new =", cellexalObj@usedObj$sessionName," ?") )
## so if we start from scratch here and reset the object.
## I still want it to have the same session name here!
cellexalObj = reset(cellexalObj)
expect_true( ! file.exists(file.path(cellexalObj@outpath, 'mainServer.sessionName')),
label="reset removes sessionName file" )
cellexalObj = sessionPath( cellexalObj )
expect_true( cellexalObj@usedObj$sessionName != old,
label=paste("is not read from sesssionName file",
cellexalObj@usedObj$sessionName," != ",old) )
cellexalObj = reset(cellexalObj)
# writeLines( "shoulNotBeRead" , file.path(cellexalObj@outpath, 'mainServer.sessionName') )
# expect_true( cellexalObj@usedObj$sessionName != "shoulNotBeRead",
# label="sessionName file is ignored without pid file")
# With no pid file and no stored name, the session name is freely settable.
unlink( pidfile )
unlink( file.path(cellexalObj@outpath, 'mainServer.sessionName') )
cellexalObj = sessionPath( cellexalObj, 'newSession' )
expect_true( cellexalObj@usedObj$sessionName == 'newSession', label="without server session the session can be reset.")
#expect_true( ! file.exists(file.path(cellexalObj@outpath, 'mainServer.sessionName')),
# label="sessionName is not create if not in server mode" )
cellexalObj= renderReport(cellexalObj)
expect_true( file.exists( file.path(cellexalObj@outpath, 'session-log-for-session-newsession.html')), label="final report is created")
expect_true( ! file.exists(file.path(cellexalObj@outpath, 'mainServer.sessionName')), label="renderReport removes sessionName file" )
|
53c0ab6e3a3d7abb7f46ab3d83d2eae01fe3407f
|
54dfd12ed56937495b2051cd0ee5f89db09477e4
|
/analysis/Experiment_1/data_preprocessing.R
|
db7e44f16316558aad82796b150d88d2c66a76d5
|
[
"MIT"
] |
permissive
|
qed-lab/Persona
|
3fab5214360975db59fa5613aca3324e14dfd78d
|
c2bb38633c3d1e48b1ad6d8ba53920c5ef05739a
|
refs/heads/master
| 2021-10-19T10:31:03.965931
| 2019-02-20T00:04:59
| 2019-02-20T00:04:59
| 78,043,670
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,328
|
r
|
data_preprocessing.R
|
# ================================================
# data_preprocessing.R
#
# Loads per-player plan recognition configuration CSVs and aggregates them
# into one data frame (and one summary CSV) per configuration.
#
# Assumptions:
# 0. All data is within the folder "~/Developer/Persona/analysis/Experiment #1"
# 1. A folder needs to contain only numbers for it to be considered.
# 2. All such folders in the experiment directory contain the same amount of
#    files, using a consistent naming scheme.
# ================================================

# Strip a trailing ".csv" extension from a file name.
strip_csv_ext <- function(name) sub("\\.csv$", "", name)

# ---- Raw Data Import ----
experiment_directory <- "~/Developer/Persona/analysis/Experiment #1"
folders <- list.files(experiment_directory, pattern = "[0-9]+[.]?[0-9]*",
                      all.files = FALSE, full.names = FALSE)

# For each player's data folder, read every CSV into a variable named
# "<folder><file-without-extension>", e.g. "-5811686_baseline".
for (folder in folders) {
  data_directory <- file.path(experiment_directory, folder)
  csv_files <- list.files(data_directory, all.files = FALSE, full.names = FALSE)
  for (csv in csv_files) {
    variable_name <- strip_csv_ext(paste0(folder, csv))
    assign(variable_name, read.csv(file.path(data_directory, csv)))
  }
}

# ================================================
# ---- Data Synthesis ----
# NOTE: csv_files still holds the listing of the LAST folder processed above;
# assumption 2 guarantees every folder contains the same files.
configurations <- character(length(csv_files))
for (i in seq_along(csv_files)) {
  csv <- csv_files[i]
  # Configuration name = file name without the leading underscore and ".csv",
  # e.g. "_baseline.csv" -> "baseline".
  configuration_name <- substring(strip_csv_ext(csv), 2)
  configurations[i] <- configuration_name

  # Compile all players' data for this configuration into one data frame.
  for (folder in folders) {
    variable_name <- strip_csv_ext(paste0(folder, csv))
    if (exists(variable_name)) {
      if (!exists(configuration_name)) {
        # First player seen for this configuration: start the aggregate.
        assign(configuration_name, get(variable_name))
      } else {
        # Append this player's rows to the running aggregate.
        assign(configuration_name, rbind(get(configuration_name), get(variable_name)))
      }
    }
  }

  # FIX: write the summary once per configuration. Previously this write sat
  # inside the folder loop, rewriting the file once per player and calling
  # get(configuration_name) before the aggregate existed on early iterations.
  if (exists(configuration_name)) {
    summary_csv_path <- file.path(experiment_directory, csv)
    write.csv(get(configuration_name), summary_csv_path)
  }
}
|
9bf97c1d561f4a245a89c12ce0de5e47a31e5491
|
d8d203b1274b616b29f4dcbb4d9d08a18535954f
|
/code/descriptive-summaries_scripts/table1-surgeon-cabg_function.R
|
bcd56149a59c48d4a75d3ec5677981a6452c6fe4
|
[] |
no_license
|
arinmadenci/volume-surgeon
|
824515594dc8031208abbf37f90ab6fa6f67ee27
|
c57fdbd39b47e40e2181fd56b24af387da6d850a
|
refs/heads/master
| 2022-11-29T01:36:26.656662
| 2020-08-10T16:39:48
| 2020-08-10T16:39:48
| 254,953,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,388
|
r
|
table1-surgeon-cabg_function.R
|
# Build "Table 1" (baseline surgeon, hospital and case-mix characteristics)
# for CABG surgeons, print it as LaTeX to tables/table-1/surgeon-cabg-table1.tex
# via here::here(), and return the formatted character matrix.
#
# Args:
#   dat      - surgeon x 90-day-period data frame (columns include volume,
#              comorbidity means, hospital characteristics, surgeon age/sex).
#   title    - NOTE(review): currently unused inside the function body.
#   file.tex - NOTE(review): currently unused; the output path is hard-coded
#              below — confirm whether it should be passed through instead.
surgeon.table1.fun <- function(dat, title, file.tex){
  # Load dependencies; installs pacman on first use.
  if(!require("pacman",character.only=T)){install.packages("pacman")}
  pacman::p_load(tableone, xtable)
  # Variables summarised by CreateTableOne().
  allVars <- c("volume", "age_mean",
               "comorb_ami_mean",
               "comorb_dementia_mean", "comorb_afib_mean", "comorb_ckd_mean",
               "comorb_copd_mean", "comorb_chf_mean", "comorb_diabetes_mean",
               "comorb_stroketia_mean",
               "hospital.volume_mean", "hosp.totalbeds_mean",
               "hosp.p_medicare_mean",
               "surgeon.age",
               "md_female")
  myVars <- allVars
  catVars <- c("md_female")
  # NOTE(review): binaryVars and continuous are defined but not used below
  # (the nonnormal= argument that used `continuous` is commented out).
  binaryVars <- c("md_female")
  continuous <- c("volume", "age_mean",
                  "comorb_ami_mean",
                  "comorb_dementia_mean", "comorb_afib_mean", "comorb_ckd_mean",
                  "comorb_copd_mean", "comorb_chf_mean", "comorb_diabetes_mean",
                  "comorb_stroketia_mean",
                  "hospital.volume_mean", "hosp.totalbeds_mean",
                  "hosp.p_medicare_mean",
                  "surgeon.age")
  # Hospitals of surgeons eligible at baseline (non-zero volume in periods 1-2).
  hospital.ids <- dat %>% filter(op_npi %in% {dat %>% filter(volume != 0 & volume_prev != 0 & surgeon_period==2) %>% .$op_npi}) %>% .$orgnpinm %>% unique()
  # Hospital-level summaries formatted as "mean (sd)" strings.
  hospitals <- dat %>% filter(orgnpinm %in% hospital.ids & surgeon_period == 3) %>% # surgeon_period ==3 because hosp.volume is volume prior to 3 (i.e., in 1 and 2)
    summarise(mean.hosp.volume={paste0(round(mean(hospital.volume_mean),1), " (", round(sd(hospital.volume_mean),1), ")")},
              mean.hosp.beds={paste0(round(mean(hosp.totalbeds_mean),1), " (", round(sd(hosp.totalbeds_mean),1), ")")},
              mean.hosp.medicare={paste0(round(mean(hosp.p_medicare_mean),1), " (", round(sd(hosp.p_medicare_mean),1), ")")}) %>%
    as.character()
  # Add one-period-ahead outcome columns and restrict to baseline-eligible surgeons.
  dat <- dat %>% group_by(op_npi) %>%
    mutate(death.percent_lead1=ifelse(lead(volume)!=0,lead(death.percent),NA),
           death.count_lead1=ifelse(lead(volume)!=0,lead(death.count),NA),
           volume_lead1=lead(volume)) %>%
    ungroup() %>%
    filter(op_npi %in%
             {dat %>% filter(volume != 0 & volume_prev != 0 & surgeon_period==2) %>% .$op_npi}) # FILTER: restricted to during baseline
  # Table-one summary restricted to the baseline periods (1-2).
  tab1 <- CreateTableOne(vars = myVars,
                         data = dat %>% filter(surgeon_period<=2),
                         factorVars = catVars)
  tab1.p <- print(tab1, #nonnormal=continuous,
                  quote=FALSE, test=FALSE, noSpaces=TRUE, printToggle = FALSE, contDigits=1, catDigits=0)
  # Overwrite the "n" cell with the total patient count (sum of volumes).
  tab1.p[1] <- sum(dat$volume[dat$surgeon_period<=2]) # number of patients
  # Surgeon-level age/sex summaries (one row per surgeon, first period only).
  mean.surgeon.agesex <- dat %>%
    group_by(op_npi) %>% filter(row_number()==1) %>% ungroup() %>%
    summarise(mean.age={paste0(round(mean(surgeon.age),1), " (", round(sd(surgeon.age),1), ")")},
              num.female={paste0(round(sum(md_female),1), " (", round(100*mean(md_female),1), ")")}) %>%
    as.character()
  # NOTE(review): rows 15:16 are addressed by POSITION in the printed table;
  # this breaks silently if allVars changes — verify if the table is edited.
  tab1.p[c(15:16)] <- as.matrix(mean.surgeon.agesex)
  num.hospitals <- dat %>% filter(surgeon_period <=2) %>% {length(unique(.$orgnpinm))} # among eligible surgeons during the baseline period
  total.num.hospitals <- dat %>% filter(surgeon_period <=2) %>% {length(unique(.$orgnpinm))} # during baseline
  # Assemble the final column: section headers ("") interleaved with values.
  tab1.p2 <- c("",
               length(unique(dat$op_npi)), # number of surgeons
               tab1.p[c(2,15:16),], # surgeon characteristics
               "",
               num.hospitals, # number of hospitals
               hospitals, # hospital characteristics
               "",
               tab1.p[c(1,3:11),] # patient characteristics
  ) %>% as.matrix()
  rownames(tab1.p2) <- c("Surgeon characteristics",
                         " Total number of surgeons",
                         names(tab1.p[c(2,15:16),]),
                         "Hospital characteristics",
                         " Total number of hospitals",
                         names(tab1.p[12:14,]),
                         "Case mix characteristics",
                         names(tab1.p[c(1,3:11),]))
  # Rename the machine-generated row labels to human-readable ones
  # (rename() works on columns, hence the double transpose).
  tab1.format <- t(t(as.matrix(tab1.p2)) %>% as.data.frame %>% rename(
    " Total number of patients"="n",
    " Mean count of CABG operations per 90 days"="volume (mean (SD))",
    " Number of female surgeons"="md_female = 1 (%)",
    " Mean surgeon age, years"="surgeon.age (mean (SD))",
    # " Proportion of patient mortality"="death.percent (mean (SD))",
    " Mean patient age, years"="age_mean (mean (SD))",
    " Proportion of patients with AMI"="comorb_ami_mean (mean (SD))",
    " Proportion of patients with atrial fibrillation"="comorb_afib_mean (mean (SD))",
    " Proportion of patients with CKD"="comorb_ckd_mean (mean (SD))",
    " Proportion of patients with COPD"="comorb_copd_mean (mean (SD))",
    " Proportion of patients with CHF"="comorb_chf_mean (mean (SD))",
    " Proportion of patients with dementia"="comorb_dementia_mean (mean (SD))",
    " Proportion of patients with diabetes"="comorb_diabetes_mean (mean (SD))",
    " Proportion of patients with stroke or TIA"="comorb_stroketia_mean (mean (SD))",
    " Mean hospital annual volume"="hospital.volume_mean (mean (SD))",
    " Hospital proportion of patients with Medicare"="hosp.p_medicare_mean (mean (SD))",
    " Mean hospital number of beds"="hosp.totalbeds_mean (mean (SD))",
    " Mean hospital volume (operations per 90-day interval)"="hospital.volume_mean (mean (SD))"
  ))
  # Indent sub-rows (those whose label starts with a space) with \hskip in LaTeX.
  named1 <- rownames(tab1.format)
  tags1 <- grepl("^ ", rownames(tab1.format))
  rownames(tab1.format) <- c(ifelse(tags1==FALSE, named1, paste("\\hskip .5cm", named1, sep=' ')))
  colnames(tab1.format) <- "Number (\\%) or mean (s.d.)"
  # Emit the LaTeX longtable to the hard-coded project path.
  print(xtable(tab1.format, align=c("lr"),
               caption=paste0("Baseline characteristics (during the previous 6 months) of ",
                              length(unique(dat$op_npi))," eligible surgeons who performed CABG fo U.S. Medicare beneficiaries"),
               label="table:surgeon-characteristics"),
        caption.placement="top",
        type="latex", sanitize.text.function = function(x){x},
        tabular.environment="longtable",
        file=here::here("tables","table-1","surgeon-cabg-table1.tex"), floating=FALSE
  )
  # Return the formatted matrix invisibly useful for inspection by the caller.
  tab1.format
}
|
b477ed1727a46912e657b9193a325b5b82e54dab
|
94d9e1666588919eaa2ffe0dd70d999f4fc08293
|
/tests/testthat/test-fields.R
|
bb2d5d3f124d39e67388d3749445ccfd2bfaccc3
|
[] |
no_license
|
datasketch/dsvalidate
|
a980428722146bc692bb5609e0789b40b39dafb5
|
b6ead04b3ccf8e8d09ed3ecf46fd3ff7991770ed
|
refs/heads/master
| 2023-07-14T20:37:16.949736
| 2021-09-02T16:53:29
| 2021-09-02T16:53:29
| 341,588,751
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,862
|
r
|
test-fields.R
|
# Check that validate_columns() flags, per column, whether it satisfies the
# spec list and whether it matches the requested field id.
test_that("columns validated", {
  df <- data.frame(id = c(1:20),
                   b = c(rep("A", 10), rep("B", 10)),
                   c = c(rep("A", 10), rep("B", 10)))
  # NOTE(review): `f` is never used below — presumably fringe() is called for
  # its side effects or is leftover; confirm.
  f <- homodatum::fringe(df)
  dic <- create_dic(df, extended = TRUE)
  # Spec: column must be Cat or Num, all-unique, with more than 10 distinct values.
  specs <- list(hdType = list(is_any_of = c("Cat", "Num")),
                unique = list(equals = TRUE),
                n_distinct = list(greater_than = 10))
  # Only "id" meets the spec; only "b" matches the requested field id.
  expected_output <- dplyr::tibble(id = names(df),
                                   meets_requirement = c(TRUE, FALSE, FALSE),
                                   matches_id = c(FALSE, TRUE, FALSE))
  actual_output <- validate_columns(dic, specs, field_id = "b")
  expect_equal(actual_output, expected_output)
})
# Check that check_field_requirements() augments a field definition with the
# outcome of matching it against a data dictionary.
test_that("field requirements checked", {
  specs <- list(hdType = list(is_any_of = c("Cat", "Num")),
                unique = list(equals = TRUE),
                n_distinct = list(greater_than = 10))
  field <- list(field_id = "id",
                label = "ID of a node",
                n_cols = list(greater_than = 0),
                id_required = TRUE,
                specs = specs)
  df <- data.frame(id = c(1:20),
                   b = c(rep("A", 10), rep("B", 10)),
                   c = c(rep("A", 10), rep("B", 10)))
  # NOTE(review): `f` is unused below; confirm whether fringe() is needed here.
  f <- homodatum::fringe(df)
  dic <- create_dic(df, extended = TRUE)
  # Expected: the input field plus the validation bookkeeping entries.
  expected_output <- field
  expected_output$id_found <- TRUE
  expected_output$id_meets_requirements <- TRUE
  expected_output$validated_columns <- validate_columns(dic, specs, field_id = "id")
  expected_output$diff_want_is <- 0
  actual_output <- check_field_requirements(field, dic)
  expect_equal(actual_output, expected_output)
})
# End-to-end check of validate_fields() against the bundled "ex02-network"
# requirements fixture: nodes need id/label/description, edges need source/target.
test_that("fields validated", {
  path <- system.file("test_dsvalidate", "ex02-network", "dsvalidate", package = "dsvalidate")
  requirements <- requirements_load(path = path)
  df <- data.frame(id = c(1:20),
                   b = c(rep("A", 10), rep("B", 10)),
                   c = c(rep("A", 10), rep("B", 10)))
  # NOTE(review): `f` is unused below; confirm whether fringe() is needed here.
  f <- homodatum::fringe(df)
  df1 <- data.frame(col1 = c(rep("A", 5), rep("B", 5), rep("C", 10)),
                    col2 = c(rep("A", 10), rep("B", 10)))
  # Input shaped like a network: a nodes table and an edges table.
  x <- list(nodes = df,
            edges = df1)
  table_id <- "nodes"
  output_validate_fields <- validate_fields(x = x,
                                            requirements = requirements)
  checked_fields <- check_fields(requirements$table[[table_id]]$fields)
  # "id" requirement is met; "label" is not (no eligible column remains).
  expect_equal(output_validate_fields[[table_id]]$id$met, TRUE)
  expect_equal(output_validate_fields[[table_id]]$label, list(met = FALSE,
                                                              id_found = FALSE,
                                                              id_required = FALSE,
                                                              specs = checked_fields$label$specs,
                                                              req_n_cols = list(greater_than = 0),
                                                              n_columns_available = 0,
                                                              use_cols = NULL,
                                                              col_used_in_other_requirement = NULL))
  expect_equal(output_validate_fields[[table_id]]$description, list(met = TRUE,
                                                                    use_cols = "b"))
  # Edges table: target/source requirements resolve to col1/col2.
  table_id <- "edges"
  expect_equal(output_validate_fields[[table_id]]$target, list(met = TRUE,
                                                               use_cols = "col1"))
  expect_equal(output_validate_fields[[table_id]]$source, list(met = TRUE,
                                                               use_cols = "col2"))
  # Validation is skipped entirely when upstream table checks failed.
  expect_null(validate_fields(x, requirements, validated_table_meta = FALSE))
  expect_null(validate_fields(x, requirements, validated_table_specs = FALSE))
})
|
0f984bc8e3f896e0e3e739de4b137739f97bbb4c
|
ff2daefdd0ead3005164e2179a9b8fbb2fff6c40
|
/data-mining-in-r-torgo.R
|
f154edec37dc13ce0e30a6f3a12d607a64e89be1
|
[
"MIT"
] |
permissive
|
prasants/r-code
|
8c076832a396833f558862745a31a25740674e07
|
6c509b6e8df860f666f4869ba87ac7fcd218aaab
|
refs/heads/master
| 2020-05-08T22:53:47.959552
| 2015-12-09T06:16:43
| 2015-12-09T06:16:43
| 34,384,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,576
|
r
|
data-mining-in-r-torgo.R
|
#Data Mining with R by Luis Torgo
#Prasant Sudhakaran####
#Chapter 1####
# NOTE: this is an interactive tutorial/notes script — many expressions are
# evaluated purely for their printed output, and some lines intentionally
# demonstrate warnings, errors or type coercion.
install.packages("RMySQL")
install.packages("DMwR")
installed.packages()
library()
#To check if there are newer versions of installed packages at CRAN
old.packages()
update.packages()
#To search the r-project site
#Format : RSiteSearch('Search term')
RSiteSearch('neural networks')
#R Objects
#Assigning values
x <- 945
x
#To list the current objects in memory
ls()
#Or alternatively:
objects()
#Vectors
#Vectors are used to store a set of elements of the same atomic data type
x <- 45.3 #Example of a single element vector
length(x) #To check the length of the vector
#Using the c() function to create vectors
v <- c(4,7, 23.5, 76.2, 90)
v
length(v)
mode(v) #Get or set the type or storage mode of an object.
#All elements of a vector must belong to the same mode, else R will coerce the type
#For example (everything becomes character here):
v <- c(4,7, 23.5, 76.2, 90, "rrt")
v
mode(v)
u <- c(4, 6, NA, 2) #All vectors may contain NA values
u
k <- c(T, F, NA, TRUE) #NOTE: prefer TRUE/FALSE over T/F in real code
k
#Accessing a specific element of a vector
v[2] #Calling the second element of the vector v
#Changing the value of a particular vector
v[1] <- "hello"
v
#Creating an empty vector
x <- vector()
x
length(x)
#Add a value to a specific index, to alter the length of the vector
x[3] <- 45
x
#Assignment Operations are destructive (or can be destructive)
v <- c(45, 243, 78, 343, 445, 44, 56, 77)
v
v <- c(v[5], v[7])
v # v now consists of 2 elements
#Vectorisation
v <- c(4,7, 23.5, 76.2, 80)
x <- sqrt(v)
x
#Vector Arithmetic
v1 <- c(4, 6, 87)
v2 <- c(34, 32.4, 12)
v1 + v2
#If the vector isn't of sufficient length, R will use a recycling rule by repeating the
#shorter vector until it fills in the size of the larger vector
v1 <- c(4, 6, 8, 24)
v2 <- c(10, 2)
v1+v2
#If the lengths are not multiples, then a warning will be issued:
v1 <- c(4, 6, 8, 24)
v2 <- c(10, 2, 4)
v1+v2
#Single numbers are represented in R as vectors of length 1
v1 <- c(4,6,8,24)
2*v1 #The vector 2 is being multiplied with each element of v1
#Factors
#Easy and compact form of handling categorical (nominal) data
#Factors have levels that are the possible values they can take.
g <- c("f", "m", "m", "m", "f", "m", "f", "m", "f", "f")
g
#Transform vector g into a factor
g <- factor(g)
g
#Defining factor levels even when data consists of only one factor (at the moment)
other.g <- factor(c("m", "m", "m", "m", "m"))
other.g
other.g <- factor(c("m", "m", "m", "m", "m"), levels= c("f", "m"))
other.g #Now has two levels
#Counting the occurrence of each possible value, using the table function
table(g)
table(other.g)
#The table function can also be used for cross tabulation of several factors
a <- factor(c("adult", "adult", "juvenile", "juvenile", "adult", "adult",
              "adult", "juvenile", "adult", "juvenile"))
table(a)
table(a,g)
#Calculate marginal and relative frequencies of contingency tables
t <- table(a,g) #NOTE: masks base::t (transpose) — fine for a tutorial
margin.table(t,1) # 1 represents the first dimension of the table
margin.table(t,2) # 2 represents the second dimension of the table
#For relative frequencies
prop.table(t,1)
prop.table(t,2)*100 #Multiplied by 100 to get percentage figures instead of decimals
#Generating Sequences
#To create a vector containing integers between 1 and 100
c <- 1:100 #NOTE: masks base::c — works only because c() lookup prefers functions
c
#Decreasing sequences
5:0
#To generate sequences of real numbers, use the function seq()
seq(-4, 1, 0.5) #A sequence of real numbers between -4 and 1, with increments of 0.5
#More example of the use of seq()
seq(from=1, to=5, length=4)
#Repeating a sequence of characters
rep(5,10) #Repeats the number 5, ten times
rep("hi", 3) #Repeats the string 'hi' three times
rep(1:2, 3) #Repeats the sequence 1:2 three times
rep(1:2, each =3) #Repeats the numbers 1 and 2, each of the three times
#The gl() function:
#Used to generate sequences involving factors
#Syntax of the function is gl(k,n), where k is the number of levels of the factor,
#n is the number of repetitions of each level.
gl(3,5)
gl(2,5, labels=c("female", "male"))
#Generating random numbers
#Ten randomly generated numbers from a normal distribution with zero mean
#and unit standard deviation
a <- rnorm(10)
plot (a)
#Randomly generated numbers with a mean of 10 and SD of 3
a <- rnorm(10, mean = 10, sd = 3)
a
plot(a)
#Five numbers drawn randomly from a student t distribution with 10 degrees of freedom
rt(5, df = 10)
#Sub-setting
#Logical index vectors: Extract elements corresponding to true values
x <- c(0,-3, 4, -1, 45, 90, -5)
x>0 #Only the values greater than 0 will return TRUE
x[x>0] #Give me the values of x, for which the following logical expression is true
#More complex logical operators
x[x<=-2 | x>5]
x[x>40 & x<100]
#Extracting several elements from a vector
x[c(4,6)]
x[1:3]
y <- c(1,4)
x[y]
#Use a vector with negative indices to indicate which elements are to be excluded from selection
x[-1]
x[-(1:3)]
#Named vectors: elements can be addressed by name
pH <- c(4.5, 7, 7.3, 8.2, 6.3)
names(pH) <- c("area 1", "area 2", "mud", "dam", "middle")
pH
#If you already know the names of the vectors:
ph <- c(area1=4.5, area2=7, mud=7.3, dam=8.2, middle=6.3)
ph
ph["mud"] #Indexing of the name
ph[c("area1", "mud", "middle")]
#Empty Index
#Represents absence of a restriction on the selection process
ph[] <- 0 #Assigns 0 as the value for all elements in "ph"
ph[]
#Matrices and Arrays
#Matrices are a special case of arrays with two single dimensions
m <- c(45, 23, 66, 77, 33, 44, 56, 12, 78, 23)
m
dim(m) <- c(2,5) #Specifying the dimensions of the matrix - 2 rows, 5 columns
m
#Alternate way to create the same matrix
m <- matrix(c(45, 23, 66, 77, 33, 44, 56, 12, 78, 23),2,5)
m #Matrix, filled by column
m <- matrix(c(45, 23, 66, 77, 33, 44, 56, 12, 78, 23), 2,
            5, byrow=T)
m #Matrix, by row
m[2,3] #2nd row, 3rd column
m[-2,1]
m[1,] #Obtain the entire first row
m[,4] #Obtain the entire fourth column
m[,90] #Will give an 'Out of Bounds' error if you specify column that doesn't exist
#Using the cbind/rbind function to join two or more matrices by columns or rows respectively
m1 <- matrix(c(45, 23, 66, 77, 33, 44, 56, 12, 78, 23), 2, 5)
m1
cbind(c(4,76), m1[,4])
m2 <- matrix(rep(10,20), 4,5) #Repeat the number '10', twenty times, arrange into a 4X5 matrix
m2
m3 <- rbind(m[1,],m2[3,]) #combine the 1st row of m1, and 3rd row of m2
m3
#Column and Row Name
results <- matrix(c(10,30,40,50,43,56,21,30),2,4,
                  byrow=T)
colnames(results) <- c("1qrt", "2qrt", "3qrt", "4qrt")
rownames(results) <- c("store1", "store2")
results
results["store1",]
results["store2", c("1qrt", "4qrt")]
#Arrays
#Arrays are extensions of matrices to more than 2 dimensions
#Initiated by using the array() function
a <- array(1:24, dim = c(4, 3, 2))
a
a[1,3,2] #First row, third column, 2nd matrix
a[1,,2] #First row, 2nd matrix
a[4,3,] #Selects the elements at 4th row, 3rd column from both matrices
a[c(2,3),,-2]
m <- matrix(c(45, 23, 66, 77, 33, 44, 56, 12, 78, 23),2,5)
m
m <- matrix(c(45, 23, 66, 77, 33, 44, 56, 12, 78, 23), 2, 5, byrow=T) #Same as the previous matrix, but arranged by row
m
m*3
m1 <- matrix(c(45, 23, 66, 77, 33, 44), 2,3)
m1
m2 <- matrix(c(12,65,32,7,4,78),2,3)
m2
m1+m2
#Lists
#List components need not be of the same type, mode or length
my.list <- list(stud.id=34453,stud.name="John",
                stud.marks=c(14.3,12,15,19))
my.list
my.list[1]
my.list[[1]] #Compare the difference between the two notations
mode(my.list[1])
mode(my.list[[1]])
#Alternate way of extracting values from a list
my.list$stud.id
names(my.list)
names(my.list) <- c("id", "name", "marks")
my.list
#Adding extra components
my.list$parents.names <- c("Anna", "Mahesh")
my.list
length(my.list)
#Removing components
my.list <- my.list[-3]
my.list
other <- list(age=19, sex = "male")
lst <- c(my.list, other) #Combining two lists
lst
#Using unlist() function to flatten all data in a list
unlist(my.list) #Coerces everything into a character string
#Data Frames
#In R, Dataframes are a special class of lists
#Each row of the dataframe can be seen as an observation, described by a set of variables
my.dataset <- data.frame(site=c("A", "B", "A", "A", "B"),
                         season=c("Winter", "Summer", "Summer", "Spring", "Fall"),
                         pH = c(7.4, 6.3, 8.6, 7.2, 8.9))
my.dataset
my.dataset[3,2] #Accessing elements of a data frame
my.dataset$pH
my.dataset[my.dataset$pH >8.2,] #Querying a specific column of the dataframe
my.dataset[my.dataset$site =="A", "pH"] #Extracting pH values of sites with value "A"
my.dataset[my.dataset$season == "Summer", c("site", "pH")]
#The attach() function simplifies these queries by allowing to access the columns of a
#dataframe directly without having to use the name of the respective data frame
#NOTE: attach() is discouraged in production code — shown here for completeness.
attach(my.dataset)
my.dataset[site=="B",]
season
#Inverse of attach() is detach()
detach(my.dataset)
season #Gives the following error (intentional demo): "Error: object 'season' not found"
#attaching again
attach(my.dataset)
season
#Use the subset() function when only querying the dataframe
subset(my.dataset, pH>8)
subset(my.dataset, season=="Summer", season:pH)
#Adding 1 to all observations in pH column for the season Summer
my.dataset[my.dataset$season=="Summer", "pH"] <- my.dataset[my.dataset$season =="Summer",'pH']+1
my.dataset
#Add a new column to the dataframe, i.e. a new set of observations for each row
my.dataset$NO3 <- c(234.5, 256.6, 654.1, 356.7, 776.4)
my.dataset
nrow(my.dataset)
ncol(my.dataset)
my.dataset <- edit(my.dataset) #Opens an interactive spreadsheet editor
names(my.dataset)[4] <- "PO4" #Changing the name of the 4th column to PO4
my.dataset
#Creating New Functions
|
ed527989b7da3002ff7b31587455d3a93e7a20a9
|
2f395e94c4b57d0832efd43b4555392a1377778b
|
/plots_umap_types.R
|
e844ce68982ada81bddbaa4e55001fa13ff2a0eb
|
[] |
no_license
|
jrboyd/scRNA_DK
|
2e30a27cae387949ced5c2d35ff4ad34ea1fa15d
|
e94578f4afe91c4e970b1359662102725f023bbf
|
refs/heads/master
| 2021-03-16T01:31:31.925322
| 2020-06-26T19:31:55
| 2020-06-26T19:31:55
| 246,893,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,067
|
r
|
plots_umap_types.R
|
# Plot UMAP embeddings and cluster-composition summaries for the DKSC
# single-cell dataset, both without ("combined"/unanchored) and with
# ("integrated"/anchored) Seurat integration. Writes three PDFs and two CSVs
# into the working directory.
library(Seurat)
# get_meta_dt()/get_rna_dt() come from this sourced helper file.
source("../SF_AutoImmune_ssv/functions_setup.R")
dksc = readRDS("datasets/DKSC.combined.Rds")
meta_dt = get_meta_dt(dksc)
dksc.integrated = readRDS("datasets/DKSC.integrated.Rds")
meta_dt.integrated = get_meta_dt(dksc.integrated)
# Mitochondrial genes, identified by the "mt-" pattern in gene names.
# NOTE(review): grepl() without anchoring also matches "mt-" mid-name; confirm.
mt_genes = rownames(dksc)[grepl("mt-", rownames(dksc))]
## combined umaps
p1 <- DimPlot(dksc, reduction = "umap", group.by = "sampleId")
p2 <- DimPlot(dksc, reduction = "umap", group.by = "treatment",
              repel = TRUE)
p3 <- DimPlot(dksc, reduction = "umap", group.by = "rep",
              repel = TRUE)
p4 = DimPlot(dksc, reduction = "umap", group.by = "Phase",
             repel = TRUE)
p5 = DimPlot(dksc, reduction = "umap", group.by = "seurat_clusters",
             repel = TRUE) +
  labs(color = "Cluster")
# UMAP colored by per-cell RNA count (log10 scale).
p6 = ggplot(meta_dt, aes(x = UMAP_1, y = UMAP_2, color = log10(nCount_RNA))) +
  geom_point(size = .4) +
  scale_color_viridis_c() + theme(legend.title = element_text(size = 10, angle = 90)) +
  guides(color = guide_colorbar(title.position = "left"))
# Mean mitochondrial expression per cell, merged back onto cell metadata.
mt_rna_dt = get_rna_dt(dksc, mt_genes)[, .(mt_average = mean(expression)), .(id)]
mt_rna_dt = merge(mt_rna_dt, meta_dt[, .(id, source, sampleId, treatment, rep, UMAP_1, UMAP_2, seurat_clusters)], by = "id")
p7 = ggplot(mt_rna_dt, aes(x = UMAP_1, y = UMAP_2, color = mt_average)) +
  geom_point(size = .4) +
  scale_color_viridis_c(option = "B")+
  labs(color = "mitochondrial average") +
  theme(legend.title = element_text(size = 10, angle = 90)) +
  guides(color = guide_colorbar(title.position = "left"))
pg = cowplot::plot_grid(p1 + labs(title = "Unanchored", color = "sampleId"),
                        p2 + labs(color = "treatment"),
                        p3 + labs(color = "rep"),
                        p4 + labs(color = "Cell cycle"),
                        p5,
                        p6,
                        p7,
                        ncol = 4)
pg
ggsave("combined_umap.pdf", pg, width = 7*2, height = 6.1)
## integrated umaps
# Same panel layout as above, on the anchored (integrated) object.
p1 <- DimPlot(dksc.integrated, reduction = "umap", group.by = "sampleId")
p2 <- DimPlot(dksc.integrated, reduction = "umap", group.by = "treatment",
              repel = TRUE)
p3 <- DimPlot(dksc.integrated, reduction = "umap", group.by = "rep",
              repel = TRUE)
p4 = DimPlot(dksc.integrated, reduction = "umap", group.by = "Phase",
             repel = TRUE)
p5 = DimPlot(dksc.integrated, reduction = "umap", group.by = "seurat_clusters",
             repel = TRUE) +
  labs(color = "Cluster")
p6 = ggplot(meta_dt.integrated, aes(x = UMAP_1, y = UMAP_2, color = log10(nCount_RNA))) +
  geom_point(size = .4) +
  scale_color_viridis_c() + theme(legend.title = element_text(size = 10, angle = 90)) +
  guides(color = guide_colorbar(title.position = "left"))
mt_rna_dt.integrated = get_rna_dt(dksc.integrated, mt_genes)[, .(mt_average = mean(expression)), .(id)]
mt_rna_dt.integrated = merge(mt_rna_dt.integrated, meta_dt.integrated[, .(id, source, sampleId, treatment, rep, UMAP_1, UMAP_2, seurat_clusters)], by = "id")
p7 = ggplot(mt_rna_dt.integrated, aes(x = UMAP_1, y = UMAP_2, color = mt_average)) +
  geom_point(size = .4) +
  scale_color_viridis_c(option = "B") +
  labs(color = "mitochondrial average") +
  theme(legend.title = element_text(size = 10, angle = 90)) +
  guides(color = guide_colorbar(title.position = "left"))
pg = cowplot::plot_grid(p1 + labs(title = "Anchored", color = "sampleId"),
                        p2 + labs(color = "treatment"),
                        p3 + labs(color = "rep"),
                        p4 + labs(color = "Cell cycle"),
                        p5,
                        p6,
                        p7,
                        ncol = 4)
gc()
pg
ggsave("anchored_umap.pdf", pg, width = 7*2, height = 6.1)
## composition
# Labeled cluster UMAPs plus per-sample cluster counts/fractions.
p1 = DimPlot(dksc, reduction = "umap", group.by = "seurat_clusters",
             repel = TRUE, label = TRUE) + NoLegend() +
  labs(title = "Unanchored")
p2 = DimPlot(dksc.integrated, reduction = "umap", group.by = "seurat_clusters",
             repel = TRUE, label = TRUE) + NoLegend() +
  labs(title = "Anchored")
# data.table: cells per (sample, treatment, cluster) and per-sample fractions.
cnt_dt = meta_dt[, .N,.(sampleId, treatment, seurat_clusters)]
cnt_dt[, fraction := N / sum(N), .(sampleId)]
sample_counts = dcast(cnt_dt, seurat_clusters~treatment+sampleId, value.var = "N")
fwrite(sample_counts, file = "combined_cluster_counts.csv")
# Append the per-cluster total to the facet label, e.g. "3 (1520)".
total_dt = cnt_dt[, .(total = sum(N)), .(seurat_clusters)]
setkey(total_dt, seurat_clusters)
lev = levels(total_dt$seurat_clusters)
levels(cnt_dt$seurat_clusters) = paste0(lev, " (", total_dt[.(lev)]$total, ")")
p1a = ggplot(cnt_dt, aes(x = sampleId, y = fraction, fill = treatment)) +
  geom_bar(stat = "identity") +
  facet_wrap(~seurat_clusters) +
  theme(legend.position = "bottom")
p1b = ggplot(cnt_dt, aes(x = sampleId, y = N, fill = treatment)) +
  geom_bar(stat = "identity") +
  facet_wrap(~seurat_clusters) +
  theme(legend.position = "bottom")
# Same composition summaries for the anchored object.
cnt_dt.integrated = meta_dt.integrated[, .N,.(sampleId, treatment, seurat_clusters)]
cnt_dt.integrated[, fraction := N / sum(N), .(sampleId)]
sample_counts.integrated = dcast(cnt_dt.integrated, seurat_clusters~treatment+sampleId, value.var = "N")
fwrite(sample_counts.integrated, file = "anchored_cluster_counts.csv")
total_dt = cnt_dt.integrated[, .(total = sum(N)), .(seurat_clusters)]
setkey(total_dt, seurat_clusters)
lev = levels(total_dt$seurat_clusters)
levels(cnt_dt.integrated$seurat_clusters) = paste0(lev, " (", total_dt[.(lev)]$total, ")")
p2a = ggplot(cnt_dt.integrated, aes(x = sampleId, y = fraction, fill = treatment)) +
  geom_bar(stat = "identity") +
  facet_wrap(~seurat_clusters) +
  theme(legend.position = "bottom")
p2b = ggplot(cnt_dt.integrated, aes(x = sampleId, y = N, fill = treatment)) +
  geom_bar(stat = "identity") +
  facet_wrap(~seurat_clusters) +
  theme(legend.position = "bottom")
pg = cowplot::plot_grid(p1, p2, p1b, p2b, p1a, p2a, rel_heights = c(1, 1.3, 1.3), ncol = 2)
ggsave("cluster_composition.pdf", pg, width = 5.9, height = 10)
|
d031d469406eb891396bfeae8e604e819cbbc1db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pheno2geno/tests/test_analysis.R
|
aa04568906abc98f8c0a7870e2b07edcb5dd3f3f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
test_analysis.R
|
# Integration test for the pheno2geno pipeline: build a population from
# offspring/parental phenotype CSVs, derive biomarkers, scan QTLs on the
# physical map, and construct genetic maps.
# NOTE(review): the banner below says "THREE WAYS" but only two map
# constructions are present in this file — confirm whether one was removed.
library(pheno2geno)  # was require(); library() fails fast if the package is missing
#setwd("C:/Users/Konrad/Documents/Github/phenotypes2genotypes/tests")

# Input files are expected in the current working directory.
children  <- read.csv(file = "offspring_phenotypes.csv", header = TRUE, row.names = 1)
parents   <- read.csv(file = "parental_phenotypes.csv", header = TRUE, row.names = 1)
genotypes <- read.csv(file = "genotypes.csv", header = TRUE, row.names = 1)
map       <- read.csv(file = "map.csv", header = TRUE, row.names = 1)

# With parental data: the 0/1 vector assigns the 12 parental columns to the
# two parental groups (6 each).
population <- create.population(children, parents, c(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1),
                                genotypes, mapsPhysical = map, verbose = TRUE)
population <- find.diff.expressed(population)
population <- generate.biomarkers(population, threshold = 0.001, margin = 5, pProb = 0.8,
                                  verbose = TRUE, debug = 2)
population <- scan.qtls(population, verbose = TRUE, step = 4, map = "physical",
                        epistasis = "ignore")

####THREE WAYS TO ASSIGN CHROMOSOMES
set.seed(101010)  # make the de novo map construction reproducible
cross_newmap <- cross.denovo(population, n.chr = 16, map = "physical",
                             comparisonMethod = sumMajorityCorrelation, reOrder = TRUE,
                             use.orderMarkers = FALSE, verbose = TRUE, debugMode = 2)
cross_saturated <- cross.saturate(population, map = "physical", verbose = TRUE, debugMode = 2)
|
c0bb5125f314c3de2919653521719e47dcba010f
|
5b0bc403547001551cb6148fc111ec66b3a3b076
|
/scripts/plot_PCA.R
|
e1ff288ebdc2105b2c0000049edc195bec0cff88
|
[] |
no_license
|
erdavenport/japaneseEel
|
a8fef4c91433df0a9fde421ae06d1c2bc1d83e4b
|
4d8c845de4626cfda8116c931f16018c0512c552
|
refs/heads/master
| 2020-04-10T04:26:33.478362
| 2019-03-05T15:16:04
| 2019-03-05T15:16:04
| 160,798,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,003
|
r
|
plot_PCA.R
|
#!/usr/bin/env Rscript
######
suppressMessages(library("docopt"))
"
Usage:
plot_PCA.R --full_plink_in=<full_plink_in> --part_plink_in=<part_plink_in> --outpath=<outpath>
Description: This script will generate a PCA plot of the eel samples
Options:
--full_plink_in=<full_plink_in> prefix on plink eigenval and eigenvec files
--part_plink_in=<part_plink_in> prefix on plink eigenval and eigenvec files (outgroup excluded from calculations)
--outpath=<outpath> path of where to save output
" -> doc
######
###### PARAMETERS ##########
# Set the parameters:
today <- Sys.Date() # Set the date that will go on the end of the files generated by this script
today <- format(today, format="%m%d%y")
#############################
##### Load arguments:
opts <- docopt(doc)
full_plink_in <- opts$full_plink_in
part_plink_in <- opts$part_plink_in
outpath <- opts$outpath
# Example values for interactive runs:
# full_plink_in <- "../data/STACKS_processed/7_depth_optimization/m3/rxstacks_corrected/coverage_filtered/batch_2.plink.for.admixture.pca"
# part_plink_in <- "../data/STACKS_processed/7_depth_optimization/m3/rxstacks_corrected/coverage_filtered/batch_2.plink.for.admixture.pca.no.hainan"
# outpath <- "../results/9_PCA/m3/"
##### Load common functions:
# NOTE(review): `pops`, `pop.cols` and `pop.pch` used below are presumably
# defined in this sourced file -- confirm there.
source("scripts/common_functions_for_eelseq_analyses.R")
##### Read in data:
print("reading in data")
evals <- read.table(paste0(full_plink_in, ".eigenval"), sep="\n")
evecs <- read.table(paste0(full_plink_in, ".eigenvec"), sep=" ", header=FALSE)
evalsNoOut <- read.table(paste0(part_plink_in, ".eigenval"), sep="\n")
evecsNoOut <- read.table(paste0(part_plink_in, ".eigenvec"), sep=" ", header=FALSE)
##### Generate output folder if it isn't there already:
if (!file.exists(outpath)) {
print(paste0("creating ",outpath," in filesystem"))
dir.create(file.path(outpath))
}
##### Plot PCA (all samples):
# Calculate percent variance explained by each PC:
PVE <- round(evals$V1/sum(evals$V1), 2)
# Stuff for legend:
forLegend <- unique(pops$pop_name)
print("saving PCA plot")
# Black and White:
# pdf(paste(outpath, "PCA.all.samples.",today,"ERD.pdf", sep=""), width=6, height=6)
# plot(evecs[,3], evecs[,4], pch=sapply(evecs$V1, pop.pch), cex=1.5, xlab=paste("PC 1 -", (PVE[1]*100),"% of variance", sep=" "),ylab=paste("PC 2 -",(PVE[2]*100),"% of variance",sep=" "))
# legend("bottomleft", cex=0.75, pch=sapply(forLegend, pop.pch), legend=forLegend, bty="n")
# hi <- dev.off()
# Color:
# Columns 3 and 4 of the .eigenvec file are plotted as PC1/PC2 (columns 1-2
# hold sample identifiers in plink output -- TODO confirm for this pipeline).
pdf(paste(outpath, "PCA.all.samples.",today,"ERD.pdf", sep=""), width=6, height=6)
plot(evecs[,3], evecs[,4], pch=16, col = sapply(evecs$V1, pop.cols), cex=1.5, xlab=paste("PC 1 -", (PVE[1]*100),"% of variance", sep=" "),ylab=paste("PC 2 -",(PVE[2]*100),"% of variance",sep=" "))
legend("bottomleft", cex=0.75, pch=16, col = sapply(forLegend, pop.cols), legend=forLegend, bty="n")
hi <- dev.off()
##### Plot PCA (outgroup excluded):
# Calculate percent variance explained by each PC:
PVENoOut <- round(evalsNoOut$V1/sum(evalsNoOut$V1), 2)
# Drop the outgroup from the legend entries.
# Fixed: the original used forLegend[-which(forLegend == "Hainan province")];
# when the label is absent, which() returns integer(0) and x[-integer(0)]
# silently yields an EMPTY vector, wiping out the whole legend. A logical
# mask is safe in both cases.
forLegend <- forLegend[forLegend != "Hainan province"]
print("saving PCA plot")
# Black and White:
# pdf(paste(outpath, "PCA.no.outgroup.",today,"ERD.pdf", sep=""), width=6, height=6)
# plot(evecsNoOut[,3], evecsNoOut[,4], pch=sapply(evecsNoOut$V1, pop.pch), col=rgb(0,0,0, alpha = 0.5), cex=1.5, xlab=paste("PC 1 -", (PVENoOut[1]*100),"% of variance", sep=" "),ylab=paste("PC 2 -",(PVENoOut[2]*100),"% of variance",sep=" "))
# legend("topleft", cex=0.75, pch=sapply(forLegend, pop.pch), legend=forLegend, bty="n")
# hi <- dev.off()
# Color:
# Same PCA scatter as above, recomputed with the outgroup excluded.
pdf(paste(outpath, "PCA.no.outgroup.",today,"ERD.pdf", sep=""), width=6, height=6)
plot(evecsNoOut[,3], evecsNoOut[,4], pch=16, col = sapply(evecsNoOut$V1, pop.cols), cex=1.5, xlab=paste("PC 1 -", (PVENoOut[1]*100),"% of variance", sep=" "),ylab=paste("PC 2 -",(PVENoOut[2]*100),"% of variance",sep=" "))
legend("topright", cex=0.75, pch=16, col = sapply(forLegend, pop.cols), legend=forLegend, bty="n")
hi <- dev.off()
print("DONE!")
|
2ccaa29f0e73e8c5a2ad9cc110c30a34d562bea3
|
1d930b9fad37edb5550b8109359b569ca999d53d
|
/R/filter-n-obs.R
|
14ed4c832afded202360af1ca45219350134ad4b
|
[
"MIT"
] |
permissive
|
cderv/brolgar
|
1ef174dd50bdca8fb886c56114a1ad978e4c3171
|
f2fee3c636d84d21a2bf2061fc1a4e1d21d5136c
|
refs/heads/master
| 2020-06-19T18:18:09.270305
| 2019-07-14T08:09:20
| 2019-07-14T08:09:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
r
|
filter-n-obs.R
|
#' Filter by the number of observations for a `key`.
#'
#' When exploring longitudinal data it can be useful to filter by the number of
#' observations in a compact way. `filter_n_obs` allows for the user to
#' filter by the number of observations for each `key`.
#'
#' @param .data data.frame
#' @param filter A description of how you want to filter the number of
#' observations for each `key`, in terms of `n_obs`. See examples for more
#' detail.
#' @param ... Additional arguments passed on to methods.
#'
#' @return data.frame filtered by the number of observations, with an
#' additional column `n_obs`, which contains the number of observations for
#' each `key`.
#' @export
#' @name filter_n_obs
#'
#' @examples
#' wages_ts %>% filter_n_obs(n_obs > 10)
#' wages_ts %>% filter_n_obs(n_obs == 2)
#'
filter_n_obs <- function(.data, filter, ...){
# Validate the input before dispatching to the class-specific method.
test_if_tsibble(.data)
test_if_null(.data)
UseMethod("filter_n_obs")
}
#' @rdname filter_n_obs
#' @export
filter_n_obs.tbl_ts <- function(.data, filter, ...){
# Capture the filter expression unevaluated so it can reference the
# `n_obs` column added by add_n_key_obs() below.
quo_filter <- rlang::enquos(filter)
add_n_key_obs(.data) %>%
dplyr::filter(!!!quo_filter)
}
|
5d673f5e371fbad1304aea8674233d9add8d8df5
|
7ad193bcb130588dbf788564a22574e49734c8bf
|
/man/MetaboSignal_NetworkCytoscape.Rd
|
2e98ea249579e3f856a6e4e56acba8817809e736
|
[] |
no_license
|
Rafael-Ayala/MetaboSignal
|
9f03972efbf2ee691ddd1263cfa2b3f2d9496ade
|
83c25f550f92805c28e9384c844a204a2d0a3f8c
|
refs/heads/master
| 2021-06-05T09:38:11.881994
| 2016-10-17T15:17:57
| 2016-10-17T15:17:57
| 281,378,956
| 1
| 0
| null | 2020-07-21T11:28:49
| 2020-07-21T11:28:48
| null |
UTF-8
|
R
| false
| false
| 7,960
|
rd
|
MetaboSignal_NetworkCytoscape.Rd
|
\name{MetaboSignal_NetworkCytoscape}
\alias{MetaboSignal_NetworkCytoscape}
\title{Build shortest-path subnetwork}
\description{
This function allows calculating the shortest paths from a set of genes to a set
of metabolites, and representing them as a network-table (i.e. two-column matrix).
By default, the function exports a network file ("CytoscapeNetwork.txt") and two
attribute files ("CytoscapeAttributesType.txt", "CytoscapeAttributesTarget.txt"),
which can be imported into cytoscape to visualize the network. The first attribute
file allows customizing the nodes of the network based on the molecular entity
they represent: metabolic-genes, signaling-genes, or metabolites. The second
attribute file allows discriminating the source_genes and the target_metabolites
("target") from any other node ("untarget") of the network.
The network-table generated with this function can be further customized based
on different criteria. For instance, undesired nodes can be removed or replaced
using the functions "MS_RemoveNode( )" or "MS_ReplaceNode( )" respectively. The
final version of the network-table can be used to generate new cytoscape files
using the function "MS_ToCytoscape( )".
}
\usage{
MetaboSignal_NetworkCytoscape(network_table, organism_code, organism_name, source_genes,
target_metabolites, mode = "SP", type = "first",
distance_th = Inf, collapse_genes = FALSE, names = TRUE,
export_cytoscape = TRUE, file_name = "Cytoscape")
}
\arguments{
\item{network_table}{two-column matrix where each row represents an edge
between two nodes. See function "MetaboSignal_matrix ( )".
}
\item{organism_code}{character vector containing the KEGG code for the organism
of interest. For example the KEGG code for the rat is "rno". See the function
"MS_FindKEGG( )".
}
\item{organism_name}{character vector containing the common name of the organism of
interest (e.g. "rat", "mouse", "human", "zebrafish") or taxonomy id. For more
details, check: http://docs.mygene.info/en/latest/doc/data.html#species. This
argument is only required when source_genes are gene symbols.
}
\item{source_genes}{character vector containing the genes from which the
shortest paths will be calculated. All input genes need to have the same ID
format. Possible ID formats are: entrez IDs, official gene symbols, or gene
nodes of the network (i.e. KEGG orthology IDs or KEGG gene IDs). The latter
option allows reducing the time required to compute this function. Entrez IDs
or gene symbols can be transformed into KEGG IDs using the function
"MS_GetKEGG_GeneID( )".
}
\item{target_metabolites}{character vector containing the KEGG IDs of the
metabolites to which the shortest paths will be calculated. Compound KEGG IDs
can be obtained using the function "MS_FindKEGG( )".
}
\item{mode}{character constant indicating whether a directed or an undirected
network will be considered. "all" indicates that all the edges of the network
will be considered as undirected. "out" indicates that all the edges of the
network will be considered as directed. "SP" indicates that all network will
be considered as directed except the edges linked to target metabolite, which
will be considered as undirected. The difference between the "out" and the
"SP" options is that the latter aids reaching target metabolites that are
substrates of irreversible reactions. By default, mode = "SP".
}
\item{type}{character constant indicating whether all shortest paths or a
single shortest path will be considered when there are several shortest paths
between a source_gene and a target_metabolite. If type = "all", all shortest
paths will be considered. If type = "first" a single path will be considered.
If type = "bw" the path with the highest betweenness score will be considered.
The betweenness score is calculated as the average betweenness of the gene
nodes of the path. Note that using type = "bw" increases the time required to
compute this function. By default, type = "first".
}
\item{distance_th}{establishes a shortest path length threshold. Only shortest
paths with length below this threshold will be included in the network. By
default, distance_th = Inf.
}
\item{collapse_genes}{logical scalar indicating whether KEGG gene IDs will be
transformed into orthology IDs. Since several gene isoforms are associated
with the same orthology ID, this options leads to a dramatic decrease in the
dimensionality of the network. This argument is ignored if the gene nodes of
the network_table already represent orthology IDs. By default, collapse_genes
= FALSE.
}
\item{names}{logical scalar indicating whether the metabolite or gene KEGG IDs
will be transformed into common metabolite names or gene symbols. Reaction IDs
remain unchanged. By default, names = TRUE.
}
\item{export_cytoscape}{logical scalar indicating whether network and attribute
cytoscape files will be generated and exported. By default, export_cytoscape =
TRUE.
}
\item{file_name}{character vector that allows customizing the name of the
exported files. By default, file_name = "Cytoscape".
}
}
\value{
A two-column matrix where each row represents an edge between two nodes. By
default, the function also generates a network file ("CytoscapeNetwork.txt") and
two attribute files ("CytoscapeAttributesType.txt", "CytoscapeAttributesTarget.txt"),
which can be imported into cytoscape to visualize the network.
}
\note{
The network-table generated with this function can be also visualized in R using
the igraph package. The network-table can be transformed into an igraph object
using the function "graph.data.frame( )" from igraph.
}
\references{
Csardi, G. & Nepusz, T. (2006). The igraph software package for complex network
research. InterJournal, Complex Systems, 1695.
Shannon, P., Markiel, A., Ozier, O., Baliga, N.S., Wang, J.T., Ramage, D., Amin,
N. & Ideker, B.S.T. (2003). Cytoscape: a software environment for integrated
models of biomolecular interaction networks. Genome Research, 13, 2498-2504.
}
\examples{
data(MetaboSignal_table)
# Shortest-path subnetwork from Foxo1 (84482), Ldha (24533) to alpha D-glucose
#("cpd:C00267") and lactate ("cpd:C00186"). Different source_gene formats are valid:
# 1) Source_genes as network IDs (in this case orthology IDs): fastest option.
# To get gene KEGG IDs use "MS_GetKEGG_GeneID( )", as shown below:
\donttest{
MS_GetKEGG_GeneID(c("foxo1", "ldha"), organism_code = "rno", organism_name = "rat")
}
subnet_KEGG <- MetaboSignal_NetworkCytoscape(MetaboSignal_table, organism_code="rno",
source_genes = c("K07201", "K00016"),
target_metabolites = c("cpd:C00267",
"cpd:C00186"),
names = FALSE)
\donttest{
# 2) Source_genes as entrez IDs
subnet_Entrez <- MetaboSignal_NetworkCytoscape(MetaboSignal_table, organism_code="rno",
source_genes = c("84482", "24533"),
target_metabolites = c("cpd:C00267",
"cpd:C00186"),
names = FALSE)
# 3) Source_genes as symbols
subnet_Symbol <- MetaboSignal_NetworkCytoscape(MetaboSignal_table,
organism_code="rno", organism_name ="rat",
source_genes = c("foxo1", "ldha"),
target_metabolites = c("cpd:C00267",
"cpd:C00186"),
names = FALSE)
}
}
|
5c0243f6f4825f92d56e5725ac6d47dd245b1c67
|
2470551eed9989ee2c1fad7989fec6cd0948da29
|
/ui.R
|
23dafc0cc0181b42e4d7bd3f88763fa7a8f69400
|
[] |
no_license
|
sandeepbm/Coursera_Data_Science_Capstone
|
3a7b11823dd133dace3387fb9fdcaa0f8655330e
|
607040f0291d0a84b28068e57d5b3f5a660380ef
|
refs/heads/master
| 2021-05-06T22:50:08.601710
| 2017-12-03T01:34:12
| 2017-12-03T01:34:12
| 112,865,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,866
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com
#
library(shiny)

# UI layout: a sidebar text input for the N-gram phrase, and a tabbed main
# panel with an "About" tab (help text) and a "Prediction" tab showing the
# predicted word plus N-gram probability plots.
# Fixed: typos in the user-facing help text ("predits" -> "predicts",
# "probabilty" -> "probability", "upto" -> "up to").
shinyUI(fluidPage(

  # Application title
  titlePanel("Text Prediction App"),

  # Input text
  sidebarLayout(
    sidebarPanel(
      textInput("input_text","Enter Ngram phrase:")
    ),

    # Main panel: tabset opening on the "Prediction" tab
    mainPanel(
      tabsetPanel(selected="Prediction",
                  tabPanel("About",
                           br(),
                           tags$p("This app predicts next word for a given N-gram/text phrase in the 'Prediction' tab."),
                           tags$ul(
                             tags$li(tags$u("Input:"),"A text box on left hand side panel accepts an N-gram/text phrase as input."),
                             tags$li(tags$u("Output:"),"The prediction algorithm determines next word for the given input and displays the same in a text box at the top of the main panel."),
                             tags$li(tags$u("Algorithm:"),"A Katz back off n-gram model, that is trained on data sampled from a text Corpus, is used to determine conditional probability of possible words and maximum likelihood estimation is done to arrive at the output."),
                             tags$li(tags$u("Visualization:"),"N-gram plots are displayed on main panel to compare probability of top words, up to maximum of 5 words, that have the maximum likelihood estimate. When smoothing is applied, plot for back off model is also displayed.")),
                           tags$p(tags$b("Github link:"),
                                  tags$u(tags$a(href="https://github.com/sandeepbm/Coursera_Data_Science_Capstone","https://github.com/sandeepbm/Coursera_Data_Science_Capstone")))
                  ),
                  tabPanel("Prediction",
                           tags$u(h4("Prediction:")),
                           verbatimTextOutput("prediction"),
                           br(),
                           tags$u(h4("Ngram plots:")),
                           plotOutput("displot")
                  )
      )
    )
  )
))
|
3ff7a24dde4442ee38e21897e1839ae0fc49188d
|
ac2cb89d07dff7ec0fc0544f7583efa8b6363a5e
|
/code/archive 19_20/Additional/03 text/02 gutenberg.R
|
9c908fa69721df8f3d3f6426d7c5f2f352fcb644
|
[
"MIT"
] |
permissive
|
JimDuggan/CT1100
|
76fac7ad12d40b4f6f255e447514667ef9fc5617
|
644ee71da3e5e97122cc3ee63323a0aac39db4be
|
refs/heads/master
| 2021-12-23T10:43:26.968430
| 2021-10-01T13:59:18
| 2021-10-01T13:59:18
| 205,094,238
| 10
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
02 gutenberg.R
|
# Download public-domain texts from Project Gutenberg and count the most
# frequent non-stopword tokens in the H.G. Wells novels.
library(gutenbergr)
library(tidytext)
# Fixed: dplyr is required for %>%, anti_join(), count() and slice();
# the original relied on it already being attached.
library(dplyr)
# Gutenberg IDs: four H.G. Wells novels, five Bronte novels.
hgwells <- gutenberg_download(c(35, 36, 5230, 159))
bronte <- gutenberg_download(c(1260, 768, 969, 9182, 767))
# One token (word) per row, with English stopwords removed.
tidy_hgw <- unnest_tokens(hgwells, word, text) %>% anti_join(stop_words)
# Top 20 most frequent words (TRUE spelled out instead of T).
count(tidy_hgw, word, sort = TRUE) %>% slice(1:20)
# "A Portrait of the Artist as a Young Man", keeping the title metadata.
portrait <- gutenberg_download(c(4217),
                               meta_fields = "title")
|
92e1a6aaca11573f93c5aee79814bef175e60614
|
533047d7c4e0738db063cc35836a8e46033e6a37
|
/R/print_methods.R
|
a9e0b961de72b2c7fb5903b5b4b8a641d68da869
|
[] |
no_license
|
jtigani/bigQueryR
|
f87101981c01642b65a5018eccfefbe33a2d6514
|
9f50e963c09d3cbef3d18550c574c1bf1905cfea
|
refs/heads/master
| 2021-01-20T12:38:01.785913
| 2017-05-05T10:05:10
| 2017-05-05T10:05:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 629
|
r
|
print_methods.R
|
#' Print method for bqr_job objects.
#'
#' Writes a human-readable summary of a Google BigQuery job to the console:
#' identifiers, status, owner, timing, and any error message.
#'
#' @param x A bqr_job object (a parsed BigQuery jobs API response).
#' @param ... Ignored; present for compatibility with the print generic.
#' @export
print.bqr_job <- function(x, ...){
cat("==Google BigQuery Job==\n")
# cat0 is a package-internal helper -- presumably cat() plus a trailing
# newline; confirm in the package's utility functions.
cat0("JobID: ", x$jobReference$jobId)
cat0("ProjectID: ", x$jobReference$projectId)
cat0("Status: ", x$status$state)
cat0("User: ", x$user_email)
# js_to_posix presumably converts the API's epoch-millisecond timestamps
# to POSIXct -- confirm in the package's utility functions.
cat0("Created: ", as.character(js_to_posix(x$statistics$creationTime)))
cat0("Start: ", as.character(js_to_posix(x$statistics$startTime)))
cat0("End: ", as.character(js_to_posix(x$statistics$endTime)))
cat("## View job configuration via job$configuration\n")
cat0("## Job had error: \n", x$status$errorResult$message)
}
|
50dc40f9bda4fb8b9a07b654fde855e186d3d84a
|
77d130a37122031a98e8f030dff129f50a7c7068
|
/r_scripts/data_wrangle/asthma_saba_msa_timeseries.R
|
38ee20f703dd97576f330617ad9fe1f58fa63679
|
[] |
no_license
|
RyanGan/oregon_wildfire
|
282e7f3757afb98e711192c322399105f3eb88c1
|
758ef82e2085046236f187b15fe77b924da995e0
|
refs/heads/development
| 2020-06-28T06:13:12.923359
| 2019-09-28T23:47:05
| 2019-09-28T23:47:05
| 74,503,206
| 0
| 3
| null | 2018-05-18T19:02:09
| 2016-11-22T18:48:29
|
R
|
UTF-8
|
R
| false
| false
| 5,162
|
r
|
asthma_saba_msa_timeseries.R
|
# ------------------------------------------------------------------------------
# Title: Creation of asthma and saba fill Oregon and MSA time series
# Author: Ryan Gan
# Date Created: 2018-09-12
# ------------------------------------------------------------------------------
# Script purpose is to create time series data frames for Oregon, and each
# Oregon MSA for analysis
# load tidyverse library
library(tidyverse)
# I am able to create the time series I need using the asthma fireseason cohort
# dataset created. This contains all primary diagnoses of asthma and saba fills
# during the study period.
# read asthma cohort (n = 550610); all columns read as character.
asthma_c <- read_csv('./data/health/2013-oregon_asthma_fireseason_cohort.csv',
col_types = cols(.default = "c")) %>%
# filter to just primary diagnosis or saba fill
filter(visit_type != 'dx_asthma_not_primary')
# find zipcodes in each metroarea (unique ZIP -> MSA lookup)
zip <- asthma_c %>%
select(ZIP, MSA) %>%
rename(ZIPCODE = ZIP) %>%
unique()
# read zip pm; join with msa, and translate MSA codes to metro-area names
msa_pm = read_csv('./data/pm/2013-oregon_zip_pm25.csv',
col_types = cols(ZIPCODE = 'c')) %>%
# join with unique MSA vector
left_join(zip, by = 'ZIPCODE') %>%
# mutate
mutate(ZIPCODE = as.character(ZIPCODE),
MSA = as.factor(MSA),
# assign metro name to number (Census CBSA codes)
metroarea = case_when(MSA == 13460 ~ "Bend",
MSA == 18700 ~ "Corvallis",
MSA == 21660 ~ "Eugene",
MSA == 32780 ~ "Medford",
MSA == 38900 ~ "Portland",
MSA == 41420 ~ "Salem")) %>%
# filter to zips in an MSA only
filter(!is.na(metroarea))
# It's possible we could duplicate claimids; not sure I want to count these
claim_count <- asthma_c %>%
group_by(clmid) %>%
summarize(count = n())
# looking at an example where a person had multiple asthma inhaler fills with
# same claimid. It looks like they are all filled on unique dates, so I'm going
# to assume that even though it has same claimid they are unique fills
check <- filter(asthma_c, clmid == '162416557')
# this is a case where it's a duplicate claim I think
check2 <- filter(asthma_c, clmid == '289349081')
# claim with no place of service
check3 <- filter(asthma_c, clmid == '258561201')
# solution may be to take only one observation forunique dates for person/claims
# n unique asthma and saba events during the time period
# n = 161329 total
# NOTE(review): `asthma_saba_unique_visit` is defined further down in this
# script, so this section only works if the unique-visit block has already
# been run -- presumably these sections were reordered during editing;
# confirm and move this section below that definition.
event_count <- asthma_saba_unique_visit %>%
  group_by(pos_simple) %>%
  summarize(count = n()) %>%
  filter(pos_simple %in% c('Ambulance', 'Emergency Room Hospital',
                           'Inpatient Hospital', 'Office', 'Outpatient Hospital',
                           'Pharmacy', 'Urgent Care'))
event_count
# Summary statistics of event counts by place of service.
# Fixed: median() was called on an undefined object `vis`; it should
# summarise the `count` column like the other statistics.
event_stats <- event_count %>%
  group_by(pos_simple) %>%
  summarize(total_vis = sum(count), mean_vis = mean(count), med_vis = median(count),
            min_vis = min(count), max_vis = max(count))
# write event counts to csv file
write_csv(event_count, './data/health/2013-fireseason_asthma_counts.csv')
# limit to unique claims based on unique date and place of service
asthma_saba_unique_visit <- asthma_c %>%
# group by person id and date
group_by(personkey, service_place, fromdate) %>%
filter(row_number()==1) %>%
mutate(date = as.Date(fromdate),
ZIPCODE = as.character(ZIP)) %>%
# drop the cohort's metroarea so the join below re-adds the msa_pm version
select(-metroarea) %>%
# join with pm values
left_join(msa_pm, by = c('ZIPCODE', 'date', 'MSA')) %>%
# filter to only MSAs
filter(!is.na(metroarea)) %>%
# filter to following places of service
filter(pos_simple %in% c('Ambulance', 'Emergency Room Hospital',
'Inpatient Hospital', 'Office', 'Outpatient Hospital',
'Pharmacy', 'Urgent Care'))
# read in population denom for msa (2013 Census population estimates)
population <- read_csv("./data/health/saba_month_counts.csv") %>%
dplyr::select(msa_name, POPESTIMATE2013) %>%
rename(metroarea = msa_name,
pop = POPESTIMATE2013) %>%
unique()
# time series counts: daily events per metro area and place of service,
# with same-day mean smoke PM2.5 and temperature
asthma_msa_ts <- asthma_saba_unique_visit %>%
# rename pharamcy to saba fill
mutate(pos_simple = case_when(pos_simple == 'Pharmacy' ~ 'SABA Fill',
pos_simple == 'Emergency Room Hospital' ~ 'Emergency Department',
TRUE ~ pos_simple)) %>%
left_join(population, by = 'metroarea') %>%
# group by date, metroarea and place of service
group_by(date, metroarea, pos_simple) %>%
summarize(n_events = n(), pop = max(pop), avg_smk_pm = mean(geo_smk_pm),
avg_temp = mean(wrf_temp)) %>%
# set missing value to 0
mutate(n_events = ifelse(is.na(n_events), 0, n_events)) %>%
# identify day and weekend
mutate(day = lubridate::wday(date, label = T),
weekend = ifelse(day %in% c('Sat', 'Sun'), 1, 0),
month = as.factor(lubridate::month(date))) %>%
# rename place of service
rename(service_place = pos_simple)
# write file
write_csv(asthma_msa_ts, './data/health/2013-asthma_msa_smk.csv')
summary(asthma_msa_ts$date)
|
41c0fbf2a09f4a14965e96cc89261a18eab67cdc
|
128aad713f698c8cf7d41d18289d8d140c1a46e8
|
/Text Mining/Home Assignment/Code/Home Assignment Code(testing).R
|
5c1bc3409718e2bccf2465fb34ec75d71fc9eb72
|
[] |
no_license
|
mbalakiran/Data-Analysis-and-Visualization-In-R
|
8f3cec8708625964559ec6dc62150a16cfd0e3f4
|
f6403cfc953194cdd69e7e4b0d7a6ec1d7635594
|
refs/heads/master
| 2021-02-19T20:14:39.273578
| 2020-03-29T09:49:23
| 2020-03-29T09:49:23
| 245,319,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,123
|
r
|
Home Assignment Code(testing).R
|
# Setup: combine per-year CSVs of GRI sustainability-report metadata into one
# data frame and explore it with ggplot2.
# NOTE(review): install.packages() and the ?/?? help calls below are
# interactive-session leftovers; they should not run in a scripted pipeline.
install.packages("ggridges")
library(dplyr)
library(readr)
library(base)
library(ggplot2)
library(tm)
library(stringr)
library(wordcloud)
library(corpus)
library(tidytext)
library(data.table)
library(tidyverse)
library(wordcloud2)
library(reshape2)
library(radarchart)
#library(RWeka)
library(topicmodels)
library(ggridges)
#library(pryr)
mem_used()
??pryr
?dplyr
?memory.limit
gc()
setwd("~/Documents/Master Program/Data Analysis & Visualization/Home Assignment/Files/CSV")
?lapply
?base
# First (unused) attempt at reading: read.delim on CSVs.
combined = list.files(pattern = "*.csv")
myfiles = lapply(combined, read.delim)
#or
# Actual read: every CSV in the directory, skipping its first line, row-bound.
df <- list.files(full.names = TRUE) %>%
lapply(read_csv, skip = 1) %>%
bind_rows
dim(df)
df
str(df)
setwd("~/Documents/Master Program/Data Analysis & Visualization/Home Assignment/Files")
write.csv(df,'combinedfile.csv')
names(df)
df
# Re-read the combined file (column names like Publication.Year come from
# read.table's name mangling).
tempdf <- read.table("combinedfile.csv",header = TRUE,sep =",", dec=".")
names(tempdf)
# Exploratory plots: publication counts per year, stacked by several factors.
ggplot(tempdf, aes(Publication.Year, fill = Size)) +
geom_bar() +
xlab("Publication Year")
ggplot(tempdf, aes(Publication.Year, fill = Region)) +
geom_bar() +
xlab("Publication Year")
ggplot(tempdf, aes(Publication.Year)) +
geom_line() +
xlab("Publication Year")
ggplot(tempdf, aes(Publication.Year, fill = Sector)) +
geom_bar() +
xlab("Publication Year")
ggplot(tempdf, aes(Publication.Year, fill = Type)) +
geom_bar() +
xlab("Publication Year")
ggplot(tempdf, aes(Publication.Year, fill = Listed.Non.listed)) +
geom_bar() +
xlab("Publication Year")
# Heatmap-style views of year vs country/status.
ggplot(tempdf, aes(Publication.Year, Country, fill = Listed.Non.listed)) +
geom_raster() +
xlab("Publication Year")
ggplot(tempdf, aes(Publication.Year, Status, fill = Listed.Non.listed)) +
geom_raster() +
xlab("Publication Year")
#ggplot(tempdf, aes(Publication.Year, Country, fill = Listed.Non.listed)) +
# geom_hex() +
# xlab("Publication Year")
#ggplot(tempdf, aes(Publication.Year, Status, fill = Listed.Non.listed)) +
# geom_hex() +
# xlab("Publication Year")
#ggplot(tempdf, aes(Size,Publication.Year, fill = Listed.Non.listed)) +
# geom_raster() +
# xlab("Publication Year")
# Distribution of publication years by company size / report type.
ggplot(tempdf, aes(Size, Publication.Year)) +
geom_count() +
ylab("Publication Year")
ggplot(tempdf, aes(Size, Publication.Year)) +
geom_violin() +
ylab("Publication Year")
ggplot(tempdf, aes(Type, Publication.Year)) +
geom_violin() +
ylab("Publication Year")
ggplot(tempdf, aes(Country)) +
geom_bar() +
xlab("Publication Year")
# Ridgeline densities of publication year per GRI standard, all companies.
# Fixed: legend.position was misspelled "buttom"; ggplot2 only accepts
# "none"/"left"/"right"/"bottom"/"top" or a coordinate pair.
ggplot(tempdf, aes(Publication.Year, Type, fill = Type)) +
  geom_density_ridges() +
  labs(x = "Year of Publication", y = "GRI Standards", title = "All Companies") +
  theme(plot.title = element_text(hjust = 0.5), legend.position = "bottom")
# Country vs sector scatter, points coloured by company size.
ggplot(tempdf, aes(Country, Sector, colour = Size)) +
geom_point()
# Publications per sector, faceted over five selected years.
# Fixed: the original subset used `Publication.Year == c(2001, ...)`, which
# recycles the five-element vector element-wise and silently keeps only rows
# whose position happens to line up with the matching year; %in% is the
# correct set-membership test.
ggplot(tempdf[which(tempdf$Publication.Year %in% c(2001, 2005, 2009, 2013, 2017)), ],
       aes(Sector, fill = Size)) +
  scale_fill_manual(values = c("red4", "red2", "grey")) +
  geom_bar(colour = "black")+
  theme_bw() +
  facet_wrap(~Publication.Year, nrow = 5) +
  labs(title = "Number of publications by firms over different years and sectors:") +
  theme(
    axis.text.x = element_text(angle = 90, hjust = 1, size = 10),
    axis.ticks = element_blank(),
    axis.text.y = element_text(size = 10),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    strip.text.x = element_text(size = 10),
    strip.background = element_rect(color = "white", fill = "white"),
    panel.border = element_blank(),
    legend.title = element_text(size = 10),
    legend.text = element_text(size = 9, color = "black" ),
    plot.title = element_text(size = 12),
    legend.position = "top"
  )
# by number of companies per region over the years
# (dot size = publication count; counts capped at years before 2018)
ggplot(tempdf[which(tempdf$Publication.Year<2018),],aes(Region, Publication.Year,color = ..n..),alpha = 0.5) +
geom_count(show.legend = TRUE) +
scale_fill_continuous(name = "Number of Publications") +
theme_bw() +
guides(color = FALSE) +
theme(
legend.position = "right",
legend.text = element_text(size = 10, hjust = 1, angle = 90),
legend.title = element_text(size = 10, hjust = 1, angle = 90),
legend.key.size = unit(1, "cm"),
legend.direction = "vertical",
axis.text.x = element_text(color = "darkblue",angle = 90,hjust = 1, size = 10),
axis.title.x = element_blank(),
axis.text.y = element_text(color = "darkblue",size = 10, angle = 90),
axis.title.y = element_blank(),
panel.border = element_blank()
) +
coord_fixed(ratio = 0.7) +
labs(title = "Publications per Regions per Year", subtitle = "(dot size represents number of publications)")
# Subset the combined data to the aviation sector and repeat two plots.
aviation <- filter(df, Sector == "Aviation")
aviation
write.csv(aviation,'aviation.csv')
tempav <- read.table("aviation.csv",header = TRUE,sep =",", dec=".")
names(tempav)
ggplot(tempav, aes(Publication.Year, fill = Region)) +
geom_bar() +
xlab("Publication Year")
ggplot(tempav, aes(Publication.Year, Type, fill=Type))+
geom_density_ridges() +
labs(x ="Year of Publication", y = "GRI Standards", title = "Aviation") +
theme(plot.title = element_text(hjust = 0.5),legend.position = "none")
# Glossary of the objects built so far:
#combined is the data file which is the combination of all the individual files
#or
#Df is the data file which is the combination of all the individual files
#tempdf is the data frame where we are reading the combined data file
#aviation is the df where we filtered the data according to the industry
#tempav is the df where we are reading the aviation file
# For a text working with only one text file:
# clean the Airbus 2012 report and compute a simple sentiment score.
readLines("Airbus_2012.txt")
str(readLines("Airbus_2012.txt"))
airbus <- paste(readLines("Airbus_2012.txt"), collapse = " ")
# NOTE(review): `replace=` below partially matches gsub's `replacement`
# argument -- it works, but spelling out `replacement=` would be clearer.
nairbus <- gsub(pattern="\\W", replace=" ", airbus) # Removing punctuation
nairbus
nairbus <- gsub(pattern = "\\d", replace = " ", nairbus) # Removing digits
nairbus <- tolower(nairbus) # To lower case the letters
stopwords()
nairbus <- removeWords(nairbus, stopwords()) # Removing stopwords
nairbus <- gsub(pattern = "\\b[A-z]\\b{1}", replace=" ", nairbus) # Removing 1-letter words
nairbus <- stripWhitespace(nairbus) # Removing extra spaces
nairbus
# Sentiment analysis
nairbus <- str_split(nairbus, pattern = "\\s+") # Dividing into word tokens
nairbus
str(nairbus)
finalairbus <- unlist(nairbus) # Converting list to a character vector
class(finalairbus)
finalairbus
## Preparing positive words file
# NOTE(review): the scan() that defines `postive` is commented out here;
# `postive` and `negative` are only defined near the end of this file, so
# the lines below fail unless those word lists are loaded first.
####postive <- scan('p.....txt',what='character',comment.char=";")
match(finalairbus,postive) #matching the positive words
sum(!is.na(match(finalairbus,postive)))
sum(!is.na(match(finalairbus,negative)))
# Net sentiment: positive hits minus negative hits (a single number, so
# mean() just returns it, sd() is NA, and hist() plots one value).
score <- sum(!is.na(match(finalairbus,postive))) - sum(!is.na(match(finalairbus, negative)))
mean(score)
sd(score)
hist(score)
# Word clouds of the cleaned Airbus text at decreasing frequency cut-offs.
# Fixed: "worldcloud" was a typo for wordcloud(); and the last call used
# `color =`, which only works via partial matching of the `colors` argument.
wordcloud(finalairbus)
wordcloud(finalairbus, min.freq = 40)
wordcloud(finalairbus, min.freq = 20, random.order = FALSE)
wordcloud(finalairbus, min.freq = 10, random.order = FALSE, scale = c(7, 0.5), colors = rainbow(7))
# Combining multiple files: read every .txt report in the aviation folder,
# collapse each to one string, and apply the same cleaning as above.
#file.choose()
textfiles <- ("~/Documents/Master Program/Data Analysis & Visualization/Home Assignment/Files/Aviation copy")
setwd(textfiles)
allfiles <- list.files(path = textfiles, pattern = "*.txt")
#allfiles
class(allfiles)
# Prepend the directory so the files can be read after setwd() below.
allfiles <- paste(textfiles, "/", allfiles, sep="")
#allfiles
typeof(allfiles)
class(allfiles)
newpath <- ("~/Documents/Master Program/Data Analysis & Visualization/Home Assignment/Files")
setwd(newpath)
#datan <- scan(allfiles)
# One list element per file; each file collapsed to a single string.
datan <- lapply(allfiles, FUN = readLines)
#datan
newdata <- lapply(datan, FUN = paste, collapse = " ")
#newdata
#class(newdata2)
#write.table(newdata2, file = "alldata.txt")
# Cleaning pipeline (note gsub coerces the list to a character vector):
# strip punctuation, digits, case, stopwords, 1-letter words, extra spaces.
newdata2 <- gsub(pattern = "\\W", replace = " ", newdata)
newdata2 <- gsub(pattern = "\\d", replace= " ", newdata2)
newdata2 <- tolower(newdata2)
newdata2 <- removeWords(newdata2, stopwords("english"))
newdata2 <- gsub(pattern = "\\b[A-z]\\b{1}", replace= " ", newdata2)
newdata2 <- stripWhitespace(newdata2)
write.table(newdata2, file = "afterclean.txt")
#a <- scan("afterclean.txt", what = "character")
#b <- str_split(a, pattern = "\\s+")
# Word cloud over the cleaned multi-report text.
# (`colors =` spelled out; the original `col =` relied on partial matching.)
wordcloud(newdata2, min.freq = 2000, random.order = FALSE, scale = c(3, 0.5), colors = rainbow(3))
# Build a tm corpus (one document per report) and its term-document matrix.
newdata3 <- Corpus(VectorSource(newdata2))
newdata3
tdm <- TermDocumentMatrix(newdata3)
tdm
mat <- as.matrix(tdm)
a <- rownames(tdm)
colnames(mat)
# Fixed: comparison.cloud() requires a term matrix with one column per
# group, so it is called on `mat`; the original also called it on the raw
# character vector `newdata2`, which errors. Also removed `data(mat)` --
# data() loads packaged datasets, not local objects -- and corrected the
# lowercase `view(mat)` to the data viewer View().
comparison.cloud(mat)
View(mat)
#######
# Topic modelling demo on the Associated Press corpus (topicmodels/tidytext).
# Fixed: data(AssociatedPress) must run BEFORE the corpus is used -- the
# original called LDA(AssociatedPress, ...) first and loaded the data later.
data(AssociatedPress)
# NOTE(review): dim() on a tm Corpus returns NULL; length(newdata3) gives
# the document count if that is what was intended.
dim(newdata3)
# Fit a 2-topic LDA model with a fixed seed for reproducibility.
ap_lda <- LDA(AssociatedPress, k = 2, control = list(seed = 1234))
ap_lda
# beta: the per-topic-per-word probabilities
ap_topics <- tidy(ap_lda, matrix = "beta")
ap_topics
# Top 10 terms per topic, ordered by probability.
ap_top_terms <- ap_topics %>%
  group_by(topic) %>%
  top_n(10, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
ap_top_terms %>%
  mutate(term = reorder_within(term, beta, topic)) %>%
  ggplot(aes(term, beta, fill = factor(topic))) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ topic, scales = "free") +
  coord_flip() +
  scale_x_reordered()
# Log ratio of the two topics' word probabilities (terms > 0.1% in either).
beta_spread <- ap_topics %>%
  mutate(topic = paste0("topic", topic)) %>%
  spread(topic, beta) %>%
  filter(topic1 > .001 | topic2 > .001) %>%
  mutate(log_ratio = log2(topic2 / topic1))
beta_spread
# gamma: the per-document-per-topic probabilities
ap_documents <- tidy(ap_lda, matrix = "gamma")
ap_documents
# Term counts for document 6 of the report term-document matrix built above.
tidy(tdm) %>%
  filter(document == 6) %>%
  arrange(desc(count))
# Sentiment Analysis
## positive / negative opinion word lists (";"-prefixed lines are comments)
postive <- scan("postivewords.txt", what = 'character', comment.char = ";")
negative <- scan("negativewords.txt", what = 'character', comment.char = ";")
str(postive)
## tokenise each cleaned document on whitespace -> list of word vectors
newdataforsem <- str_split(newdata2, pattern = "\\s+")
write.table(newdataforsem, file = "newdataforsem.txt")
## per-document counts of positive / negative word hits
sumofpos <- lapply(newdataforsem, function(x){sum(!is.na(match(x, postive)))})
# sumofpos
sumofneg <- lapply(newdataforsem, function(x){sum(!is.na(match(x, negative)))})
# sumofneg
## net sentiment score per document
total <- lapply(newdataforsem, function(x){sum(!is.na(match(x, postive))) - sum(!is.na(match(x, negative)))})
total
total <- unlist(total)
total
mean(total)
sd(total)
hist(total)
## --- dictionary-based feature counting (quanteda-style) ---
## NOTE(review): dfm_lookup()/dfm_sample()/docvars() expect a quanteda dfm,
## but `tdm` above is a tm TermDocumentMatrix — confirm these calls run as
## intended against the right object.
myDict <- dictionary(list(terror = c("terror*"),
                          economy = c("job*", "business*", "econom*")))
dict_tdm <- dfm_lookup(tdm, myDict, nomatch = "_unmatched")
tail(dict_tdm)
set.seed(2)
# create a document variable indicating pre or post war
docvars(tdm, "is_prewar") <- docvars(tdm, "Year") < 1945
# sample 40 documents for the training set and use remaining (18) for testing
train_tdm <- dfm_sample(tdm, size = 40)
test_tdm <- tdm[setdiff(docnames(tdm), docnames(train_tdm)), ]
# fit a Naive Bayes multinomial model and use it to predict the test data
nb_model <- textmodel_NB(train_tdm, y = docvars(train_tdm, "is_prewar"))
pred_nb <- predict(nb_model, newdata = test_tdm)
# compare prediction (rows) and actual is_prewar value (columns) in a table
table(prediction = pred_nb$nb.predicted, is_prewar = docvars(test_tdm, "is_prewar"))
## --- topic model on inaugural-speech paragraphs ---
texts = corpus_reshape(data_corpus_inaugural, to = "paragraphs")
par_tdm <- dfm(texts, stem = TRUE, remove_punct = TRUE, remove = stopwords("english"))
par_tdm <- dfm_trim(par_tdm, min_count = 5) # remove rare terms
par_tdm <- convert(par_tdm, to = "topicmodels") # convert to topicmodels format
set.seed(1)
## FIX: the model fit and the terms() inspection were fused onto one line,
## a parse error that prevented the whole file from sourcing.
lda_model <- topicmodels::LDA(par_tdm, method = "Gibbs", k = 5)
terms(lda_model, 5)
### Done First part of Word Cloud
## LDA
# write.table(newdata, file = "beforeclean.txt")
# text <- scan("beforeclean.txt", what = "character")
# write.csv(b, "1afterclean.csv")
alldata <- scan("afterclean.txt", what = "character")
write.csv(alldata, "new2.csv")
data <- fread("new.csv")
data <- data %>% select(a, x)
data
#### NEW METHOD
## NOTE(review): gsub() on a data.frame coerces it to a character vector,
## so `frame2` is no longer tabular after these calls — verify intent.
frame <- read.table("afterclean.csv")
frame2 <- gsub(pattern = "\\W", replace = " ", frame)
frame2 <- gsub(pattern = "\\d", replace = " ", frame2)
frame2 <- tolower(frame2)
frame2 <- removeWords(frame2, stopwords("english"))
frame2 <- gsub(pattern = "\\b[A-z]\\b{1}", replace = " ", frame2)
frame2 <- stripWhitespace(frame2)
## Normalise a tm corpus: drop punctuation, squeeze whitespace, lower-case,
## remove English stopwords, and strip digits. Returns the cleaned corpus.
cleanCorpus <- function(frame){
  stop_list <- stopwords("english")
  cleaned <- tm_map(frame, removePunctuation)
  cleaned <- tm_map(cleaned, stripWhitespace)
  cleaned <- tm_map(cleaned, content_transformer(tolower))
  cleaned <- tm_map(cleaned, removeWords, stop_list)
  tm_map(cleaned, removeNumbers)
}
## Term-frequency table for a character vector of documents.
## Returns a data.frame with columns `word` and `freq`, sorted by
## decreasing frequency (terms sparser than 0.999 are dropped).
frequentTerms <- function(text){
  tdm <- TermDocumentMatrix(cleanCorpus(Corpus(VectorSource(text))))
  tdm <- removeSparseTerms(tdm, 0.999)
  freqs <- sort(rowSums(as.matrix(tdm)), decreasing = TRUE)
  data.frame(word = names(freqs), freq = freqs)
}
## Bigram tokenizer (RWeka) for TermDocumentMatrix construction.
tokenizer <- function(x){
  NGramTokenizer(x, Weka_control(min = 2, max = 2))
}
## Bigram-frequency table for a character vector of documents.
## Same output shape as frequentTerms() (`word`, `freq`), but counts
## two-word sequences via the RWeka bigram tokenizer defined above.
frequentBigrams <- function(text){
  tdm <- TermDocumentMatrix(cleanCorpus(VCorpus(VectorSource(text))),
                            control = list(tokenize = tokenizer))
  tdm <- removeSparseTerms(tdm, 0.999)
  freqs <- sort(rowSums(as.matrix(tdm)), decreasing = TRUE)
  data.frame(word = names(freqs), freq = freqs)
}
## Quick look at the first column of `frame`: how many entries / levels,
## then a bar chart of the 20 most frequent values.
length(frame$V1)
length(levels(frame$V1))
v1_counts <- sort(table(frame$V1), decreasing = TRUE)
top.chars <- as.data.frame(v1_counts)[1:20, ]
ggplot(top.chars, aes(x = Var1, y = Freq)) +
  geom_bar(stat = "identity", fill = "#56B4E9", colour = "black") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(x = "Character", y = "Number of dialogues")
#data$x <- sub("RT.*:", "", data$x)
#data$x <- sub("@.* ", "", data$x)
#text_cleaning_tokens <- data %>%
# tidytext::unnest_tokens(a, x)
#text_cleaning_tokens$x<- gsub('[[:digit:]]+', '', text_cleaning_tokens$x)
#text_cleaning_tokens$x <- gsub('[[:punct:]]+', '', text_cleaning_tokens$x)
#text_cleaning_tokens <- text_cleaning_tokens %>% filter(!(nchar(x) == 1))%>%
# anti_join(stop_words)
#tokens <- text_cleaning_tokens %>% filter(!(x==""))
#tokens <- tokens %>% mutate(ind = row_number())
#tokens <- tokens %>% group_by(a) %>% mutate(ind = row_number()) %>%
# tidyr::spread(key = ind, value = x)
#tokens [is.na(tokens)] <- ""
#tokens <- tidyr::unite(tokens, text,-id,sep =" " )
#tokens$a <- trimws(tokens$a)
#dtm <- CreateDtm(tokens$x,
# doc_names = tokens$a,
# ngram_window = c(1, 2))
#ggplot(newdata2, aes(Publication.Year, fill = Sector)) +
# geom_bar() +
# xlab("Publication Year")
#?text_tokens
#text_ntoken(newdata2)
#text <- c(newdata2)
#write.table(text, file = "readwrite.txt")
#text_df <- tibble(line= 1:n,text = text)
#text_df %>% unnest_tokens(word, newdata2)
#?dplyr
#allfiles2 <- list.files(textfiles, pattern = "*.txt")
#mydata <- lapply(allfiles2, readLines(), sep=" ", header=T, row.names=NULL)
#class(mydata)
#write.file<-""
#alldata <- dir(textfiles, pattern ="*.txt")
#for(i in 1:length(alldata)){
# file <- readLines(alldata[i])
# write.file <- rbind(write.file, file)
#}
#corpus <- paste(write.file, collapse = "")
#write.table(write.file, file = "alldata.txt")
#corpus <- paste(corpus, collapse = "")
#write.table(corpus, file = "alldata.txt", sep = "")
#OR
##### cat *.txt > alldata.txt
#mypath <- ("~/Documents/Master Program/Data Analysis & Visualization/Home Assignment/Files")
#setwd(mypath)
#alldata <- scan("all data.txt", what = "character")
#class(alldata)
#alldata <- paste(readLines("all data.txt"), collapse = " ")
#alldata2 <- paste(alldata, collapse = " ")
#write.table(alldata, file = "New data.txt")
#alldata2 <- gsub(pattern = "\\W", replace = " ", alldata)
#alldata2 <- gsub(pattern = "\\d", replace= " ", alldata2)
#alldata2 <- tolower(alldata2)
#alldata2 <- removeWords(alldata2, stopwords("english"))
#alldata2 <- gsub(pattern = "\\b[A-z]\\b{1}", replace= " ", alldata2)
#alldata2 <- stripWhitespace(alldata2)
#write.table(alldata2, file = "New data.txt")
#alldata3 <- paste(alldata2, collapse = " ")
#alldata3 <- stripWhitespace(alldata3)
#alldata4 <- strsplit(alldata3, " ")[[1]]
#write.table(alldata4, file = "New data.txt")
#class(alldata4)
#wordcloud(alldata2, min.freq = 2000, random.order = FALSE, scale = c(3,0.4))
#wordcloud(alldata3, min.freq = 2000, random.order = FALSE)
#wordcloud(alldata3, min.freq = 1000, random.order = FALSE, scale =c(3,0.4), color = rainbow(5))
#wordcloud(alldata2, min.freq = 2000, random.order = FALSE, scale =c(3,0.4), color = rainbow(5))
#comparison.cloud(alldata2)
#alldata3 <- Corpus(VectorSource(alldata2))
#alldata3
#td <- TermDocumentMatrix(alldata3)
#td
#mats <- as.matrix(td)
#comparison.cloud(mats)
#Sentiment Analysis
#postiveall <- scan("postivewords.txt",what='character',comment.char=";")
#negativeall <- scan("negativewords.txt",what='character',comment.char=";")
#str(postiveall)
#newdatasem <- str_split(alldata2, pattern = "\\s+")
#sumpos <- lapply(newdatasem, function(x){sum(!is.na(match(x, postiveall)))})
#sumpos
#sumneg <- lapply(newdatasem, function(x){sum(!is.na(match(x, negativeall)))})
#sumofneg
#newtotal <- lapply(newdatasem, function(x){sum(!is.na(match(x, postiveall))) - sum(!is.na(match(x,negativeall)))})
#newtotal
#newtotal <- unlist(newtotal)
#newtotal
#mean(newtotal)
#sd(newtotal)
#hist(newtotal)
|
cb7733e46f925565f7e12d5299920a24702e80ca
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/matsbyname/examples/quotient_byname.Rd.R
|
0d006eb501b13669fabc1a70cc2218f2572b8ebf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 975
|
r
|
quotient_byname.Rd.R
|
library(matsbyname)
### Name: quotient_byname
### Title: Name-wise matrix element division
### Aliases: quotient_byname
### ** Examples
library(dplyr)
## scalar division
quotient_byname(100, 50)
## two matrices whose rows/columns are deliberately in opposite orders
row_names <- c("c1", "c2")
col_names <- c("i1", "i2")
U <- matrix(1:4, ncol = 2, dimnames = list(row_names, col_names)) %>%
  setrowtype("Commodities") %>% setcoltype("Industries")
G <- matrix(rev(1:4), ncol = 2, dimnames = list(rev(row_names), rev(col_names))) %>%
  setrowtype("Commodities") %>% setcoltype("Industries")
U / G # Non-sensical. Names aren't aligned
quotient_byname(U, G)  # aligns rows/columns by name before dividing
quotient_byname(U, 10)
quotient_byname(10, G)
# This also works with lists
quotient_byname(10, list(G,G))
quotient_byname(list(G,G), 10)
quotient_byname(list(U, U), list(G, G))
## ... and with list columns in a data frame
DF <- data.frame(U = I(list()), G = I(list()))
DF[[1,"U"]] <- U
DF[[2,"U"]] <- U
DF[[1,"G"]] <- G
DF[[2,"G"]] <- G
quotient_byname(DF$U, DF$G)
DF %>% mutate(elementquotients = quotient_byname(U, G))
|
7780fd0020e653b02ad8f3144fecb40e78fc81a6
|
83c72d7783be9198bd9770a96256421891d47667
|
/inst/shiny/ui.R
|
d5714250f45499b2b6a66a6b1bdc48abd0bcd51c
|
[
"MIT"
] |
permissive
|
stevecoward/ripal
|
4ae1717cff36227bd5bf1e9b313f98f32cabd3ea
|
9b464b321db5fcd2cb41853672eae452331e7848
|
refs/heads/master
| 2021-05-28T07:38:42.130572
| 2015-02-19T05:30:32
| 2015-02-19T05:30:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,642
|
r
|
ui.R
|
#' ui.R
#'
#' ripal Shiny client-side renderer: builds the sidebar (dump selection and
#' tuning controls) and a tabbed main panel that displays the password-dump
#' analyses rendered by the matching server.R outputs.
#'
shinyUI(pageWithSidebar(
headerPanel("ripal - password dump analysis in R"),
# --- Sidebar: CSS/font includes, dump selection, and tuning controls ---
sidebarPanel(
tags$head(
tags$link(rel="stylesheet",
type="text/css",
href="ripal.css"),
tags$link(rel="stylesheet",
type="text/css",
href="http://openfontlibrary.org/face/fantasque-sans-mono"),
tags$link(rel="stylesheet",
type="text/css",
href="http://fonts.googleapis.com/css?family=Lato:400,700,400italic")
),
helpText("Select your own cracked password dump (ASCII/UTF-8, pls) ",
"or choose from an existing password dump in the list ",
"and get some spiffy stats in return! Large password ",
"dumps will take a while, so pls be kind to the server."),
# bundled sample dumps served from the app directory
selectInput("localDumpFile",
"Choose from existing lists:",
c("hak5.txt", "hotmail.txt", "myspace.txt",
"phpbb.txt", "singles.org.txt"),
selected="hak5.txt"),
div(HTML("<b>OR</b>")),
# user-supplied dump upload (plain text only)
fileInput('dumpfile',
'Choose a password dump to analyze:',
accept=c('text/plain')),
# cap on "Top #" table sizes: default 10, range 5-30
numericInput("topN",
"'Top #' lists max items:",
10, 5, 30, step=1),
sliderInput("dateRange", "Date Range (for 3rd tab)", min=1975 , max=2050, value=c(1990,2020), step=1),
div(HTML("You can find many password dumps at <a href='https://wiki.skullsecurity.org/Passwords'>SkullSecurity</a>.<hr/>")),
div(HTML("Source at: <a href='https://github.com/ddsbook/ripal'>github</a>")),
br(),
div(HTML("Another app brought to you by <a href='http://datadrivensecurity.info/'>Data Driven Security</a>"))
),
# --- Main panel: one tab per analysis family ---
mainPanel(
tabsetPanel(
# summary stats, top passwords/basewords tables and charts
tabPanel("Overview",
htmlOutput("overview1"),
br(),
div(class="topContainer",
div(class="topDiv",
strong("Top Passwords"),
tableOutput("top1")),
div(class="topDiv",
strong("Top Basewords"),
tableOutput("topBasewords"))),
plotOutput("top1Chart"),
plotOutput("topBasewordsChart")
),
# password length distribution and character-composition stats
tabPanel("Length/Composition Analyses",
div(class="topContainer",
div(class="topDiv",
strong("Top By Length"),
tableOutput("topLen")),
div(class="topDiv",
strong("Top By Freq"),
tableOutput("topFreq"))),
br(),
plotOutput("pwLenFreq"),
br(),
htmlOutput("pwCompStats"),
br()
),
# counts of dictionary-word / date-token matches in the corpus
tabPanel("Word List/Dates Analyses",
h4("25 'Worst' Internet Passwords Corpus Counts"),
dataTableOutput("worst25"),
br(),
h4("Weekdays (Full) Corpus Counts"),
dataTableOutput("weekdaysFullDT"),
br(),
h4("Weekdays (Abbrev) Corpus Counts"),
dataTableOutput("weekdaysAbbrevDT"),
br(),
h4("Months (Full) Corpus Counts"),
dataTableOutput("monthsFullDT"),
br(),
h4("Months (Abbrev) Corpus Counts"),
dataTableOutput("monthsAbbrevDT"),
br(),
h4(textOutput("yearRangeTitle")),
dataTableOutput("yearsDT"),
br(),
h4("Colors Corpus Counts"),
dataTableOutput("colorsDT"),
br(),
h4("Seasons Corpus Counts"),
dataTableOutput("seasonssDT"),
br(),
h4("Planets Corpus Counts"),
dataTableOutput("planetsDT"),
br()
),
# distributions of trailing digit sequences (1-5 digits)
tabPanel("Last Digit(s) Analyses",
plotOutput("pwLastDigit"),
br(),
div(class="topContainer",
div(class="topDiv", tableOutput("last2")),
div(class="topDiv", tableOutput("last3"))),
br(),
div(class="topContainer",
div(class="topDiv", tableOutput("last4")),
div(class="topDiv", tableOutput("last5"))),
br()
),
id="tabs"
)
)
))
|
f50073b592d97199c5e751644c59f3e66ed91b41
|
97eedfe2f40d45b10a7b1e49dd6ae59bfcfe2fa1
|
/runTest.PowerTest.R
|
71d6a8fd143c28db0de88baaf3b5acb8ce07d6ce
|
[] |
no_license
|
nihar/kruskal-wallis
|
289b3a149633a3bc4edbbc9f61fda2bff285a8af
|
63b36809695db0c0ae2cb0927c885fdf3e4a0076
|
refs/heads/master
| 2021-01-20T07:10:32.658329
| 2012-09-16T19:10:28
| 2012-09-16T19:10:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
runTest.PowerTest.R
|
#---------------------------------------------------------------------------
# runTest.PowerTest.R
# Run the Monte Carlo simulation for the specified number of iterations.
# Each iteration draws a fresh sample under the configured group means/sd
# and records the p-values of a one-way ANOVA and a Kruskal-Wallis test in
# this$empAnovaPw / this$empKWPw (field names suggest these feed an
# empirical-power computation elsewhere — confirm in the caller).
# @param N The number of iterations for Monte Carlo simulation
# @author Nihar Shah
#---------------------------------------------------------------------------
setMethodS3("runTest", "PowerTest",
appendVarArgs = FALSE, function(this, N = 100)
{
# (re)build the group factor used when sampling
generateGroups.PowerTest(this);
# preallocate p-value vectors, one slot per iteration
this$empAnovaPw = rep(0, N);
this$empKWPw = rep(0, N);
for (i in 1:N)
{
# draw one simulated dataset under the configured parameters
X = generateSampleData.PowerTest(this,
sSize=this$sampleSizes,
sMean=this$mu0, sSigma=this$sigma);
# parametric and rank-based tests on the same sample
aTest = summary(aov(X ~ this$groups));
kruskal = kruskal.test(X, this$groups);
this$empKWPw[i] = kruskal$p.value;
# first row of the ANOVA table holds the group-effect p-value
this$empAnovaPw[i] = aTest[[1]][["Pr(>F)"]][1];
}
});
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
|
d8f85b51d39270fcdae445f4c3eda637eea1fd70
|
8c6a81de5c9b579cb51d19f6ec7c9371440cc61a
|
/Clustering/tumorClustering.R
|
0d3e7e1ac827397e33d851beae2952bba7f27877
|
[] |
no_license
|
ababen/Springboard-Section7
|
c414ee4acd7d28f4b9fe853508bdc7eb4a4a66eb
|
44860609daac8873a834b5cf7e0f537413264576
|
refs/heads/master
| 2021-01-11T01:48:51.199634
| 2016-11-29T00:58:11
| 2016-11-29T00:58:11
| 70,661,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
r
|
tumorClustering.R
|
setwd("~/R/Springboard-Section7/Clustering")  # NOTE(review): setwd() kept for script compatibility
## Load the healthy-brain image intensities and display as greyscale.
healthy = read.csv("healthy.csv", header = FALSE)
healthyMatrix = as.matrix(healthy)
str(healthyMatrix)
## spelled out length.out (was the empty-argument form seq(0,1,,length=256))
image(healthyMatrix, axes = FALSE, col = grey(seq(0, 1, length.out = 256)))
## Flatten to a vector of pixel intensities for clustering.
healthyVector = as.vector(healthyMatrix)
distance = dist(healthyVector, method = "euclidean")
str(healthyVector)
## Hierarchical clustering needs n*(n-1)/2 pairwise distances.
## FIX: derive n from the data instead of hard-coding 365636.
n = length(healthyVector)
n * (n - 1) / 2
## k-means with k = 5 intensity clusters (seed fixed for reproducibility).
k = 5
set.seed(1)
KMC = kmeans(healthyVector, centers = k, iter.max = 1000)
str(KMC)
healthyClusters = KMC$cluster
KMC$centers[2]
## Reshape the cluster assignments back to image dimensions and display.
dim(healthyClusters) = c(nrow(healthyMatrix), ncol(healthyMatrix))
image(healthyClusters, axes = FALSE, col = rainbow(k))
## Apply the healthy-image clustering to the tumor image.
tumor = read.csv("tumor.csv", header = FALSE)
tumorMatrix = as.matrix(tumor)
tumorVector = as.vector(tumorMatrix)
## FIX: don't install unconditionally from inside a script; install only
## when the package is missing.
if (!requireNamespace("flexclust", quietly = TRUE)) install.packages("flexclust")
library(flexclust)
## convert the kmeans fit so predict() can assign new pixels to clusters
KMC.kcca = as.kcca(KMC, healthyVector)
tumorClusters = predict(KMC.kcca, newdata = tumorVector)
dim(tumorClusters) = c(nrow(tumorMatrix), ncol(tumorMatrix))
image(tumorClusters, axes = FALSE, col = rainbow(k))
|
03c90457a251e460fd72013fe020f8b7128ed974
|
eefb8e7651673265daa41b3200bb5db4e9436d03
|
/Listas/plot_functions.R
|
56185533631d16983c698c8ce1c9d0aa9b5b9d54
|
[] |
no_license
|
victordalla/Trabalho_ME613
|
111146861943afac4537ed849feb07cfca0b04ad
|
bd324b19bf0e11c850e52badcc8df7678f39cffd
|
refs/heads/master
| 2022-02-01T05:35:45.546630
| 2019-07-09T13:37:51
| 2019-07-09T13:37:51
| 190,424,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,620
|
r
|
plot_functions.R
|
plot_prediction <- function(model, response, response_name = "resposta") {
  # needs: dplyr, ggplot2
  # Plot the observed response against the fitted values of a fitted lm:
  # identity line (fit = fit), observed points, and the pointwise
  # prediction interval as a ribbon.
  #   model:         a fitted lm object
  #   response:      observed response vector, same order as the fit data
  #   response_name: y-axis label
  # FIX: call the generic predict() (not predict.lm directly) and fully
  # qualify ggplot2::aes(), which was used unqualified and would not be
  # found unless ggplot2 happened to be attached.
  p <- predict(model, interval = "prediction") %>%
    dplyr::as_tibble() %>% dplyr::mutate(response = response) %>%
    ggplot2::ggplot() +
    ggplot2::geom_line(ggplot2::aes(fit, fit), col = "chocolate") +
    ggplot2::geom_point(ggplot2::aes(fit, response), col = "cadetblue") +
    ggplot2::geom_ribbon(ggplot2::aes(x = fit, ymin = lwr, ymax = upr), fill = "coral", alpha = 0.3) +
    ggplot2::labs(x = "predição", y = response_name) + ggplot2::theme_bw()
  p
}
plot_residuals <- function(model, binwidth = NULL, bins = NULL) {
  # needs: ggplot2, gridExtra, qqplotr
  # 2x2 diagnostic panel for a fitted lm: standardized residuals vs index,
  # vs fitted values, their histogram, and a QQ plot with a pointwise band.
  # NOTE(review): `binwidth`/`bins` are accepted but never forwarded to
  # geom_histogram() — confirm whether they should be passed through.
  # FIX: qualify ggplot2::aes(), which was used unqualified and would fail
  # unless ggplot2 was attached; seq_along() replaces 1:length().
  residuals <- data.frame(
    residual = rstandard(model),          # internally studentized residuals
    fitted = model$fitted.values,
    index = seq_along(model$fitted.values)
  )
  # base plot object shared by the three residual panels
  p <- ggplot2::ggplot(residuals) +
    ggplot2::labs(y = "resíduo student.") + ggplot2::theme_bw()
  gridExtra::grid.arrange(
    p + ggplot2::geom_point(ggplot2::aes(x = index, y = residual), col = "gray30", alpha = 0.80) +
      ggplot2::labs(x = "índice"),
    p + ggplot2::geom_point(ggplot2::aes(x = fitted, y = residual), col = "gray30", alpha = 0.80) +
      ggplot2::labs(x = "valores ajustados"),
    p + ggplot2::geom_histogram(ggplot2::aes(x = residual), fill = "gray30", col = "gray80"),
    ggplot2::ggplot(residuals, ggplot2::aes(sample = residual)) +
      qqplotr::stat_qq_band(bandType = "pointwise") +
      qqplotr::stat_qq_line() +
      qqplotr::stat_qq_point(col = "gray20", alpha = 0.80) +
      ggplot2::labs(x = "quantil teórico", y = "quantil amostral") + ggplot2::theme_bw(),
    nrow = 2
  )
}
|
20942812b6c78ad1367aef12a175f4e653280c2d
|
2f74890bca4e2405e91f30e26f6ff560653b808a
|
/Movie Budget-Rating Script.R
|
a53914b1f8bd4f2d4db5042bfaed892e8e0bcde0
|
[] |
no_license
|
andcar23/Movie-Budget-Rating-Simple-Linear-Regression
|
082d90343e10e2b5703fd398914a251d6beacd4f
|
e212c3c2cb68069b66695bba0cb5b847edad1ab1
|
refs/heads/master
| 2020-07-18T08:42:31.802661
| 2019-09-04T02:47:30
| 2019-09-04T02:47:30
| 206,215,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,103
|
r
|
Movie Budget-Rating Script.R
|
# Andrew Carroll
# Project 3
# April 23, 2018
# Simple linear regression of IMDb score on movie budget.
library(summarytools)
library(ggplot2)  # NOTE(review): loaded but only base graphics are used below
# Import data (interactive file chooser)
movie <- read.csv(file.choose())
# Inspect budget, then keep USA movies with budget under $400M (drops outliers)
descr(movie$budget)
movies <- subset(movie, movie$budget < 400000000 & movie$country == 'USA')
# Viewing the variable to see if it was imported correctly
View(movies)
str(movies)
descr(movie)
descr(movies)
# Putting the movies' variables into their own objects and getting descriptive statistics
budget <- movies$budget
score <- movies$imdb_score
descr(budget)
descr(score)
# Make the OLS model into its own object and check
model <- lm(score ~ budget)
model
# Check OLS assumptions (residual diagnostic plots)
plot(model)
# Summary statistics of the OLS model
summary(model)
cor(budget, score)
# Plot data
# NOTE(review): `xtick` is unused; axis tick labels are set explicitly below.
xtick <- c(0, 100, 200, 300, 400)
plot(budget,score, main = "Relationship Between \n Movies' Budget and Score",
xlab = "Budget (Millions of Dollars)", ylab = "IMDb Score", xaxt = 'n')
axis(side = 1, at= c(0, 1e+08, 2e+08, 3e+08, 4e+08), labels = c("0","100","200","300", "400"))
abline(model, cex = 3, col = "blue")
|
281c09c6677141560038defdca4b2c93523fe93f
|
0c591324f1c2a3669bec7203556d971e73a74810
|
/src/Cal_fit_Auto.R
|
7193cbaaf5883c6f2cabff0cc60b5d4ee8ef2bd2
|
[] |
no_license
|
DmitryMarkovich/Thrombin_Analyzer
|
58949683660091f88d8815e8f117deafcb8c60c6
|
f79cc112a88a437d69f99f17681d919d8d6ea385
|
refs/heads/master
| 2021-01-21T04:33:22.755343
| 2018-01-30T16:23:09
| 2018-01-30T16:23:09
| 50,573,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,518
|
r
|
Cal_fit_Auto.R
|
################################################################################
Cal$set(
    which = "public", name = "fit_Auto",
    value = compiler::cmpfun(
        f = function(silent = TRUE) {
            ## Automatic model selection for a calibration signal: choose a
            ## candidate model family from the data's x/y range ratios
            ## (num.smry$rat), fit it, and record the outcome in fit$Auto /
            ## fit$Auto_model via <<-.
            ## NOTE(review): `silent` is accepted but unused here; the inner
            ## fit_* calls hard-code silent = TRUE.
            if (!is.null(fit$Auto)) {
                ## print(fit$Auto);
                warning(">> No fitting: Auto fit already exists!");
            } else {
                print(">>> Cal.fit_Auto called!");
                ft <- NULL;
                if (num.smry$rat$x <= 1.5 || num.smry$rat$y <= 6) {
                    ## small range: early Michaelis-Menten regime
                    ## NOTE(review): legacy code below used && here; the
                    ## switch to || widens this branch — presumed intentional.
                    ft <- fit_EarlyMM(silent = TRUE);
                    if (!is.null(ft)) {
                        fit$Auto <<- TRUE;
                        fit$Auto_model <<- "EarlyMM";
                    } else {
                        fit$Auto <<- FALSE; fit$Auto_model <<- "None";
                    }
                } else if (num.smry$rat$x >= 5 && num.smry$rat$x <= 25 &&
                           num.smry$rat$y >= 10 && num.smry$rat$y <= 30) {
                    ## intermediate range: fit plain and T0-offset
                    ## late-exponential models and keep the better one
                    ft <- fit_LateExp(silent = TRUE);
                    if (!is.null(ft)) {
                        ft2 <- fit_T0LateExp(silent = TRUE);
                        compare_two_models("LateExp", "T0LateExp", ft, ft2);
                        ## fit$Auto <<- ft;
                        ## fit$Auto_model <<- "T0LateExp";
                    } else {
                        fit$Auto <<- FALSE; fit$Auto_model <<- "None";
                    }
                } else if (num.smry$rat$x >= 15 && num.smry$rat$y >= 40) {
                    ## large range: fit plain and T0-offset late-MM models
                    ## and keep the better one
                    ft <- fit_LateMM(silent = TRUE);
                    if (!is.null(ft)) {
                        ft2 <- fit_T0LateMM(silent = TRUE);
                        compare_two_models("LateMM", "T0LateMM", ft, ft2);
                        ## fit$Auto <<- ft;
                        ## fit$Auto_model <<- "T0LateMM";
                    } else {
                        fit$Auto <<- FALSE; fit$Auto_model <<- "None";
                    }
                } else {
                    ## no candidate matched the range ratios
                    fit$Auto <<- FALSE; fit$Auto_model <<- "None";
                }
            } ## End of if (exists)
        }, options = kCmpFunOptions),
    overwrite = FALSE); ## End of Cal$fit_Auto
################################################################################
################################################################################
Cal$set(
    which = "public", name = "get_Auto",
    value = compiler::cmpfun(
        f = function() {
            ## Fitted curve of the automatically selected model; falls back
            ## to an all-NA vector (with a warning) when no Auto fit exists.
            if (!exists(x = "Auto", where = fit)) {
                warning(">> fit$Auto does not exist!");
                return(rep(NA, length(data$x)));
            }
            return(get_model(fit$Auto_model));
        }, options = kCmpFunOptions),
    overwrite = FALSE); ## End of Cal$get_Auto
################################################################################
################################################################################
Cal$set(
    which = "public", name = "get_init_rate_Auto",
    value = compiler::cmpfun(
        f = function() {
            ## Initial-rate curve of the automatically selected model; falls
            ## back to an all-NA vector (with a warning) when no Auto fit
            ## exists.
            if (!exists(x = "Auto", where = fit)) {
                warning(">> fit$Auto does not exist!");
                return(rep(NA, length(data$x)));
            }
            return(get_init_rate(fit$Auto_model));
        }, options = kCmpFunOptions),
    overwrite = FALSE); ## End of Cal$get_init_rate_Auto
################################################################################
################################################################################
Cal$set(
    which = "public", name = "parms_Auto",
    value = compiler::cmpfun(
        f = function(e0, s0) {
            ## Parameter summary of the automatically selected model for the
            ## supplied e0/s0 values; NULL (with a warning) when no Auto fit
            ## exists.
            print(">> Call to Cal.parms_Auto");
            if (!exists(x = "Auto", where = fit)) {
                warning(">> fit$Auto does not exist!");
                return(NULL);
            }
            return(parms_model(fit$Auto_model, e0, s0));
        }, options = kCmpFunOptions),
    overwrite = FALSE);
################################################################################
################################################################################
######################################## Legacy RF classes code
################################################################################
## ################################################################################
## Cal.fit_Auto <- function(silent = TRUE) {
## if (exists(x = "Auto", where = fit)) {
## print(fit$Auto);
## warning(">> No fitting: Auto fit already exists!");
## } else {
## print(">>> Cal.fit_Auto called!");
## ft <- NULL;
## if (num.smry$rat$x <= 1.5 && num.smry$rat$y <= 6) {
## ft <- fit_EarlyMM(silent = TRUE);
## if (!is.null(ft)) {
## fit$Auto <<- TRUE;
## fit$Auto_model <<- "EarlyMM";
## } else {
## fit$Auto_model <<- "None";
## }
## } else if (num.smry$rat$x >= 5 && num.smry$rat$x <= 25 &&
## num.smry$rat$y >= 10 && num.smry$rat$y <= 30) {
## ft <- fit_T0LateExp(silent = TRUE);
## if (!is.null(ft)) {
## fit$Auto <<- ft;
## fit$Auto_model <<- "T0LateExp";
## } else {
## fit$Auto_model <<- "None";
## }
## } else if (num.smry$rat$x >= 15 && num.smry$rat$y >= 40) {
## ft <- fit_T0LateMM(silent = TRUE);
## if (!is.null(ft)) {
## fit$Auto <<- ft;
## fit$Auto_model <<- "T0LateMM";
## } else {
## fit$Auto_model <<- "None";
## }
## }
## } ## End of if (exists)
## } ## End of Cal.fit_Auto
## ################################################################################
## ################################################################################
## Cal.get_Auto <- function() {
## if (exists(x = "Auto", where = fit)) {
## return(get_model(fit$Auto_model));
## } else {
## warning(">> fit$Auto does not exist!");
## return(rep(NA, length(data$x)));
## }
## } ## End of Cal_get_Auto
## ################################################################################
## ################################################################################
## Cal.get_init_rate_Auto <- function() {
## if (exists(x = "Auto", where = fit)) {
## return(get_init_rate(fit$Auto_model));
## } else {
## warning(">> fit$Auto does not exist!");
## return(rep(NA, length(data$x)));
## }
## } ## End of Cal_get_init_rate_Auto
## ################################################################################
## ################################################################################
## Cal.parms_Auto <- function(e0, s0) {
## print(">> Call to Cal.parms_Auto");
## if (exists(x = "Auto", where = fit)) {
## return(parms_model(fit$Auto_model, e0, s0));
## } else {
## warning(">> fit$Auto does not exist!");
## return(NULL);
## }
## } ## End of Cal.parms_Auto
## ################################################################################
################################################################################
######################################## End of Legacy RF classes code
################################################################################
|
b97542a2402e3f8521b513be50c3c5f510f74115
|
4175b5a7e7c6bce3b3db9694cfa13591aabd8ca2
|
/better visualize.R
|
605c38fd883f25b5a42ad3b687c18bddfda002c9
|
[] |
no_license
|
abhinav-sharma-6167/Kaggle-Practice-Facebook-Checkin
|
0d5fe534071ce39479cfac9897eb4576fdcb020e
|
90467a91e661e7a9d88c7c1ee7b8b5a21437a6ba
|
refs/heads/master
| 2021-09-08T05:02:44.559558
| 2016-11-18T19:35:17
| 2016-11-18T19:35:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
r
|
better visualize.R
|
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(dplyr)
# Import train dataset
train <- read_csv("../input/train.csv")
# add a per-place check-in count, then order places by popularity
train <- train %>% group_by(place_id) %>%
mutate(check_ins = n()) %>%
ungroup() %>%
arrange(desc(check_ins))
# rows belonging to the single most checked-in place (top row after sorting)
most_popular_place <- train %>% filter(place_id == train$place_id[1])
# all check-ins inside the bounding box of that place, coloured by place;
# the black annotation marks the place's own check-ins
ggplot(train %>%
filter(x > min(most_popular_place$x),
x < max(most_popular_place$x),
y > min(most_popular_place$y),
y < max(most_popular_place$y)),
aes(x=x, y=y, color = as.factor(place_id))) +
geom_point(alpha = .05, size = .05) +
theme(legend.position="none") +
annotate("point",
x= most_popular_place$x,
y = most_popular_place$y)
# the popular place alone, sized/coloured by location accuracy
ggplot(most_popular_place,
aes(x=x, y=y, color = accuracy, size = accuracy)) +
geom_point() +
theme(legend.position="none")
# repeat the bounding-box view for a randomly chosen place
a_random_place <- train %>% filter(place_id == sample(train$place_id, 1))
ggplot(train %>%
filter(x > min(a_random_place$x),
x < max(a_random_place$x),
y > min(a_random_place$y),
y < max(a_random_place$y)),
aes(x=x, y=y, color = as.factor(place_id))) +
geom_point(alpha = .05, size = .05) +
theme(legend.position="none") +
annotate("point",
a_random_place$x,
a_random_place$y)
|
3ee9ee7152701f7e2e0438a67044ccf084af1166
|
b3a0ed1700c1313c0453320feb84859567848e86
|
/explanatory-data-analysis/webcrawling-naverblog-warmtone.R
|
28659331dc7504f12a8191bd561dfc2b9d21d2a1
|
[] |
no_license
|
cs13syy/snu-fira-bigdata-analytics
|
f7e45acc3a5d0d0288c0e13ff314c366e00a3736
|
d8ce6273b4ff4e136d923e1522f60b2576c02c3e
|
refs/heads/master
| 2020-03-21T02:03:15.123119
| 2018-10-28T13:45:51
| 2018-10-28T13:45:51
| 137,976,936
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,128
|
r
|
webcrawling-naverblog-warmtone.R
|
# Libraries
library(rvest)
library(dplyr)
library(KoNLP)
library(httr)  # FIX: GET() is called unqualified below, so httr must be attached
# Header setup: credentials required to authorize Naver Open API requests
client_id <- 'XXXXXXXXXXXXXXXXXXXXXX'
client_secret <- 'XXXXXXXXXXX'
header <- httr::add_headers(
  'X-Naver-Client-Id' = client_id,
  'X-Naver-Client-Secret' = client_secret)
# Search keyword; percent-encode its UTF-8 bytes for use in the request URL
query <- '웜톤'
# iconv(query, to = "UTF-8", toRaw = FALSE)
query <- iconv(query, to = 'UTF-8', toRaw = TRUE)
query <- paste0('%', paste(unlist(query), collapse = '%'))
query <- toupper(query)  # upper-case the hex digits
query
end_num <- 1000     # fetch up to 1000 results in total
display_num <- 100  # the API caps each request at 100 results
start_point <- seq(1, end_num, display_num)  # request offsets: 1, 101, 201, ...
i <- 1  # index for the single test request below
url <- paste0('https://openapi.naver.com/v1/search/blog.xml?query=',
              query, '&display=', display_num, '&start=',
              start_point[i], '&sort=sim')
url_body <- read_xml(GET(url, header))  # the request is denied without the auth header
# extract title, blogger name, date, link and description fields
GET(url, header)
as.character(url_body)
title <- url_body %>% xml_nodes('item title') %>%
  xml_text()
bloggername <- url_body %>%
  xml_nodes('item bloggername') %>% xml_text()
postdate <- url_body %>% xml_nodes('postdate') %>%
  xml_text()
link <- url_body %>% xml_nodes('item link') %>%
  xml_text()
description <- url_body %>% xml_nodes('item description') %>%
  html_text()
# Paged download: repeat the request for every offset and stack the rows
i <- 1
final_dat <- NULL  # accumulator for all result pages
for (i in seq_along(start_point))
{
  # request xml format
  url <- paste0('https://openapi.naver.com/v1/search/blog.xml?query=', query,
                '&display=', display_num, '&start=', start_point[i], '&sort=sim')
  # option header
  url_body <- read_xml(GET(url, header), encoding = "UTF-8")
  title <- url_body %>% xml_nodes('item title') %>% xml_text()
  bloggername <- url_body %>% xml_nodes('item bloggername') %>% xml_text()
  postdate <- url_body %>% xml_nodes('postdate') %>% xml_text()
  link <- url_body %>% xml_nodes('item link') %>% xml_text()
  description <- url_body %>% xml_nodes('item description') %>% html_text()
  temp_dat <- cbind(title, bloggername, postdate, link, description)
  final_dat <- rbind(final_dat, temp_dat)
  cat(i, '\n')
}
final_dat # matrix
final_dat[1,1] # title
final_dat[1,2] # name
final_dat[1,3] # date
final_dat[1,4] # link
final_dat[1,5] # description
# Preprocessing: strip markup/escape sequences and keep only Hangul/Latin letters
final_dat <- data.frame(final_dat, stringsAsFactors = FALSE)
final_dat$description <- gsub('\n|\t|<.*?>|"',' ',final_dat$description)
final_dat$description <- gsub('[^가-힣a-zA-Z]',' ',final_dat$description)
final_dat$description <- gsub(' +',' ',final_dat$description)
# Frequency analysis
library(KoNLP)
nouns <- KoNLP::extractNoun(final_dat$description)
nouns[1:20]
# user dictionary of cosmetics terms (tagged "ncn" = common noun)
ewdic <- data.frame(V1 = c("트렌디","립스틱","발색","이글립스","색조","완성","베이스","톤업","선크림"), "ncn")
# FIX: the dictionary was built as `ewdic` but registered as the undefined
# object `newdic`, which aborted the script.
KoNLP::mergeUserDic(ewdic)
nouns_unlist <- unlist(nouns)
nouns_unlist <- Filter(function(x){nchar(x) >= 2}, nouns_unlist)  # keep words of 2+ chars
wordcount <- table(nouns_unlist)
head(wordcount)
wordcount_top <- head(sort(wordcount, decreasing = TRUE), 100)
head(wordcount_top, n = 10)
|
701c15f592e6797ca733054a5cb04a80bfafe81f
|
7d14874f217714b4c6eeaff7e40cf6b12668ac47
|
/tick_dataset_results_analysis/manuscript_figures/old figs/manuscript_figures_06272022.R
|
b1f6efcb43d7f546ff5afa19e25bde4697f221e8
|
[] |
no_license
|
rowan-christie21/ixodes_scapularis_research
|
9368bd4f01f3934e6fb585717de2e3b47eaab688
|
78129b0bcde96aa66d18a10b35f66f291adcb901
|
refs/heads/main
| 2023-04-07T22:19:49.141465
| 2022-08-02T01:58:00
| 2022-08-02T01:58:00
| 512,540,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,723
|
r
|
manuscript_figures_06272022.R
|
#----------------------------------------------------------------------------------------
# 6/26/2022 Code for figures in manuscript
# title: Longer study length, standardized sampling techniques, and broader geographic scope leads to higher likelihood of detecting stable abundance patterns in long term deer tick studies
# doi: https://doi.org/10.1101/2021.03.06.434217
# github repo: https://github.com/SMCCoder/tick_dataset_results_analysis
#----------------------------------------------------------------------------------------
#-------------------------------------------
# load libraries
library(ggplot2)
library(ggpubr)
#import script from QsRutils package to add functions for creating letter assignments for groups that are not significantly different
#reference: Piepho, H. P. 2004. An algorithm for a letter-based representation of all-pairwise comparisons. Journal of Computational and Graphical Statistics **13**:456-466.
# NOTE(review): all paths below are absolute Windows paths -- the script
# only runs on the original author's machine; consider here::here() or
# relative paths for portability.
source("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/QsRutils_05182020/make_letter_assignments.R")
source("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/QsRutils_05182020/get_plot_limits.R")
#-------------------------------------------
# read in tick dataset results from 7/11/2022
# One row per dataset; columns used below include stability_time,
# data_range, proportion_wrong, proportion_wrong_before_stability,
# sampling_technique, life_stage, geographic_scope, tested_for_b_burgdoferi.
tick_dataset_results <- readxl::read_xlsx("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/data/tick dataset results_07112022.xlsx", sheet = 1)
#-------------------------------------------
##############################
# Figure 1: The fraction of datasets that take y years to reach stability
##############################
# For each dataset, count how many datasets needed MORE years than it to
# reach stability (used for the Fig. 2 correlation below), and how many
# needed <= its years (plotted in Fig. 1). The original double loops
# re-scanned and re-assigned every matching row on each iteration
# (O(n^2) passes over the column); a single vectorized vapply over the
# same vector produces identical values in one pass.
st <- tick_dataset_results$stability_time
tick_dataset_results$stability_time_culamative <-
  vapply(st, function(y) sum(st > y), numeric(1))
# NOTE(review): despite its name this next column is the cumulative COUNT
# of datasets with stability time <= y (Fig. 1 labels it "Number of
# datasets"), not a proportion. Both column names (including the
# "culamative" typo) are kept unchanged because later figures use them.
tick_dataset_results$stability_time_proportion <-
  vapply(st, function(y) sum(st <= y), numeric(1))
# Figure 1: line plot of years-to-stability (y) against the cumulative
# number of datasets reaching stability within that many years (x).
# create lineplot for years to reach stability for each range of years culamative
years_to_reach_stability_num <- ggplot(tick_dataset_results, aes(x = stability_time_proportion, y = stability_time)) +
geom_line(color="skyblue", size=2)+
scale_y_continuous(name = "Years to reach stability", expand = c(0,0), limits = c(0,25)) +
xlab("Number of datasets") +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)),
plot.margin = margin(10, 20, 5, 5))
years_to_reach_stability_num
# Write the panel to disk at 300 dpi with today's date in the filename.
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_1_years_to_reach_stability_num_line_chart ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
years_to_reach_stability_num
dev.off()
##############################
# Figure 2: Comparison of study length, years to reach stability and the number of datasets
##############################
# Sanity checks and correlations reported in the manuscript text.
# NOTE(review): `vector` is presumably the column holding the vector
# (tick) species name -- confirm that column exists in the spreadsheet.
length(tick_dataset_results$vector)
#confirm 289 observations (raw datasets)
t.test(tick_dataset_results$stability_time, tick_dataset_results$data_range)
#t = -15.933, df = 485.02, p-value < 2.2e-16
#difference between datasets is significant
#compare stability time with data range
cor(tick_dataset_results$stability_time, tick_dataset_results$data_range)
#0.8302934
#compare stability time with total number of datasets
cor(tick_dataset_results$stability_time, tick_dataset_results$stability_time_culamative)
# -0.9782056
# create lineplot for years to reach stability for each range of years culamative
years_to_reach_stability_length <- ggplot(tick_dataset_results, aes(x = data_range, y = stability_time, size=stability_time_proportion)) +
geom_point()+
scale_y_continuous(name = "Years to reach stability", expand = c(0,0), limits = c(0,25)) +
xlab("Study length (years)") +
labs(size="Number of datasets") +
scale_size_continuous(limits = c(1,300), breaks=seq(50,300,by=50)) +
xlim(0,25) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
legend.text = element_text(size = 15),
legend.title = element_text(size = 17),
legend.position = c(0.2, 0.8),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)),
plot.margin = margin(10, 20, 5, 5))
years_to_reach_stability_length
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_2_years_to_reach_stability_length_line_chart ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
years_to_reach_stability_length
dev.off()
##############################
# Figure 3: Overall proportion wrong compared to proportion wrong before stability
##############################
#proportion wrong (overall) is looking at all the iterations (the breakups) of the data and
#regression fits to see your odds of getting misleading patterns (across all years and subsets
#of years)
#Proportion wrong before stability is looking specifically at the iterations (breakups)
#*before stability is reached* to give more specific insight into the odds of finding a misleading
#pattern (outside the error bounds around the stability trend line) if your iteration (or study
#length) is shorter than the stability time calculation. This is more informative about stability
#time as a meaningful function than proportion wrong overall.
#comparing overall proportion wrong with proportion wrong before stability
# BUG FIX: these quick-look plots referenced backtick-quoted, space-named
# columns (`proportion wrong`), but every other use in this script
# (t.test below, the ggplots) uses the underscore column names, so the
# space-named columns do not exist and plot() would fail. Use the
# underscore names consistently.
plot(tick_dataset_results$proportion_wrong)
plot(tick_dataset_results$proportion_wrong_before_stability)
t.test(tick_dataset_results$proportion_wrong, tick_dataset_results$proportion_wrong_before_stability)
#t = -1.2137, df = 571.46, p-value = 0.2254
#insignificant difference
#no need for letter assignment
# Stack both measures into one long data frame for a side-by-side boxplot.
tick_dataset_results$pw_label <- "Overall"
tick_dataset_results$pwbs_label <- "Before reaching stability"
#organizing labels and values into dataframe
overall_pw_vs_pwbs_lab <- c(tick_dataset_results$pw_label, tick_dataset_results$pwbs_label)
overall_pw_vs_pwbs_value <- c(tick_dataset_results$proportion_wrong, tick_dataset_results$proportion_wrong_before_stability)
overall_pw_vs_pwbs_df <- data.frame(overall_pw_vs_pwbs_lab, overall_pw_vs_pwbs_value)
#overall proportion significantly wrong by proportion wrong before stability
# reorder() puts the group with the lower mean value first on the x axis.
overall_pw_vs_pwbs <- ggplot(overall_pw_vs_pwbs_df, aes(x = reorder(overall_pw_vs_pwbs_lab, overall_pw_vs_pwbs_value), y = overall_pw_vs_pwbs_value)) +
geom_boxplot() +
geom_jitter() +
scale_x_discrete(name=NULL) +
scale_y_continuous(name = "Proportion wrong", limits = c(0,1.05)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 16),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
overall_pw_vs_pwbs
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_3_overall_pw_vs_pwbs ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
overall_pw_vs_pwbs
dev.off()
##############################
# Figure 4A, 4B comparing stability time and proportion significantly wrong for sampling technique
##############################
# Pairwise Welch t-tests between the two main sampling techniques;
# results recorded inline.
dragging <- subset(tick_dataset_results, sampling_technique == "dragging")
found <- subset(tick_dataset_results, sampling_technique == "found on a person")
length(dragging$stability_time)
#90 datasets
length(found$stability_time)
#198 datasets
length(subset(tick_dataset_results, sampling_technique == "bites found on a person")$stability_time)
#one instance was recorded with sampling technique = bites found on a person which was excluded from this analysis
median(dragging$stability_time)
#7
median(found$stability_time)
#12
t.test(dragging$stability_time, found$stability_time)
#t = -8.5346, df = 236.23, p-value = 1.724e-15
#significant
#use letter assignment to differentiate groups
t.test(dragging$proportion_wrong_before_stability, found$proportion_wrong_before_stability)
#t = 0.083576, df = 155.58, p-value = 0.9335
#insignificant
#no need for letter assignment
###############
# 4A sampling technique vs stability time
###############
# create boxplot for proportion significantly wrong between different sampling methods
tick_dataset_results_drag_found <- subset(tick_dataset_results, sampling_technique == "dragging" | sampling_technique == "found on a person")
#set up compact letter display
# Letters mark groups that are NOT significantly different (Piepho 2004);
# make_letter_assignments() comes from the sourced QsRutils script.
box.rslt <- with(tick_dataset_results_drag_found, graphics::boxplot(stability_time ~ sampling_technique, plot = FALSE))
ttest.rslt <- with(tick_dataset_results_drag_found, pairwise.t.test(stability_time, sampling_technique, pool.sd = FALSE))
ltrs <- make_letter_assignments(ttest.rslt)
x <- c(1:length(ltrs$Letters))
# stats[5, ] is the upper whisker of each box -- letters are drawn just above it.
y <- box.rslt$stats[5, ]
cbd <- ltrs$Letters
ltr_df <- data.frame(x, y, cbd)
stability_time_by_samp_tech <- ggplot(tick_dataset_results_drag_found, aes(x = sampling_technique, y = stability_time)) +
geom_boxplot() +
geom_jitter() +
geom_text(data = ltr_df, aes(x=x, y=y, label=cbd), nudge_y = 1.25,color="red",size=6) +
scale_x_discrete(name = "Sampling technique") +
scale_y_continuous(name = "Stability time", limits = c(0,25)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
stability_time_by_samp_tech
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_4A_stability_time_by_samp_tech ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
stability_time_by_samp_tech
dev.off()
###############
# 4B sampling technique vs proportion wrong before stability
###############
# Boxplot of the proportion of significantly-wrong regressions before
# stability was reached, split by the two main sampling techniques.
# (No compact-letter display: the pairwise t-test above was insignificant.)
tick_dataset_results_drag_found <- subset(
  tick_dataset_results,
  sampling_technique %in% c("dragging", "found on a person")
)
# Blank-canvas theme shared by the manuscript panels.
fig4b_theme <- theme(
  axis.line.x = element_line(size = 0.5, colour = "black"),
  axis.line.y = element_line(size = 0.5, colour = "black"),
  axis.line = element_line(size = 1, colour = "black"),
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  panel.border = element_blank(),
  panel.background = element_blank(),
  plot.title = element_text(size = 27, margin = margin(0, 0, 15, 0)),
  axis.text.x = element_text(colour = "black", size = 18),
  axis.text.y = element_text(colour = "black", size = 18),
  axis.title.x = element_text(size = 23, margin = margin(15, 0, 0, 0)),
  axis.title.y = element_text(size = 23, margin = margin(0, 15, 0, 0))
)
proportion_wrong_before_stab_by_samp_tech <-
  ggplot(tick_dataset_results_drag_found,
         aes(x = sampling_technique, y = proportion_wrong_before_stability)) +
  geom_boxplot() +
  geom_jitter() +
  scale_x_discrete(name = "Sampling technique") +
  scale_y_continuous(name = "Proportion significantly wrong \nbefore stability", limits = c(0,1.05)) +
  fig4b_theme
proportion_wrong_before_stab_by_samp_tech
# Write the standalone panel at 300 dpi with today's date in the filename.
fig4b_file <- paste0(
  "D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_4B_proportion_wrong_before_stab_by_samp_tech ",
  Sys.Date(), ".png"
)
png(filename = fig4b_file, width = 2379, height = 1800, res = 300)
print(proportion_wrong_before_stab_by_samp_tech)
dev.off()
###############
# Combined plots 4A and 4B
###############
#arrange plots 4A and 4B into single image
# Panel A loses its x-axis title (shared with B below) and has its y-axis
# title pulled in so the two panels align vertically.
figure4AB <- ggarrange(
stability_time_by_samp_tech +
scale_x_discrete(name = NULL) +
theme(axis.title.y = element_text(margin=margin(0,-20,0,0))),
proportion_wrong_before_stab_by_samp_tech,
labels = c("A", "B"), nrow = 2, ncol=1, align = "v", font.label = list(size=25), hjust=-7
)
figure4AB
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_4AB ",Sys.Date(),".png", sep = ''), width = 2379, height = 3600, res = 300)
figure4AB
dev.off()
##############################
# Figure 5A, 5B comparing stability time and proportion significantly wrong for life stage
##############################
# Life-stage subsets; note that the raw data uses both singular and
# plural labels ("adult"/"adults", "nymph"/"nymphs"), pooled here.
adults <- subset(tick_dataset_results, life_stage == "adult" | life_stage == "adults")
nymphs <- subset(tick_dataset_results, life_stage == "nymph" | life_stage == "nymphs")
larvae <- subset(tick_dataset_results, life_stage == "larvae")
t.test(adults$stability_time, nymphs$stability_time)
#t = -0.63139, df = 128.99, p-value = 0.5289
#insignificant
t.test(adults$stability_time, larvae$stability_time)
#t = -5.9627, df = 10.111, p-value = 0.0001328
#significant
t.test(nymphs$stability_time, larvae$stability_time)
#t = -5.5593, df = 10.325, p-value = 0.0002145
#significant
t.test(adults$proportion_wrong_before_stability, nymphs$proportion_wrong_before_stability)
#t = 2.9877, df = 112.55, p-value = 0.003451
#significant
t.test(adults$proportion_wrong_before_stability, larvae$proportion_wrong_before_stability)
#t = 0.43788, df = 8.7735, p-value = 0.6721
#insignificant
t.test(nymphs$proportion_wrong_before_stability, larvae$proportion_wrong_before_stability)
#t = -0.77913, df = 7.8459, p-value = 0.4588
#insignificant
length(adults$stability_time)
#63 datasets
length(nymphs$stability_time)
#68 datasets
length(larvae$stability_time)
#8 datasets
length(subset(tick_dataset_results, life_stage == "unspecified" | life_stage == "not specified")$stability_time)
#150
#all subsets add up to 289
median(adults$stability_time)
#7
median(nymphs$stability_time)
#7
median(larvae$stability_time)
#11.5
median(adults$proportion_wrong_before_stability)
#0.1
median(nymphs$proportion_wrong_before_stability)
#0.04166667
median(larvae$proportion_wrong_before_stability)
#0.07340067
###############
# 5A life stage vs stability time
###############
# Normalize plural life-stage labels so the plotted subsets match the
# t-test subsets above (which pool "adult"/"adults" and "nymph"/"nymphs").
# BUG FIX: the original row-wise loop only recoded "nymphs", so any
# "adults" rows were silently excluded from the figures even though they
# were counted in the statistics. Vectorized recode replaces the loop.
tick_dataset_results$life_stage[tick_dataset_results$life_stage == "nymphs"] <- "nymph"
tick_dataset_results$life_stage[tick_dataset_results$life_stage == "adults"] <- "adult"
tick_dataset_results_ls <- subset(tick_dataset_results, life_stage == "larvae" | life_stage == "nymph" | life_stage == "adult")
# Order the x axis larvae -> nymph -> adult (developmental order).
tick_dataset_results_ls$life_stage <- factor(tick_dataset_results_ls$life_stage, c("larvae", "nymph", "adult"))
#set up compact letter display (letters mark groups not significantly different)
box.rslt <- with(tick_dataset_results_ls, graphics::boxplot(stability_time ~ life_stage, plot = FALSE))
ttest.rslt <- with(tick_dataset_results_ls, pairwise.t.test(stability_time, life_stage, pool.sd = FALSE))
ltrs <- make_letter_assignments(ttest.rslt)
x <- c(1:length(ltrs$Letters))
# stats[5, ] is the upper whisker of each box; letters sit just above it.
y <- box.rslt$stats[5, ]
cbd <- ltrs$Letters
ltr_df <- data.frame(x, y, cbd)
stability_time_by_life_stage <- ggplot(tick_dataset_results_ls, aes(x = life_stage, y = stability_time)) +
geom_boxplot() +
geom_jitter() +
geom_text(data = ltr_df, aes(x=x, y=y, label=cbd), nudge_y = 1.25,color="red", size=6) +
scale_x_discrete(name = "Life stage") +
scale_y_continuous(name = "Stability time", limits = c(0,25)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
stability_time_by_life_stage
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_5A_stability_time_by_life_stage ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
stability_time_by_life_stage
dev.off()
###############
#life stage vs proportion wrong before stability
###############
# create boxplot for proportion significantly wrong between different life stages
# NOTE(review): this subset matches only the singular "adult"; if any rows
# are labeled "adults" in the raw data they are excluded here even though
# the t-tests above pooled both spellings -- confirm against the sheet.
tick_dataset_results_ls <- subset(tick_dataset_results, life_stage == "larvae" | life_stage == "nymph" | life_stage == "adult")
tick_dataset_results_ls$life_stage <- factor(tick_dataset_results_ls$life_stage, c("larvae", "nymph", "adult"))
#set up compact letter display (letters mark groups not significantly different)
box.rslt <- with(tick_dataset_results_ls, graphics::boxplot(proportion_wrong_before_stability ~ life_stage, plot = FALSE))
ttest.rslt <- with(tick_dataset_results_ls, pairwise.t.test(proportion_wrong_before_stability, life_stage, pool.sd = FALSE))
ltrs <- make_letter_assignments(ttest.rslt)
x <- c(1:length(ltrs$Letters))
# stats[5, ] is the upper whisker of each box; letters sit just above it.
y <- box.rslt$stats[5, ]
cbd <- ltrs$Letters
ltr_df <- data.frame(x, y, cbd)
proportion_wrong_before_stab_by_life_stage <- ggplot(tick_dataset_results_ls, aes(x = life_stage, y = proportion_wrong_before_stability)) +
geom_boxplot() +
geom_jitter() +
geom_text(data = ltr_df, aes(x=x, y=y, label=cbd), nudge_y = 0.05,color="red", size=6) +
scale_x_discrete(name = "Life stage") +
scale_y_continuous(name = "Proportion significantly wrong \nbefore stability", limits = c(0,1.05)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
proportion_wrong_before_stab_by_life_stage
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_5B_proportion_wrong_before_stab_by_life_stage ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
proportion_wrong_before_stab_by_life_stage
dev.off()
###############
# Combined plots 5A and 5B
###############
#arrange plots 5A and 5B into single image
# Panel A drops its x-axis title (shared with B) and pulls in its y-axis
# title so both panels align vertically.
figure5AB <- ggarrange(
stability_time_by_life_stage +
scale_x_discrete(name = NULL) +
theme(axis.title.y = element_text(margin=margin(0,-20,0,0))),
proportion_wrong_before_stab_by_life_stage,
labels = c("A", "B"), nrow = 2, ncol=1, align = "v", font.label = list(size=25), hjust=-7
)
figure5AB
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_5AB ",Sys.Date(),".png", sep = ''), width = 2379, height = 3600, res = 300)
figure5AB
dev.off()
##############################
# Figure 6A, 6B comparing stability time and proportion significantly wrong for geographic scope
##############################
# Subsets by spatial extent of the study, smallest (Grid) to largest (County).
county <- subset(tick_dataset_results, geographic_scope == "County")
town <- subset(tick_dataset_results, geographic_scope == "Town")
state_forest <- subset(tick_dataset_results, geographic_scope == "State forest")
grid <- subset(tick_dataset_results, geographic_scope == "Grid")
length(county$stability_time)
#73 datasets
length(town$stability_time)
#186 datasets
length(state_forest$stability_time)
#6 datasets
length(grid$stability_time)
#24 datasets
# NOTE(review): the four "median" values recorded below (73, 186, 6, 24)
# are identical to the dataset counts above -- they look copy-pasted and
# should be re-run and re-recorded.
median(county$stability_time)
#73
median(town$stability_time)
#186
median(state_forest$stability_time)
#6
median(grid$stability_time)
#24
t.test(county$stability_time, town$stability_time)
#t = -17.029, df = 243.09, p-value < 2.2e-16
#signficiant
t.test(county$stability_time, state_forest$stability_time)
#t = -5.5457, df = 5.2241, p-value = 0.002278
#significant
t.test(county$stability_time, grid$stability_time)
#t = -17.207, df = 33.87, p-value < 2.2e-16
#signifcant
t.test(town$stability_time, state_forest$stability_time)
#t = 0.47098, df = 6.1169, p-value = 0.654
#insignificant
t.test(town$stability_time, grid$stability_time)
#t = 0.22406, df = 86.176, p-value = 0.8232
#insignificant
t.test(state_forest$stability_time, grid$stability_time)
#t = -0.37552, df = 6.0307, p-value = 0.7201
#insignificant
#----
t.test(county$proportion_wrong_before_stability, town$proportion_wrong_before_stability)
#t = 0.71555, df = 115.03, p-value = 0.4757
#insignficiant
t.test(county$proportion_wrong_before_stability, state_forest$proportion_wrong_before_stability)
#t = 5.2409, df = 74.293, p-value = 1.445e-06
#significant
t.test(county$proportion_wrong_before_stability, grid$proportion_wrong_before_stability)
#t = 1.3299, df = 43.066, p-value = 0.1905
#insignifcant
t.test(town$proportion_wrong_before_stability, state_forest$proportion_wrong_before_stability)
#t = 7.4221, df = 46.441, p-value = 2.013e-09
#significant
t.test(town$proportion_wrong_before_stability, grid$proportion_wrong_before_stability)
#t = 1.0046, df = 28.519, p-value = 0.3235
#insignificant
t.test(state_forest$proportion_wrong_before_stability, grid$proportion_wrong_before_stability)
#t = -1.9093, df = 25.101, p-value = 0.06772
#insignificant
###############
#geographic scope vs stability time
###############
#order factors
# Orders x axis smallest-to-largest scope. Any geographic_scope value not
# in this list becomes NA and is dropped from the boxplot formula.
tick_dataset_results$geographic_scope <- factor(tick_dataset_results$geographic_scope, c("Grid", "State forest", "Town", "County"))
#set up compact letter display (letters mark groups not significantly different)
box.rslt <- with(tick_dataset_results, graphics::boxplot(stability_time ~ geographic_scope, plot = FALSE))
ttest.rslt <- with(tick_dataset_results, pairwise.t.test(stability_time, geographic_scope, pool.sd = FALSE))
ltrs <- make_letter_assignments(ttest.rslt)
x <- c(1:length(ltrs$Letters))
# stats[5, ] is the upper whisker of each box; letters sit just above it.
y <- box.rslt$stats[5, ]
cbd <- ltrs$Letters
ltr_df <- data.frame(x, y, cbd)
# create boxplot for stability time between different geographic scopes
stability_time_by_geographic_scope <- ggplot(tick_dataset_results, aes(x = geographic_scope, y = stability_time)) +
geom_boxplot() +
geom_jitter() +
geom_text(data = ltr_df, aes(x=x, y=y, label=cbd), nudge_y = 1.25,color="red", size=6) +
scale_x_discrete(name = "Geographic scope") +
scale_y_continuous(name = "Stability time", limits = c(0,25), breaks = c(0,5,10,15,20,25)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
stability_time_by_geographic_scope
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_6A_stability_time_by_geographic_scope ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
stability_time_by_geographic_scope
dev.off()
###############
#geographic scope vs proportion wrong before stability
###############
# create boxplot for proportion significantly wrong between different geographic scopes
tick_dataset_results$geographic_scope <- factor(tick_dataset_results$geographic_scope, c("Grid", "State forest", "Town", "County"))
#set up compact letter display (letters mark groups not significantly different)
box.rslt <- with(tick_dataset_results, graphics::boxplot(proportion_wrong_before_stability ~ geographic_scope, plot = FALSE))
ttest.rslt <- with(tick_dataset_results, pairwise.t.test(proportion_wrong_before_stability, geographic_scope, pool.sd = FALSE))
ltrs <- make_letter_assignments(ttest.rslt)
x <- c(1:length(ltrs$Letters))
# stats[5, ] is the upper whisker of each box; letters sit just above it.
y <- box.rslt$stats[5, ]
cbd <- ltrs$Letters
ltr_df <- data.frame(x, y, cbd)
proportion_wrong_before_stab_by_geographic_scope <- ggplot(tick_dataset_results, aes(x = geographic_scope, y = proportion_wrong_before_stability)) +
geom_boxplot() +
geom_jitter() +
geom_text(data = ltr_df, aes(x=x, y=y, label=cbd), nudge_y = 0.05,color="red", size=6) +
scale_x_discrete(name = "Geographic scope") +
scale_y_continuous(name = "Proportion significantly wrong \nbefore stability", limits = c(0,1.06)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
proportion_wrong_before_stab_by_geographic_scope
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_6B_proportion_wrong_before_stab_by_geographic_scope ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
proportion_wrong_before_stab_by_geographic_scope
dev.off()
###############
# Combined plots 6A and 6B
###############
#arrange plots 6A and 6B into single image
# Panel A drops its x-axis title (shared with B) and pulls in its y-axis
# title so both panels align vertically.
figure6AB <- ggarrange(
stability_time_by_geographic_scope +
scale_x_discrete(name = NULL) +
theme(axis.title.y = element_text(margin=margin(0,-20,0,0))),
proportion_wrong_before_stab_by_geographic_scope,
labels = c("A", "B"), nrow = 2, ncol=1, align = "v", font.label = list(size=25), hjust=-7
)
figure6AB
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_6AB ",Sys.Date(),".png", sep = ''), width = 2379, height = 3600, res = 300)
figure6AB
dev.off()
##############################
# Figure 7A, 7B comparing stability time and proportion significantly wrong for sampling metric
##############################
# "Sampling metric": whether ticks were tested for B. burgdorferi
# infection (Y) or only counted for abundance (N).
# NOTE(review): the column name carries a typo ("burgdoferi") -- kept
# because it must match the spreadsheet header.
pathogen <- subset(tick_dataset_results, tested_for_b_burgdoferi == "Y")
abundance <- subset(tick_dataset_results, tested_for_b_burgdoferi == "N")
length(pathogen$stability_time)
#114 datasets
length(abundance$stability_time)
#175 datasets
t.test(pathogen$stability_time, abundance$stability_time)
#t = -1.2879, df = 283.9, p-value = 0.1988
t.test(pathogen$proportion_wrong_before_stability, abundance$proportion_wrong_before_stability)
#t = -1.1828, df = 232.98, p-value = 0.2381
###############
#ticks infected vs stability time
###############
# Build a long data frame: one label + stability_time pair per dataset,
# split by sampling metric. (Variable names `test_abuance` /
# `..._abudance_...` carry typos but are kept: they are reused below.)
tested_for_path <- tick_dataset_results[tick_dataset_results$tested_for_b_burgdoferi == "Y",]
tested_for_path$label <- "Tested for infection \nof B. burgdorferi"
test_abuance <- tick_dataset_results[tick_dataset_results$tested_for_b_burgdoferi == "N",]
test_abuance$label <- "Sampled for \nAbundance"
abundance_vs_infected_ticks <- c(tested_for_path$label, test_abuance$label)
stability_time_for_abudance_vs_infected_ticks <- c(tested_for_path$stability_time, test_abuance$stability_time)
tick_infection_data <- data.frame(abundance_vs_infected_ticks, stability_time_for_abudance_vs_infected_ticks)
t.test(tick_infection_data[tick_infection_data$abundance_vs_infected_ticks == "Tested for infection \nof B. burgdorferi",]$stability_time_for_abudance_vs_infected_ticks, tick_infection_data[tick_infection_data$abundance_vs_infected_ticks == "Sampled for \nAbundance",]$stability_time_for_abudance_vs_infected_ticks)
# t = -1.2879, df = 283.9, p-value = 0.1988
#
# p-value higher than 0.05: fail to reject the null hypothesis of equal
# means; the difference between groups is statistically insignificant,
# so no compact-letter assignment is needed.
#proportion significant by ticks infected and total ticks
stability_time_by_metric <- ggplot(tick_infection_data, aes(x = abundance_vs_infected_ticks, y = stability_time_for_abudance_vs_infected_ticks)) +
geom_boxplot() +
geom_jitter() +
scale_x_discrete(name = "Sampling metric") +
scale_y_continuous(name = "Stability time", limits = c(0, 25)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
stability_time_by_metric
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_7A_stability_time_by_metric ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
stability_time_by_metric
dev.off()
###############
#ticks infected vs proportion wrong before stability
###############
# Same construction as 7A but for proportion wrong before stability.
tested_for_path <- tick_dataset_results[tick_dataset_results$tested_for_b_burgdoferi == "Y",]
tested_for_path$label <- "Tested for infection \nof B. burgdorferi"
test_abuance <- tick_dataset_results[tick_dataset_results$tested_for_b_burgdoferi == "N",]
test_abuance$label <- "Sampled for \nAbundance"
abundance_vs_infected_ticks <- c(tested_for_path$label, test_abuance$label)
proportion_wrong_before_stab_for_abudance_vs_infected_ticks <- c(tested_for_path$proportion_wrong_before_stability, test_abuance$proportion_wrong_before_stability)
tick_infection_data <- data.frame(abundance_vs_infected_ticks, proportion_wrong_before_stab_for_abudance_vs_infected_ticks)
t.test(tick_infection_data[tick_infection_data$abundance_vs_infected_ticks == "Tested for infection \nof B. burgdorferi",]$proportion_wrong_before_stab_for_abudance_vs_infected_ticks, tick_infection_data[tick_infection_data$abundance_vs_infected_ticks == "Sampled for \nAbundance",]$proportion_wrong_before_stab_for_abudance_vs_infected_ticks)
# t = -1.1828, df = 232.98, p-value = 0.2381
#
# p-value higher than 0.05: fail to reject the null hypothesis of equal
# means; the difference is statistically insignificant, so no
# compact-letter assignment is needed.
#proportion significant by ticks infected and total ticks
proportion_wrong_before_stab_by_metric <- ggplot(tick_infection_data, aes(x = abundance_vs_infected_ticks, y = proportion_wrong_before_stab_for_abudance_vs_infected_ticks)) +
geom_boxplot() +
geom_jitter() +
scale_x_discrete(name = "Sampling metric") +
scale_y_continuous(name = "Proportion significantly wrong \nbefore stability", limits = c(0, 1.05)) +
theme(axis.line.x = element_line(size = 0.5, colour = "black"),
axis.line.y = element_line(size = 0.5, colour = "black"),
axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
plot.title=element_text(size = 27, margin=margin(0,0,15,0)),
axis.text.x=element_text(colour="black", size = 18),
axis.text.y=element_text(colour="black", size = 18),
axis.title.x = element_text(size = 23, margin=margin(15,0,0,0)),
axis.title.y = element_text(size = 23, margin=margin(0,15,0,0)))
proportion_wrong_before_stab_by_metric
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_7B_proportion_wrong_before_stab_by_metric ",Sys.Date(),".png", sep = ''), width = 2379, height = 1800, res = 300)
proportion_wrong_before_stab_by_metric
dev.off()
###############
# Combined plots 7A and 7B
###############
# Arrange plots 7A and 7B into a single two-row image.
figure7AB <- ggarrange(
  # Top panel: drop the x-axis title (shared with the bottom panel) and pull
  # the y-axis title closer so both panels align vertically.
  stability_time_by_metric +
    scale_x_discrete(name = NULL) +
    theme(axis.title.y = element_text(margin=margin(0,-20,0,0))),
  proportion_wrong_before_stab_by_metric,
  labels = c("A", "B"), nrow = 2, ncol=1, align = "v", font.label = list(size=25), hjust=-7
)
figure7AB
# Write the combined figure to a 300 dpi PNG with a date-stamped name.
png(filename = paste("D:/Ixodes_scapularis_research_2019/tick_dataset_results_analysis/manuscript_figures/figure_7AB ",Sys.Date(),".png", sep = ''), width = 2379, height = 3600, res = 300)
figure7AB
dev.off()
|
bc2353df318fc51df0c74ed2c76f65ab7529d0cb
|
ae418ff00f688c16ebca5db69bfaf3cb6f05b1c0
|
/R/melt-internal.R
|
e058142d0acc4cb1dd8e9ac63a0daa25fe656605
|
[
"MIT"
] |
permissive
|
enginbozaba/bcbioRNASeq
|
8461b7dcbe0b0e92589016402e790184b3c364f2
|
0216e92a166d28392ecf7ec6057cc510b3f4c0c9
|
refs/heads/master
| 2020-05-02T20:02:04.141120
| 2019-01-28T14:03:24
| 2019-01-28T14:03:41
| 178,177,306
| 1
| 0
|
MIT
| 2019-03-28T10:07:37
| 2019-03-28T10:07:35
| null |
UTF-8
|
R
| false
| false
| 1,261
|
r
|
melt-internal.R
|
#' Melt Counts Matrix to Long Format
#'
#' Convert a genes x samples counts matrix into a long (tidy) table with one
#' row per gene/sample pair, optionally annotated with per-sample metadata.
#'
#' @author Michael Steinbaugh
#' @keywords internal
#' @noRd
#'
#' @seealso [reshape2::melt()].
#'
#' @return `grouped_df`, grouped by `sampleID` and `geneID`.
#'   NOTE(review): when `sampleData` is supplied, `merge()` returns a plain
#'   `data.frame`, so the grouping is dropped on that branch -- confirm
#'   whether callers rely on the grouping there.
#'
#' @examples
#' counts <- counts(bcb_small)
#' sampleData <- sampleData(bcb_small)
#' x <- .meltCounts(counts, sampleData)
.meltCounts <- function(counts, sampleData = NULL) {
    assert_is_matrix(counts)
    # Reshape wide counts to long form, one explicit step at a time.
    long <- rownames_to_column(as.data.frame(counts))
    long <- melt(long, id = 1L)
    long <- as_tibble(long)
    long <- set_colnames(long, c("geneID", "sampleID", "counts"))
    long <- arrange(long, !!!syms(c("sampleID", "geneID")))
    long <- group_by(long, !!!syms(c("sampleID", "geneID")))
    # Optionally annotate every row with the matching sample metadata.
    if (length(sampleData)) {
        assert_are_set_equal(colnames(counts), rownames(sampleData))
        sampleData[["sampleID"]] <- rownames(sampleData)
        long <- merge(
            x = long,
            y = as.data.frame(sampleData),
            by = "sampleID",
            all.x = TRUE
        )
    }
    # Fall back to sampleID when no interestingGroups column is present.
    if (!"interestingGroups" %in% colnames(long)) {
        long[["interestingGroups"]] <- long[["sampleID"]]
    }
    long
}
# Melt log2(counts + 1) to long format; all other arguments are forwarded
# to .meltCounts().
.meltLog2Counts <- function(counts, ...) {
    .meltCounts(log2(counts + 1L), ...)
}
|
9d2f4dd315b1f4b543fc833bf02e16fab4554093
|
e47a4995c1f02d90521f4cdb5a3becdba2520d49
|
/man/WLmult_dauer_res.Rd
|
b475b60463e69024187283dc6d16147274b2e6e7
|
[] |
no_license
|
SenguptaLab/MF.matR
|
9af3fbf77628250748af1a2a4a4515c09093e936
|
c64b0097131c6a2103e9489aa76692ae78085533
|
refs/heads/master
| 2023-01-24T22:06:28.450669
| 2023-01-12T22:19:19
| 2023-01-12T22:19:19
| 167,600,474
| 0
| 4
| null | 2023-01-12T22:19:20
| 2019-01-25T19:17:28
|
R
|
UTF-8
|
R
| false
| true
| 803
|
rd
|
WLmult_dauer_res.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WLmult_dauer_res.R
\name{WLmult_dauer_res}
\alias{WLmult_dauer_res}
\title{Wrapper for 'Import.WL.data' and 'plot_Residency' to allow multiple datasets to be simultaneously
analyzed. Uses recursive search for a *position.csv file, then makes a file list.}
\usage{
WLmult_dauer_res()
}
\arguments{
\item{bin.length}{length of time bins in seconds. Used for state analysis}
\item{frame.rate}{video frame rate}
\item{num.tracks}{optional argument to limit input to certain number of worm tracks}
}
\description{
Wrapper for 'Import.WL.data' and 'plot_Residency' to allow multiple datasets to be simultaneously
analyzed. Uses recursive search for a *position.csv file, then makes a file list.
}
\examples{
WLmult_dauer_res()
}
|
31cc5c46eb3063f42a8bd33959b52c127007159c
|
0b551347a29f4e01e9273615ce0c5242f9bdb63a
|
/pkg/tests/testthat/misc/test_simple2.R
|
3198fa3c33d0d1a903046860f278ad3f5182bf5a
|
[] |
no_license
|
timemod/dynmdl
|
8088fecc6c2b84d50ecb7d7b762bddb2b1fcf629
|
8dc49923e2dcc60b15af2ae1611cb3a86f87b887
|
refs/heads/master
| 2023-04-07T21:30:53.271703
| 2023-03-03T13:02:30
| 2023-03-03T13:02:30
| 148,925,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,685
|
r
|
test_simple2.R
|
# Test fixture for the simple2 model: builds the model, the analytical
# reference solutions and the expected endogenous-data tables used by the
# test_that blocks below.
library(dynmdl)
source("simple_model_utils.R")
mod_file <- "mod/simple2.mod"
# compile the model (capture_output silences the compiler report)
report <- capture_output(mdl <- dyn_mdl(mod_file))
mdl$set_period("2015/2017")
data_per <- mdl$get_data_period()
nper <- nperiod(data_per)
lead_per <- mdl$get_lead_period()
lag_per <- mdl$get_lag_period()
# Analytical eigenvalues for the perturbation test.
eigvals <- get_analytical_eigvals(mdl$get_param())
# Reference solutions over the data period plus one extra period (needed to
# construct the lead series yplus).
y_ref_per <- period_range(start_period(data_per), end_period(data_per) + 1)
y_ref1 <- get_analytical_result(y0 = 1, x1 = 0, period = y_ref_per,
                                mdl$get_param())
y_ref2 <- get_analytical_result(y0 = 1, x1 = 1, period = y_ref_per,
                                mdl$get_param())
# Expected table 1: no exogenous shock (x1 = 0).
ymin_ref1 <- lag(y_ref1, -1)
colnames(ymin_ref1) <- "ymin"
yplus_ref1 <- lag(y_ref1, 1)
colnames(yplus_ref1) <- "yplus"
exo_ref1 <- regts(0, period = data_per)
ref1 <- cbind(y_ref1, yplus_ref1, ymin_ref1, exo = exo_ref1)[data_per]
ref1[lag_per, "ymin"] <- 0
ts_labels(ref1) <- colnames(ref1)
# Expected table 2: unit exogenous shock in the second period (x1 = 1).
ymin_ref2 <- lag(y_ref2, -1)[data_per]
colnames(ymin_ref2) <- "ymin"
yplus_ref2 <- lag(y_ref2, 1)[data_per]
colnames(yplus_ref2) <- "yplus"
exo_ref2 <- regts(c(0, 1, rep(0, nper - 2)), period = data_per)
ref2 <- cbind(y_ref2, yplus_ref2, ymin_ref2, exo = exo_ref2)[data_per]
ref2[lag_per, "ymin"] <- 0
ts_labels(ref2) <- colnames(ref2)
# Initial condition for y in the lag period.
mdl$set_data(regts(1, period = lag_per), names = "y")
test_that("steady state calculation", {
  # Work on a clone so the shared fixture `mdl` is not mutated.
  mdl_stat <- mdl$clone()
  # Start values for the steady-state solver.
  # FIX: these were previously set on `mdl` (the shared fixture) instead of
  # on the clone, so the clone solved from default starting values and the
  # fixture was polluted for the subsequent tests.
  mdl_stat$set_static_endos(c(y = 2, yplus = 1, ymin = 9, exo = 0))
  mdl_stat$solve_steady(control = list(silent = TRUE))
  # The simple2 model has a zero steady state.
  expected_result <- c(y = 0, yplus = 0, ymin = 0, exo = 0)
  expect_equal(mdl_stat$get_static_endos(), expected_result)
})
test_that("solve", {
  # Case 1: no exogenous shock; terminal condition for y from ref1.
  mdl1 <- mdl$clone()
  mdl1$set_data(ref1[lead_per, "y", drop = FALSE])
  mdl1$solve(silent = TRUE)
  # Case 2: unit shock to x in the first period; terminal condition from ref2.
  mdl2 <- mdl1$clone()
  mdl2$set_data(ref2[lead_per, "y", drop = FALSE])
  mdl2$set_data(regts(1, start = start_period(mdl$get_period())),
                names = "x")
  mdl2$solve(silent = TRUE)
  # Both solved paths must match the analytical references.
  per <- mdl$get_period()
  expect_equal(mdl1$get_endo_data(period = per), ref1[per, ])
  expect_equal(mdl2$get_endo_data(period = per), ref2[per, ])
})
test_that("solve_perturbation", {
  # Case 1: no exogenous shock.
  mdl1 <- mdl$clone()
  mdl1$solve_perturbation()
  # Supply the lagged reference as the lead series in the lag period.
  # (A dead assignment `x <- lag(y_ref1)[lag_per]` was removed here; the
  # local was never used.)
  mdl1$set_data(lag(y_ref1)[lag_per], names = "yplus")
  expect_equal(mdl1$get_endo_data(), ref1)
  expect_equal(mdl1$get_eigval(), eigvals)
  # Case 2: unit shock to x in the first period.
  mdl2 <- mdl1$clone()
  mdl2$set_data(lag(y_ref2)[lag_per], names = "yplus")
  mdl2$set_data(regts(1, start = start_period(mdl$get_period())),
                names = "x")
  mdl2$solve_perturbation()
  # mdl1 is re-checked to confirm it was unaffected by the clone's updates.
  expect_equal(mdl1$get_endo_data(), ref1)
  expect_equal(mdl2$get_endo_data(), ref2)
})
|
d2c2a03e859572f0aae5a2901ae55ba130a76228
|
0a4d3bed2892a640ad8d2a6fb77f95212b0ce618
|
/code/transmission/deseq2_transmission_mcav_sym.R
|
10e202d9cef3b6c3ce45c7021f5a5e770496e095
|
[] |
no_license
|
mstudiva/SCTLD-intervention-transcriptomics
|
ec2b24ad9259628157ad4f5352a4aae6d1f9f4d9
|
1f537dc0d2a4d7317b2d68b910caf670167fdb9a
|
refs/heads/main
| 2023-06-14T01:34:23.522098
| 2023-06-02T15:21:41
| 2023-06-02T15:21:41
| 418,661,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,202
|
r
|
deseq2_transmission_mcav_sym.R
|
#### PACKAGES ####
# run these once, then comment out
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install(version = "3.10")
# BiocManager::install("DESeq2",dependencies=T)
# BiocManager::install("arrayQualityMetrics",dependencies=T) # requires Xquartz, xquartz.org
# BiocManager::install("BiocParallel")
# install.packages("pheatmap")
# install.packages("VennDiagram")
# install.packages("gplots")
# install.packages("vegan")
# install.packages("plotrix")
# install.packages("ape")
# install.packages("ggplot2")
# install.packages("rgl")
# install.packages("adegenet")
#### DATA IMPORT ####
# assembling data, running outlier detection, and fitting models
# (skip this section if you don't need to remake models)
library(DESeq2)
library(arrayQualityMetrics)
#read in counts (genes x samples)
counts = read.table("allcounts_transmission_mcav_sym.txt")
# how many genes we have total?
nrow(counts)
ncol(counts)
# how does the data look?
head(counts)
# removing the parent sample (assumed to be column 41 -- verify against the
# sample sheet if the input file changes)
counts <- subset(counts, select = -c(41))
# keep only genes with a total count of at least 10 across all samples
keep <- rowSums(counts) >= 10
countData <- counts[keep,]
nrow(countData)
ncol(countData)
write.csv(countData, file="countData.csv")
# for WGCNA: removing all genes with counts of <10 in more than 90 % of samples
counts4wgcna = counts[apply(counts,1,function(x) sum(x<10))<ncol(counts)*0.9,]
nrow(counts4wgcna)
ncol(counts4wgcna)
write.csv(counts4wgcna, file="counts4wgcna.csv")
# importing a design .csv file and dropping the parent genotype to match the
# counts matrix above
design = read.csv("design_transmission_mcav.csv", head=TRUE)
design <- design[design$genotype != "Parent", ]
design
str(design)
#### MODEL DESIGN and OUTLIERS ####
# make big dataframe including all factors and interaction, getting normalized data for outlier detection
dds = DESeqDataSetFromMatrix(countData=countData, colData=design, design=~ genotype+fate)
# order the fate factor levels from healthy to diseased
dds$fate <- factor(dds$fate, levels = c("healthy", "nai", "diseased"))
# for large datasets, rlog may take too much time, especially for an unfiltered dataframe
# vsd is much faster and still works for outlier detection
Vsd=varianceStabilizingTransformation(dds)
library(Biobase)
e=ExpressionSet(assay(Vsd), AnnotatedDataFrame(as.data.frame(colData(Vsd))))
# running outlier detection
arrayQualityMetrics(e,intgroup=c("fate"),force=T)
# open the directory "arrayQualityMetrics report for e" in your working directory and open index.html
# Array metadata and outlier detection overview gives a report of all samples, and which are likely outliers according to the 3 methods tested. I typically remove the samples that violate *1 (distance between arrays).
# Figure 2 shows a bar plot of array-to-array distances and an outlier detection threshold based on your samples. Samples above the threshold are considered outliers
# under Figure 3: Principal Components Analyses, look for any points far away from the rest of the sample cluster
# use the array number for removal in the following section
# if there were outliers: sample indices taken from the report above
outs=c(7,8,35,39)
countData=countData[,-outs]
Vsd=Vsd[,-outs]
counts4wgcna=counts4wgcna[,-outs]
design=design[-outs,]
# remaking model with outliers removed from dataset
dds = DESeqDataSetFromMatrix(countData=countData, colData=design, design=~ genotype+fate)
dds$fate <- factor(dds$fate, levels = c("healthy", "nai", "diseased"))
# save all these dataframes as an Rdata package so you don't need to rerun each time
save(dds,design,countData,Vsd,counts4wgcna,file="initial.RData")
# generating normalized variance-stabilized data for PCoA, heatmaps, etc
vsd=assay(Vsd)
# takes the sample IDs and factor levels from the design to create new column
# names for the dataframe (columns 4 and 6 of the design are presumed to be
# the labelling factors -- confirm against design_transmission_mcav.csv)
snames=paste(colnames(countData),design[,4],design[,6],sep=".")
# renames the column names
colnames(vsd)=snames
save(vsd,design,file="vsd.RData")
# more reduced stabilized dataset for WGCNA
wg = DESeqDataSetFromMatrix(countData=counts4wgcna, colData=design, design=~ genotype+fate)
vsd.wg=assay(varianceStabilizingTransformation(wg), blind=TRUE)
# vsd.wg=assay(rlog(wg), blind=TRUE)
head(vsd.wg)
colnames(vsd.wg)=snames
save(vsd.wg,design,file="data4wgcna.RData")
#### PCOA and PERMANOVA ####
# heatmap and hierarchical clustering:
load("vsd.RData")
library(pheatmap)
# similarity among samples (sample-sample correlation of stabilized counts)
pdf(file="heatmap_transmission_mcav_sym.pdf", width=15, height=15)
pheatmap(cor(vsd))
dev.off()
# Principal coordinates analysis
library(vegan)
# library(rgl)
library(ape)
conditions=design
conditions$fate <- factor(conditions$fate, levels = c("healthy", "nai", "diseased"))
# creating a PCoA eigenvalue matrix (Manhattan distance, scaled by 1000)
dds.pcoa=pcoa(dist(t(vsd),method="manhattan")/1000)
scores=dds.pcoa$vectors
# copy this table for % variation explained by each axis (Relative_eig column)
dds.pcoa$values
# how many good PC's do we have? Compared to random ("broken stick") model
# plotting PCoA eigenvalues
pdf(file="PCoA_Manhattan.pdf", width=6, height=6)
plot(dds.pcoa$values$Relative_eig)
points(dds.pcoa$values$Broken_stick,col="red",pch=3)
dev.off()
# the number of black points above the line of red crosses (random model) corresponds to the number of good PC's
# plotting PCoA by fate and treatment (two panels, colour/shape swapped)
pdf(file="PCoA_transmission_mcav_sym.pdf", width=12, height=6)
par(mfrow=c(1,2))
plot(scores[,1], scores[,2],col=c("green","orange","red")[as.numeric(as.factor(conditions$fate))],pch=c(15,17,19)[as.numeric(as.factor(conditions$treatment))], xlab="Coordinate 1", ylab="Coordinate 2", main="Fate")
ordispider(scores, conditions$fate, label=F, col=c("green","orange","red"))
legend("topright", legend=c("healthy", "NAI", "diseased"), fill = c("green","orange","red"), bty="n")
legend("topleft", legend=c("control","sctld"), pch=c(15,19), bty="n")
plot(scores[,1], scores[,2],col=c("green","black","red")[as.numeric(as.factor(conditions$treatment))],pch=c(15,17,19)[as.numeric((as.factor(conditions$fate)))], xlab="Coordinate 1", ylab="Coordinate 2", main="Treatment")
ordispider(scores, conditions$treatment, label=F, col=c("green","black","red"))
legend("topleft", legend=c("control", "sctld"), fill = c("green","red"), bty="n")
legend("topright", legend=c("healthy","NAI","diseased"), pch=c(15,17,19), bty="n")
dev.off()
# neighbor-joining tree of samples (based on significant PCo's):
pdf(file="PCoA_tree.pdf", width=10, height=10)
tre=nj(dist(scores[,1:4]))
plot(tre,cex=0.8)
dev.off()
# formal analysis of variance in distance matrices:
ad=adonis(t(vsd)~genotype+fate,data=conditions,method="manhattan",permutations=1e6)
ad
# creating pie chart to represent ANOVA results
# The three slices are the R2 of genotype, fate and Residuals; the labels
# must match those three rows (this previously used row.names(...)[1:4],
# which also included the unused "Total" row and mislabelled the slices).
cols=c("blue","orange","grey80")
pdf(file="ANOVA_pie.pdf", width=6, height=6)
pie(ad$aov.tab$R2[1:3],labels=row.names(ad$aov.tab)[1:3],col=cols,main="genotype vs fate")
dev.off()
#### DESEQ ####
# with multi-factor, multi-level design - using LRT
load("initial.RData")
library(DESeq2)
library(BiocParallel)
# Running full model for contrast statements
dds=DESeq(dds, parallel=TRUE)
# model for the effect of fate: (>2 factor levels => LRT)
dds$fate <- factor(dds$fate, levels = c("healthy","nai","diseased"))
dds_fate=DESeq(dds,test="LRT",reduced=~genotype, parallel=TRUE)
# saving all models
save(dds,dds_fate,file="realModels.RData")
#### DEGs and CONTRASTS ####
load("realModels.RData")
library(DESeq2)
# fate factor (LRT against the genotype-only reduced model)
fate=results(dds_fate)
summary(fate)
degs_fate=row.names(fate)[fate$padj<0.1 & !(is.na(fate$padj))]
# genotype factor
# NOTE(review): results(dds) without contrast/name returns the default
# comparison for the LAST design variable (fate here), not genotype --
# check resultsNames(dds) and pass name=/contrast= explicitly if the
# genotype effect is what is intended.
genotype=results(dds)
summary(genotype)
degs_genotype=row.names(genotype)[genotype$padj<0.1 & !(is.na(genotype$padj))]
# fate contrasts (pairwise Wald tests at padj < 0.1)
diseased_healthy=results(dds,contrast=c("fate","diseased","healthy"))
summary(diseased_healthy)
degs_diseased_healthy=row.names(diseased_healthy)[diseased_healthy$padj<0.1 & !(is.na(diseased_healthy$padj))]
nai_healthy=results(dds,contrast=c("fate","nai","healthy"))
summary(nai_healthy)
degs_nai_healthy=row.names(nai_healthy)[nai_healthy$padj<0.1 & !(is.na(nai_healthy$padj))]
diseased_nai=results(dds,contrast=c("fate","diseased","nai"))
summary(diseased_nai)
degs_diseased_nai=row.names(diseased_nai)[diseased_nai$padj<0.1 & !(is.na(diseased_nai$padj))]
save(fate, genotype, diseased_healthy, nai_healthy, diseased_nai,file="pvals.RData")
# density plots: are my DEGs high-abundant or low-abundant?
load("vsd.RData")
load("pvals.RData")
means=apply(vsd,1,mean)
pdf(file="DEG_density.pdf", height=5, width=5)
plot(density(means))
lines(density(means[degs_genotype]),col="blue")
lines(density(means[degs_fate]),col="orange")
legend("topright", title = "Factor", legend=c("genotype","fate"), fill = c("blue","orange"))
dev.off()
#### VENN DIAGRAMS ####
load("pvals.RData")
library(DESeq2)
candidates=list("genotype"=degs_genotype, "fate"=degs_fate)
# install.packages("VennDiagram")
library(VennDiagram)
# overall factors, full model: two-set Venn of genotype vs fate DEGs
fullmodel_venn=venn.diagram(
  x = candidates,
  filename=NULL,
  col = "transparent",
  fill = c("blue", "orange"),
  alpha = 0.5,
  label.col = c("darkblue", "white", "darkred"),
  cex = 3,
  fontfamily = "sans",
  fontface = "bold",
  cat.default.pos = "text",
  cat.col =c("darkblue", "darkred"),
  cat.cex = 3,
  cat.fontfamily = "sans",
  cat.dist = c(0.06, 0.06),
  cat.pos = 3
)
pdf(file="Venn_transmission_mcav_sym.pdf", height=6, width=6)
grid.draw(fullmodel_venn)
dev.off()
# three-set Venn of the pairwise fate contrasts
pairwise=list("diseased_healthy"=degs_diseased_healthy,"nai_healthy"=degs_nai_healthy, "diseased_nai"=degs_diseased_nai)
pairwise.venn=venn.diagram(
  x = pairwise,
  filename=NULL,
  col = "transparent",
  fill = c("blue", "orange", "lightblue"),
  alpha = 0.5,
  label.col = c("darkblue", "white", "darkred", "white", "white", "white", "cornflowerblue"),
  cex = 3,
  fontfamily = "sans",
  fontface = "bold",
  cat.default.pos = "text",
  cat.col =c("darkblue", "darkred", "cornflowerblue"),
  cat.cex = 3,
  cat.fontfamily = "sans",
  cat.dist = c(0.06, 0.06, -0.06),
  cat.pos = 3
)
pdf(file="Venn_transmission_mcav_sym_pairwise.pdf", height=8, width=8)
grid.draw(pairwise.venn)
dev.off()
#### GO/KOG EXPORT ####
# Export per-gene statistics for downstream GO/KOG enrichment: the same
# pattern is repeated for each factor/contrast below (lpv = signed -log10
# p-value; fc = log2 fold change for binary contrasts only).
load("realModels.RData")
load("pvals.RData")
# fold change (fc) can only be used for binary factors, such as control/treatment, or specific contrasts comparing two factor levels
# log p value (lpv) is for multi-level factors, including binary factors
# genotype factor
# signed log p-values: -log(pvalue)* direction:
source=genotype[!is.na(genotype$pvalue),]
genotype.p=data.frame("gene"=row.names(source))
genotype.p$lpv=-log(source[,"pvalue"],10)
genotype.p$lpv[source$stat<0]=genotype.p$lpv[source$stat<0]*-1
head(genotype.p)
write.csv(genotype.p,file="genotype_lpv.csv",row.names=F,quote=F)
save(genotype.p,file="genotype_lpv.RData")
# fate factor
# signed log p-values: -log(pvalue)* direction:
source=fate[!is.na(fate$pvalue),]
fate.p=data.frame("gene"=row.names(source))
fate.p$lpv=-log(source[,"pvalue"],10)
fate.p$lpv[source$stat<0]=fate.p$lpv[source$stat<0]*-1
head(fate.p)
write.csv(fate.p,file="fate_lpv.csv",row.names=F,quote=F)
save(fate.p,file="fate_lpv.RData")
# fate contrasts
# diseased vs healthy
# log2 fold changes:
source=diseased_healthy[!is.na(diseased_healthy$pvalue),]
diseased_healthy.fc=data.frame("gene"=row.names(source))
diseased_healthy.fc$lfc=source[,"log2FoldChange"]
head(diseased_healthy.fc)
write.csv(diseased_healthy.fc,file="diseased_healthy_fc.csv",row.names=F,quote=F)
save(diseased_healthy.fc,file="diseased_healthy_fc.RData")
# signed log p-values: -log(pvalue)* direction:
diseased_healthy.p=data.frame("gene"=row.names(source))
diseased_healthy.p$lpv=-log(source[,"pvalue"],10)
diseased_healthy.p$lpv[source$stat<0]=diseased_healthy.p$lpv[source$stat<0]*-1
head(diseased_healthy.p)
write.csv(diseased_healthy.p,file="diseased_healthy_lpv.csv",row.names=F,quote=F)
save(diseased_healthy.p,file="diseased_healthy_lpv.RData")
# nai vs healthy
# log2 fold changes:
source=nai_healthy[!is.na(nai_healthy$pvalue),]
nai_healthy.fc=data.frame("gene"=row.names(source))
nai_healthy.fc$lfc=source[,"log2FoldChange"]
head(nai_healthy.fc)
write.csv(nai_healthy.fc,file="nai_healthy_fc.csv",row.names=F,quote=F)
save(nai_healthy.fc,file="nai_healthy_fc.RData")
# signed log p-values: -log(pvalue)* direction:
nai_healthy.p=data.frame("gene"=row.names(source))
nai_healthy.p$lpv=-log(source[,"pvalue"],10)
nai_healthy.p$lpv[source$stat<0]=nai_healthy.p$lpv[source$stat<0]*-1
head(nai_healthy.p)
write.csv(nai_healthy.p,file="nai_healthy_lpv.csv",row.names=F,quote=F)
save(nai_healthy.p,file="nai_healthy_lpv.RData")
# diseased vs nai
# log2 fold changes:
source=diseased_nai[!is.na(diseased_nai$pvalue),]
diseased_nai.fc=data.frame("gene"=row.names(source))
diseased_nai.fc$lfc=source[,"log2FoldChange"]
head(diseased_nai.fc)
write.csv(diseased_nai.fc,file="diseased_nai_fc.csv",row.names=F,quote=F)
save(diseased_nai.fc,file="diseased_nai_fc.RData")
# signed log p-values: -log(pvalue)* direction:
diseased_nai.p=data.frame("gene"=row.names(source))
diseased_nai.p$lpv=-log(source[,"pvalue"],10)
diseased_nai.p$lpv[source$stat<0]=diseased_nai.p$lpv[source$stat<0]*-1
head(diseased_nai.p)
write.csv(diseased_nai.p,file="diseased_nai_lpv.csv",row.names=F,quote=F)
save(diseased_nai.p,file="diseased_nai_lpv.RData")
|
e422ea4aeb196e35a3d2cf4bc29b5bd6b24843e6
|
6a4593ac8bb196d85d58b2042aa3cca0fbde0eb7
|
/R/anl_2.R
|
02feece0d95ba77fa9a744c2b8e8e9cacfc8b8d1
|
[] |
no_license
|
whiteaegis/Imputation
|
59883addc98378a736d96fea1f2ea6c3d3c9eb77
|
f85be4a1f8f94db1e6c6bb847caeb1246e0998f8
|
refs/heads/master
| 2023-02-25T01:00:06.548131
| 2021-02-02T07:56:28
| 2021-02-02T07:56:28
| 335,208,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
anl_2.R
|
# Scratch accuracy analysis of WWKNN imputation results against truth `kk`.
# NOTE(review): this script relies on objects assumed to exist in the
# workspace (wwknn_final, kk, zz, data.p, n.count); it is not self-contained.
setwd("C:/Users/user1/Documents/imputation/anl2.test1")
final <- wwknn_final
# Overall accuracy on the imputed positions.
sum(final[data.p] == kk[data.p]) / sum(zz == "N")
# Per-sample accuracy over all positions.
# FIX: these accumulators were array(0, ncol(kk), 1); array()'s third
# argument is `dimnames` and must be a list, so those calls errored.
# A plain numeric vector behaves identically for the indexing used here.
result1 <- numeric(ncol(kk))
for (i in seq_len(ncol(kk))) {
  result1[i] <- sum(final[, i] == kk[, i]) / length(zz[, i])
}
# Per-sample accuracy restricted to the imputed positions.
result2 <- numeric(ncol(kk))
for (i in seq_len(ncol(kk))) {
  result2[i] <- sum(final[(data.p[data.p[, 2] == i, ])] == kk[(data.p[data.p[, 2] == i, ])]) / sum(zz[(data.p[data.p[, 2] == i, ])] == "N")
}
# Per-sample count of non-ACGT (missing/ambiguous) truth calls.
result3 <- numeric(ncol(kk))
for (i in seq_len(ncol(kk))) {
  result3[i] <- sum(!kk[(data.p[data.p[, 2] == i, ])] %in% c("A", "T", "C", "G"))
}
# plot(result1, type = "h")
# plot(result2, type = "h")
plot(result3, type = "h")
# Samples with imputation accuracy below 95 %.
which(result2 < 0.95)
sum(result2 < 0.95)
final[(data.p[data.p[, 2] == 2, ])]
sum(!kk[(data.p[data.p[, 2] == i, ])] %in% c("A", "T", "C", "G"))
# Normalised N-count per sample.
# FIX: moved above its first use -- it was originally defined after the
# cor() calls that reference it.
# TODO(review): operator precedence gives n.count - (mean(n.count)/max(n.count));
# (n.count - mean(n.count)) / max(n.count) may have been intended.
n.count.nol <- n.count - mean(n.count) / max(n.count)
# plot(n.count, type = "h")
cor(n.count.nol, result1)
cor(n.count.nol, result2)
cor(result1, result3)
cor(result2, result3)
max(n.count)
write.table(data.p, file = "imupte position.txt", sep = "\t")
write.table(result2, file = "wwknn_accuracy_by sample.txt", sep = "\t")
|
ca074633abd346f91b681f422fea89cf9f6566a9
|
5a4fe535d64e16351a3f72e3a2fd934157de95b8
|
/R/TBploter.R
|
09ac0bf09259f6de668719889d4c7acd64a37436
|
[
"MIT"
] |
permissive
|
likelet/PlotAppForTBtools
|
a94f6cce39ac6a385f487d96de2518ce304d82d4
|
2307ff3c333768cf806059b48d24e3ff4c14e2d0
|
refs/heads/master
| 2021-01-20T20:52:32.738356
| 2016-08-15T11:47:09
| 2016-08-15T11:47:09
| 65,454,944
| 37
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
TBploter.R
|
#' Run the default TBploter app for analysis locally
#'
#' \code{TBploter} launches the Shiny application bundled inside the
#' \pkg{TBploter} package.
#' @author Qi Zhao
TBploter <- function() {
  app_dir <- system.file("TBploter", package = "TBploter")
  shiny::runApp(app_dir)
}
|
4fbbce6e3b54104a10bc0be0e8fd61c1393c862a
|
1d193689ccfe0c2fabd8c24b322f7b8613744fea
|
/Total Price.R
|
a902fcb247a8a88c87259a2e652177e5d8bce1b0
|
[] |
no_license
|
jrjaskol/ITOxygenDenso
|
7e4e368395194493cb8c6eaaf8bf6926d62787a2
|
4b7908d57899348c903283450badd6b457b1b184
|
refs/heads/master
| 2020-08-22T14:42:33.594040
| 2019-12-11T07:59:08
| 2019-12-11T07:59:08
| 216,418,190
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
Total Price.R
|
# Product-price lookup script.
setwd('~/Desktop/IT_Oxygen')
# NOTE(review): install.packages() should not run on every execution;
# install once interactively instead (and 'xlsx' is installed but 'readxl'
# is the package actually loaded below).
install.packages('xlsx')
library('readxl')
data <- read_excel('~/Desktop/IT_Oxygen/Product_Data/Product Data.xlsx')
# Columns of the product sheet pulled into parallel vectors.
dateArray=data$Date
productArray=data$product
avgPriceArray=data$Avg
price=data$price
# Look up and print the average price for `product` on `date`.
# NOTE(review): several apparent issues to confirm with the author before
# changing behavior:
#  - the while loop increments index.contract past the last row for the
#    product; once it runs off the end of productArray the condition is NA,
#    which errors in while();
#  - match(date, dateArray[index.contract]) matches against a SINGLE element,
#    so priceIndex can only be 1 or NA -- match(date, dateArray) was likely
#    intended (see the commented-out line);
#  - amount.Contract and amount.Spot are computed but never used (the line
#    using amount.Contract is commented out).
price.function <- function(date, price, perContract, perSpot, product.amount, product, productArray, dateArray, avgPriceArray) {
  amount.Contract <- perContract*product.amount
  amount.Spot <- perSpot*product.amount
  # Index of the first row for this product.
  index.contract <- match(c(product), productArray)
  # Scan all rows of this product, remembering the last "contract" row.
  while(productArray[index.contract] == product) {
    if (price[index.contract] == "contract") {
      print(price[index.contract])
      #priceIndex = match(c(date), dateArray)
      priceIndex = match(date, dateArray[index.contract])
    }
    index.contract = index.contract + 1
  }
  # avgContractPrice = avgPriceArray[priceIndex] * amount.Contract
  print(avgPriceArray[priceIndex])
}
price.function(date="8/31/17", price, perContract=0.5, perSpot=0.5, product.amount=1000, product=1, productArray, dateArray, avgPriceArray)
|
373ecdea7c8ad03020f06f4c6c29af1b0f3f2660
|
8de53d7d7b2af23ac04fc82b379e5a9b2a18512f
|
/R/projection.R
|
2bb9f3509c2f97d0503e5f2761e26db0fa5c968e
|
[] |
no_license
|
KNMI/DutchClimate
|
16f05f2ac8452710775d1613d3a755ed4b97511a
|
1eb957d85fa7835ad7cebbb0df47bf6bc91957cb
|
refs/heads/master
| 2021-01-21T03:39:13.109562
| 2017-06-25T20:54:13
| 2017-06-25T20:54:13
| 64,229,649
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,438
|
r
|
projection.R
|
#' Compute daily climatology
#'
#' Average daily temperature (tg) per calendar day over the reference years.
#'
#' @param data data.table daily data
#' @param startYear int starting year
#' @param endYear int end year
#' @return data.table with columns month, mday, tg and a date column placed
#'   in the year \code{endYear + 1}. If that year is not a leap year, Feb 29
#'   becomes an NA date and is dropped by \code{na.omit}.
#' @export
ComputeDailyClimatology <- function(data, startYear, endYear) {
  # Mean tg for every (month, day-of-month) combination in the window.
  tmp <- data[year(date) %in% seq.int(startYear, endYear), .(tg = mean(tg)),
              by = .(month(date), mday(date))]
  # Attach a concrete date in the year following the reference window.
  tmp[, date := as.Date(paste(endYear + 1, month, mday, sep = "-"),
                        format="%Y-%m-%d")]
  na.omit(tmp)
}
#' Hindcast
#'
#' Necessary to compute bias, sd, rmse and other statistics of projection.
#' For each day of the hindcast year, projects the annual mean by mixing the
#' observations up to that day with climatology for the remaining days.
#'
#' @param year integer year for hindcast
#' @param dailyData daily data of the hindcast year (data.table with date, tg)
#' @param dailyClimatology daily climatology (same number of rows)
#' @return data.table: dailyData with tg replaced by the running projection
#'   and a res column holding the residual against the realised annual mean
#' @export
Hindcast <- function(year, dailyData, dailyClimatology) {
  # check that both data have same number of rows
  stopifnot(nrow(dailyData) == nrow(dailyClimatology))
  annualMean <- CalculateAnnualMean(dailyData[year(date) == year], "WMO")[, TG]
  nDays <- nrow(dailyData)
  projection <- numeric(nDays)
  for (i in 1 : (nDays-1)) {
    # Mix observed days 1..i with climatology for the remainder of the year.
    # TODO(review): the climatology slice stops at nDays-1, so the mixture
    # covers nDays-1 days, and for i = nDays-1 the range (i+1):(nDays-1)
    # counts DOWN, duplicating a day -- confirm whether (i+1):nDays was
    # intended.
    projection[i] = mean(c(dailyData[1 :i, tg],
                           dailyClimatology[(i+1) : (nDays-1), tg]))
  }
  projection[nDays] <- annualMean
  # (a dead, no-op `projection` expression was removed here)
  # Return a modified copy so the caller's dailyData stays intact.
  tmp <- copy(dailyData)
  tmp[, tg := projection]
  tmp[, res := annualMean - tg]
}
#' Projection
#'
#' Projects the annual mean on a given day: observed tg up to \code{day},
#' climatology for the rest of the year, plus a symmetric uncertainty band.
#'
#' @param day date of projection
#' @param measurements daily data
#' @param forecast 14-day forecast (not implemented yet; currently unused)
#' @param climatology daily climatology
#' @param statistics statistics for bias correction and uncertainty.
#'   NOTE(review): this data.table is modified by reference (a date column
#'   is added) -- pass a copy if the caller reuses it.
#' @param sdfactor factor to determine confidence interval (default = 1.96)
#' @return one-row data.table with columns lower, mean, upper and date
#' @export
MeanProjection <- function(day, measurements, forecast, climatology,
                           statistics, sdfactor = 1.96) {
  stopifnot(as.Date(day) %in% measurements$date)
  # Place the (month, mday) statistics in the year of `day`.
  statistics[, date := as.Date(paste(year(day), month, mday, sep = "-"), format="%Y-%m-%d")]
  statistics <- na.omit(statistics)
  # Observed days up to `day`, climatology for the rest of the year.
  projection <- rbind(measurements[year(date) == year(day),
                                   .(date, tg)][date <= day, ],
                      climatology[year(date) == year(day),
                                  .(date, tg)][date > day, ])
  # NOTE(review): the bias correction term is commented out, so only the
  # sd column of `statistics` is actually used -- confirm this is intended.
  projection <- projection[, mean(tg)] #+ statistics[date == day, bias]
  uncertainty <- sdfactor * statistics[date == day, sd]
  # lower / mean / upper band around the point projection.
  projection <- projection + cbind(-1, 0, 1) * uncertainty
  colnames(projection) <- c("lower", "mean", "upper")
  projection <- as.data.table(projection)
  projection[, date := day]
  return(projection)
}
# Point prediction of the annual mean on `Date`: blends the observed
# year-to-date mean with the mean of the remaining calendar days over the
# previous k years, weighted by the fraction of the year already elapsed.
# `dt` must carry year/month/day helper columns (added by the callers);
# `forecast` is accepted but currently unused.
PredictMovingWindowBasis <- function(Date, dt, forecast = NULL, k = 12) {
  stopifnot(as.Date(Date) %in% dt$date)
  # Observations of the current year up to and including Date.
  current <- dt[year >= year(Date) & date <= Date]
  ndays <- yday(paste0(year(Date), "-12-31"))
  mdays <- yday(Date)
  lambda <- mdays / ndays  # fraction of the year already observed
  if (ndays == mdays) {
    # Year complete: the prediction is simply the observed annual mean.
    return(current[, mean(tg)])
  }
  # Climatological mean over the remaining calendar days of the k prior years.
  remainder <- dt[year < year(Date) &
                  year > (year(Date) - (k+1))][month > month(Date) |
                  (month == month(Date) & day > mday(Date)),
                  mean(tg)]
  prediction <- lambda * current[, mean(tg)] + (1 - lambda) * remainder
  prediction
}
#' Moving window prediction
#'
#' Quantile prediction of the annual mean: the point prediction from
#' \code{PredictMovingWindowBasis} plus a normal error model whose sd is
#' estimated from a 30-year hindcast.
#'
#' @param Date date from which to predict
#' @param dt data.table with daily measurements (at least last 30 years)
#' @param forecast data.table with operational forecast (currently unused)
#' @param probs probabilities to predict
#' @param k integer size of moving window in years
#' @return one-row data.frame with the date and one column per quantile
#' @export
PredictMovingWindow <- function(Date, dt, forecast = NULL, probs = c(0.05, 0.50, 0.95), k = 12L) {
  stopifnot(as.Date(Date) %in% dt$date)
  # Work on a copy: helper columns are added by reference below.
  dt <- copy(dt)
  dt[, year := year(date)]
  dt[, month := month(date)]
  dt[, day := mday(date)]
  currentPrediction <- PredictMovingWindowBasis(Date, dt,
                                                forecast = forecast, k = k)
  # startDate <- as.Date(Date) - 365.25*30 #
  # Same calendar day 30 years earlier, built by string surgery on the year.
  # NOTE(review): this yields an invalid date (NA) when Date is Feb 29 and
  # the target year is not a leap year -- confirm this cannot occur upstream.
  startDate <- as.Date(paste0(as.integer(substr(Date, 0, 4))-30, substr(Date, 5, 10)))
  dates <- seq.Date(startDate, as.Date(Date), by = "year")[-31]
  # Hindcast the same prediction for the 30 prior years and take the
  # residual spread against the realised annual means as the error sd.
  hindcast <- map_dbl(dates, PredictMovingWindowBasis, dt = dt,
                      forecast = NULL, k = k)
  actualMeans <- map_dbl(dates, CalcActualMean, dt = dt)
  res <- actualMeans - hindcast
  stdDev <- sd(res)
  prediction <- qnorm(probs, currentPrediction, stdDev)
  prediction <- as.data.frame(t(prediction))
  colnames(prediction) <- paste0("p", probs*100)
  cbind(date = Date, prediction)
}
# Realised mean tg of the calendar year containing `Date`.
CalcActualMean <- function(Date, dt) {
  target_year <- year(Date)
  dt[year(date) == target_year, mean(tg)]
}
#' Gamlss projection
#'
#' Quantile prediction of the annual mean: blends the observed year-to-date
#' mean with a GAMLSS (normal family, smooth trend in year) fit to the
#' remaining-days means of all previous years.
#'
#' @inheritParams PredictMovingWindow
#' @return one-row data.frame with the date and one column per quantile
#' @export
PredictGamlss <- function(Date, dt, forecast, probs = c(0.05, 0.50, 0.95)) {
  stopifnot(as.Date(Date) %in% dt$date)
  # Work on a copy: helper columns are added by reference below.
  dt <- copy(dt)
  dt[, year := year(date)]
  dt[, month := month(date)]
  dt[, day := mday(date)]
  current <- dt[year >= year(Date) & date <= Date]
  past <- dt[year < year(Date)]
  ndays <- yday(paste0(year(Date), "-12-31"))
  mdays <- yday(Date)
  lambda <- mdays / ndays  # fraction of the year already observed
  if (ndays == mdays) {
    # Year complete: every quantile collapses to the observed annual mean.
    prediction <- as.data.frame(t(rep(mean(current$tg), length(probs))))
    colnames(prediction) <- paste0("p", probs*100)
    prediction <- cbind(date = Date, prediction)
    return(prediction)
  }
  # Per-year mean of the remaining calendar days, over all past years.
  tmp <- past[date < Date & (month > month(Date) | (month == month(Date) & day > mday(Date))), .(TG = mean(tg)), by = year]
  fit <- gamlss(TG ~ pb(year), data = tmp, family = "NO", control = gamlss.control(trace=FALSE))
  # predictAll() prints to the console; redirect it into a throw-away
  # connection to keep the function silent.
  f = file()
  sink(file = f)
  params <- predictAll(fit, newdata = data.frame(year = year(Date)), data = tmp)
  sink()
  close(f)
  # Normal quantiles of the remainder, blended with the observed part.
  remainder <- qnorm(probs, params$mu, params$sigma)
  prediction <- lambda * mean(current$tg) + (1 - lambda) * remainder
  prediction <- as.data.frame(t(prediction))
  colnames(prediction) <- paste0("p", probs*100)
  prediction <- cbind(date = Date, prediction)
  # list(current, past, fit, params, lambda, current[, mean(tg)], remainder, prediction)
  prediction
}
#' Produces trend envelope data frame
#'
#' @description uses linear interpolation between the two endpoint years
#' @param start dt with year, lower, and upper
#' @param end dt with year, lower, and upper
#' @export
MakeTrendEnvelope <- function(start, end) {
  endpoints <- rbind(start, end)
  yrs <- seq.int(start[, year], end[, year], by = 1)
  # Interpolate each envelope bound linearly across the intermediate years.
  lo <- approx(endpoints[, year], endpoints[, lower], xout = yrs)$y
  hi <- approx(endpoints[, year], endpoints[, upper], xout = yrs)$y
  data.table(year = yrs, lower = lo, upper = hi)
}
|
70ccb3cf3978eb8f432816da98ecd6fe3071ab15
|
b5bf6c06807aab62869706c4104bb62d50587cbf
|
/R/get-vcssanova-basis.R
|
0dd9ace492290142ebdfe5d63b5a1bfee9448e5e
|
[] |
no_license
|
weirichd/cautious-guacamole
|
39b07c4c0aea014ee9b47293e9ac6046bfba3319
|
0c3e496a9de9b15b9a96c4bfd84300b47d6c9629
|
refs/heads/master
| 2021-07-12T11:30:38.258113
| 2017-10-18T23:04:18
| 2017-10-18T23:04:18
| 107,332,709
| 0
| 0
| null | 2017-10-17T22:54:26
| 2017-10-17T22:54:25
| null |
UTF-8
|
R
| false
| false
| 4,791
|
r
|
get-vcssanova-basis.R
|
#' Build the varying-coefficient ssanova basis matrices.
#'
#' Mirrors the data-preparation stage of \code{gss::ssanova}: constructs the
#' unpenalized design \code{s}, the penalized kernel array \code{q}, and a
#' lag-design matrix \code{W} built from the reference columns of
#' \code{data$y}, then projects \code{y}, \code{s} and \code{q} through the
#' weighted design \code{M = t(W) \%*\% Dinv \%*\% W}.
#'
#' @param formula model formula; internally always reset to \code{y ~ x}.
#' @param data list with elements \code{x} (covariate matrix) and \code{y}
#'   (response matrix whose first column is the reference category).
#' @param wt observation weights; inverted elementwise to form \code{Dinv}.
#' @param type,subset,offset,na.action,partial,method,alpha,varht,nbasis,seed,random,skip.iter
#'   accepted for interface compatibility with \code{gss::ssanova}; several
#'   are unused in this basis-construction step.
#' @return list with components \code{y}, \code{M}, \code{q}, \code{s},
#'   \code{nq} (and \code{q} repeated -- see note at the return statement).
get_vcssanova_basis <- function (formula=as.formula("y~x"), data,
                                 type = NULL, wt, subset=NULL,
                                 offset=NULL, na.action = na.omit,
                                 partial = NULL, method = "v",
                                 alpha = 1.4, varht = 1,
                                 nbasis = NULL, seed = NULL,
                                 random = NULL, skip.iter = FALSE)
{
    # Strip arguments that model.frame() must not see (mirrors gss::ssanova).
    mf <- match.call()
    mf$type <- mf$method <- mf$varht <- mf$partial <- NULL
    mf$alpha <- mf$id.basis <- mf$nbasis <- mf$seed <- NULL
    mf$random <- mf$skip.iter <- NULL
    data$x <- as.matrix(data$x)
    dimnames(data$x)[1:2] <- NULL
    # The model frame is always rebuilt as y ~ x with a dummy unit response.
    mf$formula <- as.formula("y~x")
    mfr <- model.frame(formula=mf$formula,
                       data=list(x=data$x,
                                 y=rep(1,dim(data$x)[1])))
    mf <- mfr
    rm(mfr)
    nobs <- dim(mf)[1]
    # Every observation is used as a basis point, so the sampling branch
    # below is effectively dead; retained for parity with gss::ssanova in
    # case id.basis is ever made configurable again.
    id.basis <- seq_len(nobs)
    if (is.null(id.basis)) {
        if (is.null(nbasis))
            nbasis <- max(30, ceiling(10 * nobs^(2/9)))
        if (nbasis >= nobs)
            nbasis <- nobs
        if (!is.null(seed))
            set.seed(seed)
        id.basis <- sample(nobs, nbasis, prob = wt)
    }
    else {
        if (max(id.basis) > nobs || min(id.basis) < 1)
            stop("gss error in ssanova: id.basis out of range")
        nbasis <- length(id.basis)
    }
    term <- mkterm(mf, type)
    if (!is.null(random)) {
        # inherits() is the robust way to test for a formula object.
        if (inherits(random, "formula"))
            random <- mkran(random, data)
    }
    # Assemble the unpenalized design s and the penalized kernel array q.
    s <- q <- NULL
    nq <- 0
    for (label in term$labels) {
        if (label == "1") {
            s <- cbind(s, rep(1, len = nobs))
            next
        }
        x <- mf[, term[[label]]$vlist]
        x.basis <- mf[id.basis, term[[label]]$vlist]
        nphi <- term[[label]]$nphi
        nrk <- term[[label]]$nrk
        if (nphi) {
            phi <- term[[label]]$phi
            for (i in seq_len(nphi)) s <- cbind(s, phi$fun(x, nu = i,
                env = phi$env))
        }
        if (nrk) {
            rk <- term[[label]]$rk
            for (i in seq_len(nrk)) {
                nq <- nq + 1
                q <- array(c(q, rk$fun(x, x.basis, nu = i, env = rk$env,
                    out = TRUE)), c(nobs, nbasis, nq))
            }
        }
    }
    if (is.null(q)) {
        stop("gss error in ssanova: use lm for models with only unpenalized terms")
    }
    # Optional parametric "partial" terms appended to the unpenalized design.
    if (!is.null(partial)) {
        mf.p <- model.frame(partial, data)
        for (lab in colnames(mf.p)) mf[, lab] <- mf.p[, lab]
        mt.p <- attr(mf.p, "terms")
        lab.p <- labels(mt.p)
        matx.p <- model.matrix(mt.p, data)[, -1, drop = FALSE]
        if (dim(matx.p)[1] != dim(mf)[1])
            stop("gss error in ssanova: partial data are of wrong size")
        matx.p <- scale(matx.p)
        center.p <- attr(matx.p, "scaled:center")
        scale.p <- attr(matx.p, "scaled:scale")
        s <- cbind(s, matx.p)
        part <- list(mt = mt.p, center = center.p, scale = scale.p)
    }
    else part <- lab.p <- NULL
    if (qr(s)$rank < dim(s)[2]){
        stop("gss error in ssanova: unpenalized terms are linearly dependent")
    }
    # Lag-design matrix W, block-structured from the leading columns of y.
    # (The original computed this block twice verbatim; the identical
    # duplicate has been removed -- the result is unchanged.)
    W <- matrix(data=0,nrow=nrow(data$y)*(ncol(data$y)-1),
                ncol=choose(ncol(data$y),2))
    no.skip <- 0
    for (t in 2:ncol(data$y)){
        W[((0:(nrow(data$y)-1))*(ncol(data$y)-1)) + t-1,
          (no.skip+1):(no.skip+t-1)] <- data$y[,1:(t-1)]
        no.skip <- no.skip + t - 1
    }
    # Response: drop the reference column and flatten row-wise.
    y <- as.vector(t(data$y[,-1]))
    Dinv <- diag(1/wt)
    if (!is.null(offset)) {
        term$labels <- c(term$labels, "offset")
        term$offset <- list(nphi = 0, nrk = 0)
        y <- y - offset
    }
    ## ------------------------------------------------------
    # Project everything through the weighted lag design.
    M <- t(W) %*% Dinv %*% W
    # NOTE(review): Minv is never used below; kept because solve(M) also
    # acts as an implicit invertibility check on M -- confirm before removing.
    Minv <- solve(M)
    y <- t(W) %*% Dinv %*% y
    if(nq==1){
        q[,,1] <- M %*% as.matrix(q[1:dim(q)[1],1:dim(q)[2],1]) %*% M
    }
    s <- M %*% s
    # NOTE(review): `q` appears twice in this list in the original; kept as-is
    # to preserve the list length/positions any existing caller may rely on.
    list(y=y,M=M,q=q,s=s,nq=nq,q=q)
}
|
59cb1bdcd204bfb06a51ced887d1008c95f712b7
|
866f5a41c375d2c5be9d60d9a5f9a6ad13f96081
|
/R/plotVisregList.R
|
f856b99176eb63811754966ed149567f565d2390
|
[] |
no_license
|
pbreheny/visreg
|
c375126d98b4859423b4d8004c1721b62bf529aa
|
bc06525c2bc2f612717c9f8caa27434b9ee11a7c
|
refs/heads/master
| 2023-08-03T05:16:58.849754
| 2023-08-01T22:03:15
| 2023-08-01T22:03:15
| 5,400,642
| 62
| 20
| null | 2017-06-23T21:08:24
| 2012-08-13T15:20:12
|
R
|
UTF-8
|
R
| false
| false
| 523
|
r
|
plotVisregList.R
|
# S3 plot method for a list of visreg objects.
#
# Plots each element in turn. For lattice/base visreg plots it uses
# devAskNewPage() so the user is prompted between pages; for ggplot-based
# visreg plots it collects the (unprinted-by-plot) ggplot objects and
# returns them as a list so the caller can print/arrange them.
#
# Args:
#   x:   a visregList (list of visreg objects).
#   ask: prompt before each new page when plots exceed the mfcol layout?
#   ...: passed through to each element's plot() method.
# Returns: a list of ggplot objects when any element produced one,
#   otherwise invisible(NULL).
plot.visregList <- function(x, ask=TRUE, ...) {
  n <- length(x)
  prompt.user <- FALSE
  # && : scalar condition; original used elementwise `&` here.
  if (ask && (prod(par("mfcol")) < n) && dev.interactive()) {
    oask <- devAskNewPage()
    prompt.user <- TRUE
    on.exit(devAskNewPage(oask))
  }
  # Preallocate unconditionally so mixed gg/non-gg lists cannot hit an
  # undefined `ggList` (the original only created it when element 1 was gg).
  ggList <- vector('list', n)
  any.gg <- FALSE
  # seq_len() is safe for n == 0 (1:length(x) would iterate c(1, 0)).
  for (i in seq_len(n)) {
    p <- plot(x[[i]], ...)
    if (inherits(p, 'gg')) {
      any.gg <- TRUE
      ggList[[i]] <- p
    } else if (prompt.user) {
      devAskNewPage(TRUE)
    }
  }
  if (any.gg) return(ggList)
  invisible(NULL)
}
|
63aa3973b645bd1a27926798d0a74ffcba91e92d
|
a8f4f4647a2718059cf36ee92e773cb3810a1807
|
/ui.R
|
af56221b36783b52b99bdd77431a2c45b06ec139
|
[] |
no_license
|
chechuco/data_product_proj
|
8ab7949b571ea9bc6f2c1edb4cae724476828b62
|
8517e0641a7259219290e8f108dd3912c16a0323
|
refs/heads/master
| 2020-12-24T10:16:24.391659
| 2016-02-15T05:59:01
| 2016-02-15T05:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
ui.R
|
# ui.R -- Shiny UI for the stock price/return explorer.
library(shiny)

# Input controls: stock symbol, date range, and a moving-average toggle.
stock_sidebar <- sidebarPanel(
  helpText("Input a valid stock symbol and date range to examine the price (adjusted for splits and dividends) trend and monthly return."),
  helpText("Information source is from yahoo finance."),
  textInput("symb", "Symbol", "NLY"),
  dateRangeInput(
    "dates",
    "Date range",
    start = "2015-01-01",
    end = as.character(Sys.Date())
  ),
  br(),
  br(),
  checkboxInput("SMA90", "90 Days Simple Moving Average", value = TRUE)
)

# Output area: one tab for the price plot, one for the monthly-return table.
stock_main <- mainPanel(
  tabsetPanel(
    type = "tabs",
    tabPanel("Plot", plotOutput("plot")),
    tabPanel("Monthly Return Table", plotOutput("plot_return"))
  )
)

shinyUI(fluidPage(
  titlePanel("Stock Price Graph and Return"),
  sidebarLayout(stock_sidebar, stock_main)
))
|
bee6a7d28723007921f7a357df8b2af1a680a686
|
c8e71af48d925c34d1cb9f4dad262c970e8968d5
|
/man/bac.Rd
|
e61b3efacdd8754bf41a0c9807de1b9c54a845fc
|
[
"MIT"
] |
permissive
|
tessington/qsci381
|
43c7cd323ab64cf28ba738be35779157c93e62cf
|
b981f0bd345b250d42ff5f1c0609e5e61f5911f7
|
refs/heads/master
| 2022-12-24T20:56:56.045374
| 2020-09-24T20:50:29
| 2020-09-24T20:50:29
| 284,817,926
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 822
|
rd
|
bac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-bac.R
\docType{data}
\name{bac}
\alias{bac}
\title{Beer and blood alcohol content}
\format{
A data frame with 16 observations on the following 3 variables.
\describe{
\item{student}{a numeric vector}
\item{beers}{a numeric vector}
\item{bac}{a numeric vector}
}
}
\source{
J. Malkevitch and L.M. Lesser. For All Practical Purposes:
Mathematical Literacy in Today's World. WH Freeman & Co, 2008.
}
\usage{
bac
}
\description{
Here we examine data from sixteen student volunteers at Ohio State
University who each drank a randomly assigned number of cans of beer.
}
\examples{
library(ggplot2)
ggplot(bac, aes(x = beers, y = bac)) +
geom_point() +
labs(x = "Number of beers", y = "Blood alcohol content")
}
\keyword{datasets}
|
201ecfe824c3712283af54f56e6e8ffec8870b78
|
4b0cff5e09efd41994db11d589ef3069266ccce4
|
/man/generateSignificance.Rd
|
3d0de41e80a5c4f7497ab4ff6f76c516a47fc3bc
|
[] |
no_license
|
cran/Jmisc
|
7d43070011ebd9b56327ca8704dacbeeb5e84c2c
|
0b141061bedc22bc9c7e7b6fa97dde67066f06a9
|
refs/heads/master
| 2022-07-13T05:12:17.641540
| 2022-06-22T04:53:25
| 2022-06-22T04:53:25
| 17,680,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
rd
|
generateSignificance.Rd
|
\name{generateSignificance}
\alias{generateSignificance}
\title{Generate t-statistics, p-value and significance}
\usage{
generateSignificance(x, row_names)
}
\arguments{
\item{x}{A matrix or data.frame}
\item{row_names}{names of row}
}
\value{
a data.frame
}
\description{
Generate t-statistics, p-value and significance from
estimates and its sd. Estimates and its SD is the first and
second column respectively
}
\examples{
n<-1000
x_data<-cbind(rnorm(n,mean=0),rnorm(n,mean=1))
x_estimates<-cbind(apply(x_data,2,mean),apply(x_data,2,sd)/sqrt(n))
generateSignificance(x_estimates)
generateSignificance(x_estimates,row_names=c("mean0","mean1") )
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
|
feb52e467de39cef002140db090492fa80ab0e37
|
6483fea671e8edd3bd34446d1cfc787d0f9127bb
|
/TeamReyLew.R
|
d01bb18154fa428f6029c630e484da44fb541200
|
[] |
no_license
|
sharnsl/ReyLew
|
f622453122544b12eeb68733e15d566d4fa4b981
|
00cb889938607186c99bd60a0a2c8f1cb234a70a
|
refs/heads/main
| 2023-01-24T09:45:01.995037
| 2020-11-12T04:12:20
| 2020-11-12T04:12:20
| 304,148,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 565
|
r
|
TeamReyLew.R
|
# Column subsets of the `guns` and overdose `data` data frames.
# Assumes both data frames are already loaded in the session.

# Gun-deaths data: demographic and intent columns only.
gun_cols <- c("year", "month", "intent", "sex", "age", "race", "education")
guns2 <- guns[gun_cols]

# Overdose-deaths data: case-description columns only.
# NOTE(review): "OtherSignifican" looks truncated -- verify the column name.
death_cols <- c("Date", "Age", "Sex", "ResidenceCity", "ResidenceState",
                "DeathCity", "Location", "DescriptionofInjury", "COD",
                "OtherSignifican")
data2 <- data[death_cols]

# Complete-case versions of each subset.
drugdata <- na.omit(data2)
gunsdata <- na.omit(guns2)

# Drug-indicator columns from the overdose data.
drug_cols <- c("Heroin", "Cocaine", "Fentanyl", "FentanylAnalogue",
               "Oxycodone", "Oxymorphone", "Ethanol", "Hydrocodone",
               "Benzodiazepine", "Methadone", "Amphet", "Tramad",
               "Morphine_NotHeroin", "Hydromorphone", "Other",
               "OpiateNOS", "AnyOpioid")
data1 <- data[drug_cols]
data1
|
f5a82b13311c928874b744f432da68133c49f0f1
|
8284e88e32d095f7582001ce62013d7773ff1cc9
|
/Desktop/Doctorado/Variabilidad Estructural/FUNCTIONS/ReadHeme.R
|
64758912d6d9fce179e82b33d74a59d1293f8b9a
|
[] |
no_license
|
marialauramarcos/PruebaGit
|
4396a5cd95447e227393b92675890d78e637a312
|
c849361d5ea37af8fb75364ff515c39bc83461f7
|
refs/heads/master
| 2021-01-10T07:12:32.456064
| 2015-10-04T18:50:08
| 2015-10-04T18:50:08
| 43,513,416
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
ReadHeme.R
|
# Read a PDB file and return the coordinates of the heme-group atoms
# NA, NB, NC, ND and FE for the requested chain.
#
# Args:
#   pdb.fname: path to the PDB file.
#   chain:     chain identifier to select atoms from.
# Returns: a 3-row matrix of x/y/z coordinates, one column per atom,
#   in the fixed order NA, NB, NC, ND, FE.
readHeme <- function(pdb.fname, chain) {
  pdb <- read.pdb(file = pdb.fname, het2atom = TRUE)
  heme.atoms <- c("NA", "NB", "NC", "ND", "FE")
  # Collect the xyz triple for each heme atom type, preserving order.
  coords <- unlist(lapply(heme.atoms, function(elety) {
    sel <- atom.select(pdb, chain = chain, elety = elety)
    pdb$xyz[sel$xyz]
  }))
  matrix(coords, nrow = 3)
}
|
4b35a51bccd9076b9305e55e972b79269c4c7e87
|
e2366c7366dcaa4d86d88d8eb8493aae405f1086
|
/plot2.R
|
241546ab323bd23d788f217bea5f0b0fa7b27d0b
|
[] |
no_license
|
gverma14/ExData_Plotting1
|
91e4de1793276169efb8260598dfaca7bd4bc98e
|
59d8a5f4c669a639b8fb98fe09d39a10d5c4ea53
|
refs/heads/master
| 2021-01-17T23:17:38.904569
| 2014-10-11T22:00:53
| 2014-10-11T22:00:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 954
|
r
|
plot2.R
|
# Plot Global Active Power for 1-2 Feb 2007 as a line chart (plot2.png).
#
# The raw data set is read once and cached in the global environment as
# `consData` (via <<-), so repeated calls skip the expensive import.
#
# Args:
#   filename: path to the semicolon-separated power-consumption file with
#             (at least) Date, Time and Global_active_power columns.
# Side effects: writes plot2.png to the working directory; may create the
#   global `consData` on first use.
plot2 <- function(filename = "household_power_consumption.txt")
{
  # Import and cache the data set on first use only.
  # (exists("consData") replaces the convoluted
  #  exists(as.character(substitute(consData))) -- identical behavior.)
  if (!exists("consData")) {
    consData <<- read.table(filename, header = TRUE, sep = ";",
                            stringsAsFactors = FALSE)
  }
  # Keep only the two target dates (dd/mm/yyyy format).
  febData <- consData[(consData$Date == "1/2/2007") | (consData$Date == "2/2/2007"), ]
  active_power <- febData$Global_active_power
  # Combine date and time strings, then parse into POSIXlt for the x axis.
  dates <- strptime(paste(febData$Date, febData$Time), "%d/%m/%Y %H:%M:%S")
  png("plot2.png")
  # Guarantee the device is closed even if plot() errors.
  on.exit(dev.off(), add = TRUE)
  plot(dates, active_power, xlab = "",
       ylab = "Global Active Power (kilowatts)", type = "l")
}
|
21a149f0e47be6e3440548999c8d62ad34ff9fb1
|
f1b9b81a57dad419c7216445b9a75df120a47791
|
/R/Class-SDMXAgencyScheme.R
|
4353fdea7a511d6ee669ad49d6e152d9702d8536
|
[] |
no_license
|
opensdmx/rsdmx
|
d71dc83799d76da3233ddfc0d4fa75ce5ff097b9
|
3c0c2316ff4fa237cdc62731d379a17369e05ae3
|
refs/heads/master
| 2023-08-31T01:56:25.934458
| 2023-08-28T09:57:08
| 2023-08-28T09:57:08
| 10,642,895
| 96
| 38
| null | 2021-04-21T20:41:10
| 2013-06-12T13:01:55
|
R
|
UTF-8
|
R
| false
| false
| 2,197
|
r
|
Class-SDMXAgencyScheme.R
|
#' @name SDMXAgencyScheme
#' @docType class
#' @aliases SDMXAgencyScheme-class
#'
#' @title Class "SDMXAgencyScheme"
#' @description A basic abstract class to handle a SDMXAgencyScheme
#'
#' @slot id Object of class "character" giving the ID of the concept scheme (required)
#' @slot agencyID Object of class "character" giving the AgencyID
#' @slot version Object of class "character" giving the version
#' @slot uri Object of class "character" giving the concept uri
#' @slot urn Object of class "character" giving the concept urn
#' @slot isExternalReference Object of class "logical" indicating if the concept scheme is an external reference
#' @slot isFinal Object of class "logical" indicating if the concept scheme is final
#' @slot validFrom Object of class "character" indicating the start validity period
#' @slot validTo Object of class "character" indicating the end validity period
#' @slot Name Object of class "list" giving the agency scheme name (by language) - required
#' @slot Description Object of class "list" giving the agency scheme description (by language)
#' @slot agencies object of class "list" giving the list of \code{SDMXAgency}
#'
#' @note The validity method currently performs no checks and always returns TRUE.
#'
#' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com}
#'
# S4 class registration; inherits all slots of SDMXOrganisationScheme and
# adds the attribute/element slots listed below.
setClass("SDMXAgencyScheme",
         contains = "SDMXOrganisationScheme",
         representation(
           #attributes
           id = "character", #required
           agencyID = "character", #optional
           version = "character", #optional
           uri = "character", #optional
           urn = "character", #optional
           isExternalReference = "logical", #optional
           isFinal = "logical", #optional
           validFrom = "character", #optional
           validTo = "character", #optional
           #elements
           Name = "list",
           Description = "list", #optional
           agencies = "list"
         ),
         prototype = list(
           id = "AGENCIES",
           version = "1.0",
           isFinal = FALSE,
           agencies = list()
         ),
         validity = function(object){
           # No structural validation yet; every instance is accepted.
           return(TRUE);
         }
)
|
40ab4f41bfd72518a53e7ab31325ce32ec5cfc8f
|
75ac4422811de46609cdcb009dabdaa73203f3cf
|
/Analyzing_twitter_ sentiments_project/R-Codecs/BagofWords.R
|
4b2904f01aed79581c1151815641977dab0a2e94
|
[] |
no_license
|
hemanthkannan003/MyProjects
|
ec170af29b7712dd47f380a1dfdb290af1482ee4
|
e43cdb2cadd0c635682cae0264523ab2fa08c2d4
|
refs/heads/master
| 2021-01-25T06:30:28.869872
| 2018-04-16T21:59:59
| 2018-04-16T21:59:59
| 93,587,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,053
|
r
|
BagofWords.R
|
# BagofWords.R -- word-frequency, word-cloud, NRC-emotion and dictionary-based
# sentiment analysis of labelled tweets.
library(ff)
library(bigmemory)
library(NLP)
library(wordcloud)
library(tm)
library(SnowballC)
library(plyr)
library(stringr)
library(quanteda)
library(FSelector)
# Labelled tweets; downstream code reads the `text` and `sentiment` columns.
input <- read.csv(file="manual_machine.csv",head=TRUE,sep=",")
# NOTE(review): this function has no braces, so its body is ONLY the next
# expression (`Text<-input$text`); everything after it runs at top level.
# Likely the braces were lost -- confirm intent before refactoring.
CleanTweets<-function(input)
Text<-input$text
senti<-input$sentiment
# NOTE(review): each gsub below assigns to `text` but reads from the
# unmodified `Text`, so only the LAST substitution survives -- the earlier
# cleanups (URLs, hashtags, RT/via, mentions, punctuation, digits) are
# discarded. Presumably each call should read the running `text`; verify.
text<-gsub("\r?\n|\r|\t", " ", Text)
text<-gsub(" http.*","",Text)
text<- gsub("#\\w+","",Text)
text <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", Text)
text<- gsub("@\\w+", "", Text)
text<- gsub("[[:punct:]]", "", Text)
text<- gsub("[[:digit:]]","",Text)
text<- gsub("[^a-zA-Z0-9]"," ",Text)
text<- gsub("^\\s+|\\s+$","",Text)
# Build a tm corpus and normalize: lowercase, drop stopwords/punctuation.
cor<- Corpus(VectorSource(text))
cor = tm_map(cor, content_transformer(tolower))
cor<- tm_map(cor, removeWords,c(stopwords("english")))
cor <- tm_map(cor, removePunctuation)
# Extra domain-specific stopwords (politics/Twitter noise) beyond the
# standard English list.
mystopwords<-c("a","able","about","across","after","all","almost","also","am","among",
"an","and","any","are","as","at","be","because","been","but","by","can",
"cannot","could","dear","did","do","does","either","else","ever","every","for",
"from","get","got","had","has","have","he","her","hers","him","his","how","however","i","if",
"in","into","is","it","its","just","least","let","like","likely","may","me","might","most","must",
"my","neither","nor","of","off","often","on","only","or","other","our","own","rather","said",
"say","says","she","should","since","so","some","than","that","the","their","them","then","there","these",
"they","this","tis","to","too","twas","us","wants","was","we","were","what","when","where","which","while",
"who","whom","why","will","with","would","yet","you","your","last","amp","night","fox","gop","one","can","amp","just",
"get","going","still","term","now","httpstcobhvimxjew","even","anything","back","done","gonna","keep","know","make", "much",
"nothing","rep","right","see","thats","really","yall","thats","want","pass", "two","thing","things","though","today","tonight",
"take","rep","run","running","ryan","scotus","remember","potus", "please","next","needs","made","makes","many","looking","lot",
"look","lets","gets","give","goes","happen","hes","forget","end","everyone","everything","dems","day","delaware","dem", "come",
"check","another","actually","gotta","your" )
cor=tm_map(cor,removeWords,mystopwords)
cor <- tm_map(cor, stripWhitespace)
cor<-tm_map(cor, stemDocument)
cor<-tm_map(cor,PlainTextDocument)
# Term frequencies from the document-term matrix.
dtm <- DocumentTermMatrix(cor)
m<-as.matrix(dtm)
freq<-sort(colSums(m),decreasing=TRUE)
findFreqTerms(dtm,lowfreq = 1000,highfreq =1500 )
d <- data.frame(word = names(freq),freq=freq)
options(max.print = 100000)
# Word cloud and top-10 frequency bar plot.
cloud<-wordcloud(words = d$word, freq = d$freq, min.freq = 50,
max.words=200, width=2000,height=1000,random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "Accent"))
barplot(d[1:10,]$freq, las = 2, names.arg = d[1:10,]$word,
col=c("lightblue", "mistyrose", "lightcyan","lavender", "cornsilk"),
ylab = "Word frequencies")
# NRC emotion scoring of the (partially) cleaned text.
library(pander)
library(syuzhet)
mySentiment <- get_nrc_sentiment(text)
angry_items <- which(mySentiment$anger > 0)
text[angry_items]
pander::pandoc.table(mySentiment[, 1:8], split.table = Inf)
barplot(
sort(colSums(prop.table(mySentiment[, 1:8]))),
horiz = TRUE,
cex.names = 0.7,
las = 1,
col=c("darkblue","red","yellow","orange","pink","green","blue"),
main = "Emotions in Sample text", xlab="Percentage"
)
# NOTE(review): `corp` is not defined anywhere above -- the corpus variable
# is named `cor`; this line will error as written. Verify intended object.
df<-data.frame(text=unlist(sapply(corp, `[`, "content")),
stringsAsFactors=F)
# Positive/negative opinion lexicons, stemmed to match the stemmed corpus.
neg <-read.csv(file = "negative.csv",header=FALSE,sep=",",stringsAsFactors = FALSE)
pos <-read.csv(file="positive.csv", head=TRUE,sep=",", comment.char=';',stringsAsFactors = FALSE)
neg <- unlist(neg)
neg <- stemDocument(neg)
pos <- unlist(pos)
pos <- stemDocument(pos)
# Dictionary-based sentiment scorer.
#
# For each row of `dat` (a ONE-column data frame of cleaned, space-separated
# text), counts words found in the positive and negative lexicons and labels
# the row POSITIVE / NEGATIVE / NEUTRAL by the sign of (pos hits - neg hits).
#
# Args:
#   dat: one-column data.frame of character text (one document per row);
#        a single-column data.frame so dat[i, ] drops to a character scalar.
#   pos: character vector of positive terms (stemmed to match the text).
#   neg: character vector of negative terms.
# Returns: data.frame with columns Text, Label, Scores (one row per input).
summa <- function(dat, pos, neg)
{
  n <- nrow(dat)
  # Preallocate result columns.
  Text <- character(n)
  Label <- character(n)
  Scores <- numeric(n)
  for (i in seq_len(n)) {
    doc <- dat[i, ]
    words <- unlist(strsplit(doc, split = " "))
    # match() yields positions (or NA); count the non-NA hits.
    n.pos <- sum(!is.na(match(words, pos)))
    n.neg <- sum(!is.na(match(words, neg)))
    score <- n.pos - n.neg
    Text[i] <- doc
    Scores[i] <- score
    if (score > 0) {
      Label[i] <- "POSITIVE"
    } else if (score < 0) {
      Label[i] <- "NEGATIVE"
    } else {
      Label[i] <- "NEUTRAL"
    }
  }
  data.frame(Text, Label, Scores, stringsAsFactors = FALSE)
}
# Score every document and export the labelled results.
m <- summa(df,pos,neg)
dim(m)
write.csv(m, file="sentiment.csv")
count(m)
# NOTE(review): `data2` (and columns id/frequency/Group) is never defined in
# this script, so this ggplot call will error as written -- verify the
# intended data frame.
ggplot(data2, aes(x=id, y=frequency, fill=Group)) +
  geom_bar(position="dodge", # prevents overlapping
           stat = "identity",
           colour="black",
           size=0.5
  )
|
e3b44abaddcd35c8bd5963d0d84030f79238ddd8
|
037546e5139aedd8522d4b15ed1bde29f5629774
|
/Square Lakes Together Armley.R
|
3f8d235182f7bc206641d725952e836456288b8e
|
[] |
no_license
|
Rivers-Project-2018/Jack-Willis
|
280761e9783e1525f9a65f9c13a716b35b2e7957
|
adca82dd3b278303cc1a23ba1e99494264e3fd94
|
refs/heads/master
| 2020-04-08T11:59:26.412175
| 2019-03-18T16:58:36
| 2019-03-18T16:58:36
| 159,328,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,090
|
r
|
Square Lakes Together Armley.R
|
# Four square "cost-share" diagrams, one per flood-defence scheme option.
# In each panel the blue strip's width encodes the storage scheme's share of
# total benefit; annotations give cost totals and cost per 1% of benefit.
par(mfcol = c(2,2))
# Panel 1: flood walls + Calverley storage (8% storage share).
plot(1, type="n", xlab="x [m]", ylab="y [m]", xlim=c(0,2150), ylim=c(0,2150))
rect(xleft=0, ybottom=0, xright=172, ytop=2150, lwd=2, col = "blue")
rect(xleft=172, ybottom=0, xright=2150, ytop=2150, col="lightblue", lwd=2)
arrows(x0=0, y0=1075, x1=2150, y1=1075, length = 0.05, code=3)
arrows(x0=172, y0=500, x1=2150, y1=500, length = 0.05, code=3)
text(x=1075, y=1200, label="Total: 100%", cex = 0.9)
text(x=1075, y=950, label="£75M or £0.75M/1%", cex = 0.8)
text(x=1075, y=580, label="Flood Walls: 92%", cex=0.9)
text(x=1075, y=420, label="£65M or £0.707M/1%", cex=0.8)
arrows(x0=0, y0=1900, x1=172, y1=1900, length = 0.05, code=3)
text(x=750, y=1975, label="Calverley Storage: 8%", cex=0.9)
text(x=750, y=1825, label="£10M or £1.25M/1%", cex=0.8)
# Panel 2: flood walls + Rodley storage (12% storage share).
plot(1, type="n", xlab="x [m]", ylab="y [m]", xlim=c(0,2150), ylim=c(0,2150))
rect(xleft=0, ybottom=0, xright=258, ytop=2150, lwd=2, col = "blue")
rect(xleft=258, ybottom=0, xright=2150, ytop=2150, col="lightblue", lwd=2)
arrows(x0=0, y0=1075, x1=2150, y1=1075, length = 0.05, code=3)
arrows(x0=258, y0=500, x1=2150, y1=500, length = 0.05, code=3)
text(x=1075, y=1200, label="Total: 100%", cex = 0.9)
text(x=1075, y=950, label="£76.2M or £0.762M/1%", cex = 0.8)
text(x=1075, y=580, label="Flood Walls: 88%", cex=0.9)
text(x=1075, y=420, label="£62.2M or £0.707M/1%", cex=0.8)
arrows(x0=0, y0=1900, x1=258, y1=1900, length = 0.05, code=3)
text(x=600, y=1975, label="Rodley Storage: 12%", cex=0.9)
text(x=600, y=1825, label="£14M or £1.17M/1%", cex=0.8)
# Panel 3: flood walls + combined Rodley/Calverley storage (14% share).
plot(1, type="n", xlab="x [m]", ylab="y [m]", xlim=c(0,2150), ylim=c(0,2150))
rect(xleft=0, ybottom=0, xright=301, ytop=2150, lwd=2, col = "blue")
rect(xleft=301, ybottom=0, xright=2150, ytop=2150, col="lightblue", lwd=2)
arrows(x0=0, y0=1075, x1=2150, y1=1075, length = 0.05, code=3)
arrows(x0=301, y0=500, x1=2150, y1=500, length = 0.05, code=3)
text(x=1075, y=1200, label="Total: 100%", cex = 0.9)
text(x=1075, y=950, label="£84.8M or £0.848M/1%", cex = 0.8)
text(x=1075, y=580, label="Flood Walls: 86%", cex=0.9)
text(x=1075, y=420, label="£60.8M or £0.707M/1%", cex=0.8)
arrows(x0=0, y0=1900, x1=301, y1=1900, length = 0.05, code=3)
text(x=750, y=1975, label="Rodley/Calverly Storage: 14%", cex=0.9)
text(x=750, y=1825, label="£24M or £1.71M/1%", cex=0.8)
# Panel 4: flood walls + Cononley Washlands / Holden Park (50.4% share).
plot(1, type="n", xlab="x [m]", ylab="y [m]", xlim=c(0,2150), ylim=c(0,2150))
rect(xleft=0, ybottom=0, xright=1084, ytop=2150, lwd=2, col = "blue")
rect(xleft=1084, ybottom=0, xright=2150, ytop=2150, col="lightblue", lwd=2)
arrows(x0=0, y0=1075, x1=2150, y1=1075, length = 0.05, code=3)
arrows(x0=1084, y0=500, x1=2150, y1=500, length = 0.05, code=3)
text(x=1075, y=1200, label="Total: 100%", cex = 0.9)
text(x=1075, y=950, label="£70.1M or £0.701M/1%", cex = 0.8)
text(x=1600, y=580, label="Flood Walls: 49.6%", cex=0.9)
text(x=1600, y=420, label="£35.1M or £0.707M/1%", cex=0.8)
arrows(x0=0, y0=1900, x1=1084, y1=1900, length = 0.05, code=3)
text(x=780, y=1975, label="Cononley Washlands and Holden Park: 50.4%", cex=0.9)
text(x=780, y=1825, label="£35M or £0.69M/1%", cex=0.8)
|
114a58d595f2153f40197f9d190b3ece91d1b0b4
|
50a02ea701f5b7b2e1c1dc549c386646e896baa1
|
/man/matrix2syt.Rd
|
8ca862102868f4efbd10dd77d96950a658c4c55a
|
[] |
no_license
|
stla/syt
|
f39a02e1b55e07918120344ab21ae9c8436c3890
|
8976f87401b34a52b1ca2d7f33bf54530db550c0
|
refs/heads/master
| 2021-06-18T12:29:52.899574
| 2021-01-16T09:47:48
| 2021-01-16T09:47:48
| 140,968,144
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 434
|
rd
|
matrix2syt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/syt2matrix.R
\name{matrix2syt}
\alias{matrix2syt}
\title{Standard Young tableau from a matrix}
\usage{
matrix2syt(M)
}
\arguments{
\item{M}{a matrix}
}
\value{
A standard Young tableau.
}
\description{
Converts a matrix to a standard Young tableau.
}
\examples{
M <- rbind(c(1,2,6), c(3,5,0), c(4,0,0))
matrix2syt(M)
}
\seealso{
\code{\link{syt2matrix}}
}
|
e780ea0d492eb6d4f9898b465ca37c227474aae3
|
fb0b8c413ae95c961e0351eccb22263a2d0917dd
|
/man/divisible.Rd
|
3f043b9f081a23fbdddd551bc53bf22d095c38f3
|
[] |
no_license
|
cran/hutilscpp
|
f3acfdf2af949df69c8887d937050aa3daf39a02
|
22994140414c52919756eb799ddd10ed4d666f74
|
refs/heads/master
| 2022-10-14T17:45:09.954044
| 2022-10-07T07:00:02
| 2022-10-07T07:00:02
| 168,961,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 772
|
rd
|
divisible.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/divisible.R
\name{divisible}
\alias{divisible}
\alias{divisible2}
\alias{divisible16}
\title{Divisibility}
\usage{
divisible(x, d, nThread = getOption("hutilscpp.nThread", 1L))
divisible2(x, nThread = getOption("hutilscpp.nThread", 1L))
divisible16(x, nThread = getOption("hutilscpp.nThread", 1L))
}
\arguments{
\item{x}{An integer vector}
\item{d}{\code{integer(1)}. The divisor.}
\item{nThread}{The number of threads to use.}
}
\value{
Logical vector: \code{TRUE} where \code{x} is divisible by \code{d}.
\code{divisible2},\code{divisible16} are short for (and quicker than)
\code{divisible(x, 2)} and \code{divisible(x, 16)}.
}
\description{
Divisibility
}
|
2fce555080710e43aaa30436a1dc04ee51ac77e1
|
0fbec50ff92d15c52df3597330356a5f789bf887
|
/SigTaxa/ThreeModel_HeatMap.R
|
9fccc90ea8b5b2ad62fd52a61e49f3e79b39f498
|
[] |
no_license
|
lynchlab-ucsf/lab-code
|
b54e63f650e68a78d6c28b0b28b3e28835b775bb
|
324d273c0afcbfb9edea2f42f9b58e599e0f62fd
|
refs/heads/master
| 2022-05-30T22:27:57.912333
| 2022-05-13T22:37:40
| 2022-05-13T22:37:40
| 182,319,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,556
|
r
|
ThreeModel_HeatMap.R
|
# Three-Model Heat-Map
```{r}
library(extrafont)
# Function to bring in and format all Diff-Abundance Data
# Load one differential-abundance result file and keep only the taxa that
# pass the q-value cutoff, tagging each surviving row with the analysis name.
#
# Args:
#   csvfile:     path to a CSV containing (at least) a `qval.best` column.
#   outcome_val: label stored in a new `outcome` column of the result.
# Returns: data frame of rows with qval.best < 0.1, plus `outcome`.
make_plots <- function(csvfile, outcome_val) {
  full.results <- read.csv(csvfile)
  significant <- full.results[full.results$qval.best < 0.1, ]
  significant$outcome <- outcome_val
  significant
}
# Phylogenetic tip order used to sort OTUs in the heat map.
tip.order <- read.table("/data/Users/kmccauley/PROSE_NEW/AnalysisData/Tip_Order.txt",sep="\t",header=F)
# Stack the significant taxa from every analysis into one data frame.
fin <- rbind(make_plots("/data/Users/kmccauley/PROSE_NEW/AnalysisData/exac_ThreeModel_subset.csv","Exac (Overall)"),
make_plots("/data/Users/kmccauley/PROSE_NEW/AnalysisData/RV_ThreeModel_subset.csv","RV (Overall)"),
make_plots("/data/Users/kmccauley/PROSE_NEW/AnalysisData/HRV_A_ThreeModel_subset.csv","HRV-A (Overall)"),
make_plots("/data/Users/kmccauley/PROSE_NEW/AnalysisData/HRV_B_ThreeModel_subset.csv","HRV-B (Overall)"),
make_plots("/data/Users/kmccauley/PROSE_NEW/AnalysisData/HRV_C_ThreeModel_subset.csv","HRV-C (Overall)"))
# Fix the display order of analyses (the level list includes Placebo/ICS/
# Xolair strata even though only the "Overall" analyses are loaded above).
fin$outcome <- factor(fin$outcome, levels=c("Exac (Overall)","RV (Overall)","HRV-A (Overall)","HRV-B (Overall)","HRV-C (Overall)","Exac (Placebo)","RV (Placebo)","HRV-A (Placebo)","HRV-B (Placebo)","HRV-C (Placebo)","RV (ICS)","HRV-A (ICS)","HRV-B (ICS)","HRV-C (ICS)","Exac (Xolair)","RV (Xolair)","HRV-A (Xolair)","HRV-B (Xolair)","HRV-C (Xolair)"))
# Split the semicolon-delimited taxonomy strings into a padded matrix of
# ranks, then strip the rank prefixes (e.g. "g__").
taxanames <- strsplit(as.character(fin$taxonomy),";")
mat <- t(sapply(taxanames,
function(x,m) c(x,rep(NA,m-length(x))),
max(rapply(taxanames,length))))
newnames <- as.data.frame(cbind(substr(mat[,1],4,35),substr(mat[,2:7],5,35)))
names(newnames) <- c("Kingdom","Phylum","Class","Order","Family","Genus","Species")
# Best available name: Genus, falling back to Family, then Order.
newnames$bactnames <- as.character(newnames$Genus)
newnames$bactnames[newnames$bactnames == "" | is.na(newnames$bactnames)] <- as.character(newnames$Family[newnames$bactnames == "" | is.na(newnames$bactnames)])
newnames$bactnames[newnames$bactnames == "" | is.na(newnames$bactnames)] <- as.character(newnames$Order[newnames$bactnames == "" | is.na(newnames$bactnames)])
bactnames <- newnames$bactnames
# Manual taxonomy correction.
bactnames[bactnames == "Planococcaceae"] <- "Staphylococcaceae"
fin <- cbind(fin,bactnames)
# Display label: "Genus (OTU~id)"; "~" replaces "_" for plotmath parsing.
fin$OTUname2 <- gsub("_","~",fin$OTUname)
fin$bactnames2 <- paste0(bactnames," (",fin$OTUname2,")")
fin$finalnames <- factor(fin$bactnames2,levels=names(sort(table(fin$bactnames2))))
# Direction of effect: coefficient < 1 => enriched, > 1 => depleted.
fin$mean.diff.bin[fin$best.coef < 1] <- 1
fin$mean.diff.bin[fin$best.coef > 1] <- 0
fin$mean.diff.bin <- factor(fin$mean.diff.bin,labels=c("Enriched","Depleted"))
# Change the direction of the "weighted mean difference"
fin$wgt_mean_diff <- -fin$wgt_mean_diff
dim(fin)
# Order OTUs by phylogenetic tip order for the heat map y-axis.
fin$OTUname_sorted <- factor(fin$OTUname,levels=tip.order$V1)
fin <- fin[order(fin$OTUname_sorted),]
#Drop obs sig in fewer than 3 analyses
#obs.to.drop <- table(fin$OTUname)[table(fin$OTUname) < mean(table(fin$OTUname))]
# Split into two halves for the two heat-map panels.
fin1 <- fin[1:(nrow(fin)/2),]
# NOTE(review): `:` binds tighter than `+`, so this evaluates as
# (nrow(fin)/2) + (1:nrow(fin)) -- indices run past nrow(fin) and produce
# NA rows. Likely intended ((nrow(fin)/2)+1):nrow(fin); verify.
fin2 <- fin[(nrow(fin)/2)+1:nrow(fin),]
# Drop unused factor levels and re-level by order of appearance.
fin1$finalnames <- factor(fin1$finalnames)
fin1$finalnames <- factor(fin1$finalnames,unique(as.character(fin1$finalnames)))
fin2$finalnames <- factor(fin2$finalnames)
fin2$finalnames <- factor(fin2$finalnames,unique(as.character(fin2$finalnames)))
```
### Heat Map
```{r fig.height=9,fig.width=4,dpi=500}
library(ggplot2)
library(plotrix)
#reorder_size <- function(x) {
#  factor(x, levels = tip.order$V1)
#}
# Build plotmath expressions so genus names render in italics in the axis
# labels: italic('Genus')~(OTU~id).
labs1 <- sapply(strsplit(as.character(unique(fin$finalnames)), " "),
function(x) {
parse(text = paste0("italic('", x[1], "')~", x[2]))
})
labs2 <- sapply(strsplit(as.character(unique(fin2$finalnames)), " "),
function(x) {
parse(text = paste0("italic('", x[1], "')~", x[2]))
})
#The names weren't lining up, so I thought it might have something to do with the inherent underlying order of the factors
fin$finalnames <- factor(fin$finalnames,levels=unique(as.character(fin$finalnames)))
#png("HeatMapFig.png",width=500,height=2500)
p <- ggplot(fin, aes(outcome, finalnames,fill=mean.diff.bin)) + geom_tile() + theme(axis.text.x = element_text(angle = 60, hjust = 1), text=element_text(family="Avenir",size=5),legend.title=element_blank()) + xlab(" ") + ylab(" ") + scale_y_discrete(labels=labs1) + coord_fixed(ratio=1)
p
# NOTE(review): the matching png() call above is commented out, so this
# dev.off() closes whatever device is currently open -- confirm intent.
dev.off()
q <- ggplot(fin2, aes(outcome, finalnames,fill=mean.diff.bin)) + geom_tile() + theme(axis.text.x = element_text(angle = 60, hjust = 1),legend.title=element_blank()) + xlab(" ") + ylab(" ") #+ scale_y_discrete(labels=labs2)
q
```
### Make Table
```{r}
# Keep only the columns needed for the publication table.
table <- subset(fin, select=c("OTUname","wgt_mean_diff","best.mod","best.pval","qval.best","mean.diff.bin","outcome","taxonomy"))
#Thinking that I should separate out into my four groups and then merge back together somehow... Maybe consider changing variable names instead of doing suffixes, though.
#Also need to figure out how to highlight cells a certain way, though
#Also, check the weighted mean difference value to make sure that it's right (or the enriched/depleted value)
# One subset per analysis, then a sequence of full outer joins on OTUname
# so each OTU becomes a single wide row across all five analyses.
exac <- subset(table, outcome=="Exac (Overall)")
rv <- subset(table, outcome=="RV (Overall)")
hrva <- subset(table, outcome=="HRV-A (Overall)")
hrvb <- subset(table, outcome=="HRV-B (Overall)")
hrvc <- subset(table, outcome=="HRV-C (Overall)")
make.table1 <- merge(exac,rv,all=TRUE,by="OTUname",suffixes=c(".a",".b"))
make.table2 <- merge(make.table1,hrva, all=TRUE,by="OTUname",suffixes=c(".c",".d"))
make.table3 <- merge(make.table2,hrvb, all=TRUE,by="OTUname",suffixes=c(".e",".f"))
make.table4 <- merge(make.table3,hrvc, all=TRUE,by="OTUname",suffixes=c(".g",".h"))
write.table(make.table4,"/data/Users/kmccauley/PROSE_NEW/PublicationTables/OTU_DE_Table.txt",sep="\t",quote=F,row.names=FALSE)
```
|
7f9abb172a8ed301a0b75c0bb034b2b7c287a75c
|
d0ea7cb41a07daafa3a8ecc46525701adfe0b26e
|
/man/infqnt.Rd
|
a70d4bfc21359110d87fbc671e5f6a2b1997df58
|
[] |
no_license
|
cran/timeslab
|
72e988debce4a9805f7cf6987bdf009b685e1b22
|
ba1972872f3db49ff069195197c30762767f3835
|
refs/heads/master
| 2023-04-09T15:02:14.680416
| 1999-01-01T01:20:17
| 1999-01-01T01:20:17
| 17,719,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
rd
|
infqnt.Rd
|
\name{infqnt}
\alias{infqnt}
\description{Plot Informative Quantile Function of a Data Set}
\title{Plot Informative Quantile Function of a Data Set}
\usage{infqnt(x)}
\arguments{
\item{x}{Array of length \eqn{n} containing the data.}
}
\value{
\item{infqnt}{returns a plot of the informative quantile
function for the data set \code{x}.}
}
|
a4084b1ff47346d93a4aa8dfae547534ba09c06f
|
d75b7bc015b47d94254bcc9334ba15972d3ec9a1
|
/4. FOURTH YEAR/Medical Statistics/2. Special trial Designs/Crosover (Problemas Masculinos).R
|
8c2327b816d725b9d5fe0ff4d02d01dee529e50b
|
[] |
no_license
|
laurajuliamelis/BachelorDegree_Statistics
|
a0dcfec518ef70d4510936685672933c54dcee80
|
2294e3f417833a4f3cdc60141b549b50098d2cb1
|
refs/heads/master
| 2022-04-22T23:55:29.102206
| 2020-04-22T14:14:23
| 2020-04-22T14:14:23
| 257,890,534
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,295
|
r
|
Crosover (Problemas Masculinos).R
|
# Crossover clinical trial: to determine a possible cardiovascular effect of
# Sildenafil during exercise in men with coronary problems, a crossover trial
# was set up in which the response variable is a fatigue index measured after a
# stress test. The treatment (or the corresponding placebo) was given one hour
# before each test, with tests separated by a three-day washout period. The data
# columns are: A1, A2, B1, B2, where A means "treated with Sildenafil",
# B "treated with placebo", and {1,2} refers to the period.
# Data
bd <- read.csv("Crossover.csv", header=T, sep=";")
bd <- as.data.frame(bd)
# 1. What is the point estimate of the Sildenafil effect?
n1 <- 9 # CHANGE ACCORDING TO THE DATA!!
n2 <- 12 # CHANGE ACCORDING TO THE DATA!!
# Within-subject period differences by sequence group (column 4 = response)
d_i1 <- bd[1:n1,4]- bd[(n1+1):(2*n1),4]
d_i2 <- bd[(2*n1+1):((2*n1)+n2),4]- bd[((2*n1)+n2+1):nrow(bd),4]
d1 <- mean(d_i1)
d2 <- mean(d_i2)
# Direct treatment effect: half the difference of the group mean differences
efecto <- (d1-d2)/2
efecto # RESULT 1
# 2. Give the lower end of the 95% CI for the effect of Sildenafil treatment.
var1 <- var(d_i1)
var2 <- var(d_i2)
# Pooled variance of the within-subject differences
s2 <- (((n1-1)*var1)+((n2-1)*var2))/(n1+n2-2)
s <- sqrt(s2)
pvalor <- qt(0.025, (n1+n2-2), lower.tail = F)
IC_L <- efecto - (0.5*pvalor*s*sqrt((1/n1) + (1/n2)))
IC_L # RESULT 2
# 3. Same for the upper end.
IC_U <- efecto + (0.5*pvalor*s*sqrt((1/n1) + (1/n2)))
IC_U # RESULT 3
# 4. Does having done the test before or after matter? Now obtain the CI
# estimate of the period effect; lower end:
efecto_período <- (d1 + d2)/2
efecto_período
IC_L_período <- efecto_período - (0.5*pvalor*s*sqrt((1/n1) + (1/n2)))
IC_L_período # RESULT 4
# 5. Same for the upper end.
IC_U_período <- efecto_período + (0.5*pvalor*s*sqrt((1/n1) + (1/n2)))
IC_U_período # RESULT 5
# 6. Does the treatment show carry-over effects despite the washout period?
# We already know this is a low-power test, but estimate the possible delayed
# (carry-over) effect of the treatment; lower end:
suma1 <- bd[1:n1,4]+bd[(n1+1):(2*n1),4]
suma2 <- bd[(2*n1+1):((2*n1)+n2),4]+ bd[((2*n1)+n2+1):nrow(bd),4]
efecto_tardío <- mean(suma1)-mean(suma2)
efecto_tardío
var1_tardío <- var(suma1)
var2_tardío <- var(suma2)
s2_tardío <- (((n1-1)*var1_tardío)+((n2-1)*var2_tardío))/(n1+n2-2)
s_tardío <- sqrt(s2_tardío)
pvalor <- qt(0.025, (n1+n2-2), lower.tail = F)
IC_L_tardío <- efecto_tardío - (pvalor*s_tardío*sqrt((1/n1) + (1/n2)))
IC_L_tardío # RESULT 6
# 7. Same for the upper end.
IC_U_tardío <- efecto_tardío + (pvalor*s_tardío*sqrt((1/n1) + (1/n2)))
IC_U_tardío # RESULT 7
# 8. What is the estimated within-subject variance?
intra <- s2/2
intra # RESULT 8
# 9. Find an estimate of the between-subject variance.
entre <- (s2_tardío-(2*intra))/4
entre # RESULT 9
# 10. Finally, compute the power of the test performed on the direct effect,
# assuming the within-subject standard deviation is 3.5 units and the possible
# Sildenafil effect is an increase of 6 points in the response.
# Use the per-group sizes obtained in this trial and a two-sided 5% risk.
|
8b99bd7234cee755ef915f3171f1d7e8eb87cf34
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610555140-test.R
|
19f3aa5e7533c77d6b9d36ece729f75bd2dc3ae9
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 240
|
r
|
1610555140-test.R
|
# libFuzzer/valgrind regression input for the internal C++ routine
# biwavelet:::rcpp_row_quantile (row-wise quantile of a numeric matrix).
# Stored arguments: a 3x3 matrix of extreme/subnormal doubles and q = 0.
testlist <- list(data = structure(c(2.78671099579809e-309, 2.34365931087967e-308, 1.9285913724733e-168, 2.84809453888922e-306, 0, 0, 0, 0, 0), .Dim = c(3L, 3L)), q = 0)
# Replay the recorded case and print the result's structure for inspection.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
63a571ba2dcdc1991c6fea371db06e787ce458bb
|
9fe6b499985b1573050a3309165b217acaf50034
|
/man/p_oneFhatone.Rd
|
906e09b84fa1d0fde007cf72035b110dc77843ea
|
[] |
no_license
|
chvrngit/wmpvaer
|
efa2f381ee006bf852c23b9869ba63ed391b1799
|
d1f006b00e7b6fd1a9943f382f0d55dfe640cfd7
|
refs/heads/master
| 2022-11-10T21:16:43.369196
| 2019-11-30T21:30:30
| 2019-11-30T21:30:30
| 107,996,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 683
|
rd
|
p_oneFhatone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects_and_functions.r
\name{p_oneFhatone}
\alias{p_oneFhatone}
\title{p_oneFhatone, computation of an approximate 1F1 value}
\usage{
p_oneFhatone(s, n, m, omega)
}
\arguments{
\item{s}{The value needed for the first derivative to equal log(Wilks)}
\item{n}{The error Df of the one-way MANOVA analysis considered}
\item{m}{The hypothesis Df of the one-way MANOVA analysis considered}
\item{omega}{A vector of eigenvalues of the Wilks Non-Centrality Parameter
corresponding to one independent variable.}
}
\description{
p_oneFhatone, computation of an approximate 1F1 value
}
|
61252276fdedc8623c4efd2cbbfdf378c10c12db
|
84ef24f89f4bf70a019783cfb34d553b2199c460
|
/MakeDict.r
|
c7f20e903d377c08437898be9004dcdec826835c
|
[] |
no_license
|
xxxjvila/rutines
|
cdfe0d02452e783bdd8fb71929e37a6e283e8543
|
71449c53973bf126bd028432242cbd96d40c8a96
|
refs/heads/master
| 2020-12-02T08:18:18.843230
| 2017-07-10T17:50:05
| 2017-07-10T17:50:05
| 96,804,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 626
|
r
|
MakeDict.r
|
MakeDict <- function(data){
  # Build a data dictionary for an SPSS-style data frame: one row per column,
  # with the variable name, its "vari.label" attribute, and its "value.labels"
  # attribute rendered as "name1=val1;name2=val2;..." sorted ascending by value.
  #
  # data: a data.frame whose columns may carry a "vari.label" attribute
  #       (a length-1 label) and/or a "value.labels" attribute (named vector).
  # Returns a data.frame with columns varname, label, valuelabels; missing
  # attributes yield "" (the original mapped labels through the string "NULL",
  # which would also blank a genuine label equal to "NULL").
  xdat <- data

  # Render one column's "value.labels" attribute, or "" when absent.
  format_value_labels <- function(x) {
    vl <- attr(x, "value.labels")
    if (is.null(vl)) return("")
    vl <- sort(vl)
    paste(paste0(names(vl), "=", vl), collapse = ";")
  }

  # Extract one column's "vari.label" attribute, or "" when absent.
  format_vari_label <- function(x) {
    lab <- attr(x, "vari.label")
    if (is.null(lab)) "" else as.character(lab)[1]
  }

  # vapply is type-stable and safe for a zero-column data frame (the original
  # 1:ncol() loop failed on 0 columns and grew the result vector element-wise).
  value.labels <- vapply(xdat, format_value_labels, character(1), USE.NAMES = FALSE)
  vari.label <- vapply(xdat, format_vari_label, character(1), USE.NAMES = FALSE)

  data.frame(varname = names(xdat), label = vari.label,
             valuelabels = value.labels, stringsAsFactors = FALSE)
}
|
d66bacd29fb8bec422443ffbd29e47d3ab1b91cc
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5345_7/rinput.R
|
eed13a767b57f0935573bcdd64a991c2d1f37e7e
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read the Newick tree for cluster 5345_7, remove
# its root with ape::unroot(), and write the unrooted tree back out
# (input for a downstream codeml run -- presumably; confirm with the pipeline).
library(ape)
testtree <- read.tree("5345_7.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5345_7_unrooted.txt")
|
c006cc87bb017a52c17b25452a3c89c9009e20ab
|
092e6cb5e99b3dfbb089696b748c819f98fc861c
|
/src/smoothLDS_SS_withOffsetsAndInputs.R
|
64aa632de170d8b6b82dbf534c8ec93608e77c57
|
[] |
no_license
|
joacorapela/kalmanFilter
|
522c1fbd85301871cc88101a9591dea5a2e9bc49
|
c0fb1a454ab9d9f9a238fa65b28c5f6150e1c1cd
|
refs/heads/master
| 2023-04-16T09:03:35.683914
| 2023-04-10T16:36:32
| 2023-04-10T16:36:32
| 242,138,106
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
smoothLDS_SS_withOffsetsAndInputs.R
|
# Fixed-interval (Rauch-Tung-Striebel) smoother for a linear dynamical system
# in state-space form, companion to a Kalman filter run with offsets and inputs.
#
# Arguments:
#   B            state-transition matrix (M x M).
#   u, C, c, Q   offset/input terms and state-noise covariance; they appear
#                only in the commented-out correction terms below, so the
#                active code ignores them -- NOTE(review): confirm intended.
#   xnn, Vnn     filtered means/covariances, dims (M,1,N) and (M,M,N).
#   xnn1, Vnn1   one-step-ahead predicted means/covariances, same dims.
#   initStateAt  1: initial state is the state at time 1; 0: a prior with
#                mean m0 and covariance V0 is used (then m0/V0 are required).
#   m0, V0       prior mean and covariance for the time-0 state.
#
# Returns a list with smoothed means xnN, covariances VnN and smoother gains
# Jn; when initStateAt == 0 it additionally returns x0N, V0N and J0 for the
# smoothed state at time 0.
smoothLDS_SS_withOffsetsAndInputs <- function(B, u, C, c, Q, xnn, Vnn, xnn1, Vnn1, initStateAt=0, m0=NA, V0=NA) {
if(initStateAt==1 && (!is.na(m0) || !is.na(V0)))
warning("m0 and V0 are not used when initStateAt==1")
if(initStateAt==0 && (is.na(m0) || is.na(V0)))
stop("m0 and V0 are needed when initStateAt==0")
nObs <- dim(xnn)[3]
M <- nrow(B)
# Preallocate smoothed estimates and smoother gains
xnN <- array(NA, dim=c(M, 1, nObs))
VnN <- array(NA, dim=c(M, M, nObs))
Jn <- array(NA, dim=c(M, M, nObs))
# Backward recursion starts from the last filtered estimate
xnN[,,nObs] <- xnn[,,nObs]
VnN[,,nObs] <- Vnn[,,nObs]
for(n in nObs:2) {
# RTS gain: J_{n-1} = V_{n-1|n-1} B' (V_{n|n-1})^{-1}
Jn[,,n-1] <- t(solve(Vnn1[,,n], B%*%Vnn[,,n-1]))
# xnN[,,n-1] <- xnn[,,n-1]+Jn[,,n-1]%*%(xnN[,,n]-xnn1[,,n])-(Vnn[,,n-1]-Jn[,,n-1]%*%Vnn1[,,n]%*%t(Jn[,,n-1]))%*%t(B)%*%solve(Q,u+C%*%c[,,n])
xnN[,,n-1] <- xnn[,,n-1]+Jn[,,n-1]%*%(xnN[,,n]-xnn1[,,n])
VnN[,,n-1] <- Vnn[,,n-1]+Jn[,,n-1]%*%(VnN[,,n]-Vnn1[,,n])%*%t(Jn[,,n-1])
}
if(initStateAt==1) {
# initial state x01 and V01
# no need to return the smooth estimates of the state at time 0: x0N and V0N
answer <- list(xnN=xnN, VnN=VnN, Jn=Jn)
return(answer)
} else {
if(initStateAt==0) {
# initial state m0 and V0
# return the smooth estimates of the state at time 0: x0N and V0N
J0 <- t(solve(Vnn1[,,1], B%*%V0))
# x0N <- m0+J0%*%(xnN[,,1]-xnn1[,,1])-(V0-J0%*%Vnn1[,,1]%*%t(J0))%*%t(B)%*%solve(Q, u+C%*%c[,,1])
x0N <- m0+J0%*%(xnN[,,1]-xnn1[,,1])
V0N <- V0+J0%*%(VnN[,,1]-Vnn1[,,1])%*%t(J0)
answer <- list(xnN=xnN, VnN=VnN, Jn=Jn, x0N=x0N, V0N=V0N, J0=J0)
# browser()
return(answer)
} else {
# Defensive: any value other than 0/1 is rejected
stop(sprintf("Invalid initialStateAt=%d", initStateAt))
}
}
}
|
2202a61ebb3612328afcaca6dd83ac5818c8e9e8
|
dc7f48710f8761ee3a7bbb7e43e39b45ee181631
|
/PIUMET/Non_Obese_NAFL/05.selecting_diff_features.r
|
386aea24403d51efa85f7c4ca43c66307860c695
|
[] |
no_license
|
ddasic123/NAFLD
|
0d6fe42274a6fe03ddc7812a6d4db3e94aad14e3
|
60997bf4100aed32e3880ea37226c1ea786da33c
|
refs/heads/main
| 2023-09-05T21:09:23.902002
| 2021-11-10T02:44:38
| 2021-11-10T02:44:38
| 419,341,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
r
|
05.selecting_diff_features.r
|
# Select differentially expressed genes and differential lipid features for the
# non-obese NAFL comparison, and save the four ID vectors for downstream
# (PIUMET) analysis.
# NOTE(review): rm(list = ls()) and a hard-coded setwd() make this script
# session-destructive and machine-specific.
rm(list = ls())
library(data.table);library(plyr);library(igraph)
setwd("d:/data/NAFLD/")
#
metabolite = "lipid_bile"
disease = "non_obese_nafl"
#checked!!!
# Genes: up-/down-regulated by logFC sign at adjusted p < 0.05
dat1 = read.table("SNU_DEG_nafld.txt")
gene_up = rownames(dat1)[dat1$logFC > 0 & dat1$adj.P.Val < 0.05]
gene_do = rownames(dat1)[dat1$logFC < 0 & dat1$adj.P.Val < 0.05]
#
# Lipid features for this metabolite class / disease group
storage = paste0(metabolite, "_", disease, ".txt")
dat1 = fread(storage)
dat1 = data.frame(dat1)
#
# Direction by fold change around 1; NOTE(review): raw p < 0.2 is a very
# permissive screening cutoff -- confirm this is intentional.
idx1 = which(dat1$p < 0.2 & dat1$fc > 1)
idx2 = which(dat1$p < 0.2 & dat1$fc < 1)
lipid_up = dat1$SNU_ID[idx1]
lipid_do = dat1$SNU_ID[idx2]
# Persist all four selections together for the next pipeline step
storage = paste0("diff_features_", metabolite, "_",disease, ".rdata")
save(file = storage, gene_up, gene_do, lipid_up, lipid_do)
|
f9dfc8a306f2ab465d65bafec9b5a9c235939fe2
|
68c6d5dee2884a7e361c0633fd9bf158d18df60c
|
/Exploratory Data Analysis/ex2/plot5.R
|
3c84758773047d72a4361c7b24c998568d84f627
|
[] |
no_license
|
OlegRozenshtain/datasciencecoursera
|
961c56bea1191ba2743646dead70cec932998673
|
ef2f464196d48712b5b65cc4ec490cc17b4cbeba
|
refs/heads/master
| 2021-01-10T22:11:06.105728
| 2015-08-02T16:16:53
| 2015-08-02T16:16:53
| 27,956,877
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 2,001
|
r
|
plot5.R
|
# How have emissions from motor vehicle sources changed from 1999-2008 in
# Baltimore City?
#
# Reads the EPA National Emissions Inventory PM2.5 data (1999/2002/2005/2008),
# restricts to Baltimore City on-road (motor vehicle) sources, sums emissions
# per year, and writes a bar plot with a trend line to plot5.png.
# Returns the per-year totals invisibly.
plot5 <- function()
{
  # PM2.5 emissions observations for 1999, 2002, 2005 and 2008
  emissionsData <- readRDS("summarySCC_PM25.rds")
  # Mapping from Source Classification Code (SCC) strings to source names
  sourceClassificationCode <- readRDS("Source_Classification_Code.rds")
  # Filter to Baltimore City (fips 24510) BEFORE the join, to keep merge cheap
  emissionsDataBaltimore <- subset(emissionsData, fips == "24510")
  # Join on the shared SCC key (by.x/by.y were identical, so plain by suffices)
  emissionsWithSourceClassification <- merge(emissionsDataBaltimore,
                                             sourceClassificationCode,
                                             by = "SCC")
  # Motor-vehicle-related sources are the on-road data category
  motorVehicleEmissionsBaltimore <- subset(emissionsWithSourceClassification,
                                           Data.Category == "Onroad")
  # Total PM2.5 emission for each year
  motorVehicleEmissionsSumByYear <- with(motorVehicleEmissionsBaltimore,
                                         tapply(Emissions, year, sum))
  png("plot5.png", width = 600)
  # Guarantee the device is closed even if plotting fails
  # (the original leaked the device on any error before dev.off()).
  on.exit(dev.off(), add = TRUE)
  # Bar per year, plus a line connecting the bars to show the trend
  barplot(motorVehicleEmissionsSumByYear,
          main = "Total pm2.5 emission per year from motor vehicle-related sources",
          xlab = "Year", ylab = "pm2.5 emission [tons]")
  lines(motorVehicleEmissionsSumByYear, col = "green", lwd = 3)
  invisible(motorVehicleEmissionsSumByYear)
}
|
fda55cf644ca24e242015f21461b0c0cbddaeb7e
|
cb54fbf79c8ddb2c1d2a4fa2404d2e95faa61db3
|
/Solution_4.R
|
8a5c8a84223029b6bd8549e975b0e765db2cec26
|
[] |
no_license
|
abhay30495/CASE-STUDY-Healthcare-Org
|
139cdc4714fc752cd779c5ee986e811ecbabe4a9
|
1838d9e1ad3d11000e357ad3fe1b8108097270c4
|
refs/heads/master
| 2020-05-19T03:36:55.563598
| 2019-05-03T20:03:14
| 2019-05-03T20:03:14
| 184,806,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,885
|
r
|
Solution_4.R
|
##Question 4
> View(diabetes)
> View(diabetes_1)
> diabetes_1=diabetes[c(1:133),c(3,8,9)]
> View(diabetes_1)
> library(caTools)
> data=sample.split(diabetes_1,SplitRatio = 0.8)
> train=subset(diabetes_1,data=="TRUE")
##Warning message:
##Length of logical index must be 1 or 133, not 3
> test=subset(diabetes_1,data=="FALSE")
##Warning message:
##Length of logical index must be 1 or 133, not 3
> View(train)
> View(test)
##############################################################################################
##Checking consistency for both the variable, FBS & PPBS1
> library(caret)
> model=glm(NDD~.,train, family="binomial")
> model
Call: glm(formula = NDD ~ ., family = "binomial", data = train)
Coefficients:
(Intercept) FBS PPBS1
-24.7263 0.1157 0.0700
Degrees of Freedom: 87 Total (i.e. Null); 85 Residual
(1 observation deleted due to missingness)
Null Deviance: 106.8
Residual Deviance: 16.56 AIC: 22.56
> prediction=predict(model, test, type="response")
> prediction
> table(test$NDD,prediction>0.5)
FALSE TRUE
0 12 4
1 0 28
> (12+28)/(12+28+4)
[1] 0.9090909
##############################################################################################
##Checking consistency for FBS variable
> View(train)
> View(test)
> model_1=glm(NDD~FBS,train, family = "binomial")
Warning message:
glm.fit: fitted probabilities numerically 0 or 1 occurred
> model_1
Call: glm(formula = NDD ~ FBS, family = "binomial", data = train)
Coefficients:
(Intercept) FBS
-12.7891 0.1195
Degrees of Freedom: 88 Total (i.e. Null); 87 Residual
Null Deviance: 107.5
Residual Deviance: 46.36 AIC: 50.36
> prediction=predict(model_1, test, type="response")
> prediction
> table(test$NDD,prediction>0.5)
FALSE TRUE
0 8 8
1 2 26
> (8+26)/(8+26+2+8)
[1] 0.7727273 ## lower accuracy from the previous case
##############################################################################################
##Checking consistency for PPBS1 Variable
> model_2=glm(NDD~PPBS1,train, family = "binomial")
##Warning message:
##glm.fit: fitted probabilities numerically 0 or 1 occurred
> model_2
Call: glm(formula = NDD ~ PPBS1, family = "binomial", data = train)
Coefficients:
(Intercept) PPBS1
-11.40871 0.06929
Degrees of Freedom: 87 Total (i.e. Null); 86 Residual
(1 observation deleted due to missingness)
Null Deviance: 106.8
Residual Deviance: 28.57 AIC: 32.57
> prediction=predict(model_2, test, type="response")
> table(test$NDD,prediction>0.5)
FALSE TRUE
0 13 3
1 1 27
> (13+27)/(13+3+1+27)
[1] 0.9090909
## PPBS1 has the highest accuracy (0.909) of all the models fitted above, tied with the two-variable model.
## Hence, PPBS1 alone is capable of predicting diabetes.
|
e7a0485d55b9e7815ab0fc9aad97962f5c7b8b9e
|
e11e91e1e46d0577f429a20c7af304e3b35f7d40
|
/TrafficDataCleansing.R
|
a53d65d43fec36492f9901174c4f993cff104d87
|
[] |
no_license
|
viveks13/Traffic-Analysis
|
6820af3ce6e6acf1cca84aa1d1920890e4f3426b
|
649990d4aae987bf9e03f778a1b885994bcddaa2
|
refs/heads/master
| 2020-03-19T00:28:33.704757
| 2018-06-28T12:53:16
| 2018-06-28T12:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,099
|
r
|
TrafficDataCleansing.R
|
# Cleanse Galway traffic-count CSVs into one summary table (one row per
# route/year with AM/PM peak time & volume and AADT), aggregate by year, and
# write the cleansed result for downstream analysis.
# NOTE(review): relies on hard-coded local Windows paths and on fixed row/
# column positions inside the raw CSV layouts -- confirm against source files.
library(ggplot2)
library(tibble)
library(dplyr)
library(reshape2)
library(scales)
library(leaflet)
library(lubridate)
# --- Files from 2012 onwards (wide hourly-grid layout) ---
path = "D:\\NUIG Project Data Set\\Census Data Set\\Traffic data after 2012"
setwd(path)
file.names <- dir(path,pattern = ".csv")
# Seed row of dummy values used as an rbind template; dropped again below
newDf <- data.frame(Route = "X",Yeartaken = 2011,PeakAm = "1111",PeakAmVolume = 1111,PeakPm = "peakPm",PeakPmVolume = 1111,AADT = 1111)
#routes <- c("M6","N17","N18","N59","N84")
for (i in 1:length(file.names)) {
dataSet <- read.csv(file.names[i],stringsAsFactors = F)
# Rows 14-39 are sliced out as the hourly count block (fixed raw layout);
# row 1 of that block carries the date used to derive the year
df <- dataSet[c(14:39),c(1:ncol(dataSet))]
extractYear <- as.Date(df[1,2])
numYear <- as.numeric(format(extractYear,"%Y"))
colCount <- ncol(df) - 3
#countData <- df[c(2:25),c(2:ncol(df))]
# 24 hourly rows x day columns of counts
clockData <- df[c(2:25),c(2:colCount)]
# Sum counts over rows a..b of the hourly grid, ignoring NAs
fnSum <- function(x,a,b){
rowSm <-0
for (i in c(a:b))
{
rowSm = rowSm+ sum(as.numeric(x[i,]),na.rm = T)
}
return(rowSm)
}
##mean(as.numeric(clockData[2,]))
##amMean <- fnMean(clockData,7,11)
##pmMean <- fnMean(clockData,17,21)
##interMean <- (fnMean(clockData,12,16) + fnMean(clockData,22,24) + fnMean(clockData,1,6))/3
# Annual average daily traffic: total count over all hours / number of days
AADT <- fnSum(clockData,1,24)/(ncol(clockData))
# Peak AM/PM time and volume sit in fixed rows 45-48 of the raw file
peakAm <- dataSet[45,ncol(clockData)]
peakAmVolume <- as.numeric(dataSet[46,ncol(clockData)])
peakPm <- dataSet[47,ncol(clockData)]
peakPmVolume <- as.numeric(dataSet[48,ncol(clockData)])
# Route code = first three characters of the file name
route <- substring(file.names[i],1,3)
tempDf <- data.frame(Route = route,Yeartaken = numYear,PeakAm = peakAm,PeakAmVolume = peakAmVolume,PeakPm = peakPm,PeakPmVolume = peakPmVolume,AADT = AADT)
# NOTE(review): <<- grows a global data frame inside the loop (O(n^2));
# consider collecting rows in a list and binding once
newDf <<- rbind(newDf,tempDf)
}
# Drop the dummy seed row
newDf <- newDf[-1,]
######################################################## Cleansing data prior to 2012
# --- Files prior to 2013 (long format: Hour.ending / Total.volume) ---
path = "D:\\NUIG Project Data Set\\Census Data Set\\Traffic Data Prior 2013\\Traffic Data"
setwd(path)
file.names <- dir(path,pattern = ".csv")
for (i in 1:length(file.names)) {
dataSet <- read.csv(file.names[i],stringsAsFactors = F)
# Year is embedded at positions 7-11 of the cell at row 2, column 2
yearVal <- substring(trimws(dataSet[2,2], which = "both"), 7,11)
metadata <- dataSet %>% select("Hour.ending","Total.volume") %>% group_by(Hour.ending) %>% summarise(total = sum(Total.volume))
# Attach an HH:MM:SS label for each of the 24 hour-ending bins
hours <- seq(from=as.POSIXct("2012-01-01 00:00:00"),
to=as.POSIXct("2012-01-01 23:00:00"), by="hour", format = "%Y-%M-%D %H:%M:%S")
timeinHrs <- substring(hours,12,19)
metadata$time <- timeinHrs
route <- substring(file.names[i],1,3)
# Busiest hour within the 06:00-12:00 (AM) and 16:00-20:00 (PM) windows
peakAmtime <- metadata %>% filter(Hour.ending %in% c(600:1200)) %>% top_n(n=1) %>% select(time)
peakPmtime <- metadata %>% filter(Hour.ending %in% c(1600:2000)) %>% top_n(n=1) %>% select(time)
peakAmVolume <- metadata %>% filter(Hour.ending %in% c(600:1200)) %>% top_n(n=1) %>% select(total)
peakPmVolume <- metadata %>% filter(Hour.ending %in% c(1600:2000)) %>% top_n(n=1) %>% select(total)
# Scale multi-day totals to a per-day (24-hour) equivalent
peakAmVolume <- (peakAmVolume/nrow(dataSet)) * 24
peakPmVolume <- (peakPmVolume/nrow(dataSet)) * 24
AADT <- (sum(metadata[,2])/nrow(dataSet)) * 24
tempDf <- data.frame(Route = route,Year = yearVal,PeakAm = peakAmtime,PeakAmVolume = peakAmVolume,PeakPm = peakPmtime,PeakPmVolume = peakPmVolume,AADT = AADT)
# Rename so the pre-2012 rows line up with the post-2012 schema
colnames(tempDf) <- c("Route","Yeartaken","PeakAm","PeakAmVolume","PeakPm","PeakPmVolume","AADT")
newDf <<- rbind(newDf,tempDf)
}
newDf$Yeartaken <- as.numeric(newDf$Yeartaken)
newDf <- newDf %>% arrange(Yeartaken)
str(newDf)
# Yearly totals across routes. NOTE(review): the output columns are named
# Mean* but sum() is used -- confirm which aggregate is intended.
grouped_Data <- newDf %>% select("Yeartaken","PeakAmVolume","PeakPmVolume","AADT") %>% group_by(Yeartaken) %>% summarise(MeanPeakAmVolume = sum(PeakAmVolume),MeanPeakPmVolume = sum(PeakPmVolume),MeanAADT = sum(AADT))
#mean_peakAm <- newDf %>% select("Yeartaken","PeakAm") %>% group_by(Yeartaken) %>% top_n(n=1) %>% select(PeakAm)
grouped_Data$City <- "Galway"
#View(grouped_Data)
write.csv(grouped_Data, "D:\\NUIG Project Data Set\\Census Data Set\\CleansedData\\CleansedTrafficData.csv", row.names = F)
# newDf$Year <- as.numeric(newDf$Year)
# str(newDf)
#newDf <- newDf %>% group_by(Yeartaken) %>% mutate(AADT_Total = sum(AADT))
|
805766c16dd99f0d60a10ee88efcc46a0a4fcdd7
|
0b3e8045987a9565a20231ea1fdd4dddf9eafb15
|
/code_figures/figure_image.R
|
29099597ff0f844132f50da458f1bc9bbe973762
|
[] |
no_license
|
wangxsiyu/Lu_Drought_Identification
|
75972680c607f3d35261413f6a51a221c40c9fe5
|
caed8465c7b31f245534d772df60457333bb6d36
|
refs/heads/main
| 2023-04-02T17:21:34.193557
| 2021-04-19T22:28:50
| 2021-04-19T22:28:50
| 349,785,695
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,152
|
r
|
figure_image.R
|
# Figure: side-by-side heatmap panels of observed runoff and drought percentile
# for station column "f001" over 1957-2020, with custom colorbars and a shared
# ECDF panel drawn by sourcing figure_ecdf.R.
# NOTE(review): calc_0value(), get_dayid() and transmat() are project helpers
# defined elsewhere -- presumably sourced into the session before running this.
library(rstudioapi)
funcdir = dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(funcdir)
data = read.csv("../../data/runoff/runoff.csv")
{
library(fields)
### image
flname = "f001"
starty = 1957
endy = 2020
#### obs
obs = data
# Restrict observations to the selected year range
idxx = which(obs$year >= starty & obs$year <= endy)
obs = obs[idxx,]
obs_q = obs[,flname]
idx_p = which(obs_q>0)
idx_0 = which(obs_q == 0)
obs_pos = obs_q[idx_p]
obs_q[idx_p] = obs_pos/max(obs_pos,na.rm = T) #>0 normalize
# Zero-flow days: encode normalized cumulative dry-day count as negatives
cd_0 = calc_0value(obs_q == 0)
nor_cd = cd_0/max(cd_0,na.rm = T)
obs_q[idx_0] = -nor_cd[idx_0] #==0 cdpm normalize
doy = get_dayid(obs$month,obs$day)
year = obs$year
zlime = c(min(obs_q,na.rm = T), max(obs_q,na.rm = T)) #c(0,max(obs[,flname],na.rm = T))
# Two-part palette: warm colors for the negative (dry) half, cool for flow
colorimage = c(colorRampPalette(c("red","yellow"))(50),colorRampPalette(c("green","blue","black","black","black"))(50))
#colorRampPalette(c("blue","black"))(40))
param = list(method = "Runoff", zlime = zlime, color = colorimage)
tab_obs = transmat(doy, year, obs_q)
#### perc
perc = read.csv("../../data/perc/perc_T0_C0_fr0_1912_2020.csv")
idxx = which(perc$year >= starty & perc$year <= endy)
perc = perc[idxx,]
percc = perc[,flname]
doy_p = get_dayid(perc$month,perc$day,option29 = 0)
year_p = perc$year
colorimage2 = rev(tim.colors(100))
param2 = list(method = "T0C0F0", zlime = c(0,1), color = colorimage2)
tab_perc = transmat(doy_p, year_p, percc)
}
#################### plot ########################
{
# Left panel: runoff heatmap (day-of-year x year)
par(plt = c(0.07,0.29, 0.1,0.9), mgp = c(1,0.5,0) )
image(x=tab_obs$x, y = tab_obs$y, z = tab_obs$z, xlab = "",ylab = "", main = param$method, zlim = param$zlime,
col = param$color)
# Left colorbar: lower half = cumulative dry days, upper half = runoff
par(new = T, plt = c(0.015,0.025, 0.2,0.8) )
z = array(1:100, dim = c(1,100) )
image( 1,1:100, z, col = colorimage, axes = FALSE, xlab = "", ylab = "" )
# NOTE(review): a and b are computed but never used below
a = seq(0,max(cd_0,na.rm = T),length.out = 6)
b = seq(min(obs_pos),max(obs_pos),length.out = 6)
axis( side = 4, at = 50-c(0,100,200,300)/max(cd_0,na.rm = T)*50, tck = -0.2, labels = F )
mtext( side = 4, at = 50-c(0,100,200,300)/max(cd_0,na.rm = T)*50, line = 0.3,
text = as.character(c(0,100,200,300)), las = 1)
axis( side = 4, at = c(0,0.1,0.2,0.3,0.4)/max(obs_pos)*50+50, tck = -0.2, labels = F )
mtext( side = 4, at = c(0,0.1,0.2,0.3,0.4)/max(obs_pos)*50+50, line = 0.3,
text = as.character(c(0,0.1,0.2,0.3,0.4)), las = 1)
box()
mtext(side = 2, at = 20, line = 0.1, text = "Cumulative dry days")
mtext(side = 2, at = 70, line = 0.1, text = "Runoff")
####### perc # z = c(0,1) / tim.colors(100)
# Right colorbar: percentile scale 0..1
par(new = T, plt = c(0.72,0.73, 0.2,0.8) )
z = array(1:100, dim = c(1,100) )
image( 1,1:100, z, col = rev(colorimage2), axes = FALSE, xlab = "", ylab = "" )
axis( side = 4, at = seq(0,100,20), labels = F, tck = -0.2, las = 1)
mtext( side = 4, at = seq(0,100,20), line = 0.3, text = seq(0,1,0.2), las = 1)
mtext(side = 2, at = 50, line = 0.1, text = "Percentile")
box()
# Right panel: percentile heatmap
par(new = T, plt = c(0.77,0.99, 0.1,0.9) )
image(x=tab_perc$x, y = tab_perc$y, z = tab_perc$z, xlab = "",ylab = "", main = param2$method, zlim = param2$zlime,
col = param2$color)
####### ecdf
# Middle panel: ECDF comparison drawn by the sourced helper script
par(new = T, plt = c(0.32, 0.705, 0.1,0.9) )
source("./figure_ecdf.R")
}
|
f9f549b124415e4ca55118161a69f7efc65222b9
|
e0b87eb63633d601f3e9e23e35b62b72835f091c
|
/R/nl_corrts.R
|
eec9d2fec4ad8df6cda5b0e3d9a201f5f6a4c28c
|
[] |
no_license
|
Allisterh/nlr
|
7737c85aa80485f5173469d2479d89be72c427ba
|
7f45b89f1748d356af588920b6f7ae2a5269f80d
|
refs/heads/master
| 2022-04-02T20:53:11.162698
| 2019-07-31T11:40:02
| 2019-07-31T11:40:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,609
|
r
|
nl_corrts.R
|
#******************************************************************************************************
# nl.corrts: nonlinear regression with autocorrelated errors.
#
# Two-stage generalized least squares: (1) fit the nonlinear model by ordinary
# least squares (nlsqr); (2) estimate the residual autocorrelation structure
# described by `correlation` (an nlme corStruct object), build the correlation
# matrix vmat and a whitening transform rmat, and refit by GLS (nlsqr.gn).
#
# formula:     package-specific nonlinear model object.
# data:        data used for the fit.
# start:       starting parameter values; defaults to getInitial(formula, data).
# control:     nlr.control list (tolerance, minlanda, maxiter).
# correlation: nlme corStruct; only the corAR1 and corARMA branches are
#              implemented -- the remaining branches are empty placeholders.
#              NOTE(review): with the default correlation=NULL (or any
#              unimplemented class) vmat/rmat are never defined, so the
#              nlsqr.gn call below will fail.
# Returns the GLS fit with slot @autpar set to the estimated autocorrelation
# parameters, or a Fault object if either fitting stage fails.
nl.corrts <- function(formula, data, start=getInitial(formula,data),
control=nlr.control(tolerance=0.0010, minlanda=1 / 2 ^ 10, maxiter=25 * length(start)),
correlation=NULL,...)
{
# Stage 1: ordinary (unweighted) nonlinear least squares fit
tols1 <- nlsqr(formula, data=data, start=start,control=control,...)
if(is.Fault(tols1)) return(tols1)
ri <- residuals(tols1) # should be corrected to work with formula in spherical norm
n <- length(ri)
# Stage 2 setup: estimate the error autocorrelation from the OLS residuals
switch(class(correlation)[1],
"corAR1"={
# AR(1): estimate rho via ar(), then use the closed-form banded
# inverse-covariance and whitening matrices
tm <- ar(ri,order.max=1,aic=F)
cs2<-Initialize(correlation,data=as.matrix(ri))
vmat <- corMatrix(cs2)
#vinv <- solve(vmat)
#umat <- chol(vmat)
#ut <- t(umat)
#rmat <- solve(ut)
# NOTE(review): .temp1 is never used, and vinv below is (n+1)x(n+1)
# (c(1, rep(..., n)) has n+1 elements) and is also unused afterwards.
.temp1 <- 1.0/(1.0-tm$ar^2)
vinv <- diag(c(1,rep(1+tm$ar^2,n)))
vinv[col(vinv)==row(vinv)+1] <- -tm$ar
vinv[row(vinv)==col(vinv)+1] <- -tm$ar
rmat <- diag(c(sqrt(1-tm$ar^2),rep(1,n-1)))
rmat[row(rmat)==col(rmat)+1] <- -tm$ar
rmat <- rmat / sqrt(1-tm$ar^2)
autpar<-tm$ar
},
"corARMA"={
# ARMA(p,q): estimate coefficients by arima(), then build the whitening
# transform from the Cholesky factor of the symmetrized inverse
pcorr <- attr(correlation,"p")
qcorr <- attr(correlation,"q")
ncorr <- pcorr+qcorr
tm <- arima(ri,order=c(pcorr,0,qcorr),include.mean = FALSE)
correst <- corARMA(tm$coef,form=attr(correlation,"formula"),p=pcorr,q=qcorr,fixed=attr(correlation,"fixed"))
cs2<-Initialize(correst,data=as.matrix(ri))
vmat <- corMatrix(cs2)
v2 <- eiginv(vmat,symmetric=T,stp=F)
if(is.Fault(v2)) return(v2)
# Force exact symmetry before the Cholesky factorization
for(i in 1:n)
for(j in i:n)
v2[i,j] <- v2[j,i]
umat <- chol(v2)
ut <- t(umat)
rmat <- solve(ut)
autpar<-tm$coef
},
"corCAR1"={
},
"corCompSymm"={
},
"corExp"={
},
"corGaus"={
},
"corLin"={
},
"corRatio"={
},
"corSpher"={
},
"corSymm"={
}
)
# Loosen the tolerances for the GLS refit relative to the OLS stage
tolerance <- control$tolerance*1e3
minlanda<-control$minlanda / 1e4
# Stage 2: generalized least squares with the estimated correlation
t2st <- nlsqr.gn(formula,data=data,
start=tols1$parameters[names(formula$par)],
vm=vmat, rm=rmat,
control=nlr.control(tolerance=tolerance,minlanda=minlanda),
...)
if(is.Fault(t2st)) return(t2st)
t2st@autpar<-as.list(autpar)
#t2st@autcorr <- tm
return(t2st)
}
|
382a504dcba29823254cd4f41c8a3832e63e211d
|
d406b9068f635eed507ed21f000aaa55e3ec034c
|
/man/okun.Rd
|
fb67ea8b25cb62c98ccb1119fb5f0c2e285ab6a0
|
[] |
no_license
|
Worathan/PoEdata
|
273eab591e56b08252dff3f681c1c9e0b34f7d79
|
d415eb1b776b04c29ee38d58bbbd1bf37ef92eb1
|
refs/heads/master
| 2020-04-20T11:10:02.167677
| 2019-02-02T08:35:48
| 2019-02-02T08:35:48
| 168,808,539
| 0
| 0
| null | 2019-02-02T07:53:41
| 2019-02-02T07:53:41
| null |
UTF-8
|
R
| false
| false
| 657
|
rd
|
okun.Rd
|
\name{okun}
\alias{okun}
\docType{data}
\title{
Okun Data
}
\description{
Obs: 98, quarterly (1985Q2 - 2009Q3)
}
\usage{data("okun")}
\format{
A data frame with 98 observations on the following 2 variables.
\describe{
\item{\code{g}}{percentage change in U.S. Gross Domestic Product, seasonally adjusted.}
\item{\code{u}}{U.S. Civilian Unemployment Rate (Seasonally adjusted)}
}
}
\details{
The variable DU used in Chapter 9 is defined as U(t)-U(t-1).
}
\source{
http://principlesofeconometrics.com/poe4/poe4.htm
}
\references{
Federal Reserve Bank of St Louis
}
\examples{
data(okun)
## maybe str(okun) ; plot(okun) ...
}
\keyword{datasets}
|
eeba378f9b3a90e923269198c5b1c9a2af90333b
|
738af78c24b08b63b4fdbd947ea2b2a5dc34d0ce
|
/geo2r-volcano-rot.R
|
8f2b97829febdcde41e9ea1f78a18a027c1d7b2c
|
[] |
no_license
|
pklemmer/rotenone-geo2r-volcano
|
b09fd04ebc1037408fd39564af6482bf3b40c850
|
1f156b893fe4d70df02be0a2b808ffbca8d75111
|
refs/heads/main
| 2023-04-08T06:12:08.345859
| 2022-06-22T13:25:49
| 2022-06-22T13:25:49
| 495,457,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,912
|
r
|
geo2r-volcano-rot.R
|
sessionInfo()
# NOTE(review): installing packages on every run is wasteful; consider
# guarding with requireNamespace() checks instead.
install.packages("readr")
install.packages("ggplot2")
install.packages("ggrepel")
install.packages("svglite")
library(readr)
#Installing required packages
setwd("~/GitHub/rotenone-geo2r-volcano")
# GEO2R differential-expression tables: DMSO control vs rotenone
# (50 at 12 h, 50 at 24 h, 100 at 24 h)
xp1 <- read_delim("Expression/8_dmso_vs_8_rot50_12.tsv",
delim = "\t", escape_double = FALSE,
trim_ws = TRUE)
xp2 <- read_delim("Expression/8_dmso_vs_8_rot50_24.tsv",
delim = "\t", escape_double = FALSE,
trim_ws = TRUE)
xp3 <- read_delim("Expression/8_dmso_vs_8_rot100_24.tsv",
delim = "\t", escape_double = FALSE,
trim_ws = TRUE)
#Loading GEO2R data sets
library(ggplot2)
# NOTE(review): ggrepel is loaded but no repel geom is used in volcp below
library(ggrepel)
# volcp: volcano plot (logFC vs -log10 p) for one GEO2R table. Significance:
# |logFC| > 0.26 and raw P.Value < 0.05; significant genes are colored and
# carry their GENE_SYMBOL as the point label.
volcp <- function(xp) {
xp$diffexpressed <- "Not sig"
xp$diffexpressed[xp$logFC > 0.26 & xp$P.Value < 0.05] <- "UP"
xp$diffexpressed[xp$logFC < -0.26 & xp$P.Value < 0.05] <- "DOWN"
#Setting the labels for significantly (P-value < 0.05) up and down regulated genes
xp$xplabel <- NA
xp$xplabel[xp$diffexpressed != "Not sig"] <- xp$GENE_SYMBOL[xp$diffexpressed != "Not sig"]
ggplot(data=xp, aes(x=logFC, y=-log10(P.Value), col=diffexpressed, label=xplabel)) +
geom_point() +
theme_grey() +
scale_color_manual(values = c("blue", "dimgrey", "red")) +
#Setting the color for the points according to their expression
geom_vline(xintercept=c(-0.26, 0.26), col="red") +
geom_hline(yintercept=-log10(0.05), col="red")+
#Adding a vertical and horizontal line to indicate logFC and p-value significance
theme(legend.title=element_blank())
#Hiding the legend title
}
#Defining the function to generate a volcano plot from the given expression data
library(svglite)
svglite(file="Plots/rot-xp1.svg")
# Only xp1 is plotted here; xp2/xp3 are loaded but unused in this script
volcp(xp1)
dev.off()
#Saving the plot as SVG (Plots/rot-xp1.svg), not jpeg as previously stated
|
4423dd8a084a381fe47d6e1149cd60c43bbbe31a
|
45701b348dcbc54758a61aa381ea9f7821c2cb49
|
/inst/app/ui/data_tab.R
|
ff30b39fdc2549eeb1f2d5d1a494c1f11ccc0996
|
[
"CC-BY-4.0"
] |
permissive
|
Rstat-project/markr
|
11c58642185d922de3fe948fb0f7e7de5f3cdfdd
|
72299eb48613a898ca1c97f50fa27fe3f07c3c77
|
refs/heads/master
| 2023-06-02T18:43:04.504269
| 2021-06-16T11:36:10
| 2021-06-16T11:36:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 848
|
r
|
data_tab.R
|
# UI definition for the "Data" tab of the app (shinydashboard::tabItem).
# Provides a file-upload control, a download button with a table/yaml view
# toggle, column-manipulation controls (add/delete/rename/reorder, a column
# selector and a value/new-name text box), the data table, and a yaml preview.
data_tab <- tabItem(
tabName = "data_tab",
h2("Data"),
fileInput("tbl_file", NULL, width = "100%", placeholder = "Load Table"),
# Download + view-mode toggle row
fluidRow(
column(2, downloadButton("dl_tbl", "Download")),
column(10, radioButtons("tbl_yml_switch", NULL, c("table", "yaml"), "table", inline = TRUE))
),
# Column-manipulation row: buttons, column selector, value/new-name input
fluidRow(
column(4, actionButton("add_col", NULL, icon("plus")),
actionButton("delete_col", NULL, icon("minus")),
actionButton("rename_col", "Rename"),
actionButton("reorder_col", "Reorder")
),
# create = TRUE lets the user type brand-new column names
column(4, selectizeInput("col_name", NULL, c(), multiple = TRUE,
options = list(create = TRUE), width = "100%")),
column(4, textInput("col_val", NULL, placeholder = "Column value/new name", width = "100%"))
),
DTOutput("tbl"),
verbatimTextOutput("yml")
)
|
8071c3d00d92b74914edc40572063bc701775d52
|
72d364a45b3ffbe7f1fcfdc33e9bf125b5d122e9
|
/dh/R/strings.R
|
08370a6de16c5fdff0eef6b6b4ac48bbd15c8442
|
[
"MIT"
] |
permissive
|
dhaase-de/dh-r-dh
|
53a5281080ea196e349a90e92b0eb2bfda0cc4ae
|
a4f067f1bfb1ac1337c024d2b82361b5370068b0
|
refs/heads/master
| 2021-01-11T17:20:51.563117
| 2017-01-23T00:59:48
| 2017-01-23T01:00:31
| 79,754,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,236
|
r
|
strings.R
|
# deparse and substitute an expression ("turn symbol into string")
# Returns the source text of the unevaluated argument, e.g. desub(foo$bar)
# yields "foo$bar". Note deparse() can return a multi-element character
# vector for very long expressions.
"desub" <- function(expr) {
deparse(substitute(expr))
}
# fills strings to a given length (vectorized)
#
# v:              character vector (other types are coerced); names preserved.
# to.length:      target width; strings already at least this wide are kept.
# with.character: single character used as padding.
# from.left:      pad on the left (TRUE, e.g. "7" -> "0007") or on the right.
"fill" <- function(v, to.length = 4, with.character = "0", from.left = TRUE) {
  v.fill <- character(length(v))
  # pmax clamps at 0 so over-length strings get no padding; nchar replaces
  # the hand-rolled strlen() helper
  remaining <- pmax(to.length - nchar(as.character(v)), 0)
  # seq_along is safe for empty input (the original 1:length(v) iterated
  # over c(1, 0) and failed on a zero-length vector)
  for (i in seq_along(v)) {
    filler <- paste(rep(with.character, remaining[i]), collapse = "")
    v.fill[i] <- if (from.left) paste0(filler, v[i]) else paste0(v[i], filler)
  }
  names(v.fill) <- names(v)
  v.fill
}
# 'paste' with 'sep = ""'
# NOTE(review): base R provides paste0() since 2.15.0; this redefinition
# shadows it with identical behavior, presumably kept for very old R versions.
"paste0" <- function(...) {
paste(..., sep = "")
}
# Return the leading `characters` characters of each element of `v`
# (vectorized over `v`; `characters` is a scalar count, default 1).
"strfirst" <- function(v, characters = 1) {
  substring(v, 1, characters)
}
# Return the trailing `characters` characters of each element of `v`
# (vectorized; uses strlen() from this file to locate each string's end).
"strlast" <- function(v, characters = 1) {
  total.len <- strlen(v)
  start.pos <- total.len - characters + 1
  substr(v, start.pos, total.len)
}
# return the length of strings (vectorized)
#
# v: vector coerced to character; names are preserved on the result.
# Returns an integer vector of per-element character counts. Counting is done
# by splitting into characters (kept from the original so NA still counts as
# the 2-character string "NA"); vapply makes the result type-stable, fixing
# the original's NULL return on zero-length input (now integer(0)).
"strlen" <- function(v) {
  chars <- strsplit(as.character(v), "")
  v.strlen <- vapply(chars, length, integer(1))
  names(v.strlen) <- names(v)
  v.strlen
}
|
b6c7e083d1c81a6911698b39c893adfadb3f1cbe
|
f50a745235da540a8a28299da8007f5a4c417fb8
|
/old.R
|
4a13fe3aa722409b330ce1c46140f3e63fa99052
|
[] |
no_license
|
lara-maleen/Project_Fur_Seals
|
b26d039f73282665cee9fdf07b7d90e8e1084dd3
|
e4ed571c794bd94123c65476ca443ee11cbb4ae4
|
refs/heads/master
| 2021-08-29T17:59:56.758010
| 2021-08-23T14:42:33
| 2021-08-23T14:42:33
| 178,859,144
| 0
| 0
| null | 2019-06-18T10:32:13
| 2019-04-01T12:33:22
|
R
|
UTF-8
|
R
| false
| false
| 39,901
|
r
|
old.R
|
#Minimal Model IBM - Fur Seal Project
#Hypothesis 1: Marginal-Male Theory
#Code for running in cluster (without replicates and storing information in matrix every t)
# NOTE(review): rm(list = ls()) wipes the caller's entire global workspace when
# the script is sourced; prefer running the script in a fresh R session instead.
rm(list=ls())
##### START SIMULATION.RUN-FUNCTION #####
simulation.fun <- function(time=100, #number of generations
age=15, #age limit for an individual; life span for A. gazella. 15-25 years --> literature!?
patches=2, #number of Patches (two different sites: high/low density)
territories=c(50,50), #number of territories per patch
mutate=0.05, #mutation factor
die.fight=0.35, #propability to die from fight/competition
loci.col=c(14:53), #loci column numbers of the pop matrix
p= 0.5, #parameter for philopatry function (female patch choice) -> the higher p is, the more intense the philopatric (side-fidelity) influence
u = 100, #assumed normal average density (for each patch), used for female patch choice function
i=-0.8, #intercept for infanticide function
s=1.8, #slope for infanticide function
surv=0.90 #survival for total population
){
#setwd("~/Studium/WHK/WHK Bielefeld Meike/Project_Fur_Seals")
#gen_phen_map <- readRDS('/data/home/lara/genes.rds') #load the gene array (10 loci, 10 alleles) #gene map used in cluster
#gen_phen_map2 <- readRDS('/data/home/lara/genes2.rds') #load the gene array (10 loci, 10 alleles) #gene map used in cluster
gen_phen_map <- readRDS('genes.rds') #load the gene array (10 loci, 10 alleles), used for male trait values
gen_phen_map2 <- readRDS('genes2.rds') #second gene map for female trait value (10 loci, 10 alleles). Phenotype of -0.2 and +0.2 initially
##### FUNCTIONS #####
ID.fun <- function(offspring.vector){ #ID-FUNCTION: assign a fresh, unique ID to every new offspring
  # offspring.vector: offspring counts (one entry per mother); sum = newborn total.
  # Hands out consecutive IDs starting at the global counter ID.scan, then
  # advances the counter via <<- so IDs are never reused across calls.
  n.new <- sum(offspring.vector)
  if (n.new <= 0) {
    # Guard: the original returned the descending 2-element sequence
    # ID.scan:(ID.scan - 1) here (classic a:b pitfall); return nothing instead.
    return(integer(0))
  }
  ID.offspring <- ID.scan:(ID.scan + n.new - 1)
  ID.scan <<- ID.scan + n.new
  return(ID.offspring)
}
trait.fun <- function(population.total,value.matrix, loci.matrix, gen_phen_map, gen_phen_map2){ #TRAIT-VALUE-FUNCTION - used for male quality + female philopatry trait
# Computes additive phenotypic trait values from each individual's diploid loci
# and writes them into the population data frame: column 4 = male quality trait
# (absolute value of the locus sum), column 5 = female philopatry trait.
# population.total: population data frame (one row per individual).
# value.matrix:     NOTE(review): this argument is never read — it is reassigned
#                   immediately below. Callers pass undefined names
#                   ('values.population' / 'values.offspring'); lazy evaluation
#                   is the only reason that does not error. TODO: drop the arg.
# loci.matrix:      per-individual allele indices; columns 1-20 are male-trait
#                   loci (10 "row" alleles + 10 "column" alleles), columns 21-40
#                   are female-trait loci.
# gen_phen_map / gen_phen_map2: genotype->phenotype lookup arrays (loaded from
#                   genes.rds / genes2.rds) indexed [allele1, allele2, locus].
#Male Trait Value
value.matrix <- matrix(NA,nrow(population.total),ncol=10) #empty matrix for the trait values for each loci
for(y in 1:nrow(population.total)){ #for each individual
for(z in 1:10){ #for number of loci
value.matrix[y,z] <- (gen_phen_map[loci.matrix[y,z],loci.matrix[y,10+z],z]) #get value from gene map 1 (this is the male trait gene map), go through all loci and see what alleles individual have
}
population.total[y,4] <- abs(sum(value.matrix[y,])) #calculate additive phenotypic trait value, stored in column number 4 (male trait value)
}
#Female Trait Value:
value.matrix <- matrix(NA,nrow(population.total),ncol=10) #empty matrix for the trait values for each loci
for(y2 in 1:nrow(population.total)){ #for each individual
for(z2 in 1:10){ #for each loci
value.matrix[y2,z2] <- gen_phen_map2[loci.matrix[y2,z2+20],loci.matrix[y2,10+z2+20],z2] #get value from gene map 2 (female trait gene map), loci columns 21-40 in pop matrix (i.e. loci matrix 21-40)
}
population.total[y2,5] <- (sum(value.matrix[y2,])) #calculate additive phenotypic trait value, stored in column number 5 (female trait value)
}
# NOTE(review): 1:nrow(...) loops above assume a non-empty population; an empty
# data frame would iterate over c(1, 0) — callers currently guard against this.
return(population.total)
}
choice.fun <- function(N.male, patches){ #MALE PATCH CHOICE: decide where each adult male goes this season
  # N.male:  data frame of adult males; needs columns nr.offspring, patch and
  #          patch.last.year (the patch column is overwritten in place).
  # patches: total number of patches, used by the random-switch arithmetic.
  # Rule: a male with reproductive success last season (nr.offspring > 0)
  # returns to last season's patch; an unsuccessful male moves to a patch
  # chosen uniformly among the OTHER patches (the shift below adds
  # 1..patches-1 modulo patches, so the current patch is excluded).
  for (i in seq_len(nrow(N.male))) {  # seq_len(): safe for an empty data frame (1:0 pitfall)
    if (N.male$nr.offspring[i] > 0) {
      N.male$patch[i] <- N.male$patch.last.year[i]
    } else {
      N.male$patch[i] <- (N.male$patch[i] - 1 + floor(runif(1, 1, patches))) %% patches + 1
    }
  }
  return(N.male)  # new patch written into the patch column
}
choice.fun.females <- function(N.female,p,u,N.last1,N.last2, patches){ #FEMALE PATCH CHOICE: philopatry vs. density-dependent dispersal
  # N.female: data frame of females; needs columns patch, patch.born, female.trait.
  # p:        philopatry probability (chance a female simply returns to her birth patch).
  # u:        reference density; deviation of last year's N from u drives emigration.
  # N.last1/N.last2: last year's population sizes on patch 1 and patch 2.
  #           NOTE(review): hard-codes two patches even though 'patches' is a
  #           parameter — generalize if more patches are ever used.
  # patches:  number of patches (used by the random-switch arithmetic).
  N.last <- c(N.last1, N.last2)  # last year's N per patch, indexed by patch number
  # One Bernoulli(p) draw per female: TRUE -> philopatric, go to the birth patch.
  p.patch <- p > runif(nrow(N.female), 0, 1)
  for (i in seq_along(p.patch)) {  # seq_along(): safe when there are no females (1:0 pitfall)
    if (p.patch[i]) {
      N.female$patch[i] <- N.female$patch.born[i]
    } else {
      # Density-dependent choice: the larger female.trait * (N.last - u) is,
      # the likelier a switch away from the current patch.
      patch.u <- plogis(N.female$female.trait[i]*(N.last[N.female$patch[i]] - u)) > runif(1,0,1)
      if (patch.u) {  # switch to a uniformly chosen OTHER patch
        N.female$patch[i] <- (N.female$patch[i] - 1 + floor(runif(1,1,patches)))%%patches + 1
      }
    }
  }
  return(N.female)
}
competition.fun <- function(N.male, patches, population.males, territories){ #LET MALES COMPETE FOR TERRITORIES, DEPENDING ON THEIR QUALITY TRAIT
# Two-phase territory contest.
#   Phase 1: every male without a territory (terr == 0) picks one uniformly at
#            random within his current patch.
#   Phase 2: per territory, the male with the highest quality trait keeps it;
#            the rest are reset to terr = 0 (ties: first row, i.e. the male
#            that arrived first, wins).
# N.male:      data frame of competing males (columns ID, patch, terr, trait).
# patches:     number of patches to iterate over.
# population.males: never read in this function — kept for call-site
#              compatibility. TODO(review): confirm and drop.
# territories: per-patch territory counts; note sample(territories[p], 1) draws
#              from 1:territories[p] because the argument is a scalar.
# Returns N.male with updated terr values, rows in territory order (NOT the
# input order — the caller re-sorts by ID afterwards).
### 1.) Males choose their territory in this patch
for(p in 1:patches){ #Going through previous determined patches of males (at first Patch I than Patch II)
if(nrow(N.male[N.male$patch==p&N.male$terr==0,])>0){ #Are their any males in the patch (with no territory yet)
ID.terr.males <- matrix(NA, nrow=nrow(N.male[N.male$patch==p&N.male$terr==0,]), ncol=2) #new matrix for storing IDs
ID.terr.males[,1] <- N.male[N.male$patch==p&N.male$terr==0,]$ID #get IDs of males that have no territory yet
for(i in 1:nrow(ID.terr.males)){ #go through all males that have no territory
ID.terr.males[i,2] <- sample(territories[p], 1) #randomly decide which territory male goes to
N.male[N.male$ID==ID.terr.males[i,1],]$terr <- ID.terr.males[i,2] #write the territory number in matrix of males in this patch
}#End individual's loop
}
}#End 1.) patch loop
### 2) Males compete for their territory - the one with highest quality trait obtains it
male.matrix <- c() #for storing the males for all patches
# NOTE(review): growing male.matrix/male.matrix2 via rbind in a loop is O(n^2);
# collect pieces in a list and rbind once if this becomes a bottleneck.
for(p3 in 1:patches){ #Go again trough all patches
male.matrix2 <- c() #for storing the males per patch
for(t in 1:territories[p3]){ #loop over all territory numbers (1-50)
matrix.terr <- N.male[which(N.male[,"terr"]==t&N.male[,"patch"]==p3),] #Choose all males in this particular territory (as matrix)
if(nrow(matrix.terr)>=2){ #If there are at least two in the territory...
winner <- matrix.terr[matrix.terr$trait==(max(matrix.terr[,"trait"])),] #That's the WINNER in this territory
if(nrow(winner)>1){ #if trait values are equal, more rows in winner matrix than 1: decide to take the first male in matrix. That equals the case, that the male that was first at territory, obtains it
winner <- winner[1,]
}
matrix.terr <- matrix.terr[which(matrix.terr$ID!=winner$ID),] #remove winner from matrix
for (i4 in 1:nrow(matrix.terr)){ #For the looser(s) change territory to 0
matrix.terr$terr[i4] <- 0
}
male.matrix2 <- rbind(male.matrix2, winner, matrix.terr) #Safe new info in patch matrix
}
else{ #What happens when there is just one male (or zero) in this territory?
winner <- N.male[which(N.male[,"terr"]==t&N.male[,"patch"]==p3),] #He "wins" and is added to patch matrix
male.matrix2 <- rbind(male.matrix2, winner)
}
}#End territory loop
male.matrix <- rbind(male.matrix,male.matrix2) #add patch matrix, so that all males get stored (from each patch)
}#End 2) step
N.male <- male.matrix #the male matrix is not sorted (that will happen with sorting the IDs afterwards in simulation)
return(N.male)
}
mortality <- function(N, surv){ # Density-dependent mortality rate for the whole population.
  # The baseline survival probability 'surv' holds at N = 600 individuals (the
  # implied carrying capacity); survival is shifted on the logit scale as N
  # deviates from 600, and mortality is one minus the adjusted survival.
  logit.survival <- qlogis(surv) - (N - 600) * 0.005
  adjusted.survival <- plogis(logit.survival)
  1 - adjusted.survival
}
##### INITIALISATION #####
# Builds the founding population: per patch, ~300 individuals with random sex
# ratio, uniform starting trait values (0.5) and random ages, then assigns
# random alleles to all 40 loci and derives the phenotypic trait values.
population.total <- c() #empty vector for the population matrix
statistic.matrix <- matrix(ncol=15, nrow=time) #empty array for the statistics
for(k in 1:patches){ #LOOP OVER PATCHES
patchx.N <- abs(round(rnorm(1, mean=300, sd=5))) #Number of individuals in the patch
patchx.male <- round(runif(1,patchx.N/4,3*patchx.N/4)) #Number of males in the patch
ID <- c(1:(patchx.N)) #vector ID: gives each individual an ID
patch <- c(rep(k,patchx.N)) #vector patch: gives each individual their patch Nr.
gender <- c(rep("male",patchx.male),rep("female",patchx.N-patchx.male)) #vector gender: is filled with males and females
trait <- c(rep(0.5,patchx.N)) #vector trait: is for all individuals from both patches set as 0.5
female.trait <- c(rep(0.5,patchx.N))
survival <- ceiling(runif(patchx.N, min=0, max=age)) #vector survival: randomly distributed between 1 and age limit
ID.mother <- c(rep(NA,patchx.N)) #the first generation has no mother and therefore no ID in the column for the mothers ID
ID.father <- c(rep(NA,patchx.N)) #the first generation has no father and therefore no ID in the column for the fathers ID
# NOTE(review): max=2 hard-codes two patches even though 'patches' is a parameter.
patch.last.year <- ceiling(runif(patchx.N, min=0, max=2)) #generates randomly ID of last years patch for each individual (patch 1 or 2)
nr.offspring <- c(rep(0,patchx.N)) #number of offspring in first generation, will be filled with males success/offspring from last year
terr <- c(rep(0, patchx.N)) #here the obtained territory is stored, emptied every t
repro <- c(rep(0, patchx.N)) #decision stored if male can reproduce this t or not (1=True, 0=False)
patch.born <- patch
loci <- c(1:40) #empty space for loci (nr of loci=40) 20 for male quality trait + 20 for female philopatry trait value
patchx <- data.frame(ID,patch,gender,trait,female.trait,survival,ID.mother,ID.father, patch.last.year, nr.offspring, terr, repro, patch.born) #the dataframe is constructed for each patch including all vectors which where defined just before
patchx <- cbind(patchx, loci.matrix.pop)
population.total <- rbind(population.total,patchx) #data frame including all individuals of all patches (the dataframe of a patch is included in the population matrix)
}
population.total$ID <- c(1:nrow(population.total)) #the first generation of the population becomes a new ID
ID.scan <- nrow(population.total)+1
##### STATISTIC START #####
# Per-generation time series; the "1"/"2" suffixes hard-code two patches.
population.N <- rep(0,time) #empty vector for the populationsize of each generation (includes also pending males...)
population.N1 <- rep(0,time) #empty vector for the pop size in patch 1 of each generation
population.N2 <- rep(0,time) #empty vector for the pop size in patch 2 of each generation
population.meantrait1.males <- rep(0,time) #empty vector for the mean trait in patch 1 of each generation
population.meantrait2.males <- rep(0,time) #empty vector for the mean trait in patch 2 of each generation
population.meantrait1.females <- rep(0,time) #empty vector for the mean trait in patch 1 of each generation
population.meantrait2.females <- rep(0,time) #empty vector for the mean trait in patch 2 of each generation
population.males1 <- rep(0,time) #empty vector for the number of males in patch 1 of each generation
population.males2 <- rep(0,time) #empty vector for the number of males in patch 2 of each generation
population.females1 <- rep(0,time) #empty vector for the number of females in patch 1 of each generation
population.females2 <- rep(0,time) #empty vector for the number of females in patch 2 of each generation
offspring.produced1 <- rep(0, time) #empty vector for number of offspring produced in patch 1
offspring.produced2 <- rep(0, time) #empty vector for number of offspring produced in patch 2
cov.males1 <- rep(0,time) #empty vector for covariance of number of offspring and male quality in patch 1
cov.males2 <- rep(0,time) #empty vector for covariance of number of offspring and male quality in patch 2
########STATISTIC END #####
population <- nrow(population.total) #number of individuals
for(x in 1:population){ #LOOP OVER THE INDIVIDUALS
# ceiling of runif(40, ~0, 10) yields allele indices 1..10 per locus
population.total[x,loci.col] <- ceiling(runif(40,1e-16,10)) #each individual has 40 random numbers (first 10:row //last 10:column), the first 20 are for male trait, the last 20 for female trait
}
loci.matrix <- population.total[,loci.col] #get all loci from current pop matrix
# NOTE(review): 'values.population' is undefined here; trait.fun never reads
# that argument, so lazy evaluation hides the error. Clean up with trait.fun.
population.total <- trait.fun(population.total,values.population,loci.matrix, gen_phen_map, gen_phen_map2) #traitvalue-function: traitvalues for the population are included and overwrite the population matrix
#population.total <- female.trait.fun(population.total,values.population,loci.matrix, gen_phen_map2) #traitvalue-function: traitvalues for the population are included and overwrite the population matrix
##### GENERATION LOOP START #####
for(t in 1:time){
N.last1 <- nrow(population.total[population.total$patch==1,]) #storing patch 1 N for previous year
N.last2 <- nrow(population.total[population.total$patch==2,]) #storing patch 2 N for previous year
N <- nrow(population.total) #number of individuals in total (all patches included)
if(N>0) { #START IS ANYBODY THERE-LOOP: if there are any individuals and the population is not extinct
##### WHICH MALES ARE READY TO COMPETE? ####
if(nrow(population.total[population.total$gender=="female",])>0){ #are there any females?
population.total[population.total$gender=="female",]$repro <- 1 #all females are able to reproduce
}
if(nrow(population.total[population.total$gender=="male",])>0){ #are there any males?
if(nrow(population.total[population.total$gender=="male"&population.total$survival<(age-3),])>0){ #are there any males that are over 3 years old --> Hoffman 2003 'MALE REPRODUCTIVE STRATEGY AND THE IMPORTANCE OF MATERNAL STATUS IN THE ANTARCTIC FUR SEAL ARCTOCEPHALUS GAZELLA'
population.total[population.total$survival<(age-3)&population.total$gender=="male",]$repro <- 1 #males that are old enough get a 1 to make sure they can compete and reproduce afterwards, will be changed when they loose fight (dont obtain a territory)
if(nrow(population.total[population.total$gender=="male"&population.total$survival>=(age-3),])>0){
population.total[population.total$survival>=(age-3)&population.total$gender=="male",]$repro <- 0 #males that are not old enough get a 0 to make sure they cannot compete and reproduce afterwards, will be changed when they loose fight (dont obtain a territory)
#####
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1) #get all male individuals as new matrix
population.males <- nrow(N.male) #number of male individuals
##### MALE PATCH CHOICE #####
patchbook_males <- c() #vector for storing the patch choice of males
N.male <- choice.fun(N.male, patches) #Males decide where to go this year depending on last years success
patchbook_males <- N.male$patch #overwrite patch from previous year
population.total[population.total$gender=='male'&population.total$repro==1,]$patch <- patchbook_males #add info to population matrix
population.total[population.total$gender=='male'&population.total$repro==1,]$nr.offspring <- rep(0,nrow(N.male)) #set number of offspring to zero, so that number of offspring in this t can be added after reproduction again
##### MALE PATCH CHOICE END #####
##### MALE COMPETITION - WHICH TERRITORY MALE ESTABLISH/OBTAIN #####
population.total$terr <- c(rep(0, nrow(population.total))) #empty the territory vector for all indivduals
terrbook_males <- c() #vector for storing the territory choice for each male
N.male <- competition.fun(N.male, patches, population.males, territories) #territories are obtained after competition of males
N.male <- N.male[order(N.male$ID),] #order ID's because in the comp. function the individuals are reorderd and not the same order as in male matrix before. Ordering ID's gets it back in previous order
terrbook_males <- N.male$terr
population.total[population.total$gender=='male'&population.total$repro==1,]$terr <- terrbook_males #obtained territories of "winners" are written into pop.matrix
#All males that lost territory competition have certain mortality:
dying.males <- matrix(NA,nrow(population.total[population.total$gender=="male"&population.total$terr==0&population.total$repro==1,]),ncol=2) #empty matrix for all losers
dying.males[,2] <- runif(nrow(population.total[population.total$gender=="male"&population.total$terr==0&population.total$repro==1,]),0,1) < die.fight #for each individual is a random number distributed. if the number is below the deathrate a true is written into the vector + ID
dying.males[,1] <- population.total[population.total$gender=="male"&population.total$terr==0&population.total$repro==1,]$ID #IDS of the males are written here
dying.males.ID <- c()
dying.males.ID <- dying.males[,1][dying.males[,2]==1] #IDs of the males that died are stored
for(d2 in dying.males.ID){ #go trough the died males and change survival number
population.total[population.total$ID==d2,]$survival <- 0
}
#Update all population info after males died
population.total <-subset(population.total,population.total$survival>0) #population matrix: Individuals which have a survival higher then 0 stay alive in the dataframe. the others are deleted
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1)
N <- nrow(population.total)
##### MALE COMPETITION - FIGHT FOR TERRITORIES II #####
#Let males that lost in previous fight switch to other patch
males.patch.shift <- N.male[N.male$terr==0,]$ID #get the males that didnt obtain territory, they shift patches (ID is safed)
if(length(males.patch.shift>0)){
for(i9 in 1:length(males.patch.shift)){
N.male[N.male$ID==males.patch.shift[i9],]$patch <- (N.male[N.male$ID==males.patch.shift[i9],]$patch - 1 + floor(runif(1,1,patches)))%%patches + 1
}
}
patchbook_males <- c()
patchbook_males <- N.male$patch
population.total[population.total$gender=='male'&population.total$repro==1,]$patch <- patchbook_males #overwrite patch choice from before
#Males choose their territory again, fight again
terrbook_males <- c()
N.male <- competition.fun(N.male, patches, population.males, territories)
terrbook_males <- N.male$terr
population.total[population.total$gender=='male'&population.total$repro==1,]$terr <- terrbook_males
#All males that lost territory competition have certain mortality:
dying.males <- matrix(NA,nrow(population.total[population.total$gender=="male"&population.total$terr==0&population.total$repro==1,]),ncol=2) #empty matrix for all losers
dying.males[,2] <- runif(nrow(population.total[population.total$gender=="male"&population.total$terr==0&population.total$repro==1,]),0,1) < die.fight #for each individual is a random number distributed. if the number is below the deathrate a true is written into the vector + ID
dying.males[,1] <- population.total[population.total$gender=="male"&population.total$terr==0&population.total$repro==1,]$ID #IDS of the males are written here
dying.males.ID <- c()
dying.males.ID <- dying.males[,1][dying.males[,2]==1] #IDs of the males that died are stored
for(d2 in dying.males.ID){ #go trough the died males and change survival number and the loci matrix
population.total[population.total$ID==d2,]$survival <- 0
}
#Update all population info after males died
population.total <-subset(population.total,population.total$survival>0) #population matrix: Individuals which have a survival higher then 0 stay alive in the dataframe. the others are deleted
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1)
N <- nrow(population.total)
##### CHOOSE MALES FOR REPRODUCTION ####
population.total[population.total$terr>0&population.total$gender=="male",]$repro <- 1 #males that obtained territory during competition are able to reproduce
population.total[population.total$terr==0&population.total$gender=="male",]$repro <- 0 #males that did not obtain territory during competition are not able to reproduce
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1) #change male matrix
}
else{
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1) #this happens when there are no males
}
}
else{
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1) #this happens when there are no males over 3 years
}
} #End are there any males for fight?
else{
N.male <- subset(population.total,population.total$gender=="male"&population.total$repro==1) #this happens when there are no males under 3 years
}
##### COMPETITION END #####
#### FEMALE PATCH CHOICE ####
N.female <- subset(population.total,population.total$gender=="female") #matrix with just female individuals
if(nrow(N.female)>0){ #are there any females?
patchbook_females <- c()
N.female <- choice.fun.females(N.female,p,u,N.last1,N.last2, patches) #patch choice this year, depending on philopatry trait and last years density on birth patch
patchbook_females <- N.female$patch
population.total[population.total$gender=='female',]$patch <- patchbook_females #overwrite patch choice from before
}
### FEMALE CHOICE END ###
#Check if male and female are in same patch for mating:
tryst <- c(rep(0,patches))
N.female <- c()
offspring.vector <- 0
for(pls in 1:patches){#in which patches are both males and females
if(
nrow(subset(population.total,population.total$patch==pls&population.total$gender=="male"&population.total$repro==1))>0 &
nrow(subset(population.total,population.total$patch==pls&population.total$gender=="female"))>0
){
tryst[pls]<-2
}
}
for(neko in 1:patches){#all females which have males in their patches
if(tryst[neko]>0){
N.female<-rbind(N.female,subset(population.total,population.total$gender=="female"&population.total$patch==neko))
}
}
if(max(tryst)==2){ #IS OFFSPRING POSSIBLE? If one patch contains both genders then tyst has a level of 2
N.0 <- N/500
if(nrow(N.female)>0){ #number of offspring per female
offspring.vector <- rep(1,nrow(N.female)) #each female gets one pup
}
ID.offspring <- c() #empty vector for the ID of the offspring
patch.offspring <- c() #empty vector for the patch of the offspring
gender.offspring <- c() #empty vector for the gender of the offspring
trait.offspring <- c() #empty vector for the trait of the offspring
female.trait.offspring <- c()
survival.offspring <- c() #each offspring gets the survival of the maximum age
ID.mother.offspring <- c() #empty vector for the mothers ID of the offspring
ID.father.offspring <- c() #empty vector for the fathers ID of the offspring
loci.offspring <- matrix(NA,nrow=sum(offspring.vector),ncol=40) #empty vector for the locis of the offspring
#### START LOOP PARTNERFINDING #####
patchbook <- c() #empty vector for the patchnumber of the offspring
genderbook <- c() #empty vector for the gender of the offspring
N.female.patch <- table(factor(N.female$patch,levels = 1:patches)) #number of females in each patch (as a vector)
N.male.patch <- table(factor(N.male$patch,levels = 1:patches))#number of males in each patch (as a vector)
current.offspring <- 1 #counter that keeps track of how much offspring have emerged so far during the loop below
if(nrow(N.female)>0){ #START ANY FEMALES?: loop starts if there is at least one female individual
if(nrow(N.male)>0){
for(u in 1:nrow(N.female)){ #START LOOP PARTNERFINDING/mother
if(offspring.vector[u]>0){ #START GETS THE MOTHER OFFSPRING?
if(N.male.patch[N.female$patch[u]]>0){ #START ANY MALES IN THE PATCH OF THE MOTHER?: loop starts if there is at least one male individual in the mothers patch
mother <- N.female$ID[u] #gives the ID of the mother
ID.mother.offspring <- c(ID.mother.offspring, rep(mother,offspring.vector[u])) #ID of the mother is written into the vector for all her offspring
###FATHER####
potfather <- N.male$ID[N.male$patch==N.female$patch[u]] # storing the id's of potential fathers
if(length(potfather) > 1){
father <- sample(N.male$ID[N.male$patch==N.female$patch[u]],1) #sample the ID of one male which patchnumber is the same as the patchnumber of the mother
}else{
father <- potfather
}
ID.father.offspring <- c(ID.father.offspring,rep(father,offspring.vector[u])) #ID of the father is written into the vector as often as he becomes offspring with the mother
#GENETICS:
loci.mother <- population.total[population.total$ID==mother,loci.col] #vector of locis of the mother
loci.father <- population.total[population.total$ID==father,loci.col] #vector of locis of the father
loci.child <- rep(0,length(loci.col)) #empty vector with fixed length for the locis of the offspring
for(o in 1:offspring.vector[u]){ #START LOOP NUMBER CHILDREN per female
loci.child[1:10] <- loci.mother[(1:10) +sample(c(0,10),10,replace=TRUE)] #the offspring becomes 10 locis for male trait sampled from the mother (this is the allele row)
loci.child[11:20] <- loci.father[(1:10) +sample(c(0,10),10,replace=TRUE)] #the offspring becomes 10 locis sampled for male trait from the father (this is allele column)
loci.child[21:30] <- loci.mother[(1:10) +sample(c(0,10),10,replace=TRUE)] #the offspring becomes 10 locis sampled for female trait from the mother (this is the allele row)
loci.child[31:40] <- loci.father[(1:10) +sample(c(0,10),10,replace=TRUE)] #the offspring becomes 10 locis sampled for female trait from the father (this is allele column)
#total sum of loci = 40
loci.child <- unlist(loci.child)
#MUTATION
if(runif(1,0,1) < mutate){ #if a random number is lower than the mutationrate the offspring becomes a random distributed loci
loci.child[round(runif(1,1,10))] <- round(runif(1,1,10)) #the first runif selects the mutated loci and the second runif determines the new loci value (1-10 alleles)
}
loci.child <- unlist(loci.child)
loci.offspring[current.offspring,] <- loci.child #connects loci of the offspring to the matrix of the other offspring in this generation
current.offspring <- current.offspring + 1
if(runif(1,0,1)>0.5){ #if random number is higher as 0.5, the offspring is female
genderbook <- c(genderbook,"female") #the gender is written in the gender vector for the offspring
} else{ #otherwise the offspring is male
genderbook <- c(genderbook,"male") #the gender is written in the gender vector for the offspring
}
} #END LOOP NUMBER CHILDREN
} #END ANY MALES IN THE PATCH OF THE MOTHER?
} #END GETS THE MOTHER OFFSPRING?
} #END LOOP PARTNERFINDING/mother
loci.offspring <- as.data.frame(loci.offspring)
patchbook <- rep(N.female$patch,offspring.vector) #each offspring becomes the patchnumber of the mother
ID.offspring <- ID.fun(offspring.vector) #the ID of the offspring is calculated by the ID-function and written into the vector for their ID
trait.offspring <- c(rep(0,length(patchbook))) #the traitvalue of the offspring is set to 0 for the moment
female.trait.offspring <- c(rep(0,length(patchbook))) #the traitvalue of the offspring is set to 0 for the moment
survival.offspring <- c(rep(age,length(patchbook))) #each offspring gets the survival of the age limit pre defined
gender.offspring <- genderbook #genders of the offspring are written into the matrix
patch.offspring <- patchbook #patches of offspring are written into the matrix
nr.offspring.offspring <- c(rep(0,length(patchbook))) #empty column for the subsequent offspring they will get
terr.offspring <- c(rep(0, length(patchbook))) #empty column for subsequent territory
repro.offspring <- c(rep(0, length(patchbook))) #empty column for subsequent decision for reproduction in t
patch.born.offspring <- rep(N.female$patch,offspring.vector) #this stores info where on individuals is born (not changed over time)
population.offspring <- data.frame(ID.offspring,patch.offspring,gender.offspring,trait.offspring, female.trait.offspring, survival.offspring,ID.mother.offspring,ID.father.offspring, patch.offspring, nr.offspring.offspring, terr.offspring, repro.offspring, patch.born.offspring) #a new dataframe is made for the offspring of this generation
colnames(population.offspring) <- c("ID","patch","gender","trait","female.trait","survival","ID.mother","ID.father", "patch.last.year", "nr.offspring","terr","repro", "patch.born") #column names of the dataframe
population.offspring <- cbind(population.offspring, loci.offspring)
colnames(population.offspring) <- c("ID","patch","gender","trait","female.trait","survival","ID.mother","ID.father", "patch.last.year", "nr.offspring","terr","repro", "patch.born",1:40) #column names of the dataframe
population.offspring <- trait.fun(population.offspring,values.offspring, loci.offspring, gen_phen_map, gen_phen_map2) #the offspring matrix is overwritten including the traitvalues calculated by the traitvalue-function
#population.offspring <- female.trait.fun(population.offspring,values.offspring, loci.offspring, gen_phen_map2) #the offspring matrix is overwritten including the traitvalues calculated by the traitvalue-function
#INFATICIDE: Let offspring die with mortality depending on patch density
infanticide.vector <- c(rep(NA,patches))
for(p4 in 1:patches){ #for each patch a specific mortality/infanticide rate, depending on density on the patch
curr_N <- sum(population.total$patch==p4)
y=i+s*plogis(0.01*(curr_N)) #mortality is created
infanticide.vector[p4] <- y #safed in vector
}
dying.offspring <- matrix(NA,nrow(population.offspring),ncol=2)
dying.offspring[,2] <- runif(nrow(population.offspring),0,1) < infanticide.vector[population.offspring$patch] #for each individual is a random number distributed. if the number is below the deathrate the individual is written into a vector
dying.offspring[,1] <- population.offspring$ID
dying.offspring.ID <- c()
dying.offspring.ID <- dying.offspring[,1][dying.offspring[,2]==1]
for(d3 in dying.offspring.ID){
population.offspring[population.offspring$ID==d3,]$survival <- 0
}
population.offspring <-subset(population.offspring,population.offspring$survival>0) #remove all dead offspring
population.total <- rbind(population.total,population.offspring) #the offspring population matrix is added to the general population matrix
rownames(population.total) <- 1:nrow(population.total) #rownames are overwritten
}#END ANY MALES?
}#END ANY FEMALES?
}#END IS OFFSPRING POSSIBLE?
population.total$nr.offspring <- table(factor(ID.father.offspring,levels=population.total$ID)) #writing the number of offspring into the nr_offspring columns, stored for one t
##### DEATH START #####
#death by age:
N <- nrow(population.total)
population.total$survival <- population.total$survival-1 #every adult loses one survival counter per generation
#random Death:
die <- mortality(N,surv) #get density dependend mortality rate (=die)
dying.individuals <- runif(nrow(population.total),0,1) < die #for each individual is a random number distributed. if the number is below the deathrate the individual is written into a vector
population.total$survival[dying.individuals] <- 0 #the individuals that where written into the vector below, become a 0 in their survival
#erasing dead individuals:
if(nrow(population.total)>0){
population.total <-subset(population.total,population.total$survival>0) #population matrix: Individuals which have a survival higher then 0 stay alive in the dataframe. the others are deleted
}
##### END DEATH #####
###Statistic 2##
population.N[t] <- nrow(population.total) #overwrites the populationsizes for each generation in the empty vector
population.N1[t] <- nrow(population.total[population.total$patch==1&population.total$repro==1,]) #get population size from patch 1 for all individuals that reproduced
population.N2[t] <- nrow(population.total[population.total$patch==2&population.total$repro==1,]) #get population size from patch 2 for all ind. that reproduced
population.meantrait1.males[t] <- mean(population.total[population.total$gender=="male"&population.total$patch==1&population.total$repro==1,]$trait) #average trait-value from males for patch 1 for first generation
population.meantrait2.males[t] <- mean(population.total[population.total$gender=="male"&population.total$patch==2&population.total$repro==1,]$trait) #average trait-value from males for patch 2 for first generation
population.meantrait1.females[t] <- mean(population.total[population.total$gender=="female"&population.total$patch==1&population.total$repro==1,]$female.trait) #average trait-value from females for patch 1 for first generation
population.meantrait2.females[t] <- mean(population.total[population.total$gender=="female"&population.total$patch==2&population.total$repro==1,]$female.trait) #average trait-value from females for patch 2 for first generation
population.males1[t] <- nrow(population.total[population.total$gender=="male"&population.total$patch==1&population.total$repro==1,]) #Number of males in patch 1 for first generation
population.males2[t] <- nrow(population.total[population.total$gender=="male"&population.total$patch==2&population.total$repro==1,]) #Number of males in patch 2 for first generation
population.females1[t] <- nrow(population.total[population.total$gender=="female"&population.total$patch==1,]) #Number of females in patch 1 for first generation
population.females2[t] <- nrow(population.total[population.total$gender=="female"&population.total$patch==2,]) #Number of females in patch 2 for first generation
offspring.produced1[t] <- nrow(population.total[population.total$survival==(age-1)&population.total$patch==1,])#number of new offspring in patch 1
offspring.produced2[t] <- nrow(population.total[population.total$survival==(age-1)&population.total$patch==2,])#number of new offspring in patch 2
cov.males1[t] <- cov((population.total[population.total$gender=="male"&population.total$patch==1&population.total$repro==1,]$nr.offspring),(population.total[population.total$gender=="male"&population.total$patch==1&population.total$repro==1,]$trait)) #covariance of number of offspring and male quality in patch 1
cov.males2[t] <- cov((population.total[population.total$gender=="male"&population.total$patch==2&population.total$repro==1,]$nr.offspring),(population.total[population.total$gender=="male"&population.total$patch==2&population.total$repro==1,]$trait)) #covariance of number of offspring and male quality in patch 2
statistic.matrix[t,] <- cbind(population.N[t],population.N1[t],population.N2[t],population.meantrait1.males[t], population.meantrait2.males[t],population.meantrait1.females[t], population.meantrait2.females[t], population.males1[t], population.males2[t], population.females1[t], population.females2[t], offspring.produced1[t], offspring.produced2[t], cov.males1[t], cov.males2[t])
##### End Statistic 2#############
}#END IS ANYBODY THERE?
#print(t)
}##### END GENERATION LOOP #####
#Stored summary statistic formatted for output data
statistic.matrix[is.na(statistic.matrix)] <- 0 #NaN can be produced when trait values are not existing (remove these and call them 0)
colnames(statistic.matrix) <- c("N","N1","N2","meantrait.males1","meantrait.males2","meantrait.females1","meantrait.females2","N.males1","N.males2", "N.females1", "N.females2", "offspring.produced1", "offspring.produced2", "cov.males1", "cov.males2") #column names of statistic store matrix
return(statistic.matrix)
}#END SIMULATION.RUN
#Run function
#debug(simulation.fun)
#statistic <- simulation.fun()
|
590232d924089a5d9c9528378cdb39251b152dc5
|
7f93d02173ea31f52112f8ced858c9b9aa839094
|
/LIME/script_LIME.R
|
87a0833cbb6801889e8f0b134310b1ab9ab1745e
|
[] |
no_license
|
KunWang-Fishery/DataPoorMethods
|
c9274449ffb0d15066dbb7b0a0da9587e5438d0c
|
2ee26e5dc02afb8511a164c9eb47023a675cf49a
|
refs/heads/master
| 2023-04-21T20:43:49.174102
| 2021-05-05T15:38:42
| 2021-05-05T15:38:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,538
|
r
|
script_LIME.R
|
##############################################################
########### Run length based models LBSPR and LIME ###########
########## mpons@uw.edu - September 2018 #####################
##############################################################
# One-off environment setup for the LBSPR / LIME length-based assessment
# script: installs the required packages (two of them from GitHub) and
# attaches everything used below.
rm(list=ls())
########
# Toolchain sanity checks.
R.version$os # to check how lucky you are ...
Sys.getenv('PATH')
# NOTE(review): session_info() comes from devtools and find_rtools() from
# pkgbuild; on a fresh session these calls fail until the library() calls
# further down have been run -- confirm the intended execution order.
session_info()
getRversion()
install.packages("LBSPR")
install.packages("devtools", repos='http://cran.us.r-project.org')
install.packages("TMB")
devtools::install_github("kaskr/TMB_contrib_R/TMBhelper", force=T)
devtools::install_github("merrillrudd/LIME")
#devtools::install_github("brodieG/fansi")
find_rtools() # should be TRUE, assuming you have Rtools 3.5
library(pkgbuild)
library(devtools)
library("LBSPR")
library("TMB")
library("LIME")
library(dplyr)
library(ggplot2)
### read length data
# NOTE: hard-coded absolute path -- edit Main.dir for your machine.
Main.dir<-"C:/Users/mauricio.mardones/Documents/IFOP/Cursos/UW_Seattle/LBSPR_LIME" # your directory
setwd(Main.dir)
Length.comps<-read.csv("length_struc_urchin.csv",header=T)
head(Length.comps) #First column is the year, the following are the numbers of individuals in each lenght bin
tail(Length.comps)
#Biology: life-history inputs shared by LBSPR and LIME below (lengths in mm,
#per LB_pars@L_units set later in this script).
MaxAge=12
L50=43.2 #Jaramillo -- length at 50% maturity
L95=65 # check against the literature -- length at 95% maturity
M=0.25        # natural mortality (passed as M to create_lh_list / LB_pars@M)
h=0.8         # steepness (passed as h)
wla=0.0005    # weight-length coefficient (lwa)
wlb=2.97973   # weight-length exponent (lwb)
K=0.139       # von Bertalanffy growth rate (vbk)
t0=(-0.45)    # von Bertalanffy t0
Linf=136      # asymptotic length (linf)
LenCV=0.1     # CV of length-at-age (CVlen)
SigmaR=0.4    # recruitment variability
SigmaF=0.2    # fishing mortality penalty
SigmaC=0.2    # catch observation error
SigmaI=0.2    # index observation error
R0=1          # unfished recruitment (relative)
qcoef=1e-5    # catchability coefficient
start_ages=0
rho=0         # recruitment autocorrelation
nseasons=1
binwidth=2    # length-bin width
S50=65        # length at 50% selectivity
S95=70        # length at 95% selectivity
####################################################################
##### Length based methods #################
####################################################################
# Length-bin lower edges spanning the observed size range.
minL<-34
maxL<-136
Bins<- seq(from=minL, to= maxL,
           by = binwidth)
###########################################################################
###### Life-history parameter list used by both LBSPR and LIME.
###### Fix(review): in the original script `lh` was plotted and passed to
###### generate_data() *before* create_lh_list() was called, so a clean
###### session stopped with "object 'lh' not found".  The list is now
###### built first; the plotting and simulation steps follow unchanged.
lh <- create_lh_list(vbk=K, linf=Linf, t0=t0,
                     lwa=wla, lwb=wlb,
                     S50=S50, S95=S95, selex_input="length",
                     M50=L50, M95=L95, selex_type=c("logistic"),
                     maturity_input="length", M= M,
                     SigmaR=SigmaR, SigmaF=SigmaF, SigmaC=SigmaC,
                     SigmaI=SigmaI,CVlen=LenCV,
                     h=h, R0=R0, qcoef=qcoef,
                     start_ages=start_ages, rho=rho,
                     nseasons=nseasons, binwidth=binwidth,
                     AgeMax= MaxAge,
                     Frate=0.1,    # fishing mortality ramp rate
                     Fequil=0.25,  # equilibrium fishing mortality
                     nfleets=1)
#####################################################################
##########  plot life-history inputs (growth, maturity, selex) #####
####################################################################
x11()
par(mfrow=c(2,2), mar=c(4,4,3,1))
plot(lh$L_a, type="l", lwd=4, xlab="Age", ylab="Length (cm)")
plot(lh$W_a, type="l", lwd=4, xlab="Age", ylab="Weight (g)")
plot(lh$Mat_l, type="l", lwd=4, xlab="Length (cm)", ylab="Proportion mature")
plot(lh$S_l, type="l", lwd=4, xlab="Length (cm)", ylab="Proportion selected to gear")
plot(lh$S_fl[1,], type="l", lwd=4, xlab="Length (cm)", ylab="Proportion selected to gear")
# Age-length transition matrix: probability of each length bin at each age.
plba <- with(lh, age_length(highs, lows, L_a, CVlen))
tallas <- seq(40,138,2)
x11()
plot(tallas,plba[1,], type ="n", ylab="Probabilidad", xlab="Tallas(mm)")
for(i in 1:12){
  lines(tallas,plba[i,], col=i)
}
#####################################################################
##################### simulate data ################################
######################################################################
# Simulated population with known dynamics, used for cross-checking.
true <- generate_data(modpath=NULL,
                      itervec=1,
                      lh=lh,
                      Fdynamics="Ramp",
                      Rdynamics="AR",
                      Nyears=17,
                      Nyears_comp=17,
                      comp_sample=200,
                      init_depl=0.8,
                      seed=1)
## years with length data -- rename with your own years with length data
length_years <- rownames(true$LF)
## length bins -- rename with your upper length bins
length_bins <- colnames(true$LF)
# Reshape the observed length compositions into the years-by-length-bins
# matrix expected by LIME (drop the year column, label both dimensions).
lfdata<-as.matrix(Length.comps[,-1])
# NOTE(review): assumes ncol(lfdata) == length(Bins) -- verify against the CSV.
colnames(lfdata)<-Bins
years <- Length.comps[,1]
rownames(lfdata)<-years
lf <- lfdata;lf
## input data
data_list <- list("years"=as.numeric(rownames(lf)), "LF"=lf)
## create input list -- adds some zeros on the end as well to make sure there is room for larger fish
inputs <- create_inputs(lh=lh, input_data=data_list)
# Bin midpoints: lower edges shifted by half a bin width.
mids<- seq(from=minL+binwidth/2,
           to= maxL+binwidth/2,
           by = binwidth)
inputs$mids<-mids
## run LIME (length-composition data only)
res <- run_LIME(modpath=NULL,
                input=inputs,
                data_avail="LC")
## check TMB inputs
Inputs <- res$Inputs
## Report file
Report <- res$Report
## Standard error report
Sdreport <- res$Sdreport
## check convergence: positive-definite Hessian and small final gradient
hessian <- Sdreport$pdHess
gradient <- res$opt$max_gradient <= 0.001
hessian == TRUE & gradient == TRUE  # printed check: both must be TRUE
#Example: Length data only
# Fix(review): the original passed `inputs_all`, an object never created in
# this script; the input list built above by create_inputs() is `inputs`.
lc_only <- run_LIME(modpath = NULL, input = inputs, data_avail = "LC")
###################################
## Run LBSPR
###################################
# Populate the LBSPR parameter object from the same life-history list (lh)
# used for LIME so the two methods are directly comparable.
LB_pars <- new("LB_pars")
LB_pars@Species <- ""
LB_pars@MK <- lh$M/lh$vbk   # M/K ratio
LB_pars@M <- lh$M
LB_pars@Linf <- lh$linf
LB_pars@CVLinf <- lh$CVlen
LB_pars@L50 <- lh$ML50
LB_pars@L95 <- lh$ML95
LB_pars@Walpha <- lh$lwa
LB_pars@Wbeta <- lh$lwb
LB_pars@SL50 <- S50
LB_pars@SL95 <- S95
LB_pars@BinWidth <- binwidth
LB_pars@Steepness <- 0.8
LB_pars@R0 <- 1
LB_pars@L_units<-"mm"
# Length-frequency object: bin midpoints plus the (bins x years) data matrix.
LB_lengths <- new("LB_lengths")
LB_lengths@LMids <- mids
LB_lengths@LData <- t(inputs$LF[,,1])
LB_lengths@Years <- data_list$years
LB_lengths@NYears <- length(data_list$years)
# Run the model (growth-type-group variant).
lbspr_res <- LBSPRfit(LB_pars=LB_pars, LB_lengths=LB_lengths, yrs=NA, Control=list(modtype="GTG"))
# Overlay LIME and LBSPR fits to the observed length compositions.
plot_LCfits(Inputs=Inputs,
            Report=Report,
            LBSPR=lbspr_res)
# Compare estimated F, recruitment, SPR and selectivity from both methods.
plot_output(Inputs=Inputs,
            Report=Report,
            Sdreport=Sdreport,
            lh=lh,
            LBSPR=lbspr_res,
            plot=c("Fish","Rec","SPR","Selex"),
            set_ylim=list("SPR" = c(0,1)),
            true_years=inputs$years)
plot_LCfits(Inputs=list("LF"=true$LF))
# NOTE(review): LF_df is never defined in this script, so the next call fails
# as written -- confirm where LF_df was meant to come from.
plot_LCfits(LF_df = LF_df, Inputs = lc_only$Inputs, Report = lc_only$Report)
|
63c3f6ace5bec05eab27d0ded4c458fb10034fa2
|
6e2334f8dca54e7a54e71b8790475cbb7bd354d2
|
/plot2.R
|
27e857cf981f107a80f9b4d8635ebf8082ea766d
|
[] |
no_license
|
sashaxp/ExData_Plotting1
|
df040be4120364fdb8d2341f81f131febc2efaa9
|
e2e95b2feb91e4e7f2aa50a29778d17c1f748e44
|
refs/heads/master
| 2021-01-18T11:57:57.470538
| 2015-01-18T14:10:10
| 2015-01-18T14:10:10
| 29,423,270
| 0
| 0
| null | 2015-01-18T11:09:50
| 2015-01-18T11:09:48
| null |
UTF-8
|
R
| false
| false
| 493
|
r
|
plot2.R
|
## Plot 2: household global active power over 2007-02-01..2007-02-02,
## drawn on screen and then copied to plot2.png (480 x 480 px).
power <- read.table("household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

## Restrict to the two days of interest.
feb <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")

## Combine date and time into a single POSIXct timestamp for the x axis.
feb$Datetime <- as.POSIXct(paste(feb$Date, feb$Time))

plot(feb$Datetime, feb$Global_active_power,
     type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
b6a4987eb9ca70e4c6d2238a0345f4c2c494ec8f
|
cb3b2c16ff49b1e87bfaddab62a3ecb2fbf67e1e
|
/man/hbic_med.Rd
|
4afb5f569b5740154dbe4ecb136aab9bccac00e0
|
[] |
no_license
|
seonjoo/smm
|
466668bbe89c8ed7d48f27d2827c5803d790e9a3
|
cf0eff3d803550be0a58a21cfba36ec1da84eb11
|
refs/heads/master
| 2021-06-05T09:42:34.788365
| 2021-02-09T14:50:17
| 2021-02-09T14:50:17
| 134,604,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 341
|
rd
|
hbic_med.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbic_med.R
\name{hbic_med}
\alias{hbic_med}
\title{Function to do HBIC computation for multiple mediations model}
\usage{
hbic_med(fit, fit.n)
}
\arguments{
\item{fit}{Fitted multiple-mediation model object.}
\item{fit.n}{Sample size used in the HBIC penalty term.}
}
\value{
The computed HBIC value for the fitted model.
\description{
Function to do HBIC computation for multiple mediations model
}
|
95f3a10524df3235bcaf4d16115000476541fc2a
|
a9c610bd87d4270f1ce8fa5a16c1d374b02cb419
|
/app/install.r
|
57a3793c40cafd69f38d56ccb269839429569df8
|
[] |
no_license
|
johnjung/clustering_workshop_tool
|
7eee343d1e042ad68c474ab87a0c66536e319175
|
da0a55cac20ffc6ab43a21a7a3503027b3c0e403
|
refs/heads/master
| 2020-12-22T13:01:58.788996
| 2020-03-04T22:20:33
| 2020-03-04T22:20:33
| 236,791,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
install.r
|
# Packages required by the clustering workshop app, installed from the
# Indiana University CRAN mirror.
# Fix(review): collapsed eight identical install.packages() calls into one
# vectorized call -- install.packages() accepts a character vector of package
# names, which resolves shared dependencies in a single pass.
pkgs <- c('GGally', 'ggdendro', 'ggplot2', 'igraph', 'svglite',
          'network', 'sna', 'textshape')
install.packages(pkgs, repos = 'https://ftp.ussg.iu.edu/CRAN/')
|
e96a0d771986f8f54c86ed9b925d67e8012ceaee
|
cfc6a45c07d8f73165930dbaf10ceb2578aa9f8e
|
/Other tools/Weyerhauser_huc12_temp_data_pull.R
|
240f5c8d18cd53637f8a3755d7297b43ce688f0c
|
[] |
no_license
|
TravisPritchardODEQ/IR2018
|
b9deae2c794ecb7b53f3fc64e5293ab1fe750043
|
94baee642789cce9f8e49771d28ff45e61ca310f
|
refs/heads/master
| 2021-06-21T22:21:06.892038
| 2020-12-10T16:07:05
| 2020-12-10T16:07:05
| 137,256,651
| 2
| 0
| null | 2020-03-24T16:13:43
| 2018-06-13T18:41:05
|
R
|
UTF-8
|
R
| false
| false
| 1,899
|
r
|
Weyerhauser_huc12_temp_data_pull.R
|
# This code generates temperature assessment results and temperature data
# in response to a request from Weyerhaeuser.
# They provided a list of HUC12s and wanted watershed units from those HUCs.
# Saving this script due to the function to extract HUC12 from WS AUs
# and combining the assessed data with station information.
library(tidyverse)
library(openxlsx)
library(AWQMSdata)
library(stringi)
# NOTE: hard-coded drive/network paths -- runs only inside the agency environment.
load("E:/Documents/2018-2020_IR_Database/data/assessment_display.Rdata")
WH_request <- read.xlsx("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Information_Requests/Weyerhaeuser/WY HUC12 Watersheds List_DEQ Request .xlsx") %>%
  mutate(HUC12.ID = as.character(HUC12.ID))
# Keep temperature assessments whose HUC12 -- the third "_"-delimited token
# of the AU_ID -- is on the requested list.
temp_assessments <- joined_BU_summary %>%
  filter(Char_Name == 'Temperature') %>%
  mutate(huc12 = sapply(strsplit(as.character(AU_ID), "_"), function(x) x[[3]][1])) %>%
  filter(huc12 %in% WH_request$HUC12.ID)
# Move the derived huc12 column (position 11) next to the identifiers.
# NOTE(review): positional reordering -- breaks silently if columns change.
temp_assessments <- temp_assessments[,c(1:2,11,3:10)]
write.xlsx(temp_assessments, '//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Information_Requests/Weyerhaeuser/WH_temp_assessments.xlsx')
temperature_data_final <- read.csv("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Draft List/Temperature/Data_Review/Temperature_IR_data_ALLDATA - final.csv",
                                   stringsAsFactors = FALSE)
# Subset the raw temperature data to the assessed AUs.
WH_temperature_data <- temperature_data_final %>%
  filter(AU_ID %in% temp_assessments$AU_ID) %>%
  arrange(AU_ID)
# Attach station coordinates/descriptions from AWQMS.
wH_stations <- query_stations(mlocs = WH_temperature_data$MLocID) %>%
  select(MLocID, StationDes, Lat_DD, Long_DD, Datum)
WH_temperature_data_stations <- WH_temperature_data %>%
  left_join(wH_stations, by = "MLocID")
# NOTE(review): positional reordering again -- verify column indices if the
# upstream CSV layout ever changes.
WH_temperature_data_stations <- WH_temperature_data_stations[,c(1:2, 51:54, 3:49)]
write.xlsx(WH_temperature_data_stations, '//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Information_Requests/Weyerhaeuser/WH_temp_data.xlsx')
|
12385e0477ebaa8042bf3014d057bc4f5807ae12
|
5f2da1bef657a78b702a1a963ecf293388b6bb2a
|
/man/eval.cart.Rd
|
67e858163bafc1df15b136db93fe84c0caf4eaf5
|
[] |
no_license
|
cran/delt
|
d311a4453dbe8e232cc31d9c824fdc97bd669a11
|
351c15327bf12fa06add21b4a2ec6b57d39fada6
|
refs/heads/master
| 2021-01-20T12:06:20.790793
| 2015-06-02T00:00:00
| 2015-06-02T00:00:00
| 17,718,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,361
|
rd
|
eval.cart.Rd
|
\name{eval.cart}
\alias{eval.cart}
\title{ Calculates a CART histogram }
\description{
Calculates a CART histogram.
The estimate is represented as an evaluation tree.
An CART histogram is a multivariate adaptive histogram
which is obtained by pruning an evaluation tree of an overfitting
histogram.
}
\usage{
eval.cart(dendat, leaf, minobs = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dendat}{ n*d data matrix }
\item{leaf}{ positive integer; the cardinality of the partition of the
histogram }
\item{minobs}{ non-negative integer; splitting of a bin of the overfitting
histogram will be continued if
the bin contains "minobs" or more observations }
}
\details{
The partition of the histogram may not contain exactly "leaf"
rectangles: the cardinality of the partition is as close as possible
to "leaf"
}
\value{
An evaluation tree
}
%\references{ ~put references to the literature/web site here ~ }
\author{ Jussi Klemela }
%\note{ ~~further notes~~ }
\seealso{
\code{\link{lstseq.cart}},
\code{\link{densplit}}
}
\examples{
library(denpro)
dendat<-sim.data(n=600,seed=5,type="mulmodII")
eva<-eval.cart(dendat,16)
dp<-draw.pcf(eva,pnum=c(60,60))
persp(dp$x,dp$y,dp$z,theta=-20,phi=30)
}
\keyword{ smooth }% at least one, from doc/KEYWORDS
\keyword{ multivariate }% __ONLY ONE__ keyword per line
|
d566219d6d47b5506557dda8b01ddee882253f4f
|
6053b45fed30ced7465aa0589c33749781571e9e
|
/bugs/nimble-normal-linear.R
|
58a655afacaf33863240482415ddd350ef37d132
|
[] |
no_license
|
Freshwater-Fish-Ecology-Laboratory/model-templates
|
d8627a84f62c8a3ce6198c2d5f8f7f8a6021a16e
|
563eb5b45b898455a262d8074d1f67bcb0e63d50
|
refs/heads/main
| 2023-08-27T19:34:16.876560
| 2021-11-06T00:16:25
| 2021-11-06T00:16:25
| 406,216,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,819
|
r
|
nimble-normal-linear.R
|
### Simulate river temperature as a linear function of discharge.
source("header.R")

## "True" parameter values used to generate the data.
bDischarge <- -0.2  # slope: temperature change per unit discharge
bIntercept <- 25    # temperature at zero discharge
bSigma <- 2         # residual standard deviation

## Draw covariate values, then observations scattered about the line.
discharge <- runif(100, 0, 50)
expected_temp <- bIntercept + bDischarge * discharge
temperature <- rnorm(100, mean = expected_temp, sd = bSigma)

data <- data.frame(Discharge = discharge,
                   Temperature = temperature)

## Quick visual check of the simulated relationship.
ggplot(data = data, aes(x = Discharge, y = Temperature)) +
  geom_point()
## NIMBLE model for the linear regression of temperature on discharge.
## Fix(review): in BUGS/NIMBLE the second *positional* argument of dnorm()
## is the precision (tau), not the standard deviation.  The data above are
## simulated with sd = bSigma, and dnorm(0, 1000) is an extremely tight
## prior (sd ~ 0.03) rather than a vague one -- so sd is now named
## explicitly in the priors and in the likelihood.
code <- nimble::nimbleCode({
  # Vague priors on the coefficients; flat prior on the residual sd.
  bIntercept ~ dnorm(0, sd = 1000)
  bDischarge ~ dnorm(0, sd = 1000)
  bSigma ~ dunif(0, 100)
  # Likelihood: straight-line mean with Gaussian observation error.
  for(i in 1:nObs) {
    eTemperature[i] <- bIntercept + bDischarge * Discharge[i]
    Temperature[i] ~ dnorm(eTemperature[i], sd = bSigma)
  }
})
## Build and compile the model.
## NOTE(review): Discharge is supplied as a *fresh* runif() draw, not the
## `discharge` vector used to simulate `temperature` above -- presumably
## intentional since cmodel$simulate() below regenerates Temperature from
## the model itself (discarding data$Temperature), but confirm.
cmodel <- nimbleModel(code,
                      constants = list(nObs = 100),
                      # inits = list(bIntercept = 25,
                      #              bDischarge = -0.2,
                      #              bSigma = 0.5),
                      data = list(Discharge = runif(100, 0, 50),
                                  Temperature = data$Temperature)) %>%
  compileNimble()
## All nodes downstream of the top-level parameters; simulating them
## overwrites Temperature with model-generated values.
nodes <- cmodel$getDependencies(c("bIntercept", "bDischarge", "bSigma"),
                                self = FALSE, downstream = TRUE)
cmodel$simulate(nodes)
### plot simulated temperatures
data <- data.frame(Discharge = cmodel$Discharge,
                   Temperature = cmodel$Temperature,
                   eTemperature = cmodel$eTemperature)
ggplot(data = data, aes(x = Discharge, y = Temperature)) +
  geom_point()
## Register the simulated values as data so the MCMC conditions on them.
cmodel$setData(list(Temperature = cmodel$Temperature,
                    Discharge = cmodel$Discharge))
## Build, compile and run the sampler, then inspect the intercept trace.
sim_mcmc <- buildMCMC(cmodel) %>% compileNimble(project = cmodel)
samples <- runMCMC(sim_mcmc, niter = 50000, nburnin = 5000)
plot(samples[ , 'bIntercept'], type = 'l', xlab = 'iteration', ylab = "bIntercept")
|
d97aadefde0e0ed48181e4cedbb9e272e4d75b2c
|
2567e5f2f0400ed30e8a3aa2f777968e080a3ee6
|
/Kaggle-SantanderCustomerSatisfaction/kaggle_randn_sample.R
|
5062e620ce5faa05a9a7a0cbba6837b923d4c0a3
|
[] |
no_license
|
xlhtc007/Practice
|
90b9261ce59451e6a3f823ed6bd6645ccd6fb2c7
|
32b73c3c8b219d58317187178626c6176ebee026
|
refs/heads/master
| 2020-04-07T11:50:03.873855
| 2018-08-14T14:43:33
| 2018-08-14T14:43:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,150
|
r
|
kaggle_randn_sample.R
|
# Santander Customer Satisfaction: bagged xgboost runs over random 80%
# feature subsets.  Each model is cross-validated; only models whose CV AUC
# clears a threshold are retrained and used to score the hold-out split,
# with predictions and feature lists cached to disk.
train_feature <- createDataPartition(y = df_train_x$TARGET, p = 0.7,
                                     list = FALSE,
                                     times = 1)
df.train <- df_train_x[train_feature,-1]
df.test <- df_train_x[-train_feature,-1]
# Class balance of each split (share of TARGET == 1).
df.test%>%count(TARGET)%>%mutate(pct =n/sum(n)) # 0.03876173
df.train%>%count(TARGET)%>%mutate(pct =n/sum(n)) # 0.03991431
X_test_predictions <- c()
for(i in 1:90) {
  # i <- 1
  # NOTE(review): start_time is reset on every iteration, so the elapsed
  # time printed after the loop covers only the final iteration.
  start_time <- proc.time()
  set.seed(234 * i)  # per-iteration seed so each feature draw differs
  rate <- 0.8        # fraction of features sampled for this model
  feature_set_index <- createDataPartition(1:(ncol(df.train)-1), p = rate,
                                           list = FALSE,
                                           times = 1)
  feature_set_index <- feature_set_index[,1]
  # Random feature subset plus the TARGET column (assumed at position 307).
  X_sp_ <- df.train[, c(feature_set_index,307)]
  X_test_sp_ <- df.test[, c(feature_set_index,307)]
  X_all_feature_ <- names(X_sp_)
  # Sparse design matrices for xgboost.
  dx_xgb <- sparse.model.matrix(TARGET ~ ., data = X_sp_)
  dx_xgb_test <- sparse.model.matrix(TARGET ~ ., data = X_test_sp_)
  dX_xgb_ <- xgb.DMatrix(dx_xgb, label = X_sp_$TARGET)
  param <- list( objective = "binary:logistic",
                 booster = "gbtree",
                 eval_metric = "auc",
                 eta = 0.02,
                 max_depth = 5,
                 subsample = 0.7,
                 colsample_bytree = 0.7, # auc0.842956
                 min_child_weight = 1,
                 nthread = 24)
  # Run cross-validation with early stopping.
  cv.Folds = 5
  cv.nround = 1200
  bst.cv = xgb.cv(param = param,
                  data = dX_xgb_,
                  # label = X_sp_$TARGET, # not needed: label already set in the DMatrix
                  nfold = cv.Folds,
                  nrounds = cv.nround,
                  verbose = 1,
                  early.stop.round = 20,
                  maximize = T)
  # Keep only feature subsets whose best CV AUC beats the threshold.
  if(max(bst.cv$test.auc.mean) > 0.8411){
    print(paste0("the best cv auc:", max(bst.cv$test.auc.mean)))
    dX_xgb_test_ <- xgb.DMatrix(dx_xgb_test, label = X_test_sp_$TARGET)
    watchlist <- list(train=dX_xgb_, eval = dX_xgb_test_)
    # Retrain at the CV-optimal number of rounds.
    xgb.rand <- xgb.train(params= param,
                          data = dX_xgb_,
                          nrounds = which.max(bst.cv$test.auc.mean),
                          verbose = 1,
                          watchlist = watchlist,
                          maximize = T)
    # **************************************
    # calculate XGBoost importance
    # **************************************
    # X_all_imp <- xgb.importance(X_all_feature$feature_name, model=xgb)
    # saveRDS(X_all_imp_, paste0("cache/",folder,"test/X_all_imp",i,".RData"))
    # predict values in test set
    y_pred <- predict(xgb.rand, dX_xgb_test_)
    # Pair predictions with the hold-out row ids and cache to disk.
    X_test_predictions <- as.data.frame(cbind(
      df_train_x[-train_feature, 1],
      y_pred ))
    saveRDS(X_test_predictions, paste0("cache/X_test_predictions_",i,".RData"))
    saveRDS(X_all_feature_, paste0("cache/X_test_features_",i,".RData"))
  }
}
elapse_time <- proc.time() - start_time
# NOTE(review): prints one line per proc.time() component and, per the note
# above, reflects only the last loop iteration.
print(paste0("time pass", elapse_time))
|
b3ff06552e12cf2095bbb913f05f06b90f126dd6
|
7c39da976f28af016e5b1f847e68473c659ea05d
|
/man/fuseCDS_Rlist.Rd
|
5b08623dc73c8ff124316d76286dfcb54b513a54
|
[] |
no_license
|
cancer-genomics/trellis
|
b389d5e03959f8c6a4ee7f187f7749048e586e03
|
5d90b1c903c09386e239c01c10c0613bbd89bc5f
|
refs/heads/master
| 2023-02-24T05:59:44.877181
| 2023-01-09T20:38:36
| 2023-01-09T20:38:36
| 59,804,763
| 3
| 1
| null | 2023-01-11T05:22:52
| 2016-05-27T04:45:14
|
R
|
UTF-8
|
R
| false
| true
| 775
|
rd
|
fuseCDS_Rlist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fusion-utils.R
\name{fuseCDS_Rlist}
\alias{fuseCDS_Rlist}
\title{Extract the CDS involved in each rearrangement of a RearrangementList object}
\usage{
fuseCDS_Rlist(rlist, jxns)
}
\arguments{
\item{rlist}{a \code{RearrangementList}}
\item{jxns}{a \code{GRanges} specifying the 5-prime and 3-prime genomic regions that form a new sequence junction}
}
\value{
a \code{List} of the CDS
}
\description{
Extract for each rearrangement the full CDS of the fused sequence in the somatic genome (fusions), the partial CDS of the 5-prime (tum.5p) and 3-prime (tum.3p) transcripts that are involved in the fusion, and the full CDS of the 5-prime (ref.5p) and 3-prime transcripts in the reference genome.
}
|
ea097e1005fc16f7bed2eb4ca3664bc75facbad0
|
455e0abb21d3fc00025a6c082c6555815ad137bf
|
/lue-dependencies_cross-validation.R
|
582864e83261abd0ab69f7285f744bc925381b66
|
[] |
no_license
|
kjbloom/lue-controls-publication
|
c8dd343030b7af23223e7a5d5f60c398fbcb5ef0
|
4b15d1758cd000a6368c37f083a7a8b868647f41
|
refs/heads/master
| 2023-04-11T21:57:59.292740
| 2022-11-01T09:53:26
| 2022-11-01T09:53:26
| 560,121,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,317
|
r
|
lue-dependencies_cross-validation.R
|
###########################################################
### Cross-validation of the final empirical model ###
### Supplementary figure 6, Bloomfield et al. 2020, GCB ###
###########################################################
## load the necessary packages:
library(lme4)
library(reshape2)
# Read in the cleaned-up data-set used in the main analysis - confined to growing season and time-averaged:
source("load_GPP-dependencies.R")
# remove some items not needed here
rm(ddf, ddf_2, ddf_grow, mass_C)
## to evaluate the predicted values against the observations we employ Beni Stocker's function:
source("functions/analyse_modobs.R")
## We have missing values in the core file - especially related to flux site measures of soil moisture; so a first step is to prune the dataframe to the variables required for the final model.
summary(ddf_3)
ben_cc <- ddf_3 %>%
  # we retain only the variables of interest:
  select(sitename, year, tDay.ma, vpdDay.ma, splash.ma, Cloud_index, lue3_obs) %>%
  # omit rows with missing values:
  na.omit() %>%
  # drop redundant factor levels (e.g. Sites)
  droplevels()
# Replace zero soil-moisture estimates with a nominal floor so that the
# log(splash.ma) term below is finite.
ben_cc$splash.ma <- with(ben_cc, ifelse(splash.ma < 0.001, 0.001, splash.ma))
# We have a final agreed model structure (M_fin, main analysis). The cross-validation exercise, however, in which we sequentially drop one site from the data used to train the model, is unable to generate site-level random effects and so here we adopt a simpler GLM that excludes any mixed effects:
form <- formula(lue3_obs ~ poly(tDay.ma, 2) + log(vpdDay.ma) + log(splash.ma) + Cloud_index)
## Leave-one-site-out cross-validation: each site is held out in turn, the
## GLM is fit to the remaining sites, and predictions are generated for the
## held-out site.  All out-of-bag predictions are then recombined.
all_sites <- unique(ben_cc$sitename)
oob_preds <- list()
for (i in seq_along(all_sites)) {
  eval_site <- all_sites[i]
  train_df <- ben_cc %>%
    subset(sitename != eval_site)
  eval_df <- ben_cc %>%
    subset(sitename == eval_site)
  train_mod <- glm(form,
                   family = Gamma(link = "log"),
                   data = train_df)
  # NOTE(review): re.form is an lme4 argument; predict.glm() ignores it here.
  oob_preds[[i]] <- predict(train_mod, newdata = eval_df, re.form = NULL, type = "response")
}
## Flatten the list of per-site prediction vectors into one long data frame.
pred_lue <- melt(oob_preds)
# NOTE(review): this relies on melt() emitting predictions in the same row
# order as ben_cc -- true only if ben_cc rows are grouped contiguously by
# sitename in first-appearance order.  Verify; otherwise predictions are
# misaligned with their observations.
ben_cc <- ben_cc %>%
  mutate(pred_cv = pred_lue$value)
# Now for the figure: predicted vs observed LUE with a density heat map.
with(ben_cc, analyse_modobs(pred_cv, lue3_obs, heat = T,
                            plot.title = "Statistical model cross-validation (no random term)",
                            xlab = "Predicted LUE (glm)",
                            ylab = expression("Measured LUE"~ (molC~mol^{-1}~photons)),
                            xlim = c(0, 0.10)))
# clean-out
rm(oob_preds, pred_lue, eval_df, train_df, train_mod, eval_site, form, i)
### END ###
|
13cc14141e1238b982804929ef98d63ba3866c99
|
6e33bd44e1245ba2a6e57077047865d04fabf6bf
|
/man/estimate_bias.Rd
|
48afb458e732b1f88db1c741f98cf5471ffacb20
|
[
"MIT"
] |
permissive
|
mikemc/metacal
|
6914b0c599af4e45a19637381dfd06cfb9e49fb2
|
f56792d02bd722ab16c1ed215b07c1187829a226
|
refs/heads/main
| 2022-02-16T05:06:09.983748
| 2022-02-15T00:59:59
| 2022-02-15T00:59:59
| 192,036,279
| 16
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,739
|
rd
|
estimate_bias.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate.R
\name{estimate_bias}
\alias{estimate_bias}
\alias{estimate_bias.matrix}
\alias{estimate_bias.otu_table}
\alias{estimate_bias.phyloseq}
\title{Estimate bias from control measurements}
\usage{
estimate_bias(observed, actual, ...)
\method{estimate_bias}{matrix}(observed, actual, margin, boot = FALSE, times = 1000)
\method{estimate_bias}{otu_table}(observed, actual, ...)
\method{estimate_bias}{phyloseq}(observed, actual, ...)
}
\arguments{
\item{observed}{Abundance matrix of observed compositions.}
\item{actual}{Abundance matrix of actual or reference compositions for the
same samples and taxa in \code{observed}.}
\item{...}{Arguments passed to the matrix method.}
\item{margin}{Matrix margin that corresponds to observations (samples);
\code{1} for rows, \code{2} for columns.}
\item{boot}{Whether to perform bootstrapping.}
\item{times}{Number of bootstrap replicates.}
}
\value{
A \code{mc_bias_fit} object with \code{\link[=coef]{coef()}}, \code{\link[=fitted]{fitted()}}, \code{\link[=residuals]{residuals()}}, and
\code{\link[=summary]{summary()}} methods.
}
\description{
Estimate bias using the compositional least-squares approach described in
McLaren, Willis, and Callahan (2019).
}
\details{
Bias is estimated by applying \code{\link[=center]{center()}} to the compositional error matrix
defined by \code{observed/actual}, which requires that \code{observed} and \code{actual}
are non-zero for the same sample-taxa pairs. For convenience, this
function will automatically set values in \code{observed} to 0 whose
corresponding entries are 0 in \code{actual}, but it is up to you to replace 0
values in \code{observed} with a non-zero value (such as a pseudocount).
Requirements for \code{observed} and \code{actual}: The row and column names (for
matrices) or taxa and sample names (for phyloseq objects) must match, but
can be in different orders. Any taxa and samples in \code{observed} but not in
\code{actual} will be dropped prior to estimation.
}
\examples{
# Load data from the cellular mock communities of Brooks et al 2015
dr <- system.file("extdata", package = "metacal")
list.files(dr)
actual <- file.path(dr, "brooks2015-actual.csv") |>
read.csv(row.names = "Sample") |>
as("matrix")
observed <- file.path(dr, "brooks2015-observed.csv") |>
read.csv(row.names = "Sample") |>
subset(select = - Other) |>
as("matrix")
sam <- file.path(dr, "brooks2015-sample-data.csv") |> read.csv()
# Estimate bias with bootstrapping for error estimation
mc_fit <- estimate_bias(observed, actual, margin = 1, boot = TRUE)
summary(mc_fit)
}
\seealso{
\code{\link[=center]{center()}} \code{\link[=calibrate]{calibrate()}}
}
|
fa299f9e35a26642e55848126e244608a12b4756
|
649b5fcbe310f5a31e5deeae99d97c9782afe10b
|
/tests/testthat.R
|
b7a97db57f40faf46523fe177933a11f13bd1e7e
|
[] |
no_license
|
sepkamal/powers
|
faa8809de0aa10b3379a52c5704a96b1c0cc8be4
|
f5700e4f2af05b6ec84df07b9de1f0f1980c8f04
|
refs/heads/master
| 2021-08-22T04:23:29.056650
| 2017-11-29T07:42:25
| 2017-11-29T07:42:25
| 111,840,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
testthat.R
|
# Test runner: executes the powers package test suite under testthat.
library(testthat)
library(powers)  # package under test
# NOTE(review): dplyr is attached but not used by the runner itself --
# presumably required inside the tests; confirm before removing.
library(dplyr)
test_check("powers")
|
14173fa522678412ca8343d9928040b09949741a
|
18ed091bdc4c2dac3728249710985d6b6da9c48f
|
/ui.R
|
d002228361aafa4cc4713276c7c7950615e5db34
|
[] |
no_license
|
smosqueda/shiny-pagespeed-graphing
|
de52c11cea8b32379ef39298dd9928efec02c228
|
6c96893ea56db935cb28d168f3c60966a1281efa
|
refs/heads/master
| 2020-06-24T17:06:34.893807
| 2016-11-24T02:55:54
| 2016-11-24T02:55:54
| 74,631,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 703
|
r
|
ui.R
|
library(shiny)
#source("helper.R")
#library(survival)
# Shiny UI definition: a title, a slider selecting the look-back window in
# days, and a tabbed main panel holding two plot outputs.
shinyUI(fluidPage(
  # Application title
  titlePanel("PageSpeed Stats"),
  fluidRow(
    column(2,
           wellPanel(
             # Number of days of PageSpeed data to display (1-30, default 7).
             sliderInput("days", "Number of Days:",
                         min = 1, max = 30, value = 7, step = 1)
           )
    )
    ,
    #  column(6,
    mainPanel(
      # NOTE(review): tabsetPanel() has no `size` argument in shiny; the
      # value is passed through `...` -- confirm it has any effect.
      tabsetPanel(type = "tabs", size="100%",
                  tabPanel("Plots",
                           plotOutput("ggPlotVersion"),
                           plotOutput("plotData", width = "900px", height="900px")
                  )
      )
    )
  )
))
|
a501a7b087d01d4b4c0938582b648edf76368317
|
87bda86c8f157f8eb02bb6eac56621d20009625f
|
/R/kiss-rule.R
|
ef3d054a75fb202d13a4f4dc8ce31a4c3f33c966
|
[] |
no_license
|
jack-palmer/kissr
|
3da4935b3b4260703b2132ea85d01668b4e4936d
|
f57b22665e9fd597e45454122c50eb32556d1c36
|
refs/heads/master
| 2020-12-28T23:51:00.347963
| 2016-07-18T17:32:27
| 2016-07-18T17:32:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,473
|
r
|
kiss-rule.R
|
#' \code{KissRule.Event} objects are used to define target segments to run a report on.
#' @param eventId Which event are you targeting on? We need the index or id.
#' @param frequencyValue How many times the event needs to have happened for the
#' rule
#' @param frequencyOccurance How we are comparing against the
#' \code{frequencyValue}. Must be \code{at_least}, \code{at_most}, or
#' \code{exactly}
#' @param interval What time frame are we looking at the events over? Defaults
#' to NA which uses the overall report time frame
#' @param comparisonMode - unclear what this does. Defaults to 'any_value'
#' @param negate Is this an inclusive rule or an exclusionary rule?
#'
#' @examples
#' firstTimeVisitedCalculation <- KissCalculation.Event(
#' label = "First time of visited site",
#' eventId = 6,
#' type = "first_date_in_range")
#' lastTimeVisitedCalculation <- KissCalculation.Event(
#' label = "Last time of visited site",
#' eventId = 6,
#' type = "last_date_in_range")
#' reportDates <- lubridate::interval(as.Date("2015-06-01"), as.Date("2015-06-02"))
#' rules <- list(KissRule.Event(FALSE, 72, 1, "at_least", comparisonMode = "any_value"))
#' segment <- KissSegment(type = "and",
#' rules = rules,
#' defaultInterval = reportDates)
#' report <- KissReport(productId = "6581c29e-ab13-1030-97f2-22000a91b1a1",
#' segment = segment,
#' calculations = list(
#' firstTimeVisitedCalculation,
#' lastTimeVisitedCalculation
#' ),
#' interval = reportDates
#' )
#' reportResults <- read(report)
#' @export
KissRule.Event <- function(negate, eventId, frequencyValue, frequencyOccurance,
                           interval = NA, comparisonMode = 'any_value') {
  # Fix(review): the roxygen docs state `interval` defaults to NA, meaning
  # "use the overall report time frame", but the original check rejected NA
  # outright -- so the documented default could never be used.  NA now
  # passes through; any other value must still be a lubridate interval.
  interval_unset <- length(interval) == 1 && is.na(interval)
  if (!interval_unset && !lubridate::is.interval(interval))
    stop("interval must be a valid interval")
  # Plain S3 record describing the event-based targeting rule.
  structure(list(
    type = "event",
    negate = negate,
    event = eventId,
    frequencyValue = frequencyValue,
    frequencyOccurance = frequencyOccurance,
    comparisonMode = comparisonMode,
    interval = interval),
    class = c("KissRule.Event", "KissRule"))
}
#' \code{KissRule.Property} objects are used to define target segments to run a
#' report on.
#' @param propertyId Which property are you targeting? We need the index or id.
#' @param comparisonMode How are we comparing the property? Can be any of
#' \code{any_value}, \code{empty}, \code{equals}, \code{contains},
#' \code{begins_with}, \code{ends_with}
#' @param comparisonString What should we compare against? Only used if
#' \code{comparisonMode} is \code{equals}, \code{contains},
#' \code{begins_with}, or \code{ends_with}
#' @param interval What time frame are we looking at the properties over?
#' Defaults to NA which uses the overall report time frame
#' @param negate Is this an inclusive rule or an exclusionary rule?
#'
#' @examples
#' firstTimeVisitedCalculation <- KissCalculation.Event(
#' label = "First time of visited site",
#' eventId = 6,
#' type = "first_date_in_range")
#' lastTimeVisitedCalculation <- KissCalculation.Event(
#' label = "Last time of visited site",
#' eventId = 6,
#' type = "last_date_in_range")
#' reportDates <- lubridate::interval(as.Date("2015-06-01"), as.Date("2015-06-02"))
#' rules <- list(KissRule.Property(negate = FALSE, propertyId = 10, comparisonMode = "any_value", interval = reportDates),
#' KissRule.Property(negate = FALSE, propertyId = 2, comparisonMode = "contains", comparisonString = "copywriting", interval = reportDates))
#' segment <- KissSegment(type = "and",
#' rules = rules,
#' defaultInterval = reportDates)
#' report <- KissReport(productId = "6581c29e-ab13-1030-97f2-22000a91b1a1",
#' segment = segment,
#' calculations = list(
#' firstTimeVisitedCalculation,
#' lastTimeVisitedCalculation
#' ),
#' interval = reportDates
#' )
#' reportResults <- read(report)
#' @export
KissRule.Property <- function(negate, propertyId, comparisonMode, comparisonString = NA, interval = NA) {
  # Comparison modes that match against a string require comparisonString.
  string_modes <- c("equals", "contains", "begins_with", "ends_with")
  needs_string <- comparisonMode %in% string_modes

  # Base S3 record describing the property-based targeting rule.
  rule <- list(
    type = "property",
    negate = negate,
    property = propertyId,
    comparisonMode = comparisonMode,
    interval = interval)

  if (needs_string) {
    if (is.na(comparisonString)) {
      stop("You must provide a comparison string for KissRule.Property comparison modes of 'equals', 'contains', 'begins_with', 'ends_with'")
    }
    rule[["comparisonString"]] <- comparisonString
  }

  structure(rule, class = c("KissRule.Property", "KissRule"))
}
#' Generates json for a KissRule.
#' @export
asJson.KissRule.Event <- function(rule) {
  # Serialising an event rule only makes sense with a concrete date range,
  # so refuse rules whose interval was left at the NA default.
  if (!lubridate::is.interval(rule$interval))
    stop("rule must have a valid interval")

  # NOTE: "frequencyOccurance" (sic) is the key expected by the API payload;
  # do not "fix" the spelling here.
  template <- '
{
"type":"{{type}}",
"negate": {{negate}},
"event":{{event}},
"frequencyValue":{{frequencyValue}},
"frequencyOccurance":"{{frequencyOccurance}}",
"comparisonMode":"{{comparisonMode}}",
"dateRange":{{dateRange}}
}
'

  # Placeholder values, in the same substitution order as before.
  # replacePlaceholder() is defined elsewhere in this file; the braces are
  # escaped because it presumably does regex replacement — TODO confirm.
  substitutions <- list(
    type = rule$type,
    negate = tolower(rule$negate),
    event = rule$event,
    frequencyValue = rule$frequencyValue,
    frequencyOccurance = rule$frequencyOccurance,
    comparisonMode = rule$comparisonMode,
    dateRange = jsonlite::toJSON(makeKMDateRange(rule$interval),
                                 auto_unbox = TRUE)
  )

  json <- template
  for (key in names(substitutions)) {
    pattern <- paste0("\\{\\{", key, "\\}\\}")
    json <- replacePlaceholder(json, pattern, substitutions[[key]])
  }
  json
}
asJson.KissRule.Property <- function(rule) {
  # A property rule cannot be serialised without a concrete date range.
  if (!lubridate::is.interval(rule$interval))
    stop("rule must have a valid interval")

  template <- '
{
"type":"{{type}}",
"negate": {{negate}},
"property":{{property}},
"comparisonMode":"{{comparisonMode}}",
"dateRange":{{dateRange}}{{comparison}}
}
'

  # Only string-based comparison modes carry a "comparisonString" field;
  # all other modes collapse the {{comparison}} placeholder to nothing.
  string_modes <- c("equals", "contains", "begins_with", "ends_with")
  comparison_fragment <- if (rule$comparisonMode %in% string_modes) {
    paste0(',\n', '\"comparisonString\": \"', rule$comparisonString, '\"')
  } else {
    ""
  }

  # replacePlaceholder() lives elsewhere in this file; braces are escaped
  # for what looks like regex substitution — TODO confirm against its source.
  json <- template
  json <- replacePlaceholder(json, "\\{\\{type\\}\\}", rule$type)
  json <- replacePlaceholder(json, "\\{\\{negate\\}\\}", tolower(rule$negate))
  json <- replacePlaceholder(json, "\\{\\{property\\}\\}", rule$property)
  json <- replacePlaceholder(json, "\\{\\{comparisonMode\\}\\}", rule$comparisonMode)
  json <- replacePlaceholder(json, "\\{\\{comparison\\}\\}", comparison_fragment)
  json <- replacePlaceholder(json, "\\{\\{dateRange\\}\\}",
                             jsonlite::toJSON(makeKMDateRange(rule$interval),
                                              auto_unbox = TRUE))
  json
}
|
9c19d7edb31aeffdbbb255143fc422cd3cdfae2e
|
c4bd48cc8156e85212ca2d6ef522a8ea7b318aeb
|
/R/samp_tab.R
|
3acc7b897c98862a8644e791b9e83f57520a9e5b
|
[] |
no_license
|
ErlendNilsen/HFP
|
9af4007e8baf2bb5587ddf3c96fdfa93ea822592
|
8ceb7415fcb2d264edea0be17fb6037dba887042
|
refs/heads/master
| 2020-07-09T02:57:43.875307
| 2019-08-23T07:14:23
| 2019-08-23T07:14:23
| 203,856,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,593
|
r
|
samp_tab.R
|
#' Data table for sampling effort
#'
#' Generates a data table with an overview of the LineID's, stratification
#' level ('Year' or 'OmradeNavn') and the effort (distance taxated) for each
#' LineID.
#'
#' @param strat Level of stratification - 'No', 'OmradeNavn' or 'Year'
#' @param data Survey data frame with columns \code{LinjeID}, \code{Year},
#'   \code{OmradeNavn} and \code{LengdeTaksert}. Defaults to the global
#'   object \code{d} for backward compatibility with existing calls.
#' @return A data frame with columns \code{Sample.Label},
#'   \code{Region.Label} and \code{Effort} (taxated distance in kilometres).
#' @keywords table sampling effort
#' @export
#' @examples
#' samp_tab(strat = LEVEL)
samp_tab <- function(strat, data = d){
  # Fail fast with a clear message on an unknown stratification level;
  # the original switch() silently returned NULL and crashed later with a
  # confusing "object not found" error. match.arg also permits the same
  # exact values as before.
  strat <- match.arg(strat, c("Year", "OmradeNavn", "No"))

  # Helper: one row per LinjeID, taking the first observed value of the
  # requested columns. Rows are collected in a list and bound once, which
  # avoids the O(n^2) rbind-in-a-loop of the original. which() is used so
  # that NA values in LinjeID never select rows.
  first_row_per_line <- function(cols) {
    line_ids <- sort(unique(data$LinjeID))
    rows <- lapply(line_ids, function(line) {
      data[which(data$LinjeID == line), cols][1, ]
    })
    do.call(rbind, rows)
  }

  sample_tab <- switch(strat,
    Year = {
      # One row per Year x LinjeID combination, labelled "<year>_<line>";
      # the region label is the year (as character, matching the original
      # cbind() coercion).
      rows <- list()
      for (yr in sort(unique(data$Year))) {
        year_rows <- data[which(data$Year == yr), ]
        for (line in sort(unique(year_rows$LinjeID))) {
          seg <- year_rows[which(year_rows$LinjeID == line), ]
          rows[[length(rows) + 1L]] <- data.frame(
            label  = paste(yr, line, sep = "_"),
            region = as.character(yr),
            effort = as.numeric(seg$LengdeTaksert[1]),
            stringsAsFactors = FALSE
          )
        }
      }
      do.call(rbind, rows)
    },
    OmradeNavn = first_row_per_line(c("LinjeID", "OmradeNavn", "LengdeTaksert")),
    No         = first_row_per_line(c("LinjeID", "Year", "LengdeTaksert"))
  )

  # Empty input: return an empty but correctly shaped table instead of
  # erroring on colnames<- (the original crashed here).
  if (is.null(sample_tab) || nrow(sample_tab) == 0L) {
    return(data.frame(Sample.Label = character(0),
                      Region.Label = character(0),
                      Effort       = numeric(0)))
  }

  Sample_tab <- as.data.frame(sample_tab)
  colnames(Sample_tab) <- c("Sample.Label", "Region.Label", "Effort")
  # Convert effort from metres to kilometres.
  Sample_tab$Effort <- Sample_tab$Effort / 1000
  Sample_tab
}
|
3734c6571fb395dfce61eb9ea394a99ece556935
|
f0feee021d3b59eaf0b223ea2bc57d4173bb25da
|
/man/eq_map.Rd
|
ef1961733c969db861055f0750af4824a43229bc
|
[] |
no_license
|
Liddlle/capstone
|
e8446896dc845d19c2c36936c9238b5a1e095596
|
a6be26260f4895a0741db5ff6eb2ab223f69b54e
|
refs/heads/master
| 2020-05-18T17:43:17.665636
| 2019-05-30T15:11:01
| 2019-05-30T15:11:01
| 184,564,063
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,258
|
rd
|
eq_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leaflet.R
\name{eq_map}
\alias{eq_map}
\title{Mapping the earthquake epicenters
The function maps the epicenters (LATITUDE/LONGITUDE) and annotates each point
with in pop up window containing annotation data stored in a column of the data frame.
The user should be able to choose which column is used for the annotation in the
pop-up with a function argument named annot_col.}
\usage{
eq_map(df_clean, annot_col)
}
\arguments{
\item{df_clean}{The filtered data frame containing earthquake data}
\item{annot_col}{column that should be used for the annotation in the pop-up}
}
\value{
This function returns a leaflet map with earthquake epicentres and annotations
within pop-up window
}
\description{
Mapping the earthquake epicenters
The function maps the epicenters (LATITUDE/LONGITUDE) and annotates each point
within a pop-up window containing annotation data stored in a column of the data frame.
The user should be able to choose which column is used for the annotation in the
pop-up with a function argument named annot_col.
}
\examples{
\dontrun{
df \%>\%
dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(DATE) >= 2000) \%>\%
eq_map(annot_col = "DATE")
}
}
|
60acefdd2bfa3e4c8a8e930798bda6984a35e7e0
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Sauer-Reimer/ISCAS89/s05378_PR_2_5/s05378_PR_2_5.R
|
1d5ec04cfc6b650d15ede7ecb3739fb2cc158c3f
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65
|
r
|
s05378_PR_2_5.R
|
c300bde402bcb07b3a1592139e17e864 s05378_PR_2_5.qdimacs 4996 14064
|
dc92352cefaac3b96f03a3259b875a7b317cadab
|
7c65bcebea70b5769503833ef4a8472e18caabdd
|
/plot1.R
|
9b38bdc929758ee0c4731579b11ca20474e3de73
|
[] |
no_license
|
brodo80/Exploratory-Data-Analysis-Project-1
|
edb76802501c9db11374e0e328b66c059127f4f8
|
0c0490486472532278559be367d6fec77ea919d0
|
refs/heads/master
| 2020-12-06T19:56:43.476978
| 2016-09-10T22:39:43
| 2016-09-10T22:39:43
| 67,897,549
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
plot1.R
|
# Download and unpack the UCI "household power consumption" archive.
# NOTE(review): the destination is a hard-coded Windows UNC share path —
# this only works on that specific machine; adjust before reuse.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url,"//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/data.zip")
unzip(zipfile="//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/data.zip", exdir = "//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads")
# Read a fixed 2880-row window of the semicolon-separated file, skipping the
# first 66637 lines (header included, hence header=FALSE), then restore the
# column names by hand. Presumably this window covers the two target dates —
# verify skip/nrows against the raw file before changing.
data <- read.table("//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/household_power_consumption.txt",header=FALSE, skip=66637, sep=";", nrows=2880)
names(data)<-c("Date","Time","Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2","Sub_metering_3")
# Quick sanity check of the imported rows.
head(data)
##########Plot 1
# Plot 1: histogram of global active power, saved as plot1.png.
Plot1 <- function() {
  # Draw on the active device, then copy the finished plot into a
  # 480x480 PNG in the working directory and close the PNG device.
  power <- data$Global_active_power
  hist(power,
       main = "Global Active Power",
       col = "red",
       xlab = "Global Active Power (kilowatts)")
  dev.copy(png, file = "plot1.png", width = 480, height = 480)
  dev.off()
}
Plot1()
|
f6bcd52152044a6217b76b262215884ddaa64ffb
|
709d477a2fe8c61ebe2b0d5fe63084e0a59e4322
|
/R/style.R
|
f1dd53321d9b0926ad9c3cd6d17316c02abcdcde
|
[
"MIT"
] |
permissive
|
kshtzgupta1/ramlegacy
|
37caa01dfc8bd0eb9476a22aa6fef3d1f11beb34
|
4d56b90cf838e3915a590404059ca8f08d2e4ce1
|
refs/heads/master
| 2020-05-07T16:44:48.277304
| 2019-04-11T01:19:13
| 2019-04-11T01:19:13
| 147,247,622
| 1
| 2
|
NOASSERTION
| 2019-03-23T01:54:47
| 2018-09-03T19:55:57
|
R
|
UTF-8
|
R
| false
| false
| 302
|
r
|
style.R
|
completed <- function(msg) {
  # Startup message prefixed with a green tick to signal success.
  tick <- crayon::green(cli::symbol$tick)
  packageStartupMessage(tick, " ", msg)
}
not_completed <- function(msg) {
  # Startup message prefixed with a red crossed circle to signal failure.
  cross <- crayon::red(cli::symbol$circle_cross)
  packageStartupMessage(cross, " ", msg)
}
notify <- function(msg) {
  # Neutral informational startup message prefixed with a blue star.
  star <- crayon::blue(cli::symbol$star)
  packageStartupMessage(star, " ", msg)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.