blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
914d15bcc73e0f6229b0b2554af77d5a3f8c93ba | f8cd722f40a9f5c1b08809765bd785928cd60c6f | /man/adaptiveLassoEst.Rd | c35fcd683173b5cf12d78625067ea7726c90d348 | [
"MIT"
] | permissive | PhilBoileau/cvCovEst | bd62df4c7811995e756701091f3a5d4d4ef5f1d2 | 89d57c6326f3723dca9e8e878f1d09c18b85cb51 | refs/heads/master | 2023-06-25T03:40:50.232754 | 2023-06-22T23:44:31 | 2023-06-22T23:44:31 | 256,326,827 | 12 | 8 | NOASSERTION | 2023-01-12T20:40:06 | 2020-04-16T20:50:11 | R | UTF-8 | R | false | true | 1,304 | rd | adaptiveLassoEst.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimators.R
\name{adaptiveLassoEst}
\alias{adaptiveLassoEst}
\title{Adaptive LASSO Estimator}
\usage{
adaptiveLassoEst(dat, lambda, n)
}
\arguments{
\item{dat}{A numeric \code{data.frame}, \code{matrix}, or similar object.}
\item{lambda}{A non-negative \code{numeric} defining the amount of
thresholding applied to each element of \code{dat}'s sample covariance
matrix.}
\item{n}{A non-negative \code{numeric} defining the exponent of the adaptive
weight applied to each element of \code{dat}'s sample covariance matrix.}
}
\value{
A \code{matrix} corresponding to the estimate of the covariance
matrix.
}
\description{
\code{adaptiveLassoEst()} applies the adaptive LASSO to the
entries of the sample covariance matrix. The thresholding function is
inspired by the penalized regression introduced by
\insertCite{zou2006;textual}{cvCovEst}. The thresholding function assigns
a weight to each entry of the sample covariance matrix based on its
initial value. This weight then determines the relative size of the penalty,
so that larger entries are penalized less, which reduces bias
\insertCite{rothman2009}{cvCovEst}.
}
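\details{
For intuition only: one common parameterization of this kind of adaptive-LASSO
thresholding rule (in the style of the generalized thresholding literature; the
exact parameterization used by the package may differ) maps each sample
covariance entry \eqn{s_{ij}} to
\deqn{\hat{s}_{ij} = s_{ij} \, \max\left(0,\; 1 - |\lambda / s_{ij}|^{n}\right),}
so entries with large magnitude receive a relatively smaller effective penalty
than small entries.
}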
\examples{
adaptiveLassoEst(dat = mtcars, lambda = 0.9, n = 0.9)
}
\references{
\insertAllCited{}
}
|
b431969ab049d5a9a6023b3c82f2c6892be22eb4 | 62d4a5ee6f68119bcd54fbf3799254acbb22beb9 | /Test1.R | 6c30bf4c54689cd9eb048d7ed851fbc0cf7bffda | [] | no_license | mssavoca/NewMacTest | 910e61ee58127e8ce50d38c214c6b8d9b88aa5b3 | 9ef5bad09929f672aa4c81866e3174307c4721d4 | refs/heads/master | 2020-04-01T12:10:40.985072 | 2018-10-15T23:48:28 | 2018-10-15T23:48:28 | 153,194,896 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27 | r | Test1.R | # Connecting get to RStudio |
87517647382186d44f6d5c348745e231a623f708 | 94aed35f1f7cca636419b88a53799f34e5c5dfee | /tests/testthat/test-makeGene2Symbol.R | fb56163da4d63fc1dafb290311337ab4c562f26c | [
"MIT"
] | permissive | trichelab/basejump | a4a3b9e58016449faeb9b3d77cf1c09d4eafe4c7 | 6724b10dbf42dd075c7db5854a13d9509fe9fb72 | refs/heads/master | 2020-12-12T11:54:17.660956 | 2020-01-08T13:24:07 | 2020-01-08T13:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 335 | r | test-makeGene2Symbol.R | context("makeGene2Symbol")
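# Parameterized test: with_parameters_test_that() is assumed to come from the
# patrick package (loaded by the package's testthat setup); it reruns the block
# below once for each value of the 'format' argument.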
with_parameters_test_that(
"makeGene2SymbolFromEnsDb", {
object <- makeGene2SymbolFromEnsDb(
object = "EnsDb.Hsapiens.v75",
format = format
)
expect_s4_class(object, "Gene2Symbol")
},
format = eval(formals(makeGene2SymbolFromEnsDb)[["format"]])
)
|
c3ab8ac5345ff6f4a6117aa495ee784c2149a191 | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /DUC-Dataset/Summary_p100_R/D109.FBIS4-26769.html.R | 58cc7ad66353470e8fe9e1b291c6140706ca1960 | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 849 | r | D109.FBIS4-26769.html.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:101">
</head>
<body bgcolor="white">
<a href="#0" id="0">Meanwhile, 740 million yuan in loans and another 50 million yuan in donations have also been sent to flooded areas in Guangdong Province.</a>
<a href="#1" id="1">As one of China's major bread baskets, Jiangxi is of key importance to China in agricultural production, Wen said.</a>
<a href="#2" id="2">Major flood monitoring stations on the two rivers recorded their highest water levels, all four meters above the warning level.</a>
<a href="#3" id="3">Meanwhile, he said, tension has been eased in the Pearl River delta as the latest tropical storm has been weakened after landing in Yangjiang city, Guangdong Province.</a>
<a href="#4" id="4">It has a direct bearing on the grain supply in south China.</a>
</body>
</html> |
bd39273d66138a2ebde9814c0db5d8952a8f2786 | 92626a21f23ab35e82cb439255e10cde2a7047c1 | /man/heat_transfer_coefficient_approximation.Rd | bf635cef601cde8ca0b46578483fc50774d19c4b | [
"MIT"
] | permissive | ArchiYujie/TrenchR | 04630ddd078eca187a517c0c98e59065b3054a74 | f45c2f0b54eab4ce578c0b3b631f9d93058ba731 | refs/heads/master | 2023-07-16T22:25:21.419072 | 2021-08-26T21:30:12 | 2021-08-26T21:30:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,980 | rd | heat_transfer_coefficient_approximation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{heat_transfer_coefficient_approximation}
\alias{heat_transfer_coefficient_approximation}
\title{Calculate heat transfer coefficient using a spherical approximation (based on Mitchell 1976)}
\usage{
heat_transfer_coefficient_approximation(V, D, K, nu, taxa = "sphere")
}
\arguments{
\item{V}{Air velocity (m/s).}
\item{D}{Characteristic dimension (e.g., diameter or snout-vent length) in meters.}
\item{K}{Thermal conductivity of air (W m^-1 K^-1); can be calculated using DRYAIR or WETAIR in NicheMapR.}
\item{nu}{Kinematic viscosity of air (m^2 s^-1); can be calculated using DRYAIR or WETAIR in NicheMapR.}
\item{taxa}{Which class of organism; current choices: "sphere", "frog", "lizard", "flyinginsect", "spider".}
}
\value{
heat transfer coefficient, H_L (W m^-2 K^-1)
}
\description{
Calculate heat transfer coefficient using a spherical approximation (based on Mitchell 1976)
}
\details{
This function allows you to estimate the heat transfer coefficient for various taxa. It approximates forced convective heat transfer for animal shapes using the convective relationship for a sphere. Reference: Mitchell (1976). Heat transfer from spheres and other animal forms. Biophysical Journal 16(6): 561–569. (Uses Table III: Convective Heat Transfer Relations for Animal Shapes.)
}
\examples{
\dontrun{
heat_transfer_coefficient_approximation(V = 3, D = 0.05, K = 25.7 * 10^(-3), nu = 15.3 * 10^(-6), taxa = "sphere")
}
}
\seealso{
Other biophysical models:
\code{\link{Free_or_forced_convection}()},
\code{\link{Grashof_number_Gates}()},
\code{\link{Grashof_number}()},
\code{\link{Nu_from_Gr}()},
\code{\link{Nu_from_Re}()},
\code{\link{Nusselt_number}()},
\code{\link{Prandtl_number}()},
\code{\link{Qconduction_animal}()},
\code{\link{Qconduction_substrate}()},
\code{\link{Qconvection}()},
\code{\link{Qemitted_thermal_radiation}()},
\code{\link{Qevaporation}()},
\code{\link{Qmetabolism_from_mass_temp}()},
\code{\link{Qmetabolism_from_mass}()},
\code{\link{Qnet_Gates}()},
\code{\link{Qradiation_absorbed}()},
\code{\link{Qthermal_radiation_absorbed}()},
\code{\link{Reynolds_number}()},
\code{\link{Tb_CampbellNorman}()},
\code{\link{Tb_Fei}()},
\code{\link{Tb_Gates2}()},
\code{\link{Tb_Gates}()},
\code{\link{Tb_butterfly}()},
\code{\link{Tb_grasshopper}()},
\code{\link{Tb_limpetBH}()},
\code{\link{Tb_limpet}()},
\code{\link{Tb_lizard}()},
\code{\link{Tb_mussel}()},
\code{\link{Tb_salamander_humid}()},
\code{\link{Tb_snail}()},
\code{\link{Tbed_mussel}()},
\code{\link{Tsoil}()},
\code{\link{actual_vapor_pressure}()},
\code{\link{boundary_layer_resistance}()},
\code{\link{external_resistance_to_water_vapor_transfer}()},
\code{\link{heat_transfer_coefficient_simple}()},
\code{\link{heat_transfer_coefficient}()},
\code{\link{saturation_vapor_pressure}()},
\code{\link{saturation_water_vapor_pressure}()}
}
\concept{biophysical models}
\keyword{coefficient}
\keyword{heat}
\keyword{transfer}
|
5fc719be1616b281f82ae54eb4f3406efb82d59e | c51ca43d4e5be4cce45281acd89f2bb3c0294ea2 | /regression-Dalgaard.R | 4d22e6f851b15544e4108ea1b039c342f1bb751f | [] | no_license | professorbeautiful/bioinf2118 | e9d6adf2c8b596a1fab239301e511999e0d9f401 | aa70f61333119372680fb66d34be350aa9947c39 | refs/heads/master | 2022-07-19T02:22:20.667851 | 2022-06-27T20:35:11 | 2022-06-27T20:35:11 | 116,890,225 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 556 | r | regression-Dalgaard.R | # regression-Dalgaard
while( ! require(ISwR) )
install.packages("ISwR")
data("thuesen")
help("thuesen")
dim(thuesen)
thuesen
##Notice one missing value.
attach(thuesen)
search()
ls(pos=2)
plot(short.velocity, blood.glucose)
summary(lm(blood.glucose ~ short.velocity))
abline(lm(blood.glucose ~ short.velocity))
### Note the special method for abline when the arg is a "lm" object.
ok <- !is.na(thuesen$short.velocity)  # note: !is.na(), not -is.na(), for logical subsetting
lm.out <- lm(blood.glucose ~ short.velocity, data = thuesen[ok, ])
points(short.velocity[ok], fitted(lm.out), col = "red", pch = 2)
|
005822a54822a992edf445ee953fde59f072f93a | ceb9d8ee5c3cb352693d119d04c81fb1ee46426f | /data/processData.R | 1ba4897c24b91f37f37bbf181c7caf1575fc2abc | [] | no_license | jtipton25/mysis | 662495376f67a87422ad0f72120ddb3c9bc2fa4e | 464063b67cd27f4abef2991a2ccf1ea56de1a335 | refs/heads/master | 2023-05-01T00:27:35.645837 | 2023-04-21T16:40:32 | 2023-04-21T16:40:32 | 40,909,801 | 0 | 0 | null | 2016-01-12T21:36:31 | 2015-08-17T16:21:29 | HTML | UTF-8 | R | false | false | 6,908 | r | processData.R | ## File to process the CSV files to RData files
##
## Created 12_15_2015
## Edited 12_15_2015
##
##
## Count Data
##
data_one_way <- read.csv("~/mysis/data/ttestData.csv", skip = 3, nrows = 40, header = TRUE)
# data_one_way$full ## count per m^2 for full net
# data_one_way$half ## count per m^2 for half net
count <- c(data_one_way$full.5, ## count for full net
4 * data_one_way$half.5) ## count for half net adjusted for radius
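# (The 0.5 m net has half the diameter of the 1.0 m net, so it samples one
# quarter of the area; hence the factor of 4.)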
date <- rep((data_one_way$month), 2)
date[date == 7] <- "July"
date[date == 8] <- "August"
date[date == 9] <- "September"
date <- factor(date, levels=c("July", "August", "September"))
net <- factor(c(rep("Large Net", 40), rep("Small Net", 40)), levels=c("Small Net", "Large Net"))
station <- factor(data_one_way$Station)
mysisCountData <- data.frame(count=count, date=date, net=net, station=station)
## Save RData file
save(mysisCountData, file="~/mysis/data/mysisCountData.RData")
## remove all data to avoid conflicts
rm(list=ls())
##
## Length Data
##
data_length <- read.csv("~/mysis/data/lengthData.csv", skip = 4, header = TRUE)
net <- data_length$Size..m.
net[net== 0.5] <- "Small Net"
net[net== 1.0] <- "Large Net"
net <- factor(net, levels=c("Small Net", "Large Net"))
y <- data_length$Length..mm.
## remove whitespace from string, then truncate into
## broad gender classes, not subclasses
library(stringr)
gender <- as.factor(substr(str_trim(as.character(data_length$Gender)), 0, 1))
date <- as.numeric(data_length$Date)
date[date == 2] <- 1 ## collapse into monthly values
date[date == 3] <- 2
date[date == 4] <- 3
date <- as.factor(date)
plot(as.numeric(date), type='l', main = "Is this a data error? - yes?")
## correct date mislabeling in the raw data
date <- as.numeric(data_length$Date)
date[7330:7519] <- 4
date[7559:7773] <- 4
date[date == 2] <- 1 ## collapse into monthly values
date[date == 3] <- 2
date[date == 4] <- 3
# date <- as.factor(date)
plot(as.numeric(date), type='l', main = "Data looks better")
date[date == 1] <- "July"
date[date == 2] <- "August"
date[date == 3] <- "September"
date <- factor(date, levels=c("July", "August", "September"))
station <- factor(data_length$Station)
label <- factor(data_length$Sample.Label)
mysisLengthData <- data.frame(y=y, date=date, net=net, gender=gender,
station=station, label=label)
## Save RData file
save(mysisLengthData, file='~/mysis/data/mysisLengthData.RData')
## remove data to eliminate conflicts
rm(list=ls())
##
## Juvenile ratio data
##
fullData <- read.csv("~/mysis/data/lengthData.csv", skip=4)
males <- rep(0, 80)
females <- rep(0, 80)
juveniles <- rep(0, 80)
unknowns <- rep(0, 80)
size <- rep(0, 80)
date_tmp <- rep(0, 80)
station <- rep(0, 80)
idx <- 1
for(i in unique(fullData$Sample.Label)){
tmp_data = subset(fullData, fullData$Sample.Label == i)
tmp = substr(as.character(tmp_data$Gender), 0, 1)
males[idx] <- sum(tmp == "M")
juveniles[idx] <- sum(tmp == "J")
females[idx] <- sum(tmp == "F")
unknowns[idx] <- sum(tmp == "U")
size[idx] <- tmp_data$Size..m.[1]
date_tmp[idx] <- tmp_data$Date[1]
station[idx] <- tmp_data$Station[1]
idx <- idx + 1
}
prop <- juveniles / (juveniles + females + males + unknowns)
## Correct for mislabeling
date_tmp[70] <- 4
date_tmp[72] <- 4
date <- rep(0, 80)
date[date_tmp == 1] <- "July"
date[date_tmp == 2] <- "July"
date[date_tmp == 3] <- "August"
date[date_tmp == 4] <- "September"
date <- factor(date, levels=c("July", "August", "September"))
size[size == 0.5] <- "Small Net"
size[size == 1.0] <- "Large Net"
net <- factor(size, levels=c("Small Net", "Large Net"))
station <- factor(station)
mysisJuvenileData <- data.frame(prop=prop, date=date, net=net, station=station)
## Save RData file
save(mysisJuvenileData, file='~/mysis/data/mysisJuvenileData.RData')
## remove data to eliminate conflicts
rm(list=ls())
##
## Sex ratio data
##
fullData <- read.csv("~/mysis/data/lengthData.csv", skip=4)
males <- rep(0, 80)
females <- rep(0, 80)
unknowns <- rep(0, 80)
size <- rep(0, 80)
date_tmp <- rep(0, 80)
station <- rep(0, 80)
idx <- 1
for(i in unique(fullData$Sample.Label)){
tmp_data = subset(fullData, fullData$Sample.Label == i)
tmp = substr(as.character(tmp_data$Gender), 0, 1)
males[idx] <- sum(tmp == "M")
females[idx] <- sum(tmp == "F")
unknowns[idx] <- sum(tmp == "U")
size[idx] <- tmp_data$Size..m.[1]
date_tmp[idx] <- tmp_data$Date[1]
station[idx] <- tmp_data$Station[1]
idx <- idx + 1
}
prop <- females / (females + males)
prop_corrected <- (females + 0.5 * unknowns) /
(females + males + unknowns)
prop[prop == 0] <- 0.01
prop_corrected[prop_corrected == 0] <- 0.01
## Correct for mislabeling
date_tmp[70] <- 4
date_tmp[72] <- 4
date <- rep(0, 80)
date[date_tmp == 1] <- "July"
date[date_tmp == 2] <- "July"
date[date_tmp == 3] <- "August"
date[date_tmp == 4] <- "September"
date <- factor(date, levels=c("July", "August", "September"))
size[size == 0.5] <- "Small Net"
size[size == 1.0] <- "Large Net"
net <- factor(size, levels=c("Small Net", "Large Net"))
station <- factor(station)
mysisSexData <- data.frame(prop=prop, date=date, net=net, station=station)
## Save RData file
save(mysisSexData, file='~/mysis/data/mysisSexData.RData')
## remove data to eliminate conflicts
rm(list=ls())
##
## Sex count data
##
fullData <- read.csv("~/mysis/data/lengthData.csv", skip=4)
males <- rep(0, 80)
females <- rep(0, 80)
juveniles <- rep(0, 80)
unknowns <- rep(0, 80)
size <- rep(0, 80)
date_tmp <- rep(0, 80)
station <- rep(0, 80)
idx <- 1
for(i in unique(fullData$Sample.Label)){
tmp_data = subset(fullData, fullData$Sample.Label == i)
tmp = substr(as.character(tmp_data$Gender), 0, 1)
males[idx] <- sum(tmp == "M")
juveniles[idx] <- sum(tmp == "J")
females[idx] <- sum(tmp == "F")
unknowns[idx] <- sum(tmp == "U")
size[idx] <- tmp_data$Size..m.[1]
date_tmp[idx] <- tmp_data$Date[1]
station[idx] <- tmp_data$Station[1]
idx <- idx + 1
}
## construct count vector
count <- c(males, females, juveniles, unknowns)
gender <- factor(rep(c("M", "F", "J", "U"), each=80))
## Correct for mislabeling
date_tmp[70] <- 4
date_tmp[72] <- 4
date <- rep(0, 80)
date[date_tmp == 1] <- "July"
date[date_tmp == 2] <- "July"
date[date_tmp == 3] <- "August"
date[date_tmp == 4] <- "September"
date <- factor(date, levels=c("July", "August", "September"))
size[size == 0.5] <- "Small Net"
size[size == 1.0] <- "Large Net"
net <- factor(size, levels=c("Small Net", "Large Net"))
station <- factor(station)
count[rep(net, times=4) == "Small Net"] <- count[rep(net, times=4) == "Small Net"] * 4 # size was relabelled above, so the old comparison to 0.5 never matched
mysisSexCountData <- data.frame(count=count, date=rep(date, times=4),
net=rep(net, times=4), gender=gender,
station=rep(station, times=4))
## Save RData file
save(mysisSexCountData, file='~/mysis/data/mysisSexCountData.RData')
## remove data to eliminate conflicts
rm(list=ls())
|
222ea9d2021d4aa6a525bfa650d1ffa43b55673b | ce0a89ee1244e995c160d3202ebedb3ff860c3f3 | /Archive/test_Records.R | 867870ddec783c830c3c5d1e62df9a167f2b4403 | [] | no_license | Iceylee/NGS-Pacbio | 1dc8d9f7e6327414dffe8500921fa2cf1e56fe72 | 4d4f6052eff9ae13b170e8f8431e79ed5b39d6ca | refs/heads/master | 2020-04-05T06:32:17.031168 | 2019-09-04T03:34:24 | 2019-09-04T03:34:24 | 156,641,487 | 9 | 5 | null | null | null | null | UTF-8 | R | false | false | 3,112 | r | test_Records.R | Rscript /data1/script/deseq2+GO+KEGG/Rpipe/3GO-KEGG.R AH57973 hsa SYMBOL ENTREZID /data2/ClientData/2018_08/Lishuang/
##1 2.1
setwd("/Users/Icey/work/2018/plot/0823็ญๅพ-่ฐขๅๅนณ/")
coldata_file="colData.csv"
count_table="CountMatrix4DESeq.csv"
path1="./"
path3="./"
path2="./"
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/run.R CountMatrix4DESeq_SvsF.csv colData_SvsF.csv LZY_Con /data2/ClientData/2018_08/XieHuaPing/ TRUE
##3GO-KEGG.R
dbname <- 'AH57973'
kegg_org <- "hsa"
GO_KEY <- "SYMBOL"
KEGG_NEED_KEY <- "ENTREZID"
output_path <- "/data2/ClientData/2018_08/XuLeLe/"
##pathview.R
#่ทๅๅ ้คpathwayๆไปถๅคนใไธ็ถๆๅฏ่ฝๆฅ้:
# Error in readPNG(paste(kegg.dir, "/", pathway.name, ".png", sep = "")) :
# libpng error: Read Error
#ๆจกๅผ็็ฉ๏ผsig files ไธญๆlog2FoldChangeๅ
#้ๆจกๅผ็็ฉ๏ผ็ฌฌๅๅไธบlog2FoldChange็ๆฐๅผใๆๅจๅ ๅๅ
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/pathview.R ./nonModel/3.DiffExprGene/ ./nonModel/4.GO_KEGG_Enrichment/ ENSEMBL FALSE
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/pathview.R ./Model/3.DiffExprGene/ ./Model/4.GO_KEGG_Enrichment/0.05/ ENSEMBL TRUE
#4.GetGene2KO.R
##ๅพๅฐ็ฉ็ง็gene symbolๅฏนๅบKO็ผๅทใ๏ผๅคงK๏ผ
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/4GetGene2KO.R AH57973 hsa ./
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/4GetGene2KO.R AH59553 vda ./
##length_hist.R
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/length_hist.R 1.Trinity/CD-HIT/Trinity_CD-HIT_0.9.seqLengths 3000
# 5. gfold: merged into the R pipeline
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/run.R R_input/CountMatrix4DESeq.csv R_input/colData.csv SH1 /data2/ClientData/2018_08/XuLeLe/Gfold_test/ FALSE
#5DAG.R
#goAnnoFile sigPath outputPath MODEL
#ไผๅจoutputPathไธๆฐๅปบไธไธชDAG_plotsๆไปถๅคน๏ผ้้ขๆฏๅพ็็ปๆ
#XuLeLe ไบบ ๆจกๅผ็็ฉ ๏ผGOๆณจ้ๆไปถๆฒกๆ๏ผ
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/5DAG.R ./4.GO_KEGG_Enrichment/AllGene_GO_Annotation.txt ./3.DiffExprGene/ ./4.GO_KEGG_Enrichment TRUE
#/data3/ClientData/2018_07/DuKeBing/WangXH/Dukebing_reanalysis
#้ๆจกๅผ ๆต่ฏok
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/5DAG.R $PWD/4.GO_KEGG_Enrichment/eggnog.emapper.modified.annotations $PWD/3.DiffExprGene $PWD/DAG FALSE
#run.R
#baseGroup ๅๆฐไฟฎๆน ๏ผ็ดๆฅๅๆฏ่พ็ปๅซใๅค็ป้ๅท้ๅผ
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/run.R CountMatrix4DESeq.csv colData.csv NC_vs_V ./ TRUE
#PlotEnrichHeatmapVenn.sh (chipseq ็ปๅฏ้็ญๅพๅ็ปดๆฉๅพ)
bash /data1/script/deseq2+GO+KEGG/Rpipe/PlotEnrichHeatmapVenn.sh ./ A1 A2
#ๅจcall peak่ทฏๅพ๏ผ็ฌฌไธไธชๅๆฐ๏ผไธ้ขๆฐๅปบไธไธช็ฎๅฝCommDiffPeaks๏ผๆๆๆไปถ่พๅบๅฐ่ฏฅๆไปถๅคน
#Plot_Violin.R
#/data2/ClientData/2018_08/ZhangYi2/3.ExpressAnalysis
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/6Plot_Violin.R RSEM.FPKM.matrix ../colData_SvsF.csv ./ FALSE
#/data/ClientData/2018_10/ZhongXuanBo/2.GenesExpress
Rscript /data1/script/deseq2+GO+KEGG/Rpipe/6Plot_Violin.R AllSamplesFPKMValue.txt ../R_input/colData.csv ./ TRUE
RPKM_file = args[1] #"RSEM.FPKM.matrix"
coldata_file=args[2] #"colData.csv"
Path = args[3] #"./"
whether_ref = args[4] #FALSE
|
f2fb148979eb267a9c6f20873cdea0d80a5bf63f | 2d1f4a315a1b6fda16341144a95e62da2898c9d9 | /workflow/dashboards/rivm-date-corrections.R | 68abc34f45c61c71ec9a7688dd549d5466d5be13 | [
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | sonar98/covid-19 | 913893c15a5ff1f765b7c55bcd16b9ce185849a3 | cd56342803266f83850be4170d54ad520f0966bf | refs/heads/master | 2023-01-10T00:10:00.817896 | 2020-11-02T13:59:58 | 2020-11-02T13:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,589 | r | rivm-date-corrections.R | require(tidyverse)
require(data.table)
rm(list=ls())
#### Corrections scripts
temp = tail(list.files(path = "data-rivm/casus-datasets/",pattern="*.csv", full.names = T),2)
myfiles = lapply(temp, read.csv)
df <- map_dfr(myfiles, ~{
.x
})
df$date <- as.Date(df$Date_file)
df.cases <- as.data.frame(table(df$Date_statistics,df$date)) ## Success
df.cases <- spread(df.cases, key = Var2, value = Freq)
col.start.diff <- ncol(df.cases)+1
dates.lead <- names(df.cases)[3:ncol(df.cases)] ## Set lead colnames for diff
dates.trail <- names(df.cases)[2:(ncol(df.cases)-1)] ## Set trail colnames for diff
# Calculate moving difference between cases per day
df.cases[paste0("diff",seq_along(dates.lead)+1,seq_along(dates.trail))] <- df.cases[dates.lead] - df.cases[dates.trail]
write.csv(df.cases, file = "corrections/cases_perday.csv")
## Hospital
df.hospital <- df %>%
dplyr::filter(Hospital_admission == "Yes")
df.hospitals <- as.data.frame(table(df.hospital$Date_statistics,df.hospital$date)) ## Success
hospitals.wide <- spread(df.hospitals, key = Var2, value = Freq)
# Calculate moving difference between cases per day
hospitals.wide[paste0("diff",seq_along(dates.lead)+1,seq_along(dates.trail))] <- hospitals.wide[dates.lead] - hospitals.wide[dates.trail]
write.csv(hospitals.wide, file = "corrections/hospital_perday.csv")
## Deaths
df.death <- df %>%
dplyr::filter(Deceased == "Yes")
df.deaths <- as.data.frame(table(df.death$Date_statistics,df.death$date)) ## Success
deaths.wide <- spread(df.deaths, key = Var2, value = Freq)
# Calculate moving difference between cases per day
deaths.wide[paste0("diff",seq_along(dates.lead)+1,seq_along(dates.trail))] <- deaths.wide[dates.lead] - deaths.wide[dates.trail]
write.csv(deaths.wide, file = "corrections/deaths_perday.csv")
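# The three wide-table diff blocks above repeat the same pattern; a reusable
# sketch (untested, assumes the Var1/Var2/Freq long format that table() returns):
# daily_diff_wide <- function(long_df) {
#   wide <- spread(long_df, key = Var2, value = Freq)
#   lead <- names(wide)[3:ncol(wide)]
#   trail <- names(wide)[2:(ncol(wide) - 1)]
#   wide[paste0("diff", seq_along(lead) + 1, seq_along(trail))] <- wide[lead] - wide[trail]
#   wide
# }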
## Week of death - diff file
temp = tail(list.files(path = "data-rivm/casus-datasets/",pattern="*.csv", full.names = T),2)
myfiles = lapply(temp, read.csv)
dat.today <- as.data.frame(myfiles[2])
dat.yesterday <- as.data.frame(myfiles[1])
dat.today$Week <- substr(dat.today$Week_of_death, 5, 6)
dat.yesterday$Week <- substr(dat.yesterday$Week_of_death, 5, 6)
today.weekdeath <- count(dat.today,Week)
yesterday.weekdeath <- count(dat.yesterday,Week)
df.weekdeath <- merge(today.weekdeath,yesterday.weekdeath,by="Week",all.x=TRUE) # note: the argument is all.x; the earlier all.X=T was silently ignored
df.weekdeath$diff <- df.weekdeath$n.x - df.weekdeath$n.y
colnames(df.weekdeath) <- c("Week","weekdeath_today","weekdeath_yesterday","diff")
df.weekdeath <- df.weekdeath[1:(nrow(df.weekdeath)-1),]
write.csv(df.weekdeath, file = "corrections/deaths_perweek.csv")
|
268e6d6f3a10f6744f880624fd7f05094a5e75df | 5a767b9bde2c4099cd4796e541ac92b31d7e5350 | /Aansluitingen_analyse_obv_CAR.R | a8dc1bafd7160a5ecb03dc5f96f87456873df2bc | [] | no_license | jaccoheres/VnDigitaalNetbeheer | 5cf4485f47f4fff85cc4e41f2541c33b48d98604 | 924bf7809f316995f24d45bf43b9a01c8924876b | refs/heads/master | 2020-06-04T05:31:07.690552 | 2015-01-16T16:16:31 | 2015-01-16T16:16:31 | 25,514,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,136 | r | Aansluitingen_analyse_obv_CAR.R | setwd("N:/Bottum Up Analyse/2. Data/3. Nettopologie")
library(data.table)
library(plyr)
# How well is each connection category covered? ---------------------------------------------------------
CAR <- fread("CAR_E.csv", colClasses = "character") # read in all EANs
PC4s <- read.csv("N:/Bottum Up Analyse/2. Data/0. Gebiedsafbakening/Noord_Holland_PCzonderVelsen.csv",sep=";")
PC6s <- read.csv("N:/Bottum Up Analyse/2. Data/0. Gebiedsafbakening/Noord_Holland_PC6.csv",sep=";")
GVBCDB <- read.csv("BAR_GIS_MH_NRG_GVB_AANSLUITING.csv",sep=";" ,colClasses=c(EAN_CODE="character"))
EANStoHFD <- read.csv("N:/Bottum Up Analyse/2. Data/1. Baseload KV/LS_HLD_AANSLUITING.csv",sep=";",dec=",",colClasses=c(EAN="character"))
EANStoMSR <- read.csv("N:/Bottum Up Analyse/2. Data/1. Baseload KV/MSR_AANSLUITING.csv",colClasses=c(EAN="character"))
MEETPUNTCDB<- read.csv("MEETPUNT_cdb.csv",sep=";",dec=",")
CDBGISMV <- read.csv("N:/Bottum Up Analyse/2. Data/2. Baseload GV/CDB.GIS_MV.ssv",sep=";",dec=',')
CAR <- data.frame(CAR)
GVBCDB <- data.frame(GVBCDB)
sum(CAR$POSTCOD=="") # number of unknown postal codes
CAR$PC4 <- substr(CAR$POSTCOD,1,4) # create the 4-digit postal code (PC4)
CARNHN <- CAR[CAR$PC4 %in% PC4s$Postcode,] # subset to Noord-Holland Noord
CARNHN$totaal_verbruik <- as.numeric(CARNHN$totaal_verbruik)
# Add the connection category (aansluitcategorie)
AC1fyscat <- c("1x25","1x35","1x6","3x25")
AC2fyscat <- c("1x50","1x63","1x80","3x35","3x50","3x63","3x80")
CARNHN$AC<- ""
CARNHN$AC[which(CARNHN$FYSIEKE_CAPACITEI %in% AC1fyscat)] <- "AC1"
CARNHN$AC[which(CARNHN$FYSIEKE_CAPACITEI %in% AC2fyscat)] <- "AC2"
CARNHN$AC[which(CARNHN$FYSIEKE_CAPACITEI=="OBK")] <- "OBK"
CARNHN$CONTRACT_CAPACITEI <-as.numeric(CARNHN$CONTRACT_CAPACITEI )
CARNHN$AC[which(CARNHN$AC=="" & CARNHN$CONTRACT_CAPACITEI<60)] <- "AC3"
CARNHN$AC[which(CARNHN$AC=="" & CARNHN$CONTRACT_CAPACITEI>=60 & CARNHN$CONTRACT_CAPACITEI<160)] <- "AC4"
CARNHN$AC[which(CARNHN$AC=="" & CARNHN$CONTRACT_CAPACITEI>=160 & CARNHN$CONTRACT_CAPACITEI<2000)] <- "AC5"
CARNHN$AC[which(CARNHN$AC=="" & CARNHN$CONTRACT_CAPACITEI>=2000 & CARNHN$CONTRACT_CAPACITEI<10000 )] <- "AC6"
CARNHN$AC[which(CARNHN$AC=="" & CARNHN$CONTRACT_CAPACITEI>=10000)] <- "AC7"
table(CARNHN$AC)
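# Equivalent sketch of the AC3-AC7 capacity banding above using cut(), shown for
# clarity only; AC_alt is a hypothetical column, not used downstream, and this
# rule only applies to rows not already assigned AC1/AC2/OBK:
# CARNHN$AC_alt <- as.character(cut(CARNHN$CONTRACT_CAPACITEI,
#                                   breaks = c(-Inf, 60, 160, 2000, 10000, Inf),
#                                   labels = c("AC3", "AC4", "AC5", "AC6", "AC7"),
#                                   right = FALSE))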
# write out the EANs of large consumers
#write.csv(CARNHN[which(CARNHN$AC != "AC1" & CARNHN$AC != "AC2"),],file="CARNHNGV.csv")
write.csv(CARNHN$EAN_CODE_AANSLUITING[which(CARNHN$AC != "AC1" & CARNHN$AC != "AC2")],file="CARNHNGV$EAN.csv",row.names=F)
# How many are in the set of LS (low-voltage) connections?
CARinLSHLD <- CARNHN[which(CARNHN$EAN_CODE_AANSLUITING %in% EANStoHFD$EAN),]
table(CARinLSHLD$AC)
# How many are in the GVB CDB set?
CARinGVBCDB <- CARNHN[which(CARNHN$EAN_CODE_AANSLUITING %in% GVBCDB$EAN_CODE),]
table(CARinGVBCDB$AC)
# How many are in the Meetpunt CDB set?
MEETPUNTCDB$MEETWAARDE_SLEUTEL <- as.character(MEETPUNTCDB$MEETWAARDE_SLEUTEL)
MEETPUNTCDB$giskey <- as.character(MEETPUNTCDB$giskey)
#names(MEETPUNTCDB)
#table(MEETPUNTCDB$MEETWAARDE_SLEUTEL==MEETPUNTCDB$giskey)
CARinMEETPUNTCDB <- CARNHN[which(CARNHN$EAN_CODE_AANSLUITING %in% MEETPUNTCDB$giskey),]
table(CARinMEETPUNTCDB$AC)
# All EANs that were found
CARTERUG <- rbind(CARinGVBCDB, CARinLSHLD,CARinMEETPUNTCDB)
CARTERUG <- unique(CARTERUG)
CARnietTERUG <- CARNHN[-which(CARNHN$EAN_CODE_AANSLUITING %in% CARTERUG$EAN_CODE_AANSLUITING),]
table(CARTERUG$AC)
ddply(CARNHN,"AC",summarise,SJVAC=sum(totaal_verbruik))
ddply(CARTERUG,"AC",summarise,SJVAC=sum(totaal_verbruik))
#MPOINTS GVBCDB in CDB
names(GVBCDB)
CARinGVBCDB2 <- merge(data.frame(CARinGVBCDB),GVBCDB, by.x=c("EAN_CODE_AANSLUITING"),by.y="EAN_CODE",all.x=T)
sum(GVBCDB$m_point %in% CDBGISMV$M_POINT)
sum(!(GVBCDB$m_point %in% CDBGISMV$M_POINT))
uniekeeans <- unique(CARinGVBCDB2$m_point)
sum(uniekeeans %in% CDBGISMV$M_POINT)
test<-(uniekeeans %in% CDBGISMV$M_POINT)
uniekecombi <- unique(CARinGVBCDB2[,c("EAN_CODE_AANSLUITING","m_point")])
uniekecombi$m_point[which(duplicated(uniekecombi$m_point))]
unique(CARNHN[-which(CARNHN$EAN_CODE_AANSLUITING %in% CARTERUG$EAN_CODE_AANSLUITING),]$PC4)
LSHLDnietinCARNHN <- EANStoHFD[-which(EANStoHFD$EAN %in% CARNHN$EAN_CODE_AANSLUITING),]
# Write out data files for the unlinked assets
LSHLDnietTERUG <- CARnietTERUG[which(CARnietTERUG$AC=='AC1'|CARnietTERUG$AC=='AC2'),]
write.csv( LSHLDnietTERUG,file="N:/Bottum Up Analyse/2. Data/1. Baseload KV/KVonbekendeHLD.csv")
MSRnietTERUG <- CARNHN[which(!(CARNHN$EAN_CODE_AANSLUITING %in% EANStoMSR$EAN) & !(CARNHN$EAN_CODE_AANSLUITING %in% CARnietTERUG$EAN_CODE_AANSLUITING)
                             & (CARNHN$AC=='AC1'|CARNHN$AC=='AC2')) ,] # select EANs for which no MSR was found, but an HLD was
write.csv( MSRnietTERUG,file="N:/Bottum Up Analyse/2. Data/1. Baseload KV/KVonbekendeMSR.csv") # write out EANs for which the HLD is known but the MSR is not
sum(LSHLDnietTERUG$EAN_CODE_AANSLUITING %in% MSRnietTERUG$EAN_CODE_AANSLUITING) |
6135816c508bfb9c53934d1ddda6ab48a94bc06d | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/macie_list_s3_resources.Rd | 02b298b91e521e2f4fd0c30f5dbb37665db61627 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,657 | rd | macie_list_s3_resources.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/macie_operations.R
\name{macie_list_s3_resources}
\alias{macie_list_s3_resources}
\title{Lists all the S3 resources associated with Amazon Macie Classic}
\usage{
macie_list_s3_resources(memberAccountId, nextToken, maxResults)
}
\arguments{
\item{memberAccountId}{The Amazon Macie Classic member account ID whose associated S3 resources
you want to list.}
\item{nextToken}{Use this parameter when paginating results. Set its value to null on
your first call to the ListS3Resources action. Subsequent calls to the
action fill nextToken in the request with the value of nextToken from
the previous response to continue listing data.}
\item{maxResults}{Use this parameter to indicate the maximum number of items that you want
in the response. The default value is 250.}
}
\value{
A list with the following syntax:\preformatted{list(
s3Resources = list(
list(
bucketName = "string",
prefix = "string",
classificationType = list(
oneTime = "FULL"|"NONE",
continuous = "FULL"
)
)
),
nextToken = "string"
)
}
}
\description{
Lists all the S3 resources associated with Amazon Macie Classic. If
memberAccountId isn't specified, the action lists the S3 resources
associated with Amazon Macie Classic for the current master account. If
memberAccountId is specified, the action lists the S3 resources
associated with Amazon Macie Classic for the specified member account.
}
\section{Request syntax}{
\preformatted{svc$list_s3_resources(
memberAccountId = "string",
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
|
dffdfd1ea58478911e4381716bb1f035f7b5d61f | 8dd9aac2ca7b762fd56fdba4394b3171e68b1ce2 | /code/R/functions.R | d92f49a97b79c852eb5248bf6b2e56280ad601de | [] | no_license | zach-nelson/veg-water-requirement | e4a93078273f5c91501807cbf6103d72628b3205 | 4937d406adf63a60f5106cf4711e4a267f17a37d | refs/heads/master | 2023-06-15T21:43:53.998005 | 2021-07-08T21:00:02 | 2021-07-08T21:00:02 | 382,192,973 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,591 | r | functions.R |
#' LAI-weighted average VWR for the 'OTHER' category
#'
#' Computes, by site and period, the LAI-weighted average vegetation water
#' requirement per unit LAI (sum(vwr) / sum(lai)) across the named species,
#' and labels the result as species 'OTHER'.
#'
#' @param vwr A data frame of per-species estimates with (at least) columns
#'   site, period, species, all.hits, vwr, and lai.
#'
#' @return A grouped tibble with columns site, period, w_avg_vwr_per_lai, and species.
#' @export
#'
#' @examples
weighted_avg <- function(vwr){
vwr %>% filter(species != 'OTHER') %>% filter(all.hits >0) %>%
group_by(site,period) %>%
summarise(w_avg_vwr_per_lai = sum(vwr)/sum(lai)) %>%
mutate(species = 'OTHER')
}
#' Total VWR including the 'OTHER' category
#'
#' Joins the weighted-average VWR per unit LAI onto the per-species table,
#' estimates the VWR of the 'OTHER' fraction as w_avg_vwr_per_lai * lai,
#' combines both into total.vwr, and adds an ordered site factor (site.f).
#'
#' @param vwr A data frame of per-species VWR estimates (as used by weighted_avg).
#' @param weighted.avg. The output of weighted_avg().
#'
#' @return The input data with columns other.vwr, total.vwr, and site.f added.
#' @export
#'
#' @examples
vwr_total <- function(vwr, weighted.avg.){
vwr %>% left_join(weighted.avg., by = c('site', 'species', 'period')) %>%
mutate(other.vwr = w_avg_vwr_per_lai * lai,
total.vwr = case_when(!is.na(vwr)~vwr,
!is.na(other.vwr)~other.vwr),
site.f = factor(site, levels = c("LW1",
"LW2",
"LW3",
"BC1",
"BC2",
"BC3",
"BP1",
"BP2",
"BP3",
"BP4",
"TA3",
"TA4",
"TA5",
"TA6",
"TAC",
"TS1",
"TS2",
"TS3",
"TS4",
"TSC",
"IO1",
"IO2",
"IC1",
"IC2",
"SS1",
"SS2",
"SS3",
"SS4",
"BG2",
"BGC")
)
)
}
#' Site-total VWR by period, in wide format
#'
#' Sums total.vwr by site and period (keeping rows with all.hits > 0) and
#' spreads the periods into columns.
#'
#' @param vwr.total The output of vwr_total().
#' @param cYear Year label; currently unused inside the function body.
#'
#' @return A wide tibble with one row per site.f and one column per period.
#' @export
#'
#' @examples
vwr_site_total_period <- function(vwr.total,cYear){
vwr.total %>% filter(all.hits > 0) %>%
select(site.f,period,species,lai,total.vwr) %>%
group_by(site.f,period) %>%
summarise(site.vwr = sum(total.vwr)) %>%
pivot_wider(names_from = period, values_from = site.vwr)
}
|
988b7d05bf9f88b12b879781e51d5d126b029e9c | 66416793e3f2a8371cdb815906b6f60b9bddb83f | /R_Programming/week2/lecture.R | a7b4059bd962dce569b4ac162236ec1dff5d02c0 | [] | no_license | abysmalocean/Data_Online | 7fff3310af5b50a3326a904bb39365ee85e8d5db | 1e674b1f6d267af27b528eb23e1b5b7a47bd5120 | refs/heads/master | 2021-01-10T18:35:27.518244 | 2015-01-20T05:04:33 | 2015-01-20T05:04:33 | 29,333,053 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 452 | r | lecture.R |
add2 <- function(x,y){
x+y
}
above <- function(x,n = 10){
use <- x>n
x[use]
}
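above(1:20)  # returns 11:20 with the default threshold n = 10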
colum <- function(y){
nc <- ncol(y)
means <- numeric(nc)
for(i in 1:nc){
    means[i] <- mean(y[,i], na.rm = TRUE)
}
means
}
x <- matrix(1:6,2,3)
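colum(x)  # column means of the 2x3 matrix: 1.5 3.5 5.5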
for (i in seq_len(nrow(x))){
for(j in seq_len(ncol(x))){
print(x[i,j])
}
}
count <- 0
while (count<100){
count <- count +1
  if (count > 20){next}
print(count)
}
x0<- 1
tol<-1e-8
repeat{
  x1 <- computeEstimate()  # computeEstimate() is a placeholder from the lecture, not a real function
  if(abs(x1 - x0) < tol) break
  x0 <- x1
}
3fcaadeff5474ebd8d7310ab22bd58f6763fdb31 | aa0e6b5c88fb5351d207aebf04dc96d6870f73d4 | /functions/func_compute_avalanche_static_grids.R | a084461e917e3628ac360004a992b027f01375fc | [] | no_license | pohleric/mass_balance_model | 119d96164389002a8689fb4feef3bedc1b791d1d | de1e641bb7639542d36f0a8d262b4833ae24507d | refs/heads/main | 2023-08-05T05:56:30.270151 | 2021-09-22T13:44:06 | 2021-09-22T13:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,062 | r | func_compute_avalanche_static_grids.R | ###################################################################################################
# Author: Enrico Mattea (@unifr.ch) #
# Description: this program models the distributed mass balance of a glacier at daily #
# resolution, optimizing model parameters towards the best fit with point #
# mass balance measurements. #
# This file contains the one-time computation of derived grids from the DHMs, to #
# be used by the avalanche module. #
###################################################################################################
func_compute_avalanche_static_grids <- function(run_params, data_dhms) {
cat("Computing avalanche static grids...\n")
conv_deg2rad <- pi / 180
#### Prepare data structures ####
# We keep a lot of intermediate grids to make it easy
# to debug problems with the avalanche routine.
# Structure: avalanche list of lists
# $elevation_proc list of rasters, one element per available grid
# $dz list of lists, one element per available grid
# [[1]] list of lists, one element per direction (always 4 elements)
# So: avalanche$elevation_proc[[4]] is the hydrologically corrected grid for the fourth elevation raster available
# avalanche$dz[2]][[3]] is the elevation-difference grid for the second elevation raster available, for the east direction
avalanche <- list()
avalanche$elevation_proc <- list()
avalanche$slope_proc <- list()
avalanche$aspect_proc <- list()
avalanche$movable_frac <- list()
avalanche$flow_width <- list()
avalanche$dz <- list()
avalanche$draining_coeff <- list()
avalanche$draining_coeff_sum <- list()
avalanche$residual_sink_cell_ids <- list()
avalanche$draining_fraction <- list()
avalanche$deposition_max <- list()
avalanche$elevation_sorted_ids <- list()
#### Fill data structures ####
for (grid_id in 1:data_dhms$n_grids) {
# Create directional structures.
avalanche$flow_width[[grid_id]] <- list()
avalanche$dz[[grid_id]] <- list()
avalanche$draining_coeff[[grid_id]] <- list()
avalanche$draining_fraction[[grid_id]] <- list()
# Process the elevation grid to make it hydrologically correct
# (no flat patches, no sinks).
cat(" Pre-processing elevation grid", paste0(grid_id, "...\n"))
avalanche$elevation_proc[[grid_id]] <- func_elevation_preprocess(run_params, data_dhms$elevation[[grid_id]])
# Compute slope and aspect.
# We extend along the borders
# (nearest neighbor: we replicate
# the closest row/column)
# to avoid having dangerous NA values.
avalanche$slope_proc[[grid_id]] <- terrain(avalanche$elevation_proc[[grid_id]], "slope", "degrees")
avalanche$slope_proc[[grid_id]][1:run_params$grid_ncol] <- avalanche$slope_proc[[grid_id]][run_params$grid_ncol + (1:run_params$grid_ncol)]
avalanche$slope_proc[[grid_id]][run_params$grid_ncells - run_params$grid_ncol + 1:run_params$grid_ncol] <- avalanche$slope_proc[[grid_id]][run_params$grid_ncells - (2*run_params$grid_ncol) + 1:run_params$grid_ncol]
avalanche$slope_proc[[grid_id]][seq(1,run_params$grid_ncells,run_params$grid_ncol)] <- avalanche$slope_proc[[grid_id]][seq(2,run_params$grid_ncells,run_params$grid_ncol)]
avalanche$slope_proc[[grid_id]][seq(run_params$grid_ncol,run_params$grid_ncells,run_params$grid_ncol)] <- avalanche$slope_proc[[grid_id]][seq(run_params$grid_ncol-1,run_params$grid_ncells,run_params$grid_ncol)]
avalanche$aspect_proc[[grid_id]] <- terrain(avalanche$elevation_proc[[grid_id]], "aspect", "degrees")
avalanche$aspect_proc[[grid_id]][1:run_params$grid_ncol] <- avalanche$aspect_proc[[grid_id]][run_params$grid_ncol + (1:run_params$grid_ncol)]
avalanche$aspect_proc[[grid_id]][run_params$grid_ncells - run_params$grid_ncol + 1:run_params$grid_ncol] <- avalanche$aspect_proc[[grid_id]][run_params$grid_ncells - (2*run_params$grid_ncol) + 1:run_params$grid_ncol]
avalanche$aspect_proc[[grid_id]][seq(1,run_params$grid_ncells,run_params$grid_ncol)] <- avalanche$aspect_proc[[grid_id]][seq(2,run_params$grid_ncells,run_params$grid_ncol)]
avalanche$aspect_proc[[grid_id]][seq(run_params$grid_ncol,run_params$grid_ncells,run_params$grid_ncol)] <- avalanche$aspect_proc[[grid_id]][seq(run_params$grid_ncol-1,run_params$grid_ncells,run_params$grid_ncol)]
# Movable fraction of the initial mass distribution
# linearly increases from 0 to 1 between the lower
# and upper slope thresholds.
avalanche$movable_frac[[grid_id]] <- setValues(avalanche$slope_proc[[grid_id]],
pmax(0, pmin(1, scales::rescale(getValues(avalanche$slope_proc[[grid_id]]),
to = c(0, 1),
from = c(run_params$movable_slope_lim_lower, run_params$movable_slope_lim_upper)))))
# Compute flow widths to 4-neighbors (Eqs. 3-6 of Gruber, 2007).
# Indices: 1 top, 2 left, 3 right, 4 bottom (as in Gruber, 2007, Fig. 1).
avalanche$flow_width[[grid_id]][[1]] <- cos(conv_deg2rad * avalanche$aspect_proc[[grid_id]]) * run_params$grid_cell_size
avalanche$flow_width[[grid_id]][[2]] <- -sin(conv_deg2rad * avalanche$aspect_proc[[grid_id]]) * run_params$grid_cell_size
avalanche$flow_width[[grid_id]][[3]] <- -avalanche$flow_width[[grid_id]][[2]]
avalanche$flow_width[[grid_id]][[4]] <- -avalanche$flow_width[[grid_id]][[1]]
# Compute elevation difference to 4-neighbors.
# Same indexing as above.
avalanche$dz[[grid_id]][[1]] <- setValues(avalanche$elevation_proc[[grid_id]], c(rep(NA, run_params$grid_ncol), avalanche$elevation_proc[[grid_id]][2:run_params$grid_nrow,] - avalanche$elevation_proc[[grid_id]][1:(run_params$grid_nrow - 1),]))
avalanche$dz[[grid_id]][[2]] <- avalanche$dz[[grid_id]][[1]]
avalanche$dz[[grid_id]][[2]][,1:run_params$grid_ncol] <- c(rep(NA, run_params$grid_nrow), avalanche$elevation_proc[[grid_id]][,2:run_params$grid_ncol] - avalanche$elevation_proc[[grid_id]][,1:(run_params$grid_ncol - 1)]) # We cannot use setValues() here because we compute by column and setValues sets by row.
avalanche$dz[[grid_id]][[3]] <- avalanche$dz[[grid_id]][[1]]
avalanche$dz[[grid_id]][[3]][,1:run_params$grid_ncol] <- c(avalanche$elevation_proc[[grid_id]][,1:(run_params$grid_ncol - 1)] - avalanche$elevation_proc[[grid_id]][,2:run_params$grid_ncol], rep(NA, run_params$grid_nrow)) # We cannot use setValues() here because we compute by column and setValues sets by row.
avalanche$dz[[grid_id]][[4]] <- setValues(avalanche$elevation_proc[[grid_id]], c(avalanche$elevation_proc[[grid_id]][1:(run_params$grid_nrow - 1),] - avalanche$elevation_proc[[grid_id]][2:run_params$grid_nrow,], rep(NA, run_params$grid_ncol)))
# Compute draining coefficient to 4-neighbors (Eq. 7 in Gruber, 2007).
for (dir_id in 1:4) {
avalanche$draining_coeff[[grid_id]][[dir_id]] <- avalanche$flow_width[[grid_id]][[dir_id]] * (avalanche$dz[[grid_id]][[dir_id]] > 0) * (avalanche$flow_width[[grid_id]][[dir_id]] > 0)
}
avalanche$draining_coeff_sum[[grid_id]] <- avalanche$draining_coeff[[grid_id]][[1]] + avalanche$draining_coeff[[grid_id]][[2]] + avalanche$draining_coeff[[grid_id]][[3]] + avalanche$draining_coeff[[grid_id]][[4]]
# Despite all the DEM pre-processing, there can be cells
# with draining_coeff_sum = 0, i.e. no drainage possible.
# These arise when the computed flow widths do not match
# the computed dz (flow width is computed from aspect, which is
# determined via curve fitting since a plane is determined
# by 3 points, so on a regular grid with 4 neighbors aspect is not
# well defined!). In those cases, the direction for which dz<i> > 0
# does not match the direction for which L<i> > 0, so that no drainage
# would be possible from those cells. These would behave as infinite
# sinks, stealing mass as it enters but cannot exit.
# To solve this, we take those (hopefully rare) cells and
# we force drainage towards the most likely direction
# (i.e., of all the directions with dz<i> > 0, the direction
# for which flow width is least negative).
avalanche$residual_sink_cell_ids[[grid_id]] <- which(getValues(avalanche$draining_coeff_sum[[grid_id]]) == 0)
residual_sinks_n <- length(avalanche$residual_sink_cell_ids[[grid_id]])
# cat("Residual sinks detected:", residual_sinks_n, "\n")
if (residual_sinks_n) {
for (residual_sink_id in 1:residual_sinks_n) {
residual_sink_cell_id <- avalanche$residual_sink_cell_ids[[grid_id]][residual_sink_id]
# Get dz and flow widths of problem cell (two 4-vectors).
# There won't be corresponding elements which are both positive
# (else the cell would not be problematic).
# We want the element with least negative flow width,
# and positive dz.
# Nested sapply(): we retrieve from the nested list the 4 grids (of both dz
# and flow width) of the current grid index (inner sapply()), then for each
# we extract the value at cell index residaul_sink_cell_id.
cell_orig_ids <- 1:4
cell_dzs <- sapply(avalanche$dz[[grid_id]], `[`, residual_sink_cell_id)
cell_flow_widths <- sapply(avalanche$flow_width[[grid_id]], `[`, residual_sink_cell_id)
cell_df <- data.frame(cell_orig_ids, cell_dzs, cell_flow_widths)
cell_df_downslope <- cell_df[which(cell_df$cell_dzs >= 0),]
cell_df_id_sel <- which.max(cell_df_downslope$cell_flow_widths)
dir_id <- cell_df_downslope$cell_orig_ids[cell_df_id_sel]
# Send all snow towards the selected cell.
avalanche$draining_coeff[[grid_id]][[dir_id]][residual_sink_cell_id] <- 1
avalanche$draining_coeff_sum[[grid_id]][residual_sink_cell_id] <- 1
}
}
cat(" Residual sinks fixed.\n")
# Compute normalized draining fractions for the 4 directions (Eq. 9 in Gruber, 2007).
for (dir_id in 1:4) {
avalanche$draining_fraction[[grid_id]][[dir_id]] <- avalanche$draining_coeff[[grid_id]][[dir_id]] / avalanche$draining_coeff_sum[[grid_id]]
}
# Compute max deposition (Eq. 10 in Gruber, 2007); [kg m-2].
avalanche$deposition_max[[grid_id]] <- (1 - avalanche$slope_proc[[grid_id]] / run_params$deposition_slope_lim) * run_params$deposition_mass_lim * (avalanche$slope_proc[[grid_id]] < run_params$deposition_slope_lim)
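    # (I.e. maximum deposition decreases linearly from deposition_mass_lim on
    # flat terrain to zero at deposition_slope_lim, and is zero on steeper slopes.)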
# Compute indices for loop from highest to lowest domain cell.
# Cells are indexed by row (i.e. first all the first row, then
# all the second row; the last cell is the bottom-right corner).
# We exclude all cells along the border since their drainage is problematic.
# Of course the glacier should not reach the grid border.
elevation_sorted_ids_raw <- sort(getValues(avalanche$elevation_proc[[grid_id]]), decreasing = TRUE, index.return = TRUE)[[2]]
elevation_ids_border <- unique(c(cellFromRow(avalanche$elevation_proc[[grid_id]], c(1, run_params$grid_nrow)), cellFromCol(avalanche$elevation_proc[[grid_id]], c(1, run_params$grid_ncol))))
avalanche$elevation_sorted_ids[[grid_id]] <- setdiff(elevation_sorted_ids_raw, elevation_ids_border)
}
cat(" Finished computation of avalanche static grids.\n")
return(avalanche)
} |
3298564796a21e0325f3c724af9f06373a6615a2 | 6e1127ae119393660d3e527944dbf84b0b808bc1 | /Machine Learning with R/Chapter3/knneighbor.R | 8e8caa216ad4a7bdadac87bf38b4c3736cd55eac | [] | no_license | dheis1337/MachineLearning | 48cb55cadecefb1ef8653056249ba206d2a7ecf5 | cad92f7972bb611e3de104a703a7debe88b8fc05 | refs/heads/master | 2021-01-11T23:27:24.061971 | 2018-05-03T04:50:26 | 2018-05-03T04:50:26 | 77,752,090 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,206 | r | knneighbor.R | library(class)
library(gmodels)
setwd("c:/mystuff/datascience/projects/machinelearning/Machine Learning with R/chapter3")
dat <- read.csv("data.csv", stringsAsFactors = FALSE) # load data
# Let's do some exploring
head(dat)
summary(dat)
# Our data is composed mostly of numeric variables that represent different measurements
# for tumor masses found in patients. The two non-numeric variables are id and
# diagnosis. The id variable is a simple patient id, and the diagnosis is our outcome
# variable we are attempting to classify. Since we don't need the id, let's remove
# it.
dat <- dat[, -c(1, 33)]
# Before we begin running our model, we need to do some additional cleaning. First
# let's make the diagnosis variable a factor and extend the outcomes to "benign"
# and "malignant".
dat$diagnosis <- factor(dat$diagnosis, levels = c("B", "M"),
labels = c("Benign", "Malignant"))
# Our next cleaning requirement will be to normalize our data. We must normalize our
# data, because we have numeric variables that are of different magnitude. For instance
# the compactness_mean variable is between .01938 and .34540, while our area_mean
# variable is between 143.5 and 2501. Since we're going to be classifying using
# Euclidean distance, the differences in magnitude will affect our analysis negatively.
# To normalize our data, we'll create a simple function, which will then be applied
# to each column of our data.
normalize <- function(x) {
# Takes one argument - a numeric variable - and normalizes it using the appropriate
# normalizing formula
(x - min(x)) / (max(x) - min(x))
}
# Now, let's ensure this function works correctly
normalize(c(1, 2, 3, 4, 5))
# Our function works properly, so let's apply it to each column in our data set.
dat[2:31] <- apply(dat[2:31], MARGIN = 2, FUN = normalize)
# Now that we've cleaned our data, we want to split it up into test and training
# sets.
train <- dat[1:469, 2:31]
test <- dat[470:569, 2:31]
# Let's also store the diagnosis variables in separate vectors for training and
# testing sets
diag.train <- dat[1:469, 1]
diag.test <- dat[470:569, 1]
# We're ready to run our model over our data. We'll use the knn() function from
# the class package to do this.
dat.pred <- knn(train = train, test = test, cl = diag.train, k = 21)
# Now that we have our prediction vector, let's evaluate the performance of our model.
# To do so, we'll use the CrossTable() function from the gmodels package.
CrossTable(x = diag.test, y = dat.pred, prop.chisq = FALSE)
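# A quick numeric cross-check of the rates discussed below. conf.mat, accuracy,
# sensitivity, and specificity are new helper names introduced here for
# illustration (not part of the original script):
conf.mat <- table(actual = diag.test, predicted = dat.pred)
accuracy <- sum(diag(conf.mat)) / sum(conf.mat)
sensitivity <- conf.mat["Malignant", "Malignant"] / sum(conf.mat["Malignant", ])
specificity <- conf.mat["Benign", "Benign"] / sum(conf.mat["Benign", ])
c(accuracy = accuracy, sensitivity = sensitivity, specificity = specificity)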
# Examining the output of this function call, we see that our true negative rate
# of classification was 100%: every benign tumor was classified as benign. We also
# correctly classified malignant tumors 91.3% of the time, our true positive rate.
# Of the tumors our model labeled benign, 97.5% really were benign; the remaining
# 2.5% were malignant tumors predicted as benign (our false negatives). Finally, our
# false positive rate is 0, which is where our model predicted malignant but the
# tumor was actually benign. Overall, we correctly classified tumors 98% of the
# time, which is great for such a simple model and less than 100 lines of code!
|
e0949c407e9a1bf17a99071e3becde399643166e | d631485475ccb99744272b6749e519944c1f98d2 | /3-GeographicInformation.R | 0b06d2df712d4ec2d284d4911f10a40a93ec9a37 | [] | no_license | garonen/2019_Acropora | e052c67799ff768c2d3bce9e360f4bead315c052 | d1b342d0bc7f310dea4137a6fdc69c448158dba1 | refs/heads/master | 2021-10-27T19:03:50.474130 | 2019-04-19T01:42:12 | 2019-04-19T01:42:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,043 | r | 3-GeographicInformation.R | # Characterising geographic data and save to locations.Rdata for future use
# Makes a dataframe that includes site-specific colors plus an additional set of geographic
# variables: an axis that captures distance to the shoreline, and an orthogonal eigenvector (roughly parallel to the shoreline)
# Written by Cynthia Riginos 2017-2019
library(plotrix)
library(maps)
library(rgeos)
library(maptools)
library(raster)
library(rgdal)
library(vegan)
locations<-read.csv("ORIGDATA/ReefID_centroids_tenuis.csv", header =TRUE) #these are lat and long for centers of each reef location
#locations<-read.csv("ORIGDATA/ReefID_centroids_millepora.csv", header =TRUE) #these are lat and long for centers of each reef location
#ASSIGN COLORS BY LATITUDE
loc_color<-color.scale(1:100, c(0,1,1), c(0,1,0),c(1,1,0),color.spec="rgb") #blue to red with white in middle
latvalue<-(locations$Lat+24)/16
latvalue2<-(100*round(latvalue, digits = 2))
locations$loc_color<-loc_color[latvalue2]
#quick check that colors and locations look ok
plot(locations$Long, locations$Lat, ylab = "Latitude", xlab = "Longitude", type ="n")
map(database = "world2", add= TRUE, interior=FALSE) #world2 is Pacific centered
points(locations$Long, locations$Lat, col = locations$loc_color, pch=19, cex = 1.5)
text(locations$Long, locations$Lat, locations$ReefID, cex=0.6, pos=3)
##GET DISTANCE TO COASTLINE
#Coordinate systems
wgs.84 <- CRS("+init=epsg:4326")
#from Geoscience Australia - http://spatialreference.org/ref/epsg/3112/
epsg.3112<-"+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
coast <- readShapeLines("ORIGDATA/ne_50m_coastline/ne_50m_coastline.shp") #http://www.naturalearthdata.com
proj4string(coast)<-wgs.84
#Create locations spatial points dataframe
locations.sp<-locations[,c(3,2)]
coordinates(locations.sp)<- c("Long", "Lat")
proj4string(locations.sp)<-wgs.84
#Transform
locations.proj.sp <- spTransform(locations.sp,CRS(epsg.3112))
coast.proj <- spTransform(coast,CRS(epsg.3112))
#Extract distance to coastline
for (p in 1:length(locations.proj.sp)) {
locations[p,"DistToCoastline"]<-round(gDistance(locations.proj.sp[p],coast.proj)/1000, digits=0)
}
cor(locations$Lat,locations$DistToCoastline) #AT: -0.66; AM: -0.41
cor(locations$Long,locations$DistToCoastline) #AT: 0.83; AM: 0.61
#Use RDA to find eigenvector and eigenvalues perpendicular to shoreline distance eigenvector
long.lat<-locations[,c(3,2)]
geog.pca<-rda(long.lat~DistToCoastline, data=locations)
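# (rda() constrains RDA1 to align with DistToCoastline; the first residual axis,
# PC1, is orthogonal to it, i.e. roughly parallel to the coastline.)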
summary(geog.pca) #AT: RDA1 = 54.95% variance, PC1 = 44.00% (total 98.9%); AM: RDA1 = 26%, PC1 = 73% (total 98.9%)
newscores<-as.data.frame(scores(geog.pca, scaling=1, choices=1:2)$sites)
plot(newscores$RDA1, -newscores$PC1, pch=20, col=locations$loc_color) #quick check
locations$DistToCoastPC<-newscores$RDA1
locations$ParallelToCoastPC<- -newscores$PC1
save(locations, file="CLEANDATA/tenuis/locations.aten.RData" )
#save(locations, file="CLEANDATA/millepora/locations.mill.RData" )
rm(list=ls())
|
a0e3c4a8f67c691dd590c6fde4e300a303407c7b | 3b7ef709ee9adadf13d71087e817647565ad1bb7 | /plot4.R | b796585335bdc04b8178f6cd9099931f40828790 | [] | no_license | nclsbarreto/ExData_Plotting1 | a1538e89ad7bd5b50fff1d622e480f9a136c4bef | 790419c45e72b4ffcc83e8a57bdd61650d4c05dc | refs/heads/master | 2020-05-22T11:20:00.493110 | 2019-05-13T01:02:27 | 2019-05-13T01:02:27 | 186,321,602 | 0 | 0 | null | 2019-05-13T00:52:51 | 2019-05-13T00:52:51 | null | UTF-8 | R | false | false | 1,525 | r | plot4.R | library(tidyverse)
getwd()
list.files()
# read in file
df <- read.table(file = "./household_power_consumption.txt", header = TRUE, sep = ";",
na.strings = "?")
# look at data
head(df)
names(df)
str(df)
head(df[c(1,10)], 30)
# convert date to date format
df$Date.2 <- as.Date(df$Date, "%d/%m/%Y")
str(df)
# subset dates 2007-02-01 and 2007-02-02 -- i used the pipe for practice
df.sub <- df %>% subset(Date.2 == "2007-02-01" | Date.2 == "2007-02-02")
str(df.sub)
# make datetime variable
df.sub$datetime <- as.POSIXct(paste0(df.sub$Date, " ", df.sub$Time),
format = "%d/%m/%Y %H:%M:%OS")
str(df.sub)
# plot4 - grid
png("plot4.png", height = 480, width = 480)
par("mfcol" = c(2,2))
plot(x = df.sub$datetime, y = df.sub$Global_active_power,
ylab = "Global Active Power (Kilowatts)", xlab = "",
type = "l")
plot(x = df.sub$datetime, y = df.sub$Sub_metering_1,
ylab = "Energy Sub Metering", xlab = "",
type = "l")
lines(x = df.sub$datetime, y = df.sub$Sub_metering_2, col = "red")
lines(x = df.sub$datetime, y = df.sub$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lty = c(1,1), lwd = c(1,1))
plot(x = df.sub$datetime, y = df.sub$Voltage,
ylab = "Voltage", xlab = "datetime",
type = "l")
plot(x = df.sub$datetime, y = df.sub$Global_reactive_power,
ylab = "Global_reactive_power", xlab = "datetime",
type = "l")
dev.off()
|
cceb51d476ec735874920f66586c78c551b876c0 | 1446f8659af6b8c82f46b511cfd7d18d75975017 | /R/moreno.R | 0b4d74155bec10dc01203efba13b5c734bce7d93 | [] | no_license | aL3xa/moReno | beaa957ba514a85b8c15877b1ab91d3408cbd401 | b50751ed27cf09601e7781757ae4e73ac7a2f097 | refs/heads/master | 2020-05-17T22:34:55.417163 | 2014-01-20T19:54:29 | 2014-01-20T19:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 250 | r | moreno.R | #' moReno - an R package for Jacob Moreno's sociometry technique
#'
#' Provides helpers for calculating sociometric indexes (both group and individual) and for sociogram plotting. Allows data import from CSV.
#' @docType package
#' @name moreno-package
NULL
|
d6b3ced09962e280c0ec57f88c4f4065eda1df14 | 5cdc28ca46d98557a4199a4664308454c860751d | /R/master_field_stat_mle.R | 41305ce1e79648e04d00703d370e4e1d9d719439 | [] | no_license | AndyTan524/baseball_backend | e96902192981b3528d111c5aafcf06a7f24b0059 | f8de8733e83a1306e75c9d42b18b34ad1dfcdf55 | refs/heads/master | 2020-05-16T18:51:18.653678 | 2019-04-24T14:15:22 | 2019-04-24T14:15:22 | 183,242,198 | 0 | 1 | null | 2019-05-10T09:34:15 | 2019-04-24T14:08:25 | R | UTF-8 | R | false | false | 3,306 | r | master_field_stat_mle.R | library(XML)
library(tidyr)
library(plyr)
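# Scrape Triple-A box-score fielding stats (putouts, assists, errors by
# position) from MLB Gameday for the selected date, then append any games not
# already present in master_field_stat_mle.csv.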
master_field <- read.csv("master_field_stat_mle.csv")
aaa <- read.csv("2016_aaa.csv")
aaa$date <- substr(aaa$game,5,14)
aaa$date <- as.Date(aaa$date,"%Y_%m_%d")
aaa <- aaa[aaa$date == as.Date("2016-07-10"),]
master1 <- data.frame(matrix(NA,nrow=1,ncol=9))
colnames(master1) <- c("id","name_display_first_last","pos","po","a","e","gameday","date","used")
for(k in 1:nrow(aaa)){
print(paste0(k," of ",nrow(aaa)))
url <- paste0("http://gd2.mlb.com/components/game/aaa/year_2016/","month_",substr(aaa$game[k],10,11),"/day_",substr(aaa$game[k],13,14),"/",aaa$game[k],"boxscore.xml")
t <- try(xmlTreeParse(url))
if(class(t) == "try-error"){
next
}
xmlfile <- xmlTreeParse(url)
xmlfile <- xmlfile[[1]]
xmltop = xmlRoot(xmlfile)
xmltop <- xmltop[[3]]
xmltop2 <- xmlToList(xmltop)
master <- data.frame(matrix(NA,nrow=6,ncol=1))
rownames(master) <- c("id","name_display_first_last","pos","po","a","e")
colnames(master) <- c("no_name")
for(i in 1:length(xmltop2)){
data <- as.data.frame(xmltop2[i])
if(length(data) == 0){
next;
}
if("team_flag" %in% row.names(data)){
next;
}
data <- data[c("id","name_display_first_last","pos","po","a","e"),]
master <- cbind(master,data)
colnames(master)[i+1] <- as.character(data[1])
}
master2 <- data.frame(matrix(NA,nrow=ncol(master),ncol=6))
colnames(master2) <- c("id","name_display_first_last","pos","po","a","e")
master2$id <- t(master[1,])
master2$name_display_first_last <- t(master[2,])
master2$pos <- t(master[3,])
master2$po <- t(master[4,])
master2$a <- t(master[5,])
master2$e <- t(master[6,])
master2 <- master2[!master2$id %in% NA,]
master2 <- master2[!master2$pos %in% "PH",]
master2$gameday <- ""
master2$gameday <- substr(x = url,start = 66,stop=95)
master2$date <- substr(x=master2$gameday,start =5 ,stop=14)
master2$date <- as.character(as.Date(master2$date,"%Y_%m_%d"))
master2$used <- ""
master1 <- rbind(master1,master2)
}
master1 <- master1[!master1$pos %in% c("PR","DH","PH","NA","",NA),]
dash <- grep(pattern = "-",x = master1$pos)
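# resolve combined position strings such as "SS-2B" or "PH-RF": keep the first
# true fielding position, falling back to PH/PR/DH only when nothing else is listed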
for(j in 1:length(dash)){
print(paste0(j," of ",length(dash)))
all <- length(unlist(strsplit(x = master1$pos[dash[j]],split = "-")))
if(unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[1] %in% c("PR","DH","PH")){
for(k in 1:all){
if(!unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[k] %in% c("PH","PR","DH")){
master1$pos[dash[j]] <- unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[k]
break;
}
if(unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[k] %in% c("PH","PR","DH")){
if(k == all){
master1$pos[dash[j]] <- unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[k]
}
next;
}
}
}
if(!unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[1] %in% c("PR","DH","PH")){
master1$pos[dash[j]] <- unlist(strsplit(x = master1$pos[dash[j]],split = "-"))[1]
}
}
master1 <- master1[!master1$pos %in% c("PH","PR","DH"),]
id_exist <- as.character(unique(master_field$gameday))
master1 <- master1[!master1$gameday %in% id_exist,]
write.csv(master1,"master_field_stat_mle.csv",row.names = FALSE)
|
2469bde307c43623e2ebccf32187e0a3986fa2fa | 4e184c3d5fb98b6b0d860970755f31beb737a453 | /Exploratory Data Analysis/Project Assignment 2/Plot2.R | 3c248e92b1dc6bb6e8522637e66072796746b604 | [] | no_license | Teolone88/datasciencecoursera | 97cece01aacd982ebed2346580b37b230ddef53b | 521109847ca7807e427be8dc6f523ae5c233d6ef | refs/heads/master | 2023-03-01T04:49:16.082810 | 2021-02-08T13:31:07 | 2021-02-08T13:31:07 | 292,025,815 | 0 | 1 | null | 2020-10-15T01:54:34 | 2020-09-01T14:49:18 | HTML | UTF-8 | R | false | false | 1,370 | r | Plot2.R | Sys.setlocale("LC_ALL", "English")
## Download file best practice
##if (!file.exists("./downloaded_files")) {
## dir.create("./dowloaded_files")
##}
## Download zip file
fileUrl <-
"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileUrl, destfile = "./exdata%2Fdata%2FNEI_data.zip", mode = "wb")
## Unzip file
unzip(
"./exdata%2Fdata%2FNEI_data.zip",
exdir = "./Unzipped"
)
## Read tables from unzipped RDS files
NEI <- readRDS("./Unzipped/summarySCC_PM25.rds")
SCC <- readRDS("./Unzipped/Source_Classification_Code.rds")
## Aggregate the sum of Emissions per year and source
NEI$Emissions <- as.integer(NEI$Emissions)
fipsNEI <- aggregate(NEI$Emissions[NEI$fips == "24510"], list(NEI$year[NEI$fips == "24510"]), sum, na.rm = TRUE)
## Rename properly the col names
colnames(fipsNEI) <- c("Year", "Tot_Emission")
fipsNEI <- fipsNEI[order(fipsNEI$Year), ]
str(fipsNEI)
## Plot with basic graphic
par(mfrow=c(1,1))
png(file = "plot2.png",
width = 480,
height = 480)
## Barplot selected to better visualize the trend
## Measure adjusted to `Kg` from `Grams`
barplot(
(fipsNEI$Tot_Emission)/10^3,
names.arg= fipsNEI$Year,
xlab="Year",
  ylab=expression("PM"[2.5]*" Emissions (Kg)"),
ylim=c(0.0,3.5),
main=expression("Total PM"[2.5]*" Emissions From Baltimore City, Maryland"))
dev.off()
|
88d3d090c465faaaf826e0352d91235ada6243b4 | b9121d329483b371fcd048e036661cfa288bfe08 | /lectures/R/07-data-vis2.R | f6bc3c82890bc9b993162b9b1e0489b68ff27a3e | [] | no_license | earowang/stats220 | 3ca02f5113294a40b3101edfe78e0edcda4c6ca2 | 183c260d3ac3036f4e842829f8f6501c1da3c33e | refs/heads/master | 2023-08-26T02:48:58.138357 | 2021-06-07T22:55:18 | 2021-06-07T22:55:18 | 242,031,925 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,990 | r | 07-data-vis2.R | ## ---- t-shape
library(tidyverse)
set.seed(220)
n <- 30
tbl_pre <- tibble(
x = runif(n), y = runif(n),
type = sample(c(TRUE, FALSE), replace = TRUE, size = n, prob = c(0.1, 0.9)))
ggplot(tbl_pre, aes(x, y)) +
geom_text(aes(angle = ifelse(type, 0, 90), label = "T"),
size = 3, fontface = "bold") +
labs(x = "", y = "") +
guides(angle = FALSE) +
theme(axis.text = element_blank(), axis.ticks = element_blank())
## ---- shape
ggplot(tbl_pre, aes(x, y)) +
geom_point(aes(shape = type), size = 3) +
labs(x = "", y = "") +
guides(shape = FALSE) +
theme(axis.text = element_blank(), axis.ticks = element_blank())
## ---- colour
ggplot(tbl_pre, aes(x, y)) +
geom_point(aes(color = type), size = 3) +
labs(x = "", y = "") +
guides(color = FALSE) +
scale_color_manual(values = c("black", "#e6550d")) +
theme(axis.text = element_blank(), axis.ticks = element_blank())
## ---- time-use
time_use <- readxl::read_xlsx("data/time-use-oecd.xlsx") %>%
rename_with(janitor::make_clean_names) %>%
filter(country %in% c("Australia", "New Zealand", "USA")) %>%
mutate(category = fct_lump(category, n = 5, w = time_minutes,
other_level = "Misc")) %>%
group_by(country, category) %>%
summarise(time_minutes = sum(time_minutes)) %>%
ungroup() %>%
mutate(category = fct_reorder(category, time_minutes))
## ---- fill-cat
time_use %>%
ggplot(aes(country, time_minutes)) +
geom_col(aes(fill = category), position = "dodge") +
scale_fill_viridis_d() +
labs(y = "") +
theme(legend.position = "bottom")
## ---- fill-country
time_use %>%
ggplot(aes(category, time_minutes)) +
geom_col(aes(fill = country), position = "dodge") +
scale_x_discrete(guide = guide_axis(n.dodge = 2)) +
scale_fill_viridis_d() +
labs(y = "") +
theme(legend.position = "bottom")
## ---- angle
time_use %>%
group_by(category) %>%
mutate(time_minutes = time_minutes / sum(time_minutes)) %>%
ungroup() %>%
ggplot(aes(x = "", time_minutes)) +
geom_col(aes(fill = country)) +
facet_wrap(~ category) +
scale_fill_viridis_d() +
coord_polar("y") +
labs(x = "", y = "") +
theme(
legend.position = "bottom",
axis.text.x = element_blank())
## ---- rel-pos
time_use %>%
ggplot(aes(category, time_minutes)) +
geom_col(aes(fill = country), position = "fill") +
labs(y = "") +
scale_x_discrete(guide = guide_axis(n.dodge = 2)) +
scale_fill_viridis_d() +
theme(legend.position = "bottom")
## ---- time-use-bad
readxl::read_xlsx("data/time-use-oecd.xlsx") %>%
rename_with(janitor::make_clean_names) %>%
filter(country %in% c("Australia", "New Zealand", "USA")) %>%
group_by(country, category) %>%
summarise(time_minutes = sum(time_minutes)) %>%
ungroup() %>%
mutate(category = fct_reorder(category, time_minutes)) %>%
ggplot(aes(country, time_minutes)) +
geom_col(aes(fill = category), position = "dodge") +
guides(fill = guide_legend(nrow = 4)) +
labs(y = "") +
theme(legend.position = "bottom")
## ---- colorspace-q
colorspace::hcl_palettes("Qualitative", plot = TRUE, n = 7)
## ---- colorspace-s
colorspace::hcl_palettes("Sequential", plot = TRUE, n = 7)
## ---- colorspace-d
colorspace::hcl_palettes("Diverging", plot = TRUE, n = 7)
## ---- gg-palette
time_use %>%
ggplot(aes(country, time_minutes)) +
geom_col(
aes(fill = category),
position = "dodge") +
scale_fill_brewer(palette = "Dark2") + #<<
labs(y = "") +
theme(legend.position = "bottom")
## ---- gg-manual
time_use %>%
ggplot(aes(country, time_minutes)) +
geom_col(
aes(fill = category),
position = "dodge") +
scale_fill_manual( #<<
values = c("#EF476F", "#FFD166", #<<
"#06D6A0", "#118AB2", #<<
"#073B4C", "grey")) + #<<
labs(y = "") +
theme(legend.position = "bottom")
## ---- covid
covid19 <- read_csv("data/covid19-daily-cases.csv")
covid19
## ---- covid-scale0
covid19 %>%
ggplot(aes(
x = date,
y = confirmed,
colour = country_region)) +
geom_line() +
guides(colour = FALSE) # rm colour legend #<<
## ---- covid-scale1
covid19 %>%
ggplot(aes(
x = date,
y = log10(confirmed), #<<
colour = country_region)) +
geom_line() +
guides(colour = FALSE)
## ---- covid-scale-log10
covid19 %>%
ggplot(aes(
x = date,
y = confirmed,
colour = country_region)) +
geom_line() +
guides(colour = FALSE) +
scale_y_log10() #<<
## ---- covid-rel
covid19_rel <- covid19 %>%
group_by(country_region) %>%
mutate(days = as.numeric(date - min(date))) %>%
ungroup()
covid19_rel
## ---- covid-rel-p
covid19_rel %>%
ggplot(aes(
x = days,
y = confirmed,
colour = country_region)) +
geom_line() +
scale_y_log10() +
guides(colour = FALSE)
## ---- covid-rel-nz
covid19_nz <- covid19_rel %>%
filter(country_region == "New Zealand")
p_nz <- covid19_rel %>%
ggplot(aes(x = days, y = confirmed,
group = country_region)) + #<<
geom_line(colour = "grey", alpha = 0.5) +
geom_line(colour = "#238b45", size = 1, data = covid19_nz) +
scale_y_log10() +
guides(colour = FALSE)
p_nz
## ---- covid-rel-nz-lab
p_nz <- p_nz +
geom_label(aes(
x = max(days), y = max(confirmed), #<<
label = country_region), data = covid19_nz,
colour = "#238b45", nudge_x = 3, nudge_y = .5)
p_nz
## ---- covid-rel-nz-lim
p_nz <- p_nz +
scale_y_log10(labels = scales::label_comma()) + #<<
xlim(c(0, 100)) #<<
p_nz
## ---- covid-rel-nz-title
p_nz <- p_nz +
labs(
x = "Days since March 1",
y = "Confirmed cases (on log10)",
title = "Worldwide coronavirus confirmed cases",
subtitle = "highlighting New Zealand",
caption = "Data source: John Hopkins University, CSSE"
)
p_nz
## ---- covid-rel-nz-theme
# remotes::install_github("Financial-Times/ftplottools")
p_nz +
ftplottools::ft_theme() +
theme(
plot.title.position = "plot",
plot.background = element_rect(fill = "#FFF1E0"))
## ---- plotly
library(plotly)
ggplotly(p_nz) #<<
|
eedc40e123744fbeeb420e844d7c7fcd1efb4f2c | b43aecbaa84a8219521430e8f2832e4311828901 | /Coursera/R/Loop - tapply.R | ddb00e6a5383beab60a460b4a0686118e9f8cfeb | [] | no_license | cpm205/Machine-Learning | f36f0920847d9789954b793b33848e5f099e48d7 | df2a540005301822a746b78e3a48d0578975890e | refs/heads/master | 2022-12-24T05:37:46.428660 | 2020-01-19T09:53:18 | 2020-01-19T09:53:18 | 100,146,868 | 1 | 0 | null | 2022-12-08T02:48:55 | 2017-08-13T01:35:37 | Jupyter Notebook | UTF-8 | R | false | false | 117 | r | Loop - tapply.R | x <- c(rnorm(10),runif(10),rnorm(10,1))
f <- gl(3,10)
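## group means the long way: split by the factor, then average each piece
sapply(split(x, f), mean)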
## above operation can be replaced by tapply
tapply(x, f, mean) |
18b3fa292da0438cc5f9ebd4f51b8b16f4c409ba | 5ac16c68d5b9445a5e76c724b63ea8694e36bedb | /R/mpestrf.R | 7c7275691531cae4030536a2910b9c22cb68dbeb | [] | no_license | lmw40/mpMap | 47542430e0cd02783e6d2de80442fa420122ad10 | af113ccc97fa3ebd1d71b4588218e949e20a3fbb | refs/heads/master | 2021-01-15T10:26:14.922590 | 2015-03-26T02:59:16 | 2015-03-26T02:59:16 | 33,126,984 | 1 | 0 | null | 2015-03-30T14:11:33 | 2015-03-30T14:11:32 | R | UTF-8 | R | false | false | 16,921 | r | mpestrf.R | #' Estimate pairwise recombination fractions between markers
#'
#' Estimates pairwise recombination fractions by maximizing the likelihood for a multi-parent cross over a grid of possible values. Theta values and corresponding LOD scores are returned for each pair of markers in the object.
#' @export
#' @useDynLib mpMap
#' @param object Object of class \code{mpcross}
#' @param r Grid of potential recombination values. If missing the function will maximize over (0, .005, .01, .015, ... , .095, .1, .11, .12, ... .49, .5).
#' @param gpu Boolean value, true indicates that a GPU should be used if available
#' @param lineWeights In some cases of segregation distortion it can be useful to weight the contribution of each line to the likelihood
#' @param mpi Flag for whether to parallelize the computation
#' @return Returned object is of the class 'mpcross' with the additional component \code{rf}. If n.mrk is the number of markers genotypes, this is a list with components:
#' \item{rf$theta}{ n.mrk x n.mrk matrix of estimated recombination fractions between each pair of loci}
#' \item{rf$lod}{ n.mrk x n.mrk matrix of LOD scores at the estimated recombination values}
#' \item{rf$lkhd}{ n.mrk x n.mrk matrix of likelihood values at the estimated recombination values}
#' @seealso \code{\link[mpMap]{mpcross}}, \code{\link[mpMap]{plot.mpcross}}
#' @examples
#' map <- sim.map(len=100, n.mar=11, eq.spacing=TRUE, include.x=FALSE)
#' sim.ped <- sim.mpped(4, 1, 500, 6, 1)
#' sim.dat <- sim.mpcross(map=map, pedigree=sim.ped, qtl=matrix(data=c(1, 50, .4, 0, 0, 0), nrow=1, ncol=6, byrow=TRUE), seed=1)
#' dat.rf <- mpestrf(sim.dat)
#' plot(dat.rf)
mpestrf <- function(object, r, gpu, lineWeights, mpi=FALSE, ...)
{
if(mpi)
{
require(Rmpi) || stop("Unable to find Rmpi")
if (mpi.comm.size()>0)
{
if(inherits(object, "mpcross"))
{
tryCatch({
return(mpestrfMpi(list(object), r, gpu, lineWeights, ...))
}, error = function(err) {
stop(paste("mpestrfMpi failed: ", err))
})
}
else
{
return(mpestrfMpi(object, r, gpu, lineWeights, ...))
}
}
else stop("Attempted to use MPI outside mpirun")
}
else
{
if(inherits(object, "mpcross"))
{
if(missing(lineWeights))
{
lineWeights <- rep(1, nrow(object$finals))
}
if(!is.numeric(lineWeights) || length(lineWeights) != nrow(object$finals)) stop("Invalid input for argument lineWeights")
return(mpestrfSubset(objects = list(object), r=r, gpu=gpu, lineWeights = list(lineWeights)))
}
else
{
if(missing(lineWeights))
{
lineWeights <- lapply(object, function(x) rep(1, nrow(x$finals)))
}
if(length(lineWeights) != length(object)) stop("Invalid input for argument lineWeights")
for(i in 1:length(object))
{
if(!is.numeric(lineWeights[[i]]) || length(lineWeights[[i]]) != nrow(object[[i]]$finals)) stop("Invalid input for argument lineWeights")
}
return(mpestrfSubset(objects = object, r=r, gpu=gpu, lineWeights = lineWeights))
}
}
}
stopifany <- function(...) { stopifnot(!any(...)) }
# hack which I'm yet to understand
custom.bcast.Robj2slave <- function(object) {
tryCatch({
customrecv <- function() {
mpi.send.Robj(0,0,1)
customobjects <<- mpi.recv.Robj(mpi.any.source(),mpi.any.tag())
}
mpi.bcast.Robj2slave(customrecv)
mpi.bcast.cmd(customrecv())
closed_slaves <- 0
n_slaves <- mpi.comm.size()-1
while (closed_slaves < n_slaves) {
message <- mpi.recv.Robj(mpi.any.source(),mpi.any.tag())
message_info <- mpi.get.sourcetag()
slave_id <- message_info[1]
tag <- message_info[2]
mpi.send.Robj(object, slave_id, 1);
closed_slaves <- closed_slaves + 1
}
}, error = function(err) {
stop(paste("custom.bcast.Robj2slave failed: ", err))
})
}
masterStoreTile <- function(rect,rf, theta, lod, lkhd)
{
theta[rect$x1:rect$x2,rect$y1:rect$y2] <- rf$theta[1:rect$sizex,1:rect$sizey]
lod[rect$x1:rect$x2,rect$y1:rect$y2] <- rf$lod[1:rect$sizex,1:rect$sizey]
lkhd[rect$x1:rect$x2,rect$y1:rect$y2] <- rf$lkhd[1:rect$sizex,1:rect$sizey]
}
mpi.run.slavempestrf <- function(gridDimX, gridDimY, theta, lod, lkhd)
{
print("In mpi.run.slavempestrft")
tryCatch({
mpi.bcast.cmd(slavempestrf())
print("broadcast done")
closed_tiles <- 0
n_tiles <- gridDimX * gridDimY
while (closed_tiles < n_tiles) {
print("mpi.run.slavempestrft waiting on a nibble..")
tileDim <- mpi.recv.Robj(mpi.any.source(),mpi.any.tag())
message_info <- mpi.get.sourcetag()
slave_id <- message_info[1]
tag <- message_info[2]
print(paste("mpi.run.slavempestrft waiting on a tile from ",slave_id))
res <- mpi.recv.Robj(slave_id, tag)
masterStoreTile(tileDim, res$rf, theta, lod, lkhd)
closed_tiles <- closed_tiles + 1
print(paste(closed_tiles," of ", n_tiles, " complete"))
}
}, error = function(err) {
stop(paste("mpi.run.slavempestrf failed: ", err))
})
return(res)
}
mpestrfMpi <- function(objects, r, gpu, lineWeights,leaveAsFileBacked=FALSE, onlyMasterWrites=TRUE, dir_base, passObjectsAsFile = FALSE)
{
nmrks <- ncol(objects[[1]]$founders)
if(missing(dir_base)) dir_base <- "bigdata/"
# create the empty output matrices
library(bigmemory,quietly=TRUE)
# decide how we want to decompose the data
gridDimX <- 1
gridDimY <- mpi.comm.size() - 1
tryCatch({
dir.create(dir_base, showWarnings=FALSE)
file_base <- Sys.getenv("PBS_JOBID")
if (file_base == "")
{
require(R.utils)
file_base <- paste(System$getHostname(),Sys.getpid(),sep="-")
}
thetabf=paste(file_base,"theta.bin",sep=".")
thetadf=paste(file_base,"theta.desc",sep=".")
lodbf=paste(file_base,"lod.bin",sep=".")
loddf=paste(file_base,"lod.desc",sep=".")
lkhdbf=paste(file_base,"lkhd.bin",sep=".")
lkhddf=paste(file_base,"lkhd.desc",sep=".")
theta <- filebacked.big.matrix(nmrks, nmrks, backingpath=dir_base, backingfile=thetabf, type="double", descriptorfile=thetadf)
lod <- filebacked.big.matrix(nmrks, nmrks, backingpath=dir_base, backingfile=lodbf, type="double", descriptorfile=loddf)
lkhd <- filebacked.big.matrix(nmrks, nmrks, backingpath=dir_base, backingfile=lkhdbf, type="double", descriptorfile=lkhddf)
thetadesc <- describe(theta)
loddesc <- describe(lod)
lkhddesc <- describe(lkhd)
}, error = function(err) {
stop(paste("mpestrfMpi failed to setup big matrices: ", err))
})
# upload required data to the slaves
tryCatch({
if (passObjectsAsFile)
{
filename <- paste(file_base,'mpcrossobjects',sep=".")
save(objects,file=paste(dir_base,filename,sep="/"))
}
else
{
custom.bcast.Robj2slave(objects)
}
if (!missing(r)) mpi.bcast.Robj2slave(r)
mpi.bcast.Robj2slave(gpu)
if (!missing(lineWeights)) mpi.bcast.Robj2slave(lineWeights)
mpi.bcast.Robj2slave(dir_base)
mpi.bcast.Robj2slave(file_base)
mpi.bcast.Robj2slave(thetadesc)
mpi.bcast.Robj2slave(loddesc)
mpi.bcast.Robj2slave(lkhddesc)
mpi.bcast.Robj2slave(slavempestrf)
mpi.bcast.Robj2slave(nmrks)
mpi.bcast.Robj2slave(gridDimX)
mpi.bcast.Robj2slave(gridDimY)
mpi.bcast.Robj2slave(onlyMasterWrites)
mpi.bcast.Robj2slave(passObjectsAsFile)
}, error = function(err) {
stop(paste("mpestrfMPI failed to bcast data to slaves: ", err))
})
# process the data
tryCatch({
if (onlyMasterWrites) {
# use the method where slaves send the tile back to the master to write out
res <- mpi.run.slavempestrf(gridDimX, gridDimY, theta, lod, lkhd)
} else {
# use the method where slaves write their own tiles to disk
reslist <- mpi.remote.exec(slavempestrf())
if (all(unlist(lapply(reslist,is.atomic)))) {
# one or more slaves returned an error message
stop(paste("Slave failed:",reslist))
}
res <- reslist$slave1$result
}
}, error = function(err) {
stop(paste("A slavempestrft call failed: ", err))
})
if (is.list(res) == FALSE) {
stop(paste("slavempestrf failed",reslist))
}
# extract the results
# it seems we need to reattach if the slaves have made changes..
theta <- attach.big.matrix(thetadesc,backingpath=dir_base)
lkhd <- attach.big.matrix(lkhddesc,backingpath=dir_base)
lod <- attach.big.matrix(loddesc,backingpath=dir_base)
if (leaveAsFileBacked) {
res$rf$thetadesc <- thetadesc
res$rf$loddesc <- loddesc
res$rf$lkhddesc <- lkhddesc
res$rf$theta <- theta
res$rf$lkhd <- lkhd
res$rf$lod <- lod
} else {
tryCatch({
# may not fit into memory...
res$rf$theta <- theta[,]
res$rf$lkhd <- lkhd[,]
res$rf$lod <- lod[,]
}, error = function(err) {
stop(paste("mpestrfMPI failed, consider leaveAsFileBacked. Error: ", err))
})
}
stopifany(is.null(res$rf$theta), is.null(res$rf$lkhd), is.null(res$rf$lod))
# regenerate the dimnames
markerNames <- colnames(res$finals)
dimnames(res$rf$theta) <- list(markerNames,markerNames)
dimnames(res$rf$lod) <- list(markerNames,markerNames)
dimnames(res$rf$lkhd) <- list(markerNames,markerNames)
return(res)
}
slavempestrf <- function() {
library(bigmemory,quietly=TRUE)
library(mpMap,quietly=TRUE)
slaves <- mpi.comm.size() - 1
myID <- mpi.comm.rank()
tryCatch(
{
if (passObjectsAsFile)
{
filename <- paste(file_base,'mpcrossobjects',sep=".")
objects <- load(file=paste(dir_base,filename,sep="/"))
}
else
{
objects <- customobjects
}
}, error = function(err) stop(paste("slave failed to get mpcross objects: ", err)))
str <- paste("Slave ",myID," of ",slaves)
str <- paste(str, "class(objects)=", class(objects))
str <- paste(str, " length(objects)=",length(objects))
str <- paste(str, " lapply(objects,class)=",lapply(objects,class))
tryCatch({
markers <- nmrks
# ncol(objects[[1]]$founders)
tileDimX <- ceiling(markers / gridDimX)
tileDimY <- ceiling(markers / gridDimY)
}, error = function(err) {
stop(paste("slave setup failed: ", err))
})
calcTile <- function(x,y) {
stopifnot(x>0, y>0, x<=gridDimX,y<=gridDimY)
rect <- new.env()
rect$x1 <- max(1, (x-1) * tileDimX + 1)
rect$x2 <- min(x * tileDimX, markers)
rect$y1 <- max(1, (y-1) * tileDimY + 1)
rect$y2 <- min(y * tileDimY, markers)
rect$sizex <- rect$x2 - rect$x1 + 1
rect$sizey <- rect$y2 - rect$y1 + 1
rect
}
getLock <- function(desc)
{
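    # dir.create() as a mutex: directory creation is atomic on most
    # filesystems, so spinning until it succeeds serialises writers of the
    # shared filebacked big.matrix objects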
dir.create(".locks", showWarnings=FALSE)
lockname <- file.path(".locks",desc@description$filename)
success <- FALSE
attempts <- 0
while(success != TRUE)
{
success <- dir.create(lockname, showWarnings=FALSE)
if (success != TRUE)
{
Sys.sleep(1)
attempts <- attempts + 1
}
}
}
releaseLock <- function(desc)
{
lockname <- file.path(".locks",desc@description$filename)
unlink(lockname, recursive = TRUE)
}
storeTile <- function(rect, rf)
{
getLock(thetadesc)
theta <- attach.big.matrix(thetadesc,backingpath=dir_base)
theta[rect$x1:rect$x2,rect$y1:rect$y2] <- rf$theta[1:rect$sizex,1:rect$sizey]
bigmemory::flush(theta)
releaseLock(thetadesc)
getLock(loddesc)
lod <- attach.big.matrix(loddesc,backingpath=dir_base)
lod[rect$x1:rect$x2,rect$y1:rect$y2] <- rf$lod[1:rect$sizex,1:rect$sizey]
bigmemory::flush(lod)
releaseLock(loddesc)
getLock(lkhddesc)
lkhd <- attach.big.matrix(lkhddesc,backingpath=dir_base)
lkhd[rect$x1:rect$x2,rect$y1:rect$y2] <- rf$lkhd[1:rect$sizex,1:rect$sizey]
bigmemory::flush(lkhd)
releaseLock(lkhddesc)
}
sendResToMaster <- function(rect, res)
{
mpi.send.Robj(rect,0,1)
mpi.send.Robj(res,0,1)
}
inc <- function(x) { eval.parent(substitute(x <- x + 1)) }
tryresult <- tryCatch({
x <- 1
while(x <= gridDimX) {
y <- 1
while(y <= gridDimY) {
if (y %% slaves == myID-1) {
str <- paste(str, "[" , x , "," , y , "] ")
tryCatch({
tileDim <- calcTile(x,y)
}, error = function(err) {
stop(paste("calcTile failed: ", err))
})
if(!exists("lineWeights")) lineWeights <- lapply(objects, function(x) vector(mode="integer", length=0))
if(!exists("r")) r <- c(0:20/200, 11:50/100)
tryCatch({
res <- mpMap:::mpestrfSubset(objects=objects,
gpu=gpu, r=r,
start1=tileDim$x1, finish1=tileDim$x2+1,
start2=tileDim$y1, finish2=tileDim$y2+1)
}, error = function(err) {
stop(paste("mpestrfSubset failed: ", err, class(objects)))
})
if (onlyMasterWrites) {
tryCatch({
sendResToMaster(tileDim,res)
}, error = function(err) {
stop(paste("sendResToMaster failed: ", err))
})
} else {
tryCatch({
storeTile(tileDim,res$rf)
}, error = function(err) {
stop(paste("storeTile failed: ", err))
})
}
}
inc(y)
}
inc(x)
}
}, error = function(err) return(list(mesg=paste(str,err),result=NULL)))
return(list(mesg=paste(str),result=res))
}
mpestrfSubset <-
function(objects, r, gpu, lineWeights, start1, finish1, start2, finish2)
{
if(missing(lineWeights))
{
lineWeights <- lapply(objects, function(x) rep(1, nrow(x$finals)))
}
lineWeights <- lapply(lineWeights, as.numeric)
if (missing(objects)) stop("Missing a required argument for this function")
if (missing(r)) r <- c(0:20/200, 11:50/100)
for(i in 1:length(objects))
{
if (!inherits(objects[[i]], "mpcross")) stop("Object must be of class mpcross")
if(is.null(colnames(objects[[i]]$finals)) && !is.null(colnames(objects[[i]]$founders)))
{
warning("Entry finals did not have column names, replacing with column names from entry founders")
colnames(objects[[i]]$finals) <- colnames(objects[[i]]$founders)
}
if(is.null(colnames(objects[[i]]$founders)) && !is.null(colnames(objects[[i]]$finals)))
{
warning("Entry founders did not have column names, replacing with column names from entry finals")
colnames(objects[[i]]$founders) <- colnames(objects[[i]]$finals)
}
if(is.null(colnames(objects[[i]]$founders)) && is.null(colnames(objects[[i]]$finals)))
{
stop("One of the values founders and finals must have column names")
}
    if(ncol(objects[[i]]$founders) != ncol(objects[[i]]$finals))
{
stop("Founders and finals data matrices must have the same number of columns")
}
if(any(colnames(objects[[i]]$founders) != colnames(objects[[i]]$finals)))
{
stop("Columns names for object$founders and object$finals were inconsistent")
}
objects[[i]]$pedigree <- convertped(objects[[i]]$pedigree)
n.founders <- nrow(objects[[i]]$founders)
n.loci <- ncol(objects[[i]]$founders)
n.finals <- nrow(objects[[i]]$finals)
if(missing(gpu)) gpu <- FALSE
if(class(objects[[i]]$founders) != "matrix") objects[[i]]$founders <- as.matrix(objects[[i]]$founders, rownames.force=TRUE)
if(class(objects[[i]]$finals) != "matrix") objects[[i]]$finals <- as.matrix(objects[[i]]$finals, rownames.force=TRUE)
if(mode(objects[[i]]$founders) != "integer") mode(objects[[i]]$founders) <- "integer"
if(mode(objects[[i]]$finals) != "integer") mode(objects[[i]]$finals) <- "integer"
if(class(objects[[i]]$id) != "integer") objects[[i]]$id <- as.integer(objects[[i]]$id)
if(class(objects[[i]]$fid) != "integer") objects[[i]]$fid <- as.integer(objects[[i]]$fid)
if(missing(start1) || missing(finish1))
{
marker1Range <- c(1, n.loci+1)
}
else marker1Range <- c(start1, finish1)
if(missing(start2) || missing(finish2))
{
marker2Range <- c(1, n.loci+1)
}
else marker2Range <- c(start2, finish2)
# if(any(apply(objects[[i]]$founders, 2, function(x) length(unique(x)) == 1))) stop("Non-segregating markers must be removed prior to calculation of recombination fractions")
}
rpairs <- .Call("rfhaps", objects, r, marker1Range, marker2Range, lineWeights, gpu, -2, PACKAGE="mpMap")
output <- objects[[1]]
if(length(objects) > 1)
{
markers <- colnames(objects[[1]]$founders)
n.markers <- length(markers)
output$founders <- matrix(0L, nrow=0, ncol=n.markers)
colnames(output$founders) <- markers
output$finals <- matrix(0L, nrow=0, ncol=n.markers)
colnames(output$finals) <- markers
output$pedigree <- NULL
output$pheno <- NULL
}
output$rf <- rpairs
return(output)
}
|
9d250a7b54d2cdf9558b824e1833be76ada8d438 | d0c72def22b832453eda4ecaf2d3ef350dc5fab0 | /se.R | 15f0ed050e2a6f8ebea68e7c6fbf6d2c8922a636 | [] | no_license | diazrenata/ldats2020 | 9e12ac91ba4e4a172a3515880281a6632e63f7cb | 10dfafb89443a90a9c430328f52ef32058dcf443 | refs/heads/master | 2023-07-06T07:57:13.605804 | 2021-05-20T19:07:22 | 2021-05-20T19:07:22 | 294,782,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,144 | r | se.R | library(ggplot2)
library(readr)
library(dplyr)
all_evals_bbs_rtrg_102_18_cv <- read_csv("all_evals_bbs_rtrg_102_18_cv.csv")
#View(all_evals_bbs_rtrg_102_18_cv)
#library(ggplot2)
ae <- all_evals_bbs_rtrg_102_18_cv %>% group_by(k, seed, cpts, nit, nfolds) %>% summarize(mean_ll = mean(sum_loglik), se_ll = sd(sum_loglik) / sqrt(nfolds)) %>% ungroup() %>% distinct() %>%
mutate(seed = as.factor(seed))
best_se <- filter(ae, mean_ll == max(ae$mean_ll))
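# one-SE rule: flag every configuration whose mean loglik falls within one
# standard error of the best-scoring model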
ae <- ae %>%
group_by_all() %>%
mutate(good_se = mean_ll >= best_se$mean_ll[1] - best_se$se_ll[1])
ggplot(ae, aes(k, mean_ll, color = good_se)) + geom_point() + facet_wrap(vars(cpts)) + theme(legend.position = "none")
ggplot(filter(ae, good_se), aes(k, mean_ll, color = seed)) + geom_point() + facet_wrap(vars(cpts)) + theme(legend.position = "none")
good_se_configs <- filter(ae, good_se) %>%
filter(k == 2,
cpts == 3) %>%
arrange(desc(mean_ll))
library(MATSS)
library(drake)
library(LDATS)
library(cvlt)
source(here::here("analysis", "fxns", "crossval_fxns.R"))
h <- MATSS::get_bbs_route_region_data(route = 102, region = 18)
an_lda <- cvlt::LDA_set_user_seeds(h$abundance, topics = 2, seed = 6)
ts_2 <- TS_on_LDA(an_lda, as.data.frame(h$covariates), formulas = ~1, nchangepoints = 3, timename = "year", control = TS_control(nit = 100))
plot(an_lda)
gamma_plot(ts_2[[1]])
rho_plot(ts_2[[1]])
abund_probs <- get_abund_probabilities(list(full = h), fitted_lda = an_lda[[1]], fitted_ts = ts_2[[1]], max_sims = 100)
one_prob <- abund_probs[[1]] %>%
unique()
library(vegan)
bc <- vegdist(one_prob)
bc
# checking hasty
library(ggplot2)
library(readr)
library(dplyr)
all_evals_bbs_rtrg_102_18_cv_h <- read_csv("all_evals_f_hasty_bbs_rtrg_102_18_cv.csv")
#View(all_evals_bbs_rtrg_102_18_cv)
#library(ggplot2)
ae_h <- all_evals_bbs_rtrg_102_18_cv_h %>% group_by(k, seed, cpts, nit, nfolds) %>% summarize(mean_ll = mean(sum_loglik), se_ll = sd(sum_loglik) / sqrt(nfolds)) %>% ungroup() %>% distinct() %>%
mutate(seed = as.factor(seed)) %>%
rename(mean_ll_h = mean_ll,
se_ll_h = se_ll)
ae <- left_join(ae, select(ae_h, k, seed, cpts, mean_ll_h, se_ll_h))
ggplot(ae, aes(mean_ll, mean_ll_h)) +
geom_point()
ggplot(ae, aes(se_ll, se_ll_h)) +
geom_point()
best_se_h <- filter(ae, mean_ll_h == max(ae$mean_ll_h))
best_se_h
best_se
ae <- ae %>%
group_by_all() %>%
mutate(good_se_h = mean_ll_h >= best_se_h$mean_ll_h[1] - best_se_h$se_ll_h[1]) %>%
mutate(good_se = mean_ll >= best_se$mean_ll[1] - best_se$se_ll[1])
ggplot(ae, aes(good_se, good_se_h)) +
geom_point()
ggplot(ae, aes(k, mean_ll, color = good_se)) + geom_point() + facet_wrap(vars(cpts)) + theme(legend.position = "none")
ggplot(filter(ae, good_se), aes(k, mean_ll, color = seed)) + geom_point() + facet_wrap(vars(cpts)) + theme(legend.position = "none")
good_se_configs <- filter(ae, (good_se + good_se_h) > 0)
ggplot(good_se_configs, aes(k, cpts, color = good_se_h)) + geom_point()
best_se_config <- filter(good_se_configs, cpts == min(good_se_configs$cpts))
best_se_config <- best_se_config %>%
filter(k == min(best_se_config$k))
an_lda <- cvlt::LDA_set_user_seeds(h$abundance, topics = 2, seed = 6)
ts_2_100 <- TS_on_LDA(an_lda, as.data.frame(h$covariates), formulas = ~1, nchangepoints = 3, timename = "year", control = TS_control(nit = 100))
plot(an_lda)
gamma_plot(ts_2_100[[1]])
rho_plot(ts_2_100[[1]])
abund_probs <- get_abund_probabilities(list(full = h), fitted_lda = an_lda[[1]], fitted_ts = ts_2_100[[1]], max_sims = 100)
one_prob <- abund_probs[[1]] %>%
unique()
library(vegan)
bc <- vegdist(one_prob)
bc
ts_2_1000 <- TS_on_LDA(an_lda, as.data.frame(h$covariates), formulas = ~1, nchangepoints = 3, timename = "year", control = TS_control(nit = 1000))
plot(an_lda)
gamma_plot(ts_2_1000[[1]])
gridExtra::grid.arrange(grobs = list(rho_plot(ts_2_1000[[1]]) + ggtitle("1000"),
rho_plot(ts_2_100[[1]]) + ggtitle("100")))
abund_probs <- get_abund_probabilities(list(full = h), fitted_lda = an_lda[[1]], fitted_ts = ts_2_1000[[1]], max_sims = 100)
one_prob <- abund_probs[[1]] %>%
unique()
library(vegan)
bc <- vegdist(one_prob)
bc
|
20ef54f3cb6a0c5c78612ad5a33463e0e47b328a | 9022bd8259d28cb22fc6c99212a240bf21296e04 | /R/methods-inferMagicFounder.R | db84f1286b9d4c0741892038cc819a6893a87eb9 | [] | no_license | tavareshugo/MagicHelpR | e82fe83169bf2c2f657994e46f8d630b10c2b8a0 | 0077f511dab084f2ec0c9a2c7a5fb950549d7106 | refs/heads/master | 2021-01-20T15:41:39.531644 | 2020-05-01T07:19:40 | 2020-05-01T07:19:40 | 63,859,789 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,828 | r | methods-inferMagicFounder.R | #' Infer most likely founder for a given SNP
#'
#' @param x an object of class MagicData.
#' @param marker name of SNP marker to infer founder.
#' @param prob_thr probability threshold for attributing a genotype to a founder.
#'
#' @export
#' @rdname inferMagicFounder
setGeneric("inferMagicFounder",
function(x, marker, prob_thr = 0.5)
standardGeneric("inferMagicFounder"))
#' @return a data.frame with the most likely founder for each MAGIC line.
#'
#' @export
#' @rdname inferMagicFounder
#'
#' @examples
#' # to be added...
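#' # a hedged sketch of intended usage (object and marker names below are
#' # hypothetical; a real call needs a fitted MagicGenPhen object):
#' # founder_df <- inferMagicFounder(magic_obj, marker = "m1", prob_thr = 0.5)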
setMethod("inferMagicFounder", "MagicGenPhen",
function(x, marker, prob_thr = 0.5){
# Get the genotype probabilities for the specified marker
gen_prob <- getGenotypes(x)[[marker]]
gen_snp <- getGenotypes(x, type = "allele")[[marker]]
# Convert genotype probabilities to binary using user's threshold
gen_prob <- ifelse(gen_prob > prob_thr, 1, 0)
# Report number of times each MAGIC line is represented
n_magic <- rowSums(gen_prob)
message(sum(n_magic == 1), " MAGIC lines were attributed to exactly one founder.")
message(sum(n_magic == 0), " MAGIC lines were not attributed a founder.")
message(sum(n_magic > 1), " MAGIC lines were attributed more than one founder.")
# Presumed founder allele for each magic
magic_founder <- apply(gen_prob, 2, function(x) ifelse(x == 1, names(x), NA)) %>%
as.data.frame(stringsAsFactors = F) %>%
gather(founder, magic, na.rm = T)
# Make sure that founder is a factor with levels for all founders
magic_founder$founder <- factor(magic_founder$founder, levels = colnames(gen_prob))
# Merge with phenotypes
magic_founder <- merge(magic_founder, getPhenotypes(x), by = "magic")
# Add SNP genotypes
magic_founder$allele <- gen_snp[magic_founder$magic]
return(magic_founder)
})
|
0777663c8a6ae486cfd3e956a108ff8b616b616b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/jsonlite/examples/serializeJSON.Rd.R | 9e25c0739a5a1cdeb15a12754403624c750687b6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 729 | r | serializeJSON.Rd.R | library(jsonlite)
### Name: serializeJSON
### Title: serialize R objects to JSON
### Aliases: serializeJSON unserializeJSON
### ** Examples
jsoncars <- serializeJSON(mtcars)
mtcars2 <- unserializeJSON(jsoncars)
identical(mtcars, mtcars2)
set.seed('123')
myobject <- list(
mynull = NULL,
mycomplex = lapply(eigen(matrix(-rnorm(9),3)), round, 3),
mymatrix = round(matrix(rnorm(9), 3),3),
myint = as.integer(c(1,2,3)),
mydf = cars,
mylist = list(foo='bar', 123, NA, NULL, list('test')),
mylogical = c(TRUE,FALSE,NA),
mychar = c('foo', NA, 'bar'),
  somemissings = c(1,2,NA,NaN,5, Inf, 7, -Inf, 9, NA),
myrawvec = charToRaw('This is a test')
);
identical(unserializeJSON(serializeJSON(myobject)), myobject);
|
64c40d881edcf44042177babd38cef259aad9450 | 10475485cd2efa1a57e6a2043ad03f29faf5c963 | /man/create_statement_cran_versions.Rd | 5152986b40e00a83009db9f23f14e226c7b20a6f | [] | no_license | cran/dockr | 6efe668f71d42e51cade47676faa22bf9de6bea1 | d6f13708d3bce79b146ced6feeb2c23f0761ba25 | refs/heads/master | 2020-12-21T22:36:40.435776 | 2019-12-20T09:30:02 | 2019-12-20T09:30:02 | 236,586,401 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 612 | rd | create_statement_cran_versions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_statement_cran_versions.R
\name{create_statement_cran_versions}
\alias{create_statement_cran_versions}
\title{Create Install CRAN Packages Statement for Dockerfile}
\usage{
create_statement_cran_versions(deps, verbose = TRUE)
}
\arguments{
\item{deps}{\code{data.frame} dependency packages with version numbers.}
\item{verbose}{\code{logical} should messages be printed or not?}
}
\value{
\code{character} statement(s) for Dockerfile.
}
\description{
Create Install CRAN Packages Statement for Dockerfile
}
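% A minimal, hypothetical illustration: the column names of 'deps' ("pkg",
% "vrs") are assumed for this sketch and may differ from what dockr builds.
\examples{
\dontrun{
deps <- data.frame(pkg = c("glue", "rlang"), vrs = c("1.4.2", "0.4.10"))
create_statement_cran_versions(deps, verbose = FALSE)
}
}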
|
0060efeccd595c2e8f79787ec7850b3f343ec7da | fef4e424895c532b27ff52fd4331a6af5145d3c7 | /codes/chapter6.R | 92389bd2a008e791c63426cceb4e12f1338a98ac | [] | no_license | harryyang1982/tmwithR | fe7101a51ceafcd62ae6326a2a9a3ee6051b73a5 | ae8becd4b0a99fb123b63a71f145c7d8c4f7708c | refs/heads/master | 2021-05-13T22:01:11.185303 | 2018-04-09T06:38:38 | 2018-04-09T06:38:38 | 116,477,283 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,789 | r | chapter6.R | library(tidyverse)
# Latent Dirichlet Allocation
library(topicmodels)
data("AssociatedPress")
AssociatedPress
ap_lda <- LDA(AssociatedPress, k = 2, control = list(seed = 1234))
ap_lda
## Word-Topic Probabilities
library(tidytext)
ap_topics <- tidy(ap_lda, matrix = "beta")
ap_topics
ap_top_terms <- ap_topics %>%
group_by(topic) %>%
top_n(10, beta) %>%
ungroup() %>%
arrange(topic, -beta)
ap_top_terms %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = F) +
facet_wrap(~topic, scales = "free") +
coord_flip()
beta_spread <- ap_topics %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate(log_ratio = log2(topic2 / topic1))
beta_spread %>%
mutate(topic_index = ifelse(log_ratio >= 0, "topic2", "topic1"),
topic_checker = abs(log_ratio)) %>%
group_by(topic_index) %>%
top_n(10, topic_checker) %>%
ungroup() %>%
mutate(term = reorder(term, log_ratio)) %>%
ggplot(aes(term, log_ratio, fill = log_ratio > 0)) +
geom_col(show.legend = F) +
ylab("Log2 ratio of beta in topic 2 / topic 1") +
coord_flip()
## Document-Topic Probabilities
ap_documents <- tidy(ap_lda, matrix = "gamma")
ap_documents
tidy(AssociatedPress) %>%
filter(document == 6) %>%
arrange(desc(count))
# Example: The Great Library Heist
titles <- c("Twenty Thousand Leagues under the Sea", "The War of the Worlds",
"Pride and Prejudice", "Great Expectations")
library(gutenbergr)
books <- gutenberg_works(title %in% titles) %>%
gutenberg_download(meta_fields = "title")
reg <- regex("^chapter ", ignore_case = T)
by_chapter <- books %>%
group_by(title) %>%
mutate(chapter = cumsum(str_detect(text, reg))) %>%
ungroup() %>%
filter(chapter > 0) %>%
unite(document, title, chapter)
by_chapter
by_chapter_word <- by_chapter %>%
unnest_tokens(word, text)
by_chapter_word
word_counts <- by_chapter_word %>%
anti_join(stop_words) %>%
count(document, word, sort = T) %>%
ungroup()
word_counts
## LDA on Chapters
chapters_dtm <- word_counts %>%
cast_dtm(document, word, n)
chapters_lda <- LDA(chapters_dtm, k = 4, control = list(seed = 1234))
chapters_lda
chapter_topics <- tidy(chapters_lda, matrix = "beta")
chapter_topics
top_terms <- chapter_topics %>%
group_by(topic) %>%
top_n(5, beta) %>%
ungroup() %>%
arrange(topic, -beta)
top_terms
top_terms %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = F) +
facet_wrap(~topic, scales = "free") +
coord_flip()
## Per-Document Classification
chapters_gamma <- tidy(chapters_lda, matrix = "gamma")
chapters_gamma
chapters_gamma <- chapters_gamma %>%
separate(document, c("title", "chapter"), sep = "_", convert = T)
chapters_gamma
chapters_gamma %>%
mutate(title = reorder(title, gamma * topic)) %>%
ggplot(aes(factor(topic), gamma)) +
geom_boxplot() +
facet_wrap(~title)
chapter_classifications <- chapters_gamma %>%
group_by(title, chapter) %>%
top_n(1, gamma) %>%
ungroup()
chapter_classifications
book_topics <- chapter_classifications %>%
count(title, topic) %>%
group_by(title) %>%
top_n(1, n) %>%
ungroup() %>%
transmute(consensus = title, topic)
book_topics
chapter_classifications %>%
inner_join(book_topics, by = "topic") %>%
filter(title != consensus)
## By-Word Assignments: augment
assignments <- augment(chapters_lda, data = chapters_dtm)
assignments
assignments <- assignments %>%
separate(document, c("title", "chapter"), sep = "_", convert = T) %>%
inner_join(book_topics, by = c(".topic" = "topic"))
assignments
assignments %>%
count(title, consensus, wt = count) %>%
group_by(title) %>%
mutate(percent = n / sum(n)) %>%
ggplot(aes(consensus, title, fill = percent)) +
geom_tile() +
scale_fill_gradient2(high = "red", label = scales::percent_format()) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, hjust = 1),
panel.grid = element_blank()) +
labs(x = "Book words were assigned to",
y = "Book words came from",
fill = "% of assignments")
wrong_words <- assignments %>%
filter(title != consensus)
wrong_words
wrong_words %>%
count(title, consensus, term, wt = count) %>%
ungroup() %>%
arrange(desc(n))
word_counts %>%
filter(word == "flopson")
# Alternative LDA Implementations
library(mallet)
collapsed <- by_chapter_word %>%
anti_join(stop_words, by = "word") %>%
mutate(word = str_replace(word, "'", "")) %>%
group_by(document) %>%
summarise(text = paste(word, collapse = " "))
collapsed
file.create(empty_file <- tempfile())
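# mallet.import() expects a stopword-list file; pointing it at an empty temp
# file effectively disables MALLET's own stopword removal (stop words were
# already dropped above with anti_join)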
docs <- mallet.import(collapsed$document, collapsed$text, empty_file)
mallet_model <- MalletLDA(num.topics = 4)
mallet_model$loadDocuments(docs)
mallet_model$train(100)
tidy(mallet_model)
tidy(mallet_model, matrix = "gamma")
term_counts <- rename(word_counts, term = word)
augment(mallet_model, term_counts)
assignments2 <- augment(mallet_model, term_counts)
assignments2 <- assignments2 %>%
separate(document, c("title", "chapter"), sep = "_", convert = T) %>%
inner_join(book_topics, by = c(".topic" = "topic"))
assignments2 %>%
count(title, consensus, wt = count) %>%
group_by(title) %>%
mutate(percent = n / sum(n)) %>%
ggplot(aes(consensus, title, fill = percent)) +
geom_tile() +
scale_fill_gradient2(high = "red", label = scales::percent_format()) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, hjust = 1),
panel.grid = element_blank()) +
labs(x = "Book words were assigned to",
y = "Book words came from",
fill = "% of assignments")
|
3785d74105c8db81456e14df85d1e10b089fb72a | 29585dff702209dd446c0ab52ceea046c58e384e | /ProgGUIinR/inst/Examples/ch-gWidgets/ex-gWidgets-basic-menubar.R | d6e5942088de351ae24457cd9019f0c104127bbf | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,510 | r | ex-gWidgets-basic-menubar.R | ###################################################
### code chunk number 133: Controls.Rnw:1424-1441
###################################################
stub <- function(h,...) gmessage("called handler",
parent = window)
action_list = list(
new = gaction(label = "new", icon = "new",
handler = stub, parent = window),
open = gaction(label = "open", icon = "open",
handler = stub, parent = window),
save = gaction(label = "save", icon = "save",
handler = stub, parent = window),
save.as = gaction(label = "save as...", icon = "save as...",
handler = stub, parent = window),
quit = gaction(label = "quit", icon = "quit",
handler = function(...) dispose(window), parent = window),
cut = gaction(label = "cut", icon = "cut",
handler = stub, parent = window)
)
###################################################
### code chunk number 134: Controls.Rnw:1445-1451
###################################################
window <- gwindow("gtoolbar example")
tool_bar_list<- c(action_list[c("new","save")],
sep = gseparator(),
action_list["quit"])
tool_bar <- gtoolbar(tool_bar_list, cont = window)
gtext("Lorem ipsum ...", cont = window)
###################################################
### code chunk number 135: Controls.Rnw:1478-1491
###################################################
menu_bar_list <- list(file = list(
new = action_list$new,
open = action_list$open,
save = action_list$save,
"save as..." = action_list$save.as,
sep = gseparator(),
quit = action_list$quit
),
edit = list(
cut = action_list$cut
)
)
###################################################
### code chunk number 136: Controls.Rnw:1494-1498
###################################################
window <- gwindow("Menu bar example")
menu_bar <- gmenu(menu_bar_list, cont = window)
tool_bar <- gtoolbar(tool_bar_list, cont = window)
txt_widget <- gtext("", cont = window, expand = TRUE)
###################################################
### code chunk number 137: Controls.Rnw:1527-1535
###################################################
no_changes <- c("save","save.as","cut")
keyhandler <- function(...) {
for(i in no_changes)
enabled(action_list[[i]]) <-
(nchar(svalue(txt_widget)) > 0)
}
addHandlerKeystroke(txt_widget, handler = keyhandler)
keyhandler()
|
c717911b3fbcc4f63685abd54719335c9b8e1717 | 9fc5d07956259ecedcd36d1f1107975ffdf300f4 | /src/experiments/covariance_estimation/collect_results.R | 1bebd60d1065c152c11b0665042e3493920f1a20 | [] | no_license | ZhuoqunWang0120/LTN_analysis | 9412487b286a75063c1be7f533616f8caaddc066 | ac3911eadfece22d1f02adf9d7de856e17968404 | refs/heads/main | 2023-06-07T06:59:29.065089 | 2021-06-27T23:29:59 | 2021-06-27T23:29:59 | 381,838,286 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,370 | r | collect_results.R | #!/usr/bin/env Rscript
argv=commandArgs(TRUE)
WORK_DIR=argv[1]
lambda=argv[2]
nsim1=as.numeric(argv[3])
source(paste0(WORK_DIR,"/src/utility/utility.R"))
if (lambda=='0'){lambda='hp'}
risk=matrix(-1,4,4,dimnames = list(c("Frobenius","L1","L_inf","spectral"),c("LN-hub","LN-block","LN-sparse","DTM")))
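# rows: the matrix-norm losses used to score each estimate; columns: the four
# simulation scenarios. -1 is a sentinel marking cells not yet filled in.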
# DTM
result_dir=paste0(WORK_DIR,"/results/covariance_estimation/dtm/")
clrcov0=readRDS(paste0(WORK_DIR,"/cache/dtm/clrcov_true.RData"))
files=grep(paste0('lambda',lambda,'.RData'),list.files(result_dir,full.names = T),value = T)
loss=do.call(rbind,lapply(files, function(x){matloss(cov2cor(readRDS(x))-cov2cor(clrcov0))}))
risk[,'DTM']=(colSums(loss)/nrow(loss))
# LN
result_dir=paste0(WORK_DIR,"/results/covariance_estimation/ln/")
for (m in c('hub','block','sparse')){
files=grep(paste0(m,'_lambda',lambda,'.RData'),list.files(result_dir,full.names = T),value = T)
loss=do.call(rbind,lapply(1:nsim1, function(x){
clrcov0=readRDS(paste0(WORK_DIR,"/cache/ln/",m,"clrcov_",x,".RData"))
clrcov1=readRDS(grep(paste0('i',x,'_'),files,value=T))
matloss(cov2cor(clrcov1)-cov2cor(clrcov0))}))
risk[,paste0("LN-",m)]=(colSums(loss)/nrow(loss))
}
if (lambda=='hp'){lambda='GammaPrior'}
system(paste0('mkdir -p ',WORK_DIR,"/results/covariance_estimation/risk/"))
saveRDS(risk,paste0(WORK_DIR,"/results/covariance_estimation/risk/lambda_",lambda,".RData"))
|
50d61ed01aee99f4dd261bc8cd96695777f8afd3 | 1bda17508c8734f8aa1132d4f8f80b91ba332ef7 | /R/0.preparation/old/2.clean_names/OLD/10feb_failed/join_shape_population.R | 8f51f78038dd217fd4626b547ca353158841ab3d | [] | no_license | araupontones/ETH_IC | b08b12bee201905859292e53de36ced454bd5dba | 681c6d7ae98926a111ed1b887974f0741519e2d4 | refs/heads/main | 2023-08-03T11:59:55.612161 | 2021-09-27T06:42:58 | 2021-09-27T06:42:58 | 329,865,799 | 0 | 0 | null | 2021-09-27T06:42:59 | 2021-01-15T09:25:24 | R | UTF-8 | R | false | false | 2,700 | r | join_shape_population.R | #joint population census
source("Set_up.R")
clean_zone = import(file.path(dir_data_reference_cleanINT, "population_SNNPR_Oromiya_int.rds"))
centroids_names = import(file.path(dir_data_reference_cleanINT, "centroids_int.rds"))
#Zones-----------------------------------------------------------------------
#check difference between centroids and populations
zones_diff = check_diffs_(centroids_names,
clean_zone,
Zone,
Zone)
# Fuzzy join zones
joint_zones_fuzzy = fuzzy_join_(zones_diff$left,
zones_diff$right,
by = Zone,
match_fun = is_name_distance_jacard_3,
method = "jaccard",
suffix_left = "_shape")
#join with population, and get Z_code from centroids
joint_1_zones_clean = left_join(clean_zone, select(joint_zones_fuzzy,c(Zone, Zone_shape)),
by=c("Zone"))
#WAREDA-----------------------------------------------------------------------
#check difference and create look up tables at the wareda level
wareda_diff = check_diffs_(centroids_names %>% mutate(Zone_Wareda = paste(Region, Zone, Wareda, sep = "-")),
joint_1_zones_clean%>% mutate(Zone_Wareda = paste(Region,Zone_shape, Wareda, sep = "-")),
Zone_Wareda,
c(Zone_Wareda, Region, Zone, Wareda)
)
# Fuzzy join waredas
joint_waredas_fuzzy = fuzzy_join_(wareda_diff$left,
wareda_diff$right,
by = Zone_Wareda,
#match_fun = is_name_distance_jacard_3,
match_fun = is_name_distance_jw_1.1,
method = "jw",
suffix_left = "_shape") %>%
#keep best match
group_by(Zone_Wareda) %>%
filter(distance == max(distance)) %>%
ungroup()%>%
add_count(Zone_Wareda, name = "n_", wt = 1) %>%
add_count(Zone_Wareda_shape, name = "n_shape", wt = 1) %>%
#arrange(desc(distance)) %>%
relocate(Region, Zone, Wareda) %>%
arrange(Region, Zone, Wareda) %>%
select(-c(starts_with("Zone_Wareda"),distance, starts_with("n"), starts_with("g"), Region_shape))
#join with population
joint_2_waredas_clean = joint_1_zones_clean %>%
left_join(joint_waredas_fuzzy,
by=c("Region", "Zone", "Wareda","Zone_shape")) %>%
mutate(W_merged_= case_when(is.na(Wareda_shape) ~ "Only in census",
T ~ "Merged"))
# quick sanity check on the zone names in the census data
unique(clean_zone$Zone)
|
e879319110152b88150ec46b679adf47561a641a | addd191fee0de966c2c5b717dd083e91ea960438 | /R/name-dashboard-file.R | 3fbcf44b5731c08a413c1a21f73386d8babc3b7c | ["MIT"] | permissive | GiuseppeTT/RiskParityBrazil | a8efa0b28d213b5f89c1ca8fbdf3306cae8f954f | f2fd3c8db12b1b48585187b2ba267e482b001370 | refs/heads/main | 2023-05-03T05:48:21.700520 | 2021-05-07T14:48:25 | 2021-05-07T14:48:25 | 350,794,700 | 0 | 0 | MIT | 2021-05-07T14:48:26 | 2021-03-23T17:11:23 | R | UTF-8 | R | false | false | 228 | r | name-dashboard-file.R | #' @title
#' Name dashboard file
#'
#' @description
#' TODO
#'
#' @param name
#' TODO
#'
#' @return
#' TODO
#'
#' @export
name_dashboard_file <- function(
name
) {
"{name}.html" %>%
glue() %>%
return()
}
|
27cb7ddab3bdd2b9067fd95c08edcd53286ee2e3 | ea9c56c3fe7139f6eee0a4271ac4947e774da412 | /R/rlcd.R | 2ce9e0151d03fecb7ad0179ef3978fd340783fd7 | [] | no_license | cran/LogConcDEAD | a57e8a9f53610806a8a93cc77a6c6a58e9914218 | 9b0b745605a395a9b85f2a2f32e29ef74b38e573 | refs/heads/master | 2023-04-13T04:26:48.647672 | 2023-04-05T16:53:27 | 2023-04-05T16:53:27 | 17,691,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,701 | r | rlcd.R | ## Generate samples from mle log-concave density
'rlcd' <- function (n = 1, lcd, method = c("Independent","MH")) {
method <- match.arg(method)
triang <- lcd$triang
x <- lcd$x
logMLE <- lcd$logMLE
nrows <- nrow(triang)
d <- ncol(x)
prob <- rep(0, nrows)
for (j in 1:nrows) {
prob[j] <- lcd$detA[j] * JAD(lcd$logMLE[triang[j, ]])
}
prob <- prob/sum(prob)
samples <- matrix(0, nrow = n, ncol = d)
## pick a simplex for each sample
simp <- sample(1:nrows, n, prob = prob, replace = TRUE)
if (method == "MH"){
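    ## Metropolis-Hastings: seed the chain with one accepted rejection-sample,
    ## then accept each proposal with probability min(qx/px, 1), where px and
    ## qx weight the density value by each simplex's selection probability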
px = 0
qx = 0
while (sum(samples[1, ] == 0)) {
## generate point on unit simplex
w <- rexp(d + 1)
w <- w/sum(w)
y <- logMLE[triang[simp[1], ]]
##evaluate at the corresponding point
fw <- exp(y %*% w)
maxfx <- max(exp(y))
u <- runif(1)
if (u < fw/maxfx) {
samples[1, ] <- w %*% x[triang[simp[1], ], ]
px = fw / prob[simp[1]] * lcd$detA[simp[1]]
}
}
for (i in 2:n) {
w <- rexp(d + 1)
w <- w/sum(w)
y <- logMLE[triang[simp[i], ]]
fw <- exp(y %*% w)
qx = fw / prob[simp[i]] * lcd$detA[simp[i]]
u <- runif(1)
if (u < min(qx/px,1)) {
samples[i, ] <- w %*% x[triang[simp[i], ], ]
px = qx
}
else samples[i, ] = samples[i-1, ]
}
}
else {
for (i in 1:n) {
while (sum(samples[i, ] == 0)) {
w <- rexp(d + 1)
w <- w/sum(w)
y <- logMLE[triang[simp[i], ]]
fw <- exp(y %*% w)
maxfx <- max(exp(y))
u <- runif(1)
if (u < fw/maxfx) samples[i, ] <- w %*% x[triang[simp[i], ], ]
}
}
}
return(samples)
}
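## A quick usage sketch (hedged: assumes the usual LogConcDEAD workflow in
## which the fitted object comes from mlelcd(); data and names below are
## illustrative only):
## x <- matrix(rnorm(200), ncol = 2)
## lcd <- mlelcd(x)
## samp <- rlcd(n = 100, lcd = lcd, method = "Independent")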
|
f121f966842d467a5085ad84f310967dcf95ad68 | 5cfc686149876fd49755d709a82047b68f85e7b5 | /r2.R | 135beb300ffad2ae0bee2d0ed5b694cf90df0f66 | [] | no_license | kira1508/Rprograms | 9ab2754bc312295b80a1fcf9dc7c5e6623ca56d9 | d09ea5cea8c44b695f8fb8356aed12392f703757 | refs/heads/master | 2021-09-01T07:28:36.629746 | 2017-12-25T17:30:50 | 2017-12-25T17:30:50 | 113,028,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 110 | r | r2.R | library(datasets)
hist(iris$Sepal.Length[1:5])
plot(density(iris$Sepal.Length[1:5]))
pie(table(iris$Species))
|
5ebd35ed726e1797bc160d63240fedc190f6d0ff | 2195aa79fbd3cf2f048ad5a9ee3a1ef948ff6601 | /docs/ViewFilterCustomAction.rd | 6f75ff34de16fc3515c8788505ef1a935e26fbd3 | ["MIT"] | permissive | snakamura/q3 | d3601503df4ebb08f051332a9669cd71dc5256b2 | 6ab405b61deec8bb3fc0f35057dd880efd96b87f | refs/heads/master | 2016-09-02T00:33:43.224628 | 2014-07-22T23:38:22 | 2014-07-22T23:38:22 | null | 0 | 0 | null | null | null | null | R | false | false | 571 | rd | ViewFilterCustomAction.rd | =begin
=ViewFilterCustomใขใฏใทใงใณ
ใซในใฟใ ใใฃใซใฟใ้ฉ็จใใพใใๅผๆฐใๆๅฎใใใใจๆๅฎใใใใใฏใญใใใฃใซใฟใซ่จญๅฎใใพใใๅผๆฐใๆๅฎใใใชใๅ ดๅใซใฏใ((<[ใซในใฟใ ใใฃใซใฟ]ใใคใขใญใฐ|URL:CustomFilterDialog.html>))ใ้ใใฆใใฃใซใฟใๆๅฎใใพใใใใฃใซใฟใซใคใใฆใฏใ((<ใใฃใซใฟ|URL:Filter.html>))ใๅ็
งใใฆใใ ใใใ
==ๅผๆฐ
:1
ใใฃใซใฟใซ่จญๅฎใใใใฏใญ
==ๆๅนใชใฆใฃใณใใฆใปใใฅใผ
*ใกใคใณใฆใฃใณใใฆ
=end
|
539857df590e644aedd159caa5a7dc7b16ca4af2 | e189d2945876e7b372d3081f4c3b4195cf443982 | /man/parsers_MasksMixin.Rd | a5f17236c0e0505802d59c6ab8e240cc81afb879 | [
"Apache-2.0"
] | permissive | Cdk29/fastai | 1f7a50662ed6204846975395927fce750ff65198 | 974677ad9d63fd4fa642a62583a5ae8b1610947b | refs/heads/master | 2023-04-14T09:00:08.682659 | 2021-04-30T12:18:58 | 2021-04-30T12:18:58 | 324,944,638 | 0 | 1 | Apache-2.0 | 2021-04-21T08:59:47 | 2020-12-28T07:38:23 | null | UTF-8 | R | false | true | 307 | rd | parsers_MasksMixin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icevision_parsers.R
\name{parsers_MasksMixin}
\alias{parsers_MasksMixin}
\title{MasksMixin}
\usage{
parsers_MasksMixin(...)
}
\arguments{
\item{...}{arguments to pass}
}
\value{
None
}
\description{
Adds masks method to parser
}
|
4910d8d7adb7a0fe6734409d9c62b291f857f552 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/plotGoogleMaps/examples/stfdfGoogleMaps.Rd.R | 629a0ff2cec69aa9c097ded303d519ff3dfd240a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 777 | r | stfdfGoogleMaps.Rd.R | library(plotGoogleMaps)
### Name: stfdfGoogleMaps
### Title: Create a plot of spacetime (STFDF) data on Google Maps
### Aliases: stfdfGoogleMaps
### ** Examples
## Data preparation
## STFDF data from spacetime vignette spacetime: Spatio-Temporal Data in R
#library("maps")
# states.m = map('state', plot=FALSE, fill=TRUE)
# IDs <- sapply(strsplit(states.m$names, ":"), function(x) x[1])
# library("maptools")
#states = map2SpatialPolygons(states.m, IDs=IDs)
#yrs = 1970:1986
#time = as.POSIXct(paste(yrs, "-01-01", sep=""), tz = "GMT")
# library("plm")
#data("Produc")
#Produc.st = STFDF(states[-8], time, Produc[order(Produc[2], Produc[1]),])
#Produc.st@sp@proj4string=CRS('+proj=longlat +datum=WGS84')
# m <- stfdfGoogleMaps(Produc.st, zcol= 'unemp')
|
808e707650cc94b4b1c4301cc180bf0296e0e6e7 | 26ec468c649c35fb6a4d360d96bfec5c052dde02 | /ui.R | 690f0c97279a668277808ee853ef671d9a0f410a | [] | no_license | sli-wb/WBTV-Budget-App | 4728038cee28db7f5d02e694d2a9f2aeb5951fab | 10f4196eee7a7e6d9581fc4a17ff3eed87d8d801 | refs/heads/main | 2023-02-17T18:58:49.298056 | 2021-01-09T00:44:13 | 2021-01-09T00:44:13 | 328,044,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,274 | r | ui.R | dashboardPage(
dashboardHeader(
title = tags$span(tags$img(src='img/Warner_Bros_Television_2019.png', height="90%"), "TV Show Production"),
titleWidth = 300
),
dashboardSidebar(
sidebarMenu(id = "tab",
menuItem("Cost Summary", tabName = "cost-summary"),
menuItem("View Budget", tabName = "view-budget"),
menuItem("Budget Comparison", tabName = "budget-comparison"),
menuItem("Cost Prediction", tabName = "cost-prediction")
),
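    # the sidebar inputs below are swapped in and out with conditionalPanel(),
    # keyed on the active menu tab (input.tab) and, for the cost summary tab,
    # on the active tabBox panel (input.cost_summary_tabset1)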
conditionalPanel(
condition = "input.tab == 'cost-summary'",
#& (input.cost_summary_tabset1 == 'Season Cost' | input.cost_summary_tabset1 == 'Account Cost')",
selectInput("tvshow", "TV show:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season")
),
conditionalPanel(
condition = "input.tab== 'view-budget'",
selectInput("tvshow_view", "TV show:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season_view"),
uiOutput("type_view"),
uiOutput("project_view"),
uiOutput("budgetnum_view"),
uiOutput("scenariotitle_view"),
actionButton("view","View Budget")
),
conditionalPanel(
condition = "input.tab == 'cost-summary' & input.cost_summary_tabset1 == 'Season Cost'",
radioButtons("order1", "Order By:",
c("Episode Number","Above The Line",
"Other Costs",
"Other Costs 2",
"Post Production",
"Production")),
uiOutput('episode'),
uiOutput('role'),
uiOutput('group')
),
conditionalPanel(
condition = "input.tab == 'cost-summary' & input.cost_summary_tabset1 == 'Account Cost'",
uiOutput("role3"),
uiOutput("group3"),
uiOutput("item3")
),
conditionalPanel(
condition = "input.tab == 'cost-prediction'",
uiOutput('tv_pred'),
uiOutput('season_pred'),
uiOutput('episode_pred'),
actionButton("forecast", "Forecast")
)#,
# conditionalPanel(
# condition = "input.tab == 'budget-planning'",
# textInput('episode_plan',"Number of Episodes"),
# uiOutput('')
# )
),
dashboardBody(
#use_waiter(),
#waiter_show_on_load(spin_fading_circles()),
#waiter_hide_on_render("budgettotalPlot"),
tabItems(
tabItem(
tabName = "cost-summary",
fluidRow(
tabBox(
id = "cost_summary_tabset1",
width = 12,
tabPanel(
"Season Cost",
withSpinner(plotOutput("budgettotalPlot"),type = 5, color = "#062ecf"),
DT::dataTableOutput("summaryTable")
),
tabPanel(
"Account Cost",
plotOutput("itemcostPlot"),
DT::dataTableOutput("itemcostTable")
)
)
)
),
tabItem(
tabName = "view-budget",
fluidRow(
tabBox(
id = "dataset",
width = 12,
tabPanel("Budget",
h3(textOutput("info_view")),
downloadButton("downloadData", "Download"),
withSpinner(DT::dataTableOutput("budget_view"),type = 5, color = "#0dc5c1")
)
)
)
),
tabItem(
tabName = "budget-comparison",
fluidRow(
tabBox(
id = "dataset",
width = 12,
tabPanel("Two Budgets",
fluidRow(
tabBox(
id = "two_budgets_tabset1",
width = 12,
tabPanel(
"Budgets",
column(4,
selectInput("tvshow1", "TV show 1:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season1"),
uiOutput("type1"),
uiOutput("project1"),
uiOutput("budgetnum1"),
uiOutput("scenariotitle1")
),
column(4,
selectInput("tvshow2", "TV show 2:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season2"),
uiOutput("type2"),
uiOutput("project2"),
uiOutput("budgetnum2"),
uiOutput('scenariotitle2'),
actionButton("compare", "Compare")
)
#column(4,
# actionButton('insertBtn', 'Insert'),
# actionButton('removeBtn', 'Remove'),
# tags$div(id = 'placeholder')),
),
tabPanel(
"Summary",
column(4,
#selectInput("rolename2", "Cost Role:", choices = c("ALL", "Above The Line", "Other Costs","Other Costs 2", "Post Prodcution", "Production")),
uiOutput("role2"),
uiOutput('group2')),
column(8,plotOutput("budgetcomparisonPlot")),
DT::dataTableOutput("budgetcomparisonTable")
)
)
)
),
tabPanel("Three Budgets",
fluidRow(
tabBox(
id = "three_budgets_tabset1",
width = 12,
tabPanel(
"Budgets",
column(4,
selectInput("tvshow11", "TV show 1:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season11"),
uiOutput("type11"),
uiOutput("project11"),
uiOutput("budgetnum11"),
uiOutput("scenariotitle11")
),
column(4,
selectInput("tvshow22", "TV show 2:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season22"),
uiOutput("type22"),
uiOutput("project22"),
uiOutput("budgetnum22"),
uiOutput('scenariotitle22')
),
column(4,
selectInput("tvshow33", "TV show 3:", choices =c(unique(showproject[order(TITLE)]$TITLE))),
uiOutput("season33"),
uiOutput("type33"),
uiOutput("project33"),
uiOutput("budgetnum33"),
uiOutput('scenariotitle33'),
actionButton("compare_3", "Compare")
)
),
tabPanel(
"Summary",
column(4,
selectInput("rolename33", "Cost Role:", choices = c("ALL", "Above The Line", "Other Costs","Other Costs 2", "Post Prodcution", "Production")),
uiOutput('group33')
),
column(8,
plotOutput("budgetcomparisonPlot_3")),
DT::dataTableOutput("budgetcomparisonTable_3")
)
)
)
)
)
)
),
tabItem(
tabName = "cost-prediction",
fluidRow(
tabBox(
id = "cost_prediction_tabset1",
width = 12,
tabPanel(
"Prediction",
h4(strong("Comp Movies"),align="center"),
DT::dataTableOutput('row_modif'),
fluidRow(
column(2,uiOutput('comp1')),
column(2,uiOutput('comp2')),
column(2,uiOutput('comp3')),
column(2,uiOutput('comp4')),
column(2,uiOutput('comp5'))
),
hr(),
plotOutput("plot_pred"),
plotOutput("plot_pred_2"),
DT::dataTableOutput("table_pred"),
DT::dataTableOutput("test")
)
)
)
)#,
# tabItem(
# tabName = "budget-planning",
# fluidRow(
# tabBox(
# id = "budget-planning-tabset`1",
# width = 12,
# rHandsontableOutput("budget_plan")
# )
# )
# )
)
)
)
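# (Editor's sketch, not part of the original app) The uiOutput() placeholders above
# are populated server-side; a hypothetical renderUI for the "season" dropdown could
# look like the following. The 'showproject' table and its SEASON column are
# assumptions inferred from the selectInput() calls above:
# output$season <- renderUI({
#   selectInput("season", "Season:",
#               choices = sort(unique(showproject[TITLE == input$tvshow]$SEASON)))
# })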
|
8a283c07e882083d03e49fc050b873fdf74f7313 | cd702f481643c7bdff1ba8fb200f794e7c271e33 | /man/summarizeFollowUpTime.Rd | 98c942d58d8b3b52675884c608a4e3bad19cca5f | [] | no_license | alondhe/MPHThesisPAD | 1177b4cd74f1721e12d710f92e076a15d25530e7 | 1e18269661af8b3b3251d7a1e58012e72a18ae03 | refs/heads/master | 2020-12-02T11:07:24.326159 | 2017-07-27T19:02:22 | 2017-07-27T19:02:22 | 96,605,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 614 | rd | summarizeFollowUpTime.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{summarizeFollowUpTime}
\alias{summarizeFollowUpTime}
\title{Get Median Days of Follow-up time}
\usage{
summarizeFollowUpTime(dcoList, cmOutputPath, reportsOutputPath)
}
\arguments{
\item{dcoList}{The list of drug-comparator-outcomes}
\item{cmOutputPath}{The path pointing to the folder that holds the CohortMethod assets}
\item{reportsOutputPath}{The path pointing to the folder that should hold charts and tables}
}
\details{
Parses StratPop files to calculate the median days of follow-up time
}
\author{
Ajit Londhe
}
|
6549bb5bbf4d80c32cd3be853f7c164e39fcdfe9 | b8056cebf9e05002d66f70c4be4c6599e65a7064 | /Walmart Store Sales Analysis.R | 520d40c64e892c199fe56f3c385b53c687305d55 | [] | no_license | shishir247/ML-Projects-with-R | a6d4cfbe77d14128c6b7d5e7e0bc4e736b815159 | 7ed4bd27cdc1460f7438f58e1a99593c37cafdef | refs/heads/master | 2022-12-15T06:42:39.358041 | 2020-09-13T19:05:29 | 2020-09-13T19:05:29 | 274,356,618 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,461 | r | Walmart Store Sales Analysis.R | # Cleaning global environment
rm(list = ls())
###################################################################################
# Project 1 Retail Analysis with Walmart Data #
###################################################################################
# Reading dataset and assigning it as WSS
WSS <- read.csv("Walmart_Store_sales.csv", header = T)
dim(WSS)
head(WSS)
summary(WSS)
str(WSS)
## changing date formats ##
# install.packages("lubridate", dependencies = T)
library(lubridate)
str(WSS$Date) # structure of date is factor
# Factor w/ 143 levels "01-04-2011","01-06-2012",..: 20 53 86 119 21 54 87 120 6 39 ...
## changing it to Date format
WSS$Date = as.Date(WSS$Date, "%d-%m-%Y")
str(WSS$Date) # structure of date is now changed to Date format
# Date[1:6435], format: "2010-02-05" "2010-02-12" "2010-02-19" "2010-02-26" "2010-03-05" "2010-03-12" "2010-03-19"
# "2010-03-26" "2010-04-02" ...
####################################################################################
# Analysis Tasks #
####################################################################################
## 1. Which store has maximum sales?
####################################################################################
# install.packages("dplyr", dependencies = T)
library(dplyr)
## store that has maximum sales
WSS %>% group_by(Store) %>% # grouping by stores
summarise(Total_Sales = sum(Weekly_Sales)) %>% # aggregating sales wrt individual stores
filter(Total_Sales == max(Total_Sales)) # filtering for maximum sale
# Store Total_Sales
# <int> <dbl>
# 1 20 301397792.
# store 20 has maximum sales of 301397792.
####################################################################################
## 2.1 Which store has maximum standard deviation?
####################################################################################
WSS %>% group_by(Store) %>% # grouping by stores
summarise(standard_deviation = sd(Weekly_Sales)) %>% # calculating standard deviation
filter(standard_deviation == max(standard_deviation)) # filtering for maximum sd
# Store standard_deviation
# <int> <dbl>
# 1 14 317570.
# store 14 has maximum standard deviation of 317570.
####################################################################################
## 2.2 Coefficient of variation (standard deviation / mean)
####################################################################################
WSS %>% group_by(Store) %>% # grouping by stores
summarise(standard_deviation = sd(Weekly_Sales), mean_value = mean(Weekly_Sales)) %>% # calculating standard deviation
  mutate(cv = standard_deviation/mean_value*100) %>% # coefficient of variation (%)
arrange(desc(cv)) # arranging in decreasing order of cv
# Store standard_deviation mean_value cv
# <int> <dbl> <dbl> <dbl>
# 1 35 211243. 919725. 23.0
# 2 7 112585. 570617. 19.7
# 3 15 120539. 623312. 19.3
# 4 29 99120. 539451. 18.4
# 5 23 249788. 1389864. 18.0
# 6 21 128753. 756069. 17.0
# 7 45 130169. 785981. 16.6
# 8 16 85770. 519248. 16.5
# 9 18 176642. 1084718. 16.3
# 10 36 60725. 373512. 16.3
# # โฆ with 35 more rows
# store 35 has the maximum coefficient of variation.
####################################################################################
## 3 Which stores has good quarterly growth rate in Q3โ2012?
####################################################################################
# fetching 2012 Q2 data from main data (Walmart_Store_sales)
Weekly_sales_Q2.2012 <- WSS %>% group_by(Store) %>%
filter(Date >= as.Date("2012-04-01") & Date <= as.Date("2012-06-30")) %>% # filterring for Q2 (2012-04-01 to 2012-06-30)
summarise(sum(Weekly_Sales)) # summarising for sum of weekly sales
# fetching 2012 Q3 data from main data (Walmart_Store_sales)
Weekly_sales_Q3.2012 <- WSS %>% group_by(Store) %>%
filter(Date >= as.Date("2012-07-01") & Date <= as.Date("2012-09-30")) %>% # filterring for Q2 (2012-07-01 to 2012-09-30)
summarise(sum(Weekly_Sales)) # summarising for sum of weekly sales
# Growth Rate = (Weekly_Sales.Q3_2012-Weekly_Sales.Q2_2012)/Weekly_Sales.Q2_2012
Growth_Rate_Q3.2012 <-
mutate(Weekly_sales_Q3.2012, Performance = # putting values in formula Growth Q3 = ((Q3 - Q2) / Q2) * 100
((Weekly_sales_Q3.2012$`sum(Weekly_Sales)` - Weekly_sales_Q2.2012$`sum(Weekly_Sales)`) /
Weekly_sales_Q2.2012$`sum(Weekly_Sales)`)*100) %>% arrange(desc(Performance)) # arranging in decreasing order
head(Growth_Rate_Q3.2012)
# Store `sum(Weekly_Sales)` Performance
# <int> <dbl> <dbl>
# 1 7 8262787. 13.3
# 2 16 7121542. 8.49
# 3 35 11322421. 4.47
# 4 26 13675692. 3.96
# 5 39 20715116. 2.48
# 6 41 18093844. 2.46
# store 7 has the highest growth rate in Q3 2012, at 13.3 %
# comparison bargraph
H <- Growth_Rate_Q3.2012$Performance
M <- Growth_Rate_Q3.2012$Store
# Plotting the bar chart
barplot(H,names.arg=M,xlab="Store",ylab="Sales_comparison_%",col="blue",
main="Sales_Performance_Q3'2012",border="white")
####################################################################################
##4. Some holidays have a negative impact on sales.
# Find out holidays which have higher sales than the mean sales in non-holiday season
# for all stores together
####################################################################################
# Holidays #
# Super Bowl: 12-Feb-10, 11-Feb-11, 10-Feb-12, 8-Feb-13
# Labour Day: 10-Sep-10, 9-Sep-11, 7-Sep-12, 6-Sep-13
# Thanksgiving: 26-Nov-10, 25-Nov-11, 23-Nov-12, 29-Nov-13
# Christmas: 31-Dec-10, 30-Dec-11, 28-Dec-12, 27-Dec-13
# average of weekly sales for non holiday week
mean_of_non_holiday_Sales <-
WSS %>%
filter(Holiday_Flag == "0") %>% # filtering for non holiday week
summarise(mean(Weekly_Sales)) # summarising mean of weekly sales
# mean(Weekly_Sales)
# # 1 1041256
# mutating comparison column for holiday weekly sales with mean sales of non holiday week in data
Sales_comparison <- WSS %>%
group_by(Date) %>% # grouping by date
filter(Holiday_Flag == "1") %>% # filtering for holiday week
summarise(Total_Sales_holiday_week = sum(Weekly_Sales)) %>% # summarising for sum of weekly sales
mutate(Greater_then_mean_of_non_holiday_week = Total_Sales_holiday_week>mean_of_non_holiday_Sales) # mutating comparison column
Sales_comparison
# Date Total_Sales_holiday_week Greater_then_mean_of_non_holiday_week
# <date> <dbl> <lgl>
# 1 2010-02-12 48336678. TRUE
# 2 2010-09-10 45634398. TRUE
# 3 2010-11-26 65821003. TRUE
# 4 2010-12-31 40432519 TRUE
# 5 2011-02-11 47336193. TRUE
# 6 2011-09-09 46763228. TRUE
# 7 2011-11-25 66593605. TRUE
# 8 2011-12-30 46042461. TRUE
# 9 2012-02-10 50009408. TRUE
# 10 2012-09-07 48330059. TRUE
# Adding holidays column to sales comparison
Sales_comparison$Holiday <- ifelse(month(ymd(Sales_comparison$Date)) == 2,"Super Bowl" ,
ifelse(month(ymd(Sales_comparison$Date)) == 9,"Labour Day" ,
ifelse(month(ymd(Sales_comparison$Date)) == 11,"Thanksgiving" , "Christmas")))
Sales_comparison
# Date Total_Sales_holiday_week Greater_then_mean_of_non_holiday_week Holiday
# <date> <dbl> <lgl> <chr>
# 1 2010-02-12 48336678. TRUE Super Bowl
# 2 2010-09-10 45634398. TRUE Labour Day
# 3 2010-11-26 65821003. TRUE Thanksgiving
# 4 2010-12-31 40432519 TRUE Christmas
# 5 2011-02-11 47336193. TRUE Super Bowl
# 6 2011-09-09 46763228. TRUE Labour Day
# 7 2011-11-25 66593605. TRUE Thanksgiving
# 8 2011-12-30 46042461. TRUE Christmas
# 9 2012-02-10 50009408. TRUE Super Bowl
# 10 2012-09-07 48330059. TRUE Labour Day
# Above holidays have sales greater than the mean of non-holiday weeks
####################################################################################
## 5 Provide a monthly and semester view of sales in units and give insights
####################################################################################
### monthly view of sales ###
monthly_view = WSS %>%
mutate(Month = month(Date), Year = year(Date)) %>% # mutating Month and Year column
group_by(Month, Year) %>% # grouping by Month and Year
summarise(Weekly_sales = sum(Weekly_Sales)) %>% # summarising for sum of weekly sales
arrange(desc(Weekly_sales)) # arranging wrt Weekly Sales
head(monthly_view)
# Month Year Weekly_sales
# <dbl> <dbl> <dbl>
# 1 12 2010 288760533.
# 2 12 2011 288078102.
# 3 6 2012 240610329.
# 4 8 2012 236850766.
# 5 7 2010 232580126.
# 6 3 2012 231509650.
### semester view of sales ###
semester_view <- WSS %>%
mutate(sem.year = paste(semester(ymd(Date)),year(ymd(Date)),sep = ".")) %>% # mutating sem.year column
group_by(sem.year) %>% # grouping by semester
summarise(Weekly_sales = sum(Weekly_Sales)) %>% # summarising for sum of weekly sales
arrange(desc(Weekly_sales)) # arranging wrt Weekly Sales
head(semester_view)
# sem.year Weekly_sales
# <chr> <dbl>
# 1 2.2011 1320860210.
# 2 2.2010 1306263860.
# 3 1.2012 1210765416.
# 4 1.2011 1127339797.
# 5 1.2010 982622260.
# 6 2.2012 789367443.
############################################################################################
## Statistical Model ##
############################################################################################
# For Store 1 – Build prediction models to forecast demand
# Linear Regression – Utilize variables like date and restructure dates as 1 for 5 Feb 2010
# (starting from the earliest date in order).
# Hypothesize if CPI, unemployment, and fuel price have any impact on sales.
############################################################################################
## Restructuring dates ##
# mutating Days column such that the first date, 2010-02-05, becomes Day 1
# (note: yday() restarts each January, so the index is computed from the earliest date instead)
data_dates <- WSS %>% mutate(Days = as.numeric(Date - min(Date)) + 1)
head(data_dates)
# Store Date Weekly_Sales Holiday_Flag Temperature Fuel_Price CPI Unemployment Days
# 1 1 2010-02-05 1643691 0 42.31 2.572 211.0964 8.106 1
# 2 1 2010-02-12 1641957 1 38.51 2.548 211.2422 8.106 8
# 3 1 2010-02-19 1611968 0 39.93 2.514 211.2891 8.106 15
# 4 1 2010-02-26 1409728 0 46.63 2.561 211.3196 8.106 22
# 5 1 2010-03-05 1554807 0 46.50 2.625 211.3501 8.106 29
# 6 1 2010-03-12 1439542 0 57.79 2.667 211.3806 8.106 36
## filtering the data set for store-1
store_1_data <- WSS %>% filter(Store==1)
head(store_1_data)
# Store Date Weekly_Sales Holiday_Flag Temperature Fuel_Price CPI Unemployment
# 1 1 2010-02-05 1643691 0 42.31 2.572 211.0964 8.106
# 2 1 2010-02-12 1641957 1 38.51 2.548 211.2422 8.106
# 3 1 2010-02-19 1611968 0 39.93 2.514 211.2891 8.106
# 4 1 2010-02-26 1409728 0 46.63 2.561 211.3196 8.106
# 5 1 2010-03-05 1554807 0 46.50 2.625 211.3501 8.106
# 6 1 2010-03-12 1439542 0 57.79 2.667 211.3806 8.106
################################################################################################################
## Hypothesis testing if CPI, unemployment, and fuel price have any impact on sales using Multiple Linear Model ##
################################################################################################################
## Multiple linear model for impact of CPI, Unemployment and fuel price on sales
model <- lm(formula = Weekly_Sales ~ CPI + Unemployment + Fuel_Price, data = store_1_data) # lm(dependent ~ independent, data)
### Model Evaluation Technique - R-Squared Value ###
Rsqd <- summary(model)$r.squared
Rsqd
# 0.08 = 8 %
### Model Evaluation Technique - RMSE ###
predicted_sales <- predict(model, store_1_data)
RMSE = sqrt(mean((store_1_data$Weekly_Sales - predicted_sales)^2)) # root mean squared error
RMSE
# 148683
summary(model)
# lm(formula = Weekly_Sales ~ CPI + Unemployment + Fuel_Price,
# data = store_1_data)
#
# Residuals:
# Min 1Q Median 3Q Max
# -287777 -86699 -23987 61849 882877
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -3887096 1740276 -2.234 0.02711 *
# CPI 21792 6785 3.212 0.00164 **
# Unemployment 124064 58779 2.111 0.03659 *
# Fuel_Price -64838 46842 -1.384 0.16851
# ---
# Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 150800 on 139 degrees of freedom
# Multiple R-squared: 0.08499, Adjusted R-squared: 0.06524
# F-statistic: 4.303 on 3 and 139 DF, p-value: 0.006162
###########################################################################################
## summary model ##
# Ho: CPI has no effect on Sales
# Ha: CPI has effect on Sales
# comparing pvalues
pvalue <- 0.00164
alpha <- 0.05
pvalue < alpha
# TRUE
# Rej Ho, CPI has impact on sales
###########################################################################################
# Ho: Unemployment has no effect on Sales
# Ha: Unemployment has effect on Sales
# comparing pvalues
pvalue <- 0.03659
alpha <- 0.05
pvalue < alpha
# TRUE
# Rej Ho, Unemployment has impact on sales
###########################################################################################
# Ho: Fuel Price has no effect on Sales
# Ha: Fuel Price has effect on Sales
# comparing pvalues
pvalue <- 0.16851
alpha <- 0.05
pvalue < alpha
# FALSE
# Do not rej Ho, Fuel cost has no impact on sales
#==========================================================================================#
## conclusion ##
#==========================================================================================#
# CPI and Unemployment has significant impact on sales.
# Fuel cost has no impact on sales.
# As explained variation (R -Sqaured) is too small in each model,
# the current models does not have much explanatory power, and we have to look for other model.
############################################################################################
## Demand forcast using Multiple Linear Model ##
############################################################################################
# Model-1 #
############################################################################################
# Building model = mod1 for dependent variable ~ independent variables
mod1 <- lm(formula = Weekly_Sales ~ Holiday_Flag + Temperature +Fuel_Price + CPI + Unemployment , data = store_1_data)
### Model Evaluation Technique - R-Squared Value ###
Rsqd <- summary(mod1)$r.squared
Rsqd
# 0.1494714 = 14 %
### Model Evaluation Technique - RMSE ###
predicted_sales <- predict(mod1, store_1_data)
RMSE = sqrt(mean((store_1_data$Weekly_Sales - predicted_sales)^2)) # root mean squared error
RMSE
# 143348
summary(mod1)
# lm(formula = Weekly_Sales ~ Holiday_Flag + Temperature + Fuel_Price +
# CPI + Unemployment, data = store_1_data)
#
# Residuals:
# Min 1Q Median 3Q Max
# -305166 -78247 -18260 53643 854412
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -2427856 1752958 -1.385 0.1683
# Holiday_Flag 89376 49338 1.811 0.0723 .
# Temperature -2160 922 -2.343 0.0206 *
# Fuel_Price -24337 47335 -0.514 0.6080
# CPI 16632 6786 2.451 0.0155 *
# Unemployment 80209 58726 1.366 0.1742
# ---
# Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 146500 on 137 degrees of freedom
# Multiple R-squared: 0.1495, Adjusted R-squared: 0.1184
# F-statistic: 4.815 on 5 and 137 DF, p-value: 0.0004359
############################################################################################
## Demand forcast using Multiple Linear Model ##
############################################################################################
# Model-2 #
############################################################################################
# Eliminating insignificant variables Holiday Flag and Fuel Price due to high p value
# compared to significance level (0.05) and Building model = mod2 for dependent variable ~ remaining independent variables
mod2 <- lm(formula = Weekly_Sales ~ Temperature + CPI + Unemployment , data = store_1_data)
### Model Evaluation Technique - R-Squared Value ###
Rsqd <- summary(mod2)$r.squared
Rsqd
# 0.1263056 = 12 %
### Model Evaluation Technique - RMSE ###
predicted_sales <- predict(mod2, store_1_data)
RMSE = sqrt(mean((store_1_data$Weekly_Sales - predicted_sales)^2)) # root mean squared error
RMSE
# 145287
summary(mod2)
# lm(formula = Weekly_Sales ~ Temperature + CPI + Unemployment,
# data = store_1_data)
#
# Residuals:
# Min 1Q Median 3Q Max
# -311770 -85435 -10998 55936 841075
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -2048494.5 1430740.6 -1.432 0.15445
# Temperature -2587.7 883.4 -2.929 0.00397 **
# CPI 14730.4 4893.0 3.011 0.00310 **
# Unemployment 78679.8 56007.2 1.405 0.16231
# ---
# Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 147400 on 139 degrees of freedom
# Multiple R-squared: 0.1263, Adjusted R-squared: 0.1074
# F-statistic: 6.698 on 3 and 139 DF, p-value: 0.0002957
############################################################################################
## Demand forcast using Multiple Linear Model ##
############################################################################################
# Model-3 #
############################################################################################
# Eliminating insignificant variable Unemployment due to high p value compared to significance level (0.05) and
# Building model = mod3 for dependent variable ~ remaining independent variables
mod3 <- lm(formula = Weekly_Sales ~ Temperature + CPI , data = store_1_data)
### Model Evaluation Technique - R-Squared Value ###
Rsqd <- summary(mod3)$r.squared
Rsqd
# 0.113901 = 11 %
### Model Evaluation Technique - RMSE ###
predicted_sales <- predict(mod3, store_1_data)
RMSE = sqrt(mean((store_1_data$Weekly_Sales - predicted_sales)^2)) # root mean squared error
RMSE
# 146314
summary(mod3)
# lm(formula = Weekly_Sales ~ Temperature + CPI, data = store_1_data)
#
# Residuals:
# Min 1Q Median 3Q Max
# -312205 -85704 -9198 57222 830489
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -233190 616327 -0.378 0.70574
# Temperature -2769 877 -3.157 0.00195 **
# CPI 9156 2872 3.187 0.00177 **
# ---
# Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 147900 on 140 degrees of freedom
# Multiple R-squared: 0.1139, Adjusted R-squared: 0.1012
# F-statistic: 8.998 on 2 and 140 DF, p-value: 0.0002107
############################################################################################
# For both the independent variables in model-3 we have p value less then significance level (0.05)
# so summarising model-3
############################################################################################
## summary of model-3 ##
# Ho: Temperature has no effect on Sales
# Ha: Temperature has effect on Sales
# comparing pvalues
pvalue <- 0.00195
alpha <- 0.05
pvalue < alpha
# TRUE
# Rej Ho, Temperature has impact on sales
###########################################################################################
# Ho: CPI has no effect on Sales
# Ha: CPI has effect on Sales
# comparing pvalues
pvalue <- 0.00177
alpha <- 0.05
pvalue < alpha
# TRUE
# Rej Ho, CPI has impact on sales
###########################################################################################
## Multiple linear regression equation ##
# y = b0 + b1x1 + b2x2 + b3x3 + b4x4
# Weekly_Sales = 89376 * Holiday_Flag + (-2160 ) * Temperature + (-24337) * Fuel_Price + 16632 * CPI + 80209 * Unemployment
# Unemployment coefficient (the last term, 80209, is varied below: 7 vs 6)
Weekly_Sales_1 = 89376 * 1 + (-2160 ) * 1 + (-24337) * 1 + 16632 * (1) + 80209 * 7
Weekly_Sales_1
Weekly_Sales_2 = 89376 * 1 + (-2160 ) * 1 + (-24337) * 1 + 16632 * (1) + 80209 * 6
Weekly_Sales_2
Weekly_Sales_2 - Weekly_Sales_1
# 560765 - 640974 = -80209
# With a 1 unit decrease in Unemployment, predicted weekly sales fall by 80209 units
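# (editor's sketch) the same contrast obtained via predict() on model-1, holding the
# other predictors at their means from summary(store_1_data) and varying Unemployment:
nd <- data.frame(Holiday_Flag = 1, Temperature = 68.31, Fuel_Price = 3.220,
                 CPI = 216.0, Unemployment = c(7, 6))
diff(predict(mod1, nd))
# approximately -80209, matching the Unemployment coefficient above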
############################################################################################
# Temperature coefficient
Weekly_Sales_1 = 89376 * 1 + (-2160 ) * 69.2 + (-24337) * 1 + 16632 * (1) + 80209 * 1
Weekly_Sales_1
Weekly_Sales_2 = 89376 * 1 + (-2160 ) * 35.40 + (-24337) * 1 + 16632 * (1) + 80209 * 1
Weekly_Sales_2
Weekly_Sales_2 - Weekly_Sales_1
# 85416 - 12408 = 73008
# A 33.8-degree drop in temperature (from 69.2 to 35.40 Fahrenheit) raises predicted
# weekly sales by 73008 units, i.e. about -2160 units per degree Fahrenheit
summary(store_1_data)
# Store Date Weekly_Sales Holiday_Flag Temperature Fuel_Price CPI Unemployment
# Min. :1 Min. :2010-02-05 Min. :1316899 Min. :0.00000 Min. :35.40 Min. :2.514 Min. :210.3 Min. :6.573
# 1st Qu.:1 1st Qu.:2010-10-11 1st Qu.:1458105 1st Qu.:0.00000 1st Qu.:58.27 1st Qu.:2.764 1st Qu.:211.5 1st Qu.:7.348
# Median :1 Median :2011-06-17 Median :1534850 Median :0.00000 Median :69.64 Median :3.290 Median :215.5 Median :7.787
# Mean :1 Mean :2011-06-17 Mean :1555264 Mean :0.06993 Mean :68.31 Mean :3.220 Mean :216.0 Mean :7.610
# 3rd Qu.:1 3rd Qu.:2012-02-20 3rd Qu.:1614892 3rd Qu.:0.00000 3rd Qu.:80.48 3rd Qu.:3.594 3rd Qu.:220.5 3rd Qu.:7.838
# Max. :1 Max. :2012-10-26 Max. :2387950 Max. :1.00000 Max. :91.65 Max. :3.907 Max. :223.4 Max. :8.106
############################################################################################
#==========================================================================================#
## conclusion ##
#==========================================================================================#
############################################################################################
# As explained variation (R -Sqaured) is too small in each model,
# the current models does not have much explanatory power, and we have to look for other model.
# However, below points can be noted:
# 1. Fuel price and Unemployment have no significant impact on weekly sales for store-1
#    once the other predictors are accounted for (model-3).
# 2. Temperature and CPI have a significant impact on sales for store-1.
# 3. Temperature ranges from a Min. of 35.40 Fahrenheit in winter to a Max. of 91.65 Fahrenheit
#    in summer; a 33.8-degree drop (e.g. from 69.2 to the observed minimum of 35.40) is
#    associated with an increase of about 73008 sales units (-2160 per degree, model-1 estimate).
# 4. Unemployment fell steadily over the period (from 8.106 on 2010-02-05 to 6.573 on
#    2012-10-26); in model-1 each 1 unit decrease corresponds to about 80209 fewer sales
#    units, although this effect is not retained in the final model-3.
|
4ee5b152a1db6e98055f86775ae54e7ed4dfa8de | 28167d5d08a332b193a5e87c66f4687319dbbb9e | /man/pgas_the_package.Rd | 7911215a9f9dded496e1bc94be4245402b24b491 | [] | no_license | niharikag/PGAS | a8a26b06d6c0c3cec13e7e4357d1b49f28c71d6e | 45615ee2ea6ecdfa3cf6b07c0506a80dff79cf13 | refs/heads/master | 2020-03-23T12:55:35.087211 | 2019-03-17T06:22:03 | 2019-03-17T06:22:03 | 141,590,688 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,002 | rd | pgas_the_package.Rd | \docType{package}
\name{PGAS-package}
\alias{PGAS-package}
\title{Implementation of the Particle Gibbs Method and its Extensions and Variants}
\description{
Implementation of the Particle Gibbs method and its extensions and variants.
}
\details{
Implementation of the Particle Gibbs method and its extensions and variants.
}
\references{
[1] Lindsten, Fredrik, Michael I. Jordan, and Thomas B. Schön. "Particle Gibbs with ancestor sampling." The Journal of Machine Learning Research 15.1 (2014): 2145-2184.
[2] C. Andrieu, A. Doucet and R. Holenstein, "Particle Markov chain Monte Carlo methods" Journal of the Royal Statistical Society: Series B, 2010, 72, 269-342.
[3] Rainforth, Tom, et al. "Interacting particle Markov chain Monte Carlo."
International Conference on Machine Learning. 2016.
[4] Svensson, Andreas, et al. "Computationally efficient Bayesian learning of
Gaussian process state space models." Artificial Intelligence and Statistics. 2016.
}
|
77c4960d55a702cd95517e794419a23e7702861d | 0c2019feffbcb12510df6636264361538ee4e58f | /man/skktest.Rd | fdeb064f58966a013fc57d9c901e8e764e780a9e | [] | no_license | cran/MethylCapSig | 997472725776faefb4acd1ec380eb2bdbf2ca4af | 9b84e9651ea2aeca8f502970fbc00fab6b3496aa | refs/heads/master | 2020-06-08T07:48:49.908412 | 2015-08-12T00:00:00 | 2015-08-12T00:00:00 | 40,618,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,881 | rd | skktest.Rd | \name{skktest}
\alias{skktest}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Srivastava-Katayama-Kano test statistic
}
\description{
Calculates the two sample test statistic and p-value for the Srivastava-Katayama-Kano test.
}
\usage{
skktest(X, Y)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
A matrix of dimension \eqn{n \times k} whose rows represent the samples collected from \eqn{n} individuals from the first group on \eqn{k} variates.
}
\item{Y}{
A matrix of dimension \eqn{m \times k} whose rows correspond to samples collected from \eqn{m} individuals from the second group on \eqn{k} variates.
Default value is null. If not specified, the function performs a one-sample test using X.
}
}
\value{
A \eqn{2 \times 1} vector consisting of the test statistic and the p-value.
}
\details{
The Srivastava-Katayama-Kano test statistic is used to test equality of mean vectors for two groups of multivariate observations, where the dimension is greater than the sample size. \code{skktest} takes matrices \code{X} and \code{Y} as arguments whose rows represent samples from two groups respectively. Depending on the values in \code{X} and \code{Y}, the function initially determines whether to perform a one sample test (\eqn{\sum_{i,j} X_{i,j}^2 = 0} or \eqn{\sum_{i,j} Y_{i,j}^2 = 0}) or a two-sample test. The appropriate test statistic is then calculated and is returned along with the p-value which is calculated using right-tailed normal distribution.
}
\references{
Srivastava, M. S., Katayama, S. and Kano, Y. (2013) A two sample test in high dimensional data, Journal of Multivariate Analysis, 114, 349 -- 358.
}
\author{
Deepak N. Ayyala
}
\examples{
data(diffMethylData)
skktest(diffMethylData$region1.x, diffMethylData$region1.y)
# skktest(diffMethylData$region2.x, diffMethylData$region2.y)
}
|
d9b1d8a1d65d2737da018145c0ec701a8aa16c53 | f009dcaeb338350e148c02de800db563a04318eb | /FOSSGIS-workshop/sf_meets_airports.R | 214b98eec9afe32648fff8742e3ece966dc55ad4 | [] | no_license | crazycapivara/rosgeo-live | 1311d028d7a04d21a39e71dcde7c66c6b35e5567 | b566663069867c90d59ca626e15703bd27f365f1 | refs/heads/master | 2020-05-26T09:21:18.680262 | 2017-03-25T14:04:44 | 2017-03-25T14:04:44 | 82,475,499 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 663 | r | sf_meets_airports.R | library(sf)
library(magrittr)
countries <- readRDS("FOSSGIS-workshop/data/ne_10m_admin_0_countries.rds") %>%
st_as_sf()
names(countries)
germany <- subset(countries, SUBUNIT == "Germany")
plot(germany[c("LABELRANK")])
fn <- "FOSSGIS-workshop/data/ne_10m_airports/ne_10m_airports.shp"
st_layers(fn)
airports <- st_read(fn)
ap_ger <- st_contains(germany, airports)[[1]] %>% airports[., ]
st_intersects(germany, airports)
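# st_intersects() returns the same list-of-indices structure, so an equivalent
# subset without the pipe would be (editor's note):
# ap_ger_alt <- airports[st_intersects(germany, airports)[[1]], ]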
plot(ap_ger$geometry, add = TRUE, col = "black")
pts <- airports$geometry[[1]]
class(pts)
pts[1:2]
library(leaflet)
leaflet() %>% addTiles() %>%
addPolygons(data = germany, weight = 3) %>%
addMarkers(data = ap_ger, popup = ~ name)
|
a310d54a5f5a2b78ace3df342cd2c033e59d1aff | c4c0f4b3595ac4458261b3f45fdfba8366ee8cc0 | /Cap12/05-Frequencia.R | f2f5f7eb912bc489673c69af9c5e69a310b1aa9a | [] | no_license | laranelson/PowerBI | e9b467068fc43a7bb686275081862fc79d67cafd | 9cb52e7d1121aad7bcc5f6fe47f250e41e8dd7f5 | refs/heads/main | 2023-09-03T01:56:23.514053 | 2021-11-19T00:00:19 | 2021-11-19T00:00:19 | 420,478,679 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,339 | r | 05-Frequencia.R | # Estatรญstica Bรกsica
# Part 4 - Frequency Table
# Setting the working directory
# Replace the path below with the folder on your computer
setwd("D:/DSA/PowerBI/Cap12")
getwd()
# Loading the dataset
dados <- read.csv("usuarios.csv",
dec = ".",
sep = ",",
h = T,
fileEncoding = "windows-1252")
# Viewing and Summarizing the Data
View(dados)
names(dados)
str(dados)
summary(dados$salario)
summary(dados$grau_instrucao)
mean(dados$salario)
mean(dados$grau_instrucao)
# Absolute Frequency Table
freq <- table(dados$grau_instrucao)
View(freq)
# Relative Frequency Table
freq_rel <- prop.table(freq)
View(freq_rel)
# Percentage (100 * freq_rel)
p_freq_rel <- 100 * freq_rel
View(p_freq_rel)
# Add a total row
View(freq)
freq <- c(freq, sum(freq))
View(freq)
names(freq)[4] <- "Total"
View(freq)
# Final table with all values
# We append the totals to the relative and percentage frequencies
freq_rel <- c(freq_rel, sum(freq_rel))
p_freq_rel <- c(p_freq_rel, sum(p_freq_rel))
# Final table combining all vectors
tabela_final <- cbind(freq,
freq_rel = round(freq_rel, digits = 2),
p_freq_rel = round(p_freq_rel, digits = 2))
View(tabela_final)
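# Optional extension (editor's addition, not part of the original lesson):
# cumulative frequencies over the same variable
# cum_freq <- cumsum(table(dados$grau_instrucao))
# cum_freq_rel <- cumsum(prop.table(table(dados$grau_instrucao)))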
|
2471a25b1769c79e9e6bd7be9e04f29423011990 | fc41c0a908e332b2dc021f5aca4a17ca7d58803e | /R/round2.R | 16d91f2b64f672c2ad4ae18b0e9bdbed7d37a76c | [] | no_license | MHS-R/mhs | 92ba9bb646a1f55fcf06638773cad714547de108 | b7593333dc638634eb9adcc3f9fec8f4644d9590 | refs/heads/master | 2020-01-27T10:00:19.252327 | 2019-03-11T14:23:00 | 2019-03-11T14:23:00 | 66,571,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 398 | r | round2.R | #' Round half up
#'
#' R's default round() function does banker's rounding. This round2 function rounds a number half up.
#'
#' @param x number
#' @param digits numeric; a number specifying the number of digits to round to.
#' @export round2
round2 = function(x, digits = 0) {
posneg = sign(x)
z = abs(x) * 10^digits
z = z + 0.5
z = trunc(z)
z = z/10^digits
z * posneg
}
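# Illustration (editor's addition, kept outside the roxygen block):
# round(0.5)   # 0 -- R rounds half to even
# round2(0.5)  # 1 -- half up
# round(2.5)   # 2
# round2(2.5)  # 3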
|
34a5511f1fb41c68aae0562873be4649e1cc7486 | ecb6ffc6b42a1f7d060e07a32241f6975945a83b | /JAGS/examples/classic-bugs/vol1/leuk/test1.R | 7a0930be4bb347f785465acf0c1cda542efbacf3 | [] | no_license | amandawarlick/IPMWorkshop | a260b987b8d7c44ec4e4bb6a0634ecc778e8b160 | 0e227039b2aa27ee862d92fc07e7be9ff4f43418 | refs/heads/master | 2020-03-19T06:58:51.286981 | 2018-07-14T02:28:46 | 2018-07-14T02:28:46 | 136,073,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 328 | r | test1.R | source("../../R/Rcheck.R")
d <- read.jagsdata("leuk-data.R")
inits <- read.jagsdata("leuk-init.R")
inits$tau <- NULL #not used in this model
m <- jags.model("leuk.bug", d, inits, n.chains=2)
check.data(m, d, skip=c("fail","Y","dN"))
update(m, 1000)
x <- coda.samples(m, "beta", n.iter=2000)
source("bench-test1.R")
check.fun()
|
eb4fa3dd6184d92d2adab1cb8ff00ff2458c99a1 | 442b144ad8aadefe504a14894abff925d70be070 | /all_commands_99_analysis_07.R | 7efeb0470ccfb8a60855ebc39022b6af1fcd9d7a | [] | no_license | RishiDeKayne/Alpine_whitefish_WGS | 20b4eba5590d1d1871f89a23a86dc17b49ddf24b | 6f9e458d26f6f7fab3e9e1c8ca57f4f65eee2fe7 | refs/heads/main | 2023-04-17T15:07:50.163799 | 2022-10-09T11:14:38 | 2022-10-09T11:14:38 | 389,911,930 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,860 | r | all_commands_99_analysis_07.R | ###This script contains commands run for the whole-genome analysis paper for whitefish De-Kayne et al.
#for all bash analyses please look at all_commands_99_analysis.txt
#then the following analyses
# 7. f4 statistics
#Pop1 – brienz
#Pop2 – neuchatel
#Pop3 – Luzern
#Pop4 – walen
#set directory and load background file
setwd("/Users/rishidek/Dropbox/RishiMAC/99_reanalysis/")
background <- read.csv("background_2021_99.csv", header = T)
#read in data
data <- read.csv("f4/f4_output.txt", sep = ";", header = TRUE)
data$number <- 12:1
#arrange in plot so topologies are split up
ylabel_vector_norm <- as.character(data$topology)
ylabel_vector <- rev(ylabel_vector_norm)
par(mar=c(5,12,4,1)+.1)
plot(data$f4, data$number, pch = 16, ylab = '', yaxt='n', xlab = "f4")
axis(2, at = c(1:12), labels=ylabel_vector, las=1, cex = 1)
abline(h=6.5, col = "red", lty = 3)
#now want to illustrate comparison of tree topologies so want same 4 taxon tree rearranged for each combo
lakepattern <- data[1:6,]
lakepattern$newplotnumber <- c(6:1)
lakepattern$colour <- "darkslategray4"
ecotypepattern <- data[7:12,]
ecotypepattern$newplotnumber <- c(6:1)
ecotypepattern$colour <- "goldenrod3"
laketop <- as.character(lakepattern$topology)
lakef4 <- as.character(lakepattern$f4)
ecotop <- as.character(ecotypepattern$topology)
ecof4 <- as.character(ecotypepattern$f4)
sidebyside <- as.data.frame(cbind(laketop, lakef4, ecotop, ecof4))
str(sidebyside)
sidebyside$lakef4 <- as.numeric(as.character(sidebyside$lakef4))
sidebyside$ecof4 <- as.numeric(as.character(sidebyside$ecof4))
#calculate the difference between topologies
sidebyside$difference_in_value <- ((as.numeric(sidebyside$ecof4))-(as.numeric(sidebyside$lakef4)))
#now actually make plot
labels_norm <- c("Brienz-Neuchâtel", "Brienz-Lucerne", "Brienz-Walen", "Neuchâtel-Lucerne", "Neuchâtel-Walen", "Lucerne-Walen")
labels_rev <- rev(labels_norm)
newplotdf <- rbind(lakepattern, ecotypepattern)
par(mar=c(5,8,4,1)+.1)
plot(newplotdf$f4, newplotdf$newplotnumber, pch = 16, ylab = '', yaxt='n', xlab = "f4", col = newplotdf$colour, ylim = c(0.5, 6.5))
axis(2, at = c(1:6), labels=labels_rev, las=1, cex = 1)
abline(h = seq(0.5, 6.5, by = 1), col = "black", lty = 3)
mtext("(Big,Small);(Big,Small)", line = 1, side = 3, adj = 0.15)
mtext("(Big,Big);(Small,Small)", line = 1, side = 3, adj = 0.85)
newplotdf$newcol <- c()
for (i in 1:nrow(newplotdf)){
if(newplotdf$colour[i] == "darkslategray4"){
newplotdf$newcol[i] <- "grey"
}
else{
newplotdf$newcol[i] <- "black"
}
}
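#equivalent vectorised alternative to the loop above (editor's note):
#newplotdf$newcol <- ifelse(newplotdf$colour == "darkslategray4", "grey", "black")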
plot(newplotdf$f4, newplotdf$newplotnumber, pch = 16, ylab = '', yaxt='n', xlab = "f4", col = newplotdf$newcol, ylim = c(0.5, 6.5))
axis(2, at = c(1:6), labels=labels_rev, las=1, cex = 1)
abline(h = seq(0.5, 6.5, by = 1), col = "black", lty = 3)
mtext("(Balchen,Albeli);(Balchen,Albeli)", line = 1, side = 3, adj = 0.15)
mtext("(Balchen,Balchen);(Albeli,Albeli)", line = 1, side = 3, adj = 0.85)
#raw data
#Pop1 – brienz
#Pop2 – neuchatel
#Pop3 – Luzern
#Pop4 – walen
#(LGR1,LGR3),(HGR1,HGR3);0.02380
#(LGR1,HGR1),(LGR3,HGR3);0.01021
#(LGR1,LGR4),(HGR1,HGR4);0.02480
#(LGR1,HGR1),(LGR4,HGR4);0.00868
#(LGR2,LGR3),(HGR2,HGR3);0.02067
#(LGR2,HGR2),(LGR3,HGR3);0.00732
#(LGR2,LGR4),(HGR2,HGR4);0.01697
#(LGR2,HGR2),(LGR4,HGR4);0.00560
#(LGR1,LGR2),(HGR1,HGR2);0.02063
#(LGR3,LGR4),(HGR3,HGR4);0.02035
#(LGR3,HGR3),(LGR4,HGR4);0.01272
#(LGR1,HGR1),(LGR2,HGR2);0.00325
|
9011bfe9b9fc8d5775bb2d2a0e61082de4bc88ed | 1b031918c16ba0dd5ef1939a6f860ccfd5a3e238 | /R/plantGrowth.R | 19b507a8a6e314007c5ce8fcc04030df9d816052 | [] | no_license | dgreenhoe/doc-building-blocks | 6e6434a436c1b53e7b96d15a4c4369255f9bc472 | 38784af9ece0e32bdf96c2656ee63b86903ca2a3 | refs/heads/master | 2023-08-10T14:13:12.766248 | 2023-07-23T18:34:49 | 2023-07-23T18:34:49 | 126,101,159 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,504 | r | plantGrowth.R | #============================================================================
# Daniel J. Greenhoe
# R script file
# setwd("c:/dan/personal/r/R");
# source("plantGrowth.R");
# Reference: https://math.stackexchange.com/questions/3990086/
#============================================================================
#---------------------------------------
# packages
#---------------------------------------
#install.packages("stats");
#install.packages("R.utils");
#install.packages("rootSolve");
require(stats);
require(R.utils);
require(rootSolve);
rm(list=objects());
#---------------------------------------
# Data
#---------------------------------------
tdata = c(0:10)
ydata = c(18,33,56,90,130,170,203,225,239,247,251)
t = seq( from=min(tdata), to=max(tdata), length=1000 )
#---------------------------------------
# Estimate Function N(t)
#---------------------------------------
N0 = ydata[1]
N = function(t,N0,Nh,a0)
{
result = Nh / ( 1 + (Nh/N0-1)*exp(-a0*t) )
}
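#---------------------------------------
# Quick sanity checks (editor's addition): the logistic form gives N(0) = N0
# and approaches Nh as t grows, e.g. with illustrative values:
# N(0, N0 = 18, Nh = 250, a0 = 0.5)    # 18
# N(100, N0 = 18, Nh = 250, a0 = 0.5)  # ~250
#---------------------------------------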
#---------------------------------------
# Cost Function
#---------------------------------------
cost = function(N0,Nh,a0)
{
summ = 0;
for (i in c(1:length(tdata)))
{
summ = summ + ( N(tdata[i],N0,Nh,a0) - ydata[i] )^2
}
result = summ
}
#---------------------------------------
# Partial derivative with respect to a0 of Cost Function
#---------------------------------------
Pcosta0 = function(N0, Nh, a0)
{
summ = 0;
for (i in c(1:length(tdata)))
{
summ = summ + ( N(tdata[i],N0,Nh,a0) )^2 *
( N(tdata[i],N0,Nh,a0) - ydata[i] ) *
( tdata[i] * exp(-a0*tdata[i]) )
}
result = summ
}
#---------------------------------------
# Partial derivative with respect to Nh of Cost Function
#---------------------------------------
PcostNh = function(N0, Nh, a0)
{
summ = 0;
for (i in c(1:length(tdata)))
{
summ = summ + ( 1 - exp(-a0*tdata[i]) ) *
( N(tdata[i],N0, Nh, a0) )^2 *
( N(tdata[i],N0, Nh, a0) - ydata[i] )
}
result = summ
}
#---------------------------------------
# Partial derivative with respect to N0 of Cost Function
#---------------------------------------
PcostN0 = function(N0, Nh, a0)
{
summ = 0;
for (i in c(1:length(tdata)))
{
summ = summ + ( exp(-a0*tdata[i]) ) *
( N(tdata[i],N0, Nh, a0) )^2 *
( N(tdata[i],N0, Nh, a0) - ydata[i] )
}
result = summ
}
#---------------------------------------
# Partial derivative vector of cost
#---------------------------------------
Pcost = function(x)
{
N0 = x[1]
Nh = x[2]
a0 = x[3]
F1 = Pcosta0( N0, Nh, a0 );
F2 = PcostNh( N0, Nh, a0 );
F3 = PcostN0( N0, Nh, a0 );
result = c(F1, F2, F3);
}
#---------------------------------------
# Calculate roots
#---------------------------------------
Roots = multiroot( f=Pcost, start=c(ydata[1], ydata[11], 0.6) );
N0 = Roots$root[1]
Nh = Roots$root[2]
a0 = Roots$root[3]
#---------------------------------------
# Display
#---------------------------------------
printf("(N0, Nh, a0) = (%.10f, %.10f, %.10f) with estim.precis=%.2e\n", N0, Nh, a0, Roots$estim.precis )
colors = c( "red" , "blue" );
traces = c( "N(t)", "data" );
plot ( t , N(t, N0, Nh, a0), col=colors[1], lwd=2, type='l', xlab="t", ylab="y", ylim=c(0,max(ydata)+10) )
lines( tdata, ydata , col=colors[2], lwd=5, type='p' )
legend("topleft", legend=traces, col=colors, lwd=3, lty=1:1)
grid()
|
0224349f2c991be9b4f50038791c5372676e64b2 | 51df41598d9789998306f563f0e9b083e4e8c3f5 | /man/EdgeProba.Rd | fddb7fc78999b8f330cdbe9366a72ad8aaf88839 | [] | no_license | Rmomal/EMtree | 92f88a06cd58799750467d31cc524aafaed6d656 | 0358eb8e7f384ebc5227ab02f9f364b692f2657b | refs/heads/master | 2021-06-10T21:19:51.466382 | 2021-04-13T06:58:53 | 2021-04-13T06:58:53 | 166,967,948 | 5 | 2 | null | 2020-03-10T13:12:15 | 2019-01-22T09:42:08 | R | UTF-8 | R | false | true | 507 | rd | EdgeProba.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FunctionsTree.R
\name{EdgeProba}
\alias{EdgeProba}
\title{Computing edge probabilities}
\usage{
EdgeProba(W, verbatim = FALSE)
}
\arguments{
\item{W}{squared weight matrix}
\item{verbatim}{controls verbosity}
}
\value{
Edge conditional probabilities computed directly,
without using the Kirshner function.
}
\description{
Computing edge probabilities
}
\examples{
W = matrix(c(1,1,3,1,1,1,3,1,1),3,3,byrow=TRUE)
EdgeProba(W)
}
|
2da7b0e7f050d7cfa46f03dccea8fc853458c12f | 29585dff702209dd446c0ab52ceea046c58e384e | /fields/R/fields.x.to.grid.R | cf35f59d5731d2a01e60b6375c4070374a5daa48 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,711 | r | fields.x.to.grid.R | # fields is a package for analysis of spatial data written for
# the R software environment .
# Copyright (C) 2016
# University Corporation for Atmospheric Research (UCAR)
# Contact: Douglas Nychka, nychka@ucar.edu,
# National Center for Atmospheric Research, PO Box 3000, Boulder, CO 80307-3000
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the R software environment if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# or see http://www.r-project.org/Licenses/GPL-2
"fields.x.to.grid" <- function(x, nx = 80, ny = 80, xy = c(1, 2)) {
if (is.null(x)) {
stop("Need a an x matrix to determine ranges for grid")
}
M <- ncol(x)
grid.list <- as.list(1:M)
    # add column names
names(grid.list) <- dimnames(x)[[2]]
# cruise through x dimensions and find medians.
for (k in 1:M) {
grid.list[[k]] <- median(x[, k])
}
#
#
# overwrite with sequences for the two variables of surface
xr <- range(x[, xy[1]])
yr <- range(x[, xy[2]])
grid.list[[xy[1]]] <- seq(xr[1], xr[2], , nx)
grid.list[[xy[2]]] <- seq(yr[1], yr[2], , ny)
grid.list
}
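# Example (editor's sketch, not from the package documentation):
# x <- cbind(runif(50), runif(50), runif(50))
# gl <- fields.x.to.grid(x, nx = 10, ny = 10, xy = c(1, 2))
# str(gl)  # columns 1 and 2 become 10-point sequences; column 3 is held at its median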
|
7efa2c1f781b722b9ef0e24d74e04719233aec04 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /mgss/inst/testfiles/MVP_normalfactor_rcpp/AFL_MVP_normalfactor_rcpp/MVP_normalfactor_rcpp_valgrind_files/1615949508-test.R | 8064746f18a8c34f8864ba89343af0ec6e87430e | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 459 | r | 1615949508-test.R | testlist <- list(A = structure(c(2.78698306639832e+233, 4.08354876418797e+233, 4.08354876418806e+233, 6.01347001699907e-154, 4.08354876418797e+233, 4.08354876418797e+233, 4.08354877058483e+233, 3.37974877952032e-154, 6.01347001699907e-154, 1.87725413656227e-09, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)), left = 0L, right = 0L, x = numeric(0))
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result) |
9eb2852d9f0d66f7fe67a6c2417fb6db17453743 | 546907eb5b27d97e554e92564e4d660fef3601c1 | /222830-parametric-bootstrap-testing-for-random-effect-in-glmm.R | e034ceb7d44a630d87608d084b637da7ae0d1692 | [] | no_license | WRobertLong/Stackexchange | ec405b64d31065bfbdb485fa50c2514826e7de4d | ef1e383b32371ed1a6548f9ddfac9b1a9d85fcbb | refs/heads/master | 2020-12-15T01:57:57.989417 | 2020-09-26T14:17:41 | 2020-09-26T14:17:41 | 234,954,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,343 | r | 222830-parametric-bootstrap-testing-for-random-effect-in-glmm.R | m0 <- lmer(Reaction ~ Days + (1 | Subject) + (0 + Days | Subject), sleepstudy)
summary(m0)
str(sleepstudy)
head(sleepstudy)
summary(sleepstudy)
sleepstudy$days_fac <- sleepstudy$Days
sleepstudy$days_fac[sleepstudy$Days == 0] <- "A"
sleepstudy$days_fac[sleepstudy$Days == 1] <- "A"
sleepstudy$days_fac[sleepstudy$Days == 2] <- "A"
sleepstudy$days_fac[sleepstudy$Days == 3] <- "A"
sleepstudy$days_fac[sleepstudy$Days == 4] <- "B"
sleepstudy$days_fac[sleepstudy$Days == 5] <- "B"
sleepstudy$days_fac[sleepstudy$Days == 6] <- "B"
sleepstudy$days_fac[sleepstudy$Days == 7] <- "C"
sleepstudy$days_fac[sleepstudy$Days == 8] <- "C"
sleepstudy$days_fac[sleepstudy$Days == 9] <- "C"
str(sleepstudy)
m0.1 <- lmer(Reaction ~ Days + (1 | Subject) + (0 + days_fac | Subject), sleepstudy)
summary(m0.1)
## alternatively:
mySumm2 <- function(.) {
c(beta=fixef(.),sigma=sigma(.), sig01=sqrt(unlist(VarCorr(.))))
}
set.seed(123)
boo01 <- bootMer(m0, mySumm2, nsim = 100, type="parametric")
(bCI.1 <- boot.ci(boo01, index=3, type=c("norm", "basic", "perc")))
(bCI.2 <- boot.ci(boo01, index=4, type=c("norm", "basic", "perc")))
(bCI.3 <- boot.ci(boo01, index=5, type=c("norm", "basic", "perc")))
dt <- sleepstudy
head(dt)
dt$Y <- dt$Reaction - mean(dt$Reaction)
dt$Y <- dt$Y / sd(dt$Y)
summary(dt$Y)
sd(dt$Y)
foo <- dt$Y > rnorm(nrow(dt),0,1)
cbind(foo,dt)
plot(dt$Reaction,foo)
dt$Y <- foo
#m1 <- glmer(Y ~ Days + (1 | Subject) + (0 + Days | Subject), data=dt, family=binomial(link=logit))
m1 <- glmer(Y ~ Days + (1 | Subject), data=dt, family=binomial(link=logit))
set.seed(123)
#boo02 <- bootMer(m1, mySumm2, nsim = 100, type="parametric")
boo03 <- bootMer(m1, mySumm2, nsim = 100, use.u=TRUE, type="parametric")
#boo04 <- bootMer(m1, mySumm2, nsim = 100, use.u=TRUE, type="semiparametric")
boo03
(bCI.1 <- boot.ci(boo03, index=3, type=c("norm", "basic", "perc")))
(bCI.2 <- boot.ci(boo03, index=4, type=c("norm", "basic", "perc")))
(bCI.3 <- boot.ci(boo03, index=5, type=c("norm", "basic", "perc")))
x<-runif(100,0,10)
f1<-gl(n = 10,k = 10)
f2<-as.factor(rep(1:10,10))
data<-data.frame(x=x,f1=f1,f2=f2)
modmat<-model.matrix(~x,data)
fixed<-c(-0.12,0.35)
rnd1<-rnorm(10,0,0.7)
rnd2<-rnorm(10,0,0.2)
mus<-modmat%*%fixed+rnd1[f1]+rnd2[f2]
data$y<-rpois(100,exp(mus))
m<-glmer(y~x+(1|f1)+(1|f2),data,family="poisson")
boo05 <- bootMer(m, mySumm2, nsim = 100, use.u=TRUE, type="parametric")
(bCI.1 <- boot.ci(boo05, index=3, type=c("norm", "basic", "perc")))
(bCI.2 <- boot.ci(boo05, index=4, type=c("norm", "basic", "perc")))
(bCI.3 <- boot.ci(boo05, index=5, type=c("norm", "basic", "perc")))
rm(list=ls())
require(lme4)
require(boot)
mySumm <- function(.) { s <- sigma(.)
c(beta =getME(., "beta"), sigma = s, sig01 = unname(s * getME(., "theta"))) }
Penicillin$binY <- Penicillin$diameter > mean(Penicillin$diameter)
m0 <- glmer(binY ~ 1 + (1|plate) + (1|sample), Penicillin, family=binomial(link=logit))
boot <- bootMer(m0, mySumm, nsim = 100, use.u=TRUE, type="parametric")
summary(m0)
(bCI.1 <- boot.ci(boot, index=1, type=c("norm", "basic", "perc")))
(bCI.2 <- boot.ci(boot, index=2, type=c("norm", "basic", "perc")))
(bCI.3 <- boot.ci(boot, index=3, type=c("norm", "basic", "perc")))
(bCI.4 <- boot.ci(boot, index=4, type=c("norm", "basic", "perc")))
(bCI.5 <- boot.ci(boot, conf=0.95, index=3, type="norm"))
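# a hand-rolled parametric bootstrap LRT for the 'plate' variance component might
# look like this (editor's sketch, untested; simulate() and refit() are lme4 tools):
# m_null <- glmer(binY ~ 1 + (1|sample), Penicillin, family=binomial(link=logit))
# lrt_obs <- 2 * as.numeric(logLik(m0) - logLik(m_null))
# lrt_sim <- replicate(200, {
#   y <- simulate(m_null)[[1]]
#   2 * as.numeric(logLik(refit(m0, y)) - logLik(refit(m_null, y)))
# })
# mean(lrt_sim >= lrt_obs)  # bootstrap p-value for the random effect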
|
269314fdd863db47eb0008f95ac6864c72c0ddd4 | a8a679a3015eb257bf4a6825593b496ce380a58a | /ThomasZacharyTools/man/farmData.Rd | 87eeb82a189159498cb76be726e876bb4d019fab | [] | no_license | zthomas2222/ThomasTools | 3466b741f4fc8c34df35d94de461caec4b06833f | f70660e7506f20aac73b8831e8f033897660748e | refs/heads/master | 2021-01-25T10:01:00.446433 | 2018-03-07T19:46:05 | 2018-03-07T19:46:05 | 123,335,012 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 511 | rd | farmData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_farmData.R
\docType{data}
\name{farmData}
\alias{farmData}
\title{Example data from Lecture 7}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 9597 rows and 1754 columns.}
\usage{
farmData
}
\description{
Example data from Lecture 7
}
\references{
\url{http://users.stat.umn.edu/~almquist/3811_examples/data.csv}
}
\author{
Zack Almquist \email{almquist@umn.edu}
}
\keyword{datasets}
|
318333c1bab016dc9e3a6e76a1e6b331cefe5271 | 9c6e2ab15a4c32e6ed864807d887be8b5b467db9 | /R/pbpareto2.R | 371ec1eb5525afbe7a0e72c1344ef09ad106d877 | [] | no_license | cran/CaDENCE | 853b37566024fd329a0419b5e643c4b16c4d698e | 7c754e5e408f46aa3704cdf0c06626a52a548c00 | refs/heads/master | 2021-01-23T13:22:25.997825 | 2017-12-05T03:05:17 | 2017-12-05T03:05:17 | 17,678,401 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 396 | r | pbpareto2.R | pbpareto2 <-
function (q, prob, scale, shape)
{
if (length(prob) == 1)
prob <- rep(prob, length(q))
if (length(scale) == 1)
scale <- rep(scale, length(q))
if (length(shape) == 1)
shape <- rep(shape, length(q))
p <- 1 - prob
p[q > 0] <- 1 - prob[q > 0] + prob[q > 0] * ppareto2(q[q >
0], scale = scale[q > 0], shape = shape[q > 0])
p
}
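# (Editor's note) This is the CDF of a Bernoulli-Pareto2 mixture: a point mass of
# (1 - prob) at zero plus prob times the Pareto2 CDF for q > 0; ppareto2() is
# assumed to be provided by the same package. Illustrative call:
# pbpareto2(q = c(0, 1, 5), prob = 0.6, scale = 1, shape = 2)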
|
6739ddd09f1371bdf2e83a6c4ff61dfa62b7877e | eaba1d36412ff88bbeebb8feedf852f994a3d2f7 | /Social Network Analysis Harry Potter.R | 708c6fc4983fbab769bd6d19ffb6027db0f3b934 | [] | no_license | pm867/Harry-Potter-sentiment-Analysis | fec2683d009f886a65137613f7c5a5fb87c313aa | 233ec28d3467c45919706440570a1073d7c24319 | refs/heads/main | 2023-01-13T07:05:56.214423 | 2020-11-02T13:04:13 | 2020-11-02T13:04:13 | 309,370,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,701 | r | Social Network Analysis Harry Potter.R | #########################################################################################
############################### Social Network Analysis #################################
#########################################################################################
##############################
######## load packages #######
##############################
# if the packages below are not installed, then uncomment the install.packages() lines and run them
#install.packages("dplyr")
#install.packages("igraph")
library(dplyr) # dplyr package is used for data manipulation; it uses pipes: %>%
library(igraph) # used to do social network analysis
##############################
##### read the data in R #####
##############################
# it's good practice to set up the working directory
# all the files you read in R or write from R will be in the working directory you set up
# if you copy the path of your file from the folder, change all the \ to /
setwd("your path here")
scripts <- read.csv("Harry Potter Script.csv")
# keep only the first 2 columns to which we will apply Social Network Analysis
scripts <- scripts %>% select(Character1, Character2)
# in the Character2 column, there are some instances in which the character name is not populated
# remove these instances
scripts <- scripts %>% filter(Character2 != "")
# there are 58 different characters in the first column
length(unique(scripts$Character1))
unique(scripts$Character1)
# there are 56 different characters in the second column
length(unique(scripts$Character2))
unique(scripts$Character2)
# in social network analysis, we need 2 types of files:
# conversations: a data frame that shows who talks to whom and how many times
# nodes: a vector that stores all the character names
# in social network analysis, we shouldn't have duplicate rows, while keeping track of how many times a character talks to another
# in the counts column, write how many times characters communicate with each other
conversations <- scripts %>% group_by(Character1, Character2) %>% summarise(counts = n())
# keep just 50 random conversations in this script
set.seed(42) # setting a seed allows us to select the same sample every time
conversations <- conversations[sample(nrow(conversations), 50), ]
# store the character names in a vector
nodes <- c(as.character(conversations$Character1), as.character(conversations$Character2))
nodes <- unique(nodes)
# create the igraph object
# the graph_from_data_frame() function takes in 2 objects:
# edges: who talks to whom
# nodes (vertices): the unique list of all the characters included in the conversations
my_graph <- graph_from_data_frame(d=conversations, vertices=nodes, directed=FALSE)
my_graph # 36 nodes & 50 edges
# view the names of each node
V(my_graph)$name
# view the edges
E(my_graph)
# plot the graph (click on Zoom to see it larger)
plot(my_graph, vertex.label.color = "black")
# try different layouts of plotting the graph
# circle layout
plot(my_graph, vertex.label.color = "black", layout = layout_in_circle(my_graph))
# Fruchterman-Reingold layout
plot(my_graph, vertex.label.color = "black", layout = layout_with_fr(my_graph))
# tree layout
plot(my_graph, vertex.label.color = "black", layout = layout_as_tree(my_graph))
# Create a vector of weights based on the number of conversations each pair has
w1 <- E(my_graph)$counts
# plot the network varying edges by weights
# the thicker the width of the edge, the more conversations that pair has
plot(my_graph,
vertex.label.color = "black",
edge.color = 'black',
edge.width = sqrt(w1), # put w1 in sqrt() so that the lines don't become too wide
layout = layout_nicely(my_graph))
# create a new igraph object by keeping just the pairs that have at least 2 conversations
my_graph_2more_conv <- delete_edges(my_graph, E(my_graph)[counts < 2])
# plot the new graph
plot(my_graph_2more_conv,
vertex.label.color = "black",
edge.color = 'black',
edge.width = sqrt(E(my_graph_2more_conv)$counts),
layout = layout_nicely(my_graph_2more_conv))
# up until this point, we have only displayed undirected graphs
# therefore, the direction of the conversation was not accounted for
# create a new graph that takes into consideration the direction of the conversation
g <- graph_from_data_frame(conversations, directed = TRUE)
g
# Is the graph directed?
is.directed(g)
# plot the directed network; notice the direction of the arrows, they show the direction of the conversation
plot(g,
vertex.label.color = "black",
edge.color = 'orange',
vertex.size = 0,
edge.arrow.size = 0.03,
layout = layout_nicely(g))
# identify all neighbors of 'Harry' regardless of direction
neighbors(g, 'Harry', mode = c('all'))
# identify the nodes that go towards 'Harry'
neighbors(g, 'Harry', mode = c('in'))
# identify the nodes that go from 'Harry'
neighbors(g, 'Harry', mode = c('out'))
# identify any vertices that receive an edge from 'Harry' and direct an edge to 'Hagrid'
n1 <- neighbors(g, 'Harry', mode = c('out'))
n2 <- neighbors(g, 'Hagrid', mode = c('in'))
intersection(n1, n2)
# determine which 2 vertices are the furthest apart in the graph
farthest_vertices(g)
# shows the path sequence between two furthest apart vertices
get_diameter(g)
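# for reference, diameter() returns the length of this longest shortest path
diameter(g)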
# identify vertices that are reachable within two connections from 'Snape'
ego(g, 2, 'Snape', mode = c('out'))
# identify vertices that can reach 'Snape' within two connections
ego(g, 2, 'Snape', mode = c('in'))
# calculate the out-degree of each vertex
# out-degree represents the number of vertices that are leaving from a particular node
g.outd <- degree(g, mode = c("out"))
g.outd
# find the vertex that has the maximum out-degree
which.max(g.outd)
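# complementary check: the in-degree counts edges arriving at each vertex;
# total in-degree always equals total out-degree in a directed graph
g.ind <- degree(g, mode = c("in"))
sum(g.ind) == sum(g.outd)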
# calculate betweenness of each vertex
# betweenness is an index of how frequently the vertex lies on shortest paths between any two vertices
# in the network. It can be thought of as how critical the vertex is to the flow of information
# through a network. Individuals with high betweenness are key bridges between different parts of
# a network.
g.b <- betweenness(g, directed = TRUE)
g.b
# Create plot with vertex size determined by betweenness score
plot(g,
vertex.label.color = 'black',
edge.color = 'black',
vertex.size = sqrt(g.b) / 1.2,
edge.arrow.size = 0.03,
layout = layout_nicely(g))
# geodesic distances of connections around 'Hagrid' (mode = "all" ignores edge direction)
# create a plot of these distances from 'Hagrid'
# this graph will only show those that are within 2 connections of Hagrid
# you can show the maximal number of connections by replacing 2 by diameter(g)
g184 <- make_ego_graph(g, 2, nodes = 'Hagrid', mode = c("all"))[[1]]
g184
# Get a vector of geodesic distances of all vertices from vertex 'Hagrid'
dists <- distances(g184, "Hagrid")
# Create a color palette at least as long as the maximal geodesic distance plus one.
colors <- c("black", "blue", "orange", "red", "green")
# Set color attribute to vertices of network g184.
V(g184)$color <- colors[dists+1]
# Visualize the network based on geodesic distance from vertex 'Hagrid'.
plot(g184,
vertex.label = dists,
vertex.label.color = "white",
vertex.label.cex = .6,
edge.color = 'black',
vertex.size = 7,
edge.arrow.size = .05) |
75a354b2aa7767181f2a1644d4c2a4207be58f52 | 58b911103a4cf74233b726681ddaadf7874a360f | /R/setDesignG.R | 3ab0e5bf8c6b3898c5ac11e79e3a875227a387a7 | [] | no_license | cran/PEIP | 3ebf210cfe3aebd5529a0bf7ffa3197e406e8c61 | 715c8edc83992565dabf8b910b4283cfcf9b1494 | refs/heads/master | 2021-06-26T01:36:10.166393 | 2020-08-29T13:30:03 | 2020-08-29T13:30:03 | 17,713,781 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,607 | r | setDesignG.R | setDesignG<-function()
{
##### create a 94 by 256 design matrix for 2D simple tomography
m = 256;
G = matrix(rep(0, 94*m), ncol=m, nrow=94)
for(i in 1:16)
{
for(j in seq(from= (i-1)*16+1, to= i*16))
{
G[i,j] = 1.;
}
}
### Design matrix for the row scan
for(i in 1:16)
{
for(j in seq(from=i, by=16, to=240+i))
{
G[i+16,j] = 1.;
}
}
### G matrix for the SW to NE diagonal scan, upper part
for(i in 1:16)
{
for(j in 0:(i-1))
{
G[i+32,i+j*15] = sqrt(2.);
}
}
### G matrix for the SW to NE diagonal scan, lower part
for(i in 1:15)
{
for(j in (0:(15-i)))
{
G[i+48,(i+1)*16+j*15] = sqrt(2.);
}
}
### G matrix for the NW to SE diagonal scan, lower part
for(i in 1:16)
{
for(j in 0:(i-1))
{
G[i+63,17-i+17*j] = sqrt(2.);
}
}
### G matrix for the NW to SE diagonal scan, upper part
for(i in 1:15)
{
for(j in 0:(15-i))
{
G[i+79,(i*16)+1+17*j] = sqrt(2.);
}
}
return(G)
}
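# A minimal usage sketch (not part of the package source): build the design
# matrix and confirm its dimensions
# G <- setDesignG()
# dim(G)  # expect 94 x 256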
|
b7571383ddbb1448b3bf37b86512639f640a1117 | ac655728cfed40aacb3686b9a3fd2c26f8facdc0 | /scripts/nconds_analysis/summarize_by_n_tissues.R | 29111375a1bb17f18285b071b144dc5d18ad3bf3 | [] | no_license | jakeyeung/Yeung_et_al_2018_TissueSpecificity | 8ba092245e934eff8c5dd6eab3d265a35ccfca06 | f1a6550aa3d703b4bb494066be1b647dfedcb51c | refs/heads/master | 2020-09-20T12:29:01.164008 | 2020-08-07T07:49:46 | 2020-08-07T07:49:46 | 224,476,307 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,810 | r | summarize_by_n_tissues.R | rm(list=ls())
remove.wfat <- TRUE
library(ggplot2)
library(hash)
library(dplyr)
library(reshape2)
library(gplots)
library(penalizedLDA)
library(wordcloud)
# First source my functions I wrote
funcs.dir <- file.path('scripts', 'functions')
source(file.path(funcs.dir, 'SampleNameHandler.R')) # for shortening sample names
source(file.path(funcs.dir, 'PcaPlotFunctions.R')) # for visualizing PCA, periodoigrams
source(file.path(funcs.dir, 'FourierFunctions.R')) # for periodoigrams
source(file.path(funcs.dir, 'GetTissueSpecificMatrix.R')) # as name says
source(file.path(funcs.dir, "GrepRikGenes.R"))
source(file.path(funcs.dir, "GetTissueTimes.R"))
source(file.path(funcs.dir, "PCAFunctions.R"))
source(file.path(funcs.dir, "LoadAndHandleData.R"))
source(file.path(funcs.dir, "FitRhythmic.R"))
source(file.path(funcs.dir, "PlotGeneAcrossTissues.R"))
source(file.path(funcs.dir, "LoadArray.R"))
source(file.path(funcs.dir, "VarianceFunctions.R"))
source(file.path(funcs.dir, "FitRhythmicAcrossPeriods.R"))
source(file.path(funcs.dir, "GetClockGenes.R"))
source("scripts/functions/NcondsFunctions.R")
source("scripts/functions/NcondsAnalysisFunctions.R")
source("scripts/functions/SvdFunctions.R")
source("scripts/functions/PlotFunctions.R")
source("scripts/functions/OuterComplex.R")
source("scripts/functions/GetTFs.R")
source("scripts/functions/AlternativeFirstExonsFunctions.R")
source("scripts/functions/FisherTestSitecounts.R")
source("scripts/functions/RemoveP2Name.R")
source("scripts/functions/LdaFunctions.R")
source("scripts/functions/LoadActivitiesLong.R")
source("scripts/functions/PlotActivitiesFunctions.R")
source("scripts/functions/RemoveCommasBraces.R")
source("scripts/functions/PlotFunctions.R")
source("scripts/functions/FourierFunctions.R")
source("scripts/functions/HandleMotifNames.R")
source("scripts/functions/NcondsAnalysisFunctions.R")
source("scripts/functions/LongToMat.R")
load(file = "Robjs/dat.long.fixed_rik_genes.Robj")
load("Robjs/fits.best.max_3.collapsed_models.amp_cutoff_0.15.phase_sd_maxdiff_avg.Robj", v=T)
load("Robjs/dat.fit.Robj", v=T); dat.fit.24 <- dat.fit
library(hash)
fits.best$n.rhyth.fac <- as.factor(sapply(as.numeric(fits.best$n.rhyth), function(n) NrhythToStr(n)))
filt.tiss <- c("WFAT")
load("Robjs/dat.complex.fixed_rik_genes.Robj")
if (remove.wfat){
dat.complex <- subset(dat.complex, ! tissue %in% filt.tiss)
}
fits.rhyth <- subset(fits.best, n.params > 0)
fits.rhyth$label <- apply(fits.rhyth, 1, function(row){
cutoff <- 1
if (row[8] > cutoff & row[6] > 0){ # amp.avg > cutoff only for n.rhyth > 1
return(as.character(row[1])) # return gene
} else {
return("")
}
})
# count based on amp
amp.thres <- seq(from = 0, to = max(dat.fit.24$amp), by = 0.15)
fits.best$n.rhyth.lab <- sapply(fits.best$n.rhyth, function(n){
if (n >= 8){
return("8-11")
} else if (n == 1){
return("1")
} else if (n <= 3 & n >= 2){
return("2-3")
} else if (n <= 7 & n >- 4){
return("4-7")
} else {
print(n)
warning("Didnt fit any if statements")
}
})
fits.counts.by.amp <- subset(fits.best, n.rhyth > 0) %>%
group_by(n.rhyth.lab) %>%
do(NGenesByAmp.long(., amp.thres, labelid = "n.rhyth.lab", varid = "amp.avg", outlabel = "n.rhyth.lab"))
ggplot(fits.counts.by.amp, aes(x = 2 * amp.thres, y = n.genes, group = n.rhyth.lab, colour = as.factor(n.rhyth.lab))) + geom_line() +
geom_line(size = 2) +
theme_bw(20) +
labs(colour = "# Rhythmic\nTissues") +
theme(aspect.ratio=1,
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
xlab("Avg Amplitude of Rhythmic Tissues") + ylab("# Genes") + xlim(c(0.15, 6)) +
scale_y_log10(breaks = c(1, 10, 100, 1000)) +
geom_vline(xintercept = 2.8, linetype = "dotted") +
scale_colour_brewer(palette = "Spectral") |
309e42f87ecc24e3e9a095a28266e11753fbeb77 | f60bf36a00296046fd6bdf6a654c7925bdd8d228 | /SusitnaEG/data-raw/chinBEGs.R | fea4da06ed430a3b54639694e5da19a2b1b501ba | [] | no_license | adamreimer/SusitnaEG | 3c5b407ea74a307ef570580ce0bf2f268197c4e2 | 0eaf41a437ae20da102e9cc09be606d2b8f9feed | refs/heads/master | 2021-05-02T15:12:50.502298 | 2020-10-08T19:48:27 | 2020-10-08T19:48:27 | 120,691,401 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | chinBEGs.R | chinBEGs <-
read.csv(".\\SusitnaEG\\data-raw\\chinBEGs.txt",
stringsAsFactors = FALSE) %>%
dplyr::filter(Stock != "Deshka")
devtools::use_data(chinBEGs, pkg = ".\\SusitnaEG", overwrite = TRUE)
|
64d6d30df8648c96fac43ec33417278477f68cb7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/openCR/examples/utility.Rd.R | ef141d623211c26b99275eefa4ebd2264b30a89f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 369 | r | utility.Rd.R | library(openCR)
### Name: JS.counts
### Title: Summarise Non-spatial Open-population Data
### Aliases: JS.counts m.array bd.array
### Keywords: manip
### ** Examples
JS.counts(ovenCH)
m.array(ovenCH)
## probabilities of b,d pairs
fit <- openCR.fit(ovenCH, type = 'JSSAbCL')
beta <- predict(fit)$b$estimate
phi <- predict(fit)$phi$estimate
bd.array(beta, phi)
|
eb21e18321ecfe08763b0fb88ea2171b820bc009 | f5d7ab6d2d174be0b13bc3ad3b29901565b115c5 | /scripts/predict.R | b037899e7bb809c1c2800a83d2c762f862f2510b | [] | no_license | josiahdavis/earl | 7b9a28f69d07b47bb7ec42dbbf2bd27f24244454 | 0b88955931c3613f98847098777274d66bd97ac2 | refs/heads/master | 2020-03-29T10:45:39.837390 | 2015-10-19T16:29:37 | 2015-10-19T16:29:37 | 40,487,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,450 | r | predict.R | # ============================
# SCRIPT FOR PREDICTING THE
# USEFULNESS OF A REVIEW
# ============================
# Identify unigrams and bigrams amongst nouns and adjectives
# Predict the usefulness of the review using this dtm
rm(list = ls()); gc()
library(tm)
library(openNLP)
library(magrittr)
library(randomForest)
library(glmnet)
loc <- '/Users/josiahdavis/Documents/GitHub/earl/'
dr <- read.csv(paste(loc, 'yelp_review.csv', sep=""))
dr <- dr[(dr$industry == "Banking"),]
# Convert to a list of strings
texts <- lapply(dr$text, as.String)
# =====================================
# Identify and reviews to
# only include nouns
# =====================================
# Define types of annotations to perform
tagging_pipeline <- list(
Maxent_Sent_Token_Annotator(),
Maxent_Word_Token_Annotator(),
Maxent_POS_Tag_Annotator()
)
# Define function for performing the annotations
annotate_entities <- function(doc, annotation_pipeline) {
annotations <- annotate(doc, annotation_pipeline)
AnnotatedPlainTextDocument(doc, annotations)
}
# Annotate the texts
texts_annotated <- texts %>% lapply(annotate_entities, tagging_pipeline)
# Define the POS getter function
POSGetter <- function(doc, parts) {
s <- doc$content
a <- annotations(doc)[[1]]
k <- sapply(a$features, `[[`, "POS")
if(sum(k %in% parts) == 0){
""
}else{
s[a[k %in% parts]]
}
}
# Identify the nouns and adjectives (JJ* tags are adjectives, NN* tags are nouns)
nouns <- texts_annotated %>% lapply(POSGetter, parts = c("JJ", "JJR", "JJS", "NN", "NNS", "NNP", "NNPS"))
# Turn each character vector into a single string
nouns <- nouns %>% lapply(as.String)
# =====================================
# Perform text mining
# transformations
# =====================================
# Convert to data frame
d <- data.frame(reviews = as.character(nouns))
# Replace new line characters with spaces
d$reviews <- gsub("\n", " ", d$reviews)
# Convert the relevant data into a corpus object with the tm package
d <- Corpus(VectorSource(d$reviews))
# Convert everything to lower case
d <- tm_map(d, content_transformer(tolower))
# Remove initial list of stopwords
stopwords <- c(stopwords("english"), "bank", "bofa", "boa", "wells",
"fargo", "america", "chase", "thing", "branch", "location",
"locations", "banking", "account")
d <- tm_map(d, removeWords, stopwords)
# Read in list of 5000+ stopwords compiled by Matthew Jockers
fileStopwords <- paste(loc, 'stopwords.txt', sep="")
stopwords <- readChar(fileStopwords, file.info(fileStopwords)$size)
stopwords <- unlist(strsplit(stopwords, split=", "))
# NEED TO MAKE THIS MORE EFFICIENT: removeWords is applied in ~1000-word
# chunks because very long stopword vectors can be slow or fail; see the
# sketch after the loop
for (i in 1:5){
if(i == 1){
start <- 1
}else{
start <- i * 1000
}
if(i < 5){
end <- (i + 1) * 1000
}else{
end <- 5631
}
d <- tm_map(d, removeWords, stopwords[start:end])
}
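# A vectorised alternative (sketch, same effect assumed): derive the chunks
# programmatically instead of hard-coding the boundaries above
# chunks <- split(stopwords, ceiling(seq_along(stopwords) / 1000))
# for (chunk in chunks) d <- tm_map(d, removeWords, chunk)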
# Stem words
d <- tm_map(d, stemDocument)
# Strip whitespace
d <- tm_map(d, stripWhitespace)
# Define a tokenizer producing unigrams and bigrams
BigramTokenizer <- function(x) {
unlist(lapply(ngrams(words(x), c(1, 2)), paste, collapse = " "), use.names = FALSE)
}
# Convert to a document term matrix (rows are documents, columns are words)
dtm <- as.matrix(DocumentTermMatrix(d, control = list(tokenize = BigramTokenizer,
weighting = weightTfIdf)))
idxs <- order(colSums(dtm), decreasing = TRUE)[1:1500]
dtm <- dtm[,idxs]
# ==========================
# CREATE PREDICTIVE MODEL
# ==========================
# Define the model specification
y <- as.factor(dr$votes_useful > 0)
x <- dtm
# Split into test and training examples
idxs <- sample(dim(dtm)[1], 500, replace=FALSE)
xTrain <- x[idxs,]
yTrain <- y[idxs]
xTest <- x[-idxs,]
yTest <- y[-idxs]
# Train a random forest model on the text
m <- randomForest(y = yTrain, x = xTrain, mtry = 70, ntree = 150)
# Train a cross-validated lasso logistic regression on the text
lm <- cv.glmnet(y = yTrain, x = xTrain, family = "binomial")
# Evaluate the prediction accuracy (predict at the CV-selected lambda;
# plain glmnet without a specific s would return one column per lambda)
p <- predict(lm, xTest, type = "class", s = "lambda.min")
sum(yTest == TRUE) / length(yTest)
sum(p == yTest) / length(yTest)
sum(p[which(yTest == TRUE)] == yTest[which(yTest == TRUE)]) /
length(yTest[which(yTest == TRUE)])
sum(p[which(yTest == FALSE)] == yTest[which(yTest == FALSE)]) /
length(yTest[which(yTest == FALSE)])
# Create a dataframe
df <- data.frame(importance = m$importance)
df$words <- row.names(m$importance)
row.names(df) <- 1:nrow(df)
head(df, 15)
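# A small follow-up sketch: rank terms by their random-forest importance
# (the first column of df holds the importance measure)
head(df[order(df[[1]], decreasing = TRUE), ], 15)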
# Most frequent words are most associated with service and friendliness |
75d3c1c34aea02149aac2850c8022e049c9db57c | 279277403782c464d08a388e114fa28a41c2bb7c | /man/rob.ncutrange.Rd | 8febd77a8083f1fcf2e111189396c632c71d2432 | [] | no_license | nenaoana/SetMethods | 44be0a8331e51faaff72bd67ad05e1133520646b | eae2232c57c8db822c3cc6c0741d23b16249f0dc | refs/heads/master | 2023-04-07T12:52:41.675428 | 2023-03-31T08:40:13 | 2023-03-31T08:40:13 | 61,364,860 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,866 | rd | rob.ncutrange.Rd | \name{rob.ncutrange}
\alias{rob.ncutrange}
\title{
Function for identifying the frequency threshold range within which the Boolean formula for the solution does not change.
}
\description{
Function for identifying the frequency threshold range for a truth table within which the Boolean formula for the solution does not change. The function gradually increases and then decreases an initial selected threshold by the value specified in the step argument and checks whether the solution formula changes, in order to find the lower and upper limits of the frequency threshold range. The function performs this iteration the number of times specified in the max.runs argument. If the solution formula does not change within the specified number of runs, it returns NA, meaning that no limit to the range could be found.
}
\usage{
rob.ncutrange(data,
step = 1,
max.runs = 20,
outcome,
conditions,
incl.cut = 1,
n.cut = 1,
include = "",
...)
}
\arguments{
\item{data}{A data frame containing the calibrated data for the sufficient solution.
}
\item{step}{The value to be gradually added and subtracted from the threshold tested.
}
\item{max.runs}{The maximum number of times the step value gets gradually added and subtracted.
}
\item{outcome}{A character string with the name of the outcome in capital letters. For the negated outcome a tilde "~" should be used. This has the same usage as the outcome argument in the minimize function.
}
\item{conditions}{
A vector of character strings containing the names of the conditions. This has the same usage as the conditions argument in the minimize function.
}
\item{incl.cut}{
The raw consistency threshold for the truth table rows.
}
\item{n.cut}{
The frequency threshold for the truth table rows.
}
\item{include}{
A vector of other output values (for example "?" for logical remainders) to include in the minimization. This has the same usage as the include argument in the minimize function.
}
\item{...}{
Other options that the minimize function in the QCA package accepts. Check them out using ?minimize.
}
}
\references{
Oana, Ioana-Elena, and Carsten Q. Schneider. 2020. Robustness tests in QCA: A fit-oriented and case-oriented perspective using R. Unpublished Manuscript.
Oana, Ioana-Elena, Carsten Q. Schneider, and Eva Thomann (forthcoming). Qualitative Comparative Analysis (QCA) using R: A Gentle Introduction. Cambridge: Cambridge University Press.
}
\author{
Ioana-Elena Oana
}
\examples{
# Load the calibrated data:
data(PAYF)
# Check frequency ranges:
rob.ncutrange(
data = PAYF,
step = 1,
max.runs = 10,
outcome = "HL",
conditions = c("HE","GG","AH","HI","HW"),
incl.cut = 0.87,
n.cut = 2,
include = "?"
)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{QCA}
\keyword{robustness} |
c9b0ad2bc4fa45414ac9b4031c6e70fd0fedec5a | 7e77b57a2d6e9aa91f16272dfe21518cf794c44d | /code/03_Empirical_Properties_of_Financial_Data/03_Ljung-Box_test_of_serial_independence.R | e3e048f8b70af3036eb167dd89d655aed1ed5160 | [] | no_license | qrmtutorial/qrm | 654ff4553584628c4837944e9986e077743b3dc2 | 7ef6cf15b9f19775dfc1f58fb16d955cb4673de0 | refs/heads/master | 2023-08-17T23:32:33.052610 | 2023-08-08T11:14:12 | 2023-08-08T11:14:12 | 36,744,252 | 224 | 163 | null | null | null | null | UTF-8 | R | false | false | 3,662 | r | 03_Ljung-Box_test_of_serial_independence.R | ## By Alexander McNeil and Marius Hofert
### Setup ######################################################################
library(xts)
library(qrmdata)
library(qrmtools)
### 1 Constituent data #########################################################
## Dow Jones constituent data
data("DJ_const")
## We extract a time period and remove the time series 'Visa' (as it only has
## a very short history in the index)
DJdata <- DJ_const['2006-12-29/2015-12-31',-which(names(DJ_const) == "V")]
## Use plot for zoo objects to get multiple plots
plot.zoo(DJdata, xlab = "Time", main = "DJ component series (without Visa)")
## Build log-returns and aggregate to obtain monthly log-returns
X <- returns(DJdata) # could also work with negative log-returns
X.m <- apply.monthly(X, FUN = colSums)
### 2 Ljung--Box tests of serial independence of stationary data ###############
## Compute (lists of) Ljung--Box tests
LB.raw <- apply(X, 2, Box.test, lag = 10, type = "Ljung-Box")
LB.abs <- apply(abs(X), 2, Box.test, lag = 10, type = "Ljung-Box") # could also work with squared log-returns
LB.raw.m <- apply(X.m, 2, Box.test, lag = 10, type = "Ljung-Box")
LB.abs.m <- apply(abs(X.m), 2, Box.test, lag = 10, type = "Ljung-Box")
## Extract p-values
p.LB.raw <- sapply(LB.raw, `[[`, "p.value")
p.LB.abs <- sapply(LB.abs, `[[`, "p.value")
p.LB.raw.m <- sapply(LB.raw.m, `[[`, "p.value")
p.LB.abs.m <- sapply(LB.abs.m, `[[`, "p.value")
round(cbind(p.LB.raw, p.LB.abs, p.LB.raw.m, p.LB.abs.m), 2)
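## Note: small p-values for the absolute returns indicate rejection of serial
## independence (volatility clustering), even where the raw returns appear
## serially uncorrelated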
### 3 Reproduce Ljung--Box tests from the QRM book #############################
## Up to minor differences (see below), this reproduces Table 3.1 in the
## QRM book (2015) (see also Table 4.1 in the QRM book (2005)):
## Note: This uses older DJ data from 'QRM'
DJ.QRM <- QRM::DJ
DJ.old <- as.xts(DJ.QRM)
DJ.old <- DJ.old['1993-01-01/2000-12-31']
X.old <- returns(DJ.old)
X.old.m <- apply.monthly(X.old, FUN = colSums)
## Compute (lists of) Ljung--Box tests
LB.raw <- apply(X.old, 2, Box.test, lag = 10, type = "Ljung-Box")
LB.abs <- apply(abs(X.old), 2, Box.test, lag = 10, type = "Ljung-Box")
LB.raw.m <- apply(X.old.m, 2, Box.test, lag = 10, type = "Ljung-Box")
LB.abs.m <- apply(abs(X.old.m), 2, Box.test, lag = 10, type = "Ljung-Box")
## Extract p-values
p.LB.raw <- sapply(LB.raw, `[[`, "p.value")
p.LB.abs <- sapply(LB.abs, `[[`, "p.value")
p.LB.raw.m <- sapply(LB.raw.m, `[[`, "p.value")
p.LB.abs.m <- sapply(LB.abs.m, `[[`, "p.value")
(res <- round(cbind(p.LB.raw, p.LB.abs, p.LB.raw.m, p.LB.abs.m), 2))
## Note: The minor differences to the tables in the book come from a different
## approach. The tables in the book were produced without 'xts' objects:
library(timeSeries) # Caution: returns() now from timeSeries
X.old. <- returns(DJ.QRM)
X.old. <- window(X.old., start = timeDate("1993-01-01"), end = timeDate("2000-12-31"))
X.old.m. <- aggregate(X.old., by = unique(timeLastDayInMonth(time(X.old.))), sum)
## Compute (lists of) Ljung--Box tests
LB.raw. <- apply(X.old., 2, Box.test, lag = 10, type = "Ljung-Box")
LB.abs. <- apply(abs(X.old.), 2, Box.test, lag = 10, type = "Ljung-Box")
LB.raw.m. <- apply(X.old.m., 2, Box.test, lag = 10, type = "Ljung-Box")
LB.abs.m. <- apply(abs(X.old.m.), 2, Box.test, lag = 10, type = "Ljung-Box")
## Extract p-values
p.LB.raw. <- sapply(LB.raw., `[[`, "p.value")
p.LB.abs. <- sapply(LB.abs., `[[`, "p.value")
p.LB.raw.m. <- sapply(LB.raw.m., `[[`, "p.value")
p.LB.abs.m. <- sapply(LB.abs.m., `[[`, "p.value")
## Result
(res. <- round(cbind(p.LB.raw., p.LB.abs., p.LB.raw.m., p.LB.abs.m.), 2))
## Differences
summary(res-res.)
|
bb1815134f02bc94d7b59547840bba1c7ee01c99 | a6af23799b58180495218fec7286f1ed5b80da6b | /Tidyverse Functions/06 Wide and Long Data Handling/06_04 convert long to wide/data-processing-barath.R | b999158ff6d843422d0cbabd1df685cdc142dcf0 | [] | no_license | barathevergreen/tidyverse_DS_toolkit_R | 9368972d06dbfada054addff206904dd0a47c840 | d88d4b34b21877b2b69b4ac83614bde8962b151f | refs/heads/master | 2023-06-12T12:12:24.274974 | 2021-07-09T15:14:03 | 2021-07-09T15:14:03 | 384,352,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 294 | r | data-processing-barath.R | library("tidyverse")
long_monthly_tdiff <- read_csv("data/long_monthly_tdiff.csv")
long_horses_data <- read_csv("data/long_horses_data.csv")
#spread using key and value:
long_monthly_tdiff %>%
spread(airport,avg.tdiff) %>%
view()
long_horses_data %>%
spread(year,horses) %>%
view()
|
caac7c8fba121fc5d9c33f664a39b90820d5c26e | ff469b2d284b46791ca52f35c64d22d3f81ba5f1 | /man/PhdAsy.Rd | 4fca88c2dbd3f2458912a76f3e4df8046884d7ef | [] | no_license | YanHanChen/iNEXTPD2 | fbcd0248e827c90734c9f7e1e4db5cb7ca17b61e | 38613d19c5acc4a0cf11c81a0ce816ab836035c8 | refs/heads/master | 2022-11-29T08:08:28.041885 | 2020-08-15T06:12:52 | 2020-08-15T06:12:52 | 283,961,817 | 2 | 5 | null | 2020-08-13T19:37:39 | 2020-07-31T06:37:05 | R | UTF-8 | R | false | true | 3,647 | rd | PhdAsy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MainFun.R
\name{PhdAsy}
\alias{PhdAsy}
\title{Computes asymptotic estimates for phylogenetic diversity and mean phylogenetic diversity (phylogenetic Hill numbers)}
\usage{
PhdAsy(
data,
nT,
datatype = "abundance",
tree,
q = seq(0, 2, by = 0.25),
reftime = NULL,
type = "PD",
nboot = 50,
conf = 0.95
)
}
\arguments{
\item{data}{a matrix/data.frame of species abundances (for abundance data) or species-by-site incidence raw matrix/data.frame (for incidence data).
See the function \code{\link{iNEXTPD}} for details.}
\item{nT}{needed only when \code{datatype = "incidence_raw"}, a sequence of named nonnegative integers specifying the number of sampling units in each assemblage.
If \code{names(nT)} is \code{NULL}, then assemblages are automatically named "assemblage1", "assemblage2", ..., etc. Ignored if \code{datatype = "abundance"}.}
\item{datatype}{data type of input data: individual-based abundance data (\code{datatype = "abundance"}),
or species-by-site raw incidence matrix (\code{datatype = "incidence_raw"}). Default is \code{"abundance"}.}
\item{tree}{a phylo object describing the phylogenetic tree in Newick format for all observed species in the pooled assemblage.}
\item{q}{a nonnegative value or sequence specifying the diversity order. Default is \code{seq(0, 2, by = 0.25)}.}
\item{reftime}{a positive value or sequence specifying the reference times for diversity computation. If \code{NULL},
then \code{reftime} is set to be the tree depth of the phylogenetic tree, which is spanned by all the observed species in
the pooled assemblage. Default is \code{NULL}.}
\item{type}{desired diversity type: \code{type = "PD"} for Chao et al. (2010) phylogenetic diversity
and \code{type = "meanPD"} for mean phylogenetic diversity (phylogenetic Hill number). Default is \code{"PD"}.}
\item{nboot}{a positive integer specifying the number of bootstrap replications when assessing sampling uncertainty and constructing confidence intervals.
Enter 0 to skip the bootstrap procedures. Default is 50.}
\item{conf}{a positive number < 1 specifying the level of confidence interval. Default is 0.95.}
}
\value{
Returns a table of estimated asymptotic phylogenetic diversity estimates (\code{type = "PD"}) or
phylogenetic Hill numbers (\code{type = "meanPD"}) with respect to specified/default order \code{q} and
reference time specified in the argument \code{reftime}.
}
\description{
Function \code{PhdAsy} computes asymptotic phylogenetic diversity estimates with respect to specified/default
diversity order q and reference time to infer true phylogenetic diversity (PD) or phylogenetic Hill numbers (meanPD). See Chao et al. (2015) and Hsieh and Chao (2017) for the statistical estimation details.
}
\examples{
# Datatype: abundance data
data(data.abu)
data <- data.abu$data
tree <- data.abu$tree
out <- PhdAsy(data = data, datatype = "abundance", tree = tree,
q = seq(0, 2, by = 0.25), nboot = 30)
out
# Datatype: incidence_raw data
data(data.inc)
data <- data.inc$data
tree <- data.inc$tree
nT <- data.inc$nT
out <- PhdAsy(data = data, nT = nT, datatype = "incidence_raw",
tree = tree, q = seq(0, 2, by = 0.25))
out
}
\references{
Chao, A., Chiu, C.-H., Hsieh, T. C., Davis, T., Nipperess, D., and Faith, D. (2015). Rarefaction and extrapolation of phylogenetic diversity. \emph{Methods in Ecology and Evolution}, 6, 380-388.\cr\cr
Hsieh, T. C. and Chao, A. (2017). Rarefaction and extrapolation: making fair comparison of abundance-sensitive phylogenetic diversity among multiple assemblages. \emph{Systematic Biology}, 66, 100-111.
}
|
1f676d4ffa4104c32622a098a9ca6f52a93d65e9 | c3dae5adc7ccc7886a560e0939918fe2ade9124c | /packages/RSuite/R/21_repo_adapter_dir.R | 28f776b33fcc132ecf624d8fd9e98bd4ebf3b772 | [
"Apache-2.0"
] | permissive | gitter-badger/RSuite-1 | c85a20da50942e0d69fa288a6818671217a3aa02 | f5eea2791861f1453cd9ebb1ce549ad6d3884891 | refs/heads/master | 2021-07-12T06:37:12.788730 | 2017-09-30T13:15:49 | 2017-09-30T13:15:49 | 106,171,745 | 0 | 0 | null | 2017-10-08T11:40:09 | 2017-10-08T11:40:08 | null | UTF-8 | R | false | false | 4,470 | r | 21_repo_adapter_dir.R | #----------------------------------------------------------------------------
# RSuite
# Copyright (c) 2017, WLOG Solutions
#
# Repo adapter working on directory.
#----------------------------------------------------------------------------
.is_abs_path <- function(path) {
  # TRUE when every path starts with an optional drive letter followed by a slash
  return(all(grepl("^([A-Za-z]:)?[\\/\\\\]", path)))
}
.can_eventualy_have_rw_access <- function(full_path) {
  # walk up from each path to its nearest existing ancestor directory and test
  # whether that directory is writable (file.access(..., 2) == -1 means no write access)
  unlist(lapply(full_path,
function(base_dir) {
while(!dir.exists(base_dir)) {
base_dir <- dirname(base_dir)
}
return (file.access(base_dir, 2) != -1)
}))
}
#'
#' Creates repo adapter providing repository under path passed.
#'
#' The adapter can use argument which is interpreted as repository path. If not
#' passed default value for repository path is used.
#'
#' @param name under which repo adapter will be registered in RSuite.
#'
#' @return object of type rsuite_repo_adapter_dir
#'
#' @export
#'
repo_adapter_create_dir <- function(name) {
result <- repo_adapter_create_base(name)
result$get_full_path <- function(params, ix = NA) {
path <- params$get_repo_adapter_arg(name, default = "repository", ix = ix)
if (.is_abs_path(path)) {
full_path <- path
} else {
full_path <- file.path(params$prj_path, path)
}
return(rsuite_fullUnifiedPath(full_path))
}
class(result) <- c('rsuite_repo_adapter_dir', class(result))
return(result)
}
#'
#' Implementation of repo_adapter_get_info for rsuite_repo_adapter_dir (repo
#' adapter working on directory).
#'
#' @export
#'
repo_adapter_get_info.rsuite_repo_adapter_dir <- function(repo_adapter, params) {
full_path <- repo_adapter$get_full_path(params)
readonly <- !all(.can_eventualy_have_rw_access(full_path))
prj_local <- all(substring(full_path, 1, nchar(params$prj_path)) == params$prj_path)
return(list(
readonly = readonly,
reliable = prj_local
))
}
#'
#' Implementation of repo_adapter_get_path for rsuite_repo_adapter_dir (repo
#' adapter working on directory).
#'
#' @export
#'
repo_adapter_get_path.rsuite_repo_adapter_dir <- function(repo_adapter, params, ix = NA) {
full_path <- repo_adapter$get_full_path(params, ix)
return(paste0("file:///", full_path))
}
#'
#' Implementation of repo_adapter_create_manager for rsuite_repo_adapter_dir (repo
#' adapter working on directory).
#'
#' @param repo_adapter repo adapter object.
#' @param ... should contain prj of class rsuite_project or path to repository
#' and rver. It also can contain types, a vector of types to be managed
#' (default: .Platform$pkgType)
#'
#' @export
#'
repo_adapter_create_manager.rsuite_repo_adapter_dir <- function(repo_adapter, ...) {
dots <- list(...)
if ("prj" %in% names(dots)) {
prj <- dots$prj
assert(!is.null(prj) && is_prj(prj), "rsuite_project expected for prj")
dots$params <- prj$load_params()
}
if ("params" %in% names(dots)) {
params <- dots$params
assert(!is.null(params) && "rsuite_project_params" %in% class(params),
"rsuite_project_params expected for params")
full_path <- repo_adapter$get_full_path(params)
rver <- params$r_ver
is_rw <- !repo_adapter_get_info.rsuite_repo_adapter_dir(repo_adapter, params)$readonly
types <- c(params$pkgs_type, params$aux_pkgs_type)
} else {
assert(all(c("rver", "path") %in% names(dots)),
paste0("Either prj, params or path and rver must be provided to",
" repo_adapter_create_manager.rsuite_repo_adapter_dir"))
full_path <- dots$path
assert(is_nonempty_char1(full_path), "Non empty character(1) expected for path")
assert(.is_abs_path(full_path), "Absolute expected for path: %s", full_path)
rver <- dots$rver
is_rw <- all(.can_eventualy_have_rw_access(full_path))
if ("types" %in% names(dots)) {
types <- dots$types
assert(is.character(types) & length(types) > 0, "Non empty character(N) expected for types")
assert(all(types %in% c("win.binary", "mac.binary", "binary", "source")),
"Invalid types management requested. Supported types are win.binary, mac.binary, binary, source")
} else {
types <- .Platform$pkgType
}
}
assert(is_rw,
"Repository cannot be managed due to insufficient access permissions")
repo_manager <- repo_manager_dir_create(path = full_path, types = types, rver = rver)
return(repo_manager)
}
|
1f3f93b65e69001e45fd3506d80213ecdefcbf43 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query04_query27_1344n/query04_query27_1344n.R | 37bba0f2d08da65112055b5a22cb285c871776be | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | query04_query27_1344n.R | f14f7ad123c66363569c9877a8dd42a9 query04_query27_1344n.qdimacs 351 583 |
d5c4b52f11eebfc3f6e7d926aba0f8da585c6c10 | 0f37f192db2b523ddaff3747992d5632e3991c77 | /R/apiDF.R | 439883c74ab0718b6589eb745d9f92f7ec38f4fc | [] | no_license | mikeasilva/blsAPI | 42bc2353ab25483cbef2c7e1e4dd719cb4311360 | a125b3a7b8773477443636693623b5e79d20ea5d | refs/heads/master | 2023-05-11T01:44:28.499780 | 2023-05-07T01:05:48 | 2023-05-07T01:05:48 | 22,688,145 | 91 | 28 | null | 2023-05-07T01:05:50 | 2014-08-06T15:39:36 | R | UTF-8 | R | false | false | 850 | r | apiDF.R | # apiDF.R
#
#' @title Creates a data frame from data returned by blsAPI
#' @description Used in the laus_get_data function
#' @param data The parsed JSON used to extract the data gathered from the blsAPI function
#' @return Returns a data frame of the data requested by the blsAPI function call
#' @export apiDF
#' @import rjson
#' @examples
#' library(blsAPI)
#' library(rjson)
#' response <- blsAPI('LAUCN040010000000005')
#' json <- fromJSON(response)
#' df <- apiDF(json$Results$series[[1]]$data)
#'
apiDF <- function(data){
df <- data.frame(year=character(),
period=character(),
periodName=character(),
value=character(),
stringsAsFactors=FALSE)
i <- 0
for(d in data){
i <- i + 1
df[i,] <- c(d$year, d$period, d$periodName, d$value)
}
return(df)
} |
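# A vectorised alternative (sketch; assumes every list element carries the
# same four fields):
# do.call(rbind, lapply(data, function(d)
#   data.frame(year = d$year, period = d$period, periodName = d$periodName,
#              value = d$value, stringsAsFactors = FALSE)))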
03030513eb7f3ea23a7c64b3a43541c12abd3b9f | b15a8b5fe7eae0e6c6a21a1764c8db559279b106 | /run_analysis.R | 80371628d1d59286e97165bae9362a271368f555 | [] | no_license | yacoan/datasciencecoursera | 9056deb61194bd000a6bdeaf472dc873c6d976b0 | ea15528300d421677079b06da05bb27010af0841 | refs/heads/master | 2016-09-05T20:01:36.781600 | 2014-04-28T12:24:47 | 2014-04-28T12:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,214 | r | run_analysis.R | #setwd("C:/Users/js1/Dropbox/SAS/Courses/Coursera/Getting and Cleaning Data/3 Lesson/UCI HAR Dataset")
# Source of data for the project:
# Getting and Cleaning Data Assignment
#Preparation Step, Read Test and Train Data
subject_test <- read.table("./test/subject_test.txt",header=FALSE,sep="")
subject_train <- read.table("./train/subject_train.txt",header=FALSE,sep="")
Data_Test <- read.table("./test/X_test.txt",header=FALSE,sep="")
Data_Train <- read.table("./train/X_train.txt",header=FALSE,sep="")
Label_Test <- read.table("./test/y_test.txt",header=FALSE,sep="")
Label_Train <- read.table("./train/y_train.txt",header=FALSE,sep="")
#Uses descriptive activity names to name the activities in the data set
activity_names <- read.table("./activity_labels.txt",header=FALSE,sep="")
Label_Test$V1 <- factor(Label_Test$V1 , levels=activity_names$V1, labels = activity_names$V2)
Label_Train$V1 <- factor(Label_Train$V1, levels=activity_names$V1, labels = activity_names$V2)
#Appropriately labels the data set with descriptive activity names.
feature_names <- read.table("./features.txt",header=FALSE,colClasses="character")
#head(feature_names)
colnames(Data_Test) <- feature_names$V2; #names(Data_Test)
colnames(Data_Train) <- feature_names$V2; #names(Data_Train)
colnames(subject_test) <- c("Subject"); #names(subject_test)
colnames(subject_train) <- c("Subject"); #names(subject_train)
colnames(Label_Test) <- c("Activity"); #names(Label_Test)
colnames(Label_Train) <- c("Activity"); #names(Label_Train)
#Merges the training and the test sets to create one data set.
All <- rbind(cbind(subject_train,Label_Train,Data_Train),cbind(subject_test,Label_Test,Data_Test))
#Computes the mean and standard deviation for each measurement.
All_mean <- sapply(All[feature_names$V2],mean,na.rm=TRUE)
All_std <- sapply(All[feature_names$V2],sd,na.rm=TRUE)
#Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(data.table)
temp <- data.table(All)
TidyData <- temp[, lapply(.SD, mean), by=c("Subject", "Activity")]
#Print file
write.table(TidyData, file="./tidydata.txt", sep="\t", row.names=FALSE)
|
d3efbfc9992c0590839147d3bf467b3bdb075027 | a214e706c875e0af7221c0c9ae193d9d93ee20a7 | /R/PureCN.R | 7c6abfff6e00babb8b9e98d3cbf8edb5858aee97 | [] | no_license | inambioinfo/bioinformatics_scripts | fa2292e91ad4134204a09ace27c8a91ae70fa34c | 3a23611f382b7f3dd60e5e2abe841b84408c0d44 | refs/heads/master | 2020-03-20T21:17:10.163061 | 2017-03-28T23:41:39 | 2017-03-28T23:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 539 | r | PureCN.R |
#RTFM
#http://bioconductor.org/packages/devel/bioc/vignettes/PureCN/inst/doc/PureCN.pdf
## try http:// if https:// URLs are not supported
source("https://bioconductor.org/biocLite.R")
biocLite("PureCN")
library("PureCN")
browseVignettes("PureCN")
bam.file <- system.file("extdata", "ex1.bam", package="PureCN", mustWork = TRUE)
interval.file <- system.file("extdata", "ex1_intervals.txt", package="PureCN", mustWork = TRUE)
calculateBamCoverageByInterval(bam.file=bam.file, interval.file=interval.file, output.file="ex1_coverage.txt")
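# A quick look at the resulting coverage table (hypothetical follow-up; the
# exact column layout depends on the PureCN version)
# head(read.delim("ex1_coverage.txt"))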
|
6863c5a59ab40e30287a9538854181dd972bd15c | 9f8a019d4d19bc763821361499694b606172f7f0 | /man/collate_roclet.Rd | f5f01226b9d6d44f6e9cd19f017a3cd0f7198025 | [] | no_license | miraisolutions/roxygen2 | 92111663d2cdb1dd5eb73ff933e10c6575078909 | 157e8913036954c6216354ef4aa6d2660b6ef95f | refs/heads/master | 2020-12-25T10:35:39.069369 | 2013-06-12T08:41:15 | 2013-06-12T08:41:15 | 10,638,946 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 787 | rd | collate_roclet.Rd | \name{collate_roclet}
\alias{collate_roclet}
\title{Roclet: make Collate field in DESCRIPTION.}
\usage{
collate_roclet()
}
\value{
Rd roclet
}
\description{
Topologically sort R files and record in Collate field.
}
\details{
Each \code{@include} tag should specify the filename of
one intrapackage dependency; multiple \code{@include}
tags may be given.
}
\examples{
#' `example-a.R', `example-b.R' and `example-c.R' reside
#' in the `example' directory, with dependencies
#' a -> {b, c}. This is `example-a.R'.
#' @include example-b.R
#' @include example-c.R
NULL
roclet <- collate_roclet()
\dontrun{
roc_proc(roclet, dir('example'))
roc_out(roclet, dir('example'), "example")
}
}
\seealso{
Other roclets: \code{\link{namespace_roclet}},
\code{\link{rd_roclet}}
}
|
4cecf274f500f86b7e8b358f49e0813a03b8c2df | 51e1c0fe739c69ee375b8609e707e553f588bc01 | /Referrals.R | 158bf336df163bd360d2c0c0a6a930d859fa5af3 | [] | no_license | richgillett/WalesReferrals | fe56cc629410b26c161c2baba902038ec975fbe8 | e764f9fd8a696d5f2a71f03e3f8b0cff2fe15485 | refs/heads/master | 2020-09-10T05:44:09.925187 | 2016-08-29T20:03:48 | 2016-08-29T20:03:48 | 66,875,467 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,458 | r | Referrals.R | # Referrals in NHS Wales, Rich Gillett
# https://statswales.gov.wales/v/BpON
# you'll have to export as csv from StatsWales, and clean the dates
# remove any summary totals
#rm(list = ls())
setwd("C:/Users/Rich/Documents/R/Referrals")
#read the data which is from statswales
raw <- read.csv("StatsWalesCleaned.csv", header=TRUE, na.strings=c("."))
raw[1:5,1:5]
require(plyr)
require(forecast) # needed for forecast() and the forecast plots below
#fix minor import problem
raw <-rename (raw, c("Betsi.Cadwaladr.University.Local.Health.Board"="HB"))
IndividualHB <- subset(raw, select = -c(HB)) # drop the renamed HB column; no row filter is needed
numbercols <-ncol(IndividualHB)
IndividualHBts <- ts(IndividualHB,start=c(2012,4), end=c(2016,6), frequency=12)
forecastperiod <- 24
fcast <- matrix(NA, nrow=forecastperiod, ncol(IndividualHB))
fcastcolnames <- colnames(IndividualHB)
# export the forecast values (csv) and plot (png) for each series
for(i in 1:numbercols) {
fcast[,i] <- forecast(IndividualHBts[,i], h=forecastperiod, robust=TRUE)$mean
  write(fcast[,i], file=paste0(fcastcolnames[i], ".csv"), sep=",", ncolumns=1)
  png(filename=paste(fcastcolnames[i],"plot.png"),
units="in",
width=5,
height=4,
pointsize=12,
res=72)
  plot(forecast(IndividualHBts[,i],
h=forecastperiod,
robust=TRUE),
main=fcastcolnames[i])
dev.off()
}
|
2bf3e873704a467dd047bf5528161f5d5488a119 | 69668ad25d79800fed0a6efcf6fc8b0639d34e9c | /Public/Salmon/Salmon.R | d06eaaeb81536d86527084223ccc64ede6f9a3a0 | [
"MIT"
] | permissive | plbaldoni/epigraHMMPaper | 96dc6f18997a42ad8797acd17d86ae1712caa8ef | 93cab7c12bac3ca96c5d4e23cbe5c7aead1f1bc4 | refs/heads/main | 2023-03-30T20:27:08.611206 | 2021-04-10T10:51:02 | 2021-04-10T10:51:02 | 301,452,680 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,901 | r | Salmon.R | # Creating transcriptome index (from https://hbctraining.github.io/Intro-to-rnaseq-hpc-salmon/lessons/04_quasi_alignment_salmon.html)
if(!file.exists(file.path('Homo_sapiens.GRCh37.cdna.all.fa.salmon.index','versionInfo.json'))){
system('mkdir Homo_sapiens.GRCh37.cdna.all.fa.salmon.index')
system('salmon index -t ../../Data/transcriptome/Homo_sapiens.GRCh37.75.cdna.all.fa -i Homo_sapiens.GRCh37.cdna.all.fa.salmon.index')
}
# Run Salmon
cellline <- c('Helas3','Hepg2','Huvec','H1hesc')
names(cellline) <- c('Encode_helas3','Encode_hepg2','Encode_huvec','Encode_h1hesc')
for(i in seq_len(length(cellline))){
system(paste('mkdir',paste0(cellline[i])))
cmd = paste('salmon quant --gcBias -i Homo_sapiens.GRCh37.cdna.all.fa.salmon.index -l A -1',
paste0(paste0('../../Data/',names(cellline[i]),'/RNAseq/'),paste0('wgEncodeCshlLongRnaSeq',cellline[i],'CellPapFastqRd1Rep1.fastq.gz')),'-2',
paste0(paste0('../../Data/',names(cellline[i]),'/RNAseq/'),paste0('wgEncodeCshlLongRnaSeq',cellline[i],'CellPapFastqRd2Rep1.fastq.gz')),
'--validateMappings -p 1 -o',paste0(cellline[i],'/Output1'))
if(!file.exists(file.path(paste0(cellline[i],'/Output1'),'quant.sf'))){system(cmd)}
cmd = paste('salmon quant --gcBias -i Homo_sapiens.GRCh37.cdna.all.fa.salmon.index -l A -1',
paste0(paste0('../../Data/',names(cellline[i]),'/RNAseq/'),paste0('wgEncodeCshlLongRnaSeq',cellline[i],'CellPapFastqRd1Rep2.fastq.gz')),'-2',
paste0(paste0('../../Data/',names(cellline[i]),'/RNAseq/'),paste0('wgEncodeCshlLongRnaSeq',cellline[i],'CellPapFastqRd2Rep2.fastq.gz')),
'--validateMappings -p 1 -o',paste0(cellline[i],'/Output2'))
if(!file.exists(file.path(paste0(cellline[i],'/Output2'),'quant.sf'))){system(cmd)}
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Now, summarizing salmon results with tximport
rm(list=ls())
library(EnsDb.Hsapiens.v75)
library(tximport)
library(SummarizedExperiment)
library(DESeq2)
edb = EnsDb.Hsapiens.v75
Tx <- transcripts(edb,return.type='data.frame')
tx2gene = subset(Tx,select=c('tx_id','gene_id'))
cells = c('H1hesc','Helas3','Hepg2','Huvec')
files = NULL
for(i in cells){
cat(i)
files = c(files,list.files(path = i,pattern = '^quant.sf$',recursive = T,full.names = T))
}
for(i in files){
cellname = strsplit(i,'/')[[1]][length(strsplit(i,'/')[[1]])-2]
repname = strsplit(i,'/')[[1]][length(strsplit(i,'/')[[1]])-1]; repname = substr(repname,nchar(repname),nchar(repname))
names(files)[which(files==i)] = paste0(cellname,'.Rep',repname)
}
txi <- tximport(files, type = "salmon", tx2gene = tx2gene,countsFromAbundance='no')
txi.scaled <- tximport(files, type = "salmon", tx2gene = tx2gene,countsFromAbundance='lengthScaledTPM')
### Creating DESeqDataSet
#### Raw
eset.DESeq <- DESeqDataSetFromTximport(txi,colData = data.frame(Cells=unlist(lapply(strsplit(colnames(txi$counts),"\\."), FUN = function(x){x[1]})),
Replicates = unlist(lapply(strsplit(colnames(txi$counts),"\\."), FUN = function(x){substr(x[2],nchar(x[2]),nchar(x[2]))}))),design=~Cells)
rowRanges(eset.DESeq) <- genes(edb)[rownames(eset.DESeq)]
ENCODE.rnaseq.raw = eset.DESeq
seqlevelsStyle(ENCODE.rnaseq.raw) <- 'UCSC'
save(ENCODE.rnaseq.raw,file='ENCODE.rnaseq.raw.RData',compress = 'xz')
#### Scaled
eset.DESeq.scaled <- DESeqDataSetFromTximport(txi.scaled,
colData = data.frame(Cells=unlist(lapply(strsplit(colnames(txi.scaled$counts),"\\."), FUN = function(x){x[1]})),
Replicates = unlist(lapply(strsplit(colnames(txi.scaled$counts),"\\."), FUN = function(x){substr(x[2],nchar(x[2]),nchar(x[2]))}))),design=~Cells)
rowRanges(eset.DESeq.scaled) <- genes(edb)[rownames(eset.DESeq.scaled)]
ENCODE.rnaseq.scaled = eset.DESeq.scaled
seqlevelsStyle(ENCODE.rnaseq.scaled) <- 'UCSC'
save(ENCODE.rnaseq.scaled,file='ENCODE.rnaseq.scaled.RData',compress = 'xz')
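# A downstream sketch (not run here): differential expression between cell
# lines with DESeq2, using the ~Cells design specified above; the contrast
# levels are taken from the colData built above
# dds <- DESeq(ENCODE.rnaseq.raw)
# res <- results(dds, contrast = c("Cells", "Helas3", "Hepg2"))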
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Now, I want to check whether gene bodies intersect with ChIP-seq peaks
rm(list=ls())
library(SummarizedExperiment)
library(AnnotationHub)
library(GenomicRanges)
library(rtracklayer)
library(bamsignals)
library(csaw)
library(EnsDb.Hsapiens.v75)
library(tximport)
library(DESeq2)
dirchip = '../../Data/'
chromosome = c(paste0('chr',1:22),'chrX')
# ChIP-seq data
chip.ezh2 <- c(paste0(dirchip,'Encode_h1hesc/EZH2/wgEncodeBroadHistoneH1hescEzh239875AlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_h1hesc/EZH2/wgEncodeBroadHistoneH1hescEzh239875AlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/EZH2/wgEncodeBroadHistoneHelas3Ezh239875AlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/EZH2/wgEncodeBroadHistoneHelas3Ezh239875AlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/EZH2/wgEncodeBroadHistoneHepg2Ezh239875AlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/EZH2/wgEncodeBroadHistoneHepg2Ezh239875AlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/EZH2/wgEncodeBroadHistoneHuvecEzh239875AlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/EZH2/wgEncodeBroadHistoneHuvecEzh239875AlnRep2.markdup.q10.sorted.bam'))
chip.h3k4me3 <- c(paste0(dirchip,'Encode_h1hesc/H3K4me3/wgEncodeBroadHistoneH1hescH3k4me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_h1hesc/H3K4me3/wgEncodeBroadHistoneH1hescH3k4me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K4me3/wgEncodeBroadHistoneHelas3H3k4me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K4me3/wgEncodeBroadHistoneHelas3H3k4me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K4me3/wgEncodeBroadHistoneHepg2H3k4me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K4me3/wgEncodeBroadHistoneHepg2H3k4me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K4me3/wgEncodeBroadHistoneHuvecH3k4me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K4me3/wgEncodeBroadHistoneHuvecH3k4me3StdAlnRep2.markdup.q10.sorted.bam'))
chip.h3k27ac <- c(paste0(dirchip,'Encode_h1hesc/H3K27ac/wgEncodeBroadHistoneH1hescH3k27acStdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_h1hesc/H3K27ac/wgEncodeBroadHistoneH1hescH3k27acStdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K27ac/wgEncodeBroadHistoneHelas3H3k27acStdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K27ac/wgEncodeBroadHistoneHelas3H3k27acStdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K27ac/wgEncodeBroadHistoneHepg2H3k27acStdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K27ac/wgEncodeBroadHistoneHepg2H3k27acStdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K27ac/wgEncodeBroadHistoneHuvecH3k27acStdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K27ac/wgEncodeBroadHistoneHuvecH3k27acStdAlnRep2.markdup.q10.sorted.bam'))
chip.h3k36me3 <- c(paste0(dirchip,'Encode_h1hesc/H3K36me3/wgEncodeBroadHistoneH1hescH3k36me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_h1hesc/H3K36me3/wgEncodeBroadHistoneH1hescH3k36me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K36me3/wgEncodeBroadHistoneHelas3H3k36me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K36me3/wgEncodeBroadHistoneHelas3H3k36me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K36me3/wgEncodeBroadHistoneHepg2H3k36me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K36me3/wgEncodeBroadHistoneHepg2H3k36me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K36me3/wgEncodeBroadHistoneHuvecH3k36me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K36me3/wgEncodeBroadHistoneHuvecH3k36me3StdAlnRep2.markdup.q10.sorted.bam'))
chip.h3k27me3 <- c(paste0(dirchip,'Encode_h1hesc/H3K27me3/wgEncodeBroadHistoneH1hescH3k27me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_h1hesc/H3K27me3/wgEncodeBroadHistoneH1hescH3k27me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K27me3/wgEncodeBroadHistoneHelas3H3k27me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_helas3/H3K27me3/wgEncodeBroadHistoneHelas3H3k27me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K27me3/wgEncodeBroadHistoneHepg2H3k27me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_hepg2/H3K27me3/wgEncodeBroadHistoneHepg2H3k27me3StdAlnRep2.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K27me3/wgEncodeBroadHistoneHuvecH3k27me3StdAlnRep1.markdup.q10.sorted.bam'),
paste0(dirchip,'Encode_huvec/H3K27me3/wgEncodeBroadHistoneHuvecH3k27me3StdAlnRep2.markdup.q10.sorted.bam'))
# Loading blacklisted regions
load('../../Data/hg19/human.hg19.ranges.blacklist.RData')
# Loading genes
load('ENCODE.rnaseq.raw.RData')
ENCODE.rnaseq.raw.subset <- ENCODE.rnaseq.raw[seqnames(ENCODE.rnaseq.raw)%in%chromosome]
# Mapping ChIP-seq reads
chipEzh2 = list()
chipH3K4me3 = list()
chipH3K27ac = list()
chipH3K36me3 = list()
chipH3K27me3 = list()
for(i in 1:length(chip.ezh2)){
if(!is.na(chip.ezh2[i])){
cat('ChIP-seq data: ',chip.ezh2[i],'\n')
fleng <- maximizeCcf(correlateReads(chip.ezh2[i],param=readParam(discard=hg19.discard)))
cat('Fragment length: ',fleng,'\n')
chipEzh2[[i]] <- bamCount(bampath = chip.ezh2[i],gr = rowRanges(ENCODE.rnaseq.raw.subset),shift = fleng/2) # Mapping onto gene bodies
} else{
chipEzh2[[i]] <- rep(NA,length(rowRanges(ENCODE.rnaseq.raw.subset)))
}
}
for(i in 1:length(chip.h3k4me3)){
if(!is.na(chip.h3k4me3[i])){
cat('ChIP-seq data: ',chip.h3k4me3[i],'\n')
fleng <- maximizeCcf(correlateReads(chip.h3k4me3[i],param=readParam(discard=hg19.discard)))
cat('Fragment length: ',fleng,'\n')
chipH3K4me3[[i]] <- bamCount(bampath = chip.h3k4me3[i],gr = promoters(rowRanges(ENCODE.rnaseq.raw.subset)),shift = fleng/2) # Mapping onto gene promoters
} else{
chipH3K4me3[[i]] <- rep(NA,length(rowRanges(ENCODE.rnaseq.raw.subset)))
}
}
for(i in 1:length(chip.h3k27ac)){
if(!is.na(chip.h3k27ac[i])){
cat('ChIP-seq data: ',chip.h3k27ac[i],'\n')
fleng <- maximizeCcf(correlateReads(chip.h3k27ac[i],param=readParam(discard=hg19.discard)))
cat('Fragment length: ',fleng,'\n')
chipH3K27ac[[i]] <- bamCount(bampath = chip.h3k27ac[i],gr = promoters(rowRanges(ENCODE.rnaseq.raw.subset)),shift = fleng/2) # Mapping onto gene promoters
} else{
chipH3K27ac[[i]] <- rep(NA,length(rowRanges(ENCODE.rnaseq.raw.subset)))
}
}
for(i in 1:length(chip.h3k36me3)){
if(!is.na(chip.h3k36me3[i])){
cat('ChIP-seq data: ',chip.h3k36me3[i],'\n')
fleng <- maximizeCcf(correlateReads(chip.h3k36me3[i],param=readParam(discard=hg19.discard)))
cat('Fragment length: ',fleng,'\n')
chipH3K36me3[[i]] <- bamCount(bampath = chip.h3k36me3[i],gr = rowRanges(ENCODE.rnaseq.raw.subset),shift = fleng/2) # Mapping onto gene bodies
} else{
chipH3K36me3[[i]] <- rep(NA,length(rowRanges(ENCODE.rnaseq.raw.subset)))
}
}
for(i in 1:length(chip.h3k27me3)){
if(!is.na(chip.h3k27me3[i])){
cat('ChIP-seq data: ',chip.h3k27me3[i],'\n')
fleng <- maximizeCcf(correlateReads(chip.h3k27me3[i],param=readParam(discard=hg19.discard)))
cat('Fragment length: ',fleng,'\n')
chipH3K27me3[[i]] <- bamCount(bampath = chip.h3k27me3[i],gr = rowRanges(ENCODE.rnaseq.raw.subset),shift = fleng/2) # Mapping onto gene bodies
} else{
chipH3K27me3[[i]] <- rep(NA,length(rowRanges(ENCODE.rnaseq.raw.subset)))
}
}
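# A sketch of how the five near-identical loops above could be collapsed into
# one helper (hypothetical; assumes the same fragment-length estimation and
# the same target ranges per mark, and omits the NA handling for brevity):
# countMark <- function(bams, gr) {
#   lapply(bams, function(b) {
#     fl <- maximizeCcf(correlateReads(b, param = readParam(discard = hg19.discard)))
#     bamCount(bampath = b, gr = gr, shift = fl/2)
#   })
# }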
ENCODE.chipseq <- ENCODE.rnaseq.raw.subset
assay(ENCODE.chipseq,'EZH2',withDimnames = FALSE) <- do.call(cbind,chipEzh2)
assay(ENCODE.chipseq,'H3K4me3',withDimnames = FALSE) <- do.call(cbind,chipH3K4me3)
assay(ENCODE.chipseq,'H3K27ac',withDimnames = FALSE) <- do.call(cbind,chipH3K27ac)
assay(ENCODE.chipseq,'H3K36me3',withDimnames = FALSE) <- do.call(cbind,chipH3K36me3)
assay(ENCODE.chipseq,'H3K27me3',withDimnames = FALSE) <- do.call(cbind,chipH3K27me3)
save(ENCODE.chipseq,file = 'ENCODE.chipseq.RData',compress = 'xz')
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Now, creating RData element with RefSeq gene bodies to be used when plotting
rm(list=ls())
library("AnnotationHub")
# Loading genome
load('../../Data/Encode_helas3/H3K36me3/wgEncodeBroadHistoneHelas3H3k36me3StdAlnRep1.markdup.q10.sorted.RData')
# Setting up data
counts = subset(counts[[paste0(100)]],chr%in%c(paste0('chr',1:22),'chrX','chrY'),select=c('chr','start','stop'))
gr.counts = with(counts,GRanges(chr, IRanges(start=start, end=stop)))
# Loading genes
ah <- AnnotationHub()
qhs <- query(ah, c("RefSeq", "Homo sapiens", "hg19"))
genes <- qhs[['AH5040']]
gr.genes <- genes[overlapsAny(genes,gr.counts)]
gr.genes <- gr.genes[seqnames(gr.genes)%in%c(paste0('chr',1:22),'chrX','chrY')]
# Saving genes
save(gr.genes,file='UCSC_RefSeq_Hg19_Genes.RData',compress = 'xz')
|
dededd932653a0db70708512d5ddadc307d18d1f | 4faf55f71897a90c19491972bd7f7038ce116272 | /R/methyl_sim.R | 1db283c2da7ce01a6aa4c37f1bd46f8bafeb3623 | [] | no_license | cxystat/AFb | d45ac279e2af482bb4422344bb4aa2e70c937ffc | b58a7dae3d2fa5ab140914b733d712c709447850 | refs/heads/master | 2021-07-03T04:42:09.376279 | 2021-06-15T08:24:48 | 2021-06-15T08:24:48 | 239,228,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,904 | r | methyl_sim.R | ## Data Simulation
##
## @param n Number of subjects.
## @param tot Total length (in base pairs) of the region being tested.
## @param il Length (in base pairs) of CpG island.
## @param prop Proportion of true causal/associated basis.
## @param strg Effect size. Effects are selected from U(-strg, strg)
## distribution.
## @param beta Model used to construct methylation effects, "bs" for B-spline
## basis functions, "fourier" for Fourier basis functions,
## "ar" for AR(1) model. Default is "bs".
## @param weight Weight of baseline methylation levels. Default is 0.9.
## @param p.CG A vector of length three containing CpG site proportions on CpG
## island, shore and desert.
## @param knots A vector of knots for B-spline basis functions. Used only when
## argument beta = "bs".
## @param n.basis Number of Fourier basis functions. Used only when argument
## beta = "fourier".
## @param trans.prob Transition matrix for Markov chain. Used only when
## argument beta = "ar".
## @param seed Random seed.
## @param ... Optional argument for "bs" and "fourier".
##
## @return A list object.
## \describe{
## \item{trait}{A vector of values for a binary trait for n subjects.}
## \item{methyl}{A matrix with methylation levels for n subjects.}
## \item{pos}{Locations of methylation sites.}
## }
## @export
##
## @examples
## K <- 329 # Q1 of CpG island length
## t <- 39139 # Q1 of CpG + gap + gene
## n <- 1000
## p.sig <- 0.2
## delta <- 0.05
##
## bs_dense <- methyl_sim(n, t, K, p.sig, delta,
## beta = "bs", seed = 7)
# methyl_sim <- function(n, tot, il, prop, strg,
# beta = c("bs", "fourier", "ar"),
# weight = 0.9,
# p.CG = c(0.09443, 0.019, 0.012462),
# knots = NULL,
# n.basis = NULL,
# trans.prob = c(0.8, 0.95),
# seed = NULL, ...) {
# source("R/methyl_sim_components.R")
#
# temp <- CG(tot, il, p.CG, seed = seed)
# pos <- temp$pos
#
# designMat <- methylprop(n, tot, pos, weight = weight)
#
# beta <- match.arg(beta)
# FUN <- match.fun(paste0("beta_", beta))
# if (beta == "ar") {
# coefs <- FUN(tot, pos, trans.prob)
# } else {
# if (beta == "fourier") {
# if (is.null(n.basis)) {
# coefs <- FUN(pos, prop, strg, ...)
# } else {
# coefs <- FUN(pos, prop, strg, n.basis, ...)
# }
# } else {
# coefs <- FUN(tot, pos, prop, strg, knots = NULL, ...)
# }
# }
#
# methyl.std <- scale(designMat$methyl, scale = FALSE)
# n.cg <- length(pos)
# hd <- c(pos[2] - pos[1] ,diff(pos, lag = 2)/2,
# pos[n.cg]-pos[n.cg-1])
#
# w.coef <- coefs * hd
# d.prob <- inv.logit(methyl.std %*% w.coef)
# Y <- rbinom(n, 1, d.prob)
#
# result <- list(trait = Y, methyl = designMat$methylobs, pos = pos)
#
# return(result)
#
# }
|
2e28ce554c34d096051a6dc38abf064471bbf146 | 9b21a78292f73898d450253b82f4cb81849cc3b0 | /Cross The Form.R | 0548272fc8ddcf139c2d48a44d64a33b7c77abcc | [] | no_license | gaoyangxihehe/Crawling-Data-By-R | b173a5a7c172b4dbfae9b4b964a12964c9831064 | 0560e218c251648c9e108d0509bade0fc3818fd5 | refs/heads/master | 2021-05-11T03:42:15.516543 | 2018-01-18T10:43:18 | 2018-01-18T10:43:18 | 117,922,720 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 841 | r | Cross The Form.R | formurl <- "http://open.xmu.edu.cn/oauth2/authorize?client_id=1010&response_type=code"
library(rvest) # provides html_session(), html_form(), set_values(), submit_form(), follow_link()
session <- html_session(formurl)
# Create a browsing session
form <- html_form(session)
# Get all the forms on the page, returned as a list
str(form)
form <- form[[1]]
# Extract the form we want
UserName <- "*******" # replace with your own student ID
Password <- "*******" # replace password with your own password
form <- set_values(form,'UserName'=UserName,'Password'=Password)
# Fill in the form contents
out_url <- submit_form(session,form,submit=NULL)
# Submit the form within the session, passing through ("crossing") the form
class(out_url)
session2 <- follow_link(out_url,'Advanced Econometrics')
course.info <- session2 %>% html_nodes("ul.section") %>% html_text()
# Scrape the ul nodes with class "section" and get all the text content of the lists beneath them
cat(course.info[1])
|
95bc3a393b4614d6e303ad763433ad1b34764984 | 69162f720f226384a4f48c8ab5f72af856ff8e6d | /src/Model/DTModelTrain.R | 20eae7196ed0ca04a0ab095877be1ce77f9f027e | [] | no_license | SoftServeSAG/aws_anomaly_detection | 4899fdeb93218fa6f50cda190152b5e2211981ad | ca5c4d924032efe770eaaf238e721fb3d305d58d | refs/heads/master | 2021-09-13T09:40:48.016445 | 2018-04-28T02:00:33 | 2018-04-28T02:00:33 | 107,226,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,793 | r | DTModelTrain.R | dynamicThreshold.train <- function (ts.agg,
periods=NULL,
train.params = list(
mode = c("expert", "simple")[1],
sensitivity = c('High', 'Medium','Low')[2],
params = list
(
agg_th = 0.75,
local_trend = 0.5,
similar = 0.1
)
), type_th = c("Low", "High", "Both")[3])
  # Dynamic Threshold model training wrapper - allows training the model in both "simple" and "expert" modes
  # for all anomaly types
#
# inputs:
# ts.agg - aggregated data list:
# (data.agg - aggregated time series - xts format
# ts_type, ts_val - aggregation parameters)
# type_th - type of thresholds
# periods - set of analyzed periods, if NULL - periods will be detected automatically
  #   train_params - training parameters:
# mode - simple or expert
# sensitivity - High, Medium, Low - for simple mode only
# params - for expert mode
#
  # return :
  #          model = DT Model:
  #                 list( thresholds - thresholds for the shortest period,
  #                       levels - local trends based on all periods,
  #                       period = set of periods,
  #                       max = maximal observed value in the train set,
  #                       initial_time - from train set,
  #                       ts_type - aggregation units,
  #                       ts_val - aggregation step,
  #                       ts_corr - correction for low-thresholds only,
  #                       type_th - thresholds type)
  #          timeseries = time series in appropriate format for further analysis,
# raw_timeseries - time series in initial (xts) format,
# ad_results - anomaly detection results
#
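  # Hypothetical usage sketch (argument shapes as documented above; assumes
  # "simple" mode ignores the expert params list):
  #   model <- dynamicThreshold.train(ts.agg,
  #                                   train.params = list(mode = "simple",
  #                                                       sensitivity = "Medium"),
  #                                   type_th = "Both")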
{
if (length(ts.agg$data.agg)<3)
{
stop("Too small timeseries")
}
ts.agg$data.agg[,1][is.na(ts.agg$data.agg[,1])]=median(ts.agg$data.agg[,1], na.rm = T)
model=NA
if (type_th %in% c("Low", "High"))
{
model = dynamicThreshold.smart_train(ts.agg = ts.agg,
periods = periods,
type_th = tolower(type_th),
train.params = train.params)
}
if (type_th =="Both"){
model = dynamicThreshold.smart_train.two_sided(ts.agg = ts.agg,
periods = periods,
train.params = train.params)
}
return(model)
} |
4a95211c945bb74567e997f7de9230619c12a512 | 04a7e4899d9aac6d1dbb0c37a4c45e5edb4f1612 | /man/latex_color.Rd | 8dc0db0f054d9f60373d7fd6f4d9b3d6630f0a0a | [
"MIT"
] | permissive | pbs-assess/csasdown | 796ac3b6d30396a10ba482dfd67ec157d7deadba | 85cc4dda03d6513c11350f7f607cce1cacb6bf6a | refs/heads/main | 2023-08-16T17:22:18.050497 | 2023-08-16T00:35:31 | 2023-08-16T00:35:31 | 136,674,837 | 47 | 18 | NOASSERTION | 2023-06-20T01:45:07 | 2018-06-08T23:31:16 | R | UTF-8 | R | false | true | 445 | rd | latex_color.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kable-extra-non-exports.R
\name{latex_color}
\alias{latex_color}
\title{latex_color is a \link{kableExtra} non-exported function}
\usage{
latex_color(colors, escape = TRUE)
}
\arguments{
\item{colors}{colors}
\item{escape}{escape}
}
\description{
Included to pass R CMD check cleanly
}
\references{
\url{https://github.com/haozhu233/kableExtra}
}
\keyword{internal}
|
d560444f9aa9da9a743189b3da2fa9c3c39f33b3 | 24679e1521fc631120ad1dfb00371896002ab066 | /preparingData.R | b22855da690221332d3f8d683f803f5a37e5748a | [] | no_license | harrig12/lil-bioinf | 61b412a47f3d1b648317f3eaac7f9ce6e526ab76 | 8115a92a1ded4fe4c1b3b4733c53bb776d00dfe5 | refs/heads/master | 2020-09-03T22:52:36.330962 | 2020-01-22T01:08:14 | 2020-01-22T01:08:14 | 219,593,964 | 0 | 0 | null | 2020-01-19T17:32:50 | 2019-11-04T20:49:33 | R | UTF-8 | R | false | false | 6,994 | r | preparingData.R | # preparingData.R
# Author: Gabriela Morgenshtern
# Date: Jan 2020
# Version 1.0
#
# This script works through downloading, processing, and integrating information from the
# STRING and GOSlim databases on Saccharomyces cerevisiae, in preparation for visualizing
# a network of high-confidence yeast genes with the "mitotic cell cycle" GOSlim annotation.
#
# Adapted from Boris Steipe's material for BCH441
#
# Dataset Access:
# STRING data source:
# Download page: https://string-db.org/cgi/download.pl?species_text=Saccharomyces+cerevisiae
# Data: (20.8 mb) https://string-db.org/download/protein.links.full.v11.0/4932.protein.links.full.v11.0.txt.gz
#
# GOSlim data source:
# Info page: http://geneontology.org/docs/go-subset-guide/
# Data: (3 mb) https://downloads.yeastgenome.org/curation/literature/go_slim_mapping.tab
#
if (!require(readr, quietly = TRUE)) {
install.packages("readr")
}
if (!require(dplyr, quietly = TRUE)) {
install.packages("dplyr")
}
library(readr)
library(dplyr)
# =============================================================================
# 1. Preparing STRING Data
# =============================================================================
#
# STRING has scored information on functional protein-protein interactions
# To read STRING data, it first needs to be downloaded from the online database
onlineData = gzcon(
url(
"https://string-db.org/download/protein.links.full.v11.0/4932.protein.links.full.v11.0.txt.gz"
)
)
STR <- readr::read_delim(onlineData, delim = " ")
#####
# 1.1 Exploring our data
# You can also run these functions prior to subsetting, of course,
# but given the size of the dataset, let's focus on learning only about our columns of interest today
dim(STR) # get number of variables (16), and number of cases (rows, 1,845,966) in your dataset
head(STR) # first 6 rows of data (head() defaults to 6)
dplyr::glimpse(STR) # dplyr's compact transposed preview, similar to str()
summary(STR) # simple stats on each column
#####
#####
# 1.2 How to subset our data:
#
# Filtering for columns of interest:
# Subset only IDs and combined_score column using base R functionality
STR <- STR[, c("protein1", "protein2", "combined_score")]
######
# Same, but using dplyr. dplyr is a worthwhile library to know for
# data science tasks in R, and the select() function has many options
# that make filtering for specific variables (columns) in your dataset easier
# for more options, run ?dplyr::select in your R console
STR <- dplyr::select(STR, starts_with("protein"), combined_score)
######
# subset for the 100,000 highest confidence edges, then take a look at your data
STR <- STR[(STR$combined_score > 920),] # exploratory approach, ~106,000 edges
dplyr::glimpse(STR)
# Using dplyr for more precision:
STR <- dplyr::arrange(STR, desc(combined_score))
STR <- head(STR, 100000)
dplyr::glimpse(STR)
######
# Alternatively, top_n combines the functionality of both arrange() and head()
STR <- dplyr::top_n(STR, 100000, combined_score)
dplyr::glimpse(STR)
######
#####
#####
# 1.3 Clean the data into a more readable format
# IDs are formatted like 4932.YAL005C ... drop the "4932." prefix
# gsub(pattern, replacement, x): Use a regular expression in the pattern input
STR$protein1 <- gsub("^4932\\.", "", STR$protein1)
STR$protein2 <- gsub("^4932\\.", "", STR$protein2)
# View your text-replacement results:
head(STR)
#####
# get a vector of gene names in this list
myIntxGenes <- unique(c(STR$protein1, STR$protein2))
# =============================================================================
# 2. Preparing GOSlim Data
# =============================================================================
#
# GOSlim annotations give a broad overview of GO content
# To read GOSlim data, it first needs to be downloaded from the online database
# TSVs files have is Tab Separated Values, which is a lot like a CSV, just delineated with
# a different (tab) character
onlineData = url("https://downloads.yeastgenome.org/curation/literature/go_slim_mapping.tab")
# NOTE: this call may take 1-2 minutes to complete
Gsl <- read.csv(
onlineData,
col.names = c(
"ID",
"name",
"SGDId",
"Ontology",
"termName",
"termID",
"status"
),
sep = "\t"
)
#####
# 2.1 Exploring our data
head(Gsl)
dplyr::glimpse(Gsl)
summary(Gsl)
# What cell cycle names does it contain?
myGslTermNames <- unique(Gsl$termName)
length(myGslTermNames) # 169 unique terms
myGslTermNames[grep("cycle", myGslTermNames)]
# [1] "regulation of cell cycle" "mitotic cell cycle" "meiotic cell cycle"
#####
#####
# 2.2 Subsetting our data
# Choose "mitotic cell cycle" as the GOslim term to subset with,
# then filter out for duplicate genes in your final list
scCCgenes <- unique(Gsl$ID[Gsl$termName == "mitotic cell cycle"])
# length(scCCgenes) # 324 genes annotated to that term
######
# Alternative with dplyr
scCCgenes2 <- dplyr::select(filter(Gsl, termName == "mitotic cell cycle"), ID)
######
#####
#####
# 2.3 Integrating relevant scCCgenes data into our STR data
#
# How many of our scCCgenes can be found in the high-confidence
# (combined score) STR data we've prepared?
sum(scCCgenes %in% myIntxGenes) # 301 genes have high-confidence interactions
# Define scCCnet: the S. Cervisiae Cell Cycle network
# Subset all rows for which BOTH genes are in the GOslim cell cycle set
scCCnet <- STR[(STR$protein1 %in% scCCgenes) &
(STR$protein2 %in% scCCgenes),]
nrow(scCCnet) # 2455
######
# Alternative with dplyr
scCCnet2 <- dplyr::filter(STR, STR$protein1 %in% scCCgenes, STR$protein2 %in% scCCgenes)
nrow(scCCnet2) # 2455
######
# How many genes are there?
length(unique(c(scCCnet$protein1, scCCnet$protein2))) # 276
# Each edge is listed twice - now remove duplicates.
#
# Step 1: make a vector: sort two names so the first one is alphabetically
#         smaller than the second one. This brings the two names into a defined
# order. Then concatenate them with a "." - the resulting string
# is always the same, for any order. E.g. c("A", "B") gives "A.B"
# and c("B", "A") also gives "A.B". This identifies duplicates.
#
# Vectorized functions like apply() are a unique concept to R:
# They work not just on a single value, but on a whole vector of values at the same time
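# e.g. apply(matrix(1:4, nrow = 2), 1, sum) returns the two row sums: 4 and 6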
x <- apply(
cbind(scCCnet$protein1,
scCCnet$protein2),
1,
FUN = function(x) {
return(paste(sort(x), collapse = "."))
}
)
head(x) # "YAL016W.YGR040W" "YAL016W.YOR014W" "YAL016W.YDL188C" ... etc.
sum(duplicated(x)) # 1227
# Step 2: drop all rows that contain duplicates in x
scCCnet <- scCCnet[!duplicated(x),]
# Confirm we didn't lose genes
length(unique(c(scCCnet$protein1, scCCnet$protein2))) # 276, no change
# Network has 276 nodes, 1228 edges (2455 rows minus 1227 duplicates)
save(scCCnet, file = "./data/scCCnet.RData")
# load("./data/scCCnet.RData")
# Use the statement above to load the scCCnet object when needed.
# This object can be used as any R dataframe you've seen today
# [END]
|
87d511ee4065c8cc2ee62b4a94ff3e8abc7c7d17 | fa9b0a7d8e31396f970c18448c88d8240ec6dd37 | /model_code/similarity.r | 1bfeb004749590baa2abd1c651ac9310b7a7b637 | [] | no_license | michael-franke/fuzzy_action | 4c4dacf0eaf1147375127f582d5d848a4f24a35e | 56b9ea899e6c29cecc7190fac31b3d725655df3b | refs/heads/master | 2021-01-10T08:08:29.266706 | 2016-04-12T09:49:27 | 2016-04-12T09:49:27 | 55,240,893 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,806 | r | similarity.r | ## main function: get_similarity
##
#### creates a similarity matrix between states
###### $ns$ is the number of states
##
## example call:
##
#### get_similarity(10)
##
## visualize results:
##
#### plot_similarity(get_similarity(10))
require('ggplot2')
require('reshape2')
get_similarity = function(ns, weber_fraction = 0.2, epsilon = 0) {
# $ns$ is the maximal number to be considered
# $epsilon$ is an additive constant on the $sigma_n$
states = 0:ns
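  # Weber's law: perceptual noise (sigma) grows linearly with the magnitude n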
sigma = sapply(states, function(n) weber_fraction * n + epsilon)
number_confusion_probs = matrix(0, ns+1, ns+1,
dimnames = list(states,states))
for (n in states) {
for (m in states) {
# prob that $m$ is perceived when $n$ is actual
number_confusion_probs[n+1, m+1] = pnorm(m+0.5, n, sigma[n+1]) - pnorm(m-0.5, n, sigma[n+1])
}
}
scene_confusion_probs = matrix(0, ns+1, ns+1,
dimnames = list(states,states))
for (n in states) {
for (m in states) {
# prob that scene $m$ is perceived when scene $n$ is actual
      # the complementary count in the scene is ns - n
      scene_confusion_probs[n+1, m+1] = number_confusion_probs[n+1,m+1] * number_confusion_probs[ns-n+1,ns-m+1]
}
}
scene_confusion_probs = prop.table(scene_confusion_probs,1)
similarity = matrix(0, ns+1, ns+1,
dimnames = list(states,states))
for (n in states) {
for (m in states) {
similarity[n+1, m+1] = (scene_confusion_probs[n+1,m+1] * scene_confusion_probs[m+1,n+1])
}
similarity[n+1, ] = similarity[n+1, ] / max(similarity[n+1, ])
}
return(similarity)
}
plot_similarity = function(similarity){
plotData = melt(similarity)
SimPlot = ggplot(plotData, aes(x = Var2, y = value, color = factor(Var1))) + geom_line( )
show(SimPlot)
return(SimPlot)
}
|
d553c196c9d2ef7067db2a325a1af04e2c671fcc | 9b2114f3bf672fd7d4825c1f045b58f2b4709df2 | /man/predict.mfbvar.Rd | fa5923d2b8912354275bf5b5f2ab9437ce6d56b2 | [] | no_license | ayotoasset/mfbvar | 18b9da360e4e41ab0fa3c8115222ddde69ac339b | 518f77b7e2226256e1a68fe3a5f1b4deda87af7b | refs/heads/master | 2020-06-21T15:16:46.438319 | 2019-05-09T07:16:06 | 2019-05-09T07:16:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 945 | rd | predict.mfbvar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{predict.mfbvar}
\alias{predict.mfbvar}
\title{Predict method for class \code{mfbvar}}
\usage{
\method{predict}{mfbvar}(object, pred_quantiles = c(0.1, 0.5, 0.9),
tidy = FALSE, ...)
}
\arguments{
\item{object}{object of class mfbvar}
\item{pred_quantiles}{The quantiles of the posterior predictive distribution to use.}
\item{tidy}{Whether the results should be returned in tidy format.}
\item{...}{Currently not in use.}
}
\description{
Method for predicting \code{mfbvar} objects.
}
\details{
Note that this requires that forecasts were made in the original \code{mfbvar} call.
}
\examples{
prior_obj <- set_prior(Y = mf_sweden[, 4:5], freq = c("m", "q"),
n_lags = 4, n_burnin = 20, n_reps = 20, n_fcst = 4)
mod_minn <- estimate_mfbvar(prior_obj, prior_type = "minn")
predict(mod_minn)
predict(mod_minn, pred_quantiles = 0.5, tidy = TRUE)
}
|
d7c14cde055ff516206c27b3072f38527c24a0b9 | 19105c698572fb475facff1a0db4278cdc1a0c0e | /R/intracranial_eeg_data.R | 8c1ce3d3769810dfe32d95fd46f620fa8563537d | [] | no_license | AndrewFerris/BayesSpec | 5dfe65f946da2726d0aa65ecadc2b448d7d97e49 | a51f2c174916028ad81b74d63875d3a11cd0016b | refs/heads/master | 2021-01-20T02:38:26.893041 | 2017-04-26T03:56:44 | 2017-04-26T03:56:44 | 89,435,621 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 463 | r | intracranial_eeg_data.R | #' @name intracranial_eeg
#' @docType data
#' @aliases intracranial_eeg
#' @title Intracranial Electroencephalograph (IEEG) Dataset
#' @description A sample of IEEG data from a subject in an interictal state.
#' @usage data(intracranial_eeg)
#' @format A vector time series of 6,000 intracranial electroencephalograph observations
#' @source kaggle.com
#' @references https://www.kaggle.com/c/melbourne-university-seizure-prediction
#' @keywords datasets
NULL
|
b61bdd336741c090311f808722f812995aec6e00 | 9bb764a46886ed9b8d1ff21055163d52258ee138 | /man/plot_coexpression.Rd | 6188f0b98b32cbdc184f90ee37fb0992626c96a3 | [
"MIT"
] | permissive | MarioniLab/geneBasisR | 422309db71b5da116bdf965b01f7af5e9f00d38c | 1e81fa70ab4457466706314e13fcd3223770495e | refs/heads/main | 2023-06-21T21:52:11.899681 | 2023-06-12T11:22:21 | 2023-06-12T11:22:21 | 327,073,039 | 13 | 3 | null | null | null | null | UTF-8 | R | false | true | 621 | rd | plot_coexpression.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization_functions.R
\name{plot_coexpression}
\alias{plot_coexpression}
\title{plot_coexpression}
\usage{
plot_coexpression(sce, genes, title = NULL, ...)
}
\arguments{
\item{sce}{SingleCellExperiment object containing gene counts matrix (stored in 'logcounts' assay).}
\item{genes}{Character vector containing gene names to evaluate for co-expression.}
\item{title}{String to be passed as a title. Default title=NULL.}
\item{...}{Additional arguments to pass.}
}
\value{
Heatmap for co-expression.
}
\description{
plot_coexpression
}
|
53ab906c85ce7887ec1491b0fb79e2227d1e39d5 | ecad2f65ae1b55e9497198ede38e6312d77ace4d | /tests/testthat/test-template-funs.R | 410c8a6aecd75def365518895c455fde173ac95d | [
"MIT"
] | permissive | fmichonneau/sandpaper | 3cfaff895eda905a8f1155cf5d7efc1163542d3b | 17f02d56be89ca4aef1a6b3d7e0c32bff3aca391 | refs/heads/main | 2023-08-03T15:06:27.207574 | 2021-05-27T00:06:13 | 2021-05-27T00:06:13 | 380,282,279 | 0 | 0 | NOASSERTION | 2021-06-25T15:35:35 | 2021-06-25T15:35:34 | null | UTF-8 | R | false | false | 353 | r | test-template-funs.R | test_that("template files point to the right places", {
expect_equal(fs::path_file(template_gitignore()), "gitignore-template.txt")
expect_equal(fs::path_file(template_episode()), "episode-template.txt")
expect_equal(fs::path_file(template_config()), "config-template.txt")
expect_equal(fs::path_file(template_links()), "links-template.txt")
})
|
f6e50bf2ad74ae7f2d1768339f4797472d34940d | fccda946aeab155ab523e683537e0f8cb5ecac8e | /man/getLineageTime.Rd | 048825d9379f8989fef066b8c1916cba60be53da | [] | no_license | lingxuez/SOUPR | d2931a89f46b310e5dc8f0bf2d817d0085b6dd1e | 68358c79a40b82174605fdb249e286bd7fc5eee3 | refs/heads/master | 2021-03-22T05:04:26.405694 | 2018-10-17T02:35:32 | 2018-10-17T02:35:32 | 122,891,238 | 8 | 7 | null | null | null | null | UTF-8 | R | false | true | 742 | rd | getLineageTime.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SOUPlineage.R
\name{getLineageTime}
\alias{getLineageTime}
\title{Decide cell pseudotime}
\usage{
getLineageTime(lineages, membership)
}
\arguments{
\item{lineages}{A list of vectors, one per lineage,
each containing the ordered cluster labels along the lineage}
\item{membership}{the n-by-K soft membership matrix}
}
\value{
An n-by-L pseudotime matrix for L lineages.
Each column contains the pseudotime of cells along one lineage, and NA if a cell does not belong to it.
Cells are assigned to different lineages, potentially with overlaps, according to their major clusters.
}
\description{
Compute the pseudotime of each cell along each lineage.
}
|
d11729d95823a6c53d2c338aeca5a927a1e1f9ad | fe53412ce862b91645f9ac590f6e51121bef64f2 | /Powers.R | ee2301476cdd417e5c48648d6dba8aaa390ff1c5 | [] | no_license | Dwopplee/032-Proj | 36ef7cdfaabc21a9f4c906edd36c3d8dd065b7ac | d49cf19d996935addb00e270ada1e4c9622937cd | refs/heads/master | 2020-08-21T07:53:04.302799 | 2019-12-09T00:45:25 | 2019-12-09T18:46:27 | 216,113,972 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,443 | r | Powers.R | rm(list = ls(all = TRUE))
# Sourcing this file takes my machine slightly less than 1 minute to run.
# This is the first iteration of the model.
# We find the "optimal" polynomial relationship for each predictor with our
# dependent variable by repeatedly splitting our data set into training and test
# sets, then iterating up through higher degree polynomials until the absolute
# error in the test set increases.
# We then create a model by adding together all the polynomials with degree of
# at least 1.
source('SetupPows.R')
# Shuffle the data set before doing anything
ranRows = sample(nrow(X))
X = X[ranRows, ]
y = y[ranRows]
folds = GenFolds(10, X)
# Our baseline model:
cat("Base error and Variance:", CrossValidate(1, X, y, folds, TRUE, NULL, TRUE), '\n')
exps = sapply(colnames(X), PosExps, X, y)
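# NB: the next two lines assume at least one predictor comes back with exps == 0;
# if none did, -which(exps == 0) would drop every column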
# We don't want predictors that are best as constants
Xmodel = X[, -which(exps == 0)]
exps = exps[-which(exps == 0)]
# Generate a right-hand-side with all predictors raised to generated powers
pow0 = paste(mapply(ExpFormula, colnames(Xmodel), exps), collapse = "+")
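# pow0 is a model right-hand-side string; ExpFormula() (defined in SetupPows.R) is assumed
# to expand each predictor into polynomial terms of the chosen degree, e.g. "I(x^1)+I(x^2)"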
folds = GenFolds(10, Xmodel)
cat("model0 error and variance:", CrossValidate(pow0, Xmodel, y, folds, TRUE, NULL, TRUE), '\n')
# Alright so this model doesn't look that bad if you're not really paying attention
# It does bring the error down from ~0.1784 to 0.0996-0.1025
# Given that the values we're predicting range from 0 to 1, this isn't great.
|
e68275e737da7680142c5f22f0bb29704446163e | ee0689132c92cf0ea3e82c65b20f85a2d6127bb8 | /trg/iimkpv-d4.R | cda7db27ac224e9828359b82977acf35cd815e35 | [] | no_license | DUanalytics/rAnalytics | f98d34d324e1611c8c0924fbd499a5fdac0e0911 | 07242250a702631c0d6a31d3ad8568daf9256099 | refs/heads/master | 2023-08-08T14:48:13.210501 | 2023-07-30T12:27:26 | 2023-07-30T12:27:26 | 201,704,509 | 203 | 29 | null | null | null | null | UTF-8 | R | false | false | 609 | r | iimkpv-d4.R | #Day 4 - IIM Revision
(gender = sample(c('M','F'), size=3, replace=T))
(gender = sample(c('M','F'), size=30, replace=T, prob=c(.7,.3)))
t1= table(gender)
prop.table(t1)
(course = sample(c('BBA','MBA'), size=30, replace=T, prob=c(.5,.5)))
summary(course)
courseF = factor(course)
summary(course)
summary(courseF)
(grades = sample(c(LETTERS[1:5]), size=30, replace=T))
summary(grades)
gradesF = factor(grades)
summary(gradesF)
# Ex, Good, Sat, Poor
#Poor < Sat < Good < Ex
gradesOF = factor(grades, ordered=T, levels=c('E','D','C','B','A')) # ascending: E < D < C < B < A
summary(gradesOF)
gradesOF
levels(gradesOF)
levels(gradesOF)[1]
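# Ordered factors support order comparisons:
gradesOF > 'C' # TRUE for grades ranked above C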
|
949bb47f11729c56fb3cc19869160a77564f383f | 9e8fcdf0d7f018f540dde7a65d9b3f2d65c9cbc6 | /man/median_impute.Rd | beb0192addb3720f66867f4eb5dae8e7a0f3c402 | [
"MIT"
] | permissive | yiheng-aug30/metaboprep | 6c02797bd3949c593d8ed70db6e7dcf67cea2eeb | 500b051ad8174a54f11b52caa41b9c57f14dcdc4 | refs/heads/master | 2023-08-29T08:14:57.380002 | 2021-11-15T13:27:40 | 2021-11-15T13:27:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 918 | rd | median_impute.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/median_impute.R
\name{median_impute}
\alias{median_impute}
\title{median impute missing data}
\usage{
median_impute(wdata)
}
\arguments{
\item{wdata}{the metabolite data matrix. samples in row, metabolites in columns}
}
\value{
the matrix passed to the function but with NA's imputed to each columns median value.
}
\description{
This function imputes features (columns) of a metabolome matrix to median estimates. Useful for PCA.
}
\examples{
ex_data = sapply(1:100, function(x){ rnorm(250, 40, 5) })
## define the data set
rownames(ex_data) = paste0("ind", 1:nrow(ex_data))
colnames(ex_data) = paste0("var", 1:ncol(ex_data))
## add in some missingness
ex_data[sample(1:length(ex_data), 500)] = NA
## Estimate missingness and generate plots
imp_data = median_impute(ex_data)
}
\keyword{imputation}
\keyword{median}
\keyword{metabolomics}
|
b304e0e2edb48990afd1ddad3d83594608c83c76 | 3ae8b98cf7e343aa355bf4886d857d4cf6731807 | /Data_Summarization/index.R | 5c68d6f336ade4f3518dbc329e3b135ba482f25b | [
"MIT",
"CC-BY-NC-SA-4.0"
] | permissive | muschellij2/intro_to_r | 2e9bf76c94dd6aea66add18a6af0fa968a4e7c96 | d7edbda03dd75eaa454999a041dc9f4a811d0b36 | refs/heads/gh-pages | 2022-01-14T22:37:14.262179 | 2021-05-18T21:55:04 | 2021-05-18T21:55:04 | 94,378,579 | 6 | 16 | MIT | 2022-01-06T14:25:41 | 2017-06-14T22:39:55 | HTML | UTF-8 | R | false | false | 6,787 | r | index.R | ## ---- echo = FALSE, message=FALSE, error = FALSE------------------------------
library(knitr)
opts_chunk$set(comment = "", message = FALSE)
suppressWarnings({library(dplyr)})
library(readr)
library(tidyverse)
library(jhur)
## -----------------------------------------------------------------------------
library(jhur)
head(jhu_cars)
## -----------------------------------------------------------------------------
mean(jhu_cars$hp)
quantile(jhu_cars$hp)
## -----------------------------------------------------------------------------
median(jhu_cars$wt)
quantile(jhu_cars$wt, probs = 0.6)
## -----------------------------------------------------------------------------
t.test(jhu_cars$wt)
broom::tidy(t.test(jhu_cars$wt))
## -----------------------------------------------------------------------------
x = c(1,5,7,NA,4,2, 8,10,45,42)
mean(x)
mean(x, na.rm = TRUE)
quantile(x, na.rm = TRUE)
## -----------------------------------------------------------------------------
library(readxl)
# tb <- read_excel("http://johnmuschelli.com/intro_to_r/data/tb_incidence.xlsx")
tb = jhur::read_tb()
colnames(tb)
## -----------------------------------------------------------------------------
library(dplyr)
tb = tb %>% rename(country = `TB incidence, all forms (per 100 000 population per year)`)
## -----------------------------------------------------------------------------
colnames(tb)
## -----------------------------------------------------------------------------
tb %>%
summarize(mean_2006 = mean(`2006`, na.rm = TRUE),
media_2007 = median(`2007`, na.rm = TRUE),
median(`2004`, na.rm = TRUE))
## ----colMeans-----------------------------------------------------------------
avgs = select(tb, starts_with("1"))
colMeans(avgs, na.rm = TRUE)
## -----------------------------------------------------------------------------
tb$before_2000_avg = rowMeans(avgs, na.rm = TRUE)
head(tb[, c("country", "before_2000_avg")])
## ---- echo = TRUE, eval=FALSE-------------------------------------------------
## summarize_all(DATASET, FUNCTION, OTHER_FUNCTION_ARGUMENTS) # how to use
## -----------------------------------------------------------------------------
summarize_all(avgs, mean, na.rm = TRUE)
## ----summary1-----------------------------------------------------------------
summary(tb)
## -----------------------------------------------------------------------------
yts = jhur::read_yts()
head(yts)
## ---- message = FALSE---------------------------------------------------------
head(unique(yts$LocationDesc), 10)
## -----------------------------------------------------------------------------
length(unique(yts$LocationDesc))
## ---- message = FALSE---------------------------------------------------------
head(table(yts$LocationDesc))
## ---- message = FALSE---------------------------------------------------------
yts %>% count(LocationDesc)
## ---- message = FALSE---------------------------------------------------------
yts %>% count(LocationDesc, Age)
## ---- message=FALSE-----------------------------------------------------------
library(dplyr)
sub_yts = filter(yts, MeasureDesc == "Smoking Status",
Gender == "Overall", Response == "Current",
Education == "Middle School")
sub_yts = select(sub_yts, YEAR, LocationDesc, Data_Value, Data_Value_Unit)
head(sub_yts, 4)
## -----------------------------------------------------------------------------
sub_yts = group_by(sub_yts, YEAR)
head(sub_yts)
## -----------------------------------------------------------------------------
sub_yts %>% summarize(year_avg = mean(Data_Value, na.rm = TRUE))
## -----------------------------------------------------------------------------
yts_avgs = sub_yts %>%
group_by(YEAR) %>%
summarize(year_avg = mean(Data_Value, na.rm = TRUE),
year_median = median(Data_Value, na.rm = TRUE))
head(yts_avgs)
## -----------------------------------------------------------------------------
sub_yts = ungroup(sub_yts)
sub_yts
## -----------------------------------------------------------------------------
sub_yts %>%
group_by(YEAR) %>%
mutate(year_avg = mean(Data_Value, na.rm = TRUE)) %>%
arrange(LocationDesc, YEAR) # look at year 2000 value
## -----------------------------------------------------------------------------
sub_yts %>%
group_by(YEAR) %>%
summarize(n = n(),
mean = mean(Data_Value, na.rm = TRUE)) %>%
head
## ---- eval = FALSE------------------------------------------------------------
## qplot
## ---- echo = FALSE------------------------------------------------------------
args(qplot)
## -----------------------------------------------------------------------------
library(ggplot2)
qplot(x = disp, y = mpg, data = jhu_cars)
## -----------------------------------------------------------------------------
qplot(x = before_2000_avg, data = tb, geom = "histogram")
## -----------------------------------------------------------------------------
qplot(x = YEAR, y = year_avg, data = yts_avgs, geom = "line")
## -----------------------------------------------------------------------------
qplot(x = Data_Value, data = sub_yts, geom = "density")
## -----------------------------------------------------------------------------
qplot(x = LocationDesc, y = Data_Value, data = sub_yts, geom = "boxplot")
## -----------------------------------------------------------------------------
qplot(x = LocationDesc, y = Data_Value,
data = sub_yts, geom = "boxplot") + coord_flip()
## ----ggally_pairs, warning=FALSE, echo = FALSE--------------------------------
library(GGally)
# ggpairs(avgs)
## ----scatter1-----------------------------------------------------------------
plot(jhu_cars$mpg, jhu_cars$disp)
## ----hist1--------------------------------------------------------------------
hist(tb$before_2000_avg)
## ----hist_date----------------------------------------------------------------
plot(yts_avgs$YEAR, yts_avgs$year_avg, type = "l")
## ----dens1,fig.width=5,fig.height=5-------------------------------------------
plot(density(sub_yts$Data_Value))
## ----box1---------------------------------------------------------------------
boxplot(sub_yts$Data_Value ~ sub_yts$LocationDesc)
## ----box2---------------------------------------------------------------------
boxplot(Data_Value ~ LocationDesc, data = sub_yts)
## ----matplot2-----------------------------------------------------------------
pairs(avgs)
## ----apply1-------------------------------------------------------------------
apply(avgs,2,mean, na.rm=TRUE) # column means
head(apply(avgs,1,mean, na.rm=TRUE)) # row means
apply(avgs,2,sd, na.rm=TRUE) # column sds
apply(avgs,2,max, na.rm=TRUE) # column maxs
|
6d132e998b590b7a8044483100ef634c27e27d32 | 8df46ac90b787ab113e1d665ee6f5aa2091f9a5a | /Analyses/Experiment_sup_Prob/4a_modelling.R | 31f99b14f66c99db27172b7be92077fab3d916ad | [] | no_license | warren-james/Breaking_symmetry | ce8973954a6c9a1c916b84d36f403addbb8856a6 | aa33701d90d00e1d07aec41a9674f07debc1bfc4 | refs/heads/master | 2022-09-19T23:31:15.955310 | 2022-09-08T08:41:31 | 2022-09-08T08:41:31 | 140,823,729 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,906 | r | 4a_modelling.R | #### Modelling probability matching study ####
# Models to make:
# - fix to "most likely" side without the LFA
# - Same as above but for accuracy
#### Library ####
library(tidyverse)
library(tidybayes)
library(brms)
library(rstan)
# NB: Not sure which to use just yet... we'll try both though
#### Load data ####
load("scratch/new_data/df_part2")
load("scratch/new_data/AccMea")
#### Modelling ####
# restarting modelling since we know what we're doing a bit better now
# TO DO:
# - Fixations proportions;
# - Fixations to the "most likely" side by bias type and distance
# - Central fixations by bias type and distance
# - NB: Do we want to just use left and right, then say whether the left was the bias side as a factor?
# - Also, want to remove the furthest point for now?
# - Accuracy
# - Could reuse penguin script for overall accuracy
# - Want to look at actual and expected accuracy rates
#### Modelling: Fixations ####
#### Fixations: Most likely side ####
#### m1: Most likely side ~ group ####
# sort data
dat_m1_flike <- df_part2 %>%
group_by(participant) %>%
filter(separation != 640) %>%
mutate(fixate_most_likely = ifelse(standard_boxes == "most likely", 1, 0),
separation = separation/max(separation)) %>%
select(participant, condition, bias, bias_type, fixate_most_likely, separation) %>%
ungroup()
# averaged data
temp <- dat_m1_flike %>%
group_by(participant, separation, bias_type) %>%
summarise(fixate_most_likely = mean(fixate_most_likely)) %>%
mutate(prop_l = (fixate_most_likely + 1e-4)*0.999)
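# prop_l squeezes the proportions away from exact 0 and 1, into the open (0,1) support of a beta likelihood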
# run model
m1_flike <- brm(fixate_most_likely ~ bias_type,
data = dat_m1_flike,
family = "bernoulli",
chains = 1,
iter = 2000,
warmup = 1000)
m1_flike_re <- brm(fixate_most_likely ~ bias_type + (1 + bias_type|participant),
data = dat_m1_flike,
family = "bernoulli",
chains = 1,
iter = 2000,
warmup = 1000)
#### m2: most likely ~ (group + separation)^2 ####
# run model
m2_flike <- brm(fixate_most_likely ~ (bias_type + separation)^2,
data = dat_m1_flike,
family = "bernoulli",
prior = c(
prior(student_t(3, 1.6, .2), class = Intercept)
),
chains = 1,
iter = 2000,
warmup = 1000)
m2_flike_ri <- brm(fixate_most_likely ~ (bias_type + separation)^2 + (1 + bias_type|participant),
data = dat_m1_flike,
family = "bernoulli",
prior = c(
prior(student_t(3, 1.6, .2), class = Intercept)
),
chains = 1,
iter = 2000,
warmup = 1000)
# make a plot for the line
# works, could look nicer... will work on it
dat_m1_flike %>%
group_by(bias_type) %>%
modelr::data_grid(separation = modelr::seq_range(separation, n = 10)) %>%
add_fitted_draws(m2_flike) %>%
ggplot(aes(separation,
.value,
colour = bias_type,
fill = bias_type)) +
  stat_lineribbon(aes(group = paste(bias_type, 
...width..)),
.width = c(.5, .8, .95),
alpha = .25) +
geom_point(data = temp, aes(separation, fixate_most_likely)) +
# see::theme_abyss() +
theme_bw() +
# scale_colour_manual(values = c("#FFED41", "#FFED42")) +
# scale_colour_manual(values = c("#FF0000", "#FF0001")) +
# theme(legend.position = "none")
see::scale_color_flat() +
see::scale_fill_flat()
# for random intercepts
dat_m1_flike %>%
group_by(bias_type, participant) %>%
modelr::data_grid(separation = modelr::seq_range(separation, n = 10)) %>%
add_fitted_draws(m2_flike_ri) %>%
ungroup() %>%
ggplot(aes(separation,
.value,
colour = bias_type,
fill = bias_type)) +
geom_smooth() +
stat_lineribbon(aes(group = paste(group,
...width..)),
.width = c(.5, .8, .95),
alpha = .25) +
geom_point(data = temp, aes(separation, fixate_most_likely)) +
theme_bw() +
see::scale_color_flat() +
see::scale_fill_flat()
# plot predictions?
dat_m1_flike %>%
group_by(bias_type) %>%
modelr::data_grid(separation = modelr::seq_range(separation, n = 1000)) %>%
add_predicted_draws(m2_flike) %>%
ungroup() %>%
group_by(bias_type, separation, .row) %>%
summarise(mean_pred = mean(.prediction)) %>%
ggplot(aes(mean_pred, colour = bias_type, fill = bias_type)) +
geom_density()
m2_flike_2 <- brm(fixate_most_likely ~ (bias_type + separation)^2 + (1|participant),
data = dat_m1_flike,
family = "bernoulli",
chains = 1,
iter = 2000,
warmup = 1000)
# temp plot
temp <- dat_m1_flike %>%
group_by(participant, separation, bias_type) %>%
summarise(fixate_most_likely = mean(fixate_most_likely))
dat_m1_flike %>%
modelr::data_grid(bias_type,
separation) %>%
add_predicted_draws(m2_fix) %>%
ggplot(aes(separation,
.prediction,
colour = bias_type)) +
geom_smooth(method = lm) +
geom_point(data = temp,
aes(separation,
fixte_most_likely,
colour = bias_type)) +
geom_path(data = temp,
aes(separation,
fixte_most_likely,
colour = bias_type,
group = interaction(participant,
bias_type)),
alpha = 0.5)
#### Fixations: side ####
#### m1: side ~ group ####
# sort data
dat_m1_side <- df_part2 %>%
group_by(participant) %>%
filter(separation != 640) %>%
mutate(side = ifelse(lcr == 0, 0, 1),
separation = separation/max(separation)) %>%
select(participant, condition, bias, bias_type, separation, side) %>%
ungroup()
# run model
m1_side <- brm(side ~ bias_type,
data = dat_m1_side,
family = "bernoulli",
chains = 1,
iter = 2000,
warmup = 1000)
#### m2: side ~ (group + separation)^2 ####
# run model
m2_side <- brm(side ~ (bias_type + separation)^2,
data = dat_m1_side,
family = "bernoulli",
chains = 1,
iter = 2000,
warmup = 1000)
# #### Sort Model data: Fixations ####
# m_data_fix <- df_part2 %>%
# mutate(participant = as.factor(participant),
# fixated_likely = ifelse(standard_boxes == "most likely", 1, 0),
# fixated_centre = ifelse(standard_boxes == "centre", 1, 0),
# fixated_side = 1 - fixated_centre) %>% # Should we add in dist_type?
# select(participant,
# bias_type,
# separation,
# fixated_likely,
# fixated_centre,
# fixated_side)
#
# # save
# save(m_data_fix, file = "modelling/Stan/model_data/m_data_fix")
#
# # remove lfa separation for now
# m_data_fix_trim <- m_data_fix %>%
# group_by(participant) %>%
# filter(separation != max(separation)) %>%
# mutate(separation = separation/max(separation))
#
# # save
# save(m_data_fix_trim, file = "modelling/Stan/model_data/m_data_fix_trim")
#
# #### Sort model data: Acc ####
# # plots to check some things
# AccMea %>%
# group_by(participant) %>%
# filter(separation != max(separation)) %>%
# mutate(diff = Acc - Actual) %>%
# ggplot(aes(separation, diff, colour = Pred_type)) +
# geom_point() +
# geom_smooth(method = "lm") +
# facet_wrap(~condition)
#
# # AccMea %>%
# # group_by(participant) %>%
# # filter(separation != max(separation)) %>%
# # spread(Pred_type, Acc) %>%
# # ggplot(aes(Expected, Optimal, colour = condition)) +
# # geom_point() +
# # geom_smooth(method = "binomail")
#
# # AccMea %>%
# # group_by(participant) %>%
# # filter(separation != max(separation)) %>%
# # ungroup() %>%
# # spread(Pred_type, Acc) %>%
# # gather(c(Actual, Centre, Optimal, Expected),
# # key = "acc_type",
# # value = "accuracy") %>%
# # ggplot(aes(separation, accuracy, colour = acc_type)) +
# # geom_point() +
# # geom_smooth(method = "glm") +
# # facet_wrap(~condition)
#
# m_data_acc <- AccMea %>%
# spread(Pred_type, Acc) %>%
# select(participant, separation, condition, Actual, Optimal) %>%
# gather(c(Actual,Optimal),
# key = "Acc_type",
# value = "Accuracy") %>%
# group_by(participant) %>%
# filter(separation != max(separation)) %>%
# ungroup() %>%
# mutate(beta_acc = Accuracy*(1-1e-4),
# separation = separation/max(separation))
#
# #### MODELS ####
# #### MODELS: Fixated "likely" ####
# #### MODELS: m_fl_1 - likey ~ bias_type ####
# # m_fl_1 <- brm(fixated_likely ~ bias_type,
# # data = m_data_fix_trim,
# # family = "bernoulli",
# # cores = 1,
# # chains = 1,
# # iter = 2000,
# # warmup = 1000)
# # save
# # save(m_fl_1, file = "modelling/Brms/model_output/m_fl_1")
#
# # add rand intercepts
# # m_fl_1_1 <- brm(fixated_likely ~ bias_type + (1|participant),
# # data = m_data_fix_trim,
# # family = "bernoulli",
# # cores = 1,
# # chains = 1,
# # iter = 2000,
# # warmup = 1000)
# # save
# # save(m_fl_1_1, file = "modelling/Brms/model_output/m_fl_1_1")
#
# # add rand effects
# m_fl_1_2 <- brm(fixated_likely ~ bias_type + (bias_type|participant),
# data = m_data_fix_trim,
# family = "bernoulli",
# cores = 1,
# chains = 1,
# iter = 2000,
# warmup = 1000)
#
# #### MODELS: m_fl_2 - likely ~ bias_type + separation ####
# # m_fl_2 <- brm(fixated_likely ~ (bias_type + separation)^2,
# # data = m_data_fix_trim,
# # family = "bernoulli",
# # cores = 1,
# # chains = 1,
# # iter = 2000,
# # warmup = 1000)
#
# # add rand intercepts
# # issues here too...
# # m2_1 <- brm(fixated_likely ~ (bias_type + separation)^2 + (1|participant),
# # data = m_data_fix_trim,
# # family = "bernoulli",
# # cores = 1,
# # chains = 1,
# # iter = 2000,
# # warmup = 1000)
#
# #### MODELS: Fixated centre ####
# #### MODELS: m_fc_1 - centre ~ bias_type ####
# # m_fc_1 <- brm(fixated_centre ~ bias_type,
# # data = m_data_fix_trim,
# # family = "bernoulli",
# # cores = 1,
# # chains = 1,
# # iter = 2000,
# # warmup = 1000)
#
#
# #### MODELS: m_fc_2 - centre ~ (bias_type + separation)^2 ####
# # m_fc_2 <- brm(fixated_centre ~ (bias_type + separation)^2,
# # data = m_data_fix_trim,
# # family = "bernoulli",
# # cores = 1,
# # chains = 1,
# # iter = 2000,
# # warmup = 1000)
#
# #### STAN MODELS ####
# #### STAN: m1 - likely ~ bias_type ####
# m_matrix <- model.matrix(fixated_likely ~ bias_type, data = m_data_fix_trim)
#
# stan_df <- list(
# N = nrow(m_data_fix_trim),
# K = ncol(m_matrix),
# y = m_data_fix_trim$fixated_likely,
# X = m_matrix
# )
#
# m1_fl_berno <- stan(
# file = "modelling/Stan/models/berno.stan",
# data = stan_df,
# chains = 1,
# warmup = 1000,
# iter = 2000,
# refresh = 100
# )
#
# # save output
# save(m1_fl_berno, file = "modelling/Stan/model_outputs/m1_fl_berno")
#
# #### STAN: m2 - likely ~ (bias_type + separation)^2 ####
# m_matrix <- model.matrix(fixated_likely ~ (bias_type + separation)^2, data = m_data_fix_trim)
#
# stan_df <- list(
# N = nrow(m_data_fix_trim),
# K = ncol(m_matrix),
# y = m_data_fix_trim$fixated_likely,
# X = m_matrix
# )
#
# m2_fl_berno <- stan(
# file = "modelling/Stan/models/berno.stan",
# data = stan_df,
# chains = 1,
# warmup = 1000,
# iter = 2000,
# refresh = 100
# )
#
# # save output
# save(m2_fl_berno, file = "modelling/Stan/model_outputs/m2_fl_berno")
#
#
# ##### ACCURACY ####
# # Try some beta regressions for the accuracy...
# # Probably need to do a comparison of expected vs actual and use that as
# # the comparison? since the Bias condition should have a higher accuracy anyway
# m_data <- m_data_acc %>%
# filter(Acc_type == "Actual")
# m1_acc <- brm(beta_acc ~ (separation + condition)^2,
# data = m_data_acc,
# family = "beta",
# chains = 1,
# iter = 2000,
# warmup = 1000)
|
33471c98d35db9fd346b10cd31d60674f88e3150 | 3a8e307fc6c3f68df79cd4e2c1efaed05437f4d2 | /ArealKernel/malawi_pop.r | 1226299379e9674ea1a459252021d80cc2294ebd | [] | no_license | edenx/PBCanalysis | c2e9e770701b85ecf3ca7eb5d25f2f45799226c8 | b7ce63db30a64e3f8d36034b79b82d3a3dc5e34e | refs/heads/master | 2021-07-07T11:26:51.442347 | 2020-10-22T17:41:35 | 2020-10-22T17:41:35 | 199,621,343 | 1 | 2 | null | 2019-09-09T16:25:06 | 2019-07-30T09:35:44 | R | UTF-8 | R | false | false | 2,315 | r | malawi_pop.r | library(sf)
library(raster)
library(tidyverse)
library(tictoc)
library(ggplot2)
library(dplyr)
library(reshape2)
source("WrapperFunc.r")
# Data preprocessing ------------------------------------------------------------------------------
## Get population density data from FB HRSL
# geotiff_file <- "https://data.humdata.org/dataset/8c2c0b1f-66af-4a8e-b30e-59ad2249ee24/resource/d83a3bad-b72a-4e4e-9be9-93b4c654ac0f/download/population_mwi_2018-10-01.zip"
# download.file(geotiff_file, "pop_malawi_geotiff.zip")
# unzip("pop_malawi_geotiff.zip")
# # Aggregation
# tif_name <- 'population_mwi_2018-10-01.tif'
# pop_malawi <- raster(tif_name)
# res(pop_malawi)
# # from 30m by 30m aggregate to 3km by 3km
# pop_malawi.aggre <- aggregate(pop_malawi, fact=100)
# save(pop_malawi.aggre, file="pop_malawi.aggre.Rdata")
# get population density of malawi
load("pop_malawi.aggre.Rdata")
res(pop_malawi.aggre)
plot(pop_malawi.aggre)
# get spatial data of malawi
df <- readRDS("~/Documents/sae/data/prev_malawi_2015.rds") %>%
st_as_sf() %>%
mutate(y = est * n_obs, # y: total number of cases (ignores survey design)
         l_prev = qlogis(est), # l_prev: logit (log-odds) prevalence estimate
l_prev_se = se / (est * (1 - est))) # l_prev_se: calculated by delta method
# Other models see "/sae/03_sae.r"
# INLA: bym3 ----------------------------------------------------------------------------------
# create aggregated Kernel
ls <- 0.4
regker <- create_ker(ls, df, pop_malawi.aggre, plot=TRUE)
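# create_ker() (sourced from WrapperFunc.r) is assumed to build a population-weighted
# kernel matrix over the areal units for the given lengthscale ls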
inv_regker <- Matrix::solve(regker)
# # precompute the RFF for each lengthscale
# # can specify regional covariates
# temp <- precomp_ker(df, pop_malawi.aggre)
inla_df <- list(y = round(df$y), m = df$n_obs, id=1:nrow(df))
inla_form <- y ~ 1 + f(id, model = "bym2", graph = inv_regker, scale.model = TRUE, constr=TRUE)
tic()
inla.fit_bym3 <- inla(inla_form,
family = "binomial",
control.family = list(control.link = list(model = "logit")),
data = inla_df,
Ntrials = m,
control.predictor = list(compute = TRUE),
control.compute = list(dic = TRUE))
toc()
plot_pred(inla.fit_bym3$summary.fitted.values[,1], ls, df, compare=TRUE, df$est)
summary(inla.fit_bym3)
|
8865cd39c84495106345b8913fe24b7082b25131 | 9a49c0e424edfa60ab81fc07693f9c05f3cf984e | /man/append.AbstractCBS.Rd | 37183e96e60f977cd1412b4bfea3edf06ac58c17 | [] | no_license | HenrikBengtsson/PSCBS | c6967835662c27512e668f1a63e5cf3fccb10b3c | 4f6ccedc90d8cf58344b8dd436b5b9c82dab07ed | refs/heads/master | 2023-01-10T01:03:29.915343 | 2021-10-23T08:00:28 | 2021-10-23T08:00:28 | 20,844,732 | 9 | 6 | null | 2018-04-11T02:17:13 | 2014-06-15T00:15:30 | R | UTF-8 | R | false | false | 1,082 | rd | append.AbstractCBS.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AbstractCBS.RESTRUCT.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{append.AbstractCBS}
\alias{append.AbstractCBS}
\alias{AbstractCBS.append}
\alias{append,AbstractCBS-method}
\title{Appends one segmentation result to another}
\description{
Appends one segmentation result to another,
where both holds segmentation results \emph{of the same sample}.
}
\usage{
\method{append}{AbstractCBS}(x, other, addSplit=TRUE, ...)
}
\arguments{
\item{x, other}{The two \code{\link{AbstractCBS}} objects to be combined.}
\item{addSplit}{If \code{\link[base:logical]{TRUE}}, a "divider" is added between chromosomes.}
\item{...}{Not used.}
}
\value{
Returns a object of the same class as argument \code{x}.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{AbstractCBS}}.
}
\keyword{internal}
\keyword{methods}
|
11f1f7441408a57c285c63cb7fbc8d2bac3a0424 | c391a620ce73c9ed23c530a8f33db0bfca4db7c7 | /server.R | fff9449424e24d6e16edf824bbe2b0632cf7e2ab | [] | no_license | Chrisss93/CANSIM-Shiny-App | 92ab2c1b237e5fabe602d251b9c26ada6cc7982b | ab3c17f1b07ca5c08b1760f50d416b283d8b5915 | refs/heads/master | 2016-09-06T17:21:41.038595 | 2014-09-26T15:53:58 | 2014-09-26T15:53:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,919 | r | server.R | ### [BEGIN] Conditional package installation and sourcing
required_packages <- c("shiny", "ggplot2", "scales", "plyr", "reshape2", "devtools", "shinyIncubator")
uninstalled_packages <- required_packages[!(required_packages %in% installed.packages()[,"Package"])]
if(length(uninstalled_packages) > 0) {
install.packages(uninstalled_packages[!uninstalled_packages == "shinyIncubator"])
  devtools::install_github("shiny-incubator", "rstudio")
}
lapply(required_packages, require, character.only = TRUE)
### [BEGIN] Retrieving Data
fileUrl <- "http://www20.statcan.gc.ca/tables-tableaux/cansim/csv/02020802-eng.zip"
temp <- tempfile()
### [BEGIN] Minor data cleaning (cleaning is consistent with 24-09-2014 version of data)
small_cleaning <- function(base){
base <- base[,c("Ref_Date", "GEOGRAPHY", "CUTOFFBASE", "STATISTICS", "LICOPERSONS", "Value")]
colnames(base) <- c("Year", "Geography", "Line", "Statistic", "Population", "Value")
base$Population <- factor(gsub(" \\(x 1,000\\)", "", base$Population))
base$Statistic <- as.character(base$Statistic)
base$Statistic[base$Statistic == "Number of persons in low income"] <- paste(base$Statistic[base$Statistic == "Number of persons in low income"], "(x 1,000)")
base$Statistic <- factor(base$Statistic)
base$Value <- suppressWarnings(as.numeric(as.character(base$Value)))
return(base)
}
### [BEGIN] Defining useful constants
# Express general ggplot preset layers to be used (only options that do not have reactive elements)
gg_layers <- list(geom_line(),
geom_point(),
ylab("Poverty Rate"),
theme(legend.direction = "vertical",
legend.position = "top",
axis.text.x = element_text(angle = 45)))
# Too much junk/information in the dataset, create constant strings to denote parameters of interest
keep_population <- c("All persons", "Males", "Females", "Persons under 18 years", "Persons 18 to 64 years",
"Persons 65 years aand over", "Males, under 18 years", "Females, under 18 years",
"Males, 18 to 64 years", "Females, 18 to 64 years", "Males, 65 years and over",
"Females, 65 years and over")
keep_geography <- c("Canada", "Atlantic provinces","Newfoundland and Labrador",
"Prince Edward Island", "Nova Scotia", "New Brunswick",
"Quebec", "Ontario", "Prairie provinces","Manitoba",
"Saskatchewan", "Alberta", "British Columbia")
### [BEGIN] Shiny reactive programming
shinyServer(function(input, output, session) {
scrape_data <- reactive({
withProgress(session, min = 1, max = 30, {
setProgress(message = "Please wait while we retrieve the most current data from Statistics Canada.")
for(i in 1:30) {setProgress(value = i)}
download.file(fileUrl, temp)
base <- read.csv(unz(temp, "02020802-eng.csv"))
base <- small_cleaning(base)
unlink(temp)
base
})
})
line_select <- reactive({
base <- scrape_data()
base[base$Line %in% input$line, ]
})
pop_select <- reactive({
base2 <- line_select()
keep_all <- paste(input$pop, input$pop2)
#Dumb code below. I'll shore it up later.
keep_all <- suppressMessages(revalue(keep_all,
c("All All" = keep_population[1],
"Male All" = keep_population[2],
"Female All" = keep_population[3],
"All Children" = keep_population[4],
"All Adults" = keep_population[5],
"All Elderly" = keep_population[6],
"Male Children" = keep_population[7],
"Female Children" = keep_population[8],
"Male Adults" = keep_population[9],
"Female Adults" = keep_population[10],
"Male Elderly" = keep_population[11],
"Female Elderly" = keep_population[12])))
base2[base2$Population %in% keep_all, ]
})
geo_select <- reactive({
base2 <- pop_select()
base2[base2$Geography %in% input$geo, ]
})
output$plot1 <- renderPlot({
df <- geo_select()
df <- df[df$Statistic=="Percentage of persons in low income", ]
gg_statement <- ggplot(df, aes(x = Year, y = Value / 100, color = Line, group = Line))
gg_statement + gg_layers +
ggtitle(paste(input$pop, input$pop2, input$geo, sep = "-")) +
scale_x_continuous(breaks = seq(min(df$Year, na.rm = TRUE), max(df$Year, na.rm = TRUE), 5)) +
scale_y_continuous(breaks = seq(0, max(df$Value, na.rm=TRUE), 0.02), labels = percent) +
coord_cartesian(xlim = input$range)
})
output$plot2 <- renderPlot({
df <- geo_select()
df <- df[df$Statistic=="Number of persons in low income (x 1,000)", ]
gg_statement <- ggplot(df, aes(x = Year, y = Value, color = Line, group = Line))
gg_statement + gg_layers + ylab("Poverty count (x 1,000)") +
ggtitle(paste(input$pop, input$pop2, input$geo, sep = "-")) +
scale_x_continuous(breaks = seq(min(df$Year, na.rm = TRUE), max(df$Year, na.rm = TRUE), 5)) +
scale_y_continuous(labels = comma) + coord_cartesian(xlim = input$range)
})
# I'm making the table in a reactive({}) rather than renderTable({}) function, because I want write.csv()
# functionality for the table, and that cannot be done on the output object directly.
make_table <- reactive({
base <- scrape_data()
df <- base[base$Statistic %in% input$stat &
base$Geography %in% keep_geography &
base$Population %in% keep_population, ]
tab <- dcast(df, df[, input$var1] ~ df[, input$var2], value.var = "Value", function(x){mean(x, na.rm=T)})
colnames(tab)[1] <- names(df[input$var1])
if(unique(df$Statistic) == "Percentage of persons in low income") {
tab <- data.frame(tab[1], apply(tab[,-1], 2, function(x) {
out <- percent(x/100)
if (any(out == "NaN%") == TRUE) {
out[out == "NaN%"] <- NA }
return(out)
}), check.names = FALSE)
}
tab
})
output$table <- renderTable({
make_table()
})
output$downloadRaw <- downloadHandler(
filename = function() {
paste("CANSIM ",Sys.Date(), ".csv", sep = "")
},
content = function(file) {
      write.csv(scrape_data(), file)
}
)
output$downloadSummary <- downloadHandler(
filename = function() {
paste(paste("CANSIM", input$stat, input$var1, input$var2, "Summary "), Sys.Date(), ".csv", sep = "")
},
content = function(file) {
write.csv(make_table(), file)
}
)
}) |
f50f192ad9b1445e9b6896314766854f90dae8c1 | 2487dfa8bb23d3e1a9000dba265c416cccb69939 | /demo/UserGuide07.R | f00698c83893e4e546e18b03ce8cdc40db8871cd | [] | no_license | cran/R2MLwiN | f2c5694b60e3a392ad516ab63689c642f3fc72bb | 593d94db244d3fc07538aedf83fc183859b9f5fd | refs/heads/master | 2023-03-21T15:14:11.554599 | 2023-03-14T04:40:02 | 2023-03-14T04:40:02 | 17,681,793 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,937 | r | UserGuide07.R | ############################################################################
# MLwiN User Manual
#
# 7 Modelling the Variance as a Function of Explanatory Variables . . . 89
#
# Rasbash, J., Steele, F., Browne, W. J. and Goldstein, H. (2012).
# A User's Guide to MLwiN, v2.26. Centre for Multilevel Modelling,
# University of Bristol.
############################################################################
# R script to replicate all analyses using R2MLwiN
#
# Zhang, Z., Charlton, C., Parker, R, Leckie, G., and Browne, W.J.
# Centre for Multilevel Modelling, 2012
# http://www.bristol.ac.uk/cmm/software/R2MLwiN/
############################################################################
library(R2MLwiN)
# MLwiN folder
mlwin <- getOption("MLwiN_path")
while (!file.access(mlwin, mode = 1) == 0) {
cat("Please specify the root MLwiN folder or the full path to the MLwiN executable:\n")
mlwin <- scan(what = character(0), sep = "\n")
mlwin <- gsub("\\", "/", mlwin, fixed = TRUE)
}
options(MLwiN_path = mlwin)
# 7.1 A level 1 variance function for two groups . . . . . . . . . . . . .89
data(tutorial, package = "R2MLwiN")
covmatrix <- matrix(, nrow = 3, ncol = 1)
covmatrix[1, 1] <- 1
covmatrix[2, 1] <- "sexboy"
covmatrix[3, 1] <- "sexgirl"
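# The clre matrix lists random-part elements to remove: row 1 gives the level,
# rows 2-3 name the pair of terms whose covariance is constrained out
# (here the level-1 covariance between sexboy and sexgirl)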
(mymodel1 <- runMLwiN(normexam ~ 0 + sex + (0 + sex | student), estoptions = list(clre = covmatrix), data = tutorial))
# 7.2 Variance functions at level 2 . . . . . . . . . . . . . . . . . . . 95
(mymodel2 <- runMLwiN(normexam ~ 1 + standlrt + (1 + standlrt | school) + (1 | student), data = tutorial))
l2varfn <- mymodel2@RP["RP2_var_Intercept"] + (2 * mymodel2@RP["RP2_cov_Intercept_standlrt"] * mymodel2@data$standlrt) +
(mymodel2@RP["RP2_var_standlrt"] * mymodel2@data$standlrt^2)
varfndata <- as.data.frame(cbind(mymodel2@data$standlrt, l2varfn)[order(mymodel2@data$standlrt), ])
colnames(varfndata) <- c("standlrt", "l2varfn")
plot(varfndata$standlrt, varfndata$l2varfn, type = "l")
# 7.3 Further elaborating the model for the student-level variance . . . .99
(mymodel3 <- runMLwiN(normexam ~ 1 + standlrt + (1 + standlrt | school) + (1 + standlrt | student), data = tutorial))
l2varfn <- mymodel3@RP["RP2_var_Intercept"] + (2 * mymodel3@RP["RP2_cov_Intercept_standlrt"] * mymodel3@data$standlrt) +
(mymodel3@RP["RP2_var_standlrt"] * mymodel3@data$standlrt^2)
l1varfn <- mymodel3@RP["RP1_var_Intercept"] + (2 * mymodel3@RP["RP1_cov_Intercept_standlrt"] * mymodel3@data$standlrt) +
(mymodel3@RP["RP1_var_standlrt"] * mymodel3@data$standlrt^2)
varfndata <- as.data.frame(cbind(mymodel3@data$standlrt, l2varfn, l1varfn)[order(mymodel3@data$standlrt), ])
colnames(varfndata) <- c("standlrt", "l2varfn", "l1varfn")
if (!require(lattice)) {
warning("package lattice required to run this example")
} else {
xyplot(l2varfn + l1varfn ~ standlrt, data = varfndata, type = "l")
}
covmatrix <- matrix(, nrow = 3, ncol = 3)
covmatrix[1, 1] <- 1
covmatrix[2, 1] <- "standlrt"
covmatrix[3, 1] <- "standlrt"
covmatrix[1, 2] <- 1
covmatrix[2, 2] <- "sexgirl"
covmatrix[3, 2] <- "Intercept"
covmatrix[1, 3] <- 1
covmatrix[2, 3] <- "standlrt"
covmatrix[3, 3] <- "sexgirl"
(mymodel4 <- runMLwiN(normexam ~ 1 + standlrt + sex + (1 + standlrt | school) + (1 + standlrt + sex | student), estoptions = list(clre = covmatrix),
data = tutorial))
covmatrix <- matrix(, nrow = 3, ncol = 2)
covmatrix[1, 1] <- 1
covmatrix[2, 1] <- "standlrt"
covmatrix[3, 1] <- "standlrt"
covmatrix[1, 2] <- 1
covmatrix[2, 2] <- "sexgirl"
covmatrix[3, 2] <- "Intercept"
(mymodel5 <- runMLwiN(normexam ~ 1 + standlrt + sex + (1 + standlrt | school) + (1 + standlrt + sex | student), estoptions = list(clre = covmatrix),
data = tutorial))
l2varfn <- mymodel5@RP["RP2_var_Intercept"] + (2 * mymodel5@RP["RP2_cov_Intercept_standlrt"] * mymodel5@data$standlrt) +
(mymodel5@RP["RP2_var_standlrt"] * mymodel5@data$standlrt^2)
l1varfnboys <- mymodel5@RP["RP1_var_Intercept"] + (2 * mymodel5@RP["RP1_cov_Intercept_standlrt"] * mymodel5@data$standlrt)
l1varfngirls <- mymodel5@RP["RP1_var_Intercept"] + (2 * mymodel5@RP["RP1_cov_Intercept_standlrt"] * mymodel5@data$standlrt) +
(2 * mymodel5@RP["RP1_cov_standlrt_sexgirl"] * mymodel5@data$standlrt) + mymodel5@RP["RP1_var_sexgirl"]
varfndata <- as.data.frame(cbind(mymodel5@data$standlrt, l2varfn, l1varfnboys, l1varfngirls)[order(mymodel5@data$standlrt),
])
colnames(varfndata) <- c("standlrt", "l2varfn", "l1varfnboys", "l1varfngirls")
if (!require(lattice)) {
warning("package lattice required to run this example")
} else {
xyplot(l2varfn + l1varfnboys + l1varfngirls ~ standlrt, data = varfndata, type = "l")
}
# Chapter learning outcomes . . . . . . . . . . . . . . . . . . . . .106
############################################################################
|
7e64d47f58b97b2fd23f9ee3916df62df570d568 | 45b86ab61f57e639501425eb137e7cbcfd906d4a | /prelab7.r | 7cd8f59ceac3b09a7c0fee4436772a398d9de633 | [] | no_license | ZacharyThompson/math338 | b2abf61c451285502915d9321d6863a50a497395 | 1d8507cf5d7eed124852dbd8f19b5578b1ee91bb | refs/heads/main | 2023-04-28T07:37:55.690218 | 2021-05-22T21:10:56 | 2021-05-22T21:10:56 | 358,526,495 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 501 | r | prelab7.r | # Ch5 2-propZtest vs Chi-Square test
# Practice pg 23: two-sided two-proportion z-test (H1: p1 != p2)
x1=50
n1=1655
x2=31
n2=1652
phat1=x1/n1
phat2=x2/n2
phat=(x1+x2)/(n1+n2)
#Test stats
z=(phat1-phat2)/(sqrt(phat*(1-phat))*sqrt((1/n1)+1/n2))
z
#Pvalue
pval=2*(1-pnorm(z))
pval
# Chi-square test on the same 2x2 table; without continuity correction,
# X-squared equals z^2 from above
Drug=c('Clarinex', 'Placebo')
Treatments=c('take', 'not take')
Data=c(50,1605,31,1621)
Table=matrix(Data,nrow = 2, ncol = 2, byrow = TRUE,
dimnames = list(Drug, Treatments))
Table
chisq.test(Table, correct = FALSE)
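# Optional cross-check (sketch): prop.test() on the same counts, without
# continuity correction, reproduces the chi-square statistic (X-squared = z^2)
prop.test(c(x1, x2), c(n1, n2), correct = FALSE)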
|
0dce3d349dbed31ff38ce933a36e82efa3b80bf5 | f11595ebddcd18bc0dfba671cf362fa030df9219 | /6_R_analyses/figures/fig6_Neb.R | eee45faddf69ef561ae92ac5428b457d696d9004 | [] | no_license | beausoleilmo/Genomic-diversity-in-Darwins-finches | 63ab4175b2a8844d5773ca7b972da48d4a10c320 | c95e58b31175489239a60b7df7ffa81fdbd60ba2 | refs/heads/master | 2020-08-24T18:03:09.821544 | 2019-10-22T12:50:59 | 2019-10-22T12:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,351 | r | fig6_Neb.R | library(tidyverse)
library(ggpubr)
library(gridExtra)
df <- read_csv('../data/df.csv') %>%
mutate(Red_list = ifelse(Red_list_two=='treatened',
'Threatened','Non-threatened'),
Red_list = factor(Red_list,levels=c("Threatened","Non-threatened")))
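# Welch two-sample t-test (t.test defaults to var.equal = FALSE)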
test <- t.test(logNeb~Red_list_two, data=df)
pval <- test$p.value
boxpl <- df %>%
ggplot(aes(x=Red_list,y=logNeb, fill=Red_list, color=Red_list)) +
#geom_jitter() +
geom_boxplot(alpha=0.6) +
scale_fill_manual(values=get_palette("npg",2)) +
scale_color_manual(values=get_palette("npg",2)) +
#ylab('log(Neb)') +
ylab(expression(log(italic(N)[eb]))) +
theme_bw() +
xlab("Status") +
theme(#axis.title.x=element_blank(),
axis.title=element_text(size=18),
axis.text=element_text(size=14,color="black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
axis.line = element_line(colour = "black"),
legend.position='none',
strip.background =element_rect(fill="white"),
strip.text = element_text(size=18)) +
annotate("text",x=1.5,y=2.5,label=paste('P =',round(pval,2)),size=5.5) #+
#annotate("text",x=0.6,y=2.5,label="b)",size=6)
ggsave('fig5_Neb.png', plot = boxpl, height = 7, width = 7)
|
cf7a3c6872f1efb62614b699f1014251389b8d24 | 8c374f8b433c33bd2989a5cd66c6dff601208efa | /R/format_mr_results2.R | dff0f46dbbe93d0ee047110eee6a992087b5e834 | [
"MIT"
] | permissive | MRCIEU/TwoSampleMR | 2514d01692c95db1e9fbe23f8696e99a12c6ab34 | 592ebe05538558b330c39ddeda0d11b1313ad819 | refs/heads/master | 2023-08-29T22:47:33.163801 | 2023-05-29T20:46:39 | 2023-05-29T20:46:39 | 49,515,156 | 277 | 160 | NOASSERTION | 2023-06-13T00:24:11 | 2016-01-12T16:57:46 | R | UTF-8 | R | false | false | 16,736 | r | format_mr_results2.R | #' Split outcome column
#'
#' This function takes the outcome column from the results generated by [mr()] and splits it into separate columns for 'outcome name' and 'id'.
#'
#' @param mr_res Results from [mr()].
#'
#' @export
#' @return data frame
split_outcome <- function(mr_res)
{
	Pos<-grep("\\|\\|",mr_res$outcome) # the "||" indicates that the outcome column was derived from summary data in MR-Base; it will not always look like this, e.g. if the user has supplied their own outcomes
if(sum(Pos)!=0){
Outcome<-as.character(mr_res$outcome[Pos])
Vars<-strsplit(Outcome,split= "\\|\\|")
Vars<-unlist(Vars)
Vars<-trim(Vars)
Trait<-Vars[seq(1,length(Vars),by=2)]
id<-Vars[seq(2,length(Vars),by=2)]
mr_res$outcome<-as.character(mr_res$outcome)
mr_res$outcome[Pos]<-Trait
}
return(mr_res)
}
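# Example (sketch, not run): for outcomes stored as "Trait || id", this strips
# the "|| id" suffix from the outcome column:
# mr_res <- split_outcome(mr_res)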
#' Split exposure column
#'
#' This function takes the exposure column from the results generated by [mr()] and splits it into separate columns for 'exposure name' and 'id'.
#'
#' @param mr_res Results from [mr()].
#'
#' @export
#' @return data frame
split_exposure <- function(mr_res)
{
	Pos<-grep("\\|\\|",mr_res$exposure) # the "||" indicates that the exposure column was derived from summary data in MR-Base; it will not always look like this, e.g. if the user has supplied their own exposures
# Pos2<-grep("\\|\\|",mr_res$exposure,invert=T)
# mr_res2 <-mr_res[Pos2,]
# mr_res1<-mr_res[Pos,]
if(sum(Pos)!=0){
Exposure<-as.character(mr_res$exposure[Pos])
Vars<-strsplit(as.character(Exposure),split= "\\|\\|")
Vars<-unlist(Vars)
Vars<-trim(Vars)
Trait<-Vars[seq(1,length(Vars),by=2)]
mr_res$exposure<-as.character(mr_res$exposure)
mr_res$exposure[Pos]<-Trait
}
return(mr_res)
}
#' Generate odds ratios
#'
#' This function takes b and se from [mr()] and generates odds ratios and 95 percent confidence intervals.
#'
#' @param mr_res Results from [mr()].
#'
#' @export
#' @return data frame
generate_odds_ratios <- function(mr_res)
{
mr_res$lo_ci <- mr_res$b - 1.96 * mr_res$se
mr_res$up_ci <- mr_res$b + 1.96 * mr_res$se
mr_res$or <- exp(mr_res$b)
mr_res$or_lci95 <- exp(mr_res$lo_ci)
mr_res$or_uci95 <- exp(mr_res$up_ci)
return(mr_res)
}
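# Example (sketch, not run): for a binary outcome where b is a log odds ratio,
# this appends lo_ci/up_ci on the log scale and the exponentiated columns
# or, or_lci95 and or_uci95:
# mr_res <- generate_odds_ratios(mr(dat))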
#' Subset MR-results on method
#'
#' This function takes MR results from [mr()] and restricts to a single method per exposure x disease combination.
#'
#' @param mr_res Results from [mr()].
#' @param single_snp_method Which of the single SNP methods to use when only 1 SNP was used to estimate the causal effect? The default is `"Wald ratio"`.
#' @param multi_snp_method Which of the multi-SNP methods to use when there was more than 1 SNPs used to estimate the causal effect? The default is `"Inverse variance weighted"`.
#'
#' @export
#' @return data frame.
subset_on_method <- function(mr_res, single_snp_method="Wald ratio", multi_snp_method="Inverse variance weighted")
{
dat <- subset(mr_res, (nsnp==1 & method==single_snp_method) | (nsnp > 1 & method == multi_snp_method))
return(dat)
}
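# Example (sketch, not run): keep one estimate per summary set, the Wald ratio
# when nsnp == 1 and IVW otherwise:
# mr_res_main <- subset_on_method(mr(dat))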
#' Combine all mr results
#'
#' This function combines results of [mr()], [mr_heterogeneity()], [mr_pleiotropy_test()] and [mr_singlesnp()] into a single data frame.
#' It also merges the results with outcome study level characteristics in [available_outcomes()].
#' If desired it also exponentiates results (e.g. if the user wants log odds ratio converted into odds ratios with 95 percent confidence intervals).
#' The exposure and outcome columns from the output from [mr()] contain both the trait names and trait ids.
#' The `combine_all_mrresults()` function splits these into separate columns by default.
#'
#' @param res Results from [mr()].
#' @param het Results from [mr_heterogeneity()].
#' @param plt Results from [mr_pleiotropy_test()].
#' @param sin Results from [mr_singlesnp()].
#' @param ao_slc Logical; if set to `TRUE` then outcome study level characteristics are retrieved from [available_outcomes()]. Default is `TRUE`.
#' @param Exp Logical; if set to `TRUE` results are exponentiated. Useful if user wants log odds ratios expressed as odds ratios. Default is `FALSE`.
#' @param split.exposure Logical; if set to `TRUE` the exposure column is split into separate columns for the exposure name and exposure ID. Default is `FALSE`.
#' @param split.outcome Logical; if set to `TRUE` the outcome column is split into separate columns for the outcome name and outcome ID. Default is `FALSE`.
#'
#' @export
#' @return data frame
#
# library(TwoSampleMR)
# library(MRInstruments)
# exp_dat <- extract_instruments(outcomes=c(2,300))
# chd_out_dat <- extract_outcome_data(
# snps = exp_dat$SNP,
# outcomes = c(6,7,8,9)
# )
# dat <- harmonise_data(
# exposure_dat = exp_dat,
# outcome_dat = chd_out_dat
# )
# dat<-power_prune(dat,method=1)
# Res<-mr(dat)
# Het<-mr_heterogeneity(dat)
# Plt<-mr_pleiotropy_test(dat)
# Sin<-mr_singlesnp(dat)
# All.res<-combine_all_mrresults(res=Res,het=Het,plt=Plt,sin=Sin)
# All.res<-split_exposure(All.res)
# All.res<-split_outcome(All.res)
combine_all_mrresults <- function(res,het,plt,sin,ao_slc=TRUE,Exp=FALSE,split.exposure=FALSE,split.outcome=FALSE)
{
het<-het[,c("id.exposure","id.outcome","method","Q","Q_df","Q_pval")]
# Convert all factors to character
# lapply(names(Res), FUN=function(x) class(Res[,x]))
Class<-unlist(lapply(names(res), FUN=function(x) class(res[,x])))
if(any(Class == "factor")) {
Pos<-which(unlist(lapply(names(res), FUN=function(x) class(res[,x])))=="factor")
for(i in 1:length(Pos)){
res[,Pos[i]]<-as.character(res[,Pos[i]])
}
}
# lapply(names(Het), FUN=function(x) class(Het[,x]))
Class<-unlist(lapply(names(het), FUN=function(x) class(het[,x])))
if(any(Class == "factor")) {
Pos<-which(unlist(lapply(names(het), FUN=function(x) class(het[,x])))=="factor")
for(i in 1:length(Pos)){
het[,Pos[i]]<-as.character(het[,Pos[i]])
}
}
# lapply(names(Sin), FUN=function(x) class(Sin[,x]))
Class<-unlist(lapply(names(sin), FUN=function(x) class(sin[,x])))
if(any(Class == "factor")) {
Pos<-which(unlist(lapply(names(sin), FUN=function(x) class(sin[,x])))=="factor")
for(i in 1:length(Pos)){
sin[,Pos[i]]<-as.character(sin[,Pos[i]])
}
}
sin<-sin[grep("[:0-9:]",sin$SNP),]
sin$method<-"Wald ratio"
names(sin)[names(sin)=="p"]<-"pval"
# Res<-Res[Res$method %in% c("MR Egger","Weighted median","Inverse variance weighted"),]
#method is also the name of an argument in the method function. this prevents all.x argument from working. rename method column
names(res)[names(res)=="method"]<-"Method"
names(het)[names(het)=="method"]<-"Method"
names(sin)[names(sin)=="method"]<-"Method"
res<-merge(res,het,by=c("id.outcome","id.exposure","Method"),all.x=TRUE)
res<-plyr::rbind.fill(res,sin[,c("exposure","outcome","id.exposure","id.outcome","SNP","b","se","pval","Method")])
if(ao_slc)
{
ao<-available_outcomes()
names(ao)[names(ao)=="nsnp" ]<-"nsnps.outcome.array"
res<-merge(res,ao[,!names(ao) %in% c("unit","priority","sd","path","note","filename","access","mr")],by.x="id.outcome",by.y="id")
}
res$nsnp[is.na(res$nsnp)]<-1
for(i in unique(res$id.outcome))
{
Methods<-unique(res$Method[res$id.outcome==i])
Methods<-Methods[Methods!="Wald ratio"]
for(j in unique(Methods))
{
res$SNP[res$id.outcome == i & res$Method==j]<-paste(res$SNP[res$id.outcome == i & res$Method=="Wald ratio"],collapse="; ")
}
}
if(Exp){
res$or<-exp(res$b)
res$or_lci95<-exp(res$b-res$se*1.96)
res$or_uci95<-exp(res$b+res$se*1.96)
}
# add intercept test from MR Egger
plt<-plt[,c("id.outcome","id.exposure","egger_intercept","se","pval")]
plt$Method<-"MR Egger"
names(plt)[names(plt)=="egger_intercept"]<-"intercept"
names(plt)[names(plt)=="se"]<-"intercept_se"
names(plt)[names(plt)=="pval"]<-"intercept_pval"
res<-merge(res,plt,by=c("id.outcome","id.exposure","Method"),all.x=TRUE)
if(split.exposure){
res<-split_exposure(res)
}
if(split.outcome){
res<-split_outcome(res)
}
Cols<-c("Method","outcome","exposure","nsnp","b","se","pval","intercept","intercept_se","intercept_pval","Q","Q_df","Q_pval","consortium","ncase","ncontrol","pmid","population")
res<-res[,c(names(res)[names(res) %in% Cols],names(res)[which(!names(res) %in% Cols)])]
# names(ResSNP)<-tolower(names(ResSNP))
return(res)
}
#' Power prune
#'
#' When there are duplicate summary sets for a particular exposure-outcome combination, this function keeps the
#' exposure-outcome summary set with the highest expected statistical power.
#' This can be done by dropping the duplicate summary sets with the smaller sample sizes.
#' Alternatively, the pruning procedure can take into account instrument strength and outcome sample size.
#' The latter is useful, for example, when there is considerable variation in SNP coverage between duplicate summary sets
#' (e.g. because some studies have used targeted or fine mapping arrays).
#' If there are a large number of SNPs available to instrument an exposure,
#' the outcome GWAS with the better SNP coverage may provide better power than the outcome GWAS with the larger sample size.
#'
#' @param dat Results from [harmonise_data()].
#' @param method Should the duplicate summary sets be pruned on the basis of sample size alone (`method = 1`)
#' or a combination of instrument strength and sample size (`method = 2`)? Default set to `1`.
#' When set to 1, the duplicate summary sets are first dropped on the basis of the outcome sample size (smaller duplicates dropped).
#' If duplicates are still present, remaining duplicates are dropped on the basis of the exposure sample size (smaller duplicates dropped).
#' When method is set to `2`, duplicates are dropped on the basis of instrument strength
#' (amount of variation explained in the exposure by the instrumental SNPs) and sample size,
#' and assumes that the SNP-exposure effects correspond to a continuous trait with a normal distribution (i.e. exposure cannot be binary).
#' The SNP-outcome effects can correspond to either a binary or continuous trait. If the exposure is binary then `method=1` should be used.
#'
#' @param dist.outcome The distribution of the outcome. Can either be `"binary"` or `"continuous"`. Default set to `"binary"`.
#'
#' @export
#' @return data.frame with duplicate summary sets removed
power_prune <- function(dat,method=1,dist.outcome="binary")
{
# dat[,c("eaf.exposure","beta.exposure","se.exposure","samplesize.outcome","ncase.outcome","ncontrol.outcome")]
if(method==1){
L<-NULL
id.sets<-paste(split_exposure(dat)$exposure,split_outcome(dat)$outcome)
id.set.unique<-unique(id.sets)
dat$id.set<-as.numeric(factor(id.sets))
for(i in 1:length(id.set.unique)){
# print(i)
print(paste("finding summary set for --", id.set.unique[i],"-- with largest sample size", sep=""))
dat1<-dat[id.sets == id.set.unique[i],]
id.subset<-paste(dat1$exposure,dat1$id.exposure,dat1$outcome,dat1$id.outcome)
id.subset.unique<-unique(id.subset)
dat1$id.subset<-as.numeric(factor(id.subset))
ncase<-dat1$ncase.outcome
if(is.null(ncase)){
ncase<-NA
}
if(any(is.na(ncase))){
ncase<-dat1$samplesize.outcome
if(dist.outcome=="binary") warning(paste("dist.outcome set to binary but case sample size is missing. Will use total sample size instead but power pruning may be less accurate"))
}
if(any(is.na(ncase))) stop("sample size missing for at least 1 summary set")
dat1<-dat1[order(ncase,decreasing=TRUE),]
# id.expout<-paste(split_exposure(dat)$exposure,split_outcome(dat)$outcome)
ncase<-ncase[order(ncase,decreasing=TRUE)]
# dat1$power.prune.ncase<-"drop"
# dat1$power.prune.ncase[ncase==ncase[1]]<-"keep"
dat1<-dat1[ncase==ncase[1],]
nexp<-dat1$samplesize.exposure
dat1<-dat1[order(nexp,decreasing=TRUE),]
nexp<-nexp[order(nexp,decreasing=TRUE)]
# dat1$power.prune.nexp<-"drop"
# dat1$power.prune.nexp[nexp==nexp[1]]<-"keep"
# dat1$power.prune<-"drop"
# dat1$power.prune[dat1$power.prune.ncase=="keep" & dat1$power.prune.nexp == "keep"]<-"keep"
# dat1<-dat1[,!names(dat1) %in% c("power.prune.ncase","power.prune.nexp")]
# dat1[,c("samplesize.exposure","ncase.outcome","exposure","outcome")]
dat1<-dat1[nexp==nexp[1],]
L[[i]]<-dat1
}
dat<-do.call(rbind,L)
	dat<-dat[,!names(dat) %in% c("id.set","id.subset")] # drop the helper columns added above
# if(drop.duplicates == T) {
# dat<-dat[dat$power.prune=="keep",]
# }
return(dat)
}
if(method==2){
L<-NULL
id.sets<-paste(split_exposure(dat)$exposure,split_outcome(dat)$outcome)
id.set.unique<-unique(id.sets)
dat$id.set<-as.numeric(factor(id.sets))
for(i in 1:length(id.set.unique)){
print(i)
print(id.set.unique[i])
dat1<-dat[id.sets == id.set.unique[i],]
# unique(dat1[,c("exposure","outcome")])
id.subset<-paste(dat1$exposure,dat1$id.exposure,dat1$outcome,dat1$id.outcome)
id.subset.unique<-unique(id.subset)
dat1$id.subset<-as.numeric(factor(id.subset))
L1<-NULL
for(j in 1:length(id.subset.unique)){
# print(j)
print(paste("identifying best powered summary set: ",id.subset.unique[j],sep=""))
dat2<-dat1[id.subset ==id.subset.unique[j], ]
p<-dat2$eaf.exposure #effect allele frequency
# b<-abs(dat2$beta.exposure) # effect of SNP on risk factor
se<-dat2$se.exposure
z<-dat2$beta.exposure/dat2$se.exposure
n<-dat2$samplesize.exposure
b<-z/sqrt(2*p*(1-p)*(n+z^2))
if(any(is.na(dat2$ncase.outcome))) stop(paste("number of cases missing for summary set: ",id.subset.unique[j],sep=""))
n.cas<-dat2$ncase.outcome
n.con<-dat2$ncontrol.outcome
var<-1 # variance of risk factor assumed to be 1
r2<-2*b^2*p*(1-p)/var
if(any(is.na(r2))) warning("beta or allele frequency missing for some SNPs, which could affect accuracy of power pruning")
r2<-r2[!is.na(r2)]
# k<-length(p[!is.na(p)]) #number of SNPs in the instrument / associated with the risk factor
# n<-min(n) #sample size of the exposure/risk factor GWAS
r2sum<-sum(r2) # sum of the r-squares for each SNP in the instrument
# F<-r2sum*(n-1-k)/((1-r2sum*k )
if(dist.outcome == "continuous"){
iv.se<- 1/sqrt(unique(dat2$samplesize.outcome)*r2sum) #standard error of the IV should be proportional to this
}
if(dist.outcome == "binary"){
iv.se<-1/sqrt(unique(n.cas)*unique(n.con)*r2sum) #standard error of the IV should be proportional to this
if(any(is.na(n.cas)) | any(is.na(n.con))) {
warning("dist.outcome set to binary but number of cases or controls is missing. Will try using total sample size instead but power pruning will be less accurate")
iv.se<- 1/sqrt(unique(dat2$samplesize.outcome)*r2sum)
}
}
# Power calculations to implement at some point
# iv.se<-1/sqrt(unique(n.cas)*unique(n.con)*r2sum) #standard error of the IV should be proportional to this
# n.outcome<-unique(n.con+n.cas)
# ratio<-unique(n.cas/n.con)
# sig<-alpha #alpha
# b1=log(or) # assumed log odds ratio
# power<-pnorm(sqrt(n.outcome*r2sum*(ratio/(1+ratio))*(1/(1+ratio)))*b1-qnorm(1-sig/2))
dat2$iv.se<-iv.se
# dat2$power<-power
L1[[j]]<-dat2
}
L[[i]]<-do.call(rbind,L1)
}
dat2<-do.call(rbind,L)
dat2<-dat2[order(dat2$id.set,dat2$iv.se),]
id.sets<-unique(dat2$id.set)
id.keep<-NULL
for(i in 1:length(id.sets)){
# print(i)
# print(id.sets[i])
id.temp<-unique(dat2[dat2$id.set==id.sets[i],c("id.set","id.subset")])
id.keep[[i]]<-paste(id.temp$id.set,id.temp$id.subset)[1]
}
dat2$power.prune<-"drop"
dat2$power.prune[paste(dat2$id.set,dat2$id.subset) %in% id.keep]<-"keep"
# if(drop.duplicates == T) {
dat2<-dat2[dat2$power.prune=="keep",]
# }
dat2<-dat2[,!names(dat2) %in% c("iv.se","power.prune","id.set","id.subset")]
dat<-dat2
# dat2[,c("exposure","outcome","iv.se","power","id.set","id.subset","power.prune")]
# unique(dat2[order(dat2$id.set,dat2$id.subset),c("samplesize.exposure","ncase.outcome","exposure","outcome","iv.se","power","id.set","id.subset")])
# dat2[dat2$id.set==1,c("iv.se","id.set","id.subset")]
return(dat)
}
}
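# Example (sketch, not run): with duplicate exposure-outcome summary sets in
# harmonised data, keep the best-powered duplicate using sample size alone:
# dat_pruned <- power_prune(dat, method = 1, dist.outcome = "binary")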
#' Size prune
#'
#' When there are duplicate summary sets for a particular exposure-outcome combination,
#' this function drops the duplicates with the smaller total sample size
#' (for binary outcomes, the number of cases is used instead of total sample size).
#'
#' @param dat Results from [harmonise_data()].
#'
#' @export
#' @return data frame
size.prune <- function(dat)
{
dat$ncase[is.na(dat$ncase)]<-dat$samplesize[is.na(dat$ncase)]
dat<-dat[order(dat$ncase,decreasing=TRUE),]
id.expout<-paste(dat$exposure,dat$outcome)
id.keep<-id.expout[!duplicated(paste(dat$exposure,dat$originalname.outcome))]
	dat<-dat[id.expout %in% id.keep,]
	return(dat)
}
|
bcb2ce4d8e3ae818cd30e120e91be85b376dfaca | 7959c075b8d8fd90c423863d6cc51cb29ea517c5 | /lab_03.R | f27aafe73fc2f899e7bb65ed722f642c252e9632 | [] | no_license | salientsoph/Rexam | d8373e7bbfc85fc38cc203add6572574b88c7926 | 0d0b9cb7fc378654c886ca70ba56498770e8b4a8 | refs/heads/master | 2022-12-24T06:02:13.720153 | 2020-09-25T14:18:31 | 2020-09-25T14:18:31 | 293,553,326 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,828 | r | lab_03.R | # ๋ฌธ์ 1
# iris ๋ผ๋ ๋ฐ์ดํฐ์
์ด ๋ช ๊ฐ์ ๊ด์ธก์น๋ฅผ ๊ฐ์ง๊ณ ์๋์ง
# ์ด๋ ํ ๋ณ์๋ค์ ๊ฐ์ง๊ณ ์๋์ง
str(iris)
View(iris)
# ๋ฌธ์ 2
df1 <- data.frame(
x=c(1:5),
y=c(2,4,6,8,10))
# y = seq(2,10,2)
# ๋ฌธ์ 3
df2 <- data.frame(
col1=c(1:5),
col2=c('a','b','c','d','e'),
col3=c(6:10))
#col2=letters[1:5]
# ๋ฌธ์ 4
์ ํ๋ช
= c('์ฌ๊ณผ', '๋ธ๊ธฐ', '์๋ฐ')
๊ฐ๊ฒฉ = c(1800, 1500, 3000)
ํ๋งค๋ = c(24, 38, 13)
df3 <- data.frame(์ ํ๋ช
, ๊ฐ๊ฒฉ, ํ๋งค๋)
str(df3)
# ๋ฌธ์ 5
# ๋ฐ์ดํฐ ํ๋ ์์ ์ด์ฉํด์ ๊ณผ์ผ ๊ฐ๊ฒฉ ํ๊ท , ํ๋งค๋ ํ๊ท
mean(df3$๊ฐ๊ฒฉ)
mean(df3$ํ๋งค๋)
# ๋ฌธ์ 6
name <- c("Potter", "Elsa", "Gates", "Wendy", "Ben")
gender <- factor(c("M", "F", "M", "F", "M"))
math <- c(85, 76, 99, 88, 40)
df4 <- data.frame(name, gender, math)
df4
# (a) stat ๋ณ์๋ฅผ ์ถ๊ฐํ์์ค.
df4$stat <- c(76, 73, 95, 82, 35)
df4
# (b) math ๋ณ์์ stat ๋ณ์์ ํฉ์ ๊ตฌํ์ฌ score ๋ณ์์ ์ ์ฅ
df4$score <- df4$math + df4$stat
df4$score
# (c) score๊ฐ 150 ์ด์์ด๋ฉด A,
#100 ์ด์ 150 ๋ฏธ๋ง์ด๋ฉด B,
#70 ์ด์ 100 ๋ฏธ๋ง์ด๋ฉด C,
#70 ๋ฏธ๋ง์ด๋ฉด D ๋ฑ๊ธ์ ๋ถ์ฌํ๊ณ grade ๋ณ์์ ์ ์ฅ
df4$grade <- ifelse(df4$score >= 150,"A",
ifelse(df4$score>=100,"B",
ifelse(df4$score>=70,"C","D")))
df4
# ๋ฌธ์ 7
# emp๋ณ์์ ํ ๋น๋ ๋ฐ์ดํฐํ๋ ์ ๊ฐ์ฒด์ ๊ตฌ์กฐ
emp <- read.csv("data/emp.csv")
str(emp)
# ๋ฌธ์ 8
# emp ์์ 3ํ, 4ํ , 5ํ๋ง
emp[c(3,4,5),]
emp[3:5,]
emp[seq(3,5),]
# ๋ฌธ์ 9
# emp ์์ 4๋ฒ์ด์ ์ ์ธ
emp[,-4]
# ๋ฌธ์ 10
# emp ์์ ename์ปฌ๋ผ
emp[,"ename"]
# ๋ฌธ์ 11
# emp ์์ ename ๊ณผ sal์ปฌ๋ผ
emp[,c("ename", "sal")]
# ๋ฌธ์ 12
# ์
๋ฌด๊ฐ SALESMAN ์ธ ์ฌ์์ ์ด๋ฆ, ์๊ธ, ์ง์
์ ์ถ๋ ฅํ๋ค.
subset(emp,emp$job=="SALESMAN", c("ename","sal", "job"))
# ๋ฌธ์ 13
# ์๊ธ์ด 1000 ์ด์์ด๊ณ 3000์ดํ์ธ ์ฌ์๋ค์ ์ด๋ฆ, ์๊ธ, ๋ถ์๋ฒํธ
subset(emp, select=c("ename","sal", "deptno"), subset=(sal>=1000 & sal<=3000))
# ๋ฌธ์ 14
# emp ์์ ์ง์
์ด ANALYST ๊ฐ ์๋ ์ฌ์๋ค์ ์ด๋ฆ, ์ง์
, ์๊ธ
subset(emp,emp$job!="ANALYST", c("ename", "job", "sal"))
# ๋ฌธ์ 15
# emp ์์ ์
๋ฌด๊ฐ SALESMAN ์ด๊ฑฐ๋ ANALYST ์ธ ์ฌ์๋ค์ ์ด๋ฆ, ์ง์
subset(emp,emp$job==c("ANALYST", "SALESMAN"), c("ename", "job"))
# ๋ฌธ์ 16
# emp ์์ ์ปค๋ฏธ์
์ด ์ ํด์ง์ง ์์ ์ง์์ ์ด๋ฆ๊ณผ ์๊ธ ์ ๋ณด
subset(emp,is.na(emp$comm), c("ename", "sal"))
#emp[is.na(emp$comm), c("ename", "sal")]
# ๋ฌธ์ 17
# ์๊ธ์ด ์ ์ ์์ผ๋ก ๋ชจ๋ ์ง์ ์ ๋ณด(order)
emp[order(emp$sal, decreasing=F), c(1:8)]
emp[order(emp$sal),]
a <- emp[order(emp$sal),]
tail(emp[order(emp$sal),],1)
emp[which.max(emp$sal),]
# ๋ฌธ์ 18
# emp์ ํ๊ณผ ์ด์ ๊ฐฏ์๋ฅผ ์ ๊ฒํ๋ค.
nrow(emp)
ncol(emp)
dim(emp) |
ab612f7b904e544855fd0df6d15e3f1c1b451548 | 92493903bd694b01c0bffb1795de1eabeae61339 | /bayes/bayes_decision_boundary_2.R | d5164a18fab6d0574379a72ea4a7a6db3412cecb | [] | no_license | sreepunith/R | d217be08a73f5b87adf7bffab473997e47abfe49 | 9e0f77f906273894ee1ea265a0b29d5800d8310f | refs/heads/master | 2021-01-21T11:46:26.877635 | 2017-05-17T05:04:46 | 2017-05-17T05:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,224 | r | bayes_decision_boundary_2.R | # This experiment aims at demonstrating how we can plot the Bayes decision boundary
# for a two-category two-dimensional data. The data is sampled from two bivariate
# Gaussian distributions with different means and different covariance matrices.
#
# To plot the decision boundary, we need to calculate the probability P(x | y = 0)
# and P(x | y =1) by using the bivariate probability density function. It can be
# observed that the decision boundary is the contour where two distributions
# intersect each other. All points along the contour have P(x | y = 0) - P(x | y = 1) = 0
################################################################################
# Load library
################################################################################
library(MASS) #mvrnorm
library(ggplot2) #plot
set.seed(123456)
################################################################################
# Simulate data
################################################################################
# Means
mu_0 <- c(3, 6)
mu_1 <- c(3, -2)
# Covariance matrices
sigma_1 <- matrix(c(1/2, 0, 0, 2), nrow = 2, ncol = 2, byrow = TRUE)
sigma_2 <- matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2, byrow = TRUE)
# Sample 10 points from two distributions
mu_sample_0 <- mvrnorm(n = 10, mu = mu_0, Sigma = sigma_1) #class 0
mu_sample_1 <- mvrnorm(n = 10, mu = mu_1, Sigma = sigma_2) #class 1
# Combine sampling data points
dt <- rbind(mu_sample_0, mu_sample_1)
dt <- as.data.frame(dt)
names(dt) <- c("x1", "x2")
class <- rep(c(0, 1), each = 10)
dt <- cbind(dt, class)
dt$class <- as.factor(dt$class)
# Point plot
ggplot() +
geom_point(dt, mapping = aes(x = x1, y = x2, colour = class, shape = class), size = 2.5)
################################################################################
# Bayes decision boundary
################################################################################
# Calculate P(x | y = 0) - P(x | y = 1)
range_of_x <- c(min(mu_sample_0, mu_sample_1), max(mu_sample_0, mu_sample_1)) # get
# min and max of the data points
points <- seq(range_of_x[1], range_of_x[2], by = 0.05) #generate points
#that belong to the range; a much finer step makes the grid loop below huge
grid <- expand.grid(points, points) #create a grid of the corresponding plot
#and find points where P(x | y = 0) - P(x | y = 1) = 0
grid <- as.matrix(grid) #convert to matrix for math manipulation
mu_0 <- matrix(mu_0, nrow = 2)
mu_1 <- matrix(mu_1, nrow = 2)
prob <- vector()
for (i in 1:nrow(grid)) {
x <- grid[i, ] #get point x
  # the covariance matrices differ, so the 1/sqrt(det(Sigma)) normalising
  # constants do not cancel and must be kept when comparing the densities
  x_prob_0 <- exp(-0.5 * t(x - mu_0) %*% solve(sigma_1) %*% (x - mu_0)) / sqrt(det(sigma_1)) #P(x|y = 0)
  x_prob_1 <- exp(-0.5 * t(x - mu_1) %*% solve(sigma_2) %*% (x - mu_1)) / sqrt(det(sigma_2)) #P(x|y = 1)
prob <- c(prob, x_prob_0 - x_prob_1) ##P(x|y = 0) - #P(x|y = 1)
}
grid <- as.data.frame(grid)
grid$prob <- prob #store prob in grid
grid$pred <- ifelse(prob >= 0, 0, 1) #prediction by minimizing the probability of error
#(i.e., Bayes decision rule)
# Plot
ggplot() +
geom_point(dt, mapping = aes(x = x1, y = x2, colour = class, shape = class), size = 2.5) +
geom_contour(data = grid, mapping = aes(x = Var1, y = Var2, z = pred), size = 0.6)
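# Optional check (sketch): apply the same Bayes rule to the 20 sampled points
# and report the empirical (training) error rate; reuses dt, mu_0, mu_1,
# sigma_1 and sigma_2 defined above
pred_train <- apply(as.matrix(dt[, c("x1", "x2")]), 1, function(x) {
  p0 <- exp(-0.5 * t(x - mu_0) %*% solve(sigma_1) %*% (x - mu_0)) / sqrt(det(sigma_1))
  p1 <- exp(-0.5 * t(x - mu_1) %*% solve(sigma_2) %*% (x - mu_1)) / sqrt(det(sigma_2))
  ifelse(p0 >= p1, 0, 1)
})
mean(pred_train != as.numeric(as.character(dt$class))) # share of misclassified points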
|
ca360aa824a33c9bc8e92da0460a4b1dc9f469d6 | f0d35b6ea0ebe9517537ecdf921bb442f1fd7550 | /ColonelHouNote/src/main/java/com/hn/opensource/eclipse/้ไธญๅ้้ซไบฎๆพ็คบ.RD | 11ef13dd5f0755a2aec5c3ca092dd20fd006d635 | [] | no_license | jiangsy163/ColonelHouNote | 01191a63d51542b09ef23e9662896e8407211119 | 6173c265c82b7b0197846cf621ecebab44073ef6 | refs/heads/master | 2021-01-14T12:47:41.091704 | 2015-11-19T08:40:09 | 2015-11-26T08:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 245 | rd | ้ไธญๅ้้ซไบฎๆพ็คบ.RD | Mark Occurencesๅ
จ้
github for eclipse : http://eclipse.org/egit/download/
http://mirror.cc.columbia.edu/pub/software/eclipse/egit/updates/org.eclipse.egit.repository-3.5.1.201410131835-r.zip
่ฒ่ฐ๏ผ85ใ้ฅฑๅๅบฆ๏ผ120ใไบฎๅบฆ๏ผ208 |
cd27b5723c35ca8cb12f76dc67007957f9b8c5c1 | 098841409c03478ddae35c4cdf6367cfd65fa3bf | /plot/code/09_imp_eval.R | cb1a99136e3d378284707fa4059509acfcd2f9b0 | [] | no_license | wangdi2016/imputationBenchmark | 0281746b482788c347faf9d96e8288639ba388a6 | 0881121444975cd0a3ee2ce69aaec46c3acd7791 | refs/heads/master | 2023-07-29T10:16:14.004610 | 2021-09-09T20:00:43 | 2021-09-09T20:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,595 | r | 09_imp_eval.R | allmtd = list.files('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/GSE81861/')
allmtd = setdiff(allmtd, c('viper','screcover'))
mtd = 'saver'
res = readRDS(file=paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/perf/hm_cellline_cor/',mtd,'.rds'))
v = unlist(res)
sexpr = readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/GSE81861/',mtd,'/GSE81861_Cell_Line_COUNT.rds'))
bexpr = readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/data/bulkrna/expr/hm_cellline_combineEncsr.rds')
intergene = intersect(rownames(sexpr),rownames(bexpr))
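# harmonise the bulk column names with the cell-line labels used below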
colnames(bexpr)[which(colnames(bexpr) == 'H1-hESC')] = 'H1'
colnames(bexpr)[which(colnames(bexpr) == 'IMR-90')] = 'IMR90'
cl = 'H1'
library(ggplot2)
p1 <- ggplot(data=data.frame(sc = sexpr[intergene, which(sub('_.*','', colnames(sexpr)) == cl)[1]], bulk = bexpr[intergene,cl]), aes(x = sc, y = bulk)) +
geom_point(color = 'black', size = 0.2, alpha = 0.5) +
theme_classic() +
xlab('an imputed single cell') +
ylab('bulk') +
ggtitle(paste0('SAVER ', cl))
df <- sapply(allmtd, function(mtd){
res = readRDS(file=paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/perf/hm_cellline_cor/',mtd,'.rds'))
res[[cl]]
})
library(reshape2)
pd = melt(df)
colnames(pd) = c('sc','method','cor')
pd$method = factor(as.character(pd$method), levels = names(sort(tapply(pd$cor, pd$method, median))))
p2 <- ggplot(data=pd, aes(x=factor(method), y = cor)) + geom_violin() + coord_flip() + theme_classic() + xlab('') + ylab('correlation between sc and bulk')
hmdf1 <- sapply(allmtd, function(mtd){
res = readRDS(file=paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/perf/hm_cellline_cor/',mtd,'.rds'))
sapply(res, median)
})
hmdf2 <- sapply(allmtd, function(mtd){
print(mtd)
if (file.exists(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/perf/10xcellline_cor/',mtd,'.rds'))){
res = readRDS(file=paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/perf/10xcellline_cor/',mtd,'.rds'))
sapply(res, median)
}
})
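# NB: if any per-method .rds file is missing, the if() above returns NULL for
# that method, sapply() then yields a list, and t()/cbind() below will fail;
# this assumes results exist for every method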
hmdf = cbind(t(hmdf1),t(hmdf2))
hmdf = melt(hmdf)
colnames(hmdf) <- c('method','ct','cor')
p3 <- ggplot(data=hmdf, aes(x=ct, y=method)) + geom_tile(aes(fill=cor)) + scale_fill_gradient(low = "black", high = "yellow") + xlab('') + ylab('') + theme(legend.position = 'bottom')
library(gridExtra)
pdf('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/plot/plot/imp_eval.pdf',width=10,height=8)
grid.arrange(p1,p2,p3,layout_matrix=matrix(c(1,2,2,3,3,3),3))
dev.off()
|
01e5bef0c04fff3188ab18409590afa38d028f3a | 6780fb693096344776936b48a2721223182ea19c | /test_with_substitue_and_etc.R | 7eb8e4157ef0359855a6f56c84892e1fc47f7753 | [] | no_license | xueyan2015/R_Library_On_Mac | 5d07d2190a6817422e2e19e90ae1cf5439bd2b08 | 062d3d95a2ba12d28bc1930a3f9871c5bf28a082 | refs/heads/master | 2021-01-20T16:58:52.211233 | 2017-09-10T07:44:03 | 2017-09-10T07:44:03 | 82,813,617 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,749 | r | test_with_substitue_and_etc.R |
library(dplyr)
library(pryr)
library(lattice)
#this example works
xyplot(mpg ~ disp, data = mtcars)
x <- quote(mpg)
y <- quote(disp)
xyplot(x ~ y, data = mtcars)
subs(xyplot(x ~ y, data = mtcars))
xyplot2 <- function(x, y, data = data) {
eval(substitute(xyplot(x ~ y, data = data)))
}
xyplot2(mpg, disp, data = mtcars)
set.seed(1234)
df<-data.frame(a=c('A','B','C','A','A','B'),b=rnorm(6))
#1
col0 <-'b'
subset(df, eval(as.name(col0)) >0 )
#it seems it never works with mutate(): eval() runs immediately and
#as.name(col0) * 2 tries to multiply a symbol by 2, which errors
mutate(df, s = eval(quote(as.name(col0) * 2)))
#this alternative approach works
eval(substitute(b * 2) , envir=df)
#this one does not work... because substitute does not replace variable with its value in global env.
col <- as.name('b')
substitute(subset(df, col >0 ))
eval(substitute(subset(df, col >0 )))
#this one work
subs(subset(df, col >0 ))
eval(subs(subset(df, col >0 )))
#now try a function
f1 <- function(){
# substitute(subset(df, col >0 ))
subs(subset(df, col >0 ))
}
f1()
#this one does not work.
eval(subs(subset(df, quote(b) >0 )))
#this one does not work either
eval(subset(df, quote(b) >0 ))
#this one really works!!!
xx <- quote(b)
eval(subs(subset(df, xx >0 )))
#now this one works...
f2 <- function(condition){
eval(substitute(subset(df, condition )))
}
f2(b>0)
f2b <- function(condition_str){
  # parse() returns an unevaluated expression; eval() it inside subset()
  subset(df, eval(parse(text = condition_str)))
}
f2b("b>0")
f3 <- function(col_name){
  # assumption: take a column name as a string and keep rows where that column is positive
  subset(df, df[[col_name]] > 0)
}
f3('b')
####
# a + b + c -> a * b * c
# f(g(a, b), c) -> (a + b) * c
# f(a < b, c, d) -> if (a < b) c else d
substitute(a + b +c, list('+'=quote(`*`)))
substitute(a + b +c, list('+'=`*`))
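# Sketch for the remaining two rewrites listed above (plain base R):
substitute(f(g(a, b), c), list(f = quote(`*`), g = quote(`+`))) # (a + b) * c
substitute(f(a < b, c, d), list(f = quote(`if`))) # if (a < b) c else d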
|