blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d032088f95530151279c115b3b8c440ea6078dbc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/shapeR/examples/getMeasurements.Rd.R
|
aa524c0a274dda1e2a74c8064e98dd2817548fe5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
getMeasurements.Rd.R
|
# Example script extracted from the shapeR package Rd examples for
# getMeasurements(): summarise otolith area by fish population.
library(shapeR)
### Name: getMeasurements
### Title: Get simple shape variables, filtered according to filter
### Aliases: getMeasurements
### ** Examples
# Load the example otolith data set shipped with shapeR.
data(shape)
# Calculate the mean otolith area for each fish population
# The results are in square mm since the calibration ('cal') column
# in the data file is in pixels (1 mm/pixel).
# NOTE(review): assumes getMeasurements() and getMasterlist() return
# rows in the same order so the tapply grouping lines up -- verify
# against the shapeR documentation.
tapply(getMeasurements(shape)$otolith.area, getMasterlist(shape)$pop,mean)
|
f901b5e1b2aad9bdcd499e1a15813a6c22b6ad62
|
7a4caaf86ce76c18ba31a85cda0d78e5a0b38fb8
|
/man/phyloHeights.Rd
|
3f406f9e19fe7a41e6e98a8a4104891a85169390
|
[] |
no_license
|
hferg/hfgr
|
3f8716cb8b056a92add743b6858fecf30dfe162d
|
5abb9350a10b63db89890367752f9938b92e44bf
|
refs/heads/master
| 2021-05-01T12:32:52.906825
| 2017-02-24T09:11:34
| 2017-02-24T09:11:34
| 35,668,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 260
|
rd
|
phyloHeights.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phyloHeights.R
\name{phyloHeights}
\alias{phyloHeights}
\title{phyloHeights}
\usage{
phyloHeights(tree)
}
\description{
Essentially the same function as heights.phylo from geiger
}
|
3ab044e4d038e7ecce216135ae6ac95307c0982f
|
33c3602679d4ba1688c01003a16b2185b6e1d571
|
/utils/geo_utils.r
|
2c6babb88ae0e629a558bb4c805cd1744271f1b3
|
[] |
no_license
|
mfzhao/covid_interdependence
|
c40d7f39930b1670d0a236c9172d7c05b76e983c
|
58b190c41bb338d9266b2fdcd00db091942b2045
|
refs/heads/master
| 2023-08-27T13:52:24.498715
| 2021-10-29T20:17:46
| 2021-10-29T20:17:46
| 273,065,104
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 85
|
r
|
geo_utils.r
|
# Extract the state code from a (typically 5-digit) county FIPS code by
# dropping the last three digits.  Accepts numbers, strings, or factors;
# the as.character() round-trip makes factors convert by label rather
# than by level index.
state_code_from_fips <- function(x) {
  fips <- as.integer(as.character(x))
  fips %/% 1000
}
|
65220ea4dcb3c00539488e8461d1bcbdbf821d0a
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Linear_Algebra_And_Its_Applications_by_David_C._Lay/CH4/EX4.20/Ex4.20.R
|
033ec41f18e9fd63c1b55453da1d2f62483f6925
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 687
|
r
|
Ex4.20.R
|
# Chapter 4 - Vector Spaces: Change of Basis
# Page No.153 / 4-33, Prob 7 (4.9.7)
# Finds the steady-state vector of a 3x3 transition matrix by row-reducing
# the homogeneous system (P - I)x = 0 and normalising the result.
#clear console
cat("\014")
#clear variables
rm(list = ls(all = TRUE))
# Transition probabilities, filled row by row.
probs <- c(.7, .1, .1, .2, .8, .2, .1, .1, .7)
p <- matrix(probs, nrow = 3, ncol = 3, byrow = TRUE)
print(p)
# Coefficient matrix of the steady-state system (P - I)x = 0.
PI <- p - diag(3)
print(PI)
# Augment with a zero right-hand side.
zeros <- matrix(0, nrow = 3, ncol = 1, byrow = TRUE)
aug <- cbind(PI, zeros)
print(aug)
# Swap columns 1 and 3, then scale by 10 to clear the decimals.
aug[, c(1, 3)] <- aug[, c(3, 1)]
print(aug)
aug <- aug * 10
print(aug)
# Gaussian elimination, one row operation at a time.
aug[2, ] <- aug[2, ] - 2 * aug[1, ]
aug[3, ] <- aug[3, ] + 3 * aug[1, ]
aug[2, ] <- aug[2, ] / (-4)
aug[3, ] <- aug[3, ] - 4 * aug[2, ]
aug[1, ] <- aug[1, ] - 1 * aug[2, ]
print(round(aug))
print('x1=x3')
print('x2=2x3')
print('x3 is free')
print('[x1 x2 x3]=x3[1 2 1]')
print('the entries in [1 2 1] sum to 4')
# Normalise the solution so its entries sum to 1.
q <- (1/4) * matrix(c(1, 2, 1), nrow = 3, ncol = 1, byrow = TRUE)
print(q)
|
6d06bb5acaf29fc239da537207b629fab8623121
|
a55e05d6bc8e0c0924ccbdbda230f442231cd5a0
|
/aws-api/plotBeeDat.R
|
e79df5a66821dba8d3e4e0f7007c9f7f440db162
|
[] |
no_license
|
yannikbehr/arduino_project
|
86956229c5d056ae5479fd9f31f5e9f033e24c89
|
fdfa417fd8af49f3434114eccc14e977714335ad
|
refs/heads/master
| 2023-05-02T04:59:57.335448
| 2023-04-26T06:13:06
| 2023-04-26T06:13:06
| 191,243,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,813
|
r
|
plotBeeDat.R
|
#!/usr/bin/env Rscript
# Plot the beehive weight time series.  For each day in the date range the
# raw log lines are either read from a cached RDS file under data/ or
# fetched fresh via ./query_by_date.sh, then parsed and plotted to
# GewichtsKurve.pdf.
# NOTE(review): assumes query_by_date.sh emits tab-separated records with
# the weight in field 1 and an "<...>_<d/B/Y:H:M:S> ..." timestamp in
# field 2 -- verify against the script's actual output.
dateStart = as.Date('2023-04-23')
dateEnd = Sys.Date()
data = character()
while (dateStart<=dateEnd){
  # Cache files are named like "data/23_Apr" (day + 3-letter month).
  month = substr(format(dateStart, '%B'), 1, 3)
  fname=paste("data/", format(dateStart, '%d_'), month, sep="")
  readDat=T
  # Use the cache only for fully-elapsed days; today is always re-queried.
  if (dateStart < dateEnd && file.exists(fname)){
    dat <- readRDS(fname, refhook = NULL)
    readDat=F
  }
  if (readDat){
    cat(format(dateStart,'%A %B %d, %Y\n'))
    dat = system(paste("./query_by_date.sh ", format(dateStart, '%d/'), month, " | grep WeightKg | cut -f 2- ", sep=""), intern=T)
    cat(paste(dat[length(dat)], "\n", sep=""))
  }
  # Only completed days are written to the cache, so a partial day's data
  # is never frozen.
  if (dateStart < dateEnd ){
    saveRDS(dat, file = fname, ascii = FALSE, version = NULL,compress = TRUE, refhook = NULL)
  }
  data = c(data, dat)
  dateStart = dateStart+1
}
# One column per record after splitting on tab: row 1 = weight value,
# row 2 = raw timestamp string.
datF = as.data.frame(strsplit(data, "\t"), stringsAsFactors=F)
# Parse the timestamp portion (between the "_" and the first space).
dd = lapply(datF[2, ], function(d){strptime(strsplit(strsplit(d, "_")[[1]][2], " ")[[1]][1],format='%d/%B/%Y:%H:%M:%S')})
mydates <- do.call("c", dd)
df = data.frame(times = mydates, val=as.numeric(datF[1, ]), stringsAsFactors=F, row.names=NULL)
# Keep records after 03 June 2021 and drop readings equal to 1000
# (presumably a sensor sentinel value -- verify).
idx = mydates > strptime("03/June/2021:06:7:38",format='%d/%B/%Y:%H:%M:%S') & datF[1, ] != 1000
#idx = mydates > strptime("20/June/2021:06:7:38",format='%d/%B/%Y:%H:%M:%S') & datF[1, ] != 1000
mydates = mydates + 2 * 3600 # timestamps look two hours off (timezone?) -- TODO confirm
df = df[idx, ]
library(ggplot2)
pdf("GewichtsKurve.pdf")
# A top-level ggplot object is auto-printed when run via Rscript.
ggplot(data=df, aes(x=times, y=val))+geom_line() + geom_smooth() + ylab("Gewicht [g]") + xlab("Zeitpunkt")
#ggplot(data=df, aes(x=dates, y=values))+geom_line()
#p = ggplot(data=df, aes(x=dates, y=values))+geom_point(size=1.5)
#p = p + geom_line()
#p = p + scale_x_date(date_breaks = '3 day', date_labels = '%b %d')+
#p
#plot(mydates[idx], datF[1, idx], type="l", xlab="Zeit", ylab="Gewicht [g]")
dev.off()
|
17f1e65048456941744ecc90cf1f49cf35afa562
|
156811aac95d26f45fa74d249f416e32254fb4eb
|
/Age discrimination permutation test.R
|
2ba7fb98b766d940b1c7e5a6e3baeab570770018
|
[] |
no_license
|
wangrenfeng0/SMU-Data-Science
|
652fe6e37541cd9ab42881229bcfb1aeab3bbb03
|
cc7ff4b0ee248dae7e515af4f59390ba6297d7d5
|
refs/heads/master
| 2023-01-07T11:40:30.836318
| 2020-10-21T03:26:04
| 2020-10-21T03:26:04
| 288,824,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,793
|
r
|
Age discrimination permutation test.R
|
# Permutation test for age discrimination: is the observed difference in
# mean age between fired and not-fired employees larger than expected
# under random assignment of the Fired/Notfired labels?
fired <- c(34, 37, 37, 38, 41, 42, 43, 44, 44, 45, 45, 45, 46, 48, 49, 53, 53, 54, 54, 55, 56)
notfired <- c(27, 33, 36, 37, 38, 38, 39, 42, 42, 43, 43, 44, 44, 44, 45, 45, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 51, 51, 52, 54)
#hist(fired)
#hist(notfired)
library(tidyverse)
# Observed test statistic: difference in group means.
# (Fix: renamed from the misspelled "obersved_diff".)
observed_diff <- mean(fired) - mean(notfired)
observed_diff
fired <- data.frame(Value = fired, Status = 'Fired')
notfired <- data.frame(Value = notfired, Status = 'Notfired')
employee <- rbind(fired, notfired)
employee
# Build the permutation null distribution of the mean difference.
n_perm <- 10000
xbarDiffHolder <- numeric(n_perm)
for (i in 1:n_perm)
{
  # Shuffle the Fired/Notfired labels across all employees.
  # (Fix: use nrow(employee) instead of the hard-coded 51.)
  scramble_employee <- sample(employee$Status, nrow(employee))
  employeeTemp <- employee
  employeeTemp$Status <- scramble_employee
  xbars <- employeeTemp %>% group_by(Status) %>% summarize(mean = mean(Value))
  xbarNminusT <- xbars[2, 2] - xbars[1, 2]
  xbarDiffHolder[i] <- xbarNminusT$mean
}
ggplot(mapping=aes(x=xbarDiffHolder), color='black')+geom_histogram(bins=25, color='black', fill='blue')
# One-sided p-value: proportion of permuted differences at least as large
# as the observed one.  (Fix: compare against the computed observed_diff
# instead of the hard-coded rounded constant 1.92381.)
num_more_extreme <- sum(xbarDiffHolder >= observed_diff)
num_more_extreme
pvalue <- num_more_extreme / n_perm
pvalue
# Diagnostic plots comparing the age distributions of the two groups.
Fired <- c(34, 37, 37, 38, 41, 42, 43, 44, 44, 45, 45, 45, 46, 48, 49, 53, 53, 54, 54, 55, 56)
Not_fired <- c(27, 33, 36, 37, 38, 38, 39, 42, 42, 43, 43, 44, 44, 44, 45, 45, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 51, 51, 52, 54)
#t.test(x = Fired, y = Not_fired, conf.int = .95, var.equal = TRUE, alternative = "two.sided")
# (Fix: derive the label counts from the vectors rather than hard-coding
# 21 and 30.)
label1 <- rep('Fired', length(Fired))
label2 <- rep('Not_fired', length(Not_fired))
label <- as.factor(c(label1, label2))
people <- data.frame(age = c(Fired, Not_fired), status = label)
people
graphics.off()
par(mar = rep(2, 4))
par(mfrow = c(2, 2))
hist(Fired, xlab = 'Age', main = 'Fired')
hist(Not_fired, xlab = 'Age', main = 'Not Fired')
qqnorm(Fired, main = 'Fired')
qqline(Fired)
qqnorm(Not_fired, main = 'Not_fired')
qqline(Not_fired)
people %>% ggplot(aes(x = status, y = age)) + geom_boxplot()
|
33a0eb1893b1955e0ec0ccc953b5bae2719b19ab
|
5915933e558a83bec3612d7be74552e65757ee0c
|
/man/plotVElatCont.Rd
|
e155adcf133a3423b4755a1178371d5b16d678bc
|
[] |
no_license
|
cran/CoRpower
|
8cc66ced0645c9912375b41e637134be89a069f9
|
3cd5f650a86d1b61d213d1f4c70c297d4c9567c6
|
refs/heads/master
| 2021-06-30T21:46:19.199414
| 2020-11-17T07:10:14
| 2020-11-17T07:10:14
| 151,888,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,132
|
rd
|
plotVElatCont.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotVElatCont.R
\name{plotVElatCont}
\alias{plotVElatCont}
\title{Plotting Treatment (Vaccine) Efficacy Curves for Different Correlate of Risk Relative Risks for Continuous Biomarkers}
\usage{
plotVElatCont(outComputePower, outDir = NULL)
}
\arguments{
\item{outComputePower}{a list of lists of length \code{1} containing output from \code{\link{computePower}} or a character string specifying the \code{.RData} file containing \code{\link{computePower}} output}
\item{outDir}{a character string specifying path to output \code{.RData} file, necessary if \cr\code{outComputePower} is a character string. Default is \code{NULL}.}
}
\value{
None. The function is called solely for plot generation.
}
\description{
Plots the treatment (vaccine) efficacy curve for the true latent biomarker for eight different values of the latent correlate of risk
relative risk and the lowest vaccine efficacy level for the true biomarker. All curves assume \code{rho=1}, and treatment (vaccine)
efficacy ranges from 0 to 1. The legend is completely determined by the function.
}
\details{
\code{\link{computePower}} function input parameter \code{VElowest} must have length greater than or equal to eight
for all eight scenarios to have unique RRc and VElowest. Otherwise, only \code{length(VElowest)} unique
VE curves will be displayed.
When interpreting the output of the function, the null hypothesis corresponds to a flat curve where vaccine efficacy for all values of the
true latent biomarker is equal to the overall vaccine efficacy. Increasing departures from the null hypothesis correspond
to increasingly variable and steep VE curves. The output assumes the overall placebo-group endpoint risk between \eqn{\tau} and \eqn{\tau_{max}}
is constant for all values of the latent and observed biomarker and that there is no measurement error (\eqn{\rho=1}). When this is the case,
an association of the biomarker with infection risk in the vaccine group (a correlate of risk) is equivalent to an association of the biomarker
with treatment (vaccine) efficacy.
The function's plot can also be interpreted in conjunction with the output of the \code{\link{plotPowerCont}} function by
matching the CoR relative risk in the two plots and examining power compared to VE. This sheds light on the importance
of overall VE on power and further enables correlates of risk results to be interpreted in terms of
potential correlates of efficacy/protection.
}
\examples{
# Example scenario with continuous biomarker, where values of rho are varied
# Set input parameters for computePower function
nCasesTx <- 10
nControlsTx <- 300
nCasesTxWithS <- 10
controlCaseRatio <- 3
VEoverall <- 0.75
risk0 <- 0.034
PlatVElowest <- 0.2
VElowest <- seq(0, VEoverall, len=8)
Plat0 <- P0 <- 0.2
Plat2 <- P2 <- 0.6
M <- 13
alpha <- 0.05
sigma2obs <- 1
rho <- 1
biomType <- "continuous"
# Output from computePower function is stored in an object as a list
pwr <- computePower(nCasesTx=nCasesTx, nControlsTx=nControlsTx, nCasesTxWithS=nCasesTxWithS,
controlCaseRatio=controlCaseRatio, risk0=risk0, VEoverall=VEoverall,
PlatVElowest=PlatVElowest, VElowest=VElowest, Plat0=Plat0, Plat2=Plat2,
P0=P0, P2=P2, M=M, alpha=alpha, sigma2obs=sigma2obs, rho=rho, biomType=biomType)
# Set parameters for plotPowerCont function
# outComputePower is a list containing output from the computePower function
outComputePower <- pwr
plotVElatCont(outComputePower=outComputePower)
\dontrun{
# Output from computePower function is saved in an RData file
computePower(..., saveDir = "myDir", saveFile = "myFile.RData")
# outComputePower is a character string specifying the file containing the computePower output
# outDir is a character string specifying the outComputePower file directory
outComputePower <- "myFile.RData"
outDir <- "~/myDir"
plotVElatCont(outComputePower, outDir=outDir)
}
}
\seealso{
\code{\link{computePower}}, \code{\link{plotPowerCont}}
}
|
4c8775757959628a2dbecfd78349643696693dba
|
19667baea785a5181c29daaea9799435107e91c3
|
/data/r/e39cfbf4789808a3d7fcd04b08a92ab1_HoltWintersNew.R
|
e945dddb16c2987a3bb263fdf21f4d015ebd3f54
|
[
"Apache-2.0"
] |
permissive
|
maxim5/code-inspector
|
d71f024390fe5a5f7c1144b289e0e22b59dbb2c7
|
14812dfbc7bac1d76c4d9e5be2cdf83fc1c391a1
|
refs/heads/master
| 2021-09-10T19:23:39.324768
| 2018-03-31T17:17:27
| 2018-03-31T17:17:27
| 115,271,942
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,456
|
r
|
e39cfbf4789808a3d7fcd04b08a92ab1_HoltWintersNew.R
|
# Modelled on the HoltWinters() function but with more conventional
# initialization.
# Written by Zhenyu Zhou. 21 October 2012
# Holt-Winters smoothing with conventional ("simple") initial states,
# packaged as an "ets" object.  Smoothing parameters left NULL are
# estimated by minimising the in-sample SSE with L-BFGS-B.
#
# Arguments:
#   x                 a time series (coerced with as.ts)
#   alpha/beta/gamma  level/trend/seasonal smoothing parameters; NULL
#                     means "estimate", logical FALSE disables the
#                     trend/seasonal component
#   seasonal          "additive" or "multiplicative" seasonality
#   exponential       TRUE for a multiplicative (exponential) trend
#   phi               damping parameter; defaults to 1 (no damping)
#   lambda            Box-Cox parameter; series is transformed up front
#   biasadj           bias-adjust the back-transformed fitted values
#   warnings          report optimizer convergence problems as warnings
HoltWintersZZ <- function(x,
                          # smoothing parameters
                          alpha = NULL, # level
                          beta = NULL, # trend
                          gamma = NULL, # seasonal component
                          seasonal = c("additive", "multiplicative"),
                          exponential = FALSE, # exponential
                          phi = NULL, # damp
                          lambda = NULL, # box-cox
                          biasadj = FALSE, # adjusted back-transformed mean for box-cox
                          warnings = TRUE # return optimization warnings
) {
  x <- as.ts(x)
  seasonal <- match.arg(seasonal)
  m <- frequency(x)
  lenx <- length(x)
  if (!is.null(lambda)) {
    x <- BoxCox(x, lambda)
  }
  if (is.null(phi) || !is.numeric(phi)) {
    phi <- 1
  }
  if (!is.null(alpha) && !is.numeric(alpha)) {
    stop("cannot fit models without level ('alpha' must not be 0 or FALSE).")
  }
  # BUG FIX: the original used scalar `||` on a vector of up to three
  # parameters, which only inspected the first element (and is an error
  # for length > 1 inputs on R >= 4.3).  Elementwise `|` validates every
  # supplied parameter, which is what any() expects here.
  if (!all(is.null(c(alpha, beta, gamma))) &&
      any(c(alpha, beta, gamma) < 0 | c(alpha, beta, gamma) > 1)) {
    stop("'alpha', 'beta' and 'gamma' must be within the unit interval.")
  }
  if ((is.null(gamma) || gamma > 0)) {
    if (seasonal == "multiplicative" && any(x <= 0)) {
      stop("data must be positive for multiplicative Holt-Winters.")
    }
  }
  if (m <= 1) { # non-seasonal data: force the seasonal component off
    gamma <- FALSE
  }
  ## initialise l0, b0, s0
  if (!is.null(gamma) && is.logical(gamma) && !gamma) {
    # Non-seasonal model: level starts at the first observation; the
    # initial trend is the first difference (or ratio, if exponential).
    seasonal <- "none"
    l.start <- x[1L]
    s.start <- 0
    if (is.null(beta) || !is.logical(beta) || beta) {
      if (!exponential) {
        b.start <- x[2L] - x[1L]
      } else {
        b.start <- x[2L] / x[1L]
      }
    }
  } else {
    ## seasonal Holt-Winters: average of the first season for the level,
    ## per-period change between the first two seasons for the trend
    l.start <- mean(x[1:m])
    b.start <- (mean(x[m + (1:m)]) - l.start) / m
    if (seasonal == "additive") {
      s.start <- x[1:m] - l.start
    } else {
      s.start <- x[1:m] / l.start
    }
  }
  # Box constraints for the smoothing parameters during optimisation.
  # lower=c(rep(0.0001,3), 0.8)
  # upper=c(rep(0.9999,3),0.98)
  lower <- c(0, 0, 0, 0)
  upper <- c(1, 1, 1, 1)
  if (!is.null(beta) && is.logical(beta) && !beta) {
    trendtype <- "N"
  } else if (exponential) {
    trendtype <- "M"
  } else {
    trendtype <- "A"
  }
  if (seasonal == "none") {
    seasontype <- "N"
  } else if (seasonal == "multiplicative") {
    seasontype <- "M"
  } else {
    seasontype <- "A"
  }
  ## initialise smoothing parameters for the optimizer
  optim.start <- initparam(
    alpha = alpha, beta = beta, gamma = gamma, phi = 1,
    trendtype = trendtype, seasontype = seasontype, damped = FALSE, lower = lower, upper = upper, m = m
  )
  ###################################################################################
  # optimisation: any of alpha, beta, gamma that is NULL is optimised by
  # minimising the SSE returned by the zzhw() filter
  error <- function(p, select) {
    if (select[1] > 0) {
      alpha <- p[1L]
    }
    if (select[2] > 0) {
      beta <- p[1L + select[1]]
    }
    if (select[3] > 0) {
      gamma <- p[1L + select[1] + select[2]]
    }
    zzhw(
      x, lenx = lenx, alpha = alpha, beta = beta, gamma = gamma, seasonal = seasonal, m = m,
      dotrend = (!is.logical(beta) || beta), doseasonal = (!is.logical(gamma) || gamma),
      exponential = exponential, phi = phi, l.start = l.start, b.start = b.start, s.start = s.start
    )$SSE
  }
  select <- as.numeric(c(is.null(alpha), is.null(beta), is.null(gamma)))
  if (sum(select) > 0) # There are parameters to optimize
  {
    sol <- optim(optim.start, error, method = "L-BFGS-B", lower = lower[select], upper = upper[select], select = select)
    if (sol$convergence || any(sol$par < 0 | sol$par > 1)) {
      if (sol$convergence > 50) {
        if (warnings) {
          warning(gettextf("optimization difficulties: %s", sol$message), domain = NA)
        }
      } else {
        stop("optimization failure")
      }
    }
    # Unpack the optimised parameters.  (Was `sol$p`, which worked only
    # through partial matching of the `par` element.)
    if (select[1] > 0) {
      alpha <- sol$par[1L]
    }
    if (select[2] > 0) {
      beta <- sol$par[1L + select[1]]
    }
    if (select[3] > 0) {
      gamma <- sol$par[1L + select[1] + select[2]]
    }
  }
  # Re-run the filter once with the final parameters.
  final.fit <- zzhw(
    x, lenx = lenx, alpha = alpha, beta = beta, gamma = gamma, seasonal = seasonal, m = m,
    dotrend = (!is.logical(beta) || beta), doseasonal = (!is.logical(gamma) || gamma),
    exponential = exponential, phi = phi, l.start = l.start, b.start = b.start, s.start = s.start
  )
  tspx <- tsp(x)
  fitted <- ts(final.fit$fitted, frequency = m, start = tspx[1])
  res <- ts(final.fit$residuals, frequency = m, start = tspx[1])
  if (!is.null(lambda)) {
    fitted <- InvBoxCox(fitted, lambda, biasadj, var(final.fit$residuals))
    attr(lambda, "biasadj") <- biasadj
  }
  # Assemble the state matrix: level, then trend, then m seasonal columns.
  states <- matrix(final.fit$level, ncol = 1)
  colnames(states) <- "l"
  if (trendtype != "N") {
    states <- cbind(states, b = final.fit$trend)
  }
  if (seasontype != "N") {
    nr <- nrow(states)
    nc <- ncol(states)
    for (i in 1:m)
      states <- cbind(states, final.fit$season[(m - i) + (1:nr)])
    colnames(states)[nc + (1:m)] <- paste("s", 1:m, sep = "")
  }
  states <- ts(states, frequency = m, start = tspx[1] - 1 / m)
  # Package output as ets class
  damped <- (phi < 1.0)
  if (seasonal == "additive") { # This should not happen
    components <- c("A", trendtype, seasontype, damped)
  } else if (seasonal == "multiplicative") {
    components <- c("M", trendtype, seasontype, damped)
  } else if (seasonal == "none" && exponential) {
    components <- c("M", trendtype, seasontype, damped)
  } else { # if(seasonal=="none" & !exponential)
    components <- c("A", trendtype, seasontype, damped)
  }
  initstate <- states[1, ]
  param <- alpha
  names(param) <- "alpha"
  if (trendtype != "N") {
    param <- c(param, beta = beta)
    names(param)[length(param)] <- "beta"
  }
  if (seasontype != "N") {
    param <- c(param, gamma = gamma)
    names(param)[length(param)] <- "gamma"
  }
  if (damped) {
    param <- c(param, phi = phi)
    names(param)[length(param)] <- "phi"
  }
  # Additive-error models use raw residuals for sigma^2; multiplicative
  # models use relative residuals.
  if (components[1] == "A") {
    sigma2 <- mean(res ^ 2)
  } else {
    sigma2 <- mean((res / fitted) ^ 2)
  }
  structure(
    list(
      fitted = fitted,
      residuals = res,
      components = components,
      x = x,
      par = c(param, initstate),
      initstate = initstate,
      states = states,
      SSE = final.fit$SSE,
      sigma2 = sigma2,
      call = match.call(),
      m = m
    ),
    class = "ets"
  )
}
###################################################################################
# filter function
# Holt-Winters filtering engine: runs the recursive level/trend/season
# updates over x and accumulates the sum of squared one-step errors.
#
# Arguments:
#   x            numeric series (possibly Box-Cox transformed upstream)
#   lenx         length(x), precomputed by the caller
#   alpha/beta/gamma  smoothing parameters; beta/gamma are forced to 0
#                when the corresponding component is disabled
#   seasonal     "additive" or "multiplicative" (anything else = none)
#   m            seasonal period
#   dotrend/doseasonal  logical switches for the trend/season components
#   l.start/b.start/s.start  initial level, trend and seasonal states
#   exponential  use a multiplicative (exponential) trend
#   phi          damping parameter; defaults to 1 (no damping)
#
# Returns a list with SSE, fitted values, residuals, and the full state
# paths (level/trend/season, each prefixed with its initial value).
zzhw <- function(x, lenx, alpha=NULL, beta=NULL, gamma=NULL, seasonal="additive", m,
                 dotrend=FALSE, doseasonal=FALSE, l.start=NULL, exponential = NULL, phi=NULL,
                 b.start=NULL, s.start=NULL) {
  # BUG FIX: test is.null() first.  The original evaluated
  # `exponential != TRUE` before the null check, so a NULL argument
  # produced logical(0) inside `if`/`||` -- an error rather than the
  # intended default of FALSE.
  if (is.null(exponential) || exponential != TRUE) {
    exponential <- FALSE
  }
  if (is.null(phi) || !is.numeric(phi)) {
    phi <- 1
  }
  # Pre-allocate the state and fit arrays.
  level <- trend <- season <- xfit <- residuals <- numeric(lenx)
  SSE <- 0
  if (!dotrend) {
    beta <- 0
    b.start <- 0
  }
  if (!doseasonal) {
    gamma <- 0
    # Neutral seasonal state: 0 for additive, 1 for multiplicative.
    s.start[1:length(s.start)] <- ifelse(seasonal == "additive", 0, 1)
  }
  lastlevel <- level0 <- l.start
  lasttrend <- trend0 <- b.start
  season0 <- s.start
  for (i in 1:lenx) {
    # l(t-1)
    if (i > 1) {
      lastlevel <- level[i - 1]
    }
    # b(t-1)
    if (i > 1) {
      lasttrend <- trend[i - 1]
    }
    # s(t-m): fall back to the initial seasonal states for the first m
    # observations
    if (i > m) {
      lastseason <- season[i - m]
    } else {
      lastseason <- season0[i]
    }
    if (is.na(lastseason)) {
      lastseason <- ifelse(seasonal == "additive", 0, 1)
    }
    # One-step forecast for period i.
    if (seasonal == "additive") {
      if (!exponential) {
        xhat <- lastlevel + phi * lasttrend + lastseason
      } else {
        xhat <- lastlevel * lasttrend ^ phi + lastseason
      }
    } else {
      if (!exponential) {
        xhat <- (lastlevel + phi * lasttrend) * lastseason
      } else {
        xhat <- lastlevel * lasttrend ^ phi * lastseason
      }
    }
    xfit[i] <- xhat
    res <- x[i] - xhat
    residuals[i] <- res
    SSE <- SSE + res * res
    # Level update.
    if (seasonal == "additive") {
      if (!exponential) {
        level[i] <- alpha * (x[i] - lastseason) + (1 - alpha) * (lastlevel + phi * lasttrend)
      } else {
        level[i] <- alpha * (x[i] - lastseason) + (1 - alpha) * (lastlevel * lasttrend ^ phi)
      }
    }
    else {
      if (!exponential) {
        level[i] <- alpha * (x[i] / lastseason) + (1 - alpha) * (lastlevel + phi * lasttrend)
      } else {
        level[i] <- alpha * (x[i] / lastseason) + (1 - alpha) * (lastlevel * lasttrend ^ phi)
      }
    }
    # Trend update.
    if (!exponential) {
      trend[i] <- beta * (level[i] - lastlevel) + (1 - beta) * phi * lasttrend
    } else {
      trend[i] <- beta * (level[i] / lastlevel) + (1 - beta) * lasttrend ^ phi
    }
    # Seasonal update.
    if (seasonal == "additive") {
      if (!exponential) {
        season[i] <- gamma * (x[i] - lastlevel - phi * lasttrend) + (1 - gamma) * lastseason
      } else {
        season[i] <- gamma * (x[i] - lastlevel * lasttrend ^ phi) + (1 - gamma) * lastseason
      }
    } else {
      if (!exponential) {
        season[i] <- gamma * (x[i] / (lastlevel + phi * lasttrend)) + (1 - gamma) * lastseason
      } else {
        season[i] <- gamma * (x[i] / (lastlevel * lasttrend ^ phi)) + (1 - gamma) * lastseason
      }
    }
  }
  list(
    SSE = SSE,
    fitted = xfit,
    residuals = residuals,
    level = c(level0, level),
    trend = c(trend0, trend),
    season = c(season0, season),
    phi = phi
  )
}
#' Exponential smoothing forecasts
#'
#' Returns forecasts and other information for exponential smoothing forecasts
#' applied to \code{y}.
#'
#' ses, holt and hw are simply convenient wrapper functions for
#' \code{forecast(ets(...))}.
#'
#' @param y a numeric vector or time series of class \code{ts}
#' @param h Number of periods for forecasting.
#' @param damped If TRUE, use a damped trend.
#' @param seasonal Type of seasonality in \code{hw} model. "additive" or
#' "multiplicative"
#' @param level Confidence level for prediction intervals.
#' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for
#' fan plots.
#' @param initial Method used for selecting initial state values. If
#' \code{optimal}, the initial values are optimized along with the smoothing
#' parameters using \code{\link{ets}}. If \code{simple}, the initial values are
#' set to values obtained using simple calculations on the first few
#' observations. See Hyndman & Athanasopoulos (2014) for details.
#' @param exponential If TRUE, an exponential trend is fitted. Otherwise, the
#' trend is (locally) linear.
#' @param alpha Value of smoothing parameter for the level. If \code{NULL}, it
#' will be estimated.
#' @param beta Value of smoothing parameter for the trend. If \code{NULL}, it
#' will be estimated.
#' @param gamma Value of smoothing parameter for the seasonal component. If
#' \code{NULL}, it will be estimated.
#' @param phi Value of damping parameter if \code{damped=TRUE}. If \code{NULL},
#' it will be estimated.
#' @param lambda Box-Cox transformation parameter. Ignored if NULL. Otherwise,
#' data transformed before model is estimated. When \code{lambda=TRUE},
#' \code{additive.only} is set to FALSE.
#' @param biasadj Use adjusted back-transformed mean for Box-Cox
#' transformations. If TRUE, point forecasts and fitted values are mean
#' forecast. Otherwise, these points can be considered the median of the
#' forecast densities.
#' @param x Deprecated. Included for backwards compatibility.
#' @param ... Other arguments passed to \code{forecast.ets}.
#' @return An object of class "\code{forecast}".
#'
#' The function \code{summary} is used to obtain and print a summary of the
#' results, while the function \code{plot} produces a plot of the forecasts and
#' prediction intervals.
#'
#' The generic accessor functions \code{fitted.values} and \code{residuals}
#' extract useful features of the value returned by \code{ets} and associated
#' functions.
#'
#' An object of class \code{"forecast"} is a list containing at least the
#' following elements: \item{model}{A list containing information about the
#' fitted model} \item{method}{The name of the forecasting method as a
#' character string} \item{mean}{Point forecasts as a time series}
#' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper
#' limits for prediction intervals} \item{level}{The confidence values
#' associated with the prediction intervals} \item{x}{The original time series
#' (either \code{object} itself or the time series used to create the model
#' stored as \code{object}).} \item{residuals}{Residuals from the fitted
#' model.} \item{fitted}{Fitted values (one-step forecasts)}
#' @author Rob J Hyndman
#' @seealso \code{\link{ets}}, \code{\link[stats]{HoltWinters}},
#' \code{\link{rwf}}, \code{\link[stats]{arima}}.
#' @references Hyndman, R.J., Koehler, A.B., Ord, J.K., Snyder, R.D. (2008)
#' \emph{Forecasting with exponential smoothing: the state space approach},
#' Springer-Verlag: New York. \url{http://www.exponentialsmoothing.net}.
#'
#' Hyndman, R.J., Athanasopoulos (2014) \emph{Forecasting: principles and
#' practice}, OTexts: Melbourne, Australia. \url{http://www.otexts.org/fpp}.
#' @keywords ts
#' @examples
#'
#' fcast <- holt(airmiles)
#' plot(fcast)
#' deaths.fcast <- hw(USAccDeaths,h=48)
#' plot(deaths.fcast)
#'
#' @export
ses <- function(y, h = 10, level = c(80, 95), fan = FALSE, initial=c("optimal", "simple"),
    alpha=NULL, lambda=NULL, biasadj=FALSE, x=y, ...) {
  # Fit a simple-exponential-smoothing model: either a fully optimised
  # ETS(A,N,N) model, or a conventionally-initialised Holt-Winters fit
  # with trend and seasonality switched off.  Then forecast h steps.
  initial <- match.arg(initial)
  fitted_model <- if (initial == "optimal") {
    ets(x, "ANN", alpha = alpha, opt.crit = "mse", lambda = lambda, biasadj = biasadj)
  } else {
    HoltWintersZZ(x, alpha = alpha, beta = FALSE, gamma = FALSE, lambda = lambda, biasadj = biasadj)
  }
  fcast <- forecast(fitted_model, h, level = level, fan = fan, ...)
  # Stamp the method name, the user's call, and the series name onto the
  # forecast object.
  fcast$method <- fcast$model$method <- "Simple exponential smoothing"
  fcast$model$call <- match.call()
  fcast$series <- deparse(substitute(y))
  return(fcast)
}
#' @rdname ses
#' @export
# Holt's linear (or exponential) trend forecasts.  With
# initial = "optimal" -- or whenever damped = TRUE -- the model is fitted
# by ets(); with initial = "simple" a conventionally-initialised
# Holt-Winters fit (HoltWintersZZ) is used.  Returns a "forecast" object.
holt <- function(y, h = 10, damped = FALSE, level = c(80, 95), fan = FALSE,
                 initial=c("optimal", "simple"), exponential=FALSE, alpha=NULL, beta=NULL,
                 phi=NULL, lambda=NULL, biasadj=FALSE, x=y, ...) {
  initial <- match.arg(initial)
  if (length(y) <= 1L) {
    stop("I need at least two observations to estimate trend.")
  }
  # Idiom fix: both operands are scalars, so use short-circuiting `||`
  # instead of elementwise `|`.
  if (initial == "optimal" || damped) {
    if (exponential) {
      fcast <- forecast(ets(x, "MMN", alpha = alpha, beta = beta, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...)
    } else {
      fcast <- forecast(ets(x, "AAN", alpha = alpha, beta = beta, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...)
    }
  }
  else {
    fcast <- forecast(
      HoltWintersZZ(x, alpha = alpha, beta = beta, gamma = FALSE, phi = phi, exponential = exponential, lambda = lambda, biasadj = biasadj),
      h, level = level, fan = fan, ...
    )
  }
  # Build the human-readable method name; a damped model always goes
  # through ets() above, so simple initialisation is flagged.
  if (damped) {
    fcast$method <- "Damped Holt's method"
    if (initial == "simple") {
      warning("Damped Holt's method requires optimal initialization")
    }
  }
  else {
    fcast$method <- "Holt's method"
  }
  if (exponential) {
    fcast$method <- paste(fcast$method, "with exponential trend")
  }
  fcast$model$method <- fcast$method
  fcast$model$call <- match.call()
  fcast$series <- deparse(substitute(y))
  return(fcast)
}
#' @rdname ses
#' @export
# Holt-Winters seasonal forecasts (additive or multiplicative).  With
# initial = "optimal" -- or whenever damped = TRUE -- the model is fitted
# by ets(); with initial = "simple" a conventionally-initialised
# Holt-Winters fit (HoltWintersZZ) is used.  Returns a "forecast" object.
hw <- function(y, h = 2 * frequency(x), seasonal = c("additive", "multiplicative"), damped = FALSE,
               level = c(80, 95), fan = FALSE, initial=c("optimal", "simple"), exponential=FALSE,
               alpha=NULL, beta=NULL, gamma=NULL, phi=NULL, lambda=NULL, biasadj=FALSE, x=y, ...) {
  initial <- match.arg(initial)
  seasonal <- match.arg(seasonal)
  m <- frequency(x)
  if (m <= 1L) {
    stop("The time series should have frequency greater than 1.")
  }
  if (length(y) < m + 3) {
    stop(paste("I need at least", m + 3, "observations to estimate seasonality."))
  }
  # Idiom fix: these conditions are all scalar, so use the
  # short-circuiting `||`/`&&` operators rather than elementwise `|`/`&`.
  if (initial == "optimal" || damped) {
    if (seasonal == "additive" && exponential) {
      stop("Forbidden model combination")
    } else if (seasonal == "additive" && !exponential) {
      fcast <- forecast(ets(x, "AAA", alpha = alpha, beta = beta, gamma = gamma, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...)
    } else if (seasonal != "additive" && exponential) {
      fcast <- forecast(ets(x, "MMM", alpha = alpha, beta = beta, gamma = gamma, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...)
    } else { # if(seasonal!="additive" & !exponential)
      fcast <- forecast(ets(x, "MAM", alpha = alpha, beta = beta, gamma = gamma, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...)
    }
  }
  else {
    fcast <- forecast(
      HoltWintersZZ(x, alpha = alpha, beta = beta, gamma = gamma, phi = phi, seasonal = seasonal, exponential = exponential, lambda = lambda, biasadj = biasadj),
      h, level = level, fan = fan, ...
    )
  }
  # Build the human-readable method name.
  if (seasonal == "additive") {
    fcast$method <- "Holt-Winters' additive method"
  } else {
    fcast$method <- "Holt-Winters' multiplicative method"
  }
  if (exponential) {
    fcast$method <- paste(fcast$method, "with exponential trend")
  }
  if (damped) {
    fcast$method <- paste("Damped", fcast$method)
    if (initial == "simple") {
      warning("Damped methods require optimal initialization")
    }
  }
  fcast$model$method <- fcast$method
  fcast$model$call <- match.call()
  fcast$series <- deparse(substitute(y))
  return(fcast)
}
|
0156781d224488bb8f39e3d24f8fdf4d00c908a5
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/Ryo-N7|soccer_ggplots|World Cup 2018__RMarkdown__worldcup_goal_plots.R
|
9972ffdcf274b6ffab4dce60673c5311708c1efe
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,738
|
r
|
Ryo-N7|soccer_ggplots|World Cup 2018__RMarkdown__worldcup_goal_plots.R
|
# Knitr-extracted R code from a World Cup 2018 RMarkdown post: annotated
# soccer-pitch plots of Russia's first goal vs. Saudi Arabia, saved to
# field.png and gazinsky_goal.png.
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----echo=FALSE, fig.width=8, fig.height=5-------------------------------
library(ggsoccer)
library(ggplot2)
# Labelled reference points illustrating the pitch coordinate system
# (each label is the point's own x, y coordinates).
point_data <- data.frame(x = c( 100, 83, 100, 83, 88.5, 100, 83, 83, 100, 100),
                         y = c( 0, 21, 21, 38, 50, 50, 62, 80, 80, 100),
                         label = c("100, 0", "83, 21", "100, 21", "83, 38", "88.5, 50",
                                   "100, 50", "83, 62", "83, 80", "100, 80", "100, 100"))
field <- ggplot(point_data) +
  annotate_pitch() +
  theme_pitch(aspect_ratio = NULL) +
  coord_flip() +
  geom_point(aes(x = x, y = y), size = 1.5) +
  geom_text(aes(x= x, y = y,
                label = label),
            vjust = 1.5, color = "red")
ggsave(field, filename = "field.png", width = 8, height = 5)
## ----first goal, fig.height=6, fig.width=8-------------------------------
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(ggimage)
library(extrafont)
# loadfonts()
# Segment endpoints for the passes leading up to the goal (x,y -> x2,y2).
# 2 1
pass_data <- data.frame(x = c( 84, 82, 94),
                        y = c( 6, 32, 35),
                        x2 = c(77, 84, 83),
                        y2 = c(13, 8 , 32.5))
# corner kick + golovin cross
curve_data <- data.frame(x = c(100, 76),
                         y = c(0, 19),
                         x2 = c(94, 94),
                         y2 = c(35, 60))
# Gazinsky header
ball_data <- data.frame(x = c(94),
                        y = c(60),
                        x2 = c(99.2),
                        y2 = c(47.5))
# soccer ball image placed at the goal mouth (fetched from a URL, so the
# plot needs network access when rendered)
goal_img <- data.frame(x = 100,
                       y = 47) %>%
  mutate(image = "https://d30y9cdsu7xlg0.cloudfront.net/png/43563-200.png")
# golovin + zhirkov movement
movement_data <- data.frame(x = c(83, 98),
                            y = c(24.25, 2),
                            x2 = c(77, 88),
                            y2 = c(21, 6))
saudi_data <- data.frame(
  x = c(96.5),
  y = c(35),
  label = "M. Al-Breik"
)
# Compose the annotated goal plot: passes (solid arrows), ball flight
# (red dashes), player movement (black dashes), crosses (curves), then
# the player labels and a narrative annotation.
# NOTE(review): the "Dusha V5" font must be registered via extrafont for
# the labels to render as intended -- verify on the target machine.
g <- ggplot(pass_data) +
  annotate_pitch() +
  geom_segment(aes(x = x, y = y, xend = x2, yend = y2),
               arrow = arrow(length = unit(0.25, "cm"),
                             type = "closed")) +
  geom_segment(data = ball_data,
               aes(x = x, y = y, xend = x2, yend = y2),
               linetype = "dashed", size = 0.85,
               color = "red") +
  geom_segment(data = movement_data,
               aes(x = x, y = y, xend = x2, yend = y2),
               linetype = "dashed", size = 1.2,
               color = "black") +
  geom_curve(data = curve_data,
             aes(x = x, y = y, xend = x2, yend = y2),
             curvature = 0.25,
             arrow = arrow(length = unit(0.25, "cm"),
                           type = "closed")) +
  geom_image(data = goal_img,
             aes(x = x, y = y,
                 image = image),
             size = 0.035) +
  theme_pitch() +
  theme(text = element_text(family = "Dusha V5")) +
  coord_flip(xlim = c(49, 101),
             ylim = c(-1, 101)) +
  ggtitle(label = "Russia (5) vs. (0) Saudi Arabia",
          subtitle = "First goal, Yuri Gazinsky (12th Minute)") +
  labs(caption = "By Ryo Nakagawara (@R_by_Ryo)") +
  geom_label(aes(x = 94, y = 60,
                 label = "Gazinsky"),
             hjust = -0.1, color = "red", family = "Dusha V5") +
  geom_label(aes(x = 83, y = 23,
                 label = "Golovin"),
             hjust = -0.05, color = "red", family = "Dusha V5") +
  geom_label(aes(x = 75, y = 11,
                 label = "Golovin"),
             hjust = -0.1, color = "red", family = "Dusha V5") +
  geom_label(aes(x = 98, y = 0,
                 label = "Zhirkov"),
             vjust = -0.3, color = "red", family = "Dusha V5") +
  geom_label(aes(x = 84, y = 6,
                 label = "Zhirkov"),
             vjust = -0.3, color = "red", family = "Dusha V5") +
  geom_label(
    data = saudi_data,
    aes(x = x, y = y,
        label = label),
    color = "darkgreen", family = "Dusha V5") +
  annotate("text", x = 69, y = 65, family = "Dusha V5",
           label = "After a poor corner kick clearance\n from Saudi Arabia, Golovin picks up the loose ball, \n exchanges a give-and-go pass with Zhirkov\n before finding Gazinsky with a beautiful cross!")
ggsave(g, filename = "gazinsky_goal.png", height = 6, width = 8)
## ----complete gazinsky gganimate, fig.width=8, fig.height=6--------------
# Animated version of the Gazinsky goal. Each keyframe is indexed by a
# `time` column (1..9) and mapped to the legacy gganimate `frame`
# aesthetic.
# NOTE(review): this uses the pre-1.0 gganimate API (frame aesthetic +
# gganimate() function); it will not work with gganimate >= 1.0, which
# replaced it with transition_*() — flagged, not changed.
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(ggimage)
library(extrafont)
library(gganimate)
# loadfonts()
# data
pass_data <- data.frame(
x = c(100, 94, 82, 82.5, 84, 76.5, 75.5, 94, 99.2), # pass balls
y = c(0, 35, 31, 22, 8, 13, 19, 60, 47.5),
time = c(1, 2, 3, 4, 5, 6, 7, 8, 9))
golovin_movement <- data.frame(
x = c(78, 80, 80, 80, 75.5, 74.5, 73.5, 73, 73), #75, 74, 73
y = c(30, 30, 27, 25, 10, 9, 15, 15, 15),
label = "Golovin",
time = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
)
zhirkov_movement <- data.frame(
x = c(98, 90, 84, 84, 84, 84, 84, 84, 84),
y = c( 0, 2, 2, 2, 2, 2, 2, 2, 2),
label = "Zhirkov",
time = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
)
# Gazinsky is stationary: one position repeated over frames 6-9.
gazinsky_movement <- data.frame(
x = c(92),
y = c(66.8),
label = "Gazinsky",
time = c(6, 7, 8, 9)
)
# segment golovin should only appear 4-5?
# segment zhirkov should only appear 1-3?
segment_data <- data.frame(
x = c(77.5, 98),
y = c(22, 2),
xend = c(75, 84),
yend = c(15, 3),
linetype = c("dashed", "dashed"),
color = c("black", "black"),
size = c(1.2, 1.25)
)
saudi_data <- data.frame(
x = c(95),
y = c(35),
label = "M. Al-Breik"
)
### soccer ball
# Ball keyframe positions; "26bd" below is the Unicode soccer-ball emoji.
ball_data <- tribble(
~x, ~y, ~time,
100, 0, 1,
94, 35, 2,
82, 31, 3,
82.5, 25, 4,
84, 6, 5,
77, 13, 6,
76, 19, 7,
94, 60, 8,
99.2, 47.5, 9,
)
gazin_ani <- ggplot(pass_data) +
annotate_pitch() +
theme_pitch() +
coord_flip(xlim = c(49, 101),
ylim = c(-1, 101)) +
geom_segment(data = segment_data,
aes(x = x, y = y,
xend = xend, yend = yend),
size = segment_data$size,
color = segment_data$color,
linetype = c("dashed", "dashed")) +
geom_label(
data = saudi_data,
aes(x = x, y = y,
label = label),
color = "darkgreen") +
geom_label(data = zhirkov_movement,
aes(x = x, y = y,
frame = time,
label = label),
color = "red") +
geom_label(data = golovin_movement,
aes(x = x, y = y,
frame = time,
label = label),
color = "red") +
geom_label(
data = gazinsky_movement,
aes(x = x, y = y,
label = label),
color = "red") +
ggimage::geom_emoji(
data = ball_data,
aes(x = x, y = y, frame = time),
image = "26bd", size = 0.035) +
ggtitle(label = "Russia (5) vs. (0) Saudi Arabia",
subtitle = "First goal, Yuri Gazinsky (12th Minute)") +
labs(caption = "By Ryo Nakagawara (@R_by_Ryo)") +
annotate("text", x = 69, y = 65, family = "Dusha V5",
label = "After a poor corner kick clearance\n from Saudi Arabia, Golovin picks up the loose ball, \n exchanges a give-and-go pass with Zhirkov\n before finding Gazinsky with a beautiful cross!") +
theme(text = element_text(family = "Dusha V5"))
# Side effect: renders the animation to a GIF in the working directory.
gganimate(gazin_ani,
width = 8, height = 6,
title_frame = FALSE,
"gazin_ggani_final.gif")
## ----complete gazinskiy tweenr, fig.width=8, fig.height=6----------------
# Smoother animation of the same goal: tweenr interpolates between the
# hand-placed keyframes (tween_states adds a `.frame` column), then the
# legacy gganimate() renders one image per interpolated frame.
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(ggimage)
library(extrafont)
library(gganimate)
library(tweenr)
library(purrr)
# loadfonts()
# data
pass_data <- data.frame(
x = c(100, 94, 82, 82.5, 84, 76.5, 75.5, 94, 99.2), # pass balls
y = c(0, 35, 31, 22, 8, 13, 19, 60, 47.5),
time = c(1, 2, 3, 4, 5, 6, 7, 8, 9))
golovin_movement <- data.frame(
x = c(78, 80, 80, 80, 75.5, 74.5, 73.5, 73, 73), #75, 74, 73
y = c(30, 30, 27, 25, 10, 9, 15, 15, 15),
label = "Golovin",
time = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
)
zhirkov_movement <- data.frame(
x = c(98, 90, 84, 84, 84, 84, 84, 84, 84),
y = c( 0, 2, 2, 2, 2, 2, 2, 2, 2),
label = "Zhirkov",
time = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
)
gazinsky_movement <- data.frame(
x = c(92),
y = c(66.8),
label = "Gazinsky",
time = c(6, 7, 8, 9)
)
# saudi defender
saudi_data <- data.frame(
x = c(95),
y = c(35),
label = "M. Al-Breik"
)
### soccer ball
ball_data <- tribble(
~x, ~y, ~time,
100, 0, 1,
94, 35, 2,
82, 31, 3,
82.5, 25, 4,
84, 6, 5,
77, 13, 6,
76, 19, 7,
94, 60, 8,
99.2, 47.5, 9,
)
### ball movement
# pmap(data.frame) turns each row into its own one-row data frame, the
# list-of-states input that tween_states() expects.
b_list <- ball_data %>% pmap(data.frame)
ball_tween <- b_list %>%
tween_states(tweenlength = 0.5, statelength = 0.00000001, ease = "linear", nframes = 75)
### Golovin
golovin_movement_list <- golovin_movement %>% pmap(data.frame)
golovin_tween <- golovin_movement_list %>%
tween_states(tweenlength = 0.5, statelength = 0.00000001, ease = "linear", nframes = 75)
# Re-attach the label column (dropped/garbled by the tweening step).
golovin_tween <- golovin_tween %>% mutate(label = "Golovin")
### Zhirkov
zhirkov_movement_list <- zhirkov_movement %>% pmap(data.frame)
zhirkov_tween <- zhirkov_movement_list %>%
tween_states(tweenlength = 0.5, statelength = 0.00000001, ease = "linear", nframes = 75)
zhirkov_tween <- zhirkov_tween %>% mutate(label = "Zhirkov")
### PLOT
gazin_move <- ggplot(pass_data) +
annotate_pitch() +
theme_pitch() +
coord_flip(xlim = c(49, 101),
ylim = c(-1, 101)) +
geom_label(
data = saudi_data,
aes(x = x, y = y,
label = label),
color = "darkgreen") +
geom_label(data = zhirkov_tween,
aes(x = x, y = y,
frame = .frame,
label = label),
color = "red") +
geom_label(data = golovin_tween,
aes(x = x, y = y,
frame = .frame,
label = label),
color = "red") +
geom_label(
data = gazinsky_movement,
aes(x = x, y = y,
label = label),
color = "red") +
ggimage::geom_emoji(
data = ball_tween,
aes(x = x, y = y, frame = .frame),
image = "26bd", size = 0.035) +
ggtitle(label = "Russia (5) vs. (0) Saudi Arabia",
subtitle = "First goal, Yuri Gazinsky (12th Minute)") +
labs(caption = "By Ryo Nakagawara (@R_by_Ryo)") +
annotate("text", x = 69, y = 65, family = "Dusha V5",
label = "After a poor corner kick clearance\n from Saudi Arabia, Golovin picks up the loose ball, \n exchanges a give-and-go pass with Zhirkov\n before finding Gazinsky with a beautiful cross!") +
theme(text = element_text(family = "Dusha V5"))
# Side effects: renders both a GIF and an MP4 in the working directory.
gganimate(gazin_move,
width = 8, height = 6,
title_frame = FALSE, interval = 0.25,
"gazinsky_goal_final.gif")
gganimate(gazin_move,
width = 8, height = 6,
title_frame = FALSE, interval = 0.25,
"gazin_move.mp4")
## ----cristiano hat trick, fig.height=5, fig.width=7----------------------
# Static diagram of Ronaldo's hat-trick vs. Spain: three numbered goal
# positions, a segment + curve for shot trajectories, country flags, and
# soccer-ball emoji markers.
library(ggplot2)
library(ggsoccer)
library(extrafont)
library(emoGG)
library(ggimage)
# loadfonts()
# Official WC 2018 Font: "Dusha"
# http://fifa2018wiki.com/fifa-2018-font-typeface-download-dusha-font-ttf/509/
# Interactive lookup only (prints matches); "26bd" = soccer-ball emoji.
emoji_search("soccer") # "26bd"
goals_data <- data.frame(x = c(88, 80, 71),
y = c(50, 48, 54),
label = c(1, 2, 3))
curve_data <- data.frame(x = c(88, 71), y = c(50, 54),
xend = c(100, 100), yend = c(54, 54))
annotation_data <- data.frame(
hjust = c(0.5, 0.5, 0.5, 0, 0, 0),
label = c("Portugal (3) vs. Spain (3)",
"Cristiano's Hattrick (4', 44', 88')",
"by Ryo Nakagawara (@R_by_Ryo)",
"1. Fouled by Nacho in the box,\nCristiano confidently strokes the ball\ninto the right corner from the spot.",
"2. Guedes lays it off to Cristiano whose\nstrong shot is uncharacteristically\nfumbled by De Gea into the net.",
"In the final minutes of the game,\nCristiano wins a freekick against Pique\nand curls it beautifully over the wall."),
x = c(110, 105, 53, 76, 66, 66),
y = c(30, 20, 85, 5, 5, 55)
)
# ISO2 country codes consumed by ggimage::geom_flag.
flag_data <- data.frame(
image = c("PT", "ES"),
x = c(110, 110),
y = c(19.1, 51.1)
)
# PLOT
cr <- ggplot(goals_data) +
annotate_pitch() +
theme_pitch() +
theme(text = element_text(family = "Dusha V5"),
legend.position = "none") +
coord_flip(xlim = c(55, 112),
ylim = c(-1, 101)) +
geom_segment(x = 80, y = 48,
xend = 97, yend = 48) + # 2nd
geom_segment(x = 97, y = 48,
xend = 100, yend = 45.5,
arrow = arrow(length = unit(0.25, "cm"),
type = "closed")) + # degea fumble
geom_curve(data = curve_data,
aes(x = x, y = y,
xend = xend, yend = yend), # FREEKICK
curvature = 0.3,
arrow = arrow(length = unit(0.25, "cm"), type = "closed")) +
geom_text(data = annotation_data,
family = "Dusha V5",
aes(x = x, y = y,
hjust = hjust, label = label),
size = c(6.5, 4.5, 3, 3.5, 3.5, 3.5)) +
geom_flag(data = flag_data,
aes(x = x, y = y,
image = image), size = c(0.08, 0.08)) + # Portugal + Spain Flag
ggimage::geom_emoji(aes(x = 105,
y = c(45, 50, 55)),
image = "26bd", size = 0.035) +
geom_point(aes(x = x, y = y),
shape = 21, size = 7, color = "black", fill = "white") +
geom_text(aes(x = x, y = y, label = label, family = "Dusha V5"))
# Side effect: writes cr_hattrick.png to the working directory.
ggsave(cr, filename = "cr_hattrick.png", height = 5, width = 7)
## ----osako winner, fig.height = 5, fig.width = 7-------------------------
# Static diagram of Osako's winning header vs. Colombia: Honda's corner
# (curve), Osako's header (segment), player labels, flags, and the WC logo.
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(extrafont)
library(ggimage)
cornerkick_data <- data.frame(x = 99, y = 0.3,
x2 = 94, y2 = 47)
osako_gol <- data.frame(x = 94, y = 49,
x2 = 100, y2 = 55.5)
player_label <- data.frame(x = c(92, 99),
y = c(49, 2))
# Tournament logo fetched from Wikipedia at plot time (needs network).
wc_logo <- data.frame(x = 107,
y = 85) %>%
mutate(image = "https://upload.wikimedia.org/wikipedia/en/thumb/6/67/2018_FIFA_World_Cup.svg/1200px-2018_FIFA_World_Cup.svg.png")
g <- ggplot(osako_gol) +
annotate_pitch() +
theme_pitch() +
theme(text = element_text(family = "Dusha V5")) +
coord_flip(xlim = c(55, 112),
ylim = c(-1, 101)) +
geom_curve(data = cornerkick_data,
aes(x = x, y = y, xend = x2, yend = y2),
curvature = -0.15,
arrow = arrow(length = unit(0.25, "cm"),
type = "closed")) +
geom_segment(aes(x = x, y = y, xend = x2, yend = y2),
arrow = arrow(length = unit(0.25, "cm"),
type = "closed")) +
geom_label(data = player_label,
aes(x = x, y = y),
label = c("Osako", "Honda"), family = "Dusha V5") +
geom_point(aes(x = 98, y = 50), size = 3, color = "green") +
geom_text(aes(x = 99.7, y = 50), size = 5, label = "???", family = "Dusha V5") +
annotate(geom = "text", family = "Dusha V5",
hjust = c(0.5, 0.5, 0.5, 0.5, 0.5),
size = c(6.5, 4.5, 4, 3.5, 3),
label = c("Japan (2) vs. Colombia (1)",
"Kagawa (PEN 6'), Quintero (39'), Osako (73')",
"Japan press their man advantage, substitute Honda\ndelivers a delicious corner kick for Osako to (somehow) tower over\nColombia's defense and flick a header into the far corner!",
"Bonus: Ospina looking confused and\ndoing a lil' two-step-or-god-knows-what.",
"by Ryo Nakagawara (@R_by_Ryo)"),
x = c(110, 105, 70, 92, 53),
y = c(30, 30, 45, 81, 85)) +
ggimage::geom_flag(aes(image = "JP"), # Japan Flag
x = 110, y = 13, size = 0.08) +
ggimage::geom_flag(aes(image = "CO"), # Colombia Flag
x = 110, y = 53, size = 0.08) +
ggimage::geom_emoji(aes(x = 95,
y = 50),
image = "26bd", size = 0.035) +
geom_image(data = wc_logo,
aes(x = x, y = y,
image = image), size = 0.17) +
theme(plot.margin=grid::unit(c(0,0,0,0), "mm"))
# Side effect: writes osako_winner.png to the working directory.
ggsave(g, filename = "osako_winner.png", height = 5, width = 7)
## ----osako anim----------------------------------------------------------
# Animated version of the Osako goal: the ball is keyframed over a `time`
# column (1..3) mapped to the legacy gganimate `frame` aesthetic.
# NOTE(review): pre-1.0 gganimate API — incompatible with gganimate >= 1.0.
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(extrafont)
library(emoGG)
library(ggimage)
library(gganimate)
ball_data <- data.frame(x = c(99, 94, 100),
y = c(0.3, 47, 55.5),
time = c(1, 2, 3))
player_label <- data.frame(x = c(92, 97),
y = c(48, 0))
wc_logo <- data.frame(x = 107,
y = 85) %>%
mutate(image = "https://upload.wikimedia.org/wikipedia/en/thumb/6/67/2018_FIFA_World_Cup.svg/1200px-2018_FIFA_World_Cup.svg.png")
g <- ggplot(ball_data) +
annotate_pitch() +
theme_pitch() +
theme(text = element_text(family = "Dusha V5")) +
coord_flip(xlim = c(55, 112),
ylim = c(-1, 101)) +
geom_label(data = player_label,
aes(x = x, y = y),
label = c("Osako", "Honda"), family = "Dusha V5") +
geom_point(aes(x = 98, y = 50), size = 3, color = "green") +
geom_text(aes(x = 99.7, y = 50), size = 5, label = "???", family = "Dusha V5") +
annotate(geom = "text", family = "Dusha V5",
hjust = c(0.5, 0.5, 0.5, 0.5, 0.5),
size = c(6.5, 4.5, 4, 3.5, 3),
label = c("Japan (2) vs. Colombia (1)",
"Kagawa (PEN 6'), Quintero (39'), Osako (73')",
"Japan press their man advantage, substitute Honda\ndelivers a delicious corner kick for Osako to (somehow) tower over\nColombia's defense and flick a header into the far corner!",
"Bonus: Ospina looking confused and\ndoing a lil' two-step-or-god-knows-what.",
"by Ryo Nakagawara (@R_by_Ryo)"),
x = c(110, 105, 70, 92, 53),
y = c(30, 30, 45, 81, 85)) +
ggimage::geom_flag(aes(image = "JP"), # Japan Flag
x = 110, y = 13, size = 0.08) +
ggimage::geom_flag(aes(image = "CO"), # Colombia Flag
x = 110, y = 53, size = 0.08) +
ggimage::geom_emoji(aes(x = x,
y = y,
frame = time),
image = "26bd", size = 0.035) +
geom_image(data = wc_logo,
aes(x = x, y = y,
image = image), size = 0.17) +
theme(plot.margin=grid::unit(c(0,0,0,0), "mm"))
g
# Side effect: renders osako_ani.gif in the working directory.
gganimate(g, "osako_ani.gif")
## ----osako tween, fig.height = 5, fig.width = 7--------------------------
# TWEEN
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(extrafont)
library(emoGG)
library(ggimage)
library(gganimate)
library(purrr)
library(tweenr)
player_label <- data.frame(x = c(92, 97),
y = c(48, 0))
wc_logo <- data.frame(x = 107,
y = 85) %>%
mutate(image = "https://upload.wikimedia.org/wikipedia/en/thumb/6/67/2018_FIFA_World_Cup.svg/1200px-2018_FIFA_World_Cup.svg.png")
# tweenr the ball movement data
ball_data <- data.frame(x = c(99, 94, 100),
y = c(0.3, 47, 55.5))
ball_list <- ball_data %>% pmap(data.frame)
osako_tween <- ball_list %>%
tween_states(tweenlength = 1.5, statelength = 0.01, ease = "quadratic-out", nframes = 50)
g2 <- ggplot(osako_tween) +
annotate_pitch() +
theme_pitch() +
theme(text = element_text(family = "Dusha V5")) +
coord_flip(xlim = c(55, 112),
ylim = c(-1, 101)) +
geom_label(data = player_label,
aes(x = x, y = y),
label = c("Osako", "Honda"), family = "Dusha V5") +
geom_point(aes(x = 98, y = 50), size = 3, color = "green") +
annotate(geom = "text", family = "Dusha V5",
hjust = c(0.5, 0.5, 0.5, 0.5),
size = c(6.5, 4.5, 5, 3),
label = c("Japan (2) vs. Colombia (1)",
"Kagawa (PEN 6'), Quintero (39'), Osako (73')",
"Japan press their man advantage, substitute Honda\ndelivers a delicious corner kick for Osako to (somehow) tower over\nColombia's defense and flick a header into the far corner!",
"by Ryo Nakagawara (@R_by_Ryo)"),
x = c(110, 105, 70, 53),
y = c(30, 30, 47, 85)) +
ggimage::geom_emoji(aes(x = x,
y = y,
frame = .frame),
image = "26bd", size = 0.035) +
ggimage::geom_flag(aes(image = "JP"), # Japan Flag
x = 110, y = 13, size = 0.08) +
ggimage::geom_flag(aes(image = "CO"), # Colombia Flag
x = 110, y = 53, size = 0.08) +
geom_image(data = wc_logo,
aes(x = x, y = y,
image = image), size = 0.17) +
theme(plot.margin=grid::unit(c(0,0,0,0), "mm"))
g2
gganimate(g2,
ani.width = 800, ani.height = 500,
interval = 0.5,
"osako_tween.gif")
gganimate(g2, title_frame = FALSE,
width = 700, height = 500,
interval = 0.01,
"osako_tween_final.gif")
## ----offside data, fig.height=6, fig.width=4-----------------------------
# Japan vs. Senegal "perfect offside trap": player positions are stored
# wide (one x/y column pair per player, prefixed j* for Japan, s* for
# Senegal) with 5 keyframes each, plus a moving offside line (xoend).
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(ggimage)
library(extrafont)
library(gganimate)
library(tweenr)
library(purrr)
library(countrycode)
# library(StatsBombR)
# loadfonts()
# flags
# countrycode() converts team names to ISO2 codes for geom_flag.
flag_data <- data.frame(
x = c( 48, 87),
y = c(107, 107),
team = c("japan", "senegal")
) %>%
mutate(
image = team %>%
countrycode(., origin = "country.name", destination = "iso2c")
) %>%
select(-team)
# PLAYERS
# JAPAN: x, y (blue) Senegal: x2, y2 (lightgreen)
# push 2, 3 frames up above box >>> add 5, 6 as frames??
trap_data <- data.frame(
time = c(1, 2, 3, 4, 5),
# ball trajectory
x = c(70, 70, 70, 87, 95), # pass balls
y = c(85, 85, 85, 52, 33),
# offside bar
#xo = c(83, 81.2, 79, 77.5, 70),
xoend = c(83.8, 81.8, 79, 78.5, 71),
yo = c( 5, 5, 5, 5, 5),
yoend = c(95, 95, 95, 95, 95),
# players: japan
jx = c(83, 81, 77, 75, 70),
jy = c(rep(65, 5)),
jx2 = c(83, 81.8, 78.5, 77, 70),
jy2 = c(rep(60.5, 5)),
jx3 = c(83, 81, 76.5, 75, 71),
jy3 = c(rep(55, 5)),
jx4 = c(83, 81.2, 76.3, 75, 70),
jy4 = c(rep(52, 5)),
jx5 = c(82.8, 81, 77, 74, 70),
jy5 = c(rep(49, 5)),
jx6 = c(83, 81.8, 77, 74, 70),
jy6 = c(rep(45, 5)),
jx7 = c(83.8, 81, 79, 77.5, 70),
jy7 = c(rep(40, 5)),
# players: senegal
sx = c(83, 84, 84, 84, 84),
sy = c(rep(33, 5)),
sx2 = c(83, 85, 87, 92, 95),
sy2 = c(38, 37, 35, 34, 33),
sx3 = c(83, 84, 84, 83, 83),
sy3 = c(rep(41, 5)),
sx4 = c(83, 84, 83, 78, 78),
sy4 = c(rep(45, 5)),
sx5 = c(83, 84, 87, 88, 89),
sy5 = c(rep(52, 5)),
sx6 = c(83, 85, 84, 84, 83),
sy6 = c(rep(69, 5))
)
# fix focus field issue with coord_fixed() + aspect_ratio = NULL in theme_pitch()
# One geom_point layer per wide column pair (wide data, so no tidy
# mapping is possible here — flagged as a candidate for pivot_longer).
g <- ggplot(trap_data) +
annotate_pitch() +
theme_pitch(aspect_ratio = NULL) +
coord_fixed(xlim = c(30, 101),
ylim = c(-5, 131)) +
# offside line
geom_segment(aes(x = xoend, y = yo,
xend = xoend, yend = yoend,
frame = time),
color = "black", size = 1.3) +
# start at 83 just use geom_segment instead
# japan
geom_point(aes(x = jx, y = jy, frame = time), size = 4, color = "blue") +
geom_point(aes(x = jx2, y = jy2, frame = time), size = 4, color = "blue") +
geom_point(aes(x = jx3, y = jy3, frame = time), size = 4, color = "blue") +
geom_point(aes(x = jx4, y = jy4, frame = time), size = 4, color = "blue") +
geom_point(aes(x = jx5, y = jy5, frame = time), size = 4, color = "blue") +
geom_point(aes(x = jx6, y = jy6, frame = time), size = 4, color = "blue") +
geom_point(aes(x = jx7, y = jy7, frame = time), size = 4, color = "blue") +
# senegal
geom_point(aes(x = sx, y = sy, frame = time), size = 4, color = "green") +
geom_point(aes(x = sx2, y = sy2, frame = time), size = 4, color = "green") +
geom_point(aes(x = sx3, y = sy3, frame = time), size = 4, color = "green") +
geom_point(aes(x = sx4, y = sy4, frame = time), size = 4, color = "green") +
geom_point(aes(x = sx5, y = sy5, frame = time), size = 4, color = "green") +
geom_point(aes(x = sx6, y = sy6, frame = time), size = 4, color = "green") +
# free kick spot (reference)
geom_point(aes(x = 70, y = 85), color = "blue", size = 1.2) +
annotate(geom = "text", family = "Dusha V5",
hjust = c(0, 0, 0, 0.5),
size = c(4.5, 3, 5.5, 3),
label = c("Japan (2) vs. Senegal (2)",
"Mane (11'), Inui (33'), Wague (71'), Honda (78')",
"The Perfect Offside Trap",
"by Ryo Nakagawara\n(@R_by_Ryo)"),
x = c(30, 30, 30, 94),
y = c(117, 108, 125, -3)) +
ggimage::geom_flag(data = flag_data,
aes(x = x, y = y,
image = image),
size = c(0.08, 0.08)) +
ggimage::geom_emoji(aes(x = x, y = y,
frame = time),
image = "26bd", size = 0.035)
g
## ----offside gganimate, fig.height=6, fig.width=4------------------------
# vline for offside line
# x1 Ja, x2 Sen
# sligh twiggle before kick?
# goalkeeper position
# Side effect: renders g_ani.gif (legacy gganimate API).
gganimate(g, "g_ani.gif")
## ----tweenr offside final, fig.height=10, fig.width=8--------------------
# Final offside-trap animation: same wide keyframe data as above, but
# each row is tweened into 50 interpolated frames; static goalkeeper and
# extra Senegal defenders are overlaid as separate layers.
library(ggplot2)
library(dplyr)
library(ggsoccer)
library(ggimage)
library(extrafont)
library(gganimate)
library(tweenr)
library(purrr)
library(countrycode)
# library(StatsBombR)
# loadfonts()
# PLAYERS
# JAPAN: x, y (blue) Senegal: x2, y2 (lightgreen)
trap_data <- data.frame(
time = c(1, 2, 3, 4, 5),
# ball trajectory
x = c(70, 70, 70, 87, 95), # pass balls
y = c(85, 85, 85, 52, 33),
# offside bar
#xo = c(83, 81.2, 79, 77.5, 70),
xoend = c(83.8, 81.8, 79, 78.5, 71),
yo = c( 5, 5, 5, 5, 5),
yoend = c(95, 95, 95, 95, 95),
# players: japan
jx = c(83, 81, 77, 75, 70),
jy = c(rep(65, 5)),
jx2 = c(83, 81.8, 78.5, 77, 70),
jy2 = c(rep(60.5, 5)),
jx3 = c(83, 81, 76.5, 75, 71),
jy3 = c(rep(55, 5)),
jx4 = c(83, 81.2, 76.3, 75, 70),
jy4 = c(rep(52, 5)),
jx5 = c(82.8, 81, 77, 74, 70),
jy5 = c(rep(49, 5)),
jx6 = c(83, 81.8, 77, 74, 70),
jy6 = c(rep(45, 5)),
jx7 = c(83.8, 81, 79, 77.5, 70),
jy7 = c(rep(40, 5)),
# players: senegal
sx = c(83, 84, 84, 84, 84),
sy = c(rep(33, 5)),
sx2 = c(83, 85, 87, 92, 95),
sy2 = c(38, 37, 35, 34, 33),
sx3 = c(83, 84, 84, 83, 83),
sy3 = c(rep(41, 5)),
sx4 = c(83, 84, 83, 78, 78),
sy4 = c(rep(45, 5)),
sx5 = c(83, 84, 87, 88, 89),
sy5 = c(rep(52, 5)),
sx6 = c(83, 85, 84, 84, 83),
sy6 = c(rep(69, 5))
)
# flags
flag_data <- data.frame(
x = c( 42, 72),
y = c(107, 107),
team = c("japan", "senegal")
) %>%
mutate(
image = team %>%
countrycode(., origin = "country.name", destination = "iso2c")
) %>%
select(-team)
# extra players:
goalkeeper_data <- data.frame(
x = c(98),
y = c(50)
)
senegal_data <- data.frame(
x = c(55, 55, 68.5),
y = c(50, 60, 87)
)
# create list of dfs
offside_list <- trap_data %>% pmap(data.frame)
# tweenr
offside_tween <- offside_list %>%
tween_states(tweenlength = 0.5, statelength = 0.00000001, ease = "linear", nframes = 50)
# PLOT
g2 <- ggplot(offside_tween) +
annotate_pitch() +
theme_pitch(aspect_ratio = NULL) +
coord_fixed(xlim = c(30, 101),
ylim = c(-5, 117)) +
# offside line
geom_segment(aes(x = xoend, y = yo,
xend = xoend, yend = yoend,
frame = .frame),
color = "black", size = 1.3) +
# start at 83 just use geom_segment instead
# japan
geom_point(aes(x = jx, y = jy, frame = .frame), size = 4, color = "blue") +
geom_point(aes(x = jx2, y = jy2, frame = .frame), size = 4, color = "blue") +
geom_point(aes(x = jx3, y = jy3, frame = .frame), size = 4, color = "blue") +
geom_point(aes(x = jx4, y = jy4, frame = .frame), size = 4, color = "blue") +
geom_point(aes(x = jx5, y = jy5, frame = .frame), size = 4, color = "blue") +
geom_point(aes(x = jx6, y = jy6, frame = .frame), size = 4, color = "blue") +
geom_point(aes(x = jx7, y = jy7, frame = .frame), size = 4, color = "blue") +
# senegal
geom_point(aes(x = sx, y = sy, frame = .frame), size = 4, color = "green") +
geom_point(aes(x = sx2, y = sy2, frame = .frame), size = 4, color = "green") +
geom_point(aes(x = sx3, y = sy3, frame = .frame), size = 4, color = "green") +
geom_point(aes(x = sx4, y = sy4, frame = .frame), size = 4, color = "green") +
geom_point(aes(x = sx5, y = sy5, frame = .frame), size = 4, color = "green") +
geom_point(aes(x = sx6, y = sy6, frame = .frame), size = 4, color = "green") +
# free kick spot (reference)
geom_point(aes(x = 70, y = 85), color = "black", size = 1.2) +
# goalkeeper
geom_point(data = goalkeeper_data,
aes(x = x, y = y), size = 4, color = "blue") +
# senegal defenders
geom_point(data = senegal_data,
aes(x = x, y = y), size = 4, color = "green") +
annotate(
geom = "text", family = "Dusha V5",
hjust = c(0, 0, 0.5),
size = c(6, 6.5, 3),
label = c("Japan (2) vs. Senegal (2)",
"The Perfect Offside Trap",
"by Ryo Nakagawara\n(@R_by_Ryo)"),
x = c(30, 30, 94),
y = c(107, 115, -3)) +
# NOTE(review): hardcoded aes vectors c(48, 90)/c(107, 107) override the
# x/y columns in flag_data (defined as x = c(42, 72) above) — confirm
# which positions are intended; one of the two is dead data.
ggimage::geom_flag(data = flag_data,
aes(x = c(48, 90), y = c(107, 107),
image = image),
size = c(0.07, 0.07)) +
ggimage::geom_emoji(aes(x = x, y = y,
frame = .frame),
image = "26bd", size = 0.035)
# Side effect: renders offside_final.gif (legacy gganimate API).
gganimate(g2,
interval = 0.001, height = 10, width = 8,
"offside_final.gif",
title_frame = FALSE)
## ----meme----------------------------------------------------------------
# Closing joke: render a "Roll Safe" meme via memery.
# Side effect: downloads the template image and writes offside_meme.png
# to the working directory.
library(memery)
# Fix: drop the redundant grouping parentheses around the URL literal
# (`img <- ("...")` parses, but the parentheses are noise, not a call).
img <- "https://imgflip.com/s/meme/Roll-Safe-Think-About-It.jpg"
meme_labs <- c("you can't lose the aerial battle", "if you set an offside trap")
meme(img, meme_labs, "offside_meme.png")
|
65502148b0572b1f1f82f530b9be13688f96d352
|
1443e812411278d1f776f8f7d1196add8e2dcc31
|
/man/readCellRanger.Rd
|
e939845249e4de766fb27376ba480a9805ce3b12
|
[
"MIT"
] |
permissive
|
WeiSong-bio/roryk-bcbioSinglecell
|
e96f5ab1cb99cf1c59efd728a394aaea104d82b2
|
2b090f2300799d17fafe086bd03a943d612c809f
|
refs/heads/master
| 2020-06-15T23:38:23.802177
| 2018-07-03T21:01:07
| 2018-07-03T21:01:07
| 195,422,697
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,300
|
rd
|
readCellRanger.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readCellRanger.R
\name{readCellRanger}
\alias{readCellRanger}
\title{Read 10X Genomics Cell Ranger Data}
\usage{
readCellRanger(uploadDir, sampleMetadataFile = NULL, refdataDir = NULL,
interestingGroups = "sampleName", transgeneNames = NULL,
spikeNames = NULL, ...)
}
\arguments{
\item{uploadDir}{Path to Cell Ranger output directory. This directory path
must contain \code{filtered_gene_bc_matrices*} as a child directory.}
\item{sampleMetadataFile}{Sample barcode metadata file. Optional for runs
with demultiplixed index barcodes (e.g. SureCell), but otherwise required
for runs with multipliexed FASTQs containing multiple index barcodes (e.g.
inDrop).}
\item{refdataDir}{Directory path to Cell Ranger reference annotation data.}
\item{interestingGroups}{Character vector of interesting groups. Must be
formatted in camel case and intersect with \code{\link[=sampleData]{sampleData()}} colnames.}
\item{transgeneNames}{\code{character} vector indicating which \code{\link[=assay]{assay()}} rows
denote transgenes (e.g. EGFP, TDTOMATO).}
\item{spikeNames}{\code{character} vector indicating which \code{\link[=assay]{assay()}} rows denote
spike-in sequences (e.g. ERCCs).}
\item{...}{Additional arguments, to be stashed in the \code{\link[=metadata]{metadata()}} slot.}
}
\value{
\code{SingleCellExperiment}.
}
\description{
Read \href{https://www.10xgenomics.com/software/}{10x Genomics Chromium} cell
counts from \code{barcodes.tsv}, \code{genes.tsv}, and \code{matrix.mtx} files.
}
\section{Directory structure}{
Cell Ranger can vary in its output directory structure, but we're requiring a
single, consistent data structure for datasets containing multiple samples.
Note that Cell Ranger data may not always contain per sample subdirectories,
or the "\code{outs}" subdirectory. We may make this more flexible in the future,
but for now we're making this strict to ensure reproducibility.
\preformatted{
file.path(
"<uploadDir>",
"<sampleName>",
"outs",
"filtered_gene_bc_matrices*",
"outs",
"<genomeBuild>",
"matrix.mtx"
)
}
}
\section{Sample metadata}{
A user-supplied sample metadata file defined by \code{sampleMetadataFile} is
required for multiplexed datasets. Otherwise this can be left \code{NULL}, and
minimal sample data will be used, based on the directory names.
}
\section{Reference data}{
We strongly recommend supplying the corresponding reference data required for
Cell Ranger with the \code{refdataDir} argument. When set, the function will
detect the \code{organism}, \code{ensemblRelease}, and \code{genomeBuild} automatically,
based on the 10X \code{refdataDir} YAML metadata. Additionally, it will convert
the gene annotations defined in the GTF file into a \code{GRanges} object, which
get slotted in \code{\link[=rowRanges]{rowRanges()}}. Otherwise, the function will attempt to use the
most current annotations available from Ensembl, and some gene IDs may not
match, due to deprecation in the current Ensembl release.
}
\examples{
uploadDir <- system.file("extdata/cellranger", package = "bcbioSingleCell")
x <- readCellRanger(uploadDir)
show(x)
}
\seealso{
Other Read Functions: \code{\link{readCellTypeMarkers}}
}
\author{
Michael Steinbaugh
}
|
df4f40750b4521727e550bdb217b865f0584ad2f
|
7f873c96ced1c3b57c378102febbdcda3bc8e0c4
|
/data_analysis/computer_classes/data_visualisation.R
|
e6a6f03d68bb790bade56d6e537fa3fbb6b5fe69
|
[] |
no_license
|
IanMadlenya/teaching
|
ec71610301c2f23f4e38b0e73c7f052edd71c984
|
6ca5a296b07b5b42e80ff56070edd4d72a561a8f
|
refs/heads/master
| 2020-03-14T03:32:16.376964
| 2018-04-25T14:09:56
| 2018-04-25T14:09:56
| 131,422,094
| 1
| 0
| null | 2018-04-28T15:38:38
| 2018-04-28T15:38:37
| null |
UTF-8
|
R
| false
| false
| 8,411
|
r
|
data_visualisation.R
|
#------------------------------------------------------------------------------
# Tutorial 2: Data visualisation
#------------------------------------------------------------------------------
setwd("~/Dropbox/ucd/data_analysis/") # Set working directory
par(las=1,bty="n") # Plot settings
# In this tutorial we are going to have a look at a number of different
# probability distributions.
# We are going to look at the characteristics of these distributions and
# see how well these fit observed data.
# However, we first start with generating random data before we move on
# to real life data.
#------------------------------------------------------------------------------
#### 1) Generating probability distributions ####
# We start with generating some random data.
# The normal distribution is often a good vantage point:
set.seed(42);x<-rnorm(1000) # NB - 'set.seed' used for replicable results
# When working with data, one of the first things you do is examining the
# properties by looking and the descriptive statistics and plotting the data.
# This can often reveal relevant information such as the presence of missing
# values or whether the data is skewed.
# Important aspect to be considered when further processing the data and using
# it for more formal statistical analysis.
# For our randomly distributed data,
# let's have a look at the summary statistics:
summary(x)
sd(x)
# Plot the data in a histogram, adding the mean as a vertical line
hist(x) # You can use 'prob=TRUE' to get probabilities rather than frequencies.
abline(v=mean(x),col="red")
## Q1: Generate the following distributions, all with size 1000:
# Normal distribution with mean 2 and sd 1;
# Uniform distribution with min 0 and max 10;
# Poisson distribution with mean 2;
# Gamma distribution with mean 2.
# See '?Distributions' for help.
# Plot the results using the code given below.
par(mfrow=c(2,2)) # 2x2 plot
hist(Normal);hist(Uniform);hist(Poisson);hist(Gamma) # Note use of ';'
dev.off() # To remove plot and reset settings
## Q2: For each distribution, think of real life example where the data would
# fit that distribution.
# A recent study has shown that the Dutch men are the tallest in the world.
# http://www.bbc.com/news/science-environment-36888541
# The average height of the Dutch man is 182.5 cm with a standard deviation of
# 6 cm.
# Generate the height distribution for a population the size of 's-Hertogenbosch
# (150,000) and plot the data in a histogram indicating where the mean is.
## Q3: How many men will be at least 188.5 cm tall?
## Q4: The average Dutch woman has a height of 169 cm with a standard deviation
# of 2 cm.
# On a population of 8.5 million Dutch women, how many will be at least as tall
# as the average Dutch man?
# Hint: you have to use the 'pnorm' function here, check '?pnorm'.
# You can take the square root using 'sqrt'.
#------------------------------------------------------------------------------
#******************#
#### Tukey Time ####
#******************#
# John Tukey (1915-2000) was an American mathematician who made a number of
# important contributions to the field of statistics.
# For instance, he is responsible for laying out the foundations of
# exploratory data analysis, a central theme in this part of the course.
# One of his most famous contributions was the boxplot.
# The boxplot is a method to graphically display the distribution of the data,
# specifically focusing on the quartiles.
# Let's use the boxplot to visually summarise the height data you just generated.
boxplot(x,horizontal=TRUE) # Can change orientation using 'horizontal=TRUE'
# In a boxplot, the box shows the range of the data between the first and third
# quartile, where the thick black line indicates the median.
# The whiskers in this case extend to 1.5 of the Inter Quartile Range (Q3-Q1)
# of the lower or upper quartile.
# The dots outside of the whiskers represent the outliers.
# To get a summary we can get Tukey's Five Numbers, which will give the
# minimum, lower-hinge, median, upper-hinge, and maximum.
fivenum(x)
# The boxplot is useful to compare data across groups.
# Let's look at the average height of women again across groups of different
# size.
## Q5: What indication of sample size does the boxplot give?
index<-c(rep(1,30),rep(2,200),rep(3,1500),rep(4,20000)) # Generate groups
data<-c(rnorm(30,169,2),rnorm(200,169,2),
        rnorm(1500,169,2),rnorm(20000,169,2)) # Generate data
boxplot(data~index)
#------------------------------------------------------------------------------
#### 2) The size of whales ####
# From Dutch women we move on to the size of whales.
# You should have downloaded a csv-file called 'whales.csv'.
# The data is taken from a paper called "How Large Should Whales Be?", which
# of course is an important scientific question.
# http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053967
# The data consists of estimates of whale size for different species.
# Load said file using the following command:
whales<-read.csv("whales.csv",header=TRUE,sep=",",stringsAsFactors=FALSE)
# Let's check the data we just loaded
str(whales)
head(whales)
# A useful command in R is 'unique'.
# Let's find out the number of different groups and families.
length(unique(whales$group))
length(unique(whales$family))
## Q6: Examine the distribution of the size of whales.
# Do you think that the average size is a good measure of centrality?
# Explain why or why not.
# We can use the boxplot to summarise the size distribution per group.
# Due to the length of the names we first need to adjust some of the plot
# settings
par(mar=c(4,10,2,2),las=1)# 'mar' sets graph margins (bottom,right,top,left).
boxplot(mass_kg~family,whales,horizontal=TRUE)
# Figure shows that there is quite some variation.
# The large scale differences obscure details for some families.
# One thing we could do is use a log-scale.
# This is easily implemented in the plot function specifying the 'log' argument.
options(scipen=4) # To keep axis without scientific notations
boxplot(mass_kg~family,whales,horizontal=TRUE,log="x")
## Q7: Examine why the Physeteridae family produces such a peculiar boxplot.
#------------------------------------------------------------------------------
#### 3) A classic: Prussian cavalry officers kicked to death by horses ####
# This is a classic example in statistics based on the work by
# Ladislaus Bortkiewicz (1868-1931), a Russian economist and statistician,
# whose parents were Polish and lived and worked mostly in Germany.
# For his most famous work he used data on the number of officers in the
# Prussian cavalry which were kicked to deaths.
# The data spans 20 years and covers 14 corps, corps as in military units.
prussia<-read.csv("prussian.csv",stringsAsFactors=FALSE)
## Q8: Examine the fatality data and discuss what type of distribution would
# best fit the data.
# We continue by calculating the proportion of observations with size X.
# So we're interested in what percentage of the observations have 0 fatalities,
# the percentage with 1 fatality and so on.
table(prussia$deaths) # Frequency of fatalities
V<-as.vector(table(prussia$deaths)) # Vectorise the table
perc<-V/sum(V)*100 # Calculate proportion
dev.off() # Reset plot area
plot(0:4,perc,type="b",ylim=c(0,60),
     xlab="Number of fatalities",ylab="Percentage") # Plot the results
## Q9: Use your answer for question 8 to predict the proportion of
# observations of size X.
# Add your result to the existing plot using the code given below.
# Does your distribution fit the data well?
lines(0:4,'YOUR PREDICTIONS',lty=2,type="b")
# Let's examine the total number of fatalities per year.
# Aggregating the data to annual level.
kicks.yr<-aggregate(deaths~year,prussia,sum)
plot(kicks.yr$year,kicks.yr$deaths,type="b",
     xlim=c(1875,1894),ylim=c(0,20)) # Use 'xlim' and 'ylim' to adjust axis
## Q10: Suppose you're a young cavalry officer in the Prussian army.
# Which corps would you rather avoid being enlisted to?
# How much larger is the average fatality rate for this corps compared to all
# cavalry units?
# And which corps have below average fatality rates?
|
209155c47cf98a4107cb3d63303b2d6e6b6c086a
|
8ceef923a736f0161077cb8aeeb3a8ba595aff3c
|
/scripts/Non_cen_zeros.R
|
01eb569849353d5af90a87d8d28c5e222ccaa447
|
[] |
no_license
|
cont-limno/lagos_limno_qaqc
|
65501b0d23b4102cdf04841f492faf67e7ba5765
|
390ee4700c0533ff8095d184f6be29a5e2fb93e9
|
refs/heads/master
| 2022-12-01T05:24:57.802137
| 2020-08-05T19:13:47
| 2020-08-05T19:13:47
| 250,358,374
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,167
|
r
|
Non_cen_zeros.R
|
## Flag non-censored zero values in LAGOS water-quality data.
## For each variable column in Data, pull the event id, lake id, value, and the
## matching "<variable>_censorcode" column; keep rows whose value is exactly 0
## and whose censor code does NOT indicate a detection-limit censor
## (NC3 / NC4 / blank / NA), and record them with flag "non_cen_zero".
## Relies on objects defined upstream in the pipeline: Data, variable.cols,
## variable.names, event.col, lakeid.col (dplyr/tidyr/rlang attached).
## The accumulator is seeded with a sentinel row (variable "noah") so the final
## result always has the expected columns; the sentinel is dropped at the end.
temp_flag <- data.frame(eventida = 1, lagoslakeid = 1, variable = "noah",
                        value = 1, flag = "first_row")
flagged <- list()  # collect per-variable results; bind once (avoids O(n^2) rbind-in-loop)
for (i in seq_along(variable.cols)) {
  # Locate the companion censor-code column for this variable.
  sensor.col <- which(names(Data) == paste0(names(Data)[variable.cols[i]], "_censorcode"))
  # (A historical exclusion of one variable -- tdn, i == 11 -- was already
  # commented out in earlier versions; all variables are processed.)
  var_data <- Data[, c(event.col, lakeid.col, variable.cols[i], sensor.col)] %>%
    drop_na() %>%
    filter((!!sym(variable.names[i])) == 0)
  if (nrow(var_data) > 0) {
    names(var_data)[4] <- "censor_code"
    # Keep zeros whose censor code says "not censored" (or is blank/missing).
    var_data <- var_data %>%
      filter(censor_code == "NC4" | censor_code == "" |
               is.na(censor_code) | censor_code == "NC3")
    if (nrow(var_data) > 0) {
      flagged[[length(flagged) + 1L]] <- data.frame(eventida = var_data[, 1],
                                                    lagoslakeid = var_data[, 2],
                                                    variable = variable.names[i],
                                                    value = var_data[, 3],
                                                    flag = "non_cen_zero")
    }
  }
}
temp_flag <- do.call(rbind, c(list(temp_flag), flagged))
# Drop the sentinel row; secchi zeros are handled elsewhere in the pipeline.
temp_flag <- temp_flag %>% filter(variable != "noah") %>% filter(variable != "secchi")
|
3aaaa3b4503fbae83bf42af851606d4f07917097
|
3163e89817ded391b753a1932421b96241756633
|
/tests/testthat.R
|
2308860708bf3e268a975e9566d493c4d3138d1e
|
[
"Apache-2.0"
] |
permissive
|
fursham-h/ponder
|
c1612c1e2dfc1dba64ddc75fb368be6e0538bfc2
|
5131a51c73fcf2a28fd43122f97b23932a59c4ca
|
refs/heads/master
| 2022-03-27T11:16:34.321088
| 2019-12-08T23:31:26
| 2019-12-08T23:31:26
| 126,469,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
testthat.R
|
# Test runner executed by R CMD check: runs all testthat tests for NMDer.
library(testthat)
library(NMDer)
# Mouse genome annotation package loaded up front because the test fixtures
# reference mm10 sequences.
library("BSgenome.Mmusculus.UCSC.mm10")
test_check("NMDer")
|
c0995323a6b7c655d821dbc6e0758122e7983aa2
|
ec0988738060d355c6fa820946b5833dabf6af8b
|
/4.R
|
ad6e89e0088fd05d38da88659704dc7fec976485
|
[] |
no_license
|
karangambhir/ProgrammingAssignment2
|
d1738c4a18789ee0d4ea35271f330f20860d0876
|
a1d42bdbc162ee0e9b2d80b4bcb0ce7dce0afac5
|
refs/heads/master
| 2020-12-03T04:05:23.505141
| 2017-07-09T19:12:21
| 2017-07-09T19:12:21
| 95,814,605
| 0
| 0
| null | 2017-06-29T19:58:11
| 2017-06-29T19:58:11
| null |
UTF-8
|
R
| false
| false
| 296
|
r
|
4.R
|
## Subset NEI observations related to coal combustion:
## keep rows whose SCC code appears among the SCC entries whose Short.Name
## mentions "coal" (case-insensitive match).
NEI_coal<- NEI[which(NEI$SCC %in% SCC[grep("coal",SCC$Short.Name,ignore.case = TRUE),"SCC"]),]
# Plot total emissions per year from coal combustion-related sources.
# stat="summary" with fun.y="sum" aggregates Emissions within each year.
g<-ggplot(NEI_coal,aes(year,Emissions))
g+geom_line(stat = "summary",fun.y="sum")+ labs(y="Emissions from coal combustion-related sources",x="Year (1999 - 2008)")
|
c8c76f78a2878b8933b952ee2aaefe48ada470a2
|
c62c7fdf7bf578d4d1c4b57f8654a632e8ed73c9
|
/Assignment2.R
|
acf01b8de755fd089382e16101b86bd5529d0be0
|
[] |
no_license
|
bethstephenson96/university
|
84b3ad3b091d16fd7ed930a684e5c0a05fc2c775
|
f0a58849dd13e4ed542b6fa85d3f0ef935969a09
|
refs/heads/master
| 2022-04-18T20:24:44.431011
| 2020-04-20T09:55:59
| 2020-04-20T09:55:59
| 238,655,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,264
|
r
|
Assignment2.R
|
## Assignment 2: verification of daily minimum-temperature forecasts.
## Reads paired observed/forecast minimum temperatures and compares their
## distributions, spread, and association.
data<-read.table('conc_min.csv',TRUE,sep=",")
data
# Split the columns: day index, observed min temp, forecast min temp.
day<-data[,1]
obmintemp<-data[,2]
fcmintemp<-data[,3]
# Descriptive statistics; na.rm=TRUE guards against missing observations.
summary(obmintemp)
summary(fcmintemp)
sd(obmintemp,na.rm=TRUE)
sd(fcmintemp)
var(obmintemp,na.rm=TRUE)
var(fcmintemp)
# Mean/SD pairs used to overlay fitted normal curves on the histograms below.
m<-mean(obmintemp,na.rm=TRUE)
std<-sqrt(var(obmintemp,na.rm=TRUE))
m1<-mean(fcmintemp)
std1<-sqrt(var(fcmintemp))
# Histogram of the observed series with a fitted normal density.
hist(obmintemp,
     prob=TRUE,
     main="Histogram of observed minimum temperature",
     xlab="Temperature, (degrees celcius)",
     col="orange",
     las=1)
curve(dnorm(x, mean=m, sd=std),
      col="darkblue",lwd=2,add=TRUE,yaxt="n")
# Histogram of the forecast series with a fitted normal density.
hist(fcmintemp,
     prob=TRUE,
     main="Histogram of forecast minimum temperature",
     xlab="Temperature, (degrees celcius)",
     col="pink",
     las=1)
curve(dnorm(x, mean=m1, sd=std1),
      col="darkblue",lwd=2,add=TRUE,yaxt="n")
# Student's t test (assumes equal variances):
t.test(obmintemp,fcmintemp,var.equal=TRUE)
# F test comparing the two variances (90% confidence level):
var.test(obmintemp,fcmintemp,conf.level = 0.9)
?var.test
# Welch's t test (does not assume equal variances):
t.test(obmintemp,fcmintemp,alternative="two.sided",var.equal=FALSE)
# Correlation between observed and forecast (complete pairs only):
cor(obmintemp,fcmintemp,use="complete.obs")
#method=c("spearman"))
cor.test(obmintemp,fcmintemp,use="complete.obs")
?cor
# Simple linear regression of forecast on observed.
fit<-lm(fcmintemp~obmintemp)#not sure if i can use as lin reg means one depends on other
summary(fit)
plot(obmintemp,fcmintemp,
     main="Scattergraph of observed minimum temperature and forcast minimum temperature",
     xlab="Observed min temperature, (degrees celcius)",
     ylab="Forecast min temperature, (degrees celcius)",
     col="blue",
     las=1)
abline(lm(fcmintemp~obmintemp), col="darkred")
# Residual diagnostics: normal Q-Q plots of the residuals and the raw series.
residuals(fit)
qqnorm(residuals(fit))
qqnorm(obmintemp,
       main="Normal Q-Q plot for observed data")
qqnorm(fcmintemp,
       main="Normal Q-Q plot for forecast data")
# Forecast error = observed minus forecast.
obmintemp-fcmintemp
error<-obmintemp-fcmintemp
# Root-mean-square error of a vector of forecast errors.
# Missing entries are ignored (na.rm = TRUE), so a partially missing
# series still yields a value.
rmse <- function(error) {
  sqrt(mean(error * error, na.rm = TRUE))
}
rmse(error)
# NOTE(review): index 140 is set to NA and index 1 of the observed series is
# set to 3.9 -- presumably manual data corrections; confirm against the
# source data.
#turned pair into na values from this point
fcmintemp[140]=NA
obmintemp[1]=3.9
# Dichotomise both series at 0 degrees C (frost / no frost).
lowobtemp<-(obmintemp<=0)
highobtemp<-obmintemp>0
lowfctemp<-fcmintemp<=0
highfctemp<-fcmintemp>0
# 2x2 contingency counts: a = frost observed & forecast (hit),
# b = no frost observed but frost forecast (false alarm),
# c = frost observed but not forecast (miss),
# d = no frost observed or forecast (correct rejection).
# NOTE(review): the variable 'c' shadows base::c; calls like c(0,1) still
# resolve to the function, but the name is best avoided.
aa<-lowobtemp&lowfctemp
a<-length(which((aa==TRUE)))
bb<-highobtemp&lowfctemp
b<-length(which((bb==TRUE)))
cc<-lowobtemp&highfctemp
c<-length(which((cc==TRUE)))
dd<-highobtemp&highfctemp
d<-length(which((dd==TRUE)))
# Marginal totals for each category.
length(which((lowobtemp==TRUE)))
length(which((highobtemp==TRUE)))
length(which((lowfctemp==TRUE)))
length(which((highfctemp==TRUE)))
58+305 # stray arithmetic left by the author -- presumably a marginal-total check
a
b
c
d
# Cell proportions over the 364 usable days.
proba<-a/364
probb<-b/364
probc<-c/364
probd<-d/364
#skill scores and measures of accuracy
phi<-((a*d)-(b*c))/sqrt((a+b)*(c+d)*(a+c)*(b+d)) # phi coefficient
pod<-a/(a+c) # probability of detection
far<-b/(a+b) # false alarm ratio
csi<-a/(a+b+c) # critical success index
accuracy<-(a+d)/(a+b+c+d) # proportion correct
bias<-(a+b)/(a+c) # frequency bias
pofd<-b/(b+d) # probability of false detection
heidke<-(2*((a*d)-(b*c)))/((a+b)*(c+d)*(a+c)*(b+d)) # Heidke skill score
hk<-((a*d)-(b*c))/((b+d)*(a+c)) # Hanssen-Kuipers discriminant
clayton<-((a*d)-(b*c))/((a+b)*(c+d)) # Clayton skill score
aref<-((a+b)*(a+c))/364 # hits expected by chance
ets<-(a-aref)/(a+b+c-aref) # equitable threat score
phi
pod
far
csi
accuracy
bias
pofd
heidke
hk
clayton
ets
#PERSISTENCE
# Benchmark 1: persistence forecast read from 'new.txt'
# (presumably yesterday's observation used as today's forecast -- confirm).
data2<-read.table('new.txt')
persistence<-data2[,1]
obmintemp[1]=NA # first day has no persistence forecast, so exclude it
lowpertemp<-persistence<=0
highpertemp<-persistence>0
# Same 2x2 contingency construction as for the main forecast.
aa2<-lowobtemp&lowpertemp
a2<-length(which((aa2==TRUE)))
bb2<-highobtemp&lowpertemp
b2<-length(which((bb2==TRUE)))
cc2<-lowobtemp&highpertemp
c2<-length(which((cc2==TRUE)))
dd2<-highobtemp&highpertemp
d2<-276 # NOTE(review): hard-coded count instead of length(which(dd2==TRUE)) -- verify
length(which((lowobtemp==TRUE)))
length(which((highobtemp==TRUE)))
length(which((lowpertemp==TRUE)))
length(which((highpertemp==TRUE)))
# Skill scores for the persistence benchmark (same formulas as above).
phi2<-((a2*d2)-(b2*c2))/sqrt((a2+b2)*(c2+d2)*(a2+c2)*(b2+d2))
pod2<-a2/(a2+c2)
far2<-b2/(a2+b2)
csi2<-a2/(a2+b2+c2)
accuracy2<-(a2+d2)/(a2+b2+c2+d2)
bias2<-(a2+b2)/(a2+c2)
pofd2<-b2/(b2+d2)
heidke2<-(2*((a2*d2)-(b2*c2)))/((a2+b2)*(c2+d2)*(a2+c2)*(b2+d2))
hk2<-((a2*d2)-(b2*c2))/((b2+d2)*(a2+c2))
clayton2<-((a2*d2)-(b2*c2))/((a2+b2)*(c2+d2))
aref2<-((a2+b2)*(a2+c2))/363 # 363 usable pairs here (two days excluded)
ets2<-(a2-aref2)/(a2+b2+c2-aref2)
phi2
pod2
far2
csi2
accuracy2
bias2
pofd2
heidke2
hk2
clayton2
ets2
#CLIMATE
# Benchmark 2: climatological forecast -- frost drawn at random with the
# observed frost frequency (59 frost days out of 364).
problow<-59/364
#problow<-length(which((lowobtemp==TRUE)))/364
# NOTE(review): no set.seed() before sample(), so these results are not
# reproducible from run to run.
climate<-sample(c(0,1),365,replace=TRUE,prob=c(1-problow,problow))
climate
climate[140]=NA # exclude the same missing day as in the observed series
lowclimtemp<-climate==1
highclimtemp<-climate==0
aa3<-lowobtemp&lowclimtemp
a3<-length(which((aa3==TRUE)))+1 # NOTE(review): manual +1 adjustment -- verify intent
bb3<-highobtemp&lowclimtemp
b3<-length(which((bb3==TRUE)))
cc3<-lowobtemp&highclimtemp
c3<-length(which((cc3==TRUE)))
dd3<-highobtemp&highclimtemp
d3<-length(which((dd3==TRUE)))
length(which((lowclimtemp==TRUE)))
length(which((highclimtemp==TRUE)))
a3
b3
c3
d3
# Skill scores for the climatology benchmark.
phi3<-((a3*d3)-(b3*c3))/sqrt((a3+b3)*(c3+d3)*(a3+c3)*(b3+d3))
pod3<-a3/(a3+c3)
far3<-b3/(a3+b3)
csi3<-a3/(a3+b3+c3)
accuracy3<-(a3+d3)/(a3+b3+c3+d3)
bias3<-(a3+b3)/(a3+c3)
pofd3<-b3/(b3+d3)
heidke3<-(2*((a3*d3)-(b3*c3)))/((a3+b3)*(c3+d3)*(a3+c3)*(b3+d3))
hk3<-((a3*d3)-(b3*c3))/((b3+d3)*(a3+c3))
clayton3<-((a3*d3)-(b3*c3))/((a3+b3)*(c3+d3))
aref3<-((a3+b3)*(a3+c3))/364
ets3<-(a3-aref3)/(a3+b3+c3-aref3)
phi3
pod3
far3
csi3
accuracy3
bias3
pofd3
heidke3
hk3
clayton3
ets3
# NOTE(review): hit/total counts below are hard-coded (34, 31, 59, 12) rather
# than taken from the variables computed above -- verify they match.
skillscore1<-(34-31)/(59-31) #compare forecast to persistence
skillscore2<-(34-12)/(59-12) #compare forecast to climatology
|
7714246976bb01cff73272f8fc4a313dcba2aeef
|
75cf3a2418eac8cca5798984534e7851e2c04951
|
/연습/prac_ts.R
|
45d0f924c0dcb08b480d5bcf1650ac726fc605ec
|
[
"MIT"
] |
permissive
|
Adrian123K/Pproject
|
b4aaa033c86a48e6238c45f9566a668f7dd6b00f
|
21908eaf075c7b5597bbc26b22b6aadb9847e4b0
|
refs/heads/master
| 2022-11-23T04:55:30.593429
| 2020-07-22T02:01:30
| 2020-07-22T02:01:30
| 279,070,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
prac_ts.R
|
## Time-series plotting exercises from "Forecasting: Principles and Practice"
## https://otexts.com/fppkr/graphics-exercises.html
## NOTE(review): hard-coded working directory; adjust to your machine.
setwd('D:/Desktop/Itwill ws/Pproject/연습')
# Install ggfortify only when missing instead of reinstalling on every run.
if (!requireNamespace('ggfortify', quietly = TRUE)) install.packages('ggfortify')
library(forecast)
library(ggfortify)
library(ggplot2) # (removed library(ggplot): no such package exists; ggplot2 is correct)
# Exercise 1: quarterly series from tute1.csv ----
tute1 <- read.csv('tute1.csv', header = TRUE)
View(tute1)
# Drop the first (date) column; quarterly data starting in 1981.
myts <- ts(tute1[,-1], start = 1981, frequency = 4)
autoplot(myts, facets = TRUE) # one panel per series
# Exercise 2: monthly Australian retail data ----
retail <- readxl::read_excel('retail.xlsx', skip = 1)
View(retail)
# One retail category (column A3349873A); monthly, starting April 1982.
myts_r <- ts(retail[,'A3349873A'], frequency = 12, start = c(1982, 4))
autoplot(myts_r)
ggseasonplot(myts_r)    # seasonal pattern, one line per year
ggsubseriesplot(myts_r) # mini time plots per month
gglagplot(myts_r)       # lagged scatterplots
ggAcf(myts_r)           # autocorrelation function
|
9bf4daecf35a69bb89e4ed64eae74adeec77bcfd
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/COMPoissonReg/inst/testfiles/qzicmp_cpp/libFuzzer_qzicmp_cpp/qzicmp_cpp_valgrind_files/1612729252-test.R
|
b19cf8e1f2e562e7fec3b7d557a0d37f8434debe
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
r
|
1612729252-test.R
|
# Fuzzer-generated regression input for COMPoissonReg's internal qzicmp_cpp
# routine (quantile function of the zero-inflated CMP distribution).
# The argument list records NaN, zero-length, and denormal edge cases found
# by a libFuzzer run under valgrind; it must be preserved byte-for-byte.
testlist <- list(lambda = NaN, logq = numeric(0), nu = c(1.53063836115601e-18, 1.53063836115601e-18, NaN, NaN, 5.43230922486616e-312, 3.23785921002061e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p = numeric(0), tol = 0)
# Invoke the (unexported) C++ wrapper and print the structure of the result;
# the test passes if this does not crash.
result <- do.call(COMPoissonReg:::qzicmp_cpp,testlist)
str(result)
|
7f35fbcaec2baddfe5a125418ca6efa1b51e1213
|
c2192e1e6d654254e1cc8f9ea6b39a53101e187d
|
/R/f_highermomentsIV_IIV.R
|
95b27d95a13b40610eb420fbfb62fdb7a0c27f26
|
[] |
no_license
|
mmeierer/REndo
|
ddae9421d35b8f8b9063d42500f5058c47eff1e6
|
cdf2193678be6da0cac5c2427684dd78eec260b1
|
refs/heads/master
| 2023-08-18T11:58:43.600081
| 2023-08-11T14:35:29
| 2023-08-11T14:35:29
| 38,715,858
| 13
| 3
| null | 2023-09-06T19:42:16
| 2015-07-07T21:29:23
|
R
|
UTF-8
|
R
| false
| false
| 4,273
|
r
|
f_highermomentsIV_IIV.R
|
#' Construct internal instrumental variables (IIVs) for higher-moments IV.
#'
#' Builds one set of internal instruments from the endogenous regressor, the
#' response, and/or user-chosen exogenous regressors, according to the chosen
#' \code{iiv} type (g, gp, gy, yp, p2, y2).
#'
#' @param F.formula Formula object with the model on rhs 1 and the endogenous
#'   regressor on rhs 2.
#' @param data Data to build the model frame from.
#' @param g Transformation applied to the exogenous regressors
#'   ("x2", "x3", "lnx", "1/x"); only used for iiv in (g, gp, gy).
#' @param iiv Which internal instrument to build.
#' @param ... Names of exogenous regressors used inside the IIV() term.
#' @return List with a readable description (\code{desc.IIV}) and the
#'   instrument data (\code{df.IIV}).
#' @importFrom stats model.response model.frame
#' @importFrom Formula model.part
higherMomentsIV_IIV <- function(F.formula, data, g=NULL, iiv, ...){
  # Capture the exogenous regressor specifications passed via the ellipsis
  l.iivregressors <- list(...)
  # Check inputs ---------------------------------------------------------------------------------------------
  check_err_msg(checkinput_highermomentsiv_g(g=g))
  check_err_msg(checkinput_highermomentsiv_iiv(iiv=iiv))
  check_err_msg(checkinput_highermomentsiv_iivVSg(g=g, iiv=iiv))
  check_err_msg(checkinput_highermomentsiv_iivregressors(l.iivregressors=l.iivregressors,
                                                         F.formula=F.formula, iiv=iiv))
  # discard given exogenous regressors and g if not needed because otherwise description is wrong
  if(iiv %in% c("y2", "p2", "yp")){
    l.iivregressors <- list()
    g <- NULL
  }
  # Read out needed data -------------------------------------------------------------------------------------
  # To multiply: Read out as matrices as data.frames allow no element-wise mutliplication if dimensions do
  # not match. For iiv = gp, gy this may be the case when col(g)>1
  names.exo.regs <- unique(unlist(l.iivregressors))
  mf <- model.frame(F.formula, rhs=1, lhs=1, data=data)
  vec.data.endo <- model.part(F.formula, data=mf, rhs=2, lhs = 0, drop=TRUE) # endo data from rhs 2 AS VECTOR
  # The exogenous regressors are only the ones specified in the IIV() part, not all exogenous ones.
  # Therefore read out the data by names. If none (NULL) this yields a zero-column data.frame.
  df.data.exo.iiv <- mf[, names.exo.regs, drop=FALSE]
  vec.data.y <- model.response(mf)
  # Calculate internal IVs -----------------------------------------------------------------------------------
  # determine g function, if needed (iiv = g, gp, gy)
  if(!is.null(g))
    fct.g <- switch(g,
                    "x2" = function(x){x^2},
                    "x3" = function(x){x^3},
                    "lnx" = function(x){log(x)},
                    "1/x" = function(x){1/x})
  # Column-wise de-mean helper: subtract the (column) mean so each IIV is centered
  de.mean <- function(x){
    if(length(dim(x)) > 1){
      # >1 col (data.frame,...).
      # Use sweep contrary to apply because it again returns data.frame
      # return(apply(x, MARGIN = 2, FUN = function(x){x-mean(x)}))
      return(sweep(x = x, MARGIN = 2, STATS = colMeans(x=x, na.rm = TRUE), FUN = "-"))
    }else{
      # vector
      return(x-mean(x))
    }
  }
  # IIV calculations: each case is a product of centered components
  df.IIV <- data.frame(res.iiv =
                         switch(EXPR = iiv,
                                # to allow element-wise multiplication for data.frames.
                                # The vector data HAS to be the first input
                                "g" = de.mean(fct.g(df.data.exo.iiv)), # IIV1
                                "gp" = de.mean(fct.g(df.data.exo.iiv)) * de.mean(vec.data.endo), # IIV2
                                "gy" = de.mean(fct.g(df.data.exo.iiv)) * de.mean(vec.data.y), # IIV3
                                "yp" = de.mean(vec.data.y) * de.mean(vec.data.endo), # IIV4
                                "p2" = de.mean(vec.data.endo)^2, # IIV5
                                "y2" = de.mean(vec.data.y)^2)) # IIV6
  # Naming ------------------------------------------------------------------------------------------------
  # Rename after IIV()
  # Cannot make in a single paste() call as double '.' would be introduced for empty components
  colnames.iiv <- paste0("IIV.is",iiv)
  colnames.iiv <- if(is.null(g)) colnames.iiv else paste0(colnames.iiv,".gis",g)
  colnames.iiv <- if(!length(names.exo.regs)) colnames.iiv else paste0(colnames.iiv,".regis",names.exo.regs)
  # Keep make.names for the case g="1/x" to be sure the name is always syntactically valid
  colnames(df.IIV) <- make.names(colnames.iiv)
  # Human-readable description of this IIV, e.g. IIV(iiv=gp,g=x2,reg1,reg2)
  desc.iiv <- paste0("IIV(iiv=",iiv)
  desc.iiv <- if(is.null(g)) desc.iiv else paste0(desc.iiv,",g=",g)
  desc.iiv <- if(!length(names.exo.regs)) desc.iiv else paste0(desc.iiv,",",paste(names.exo.regs,collapse = ","))
  desc.iiv <- paste0(desc.iiv,")")
  # Return ------------------------------------------------------------------------------------------------
  # Return list with readable description and IIV as data.frame
  return(list(desc.IIV = desc.iiv, df.IIV = df.IIV))
}
|
1e1db98c27ff8921daa8d2bb644a714c28717fd7
|
539948384886e13e600cce3b5d8ba189d6493151
|
/man/generate_DIAG.Rd
|
908067da0c91c8fa540cd3f82b10d4842604947d
|
[] |
no_license
|
huangrh/cmshcc
|
b9a32b33a0685ad3792d23f2445c5c24f57b8587
|
1150b4684fda2e2feb92fca841e29cd2caf86d30
|
refs/heads/master
| 2022-04-01T04:37:46.902487
| 2018-07-19T23:55:05
| 2018-07-19T23:55:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 737
|
rd
|
generate_DIAG.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evaluate.r
\name{generate_DIAG}
\alias{generate_DIAG}
\title{Generate sample diagnosis data}
\usage{
generate_DIAG(cmshcc_map, size = 100, seed = 2, max_dx = 10)
}
\arguments{
\item{cmshcc_map}{data frame is the CMSHCC diagnosis to HCC mapping in question (Required)}
\item{size}{integer is the number of rows of diagnoses that are required (default 100)}
\item{seed}{integer is the random seed starting value for reproducibility (default 2)}
\item{max_dx}{integer is the maximum number of diagnoses that a beneficiary can have (default 10)}
}
\value{
data frame DIAG is the list of HICNO and ICD diagnoses
}
\description{
Generate sample diagnosis data
}
|
7da2126ea0da960dc08e7bc57a1c121e6bf08250
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/ccaPP/R/cca.R
|
c7913388eacbb5071f581fd1c8ba7f6333319bb6
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,175
|
r
|
cca.R
|
# ------------------------------------
# Author: Andreas Alfons
# Erasmus University Rotterdam
# ------------------------------------
#' (Robust) CCA via alternating series of grid searches
#'
#' Perform canoncial correlation analysis via projection pursuit based on
#' alternating series of grid searches in two-dimensional subspaces of each
#' data set, with a focus on robust and nonparametric methods.
#'
#' The algorithm is based on alternating series of grid searches in
#' two-dimensional subspaces of each data set. In each grid search,
#' \code{nGrid} grid points on the unit circle in the corresponding plane are
#' obtained, and the directions from the center to each of the grid points are
#' examined. In the first iteration, equispaced grid points in the interval
#' \eqn{[-\pi/2, \pi/2)}{[-pi/2, pi/2)} are used. In each subsequent
#' iteration, the angles are halved such that the interval
#' \eqn{[-\pi/4, \pi/4)}{[-pi/4, pi/4)} is used in the second iteration and so
#' on. If only one data set is multivariate, the algorithm simplifies
#' to iterative grid searches in two-dimensional subspaces of the corresponding
#' data set.
#'
#' In the basic algorithm, the order of the variables in a series of grid
#' searches for each of the data sets is determined by the average absolute
#' correlations with the variables of the respective other data set. Since
#' this requires to compute the full \eqn{(p \times q)}{(p x q)} matrix of
#' absolute correlations, where \eqn{p} denotes the number of variables of
#' \code{x} and \eqn{q} the number of variables of \code{y}, a faster
#' modification is available as well. In this modification, the average
#' absolute correlations are computed over only a subset of the variables of
#' the respective other data set. It is thereby possible to use randomly
#' selected subsets of variables, or to specify the subsets of variables
#' directly.
#'
#' Note that also the data sets are ordered according to the maximum average
#' absolute correlation with the respective other data set to ensure symmetry
#' of the algorithm.
#'
#' For higher order canonical correlations, the data are first transformed into
#' suitable subspaces. Then the alternate grid algorithm is applied to the
#' reduced data and the results are back-transformed to the original space.
#'
#' @aliases print.cca
#'
#' @param x,y each can be a numeric vector, matrix or data frame.
#' @param k an integer giving the number of canonical variables to compute.
#' @param method a character string specifying the correlation functional to
#' maximize. Possible values are \code{"spearman"} for the Spearman
#' correlation, \code{"kendall"} for the Kendall correlation, \code{"quadrant"}
#' for the quadrant correlation, \code{"M"} for the correlation based on a
#' bivariate M-estimator of location and scatter with a Huber loss function, or
#' \code{"pearson"} for the classical Pearson correlation (see
#' \code{\link{corFunctions}}).
#' @param control a list of additional arguments to be passed to the specified
#' correlation functional. If supplied, this takes precedence over additional
#' arguments supplied via the \code{\dots} argument.
#' @param nIterations,maxiter an integer giving the maximum number of
#' iterations.
#' @param nAlternate,maxalter an integer giving the maximum number of
#' alternate series of grid searches in each iteration.
#' @param nGrid,splitcircle an integer giving the number of equally spaced
#' grid points on the unit circle to use in each grid search.
#' @param select optional; either an integer vector of length two or a list
#' containing two index vectors. In the first case, the first integer gives
#' the number of variables of \code{x} to be randomly selected for determining
#' the order of the variables of \code{y} in the corresponding series of grid
#' searches, and vice versa for the second integer. In the latter case, the
#' first list element gives the indices of the variables of \code{x} to be used
#' for determining the order of the variables of \code{y}, and vice versa for
#' the second integer (see \dQuote{Details}).
#' @param tol,zero.tol a small positive numeric value to be used for
#' determining convergence.
#' @param standardize a logical indicating whether the data should be
#' (robustly) standardized.
#' @param fallback logical indicating whether a fallback mode for robust
#' standardization should be used. If a correlation functional other than the
#' Pearson correlation is maximized, the first attempt for standardizing the
#' data is via median and MAD. In the fallback mode, variables whose MADs are
#' zero (e.g., dummy variables) are standardized via mean and standard
#' deviation. Note that if the Pearson correlation is maximized,
#' standardization is always done via mean and standard deviation.
#' @param seed optional initial seed for the random number generator (see
#' \code{\link{.Random.seed}}). This is only used if \code{select} specifies
#' the numbers of variables of each data set to be randomly selected for
#' determining the order of the variables of the respective other data set.
#' @param \dots additional arguments to be passed to the specified correlation
#' functional. Currently, this is only relevant for the M-estimator. For
#' Spearman, Kendall and quadrant correlation, consistency at the normal model
#' is always forced.
#'
#' @return An object of class \code{"cca"} with the following components:
#' \item{cor}{a numeric vector giving the canonical correlation measures.}
#' \item{A}{a numeric matrix in which the columns contain the canonical vectors
#' for \code{x}.}
#' \item{B}{a numeric matrix in which the columns contain the canonical vectors
#' for \code{y}.}
#' \item{centerX}{a numeric vector giving the center estimates used in
#' standardization of \code{x}.}
#' \item{centerY}{a numeric vector giving the center estimates used in
#' standardization of \code{y}.}
#' \item{scaleX}{a numeric vector giving the scale estimates used in
#' standardization of \code{x}.}
#' \item{scaleY}{a numeric vector giving the scale estimates used in
#' standardization of \code{y}.}
#' \item{call}{the matched function call.}
#'
#' @note \code{CCAgrid} is a simple wrapper function for \code{ccaGrid} for
#' more compatibility with package \pkg{pcaPP} concerning function and argument
#' names.
#'
#' @author Andreas Alfons
#'
#' @seealso \code{\link{ccaProj}}, \code{\link{maxCorGrid}},
#' \code{\link{corFunctions}}
#'
#' @examples
#' data("diabetes")
#' x <- diabetes$x
#' y <- diabetes$y
#'
#' ## Spearman correlation
#' ccaGrid(x, y, method = "spearman")
#'
#' ## Pearson correlation
#' ccaGrid(x, y, method = "pearson")
#'
#' @keywords multivariate robust
#'
#' @importFrom Rcpp evalCpp
#' @import robustbase
#' @useDynLib ccaPP, .registration = TRUE
#' @export
ccaGrid <- function(x, y, k = 1,
        method = c("spearman", "kendall", "quadrant", "M", "pearson"),
        control = list(...), nIterations = 10, nAlternate = 10,
        nGrid = 25, select = NULL, tol = 1e-06, standardize = TRUE,
        fallback = FALSE, seed = NULL, ...) {
    # remember how we were called so the returned object can report it
    matchedCall <- match.call()
    # bundle the (coerced) tuning parameters for the grid-search algorithm
    ppControl <- list(nIterations = as.integer(nIterations),
                      nAlternate = as.integer(nAlternate),
                      nGrid = as.integer(nGrid),
                      select = select,
                      tol = as.numeric(tol))
    # delegate the actual computation to the internal workhorse
    out <- ccaPP(x, y, k, method = method, corControl = control,
                 algorithm = "grid", ppControl = ppControl,
                 standardize = standardize, fallback = fallback, seed = seed)
    # attach this function's call rather than the workhorse's
    out$call <- matchedCall
    out
}
## wrapper function for more compatibility with package pcaPP
#' @rdname ccaGrid
#' @export
CCAgrid <- function(x, y, k = 1,
        method = c("spearman", "kendall", "quadrant", "M", "pearson"),
        maxiter = 10, maxalter = 10, splitcircle = 25, select=NULL,
        zero.tol = 1e-06, standardize = TRUE, fallback = FALSE,
        seed = NULL, ...) {
    # pcaPP-style wrapper: translate the argument names and forward to ccaGrid()
    matchedCall <- match.call()
    out <- ccaGrid(x, y, k = k, method = method, nIterations = maxiter,
                   nAlternate = maxalter, nGrid = splitcircle, select = select,
                   tol = zero.tol, standardize = standardize,
                   fallback = fallback, seed = seed, ...)
    # report this wrapper's call instead of the inner one
    out$call <- matchedCall
    out
}
#' (Robust) CCA via projections through the data points
#'
#' Perform canoncial correlation analysis via projection pursuit based on
#' projections through the data points, with a focus on robust and
#' nonparametric methods.
#'
#' First the candidate projection directions are defined for each data set
#' from the respective center through each data point. Then the algorithm
#' scans all \eqn{n^2} possible combinations for the maximum correlation,
#' where \eqn{n} is the number of observations.
#'
#' For higher order canonical correlations, the data are first transformed into
#' suitable subspaces. Then the alternate grid algorithm is applied to the
#' reduced data and the results are back-transformed to the original space.
#'
#' @param x,y each can be a numeric vector, matrix or data frame.
#' @param k an integer giving the number of canonical variables to compute.
#' @param method a character string specifying the correlation functional to
#' maximize. Possible values are \code{"spearman"} for the Spearman
#' correlation, \code{"kendall"} for the Kendall correlation, \code{"quadrant"}
#' for the quadrant correlation, \code{"M"} for the correlation based on a
#' bivariate M-estimator of location and scatter with a Huber loss function, or
#' \code{"pearson"} for the classical Pearson correlation (see
#' \code{\link{corFunctions}}).
#' @param control a list of additional arguments to be passed to the specified
#' correlation functional. If supplied, this takes precedence over additional
#' arguments supplied via the \code{\dots} argument.
#' @param standardize a logical indicating whether the data should be
#' (robustly) standardized.
#' @param useL1Median a logical indicating whether the \eqn{L_{1}}{L1} medians
#' should be used as the centers of the data sets in standardization (defaults
#' to \code{TRUE}). If \code{FALSE}, the columnwise centers are used instead
#' (columnwise means if \code{method} is \code{"pearson"} and columnwise
#' medians otherwise).
#' @param fallback logical indicating whether a fallback mode for robust
#' standardization should be used. If a correlation functional other than the
#' Pearson correlation is maximized, the first attempt for standardizing the
#' data is via median and MAD. In the fallback mode, variables whose MADs are
#' zero (e.g., dummy variables) are standardized via mean and standard
#' deviation. Note that if the Pearson correlation is maximized,
#' standardization is always done via mean and standard deviation.
#' @param \dots additional arguments to be passed to the specified correlation
#' functional. Currently, this is only relevant for the M-estimator. For
#' Spearman, Kendall and quadrant correlation, consistency at the normal model
#' is always forced.
#'
#' @return An object of class \code{"cca"} with the following components:
#' \item{cor}{a numeric vector giving the canonical correlation
#' measures.}
#' \item{A}{a numeric matrix in which the columns contain the canonical vectors
#' for \code{x}.}
#' \item{B}{a numeric matrix in which the columns contain the canonical vectors
#' for \code{y}.}
#' \item{centerX}{a numeric vector giving the center estimates used in
#' standardization of \code{x}.}
#' \item{centerY}{a numeric vector giving the center estimates used in
#' standardization of \code{y}.}
#' \item{scaleX}{a numeric vector giving the scale estimates used in
#' standardization of \code{x}.}
#' \item{scaleY}{a numeric vector giving the scale estimates used in
#' standardization of \code{y}.}
#' \item{call}{the matched function call.}
#'
#' @note \code{CCAproj} is a simple wrapper function for \code{ccaProj} for
#' more compatibility with package \pkg{pcaPP} concerning function names.
#'
#' @author Andreas Alfons
#'
#' @seealso \code{\link{ccaGrid}}, \code{\link{maxCorProj}},
#' \code{\link{corFunctions}}
#'
#' @examples
#' data("diabetes")
#' x <- diabetes$x
#' y <- diabetes$y
#'
#' ## Spearman correlation
#' ccaProj(x, y, method = "spearman")
#'
#' ## Pearson correlation
#' ccaProj(x, y, method = "pearson")
#'
#' @keywords multivariate robust
#'
#' @importFrom Rcpp evalCpp
#' @import pcaPP
#' @import robustbase
#' @useDynLib ccaPP, .registration = TRUE
#' @export
ccaProj <- function(x, y, k = 1,
        method = c("spearman", "kendall", "quadrant", "M", "pearson"),
        control = list(...), standardize = TRUE, useL1Median = TRUE,
        fallback = FALSE, ...) {
    # Remember how we were called so the result can report it in its
    # "call" component.
    thisCall <- match.call()
    # Delegate the computation to the workhorse, fixing the
    # projection-pursuit algorithm and forwarding all tuning options.
    result <- ccaPP(
        x, y, k,
        method = method,
        corControl = control,
        algorithm = "proj",
        ppControl = list(useL1Median = isTRUE(useL1Median)),
        standardize = standardize,
        fallback = fallback
    )
    result$call <- thisCall
    result
}
## wrapper function for more compatibility with package pcaPP
#' @rdname ccaProj
#' @export
CCAproj <- function(x, y, k = 1,
        method = c("spearman", "kendall", "quadrant", "M", "pearson"),
        standardize = TRUE, useL1Median = TRUE, fallback = FALSE,
        ...) {
    # Capture the user's call, then simply forward everything to
    # ccaProj() and restamp the "call" component with our own.
    thisCall <- match.call()
    result <- ccaProj(
        x, y, k = k,
        method = method,
        standardize = standardize,
        useL1Median = useL1Median,
        fallback = fallback,
        ...
    )
    result$call <- thisCall
    result
}
## workhorse function
## Workhorse shared by the user-facing CCA wrappers: validates the inputs,
## assembles the projection-pursuit control list and dispatches to the
## compiled C++ routine that maximizes the chosen correlation functional.
ccaPP <- function(x, y, k = 1,
        method = c("spearman", "kendall", "quadrant", "M", "pearson"),
        corControl, forceConsistency = TRUE,
        algorithm = c("grid", "proj"), ppControl, standardize = TRUE,
        fallback = FALSE, seed = NULL) {
    ## initializations
    x <- as.matrix(x)
    y <- as.matrix(y)
    n <- nrow(x)
    # both data sets must describe the same observations
    if(nrow(y) != n) {
        stop("'x' and 'y' must have the same number of observations")
    }
    p <- ncol(x)
    q <- ncol(y)
    # check number of canonical variables to compute: coerce to a single
    # integer, fall back to the formal default for invalid values, and cap
    # at min(p, q)
    k <- rep(as.integer(k), length.out=1)
    if(is.na(k) || k < 0) k <- formals()$k
    k <- min(k, p, q)
    ## prepare the data and call C++ function
    if(n == 0 || p == 0 || q == 0 || k == 0) {
        # zero dimension: return an empty, NA-valued result without
        # touching the C++ code
        A <- B <- matrix(numeric(), 0, 0)
        cca <- list(cor=NA, A=A, B=B)
    } else {
        # check high-dimensional data
        # NOTE(review): the two warning parts are concatenated without a
        # separating space, yielding "implementedfor".
        if(k > 1 && (n <= p+1 || n <= q+1)) {
            k <- 1
            warning("higher-order canonical correlations not yet implemented",
                "for high-dimensional data")
        }
        # check method and get list of control arguments
        method <- match.arg(method)
        corControl <- getCorControl(method, corControl, forceConsistency)
        # additional checks for grid search algorithm
        if(algorithm == "grid") {
            # check subset of variables to be used for determining the order of
            # the variables from the respective other data set
            select <- ppControl$select
            ppControl$select <- NULL
            if(!is.null(select)) {
                if(is.list(select)) {
                    # make sure select is a list with two index vectors and
                    # drop invalid indices from each vector (converted to
                    # 0-based indices for the C++ side)
                    select <- rep(select, length.out=2)
                    select <- mapply(function(indices, max) {
                        indices <- as.integer(indices)
                        indices[which(indices > 0 & indices <= max)] - 1
                    }, select, c(p, q))
                    valid <- sapply(select, length) > 0
                    # add the two index vectors to control object
                    if(all(valid)) {
                        ppControl$selectX <- select[[1]]
                        ppControl$selectY <- select[[2]]
                    } else select <- NULL
                } else {
                    # check number of indices to sample
                    select <- rep(as.integer(select), length.out=2)
                    valid <- !is.na(select) & select > 0 & select < c(p, q)
                    if(all(valid)) {
                        # generate index vectors and add them to control object
                        # (seed makes the random subset reproducible)
                        if(!is.null(seed)) set.seed(seed)
                        ppControl$selectX <- sample.int(p, select[1]) - 1
                        ppControl$selectY <- sample.int(q, select[2]) - 1
                    } else select <- NULL
                }
            }
            # absent or invalid selection: pass empty index vectors,
            # meaning "use all variables"
            if(is.null(select)) {
                ppControl$selectX <- ppControl$selectY <- integer()
            }
        }
        # call C++ function
        cca <- .Call("R_ccaPP", R_x=x, R_y=y, R_k=k, R_method=method,
            R_corControl=corControl, R_algorithm=algorithm,
            R_ppControl=ppControl, R_standardize=isTRUE(standardize),
            R_fallback=isTRUE(fallback), PACKAGE="ccaPP")
        # drop matrix dimensions from the scalar/vector components; the
        # local character vector `drop` does not shadow base::drop() here
        # because lapply() looks its FUN argument up in function mode
        drop <- c("cor", "centerX", "centerY", "scaleX", "scaleY")
        cca[drop] <- lapply(cca[drop], drop)
    }
    ## assign class and return results
    class(cca) <- "cca"
    cca
}
|
bbf5e0e118458c708b7fab44520a77891a39b93d
|
a0e54828bf4aa9ff38baada5dadafce8a3f0e4bb
|
/align_scf.R
|
495ed7636183550746af84eef7c65fbaa4e2da49
|
[] |
no_license
|
jhavsmith/mimicry
|
694737f08a77550dd2f48512440806df7b26bbe3
|
866bd1aadc30fb1b3c887810baaa827698f35eb3
|
refs/heads/master
| 2021-05-13T11:21:08.734247
| 2019-02-04T18:18:02
| 2019-02-04T18:18:02
| 117,120,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,853
|
r
|
align_scf.R
|
## align_scf.R -- map SureSelect allele-frequency rows onto absolute
## scaffold coordinates using a scaffold lookup table.
## Objects loaded below: scf.table, freq.matrix, Mapp.scf, Null.scf, scf.map.
load("scf_table.RDATA")
load("freq_matrix_SureSelect.RDATA")
load("Mapp_scf.RDATA")
load("Null_scf.RDATA")
load("scf_map.RDATA")
# Result container: one row per frequency-matrix row, 9 columns
# (chromosome, scaffold, absolute position, 2 alleles, 4 frequencies).
scf.map = data.frame(matrix(nrow=nrow(freq.matrix),ncol=9))
index=1
# Bookkeeping vectors of scaffold indices without / with frequency matches.
# NOTE(review): both start with a dummy 0 that is never removed.
Null.scf = 0
Mapp.scf = 0
for (i in 1:nrow(scf.table)) {
    index.b = index-1
    # All frequency rows belonging to the i-th scaffold (matched by name).
    scf.matches = freq.matrix[which(freq.matrix[,1]==scf.table[i,4]),]
    if (length(scf.matches)==0) {print(paste("NULL",as.character(i)));Null.scf=c(Null.scf,i);next}
    print(i)
    Mapp.scf = c(Mapp.scf,i)
    # When several rows match, the subset is a matrix; a single matching
    # row collapses to a plain vector of length 8, hence the ==8 special
    # case.  NOTE(review): this heuristic breaks if a multi-row match ever
    # totals exactly 8 elements; `drop = FALSE` subsetting would be safer.
    if (length(scf.matches)!=8) {
        end.row=nrow(scf.matches)
        new.pos=scf.matches[,2]
        alleles=scf.matches[,c(3:4)]
        freqs=scf.matches[,c(5:8)]
        new.index = nrow(scf.matches)
    }
    if (length(scf.matches)==8) {
        end.row=1;new.pos=scf.matches[2]
        alleles=scf.matches[3:4]
        freqs=scf.matches[5:8]
        new.index = 1
    }
    # Write this scaffold's block of rows; column 3 converts the
    # within-scaffold position into an absolute coordinate using the
    # scaffold start stored in scf.table[i, 2].
    scf.map[c((index):((index.b)+end.row)),1] = scf.table[i,1]
    scf.map[c((index):((index.b)+end.row)),2] = scf.table[i,4]
    scf.map[c((index):((index.b)+end.row)),3] = (as.numeric(scf.table[i,2])-1)+as.numeric(new.pos)
    scf.map[c((index):((index.b)+end.row)),c(4:5)] = alleles
    scf.map[c((index):((index.b)+end.row)),c(6:9)] = as.numeric(freqs)
    index = index+new.index
}
## Copy the mapped table and collapse every "<chr>_unmapped" label onto its
## parent chromosome (e.g. "chr1_unmapped" -> "chr1").  Only the
## chromosomes listed below carried an "_unmapped" twin in the original
## one-assignment-per-chromosome code, so any other label passes through
## untouched (behavior identical to the 16 repeated assignments).
scf.d <- scf.map
unmapped.chroms <- c("chr1", "chr2", "chr3", "chr6", "chr7", "chr8",
                     "chr11", "chr12", "chr13", "chr15", "chr16", "chr17",
                     "chr18", "chr19", "chr20", "chrZ")
for (chrom in unmapped.chroms) {
    scf.d[which(scf.map[, 1] == paste0(chrom, "_unmapped")), 1] <- chrom
}
# Polarize allele frequencies relative to the first (reference) column.
# NOTE(review): `matrix` here must be a data object created in an earlier
# session/script -- inside this file alone it would resolve to
# base::matrix and the subsetting below would fail.  The same applies to
# no.chrom.d, window.table and scf.d.pol further down; confirm their
# origin before rerunning.
og.freqs=as.numeric(matrix[,1])
og.cons = round(og.freqs)
og.pol = og.freqs
# Flip frequencies whose reference consensus rounds to 0, so column 1
# always reports the frequency of the consensus allele.
og.pol[which(og.cons==0)] = 1 - og.freqs[which(og.cons==0)]
#polarizing: columns 2:4 become absolute deviations from the reference
no.chrom.d.pol=matrix
no.chrom.d.pol[,c(2:4)]= abs(matrix[,1]-matrix[,c(2:4)])
no.chrom.d.pol[,1] = og.pol
# Prepend an all-zero block of the same shape (placeholder columns).
filler = no.chrom.d.pol
for (i in 1:ncol(filler)) {
    filler[,i] = 0
}
no.chrom.d.pol = cbind(filler,no.chrom.d.pol)
no.chrom.d.pol[,2] = as.numeric(no.chrom.d[,2])
#window.tables for divergence: rows 22-23 are the outlier ("ol") windows,
#everything else is background ("bg")
ol = window.table[22:23,]
bg = window.table[-c(22:23),]
window.list = list("ol"=ol,"bg"=bg)
#get rid of list
# NOTE(review): `table` and `matrix` shadow the base functions of the same
# name; calls like matrix(...) below still work (R looks up a function
# binding in call position), but consider renaming for clarity.
table=scf.d.pol
matrix = (matrix(nrow=nrow(table),ncol=4))
for (j in 1:4) {
    matrix[,j] = as.numeric(paste(unlist(table[,j+4])))
}
blank.matrix=(matrix(nrow=nrow(table),ncol=4))
matrix = cbind(blank.matrix,matrix)
matrix[,2] = as.numeric(paste(unlist(table[,2])))
# NOTE(review): the line below is a shell (scp) command that was pasted
# into the R source; it is not valid R and makes the file unparseable.
# Run it from a terminal instead:
# scp -r joelsmith@beast.uchicago.edu:kronforst_lab/agl_project/scf_d.RDATA scf_d.RDATA
# Plot D statistics per scaffold/window.
# NOTE(review): `d.table` is not defined anywhere in this file -- it must
# come from an earlier session; confirm before rerunning.  Also, `factor`
# shadows base::factor (harmless here, but confusing).
factor = c(1:21)
plot.table = cbind(factor,d.table[1:21,3])
# PDF: background windows (blue) vs. outlier windows 22-23 (green).
pdf(file="agl_plot.pdf",width=8,height=6,pointsize = 10)
plot(plot.table,col = rgb(0,0,1,.7),lwd=10,ylim=c(-.2,.2),xlim=c(1,23))
points(cbind(c(22:23),d.table[22:23,3]),col = rgb(0,1,0,.7),lwd=10,ylim=c(-.2,.2),xlim=c(1,23))
abline(h=0,lwd=2)
dev.off()
# PNG: D per scaffold, highlighting the last two scaffolds in red.
png(file="~/kronforst_lab/figures/agl_D.png",width=550,height=550)
plot(c(1:279),d.table[,3],ylim=c(-.7,.5),xlim=c(0,300),ylab="D",xlab="scaffold")
points(c(278:279),d.table[278:279,3],col="red",lwd=3,ylim=c(-.7,.5),xlim=c(0,300),ylab="")
abline(h=0)
dev.off()
# PNG: SNP counts per scaffold (column 2 minus column 1), same highlight.
png(file="~/kronforst_lab/figures/agl_SNPs.png",width=550,height=550)
plot(c(1:279),d.table[,2]-d.table[,1],ylab="# of SNPs",xlab="scaffold",xlim=c(0,300),ylim=c(0,80000))
points(c(278:279),d.table[278:279,2]-d.table[278:279,1],lwd=3,col="red",xlim=c(0,300),ylim=c(0,80000))
dev.off()
|
c293ce2fc39f434bec090ae9256691638ce38935
|
2712ec1deafe0fa3a9864c786b207d9ed409539a
|
/R/30_gadm_getBbox.R
|
a3f861aa1365b1d983c0bc31b624bb42a5d143ee
|
[] |
no_license
|
Epiconcept-Paris/GADMTools
|
de556df201c7c6ec4fff0b6cd1e78c02d3f6cbd3
|
9549ec2da551de579a17c1ac5c6d55122a7be660
|
refs/heads/master
| 2021-01-17T13:05:39.092878
| 2020-03-04T20:53:28
| 2020-03-04T20:53:28
| 42,582,951
| 9
| 2
| null | 2020-01-07T11:38:54
| 2015-09-16T11:24:35
|
R
|
UTF-8
|
R
| false
| false
| 422
|
r
|
30_gadm_getBbox.R
|
# gadm_getBbox.gadm_sf ----------------------------------------------------
# =========================================================================
# S3 method: bounding box of a 'gadm_sf' wrapper, delegating to sf.
gadm_getBbox.gadm_sf <- function(x) {
  sf::st_bbox(x[["sf"]])
}
# gadm_getBbox.gadm_sp ----------------------------------------------------
# =========================================================================
# S3 method: bounding box of a 'gadm_sp' wrapper, via sp's bbox().
gadm_getBbox.gadm_sp <- function(x) {
  bbox(x[["spdf"]])
}
|
4c71d2207eb53f27c4084f74661e5b02caea675c
|
d32dd48eaed05b905937b0eff322927695114229
|
/R/dfrankcop.R
|
e6c1d9f8d5d7f925e649046ea00d4e61f5d2cdab
|
[] |
no_license
|
AlexanderRitz/copR
|
5b8177ef2fa3f37482f93f8cdb971fc6a52a2e54
|
9fae452eb91e437aae36e24b238b022e37058de6
|
refs/heads/master
| 2020-06-28T03:20:51.019617
| 2019-09-23T18:27:50
| 2019-09-23T18:27:50
| 200,130,597
| 1
| 1
| null | 2019-09-23T00:21:06
| 2019-08-01T23:15:49
|
R
|
UTF-8
|
R
| false
| false
| 1,920
|
r
|
dfrankcop.R
|
#' Construction of Frank pdf
#'
#' @inheritParams dcop
#'
#' @export
dcop.frankcop <- function(copula, u = NULL) {
  # Evaluate the density of a Frank copula at u, or return the pdf as an
  # (unevaluated) expression when u is NULL.  If the copula object stores
  # no pdf, one is derived symbolically by differentiating the stored cdf
  # with respect to every margin u1, ..., ud.
  if (is.null(copula$distribution$pdf)) {
    d <- copula$dimension
    theta <- copula$parameter  # referenced by the cdf/pdf expressions on eval()
    if (is.null(copula$distribution$cdf)) {
      # NOTE(review): "expresssion" typo kept verbatim (runtime message).
      stop("Supplied copula object does not contain a cdf expresssion")
    } else {
      # Differentiate the cdf once per dimension to obtain the pdf.
      pdf <- copula$distribution$cdf
      for (i in 1:d) {
        pdf <- stats::D(pdf, paste("u", i, sep = ""))
      }
      if (is.null(u)) {
        # No evaluation point requested: return the derived pdf re-parsed
        # as an expression (deparse/re-parse of the call object).
        return(parse(
          text = paste(
            as.character(pdf)[2],
            as.character(pdf)[1],
            as.character(pdf)[3],
            sep = ""
          )
        ))
      } else {
        if (length(u) == d) {
          if (!any(is.na(u))) {
            # The density is zero outside the open unit hypercube.
            if (any(u >= 1) || any(u <= 0)) {
              return(0)
            }
            # Bind u1..ud in this frame so eval() can resolve them.
            for (i in 1:d) {
              assign(paste("u", i, sep = ""), u[i])
            }
            eval(pdf)
          } else {
            stop(
              "Supplied u contains missing values!"
            )
          }
        } else {
          stop(
"Supplied data vector not of appropriate length. Has to be of the same
dimension as the supplied copula."
          )
        }
      }
    }
  } else if (is.null(u)) {
    # A pdf expression is already stored and no evaluation point was given.
    return(copula$distribution$pdf)
  } else {
    # A pdf expression is stored: validate u and evaluate it directly.
    d <- copula$dimension
    if (length(u) == d) {
      if (!any(is.na(u))) {
        if (any(u >= 1) || any(u <= 0)) {
          return(0)
        }
        for (i in 1:d) {
          assign(paste("u", i, sep = ""), u[i])
        }
        theta <- copula$parameter
        eval(copula$distribution$pdf)
      } else {
        stop(
          "Supplied u contains missing values!"
        )
      }
    } else {
      stop(
"Supplied data vector not of appropriate length. Has to be of the same
dimension as the supplied copula."
      )
    }
  }
}
|
96ae695d54c3fd6eb48c32d081613f4519efc81c
|
34aacdad79d993e55eca4da3c1cc40423dd86738
|
/lib/plot_theme.R
|
76aa33b44f4319a534bc1b7fb3c6cbb6324a09ad
|
[] |
no_license
|
bfrggit/R-sc
|
1d5cb37915bed8f325c0ad299f2c77dbb7b4ce96
|
0ca0a9c1c3c27226ab383df96911836758898779
|
refs/heads/master
| 2020-04-20T22:31:15.788936
| 2019-01-17T00:06:46
| 2019-01-17T00:06:46
| 169,142,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
plot_theme.R
|
# plot_theme.R
#
# Created: 2018-10-08
# Updated: 2019-01-16
# Author: Charles Zhu
#
# Color/shape theme for plots, derived from plot_theme_no_move.R.
# Defines a shared ggplot2 theme plus manual color/shape scales for the
# "selector" strategies so every figure in the project looks consistent.
#
if(!exists("EX_PLOT_THEME_R")) {
    # Include-once guard: sourcing this file twice must not redefine the
    # theme objects.  NOTE: at top level `<<-` behaves like plain `<-`
    # (assignment into the global environment).
    EX_PLOT_THEME_R <<- TRUE
    library(ggplot2)
    # Shared ggplot theme: large black axis/legend text, boxed legend
    # anchored to the bottom-right corner of the plotting area.
    ggplot_theme <<- theme_light() +
        theme(axis.text = element_text(size = 16, color = "black")) +
        theme(axis.title = element_text(size = 18)) +
        theme(legend.text = element_text(size = 16, color = "black")) +
        theme(legend.title = element_text(size = 18)) + # element_blank())
        theme(legend.background = element_rect(
            linetype = "solid", color = "gray30")) +
        theme(legend.justification = c(1, 0)) +
        theme(legend.position = c(1, 0))
    # Human-readable legend label for each selector strategy; commented
    # entries are currently disabled.
    scale_names_selector <<- c(
        "minimal" = "Minimal",
        "all" = "All",
        # "nodal" = "Nodal",
        "local" = "Local",
        # "nodal_lim" = "Nodal bnd.",
        "local_lim" = "Local bnd.",
        "interval_1" = "TTNI-driven"
    )
    # Fixed color per selector so colors stay stable across figures.
    scale_color_selector <<- scale_color_manual(
        name = "Selectors",
        labels = scale_names_selector,
        values = c(
            "minimal" = "brown",
            "all" = "tomato",
            # "nodal" = "purple",
            "local" = "mediumpurple",
            # "nodal_lim" = "blue",
            "local_lim" = "skyblue",
            "interval_1" = "black"
        )
    )
    # Fixed plotting symbol (pch code) per selector.
    scale_shape_selector <<- scale_shape_manual(
        name = "Selectors",
        labels = scale_names_selector,
        values = c(
            "minimal" = 16,
            "all" = 18,
            # "nodal" = 2,
            "local" = 17,
            # "nodal_lim" = 0,
            "local_lim" = 15,
            "interval_1" = 11
        )
    )
} # ENDIF
|
c62d2b86725a9d480b2e640fdd2a9910b1c02c2c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gdata/examples/case.Rd.R
|
2c3a6022ede700ba18e3c6b0c3d78f878f974261
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
case.Rd.R
|
# Auto-generated example script for gdata::case() (extracted from case.Rd).
library(gdata)
### Name: case
### Title: Map elements of a vector according to the provided 'cases'
### Aliases: case
### Keywords: manip
### ** Examples
## default = NA -- the value 4 matches no case and therefore maps to NA
case( c(1,1,4,3), "a"=1, "b"=2, "c"=3)
## default = "foo" -- unmatched values map to "foo" instead of NA
case( c(1,1,4,3), "a"=1, "b"=2, "c"=3, default="foo" )
|
932db261506412dfdae005afb7374dda05594a98
|
d1935dbdd888acdec4fa6e8a7c7b43e718948a20
|
/server.R
|
1b39aa9993d8b12fb63b2de932ecab124bbbc628
|
[] |
no_license
|
hoangphand/shiny-weed-stocks
|
5d8139458be552f702fcf8f4ddf6498b5427d0f3
|
a1604b4474b79e530430a21e2156135d02ba3e90
|
refs/heads/master
| 2021-10-25T05:23:24.572909
| 2019-04-02T00:01:43
| 2019-04-02T00:01:43
| 168,997,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,433
|
r
|
server.R
|
library(shiny)
library(corrplot)
# Project helpers: data access, plotting and portfolio-prediction
# utilities used by the server function below.
source("dataUtils.R")
source("plotUtils.R")
source("predictionUtils.R")
# Top-level assignment should use `<-`, not `=` (R style).
# NOTE(review): `duration` is never read in this file -- candidate for
# removal if none of the sourced scripts uses it.
duration <- NULL
# Shiny server: the whole app is driven by URL query-string parameters
# (?destination=...&selectedSymbols=...&...), parsed by the reactives below.
server <- function(input, output, session) {
  # --- URL query-string parsers -------------------------------------------
  destinationFromURL <- reactive({
    destination <- parseQueryString(session$clientData$url_search)$destination
  })
  # Comma-separated list of symbol ids -> integer vector.
  selectedSymbolsFromURL <- reactive({
    rawQuerySymbols <- parseQueryString(session$clientData$url_search)$selectedSymbols
    as.integer(unlist(strsplit(rawQuerySymbols, split=",")))
  })
  periodSelectionFromURL <- reactive({
    rawPeriodSelection <- parseQueryString(session$clientData$url_search)$periodSelection
  })
  # Returns c(fromDate, toDate) as character.
  dateRangeFromURL <- reactive({
    fromDate <- parseQueryString(session$clientData$url_search)$fromDate
    toDate <- parseQueryString(session$clientData$url_search)$toDate
    c(fromDate, toDate)
  })
  # NOTE(review): strtoi(base = 0L) relies on C strtol auto-base detection;
  # confirm this is intended (base = 10L is the usual choice).
  userPredictionFromURL <- reactive({
    rawUserPrediction <- strtoi(parseQueryString(session$clientData$url_search)$userPrediction, base = 0L)
  })
  investmentDurationFromURL <- reactive({
    rawNoOfDays <- strtoi(parseQueryString(session$clientData$url_search)$investmentDuration, base = 0L)
  })
  investmentAmountFromURL <- reactive({
    rawInitialAmount <- strtoi(parseQueryString(session$clientData$url_search)$investmentAmount, base = 0L)
  })
  symbolWeightsFromURL <- reactive({
    rawQuerySymbolWeights <- parseQueryString(session$clientData$url_search)$symbolWeights
    as.integer(unlist(strsplit(rawQuerySymbolWeights, split=",")))
  })
  # Price/data table for the selected symbols: explicit date range when no
  # period is given, otherwise a named period (helpers from dataUtils.R).
  dataOfSelectedSymbols <- reactive({
    if (is.null(periodSelectionFromURL())) {
      dateRange <- dateRangeFromURL()
      fromDate <- as.character.Date(dateRange[1])
      toDate <- as.character.Date(dateRange[2])
      readSymbolListPriceByDateRange(selectedSymbolsFromURL(), fromDate, toDate)
      # readSymbolListDataByDateRange(selectedSymbolsFromURL(), fromDate, toDate)
    } else {
      readSymbolListDataByPeriod(selectedSymbolsFromURL(), periodSelectionFromURL())
    }
  })
  # --- "home" destination plots -------------------------------------------
  # NOTE(review): the scalar guards below use `&` where `&&` would be
  # idiomatic (and short-circuiting).
  output$homePair <- renderPlot({
    if (!is.null(destinationFromURL()) & length(destinationFromURL()) > 0) {
      if (destinationFromURL() == 'home') {
        validate(
          need(length(selectedSymbolsFromURL()) >= 2, "Please select at least 2 symbols")
        )
        # Scatterplot matrix of all symbol columns (column 1 is dropped).
        pairs(dataOfSelectedSymbols()[, -1], pch = 16)
      }
    }
  })
  output$homeCorr <- renderPlot({
    if (!is.null(destinationFromURL()) & length(destinationFromURL()) > 0) {
      if (destinationFromURL() == 'home') {
        validate(
          need(length(selectedSymbolsFromURL()) >= 2, "Please select at least 2 symbols")
        )
        # Pairwise-complete correlations, rendered as a numeric corrplot.
        corMatrix = cor(dataOfSelectedSymbols()[,-1], use='pair')
        corrplot(corMatrix, method = "number")
      }
    }
  })
  output$homeIndividualSymbols <- renderPlot({
    if (!is.null(destinationFromURL()) & length(destinationFromURL()) > 0) {
      if (destinationFromURL() == 'home') {
        validate(
          need(length(selectedSymbolsFromURL()) >= 2, "Please select at least 2 symbols")
        )
        # One line per symbol on a shared axis (helper from plotUtils.R).
        plotMultilinesSameGraph(dataOfSelectedSymbols())
      }
    }
  })
  # --- "prediction" destination -------------------------------------------
  # Computes portfolio projections and renders both a text summary
  # (side effect: assigns output$predictionResult) and a plot.
  output$predictionUserInput <- renderPlot({
    if (!is.null(destinationFromURL()) & length(destinationFromURL()) > 0) {
      if (destinationFromURL() == 'prediction') {
        userPrediction = userPredictionFromURL()
        investmentDuration = investmentDurationFromURL()
        investmentAmount = investmentAmountFromURL()
        selectedSymbols = selectedSymbolsFromURL()
        symbolWeights = symbolWeightsFromURL()
        portfolioValues = getPortfolioValues(userPrediction, selectedSymbols,
                                             symbolWeights, investmentDuration, investmentAmount)
        # portfolioValuesToDisplay: [1] low, [2] best guess, [3] high,
        # [4] tail probability -- TODO confirm against predictionUtils.R.
        portfolioValuesToDisplay = calculatePortfolioValues(portfolioValues, investmentDuration)
        output$predictionResult <- renderUI({
          tagList(
            h4(paste("The original investment was $", format(round(investmentAmount), big.mark = ",",
                                                             scientific = F), "placed on", as.character(as.Date(Sys.Date())))),
            h4(paste("As of", as.character(as.Date(Sys.Date() + investmentDuration))), ", we think that:"),
            h4(paste("You have a", sprintf(" %.1f %%", 100 * portfolioValuesToDisplay[4]),
                     "chance of having less than $", format(round(portfolioValuesToDisplay[1]),
                                                            big.mark = ",", scientific = F))),
            h4(paste("You also have a", sprintf(" %.1f %%", 100 * portfolioValuesToDisplay[4]),
                     "chance of having more than $", format(round(portfolioValuesToDisplay[3]),
                                                            big.mark = ",", scientific = F))),
            h4(paste("Our best guess is that you will have about $", format(round(portfolioValuesToDisplay[2]),
                                                                            big.mark = ",", scientific = F))))
        })
        plotPortfolioValues(portfolioValues, investmentAmount, investmentDuration)
      }
    }
  })
  # Debug outputs kept for reference:
  # output$dataTable <- renderDataTable(dataOfSelectedSymbols())
  # output$table <- renderTable(dataOfSelectedSymbols()[, -1])
  # output$dataTable <- renderDataTable(dateRangeFromURL())
  # output$table <- renderTable(dateRangeFromURL())
  output$value <- renderText({
  })
}
|
da02988ba7eb58d728881839b9cc004498d99dfb
|
28ca05437ce8b1ef89e40096e090aaa069df086e
|
/cachematrix.R
|
e28fc018fac7e4f3fbf6ffd2a7068e4bb137b215
|
[] |
no_license
|
jaygoody101/ProgrammingAssignment2
|
3403017d2cc0e58cccccb8024d1da2dd0f474349
|
6d4bf6d9cd66ec879162d6e65916ad36d21ea6fe
|
refs/heads/master
| 2021-01-01T19:48:32.415561
| 2017-07-29T02:28:45
| 2017-07-29T02:28:45
| 98,693,088
| 0
| 0
| null | 2017-07-28T22:35:25
| 2017-07-28T22:35:25
| null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
cachematrix.R
|
## Pair of functions to invert a matrix and cache the result for subsequent use
## Using example "mean" function as a template:
## change vectors to matrices where appropriate
## change "NULL" setting to "NaN"
## change evaluate call from Mean to Solve
## Creates list of functions to store cached version of a matrix inverse
## Build a matrix wrapper that can cache its inverse.
## Returns a list of accessors: set/get for the matrix itself and
## setInv/getInv for the cached inverse.  A 1x1 NaN matrix serves as the
## "no cached inverse yet" sentinel; set() resets it.
makeCacheMatrix <- function(x = matrix()) {
    cached <- as.matrix(NaN)            # sentinel: inverse not computed yet
    list(
        set = function(y) {
            x <<- y                     # replace the stored matrix ...
            cached <<- as.matrix(NaN)   # ... and invalidate the cache
        },
        get = function() x,
        setInv = function(pInv) cached <<- pInv,
        getInv = function() cached
    )
}
## Checks for existence of cached matrix inverse.
## If present, uses it; if not, inverts input matrix
## Return the inverse of the special "cache matrix" produced by
## makeCacheMatrix().  A previously computed inverse is returned straight
## from the cache; otherwise it is computed with solve(), stored via
## x$setInv(), and returned.
##
## x   : list with get/getInv/setInv accessors (see makeCacheMatrix)
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    mInv <- x$getInv()
    # A 1x1 NaN matrix is the "not yet computed" sentinel, so a non-NaN
    # top-left entry means a cached inverse is available.
    # NOTE(review): the sentinel breaks down if a genuine inverse ever has
    # NaN at [1,1]; a NULL sentinel would be more robust, but changing it
    # would also require touching makeCacheMatrix().
    if (!is.nan(mInv[1, 1])) {
        message("getting cached data")
        return(mInv)
    }
    data <- x$get()
    mInv <- solve(data, ...)
    # Store the fresh inverse in the cache.  (The original wrapped this in
    # as.matrix(), a no-op since solve() already returns a matrix.)
    x$setInv(mInv)
    mInv
}
|
33e829821c2272b454fb7422e029e719a6cd31b0
|
be45ed71a7d3c32ffd4817f855020fc0d3cce82a
|
/man/Ctry.msci.sql.Rd
|
f64d2947312a3a07197d7380552d3dad80c33904
|
[] |
no_license
|
Turnado-dx/EPFR
|
bcf113fc7def4bc311356c93e1ba469c02d906fb
|
5a7369d3e6cde36fa0fe5749c774f0fdd26a9905
|
refs/heads/master
| 2020-12-14T04:09:29.963255
| 2020-01-17T14:45:57
| 2020-01-17T14:45:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 691
|
rd
|
Ctry.msci.sql.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{Ctry.msci.sql}
\alias{Ctry.msci.sql}
\title{Ctry.msci.sql}
\usage{
Ctry.msci.sql(fcn, x, y, n)
}
\arguments{
\item{fcn}{= function to convert from yyyymm to yyyymmdd}
\item{x}{= output of Ctry.msci}
\item{y}{= single two-character country code}
\item{n}{= date field such as DayEnding or WeightDate}
}
\description{
SQL query to get date restriction
}
\seealso{
Other Ctry: \code{\link{Ctry.info}},
\code{\link{Ctry.msci.index.changes}},
\code{\link{Ctry.msci.members.rng}},
\code{\link{Ctry.msci.members}}, \code{\link{Ctry.msci}},
\code{\link{Ctry.to.CtryGrp}}
}
\keyword{Ctry.msci.sql}
|
06496d5198338d95bd22446e0523c05bf2f68d67
|
02c6e4282975166d6f0ea71ff904eeee5fcf36b6
|
/R/read_pnadcovid19.R
|
1cdb5e4f9dc5f94d23dd2c9f971168ab97c24d78
|
[] |
no_license
|
rodrigoesborges/PNADcovidIBGE
|
69dc0235e4f554633a7fc5fc8db44a0194adef96
|
0dda3c9d831c6f5af2502d7ba9ad211623f36266
|
refs/heads/master
| 2022-12-29T03:25:02.744751
| 2020-10-15T03:03:54
| 2020-10-15T03:03:54
| 286,861,096
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,277
|
r
|
read_pnadcovid19.R
|
#' Read PNAD-COVID19 microdata
#' @import readr dplyr magrittr
#' @param microdata A text file containing microdata from PNAD-COVID19 survey. The file must be downloaded from \url{ftp://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_PNAD_COVID19/Microdados/}
#' @param vars Character vector of the name of the variables you want to keep for analysis. \code{default} is to keep all variables
#' @return A tibble with the survey design variables and selected variables.
#' @examples
#' data_path <- pnadcovid19_example("exampledata.txt")
#' pnadc.df <- read_pnadcovid19(data_path, vars = "VD4002")
#' @export
read_pnadcovid19 <- function(microdata, vars = NULL) {
  ########## reading data
  data_pnadcovid19 <- suppressWarnings(readr::read_csv(microdata))
  input <- names(data_pnadcovid19)
  ########## keeping design variables plus the user's selection
  if (!is.null(vars)) {
    # Warn about (rather than fail on) requested variables that are absent.
    if (any(!(vars %in% input))) {
      missvar <- vars[!(vars %in% input)]
      warning(paste("Variables", paste(missvar, collapse = ", "),
                    "not present in dataset\n"))
    }
    # Survey-design variables (PSU, stratum, weights, post-stratum) are
    # always retained.  all_of() makes the fixed selection strict, while
    # any_of() lets missing user variables be skipped -- matching the
    # warning above instead of erroring (bare character vectors inside
    # select() are deprecated in tidyselect).
    design_vars <- c("UPA", "Estrato", "V1027", "posest",
                     "V1029", "V1031", "V1030")
    data_pnadcovid19 %<>% select(all_of(design_vars), any_of(vars))
  }
  return(data_pnadcovid19)
}
|
42174dc938d49e10539b928b80ba58e3fa82332e
|
7e1fff52409b817e17f8ce7a7d61298df54c78db
|
/swarmAnal2020.R
|
9fe338b9a697acfba30902b732be9c41033fb458
|
[] |
no_license
|
guisantagui/MSK_repo_new
|
fad0b0bce6a7c33358be22eefb4ec5e78879b40f
|
ccc27afe72fb48c7d12f8fd3873a647b601696d3
|
refs/heads/master
| 2021-07-03T23:22:31.328607
| 2021-02-09T23:27:58
| 2021-02-09T23:27:58
| 220,031,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,654
|
r
|
swarmAnal2020.R
|
## Load the swarming measurements and normalize them per experiment.
# NOTE(review): hard-coded setwd()/absolute paths make this script
# machine-specific; consider relative paths or here::here().
setwd("C:/Users/Guillem/Documents/PhD/comput/wrkng_dirs_clean/swarmAnalysis")
swarmDat <- read.csv("C:/Users/Guillem/Documents/PhD/comput/pics/allSwarms.csv")
# Correct two mislabelled strain names.
swarmDat$Strains <- gsub("H5707", "H5708", swarmDat$Strains)
swarmDat$Strains <- gsub("M6057", "M6075", swarmDat$Strains)
# Normalize each experiment to the mean value of PA14 (for each measure).
# BUGFIX: `exps` was used below without ever being defined in this file;
# derive it from the experiment column.
exps <- unique(swarmDat$exp)
for (i in seq_along(exps)) {
  expDat <- swarmDat[swarmDat$exp == exps[i], ]
  # Reference rows: the PA14 control strain within this experiment.
  PA14 <- expDat[gsub("_.*", "", expDat$Strains) == "PA14", 2:8]
  PA14_mean <- apply(PA14, 2, mean)
  # Divide every measure column (2:8) by the matching PA14 mean.
  for (j in 2:8) {
    expDat[, j] <- expDat[, j] / PA14_mean[j - 1]
  }
  swarmDat[swarmDat$exp == exps[i], ] <- expDat
}
# PCA to see which of the swarming measures separates most strongly along
# the metabolite principal components.
# NOTE(review): install.packages() at source time is a heavy side effect;
# requireNamespace() checks would be gentler.
if(!require(factoextra)) install.packages("factoextra")
library(factoextra)
if(!require(readxl)) install.packages("readxl")
library(readxl)
# Load metabolomics data and dictionary (absolute, machine-specific paths).
load("C:/Users/Guillem/Documents/PhD/comput/wrkng_dirs_clean/normMetAnal/oldDataGood/ccmn_norm_mets_good_old.RData")
load("C:/Users/Guillem/Documents/PhD/comput/wrkng_dirs_clean/dictionary/dictionary.RData")
# Interactive preview of the consensus names (result not stored).
dictionary$Consensus[match(colnames(ccmn_norm_mets_good_old), dictionary$`Old Data Names`)]
# Rename metabolite columns to consensus names; drop unmatched columns.
ccmnNormMets <- ccmn_norm_mets_good_old
colnames(ccmnNormMets) <- dictionary$Consensus[match(colnames(ccmn_norm_mets_good_old), dictionary$`Old Data Names`)]
ccmnNormMets <- ccmnNormMets[, !is.na(colnames(ccmnNormMets))]
# Strain group = sample name up to the first underscore.
groups <- unique(gsub("_.*", replacement = "", rownames(ccmnNormMets)))
## Average the seven swarming measures (columns 2:8) within each strain.
##
## swarmMat: data frame whose first column ("Strains") holds labels of the
##           form "<strain>_<replicate>" and whose columns 2:8 hold numeric
##           swarming measures.
## Returns a data frame with one row per strain: the strain name followed
## by the per-strain column means, named like the first eight input columns.
getSwarmDataMeans <- function(swarmMat) {
  # Strain identifier = label up to the first underscore.
  strain_ids <- gsub("\\_.*", replacement = "", swarmMat$Strains)
  groups <- unique(strain_ids)
  # BUGFIX: the original located rows with grep(group, Strains), a partial
  # regex match, so e.g. strain "A1" would also pick up "BA1_2" rows (and
  # regex metacharacters in strain names would misbehave).  Match the
  # parsed strain id exactly instead.
  # Also preallocate via lapply rather than growing a data frame with
  # rbind() inside a loop.
  rows <- lapply(groups, function(g) {
    subMat <- swarmMat[strain_ids == g, ]
    apply(subMat[, 2:8], 2, mean)
  })
  meanDF <- as.data.frame(do.call(rbind, rows))
  meanDF <- cbind.data.frame(groups, meanDF)
  colnames(meanDF) <- colnames(swarmMat)[1:8]
  meanDF
}
# Per-strain means of the swarming measures, saved for reuse.
swarmDatMeans <- getSwarmDataMeans(swarmDat)
save(swarmDatMeans, file = "swarmDatMeans.RData")
# Align the per-strain means with the metabolomics sample order.
# NOTE(review): the gsub() call exploits partial argument matching --
# `rep` matches `replacement`, so the second positional argument is `x`
# (the row names); spelling out replacement/x would be clearer.
swarmDatMeansFilt <- swarmDatMeans[match(gsub("\\_.*|(PA14).*",
                                              rownames(ccmnNormMets),
                                              rep = "\\1"),
                                         swarmDatMeans$Strains), ]
swarmDatMeansFilt
# PCA of the metabolite matrix; scree plot for a quick variance check.
pcaMets <- prcomp(ccmnNormMets)
fviz_eig(pcaMets)
# One high-resolution TIFF per swarming measure, coloring the metabolite
# PCA individuals by that measure.
tiff("pcaSwarmMetsArea.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_ind(pcaMets,
             col.ind = swarmDatMeansFilt$Area,
             gradient.cols = c("#003CFF", "#66FF00", "#FF0000"),
             legend.title = "Area")
dev.off()
tiff("pcaSwarmMetsAreaPercentage.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_ind(pcaMets,
             col.ind = swarmDatMeansFilt$AreaPercentage,
             gradient.cols = c("#003CFF", "#66FF00", "#FF0000"),
             legend.title = "Area Percentage")
dev.off()
tiff("pcaSwarmMetsCircularity.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_ind(pcaMets,
             col.ind = swarmDatMeansFilt$Circularity,
             gradient.cols = c("#003CFF", "#66FF00", "#FF0000"),
             legend.title = "Circularity")
dev.off()
tiff("pcaSwarmMetsPerimeter.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_ind(pcaMets,
             col.ind = swarmDatMeansFilt$Perimeter,
             gradient.cols = c("#003CFF", "#66FF00", "#FF0000"),
             legend.title = "Perimeter")
dev.off()
tiff("pcaSwarmMetsMaxLength.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_ind(pcaMets,
             col.ind = swarmDatMeansFilt$MaxLength,
             gradient.cols = c("#003CFF", "#66FF00", "#FF0000"),
             legend.title = "Maximum Length")
dev.off()
# Pairwise Pearson correlations between the swarming measures (columns
# 2 .. ncol-1; the last column is the experiment id).  The original nested
# loop called cor() once per cell (O(p^2) calls); a single cor() on the
# measure columns returns the identical named, symmetric matrix.
corMat <- cor(swarmDat[, 2:(ncol(swarmDat) - 1)])
corMat
# Do PCA with the different measures of swarming.  Each column is first
# divided by its maximum so all values lie in [0, 1].  The goal is a
# linear combination of the measures that spreads most of the strain
# variability, usable as the response variable for supervised analysis.
save(swarmDat, file = "swarmDat.RData")
m <- swarmDatMeans[, 2:8]
m <- apply(m, 2, function(x) x/max(x))
# NOTE(review): the "." in the pattern is a regex wildcard; fixed = TRUE
# would match the literal ".tif" suffix more safely.
rownames(m) <- make.unique(gsub(".tif", replacement = "", swarmDatMeans$Strains))
pcaSwarms <- prcomp(m)
tiff("pcaSwarmMeasures.tiff", res = 300, height = 5000, width = 5000)
# Commented arguments below are deliberately disabled coloring options.
fviz_pca_ind(pcaSwarms#,
             #col.ind = swarmDatMeansFilt$MaxLength,
             #gradient.cols = c("#003CFF", "#66FF00", "#FF0000"),
             #legend.title = "Maximum Length"
)
dev.off()
tiff("biplotSwarmMeasures.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_biplot(pcaSwarms)
dev.off()
# Repeat using only columns 4 (AreaPercentage) and 6 (Circularity).
m <- swarmDatMeans[, c(4, 6)]
m <- apply(m, 2, function(x) x/max(x))
rownames(m) <- make.unique(gsub(".tif", replacement = "", swarmDatMeans$Strains))
pcaSwarms <- prcomp(m)
tiff("biplotSwarmMeasures_CircAreaPerc.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_biplot(pcaSwarms)
dev.off()
# Classical MDS (PCoA) on Canberra distances between strains.
# NOTE(review): naming the result `c` shadows base::c; later c(...) calls
# still work (function lookup), but renaming would be clearer.
distManh <- dist(m, method = "canberra")
c <- cmdscale(distManh,
              eig=TRUE,
              x.ret=TRUE)
# Percent of variance carried by each MDS axis.
mds.var.per <- round(c$eig/sum(c$eig)*100, 1)
mds.var.per
## now make a fancy looking plot that shows the MDS axes and the variation:
mds.values <- c$points
mds.data <- data.frame(Sample=rownames(mds.values),
                       X=mds.values[,1],
                       Y=mds.values[,2])
mds.data
ggplot(data=mds.data, aes(x=X, y=Y, label=Sample)) +
  geom_text() +
  theme_bw() +
  xlab(paste("MDS1 - ", mds.var.per[1], "%", sep="")) +
  ylab(paste("MDS2 - ", mds.var.per[2], "%", sep="")) +
  ggtitle("PCOA Swarm Data")
library(ggplot2)
# Per-strain means of the first principal component of the swarming
# measures; these become a coloring variable for the metabolite PCA.
pcomp1 <- pcaSwarms$x[, 1]
groups <- unique(gsub("\\_.*", replacement = "", names(pcomp1)))
pcomp1Means <- c()
for(i in seq_along(groups)){
  stMean <- mean(pcomp1[grep(groups[i], names(pcomp1))])
  pcomp1Means <- c(pcomp1Means, stMean)
}
names(pcomp1Means) <- groups
# Two alternative alignments with the metabolomics samples; the second
# (match on the parsed strain id) is the one actually used below.
pcomp1MeansFilt <- pcomp1Means[names(pcomp1Means) %in% rownames(ccmnNormMets)]
match(gsub("\\_.*|(PA14).*", rownames(ccmnNormMets), rep = "\\1"), names(pcomp1Means))
pcomp1MeansFilt <- pcomp1Means[match(gsub("\\_.*|(PA14).*",
                                          rownames(ccmnNormMets),
                                          rep = "\\1"),
                                     names(pcomp1Means))]
tiff("pcaSwarmMetsPrComp1.tiff", res = 300, height = 5000, width = 5000)
fviz_pca_ind(pcaMets,
             col.ind = pcomp1MeansFilt,
             gradient.cols = c("#FF0000", "#66FF00", "#003CFF"),
             legend.title = "PrComp1")
dev.off()
pcomp1Means
save(pcomp1Means, file = "pcomp1AreaPercCircularity.RData")
# The metabolites most correlated with swarming, according to OPLS-DA, are
# succinate and formyl-methionine.
plot(log(swarmDatMeansFilt$AreaPercentage), ccmnNormMets$`Formyl-methionine`)
plot(log(swarmDatMeansFilt$AreaPercentage), ccmnNormMets$succinate)
# Combine metabolites with the log swarming response into one data frame.
metSwarm <- cbind.data.frame(ccmnNormMets, log(swarmDatMeansFilt$AreaPercentage))
colnames(metSwarm)[ncol(metSwarm)] <- "logAreaPct"
colnames(metSwarm) <- make.names(colnames(metSwarm))
# PCA of the five metabolites with highest OPLS-DA loadings: PC1 gives
# their linear combination.
pcaSelMets <- prcomp(ccmnNormMets[, c("Formyl-methionine",
                                      "2-aminoadipate-6-semialdehyde",
                                      "Methylcitrate 1",
                                      "succinate",
                                      "Acetylhomoserine 2"
)])
fviz_pca_biplot(pcaSelMets)
# Obtain values of PC1 and add to the metSwarm data frame.
metSwarm <- cbind.data.frame(metSwarm, pcaSelMets$x[, 1])
colnames(metSwarm)[ncol(metSwarm)] <- "prComp1SelMets"
# NOTE(review): ggpubr is checked with require() but only ggplot2 is then
# library()'d -- ggscatter() below comes from ggpubr, which is attached
# only if the require() call succeeded.
if(!require(ggpubr)) install.packages("ggpubr")
library(ggplot2)
if(!require(ggpmisc)) BiocManager::install("ggpmisc")
library(ggpmisc)
# Scatter plots with regression line, R^2 and fitted equation.
ggscatter(metSwarm, x = "logAreaPct", y = "Formyl.methionine", add = "reg.line") +
  stat_cor(aes(label = paste(..rr.label.., ..p.label.., sep = "~`,`~"),
               label.x = 10)) +
  stat_regline_equation(label.x = 3, label.y = 13)
ggscatter(metSwarm, x = "logAreaPct", y = "succinate", add = "reg.line", label = rownames(metSwarm)) +
  stat_cor(aes(label = paste(..rr.label.., ..p.label.., sep = "~`,`~"),
               label.x = 9)) +
  stat_regline_equation(label.x = 3, label.y = 10)
ggscatter(metSwarm, x = "logAreaPct", y = "prComp1SelMets", add = "reg.line") +
  stat_cor(aes(label = paste(..rr.label.., ..p.label.., sep = "~`,`~"),
               label.x = 10, label.y = 3)) +
  stat_regline_equation(label.x = 3)
# Numeric correlations backing the plots above.
cor(log(swarmDatMeansFilt$AreaPercentage), pcaSelMets$x[, 1])
plot(log(swarmDatMeansFilt$AreaPercentage), pcaSelMets$x[, 1])
cor(log(swarmDatMeansFilt$AreaPercentage), ccmnNormMets$`Formyl-methionine`)
cor(log(swarmDatMeansFilt$AreaPercentage), ccmnNormMets$succinate)
|
4c68bfc5784f462ff2145e0d97878d8c20381e72
|
49228420f0239fad554daed38a7a138ae9c47904
|
/contornos.R
|
ea81f5f8047676f38db3c50c39e163bfcc92b5f9
|
[] |
no_license
|
Francisco-Pacopaco/Tesis
|
eddc61e6a44dd981938fb596e4c3c2f9957f0e37
|
572b906fc67a18736390fb9bc822f8b631ef27cc
|
refs/heads/main
| 2023-05-15T02:11:40.149977
| 2021-06-03T15:58:11
| 2021-06-03T15:58:11
| 370,120,880
| 0
| 0
| null | 2021-05-23T20:59:37
| 2021-05-23T17:53:33
| null |
ISO-8859-1
|
R
| false
| false
| 4,612
|
r
|
contornos.R
|
####MAs graficas para la tesis
library(lattice)
library(plotly)
library(ggplot2)
library(ContourFunctions)
library(shape)
#Graficas para las funciones de activación
# Evaluation grid shared by all activation-function plots below.
t<-seq(from=-10,to=10,by=0.001)
# Logistic sigmoid activation, 1 / (1 + e^(-z)); vectorized over z.
sigmoid <- function(z) {
  denom <- 1 + exp(-z)
  1 / denom
}
# Plot the sigmoid over the grid in a new device, with axes through the origin.
x11()
plot(t,sigmoid(t),col="green",type = "l",ylab = " ",xlab = "", lwd=2)
abline(h=0)
abline(v=0)
# Rectified linear unit, using the identity (z + |z|) / 2 == max(z, 0).
ReLU <- function(z) {
  (z + abs(z)) / 2
}
# Plot the ReLU and tanh activations, each in its own device.
x11()
plot(t,ReLU(t),col="purple",type = "l",ylab = " ",xlab = "", lwd=2)
abline(h=0)
abline(v=0)
x11()
plot(t,tanh(t),col="tomato",type = "l",ylab = " ",xlab = "",lwd=2)
abline(h=0)
abline(v=0)
# Elementwise min(z, 1), via the identity min(a, b) = (a + b - |a - b|) / 2.
mini <- function(z) {
  (z + 1 - abs(1 - z)) / 2
}
# Elementwise max(z, -1), via the identity max(a, b) = (a + b + |a - b|) / 2.
maxi <- function(z) {
  (z - 1 + abs(z + 1)) / 2
}
# Hard tanh activation: clip z into [-1, 1] by composing the two helpers above.
hardtanh <- function(z) {
  capped_above <- mini(z)
  maxi(capped_above)
}
# Plot the hard-tanh activation, axes through the origin.
x11()
plot(t,hardtanh(t),col="blue",type = "l",ylab = " ",xlab = "", lwd=2)
abline(h=0)
abline(v=0)
#Este es el paraboliode
# Elliptic paraboloid (2/3) * x^2 + 2 * y^2, used below as a toy loss surface.
# (Name kept as in the original script; outer() further down refers to it.)
paraboliode <- function(x, y) {
  (2/3)*x^2 + 2*y^2
}
# Evaluate the paraboloid loss surface on a 20x20 grid over [-1, 1]^2.
x <- seq(-1, 1, length = 20)
y <- seq(-1, 1, length = 20)
z <- outer(x, y, paraboliode)

# 3-D wireframe of the loss surface.
x11()
wireframe(z, data = NULL,
          drape = TRUE,  # FIX: removed a stray ",," (empty argument) present in the original call
          xlab = expression('w'[1]),
          ylab = expression('w'[2]),
          colorkey = FALSE,
          zlab = 'Error',
          main = 'Función de perdida',
          scales = list(arrows = FALSE, col = "black"),
          par.settings = list(axis.line = list(col = 'transparent')),
          strip.border = list(col = 'transparent'),
          strip.background = list(col = 'transparent'))

# Contour plot annotated with the minimum and a sequence of short arrows.
x11()
cf_grid(x, y, z, lines_only = TRUE,
        bar = FALSE,
        main = 'Contornos de nivel',
        levels = c(0.2, 0.4, 0.6, 1, 1.4, 1.8, 2.2))
points(0.5, 0.5, pch = 19, col = 'tomato')
text(0.5, 0.46, "mínimo")
Arrows(0.2, 0.2, 0.2, 0.24, arr.type = "triangle", arr.width = 0.2, col = "aquamarine3")
Arrows(0.2, 0.27, 0.24, 0.32, arr.type = "triangle", arr.width = 0.2, col = "aquamarine3")
Arrows(0.26, 0.35, 0.31, 0.4, arr.type = "triangle", arr.width = 0.2, col = "aquamarine3")

# Same contour plot, annotated with longer zig-zagging arrows.
x11()
cf_grid(x, y, z, lines_only = TRUE,
        bar = FALSE,
        main = 'Contornos de nivel',
        levels = c(0.2, 0.4, 0.6, 1, 1.4, 1.8, 2.2))
points(0.5, 0.5, pch = 19, col = 'tomato')
text(0.5, 0.46, "mínimo")
Arrows(0.1, 0.1, 0.3, 0.6, arr.type = "triangle", arr.width = 0.2, col = "chocolate3")
Arrows(0.3, 0.6, 0.7, 0.15, arr.type = "triangle", arr.width = 0.2, col = "chocolate3")
Arrows(0.7, 0.15, 0.9, 0.7, arr.type = "triangle", arr.width = 0.2, col = "chocolate3")
#######Funcion
# Weighted three-point mean (a + 4*b + c) / 6 (Simpson-type 1-4-1 weighting).
timo <- function(a, b, c) {
  weighted_sum <- a + 4*b + c
  weighted_sum*(1/6)
}
# Squared spread estimate ((b - a) / 6)^2.
timo2 <- function(a, b) {
  spread <- (b - a)*(1/6)
  spread^2
}
# Print the weighted mean and squared spread for the triple (1, 3, 5).
print(timo(1,3,5))
print(timo2(1,5))
#####
# Quadratic response surface used for the contour plot below.
contorno <- function(x, y) {
  base_term <- 800 + 10*x + 7*y
  base_term - 8.5*(x^2) - 5*(y^2) + 4*x*y
}
# Contour plot of the response surface on a 100x100 grid over [0, 10]^2.
x1<-seq(0,10, length=100)
y1<-seq(0,10, length=100)
z1<-outer(x1,y1,contorno)
x11()
cf_grid(x1, y1, z1, lines_only=TRUE,
        bar = FALSE,
        main='Contornos de nivel',
        levels = c(800,750,700,625,550,475,400,325,250,175,100,25))
####Modelo polinomial varias variables
# Fitted second-order polynomial response model in temperature T and
# concentration C (coefficients estimated further below).
modelop <- function(T, C) {
  linear_part <- -1105.56 + 8.0242*T + 22.994*C
  linear_part - 0.0142*T^2 - 0.20502*C^2 - 0.062*T*C
}
# Evaluate the fitted model over T in [180, 260] and C in [15, 30].
x2 <- seq(180, 260, length = 100)
y2 <- seq(15, 30, length = 100)
z2 <- outer(x2, y2, modelop)

# Contour plot of the fitted response.
x11()
cf_grid(x2, y2, z2, lines_only = TRUE,
        bar = FALSE,
        main = 'Contornos de nivel',
        levels = c(80, 75, 70, 62.5, 55, 47.5, 40, 32.5, 25, 17.5, 10))

# 3-D response surface.
x11()
wireframe(z2, data = NULL,
          drape = TRUE,  # FIX: removed a stray ",," (empty argument) present in the original call
          xlab = expression('T'),
          ylab = expression('C'),
          colorkey = FALSE,
          zlab = 'y',
          main = 'Superficie de Respuesta',
          scales = list(arrows = FALSE, col = "black"),
          par.settings = list(axis.line = list(col = 'transparent')),
          strip.border = list(col = 'transparent'),
          strip.background = list(col = 'transparent'))
# Central composite design data: 12 runs of temperature, concentration and
# the measured response.
temperatura<-c(200,250,200,250, 189.65, 260.35,225,225,225,225,225,225)
concentracion<-c(15,15,25,25,20,20,12.93,27.07,20,20,20,20)
respuesta<-c(43,78,69,73,48,76,65,74,76,79,83,81)
# Column of ones for the intercept.
# NOTE(review): growing `unos` element by element works, but
# rep(1, length(temperatura)) would be the idiomatic form.
unos<-c()
for (i in 1:length(temperatura)) {
  unos[i]=1
}
# Design matrix of the full second-order model and the OLS estimate via the
# normal equations: beta = (X'X)^-1 X'y. The name `beta` is reused for each
# intermediate product.
matriz<-cbind(unos,temperatura,concentracion,temperatura**2,concentracion**2,temperatura*concentracion)
beta<-t(matriz)%*%matriz
beta<-solve(beta)
beta<-beta%*%t(matriz)%*%respuesta
# Matrix B of the quadratic form (pure quadratic coefficients on the diagonal,
# half the interaction coefficient off-diagonal) and vector of linear terms;
# values match the coefficients hard-coded in modelop() above.
B<-c(-0.0142,-0.062/2)
B1<-c(-0.062/2,-0.20502)
B<-cbind(B,B1)
lin<-c(8.0242,22.994)
# Stationary point x_s = -(1/2) B^-1 b and the predicted response there.
pt_cr<-(-1/2)*solve(B)%*%lin
val_cr<-beta[1]+(1/2)*t(pt_cr)%*%lin
# Canonical analysis: eigenvalues/eigenvectors of B.
pro<-eigen(B)
val_pro<-pro$values
vec_pro<-pro$vectors
# Euclidean norm, used to check the first eigenvector has unit length.
norm_vec <- function(x) sqrt(sum(x^2))
print(norm_vec(vec_pro[,1]))
# Canonical variables: design points centred at the stationary point and
# rotated onto the eigenvector axes.
temp1<-temperatura-pt_cr[1,1]
conc1<-concentracion-pt_cr[2,1]
aux1<-cbind(temp1,conc1)
w<-t(vec_pro)%*%t(aux1)
print(val_pro)
|
db2e532b2df600bfd0bff92881d91488e61adecc
|
82029e1f7682d695cef736d778deec6839e6ebd2
|
/compareTime.R
|
b05f810952523b0cd6937b5896964ca5ff3e920b
|
[] |
no_license
|
leogalhardo/aprendendoGit
|
97b6322bd42b2deec73a3f4fe278432542ade9fc
|
b136a7afedb6a1992c5bb709494e2f6b556d1199
|
refs/heads/main
| 2023-07-02T21:54:52.919980
| 2021-08-08T14:59:19
| 2021-08-08T14:59:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 115
|
r
|
compareTime.R
|
# Benchmark two exponentiation implementations against each other.
# NOTE(review): potenciaRecursiva() and potenciaLog() are defined elsewhere;
# this snippet assumes they are already in the workspace.
microbenchmark::microbenchmark(
  recursiva = potenciaRecursiva(2, 10, FALSE),
  log = potenciaLog(2, 10, FALSE)
)
|
d8302ff9d48e8e6f40fa11558c1f2d89f8c64e7b
|
ca6240e30a203236d1eb2090def306c4bc22d32e
|
/R/sxtLength.R
|
aa1f03bbefecde3c71fb8f611e3b8afa4c19e39d
|
[
"MIT"
] |
permissive
|
jaspershen/sxtTools
|
a3ad0681afbddc3f01b2db9a6fa1239bb6331b65
|
209d7ecd37240bbacc3c27a93050f50bdfb24f0b
|
refs/heads/master
| 2021-05-13T21:56:21.852761
| 2020-04-27T20:43:04
| 2020-04-27T20:43:04
| 116,474,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
sxtLength.R
|
#' @title sxtLength
#' @description Print the length of each vector supplied via \code{...}.
#' @param ... vectors.
#' @author Xiaotao Shen
#' @export
sxtLength <- function(...) {
  args <- list(...)
  lens <- vapply(args, length, integer(1L))
  cat(lens)
}
|
800f8824dee1e1c6f2470199c1c1d64943ea2422
|
2e5440c1f655304515c6ff97c08676d1c6eb5222
|
/exercise-3/exercise.R
|
8bac2baf16e4a82039756bc388d65f62110abfed
|
[
"MIT"
] |
permissive
|
ursulamichelle/ch10-dplyr
|
01788fee08bc9d88b68b9057c914394505b0bf23
|
8e229990524b8966c6ab627dc8cb8e29ab05301f
|
refs/heads/master
| 2020-03-10T03:30:59.868118
| 2018-04-21T05:17:05
| 2018-04-21T05:17:05
| 129,167,062
| 0
| 0
|
MIT
| 2018-04-11T23:45:23
| 2018-04-11T23:45:23
| null |
UTF-8
|
R
| false
| false
| 1,256
|
r
|
exercise.R
|
# Exercise 3: using the pipe operator
# Install (if needed) and load the "dplyr" library
#install.packages("dplyr")
library("dplyr")
# Install (if needed) and load the "fueleconomy" package
#install.packages('devtools')
#devtools::install_github("hadley/fueleconomy")
library(fueleconomy)
# Which 2015 Acura model has the best hwy MGH? (Use dplyr, but without method
# chaining or pipes--use temporary variables!)
# Step 1: keep 2015 Acuras; step 2: keep the max-hwy row(s); step 3: model name.
acura_2015 <- filter(vehicles, make == 'Acura', year == 2015)
max_hwy_acura_2015 <- filter(acura_2015, hwy == max(hwy))
temp_vars_best_model <- select(max_hwy_acura_2015, model)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr, nesting functions)
nested_best_model <- select(filter(filter(vehicles, make == 'Acura', year == 2015), hwy == max(hwy)), model)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr and the pipe operator)
pipe_best_model <- filter(vehicles, make == 'Acura', year == 2015) %>%
  filter(hwy == max(hwy)) %>%
  select(model)
### Bonus
# Write 3 functions, one for each approach. Then,
# Test how long it takes to perform each one 1000 times
# FIX: the original timed calls such as temp_vars_best_model(), but those
# names are data frames created above, not functions, so every call errored
# with "attempt to apply non-function". Wrap each approach in a zero-argument
# function and time those instead.
best_model_temp_vars <- function() {
  acura <- filter(vehicles, make == 'Acura', year == 2015)
  best <- filter(acura, hwy == max(hwy))
  select(best, model)
}
best_model_nested <- function() {
  select(filter(filter(vehicles, make == 'Acura', year == 2015), hwy == max(hwy)), model)
}
best_model_piped <- function() {
  filter(vehicles, make == 'Acura', year == 2015) %>%
    filter(hwy == max(hwy)) %>%
    select(model)
}
system.time(for (i in 1:1000) best_model_temp_vars())
system.time(for (i in 1:1000) best_model_nested())
system.time(for (i in 1:1000) best_model_piped())
|
6df5e44eb1affabdc25999283bdb26c0ee598b25
|
feacd140ef39b3b7aa3646dfb4aa44ca3b721563
|
/lessons/14_week_modelsandTextAnalysis/textAnalysis/sentimentAnalysis_PositiveAndNegativeWords_2018.r
|
2baa3c9c97912030a72268414689c0573d1891b1
|
[] |
no_license
|
Allegheny-Computer-Science-301-F2018/classDocs
|
c76d4ba668ac640d9c345e02235437bb1e478549
|
0957acab72c678538c39084edff5212e97c4fadb
|
refs/heads/master
| 2020-03-27T09:22:27.813938
| 2018-12-06T17:01:04
| 2018-12-06T17:01:04
| 146,335,323
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,126
|
r
|
sentimentAnalysis_PositiveAndNegativeWords_2018.r
|
# date: 28 Nov 2018
# sentiment analysis in R
#check to see whether all libraries are installed properly.
#install.packages("janeaustenr")
library(janeaustenr)
library(dplyr)
library(stringr)
#install.packages("tidytext")
library(tidytext)
library(ggplot2)
library(tidyr)
# Annotate each line of the Austen novels with its line number and the
# cumulative chapter count (incremented on each "Chapter ..." heading).
original_books <- austen_books() %>%
  group_by(book) %>%
  mutate(linenumber = row_number(),
         chapter = cumsum(str_detect(text, regex("^chapter [\\divxlc]", ignore_case = TRUE)))) %>% ungroup()
View(original_books)
# Tidy-text format: one row per word.
tidy_books <- original_books %>%
  unnest_tokens(word, text) #make a list of words from the paragraphs
View(tidy_books)
data("stop_words")
# Drop common stop words before counting word frequencies.
cleaned_books <- tidy_books %>%
  anti_join(stop_words)
# anti_join() returns all rows from x where there are not matching values in y, keeping just columns from x.
cleaned_books %>%
  count(word, sort = TRUE)
# consider the joy-words in Emma using the nrc lexicon.
nrcjoy <- get_sentiments("nrc") %>%
  filter(sentiment == "joy")
tidy_books %>%
  filter(book == "Emma") %>%
  semi_join(nrcjoy) %>%
  count(word, sort = TRUE)
bing <- get_sentiments("bing")
bing # the listing of words according to being of a positive of negative nature.
#determine the sentiment fluxuations in the books.
# Net sentiment (positive - negative word counts) per 80-line chunk of text.
janeaustensentiment <- tidy_books %>%
  inner_join(bing) %>%
  count(book, index = linenumber %/% 80, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)
# plot the sentiment in each book
ggplot(janeaustensentiment, aes(index, sentiment, fill = book)) +
  geom_bar(stat = "identity", show.legend = FALSE) +
  facet_wrap(~book, ncol = 2, scales = "free_x")
# what are the common good and bad words?
bing_word_counts <- tidy_books %>%
  inner_join(bing) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
bing_word_counts
# Diverging bar chart of the most frequent sentiment words (negatives are
# negated so they plot downward).
bing_word_counts %>%
  filter(n > 150) %>%
  mutate(n = ifelse(sentiment == "negative", -n, n)) %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill = sentiment)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ylab("Contribution to sentiment")
|
64de0263e8ff95cc98ff2bdaaab4f028708f2d02
|
0ec9018173e5ab27df738e1bac01a9f3ecfb0388
|
/fgsea/unmc_fgsea.R
|
cc3c77da8242c5b407cc7ded3f77b6f457661585
|
[] |
no_license
|
yueli8/unmc
|
cd4f83e1dfd7aae1bad2c9a165f134ee8d59e226
|
8955fa8a1313d233833bd261c72074ccd9e36566
|
refs/heads/main
| 2023-05-09T01:08:42.401492
| 2021-06-07T08:19:41
| 2021-06-07T08:19:41
| 351,345,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,977
|
r
|
unmc_fgsea.R
|
library(fgsea)
library(tidyverse)
library(data.table)
library(ggplot2)
setwd("/Users/yli1/unmc01")
# Run fgsea for three contrasts (hcq, pcq, phpma vs control). Each *_fgsea.txt
# input is a two-column table turned into a named ranking vector by deframe().
#hcq_vs_con
res<-read.table("hcq_vs_con_fgsea.txt")
ranks <- deframe(res)
# Gene sets: the MSigDB C2 KEGG subset and a second C2 collection.
# NOTE(review): the second file name ends in ".gtm" -- likely a typo for
# ".gmt"; confirm that file actually exists on disk.
pathways.kegg<- gmtPathways("c2.cp.kegg.v7.2.symbols.gmt")
pathways<-gmtPathways("c2.cp.v7.2.symbols.gtm")
fgseaRes_kegg<- fgsea(pathways = pathways.kegg, stats = ranks, minSize = 1,maxSize = Inf, nPermSimple = 100000)
fgseaRes<- fgsea(pathways = pathways, stats = ranks, minSize = 1,maxSize = Inf, nPermSimple = 100000)
fwrite(fgseaRes_kegg, file="hcq_vs_con_kegg01.txt", sep="\t", sep2=c("", " ", ""))
fwrite(fgseaRes, file="hcq_vs_con_all01.txt", sep="\t", sep2=c("", " ", ""))
plotEnrichment(pathways.kegg[["KEGG_SYSTEMIC_LUPUS_ERYTHEMATOSUS"]],
               ranks) + labs(title="SYSTEMIC_LUPUS_ERYTHEMATOSUS")
#pcq_vs_con
# Reuses the pathway lists loaded above; only the ranking vector changes.
res<-read.table("pcq_vs_con_fgsea.txt")
ranks <- deframe(res)
fgseaRes_kegg<- fgsea(pathways = pathways.kegg, stats = ranks, minSize = 1,maxSize = Inf, nPermSimple = 100000)
fgseaRes<- fgsea(pathways = pathways, stats = ranks, minSize = 1,maxSize = Inf, nPermSimple = 100000)
fwrite(fgseaRes_kegg, file="pcq_vs_con_kegg01.txt", sep="\t", sep2=c("", " ", ""))
fwrite(fgseaRes, file="pcq_vs_con_all01.txt", sep="\t", sep2=c("", " ", ""))
plotEnrichment(pathways.kegg[["KEGG_SYSTEMIC_LUPUS_ERYTHEMATOSUS"]],
               ranks) + labs(title="SYSTEMIC_LUPUS_ERYTHEMATOSUS")
plotEnrichment(pathways.kegg[["KEGG_LEISHMANIA_INFECTION"]],
               ranks) + labs(title="LEISHMANIA_INFECTION")
#phpma_vs_con
res<-read.table("phpma_vs_con_fgsea.txt")
ranks <- deframe(res)
fgseaRes_kegg<- fgsea(pathways = pathways.kegg, stats = ranks, minSize = 1,maxSize = Inf, nPermSimple = 100000)
fgseaRes<- fgsea(pathways = pathways, stats = ranks, minSize = 1,maxSize = Inf, nPermSimple = 100000)
fwrite(fgseaRes_kegg, file="phpma_vs_con_kegg01.txt", sep="\t", sep2=c("", " ", ""))
fwrite(fgseaRes, file="phpma_vs_con_all01.txt", sep="\t", sep2=c("", " ", ""))
|
4f98952c756c20fdec0371e69988bccd706ee8fd
|
b6b294c5d8c53fcc837882c3f55c13e4f7b4f95b
|
/tests/testthat/test-04-rmarkdown.R
|
fc80c455208e15f42437fcbc4efcb12018646477
|
[
"MIT"
] |
permissive
|
kiwiroy/perlbrewr
|
542ca8255a705052c596b2495220053f1474bec5
|
33118411f8ef78dcba110df172020b9f5a3292ba
|
refs/heads/master
| 2021-08-18T01:27:39.194005
| 2020-04-27T23:09:52
| 2020-04-27T23:09:52
| 171,650,256
| 0
| 0
|
NOASSERTION
| 2019-07-05T01:06:43
| 2019-02-20T10:16:44
|
R
|
UTF-8
|
R
| false
| false
| 1,276
|
r
|
test-04-rmarkdown.R
|
context("rmarkdown templates")
# End-to-end check: the package ships an rmarkdown template, a draft created
# from it renders to GitHub markdown, and the rendered output mentions the
# expected perlbrew library string exactly three times.
test_that("knit doc", {
  # check there is still a template under inst
  template <- "perlbrewr"
  package <- "perlbrewr"
  template_path = system.file("rmarkdown", "templates", template,
                              package = package)
  expect_true(nzchar(template_path))
  # create a draft
  # Have to supply full template path. For some reason system.file() call in
  # rmarkdown::draft() cannot find package, although the above does.
  # Passing package = NULL, ensures template is used verbatim.
  draft <- rmarkdown::draft(file = file.path(tempdir(), "perlbrewr.Rmd"),
                            template = template_path,
                            package = NULL,
                            create_dir = FALSE,
                            edit = FALSE)
  # knit using rmarkdown rendering
  output <-
    rmarkdown::render(input = draft,
                      output_file = file.path(tempdir(), "perlbrewr.md"),
                      output_format = "github_document",
                      quiet = TRUE)
  # test output
  content <- readLines(output)
  lib_lines <- content[grepl(pattern = "perl-5\\.26\\.0@template", content)]
  expect_equal(length(lib_lines), 3)
  # warning(paste0(content, collapse = "\n"))
})
|
bf91373c142b6d4676127809303834b6cf81d94a
|
0ccb2ef7d5d608d9c33ec1b68c176c17a7a3d888
|
/risk_prediction/Nasim_new_prs/code/merge_overall_model.R
|
8c035e6c69b10a3ecf994d3fed11698dbd4eebaf
|
[] |
no_license
|
andrewhaoyu/breast_cancer_data_analysis
|
dce6788aa526a9a35fcab73564a457e8fabb5275
|
d84441e315e3ce135149e111014fa9807228ee7c
|
refs/heads/master
| 2023-05-31T20:59:44.353902
| 2023-05-15T19:23:30
| 2023-05-15T19:23:30
| 103,444,023
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
merge_overall_model.R
|
# Merge per-chunk log-odds estimates (32 .Rdata chunks) for the overall model
# into one table alongside the SNP ids, then save the combined result.
# NOTE(review): the absolute setwd() is kept because the later paths are
# relative to it.
setwd('/data/zhangh24/breast_cancer_data_analysis/')
# Preallocate one slot per chunk rather than growing vectors with c() in the
# loop.
logodds_list <- vector("list", 32L)
var_list <- vector("list", 32L)
for (i1 in seq_len(32L)) {
  # Each file defines `result`: result[[1]] = log odds, result[[2]] = variances.
  load(paste0("/data/zhangh24/breast_cancer_data_analysis/risk_prediction/Nasim_new_prs/result/overall_log_odds_",i1,".Rdata"))
  logodds_list[[i1]] <- result[[1]]
  var_list[[i1]] <- result[[2]]
}
logodds_overall <- unlist(logodds_list)
var_overall <- unlist(var_list)
# SNP identifiers (object `snp_id`); first three columns are kept.
load(paste0("./risk_prediction/Nasim_new_prs/result/discover_snp_id.rdata"))
final_result <- data.frame(snp_id[,1:3],
                           logodds_overall,
                           var_overall,
                           stringsAsFactors = FALSE)
save(final_result,file= "./risk_prediction/Nasim_new_prs/result/32_overall_logodds.Rdata")
|
d6a85675ea2e94043643119c5f5da8d5383d8e2e
|
20d35a761bc689f482c296ca942549d401bc285a
|
/ui.R
|
03edaad4c8b61296182bb3b6afdbc505aac756a0
|
[] |
no_license
|
wengjingying/CheckWaite
|
edfaa47f7a69a06f7bd2b743c8e37e0e5f20cc2b
|
48e1a35a5215217a6ba6661f197ae896f53f1c41
|
refs/heads/master
| 2021-01-10T02:16:56.775903
| 2015-09-26T06:41:06
| 2015-09-26T06:41:06
| 43,194,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,440
|
r
|
ui.R
|
# shiny project about checkee, input month year city output average waiting time
library(shiny)
# Classic two-panel Shiny UI: inputs on the left, a static description plus
# the echoed inputs and the predicted waiting time on the right. The ids
# (id1..id5 / oid1..oid5 / "prediction") must match server.R.
shinyUI(pageWithSidebar(
  headerPanel("Average Waiting days for USA Visa Administrative Processing in China"),
  sidebarPanel(
    numericInput('id1','Year',2008,min = 2008,max = 2015,step = 1),
    # NOTE(review): the initial value 0 lies outside the declared 1..12 range;
    # confirm whether the default should be 1.
    numericInput('id2','Month',0,min = 1,max = 12,step = 1),
    # NOTE(review): selected = 1 does not match any of the (string) choice
    # values in these radioButtons; verify the intended default selection.
    radioButtons("id3",label=h5("Visa Type"),
                 choices = list("F1" = "F1",
                                "B1" = "B1",
                                "J1" = "J1",
                                "H1" = "H1"),
                 selected = 1),
    radioButtons("id4",label=h5("Embassy Location"),
                 choices = list("BeiJing" = "BeiJing",
                                "ShangHai" = "ShangHai",
                                "ChengDu" = "ChengDu",
                                "GuangZhou" = "GuangZhou",
                                "ShenYang" = "ShenYang"),
                 selected = 1),
    radioButtons("id5",label=h5("Visa Entry"),
                 choices = list("New" = "New",
                                "Renewal" = "Renewal"),
                 selected = 1),
    # Inputs are only sent to the server when the user presses submit.
    submitButton('submit')
  ),
  mainPanel(
    h3('App Description:'),
    p('Data sourse: ',a('Check Reporter',href = 'http://www.checkee.info/main.php?dispdate=')),
    p('Calculate the average waiting days for Administrative Processing also known as Check, based on Month, Year, Embassy Location, Visa type and entry.'),
    p('Data range from 2008/11 to 2015/7. NaN output means lack of data for selected Input.'),
    h3('Result of prediction'),
    h4('Your interview year'),
    verbatimTextOutput("oid1"),
    h4('Your interview month'),
    verbatimTextOutput("oid2"),
    h4('Your visa type'),
    verbatimTextOutput("oid3"),
    h4('Embassy Location'),
    verbatimTextOutput("oid4"),
    h4('Your Visa Entry'),
    verbatimTextOutput("oid5"),
    h4('Average waiting time in days'),
    verbatimTextOutput("prediction")
  )
))
|
755b65482f23173cdc85ecb69803f644b2c1444b
|
b588e0a4df002a71bc1948f660b0f033bab57858
|
/fitnesspob.R
|
ef80d5af71018b96335c85208986178088b8ea81
|
[] |
no_license
|
ccqa86/Proyecto-Maestria
|
2eacb09dee422c255d722c22f6922106e6eead64
|
29bf33516abc74280b763c70c473a1e7defd2fcd
|
refs/heads/master
| 2023-01-03T10:12:34.574582
| 2020-11-02T00:30:02
| 2020-11-02T00:30:02
| 294,197,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,798
|
r
|
fitnesspob.R
|
calcular_fitness_poblacion <- function(poblacion, funcion_objetivo, optimizacion,
                                       verbose = TRUE, ...) {
  # Returns the fitness of every individual in a population.
  #
  # ARGUMENTS
  # ============================================================================
  # poblacion: matrix representing the population (one individual per row).
  # funcion_objetivo: name of the function to optimise; it must already be
  #                   defined.
  # optimizacion: "maximizar" or "minimizar". Depending on this, the fitness
  #               is directly or inversely proportional to the objective value.
  # verbose: print progress information to the console.
  #
  # RETURN
  # ============================================================================
  # Vector with the fitness of every individual in the population; the order
  # of the values matches the rows of the population matrix.

  # FITNESS OF EACH INDIVIDUAL IN THE POPULATION
  # ----------------------------------------------------------------------------
  # Preallocated vector to hold each individual's fitness.
  fitness_poblacion <- rep(NA, times = nrow(poblacion))
  for (i in 1:nrow(poblacion)) {
    individuo <- poblacion[i, ]
    # NOTE(review): calcular_fitness_individuo() is defined elsewhere in the
    # project; `...` is accepted here but not forwarded to it.
    fitness_individuo <- calcular_fitness_individuo(
      individuo = individuo,
      funcion_objetivo = funcion_objetivo,
      optimizacion = optimizacion,
      verbose = verbose
    )
    fitness_poblacion[i] <- fitness_individuo
  }
  # BEST INDIVIDUAL OF THE POPULATION
  # ----------------------------------------------------------------------------
  # The best individual is the one with the highest fitness.
  indice_mejor_individuo <- which.max(fitness_poblacion)
  # Objective-function value of the best individual (fitness carries a flipped
  # sign when minimising, so undo it here).
  if (optimizacion == "maximizar") {
    valor_funcion <- fitness_poblacion[indice_mejor_individuo]
  } else {
    valor_funcion <- -1*fitness_poblacion[indice_mejor_individuo]
  }
  # PROGRESS INFORMATION (VERBOSE)
  # ----------------------------------------------------------------------------
  if (verbose) {
    cat("------------------", "\n")
    cat("Población evaluada", "\n")
    cat("------------------", "\n")
    cat("Optimización =", optimizacion, "\n")
    cat("Mejor fitness encontrado =", fitness_poblacion[indice_mejor_individuo], "\n")
    cat("Mejor solución encontrada =",
        paste(poblacion[indice_mejor_individuo,], collapse = " "), "\n")
    cat("Valor función objetivo =", valor_funcion, "\n")
    cat("\n")
  }
  return(fitness_poblacion)
}
|
109c8d2b8b67721a003f4e727c423ad9d23247d1
|
77090c3eaf15342505edc228ea19769ab219e0f7
|
/Tools/DECoN-master/Windows/packrat/lib-R/nlme/scripts/ch01.R
|
052757e1a885aac7f8b1d90289dba230e4bee7bc
|
[
"MIT"
] |
permissive
|
robinwijngaard/TFM_code
|
046c983a8eee7630de50753cff1b15ca3f7b1bd5
|
d18b3e0b100cfb5bdd9c47c91b01718cc9e96232
|
refs/heads/main
| 2023-06-20T02:55:52.071899
| 2021-07-13T13:18:09
| 2021-07-13T13:18:09
| 345,280,544
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,830
|
r
|
ch01.R
|
#-*- R -*-
# Demo script for nlme Chapter 1 ("Linear Mixed-Effects Models: Basic
# Concepts and Examples"); figure numbers in the comments refer to the book's
# figures. All graphical output goes to ch01.pdf.
library(nlme)
pdf(file = 'ch01.pdf')
options( width = 65, digits = 5 )
options( contrasts = c(unordered = "contr.helmert", ordered = "contr.poly") )
# Chapter 1    Linear Mixed-Effects Models: Basic Concepts and Examples
# 1.1 A Simple Example of Random Effects
Rail
# Fixed-effects fits (pooled mean, then one mean per rail) for comparison
# with the random-effects model below.
fm1Rail.lm <- lm( travel ~ 1, data = Rail )
fm1Rail.lm
fm2Rail.lm <- lm( travel ~ Rail - 1, data = Rail )
fm2Rail.lm
fm1Rail.lme <- lme(travel ~ 1, data = Rail, random = ~ 1 | Rail)
summary( fm1Rail.lme )
fm1Rail.lmeML <- update( fm1Rail.lme, method = "ML" )
summary( fm1Rail.lmeML )
plot( fm1Rail.lme )   # produces Figure 1.4
intervals( fm1Rail.lme )
anova( fm1Rail.lme )
# 1.2 A Randomized Block Design
plot.design( ergoStool )   # produces Figure 1.6
contrasts( ergoStool$Type )
ergoStool1 <- ergoStool[ ergoStool$Subject == "1", ]
model.matrix( effort ~ Type, ergoStool1 )   # X matrix for Subject 1
fm1Stool <-
  lme(effort ~ Type, data = ergoStool, random = ~ 1 | Subject)
summary( fm1Stool )
anova( fm1Stool )
# Switch to treatment contrasts and refit to show the parameterization effect.
options( contrasts = c( factor = "contr.treatment",
                        ordered = "contr.poly" ) )
contrasts( ergoStool$Type )
fm2Stool <-
  lme(effort ~ Type, data = ergoStool, random = ~ 1 | Subject)
summary( fm2Stool )
anova( fm2Stool )
model.matrix( effort ~ Type - 1, ergoStool1 )
fm3Stool <-
  lme(effort ~ Type - 1, data = ergoStool, random = ~ 1 | Subject)
summary( fm3Stool )
anova( fm3Stool )
intervals( fm1Stool )
plot( fm1Stool,   # produces Figure 1.8
      form = resid(., type = "p") ~ fitted(.) | Subject,
      abline = 0 )
# 1.3 Mixed-effects Models for Replicated, Blocked Designs
with(Machines, interaction.plot( Machine, Worker, score, las = 1))   # Figure 1.10
fm1Machine <-
  lme( score ~ Machine, data = Machines, random = ~ 1 | Worker )
fm1Machine
fm2Machine <- update( fm1Machine, random = ~ 1 | Worker/Machine )
fm2Machine
anova( fm1Machine, fm2Machine )
## delete selected rows from the Machines data
MachinesUnbal <- Machines[ -c(2,3,6,8,9,12,19,20,27,33), ]
## check that the result is indeed unbalanced
table(MachinesUnbal$Machine, MachinesUnbal$Worker)
fm1MachinesU <- lme( score ~ Machine, data = MachinesUnbal,
                     random = ~ 1 | Worker/Machine )
fm1MachinesU
intervals( fm1MachinesU )
fm4Stool <- lme( effort ~ Type, ergoStool, ~ 1 | Subject/Type )
if (interactive()) intervals( fm4Stool )
(fm1Stool$sigma)^2
(fm4Stool$sigma)^2 + 0.79621^2
Machine1 <- Machines[ Machines$Worker == "1", ]
model.matrix( score ~ Machine, Machine1 )   # fixed-effects X_i
model.matrix( ~ Machine - 1, Machine1 )     # random-effects Z_i
fm3Machine <- update( fm1Machine, random = ~Machine - 1 |Worker)
summary( fm3Machine )
anova( fm1Machine, fm2Machine, fm3Machine )
# 1.4 An Analysis of Covariance Model
names( Orthodont )
levels( Orthodont$Sex )
OrthoFem <- Orthodont[ Orthodont$Sex == "Female", ]
# Separate lm fits per subject, then the mixed model fitted to all subjects.
fm1OrthF.lis <- lmList( distance ~ age, data = OrthoFem )
coef( fm1OrthF.lis )
intervals( fm1OrthF.lis )
plot( intervals ( fm1OrthF.lis ) )   # produces Figure 1.12
fm2OrthF.lis <- update( fm1OrthF.lis, distance ~ I( age - 11 ) )
plot( intervals( fm2OrthF.lis ) )   # produces Figure 1.13
fm1OrthF <-
  lme( distance ~ age, data = OrthoFem, random = ~ 1 | Subject )
summary( fm1OrthF )
fm1OrthFM <- update( fm1OrthF, method = "ML" )
summary( fm1OrthFM )
fm2OrthF <- update( fm1OrthF, random = ~ age | Subject )
anova( fm1OrthF, fm2OrthF )
random.effects( fm1OrthF )
ranef( fm1OrthFM )
coef( fm1OrthF )
plot( compareFits(coef(fm1OrthF), coef(fm1OrthFM)))   # Figure 1.15
plot( augPred(fm1OrthF), aspect = "xy", grid = TRUE )   # Figure 1.16
# 1.5 Models for Nested Classification Factors
fm1Pixel <- lme( pixel ~ day + I(day^2), data = Pixel,
                 random = list( Dog = ~ day, Side = ~ 1 ) )
intervals( fm1Pixel )
plot( augPred( fm1Pixel ) )   # produces Figure 1.18
VarCorr( fm1Pixel )
summary( fm1Pixel )
fm2Pixel <- update( fm1Pixel, random = ~ day | Dog)
anova( fm1Pixel, fm2Pixel )
fm3Pixel <- update( fm1Pixel, random = ~ 1 | Dog/Side )
anova( fm1Pixel, fm3Pixel )
fm4Pixel <- update( fm1Pixel, pixel ~ day + I(day^2) + Side )
summary( fm4Pixel )
# 1.6 A Split-Plot Experiment
fm1Oats <- lme( yield ~ ordered(nitro) * Variety, data = Oats,
                random = ~ 1 | Block/Variety )
anova( fm1Oats )
fm2Oats <- update( fm1Oats, yield ~ ordered(nitro) + Variety )
anova( fm2Oats )
summary( fm2Oats )
fm3Oats <- update( fm1Oats, yield ~ ordered( nitro ) )
summary( fm3Oats )
fm4Oats <-
  lme( yield ~ nitro, data = Oats, random = ~ 1 | Block/Variety )
summary( fm4Oats )
VarCorr( fm4Oats )
intervals( fm4Oats )
plot(augPred(fm4Oats), aspect = 2.5, layout = c(6, 3),
     between = list(x = c(0, 0, 0.5, 0, 0)))   # produces Figure 1.21
# cleanup
proc.time()
q("no")
|
533c843fe31bff517e0e3298c492ab089a3e3fc2
|
a5f3d6c1cf45a7b8ee641c8a0c05c6046578c783
|
/man/AmigoDot.to.graphNEL.Rd
|
09d9c172d1443535ef27445223be6d61833a0091
|
[] |
no_license
|
mschroed/RamiGO
|
f71140f3ac1b5ae94d5f7cdeafaac175de125901
|
fe05fe3e166922db58796df169edbc0445e6d4da
|
refs/heads/master
| 2021-01-19T16:57:05.668955
| 2015-04-17T04:42:40
| 2015-04-17T04:42:40
| 1,807,803
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
rd
|
AmigoDot.to.graphNEL.Rd
|
\name{AmigoDot.to.graphNEL}
\alias{AmigoDot.to.graphNEL}
\title{
Converts an AmigoDot S4 object to a graphNEL object.
}
\description{
Converts an AmigoDot S4 object to a graphNEL object.
}
\usage{
AmigoDot.to.graphNEL(object)
}
\arguments{
\item{object}{
is an AmigoDot S4 object.
}
}
\value{
\item{gNEL}{
is a graphNEL object.
}
}
\author{
Markus Schroeder <mschroed@jimmy.harvard.edu>
}
\examples{
## set GO ID's and color
goIDs <- c("GO:0051130","GO:0019912","GO:0005783")
color <- c("lightblue","red","yellow")
dd <- getAmigoTree(goIDs=goIDs,color=color,
filename="example",picType="dot",saveResult=FALSE)
tt <- readAmigoDot(object=dd)
AmigoDot.to.graphNEL(tt)
}
|
8cac112fc665372019f690c2713acc0d535e19cd
|
6c28b83f3b8105c881daf4c068f78f14a1f1f153
|
/r_whether/plotMeanTemp.R
|
051099a964df3d1998e8c19878f52a6aaa579be1
|
[
"MIT"
] |
permissive
|
anp/whether
|
4de57e1343b0db0cde2dedf3e374122e22fd6e87
|
36447bb29209e7ee26307c9de782488bdc62fe7a
|
refs/heads/master
| 2021-06-06T13:15:52.737298
| 2015-06-24T18:55:01
| 2015-06-24T18:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
plotMeanTemp.R
|
# Average temperature per station (1973-2014), aggregated in SQL, then drawn
# as coloured points over a world map. Assumes `con` (DB connection), ggplot2
# with map data, and `plotOutputPath` are set up by the calling script.
# NOTE(review): the SQL string is kept byte-for-byte; the unnamed aggregate
# column comes back as "avg" -- confirm that matches this DB driver.
res <- dbSendQuery(conn = con, statement = "select
s.latitude,
s.longitude,
avg(ds.mean_temp_fahr)
from stations s, daily_summaries ds
WHERE ds.station_id = s.station_id and ds.wban_id = s.wban_id
GROUP BY s.latitude, s.longitude")
print("Fetching average temperature query results...")
avgTempLocationFrame <- dbFetch(res)
dbClearResult(res)
# World-map base layer, then one point per station coloured by mean temp.
baseMap <- ggplot() + borders("world", colour="gray15", fill="gray90")
meanTempMap <- baseMap +
  geom_point(aes(x=avgTempLocationFrame$longitude, y=avgTempLocationFrame$latitude, color= avgTempLocationFrame$avg), alpha=0.95, size=3) +
  scale_color_gradient(low="navy", high="red", na.value = "navy", limits=c(25,100), name="Temperature") +
  ggtitle("Average Temperature 1973-2014") +
  theme(plot.title = element_text(size=18, face="bold", vjust=2), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.ticks.x = element_blank(), axis.text.x = element_blank()) +
  labs(x="", y="")
plot(meanTempMap)
ggsave(filename='mean_temperature_map.png', path=plotOutputPath, width=17.2, height=10.7)
|
24634be5c94911a3f933c9f42f0ffcd41492162b
|
61659cb027886dfbd05955797ce8bb84ed3defd4
|
/hw03/code/make-teams-table.R
|
5fc071ba1ffba23698d5741fa5a2cd806c1a94ed
|
[] |
no_license
|
efuna503/stat133-hws-fall17
|
504e159dab06cad0823f4c39aab579fab22ef984
|
b31da970a73d4fc7628274629f242f9d2faf74dd
|
refs/heads/master
| 2022-03-15T05:22:09.321819
| 2019-08-15T05:35:23
| 2019-08-15T05:35:23
| 104,022,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,263
|
r
|
make-teams-table.R
|
#=================================================================================
#title: make-teams-table
# description: a short description of what the script is about
# input(s): what are the inputs required by the script?
# output(s): what are the outputs created when running the script?
#=================================================================================
# NOTE(review): an absolute, user-specific setwd() makes the script
# non-portable; kept because every path below is relative to it.
setwd("C:/Users/eriko/stat133/stat133-hws-fall17/hw03/code")
library(dplyr)
library(ggplot2)  # FIX: ggplot()/aes()/... are used below but ggplot2 was never loaded

# Raw data and dictionaries
dat1 <- read.csv("../data/nba2017-roster.csv")
dat2 <- read.csv("../data/nba2017-stats.csv")

# Adding new player-level variables. Inside mutate(), `points` and `rebounds`
# refer to the columns created earlier in the same call.
# FIX: `dat2$assist` relied on `$` partial matching (the column is `assists`);
# spelled out explicitly.
dat2 <- mutate(dat2,
               missed_fg = dat2$field_goals_atts - dat2$field_goals_made,
               missed_ft = dat2$points1_atts - dat2$points1_made,
               points = 3*dat2$points3_made + 2*dat2$points2_made + dat2$points1_made,
               rebounds = dat2$off_rebounds + dat2$def_rebounds,
               efficiency = (points + rebounds + dat2$assists + dat2$steals +
                               dat2$blocks - missed_fg - missed_ft - dat2$turnovers) /
                 dat2$games_played)
sink(file = "../output/efficiency-summary.txt")
# FIX: wrapped in print() so the summary is written even when this script is
# source()'d (top-level auto-printing does not happen under source()).
print(summary(dat2))
sink()

# Merging tables on their shared player column(s).
dat <- merge(dat1, dat2)
# NOTE(review): positional rename -- assumes column 20 of the merged frame is
# the made-free-throws column; renaming by name would be safer.
colnames(dat)[20] <- "free_throws"

# Creating nba2017-teams.csv: one row of aggregated totals per team.
team <- summarize(group_by(dat, team),
                  Experience = round(sum(experience), digits = 2),
                  Salary = sum(round(salary*(10^-6), digits = 2)),
                  Points3 = sum(points3_made), Points2 = sum(points2_made),
                  Free_throws = sum(free_throws), Points = sum(points),
                  Off_rebounds = sum(off_rebounds), Def_rebounds = sum(def_rebounds),
                  Assists = sum(assists), Steals = sum(steals), Blocks = sum(blocks),
                  Turnovers = sum(turnovers), Fouls = sum(fouls),
                  Efficiency = sum(efficiency))
team <- data.frame(team, row.names = team$team)
sink(file = "../data/teams-summary.txt")
print(summary(team))  # FIX: print() so the file is populated under source()
sink()
write.csv(team, file = "../data/nba2017-teams.csv")

# Some graphics
## use stars() to get a star plot of the teams
stars(team[ , -1], labels = as.character(team$team))
## use ggplot() to get a scatterplot of experience and salary
pdf(file = "../image/experience_salary.pdf")
# FIX: print() so the plot is rendered into the pdf device under source().
print(ggplot(team, aes(x = Experience, y = Salary)) +
        geom_point() +
        geom_text(aes(label = team)))
dev.off()
|
f77f2f323b480868a12063eac22cbc9d1736f57a
|
167acb3748b35a6d43b991a0097f9bd89e7bf0dc
|
/tests/testthat/test-ip_to_binary.R
|
6a5a49e2556614c52fdf0b856b92019bfa368896
|
[
"MIT"
] |
permissive
|
davidchall/ipaddress
|
c3de405db6b7fcc2417f7e59e38fec51523b6519
|
f7c934cb8a5cb0d2fe9b9200cc1d004ec4354884
|
refs/heads/master
| 2023-04-08T12:47:00.530129
| 2023-04-04T15:00:50
| 2023-04-04T15:00:50
| 239,136,555
| 30
| 3
|
NOASSERTION
| 2022-12-17T18:24:54
| 2020-02-08T13:28:48
|
R
|
UTF-8
|
R
| false
| false
| 969
|
r
|
test-ip_to_binary.R
|
# Fixture: one IPv4 address, one IPv6 address, and a missing value, so each
# expectation below covers all three cases at once.
# NOTE(review): ip_address(), ip_to_binary() and binary_to_ip() come from the
# package under test ({ipaddress}); this file runs under testthat 3e.
x <- ip_address(c("192.168.0.1", "2001:db8::8a2e:370:7334", NA))
test_that("binary encoding/decoding works", {
  # Non-address input is rejected; the exact error text is snapshotted.
  expect_snapshot(error = TRUE, {
    ip_to_binary("hello")
  })
  # binary_to_ip() expects a character vector, not an ip_address vector.
  expect_snapshot(error = TRUE, {
    binary_to_ip(x)
  })
  expect_type(ip_to_binary(x), "character")
  # 32 bits for IPv4, 128 bits for IPv6; NA propagates as NA_character_.
  expect_equal(ip_to_binary(x), c(
    "11000000101010000000000000000001",
    "00100000000000010000110110111000000000000000000000000000000000000000000000000000100010100010111000000011011100000111001100110100",
    NA_character_
  ))
  # Round trip: decoding the encoding recovers the original addresses.
  expect_equal(binary_to_ip(ip_to_binary(x)), x)
  # zero padding
  expect_equal(ip_to_binary(ip_address("0.0.0.0")), strrep("0", 32))
  expect_equal(ip_to_binary(ip_address("::")), strrep("0", 128))
  # Malformed binary strings produce warnings (not errors): wrong characters,
  # too few bits, too many bits.
  expect_warning(binary_to_ip(strrep("a", 32)), "contains non-binary characters")
  expect_warning(binary_to_ip("11000000"), "incorrect number of bits")
  expect_warning(binary_to_ip("110000001010100000000000000000010"), "incorrect number of bits")
})
|
3e47e83d8fdf6bd84e739db73d6fe64f88def399
|
3870838ffbe0cde6fb869dcce9c5044b250c5c22
|
/code/sim_simulate_read_count.R
|
e443f0ef0960c6cebf440f3aebf007b49e42a4c4
|
[
"MIT"
] |
permissive
|
liangyy/mixqtl-pipeline
|
d6f9baf9db9ee260bc6bafd29773361e8d58b33e
|
556c983a2e5bb3feaf21a88a72c4a77c98df179d
|
refs/heads/master
| 2023-02-19T07:53:47.135434
| 2020-05-27T02:50:54
| 2020-05-27T02:50:54
| 191,611,597
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,458
|
r
|
sim_simulate_read_count.R
|
# Simulate read counts for the mixQTL simulation pipeline.
#
# Reads simulation parameters (YAML), a simulated gene (RDS) and simulated
# genotypes (RDS), simulates read counts under either a single-SNP design
# (one data set per effect size) or a multi-SNP design (null + alternative),
# and saves the resulting list of data sets as an RDS file.
library(optparse)
option_list <- list(
  make_option(c("-p", "--param"), type="character", default=NULL,
              help="input YAML telling the parameters of simulation",
              metavar="character"),
  make_option(c("-o", "--output_prefix"), type="character", default=NULL,
              help="prefix of output",
              metavar="character"),
  # fix: the --genotype and --gene help strings were swapped (opt$genotype is
  # read as the genotype RDS and opt$gene as the gene RDS below)
  make_option(c("-y", "--genotype"), type="character", default=NULL,
              help="simulated genotype file",
              metavar="character"),
  make_option(c("-g", "--gene"), type="character", default=NULL,
              help="simulated gene file",
              metavar="character"),
  make_option(c("-t", "--type"), type="character", default='single',
              help="specify [single] or [multi] to perform single-snp or multi-snp simulation",
              metavar="character")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)

library(mixqtl)

## Load simulation parameters
param <- yaml::read_yaml(opt$param)
# set.seed(param$seed)

## Pre-fixed parameters
L_read <- param$L_read
if (opt$type == 'single') {
  # NOTE(review): eval(parse()) executes arbitrary R code taken from the YAML
  # config (e.g. a seq(...) expression); acceptable for trusted configs only.
  betas <- log(eval(parse(text = param$betas))) # effect size (log fold change)
}

# Load the gene
gene_param <- readRDS(opt$gene)
gene <- create_gene(gene_param$L_gene, gene_param$library_dist, gene_param$theta_dist,
                    gene_param$f, gene_param$maf, gene_param$p)

# Load genotypes
genotype <- readRDS(opt$genotype)
if (opt$type == 'single') {
  fG <- genotype$fG
  geno <- genotype$genotype
} else if (opt$type == 'multi') {
  # Allele frequencies across both haplotypes; effect sizes are drawn to match
  # the requested genetic variance and number of causal SNPs.
  maf <- colMeans(rbind(genotype$h1, genotype$h2), na.rm = TRUE)
  betas <- create_betas(
    maf = maf,
    genetic_var = c(param$genetic_var_l, param$genetic_var_h),
    ncausal = c(param$ncausal_l, param$ncausal_h)
  )
}

# Simulate reads
if (opt$type == 'single') {
  # One entry per effect size: simulated counts plus the truth (fG, beta).
  # (lapply replaces the original grow-in-a-loop pattern; same result.)
  data_collector <- lapply(betas, function(beta) {
    df <- simulate_read_count(gene, geno, beta, L_read, param$y_dist)
    list(data = df, fG = fG, beta = beta)
  })
} else if (opt$type == 'multi') {
  # Two entries: the null (all-zero betas) and the alternative (drawn betas).
  data_collector <- list(
    simulate_read_count_multi(
      gene = gene,
      genotype = genotype,
      betas = rep(0, length(betas)),
      L_read = L_read,
      param$y_dist
    ),
    simulate_read_count_multi(
      gene = gene,
      genotype = genotype,
      betas = betas,
      L_read = L_read,
      param$y_dist
    )
  )
}
# fix: spell out opt$output_prefix -- the original opt$output only resolved
# through $ partial matching on the parsed-options list.
saveRDS(data_collector, opt$output_prefix)
|
3fe1851cfcf0748d7b44bb38d233e3d163f4b446
|
b5bf29c88403a74dcafcdf75aa1a6460f64133a6
|
/project/plot1.R
|
ff2a45cd4ee4acc5bec29204670cb98d100d9c48
|
[] |
no_license
|
dljvandenberg/coursera_data_analysis
|
611bfa873fd46ed0b36b71cecd60bb0ce222f230
|
c5ee3abb893c9ee78927fde02c0c58f2049f4abe
|
refs/heads/master
| 2021-01-18T13:22:22.079825
| 2015-11-04T15:54:49
| 2015-11-04T15:54:49
| 37,653,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
r
|
plot1.R
|
# plot1.R -- answers: have total emissions from PM2.5 decreased in the United
# States from 1999 to 2008?  Produces plot1.png showing the total PM2.5
# emission from all sources for each of the years 1999, 2002, 2005 and 2008,
# with a fitted linear trend line.

# Work from the project directory
setwd("~/git/coursera_exdata_project_emissions")

# Load the NEI observations and the source classification table
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")

# Sum emissions over all sources within each year
yearly_totals <- aggregate(NEI$Emissions, by = list(NEI$year), FUN = sum)
names(yearly_totals) <- c("year", "total_emissions")

# Scatterplot of the yearly totals plus a linear fit, written to plot1.png
png(filename = "plot1.png")
with(yearly_totals, {
  plot(year, total_emissions, xlab = "year", ylab = "PM2.5 emissions (tons)",
       main = "Yearly PM2.5 emissions USA (measured total)")
  abline(lm(total_emissions ~ year), col = "red", lwd = 1.5)
})
dev.off()
|
8aed2f6a5c3526759e2c168e02bbeb597adda65a
|
e4c1422348ae1cd4aa316aad156fefe59670e776
|
/pkgs/debias/R/MLEw3p_secant.r
|
ace4c5a96470afda2a796879dd347373d495eba6
|
[] |
no_license
|
thomas4912/pkgs
|
161170c78068340a82ddde7535293f812cc77a73
|
8f060661cca439e13b16990bcd2268cc96aac4b3
|
refs/heads/master
| 2021-01-22T07:27:29.068120
| 2017-02-13T12:17:52
| 2017-02-13T12:17:52
| 81,818,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,273
|
r
|
MLEw3p_secant.r
|
## MLEw3p_secant
##
## This is a prototype function pending port to C++ using RcppArmadillo.
## This function optimizes the MLE for the three parameter Weibull distribution
## for a given dataset. Data may contain both failures and suspensions.
## The method of optimization is similar to the best method found to optimize the
## R_squared from MRR fitting for the third parameter. A discrete Newton method, also called
## the secant method is used to identify the root of the derivative of the MLE~t0 function.
## In this case the derivative is numerically determined by a two point method.
## The 3-parameter Weibull MLE optimization is known to present instability with some
## data. This is a particular challenge that this routine has been designed to handle.
## It is expected that when instability is encountered the function will terminate gracefully
## at the point at which MLE fitting calculations fail, providing output of last successful
## calculation.
##
## (C) David Silkworth 2013
##
## This program is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by the
## Free Software Foundation; either version 2, or (at your option) any
## later version.
##
## These functions are distributed in the hope that they will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, a copy is available at
## http://www.r-project.org/Licenses/
## Fit a three-parameter Weibull distribution by maximum likelihood.
## The location parameter t0 is found as the root of d(logLik)/d(t0) using the
## secant (discrete Newton) method; at each candidate t0 the 2-parameter
## Weibull MLE is computed by compiled code (.Call "MLEw2p", package debias).
##
## x       failure times
## s       optional suspension (right-censored) times
## limit   convergence tolerance on the t0 step (also sets the numeric
##         derivative step size)
## listout if TRUE, also return a data.frame tracing each secant iteration
##
## Returns c(Eta, Beta, t0, LL); a "warning" attribute is attached when the
## optimization became unstable (NaN derivative) and iteration stopped early,
## in which case the last successful fit is reported.
MLEw3p_secant<- function (x, s = NULL,limit=10^-5,listout=FALSE) {
  ## two-point derivative function
  ## Approximates d(logLik)/d(t0) by refitting after shifting the data by
  ## 0.1*limit.  The 2p fit comes back as a vector whose elements are used as
  ## c(Eta, Beta, LL) below, so fit[3] is the log-likelihood; the full fit is
  ## attached to the returned slope as attribute "fitpar".
  dLLdx<-function(data,Nf,vstart,limit) {
    fit1<-.Call("MLEw2p", data, Nf,c(vstart, limit), PACKAGE = "debias")
    fit2<-.Call("MLEw2p", data+0.1*limit, Nf,c(vstart, limit), PACKAGE = "debias")
    dLLdx<-(fit1[3]-fit2[3])/(0.1*limit)
    attr(dLLdx,"fitpar")<-fit1
    dLLdx
  }
  # ## a starting position is simply constructed here (quicker than MRR?)
  data<-c(x,s)
  v <- var(log(data))
  shape <- 1.2/sqrt(v)    # rough shape-parameter start value from the log-variance
  vstart <- shape
  Nf<-length(x)           # number of failures; the rest of `data` are suspensions
  warning=FALSE           # flag raised when the secant iteration becomes unstable
  ## Tao Pang's original variable labels from FORTRAN are used where possible
  DL<-limit
  ## Introduce constraints for the 3p Weibull
  C1<-min(x)              # t0 must stay below the smallest failure time
  maxit<-100
  ## initial step is based on min(x)*.1
  DX<-C1*0.1
  X0<-0.0
  istep<-0
  X1<-X0+DX
  if(X1>C1) {X1<-X0+0.9*(C1-X0)}   # keep the candidate t0 strictly below C1
  FX0<-dLLdx(data,Nf,vstart,limit)
  ## introduce a new start estimate based on last fit
  ## (element [2] of the "fitpar" attribute is the Beta estimate)
  vstart<-attributes(FX0)[[1]][2]*0.5
  ## modify data by X1 for next slope reading
  ms<-NULL
  mx<-x-X1
  if(length(s)>0) {
    for(i in 1:length(s) ) {
      # drop suspensions that fall at or below zero after shifting by X1
      if((s[i]-X1)>0 ) {ms<-c(ms,s[i]-X1)}
    }
  }
  FX1<-dLLdx(c(mx,ms),Nf,vstart,limit)
  vstart<-attributes(FX1)[[1]][2]*0.5
  ## FX1 will contain slope sign information to be used only one time to find X2
  D<- abs(FX1-FX0)
  X2<-X1+abs(X1-X0)*FX1/D
  if(X2>C1) {X2<-X1+0.9*(C1-X1)}
  X0<-X1
  X1<-X2
  DX<-X1-X0
  istep<-istep+1
  ## Detail output to be available with listout==TRUE
  DF<-data.frame(steps=istep,root=X0,error=DX,deriv=FX1)
  ## Secant loop: iterate until the t0 step is within tolerance or maxit is hit
  while(abs(DX)>DL&&istep<maxit) {
    FX0<-FX1
    ## modify data by X1 for next slope reading
    ms<-NULL
    mx<-x-X1
    if(length(s)>0) {
      for(i in 1:length(s) ) {
        if((s[i]-X1)>0 ) {ms<-c(ms,s[i]-X1)}
      }
    }
    FX1<-dLLdx(c(mx,ms),Nf,vstart,limit)
    ## graceful termination: if the 2p MLE failed (NaN slope), keep the last
    ## successful reading, flag the instability, and stop iterating
    if(is.nan(FX1)) {
      FX1<-FX0
      warning=TRUE
      break
    }
    vstart<-attributes(FX1)[[1]][2]*0.5
    ## FX1 will contain slope information only one time
    D<- abs(FX1-FX0)
    X2<-X1+abs(X1-X0)*FX1/D
    if(X2>C1) {X2<-X1+0.9*(C1-X1)}
    X0<-X1
    X1<-X2
    DX<-X1-X0
    istep<-istep+1
    DFline<-data.frame(steps=istep,root=X0,error=DX,deriv=FX1)
    DF<-rbind(DF,DFline)
  }
  ## Assemble the result from the last successful 2p fit and the root X0
  fit<-attributes(FX1)[[1]]
  outvec<-c(Eta=fit[1],Beta=fit[2],t0=X0,LL=fit[3])
  if(warning==TRUE) {
    warn="optimization unstable"
    attr(outvec,"warning")<-warn
  }
  if(listout==TRUE) {
    outlist<-list(outvec,DF)
    return(outlist)
  }else{
    return(outvec)
  }
}
|
f593234aab8e710596f40f72269853ff239e8f6f
|
e06fde74d2fdbc3cbbc5038cc27ec80c41d94f05
|
/4.1 - RD plots (rdrobust).R
|
246295bbc0813bd7ecdf6c62e454db7f9c2b97d3
|
[] |
no_license
|
joseeduardo-gs/do-political-parties-matter
|
a55039b8136342ac532abed07ca8b4c6721e95d7
|
db62e331ca306e86dd9aa7af8090fd97951d1a20
|
refs/heads/main
| 2023-03-19T09:24:48.672246
| 2021-03-16T02:34:14
| 2021-03-16T02:34:14
| 348,171,201
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,290
|
r
|
4.1 - RD plots (rdrobust).R
|
############################################################################################################
# EMPIRICAL ANALYSIS - RD PLOTS
#
# Regression-discontinuity plots (rdrobust::rdplot) of municipal fiscal
# outcomes against the left-wing margin of victory.  Plots are drawn for the
# outcomes in levels and in logs and saved as PNGs; a final section draws
# (without saving) the plots selected for the dissertation text.
# All plots use 30 bins and a cubic (p = 3) global polynomial fit.
############################################################################################################
rm(list = ls())
# Packages:
library(dplyr)
library(ggplot2)
library(tidyr)
library(readr)
library(stringr)
library(htmltools)
library(htmlwidgets)
library(zeallot)
library(readxl)
library(gdata)
library(stringi)
library(Hmisc)
library(gmodels)
library(export)
library(ggpubr)
# RDD-specific packages:
library(rdd)      # most basic RDD package
library(rddtools) # placebo/sensitivity tests and parametric estimation
library(rdrobust) # most comprehensive; provides rdplot(), used throughout
library(rddapp)   # Shiny interface, reduces the need to program
# Options: disable scientific notation (restore the default with options(scipen = 1))
options(scipen = 999)
# Directory holding the analysis-ready data sets:
setwd("C:/Users/joseg_000.PC-JE/Documents/Dissertação - Dados/Bases de Dados Prontas")
# ----------------------------------------------------------------------------------------------------------------------------------
# Load the data set (CHOOSE THE ONE TO USE)
# load("bases_prontas_ECD")
load("bases_prontas_ED")
# Directory where the generated images are saved:
# setwd("C:/Users/joseg_000.PC-JE/Google Drive/FGV-EPGE/Dissertação/Imagens/Partidos de Esquerda, Centro e Direita/Rdplots")
setwd("C:/Users/joseg_000.PC-JE/Google Drive/FGV-EPGE/Dissertação/Imagens/Partidos de Esquerda e Direita/Rdplots")
# Inspect which parties are included:
table(base_pronta_mandato$ideologia_partido_eleito)
summarytools::freq(base_pronta_mandato$ideologia_partido_eleito)
summarytools::descr(base_pronta_mandato$margem_vitoria_esquerda, transpose = TRUE, stats = "common", round.digits = 3)
# -------------------------------------------------------------------------------------------------------
# GRAPHICAL ANALYSIS OF THE CAUSAL EFFECT
# -------------------------------------------------------------------------------------------------------
# The plots relate the forcing variable (left margin of victory) to each
# outcome of interest, as a first look for evidence of a causal effect of
# electing a left-wing party.
attach(base_pronta_mandato)

# Helper: draw one RD plot (30 bins, cubic fit) and save it to `file` as a PNG.
# `y` and `subset` are evaluated at the call site, so with base_pronta_mandato
# attached the outcome columns can be passed by name.
save_rdplot <- function(file, y, ylab, subset = NULL) {
  png(file, width = 1600, height = 837)
  rdplot(y = y, x = margem_vitoria_esquerda, subset = subset, nbins = 30, p = 3,
         col.lines = "red", x.label = "Left Vote share margin of victory",
         y.label = ylab, title = "")
  dev.off()
}

# Helper: same plot drawn on the current device (not saved).
show_rdplot <- function(y, ylab, subset = NULL) {
  rdplot(y = y, x = margem_vitoria_esquerda, subset = subset, nbins = 30, p = 3,
         col.lines = "red", x.label = "Left Vote share margin of victory",
         y.label = ylab, title = "")
}

# RD plots -- outcomes in levels ----
# Spending and revenue:
save_rdplot("despesa_geral_pc.png", despesa_geral_pc, "Total expenditures per capita")
save_rdplot("despesa_geral_pib.png", despesa_geral_pib, "Total expenditures as a share of income")
save_rdplot("total_receitas_pc.png", total_receitas_pc, "Current revenues per capita")
save_rdplot("total_receitas_pib.png", total_receitas_pib, "Current revenues as a share of income")
save_rdplot("receita_tributaria_pc.png", receita_tributaria_pc, "Tax revenues per capita")
save_rdplot("receita_tributaria_pib.png", receita_tributaria_pib, "Tax revenues as a share of income")
# Public employment:
save_rdplot("total_servidores_mil.png", total_servidores_mil, "Total Public Employees (per 1000 residents)")
save_rdplot("servidores_comissionados_mil.png", servidores_comissionados_mil, "Employees holding commissioned positions (per 1000 residents)")
# Spending composition:
save_rdplot("educacao_pc.png", educacao_pc, "Expenditures with education per capita")
save_rdplot("educacao_pib.png", educacao_pib, "Expenditures with education as a share of income")
save_rdplot("educacao_prop.png", educacao_prop, "Expenditures with education (share of total)")
save_rdplot("saude_pc.png", saude_pc, "Expenditures with health per capita")
save_rdplot("saude_pib.png", saude_pib, "Expenditures with health as a share of income")
save_rdplot("saude_prop.png", saude_prop, "Expenditures with health (share of total)")
save_rdplot("assistencia_social_pc.png", assistencia_social_pc, "Expenditures with social assistance per capita")
save_rdplot("assistencia_social_pib.png", assistencia_social_pib, "Expenditures with social assistance as a share of income")
save_rdplot("assistencia_social_prop.png", assistencia_social_prop, "Expenditures with social assistance (share of total)")

# RD plots -- government-size outcomes transformed to logs ----
# (a small offset is added where the outcome can be exactly zero; the *_prop
# shares are kept in levels, matching the original analysis)
save_rdplot("log_despesa_geral_pc.png", log(despesa_geral_pc), "Log total expenditures per capita")
save_rdplot("log_despesa_geral_pib.png", log(despesa_geral_pib), "Log total expenditures as a share of income")
save_rdplot("log_total_receitas_pc.png", log(total_receitas_pc), "Log total revenues per capita")
save_rdplot("log_total_receitas_pib.png", log(total_receitas_pib), "Log total revenues as a share of income")
save_rdplot("log_receita_tributaria_pc.png", log(receita_tributaria_pc), "Log tax revenues per capita")
save_rdplot("log_receita_tributaria_pib.png", log(receita_tributaria_pib), "Log tax revenues as a share of income")
save_rdplot("log_total_servidores_mil.png", log(total_servidores_mil + 0.00001), "Log total public employees (per 1000 residents)")
save_rdplot("log_servidores_comissionados_mil.png", log(servidores_comissionados_mil + 0.00001), "Log total employees holding commissioned positions (per 1000 residents)")
save_rdplot("log_educacao_pc.png", log(educacao_pc), "Log expenditures with education per capita")
save_rdplot("log_educacao_pib.png", log(educacao_pib), "Log expenditures with education as a share of income")
save_rdplot("log_educacao_prop.png", educacao_prop, "Expenditures with education (share of total)")
save_rdplot("log_saude_pc.png", log(saude_pc), "Log expenditures with health per capita")
save_rdplot("log_saude_pib.png", log(saude_pib), "Log expenditures with health as a share of income")
save_rdplot("log_saude_prop.png", saude_prop, "Expenditures with health (share of total)")
save_rdplot("log_assistencia_social_pc.png", log(assistencia_social_pc + 0.00001), "Log expenditures with social assistance per capita")
save_rdplot("log_assistencia_social_pib.png", log(assistencia_social_pib + 0.00001), "Log expenditures with social assistance as a share of income")
save_rdplot("log_assistencia_social_prop.png", assistencia_social_prop, "Expenditures with social assistance (share of total)")

##########################################################################################################
# RD plots selected for the final text (drawn on screen only, not saved) ----
# ECD sample:
show_rdplot(log(despesa_geral_pc), "Log total expenditures per capita", subset = log(despesa_geral_pc) > 7.15)
show_rdplot(log(total_receitas_pc), "Log total revenues per capita")
show_rdplot(log(receita_tributaria_pc), "Log tax revenues per capita")
show_rdplot(log(servidores_comissionados_mil + 0.00001), "Log commission employees (per 1000 residents)")
show_rdplot(educacao_prop, "Expenditures with education (share of total)")
show_rdplot(saude_prop, "Expenditures with health (share of total)")
show_rdplot(assistencia_social_prop, "Expenditures with social assistance (share of total)")
show_rdplot(urbanismo_prop, "Expenditures with urbanism (share of total)")
# ED sample:
show_rdplot(log(despesa_geral_pc), "Log total expenditures per capita", subset = log(despesa_geral_pc) > 7.15)
show_rdplot(log(total_receitas_pc), "Log total revenues per capita", subset = log(total_receitas_pc) > 7.88)
show_rdplot(log(receita_tributaria_pc), "Log tax revenues per capita")
show_rdplot(log(servidores_comissionados_mil + 0.00001), "Log commission employees (per 1000 residents)")
show_rdplot(educacao_prop, "Expenditures with education (share of total)")
show_rdplot(saude_prop, "Expenditures with health (share of total)")
show_rdplot(assistencia_social_prop, "Expenditures with social assistance (share of total)")
show_rdplot(urbanismo_prop, "Expenditures with urbanism (share of total)")
detach(base_pronta_mandato)
|
ace96071e7950a4bcff94201767dd7057ef58ee0
|
d4d2d370f8cb50e002a3489d2e2b9186651ef92f
|
/inst/scripts/analysis/plots/hbonds/AHdist_CXL_ARG_bfac20.R
|
f010a26144c731b06f4380232b72ea3b26b781d4
|
[] |
no_license
|
momeara/RosettaFeatures
|
2c45012b042a76b0176a0924f1cc60fe3ba06e8b
|
2700b0735071971bbd2af91a6b1e7454ceeaa2a6
|
refs/heads/master
| 2021-01-19T03:54:05.386349
| 2017-03-24T14:07:21
| 2017-03-24T14:07:21
| 47,008,643
| 1
| 3
| null | 2016-06-16T23:00:32
| 2015-11-28T03:28:34
|
R
|
UTF-8
|
R
| false
| false
| 7,942
|
r
|
AHdist_CXL_ARG_bfac20.R
|
# -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(ggplot2)
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "AHdist_CXL_ARG_bfac20",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("StructureFeatures", "HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
sele <-"
CREATE TEMPORARY TABLE max_residue_bfactors AS
SELECT
hb_pdb_site.struct_id as struct_id,
hb_pdb_site.resNum as resNum,
MAX( hb_pdb_site.heavy_atom_temperature ) as max_temp
FROM
hbond_sites_pdb as hb_pdb_site
GROUP BY
hb_pdb_site.struct_id,
hb_pdb_site.resNum;
CREATE TEMPORARY TABLE sc_hbond_card AS
SELECT
don_site.struct_id AS struct_id,
don_site.resNum AS don_resNum,
acc_site.resNum AS acc_resNum,
COUNT( hbond.hbond_id ) AS num_sc_hbs
FROM
hbonds as hbond,
hbond_sites as don_site,
hbond_sites as acc_site
WHERE
hbond.struct_id = don_site.struct_id and hbond.don_id = don_site.site_id and
hbond.struct_id = acc_site.struct_id and hbond.acc_id = acc_site.site_id and
don_site.HBChemType != 'hbdon_PBA' AND acc_site.HBChemType != 'hbacc_PBA'
GROUP BY
hbond.struct_id,
don_site.resNum,
acc_site.resNum;
CREATE TEMPORARY TABLE arg_cxl_hbonds AS
SELECT
hbond.hbond_id AS hbond_id,
hbond.struct_id AS struct_id,
hbond.don_id AS don_id,
hbond.acc_id AS acc_id
FROM
hbonds AS hbond,
hbond_sites AS don_site,
hbond_sites AS acc_site
WHERE
hbond.struct_id = don_site.struct_id AND hbond.don_id = don_site.site_id AND
hbond.struct_id = acc_site.struct_id AND hbond.acc_id = acc_site.site_id AND
(don_site.HBChemType = 'hbdon_GDE' OR don_site.HBChemType = 'hbdon_GDH' ) AND
acc_site.HBChemType = 'hbacc_CXL';
CREATE TEMPORARY TABLE arg_cxl_hbond_temps AS
SELECT
hbond.hbond_id AS hbond_id,
hbond.struct_id AS struct_id,
hbond.don_id AS don_id,
hbond.acc_id AS acc_id,
don_site_pdb.resNum AS don_resNum,
acc_site_pdb.resNum AS acc_resNum,
don_max_temp.max_temp AS don_temp,
acc_max_temp.max_temp AS acc_temp
FROM
arg_cxl_hbonds AS hbond,
hbond_sites_pdb AS don_site_pdb,
hbond_sites_pdb AS acc_site_pdb,
max_residue_bfactors AS don_max_temp,
max_residue_bfactors AS acc_max_temp
WHERE
hbond.struct_id = don_site_pdb.struct_id AND hbond.don_id = don_site_pdb.site_id AND
hbond.struct_id = acc_site_pdb.struct_id AND hbond.acc_id = acc_site_pdb.site_id AND
don_max_temp.struct_id = hbond.struct_id AND don_max_temp.resNum = don_site_pdb.resNum AND
acc_max_temp.struct_id = hbond.struct_id AND acc_max_temp.resNum = acc_site_pdb.resNum;
SELECT
structure.tag,
don_site_pdb.chain, don_site_pdb.resNum, don_site_pdb.iCode,
acc_site_pdb.chain, acc_site_pdb.resNum, acc_site_pdb.iCode,
geom.AHdist,
n_sc_hbonds.num_sc_hbs
FROM
arg_cxl_hbond_temps AS hbond,
hbond_geom_coords AS geom,
sc_hbond_card as n_sc_hbonds,
hbond_sites_pdb AS don_site_pdb,
hbond_sites_pdb AS acc_site_pdb,
structures AS structure
WHERE
hbond.don_temp < 20 AND hbond.acc_temp < 20 AND
n_sc_hbonds.struct_id = hbond.struct_id AND
n_sc_hbonds.don_resNum = hbond.don_resNum AND n_sc_hbonds.acc_resNum = hbond.acc_resNum AND
geom.struct_id = hbond.struct_id AND geom.hbond_id = hbond.hbond_id AND
don_site_pdb.struct_id = hbond.struct_id AND don_site_pdb.site_id = hbond.don_id AND
acc_site_pdb.struct_id = hbond.struct_id AND acc_site_pdb.site_id = hbond.acc_id AND
structure.struct_id = hbond.struct_id;"
#SELECT
# geom.AHdist,
# n_sc_hbonds.num_sc_hbsFROM
# hbonds AS hbond
# JOIN hbond_sites AS don_site ON hbond.struct_id = don_site.struct_id AND hbond.don_id = don_site.site_id
# JOIN hbond_sites AS acc_site ON hbond.struct_id = acc_site.struct_id AND hbond.acc_id = acc_site.site_id
# JOIN hbond_sites_pdb AS don_site_pdb ON hbond.struct_id = don_site_pdb.struct_id AND hbond.don_id = don_site_pdb.site_id
# JOIN hbond_sites_pdb AS acc_site_pdb ON hbond.struct_id = acc_site_pdb.struct_id AND hbond.acc_id = acc_site_pdb.site_id
# JOIN max_residue_bfactors AS don_temperature ON don_site_pdb.struct_id AND don_site_pdb.resNum = don_temperature.resNum
# JOIN max_residue_bfactors AS acc_temperature ON acc_site_pdb.struct_id AND acc_site_pdb.resNum = acc_temperature.resNum,
# sc_hbond_card AS n_sc_hbonds,
# hbond_geom_coords AS geom
#WHERE
# (don_site.HBChemType = 'hbdon_GDE' OR don_site.HBChemType = 'hbdon_GDH') AND
# acc_site.HBChemType = 'hbacc_CXL' AND
# don_temperature.max_temp < 20 AND
# acc_temperature.max_temp < 20 AND
# hbond.acc_id = acc_site.site_id AND
# n_sc_hbonds.struct_id = hbond.struct_id AND
# n_sc_hbonds.don_resNum = don_site.resNum AND
# n_sc_hbonds.acc_resNum = acc_site.resNum AND
# hbond.struct_id = geom.struct_id AND
# hbond.hbond_id = geom.hbond_id;"
f <- query_sample_sources(sample_sources, sele)
sele <- "
DROP TABLE max_residue_bfactors;
DROP TABLE sc_hbond_card;
DROP TABLE arg_cxl_hbonds;
DROP TABLE arg_cxl_hbond_temps;"
query_sample_sources(sample_sources, sele, warn_zero_rows=F)
# PRESERVE THIS
#SELECT
# geom.AHdist,
# n_sc_hbonds.num_sc_hbs
#FROM
# hbonds AS hbond,
# hbond_sites AS don_site,
# hbond_sites AS acc_site,
# hbond_sites_pdb AS don_site_pdb,
# hbond_sites_pdb AS acc_site_pdb,
# sc_hbond_card AS n_sc_hbonds,
# max_residue_bfactors AS don_temperature,
# max_residue_bfactors as acc_temperature,
# hbond_geom_coords AS geom
#WHERE
# hbond.struct_id = don_site.struct_id AND
# hbond.don_id = don_site.site_id AND
# hbond.struct_id = acc_site.struct_id AND
# (don_site.HBChemType = 'hbdon_GDE' OR don_site.HBChemType = 'hbdon_GDH') AND
# acc_site.HBChemType = 'hbacc_CXL' AND
# don_site_pdb.struct_id = hbond.struct_id AND don_site_pdb.site_id = hbond.don_id AND
# acc_site_pdb.struct_id = hbond.struct_id AND acc_site_pdb.site_id = hbond.acc_id AND
# hbond.struct_id = don_temperature.struct_id AND don_site_pdb.resNum = don_temperature.resNum AND
# hbond.struct_id = acc_temperature.struct_id AND acc_site_pdb.resNum = acc_temperature.resNum AND
# don_temperature.max_temp < 20 AND
# acc_temperature.max_temp < 20 AND
# hbond.acc_id = acc_site.site_id AND
# n_sc_hbonds.struct_id = hbond.struct_id AND
# n_sc_hbonds.don_resNum = don_site.resNum AND
# n_sc_hbonds.acc_resNum = acc_site.resNum AND
# hbond.struct_id = geom.struct_id AND
# hbond.hbond_id = geom.hbond_id;
#f$don_chem_type_name <- don_chem_type_name_linear(f$don_chem_type)
#f$acc_chem_type_name <- acc_chem_type_name_linear(f$acc_chem_type)
#f <- na.omit(f, method("r"))
dens <- estimate_density_1d(
f, c("sample_source", "num_sc_hbs" ),
"AHdist", weight_fun = radial_3d_normalization)
plot_id <- "AHdist_CXL_ARG_bcut20"
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=x, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
facet_wrap( ~ num_sc_hbs, nrow=1 ) +
ggtitle("Hydrogen Bonds A-H Distance between ASP/GLU and ARG with Bfactors < 20\nnormalized for equal weight per unit distance\nBy Number of sc hbonds between the acceptor and donor residues") +
scale_y_continuous("FeatureDensity", limits=c(0,6), breaks=c(1,3,5)) +
scale_x_continuous(expression(paste('Acceptor -- Proton Distance (', ring(A), ')')), limits=c(1.4,2.7), breaks=c(1.6, 1.9, 2.2, 2.6))
if(nrow(sample_sources) <= 3){
p <- p + theme(legend.position="bottom", legend.direction="horizontal")
}
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
})) # end FeaturesAnalysis
|
8cdc24fe1476a21231648bbb18423fd4f9b2c7e5
|
8dc0cbdcbe15c8e048649184ffc53f6ee6d3fc6f
|
/man/hinv_matrix_asreml.Rd
|
b1277427ad38f3867e8c5a19bdf8289dee4793ba
|
[] |
no_license
|
DPCscience/GS
|
47585e70fb75c8f76da8a6cd96359ac47433ca9e
|
f560b6ca96085f3772df87fb9add5756e31cc077
|
refs/heads/master
| 2021-01-17T06:15:14.574881
| 2018-10-29T07:35:54
| 2018-10-29T07:35:54
| 47,627,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,693
|
rd
|
hinv_matrix_asreml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hinv_matrix_asreml.R
\name{hinv_matrix_asreml}
\alias{hinv_matrix_asreml}
\title{Build the H-inverse-matrix from G-inverse-matrix and pedigree-full and pedigree-genotype}
\usage{
hinv_matrix_asreml(M012, ped_full)
}
\arguments{
\item{ped_full}{It contains the full pedigree, it has three columns:ID,Sire,Dam}
\item{G_mat}{It is the matrix which has rownames and colnames(ID)}
\item{ped_geno}{It contains the pedigree that has the genotype, it is a part of the ped_full pedigree}
}
\value{
The H-inverse-matrix form the formula
}
\description{
Build the H-inverse-matrix from G-inverse-matrix and pedigree-full and pedigree-genotype
}
\examples{
# paper:Legarra A, Aguilar I, Misztal I. A relationship matrix including full pedigree and genomic information.[J]. Journal of Dairy Science, 2009, 92(9):4656-63.
ped_full <- data.frame(ID=9:17,Sire=c(1,3,5,7,9,11,11,13,13),Dam=c(2,4,6,8,10,12,4,15,14))
ped_full
G <- matrix(0.7,4,4)
diag(G) <- 1
rownames(G) <- colnames(G) <- 9:12
G
#another example
library(MASS)
animal <- 13:26
data.11.1 <- data.frame(animal,
sire = c(0,0,13,15,15,14,14,14,1,14,14,14,14,14),
dam = c(0,0,4,2,5,6,9,9,3,8,11,10,7,12),
mean = rep(1,length(animal)),
EDC = c(558,722,300,73,52,87,64,103,13,125,93,66,75,33),
fat_DYD = c(9.0,13.4,12.7,15.4,5.9,7.7,10.2,4.8,7.6,8.8,9.8,9.2,11.5,13.3),
SNP1 = c(2,1,1,0,0,1,0,0,2,0,0,1,0,1),
SNP2 = c(0,0,1,0,1,1,0,1,0,0,1,0,0,0),SNP3 = c(1,0,2,2,1,0,1,1,0,0,1,0,0,1),
SNP4 = c(1,0,1,1,2,1,1,0,0,1,0,0,1,1),
SNP5 = c(0,0,1,0,0,0,0,0,0,1,0,1,1,0),
SNP6 = c(0,2,0,1,0,2,2,1,1,2,1,1,2,2),
SNP7 = c(0,0,0,0,0,0,0,0,2,0,0,0,0,0),
SNP8 = c(2,2,2,2,2,2,2,2,2,2,2,2,2,1),
SNP9 = c(1,1,1,2,1,2,2,2,1,0,2,0,1,0),
SNP10 = c(2,0,2,1,2,1,0,0,2,0,1,0,0,0))
rm(list="animal")
animal <- 1:26
sire <- c(rep(0,12), data.11.1$sire)
dam <- c(rep(0,12), data.11.1$dam)
ped_full <- data.frame(animal, sire, dam)
rm(list=c("animal","dam","sire"))
M <- data.11.1[6:14, c(1, 7:16)]
rownames(M) <- M[, 1]
M012 <- as.matrix(M[, -1])
hinv_matrix_asreml(M012,ped_full) \%>\% round(3)
}
|
f03bec02fa644f0e2d9d418806d4f275de4a0f61
|
a98671b0ad15490cdc724c2f06c533ebe45038d5
|
/examples/bar-chart.R
|
7be35add198263edba2ab5527594e4121466f1ed
|
[] |
no_license
|
datahub/ggmjs
|
32efd19d622853b542417a2faacb9cf6188f3b98
|
a4ac60e6f49289d580d10ed7ee60e9ae5c7bda01
|
refs/heads/master
| 2021-09-25T17:24:00.940716
| 2018-10-24T15:31:23
| 2018-10-24T15:31:23
| 103,697,717
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
bar-chart.R
|
# Recreate the "long term unemployed" chart from the economics dataset,
# then derive a second version restyled with the ggmjs theme.
library(tidyverse)
library(ggmjs)

# Base chart: median unemployment duration (weeks) over time.
plot_original <- ggplot(economics, aes(x = date, y = uempmed)) +
  geom_line(color = 'steelblue') +
  labs(x = '', y = '',
       title = 'The long term unemployed',
       subtitle = 'Median duration of unemployment, in weeks, 1967 to 2015')

# Same chart with the ggmjs theme applied.
plot_mjs <- plot_original +
  theme_mjs()
|
061f3f91b3f2a8156ae5b47ba5d39e96531f6379
|
b471661cb2023ea82010fa46af1562e683bf645d
|
/Wang/final_project.R
|
2afa1e8c617356be211514b263a01c20146d7c1e
|
[] |
no_license
|
jrm365/Final
|
f1d7f97ce9865e655b24914732a9a6d146447126
|
65c088d8c0a0a4af2ecc08bbc53d8f564e80a315
|
refs/heads/master
| 2020-04-12T01:08:50.091875
| 2018-12-18T03:32:45
| 2018-12-18T03:32:45
| 162,222,972
| 0
| 0
| null | 2018-12-18T03:12:23
| 2018-12-18T03:12:23
| null |
UTF-8
|
R
| false
| false
| 4,827
|
r
|
final_project.R
|
# MATH 110 Final Project #

# Load the Titanic passenger data; keep strings as plain characters so the
# recoding in survival_title()/survival_correlation() works on characters.
titanic <- read.csv("titanic.csv", header = TRUE, stringsAsFactors = FALSE)

# Install the plotting helpers only when missing: an unconditional
# install.packages() re-downloads on every run and fails without a mirror.
if (!requireNamespace("corrplot", quietly = TRUE)) install.packages("corrplot")
if (!requireNamespace("ggthemes", quietly = TRUE)) install.packages("ggthemes")

library(ggplot2)
library(ggthemes)
library(dplyr)
library(corrplot)
# Bar chart of died vs. survived counts for one categorical feature.
#
# df:       data frame with a 0/1 `Survived` column.
# variable: name (string) of the column to split counts by.
#
# Prints the plot as a side effect.
survival_factor <- function(df, variable)
{
  # Count passengers per (feature level, outcome) pair. group_by_() is
  # deprecated; group_by_at() accepts the column name as a string.
  survive <- df %>%
    mutate(Class = ifelse(Survived == 0, 'Died', 'Survived')) %>%
    group_by_at(c(variable, 'Class')) %>%
    summarise(count = n())

  # Refer to columns through aes()/.data rather than survive$..., so
  # ggplot evaluates them inside the plot data as intended.
  p <- ggplot(survive, aes(y = count, x = .data[[variable]],
                           color = Class,
                           fill = Class))
  p <- p + geom_bar(stat = "identity", position = "dodge") +
    labs(color = "Class", fill = "Class",
         y = "Number of Passenger", x = variable)
  print(p)
}
# One-way survival breakdowns by passenger class (Pclass), sex, and Parch.
survival_factor(titanic, 'Pclass')
survival_factor(titanic, 'Sex')
survival_factor(titanic, 'Parch')
# Mean ticket fare per passenger class.
#
# df: data frame with `Pclass` and numeric `Fare` columns.
#
# Returns a data frame with one row per class present in the data
# (ascending order) and columns Class / Avg.Fare. Generalized from the
# original hard-coded classes 1:3 to whatever classes actually occur,
# and rewritten without the dplyr dependency.
class_fare <- function(df) {
  classes <- sort(unique(df$Pclass))
  m <- vapply(classes,
              function(cl) mean(df$Fare[df$Pclass == cl]),
              numeric(1))
  fare_df <- data.frame(Class = classes, Avg.Fare = m,
                        stringsAsFactors = FALSE)
  return(fare_df)
}
# Survival counts by one feature (x axis), faceted by a second feature.
#
# df: data frame with a `Survived` column.
# f1: column name (string) plotted on the x axis.
# f2: column name (string) used for the facet columns.
#
# The original hard-coded the "Pclass"/"Sex" labels even though f1/f2 are
# parameters, and spliced df[[f2]] into the facet formula; labels and the
# facet specification are now built from the arguments.
survival_two_factors <- function(df, f1, f2) {
  ggplot(df, aes(.data[[f1]], fill = factor(Survived))) +
    geom_bar(stat = "count") +
    theme_few() +
    xlab(f1) +
    facet_grid(reformulate(f2, response = ".")) +   # e.g. . ~ Sex
    ylab("Count") +
    scale_fill_discrete(name = "Survived") +
    ggtitle(paste(f1, "vs", f2, "vs Survived"))
}
survival_two_factors(titanic, 'Pclass', 'Sex')
# Jittered scatter of one column vs. another, colored by survival and
# faceted by a third column.
#
# Age, Sex, Pclass: column names (strings). The original body ignored
# these arguments and referenced the columns by their literal names
# inside aes()/facet_wrap(); the parameters are now honored. The only
# call site passes 'Age','Sex','Pclass', so output is unchanged.
survival_three_factors <- function(df, Age, Sex, Pclass){
  p <- ggplot(df, aes(x = .data[[Age]], y = .data[[Sex]])) +
    geom_jitter(aes(colour = factor(Survived))) +   # jitter adds random noise
    theme_few()
  p <- p + theme(legend.title = element_blank()) +
    facet_wrap(reformulate(Pclass))
  p <- p + labs(x = "Age", y = "Sex",
                title = "Survivor factors: Class vs Sex vs Age")
  p <- p + scale_fill_discrete(name = "Survived") +
    scale_x_continuous(name = "Age", limits = c(0, 81))
  print(p)
}
survival_three_factors(titanic, 'Age', 'Sex', 'Pclass')
# Extract a passenger title from the Name field, collapse synonyms and
# rare titles into broader groups, and plot survival counts per title.
survival_title <- function(df) {
  # Title = text between ", " and the following "." in the name.
  df$Title <- gsub('(.*, )|(\\..*)', '', df$Name)

  officer <- c('Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev')
  royalty <- c('Dona', 'Lady', 'the Countess','Sir', 'Jonkheer')

  # Collapse alternative spellings via a lookup, then group rare titles.
  synonym <- c(Mlle = 'Miss', Ms = 'Miss', Mme = 'Mrs')
  has_syn <- df$Title %in% names(synonym)
  df$Title[has_syn] <- synonym[df$Title[has_syn]]
  df$Title[df$Title %in% royalty] <- 'Royalty'
  df$Title[df$Title %in% officer] <- 'Officer'

  # Surname = text before the first "," or "." (computed for parity with
  # the original script; the plot below does not use it).
  df$Surname <- vapply(df$Name,
                       function(nm) strsplit(nm, split = '[,.]')[[1]][1],
                       character(1))

  fig <- ggplot(df, aes(Title, fill = factor(Survived))) +
    geom_bar(stat = "count") + xlab('Title') + ylab("Count") +
    scale_fill_discrete(name = " Survived") +
    ggtitle("Title vs Survived") +
    theme_few()
  print(fig)
}
survival_title(titanic)
# Survival: Family Size
# Family size = SibSp + Parch + 1; plot survived/died counts side by side
# for each family size.
survival_familysize <- function(df) {
  df$Fsize <- df$SibSp + df$Parch + 1
  fig <- ggplot(df, aes(x = Fsize, fill = factor(Survived))) +
    geom_bar(stat = 'count', position = 'dodge') +
    scale_x_continuous(breaks = c(1:11)) +
    xlab('Family Size') +
    ylab("Count") +
    theme_few() +
    scale_fill_discrete(name = "Survived") +
    ggtitle("Family Size vs Survived")
  print(fig)
}
survival_familysize(titanic)
# Correlation plot between survival and engineered numeric predictors.
#
# df: Titanic data frame. FIX: the original body immediately overwrote the
#     argument with `df <- titanic` (the global), ignoring the caller's
#     data; backward compatible since the only call site passes `titanic`.
#
# FIX: the original used plyr::revalue(), but plyr is never loaded by this
# script, so the function errored at runtime; base lookup vectors now
# produce the same numeric coding directly.
survival_correlation <- function(df) {
  # Family-size category: Alone (1), Small (2-4), Big (5+).
  df$Fsize <- df$SibSp + df$Parch + 1
  df$FsizeD[df$Fsize == 1] <- 'Alone'
  df$FsizeD[df$Fsize < 5 & df$Fsize > 1] <- 'Small'
  df$FsizeD[df$Fsize > 4] <- 'Big'

  # Title extraction/collapse (same rules as survival_title()).
  df$Title <- gsub('(.*, )|(\\..*)', '', df$Name)
  officer <- c('Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev')
  royalty <- c('Dona', 'Lady', 'the Countess','Sir', 'Jonkheer')
  df$Title[df$Title == 'Mlle'] <- 'Miss'
  df$Title[df$Title == 'Ms'] <- 'Miss'
  df$Title[df$Title == 'Mme'] <- 'Mrs'
  df$Title[df$Title %in% royalty] <- 'Royalty'
  df$Title[df$Title %in% officer] <- 'Officer'

  corr_data <- df
  # Map the categorical columns to the same numeric codes the original
  # intended.
  sex_code   <- c(male = 1, female = 2)
  title_code <- c(Mr = 1, Master = 2, Officer = 3,
                  Mrs = 4, Royalty = 5, Miss = 6)
  fsize_code <- c(Small = 1, Alone = 2, Big = 3)
  corr_data$Sex    <- unname(sex_code[corr_data$Sex])
  corr_data$Title  <- unname(title_code[corr_data$Title])
  corr_data$FsizeD <- unname(fsize_code[corr_data$FsizeD])
  corr_data$Pclass   <- as.numeric(corr_data$Pclass)
  corr_data$Survived <- as.numeric(corr_data$Survived)

  corr_data <- corr_data[, c("Survived", "Pclass", "Sex",
                             "FsizeD", "Fare", "Title")]
  str(corr_data)  # print structure, as the original did
  mcorr_data <- cor(corr_data)
  corrplot(mcorr_data, method = "circle")
}
survival_correlation(titanic)
|
bda15751e5bd3f7131d01202ed651b03414162d7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phylosim/examples/getRateParamList.TN93.Rd.R
|
cbaff69422749c7a370c63a960b601cbdac1f0b9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
getRateParamList.TN93.Rd.R
|
# Auto-extracted runnable example for phylosim's getRateParamList() on a
# TN93 substitution model: shows setting/getting the Alpha1/Alpha2/Beta
# rate parameters via accessors and via the virtual field.
library(phylosim)
### Name: getRateParamList.TN93
### Title: Get the rate parameters
### Aliases: getRateParamList.TN93 TN93.getRateParamList
###   getRateParamList,TN93-method
### ** Examples
# create TN93 object
p<-TN93()
# set/get rate parameters
setRateParamList(p,list(
                        "Alpha1"=1,
                        "Alpha2"=2,
                        "Beta"=0.5
))
getRateParamList(p)
# set/get rate parameters via virtual field
p$rateParamList<-list(
                        "Alpha1"=1,
                        "Alpha2"=1,
                        "Beta"=3
)
p$rateParamList
# get object summary
summary(p)
|
afa4e85f8a16617f845c0f594347758b55c0e62d
|
686edc60b01d182de4c1012e15952ecc06d769f4
|
/Linear Regression.R
|
7ca20efd40374cec5f727677da133b1a6b5689bb
|
[] |
no_license
|
jms5049/Data-Science
|
dcbfe345cbd1fd62c4e66d2a1afc1fd93209aff2
|
0730add09ac124f47499127d21a9502d96dab4f9
|
refs/heads/master
| 2020-03-11T18:09:47.189056
| 2018-06-06T05:22:18
| 2018-06-06T05:22:18
| 130,169,336
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,391
|
r
|
Linear Regression.R
|
# Predict used-car price ('toyota.csv') with linear regression, then
# compare against a stepwise-selected model on a held-out test set.
# NOTE: rm(list = ls()) removed -- wiping the user's global environment is
# a side effect scripts should not have.

# set work directory path
setwd("directory path")

# predict price using 'toyota' dataset
rawdata <- read.csv('toyota.csv', header = TRUE, na.strings = "")
rawdata <- subset(rawdata, select = c(Price, Age_08_04, KM, Fuel_Type, HP, Color))

# categorize cars by their fuel type
unique_type <- unique(rawdata$Fuel_Type)

# one-hot encode Fuel_Type into dummy columns named type_<fuel>
type_dummy <- as.data.frame(matrix(0, nrow(rawdata), length(unique_type)))
for (i in seq_along(unique_type)) {
  tmp <- unique_type[i]
  tmp_idx <- which(rawdata$Fuel_Type == tmp)
  type_dummy[tmp_idx, i] <- 1
  colnames(type_dummy)[i] <- sprintf("type_%s", tmp)
}

# reorder so Fuel_Type is first, then drop it in favour of the dummies
rawdata <- rawdata[c(4, 2, 3, 1, 5, 6)]
prdata <- rawdata[, -1]
prdata <- cbind(type_dummy, prdata)

# 70/30 train/test split. NOTE: no set.seed(), so the split -- and all
# downstream MSEs -- differ between runs; add set.seed() to reproduce.
trn_ratio <- 0.7
trn_idx <- sample(seq_len(nrow(prdata)), round(trn_ratio * nrow(prdata)))
tst_idx <- setdiff(seq_len(nrow(prdata)), trn_idx)
trn_data <- prdata[trn_idx, ]
tst_data <- prdata[tst_idx, ]

# linear regression on all predictors
fit_lr <- lm(Price ~ ., data = trn_data)
fit_lr
summary(fit_lr)
pred_lr <- predict(fit_lr, tst_data)
mse_lr <- mean((tst_data$Price - pred_lr)^2)

# stepwise model selection (both directions, AIC-based)
step_lr <- step(fit_lr, direction = "both")
summary(step_lr)
pred_step <- predict(step_lr, tst_data)
mse_step <- mean((tst_data$Price - pred_step)^2)

# plot predictions of both models against actual prices, side by side
par(mfrow = c(1, 2))
plot(tst_data$Price, pred_lr)
plot(tst_data$Price, pred_step)
|
354e69f2c97ec85196fe3e813e80fb5486c1e21d
|
2059668d2c9b4f26e61b876ce65f62c46c95d713
|
/data-raw/glottolog/scripts/cartogram.R
|
0a516b81657c83804be625190ef79a20b15215d4
|
[] |
no_license
|
cysouw/qlcData
|
f8ad6a1625136af898dc93920051b4a81e983e20
|
9d3271aa75876eadea9a119d7c05e1295b10d7e5
|
refs/heads/master
| 2021-12-14T00:05:11.836290
| 2021-12-07T16:55:53
| 2021-12-07T16:55:53
| 44,332,286
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,077
|
r
|
cartogram.R
|
library(qlcVisualize)
library(qlcData)
library(rworldmap)
library(spatstat)
library(maptools)
library(rgdal)
library(rmapshaper)
# glottolog
# Load the glottolog language catalogue and keep only the entries that
# have geographic coordinates; keep rownames aligned for later joins.
data(glottolog)
g <- glottolog
langs <- g[ !is.na(g$longitude) ,]
coords <- langs[, c("longitude","latitude")]
rownames(coords) <- rownames(langs)
# proj4
# scan("http://spatialreference.org/ref/sr-org/gall-peters-orthographic-projection/proj4/", what="character",sep="\n")
# proj4 strings for candidate projections; the lon_wrap/lon_0 variants
# re-centre the map on the Pacific (155) or the Atlantic.
pacific <- "+proj=longlat +lon_wrap=155 +lon_0=155 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +no_defs"
atlantic <- "+proj=longlat +lon_wrap=8 +lon_0=8 +ellps=WGS84 +datum=WGS84 +no_defs"
gall <- "+proj=cea +lon_0=0 +lat_ts=45 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs "
mollweide <- "+proj=moll +lon_wrap=155 +lon_0=155 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs "
mollweide_atlantic <- "+proj=moll +lon_wrap=9 +lon_0=9 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs "
wgs <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# ===== simple worldmap
# w <- getMap("less islands")
# w <- w[-which(w$continent == "Antarctica"), ]
# w <- unionSpatialPolygons(w,rep(1,length(w)))
# some languages are missing from the polygon (8.7 %)
# test <- inside.owin(coords[,1],coords[,2],w = as.owin(w))
# sum(!test)/length(test)
# Corner points of an axis-aligned square centred on `point`
# (length-2 lon/lat vector) with half-width `size`, returned as a
# 4 x 2 matrix ordered counter-clockwise from the lower-right corner.
makePoly <- function(point, size = .1) {
  cx <- point[1]
  cy <- point[2]
  corners <- matrix(c(cx + size, cx + size, cx - size, cx - size,
                      cy - size, cy + size, cy + size, cy - size),
                    ncol = 2)
  return(corners)
}
# add <- sapply(which(!test),function(x){makePoly(as.numeric(coords[x,]))},simplify=F)
# add <- Polygons(sapply(add,Polygon), "add")
# add <- SpatialPolygons(list(add))
# poly <- SpatialPolygons(c(slot(w,"polygons"),slot(add,"polygons")),proj4string = CRS(wgs))
# poly <- spTransform(poly, CRS(pacific))
# this map is manually edited for better visibility
# IDs <- sapply(slot(poly, "polygons"), function(x) slot(x, "ID"))
# df <- data.frame(rep(0, length(IDs)), row.names=IDs)
# poly <- SpatialPolygonsDataFrame(poly, df)
# writeOGR(poly,"../gis", "world2","ESRI Shapefile")
# ========== Edited map
w <- readOGR("gis2/world.shp")
# w <- spTransform(w,CRS(atlantic))
# shifted coordinates!
# coords$longitude[coords$longitude < -172] <- coords$longitude[coords$longitude < -172] + 360
coords$longitude[coords$longitude < -35] <- coords$longitude[coords$longitude < -35] + 360
# test again: less languages are missing from the polygon (1.9 %)
test <- inside.owin(coords[,1],coords[,2],w = as.owin(w))
sum(!test)/length(test)
# add the missing ones
add <- sapply(which(!test),function(x){makePoly(as.numeric(coords[x,]), size = .4)},simplify=F)
add <- Polygons(sapply(add,Polygon, hole=FALSE), "add")
add <- SpatialPolygons(list(add),proj4string = CRS(wgs))
w <- SpatialPolygons(c(w@polygons,add@polygons),proj4string = CRS(wgs))
w <- unionSpatialPolygons(w,rep(1,length(w)))
# voronoi
v <- voronoi(coords,as.owin(w))
# conversion
poly <- as(v, "SpatialPolygons")
proj4string(poly) <- CRS(wgs)
poly <- spTransform(poly, CRS(mollweide))
# cartogram
poly <- SpatialPolygonsDataFrame(poly, data.frame(weight = rep(1, length(poly))))
# this works very slowly and the result has overlapping polygons. not nice
# library(cartogram)
# cart <- cartogram(poly,"weight", threshold = .05, itermax = 60)
# alternative. Parameter "res" influences the strength of the deformation
library(getcartr)
cart <- quick.carto(poly, poly$weight, res = 1000, gapdens = 0.5)
# back conversion
regions <- slot(cart, "polygons")
regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) })
windows <- lapply(regions, as.owin)
map <- tess(tiles = windows)
save(cart,v,langs,map,file="~/Desktop/cart5.Rdata")
# cols <- rep("grey",nrow(langs))
# cols[langs$stock == "Nuclear Trans New Guinea"] <- "blue"
# cols[langs$stock == "Turkic"] <- "red"
# cols[langs$stock == "Arawakan"] <- "green"
# cols[langs$stock == "Atlantic-Congo"] <- "orange"
# cols[langs$stock == "Afro-Asiatic"] <- "black"
# cols[langs$stock == "Mayan"] <- "purple"
# vmap(map,col=cols, border = NA, outer.border = NA)
|
112da895e428f257a1b1715cd92ea2bec83077f1
|
9232abf64672599fdcd2d6ebd86e661b38a36cdd
|
/CS498AML/HW1/HW1-PartB.R
|
a0ed28e4afa54bbe67a47797f99cda5bce4f72e5
|
[] |
no_license
|
pradeepkodam/pradeepkodam.github.io
|
446aad02068389d8345c856a7eb2337d4340cf8e
|
0bc2fc362ea7a173d9275a7f5d52c0c1a093070e
|
refs/heads/main
| 2023-06-26T22:25:59.660407
| 2021-08-01T20:01:59
| 2021-08-01T20:01:59
| 391,725,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,914
|
r
|
HW1-PartB.R
|
# Hand-rolled Gaussian naive Bayes on the Pima Indians diabetes data:
# per-class, per-feature normal log-likelihoods, evaluated over 10 random
# 80/20 train/test splits (split via caret::createDataPartition).
# NOTE(review): the decision rule compares class log-likelihoods only and
# omits the log class-prior term log P(y) -- confirm this is intentional.
setwd('C:/SravansData/mcsds/aml')
wdat<-read.csv('pima-indians-diabetes.data.txt', header=FALSE)
library(klaR)
library(caret)
bigx<-wdat[,-c(9)] # all the features from colum 1 to 8 and 9th contains the label
bigy<-wdat[,9] #9th colum contains the label
nbx<-bigx
# Columns 3,4,6,8 use 0 as a missing-value code; recode those 0s to NA so
# they drop out of the mean/sd estimates below (na.rm=TRUE).
for (i in c(3, 4, 6, 8))
{vw<-bigx[, i]==0 #making the features 3,4,6,8 as NA when they are 0
nbx[vw, i]=NA
}
trscore<-array(dim=10) #declaring an array of 10 for the scores
tescore<-array(dim=10) #declaring the array of 10 for test scores
for (wi in 1:10) # for loop for 10 iteratiors for train and test split
{wtd<-createDataPartition(y=bigy, p=.8, list=FALSE) #Splitling the data by createdatapartition of Cartet package into 80% train and 20% test
ntrbx<-nbx[wtd, ] #nbx data frame has all features
ntrby<-bigy[wtd] # ntrby has the labels
trposflag<-ntrby>0 #trposflag contains true is the label is 1 else it contains false
ptregs<-ntrbx[trposflag, ] #all the rows from ntrbx which has postive label
ntregs<-ntrbx[!trposflag,] #all the rows from ntrbx which has negative label
ntebx<-nbx[-wtd, ] # ntebx contains the test data or the remaning 20% of the data
nteby<-bigy[-wtd] #the actual labels for the test data
ptrmean<-sapply(ptregs, mean, na.rm=TRUE) #calcuating the mean of all the eight features which has the postive label
ntrmean<-sapply(ntregs, mean, na.rm=TRUE) #calculating the mean of the eight features which have negative label
ptrsd<-sapply(ptregs, sd, na.rm=TRUE) #calculating the standard deviation of the eight features which have the postive label
ntrsd<-sapply(ntregs, sd, na.rm=TRUE) # calculating the standard deviation of the eight features which have the negative label
# Gaussian log-likelihood per row (up to an additive constant):
# -(1/2)*sum(z^2) - sum(log(sd)), under each class's fitted parameters,
# first evaluated on the TRAINING rows.
ptroffsets<-t(t(ntrbx)-ptrmean) # subtracting the mean from the training features for postive label
ptrscales<-t(t(ptroffsets)/ptrsd) #divdiving the offsets by standard deviation for postive label
ptrlogs<--(1/2)*rowSums(apply(ptrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd)) # log likelihoods using normal distribution for postive label of diabetes
ntroffsets<-t(t(ntrbx)-ntrmean) # subtracting the mean from the training features for Negative label
ntrscales<-t(t(ntroffsets)/ntrsd) #divdiving the offsets by standard deviation for Negative label
ntrlogs<--(1/2)*rowSums(apply(ntrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd)) # log likelihoods using normal distribution for Negative label of diabetes
lvwtr<-ptrlogs>ntrlogs #rows classfied as diabetes postive by the classifier
gotrighttr<-lvwtr==ntrby #comparing the results with the actual training label
trscore[wi]<-sum(gotrighttr)/(sum(gotrighttr)+sum(!gotrighttr)) # the accuarcy of the classification for the training set
# Same scoring on the held-out TEST rows, reusing the TRAINING means/sds.
pteoffsets<-t(t(ntebx)-ptrmean) #normalize the test data with mean of training
ptescales<-t(t(pteoffsets)/ptrsd) #normalize the test data with the standard deviation of training
ptelogs<--(1/2)*rowSums(apply(ptescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd)) # log likehoods using normal distribution for postive labels of test data
nteoffsets<-t(t(ntebx)-ntrmean) #normalize the test data with mean of training for negative label
ntescales<-t(t(nteoffsets)/ntrsd) #normalize the test data with the standard deviation of training for negative label
ntelogs<--(1/2)*rowSums(apply(ntescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd)) # log likehoods using normal distribution for negative labels of test data
lvwte<-ptelogs>ntelogs #rows classfied as diabetes postive by the classifier for test data
gotright<-lvwte==nteby #comparing the results with the actual testing label
tescore[wi]<-sum(gotright)/(sum(gotright)+sum(!gotright)) # the accuarcy of the classification for the test set
}
print(paste(mean(trscore), "mean accuracy of Training data"))
print(paste(mean(tescore), "mean accuracy of Testting data"))
#> mean(trscore)
#[1] 0.735935
#> mean(tescore)
#[1] 0.7418301
|
4d87b678326743e733d2ff09181f45c25c40112e
|
3c133db1216e38c2e17271e6b8d164451a398728
|
/Function/F_Shuffle_test_sp_model.r
|
d5bfdf29d6b54445cc67ba75643f110358fc104a
|
[] |
no_license
|
ArcticSnow/My-R-script
|
6fec5ff0b13e31e5655d4a98b4adbfc4354d57f8
|
5e0ac4cac6101b2a66a9d6a4467853c0d4a138a8
|
refs/heads/master
| 2021-01-18T18:25:34.652369
| 2013-09-03T19:48:15
| 2013-09-03T19:48:15
| 5,183,733
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,193
|
r
|
F_Shuffle_test_sp_model.r
|
# Base code by Margaret Short, transformed into function by Filhol S. on 3rd MAy 2012
# Do we need a spatial model?
#
# Idea:
#
# 1) Reorder the data many times, calculate and plot empirical
# semivariograms.
#
# 2) Overlay our empirical semivariogram. Compare.
#
# Permutation (shuffle) test for spatial structure: plot the observed
# empirical semivariogram, overlay 100 semivariograms from data shuffled
# over the fixed locations (gray), and redraw the observed curve (black).
# If the observed curve stands out from the gray envelope, a spatial
# model is warranted.
#
# my.vario:   empirical semivariogram of the observed data (geoR::variog)
# my.geodata: geoR geodata object (coords = n x 2 matrix, data = vector)
# trend.tec:  trend specification forwarded to variog()
# titl:       plot title
Test.sp.model <- function(my.vario, my.geodata, trend.tec, titl){
  library(geoR)
  plot(my.vario, main = "", pts.range.cex = c(1, 3), type = 'b',
       cex.lab = 1.5, cex.axis = 1.5)
  title(main = titl)
  my.longs <- my.geodata$coords[, 1]
  my.lats <- my.geodata$coords[, 2]
  my.new.geo <- my.geodata$data
  # BUG FIX: the original used length(my.geodata$coords), which for an
  # n x 2 coordinate matrix is 2*n, so sample() permuted out-of-range
  # indices and my.new.geo[...] filled with NAs. nrow() gives the number
  # of observations.
  n <- nrow(my.geodata$coords)
  for (i in seq_len(100)) {
    my.new.order <- sample(seq_len(n), size = n, replace = FALSE)
    my.reordered <- my.new.geo[my.new.order]
    my.reordered.geo <- as.geodata(cbind(my.longs, my.lats, my.reordered),
                                   coords.col = 1:2, data.col = 3)
    my.v.new <- variog(my.reordered.geo, trend = trend.tec,
                       estimator.type = "modulus")
    lines(my.v.new, col = "gray")
  }
  lines(my.vario, lwd = 3) # overlay our semi-v'gram in thick, black
}
# Example:
# Test.sp.model(my.robust.vario,my.geodata,"2nd","b")
|
c9a8494a94b404a49b1e4bbc2c3d6f131492ed12
|
38f3b03257472a0b2aa5dcbd8382270d8c09571e
|
/man/IntLik-package.Rd
|
964fdd004b899baf53caadcaa8544df57964892e
|
[] |
no_license
|
cran/IntLik
|
d33342cf5a8aec5d88ba89711a692cddb4228a92
|
baab254edbcfbbb35452b3010862dd9bb011c07a
|
refs/heads/master
| 2020-05-31T18:22:16.710340
| 2012-01-25T00:00:00
| 2012-01-25T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
rd
|
IntLik-package.Rd
|
\name{IntLik-package}
\alias{IntLik-package}
\alias{IntLik}
\docType{package}
\title{
Numerical Integration for Integrated Likelihood
}
\description{
This package calculates the integrated likelihood numerically. Given the Likelihood function and the prior function, this package integrates out the nuisance parameters by Metropolis-Hastings (MCMC) Algorithm.
}
\details{
\tabular{ll}{
Package: \tab IntLik\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-01-25\cr
License: \tab GPL\cr
}
}
\author{
Zhenyu Zhao
\email{zhenyuzhao2014@u.northwestern.edu}
}
\references{
Chib, S. and Jeliazkov, I. (2001) Marginal likelihood from the Metropolis-Hastings Output. Journal of the American Statistical Association. 96, 270-281
Severini, T.A. (2007) Integrated likelihood functions for non-Bayesian inference. Biometrika. 94 529-542
}
\keyword{ package }
\examples{
##Integrated Likelihood for Ratio of Normal Mean (Example 2 in Severini 2007)
##Generating Data
n=10
u1=4
u2=1/5
x=rnorm(1,u1,sqrt(1/n))
y=rnorm(1,u2,sqrt(1/n))
##Calculate MLE for the start value
psi_hat=x/y
lambda_hat=(x*psi_hat+y)/(psi_hat^2+1)
#Define prior function
prior=function(lambda,psi){
dnorm((psi^2+1)*lambda/(psi*psi_hat+1),mean=0,sd=1)*(psi^2+1)/(psi*psi_hat+1)
}
#Define Likelihood
L=function(psi,lambda){
L=n/2/pi*exp(-n/2*((x-psi*lambda)^2+(y-lambda)^2))
L
}
#Estimate the Integrated Likelihood evaluated at a sequence of psi
ILik(L,prior, start=lambda_hat, seq(psi_hat-10,psi_hat+10,1), 1, "Normal")
}
|
a83784b6c25333a22ce67a60029bba52a03cc9af
|
d46bf0ba66ac1c27548453d456517c5c8998445a
|
/IndepDyads/R/HRS/1_Variable_Recode.R
|
f59008a07b46d6d23660d9960bf459086f66d9ba
|
[] |
no_license
|
timriffe/IndepDyads
|
a5bf7178783e4399142c6b5ebd82d4cf88e68058
|
6a65a842650cfbc734543be3e3d7f69fc41352f9
|
refs/heads/master
| 2021-06-11T22:33:12.455824
| 2021-06-07T10:41:18
| 2021-06-07T10:41:18
| 123,588,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,144
|
r
|
1_Variable_Recode.R
|
# --------------------------------------
rm(list=ls(all.names = TRUE))
# This script recodes most variables, makes some new ones.
# This is the step after extracting from the RAND version 2016v1
# file in H0_RAND_reshape.R
# --------------------------------------
# source("R/1_Variable_Recode.R")
# Sets working directory for Tim's machines
library(here)
library(lubridate)
library(data.table)
library(tidyverse)
#---------------------------------------------------------
# start utility function preamble
#----------------------------------------------------------
# Recode yes/no survey answers to numeric 0/1: any value whose text
# contains "yes" becomes 1, any containing "no" becomes 0 ("no" wins if
# both match), everything else stays NA. Returned invisibly.
convertYN <- function(x){
  txt <- as.character(x)
  out <- rep(NA, length(txt))
  out[grepl("yes", txt)] <- 1
  out[grepl("no", txt)] <- 0
  invisible(out)
}
#-----------------------------------------------------
# Recode correct/incorrect survey answers (including first/second-try
# variants) to a numeric "fraction incorrect": every flavour of "correct"
# scores 0, "0.incorrect" scores 1, anything else is NA. (Per the TR note
# of Aug 4 2016, second-try answers count as fully correct -- no
# intermediate values.) Returned invisibly.
convertCI <- function(x){
  ans <- as.character(x)
  code <- c("1.correct"          = 0,
            "2.correct, 1st try" = 0,
            "1.correct, 2nd try" = 0,
            "0.incorrect"        = 1)
  invisible(as.numeric(unname(code[ans])))
}
#-----------------------------------------------------
# Recode CESD depression-scale items to binary: 1 = symptomatic ("yes",
# "all/most of the time"), 0 = not symptomatic ("no", "some/none of the
# time" -- per the TR note of Aug 4 2016, "some of the time" counts as 0).
# Unrecognized codes stay NA. Numeric input is returned unchanged.
convertCESD <- function(x){
  # was `class(x) == "numeric"`: is.numeric() is the robust test and also
  # passes integer vectors through unchanged.
  if (is.numeric(x)){
    return(x)
  }
  x <- as.character(x)
  xx <- rep(NA, length(x))
  # Both " " and no-space variants of each answer code appear in the data.
  neg <- c("0.no",
           "4. none or almost none", "4.none or almost none",
           "3. some of the time",    "3.some of the time")
  pos <- c("1.yes",
           "2. most of the time",    "2.most of the time",
           "1. all or almost all",   "1.all or almost all")
  xx[x %in% neg] <- 0
  xx[x %in% pos] <- 1
  xx
}
#-----------------------------------------------------
# Convert every date column -- identified by "_dt" in its name -- from
# SAS-style day counts (origin 1960-01-01) to R Date objects.
# Done column-by-column because a Date-class matrix is not possible, so
# apply() cannot be used. Returned invisibly.
convertDates <- function(Dat){
  date_cols <- grep(pattern = "_dt", colnames(Dat))
  for (j in date_cols){
    Dat[, j] <- as.Date(Dat[, j], origin = "1960-1-1")
  }
  invisible(Dat)
}
#-----------------------------------------------------
# two functions to get exact years lived and left
# Thanatological age: exact decimal years from `Date` until `DeathDate`.
# NA wherever `Date` is missing.
getThanoAge <- function(Date, DeathDate){
  yrs_left <- rep(NA, length(Date))
  known <- !is.na(Date)
  yrs_left[known] <- lubridate::decimal_date(DeathDate[known]) -
    lubridate::decimal_date(Date[known])
  yrs_left
}
# Chronological age: exact decimal years from `BirthDate` to `Date`.
# NA wherever either date is missing.
getChronoAge <- function(Date, BirthDate){
  age <- rep(NA, length(Date))
  known <- !is.na(Date) & !is.na(BirthDate)
  age[known] <- lubridate::decimal_date(Date[known]) -
    lubridate::decimal_date(BirthDate[known])
  age
}
# -------------------------------#
# Weight imputation function #
# see code for annotation #
# -------------------------------#
# Fill in missing (zero) survey weights along one respondent's interview
# history:
#   - all weights zero      -> all NA (nothing to impute from)
#   - one positive weight   -> that weight carried to every interview date
#   - two or more positives -> linear interpolation between observations,
#                              constant extrapolation to the right (rule 2)
# wt:      numeric weight vector, 0 meaning "no valid weight"
# intv_dt: interview dates aligned with wt
imputeWeights <- function(wt, intv_dt){
  valid <- wt > 0
  # No usable weight at all: replace everything with NA.
  if (all(wt == 0)){
    return(NA * wt)
  }
  if (sum(valid) == 1){
    # Single observation: step function carrying it to all dates.
    imputed <- approx(x = intv_dt[valid],
                      y = wt[valid],
                      xout = intv_dt,
                      rule = 1:2,
                      method = "constant",
                      f = .5)$y
  } else {
    # >= 2 observations: interpolate linearly, extrapolate rightward
    # with the last observed value.
    imputed <- approx(x = intv_dt[valid],
                      y = wt[valid],
                      xout = intv_dt,
                      rule = 1:2,
                      method = "linear")$y
  }
  imputed
}
# end utility function preamble
#----------------------------------------------------------
#----------------------------------------------------------
# load in long files from PHC
Dat <- readRDS(here::here("IndepDyads","Data","RAND_2016v1_long.rds"))
varnames <- readRDS( here::here("IndepDyads","Data","varnames.rds"))
# varnames[!varnames %in% colnames(Dat)]
# remove missed interviews
Dat <- Dat[!is.na(Dat$intv_dt), ]
# change all factors to character (to be later recoded in some instances)
factor.columns <- sapply(Dat, is.factor)
# NOTE(review): apply() coerces the selected columns to a character matrix
# here, which is the intent (factor -> character), but it would fail if
# exactly one factor column existed (drop-to-vector) -- confirm.
Dat[,factor.columns] <- apply(Dat[,factor.columns], 2, as.character)
# make sex column easier to use:
Dat$sex <- ifelse(Dat$sex == "1.male","m","f")
# reduce to deceased-only (dead = 1 iff a death date is recorded)
Dat$dead <- ifelse(is.na(Dat$d_dt), 0, 1)
Dat <- Dat[Dat$dead == 1, ]
#(dead_cases <- nrow(Dat)) # 82609
# stats for paper
#nrow(Dat)
#dead_cases / length(unique(Dat$id))
# as of 2016v1: 5.368548
# as of vP: 4.822015 / person on average
#hist(rle(sort(Dat$id))$lengths)
# convert dates to native R format
# (convertDates() is a project helper defined elsewhere, not visible here)
Dat <- convertDates(Dat)
# --------------------------------------------------#
# merge weights (big assumption here: #
# weights in institutions are valid and #
# comparable with weights outside institutions. #
# soooo annoying ppl in institutions don't have #
# comparable weights. #
# --------------------------------------------------#
# TR: update: since for the bootstrap method we resample
# using weights on first appearance, this is innocuous
# (no one starts HRS in nursing home)
Dat$nh_wt[is.na(Dat$nh_wt)] <- 0
Dat$p_wt <- Dat$p_wt + Dat$nh_wt
# --------------------------------------------------#
# now we do weight interpolation/extrapolation #
# --------------------------------------------------#
# data.table grouped update: one imputed weight series per person id
Dat <- data.table(Dat)
Dat <- Dat[, p_wt2 := imputeWeights(p_wt,intv_dt), by = list(id) ]
Dat <- Dat[!is.na(Dat$p_wt2),]
#(cases_with_weights <- nrow(Dat))
#cases_with_weights / length(unique(Dat$id))
# RAND 2016v1: 5.201893
# RAND vP: 4.683211
# --------------------------------------------------#
# calculate thanatological ages #
# --------------------------------------------------#
Dat$ta <- getThanoAge(Dat$intv_dt, Dat$d_dt)
Dat$ca <- getChronoAge(Dat$intv_dt, Dat$b_dt)
# integer lifespan = floor(time lived + time left)
Dat$la_int <- floor(Dat$ta + Dat$ca)
# hist(Dat$ca - (Dat$age /12) )
# there is one individual with an NA b_dt, and NA age,
# but thano age is known
# --------------------------------------------------#
# locate yes/no, correct/incorrect columns #
# --------------------------------------------------#
# Heuristic column detection: a column is treated as yes/no if it has at
# most 4 distinct values, one containing "yes"; correct/incorrect if it
# has at most 5 distinct values, one containing "correct".
YNcols <- apply(Dat, 2, function(x){
xx <- unique(x)
length(xx) <= 4 & any(grepl("yes",xx))
})
CIcols <- apply(Dat, 2, function(x){
xx <- unique(x)
length(xx) <= 5 & any(grepl("correct",xx))
})
# which columns are these anyway?
#colnames(Dat)[YNcols]
#colnames(Dat)[CIcols]
# convert to binary
# (convertYN()/convertCI() are project helpers defined elsewhere)
Dat <- data.frame(Dat)
Dat[YNcols] <- lapply(Dat[YNcols], convertYN)
Dat[CIcols] <- lapply(Dat[CIcols], convertCI)
# Dichotomize self-rated health (srh), self-rated memory (srm) and
# past-memory comparison (pastmem). Coding: 1 = poor outcome, 0 = not,
# NA preserved. "fairpoor" cuts at fair-or-worse; "poor" at poor only.
Dat <- Dat %>% mutate(
  srhfairpoor = case_when(srh == "1.excellent" ~ 0L,
  srh == "2.very good" ~ 0L,
  srh == "3.good" ~ 0L,
  srh == "4.fair" ~ 1L,
  srh == "5.poor" ~ 1L,
  is.na(srh) ~ as.integer(NA)),
  srhpoor = case_when( srh == "1.excellent" ~ 0L,
  srh == "2.very good" ~ 0L,
  srh == "3.good" ~ 0L,
  srh == "4.fair" ~ 0L,
  srh == "5.poor" ~ 1L,
  is.na(srh) ~ as.integer(NA)),
  srmfairpoor = case_when(srm == "1.excellent" ~ 0L,
  srm == "2.very good" ~ 0L,
  srm == "3.good" ~ 0L,
  srm == "4.fair" ~ 1L,
  srm == "5.poor" ~ 1L,
  is.na(srm) ~ as.integer(NA)),
  srmpoor = case_when( srm == "1.excellent" ~ 0L,
  srm == "2.very good" ~ 0L,
  srm == "3.good" ~ 0L,
  srm == "4.fair" ~ 0L,
  srm == "5.poor" ~ 1L,
  is.na(srm) ~ as.integer(NA)),
  # pastmem: 1 = memory reported worse than before
  pastmem = case_when( pastmem == "3.worse" ~ 1L,
  pastmem == "2.same" ~ 0L,
  pastmem == "1.better" ~ 0L,
  is.na(pastmem) ~ as.integer(NA)))
# do cesd questions (1 bad, 0 good)
# (convertCESD() is a project helper defined elsewhere)
cesdquestions <- colnames(Dat)[grepl("cesd", colnames(Dat))]
cesdquestions <- cesdquestions[cesdquestions != "cesd"]
Dat[cesdquestions] <- lapply(Dat[cesdquestions],convertCESD)
# cesd_enjoy is flipped yet again, because 1 is 'yes I enjoyed life',
# i.e. high is good for that item; the mutate below flips it so that
# high = bad, consistent with every other indicator in this script.
# ---------------------------------------------------------------
# various recodings in a mutate call; after this block every variable
# used downstream is in [0, 1] with 1 = worse outcome
Dat <- Dat %>% mutate(# cesd_enjoy and cesd_happy flip so that high is bad
Dat$cesd_enjoy <- NULL
)
# Everything else we estimate prevalence
# not_fit lists identifier, weight, date and raw-scale columns that must
# NOT be modeled as binary prevalence variables.
not_fit <- c("id", "cohort", "sex", "hisp", "race", "b_mo", "b_yr", "b_dt",
"d_mo", "d_yr", "d_dt", "edu_yrs", "m_edu", "f_edu", "b_pl",
"s_wt", "wave", "adl5", "age", "alz", "bmi", "iadl_calc", "div",
"reg", "dem", "iadl5", "intv", "intv_dt", "mar", "srh", "srm",
"nh_wt", "p_wt", "dead", "p_wt2", "ta", "ca", "la_int","normalweight")
varnames_fit <- colnames(Dat)[!colnames(Dat)%in%not_fit]
# let's just make sure this will work:
stopifnot(all(varnames_fit %in% colnames(Dat)))
# make sure all will work as prevalence:
# every fitted variable must have maximum exactly 1 ...
stopifnot(all(
sapply(varnames_fit, function(vn, Dat){
max(Dat[[vn]], na.rm = TRUE)
}, Dat = Dat) == 1)
)
# all in [0,1]
# ... and range exactly [0, 1] (diff of range == 1)
stopifnot(all(
sapply(varnames_fit, function(vn, Dat){
diff(range(na.omit(Dat[[vn]])))
}, Dat = Dat) == 1)
)
# these are the ones that should be fit as of now.
names(varnames_fit) <- NULL
saveRDS(varnames_fit, file = here::here("IndepDyads","Data","varnames_fit.rds"))
# use quasibinom to fit due to these three variables.
#varnames_fit[which(sapply(varnames_fit, function(vn, Dat){
# length(unique(na.omit(Dat[[vn]])))
# }, Dat = Dat) > 2)]
#"vocab" "tm" "twr"
# -------------------
# check cases by wave ( tapering in recent waves because selected down to deaths..)
#checkwaves <- function(var,Dat){
# table(Dat[[var]],Dat[["wave"]])
#}
#checkwaves("adl3_",Dat)
#checkwaves("adl5_",Dat)
#checkwaves("iadl3_",Dat)
#checkwaves("iadl5_",Dat)
#checkwaves("cesd",Dat)
# -------------------
# -------------------------------------------------------
# for binning purposes, akin to 'completed age'
Dat$tafloor <- floor(Dat$ta)
Dat$cafloor <- floor(Dat$ca)
# I guess actual interview date could be some weeks prior to registered
# interview date? There are two negative thano ages at wave 4 otherwise, but
# still rather close. Likely died shortly after interview.
#(Dat[Dat$tafloor < 0, ])
# there is one individual with an erroneous death date (or id!), throwing out.
Dat <- Dat[Dat$ta > -1, ]
# clamp the remaining slightly-negative thano ages into the first bin
Dat$tafloor[Dat$tafloor < 0] <- 0
# We use higher bin widths for purposes of visualizing raw data,
# Just diagnostics. larger widths help cancel out noise. This
# Such binning can be done as an alternative to the loess smoothing,
# where we take weighted means in cells. It'd probably make sense
# to keep the final year of life in a single year width, but the
# general pattern ought to remain visible.
# x - x %% k snaps each integer age down to its k-year bin start
Dat$cafloor2 <- Dat$cafloor - Dat$cafloor %% 2
Dat$tafloor2 <- Dat$tafloor - Dat$tafloor %% 2
Dat$cafloor3 <- Dat$cafloor - Dat$cafloor %% 3
Dat$tafloor3 <- Dat$tafloor - Dat$tafloor %% 3
#----------------------------------------------
# save out, so this doesn't need to be re-run every time
# (overwrites the input file read at the top of this script)
saveRDS(Dat,file = here::here("IndepDyads","Data","RAND_2016v1_long.rds"))
#graphics.off()
# next step would be CreateMatrices.R, usually
#lapply(Dat[varnames_check],unique)
# how many NAs per variable?
#varnames_check[!varnames_check%in%colnames(Dat)]
# NAs <- lapply(Dat[varnames],function(x){
# sum(is.na(x))
# })
#sort(unlist(NAs) / nrow(Dat))
# NOTE(review): wiping the entire workspace and forcing gc() is an
# anti-pattern -- if this script is ever source()d it destroys the
# caller's objects too. Kept as-is; consider removing.
rm(list = ls(all.names = TRUE)) #will clear all objects includes hidden objects.
gc()
# end
# -------------------------------------------------------------------------
# Last checked 7-Nov-2019
# varnames <- c(adl3_ = "adl3", adl5_ = "adl5", iadl3_ = "iadl3", iadl5_ = "iadl5",
# cesd = "cesd", lim_work = "lim_work", srh = "srh", bmi = "bmi",
# back = "back", hosp = "hosp", hosp_stays = "hosp_stays", hosp_nights = "hosp_nights",
# nh = "nh", nh_stays = "nh_stays", nh_nights = "nh_nights", nh_now = "nh_now",
# doc = "doc", doc_visits = "doc_visits", hhc = "hhc", meds = "meds",
# surg = "surg", dent = "dent", shf = "shf", adl_walk = "adl_walk",
# adl_dress = "adl_dress", adl_bath = "adl_bath", adl_eat = "adl_eat",
# adl_bed = "adl_bed", adl_toilet = "adl_toilet", iadl_map = "iadl_map",
# iadl_money = "iadl_money", iadl_meds = "iadl_meds", iadl_shop = "iadl_shop",
# iadl_meals = "iadl_meals", mob = "mob", lg_mus = "lg_mus", gross_mot = "gross_mot",
# fine_mot = "fine_mot", bp = "bp", diab = "diab", cancer = "cancer",
# lung = "lung", heart = "heart", stroke = "stroke", psych = "psych",
# arth = "arth", cc = "cc", alc_ev = "alc_ev", alc_days = "alc_days",
# alc_drinks = "alc_drinks", smoke_ev = "smoke_ev", smoke_cur = "smoke_cur",
# cesd_depr = "cesd_depr", cesd_eff = "cesd_eff", cesd_sleep = "cesd_sleep",
# cesd_lone = "cesd_lone", cesd_sad = "cesd_sad", cesd_going = "cesd_going",
# cesd_enjoy = "cesd_enjoy", srm = "srm", pastmem = "pastmem",
# serial7s = "serial7s", serial7s = "bwc20", name_mo = "name_mo", name_dmo = "name_dmo",
# name_yr = "name_yr", name_dwk = "name_dwk", name_sci = "name_sci",
# name_cac = "name_cac", name_pres = "name_pres", name_vp = "name_vp",
# vocab = "vocab", tm = "tm")
# saveRDS(varnames, file = here::here("IndepDyads","Data","varnames.rds"))
|
03786f79a3fb8a5278320c013c51b2c196cc0bdd
|
e08bb6e9a5dedc51d8ee76449d89f143782f9654
|
/cachematrix.R
|
2e869a6b9aec6fb357e2662c37d512c211072790
|
[] |
no_license
|
Caroliem/ProgrammingAssignment2
|
fc57b921f1a73a8187de10d4ebc9fcb8f5d1a697
|
7d39012c99782b563a56bc8c8ec2f3afc33b4ee0
|
refs/heads/master
| 2021-01-22T00:19:48.602149
| 2014-05-20T10:31:32
| 2014-05-20T10:31:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,278
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a caching wrapper around a matrix `x`.
## Returns a list of four accessor functions (set / get / setinverse /
## getinverse). The cached inverse lives in this call's environment, so
## every wrapper created by this function carries its own independent cache.
makeCacheMatrix <- function(x = matrix()) {
        ## BUG FIX: the cache must be a *local* variable. The original used
        ## `mInv <<- NULL`, which created a single global `mInv`; because
        ## `<<-` in the closures then also resolved to the global, every
        ## cache object shared (and clobbered) one inverse.
        mInv <- NULL
        ## replace the stored matrix; any cached inverse is now stale
        set <- function(y) {
                x <<- y
                mInv <<- NULL
        }
        ## function to get matrix
        get <- function() x
        ## function to set inverse matrix
        setinverse <- function(inverse) mInv <<- inverse
        ## function to get inverse matrix (NULL until set)
        getinverse <- function() mInv
        list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix wrapped by `x` (a list of
## accessors produced by makeCacheMatrix). A previously cached inverse is
## reused; otherwise the inverse is computed with solve() and stored back
## into the cache before being returned.
cacheSolve <- function(x) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## cache miss: compute, store, and return the fresh inverse
    message("getting new calculated data")
    fresh <- solve(x$get())
    x$setinverse(fresh)
    fresh
  } else {
    ## cache hit: reuse the stored inverse
    message("getting cached data")
    cached
  }
}
|
1d51537d162c0797c153a82876cba299d9992964
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GENEAclassify/examples/GENEAdistance.Rd.R
|
66268c5083b96ac9722d8564d11853fc7561bcdc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
GENEAdistance.Rd.R
|
# Auto-extracted example script for GENEAclassify::GENEAdistance
# (an internal helper: mean distance between two detected steps).
library(GENEAclassify)
### Name: GENEAdistance
### Title: Find the mean distance between two steps detected
### Aliases: GENEAdistance
### Keywords: internal
### ** Examples
# example inputs; each top-level call auto-prints its result
x1 <- c(20, 15, 10)
GENEAdistance(x1)
x2 <- c(300, 255, 111)
GENEAdistance(x2)
|
8ba3d79600f9e83026aea0130992bf6c8a1b6a67
|
7b8aa15d266826808c3cf35da01112eb0de2beed
|
/src/test/resources/testCBIND.r
|
91f234ffb8b3ad8ac0a1cbe04deb14ab7119b842
|
[] |
no_license
|
AcuoFS/acuo-algo
|
d6df24a7f74f098bcf2fbac6b5fa67fd10baa8e1
|
8c874675c02ffbdc711d3133fa3beebe592d12d3
|
refs/heads/master
| 2020-03-06T17:31:18.232081
| 2018-07-02T19:15:14
| 2018-07-02T19:15:14
| 126,991,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 482
|
r
|
testCBIND.r
|
# Test script: build a (margin call x asset) eligibility matrix and mark
# eligible pairs via matrix indexing with cbind() of row/column names.
callIds <- c('mcp1','mcp5','mcp7')
assetIds <- c("AUD", "CAD", "EUR", "GBP", "HKD", "JPY" ,"SGD","USD")
call.num <- length(callIds)
# BUG FIX: this was assigned to `asset`, but `asset.num` was used below,
# which made matrix() fail with "object 'asset.num' not found".
asset.num <- length(assetIds)
# all-zero base matrix: rows = margin calls, columns = assets
base.mat <- matrix(0,nrow=call.num,ncol=asset.num, dimnames = list(callIds,assetIds))
eli.mat <- base.mat
# BUG FIX: a comma was missing between 'mcp5' and 'mcp7' ('mcp5''mcp7'),
# which was a parse error.
group.callIds <- c('mcp1','mcp1','mcp1','mcp5','mcp5','mcp5','mcp5','mcp7')
group.assetIds <- c("AUD", "CAD", "EUR", "GBP", "HKD", "JPY" ,"SGD","USD")
# flag each (call, asset) pair as eligible, indexed by name
eli.mat[cbind(group.callIds ,group.assetIds)] <- 1
print(eli.mat)
|
87f807b0508728899c96b1773a96ec410060d037
|
6bdbc69ac73cd9d3009cd2a09149bdc79f2195e0
|
/R/corr_bestlag.R
|
9dacdcb2adec4c05d403a94386b5e662d2e0655e
|
[
"MIT"
] |
permissive
|
gitter-lab/LPWC
|
fdfe554b2d0b03536edcb2858a95cf7fd92fba6b
|
56d076d31adf00e6b466e2e7cd27c7983e6056ab
|
refs/heads/master
| 2021-01-15T11:04:01.948099
| 2020-04-25T19:52:47
| 2020-04-25T19:52:47
| 99,610,178
| 22
| 2
|
NOASSERTION
| 2020-01-23T17:43:21
| 2017-08-07T18:59:20
|
R
|
UTF-8
|
R
| false
| false
| 3,867
|
r
|
corr_bestlag.R
|
#' Computes best lag correlation
#'
#' This function computes correlation based on best picked lags. The lags indicate delayed changes.
#'
#' @param data a matrix or data frame with rows representing genes and columns
#' representing different timepoints. If data is a data frame, the gene names
#' can be specified using the \code{row.names()}.
#' @param max.lag an integer value of the maximum lags allowed in the dataset,
#' if null, defaults to the floor of the number of timepoints divided by 4
#' @param timepoints a vector of time points used in the dataset
#' @param C a numeric value of C used in computing weighted correlation,
#' if null, a default is computed based on the penalty argument
#' @param penalty a factor with two levels high and low penalty on the weighted correlation
#' @param iter an integer indicating the number of C values to test for low penalty
#' @return a list containing the weighted correlation, the best lags used in
#' each row, and the C value used
#'
#' @examples
#' corr.bestlag(array(rnorm(30), c(5, 6)), max.lag = 1,
#'    timepoints = c(0, 5, 10, 15, 20, 25), C = 10, penalty = "high")
#' corr.bestlag(array(runif(40, 0, 20), c(4, 10)),
#'    timepoints = c(0, 0.5, 1.5, 3, 6, 12, 18, 26, 39, 50), penalty = "high")
#' corr.bestlag(matrix(data = rexp(n = 40, 2), nrow = 8),
#'    timepoints = c(0, 5, 15, 20, 40), penalty = "low", iter = 5)
#'
#' @importFrom stats var
#'
#' @author Thevaa Chandereng, Anthony Gitter
#'
#' @export corr.bestlag
corr.bestlag <- function(data, timepoints, max.lag = NULL, C = NULL, penalty = "high", iter = 10){
  # Default the maximum lag to a quarter of the number of timepoints.
  if(is.null(max.lag)){
    max.lag <- floor(length(timepoints) / 4)
  }
  # A user-supplied C must be strictly positive.
  if(!is.null(C)){
    stopifnot(C > 0)
  }
  # Validate the remaining arguments.
  stopifnot(dim(data)[2] == length(timepoints), max.lag <= length(timepoints) / 4, is.numeric(iter),
            penalty == "high" | penalty == "low", max.lag %% 1 == 0, iter %% 1 == 0, iter > 1,
            max.lag >= 1)
  # A gene with zero variance has no meaningful correlation.
  if(any(apply(data, 1, var) == 0)){stop("At least one of the genes has 0 variance!")}
  # Candidate penalty parameters, largest penalty first.
  c.values <- findC(timepoints, max.lag, iter = iter)
  # Helper: best lags, lag-aligned data, and weighted correlation for one
  # penalty value. Returns the same list shape the caller receives.
  fit.for.c <- function(c.val){
    lag.choice <- best.lag(data, max.lag = max.lag, timepoints, C = c.val)
    shifted <- prep.data(data, lag.choice, timepoints)
    list(corr = comp.corr(shifted$data, shifted$time, C = c.val),
         lags = lag.choice,
         C = c.val)
  }
  # Case 1: C supplied directly -- use it as-is.
  if(is.numeric(C)){
    return(fit.for.c(C))
  }
  # Case 2: high penalty -- use the first candidate value.
  if(penalty == "high"){
    return(fit.for.c(c.values[1]))
  }
  # Case 3: low penalty (the only remaining option after validation).
  # Fit every candidate, then pick the one whose similarity matrix differs
  # least from its predecessor's.
  fits <- lapply(c.values, fit.for.c)
  adjacent.diff <- vapply(seq_len(length(c.values) - 1), function(j){
    sum((as.vector(fits[[j + 1]]$corr) - as.vector(fits[[j]]$corr))^2)
  }, numeric(1))
  pick <- which.min(adjacent.diff) + 1
  list(corr = fits[[pick]]$corr, lags = fits[[pick]]$lags, C = c.values[pick])
}
|
8b0e8a3736e95ab4c6cb0e7ca18d03e51c0fe1b4
|
8b94a5c8b2a6e14d39f77fc0d64d9b8dad386b26
|
/R/calculate_ozcbi_forest.R
|
810ac6c376566269f7bbda966f00ca9818ad25da
|
[
"MIT"
] |
permissive
|
dbca-wa/rOzCBI
|
0753351b91651c26b3ac027820dacf52101fa878
|
98014eb8f9590b0c4b2f3d410240e5bca177092a
|
refs/heads/main
| 2023-08-16T21:46:40.404365
| 2023-08-10T08:07:22
| 2023-08-10T08:07:22
| 232,464,945
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,373
|
r
|
calculate_ozcbi_forest.R
|
#' Calculate the Forest OzCBI from numeric input variables
#'
#' `r lifecycle::badge("stable")`
#'
#' @details The Forest OzCBI is calculated from the following components:
#'
#' * Stratum 1 fraction of cover: always 1 (100%), therefore never captured.
#' * Stratum {2..5} fcov: surface covered by the stratum in quarter steps
#' from 0 to 1: 0.0, 0.25, 0.5, 0.75, 1.0
#' * Stratum scores (CBI): The average grading from all non-null gradings out of
#' all captured variables for each stratum.
#' The variables are named with a stratum prefix.
#' E.g., all variables for stratum 1 are prefixed `stratum_1_surface_s1_`.
#'
#' ### Overall index: OzCBI
#' The OzCBI is calculated as:
#'
#' (sum of stratum scores) / (sum of stratum fraction of cover)
#'
#' ### Variables not used in OzCBI formula
#' Some variables are captured, but not used for OzCBI calculation.
#' They provide context and metadata, and include representative photos.
#'
#' @param stratum_1_surface_p_s1_unburnt A numeric grade from 0.0 to 3.0.
#' Unburnt area.
#' The approximate percentage of area not burned.
#' @param stratum_1_surface_p_s1_duff A numeric grade from 0.0 to 3.0.
#' Duff condition (crushed sticks and leaves).
#' Broken leaf pieces that form a type of mulch under the litter layer.
#' If necessary, scrape down to mineral soil to see how deeply the char has
#' penetrated.
#' @param stratum_3_elevated_p_s3_crown
#' A numeric grade from 0.0 to 3.0.
#' Intact original crown cover.
#' How much original crown is intact?
#' @param straum_4_intermediate_p_s4_crown
#' A numeric grade from 0.0 to 3.0.
#' Intact original crown cover.
#' How much original crown is intact?
#' @param straum_4_intermediate_p_s4_char
#' A numeric grade from 0.0 to 3.0.
#' Char height.
#' Fraction of total stratum height charred.
#' @param stratum_5_overstorey_p_s5_crown
#' A numeric grade from 0.0 to 3.0.
#' Intact original crown cover.
#' How much original crown is intact?
#' @param stratum_5_overstorey_p_s5_litter
#' A numeric grade from 0.0 to 3.0.
#' Ground surface covered by leaves that have fallen after the burn,
#' not unburned patches.
#' @param stratum_5_overstorey_p_s5_char
#' A numeric grade from 0.0 to 3.0.
#' Char height.
#' Fraction of total stratum height charred.
#' @param stratum_1_surface_s1_fcov
#' The fraction of coverage of stratum 1, default: 1. This value
#' is never captured in the digital form as it always is 1 (100%).
#' The variable is however provided here to allow different values.
#' @param stratum_3_elevated_p_s3_fcov
#' The fraction of coverage of stratum 2 in quarter steps
#' from 0.0 to 1.0. Default: 0.
#' @param straum_4_intermediate_p_s4_fcov
#' The fraction of coverage of stratum 2 in quarter steps
#' from 0.0 to 1.0. Default: 0.
#' @param stratum_5_overstorey_p_s5_fcov
#' The fraction of coverage of stratum 2 in quarter steps
#' from 0.0 to 1.0. Default: 0.
#' @param verbose Whether to display diagnostic messages, default: FALSE.
#' @family ozcbi
#' @export
#' @examples
#' # With missing variables
#' calculate_ozcbi_forest(
#' stratum_1_surface_p_s1_unburnt = 1,
#' verbose = TRUE
#' )
#'
#' # With complete variables, all set to 1
#' calculate_ozcbi_forest(
#' stratum_1_surface_p_s1_unburnt = 1,
#' stratum_1_surface_p_s1_duff = 1,
#' stratum_3_elevated_p_s3_crown = 1,
#' straum_4_intermediate_p_s4_crown = 1,
#' straum_4_intermediate_p_s4_char = 1,
#' stratum_5_overstorey_p_s5_crown = 1,
#' stratum_5_overstorey_p_s5_litter = 1,
#' stratum_5_overstorey_p_s5_char = 1,
#' stratum_1_surface_s1_fcov = 1,
#' stratum_3_elevated_p_s3_fcov = 0,
#' straum_4_intermediate_p_s4_fcov = 0,
#' stratum_5_overstorey_p_s5_fcov = 0,
#' verbose = TRUE
#' )
calculate_ozcbi_forest <- function(
    stratum_1_surface_p_s1_unburnt = NA_real_,
    stratum_1_surface_p_s1_duff = NA_real_,
    stratum_3_elevated_p_s3_crown = NA_real_,
    straum_4_intermediate_p_s4_crown = NA_real_,
    straum_4_intermediate_p_s4_char = NA_real_,
    stratum_5_overstorey_p_s5_crown = NA_real_,
    stratum_5_overstorey_p_s5_litter = NA_real_,
    stratum_5_overstorey_p_s5_char = NA_real_,
    stratum_1_surface_s1_fcov = 1,
    stratum_3_elevated_p_s3_fcov = 0,
    straum_4_intermediate_p_s4_fcov = 0,
    stratum_5_overstorey_p_s5_fcov = 0,
    verbose = FALSE) {
  # Stratum CBI = mean over the gradings actually recorded for that
  # stratum; NaN when every grading is NA (the same value the previous
  # purrr::discard(is.na) %>% mean() pipeline produced).
  mean_grade <- function(grades) mean(grades[!is.na(grades)])

  # ---- Stratum 1 (surface) ---------------------------------------------- #
  s1_cbi <- mean_grade(c(
    stratum_1_surface_p_s1_unburnt,
    stratum_1_surface_p_s1_duff
  ))
  s1_score <- s1_cbi * stratum_1_surface_s1_fcov
  if (verbose == TRUE) {
    ruODK::ru_msg_info(
      glue::glue(
        "Stratum 1: CBI {s1_cbi} * FCOV ",
        "{stratum_1_surface_s1_fcov} = Score {s1_score}"
      )
    )
  }

  # ---- Stratum 2 (near surface) is not part of the Forest OzCBI --------- #

  # ---- Stratum 3 (elevated): a single grading, no averaging needed ------ #
  s3_cbi <- stratum_3_elevated_p_s3_crown
  s3_score <- s3_cbi * stratum_3_elevated_p_s3_fcov
  if (verbose == TRUE) {
    ruODK::ru_msg_info(
      glue::glue(
        "Stratum 3: CBI {s3_cbi} * FCOV ",
        "{stratum_3_elevated_p_s3_fcov} = Score {s3_score}"
      )
    )
  }

  # ---- Stratum 4 (intermediate) ----------------------------------------- #
  s4_cbi <- mean_grade(c(
    straum_4_intermediate_p_s4_crown,
    straum_4_intermediate_p_s4_char
  ))
  s4_score <- s4_cbi * straum_4_intermediate_p_s4_fcov
  if (verbose == TRUE) {
    ruODK::ru_msg_info(
      glue::glue(
        "Stratum 4: CBI {s4_cbi} * FCOV ",
        "{straum_4_intermediate_p_s4_fcov} = Score {s4_score}"
      )
    )
  }

  # ---- Stratum 5 (overstorey) ------------------------------------------- #
  s5_cbi <- mean_grade(c(
    stratum_5_overstorey_p_s5_crown,
    stratum_5_overstorey_p_s5_litter,
    stratum_5_overstorey_p_s5_char
  ))
  s5_score <- s5_cbi * stratum_5_overstorey_p_s5_fcov
  if (verbose == TRUE) {
    ruODK::ru_msg_info(
      glue::glue(
        "Stratum 5: CBI {s5_cbi} * FCOV ",
        "{stratum_5_overstorey_p_s5_fcov} = Score {s5_score}"
      )
    )
  }

  # ---- Overall index ----------------------------------------------------- #
  # NA/NaN stratum scores drop out of the numerator (na.rm = TRUE); the
  # denominator always sums all four fractions of cover.
  score_sum <- sum(s1_score, s3_score, s4_score, s5_score, na.rm = TRUE)
  fcov_sum <- sum(
    stratum_1_surface_s1_fcov,
    stratum_3_elevated_p_s3_fcov,
    straum_4_intermediate_p_s4_fcov,
    stratum_5_overstorey_p_s5_fcov
  )
  ozcbi <- score_sum / fcov_sum
  if (verbose == TRUE) {
    ruODK::ru_msg_success(
      glue::glue(
        "OzCBI: {ozcbi} = Score sum {score_sum} / FCOV sums {fcov_sum}"
      )
    )
  }
  ozcbi
}
# usethis::use_test("calculate_ozcbi")
|
886110982e62afb07e619fed1ebcfc7ce76c90e6
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Statistics_-_Concepts_And_Applications_by_Nabendu_Pal_And_Sahadeb_Sarkar/CH4/EX4.1/Ex4_1.R
|
7316da46ffdb186797d40e7444beaeeeeebd6ee5
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 253
|
r
|
Ex4_1.R
|
# Compute the mode of a small sample, two ways: a plyr frequency table
# and a base-R table() lookup of the most frequent value.
#Page Number: 55
library(plyr)
# sample of 8 single-digit observations
dataset <- data.frame(Distinct_entry = c(8,9,0,9,1,4,3,7))
# sample size (computed but not used further below)
n <- length(dataset$Distinct_entry)
# frequency table via plyr::count (returns a data.frame with a freq column)
y <- count(dataset,'Distinct_entry')
print(y)
# base-R contingency table; names(z) are the observed values
z <- table(dataset)
# the mode = value(s) with the maximal frequency
cat("Mode is",names(z)[which(z==max(z))] )
|
84401b82969748f444d889f983fff13cbb3dfb71
|
cf6cbe4fb89ae5b05e923b468a04980a78947cd9
|
/man/freshenGenes.Rd
|
3dfd133c5e62bb675dd7e6e38207b76a993b7444
|
[] |
no_license
|
jmw86069/genejam
|
281870956b4da77aa483587fd1c74e09dc39e901
|
f85495f0562f131ef2cfe50190f63510d878de74
|
refs/heads/master
| 2022-10-06T15:41:23.445595
| 2022-09-13T17:40:01
| 2022-09-13T17:40:01
| 233,139,523
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 9,220
|
rd
|
freshenGenes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genejam-freshen.R
\name{freshenGenes}
\alias{freshenGenes}
\title{Freshen gene annotations using Bioconductor annotation data}
\usage{
freshenGenes(
x,
ann_lib = c("", "org.Hs.eg.db"),
try_list = c("SYMBOL2EG", "ACCNUM2EG", "ALIAS2EG"),
final = c("SYMBOL"),
split = "[ ]*[,/;]+[ ]*",
sep = ",",
handle_multiple = c("first_try", "first_hit", "all", "best_each"),
empty_rule = c("empty", "original", "na"),
include_source = FALSE,
protect_inline_sep = TRUE,
intermediate = "intermediate",
ignore.case = FALSE,
verbose = FALSE,
...
)
}
\arguments{
\item{x}{character vector or \code{data.frame} with one or more columns
containing gene symbols.}
\item{ann_lib}{character vector indicating the name or names of the
Bioconductor annotation library to use when looking up
gene nomenclature.}
\item{try_list}{character vector indicating one or more names of
annotations to use for the input gene symbols in \code{x}. The
annotation should typically return the Entrez gene ID, usually
given by \code{'2EG'} at the end of the name. For example \code{SYMBOL2EG}
will be used with ann_lib \code{"org.Hs.eg.db"} to produce annotation
name \code{"org.Hs.egSYMBOL2EG"}. Note that when the \code{'2EG'} form of
annotation does not exist (or another suitable suffix defined in
argument \code{"revmap_suffix"} in \code{get_anno_db()}), it will be derived
using \code{AnnotationDbi::revmap()}. For example if \code{"org.Hs.egALIAS"}
is requested, but only \code{"org.Hs.egALIAS2EG"} is available, then
\code{AnnotationDbi::revmap(org.Hs.egALIAS2EG)} is used to create the
equivalent of \code{"org.Hs.egALIAS"}.}
\item{final}{character vector to use for the final conversion
step. When \code{final} is \code{NULL} no conversion is performed.
When \code{final} contains multiple values, each value is returned
in the output. For example, \code{final=c("SYMBOL","GENENAME")} will
return a column \code{"SYMBOL"} and a column \code{"GENENAME"}.}
\item{split}{character value used to separate delimited values in \code{x}
by the function \code{base::strsplit()}. The default will split values
separated by comma \verb{,} semicolon \verb{;} or forward slash \code{/}, and will
trim whitespace before and after these delimiters.}
\item{sep}{character value used to concatenate multiple entries in
the same field. The default \code{sep=","} will comma-delimit multiple
entries in the same field.}
\item{handle_multiple}{character value indicating how to handle multiple
values: \code{"first_hit"} will query each column of \code{x} until it finds the
first possible returning match, and will ignore all subsequent possible
matches for that row in \code{x}. For example, if one row in \code{x} contains
multiple values, only the first match will be used. \code{"first_try"}
will return the first match from \code{try_list} for all columns in \code{x}
that contain a match. For example, if one row in \code{x} contains two
values, the first match from \code{try_list} using one or both columns in
\code{x} will be maintained. Subsequent entries in \code{try_list} will not be
attempted for rows that already have a match. \code{"all"} will return all
possible matches for all entries in \code{x} using all items in \code{try_list}.}
\item{empty_rule}{character value indicating how to handle entries which
did not have a match, and are therefore empty: \code{"original"} will use
the original entry as the output field; \code{"empty"} will leave the
entry blank.}
\item{include_source}{logical indicating whether to include a column
that shows the colname and source matched. For example, if column
\code{"original_gene"} matched \code{"SYMBOL2EG"} in \code{"org.Hs.eg.db"} there
will be a column \code{"found_source"} with value
\code{"original_gene.org.Hs.egSYMBOL2EG"}.}
\item{protect_inline_sep}{logical indicating whether to
protect inline characters in \code{sep}, to prevent them from
being used to split single values into multiple values.
For example, \code{"GENENAME"} returns the full gene name, which
often contains comma \code{","} characters. These commas do
not separate multiple separate values, so they should not be
used to split a string like \code{"H4 clustered histone 10, pseudogene"}
into two strings \code{"H4 clustered histone 10"} and \code{"pseudogene"}.}
\item{intermediate}{\code{character} string with colname in \code{x} that
contains intermediate values. These values are expected from output
of the first step in the workflow, for example \code{"SYMBOL2EG"}
returns Entrez gene values, so if the input \code{x} already contains
some of these values in a column, assign that colname to
\code{intermediate}.}
\item{ignore.case}{\code{logical} indicating whether to use
case-insensitive matching when \code{ignore.case=TRUE}, otherwise
the default \code{ignore.case=FALSE} will perform default \code{mget()}
which requires the upper and lowercase characters are
an identical match. When \code{ignore.case=TRUE} this function
calls \code{genejam::imget()}.}
\item{verbose}{logical indicating whether to print verbose output.}
}
\value{
\code{data.frame} with one or more columns indicating the input
data, then a column \code{"intermediate"} containing the Entrez gene ID
that was matched, then one column for each item in \code{final},
by default \code{"SYMBOL"}.
}
\description{
Freshen gene annotations using Bioconductor annotation data
}
\details{
This function takes a vector or \code{data.frame} of gene symbols,
and uses Bioconductor annotation methods to find the most current
official gene symbol.
The annotation process runs in two basic steps:
\enumerate{
\item \strong{Convert the input gene to Entrez gene ID}.
\item \strong{Convert Entrez gene ID to official gene symbol}.
}
\subsection{Step 1. Convert to Entrez gene ID}{
The first step uses an ordered list of annotations,
with the assumption that the first match is usually the best,
and most specific. By default, the order is:
\itemize{
\item \code{"org.Hs.egSYMBOL2EG"} -- almost always 1-to-1 match
\item \code{"org.Hs.egACCNUM2EG"} -- mostly a 1-to-1 match
\item \code{"org.Hs.egALIAS2EG"} -- sometimes a 1-to-1 match, sometimes 1-to-many
}
When multiple Entrez gene ID values are matched, they are all
retained. See argument \code{handle_multiple} for custom options.
}
\subsection{Step 2. Use Entrez gene ID to return official annotation}{
The second step converts the Entrez gene ID (or multiple IDs)
to the official gene symbol, by default using \code{"org.Hs.egSYMBOL"}.
The second step may optionally include multiple annotation types,
each of which will be returned. Some common examples:
\itemize{
\item \code{"org.Hs.egSYMBOL"} -- official Entrez gene symbol
\item \code{"org.Hs.egALIAS"} -- set of recognized aliases for an Entrez gene.
\item \code{"org.Hs.egGENENAME"} -- official Entrez long gene name
}
For each step, the annotation matched can be returned, as an audit
trail to see which annotation was available for each input entry.
Note that if the input data already contains Entrez gene ID
values, you can define that colname with argument \code{intermediate}.
}
\subsection{Case-insensitive search}{
For case-insensitive search, which is particularly useful in non-human
organisms because they often use mixed-case, use the argument
\code{ignore.case=TRUE}. In our benchmark tests it appears to add roughly
0.1 seconds per annotation, regardless of the number of input entries.
This appears to be the time it takes to spool the list of annotation
keys stored in the SQLite database, and may therefore be dependent upon
the size of the annotation file.
}
}
\examples{
if (suppressPackageStartupMessages(require(org.Hs.eg.db))) {
cat("\nBasic usage\n");
print(freshenGenes(c("APOE", "CCN2", "CTGF")));
}
if (suppressPackageStartupMessages(require(org.Hs.eg.db))) {
## Optionally show the annotation source matched
cat("\nOptionally show the annotation source matched\n");
print(freshenGenes(c("APOE", "CCN2", "CTGF"), include_source=TRUE));
}
if (suppressPackageStartupMessages(require(org.Hs.eg.db))) {
## Show comma-delimited genes
cat("\nInput genes are comma-delimited\n");
print(freshenGenes(c("APOE", "CCN2", "CTGF", "CCN2,CTGF")));
}
if (suppressPackageStartupMessages(require(org.Hs.eg.db))) {
## Optionally include more than SYMBOL in the output
cat("\nCustom output to include SYMBOL, ALIAS, GENENAME\n");
print(freshenGenes(c("APOE", "HIST1H1C"),
final=c("SYMBOL", "ALIAS", "GENENAME")));
}
if (suppressPackageStartupMessages(require(org.Hs.eg.db))) {
## More advanced, match affymetrix probesets
if (suppressPackageStartupMessages(require(hgu133plus2.db))) {
cat("\nAdvanced example including Affymetrix probesets.\n");
print(freshenGenes(c("227047_x_at","APOE","HIST1H1D","NM_003166,U08032"),
include_source=TRUE,
try_list=c("hgu133plus2ENTREZID","REFSEQ2EG","SYMBOL2EG","ACCNUM2EG","ALIAS2EG"),
final=c("SYMBOL","GENENAME")))
}
}
}
\seealso{
Other genejam:
\code{\link{freshenGenes2}()},
\code{\link{freshenGenes3}()},
\code{\link{get_anno_db}()},
\code{\link{is_empty}()}
}
\concept{genejam}
|
b20426b603e5c4628a947bd895d1c417344edb5c
|
ddb525b0a9d9c45161f28140cc786e66af722d57
|
/man/nikkei.df.Rd
|
a483e62957008dc526c7ac308b3fc5f045fd04b7
|
[] |
no_license
|
cran/QRMlib
|
ebd393ae06d6770c906cbd72a4174794db273365
|
914a855242662effc9bd6f9e60ef45119bfdd882
|
refs/heads/master
| 2020-06-04T09:13:15.520228
| 2010-02-22T00:00:00
| 2010-02-22T00:00:00
| 17,717,887
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 747
|
rd
|
nikkei.df.Rd
|
\name{nikkei.df}
\docType{data}
\alias{nikkei.df}
\title{
Nikkei Stock Market Index (data.frame Object) January 4, 1994-March 25, 2004
}
\description{
The \code{nikkei.df} dataframe provides the daily closing values for the Nikkei index
from January 1994 to March 2004. (The companion timeSeries version, \code{nikkei}, stores
the same values in its \code{@Data} slot.) QRMlib's R-version 1.4.2 and above
supplies data in both timeSeries and data.frame versions.
}
\usage{
data(nikkei.df)
}
\format{
This dataframe object contains the prices for the index at nikkei.df[,2] and the corresponding dates
at nikkei.df\$DATE. The dataframe can be converted to a timeSeries by calling the ConvertDFToTimeSeries()
method in functionsUtility.R.
}
\seealso{
\code{\link{nikkei}}
}
\keyword{datasets}
|
0a0aa00d7b66a528d483809c24a7ca6519c97110
|
1afbf6bd36c777bd33fed43f1724ecb08f0a0635
|
/functions/plot-resolution-effect.R
|
cd8398ef7cba3bfd3790e465d8cd4f272a20a1c8
|
[
"MIT"
] |
permissive
|
DanOvando/skynet
|
7328971f2839391b57b9259b9bb7e0af6c8be06d
|
e8ae39e1e894b3c503805f0f3354b753cded7525
|
refs/heads/master
| 2021-03-30T18:18:55.571456
| 2018-08-13T04:50:53
| 2018-08-13T04:50:53
| 93,210,132
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
plot-resolution-effect.R
|
#' Plot the effect of lat/long resolution on model fit.
#'
#' Scatter plot of R^2 against spatial resolution, with point size showing
#' the number of observations backing each model fit, plus a loess smooth.
#'
#' @param mod_res data frame of model results; must contain the columns
#'   `resolution`, `r2`, and `rounded_data` (a list-column of data frames).
#'   NOTE(review): assumes dplyr/purrr/ggplot2 are attached by the caller.
#' @return A ggplot object.
plot_resolution_effect <- function(mod_res) {
  res_effect <- mod_res %>%
    # observations available at each resolution = rows of the rounded data
    mutate(obs = map_dbl(rounded_data, nrow)) %>%
    ggplot(aes(resolution, r2)) +
    geom_point(aes(size = obs), shape = 21, fill = "steelblue") +
    geom_smooth(se = F) +
    scale_y_continuous(limits = c(0, NA),
                       name = expression(R^2)) +
    scale_size_continuous(name = "Observations", range = c(2, 7),
                          breaks = seq(0, 3000, by = 500)) +
    theme(axis.title.y = element_text(angle = 0)) +
    labs(x = "Lat/Long Resolution", title = "Model: Fish ~ Fishing")

  # Bug fix: the original function ended on the assignment above, so the plot
  # was returned invisibly and nothing was displayed when the function was
  # called at top level. Returning the object explicitly makes it visible.
  res_effect
}
|
1506e4de782916a2cdcf4bb93dd9d7c101113434
|
f9dc1dda10c8967793424d503edc77bdead571f5
|
/step1-datacleaiing.R
|
2b8c05f068015cdf9de08e5c57b1c94ee0a21575
|
[] |
no_license
|
tingtingting118/decisiontree-for-income-in-SF
|
d03c90d1c23f0028d7231084bf1a726e204a773a
|
6bb33d971910a0f11f83ef49cbf07c07d66d7719
|
refs/heads/master
| 2021-09-07T23:11:37.827743
| 2018-03-02T19:17:13
| 2018-03-02T19:17:13
| 105,492,812
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,523
|
r
|
step1-datacleaiing.R
|
# Step 1: data cleaning for the San Francisco income survey data set.

# import the data first
library(readxl)
# BUG FIX: the original line carried a stray console-continuation "+" before
# `col_names = FALSE`, which is a syntax error when this script is source()d.
income_data <- read_excel("C:/Users/karen/Downloads/UIC/ids572/hw2/income.data.xlsx",
                          col_names = FALSE)

# add a title for each column
names(income_data) <- c("hh_income", "Sex", "martial status", "age", "education",
                        "occupation", "timeinSF", "dualincomes", "hh_size",
                        "under18", "hh_status", "home_type", "ethinic_class",
                        "lang_speak")

# inspect the data: distributions, outliers, missing values
summary(income_data)

# some columns are characters containing the literal string "NA";
# turn those placeholders into real missing values
income_data[income_data == "NA"] <- NA

# number of rows with at least one missing value
sum(!complete.cases(income_data))

# missing-data pattern (only 6876 cases are complete)
library(mice)
md.pattern(income_data)

# visualize missingness: ~10% of timeinSF is missing, followed by hh_size
library("VIM")
na_plot <- aggr(income_data, col = c('blue', 'red'), numbers = TRUE, prop = TRUE,
                sortVars = TRUE, labels = names(income_data), cex.axis = 1, gap = 0,
                ylab = c("histrogram of missing values", "pattern"))

# convert the categorical variables to factors before imputation
# (equivalent to the original one-line-per-column as.factor() calls)
factor_cols <- c("martial status", "education", "occupation", "timeinSF",
                 "hh_size", "hh_status", "home_type", "ethinic_class",
                 "lang_speak")
income_data[factor_cols] <- lapply(income_data[factor_cols], as.factor)

# dry-run mice to obtain the default method vector and predictor matrix
init <- mice(income_data, maxit = 0)
meth <- init$method
predM <- init$predictorMatrix

# ordinal variables: polytomous regression
meth[c("timeinSF", "hh_size", "education")] <- 'polyreg'
# nominal variables: predictive mean matching (first attempt)
meth[c("lang_speak", "hh_status", "martial status", "occupation",
       "ethinic_class", "home_type")] <- 'pmm'

# sanity checks on category counts before imputing
summary(income_data$`martial status`)
summary(income_data$home_type)
summary(income_data$lang_speak)

# run the imputation with m = 9 imputed data sets
imputation <- mice(income_data, m = 9, method = meth, predictorMatrix = predM)
complete(imputation)

# final cleaned data set, ready to use
income.data <- complete(imputation)
|
d4f5a5fb06c12bfd780e66f065b7e07d073673be
|
fc1167f0a785ee344179e887e08e248358713707
|
/man/shinyDND.Rd
|
982b7fb99f4d416fc5b8524dcd3e222b1c59f474
|
[] |
no_license
|
ktargows/shinydnd
|
3b228b4368535f29e9d02445f0602e211e72dbc1
|
15acb6229131519d5bd62dba2f59ac57fcb8398d
|
refs/heads/master
| 2021-01-22T07:57:40.832165
| 2016-08-25T07:13:19
| 2016-08-25T07:13:19
| 92,588,487
| 1
| 0
| null | 2017-05-27T09:32:04
| 2017-05-27T09:32:04
| null |
UTF-8
|
R
| false
| true
| 418
|
rd
|
shinyDND.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shinyDND.R
\docType{package}
\name{shinyDND}
\alias{shinyDND}
\alias{shinyDND-package}
\title{ShinyDND: A package for creating drag and drop elements in Shiny.}
\description{
The ShinyDND package provides three categories of important functions:
dragUI, dropUI, and dragSetUI.
}
\section{ShinyDND functions}{
The ShinyDND functions fall into the three categories listed above: dragUI, dropUI, and dragSetUI.
}
|
15863e5f59ec5e4724f4feed73bfec6d76e5d4d4
|
2b4b1fd84d85a845df5d6d87837085e8af774be3
|
/R/seedrows.R
|
9989a264c1ba5bc37e638b96c12392b10a667fad
|
[] |
no_license
|
frpinto/S2B
|
d6e1eaa792e718a29621edf35936867ecd94f71d
|
0745465db01742fddd24dd9c3701aa5f8796cb97
|
refs/heads/master
| 2021-01-21T03:44:58.999837
| 2018-12-23T16:07:12
| 2018-12-23T16:07:12
| 101,900,386
| 2
| 2
| null | 2018-12-23T16:07:13
| 2017-08-30T15:48:15
|
R
|
UTF-8
|
R
| false
| false
| 546
|
r
|
seedrows.R
|
#' Find set of node ids in graph
#'
#' Returns a vector of node ids matching your input vector of node
#' identifier strings. Identifiers not found in the network are ignored.
#' Identifiers are matched against the flattened values of *all* vertex
#' attributes, not only the `name` attribute.
#'
#' @param seed_graph igraph object of the network to be searched.
#' @param seedvec vector with strings of node identifiers.
#' @return integer vector with the indices of the matched entries.
#' @export
seedrows <- function(seed_graph, seedvec) {
  # flatten every vertex attribute into one vector of candidate identifiers
  vertexlist <- unlist(igraph::vertex_attr(seed_graph))
  # Fix: the original ended on `rowindex = which(...)`, an assignment, so the
  # result was returned invisibly; return the indices explicitly instead.
  which(is.element(vertexlist, seedvec))
}
|
2652bd7f1278e757b88654cde460cb8e14f8aa1a
|
5119a104545aaab6b2c6a9053c8941d1acc753c1
|
/R/fit.stem.R
|
3bc9dd5f5064e5475bd09d399fd1d016223f1146
|
[] |
no_license
|
covid19-dashboard-us/STEM
|
1701323f861f30bcde45570edd1c073c62a57169
|
e0ed8c0744d15609e744b706948038b63cc5f9e9
|
refs/heads/master
| 2022-06-07T22:40:18.422884
| 2020-05-11T01:40:41
| 2020-05-11T01:40:41
| 260,450,936
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,763
|
r
|
fit.stem.R
|
#' Estimation using Spatio-Temporal Epidemic Model (STEM)
#'
#' Fits the spatiotemporal epidemic model as two penalized zero-inflated
#' Poisson GAMs (\code{mgcv::gam} with \code{family = ziP()}): one for the
#' infection counts (\code{y.infec}) and one for the death counts
#' (\code{y.death}). The bivariate-spline bases \code{BQ2_all.*} are
#' penalized through \code{paraPen} with the penalty matrices supplied in
#' \code{data}, and \code{log(S.ratio)} is used as the model offset.
#'
#' @import mgcv
#' @importFrom kr MGLM
# NOTE(review): MGLM is not called in this function; confirm this importFrom
# tag is still needed before regenerating the NAMESPACE.
#'
#' @param cov.infec Character vector of covariate names used in the
#'   infection model formula.
#' @param cov.death Character vector of covariate names used in the
#'   death model formula.
#' @param data A list that must contain \code{dat.fit} (the model frame with
#'   columns \code{I}, \code{S.ratio}, \code{y.infec}, \code{y.death},
#'   \code{Control2}, the basis columns \code{BQ2_all.*}, and the covariates),
#'   plus \code{BQ2.infec}, \code{P.infec}, \code{BQ2.death}, \code{P.death}
#'   (basis and penalty matrices for the two models).
#' @param b1.type Either \code{"cons"} (constant coefficient on
#'   \code{log(I + 1)}) or \code{"vary"} (spatially varying effect via the
#'   interaction basis \code{BQ2_all.X.infec}). Applies to the infection model.
#' @param cov.type Either \code{"cons"} (linear covariate effects) or
#'   \code{"vary"} (smooth effects \code{s(cov, bs = "cr", k = 4)}).
#'   Applies to the infection model only; the death model always uses
#'   linear covariate effects.
#'
#' @return A list with the fitted infection and death models
#'   (\code{mfit.stem.infec}, \code{mfit.stem.death}), the \code{b1.type}
#'   and \code{cov.type} options used, and the basis matrices
#'   \code{BQ2.infec} and \code{BQ2.death}.
fit.stem <- function(cov.infec, cov.death, data = list(), b1.type = "cons", cov.type = "cons"){
  # unpack the pieces of `data` used below
  dat.fit = data$dat.fit
  # log of current infection counts (+1 to keep zeros finite); used as predictor
  dat.fit$lI = log(dat.fit$I + 1)
  BQ2.infec = data$BQ2.infec
  P.infec = data$P.infec
  BQ2.death = data$BQ2.death
  P.death = data$P.death
  # --- infection model formula -------------------------------------------
  if(b1.type == "vary"){
    # spatially varying transmission effect through the interaction basis
    tmp01 <- paste0("y.infec ~ 0 + BQ2_all.infec + BQ2_all.X.infec + Control2")
  }else{
    # constant coefficient on log(I + 1)
    tmp01 <- paste0("y.infec ~ 0 + BQ2_all.infec + lI + Control2")
  }
  if(cov.type == "vary"){
    # one cubic-regression smooth per covariate
    tmp02 <- paste0(' + s(',cov.infec,', bs = \'cr\', k = 4)', collapse = '')
  }else{
    # plain linear terms
    tmp02 <- paste0(' + ', cov.infec, collapse = '')
  }
  formula.infec = as.formula(paste0(tmp01, tmp02))
  cat("formula.infec:", as.character(formula.infec), "\n")
  # --- fit infection model (zero-inflated Poisson, penalized basis) ------
  if(b1.type == "vary"){
    mfit.stem.infec <- gam(formula.infec, family = ziP(),
                   paraPen = list(BQ2_all.infec = list(P.infec), BQ2_all.X.infec = list(P.infec)), data = dat.fit,
                   offset = log(dat.fit$S.ratio))
  }else{
    mfit.stem.infec <- gam(formula.infec, family = ziP(),
                   paraPen = list(BQ2_all.infec = list(P.infec)), data = dat.fit,
                   offset = log(dat.fit$S.ratio))
  }
  # --- death model: always linear covariates + constant lI effect --------
  tmp01 <- paste0("y.death ~ 0 + BQ2_all.death + lI + Control2")
  tmp02 <- paste0(' + ', cov.death, collapse = '')
  formula.death = as.formula(paste0(tmp01, tmp02))
  cat("formula.death:", as.character(formula.death), "\n")
  mfit.stem.death <- gam(formula.death, family = ziP(),
                 paraPen = list(BQ2_all.death = list(P.death)), data = dat.fit,
                 offset = log(dat.fit$S.ratio))
  # bundle both fits plus the options and bases used
  list(mfit.stem.infec = mfit.stem.infec, mfit.stem.death = mfit.stem.death,
       b1.type = b1.type, cov.type = cov.type,
       BQ2.infec = BQ2.infec, BQ2.death = BQ2.death)
}
|
28f994817f3421c6ce4588940fef92b17e478f9a
|
a4b2eb4d0e22546be6454fd57c901a65e49debd9
|
/Report_2.R
|
b20a65dde9962eaec9b1488097b758a481bf60da
|
[] |
no_license
|
DrMuhsin/Statistical-Inference
|
625d3a52c7f7e2aba5d150980fc4a3d3014de5a9
|
e805d175ec3dd50377229a530edda3e65aa73960
|
refs/heads/master
| 2016-09-09T19:05:58.604800
| 2015-09-20T07:03:32
| 2015-09-20T07:03:32
| 42,802,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
r
|
Report_2.R
|
# Assignment:
# - Load the ToothGrowth data and perform some basic exploratory data analyses.
# - Provide a basic summary of the data.
# - Use confidence intervals and hypothesis tests to compare tooth growth by
#   supp and dose.
# - State your conclusions and the assumptions needed for your conclusions.

library(ggplot2)
# data(ToothGrowth)  # ToothGrowth ships with the datasets package (lazy-loaded)
summary(ToothGrowth)

# convenience copies of the three columns
dose <- ToothGrowth$dose
supp <- ToothGrowth$supp
len <- ToothGrowth$len

# NOTE(review): par(mfrow = ...) only affects base graphics; it has no effect
# on the ggplot2 plots below. Kept to preserve the original graphics state.
par(mfrow = c(1, 2))

# BUG FIX: ggplot objects are only auto-printed at the interactive prompt;
# wrapping them in print() makes the plots appear when the script is source()d.
p1 <- ggplot(ToothGrowth, aes(x = factor(dose), y = len, fill = factor(dose)))
print(p1 + geom_boxplot() + guides(fill = FALSE) + facet_grid(. ~ supp))
p2 <- ggplot(ToothGrowth, aes(x = factor(supp), y = len, fill = factor(supp)))
print(p2 + geom_boxplot() + guides(fill = FALSE) + facet_grid(. ~ dose))

# mean and sd of tooth length for each dosage and each supplement
dosages_means <- aggregate(ToothGrowth$len, by = list(ToothGrowth$dose), FUN = mean)
dosages_sds <- aggregate(ToothGrowth$len, by = list(ToothGrowth$dose), FUN = sd)
Supplements_means <- aggregate(ToothGrowth$len, by = list(ToothGrowth$supp), FUN = mean)
Supplements_sds <- aggregate(ToothGrowth$len, by = list(ToothGrowth$supp), FUN = sd)

# split the data up by dosage
d0.5 <- subset(ToothGrowth, dose == 0.5)
d1.0 <- subset(ToothGrowth, dose == 1.0)
d2.0 <- subset(ToothGrowth, dose == 2.0)

# Welch (unequal-variance, unpaired) t-tests comparing supplements within each dose
test0.5 <- t.test(len ~ supp, paired = FALSE, var.equal = FALSE, data = d0.5)
test0.5$p.value; test0.5$conf
test1.0 <- t.test(len ~ supp, paired = FALSE, var.equal = FALSE, data = d1.0)
test1.0$p.value; test1.0$conf
test2.0 <- t.test(len ~ supp, paired = FALSE, var.equal = FALSE, data = d2.0)
test2.0$p.value; test2.0$conf
|
fb7c6bd4e872222ec0b2402e0946b557503410d1
|
a93f2486bcfdfd63403d96721f0c10f8cc9cdda0
|
/Lab2.R
|
973e7dfa89cac9cf5712efbf0878bef9e56f6fac
|
[] |
no_license
|
aphiratnimanussonkul/data-analysis
|
916b7b278f49363f0d0247482ba971c192296c4f
|
1076fa63f49e9e89979d83c933d2be102f3c2edc
|
refs/heads/master
| 2020-06-23T11:43:05.305794
| 2019-08-28T07:11:45
| 2019-08-28T07:11:45
| 198,613,129
| 0
| 1
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 4,201
|
r
|
Lab2.R
|
# Lab 2 -- introductory R exercises.
# Sys.setlocale(locale = "Thai")  # enables readline() input in Thai
# 1. Take input from the user (name and age) and display the values and
#    their types (check with class()/typeof()).
name <- readline(prompt = "Enter name: ")
age <- as.integer(readline(prompt = "Enter age: "))
# as.integer() coerces the typed text to an integer (NA if not numeric)
print(paste("Your name is :", name))
print(paste("youe age is : ", age))
# 2. Sequence 20..50; mean of 20..60; sum of 51..91.
seq_num_20_50 <- seq(from = 20, to = 50, by = 1)
seq_num_20_60 <- seq(from = 20, to = 60, by = 1)
mean_20to60 <- mean(seq_num_20_60)
seq_num_51_91 <- seq(from = 51, to = 91, by = 1)
sum_51to90 <- sum(seq_num_51_91)
# 3. Vector of 100 random values between -5 and 5.
# NOTE(review): runif() produces *continuous* values, not integers as the
# exercise asks; the commented sample() call below is the integer version.
ran_5to5 <- c(runif(100, min = -5, max = 5))
# ran_5to5 <- c(sample(-5:5, 100, replace = T))
# 4. First 9 Fibonacci numbers into a vector, then a matrix.
# Naive recursive Fibonacci; exponential time but fine for n <= 9.
recurse_fibonacci <- function(n) {
  if(n <= 1) {
    return(n)
  } else {
    return(recurse_fibonacci(n-1) + recurse_fibonacci(n-2))
  }
}
nterms <- 9
# collect the first `nterms` Fibonacci numbers (starting from F(0) = 0)
fibo_list = list()
for(i in 0:(nterms-1)) {
  fibo_list[i+1] <- c(recurse_fibonacci(i))
}
# list -> vector -> single-column matrix
fibo_9 <- unlist(fibo_list)
fibo_matrix <- matrix(fibo_9)
# 5. Print 44..100; "Fizz" for multiples of 2, "Buzz" for multiples of 3,
#    "FizzBuzz" for multiples of both.
for (i in 44:100) {
  if (i %% 2 == 0 & i %% 3 == 0) {
    print(paste("i = ", i, " FizzBuzz"))
  } else if (i %% 2 == 0) {
    print(paste("i = ", i, " Fizz"))
  } else if (i %% 3 == 0) {
    print(paste("i = ", i, " Buzz"))
  } else {
    print(i)
  }
}
# 6. Min and max of a random vector.
vec_random <- c(runif(100, min = -5, max = 10))
vec_random_min <- min(vec_random)
vec_random_max <- max(vec_random)
# 7. Three 3-integer vectors combined column-wise into a 3x3 matrix.
# replace = F means sampling without replacement (no repeats)
a <- c(sample(1:20, 3, replace = F))
b <- c(sample(1:20, 3, replace = F))
c <- c(sample(1:20, 3, replace = F))
matrix_3_3 <- cbind(a, b, c)
print(matrix_3_3)
# 8. Vector of normally distributed random numbers; count occurrences of
#    each value by hand (for comparison with table() below).
vector_normal_dis <- c(rnorm(50, mean = 5, sd = 2))
# vector_normal_dis <- c(sample(1:25, 50, replace = T))
count = list()          # count[k] = occurrences of the k-th distinct value
temp_vect_nor = list()  # distinct values seen so far
index = 0               # number of distinct values recorded
isRepeate = F
# NOTE(review): the equality test `temp_vect_nor[j] == i` compares floating
# point rnorm() draws exactly, so duplicates are essentially never detected;
# this manual loop only behaves as intended with the discrete sample() input.
for (i in vector_normal_dis) {
  print(i)
  if (index == 0) {
    # first value: start the distinct-value list
    index <- index + 1
    print("index = 1")
    temp_vect_nor[index] <- i
    count[index] <- 1
  } else {
    # scan previously seen values for a match
    for (j in 1:index) {
      print(paste("j = ", temp_vect_nor[j]))
      if (temp_vect_nor[j] == i) {
        count[j] <- as.numeric(count[j]) + 1
        isRepeate = T
      }
    }
    if (!isRepeate) {
      # unseen value: append it with count 1
      index <- index + 1
      temp_vect_nor[index] <- i
      count[index] <- 1
    }
  }
  isRepeate = F
}
# table() is the idiomatic one-liner for counting occurrences
count_re <- table(vector_normal_dis)
# side-by-side: distinct values and their hand-computed counts
summary_repeate <- cbind(unlist(temp_vect_nor), unlist(count))
print(vector_normal_dis)
# 9. Read a .csv file and display the content.
grades_set <- read.csv("C:\\cygwin64\\home\\AphiratNimanussonkul\\data-analysis\\grade_csv.csv", header = TRUE, sep = ",")
print(grades_set)
# 10. Print rows 2-4 of the .csv file. Indexing is [row, col].
print(grades_set[2:4, ])
|
cdc7a4de7565f9b51d5bf9aa9dda3c0187e4627c
|
3a6317baa4c7ff84d8d5a98c9eb2b788ed4502a0
|
/Example/Example 1 - Simulation.R
|
890a284c58c3b98329494ba9969e8c3416a5c1ab
|
[] |
no_license
|
wengewsh/iMed
|
8f3c3275537fef018e5e9617e881c112de0394bb
|
8e24831bbdd935f723931b8d637815bdcc8fc009
|
refs/heads/main
| 2023-03-08T11:44:15.957764
| 2021-02-24T10:35:25
| 2021-02-24T10:35:25
| 330,523,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,165
|
r
|
Example 1 - Simulation.R
|
# Simulation with zero nuisance parameters: Monte Carlo study of an EM
# algorithm for a 2-component mixture of mediation (Gaussian) regressions.
n<-500 # sample size per replicate: 100, 200, 300, 400, 500
N<-1000 # number of Monte Carlo replicates
# per-replicate storage: mediation effects (a*b), a's, b's, mixing weight
ME<-matrix(0, N, 2)
A<-matrix(0, N, 2)
B<-matrix(0, N, 2)
ALPHA<-NULL
# Simulation
for (f in 1:N)
{
## True paremeters (betas and c set to zero -> "zero nuisance parameters")
beta1<-c(0,0)
beta2<-c(0,0)
tu<-1.5
a<-c(tu, tu)    # X -> M effects per component
b<-c(tu, -tu)   # M -> Y effects per component (opposite signs)
c<-c(0,0)       # direct X -> Y effect
sigma1<-c(1,1)  # sd of M-equation noise per component
sigma2<-c(1,1)  # sd of Y-equation noise per component
alpha<-c(0.5,0.5)  # true mixing proportions
## Sample based on HMM with $K_{0}=2$: first n1 rows from component 1,
## remaining n2 rows from component 2
Y<-NULL
M<-NULL
n1<-floor(n*alpha[1])
n2<-n-n1
X<-rnorm(n)
M[1:n1]<-beta1[1]+a[1]*X[1:n1]+rnorm(n1)*sigma1[1]
Y[1:n1]<-beta2[1]+c[1]*X[1:n1]+b[1]*M[1:n1]+rnorm(n1)*sigma2[1]
M[(1+n1):n]<-beta1[2]+a[2]*X[(1+n1):n]+rnorm(n2)*sigma1[2]
Y[(1+n1):n]<-beta2[2]+c[2]*X[(1+n1):n]+b[2]*M[(1+n1):n]+rnorm(n2)*sigma2[2]
# design matrices: V = [1, X] for the M-equation, Z = [1, X, M] for the Y-equation
U<-rep(1, n)
V<-matrix(c(U, X), ncol=2)
Z<-matrix(c(U, X, M), ncol=3)
# EM for Two Mixed Gaussians
## Initialization setting: random starting values for all parameters
K<-2
beta1<-runif(K)
beta2<-runif(K)
a<-runif(K)
b<-runif(K)
c<-runif(K)
d<-runif(1, 0.4, 0.6)
alpha<-c(d,1-d)
sigma1<-runif(K, 0.5, 1)
sigma2<-runif(K, 0.5, 1)
# P/Q: component densities of M and Y; H: posterior responsibilities
# NOTE(review): R is allocated but never used below.
P<-matrix(rep(0,n*K),ncol=K)
Q<-matrix(rep(0,n*K),ncol=K)
R<-matrix(rep(0,n*K),ncol=K)
H<-matrix(rep(0,n*K),ncol=K)
## Circle: EM iterations (at most 200)
for (S in 1:200){
## E-step: posterior probability of each observation belonging to each component
for (j in 1:K){
for (i in 1:n){
P[i,j]<-sapply(M[i], dnorm, beta1[j]+a[j]*X[i], sigma1[j])
Q[i,j]<-sapply(Y[i], dnorm, beta2[j]+c[j]*X[i]+b[j]*M[i], sigma2[j])
H[i,j]<-alpha[j]*P[i,j]*Q[i,j]
}
}
H<-H/rowSums(H)  # normalize responsibilities row-wise
# remember current estimates for the convergence check
oldbeta1<-beta1
oldbeta2<-beta2
olda<-a
oldb<-b
oldc<-c
oldalpha<-alpha
oldsigma1<-sigma1
oldsigma2<-sigma2
## M-step: responsibility-weighted least squares per component
for (j in 1:K){
alpha[j]<-sum(H[,j])/sum(H)
beta1[j]<-(solve(t(V)%*%diag(H[,j])%*%V)%*%t(V)%*%diag(H[,j])%*%M)[1]
a[j]<-(solve(t(V)%*%diag(H[,j])%*%V)%*%t(V)%*%diag(H[,j])%*%M)[2]
beta2[j]<-(solve(t(Z)%*%diag(H[,j])%*%Z)%*%t(Z)%*%diag(H[,j])%*%Y)[1]
c[j]<-(solve(t(Z)%*%diag(H[,j])%*%Z)%*%t(Z)%*%diag(H[,j])%*%Y)[2]
b[j]<-(solve(t(Z)%*%diag(H[,j])%*%Z)%*%t(Z)%*%diag(H[,j])%*%Y)[3]
sigma1[j]<-{(M-c(beta1[j], a[j])%*%t(V))%*%diag(H[,j])%*%t(M-c(beta1[j], a[j])%*%t(V))/sum(H[,j])}^0.5
sigma2[j]<-{(Y-c(beta2[j], c[j], b[j])%*%t(Z))%*%diag(H[,j])%*%t((Y-c(beta2[j], c[j], b[j])%*%t(Z)))/sum(H[,j])}^0.5
}
## Change condition: stop when every parameter moves less than espsilo
espsilo<-1e-5
if (sum(abs(a-olda)<espsilo) &
sum(abs(b-oldb)<espsilo) &
sum(abs(c-oldc)<espsilo) &
sum(abs(alpha-oldalpha)<espsilo)&
sum(abs(beta1-oldbeta1)<espsilo)&
sum(abs(beta2-oldbeta2)<espsilo)
) break
cat('S', S, 'a', a, 'b', b, 'c', c,'alpha', alpha, '\n')
}
# Interested Parameters' Estimation: order the two components by their
# mediation effect a*b so the labels are comparable across replicates
men1<-which.min(c(a[1]*b[1],a[2]*b[2]))
men2<-which.max(c(a[1]*b[1],a[2]*b[2]))
me1<-a[men1]*b[men1]
me2<-a[men2]*b[men2]
ME[f,]<-c(me1, me2)
A[f,]<-c(a[men1], a[men2])
B[f,]<-c(b[men1], b[men2])
ALPHA[f]<-alpha[men1]
}
## Mean and SD in Table 2 -- set XXX to the column of interest
XXX<-ALPHA # XXX=ALPHA, A[, 1:2], B[, 1:2], and ME[, 1:2].
c(round(mean(XXX), digits=3), round(sd(XXX), digits=3))
|
8a8f8094dd77c97a5c0d106b64a32ba34553ddea
|
4c51e80a435687f8bc529b2808f711b639cc83a4
|
/SupplementaryDataObservationContribution.R
|
6044725f77466126ae208c2d3f207c237a150e72
|
[] |
no_license
|
daisyduursma/EggShape
|
b0e5ff9064cdc8426f601a3f9f3b607ffec47400
|
9adfeabdcdd43af21035d1ddb33e725a8aa9d610
|
refs/heads/master
| 2021-08-26T07:08:55.768684
| 2017-11-16T03:43:07
| 2017-11-16T03:43:07
| 110,913,655
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,281
|
r
|
SupplementaryDataObservationContribution.R
|
# Find names of the people (banders) who contributed to the ABBBS banding data
# used in the "traits that decrease Thermal Stress" paper, and write them to CSV.
# NOTE(review): this script uses capitalize(), %nin% and smartbind() without
# loading their packages (Hmisc and gtools respectively); confirm they are
# attached by the calling environment or add library() calls.
rm(list = ls())
library(raster)
# combine the two ABBBS pullus banding record files
indir<-'/Users/daisy/Google Drive/PhD/Data/Observaitons/Raw/egg'
abbbsA<-read.csv(paste0(indir,"/ABBBS/pullusBandingRecordsPartA_dd.csv"))
abbbsB<-read.csv(paste0(indir,"/ABBBS/pullusBandingRecordsPartB_dd.csv"))
abbbs<-rbind(abbbsA,abbbsB)
# drop duplicate records (same species, date, location, bander)
abbbs<-abbbs[!duplicated(abbbs[,c("SCIENTIFIC_NAME","Day","Month","Year","LAT","LON","BANDER")]),]
abbbs$sourceName<-'ABBBS'
# days since 1970-01-01 as a numeric time stamp
abbbs$epoch<-as.numeric(as.Date(paste0(abbbs$Year,'-',abbbs$Month,'-',abbbs$Day)))
abbbs$type<-with(abbbs, ifelse (AGE == "NESTLING","youngNest",'unknown'))
abbbs$startUnknown<-with (abbbs, ifelse (type=='unknown', abbbs$epoch,NA))
# NOTE(review): `type` is only ever 'youngNest' or 'unknown', so the test
# type=='startYoung' never matches and startYoung is always NA -- this looks
# like it should be type=='youngNest'; confirm against the analysis code.
abbbs$startYoung<-with (abbbs, ifelse (type=='startYoung', abbbs$epoch,NA))
# standardize column names and keep only the fields used downstream
abbbs$lat<-abbbs$LAT
abbbs$lon<-abbbs$LON
abbbs$Scientific.Name<-abbbs$SCIENTIFIC_NAME
abbbs$sourceName <- 'ABBBS'
abbbs<-abbbs[,c('Scientific.Name','lat','lon', 'sourceName', 'type' ,'startYoung','startUnknown','BANDER')]
eggdat<-abbbs
# returns string w/o leading or trailing whitespace
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# finish cleaning eggdat: normalize spaces and capitalize species names
# NOTE(review): gsub(' ', ' ', ...) replaces a space with a space as written;
# presumably the first argument was a non-breaking space that was mangled by
# a file-encoding change -- verify against the original source.
eggdat$Scientific.Name<- capitalize(trim(gsub(' ', ' ', eggdat$Scientific.Name,
fixed=TRUE)))#check for extra spaces and capatlize
eggdat<-subset(eggdat, !is.na(lat) & !is.na(lon) & !is.na(Scientific.Name))#make sure lat and longs are given
#make sure all latitudes are negative (southern hemisphere)
latfix<-subset(eggdat, lat > 0)
latfix$lat<-latfix$lat*-1
latgood<-subset(eggdat, lat <= 0)
eggdat<-rbind(latfix, latgood)
#remove duplicates
eggdat<-eggdat[!duplicated(eggdat),]
# resolve taxonomy: use=1 rows are synonyms to rename, use=0 rows are dropped
syn<-subset(read.csv('/Users/daisy/Google Drive/PhD/Data/Observaitons/Raw/namesResolved.csv'), use ==1)
bad<-subset(read.csv('/Users/daisy/Google Drive/PhD/Data/Observaitons/Raw/namesResolved.csv'), use ==0)
eggdat<-eggdat[eggdat$Scientific.Name %nin% bad$scn1,]
fix<-eggdat[eggdat$Scientific.Name %in% syn$scn1,]
nofix<-eggdat[eggdat$Scientific.Name %nin% syn$scn1,]#[c(1:8)]
# replace synonym names with the accepted (Garnett) names
fix<-merge(fix,syn,by.x = 'Scientific.Name',by.y='scn1')
fix$Scientific.Name <-fix$garnett_name
fix<-fix[,c('Scientific.Name','lat','lon', 'sourceName', 'type' ,'startYoung','startUnknown','BANDER')]
eggdat<-smartbind(fix,nofix)
eggdat<-eggdat[!duplicated(eggdat),]
# make sure data is within continental Australia (raster is NA outside)
aus<-raster("/Users/daisy/Google Drive/PhD/Data/Spatial/Climate/averageAnnualVPD.asc",
crs = '+proj=longlat +datum=WGS84')
xy<-cbind(eggdat$lon,eggdat$lat)
eggdat$outsideAustralia<-is.na(extract(aus,xy))
eggdat<-subset(eggdat,outsideAustralia==FALSE)
# keep only the species included in the study (Table S1)
inc<-read.csv('/Users/daisy/Google Drive/PhD/ThermalStress/tables/Table_S1_20161128.csv')
species<-inc$Species
eggdat<-eggdat[eggdat$Scientific.Name %in% species,] #keep observations of wanted species
# unique bander names, cleaned of stray whitespace, written out with the date
a<- trim(gsub(' ', ' ', eggdat$BANDER,fixed=TRUE))#remove white spaces
a<-as.data.frame(unique(a))#list of names need to include
a<-lapply(a, as.character)[[1]]
write.csv(a,paste0("~/Google Drive/PhD/ThermalStress/tables/ABBBSBanders",
as.Date(Sys.time()),".csv"),row.names=F)
|
cc64817f4dc9813e1904c9ba0131c65cf2b34ce9
|
87fdb51b3b0e92f42a3e33dbf07d0c01628d2aaa
|
/R/compute.PARz.R
|
7b910c6ed6088b98e57c56a13b8f38b1a81ca4ad
|
[] |
no_license
|
belasi01/Cops
|
0b5e0f04c46a639dfe5b8716199abf32757732f4
|
5cd0fa2f5fedb338cc063d1af21cf22ead0f9c5f
|
refs/heads/master
| 2023-07-25T16:07:36.614152
| 2023-07-12T01:08:54
| 2023-07-12T01:08:54
| 72,561,413
| 9
| 8
| null | 2022-08-31T16:13:21
| 2016-11-01T17:49:49
|
R
|
UTF-8
|
R
| false
| false
| 2,368
|
r
|
compute.PARz.R
|
#' Compute PAR at depth from surface irradiance (Ed0+) and profile of
#' downwelling irradiance (EdZ)
#'
#'@param Depth is a vector of depth corresponding to the EdZ measurements
#'@param waves is a vector of wavelength of EdZ and Ed0+
#'@param Edz is a matrix of EdZ (col=waves; row=Depth) after nomalization to Ed.0 variation
#'@param Ed.0 is a is a vector surface irradiance
#'@param f.PAR is a vector of numeric values corresponting to light fraction.
#' The depth (in m) of each light fraction will be computed.
#' Default is f.PAR=c(0.001, 0.01, 0.1,0.5)
#'@param z.fixed is a vector of numeric values corresponting to depth at which light fraction will be calculated.
#' Default is z.fixed=c(5,10,20,30,40)
#'
#'@return The program will return three vectors for
#' 1) the depth of each PAR fraction requested (z.f.PAR);
#' 2) the fraction of PAR at the depth requested (PAR.at.z);
#' 3) the vector of PAR at all depth Z (PAR.z) in micro mol photon / m^2
#'
#'@author Simon Belanger
#'@export
compute.PARz <- function(Depth, waves, Edz, Ed.0,
f.PAR=c(0.001, 0.01, 0.1,0.5),
z.fixed=c(5,10,20,30,40)) {
# Convert microW/cm^-2.nm to Quanta/m-2.nm.s
c = 2.9979e17 # in nm/sec
h = 6.6255e-34 # in J sec
Q0 = Ed.0 * waves / (h*c) * 0.01 # factor -0.01 to convert to W/m^-2.nm
# integrated
Q0[is.na(Q0)] <- 0
fx.linear <- approxfun(waves, Q0)
PAR.0m = integrate(fx.linear, 380, 700, subdivisions=350, stop.on.error = FALSE)[1]
PAR.0m = PAR.0m$value
nz = length(Depth)
PAR.z = rep(NA,nz)
for (i in 1:nz){
QZ = Edz[i,] * waves / (h*c) * 10000/1e6 #
# integrated
QZ[is.na(QZ)] <- 0
fx.linear <- approxfun(waves, QZ)
tmp = integrate(fx.linear, 380, 700, subdivisions=350, stop.on.error = FALSE)[1]
PAR.z[i] = tmp$value
}
# Compute the depth of 90%, 50%, 30%, 10%, 1% and 0.1% PAR
res= spline((PAR.z/PAR.0m), Depth, xout=f.PAR)
z.f.PAR= cbind(f.PAR*100,res$y)
z.f.PAR = as.data.frame(z.f.PAR[length(f.PAR):1,])
names(z.f.PAR) = c("%PAR", "z")
# compute %PAR at fixed depth
res= spline(Depth, (PAR.z/PAR.0m), xout=z.fixed)
PAR.at.z= as.data.frame(cbind(z.fixed,res$y*100))
names(PAR.at.z) = c("z.fixed", "%PAR")
return(list(z.f.PAR=z.f.PAR,
PAR.z=PAR.z / 6.06E23 *1e6,
PAR.at.z=PAR.at.z))
}
|
ad11869646f122851a3487e65a82988fdb3fb129
|
2439887493621fef7480bd6ebdea375f0b83f3da
|
/functions.R
|
86e2eff3cc19408c1fe380ec0bdda837471d9806
|
[] |
no_license
|
psrc/demographic-profile
|
7fe3ef5e39a4fac793411d135e7f6d68778688f7
|
6b3f616589ded82c5b8088fbe3267d4c2e9a4080
|
refs/heads/main
| 2023-03-28T22:31:54.986493
| 2021-04-01T21:53:08
| 2021-04-01T21:53:08
| 351,122,885
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,348
|
r
|
functions.R
|
# Functions used for Data Analysis and Visualization Creations
# Run a stored procedure (or ad-hoc SQL text) against a SQL Server database
# over an ODBC trusted connection and return the result set.
#
# srv.nm:       server name
# db.nm:        database name
# procedure.nm: SQL text to execute (e.g. "EXEC my_proc")
#
# Returns the query result as a tibble.
stored.procedure.from.db <- function(srv.nm, db.nm, procedure.nm) {
  db.con <- dbConnect(odbc::odbc(),
                      driver = "SQL Server",
                      server = srv.nm,
                      database = db.nm,
                      trusted_connection = "yes")
  # Close the connection even if the query below errors out.
  on.exit(DBI::dbDisconnect(db.con), add = TRUE)
  w.tbl <- DBI::dbGetQuery(db.con, procedure.nm)
  # Bug fix: the original called as_tibble(w.tbl) but discarded the result,
  # so a plain data.frame was returned instead of the intended tibble.
  w.tbl <- as_tibble(w.tbl)
  return(w.tbl)
}
# Pull a Census/ACS table for the four PSRC counties (King, Kitsap, Pierce,
# Snohomish: WA FIPS 033/035/053/061) via the Census API, attach readable
# variable labels, and append a calculated "Central Puget Sound" total row.
#
# c.type:  API dataset name passed to censusapi::getCensus() (e.g. "acs/acs5")
# c.yr:    data vintage year
# c.table: Census table/group id (e.g. "B03002")
# c.geo:   geography filter; defaults to the 4 PSRC counties
# c.state: state filter; defaults to Washington (state:53)
# l.yr:    vintage used for the variable-label metadata lookup.
#          NOTE(review): default references a global `label.yr`, which must
#          exist at call time or evaluating the default errors.
#
# Returns a wide table of estimates (E) and margins of error (M) per county,
# plus one region-total row whose MoE is aggregated county-to-region.
get.county.census <- function(c.type, c.yr, c.table, c.geo="county:033,035,053,061", c.state="state:53", l.yr = label.yr) {
  # Download Table from API
  tbl.values <- suppressWarnings(getCensus(name = c.type, vintage = c.yr, vars = c("NAME",paste0("group(",c.table,")")),region = c.geo, regionin = c.state) %>%
                  select(ends_with(c("E","M"))) %>%  # keep estimate (E) and MoE (M) columns
                  select(-state) %>%
                  rename(Geography=NAME) %>%
                  pivot_longer(cols=contains("_"), names_to="name", values_to="value") %>%
                  mutate(Geography = str_replace(Geography, ", Washington", "")))
  # Get variable labels; clean them up ("!!" separators, long MoE wording)
  tbl.vars <- listCensusMetadata(name = c.type, vintage = l.yr, type = "variables", group = c.table) %>%
    filter(grepl("(E|M)$", name)) %>%
    select(name,label) %>%
    mutate(label = gsub("!!"," ", label), label = gsub("Margin of Error","MoE", label), label = gsub(" Total:",":", label))
  # JOin values and labels, then spread back to one column per labelled variable
  tbl.values <- inner_join(tbl.values, tbl.vars, by="name") %>%
    select(-name) %>%
    pivot_wider(names_from = label)
  # Replace the API's -555555555 placeholder values with 0
  tbl.values[tbl.values == -555555555 ] <- 0
  # Add total for region with calculated MoE for county to region aggregation
  region.moe <- suppressWarnings(tbl.values %>% select(contains("MoE")) %>% mutate(PSRC=1) %>% group_by(PSRC) %>% summarise_all(moe_sum))
  region.tot <- tbl.values %>% select(!contains("MOE"),-Geography) %>% mutate(PSRC=1) %>% group_by(PSRC) %>% summarise_all(sum)
  region <- inner_join(region.tot,region.moe,by="PSRC") %>% mutate(Geography="Central Puget Sound") %>% select(-PSRC)
  # Append Region Total to table
  tbl.values <- bind_rows(tbl.values,region)
  return(tbl.values)
}
# Look up one value column from the nested `results` structure
# (results[[year]]$tables[[acs type]][[table]]), keep only the requested
# geographies, and return the sum of that column.
#
# NOTE(review): the defaults reference globals (`results`, `c`, `c.yr`); in
# particular `c.geo = c` resolves to base::c (the function!) unless a global
# named `c` exists at call time -- confirm callers always supply c.geo.
return.value <-function(data=results, c.geo=c, c.year=c.yr, acs.typ, c.tbl, c.val ) {
  r <- data[[c.year]][['tables']][[acs.typ]][[c.tbl]] %>%
    # keep only the requested geographies
    filter(Geography %in% c.geo) %>%
    # extract the requested column as a plain vector
    pull(c.val) %>%
    sum()
  return(r)
}
|
bab2a1fab54946ad575b5135005deed70071d096
|
be9a9778db38c15af123d851bb52a2d89978f124
|
/R/shiftDataByCol.R
|
5d88667b77828f27a8ad3e716196403ea5c48477
|
[] |
no_license
|
kindlychung/bedcollr
|
386d8a1e0f55407c691528d6cc8f2d856f33bd60
|
c708ad191d2c0fc1553c680990f71c021dc974a7
|
refs/heads/master
| 2020-04-12T07:36:53.579287
| 2014-08-12T11:24:44
| 2014-08-12T11:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
r
|
shiftDataByCol.R
|
# TODO: Add comment
#
# Author: kaiyin
###############################################################################
# Build a matrix the same shape as `dat` where column i is filled by shifting
# data upward by the integer encoded in that column's *name*
# (colnames(dat) must parse as integers). Rows vacated by the shift stay NA.
# Verbose message()/print() tracing is emitted throughout.
#
# NOTE(review): the fill always reads from column 1 of `dat`
# (`dat[(colShiftN+1):nr, 1]`), never column i -- so every output column is a
# shifted copy of the FIRST input column. Confirm whether `1` should be `i`.
shiftDataByCol = function(dat) {
	nr = nrow(dat)
	nc = ncol(dat)
	message("Shifting data col by col...")
	message(paste("    dat has", nr, "rows"))
	message(paste("    dat has", nc, "cols"))
	message(paste("Head of dat before shifting: "))
	print(head(dat))
	message(paste("Tail of dat before shifting: "))
	print(tail(dat))
	message("initialize a matrix newdat with same size as dat...")
	newdat = matrix(NA, nr, nc)
	for(i in 1:nc) {
		# Column name encodes how many rows this column must be shifted by.
		colShiftN = as.integer(colnames(dat)[i])
		message(paste("    This col should be shifted by ", colShiftN, "according to colname"))
		newdat[1:(nr-colShiftN), i] = dat[(colShiftN+1):nr, 1]
	}
	message(paste("Head of dat after shifting: "))
	print(head(newdat))
	message(paste("Tail of dat after shifting: "))
	print(tail(newdat))
	message(paste("Setting colnames for new dat..."))
	colnames(newdat) = colnames(dat)
	print(head(newdat))
	print(tail(newdat))
	newdat
}
|
f85403ed4a5fd7f01232a46efc263105fd01f407
|
8d3c3766137ff1b0b7e47e0cf4e33734c40e4c05
|
/SpatemR.R
|
6db605bd05b3bbd3e75f5b2ed978a28026484e65
|
[] |
no_license
|
hdubey/SpatemR
|
297118473b9c986504c5189f79e5f35358b3c692
|
38a60f77414f10da3b0591345df84a71806c1a2b
|
refs/heads/master
| 2021-05-28T09:45:24.096318
| 2015-02-24T19:33:29
| 2015-02-24T19:33:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,469
|
r
|
SpatemR.R
|
library(ggmap)
library(httr)
library(rjson)
library(RCurl)
library(grid)
library(png)
library(jpeg)
# Abfragen der Instagarm Daten
## httr Rederiction URI
full_url <- oauth_callback()
full_url <- gsub("(.*localhost:[0-9]{1,5}/).*", x=full_url, replacement="\\1")
print(full_url)
#Zugangsdaten für den Instragram Client laden
source("secret.R",chdir=T)
## Konfiguration der authentifizierten Verbindung
instagram <- oauth_endpoint(
authorize = "https://api.instagram.com/oauth/authorize",
access = "https://api.instagram.com/oauth/access_token")
myapp <- oauth_app(app_name, client_id, client_secret)
ig_oauth <- oauth2.0_token(instagram, myapp,scope="basic", type = "application/x-www-form-urlencoded",cache=FALSE)
tmp <- strsplit(toString(names(ig_oauth$credentials)), '"')
token <- tmp[[1]][4]
## Daten abrufen
username <- "Inventionate"
user_info <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/search?q=',username,'&access_token=',token,sep="")),unexpected.escape = "keep")
id <- user_info$data[[1]]$id
# Follower erfragen
followers <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/',id,'/follows?access_token=',token,sep="")),unexpected.escape = "keep")
anniID <- followers$data[[1]]$id
### Kompletten eigenen Feed laden
media <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/',id,'/media/recent/?access_token=',token,'&count=33',sep="")),unexpected.escape = "keep")
# Nutzernamen auslesen
for (i in 1:length(media$data) ) {
print(paste(i,': ',media$data[[i]]$user$full_name))
}
### Kompletten Feed eines anderen Nutzers laden
mediaAnni <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/',anniID,'/media/recent/?access_token=',token,'&count=33',sep="")),unexpected.escape = "keep")
mediaAnni
# Nutzernamen auslesen
for (i in 1:length(mediaAnni$data) ) {
print(paste(i,': ',mediaAnni$data[[i]]$user$full_name))
}
# Koordinatenpunkte pro Nutzen plotten
# Geokoordinaten der Bilder abrufen
df = data.frame(no = 1:length(media$data))
for (i in 1:length(media$data) ) {
if ( is.null(media$data[[i]]$location$name) )
{
df$name[i] <- "Nicht benannt"
}
else
{
df$name[i] <- media$data[[i]]$location$name
}
df$latitude[i] <- media$data[[i]]$location$latitude
df$longitude[i] <- media$data[[i]]$location$longitude
date <- as.POSIXct(as.numeric(media$data[[i]]$created_time), origin="1970-01-01")
df$created_date[i] <- format(date, "%d.%m.%Y")
# Stunden: %H
# Minuten: %M
# Datum nach drei Kategorien ordnen
hour <- as.numeric(format(date, "%H"))
# Morgens: 5 bis 11 Uhr
if (5 < hour && hour <= 11)
{
df$created_time[i] <- "morgens"
}
# Mittags: 11 bis 15 Uhr
if (11 < hour && hour <= 15)
{
df$created_time[i] <- "mittags"
}
# Nachmittags: 15 bis 18 Uhr
if (15 < hour && hour <= 18)
{
df$created_time[i] <- "nachmittags"
}
# Abends: 18 bis 23 Uhr
if (18 < hour && hour <= 23)
{
df$created_time[i] <- "abends"
}
# Nachts: 23 bis 5 Uhr
if (23 < hour || hour <= 5)
{
df$created_time[i] <- "nachts"
}
df$image[i] <- media$data[[i]]$images$standard_resolution$url
}
df
# Geografische Analyse der sozialen Orte Chronologie
## Erstellen einer Karte für den geografischen Raum Karlsruhe
myMap <- get_map(location = c(lat = 49.01345, lon = 8.39451), source="stamen", zoom=17, maptype="watercolor", crop=FALSE)
map <- ggmap(myMap, maprange=FALSE, extent = 'device', base_layer=ggplot(aes(x = longitude, y = latitude), data=df))
# legend="topright"
plot <- map +
theme(
legend.title = element_blank(),
legend.text = element_text(size = 14)
) +
geom_path(size=1.5, alpha=0.75, colour="darkred") +
geom_point(aes(colour=created_time), shape=15, alpha = 1, size = 25) +
guides(colour = guide_legend(override.aes = list(size=5)))
plot
# Bilder in Karte einfügen
# Bilder abrufen
for (i in 1:nrow(df) ) {
myurl = toString(df$image[i])
z <- tempfile()
download.file(myurl,z,mode="wb")
assign( paste0("pic_",i,sep=""), readJPEG(z))
file.remove(z) # cleanup
assign( paste0("g_",i,sep=""), rasterGrob(paste("pic_",i,sep=""), interpolate=T))
}
# Bilder hinzufügen
margin <- 0.0005
plot.pic <- plot
for (i in 1:nrow(df)) {
plot.pic <- plot.pic + inset(rasterGrob(get(paste("pic_",i,sep="")), interpolate=T), xmin=df$longitude[i]-margin, xmax=df$longitude[i]+margin, ymin=df$latitude[i]-margin,ymax=df$latitude[i]+margin)
}
plot.pic
# Abstrakte Analyse der sozialen Orte Chronologie
plot <- ggplot(df, aes(x=longitude, y=latitude)) +
geom_path(size=1.5,alpha=0.75,linetype="solid",colour="black") +
geom_point(aes(colour=created_time), shape=15, alpha = 0.85, size = 24) +
guides(colour = guide_legend(override.aes = list(size=5))) +
theme_minimal() +
theme(
legend.title = element_blank(),
legend.text = element_text(size = 14)
)
plot
# Bilder in abstrakten Plot einfügen
# Bilder abrufen
for (i in 1:nrow(df) ) {
myurl = toString(df$image[i])
z <- tempfile()
download.file(myurl,z,mode="wb")
assign( paste0("pic_",i,sep=""), readJPEG(z))
file.remove(z) # cleanup
assign( paste0("g_",i,sep=""), rasterGrob(paste("pic_",i,sep=""), interpolate=T))
}
# Bilder hinzufügen
margin <- 0.05
plot.pic <- plot
for (i in 1:nrow(df)) {
plot.pic <- plot.pic + annotation_custom(rasterGrob(get(paste("pic_",i,sep="")), interpolate=T), xmin=df$longitude[i]-margin, xmax=df$longitude[i]+margin, ymin=df$latitude[i]-margin,ymax=df$latitude[i]+margin)
}
plot.pic
|
53f96dfec69267a462f7a6370658ddde4733d407
|
a82e543bba4977149a41b98bc0aacdd57a89dbe9
|
/R/RcppExports.R
|
770961148ffc9d6ec2998ec214f36c555dcf9a66
|
[] |
no_license
|
Redspecialist/pagoda2
|
e86c7c33948404fa252ef09e1ca5a4d39c868e16
|
9077fff045b2afe9a4142c016ee299523766236d
|
refs/heads/master
| 2021-01-25T09:04:25.489796
| 2017-06-08T04:08:28
| 2017-06-08T04:08:28
| 93,778,664
| 1
| 0
| null | 2017-06-08T18:13:59
| 2017-06-08T18:13:59
| null |
UTF-8
|
R
| false
| false
| 3,415
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
non0LogColLmS <- function(sY, X, ldepth, maxCells = 0L) {
.Call('pagoda2_non0LogColLmS', PACKAGE = 'pagoda2', sY, X, ldepth, maxCells)
}
colMeanVarS <- function(sY, rowSel) {
.Call('pagoda2_colMeanVarS', PACKAGE = 'pagoda2', sY, rowSel)
}
colSumByFac <- function(sY, rowSel) {
.Call('pagoda2_colSumByFac', PACKAGE = 'pagoda2', sY, rowSel)
}
inplaceColMult <- function(sY, mult, rowSel) {
.Call('pagoda2_inplaceColMult', PACKAGE = 'pagoda2', sY, mult, rowSel)
}
inplaceWinsorizeSparseCols <- function(sY, n) {
.Call('pagoda2_inplaceWinsorizeSparseCols', PACKAGE = 'pagoda2', sY, n)
}
jsDist <- function(m) {
.Call('pagoda2_jsDist', PACKAGE = 'pagoda2', m)
}
orderColumnRows <- function(p, i) {
.Call('pagoda2_orderColumnRows', PACKAGE = 'pagoda2', p, i)
}
smatColVecCorr <- function(sY, sv, parallel = TRUE) {
.Call('pagoda2_smatColVecCorr', PACKAGE = 'pagoda2', sY, sv, parallel)
}
arma_mat_cor <- function(m) {
.Call('pagoda2_arma_mat_cor', PACKAGE = 'pagoda2', m)
}
hnswKnn <- function(m, efConstruction = 20L, indexThreadQty = 4L, searchMethod = 4L) {
.Call('pagoda2_hnswKnn', PACKAGE = 'pagoda2', m, efConstruction, indexThreadQty, searchMethod)
}
hnswKnn2 <- function(m, k = 5L, nThreads = 30L, efConstruction = 20L, indexThreadQty = 4L, searchMethod = 4L, seed = -1L, verbose = TRUE) {
.Call('pagoda2_hnswKnn2', PACKAGE = 'pagoda2', m, k, nThreads, efConstruction, indexThreadQty, searchMethod, seed, verbose)
}
hnswKnnJS <- function(m, k = 5L, nThreads = 20L, efConstruction = 20L, indexThreadQty = 4L, searchMethod = 4L, seed = -1L) {
.Call('pagoda2_hnswKnnJS', PACKAGE = 'pagoda2', m, k, nThreads, efConstruction, indexThreadQty, searchMethod, seed)
}
hnswKnnLp <- function(m, k = 5L, nThreads = 30L, p = 2.0, efConstruction = 20L, indexThreadQty = 4L, searchMethod = 4L, seed = -1L, verbose = TRUE) {
.Call('pagoda2_hnswKnnLp', PACKAGE = 'pagoda2', m, k, nThreads, p, efConstruction, indexThreadQty, searchMethod, seed, verbose)
}
hnswKnn3test <- function(m, k = 5L, multiplex = 1L, nqueries = 1000L, nThreads = 30L, efConstruction = 20L, indexThreadQty = 4L, searchMethod = 4L, seed = -1L, verbose = TRUE) {
.Call('pagoda2_hnswKnn3test', PACKAGE = 'pagoda2', m, k, multiplex, nqueries, nThreads, efConstruction, indexThreadQty, searchMethod, seed, verbose)
}
matWCorr <- function(Mat, Matw) {
.Call('pagoda2_matWCorr', PACKAGE = 'pagoda2', Mat, Matw)
}
winsorizeMatrix <- function(Mat, Trim) {
.Call('pagoda2_winsorizeMatrix', PACKAGE = 'pagoda2', Mat, Trim)
}
plSemicompleteCor2 <- function(Pl) {
.Call('pagoda2_plSemicompleteCor2', PACKAGE = 'pagoda2', Pl)
}
avg_rank <- function(x) {
.Call('pagoda2_avg_rank', PACKAGE = 'pagoda2', x)
}
sparse_matrix_column_ranks <- function(sY) {
.Call('pagoda2_sparse_matrix_column_ranks', PACKAGE = 'pagoda2', sY)
}
nearbyPointsGreedyCluster <- function(p, windowSize) {
.Call('pagoda2_nearbyPointsGreedyCluster', PACKAGE = 'pagoda2', p, windowSize)
}
closestNPointsToSegments <- function(s, e, p, tss, N) {
.Call('pagoda2_closestNPointsToSegments', PACKAGE = 'pagoda2', s, e, p, tss, N)
}
closestNSegmentsToPoints <- function(s, e, p, tss, N) {
.Call('pagoda2_closestNSegmentsToPoints', PACKAGE = 'pagoda2', s, e, p, tss, N)
}
|
2ead40c1f607fd51f4baa74c9cded0bec8d5f7f3
|
a266bb66eff94641d1ff100daf31e93dcd4e0105
|
/man/dataspec_p4.Rd
|
1c10f1efc41adc4fcb90e26e9bd0673e7c9bbcf2
|
[
"MIT"
] |
permissive
|
ashiklom/rrtm
|
e879f37471dff49930007b2d6b7a8352db1df4b2
|
504c3c7655fe30c5b713e9f0f606960f8a46466a
|
refs/heads/master
| 2022-09-09T02:43:26.722764
| 2022-08-04T18:04:25
| 2022-08-04T18:04:25
| 196,279,459
| 5
| 3
|
NOASSERTION
| 2022-01-13T16:01:27
| 2019-07-10T21:50:33
|
R
|
UTF-8
|
R
| false
| true
| 667
|
rd
|
dataspec_p4.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataspec.R
\docType{data}
\name{dataspec_p4}
\alias{dataspec_p4}
\alias{dataspec_p5}
\alias{dataspec_pd}
\title{Specific absorption coefficients for PROSPECT model}
\format{
An object of class \code{matrix} (inherits from \code{array}) with 2101 rows and 3 columns.
An object of class \code{matrix} (inherits from \code{array}) with 2101 rows and 5 columns.
An object of class \code{matrix} (inherits from \code{array}) with 2101 rows and 6 columns.
}
\usage{
dataspec_p4
dataspec_p5
dataspec_pd
}
\description{
Specific absorption coefficients for PROSPECT model
}
\keyword{datasets}
|
2a687310bab0100c1bde8e94554101c46fa67672
|
c6c3cbbc17ffb85697e4211d5e1ed088a4a373d8
|
/Desktop/Coursera/DATA SCIENCE SPECIALIZATION/Getting and Cleaning Data/run_analysis.R
|
cf7370485b349fd8144325a9fa84c4f15f0d8451
|
[] |
no_license
|
andresborrerom/ExData_Plotting1
|
5ec0f8a96a646632a710f1149040254ab042d76a
|
207fb878258d2cc17d8e7298e37f6ac029db727a
|
refs/heads/master
| 2021-01-14T09:49:50.512044
| 2014-09-03T00:34:22
| 2014-09-03T00:34:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,242
|
r
|
run_analysis.R
|
# Merge the UCI HAR train/test data sets, keep only the mean/std
# measurements, tidy the variable names, and produce a tidy data set with the
# average of each measurement per subject and activity.
#
# Expects the "UCI HAR Dataset" folder in the working directory. Writes
# "tidySamsung.txt" (comma separated) and returns the tidy data frame.
run_analysis <- function() {
  # plyr provides join(), used below to attach activity labels.
  library(plyr)
  dir1 <- getwd()
  print(paste("please make sure the UCI HAR Dataset folder is in", dir1))

  # Read all raw tables.
  Xtest  <- read.table("UCI HAR Dataset/test/X_test.txt")
  ytest  <- read.table("UCI HAR Dataset/test/y_test.txt")
  Xtrain <- read.table("UCI HAR Dataset/train/X_train.txt")
  ytrain <- read.table("UCI HAR Dataset/train/y_train.txt")
  labels <- read.table("UCI HAR Dataset/activity_labels.txt")
  subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
  subject_test  <- read.table("UCI HAR Dataset/test/subject_test.txt")
  # Bug fix: `features` was referenced below but never loaded from disk.
  features <- read.table("UCI HAR Dataset/features.txt")

  # Name the measurement columns after the feature list.
  names(Xtest)  <- features[, 2]
  names(Xtrain) <- features[, 2]

  # Tag each subject row with its partition (test/train).
  subject_test <- cbind(subject_test, "test")
  names(subject_test) <- c("subject", "group-partition")
  subject_train <- cbind(subject_train, "train")
  names(subject_train) <- c("subject", "group-partition")

  # Attach descriptive activity names to the y files.
  names(ytrain) <- "activity"
  names(ytest)  <- "activity"
  names(labels) <- c("activity", "activity_name")
  ytrain <- join(ytrain, labels)
  ytest  <- join(ytest, labels)

  # Bind activity name + subject info onto the measurement tables.
  Xtest  <- cbind(ytest[, 2], Xtest)
  Xtrain <- cbind(ytrain[, 2], Xtrain)
  names(Xtest)  <- c("activity_name", names(Xtest[, 2:562]))
  names(Xtrain) <- c("activity_name", names(Xtrain[, 2:562]))
  Xtest  <- cbind(subject_test, Xtest)
  Xtrain <- cbind(subject_train, Xtrain)

  # Merge the two partitions row-wise.
  Xtotal <- rbind(Xtest, Xtrain)

  # Keep only mean/std measurement columns, plus the three id columns
  # (subject, partition, activity). Per features_info.txt, "mean"/"std"
  # appear in the relevant column names (case-insensitive).
  Xmean <- grep("MEAN", names(Xtotal), value = FALSE, ignore.case = TRUE)
  Xstd  <- grep("std",  names(Xtotal), value = FALSE, ignore.case = TRUE)
  z <- c(1, 2, 3, Xmean, Xstd)
  Xmeanstd <- Xtotal[, z]

  # Make variable names descriptive (course rules): t -> time, f -> freqdom,
  # lower case, drop separators.
  names(Xmeanstd) <- sub("^t", "time", names(Xmeanstd))
  names(Xmeanstd) <- sub("^f", "freqdom", names(Xmeanstd))
  names(Xmeanstd) <- tolower(names(Xmeanstd))
  names(Xmeanstd) <- sub("_", "-", names(Xmeanstd))
  names(Xmeanstd) <- sub("-", "", names(Xmeanstd))
  names(Xmeanstd) <- sub("()-", "", names(Xmeanstd))

  # Tidy set: average of every measurement per subject x activity.
  # (Generalized from the hard-coded 180 x 89 of the original: the shape is
  # derived from the data, so the function survives a different feature set.)
  multiplo   <- nrow(labels)
  n.subjects <- length(unique(Xmeanstd$subject))
  n.cols     <- ncol(Xmeanstd)
  newTidy1 <- data.frame(matrix(, n.subjects * multiplo, n.cols))
  names(newTidy1) <- names(Xmeanstd)
  for (i in seq_len(n.subjects)) {
    for (j in seq_len(multiplo)) {
      # Subset for this subject + activity combination.
      p1  <- subset(Xmeanstd, Xmeanstd$subject == i & Xmeanstd$activityname == labels[j, 2])
      row <- (i - 1) * multiplo + j
      # Id columns: subject number, partition (test/train), activity name.
      newTidy1[row, 1] <- i
      newTidy1[row, 3] <- as.character(labels[j, 2])
      newTidy1[row, 2] <- as.character(p1[1, 2])
      # Mean of every measurement column for this subset.
      newTidy1[row, 4:n.cols] <- colMeans(p1[, 4:n.cols])
    }
  }

  # Prefix measurement columns with "average" and export CSV-style so the
  # result can be attached to the course project.
  for (k in 4:n.cols) {
    names(newTidy1)[k] <- paste("average", names(newTidy1[k]), sep = "")
  }
  write.table(newTidy1, file = "tidySamsung.txt", sep = ",")
  return(newTidy1)
}
|
2d173b68b62870b637e345ca72a9e1ba081c5676
|
bc444cca02da37e3b8c30f2bde1ef595a2a9c7eb
|
/R_S16/Lec1_05.R
|
b9f8173262bddb8bfab8d5b597a622c37dffcb88
|
[] |
no_license
|
dkQkdbTl1966/R_DataAnalytics
|
e20029f3b3e27a2d4f996a4c9e235551a6c0e25e
|
08ea98289f4def3c4f72d4c10d3767784b42619b
|
refs/heads/master
| 2021-08-14T22:57:44.169860
| 2017-11-16T23:39:49
| 2017-11-16T23:39:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,944
|
r
|
Lec1_05.R
|
################################################################
#
# R을 활용한 통계분석
#
# 5. 다중회귀분석
#
################################################################
#---------------------------------------------------------------
# 다중회귀분석 (Multiple Regression Model)
#---------------------------------------------------------------
# programmer 20명
# salary가 experience(경력년수), score (직무적성검사성적)과 연관성을 갖는지 검증.
df <- read.csv("data/salary.csv")
head(df)
summary(df)
library(psych)
pairs.panels(df) # salary ~ experience 상관계수 0.86
# 단순회귀 : 경력 증가시 연봉 증가 상관관계
model <- lm(salary ~ experience, data = df)
summary(model)
# 다중회귀 : 경력 증가시 적성검사 점수 증가로 인한 연봉 증가까지 포함된 관계
# experience ~ score 의 cor() = 0.34
# model <- lm(salary ~ ., data = df)
model <- lm(salary ~ experience + score, data = df)
summary(model)
# 추정된 회귀식
# salary = 3.174 + 1.404 * experience + 0.251 * score
# b1 : b2(score)가 일정하다고 할 때, experience가 1년 증가하면 salary가 $1,404 증가할 것으로 기대된다.
# b2 : b1(experience)가 일정하다고 할 때, score가 1점 증가하면 salary가 $251 증가할 것으로 기대된다.
#---------------------------------------------------------------
# 다중회귀분석 결과 해석
# (1) Adjusted R-squared
# R-squared: 0.83 --> experience와 score가 salary 변동량의 83%를 설명한다.
# But, 설명변수 갯수가 증가하면 결정계수도 증가
# --> 설명변수 갯수에 대한 패널티 적용한 결정계수 = Adjusted R-squared
# (2) F-test
# H0 : b1 = b2 = ...... = bk = 0
# 종속변수와 모든 독립(설명)변수 집합간에 유의한 관계가 존재하는지 검정
# b0 는 큰 의미가 없다.
# (3) T-test
# H0 : bi = 0
# 각 개별 독립변수의 유의성 검정
# (4) 잔차분석 --> Residuals plot / Normal Q-Q plot / Leverage plot
#---------------------------------------------------------------
# 영향점이 있는 경우
plot(model) # --> Leverage plot에서 2번째 자료가 이상치 & 영향점
dcolor <- rep(1, length(df$salary))
dcolor[2] = 2
pairs(df, col = dcolor, pch = dcolor) # 2번 자료만 다르게 표시
# 영향점 제거는 주관적으로 판단하는 수밖에 없다.
df2 <- df[-2, ] # 영향점 제거할 경우
pairs.panels(df2) # salary ~ experience 상관계수 높아짐(0.91). 다른 상관계수는 낮아짐.
model2 <- lm(salary ~ experience + score, data = df2)
summary(model2) # score 회귀계수가 유의하지 않다.
#---------------------------------------------------------------
# 추정과 예측
# 경력 5년, 적성검사성적 80점인 사람과 경력 10년, 성적 70점인 사람의 연봉 예측
# 평균 연봉의 95% 신뢰구간
predict(model, data.frame("experience" = c(5,10), "score" = c(80,70)),
interval = "confidence")
# 새로운 한 명에 대한 95% 예측구간
predict(model, data.frame("experience" = c(5,10), "score" = c(80,70)),
interval = "prediction")
#---------------------------------------------------------------
# 다중공선성 (Multicollinearity)
#---------------------------------------------------------------
# 독립변수들이 서로 높은 상관관계를 가지면 회귀계수의 정확한 추정이 어렵다.
# ---> 모형 선택 방법론을 적용하여 가장 적절한 변수를 선택할 수 있다.
# 30개 부서에서 부서당 35명의 직원 설문조사
# 데이터 숫자는 해당 질문에 긍정한 직원의 비율
attitude
round(cor(attitude),3)
pairs.panels(attitude)
# cor : complaints + learning = 0.597
# cor : complaints + raises = 0.669
plot(attitude[ , c("rating", "complaints", "learning")])
a <- lm(rating ~ complaints + learning, data = attitude)
summary(a)
# learning의 t-test p-value 값을 보면 유의하지 않다.
# 하지만 rating과 상관관계가 없는 것이 아니다.
# complaints 와의 상관관계도 있기 때문에 rating 변수에 대한 역할이 작아보일 뿐이다.
#---------------------------------------------------------------
# 모형 선택법 (Model Selection) = 설명변수 선택
#---------------------------------------------------------------
# *** 해당 업무분야에서 반드시 들어가야 하는 변수는 고정 !!!
# (1) Forward selection
# --- 가장 유의한 변수부터 하나씩 추가 (R-sq 기준)
# --- 변수값의 작은 변동에도 결과가 크게 달라져 안정성 부족
# (2) Backward selection
# --- 모든 변수를 넣고 가장 기여도가 낮은 것부터 하나씩 제거
# --- 전체 변수 정보를 이용하는 장점
# --- 변수의 갯수가 많은 경우 사용 어려움. 안정성 부족.
# (3) Stepwise selection
# --- Forward selection과 backward selection을 조합
# --- 새로운 변수 추가 후에 기존 변수의 중요도가 약화되면 그 변수 제거
# (4) All Subsets Regression
# --- 모든 가능한 모형을 비교하여 최적의 모형선택
# --- 여러 모형 중 최소 AIC, BIC, Mallow’s Cp 또는 최대 adjusted R-sq를 갖는 모형을 선택
# --- 모형의 복잡도에 벌점을 주는 방법. AIC (Akaike information criterion), BIC (Bayesian ...)
#---------------------------------------------------------------
# Backward selection
out <- lm(rating ~ ., attitude)
summary(out)
anova(out) # 각 회귀계수 t검정 p-value 기준 선별. critical 제거.
out2 <- lm(rating ~ . - critical, data = attitude)
summary(out2)
anova(out2) # raises 제거
# Backward selection 자동화
backward <- step(out, direction = "backward", trace = T)
backward <- step(out, direction = "backward", trace = F)
backward # 최종 선택된 회귀모형 : rating ~ complaints + learning
backward$anova # critical, raises, privileges, advance 순으로 제거됨
#---------------------------------------------------------------
# Stepwise selection
both <- step(out, direction = "both", trace = F)
both
both$anova
#---------------------------------------------------------------
# All Subsets Regression
library(leaps)
leap <- regsubsets(rating ~ ., attitude, nbest = 5) # size당 5개의 최적 모형 저장
summary(leap)
plot(leap)
plot(leap, scale = "adjr2") # adjusted r-squred 기준
#---------------------------------------------------------------
# practice 5
#---------------------------------------------------------------
# hotel margin prediction
data <- read.csv("data/laquinta.csv")
summary(data)
str(data)
# 자료의 산점도 확인
round( cor(data), 3)
pairs.panels(data) # 설명변수간의 correlation도 낮다. 종속변수와도 낮다.
# 회귀모형
model <- lm(Margin ~ ., data)
summary(model) # F-test 유의함. R-squared: 0.525. Distance, Enrollment 제외한 회귀계수 유의함.
plot(model) # 잔차도 이상 없음
backward <- step(model, direction = "backward", trace = F)
backward
both <- step(model, direction = "both", trace = F)
both
# 최종 회귀모형 : Margin ~ Number + Nearest + Office.Space + Enrollment + Income
# Coefficients:
# (Intercept) Number Nearest Office.Space Enrollment Income
# 37.128891 -0.007742 1.586923 0.019576 0.196385 0.421411
# 다음 조건을 가진 한 지역의 Margin을 95% 신뢰구간으로 예측
new <- data.frame("Number" = 3815, "Nearest" = 0.9, "Office.Space" = 476,
"Enrollment" = 24.5, "Income" = 35, "Distance" = 11.2)
new
predict(model, new, interval = "prediction")
# BIC 값을 최소로 하는 설명변수의 조합을 찾아 회귀식을 추정
regsub <- regsubsets(Margin ~ ., data, nbest = 5)
plot(regsub) # 최종 회귀모형 : Margin ~ Number + Nearest + Office.Space + Income
plot(regsub, scale="adjr2")
|
18e0a1eefcffcaf97a9cc6bdeeb3d5df17484b74
|
ebeacd2b716a9132e12d868e76c66d4c214ac613
|
/Old Package/R/conc_comp.R
|
fa1cbabdfcb3ae4c05f4b604d11615160ec8bf73
|
[] |
no_license
|
PHP2560-Statistical-Programming-R/r-package-courses-brown
|
345c8c6fc28dfe62531747e01bb34317c0387d2c
|
9b015e4846fe89d6762af3f1402149d898e0d592
|
refs/heads/master
| 2021-08-29T22:18:53.700149
| 2017-12-15T05:29:28
| 2017-12-15T05:29:28
| 110,870,181
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,432
|
r
|
conc_comp.R
|
#' conc_comp Function
#'
#' This function takes two concentrations of interest as inputs and returns a
#' table with all the required classes for both concentrations.
#'conc.name1: a character string for the first concentration name as it appears in the
#' list of Brown concentrations in the website:
#' "https://bulletin.brown.edu/the-college/concentrations/". The input is not case sensitive.
#' **conc.name is a character string, so it needs to go with "".
#' conc.name2: a character string for the second concentration name as it appears in the
#' list of Brown concentrations in the website:
#' "https://bulletin.brown.edu/the-college/concentrations/". The input is not case sensitive.
#' **conc.name is a character string, so it needs to go with "".
#' Table of all the required courses for the concentration inputed.
#' conc_comp("Economics", "Mathematics")
#' conc_comp("mathematics", "Africana Studies")
conc_comp <- function(conc_name1, conc_name2) {
# This function takes the concentration of interest as an input and returns a
# table with all the required classes for that particular concentration. The input is not
# case sensitive. The function will run only if the input matches the concentration name listed
# on the website. If the department does not display a table, a message will display this.
# Compile a list of undergraduate concentrations available at Brown from the website, so
# that if the concentrations are updated on the website, the list is also updated
link <- html_session("https://bulletin.brown.edu/the-college/concentrations/")
conc_list <- link %>%
html_nodes("#textcontainer li") %>% # css selector for the entire list of concentration
html_text() # select only the text
conc_list <- as.vector(conc_list)
## Run the function only if the user's input matches the name listed in the concentration
## list, ignoring cases.
match1 <- grepl(pattern=paste("^", conc_name1,"$", sep=""), conc_list, ignore.case=TRUE)
match2 <- grepl(pattern=paste("^", conc_name2,"$", sep=""), conc_list, ignore.case=TRUE)
# Index to find the link of the concentraton of interest
i <- grep(pattern=paste("^", conc_name1,"$", sep=""), conc_list, ignore.case=TRUE)
b <- grep(pattern=paste("^", conc_name2,"$", sep=""), conc_list, ignore.case=TRUE)
if (any(match1==TRUE) && str_length(conc_name1) == str_length(conc_list[i]) &&
any(match2==TRUE) && str_length(conc_name2) == str_length(conc_list[b])) {
# Pull up the website that has a list of all the undergraduate concentrations
link <- html_session("https://bulletin.brown.edu/the-college/concentrations/")
# Select the concentration of interest
link_conc1 <- link %>% follow_link(i+36)
link_conc2 <- link %>% follow_link(b+36)
# Read the content of the link
content1 <- read_html(link_conc1)
content2 <- read_html(link_conc2)
# Scrape the table
link_table1 <- html_nodes(content1, 'table')
link_table2 <- html_nodes(content2, 'table')
# If the department doesn't display a table, an error "subscript out of bounds" appears. tryCatch will
# ignore this error and allow the function to keep working
scrape_table1 <- tryCatch(html_table(link_table1)[[1]], error=function(e) matrix(nrow=2, ncol=2))
scrape_table2 <- tryCatch(html_table(link_table2)[[1]], error=function(e) matrix(nrow=2, ncol=2))
# Create a table only if the table exists (i.e. if scrape table ≠ NA)
if ((is.na(scrape_table1[1,1])== FALSE) && (is.na(scrape_table2[1,1]) == FALSE)) {
# Convert the table into a dataframe
classes <- scrape_table1$X1
class_name <- scrape_table1$X2
number_classes <- scrape_table1$X3
table_req1 <- data_frame(classes, class_name, number_classes)
table_req1$number_classes[table_req1$number_classes == ""] <- " "
space1 <- list("-", "-", "-")
space2 <- list("-", "-", "-")
name1 <- list("Concentration 1: ", "", "")
name2 <- list("Concentration 2: ", "", "")
table_req1s <- rbind(name1, table_req1)
table_req1s <- rbind(table_req1s, space1)
table_req1s <- rbind(table_req1s,space2)
classes <- scrape_table2$X1
class_name <- scrape_table2$X2
number_classes <- scrape_table2$X3
table_req2 <- data_frame(classes, class_name, number_classes)
table_req2 <- rbind(name2, table_req2)
total <- rbind(table_req1s, table_req2)
total$number_classes[total$number_classes == ""] <- " "
table_req_2<-rename(total, "Class Code" = classes, "Class Name" = class_name, "Number of Classes" = number_classes)
explain <- list("", "", "If the Class Number cell is empty or has a NA, refer to the category the class belongs to.")
rbind(table_req_2, explain)
#Course <- scrape_table2$X1
#Title <- scrape_table2$X2
#Credit <- scrape_table2$X3
# table_req2 <- data_frame(Course, Title, Credit)
#total <- rbind(table_req1, table_req2)
return(total)
} else {stop('One of the concentrations does not have a table presented')}
} else {stop('Please enter valid concentration names. Refer to the list of undergraduate concentrations offered at Brown at https://bulletin.brown.edu/the-college/concentrations/')}
}
a <- conc_comp("Economics", "chemistry")
|
f26090072af58dced2dd19eac9a3d803f839ebe0
|
2d6418dc719114e785f716b08478089503bc0ab2
|
/r/learning/optimization/assign_memory_ahead.r
|
d9d8725d2365fc5a9f9751c83288c5787183e496
|
[] |
no_license
|
jk983294/math
|
9a9343bde5804c8655a897a20ded3e67f528b225
|
f883e281961cd4cf16851de15e64746f59b966c7
|
refs/heads/master
| 2023-09-04T13:04:17.944946
| 2023-09-04T09:25:07
| 2023-09-04T09:25:07
| 139,425,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 289
|
r
|
assign_memory_ahead.r
|
# Micro-benchmark: cost of growing a vector inside a loop vs. pre-allocating
# vs. full vectorization. The slow idioms below are intentional -- they are
# the point of the comparison, so do not "fix" them.
set.seed(1234)
k <- 1e+05
x <- rnorm(k)
y <- 0
# Worst case: y starts at length 1 and is grown element-by-element, forcing
# repeated reallocation/copying as the loop runs.
system.time(for (i in 1:length(x)) y[i] <- x[i]^2) # for & no memory allocation
y <- numeric(k)
# Same loop, but y is pre-allocated to its final length up front.
system.time(for (i in 1:k) y[i] <- x[i]^2) # for & memory allocation
y <- numeric(k)
# Fastest: one vectorized operation, no explicit R-level loop at all.
system.time(y <- x^2) # vectorization & memory allocation
|
b4be6e8f243936b77e3b16c165c4af5e0e001284
|
f044402735a52fa040c5cbc76737c7950406f8b2
|
/BrCa_Age_Associated_TMA/Packages/biostatUtil/man/Xunivcoxph.Rd
|
0fef18fd4a5444b998f33991679a74ea1edaa5a6
|
[] |
no_license
|
BCCRCMO/BrCa_AgeAssociations
|
5cf34f3b2370c0d5381c34f8e0d2463354c4af5d
|
48a11c828a38a871f751c996b76b77bc33d5a3c3
|
refs/heads/master
| 2023-03-17T14:49:56.817589
| 2020-03-19T02:18:21
| 2020-03-19T02:18:21
| 247,175,174
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,330
|
rd
|
Xunivcoxph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_coxph.R
\name{Xunivcoxph}
\alias{Xunivcoxph}
\title{Univariate Cox proportional hazards model}
\usage{
Xunivcoxph(mod, digits = 3)
}
\arguments{
\item{mod}{model fit object, returned from either \code{coxph} or
\code{coxphf}.}
\item{digits}{number of digits to round}
}
\value{
Hazard ratio and 95\% confidence interval
}
\description{
Concatenates hazard ratios and confidence limits for every covariate in a Cox
model.
}
\examples{
library(survival)
library(coxphf)
# One predictor
test1 <- list(
time = c(4, 3, 1, 1, 2, 2, 3),
status = c(1, 1, 1, 0, 1, 1, 0),
x = c(0, 2, 1, 1, 1, 0, 0),
sex = c(0, 0, 0, 0, 1, 1, 1)
)
mod <- coxph(Surv(time, status) ~ x + strata(sex), test1)
Xunivcoxph(mod)
# Multiple predictors
bladder1 <- bladder[bladder$enum < 5, ]
mod <- coxph(Surv(stop, event) ~ (rx + size + number) * strata(enum) +
cluster(id), bladder1)
Xunivcoxph(mod, digits = 2)
# Firth's correction
test2 <- data.frame(list(
start = c(1, 2, 5, 2, 1, 7, 3, 4, 8, 8),
stop = c(2, 3, 6, 7, 8, 9, 9, 9, 14, 17),
event = c(1, 1, 1, 1, 1, 1, 1, 0, 0, 0),
x = c(1, 0, 0, 1, 0, 1, 1, 1, 0, 0)
))
mod <- coxphf(formula = Surv(start, stop, event) ~ x, pl = FALSE,
data = test2)
Xunivcoxph(mod)
}
\author{
Aline Talhouk, Derek Chiu
}
|
0e2b9be85d616a361487d39d0ab5603c18b12701
|
6c382b0e1aa8be03eb0b11ad8fb40882701fe909
|
/sample_gradebook.R
|
f916e0a35b3f5f288f7173848ce24887f4b66ca3
|
[] |
no_license
|
restrellado/sample_gradebook
|
02281f4f13df6d0227108b42d3a334e1cbd20e88
|
aee490bc79ed7d27c7bd067da9a4892e736b4fa5
|
refs/heads/master
| 2021-04-03T05:52:46.325481
| 2018-03-14T04:08:58
| 2018-03-14T04:08:58
| 125,152,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 826
|
r
|
sample_gradebook.R
|
# Create a gradebook dataset as an example
library(tidyverse)
# Creates random quiz scores
#------------------------------------------------------------------------------
make_score <- function(min, max, n) {
  # Creates random quiz scores drawn uniformly (with replacement) from the
  # allowed range.
  #
  # Args:
  #   min: lowest possible quiz score
  #   max: highest possible quiz score
  #   n: number of quiz scores
  # Returns: integer vector of length n with values in [min, max]
  #
  # Bug fix: the original body sampled from c(1:100) regardless of the
  # min/max arguments, so they were silently ignored; sample from min:max
  # so the documented contract actually holds.
  sample(min:max, n, replace = TRUE)
}
# Make dataset
#------------------------------------------------------------------------------
# Assemble the example gradebook: 25 students, a random gender label, and
# five quiz columns generated by make_score(). Note no seed is set here, so
# the data differ on every run; call set.seed() first for reproducibility.
gb <- tibble(
  name = paste0("student_", c(1:25)),
  gender = sample(c("f", "m"), 25, replace = TRUE),
  quiz_1 = make_score(0, 100, 25),
  quiz_2 = make_score(0, 100, 25),
  quiz_3 = make_score(0, 100, 25),
  quiz_4 = make_score(0, 100, 25),
  quiz_5 = make_score(0, 100, 25)
)
|
d7251b70ea0852598a7aa88f1789f62593e5bd79
|
331b734a213e6d2b8038ed90797d1440de1b1094
|
/src/dataDownload.R
|
2adc3370f6262f0d0c9b0f3e3e303fb315bf4831
|
[] |
no_license
|
leekgroup/networks_correction
|
b51d0fd36d91e3206a1b54008899561501f0d320
|
ef24a718c86bd9c4ad5b9b15e2f64703bc3ffc51
|
refs/heads/master
| 2021-10-28T07:35:46.170680
| 2019-04-22T16:58:08
| 2019-04-22T16:58:08
| 106,853,810
| 15
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,801
|
r
|
dataDownload.R
|
# NOTE(review): rm(list = ls()) wipes the current workspace -- a script
# anti-pattern kept only because the original relies on a clean session.
rm(list=ls())
## load libraries
library(recount)
# functions.R supplies select.genes(); config.R defines datDir (output folder,
# presumably ending in "/") -- TODO confirm both against the repository.
source("functions.R")
source("config.R")
## create dir
dir.create(datDir)
## download and scale reads
# SRP012682 is the GTEx accession in recount; counts are later rescaled by
# each sample's total coverage (AUC) without rounding.
download_study('SRP012682', type = 'rse-gene', outdir = datDir)
load(paste(datDir, "rse_gene.Rdata", sep = ""))
# Tissues to keep; each string is grep-matched against colData()$smtsd below.
tissue.interest <- c("Subcutaneous", "Lung", "Thyroid", "Muscle", "Blood", "Artery - Tibial", "Nerve - Tibial", "Skin - Sun Exposed")
## scale counts by total coverage of the sample
rse_gene <- scale_counts(rse_gene, by = "auc", round = FALSE)
## select samples included in analysis freeze
rse_gene <- rse_gene[,which(colData(rse_gene)$smafrze=="USE ME")]
## only keep protein coding genes
# protein_coding.txt columns: V1 = chromosome, V2 = gene id; drop chrM/chrY.
pc.genes <- read.delim(paste(datDir, "etc/protein_coding.txt", sep = ""), header = F)
pc.genes <- pc.genes[!pc.genes$V1 %in% c("chrM","chrY"),]
overlapping_genes <- read.delim(paste(datDir, "etc/ensembl_ids_overlapping_genes.txt", sep = ""), stringsAsFactors = F)
rse_gene <- rse_gene[which(rownames(rse_gene) %in% pc.genes$V2),]
# Remove genes flagged as overlapping (column `x` of the lookup file).
rse_gene <- rse_gene[-which(rownames(rse_gene) %in% overlapping_genes$x),]
## summary
print(paste("Unique runs", length(unique(colData(rse_gene)$run))))
print(paste("Unique sample ids",length(unique(colData(rse_gene)$sampid))))
print(paste("Are all run values in phenotypes same order as colnames in rse_gene?", all(colData(rse_gene)$run==colnames(rse_gene))))
## split rse object
print(paste("Now splitting rse object by tissues"))
# Per tissue: subset samples whose smtsd matches, filter genes through
# select.genes(), then log2(count + 2)-transform the first assay in place.
gtex.rse <- sapply(tissue.interest, function(x, y){
  idx <- grep(x, colData(y)$smtsd)
  rse.dat <- y[,idx]
  rse.dat <- select.genes(rse.object = rse.dat, threshold = 0.1)
  counts <- SummarizedExperiment::assay(rse.dat, 1)
  # log2 transformation
  SummarizedExperiment::assay(rse.dat, 1) <- log2(counts+2)
  rse.dat
}, rse_gene)
rm(rse_gene)
# Short labels, in the same order as tissue.interest above.
names(gtex.rse) <- c("Subcutaneous", "Lung", "Thyroid", "Muscle", "Blood", "Artery_tibial", "Nerve_tibial", "Skin")
## Exclude one sample with missing mapping annotations from skin
samp.idx <- which(gtex.rse$Skin@colData$sampid == "GTEX-YF7O-2326-101833-SM-5CVN9")
gtex.rse$Skin <- gtex.rse$Skin[,-samp.idx]
## select only those thyroid samples that have genotype information
#sampid <- as.character(sapply(colData(gtex.rse[["Thyroid"]])$sampid, function(x){
#k <-strsplit(x, '-')
#k <- paste(k[[1]][1],k[[1]][2], sep = ".")
#k
#}
#)
#)
#genotype.samples <- read.delim("genotype_samples.txt")
#dim(gtex.rse[["Thyroid"]])
#gtex.rse[["Thyroid"]] <- gtex.rse[["Thyroid"]][,which(sampid %in% colnames(genotype.samples))]
## tissue specific data summary
print(paste("Number of genes,samples:"))
for(i in 1:length(gtex.rse)){
  print(paste(names(gtex.rse)[i]))
  print(paste(dim(gtex.rse[[i]])))
}
## save rdata - adipose sub, lung, thyroid, whole Blood
save(gtex.rse, file = paste(datDir, "raw_protein_coding.Rdata",sep = ""))
|
4e5c1a59c862fd9b55786a95c312c170507d435b
|
13f88229d979fd7a994714707f5dfc9923694ae5
|
/Analisis Multivariable/Arbolesde Decisión/Tree - Redes Sociales.R
|
b3b868e02046d9d6fe3f265e3d50c2055b6cabba
|
[] |
no_license
|
mecomontes/Data-Science-Complete-path
|
937d60921d1b9765f31acc2498546a2bf2bf4505
|
840dd2e2caf74a49a99be9b2a013ddb42b4ee613
|
refs/heads/main
| 2023-06-17T21:19:30.247344
| 2021-07-13T02:18:32
| 2021-07-13T02:18:32
| 385,442,161
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 775
|
r
|
Tree - Redes Sociales.R
|
# Decision-tree examples on social-media brand data (comments translated
# from Spanish).
# NOTE(review): install.packages() inside a script reinstalls on every run,
# and setwd() to an absolute path is machine-specific; consider moving both
# out of the script.
install.packages("rpart")
install.packages("rpart.plot")
install.packages("rattle")
library(rattle)
library(rpart)
library(rpart.plot)
getwd()
setwd("D:/UdeM/Análisis Multivariante/Árboles")
# Examples with social-network data
# Read the yearly CSVs: semicolon-separated, several string codes read as NA.
T2016<-read.csv("Tree2016.csv",header=T, sep =";",na.strings = c("NA"," ","","NULL","."))
summary(T2016)
names(T2016)
T2015<-read.csv("Tree2015.csv",header=T, sep =";",na.strings = c("NA"," ","","NULL","."))
summary(T2015)
names(T2015)
### Tree 2016
# Fit a tree for Brand_Asset_R on all other columns; minbucket is the
# minimum number of observations allowed in a terminal node.
ModeloTree<- data.frame(T2016)
tree<- rpart(Brand_Asset_R ~ ., data = ModeloTree, minbucket=4)
rpart.plot(tree)
### Tree 2015
ModeloTree<- data.frame(T2015)
tree<- rpart(Brand_Asset_R ~ ., data = ModeloTree, minbucket=3)
rpart.plot(tree)
|
e39a6647d4def949ae13bca54bffeb4909416401
|
f81463984a1e80efb56124a57b66253c1c24727a
|
/R/ggbiplot.R
|
e32363583f3b59d0f2ada4c738ae22c964fb70aa
|
[] |
no_license
|
zdk123/compPLS
|
24443fc811b56c413c474f3d44eec2862a395788
|
078bda73ae23b3181f4874bea084c2db51ecc7a2
|
refs/heads/master
| 2022-05-06T21:00:56.959067
| 2022-04-18T15:45:46
| 2022-04-18T15:45:46
| 28,114,942
| 22
| 5
| null | 2022-04-18T15:45:47
| 2014-12-17T01:17:13
|
R
|
UTF-8
|
R
| false
| false
| 10,406
|
r
|
ggbiplot.R
|
################################################################
# Methods for making pretty biplots with ggplot2 package
#
# based on code by http://www.vince.vu/software/#ggbiplot
#' Pretty biplots using ggplots
#' @title methods for making biplots from various projection & classification models
#' @param xobj The object to be plotted
#' @rdname ggbiplot
#' @export
ggbiplot <- function(xobj, ...) {
  # S3 generic: dispatch on class(xobj) to the matching ggbiplot.* method.
  UseMethod("ggbiplot")
}
#' @rdname ggbiplot
#' @method ggbiplot princomp
#' @export
ggbiplot.princomp <- function(xobj, ...) {
  # Biplot for a stats::princomp fit. Each score column is rescaled by
  # 6 / (component sdev * sqrt(n.obs)) before delegating to the default
  # method with the loadings passed through unchanged.
  col_factor <- 6/(xobj$sdev * sqrt(xobj$n.obs))
  rescaled <- sweep(xobj$scores, 2, col_factor, FUN = '*')
  ggbiplot.default(list(scores = rescaled, loadings = xobj$loadings), ...)
}
#' @rdname ggbiplot
#' @method ggbiplot prcomp
#' @export
ggbiplot.prcomp <- function(xobj, ...) {
  # Biplot for a stats::prcomp fit. Scores are standardized column-wise by
  # 1 / (sdev * sqrt(n - 1)), then handed to the default method together
  # with the rotation matrix as loadings.
  col_factor <- 1/(xobj$sdev * sqrt(nrow(xobj$x) - 1))
  standardized <- sweep(xobj$x, 2, col_factor, FUN = '*')
  ggbiplot.default(list(scores = standardized, loadings = xobj$rotation), ...)
}
#' @rdname ggbiplot
#' @method ggbiplot lda
#' @export
ggbiplot.lda <- function(xobj, ...) {
  # Biplot for a MASS::lda fit. The predictors and grouping are not stored
  # on the fit, so they are re-evaluated from the recorded call in the
  # caller's frame; this assumes the lda() arguments are still in scope
  # there, and that call component [[3L]] is the grouping -- TODO confirm.
  xname <- xobj$call$x
  gname <- xobj$call[[3L]]
  X <- eval.parent(xname)
  g <- eval.parent(gname)  # NOTE(review): `g` is evaluated but never used below
  # Center predictors on the grand mean of the class centroids, then
  # project onto the discriminant axes.
  means <- colMeans(xobj$means)
  X <- scale(X, center = means, scale = FALSE)
  x <- as.matrix(X) %*% xobj$scaling
  # Shrink each discriminant column by 25 / (sd * sqrt(n)); the constant 25
  # looks like a hand-tuned display factor -- verify before changing.
  nobs.factor <- sqrt(nrow(x))
  d <- apply(x, 2, sd)
  x <- sweep(x, 2, 25/(d * nobs.factor), FUN = '*')
  ggbiplot.default(list(scores=x, loadings=xobj$scaling), ...)
}
#' @rdname ggbiplot
#' @method ggbiplot plsda
#' @export
ggbiplot.plsda <- function(xobj, Yplot=FALSE, ...) {
  # Biplot for a PLS-DA fit. With Yplot = TRUE the Y-block scores and
  # loadings are drawn; otherwise the X-block ones are used.
  if (Yplot) {
    sc <- xobj$Yscores
    ld <- xobj$Yloadings
  } else {
    sc <- xobj$scores
    ld <- xobj$loadings
  }
  # Center each score column on its mean, then shrink the columns by
  # 6 / (column sd * sqrt(number of rows)) for display.
  spread <- apply(sc, 2, sd)
  centered <- scale(sc, center = colMeans(sc), scale = FALSE)
  shrunk <- sweep(centered, 2, 6/(spread * sqrt(nrow(sc))), FUN = '*')
  ggbiplot.default(list(scores = shrunk, loadings = ld), ...)
}
#' @rdname ggbiplot
#' @method ggbiplot splsda
#' @export
ggbiplot.splsda <- function(xobj, ...) {
  # Biplot for a sparse PLS-DA fit. When the fit carries no precomputed
  # scores, rebuild them by projecting the selected predictor columns
  # (indexed by xobj$A) through the projection matrix; the projection also
  # serves as the loadings. Plotting is delegated to the plsda method.
  if (is.null(xobj$scores)) {
    picked <- as.matrix(xobj$x[, xobj$A])
    xobj$scores <- picked %*% as.matrix(xobj$projection)
  }
  xobj$loadings <- xobj$projection
  ggbiplot.plsda(xobj, ...)
}
#' @rdname ggbiplot
#' @method ggbiplot matrix
#' @export ggbiplot.matrix
ggbiplot.matrix <- function(xobj, ...) {
  # Plot a raw score matrix directly. There are no loadings for a bare
  # matrix, so an all-NA placeholder of matching shape is passed and the
  # loading arrows plus the square-coordinate constraint are disabled.
  placeholder <- matrix(NA, nrow(xobj), ncol(xobj))
  ggbiplot.default(
    list(scores = xobj, loadings = placeholder),
    plot.loadings = FALSE,
    equalcoord = FALSE,
    ...
  )
}
#' @param grouping an optional grouping vector (ie - for coloring points)
#' @param select index of components to be plotted (must be length 2)
#' @param circle enclose points in a circle
#' @param circle.prob controls circle diameter (scales data std dev) if \code{circle = TRUE}
#' @param plot.loadings should loading vectors be plotted
#' @param label.loadings text of loadings labels, taken from rownames of loadings (depends on class of \code{xobj})
#' @param label.offset absolute offset for loading labels, so labels don't cover loadings vectors
#' @param scale.loadings scale length of loading vectors for plotting purposes
#' @param col.loadings a single value of vector for color of loadings
#' @param alpha controls relative transparency of various plot features
#' @param col color factor for points
#' @param group.ellipse enclose within-group points in an covariance ellipse
#' @param scale.ellipse scale \code{group.ellipse} to 1 standard deviation
#' @param group.cloud connect within-group points to a group mean point with a straight edge
#' @param xlab label for x axis
#' @param ylab label for y axis
#' @param equalcoord equal coordinates, ie should the plot area be square?
#' @param size point size
#' @param size.loadings line width of loading vectors
#'
#' @details additional plotting attributes (eg colors, themes, etc) can be chained on in the usual way for ggplots
#' @examples
#' # an LDA example with iris data
#' ldamod <- lda(iris[,1:4], grouping=iris[,5])
#' ggbiplot(ldamod, grouping=iris[,5], alpha=.7, group.cloud=TRUE) + theme_bw()
#' @rdname ggbiplot
#' @method ggbiplot default
#' @importFrom ggplot2 ggplot geom_segment geom_point geom_text geom_path scale_x_continuous scale_y_continuous aes
#' @export
ggbiplot.default <- function(xobj, grouping, select=1:2, circle = FALSE, circle.prob = 0.69,
                             plot.loadings=TRUE, label.loadings=FALSE, sub.loadings=1:nrow(xobj$loadings),
                             label.offset=0, label.size=4.5, scale.loadings = 1, col.loadings=scales::muted("red"),
                             alpha = 1, col=grouping, shape=NULL, group.ellipse=FALSE, scale.ellipse = 1,
                             group.cloud = FALSE, xlab="", ylab="", equalcoord=TRUE, size=3, size.loadings=1,
                             loadingsOnTop = FALSE) {
  # Workhorse for all ggbiplot methods: xobj is a plain list with $scores
  # and $loadings matrices, and the two columns named in `select` are drawn.
  ## get scores and loadings from xobj
  if (length(select) > 2) stop("Error: only 2d plots supported")
  if (length(select) < 2) stop("Error: need at least 2 coordinates/components")
  scores <- data.frame(xvar=xobj$scores[,select[1]], yvar=xobj$scores[,select[2]])
  loadings <- data.frame(xvar=xobj$loadings[sub.loadings,select[1]], yvar=xobj$loadings[sub.loadings,select[2]])
  # standardize scores (?)
  # Base plot
  g <- ggplot(data = scores, aes(x = xvar, y = yvar))
  # Loading arrows from the origin; the layer is built here but only added
  # later so its stacking order can be controlled by loadingsOnTop.
  if (plot.loadings) {
    loadingslayer <- geom_segment(data = loadings*scale.loadings,
                                  aes(x = 0, y = 0, xend = xvar, yend = yvar),
                                  arrow = grid::arrow(length = grid::unit(1/2, 'picas')),
                                  size = size.loadings, color = col.loadings)
  }
  # Loading labels: label.loadings is either TRUE (use rownames) or a
  # character vector of custom labels. Each label is pushed radially
  # outward by label.offset so it clears the arrow head.
  if (is.character(label.loadings) || label.loadings) {
    if (is.logical(label.loadings))
      labs <- rownames(loadings)
    else labs <- label.loadings
    # compute angles from orig.
    ang <- atan2(loadings$yvar*scale.loadings, loadings$xvar*scale.loadings)
    hyp <- sqrt((loadings$yvar*scale.loadings)^2 + (loadings$xvar*scale.loadings)^2)
    labdat <- data.frame(newx=(hyp + label.offset)*cos(ang),
                         newy=(hyp + label.offset)*sin(ang),
                         label=labs)
    g <- g +
      geom_text(aes(x=newx, y=newy, label=label), data=labdat, size=label.size)
  }
  # Score points: colored by `grouping` when supplied, else by `col` when
  # supplied, else unmapped. Rows are reordered by grouping so group order
  # is stable; `shape` optionally maps a point-shape aesthetic.
  if (!missing(grouping)) {
    gind <- order(grouping)
    grouping <- grouping[gind]
    scores <- scores[gind,]
    df <- data.frame(xvar=scores$xvar, yvar=scores$yvar, grouping=grouping)
    if (!is.null(shape)) {
      aesfun <- aes(color = grouping, shape=shape) ; df$shape <- shape[gind]
    } else
      aesfun <- aes(color = grouping)
    scoreslayer <- geom_point(data = df,
                              aesfun, alpha = alpha, size=size)
  } else {
    if (!missing(col)) {
      df <- data.frame(xvar=scores$xvar, yvar=scores$yvar, col=col)
      if (!is.null(shape)) {
        aesfun <- aes(color = col, shape=shape) ; df$shape <- shape
      } else
        aesfun <- aes(color = col)
      scoreslayer <- geom_point(data=df,
                                aesfun, alpha = alpha, size=size)
    } else {
      if (!is.null(shape))
        aesfun <- aes(shape=shape)
      else
        aesfun <- aes()
      scoreslayer <- geom_point(aesfun, alpha = alpha, size=size)
    }
  }
  # Add loadings and points in the requested stacking order.
  if (plot.loadings) {
    if (!loadingsOnTop) g <- g + loadingslayer + scoreslayer
    else g <- g + scoreslayer + loadingslayer
  } else
    g <- g + scoreslayer
  # Per-group covariance ellipse: Cholesky factor of each group's score
  # covariance maps the unit circle (l points) to an ellipse, recentered on
  # the group mean and scaled by scale.ellipse.
  if (group.ellipse && !missing(grouping)) {
    l <- 200
    group.scores <- split(scores[,1:2], grouping)
    group.centers <- lapply(group.scores, colMeans)
    group.cov <- lapply(group.scores, cov)
    group.RR <- lapply(group.cov, chol)
    angles <- seq(0, 2*pi, length.out=l)
    ell.list <- lapply(group.RR, function(RR)
      scale.ellipse * cbind(cos(angles), sin(angles)) %*% RR)
    ellCntr <- lapply(1:length(ell.list), function(i)
      sweep(ell.list[[i]], 2, group.centers[[i]], "+"))
    names(ellCntr) <- names(ell.list)
    ell.df <- as.data.frame(do.call("rbind", ellCntr))
    ell.df$grouping <- factor(rep(names(ellCntr), each=l), levels=names(ellCntr))
    g <- g + geom_path(data = ell.df, aes(color = grouping, group = grouping))
  }
  # "Cloud": connect each point to its group's mean with a segment.
  if (group.cloud && !missing(grouping)) {
    group.scores <- split(scores[,1:2], grouping)
    group.centers <- lapply(group.scores, colMeans)
    centers.df <- do.call('rbind', rep(group.centers, table(grouping)))
    rownames(centers.df) <- rownames(scores)
    colnames(centers.df) <- c("xcntr", "ycntr")
    loadCntr.df <- cbind(scores, centers.df, grouping)
    # NOTE(review): 10^log(alpha/1.4) mixes base-10 exponentiation with the
    # natural log -- verify this transparency transform is intentional.
    g <- g +
      geom_segment(data = loadCntr.df,
                   aes(x = xcntr, y = ycntr, xend = xvar, yend = yvar,
                       color = grouping), alpha=10^log(alpha/1.4))
  }
  # Enclosing ellipse ("circle"): radii scaled by a chi-square quantile of
  # circle.prob and the max absolute score in each direction.
  if (circle) {
    # scale circle radius
    r1 <- sqrt(qchisq(circle.prob, df = 2)) * max(scores$xvar^2)^(1/2)
    r2 <- sqrt(qchisq(circle.prob, df = 2)) * max(scores$yvar^2)^(1/2)
    theta <- c(seq(-pi, pi, length = 50), seq(pi, -pi, length = 50))
    circdat <- data.frame(xvar = r1 * cos(theta), yvar = r2 * sin(theta))
    g <- g + geom_path(aes(x=xvar, y=yvar), data = circdat, color = scales::muted('black'),
                       size = 0.5, alpha = alpha/3)
  }
  # Axis limits: symmetric about zero (square-ish plot) when equalcoord,
  # following the circle's extent if one was drawn.
  if (equalcoord) {
    if (circle) {
      xrange <- range(circdat$xvar)
      yrange <- range(circdat$yvar)
    } else {
      xrange <- c(-max(abs(scores$xvar)), max(abs(scores$xvar)))
      yrange <- c(-max(abs(scores$yvar)), max(abs(scores$yvar)))
    }
    g <- g + scale_x_continuous(xlab, limits=xrange) +
      scale_y_continuous(ylab, limits=yrange)
  } else {
    g <- g + scale_x_continuous(xlab) +
      scale_y_continuous(ylab)
  }
  # Return the ggplot object so callers can chain further layers/themes.
  g
}
|
caa61ac1d8067206c3ada0078d6a3eadbd4d01cf
|
4155f9d8b3e49206653f663cb46f547ff8882040
|
/FMI-Code/Exam-2019/Problem-4.R
|
be507498f5b360d633b58368349d0e30ab571e04
|
[] |
no_license
|
nicksinch/Probability-and-Statistics
|
b7574f9a4706713adcc28a8da67290c5654a43ac
|
1b3b4df745140302b00b0f6900b07a564e811842
|
refs/heads/master
| 2020-12-09T12:14:03.155185
| 2020-01-17T22:59:26
| 2020-01-17T22:59:26
| 233,299,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 854
|
r
|
Problem-4.R
|
# Problem - 4
# 200 draws from a negative binomial with size = 1 (i.e. geometric):
# number of failures before the first success, success probability 1/13.
Observations <- rnbinom(200, 1, 1/13)

# Check normality first.
shapiro.test(Observations)
# Console output from the original session, commented out so the script
# parses (the bare output lines were syntax errors):
#> Shapiro-Wilk normality test
#> data: Observations
#> W = 0.7351, p-value < 2.2e-16

# ===> the data are not normally distributed, so we run wilcox.test
# because we want a confidence interval
wilcox.test(Observations, conf.level = 0.90, conf.int = TRUE)
#> Wilcoxon signed rank test with continuity correction
#> data: Observations
#> V = 16836, p-value < 2.2e-16
#> alternative hypothesis: true location is not equal to 0
#> 90 percent confidence interval:
#>   8.499967 10.500040
#> sample estimates:
#> (pseudo)median
#>       9.500013

# P(X = 9); the ace is the 10th card
# theoretical probability
dnbinom(9, 1, 1/13) # number of failures before the first success
#> [1] 0.03742809
# empirical estimate: X = 10
sum(Observations == 10) / length(Observations)
#> [1] 0.05
|
6ce2aab8337a11be49c214720e9a0e75656a4764
|
da6d953d2493b830a87dce2b1b02b863a3257fde
|
/new_proj/new_proj.R
|
94aadfc98fd3b3bd5eeb680b4b9846a1dbb99d4c
|
[] |
no_license
|
xuhuanyunxiao/jupyter-notebook
|
6f19143442ab1301e22d49745ad1a14b25ab801a
|
20ca911cde18d977d062f269f165185ea0c684b2
|
refs/heads/master
| 2021-07-21T03:26:54.359203
| 2020-04-22T13:18:42
| 2020-04-22T13:18:42
| 135,554,417
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
new_proj.R
|
library(readxl)
library(lavaan)
library(semPlot)
library(OpenMx)
library(GGally)
library(corrplot)
file_folder <- "D:/XH/Python_Project/notebook/new_proj"
# getwd()
setwd(file_folder)
# Load the analysis sheet.
data <- read_excel('data_sel_1.xlsx')
# Path model: Z regressed on x1-x3 and Y; Y regressed on x1 and x2.
# NOTE(review): the original second line was "2Y ~ x1 + x2", which lavaan
# cannot parse ("2Y" is not a valid variable name); assuming "Y" was meant.
model1 <- 'Z ~ x1 + x2 + x3 + Y
Y ~ x1 + x2'
# BUG FIX: the fit originally referenced `data1`, which is never defined;
# the spreadsheet is loaded into `data`, so fit against that.
fit1 <- cfa(model1, data = data)
summary(fit1, fit.measures = TRUE, standardized = TRUE, rsquare = TRUE)
semPaths(fit1, 'std', layout = 'circle')
|
93375d0886680b572f4a686fc78c6dfc79c61a4f
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkFileChooserUnselectFilename.Rd
|
42d82280f63265d94a53da60c7c13c135772cf46
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 554
|
rd
|
gtkFileChooserUnselectFilename.Rd
|
\alias{gtkFileChooserUnselectFilename}
\name{gtkFileChooserUnselectFilename}
\title{gtkFileChooserUnselectFilename}
\description{Unselects a currently selected filename. If the filename
is not in the current directory, does not exist, or
is otherwise not currently selected, does nothing.}
\usage{gtkFileChooserUnselectFilename(object, filename)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkFileChooser}}}
\item{\verb{filename}}{the filename to unselect}
}
\details{Since 2.4}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
a26399bffcdac914b16cd73cd7af4a8094fa4680
|
1e00f821ffe6ddcad7620cb1acc439ee711c0a23
|
/analyze_msea.R
|
0fbd3b489f9a004eb5598b769bd299270a06691d
|
[] |
no_license
|
XiaYangLabOrg/mergeomics
|
2db3230a4a501434862158e4a0cec3930afa844c
|
569492ce7f998961ec641c4353fc68b900167e20
|
refs/heads/master
| 2023-07-20T09:21:12.645625
| 2023-07-07T16:19:24
| 2023-07-07T16:19:24
| 268,940,012
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80,673
|
r
|
analyze_msea.R
|
# Please excuse the roughness of these functions! I made these a long time ago while I was learning R and I will be making
# efforts to improve the code hopefully soon.
## ----------------- DOWNSTREAM ANALYSIS OF MSEA RESULTS FROM MERGEOMICS ----------------- ##
# #
# Collection of functions dealing with downstream analysis of MSEA results #
# #
# Functions: #
# #
# 1) directOverlap - outputs exact module (gene set) matches between studies #
# (i.e. "replication") #
# 2) geneOverlap - does "looser" overlap analysis by % gene overlap between #
# modules #
# 3) SGMrepTable - makes table that records what SGM methods are being represented #
# for each module #
# 4) summaryTable - makes table for each SGM method detailing supersets and #
# comprising modules #
# #
# Written by Jessica Ding #
# #
# ------------------------------------------------------------------------------------------#
# Notes:
# - These sets of functions are highly dependent on file names (case sensitive)
# - If functions are done in the logical order that they were created, then this should not be
# a problem. Problems may arise if you have file names in the folder that have string matches
# to the string I am using to query the files to be analyzed. For example, a query for ".mod"
# are for files like AD_Adipose_Subcutaneous.mod.txt. If you have a file in the folder that
# is called "file.modified.txt" - it will capture this file and result in an error.
#-----------------------------------------------------------------------------------------------------#
# Abbreviation: SGM - SNP to gene mapping
# This function does a % gene overlap for two studies for each
# SGM method (as opposed to "direct overlap" - exact name
# matching of gene sets/modules)
#
# Inputs: 1) results_Dir folder with MSEA results for each study with the SGM
# as the common factor(s) between studies. file name format
# should be "Study, SGM, .results.txt" (.result.txt is the
# output extention of MSEA).
# ex. "UKB_T2D.Adipose_Subcutaneous.results.txt"
# 2) output_Dir folder to store results
# 3) perc_gene_overlap minimum gene overlap to merge modules
# 4) FDRcutoff usually 0.05
# 5) cohorts vector with study names that are reflected in the msea results
# ex. c("DIAGRAMstage1_T2D", "UKB_T2D")
# 6) modfile_path path to mod file, columns: 'MODULE' 'GENE'
# 7) infofile_path path to info file, columns: 'MODULE' 'GENE' 'DESCR'
# 8) study
#
# Outputs: 1) "combined_..." combined results file for each study
# 2) "merged_..." mod and info file from module merging
# 3) "analyzed_..." shows all supersets (does not include unmerged
# modules) and study of origin. "OVERLAP" indicates
# module came from all studies
# 4) "analyzed_overlap_summary" a summary file of sets of modules that were merged with
# different study representations (indicating new overlaps
# based on % gene overlap)
# 5) "shared_..." 'MODULE' 'METHOD' 'DESCR' file that can be used to merge
# modules a second time given new overlaps
#
# Notes: 1) % gene overlap is set with "rmax" - ex. .3 is 30% gene overlap
# 2) This function is written to consider multiple studies but all msea
# results files must be in the same folder
# 3) Directory path string parameters must end in "/"
# 4) If DESCR unknown, put MODULE name
#
# Version April 2019
#---------------------------------------------------------------------------------------------------#
geneOverlap <- function(results_Dir, output_Dir, perc_gene_overlap, fdr_cutoff=0.05, cohorts, modfile_path, infofile_path, study, type=NULL){
  # Percent-gene-overlap analysis of MSEA results across cohorts.
  #   results_Dir       folder of "<cohort>.<SGM>.results.txt" MSEA outputs
  #   output_Dir        folder for combined_/merged_/analyzed_/shared_ files
  #   perc_gene_overlap minimum fractional gene overlap used to merge modules
  #                     (e.g. 0.3 = 30%); forwarded to merge_modules() as rcutoff
  #   fdr_cutoff        FDR threshold for keeping modules (applied when type is NULL)
  #   cohorts           study names as they appear in the results file names
  #   modfile_path      MODULE/GENE file; infofile_path: MODULE/GENE/DESCR file
  #   study             label prepended to shared_ output file names
  #   type              if non-NULL, the FDR filter is skipped
  # Directory path arguments must end in "/".
  # MAKE COMBINED FILES FOR ALL STUDIES
  files = c()
  for(c in cohorts){
    files = append(files, list.files(results_Dir)[grep(c, list.files(results_Dir))])
  }
  ##get SGM methods. This assumes your results file is formatted - "cohort.SGM.results.txt"
  study_subset = files[grep(cohorts[1], files)]
  SGMs = c()
  for(f in study_subset){
    SGMs = append(SGMs, unlist(strsplit(f, split = ".",fixed = TRUE))[2])
  }
  SGMs = unique(SGMs)
  cat("The SNP to gene mappings used is/are:\n")
  print(SGMs) #this is to check that the SGMs are being extracted in the desired way
  ##combine studies for each SGM
  cat("Now combining studies together in one file for each SGM.\n")
  for(i in SGMs){
    results = files[grep(i,files)]
    if(length(results)<2){
      cat("The method ", i, " does not have a file for one of the studies.\n")
    }
    all = data.frame()
    for(j in results){
      temp = read.delim(paste(results_Dir, j ,sep = ""), header = TRUE, stringsAsFactors = FALSE)
      if(is.null(type)){
        temp = temp[temp$FDR<fdr_cutoff,]
      }
      # Drop MSEA's internal control modules.
      temp=temp[!grepl("_ctrlA",temp$MODULE)&!grepl("_ctrlB",temp$MODULE),]
      if(nrow(temp)>0){
        for(r in cohorts){
          if(grepl(r, j)){
            temp$STUDY = r
          }
        }
      }
      all = rbind(all, temp)
    }
    write.table(all, paste0(output_Dir,"combined_", i, ".txt"), row.names = FALSE, quote = FALSE, sep="\t")
  }
  combined_files = list.files(output_Dir)
  combined_files = combined_files[grep("combined_", combined_files)]
  cat("Now merging modules for % gene overlap...\n")
  # MERGE MODULES - this was modified 5.19.2019 and not currently tested
  for(l in combined_files){
    cat("Now merging:\n")
    cat(l,"\n")
    # unfortunately hard coding this in...
    if(study=="UKB_Wojcik" && grepl("Pituitary_sQTL",l)) next
    df = read.delim(paste0(output_Dir,l), header = TRUE, stringsAsFactors = FALSE)
    if(sum(is.na(df$MODULE))>0){
      cat("Skipping: ", l, " because there is no significance.\n")
      next
    }
    if(length(df$MODULE)<2){
      cat("Skipping: ", l, " because there is only one or 0 significant modules meaning no overlap at all.\n")
      next
    }
    # BUG FIX: rcutoff was hard-coded to 0.30, silently ignoring the
    # perc_gene_overlap argument; thread the parameter through instead.
    merge_modules(name = gsub("combined_","",gsub(".txt","",l)),
                  modules_df = df,
                  rcutoff = perc_gene_overlap,
                  output_Dir = output_Dir,
                  modfile_path = modfile_path,
                  infofile_path = infofile_path)
  }
  cat("Now analyzing for newly overlapped modules...\n")
  # FIND IF THERE WERE MERGED MODULES WITH DIFFERENT STUDY REPRESENTATION
  files1 = list.files(output_Dir)
  merged_mod_files = files1[grep("merged_",files1)] #works if no SGMs have "mod" in name
  merged_mod_files = merged_mod_files[grep(".mod", merged_mod_files)]
  uncommon_df = data.frame(stringsAsFactors = FALSE)
  for(m in merged_mod_files){
    cat("Now inspecting ", m , "\n")
    file = read.delim(paste0(output_Dir,m), header = TRUE, stringsAsFactors = FALSE)
    temp = unique(file$OVERLAP[grep(",", file$OVERLAP)])
    SGM = gsub("merged_","",gsub(".mod.txt","", m))
    combined_file_name = combined_files[grep(SGM, combined_files)]
    combined_df = read.delim(paste0(output_Dir,combined_file_name), header = TRUE, stringsAsFactors = FALSE)
    study_reps = c()
    for(n in temp){
      modules = unlist(strsplit(n,","))
      study_rep = ""
      for(o in modules){
        # A module present in more than one study is tagged OVERLAP;
        # otherwise record the single study it came from.
        if(length(combined_df$STUDY[combined_df$MODULE==o])>1){
          study_rep = paste(study_rep, "OVERLAP",sep = ", ")
        }
        else{
          study_rep = paste(study_rep, combined_df$STUDY[combined_df$MODULE==o], sep = ", ")
        }
      }
      study_rep = substring(study_rep, 3) # drop the leading ", "
      study_reps = append(study_reps, study_rep)
    }
    analyzed = data.frame("Merged Modules"= temp,
                          "Study Representation"=study_reps)
    print(SGM)
    write.table(analyzed, paste0(output_Dir,"analyzed_", SGM, ".txt"), row.names = FALSE, quote = FALSE, sep = "\t")
    # Flag merged sets whose member modules came from more than one study.
    bool = c()
    for(p in study_reps){
      p = gsub(" ", "", p)
      splitted = unlist(strsplit(p, ","))
      if(sum(!splitted[1]==splitted)>0){
        bool = append(bool, TRUE)
      }
      else{
        bool = append(bool, FALSE)
      }
    }
    if(sum(bool)>0){
      uncommon = analyzed[bool,]
      uncommon$SNPtoGeneMapping = SGM
      uncommon_df = rbind(uncommon_df, uncommon)
    }
  }
  #output summary of all different study representation "overlaps"
  write.table(uncommon_df, paste0(output_Dir,"analyzed_overlap_summary.txt"), row.names = FALSE, quote = FALSE, sep = "\t")
  #make overlap module files including all direct and quasi overlap modules
  for(s in SGMs){
    cat("Now analyzing\n")
    cat(s, "\n")
    #get direct overlaps
    comb_file = read.delim(paste0(output_Dir,combined_files[grep(s, combined_files)]), header = TRUE, stringsAsFactors = FALSE)
    if(sum(is.na(comb_file$MODULE))>0){
      # BUG FIX: this message printed the stale loop variable `l` from the
      # merge loop above; report the current SGM `s` instead.
      cat("Skipping: ", s, " because there is no significance.\n")
      next
    }
    # Direct overlaps: iteratively intersect module names across studies.
    studies1 = unique(comb_file$STUDY)
    num = 1
    shared_mods = c()
    while(num<length(studies1)){
      if(num==1){
        modules = comb_file$MODULE[comb_file$STUDY==studies1[num]]
      }
      else {
        modules = shared_mods
      }
      temp_modules = comb_file$MODULE[comb_file$STUDY==studies1[num+1]]
      shared_mods = intersect(modules,temp_modules)
      num = num + 1
    }
    shared = data.frame(stringsAsFactors = FALSE)
    # Paren fix: was length(shared_mods>0), which happens to give the same
    # truth value but tests the wrong expression.
    if(length(shared_mods)>0){ #if there are any direct overlaps
      shared = data.frame("MODULE" = shared_mods,
                          "METHOD" = "direct")
    }
    uncommon_df = read.delim(paste0(output_Dir,"analyzed_overlap_summary.txt"), header = TRUE, stringsAsFactors = FALSE) #to convert back into strings
    if(sum(grepl(s, uncommon_df$SNPtoGeneMapping))>=1){ #if there are any quasi overlaps
      shared_quasi_mods = c()
      SGM_mod_rep = uncommon_df$Merged.Modules[grep(s, uncommon_df$SNPtoGeneMapping)]
      print(SGM_mod_rep)
      SGM_study_rep = uncommon_df$Study.Representation[grep(s, uncommon_df$SNPtoGeneMapping)]
      for(u in seq_along(SGM_mod_rep)){
        SGM_merged_mod_rep = unlist(strsplit(SGM_mod_rep[u], split = ","))
        SGM_merged_study_rep = unlist(strsplit(SGM_study_rep[u], split = ", "))
        not_overlap = !(SGM_merged_study_rep=="OVERLAP")
        shared_quasi_mods = append(shared_quasi_mods, SGM_merged_mod_rep[not_overlap])
      }
      shared_quasi = data.frame("MODULE"= shared_quasi_mods,
                                "METHOD"= "quasi")
      if(length(shared_mods)>0){ #if both quasi and direct
        shared = rbind(shared, shared_quasi)
      }
      else{ # if only quasi
        shared = shared_quasi
      }
    }
    if(length(shared$MODULE)>0){
      #annotate each shared module with its DESCR from the info file
      info = read.delim(infofile_path, header = TRUE, stringsAsFactors = FALSE)
      annot = c()
      for(v in shared$MODULE){
        annot = append(annot, info$DESCR[info$MODULE==v])
      }
      shared$DESCR = annot
      write.table(shared, paste0(output_Dir, "shared_", study, ".", s, ".txt"), row.names = FALSE, quote = FALSE, sep = "\t")
    }
    else{
      next
    }
    shared = data.frame()
  }
  cat("Finished. shared_ files made.")
}
source("~/Desktop/Yang_Lab/Mergeomics/geneOntology.R")
source("~/Desktop/Yang_Lab/resources/genesets/R-tomfunctions.R")
#-----------------------------------------------------------------------------------------------------#
# This function makes annotations for supersets that were obtained from module merge.
#
# Inputs: 1) results_Dir folder with ".mod.txt" files (output from module merge)
# 2) ontologyfname path to ontology file
# 3) study name to append to files
# 4) trim vector of strings to "trim" off the mod.txt file name to get desired unique name
# ex. UKB_AD.Adipose_Subcutaneous.mod.txt --> trim = c("UKB_AD.",".mod.txt")
#
# Outputs: 1) "supersets.study" folder contains superset annotations in .xls format
#
# Notes: If you get the error "Error in cbind(rep(modulesize, dim(fm)[1]), fm) : object 'fm' not found", the
# offending module is printed before this error and this module must be taken out from the .mod.txt file
# Version April 2019
#---------------------------------------------------------------------------------------------------#
# ontologyfname = "~/Desktop/Yang_Lab/resources/genesets/MSigDB_Canonical_Pathways.txt"
#--------------------------------------------------------------------------#
#
#' Annotate supersets produced by module merging.
#'
#' Scans `results_Dir` for ".mod.txt" files (module-merge output), extracts
#' the merged "superset" rows (rows whose OVERLAP field lists several
#' comma-separated source modules), writes them to a
#' "supersets.<study>[.<SGM>].txt" file and runs geneOntology() on that file
#' to produce pathway annotations ("a.<name>_Ontology_*.xls").
#'
#' results_Dir    folder containing the ".mod.txt" files; output is written
#'                here as well (trailing "/" expected, paths use paste0)
#' ontologyfname  path to the ontology gene-set file passed to geneOntology()
#' study          study label appended to output file names
#' trim           character vector of substrings stripped from each
#'                ".mod.txt" file name to recover the SNP-to-gene-mapping
#'                (SGM) label, e.g. c("UKB_AD.", ".mod.txt")
#'
#' Depends on geneOntology() from the sourced helper script.
annotate_supersets <- function(results_Dir, ontologyfname, study="", trim){ # trim: file-name pieces to strip so the SGM label (not ".mod.txt") ends up in result file names
  files = list.files(results_Dir)
  # NOTE(review): "." in the pattern is a regex wildcard, so this matches any
  # "?mod?txt" substring, not only the literal ".mod.txt" extension
  mod_files = files[grep(".mod.txt", files)]
  for(i in mod_files){
    mod_file = read.delim(paste0(results_Dir,i), header = TRUE, stringsAsFactors = FALSE)
    # only files that actually contain merged modules (comma in MODULE) get annotated
    if(sum(grepl(",", mod_file$MODULE))>0){
      # keep only superset rows (OVERLAP lists more than one source module)
      mod_file = mod_file[grep(",", mod_file$OVERLAP),]
      if(length(mod_files)>1){
        # several mod files: derive a per-SGM suffix by trimming the file name
        SGM = i
        for(k in trim){
          SGM = gsub(k, "", SGM)
        }
        SGM = gsub(study, "", SGM)
        name = paste0("supersets.", study, ".", SGM)
      }
      else{
        # redundant re-filter (already done above); harmless
        mod_file = mod_file[grep(",", mod_file$OVERLAP),]
        name = paste0("supersets.", study)
      }
      file_name = paste0(results_Dir,name, ".txt")
      # keep only the first two columns (MODULE, GENE) for geneOntology input
      mod_file = mod_file[,c(1,2)]
      write.table(mod_file, file_name, row.names = FALSE, quote = FALSE, sep = "\t")
      # geneOntology() expects a .txt module file and writes the annotation .xls files
      geneOntology(work_Dir = results_Dir, output_Dir = results_Dir, ontologyfname = ontologyfname, file = file_name, name = name)
    }
    else{
      next
    }
  }
  #input for geneOntology is a .txt file
}
#' Build a per-SGM summary table of significant modules and their supersets.
#'
#' For every merged module file ("*mod*") in `results_Dir` this collects, per
#' SNP-to-gene mapping (SGM): the significant modules, their descriptions
#' (from the module info file), the superset annotation (from the
#' geneOntology .xls output of annotate_supersets()) and module sizes.  For
#' "quasi" studies the overlap method (direct/quasi) is pulled from the
#' matching "shared_*" file, and single-module "shared_*" SGMs are appended.
#' GTEXv7 coexpression modules get their DESCR replaced by their ontology
#' annotation.  Writes one "table_<study>.<SGM>.txt" per SGM plus a combined
#' "table_summary_<study>.txt".
#'
#' results_Dir       folder with merged .mod.txt files, "supersets.*"
#'                   annotation output and (for quasi studies) shared_* files
#'                   (trailing "/" expected)
#' output_Dir        folder where the per-SGM and combined tables are written
#' study             study label used in input/output file names
#' infofile_path     path to the module info file (MODULE/DESCR columns)
#' coexpr_annot_Dir  folder holding "a.trim_coexpr_Ontology_<module>.xls"
#'                   files for GTEXv7 coexpression modules; the default keeps
#'                   the historical hard-coded location (backward compatible)
summaryTable <- function(results_Dir, output_Dir, study, infofile_path,
                         coexpr_annot_Dir = "~/Desktop/Yang_Lab/resources/genesets/reannotate/trim_coexpr/"){
  #get SGMs
  files = list.files(results_Dir)
  mod_files = files[grep("mod", files)]
  SGMs = gsub("merged_", "", gsub(".mod.txt","",mod_files))
  # FIX: read the module info file once, up front.  Previously it was re-read
  # inside the superset loop for every sub-module (slow) and was *undefined*
  # when an SGM had individual modules but no supersets (runtime error).
  info = read.delim(infofile_path, header = TRUE, stringsAsFactors = FALSE)
  all_table = data.frame()
  for(i in SGMs){
    table = data.frame(stringsAsFactors = FALSE)
    individual_table = data.frame()
    mod_file = read.delim(paste0(results_Dir,mod_files[grep(i, mod_files)]), header = TRUE, stringsAsFactors = FALSE)
    mods = unique(mod_file$OVERLAP)
    # merged supersets list their source modules comma-separated in OVERLAP
    supersets = mods[grep(",", mods)]
    mod_names = unique(mod_file$MODULE)
    superset_names = mod_names[grep(",", mod_names)]
    superset_annotation = c()
    if(length(supersets)>0){
      SGM_superset_table = data.frame()
      for(j in supersets){
        sub_modules_DESCR = c()
        name = unique(mod_file$MODULE[mod_file$OVERLAP==j])
        sub_modules = unlist(strsplit(j, split = ","))
        for(l in sub_modules){
          sub_modules_DESCR = append(sub_modules_DESCR, info$DESCR[info$MODULE==l])
        }
        annot = read.delim(paste0(results_Dir,"supersets.",study,".", i,"/","a.supersets.", study, ".",i,
                                  "_Ontology_", name,".xls"), header = TRUE, stringsAsFactors = FALSE)
        size = c()
        annotation = c()
        if(is.null(annot$ModuleSize[1])){
          # ModuleSize column missing entirely -> annotation failed upstream
          annotation = append(annotation,"Unknown")
          size = append(size, "Error in annotation gave no module size")
        }
        else if(is.na(annot$ModuleSize[1])){
          annotation = append(annotation,"Unknown")
          size = append(size, annot$ModuleSize[1])
        }
        else if(sum(annot$pvalue_corrected<0.05)==0){
          # nothing significant after multiple-testing correction
          annotation = append(annotation,"Unknown")
          size = append(size, annot$ModuleSize[1])
        }
        else{
          sig_mods = annot[annot$pvalue_corrected<0.05,]
          # annotate with the ontology term(s) of highest module overlap
          max_mod_overlap_annot = annot$Gene.Category[annot$ModuleOverlap==max(annot$ModuleOverlap)]
          if(length(max_mod_overlap_annot)>1){
            names = c()
            for(k in max_mod_overlap_annot){
              names = paste(names, k, sep = ", ")
            }
            names = substring(names, 3)
            annotation = append(annotation, names)
          }
          else{
            annotation = append(annotation,max_mod_overlap_annot)
          }
          size = append(size, annot$ModuleSize[1])
        }
        # FIX: build the superset rows *after* the if/else chain so supersets
        # whose annotation is "Unknown" are kept.  Previously the data.frame
        # was only created inside the final else branch, silently dropping
        # them (inconsistent with extract_annotation()).
        superset_table = data.frame("SGM" = i,
                                    "MODULE"= sub_modules,
                                    "DESCR" = sub_modules_DESCR,
                                    "SUPERSET"= annotation,
                                    "MODULE_SIZE" = size, stringsAsFactors = FALSE)
        SGM_superset_table = rbind(SGM_superset_table, superset_table)
      }
      table = SGM_superset_table
    }
    individual_modules = mods[!grepl(",", mods)]
    if(length(individual_modules)>0){
      size2 = c()
      individ_modules_DESCR = c()
      for(m in individual_modules){
        individ_modules_DESCR = append(individ_modules_DESCR, info$DESCR[info$MODULE==m])
        # module size = number of gene rows in the merged mod file
        size2 = append(size2, sum(mod_file$MODULE==m))
      }
      individual_table = data.frame("SGM" = i,
                                    "MODULE"= individual_modules,
                                    "DESCR" = individ_modules_DESCR,
                                    "SUPERSET" = "none",
                                    "MODULE_SIZE"=size2, stringsAsFactors = FALSE)
      table = rbind(table, individual_table)
    }
    if(grepl("quasi", study)){
      # annotate each module with how the studies overlapped (direct/quasi)
      shared_files = files[grep("shared_",files)]
      shared_file = shared_files[grep(i, shared_files)]
      shared = read.delim(paste0(results_Dir,shared_file), header=TRUE, stringsAsFactors = FALSE)
      methods = c()
      for(n in table$MODULE){
        methods = append(methods, shared$METHOD[shared$MODULE==n])
      }
      table$OVERLAP_METHOD = methods
    }
    write.table(table,paste0(output_Dir, "table_", study, ".", i, ".txt"), row.names = FALSE, quote = FALSE, sep = "\t")
    all_table = rbind(all_table, table)
    all_table= rbind(all_table,"")   # blank separator row between SGMs
  }
  #include SGMs in which there was only one module overlap (not included above^)
  shared_files = files[grep("shared_", files)]
  for(i in shared_files){
    df = data.frame(stringsAsFactors = FALSE)
    file = read.delim(paste0(results_Dir, i), header = TRUE, stringsAsFactors = FALSE)
    if(length(file$MODULE)==1){
      SGM = gsub(paste0("shared_", study, "."),"",i)
      SGM = gsub(".txt","", SGM)
      if(grepl("quasi",study)){
        df = data.frame("SGM"=SGM,
                        "MODULE"=file$MODULE,
                        "DESCR"=file$DESCR,
                        "SUPERSET" = "none",
                        "MODULE_SIZE"="value",
                        "OVERLAP_METHOD"=file$METHOD, stringsAsFactors = FALSE)
      }
      else{
        # FIX: use the already-loaded info table from `infofile_path`
        # (previously a hard-coded user-specific absolute path was read here,
        # ignoring the function argument).
        df = data.frame("SGM"=SGM,
                        "MODULE"=file$MODULE,
                        "DESCR"=info$DESCR[info$MODULE==file$MODULE],
                        "SUPERSET" = "none",
                        "MODULE_SIZE"="value", stringsAsFactors = FALSE)
      }
      all_table = rbind(all_table, df)
      all_table = rbind(all_table,"")
    }
    else{
      next
    }
  }
  #annotate coexpression modules
  modules = all_table$MODULE
  for(j in modules){
    if(grepl("GTEXv7",j)){
      descrip = c()
      if(j==""){
        descrip = append(descrip, "")
      }
      else{
        annot = read.delim(paste0(coexpr_annot_Dir, "a.trim_coexpr_Ontology_", j, ".xls"), header = TRUE, stringsAsFactors = FALSE)
        if(is.na(annot$ModuleSize[1])){
          descrip = append(descrip, "Unknown")
        }
        else if(sum(annot$pvalue_corrected<0.05)==0){
          descrip = append(descrip, "Unknown")
        }
        else{
          # join all significant ontology terms with ", "
          sig_mods = annot$Gene.Category[annot$pvalue_corrected<0.05]
          if(length(sig_mods)>1){
            names = c()
            for(k in sig_mods){
              names = paste(names, k, sep = ", ")
            }
            names = substring(names, 3)
          }
          else{
            names = sig_mods
          }
          descrip = append(descrip, names)
        }
      }
      all_table$DESCR[all_table$MODULE==j] = descrip
    }
  }
  all_table$MODULE_SIZE[is.na(all_table$MODULE_SIZE)]<-""
  #-------------------------------------------------------#
  write.table(all_table, paste0(output_Dir, "table_summary_",study, ".txt"), row.names = FALSE, quote = FALSE, sep = "\t")
}
#set output_Dir to results folder
#' Merge all significant modules across SGMs and build a "melted" summary.
#'
#' Takes the combined summary table produced by summaryTable(), merges every
#' distinct significant module across SGMs (merge_modules), annotates the
#' resulting supersets (annotate_supersets) and writes:
#'   30merged_summary_table.<study>.txt        one row per sub-module with
#'                                             its superset ID/size/annotation
#'   30merged_summary_table_rename.<study>.txt one row per superset or
#'                                             standalone module, with an
#'                                             empty RENAME column for
#'                                             manual curation
#'
#' summary_table  file name (inside output_Dir) of the summaryTable() output
#' study          study label used in file names
#' output_Dir     results folder, input and output (trailing "/" expected)
#' modfile_path   module -> gene membership file
#' infofile_path  module info file (MODULE/DESCR)
#' ontologyfname  ontology gene-set file for superset annotation
#' rcutoff        overlap-ratio cutoff passed to merge_modules()
#'
#' Depends on merge_modules(), annotate_supersets() and extract_annotation()
#' defined in this file.
mergedSummaryTable <- function(summary_table, study, output_Dir, modfile_path, infofile_path, ontologyfname, rcutoff){
  mod_file = read.delim(modfile_path, header = TRUE, stringsAsFactors = FALSE)
  summary_tbl = read.delim(paste0(output_Dir,summary_table), header=TRUE, stringsAsFactors = FALSE)
  # drop the blank separator rows added by summaryTable()
  modules = summary_tbl$MODULE[!(summary_tbl$MODULE=="")]
  modules = unique(modules)
  modules_df = data.frame("MODULE"=modules)
  cat("The study being analyzed is", study, "\n")
  merge_modules(name = study, modules_df = modules_df, output_Dir = output_Dir, modfile_path = modfile_path,
                infofile_path = infofile_path, rcutoff = rcutoff)
  #annotate supersets
  annotate_supersets(results_Dir = output_Dir, ontologyfname = ontologyfname, study = study, trim = c("merged_", ".mod.txt"))
  merged = read.delim(paste0(output_Dir, "merged_",study, ".mod.txt"), header = TRUE, stringsAsFactors = FALSE)
  supersets = unique(merged$OVERLAP[grep(",", merged$OVERLAP)])
  all_melted_df = data.frame(stringsAsFactors = FALSE)
  number = 1
  index = c()
  aggregated_descr = c()
  # break each superset down into its individual source modules
  for(i in supersets){
    name = unique(merged$MODULE[merged$OVERLAP==i])
    sub_modules = unlist(strsplit(i, split = ","))
    descriptions = c()
    sizes = c()
    for(k in sub_modules){
      descriptions = append(descriptions,unique(summary_tbl$DESCR[summary_tbl$MODULE==k]))
      sizes = append(sizes, sum(mod_file$MODULE==k))
    }
    # manual ", "-join of the sub-module descriptions (kept as-is: preserves
    # the original's behavior when `descriptions` is empty)
    temp = c()
    for(c in descriptions){
      temp = paste(temp, c, sep = ", ")
    }
    temp = substring(temp, 3)
    aggregated_descr = append(aggregated_descr, temp) #for renaming
    # FIX: call extract_annotation() once per superset instead of twice --
    # it re-reads the annotation .xls file on every call
    annot_info = extract_annotation(study = study, name = name, results_Dir = output_Dir)
    melted_df = data.frame("ID" = paste0("S",number),
                           "MODULE"=sub_modules,
                           "DESCR" = descriptions,
                           "INDIVID_SIZE" = sizes,
                           "SUPERSET_SIZE" = annot_info[,2],
                           "SUPERSET"=i,
                           "SUPERSET_DESCR"=annot_info[,1], stringsAsFactors = FALSE)
    all_melted_df = rbind(all_melted_df, melted_df)
    index = append(index, nrow(all_melted_df))   # last row of each superset
    number = number + 1
  }
  superset_length = nrow(all_melted_df)
  # modules that were not merged into any superset
  standalone_modules = unique(merged$MODULE[!grepl(",", merged$MODULE)])
  descriptions = c()
  sizes = c()
  for(j in standalone_modules){
    descriptions = append(descriptions,unique(summary_tbl$DESCR[summary_tbl$MODULE==j]))
    sizes = append(sizes, sum(mod_file$MODULE==j))
  }
  s_melted_df = data.frame("ID" = standalone_modules,
                           "MODULE"=standalone_modules,
                           "DESCR" = descriptions,
                           "INDIVID_SIZE" = sizes,
                           "SUPERSET_SIZE"= "none",
                           "SUPERSET"="none",
                           "SUPERSET_DESCR"= "none")
  all_melted_df = rbind(all_melted_df, s_melted_df)
  index = append(index, seq(from = (superset_length+1), to = nrow(all_melted_df), by = 1))
  # ------- For further refined downstream analysis ------- #
  aggregated_descr = append(aggregated_descr, descriptions)
  # one row per superset/standalone module; RENAME is filled in by hand later
  renamed = data.frame("ID"= all_melted_df$ID[index],
                       "MODULE" = all_melted_df$SUPERSET[index],
                       "DESCR" = aggregated_descr,
                       "RENAME" = "",
                       "ANNOTATION" = all_melted_df$SUPERSET_DESCR[index])
  write.table(all_melted_df, paste0(output_Dir, "30merged_summary_table.",study,".txt"), row.names = FALSE, quote = FALSE, sep = "\t")
  write.table(renamed, paste0(output_Dir, "30merged_summary_table_rename.",study,".txt"), row.names = FALSE, quote = FALSE, sep = "\t")
}
#' Attach superset IDs and manual renames to the combined summary table.
#'
#' For every module row of `summary_table`, looks up its superset ID in
#' `merged_table` (output of mergedSummaryTable) and the manually curated
#' RENAME value in `merged_table_rename`, then writes
#' "combined.renamed_summary_table.txt" into output_Dir.
#'
#' merged_table         path to 30merged_summary_table.<study>.txt
#' merged_table_rename  path to 30merged_summary_table_rename.<study>.txt
#'                      (its RENAME column filled in by hand)
#' summary_table        path to the table_summary_<study>.txt file
#' output_Dir           destination folder (trailing "/" expected)
summaryTablev2 <- function(merged_table, merged_table_rename, summary_table, output_Dir){
  merged_tbl = read.delim(merged_table, header = TRUE, stringsAsFactors = FALSE)
  merged_tbl_rename = read.delim(merged_table_rename, header = TRUE, stringsAsFactors = FALSE)
  summary_tbl = read.delim(summary_table, header = TRUE, stringsAsFactors = FALSE)
  modified_summary_tbl = summary_tbl
  # replace SUPERSET names from summary table with merged_tbl SUPERSET
  modules = summary_tbl$MODULE
  renames = c()
  ids = c()
  for(m in modules){
    # blank separator rows pass through unchanged
    if(m==""){
      renames = append(renames, "")
      ids = append(ids, "")
      next
    }
    if(merged_tbl$SUPERSET[merged_tbl$MODULE==m]=="none"){
      # standalone module: fall back through rename-table lookups.
      # NOTE(review): grepl/grep here do substring matching, so a module name
      # that is a prefix of another could match multiple rows -- confirm
      # module names are never substrings of each other.
      if(sum(grepl(m, merged_tbl_rename$ID))==0){
        renames = append(renames, merged_tbl_rename$RENAME[grep(m, merged_tbl_rename$MODULE)])
        ids = append(ids, merged_tbl_rename$ID[grep(m, merged_tbl_rename$MODULE)])
        next
      }
      if(merged_tbl_rename$RENAME[merged_tbl_rename$ID==m]!=""){
        renames = append(renames, merged_tbl_rename$RENAME[merged_tbl_rename$ID==m])
        ids = append(ids, "none")
        next
      }
      renames = append(renames, "none")
      ids = append(ids, "none")
      next
    }
    # get superset ID
    ID = merged_tbl$ID[merged_tbl$MODULE==m]
    ids = append(ids, ID)
    renames = append(renames, merged_tbl_rename$RENAME[merged_tbl_rename$ID==ID])
  }
  # NOTE(review): these assignments assume `ids`/`renames` ended up the same
  # length as the table; a lookup returning 0 or >1 hits would break that.
  modified_summary_tbl$SUPERSET_ID = ids
  modified_summary_tbl$RENAME = renames
  modified_summary_tbl = modified_summary_tbl[,c("SGM","MODULE","DESCR","SUPERSET_ID","RENAME","MODULE_SIZE","OVERLAP_METHOD")]
  write.table(modified_summary_tbl, paste0(output_Dir,"combined.renamed_summary_table.txt"), row.names = FALSE, quote = FALSE, sep = "\t")
}
#input table from output of summaryTable.
#summary_table - path to table
#1. merge all modules that were significant for all SGMs
#2. annotate modules - pick one annotation in the end
#3. extract comprising modules for each superset
#4. go through each module for each SGM in summary_table and reference it to its superset (if any) - add new column that corresponds to its superset
#5. do SGMrepTable function as I've written previously.
#6. only did for quasi
#set to results folder
# Lookup table: SNP-to-gene mapping (SGM) label -> integer key used in the
# "Significant Mapping" columns of the SGM representation tables.
# NOTE: element ORDER must be preserved -- downstream code indexes this
# vector by position (names(keys[foo])), and positions 3/4 deliberately
# carry values 4/3 as in the original definition.
keys <- c(
  "gene2loci.050kb"             = 1,
  "all_48esnps"                 = 2,
  "all_periph_35esnps"          = 4,
  "all_brain_13esnps"           = 3,
  "select_24esnps"              = 5,
  "periph_18esnps"              = 6,
  "brain_6esnps"                = 7,
  "Adipose_Subcutaneous"        = 8,
  "Adipose_Visceral_Omentum"    = 9,
  "Adrenal_Gland"               = 10,
  "Artery_Aorta"                = 11,
  "Artery_Tibial"               = 12,
  "Brain_Cerebellar_Hemisphere" = 13,
  "Brain_Cerebellum"            = 14,
  "Brain_Cortex"                = 15,
  "Brain_Frontal_Cortex_BA9"    = 16,
  "Brain_Hippocampus"           = 17,
  "Brain_Hypothalamus"          = 18,
  "Colon_Sigmoid"               = 19,
  "Esophagus_Mucosa"            = 20,
  "Esophagus_Muscularis"        = 21,
  "Heart_Left_Ventricle"        = 22,
  "Liver"                       = 23,
  "Muscle_Skeletal"             = 24,
  "Nerve_Tibial"                = 25,
  "Pancreas"                    = 26,
  "Pituitary"                   = 27,
  "Spleen"                      = 28,
  "Stomach"                     = 29,
  "Thyroid"                     = 30,
  "Whole_Blood"                 = 31
)
#module_categories is your merged table
#Results directory must have table_summary_`study`_.txt - case sensitive
#' Tabulate how many SNP-to-gene mappings (SGMs) each merged module category
#' was significant in, and write a companion module-info table.
#'
#' Reads the renamed summary table ("renamed_table_summary*") and the merged
#' module-category table ("merged_summary_table.AD_quasi*") from results_Dir,
#' collapses modules by their (first) superset description, maps each
#' category's significant SGMs through `keys`, and writes:
#'   SGMrep_table_<study>  categories sorted by number of significant mappings
#'   mod.info_<study>      superset-ID -> member-module info table
#'
#' keys           named integer vector mapping SGM label -> key number
#' results_Dir    folder holding the two input tables (trailing "/" expected)
#' infofile_path  module info file (MODULE/SOURCE/DESCR columns)
#'
#' NOTE(review): the input-file greps ("renamed_table_summary",
#' "merged_summary_table.AD_quasi") and the SGM exclusions below are
#' hard-coded to one specific analysis; confirm before reusing elsewhere.
SGMrepTable <- function(keys, results_Dir, infofile_path){
  #do for both by superset and by module
  files = list.files(results_Dir)
  table_file = files[grep("renamed_table_summary",files)]
  merged_tbl_file = files[grep("merged_summary_table.AD_quasi", files)]
  #read in summary table
  summary_tbl = read.delim(paste0(results_Dir,table_file), header=TRUE, stringsAsFactors = FALSE)
  # drop blank separator rows and the two excluded locus-window mappings
  summary_tbl = summary_tbl[!(summary_tbl$SGM==""),]
  summary_tbl = summary_tbl[!(summary_tbl$SGM=="gene2loci.010kb"),]
  summary_tbl = summary_tbl[!(summary_tbl$SGM=="gene2loci.020kb"),]
  #read in merging of all modules (module_categories)
  mod_categories = read.delim(paste0(results_Dir,merged_tbl_file), header = TRUE, stringsAsFactors = FALSE)
  #for multiple superset names listed, convert to just showing the first one.
  modify = mod_categories$DESCR[mod_categories$SUPERSET!="none"]
  # NOTE(review): 1:length(modify) iterates c(1, 0) when `modify` is empty --
  # assumes at least one superset row exists.
  for(i in 1:length(modify)){
    splitted = unlist(strsplit(modify[i], split = ", "))
    modify[i] = splitted[1]
  }
  mod_categories$DESCR[mod_categories$SUPERSET!="none"] = modify
  mod_descr = unique(mod_categories$DESCR[mod_categories$SUPERSET!="none"]) #SUPERSETS
  combined_supersets = c()
  num_mod = c()
  # collapse each superset description into one comma-joined module list
  for(j in mod_descr){
    if(grepl(",", mod_categories$SUPERSET[mod_categories$DESCR==j][1])){
      supersets = which(mod_categories$DESCR==j)
      position = 1
      num_mod = append(num_mod, length(supersets))
      for(k in supersets){
        if(mod_categories$SUPERSET[k]=="none"){
          supersets[position] = mod_categories$MODULE[k] # necessarily????
        }
        else{
          supersets[position] = mod_categories$SUPERSET[k]
        }
        position = position + 1
      }
      supersets = unique(supersets)
      supersets = paste0(supersets, collapse = ",")
    }
    # NOTE(review): when the if-branch above is skipped, `supersets` (and
    # num_mod) keep their value from the previous iteration, so the appended
    # value repeats the prior category -- confirm every superset DESCR is
    # guaranteed to hit the if-branch.
    combined_supersets = append(combined_supersets, supersets)
  }
  combined_supersets = append(combined_supersets, mod_categories$MODULE[mod_categories$SUPERSET=="none"]) #INDIVIDUAL
  num_mod = append(num_mod, rep(1, length(mod_categories$MODULE[mod_categories$SUPERSET=="none"])))
  # assign "S<n>" IDs to multi-module categories; standalone modules keep
  # their own name as ID
  number = 1
  ids = c()
  for(b in combined_supersets){
    name = paste0("S", number)
    if(grepl(",", b)){
      ids = append(ids, name)
      number = number + 1
    }
    else{
      ids = append(ids, b)
    }
  }
  description = append(mod_descr, mod_categories$DESCR[mod_categories$SUPERSET=="none"])
  new_df = data.frame("ID" = ids,
                      "MODULE"=combined_supersets,
                      "DESCR"=description, stringsAsFactors = FALSE) #check if was fine
  sign_map = c()
  sign_numMap = c()
  num_map = c()
  # for every category description, collect the SGMs in which any of its
  # member modules was significant
  for(l in description){
    temp2=c()
    for(m in 1:nrow(summary_tbl)){
      if(l==mod_categories$DESCR[mod_categories$MODULE==summary_tbl$MODULE[m]]){
        if(is.na(keys[summary_tbl$SGM[m]])){
          # SGM not in the key table: fall back to the raw SGM label
          mapping = summary_tbl$SGM[m]
          key = summary_tbl$SGM[m]
        }
        else{
          mapping = summary_tbl$SGM[m]
          key = keys[summary_tbl$SGM[m]]
        }
        temp2 = paste(temp2, key, sep = ", ")
      }
    }
    # strip the leading ", " produced by the paste loop, then dedupe/sort
    temp2 = substring(temp2,3)
    foo2 = unlist(strsplit(temp2,", "))
    foo2 = unique(foo2)
    foo2 = as.integer(foo2)
    foo2 = sort(foo2)
    num_map = append(num_map, length(foo2))
    # NOTE(review): keys are used as *positional* indices here, but the keys
    # vector's positions 3/4 hold values 4/3 -- verify this mismatch is
    # intended before trusting the mapping-name column.
    temp = names(keys[foo2])
    temp = paste0(temp, collapse = ", ")
    temp2 = paste0(foo2, collapse = ", ")
    sign_map = append(sign_map, temp)
    sign_numMap = append(sign_numMap, temp2)
  }
  new_df$"Significant Mapping Description" = sign_map
  new_df$"Significant Mapping"= sign_numMap
  new_df$"Number of Significant Mappings" = num_map
  new_df$"Number of Significant Modules"= num_mod
  # fix problem of mapping to unknown modules for every module that had no annotation. might run into problems that
  # merged modules with no annotation will have "1" Significant modules when in fact it was a superset with multiple modules
  unknown = new_df[new_df$DESCR=="Unknown",]
  if(length(unknown$MODULE)>0){
    for(d in 1:nrow(unknown)){
      for(e in names(keys)){
        if(grepl(e, unknown$MODULE[d])){
          unknown$"Significant Mapping Description"[d]=e
          unknown$"Significant Mapping"[d] = keys[e]
          unknown$"Number of Significant Mappings"[d] = 1
          unknown$"Number of Significant Modules"[d]= 1
        }
      }
    }
    # move the Unknown rows to the bottom of the table
    new_df = new_df[!(new_df$DESCR=="Unknown"),]
    new_df = rbind(new_df, unknown)
  }
  new_df_sorted = new_df[order(-new_df$"Number of Significant Mappings"),]
  study = gsub("table_summary_","",table_file)
  output_file_name = paste0(results_Dir, "SGMrep_table_", study)
  write.table(new_df_sorted, output_file_name, row.names = FALSE, quote = FALSE, sep="\t")
  info = read.delim(infofile_path, header = TRUE, stringsAsFactors = FALSE)
  #Create superset information table
  superset_ids = ids[1:length(mod_descr)]
  superset_info = data.frame(stringsAsFactors = FALSE)
  # one row per member module of each superset category
  for(a in 1:length(mod_descr)){
    sub_modules = unlist(strsplit(new_df$MODULE[new_df$DESCR==mod_descr[a]], split = ","))
    temp_df = data.frame("SUPERSET_ID"=superset_ids[a],
                         "MODULE"=sub_modules, stringsAsFactors = FALSE,
                         "SUPERSET_DESCR"= mod_descr[a])
    superset_info = rbind(superset_info, temp_df)
  }
  individual_df = data.frame("SUPERSET_ID"="none",
                             "MODULE"=mod_categories$MODULE[mod_categories$SUPERSET=="none"],
                             "SUPERSET_DESCR"="none")
  all_mod_info = rbind(superset_info, individual_df)
  # pull SOURCE and DESCR for every module from the info file
  sources = c()
  for(b in all_mod_info$MODULE){
    sources = append(sources, info$SOURCE[info$MODULE==b])
  }
  all_mod_info$SOURCE = sources
  descriptions = c()
  for(c in all_mod_info$MODULE){
    descriptions = append(descriptions, info$DESCR[info$MODULE==c])
  }
  all_mod_info$DESCR = descriptions
  all_mod_info = all_mod_info[, c(1,2,4,5,3)]
  write.table(all_mod_info, paste0(results_Dir, "mod.info_",study), row.names = FALSE, quote = FALSE, sep="\t")
}
#' Tabulate significant SGMs per superset/standalone module (v2).
#'
#' Works from the renamed summary table produced by summaryTablev2():
#' groups rows by SUPERSET_ID (IDs containing "S") or by MODULE for
#' standalone modules, maps each group's significant SGMs through `keys`,
#' and writes "combined.SGMrep_table_<study>.txt" sorted by number of
#' significant mappings with "Unknown" rows moved to the bottom.
#'
#' keys           named integer vector mapping SGM label -> key number
#' table_summary  file name (inside results_Dir) of the renamed summary table
#' results_Dir    input/output folder (trailing "/" expected)
#' study          label used in the output file name
SGMrepTablev2 <- function(keys, table_summary, results_Dir, study){
  tbl_summary = read.delim(paste0(results_Dir, table_summary), header = TRUE, stringsAsFactors = FALSE)
  # NOTE(review): grep("S", ...) picks up any ID containing a capital S, not
  # only "S<number>" superset IDs -- confirm module names can't contain "S".
  supersets = unique(tbl_summary$SUPERSET_ID[grep("S",tbl_summary$SUPERSET_ID)])
  all_modules = append(supersets, unique(tbl_summary$MODULE[tbl_summary$SUPERSET=="none"]))
  mapping_names = c()
  key_map = c()
  num_map = c()
  descriptions = c()
  superset_size = c()
  for(l in all_modules){
    temp = c()
    temp2 = c()
    # collect the key of every summary row belonging to this superset
    # (matched on SUPERSET_ID) or standalone module (matched on MODULE).
    # NOTE(review): 1:nrow(tbl_summary) assumes a non-empty table.
    for(m in 1:nrow(tbl_summary)){
      if(sum(supersets==l)>0){
        if(tbl_summary$SUPERSET_ID[m]==l){
          if(is.na(keys[tbl_summary$SGM[m]])){
            # SGM not in the key table: fall back to the raw SGM label
            key = tbl_summary$SGM[m]
          }
          else{
            key = keys[tbl_summary$SGM[m]]
          }
          temp = paste(temp, key, sep = ", ")
        }
      }
      else {
        if(tbl_summary$MODULE[m]==l){
          if(is.na(keys[tbl_summary$SGM[m]])){
            key = tbl_summary$SGM[m]
          }
          else{
            key = keys[tbl_summary$SGM[m]]
          }
          temp = paste(temp, key, sep = ", ")
        }
      }
    }
    # prefer the manually curated RENAME, falling back to DESCR
    if(sum(supersets==l)>0){
      descriptions = append(descriptions, tbl_summary$RENAME[tbl_summary$SUPERSET_ID==l][1])
    }
    else if(unique(tbl_summary$RENAME[tbl_summary$MODULE==l])!="none"){
      descriptions = append(descriptions, tbl_summary$RENAME[tbl_summary$MODULE==l][1])
    }
    else{
      descriptions = append(descriptions, tbl_summary$DESCR[tbl_summary$MODULE==l][1])
    }
    # number of member modules (1 for standalone modules)
    if(sum(supersets==l)>0){
      superset_size = append(superset_size, length(unique(tbl_summary$MODULE[tbl_summary$SUPERSET_ID==l])))
    }
    else{
      superset_size = append(superset_size, 1)
    }
    # strip the leading ", " from the paste loop, then dedupe/sort the keys
    temp = substring(temp,3)
    foo = unlist(strsplit(temp,", "))
    foo = unique(foo)
    foo = as.integer(foo)
    foo = sort(foo)
    num_map = append(num_map, length(foo))
    # NOTE(review): positional indexing of keys by key *value* -- positions
    # 3/4 of keys hold values 4/3, so verify the name column is correct.
    temp2 = names(keys[foo]) # keep keys as vector
    temp2 = paste0(temp2, collapse = ", ")
    temp = paste0(foo, collapse = ", ") # now conjoin keys
    key_map = append(key_map, temp)
    mapping_names = append(mapping_names, temp2)
  }
  new_df = data.frame("MODULE"= all_modules,
                      "DESCR" = descriptions,
                      "Significant Mapping Description" = mapping_names,
                      "Significant Mapping" = key_map,
                      "Number_of_Significant_Mappings" = num_map,
                      "Number of Modules" = superset_size,
                      stringsAsFactors = FALSE)
  # sort by mapping count (descending) and push Unknown rows to the bottom
  new_df_sorted = new_df[order(-new_df$Number_of_Significant_Mappings),]
  unknowns = new_df_sorted[new_df_sorted$DESCR=="Unknown",]
  new_df_sorted = new_df_sorted[!(new_df_sorted$DESCR=="Unknown"),]
  new_df_sorted = rbind(new_df_sorted, unknowns)
  # study = gsub("30renamed_table_summary_","",table_summary)
  output_file_name = paste0(results_Dir, "combined.SGMrep_table_", study,".txt")
  write.table(new_df_sorted, output_file_name, row.names = FALSE, quote = FALSE, sep="\t")
}
# NOTE(review): unfinished stub -- it reads both SGMrep result tables but
# never combines them; the last assignment merely returns `second`
# (invisibly).  Intent of the "combine" step needs to be specified before
# this can be completed.
combineSGMrepTable <- function(results1, results2){
  first = read.delim(results1, header = TRUE, stringsAsFactors = FALSE)
  second = read.delim(results2, header = TRUE, stringsAsFactors = FALSE)
}
#modified merge function that just takes in a list of modules - already filtered for FDR, no ctrl sets, etc.
#include last "/" in output_Dir
#' Merge overlapping gene modules into supersets (pre-2nd-SSEA step).
#'
#' Subsets the master module->gene file to the modules listed in
#' `modules_df`, coalesces modules whose gene overlap exceeds `rcutoff`
#' (tool.coalesce), tags merged modules with a ",.." name suffix, and writes
#' "merged_<name>.mod.txt" / "merged_<name>.info.txt" into `output_Dir`.
#'
#' name           label used in the output file names
#' modules_df     data.frame with a MODULE column of modules to merge
#'                (already filtered for FDR, control sets removed, etc.)
#' rcutoff        overlap-ratio cutoff passed to tool.coalesce()
#' output_Dir     destination folder, trailing "/" included
#' modfile_path   module -> gene membership file (tool.read format)
#' infofile_path  module info file (tool.read format)
#'
#' Depends on tool.read()/tool.coalesce() from the sourced Mergeomics
#' helper scripts.
merge_modules <- function(name, modules_df, rcutoff, output_Dir, modfile_path, infofile_path){
  # `plan` mimics the Mergeomics job-plan list structure
  plan = c()
  plan$folder = output_Dir
  plan$label = name
  plan$modfile= modfile_path
  plan$inffile= infofile_path
  #=====================================================
  pool=c()
  aa = modules_df
  if (nrow(aa) > 0) pool=unique(c(pool, as.character(aa[,"MODULE"])))
  #=====================================================
  #=== Merge the modules before 2nd SSEA
  if (length(pool)>0){
    meg.mods<- tool.read(plan$modfile)
    merged.modules <- pool
    # keep only the rows of the master module file that are in the pool
    moddata <- meg.mods[which(!is.na(match(meg.mods[,1], merged.modules))),]
    # Merge and trim overlapping modules.
    rmax <- rcutoff
    moddata$OVERLAP <- moddata$MODULE
    moddata <- tool.coalesce(items=moddata$GENE, groups=moddata$MODULE, rcutoff=rmax) #, ncore=500)
    # map tool.coalesce output columns back to MODULE/GENE/OVERLAP
    moddata$MODULE <- moddata$CLUSTER
    moddata$GENE <- moddata$ITEM
    moddata$OVERLAP <- moddata$GROUPS
    moddata <- moddata[,c("MODULE", "GENE", "OVERLAP")]
    moddata <- unique(moddata)
    moddatainfo <- tool.read(plan$inffile)
    moddatainfo <- moddatainfo[which(!is.na(match(moddatainfo[,1], moddata[,1]))), ]
    # Mark modules with overlaps: a ",.." suffix flags merged modules in
    # both the module table and the info table.
    for(j in which(moddata$MODULE != moddata$OVERLAP)){
      moddatainfo[which(moddatainfo[,"MODULE"] == moddata[j,"MODULE"]), "MODULE"] <- paste(moddata[j,"MODULE"], "..", sep=",")
      moddata[j,"MODULE"] <- paste(moddata[j,"MODULE"], "..", sep=",")
    }
    # Save module info for 2nd SSEA and KDA.
    moddata <- unique(moddata)
    # duplicate the GENE column as NODE (expected by downstream KDA input)
    moddata[, 4] <- moddata[, 2]; names(moddata)[4] <- c("NODE")
    mdfile=paste0(name, ".mod.txt"); mifile=paste0(name, ".info.txt")
    write.table(moddata, paste0(plan$folder,"merged_", mdfile), #deleted "/" before "merged"
                sep='\t', col.names=T, row.names=F, quote=F)
    write.table(moddatainfo, paste0(plan$folder,"merged_", mifile),
                sep='\t', col.names=T, row.names=F, quote=F)
  }
}
#name includes ("supersets.",study,"." and maybe some other thing)
#' Read the geneOntology annotation for one merged superset and summarise it.
#'
#' Locates "supersets.<study>/a.supersets.<study>_Ontology_<name>.xls" under
#' `results_Dir` (tab-delimited despite the .xls extension) and returns a
#' one-row data.frame with:
#'   SUPERSET     comma-separated Gene.Category values of all annotation
#'                rows with corrected p < 0.05, or "Unknown" when none are
#'                significant or the file carries no usable ModuleSize
#'   MODULE_SIZE  ModuleSize of the first annotation row (or an error note
#'                when the column is absent)
#'
#' `results_Dir` must end with "/" because the path is built with paste0().
extract_annotation <- function(study, name, results_Dir){
  file_name = paste0(results_Dir, "supersets.", study, "/", "a.supersets.", study, "_Ontology_", name, ".xls")
  annot = read.delim(file_name, header = TRUE, stringsAsFactors = FALSE)
  if(is.null(annot$ModuleSize[1])){
    # ModuleSize column missing entirely -> annotation step failed upstream
    annotation = "Unknown"
    size = "Error in annotation gave no module size"
  }
  else if(is.na(annot$ModuleSize[1])){
    annotation = "Unknown"
    size = annot$ModuleSize[1]
  }
  else if(sum(annot$pvalue_corrected < 0.05) == 0){
    # nothing significant after multiple-testing correction
    annotation = "Unknown"
    size = annot$ModuleSize[1]
  }
  else{
    sig_mods = annot[annot$pvalue_corrected < 0.05, ]
    # join all significant ontology categories; equivalent to the previous
    # manual paste/substring(,3) loop, but handles any length in one call
    annotation = paste(sig_mods$Gene.Category, collapse = ", ")
    size = annot$ModuleSize[1]
  }
  superset_table = data.frame("SUPERSET" = annotation,
                              "MODULE_SIZE" = size, stringsAsFactors = FALSE)
  return(superset_table)
}
# cohort is so that you only get one set of SGMs
# trim is a vector of strings to trim off files to get the "SGM"
# for example, things to trim include the study or any other common information ex. DIAGRAMstage1_T2D.Adipose_Subcutaneous.50.20.results.txt
# the goal of the trim parameter is to reduce the example to "Adipose_Subcutaneous"
# so your trim vector would be c("DIAGRAMstage1_T2D", ".50.20.results.txt")
# because you just need to get SGMs and can get SGM files using "grep" and this is just an "intersect" or "replication" function, you don't need to put all studies
#' Intersect significant modules across cohorts, per SGM ("replication").
#'
#' Derives the set of SGM labels from the files matching `cohort` in
#' `results_Dir` (after stripping every pattern in `trim` from the file
#' names), then, for each SGM, intersects the MODULE columns of all result
#' files matching that SGM and writes the replicated modules to
#' "shared_<study>.<SGM>.txt" in `results_Dir`.
#'
#' results_Dir  folder of per-cohort result files (trailing "/" expected)
#' cohort       pattern identifying one cohort's files (used only to derive
#'              the SGM labels once, so listing every cohort is unnecessary)
#' study        label used in the output file names
#' trim         character vector of patterns stripped from the cohort file
#'              names to recover the SGM label, e.g.
#'              c("DIAGRAMstage1_T2D", ".50.20.results.txt")
replication <- function(results_Dir, cohort, study, trim=NULL){
  all_files <- list.files(results_Dir)
  # derive SGM labels from one cohort's file names
  sgm_labels <- all_files[grep(cohort, all_files)]
  for (pattern in trim) {
    sgm_labels <- gsub(pattern, "", sgm_labels)
  }
  # NOTE: intentionally initialised once, outside the SGM loop, to preserve
  # the original carry-over behaviour when an SGM matches no files
  common <- c()
  for (sgm in sgm_labels) {
    matching <- all_files[grep(sgm, all_files)]
    for (idx in seq_along(matching)) {
      tbl <- read.delim(paste0(results_Dir, matching[idx]),
                        header = TRUE, stringsAsFactors = FALSE)
      if (idx == 1) {
        common <- tbl$MODULE
      } else {
        common <- intersect(common, tbl$MODULE)
      }
    }
    write.table(data.frame("MODULE" = common),
                paste0(results_Dir, "shared_", study, ".", sgm, ".txt"),
                row.names = FALSE, quote = FALSE, sep = "\t")
  }
}
# Combine same-named results files (one per trait x mapping combination) found
# across several folders into single files written to output_Dir.
# File names are assumed to look like "<trait>.<mapping>...." so the first two
# "."-separated tokens identify trait and mapping method. Control modules
# (_ctrlA/_ctrlB) are dropped before writing.
# NOTE(review): trait and mapping names are enumerated from folders[1] only —
# files in later folders with unseen trait/mapping names are never combined.
combine_results <- function(folders, output_Dir){
  # just to get traits and mapping
  files = list.files(folders[1])
  mapping = c()
  traits = c()
  for(f in files){
    mapping = append(mapping, unlist(strsplit(f, split = ".",fixed = TRUE))[2])
    traits = append(traits, unlist(strsplit(f, split = ".",fixed = TRUE))[1])
  }
  mapping = unique(mapping)
  traits = unique(traits)
  for(t in traits){
    for(m in mapping){
      combined = data.frame(stringsAsFactors = FALSE)
      files = c()
      # collect the matching file (trait AND mapping) from each folder
      for(f in folders){
        file = list.files(f, full.names = TRUE)[grep(t, list.files(f))]
        file = file[grep(m, file)]
        # should just get one file from each folder
        if(length(file)>1){
          cat("More than one similar file in ", f, "\n")
        }
        if(length(file)==0){
          next
        }
        files = append(files, file)
      }
      if(is.null(files)) next
      # stack the per-folder tables row-wise
      for(f in files){
        df <- read.delim(f, stringsAsFactors = FALSE)
        combined = rbind(combined, df)
      }
      # drop internal control modules before writing
      combined = combined[!(combined$MODULE=="_ctrlA" | combined$MODULE=="_ctrlB"),]
      # will output the same name
      write.table(combined, paste0(output_Dir, tail(unlist(strsplit(files[1], split = "/")),n=1)),
                  row.names = FALSE, quote = FALSE, sep="\t")
    }
  }
  # results = list.files(results_Dir)
  # files = results[grep(study, results)]
  # files = files[grep("results", files)]
  # coexpr_files = list.files(coexpr_Dir)
  # for(i in files){
  #   file = read.delim(paste0(results_Dir, i), header = TRUE, stringsAsFactors = FALSE)
  #   file = file[file$FDR<=0.05,]
  #   if(sum(grepl(i,coexpr_files))>0){
  #     coexpr = coexpr_files[grep(i, coexpr_files)]
  #     coexpr_df = read.delim(paste0(coexpr_Dir,coexpr), header = TRUE, stringsAsFactors = FALSE)
  #     coexpr_df = coexpr_df[coexpr_df$FDR<=0.05,]
  #     combined = rbind(file, coexpr_df)
  #     combined = combined[!(combined$MODULE=="_ctrlA" | combined$MODULE=="_ctrlB"),]
  #     write.table(combined, paste0(output_Dir, i), row.names = FALSE, quote = FALSE, sep="\t")
  #     next
  #   }
  #   file = file[!(file$MODULE=="_ctrlA" | file$MODULE=="_ctrlB"),]
  #   write.table(file, paste0(output_Dir, i), row.names = FALSE, quote = FALSE, sep="\t")
  # }
}
# Merge canonical-pathway and coexpression detail files that share a file name.
# Files present only in canon_Dir are copied through to output_Dir unchanged;
# files present in both directories are row-bound and written to output_Dir.
combine_details <- function(canon_Dir, coexp_Dir, output_Dir){
  coexp_names <- list.files(coexp_Dir)
  for (fname in list.files(canon_Dir)) {
    src <- paste0(canon_Dir, fname)
    dest <- paste0(output_Dir, fname)
    if (fname %in% coexp_names) {
      # same file exists for both sources: stack canonical rows on top
      merged <- rbind(read.delim(src, stringsAsFactors = FALSE),
                      read.delim(paste0(coexp_Dir, fname), stringsAsFactors = FALSE))
      write.table(merged, dest, row.names = FALSE, quote = FALSE, sep = "\t")
    } else {
      # canonical-only file: pass through untouched
      file.copy(from = src, to = dest)
    }
  }
}
# Run a weighted key driver analysis (Mergeomics wKDA) for one node/module file
# against one network file. Results land in a folder named
# "<nodes minus trim strings>_<network file name>".
# `trim` lists substrings stripped from `nodes` when building the folder name.
# Relies on Mergeomics functions (tool.read, tool.save, kda.*, kda2cytoscape)
# being attached; writes a temporary module file "subsetof.supersets.txt" in
# the working directory as a side effect.
runKDA <- function(nodes, network, trim = NULL){
  cat("\nNow analyzing:", gsub(".txt","",nodes), "with", gsub(".txt","",network), "\n")
  job.kda <- list()
  job.kda$label<-"wKDA"
  # network name = last path component without the .txt extension
  network_name = gsub(".txt","",unlist(strsplit(network, split = "/"))[length(unlist(strsplit(network, split = "/")))])
  nodes_name = nodes
  for(i in trim){
    nodes_name = gsub(i,"",nodes_name)
  }
  name = paste(nodes_name, network_name,sep = "_")
  job.kda$folder<- name ## parent folder for results
  ## Input a network
  ## columns: TAIL HEAD WEIGHT
  job.kda$netfile <- network
  ## Input gene list
  ## columns: MODULE NODE
  job.kda$modfile <- nodes
  ## "0" means we do not consider edge weights while 1 is opposite.
  job.kda$edgefactor <- 0
  ## The searching depth for the KDA
  job.kda$depth <- 1
  ## 0 means we do not consider the directions of the regulatory interactions
  ## while 1 is opposite.
  job.kda$direction <- 1
  job.kda$nperm <- 10000
  moddata <- tool.read(job.kda$modfile)
  mod.names <- unique(moddata$MODULE)
  moddata <- moddata[which(!is.na(match(moddata$MODULE, mod.names))),]
  ## save this to a temporary file and set its path as new job.kda$modfile:
  tool.save(moddata, "subsetof.supersets.txt")
  job.kda$modfile <- "subsetof.supersets.txt"
  ## Run KDA
  job.kda <- kda.configure(job.kda)
  job.kda <- kda.start(job.kda)
  job.kda <- kda.prepare(job.kda)
  job.kda <- kda.analyze(job.kda)
  job.kda <- kda.finish(job.kda)
  job.kda <- kda2cytoscape(job.kda)
}
# Summarise wKDA output folders into two tables written to the working dir:
#   <name>_results_summary.txt  one row per network (KD counts, top KDs,
#                               node/edge statistics, % member nodes)
#   <name>_KDs_summary.txt      one row per significant (FDR<0.05) key driver,
#                               annotated via the protein description file
# `protein_descrip` must be a tab-delimited table with columns preferred_name
# and annotation. Relies on concatenate() being available.
# NOTE(review): the empty-results check below lists "./<l>/" relative to the
# current working directory rather than KDA_folder — confirm this is intended.
summarizeKDA <- function(KDA_folder, protein_descrip, name){
  results = data.frame(stringsAsFactors = FALSE)
  KDs = data.frame(stringsAsFactors = FALSE)
  protein_names = read.delim(protein_descrip,
                             header = TRUE, stringsAsFactors = FALSE, quote = "")
  for(l in list.files(KDA_folder)){
    # a folder with a single file means kda produced no significant drivers
    if(length(list.files(paste0("./",l, "/")))==1){
      cat("No significant key drivers found for", l, "\n")
      next
    }
    else{
      key_drivers = read.delim(paste0(KDA_folder,l,"/kda/wKDA.results.txt"), stringsAsFactors = FALSE)
      # indices of the (up to) five top key drivers actually present
      valued_indices = which(!is.na(key_drivers$NODE[1:5]))
      edges = read.delim(paste0(KDA_folder,l,"/cytoscape/kda2cytoscape.edges.txt"), stringsAsFactors = FALSE)
      edges = edges[!duplicated(edges[,c(1,2)]),] # makes an edge for each module but don't want this..
      nodes = read.delim(paste0(KDA_folder,l,"/cytoscape/kda2cytoscape.nodes.txt"), stringsAsFactors = FALSE)
      temp = data.frame(stringsAsFactors = FALSE)
      # make network summary table
      temp = data.frame("NETWORK"=l,
                        "KDs"=length(key_drivers$NODE),
                        "KDs_p<0.05"=sum(key_drivers$P<0.05),
                        "KDs_fdr<0.05"=sum(key_drivers$FDR<0.05),
                        "topKDs"=concatenate(key_drivers$NODE[valued_indices], mysep = ", "),
                        "topKDs_fdr"=concatenate(key_drivers$FDR[valued_indices], mysep = ", "),
                        "n_nodes" = length(nodes$NODE),
                        "n_edges"=length(edges$TAIL),
                        "avg_degree"= (length(edges$TAIL)/length(unique(append(edges$TAIL, edges$HEAD)))),
                        "perc_member"=(sum(grepl("chart", nodes$URL)))/length(nodes$NODE), stringsAsFactors = FALSE)
      results = rbind(results, temp)
      # annotate every FDR-significant key driver from the protein table
      info = c()
      indices = which(!is.na(key_drivers$NODE[key_drivers$FDR<0.05]))
      for(m in key_drivers$NODE[indices]){
        if(length(protein_names$annotation[protein_names$preferred_name==m])==0){
          info = append(info, "Not annotated")
        }
        else if(length(protein_names$annotation[protein_names$preferred_name==m])>1){
          info = append(info,
                        concatenate(protein_names$annotation[protein_names$preferred_name==m],
                                    mysep = ","))
          cat("This protein had more than one annotation:", m, "\n")
        }
        else{
          info = append(info, protein_names$annotation[protein_names$preferred_name==m])
        }
      }
      # make KD summary table
      kd_temp = data.frame("NETWORK"=l,
                           "KDs"= key_drivers$NODE[indices],
                           "Degrees" = key_drivers$N.neigh[indices],
                           "P-value" = key_drivers$P[indices],
                           "FDR" = key_drivers$FDR[indices],
                           "INFO" = info,
                           "MEMBER" = key_drivers$MEMBER[indices], stringsAsFactors = FALSE)
      KDs = rbind(KDs, kd_temp)
    }
  }
  write.table(results, paste0(name, "_results_summary.txt"),
              row.names = FALSE, quote = FALSE, sep="\t")
  write.table(KDs, paste0(name, "_KDs_summary.txt"),
              row.names = FALSE, quote = FALSE, sep="\t")
}
# Filter a cytoscape edge file (the "*edges*" file in `folder`) and write the
# result back into the folder as "mod_<edges file>"; the filtered edge table
# is also returned. Edges whose HEAD matches one of several probe/predicted-
# transcript id prefixes are always dropped. Optionally, edges are further
# restricted to HEAD nodes that are not key drivers (SHAPE != "Diamond") and:
#   keep_only_orig_input  were part of the original input (non-empty URL)
#   keep_only_GWAS_hits   are GWAS hits (GWAS_hit_meta != "no"; the nodes file
#                         must carry a GWAS_hit_meta column, e.g. from
#                         add_GWAS_hit_info_network)
# Refactor note: the former three copy-pasted branches are folded into one
# flag-composed node filter; node matching now uses exact `%in%` instead of
# grep("^<name>$"), which mishandled regex metacharacters (e.g. ".") in names.
trim_network <- function(folder, keep_only_orig_input = FALSE, keep_only_GWAS_hits = FALSE){
  folder_files <- list.files(folder)
  edges_file <- folder_files[grep("edges", folder_files)]
  edges <- read.delim(paste0(folder, edges_file), stringsAsFactors = FALSE)
  nodes <- read.delim(paste0(folder, folder_files[grep("nodes", folder_files)]),
                      stringsAsFactors = FALSE)
  # probe / predicted-transcript identifiers to exclude (AK genes are long
  # noncoding RNAs)
  prefixes <- c("^MMT", "^NM_", "^XM_", "ENSMUST", "^ri", "_at")
  for (pre in prefixes) {
    edges <- edges[!grepl(pre, edges$HEAD), ]
  }
  if (keep_only_orig_input || keep_only_GWAS_hits) {
    # build the node filter once; the old code duplicated this loop per flag combo
    keep <- nodes$SHAPE != "Diamond"                 # drop key drivers themselves
    if (keep_only_orig_input) {
      keep <- keep & nodes$URL != ""                 # original (member) input only
    }
    if (keep_only_GWAS_hits) {
      keep <- keep & nodes$GWAS_hit_meta != "no"     # GWAS hits only
    }
    edges <- edges[edges$HEAD %in% nodes$NODE[keep], ]
    edges <- edges[!duplicated(edges), ]
    edges <- edges[!duplicated(edges[, c(1, 2)]), ]
  }
  write.table(edges, paste0(folder, "mod_", edges_file),
              row.names = FALSE, quote = FALSE, sep = "\t")
  return(edges)
}
# Tag each node in a cytoscape nodes table with its GWAS-hit status for one SGM.
# Reads the "noDescrip_<SGM>_overlap_genes.txt"-style file from genes_folder and
# adds a GWAS_hit column per node:
#   "Replicated_GWAS_<study>"  gene appears in the Shared.genes column
#   "GWAS_hit_<study name>"    gene appears in only one study's gene column
#   "no"                       otherwise
# Returns the nodes data.frame with the new column appended.
# NOTE(review): columns 5 and 6 of the genes file are assumed to be the
# per-study "<study>_genes" lists (the createGenesFiles noDescrip layout) —
# confirm this ordering before reusing with other gene files.
add_GWAS_hit_info_network <- function(nodes, SGM, genes_folder, study){
  nodes = read.delim(nodes, stringsAsFactors = FALSE)
  files = list.files(genes_folder)
  file = files[intersect(grep("noDescrip",files), grep(SGM,files))]
  df = read.delim(paste0(genes_folder,file), stringsAsFactors = FALSE)
  GWAS_hit = c()
  for(i in nodes$NODE){
    # Shared.genes reads as logical/NA when no module had shared genes; the
    # is.character guard skips the lookup entirely in that case.
    if(is.character(df$Shared.genes)){
      if(sum(unlist(strsplit(df$Shared.genes, split = ", "))==i)>0){
        GWAS_hit = append(GWAS_hit, paste0("Replicated_GWAS_",study))
        next
      }
      else if(sum(unlist(strsplit(df[,5], split = ", "))==i)>0){
        GWAS_hit = append(GWAS_hit, paste0("GWAS_hit_", gsub("_genes","",colnames(df)[5])))
      }
      else if(sum(unlist(strsplit(df[,6], split = ", "))==i)>0){
        GWAS_hit = append(GWAS_hit, paste0("GWAS_hit_", gsub("_genes","",colnames(df)[6])))
      }
      else{
        GWAS_hit = append(GWAS_hit, "no")
      }
    }
    else{
      GWAS_hit = append(GWAS_hit, "no")
    }
  }
  nodes$GWAS_hit = GWAS_hit
  return(nodes)
}
# Variant of extract_annotation that builds the annotation file path as
# <results_Dir><set><module>.xls instead of the study-based folder layout.
# Returns a one-row data.frame with SUPERSET (comma-separated significant
# categories, or "Unknown") and MODULE_SIZE.
extract_annotationv2 <- function(module, set, results_Dir){
  annot <- read.delim(paste0(results_Dir, set, module, ".xls"),
                      header = TRUE, stringsAsFactors = FALSE)
  mod_size <- annot$ModuleSize[1]
  if (is.null(mod_size)) {
    label <- "Unknown"
    mod_size <- "Error in annotation gave no module size"
  } else if (is.na(mod_size)) {
    label <- "Unknown"
  } else if (sum(annot$pvalue_corrected < 0.05) == 0) {
    # nothing survived multiple-testing correction
    label <- "Unknown"
  } else {
    # join every significantly enriched category into one comma-separated label
    label <- paste(annot$Gene.Category[annot$pvalue_corrected < 0.05],
                   collapse = ", ")
  }
  return(data.frame("SUPERSET" = label,
                    "MODULE_SIZE" = mod_size, stringsAsFactors = FALSE))
}
# Collapse a module summary table: modules sharing a SUPERSET_ID are merged
# into a single row (MODULE/DESCR/OVERLAP_METHOD comma-concatenated; RENAME and
# MODULE_SIZE taken from the first member), grouped per SGM with a blank
# separator row between SGM groups. Writes
# "trimmed_summary_table_<study>.txt" to output_Dir.
# Relies on concatenate() being available.
trimSummaryTable <- function(summary_table, study, output_Dir){
  new_summary_tbl = data.frame(stringsAsFactors = FALSE)
  summary_tbl = read.delim(summary_table, header = TRUE, stringsAsFactors = FALSE)
  SGMs = unique(summary_tbl$SGM)
  SGMs = SGMs[SGMs!=""]
  for(i in SGMs){
    all_n_subset = data.frame(stringsAsFactors = FALSE)
    SGM_subset = summary_tbl[summary_tbl$SGM==i,]
    supers = SGM_subset$SUPERSET_ID[SGM_subset$SUPERSET_ID!="none"]
    supersets = unique(supers)
    for(j in supersets){
      # merge every module belonging to superset j into one row
      subset = SGM_subset[SGM_subset$SUPERSET_ID==j,]
      n_subset = data.frame("SGM"=i,
                            "MODULE"=concatenate(subset$MODULE, mysep = ", "),
                            "DESCR"=concatenate(subset$DESCR, mysep = ", "),
                            "SUPERSET_ID"=subset$SUPERSET_ID[1],
                            "RENAME"=subset$RENAME[1],
                            "MODULE_SIZE"=subset$MODULE_SIZE[1],
                            "OVERLAP_METHOD"=concatenate(subset$OVERLAP_METHOD, mysep = ", "), stringsAsFactors = FALSE)
      all_n_subset = rbind(all_n_subset, n_subset)
    }
    # modules not assigned to any superset are kept as-is
    all_n_subset = rbind(all_n_subset, SGM_subset[SGM_subset$SUPERSET_ID=="none",])
    new_summary_tbl = rbind(new_summary_tbl, all_n_subset)
    new_summary_tbl = rbind(new_summary_tbl,"")  # blank separator row between SGMs
  }
  write.table(new_summary_tbl, paste0(output_Dir, "trimmed_summary_table_",study,".txt"), row.names = FALSE, quote = FALSE, sep="\t")
}
# Annotate the superset info table with which studies' SGM replication tables
# contain each module, and write the result (Representation inserted as the
# third column) to <output_Dir>detailed_superset_info_rep.txt.
# Representation values: "<name1>, <name2>", "<name1>", "<name2>", or "".
# Cleanup: removed the unused `column1`/`column2` locals and the dead
# commented-out reorder code; membership testing is vectorized with %in%
# (equivalent to the former per-module sum(MODULE == i) > 0 loop).
# NOTE(review): the final column reorder assumes `superset_info` has exactly
# eight columns — confirm before reusing with a different table layout.
enrichSupersetTable <- function(name1, sgm_rep_table1, name2, sgm_rep_table2, superset_info, output_Dir){
  stbl_1 <- read.delim(sgm_rep_table1, header = TRUE, stringsAsFactors = FALSE)
  stbl_2 <- read.delim(sgm_rep_table2, header = TRUE, stringsAsFactors = FALSE)
  supersets <- read.delim(superset_info, header = TRUE, stringsAsFactors = FALSE)
  # membership of each superset module in each study's replication table
  in1 <- supersets$MODULE %in% stbl_1$MODULE
  in2 <- supersets$MODULE %in% stbl_2$MODULE
  represent <- rep("", length(supersets$MODULE))
  represent[in1] <- name1
  represent[in2] <- name2
  represent[in1 & in2] <- paste(name1, name2, sep = ", ")
  supersets$Representation <- represent
  # move the new Representation column (appended as 9th) into third position
  supersets <- supersets[, c(1, 2, 9, 3, 4, 5, 6, 7, 8)]
  write.table(supersets, paste0(output_Dir, "detailed_superset_info_rep.txt"),
              row.names = FALSE, quote = FALSE, sep = "\t")
}
# Per-mapping overlap counts between two studies:
#   - module counts per study, read from "combined_" files in results_Dir,
#   - number of directly shared modules (duplicates within a combined file),
#   - modules surviving quasi overlap ("shared_" files in quasi_Dir),
#   - shared supersets ("merged_*info.txt" files in quasi_Dir; a mapping with
#     exactly one quasi-overlapped module counts as one superset, since
#     merging is skipped for single modules).
# Returns a data.frame with one row per mapping. `studies` must hold the two
# STUDY values used in the combined files; results_Dir is typically the
# preprocess_quasi_overlap folder.
quantifyOverlap <- function(results_Dir, studies, quasi_Dir){
  combined_files = list.files(results_Dir)[grep("combined_", list.files(results_Dir))]
  study1_num = c()
  study2_num = c()
  shared = c() # number
  shared_modules = c() # actual modules - at the end, unique to get number of unique shared modules
  all_study1_modules = c() # unique at end - overall across all mappings
  all_study2_modules = c()
  # at end after "uniquing", get num of shared modules, number of duplicated
  # (the three vectors above are collected for ad-hoc inspection only; they
  # do not feed into the returned table)
  for(file in combined_files){
    mapping = read.delim(paste0(results_Dir,file), stringsAsFactors = FALSE)
    study1_num = append(study1_num, length(mapping$STUDY[mapping$STUDY==studies[1]]))
    study2_num = append(study2_num, length(mapping$STUDY[mapping$STUDY==studies[2]]))
    shared = append(shared, sum(duplicated(mapping$MODULE)))
    shared_modules = append(shared_modules,mapping$MODULE[duplicated(mapping$MODULE)])
    all_study1_modules = append(all_study1_modules, mapping$MODULE[mapping$STUDY==studies[1]])
    all_study2_modules = append(all_study2_modules, mapping$MODULE[mapping$STUDY==studies[2]])
  }
  # modules that survived quasi overlap, per mapping
  quasi_overlapped = c()
  mappings = gsub("combined_","",gsub(".txt","",combined_files))
  shared_files = list.files(quasi_Dir)[grep("shared_", list.files(quasi_Dir))]
  for(map in mappings){
    if(sum(grepl(map,shared_files))==0)
      quasi_overlapped = append(quasi_overlapped, 0)
    else{
      file = shared_files[grep(map, shared_files)]
      shared_file = read.delim(paste0(quasi_Dir, file), stringsAsFactors = FALSE)
      quasi_overlapped = append(quasi_overlapped, length(shared_file$MODULE))
    }
  }
  # supersets resulting from merging the quasi-overlapped modules, per mapping
  superset_overlap = c()
  merged_files = list.files(quasi_Dir)[grep("merged_", list.files(quasi_Dir))]
  merged_info = merged_files[grep("info.txt", merged_files)]
  for(map in mappings){
    if(sum(grepl(map, merged_info))==0)
      superset_overlap = c(superset_overlap, 0)
    else{
      file = merged_info[grep(map, merged_info)]
      merged_file = read.delim(paste0(quasi_Dir,file), stringsAsFactors = FALSE)
      superset_overlap = append(superset_overlap, length(merged_file$MODULE))
    }
  }
  quantified = data.frame("Mapping"= mappings,stringsAsFactors = FALSE)
  quantified[,studies[1]] = study1_num
  quantified[,studies[2]] = study2_num
  quantified$`Number of Shared Modules` = shared
  quantified$`Number of Shared Modules with Quasi Overlap` = quasi_overlapped
  quantified$`Number of Shared Supersets` = superset_overlap
  # address no merged modules because there was only 1 module
  quantified$`Number of Shared Supersets`[quantified$`Number of Shared Modules with Quasi Overlap`==1] <- 1
  return(quantified)
}
# study1num = length(unique(all_study1_modules))
# study2num = length(unique(all_study2_modules))
# all = append(unique(all_study1_modules), unique(all_study2_modules))
# shared_overall = sum(duplicated(all))
# shared_byMapping = length(unique(shared_modules))
# take shared modules and get genes from each study. only direct. find consistent modules and unique ones for each different mappings.
# method to get SGM is case specific. second to last thing separated by "." in the file
# value_cutoff is association value - only show genes from SNPs that have values greater than the cutoff (lower p-value)
# modify function such that it looks in the kbr.mod.txt file and checks if the shared genes are part of the pathways because in the details file,
# it will output all the genes that are mapped to by that SNP but are not necessarily part of the pathway (but at least one will be)
# ^^ changed this so it doesnt do that in this output!
# ^^ make sure genes are actually in module - this is what parameter modfile is for
# there is probably a better, modular way I could do this that can do more than 2 studies.
# For every "shared_" file in results_Dir (one per SGM/mapping), look up each
# shared module's mapped genes per study and write:
#   long_<SGM>_overlap_genes.txt       one row per shared gene per module
#   noDescrip_<SGM>_overlap_genes.txt  per-module wide table without descriptions
#   All_mapping_Genes.txt              all mappings combined, with descriptions
# `modfile` (MODULE/GENE table) restricts reported genes to true module
# members; `value_cutoff` drops genes whose SNP association value is at or
# below the threshold. Uses retrieveGenes(), concatenate() and annotate_gene().
# NOTE(review): `protein_descrip` is kept for interface compatibility but gene
# descriptions actually come from the global `protein_names` used by
# annotate_gene() — confirm that global is loaded.
# BUGFIX: the long-table rbind now sits inside the `length(shared) > 0` block.
# Previously it ran for every module, so modules without shared genes
# re-appended the previous module's rows (the source of the duplicated
# LDLC_Control rows noted in the old comments) and the first module errored
# when `genes_df_long` was still undefined.
createGenesFiles <- function(results_Dir, genes_Dir, output_Dir, studies, protein_descrip, value_cutoff, modfile){
  modfile <- read.delim(modfile, stringsAsFactors = FALSE)
  shared_files = list.files(results_Dir)[grep("shared_", list.files(results_Dir))]
  all = data.frame(stringsAsFactors = FALSE)
  gene_files = c()
  for(s in studies){
    gene_files = append(gene_files, list.files(genes_Dir)[grep(s, list.files(genes_Dir))])
  }
  for(file in shared_files){
    mapping = read.delim(paste0(results_Dir, file), stringsAsFactors = FALSE)
    # SGM name is the second "."-separated token of the shared file's name
    SGM = unlist(strsplit(file, split = ".", fixed = TRUE))[2]
    cat(SGM, "\n")
    # per-study gene detail tables are SGM-specific, not module-specific:
    # read them once per shared file (hoisted out of the module loop)
    details = gene_files[grep(SGM, gene_files)]
    details_s1 = read.delim(paste0(genes_Dir, details[grep(studies[1], details)]), stringsAsFactors = FALSE)
    details_s2 = read.delim(paste0(genes_Dir, details[grep(studies[2], details)]), stringsAsFactors = FALSE)
    genes_s1 = c()
    genes_s2 = c()
    descrip_s1 = c()
    descrip_s2 = c()
    shared_genes = c()
    shared_descrip = c()
    all_long = data.frame(stringsAsFactors = FALSE)
    for(mod in mapping$MODULE){
      cat(mod, "\n")
      gene_set1 <- retrieveGenes(genes_df = details_s1, genes = genes_s1, descrip = descrip_s1,
                                 mod = mod, modfile = modfile, value_cutoff = value_cutoff)
      genes_s1 = append(genes_s1, gene_set1[["Concatenated genes"]])
      descrip_s1 = append(descrip_s1, gene_set1[["Concatenated descrip"]])
      gene_set2 <- retrieveGenes(genes_df = details_s2, genes = genes_s2, descrip = descrip_s2,
                                 mod = mod, modfile = modfile, value_cutoff = value_cutoff)
      genes_s2 = append(genes_s2, gene_set2[["Concatenated genes"]])
      descrip_s2 = append(descrip_s2, gene_set2[["Concatenated descrip"]])
      # genes mapped in BOTH studies for this module
      shared = intersect(gene_set1[["Gene list"]], gene_set2[["Gene list"]])
      shared_genes = append(shared_genes, concatenate(shared, mysep = ", "))
      shared_descrip = append(shared_descrip, concatenate(annotate_gene(shared)))
      if(length(shared) > 0){
        genes_df_long = data.frame("Mapping" = SGM,
                                   "MODULE" = mod,
                                   "Shared genes" = shared,
                                   "Gene Descriptions" = annotate_gene(shared), stringsAsFactors = FALSE)
        cat(" ", length(shared), "\n")
        all_long = rbind(all_long, genes_df_long[!duplicated(genes_df_long), ])
      }
    }
    write.table(all_long, paste0(output_Dir, "long_", SGM, "_overlap_genes.txt"),
                row.names = FALSE, quote = FALSE, sep = "\t")
    # wide per-module table for this mapping
    genes_df = data.frame("Mapping" = SGM,
                          "MODULE" = mapping$MODULE,
                          "DESCR" = mapping$DESCR,
                          stringsAsFactors = FALSE)
    genes_df$`Overlap Method` = mapping$METHOD
    genes_df[, paste0(studies[1], "_genes")] = genes_s1
    genes_df[, paste0(studies[2], "_genes")] = genes_s2
    genes_df[, paste0(studies[1], "_genes_descrip")] = descrip_s1
    genes_df[, paste0(studies[2], "_genes_descrip")] = descrip_s2
    genes_df$`Shared genes` = shared_genes
    genes_df$`Shared genes description` = shared_descrip
    # description columns can overflow spreadsheet cells, so also write a
    # description-free version
    write.table(genes_df[, c("Mapping", "MODULE", "DESCR", "Overlap Method",
                             paste0(studies[1], "_genes"), paste0(studies[2], "_genes"), "Shared genes")],
                paste0(output_Dir, "noDescrip_", SGM, "_overlap_genes.txt"),
                row.names = FALSE, quote = FALSE, sep = "\t")
    all = rbind(all, genes_df)
  }
  write.table(all, paste0(output_Dir, "All_mapping_Genes.txt"), row.names = FALSE, quote = FALSE, sep = "\t")
}
# Collect the genes mapped to module `mod` from a per-SNP details table.
# Gene cells are split on "," (one SNP can map to several genes), restricted
# to rows whose association VALUE exceeds `value_cutoff`, and filtered to
# actual members of the module per `modfile` (MODULE/GENE columns).
# Returns a list with elements:
#   "Gene list"            character vector of retained genes ("" when none)
#   "Concatenated genes"   comma-separated string (or a placeholder message)
#   "Concatenated descrip" matching descriptions via annotate_gene()
# Note: despite the old comment, this does NOT modify the caller's `genes` /
# `descrip` vectors (R arguments are copied on assignment); the two parameters
# are accepted only for interface compatibility and are ignored.
# Fix: the loop-invariant module-membership filter, which previously re-ran on
# the whole accumulated list after every cell, now runs once after splitting.
retrieveGenes <- function(genes_df, genes, descrip, mod, modfile, value_cutoff){
  set = list()
  if(sum(genes_df$MODULE == mod) == 0){
    # module absent from the details table entirely
    genes = "none"
    descrip = "none"
    set[["Gene list"]] = ""
  }
  else{
    cells = genes_df$GENE[genes_df$MODULE == mod & genes_df$VALUE > value_cutoff]
    if(length(cells) == 0){
      genes = "Genes not meeting association value threshold: "
      descrip = "Genes not meeting association value threshold: "
      set[["Gene list"]] = ""
    }
    else{
      # a cell may hold several comma-separated genes for one SNP
      gene_list = unlist(strsplit(cells, split = ","))
      # keep only genes that are actual members of this module
      gene_list = gene_list[!is.na(match(gene_list, modfile$GENE[modfile$MODULE == mod]))]
      genes = concatenate(gene_list, mysep = ", ")
      descrip = concatenate(annotate_gene(gene_list), mysep = ", ")
      set[["Gene list"]] = gene_list
    }
  }
  set[["Concatenated genes"]] = genes
  set[["Concatenated descrip"]] = descrip
  return(set)
}
# Look up a human-readable description for each gene/protein name in `vector`.
# Exact matches on preferred_name are used first; failing that, a digit-
# stripped version of the name is tried ("Possible annotation: ..."); otherwise
# the gene is reported as "Not annotated". Multiple matches are
# comma-concatenated. Returns a character vector aligned with `vector`.
# NOTE(review): depends on a global data frame `protein_names` (columns
# preferred_name, annotation) and on concatenate() being in scope.
# (historical note: the argument used to be accessed as vector$GENE)
annotate_gene <- function(vector){
  info = c()
  for(m in vector){
    if(length(protein_names$annotation[protein_names$preferred_name==m])==0){
      # no exact match: retry with all digits removed from the name
      if(length(protein_names$annotation[protein_names$preferred_name==gsub("[[:digit:]]","",m)])==0){
        info = append(info, "Not annotated")
      }
      else{
        info = append(info, paste0("Possible annotation: ",
                                   concatenate(protein_names$annotation[protein_names$preferred_name==gsub("[[:digit:]]","",m)],
                                               mysep = ",")))
      }
    }
    else if(length(protein_names$annotation[protein_names$preferred_name==m])>1){
      info = append(info,
                    concatenate(protein_names$annotation[protein_names$preferred_name==m],
                                mysep = ","))
      cat("This protein had more than one annotation:", m, "\n")
    }
    else{
      info = append(info, protein_names$annotation[protein_names$preferred_name==m])
    }
  }
  return(info)
}
# Cross-trait replication table. For every mapping method:
#   1. collect all trait result files for that mapping, keep modules with
#      FDR < FDR_trim (dropping _ctrlA/_ctrlB control modules),
#   2. for each module, flag per trait "YES"/"NO" replication (YES when the
#      module's FDR for that trait is below FDR_cutoff), plus a "<trait>_FDR"
#      column holding the FDR ("NS" when the module was absent for a trait).
# Modules replicating in fewer than two traits are removed, rows are sorted by
# replication count, and a DESCR column is inserted via addDESCR(info_file).
# File names are assumed to be "<trait>.<mapping>....".
replicate <- function(results_Dir, FDR_trim, FDR_cutoff, info_file){
  files = list.files(results_Dir)
  mapping = c()
  traits = c()
  for(f in files){
    mapping = append(mapping, unlist(strsplit(f, split = ".",fixed = TRUE))[2])
    traits = append(traits, unlist(strsplit(f, split = ".",fixed = TRUE))[1])
  }
  mapping = unique(mapping)
  traits = unique(traits)
  total = data.frame(stringsAsFactors = FALSE)
  for(map in mapping){
    mapping_files = files[grep(map,files)]
    mapping_total = data.frame()
    for(file in mapping_files){
      df = read.delim(paste0(results_Dir,file), stringsAsFactors = FALSE)
      # keep suggestive modules only; drop internal control modules
      df = df[df$FDR<FDR_trim,]
      df = df[!grepl("_ctrlA",df$MODULE),]
      df = df[!grepl("_ctrlB",df$MODULE),]
      if(length(df$MODULE)==0 | is.na(df$FDR[1])){
        next
      }
      df$Trait = unlist(strsplit(file, split = ".", fixed = TRUE))[1]
      df$Mapping = unlist(strsplit(file, split = ".", fixed = TRUE))[2]
      mapping_total = rbind(mapping_total, df)
    }
    if(dim(mapping_total)[1]==0){
      next
    }
    # build YES/NO and FDR columns for every trait over the union of modules
    mapping_ls = list()
    for(trait in 1:length(traits)){
      mapping_ls[[traits[trait]]] = c()
      mapping_ls[[paste0(traits[trait],"_FDR")]] = c()
      for(module in unique(mapping_total$MODULE)){
        if(sum(mapping_total$Trait[mapping_total$MODULE==module]==traits[trait])==0){
          mapping_ls[[traits[trait]]] = append(mapping_ls[[traits[trait]]], "NO")
        }
        else if(mapping_total$FDR[mapping_total$MODULE==module & mapping_total$Trait==traits[trait]]>FDR_cutoff){
          mapping_ls[[traits[trait]]] = append(mapping_ls[[traits[trait]]], "NO")
        }
        else{
          mapping_ls[[traits[trait]]] = append(mapping_ls[[traits[trait]]], "YES")
        }
        if(sum(mapping_total$Trait[mapping_total$MODULE==module]==traits[trait])>0){
          mapping_ls[[paste0(traits[trait],"_FDR")]] = append(mapping_ls[[paste0(traits[trait],"_FDR")]],
                                                              mapping_total$FDR[mapping_total$MODULE==module & mapping_total$Trait==traits[trait]])
        }
        else{
          mapping_ls[[paste0(traits[trait],"_FDR")]] = append(mapping_ls[[paste0(traits[trait],"_FDR")]], "NS")
        }
      }
    }
    mapping_df = data.frame("Mapping" = map,
                            "MODULE"=unique(mapping_total$MODULE))
    for(item in 1:length(mapping_ls)){
      #mapping_df = cbind(mapping_df, mapping_ls[[item]])
      mapping_df[,names(mapping_ls)[item]] = mapping_ls[[item]]
    }
    total = rbind(total, mapping_df)
  }
  # sum "YES"s in a row
  rownames(total) <- seq(length=nrow(total))
  YESs = c()
  for(i in 1:nrow(total)){
    YESs[i] = sum(total[i,]=="YES")
  }
  total["n_Overlap"] = YESs
  total = total[!(total$n_Overlap<2),] # get rid of no overlaps
  total = total[order(total$n_Overlap, decreasing = TRUE),]
  total = addDESCR(df = total, position_to_add = 3, info_file)
  return(total)
}
# Insert a DESCR column into `df` at column position `position_to_add`,
# looked up from the MODULE -> DESCR table in `info_file`. Assumes `df` has a
# MODULE column and that position_to_add is before the last column.
# Fixes vs the original:
#   - modules missing from the info table now yield NA instead of a
#     length-mismatch error (vectorized match() lookup; first match wins),
#   - the reorder no longer duplicates DESCR (the old `after` index range
#     included the trailing DESCR column, so it appeared twice as DESCR.1).
addDESCR <- function(df, position_to_add, info_file){
  info <- read.delim(info_file, stringsAsFactors = FALSE)
  # vectorized lookup replaces the old grow-a-vector loop
  df$DESCR <- info$DESCR[match(df$MODULE, info$MODULE)]
  col_names <- colnames(df)
  n_cols <- length(col_names)                    # includes the new DESCR (last)
  before <- col_names[1:(position_to_add - 1)]
  after <- col_names[position_to_add:(n_cols - 1)]  # original columns after the slot
  df <- df[, c(before, "DESCR", after)]
  return(df)
}
# Return the rows of `df` flagged "YES" in any of the given study columns.
# A row that is "YES" for several of the requested studies appears once per
# matching study (original accumulation behavior, preserved).
# BUGFIX: the original built `result` but never returned it — a `for` loop is
# the last expression and evaluates to NULL, so callers always got NULL.
subset_replicate <- function(df, studies){
  result = data.frame(stringsAsFactors = FALSE)
  for(i in studies){
    result = rbind(result, df[df[, i] == "YES", ])
  }
  return(result)
}
# Stack every "shared_" file in Dir into one data frame, tagging each row with
# its mapping name (second "."-separated token of the file name) and
# reordering the columns to: Mapping, col1, col3, col2.
# The shared files are assumed to have exactly three columns.
makeDf <- function(Dir){
  shared_files <- list.files(Dir)[grep("shared_", list.files(Dir))]
  # one reordered table per file, Mapping column attached
  pieces <- lapply(shared_files, function(fname) {
    sgm <- unlist(strsplit(fname, split = ".", fixed = TRUE))[2]
    tbl <- read.delim(paste0(Dir, fname), stringsAsFactors = FALSE)
    tbl$Mapping <- sgm
    tbl[, c(4, 1, 3, 2)]
  })
  combined <- data.frame(stringsAsFactors = FALSE)
  for (piece in pieces) {
    combined <- rbind(combined, piece)
  }
  return(combined)
}
# Summarise which mappings each module in `modules_list` appears in. `df` must
# have MODULE and Mapping columns; `DESCR` is a MODULE -> DESCR lookup table.
# Returns a data.frame (MODULE, DESCR, Mapping, nMapping) sorted by the number
# of mappings, descending. Relies on concatenate() being in scope.
# BUGFIX: descriptions now come from the DESCR argument; the original ignored
# the parameter and read a global `all_DESCR`, failing whenever that global
# was not loaded. Modules with no description are printed (as before) and get
# NA instead of silently shortening the vector and breaking data.frame().
describePathways <- function(modules_list, df, DESCR){
  mappings = c()
  num = c()
  for(mod in modules_list){
    num = append(num, length(df$Mapping[df$MODULE == mod]))
    mappings = append(mappings, concatenate(df$Mapping[df$MODULE == mod], mysep = ", "))
  }
  describe = c()
  for(mod in modules_list){
    mod_descr = DESCR$DESCR[DESCR$MODULE == mod]
    if(length(mod_descr) == 0){
      cat(mod)  # flag modules missing a description
      describe = append(describe, NA_character_)  # keep lengths aligned
    }
    else{
      describe = append(describe, mod_descr[1])
    }
  }
  new_df = data.frame("MODULE" = modules_list,
                      "DESCR" = describe,
                      "Mapping" = mappings,
                      "nMapping" = num, stringsAsFactors = FALSE)
  new_df = new_df[order(new_df$nMapping, decreasing = TRUE), ]
  return(new_df)
}
# find consistent modules and unique ones for each different mappings. Hm, I already did this.
# Purpose:
# 1) consolidate results from all mappings
# 2) take genes result file and append to each module in a long format. (each gene separated by comma)
# 3) add gene information
# nerve = read.delim("/Users/jessicading/Desktop/Yang_Lab/T2D_AD/Data/AD/quasi_overlap/supersets.AD_quasi.Nerve_Tibial.txt", header = TRUE, stringsAsFactors = FALSE)
# nerve = nerve[!(nerve$MODULE=="Nerve_Tibial.GTEXv7.MEGENA_126,.."),] #this module was not able to be annotated!!!
# write.table(nerve, "/Users/jessicading/Desktop/Yang_Lab/T2D_AD/Data/AD/quasi_overlap/supersets.AD_quasi.Nerve_Tibial1.txt", row.names = FALSE, quote = FALSE, sep = "\t")
#"Nerve_Tibial.GTEXv7.MEGENA_126,.." THIS MODULE WAS NOT ABLE TO BE ANNOTATED IN DIRECT OVERLAP EITHER
#if you get the error: "Error in cbind(rep(modulesize, dim(fm)[1]), fm) : object 'fm' not found" - FIND OFFENDING MODULE AND TAKE IT OUT
#WORKFLOW FOR GENE OVERLAP (QUASI)
#1. have files for both studies (ex. DIAGRAM and UKB) - combine canonical and coexpression
# for AD, I put them in "canonical_coexpr". for T2D, I put them in "combined" (FOLDERS)
#2. use geneOverlap function for to produce "shared_" files. all other files are used in this function to produce the "shared_" files in the end and have useful information
#3. transfer "shared_" files to new folder. I called this "quasi_overlap"
#4. merge modules in "shared_" files
#5. annotate supersets resulting from step 4 using the annotate_supersets function
#6. do summaryTable function to get summary table!
# setwd("/Users/jessicading/Desktop/Yang_Lab/T2D_AD/Data/AD_T2D/shared_quasi/")
# for(i in files){
# file = read.delim(i, header = TRUE, stringsAsFactors = FALSE)
# write.table(file, paste0("AD.",i), row.names = FALSE, quote = FALSE, sep = "\t")
# }
#
# files = list.files("/Users/jessicading/Desktop/Yang_Lab/T2D_AD/Data/AD_T2D/shared_quasi/")
# subset = files[grep("AD",files)]
# SGMs = gsub("AD.shared_", "", gsub(".50.20.txt", "", subset))
#
#took out Esophagus_Muscularis.GTEXv7.MEGENA_168,..
|
292aa87d337f0be003f50dbfccc9aadd0680f024
|
7cf5aa38dd8dbd38de1b4b4823de713bfa673a1e
|
/aa-second-derivative/main.R
|
88808c6dc18a21a64d52fec7f4d2bbd514531a8f
|
[] |
no_license
|
bendotli/rpensemble
|
622a2acc6b762da907c211f1f79fc17523b0df14
|
9dadcc0624e4b78bab5f04aa1e5f2e1e0ec95870
|
refs/heads/master
| 2021-01-21T21:40:16.035204
| 2016-03-21T23:29:48
| 2016-03-21T23:29:48
| 41,241,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,766
|
r
|
main.R
|
# TODO: Add comment
#
# Author: Ben
###############################################################################
source("rpecompare/builtin_models.R")
source("rpecompare/basicmodel.R")
# Build a histogram of G_{n,1}(p)=P(P(Chat_n(AX)=1|X)<=p)
# Sample X, P(Chat_n(AX)=1|X) (by sampling)
# Draw one Monte Carlo sample of the class-conditional vote distributions
# G_{n,i}(p) = P(P(Chat_n(AX) = i | X) <= p). Simulates a data set via
# model.1(), fits the random-projection ensemble (RPParallel) with
# Haar-distributed projections, and returns -- for each training point --
# the fraction of base classifiers voting class 1, split by true class.
sample.g <- function() {
  sim <- model.1()#basic.model(p = 5, d = 2)
  train_set <- sim$train
  test_set <- sim$test
  n_train <- length(train_set$y)
  n_test <- length(test_set$y)
  p <- ncol(train_set) - 1
  d <- 5 ###############################
  # Fit the RP ensemble; rows of the output are observations, columns are
  # base classifiers, entries coded 1/2.
  votes <- RPParallel(XTrain = data.matrix(train_set[-(p + 1)]), YTrain = train_set$y,
                      XTest = data.matrix(test_set[-(p + 1)]), d = d, B1 = 100, B2 = 100,
                      #projmethod = "axis",
                      cores = 1)
  # Rows 1..n_train correspond to training observations; since predictions
  # are coded 1/2, 2 - rowMeans(...) is the fraction of class-1 votes.
  train_votes <- votes[1:n_train, ]
  g1_samples <- 2 - rowMeans(train_votes[train_set$y == "class.1", ], na.rm = TRUE)
  g2_samples <- 2 - rowMeans(train_votes[train_set$y == "class.2", ], na.rm = TRUE)
  # Return raw samples rather than ecdf objects so callers can pool
  # replicates before building the empirical CDF.
  list(g1_samples, g2_samples)
}
replicate.sample.g <- function(r = 10) {
  # Run sample.g() r times in parallel and collect the results.
  #
  # r: number of Monte Carlo replicates (default 10).
  # Returns the matrix produced by parSapply: row 1 holds the class-1
  # sample lists, row 2 the class-2 sample lists, one column per replicate.
  library(parallel)
  # Guard against single-core machines: detectCores() - 1 could be 0,
  # and makeCluster(0) errors.
  cl <- makeCluster(max(1L, detectCores() - 1))
  # BUG FIX: shut the workers down even if export/eval/apply errors out;
  # previously an error before stopCluster() leaked the cluster.
  on.exit(stopCluster(cl), add = TRUE)
  clusterExport(cl, c("sample.g"))
  clusterEvalQ(cl, source("rpecompare/builtin_models.R"))
  clusterEvalQ(cl, source("rpecompare/basicmodel.R"))
  # seq_len(r) is safe when r = 0, unlike 1:r (which yields c(1, 0)).
  parSapply(cl, seq_len(r), function(i, ...) { sample.g() })
}
#out = replicate(10, sample.g())
out <- replicate.sample.g()
# Pool the per-replicate samples and build one empirical CDF of the
# class-1 vote proportion for each true class.
pooled.1 <- do.call(c, out[1, ])
pooled.2 <- do.call(c, out[2, ])
G.n.1 <- ecdf(pooled.1)
G.n.2 <- ecdf(pooled.2)
#png("G_ni_mini.png")
#plot(G.n.1, col="red", xval=(1:100)/100)
#lines(G.n.2, col="green", xval=(1:100)/100)
#dev.off()
# Histogram of the raw class-1 samples, written to disk.
R1 <- pooled.1
png("r1_dist.png")
hist(R1)
dev.off()
|
575aa4912e8e7f5176cd6d308b2f188ef818d7af
|
bad08314942d890670cb8186827e93387f8242cb
|
/man/pairwiselogFC.Rd
|
06f374508db4712ad849caa6720295b2db8f2c05
|
[] |
no_license
|
stamats/MKmisc
|
faaa5a4bc04d015143fcd2d468bc11aa12ef5633
|
e738e1f1b18899af42c1149335c6ee063e9de80c
|
refs/heads/master
| 2022-11-25T06:06:56.692986
| 2022-11-19T15:35:13
| 2022-11-19T15:35:13
| 33,780,395
| 10
| 2
| null | 2015-06-29T18:02:53
| 2015-04-11T15:13:48
|
R
|
UTF-8
|
R
| false
| false
| 1,493
|
rd
|
pairwiselogFC.Rd
|
\name{pairwise.logfc}
\alias{pairwise.logfc}
\title{ Compute pairwise log-fold changes }
\description{
The function computes pairwise log-fold changes.
}
\usage{
pairwise.logfc(x, g, ave = mean, log = TRUE, base = 2, ...)
}
\arguments{
\item{x}{ numeric vector. }
\item{g}{ grouping vector or factor }
\item{ave}{ function to compute the group averages. }
\item{log}{ logical. Is the data logarithmic? }
\item{base}{ If \code{log = TRUE}, the base which was used to compute
the logarithms. }
\item{\dots}{ optional arguments to \code{ave}. }
}
\details{
The function computes pairwise log-fold changes between groups, where
the group values are aggregated using the function which is
given by the argument \code{ave}.
The implementation is in certain aspects analogously to \code{\link[stats]{pairwise.t.test}}.
}
\value{
Vector with pairwise log-fold changes.
}
%\references{ ~put references to the literature/web site here ~ }
\author{ Matthias Kohl \email{Matthias.Kohl@stamats.de}}
%\note{}
\seealso{ \code{\link[stats]{pairwise.t.test}} }
\examples{
set.seed(13)
x <- rnorm(100) ## assumed as log2-data
g <- factor(sample(1:4, 100, replace = TRUE))
levels(g) <- c("a", "b", "c", "d")
pairwise.logfc(x, g)
## some small checks
res <- by(x, list(g), mean)
res[[1]] - res[[2]] # a vs. b
res[[1]] - res[[3]] # a vs. c
res[[1]] - res[[4]] # a vs. d
res[[2]] - res[[3]] # b vs. c
res[[2]] - res[[4]] # b vs. d
res[[3]] - res[[4]] # c vs. d
}
\keyword{univar}
|
d0dcf6a6144c233ef63e0056e56fd7b80a85d043
|
bd4c0e98e2ca54a36f5b6423326e8915cc471ec0
|
/tests/testthat/node_modules/r-dummy.csv/main.r
|
77ee3bab6ddc58d1e424960a8f9ca9363fbd1216
|
[
"BSD-2-Clause"
] |
permissive
|
tlevine/nprm
|
e30c9073fe10fce4874b8dd69e26b43a398cb697
|
21385253c2c1dc8ea07519437a045ae0553a9bf8
|
refs/heads/master
| 2021-01-25T07:08:06.399723
| 2013-12-12T20:50:09
| 2013-12-12T20:50:09
| 14,666,018
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33
|
r
|
main.r
|
# Read the dummy CSV fixture from the current working directory.
exports <- read.csv("dummy.csv")
|
a11064ab7acdbd9c1771bab24ea1723b273174cf
|
f5237133433557b81638bcffe7c7b264ece4faec
|
/STRINGdb-HGNCsymbols.R
|
a83312402401d3f68d52733ef9d8e85683932a12
|
[
"MIT"
] |
permissive
|
ameya225/useful_functions
|
04277b6eb50a0e73e83ebcb0541a2a105a76a62f
|
f5b2712d7d22c32d128cfe18add189baa02b16e4
|
refs/heads/master
| 2021-05-14T08:53:09.205237
| 2018-08-07T05:33:18
| 2018-08-07T05:33:18
| 116,312,512
| 0
| 0
| null | 2018-02-14T00:52:26
| 2018-01-04T22:09:13
|
R
|
UTF-8
|
R
| false
| false
| 1,891
|
r
|
STRINGdb-HGNCsymbols.R
|
# Code by Ameya Kulkarni
# E-mail at echo blvmlbso@nbjm.fjotufjo.zv.fev | tr '[b-{' '[a-z]'
# This code can be used to convert Stringdb Ensembl protein ids to HGNC symbols using biomaRt
# The example code is shown for Human protein interaction data from experimental evidence only
# Human Protein Interaction data file can be downloaded from https://stringdb-static.org/download/protein.links.full.v10.5.txt.gz
# Protein pairs with experimental evidence of interaction can be extracted using
# zgrep ^"9606\." protein.links.full.txt.gz | awk '($10 != 0) { print $1, $2, $10 }' > direct_experimental_data_human.txt
library(biomaRt)
library(dplyr)
# Modify using path to direct_experimental_data_human.txt
ppi <- read.table("path-to-direct_experimental_data_human.txt")
# Strip the "9606." taxon prefix from both protein columns.
# BUG FIX: fixed = TRUE treats the pattern as a literal string; without it
# the "." in "9606." is a regex wildcard matching any character.
ppi <- as.data.frame(sapply(ppi, gsub, pattern = "9606.", replacement = "", fixed = TRUE))
# Drop the evidence-score column; only the two protein id columns are needed.
ppi <- ppi[, -3]
# Map Ensembl peptide ids to HGNC symbols via the Ensembl BioMart.
mart <- useDataset("hsapiens_gene_ensembl", useMart("ensembl"))
P1 <- as.data.frame(ppi$V1)
P2 <- as.data.frame(ppi$V2)
# Query BioMart once per unique id, then join the symbols back onto the
# full (repeated) id columns.
P1_uniq <- unique(P1)
G1_uniq <- getBM(filters = "ensembl_peptide_id", attributes = c("ensembl_peptide_id", "hgnc_symbol"), values = P1_uniq, mart = mart)
P2_uniq <- unique(P2)
G2_uniq <- getBM(filters = "ensembl_peptide_id", attributes = c("ensembl_peptide_id", "hgnc_symbol"), values = P2_uniq, mart = mart)
colnames(P1) <- "ensembl_peptide_id"
colnames(P2) <- "ensembl_peptide_id"
join1 <- dplyr::left_join(as_tibble(P1) %>% mutate(ensembl_peptide_id = as.character(ensembl_peptide_id)), as_tibble(G1_uniq) %>% mutate(ensembl_peptide_id = as.character(ensembl_peptide_id)), by = "ensembl_peptide_id")
join2 <- dplyr::left_join(as_tibble(P2) %>% mutate(ensembl_peptide_id = as.character(ensembl_peptide_id)), as_tibble(G2_uniq) %>% mutate(ensembl_peptide_id = as.character(ensembl_peptide_id)), by = "ensembl_peptide_id")
# Pair up the translated symbols and keep only fully annotated rows.
ppi_join <- as.data.frame(cbind(join1$hgnc_symbol, join2$hgnc_symbol))
ppi_join <- ppi_join[complete.cases(ppi_join), ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.