| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2 to 327 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 91 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 134 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 to 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 to 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 to 2023-09-05 12:13:49 |
| github_id | int64 | 19.4k to 671M (nullable) |
| star_events_count | int64 | 0 to 40k |
| fork_events_count | int64 | 0 to 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] | 2012-06-21 16:39:19 to 2023-09-14 21:52:42 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-25 01:21:32 to 2023-06-28 13:19:12 (nullable) |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 value (R) |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 to 9.18M |
| extension | string | 20 classes |
| filename | string | length 1 to 141 |
| content | string | length 7 to 9.18M |
1f70b98ac551dfc72af5bff1e81fa7686fdce681
|
17e0b4e4c0fddaa71ce2b137b7f59d17fa47243b
|
/res/ld_st_indirect_y.r
|
5512cf6ef41e0b2c6d979ba0e7037b9e00e7c506
|
[
"MIT"
] |
permissive
|
JSpuri/EmuParadise
|
6f6d26c43d9dce8f05448b6c07db133d691e39b2
|
b8f6cf8823f8553f28dab5c6b44df20978ad6ba0
|
refs/heads/master
| 2020-06-28T18:33:56.341244
| 2019-11-22T22:49:53
| 2019-11-22T22:49:53
| 200,309,043
| 0
| 0
|
MIT
| 2019-09-27T15:59:31
| 2019-08-02T23:26:20
|
C
|
UTF-8
|
R
| false
| false
| 1,534
|
r
|
ld_st_indirect_y.r
|
| pc = 0xc002 | a = 0xab | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc004 | a = 0xab | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x00ff] = 0xab |
| pc = 0xc006 | a = 0x07 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc008 | a = 0x07 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0000] = 0x07 |
| pc = 0xc00a | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc00c | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc00e | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc010 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x07ab] = 0x01 |
| pc = 0xc012 | a = 0x01 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc014 | a = 0xf2 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc016 | a = 0xf2 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x07ac] = 0xf2 |
| pc = 0xc018 | a = 0x00 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc01a | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc01c | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x07ab] = 0x01 |
| pc = 0xc01e | a = 0x01 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc020 | a = 0xf2 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x07ac] = 0xf2 |
|
885f5c5b7232cb1f57a18de38570f57b939d3e0d
|
3c639ff3361293d0c1fb2cbec04680da9a271dbc
|
/Scripts/Gut Check.R
|
70abcce86598c6241c93210226b41ac23d307c76
|
[] |
no_license
|
9Olive/FACES
|
8d2093e3b7fbf4c2a1c68e17532f810413031320
|
24f986d29ae47e423a8f6470f3c0a99f976f9e4a
|
refs/heads/master
| 2022-11-25T08:40:38.858945
| 2020-07-28T01:36:14
| 2020-07-28T01:36:14
| 274,549,081
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
Gut Check.R
|
library(dplyr)   # %>%, group_by, summarise, filter used throughout
faces_avg <- faces %>%
group_by(Group, `Participant #`, Time, Survey) %>%
summarise(avg_resp = mean(Response)) %>%
ungroup()
faces_sum <- faces %>%
group_by(Group, `Participant #`, Time, Survey) %>%
summarise(avg_resp = sum(Response)) %>%
ungroup()
#Assuming the faces data is cleaned correctly; can be verified elsewhere.
#starting w/ base faces data
prePostExp <- faces_sum %>%
filter(Group == 'Experimental') %>%
mutate(Survey = factor(Survey))
surveys <- unique(prePostExp$Survey)
pValppExp <- c()
for (i in 1:6) {
pValppExp[i] <- wilcox.test(avg_resp ~ Time,
paired = TRUE,
alternative="less",
data = filter(prePostExp, Survey == surveys[i]))[3]
}
pValppExp <- t(pValppExp)
colnames(pValppExp) <- surveys
pValppExp
prePostCon <- faces_sum %>%
filter(Group == 'Control') %>%
mutate(Survey = factor(Survey))
pValppCon <- c()
for (i in 1:6) {
pValppCon[i] <- wilcox.test(avg_resp ~ Time,
paired = TRUE,
alternative="less",
data = filter(prePostCon, Survey == surveys[i]))[3]
}
pValppCon <- t(pValppCon)
colnames(pValppCon) <- surveys
pValppCon
#-------------------------------
tcPre <- faces_sum %>%
filter(Time == 'Pre') %>%
mutate(Survey = factor(Survey), factor(Group))
pValTCPre <- c()
for (i in 1:6) {
pValTCPre[i] <- wilcox.test(avg_resp ~ Group,
paired = FALSE,
alternative="two.sided",
data = filter(tcPre, Survey == surveys[i]))[3]
}
pValTCPre <- t(pValTCPre)
colnames(pValTCPre) <- surveys
pValTCPre
tcPost <- faces_sum %>%
filter(Time == 'Post') %>%
mutate(Survey = factor(Survey), factor(Group))
pValTCPost <- c()
for (i in 1:6) {
pValTCPost[i] <- wilcox.test(avg_resp ~ Group,
paired = FALSE,
alternative="two.sided",
data = filter(tcPost, Survey == surveys[i]))[3]
}
pValTCPost <- t(pValTCPost)
colnames(pValTCPost) <- surveys
pValTCPost
|
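The loops in Gut Check.R above index `1:6` by hand; the same pattern can be written as one `sapply` over the survey levels. The sketch below is not from the original repo: the `demo` data frame is invented stand-in data, and the two-vector call to `wilcox.test` replaces the formula interface (whose `paired=` argument is not portable across R versions).

library(dplyr)

set.seed(1)
demo <- data.frame(
  Time     = rep(c("Pre", "Post"), each = 30),
  Survey   = rep(paste0("S", 1:6), times = 10),
  avg_resp = rnorm(60)
)

p_vals <- sapply(unique(demo$Survey), function(s) {
  d <- filter(demo, Survey == s)
  wilcox.test(d$avg_resp[d$Time == "Pre"],
              d$avg_resp[d$Time == "Post"],
              paired = TRUE, alternative = "less")$p.value
})
p_vals  # named vector, one p-value per survey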
58744939b6615be71a99543d98fa21fb984aa0f6
|
5976a3a11d1b46dc1b0f38d1235adc629fe54101
|
/Plot 5.R
|
d24bfce3a5022472ada831ae63954dbbf21b2eb9
|
[] |
no_license
|
DamekH/ExData_Emissions_Project
|
92a7d18d04e306757574975bcc5e8bd66efaa46e
|
e90bc25d13a2a206934f6a65eb7421405ea5fc67
|
refs/heads/master
| 2021-01-01T18:05:12.062714
| 2017-07-28T03:55:31
| 2017-07-28T03:55:31
| 98,243,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 751
|
r
|
Plot 5.R
|
# Load and assign data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Pulls subset of Baltimore and on-road data from full dataset
subset_Baltimore_onroad <- NEI[NEI$fips == "24510" & NEI$type == "ON-ROAD", ]
# Aggregates data from each year
Yearly_Emissions_Baltimore_onroad <- aggregate(Emissions ~ year, subset_Baltimore_onroad, sum)
# Open device and define size of image
png("plot5.png", width = 480, height = 480)
# Construct and save barplot
barplot(Yearly_Emissions_Baltimore_onroad$Emissions, names.arg = Yearly_Emissions_Baltimore_onroad$year,
main = "On-Road Emissions by Year for Baltimore City, MD", xlab = "Year", ylab = "Sum of on-road PM2.5 emissions (tons)")
dev.off()
|
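For comparison, the `aggregate()` step in Plot 5.R maps directly onto a dplyr pipeline. This is only an illustrative alternative; `nei_demo` below is fabricated data standing in for the real NEI table, which is not bundled with this record.

library(dplyr)

set.seed(1)
nei_demo <- data.frame(
  year      = rep(c(1999, 2002, 2005, 2008), each = 3),
  Emissions = runif(12, 0, 50)
)

nei_demo %>%
  group_by(year) %>%
  summarise(Emissions = sum(Emissions))  # same shape as aggregate(Emissions ~ year, ...)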
78a5e62456ff4591daf9d19a27196fab0b3e0640
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/bayesm/man/cgetC.Rd
|
02b9235a6e674c681a8387b00d4e7d0a1c07b456
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 1,043
|
rd
|
cgetC.Rd
|
\name{cgetC}
\alias{cgetC}
\title{ Obtain A List of Cut-offs for Scale Usage Problems }
\description{
\code{cgetC} obtains a list of censoring points, or cut-offs, used
in the ordinal multivariate probit model of Rossi et al (2001).
This approach uses a quadratic parameterization of the cut-offs.
The model is useful for modeling correlated ordinal data on a
scale from 1, ..., k with different scale usage patterns.
}
\usage{
cgetC(e, k)
}
\arguments{
\item{e}{ quadratic parameter (>0 and less than 1) }
\item{k}{ items are on a scale from 1, \ldots, k }
}
\section{Warning}{
This is a utility function which implements \strong{no} error-checking.
}
\value{
A vector of k+1 cut-offs.
}
\references{
Rossi et al (2001), \dQuote{Overcoming Scale Usage Heterogeneity,} \emph{JASA} 96, 20-31.
}
\author{ Rob McCulloch and Peter Rossi, Anderson School, UCLA.
\email{perossichi@gmail.com}.
}
\seealso{ \code{\link{rscaleUsage}} }
\examples{
##
cgetC(.1,10)
}
\keyword{ utilities }
|
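To visualize what a "quadratic parameterization of the cut-offs" can look like, here is a toy sketch. The function below is hypothetical and is not bayesm's actual formula; in practice one would simply call `cgetC(e, k)` from the installed package.

quad_cutoffs <- function(e, k) {
  i <- 0:k
  i + e * (i^2 - mean(i^2))  # assumed quadratic bending of equally spaced cut-offs
}
quad_cutoffs(0.1, 10)  # length k+1, analogous in shape to cgetC(.1, 10)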
dd9b456a54f864e380656282eda7307d5cba9c73
|
da6a579eef90305d8a9b10e728fdb737303e063e
|
/model_branch/subCodes/LapseStatistics.R
|
0f4e20f316cb8596bd571185606679e8f3a8ec50
|
[] |
no_license
|
fidelsteiner/debriscoveredglaciers
|
ab7433db84cf3c40c3d22c20917e92504ac952e6
|
5f989bfb53e0a1311749489c2a19f46d14ddcc1a
|
refs/heads/master
| 2021-06-09T18:10:38.717316
| 2021-05-22T08:59:27
| 2021-05-22T08:59:27
| 147,206,872
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
LapseStatistics.R
|
################################################################################
# Statistics of Bulk Models
#
# LapseStatistics.R
#
# ReadMe: calculate Statistics for Model Performance
#
# Created: 2017/11/27
# Latest Revision: 2017/11/27
#
# Jakob F Steiner | PhD candidate | Faculty of Geosciences | Universiteit Utrecht |
# Heidelberglaan 2, 3584 CS Utrecht | W.C. van Unnik building | Room 124, Zonneveldvleugel |
# j.f.steiner@uu.nl | www.uu.nl/staff/jfsteiner | www.mountainhydrology.org
#
LapseStatsitics <- function(Qmeas, Qmod, match) {
#browser()
TStats <- array(0, c(1, 3));
TStats[1,1] <- summary(lm(Qmeas[match] ~ Qmod[match]))$r.squared # R2 of Sensible Heat Flux
TStats[1,2] <- sqrt(sum((Qmeas[match] - Qmod[match])^2,na.rm=T) / length(which(is.na((Qmeas[match] - Qmod[match])^2)==F))); #RMSE
TStats[1,3] <- sum(Qmeas[match] - Qmod[match],na.rm=T) / length(which(is.na((Qmeas[match] - Qmod[match])^2)==F)) # MBE
LapseStatsitics <- TStats
}
|
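A usage sketch for the function above (not part of the original file): simulated measured and modelled series plus an index vector of valid observations. The call keeps the file's own spelling of the function name, `LapseStatsitics`.

set.seed(42)
Qmeas <- rnorm(100, mean = 50, sd = 10)  # "measured" series
Qmod  <- Qmeas + rnorm(100, sd = 5)      # "modelled" series with noise
match <- 1:100                           # indices of valid pairs

stats <- LapseStatsitics(Qmeas, Qmod, match)
colnames(stats) <- c("R2", "RMSE", "MBE")
stats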
9aee59fca1ea196e96be120aa1817f4121abaed5
|
f22ceaa4fbb61eb443b802964c78ece95151e76e
|
/plot4.R
|
f6b6991603771f1cd2e9f338e143a773ccdc828c
|
[] |
no_license
|
pmcody77/ExData_Plotting1
|
7cf5435c25927fce4908a6549c9fc2ca345fe427
|
7041ad733dce9c216ad37d215b8215aeb3d08c03
|
refs/heads/master
| 2020-12-14T09:57:52.094168
| 2017-07-02T21:08:40
| 2017-07-02T21:08:40
| 95,485,259
| 0
| 0
| null | 2017-06-26T20:11:01
| 2017-06-26T20:11:01
| null |
UTF-8
|
R
| false
| false
| 2,005
|
r
|
plot4.R
|
## Plotting Assignment #1 for Coursera course on Exploratory Data Analysis
##
## Assumes the UCI data set is already downloaded and extracted to working directory
#### Plot4.R
#####
## Loading and preparing the data frame to plot from
#####
#set coltypes so numeric columns come in as numbers not characters when loaded in (but date must be converted)
coltypes <- c("character", "character", "double", "double","double","double","double","double","double")
hhdata <- read.table("household_power_consumption.txt", header = TRUE, sep=";", colClasses = coltypes, na.strings = c("?"))
#convert date and time variables from characters to one variable called datetime
hhdata$datetime <- strptime(paste(hhdata$Date,hhdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#subset the dataframe to the dates of interest
startdate <- strptime("2007-02-01", "%Y-%m-%d")
enddate <- strptime("2007-02-03", "%Y-%m-%d") #will use < this date, thus including the 2nd
hhdata$includedate <- (hhdata$datetime >= startdate & hhdata$datetime < enddate)
subdata <- subset(hhdata, includedate == TRUE)
png("Plot4.png", width = 480, height = 480, units = "px")
## Set up for four plots
par(mfrow = c(2,2))
## CREATE THE PLOT(s)
## plot 1:
plot(subdata$datetime, subdata$Global_active_power, ylab="Global Active Power (kilowatts)", xlab = "", type="l")
## plot 2:
plot(subdata$datetime, subdata$Voltage, ylab="Voltage", xlab = "datetime", type="l")
## plot 3:
plot(subdata$datetime, subdata$Sub_metering_1, ylab="Energy sub metering", xlab = "", type="l")
lines(subdata$datetime, subdata$Sub_metering_2, col="red", type="l")
lines(subdata$datetime, subdata$Sub_metering_3, col="blue", type="l")
legend("topright", lwd = c(1,1,1), col = c("black", "red", "blue"), bty="n", legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
## plot 4:
plot(subdata$datetime, subdata$Global_reactive_power, ylab="Global_reactive_power", xlab = "datetime", type="l")
## SAVE AS PNG
dev.off()
|
04016efc8c7dd28893757d05461e455ed417d4e9
|
587bd26d91c30820bfe9debc9888e52d642f372d
|
/utility.R
|
83b0aa8db46c57d969398e38d3d9a99e2184cc9e
|
[] |
no_license
|
abelmontesdeoca/ExData_Plotting1
|
71a4212c1cf578f0ce71165f376f2fc3293167f6
|
3102ff5d19e906d0fffaf055319ff58942ac76a0
|
refs/heads/master
| 2021-01-18T07:56:46.265156
| 2015-05-10T10:05:35
| 2015-05-10T10:05:35
| 35,339,284
| 0
| 0
| null | 2015-05-09T18:03:12
| 2015-05-09T18:03:12
| null |
UTF-8
|
R
| false
| false
| 449
|
r
|
utility.R
|
todo <- read.table("household_power_consumption.txt", header = TRUE, sep=";", na.strings="?")
hpc <<- with(subset(todo, Date == "1/2/2007" | Date == "2/2/2007"),
data.frame(Timestamp = strptime(paste(Date, Time),
format="%d/%m/%Y %H:%M:%S"),
Global_active_power, Global_reactive_power,
Voltage,
Global_intensity, Sub_metering_1, Sub_metering_2,
Sub_metering_3))
|
0653701c98c76fe2d14d8be159575bb438c2293e
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query08_1344/query25_query08_1344.R
|
faf2a79700b4005e8f7bbb9577aef0be11463866
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query25_query08_1344.R
|
79fa44dbade359ecf7230a32a46cb6f3 query25_query08_1344.qdimacs 790 2113
|
df1189b5e231e0a46fdc6ca1714547d3dc2a9e4e
|
81a791350b09bb1999a42b7d26f160eb69039172
|
/ch3/R/cogsH_analysis.R
|
da9d6fe84f01ccd685ed3fa394baaad6f3ab9903
|
[] |
no_license
|
ollyburren/thesis
|
08ab64cf3ec3b31213a87eb6cc676c23c2409978
|
3ab8bbf6e7a255f6ddfdd1ffdf5f79fb8dd57d20
|
refs/heads/master
| 2020-07-18T22:59:41.613683
| 2019-06-13T09:28:28
| 2019-06-13T09:28:28
| 94,329,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,590
|
r
|
cogsH_analysis.R
|
## analyse hierarchical prioritisation
library(data.table)   # fread, rbindlist, melt, :=
library(magrittr)     # %>%
library(ggplot2)      # ggplot, geom_tile, etc., used below
DATA_DIR <- '/home/ob219/share/cogs_bb/COGS'
fs <- list.files(path=DATA_DIR,pattern="\\_prioritised.tab",full.names=TRUE)
res.DT <- lapply(fs,fread) %>% rbindlist
## examine protein_coding only
resf <- res.DT[biotype=='protein_coding',]
## filter such that there is at least marginal overall posterior that a gene
## is involved in a disease
D <- resf
Df <- melt(D,id.vars=c('disease','ensg','name'),measure.vars='node')
c.DT<-Df[,list(gene.count=.N),by=c('disease','value')]
c.DT[,]
test.DT <- c.DT[disease %in% c('CD','RA','SLE','T1D','UC'),]
back.DT <- c.DT[!disease %in% c('CD','RA','SLE','T1D','UC'),]
bdist <- back.DT[,list(mean=mean(log(gene.count+1)),var=var(log(gene.count+1))),by='value']
M <- merge(test.DT,bdist,by='value')
M[,Z:=(log(gene.count+1)-mean)/sqrt(var)]
## try Tukey transform
library(rcompanion)   # transformTukey, loaded here before first use
back.DT[,tukey.trans:=transformTukey(gene.count,plotit=FALSE),by='value']
bdist <- back.DT[,list(mean=mean(log(gene.count+1)),var=var(log(gene.count+1))),by='value']
setnames(M,'value','node.name')
M <- melt(M,id.vars=c('node.name','disease'),measure.vars='Z')
#M <- dcast(M,disease~node.name+variable)
#mat <- as.matrix(M)[,-1] %>% apply(.,2,as.numeric)
#rownames(mat) <- M$disease
#colnames(mat) <- colnames(mat) %>% sub("\\_Z","",.)
#library(pheatmap)
#pheatmap(mat)
## do in ggplot
M[,p.adj:=(pnorm(value,lower.tail=FALSE) * 2) %>% p.adjust]
library(cowplot)
ggplot(M[p.adj<0.05,],aes(x=node.name,y=disease,fill=value)) + geom_tile(color='black') +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1)) + xlab("Node") + ylab("Disease") +
scale_fill_continuous("Z")
## looking at background distro
node <- 'overall'
library(rcompanion)
par(mfrow=c(2,2))
qqnorm(back.DT[value==node,]$gene.count,main="Overall node gene counts")
qqline(back.DT[value==node,]$gene.count,col='red')
qqnorm(log(back.DT[value==node,]$gene.count + 1),main="Overall node log(gene counts+1)")
qqline(log(back.DT[value==node,]$gene.count + 1),col='red')
qqnorm(sqrt(back.DT[value==node,]$gene.count),main="Overall node sqrt(gene counts)")
qqline(sqrt(back.DT[value==node,]$gene.count),col='red')
T_tuk = transformTukey(back.DT[value==node,]$gene.count,plotit=FALSE)
qqnorm(T_tuk,main="Overall node transformTukey")
qqline(T_tuk,col='red')
par(mfrow=c(1,1))
hist(back.DT[value==node,]$gene.count,main="Overall node gene counts")
bb_trait <- readRDS('/home/ob219/share/cogs_bb/bb_trait_manifest.RDS')
scount.DT <- merge(back.DT,bb_trait[,.(phe,cases)],by.y='phe',by.x='disease')
test <- scount.DT[,list(cor=cor(gene.count,cases)),by='value']
|
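The core of the script above is a z-score of log(count + 1) against a background distribution. A self-contained toy version of that step, with invented counts:

set.seed(7)
background <- rpois(500, lambda = 20)  # background gene counts for one node
test_count <- 45                       # count for a disease of interest

mu <- mean(log(background + 1))
s2 <- var(log(background + 1))
Z  <- (log(test_count + 1) - mu) / sqrt(s2)
p  <- pnorm(Z, lower.tail = FALSE) * 2  # two-sided p-value, as in the script
c(Z = Z, p = p)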
553a570fb765ef1ba0e03279f88f622cb56af9ab
|
1197eab6d7008068cd5558d26039fa157c6e7854
|
/ui.R
|
cc0d5012ce684145fdaa92cf409bc49fc44c9009
|
[] |
no_license
|
arjitmazumdar/DataProducts
|
66976b90e9a38cfb9bf57f63ce7376eb43e12716
|
1f490236d971fe62cbb16eae0c521c3252b76887
|
refs/heads/master
| 2016-08-12T03:53:32.081992
| 2015-12-27T11:31:27
| 2015-12-27T11:31:27
| 48,642,153
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,569
|
r
|
ui.R
|
# ui.R
shinyUI(fluidPage(
titlePanel(" BMI- Body Mass Index Calculation For Men and Women"),
sidebarLayout(
sidebarPanel(
p(h4("Please Enter Your Details")),
textInput("text", label = ("Name"),
value = "Enter Name..."),
radioButtons("radio", label = ("Sex"),
choices = list("Male" = 1, "Female" = 2
),selected = 1),
sliderInput("slider1", label = ("Height (in meters)"),
min = 0, max = 300, value = 0),
sliderInput("slider2", label = ("Weight (in kgs)"),
min = 0, max = 200, value = 0),
submitButton("Submit")
),
mainPanel(
h4(em("Stay Healthy Stay Happy")),
p(strong("To keep the body in", em("good health"),"is a duty
... otherwise we shall not be able to
keep our mind", em("strong and clear.- Buddha"))),
br(),
img(src="bmi.png", height = 200, width = 500),
br(),
br(),
br(),
textOutput("bmi"),
br(),
p("For maintaining a good BMI, visit the ",
a("National Heart, Lung and Blood Institute homepage.",
href = "http://www.nhlbi.nih.gov/health/educational/lose_wt/index.htm")),
p(h6(" Copyright: National Institute of Health "))
))))
|
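The record above contains only ui.R; the matching server.R is not included. A guessed minimal counterpart, wired to the input IDs used above (`slider1` for height, `slider2` for weight, `output$bmi`) and using the standard BMI formula weight / height^2, might look like this:

library(shiny)

shinyServer(function(input, output) {
  output$bmi <- renderText({
    h <- input$slider1  # height in meters, per the UI label
    w <- input$slider2  # weight in kg
    if (h > 0) paste("Your BMI is:", round(w / h^2, 1))
    else "Please enter a height greater than zero."
  })
})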
fb72ee83b281b21d283768edaad6a2f428f03e90
|
32f251147606d865a04834ba8f08f8be75410738
|
/man/Hdr_LN2.Rd
|
4899b3447d869ff6eb19991624d42a3e2d8c3e0a
|
[] |
no_license
|
cdv04/ACTR
|
4e17aaab32d319b1b609b6c1c0c553a0f7e41317
|
d1762dc8884eb37b023cf146a71c05a96508cc08
|
refs/heads/master
| 2021-01-01T05:11:47.528297
| 2017-04-07T10:16:40
| 2017-04-07T10:16:40
| 59,212,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 731
|
rd
|
Hdr_LN2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Hdr_LN2.R
\name{Hdr_LN2}
\alias{Hdr_LN2}
\title{Calculating HDR5 of observed data from log normal distribution.}
\usage{
Hdr_LN2(edr10.df, th)
}
\arguments{
\item{th}{The desired probability (i.e. 0.05 for HDR5)}
\item{edr10.df}{A dataframe containing the edr10 for all the var_group for which we want to calculate the hdr th}
}
\value{
a data frame containing the hdr th calculated from empirical distribution
}
\description{
Calculating HDR5 of observed data from log normal distribution.
}
\details{
This function is necessary for IC_HdrLN2, when IC of hdr th are estimated without calculating the weight of the bootstrap sample
}
\examples{
Hdr_LN2(edr10_obs,0.05)
}
|
50bd58e9ece0751d14487be10b0b851365bd4909
|
046d61375e3d1fb5efcad840a992a9ea2b7d897f
|
/R/eb.R
|
81cd6a8b8ee0ea34a3aa3b82bd2a47bc2e87530d
|
[] |
no_license
|
pbreheny/deepn
|
a494f634ebcce4bdf022af6539aa1fc92bd5af5c
|
04d69d464f1b8f410d747fe416898ec22a8361ef
|
refs/heads/master
| 2021-01-09T20:18:45.151939
| 2020-05-04T22:17:33
| 2020-05-04T22:17:33
| 61,055,789
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 226
|
r
|
eb.R
|
# NOT FUNCTIONAL
eb <- function(z, se, lam=0.05) {
  n <- length(z)
  w <- 1/se^2
  m <- weighted.mean(z, w)
  mu <- lam*m + (1-lam)*z
  v <- 1/(w + lam*w)
  mu[is.na(mu)] <- m
  #v[is.na(v)] <- 1/(lam*)
  list(mu=mu, v=v)  # return the shrunken means and variances explicitly
}
|
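A hypothetical call to `eb()` on simulated effect estimates, to show the intended shrinkage toward the precision-weighted mean. The file is flagged NOT FUNCTIONAL by its author, so treat this as a sketch, not a vetted method.

set.seed(1)
z  <- rnorm(20, mean = 2, sd = 1)  # noisy effect estimates
se <- runif(20, 0.5, 1.5)          # their standard errors

fit <- eb(z, se, lam = 0.05)
fit$mu  # estimates shrunk toward the weighted mean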
d14c9f8f841de0ad71c0ecc905d5006a881247b4
|
e0b0ddad937ce559aba8ce8e0ef2d5432ed839c9
|
/result/real_data_0621/lib/sim.R
|
795695727509e49d9f71eac3eb6334daeefe037a
|
[] |
no_license
|
jenjong/RankConsistency
|
968b4dfb7f5a43b33ad8bc0a8d872dbb4d6a24f5
|
ad262789e833757e2437fb2423ca71e7ce89793c
|
refs/heads/master
| 2020-03-11T01:21:25.848962
| 2019-04-08T11:11:15
| 2019-04-08T11:11:15
| 129,689,557
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,653
|
r
|
sim.R
|
# requires: glmnet, igraph, dplyr (assumed to be loaded by the calling script)
#1 Dirichlet density function
ddirichlet<- function(x,alpha.vec)
{
xd = 1-sum(x)
v<- sum((alpha.vec[1:2]-1)*log(x)) + (alpha.vec[3]-1)*log(xd)
v1<- gamma(sum(alpha.vec))/prod(gamma(alpha.vec))*exp(v)
v1
}
#2 glmnet design matrix and reponse matrix
gen.designR <- function(p)
{
x = matrix(0, p*(p-1), p)
y = rep(0, p*(p-1) )
ix = 1
for (i in 1:p)
{
for (j in 1:p)
{
if (i == j) next
jx1 = min(i,j)
jx2 = max(i,j)
x[ix,jx1] = 1; x[ix,jx2] = -1
if (i<j) y[ix] = 1
ix = ix + 1
}
}
x = x[,-p]
return(list(x = x, y = y))
}
#3 Construct Qmat
gen.Qmat <- function(p,counter, tn, rand.sim = F )
{
if (counter == 1)
{
alpha.vec1 = c(10, 8, 3)
alpha.vec2 = c(3, 3, 10)
alpha.vec3 = c(10, 3, 4)
}
if (counter == 2)
{
alpha.vec1 = c(1, 8, 3)
alpha.vec2 = c(3, 3, 10)
alpha.vec3 = c(10, 3, 4)}
if (counter == 3)
{
alpha.vec1 = c(3, 2, 1)
alpha.vec2 = c(3, 4, 1)
alpha.vec3 = c(3, 6, 1)
}
if (counter<4)
{
pi.var1 = 1/3
pi.var2 = 1/3
pi.var3 = 1/3
z = seq(1/p,1-1/p, length = p)
dmat <- matrix(0, length(z), length(z))
for (i in 1:length(z))
{
for (j in 1:length(z))
{
xvar = c(z[i], z[j])
if (sum(xvar)>= 1) next
dmat[i,j] <- pi.var1*ddirichlet(xvar,alpha.vec1)+
pi.var2*ddirichlet(xvar,alpha.vec2) +
(1-pi.var1-pi.var2)*ddirichlet(xvar,alpha.vec3)
}
}
#dmat
dmat <- dmat/sum(dmat)
if (rand.sim == T)
{
tmp <- drop(rmultinom(1, tn, prob = c(dmat)) )
dmat1 <- matrix(tmp, p, p)
}
if (rand.sim == F) dmat1 <- round(dmat*tn)
dmat2 <- matrix(0,p,p)
for (j in 1:p) dmat2[,j] <- rev(dmat1[j,])
dmat2 <- dmat2 + t(dmat2)
Qmat = dmat2
}
if (counter == 4)
{
#Qmat = matrix( rpois( p^2 , p^2)^2 , p, p)
Qmat = matrix( runif(p^2), p , p)
Qmat[lower.tri(Qmat, diag = FALSE)] = 0
#Qmat[sample(1:length(Qmat), trunc(length(Qmat)/4))] = 0
Qmat = Qmat + t(Qmat)
diag(Qmat) <- 0
dmat1 = NULL
n = sum(Qmat)
nj = colSums(Qmat)
Qpmat = Qmat
for (j in 1:nrow(Qmat)) Qpmat[j,] = round(Qmat[j,]/n*2,2)
return(list(Qmat = Qmat, dmat = dmat1, Qpmat = Qpmat))
}
n = sum(Qmat)
nj = colSums(Qmat)
Qpmat = Qmat
for (j in 1:nrow(Qmat)) Qpmat[j,] = Qmat[j,]/n*2
# dmat1 has the coordinates for image()
return(list(Qmat = Qmat, dmat = dmat1, Qpmat = Qpmat))
}
#4 simulation code in Gmat.hat
gen.Gmathat <- function(Gmat, Qmat)
{
p = ncol(Gmat)
gmat.prob<-c(Gmat)
gmat.num <- c(Qmat)
gmat.gen<- rep(0, length(gmat.num))
for (i in 1:length(gmat.num))
{
gmat.gen[i] <- rbinom(n = 1, size = gmat.num[i], prob = gmat.prob[i])
}
Gmat.hat <- matrix(gmat.gen,p,p)
Gmat.hat[lower.tri(Gmat.hat, diag = T)] = 0
tmp <- Qmat - t(Gmat.hat)
Gmat.hat[lower.tri(Qmat)]<- tmp[lower.tri(Qmat)]
Gmat.hat <- Gmat.hat/Qmat
Gmat.hat[!is.finite(Gmat.hat)] = 0
return( Gmat.hat )
}
#5 naive est
naive.fun <- function(Qpmat, Gmat.hat,x,y,p)
{
wmat = Qpmat*Gmat.hat
wmat = t(wmat)
wvec = wmat[ - (1 + ( 0:(p-1) ) *(p+1))]
# fit glmnet
fit <- glmnet(x, y, family = 'binomial',
intercept = FALSE, weights = wvec, lambda = 0, standardize = F, thresh = 1e-09)
est = c(fit$beta[,1],0)
naive.est <- est
return(naive.est)
}
##
# # check the existence of estimator by Qpmat.c1
# # note that the condition of existence only holds for population version
# # 1. make edges
# tmp <- cbind( rep(1:p, p), rep(1:p, each = p), c(Qpmat.c1))
# tmp = tmp[tmp[,3] > 0, 1:2]
# ed = c(t(tmp))
# # 2. make directed graph
# g <-make_graph(ed, n = p, directed = TRUE)
# # 3. connectivity check
# cg <- components(g, mode = 'strong')
# # 4. report
# if (cg$no == 1) print("estimator exists!") else print("estimator does not exist !")
#
prop.fun <- function(max.k, Qpmat, Gmat.hat, x, y, p)
{
Result = NULL
Result.list = list()
k = 0
for (k in 0:max.k)
{
#cat('outer interataion::: ', k , '\n')
# Thresholding Qpmat.c1 by constant 'cvar'
# unique cvec
cvec<- sort(unique(Qpmat[upper.tri(Qpmat)]))
# the first threshold
if (k == 0 ) idx <- which(Qpmat <= 0)
if ( k>0 ) idx <- which(Qpmat <= cvec[k])
Qpmat.c1 = Qpmat
Qpmat.c1[idx] <- 0
###############################
# set weight-vector
i1 = 1 ; i2 = 2
idx = 1
result = matrix(0, p*(p-1)/2, 4)
for ( i1 in 1:(p-1))
{
for (i2 in (i1+1):p)
{
Qpmat.c2 = Qpmat.c1
nvec1 = Qpmat.c1[i1,]
nvec2 = Qpmat.c1[i2,]
idx1 = which(nvec1 == 0 | nvec2 == 0)
idx2 = setdiff( idx1, c(i1, i2))
nvec3 = (nvec1[-idx1]+nvec2[-idx1])/2
Qpmat.c2[i1,-idx1] = Qpmat.c2[i2,-idx1] = nvec3
if (length(idx2)>0) Qpmat.c2[i1,idx2] = Qpmat.c2[i2,idx2] = 0
Qpmat.c2[,i1] <- Qpmat.c2[i1,]
Qpmat.c2[,i2] <- Qpmat.c2[i2,]
# check the result: Qpmat.c2[i1,]
# : Qpmat.c2[i2,]
# if (length(idx2) > 0 & max( c( Qpmat.c1[i1,idx2],Qpmat.c1[i2,idx2]) ) >0) :: check the existence of estimator
###############################
# set weight-vector in glmnet
wmat = Qpmat.c2*Gmat.hat
wmat = t(wmat)
wvec = wmat[ - (1 + ( 0:(p-1) ) *(p+1))]
# fit glmnet
fit <- glmnet(x, y, family = 'binomial',
intercept = FALSE, weights = wvec, lambda = 0, standardize = F, thresh = 1e-09)
est = c(fit$beta[,1],0)
result[idx, 1:2] = c(i1, i2)
if( est[i1] > est[i2]) result[idx, 3] = 1
pmat = plogis(outer(est, est, FUN = '-'))
vmat = pmat*(1-pmat)
v1 = rowSums(Qpmat.c2*vmat)
v1 <- v1[-p]
v2 = (-Qpmat.c2*vmat)[-p,-p]
diag(v2) <- v1
inv.v2 <- solve(v2)
if (i2 < p ) result[idx, 4] <- inv.v2[i1,i1] + inv.v2[i2,i2] - 2*inv.v2[i1,i2]
if (i2 == p ) result[idx, 4] <- inv.v2[i1,i1]
idx = idx + 1
#cat(' inner iteration:' , idx, '\n')
#plot(c(fit$beta[,1],0), type ='b')
#min(-diff(est, 1))
#sum(-diff(est, 1)<0)
#v1 = rowSums(Qpmat.c2*vmat)
}
}
Result <- cbind(Result, result[,4])
Result.list[[k+1]] <- result
}
return(list(Result = Result, Result.list = Result.list))
}
#6 aggregation
agg.fun <- function(prop.result)
{
min.vec<-apply(prop.result$Result, 1, which.min)
agg.result <- matrix(0,nrow(prop.result$Result), 3)
agg.result[,1:2] <- (prop.result$Result.list[[1]])[,1:2]
for (i in 1:nrow(agg.result))
{
agg.result[i,3] <- (prop.result$Result.list[[min.vec[i]]])[i,3]
}
return(agg.result)
}
summary.fun<- function(tmp)
{
sum(tmp[,3])== nrow(tmp)
sum(tmp[,3]==0)
#tmp[tmp[,3] == 1,1:2]
tmp1 <- tmp[,1:3]
tmp2 <- tmp[,c(2,1,3)]
tmp2[,3]<- abs(tmp2[,3]-1)
sc <- c()
for ( i in 1:p)
{
sc[i] = sum(tmp1[tmp1[,1]== i ,3]) + sum(tmp2[tmp2[,1]== i ,3])
}
return(sc)
}
###
cons.rank<- function(tmp)
{
tmp.copy<- tmp[,c(2,1,3)]
tmp.copy[,3] <- 1-tmp.copy[,3]
tmp <- rbind(tmp, tmp.copy)
tmp <- tmp[tmp[,1] != 0,]
Cset <- unique( tmp[,1])
sc <- c()
while( length(Cset)>0)
{
Uset <- c()
Cset <- unique( tmp[,1])
for (i in Cset)
{
tmp.sub1<-tmp[tmp[,1] == i,, drop = F]
if ( sum(tmp.sub1[,3]) == nrow(tmp.sub1) ) Uset <- c(Uset, i)
}
#cat(sc,'\n')
if ( length(Uset) == 1)
{
sc <- c(sc,Uset)
if (length(sc)==(p-1))
{
sc<- c( sc, setdiff(Cset, Uset) )
break
}
Uset.idx<-tmp[, 1] == Uset | tmp[, 2] == Uset
tmp <- tmp[!Uset.idx,]
} else {
sc <- NA
break
}
}
return(sc)
}
## cv code
# make cv_mat
#
cv_mat_fun <- function(Gmat_obs, Qmat, k_fold)
{
cv_mat = matrix(0, tn, 4)
s1 = 1
for (j in 1:(ncol(Gmat_obs)-1))
{
for(i in (j+1):nrow(Gmat_obs))
{
a1 = Qmat[i,j]
if (a1 == 0 ) next
f_idx <- s1:(s1+a1-1)
cv_mat[f_idx,1] <- i
cv_mat[f_idx,2] <- j
s1 = s1 + a1
a2 = Gmat_obs[i,j]
if (a2 == 0 ) next
f_idx2 <- sample(f_idx,a2)
cv_mat[f_idx2,3] = 1
}
}
cv_mat[,4] <- sample(1:k_fold, tn, replace = TRUE)
colnames(cv_mat) = c("j", "k", "y_jk", "partition")
return(cv_mat)
}
cv_table_fun = function(cv_m)
{
cv_m <- as.data.frame(cv_m)
# require(dplyr)
result = cv_m %>% group_by(j, k) %>%
summarize(sum = length(y_jk)) %>% as.matrix()
Qmat_tr = matrix(0, p, p)
for ( i in 1:nrow(result))
{
Qmat_tr[result[i, 1], result[i, 2]] <- result[i, 3]
}
Qmat_tr <- Qmat_tr + t(Qmat_tr)
result = cv_m %>% group_by(j, k) %>%
summarize(sum = sum(y_jk)) %>% as.matrix()
Gmat_tr = matrix(0, p, p)
for ( i in 1:nrow(result))
{
Gmat_tr[result[i, 1], result[i, 2]] <- result[i, 3]
}
tmp <- Qmat_tr - t(Gmat_tr)
Gmat_tr[upper.tri(Qmat_tr)] <- tmp[upper.tri(Qmat_tr)]
return(list(Q=Qmat_tr, G=Gmat_tr))
}
gen_sim_fun = function(Gmat, Qmat)
{
## Gmat.hat: the estimate of Gmat built from the win/loss counts and the
## total number of matches when data are generated from the true Gmat
gmat_prob <- c(Gmat) ## Gmat_jk: probability that object j beats object k
gmat_num <- c(Qmat)
gmat_gen<- rep(0, length(gmat_num))
for (i in 1:length(gmat_num))
{
gmat_gen[i] <- rbinom(n = 1, size = gmat_num[i], prob = gmat_prob[i])
}
Gmat_obs <- matrix(gmat_gen,p,p)
Gmat_obs[lower.tri(Gmat_obs, diag = T)] = 0
tmp <- Qmat - t(Gmat_obs)
Gmat_obs[lower.tri(Qmat)]<- tmp[lower.tri(Qmat)]
return( list(G = Gmat_obs, Q = Qmat) )
}
bt_fun = function(gen_fit, lambda.vec = NULL)
{
Gmat.hat <- gen_fit$G
Qmat <- gen_fit$Q
p = ncol(Qmat)
Gmat.hat <- Gmat.hat/Qmat
Gmat.hat[!is.finite(Gmat.hat)] = 0
n = sum(Qmat)
Qpmat = Qmat/n*2
wmat = Qpmat*Gmat.hat
wmat = t(wmat)
wvec = wmat[ - (1 + ( 0:(p-1) ) *(p+1))]
# fit glmnet
fit <- glmnet(x, y, family = 'binomial',
intercept = FALSE, weights = wvec, lambda = 0,
standardize = F, thresh = 1e-09)
est = c(fit$beta[,1],0)
if (is.null(lambda.vec)) cor.r = NULL else cor.r = cor(est, lambda.vec, method = 'kendall')
return( list (coefficients = est,
cor = cor.r) )
}
gbt_step1_fun = function(Qpmat, Gmat.hat, p, cval)
{
result = matrix(0, p*(p-1)/2, 4)
idx = 1
# select a pair of items for obtaining rank consistent estimator
# define a matrix to save paired results
for ( i1 in 1:(p-1))
{
for (i2 in (i1+1):p)
{
Qpmat.c1 = Qpmat
# threshold step
idx1 <- ( Qpmat.c1[i1,] <= cval )
idx2 <- ( Qpmat.c1[i2,] <= cval ) ## intersect(!idx1,!idx2)=\hat{O}_jk
if (sum(idx1)>0 ) ## changed length -> sum
{
Qpmat.c1[i1,idx1] <- 0 ; Qpmat.c1[idx1,i1] <- 0
}
if (sum(idx2)>0 ) ## changed length -> sum
{
Qpmat.c1[i2,idx2] <- 0 ; Qpmat.c1[idx2,i2] <- 0
}
Qpmat.c2 = Qpmat.c1
## thresholding procedure
Qpmat.c2 = Qpmat.c2*(Qpmat.c2>cval)
nvec1 = Qpmat.c1[i1,]
nvec2 = Qpmat.c1[i2,]
idx1 = which(nvec1 == 0 | nvec2 == 0) ## !idx1 : \hat{O}_jk
idx2 = setdiff( idx1, c(i1, i2))
# balancing the weight parameter in gBT model for the selected pair
nvec3 = (nvec1[-idx1]+nvec2[-idx1])/2
Qpmat.c2[i1,-idx1] = Qpmat.c2[i2,-idx1] = nvec3
if (length(idx2)>0) Qpmat.c2[i1,idx2] = Qpmat.c2[i2,idx2] = 0
Qpmat.c2[,i1] <- Qpmat.c2[i1,] ## keep the matrix symmetric
Qpmat.c2[,i2] <- Qpmat.c2[i2,] ## keep the matrix symmetric
## find V_jk(maximum connected set)
i1i2_adj_matrix = matrix(as.integer(Qpmat.c2>0) , p , p) ## adjacency matrix
i1i2_graph = graph_from_adjacency_matrix(i1i2_adj_matrix , mode="undirected" , weighted=NULL) ## make a graph
i1i2_clusters = clusters(i1i2_graph)$mem ## clustering using adj matrix
if (i1i2_clusters[i1] != i1i2_clusters[i2])
{
## case where i1 and i2 are not in the same connected component
#cat(' k:',k,', ',i1,'and',i2, 'is not connected!!\n')
idx = idx + 1
next
}
## idx3 : edge index set of V_jk
idx3 = sort(which(i1i2_clusters %in% i1i2_clusters[i1]))
#########################################
## computing gBT estimator
#########################################
wmat <- Qpmat.c2[idx3, idx3]*Gmat.hat[idx3, idx3]
wmat = t(wmat)
pp <- length(idx3)
wvec = wmat[ - (1 + ( 0:(pp-1) ) *(pp+1))] ## drop the w_jj diagonal terms
xx = matrix(0, pp*(pp-1), pp)
yy = rep(0, pp*(pp-1) )
ix = 1
for (i in 1:pp)
{
for (j in 1:pp)
{
if (i == j) next
jx1 = min(i,j)
jx2 = max(i,j)
xx[ix,jx1] = 1; xx[ix,jx2] = -1
if (i<j) yy[ix] = 1
ix = ix + 1
}
}
xx = xx[,-pp]
# note that the gBT estimator may not exist because of the thresholding
# use ridge
try.fit <- try(fit <- glmnet(xx, yy, family = 'binomial',
intercept = FALSE, weights = wvec,
lambda = 1e-5, alpha = 0, standardize = F,
thresh = 1e-09), silent = T)
if (class(try.fit)[1] == 'try-error')
{
## Q: is there no marker in result[idx,] that an error occurred?
## A: result keeps (0,0) instead of (i1,i2)
idx = idx + 1
next
}
est = c(fit$beta[,1],0) ## append lambda_pp
result[idx, 1:2] = c(i1, i2)
# compare the values of lambda_{i1} and lambda_{i2}
if( est[which(idx3==i1)] > est[which(idx3==i2)]) result[idx, 3] = 1
## assign 1 to result[idx,3] if \hat{lambda_i1} > \hat{lambda_i2} (0 otherwise)
## calculate weight v_jk
# 1. obtain the asymptotic variance
pmat = plogis(outer(est, est, FUN = '-'))
vmat = pmat*(1-pmat)
v1 = rowSums(Qpmat.c2[idx3,idx3]*vmat)
v1 <- v1[-pp]
v2 = (-Qpmat.c2[idx3,idx3]*vmat)[-pp,-pp]
diag(v2) <- v1
## calculate asymptotic covariance matrix
tri.fit2 = try(inv.v2 <- solve(v2*sum(Qmat)/2))
if (class(tri.fit2)[1] == 'try-error'){
cat(' k:',k,', ',i1,'and',i2, ': cannot calculate inverse')
result[idx , 4] = 4/(sum(Qpmat.c2[idx3,idx3]*sum(Qmat)/2)/2) ## alternative v_jk
idx = idx+1 ## if error -> next step
next
}
i1_ind = which(idx3==i1); i2_ind = which(idx3==i2)
if ((i1_ind<pp) & (i2_ind<pp))
{
result[idx, 4] <- inv.v2[i1_ind,i1_ind] + inv.v2[i2_ind,i2_ind] -
2*inv.v2[i1_ind,i2_ind]
}
if ((i1_ind==pp) | (i2_ind==pp) )
{
min_ind = min(i1_ind , i2_ind)
result[idx , 4] = inv.v2[min_ind , min_ind]
}
## 2. v_jk = \sum n_ml
#result[idx , 4] = sum(Qpmat.c2[idx3,idx3])/2
#cat(' inner iteration:' , idx, '\n')
idx = idx + 1
#plot(c(fit$beta[,1],0), type ='b')
#min(-diff(est, 1))
#sum(-diff(est, 1)<0)
#v1 = rowSums(Qpmat.c2*vmat)
#}
#}
}
}
return(result)
}
# note that gbt_step2_fun has two types of returns:
gbt_step2_fun = function(result, p, lambda.vec, newdata = NULL,
weight = TRUE)
{
tmp<-result
not0_ind = (tmp[,1]!=0)
tmp <-tmp[not0_ind, 1:3]
p.set <-sort(unique(c(tmp[,1:2])))
if (length(p.set) != p)
{
return(list(gbt_est = rep(NA,p),
cor = NA, test_cor = NA))
}
xx <- matrix(0, nrow(tmp)*2, p)
yy <- rep(0, nrow(tmp)*2)
i = 1
for ( i in 1:nrow(tmp))
{
vec1<-tmp[i,1:2]; vec2<- tmp[i,3]
xx[2*(i-1)+1, vec1] <- c(1,-1) ; yy[2*(i-1)+1] <- vec2
xx[2*i, vec1] <- c(-1,1) ; yy[2*i] <- abs(vec2 - 1)
}
xx<- xx[,-p]
## See fit: Note that weights denotes v_jk
v_weight = rep(result[not0_ind, 4],each=2)
if (weight == FALSE) v_weight = rep(1, length(v_weight))
fit<-glmnet(xx,yy, family = 'binomial', alpha = 0, lambda = 1e-5,
intercept = FALSE,
weights = v_weight , standardize = F)
gbt.est <- c(fit$beta[,1],0)
if (is.null(lambda.vec))
{
cor.r = NA
} else {
cor.r <- cor(gbt.est, lambda.vec, method = 'kendall')
}
if (is.null(newdata))
{
test_cor = NA
} else {
tmp = matrix(0,p,p)
Q = newdata$Q
G = newdata$G
for (i in 1:p)
{
for (j in 1:p)
{
if (i == j) next
tmp[i,j] = as.integer( gbt.est[i]-gbt.est[j] > 0 )
}
}
test_cor = sum(tmp*G)/(sum(Q)/2)
}
return(list(gbt_est = gbt.est,
cor = cor.r, test_cor = test_cor))
}
sparse_gen_fun <- function(dmat, kn, rn, tn, random = TRUE)
{
if (random == TRUE)
{
dmat1 <- dmat ## dmat : {q_jk} matrix
u.idx <- which( dmat > 0)
sel.u.idx<- sample(u.idx, kn)
dmat1[sel.u.idx] <- 0
dmat1 <- dmat1/sum(dmat1)
d.sample <- drop (rmultinom(1, tn-rn*kn, prob = c(dmat1))) ## n_jk
d.sample[sel.u.idx] <- rn
dmat1 <- matrix(d.sample, p , p)
Qmat <- matrix(0, p, p )
for (j in 1:p) Qmat[,j] <- rev(dmat1[j,])
Qmat <- Qmat + t(Qmat)
return(Qmat)
}
if (random == FALSE)
{
dmat1 <- dmat ## dmat : {q_jk} matrix
for (j in 1:p) dmat1[,j] <- rev(dmat[j,])
#dmat1 <- dmat1 + t(dmat1)
u.idx <- which( dmat1>0)
sel.u.idx<- sample(u.idx, kn)
dmat1[sel.u.idx] <- 0
dmat1 <- dmat1/sum(dmat1)
d.sample <- drop (rmultinom(1, tn-rn*kn, prob = c(dmat1)))
d.sample[sel.u.idx] <- rn
dmat1 <- matrix(d.sample, p , p)
Qmat <- matrix(0, p, p )
for (j in 1:p) Qmat[,j] <- rev(dmat1[j,])
Qmat <- Qmat + t(Qmat) ## Qmat: the final n_jk matrix
return(Qmat)
}
}
cv.gbt_fun = function(gen_fit, cvec, k_fold, lambda.vec)
{
fit = gen_fit
p = ncol(fit$Q)
cv_mat = cv_mat_fun(fit$G, fit$Q, k_fold)
cor_mat = matrix(NA, k_fold, length(cvec))
for (k in 1:length(cvec))
{
######### gBT model ###########
cval <- cvec[k]
# k-fold
for (k_num in 1:k_fold)
{
result = NULL
tmp_te = cv_mat[cv_mat[,"partition"] == k_num,-4]
tmp_tr = cv_mat[cv_mat[,"partition"] != k_num,-4]
cv_table <- cv_table_fun(tmp_tr)
Qmat <- cv_table$Q
Gmat.hat <- cv_table$G
Gmat.hat = Gmat.hat/Qmat
Gmat.hat[!is.finite(Gmat.hat)] = 0
n = sum(Qmat)
Qpmat = Qmat/n*2
result <- gbt_step1_fun(Qpmat, Gmat.hat, p, cval)
# gbt_step2_fun
cv_table <- cv_table_fun(tmp_te)
gbt_fit <- gbt_step2_fun(result, p, lambda.vec, cv_table)
# cat ("k:", k, " k_num:", k_num," ", gbt_fit$test_cor,'\n')
if (any(is.na(gbt_fit))) next
cor_mat[k_num, k] <- gbt_fit$test_cor
}
}
return(cor_mat)
}
gbt_fun = function(gen_fit, cval, lambda.vec)
{
Gmat.hat <- gen_fit$G
Qmat <- gen_fit$Q
p = ncol(Qmat)
Gmat.hat <- Gmat.hat/Qmat
Gmat.hat[!is.finite(Gmat.hat)] = 0
n = sum(Qmat)
Qpmat = Qmat/n*2
result <- gbt_step1_fun(Qpmat, Gmat.hat, p, cval)
gbt_fit<- gbt_step2_fun(result, p, lambda.vec)
gbt_est = gbt_fit$gbt_est
cor = gbt_fit$cor
return( list (coefficients = gbt_est,
cor = cor) )
}
|
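A quick usage sketch for `gen.designR()` from sim.R above: for p = 3 items it builds the Bradley-Terry design matrix over all ordered pairs (last column dropped for identifiability) and the 0/1 response vector.

d <- gen.designR(3)
d$x  # 6 x 2 matrix: p*(p-1) rows, p-1 columns
d$y  # 1 exactly when the row encodes a pair (i, j) with i < j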
a1c7d8a5ad122811bd5e4d7eab53b17f5a6f5267
|
51edc04f9e08625a2ba5fd4bb1ab18cd6b244549
|
/cachematrix.R
|
9bc6f2cdf9ca1f1cdcbe6fe034e8f9f1886d36fc
|
[] |
no_license
|
cmattoon/ProgrammingAssignment2
|
da2b6d4f0623271aa7d1f1d685586fe3c3f83730
|
29f85802713d1156b98e380946077d8cbc8b4b05
|
refs/heads/master
| 2020-12-25T21:01:00.118162
| 2014-10-25T17:36:41
| 2014-10-25T17:36:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,285
|
r
|
cachematrix.R
|
## Solves and caches inverse of matrix.
## Usage:
## > cache_matrix <- makeCacheMatrix(matrix(c(11,12,21,22), ncol=2, nrow=2))
## > inverse <- cacheSolve(cache_matrix)
## Attaches getters/setters for both the object
## and the inverse property.
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
## This function resets the cache (inverse <<- NULL) when the
## value of the original matrix (x) changes.
set <- function(y) {
x <<- y
inverse <<- NULL
}
## Getter for self.
get <- function() x
## Sets the inverse 'property' to inv
setinverse <- function(inv) inverse <<- inv
## Gets the inverse 'property'
getinverse <- function() inverse
## List
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse,
solve=solve)
}
## This function leverages the $inverse property
## to avoid expensive calculations in solve(x)
cacheSolve <- function(x, ...) {
## Get the inverse (matrix or NULL)
inverse <- x$getinverse()
if(!is.null(inverse)) {
## Cache hit, return the cached value.
return(inverse)
}
## If we're here, it was a miss. Get the actual result
## with solve()
inverse <- solve(x$get())
## Set the inverse to the new value with the setter.
x$setinverse(inverse)
inverse
}
|
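A usage sketch for the pair of functions above: the second `cacheSolve()` call returns the stored inverse without recomputing.

m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
inv1 <- cacheSolve(m)  # cache miss: computes solve() and stores the result
inv2 <- cacheSolve(m)  # cache hit: returned straight from the cache
identical(inv1, inv2)  # TRUE
m$get() %*% inv1       # 2x2 identity matrix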
0a913dbb663ccbcedf194a9b5a9d9fecc9e77910
|
d45283f3b64c81c2a1c1e7a4cf5e2ff4d9e7b495
|
/man/mtcarz.Rd
|
f8c54db704efa8522a4990805273cd7f12278183
|
[] |
no_license
|
DuyDN/descriptr
|
d138c1121e4fdb969b4d70e08402320c8417b1d2
|
6f3469f2e068c8e685dbb25471468b0816d1406e
|
refs/heads/master
| 2022-05-19T14:40:56.429504
| 2020-02-01T10:20:44
| 2020-02-01T10:20:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
mtcarz.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds-data-mtcarz.R
\docType{data}
\name{mtcarz}
\alias{mtcarz}
\title{mtcarz}
\format{An object of class \code{data.frame} with 32 rows and 11 columns.}
\usage{
mtcarz
}
\description{
Copy of mtcars data set with modified variable types
}
\keyword{datasets}
|
c2e4221beb946aa02ac7713de2b3c01cab9ad375
|
288b4b6998906714ab368e0ee14c70a4059be4ab
|
/tests/testthat/test_data_mcdaniel1994.R
|
550f3a9f377bcbb1d95ab35cb1780af18cb741a5
|
[] |
no_license
|
qsh7950/metadat
|
f6243a382c8c0e3f4c9a0e2cd657edb0ffa3e018
|
5c70fa63d7acfa1f315534fb292950513cb2281e
|
refs/heads/master
| 2021-02-26T06:42:18.937872
| 2019-10-21T21:58:33
| 2019-10-21T21:58:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
test_data_mcdaniel1994.R
|
source("hashTable.R")
context("Checking data: mcdaniel1994")
library(digest)
test_that("checks data md5 hash", {
expect_match(digest(metadat::dat.mcdaniel1994, algo = "md5"), hashTable$mcdaniel1994)
})
|
459c300a1e4ce858d587412b34bf906e812f59b4
|
dbfe5ce272e204a8e1663ced35c9d48ef4870496
|
/man/str_similar_pair.Rd
|
902e24218c87186defe968a20b33412d249c7af4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hmito/hmRLib
|
fac91a4e2ddfcd899283ec0b63c87c31965fb17f
|
f2cfd54ea491ee79d64f7dd976a94086092b8ef5
|
refs/heads/master
| 2023-08-31T07:21:31.825394
| 2023-08-28T10:02:07
| 2023-08-28T10:02:07
| 41,907,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 551
|
rd
|
str_similar_pair.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/character.R
\name{str_similar_pair}
\alias{str_similar_pair}
\title{Check for similar pairs within the given strings.}
\usage{
str_similar_pair(string, similarity = 3, only_sub = FALSE)
}
\arguments{
\item{string}{character for checking similarity}
\item{similarity}{threshold similarity}
\item{only_sub}{count only substitute}
}
\value{
logical: TRUE if the strings are sufficiently similar to each other.
}
\description{
Check for similar pairs within the given strings.
}
|
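A hypothetical call based only on the documented signature above; the hmRLib package itself is not bundled with this record, and the exact return shape beyond "logical" is an assumption.

library(hmRLib)
str_similar_pair(c("kitten", "kitten2", "sitting"),
                 similarity = 3, only_sub = FALSE)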
1251f1fff2d5b13c78109d3d978bc16eba23bc96
|
3eab8baafc916551a9cae1f1145949facfbe20ba
|
/Sync_DB_with_Ratings.R
|
ddbae58617586e800c69062ce73c263f001cfb64
|
[] |
no_license
|
LovinSpoonful/IS607-Project3
|
1d1a67c666eeb7dc4eeb0ada5a4da4958c07c514
|
2a7fbadb946aa8a8ecf1cbd09f4ae02d84131ab3
|
refs/heads/master
| 2021-01-10T16:54:25.180508
| 2016-03-27T03:20:56
| 2016-03-27T03:20:56
| 54,080,950
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,413
|
r
|
Sync_DB_with_Ratings.R
|
library(RMySQL)
# MySQL DB info
proj_user <- "project3"
proj_pwd <- "CUNYRBridge4"
proj_db <- "skill"
proj_host <- "db4free.net"
## ------------------------------------------
## Using RMYSQL
## ------------------------------------------
# establish the connection to the skill DB on db4free.net
skilldb = dbConnect(MySQL(), user=proj_user, password=proj_pwd, dbname=proj_db, host=proj_host)
# weighted_rating_by_skill_type
# (data.frame.name below is a placeholder for the data frame being uploaded)
dbWriteTable(skilldb, name="df_temp1", value=data.frame.name)
dbSendQuery(skilldb, "
UPDATE TBL_DATA T, DF_TEMP1 R
SET T.WEIGHTED_RATING_BY_SKILL_TYPE = R.WEIGHTED_RATING_BY_SKILL_TYPE
WHERE T.SKILL_ID = R.SKILL_ID
AND T.SOURCE_ID = R.SOURCE_ID;")
# overall_weighted_rating
dbWriteTable(skilldb, name="df_temp2", value=data.frame.name)
dbSendQuery(skilldb, "
UPDATE TBL_DATA T, DF_TEMP2 R
SET T.OVERALL_WEIGHTED_RATING = R.OVERALL_WEIGHTED_RATING
WHERE T.SKILL_ID = R.SKILL_ID AND T.SOURCE_ID = R.SOURCE_ID;")
# rating_by_skill_set
dbWriteTable(skilldb, name="df_temp3", value=data.frame.name)
dbSendQuery(skilldb, "
UPDATE TBL_DATA T, DF_TEMP3 R
SET T.RATING_BY_SKILL_SET = R.RATING_BY_SKILL_SET
WHERE T.SKILL_ID = R.SKILL_ID AND T.SOURCE_ID = R.SOURCE_ID;")
dbSendQuery(skilldb, "DROP TABLE IF EXISTS DF_TEMP1, DF_TEMP2, DF_TEMP3")
|
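One thing the script above never does is release the connection; a typical closing step with the standard DBI/RMySQL API would be:

dbDisconnect(skilldb)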
6a1141c819ee1ee596d6b7e86e8032173c0b7075
|
6033a4d1ddfa4000df0095adf4e16d60b465fab7
|
/top5_CityHotel.R
|
cad98d4272401598c25273e4fdd2f88fe9a50e5e
|
[] |
no_license
|
Pari02/Summer_Practicum_Orbitz
|
e2408856605e7a4f7449c585df12bd801cf726db
|
61b519b26b8f829a69170068dd90467a24fb6985
|
refs/heads/master
| 2021-01-15T16:57:39.754646
| 2015-09-27T09:01:03
| 2015-09-27T09:01:03
| 38,257,253
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,989
|
r
|
top5_CityHotel.R
|
# installing required libraries
install.packages("rjson")
install.packages("jsonlite")
install.packages("hydroTSM")
install.packages("RCurl")
install.packages("XML")
install.packages("zoo")
install.packages("ggplot2")
library(rjson)
library(jsonlite)
library(hydroTSM)
library(plyr)
library(RCurl)
library(XML)
library(zoo)
library(ggplot2)
library(RColorBrewer)
library(wordcloud)
library(gplots) # provides rich.colors(), used below
# assign path to the variables
dirP <- file.path("C:", "Users", "Parikshita", "Desktop", "Data Science", "SummerSemester")
dirS <- file.path(dirP, "SummerPracticum", "USHotelCityRatingPlots") # cityRating for csv's
path <- file.path(dirP, "Practicum-CSP 572", "TripAdvisorJson", "json")
# extract the data from the files
filename <- list.files(path, pattern = ".json", full.names = TRUE)
# extracting all the files
all_data <- lapply(filename, function(x) fromJSON(x))
# extracting cities and countries name
all_address <- lapply(1:length(all_data), function(i) all_data[[i]]$HotelInfo$Address)
all_doc <- lapply(1:length(all_address), function(x) htmlParse(all_address[x], asText = TRUE))
all_city <- lapply(1:length(all_doc), function(x) xpathSApply(all_doc[[x]], "//span[@property='v:locality']", xmlValue))
all_country <- lapply(1:length(all_doc), function(x) xpathSApply(all_doc[[x]], "//span[@property='v:country-name']", xmlValue))
all_country <- unique(subset(all_country, lapply(1:length(all_country), function(x) length(all_country[[x]])) > 0))
# extracting address of all the files
plain.text <- lapply(1:length(all_doc), function(x) xpathSApply(all_doc[[x]], "//text()[not(ancestor::script)][not(ancestor::style)][not(ancestor::noscript)][not(ancestor::form)]", xmlValue))
pattern <- lapply(1:length(all_doc), function(x) which(plain.text[[x]] %in% c(" ", ", ")))
address <- lapply(1:length(plain.text), function(i) {
if(length(pattern[[i]]) != 0)
{
plain.text[[i]][-pattern[[i]]]
}
else
{
plain.text[[i]] <- plain.text[[i]]
}
})
# creating directory where output will be saved
dir.create(file.path(dirS, "top5"), showWarnings = FALSE)
# removing address of hotels which are not in United States
country_filter <- which(sapply(lapply(1:length(address), function(x) any(which(address[[x]] %in% all_country))), isTRUE))
address <- address[-country_filter]
# extracting rating & review date for all the hotels
all_rating <- lapply(1:length(all_data), function(i) all_data[[i]]$Reviews$Ratings$Overall)
rating_all <- all_rating[-country_filter]
all_reviewDate <- lapply(1:length(all_data), function(i) all_data[[i]]$Reviews$Date)
reviewDate_all <- all_reviewDate[-country_filter]
all_hotel <- lapply(1:length(all_data), function(i) all_data[[i]]$HotelInfo$Name)
hotels <- all_hotel[-country_filter]
# extracting us_cities
us_cities <- all_city[-country_filter]
cities <- unique(subset(us_cities, lapply(1:length(us_cities), function(x) length(us_cities[[x]])) > 0))
# getting only those indexes which have city name in them
city_filter <- which(sapply(lapply(1:length(address)
, function(x) any(which(address[[x]] %in% cities))), isTRUE))
city_name <- us_cities[city_filter]
city_freq <- as.data.frame(table(unlist(city_name)))
# subsetting cities with hotel count >= 30
city_freq <- as.character(subset(city_freq$Var1, city_freq$Freq >= 30))
city_30 <- which(sapply(lapply(1:length(address)
, function(x) any(which(address[[x]] %in% city_freq))), isTRUE))
# using the index field to get rating, review date information
rating_city <- rating_all[city_30]
reviewDate_city <- reviewDate_all[city_30]
hotelName <- hotels[city_30]
city <- us_cities[city_30]
# combining city name, review date and rating
city_rating <- lapply(1:length(city_30), function(x) cbind(city[[x]], hotelName[[x]], reviewDate_city[[x]], rating_city[[x]]))
# command to create flatlist in R by row
city_rating <- do.call(rbind.data.frame, city_rating)
# assigning names to columns
colnames(city_rating) <- c("City", "Hotel", "Date", "Rating")
# converting datatype of Date column to Date and Rating to numeric
city_rating$Date <- as.Date(city_rating$Date, "%B%d, %Y")
# as.character(f) requires a "primitive lookup" to find the function as.character.factor()
#, which is defined as as.numeric(levels(f))[f]
city_rating$Rating <- as.numeric(levels(city_rating$Rating))[city_rating$Rating]
# subsetting the data set for review dates > year 2012
cr_2012 <- subset(city_rating, city_rating$Date >= "2012-01-01")
cr_2012$Date <- format(as.Date(cr_2012$Date, "%Y-%m-%d"), "%Y-%m")
city_avg <- ddply(cr_2012, c("City"), summarise, AvgRating = mean(Rating))
top_5 <- head(city_avg[order(city_avg$AvgRating, decreasing= T),], n = 5)
# extracting average top 5 cities from city avg
top5_filter <- which(sapply(lapply(1:nrow(city_avg)
, function(x) any(which(city_avg[x,1] %in% as.character(top_5$City))))
, isTRUE))
city_avg_5 <- city_avg[top5_filter,]
top5 <- as.character(top_5$City)
for (i in 1:length(top5))
{
city <- top5[i]
indexes <- which(sapply(lapply(1:nrow(cr_2012)
, function(x) any(which(cr_2012[x,"City"] %in% city)))
, isTRUE))
city_5 <- cr_2012[indexes,]
# aggregating data by hotel name
hotel_5_avg <- ddply(city_5, c("Hotel", "Date"), summarise, AvgRating = mean(Rating))
hotel_5 <- ddply(city_5, c("Hotel"), summarise, AvgRating = mean(Rating))
top_5_hotel <- head(hotel_5[order(hotel_5$AvgRating, decreasing= T),], n = 5)
filter_5_hotel <- which(sapply(lapply(1:nrow(hotel_5_avg)
, function(x) any(which(hotel_5_avg[x,"Hotel"] %in% as.character(top_5_hotel$Hotel))))
, isTRUE))
hotel_5_avg <- hotel_5_avg[filter_5_hotel,]
colnames(hotel_5_avg) <- c("Name", "Date", "AvgRating")
city_5_avg <- ddply(city_5, c("City", "Date"), summarise, AvgRating = mean(Rating))
colnames(city_5_avg) <- c("Name", "Date", "AvgRating")
df_5_avg <- do.call(rbind, list(city_5_avg, hotel_5_avg))
# setting dimensions for saving the plot
file.name <- paste(city, "top5_Hotels.png", sep="_")
savepath <- file.path(dirS, "top5", file.name)
# writing the data into a csv (optional)
#write.csv(avg_rating, paste(city , "USavg_rating.csv", sep = "_"))
#len <- length(unique(df_5_avg$Name))
col <- rev(rich.colors(6)) # palette() would return the previous palette, not these colours
# generating plot
df_5_plot <- ggplot(data = df_5_avg, aes(Date, AvgRating, group = Name, colour = Name)) +
geom_line(size = 2) +
scale_colour_manual(name = "City & Hotels", breaks = df_5_avg$Name, values = col) +
xlab("Date") + ggtitle(paste("Average Rating of Top 5 Hotels -", city)) +
theme(legend.justification = c(1, 0), legend.position = c(1, 0),
legend.background = element_rect(colour = "black"))
# save the plot
ggsave(savepath, plot = df_5_plot, width = 12, height = 6, units = 'in')
}
|
40b51f19aaa54eeb40ccf1b1f244b1ba3a9bc3d3
|
30b6762b3eefc492ef6e41715e6076aed74154a6
|
/vim/nostow/test/test_colorscheme/statistics.R
|
67b6d539d891e8f41bd64ca419701b0d8dd17cb7
|
[
"MIT"
] |
permissive
|
viccuad/dotfiles
|
e7ac04e3c4671aa8f1f1d6e7fca5278e6c0ec458
|
fbd13a108c8201310af323ccda74aac23ec7a03a
|
refs/heads/master
| 2020-04-15T22:48:29.276891
| 2017-09-03T09:49:49
| 2017-09-03T09:49:49
| 18,060,758
| 26
| 11
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
statistics.R
|
library(foreign)
detach("package:datasets")
x <- rnorm(1)
if(x > 0)
cat("Yes\n")
else
cat("No\n")
x <- numeric(123)
x <- NULL
|
7fd07f3ab621a634f0ab1fa02782ca58b8bad400
|
4f8c7cd717fd4a0a7b9b5d9e7dbd856b21ad8533
|
/v1/server.R
|
9d2bd33f591261f9a5d521be8cbbdfde50a0f856
|
[] |
no_license
|
omker04/LinkedIn
|
c9b7803a05784e9aaf076724c25cc1e5ba5816bd
|
54b80a68f3cebc79682dbc086f110efac4d35af7
|
refs/heads/master
| 2020-03-14T22:02:43.632143
| 2018-05-02T07:08:54
| 2018-05-02T07:08:54
| 131,811,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,203
|
r
|
server.R
|
shinyServer(function(input, output, session) {
##### Login Signup ####
observe({
if (input$user_name != '' & input$user_ID %like% 'GTS' & nchar(input$user_ID) == 7 & input$agree) {
enable('login')
enable('signup')
} else {
disable('login')
disable('signup')
}
})
observeEvent(input$login, {
closeAlert(session, 'warning_alert')
if (!python.get(paste0('"', input$user_ID, '" in person.keys()'))) {
createAlert(session, anchorId = 'danger', alertId = 'warning_alert',
title = 'User Not Registered.',
content = 'You have not yet signed up. Please sign up now.',
style = 'warning', dismiss = TRUE)
} else {
if (python.call('add_new_user', input$user_ID, input$user_name) %like% 'match') {
createAlert(session, anchorId = 'warning', alertId = 'warning_alert',
title = 'Incorrect Entry',
content = 'Input GTS_ID and Name do not match',
style = 'danger', dismiss = TRUE)
} else {
shinyjs::hide(id = 'login_page', anim = TRUE)
shinyjs::show(id = 'search_bar', anim = TRUE)
shinyjs::show(id = 'self_profile', anim = TRUE)
# shinyjs::show(id = 'managers_profile', anim = TRUE)
# shinyjs::show(id = 'connection_profile', anim = TRUE)
# shinyjs::show(id = 'direct_report_profile', anim = TRUE)
# hide(id = 'self_profile', anim = TRUE)
#shinyjs::show(id = 'managers_profile', anim = TRUE)
}
}
})
# observeEvent(input$signup, {
# closeAlert(session, 'warning_alert')
# if (python.call('add_new_user', input$user_ID, input$user_name) %like% 'already') {
# createAlert(session, anchorId = 'warning', alertId = 'warning_alert',
# title = 'User Already Registered.',
# content = 'You have already signed up. Try logging in.',
# style = 'warning', dismiss = TRUE)
# } else {
# shinyjs::hide(id = 'login_page', anim = TRUE)
# shinyjs::show(id = 'search_bar', anim = TRUE)
# shinyjs::show(id = 'self_profile', anim = TRUE)
# }
# })
#
current_click <- reactiveValues(type = NULL, id = NULL, self = 'Yes', anchor = NULL, manager = NULL)
anchor <- reactive({
print(self())
if (!is.null(self()$id)) {
current_click$anchor <- self()$id
} else {
current_click$anchor <- input$user_ID
}
manager = python.get(paste0('person[', char(current_click$anchor), ']["Manager"]'))
return(list('anchor' = current_click$anchor, 'manager' = manager))
})
# observe({
# self <<- callModule(self_profile_, id = 'self_page', self = input$user_ID, is_manager = python.get(paste0(char(input$user_ID), ' in manager.keys()')))
# anchor_manager <<- callModule(manager_profile_, id = 'manager', manager = anchor()$manager)
# anchor_connections <<- callModule(associate_names_, id = 'Conn', Uid = anchor()$anchor, type = 'connections')
# anchor_directs <<- callModule(associate_names_, id = 'DR', Uid = anchor()$anchor, type = 'DR')
# })
#
# observe({
# if (is.null(self()$id)) {
# current_click$anchor <- input$user_ID
# } else {
# current_click$anchor <- self()$id
# }
# current_click$self <- ''
# print(self())
# print(paste('anchor', anchor()))
# })
#
# observe({
# print(anchor())
# })
#
callModule(self_profile_, id = 'self_page', self = input$user_ID, is_manager = python.get(paste0(char(input$user_ID), ' in manager.keys()')))
#
#
# observe({
# if (!is.null(s()$id)) {
# current_click$id <- s()$id
# current_click$type <- s()$type
# current_click$self <- 'No'
# }
# })
#
# new <- eventReactive(current_click, {
# if (current_click$type == 'Manager') {
# show(id = 'managers_profile', anim = TRUE)
# hide(id = 'connection_profile', anim = TRUE)
# hide(id = 'direct_report_profile', anim = TRUE)
# hide(id = 'self_profile', anim = TRUE)
# s1 <- callModule(manager_profile_, id = 'manager', manager = s()$id)
# }
# if (current_click$type == 'Connection') {
# hide(id = 'managers_profile', anim = TRUE)
# show(id = 'connection_profile', anim = TRUE)
# hide(id = 'direct_report_profile', anim = TRUE)
# hide(id = 'self_profile', anim = TRUE)
# s1 <- callModule(associate_names_, id = 'Conn', Uid = s()$id, type = 'connections')
# }
# if (current_click$type == 'Directs') {
# hide(id = 'managers_profile', anim = TRUE)
# hide(id = 'connection_profile', anim = TRUE)
# show(id = 'direct_report_profile', anim = TRUE)
# hide(id = 'self_profile', anim = TRUE)
# s1 <- callModule(associate_names_, id = 'DR', Uid = s()$id, type = 'DR')
# }
# return(s1)
# })
#
#
# change_current_value <- eventReactive(new(), {
# s <- new()
# print('esss')
# print(s())
# current_click$id <- s()$id
# current_click$type <- s()$type
# return(new())
# })
#
# observe({
# print(paste('observe', current_click$type))
# print(paste('observe', current_click$id))
# if (!is.null(current_click$type) && !is.null(current_click$id) && current_click$self == 'No') {
# s1 <- new()
# change_current_value()
# } else {
# print('else')
# }
# })
# observe({
# print('observe')
# print(current_click$id)
# print(current_click$type)
# print(type())
# print(id())
# })
})
|
b60a0b789623b93d91b469c5461c68fb2b0cdf23
|
20a14c8e7e54196ec8fe97157b52a8a7789ab349
|
/wrangling/joining_data.R
|
0ed0194101d25826a3a7a11f0ed35f49816245b4
|
[] |
no_license
|
azbenoit/EDX
|
f31235250f69d331ff675087fd3dc7626ee27321
|
5bd741b10ea0cef4542fb6d162930937fa321048
|
refs/heads/master
| 2022-11-22T13:55:54.653207
| 2020-07-30T22:32:26
| 2020-07-30T22:32:26
| 261,275,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 609
|
r
|
joining_data.R
|
library(tidyverse)
library(Lahman)
top <- Batting %>%
filter(yearID == 2016) %>%
arrange(desc(HR)) %>% # arrange by descending HR count
slice(1:10) # take entries 1-10
top %>% as_tibble()
Master %>% as_tibble()
top_names <- top %>%
left_join(Master, "playerID") %>%
select(playerID, nameFirst, nameLast, HR)
top_salary <- Salaries %>%
filter(yearID == 2016) %>%
right_join(top_names) %>%
select(nameFirst, nameLast, teamID, HR, salary)
Awards <- AwardsPlayers %>%
filter(yearID == 2016)
non_top_awards <- anti_join(Awards, top_names)
nrow(semi_join(Master, non_top_awards))
|
2f15b74feb8bd89edbb1efd047ef733e3ff2c517
|
afd08df66beef2f756ead9d2af095b5a6f0aee8c
|
/R/gather.PharmacoSet.R
|
f51a809dca0fe0e56d511ae41cddccf417b94512
|
[] |
no_license
|
chapmandu2/tidyMultiAssay
|
05ee00642684c5813cb7d05fdfc65bd62ba72993
|
59d1388c87e8ce68f5b52e38315fa2b5bf37e5ad
|
refs/heads/master
| 2021-01-23T03:37:31.209263
| 2017-01-06T08:54:27
| 2017-01-06T08:54:27
| 64,685,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,404
|
r
|
gather.PharmacoSet.R
|
#' @title
#' Convert a PharmacoSet object into tidy format
#'
#' @description
#' Convert a \linkS4class{PharmacoSet} object into tidy format. This is essentially a wrapper function
#' for \code{\link{gather_response.PharmacoSet}} and \code{\link{gather_assay.PharmacoSet}}
#'
#' @param x PharmacoSet object
#' @param sample_ids A vector of sample ids. Default is NULL (don't filter on sample id)
#' @param gene_ids A vector of gene ids. Default is NULL (don't filter on gene id)
#' @param resp_ids A vector of response ids. Default is NULL (don't filter on response id)
#' @param sample_col Name of the column in the pData data frame to use for filtering on sample id
#' @param gene_col Name of the column in the rowData data frame to use for filtering on gene id
#' @param resp_col Response variable to retrieve
#' @param data_types Names of the components of the PharmacoSet object to gather
#' @param resp_df Data frame to process response data from (rather than PharmcaGx object)
#'
#' @return a data frame in tall_df format
#' @export gather.PharmacoSet
#'
#' @examples
#' data('CCLEsmall', package='PharmacoGx')
#'
#' gather.PharmacoSet(CCLEsmall, sample_ids=c('143B', '23132-87'), gene_ids=c('BRAF', 'EGFR'),
#' data_types=c('rna', 'mutation'), gene_col=c('Symbol', 'Symbol'))
#'
#' gather.PharmacoSet(CCLEsmall, sample_ids=c('CHL-1', 'SW1573'), gene_ids=c('BRAF', 'EGFR'),
#' resp_ids=c("AEW541","Nilotinib","PHA-665752","lapatinib"),
#' data_types=c('rna', 'mutation'), gene_col=c('Symbol', 'Symbol'), resp_col='ic50_published')
#'
#'
#'
gather.PharmacoSet <- function(x, sample_ids=NULL, gene_ids=NULL, resp_ids=NULL,
sample_col='cellid', gene_col=c('Symbol', 'gene_name'),
resp_col='ic50_published', data_types=c('rna', 'mutation'), resp_df=NULL) {
stopifnot(length(gene_col) == length(data_types))
genetic_data <- purrr::map2(data_types, gene_col, function(z1, z2) {
gather_assay.PharmacoSet(x, sample_ids=sample_ids, gene_ids=gene_ids,
data_type=z1, sample_col=sample_col, gene_col=z2)
}) %>%
dplyr::bind_rows()
if(is.null(resp_ids)) {
return(genetic_data)
} else {
drug_data <- gather_response.PharmacoSet(x, sample_ids=sample_ids, resp_ids=resp_ids, resp_col=resp_col)
dplyr::bind_rows(genetic_data, drug_data)
}
}
|
df60569aaebf7bdcd5a36db1cc01d72d38d7f1e8
|
28d8c54ff161be6c0123748935c5c88124fea7b8
|
/data-raw/create_test_data.R
|
da35fae9bacf5c757c3ec0773c33eb1a8c2623bc
|
[] |
no_license
|
lawinslow/LAGOSNE
|
1b1fd7b7574a1e5cf9c87b6c28d30c3008255b41
|
b68fff7dc4862e90b5f178a545e46e6ecf5f97fb
|
refs/heads/master
| 2020-03-08T16:48:32.275788
| 2018-03-26T19:33:06
| 2018-03-26T19:33:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,345
|
r
|
create_test_data.R
|
# Create a testing dataset from compiled LAGOS database rds
# LAGOS:::lagos_compile("1.087.1", "rds")
dt <- LAGOSNE::lagosne_load("1.087.1")
dt_subset <- purrr::map(dt, function(x) head(x, n = 2))
# manually add rows for lake_info test
dt_subset$state <- rbind(dt_subset$state,
dt$state[which(dt$state$state == "MA"),])
# names(dt_subset)
exclude_names <- c("lakes4ha.buffer100m",
"lakes4ha.buffer100m.lulc",
"lakes4ha.buffer500m.conn",
"lakes4ha.buffer500m.lulc",
"lakes4ha.buffer500m",
"hu8",
"hu8.chag",
"hu8.conn",
"hu8.lulc",
"hu12",
"hu12.chag",
"hu12.conn",
"hu12.lulc",
"name",
"type",
"variables",
"observations",
"identifier",
"group",
"county",
"county.chag",
"county.conn",
"county.lulc")
dt_subset <- dt_subset[!(names(dt_subset) %in% exclude_names)]
saveRDS(dt_subset, "tests/testthat/lagos_test_subset.rds")
lg_subset <- dt_subset
devtools::use_data(lg_subset, overwrite = TRUE)
|
04ffcb4963e596f4708c7cf3f9eefbb1b6331491
|
e85a80ba4df857d2cccc3884a782be50833ea7f0
|
/05.transcription_factors/04.expression_correlation/two_gene_correlation_all.R
|
7aa8ecc6c28242b791deb1f1694e0b01fb0497db
|
[] |
no_license
|
zengxiaofei/monolignol-biosynthesis
|
4c9ffc2cc9b69c4056a114dadb8c2de16e73fddb
|
c16862fc70bf32aa679eb792cb973f3f0c5c1e28
|
refs/heads/master
| 2023-01-11T17:45:54.555382
| 2020-11-05T03:06:30
| 2020-11-05T03:06:30
| 272,724,214
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 801
|
r
|
two_gene_correlation_all.R
|
#!/usr/bin/env Rscript
args <- commandArgs(T)
library(ggplot2)
gene1 <- args[1]
gene2 <- args[2]
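# Hypothetical invocation (gene IDs must match row names of the expression matrix):
#   Rscript two_gene_correlation_all.R GENE1 GENE2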
md <- read.table("RSEM.gene.TMM.EXPR.annotated.CCR1_2.matrix", header=T)
tmd <- as.data.frame(t(md))
sp <- cor.test(tmd[[gene1]], tmd[[gene2]], method="spearman")
pval <- sp$p.value
rho <- sp$estimate
# print to screen
anno <- paste("Spearman's rho = ", rho, ", p-value = ", pval, sep="")
anno
pval <- signif(pval, 2)
rho <- signif(rho, 2)
anno <- paste("Spearman's rho = ", rho, ", p-value = ", pval, sep="")
pdf(paste(paste(gene1, gene2, sep="_"), ".pdf", sep=""), width=5, height=5)
ggplot(tmd, aes_string(x=gene1, y=gene2)) +
geom_point() + geom_smooth() +
annotate("text", -Inf, Inf, label=anno, hjust=-0.5, vjust=2) +
theme_bw()
dev.off()
|
0b2452b69422c17f681dc4f78ac9b6ba6a9a7f9b
|
3a94e79e085df45b5bcf25e7778a26daee82ad53
|
/man/plot_risk_premium_en.Rd
|
37085ce4638d37e3e22deef31bdaeb11c6cc8771
|
[
"MIT"
] |
permissive
|
cran/ceRtainty
|
bac5e856ec90963f4d72f9a2d0abbc50fdff6202
|
f43c62da68d3c6bb6ae256c8c87cd4186d60b866
|
refs/heads/master
| 2020-12-21T21:32:37.227829
| 2019-06-14T13:40:03
| 2019-06-14T13:40:03
| 236,569,284
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 729
|
rd
|
plot_risk_premium_en.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_risk_premium_en.R
\name{plot_risk_premium_en}
\alias{plot_risk_premium_en}
\title{Plot of the Risk Premium values using Exponential Negative Utility Function}
\usage{
plot_risk_premium_en(data, rac_ini, rac_fin, rac_len)
}
\arguments{
\item{data}{\code{data.frame} of CE computed by Exponential Negative function}
\item{rac_ini}{Initial RAC values used in the CE computation}
\item{rac_fin}{Final RAC values used in the CE computation}
\item{rac_len}{Length of the RAC vector used in the CE computation}
}
\value{
Plot object
}
\description{
Plot of the Risk Premium values using Exponential Negative Utility Function
}
|
7393dcba81010504c594e5a509d8ef8e7b0d8076
|
ef4c19bbcd2bb4915aa17654c7760fe0e6e14e5c
|
/man/prostate.Rd
|
0f383a8f26046225f4edb524fd6ae85c97481c61
|
[] |
no_license
|
3shmawei/faraway
|
ee4f1442e5b28de2c4e436c57a7c34f2f3065a2b
|
dc770de1a2d55d31a217e14c911495644ea17b3f
|
refs/heads/master
| 2020-03-23T18:10:35.105916
| 2016-02-15T15:07:03
| 2016-02-15T15:07:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 923
|
rd
|
prostate.Rd
|
\name{prostate}
\alias{prostate}
\title{Prostate cancer surgery}
\description{The \code{prostate} data frame has 97 rows and 9 columns.
A study on 97 men with prostate cancer who were due to receive
a radical prostatectomy.
}
\usage{
data(prostate)
}
\format{
This data frame contains the following columns:
\describe{
\item{\code{lcavol}}{
log(cancer volume)
}
\item{\code{lweight}}{
log(prostate weight)
}
\item{\code{age}}{
age
}
\item{\code{lbph}}{
log(benign prostatic hyperplasia amount)
}
\item{\code{svi}}{
seminal vesicle invasion
}
\item{\code{lcp}}{
log(capsular penetration)
}
\item{\code{gleason}}{
Gleason score
}
\item{\code{pgg45}}{
percentage Gleason scores 4 or 5
}
\item{\code{lpsa}}{
log(prostate specific antigen)
}
}}
\source{Andrews DF and Herzberg AM (1985): Data. New York: Springer-Verlag}
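% Hypothetical minimal example, added for illustration (not in the original Rd file):
\examples{
data(prostate)
summary(prostate$lpsa)
}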
\keyword{datasets}
|
5c0fb9d13e862293229b5a771de0a0bd56ea1213
|
285d35f03c59b3eca8a8639e30009cc020ee7fbf
|
/00-metadata/HemisphereChart.R
|
5087979dd93f066f0c8b2c7c66bbaa35a70c6ecd
|
[] |
no_license
|
octopode/cteno-lipids-2021
|
5a039c0506111cc3bbd6e689bd7bf02ac585dc1c
|
b2c66cf59149e5c79918bdcc317303a23e59e338
|
refs/heads/master
| 2023-03-26T07:59:36.219753
| 2021-03-12T18:19:25
| 2021-03-12T18:19:25
| 289,368,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
HemisphereChart.R
|
library(tidyverse)
library(ggrepel)
library(ggpubr)
# plot that lonely northern hemisphere
land <- map_data("world") %>%
filter(
(region %in% c(
"Greenland",
"Iceland",
"Canada",
"USA",
"Mexico",
"Guatemala",
"Belize",
"Honduras",
"El Salvador",
"Nicaragua",
"Costa Rica"
)
) |
(subregion %in% c(
"Svalbard"
)
)
) %>%
filter(long < 75)
ggplot() +
geom_polygon(
data = land,
aes(x=long, y=lat, group=group),
fill = "white"
) +
coord_map(projection = "ortho", orientation = c(45, -120, 0))
|
8737b7ad3adc7d8cfc14ef229f68bb1ec4d795cd
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9043_0/rinput.R
|
5cd176e81ccc82b2fffc244bc97c6c47ff3460e3
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("9043_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9043_0_unrooted.txt")
|
20c11f00e8e6b423ee86a89ba4cf104cb88724dc
|
8ad96eb29e6839e486a6d2cd72a4a93bab45d57d
|
/script/subscript/calculatePP.R
|
20a171bb48bd5adcb6c2b51f864f979dbb934aa3
|
[
"MIT"
] |
permissive
|
hoangtn/DECO
|
4ca3049875db954eca9044397dfe59fda8ae638b
|
864bb0e9ac163717739536c37688479a05e0a826
|
refs/heads/master
| 2023-08-06T14:37:53.698455
| 2021-09-24T00:13:09
| 2021-09-24T00:13:09
| 272,843,394
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,081
|
r
|
calculatePP.R
|
##Calculate posterior probabilities
calculatePP <- function(pars,
caseData = NULL,
controlData = NULL,
dnData = NULL,
mutData = NULL,
geneName, geneSet){
outData <- data.frame(geneName, geneSet)
colnames(outData) <- c("GENE", paste0("GeneSet", 1:dim(geneSet)[2]))
if (!is.null(dnData))
outData <- cbind(outData, dnData)
if (!is.null(mutData))
outData <- cbind(outData, mutData)
if (!is.null(caseData))
outData <- cbind(outData, caseData)
if (!is.null(controlData))
outData <- cbind(outData, controlData)
##Calculate pi
sumGeneSet = rep(pars$alphaValue[1], length(geneName))
for (j in 1:dim(geneSet)[2])
sumGeneSet = sumGeneSet + pars$alphaValue[j+1]*geneSet[, j]
pi0 = exp(sumGeneSet)/(1 + exp(sumGeneSet))
bfAll <- rep(1, dim(outData)[1])
if ( length(pars$gammaMeanDN) == 0) {
message("No parameters for de novo data; therefore, these categories are not calculated in this step.\n")
} else {
bfDN <- matrix(1, nrow = dim(dnData)[1], ncol = dim(dnData)[2]) ##De novo bayes factors
for (j2 in 1:dim(bfDN)[2]) {
e.hyperGammaMeanDN <- pars$gammaMeanDN[j2]
e.hyperBetaDN <- pars$betaDN[j2]
e.bf <- bayes.factor.denovo(x = dnData[, j2],
N = pars$nfamily[j2],
mu = mutData[, j2],
gamma.mean = e.hyperGammaMeanDN,
beta = e.hyperBetaDN)
bfDN[, j2] <- e.bf
}
bfAll <- bfAll*apply(bfDN, 1, prod)
}
if (length(pars$gammaMeanCC) == 0) {
message("No parameters for case-control data; therefore, these categories are not calculated in this step.\n")
} else {
bfCC <- matrix(1, ncol = dim(caseData)[2], nrow = dim(caseData)[1])
for (cc3 in 1:dim(bfCC)[2]){
e.hyperGammaMeanCC <- pars$gammaMeanCC[cc3]
e.hyperBetaCC <- pars$betaCC[cc3]
e.nu <- 200
t.case <- caseData[, cc3]
t.control <- controlData[, cc3]
e.rho <- e.nu*mean(t.case + t.control)/(pars$ncase[cc3] + pars$ncontrol[cc3])
e.bf <- BayesFactorCC3(Nsample = list(ca = pars$ncase[cc3], cn = pars$ncontrol[cc3]),
x.case = t.case, x.control = t.control,
gamma.meanCC = e.hyperGammaMeanCC, betaCC = e.hyperBetaCC,
rhoCC = e.rho, nuCC = e.nu)
# e.bf0 <- bayes.factor.CC(x
bfCC[, cc3] <- e.bf
}
bfAll <- bfAll*apply(bfCC, 1, prod)
}
outData[, 'pi0'] <- pi0
outData$BF <- bfAll
outData[, 'PP'] <- pi0*bfAll/(pi0*bfAll + (1 - pi0))
# outData <- outData[order(-outData$BF),]
# outData$qvalue <- Bayesian.FDR(outData$BF, 1 - pars$pi0)$FDR
return(outData)
}
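# A minimal sketch of the gene-set prior (pi0) computation used above, with
# hypothetical numbers (3 genes, 1 gene set); not from the original package:
# alpha <- c(-2, 1.5)                      # pars$alphaValue
# geneSet <- matrix(c(0, 1, 1), ncol = 1)  # gene-set membership indicator
# s <- alpha[1] + alpha[2] * geneSet[, 1]
# exp(s) / (1 + exp(s))                    # prior probability pi0 per gene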
|
cadbae3e6bf8fc2ee3a6744caf5f50dee59d3a0e
|
087f9b379877bc87466f8b24d97be9632d46ed2f
|
/DataCode.R
|
feaad97124a726e3c04e454afc83540b70ace391
|
[] |
no_license
|
Soma586/Data
|
e407904a7e382ec81bbd123be15bc51b07bec372
|
a469b2eba7d7f86ba455b3bce396074008eee10f
|
refs/heads/master
| 2020-03-29T08:08:47.364818
| 2018-12-13T16:56:47
| 2018-12-13T16:56:47
| 149,696,017
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
DataCode.R
|
pro <-read.csv("FHVData.csv")
x = as.numeric(pro$American)
mean(x)
sd(x)
y = as.numeric(pro$Uber)
mean(y)
sd(y)
z = as.numeric(pro$Highclass)
mean(z)
sd(z)
hist(x, main = "Histogram of Pro$American")
plot(z, main = "Plot of high class", ylab = "frequency") # ylab, not y: plot()'s y argument is the y-coordinate data
plot(y, main = "Plot of Pro$Uber", ylab = "frequency")
|
b3622870f9dc95f92c64e4e4a1e53c3ee8bc2285
|
aac4e0c44de4aa89ca0081f2de4d113897ee9c65
|
/man/scotland_births.Rd
|
ac74221b97d15148d380cf0d804f2f224f8b3b3c
|
[] |
no_license
|
TKosfeld/fosdata
|
c2c39db8cc7a62ebfca48ba87b30c4ebb9a3eee9
|
7353861f61ae094c474a8e1a4d698f4919333ba6
|
refs/heads/master
| 2023-08-01T11:53:33.944662
| 2021-09-15T04:12:52
| 2021-09-15T04:12:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 684
|
rd
|
scotland_births.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scotland_births.R
\docType{data}
\name{scotland_births}
\alias{scotland_births}
\title{Births in Scotland}
\format{
A data frame with 45 observations of 76 variables.
\describe{
\item{age}{Age of the mother, in years}
\item{x****}{Year of birth of baby; 75 columns of this type from 1945-2019.}
}
}
\source{
National Records of Scotland, https://www.nrscotland.gov.uk/statistics-and-data/statistics/statistics-by-theme/vital-events/births/births-time-series-data
}
\usage{
scotland_births
}
\description{
The number of babies born in Scotland from 1945-2019, by age of the mother.
}
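% Hypothetical minimal example (not in the original Rd file); assumes the
% year columns are named x1945..x2019 as described above:
\examples{
head(scotland_births[, c("age", "x2019")])
}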
\keyword{datasets}
|
dd412b8fab20ef3b30e9e8ea6247f880dfbe3cb5
|
504f277ca5e3d508ff047d24944ab6550ed8e431
|
/pairwiseAcf.R
|
4232dc5909e0f6450698d95591deece108fa0d5f
|
[] |
no_license
|
srivathsesh/DengueAI
|
ca99c032920acb5bd17fb025f8c45d23c6d2c99e
|
54cbe790404b72ee94bdbe79996a708c06373ff2
|
refs/heads/master
| 2020-03-18T23:48:24.731364
| 2018-06-11T19:29:24
| 2018-06-11T19:29:24
| 135,429,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,263
|
r
|
pairwiseAcf.R
|
#****************************************************************
# Optimal lags decision
#****************************************************************
library(purrr)     # map_df()
library(magrittr)  # the %>% pipe
# forecast is called via forecast::Ccf(), so it only needs to be installed

pairwiseAcf <- function(x, y, varnames) {
bestlag <- function(p) {
# browser()
cf.df <- forecast::Ccf(y[,1], x[, p], plot = T,lag.max = 10)
data.frame(
predictor = p,
lags = which.max(abs(cf.df[["acf"]][11:21, 1, 1]))-1,
correlation = abs(cf.df[["acf"]][11:21, 1, 1])[which.max(abs(cf.df[["acf"]][11:21, 1, 1]))]
)
}
varnames %>% map_df(bestlag)
}
varnames <- c('reanalysis_relative_humidity_percent',
'reanalysis_precip_amt_kg_per_m2',
'reanalysis_max_air_temp_k',
"ndvi_ne","ndvi_nw",
"ndvi_se",
"ndvi_sw",
"reanalysis_sat_precip_amt_mm",
"reanalysis_tdtr_k")
lagstouse <- pairwiseAcf(ts(x.train,start = c(1990,18), frequency = 52),ts(y,start = c(1990,18), frequency = 52),varnames)
#lagCcf(tsTotalcasesSj,ts(trainImputedSj$reanalysis_relative_humidity_percent,start = c(1990,18), frequency = 52))
lagstouse <- pairwiseAcf(ts(x.train.diff,start = c(1990,19), frequency = 52),ts(y.boxcox.diff,start = c(1990,18), frequency = 52),varnames)
|
6838502364ef51da33704d5fbf365525389fcb1c
|
b5ebe52bb3594b62395ff452235fed8c8be477b8
|
/tsa_project/main/R/latest.r
|
91f7ba9c8bed881fee5da72b671c7dc9b2a69772
|
[] |
no_license
|
Steven-Sakurai/Math-Potential-Well
|
1e69992ca867d85d05af756ca471cf64627a9f20
|
01c5bdf1964b363b4455c6da4c9879aff1b66846
|
refs/heads/master
| 2021-01-20T01:17:16.833077
| 2017-06-18T15:31:26
| 2017-06-18T15:31:26
| 89,248,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 743
|
r
|
latest.r
|
library(curl)
library(forecast)
library(TSA)
library(rugarch)
tmpf <- tempfile()
curl_download("http://www.metoffice.gov.uk/hadobs/hadcrut4/data/current/time_series/HadCRUT.4.5.0.0.monthly_ns_avg.txt", tmpf)
gtemp <- read.table(tmpf)[, 1:2]
gtemp = as.numeric(gtemp$V2)
mytemp = gtemp[600:1980]
plot.ts(mytemp)
acf(mytemp)
pacf(mytemp)
m1 = auto.arima(mytemp, seasonal = TRUE)
m2 = arima(mytemp, order = c(3, 1, 1), seasonal = list(order = c(1, 0, 1), period = 24))
res2 = residuals(m2)
acf(res2)
m3 = arfima(mytemp)
res3 = residuals(m3)
res4 = residuals(arima(res3, seasonal = list(order = c(1, 0, 1), period = 24)))
# seasonal adjustment
atemp = diff(mytemp, lag = 24)
m5 = auto.arima(atemp)
res5 = residuals(m5)
acf(res5)
pacf(res5)
|
ce65d4d9c7ce818c2de072fe0b4ea237608bc947
|
28767234045ae57efe03a7d9949f75c01388ce14
|
/ImageCollection/02_metadata.R
|
998b055928488969db45602c3678d62f59387544
|
[
"MIT"
] |
permissive
|
yangxhcaf/rgee-examples
|
596ef77d80b33be402e1c4e5bbb76dba0a135fa6
|
9e2dc8c18a87ebf2bae9666830f9018a9ce3df1f
|
refs/heads/master
| 2021-02-12T04:39:26.594480
| 2020-02-29T09:41:16
| 2020-02-29T09:41:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
02_metadata.R
|
library(rgee)
ee_Initialize()
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee$ImageCollection('LANDSAT/LC08/C01/T1_TOA')$
filter(ee$Filter$eq('WRS_PATH', 44))$
filter(ee$Filter$eq('WRS_ROW', 34))$
filterDate('2014-03-01', '2014-08-01')
# print() takes a single object, so label and value are emitted separately
cat('Collection:\n')
print(collection$getInfo())
# Get the number of images.
count = collection$size()
cat('Count:', count$getInfo(), '\n')
# Get the date range of images in the collection.
range = collection$reduceColumns(ee$Reducer$minMax(), list("system:time_start"))
cat('Date range:\n')
print(ee$Date(range$get('min'))$getInfo())
print(ee$Date(range$get('max'))$getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection$aggregate_stats('SUN_ELEVATION')
cat('Sun elevation statistics:\n')
print(sunStats$getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee$Image(collection$sort('CLOUD_COVER')$first())
cat('Least cloudy image:\n')
print(image$getInfo())
# Limit the collection to the 10 most recent images.
recent = collection$sort('system:time_start', FALSE)$limit(10)
cat('Recent images:\n')
print(recent$getInfo())
|
849b103d1d49b847a2de93b097945d1085c94f95
|
4ee386eaa3017a4d8a83ff902ef45ef1572c92e9
|
/R help/XML/extract_XML_attributes.r
|
7d963c65f7ebb1fc70b4dc71b66370067d11be58
|
[] |
no_license
|
aayush26/My_Notes
|
3a733fad0cfd34495530d77082c1b96dff8a626e
|
5c53ce2bb5fef4c328c4118e9c4ab22b5c8a3068
|
refs/heads/master
| 2021-01-17T13:36:09.202631
| 2019-11-11T13:51:24
| 2019-11-11T13:51:24
| 39,803,397
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 379
|
r
|
extract_XML_attributes.r
|
library(XML)
file <- xmlTreeParse("Tags.xml",useInternalNodes = TRUE)
xmltop = xmlRoot(file) #access top-node
#xml tag attribute extract
Id <- xpathApply(xmltop,'//row',xmlGetAttr,"Id")
Count <- xpathApply(xmltop,'//row',xmlGetAttr,"Count")
ExcerptPostId <- xpathApply(xmltop,'//row',xmlGetAttr,"ExcerptPostId")
WikiPostId <- xpathApply(xmltop,'//row',xmlGetAttr,"WikiPostId")
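# Hypothetical follow-up (not in the original script): collect the extracted
# attributes into one data frame. Note xmlGetAttr() returns NULL where an
# attribute is missing, so unlist() may drop entries; check lengths first.
# tags <- data.frame(Id = unlist(Id), Count = unlist(Count),
#                    ExcerptPostId = unlist(ExcerptPostId),
#                    WikiPostId = unlist(WikiPostId))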
|
05c294e6db47514cba6270b211ea95c92faaf77a
|
5c0ad661c78c7d51fbcf73ab8d1f8459c76e78b2
|
/CH9#4.R
|
d77e15c150c0028db060649163712d714cc8df9b
|
[] |
no_license
|
nickblink/Intro.-to-Stat.-Learning-Codes
|
38b00eb44347e299527f74294331bc539eda5ba7
|
7b82a911ea5fb74107412e7a90cbc9fe90213003
|
refs/heads/master
| 2020-04-06T20:57:54.843322
| 2018-11-16T00:10:58
| 2018-11-16T00:10:58
| 157,788,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,605
|
r
|
CH9#4.R
|
# I guess all the previous stuff got deleted. Well screw it I'm gonna
# continue with 8. Whose gonna come back and look at this anyway?
library(ISLR)
D = Auto
D$mpg.level = ifelse(D$mpg>median(D$mpg),1,0)
D$mpg.level = as.factor(D$mpg.level)
D = D[,setdiff(colnames(D),'mpg')] # colnames, not Colnames
library(e1071)
tune.out = tune(svm,mpg.level~.-mpg,data=D,kernel="linear",
ranges = list(cost=c(0.001,0.01,0.1,1,5,10,100)))
summary(tune.out)
# error is pretty similar for .1, 1, 5, but is the lowest with cost=1 (error =.095)
# that's a pretty good error. = 90.5% classification accuracy.
svm.lin = svm(mpg.level ~., data = D, kernel='linear',cost=0.01)
tune.rad = tune(svm,mpg.level~.-mpg,data=D, kernel="radial",
ranges = list(cost = c(.01,1,10,100),
gamma = c(0.5, 1, 2, 3, 4)))
summary(tune.rad)
# min at cost = 1, gamma = 0.5. Error = .063.
# What's the role of gamma again? This will
# be a tight fit. Idk now. If I end up needing to use svms I should look
# into this more, but for now I will pass.
svm.rad = svm(mpg.level~.-mpg,data=D, kernel="radial",cost=10,gamma=0.5)
tune.poly = tune(svm,mpg.level~.-mpg,data=D, kernel="polynomial",
ranges = list(cost=c(.01, 1, 10, 100),
degree = c(1,2,3,4)))
summary(tune.poly)
# bad all around. It did best with degree 1, aka linear. Obvi similar
# results to linear (though slightly diff because of randomness).
svm.poly = svm(mpg.level~.-mpg,data=D, kernel="polynomial",degree=2,cost=1)
par(mfrow=c(1,3))
plot(svm.lin,D)
plot(svm.rad,D)
plot(svm.poly,D)
#####
rm(list=ls())
library(ISLR)
train.sample = sample(1070,800)
train = OJ[train.sample,]
test = OJ[-train.sample,]
svm.fit = svm(Purchase~.,data=train,cost=0.01,kernel='linear')
summary(svm.fit)
mean(train$Purchase==predict(svm.fit,train))
# 0.82 accuracy - weirdly lower than test, though they're
# close enough to be random. I guess the linear
# model is unlikely to overfit, esp. with all of this data.
mean(test$Purchase==predict(svm.fit,test))
# 0.84 accuracy. It's aite.
library(e1071)
tune.out = tune(svm, Purchase~.,data=train,kernel = 'linear',
ranges = list(cost = c(0.01,0.1,1,5,10)))
# (e) no point in this one - computing new training and test error rates
tune.out = tune(svm, Purchase~.,data=train,kernel = 'radial',
ranges = list(gamma = c(0.5,1,2,3,4),
cost = c(0.01,0.1,1,10)))
# best has error ~ 0.21
radial.best = svm(Purchase~.,data=train,kernel = 'radial',
gamma = 0.5, cost = 1)
radial.worst = svm(Purchase~.,data=train,kernel = 'radial',
gamma = 3, cost = .1)
mean(test$Purchase==predict(radial.best,test))
# 0.82
mean(test$Purchase==predict(radial.worst,test))
# 0.64. No bueno
tune.out = tune(svm, Purchase~.,data=train,kernel = 'polynomial',
ranges = list(degree = c(1,2,3,4),
cost = c(0.01,0.1,1,10)))
poly.best = svm(Purchase~.,data=train,kernel = 'polynomial',
degree = 1, cost = 1)
poly.worst = svm(Purchase~.,data=train,kernel = 'polynomial',
degree = 1, cost = .01)
mean(test$Purchase==predict(poly.best,test))
# 0.84
mean(test$Purchase==predict(poly.worst,test))
# 0.82
# Again we have a linear boundary is the best. Shows that it
# all depends on the data set. Radial does not do as well
# on this data set. I am really tired. I just want to go home
# and nap.
|
3b79f8460e26d2dd2087b711eb675ac2da845561
|
821045753bd189beebe239e346b1e3183be415b4
|
/tests/test_Bike_counter.R
|
e6bbc630a26e579751ffd060059ecce2456955f5
|
[
"MIT"
] |
permissive
|
flynn-d/BikeCount
|
9688bf4580bb117b7e44d6f5d1c3602e2011d8d7
|
4befb007d703cededb12cf72017f506737fff1c1
|
refs/heads/master
| 2021-06-18T11:43:29.411162
| 2020-12-06T02:21:19
| 2020-12-06T02:21:19
| 142,794,979
| 0
| 1
|
MIT
| 2020-12-06T02:21:20
| 2018-07-29T20:56:30
|
R
|
UTF-8
|
R
| false
| false
| 265
|
r
|
test_Bike_counter.R
|
# Testing the Bike Counter code
# Building off of these resources
# http://r-pkgs.had.co.nz/tests.html
# https://dzone.com/articles/example-unit-testing-r-code
# https://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf
source('Bike_counter_get.R')
|
98ec6bd0644e56997af91c37d44a6c7c22f23c55
|
dbe5615e3de9440d1e2ba4ee362feb6a457229b8
|
/Plot3_code.R
|
53e8e354aeb156a8ccdd35c9e189779c15c57d25
|
[] |
no_license
|
vucko83/ExData_Plotting1
|
f3d160643d7c16d870c0c496e808526b36371f19
|
2de3a8209c213d292b13083cb466ed5a7dc0b7b6
|
refs/heads/master
| 2021-01-17T10:27:04.671695
| 2014-10-12T22:28:41
| 2014-10-12T22:28:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,062
|
r
|
Plot3_code.R
|
power<-read.table("household_power_consumption.txt",header=T,sep=";")
power$Date<-as.Date(power$Date, format="%d/%m/%Y")
subpower <-power[(power$Date=="2007-02-01") | (power$Date=="2007-02-02"),]
subpower$Global_active_power <- as.numeric(as.character(subpower$Global_active_power))
subpower <- transform(subpower, timestamp = as.POSIXct(paste(Date, Time), format = "%Y-%m-%d %H:%M:%S")) # format moved inside as.POSIXct(); Date is ISO after as.Date()
subpower$Sub_metering_1 <- as.numeric(as.character(subpower$Sub_metering_1))
subpower$Sub_metering_2 <- as.numeric(as.character(subpower$Sub_metering_2))
subpower$Sub_metering_3 <- as.numeric(as.character(subpower$Sub_metering_3))
png(filename = "plot3.png", width = 480, height = 480, units = "px", bg = "transparent")
plot(subpower$timestamp,subpower$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(subpower$timestamp,subpower$Sub_metering_2, type="l", col="red")
lines(subpower$timestamp,subpower$Sub_metering_3, type="l", col="blue")
legend("topright",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1)
dev.off()
|
4388932ec223f5d1fb352602c06e26e7c2e6341c
|
a462a24ff937e151e8151f3a1bdc9c3714b12c0e
|
/creation/create_scripts_azzini.R
|
34239fe133301c2e9933512c11cf198e5ca2c61e
|
[] |
no_license
|
noeliarico/kemeny
|
b4cbcac57203237769252de2c50ce959aa4ca50e
|
50819f8bf0d19fb29a0b5c6d2ee031e8a811497d
|
refs/heads/main
| 2023-03-29T14:36:37.931286
| 2023-03-16T09:04:12
| 2023-03-16T09:04:12
| 330,797,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,243
|
r
|
create_scripts_azzini.R
|
scripts_azzini <- function(pors, n, file_name, rep = 0, type = "") {
imports <- c("
import pandas as pd
import numpy as np
from kemeny.scf.borda import *
from kemeny.scf.condorcet import *
import kemeny.scf.distance as dist
import kemeny.scf.initialization as init
import kemeny.azzinimunda.azzinimunda0 as am0
import kemeny.azzinimunda.azzinimunda1 as am1
import kemeny.azzinimunda.azzinimunda2 as am2
import kemeny.azzinimunda.azzinimunda3 as am3
import time
")
if(rep == 0) {
out <- c(imports, paste0('
results',n,' = np.zeros(0).reshape(0,8)
'))
for(i in 1:length(pors)) {
for(j in 1:length(pors[[i]])) {
if(!is.null(pors[[i]][[j]])) {
out0 <- "##############################################################"
out1 <- to_python_om(votrix(pors[[i]][[j]]))
out2 <- paste0('
# Algorithm without Condorcet
exec_time = 0
algorithm = am0.AzziniMunda0(om)
start_time = time.time()
sol = algorithm.execute()
exec_time = time.time() - start_time
result = np.array([',n,', ',i,', ',j,', 0, exec_time, sol.shape[0], algorithm.ntentative, "NULL"], dtype=np.dtype(object))
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out3 <- paste0('
# Algorithm with Condorcet winner
exec_time = 0
algorithm = am1.AzziniMunda1(om)
start_time = time.time()
sol = algorithm.execute()
exec_time = time.time() - start_time
result = np.array([',n,', ',i,', ',j,', 1, exec_time, sol.shape[0], algorithm.ntentative, algorithm.cwinner], dtype=np.dtype(object))
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out4 <- paste0('
# Algorithm with Condorcet winner
exec_time = 0
algorithm = am2.AzziniMunda2(om)
start_time = time.time()
sol = algorithm.execute()
exec_time = time.time() - start_time
result = np.array([',n,', ',i,', ',j,', 2, exec_time, sol.shape[0], algorithm.ntentative, algorithm.cwinner], dtype=np.dtype(object))
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out5 <- paste0('
# Algorithm with Condorcet winner
exec_time = 0
algorithm = am3.AzziniMunda3(om, float("inf"))  # float, not np.float (removed in NumPy >= 1.24)
start_time = time.time()
sol = algorithm.execute()
exec_time = time.time() - start_time
result = np.array([',n,', ',i,', ',j,', 3, exec_time, sol.shape[0], algorithm.ntentative, algorithm.cwinner], dtype=np.dtype(object))
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out <- c(out, out0, out1, out2, out3, out4, out5)
}
}
}
}
else {
out <- c(paste0(imports, '
rep = ',rep,'
results',n,' = np.zeros(0).reshape(0,8+rep)
'
))
for(i in 1:length(pors)) {
for(j in 1:length(pors[[i]])) {
if(!is.null(pors[[i]][[j]])) {
out0 <- "##############################################################"
out1 <- to_python_om(votrix(pors[[i]][[j]]))
out2 <- paste0('
times = np.zeros(rep)
for i in range(rep):
# Algorithm without Condorcet
algorithm = am0.AzziniMunda0(om)
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([',n,', ',i,', ',j,', 0, exec_time, sol.shape[0], algorithm.ntentative, "NULL"], dtype=np.dtype(object)), times)
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out3 <- paste0('
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = am1.AzziniMunda1(om)
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([',n,', ',i,', ',j,', 1, exec_time, sol.shape[0], algorithm.ntentative, algorithm.cwinner], dtype=np.dtype(object)), times)
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out4 <- paste0('
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = am2.AzziniMunda2(om)
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([',n,', ',i,', ',j,', 2, exec_time, sol.shape[0], algorithm.ntentative, algorithm.cwinner], dtype=np.dtype(object)), times)
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out5 <- paste0('
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = am3.AzziniMunda3(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([',n,', ',i,', ',j,', 3, exec_time, sol.shape[0], algorithm.ntentative, algorithm.cwinner], dtype=np.dtype(object)), times)
print(result[:7])
results',n,' = np.vstack((results',n,', result))
')
out <- c(out, out0, out1, out2, out3, out4, out5)
}
}
}
}
out <- c(out, paste0('
pd.DataFrame(results',n,').to_csv("results',n,type,'_azzini.csv")'))
out <- paste(out, collapse = "\n")
sink(file_name)
cat(out)
sink()
}
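# Hypothetical usage sketch (pors, votrix() and to_python_om() are defined
# elsewhere in this project; file name and repetition count are illustrative):
# scripts_azzini(pors, n = 5, file_name = "experiments_n5.py", rep = 10, type = "_rep")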
|
b7f4e07a772a0d3a6ab3a2967dc196acb2adb382
|
c9f3369c749e5a3cfebaa96dc1b484e72a3dd7d0
|
/man/brmle.Rd
|
543d05ba72917ac569d158d07c22816a1b165333
|
[] |
no_license
|
amoloudi/R-PKG-Distributions
|
128ff992a10a0da915f27aa941d2f9d476ad9e9a
|
20daa9657d6833cb7aff2ac9194340504c1f0270
|
refs/heads/master
| 2021-09-02T19:18:04.772144
| 2018-01-03T19:13:48
| 2018-01-03T19:13:48
| 115,752,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
rd
|
brmle.Rd
|
\name{brmle}
\alias{brmle}
\title{
bernoulli estimation
}
\description{
finds parameter of bernoulli distribution given as input
}
\usage{
brmle(x)
}
\arguments{
\item{x}{
x is a vector contains a set of bernoulli numbers
}
}
\details{
The maximum likelihood estimate of p is the sample mean of the x values.
}
\value{
return value is the parameter p of bernoulli distribution
}
\references{
https://www.projectrhea.org/rhea/index.php/Maximum_Likelihood_Estimation_Analysis_for_various_Probability_Distributions}
\author{
A.Moloudi
}
\note{
Nothing to note!
}
\seealso{
bimle.R
}
\examples{
x <- read.delim("DATA.txt", header=TRUE, sep="\t")
res = brmle(x)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~brmle }% use one of RShowDoc("KEYWORDS")
\keyword{ ~bernoullimle }% __ONLY ONE__ keyword per line
|
3ff2ce4247b9dc35c6baf511ce6fe11874751ba9
|
33c3502bf722baa887d932a7a42d0ab1456ea450
|
/Task3R.r
|
56805206894b65090341be32f7c55373a4c8e262
|
[] |
no_license
|
aasthas10/TSF-Task3-R
|
043e6269beb7dffd9d92b1fd2e6766dd9274b428
|
4527e027c1a5f3a84ec000461ce7f4137856f15e
|
refs/heads/main
| 2023-05-08T15:30:25.271553
| 2021-05-09T17:11:35
| 2021-05-09T17:11:35
| 365,224,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,367
|
r
|
Task3R.r
|
library(dplyr)
library(ggplot2)
library(tidyr)
library(shiny)
library(plotly)
library(corrr)
library(treemap)
superstore <- read.csv("C:\\Users\\ADMIN\\Downloads\\SampleSuperstore.csv")
head(superstore)
tail(superstore)
summary(superstore)
# Check for missing values (is.null() on a data frame is always FALSE, so use is.na())
sum(is.na(superstore))
# Remove duplicate rows, and drop the Country and Postal.Code columns,
# which are not needed for the analysis below
data <- superstore %>%
distinct() %>%
select(-c(Country, Postal.Code))
data
x <- data %>%select(Sales, Quantity, Discount, Profit)
corr_var <- correlate(x, method = 'pearson',use = "pairwise.complete.obs", diagonal = 1)
corr_var
y<- data %>%select(Sales, Quantity, Discount, Profit)
cov_var <- cov(y)
cov_var
summary(data$Sales)
statewise_sales <- data %>%
group_by(State) %>%
summarise(total_sales = sum(Sales)) %>%
arrange(desc(total_sales))
statewise_sales
ggplot(data, aes( x= State, y= Sales, fill= State),options(scipen=999)) +
geom_col()+
ggtitle("Statewise Sales Analysis") +
coord_flip() +
theme(legend.position = "None", axis.text.y = element_text(size=6))
regionwise_sales = data %>%
group_by(Region) %>%
summarize(totalS= sum(Sales)) %>%
arrange(desc(totalS))
regionwise_sales
ggplot(data, aes( x= Region, y= Sales, fill= Region),options(scipen=99)) +
geom_col()+
ggtitle("Regionwise Sales Analysis") +
theme(legend.position = "None", axis.text.y = element_text(size=6))
Statewise_profit = data%>%
group_by(State)%>%
summarise(totalP= sum(Profit))%>%
arrange(desc(totalP))
Statewise_profit
ggplot(data, aes( x= State, y= Profit, fill= State),options(scipen=99)) +
geom_col()+
ggtitle("Statewise Profit Analysis") +
coord_flip() +
theme(legend.position = "None", axis.text.y = element_text(size=6))
regionwise_profit = data %>%
group_by(Region) %>%
summarize(totalP= sum(Profit)) %>%
arrange(desc(totalP))
regionwise_sales
ggplot(data, aes( x= Region, y= Profit, fill= Region),options(scipen=99)) +
geom_col()+
ggtitle("Regionwise Profit Analysis") +
theme(legend.position = "None", axis.text.y = element_text(size=6))
BarPlot = data %>%
group_by(State) %>%
summarize(sales_profit_ratio= sum(Profit)/sum(Sales)) %>%
arrange(desc(sales_profit_ratio))
BarPlot
ggplot(BarPlot, aes( x= sales_profit_ratio, y= State, fill= State),options(scipen=99)) +
geom_col()+
ggtitle("Statewise Sales-Profit Ratio Analysis ") +
theme(legend.position = "None", axis.text.y = element_text(size=6))
Segment_analysis = data %>%
group_by(Segment) %>%
summarize(ratio= sum(Profit)/sum(Sales)) %>%
arrange(desc(ratio)) %>%
ggplot( aes( x= Segment, y= ratio, fill= Segment),options(scipen=99)) +
geom_col()+
ggtitle("Profit-Sales Ratio analysis for each Segment ")
Segment_analysis
category_s = data %>%
group_by(Category) %>%
summarize(Sales=sum(Sales))
pct <- round((category_s$Sales/sum(category_s$Sales))*100)
lbls <- paste(category_s$Category , pct)
lbls <- paste(lbls, "%", sep = " ")
pie(category_s$Sales, labels = lbls, main =" Percentage sales by Category ", col= c('darkmagenta','plum','plum4'))
treemap(data, index = c("Category","Sub.Category"),title='Sales treemap for categories', vSize = "Sales",vColor ="Profit", type= "value",palette = "RdYlGn", range=c(-20000,60000),mapping= c(-20000,10000,60000))
Price_per_product = ggplot(data, aes( x= Sub.Category, y=sum(Sales)/sum(Quantity), fill= Sub.Category),options(scipen=99)) +
geom_col()+
ggtitle("Price per product") +
coord_flip() +
theme(legend.position = "None", axis.text.x = element_text(size=6))
Price_per_product
profit_per_product = ggplot(data, aes( x= Sub.Category, y=sum(Profit)/sum(Quantity), fill= Sub.Category),options(scipen=99)) +
geom_col()+
ggtitle("Profit per product") +
coord_flip()+
theme(legend.position = "None", axis.text.x = element_text(size=6))
profit_per_product
ggplot(data, aes(x = Quantity, y = Sales, fill = Ship.Mode),options(scipen=99) )+ geom_bar(stat = "identity")
ggplot(data, aes( x=Ship.Mode, y= Profit, fill= Ship.Mode),options(scipen=99)) +
geom_col()+
ggtitle("Profit by Shipment mode and Segment") +
theme(legend.position = "None", axis.text.x = element_text(angle = 70 ,size=6)) +
facet_wrap(~Segment)
Sales_with_discount = data %>%
filter(Discount != 0) %>%
summarize(totals=sum(Sales))
Sales_with_discount
Sales_without_discount = data %>%
filter(Discount == 0) %>%
summarize(totals=sum(Sales))
Sales_without_discount
profit_with_discount = data %>%
filter(Discount != 0) %>%
summarize(totalp=sum(Profit))
profit_with_discount
profit_without_discount = data %>%
filter(Discount == 0) %>%
summarize(totalp=sum(Profit))
profit_without_discount
|
d10a1ae4e300a41ee3cd16aeaf842c9742070ec5
|
197590555db25e2b43692e4a89c3c8388c03fdf1
|
/tests/testthat/test-custom-noncompliance.R
|
59ec62e8ea9bd85d48414f3d619d3d879b2e3fb7
|
[] |
no_license
|
yadevi/DeclareDesign-1
|
f843ef77d937d1dd0975b99f8ab5914c20461c1c
|
badcd6e6edbb2e0bb3cf51b3da1047664acea789
|
refs/heads/master
| 2021-06-14T03:53:23.472797
| 2016-12-05T06:16:05
| 2016-12-05T06:16:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,698
|
r
|
test-custom-noncompliance.R
|
rm(list=ls())
library(testthat)
library(DeclareDesign)
context("Noncompliance")
test_that("test whether noncompliance works", {
population <- declare_population(noise = "rnorm(n_)", size = 1000)
sampling <- declare_sampling(n = 500)
potential_outcomes <- declare_potential_outcomes(formula = Y ~ 5 + .5*D,
condition_names = c(0, 1),
assignment_variable_name = "D")
my_noncompliance_function <- function(
data,
baseline_condition,
assignment_variable_name,
prob_non_comply){
N <- nrow(data)
D <- data[,assignment_variable_name]
non_comply <- rbinom(N,1,prob_non_comply)==1
D[non_comply] <- baseline_condition
return(D)
}
noncompliance <- declare_noncompliance(
noncompliance_function = my_noncompliance_function,
condition_names = c(0,1),
assignment_variable_name = "Z",
baseline_condition = 0,
prob_non_comply = .1)
assignment <- declare_assignment(condition_names = c(0,1))
# mock data ---------------------------------------------------------------
pop_draw <- draw_population(population = population,
potential_outcomes = list(potential_outcomes),
noncompliance = noncompliance)
smp_draw <- draw_sample(data = pop_draw, sampling = sampling)
smp_draw <- assign_treatment(data = smp_draw, assignment = assignment)
smp_draw <- draw_outcome(data = smp_draw, potential_outcomes = potential_outcomes, noncompliance = noncompliance)
head(pop_draw)
head(smp_draw)
with(smp_draw, table(Z, D))
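# Minimal assertion sketch (hypothetical; the original test only ran the
# pipeline without expectations): realized treatment D should take only
# the declared condition values.
expect_true(all(smp_draw$D %in% c(0, 1)))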
})
|
54a63a56079e6878b5a5c3aead24b5afecf3ec0b
|
f6d8e20dec1641dae4d8dcbeacea647ffede8a5f
|
/R/SvalbardGradientButtonData/iButtonParse.R
|
7c73ace69a37472f6f4559198b389c7e124de766
|
[] |
no_license
|
Plant-Functional-Trait-Course/PFTC_Data
|
0f440f97cd92814d60dba8a6508e5e1c4988fa46
|
c436657f33226bd53b565ad90be422d7343de78b
|
refs/heads/master
| 2022-12-16T07:25:48.797221
| 2020-09-10T14:56:33
| 2020-09-10T14:56:33
| 150,146,451
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,327
|
r
|
iButtonParse.R
|
iButt <- read.csv("Celevation_TEMPfile.txt")
fluxes <- read.csv("Cflux_SV_Gradient_2018.csv")
iButt$Datetime <- as.character(iButt$Datetime)
iButt$Temperature <- iButt$Temp + (iButt$Decimal/1000)
datestimes <- strsplit(iButt$Datetime, split = " ")
iButt$Date <- sapply(datestimes, FUN="[[", 1)
iButt$Time <- sapply(datestimes, FUN="[[", 2)
times <- strsplit(iButt$Time, split=":")
iButt$Hour <- as.numeric(sapply(times, FUN="[[", 1))
iButt$Minute <- as.numeric(sapply(times, FUN="[[", 2))
iButt$Second <- sapply(times, FUN="[[", 3)
#Use the first iButton measurement if fluxes are too early
base_temp = 13.098
flux_temp <- rep(0, nrow(fluxes))
fluxtime <- strsplit(as.character(fluxes$StartTime), split=":")
fluxtime[[27]] <- c(0, 0, 0) # manually patch row 27, whose StartTime is missing/unparseable
hours <- as.numeric(sapply(fluxtime, FUN="[[", 1))
minutes <- as.numeric(sapply(fluxtime, FUN="[[", 2))
for(i in seq(1, nrow(fluxes)))
{
if(hours[i] <= 11)
{
if((hours[i] == 11 && minutes[i] < 14) || hours[i] < 11)
{
flux_temp[i] = base_temp
next
}
}
ind <- which(iButt$Hour == hours[i] & iButt$Minute == minutes[i])
if(length(ind) == 0)
print(i)
flux_temp[i] = mean(iButt[ind,]$Temperature)
}
print(flux_temp)
##Write the final merged output file using temperature data and site metadata
#write.csv(flux_temp, file="Svalbard_Gradient_iButtonTemps.csv")
|
f7aa03e7e7362ca3cdaa05c1ab82e1775cf0f3e0
|
2d1a2eef9ed2df3670c24a80378cb5e8acd7003d
|
/man/print.Cosinor.Rd
|
af475f95b61b6a0b6ed92ca6af6dd913a08d8304
|
[] |
no_license
|
agbarnett/season
|
bd7b5a12ccab2c9337ab2961204b512f2fee4f42
|
6a707fccc0bdef472446b758e548e259f48e7b10
|
refs/heads/master
| 2022-05-20T06:30:33.394625
| 2022-03-20T09:48:02
| 2022-03-20T09:48:02
| 126,654,287
| 1
| 1
| null | 2022-02-16T02:59:35
| 2018-03-25T01:20:28
|
R
|
UTF-8
|
R
| false
| true
| 618
|
rd
|
print.Cosinor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.Cosinor.R
\name{print.Cosinor}
\alias{print.Cosinor}
\title{Print the Results of a Cosinor}
\usage{
\method{print}{Cosinor}(x, ...)
}
\arguments{
\item{x}{a \code{Cosinor} object produced by \code{cosinor}.}
\item{\dots}{optional arguments to \code{print} or \code{plot} methods.}
}
\description{
The default print method for a \code{Cosinor} object produced by
\code{cosinor}.
}
\details{
Uses \code{print.glm}.
}
\seealso{
\code{cosinor}, \code{summary.Cosinor}, \code{glm}
}
\author{
Adrian Barnett \email{a.barnett@qut.edu.au}
}
|
0d99b79b11551ce32895e489fd01f30500a12ecb
|
bcfdc2cd1d0ff50ce66662a1a37fd9f5adc90f68
|
/global.R
|
60697023949ab1d98dcf05102748bea61633c286
|
[] |
no_license
|
alstoc/LM_vs_SEM_shiny
|
d82197060e361129f38840b69ed64345d0230b41
|
ce223d78a98b40d7e52523bf492a946918a1d5e5
|
refs/heads/master
| 2023-05-03T10:21:10.500114
| 2021-05-17T11:27:22
| 2021-05-17T11:27:22
| 365,478,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
global.R
|
load("./data/sim_res_study_1.Rda")
load("./data/sim_res_study_2.Rda")
# Create table containing error variance and corresponding reliability
rel_err <- tibble(reliability = c(0.5, 0.7, 0.8, 0.9, 0.95, 1.0),
#error_var = c(1, 0.428571, 0.25, 0.111111, 0.052632),
error_var = c(225, 96.428571, 56.25, 25, 11.842105, 0))
# Create custom ggplot theme
theme_fruits <- function() {
font <- "sans" #assign font family up front
theme_fivethirtyeight() + #replace elements we want to change
theme(
# panel elements
rect = element_rect(fill = "#F8F8F8"),
panel.background = element_rect(fill = "#F8F8F8"),
# plot elements
plot.title = element_text(
family = font,
size = 30,
face = "bold"),
plot.subtitle = element_text(
family = font,
size = 20),
plot.caption = element_text(
family = font,
size = 12),
# axis elements
axis.title = element_text(
family = font,
size=20,
face = "bold"),
axis.text.x = element_text(
family = font,
size = 12),
axis.text.y = element_text(
family = font,
size = 12),
# legend elements
legend.title = element_text(
family = font,
size = 20,
face = "bold"),
legend.text = element_text(
family = font,
size = 18),
legend.position = "right",
legend.direction = "vertical",
# other elements
aspect.ratio = 1,
)
}
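# Hypothetical usage sketch: apply the custom theme to any ggplot, e.g.
# ggplot(mtcars, aes(wt, mpg)) + geom_point() + theme_fruits()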
|
f71c61d27c803bb2b78759485cd8cfd149b9e30e
|
fcbe81388b0f883ff36a9e7173277f80789fa871
|
/man/pd_sector_var.Rd
|
60b926d4676232ffd31c9d5979bf3faeb92f5cd0
|
[] |
no_license
|
cran/crp.CSFP
|
15b76b1860f47153a0909b1b9520f4357738d383
|
b94ad004acd7cfb33210b5f3c74976ca7561c04a
|
refs/heads/master
| 2021-01-10T21:24:19.961192
| 2016-09-11T18:35:59
| 2016-09-11T18:35:59
| 17,695,304
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 363
|
rd
|
pd_sector_var.Rd
|
\name{pd_sector_var}
\docType{data}
\alias{pd_sector_var}
\title{Sector variances for the Credit Suisse example portfolio}
\description{
The file contains the sector variances corresponding to the example portfolio from Credit Suisse First Boston for CreditRisk+.
}
\references{First Boston Financial Products, "CreditRisk+", 1997}
\keyword{datasets}
|
de160f6e3a4b04ef1a177185b425fb58ef68b16d
|
8eda0c211d86fba9ea1abec607de84127d34ffeb
|
/lib/image_df.R
|
380b5b2de492bb6014d09501890fb442ed4ab910
|
[] |
no_license
|
NinaShao1230/Fall2017-project3-grp9
|
f2c942cf31d201346301d920143220b348d81908
|
bbda084d60b89bd559affc117958f6f291a4d6de
|
refs/heads/master
| 2021-07-25T01:21:49.922482
| 2017-11-02T04:42:17
| 2017-11-02T04:42:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
image_df.R
|
install.packages("OpenImageR")
# library(EBImage)
library("OpenImageR")
setwd("/Users/ninashao/Desktop/Fall2017-project3-fall2017-project3-grp9-master 2")
img_labels<-read.csv("/Users/ninashao/Desktop/Fall2017-project3-fall2017-project3-grp9-master 2/data/training_set/label_train.csv",header = T)
img_dir <- "/Users/ninashao/Desktop/Fall2017-project3-fall2017-project3-grp9-master 2/data/image3/"
dir_names <- list.files(img_dir)
n_files <- length(list.files(img_dir))
labels<-img_labels$label..0.for.muffin..1.for.chicken..2.for.dog.
# Set up df
df <- data.frame()
# Set image size. In this case 28x28
img_size <- 28*28
for(i in 1:n_files){
img <- readImage(paste0(img_dir, dir_names[i]))
# OpenImageR::readImage() returns a plain matrix/array (no S4 @.Data slot,
# unlike EBImage), so it can be used directly
img_matrix <- as.matrix(img)
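# Hypothetical extra step (not in the original script): enforce the declared
# 28x28 size so every row bound into df has img_size columns
# img_matrix <- resizeImage(img_matrix, width = 28, height = 28, method = "bilinear")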
# Coerce to a vector
img_vector <- as.vector(t(img_matrix))
# Bind rows
df <- rbind(df,img_vector)
# Print status info
print(paste("Done ", i, sep = ""))
}
# dim(df)
df_label<-cbind(labels,df)
write.csv(df_label, file="df_nn.csv", row.names = FALSE)
|
519772641356cbcc1c270f3eb18cbeda2db06142
|
95510affefea922b31ae8f8141f373d2274b7b17
|
/3 - Sampling.R
|
dc97443c8e24304d81535275972a74c43f99f6c6
|
[] |
no_license
|
armandyne/Data-Science-Capstone-Project
|
ed50c83b42dd477604e5e2b102e9ca19b9dc49e4
|
57afda53168fd46cb4a6618109fde8b2e888afa2
|
refs/heads/master
| 2020-03-10T08:27:45.372241
| 2018-04-14T18:29:24
| 2018-04-14T18:29:24
| 129,285,925
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 296
|
r
|
3 - Sampling.R
|
load(paste0(data_dir, "/", "raw_datasets.rda"))
blogs_sample <- sample(blogs_vec, size = 10000)
news_sample <- sample(news_vec, size = 1000)
twitter_sample <- sample(twitter_vec, size = 20000)
save(blogs_sample, news_sample, twitter_sample, file = paste0(data_dir, "/", "sampled_datasets.rda"))
|
ce5ddc7b01574f54929cc062081bcb976bc446d7
|
4c32c6c3cf83a05cbc1ff323977872233c4637f1
|
/R/Stitcher.R
|
13e1d0d9c0fe4eca9ebb97b8199799f0bdd9fda2
|
[] |
no_license
|
icorroramos/COMPAR-EU_diabetes_model
|
fb4f343d563c1179d7e0d7256b94f1953d098415
|
8e7bf1445daafee7655135c75bc02b73e99695f5
|
refs/heads/master
| 2022-08-29T21:08:00.103803
| 2022-08-06T09:21:19
| 2022-08-06T09:21:19
| 207,499,341
| 0
| 2
| null | 2021-03-17T08:34:57
| 2019-09-10T08:00:48
|
R
|
UTF-8
|
R
| false
| false
| 3,091
|
r
|
Stitcher.R
|
library(plyr)  # rbind.fill(), used in sewing_machine() below

### Function to combine the results of two model runs.
### Maintains names and structure of output objects so that analysis scripts can be used
stitcher <- function(comp.name, location.1, location.2){
load(paste0(location.2, comp.name, '.RData'))
npats.loc2 <- if (exists('sim.vars')) sim.vars[[2]] else sim.vars.uc[[2]] # if/else, not ifelse(), for a scalar choice
sim.names <- ls()[startsWith(ls(), 'sim.results')]
for (i in 1:length(sim.names)) {
assign(paste0(sim.names[i], '.extend'), get(sim.names[i]))
}
load(paste0(location.1, comp.name, '.RData'))
npats.loc1 <- if (exists('sim.vars')) sim.vars[[2]] else sim.vars.uc[[2]]
for (i in 1:length(sim.names)) {
loc2.data <- get(paste0(sim.names[i], '.extend'))
loc2.data[[1]]$SIMID <- loc2.data[[1]]$SIMID + npats.loc1
loc1.data <- get(sim.names[i])
loc1.data[[1]] <- rbind(loc1.data[[1]], loc2.data[[1]])
loc1.data[-1] <- (npats.loc1 * as.data.frame(loc1.data[-1]) + npats.loc2 * as.data.frame(loc2.data[-1])) / (npats.loc1 + npats.loc2)
assign(sim.names[i], loc1.data, envir = .GlobalEnv)
}
}
### Function to combine n model runs stored in same directory
### Maintains names and structure of output objects so that analysis scripts can be used
sewing_machine <- function(n.run, filename) {
n.pats.run <- rep(NA, n.run)
run.seed <-rep(NA, n.run)
load(paste0(filename, '1.RData')) # Get first output to check which simulations were run
sim.names <- ls()[startsWith(ls(), 'sim.results')]
list.names <- names(get(sim.names[1]))
n.sims <- length(sim.names)
all.means <- lapply(1:n.sims, matrix, data = NA, nrow = 27, ncol = n.run)
all.patients <- rep(list(data.frame()), n.sims)
for (i in 1:n.run) {
load(paste0(filename, i, '.RData'))
sim.names.run <- ls()[startsWith(ls(), 'sim.results')]
if (!identical(sim.names,sim.names.run[1:n.sims])) stop(paste('Simulations in run', i, 'not the same as in first run'))
n.pats.run[i] <- if (exists('sim.vars')) sim.vars[[2]] else sim.vars.uc[[2]]
run.seed[i] <- if (exists('sim.vars')) sim.vars[[1]] else sim.vars.uc[[1]]
for (j in 1:n.sims) {
curr.sim <- get(sim.names[j])
all.means[[j]][, i] <- unlist(curr.sim[2:28])
curr.sim[[1]]$SIMID <- curr.sim[[1]]$SIMID + sum(n.pats.run[1:i-1]) # Increase SIMIDs with total pats in previous runs so that SIMIDS remain unique
all.patients[[j]] <- rbind.fill(all.patients[[j]], curr.sim[[1]])
}
}
run.weight <- n.pats.run / sum(n.pats.run)
weighed.means <- lapply(all.means, '*', run.weight)
unnamed.means <- lapply(weighed.means, rowSums)
results.means <- lapply(unnamed.means, function(x) {names(x) <- list.names[2:28]; return(x)})
for (i in 1:n.sims){
results.patient <- list(all.patients[[i]])
names(results.patient) <- list.names[1]
assign(sim.names[i], c(results.patient, as.list(results.means[[i]])), envir = .GlobalEnv)
print(paste('Created object', sim.names[i], 'in global environment'))
}
assign('sewing.seeds', run.seed, envir = .GlobalEnv)
print('Created object sewing.seeds containing all seed values')
}
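# Hypothetical usage sketches (paths and file prefixes are illustrative only):
# stitcher("comparatorA", "runs/machine1/", "runs/machine2/")
# sewing_machine(n.run = 4, filename = "runs/comparatorA_run")  # reads runs/comparatorA_run1.RData, ...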
|
8494cb3ce5fd4785b16e27a3450eb27b7e3a731d
|
4f14e7e4a3a91ca32330362158028e65f3f75011
|
/slice_b_gamma.R
|
04071e699be45d58564e7678c02f933843ecbb91
|
[] |
no_license
|
drvalle1/resist
|
31f5231ce944d9b9c86b4f6dd63cfa327c80c116
|
4cc588b91a8a82ca2ba698c8001b3fc6cc18bde8
|
refs/heads/master
| 2021-02-26T06:25:37.518977
| 2020-08-31T16:37:06
| 2020-08-31T16:37:06
| 245,502,435
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,602
|
r
|
slice_b_gamma.R
|
Doubling_bgamma=function(soma.media,w,b.gamma,yslice,MaxIter,ysoma){
b.gammaLo=b.gamma-w*runif(1)
b.gammaLo=ifelse(b.gammaLo<0.0000001,0.0000001,b.gammaLo)
b.gammaHi=b.gammaLo+w
#calculate llk
ylo=llk_bgamma(soma.media=soma.media,ysoma=ysoma,b.gamma=b.gammaLo)
yhi=llk_bgamma(soma.media=soma.media,ysoma=ysoma,b.gamma=b.gammaHi)
cond=F
#keep doubling until ylo<yslice and yhi<yslice
oo=0
while ((ylo>yslice) & (oo<MaxIter)){
b.gammaLo=b.gammaLo-w
if (b.gammaLo<0.0000001) { #avoid negative values
b.gammaLo=0.0000001
break;
}
ylo=llk_bgamma(soma.media=soma.media,ysoma=ysoma,b.gamma=b.gammaLo)
oo=oo+1
}
if (oo >= MaxIter) cond=T
oo=0
while ((yhi>yslice) & (oo<MaxIter)){
b.gammaHi=b.gammaHi+w
yhi=llk_bgamma(soma.media=soma.media,ysoma=ysoma,b.gamma=b.gammaHi)
oo=oo+1
}
if (oo >= MaxIter) cond=T
if (cond) return(c(b.gamma,b.gamma))
c(b.gammaLo,b.gammaHi)
}
#-------------------------------------------
llk_bgamma=function(soma.media,ysoma,b.gamma){
a1=b.gamma*soma.media
sum(dgamma(ysoma,a1,b.gamma,log=T))
}
#-------------------------------------------
Shrink_Sample_bgamma=function(rango1,yslice,MaxIter,soma.media,ysoma,b.gamma){
diff1=rango1[2]-rango1[1]
yfim=-Inf
oo=0
while ((yfim<yslice) & (diff1 > 0.00000001) & (oo<MaxIter)){
x=runif(1,min=rango1[1],max=rango1[2])
yfim=llk_bgamma(soma.media=soma.media,ysoma=ysoma,b.gamma=x)
if (yfim<yslice){ #shrink the slice if x falls outside
DistLo=abs(rango1[1]-x)
DistHi=abs(rango1[2]-x)
if (DistLo>DistHi) rango1[1]=x
if (DistLo<DistHi) rango1[2]=x
diff1=rango1[2]-rango1[1]
}
oo=oo+1
}
if (diff1 <= 0.00000001 | oo >=MaxIter) return(b.gamma)
x
}
#-------------------------------------------
Sample_bgamma=function(nparam,xmat,ysoma,betas,b.gamma,w,MaxIter,seg.id,nagg){
media=exp(xmat%*%betas)
soma.media1=GetSomaMediaOneGroup(media=media, nagg=nagg,SegID=seg.id-1)
# teste=data.frame(media=media,seg.id=seg.id)
# teste1=aggregate(media~seg.id,data=teste,sum)
#define upper bound
upper1=llk_bgamma(soma.media=soma.media1,ysoma=ysoma,b.gamma=b.gamma)
yslice=upper1-rexp(1);
#define slice
rango1=Doubling_bgamma(soma.media=soma.media1,w=w,b.gamma=b.gamma,
yslice=yslice,MaxIter=MaxIter,ysoma=ysoma)
#sample this particular parameter
b.gamma=Shrink_Sample_bgamma(rango1=rango1,yslice=yslice,MaxIter=MaxIter,
soma.media=soma.media1,ysoma=ysoma,
b.gamma=b.gamma)
b.gamma
}
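# Hypothetical smoke test of the pure-R likelihood above (the full sampler
# also needs GetSomaMediaOneGroup(), compiled elsewhere in this project):
# set.seed(1)
# llk_bgamma(soma.media = rep(2, 10), ysoma = rgamma(10, 4, 2), b.gamma = 2)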
|
73a08da5c1e32d79005da50a44058963dc4f76a8
|
3581b1fb06e1455db0cbafd1c43e2ac71a250734
|
/R/doNlme.R
|
29b9e51df125d57df326517ceb0f78c3a935b158
|
[] |
no_license
|
cran/mixlow
|
a4f85e942ef212bfbed81551c2d68b9197e2dc99
|
779a43f75afd02322ade30e575a45a2d1bd81e79
|
refs/heads/master
| 2016-09-05T13:24:41.248119
| 2012-03-20T00:00:00
| 2012-03-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,176
|
r
|
doNlme.R
|
`doNlme` <-
function(mixlowData, nlsData, drugs=getDrugs(mixlowData), analysis="multiple",
varFunction=1, method="ML", verbose=FALSE) {
## outer loop that sets up calls to NLME
if (!inherits(nlsData, "nlsData"))
stop("use only with \"nlsData\" objects")
if (analysis=="single") {
if (is.list(varFunction)==FALSE) stop("VarFunction must be a list of vectors named by drug")
if (length(names(varFunction)) == 0) stop("VarFunction must be a list of vectors named by drug")
}
varFunction0 = varFunction
nls.estimates = nlsData$nlsEstimates
data00 = mixlowData$concentrationResponse
drugRatios = mixlowData$drugRatios
# check for number of drugs
newDrugVector = drugs[1] # default, for single drug
if (length(drugs) !=1) {
# length will be 1 if a single drug is being analyzed alone. Else, the drug set will contain a mixture.
# order drugs so mixture is last
newDrugVector = numeric(0)
mixFlag = 0
for (i in seq(1,length(drugs))) {
dr = drugs[i]
ratio = drugRatios[,dr]
if (all(ratio==0)) {
if (mixFlag == 1)
stop("Drugs for analysis can contain only one mixture")
mixFlag = 1
mix = dr
}
if (any(ratio>0))
newDrugVector = c(newDrugVector,dr)
}
if (mixFlag != 0) {
#stop("Drugs must contain at least two drugs and one mixture. No mixture present.")
newDrugVector = c(newDrugVector,mix)
}
}
drugs0 = newDrugVector
# remove entries for blanks
data00 <- data00[data00$label=="rx",]
nlmeResults = list(0)
nlmeGraph = list(0)
nlmeModels = list(0)
numberOfAnalysis = 1
if (analysis == "single")
numberOfAnalysis = length(drugs0)
# loop over each set of drugs to be modeled ----------------------------------------------------------------------
for (setNum in seq(1,numberOfAnalysis)) {
drugs = drugs0
if (numberOfAnalysis > 1) {
drugs = drugs0[setNum]
varFunction = varFunction0[[drugs]]
}
#dr1 = drugs[1]
cell = unique(as.vector(drugRatios$cell[drugRatios$drug==drugs[1]]))
useTrays = drugRatios[(drugRatios$drug %in% drugs),]
# add drug and cell data to data0
data0 = merge(data00, useTrays, by= "tray")
data0$tray = as.factor(data0$tray)
# collect data for drugs in desired set
inlist <- data0$drug %in% drugs
dat1 <- data0[inlist & data0$cell== cell, ]
# set order for assessing drugs
ord = drugs
ord2 = paste(ord,"|",sep="",collapse="")
ord2 = substr(ord2,1,nchar(ord2)-1)
if (verbose==TRUE) writeLines("######################################################################\n")
if (verbose==TRUE) writeLines(paste("\n **** NLME analysis, drugs for round ", setNum, " are: ", ord2, " ****",sep=""))
# if only one tray, then duplicate data
new = 20000
for (dr in ord)
{
utray = unique(dat1$tray[dat1$drug==dr])
if (length(utray) == 1)
{
if (verbose==TRUE) writeLines("\n********** duplicating data, only one tray present ***************\n")
tmp <- dat1[dat1$drug==dr,]
tray = unique(tmp$tray)
# assumes that no tray number is called new20000+
tmp$tray <- paste("new", new,sep="")
new = new + 1
dat1 <- rbind(dat1,tmp)
}
}
# order drugs
dat1 <- dat1[order(dat1$cell,dat1$drug, dat1$tray, dat1$conc),]
dat1$drg <- rep(1,length(dat1$adj_resp))
for (i in 1:length(ord)) dat1$drg[dat1$drug==ord[i]] <- i
dat1$drg = as.factor(dat1$drg)
# make parameter list
tmp1 = unique(dat1[,c("drug","tray")])
combo = merge(nls.estimates,tmp1, by= "tray")
tmp5 = cbind(as.numeric(as.vector(combo$g)),as.numeric(as.vector(combo$p)),as.numeric(as.vector(combo$u)),as.numeric(as.vector(combo$lambda)))
if (is.null(combo$drug)==FALSE) {param = aggregate(tmp5, list(drug=combo$drug) ,mean)}
if (is.null(combo$drug) == TRUE) {param = aggregate(tmp5, list(drug=combo$drug.x) ,mean)}
names(param) = c("drug","g","p","u","lambda")
paramList = as.list(param)
ord3 = rep(0,length(ord))
for (i in 1:length(ord)) ord3[ord == paramList$drug[i]] <- i
#for (i in 1:length(ord)) {ord3[paramList$drug==ord[i]] <- i
paramList$drug = paramList$drug[ord3]
paramList$g = paramList$g[ord3]
paramList$p = paramList$p[ord3]
paramList$u = paramList$u[ord3]
paramList$lambda = paramList$lambda[ord3]
if (length(paramList$drug) != length(ord)) {stop("** Mismatch in length of drugs and parameter estimates")}
if (any(paramList$drug != ord)) {
stop("** Drugs/param list in wrong order")}
paramList$u = mean(as.numeric(as.vector(combo$u)))  # use a single common u (mean over all trays)
paramList$p = paramList$p # p is in log form
paramList$g = paramList$g # g is in log form
# setup call to NLME and then call NLME, return NLME results for best model
meth = method
best2 <- NlmeSetupCall(ord, paramList, dat1, method, varFunction, cell, analysis, verbose)
if(best2$best==0) {
if (verbose==TRUE) print('no models were suitable')
next
}
# collect results
temp <- as.list(c(nam =best2$best, method=meth, drug= ord2, setNum=setNum, cell= cell, lik= logLik(best2$mbest),
bic = BIC(best2$mbest), sig = best2$mbest$sigma, modelstruct = unlist(best2$mbest$modelStruct),
rc = best2$rcdrug, r2= best2$r2drug, se=unlist(summary(best2$mbest)$tTable[,2]),
coeff = unlist(best2$mbest$coefficients), df = unlist(best2$mbest$fixDF)[1], covv=as.list(best2$mbest$varFix) ))
nlmeResults[[setNum]] <- temp
# collect results for graphing
nlmeGraph[[setNum]] = list(pred0=best2$pred0, dat1=dat1, ord=ord, residu=best2$residu, best=best2$best)
# alter formula so that anova can be called on models
#best2$mbest$call$model = y~1
nlmeModels[[setNum]] = best2$mbest
}
returnList = list(nlmeResults=nlmeResults, nlmeGraph=nlmeGraph, nlmeModels=nlmeModels)
class(returnList) <- c("nlmeData")
return (returnList)
}
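## Example (sketch): `myMixlowData` and `myNlsData` are placeholders for objects
## produced by the earlier steps of the pipeline (data import and per-tray NLS
## fits); the arguments mirror the defaults above.
# nlmeFit <- doNlme(myMixlowData, myNlsData, analysis = "multiple",
#                   varFunction = 1, method = "ML", verbose = TRUE)
# str(nlmeFit$nlmeResults)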
|
1ae1a18a31abfb2a116cc827ee61c01500910b3a
|
1f0a75ecce94daf0003f6545e9ef4ac4c56ccf6d
|
/man/PictureRadialGradient-class.Rd
|
d40b93c11dd85ad57bae66563b8a09f7b25fc349
|
[] |
no_license
|
sjp/grImport2
|
767e71b961204b9b25389c7e07a629e2c9d6e069
|
00e310e43fa9bc33db2d4eef81340069493a2150
|
refs/heads/master
| 2021-01-19T04:55:45.773061
| 2015-02-27T10:41:20
| 2015-02-27T10:41:20
| 24,830,205
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,124
|
rd
|
PictureRadialGradient-class.Rd
|
\name{PictureRadialGradient-class}
\docType{class}
\alias{PictureRadialGradient-class}
\alias{applyTransform,PictureRadialGradient,matrix-method}
\alias{grobify,PictureRadialGradient-method}
\title{Class "PictureRadialGradient" }
\description{
A description of a radial gradient that is meant to be used as part of
a larger picture (as it does not directly draw anything itself). This
object will have no effect unless it is used in conjunction with the
gridSVG package.
}
\section{Slots}{
\describe{
\item{\code{x}:}{
Object of class \code{"numeric"}. The x-location of the radial
gradient.
}
\item{\code{y}:}{
Object of class \code{"numeric"}. The y-location of the radial
gradient.
}
\item{\code{r}:}{
Object of class \code{"numeric"}. The radius of the radial
gradient.
}
\item{\code{fx}:}{
Object of class \code{"numeric"}. The x-location of the focal
point of the radial gradient.
}
\item{\code{fy}:}{
Object of class \code{"numeric"}. The y-location of the focal
point of the radial gradient.
}
\item{\code{spreadMethod}:}{
Object of class \code{"character"} that specifies what happens
when a gradient ends within its bounds. Must be one of "pad",
"reflect" or "repeat". See \code{"radialGradient"} in the gridSVG
package for more information.
}
\item{\code{stops}:}{
Object of class \code{"list"} that is a list of objects of class
\code{"PictureGradientStop"}.
}
}
}
\section{Extends}{
Class \code{"PictureContent"}.
}
\section{Methods}{
\describe{
\item{applyTransform}{
\code{signature(object = "PictureRadialGradient", tm = "matrix")}:
transform the locations that represent the bounds and direction of
the gradient by a 3x3 transformation matrix.
}
\item{grobify}{\code{signature(object = "PictureRadialGradient")}:
convert to a gradient object for use with the gridSVG package.
}
}
}
\author{ Simon Potter }
\seealso{
\code{\link{Picture-class}},
\code{\link{grid.picture}}.
}
\keyword{classes}
|
c1807d4e1bae8307844e831cdcfe8de043b052f9
|
90e52d80527b046428fe779017e880a3845e76fb
|
/CourseProjectv2/plot3.R
|
acff0f945dd4fa79d414376f1e2841e2febf5bfd
|
[] |
no_license
|
raneykat/ExploratoryAnalysis
|
f28099ef1a00e8616aa64f090aae220499c71358
|
4481571a50cbcbc5959fd7583604197ebbe76496
|
refs/heads/master
| 2020-05-17T23:50:02.005582
| 2014-12-21T23:10:11
| 2014-12-21T23:10:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,399
|
r
|
plot3.R
|
# Katherine Raney
# Coursera Exploratory Data Analysis
# December 2014
# Course Project 2, Plot 3
# setwd("C:\\raneykat_git\\ExploratoryDataAnalysis\\ExploratoryAnalysis\\CourseProject")
#get the data file we need and unzip it
library(downloader)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download(url,dest="dataset.zip", mode = "wb")
unzip ("dataset.zip",exdir = ".")
# read in the datasets
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have seen
# decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008?
# Use the ggplot2 plotting system to make a plot answer this question.
# plot emissions by year and type for the given city - GIVEN CITY FOR BOTH QUESTIONS?
library(dplyr)
nei2 <- NEI %>%
filter(fips == "24510") %>%
group_by(year, type) %>%
mutate(TotalEmissions=sum(Emissions)) %>%
select(year, type, TotalEmissions)
library(ggplot2)
# send plot to a png file
png(file="plot3.png",bg="transparent",width = 800, height = 800)
qplot(year, TotalEmissions, data = nei2, facets =.~ type,ylab="Total PM2.5 Emissions", xlab="Year"
,main="Baltimore City PM2.5 Emissions by Type",geom="line")
dev.off()
|
fa6863e412d9ce93d9387730bf5b4e414d230869
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052075-test.R
|
dd8f158adaaf7d2f6c81d867f251948ab0398b32
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
1610052075-test.R
|
testlist <- list(rates = 4.14452302922905e-317, thresholds = NA_real_, x = c(-2.11966640218428e-289, Inf, 9.70418706714725e-101, 7.55109302986233e-308, 9.70418706357123e-101, 9.70418706716095e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 7.55652533908718e-308, 7.91826926828267e-101, 9.70395179359032e-101, 9.70418704587148e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 7.91826926828272e-101, 5.35249266634154e+116, 5.35249213585531e+116, 9.70466856459303e-101, -9.77580118946814e-150, 3.23687261230869e-314, NaN, NaN, 2.81199605989981e-312, 6.59473782987034e-96, 6.59473782982525e-96, 6.59473782982525e-96, 6.59473782982525e-96, 6.5947378298542e-96, 6.59473782982525e-96, 6.59473782982525e-96, 6.59473782982525e-96, NaN, 2.39958945526328e-310, 1.44804023176556e+190, NaN, 1.39067049844331e-309, NaN, 1.25986739689518e-321, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
88b04bd7443f8b18a3178d57910147c47e6dbfaa
|
dd5df187c9f002ff6eae9d70b7c6f69b8ebe15c4
|
/R入门小例子.R
|
7041af6b471e3835ce211562629e741a078fa136
|
[] |
no_license
|
liliwin/Rproject
|
6429d0209425ca633c3bec59289836f37828c9fa
|
12be261051b543131ff90daaf192b27444e407ed
|
refs/heads/master
| 2021-01-20T22:29:04.905614
| 2016-07-29T15:46:08
| 2016-07-29T15:46:08
| 64,316,708
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,953
|
r
|
R入门小例子.R
|
#############################################################################
# A group of students took exams in math, science, and English.
# To give all students a single overall performance measure, the subject
# scores need to be combined. In addition, the top 20% of students should
# be graded A, the next 20% B, and so on. Finally, the students should be
# sorted alphabetically by name.
#############################################################################
Stuld <- c(1:10)
StuName <- c('John Davis','Angela Williams','Bull Jones','Cheryl Cushing',
'Reuven Ytzrhak','Joel Knox','Mary Rayburn','Greg England','Brad Tmac','Tracy Mcgrady')
Math <- c(502,465,621,575,454,634,576,421,599,666)
Science <- c(95,67,78,66,96,89,78,56,68,10)
English <- c(25,12,22,18,15,30,37,12,22,38)
mydata <- data.frame(Stuld,StuName,Math,Science,English)
colnames(mydata)
mydata <- mydata[,-1]
## Centering and standardizing data ##
# 1. Centering: subtract the mean of the data set from each value
# 2. Standardizing: divide the centered values by the standard deviation
# Centering and standardizing serve the same purpose: removing the influence
# of measurement scale on the data structure
## scale(data,center=T,scale=T): center=T centers the data, scale=T standardizes it
z <- scale(mydata[3:5])
##求每个学生的所有成绩均值##
#apply(X,MARGIN,FUN,...)
#X:阵列包括矩阵,MARGIN:1表示矩阵行,2表示矩阵列,也可以是c(1,2) FUN:表示具体的运算方式
score <- apply(z,1,mean)
mydata$score<-apply(z,1,mean)
## Name handling: split the name column into FirName and LasName
# strsplit(data, split=""): data is a character vector, split is the separator
name<-strsplit(mydata$StuName,split=" ") # this line errors: StuName is a factor here, not a character vector #
name<-strsplit(StuName,split=" ") # applying strsplit() to a character vector returns a list #
name<-unlist(strsplit(StuName,split=" ")) # flatten the strsplit list into a plain vector #
name<-matrix(name,ncol=2,byrow=T) # convert name to a matrix to make the split easier #
FirName<-name[,1]
LasName<-name[,2]
## Combine
mydata<-cbind(mydata,FirName,LasName)
## Drop the redundant column from mydata
mydata<-mydata[-2]
## Compute the 20% cut points with quantiles: quantile(data,c(.8,.6,.4,.2))
q<-quantile(mydata$score,c(.8,.6,.4,.2))
## Assign grades by quantile
mydata$level[score>=q[1]] <- "A"
mydata$level[score>=q[2] & score<q[1]] <- "B"
mydata$level[score>=q[3] & score<q[2]] <- "C"
mydata$level[score>=q[4] & score<q[3]] <- "D"
mydata$level[score<q[4]] <- "E"
## Sort by name
mydata<-mydata[order(mydata$LasName,mydata$FirName),]
options(digits=2)
# ----------------------------------------------------------------
mydata1 <- data.frame(Stuld,StuName,Math,Science,English)
name<-strsplit((StuName)," ")
lastname<-sapply(name,"[",2)
firstname<-sapply(name,"[",1)
# sapply(name, "[", i) extracts the i-th element from each component of the list;
# "[" is a function that can extract part of an object
|
ad7f33022bc2416fe05b4d866b642342a64c965c
|
4c14bcc37fa428673536b87083afb734866f947c
|
/man/array.resistance.Rd
|
a6a09f5359aadd87c99d1f06275cca08f79366d4
|
[] |
no_license
|
RobinHankin/ResistorArray
|
9c06802cb867eb3c40014ae5552ae8b8420411d1
|
fe8588cc44b3c5afd91033efd768ce9846860087
|
refs/heads/master
| 2021-09-28T17:31:52.431138
| 2021-09-18T22:25:41
| 2021-09-18T22:25:41
| 168,077,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,556
|
rd
|
array.resistance.Rd
|
\name{array.resistance}
\alias{array.resistance}
\title{Resistance between two arbitrary points on a regular lattice of
unit resistors
}
\description{
Given two points on a regular lattice of electrical nodes joined by unit
resistors (as created by \code{makefullmatrix()}), returns the
resistance between the two points, or (optionally) the potentials of
each lattice point when unit current is fed into the first node, and the
second is earthed.
}
\usage{
array.resistance(x.offset, y.offset, rows.of.resistors,
cols.of.resistors, give.pots = FALSE)
}
\arguments{
\item{x.offset}{Earthed node is at \eqn{(0,0)}{(0,0)}, second node is at
\code{(x.offset, y.offset)}}
\item{y.offset}{Earthed node is at \eqn{(0,0)}{(0,0)}, second node is at
\code{(x.offset, y.offset)}}
\item{rows.of.resistors}{Number of rows of resistors in the network
(positive integer)}
\item{cols.of.resistors}{Number of columns of resistors in the network
(positive integer)}
\item{give.pots}{Boolean, with \code{TRUE} meaning to return a matrix
of potentials of the electrical nodes, and \code{FALSE} meaning to
return the resistance between the origin and the current input node}
}
\details{
Note that the electrical network is effectively toroidal.
}
\author{Robin K. S. Hankin}
\seealso{\code{\link{makefullmatrix}}}
\examples{
jj.approximate <- array.resistance(1,2,15,17,give=FALSE)
jj.exact <- 4/pi-1/2
print(jj.exact - jj.approximate)
persp(array.resistance(4,0,14,16,give=TRUE),theta=50,r=1e9,expand=0.6)
}
\keyword{array}
|
fbb68e8f420780d1f3799a6fb4e5a0dd84c3c85e
|
c08e6b516a3d341d1fdb893448922082dc3626cf
|
/man/dimsum_stage_fastqc.Rd
|
0e685f04d1427d73d9f96019279ce66fad0bdfff
|
[
"MIT"
] |
permissive
|
lehner-lab/DiMSum
|
eda57459bbb450ae52f15adc95d088747d010251
|
ca1e50449f1d39712e350f38836dc3598ce8e712
|
refs/heads/master
| 2023-08-10T17:20:39.324026
| 2023-07-20T15:29:47
| 2023-07-20T15:29:47
| 58,115,412
| 18
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 754
|
rd
|
dimsum_stage_fastqc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimsum_stage_fastqc.R
\name{dimsum_stage_fastqc}
\alias{dimsum_stage_fastqc}
\title{dimsum_stage_fastqc}
\usage{
dimsum_stage_fastqc(
dimsum_meta,
fastqc_outpath,
report = TRUE,
report_outpath = NULL,
save_workspace = TRUE
)
}
\arguments{
\item{dimsum_meta}{an experiment metadata object (required)}
\item{fastqc_outpath}{FASTQC output path (required)}
\item{report}{whether or not to generate FASTQC summary plots (default: TRUE)}
\item{report_outpath}{FASTQC report output path}
\item{save_workspace}{whether or not to save the current workspace (default: TRUE)}
}
\value{
an updated experiment metadata object
}
\description{
Run FASTQC on all fastq files.
}
|
0002a42fc5f39ff7145d768a6280ed4e7e03f1ff
|
4ba85b2f9f4c11255905ed3228101248595986ad
|
/k-Nearest-Neighbors and Boosting/DigitRecognition/DigitRecognitionkNN.R
|
0caeb2f236aae63fb24b920067797074d5ed9cea
|
[] |
no_license
|
prabhuvashwin/Machine-Learning
|
97cd7b0260e9f921839a87931725010cfa6d0df0
|
c63bfadae9a421d093ad8647c9f9b95cf95abdd6
|
refs/heads/master
| 2021-06-14T01:29:24.255604
| 2017-04-08T06:17:06
| 2017-04-08T06:17:06
| 87,610,303
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,137
|
r
|
DigitRecognitionkNN.R
|
## Author: Ashwin Venkatesh Prabhu
## Implement k-NN algorithm for Optical recognition for handwritten digits dataset
require(class)
## Getting the train and test data
train_data <- read.csv("optdigits_training.csv", header = FALSE)
test_data <- read.csv("optdigits_test.csv", header = FALSE)
## Getting the class variable for train and test data
train_class_variable <- train_data$V65
test_class_variable <- test_data$V65
digit_accuracy <- numeric()
for(i in seq(1, 100, 1))
{
    ## Generate predictions for the test data (drop the class column so the
    ## label is not used as a feature)
    knnTest <- knn(train_data[, -ncol(train_data)], test_data[, -ncol(test_data)], train_class_variable, k = i)
accuracy <- mean(knnTest == test_class_variable)
## Accuracy for each k value is stored in this list
digit_accuracy <- c(digit_accuracy, accuracy)
print(paste("Accuracy at k = ", i, " is: ", accuracy), quote = F)
}
## Error rate graph plotted
plot(1 - digit_accuracy, type = "l", ylab = "Error Rate",
xlab = "K", main = "Error Rate with K values")
## Accuracy graph plotted
plot(digit_accuracy, type = "l", ylab = "Accuracy",
xlab = "K", main = "Accuracy with K values")
|
bff663b5250684c5397280bc3168b36457322319
|
30a8fe01e73e6483e99daddab56a8f97af3b60fd
|
/GenomeBrowser2_Plot.R
|
5eebd3a5a2897c08e39e7326565d8774c6290e8f
|
[
"MIT"
] |
permissive
|
Jonna-Heldrich/meiosis_scripts
|
3e60552cd3b17ef0104e90f3eb4c0a4e1b09fb89
|
e75b05d44d21a29c84eff22e879dc252eb4e05aa
|
refs/heads/master
| 2020-08-11T07:44:08.162751
| 2019-10-12T04:44:38
| 2019-10-12T04:44:38
| 214,519,955
| 0
| 0
|
MIT
| 2019-10-12T04:16:34
| 2019-10-11T20:04:48
|
R
|
UTF-8
|
R
| false
| false
| 1,393
|
r
|
GenomeBrowser2_Plot.R
|
# Genome Browser View plotting
# Purpose: Plot whole chromosome or region of chromosome as genome browser view
# input: 2 outputs from GenomeBrowser1_Chr.R
plot_genomeview <- function(df_samp1,df_samp2,position1,position2,name1,name2,chrnum,color1,color2) {
par(las=1)
par(mfrow=c(2,1))
  # restrict each sample to the requested window
  win1 <- df_samp1[df_samp1$position >= position1 & df_samp1$position <= position2, ]
  win2 <- df_samp2[df_samp2$position >= position1 & df_samp2$position <= position2, ]
  # polygon outlines: anchor the signal to zero at both ends of the window
  ax <- c(min(win1[, 1]), win1[, 1], max(win1[, 1]))
  ay <- c(0, win1[, 2], 0)
  bx <- c(min(win2[, 1]), win2[, 1], max(win2[, 1]))
  by <- c(0, win2[, 2], 0)
  plot(win1, xlab=paste0('Position on chromosome ',chrnum,' (kb)'), ylab=name1, type='h', col=color1, frame.plot=F)
  polygon(ax, ay, col=color1, border = NA)
  plot(win2, xlab=paste0('Position on chromosome ',chrnum,' (kb)'), ylab=name2, type='h', col=color2, frame.plot=F)
  polygon(bx, by, col=color2, border = NA)
par(mfrow=c(1,1))
}
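## Example (sketch): `chr1_samp1` and `chr1_samp2` are placeholders for the two
## per-chromosome data frames produced by GenomeBrowser1_Chr.R
## (column 1 = position, column 2 = signal).
# plot_genomeview(chr1_samp1, chr1_samp2, position1 = 100, position2 = 400,
#                 name1 = "Sample 1", name2 = "Sample 2", chrnum = 1,
#                 color1 = "steelblue", color2 = "firebrick")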
|
1cf1e61d134f5269ddeb905cf83be4f479be4b8f
|
4056e89d2e74109f4938051dbe09017ab0c48f43
|
/man/flat_habitat.Rd
|
9c57f7ea916459db53ec1d37aa13c90b3cff15be
|
[] |
no_license
|
petrelharp/landsim
|
af4209dd8f5ffdd8a32a367d8e58b1b1f632e885
|
c4e7cb8cb637c58bcacc1d92e4a01023eb0b281a
|
refs/heads/master
| 2021-01-19T04:32:44.496828
| 2017-11-22T22:54:10
| 2017-11-22T22:54:10
| 47,908,664
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 734
|
rd
|
flat_habitat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_habitat.R
\name{flat_habitat}
\alias{flat_habitat}
\title{Create a Flat Habitat}
\usage{
flat_habitat(diam = 20000, res = 100, value = 1, width = diam,
height = diam)
}
\arguments{
\item{diam}{Diameter of the desired habitat (in real units, not number of cells).}
\item{res}{Spatial resolution of the Raster.}
\item{value}{The value(s) to populate the habitat with (will be recycled).}
\item{width}{Width of the region (default: diam).}
\item{height}{Height of the region (default: diam).}
}
\value{
A RasterLayer populated with \code{value} (recycled as needed).
}
\description{
Creates a flat, spatially homogeneous habitat raster populated with the given value(s).
}
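\examples{
# A minimal sketch using the defaults: a 20km x 20km habitat at 100m resolution.
hab <- flat_habitat(diam = 20000, res = 100, value = 1)
}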
|
d059805170d2f5bd961c15e3e2ef91078d3f49f5
|
69b68868434fce241a10b100bcc8eb7ad564ecb2
|
/R/SepsisProjection.R
|
ecfbef9612597b151ca030f3b17f3d17b5246d14
|
[
"MIT"
] |
permissive
|
jknightlab/SepstratifieR
|
1481d5777b6d179719ec3cf921512d1c7dbd7271
|
0ad77fb3447b41f0464b5577b8d2186bdbc31c54
|
refs/heads/main
| 2023-04-18T08:53:08.735906
| 2022-03-14T13:32:32
| 2022-03-14T13:32:32
| 397,996,598
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,470
|
r
|
SepsisProjection.R
|
# Defining SepsisProjection class
setClass("SepsisProjection",
slots=list(gene_set="character",
predictors="numeric",
cosine_similarities = "data.frame",
SRS = "factor",
SRS_probs = "data.frame",
SRSq = "numeric")
)
# Defining methods associated with the SepsisProjection class
setMethod("show",
"SepsisProjection",
function(object) {
cat("SepsisProjection\n\n")
cat("Gene set used: ", object@gene_set, "\n")
cat(length(object@predictors)," predictor variables\n\n", sep="")
if(length(object@predictors) > 0) {
cat("Predictor variables: ")
cat(utils::head(names(object@predictors), n=4),"...", sep=", ")
cat("\nSRS: ")
cat(as.character(object@SRS))
cat("\nSRSq: ")
cat(as.numeric(object@SRSq))
cat("\n")
}
}
)
# Creating a constructor SepsisProjection method
SepsisProjection <- function(gene_set=NULL, predictors=NULL, cosine_similarities=NULL, SRS=NULL, SRS_probs=NULL, SRSq=NULL) {
methods::new(
"SepsisProjection",
gene_set = as.character(gene_set),
predictors = as.numeric(predictors),
cosine_similarities = data.frame(cosine_similarities),
SRS = factor(SRS),
SRS_probs = data.frame(SRS_probs),
SRSq = as.numeric(SRSq)
)
}
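# Example (sketch): all field values below are illustrative placeholders,
# not real SRS output.
# proj <- SepsisProjection(gene_set = "extended",
#                          predictors = c(gene1 = 1.2, gene2 = -0.4),
#                          cosine_similarities = data.frame(SRS1 = 0.9, SRS2 = 0.1, SRS3 = 0),
#                          SRS = "SRS1",
#                          SRS_probs = data.frame(SRS1 = 0.8, SRS2 = 0.15, SRS3 = 0.05),
#                          SRSq = 0.75)
# proj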
|
d2b368823a0339228497eaa6800bf0225ae8b9fc
|
5bf426ec23846b07687b4bbbe7f29a655001aa2d
|
/ExistingStrokeRiskExternalValidation/extras/codeToRun.R
|
624975daee556ba09a9d7a3fb8f85c447cf3bc65
|
[] |
no_license
|
julwa/StudyProtocolSandbox
|
14a0ad48897f977878859e87e97ac2ff6ced173e
|
a2030d9706222ebb9eb955c51f12263b7559617f
|
refs/heads/master
| 2023-07-10T21:03:58.762135
| 2021-08-13T12:40:12
| 2021-08-13T12:40:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,394
|
r
|
codeToRun.R
|
#file to run study
library(PredictionComparison)
library(ExistingStrokeRiskExternalValidation)
options(fftempdir = 'T:/yourFftemp')
# replace the your* placeholders with real connection values before running
dbms <- yourDbms
user <- yourUsername
pw <- yourPassword
server <- Sys.getenv('server')
port <- Sys.getenv('port')
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
server = server,
user = user,
password = pw,
port = port)
databaseName = 'friendlyDatabaseName'
cdmDatabaseSchema <- 'yourCdmDatabaseSchema'
cohortDatabaseSchema <- 'yourCohortDatabaseSchema'
cohortTable <- 'existingStrokeVal'
outputLocation <- 'C:/existingStrokeVal'
ExistingStrokeRiskExternalValidation::main(
connectionDetails=connectionDetails,
oracleTempSchema = NULL,
databaseName=databaseName,
cdmDatabaseSchema=cdmDatabaseSchema,
cohortDatabaseSchema=cohortDatabaseSchema,
outputLocation=outputLocation,
cohortTable=cohortTable,
createCohorts = F,
runAtria = F,
runFramingham = F,
runChads2 = F,
runChads2Vas = F,
runQstroke = F,
summariseResults = F,
packageResults = F,
N=10)
#submitResults(exportFolder=outputLocation,
# dbName= databaseName, key, secret)
|
945a2c7d67f7df9d396e67cf71255764c41222d6
|
1fa9f418caddb65fb8feaa9b236764c98c61fd17
|
/src/libRbind/testEst.R
|
e29efe821ef5d726e8fa863bc30fc8cffb9150c5
|
[
"MIT"
] |
permissive
|
MADAI/MADAIEmulator
|
485a19013de8d6160e50e2ed1886f94d3b0f8592
|
7d926ad04a791c7694defd88db41c13f7ee4e6aa
|
refs/heads/master
| 2016-09-01T19:39:03.556529
| 2012-11-27T15:16:39
| 2012-11-27T15:19:50
| 4,770,211
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,518
|
r
|
testEst.R
|
## some functions for testing the estimation parts of the emulator
##
## ccs, cec24@phy.duke.edu, aug-2011
##
## aims:
## 1: plot lhood over a plane while keeping other hyperparams fixed (lhoodcompare)
## 2: compare max lhood for various regression and cov fn models (not implemented yet)
## 3: do a k-sets withold test to build up a distribution of deviates (holdbacktest)
##
## inputs:
## modelData, trainingData.
## these will be supplied in model.in{training=trainingData, xmodel=modelData}
##
## these fns are for a single observable, we're not using multidim directly
##
## rangeMatrix: 2x2, { { minA, maxA}, {minB, maxB} }
## thetaVec, vector of the "best" thetas for this function
## we'll run thetaA and thetaB over the supplied ranges while holding the other values
## fixed
lhoodOverRange <- function(thetaA, thetaB, rangeMatrix, thetaVec, nthetas, nEvalPts, model.in)
{
lhood.mat <- matrix(0, nrow=nEvalPts, ncol=nEvalPts)
nmodelPoints <- dim(model.in$xmodel)[1]
nparams <- dim(model.in$xmodel)[2]
cat("nparams: ", nparams, "\n")
cat("nmodelPoints: ", nmodelPoints, "\n")
stepA <- abs((rangeMatrix[1,1] - rangeMatrix[1,2]) / nEvalPts)
stepB <- abs((rangeMatrix[2,1] - rangeMatrix[2,2]) / nEvalPts)
pointList <- matrix(0, nrow=nEvalPts**2, ncol=nthetas)
for(i in 1:nEvalPts){
for(j in 1:nEvalPts){
subValA <- rangeMatrix[1,1] + i * stepA
subValB <- rangeMatrix[2,1] + j * stepB
subVec <- thetaVec
subVec[thetaA] <- subValA
subVec[thetaB] <- subValB
pointList[j+nEvalPts*(i-1),] = subVec
}
}
lhoodRes <- callEvalLhoodList(model.in, pointList, nEvalPts**2, nmodelPoints, nparams=nparams, nthetas=nthetas)
lhoodRes
}
##
## vary 2 hyperparams and plot the lhood while keeping the remaining hyperparams fixed
## plot this as a set of images
lhoodCompare <- function(estim.result=estimResult, obsIndex=1, ngridPts=32, titleAdditional=NULL){
namesfirst <- c("scale", "nugget")
nthetas <- length(estim.result$thetas[obsIndex,])
par(mfrow=c(nthetas,nthetas), mar=c(2,2,0,0), oma=c(2,2,2,2), cex.axis=0.8,
cex.lab=0.8, cex.main=0.8)
# compute the lhood of the initial theta set
model <- list(xmodel=estim.result$des.scaled, training=estim.result$train.scaled)
lhoodMain <- callEvalLhoodList(model, estim.result$thetas[obsIndex,], 1,
nmodelPoints=dim(model$xmodel)[1], nparams=dim(model$xmodel)[2], nthetas=nthetas)
print(lhoodMain)
# all this to make a grid
for(i in 1:nthetas){
for(j in 1:nthetas){
# don't plot in the upper right corner
if(j > i){
plot(c(0,1),c(0,1),ann=F,bty='n',type='n',xaxt='n',yaxt='n')
} else if(j == i){
# on the diagonals plot the names of the variables
plot(c(0,1),c(0,1),ann=F,bty='n',type='n',xaxt='n',yaxt='n')
if(i <= 2){
buffer <- namesfirst[i]
} else {
buffer <- paste("theta:", (i-2))
}
text(x=0.5, y=0.5, buffer, cex=1.5)
} else {
# actually plot a slice in the lhood plane
plotLhoodSlice(estim.result, j, i, obsIndex, ngridPts)
}
}
}
par(mfrow=c(1,1), mar=c(2,2,2,2))
buffer<-paste(titleAdditional, "L = ", round(lhoodMain$lhood,digits=2))
title(main=buffer, line=-1)
}
plotLhoodSlice <- function(estim.result=estimResult, dimA=5, dimB=6, obsIndex=1, ngridPts=32, nconts=10)
{
ranges <- matrix(0, nrow=2, ncol=2)
if(dimA > 2){
ranges[1,] <- c(-10,10)
} else if (dimA == 2){
ranges[1,] <- c(0.00001, 0.5)
} else {
ranges[1,] <- c(0.00001, 5)
}
if(dimB > 2){
ranges[2,] <- c(-10,10)
} else if (dimB == 2){
ranges[2,] <- c(0.00001, 0.5)
} else {
ranges[2,] <- c(0.00001, 5)
}
# used the estimated thetas for the first bin
fixedValVec <- estim.result$thetas[obsIndex,]
print(fixedValVec)
cat("ranges A: " , ranges[1,], "\n")
cat("ranges B: " , ranges[2,], "\n")
model <- list(xmodel=estim.result$des.scaled, training=estim.result$train.scaled[,obsIndex])
res <- lhoodOverRange(dimA,dimB ,ranges, fixedValVec, nthetas=length(fixedValVec), nEvalPts=ngridPts, model)
xrange <- seq(from=ranges[1,1], to=ranges[1,2], length.out=ngridPts)
yrange <- seq(from=ranges[2,1], to=ranges[2,2], length.out=ngridPts)
lhoodMatrix <- t(matrix(res$lhood, ncol=ngridPts, nrow=ngridPts))
bufferX <- paste("theta :", dimA)
bufferY <- paste("theta :", dimB)
#levels=c(-300, -200,-150, -100, -75, -65, -50, -40, -32, -25, -15, -10)
#image(xrange, yrange, lhoodMatrix, xlab=bufferX, ylab=bufferY, col=topo.colors(length(levels)-1), breaks=levels)
image(xrange, yrange, lhoodMatrix, xlab=bufferX, ylab=bufferY)
contour(xrange, yrange, lhoodMatrix, nlevels=nconts, add=TRUE, labcex=0.8)
points(fixedValVec[dimA], fixedValVec[dimB], pch=3, cex=1.5, lwd=2)
}
# partition the nmodelpoints into nSets subsets, estimate the model
# for the complement of each subset and then compute the deviation on the
# witheld points
HoldBackTest <- function(des.scaled, train.scaled, nSets=7, fixNuggetOption=NULL){
nmodelPts.tot <- dim(des.scaled)[1]
nparams <- dim(des.scaled)[2]
nthetas <- nparams + 2 ## currently
# the number of points in one of the withold sets
subSetLength <- floor( nmodelPts.tot / nSets)
if(nmodelPts.tot %% nSets != 0){
nSets <- nSets + 1 # add an additional set to hold the remainder
}
model.list <- vector("list", nSets)
partial.des <- vector("list", nSets)
partial.train <- vector("list", nSets)
test.pts <- vector("list", nSets)
true.values <- vector("list", nSets)
subList <- vector("list", nSets)
thetas.full <- matrix(0, ncol=nthetas, nrow=nSets)
lhoods <- rep(NA, nSets)
# loop over all the sets creating the partial designs, we make sure that the
# final set contains the remainder
for(i in 1:nSets){
if(i < nSets){
subList[[i]] <- seq(from=((i-1)*subSetLength+1), to=(i*subSetLength))
} else {
subList[[i]] <- seq(from=((i-1)*subSetLength+1), nmodelPts.tot)
}
# these will be the design points for each partial run
partial.des[[i]] <- des.scaled[-(subList[[i]]), ]
# the training values for each partial run
partial.train[[i]] <- train.scaled[-(subList[[i]])]
# the test locations
test.pts[[i]] <- des.scaled[subList[[i]], ]
# the values at the test locations
true.values[[i]] <- train.scaled[subList[[i]]]
model.list[[i]] <- list(xmodel=partial.des[[i]], training=partial.train[[i]])
}
# estimate the hyper params for each model
# we support the fixedNugget option here.
#
for(i in 1:nSets){
npts <- length(model.list[[i]]$training)
thetas.full[i,] <- callEstimate(model.list[[i]], nmodelpts=npts, nparams=nparams,
nthetas=nthetas, fixedNugget=fixNuggetOption)
## it's also fun to compute the lhood for each set
temp <- callEvalLhoodList(model.list[[i]], thetas.full[i,], nevalPoints=1, nmodelPoints=npts,
nparams=nparams, nthetas=nthetas)
lhoods[i] <- temp$lhood
}
# hold the results and the deviates
emu.res <- vector("list", nSets)
devs <- c()
# now we want to compute the deviates for each set
for(i in 1:nSets){
nEmuPts <- length(true.values[[i]])
npts <- length(model.list[[i]]$training)
# start by emulating each partial model at its test.pts locations
emu.res[[i]] <- callEmulateAtList(model.list[[i]], thetas.full[i,], test.pts[[i]], nemupts=nEmuPts,
nmodelpoints=npts, nparams=nparams, nthetas=nthetas)
## we quantify the deviations as the squared diff from the real value scaled by
## the predicted variance at this locn
devs <- c(devs, (emu.res[[i]]$mean - true.values[[i]])**2 / (emu.res[[i]]$var))
}
## we return the vector of deviates, the test points where we compute the deviates
## the emulated means and vars at these test points, the true values at the points
## the lhoods of each of the models and the thetas specifying each model
## and also the models themselves
final.res <- list(deviates=devs, test.pts=test.pts, emu.res=emu.res,
true.values=true.values, lhoods=lhoods, thetas=thetas.full,
models=model.list)
}
# use this to test holdbacktest
testWT <- function(){
options(error=utils::recover)
source("emuYields.R")
res <- HoldBackTest(estimResult$des.scaled, estimResult$train.scaled[,1], nSets=20)
invisible(res)
}
|
a68f6e445c72567683821a81044ec25880b7fcfc
|
7fd749dc1a52e201dfe433fa0da403414687f5c0
|
/R/chi_diag.boot.R
|
35812ed81c2c6eb5a1dba7864d917f2a96327aa4
|
[] |
no_license
|
cran/r4lineups
|
a179b83afdc8b8c68ac812cd600e86bc0ece48e8
|
6753d4662d34ea39261878f9f0584788be7c23f7
|
refs/heads/master
| 2020-03-27T03:58:08.061355
| 2018-07-18T12:20:02
| 2018-07-18T12:20:02
| 145,902,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 109
|
r
|
chi_diag.boot.R
|
# Bootstrap statistic (boot() convention: df = data, d = resampled indices).
# Assumes the magrittr pipe (%>%) and d_bar() are available from the package namespace.
chi_diag.boot <- function(df, d){
  q <- df[d] %>% sum((df[2,]-log(d_bar(df))/(df[1,])))
  return(q)
}
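# Example (sketch): typical use with boot::boot(); `lineup_df` is a placeholder
# for data in the layout this statistic expects.
# b <- boot::boot(lineup_df, statistic = chi_diag.boot, R = 1000)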
|
e1cf90d2e1d2bda9121e176023db74392e9de362
|
568db5b2c448e80d459f4f9c923d2efbd0acc120
|
/R/BCIPlot.R
|
324a3c48c79bcbf719d799889cf01117defffe85
|
[] |
no_license
|
bromsk/NCRNbirds
|
0becc5be7b1ee521fe4ee14d6c7e6f6751047b10
|
25b2c87700ded8f96800193aa9a94add9667bac8
|
refs/heads/master
| 2020-04-21T15:09:45.937162
| 2018-12-06T21:16:10
| 2018-12-06T21:16:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,587
|
r
|
BCIPlot.R
|
#' @include NCRNbirds_Class_def.R
#' @include BCI.R
#'
#' @title BCIPlot
#'
#' @importFrom dplyr case_when group_by n_distinct pull right_join summarize
#' @importFrom ggplot2 aes annotate element_line geom_point ggplot ggtitle guides guide_legend labs
#' @importFrom ggplot2 scale_color_manual scale_x_continuous scale_y_continuous theme theme_classic
#' @importFrom magrittr %>%
#' @importFrom purrr map pmap
#' @importFrom tidyr full_seq
#' @importFrom viridis viridis_pal
#' @importFrom tidyr replace_na
#'
#' @description Plots BCI score over time.
#'
#' @param object An \code{NCRNbirds} object a \code{list} of such objects, or a \code{data.frame} like that produced by \code{birdRichness()}.
#' @param years A numeric vector. Indicates which years should be graphed.
#' @param points A character vector of point names. Only these points will be used.
#' @param visits A length 1 numeric vector, defaults to NA. Returns data only from the indicated visits.
#' @param times A numeric vector of length 1. Returns only data from points where the number of years that a point has been vistied is greater or equal to the value of \code{times}. This is determined based on the data found in the \code{Visits} slot.
#' @param plot_title Optional, A title for the plot.
#' @param point_num An optional numeric vector indicating the number of points sampled each year. If \code{object} is a \code{NCRNbirds} object
#' then this will be calculated automatically. If \code{object} is a \code{data.frame} or a \code{list} then this can be provided by the user.
#' @param output Either "total" (the default) or "list". Only used when \code{object} is a \code{list}.
#' @param ... Additional arguments passed to \code{\link{birdRichness}}
#'
#' @details This function produces a graph of the Bird Community Index over time. It does this by using the output of
#' the \code{\link{BCI}} function and averaging the point values to get the park's yearly value. The data is then passed on
#' to ggplot2 for graphing. Typically this is done automatically by providing an \code{NCRNbirds} object or a \code{list}
#' of such objects. If the user wishes to provide their own \code{data.frame} it should have 3 columns, \code{Year, BCI, BCI_Category}
#' and each row should be data from single year.
#'
#' @export
setGeneric(name="BCIPlot",function(object,years=NA, points=NA,visits=NA, times=NA, plot_title=NA, point_num=NA,output="total", ...){standardGeneric("BCIPlot")}, signature="object")
setMethod(f="BCIPlot", signature=c(object="list"),
function(object,years,points,visits, times, plot_title, point_num, output, ...) {
switch(output,
total={
visits<-if(anyNA(visits)) getDesign(object,info="visits") %>% unlist %>% max %>% seq else visits
years<-if(anyNA(years)) getVisits(object, points=points, visits=visits, times=times) %>%
pull(Year) %>% unique %>% sort %>% full_seq(1) else years
graphdata<-data.frame(Year=years,BCI=NA, BCI_Category=NA)
graphdata$BCI<-(years %>% map(~BCI(object=object, years=.x, points=points,
visits=visits, times=times,output="dataframe",...),...) %>%
map("BCI") %>% map(mean) %>% unlist %>% round(0))
graphdata<-graphdata %>%
mutate (BCI_Category=case_when( BCI <40.1 ~"Low Integrity",
BCI>=40.1 & BCI<52.1 ~ "Medium Integrity",
BCI>=52.1 & BCI < 60.1 ~ "High Integrity",
BCI>=60.1 ~ "Highest Integrity" ),
BCI_Category=factor(BCI_Category, levels=c("Low Integrity","Medium Integrity",
"High Integrity","Highest Integrity")))
if (all(is.na(point_num))) point_num<-map(visits, function(visits){
map(years, function(years) getVisits(object=object, years=years, visits=visits, times=times) %>% nrow) %>%
unlist(F)})
return(BCIPlot(object=graphdata, plot_title=plot_title, point_num = point_num))
},
list={
return(lapply(X=object, FUN=BCIPlot, years=years, points=points, visits=visits, times=times,
plot_title=plot_title, point_num=point_num))
}
)
})
setMethod(f="BCIPlot", signature=c(object="NCRNbirds"),
          function(object, years, points, visits, times, plot_title=NA, point_num=NA, ...){
visits<-if(anyNA(visits)) 1:getDesign(object,info="visits") else visits
years<-if(anyNA(years)) getVisits(object, points=points, visits=visits, times=times) %>%
pull(Year) %>% unique %>% sort %>% full_seq(1) else years
graphdata<-data.frame(Year=years,BCI=NA, BCI_Category=NA)
graphdata$BCI<-(years %>% map(~BCI(object=object, years=.x, points=points,
visits=visits, times=times,...),...) %>%
map("BCI") %>% map(mean) %>% unlist %>% round(0))
graphdata<-graphdata %>% mutate (BCI_Category=case_when( BCI <40.1 ~"Low Integrity",
BCI>=40.1 & BCI<52.1 ~ "Medium Integrity",
BCI>=52.1 & BCI < 60.1 ~ "High Integrity",
BCI>=60.1 ~ "Highest Integrity" ),
BCI_Category=factor(BCI_Category, levels=c("Low Integrity","Medium Integrity",
"High Integrity","Highest Integrity")))
if (all(is.na(point_num))) point_num<-map(visits, function(visits){
map(years, function(years) getVisits(object=object, years=years, visits=visits, times=times) %>% nrow) %>%
unlist(F)})
plot_title<-if(is.na(plot_title)) paste0("Bird Community Index for ",getParkNames(object, name.class="long")) else plot_title
return(BCIPlot(object=graphdata, plot_title=plot_title, point_num = point_num))
})
setMethod(f="BCIPlot", signature=c(object="data.frame"),
function(object, plot_title, point_num){
BCIColors<-viridis_pal()(4)
names(BCIColors)<-c("Low Integrity", "Medium Integrity", "High Integrity", "Highest Integrity")
SampEffort<-if(!all(is.na(point_num))) pmap(point_num, paste, sep=",") %>% unlist else NA
integer_breaks<-min(object$Year):max(object$Year)
YearTicks<- if(!all(is.na(point_num))) paste0(integer_breaks, "\n(", SampEffort,")") else integer_breaks
GraphOut<-ggplot(data=object, aes(x=Year, y=BCI, color=BCI_Category)) +
annotate("rect", ymin=0, ymax=40, xmin=-Inf, xmax=Inf, fill=BCIColors[1], alpha=0.3) +
annotate("rect", ymin=40, ymax=52, xmin=-Inf, xmax=Inf, fill=BCIColors[2], alpha=0.3) +
annotate("rect", ymin=52, ymax=60, xmin=-Inf, xmax=Inf, fill=BCIColors[3], alpha=0.3) +
annotate("rect", ymin=60, ymax=80, xmin=-Inf, xmax=Inf, fill=BCIColors[4], alpha=0.3) +
geom_point(size=4) +
scale_color_manual(values=BCIColors, drop=FALSE) +
guides(color=guide_legend(reverse=T, title ="BCI Category"))+
scale_x_continuous(breaks=integer_breaks, minor_breaks=integer_breaks, labels=YearTicks) +
scale_y_continuous(limits=c(0,80), expand=c(0,0)) +
labs(y=" Bird Community Index", caption="Values in parentheses indicate the number of points monitored each visit of each year.") +
{if(!is.na(plot_title)) ggtitle(plot_title)} +
      theme_classic()
return(GraphOut)
})
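## Example (sketch): supplying a user-built data frame with the three required
## columns (Year, BCI, BCI_Category), as described in @details; values are
## illustrative only.
# bci_df <- data.frame(
#   Year = 2010:2012,
#   BCI = c(45, 55, 62),
#   BCI_Category = factor(c("Medium Integrity", "High Integrity", "Highest Integrity"),
#                         levels = c("Low Integrity", "Medium Integrity",
#                                    "High Integrity", "Highest Integrity")))
# BCIPlot(bci_df, plot_title = "Example park", point_num = NA)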
|
09afc44965bc26503d11f8b7152510363635fab3
|
6dba9fa38b058cee5e4dbfa6b3b7ac5e3c88c0f0
|
/loop2.R
|
32d3677e9217100b360af8d3f7ae23bbe94e1eb0
|
[] |
no_license
|
AngelicaValdes/Tesis
|
2c7ea3889b8d720ec7f31d5475e7c40f0ed4ba56
|
82d0d5dec09bb016256b18aee13feec864695bea
|
refs/heads/main
| 2023-08-24T13:08:38.825748
| 2021-10-08T17:12:13
| 2021-10-08T17:12:13
| 413,637,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,114
|
r
|
loop2.R
|
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR))
dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M))
file.create(M)
cat("\nCXXFLAGS=-O3 -Wno-unused-variable -Wno-unused-function",
file = M, sep = "\n", append = TRUE)
cat('Sys.setenv(BINPREF = "C:/Rtools/mingw_$(WIN)/bin/")',
file = file.path(Sys.getenv("HOME"), ".Rprofile"),
sep = "\n", append = TRUE)
cat("\nCXXFLAGS += -Wno-ignored-attributes -Wno-deprecated-declarations",
file = M, sep = "\n", append = TRUE)
#Setear directorio
library("rstan")
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
load('/mydata_server_3.RData')
mod<-stan_model("delay2.stan")
for (j in clientes$child_id[(count_clientes+1):(677)]){
cliente<-subset(BD,child_id==j)
cliente<-cliente[order(cliente$Periodo_final, cliente$route_id),]
# initialize variables
h<-length(cliente$Periodo_final)
theta<-rep(param[1],h)
sigma<- rep(param[2],h)
xi<-rep(param[3],h)
mean_rhat<-rep(0,h)
max_rhat<-rep(0,h)
se_mean<-rep(0,h)
thetar<-data.frame(theta1=c(rep(param[4],h)),theta2=c(rep(param[4],h)),theta3=c(rep(param[4],h)),theta4=c(rep(param[4],h)))
#algoritmo para obtener pronostico
for (i in 1:h){
nRoutes <- cliente$child_route_num[1]
nObs <- nrow(cliente[1:i,])
obs <- as.matrix(cliente[1:i,6:(5+nRoutes)], nrow = nrow(cliente[1:i,]))
y <- as.matrix(cliente[1:i,12:(11+nRoutes)], nrow = nrow(cliente[1:i,]))
stan.data = list(nRoutes = nRoutes, nObs = nObs, obs = obs, y = y)
      reg_fit <- sampling(mod, data=stan.data, iter=round(50000*nObs^(-0.5)), chains=2, save_warmup = FALSE) # iter must be a whole number
summary <- summary(reg_fit, pars = c("theta","sigma","xi","thetar"), probs = c(0.025,0.5,0.975))$summary
theta[i]<-summary[1]
sigma[i]<-summary[2]
xi[i]<-summary[3]
mean_rhat[i]<-mean(summary[,8])
max_rhat[i]<-max(summary[,8])
se_mean[i]<-summary[1,2]
if (nRoutes >= 1){thetar$theta1[i]<-summary[4]}
if (nRoutes >= 2){thetar$theta2[i]<-summary[5]}
if (nRoutes >= 3){thetar$theta3[i]<-summary[6]}
if (nRoutes >= 4){thetar$theta4[i]<-summary[7]}
files <- list.files(tempdir(), full.names = T, pattern = "^file")
file.remove(files)
}
# write the "difference" value into the corresponding column
for(i in 1:h){
d<-cliente$Periodo_final[i]
if(cliente$route_id[i]==1){
Base_pronostico$diff_1[d+f]<-cliente$difference[i]
}
if(cliente$route_id[i]==2){
Base_pronostico$diff_2[d+f]<-cliente$difference[i]
}
if(cliente$route_id[i]==3){
Base_pronostico$diff_3[d+f]<-cliente$difference[i]
}
if(cliente$route_id[i]==4){
Base_pronostico$diff_4[d+f]<-cliente$difference[i]
}
}
# client data with repeated periods collapsed
cliente$Periodos_repetidos<-1
cliente$Diferencia_prom<-rep(NA,h)
p<-rep(1,h)
for (i in 1:(h-1)){
if(cliente$Periodo_final[i]==cliente$Periodo_final[i+1] & cliente$route_id[i]==cliente$route_id[i+1]){
p[i+1]<-p[i]+1
cliente$Periodos_repetidos[i+1]<-cliente$Periodos_repetidos[i]+1
cliente$Diferencia_prom[i+1]<-mean(cliente$difference[(i+2-p[i+1]):(i+1)])
cliente$Periodo_final[i]<-0
theta[i]<-NA
sigma[i]<-NA
xi[i]<-NA
mean_rhat[i]<-NA
max_rhat[i]<-NA
se_mean[i]<-NA
thetar[i,]<-NA
}
}
cliente<-subset(cliente,Periodo_final>0)
theta<-theta[is.na(theta)==F]
sigma<-sigma[is.na(sigma)==F]
xi<-xi[is.na(xi)==F]
mean_rhat<-mean_rhat[is.na(mean_rhat)==F]
max_rhat<-max_rhat[is.na(max_rhat)==F]
se_mean<-se_mean[is.na(se_mean)==F]
thetar<-subset(thetar,is.na(theta1)==FALSE)
# algorithm to write the data into the variables
h<-length(cliente$Periodo_final)
# data for the first period
Base_pronostico$theta[1+f]<-param[1]
Base_pronostico$sigma[1+f]<-param[2]
Base_pronostico$xi[1+f]<-param[3]
Base_pronostico$theta_1[1+f]<-param[4]
if(cliente$child_route_num[1]>=2){Base_pronostico$theta_2[1+f]<-param[4]}
if(cliente$child_route_num[1]>=3){Base_pronostico$theta_3[1+f]<-param[4]}
if(cliente$child_route_num[1]>=4){Base_pronostico$theta_4[1+f]<-param[4]}
# data for the forecast
for(i in 1:h){
d<-cliente$Periodo_final[i]
if(cliente$child_route_num[1]>=1){
Base_pronostico$theta_1[d+1+f]<-thetar$theta1[i]
}
if(cliente$child_route_num[1]>=2){
Base_pronostico$theta_2[d+1+f]<-thetar$theta2[i]
}
if(cliente$child_route_num[1]>=3){
Base_pronostico$theta_3[d+1+f]<-thetar$theta3[i]
}
if(cliente$child_route_num[1]>=4){
Base_pronostico$theta_4[d+1+f]<-thetar$theta4[i]
}
if(is.na(cliente$Diferencia_prom[i])==FALSE){
if(cliente$route_id[i]==1){
Base_pronostico$diff_1[d+f]<-cliente$Diferencia_prom[i]
}
if(cliente$route_id[i]==2){
Base_pronostico$diff_2[d+f]<-cliente$Diferencia_prom[i]
}
if(cliente$route_id[i]==3){
Base_pronostico$diff_3[d+f]<-cliente$Diferencia_prom[i]
}
if(cliente$route_id[i]==4){
Base_pronostico$diff_4[d+f]<-cliente$Diferencia_prom[i]
}
}
Base_pronostico$theta[d+1+f]<-theta[i]
Base_pronostico$sigma[d+1+f]<-sigma[i]
Base_pronostico$xi[d+1+f]<-xi[i]
Base_pronostico$mean_rhat[d+1+f]<-mean_rhat[i]
Base_pronostico$max_rhat[d+1+f]<-max_rhat[i]
Base_pronostico$se_mean[d+1+f]<-se_mean[i]
Base_pronostico$route[d+f]<-cliente$route_id[i]
Base_pronostico$count_period[d+f]<-cliente$Periodos_repetidos[i]
Base_pronostico$route_num[d+f]<-cliente$child_route_num[i]
}
for(i in 2:(max(BD$Periodo_final)+1)){
if(is.na(Base_pronostico$theta[i+f]))Base_pronostico$theta[i+f]<-Base_pronostico$theta[i-1+f]
if(is.na(Base_pronostico$sigma[i+f]))Base_pronostico$sigma[i+f]<-Base_pronostico$sigma[i-1+f]
if(is.na(Base_pronostico$xi[i+f]))Base_pronostico$xi[i+f]<-Base_pronostico$xi[i-1+f]
for (r in 1:mean(BD$child_route_num[BD$child_id==j])){
if(r==1){
if(is.na(Base_pronostico$theta_1[i+f]))Base_pronostico$theta_1[i+f]<-Base_pronostico$theta_1[i-1+f]
}
if(r==2){
if(is.na(Base_pronostico$theta_2[i+f]))Base_pronostico$theta_2[i+f]<-Base_pronostico$theta_2[i-1+f]
}
if(r==3){
if(is.na(Base_pronostico$theta_3[i+f]))Base_pronostico$theta_3[i+f]<-Base_pronostico$theta_3[i-1+f]
}
if(r==4){
if(is.na(Base_pronostico$theta_4[i+f]))Base_pronostico$theta_4[i+f]<-Base_pronostico$theta_4[i-1+f]
}
}
}
f<-f+max(BD$Periodo_final)+1
count_clientes<-count_clientes+1
print("CLIENTE NUMERO:")
print(count_clientes)
unlink('/mydata_server_3.RData', recursive = TRUE)
save.image(file = '/mydata_server_3.RData')
rm(list = ls())
gc()
load('/mydata_server_3.RData')
files <- list.files("\\AppData\\Local\\Temp\\3", full.names = T, pattern = "Rtmp")
unlink(files, recursive = TRUE)
}
|
c75540601561f4f6a5acf511317aafbde5f4db89
|
bdadf9e6d2f86a00793fad1806b743cb755b4277
|
/La Palma Skript.R
|
64a00a1df7d98a1f3f5a8b30ea62db61781cb495
|
[] |
no_license
|
s2jsvoge/R-scripts-La-Palma
|
f050dc8a94b2416f9d1842c8bc5757ad4537ddc2
|
2b2755689a8a973eb4ec9d6cc2e7fd6490c6a417
|
refs/heads/master
| 2021-01-01T03:52:49.785217
| 2016-05-26T12:55:54
| 2016-05-26T12:55:54
| 59,143,614
| 1
| 0
| null | 2016-05-28T16:47:02
| 2016-05-18T19:06:37
|
R
|
ISO-8859-1
|
R
| false
| false
| 5,521
|
r
|
La Palma Skript.R
|
######################################
##### Fog precipitation efficiency ###
##### in different forest types ######
##### on La Palma, Canary Islands#####
######################################
# Hypothesis: Fog precipitation efficiency in cloud-influenced forests on La Palma
# is determined by vertical vegetation structure and leaf morphology.
# Pine forest is most efficient in absorbing precipitation from cloud layer.
# Dear La Palma mates, please insert the code under the "##" headings.
# If you think of other things we could calculate, just add them under a new heading.
# Please don't forget to note briefly with "#" what each newly created variable is,
# e.g.: b1 - fog prec. site 1 day 1, or matching headings ;)
####################
### Load in Data ###
####################
setwd("D:/Studium/Master Bayreuth/Science Schools/La Palma Schience School/results/csv")
descr_nets<-read.table(file="descriptions_nets.csv",sep=";",dec=",", head=TRUE)
descr_control<-read.table(file="descriptions_control.csv",sep=";",dec=",", head=TRUE)
descr_devices<-read.table(file="descriptions_devices.csv",sep=";",dec=",", head=TRUE)
#########################
### Site descriptions ###
#########################
# subsets per location
dev_site1<-subset(descr_devices, descr_devices$Location==1)
dev_site2<-subset(descr_devices, descr_devices$Location==2)
dev_site3<-subset(descr_devices, descr_devices$Location==3)
## Altitude
alt_site1_mean<-mean(dev_site1$Altitude.device) # 1225.11 m
alt_site2_mean<-mean(dev_site2$Altitude.device) # 1241.33 m
alt_site3_mean<-mean(dev_site3$Altitude.device) # 1482.44 m
## Exposition
exp_site1<-dev_site1$Aspect.of.Location
exp_site2<-dev_site2$Aspect.of.Location
exp_site3<-dev_site3$Aspect.of.Location
## average Canopy cover
cc_site1_mean<-mean(dev_site1$Canopy.density) #79.28
cc_site2_mean<-mean(dev_site2$Canopy.density) # 82.19
cc_site3_mean<-mean(dev_site3$Canopy.density) # 51.86
cc_site1<-subset(descr_devices$Canopy.density, descr_devices$Location==1)
cc_site2<-subset(descr_devices$Canopy.density, descr_devices$Location==2)
cc_site3<-subset(descr_devices$Canopy.density, descr_devices$Location==3)
## stand density
sd_site1_mean<-mean(dev_site1$Stand.density..Bitterlich.) # 15.33
sd_site2_mean<-mean(dev_site2$Stand.density..Bitterlich.) # 20.78
sd_site3_mean<-mean(dev_site3$Stand.density..Bitterlich.) # 18.89
sd_site1<-subset(descr_devices$Stand.density..Bitterlich., descr_devices$Location==1)
sd_site2<-subset(descr_devices$Stand.density..Bitterlich., descr_devices$Location==2)
sd_site3<-subset(descr_devices$Stand.density..Bitterlich., descr_devices$Location==3)
## volume of crown
## average height of trees
## variability in height of trees
## average vertical structure
## variability in vertical structure
## number of species
## which species
####################################
### Measured daily precipitation ###
####################################
prec<-read.csv("prec_devices.csv",header=TRUE,sep=";", dec=",")
## daily precipitation per device
daily_prec_dev<-subset(prec, prec$Precipitation..ml.>0)
# daily_prec_dev is subset of precipitation measurements for each day
# for sampling devices without NA and without 0ml
# !!! CHANGE: include 0 ml but exclude NA
plot(daily_prec_dev$Precipitation..ml.~daily_prec_dev$Device,
col=as.factor(daily_prec_dev$Date))
## mean daily precipitation per site
?tapply
tapply(X=daily_prec_dev$Precipitation..ml.,
INDEX=as.numeric(daily_prec_dev$Location), as.numeric(daily_prec_dev$Date),
FUN=mean)
# funktioniert noch nicht
## mean precipitation per device over all measurement days
## daily precipitation in control devices
# Are there differences in control devices of each site? (canpoy cover correlation?)
## mean daily precipitation in control devices per site
## device prec. substracted with control device average per day of corresponding site
## daily fog precipitation per device
## mean daily fog precipitation per site
## mean fog precipitation per device over all measurement days
################################################################
### Correlations of vegetation parameters with precipitation ###
###############################################################
## with canopy cover
## with species
## with vertical structure --> Index???
# meterzählungen joscha
# tree hights
## with crown volume
## with forest type
## with altitude
## with observed weather conditions
######################################################################
### Precipitation captured by nets as validation of cloud presence ###
######################################################################
# As discussed with Anke, we need to standardize the potential fog precipitation
# of each site and day to check whether cloud presence/density etc. are comparable
# between the different sites on a given day, to exclude the possibility that
# we captured differences in meso-meteorology instead of vegetation-driven differences.
## Correlation between net density and fog precipitation?
## with exposition of nets
|
a39281cf974115bf941c5899cbe24597fbab737f
|
f9f42e00cdc3ddb75f4496733fbba28ca1f740b0
|
/R/actinoUtils.R
|
833bf40550e1492fc03fbf462f57509d9fb70a69
|
[] |
no_license
|
richardsprague/actino
|
20f36f951cb7a45294225005de8c1e69fc4a7fdd
|
f4bbaa1c7cf3f520de34e53ef340877ebc9ee07d
|
refs/heads/master
| 2021-12-09T21:08:21.017115
| 2021-12-03T23:37:40
| 2021-12-03T23:37:40
| 80,489,917
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,062
|
r
|
actinoUtils.R
|
# actinoUtils.R
#' @title Dataframe at a specific rank
#' @description turn a uBIome data frame into a matrix at a specific rank
#' @param df a well-formed dataframe created by join_all_ubiome_files_full
#' @param rank tax_rank (default = "genus")
#' @importFrom stats na.omit
#' @export
dataframe_at_rank <- function(df, rank="genus" ){
z <- lapply(df[,1],function(x) {
l = tax_rank_of_full_taxa(x)
return(c(l[[1]],l[[2]]))
})
names(z) = NULL
z.df = t(as.data.frame(z))
row.names(z.df) = NULL
new.df <- na.omit(data.frame(z.df, df[,-1]))
colnames(new.df)[1:2] = c("tax_rank","tax_name")
return(new.df[new.df$tax_rank==rank,][-1])
}
#' @title Matrix version of a dataframe at a specific rank
#' @description turn a uBIome data frame into a matrix at a specific rank
#' @param df a well-formed dataframe created by join_all_ubiome_files_full
#' @param rank tax_rank (default = "genus")
#' @export
matrix_at_rank <- function(df, rank = "genus"){
mf <- dataframe_at_rank(df,rank)
m <- as.matrix(mf[,-1])
rownames(m) <- mf[,1]
m
}
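## Example (sketch): `ubiome_df` is a placeholder for the data frame returned
## by join_all_ubiome_files_full() (column 1 = full taxa string, remaining
## columns = samples).
# genus_mat <- matrix_at_rank(ubiome_df, rank = "genus")
# heatmap(genus_mat)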
|
3bf32773724e318d5841b54ece315e9973bdfbba
|
40a72b0add98f0ddadac4e07e64287b2d2dbb22a
|
/r/result_plots_v2.r
|
4a5704cdf401a4b3666836dba4525a96151b74bf
|
[] |
no_license
|
andydawson/stepps-calibration
|
a5e8c1a2fafef07b070cbab444967fcbebb66602
|
c72cd26600fb1d82666264f962c5176fa0f8beb4
|
refs/heads/master
| 2020-05-27T17:23:08.311656
| 2018-04-13T14:04:11
| 2018-04-13T14:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,443
|
r
|
result_plots_v2.r
|
library(rstan)
library(ggplot2)
library(fields, quietly=TRUE)
library(RColorBrewer)
wd = getwd()
#####################################################################################
# user pars
#####################################################################################
path_utils = 'r/utils'
path_data = 'r/dump'
path_out = 'output'
path_figs = 'figures'
suff_dat = '12taxa_mid_comp_v0.1'
save_plots = TRUE
rescale = 1e6
#####################################################################################
# read in data and source utils
#####################################################################################
source(file.path(path_utils, 'process_funs.r'))
source(file.path(path_utils, 'plot_funs.r'))
load(sprintf('%s/cal_data_%s.rdata', path_data, suff_dat))
path_out = 'output'
run1 = list(suff_fit = 'cal_g_v0.3',
kernel = 'gaussian',
one_psi = TRUE,
one_gamma = TRUE,
EPs = FALSE)
run2 = list(suff_fit = 'cal_g_Kpsi_EPs_v0.3',
kernel = 'gaussian',
one_psi = FALSE,
one_gamma = TRUE,
EPs = TRUE)
run3 = list(suff_fit = 'cal_g_Kpsi_Kgamma_EPs_v0.3',
kernel = 'gaussian',
one_psi = FALSE,
one_gamma = FALSE,
EPs = TRUE)
run4 = list(suff_fit = 'cal_g_Kgamma_EPs_v0.3',
kernel = 'gaussian',
one_psi = TRUE,
one_gamma = FALSE,
EPs = TRUE)
run5 = list(suff_fit = 'cal_pl_v0.3',
kernel = 'pl',
one_a = TRUE,
one_b = TRUE,
one_gamma = TRUE,
EPs = FALSE)
run6 = list(suff_fit = 'cal_pl_Kgamma_EPs_v0.3',
kernel = 'pl',
one_a = TRUE,
one_b = TRUE,
one_gamma = FALSE,
EPs = TRUE)
# runs = list(run1, run2, run3, run4, run5, run6)
runs = list(run1, run5)
fname = sprintf('%s/%s.csv', path_out, run5$suff_fit)
fit_pl = read_stan_csv(fname)
post_pl = rstan::extract(fit_pl, permuted=FALSE, inc_warmup=FALSE)
fname = sprintf('%s/%s.csv', path_out, run1$suff_fit)
fit_g = read_stan_csv(fname)
post_g = rstan::extract(fit_g, permuted=FALSE, inc_warmup=FALSE)
limits <- get_limits(centers_veg)
# #####################################################################################
# # trace plots
# #####################################################################################
# trace_plots(post_g, suff, save_plots=save_plots, fpath='figures')
#####################################################################################
# read in data and source utils
#####################################################################################
# power law
sum_w_pl = build_sumw_pot(post_pl, K, N_pot, d_pot, run5)
sum_w_pl
# gaussian
sum_w_g = build_sumw_pot(post_g, K, N_pot, d_pot, run1)
sum_w_g
col_substr_g = substr(colnames(post_g[,1,]), 1, 3)
col_substr_pl = substr(colnames(post_pl[,1,]), 1, 3)
one_psi = run1$one_psi
if (one_psi){
psi = rep(mean(post_g[,1,which(col_substr_g == 'psi')]), K)
} else {
psi = colMeans(post_g[,1,which(col_substr_g == 'psi')])
}
one_a = run5$one_a
if (one_a){
a = rep(mean(post_pl[,1,which(col_substr_pl == 'a')]), K)
} else {
a = colMeans(post_pl[,1,which(col_substr_pl == 'a')])
}
one_b = run5$one_b
if (one_b){
b = rep(mean(post_pl[,1,which(col_substr_pl == 'b')]), K)
} else {
b = colMeans(post_pl[,1,which(col_substr_pl == 'b')])
}
dx = 0.008
dr = 0.001
dvec = seq(0, 1, dr)
# power law
C_pl=build_sumw_pot(post_pl, K, N_pot, d_pot, run5)*dx^2
C_pl
# gaussian
C_g=build_sumw_pot(post_g, K, N_pot, d_pot, run1)*dx^2
C_g
p_g = gaussian(dvec, psi[1])
p_pl = power_law(dvec, a[1], b[1])
pdf('figures/kernel_pdfs.pdf')
plot(dvec*1e3, p_pl*dvec/C_pl[1], type='l', xlab='Radius', ylab='Density')
lines(dvec*1e3, p_g*dvec/C_g[1], lty=2, col='blue')
legend('topright', c('Power law', 'Gaussian'), col=c('black', 'blue'), lty=c(1,2))
dev.off()
# cdf
c_pl = cumsum(p_pl*dvec/C_pl[1]*2*pi)*dr
c_g = cumsum(p_g*dvec/C_g[1]*2*pi)*dr
pdf('figures/kernel_cdfs.pdf')
plot(dvec*1e3, c_pl, type='l', xlab='Radius', ylab='Estimated cumulative density')
lines(dvec*1e3, c_g, col='blue', lty=2)
legend('bottomright', c('Power law', 'Gaussian'), col=c('black', 'blue'), lty=c(1,2))
dev.off()
sum(p_pl*dvec*2*pi)*dr/C_pl
sum(p_g*dvec*2*pi)*dr/C_g
############################################################################################################
# dispersal cdf
############################################################################################################
radius = seq(8000,1000000, by=4000)
x_pot = seq(-528000, 528000, by=8000)
y_pot = seq(-416000, 416000, by=8000)
coord_pot = expand.grid(x_pot, y_pot)
dmat = t(rdist(matrix(c(0,0), ncol=2), as.matrix(coord_pot, ncol=2))/rescale)
# power law
sum_w_pl = build_sumw_pot(post_pl, K, N_pot, d_pot, run5)
r_int_pl = dispersal_decay(post_pl, dmat, sum_w_pl[1], radius/rescale, kernel='pl')
# gaussian
sum_w_g = build_sumw_pot(post_g, K, N_pot, d_pot, run1)
r_int_g = dispersal_decay(post_g, dmat, sum_w_g[1], radius/rescale, kernel='gaussian')
# radii capturing 50% / 90% of the pollen; `r_int` is not defined above --
# substitute r_int_g or r_int_pl before using the commented segments block below
# fifty = which.min(abs(r_int - 0.5))
# ninety = which.min(abs(r_int - 0.9))
# segments = data.frame(x=c(0, 0, radius[fifty]/1e3, radius[ninety]/1e3),
# xend=c(radius[fifty]/1e3, radius[ninety]/1e3, radius[fifty]/1e3, radius[ninety]/1e3),
# y=c(r_int[fifty], r_int[ninety], 0.2, 0.2),
# yend=c(r_int[fifty], r_int[ninety], r_int[fifty], r_int[ninety]))
library(reshape)
dat = data.frame(radius=radius/1e3, Gaussian=r_int_g, InversePowerLaw=r_int_pl)
dat = melt(dat, id='radius')
p <- ggplot(dat) + geom_line(aes(x=radius, y=value, color=factor(variable), linetype=variable))
p <- p + theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab('Radius') + ylab('Proportion of pollen')
p <- p + theme(axis.text= element_text(size=rel(1)), axis.title=element_text(size=14))
# p <- p + geom_segment(data=segments, aes(x=x, y=y, xend=xend, yend=yend), linetype=2, colour='royalblue4')
p <- p + xlim(0, 600) + ylim(0,1.0)
p <- p + labs(color='Kernel', linetype='Kernel')
p
ggsave(file='figures/dispersal_vs_distance.pdf', scale=1)
ggsave(file='figures/dispersal_vs_distance.eps', scale=1)
|
dd4e9d4d8ea37d2af98470b697ce1456040d8903
|
2d35f719a15e28eebe7b819c1fe9fac9d7c37862
|
/fig/CIcover10h.R
|
1e990438b810d1fd83facf73494ce8eb3a5bed86
|
[] |
no_license
|
JDC-Shimada/bayesbook
|
f7fe5fcb40fd4a5cb2de6ce1ba53df05fd1830f9
|
9202919453c7fc575191ae7b950b53c6d9d328e8
|
refs/heads/master
| 2023-04-18T15:59:41.525953
| 2021-05-06T00:57:12
| 2021-05-06T00:57:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,234
|
r
|
CIcover10h.R
|
args = commandArgs()
basename = sub(".R$", "", sub("^--file=(.*/)?", "", args[grep("^--file=", args)]))
if (length(basename) != 0)
pdf(file=paste0(basename, "_tmp.pdf"), colormodel="gray", width=7, height=3)
par(family="Palatino")
par(mgp=c(2,0.8,0)) # title and axis margins. default: c(3,1,0)
par(mar=c(3,3,2,2)+0.1) # bottom, left, top, right margins. default: c(5,4,4,2)+0.1
pb = function(x) (2/pi)*asin(sqrt(x)) # pbeta(x,0.5,0.5)
qb = function(z) (sin(pi*z/2))^2 # qbeta(z,0.5,0.5)
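# binomHPD: shortest 95% posterior interval for Beta(y+0.5, n-y+0.5)
# (the Jeffreys posterior), with interval length measured on the arcsine pb scale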
binomHPD = function(n, y) {
a = y + 0.5
b = n - y + 0.5
f = function(x) pb(qbeta(x+0.95,a,b))-pb(qbeta(x,a,b))
x = optimize(f, c(0,0.05), tol=1e-8)$minimum
qbeta(c(x,x+0.95), a, b)
}
HPD = sapply(0:10, function(y) binomHPD(10,y))
HPD[1,1] = 0
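# f(x): frequentist coverage probability of the HPD intervals at true success probability x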
f = function(x) {
p = dbinom(0:10, 10, x)
sum(p * (HPD[1,] <= x & x <= HPD[2,]))
}
vf = Vectorize(f)
curve(pb(vf(qb(x))), n=10001, xlab="", ylab="", xaxt="n", yaxt="n", ylim=c(0.72,1))
abline(h=pb(0.95), lty=3)
axis(1, at=pb((0:10)/10), labels=(0:10)/10)
axis(2, at=pb(seq(0.85,1,0.05)), labels=seq(0.85,1,0.05), las=1)
dev.off()
embedFonts(paste0(basename, "_tmp.pdf"), outfile=paste0(basename, ".pdf"),
options="-c \"<</NeverEmbed []>> setdistillerparams\" -f ")
|
d825b5012d0648c79463c4dfe89db8e28474f527
|
b447d3c0c2b6a24cd0c4dce87e1626a92cf58dde
|
/ui.R
|
3821461fb7d76569853875a9f5167b9899371e31
|
[] |
no_license
|
MrConradHarrison/CLEFT-Q-CAT-Score-Checker
|
3b4e693ee2deda97ab818690c6c57ffcb4631914
|
02d1deeb5ae896c9f08812c1281831b135ff71f5
|
refs/heads/main
| 2023-08-19T10:16:41.390055
| 2021-10-01T12:02:48
| 2021-10-01T12:02:48
| 400,149,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,531
|
r
|
ui.R
|
shinyUI(
navbarPage("CLEFT-Q CAT Score Checker",
tabPanel(
"Radar Plot",
fluidPage(
titlePanel("Radar Plot"),
# Text
br(),
h4("Please use this Score Checker to compare CLEFT-Q CAT scores to median scores
from the CLEFT-Q field test (Klassen", em("et al.,"), "2018)."),
fluidRow(
plotOutput(outputId = "RadarPlot") %>% withSpinner(color = "darkmagenta")
),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
br(),
fluidRow(
column(
3,
selectInput(
inputId = "age_radar",
label = "Age range (years)",
choices = c("All ages (8-29)", "8-11", "12-15", "16-19", "20+")
),
selectInput(
inputId = "Sex_radar",
label = "Gender",
choices = c("All genders", "Male", "Female")
),
selectInput(
inputId = "Cleft_type_radar",
label = "Cleft type",
choices = c(
"All cleft types", "Cleft lip", "Cleft palate",
"Cleft lip and alveolus",
"Cleft lip, alveolus and palate"
)
),
selectInput(
inputId = "Laterality_radar",
label = "Laterality",
choices = c("Unilateral or Bilateral", "Unilateral", "Bilateral")
)
),
column(3,
numericInput(
inputId = "facescore",
label = "Face Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "nosescore",
label = "Nose Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "nostrilscore",
label = "Nostrils Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "jawscore",
label = "Jaws Score",
value = 50,
min = 1,
max = 100
)
),
column(3,
numericInput(
inputId = "lipscore",
label = "Lips Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "teethscore",
label = "Teeth Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "scarscore",
label = "Scar Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "schoolscore",
label = "School Score",
value = 50,
min = 1,
max = 100
)
),
column(3,
numericInput(
inputId = "socialscore",
label = "Social Function Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "psychscore",
label = "Psychological Function Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "sfscore",
label = "Speech Function Score",
value = 50,
min = 1,
max = 100
),
numericInput(
inputId = "sdscore",
label = "Speech Distress Score",
value = 50,
min = 1,
max = 100
)
)
)
)
),
tabPanel(
"Population Density",
fluidPage(
# Application title
titlePanel("Population Density"),
# Text
br(),
h4("Please use this Score Checker to compare a CLEFT-Q CAT score to scores
obtained in the CLEFT-Q field test (Klassen", em("et al.,"), "2018)."),
br(),
br(),
# Sidebar
sidebarLayout(
sidebarPanel(
numericInput(
inputId = "score",
label = "Score",
value = 50,
min = 1,
max = 100
),
selectInput(
inputId = "scale",
label = "CLEFT-Q CAT scale",
choices = c(
"Face", "Nose", "Nostrils", "Jaws", "Teeth",
"Lips", "Scar", "School", "Speech function",
"Speech distress", "Psychological function",
"Social function"
)
),
selectInput(
inputId = "age",
label = "Age range (years)",
choices = c("All ages (8-29)", "8-11", "12-15", "16-19", "20+")
),
selectInput(
inputId = "Sex",
label = "Gender",
choices = c("All genders", "Male", "Female")
),
selectInput(
inputId = "Cleft_type",
label = "Cleft type",
choices = c(
"All cleft types", "Cleft lip", "Cleft palate",
"Cleft lip and alveolus",
"Cleft lip, alveolus and palate"
)
),
selectInput(
inputId = "Laterality",
label = "Laterality",
choices = c("Unilateral or Bilateral", "Unilateral", "Bilateral")
)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("Plot") %>% withSpinner(color = "darkmagenta"),
div(textOutput("Text"), style = "font-size:20px;")
)
)
)
),
footer = tags$footer(column(12,
p("Klassen, A. F. et al. (2018) ‘Psychometric findings and normative values for the CLEFT-Q
based on 2434 children and young adult patients with cleft lip and/or palate from 12 countries’,",
em("CMAJ"), ". doi: 10.1503/cmaj.170289.",
style = "font-size:13px;"
),
br(),
p("The CLEFT-Q CAT Score Checker has been developed by Conrad Harrison at the University of Oxford.
This represents independent research funded by the NIHR.
The views expressed are those of the author and not necessarily those of
the University of Oxford, the NHS, the NIHR or the Department of Health and Social Care.
The CLEFT-Q CAT Score Checker is provided with absolutely no warranty. The author, the University of Oxford
and the NIHR disclaim any and all express and implied warranties including without limitation the
implied warranties of title, fitness for a particular
purpose, merchantability and noninfringement. The CLEFT-Q Score Checker is
licensed under BSD_3_clause + file LICENSE. To obtain a copy of the license please",
a(href = "https://github.com/MrConradHarrison/CLEFT-Q-CAT-Score-Checker/blob/main/LICENSE", "click here"), "or contact the author on conrad.harrison@medsci.ox.ac.uk.",
style = "font-size:11px;")
))
)
)
|
806178f67e1b432300391f8213e4ad57c47329fa
|
4f6acb159819268d7108f8e43e4c40e777af9612
|
/man/dynfrail_dist.Rd
|
953217f9043f4c7bf033326fc9e9c4612250b1b9
|
[] |
no_license
|
cran/dynfrail
|
54d2e3fb9c9e0ae5d8b25191db2c0b8e726c8132
|
4a89b5deba02a9a6f28797abf7ce2672f465701c
|
refs/heads/master
| 2021-07-23T00:57:01.678044
| 2017-10-30T09:11:49
| 2017-10-30T09:11:49
| 108,834,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,845
|
rd
|
dynfrail_dist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynfrail_arguments.R
\name{dynfrail_dist}
\alias{dynfrail_dist}
\title{Distribution parameters for dynfrail}
\usage{
dynfrail_dist(dist = "gamma", theta = 2, pvfm = -1/2, lambda = 0.1,
n_ints = NULL, times = NULL)
}
\arguments{
\item{dist}{One of 'gamma', 'stable' or 'pvf'.}
\item{theta}{Frailty distribution parameter. Must be >0.}
\item{pvfm}{Only relevant if \code{dist = 'pvf'} is used. It determines which PVF distribution should be used. Must be larger than -1 and not equal to 0.}
\item{lambda}{Frailty autocorrelation parameter. Must be >0.}
\item{n_ints}{For piece-wise constant frailty, the number of intervals. With \code{n_ints = 0}, the classical shared frailty scenario is obtained.}
\item{times}{A vector of time points which determine the piecewise-constant interval for the frailty. Overrides \code{n_ints}.}
}
\value{
An object of the type \code{dynfrail_dist}, which is mostly used to denote the
supported frailty distributions in a consistent way.
}
\description{
Distribution parameters for dynfrail
}
\details{
The \code{theta} and \code{lambda} arguments must be positive. In the case of gamma or PVF, \code{theta} is the inverse of
the frailty variance, i.e. the larger the \code{theta} is,
the closer the model is to a Cox model. When \code{dist = "pvf"} and \code{pvfm = -0.5}, the inverse Gaussian
distribution is obtained.
For the positive stable distribution, the \eqn{\gamma} parameter of the Laplace transform is
\eqn{\theta / (1 + \theta)}, with the \eqn{alpha} parameter fixed to 1.
}
\examples{
dynfrail_dist()
# Compound Poisson distribution:
dynfrail_dist(dist = 'pvf', theta = 1.5, pvfm = 0.5)
# Inverse Gaussian distribution:
dynfrail_dist(dist = 'pvf')
}
\seealso{
\code{\link{dynfrail}, \link{dynfrail_control}}
}
|
6c8bb0da63c86863907e79ea71d50a2af33b5c2d
|
04cd0fbc561e73b4a8e4912c5253681715faa3ef
|
/Source_data_files/Preclinical_study/weight_change_day_plotting_stats.R
|
dee208a10da44d4d47fdb59872f4f48101c8e45e
|
[] |
no_license
|
raymondkiu/Infant-Clostridium-perfringens-Paper
|
50a30f717187e65d5c83d403ecc5954ae2df2a07
|
aeec074baec297019641969ba696d09456554d13
|
refs/heads/main
| 2023-06-10T17:03:54.348138
| 2023-03-21T12:27:26
| 2023-03-21T12:27:26
| 312,297,407
| 1
| 0
| null | 2023-05-30T14:13:37
| 2020-11-12T14:14:53
|
R
|
UTF-8
|
R
| false
| false
| 2,533
|
r
|
weight_change_day_plotting_stats.R
|
setwd("~/")
library(tidyverse)
library(reshape2) # for melt function
library(Rmisc) # for summarySE
data <- read.csv("weight_change_day_data.csv", header = TRUE, stringsAsFactors = TRUE)
head(data)
#data1 <- melt(data, id.vars ="Group" , variable.name ="Day")
#data1
#write.csv(data1,"weight-change-melted.csv")
# Input format
# Time Group Value
# Day 0 CP 0
# Day 0 CP 0
# Use theme cowplot - minimalist
library('cowplot')
theme_set(theme_cowplot())
# Prep stats for error bar
data <- summarySE(data, measurevar="value", groupvars=c("Group","Day"))
data
p <- data %>% mutate(Group = fct_relevel(Group, "Control","ABX","pfoA+","pfoA-")) %>%
ggplot(aes(x=Day, y=value, color=Group, group=Group)) +
geom_line(size=0.6)+
scale_color_manual(values=c("Control"="lightgreen","ABX"="mediumpurple1","pfoA+"="Red2","pfoA-"="Grey"))+
scale_y_continuous(breaks=c(-2,-1,0,1,2,3,4,5), limits=c(-2,6))+
stat_summary(fun=mean, geom="line") + # only scale_color_manual goes with stat_summary
geom_errorbar(size=0.6, aes(ymin=value-se, ymax=value+se),
width=.5) # Width of the error bar
p + theme(axis.text.x = element_text(angle = 45, vjust=1, hjust=1))
p + theme(axis.text.x = element_blank(),axis.ticks.x=element_blank()) +facet_grid(~ Group) + ylab("Weight Changes (%)")
#theme(axis.text.x = element_text(angle = 45, vjust=1, hjust=1))
## Stats
Data <- read.csv("weight_change_day_data.csv", header = TRUE, stringsAsFactors = TRUE)
head(Data)
shapiro.test(Data$value) # the data appear to deviate from a normal distribution
# filter subsets
Day1 <-filter(Data, Day == "Day 1")
Day2 <-filter(Data, Day == "Day 2")
Day3 <-filter(Data, Day == "Day 3")
Day4 <-filter(Data, Day == "Day 4")
Day5 <-filter(Data, Day == "Day 5")
Day6 <-filter(Data, Day == "Day 6")
# Run statistical analysis
library(FSA)
Summarize(value~ Group, data = Day1)
Summarize(value~ Group, data = Day2)
Summarize(value~ Group, data = Day3)
Summarize(value~ Group, data = Day4)
Summarize(value~ Group, data = Day5)
Summarize(value~ Group, data = Day6)
kruskal.test(value ~ Group, data=Day1)
kruskal.test(value ~ Group, data=Day2)
kruskal.test(value ~ Group, data=Day3)
kruskal.test(value ~ Group, data=Day4)
kruskal.test(value ~ Group, data=Day5)
kruskal.test(value ~ Group, data=Day6)
# Test which group is statistically different (if overall KW test P<0.05), suitable for unequal number of observations
library(rstatix)
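# BH = Benjamini-Hochberg adjustment, controlling the false discovery rate
# across the pairwise comparisons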
dunn_test(Day1, value ~ Group, p.adjust.method = "BH")
dunn_test(Day2, value ~ Group, p.adjust.method = "BH")
|
1125d31a5384f12a870407d13572742a6bb75e9c
|
917c9867f05b73b37c7d68d28fe4a1c57702fd28
|
/ReadFiles/zooObject.R
|
b2009c6d1a77ec95c60a4279ba7cb7c82b2ac79f
|
[] |
no_license
|
xiabofei/rlanguage
|
e06f58dd25629bd11206369a4189220605bfe550
|
809435e5da08427f33223dee367d161e20a0424c
|
refs/heads/master
| 2020-12-06T17:14:31.562219
| 2016-09-27T06:20:29
| 2016-09-27T06:20:29
| 66,824,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
r
|
zooObject.R
|
# 1. The zoo package was originally built for handling daily financial data
# 2. For other time-series needs, see the zoo package documentation for details:
# http://ftp.auckland.ac.nz/software/CRAN/doc/vignettes/zoo/zoo-quickref.pdf
A <- data.frame(date=c("1995-01-01", "1995-01-02", "1995-01-03", "1995-01-06"),
x=runif(4),
y=runif(4))
A$date <- as.Date(A$date)
# Build a zoo object matrix
library(zoo)
B <- A
B$date <- NULL
z <- zoo(as.matrix(B), order.by = A$date)
rm(A, B)
C <- coredata(z)
rownames(C) <- as.character(time(z))
str(C)
str(z)
|
54f70fac02bf992ecc8385d64dd663dfa9165de9
|
2fa58d99bebdb471fc7b81fab879b9c93560e8f7
|
/predicting_prices/ui.R
|
8b4bdb973d13a84f5c1530e9afb838b8071f0077
|
[] |
no_license
|
albert2828/coursera_ddp_final_project
|
86fe5df78b5575c7a9480b5c2873ff9fc35b0bba
|
ef300f5f1868273fc06d7989a42c6a8ba9509317
|
refs/heads/main
| 2023-04-21T21:00:11.693000
| 2021-05-19T21:26:06
| 2021-05-19T21:26:06
| 368,397,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,283
|
r
|
ui.R
|
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Compute the price of your car"),
sidebarLayout(
sidebarPanel(
sliderInput("age",
"Age of your car:",
min = 1,
max = 50,
value = 5),
            selectInput(inputId = "Fuel_Type", label = "Select the type of fuel your car uses",
choices = c("Petrol", "Diesel"),
selected = "Petrol"),
sliderInput("Kms_Driven",
"Kilometers Driven",
min = 1,
max = 500000,
value = 5000),
selectInput("Transmission", "Select the type of Transmission of your car",
choices = c("Automatic", "Manual"),
selected = "Automatic"),
selectInput("Seller_type", "Do you want to sell it yourseld or by a third party?",
choices = c("Dealer", "Individual"),
selected =" Dealer"),
selectInput("Owner", "Number of previous owners",
choices = 0:5,
selected = 0),
submitButton("Submit")
),
mainPanel(
h3("Aproximated price of your car"),
textOutput("pred"),
br(),
p("This app is made so you can aproximate the price of your car if you want to sell it"),
p("You only need to fill a few gaps"),
tags$ul(
tags$li("Age: age of your car"),
tags$li("Fuel Type: Petrol or Diesel (for the moment we are not prepared for electric cars)"),
tags$li("Killometers Driven: if your cars runs in miles, multiply miles by 1.60934"),
tags$li("Transmission: Automatic or Manual"),
tags$li("Do you plan to sell it yourself of to go to an agency?"),
tags$li("Number of owners: How may persons have had the car before you?")
),
p("Finally, just click the Submit buttom!"),
)
)
)
)
|
34d4362ea8162d273afd4dd9202974b768a8e09f
|
b8a0bf8c783e5454a6c5bbb76385c6e852bbe577
|
/quant v1.3/Visualize.R
|
65b84e7d045dcb3f028d419e4b76fae4792fd456
|
[] |
no_license
|
highandhigh/Quantitative-Finance-Project
|
20ee30f1a79f19989d54ffc70046128946d7646b
|
9bc5179b036a624164865ffa8291ea58febc6895
|
refs/heads/master
| 2021-01-13T16:33:40.910622
| 2016-06-24T16:11:57
| 2016-06-24T16:11:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
Visualize.R
|
# Quantitative finance project
# By: JiaRu, jiaru2014@126.com
# R version 3.2.3
# Platform: x86_64-apple-darwin13.4.0 (64-bit)
# Date: 2016-06-22
#
# Version 1.3
# ===================================================================
# Visualization
#
# Input: tradebook (with attr: out-of-sample period), dt
# Output: ggplot object
# layers:
# 1. EndPrc
# 2. Buy / Sell signal
# 3. Profit / Stop
# 4. Volumn
Visualize <- function(tradebook, dt)
{
# merge
date_range <- attr(tradebook, "out_sample_range") # date range
dt_out <- dt[TDate >= date_range[1] & TDate <= date_range[2]]
trade_plot <- melt(
tradebook,
id.vars = c("date_time"),
measure.vars = c("Buy", "Sell", "Profit", "Stop")
)
trade_plot <- trade_plot[!is.na(value)]
  # add EndPrc from dt_out
trade_plot_2 <- merge(
x = trade_plot,
y = dt_out[, .(DateTime ,EndPrc)],
by.x = "date_time",
by.y = "DateTime",
all.x = TRUE,
all.y = TRUE
)
  # because x is a datetime, there are long stretches of non-trading time;
  # convert x to a sequential index, otherwise the plot looks ugly
trade_plot_2[, timeindex := seq_along(date_time)]
  # plotting parameters
  b <- 20  # number of axis breaks
m <- round(nrow(trade_plot_2) / b)
  # draw the plot
g1 <-
ggplot(data = trade_plot_2, aes(x = timeindex, y = EndPrc)) +
geom_line() +
geom_point(data = trade_plot_2[!is.na(variable)],
aes(colour = variable),
alpha = 0.5) +
scale_x_continuous(
breaks = trade_plot_2[timeindex %% m == 0, timeindex],
labels = trade_plot_2[timeindex %% m == 0, format(date_time, format = "%m/%d")]
) +
ggtitle("Trade") +
theme_bw()
return(g1)
}
# # unit test
# Visualize(tradebook, dt)
|
15f6c1b6b16a98911a726ff1fcc724e3419917e1
|
34b1404a1ff8c8abe0be16f9d0bc29f633043e35
|
/tests/setCellLazy.R
|
3741d18fc87f489c68d7ae3e3202bd70f1e7ca93
|
[] |
no_license
|
duncantl/RExcelXML
|
729183fdc549951d986dd730ee9b16846a3a3d86
|
bb278b9e15ae44bf538b86d04f0490c128db4a31
|
refs/heads/master
| 2020-06-07T07:09:44.596626
| 2012-01-18T23:22:58
| 2012-01-18T23:22:58
| 3,922,555
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 448
|
r
|
setCellLazy.R
|
library(RExcelXML)
fn = system.file("templateDocs", "Sample3sheets.xlsx", package="RExcelXML")
w = excelDoc("bbb.xlsx", create = TRUE, template = fn)
wb = workbook(w)
sh = wb[["Sheet1"]]
c1 = cells(sh)
styles = getDocStyles(w)
ft = Font(sz = 10L, face=c("b","i"))
newSt = createStyle(sh, format = 14, font = ft, halign="center", update = FALSE, fg = "FF0000", styleDoc = styles)
setCellStyle(c1[[3]], newSt, update = FALSE)
update(sh, styles)
|
633c2e944f1b5016cf22016e68270f5bf87a3f28
|
44e281a60264d4a48b40f6fdf1005395a6103dc8
|
/R/dnegocc.R
|
ec94b95dcf8b980710d9740f8cef23bd19fefeee
|
[] |
no_license
|
cran/occupancy
|
7f0bd71c4f30ecf4b1cc73864177de5e2ceeb67e
|
4421086015ad2103131cd18e514706913d49ea8f
|
refs/heads/master
| 2023-06-05T13:28:09.280196
| 2021-06-24T10:00:02
| 2021-06-24T10:00:02
| 379,975,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,659
|
r
|
dnegocc.R
|
#' @rdname dnegocc
dnegocc <- function(x, space, occupancy, prob = 1, approx = FALSE, log = FALSE) {
#Check that argument and parameters are appropriate type
if (!is.numeric(x)) stop('Error: Argument x is not numeric')
if (!is.numeric(space)) stop('Error: Space parameter is not numeric')
if (!is.numeric(occupancy)) stop('Error: Occupancy parameter is not numeric')
if (!is.numeric(prob)) stop('Error: Probability parameter is not numeric')
if (!is.logical(approx)) stop('Error: approx option is not a logical value')
if (!is.logical(log)) stop('Error: log option is not a logical value')
#Check that parameters are atomic
if (length(space) != 1) stop('Error: Space parameter should be a single number')
if (length(occupancy) != 1) stop('Error: Occupancy parameter should be a single number')
if (length(prob) != 1) stop('Error: Probability parameter should be a single number')
if (length(approx) != 1) stop('Error: approx option should be a single logical value')
if (length(log) != 1) stop('Error: log option should be a single logical value')
#Set parameters
if (space == Inf) { m <- Inf } else { m <- as.integer(space) }
k <- as.integer(occupancy)
#Check that parameters are in allowable range
if (space != m) stop('Error: Space parameter is not an integer')
if (m <= 0) stop('Error: Space parameter must be positive')
if (occupancy != k) stop('Error: Occupancy parameter is not an integer')
if (k < 0) stop('Error: Occupancy parameter must be non-negative')
if (k > m) stop('Error: Occupancy parameter is larger than space parameter')
if (prob < 0) stop('Error: Probability parameter must be between zero and one')
if (prob > 1) stop('Error: Probability parameter must be between zero and one')
#Create output vector
max.x <- floor(max(x))
NEGOCC <- rep(-Inf, length(x))
#Compute for trivial case where k = 0
if (k == 0) {
for (i in 1:length(x)) {
xx <- x[i]
if (xx == 0) { NEGOCC[i] <- 0 } }
if (log) { return(NEGOCC) } else { return(exp(NEGOCC)) } }
#Compute for trivial case where prob = 0
if (prob == 0) {
if (k > 0) {
for (i in 1:length(x)) {
xx <- x[i]
if (xx == Inf) { NEGOCC[i] <- 0 } }
if (log) { return(NEGOCC) } else { return(exp(NEGOCC)) } } }
#Compute for special case where m = Inf
if (m == Inf) {
NEGOCC <- dnbinom(x, size = k, prob = prob, log = TRUE)
if (log) { return(NEGOCC) } else { return(exp(NEGOCC)) } }
#Compute for non-trivial cases where k > 0 and prob > 0
  #Compute log-probabilities using recursion
if (!approx) {
#Create base vector for recursion
if(prob == 1) {
LOGS <- c(0, rep(-Inf, max.x)) } else {
LOGS <- log(prob) + (0:max.x)*log(1-prob) }
#Update via recursion
r <- 2
while (r <= k) {
NEWLOGS <- rep(-Inf, max.x+1)
LLL <- (0:max.x)*log(1-prob*(m-r+1)/m)
for (t in 0:max.x) {
TERMS <- LLL[1:(t+1)] + LOGS[(t+1):1]
NEWLOGS[t+1] <- log(prob*(m-r+1)/m) + matrixStats::logSumExp(TERMS) }
LOGS <- NEWLOGS
r <- r+1 }
#Generate output vector
for (i in 1:length(x)) {
xx <- x[i]
if ((as.integer(xx) == xx)&(xx >= 0)) {
NEGOCC[i] <- LOGS[xx+1] } } }
#Compute log-probabilities using approximation
if (approx) {
#Compute generalised harmonic numbers
H1 <- sum(1/((m-k+1):m))
H2 <- sum(1/((m-k+1):m)^2)
#Compute moments
MEAN <- max(0,(m/prob)*H1 - k)
VAR <- max(0,(m/prob)^2*H2 - (m/prob)*H1)
#Approximation using discretised gamma distribution
if (VAR == 0) {
APPROX <- c(0, rep(-Inf, max.x)) }
if (VAR > 0) {
SHAPE <- (MEAN + 1/2)^2/VAR
RATE <- m*(MEAN + 1/2)/VAR
LGA <- pgamma((0:(max.x+1))/m, shape = SHAPE, rate = RATE, log.p = TRUE)
LOWER <- LGA[1:(max.x+1)]
UPPER <- LGA[2:(max.x+2)]
APPROX <- UPPER + VGAM::log1mexp(UPPER-LOWER) }
#Generate output vector
for (i in 1:length(x)) {
xx <- x[i]
if ((as.integer(xx) == xx)&(xx >= 0)) {
NEGOCC[i] <- APPROX[xx+1] } } }
#Return output
if (log) { NEGOCC } else { exp(NEGOCC) } }
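# Usage sketch (illustrative values, not from the package docs):
# dnegocc(0:5, space = 10, occupancy = 3, prob = 0.5)                 # exact recursion
# dnegocc(0:5, space = 10, occupancy = 3, prob = 0.5, approx = TRUE)  # gamma approximation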
|
d47b9f92b924389badcbbff74fd4c23630d5e31b
|
838eef61b899bafd172b48d8689bdc0a34d2c7cd
|
/photographer_classifier/pkgs/PhotographerModels/man/load.model.Rd
|
777689f4bd4300240c239b9d5bf67da10d86586e
|
[] |
no_license
|
rajatmnnit/csx415.1-project
|
8bc669768f43d4a87248fa006d36ba2042a31182
|
16a1b3e2e6a70401d841d53b40fd071594c216c2
|
refs/heads/master
| 2020-03-08T13:26:55.512439
| 2018-08-07T22:43:29
| 2018-08-07T22:43:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 622
|
rd
|
load.model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{load.model}
\alias{load.model}
\title{Load the classification model.}
\usage{
load.model(mtype = "gbm")
}
\arguments{
\item{mtype}{Type of the model to be returned. Valid values: [svm | rf | gbm]. Defaults to the highest-accuracy gbm model.}
}
\value{
a caret::train binary classification model object trained using specified model types.
}
\description{
Loads a pre-trained persistent binary classification model to classify customers as "PHOTOGRAPHERS" or "OTHERS".
}
\examples{
load.model()
load.model(mtype="rf")
}
|
f477f3b3fe460ca121dc0ac3ed9ba48fc4e24dd3
|
84dd0562ebd14dab4913a27313c3e82b792e3d76
|
/R/cor_RNA.R
|
a96354688cfd34de45265f08d47915ed5cd017d3
|
[
"Apache-2.0"
] |
permissive
|
yikeshu0611/TCGAimmunelncRNA
|
daaca3623943b63b83ad98ef357c179ad6048c2f
|
9c86b20caa6674a976aca6ea3111755fd34bb5b5
|
refs/heads/main
| 2023-06-11T09:01:11.701450
| 2021-07-03T07:10:31
| 2021-07-03T07:10:31
| 381,676,631
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,638
|
r
|
cor_RNA.R
|
#' correlation of mRNA and lncRNA
#'
#' @param data data of FPKM2df()
#' @importFrom foreach %dopar%
#' @export
#'
cor_RNA <- function(data){
mRNA <- data$mRNA[data$group$bar_code[data$group=='Tumor'],]
message('\nuse ',nrow(mRNA), ' tumors for correlation analysis')
message('mRNA:',ncol(mRNA))
lncRNA <- data$lncRNA[data$group$bar_code[data$group=='Tumor'],]
message('lncRNA:',ncol(lncRNA))
cores <- parallel::detectCores()
cl <- snow::makeSOCKcluster(cores)
doSNOW::registerDoSNOW(cl)
pb <- utils::txtProgressBar(max=ncol(mRNA),
width = 30,
style=3)
progress <- function(n) utils::setTxtProgressBar(pb, n)
opts <- list(progress=progress)
result <-
foreach::foreach(i=1:ncol(mRNA),
.packages="Kendall",
.options.snow=opts) %dopar% {
x <- as.data.frame(t(sapply(1:ncol(lncRNA), function(j) as.data.frame(t(do.call('c',cor.test(mRNA[,i],lncRNA[,j]))))[,c(1,4,3)])))
colnames(x) <- c('t','cor','pvalue')
x <- do::numeric.it(x,c('t','cor','pvalue'))
x <- round(x,3)
x$mRNA <- colnames(mRNA)[i]
x$lncRNA <- colnames(lncRNA)
x
}
close(pb)
snow::stopCluster(cl)
co <- do.call(rbind,result)
co$regulate <- ifelse(co$cor>0,'positive','negative')
cor <- co[,c("mRNA", "lncRNA", "t", "cor", "pvalue",'regulate')]
list(cor=cor,
bar_code=rownames(mRNA),
mRNA=colnames(mRNA),
lncRNA=colnames(lncRNA))
}
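# Usage sketch (assumes the list returned by FPKM2df(), with $mRNA, $lncRNA
# and $group components as used above):
# res <- cor_RNA(data)
# head(res$cor)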
|
982a87f73bb5964c6b6fda2d900dd1f422fee7e0
|
9994253dee538a79e4c7a9d9cdcde5dccb9fe2f7
|
/man/readMyData.Rd
|
d3104869b5653328776f3e9e6713e631142654a5
|
[] |
no_license
|
lehmansociology/lehmansociology
|
aa8d80e4a1bbb0e85c63aa25a318ef24daf69483
|
9ff6232f8f26ee0a5921e7f013bca7994bc2f2e1
|
refs/heads/master
| 2022-06-14T06:01:19.829363
| 2022-05-15T05:30:56
| 2022-05-15T05:30:56
| 103,927,010
| 1
| 2
| null | 2019-12-31T14:45:44
| 2017-09-18T10:48:28
|
R
|
UTF-8
|
R
| false
| true
| 273
|
rd
|
readMyData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readmydata.R
\name{readMyData}
\alias{readMyData}
\title{Read My Data}
\usage{
readMyData(datasetname)
}
\description{
This function reads a webchip style dataset
}
\keyword{Freq}
\keyword{data}
|
7c9b5f729d3456af979b3bfd3b1d0c9692e7275e
|
c5d173f7755dc27e348ef616ebafba137ee7e1da
|
/man/glue_fmt_chr.Rd
|
480fc4cbdfe920f30cf22137530c3822b41c1b35
|
[
"MIT"
] |
permissive
|
Ilia-Kosenkov/RLibs
|
85679202753b7565d8995350e74b28a8c3d624d9
|
60e34778a96f5ba9705b5fa5da0bffcc3be482fd
|
refs/heads/master
| 2021-03-27T16:13:53.106985
| 2020-01-23T15:33:33
| 2020-01-23T15:33:33
| 96,409,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 372
|
rd
|
glue_fmt_chr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_fmt.R
\name{glue_fmt_chr}
\alias{glue_fmt_chr}
\title{glue_fmt_chr}
\usage{
glue_fmt_chr(..., .envir = parent.frame())
}
\arguments{
\item{...}{Passed to \code{glue::glue}.}
\item{.envir}{Evaluation environment.}
}
\value{
Format-aware interpolated string.
}
\description{
glue_fmt_chr
}
|
bc075509eb99a880e2cac4fd6162e7ee5ad2e668
|
269d71aab45fac39e11c44d40aaa264dc27026e6
|
/WHO_Life_Expectancy.R
|
59884cbd72a066afb848e823e9af088cfc96e9a4
|
[] |
no_license
|
arun98-mohan/ML_Academic_Assignments
|
41013aa64616203f0bf49f8c4eef35c959e7282b
|
d3e2f9a1f5b0703c25c0c99bae1d40dc1fad27cb
|
refs/heads/master
| 2020-04-18T01:51:12.649239
| 2019-03-04T23:40:24
| 2019-03-04T23:40:24
| 167,137,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
WHO_Life_Expectancy.R
|
life<-read.csv("/Users/Arun/Desktop/Datasets/life.csv")
head(life)
life2<-life[,c(4,5,6,7,8,9,10,11,12,13,14,15,16,17)]
head(life2)
life2<-na.omit(life2)
cor(life2)
plot(percentage.expenditure~GDP,data=life2)
set.seed(110)
training_row_index<-sample(1:nrow(life2),0.75*nrow(life2))
training_data<-life2[training_row_index,]
test_data<-life2[-training_row_index,]
linmod=lm(percentage.expenditure~GDP,data=training_data) # fit on the training split, not the full data
linmod
summary(linmod)
abline(linmod,col="green")
p<-predict(linmod,test_data)
summary(p)
acc_pred<-data.frame(cbind(acc=test_data$percentage.expenditure,pred=p))
head(acc_pred)
cor(acc_pred)
|
9c046dd052a91ea881a9d2d0bfba85f1fa7e705b
|
726573b44fd67576df1df8e916b562b2b84e5091
|
/man/clean_spocc.Rd
|
ad464b12cce51e6fc6e1e0e43df856c634411ed0
|
[
"MIT"
] |
permissive
|
Libardo1/spocc
|
0b17b9a4c4cfe467f53b55093f70e535ccafe657
|
bb9379125053e902d6006198db856687bd4b0149
|
refs/heads/master
| 2021-01-17T14:37:28.115221
| 2014-07-01T17:14:12
| 2014-07-01T17:14:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 581
|
rd
|
clean_spocc.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{clean_spocc}
\alias{clean_spocc}
\title{Clean spocc data}
\usage{
clean_spocc(input, country = NULL, habitat = NULL)
}
\arguments{
\item{input}{An object of class occdat}
\item{country}{(logical) Attempt to clean based on country}
\item{habitat}{(logical) Attempt to clean based on habitat}
}
\description{
Clean spocc data
}
\examples{
\dontrun{
res <- occ(query = c('Ursus','Accipiter','Rubus'), from = 'bison', limit=120)
res_cleaned <- clean_spocc(res)
class(res_cleaned) # now with classes occdat and occclean
}
}
|
0033b0e7f1910eb08e8c8ec367edfc5fff40ef8c
|
ca0537a5f6bec94d7d472e5b0fa022b465f3853a
|
/plot5.R
|
7bab16ee9c77fbe076fc9ec43eeacc9d8dd91a42
|
[] |
no_license
|
tjeerdluykx/ExData_CP2
|
8ef958f44d92ab3f939ca0e5a8d18148803769b3
|
fc0bafea06df3092b6cd83dbb6edb415f1cbca07
|
refs/heads/master
| 2016-09-06T12:39:33.086960
| 2015-07-23T06:33:15
| 2015-07-23T06:33:15
| 39,489,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,512
|
r
|
plot5.R
|
## Title: plot5.R
## Author: Tjeerd Luykx
## Date: July 22nd 2015
## Description: create plot question 5.
library(dplyr)
library(ggplot2)
FileName <- "exdata-data-NEI_data.zip"
# Downloading and unzipping dataset:
if (!file.exists(FileName)){
FileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(FileURL, FileName, method="curl")
}
if (!file.exists("exdata-data-NEI_data")) {
unzip(FileName)
}
# Load data sets from home directory:
if(!exists("NEI")){
NEI <- readRDS("./summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("./Source_Classification_Code.rds")
}
# Question 5: how have emissions from motor vehicle sources changed from 1999–2008 in Baltimore City?
# Merge the two data sets
NEISCC <- full_join(NEI, SCC, by="SCC")
# Filter large dataset
NEISCC.f <- filter(NEISCC, fips == "24510" & type == "ON-ROAD")
# Aggregate emissions
BalEmMotor <- aggregate(Emissions ~ year, NEISCC.f, FUN = sum)
colnames(BalEmMotor) <- c("Year","Emissions")
# Create barplot
png("plot5.png")
gPlot <- ggplot(BalEmMotor, aes(factor(Year), Emissions))
gPlot <- gPlot + geom_bar(stat="identity",fill="black", colour="blue") +
xlab("Year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle("Baltimore Total PM'[2.5]*'Emissions from motor vehicles: 1999 to 2008")
print(gPlot)
dev.off()
# Answer: emissions from motor vehicles have significantly decreased in Baltimore City from 1999 to 2008.
|
ad2099c3e56e5fd7076c9bd01b109c803f7314b5
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/SuperGauss/R/Cholesky.R
|
aa9192621a72c4284fa075e0afdeb5185234b409
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
r
|
Cholesky.R
|
#' Cholesky multiplication with Toeplitz variance matrices.
#'
#' Multiplies the Cholesky decomposition of the Toeplitz matrix with another matrix, or solves a system of equations with the Cholesky factor.
#'
#' @name Cholesky
#' @aliases cholXZ cholZX
#'
#' @param X Length-`N` or `N x p` matrix of observations.
#' @param Z Length-`N` or `N x p` matrix of residuals.
#' @param acf Length-`N` autocorrelation vector of the Toeplitz variance matrix.
#'
#' @return Size `N x p` residual or observation matrix.
#'
#' @details If `C == t(chol(toeplitz(acf)))`, then `cholZX()` computes `C %*% Z` and `cholXZ()` computes `solve(C, X)`. Both functions use the Durbin-Levinson algorithm.
#' @example examples/Cholesky.R
NULL
#' @rdname Cholesky
#' @export
cholZX <- function(Z, acf) {
n <- length(acf)
Z <- as.matrix(Z)
if(nrow(Z) != n) stop("Z and acf have incompatible dimensions.")
DurbinLevinson_ZX(Z = Z, acf = acf)
}
#' @rdname Cholesky
#' @export
cholXZ <- function(X, acf) {
n <- length(acf)
X <- as.matrix(X)
if(nrow(X) != n) stop("X and acf have incompatible dimensions.")
DurbinLevinson_XZ(X = X, acf = acf)
}
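# A minimal usage sketch (illustrative, not from the package docs): with
# C = t(chol(toeplitz(acf))), cholZX() reproduces C %*% Z and cholXZ()
# reproduces solve(C, X), so the two calls round-trip.
if (FALSE) {
  acf <- exp(-(0:9))                  # toy length-10 autocorrelation vector
  Z <- matrix(rnorm(20), nrow = 10)   # N x p residual matrix
  C <- t(chol(toeplitz(acf)))
  all.equal(cholZX(Z, acf), C %*% Z)          # should be TRUE
  all.equal(cholXZ(cholZX(Z, acf), acf), Z)   # round trip recovers Z
}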
|
75b256dc6b3c7a57f522fa1e25440f51dfe8915d
|
2f22514a6c4b4e769d08027c9db4b8fdccac3fd7
|
/Rbonaut2/man/itemID2Params.Rd
|
1256d34c734a8388d6cf8878c5c48fdb99256985
|
[] |
no_license
|
cavorit/Rbonaut2
|
4489bff4ac9fb4c228d9315de4c80e9fe9e0fd3d
|
b0ecbd74411cc2c1306b06e059c130f45a0749b5
|
refs/heads/master
| 2021-01-24T14:45:43.549973
| 2017-02-22T14:51:03
| 2017-02-22T14:51:03
| 47,259,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 641
|
rd
|
itemID2Params.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/itemID2Params.R
\name{itemID2Params}
\alias{itemID2Params}
\title{itemID2Params}
\usage{
itemID2Params(ItemID)
}
\arguments{
\item{ItemID}{character of length 1 giving the name of the item. BL01:BL32 are implemented}
}
\value{
list
}
\description{
Returns information to simFBN() about how the next ball should be played
}
\details{
For a more detailed description, see the Markdown manual BL32MultiTargetSimTest.md. This function creates the node "nextB".
}
\examples{
itemID2Params("BL03")
}
\author{
Harald Fiedler
}
|
fa5ddcb90227b6385d405b81f50c1df650e39c97
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/crimelinkage/examples/compareCrimes.Rd.R
|
46f0abd64a3cde4bd259ba76f31c2bf51efd86c9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
r
|
compareCrimes.Rd.R
|
library(crimelinkage)
### Name: compareCrimes
### Title: Creates evidence variables by calculating 'distance' between
### crime pairs
### Aliases: compareCrimes
### ** Examples
data(crimes)
pairs = t(combn(crimes$crimeID[1:4],m=2)) # make some crime pairs
varlist = list(
spatial = c("X", "Y"),
temporal = c("DT.FROM","DT.TO"),
categorical = c("MO1", "MO2", "MO3")) # crime variables list
compareCrimes(pairs,crimes,varlist,binary=TRUE)
|
67b79784f6a5c3a584691b979b73499bda7559f4
|
791faa0afc9c0e613df80709016f4cb094473e5f
|
/MFA/man/LG_table.Rd
|
5e3fca9ff1e031dc01ddf73ceec03e78bffa51cb
|
[] |
no_license
|
LlamaL/ShinyApp-RStudio
|
84f79b30c4b5a57507f52776acc5d28f0a8ce7fa
|
7e7ab1490c86cf89c50e9a3b2364322579a49bd3
|
refs/heads/master
| 2021-05-03T09:59:47.998108
| 2018-02-06T22:23:02
| 2018-02-06T22:23:02
| 120,529,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 578
|
rd
|
LG_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LG_table.r
\name{LG_table}
\alias{LG_table}
\title{Lg Table}
\usage{
LG_table(dataset, sets)
}
\arguments{
\item{dataset}{a normalized dataframe or matrix}
\item{sets}{list of vector contains vector of indices of each group}
}
\value{
a table of Lg coefficients
}
\description{
Return a table of Lg coefficients between any two subsets of a normalized dataset
}
\examples{
# default
ndata <- scale(wine_data)
LG_table(ndata,sets=list(1:6,7:12,13:18,19:23,24:29,30:34,35:38,39:44,45:49,50:53))
}
|
4aea51ba5512e034ceed75d37bf94ec03a660406
|
1cbaa63faef96016849ec72cffb7a6f1428a30c9
|
/man/MakeFIFOs.Rd
|
86fae4cb0346572e04dd51884e363ac6493076d0
|
[] |
no_license
|
cran/MultiJoin
|
efe3c4106ab9f3ebcf52a3ddfbebc2a6756b9816
|
81d3779fab11e7af59f0505b4d287390d4099683
|
refs/heads/master
| 2021-01-25T10:00:01.703163
| 2018-11-15T21:10:07
| 2018-11-15T21:10:07
| 35,290,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,144
|
rd
|
MakeFIFOs.Rd
|
\name{MakeFIFOs}
\alias{MakeFIFOs}
\title{Creates named Unix pipes (FIFOs) to which gzipped files can be streamed, e.g. for further joins}
\description{Additional filters can be implemented based upon the input arguments.
The returned filter string is typically used between shell pipes. }
\usage{MakeFIFOs(file = "file1.txt.gz", FIFO = "/tmp/fifo1", path = ".",
filterStr = " | cut -f2,3 -d\\" \\" --complement", mycat = "gunzip -cf ",
verbose = 2)}
\arguments{
\item{file}{Name of the file that contains the data to uncompress and filter on}
\item{FIFO}{Name of the FIFO to create}
\item{path}{Directory to find the files in}
\item{filterStr}{various inline filters that act locally and do not need an input file,}
\item{mycat}{effective cat command}
\item{verbose}{level of verbosity}
}
\value{filter string}
\author{"Markus Loecher, Berlin School of Economics and Law (BSEL)" <markus.loecher@gmail.com>}
\examples{
if (0){
MakeFIFOs(verbose=2)
MakeFIFOs(filterStr=" | awk '$2 > 100 && $3 > 5' |
cut -f2,3 -d\" \" --complement | head -n 10000 | sort -k1,1")
}
}
|
5e51e171e6fd584f068d215d0b6d34c27543e73c
|
706ffaca97a7b980b960010aa96aaea81fd274a1
|
/02_rprog/complete.R
|
27e8a3e53bbdae26598d3a8dda6b8467fdd3df11
|
[] |
no_license
|
szebenyib/datasciencecoursera
|
ea40f3550307a512a5cbd545963c822ee5017a97
|
b1adfaea1198875dabb619009c7e02f72499e2d3
|
refs/heads/master
| 2021-01-19T18:30:06.560198
| 2015-12-26T21:06:30
| 2015-12-26T21:06:30
| 31,818,358
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 935
|
r
|
complete.R
|
complete <- function(directory, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
#files_df <- list.files(directory)
df = data.frame(id = id,
nobs = 0)
line_counter <- 1
for (i in id) {
full_name <- paste(sprintf("%03d",
i),
".csv",
sep="")
full_url <- paste(directory,
"/",
full_name,
sep="")
obs_df <- read.csv(full_url)
df[line_counter, 2] <- sum(complete.cases(obs_df))
line_counter = line_counter + 1
}
return(df)
}
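# Usage sketch (assumes a directory of monitor CSVs named 001.csv, 002.csv, ...,
# e.g. the course's "specdata" folder):
# complete("specdata", 1:3)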
|
e95aea46e74ce7b3c73c3b14f2eab2647ac4c877
|
438352c9be21915f9aa2bda4b1743e45904a5118
|
/man/compute_budyko2.Rd
|
cdea4a0915863c1efa87ff48a25356ca99d90048
|
[] |
no_license
|
Lihao-CAU/ecohydroexamples
|
1658fce766a2d54fc70faa96caf8b9435d8e9e9a
|
768dd71af5b8cccb93bb20270af7a40ad746128b
|
refs/heads/master
| 2020-06-15T20:24:06.892433
| 2017-11-10T03:26:01
| 2017-11-10T03:26:01
| 195,384,528
| 1
| 0
| null | 2019-07-05T09:48:09
| 2019-07-05T09:48:09
| null |
UTF-8
|
R
| false
| true
| 508
|
rd
|
compute_budyko2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_budyko2.R
\name{compute_budyko2}
\alias{compute_budyko2}
\title{compute_budyko2}
\usage{
compute_budyko2(P, PET, w = 2.63)
}
\arguments{
\item{P}{precipitation}
\item{PET}{potential evapotranspiration}
\item{w}{adjustment parameter (default 2.63)}
}
\value{
Evaporative index (ratio), Q (streamflow), AET (actual evapotranspiration)
}
\description{
Computes the evaporative index (ratio of AET/P) using the Budyko framework
}
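\details{
A sketch of the assumed formula (not stated in this file): given the default
\code{w = 2.63}, this presumably implements Fu's form of the Budyko curve,
\eqn{AET/P = 1 + PET/P - (1 + (PET/P)^w)^{1/w}}, from which Q and AET follow.
}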
\author{
Naomi
}
|
56e99c455865fc54953782af3b31ba6089d1c278
|
7a24a59293bafdd638d2fb9cdc10aa00359f5797
|
/man/read.fitskey.Rd
|
2141a8d277143a0f094ce740146bbc25bdcfca01
|
[] |
no_license
|
cran/astro
|
c0c92d6ff150a68ce81cc7f75e1888ee6498c36f
|
19710691a4a1a7d18aab7ccd31a7be31e0fe2b2a
|
refs/heads/master
| 2021-01-13T01:30:32.071603
| 2014-09-08T00:00:00
| 2014-09-08T00:00:00
| 17,694,512
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 975
|
rd
|
read.fitskey.Rd
|
\name{read.fitskey}
\alias{read.fitskey}
\title{Read FITS Header Keyword}
\description{The mid-level function 'read.fitskey' allows FITS header keywords to be read directly into R.}
\usage{read.fitskey(key, file, hdu = 1, comments = FALSE,
strip = c(" ","'"," "), maxlines = 50000)}
\arguments{
\item{key}{header keyword (may be a vector)}
\item{file}{file name}
\item{hdu}{header and data unit to be read}
\item{comments}{output header comments?}
\item{strip}{lead/trail characters stripped from header 'value' data}
\item{maxlines}{maximum number of header lines}
}
\details{
The mid-level function 'read.fitskey' is a wrapper around 'read.fits', and provides a more simplistic output of that routine.
}
\value{
A vector of data equal in length to the input key request. NA is returned where no keys have been found.
}
\author{
Lee Kelvin <lee.kelvin@uibk.ac.at>
}
\seealso{
The astronomy package: \code{\link{astro}}.
}
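\examples{
\dontrun{
# Hypothetical file and keywords (illustrative only):
read.fitskey(c("NAXIS1", "NAXIS2"), "image.fits")
}
}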
\keyword{data}
|
b67a9c5e1df771b9bfae0f281582157b2573d308
|
2800fef4eae5c523fb424ef3c672c5f3e9abe537
|
/data-raw/procdat.R
|
de8b5a073d38fdf4543368db9d2fa7183f6628c6
|
[] |
no_license
|
bbuchsbaum/pcfacespace
|
78097fc869ffa6961a7cd867edafd8d88c5bfe8c
|
607c4e20ba05d99f937673703c3bb58aa84fd9a8
|
refs/heads/master
| 2023-03-08T07:58:20.171989
| 2021-02-18T18:22:44
| 2021-02-18T18:22:44
| 302,968,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
procdat.R
|
library(rmatio)
pfd_all_faces <- rmatio::read.mat("data-raw/pfd_all_faces.mat")[[1]]
#usethis::use_data(pfd_all_faces, overwrite=TRUE)
pfd_avg_face <- rmatio::read.mat("data-raw/pfd_avg_face.mat")[[1]]
#usethis::use_data(pfd_avg_face, overwrite=TRUE)
pfd_pc <- rmatio::read.mat("data-raw/pfd_pc.mat")[[1]]
#usethis::use_data(pfd_pc, overwrite=TRUE)
pfd_score <- rmatio::read.mat("data-raw/pfd_score.mat")[[1]]
#usethis::use_data(pfd_score, overwrite=TRUE)
pfd_std_score <- rmatio::read.mat("data-raw/pfd_std_score.mat")[[1]]
#usethis::use_data(pfd_std_score, overwrite=TRUE)
pfd_demo <- as.data.frame(rmatio::read.mat("data-raw/pfd_demographics.mat")[[1]])
names(pfd_demo) <- c("id", "pop", "sex", "age")
pfd_data <- list(
all_faces=pfd_all_faces,
avg_face=pfd_avg_face,
pc=pfd_pc,
scores=pfd_score,
std_scores=pfd_std_score,
demographics=pfd_demo
)
usethis::use_data(pfd_data, overwrite=TRUE)
|
eb4a2001f5d0691bceaeed8b52edea8204f9f467
|
eb38363e113cd1331b929276155f4dd409cd5de4
|
/man/one_URL_geoJSON.Rd
|
057d8fe13796ae5013e3d7b80a15467beaca3e66
|
[] |
no_license
|
ElliottMess/pcodesOCHA
|
581d261de3c0cbe790055a9a8a7e5972a05a3357
|
3a991dc02e3694ff012694616521120d9f1a3452
|
refs/heads/master
| 2023-05-09T03:57:25.028768
| 2021-05-12T12:00:23
| 2021-05-12T12:00:23
| 328,928,392
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 837
|
rd
|
one_URL_geoJSON.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcodes_scrapping.R
\name{one_URL_geoJSON}
\alias{one_URL_geoJSON}
\title{Scrap geoJSON from one URL on \href{https://gistmaps.itos.uga.edu/arcgis/rest/services/COD_External}{OCHA COD REST API}}
\usage{
one_URL_geoJSON(layer_URL)
}
\arguments{
\item{layer_URL}{character - A valid FeatureServer layer directory URL.
Example: "https://gistmaps.itos.uga.edu/arcgis/rest/services/COD_External/ZMB_pcode/FeatureServer/2"}
}
\value{
geoJSON - a geoJSON object containing the layer's features
}
\description{
Scrap geoJSON from one URL on \href{https://gistmaps.itos.uga.edu/arcgis/rest/services/COD_External}{OCHA COD REST API}
}
\examples{
\dontrun{
one_URL_geoJSON("https://gistmaps.itos.uga.edu/arcgis/rest/services/COD_External/ZMB_pcode/FeatureServer/2")
}
}
|
c61bb4f5aeaf314f3b53d47b93c7dd7e401b8231
|
f48eca83e984133c32ef38560e7015ed9ae1f4de
|
/r5_weather1.R
|
c164d2376cd636a3dbb51767b6744c8734c01b87
|
[] |
no_license
|
DataScienceGB/test
|
d39c3e7859ae384fd48926c1a75cf4fec515088b
|
cf429494320727efa6ffc0e32c42326e2a23a6e8
|
refs/heads/master
| 2020-04-06T04:52:36.236085
| 2015-08-23T04:34:02
| 2015-08-23T04:34:02
| 30,780,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,562
|
r
|
r5_weather1.R
|
library(sqldf)
library(xtable)
library(ggplot2)
library(gridExtra)  # for grid.arrange()
# Check for the data file: if the compressed archive is missing, download it
# from the source first; read.csv() reads .bz2 files directly
csv_storm_file="./data/repdata_data_StormData.csv"   # uncompressed path (kept for reference)
bz2_storm_file="./data/repdata_data_StormData.csv.bz2"
if (!file.exists(bz2_storm_file)) {
  fileUrl<-"https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2"
  download.file(fileUrl,dest=bz2_storm_file,method="curl")
}
stormdf=read.csv(bz2_storm_file,stringsAsFactors=F)
#Convert PROPDMG and CROPDMG to same "scale"(usd)
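# NOAA exponent codes: K/k = thousands, M/m = millions, B/b = billions;
# any other code is treated as a multiplier of 1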
stormdf[!(stormdf$PROPDMGEXP %in% c("b","B","k","K","m","M")),"PROPDMGEXP"] = "1"
stormdf[stormdf$PROPDMGEXP %in% c("k","K"),"PROPDMGEXP"] = "1000"
stormdf[stormdf$PROPDMGEXP %in% c("m","M"),"PROPDMGEXP"] = "1000000"
stormdf[stormdf$PROPDMGEXP %in% c("b","B"),"PROPDMGEXP"] = "1000000000"
stormdf[!(stormdf$CROPDMGEXP %in% c("b","B","k","K","m","M")),"CROPDMGEXP"] = "1"
stormdf[stormdf$CROPDMGEXP %in% c("k","K"),"CROPDMGEXP"] = "1000"
stormdf[stormdf$CROPDMGEXP %in% c("m","M"),"CROPDMGEXP"] = "1000000"
stormdf[stormdf$CROPDMGEXP %in% c("b","B"),"CROPDMGEXP"] = "1000000000"
#Convert Strings to Numeric values
stormdf$PROPDMGEXP=as.numeric(stormdf$PROPDMGEXP)
stormdf$CROPDMGEXP=as.numeric(stormdf$CROPDMGEXP)
#summarize health_impact by event type
health_grp=sqldf("select EVTYPE,count(*) as number_of_events, sum(FATALITIES+INJURIES) as total_impact,
avg(FATALITIES+INJURIES) as impact_by_event
from stormdf
where (FATALITIES+INJURIES)!='NA'
group by EVTYPE
order by 3 desc limit 10")
sqldf("select sum(total_impact) as Total_Impact,
case EVTYPE when 'TORNADO' then 'T'
else 'NT' end as grp
from health_grp
group by case EVTYPE when 'TORNADO' then 'T'
else 'NT' end")
econ_grp=sqldf("select EVTYPE,
count(*) as number_of_events,
sum(
(PROPDMG*PROPDMGEXP) +
(CROPDMG*CROPDMGEXP)
) as econ_impact ,
avg(
(PROPDMG*PROPDMGEXP) +
(CROPDMG*CROPDMGEXP)
) as econ_impact_by_event
from stormdf
where (PROPDMG+CROPDMG)!='NA'
group by EVTYPE
order by 3 desc limit 10"
)
#set impact to millions of usd
econ_grp$econ_impact=round(econ_grp$econ_impact/1000000 ,0)
p<-ggplot(health_grp,aes(x=reorder(EVTYPE, +total_impact),y=total_impact)) +
geom_bar(stat="identity",fill=1:10) +
scale_fill_manual(values=c(1:10)) +
coord_flip() +
geom_text(aes(y=total_impact,ymax=total_impact,
label=total_impact)
,position= position_dodge(width=0.2), hjust=.5 ,
vjust=1.2,
size=rel(3),angle=90)+
theme_bw(base_family = "Arial") +
theme(panel.background = element_rect(fill = "lightblue"),
panel.grid.minor = element_line(linetype = "dotted")) +
labs(title="Severe Weather Public Health Impact")+
labs(y="Injuries + Fatalities" ,x="Event Type")
#print(p)
#label=round(econ_impact/1000000,2)
g<-ggplot(econ_grp,aes(x=reorder(EVTYPE, +econ_impact),y=econ_impact)) +
geom_bar(stat="identity",fill=1:10) +
scale_fill_manual(values=c(1:10)) +
coord_flip() +
geom_text(aes(y=econ_impact,ymax=econ_impact,
label= econ_impact)
,position= position_dodge(width=0.2), hjust=.5 ,
vjust=1.2,
size=rel(3),angle=90)+
theme_bw(base_family = "Arial") +
theme(panel.background = element_rect(fill = "lightblue"),
panel.grid.minor = element_line(linetype = "dotted")) +
labs(title="Severe Weather Economic Impact")+
labs(y="Properties and Crops Damage (Millions of Dollars)" ,x="Event Type")
#print(g)
grid.arrange(g,p,nrow=2)
#Not used in porject but useful
#convert to Date type
#stormdf$BGN_DATE=strptime(stormdf$BGN_DATE,"%m/%d/%Y %H:%M:%S")
|
49fbee138ba296868dde79411b1205b828bbb5e0
|
8c7dc1facdc0672df7fed535258a1d446b419c9d
|
/00_packages_and_helpers/helpers_plot_functions.R
|
1f7c4ba89358e063ac8a5149ec730a01230697d4
|
[
"MIT"
] |
permissive
|
boyercb/mmc
|
2b0dbebe5d6f0fcce60c9fedadd520a6ddc26e7c
|
654fe8d8e59a503b0ac835433b270a21aaa7f47b
|
refs/heads/master
| 2020-06-26T18:11:50.404833
| 2020-01-21T03:30:42
| 2020-01-21T03:30:42
| 199,709,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,671
|
r
|
helpers_plot_functions.R
|
# ggplot theme ------------------------------------------------------------
mmc_theme <-
function() {
theme_bw() +
theme(
axis.ticks = element_blank(),
axis.line = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_line(color = '#eeeeee'),
strip.background = element_blank(),
legend.position = "bottom",
text = element_text(family = "Palatino"),
plot.title = element_text(hjust = 0.5)
)
}
# Plotting function -------------------------------------------------------
plot_coefs <- function(plot_data, outcome_levels, type_levels){
plot_data$outcome <- factor(plot_data$outcome, levels = outcome_levels)
plot_data$type <- factor(plot_data$type, levels = type_levels)
ggplot(plot_data, aes(y = fct_reorder(outcome, estimate), x = estimate, shape = index)) +
geom_point() +
geom_vline(xintercept = 0, linetype = "dashed", size = .25) +
geom_segment(aes(x = conf.low, xend = conf.high, y = outcome, yend = outcome),
alpha = .3) +
geom_text(data = filter(plot_data, index == "Yes"),
aes(label = round(estimate, 3)), nudge_y = 0.5, family = "Palatino", size = 2.5
) +
facet_grid(type~adjusted, scales = "free_y", space = "free_y") +
labs(
x = "Treatment Effect",
y = ""
) +
# scale_y_discrete(expand = c(.2, 0)) +
coord_cartesian(clip = "off") +
scale_shape_manual(values = c(19, 5), guide = FALSE) +
mmc_theme() +
theme()
}
# plot treatment effects --------------------------------------------------
plot_treatment_effects <- function(
fit,
outcome,
data,
type = "individual",
color = NULL,
color_name = NULL,
color_values = NULL,
color_labels = NULL,
title = NULL,
ylabel = outcome,
xlabel = NULL
) {
plot_df <-
data %>%
group_by(treatment) %>%
summarise(n = n()) %>%
ungroup()
predictions <-
predict(fit,
newdata = plot_df,
interval = "confidence",
alpha = 0.05)
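  # build the plotting frame; specd() (fixed-decimal number formatting) is
  # assumed to be defined elsewhere in the project's helpers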
plot_df <-
plot_df %>%
mutate(
pred = predictions[, 1],
conf95_low = predictions[, 2],
conf95_high = predictions[, 3],
label = specd(pred, 3)
)
p <- ggplot(plot_df, aes(
x = factor(treatment, labels = c("Control", "Treatment")),
y = pred,
))
if (type == "cluster") {
if (is.null(color)) {
p <- p + geom_jitter(
aes(
x = factor(treatment, labels = c("Control", "Treatment")),
y = get(outcome),
size = n
),
data = data,
alpha = 0.30,
width = 0.2,
height = 0
)
} else {
p <- p + geom_jitter(
aes(
x = factor(treatment, labels = c("Control", "Treatment")),
y = get(outcome),
size = n,
color = factor(get(color))
),
data = data,
alpha = 0.30,
width = 0.2,
height = 0
)
}
} else {
if (is.null(color)) {
p <- p + geom_jitter(
aes(
x = factor(treatment, labels = c("Control", "Treatment")),
y = get(outcome)
),
data = data,
alpha = 0.30,
width = 0.2,
height = 0.1
)
} else {
p <- p + geom_jitter(
aes(
x = factor(treatment, labels = c("Control", "Treatment")),
y = get(outcome),
color = factor(get(color))
),
data = data,
alpha = 0.30,
width = 0.2,
height = 0.1
)
}
}
p <- p +
geom_point() +
geom_text(aes(label = label), nudge_x = 0.075, size = 3) +
geom_errorbar(aes(ymin = conf95_low, ymax = conf95_high), width = 0) +
labs(
title = title,
x = xlabel,
y = ylabel
) +
mmc_theme() +
scale_color_manual(
name = color_name,
values = color_values,
labels = color_labels
) +
theme(
legend.position = "bottom",
axis.title = element_text(size = 10),
plot.title = element_text(size = 10)
)
return(p)
}
# plot balance ------------------------------------------------------------
plot_balance <- function(plot_data, levels){
plot_data$outcome <- factor(plot_data$outcome, levels = levels)
ggplot(plot_data, aes(y = outcome, x = estimate)) +
geom_point() +
geom_vline(xintercept = 0, linetype = "dashed", size = .25) +
geom_segment(aes(x = conf.low, xend = conf.high, y = outcome, yend = outcome),
alpha = .3) +
facet_grid(~blocks) +
labs(
x = "Treatment Effect",
y = ""
) +
mmc_theme()
}
|
8a4289dad507d5d63ca971a1b9ec36639602ee8c
|
38829c3f894aa8617bd1d914fa8ad2a8efe962e0
|
/R/GCAT/man/plot_data.Rd
|
3c5c8e60c6ba0589e469278a8c04d281ad456936
|
[] |
no_license
|
ybukhman/GCAT
|
04304e045c6b16cc2f1e7beb14ebedc115508c70
|
f7be365b210f0f4c5c3851b9edf3e5e25924f19a
|
refs/heads/master
| 2021-01-21T20:22:33.938413
| 2019-07-16T03:14:59
| 2019-07-16T03:14:59
| 34,178,856
| 2
| 5
| null | 2016-03-29T16:08:58
| 2015-04-18T19:06:00
|
R
|
UTF-8
|
R
| false
| true
| 1,032
|
rd
|
plot_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.fit.R
\name{plot_data}
\alias{plot_data}
\title{plot_data}
\usage{
plot_data(input.well, view.raw.data = F, unlog = F, scale = 1,
main = paste(plate.name(input.well), well.name(input.well)),
number.points = T, draw.symbols = F, constant.added, ylim, ...)
}
\arguments{
\item{input.well}{The well object that needs to be plotted}
\item{view.raw.data}{should the raw data be plotted?}
\item{unlog}{should data be plotted on a linear (vs. logarithmic) scale?}
\item{scale}{determines the font scale for the entire graph. all cex values are calculated from this.}
\item{main}{...}
\item{number.points}{should points be labeled with numeric indices?}
\item{draw.symbols}{should <check.slopes> be called on the well and markings drawn on the graph?}
\item{constant.added}{Similar to added.constant.}
\item{ylim}{...}
\item{...}{additional arguments passed to plot()}
}
\description{
Basic function plots time vs. OD from a well object
}
|
9b46358f257fce2a69fcee7f440a7acb047b0ddd
|
d141653ead8c6ab785dbfc47d402197a19fa4985
|
/R/shutdowns_by_geo_scope.R
|
73fd8cf643d0a70ec188816a79e0ade1d7d4a070
|
[] |
no_license
|
bbc/vjdata.26005.internet.shutdown
|
8186f93de67c91bd0a9b08c7c87bb8727726641d
|
fa7f8931a87329f682624c5fca21047f400cda29
|
refs/heads/master
| 2022-04-06T16:07:58.621915
| 2020-02-13T14:33:42
| 2020-02-13T14:33:42
| 228,865,144
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,786
|
r
|
shutdowns_by_geo_scope.R
|
pacman::p_load('tidyverse','dplyr','ggplot2', 'bbplot2', 'lubridate', 'grid')
source("prepare_dataset.R")
###### by geographic scope ######
# countries and number of shutdowns by level of geographic spread
geography_scope_count <- tidy_data_geography %>%
group_by(continent, country, geo_scope) %>%
count(geo_scope) %>%
arrange(country) %>%
spread(geo_scope, n)
# this step is only necessary if you notice 2 Level 3 columns. Not sure why it happens
# geography_scope_count <- geography_scope_count %>%
# mutate("Level 3" = sum(`Level 3` + `Level 3 `))
geography_scope_count[is.na(geography_scope_count)] <-0
geography_scope_forchart <- tidy_data_geography %>%
group_by(continent, country, geo_scope) %>%
count(geo_scope) %>%
arrange(country) %>%
group_by(geo_scope) %>%
summarise(total = sum(n))
# Plot
scope.plot <- ggplot(geography_scope_forchart,
aes(x=geo_scope,
y=total,
#group = continent,
fill = "#4D2C7A")) +
geom_bar(stat = "identity",
position = "identity") +
bbc_style() +
scale_fill_manual(values = c("#1380A1")) +
scale_y_continuous(limits = c(0,125)) +
scale_x_discrete("month",
labels= c("Local","Regional","Multi-regional")) +
theme(panel.grid.major.x = element_blank(),
legend.position = "none") +
geom_hline(yintercept = 0, size = 1, colour = "#333333") +
labs(title = "Most shutdowns are targeted to specific areas")
scope.plot
finalise_plot(scope.plot,
source = "Source: Access Now, 2019",
save_filepath = "~/Dropbox (BBC)/Visual Journalism/Data/2019/vjdata.26005.internet.shutdown/output/shutdowns_by_geo_scope-nc.png",
width_pixels = 640)
|