blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c2718e56e9720f9dbdbb9be1d5331d701825a1b0
|
f64fea318bda54ddf7a18aab6ea6683d2b2c94e1
|
/car_data/car_data_4_glm.R
|
fefd33c3e858479f8ac6748856dc41a05cb2d24f
|
[] |
no_license
|
SportsTribution/doing_data
|
75faedc24fe467120cbb2e46892e98db219d2e54
|
c728afee4d3cb4fdf7d25cf319cf220497e9eb87
|
refs/heads/master
| 2018-01-08T08:10:22.206196
| 2016-02-24T16:19:00
| 2016-02-24T16:19:00
| 52,455,390
| 3
| 0
| null | null | null | null |
ISO-8859-16
|
R
| false
| false
| 5,192
|
r
|
car_data_4_glm.R
|
### REGRESSION MODEL
## Fits several GLMs of combined MPG (Cmb.MPG) on the remaining car
## attributes and compares the fits via residual plots.
## Expects `carData` in the workspace plus ggplot2 and reshape2 loaded.
## Rename our data frame so that we still have the "original" one handy.
carDataGLM <- carData
## One important problem: we have to take away one factor level from each
## class, otherwise the glm will complain (the dummy-variable trap).
## The following turns all character information into factors by turning the
## data frame into an unclassed list and back.
carDataGLM<-as.data.frame(unclass(carDataGLM))
## We do not need the model name for the glm.
carDataGLM<-carDataGLM[,!(colnames(carData)%in% "Model")]
## Now we turn each factor into dummy variables.
isFactor <- sapply(carDataGLM, is.factor)
carDataGLM<-as.data.frame(model.matrix(~ .+1, data=carDataGLM,
contrasts.arg = lapply(carDataGLM[,isFactor], contrasts, contrasts=TRUE)))
## You will see that one group disappeared from each factor and we have the
## group intercept instead.
print("***Model with Factors***")
print(names(carDataGLM))
## To get as.formula to work, the intercept column needs a syntactic name.
colnames(carDataGLM)[1]<-"Intercept"
## So, we can almost start to glm around.
## Smog, the mpg columns and the greenhouse column are our outputs.
## Solution: split column names into valid predictors and valid responses.
responses<-c("Air.Pollution.Score","City.MPG","Hwy.MPG","Cmb.MPG","Greenhouse.Gas.Score")
tests<-names(carDataGLM)[!(names(carDataGLM) %in% c(responses))]
## I love the %in% command.
## Now we can build the model formula; "-1" drops the implicit intercept
## because we already carry an explicit Intercept column.
## Let's start a test with Cmb.MPG.
frm <- as.formula(paste("Cmb.MPG"," ~", paste(tests,collapse ="+"),"-1"))
myglm.All.Lin <- glm(formula = frm, family = gaussian(link = "identity"), data = carDataGLM,
na.action = na.exclude)
print("***Linear GLM All tests***")
print(summary(myglm.All.Lin))
## Not bad.
## But... do we really need the Years?
## NOTE(review): the pattern "Years*" matches "Year" plus zero or more "s";
## if nothing matched, grep() would return integer(0) and the negative index
## would yield an EMPTY predictor set -- confirm Year columns always exist.
testsNoYears <- tests[-c(grep("Years*",tests,value=FALSE))]
frmNY <- as.formula(paste("Cmb.MPG"," ~", paste(testsNoYears,collapse ="+"),"-1"))
myglm.Lin <- glm(formula = frmNY, family = gaussian(link = "identity"), data = carDataGLM,
na.action = na.exclude)
print("***Linear GLM no Years***")
print(summary(myglm.Lin))
## AIC got slightly worse, so let us keep years. But... our displacement plots
## showed that we do not have a linear relationship.
## So, let's try some other families.
## Inverse link (Gamma family).
myglm.Inv <- glm(formula = frm, family = Gamma(link = "inverse"), data = carDataGLM,
na.action = na.exclude)
print("***inverse GLM***")
print(summary(myglm.Inv))
## Inverse second order (inverse Gaussian family, 1/mu^2 link).
myglm.Inv2 <- glm(formula = frm, family = inverse.gaussian(link = "1/mu^2"), data = carDataGLM,
na.action = na.exclude)
print("***inverse GLM second order***")
print(summary(myglm.Inv2))
## Unsurprisingly, those two look better, as MPG space goes from 0 to (in
## theory) infinity.
## Comparing the last two: in one TransAutoMan is a positive variable and in
## the other a negative one (in comparison to TransAMS). CVT seems to be
## pretty good for fuel transmission.
## Other link functions won't work for MPG.
### GLM FIT COMPARISONS
## We will use a plot of actual MPG values against residual values.
## type="response" is needed to receive predictions in MPG units.
carDataGLM$Res.Lin<-carDataGLM$Cmb.MPG-predict(myglm.Lin, newdata=carDataGLM, type="response")
carDataGLM$Res.Inv<-carDataGLM$Cmb.MPG-predict(myglm.Inv, newdata=carDataGLM, type="response")
carDataGLM$Res.Inv2<-carDataGLM$Cmb.MPG-predict(myglm.Inv2, newdata=carDataGLM, type="response")
## We can now plot each residual individually.
## We use geom_point with alpha to let us see point density,
## and geom_quantile with smoothing to estimate our standard deviation.
## Note: ggsave() with no plot argument saves the most recent ggplot.
p<- ggplot(carDataGLM, aes(Cmb.MPG,Res.Lin))+geom_point(alpha = 0.1)+
geom_quantile(method = "rqss", lambda = 10)
ggsave("car_data/car_output/glm_1_Lin_Residuals.png")
p<- ggplot(carDataGLM, aes(Cmb.MPG,Res.Inv))+geom_point(alpha = 0.1)+
geom_quantile(method = "rqss", lambda = 10)
ggsave("car_data/car_output/glm_2_Inv_Residuals.png")
p<- ggplot(carDataGLM, aes(Cmb.MPG,Res.Inv2))+geom_point(alpha = 0.1)+
geom_quantile(method = "rqss", lambda = 10)
ggsave("car_data/car_output/glm_3_Inv2_Residuals.png")
## Problem: we have trouble comparing the models, as they are not in the
## same plot.
## Solution: we melt the 3 residual columns into one long column
## (melt is presumably reshape2::melt -- confirm which package is attached).
carDataGLMmelt <- carDataGLM[,c("Cmb.MPG","Res.Lin","Res.Inv","Res.Inv2")]
carDataGLMmelt <- melt(carDataGLMmelt,id="Cmb.MPG", variable.name = "GLM.Type", value.name="Residual")
## Focus is on the quantiles, especially the median.
p<- ggplot(carDataGLMmelt, aes(Cmb.MPG,Residual))+
geom_quantile(aes(colour=GLM.Type), method = "rqss", lambda = 10,quantiles=c(0.25,0.75),size=0.5,alpha=0.5)+
geom_quantile(aes(colour=GLM.Type), method = "rqss", lambda = 10,quantiles=c(0.5),size=1.5)
ggsave("car_data/car_output/glm_4_All_Residuals.png")
## Okay, we see that the inverse functions are much less skewed for high MPG.
## There is slight skewness for low MPG in the inverse functions,
## but especially the inverse fit from around 18 to 28 MPG has a residual
## very close to zero.
## Idea for future work: take a look at the outliers.
|
710eef7c26b2cbe67cdd642f52bc40a2c854aec2
|
8e6e55fe43bc3ed64f01fec4ed07c027b29f96a6
|
/man/make_bulk_get_job_url.Rd
|
842ed76e2c6ecc3b3fa9e1163c9267e970e6673c
|
[
"MIT"
] |
permissive
|
carlganz/salesforcer
|
a3ec51c556b79b4734b5c8d844f000c2573fadbc
|
2078627bc988e5d58f90d16bf42c603507ab16db
|
refs/heads/main
| 2023-04-14T23:50:26.698773
| 2021-04-27T15:44:55
| 2021-04-27T15:44:55
| 362,164,928
| 1
| 0
|
NOASSERTION
| 2021-04-27T15:38:47
| 2021-04-27T15:38:46
| null |
UTF-8
|
R
| false
| true
| 457
|
rd
|
make_bulk_get_job_url.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/endpoints-bulk.R
\name{make_bulk_get_job_url}
\alias{make_bulk_get_job_url}
\title{Bulk Get Job Generic URL Generator}
\usage{
make_bulk_get_job_url(
job_id,
api_type = c("Bulk 1.0", "Bulk 2.0"),
query_operation = NULL
)
}
\description{
Bulk Get Job Generic URL Generator
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
|
4a854c9edcfdde9333d730d38f43be2412740438
|
41c8197a6586d2a4bede4e80f4d87952944e6f4f
|
/plot4.R
|
cc467d8169e169f074f110a8e77ca956dab95614
|
[] |
no_license
|
jesper4711-personal/ExData_Plotting1
|
2b47576b985434a58dd8201cfa802608c53903e6
|
12ecc522a6c5de78db036f241fe440c9ea1de2f0
|
refs/heads/master
| 2020-12-13T18:25:02.698292
| 2015-01-11T20:39:08
| 2015-01-11T20:39:08
| 29,090,099
| 0
| 0
| null | 2015-01-11T11:56:03
| 2015-01-11T11:56:01
| null |
UTF-8
|
R
| false
| false
| 1,444
|
r
|
plot4.R
|
## plot4.R - draw a 2x2 panel of household power consumption plots
## for 2007-02-01 and 2007-02-02 and save it to plot4.png.

## Column types for household_power_consumption.txt
col_classes <- c("character", "character", "numeric", "numeric", "numeric",
                 "numeric", "numeric", "numeric", "numeric")

## Load the raw data; values of "?" are treated as missing
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  colClasses = col_classes, na = "?")

## Build POSIX timestamps for plotting before overwriting the Date column
hpc$Time <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
## Convert dates so we can subset the data
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")

## Keep only the two days of interest
keep_dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
hpc <- subset(hpc, Date %in% keep_dates)

## Open the PNG device and lay out a 2x2 grid of panels
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

## Top-left panel: global active power over time
plot(hpc$Time, hpc$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

## Top-right panel: voltage over time
plot(hpc$Time, hpc$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

## Bottom-left panel: three sub-metering series with a legend
plot(hpc$Time, hpc$Sub_metering_1, type = "l", col = "black",
     xlab = "", ylab = "Energy sub metering")
lines(hpc$Time, hpc$Sub_metering_2, col = "red")
lines(hpc$Time, hpc$Sub_metering_3, col = "blue")
legend("topright",
       col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1,
       bty = "n")

## Bottom-right panel: global reactive power over time
plot(hpc$Time, hpc$Global_reactive_power, type = "n",
     xlab = "datetime", ylab = "Global_reactive_power")
lines(hpc$Time, hpc$Global_reactive_power)

dev.off()
|
43810a3e10596cf022d9ca4f8f54b558cb61bd59
|
6242962bfa0e8022cebc8822def9c611eea02132
|
/2021/2021_01.R
|
0f2588bbe9236ccede02e64782bc278c6a793bac
|
[] |
no_license
|
nickopotamus/preppin_data
|
d2c12800252792a96e5c6d3ec311eab40064a058
|
fc19d5ed55e659bed65ecb2da580039846345032
|
refs/heads/main
| 2023-09-01T04:54:07.096793
| 2021-10-07T05:19:22
| 2021-10-07T05:19:22
| 414,340,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,146
|
r
|
2021_01.R
|
# 2021, week 1
# Bike sales
# https://preppindata.blogspot.com/2021/01/2021-week-1.html
library(tidyverse)
# Source data lives in a public Google Sheet (requires googlesheets4 auth
# or a de-authed session at run time).
raw_data <- googlesheets4::read_sheet("1GYv4573GnJa-C21NYeDj-OhFSTwrK0SnQNF2IQFqa50")
# Normalise misspelled bike types to canonical labels
# (applied to factor levels via fct_relabel() below)
bike_regex <- function(x = NULL) {
x[grepl("^Grav", x)] <- "Gravel"
x[grepl("^Moun", x)] <- "Mountain"
x[grepl("^Ro", x)] <- "Road"
return(x)
}
# Create the cleaned data view
bikes <- raw_data %>%
# Split the combined "Store - Bike" column into two
separate(`Store - Bike`, into = c("Store", "Bike"), sep = " - ") %>%
mutate(Bike = fct_relabel(Bike, bike_regex), # Recode factor levels using regex
Quarter = lubridate::quarter(Date), # Quarter of the sale
`Day of Month` = lubridate::mday(Date)) %>% # Day of the month
filter(`Order ID` > 10) %>% # Drop test rows (Order ID 1-10)
# Reorder variables as in the example output
select(c(Quarter, Store, Bike, `Order ID`, `Customer Age`, `Bike Value`, `Existing Customer?`, `Day of Month`))
# Visualisation
# Not sure how they want this to work - each quarter has >31 days
bikes %>%
# Sort by day within quarter
arrange(Quarter, `Day of Month`) %>%
# Total sold for each bike type per day
group_by(Quarter, `Day of Month`, Bike) %>%
summarise(daily_earnings = sum(`Bike Value`)) %>%
# Cumulative earnings by day within each quarter/bike group
group_by(Quarter, Bike) %>%
mutate(cum_earnings = cumsum(daily_earnings)) %>%
# Plot: one cumulative-sales line per bike type, one facet per quarter
ggplot() +
aes(x = `Day of Month`, y = cum_earnings, color = Bike) +
geom_line() +
facet_wrap(~Quarter, strip.position = "left", ncol = 1) +
# Make it look more like the target plot
theme_light() +
labs(title = "Typical Running Monthly Sales in each Quarter",
subtitle = "For <span style='color:#663300'>Mountain</span>, <span style='color:#CC9933'>Gravel</span>, and <span style='color:#666666'>Road</span> bikes") +
theme(plot.subtitle = ggtext::element_markdown(),
legend.position = "none") +
ylab("Running Total of Sales") +
scale_color_manual(values = c("#CC9933", "#663300", "#666666")) +
scale_x_continuous(expand = c(0,0), limits = c(1, 31), breaks = seq(2, 30, by = 2))
|
1ee575ac8070ed08ffbe3d43e3c94260f30689e4
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/2009/RNeXML/taxize_nexml.R
|
1d7157549884c46df4c87e5a05c97df262afd6ac
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,243
|
r
|
taxize_nexml.R
|
#' taxize nexml
#'
#' Check taxonomic names against the specified service and
#' add appropriate semantic metadata to the nexml OTU unit
#' containing the corresponding identifier.
#'
#' @param nexml a nexml object
#' @param type the name of the identifier service to use (currently only "NCBI")
#' @param ... additional arguments (not implemented yet)
#' @return the nexml object, with taxon-identifier metadata attached to every
#'   OTU whose label could be resolved; unresolved labels raise a warning and
#'   are left unchanged
#' @import taxize
#' @export
#' @examples \dontrun{
#' data(bird.orders)
#' birds <- add_trees(bird.orders)
#' birds <- taxize_nexml(birds, "NCBI")
#' }
taxize_nexml <- function(nexml, type = c("NCBI"), ...){
  type <- match.arg(type)
  if (type == "NCBI") {
    # seq_along() instead of 1:length(): with zero otus blocks (or an empty
    # otu list) 1:length() would iterate over c(1, 0) and fail; seq_along()
    # simply skips the loop.
    for (j in seq_along(nexml@otus)) {
      for (i in seq_along(nexml@otus[[j]]@otu)) {
        label <- nexml@otus[[j]]@otu[[i]]@label
        id <- get_uid(label)
        if (is.na(id)) {
          warning(paste("ID for otu", label,
                        "not found. Consider checking the spelling or alternate classification"))
        } else {
          # Attach the NCBI taxonomy URI as a tc:toTaxon annotation.
          nexml@otus[[j]]@otu[[i]]@meta <- new("ListOfmeta", list(
            meta(href = paste0("http://ncbi.nlm.nih.gov/taxonomy/", id),
                 rel = "tc:toTaxon")))
        }
      }
    }
  }
  nexml
}
|
5e38e58d0b76193ffef72a3e4d4bcc267e37b879
|
a0585ca647461121f67f91069809cecf7f5a7e5f
|
/app/ui.R
|
95e73804fbcf6cdd09b37d450e5fbbb6943a6506
|
[] |
no_license
|
tyz910/dsscapstone
|
9e8f1ec60d83cb97a0bab6282d07fc62d741d905
|
07a33d1c16eaaf39ad169781ad6820a396b52db4
|
refs/heads/master
| 2020-05-21T12:50:37.492601
| 2015-08-15T05:55:34
| 2015-08-15T05:55:34
| 39,577,614
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 214
|
r
|
ui.R
|
library(shiny)

## Shiny UI for the word-prediction app: a text box on the left,
## predicted words on the right.
input_panel <- column(4, div(br(), wellPanel(textInput('sentence', 'Input text:'))))
output_panel <- column(8, h3("Suggestions:"), uiOutput('prediction'))

## The page definition must be the last expression in ui.R.
fluidPage(
  titlePanel("Word prediction"),
  fluidRow(input_panel, output_panel)
)
|
65892cbda379a9d506d460a237a7ece9ae4c441a
|
2b7696de761986e7c295da36201f06fca701f059
|
/man/hs3_hs1.Rd
|
352d76e36a7c77a33ef1625ad843c0c8e4489fbd
|
[] |
no_license
|
cran/concordance
|
130b5cadccfce9cc5ef98432fc2f938c75eebd93
|
b8d1e592399f05941ce24a4afd96007b8dae0ec5
|
refs/heads/master
| 2021-05-04T11:23:30.586684
| 2020-04-24T15:10:08
| 2020-04-24T15:10:08
| 49,413,285
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 648
|
rd
|
hs3_hs1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hs3_hs1}
\alias{hs3_hs1}
\title{HS3-HS1 Concordance}
\format{
A data frame with 5052 rows and 6 variables:
\describe{
\item{HS3_6d}{6-digit HS3 Code}
\item{HS3_4d}{4-digit HS3 Code}
\item{HS3_2d}{2-digit HS3 Code}
\item{HS1_6d}{6-digit HS1 Code}
\item{HS1_4d}{4-digit HS1 Code}
\item{HS1_2d}{2-digit HS1 Code}
}
}
\source{
\url{https://unstats.un.org/unsd/trade/classifications/correspondence-tables.asp}
}
\usage{
hs3_hs1
}
\description{
A dataset containing concordances between HS3 and HS1 classification.
}
\keyword{datasets}
|
315fd09bfdd95591f95af11619a73f5804fd8800
|
3449a99c56cf3120aa02ab22c58edbd3d6286074
|
/Plots/ECDF_plot2.R
|
b01bc565ded995a9758844a1f3cb5c40e46a082a
|
[] |
no_license
|
martin-vasilev/Bmeta
|
f124f73e8d2b53ecddb41ef50ecde699b35c84e6
|
39d587df5f816c24510093a44d8d2f100729292c
|
refs/heads/master
| 2021-01-18T02:20:32.482029
| 2016-08-02T10:53:23
| 2016-08-02T10:53:23
| 58,971,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,360
|
r
|
ECDF_plot2.R
|
# Martin Vasilev, 2015
# Plots the survival function P(mu > X) of the posterior samples of mu from
# three JAGS models (three different priors on mu) for visual comparison.
# JAGS_model*: fitted model objects with a $mu array of dim [param, iter, chain].
# type: "FFD" uses an x-grid of 0-8, anything else uses 0-12 (GD).
ECDF_plot <- function(JAGS_model1=S1, JAGS_model2=S6, JAGS_model3=S7, type="FFD"){
# Pool the 3 MCMC chains of mu for each model and build an empirical CDF:
Samples1<-c(JAGS_model1$mu[1,,1],JAGS_model1$mu[1,,2],JAGS_model1$mu[1,,3])
ECDF1<- ecdf(Samples1);
Samples2<-c(JAGS_model2$mu[1,,1],JAGS_model2$mu[1,,2],JAGS_model2$mu[1,,3])
ECDF2<- ecdf(Samples2);
Samples3<-c(JAGS_model3$mu[1,,1],JAGS_model3$mu[1,,2],JAGS_model3$mu[1,,3])
ECDF3<- ecdf(Samples3);
prob1<- NULL; prob2<- NULL; prob3<- NULL; seq1<- NULL
# Evaluation grid and axis tick positions depend on the measure type
if(type=="FFD"){
mu<- seq(0,8,0.001)
seq1<- c(0, 1, 2, 3, 4, 5, 6, 7, 8)
} else {
mu<- seq(0,12,0.001)
seq1<- c(0, 1, 2, 3, 4, 5, 6, 7,8, 9, 10, 11, 12)
}
# Survival probability P(mu > X) = 1 - ECDF(X) at each grid point
for (i in 1:length(mu)){
prob1[i]<- 1- (ECDF1(mu[i]))
prob2[i]<- 1- (ECDF2(mu[i]))
prob3[i]<- 1- (ECDF3(mu[i]))
}
# Stack the three curves into long format, labelled by prior on mu
mu<- c(mu, mu, mu)
prob<- c(prob1, prob2, prob3)
var= c(rep(paste(expression(mu), "~ Unif(-30, 30)", sep=""), length(mu)/3),
rep(paste(expression(mu), "~ N(0, 0.5)", sep=""), length(mu)/3),
rep(paste(expression(mu), "~ N(7, 0.5)", sep=""), length(mu)/3))
DB<- data.frame(mu, prob, var)
# colnames(DB)<- c("mu, prob", "Prior on MU")
# Create graph:
library(ggplot2)
y<- expression(paste("P(", mu, " > X)"))
#
# NOTE(review): DB[8001,] selects a SINGLE row (one point drawn at size 15);
# if the full curve was intended this should probably be DB or DB[1:8001,]
# -- confirm against the published figure.
Plot <-ggplot(DB[8001,], aes(x=mu, y=prob, colour=prob)) + geom_point(size=15)+ theme_bw() +
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
ylab(y) + xlab("X (in ms)") +
scale_x_continuous(breaks=seq1) +
scale_y_continuous(breaks=c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1),
labels=c("0",".1",".2",".3",".4",".5", '.6', '.7','.8', '.9', '1')) +
theme(axis.title.x = element_text(size=16),axis.title.y = element_text(size=16),
axis.title.y = element_text(size=16), axis.text=element_text(size=16)); Plot
# NOTE(review): prob2/prob3 below come from the function environment (not
# from DB), and the colour= strings inside aes() create legend keys rather
# than literal colours -- verify this is the intended legend behaviour.
Plot<- Plot + geom_line(aes(y = prob2, colour = "firebrick1"),size=1.06) +
geom_line(aes(y = prob3, colour = "lightseagreen"),size=1.06) +
theme(legend.position="top")
# Annotate the panel with the measure type
if(type=="FFD"){
Plot<- Plot + geom_text(aes(7, .92, label="FFD"))
} else{
Plot<- Plot + geom_text(aes(11, .92, label="GD"))
}
return(Plot)
}
|
91ddeb2f4e35b4694814dd89d399655f20d6cef1
|
c0e66d1ebacdbf10375a818ec6b24c35e5c9073e
|
/R/plot_dat_in_map.R
|
fcb3eae701badc4dd10033af6f1930398492a451
|
[] |
no_license
|
Climandes/ClimIndVis
|
b1045a5cdce425e6dbf42d3032bb9ac2a0933786
|
47627ea250c1bb1d7416341645ffe1a7f5ab5b48
|
refs/heads/master
| 2021-11-07T05:24:44.418529
| 2021-10-22T09:17:22
| 2021-10-22T09:17:22
| 140,861,804
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,093
|
r
|
plot_dat_in_map.R
|
#' function to plot data points on map of gridded data
#' @param g_lon,g_lat: arrays of longitudes and latidudes of dimension nx,ny ( or matrix of dim nx x ny if grid is not regular).
#' @param g_dat: matrix of gridded data of dimension nx x ny
#' @param g_col: color scale for plotting gridded data as character of function
#' @param g_breaks (g_nlev): breaks for plotting gridded data. If not provided, g_nlev needs to be specified to calculate breaks. If g_nlev is provided, the pretty() function is used to define the breakpoints and therefore the number of breaks might differ from the g_nlev.
#'
#' @param p_lon,p_lat: array of longitudes and latitudes of points of dimension np=number of points
#' @param p_dat: array of values for data for points of dimension p (needed for p_col_info = "same" or "cbar")
#' @param p_col_info: type of coloring for points
#' \itemize{
#' \item "same": same colorbar as for gridded data, p_col and p_nlev do not need to be specified and no extra legend is plotted for points but p_dat is needed
#' \item "cbar": colorbar specified in p_col but different from g_col. Either predefined colors bars such as these: link to grDevices::Palettes and gplots::colorpanel and RColorBrewer or given in "" (e.g. cbar="gplots::bluered")
#' \item "cpoints": discrete colors specified for each point as array in p_col (p_dat is not needed in this case)
#' }
#' @param p_col: colors for points, see p_col_info
#' @param p_breaks (p_nlev): breaks for plotting point data. If p_breaks is not provided, p_nlev needs to be specified to calculate breaks. If p_col_info = "same" or "cpoints" values are taken from g_breaks (g_nlev). If p_nlev is provided, the pretty() function is used to define the breakpoints and therefore the number of breaks might differ from the p_nlev.
#' @param p_pch: pch of the points (see par). Values between 21:25 are implemented, default is 21.
#' @param p_cex: cex of points (number or array), default is 2
#' @param p_legend: legend to be drawn for point data. Default is no legend. Legend is not drawn if p_col_info="same".
#'\itemize{
#' \item "cbar": colorbar, additional argumten p_nlev or p_breaks is needed
#' \item "cpoints": legend of points of unique colors of p_col, p_legend_text should be specified
#' }
#'@param p_pch_s: x and y limits of the plot. If not specified taken from g_lon and g_lat
#'@param xlims,ylims: x and y limits of the plot. If not specified taken from g_lon and g_lat
#'@param mask: plot land-sea mask of data
#'@param mask_col: color of mask values
#'@param fc_plot -> forecast plot, shows probabilities for all ncat forecast categories
# in optional Argumets weitergeben - fpr fc_plot=TRUE
#'@param leg_labs: legend labs (for fc_plot=TRUE)
#'@param fc_catnames: title for categories (for fc_plot=TRUE)
#'@param leg_title title of legend
#' @param output: needs to be specified if file output is wanted, currently only "png" is implemented (default=NA)
#' @param outfile: character, outputfile (only if output is not NULL). final file will be of type outfile.output
#' @param plot_title (optional), character string for plot title
#' @param pwidth width of plotting region of output (as input for jpeg, png, ...). height is calculated in functions based on plot layout
#' @param lwidth width of legend
#' @param plwidth width of legend for points
#' @param topo Information for contour lines added to the plot. Named list with following three elements: \emph{alt}=array of altitudes of dimension nlonXnlat, \emph{lon}: array of longitudes, \emph{lat}: array of latitudes. Default=NULL.
#' @param graphic_device arguments for graphics devices (in addition to <<filename>>, <<width>> and <<height>> ), depending on the output chosen see \code{\link[grDevices]{png}} or \code{\link[grDevices]{pdf}}.
#' @keywords internal
plot_map_grid_points <- function(g_dat=NULL,g_lon=NULL,g_lat=NULL,g_col=rainbow(10),g_col_center=FALSE,g_breaks,g_nlev=10,
p_lon=NULL,p_lat=NULL,p_col_info="same",p_col=rainbow(10),p_col_center=FALSE,p_dat,p_legend=0,legend_center=FALSE,
xlims,ylims,zlims=NULL,p_zlims,mask=NULL,mask_NA=NULL,ratio=1.0,p_nlev=10,p_breaks=NA,p_text,p_pch=21,p_cex=1,p_pch_s=NA,p_col_sig,
output=NULL,outfile,plot_scale=TRUE,pwidth=5,graphic_device,plot_title,lwidth=1,plwidth=1,trend=FALSE,topo=NULL,text_cex=1,
mask_col="lightgrey",NA_col="white",fc_plot=FALSE,outliers=c(FALSE,FALSE),units="",contour=NULL,contourcol="darkgrey",...) {
# Capture extra args (leg_labs, fc_catnames, ...) forwarded via "..."
opargs<-list(...)
# 1.checks ----------------------------------------------------------------
# grid/points flags: 1 if the respective data was supplied, else 0
grid <- ifelse (missing(g_dat) ,0,1)
points<-ifelse (missing(p_dat) & missing(p_col),0,1)
if(grid==0 & points==0) stop("either grid or point data needs to be specified")
if (grid==1) check_arguments_function(list(g_lon,g_lat))
if (points==1) check_arguments_function(list(p_lon,p_lat))
# TRUE per supplied dataset that is entirely NA (NULL also counts as TRUE)
isNA<-lapply(switch(points+1,list(g_dat),list(g_dat,p_dat)), function(x){
switch(is.null(x)+1,all(is.na(x)),TRUE)
})
if (!all(unlist(isNA))){
# missing: check that lons and lats have the same size
if(points==1 & p_col_info=="same" & missing(p_dat)) stop("p_dat needs to be specified if <<p_col_info==same>>")
#check if lons are in same "format" [0,360] vs. [-180,180]
if (grid==1 & points == 1){
if(any(p_lon > 180) & any(g_lon < 0)) p_lon <- p_lon-360
if(any(p_lon < 0) & any(g_lon > 180)) p_lon <- p_lon+360
}
# default limits from lon and lat if not specified
if (missing(xlims)) xlims <-range(g_lon,p_lon)
if (missing(ylims)) ylims <-range(g_lat,p_lat)
# pad limits by half a grid cell so edge cells are drawn completely
if(grid==1) {
ylims=ylims+(diff(g_lon)[1]/2 *c(-1,1))
xlims=xlims+(diff(g_lat)[1]/2 *c(-1,1))
}
limcalc <- "quant" # for test purpose and put switch later
# default z-limits
if (missing(zlims) & grid==1){
if (points==1 & p_col_info=="same" ) {
zlims_g <- get_lims(list(g_dat,p_dat))
} else
zlims_g <-get_lims(g_dat)
} else {
zlims_g=list()
zlims_g$lim=zlims
}
if(points==1){
if (missing(p_zlims)) {
if (p_col_info=="same"){
zlims_p=list()
# NOTE(review): assigns $lims here while everywhere else $lim is used --
# possible typo; confirm which field downstream code reads.
zlims_p$lims<-zlims_g
} else if (p_col_info == "cbar") {
#p_zlims <-range(p_dat,na.rm=TRUE)
zlims_p <- get_lims(p_dat)
if(!all(is.finite(zlims_p$lim))) zlims_p$lim <- c(0,0)
} # when p_col_info="cpoints" no p_zlims is needed: limits are only used to split the colour scale, and the colours are then given directly in p_col
} else {
zlims_p=list()
zlims_p$lim=p_zlims
}
# check if all arguments for p_legend are given
if (p_legend=="cpoints") {
if(missing(p_pch)) p_pch <- 16
if (missing(p_text)) {
message("No text specified for legend points (p_text)")
}
}
}
# default color scales ----------------------------------------------------
#grid
if (grid==1 ) {
if(missing(g_breaks)){
g_br = get_breaks(g_dat,zlims_g,g_nlev+1, center=legend_center)
g_breaks = g_br$breaks
outliers <- g_br$outliers
}
# optionally center the colour scale on zero (insert 0 as a break)
if (g_col_center){
if (!is.element(0,g_breaks)){
if (!all(g_breaks>0)){
neg=tail(which(g_breaks<0),1)
g_breaks=c(g_breaks[1:neg],0,g_breaks[(neg+1):length(g_breaks)])
}
}
if (all(g_breaks>=0)) {
g_col=colorRampPalette(g_col)(length(g_breaks)*2-1)[(length(g_breaks)+1): (length(g_breaks)*2-1)]
} else if (all(g_breaks<=0)){
g_col=colorRampPalette(g_col)(length(g_breaks)*2-1)[1:(length(g_breaks)-1)]
} else {
step=diff(zlims_g$lim)/(length(g_breaks)-1)
nzl=c(max(abs(zlims_g$lim))*c(-1,1))
g_br_help=pretty(nzl,diff(nzl)/step)
sel=which(is.element(round(g_br_help,digits=4),round(g_breaks,digits=4)))
g_col = colorRampPalette(g_col)(length(g_br_help)-1)[head(sel,-1)]
}
}
# interpolate the palette so there is exactly one colour per interval
if (length(g_col) != (length(g_breaks)-1)){
g_col = colorRampPalette(g_col)(length(g_breaks)-1)
}
# clamp values outside the break range so they take the edge colours
if (outliers[1]) g_dat[g_dat<head(g_breaks,1)]=head(g_breaks,1)
if (outliers[2]) g_dat[g_dat>tail(g_breaks,1)]=tail(g_breaks,1)
}
# points
if (points==1){
if (p_col_info == "cpoints") {
if(length(p_col) != length(p_lon)) stop("length of p_col needs to be of same number as p_lon and p_lat")
col_points = p_col
} else {
if (p_col_info =="same"){
# reuse the grid colour scale; no separate point legend
p_col<-g_col
p_breaks<-g_breaks
p_legend=0
} else if (p_col_info == "cbar"){
if (is.na(p_breaks)[1]){
p_br <-get_breaks(p_dat, zlims_p, p_nlev, center=legend_center)
p_breaks <- p_br$breaks
outliers <- p_br$outliers
}
# optionally center the point colour scale on zero (as for the grid)
if (p_col_center){
if (!is.element(0,p_breaks)){
if (!all(p_breaks>0)) {
neg=tail(which(p_breaks<0),1)
p_breaks=c(p_breaks[1:neg],0,p_breaks[(neg+1):length(p_breaks)])
}
}
if (all(p_breaks>=0)) {
p_col=colorRampPalette(p_col)(length(p_breaks)*2-1)[length(p_breaks): (length(p_breaks)*2-1)]
} else if (all(p_breaks<=0)){
p_col=colorRampPalette(p_col)(length(p_breaks)*2-1)[1:(length(p_breaks)-1)]
} else {
step=diff(zlims_p$lim)/(length(p_breaks)-1)
nzl=c(max(abs(zlims_p$lim))*c(-1,1))
g_br_help=pretty(nzl,diff(nzl)/step)
sel=which(is.element(round(g_br_help,digits=4),round(p_breaks,digits=4)))
p_col = colorRampPalette(p_col)(length(g_br_help)-1)[head(sel,-1)]
}
} else {
if (length(p_col) != (length(p_breaks)-1)){
p_col= colorRampPalette(p_col)(length(p_breaks)-1)
}
}
}
if (outliers[1]) p_dat[p_dat<head(p_breaks,1)]=head(p_breaks,1)
if (outliers[2]) p_dat[p_dat>tail(p_breaks,1)]=tail(p_breaks,1)
# map each point value onto its interval colour; NA values get NA_col
if(all(unlist(lapply(p_dat,is.na)))){
col_points <- NA
} else{
col_points <- p_col[cut(p_dat, p_breaks,right=TRUE,include.lowest = TRUE)]
col_points[is.na(p_dat)]=NA_col
}
}
}
# setup plot and plot -----------------------------------------------------
# calculate aspect ratio of length of lons and lats within x- and ylims
#if (is.matrix(g_lon)==FALSE){
nx <- diff(xlims) #length(is.in.range(g_lon,xlims))
ny <- diff(ylims) #length(is.in.range(g_lat,ylims))
#} else {
# print("not implemented yet for irregular grid")
#return;}
ratio<-ny/nx
# open the requested output device, sizing height from the plot layout
if (!is.null(output)){
if(missing(graphic_device)) graphic_device=list()
if (missing(outfile)) stop("no outfile specified")
filename=paste0(outfile,".",output)
pheight=ifelse(p_legend==0 & !fc_plot, ratio*5/6*pwidth+(pwidth/5),(ratio*5/6+1/6)*pwidth+(pwidth/5))
if (is.element(output,c("png","jpeg","bmp","tiff"))){
if(is.null(graphic_device$res)) graphic_device$res=200 # res=pres,units="in"
if(is.null(graphic_device$units)) graphic_device$units="in"
do.call(output,c(list(filename=filename,height=pheight,width=pwidth+(pwidth/5)),graphic_device))
} else if (output == "pdf"){
do.call(output,c(list(file=filename,height=pheight,width=pwidth+(pwidth/5)),graphic_device))
} else if (output=="dev.new" ) dev.new()
}
# panel layout: map panel, colourbar panel, optional point-legend panel
if (p_legend==0 & grid==0) {
layout(mat=matrix(c(1,2),nrow=1,byrow=TRUE),heights=c(ratio*5),widths=c(5),respect=TRUE)
} else if ((p_legend==0 & grid==1) | (p_legend!=0 & grid==0) | fc_plot) {
layout(mat=matrix(c(1,2),nrow=1,byrow=TRUE),heights=c(ratio*5),widths=c(5,1),respect=TRUE)
} else layout(mat=matrix(c(1,2,3,3),nrow=2,byrow=TRUE),heights=c(ratio*5,2),widths=c(5,1))
# define type of coords
# NOTE(review): the first coords assignment is immediately overwritten by
# the second, so only "any(xlims < 0)" decides -- confirm this is intended.
coords=ifelse(any(xlims> 180), FALSE, TRUE)
coords=ifelse(any(xlims< 0), TRUE, FALSE)
#if (fc_plot) { par(oma=c(1,1,5,1),mar=c(1,1,0.5,1))
#} else {
# reserve outer top margin lines for the (multi-line) plot title
ifelse(!all(is.na(plot_title)),
par(oma=c(1.5,1.5,(length(plot_title)+0.5)*text_cex,1),mar=c(1,1,0.5,1)),
par(oma=c(1.5,1.5,1,1),mar=c(1,1,1,1)))
#}
# may need to be adapted depending on size?
# plot gridded data or else generate empty map plot
if (grid==1){
image(g_lon,g_lat,g_dat,xlim=xlims, ylim=ylims,zlim=zlims_g$lim,col=g_col,breaks=as.numeric(g_breaks),cex=3*text_cex,xlab="",ylab="",cex.axis=1)
if(!is.null(mask_NA) ){
if(!is.null(mask)) mask_NA[mask==1]=NA
if(!all(is.na(mask_NA))) image(g_lon,g_lat,mask_NA,xlim=xlims, ylim=ylims,col=NA_col,add=TRUE)
}
} else {
image(xlims[1]:xlims[2],ylims[1]:ylims[2],array(0,dim=c(diff(xlims)+1,diff(ylims)+1)),col=NULL,cex=3*text_cex,xlab="",ylab="",cex.axis=1)
}
# overlay the land-sea mask, if given
if(!is.null(mask) ){
if (!all(is.na(mask))) image(g_lon,g_lat,mask,xlim=xlims, ylim=ylims,col=mask_col,add=TRUE)
}
# coastlines and lakes (world vs world2 depending on lon convention)
maps::map(ifelse(coords==TRUE,'world',"world2"), add=T, lwd=2)
maps::map('lakes', add=T, lwd=2,wrap=switch(coords+1,c(0,360),c(-180,180)), fill=TRUE, col="lightblue")
#plot a grid on top of the plot (dashed lines every 5 degrees)
all_lons <- seq(xlims[1],xlims[2],1)
all_lats <- seq(ylims[1],ylims[2],1)
abline(v= all_lons[which((all_lons %% 5)==0)], lwd=1, lty=2, col="gray66")
abline(h= all_lats[which((all_lats %% 5)==0)], lwd=1, lty=2, col="gray66")
box()
# optional topography contours and custom contour mask
if (!is.null(topo)){
contour(topo$lon, topo$lat, topo$alt, nlevels=3, col="gray55", add=TRUE)
}
if (!is.null(contour)) contour(contour$lon,contour$lat,contour$mask, nlevels=1, col=contourcol, add=TRUE,lwd=2,labels="")
#plot points
if (points==1){
points(p_lon,p_lat,col="black",bg=col_points,pch=p_pch,cex=p_cex,lwd=1)
}
# plot title
if (!all(is.na(plot_title))){
plot_title=plot_title[!sapply(plot_title,is.null)]
for(ll in 1:length(plot_title)){
mtext(side=3,line=((length(plot_title)-0.5):0.5)[ll]*0.6*text_cex, plot_title[[ll]], adj=ifelse(names(plot_title)[ll]=="utitle",NA,0), cex=ifelse(ll==1,1,0.6)*text_cex)
} }
# remember par state (for legend panel placement) and draw the colourbar
test<-par()
par(fig=c(0.8,0.99,0.01,0.95),pty="m",new=TRUE)
if (grid == 1 | p_col_info=="same"){
if(!length(g_breaks)==1 & plot_scale){
if(fc_plot){
image_scale(breaks=g_breaks,col=g_col,axis.pos=4,xlab=opargs$leg_labs,
add.axis=FALSE,fc_axis=opargs$fc_catnames,equidist=TRUE)
} else {
image_scale(breaks=g_breaks,col=g_col,axis.pos=4,scale_lab=units, add.axis=TRUE,key.extend=outliers)
}
}
}
#legend for point data (separate colourbar or discrete point legend)
if(p_legend!=0){
if (p_legend == "cbar" & fc_plot){
image_scale(breaks=p_breaks,col=p_col,axis.pos=4,xlab=opargs$leg_labs,
add.axis=FALSE,fc_axis=opargs$fc_catnames,equidist=TRUE)
} else {
if (grid==1) par(fig=c(0,1,0,test$fig[3]),new=TRUE)
if (p_legend == "cbar") {
if(!length(p_breaks)==1 & plot_scale){
horiz= ifelse(grid==0,4,1)
if (horiz==1) {
par(mar=c(2,1,1,1))
} else par(mar=c(1,1,1,2))
image_scale(breaks=p_breaks,col=p_col,axis.pos=horiz,scale_lab=units, add.axis=TRUE,key.extend=outliers)
}
}else if (p_legend == "cpoints") {
plot(0,type="n",axes=FALSE)
legend("topleft",pch=p_pch,legend=p_text,col=unique(p_col),cex=1,bty="n")
}
}
}
# close the file device (keep interactive windows open)
if (!is.null(output)){
if (output!="dev.new") dev.off()
}
} else warning(paste0("all values are NA, the following file is not plotted:",plot_title$title ))
} #end function
|
30b7fbd5d99afe1aa10e2a2438d5106db3a2acc6
|
ac38f9ec3c1054d98660a128e6574e6c93694e4c
|
/cachematrix.R
|
147335995d7aaa7e22e73e4252fa6b0c4edb2b59
|
[] |
no_license
|
Xcodingdata/ProgrammingAssignment2
|
1419583c59519189ac7f94043368bbec06fea46f
|
f7960cb6842c48ebc4c0a9c2747553b4ce0840c5
|
refs/heads/master
| 2021-01-11T15:01:38.874292
| 2017-01-29T18:33:27
| 2017-01-29T18:33:27
| 80,283,083
| 0
| 0
| null | 2017-01-28T12:25:42
| 2017-01-28T12:25:41
| null |
UTF-8
|
R
| false
| false
| 1,284
|
r
|
cachematrix.R
|
# Caching the Inverse of a Matrix
#
# makeCacheMatrix wraps a matrix together with a memoised slot for its
# inverse and returns a list of four accessor closures.
makeCacheMatrix <- function (x = matrix()) {
  # The cached inverse; NULL until setinverse() stores a value.
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function (y) {
    x <<- y
    cached_inverse <<- NULL
  }

  # Return the stored matrix.
  get <- function () x

  # Store a freshly computed inverse in the cache.
  setinverse <- function (inverse) cached_inverse <<- inverse

  # Return the cached inverse (NULL when not yet computed).
  getinverse <- function () cached_inverse

  # Expose the accessors under the names cacheSolve() expects.
  list (set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
# cacheSolve is a function that computes the inverse of a matrix returned by makeCacheMatrix.
# If the inverse has already been calculated (and the matrix has not changed),
# then the cachesolve function should retrieve the inverse of the matrix from the cache.
# cacheSolve assumes that the matrix is invertible.
#
# Args:
#   x:   an object created by makeCacheMatrix (a list of accessor closures).
#   ...: further arguments forwarded to solve(), e.g. tol.
#
# Returns: the inverse of the stored matrix (possibly from the cache).
cacheSolve <- function (x, ...) {
  # Cache hit: return the stored inverse without recomputing.
  inv <- x$getinverse()
  if (!is.null (inv)) {
    message ("getting cached data")
    return (inv)
  }
  # Cache miss: compute the inverse, store it in the cache, return it.
  data <- x$get()
  # Bug fix: forward `...` to solve(); the original accepted `...` in the
  # signature but silently dropped it.
  inv <- solve (data, ...)
  x$setinverse(inv)
  return (inv)
}
|
4b0ef43510bc27de6a5963e050dcaba13b8817e2
|
ec6a31fa07969e903b92fcc5aeffe816ffa5ce27
|
/Multi_Linear_Regression.R
|
702d7b7fbf8bcda10e19009a3bb2e024d476974a
|
[] |
no_license
|
cparrett300/Data-Science
|
44ff2af7db60ee1fc35af17e816c639378827a7c
|
e357cc447f10c289b3ec373db5827d1eaf266758
|
refs/heads/master
| 2023-02-08T01:50:29.699352
| 2023-02-06T20:51:21
| 2023-02-06T20:51:21
| 220,693,882
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,556
|
r
|
Multi_Linear_Regression.R
|
require(R6)
OLS <- function(y, y_hat) 1/(2*length(y))*sum((y-y_hat)^2)
# Matrix of i.i.d. standard-normal draws with the requested dimensions,
# filled column-major as rnorm() produces them.
randn <- function(rows, cols){
  matrix(rnorm(rows * cols), nrow = rows, ncol = cols)
}
# Multi-output linear regression trained by batch gradient descent with an
# optional L1 (lasso) penalty on the weights.
MultiLinearRegression <- R6Class("MultiLinearRegression",
  list(
    # w: weight matrix (n_features x n_outputs); b: bias row (1 x n_outputs).
    w = NULL,
    b = NULL,

    # Fit the model.
    #   X          : numeric matrix, n_samples x n_features
    #   y          : numeric matrix, n_samples x n_outputs
    #   eta        : learning rate
    #   epochs     : number of gradient-descent steps
    #   lambda1    : L1 regularisation strength
    #   show_curve : when TRUE, plot the training-loss curve
    fit = function(X, y, eta = 1e-3, epochs = 1e3, lambda1 = 0, show_curve = FALSE) {
      N <- nrow(X)
      # Bug fix: the original used dim(x) (lowercase), which referenced a
      # global `x` instead of the X argument.
      self$w <- randn(dim(X)[2], dim(y)[2])
      self$b <- randn(1, dim(y)[2])

      J <- vector(mode = "numeric", length = epochs)
      for (epoch in seq_len(epochs)) {
        y_hat <- self$predict(X)
        # Loss = OLS + L1 penalty. Consistency fix: the original added an
        # L2 term sum(w^2) here while the gradient below uses sign(w) (L1).
        J[epoch] <- OLS(y, y_hat) + lambda1/(2*N)*sum(abs(self$w))
        self$w <- self$w - eta*(1/N)*(t(X) %*% (y_hat - y) + lambda1*sign(self$w))
        # Bug fix: per-output bias gradient. sum() collapsed all output
        # columns into a single scalar for multi-output y; colSums keeps
        # one gradient component per output (identical for one output).
        self$b <- self$b - eta*(1/N)*colSums(y_hat - y)
      }
      if (show_curve) {
        plot(seq_len(epochs), J, type = "l", main = "Training Curve",
             xlab = "epochs", ylab = "J")
      }
    },

    # Predict responses: X %*% w with the bias row broadcast across rows.
    predict = function(X) sweep(X %*% self$w, 2, -self$b)
  ))
|
a08cd5c701e476d5f7adcb9f49ca57448db9e1cd
|
43c0c2fa7cfa01633f87f1ef7e51437f5fcd448e
|
/pecuD3_final/pecuD3_shiny_final/server.R
|
b3c287853147af5e4f027c87e31e6b49483f9e0f
|
[
"MIT"
] |
permissive
|
OOmegaPPanDDa/pecuD3
|
a038e8a72bc06e3dfa0e1e040ca8a15884e5cabf
|
dab23996659c30d4afec47f96945a5a62c51b2ed
|
refs/heads/master
| 2021-06-15T13:40:07.705691
| 2017-03-16T07:08:47
| 2017-03-16T07:08:47
| 78,158,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,196
|
r
|
server.R
|
library(shiny)
library(dplyr)
library(googleVis)
library(ggplot2)
source('read_data.R')
# Shiny server for the train passenger-flow (train_flow) explorer.
# Renders a station GeoChart, a ranking table, and monthly/daily flow
# histograms for the year/month/day chosen in the UI.
shinyServer(function(input, output) {

  # GeoChart of stations for the selected date, sized by flow share and
  # coloured by absolute daily flow.
  output$map <- renderGvis({
    # NOTE(review): `<<-` publishes date_data into the enclosing/global
    # environment -- presumably for reuse elsewhere; confirm intentional.
    date_data <<- train_flow %>%
      filter(year==input$year & month == input$month & day == input$day)
    date_flow_sum <- sum(date_data$flow)
    map_data <- data.frame(站名=date_data$stop_name, 日流量=date_data$flow, 日流量佔比=date_data$flow/date_flow_sum,
                           LatLong = date_data$LatLong)
    map_data <- map_data %>% arrange(desc(日流量))
    # When "hide" is 否 (no), keep only the 20 busiest stations.
    if(input$hide == '否'){
      map_data <- map_data[1:20,]
    }
    gvisGeoChart(map_data, "LatLong",
                 hovervar = "站名",
                 sizevar='日流量佔比',
                 colorvar="日流量",
                 options=list(region="TW",colors="['#F1E1FF', '#FF0000']"))
  })

  # Ranked table of station flows for the selected date.
  output$table <- renderGvis({
    date_data <- train_flow %>%
      filter(year==input$year & month == input$month & day == input$day)
    date_flow_sum <- sum(date_data$flow)
    table_data <- data.frame(站名=date_data$stop_name, 日流量=date_data$flow, 日流量佔比=date_data$flow/date_flow_sum)
    table_data <- table_data %>% arrange(desc(日流量))
    if(input$hide == '否'){
      table_data <- table_data[1:20,]
    }
    # Add a rank column and move it to the front.
    table_data$排名 <- seq(from = 1, to = nrow(table_data))
    table_data <- table_data[,c(4,1,2,3)]
    gvisTable(table_data)
  })

  # Monthly totals for the selected year, on a fixed y-axis (0..22M),
  # shifted down by the user-chosen threshold.
  output$month_hist <- renderPlot({
    year_data <- train_flow %>%
      filter(year==input$year)
    year_data <- year_data %>% group_by(month) %>% mutate(month_count=sum(flow)) %>% ungroup()
    year_data <- unique(year_data %>% select(month,month_count))
    year_data$month <- as.factor(year_data$month)
    options(scipen=999)  # suppress scientific notation on axis labels
    month_hist <- ggplot(year_data, aes(x=month,y=month_count-input$thresh_month,fill=month_count)) +
      geom_histogram(stat='identity',alpha = .8) +
      ylim(0, 22000000) +
      scale_fill_gradient("月流量", low = "#84C1FF", high = "#0066CC") +
      labs(title= paste0("Histogram for ", input$year, " Train", collapse = ''))+
      labs(x="月份", y=paste0("月流量 - ", input$thresh_month, collapse = ''))+
      theme(text = element_text(family= 'Arial Unicode MS'))
    month_hist
  })

  # Same monthly histogram without the fixed y-axis, so ggplot rescales
  # ("zooms in") to the data after the threshold shift.
  output$month_hist_zoomIn <- renderPlot({
    year_data <- train_flow %>%
      filter(year==input$year)
    year_data <- year_data %>% group_by(month) %>% mutate(month_count=sum(flow)) %>% ungroup()
    year_data <- unique(year_data %>% select(month,month_count))
    year_data$month <- as.factor(year_data$month)
    options(scipen=999)
    month_hist_zoomIn <- ggplot(year_data, aes(x=month,y=month_count-input$thresh_month,fill=month_count)) +
      geom_histogram(stat='identity',alpha = .8) +
      scale_fill_gradient("月流量", low = "#84C1FF", high = "#0066CC") +
      labs(title= paste0("Histogram for ", input$year, " Train (Zoom In to View)", collapse = ''))+
      labs(x="月份", y=paste0("月流量 - ", input$thresh_month, collapse = ''))+
      theme(text = element_text(family= 'Arial Unicode MS'))
    month_hist_zoomIn
  })

  # Daily totals for the selected month, on a fixed y-axis (0..1.5M).
  output$date_hist <- renderPlot({
    month_data <- train_flow %>%
      filter(year==input$year, month==input$month)
    month_data <- month_data %>% group_by(day) %>% mutate(day_count=sum(flow)) %>% ungroup()
    month_data <- unique(month_data %>% select(day,day_count))
    month_data$day <- as.factor(month_data$day)
    options(scipen=999)
    day_hist <- ggplot(month_data, aes(x=day,y=day_count-input$thresh_date,fill=day_count)) +
      geom_histogram(stat='identity',alpha = .8) +
      ylim(0, 1500000) +
      scale_fill_gradient("日流量", low = "#FFC78E", high = "#FF5809") +
      labs(title= paste0("Histogram for ", input$year,"年 ",input$month,"月 ","Train", collapse = ''))+
      labs(x="日期", y=paste0("日流量 - ", input$thresh_date, collapse = ''))+
      theme(text = element_text(family= 'Arial Unicode MS'))
    day_hist
  })

  # Daily histogram without the fixed axis ("zoom in" variant).
  output$date_hist_zoomIn <- renderPlot({
    month_data <- train_flow %>%
      filter(year==input$year, month==input$month)
    month_data <- month_data %>% group_by(day) %>% mutate(day_count=sum(flow)) %>% ungroup()
    month_data <- unique(month_data %>% select(day,day_count))
    month_data$day <- as.factor(month_data$day)
    options(scipen=999)
    day_hist_zoomIn <- ggplot(month_data, aes(x=day,y=day_count-input$thresh_date,fill=day_count)) +
      geom_histogram(stat='identity',alpha = .8) +
      scale_fill_gradient("日流量", low = "#FFC78E", high = "#FF5809") +
      labs(title= paste0("Histogram for ", input$year,"年 ",input$month,"月 ","Train (Zoom In to View)", collapse = ''))+
      labs(x="日期", y=paste0("日流量 - ", input$thresh_date, collapse = ''))+
      theme(text = element_text(family= 'Arial Unicode MS'))
    day_hist_zoomIn
  })
})
|
b92db002fa758ed9fd1c3f67ffcf521916b4fa02
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/R/sample.mode.R
|
1386c3da5705f088d62cc7290d9b8e17b6e1255c
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| false
| 896
|
r
|
sample.mode.R
|
#' Calculate Sample Mode
#'
#' Calculate the mode of a data set, defined as the value(s) that occur most often.
#'
#' @param x Vector - The object to remove names from
#'
#' @return Calculated mode value
sample.mode <- function(x) {
m <- NA
if (length(x) > 1) {
if (is.character(x[1])) {
m.out <- sample.mode(factor(x))
if (!is.na(m.out[1])) {
m <- levels(m.out)[m.out]
}
} else if (is.factor(x[1])) {
m.out <- lolcat::sample.mode(as.numeric(x))
if (!is.na(m.out[1])) {
m <- factor(levels(x)[m.out], levels(x))
} else {
m <- factor(NA, levels(x))
}
} else {
dist <- frequency.dist.ungrouped(x)
m <- dist$value[which(dist$freq == max(dist$freq))]
if (nrow(dist) > 1 & length(m) == nrow(dist)) {
m <- NA
}
}
} else if (length(x) == 1) {
m <- x
}
m
}
|
9797a278683035cb4ede006e56757bc524b6507b
|
082bbd3b3e173d802cdc8f4ee090aad0fb821760
|
/HW3/hw3p-p2_Xichen.R
|
be45519fd94110eec729b8bfbb28f7a48732159f
|
[] |
no_license
|
JohnnyBarber/Investment
|
2357b9004e1efdd771aabd931eda6b810369e106
|
67043859e6d61ed6d07570d9fff394977aa964f2
|
refs/heads/master
| 2020-04-27T19:25:22.307239
| 2019-03-08T22:09:49
| 2019-03-08T22:09:49
| 174,617,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 109
|
r
|
hw3p-p2_Xichen.R
|
# 2
# put a and c in pdf
# Forward rate
# b = the forward rate implied by the 1y (6%) and 3y (7%) spot rates:
# (1 + f)^2 = (1 + 0.07)^3 / (1 + 0.06), so f = sqrt(...) - 1, rounded
# to 2 decimal places.
# NOTE(review): this assumes annual compounding over a 2-period forward
# window -- confirm against the assignment's convention.
b=round(sqrt((1+0.07)^3/(1+0.06))-1,2)
# Record the answer in the shared solution list; `mysoln` is assumed to
# have been created earlier in the assignment script.
mysoln[["Q2"]] = list(b=b)
|
c59584ad2ddc3f1a904708d76ab60f8a134759e0
|
d05a635d55dd1ca2df01865f9c7da2f1fa8eaa05
|
/inst/demo/demo_treeandleaf.R
|
13b39ae0e54ff006cd9b094529fc0c0b43e4328e
|
[] |
no_license
|
daniloimparato/easylayout
|
9de520b6e6c0914e6cb2b1f0e78cf4a7b602ff40
|
d200c2dbea3f56253850b9f4a52d731e4d06b971
|
refs/heads/master
| 2021-06-25T00:37:45.390921
| 2020-06-04T21:13:39
| 2020-06-04T21:13:39
| 210,895,795
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
demo_treeandleaf.R
|
#devtools::install_github("daniloimparato/easylayout", force=T)
library(easylayout)
library(igraph)
library(ggraph)
# NOTE(review): load() is called with no arguments, which errors at run
# time. The .RData file providing `string_interactions` was presumably
# meant to be passed here -- confirm and supply the path.
load()
# Build an undirected graph from the interaction edge list.
g <- graph_from_data_frame(string_interactions, directed=F)
# Annotate each vertex with its degree for colouring/sizing below.
V(g)$degree <- degree(g)
# Compute 2-D node coordinates with easylayout.
layout <- easylayout(g)
# Draw grey edges and nodes coloured/sized by degree on a blank canvas.
ggraph(g, layout = layout) +
  geom_edge_link(color="#999999") +
  geom_node_point(aes(color = degree, size = degree)) +
  theme_void()
|
7ff775daf08f2e2fc49064352aa1a1f6569ca4b7
|
462980a11e4b73ee26290f78ba028537224eba78
|
/3. Getting and Cleaning Data/Qiuz 1.R
|
d5a02761406cba450b8762eb0e4cab4f33f57824
|
[] |
no_license
|
gravialex/Coursera-Data-Scientist
|
1673f37845320d5c376da1d2c62149a6e0f11987
|
c72d20b58d8f5d5d7ff70e24ed7fce832f0fd7ca
|
refs/heads/master
| 2020-07-22T23:13:48.130566
| 2017-03-01T11:17:38
| 2017-03-01T11:17:38
| 73,820,520
| 0
| 0
| null | 2017-03-01T11:01:28
| 2016-11-15T14:18:46
|
HTML
|
UTF-8
|
R
| false
| false
| 1,195
|
r
|
Qiuz 1.R
|
# Getting and Cleaning Data -- Quiz 1 script.
# Downloads the ACS housing survey and answers the quiz questions.
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(URL, destfile = "./data/data.csv")
# 1: count households with property-value code VAL == 24
data<-read.csv("./data/data.csv")
v24 <- subset(data, VAL==24)
nrow(v24)
# 2: inspect the FES variable
fes <- data[["FES"]]
unique(fes)
summary(fes)
table(fes)
# 3: read a range from an Excel workbook (rows 18-23, columns 7-15)
install.packages("xlsx")
library(xlsx)
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(URL, destfile = "./data/FDATA.gov_NGAP.xlsx")
rowIndex <- 18:23
colIndex <- 7:15
# 4: parse the restaurants XML document
library(XML)
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
##download.file(URL, destfile = "./data/Frestaurants.xml")
doc <- xmlTreeParse(URL)
rootNode <- xmlRoot(doc)
names(rootNode)
#5: time several ways of computing mean(pwgtp15) by SEX
install.packages("data.table")
library(data.table)
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
DT <- fread(input=URL, sep=",")
system.time(sapply(split(DT$pwgtp15,DT$SEX),mean))
# Bug fix: the two-expression timings were written as
# system.time(expr1; expr2), a syntax error; wrap both expressions in
# braces so they run as one compound expression inside system.time().
system.time({rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]})
system.time(DT[,mean(pwgtp15),by=SEX])
system.time({mean(DT[DT$SEX==1,]$pwgtp15); mean(DT[DT$SEX==2,]$pwgtp15)})
system.time(tapply(DT$pwgtp15,DT$SEX,mean))
system.time(mean(DT$pwgtp15,by=DT$SEX))
|
9ec3b0ad99b36493328f6494cdc2278faad4ec15
|
6d443800445592a4bcdc3531a850d5152942e2fd
|
/server.R
|
ef0c385448c5075563330a90b1aeb92a4882e6a6
|
[] |
no_license
|
angy89/InsideNano
|
35f2004414bd1065df4db686ceefdb2096b789da
|
0b5ee4502106740acc3daec100cac37f015791d3
|
refs/heads/master
| 2021-01-18T21:11:38.811196
| 2016-01-10T20:23:47
| 2016-01-10T20:23:47
| 45,189,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,691
|
r
|
server.R
|
# Shiny server for the InsideNano explorer. Gated behind a single-user
# login; on success it loads the precomputed networks from APP_PATH and
# wires up the query listeners.
shinyServer(function(input, output,session){
  check_login(output,FALSE) #plot the login control
  set_observer(input,output,session) #set the observe event
  observeEvent(input$login, {
    validate_login(input,output)
    # Credentials are compared against the global `username`/`password`.
    if(input$username != username | input$password != password){
      login_failed(input,output,session)
    }else{
      login_successful(input,output,session)
      check_login(output,TRUE)
      phenotypic_network_UI(input,output)
      free_query_UI(input,output)
      conditionl_query_UI(input,output)
      # Load the four precomputed .RData files with a progress bar.
      # NOTE(review): "Data Loaded 3/4" appears twice and the bar is
      # advanced 5 times for 4 loads -- likely copy-paste; confirm.
      withProgress(message = 'Progress...', min = 1,max = 5, {
        load(paste(APP_PATH,"graph_without_genes_also_intra_classes_edges_with_properties80.RData",sep=""))
        incProgress(1, detail = "Data Loaded 1/4")
        load(paste(APP_PATH,"graph_without_genes_also_intra_classes_edges_network_estimation80_2.RData",sep=""))
        incProgress(1, detail = "Data Loaded 2/4")
        load(paste(APP_PATH,"gene_network_KEGG_th99.RData",sep=""))
        incProgress(1, detail = "Data Loaded 3/4")
        load(paste(APP_PATH,"big_net_with_chemical_up_down80_2.RData",sep=""))
        incProgress(1, detail = "Data Loaded 3/4")
        incProgress(1, detail = "Data Loaded 4/4")
        incProgress(1, detail = "Waiting For input!")
      })
      observeEvent(input$refresh_free,{free_query_UI_refresh(input,output)}) #and observe event refresh_free
      observeEvent(input$Refresh, {conditional_query_UI_refresh(input,output)}) #end Observe Event refresh conditional query
      # gene_network_UI(input,output)
      # genes_input = gene_query_UI(input,output,g,genes_input)
      #gene_network_NANO_DISEASE_query(input,output,g)
      # Drop drug chemicals from the chemical list and from the adjacency
      # matrix / node-type vector loaded above.
      # NOTE(review): the +1292 offset presumably maps chemical-column
      # indices to full-matrix indices -- confirm against the data layout.
      toRem = which(chemical %in% drugs)
      chem_idx = which(node_type=="chemical")
      toRem2 = which(colnames(W_ADJ)[chem_idx] %in% intersect(chemical,drugs))
      chemical = chemical[-toRem]
      toRem2 = toRem2 + 1292
      W_ADJ = W_ADJ[-toRem2,-toRem2]
      node_type = node_type[-toRem2]
      # Build a named list of pathway groups for the selector below.
      path_group = names(table(vertices$group))
      path_group = lapply(path_group,FUN = function(i){i})
      names(path_group) = unlist(path_group)
      output$Patway <- renderUI({
        selectInput("Patway_g",label = "Gene Pathway",multiple = TRUE, choices = path_group, selected = path_group[[1]])
      })
      # Disease selector: "All" plus each disease present in W_ADJ.
      good_disease = disease[disease %in% colnames(W_ADJ)]
      ii_dis_list = list("All" = "ALL")
      for(ii in good_disease){
        ii_dis_list[[ii]]=ii
      }
      output$input_dis <- renderUI({
        selectInput("input_dis",label = "Disease",multiple = TRUE,choices = ii_dis_list,selected = ii_dis_list[[2]])
      })
      if(DEBUGGING)
        cat("waiting for input \n")
      # Listener 1: free query.
      observeEvent(input$Go, {
        free_query(input,output,disease_list,selected_nodes,W_ADJ,
                   th_p = input$th_slider/100,node_type,chemMat,join10,g,g_geni2) #in free_query.R
      }) #End Listener 1
      #ANGELA
      # Listener 2: conditional query, served from the LOG cache when an
      # identical query was run before.
      observeEvent(input$Go2, {
        load(paste(APP_PATH,"LOG.RData",sep=""))
        if(DEBUGGING){
          cat("LOG file loaded \n")
          cat("dim(LOG_CONDITIONAL",dim(LOG_CONDITIONAL),"\n")
        }
        log_file_to_load = check_already_existing_conditional_query(input,output,LOG_CONDITIONAL)
        if(DEBUGGING){
          cat("Query checked", log_file_to_load, "\n")
        }
        if(is.null(log_file_to_load)){
          if(DEBUGGING){
            cat("New conditional query \n")
          }
          conditional_query(input,output,disease_list,selected_nodes,W_ADJ,th_p = input$th_slider2/100,node_type,chemMat,join10,g,g_geni2,LOG_CONDITIONAL)
        }else{
          if(DEBUGGING){
            cat("Load conditional query \n")
          }
          load_conditional_query(input,output,log_file_to_load)
        }
      }) #End Listener 2
      # observeEvent(input$Go3, {
      #   if(DEBUGGING) cat("GENE QUERY\n")
      #   gene_query(input,output,disease_list,selected_nodes,W_ADJ,th_p = input$th_slider3/100,node_type,chemMat,join10,g,g_geni2,gene_input)
      #
      # }) #End Listener 3
      #
      # observeEvent(input$Go4,{
      #   gene_like_conditional_query(input,output,disease_list,selected_nodes,W_ADJ,th_p = input$th_slider3/100,node_type,chemMat,join10,g,g_geni2)
      #
      # })
      #Outside Listener
      # Static network plots rendered once after login.
      plot_item_network(input,output,W2_ADJ)
      plot_item_network_pie(input,output,W2_ADJ)
      plot_gene_network(input,output,g,g_geni2)
    }
  }) #end event login
}) #end server
|
0e3cea52fb9108b7f6865cd07204341047154b1d
|
169a6494a475f42d0452d3ade4622bde1eb939cc
|
/tests/testthat/test-lowest_common.R
|
720acd28f65da0449a24e57e2e107dda94cc1432
|
[
"MIT"
] |
permissive
|
ropensci/taxize
|
d205379bc0369d9dcdb48a8e42f3f34e7c546b9b
|
269095008f4d07bfdb76c51b0601be55d4941597
|
refs/heads/master
| 2023-05-25T04:00:46.760165
| 2023-05-02T20:02:50
| 2023-05-02T20:02:50
| 1,771,790
| 224
| 75
|
NOASSERTION
| 2023-05-02T20:02:51
| 2011-05-19T15:05:33
|
R
|
UTF-8
|
R
| false
| false
| 1,673
|
r
|
test-lowest_common.R
|
# Tests for taxize::lowest_common -- lowest common taxon of a set of ids.
context("lowest_common")

# curl option forwarded to NCBI requests (2L selects HTTP/1.1).
force_http1_1 <- list(http_version = 2L)

test_that("lowest_common works with ncbi, passing in classifications and doing internally", {
  skip_on_cran()
  skip_on_travis()

  # NCBI taxon ids; classifications fetched once and reused below.
  id <- c("9031", "9823", "9606", "9470")
  idc <- classification(id, db = 'ncbi', callopts = force_http1_1)
  # Three variants must agree: internal lookup, rank-limited lookup,
  # and lookup with precomputed classifications.
  aa <- lowest_common(id[2:4], db = "ncbi")
  bb <- lowest_common(id[2:4], db = "ncbi", low_rank = 'class')
  cc <- lowest_common(id[2:4], db = "ncbi", class_list = idc, low_rank = 'class')

  expect_is(aa, "data.frame")
  expect_is(bb, "data.frame")
  expect_is(cc, "data.frame")
  expect_named(aa, c('name', 'rank', 'id'))
  expect_named(cc, c('name', 'rank', 'id'))
  expect_identical(aa, bb)
  expect_identical(bb, cc)
  expect_equal(NROW(aa), 1)

  # passing in classification list obs. takes less time
  expect_lt(
    system.time(lowest_common(id[2:4], db = "ncbi", class_list = idc, low_rank = 'class'))[3],
    system.time(lowest_common(id[2:4], db = "ncbi", low_rank = 'class'))[3]
  )
})

test_that("lowest_common works with itis", {
  skip_on_cran()

  #lowest_common(spp, db = "itis")
  #spp <- c("Sus scrofa", "Homo sapiens", "Nycticebus coucang")
  ids <- c("180722","180092","572890")
  idc <- classification(ids, db = 'itis')
  # With and without the precomputed classification list must match.
  expect_identical(
    lowest_common(ids, db = "itis"),
    lowest_common(ids, db = "itis", class_list = idc)
  )
  bb <- lowest_common(ids, db = "itis", low_rank = 'class')
  cc <- lowest_common(ids, db = "itis", class_list = idc, low_rank = 'class')

  expect_is(bb, "data.frame")
  expect_is(cc, "data.frame")
  expect_named(cc, c('name', 'rank', 'id'))
  expect_identical(bb, cc)
  expect_equal(NROW(bb), 1)
})
|
8d1774a4503699459561e82f78b6622ba0a82b23
|
e91c5f8da9291cb2dfb9436bd9934054df6940cf
|
/Min_Rank_Val_Bet.R
|
fe956eee4c43ec1d3bfc0026c4d768c455907844
|
[] |
no_license
|
melgj/UKHR
|
671ee8307f2146f44c753aef8ade03eda68e4021
|
5653ff75aeddc6657503bfe171b9e6a3ce493b0a
|
refs/heads/master
| 2020-03-27T14:34:17.238033
| 2019-04-29T18:38:28
| 2019-04-29T18:38:28
| 105,633,141
| 0
| 0
| null | 2017-10-03T09:27:40
| 2017-10-03T09:12:01
| null |
UTF-8
|
R
| false
| false
| 5,047
|
r
|
Min_Rank_Val_Bet.R
|
#setwd("~/git_projects/UKHR_Project")
#
# top5 <- ukhr_master_BF %>%
# drop_na(Rating_Rank, BFSP_PL, ValueOdds_BetfairFormat, BetFairSPForecastWinPrice, VOR_Range, Value_Odds_Range,
# Speed_Rank_Range) %>%
# filter(Rating_Rank <= 5, ValueOdds_BetfairFormat <= 21, Actual.Runners >= 5) %>%
# group_by(UKHR_RaceID) %>%
# mutate(HiRnkValBet = if_else(Value_Odds_Ratio > 1.0, 1, 0),
# Won = if_else(BFSP_PL > 0, 1, 0)) %>%
# filter(HiRnkValBet == 1) %>%
# select(UKHR_RaceID, Year, Meeting, Horse, Ratings_Range ,Class_Rank_Range, Speed_Rank_Range, Runners_Range, RaceType, Handicap,Rating_Rank, BetFairSPForecastWinPrice, ValueOdds_BetfairFormat,
# Value_Odds_Ratio, VOR_Range, Value_Odds_Range, Betfair.Win.S.P., Won, BFSP_PL, HiRnkValBet) %>%
# mutate(Min_Rnk_Val_Bet = min_rank(Rating_Rank),
# BFSPvVOBF = if_else(Betfair.Win.S.P. > ValueOdds_BetfairFormat, 1, 0)) %>%
# filter(Min_Rnk_Val_Bet == 1,
# Value_Odds_Ratio <= 5.0)
#
# head(top5)
#
# #nrow(top5)
# sum(top5$BFSP_PL)
# mean(top5$BFSP_PL)
# mean(top5$Betfair.Win.S.P.)
# mean(top5$BetFairSPForecastWinPrice)
# mean(top5$Won)
#
# top5 %>%
# group_by(Speed_Rank_Range) %>%
# summarise(Bets = n(),
# Avg_PL = mean(BFSP_PL),
# Total_PL = sum(BFSP_PL),
# Winners = sum(Won),
# Win_Percent = mean(Won)) %>%
# arrange(desc(Avg_PL)) %>%
# View()
# str(ukhr_master_BF$Prize)
#
# coursePrize <- ukhr_master_BF %>%
# group_by(Meeting) %>%
# summarise(Races = n(), Avg_Prize = mean(Prize, na.rm = T)) %>%
# mutate(Course_Rank = min_rank(desc(Avg_Prize)),
# Course_Grade = cut(Course_Rank, 4,
# labels = c("G1", "G2", "G3", "G4"),
# ordered_result = T)) %>%
# arrange(desc(Avg_Prize))
#
# coursePrize
#
# View(coursePrize)
#
# ukhr <- ukhr_master_BF %>%
# left_join(coursePrize, by = "Meeting")
#
# courseGrade <- coursePrize %>%
# mutate(Course_Grade = cut(desc(Avg_Prize), 4,
# labels = c("G1", "G2", "G3", "G4"),
# ordered_result = T)) %>%
# arrange(desc(Avg_Prize))
#
# courseGrade
#
# summary(courseGrade)
# Today's qualifying value bets: among runners ranked 1-4 on rating, keep
# (per race) the best-ranked runner whose value odds exceed the Betfair
# forecast price (Value_Odds_Ratio > 1), subject to price/field filters.
top5Q <- today %>%
  drop_na(Rating_Rank, ValueOdds_BetfairFormat, BetFairSPForecastWinPrice, VOR_Range, Value_Odds_Range,
          Speed_Rank_Range) %>%
  filter(Rating_Rank <= 4 , ValueOdds_BetfairFormat <= 21, Runners >= 5, (Value_Odds_Ratio > 1 & Value_Odds_Ratio <= 5.0),
         Ratings_Range != "Bottom_Third", Speed_Rank_Range != "Bottom_Third") %>%
  group_by(UKHRCardRaceID) %>%
  mutate(HiRnkValBet = if_else(Value_Odds_Ratio > 1.0, 1, 0)) %>%
  filter(HiRnkValBet == 1) %>%
  select(UKHRCardRaceID, Time24Hour, Meeting, Horse, Ratings_Range, Speed_Rank_Range, RaceType, Handicap,
         Rating_Rank, BetFairSPForecastWinPrice, ValueOdds_BetfairFormat, Value_Odds_Ratio, VOR_Range,
         Value_Odds_Range, HiRnkValBet) %>%
  # Within each race, keep only the qualifier with the best rating rank.
  mutate(Min_Rnk_Val_Bet = min_rank(Rating_Rank)) %>%
  filter(Min_Rnk_Val_Bet == 1) %>%
  arrange(Time24Hour, Meeting, Horse)

top5Q

# Export today's qualifiers, tagged with today's date.
write_csv(top5Q, paste0("ValueTop5_",today$Date[1],".csv"))

# Dual qualifiers: value bets that also appear in `asq` (matched on
# horse/time/meeting/odds), restricted to handicaps with top-third rating.
dualQuals <- top5Q %>%
  inner_join(asq, by = c("Horse", "Time24Hour", "Meeting", "ValueOdds_BetfairFormat")) %>%
  filter(Handicap.x == "HANDICAP", Ratings_Range.x == "Top_Third")

dualQuals

write_csv(dualQuals, paste0("Dual_Quals_", today$Date[1], ".csv"))
# top5QualsGD <- filter(top5Q, Horse %in% goodDrawToday$Horse)
#
# top5QualsBD <- filter(top5Q, Horse %in% badDrawToday$Horse)
#
# top5QualsGD
# top5QualsBD
############################################################
#
#
# rTop5 <- ukhr_master_BF %>%
# drop_na(Rating_Rank, BFSP_PL, ValueOdds_BetfairFormat, BetFairSPForecastWinPrice, VOR_Range, Value_Odds_Range,
# Speed_Rank_Range) %>%
# filter(Rating_Rank <= 3, ValueOdds_BetfairFormat <= 21, Actual.Runners >= 5) %>%
# group_by(UKHR_RaceID) %>%
# mutate(HiRnkValBet = if_else(Value_Odds_Ratio > 1.0, 1, 0),
# Won = if_else(BFSP_PL > 0, 1, 0)) %>%
# filter(HiRnkValBet == 1) %>%
# select(UKHR_RaceID, Year, Meeting, Horse, Ratings_Range ,Class_Rank_Range, Speed_Rank_Range, Runners_Range, RaceType, Handicap,Rating_Rank, BetFairSPForecastWinPrice, ValueOdds_BetfairFormat,
# Value_Odds_Ratio, VOR_Range, Value_Odds_Range, Betfair.Win.S.P., Won, BFSP_PL, HiRnkValBet) %>%
# mutate(Min_Rev_Rnk_Val_Bet = min_rank(desc(Rating_Rank)),
# BFSPvVOBF = if_else(Betfair.Win.S.P. > ValueOdds_BetfairFormat, 1, 0)) %>%
# filter(Min_Rev_Rnk_Val_Bet == 1,
# Value_Odds_Ratio <= 5.0)
#
# head(rTop5)
#
# #nrow(top5)
# sum(rTop5$BFSP_PL)
# mean(rTop5$BFSP_PL)
# mean(rTop5$Betfair.Win.S.P.)
# mean(rTop5$BetFairSPForecastWinPrice)
# mean(rTop5$Won)
#
# rTop5 %>%
# group_by(Rating_Rank) %>%
# summarise(Bets = n(),
# Avg_PL = mean(BFSP_PL),
# Total_PL = sum(BFSP_PL),
# Winners = sum(Won),
# Win_Percent = mean(Won)) %>%
# arrange(desc(Avg_PL)) %>%
# View()
#
|
bcdb5253beb145261aea92dbcb0a9ae4bcd9078b
|
1603c15605353094a3807180a2645654639138bb
|
/chapter-06-exercises/exercise-3/exercise.R
|
ac12029b3baf1a39b9d21e431dc7a15817e5be93
|
[
"MIT"
] |
permissive
|
sugarbagels/2020S_INFO201_BookExercises
|
254d4cefd2c6280ccc84e88045e4d75a63678040
|
658e808c2fce2994bc7b4e3132ab1a3da8d53935
|
refs/heads/master
| 2021-05-19T16:23:02.031454
| 2020-04-22T04:04:42
| 2020-04-22T04:04:42
| 252,024,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 750
|
r
|
exercise.R
|
# Exercise 3: writing and executing functions

# Define a function `add_three` that takes a single argument and
# returns a value 3 greater than the input
add_three <- function(a) {
  # Bug fix: the original returned sum(a + 3), which collapses a vector
  # input to a single total; element-wise addition matches the stated
  # contract and is identical for scalar input.
  a + 3
}
add_three(23)

# Create a variable `ten` that is the result of passing 7 to your `add_three`
# function
ten <- add_three(7)
# Define a function `imperial_to_metric` that takes in two arguments: a number
# of feet and a number of inches
# The function should return the equivalent length in meters
imp_2_met <- function(f, i) {
  total_inches <- f * 12 + i
  total_inches * 0.0254  # 1 inch is exactly 0.0254 m
}
imp_2_met(5, 0)

# Create a variable `height_in_meters` by passing your height in imperial to the
# `imperial_to_metric` function
height_meters <- imp_2_met(5, 2)
|
68e2e79fa441d67d7daa916a3b98c458cfe18648
|
e09fa7173766215ad0284d6c2608179d6e95121c
|
/man/find_hrefs.Rd
|
a0e2e18151c6e816fbe8bf55b6ba8eba1b2fa8b9
|
[] |
no_license
|
TuQmano/indek
|
a20ae12c8e46ad323410345f4c6429dc16558d7f
|
3bad9aff914504cfceb72a0cc67d0d209f520ae4
|
refs/heads/master
| 2020-04-18T03:24:06.613694
| 2019-01-07T22:41:37
| 2019-01-07T22:41:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 431
|
rd
|
find_hrefs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indek-page.R
\name{find_hrefs}
\alias{find_hrefs}
\title{'find_hrefs' search for resources whithin the links of a web page}
\usage{
find_hrefs(page, regex)
}
\arguments{
\item{regex}{a character vector with the regexs for matching.}
\item{page}{the web page object whose links are searched (matches the \code{page} argument shown in the usage).}
}
\description{
'find_hrefs' searches the links of a web page for resources whose
hrefs match the supplied regular expressions (S3 method for class 'indek.page').
}
|
89e0b0784f085c99c618016ce326f14ca2fe155e
|
49cbd9a3389a95715c578e42a81e93f59994037f
|
/man/cumall.Rd
|
4693cfd1be4d2bbe5be6ba7bade9df5c8812f93f
|
[
"MIT"
] |
permissive
|
s-fleck/lest
|
fec697bff4ea542d0b380e7386fb376878e33350
|
353e80e532b8a2742da6c154aefee0b64d8024c7
|
refs/heads/master
| 2020-03-25T09:56:58.754740
| 2019-11-28T14:58:53
| 2019-11-28T15:00:32
| 143,680,243
| 29
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 408
|
rd
|
cumall.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cumall.R
\name{cumall}
\alias{cumall}
\alias{cumany}
\title{Cumulative all and any}
\usage{
cumall(x)
cumany(x)
}
\arguments{
\item{x}{a \code{logical} vector.}
}
\value{
a \code{logical} vector
}
\description{
Cumulative all and any
}
\examples{
cumall(c(TRUE, TRUE, NA, TRUE, FALSE))
cumany(c(FALSE, FALSE, NA, TRUE, FALSE))
}
|
372d06082231117d6efa88fe732303f3dde7ad58
|
09f19d2460871a24f38cfe5c71a4aefe3bb901eb
|
/tests/tst_mtl_preds_10data190709.R
|
f4dafc6704c18ed94b78924d5684422e80e1ece4
|
[] |
no_license
|
iaolier/mtl-qsar
|
080b7518e5a3b98bf5da68042bdf3e4ef8354b23
|
0217155b051e9b87a5be9527127dc784c59ee041
|
refs/heads/master
| 2020-05-22T17:00:37.647504
| 2019-08-08T15:47:17
| 2019-08-08T15:47:17
| 186,443,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,094
|
r
|
tst_mtl_preds_10data190709.R
|
#!/usr/bin/Rscript --vanilla
# nohup ./tests/tst_mtl_preds_10data.R > ~/logs/tst_mtl_preds_10data.log &
#' Script to compare stl and mtl through RF model performance.
library(devtools)
load_all(".")

# Input paths: primary datasets, CV fold assignments, assistant data.
#paths
dsets_prim_path <- "/shared/mtl-qsar/datasets/originals/"
dsets_splits_path <- "/shared/mtl-qsar/data_splits/"
dsets_assist_path <- "/shared/mtl-qsar/datasets/assist_data_v190709/"

# Output paths for single-task (stl) and multi-task (mtl) predictions.
#output path
preds_stl_path <- "/shared/mtl-qsar/tmp/predictions/stl/ranger/"
dir.create(preds_stl_path, recursive = T)
preds_mtl_path <- "/shared/mtl-qsar/tmp/predictions/mtl/ranger_v190709/"
dir.create(preds_mtl_path, recursive = T)

# available assistant datasets:
assist_data_fnam <- list.files(dsets_assist_path)
# get ids
# NOTE(review): the "." in ".csv" is an unescaped regex metacharacter,
# so this also strips e.g. "Xcsv"; harmless for these filenames but
# "\\.csv$" would be safer.
data_ids <- assist_data_fnam %>% str_remove_all("data-assist_|.csv")

library(ranger)
# Train a ranger random forest on `dset_trn` (response: pXC50) and return
# the test-set rows as a data frame with columns rows_id (molecule id),
# truth (observed pXC50) and prediction (forest prediction).
make_preds_ranger <- function(dset_trn, dset_tst) {
  fitted_rf <- ranger(pXC50 ~ ., data = dset_trn)
  rf_preds <- predict(fitted_rf, data = dset_tst)$predictions
  result <- select(dset_tst, rows_id = molecule_id, truth = pXC50)
  mutate(result, prediction = rf_preds)
}
# For each dataset id: run 10-fold CV twice -- once on the primary data
# only (single-task) and once on primary + assistant data (multi-task) --
# and write both prediction files.
walk(data_ids, function(did){
  dset <- paste0(dsets_prim_path,"data_",did,".csv") %>% read.csv
  d_split <- paste0(dsets_splits_path,"data-split_",did,".csv") %>% read.csv %>% rename(molecule_id = rows_id)
  d_assist <- paste0(dsets_assist_path,"data-assist_",did,".csv") %>% read.csv
  #browser()
  # Attach the fold assignment to the primary data.
  dset <- inner_join(d_split, dset)
  # Test set per fold; training sets exclude the fold.
  dset_tests <- dset %>% split(.$fold)
  dset_prims <- map(1:10, ~ filter(dset, fold != .))
  # Assistant rows for each fold are appended to the training data for
  # the multi-task ("extended") variant.
  dset_assists <- map(1:10, ~ filter(d_assist, fold == .))
  dset_exts <- map(1:10, ~ bind_rows(dset_prims[[.]], dset_assists[[.]]))
  # Drop bookkeeping columns before model fitting.
  dset_exts <- map(dset_exts, ~ select(.x, -c(molecule_id, fold, dataset_id)))
  dset_prims <- map(dset_prims, ~ select(.x, -c(molecule_id, fold)))

  # Single-task predictions (primary training data only).
  map_dfr(1:10, ~ make_preds_ranger(dset_prims[[.x]], dset_tests[[.x]]), .id = "fold") %>%
    write_csv(path = paste0(preds_stl_path, "preds-did_", did, ".csv"))
  # Multi-task predictions (primary + assistant training data).
  map_dfr(1:10, ~ make_preds_ranger(dset_exts[[.x]], dset_tests[[.x]]), .id = "fold") %>%
    write_csv(path = paste0(preds_mtl_path, "preds-did_", did, ".csv"))
})
|
a9c319dceddbd42de76ab4a3694c5e65a74e61eb
|
823a0d1c34ac3bdccb51596fbfb0a65bbd5a77bd
|
/RawGeno/R/SMPLDIAGVAL.R
|
c0be7f1fa7984cb102c4f868b21ca13529b53799
|
[] |
no_license
|
arrigon/RawGeno
|
ca66e64ce5fc1ac9c39da25345c8826093a4cfd5
|
9898e0b525c54fc7afe58727fac354dfc846ac65
|
refs/heads/master
| 2021-01-11T00:14:19.789351
| 2016-10-11T14:18:19
| 2016-10-11T14:18:19
| 70,573,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 765
|
r
|
SMPLDIAGVAL.R
|
SMPLDIAGVAL <-
# Export per-sample diagnostic values (peak counts, mean band frequency,
# and peak-height mean/SD) to SamplesDiagnosticValues.txt in `path`.
#
# Relies on objects created earlier in the RawGeno session:
# `mergedTable` when present, otherwise `data.binary` (with its
# $data.binary presence/absence and $data.height matrices).
function(path=getwd()){
  if(exists("mergedTable")){
    data.bin=mergedTable
  } else {
    data.bin=t(data.binary$data.binary)
  }

  ### Prepare diagnostics
  nbbin=rowSums(data.bin)                       # peaks per sample
  binFreq=colMeans(data.bin,na.rm=TRUE)         # frequency of each band
  binFR=t(t(data.bin)*binFreq)
  binFR[binFR==0]=NA
  binFR=rowMeans(binFR,na.rm=TRUE)              # mean freq of the sample's bands

  if(exists("mergedTable")){
    data.test=mergedTable
  } else {
    data.test=t(data.binary$data.height)
  }
  data.test[data.test==0]=NA                    # absent peaks excluded from stats
  h.var=apply(data.test,1,sd,na.rm=TRUE)        # per-sample peak-height SD
  h.mean=rowMeans(data.test,na.rm=TRUE)         # per-sample mean peak height

  sample.diags=data.frame(NumbPeaks=nbbin,R=binFR,BinHeightMean=h.mean,BinHeightSD=h.var)
  # Idiom fixes: T -> TRUE throughout, and file.path() instead of pasting
  # .Platform$file.sep by hand (same resulting path).
  write.table(sample.diags,file.path(path,'SamplesDiagnosticValues.txt'),sep='\t')
  cat('Done!\n')
}
|
f9fe021705b42caa2ff6b4e659caba42b2258842
|
5b153389e67e59a30aebf6a0d84b69fd89f805d4
|
/quantutils/R/date.busDiff.r
|
a8378424a5d2c2e01b0aabc1e827e1de6df8046e
|
[] |
no_license
|
dengyishuo/dengyishuo.github.com
|
480d9b5911851e56eca89c347b7dc5d83ea7e07d
|
86e88bbe0dc11b3acc2470206613bf6d579f5442
|
refs/heads/master
| 2021-03-12T23:17:10.808381
| 2019-08-01T08:13:15
| 2019-08-01T08:13:15
| 8,969,857
| 41
| 35
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,143
|
r
|
date.busDiff.r
|
##' Count business days between two business dates
##'
##' Element-wise number of business days from \code{fromDate} to
##' \code{toDate}, computed as the difference of positions in the
##' region's business-date calendar (\code{DATE.BUS.DATES}). Inputs are
##' recycled to a common length; both must already be business dates or
##' an error is raised; missing elements yield \code{NA}.
##' @title Diff between date and BusDate
##' @param fromDate start Date
##' @param toDate end Date
##' @param region region of stocks or bonds
##' @return out Diff between date and busDate
##' @export
##' @author Weilin Lin
date.busDiff <- function(fromDate, toDate, region) {
    # Normalise both inputs via the package's date converter.
    fromDate <- date.convert(fromDate)
    toDate <- date.convert(toDate)
    # Recycle both vectors to the longer length L.
    L <- max(length(fromDate), length(toDate))
    if (length(fromDate) != L)
        fromDate <- rep(fromDate, length.out = L)
    if (length(toDate) != L)
        toDate <- rep(toDate, length.out = L)
    # i: positions where both dates are present; only these are validated
    # and computed, the rest stay NA.
    i <- which(!is.missing(fromDate) & !is.missing(toDate))
    # Both endpoints must already be business dates in this region.
    if (any(date.toBusDate(fromDate[i], region = region) != fromDate[i]))
        stop("Input arg 'fromDate' to date.busDiff() was not all business dates")
    if (any(date.toBusDate(toDate[i], region = region) != toDate[i]))
        stop("Input arg 'toDate' to date.busDiff() was not all business dates")
    output <- rep(NA, L)
    # Difference of calendar positions = business-day count between them.
    output[i] <- match(toDate[i], DATE.BUS.DATES[[region]]) - match(fromDate[i],
        DATE.BUS.DATES[[region]])
    output
}
|
c242988f2934765d0e14894152a4396e15133484
|
6c0e36ea67a5f7a50051a6eef635283a589d020e
|
/inst/examples/PiecesDevice/pieceDevice.R
|
2736a22cf785257bf722585e18c580fd7d70a627
|
[] |
no_license
|
omegahat/RGraphicsDevice
|
1f07432c7a1e0400b5d955cc24754f0916e807ac
|
fb223a7cbb21c8e513dc377ed52f28e8f4241e9e
|
refs/heads/master
| 2022-01-25T13:22:36.464954
| 2022-01-15T18:04:58
| 2022-01-15T18:04:58
| 4,004,745
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,411
|
r
|
pieceDevice.R
|
#
# This graphics device tries to figure out what is being
# drawn as higher-level constructs, e.g. axes, titles,
# box for the entire plot, ...
#
# Basically, we collect up the calls to the graphical primitive
# functions and all the arguments of relevance. We also store
# the call stack (sys.calls()) and the names of the functions in effect
# on that stack. This helps us to identify what part of the plot is
# being drawn, e.g. axis(), title(), mtext(), ...
# We also can calculate the region data region
#
#
library(RGraphicsDevice)
# S4 class extending the base device-methods class with a `pages`
# accessor (a function returning the recorded pages).
setClass("PieceDeviceMethods",
          representation( pages = "function"),
         contains = "RDevDescMethods")
getDataRegion =
#
# Figure out in device coordinates what the data region for the
# current plot. This will handle par(mfrow = c(r, c))
# It returns the min and max for horizontal and for vertical.
#
function(dims, pars = par())
{
# "cxy" "fig" "mfg" "usr" "xaxp" "yaxp" are the interesting ones
xp = pars[c("fig", "plt")]
w = dims[1] * xp$fig[1:2]
h = dims[2] * xp$fig[3:4]
w = w[1] + diff(w) * xp$plt[1:2]
# row 1 of par(mfrow = c(2, n)) will yield high values
# since we are counting from the lower corner in this world.
h = min(h) + abs(diff(h)) * xp$plt[3:4]
list(hor = w, vert = h)
}
isDataRegionElement =
  # TRUE when a recorded primitive plausibly belongs to the data region:
  # it is an actual drawing primitive (not a mode/new-plot marker) and no
  # annotation routine (axis, title, ...) appears on its call stack.
function(type, val, gc = NULL, calls = val$calls) {
  marker_types <- c("mode", "newPlot", "newFrame")
  annotation_fns <- c("axis", "mtext", "localTitle", "title", "box")
  if (type %in% marker_types) {
    return(FALSE)
  }
  !any(calls %in% annotation_fns)
}
isDataRegionElementCoords =
  # Check that every coordinate pair (x, y) lies inside the data region.
  # `region` defaults to the current data region for a device of size
  # `dims`; `clip` is accepted for interface compatibility (unused here).
function(x, y, dims, region = getDataRegion(dims), clip = TRUE)
{
  inside_h <- x >= region$hor[1] & x <= region$hor[2]
  inside_v <- y >= region$vert[1] & y <= region$vert[2]
  all(inside_h & inside_v)
}
# Map from primitive type tag to the S3 class name of its description
# object; types without an entry fall back to a generic class (see the
# `add` closure in pieceDevice below).
classes = c(rect = "Rectangle", circle = "Circle", line = "Line",
            polyline = "PolyLine", polygon = "Polygon")
# pieceDevice: a "recording" graphics device built on RGraphicsDevice.
# Instead of rendering, every primitive drawing call (circle, line, rect,
# polygon, polyline, text, clip, ...) is captured as a classed descriptor
# record.  Completed pages are accumulated in a closure and can be
# retrieved via the returned `pages` accessor.
#
# width, height - device size in pixels.
# col, ps, fill - starting colour, point size and fill.
# funs          - device-methods object whose slots are populated below.
#
# Returns a list with the device object (`dev`) and the `pages` accessor.
pieceDevice =
function(width = 1000, height = 800, col = "black", ps = 12, fill = "transparent",
         funs = dummyDevice( obj = new("PieceDeviceMethods")))
{
    # Closure state: finished pages, and the elements of the page in progress.
    pages = list()
    elements = list()
    # return the plots
    funs@pages = function() pages
    dims = c(width, height)
    # Data region of the most recently recorded element; a change in it
    # signals that drawing has moved to a new (sub)plot.
    curRegion = NULL
    # Record one drawing primitive as a classed descriptor in `elements`.
    # `...` carries the primitive's coordinates/payload; `class` overrides
    # the S3 class otherwise looked up in the `classes` table.
    add = function(type, gc, ..., class = NA) {
        if(TRUE) {
            # Emit a synthetic "newPlot" marker whenever the data region
            # changes (e.g. moving to the next cell of par(mfrow)).
            r = unlist(getDataRegion(dims))
            if(is.null(curRegion) || any(curRegion != r)) {
                elements[[length(elements) + 1L]] <<- structure(list(type = "newPlot", region = r,
                                                                     index = length(elements) + 1L,
                                                                     origType = type),
                                                                class = "NewPlotDesc")
                curRegion <<- r
            }
        }
        val = list(...)
        val$type = type
        if(is.na(class)) {
            class = c(classes[type], "GraphicalObjectDesc")
        }
        # Types with no entry in `classes` fall back to a generic class.
        if(any(is.na(class)))
            class = c("GenericGraphicaObjectDesc", "GraphicalObjectDesc")
        class(val) = class
        # Capture the R-level call stack so that we can later tell which
        # plotting function (axis, title, points, ...) produced the element.
        k = sys.calls()
        val$calls = sapply(k[seq(1, length(k) - 2)], function(x) as.character(x[[1]]))
        val$fullCalls = k#[seq(1, length(k) - 2)]
        # Classify the element: a genuine data-region primitive must not come
        # from an annotation call and its coordinates must lie in the region.
        val$isDataRegionElement = isDataRegionElement(type, val, gc) && isDataRegionElementCoords(val$x, val$y, c(width, height))
        val$gcontext = if(!is.null(gc)) as(gc, "R_GE_gcontext") else NULL
        elements[[length(elements) + 1]] <<- val
    }
    # Primitive hooks: each simply records the call via add().
    funs@circle =
    function(x, y, r, gc, dev) {
        add("circle", gc, x = x, y = y, r = r)
    }
    funs@line =
    function(x1, y1, x2, y2, gc, dev) {
        add("line", gc, x = x1, y = y1, x2 = x2, y2 = y2)
    }
    funs@rect =
    function(x1, y1, x2, y2, gc, dev) {
        add("rect", gc, x = x1, y = y1, x2 = x2, y2 = y2)
    }
    funs@polygon =
    function(n, x, y, gc, dev) {
        add("polygon", gc, x = x[1:n], y = y[1:n])
    }
    funs@polyline =
    function(n, x, y, gc, dev) {
        add("polyline", gc, x = x[1:n], y = y[1:n])
    }
    # Disabled debugging hook for UTF-8 text.
    if(FALSE)
    funs@textUTF8 = function(x, y, text, rot, hadj, gc, dev) {
        cat("UTF8: ", text, "\n")
    }
    # Crude width estimate: character count scaled by point size and cex.
    funs@strWidth = function(str, gc, dev) {
        gc$cex * gc$ps * nchar(str)
    }
    funs@metricInfo = function(char, gc, ascent, descent, width, dev) {
        #XXX Need to ma
        # Maybe use Freetype.
        # When merging with a "real" device that will do the rendering,
        # defer to it.
        # NOTE(review): these are rough placeholder metrics, not real font
        # metrics — confirm acceptable for downstream consumers.
        width[1] = gc$ps * gc$cex
        ascent[1] = 1
        descent[1] = .25
    }
    funs@text = function(x, y, text, rot, hadj, gc, dev) {
        add("text", gc, x = x, y = y, text = text, rot = rot, hadj = hadj)
    }
    # Distinguish a reset of clipping to the full device from clipping to a
    # sub-region, and record each with its own descriptor class.
    funs@clip = function(x, y, x1, y1, dev) {
        if(all(c(x, y, x1, y1) == c(0, width, height, 0)))
            add("clipToDevice", NULL, x = x, y = y, x1 = x1, y1 = y1, class = "ClipToDevice")
        else
            add("clipToRegion", NULL, x = x, y = y, x1 = x1, y1 = y1, class = "ClipToRegion")
    }
    funs@mode = function(val, dev)
        add("mode", NULL, val = val)
    funs@newFrameConfirm = function(dev) {
        TRUE
    }
    # Close out the page in progress: push it onto `pages` unless it holds
    # nothing but newFrame/newPlot markers, then reset the element list.
    finishPlot = function() {
        if(length(elements) == 0 || all(sapply(elements, `[[`, "type") %in% c("newFrame", "newPlot")))
            return()
        pages[[length(pages) + 1]] <<- new("PlotDescList", elements)
        elements <<- list()
    }
    funs@newPage = function(gcontext, dev)
        finishPlot()
    funs@close = function(dev)
        finishPlot()
    # Initialise the low-level device description. The magic constants below
    # (cra, char offsets, line bias) tune the plot region and margins —
    # presumably calibrated by hand; confirm before changing.
    funs@initDevice = function(dev) {
        # The all important parameter to set ipr to get the plot region with adequate margins
        # dev$ipr = rep(1/72.27, 2)
        # dev$cra = rep(c(6, 13)/12) * 10
        dev$cra = c(10.8, 14.39)
        dev$startps = ps
        dev$canClip = TRUE # FALSE
        dev$canChangeGamma = FALSE
        dev$startgamma = 0
        dev$startcol = as(col, "RGBInt")
        dev$startfill = -1L
        dev$xCharOffset = .4899999
        dev$yCharOffset = .3332999
        dev$yLineBias = .1
        if(FALSE) {
            dev$hasTextUTF8 = TRUE
            dev$wantSymbolUTF8 = TRUE
        }
        dev$startfont = 1L
        dev$canHAdj = 2L
        # dev$top = 0
        # dev$bottom = dev$clipBottom = height
        # dev$left = 0
        # dev$right = dev$clipRight = width
    }
    funs@GEInitDevice = function(dev) {
        return(TRUE)
        # NOTE(review): everything after the return above is unreachable —
        # kept exactly as in the original.
        gedev <- as(dev, "GEDevDescPtr")
        gedev$recordGraphics = FALSE # TRUE
        gedev$ask = FALSE
    }
    dev = graphicsDevice(funs, as.integer(c(width, height)), col, fill, ps)
    list(dev = dev, pages = funs@pages)
}
|
7de66270af175fadf18d844af2b6ede19209f093
|
8cb0c44a74f7a61f06d41e18ff8c222cc5f28826
|
/man/NamedResourceDTOExperimentModel.Rd
|
cadea438f9479679e9986a18b2bda62daeabd15a
|
[] |
no_license
|
OpenSILEX/opensilexClientToolsR
|
cb33ddbb69c7596d944dcf1585a840b2018ee66c
|
856a6a1d5be49437997a41587d0c87594b0c6a36
|
refs/heads/master
| 2023-05-31T14:26:19.983758
| 2022-01-26T17:51:51
| 2022-01-26T17:51:51
| 360,246,589
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
NamedResourceDTOExperimentModel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NamedResourceDTOExperimentModel.r
\docType{data}
\name{NamedResourceDTOExperimentModel}
\alias{NamedResourceDTOExperimentModel}
\title{NamedResourceDTOExperimentModel Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
NamedResourceDTOExperimentModel
}
\description{
NamedResourceDTOExperimentModel Class
}
\section{Fields}{
\describe{
\item{\code{uri}}{}
\item{\code{name}}{}
\item{\code{rdf_type}}{}
\item{\code{rdf_type_name}}{}
}}
\keyword{datasets}
|
3f3040f94ec66b871055c3ceda1ef8de2ef58d28
|
8c6691d164054aa116a9285f1ad2e30c41c61bf5
|
/R/justify.R
|
24e238bd9069f3640ecab56407f57b9ec080bd16
|
[
"MIT"
] |
permissive
|
nacnudus/unpivotr
|
7d0ee014e96cddae82de7df60400a5914d4d098d
|
f7eb82b0e5f9ea67357402f7973a49254312d15c
|
refs/heads/main
| 2023-08-16T12:42:21.082390
| 2023-01-22T21:05:00
| 2023-01-22T21:05:00
| 66,308,149
| 179
| 21
|
NOASSERTION
| 2023-08-07T14:15:01
| 2016-08-22T21:04:24
|
R
|
UTF-8
|
R
| false
| false
| 1,368
|
r
|
justify.R
|
#' Align one set of cells with another set
#'
#' @description
#' If the header cells of a table aren't aligned to the left, right, top or
#' bottom of the data cells that they describe, then use [justify()] to re-align
#' them, using a second set of cells as a guide.
#'
#' @param header_cells Data frame of header cells with at least the columns
#'   'row' and 'col', which are `numeric` or `integer`.
#' @param corner_cells Data frame of guide (corner) cells with at least the
#'   columns 'row' and 'col', which are numeric/integer vectors.  Must have
#'   the same number of rows as `header_cells`.
#'
#' @name justify
#' @export
#' @examples
#' header_cells <- tibble::tibble(row = c(1L, 1L, 1L, 1L),
#'                                col = c(3L, 5L, 8L, 10L),
#'                                value = LETTERS[1:4])
#' corner_cells <- tibble::tibble(row = c(2L, 2L, 2L, 2L),
#'                                col = c(1L, 4L, 6L, 9L))
#' justify(header_cells, corner_cells)
justify <- function(header_cells, corner_cells) {
  UseMethod("justify")
}

#' @export
justify.data.frame <- function(header_cells, corner_cells) {
  # Each header cell needs exactly one guide cell.
  stopifnot(nrow(header_cells) == nrow(corner_cells))
  # Pair the cells up in reading order (top-to-bottom, left-to-right).
  header_cells <- dplyr::arrange(header_cells, row, col)
  corner_cells <- dplyr::arrange(corner_cells, row, col)
  # Move each header cell to the position of its guide cell.
  header_cells$row <- corner_cells$row
  header_cells$col <- corner_cells$col
  header_cells
}
|
855100fab2ae9cc868e7e646a96c84b32c0ad238
|
7cd817e9b83a5710e3b1d610dae7128436a13304
|
/love.R
|
8abc2614bb9ab2ae74c004e255ab909a1be69045
|
[] |
no_license
|
donlelek/Rsnippets
|
b6fa02827eda93cf8f8c309c621c065c962b7510
|
3315a34924c5034921bd0d0e585fb28d6ec4e825
|
refs/heads/master
| 2020-04-03T22:36:37.121140
| 2019-01-14T22:43:29
| 2019-01-14T22:43:29
| 24,967,402
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,665
|
r
|
love.R
|
# this will be a great t-shirt one day
# source:
# http://stackoverflow.com/questions/39870405/plotting-equations-in-r
library(ggplot2)
library(dplyr)

# One data frame per letter. tibble() (which replaces the deprecated
# data_frame()) evaluates columns sequentially, so `y` may refer to the
# `x`/`t` column defined just before it.
L <- tibble(x = 1:100,
            y = 1 / x)
O <- tibble(t = seq(-pi, pi, l = 100),
            x = 3 * cos(t),
            y = 3 * sin(t))
V <- tibble(x = -50:50,
            y = abs(-2 * x))
E <- tibble(y = seq(-pi, pi, l = 100),
            x = -3 * abs(sin(y)))

# Stack the letters; `.id` records which letter each row belongs to, and
# the factor labels (parsed by the facet labeller) are the equations.
pd <- bind_rows(L, O, V, E, .id = "letter") %>%
  select(-t) %>%
  mutate(letter = factor(letter, labels = c("y == 1/x",
                                            "x^2 + y^2 == 9",
                                            "y == abs(-2*x)",
                                            "x == -3*abs(sin(y))")))

# Draw each letter in its own free-scale facet, white axes on black.
ggplot(pd, aes(x, y)) +
  geom_vline(xintercept = 0, color = "white") +
  geom_hline(yintercept = 0, color = "white") +
  geom_path(size = 1.5, color = "red") +
  facet_wrap(~letter,
             scales = "free",
             labeller = label_parsed,
             nrow = 1,
             switch = "x") +
  labs(title = "ALL YOU NEED IS", caption = "...and ggplot") +
  theme_minimal() +
  theme(text = element_text(color = "white"),
        plot.background = element_rect(fill = "black", color = "black"),
        axis.text = element_blank(),
        axis.title = element_blank(),
        panel.grid = element_blank(),
        strip.text = element_text(color = "white", size = 50),
        plot.title = element_text(face = "bold", size = 50),
        plot.caption = element_text(color = "white", size = 20))

ggsave("~/Downloads/love_android.png", width = 19.20, height = 10.80, dpi = 100)
|
fc1349c4cb73229bd916ec269087db6e05897532
|
5fffd8bd76010b3bc6407069230c7acf5518d72d
|
/getandclean_question5.R
|
f2ff1ca722a47155fc84d63b42fb9a6add7082cc
|
[] |
no_license
|
cmohite/GettingAndCleaningData-Quiz2
|
8a3059c6e4a214d88425f2c2cc98ac5444a9528d
|
83c1bf091d2a70ae84540694a54574192e535ca9
|
refs/heads/master
| 2020-04-16T12:15:17.459555
| 2015-08-16T03:50:22
| 2015-08-16T03:50:22
| 40,708,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
getandclean_question5.R
|
# Getting & Cleaning Data quiz, question 5: download a fixed-width NOAA
# sea-surface-temperature file and sum its 4th parsed column.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
download.file(fileUrl, destfile="./Fwksst8110.for")
# widths: negative entries skip filler columns; skip = 4 drops the header lines
data <- read.fwf(file="./Fwksst8110.for", widths=c(-1,9,-5,4,4,-5,4,4,-5,4,4,-5,4,4),
skip=4)
sum(data[,4])
|
2689b561f6da041d50963627dd8ce760d6ffccb0
|
3a36591cd6483b6bd36a9277e7b42af710c87b39
|
/run_analysis.r
|
7d180a0b66acd6ee31cb2915dc66810352203c5e
|
[] |
no_license
|
tmregh/Getting-and-Cleaning-Data-Project
|
457e414c7431cb374ff8f4439dab8c9bdb38dff0
|
348e13592f987e99600be9d468e2702727cf2d9f
|
refs/heads/master
| 2021-01-20T05:04:59.161282
| 2014-08-24T23:00:17
| 2014-08-24T23:00:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,332
|
r
|
run_analysis.r
|
##############
# Read in the necessary Samsung data.
# Set the working directory to a folder containing ONLY the following
# 8 Samsung datasets; each file is read with read.table() and assigned to
# a variable named after the file (e.g. `X_train.txt`):
#   1. activity_labels   2. features
#   3. subject_train     4. subject_test
#   5. X_train           6. y_train
#   7. X_test            8. y_test
##############
setwd("your_directory_with_the_data")  # <- edit to point at the data folder
input_dats <- dir()
for (i in seq_along(input_dats)) {
  assign(input_dats[i], read.table(input_dats[i], sep = ""))
}

##############
# Step 1: merge the training and test sets into one dataset
##############
# meaningful label for the activity code read from y_train/y_test
names(y_train.txt) <- "activity_type"
names(y_test.txt) <- "activity_type"
# Combine measurements, activity codes and subject ids.
# data_ind keeps track of which dataset a record comes from.
train <- data.frame(X_train.txt, y_train.txt, subject_train.txt[, 1], data_ind = "train")
test <- data.frame(X_test.txt, y_test.txt, subject_test.txt[, 1], data_ind = "test")
names(train)[563] <- "subject"
names(test)[563] <- "subject"
# dataset requested
train_test_data <- rbind(train, test)

##############
# Step 2: keep only the mean and std measurements
##############
# Features with "mean" or "std" anywhere in the name are defined to be the
# mean/std variables for a given measurement.
mean_related_var_indices <- grep("mean", tolower(features.txt[, 2]))
std_related_var_indices <- grep("std", tolower(features.txt[, 2]))
# Sorted, combined column indices of the retained measurements. Keeping
# the sorted order here and reusing it in Step 4 guarantees that names and
# columns stay aligned (the original concatenated mean names then std
# names, which mislabelled the interleaved columns).
mean_std_indices <- sort(c(mean_related_var_indices, std_related_var_indices))
# Columns 562-564 are activity_type, subject and data_ind added in Step 1.
mean_std_data <- train_test_data[, c(mean_std_indices, 562, 563, 564)]

##############
# Step 3: use descriptive activity names
##############
# Convert the numeric activity code to a factor and relabel its levels
# with the names from activity_labels.txt.
mean_std_data$activity_type <- as.factor(mean_std_data$activity_type)
levels(mean_std_data$activity_type) <- as.character(activity_labels.txt$V2)

##############
# Step 4: label the dataset with descriptive variable names
##############
# Take the feature names in the SAME sorted order as the columns selected
# in Step 2, then append the three identifier columns.
var_names <- c(as.character(features.txt[mean_std_indices, 2]),
               "activity_type", "subject", "data_ind")
names(mean_std_data) <- var_names

##############
# Step 5: average of each measurement per activity and subject
##############
# Grouping factors: activity_type and subject.
facs <- list(activity_type = as.factor(mean_std_data$activity_type),
             subject = as.factor(mean_std_data$subject))
# Compute the mean of the 86 retained measurements for each group.
tidy_data <- aggregate(mean_std_data[, 1:86], facs, function(x) mean(x, na.rm = TRUE))

##############
# Output the dataset
##############
#write.table(tidy_data, "tidy_data.txt", row.names = FALSE)
|
166344e616ed0019632d54d2c900bc6318614f5a
|
efffa5e84284216ee1a4a3b633948eb80121c596
|
/R/runMLMethod.R
|
79e88693bfdf3320d1fca4734c434dacffc9f611
|
[] |
no_license
|
chethanjjj/LireNLPSystem
|
f178aa3969a179601cb5262db6d46ec7f9fab22f
|
408da4cdf8907ed0e2a0fdc650967bf31c9181f3
|
refs/heads/master
| 2021-06-20T22:09:43.098365
| 2021-06-19T14:07:07
| 2021-06-19T14:07:07
| 318,691,894
| 0
| 1
| null | 2020-12-05T03:22:24
| 2020-12-05T03:22:24
| null |
UTF-8
|
R
| false
| false
| 9,516
|
r
|
runMLMethod.R
|
#' This function elastic net logistic regression for a given finding and feature matrix
#' @param finding string indicating finding of interest
#' @param featureMatrix feature matrix
#' @param outcome dataframe indicating the labels for each report
#' @param trainID vector indicating the imageid's for the training data
#' @param testID vector indicating the imageid's for the test data
#' @param metric string indicating which metric to maximize (eg. "auc" or "fscore")
#' @param mlmethod string indicating the ml method (eg. "glmnet")
#' @param myControl caret myControl hyperparameters
#' @param outpath string indicating outpath to write results to to
#' @keywords runMLMethod
#' @import caret
#' @import lattice
#' @export
#' @return a list of 3 dataframes: metrics (performance metrics for train and test), predictions (predictions on train and test), model's features and coefficients
#' @examples
#' runMLMethod(finding = "fracture",
#' featureMatrix = featureMatrix,
#' outcome = outcome.df,
#' trainID = c(1,2,3),
#' testID = c(4,5,6),
#' metric = "auc",
#' mlmethod = "glmnet",
#' outpath = "Results/")
runMLMethod = function(finding,
featureMatrix,
outcome,
trainID,
testID,
metric,
mlmethod,
myControl,
outpath) {
### SET UP DATASETS
#### Match imageid ordering of both feature matrix and outcome dataframe
featureMatrix = featureMatrix[order(match(featureMatrix$imageid, outcome$imageid)), ]
outcome = outcome[order(match(featureMatrix$imageid, outcome$imageid)), ]
#### REMOVE REGEX AND NEGEX FOR NON-FINDINGS OF INTEREST
colsRemove = names(featureMatrix)[which(grepl(paste("(?<!",finding,")_(r|n)egex", sep="")
, names(featureMatrix), perl = TRUE))]
##### the only identifier is imageid
if (length(colsRemove) > 0) {
X = featureMatrix[ , -which(names(featureMatrix) %in% colsRemove)]
} else {
X = featureMatrix
}
Y = outcome %>%
dplyr::select(c("imageid", "siteID", "imageTypeID", finding)) %>%
dplyr::mutate(trueClass = factor(make.names(get(finding))))
##### caret function needs "X0" and "X1" as levels (corresponding to 0 and 1)
CONTROL_LEVEL = levels(Y$trueClass)[1]
CASE_LEVEL = levels(Y$trueClass)[2]
### PERFORM TRAINING
trainModel = train(x = X %>%
dplyr::filter(imageid %in% trainID) %>%
dplyr::select(-imageid) %>%
as.matrix(.),
y = Y %>%
dplyr::filter(imageid %in% trainID) %>%
dplyr::select(trueClass) %>%
unlist(.),
method = mlmethod,
metric = ifelse(metric == "auc","ROC", metric), # Loss function to optimize on
maximize = TRUE,
trControl = myControl,
tuneLength = 10
)
print(trainModel)
#### PREDICTION ON TRAINING DATASET
trainOutput = data.frame(imageid = Y$imageid[trainModel$pred[,"rowIndex"]],
siteID = Y$siteID[trainModel$pred[,"rowIndex"]],
imageTypeID = Y$imageTypeID[trainModel$pred[,"rowIndex"]],
trueClass = trainModel$pred[,"obs"],
predProb = trainModel$pred[,CASE_LEVEL],
fold = "train"
)
trainROC = prediction(predictions = trainOutput$predProb,
labels = trainOutput$trueClass,
label.ordering = c(CONTROL_LEVEL, CASE_LEVEL))
#### PLOT ROC AND PRECISION-RECALL CURVES
case = trainOutput$predProb[trainOutput$trueClass == "X1"]
control = trainOutput$predProb[trainOutput$trueClass == "X0"]
##### ROC
# roc = roc.curve(scores.class0 = case, scores.class1 = control, curve = T)
# try(png(paste0(outpath,"/trainROC.png")))
# try(plot(roc))
# try(dev.off())
##### Precision-Recall
# try(png(paste0(outpath, "/trainPrecisionRecall.png")))
# pr = pr.curve(scores.class0 = case, scores.class1 = control, curve = T)
# try(plot(pr))
# try(dev.off())
#### GET OPTIMAL CUT-OFF
if(metric == "f"){ # For F1, find cut off that gives highest F1 score
perfObject = performance(trainROC,
measure = metric,
x.measure = "cutoff")
k_optimal = perfObject@"x.values"[[1]][which.max(perfObject@"y.values"[[1]])]
}
if(metric == "auc"){ # For AUC, find cut off that gives topleft corner of ROC curve
perfObject = performance(trainROC,
measure = "tpr",
x.measure = "fpr")
# false positive rate
x = perfObject@"x.values"[[1]]
# true positive rate
y = perfObject@"y.values"[[1]]
# identify probability cut that leads to the topleft corner of the ROC curve
d = (x - 0)^2 + (y - 1)^2
ind = which(d == min(d))
k_optimal = perfObject@"alpha.values"[[1]][ind]
}
#### add the prediction based on the optimal cut, anything above the cut is case and anything
trainOutput$predClass = ifelse(trainOutput$predProb >= k_optimal,
CASE_LEVEL,
CONTROL_LEVEL)
### PERFORM TESTING
testOutput = data.frame(imageid = Y$imageid[which(Y$imageid %in% testID)],
siteID = Y$siteID[which(Y$imageid %in% testID)],
imageTypeID = Y$imageTypeID[which(Y$imageid %in% testID)],
trueClass = Y %>%
dplyr::filter(imageid %in% testID) %>%
dplyr::select(trueClass) %>%
unlist(.),
predProb = predict(trainModel,
newdata = X %>%
dplyr::filter(imageid %in% testID) %>%
dplyr::select(-imageid) %>%
as.matrix(.),
type = "prob")[,levels(Y$trueClass)[2]],
fold = "test"
)
testOutput$predClass = ifelse(testOutput$predProb >= k_optimal,
CASE_LEVEL,
CONTROL_LEVEL)
testROC = prediction(predictions=testOutput$predProb,
labels=testOutput$trueClass,
label.ordering = c(CONTROL_LEVEL,CASE_LEVEL))
#### PLOT ROC AND PRECISION-RECALL CURVES
case = testOutput$predProb[testOutput$trueClass == "X1"]
control = testOutput$predProb[testOutput$trueClass == "X0"]
##### ROC
# roc = roc.curve(scores.class0 = case, scores.class1 = control, curve = T)
# try(png(paste0(outpath,"/testROC.png")))
#try(plot(roc))
# try(dev.off())
##### Precision-Recall
# try(png(paste0(outpath, "/testPrecisionRecall.png")))
# pr = pr.curve(scores.class0 = case, scores.class1 = control, curve = T)
# try(plot(pr))
# try(dev.off())
### EVALUATION METRICS
trainMetric = CalcMetrics(test = trainOutput$predClass,
truth = trainOutput$trueClass,
N = length(trainID),
control = CONTROL_LEVEL,
case = CASE_LEVEL)
testMetric = CalcMetrics(test=testOutput$predClass,
truth=testOutput$trueClass,
N=length(testID),
control=CONTROL_LEVEL,
case=CASE_LEVEL)
metrics = rbind(train = c(trainMetric[-length(trainMetric)], auc=as.numeric(performance(trainROC,"auc")@y.values)),
test = c(testMetric[-length(testMetric)], auc=as.numeric(performance(testROC,"auc")@y.values))
)
metrics = as.data.frame(metrics)
metrics$Finding = finding
metrics$optim = metric # May be auc or f1
metrics$N = c(length(trainID), length(testID))
metrics$Partition <- rownames(metrics) # either "train" or "test" depending on which partition subject is in
### GET FEATURES
### model predictors/features
myFeat = coef(trainModel$finalModel, trainModel$bestTune$lambda)
myFeat = as.data.frame(as.matrix(myFeat))
# the last row is the optimal cut identified from the training data
myFeat = data.frame(predictors = c(row.names(myFeat),"cutoff"),
coef = c(myFeat[,1],k_optimal))
myFeat = myFeat[myFeat$coef != 0,]
myFeat$predictors = gsub("\\(|\\)","", myFeat$predictors, perl = TRUE) %>%
gsub("IMPRESSION", "IMP", .)
myFeat = myFeat %>% dplyr::arrange(desc(abs(coef)))
myFeat = rbind(myFeat %>% dplyr::filter(predictors != "cutoff"), myFeat %>% dplyr::filter(predictors == "cutoff"))
# output results
write.csv(metrics, paste(outpath, "metrics.csv", sep = "/"), row.names = FALSE)
write.csv(data.frame(rbind(trainOutput, testOutput)), paste(outpath, "prediction.csv", sep = "/"), row.names = FALSE)
write.csv(myFeat, paste(outpath, "features.csv", sep = "/"), row.names = FALSE)
return(list(metrics = metrics,
predictions = data.frame(rbind(trainOutput, testOutput)),
features = myFeat))
}
|
163c1f62fae5c24b371daef2fca73d8592fe3c11
|
335eb7d0a695b9a7bc5e1f5ed9dc676272776703
|
/man/objective.keepSKL.Rd
|
f86e26aaa12a3e3807fb470637e8ffdc95955faa
|
[] |
no_license
|
ayazagan/amalgam
|
750298c89709814696642304b52b24d0fab9b4a7
|
00ca3804f031cc67ff104ce35c8522f82b444ec9
|
refs/heads/master
| 2022-04-16T11:59:22.642445
| 2020-02-25T03:40:22
| 2020-02-25T03:40:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 548
|
rd
|
objective.keepSKL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/8-entropy.R
\name{objective.keepSKL}
\alias{objective.keepSKL}
\title{Calculate Fitness of Binary Vector}
\usage{
objective.keepSKL(codon, ARGS)
}
\arguments{
\item{codon}{A binary vector.}
\item{ARGS}{Handled by \code{\link{prepareArgs}}.}
}
\description{
This objective function seeks to maximize the correlation
between the (symmetric) Kullback-Leibler divergence of the
complete composition and the (symmetric) Kullback-Leibler
divergence of the amalgamation.
}
|
8c9e60094c6471f1e8f3f333e2e27904db2bb303
|
b8fa00b408af080b5e25363c7fdde05e5d869be1
|
/Project_0867117/Project_11.r
|
ea09b2124a5cb695d379345aad1ec6d5e2679b32
|
[] |
no_license
|
anurag199/r-studio
|
bc89f0c18a8d44164cb4ede8df79321ea965bc77
|
e42909505fbe709f476081be97f89cc945a2745d
|
refs/heads/master
| 2020-04-26T23:04:25.061780
| 2019-03-05T06:56:26
| 2019-03-05T06:56:26
| 173,891,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
Project_11.r
|
library("ggplot2")
library("plyr")
# plotting graph for birth_sort (Ascending order birth_data)
# NOTE(review): assumes a data frame `birth_sort` with columns births, year
# and month already exists in the workspace (created elsewhere in this
# project) — confirm before running this file standalone.
ggplot(birth_sort,aes(x=births, y=year, color=factor(month))) + geom_point() +
geom_smooth(method = 'loess') + ggtitle("births vs year")
|
2e55f763cf697036dcc79a253fa4f388d862fccc
|
37f2792ebd7f95a652cdb5f0256a63a42262fa62
|
/lesson3_Shinydashboard/app1/flexdashboard_script.R
|
f65e70acd9d04cb0f2777219e35e4de5a0b68e57
|
[] |
no_license
|
lili013/shiny_dashboard_tutorial
|
ccbbc2258a1b85e9686da2663e2e8276729afacc
|
22a6aa8c94b1cc99f7ead93384e3dd157961cf95
|
refs/heads/master
| 2022-12-11T01:41:44.639339
| 2020-09-18T22:31:51
| 2020-09-18T22:31:51
| 296,443,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
flexdashboard_script.R
|
library(ggplot2)
library(dplyr)
# Shiny economics module
# UI definition — note this is a fluidPage *object*, not a function:
# a column selector, an unemployment threshold, and the plot output.
economicUI <- fluidPage(
titlePanel("Widget"),
# fillCol keeps the input panel and the plot stacked with equal flex.
fillCol(height = 400, flex = c(1,1),
inputPanel(
selectInput(inputId = "variable",
label = "Select a column to display",
choices = colnames(economics)),
numericInput(inputId ="jobless",
label="Number of unemployed >=",
value=6000,min=6000, max=max(economics$unemploy),step=1000)
),
plotOutput("econoplot",height = "150%")
#leafletOutput()
#dataTableOutput()
)
)
# Server function: reactively filter ggplot2's `economics` dataset by the
# unemployment threshold and draw a histogram of the selected column.
economic<- function(input,output,session) {
# rows with at least input$jobless unemployed persons
dataset <- reactive({filter(economics,unemploy>=input$jobless)})
output$econoplot <- renderPlot({
ggplot(dataset(),aes_string(x=input$variable))+geom_histogram()})
}
shinyApp(ui=economicUI,server=economic)
|
f4b8b9d46bd7c8964955f0d976d8683e8fb65722
|
dd3a4cebe8b03dfce00c930c5cc2b844fbb1c015
|
/R code/plots.R
|
add7e6197a078c74c1088403f2aef7d6d2023409
|
[] |
no_license
|
DiabbZegpi/ggsave
|
c043ed9149280f12194f3a8fbcc26b8cbd1e9c8c
|
efd4bd3a598a79e72d6552ab986520b5adc6cec7
|
refs/heads/master
| 2022-04-17T09:07:10.023992
| 2020-04-19T22:58:06
| 2020-04-19T22:58:06
| 256,554,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 876
|
r
|
plots.R
|
# Compare ggsave() output from the default Windows PNG engine vs Cairo.
library(tidyverse)
library(patchwork)
library(ggforce)

data(iris)
# Fix: the original called theme_ligth(), which does not exist and errors
# at runtime; the ggplot2 function is theme_light().
theme_set(theme_light())

descrp <- "The smallest specie among virginica and versicolor"

# Scatter plot of petal dimensions with the setosa cluster annotated
# by an ellipse mark.
p <-
  ggplot(iris,
         aes(x = Petal.Width, y = Petal.Length)) +
  geom_point() +
  geom_mark_ellipse(show.legend = FALSE,
                    aes(fill = Species,
                        filter = Species == "setosa",
                        label = "Iris setosa",
                        description = descrp)) +
  labs(title = "Iris on default Windows PNG engine",
       x = "Petal width (cm)", y = "Petal length (cm)")

ggsave(filename = "images/iris_default.png", plot = p,
       height = 5, width = 7, units = "in", dpi = 500)

q <- p + labs(title = "Now with Cairo's PNG engine")
# type = "cairo" is forwarded to the png() device to select the Cairo engine.
ggsave(filename = "images/iris_cairo.png", plot = q,
       height = 5, width = 7, units = "in", dpi = 500, type = "cairo")
|
b5abb9f2e131e73857352443e7a2e5a44e192d14
|
86cc55c6a11ac25a08bf7cf05e93a7620440560a
|
/R_code_point_pattern_analysis.r
|
6b513d87f5fa455ee52d6a30e7cb77e624089a65
|
[] |
no_license
|
AmandaVecchi/Monitoring-Unibo
|
eebb07edce5b6a7f25cd39b2292ce4017e98ea95
|
32581e557aef3f5ba55b4ccc7b3cf8363a396754
|
refs/heads/master
| 2021-09-08T16:52:20.380310
| 2021-09-08T16:33:20
| 2021-09-08T16:33:20
| 250,028,031
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,544
|
r
|
R_code_point_pattern_analysis.r
|
## Point pattern analysis: density maps
# install.packages("spatstat")   # run once by hand, not on every execution
library(spatstat)

# attach our dataset, the one we are going to use
# NOTE(review): assumes a data frame `covid` with lon/lat columns is already
# loaded; attach() is generally discouraged — consider covid$lon etc.
attach(covid)
# what are the coordinates of the dataset and what are their extensions
head(covid)
covids <- ppp(lon, lat, c(-180, 180), c(-90, 90)) # ppp = planar point pattern: variables plus their full range
# density map
d <- density(covids)
# show density map
plot(d)
# put points on the density map
points(covids)

setwd("C:/LAB/")
load(".RData")
ls()
# covids: point pattern
# d = density map
library(spatstat)
plot(d)
points(covids)

# install.packages("rgdal")   # run once by hand, not on every execution
library(rgdal)
# let's input vector lines (x0y0, x1y1, x2y2..)
# download coastline file from IOL and put it in folder LAB
# Fix: the object was assigned as `coastline` but every later use said
# `coastlines` (undefined-object error) — standardised on `coastlines`.
coastlines <- readOGR("ne_10m_coastline.shp")
plot(coastlines, add = TRUE) # adds coastlines to previous plot of covids

# change of the colour: a ramp palette of 100 colours from yellow to red
cl <- colorRampPalette(c("yellow", "orange", "red"))(100)
plot(d, col = cl)
points(covids)
plot(coastlines, add = TRUE)

# Exercise: new colour ramp palette
clr <- colorRampPalette(c("light green", "yellow", "orange", "violet"))(100)
plot(d, col = clr, main = "densitites of covid-19")
points(covids)
plot(coastlines, add = TRUE)

# export as pdf (or png("covid_density.png"))
pdf("covid_density.pdf")
clr <- colorRampPalette(c("light green", "yellow", "orange", "violet"))(100)
plot(d, col = clr, main = "densitites of covid-19")
points(covids)
plot(coastlines, add = TRUE)
dev.off()
|
cb6e53f7490c9854580de7d43d0e8b4af448632b
|
6ed58d0c61899aeb5e4870621dc7412b3eaa9d6f
|
/GettingCleaningData/Semana3/mergingData.R
|
4173384b9e32958400c8434bfdc1c89d44bc1b7d
|
[] |
no_license
|
jspaz/DataScience
|
3be2c9497bf11af41168acdef83c764188cf68e2
|
b8bd27c4cc967c4127ef421585864f0f9de17b68
|
refs/heads/master
| 2020-04-06T06:19:28.585471
| 2017-12-25T03:58:48
| 2017-12-25T03:58:48
| 55,121,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
mergingData.R
|
# Download the example files (peer reviews and their solutions)
library(bitops)
library(RCurl)
if(!file.exists("./data")){dir.create("./data")}
fileUrl1 = "https://dl.dropboxusercontent.com/u/7710864/data/reviews-apr29.csv"
fileUrl2 = "https://dl.dropboxusercontent.com/u/7710864/data/solutions-apr29.csv"
download.file(fileUrl1, destfile = "./data/reviews.csv", method = "auto")
download.file(fileUrl2, destfile = "./data/solutions.csv", method = "auto")
reviews = read.csv("./data/reviews.csv"); solutions <- read.csv("./data/solutions.csv")
head(reviews, 2)
head(solutions, 2)
# Merging data with the parameters x, y, by, by.x, by.y, all
names(reviews)
names(solutions)
mergedData = merge(reviews, solutions, by.x = "solution_id", by.y = "id", all = TRUE) # with all = TRUE, rows present in only one table are kept, with NA filling the missing values
head(mergedData)
# Merge on all variables that share a common name
intersect(names(solutions), names(reviews))
mergedData2 = merge(reviews, solutions, all = TRUE)
head(mergedData2)
# Using a join with plyr
library(plyr)
df1 = data.frame(id = sample(1:10), x = rnorm(10))
df2 = data.frame(id = sample(1:10), y = rnorm(10))
arrange(join(df1, df2), id) # performs the join and sorts the result by id
# Join with multiple data frames
df1 = data.frame(id = sample(1:10), x = rnorm(10))
df2 = data.frame(id = sample(1:10), y = rnorm(10))
df3 = data.frame(id = sample(1:10), z = rnorm(10))
dfList = list(df1, df2, df3)
join_all(dfList)
|
3d16766952518790d947f661084b5cdfe3374b23
|
33083fb27268d8e1198697526c7bcd13a1bf3e41
|
/shiny_app/demoGraphDistance/tests/testthat/test_isNonBlankSingleString.R
|
6043fc701cae4d16be32e1faa48cd9256edf3246
|
[
"MIT"
] |
permissive
|
johndrummond/demo_shiny_graph_distance
|
5fedc9e4eb40e40867d55ff80bb48115c196d36b
|
49dd94a2d10be6a4543a276576bedc06db0e6145
|
refs/heads/master
| 2020-06-16T10:36:45.808336
| 2019-07-10T10:44:15
| 2019-07-10T10:44:15
| 195,542,773
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
r
|
test_isNonBlankSingleString.R
|
context("Util functions blank string")
library(demoGraphDistance)

# Non-character input can never be a non-blank single string.
# (Uses the idiomatic expect_false()/expect_true() instead of
# expect_equal(x, FALSE/TRUE) for clearer failure messages.)
test_that("isNonBlankSingleString false for Non character values", {
  expect_false(isNonBlankSingleString(NA))
  expect_false(isNonBlankSingleString(NULL))
  expect_false(isNonBlankSingleString(1))
  expect_false(isNonBlankSingleString(""))
  expect_false(isNonBlankSingleString(list("ab")))
})

# A character vector of length > 1 is not a *single* string.
test_that("isNonBlankSingleString false for character arrays length > 1", {
  expect_false(isNonBlankSingleString(c("", "a")))
  expect_false(isNonBlankSingleString(c("ab", "a")))
})

# Length-one, non-empty strings (whitespace and newlines allowed) pass.
test_that("isNonBlankSingleString true for single strings", {
  expect_true(isNonBlankSingleString("a"))
  expect_true(isNonBlankSingleString("ab"))
  expect_true(isNonBlankSingleString("ab v"))
  expect_true(isNonBlankSingleString("ab \nv"))
})
|
ed9688b87bcef3970f7f8901aba58f562f67202f
|
ec6dca004307b39d5629c5dc153d60a16e10c824
|
/NaiveBayes.R
|
f0943a9b914f4ae5ee9173f7962bcd125b249da2
|
[] |
no_license
|
ragnar-lothbrok/data-analytics
|
df6e9b912ac180db6d188afa4f6d5a63d71b738d
|
13a1c3c4824836d80b1f5a4dd28cd025f53e778c
|
refs/heads/master
| 2020-05-21T16:44:47.255629
| 2017-02-12T10:14:58
| 2017-02-12T10:14:58
| 65,620,376
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,568
|
r
|
NaiveBayes.R
|
# NaiveBayes: fit a naive Bayes classifier on the CTR data and evaluate it
# on a 70/30 hold-out split.
# install.packages("e1071")   # install once by hand, not on every run
library(e1071)
CTR_SD_Data <- read.csv("/home/raghunandangupta/Downloads/splits/splitaa")
#View(CTR_SD_Data)

# Split the data into training and test rows
rows <- seq_len(nrow(CTR_SD_Data))
head(rows)
train_rows <- sample(x = rows, size = 0.7 * nrow(CTR_SD_Data)) # selecting 70% random sample of row numbers as training data
head(train_rows)
TrainData <- CTR_SD_Data[train_rows, ]  # all randomly selected rows
TestData <- CTR_SD_Data[-train_rows, ]  # all rows not in train_rows

# Build the model on the training data (column 2 holds the class label)
nblassifier <- naiveBayes(TrainData[, -2], factor(TrainData[, 2]))
table(predict(nblassifier, TrainData[, -2]), factor(TrainData[, 2]))

# Confusion matrix on the TEST data.
# table(predicted, actual): rows = predicted class, columns = true class, so
#   TN = [1,1], FP = [2,1], FN = [1,2], TP = [2,2]
confusionMatrix = table(predict(nblassifier, TestData[,-2]), factor(TestData[,2])); confusionMatrix
accuracry = sum(diag(confusionMatrix))/sum(confusionMatrix) * 100; accuracry
# Precision: proportion of predicted-positive cases that are truly positive,
# TP / (TP + FP). (The original divided by the column total, which is recall.)
precision = confusionMatrix[2,2] / (confusionMatrix[2,2] + confusionMatrix[2,1]); precision
# Recall: proportion of actual-positive cases that were predicted positive,
# TP / (TP + FN). (The original divided by the row total, which is precision.)
recall = confusionMatrix[2,2] / (confusionMatrix[2,2] + confusionMatrix[1,2]); recall
# False positive rate: predicted positive among actual-negative cases,
# FP / (FP + TN). (The original computed FN / (TN + FN).)
fpr = confusionMatrix[2,1] / (confusionMatrix[2,1] + confusionMatrix[1,1]); fpr
|
2eaf2f3d735539150ebefd7ded2efbaf107a62f5
|
f057a79b8c1cb6ea00ec6ff1c06852b2d660537a
|
/man/sp2df.Rd
|
bdaf81c2ea4f73148d96632dca81d68711b4b53b
|
[] |
no_license
|
kcucchi/myUtils
|
985bed13a1378e4ee5bdbf97664c098ad4e1440f
|
14418d7acf4f0c77643b1f3391180871f07b9a04
|
refs/heads/master
| 2021-04-12T10:05:45.741265
| 2018-08-08T22:35:06
| 2018-08-08T22:35:06
| 126,282,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 423
|
rd
|
sp2df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_utils.R
\name{sp2df}
\alias{sp2df}
\title{Transforms a sp object into a dataframe to plot into ggplot}
\usage{
sp2df(x_sp)
}
\arguments{
\item{x_sp}{the sp object to transform into dataframe}
}
\value{
the corresponding dataframe to be plotted using geom_polygon
}
\description{
Transforms a sp object into a dataframe to plot into ggplot
}
|
6a4df4dfda694143909cbb7dd6703e42ab3945a9
|
d4bbec7817b1704c40de6aca499625cf9fa2cb04
|
/src/lib/distributions/signrank/__test__/fixture-generation/qsign1a.R
|
a160108ee5379367167705c91ae73835f0423b11
|
[
"MIT"
] |
permissive
|
R-js/libRmath.js
|
ac9f21c0a255271814bdc161b378aa07d14b2736
|
9462e581da4968938bf4bcea2c716eb372016450
|
refs/heads/main
| 2023-07-24T15:00:08.372576
| 2023-07-16T16:59:32
| 2023-07-16T16:59:32
| 79,675,609
| 108
| 15
|
MIT
| 2023-02-08T15:23:17
| 2017-01-21T22:01:44
|
TypeScript
|
UTF-8
|
R
| false
| false
| 470
|
r
|
qsign1a.R
|
# W+ values that approx map to probabilities = [0,1]
# W+
1 0
2 258
3 280
4 294
5 305
6 314
7 322
8 329
9 335
10 341
11 347
12 352
13 357
14 362
15 366
16 371
17 375
18 379
19 383
20 387
21 391
22 395
23 399
24 402
25 406
26 410
27 414
28 418
29 421
30 425
31 429
32 433
33 437
34 441
35 445
36 449
37 454
38 458
39 463
40 468
41 473
42 479
43 485
44 491
45 498
46 506
47 515
48 526
49 540
50 562
51 820
|
0a50751d7dd0ee6c1de36507af88838ae4b68166
|
6590eea4dd7a55c10b29a4a8c3286cb7023100a3
|
/cachematrix.R
|
4a75ba1588d968f6604e9ec21f8a4b2da0ad05bf
|
[] |
no_license
|
Swisshenri/ProgrammingAssignment2
|
8ccf87c6d14ee79cb88eef4f02c526133dfe3348
|
83c44220bb36bb5c18e9984db0ec094b66e35aa4
|
refs/heads/master
| 2020-05-30T08:28:36.058170
| 2019-06-01T18:49:00
| 2019-06-01T18:49:00
| 189,622,836
| 0
| 0
| null | 2019-05-31T16:06:32
| 2019-05-31T16:06:31
| null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
cachematrix.R
|
## This function allows for creating a matrix
## to use it with another function cacheSolve that will invert
## the matrix / uses "<<-" for global variable (to do cache)
makeCacheMatrix <- function(x = matrix()) {
minv <- NULL
set <- function(y) {
x <<- y
minv <<- NULL
}
get <- function() x
setinv <- function(solvMtx) minv <<- solvMtx
getinv <- function() minv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## The function cacheSolve computes de Inverse of a matrix and
## tests if the matrix has already been inverted to use the
## cached data
cacheSolve <- function(x, ...) {
## x$get stores the input matrix, solve(data) inverts it
## the if statement test "minv" variable if full matrix
## (remember "<<-" assgnmt from makeCacheMstrix)
minv <- x$getinv()
if(!is.null(minv)){
message("getting cached data")
return(minv)
}
data <- x$get()
minv <- solve(data)
x$setinv(minv)
minv
}
|
d413b830f97653af3abf8381d057e04cfad54701
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/chilling/04_make_figures/frost_bloom/crossOvers/post_processing_1_corssovers.R
|
c8baefadabb4b7c4e38f0959d9a665b16d9f97c0
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 4,212
|
r
|
post_processing_1_corssovers.R
|
rm(list=ls())
library(data.table)
library(dplyr)
options(digits=9)
options(digit=9)
source_path_1 = "/Users/hn/Documents/GitHub/Ag/chilling/chill_core.R"
source_path_2 = "/Users/hn/Documents/GitHub/Ag/chilling/chill_plot_core.R"
source(source_path_1)
source(source_path_2)
#############################################
data_dir <- "/Users/hn/Desktop/Desktop/Ag/check_point/"
data_dir <- paste0(data_dir, "chilling/frost_bloom/Feb/")
#############################################
six_models <- c("BNU-ESM", "CanESM2", "GFDL-ESM2G",
"bcc-csm1-1-m", "CNRM-CM5", "GFDL-ESM2M")
emissions <- c("RCP 4.5", "RCP 8.5")
cities <- c("Hood River", "Walla Walla", "Richland",
"Yakima", "Wenatchee", "Omak")
apple_types <- c("Cripps Pink", "Gala", "Red Deli") #
fruit_types <- c("apple", "Cherry", "Pear")
thresholds <- c("threshold: 70", "threshold: 75")
time_window <- c("2070_2094")
vect <- c("city", "model", "emission", "thresh",
"time_window", "fruit_type")
fruit <- fruit_types[1]
for (fruit in fruit_types){
full_cripps <- CJ(cities, six_models, emissions, thresholds, time_window, c(paste0(fruit, ": Cripps Pink")))
full_gala <- CJ(cities, six_models, emissions, thresholds, time_window, c(paste0(fruit, ": Gala")))
full_red <- CJ(cities, six_models, emissions, thresholds, time_window, c(paste0(fruit, ": Red Deli")))
setnames(full_cripps, old=paste0("V", 1:6), new=vect)
setnames(full_gala, old=paste0("V", 1:6), new=vect)
setnames(full_red, old=paste0("V", 1:6), new=vect)
all_data <- read.csv(paste0(data_dir, fruit, "_crossovers.csv"),
as.is=TRUE)
cripps <- all_data %>%
filter(fruit_type == paste0(fruit, ": Cripps Pink")) %>%
data.table()
gala <- all_data %>%
filter(fruit_type == paste0(fruit, ": Gala")) %>%
data.table()
red <- all_data %>%
filter(fruit_type == paste0(fruit, ": Red Deli")) %>%
data.table()
cripps <- merge(full_cripps, cripps, all.x=TRUE)
gala <- merge(full_gala, gala, all.x=TRUE)
red <- merge(full_red, red, all.x=TRUE)
cripps[is.na(cripps)] <- 0
gala[is.na(gala)] <- 0
red[is.na(red)] <- 0
cripps <- cripps[order(city, model, emission, thresh, fruit_type), ]
gala <- gala[order(city, model, emission, thresh, fruit_type), ]
red <- red[order(city, model, emission, thresh, fruit_type), ]
cripps_70_85 <- cripps %>% filter(thresh=="threshold: 70" & emission=="RCP 8.5")
cripps_70_45 <- cripps %>% filter(thresh=="threshold: 70" & emission=="RCP 4.5")
cripps_70 <- rbind(cripps_70_85, cripps_70_45)
cripps_75_85 <- cripps %>% filter(thresh=="threshold: 75" & emission=="RCP 8.5")
cripps_75_45 <- cripps %>% filter(thresh=="threshold: 75" & emission=="RCP 4.5")
cripps_75 <- rbind(cripps_75_85, cripps_75_45)
cripps <- rbind(cripps_75, cripps_70)
red_70_85 <- red %>% filter(thresh=="threshold: 70" & emission=="RCP 8.5")
red_70_45 <- red %>% filter(thresh=="threshold: 70" & emission=="RCP 4.5")
red_70 <- rbind(red_70_85, red_70_45)
red_75_85 <- red %>% filter(thresh=="threshold: 75" & emission=="RCP 8.5")
red_75_45 <- red %>% filter(thresh=="threshold: 75" & emission=="RCP 4.5")
red_75 <- rbind(red_75_85, red_75_45)
red <- rbind(red_75, red_70)
gala_70_85 <- gala %>% filter(thresh=="threshold: 70" & emission=="RCP 8.5")
gala_70_45 <- gala %>% filter(thresh=="threshold: 70" & emission=="RCP 4.5")
gala_70 <- rbind(gala_70_85, gala_70_45)
gala_75_85 <- gala %>% filter(thresh=="threshold: 75" & emission=="RCP 8.5")
gala_75_45 <- gala %>% filter(thresh=="threshold: 75" & emission=="RCP 4.5")
gala_75 <- rbind(gala_75_85, gala_75_45)
gala <- rbind(gala_75, gala_70)
write.table(cripps,
file = paste0(data_dir, fruit, "_cripps_crossovers.csv"),
row.names=FALSE, na="", col.names=TRUE, sep=",")
write.table(gala,
file = paste0(data_dir, fruit, "_gala_crossovers.csv"),
row.names=FALSE, na="", col.names=TRUE, sep=",")
write.table(red,
file = paste0(data_dir, fruit, "_red_crossovers.csv"),
row.names=FALSE, na="", col.names=TRUE, sep=",")
}
|
4a960ceea54ee6d4eb22e8ba1eb082ca9ec2bc5c
|
b8bb5f8022c0def70493d747648342dd43d5000f
|
/man/detBiallHitType.Rd
|
84cd49b6acf295f420a854a5bf7b4c112dd9c3dd
|
[] |
no_license
|
UMCUGenetics/hmfGeneAnnotation
|
a76321e2d01efcbd19467621fb4e5f4977c6c23f
|
67227c56232184de9b673cdcc814e9884e454943
|
refs/heads/master
| 2021-07-25T22:16:07.883012
| 2020-05-04T10:47:56
| 2020-05-04T10:47:56
| 167,984,352
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 484
|
rd
|
detBiallHitType.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postProcessDiplotypes.R
\name{detBiallHitType}
\alias{detBiallHitType}
\title{Determine biallelic hit type}
\usage{
detBiallHitType(diplotypes, min.a1.score = 1, min.a2.score = 1)
}
\arguments{
\item{diplotypes}{Diplotypes dataframe}
\item{min.a1.score}{Min score to assess hit type}
\item{min.a2.score}{See above}
}
\value{
A character vector of hit types
}
\description{
Determine biallelic hit type
}
|
2d54d39a08f8e1ffd4c2aebd64e8ff2633ed2a74
|
bb2f9a5339badd495d41334c8df70bf5c3b29a28
|
/R/epicurve.R
|
f4ed6899101537bedfbc5cf56d4780180cd9df71
|
[] |
no_license
|
thlytras/Rivets
|
7eeee206e49e462cf1a7eb06b3b372b7bddc29e7
|
d438d5f5c8c01b5ebd09be9d17e3700eaa14e3a6
|
refs/heads/master
| 2023-07-22T04:25:04.911329
| 2023-07-13T11:20:33
| 2023-07-13T11:20:33
| 95,921,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,832
|
r
|
epicurve.R
|
#' Draw a classic style epidemic curve (with rectangles)
#'
#' epikml() takes a set of coordinates and other associated info as input, and creates
#' a KML (Keyhole Markup Language) file that can be opened with Google Earth or other
#' similar programs. It's original intention was to plot disease cases, but can find wider
#' use as well.
#'
#' @param x Data vector of integers or 'Date' objects
#' @param series Factor of equal length as x, used to group cases and color them separately
#' @param col Color, or vector of colors of length equal to the levels of \code{series}.
#' @param xlim Limits for the x axis. Defaults to the range of \code{x},
#' plus/minus \code{xmargin}
#' @param ymax Maximum for the y axis. Leaving \code{NA} (the default) will calculate it
#' automatically. Specify it manually if needed (for example, to create a slideshow of
#' epicurves to be shown in succession).
#' @param xlab Title for x axis
#' @param ylab Title for y axis
#' @param xaxt x axis type. Specifying \code{xaxt="n"} suppresses plotting of the x axis.
#' You can then draw it explicitly with \code{link{axis}}.
#' @param yaxt y axis type. Specifying \code{yaxt="n"} suppresses plotting of the y axis.
#' You can then draw it explicitly with \code{link{axis}}.
#' @param box If FALSE, rectangles are drawn with the appropriate aspect ratio to fit
#' the range of both axes. If TRUE, the function adjusts the length of the y axis
#' so that squares are drawn
#' @param xmargin When drawing the x axis and the limits are not explicitly set, extend
#' the range of x by this amount and use that as the limits of the x axis.
#'
#' @details Scales are NOT drawn for either axis. Use \code{\link{axis}} to draw them
#' explicitly to your liking.
#'
#' @return Nothing. The function plots the epidemic curve on the current graphics device.
#'
#' @examples
#' # Create some dummy data
#' sampdates <- seq(as.Date("2016-07-15"), as.Date("2016-09-15"), 1)
#' x <- sample(sampdates, 120, rep=TRUE)
#' gender <- sample(c("Male","Female"), 120, rep=TRUE)
#'
#' # Draw the epidemic curve
#' epicurve(x, gender, c("skyblue","pink"))
#'
#' @export
epicurve <- function(x, series=NA, col="green",
xlim=range(x, na.rm=TRUE)+xmargin*c(-1,1), ymax=NA, xlab=NA, ylab=NA,
xaxt="s", yaxt="s", box=FALSE, xmargin = 2) {
if (length(series)>1) {
series <- factor(series)
data <- table(factor(as.integer(x), levels=xlim[1]:xlim[2]), series)
} else {
data <- as.matrix(table(factor(as.integer(x), levels=xlim[1]:xlim[2])))
}
ratio <- ((par("plt")[2]-par("plt")[1])*par("fin")[1])/((par("plt")[4]-par("plt")[3])*par("fin")[2])
if (is.na(ymax)) {
ylimit <- ifelse(box,
(as.integer(diff(xlim)) + 1.5)/ratio + 0.5,
ceiling((max(rowSums(data))+2)/5)*5 + 0.5
)
} else { ylimit <- ymax }
plot(0, xaxs="i", yaxs="i", bty="l", xaxt="n", yaxt="n", type="n",
xlab=xlab, ylab=ylab, ylim=c(0.5, ylimit), las=2,
xlim=as.integer(xlim)+c(-0.5,0.5))
# My original box drawing function
# boxi <- function(x,y.from,y.to, col="green") {
# polygon(c(x-0.5,x+0.5,x+0.5,x-0.5),c(y.from-0.5,y.from-0.5,y.to+0.5,y.to+0.5),col=col)
# if (y.to-y.from>0) sapply(y.from:(y.to-1),function(j)lines(c(x-0.5,x+0.5),c(j+0.5,j+0.5)))
# }
# A quicker version
boxi <- function(x,y.from,y.to, col="green") {
polygon(
rep(c(x-0.5,x+0.5,x+0.5,x-0.5),y.to-y.from+1),
as.double(sapply(y.from:y.to,function(j)c(j-0.5,j-0.5,j+0.5,j+0.5))),
col=col
)
}
for (i in 1:nrow(data)) {
offset=0
for (j in 1:ncol(data)) {
if (data[i,j]>0) {
boxi(i+as.integer(xlim[1])-1,1+offset,data[i,j]+offset, col=col[j])
offset=offset+data[i,j]
}
}
}
if (xaxt!="n") axis(1, at=pretty(x), labels=pretty(x))
if (yaxt!="n") axis(2)
}
|
c8be3a02e736a5bcee4eaf8d9b2d810c6c774aa9
|
4bba0777dc4516afc16af4693e22b7685e3cc77a
|
/scripts/02_make_RNDsp_plots.R
|
d6822f8bb2e1ecb6bd2e4a983215af23e4d264b4
|
[] |
no_license
|
CMPG/vole_genome_analysis
|
27d9f89f25022212358357c752e0dab2d41573b3
|
424431f6c62079e1bff77382b1a49dbc988a9ee7
|
refs/heads/master
| 2023-04-09T13:49:13.436480
| 2021-04-20T21:29:08
| 2021-04-20T21:29:08
| 274,372,809
| 2
| 1
| null | 2021-04-20T21:29:08
| 2020-06-23T10:07:48
|
R
|
UTF-8
|
R
| false
| false
| 2,865
|
r
|
02_make_RNDsp_plots.R
|
### RNDsp plots
source("./scripts/Resequencing-data-analysis-functions.R")
require(GenomicRanges)
load("./data/annotation.rda")
load("./data/pi_rndsp.rda")
output.dir <- "./plots"
if(!dir.exists(output.dir)) dir.create(output.dir)
stamp <- format(Sys.time(), "/%Y%m%d_%H%M%S")
span.loess <- 0.01
vole.col <- c(rep("#228B22", 4), RColorBrewer::brewer.pal(9, "Set1"))
samp.names <- c("M. arvalis W","M. arvalis I","M. arvalis E","M. arvalis C",
"M. agrestis","M. duodecimcostatus","M. oeconomus","M. brandti","M. glareolus",
"M. pennsylvanicus","M. cabrerae","M. lusitanicus","M. levis")
rndsps <- stats[, grep("rndsp", colnames(stats))]
ind.ord <- c(4,1,2,3,12,6,11,10,5,7,8,9)
# 3 x 4 individuals
## arvalis
# pdf(file = paste0(output.dir, stamp, "_RNDsp.pdf"), paper = "a4", width = 10, height = 8)
png(file = paste0(output.dir, stamp, "_RNDsp.png"), width = 17, height = 22, res = 600, units = "cm")
par(mfrow=c(5,1), mar = c(1,5,1,0), omi = c(0.8,0,0,0))
for(i in c(1:4, 8)) {
Y <- rndsps[ind.ord][[i]]
a <- samp.names[-9][ind.ord][i]
b <- bquote(paste('RND'['sp'], " (", .(a), ")"))
plotOut(Y, ylim = c(0, ifelse(i == 8, 0.6, 0.4)), scale = ifelse(i == 1, 0.3, 0.4), clust = 100, ylab = b)
if(i==8) {
segments(1900, 0.4, 2000, 0.4, lwd = 2)
text(1950, 0.4, "100 Mb",pos = 1,offset = 0.5, adj = 0.5)
}
}
ax <- tapply(res$pos.cumul, res$sc, mean)
axis(1, at = ax, labels = 1:22, lwd.ticks = 1, lwd = 0, cex.axis = 0.7)
title(xlab = "Scaffold", outer = TRUE)
dev.off()
png(file = paste0(output.dir, stamp, "_RNDsp_supp1.png"), width = 10, height = 8, res = 600, units = "in")
## close
par(mfrow=c(3,1), mar = c(1,5,1,0), omi = c(0.8,0,0,0))
for(i in 5:7) {
Y <- rndsps[ind.ord][[i]]
a <- samp.names[-9][ind.ord][i]
b <- bquote(paste('RND'['sp'], " (", .(a), ")"))
plotOut(Y, ylim = c(0,0.5), scale = 0.5, clust = 100, ylab = b)
if(i==7) {
segments(1900, 0.25, 2000, 0.25, lwd = 2)
text(1950, 0.25, "100 Mb",pos = 1,offset = 0.5, adj = 0.5)
}
}
ax <- tapply(res$pos.cumul, res$sc, mean)
axis(1, at = ax, labels = 1:22, lwd.ticks = 1, lwd = 0, cex.axis = 0.7)
title(xlab = "Scaffold", outer = TRUE)
dev.off()
png(file = paste0(output.dir, stamp, "_RNDsp_supp2.png"), width = 10, height = 8, res = 600, units = "in")
## far
par(mfrow=c(4,1), mar = c(1,5,1,0), omi = c(0.8,0,0,0))
for(i in 9:12) {
Y <- rndsps[ind.ord][[i]]
a <- samp.names[-9][ind.ord][i]
b <- bquote(paste('RND'['sp'], " (", .(a), ")"))
plotOut(Y, ylim = c(0,0.8), scale = 0.5, clust = 100, ylab = b)
if(i==12) {
segments(1900, 0.25, 2000, 0.25, lwd = 2)
text(1950, 0.25, "100 Mb",pos = 1,offset = 0.5, adj = 0.5)
}
}
ax <- tapply(res$pos.cumul, res$sc, mean)
axis(1, at = ax, labels = 1:22, lwd.ticks = 1, lwd = 0, cex.axis = 0.7)
title(xlab = "Scaffold", outer = TRUE)
dev.off()
|
b4804e1fad5ab85f6546aa29429b7f40393b4dae
|
9ae81d9cf8c1912e2a1ec9294596b798307e953b
|
/test.R
|
eedc310859bf54cf19b6f2e0af9e44b25ea8ddcb
|
[] |
no_license
|
15210280436/bizforecast-exercise
|
62256d20317abdd1e6edf6c927575b487ade8953
|
8cd3cf8ff6c04170de3f7e2316b49a0c2a035a05
|
refs/heads/master
| 2020-05-07T22:32:59.472591
| 2019-04-18T06:52:05
| 2019-04-18T06:52:05
| 180,949,569
| 0
| 0
| null | 2019-04-12T07:00:25
| 2019-04-12T07:00:25
| null |
UTF-8
|
R
| false
| false
| 2,582
|
r
|
test.R
|
library(magrittr)
library(tidyverse)
library(purrr)
library(dplyr)
library(infer)
set.seed(1212)
income=350:400 #销售价格
leads_cost=8:10 #leads成本
constant_cost=20000 #固定成本
leads_cnt=3000:4000 #每天leads数量
leads_conversion <- matrix(rnorm(30, mean = 0.04, sd = 0.5/100), # 转化率
ncol = 2, byrow = FALSE)
i=1:1000
profit_set <- function(income,leads_cnt,leads_conversion,constant_cost,leads_cost,m) {
income_1 = sample(income, size = m, replace = TRUE)
leads_cnt_1 = sample(leads_cnt, size = m, replace = TRUE)
leads_conversion_1 = sample(leads_conversion, size = m, replace = TRUE)
leads_cost_1 = sample(leads_cost, size = m, replace = TRUE)
leads_cnt_1*leads_conversion_1*income_1-constant_cost-leads_cnt_1*leads_cost_1
}
temp <- map_dbl(i, ~ {
replicate(30,profit_set(income,leads_cnt,leads_conversion,constant_cost,leads_cost,1)) %>%
sum()
})
quantile(temp,c(0.005,0.995))
temp1 <- temp %>%
as_data_frame()
temp1 %>%
ggplot(aes(x=value)) +
geom_histogram(aes(y = ..density..),
col = "black", fill = "salmon", alpha = .7, bins = 45) +
geom_density(col = "red", size = 1.5)+
scale_x_continuous(labels = scales::dollar) +
theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
labs(x = "Profit", y = "Density")
#第三题
temp1 %>%
ggplot(aes(x=value)) +
stat_ecdf() + #利润累计函数
scale_x_continuous(labels = scales::dollar) +
theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
labs(x = "Profit", y = "Density")
# 利润高于10万
temp_10 <- ifelse(temp1>=100000,TRUE,FALSE)
table(temp_10) %>% prop.table()
# 利润小于0
temp_0 <- ifelse(temp1<0,TRUE,FALSE)
table(temp_0) %>% prop.table()
#第四题
#leads_conversion_4=-0.181044846739997+0.0245604788599997*leads_cost_4
temp41 <- map_dbl(i, ~ {
replicate(30,profit_set(income,leads_cnt,-0.181044846739997+0.0245604788599997*leads_cost,constant_cost,leads_cost,1)) %>%
sum()
})
temp41 <- temp41 %>%
as_data_frame()
temp41 %>%
ggplot(aes(x=value)) +
geom_histogram(aes(y = ..density..),
col = "black", fill = "salmon", alpha = .7, bins = 45) +
geom_density(col = "red", size = 1.5)+
scale_x_continuous(labels = scales::dollar) +
theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
labs(x = "Profit", y = "Density")
# 利润高于10万
temp_410 <- ifelse(temp41>=100000,TRUE,FALSE)
table(temp_410) %>% prop.table()
# 利润小于0
temp_40 <- ifelse(temp41<0,TRUE,FALSE)
table(temp_40) %>% prop.table()
|
3aaf92ed76b816d0f4f5f4ddc7e4e043f8091b4f
|
81a14c15f1f686b71fb97c3b1b34e336125577dc
|
/send_errors_to_server.R
|
f86f9cad5518b2ed075fc7201078abbf375fa3c4
|
[] |
no_license
|
ccssaude/farmac-sync
|
a86924a3d4d619f626e990023370927fbf294f6c
|
e81747445574b9d9c969e7bb9310c8880139e0f1
|
refs/heads/master
| 2022-12-25T06:51:57.787440
| 2020-10-07T20:40:36
| 2020-10-07T20:40:36
| 256,339,191
| 0
| 0
| null | 2020-10-07T20:40:37
| 2020-04-16T21:58:09
|
R
|
UTF-8
|
R
| false
| false
| 2,681
|
r
|
send_errors_to_server.R
|
###########################################################
# con_farmac = FALSE para casos de conexao nao estabelecida
# if (!is.logical(con_farmac)) {
# # Get local connections
# # con_local = FALSE para casos de conexao nao estabelecida
# if (!is.logical(con_local)) {
if (is.farmac) {
log_error_server <- getLogErrorFromServer(con_farmac, farmac_name)
} else {
log_error_server <-
getLogErrorFromServer(con_farmac, main_clinic_name)
}
if (!is.logical(log_error_server)) {
if (nrow(log_error_server) == 0) {
if (nrow(logErro) == 0) {
if (is.farmac) {
message(paste0( farmac_name, ' - sem logs por enviar.'))
} else{
message(paste0( main_clinic_name, ' - sem logs por enviar.'))
}
}
else {
log_error_to_send <- logErro
status <-
sendLogError(con_postgres = con_farmac, df.logerror = log_error_to_send)
if (!status) {
# salvar o ficheiro dos logs das dispensas
save(logErro, file = 'logs\\logErro.RData')
}
}
}
else {
if (nrow(logErro) == 0) {
if (is.farmac) {
message(paste0(farmac_name, ' - sem logs por enviar.'))
} else{
message(paste0(main_clinic_name, ' - sem logs por enviar.'))
}
}
else {
log_error_to_send <-
anti_join(logErro,
log_error_server,
by = c('data_evento', 'erro'))
if (nrow(log_error_to_send) > 0) {
status <-
sendLogError(con_postgres = con_farmac, df.logerror = log_error_to_send)
if (!status) {
# salvar o ficheiro dos logs das dispensas
save(logErro, file = 'logs\\logErro.RData')
} else{
message('Log erros enviados com sucesso')
}
} else{
if (is.farmac) {
message(paste0(farmac_name, ' - sem logs por enviar.'))
} else{
message(paste0(main_clinic_name, ' - sem logs por enviar.'))
}
}
}
}
} else {
save(logErro, file = 'logs\\logErro.RData')
}
# ---
# }
# else {
# # erro de comunicacao
# save(logErro, file = 'logs\\logErro.RData')
# }
#
#
#
#
# } else {
# # erro de comunicacao
# save(logErro, file = 'logs\\logErro.RData')
# }
|
b9076323a3eb90fa847b74d043fe3c852f56df50
|
64ceb4e31d673a06f8e4c7a0d6c2ae68a926aa3e
|
/gamma.R
|
b93cc0a721f6a8b7252f84b681605dd803e3ca83
|
[] |
no_license
|
jessehamner/ResearchMethodsRScripts
|
b6fb207c62f8e0cb314a4cc133b3571446b1465d
|
c6a95686cfd5c73ec8798ed09ec328e743d21ff8
|
refs/heads/master
| 2021-01-11T08:30:13.094982
| 2020-10-26T15:09:00
| 2020-10-26T15:09:00
| 72,244,076
| 0
| 0
| null | 2020-10-23T14:54:21
| 2016-10-28T21:35:41
|
R
|
UTF-8
|
R
| false
| false
| 2,221
|
r
|
gamma.R
|
# Draw the gamma distribution.
library(ggplot2)
x <- seq(0, 100, 0.001)
#a <- 0.5
a <- 1.5
s <- 2
i<- seq(0,100,0.001)
y=dgamma(i, shape = a, scale = s)
y[length(y)] <-0
# graphics margins:
margins<-c(4.5,4.5,1,2)
# C<- data.frame(x,y)
B <- matrix(c(i,y), nrow=length(i),ncol=2)
C <-data.frame(B)
png(filename="gammadistribution.png",
width=5,
height=5,
units="in",
pointsize=10,
bg="white",
res=300,
type="quartz")
plot(x,
dgamma(x, shape = a, scale = s),
type = "l",
ylim = c(0,0.3),
lwd = 2,
xlab="X value",
ylab="Probability of event occurrence",
xlim=c(0,10)
)
# polygon(c(i),dgamma(i, shape = a, scale = s), density=5, angle=45)
polygon(c(i),dgamma(i, shape = a, scale = s), col="darkgreen")
text(6, 1.4, expression(f(x) == frac(1, s^a * Gamma(a)) * x^(a - 1) * exp(-frac(x, s))))
text(6, 1.2, expression(paste(a > 0, ": shape parameter")))
text(6, 1.1, expression(paste(s > 0, ": scale parameter")))
title("Gamma distribution: parameters shape=1.5 and scale=2",cex = 0.6)
dev.off()
png(filename="gammadistribution_simple.png",
width=5,
height=5,
units="in",
pointsize=10,
bg="white",
res=300,
type="quartz")
par(mar=margins)
plot(x, dgamma(x, shape = a, scale = s), type = "l",
ylim = c(0,0.3),
xlim=c(0,10),
lwd = 2,
xlab="X value",
ylab="Probability of event occurrence")
# polygon(c(i),dgamma(i, shape = a, scale = s), density=5, angle=45)
polygon(c(i),dgamma(i, shape = a, scale = s), col="darkgreen")
#text(6, 1.4, expression(f(x) == frac(1, s^a * Gamma(a)) * x^(a - 1) * exp(-frac(x, s))))
#text(6, 1.2, expression(paste(a > 0, ": shape parameter")))
#text(6, 1.1, expression(paste(s > 0, ": scale parameter")))
#title("Gamma distribution: parameters shape=1.5 and scale=2",cex = 0.6)
dev.off()
png(filename="gammadistributionnolabels.png", width=5, height=5, units="in", pointsize=10, bg="white", res=300, type="quartz")
bb <- ggplot(data=C, aes(x=i,y=y)) +
geom_point(size=0.5, color="darkgreen") +
labs( x="X value", y="Probability") +
geom_polygon(x=i,y=y, fill="darkgreen", size=0.5, alpha=1) +
xlim(0,10)
bb
dev.off()
|
c4467823284f21c9eda9128505cc6a0be20a8b78
|
5c6f1f6ca431f60d06ad2c09f1f031885be27e29
|
/Simple CTA Analysis/cta.R
|
8919f28698dbeabb31a35f8c17da1869c3b4ca0e
|
[] |
no_license
|
ajayjjain/nonpolitical-statistical-analysis
|
dcc029a2dc71494a380b3321a59c74e500532e8b
|
109cbda368e859291a60bbcf3cb89921d1eed8a9
|
refs/heads/master
| 2021-01-24T16:56:49.695019
| 2018-02-28T21:08:31
| 2018-02-28T21:08:31
| 123,219,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,013
|
r
|
cta.R
|
library(ggmap)
cta = read.csv("/Users/Ajay/Downloads/Webservice-CTA-master/share/cta_L_stops.csv")
chicago <- get_map(location = 'chicago', zoom=10)
colors = c()
line = c()
reds = c()
blues = c()
greens = c()
pinks = c()
purples = c()
browns = c()
yellows = c()
oranges = c()
for (row in 1:nrow(cta)){
if (!(grepl("&", cta$STATION_DESCRIPTIVE_NAME[row]))){
if (cta$Red[row] == 1){
colors = append(colors, "red")
}
else if (cta$Blue[row] == 1){
colors = append(colors, "deepskyblue1")
}
else if (cta$Brn[row] == 1){
colors = append(colors, "brown")
}
else if (cta$G[row] == 1){
colors = append(colors, "forestgreen")
}
else if (cta$P[row] == 1 | cta$Pexp[row] == 1){
colors = append(colors, "purple")
}
else if (cta$Y[row] == 1){
colors = append(colors, "gold")
}
else if (cta$Org[row] == 1){
colors = append(colors, "orange")
}
else{
colors = append(colors, "pink")
}
}
else{
colors = append(colors, "black")
}
if (cta$Red[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% reds)){
line = append(line, "Red")
reds = append(reds, cta$PARENT_STOP_ID[row])
#reds = append(reds, 1)
}
}
if (cta$Blue[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% blues)){
line = append(line, "Blue")
blues = append(blues, cta$PARENT_STOP_ID[row])
}
}
if (cta$Brn[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% browns)){
line = append(line, "Brown")
browns = append(browns, cta$PARENT_STOP_ID[row])
}
}
if (cta$G[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% greens)){
line = append(line, "Green")
greens = append(greens, cta$PARENT_STOP_ID[row])
}
}
if (cta$P[row] == 1 | cta$Pexp[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% purples)){
line = append(line, "Purple")
purples = append(purples, cta$PARENT_STOP_ID[row])
}
}
if (cta$Y[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% yellows)){
line = append(line, "Yellow")
yellows = append(yellows, cta$PARENT_STOP_ID[row])
}
}
if (cta$Pink[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% pinks)){
line = append(line, "Pink")
pinks = append(pinks, cta$PARENT_STOP_ID[row])
}
}
if (cta$Org[row] == 1){
if (!(cta$PARENT_STOP_ID[row] %in% oranges)){
line = append(line, "Orange")
oranges = append(oranges, cta$PARENT_STOP_ID[row])
}
}
}
cta$colors = colors
ggmap(chicago) + geom_point(data=cta, aes(x=LON, y = LAT), color = colors)
values = c("deepskyblue1", "brown", "forestgreen", "orange", "pink", "purple", "red", "gold")
ggplot() + geom_bar(aes(x = line), fill = values) + xlab("CTA Line") + ylab("Number of Stations")
cta$ADA[cta$ADA == 0] <- "Not ADA compliant"
cta$ADA[cta$ADA == 1] <- "ADA compliant"
adaColors = c("seagreen3", "slateblue3")
ggplot(cta) + geom_bar(aes(x = ADA), fill = adaColors) + xlab("ADA compliance") + ylab("Number of CTA Stations")
|
2dc380a4f9991cf8d07372305b3e70df6cdb8ecf
|
a3bf051cc61a4b28acf455d8b29ea9ca84fe7538
|
/code/008_Unemployment initial claims.R
|
ace33c9218178cdc542e3595809c8c9b237cdd77
|
[] |
no_license
|
pgsmith2000/dailyecon
|
bca27c432007c59283b0adcb6081001d48fc1c4d
|
1d7077a2718508b846c03395d98178af3f06376c
|
refs/heads/master
| 2020-09-01T18:15:29.328782
| 2019-11-12T19:34:45
| 2019-11-12T19:34:45
| 219,024,281
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
008_Unemployment initial claims.R
|
# set start and end dates
end_date <- Sys.Date() - 1
if (wday(end_date) == 1){
end_date <- end_date - 3
} else if (wday(end_date) == 7){
end_date <- end_date - 2
}
start_date <- end_date - (3*(13*7))
# last 45 days of SLVPRUSD
fr_IC4WSA <- makeFREDtable("IC4WSA", start.date = start_date, end.date = end_date)
# caluculate limit for gragph
delta <- ((max(fr_IC4WSA$IC4WSA) - min(fr_IC4WSA$IC4WSA))/.8) *.1
low.delta <- min(fr_IC4WSA$IC4WSA) - (6*delta)
high.delta <- max(fr_IC4WSA$IC4WSA) + (2*delta)
# Plot it
p8 <- ggplot(fr_IC4WSA, aes(x=date, y=IC4WSA))
p8 <- p8 + geom_line(size=2, color="peru") + theme_minimal() +
scale_x_date(breaks = pretty_breaks(6)) +
scale_y_continuous(limit=c(low.delta,high.delta)) +
stat_smooth(method = lm) +
labs(title="8. 4-Wk Avg Initial Claims", x="", y="",
caption = "US ETA.") +
theme_minimal() +
theme(legend.position = 'bottom') +
theme(axis.text.x = element_text(angle = 45))
ggsave("../output/p8.png", p8)
|
763980e2e26f87fc040a84b13fd59e8897d3e82d
|
ef3c8b0609b38ab0388babe411eb4eccaa4d94b4
|
/preprocessAffy.R
|
d4d9fcb917488abbebf2778b6b45ba098e0b8f99
|
[] |
no_license
|
DannyArends/ludeCode
|
d540c1a1d3e9373fe2beaf69a80390c483eb01c0
|
ff109c9fd5fa4204805e3ea672c749189d6ed670
|
refs/heads/master
| 2021-01-10T19:36:54.675900
| 2013-07-19T13:07:05
| 2013-07-19T13:07:05
| 10,618,682
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,882
|
r
|
preprocessAffy.R
|
source("http://bioconductor.org/biocLite.R") # Connect to bioConductor
#install.packages(c("preprocessCore", "biomaRt")
#biocLite(c("affy", "gcrma", "lumi", "gcrma","AnnotationDbi")) # Install the affy package
library(affy) # Load the affy package
library(gcrma) # gcRMA normalisation
library(lumi) # Load the illumina package
library(biomaRt) # Biomart annotations
library(AnnotationDbi) # Load the annotations package
library(preprocessCore) # Normalization
# AFFYMETRIX #
# We need the array info files: (CELname \t Other Columns)
GSE22886A <- read.csv("dataDescr/GSE22886.txt", sep="\t", header=FALSE, colClasses=("character"))
GSE6613A <- read.csv("dataDescr/GSE6613.txt", sep="\t", header=FALSE, colClasses=("character"))
GSE12288A <- read.csv("dataDescr/GSE12288.txt", sep="\t", header=FALSE, colClasses=("character"))
GSE3846A <- read.csv("dataDescr/GSE3846.txt", sep="\t", header=FALSE, colClasses=("character"))
GSE24250A <- read.csv("dataDescr/GSE24250.txt", sep="\t", header=FALSE, colClasses=("character"))
getCol <- function(x, aType){ GSE22886A[which(GSE22886A[,4]==aType), x] } # Helper function
controlSamples <- GSE6613A[which(grepl("control", GSE6613A[,2])),1]# Only control samples, Why addcomplexity
HGU133A <- paste0("GSE22886/", getCol(1,"[HG-U133A]"),".CEL.gz") # The Filenames for U133A
HGU133Awb <- c(paste0("GSE6613/", GSE6613A[,1] ,".CEL.gz"), # Filenames for whole blood controls
paste0("GSE12288/", GSE12288A[,1],".CEL.gz"), # Filenames for whole blood controls
paste0("GSE3846/", GSE3846A[,1] ,".CEL.gz"), # Filenames for whole blood controls
paste0("GSE24250/", GSE24250A[,1],".CEL.gz")) # Filenames for whole blood controls
# Loads CEL.gz data files and return nolog gcRMA expression data
loadCELdata <- function(filenames, file="", doQuantiles = FALSE){
library(affy)
data <- ReadAffy(filenames = filenames)
eset <- gcrma(data)
expr <- exprs(eset)
arrayNames <- colnames(expr)
probeNames <- rownames(expr)
if(doQuantiles){
expr <- normalize.quantiles(expr)
colnames(expr) <- arrayNames
rownames(expr) <- probeNames
}
if(file != "") write.csv(expr, file=file, quote = FALSE)
invisible(expr)
}
# Calculate the cell type means for array type aType
calcCellTypeMeans <- function(expData, aType, file = ""){
phenames <- unique(getCol(2, aType))
cTypeMeans <- NULL
for(phe in phenames){
cTypeMeans <- cbind(cTypeMeans, apply(expData[,which(getCol(2,aType) == phe)], 1, mean))
}
colnames(cTypeMeans) <- phenames
if(file != "") write.csv(cTypeMeans, file=file, quote = FALSE)
invisible(cTypeMeans)
}
## ---- Build whole-blood vs cell-type expression ratios (Affy HG-U133A) ----
if(!file.exists("expHGU133A_gcRMA_MORE.txt")){ # Load .CEL.gz data for the Affy HGU133A samples (first run only)
expression <- loadCELdata(c(HGU133A, HGU133Awb), "expHGU133A_gcRMA_MORE.txt", doQuantiles = TRUE)
}
## Re-read from disk so subsequent runs skip the slow normalisation step.
expression <- read.csv("expHGU133A_gcRMA_MORE.txt", row.names=1)
## Split arrays into purified cell-type samples (GSE22886 HG-U133A set)
## versus whole-blood samples (everything else).
cellTypeIds <- which(colnames(expression) %in% gsub("GSE22886/","", HGU133A))
wholeBlood <- expression[, -cellTypeIds]
cellTypes <- expression[, cellTypeIds]
write.csv(wholeBlood, file="expHGU133A_gcRMA_WholeBlood.txt", quote = FALSE)
write.csv(cellTypes, file="expHGU133A_gcRMA_CellType.txt", quote = FALSE)
## Per-probe whole-blood mean and SD, used below to z-score the cell-type means.
wholeMean <- apply(wholeBlood, 1, mean)
wholeSd <- apply(wholeBlood, 1, sd, na.rm=TRUE)
cellMeans <- calcCellTypeMeans(cellTypes, "[HG-U133A]", "expHGU133A_gcRMA_CellType_mean.txt")
cellMeans <- cellMeans[which(wholeSd!=0),] # Drop probes with zero whole-blood SD (would divide by zero below)
wholeMean <- wholeMean[which(wholeSd!=0)]
wholeSd <- wholeSd[which(wholeSd!=0)]
#cellTypeRatios <- cellTypeRatios[which(wholeMean > 4),]
#wholeMean <- wholeMean[which(wholeMean > 4)]
## NOTE(review): despite the name, these "ratios" are z-scores of the
## cell-type means relative to whole blood (the plain ratio is commented out).
cellTypeRatios <- (cellMeans - wholeMean) / wholeSd #cellMeans/wholeMean
write.csv(cellTypeRatios, file="expHGU133A_CellType_Ratios.txt", quote = FALSE)
## Map Affymetrix probe IDs to HGNC gene symbols via Ensembl biomaRt.
ids <- rownames(cellTypeRatios)
mart <- useMart("ensembl", dataset="hsapiens_gene_ensembl")
toGene <- getBM(attributes = c("affy_hg_u133a", "hgnc_symbol"),
filters="affy_hg_u133a", values=ids, mart=mart)
toGene <- toGene[which(!is.na(toGene[,2])),] # Drop probes with no annotated gene symbol (NA)
toGene <- toGene[which(!toGene[,2]==""),] # Drop probes whose symbol is the empty string
matched <- which(rownames(cellTypeRatios) %in% toGene[,1])
cellTypeRatios <- cellTypeRatios[matched, ] # Keep only probes that have a gene annotation
sortG <- match(rownames(cellTypeRatios), toGene[,1]) # Sorter to put toGene in the order of cellTypeRatios
## NOTE(review): this overwrites the ratios file written above, now with gene symbols attached.
write.csv(cbind(toGene[sortG,],cellTypeRatios), file="expHGU133A_CellType_Ratios.txt", quote = FALSE)
|
599caa6c533ce71b0bd75ec7a5de6937b3412420
|
0ff5853af9fd557f6591979a834d9fe82708d234
|
/R/drmEMstandard.R
|
c2fce16dd6347d800fdbf90515ec9b671136f4eb
|
[] |
no_license
|
csetraynor/drc
|
4c6deed9b783802852cfd3ed60c87dec6afc0ce5
|
8719d43a09711250cd791d7d2bc965558d3e62a6
|
refs/heads/master
| 2023-04-11T03:15:00.881506
| 2021-05-04T02:42:06
| 2021-05-04T02:42:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,933
|
r
|
drmEMstandard.R
|
## drmEMstandard: builds the estimation machinery for drc's "standard"
## estimation method -- an objective function, a log-likelihood function and
## accessors for the residual variance, variance-covariance matrix and
## parameter estimates.
##
## Args:
##   dose:        numeric dose vector; doses below 1e-15 are treated as zero.
##   resp:        numeric response vector, same length as dose.
##   multCurves:  function(dose, parm) evaluating the fitted curve(s).
##   doseScaling: divisor applied to dose before curve evaluation.
##
## Returns list(llfct, opfct, ssfct, rvfct, vcovfct, parmfct), consumed by
## the surrounding drc fitting code.
"drmEMstandard" <-
function(dose, resp, multCurves, doseScaling = 1)
{
## Defining a helper function for calculating the variance-covariance matrix
# vcFct <- function(beta0, beta, sigma2, len0)
# {
# vc <- (sigma2 / len0) * (beta %o% beta) / (beta0^4)
# diag(vc) <- diag(vc) + sigma2 / (beta0^2)
#
# return(vc)
# }
## Variance-covariance of responses scaled by the control level beta0
## (delta-method form); len0 = number of zero-dose observations.
vcFct <- function(beta0, beta, len0)
{
vc <- (1 / len0) * (beta %o% beta) / (beta0^4)
diag(vc) <- diag(vc) + (1 / (beta0^2))
return(vc)
}
zeroDose <- dose < 1e-15 # hardcoded tolerance of 1e-15
len0 <- sum(zeroDose)
## Variant of vcFct with extra cross terms between zero-dose and
## non-zero-dose observations; captures zeroDose/len0 from the enclosing scope.
vcFct2 <- function(beta0, betaVec)
{
# len0 <- weightVec[1] # in case len0 is a vector
vc <- (1 / len0) * (betaVec %o% betaVec) / (beta0^4)
diag(vc) <- diag(vc) + (1 / (beta0^2))
# zeroDose <- dose < doseTol
# print(vc[!zeroDose, zeroDose])
# print((1 / len0) * (-betaVec / (beta0^3)))
# print(vc[!zeroDose, zeroDose] + (1 / len0) * (-betaVec[!zeroDose] / (beta0^3)))
vc[!zeroDose, zeroDose] <- vc[!zeroDose, zeroDose] + (1 / len0) * (-betaVec[!zeroDose] / (beta0^3))
vc[zeroDose, !zeroDose] <- vc[zeroDose, !zeroDose] + (1 / len0) * (-betaVec[!zeroDose] / (beta0^3))
# print(vc[zeroDose, zeroDose])
# print(diag(vc[zeroDose, zeroDose]) + (1 / (len0 * beta0^2)) - (1 / (beta0^2)))
diag(vc[zeroDose, zeroDose]) <- diag(vc[zeroDose, zeroDose]) + (1 / (len0 * beta0^2)) - (1 / (beta0^2))
return(vc)
}
## Defining the objective function (generalised least squares criterion).
## NOTE(review): the parameter is named `c`, shadowing base::c inside this
## function, and the print() calls below look like leftover debugging output
## that will fire on every optimiser iteration -- confirm before release.
opfct <- function(c) # dose, resp and weights are fixed
{
print(c)
f0 <- multCurves(0, c)[1]
print(f0)
fVec <- multCurves(dose / doseScaling, c)
print(fVec)
# vcMat <- vcFct(f0, fVec, weightVec)
vcMat <- vcFct2(f0, fVec)
print(solve(vcMat)[1:6, 1:6])
sum( (resp - fVec) %*% solve(vcMat) %*% (resp - fVec))
}
## Defining self starter function (none available for this method)
ssfct <- NULL
## Defining the log likelihood function: c(log-likelihood, residual df)
llfct <- function(object)
{
# total <- (object$"data")[iv, 5]
# success <- total*(object$"data")[iv, 2]
# c( sum(log(choose(total, success))) - object$"fit"$"ofvalue", object$"sumList"$"df.residual" )
c(
-object$"fit"$value + sum(log(gamma(resp+1))),
object$"sumList"$"df.residual"
) # adding scale constant
}
## Defining functions returning the residual variance, the variance-covariance matrix, and the parameter estimates
# rvfct <- function(object)
# {
# object$"fit"$"value" / df.residual(object) # object$"sumList"$"df.residual"
# }
#
# vcovfct <- function(object)
# {
# solve(object$fit$hessian)
# }
#
# copied from drmEMls.R
rvfct <- function(object)
{
object$"fit"$"value" / df.residual(object)
}
## Variance-covariance of the estimates from the (scaled) Hessian.
## NOTE(review): if solve() fails AND both Cholesky attempts also fail,
## control falls off the end of the if/else and NULL is returned invisibly --
## callers should be prepared for that.
vcovfct <- function(object)
{
scaledH <- (object$"fit"$"hessian") / (2 * rvfct(object))
invMat <- try(solve(scaledH), silent = TRUE)
if (inherits(invMat, "try-error"))
{
## More stable than 'solve' (suggested by Nicholas Lewin-Koh - 2007-02-12)
ch <- try(chol(scaledH))
if(inherits(ch, "try-error"))
{
ch <- try(chol(0.99 * object$fit$hessian + 0.01 * diag(dim(object$fit$hessian)[1])))
}
## Try regularizing if the varcov is unstable
if(!inherits(ch, "try-error")) return(chol2inv(ch))
} else {
return(invMat)
}
}
parmfct <- function(fit, fixed = TRUE)
{
fit$par
}
## Returning list of functions
return(list(llfct = llfct, opfct = opfct, ssfct = ssfct, rvfct = rvfct, vcovfct = vcovfct,
parmfct = parmfct))
}
## Lack-of-fit tests for the "standard" estimation method.
## Neither an ANOVA-based test nor a goodness-of-fit test is implemented,
## so both entries of the returned list are NULL.
"drmLOFstandard" <- function()
{
    list(anovaTest = NULL, gofTest = NULL)
}
## Experimental scratch code, deliberately disabled with `if (FALSE)`:
## explores variance-covariance modelling for control-normalised dose-response
## data (ryegrass and S.alba examples from drc), comparing delta-method
## standard errors against drm() fits via simulation.
## NOTE(review): relies on workspace objects (ryegrass.m1, ryegrass, S.alba,
## outVec, drm, LL.4) that are not defined in this block -- it only runs when
## pasted interactively into a prepared session.
if (FALSE)
{
## Delta-method covariance of responses divided by the mean control response.
covFct <- function(sigma0, sigma, myVec, dose)
{
zeroDose <- dose < 1e-15 # hardcoded tolerance of 1e-15
len0 <- sum(zeroDose)
my0 <- (myVec[zeroDose])[1]
n0 <- sum(zeroDose)
lenMy <- length(myVec)
derMat <- matrix(0, lenMy, lenMy+1)
diag(derMat) <- 1 / my0
derMat[, lenMy+1] <- -myVec / (my0^2)
lenY <- lenMy + 1
origVCmat <- matrix(0, lenY, lenY)
sigma0mean <- sigma0^2/n0
origVCmat[zeroDose, zeroDose] <- sigma0mean
diag(origVCmat)[!zeroDose] <- sigma^2
diag(origVCmat)[zeroDose] <- sigma0^2
origVCmat[lenY, lenY] <- sigma0mean
## Returns: control mean, control count, Jacobian, original VC, transformed VC.
list(my0, n0, derMat, origVCmat, derMat %*% origVCmat %*% t(derMat))
}
resList<-covFct(0.52, 0.52, fitted(ryegrass.m1)[1:10], ryegrass$conc[1:10])
resList[[5]] / (outVec %o% outVec)
## GLS criterion in the two variance parameters only (curve held fixed).
varOptim1 <- function(varpar)
{
resList <- covFct(varpar[1], varpar[2], fitted(ryegrass.m1), ryegrass$conc)[[5]]
resVec <- residuals(ryegrass.m1)
# resVec <- fitted(ryegrass.m1) + rnorm(24, 0, c(rep(3,6), rep(1, 18))
resVec%*%solve(resList)%*%resVec + log(abs(det(resList)))
}
optim(c(1, 0.1), varOptim1)
## Joint criterion: 3 log-logistic curve parameters + 1 variance ratio.
varOptim1b <- function(par, const = 1)
{
fittedVec <- par[2]+(1-par[2])/(1+(ryegrass$conc/par[3])^par[1])
resList <- covFct(1, par[4], fittedVec, ryegrass$conc)[[5]]
resVec <- (ryegrass$rootl / 7.75) - fittedVec
resVec%*%solve(resList)%*%resVec + const * log(abs(det(resList)))
}
rg.optim <- optim(c(2,0.05,3,0.5), varOptim1b, hessian = TRUE)
sqrt(diag(solve(rg.optim$hessian)))
sqrt(varOptim1b(rg.optim$par, 0) / 20)
## S.alba
varOptim1b2 <- function(par, const = 1)
{
fittedVec <- par[2]+(1-par[2])/(1+(S.alba$Dose[1:32]/par[3])^par[1])
resList <- covFct(1, par[4], fittedVec, S.alba$Dose[1:32])[[5]]
resVec <- (S.alba$DryMatter[1:32] / 7.75) - fittedVec
resVec%*%solve(resList)%*%resVec + const * log(abs(det(resList)))
}
rg.optim2 <- optim(c(2,0.05,3,0.5), varOptim1b2, hessian = TRUE)
rg.optim2$par
sqrt(diag(solve(rg.optim2$hessian)))
sqrt(varOptim1b2(rg.optim2$par, 0) / 28)
## Reference drm() fits for comparison with the hand-rolled optimiser.
sa.drm1 <- drm(DryMatter~Dose, data=S.alba[1:32,], fct=LL.4())
summary(sa.drm1)
sa.drm2 <- drm(DryMatter/mean(S.alba$DryMatter[1:8])~Dose, data=S.alba[1:32,], fct=LL.4(fixed=c(NA,NA,1,NA)))
summary(sa.drm2)
sa.drm3 <- drm(DryMatter/mean(S.alba$DryMatter[1:8])~Dose, data=S.alba[1:32,], fct=LL.4(fixed=c(NA,NA,NA,NA)))
summary(sa.drm3)
#yVec <- fitted(ryegrass.m1) + rnorm(24, 0, c(rep(3,6), rep(1, 18)))
yVec <- fitted(ryegrass.m1) + rnorm(24, 0, c(rep(10,6), rep(1, 18)))
xVec <- ryegrass$conc
## Simulation version; captures xVec/yVec from the enclosing environment,
## so redefining them below changes this function's behaviour.
varOptim1c <- function(par, const = 1)
{
fittedVec <- par[2]+(1-par[2])/(1+(xVec/par[3])^par[1])
resList <- covFct(1, par[4], fittedVec, xVec)[[5]]
resVec <- (yVec / mean(yVec[1:4])) - fittedVec
resVec%*%solve(resList)%*%resVec + const * log(det(resList))
}
## Monte Carlo comparison of standard errors: drm() (seVec1) vs the
## hand-rolled GLS optimiser (seVec2), over 100 simulated data sets.
ratioVec <- rep(NA, 100)
sigmaVec <- rep(NA, 100)
ec50Vec <- rep(NA, 100)
seVec1 <- rep(NA, 100)
seVec2 <- rep(NA, 100)
xVec <- rep(ryegrass$conc, rep(3, 24))
xVec <- xVec[-c(1:14)]
for (i in 1:100)
{
yVec <- rep(fitted(ryegrass.m1), rep(3, 24)) + rnorm(72, 0, c(rep(3,18), rep(1, 54)))
yVec <- yVec[-c(1:14)]
# varOptim1c <- function(par, const = 1)
# {
# fittedVec <- par[2]+(1-par[2])/(1+(xVec/par[3])^par[1])
# resList <- covFct(1, par[4], fittedVec, xVec)[[5]]
# resVec <- (yVec / mean(yVec[1:6])) - fittedVec
# resVec%*%solve(resList)%*%resVec + const * log(det(resList))
# }
# seVec1[i] <- coef(summary(drm(yVec/mean(yVec[1:4])~xVec, fct=LL.4(fixed=c(NA,NA,NA,NA)))))[4,2]
seVec1[i] <- coef(summary(drm(yVec/mean(yVec[1:4])~xVec, fct=LL.4(fixed=c(NA,NA,1,NA)))))[3,2]
optimRes <- optim(c(1.7,0.01,2.5,0.1), varOptim1c, hessian=TRUE)
seVec2[i] <- sqrt(diag(solve(optimRes$hessian)))[3]
parem <- optimRes$par
ratioVec[i] <- parem[4]
ec50Vec[i] <- parem[3]
sigmaVec[i] <- sqrt(varOptim1c(parem, 0)/68)
}
cbind(seVec1, seVec2)
ratioVec
sigmaVec
hist(ratioVec)
hist(sigmaVec)
## Same as varOptim1, kept for reference.
varOptim2 <- function(varpar)
{
resList<-covFct(varpar[1], varpar[2], fitted(ryegrass.m1), ryegrass$conc)[[5]]
resVec <-residuals(ryegrass.m1)
resVec%*%solve(resList)%*%resVec+log(abs(det(resList)))
}
#resVec2 <- ryegrass$rootl - (fitted(ryegrass.m1) + rnorm(24, 0, c(rep(3,6), rep(1, 18))))
## One-dimensional profile: variance ratio only, control variance fixed at 1.
varOptim3 <- function(varpar, const=1)
{
resList<-covFct(1, varpar[1], fitted(ryegrass.m1), ryegrass$conc)[[5]]
resVec <- residuals(ryegrass.m1)
# resVec <- resVec2
resVec%*%solve(resList)%*%resVec + const * log(abs(det(resList)))
}
optimize(varOptim3, lower=0, upper=100)
}
|
7bdbfcf928582b622b12f820adaeb516e231f0d6
|
04606f837c84fe0614b494054aa8e644680669df
|
/man/covid_world_map.Rd
|
e9eb84663c4f904c1535dcb0badbd3e7b8df844c
|
[] |
no_license
|
Ashkan-nmt/Rcovid19
|
da678ce59401bc568f8193d5b4739d7e6c59ee1a
|
22c32872e309e91466b9cf985072d1310efd961f
|
refs/heads/master
| 2023-01-10T17:37:26.374943
| 2020-11-08T13:17:34
| 2020-11-08T13:17:34
| 309,087,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 502
|
rd
|
covid_world_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covid_world_map.R
\name{covid_world_map}
\alias{covid_world_map}
\title{covid_world_map}
\usage{
covid_world_map(date, type)
}
\arguments{
\item{date}{a date string like "yyyy-mm-dd"}
\item{type}{should be equal to "death" or "confirmed"}
}
\value{
a world map plot
}
\description{
Provides a function to draw a COVID-19 world map based on Johns Hopkins University's data.
}
\examples{
covid_world_map("2020-03-24","death")
}
|
e5d6abfd3c662fa30fe34806eb11ffa1e7113949
|
bfe31790682c4ab4fd7e30bc91f43b989750a1fd
|
/R/scatter_libdepth.R
|
411155cdac05f2c6084fd798468cbb55fe9e6532
|
[] |
no_license
|
millersan/rimmi.rnaseq
|
d1d5342c512854cb3e7f379cfaa0f2813fc68fc9
|
433f0fbb4392c1a47a013f4170749c79db793834
|
refs/heads/master
| 2022-12-28T07:33:23.053283
| 2020-10-13T12:41:26
| 2020-10-13T12:41:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,319
|
r
|
scatter_libdepth.R
|
#' Plot clusters in UMAP space with point size showing per-cell QC metrics
#'
#' Draws three UMAP scatter plots of the clustered cells, sizing points by
#' library size (nUMI), number of detected genes, and percent mitochondrial
#' reads respectively, so you can see whether clustering is driven by
#' sequencing depth rather than biology.
#'
#' NOTE(review): the file is named scatter_libdepth.R and the examples used
#' to call scatter_libdepth(), but the defined function is lib.qc_plot() --
#' confirm which public name is intended.
#'
#' @param Seurat_obj your Seurat object; assumes a computed "umap" reduction
#'   and the meta.data columns nCount_RNA, percent.mt and nFeature_RNA
#'   (Seurat v3+ layout) -- TODO confirm against the package's Seurat version
#'
#' @return a cowplot grid of three UMAP scatter plots (nUMI, nGenes, pMito)
#'
#' @keywords Seurat, single cell sequencing, RNA-seq, gene signature
#'
#' @examples
#'
#' lib.qc_plot(A07)
#'
#' @export
#'
lib.qc_plot <- function(Seurat_obj){
# ggplot2 is attached here; Idents() and cowplot are assumed to be
# available from the calling environment -- TODO confirm
library(ggplot2)
# One row per cell: UMAP coordinates plus the three QC metrics and cluster id.
qc.df <- data.frame(row.names = rownames(Seurat_obj@meta.data),
umap1 = Seurat_obj@reductions$umap@cell.embeddings[,1],
umap2 = Seurat_obj@reductions$umap@cell.embeddings[,2],
nUMI = Seurat_obj$nCount_RNA,
pMito = Seurat_obj$percent.mt,
nGenes = Seurat_obj$nFeature_RNA,
cluster = Idents(Seurat_obj))
# Same UMAP layout three times; only the size aesthetic changes.
p1 <- ggplot(qc.df, aes(x = umap1, y = umap2, colour = cluster)) +
geom_point(aes(size = nUMI)) +
theme_bw()
p2 <- ggplot(qc.df, aes(x = umap1, y = umap2, colour = cluster)) +
geom_point(aes(size = nGenes)) +
theme_bw()
p3 <- ggplot(qc.df, aes(x = umap1, y = umap2, colour = cluster)) +
geom_point(aes(size = pMito)) +
theme_bw()
cowplot::plot_grid(p1, p2, p3)
}
|
fb8cf552c77cf6c902576e8ea9cfe1aca7969869
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/reportROC/examples/reportROC.Rd.R
|
3f949678eb6e75dc157b04e5de38a4a3acd08911
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
r
|
reportROC.Rd.R
|
## Extracted example script for the reportROC package.
library(reportROC)
### Name: reportROC
### Title: An Easy Way to Report ROC Analysis
### Aliases: reportROC
### Keywords: ROC analysis
### ** Examples
## aSAH: aneurysmal subarachnoid haemorrhage data set -- presumably provided
## via the pROC package; confirm it is available in this session.
data(aSAH)
## Continuous predictor (s100b), optimising sensitivity, with a ROC plot.
reportROC(gold=aSAH$outcome,predictor=aSAH$s100b,important="se",plot=TRUE)
## Dichotomise s100b at 0.205 and report ROC statistics for the binary predictor.
binary=rep(0,nrow(aSAH))
binary[aSAH$s100b>=0.205]=1
reportROC(gold=aSAH$outcome,predictor.binary=binary)
|
f51d36495f17923c45af99b7b11c38f76e45ed6a
|
331b6212e64e8c703f423d4d1b2edb857ab7396a
|
/inst/examples/cmm-dist.R
|
665a2685b76b3f436a49fc31a6b7f0701839b834
|
[] |
no_license
|
FranciscoOlego/COMMultReg
|
b6376b6e32add5e22c601b76f6c5b078533cbf58
|
1f1b61204acef86209bdf6dede49ea458c41ea44
|
refs/heads/master
| 2023-04-05T12:04:30.013210
| 2021-04-19T12:03:56
| 2021-04-19T12:03:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 121
|
r
|
cmm-dist.R
|
## Example: draw from the Conway-Maxwell-multinomial (CMM) distribution.
library(COMMultReg)
## Category probabilities 1/6, 2/6, 3/6 (sum to 1).
p = c(1,2,3) / 6
## 100 draws with cluster size m = 10 and dispersion nu = 0.8; burn/thin are
## presumably MCMC sampler controls -- see r_cmm() documentation to confirm.
y = r_cmm(n = 100, m = 10, p = p, nu = 0.8, burn = 1000, thin = 10)
head(y)
## Bare function name: prints the definition of the CMM density function.
d_cmm
|
b2b15e677ea71923e4cc992643358daab9834ab6
|
31ea8595b1b023988c18875d71ce2a5202c5f3ea
|
/rprog/Quizzes/w3_1-2.R
|
0b3903332f1fdc53a0ac4f9d34b1c4537a4a46c5
|
[] |
no_license
|
datawrecker/datasciencecoursera
|
3fef8322c062442e2a8222e36bdf187462c295b3
|
ce1d0940fec6c0f4123d48b51a30598c24bbf074
|
refs/heads/master
| 2020-04-05T15:20:08.066152
| 2015-03-21T15:10:58
| 2015-03-21T15:10:58
| 31,636,947
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 127
|
r
|
w3_1-2.R
|
## Summary statistics on the built-in iris data set: first the mean sepal
## length of the virginica species, then the column means of the four
## numeric measurement columns.
library(datasets)
data(iris)
print(mean(iris[iris$Species == "virginica", "Sepal.Length"]))
print(colMeans(iris[, 1:4]))
|
f666f78911e5a7cd8a50c9eec6161f929ccd6598
|
9616c625f27b79378589972989858d789dd1a08a
|
/time_evol_hgT_mehgT_2050.R
|
6593917486be246c94b7f9d72a33e9b47b76d868
|
[] |
no_license
|
ginevrar/BS
|
46fc78121e8e22f4235d381757e4431335d6f1dd
|
0de8aa416bdc95d25604f0d3b8f13122ac671baf
|
refs/heads/master
| 2021-01-11T21:52:40.756912
| 2019-07-22T16:19:26
| 2019-07-22T16:19:26
| 78,849,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,304
|
r
|
time_evol_hgT_mehgT_2050.R
|
## ---- Load Black Sea mercury model output (one CSV per state variable) ----
## Each CSV has 12 columns: a time stamp plus one column per model layer,
## from the surface euphotic layers down to the sediments. Rows 3:2414 keep
## the simulated time series and drop two leading header/junk rows.
getwd()
#wd('C:/Users/gi/Dropbox/29_Luglio/Anoxic3c/ore17')
setwd("C:/Users/gi/Dropbox/BlackSea2/implementazione/new_sim0/_met/Wh1")
hg<-read.csv("Dissolved_Divalent_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(hg)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
hg<-hg [3:2414,1:12]; str(hg$Time)
## NOTE(review): unlike every other table below, "solids" is never trimmed
## to rows 3:2414 -- confirm whether that is intentional.
solids<-read.csv("Total_Solids.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(solids)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
Phg<-read.csv("Total_Sorbed_Divalent_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(Phg)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
Phg<-Phg [3:2414,1:12]
DOChg<-read.csv("DOC_Sorbed_Divalent_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(DOChg)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
DOChg<-DOChg [3:2414,1:12]
mehg<-read.csv("Dissolved_Methyl_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(mehg)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
mehg<-mehg [3:2414,1:12]
DOCmehg<-read.csv("DOC_Sorbed_Methyl_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(DOCmehg)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
DOCmehg<-DOCmehg [3:2414,1:12]
hg0<-read.csv("Elemental_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(hg0)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
hg0<-hg0 [3:2414,1:12]
mehgT<-read.csv("Methyl_Hg.csv", header=FALSE, skip = 1,sep = ",", dec=".")
names(mehgT)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
mehgT<-mehgT [3:2414,1:12]
hgT<-read.csv('Total_Hg.csv', header=FALSE, skip = 1,sep = ",", dec=".")
names(hgT)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
hgT<-hgT [3:2414,1:12]
Temp<-read.csv('Segment_Temperature.csv', header=FALSE, skip = 1,sep = ",", dec=".")
names(Temp)<-c("Time", "Oxic1","Oxic2", "CIL", "Oxycline","Suboxic1","Suboxic2", "Anoxic","Anoxic2","Anoxic3","Sed1","Sed2")
Temp<-Temp [3:2414,1:12]
## Keep only the last 36 time steps of temperature for the overlays below.
Temp<-tail(Temp,36)
## Elemental Hg over the last 36 time steps, converted to fM using the molar
## mass of mercury (200.59 g/mol); input units assumed consistent with the
## model output -- TODO confirm against the WASP configuration.
hg0_fM <- tail(hg0, 36) / 200.59 * 10^6
## Mean elemental-Hg concentration per layer.
## BUG FIX: the oxycline mean previously read hg0_fM$Oxicline (misspelled);
## that column does not exist, so the term was NULL and hg0d came out NaN.
## The data frame column is named "Oxycline" (see names() assignment above).
hg0a <- mean(hg0_fM$Oxic1)
hg0b <- mean(hg0_fM$Oxic2)
hg0c <- mean(hg0_fM$CIL)
hg0d <- mean(hg0_fM$Oxycline)
hg0e <- mean(hg0_fM$Suboxic1)
hg0f <- mean(hg0_fM$Suboxic2)
hg0h <- mean(hg0_fM$Anoxic)
hg0i <- mean(hg0_fM$Anoxic2)
hg0l <- mean(hg0_fM$Anoxic3)
hg0_layer <- c(hg0a, hg0b, hg0c, hg0d, hg0e, hg0f, hg0h, hg0i, hg0l)
plot(hg0_layer)
range(hg0_fM$Oxic1)
## Exploratory overlays of the surface layer: each par(new = TRUE) draws the
## next series on top of the previous one without clearing the device.
## (T replaced by TRUE throughout: T is reassignable and not safe.)
## NOTE(review): hgdiss_pM and mehgdiss_pM are not created in this script --
## presumably left in the workspace by an earlier analysis; confirm.
plot(hg0_fM$Oxic1, xlim = c(0, 36), col = 'red')
par(new = TRUE)
plot(Temp$Oxic1, type = 'b', xlim = c(0, 36), col = 'red')
par(new = TRUE)
plot(tail(hgdiss_pM$Oxic1, 36), xlim = c(0, 36), col = 'royalblue', type = 'b')
par(new = TRUE)
plot(tail(mehgdiss_pM$Oxic1, 36), xlim = c(0, 36), col = 'hotpink', type = 'b')
par(new = TRUE)
plot(tail(mehgT$Oxic1, 36), xlim = c(0, 36), col = 'orange', type = 'b')
## Second overlay: methylmercury by layer against surface temperature.
plot(Temp$Oxic1, type = 'b', xlim = c(0, 36), col = 'red')
par(new = TRUE)
plot(tail(mehgT$Oxic2, 36), xlim = c(0, 36), col = 'orange', type = 'b')
par(new = TRUE)
plot(tail(mehgT$Oxic1, 36), xlim = c(0, 36), col = 'hotpink', type = 'b')
par(new = TRUE)
plot(tail(mehgT$CIL, 36), xlim = c(0, 36), col = 'blue', type = 'b')
par(new = TRUE)
plot(tail(mehgT$Suboxic1, 36), xlim = c(0, 36), col = 'green', type = 'b')
par(new = TRUE)
plot(tail(mehgT$Anoxic, 36), xlim = c(0, 36), col = 'darkgreen', type = 'b')
## ---- Publication figures: MeHg_T and Hg_T time series per layer ----
## NOTE(review): `a` and `ax2` below appear unused in this block -- possibly
## leftovers from an earlier axis-labelling attempt; confirm before removing.
a<-c(2.1,2,3)
ax2<-(seq(1850,2050,by=1))
str(ax2)
## Figure 1: total methylmercury (pM) in each layer; /215 converts using the
## molar mass of CH3Hg+ (~215 g/mol) -- TODO confirm units of mehgT.
dev.new()
par(mfrow=c(1,1))
plot(mehgT$Oxic1/215*1000, col="deepskyblue", ylab=" ", ylim=c(0,0.8),
xlab=" ", main=expression("MeHg"[T]*" concentrations in the model layers"),
cex.main=1.5, type="l",lwd=1.5,lty=1, xaxt='n', cex.axis=1.4)
par(new=TRUE)
plot(mehgT$Oxic2/215*1000, ylim=c(0,0.8), col="dodgerblue",
ylab="MeHg (pM)", cex.lab=1.4, xlab="Time (years)", yaxt='n',type="l",lwd=1.5,lty=2, xaxt='n')
par(new=TRUE)
plot(mehgT$CIL/215*1000, ylim=c(0,0.8), col="#feb24c", ylab=" ",
xlab=" ", type="l",lwd=1.5, yaxt='n',lty=3, xaxt='n')
par(new=TRUE)
plot(mehgT$Oxycline/215*1000, ylim=c(0,0.8), col="#225ea8",
ylab=" ", xlab=" ", type="l",yaxt='n',lwd=1.5,lty=4, xaxt='n')
par(new=TRUE)
plot(mehgT$Suboxic1/215*1000, ylim=c(0,0.8), col="#addd8e",
ylab=" ", xlab=" ", xaxt='n', type="l",lwd=1.5,yaxt='n',lty=5)
par(new=TRUE)
plot(mehgT$Suboxic2/215*1000, ylim=c(0,0.8), col="lightslategray",
ylab=" ", xlab=" ", type="l", xaxt='n',lwd=1.5,yaxt='n',lty=1)
par(new=TRUE)
plot(mehgT$Anoxic/215*1000, ylim=c(0,0.8), col="darkslategray",
ylab=" ", xlab=" ", type="l",lwd=1.5,lty=2, yaxt='n',xaxt='n')
par(new=TRUE)
plot(mehgT$Anoxic2/215*1000, ylim=c(0,0.8), col="#fc4e2a",
ylab=" ", xlab=" ", type="l",lwd=1.5,lty=3, yaxt='n',xaxt='n')
par(new=TRUE)
plot(mehgT$Anoxic3/215*1000, ylim=c(0,0.8), col="#800026",
ylab=" ", xlab=" ", type="l",lwd=1.5,lty=4, yaxt='n',xaxt='n')
## In-plot labels used instead of a legend (legend code kept commented below).
text(950,.19,'euphotic1', col='deepskyblue', cex=1.2,font=3)
text(950,.04,'euphotic2', col='dodgerblue', cex=1.2,font=3)
text(1500,0.09,'CIL', col='#feb24c', cex=1.2,font=3)
text(2100,0.04,'oxycline', col='#225ea8', cex=1.2,font=3)
text(2100,.28,'SOL', col='#addd8e', cex=1.2,font=3)
text(1800,.45,'UAOL1', col='lightslategray', cex=1.2,font=3)
text(2200,.55,'UAOL2', col='darkslategray', cex=1.2,font=3)
text(1400,.7,'DAOL', col='#fc4e2a', cex=1.2,font=3)
text(1800,.8,'BBL', col='#800026', cex=1.2,font=3)
#
#
#end(50,1, pch=19,legend=c("Euphotic1","Euphotic2","CIL",'Oxycline',
# "SOL" ,"UAOL1", "UAOL2",
# "DAOL", "BBL"),
## col=c('deepskyblue', 'dodgerblue','#225ea8','#bdbdbd',
# '#addd8e','#41ab5d','#feb24c','#fc4e2a','#800026'),
# lty=c(1,2,3,4,5,1,2,3,4))
#abline(v=1970,col='red')
#abline(v=2013)
## Figure 2: total mercury (pM) per layer; /200.59 is the molar mass of Hg.
dev.new()
par(mfrow=c(1,1))
plot(hgT$Oxic1/200.59*1000, col="deepskyblue",ylim=c(0,4),
ylab="Hg (pM)", xlab="Time (years)",
main=expression("Hg"[T]*" concentrations in the model layers"),
cex.lab=1.4, cex.main=1.5,cex.axis=1.3,
type="l",lwd=1.5,lty=1, xaxt='n')
par(new=TRUE)
plot(hgT$Oxic2/200.59*1000, ylim=c(0,4), col="dodgerblue", ylab=" ", xlab="",
type="l",lwd=1.5,lty=2,yaxt='n', xaxt='n')
par(new=TRUE)
plot(hgT$CIL/200.59*1000, ylim=c(0,4), col='#feb24c', ylab=" ",
xlab=" ", type="l",lwd=1.5,lty=3, yaxt='n', xaxt='n')
par(new=TRUE)
plot(hgT$Oxycline/200.59*1000, ylim=c(0,4), col="#225ea8",
ylab=" ", xlab="",cex.lab=1.2, type="l",lwd=1.5,lty=4, yaxt='n', xaxt='n')
par(new=TRUE)
plot(hgT$Suboxic1/200.59*1000, ylim=c(0,4), col="#addd8e",
ylab=" ", xlab=" ", xaxt='n',yaxt='n', type="l",lwd=1.5,lty=5)
par(new=TRUE)
plot(hgT$Suboxic2/200.59*1000, ylim=c(0,4), col="lightslategray",yaxt='n',
ylab=" ", xlab=" ", type="l", xaxt='n',lwd=1.5,lty=1)
par(new=TRUE)
plot(hgT$Anoxic/200.59*1000, ylim=c(0,4), col="darkslategray",
ylab=" ", xlab=" ", type="l",lwd=1.5,lty=2, yaxt='n',xaxt='n')
par(new=TRUE)
plot(hgT$Anoxic2/200.59*1000, ylim=c(0,4), col="#fc4e2a",
ylab=" ", xlab=" ", type="l",lwd=1.5,yaxt='n',lty=3, xaxt='n')
par(new=TRUE)
plot(hgT$Anoxic3/200.59*1000, ylim=c(0,4), col="#800026",
ylab=" ", xlab=" ", xaxt='n',yaxt='n', type="l",lwd=1.5,lty=4)
## In-plot layer labels for figure 2.
text(160,1.7,'euphotic1', col='deepskyblue', cex=1.2,font=3)
text(800,2.3,'euphotic2', col='dodgerblue', cex=1.2,font=3)
text(600,0.4,'CIL', col='#feb24c', cex=1.2,font=3)
text(1000,0.7,'oxycline', col='#225ea8', cex=1.2,font=3)
text(1600,1.5,'SOL', col='#addd8e', cex=1.2,font=3)
text(1300,3.5,'UAOL1', col='lightslategray', cex=1.2,font=3)
text(2200,2.7,'UAOL2', col='darkslategray', cex=1.2,font=3)
text(1800,3.7,'DAOL', col='#fc4e2a', cex=1.2,font=3)
text(2200,3.95,'BBL', col='#800026', cex=1.2,font=3)
#
#
#legend(-1,4.1, pch=19,legend=c("Euphotic1","Euphotic2","CIL",'Oxycline',
# "SOL" ,"UAOL1", "UAOL2",
# "DAOL", "BBL"),
# col=c('deepskyblue', 'dodgerblue','#225ea8','#bdbdbd',
# '#addd8e','#41ab5d','#feb24c','#fc4e2a','#800026'),
# cex=1.15)
|
3f62dfbec7b9773894ff0c0a6fdb3918b6da531f
|
d73d6f10c28e6ecca2ff3597eac3238da3d05d42
|
/R/n.tables.R
|
e23f52f7b63c88a3ee67de5744f7d7dae03abe91
|
[
"Apache-2.0"
] |
permissive
|
hms-dbmi/dbGaP2x
|
dec021b70db3684e19752b5da242086b35caa268
|
2024714c2793cc2707f41a4320b27b8da1e421ce
|
refs/heads/master
| 2020-04-05T09:20:34.458287
| 2018-11-28T17:37:03
| 2018-11-28T17:37:03
| 156,751,577
| 2
| 0
|
Apache-2.0
| 2019-04-20T21:50:46
| 2018-11-08T18:32:07
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,340
|
r
|
n.tables.R
|
#' @title Gets the number of phenotypic datatables in the study
#'
#' @param phs dbGap study ID (phs00xxxx, or 00xxxx, or xxx)
#'
#' @return Return the number of phenotypic datatables in the study
#'
#' @description This function extracts informations from data.dict.xml files from the dbgap ftp server to get the study characteristics. Works only for a parent study.
#' @import RCurl
#'
#' @author Gregoire Versmee, Laura Versmee
#' @export
n.tables <- function(phs) {
phs <- phs.version(phs)
url<- paste0("ftp://anonymous:anonymous@ftp.ncbi.nlm.nih.gov/dbgap/studies/", unlist(strsplit(phs, "\\."))[1], "/", phs, "/")
filenames <- RCurl::getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE, crlf = TRUE)
filenames <- paste(url, strsplit(filenames, "\r*\n")[[1]], sep = "")
phenodir <- filenames[grepl("pheno", filenames)]
filelist <- RCurl::getURL(paste0(phenodir, "/"), ftp.use.epsv = FALSE, dirlistonly = TRUE, crlf = TRUE)
filelist <- paste(phenodir, "/", strsplit(filelist, "\r*\n")[[1]], sep = "")
return(length(filelist[(grepl(".data_dict.xml", filelist)) & (!grepl("Sample_Attributes.data_dict.xml", filelist)) &
(!grepl("Subject.data_dict.xml", filelist)) & (!grepl("Sample.data_dict.xml", filelist)) &
(!grepl("Pedigree.data_dict.xml", filelist))]))
}
|
e99ece860e3b36f4d74cb0c36e8028f743fc747e
|
2cf5744042a9802bc019c0557848db8fbfda0d39
|
/inst/sfMM-plotCv_fMM.Rd
|
f9ca19fb9fee7fd8237ffaa91f36c07d13f6927b
|
[] |
no_license
|
cran/MRIaggr
|
bcc874f1253ab7b168e4a6d68bc66e8556b7d330
|
099c3227ac60fdad71aa5c1b79bf53b91a92e177
|
refs/heads/master
| 2021-01-21T21:47:16.132229
| 2015-12-23T23:44:19
| 2015-12-23T23:44:19
| 31,946,742
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,327
|
rd
|
sfMM-plotCv_fMM.Rd
|
\name{plotCv_fMM}
\title{Graphical display of the convergence criteria}
\alias{plotCv_fMM}
\description{
Graphical display of the convergence criteria using the result of \code{\link{fMMalgo}} algorithm.
}
\usage{
plotCv_fMM(res.EM, window=FALSE,
filename="EM.traceLv", width=480, height=480, path=NULL, unit="px", res=NA)
}
\arguments{
\item{res.EM}{The result of the \code{\link{fMMalgo}} or the \code{\link{launcher_fMM}} function.}
\item{window}{Display the graphics in the current graphics device (\code{F}), in a new device (\code{T}), or save it in a png/eps/svg file (\code{"png"} / \code{"esp"} / \code{"png"}). \code{NULL} indicates no display.}
\item{filename}{Name of the file (without the extension) where the plot should be saved. \code{character}. Ignored if window is not png/eps/svg.}
\item{width}{the width of the device. \code{numeric}. Ignored if window is not png/eps/svg.}
\item{height}{the width of the device. \code{numeric}. Ignored if window is not png/eps/svg.}
\item{path}{the directory where the plot file will be created. \emph{character}. Default is \code{NULL} corresponding to the current working directory.}
\item{unit}{the units in which \code{height} and \code{width} are given. Can be \code{"px"}, \code{"in"}, \code{"cm"} or \code{"mm"}. Default is \code{"px"}.}
\item{res}{the nominal resolution in ppi which will be recorded in the bitmap file, if a positive integer. \emph{numeric}. Default is \code{NA}.}
}
\value{
A \code{list} containing :
\item{diff_lv}{a \code{vector} indicating the evolution of the likelihood between two consecutive steps.}
\item{test.croissance}{a \code{logical} testing the monotonicity of the likelihood across steps.}
\item{diff_lv_completee}{a \code{vector} indicating the evolution of the completed likelihood between two consecutive steps.}
\item{test.croissance_completee}{a \code{logical} testing the monotonicity of the completed likelihood across steps.}
}
\examples{
data(Simulation_data,package="fMMseg")
res_EMbase <- launcher_fMM(G=3,data=Simulation_data,
var_reg="Y",family=gaussian(link="identity"),
epsilon=5*10^{-3})
plotCv_fMM(res_EMbase)
}
|
4cb30483609f0e75a1e338875c1b2dec18276335
|
75254b62ce23b3f4c4be7b5c3b8a2a9de0a9f7ce
|
/app/server.R
|
d1f47f48f5ef5bb6ef1cc6ea10aa163f2ade0342
|
[] |
no_license
|
NlIceD/asa-shiny-app
|
f4589591cd440f55a583bafc9b480ea690ccbec4
|
d3d78f74fc83908ce54b25a2316013e2500a88f9
|
refs/heads/master
| 2022-11-07T01:57:41.246957
| 2020-06-30T02:14:53
| 2020-06-30T02:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,352
|
r
|
server.R
|
server <- function(input, output, session) {
# -----------------------------------------------
# GLOBAL SETTINGS -------------------------------
# -----------------------------------------------
# Source data -----------------------------------
source("../app/utils/retrieve_data.R")
source("../app/utils/reactive_values.R")
# Hide loading page -----------------------------
hide(id = "loader_page", anim = TRUE, animType = "fade", time = 2)
# sendSweetAlert(
# session,
# title = "We've made some changes!",
# text = "We gave things a fresh coat of paint, and we have a lot of exciting new updates in store. Plus, we've included our new metric, goals added (g+), in this latest update.
#
# Start by selecting a set of tables from the sidebar on the left, and then use the settings icon in the top-right corner to tailor your results.",
# type = "info"
# )
# Set default reactive values -------------------
# players_reactive_values <- reactiveValues(profile_player_name = START_PLAYER,
# profile_player_season = max(all_players_seasons$season_name[all_players_seasons$player_id == START_PLAYER]))
# Body settings ---------------------------------
body_reactive <- eventReactive(input$asa_sidebar, {
# if (input$asa_sidebar == "profile_player") {
# profile_player
# }
if (grepl("^tables", input$asa_sidebar)) {
tables_div
} else if (input$asa_sidebar == "home") {
home_div
}
})
output$asa_body <- renderUI({
body_reactive()
})
# Controlbar settings ---------------------------
controlbar_listener <- reactive({
list(input$asa_sidebar, input$tables_subheader)
})
controlbar_reactive <- eventReactive(controlbar_listener(), {
if (input$asa_sidebar == "profile_player") {
controlbar_profile_player(players_dropdown, players_reactive_values, all_players_seasons)
} else if (grepl("^tables", input$asa_sidebar)) {
controlbar_tables(input$asa_sidebar, input$tables_subheader, tables_rv)
}
})
output$asa_controlbar <- renderUI({
controlbar_reactive()
})
# Footer settings -------------------------------
observeEvent(input$client_timezone, {
output$asa_footer <- renderUI({
footer_reactive(recent_games, input$client_timezone)
})
})
# -----------------------------------------------
# TABLES ----------------------------------------
# -----------------------------------------------
# Change reactive values upon refresh -----------
table_refreshes <- c()
for (x in names(tables_menu_items)) {
for (y in tables_menu_items[[x]][["subheaders"]]) {
table_refreshes <- c(table_refreshes, paste(x, tolower(y), "refresh", sep = "_"))
}
}
# Register one observer per refresh button.  On click the handler:
#   1. disables the button while the request is in flight,
#   2. collects every non-refresh input belonging to the same table
#      (same ID prefix as the button, "_refresh" stripped),
#   3. validates the date-range and grouping filters, alerting and
#      re-enabling the button on failure,
#   4. copies the validated inputs into tables_rv and rebuilds the
#      table's data frame via tables_rv_to_df().
lapply(table_refreshes, function(x) {
observeEvent(input[[x]], {
shinyjs::disable(x)
# All inputs that share this table's ID prefix, excluding refresh buttons.
matching_inputs <- names(input)
matching_inputs <- matching_inputs[grepl(gsub("_refresh$", "", x), matching_inputs) & !grepl("refresh", matching_inputs)]
# Key into tables_rv: button ID with the "tables_" prefix and
# "_refresh" suffix removed.
rv_key <- gsub("(^tables_|_refresh$)", "", x)
execute_api_call <- TRUE
# --- Validation: date range ------------------------------------------
if (any(grepl("date_type", matching_inputs))) {
if (input[[gsub("_refresh$", "_date_type", x)]] == "Date Range") {
# Exactly one endpoint missing -> reject.
if (sum(is.na(c(input[[gsub("_refresh$", "_date_range", x)]][1], input[[gsub("_refresh$", "_date_range", x)]][2]))) == 1) {
sendSweetAlert(
session,
title = "Error: Date Filter",
text = "If filtering by date range, both a start and end date must be included.",
type = "error"
)
shinyjs::enable(x)
execute_api_call <- FALSE
# Both endpoints present but out of order -> reject.
# NOTE(review): scalar `if` condition joined with `&`; `&&` would be
# the conventional choice here.
} else if (sum(is.na(c(input[[gsub("_refresh$", "_date_range", x)]][1], input[[gsub("_refresh$", "_date_range", x)]][2]))) == 0 &
input[[gsub("_refresh$", "_date_range", x)]][2] < input[[gsub("_refresh$", "_date_range", x)]][1]) {
sendSweetAlert(
session,
title = "Error: Date Filter",
text = "If filtering by date range, the end date must be greater than or equal to the start date.",
type = "error"
)
shinyjs::enable(x)
execute_api_call <- FALSE
}
}
}
# --- Validation: team-salary tables need at least one grouping -------
if (grepl("salaries_teams", x)) {
if (sum(c(input[[gsub("_refresh$", "_split_by_teams", x)]], input[[gsub("_refresh$", "_split_by_seasons", x)]], input[[gsub("_refresh$", "_split_by_positions", x)]])) == 0) {
sendSweetAlert(
session,
title = "Error: Grouping Results",
text = "Results must be grouped by at least one of teams, positions, or seasons.",
type = "error"
)
shinyjs::enable(x)
execute_api_call <- FALSE
}
}
# --- Commit inputs and rebuild the table -----------------------------
if (execute_api_call) {
lapply(matching_inputs, function(y) {
if (grepl("date_range", y)) {
# A date-range input is split into separate start/end keys.
tables_rv[[rv_key]][["start_date"]] <- input[[y]][1]
tables_rv[[rv_key]][["end_date"]] <- input[[y]][2]
} else {
rv_secondary_key <- gsub(paste0("tables_", rv_key, "_"), "", y)
tables_rv[[rv_key]][[rv_secondary_key]] <- input[[y]]
}
})
# rv_key is "<header>_<subheader>"; split it back apart for the fetch.
subheader <- gsub("^_", "", stri_extract_last_regex(rv_key, "_[a-z]+$"))
header <- stri_replace_last_regex(rv_key, "_[a-z]+$", "")
tables_rv[[rv_key]][["data_frame"]] <- tables_rv_to_df(header, subheader)
shinyjs::enable(x)
}
})
})
# Tables header ---------------------------------
# Each UI region is a reactive() wrapped in a renderUI(): the reactive
# recomputes when its inputs change, and the render pushes the result
# into the matching output slot.
tables_header_reactive <- reactive({
tables_header(input$asa_sidebar)
})
output$tables_header <- renderUI({
tables_header_reactive()
})
# Tables subheader ------------------------------
# Only table pages (sidebar IDs starting with "tables") have a subheader;
# other pages fall through to NULL (reactive returns invisibly).
tables_subheader_reactive <- reactive({
if (grepl("^tables", input$asa_sidebar)) {
tables_subheader(input$asa_sidebar)
}
})
output$tables_subheader <- renderUI({
tables_subheader_reactive()
})
# Tables body -----------------------------------
tables_body_reactive <- reactive({
tables_body(input$asa_sidebar, input$tables_subheader, input$client_timezone)
})
output$tables_body <- renderUI({
tables_body_reactive()
})
# xGoals body -----------------------------------
tables_xgoals_ui_reactive <- reactive({
tables_xgoals_ui()
})
output$tables_xgoals_ui <- renderUI({
tables_xgoals_ui_reactive()
})
# -----------------------------------------------
# PROFILES: PLAYERS -----------------------------
# -----------------------------------------------
# # Add headshots to controlbar -------------------
# observeEvent(input$asa_sidebar, {
# updateSelectizeInput(session,
# "profile_player_name",
# server = TRUE,
# choices = players_dropdown,
# selected = players_reactive_values$profile_player_name,
# options = list(render = I(
# "{
# option: function(item, escape) {
# return '<div><div class = \"players_dropdown_wrapper\"><img class=\"players_dropdown_img\"' +
# 'src=\"' + item.url + '\" /></div><div class = \"players_dropdown_txt\">' +
# item.label + '</div></div>';
# }
# }"
# )))
# })
#
# # Update season on controlbar -------------------
# observeEvent(input$profile_player_name, {
# if (input$profile_player_name == players_reactive_values$profile_player_name) {
# updateSelectizeInput(session,
# "profile_player_season",
# server = TRUE,
# choices = all_players_seasons$season_name[all_players_seasons$player_id == input$profile_player_name],
# selected = players_reactive_values$profile_player_season)
# } else if (input$profile_player_name != "") {
# updateSelectizeInput(session,
# "profile_player_season",
# server = TRUE,
# choices = all_players_seasons$season_name[all_players_seasons$player_id == input$profile_player_name],
# selected = max(all_players_seasons$season_name[all_players_seasons$player_id == input$profile_player_name]))
# } else {
# updateSelectizeInput(session,
# "profile_player_season",
# server = TRUE,
# choices = NULL,
# selected = NULL)
# }
# })
#
# # Change reactive values upon refresh -----------
# observeEvent(input$profile_player_refresh, {
# players_reactive_values$profile_player_name <- input$profile_player_name
# players_reactive_values$profile_player_season <- input$profile_player_season
# })
#
# # Profile header --------------------------------
# player_profile_basic_info_reactive <- reactive({
# header_profile_player(players_reactive_values, all_players)
# })
#
# output$player_profile_basic_info <- renderUI({
# player_profile_basic_info_reactive()
# })
#
# # Violin plots ----------------------------------
# player_profile_violin_plots_reactive <- reactive({
# violin_plots_profile_player(all_players_stats, players_reactive_values)
# })
#
# output$player_profile_violin_plots <- renderUI({
# player_profile_violin_plots_reactive()
# })
#
# # Touch heatmap ---------------------------------
# player_profile_touch_heatmap_ggplot_reactive <- reactive({
# touch_heatmap_ggplot_profile_player(players_reactive_values)
# })
#
# output$player_profile_touch_heatmap_ggplot <- renderPlot({
# player_profile_touch_heatmap_ggplot_reactive()
# })
#
# player_profile_touch_heatmap_reactive <- reactive({
# touch_heatmap_profile_player()
# })
#
# output$player_profile_touch_heatmap <- renderUI({
# player_profile_touch_heatmap_reactive()
# })
}
|
cf7f72d33d3da6ecf7673c750ba2a4ce422d80c7
|
efeba9f5aff2e7afbf96a57e0baf62a8fb1a3b94
|
/Part2/WordCloud_Practice/test.R
|
a26840732c0f30a40029d7f86f85668468f0257c
|
[] |
no_license
|
psm9619/R_Data_Analysis
|
b1db04295607b5b0811eb2151ce5378a812b2aa3
|
b6b8186a582174533ab41a68aeab77bdcf0ea854
|
refs/heads/master
| 2020-05-29T13:27:26.350660
| 2019-10-10T01:07:53
| 2019-10-10T01:07:53
| 189,161,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 520
|
r
|
test.R
|
# Word-cloud preprocessing: extract nouns from a Korean text file, strip
# non-alphabetic noise, then delete a user-supplied stopword list.
library(dplyr)
library(KoNLP)
library(wordcloud)
library(stringr)
library(RColorBrewer)

txt <- readLines("test.txt")

# Extract nouns from each line, then flatten to a single character vector.
test <- sapply(txt, extractNoun, USE.NAMES = FALSE)
test <- unlist(test)
test

# BUG FIX: the original pattern "[^:alpha:]]" is a malformed POSIX class —
# it negates the literal characters ":alph" and additionally matches "]" —
# which is exactly why digits were never removed (see the author's own
# comment above).  The correct class is "[^[:alpha:]]", which removes
# everything that is not alphabetic (Hangul included under a Korean locale).
test <- str_replace_all(test, "[^[:alpha:]]", "")
test

# Stopword patterns to delete, one per line.
t_gsub <- readLines("응답소gsub.txt")
t_gsub

# BUG FIX: the original loop recomputed test2 from the *unmodified* `test`
# on every iteration, so only the final stopword was ever removed.
# Accumulate the substitutions instead.
test2 <- test
for (i in seq_along(t_gsub)) {
  test2 <- gsub(t_gsub[i], "", test2)
}
test2
|
b4fecbdf596499f76829e7b975a3b153c8e0f94f
|
3be358db53d6093e48c22e77a19abc1a9ca369c5
|
/partitionnementSpectral.R
|
fbf9c58af03b03028d142e3bee92a39b7b4cb689
|
[] |
no_license
|
lucasRemera/Clustering
|
f6cf925505fc5537498029d3f84fa157e2495a0f
|
7187a2ca949ffa3cd13e41123adce65111191df7
|
refs/heads/master
| 2020-03-09T17:41:06.009902
| 2018-08-21T08:44:20
| 2018-08-21T08:44:20
| 128,913,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
partitionnementSpectral.R
|
## details on the wiki
# Build a symmetric similarity matrix between points, based on Voronoi
# (Delaunay) adjacency: two points are connected iff their Voronoi cells
# share an edge.  The edge weight is either the Fisher-test p-value
# comparing case/control counts of the two points (fisher = TRUE) or a
# log-ratio-based similarity (fisher = FALSE).
#
# x, y   point coordinates.
# cas    case counts per point; tem   control counts per point.
# Returns a length(x) x length(x) symmetric matrix (0 for non-neighbours).
# NOTE(review): requires the deldir package to be attached by the caller.
#library(deldir)
getGNeighbourVoronoiMatrix=function(x,y,cas,tem,fisher=TRUE){
m=matrix(0,ncol=length(x),nrow=length(x))
ddir=deldir(x,y)
#print(ddir)
# Indices of the endpoints of each Delaunay/Dirichlet edge.
v1=ddir$dirsgs$ind1
v2=ddir$dirsgs$ind2
for(i in 1:nrow(ddir$dirsgs)){
if(fisher) pp=fisher.test(matrix(c(sum(tem[v1[i]]),sum(cas[v1[i]]),sum(tem[v2[i]]),sum(cas[v2[i]])),ncol=2))$p.value #case and controls of adjacents vertices could be from the same distribution?
else pp=1-abs(log((cas[v1[i]]/(cas[v1[i]]+tem[v1[i]]))/(cas[v2[i]]/(cas[v2[i]]+tem[v2[i]]))))
#else pp=1-abs((cas[v1[i]]/(cas[v1[i]]+tem[v1[i]]))-(cas[v2[i]]/(cas[v2[i]]+tem[v2[i]])))
m[v1[i],v2[i]]=pp
m[v2[i],v1[i]]=pp
}
# The log-ratio similarity can go negative; shift so the minimum is 0.
if(!fisher) m=m-min(m)
return(m)
}
# Spectral partitioning: cluster the rows of a symmetric similarity
# matrix into k groups via the graph Laplacian.
#
# m      symmetric similarity (affinity) matrix.
# k      number of clusters (default 2).
# norme  if TRUE, use the random-walk normalised Laplacian D^-1 (D - m).
# scaled if TRUE, standardise the eigenvector coordinates before k-means.
#
# Returns an integer vector of cluster labels, one per row of m.
spectralPartitionnement=function(m,k=2,norme=FALSE,scaled=FALSE){
  n_pts <- nrow(m)
  deg <- diag(rowSums(m))      # degree matrix
  lap <- deg - m               # unnormalised graph Laplacian
  if (norme) {
    lap <- solve(deg) %*% lap  # random-walk normalisation
  }
  # eigen() returns vectors ordered by decreasing eigenvalue, so the k
  # eigenvectors of the k smallest eigenvalues are the last k columns.
  evec <- eigen(lap)$vectors
  embedding <- evec[, (n_pts - k + 1):n_pts]
  if (scaled) {
    embedding <- scale(embedding)
  }
  kmeans(embedding, k)$cluster
}
###########
# example #
###########
# Simulate 800 random points with a raised case rate inside the square
# [0.3, 0.6] x [0.3, 0.6], then recover that square by spectral clustering
# of the Fisher-p-value Voronoi similarity matrix.
x=runif(800)
y=runif(800)
inCluster=((x>.3)&(x<.6)&(y>.3)&(y<.6))
obs=rpois(800,500)
# Baseline case probability 0.2, boosted to 0.35 inside the square.
cas=rbinom(800,obs,0.2+as.numeric(inCluster)*0.15)
m=getGNeighbourVoronoiMatrix(x,y,cas,obs-cas)
cl=spectralPartitionnement(m)
plot(x,y,col=rainbow(2)[cl])# if everything works, the cluster shows up in the middle
|
fa0a4f7f0e953e207efba55355dc2c3e86969eb3
|
1599eb68922f4f722b9920fd4f5b1d7be6b99947
|
/Assignments/Assignment 2/Machine learning/Weather Forecast/R/MLR.R
|
a7dd19eb86044b1797a70f0d329cd2fd06a8bc3d
|
[] |
no_license
|
chrisatrotter/INF5870-Energy-Informatics
|
782ce3d47dc70652b05cca9908c84d1bbc38f6f8
|
a8e1c69f263686c5516029b98a8842678c57d35c
|
refs/heads/master
| 2021-09-14T20:44:02.019179
| 2018-05-18T21:48:02
| 2018-05-18T21:48:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,974
|
r
|
MLR.R
|
# Wind-power forecasting with caret: train a simple linear regression
# (power ~ wind speed) and a multiple linear regression (power ~ wind
# speed + wind direction) on TrainData.csv, then predict on
# WeatherForecastInput.csv.

# --- Package setup -----------------------------------------------------------
packages <- c("caret")
missing_pkgs <- setdiff(packages, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
# Loading of packages
library(caret)

# --- File locations ----------------------------------------------------------
# NOTE(review): setwd(getwd()) is a no-op; kept only for parity with the
# original script.
setwd(getwd())
directory <- './data/'
forecast <- 'predicted forecast/'
forecast_model <- c('ForecastTemplate1-kNN.csv', 'ForecastTemplate1-aNN.csv', 'ForecastTemplate1-LR.csv', 'ForecastTemplate1-SVR.csv', 'ForecastTemplate2.csv')
solution_data_file <- 'Solution.csv'
training_data_file <- 'TrainData.csv'
weather_forecast_input_file <- 'WeatherForecastInput.csv'

# --- Load data ---------------------------------------------------------------
training_data <- read.csv(paste(directory, training_data_file, sep=""))
solution_data <- read.csv(paste(directory, solution_data_file, sep=""))
weather_forecast_input <- read.csv(paste(directory, weather_forecast_input_file, sep=""))

# --- Feature engineering -----------------------------------------------------
# Wind direction in degrees from the U/V wind components (meteorological
# convention: 270 - atan2(v, u), wrapped to [0, 360)).
WDIR <- (270-atan2(training_data$V10,training_data$U10)*180/pi)%% 360
training_data <- cbind(training_data, WDIR)

set.seed(333)

# --- Model training ----------------------------------------------------------
# Multiple linear regression: power ~ wind speed + wind direction.
model_mlr <- train(POWER ~ WS10 + WDIR, data = training_data, method = "lm")
model_mlr$coefnames
model_mlr$finalModel
# Simple linear regression baseline: power ~ wind speed only.
model_lr <- train(POWER ~ WS10, data = training_data, method = "lm")

# Derive wind direction for the forecast inputs as well.
WDIR <- (270-atan2(weather_forecast_input$V10,weather_forecast_input$U10)*180/pi)%% 360
weather_forecast_input <- cbind(weather_forecast_input, WDIR)

# --- Prediction --------------------------------------------------------------
# BUG FIX: the original computed prediction_mlr with predict(model_lr, ...)
# *before* model_lr was defined (a runtime error on a fresh session), and
# used the wrong model besides.  Use the MLR model for the MLR prediction.
prediction_mlr <- predict(model_mlr, newdata = weather_forecast_input)
prediction_lr <- predict(model_lr, newdata = weather_forecast_input)
# Root-mean-square error of a vector of residuals.
rmse <- function(error) {
  mean(error^2) |> sqrt()
}
# Report the RMSE of the MLR forecast against the known solution.
rmse(solution_data$POWER - prediction_mlr)
# Write the forecast to disk.
# NOTE(review): this writes prediction_lr (the single-regressor model) into
# forecast_model[5] = "ForecastTemplate2.csv"; confirm whether the MLR
# prediction was intended here instead.
write.table(data.frame(weather_forecast_input$TIMESTAMP, prediction_lr),
paste(directory, forecast, forecast_model[5], sep=""),
sep=",",
col.names= c("TIMESTAMP", "FORECAST"),
row.names = F)
# Plot the actual power series (red) against the LR forecast (green) and
# the MLR forecast (black) for November 2013.  Reads the globals
# prediction_lr, prediction_mlr and solution_data defined above.
plot_prediction <- function(){
  timestamps <- as.POSIXct(solution_data$TIMESTAMP, format = "%Y%m%d %H:%M", origin = "1970-01-01")
  plot_df <- data.frame(month = timestamps,
                        powers = solution_data$POWER,
                        predictionslr = prediction_lr,
                        predictionsmlr = prediction_mlr)
  ggplot(plot_df, aes(x = month)) +
    geom_line(aes(y = powers), na.rm = TRUE, color = "red", size = 1, alpha=1) +
    geom_line(aes(y = predictionslr), na.rm = TRUE, color = "green4", size = 1, alpha=1) +
    geom_line(aes(y = predictionsmlr), na.rm = TRUE, color = "Black", size = 1, alpha=1) +
    scale_x_datetime(date_breaks = "4 day", date_labels = "%b %d") +
    xlab("November 2013") +
    ylab("Power")
}
#plot_prediction()
|
33bf69d94abce328c25fda429521780021e1fcc0
|
3fb499152f285794975e3b296d44171f3a3f510c
|
/man/exist_package_useragent.Rd
|
986acde3d36a17b537ff4e6342e4d0e0d5901b2c
|
[
"MIT"
] |
permissive
|
ebbertd/existR
|
fed64de590e7a89dfe57854bef8033fe3172a4c2
|
e5d9bfb15fb21f0f269c5bc1ded748c6a8f04b54
|
refs/heads/master
| 2020-12-18T11:13:27.698242
| 2020-03-01T15:15:35
| 2020-03-01T15:15:35
| 235,359,399
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 469
|
rd
|
exist_package_useragent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exist_package_useragent.R
\name{exist_package_useragent}
\alias{exist_package_useragent}
\title{existR user agent}
\usage{
exist_package_useragent()
}
\value{
A character string containing the user agent used in API calls generate with the existR package.
}
\description{
This function returns the user agent used in API calls of the existR package.
}
\examples{
exist_package_useragent()
}
|
c3314cfca7de2c19ed3bd18cd10f166fd340881f
|
187842c58b7690395eb7405842ac28bc4cafd718
|
/R/logLik.nlstac.R
|
ff502a466d8a032c5a27179b0b90f0d61d7cb6d4
|
[] |
no_license
|
cran/nlstac
|
d5e38b819795e2862e1b8c7e3e94d0a9af8fbc2f
|
298e5206c29e091929eb76091ba2cb67a22e8316
|
refs/heads/master
| 2023-04-13T16:09:33.706071
| 2023-04-11T14:20:02
| 2023-04-11T14:20:02
| 310,516,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
r
|
logLik.nlstac.R
|
#' @title Extract Log-Likelihood from a nlstac Model
#' @description Returns the log-likelihood value from an object returned by a nlstac model fit.
#'
#' @param object An object of class \code{"nlstac"} obtained by the \code{nls_tac} function.
#' @param ... Ignored, for compatibility issues.
#'
#'
#' @return A single numeric value for the log-likelihood of the model
#' @author
#' \strong{Mariano Rodríguez-Arias} (\email{arias@@unex.es}).
#' \emph{Deptartment of Mathematics}
#'
#' \strong{Juan Antonio Fernández Torvisco} (\email{jfernandck@@alumnos.unex.es}).
#' \emph{Department of Mathematics}
#'
#' University of Extremadura (Spain)
#'
#' \strong{Rafael Benítez} (\email{rafael.suarez@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' University of Valencia (Spain)
#'
#' @method logLik nlstac
#' @export
#'
logLik.nlstac <- function(object, ...){
  # First column of the residual matrix holds the fit residuals.
  res <- as.numeric(object$resid[, 1])
  N <- length(res)
  # Gaussian log-likelihood evaluated at the ML estimate sigma^2 = RSS/N:
  #   logLik = -(N/2) * (log(2*pi) + 1 - log(N) + log(RSS))
  # The original carried a unit-weight bookkeeping block copied from
  # stats:::logLik.nls; with all weights equal to 1 it contributed exactly
  # zero (sum(log(1)) == 0, N unchanged), so it has been removed.
  val <- -N * (log(2 * pi) + 1 - log(N) + log(sum(res^2))) / 2
  # Degrees of freedom: one per coefficient plus one for sigma.
  attr(val, "df") <- 1L + length(coef(object))
  attr(val, "nobs") <- attr(val, "nall") <- N
  class(val) <- "logLik"
  val
}
|
0f0d242e172e03ad1456f77e0b4295e4df6f64b0
|
3ce6ad4a9778e0b36472ffc1b969d86279089e1a
|
/general_utilities/userPrompt_IF.R
|
fc26c5af2dd264153dafe9a2e7561472a328a2ad
|
[] |
no_license
|
FlicAnderson/Scriptbox
|
25e7dc097b61fea18d384319cc77a838ea251ddf
|
147322a52cdd77e8fd43409de37dc696b0bae2aa
|
refs/heads/master
| 2020-05-21T20:06:46.132456
| 2019-07-02T13:41:51
| 2019-07-02T13:41:51
| 21,389,537
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
userPrompt_IF.R
|
# Padme Data:: Databasin:: userPrompt_IF.R
# ========================================================
# (2nd June 2014)
# standalone script
# AIM: require information from the user and carry out other functions
# dependent on the result (the example adds two numbers)

# Prompt the user for two comma-separated numeric values and return their
# element-wise sum.  Input is collected via readline(), so in a
# non-interactive session both prompts return empty strings.
fun <- function(){
  print("This function adds together number values of x and y that you give it...")
  raw_x <- readline("what is the value of x? ")
  raw_y <- readline("what is the value of y? ")
  nums_x <- as.numeric(unlist(strsplit(raw_x, ",")))
  nums_y <- as.numeric(unlist(strsplit(raw_y, ",")))
  nums_x + nums_y
}

## uncomment the below if you're running it interactive() == TRUE
## if interactive() == FALSE, then it will ask for the x and y inputs but won't return the result.
#if(interactive())fun()

# carry out functions dependent on result
# (UNFINISHED)
|
69676ba736727dca3d30a9e08695402d729325f3
|
0ab0221f9d99796cf31247e18085bc5e39fb37a7
|
/data-raw/1_organize_data.R
|
f9f4d7fccbc7386447c878de8e27e620790f585a
|
[] |
no_license
|
stamnosslin/alexis106
|
730dd81d739cec32b00c964fdf6a509b5d9a0d53
|
d38532a0acf0fe11bb00074e0657640672dbc684
|
refs/heads/master
| 2021-01-19T07:29:52.451839
| 2017-04-11T16:08:08
| 2017-04-11T16:08:08
| 87,546,417
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,806
|
r
|
1_organize_data.R
|
# ALEXIS 106 Psychoacoustic experiment with blind and sighted listeners
# This script reads data from the raw data file and calculates thresholds
# 'alexis106_data.txt'. This script imports background data and combines part of
# it with the threshold data. It also defines experimental groups.
#
# MN: 2016-12-30
# Load threshold data (defines the data frame `mth`) -------------------------
source('./data-raw/0_get_individual_thresholds.R')
#-------------------------------------------------------------------------------
# Import background data -------------------------------------------------------
bg <- read.table("./data-raw/alexis106_background_cleaned.txt",
header = TRUE, sep = ',', stringsAsFactors = FALSE)
# ------------------------------------------------------------------------------
# Fixing date variables --------------------------------------------------------
bg$birthdate <- as.Date(bg$birthdate, format = "%Y-%m-%d")
bg$testdate <- as.Date(bg$testdate, format = "%Y-%m-%d")
# NOTE(review): dividing by 365 ignores leap years; fine for an approximate
# age in years.
bg$age_y <- as.numeric(difftime(bg$testdate, bg$birthdate, units = 'days')) / 365 # Age [y]
# Make factor variables
bg$sex <- factor(bg$sex, levels = c(1, 2), labels = c('female', 'male'))
bg$echolocator <- factor(bg$echolocator, levels = c(1, 2, 3, 4),
labels = c('never', 'sometimes', 'often', 'always'))
# ------------------------------------------------------------------------------
# Define groups in mth and bg: participant IDs are assigned by hand to the
# young / matched / blind groups; anyone not listed ends up "unmatched"
# and is dropped further down.
# Note: 1039 lack 3D-sight! + other visual problems, excluded from young group
# (below 1039 up in the to be excuded unmatch group)
young <- c(1002, 1003, 1004, 1006, 1007, 1008, 1009, 1011, 1012, 1014,
1015, 1017, 1018, 1019, 1021, 1022, 1023, 1024, 1026, 1027,
1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037,
1038, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048)
matched <- c(1010, 1013, 1016, 1020, 2001, 2002, 2003, 2050, 2051, 2052,
2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062,
2063, 2064, 2065, 2067, 2070, 2071)
blind <- c(6001, 6002, 6003, 6010, 6013, 6016, 6020, 6050, 6051, 6052,
6053, 6054, 6055, 6056, 6057, 6058, 6059, 6060, 6061, 6062,
6063, 6064, 6065, 6067, 6070, 6071)
unmatched <- setdiff(unique(mth$id), c(young, matched, blind))
# Add groups to mth (threshold data): membership flags encode the group as
# 0/1/2/3, which the factor() call below labels.
mth$group <- 1 * is.element(mth$id, young) + 2 * is.element(mth$id, matched) +
3 * is.element(mth$id, blind)
mth$group <- factor(mth$group, levels = c(0, 1, 2, 3),
labels = c('unmatched', 'young', 'matched', 'blind'))
# Add groups to bg (background data)
bg$group <- 1 * is.element(bg$id, young) + 2 * is.element(bg$id, matched) +
3 * is.element(bg$id, blind)
bg$group <- factor(bg$group, levels = c(0, 1, 2, 3),
labels = c('unmatched', 'young', 'matched', 'blind'))
# ------------------------------------------------------------------------------
# Create variable with matched-pair numbers in mth: the last two digits of
# the ID pair a matched sighted listener with a blind listener (e.g. 2050
# pairs with 6050); young (unpaired) participants get pair number 0.
mth$pairs <- as.character(mth$id)
mth$pairs <- substr(mth$pairs, 3, 4) # Keep two last digits only
mth$pairs <- as.numeric(mth$pairs)
mth$pairs[mth$group == 'young'] <- 0 # Set non-matched individuals to 0
table(mth$pairs) # Check that it worked
# ------------------------------------------------------------------------------
# Merge mth and bg into new data frame -----------------------------------------
bg$id2 <- bg$id # Add id-variable as a security check
# This merges mth and (part of) bg into a new data frame called d
d <- merge(mth, bg, by = 'id')
d$id - d$id2 # This is the check: Should be all zeros
d$id2 <- NULL # Delete d$id2 (not necessary)
d$group.y <- NULL # remove duplicate group variable
# remove birth and testdate
d$birthdate <- NULL
d$testdate <- NULL
# Rename two columns
dcols <- colnames(d)
dcols[dcols == "group.x"] <- "group" # Rename group.x to group
dcols[dcols == "age_y"] <- "age" # Rename age_y to age
colnames(d) <- dcols
# ------------------------------------------------------------------------------
# Exclude unmatched participants from data frame -------------------------------
d <- d[d$group != 'unmatched', ]
d$group <- droplevels(d$group) # Drop level 'unmatched' from factor
rownames(d) <- NULL # Drop rownames
# ------------------------------------------------------------------------------
# Remove objects from global environment, keeping only the final data
# frame `d` ---------------------------------------------------------------
z <- ls()
z <- c(z[z != 'd'], 'z')
rm(list = z)
# ------------------------------------------------------------------------------
table(d$pairs)
|
540d31354a916dfdd82df52da4399a21fbab899c
|
f89c50f72976d4dea4e068bdfa3b61e2b4d1e5f1
|
/R_analyses/03_2019_data_angels_rental/code/LA City-from USC Socrata Data.R
|
0227b172fb2939696ab2475f5147f2716965e356
|
[
"MIT"
] |
permissive
|
ahla-brfoley/abundanthousingla
|
5e48ef959531b3347dec1964b62699055fb9239c
|
d4f6d7c44a8633a83251af80a156a07654b78609
|
refs/heads/master
| 2021-03-10T09:08:00.356810
| 2020-08-12T18:52:13
| 2020-08-12T18:52:13
| 246,441,489
| 0
| 0
|
MIT
| 2020-05-15T18:56:50
| 2020-03-11T00:56:14
|
Rebol
|
UTF-8
|
R
| false
| false
| 19,247
|
r
|
LA City-from USC Socrata Data.R
|
#LA City Data
library(tidycensus)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(viridisLite)
library(viridis)
library(tigris)
library(sp)
library(crosstalk)
library(leaflet)
library(leaflet.extras)
library(shiny)
library(shinyWidgets)
library(rsconnect)
setwd("/Users/alfonsoberumen/Desktop/City Files/SOCRATA")
###############
#spatial data
###############
# Census-tract polygons for Los Angeles County (tigris); cb=TRUE uses the
# generalized cartographic-boundary files.
tracts <- tracts(state = 'CA', county = "Los Angeles",cb=TRUE)
#######################
#income data-detailed
#######################
income_detailed<-read.csv("Income__LA_.csv",header=TRUE)
table(income_detailed$Year)#2010 to 2016
# NOTE(review): GEOID is a scratch frequency table, overwritten for each
# dataset below — apparently kept for interactive inspection only.
GEOID<-as.data.frame(table(income_detailed$GEOID))
# Strip thousands separators so Amount is numeric.
income_detailed<-income_detailed %>%
mutate(Amount=as.numeric(gsub(",","",Amount)))
# Join the table onto the tract polygons; duplicateGeoms repeats geometry
# for each (tract, year) row.
income_detailed_merged<-sp::merge(tracts,
income_detailed,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#######################
#rent data-detailed
#######################
rent<-read.csv("Rent_Price__LA_.csv",header=TRUE)
table(rent$Year)#2010 to 2016
GEOID<-as.data.frame(table(rent$GEOID))
rent<-rent %>%
mutate(Amount=as.numeric(gsub(",","",Amount)))
rent_merged<-sp::merge(tracts,
rent,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#######################
#fraction calculation
#######################
# Rent burden proxy: monthly rent over monthly income (annual Amount / 12),
# capped at 1 and expressed as a percentage.
rent_lim<-rent %>%
select(c("GEOID","Year","Amount"))%>%
rename(Rent_Amount=Amount)
merged_income_rent<-income_detailed %>%
inner_join(rent_lim, by=c("GEOID","Year")) %>%
mutate(Proportion_Rent_Income=(Rent_Amount/(Amount/12))) %>%
mutate(Proportion_Rent_Income=ifelse(Proportion_Rent_Income>1,
1.0,Proportion_Rent_Income)) %>%
mutate(Proportion_Rent_Income=Proportion_Rent_Income*100)
merged_income_rent<-sp::merge(tracts,
merged_income_rent,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#########################
#rent burdened population
#########################
burden<-read.csv("Rent_Burden__LA_.csv",header=TRUE)
table(burden$Year)#2010 to 2017
GEOID<-as.data.frame(table(burden$GEOID))
#burden<-burden %>%
#mutate(Amount=as.numeric(gsub(",","",Amount)))
burden_merged<-sp::merge(tracts,
burden,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#########################
#Race/Ethinicity
#########################
race<-read.csv("Race___Ethnicity__LA_.csv",header=TRUE)
# NOTE(review): this inspects burden$Year, not race$Year — probably a
# copy/paste slip; verify which dataset was meant.
table(burden$Year)#2010 to 2016
GEOID<-as.data.frame(table(race$GEOID))
#check sum: per-tract percentages should total 100
race_check_sum <-race %>%
group_by(GEOID,Neighborhood,Tract,Tract.Number,Year)%>%
summarise(Percent = sum(Percent))
# Keep, for each tract/year, only the race with the largest share
# ("majority race") for mapping.
race_max<-race %>%
mutate(Count=as.numeric(Count)) %>%
group_by(GEOID,Neighborhood,Tract,Tract.Number,Year) %>%
slice(which.max(Percent))
#burden<-burden %>%
#mutate(Amount=as.numeric(gsub(",","",Amount)))
race_max_merged<-sp::merge(tracts,
race_max,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#########################
#Employment
#########################
employ<-read.csv("Employment__LA_.csv",header=TRUE)
table(employ$Year)#2010 to 2017
GEOID<-as.data.frame(table(employ$GEOID))
table(employ$Variable)
#check sum
employ_check_sum <-employ %>%
group_by(GEOID,Neighborhood,Tract,Tract.Number,Year)%>%
summarise(Percent = sum(Percent))
#just keep unemployment rate rows for mapping
unemploy<-employ %>%
filter(Variable=="Unemployment Rate")
unemploy_merged<-sp::merge(tracts,
unemploy,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#########################
#Subsidized Housing
#########################
sub<-read.csv("Subsidized_Housing__LA_.csv",header=TRUE)
table(sub$Year)#2012 to 2017
GEOID<-as.data.frame(table(sub$GEOID))
table(sub$Variable)
# Sum unit counts across all Variable categories to a per-tract/year total.
sub <-sub %>%
mutate(Count=as.numeric(Count))
sub_totalunits <-sub %>%
group_by(GEOID,Neighborhood,Tract,Tract.Number,Year)%>%
summarise(Count = sum(Count))
sub_totalunits_merged<-sp::merge(tracts,
sub_totalunits,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#########################
#Overcrowding
#########################
over<-read.csv("Overcrowding__LA_.csv",header=TRUE)
table(over$Year)#2010 to 2017
GEOID<-as.data.frame(table(over$GEOID))
table(over$Variable)#Includes only number of overcrowded houses
over_merged<-sp::merge(tracts,
over,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#########################
#Housing Stability
#########################
stab<-read.csv("Housing_Stability__LA_.csv",header=TRUE)
table(stab$Year)#2010 to 2017
GEOID<-as.data.frame(table(stab$GEOID))
table(stab$Variable)#Includes only number of stable HHs
stab_merged<-sp::merge(tracts,
stab,
by.x="AFFGEOID",
by.y="GEOID",
duplicateGeoms = T)
#####################################
#####################################
#create palattes
#####################################
#####################################
# Summaries of each mapped variable — used to eyeball sensible fixed
# palette domains below.
summary(rent_merged$Amount)
summary(income_detailed_merged$Amount)
summary(merged_income_rent$Proportion_Rent_Income)
summary(burden_merged$Percent)
summary(unemploy_merged$Percent)
summary(sub_totalunits_merged$Count)
summary(over_merged$Percent)
summary(stab_merged$Percent)
# Leaflet color palettes (viridis "magma").  Domains are hard-coded from
# the summaries above; values outside the domain will render as NA color.
#rent palatte
pal_rent <-
colorNumeric(palette = "magma",
domain=c(0,5000))
#income palatte
pal_detail <-
colorNumeric(palette = "magma",
domain=c(0,300000))
#proportion palatte
pal_prop <-
colorNumeric(palette = "magma",
domain=c(0,100))
#burden palatte
pal_burden <-
colorNumeric(palette = "magma",
domain=c(0,100))
#race palatte: categorical palette over the observed race categories
pal_race <-
colorFactor(palette = "magma",
domain=race_max_merged$Variable)
#unemploy palatte
pal_unemploy <-
colorNumeric(palette = "magma",
domain=c(0,100))
#sub palatte: quartile bins, since unit counts are heavily skewed
pal_sub <-
colorQuantile(palette = "magma",
domain=sub_totalunits_merged$Count,
n=4)
#over palatte
pal_over <-
colorNumeric(palette = "magma",
domain=c(0,100))
#stab palatte
pal_stab <-
colorNumeric(palette = "magma",
domain=c(0,100))
#####################################
#####################################
#CREATE MAPS
#####################################
#####################################
# Shiny UI: one leaflet map per tab, plus a Year selector and a
# multi-select Neighborhood picker shared by all tabs.
ui<-shinyUI(fluidPage(titlePanel("Author: Alfonso Berumen,
City of Los Angeles: Data Angels"),
mainPanel(h1("American Community Survey, 5-year estimates"),
h2("Periods: 2006-10,
2007-11,
2008-12,
2009-13,
2010-14,
2011-15,
2012-16,
2013-17"),
h3("Census Tract"),
tabsetPanel(
tabPanel("Rent",leafletOutput("leafmap_rent")),
tabPanel("Income",leafletOutput("leafmap_income")),
tabPanel("Proportion: Rent/Income",
leafletOutput("leafmap_prop")),
tabPanel("Unemployment Rate",
leafletOutput("leafmap_unemploy")),
tabPanel("Proportion: Rent Burdened",
leafletOutput("leafmap_burden")),
tabPanel("Proportion: Stable HHs",
leafletOutput("leafmap_stab")),
tabPanel("Count: Total Subsidized Units",
leafletOutput("leafmap_sub")),
tabPanel("Proportion: Overcrowded",
leafletOutput("leafmap_over")),
tabPanel("Race: Majority",
leafletOutput("leafmap_race")))),
selectInput(inputId = "Year", "Year",
choices = list(2010,
2011,
2012,
2013,
2014,
2015,
2016,
2017)),
pickerInput("Neighborhood", "Choose a Neighborhood:",
choices=levels(unique((rent_merged$Neighborhood))),
options = list(`actions-box` = TRUE),
multiple = T)
))
# Shiny server: each output is a leaflet choropleth built the same way —
# subset the merged SpatialPolygonsDataFrame to the selected Year and
# Neighborhood(s), draw tract polygons colored by the mapped variable with
# a click popup, and attach a legend.
# NOTE(review): the nine render blocks are near-identical and could be
# factored through a helper; left as-is to avoid touching reactivity.
server<-function(input, output) {
#rent: median rent per tract
output$leafmap_rent = renderLeaflet({
rent_subset<-subset(rent_merged,
(rent_merged$Year == input$Year)
&
as.factor(rent_merged$Neighborhood) %in% input$Neighborhood)
leaflet(rent_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",rent_subset$Neighborhood,"<br>",
"Tract: ",rent_subset$Tract,"<br>",
"Median Rent ($): ",rent_subset$Amount),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_rent(rent_subset$Amount),
data = rent_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_rent,
values = ~ rent_subset$Amount,
title = "Median Rent",
opacity = 1)
})
#income: median household income per tract
output$leafmap_income = renderLeaflet({
income_detailed_subset<-subset(income_detailed_merged,
(income_detailed_merged$Year == input$Year)
&
as.factor(income_detailed_merged$Neighborhood) %in% input$Neighborhood)
leaflet(income_detailed_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",income_detailed_subset$Neighborhood,"<br>",
"Tract: ",income_detailed_subset$Tract,
"Median Income ($): ",income_detailed_subset$Amount),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_detail(Amount),
data = income_detailed_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_detail,
values = ~ Amount,
title = "Median HH Income",
opacity = 1)
})
#prop: rent as a share of monthly income
output$leafmap_prop = renderLeaflet({
merged_subset<-subset(merged_income_rent,
(merged_income_rent$Year == input$Year)
&
as.factor(merged_income_rent$Neighborhood) %in% input$Neighborhood)
leaflet(merged_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",merged_subset$Neighborhood,"<br>",
"Tract: ",merged_subset$Tract,"<br>",
"Proportion (%): ",merged_subset$Proportion_Rent_Income),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_prop(Proportion_Rent_Income),
data = merged_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_prop,
values = ~ Proportion_Rent_Income,
title = "Proportion (%) = Median Rent/Median HH Income",
opacity = 1)
})
#unemployment rate
output$leafmap_unemploy = renderLeaflet({
unemploy_subset<-subset(unemploy_merged,
(unemploy_merged$Year == input$Year)
&
as.factor(unemploy_merged$Neighborhood) %in% input$Neighborhood)
leaflet(unemploy_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",unemploy_subset$Neighborhood,"<br>",
"Tract: ",unemploy_subset$Tract,"<br>",
"Percent (%): ",unemploy_subset$Percent),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_unemploy(Percent),
data = unemploy_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_unemploy,
values = ~ Percent,
title = "Unemployment Rate",
opacity = 1)
})
#stable: share of households that did not move
output$leafmap_stab = renderLeaflet({
stab_subset<-subset(stab_merged,
(stab_merged$Year == input$Year)
&
as.factor(stab_merged$Neighborhood) %in% input$Neighborhood)
leaflet(stab_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",stab_subset$Neighborhood,"<br>",
"Tract: ",stab_subset$Tract,"<br>",
"Percent (%): ",stab_subset$Percent,"<br>",
"Stable HHs: ",stab_subset$Count,"<br>",
"Total HHs: ",stab_subset$Denominator,"<br>"),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_stab(Percent),
data = stab_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_stab,
values = ~ Percent,
title = "Percent of HHs Stable",
opacity = 1)
})
#burden: share of renters paying > 30% of income
output$leafmap_burden = renderLeaflet({
burden_subset<-subset(burden_merged,
(burden_merged$Year == input$Year)
&
as.factor(burden_merged$Neighborhood) %in% input$Neighborhood)
leaflet(burden_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",burden_subset$Neighborhood,"<br>",
"Tract: ",burden_subset$Tract,"<br>",
"Percent (%): ",burden_subset$Percent,"<br>",
"Burdened HHs: ",burden_subset$Count,"<br>",
"Total HHs: ",burden_subset$Denominator,"<br>"),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_burden(Percent),
data = burden_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_burden,
values = ~ Percent,
title = "Percent of Renters Paying > 30% of Income",
opacity = 1)
})
#subsidized housing: total units, quartile-binned palette
output$leafmap_sub = renderLeaflet({
sub_subset<-subset(sub_totalunits_merged,
(sub_totalunits_merged$Year == input$Year)
&
as.factor(sub_totalunits_merged$Neighborhood) %in% input$Neighborhood)
leaflet(sub_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",sub_subset$Neighborhood,"<br>",
"Tract: ",sub_subset$Tract,"<br>",
"Units: ",sub_subset$Count),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_sub(Count),
data = sub_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_sub,
values = ~ Count,
title = "Total Units: Quartile",
opacity = 1)
})
#over: share of overcrowded households
output$leafmap_over = renderLeaflet({
over_subset<-subset(over_merged,
(over_merged$Year == input$Year)
&
as.factor(over_merged$Neighborhood) %in% input$Neighborhood)
leaflet(over_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",over_subset$Neighborhood,"<br>",
"Tract: ",over_subset$Tract,"<br>",
"Percent (%): ",over_subset$Percent,"<br>",
"Overcrowded HHs: ",over_subset$Count,"<br>",
"Total HHs: ",over_subset$Denominator,"<br>"),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_over(Percent),
data = over_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_over,
values = ~ Percent,
title = "Percent of HHs Overcrowded",
opacity = 1)
})
#race: majority race/ethnicity per tract (categorical palette)
output$leafmap_race = renderLeaflet({
race_subset<-subset(race_max_merged,
(race_max_merged$Year == input$Year)
&
as.factor(race_max_merged$Neighborhood) %in% input$Neighborhood)
leaflet(race_subset,width = "100%") %>%
addPolygons(popup = paste("Neighborhood: ",race_subset$Neighborhood,"<br>",
"Tract: ",race_subset$Tract,"<br>",
"Percent (%): ",race_subset$Percent,"<br>",
"HHs: ",race_subset$Count),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal_race(Variable),
data = race_subset)%>%
addProviderTiles(provider = "CartoDB.Positron") %>%
addLegend("bottomleft",
pal = pal_race,
values = ~ Variable,
title = "Majority Race",
opacity = 1)
})
}
shinyApp(ui,server)
#check<-data.frame(table(rent_merged$Neighborhood))
#check2<-data.frame(table(income_detailed_merged$Neighborhood))
#check3<-data.frame(table(burden$Neighborhood))
|
5b3a5554e67486c2843ea51723c3e6ccb82710db
|
1ef15b94cd32444ac5640ecdd08d6667638aca8c
|
/R/plot_map4.R
|
e28524fb38dbba64b2f3e85d0cb72582ce896e98
|
[] |
no_license
|
martinabuck/rbeni
|
c27dc67391ba9d72dd75ddf1d567be3537ae662d
|
41165bbeb2cef826b92e4b2815e8ce70a123d440
|
refs/heads/master
| 2023-08-11T16:19:31.164750
| 2021-10-11T16:46:13
| 2021-10-11T16:46:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,364
|
r
|
plot_map4.R
|
#' Plot a nice map with ggplot.
#'
#' Returns a cowplot object for a global map plot.
#'
#' @param obj An object, either a \code{RasterBrick} (returned from a \code{raster::brick()} function call),
#' or a list returned from a \code{rbeni::read_nc_onefile()} function call.
#' @param nbin An integer specifying the number of bins used for the color key.
#' @param maxval A numeric value specifying the maximum value for which the color key is to be extended. Defaults
#' to \code{NA} (the 99\% quantile of values is used).
#' @param breaks A numeric vector specifying the breaks for the color scale. Defaults to \code{NA}, i.e. breaks
#' are determined automatically based on \code{nbin} and \code{maxval}.
#' @param lonmin Left edge (longitude, in degrees), defaults to -180.
#' @param lonmax Right edge (longitude, in degrees), defaults to 180.
#' @param latmin Lower edge (latitude, in degrees), defaults to -90.
#' @param latmax Upper edge (latitude, in degrees), defaults to 90.
#' @param plot_title A character string specifying the plot title
#' @param plot_subtitle A character string specifying the plot subtitle
#' @param legend_title A character string specifying the legend title (annotation above the color key)
#' @param legend_direction Either \code{"vertical"} (default) or \code{"horizontal"}.
#' @param colorscale Either function that returns a set of colors or a vector of color names from which to interpolate.
#' Defaults to \code{virids::viridis}.
#' @param do_reproj A boolean specifying whether to re-project the map to Robin projection
#' @param hillshade A logical specifying whether a hillshade layer should be added. Defaults to \code{FALSE}.
#' @param rivers A logical specifying whether to display rivers (the \code{ne_50m_rivers_lake_centerlines} layer from NaturalEarth.). Defaults to \code{FALSE}.
#' @param lakes A logical specifying whether to display rivers (the \code{ne_50m_lakes} layer from NaturalEarth). Defaults to \code{FALSE}.
#' @param coast A logical specifying whether to display coastlines (the \code{ne_50m_coastline} layer from NaturalEarth). Defaults to \code{TRUE}.
#' @param scale A character string specifying the scale of geo layers (coast, rivers, lakes). One of \code{"low", "medium", "high"}.
#' NaturalEarth layers for 110, 50, 10 m are used for low, medium, and high resolution (scale) layers, respectively. Defaults to \code{"low"}.
#' @param countries A logical specifying whether to display country borders (the \code{ne_50m_admin_0_countries} layer from NaturalEarth). Defaults to \code{FALSE}.
#' @param states A logical specifying whether to display sub-country administrative borders (e.g. US states) (the \code{ne_50m_admin_1_states_provinces} layer from NaturalEarth). Defaults to \code{FALSE}.
#' @param make_discrete A logical scpecifying whether data layer is to be made discrete for plotting with colors
#' of discrete bins. Defaults to \code{TRUE}.
#' @param combine A boolean specifying whether the map and the colorscale should be combined using cowplot.
#' Defaults to \code{TRUE}. If \code{FALSE}, a list of elements are retruned, where elements are the ggplot2 plot object
#' and the coloscale object returned by the call to \link{plot_discrete_cbar}.
#' @param varnam If \code{obj} is a rbeni-nc object (returned by \code{read_nc_onefile()}), \code{varnam} must be
#' provided (a character string specifying the variable name in \code{obj$vars[[varnam]]}).
#'
#' @return A ggplot object for a global map plot.
#' @export
#'
plot_map4 <- function(obj, maxval = NA, breaks = NA, lonmin = -180, lonmax = 180, latmin = -90, latmax = 90,
                      nbin = 10, legend_title = waiver(), legend_direction = "vertical",
                      colorscale = viridis::viridis, do_reproj = FALSE,
                      hillshade = FALSE, rivers = FALSE, lakes = FALSE, coast = TRUE, countries = FALSE,
                      states = FALSE, scale = "low", make_discrete = TRUE,
                      plot_title = waiver(), plot_subtitle = waiver(), combine = TRUE, varnam = NULL, ...){

  library(rnaturalearth)
  library(sp)
  library(sf)
  library(ggplot2)
  library(rgdal)
  library(raster)

  ## Geo layers follow https://downwithtime.wordpress.com/2013/12/04/naturalearthdata-and-r-in-ggplot2/
  ## Translate the scale keyword into the NaturalEarth resolution suffix
  ## (110 m = low, 50 m = medium, 10 m = high).
  res <- ifelse(scale == "low", "110", ifelse(scale == "medium", "50", ifelse(scale == "high", "10", NA)))

  ## Load NaturalEarth layers lazily: a layer is skipped when an object of
  ## the same name already exists (e.g. pre-loaded by the caller).
  if (!exists("raster_shade") && hillshade) raster_shade <- raster::stack(paste0("~/data/naturalearth/SR_50M/SR_50M.tif"))
  if (!exists("layer_lakes") && lakes) layer_lakes <- readOGR(paste0("~/data/naturalearth/ne_", res, "m_lakes/ne_", res, "m_lakes.shp"), paste0("ne_", res, "m_lakes"))
  if (!exists("layer_rivers") && rivers) layer_rivers <- readOGR(paste0("~/data/naturalearth/ne_", res, "m_rivers_lake_centerlines/ne_", res, "m_rivers_lake_centerlines.shp"), paste0("ne_", res, "m_rivers_lake_centerlines"))
  if (!exists("layer_coast") && coast) layer_coast <- readOGR(paste0("~/data/naturalearth/ne_", res, "m_coastline/ne_", res, "m_coastline.shp"), paste0("ne_", res, "m_coastline"))
  ## BUGFIX: this layer was previously assigned to 'layer_country' but
  ## consumed below as 'layer_countries', so countries = TRUE always failed
  ## with "object 'layer_countries' not found". Use one consistent name.
  ## NOTE(review): the "m_countryline" directory name does not follow the
  ## pattern of the other layers ("..._admin_0_countries") -- confirm it
  ## matches the local ~/data/naturalearth layout.
  if (!exists("layer_countries") && countries) layer_countries <- readOGR(paste0("~/data/naturalearth/ne_", res, "m_countryline/ne_", res, "m_admin_0_countries.shp"), paste0("ne_", res, "m_admin_0_countries"))
  if (!exists("layer_states") && states) layer_states <- readOGR(paste0("~/data/naturalearth/ne_", res, "m_admin_1_states_provinces/ne_", res, "m_admin_1_states_provinces.shp"), paste0("ne_", res, "m_admin_1_states_provinces"))

  ##---------------------------------------------
  ## Interpret 'obj': accept a file path, a matrix, a RasterBrick, an
  ## rbeni-nc object, or a data frame, and normalise everything to a data
  ## frame with columns x (lon), y (lat), and the plotted variable.
  ##---------------------------------------------
  if (identical(class(obj), "character")){

    ## A file path: read as a raster brick.
    rasta <- raster::brick(obj)

    ## Optionally re-project to a Robinson projection.
    if (do_reproj){
      crs(rasta) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
      rasta_reproj <- projectRaster(rasta, crs=CRS("+proj=robin"))
    } else {
      rasta_reproj <- rasta
    }

    ## Convert the first time step to a data frame for ggplot.
    tstep <- 1
    df <- as(rasta_reproj[[tstep]], "SpatialPixelsDataFrame")
    df <- as.data.frame(df)
    names(df) <- c("layer", "x", "y")

  } else if (identical(class(obj), "matrix")){

    ## A bare matrix: infer the grid (0.5 or 1 degree) from its dimensions,
    ## transposing when lat comes first.
    if (length(dim(obj))==2){
      if (dim(obj)[1] == 720 && dim(obj)[2]==360){
        grid <- "halfdeg"
      } else if (dim(obj)[1] == 360 && dim(obj)[2]==720){
        obj <- t(obj)
        grid <- "halfdeg"
      } else if (dim(obj)[1] == 360 && dim(obj)[2]==180){
        grid <- "1x1deg"
      } else if (dim(obj)[1] == 180 && dim(obj)[2]==360){
        obj <- t(obj)
        grid <- "1x1deg"
      }

      ## NOTE(review): if none of the cases above matched, 'grid' is
      ## undefined here and the comparison below errors -- consider an
      ## explicit abort for unsupported matrix dimensions.
      if (grid=="halfdeg"){
        lon <- seq(from = -179.75, to = 179.75, by = 0.5)
        lat <- seq(from = -89.75, to = 89.75, by = 0.5)
      } else if (grid=="1x1deg"){
        ## NOTE(review): cell centers of a 1-degree grid would normally be
        ## -179.5 .. 179.5; these vectors are currently unused downstream
        ## (grid_to_df() derives coordinates itself) -- verify before use.
        lon <- seq(from = -179.75, to = 179.75, by = 1)
        lat <- seq(from = -89.75, to = 89.75, by = 1)
      } else {
        rlang::warn("Dimensions not identified")
        lon <- seq(dim(obj)[1])
        lat <- seq(dim(obj)[2])
      }
      df <- grid_to_df(obj, grid = grid, dropna = FALSE) %>%
        setNames(c("x", "y", "layer"))
    } else {
      rlang::abort("Aborted. Argument obj is a matrix but does not have two dimensions.")
    }

  } else if (identical(class(obj)[[1]], "RasterBrick")){

    ## A RasterBrick: convert directly to a data frame.
    df <- as(obj, "SpatialPixelsDataFrame")
    df <- as.data.frame(df)
    names(df) <- c("layer", "x", "y")

  } else if (is.element("vars", ls(obj)) && is.element("lat", ls(obj)) && is.element("lon", ls(obj))){

    ## An rbeni-nc object (returned by read_nc_onefile()): 'varnam' selects
    ## the variable to plot.
    if (is.null(varnam)) rlang::abort("Error: provide the variable name to be plotted as argument varnam.")
    df <- nc_to_df(obj, varnam = varnam) %>%
      dplyr::rename(x=lon, y=lat)

  } else if (is.data.frame(obj)){

    ## Already a data frame: crop to the requested lon/lat window.
    df <- as_tibble(obj) %>%
      dplyr::filter(lon > lonmin & lon < lonmax & lat > latmin & lat < latmax) %>%
      rename(x=lon, y=lat)

  }

  ## If more than one variable is available, 'varnam' is required.
  if (is.null(varnam)){
    varnam <- names(df %>% dplyr::select(-x, -y))
    if (length(varnam) > 1){
      rlang::abort(paste("Aborting. Argument varnam not provided and more than one variable available. Which one to take?"))
    }
  }

  ## Reduce to the plotted variable and standardise column names.
  df <- df %>%
    dplyr::select(x, y, !!varnam) %>%
    setNames(c("x", "y", "layer"))

  ##---------------------------------------------
  ## Bin data for the discrete color scale
  ##---------------------------------------------
  toptriangle <- FALSE
  bottomtriangle <- FALSE

  if (identical(NA, breaks)){
    breaks <- scales::pretty_breaks(n = nbin)(df$layer)
    rlang::warn("Overwriting nbin after defining breaks with scales::pretty_breaks().")
    nbin <- length(breaks) - 1
  } else {
    nbin <- length(breaks) - 1
  }

  ## Keep the full break set (incl. +-Inf) for the color bar below.
  breaks_with <- breaks

  ## Infinite outer breaks are rendered as triangles on the color bar.
  ## NOTE(review): these drop the second-to-last / second break rather than
  ## the infinite end itself, making the outermost bin open-ended for
  ## cut(); verify this is the intended binning.
  if (is.infinite(breaks[length(breaks)])){
    toptriangle <- TRUE
    breaks <- breaks[-(length(breaks)-1)]
  }
  if (is.infinite(breaks[1])){
    bottomtriangle <- TRUE
    breaks <- breaks[-2]
  }

  ## update number of bins after trimming
  nbin <- length(breaks) - 1

  ## Add dummy rows so that 'layer' spans the entire break range and every
  ## factor level (color bin) is present in the plot data.
  df <- df %>%
    bind_rows(
      tibble(
        x = NA,
        y = NA,
        layer = breaks[1:(length(breaks)-1)] + 0.5 * (breaks[2]-breaks[1])
      )
    )

  ## Bin the data (or keep it continuous when make_discrete = FALSE).
  if (make_discrete){
    df$layercut <- as.factor(base::cut(df$layer, breaks=breaks, labels = FALSE, include.lowest = TRUE))
  } else {
    df$layercut <- df$layer
  }

  ##---------------------------------------------
  ## Crop all layers to the plotted domain
  ##---------------------------------------------
  domain <- c(lonmin, lonmax, latmin, latmax)
  if (lakes) lakes_crop <- mycrop(layer_lakes, domain)
  if (rivers) river_crop <- mycrop(layer_rivers, domain)
  if (coast) coast_crop <- mycrop(layer_coast, domain)
  if (countries) countries_crop <- mycrop(layer_countries, domain)
  if (states) states_crop <- mycrop(layer_states, domain)
  if (hillshade) raster_shade_crop <- crop(raster_shade, y=extent(domain))

  df <- df %>%
    dplyr::filter(x > domain[1] & x < domain[2] & y > domain[3] & y < domain[4])

  ## Convert the cropped hillshade layer to a data frame, values scaled to
  ## [0, 1] for the alpha aesthetic.
  if (hillshade){
    df_hs <- data.frame(
      xyFromCell(raster_shade_crop, 1:ncell(raster_shade_crop)),
      getValues(raster_shade_crop/255)) %>%
      as_tibble()
  }

  ##---------------------------------------------
  ## Create color scale: accept a palette-generating function, a vector of
  ## color names to interpolate, or a ready-made palette object.
  ##---------------------------------------------
  if (class(colorscale)=="function"){
    colorscale <- colorscale(nbin, direction = -1)
  } else if (class(colorscale)=="character"){
    colorscale <- colorRampPalette( colorscale )( nbin )
  } else if (class(colorscale)=="palette"){
    ## already a palette: use as is
  } else {
    rlang::abort("colorscale could not be set.")
  }

  ## Duplicate the edge colors for the open-ended triangle bins.
  if (toptriangle){
    colorscale <- c(colorscale, colorscale[length(colorscale)])
  }
  if (bottomtriangle){
    colorscale <- c(colorscale[1], colorscale)
  }

  ##---------------------------------------------
  ## Build the map
  ##---------------------------------------------
  ggmap <- ggplot() +

    ## main raster layer; 'color' matches 'fill' to avoid hairline gaps
    ## between tiles
    geom_tile(data = df, aes(x = x, y = y, fill = layercut, color = layercut), show.legend = FALSE) +
    scale_fill_manual(values = colorscale) +
    scale_color_manual(values = colorscale) +
    xlab('') + ylab('') +
    coord_sf(expand = FALSE) +
    theme_bw() +
    theme(axis.ticks.y.right = element_line(),
          axis.ticks.x.top = element_line(),
          panel.grid = element_blank())

  ## Optional overlay layers.
  if (coast){
    ggmap <- ggmap +
      geom_path(data = coast_crop, aes(x = long, y = lat, group = group), color = 'gray25', size = 0.1)
  }

  if (rivers){
    ggmap <- ggmap +
      geom_path(data = river_crop, aes(x = long, y = lat, group = group), color = 'dodgerblue', size = 0.2)
  }

  if (lakes){
    ggmap <- ggmap +
      geom_polygon(data = lakes_crop, aes(x = long, y = lat, group = group), fill = "#ADD8E6")
  }

  if (countries){
    ggmap <- ggmap +
      geom_path(data = countries_crop, aes(x = long, y = lat, group = group), color = 'gray25', size = 0.2)
  }

  if (states){
    ggmap <- ggmap +
      geom_path(data = states_crop, aes(x = long, y = lat, group = group), color = 'gray25', size = 0.1)
  }

  if (hillshade){
    ggmap <- ggmap +
      geom_tile(data = df_hs, aes(x = x, y = y, alpha = SR_50M), show.legend = FALSE) +
      scale_alpha(range=c(0.5, 0))
  }

  ## The color bar is drawn separately (allows +-Inf triangles) and then
  ## combined with the map via cowplot when combine = TRUE.
  gglegend <- plot_discrete_cbar(
    breaks = breaks_with, # Vector of breaks. If +-Inf are used, triangles will be added to the sides of the color bar
    colors = colorscale,
    legend_title = legend_title,
    legend_direction = legend_direction,
    ...
  )

  if (combine){
    if (legend_direction == "vertical"){
      out <- cowplot::plot_grid(ggmap, gglegend, ncol = 2, rel_widths = c(1, 0.2))
    } else {
      out <- cowplot::plot_grid(ggmap, gglegend, ncol = 1, rel_heights = c(1, 0.2))
    }
  } else {
    out <- list(ggmap = ggmap, gglegend = gglegend)
  }

  return(out)
}
## Copied from https://github.com/adrfantini/plot_discrete_cbar
## Build a stand-alone discrete color bar as a ggplot object.
## Copied from https://github.com/adrfantini/plot_discrete_cbar
plot_discrete_cbar <- function(
    breaks, # Vector of breaks. If +-Inf are used, triangles will be added to the sides of the color bar
    palette = "Greys", # RColorBrewer palette to use
    colors = RColorBrewer::brewer.pal(length(breaks) - 1, palette), # Alternatively, manually set colors
    direction = 1, # Flip colors? Can be 1 or -1
    spacing = "natural", # Spacing between labels. Can be "natural" or "constant"
    border_color = NA, # NA = no border color
    legend_title = NULL,
    legend_direction = "horizontal", # Can be "horizontal" or "vertical"
    font_size = 3.5,
    expand_size = 0, # Controls spacing around legend plot
    expand_size_y = 0.5,
    spacing_scaling = 0.3, # Multiplicative factor for label and legend title spacing
    width = 0.01, # Thickness of color bar
    triangle_size = 0.05 # Relative width of +-Inf triangles
    ) {

  require(ggplot2)

  ## --- validate inputs ---------------------------------------------------
  if (!(spacing %in% c("natural", "constant"))) stop("spacing must be either 'natural' or 'constant'")
  if (!(direction %in% c(1, -1))) stop("direction must be either 1 or -1")
  if (!(legend_direction %in% c("horizontal", "vertical"))) stop("legend_direction must be either 'horizontal' or 'vertical'")

  breaks <- as.numeric(breaks)
  sorted_breaks <- sort(unique(breaks))
  if (any(sorted_breaks != breaks)) warning("Wrong order or duplicated breaks")
  breaks <- sorted_breaks

  ## Resolve the color vector (may be given as a generating function).
  if (is.function(colors)) colors <- colors(length(breaks) - 1)
  if (length(colors) != length(breaks) - 1) stop("Number of colors (", length(colors), ") must be equal to number of breaks (", length(breaks), ") minus 1")
  if (!missing(colors)) warning("Ignoring RColorBrewer palette '", palette, "', since colors were passed manually")

  if (direction == -1) colors <- rev(colors)

  ## Strip infinite breaks; they are drawn as triangles, not rectangles.
  infinite_idx <- which(is.infinite(breaks))
  if (length(infinite_idx) != 0) breaks <- breaks[-infinite_idx]

  fill_colors <- colors
  n_breaks <- length(breaks)
  labels <- breaks

  ## "constant" spacing places labels at equal intervals regardless of the
  ## numeric distance between breaks.
  if (spacing == "constant") {
    breaks <- 1:n_breaks
  }

  r_breaks <- range(breaks)
  d_breaks <- breaks[2] - breaks[1]

  ## One rectangle per color bin.
  bar_df <- data.frame(stringsAsFactors = FALSE,
    y = breaks,
    yend = c(breaks[-1], NA),
    color = as.character(1:n_breaks)
  )[-n_breaks,]

  xmin <- 1 - width/2
  xmax <- 1 + width/2

  p <- ggplot(
    bar_df,
    aes(xmin=xmin, xmax = xmax, ymin = y, ymax = yend, fill = factor(color, levels = 1:length(colors)))
    ) +
    geom_rect(show.legend = FALSE, color=border_color)

  ## --- open-ended triangles ----------------------------------------------
  if (any(infinite_idx == 1)) { # -Inf: triangle below the first bin
    first_break <- breaks[1]
    tri_bottom <- data.frame(
      x = c(xmin, xmax, 1),
      y = c(rep(first_break, 2), first_break - diff(r_breaks) * triangle_size)
    )
    fill_colors <- fill_colors[-1]
    p <- p +
      geom_polygon(data=tri_bottom, aes(x=x, y=y),
                   show.legend = FALSE,
                   inherit.aes = FALSE,
                   fill = colors[1],
                   color=border_color)
  }

  if (any(infinite_idx > 1)) { # +Inf: triangle above the last bin
    last_break <- breaks[n_breaks]
    tri_top <- data.frame(
      x = c(xmin, xmax, 1),
      y = c(rep(last_break, 2), last_break + diff(r_breaks) * triangle_size)
    )
    fill_colors <- fill_colors[-length(fill_colors)]
    p <- p +
      geom_polygon(data=tri_top, aes(x=x, y=y),
                   show.legend = FALSE,
                   inherit.aes = FALSE,
                   fill = colors[length(colors)],
                   color=border_color)
  }

  ## --- orientation-dependent label geometry ------------------------------
  if (legend_direction == "horizontal") {
    dir_mul <- 1
    x_lab <- xmin
    x_lab_end <- xmax
    p <- p + coord_flip()
    angle <- 0
    legend_position <- xmax + 0.1 * spacing_scaling
  } else {
    dir_mul <- -1
    x_lab <- xmax
    x_lab_end <- xmin
    angle <- -90
    legend_position <- xmin
  }

  ymid <- (breaks[length(breaks)] + breaks[1]) / 2
  dy <- breaks[length(breaks)] - breaks[1]
  ybottom_abs <- ymid - dy/2 * 1/expand_size_y
  ytop_abs <- ymid + dy/2 * 1/expand_size_y

  ## Tick marks and numeric labels next to the bar.
  p <- p +
    geom_segment(data = data.frame(y = breaks, yend = breaks),
                 aes(y=y, yend=yend),
                 x = x_lab - 0.01 * dir_mul * spacing_scaling, xend = x_lab,
                 inherit.aes = FALSE) +
    annotate(geom = 'text', x = x_lab - 0.02 * dir_mul * spacing_scaling, y = breaks,
             label = labels,
             size = font_size,
             hjust = 0) +
    scale_fill_manual(values=fill_colors) +
    theme_void() +
    expand_limits(y = c(ybottom_abs, ytop_abs), x = c(x_lab_end, x_lab - 0.1 * dir_mul * spacing_scaling))

  ## Optional legend title above the bar.
  if (!is.null(legend_title)) {
    p <- p +
      annotate(
        geom = 'text',
        x = legend_position,
        y = max(r_breaks) + d_breaks * 1.5,
        label = legend_title,
        angle = 0,
        size = font_size,
        fontface = 1,
        hjust = 0
      )
  }

  p
}
## Crop a Spatial* layer to a lon/lat window and return it as a tibble
## suitable for ggplot (fortified polygon/path coordinates joined back to
## the layer's attribute data).
## domain: c(xmin, xmax, ymin, ymax) in degrees.
mycrop <- function(x, domain){
  x@data$id <- rownames(x@data)
  fortified <- fortify(x, region="id")
  fortified <- as_tibble(fortified)
  joined <- dplyr::left_join(fortified, x@data, by = "id")
  dplyr::filter(joined,
                long > domain[1] & long < domain[2] &
                  lat > domain[3] & lat < domain[4])
}
|
3a094af05684e3f51fd5cdbf9cb75c233e56df0d
|
293de535b0fe8cc4f9c145caebd91abc0686ddcc
|
/man/get_rollit_source.Rd
|
dd44bb2898f220b3f7cddc870fc8b850882f07a0
|
[] |
no_license
|
tyler-roberts/RcppRoll
|
dad4f3b2141681efd827d5f1d6120791d8593a24
|
d9eeb5c7f5ff09f470317f2143d687fcdc851bfb
|
refs/heads/master
| 2021-01-23T23:19:34.808769
| 2015-04-05T11:23:08
| 2015-04-05T11:23:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 729
|
rd
|
get_rollit_source.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/get_rollit_source.R
\name{get_rollit_source}
\alias{get_rollit_source}
\title{Get and Edit Source File Associated with User-Generated 'rollit' Function}
\usage{
get_rollit_source(fun, edit = TRUE, RStudio = FALSE, ...)
}
\arguments{
\item{fun}{The generated 'rollit' function.}
\item{edit}{boolean; open the C++ source file in a text editor?}
\item{RStudio}{boolean; open the C++ source file in RStudio?}
\item{...}{Optional arguments passed to \code{\link{file.edit}}.}
}
\description{
This function returns and opens the source file associated with
a particular 'rollit' function for debugging. We use \R's
\code{file.edit} interface.
}
|
d1e51e36a9037663045e0339ca34bb9452a0ccfe
|
c91d990800aacd643c49da7422b6d2c0502ab8c5
|
/R/raster.R
|
f3aa9d8a6574f925118824814b281051a5114d74
|
[] |
no_license
|
cran/quadmesh
|
2c04217d1fff117f722fe334a7a47c492f1a1e65
|
3bab1089f9a277837c78a0527f51ec3d6a36989a
|
refs/heads/master
| 2022-09-16T22:04:58.384139
| 2022-08-31T05:50:02
| 2022-08-31T05:50:02
| 60,974,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,165
|
r
|
raster.R
|
#' Quadmesh to raster
#'
#' Approximate re-creation of a raster from a quadmesh.
#'
#' The raster is populated with the mean of the values at each corner, which is
#' closest to the interpretation use to create mesh3d from rasters. This can be over ridden
#' by setting 'index' to 1, 2, 3, or 4.
#' @param x 'mesh3d' object
#' @param index optional index to specify which z coordinate to use as raster values
#'
#' @return RasterLayer
#' @export
#'
#' @examples
#' qm_as_raster(quadmesh(etopo))
qm_as_raster <- function(x, index = NULL) {
  ## Validate the optional corner index before anything else.
  if (is.numeric(index) && (index < 1 || index > 4)) stop("index is out of range, set to NULL or 1, 2, 3, or 4")
  if (!inherits(x, "mesh3d")) stop("only mesh3d supported")

  ## Cell value = mean of the four corner z values, or a single corner's z
  ## when 'index' (1..4) is given.
  if (is.null(index)) {
    corner_z <- matrix(x$vb[3, x$ib], nrow = 4)
    vals <- .colMeans(corner_z, 4, ncol(x$ib))
  } else {
    vals <- x$vb[3, x$ib[index, ]]
  }

  ## Rebuild the raster extent/dimensions from (or infer) the mesh metadata.
  meta <- .raster_meta(x)
  raster::setValues(do.call(raster::raster, meta), vals)
}
## Recover the raster metadata (extent, dimensions, crs) for a quadmesh.
## Uses the metadata stored on the mesh when present; otherwise attempts to
## reconstruct the grid shape from the quad index topology.
.raster_meta <- function(x) {
  ## Fast path: metadata was stored when the mesh was built.
  if ("raster_metadata" %in% names(x)) {
    return(x$raster_metadata)
  }

  warning("original raster_metadata has been stripped or does not exist, \nif this mesh has been modified the raster may be nonsensical")

  ## Infer the number of columns from the regular jumps in the first index
  ## row; a non-unique result means the mesh is no longer a regular grid.
  n_col <- unique(diff(which(diff(x$ib[1, ]) > 1)))
  n_cell <- ncol(x$ib)
  if (length(n_col) > 1) stop("cannot automatically determine original raster dimensions")
  n_row <- n_cell/n_col
  if (abs(n_row - as.integer(n_row)) > sqrt(.Machine$double.eps)) {
    warning("maybe cannot determine original raster dimension properly, has it been subset")
  }

  ## Extent from the vertex coordinates; CRS is unrecoverable here.
  list(
    xmn = min(x$vb[1, ], na.rm = TRUE),
    xmx = max(x$vb[1, ], na.rm = TRUE),
    ymn = min(x$vb[2, ], na.rm = TRUE),
    ymx = max(x$vb[2, ], na.rm = TRUE),
    ncols = n_col,
    nrows = as.integer(n_row),
    crs = NA_character_
  )
}
|
d2739f7b64c46ff4ec120937afb24b201422759f
|
b4307e64b13191cdb9261c92332c72ee3e126de9
|
/Image_dimension_from_image_url_Radhika.R
|
a9d0d43e1026ae30159b19a67eba19aa86f39dc3
|
[] |
no_license
|
MukeshGangwar333/R-Queries
|
5653e55cc6020c5a0932db664288af4dc617f85e
|
945025dc32dec0963441d7a7d401d70d18f3d545
|
refs/heads/master
| 2020-06-27T09:22:13.470908
| 2019-07-31T18:50:11
| 2019-07-31T18:50:11
| 199,911,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 879
|
r
|
Image_dimension_from_image_url_Radhika.R
|
## Scrape image dimensions for a spreadsheet of image URLs: load each URL in
## Chrome via RSelenium and parse the dimension out of the browser tab title
## (Chrome titles a raw-image page like "name.jpg (WxH)").
library(RSelenium)
library(readxl)
library(stringr)

## Start the Selenium server and a Chrome client.
## NOTE(review): chromever must match the locally installed Chrome -- confirm
## before running.
driver <- rsDriver(browser = c("chrome"), chromever = "74.0.3729.6") # starting selenium server for chrome browser
remDr <- driver$client

## Select the input workbook interactively (Windows-only file picker);
## expects a column named "Image Url's".
file_select <- choose.files()
Image_File <- read_excel(file_select)
Image_File$Dimension <- ""

## Visit each URL and keep the trailing part of the page title, which holds
## the dimension string.
for (i in seq_len(nrow(Image_File))) {
  image_url <- Image_File$`Image Url's`[i]
  remDr$navigate(image_url)
  image_title <- remDr$findElements(using = "xpath", "/html/head/title")
  title_text <- unlist(image_title[[1]]$getTitle())
  dimension <- str_extract(title_text, " (.*?)$")
  dimension <- trimws(dimension)
  Image_File$Dimension[i] <- as.character(dimension)
}

## Shut down client and server to free the port.
driver$client$closeall() # use to free port
driver$server$stop()     # use to free port
|
a78ea1ea1fcd0489f5168e0e5308494fa2e44956
|
0bd7b839a46bb12f1c2319094842123e094cd921
|
/R/theme_redd.R
|
6b9231d6c95e4e076bc4c039687df152899f0cb9
|
[] |
no_license
|
cognack/kimchicentromadia
|
7dafecec5f2269c797c6de658f1bd753ce77c77c
|
8826edc439ffd3c210bb4ac14d25df5fa77f2cb9
|
refs/heads/main
| 2023-07-12T08:28:26.192090
| 2021-08-20T16:35:24
| 2021-08-20T16:35:24
| 395,761,805
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
r
|
theme_redd.R
|
#' theme_redd
#'
#' Creates an ugly red theme for use in ggplot that no one should actually use.
#'
#' @return A ggplot2 theme object (the result of [ggplot2::theme()]) with a
#'   cornsilk plot background, coral panel background, black axis titles, and
#'   red major grid lines; add it to a plot with `+`.
#' @export
#'
#' @examples
#' # ggplot2::ggplot(mtcars, ggplot2::aes(wt, mpg)) +
#' #   ggplot2::geom_point() +
#' #   theme_redd()
theme_redd <- function() {
  # NOTE(review): theme()/element_rect()/element_text()/element_line() are
  # ggplot2 functions; confirm ggplot2 is imported by the package NAMESPACE.
  theme(
    plot.background = element_rect(fill = "cornsilk1"),
    panel.background = element_rect(fill = "coral1"),
    axis.title = element_text(color = "black"),
    panel.grid.major = element_line(color = "red"))
}
|
07a6d8ebd447a0d290b67d5078f32513be6aa664
|
62e00b1a6c6c6c378108643274c437ad14df5df9
|
/run_analysis.R
|
86ef0b8b1ec276755b84720a2390a63e2b6462aa
|
[] |
no_license
|
hahendri/TidyDataCourseProject
|
1a4f14d9977a0bd4810137221311b165c3f768bb
|
5aefd3c249e2b90f76398cbe5120e4c1fd81d756
|
refs/heads/master
| 2020-03-10T09:31:28.915457
| 2018-04-13T00:36:59
| 2018-04-13T00:36:59
| 129,311,331
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,457
|
r
|
run_analysis.R
|
## Downloads the UCI HAR (smartphone accelerometer) data set and builds two
## tidy outputs:
##   tidydata_1.txt -- subject, activity, and all mean()/std() measurements
##   tidydata_2.txt -- the average of each measurement per subject + activity
## Set your working directory to the project root before sourcing.
library(dplyr)

## Create the data directory and download/unzip the raw archive.
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("data")) {
  dir.create("data")
}
zipfile <- "./data/UCI HAR Dataset.zip"
## Skip the (large) download when the archive is already present.
if (!file.exists(zipfile)) {
  download.file(fileurl, destfile = zipfile, mode = "wb")
}
unzip(zipfile, exdir = "./data")

## Read the measurement (X), activity label (y), and subject files for both
## the test and train splits.
xtest_data <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
xtrain_data <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
ytest_data <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
ytrain_data <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
stest_data <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
strain_data <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")

## Stack test on top of train, dropping the split-specific tables as we go.
x_data <- bind_rows(xtest_data, xtrain_data)
rm(xtest_data, xtrain_data)
y_data <- bind_rows(ytest_data, ytrain_data)
rm(ytest_data, ytrain_data)
s_data <- bind_rows(stest_data, strain_data)
rm(stest_data, strain_data)

## Label the measurement columns with the feature names from features.txt.
x_features <- read.table("./data/UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
x_names <- x_features$V2
names(x_data) <- x_names
names(y_data) <- c("activity_num")
names(s_data) <- c("subject")

## Combine subject, activity code, and measurements into one data frame.
df <- cbind(s_data, y_data, x_data)
rm(s_data, y_data, x_data)

## Replace the numeric activity code with descriptive activity names and make
## all column names lower case.
activity <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
names(activity)[names(activity) == "V2"] <- "activity"
df <- merge(activity, df, by.x = "V1", by.y = "activity_num", all.x = TRUE)
df <- df[, -1]  # drop the activity code column (was a brittle hard-coded 2:564)
names(df) <- tolower(names(df))

## Keep only the mean() and std() measurements and write the first tidy set.
tidydata_1 <- select(df, 2, 1, contains("mean()"), contains("std()"))
write.table(tidydata_1, "./tidydata_1.txt")

## Average every retained measurement per subject + activity combination.
tidydata_2 <- tidydata_1 %>%
  group_by(subject, activity) %>%
  summarise_all(mean)
## BUG FIX: the argument is `row.names`; the original relied on partial
## matching of `row.name`.
write.table(tidydata_2, "./tidydata_2.txt", row.names = FALSE)
|
dd5f49a162819206d60041cb82845c23cd17be33
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/move/tests/test.getDuplicatedTimestamps.R
|
c2c25f18e4320f4c593ad185dc456350213cca37
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,663
|
r
|
test.getDuplicatedTimestamps.R
|
## Unit tests for move::getDuplicatedTimestamps (testthat).
## NOTE(review): both test_that() blocks share the name "basic functionality";
## giving the second a distinct description would make failures easier to read.
context('getDuplicatedTimestamps')

## Data-frame interface: duplicates are keyed as "id|timestamp|sensor" and
## reported as the row indices of the duplicated records.
test_that("basic functionality", {
# A clean packaged movement file has no duplicated timestamps.
expect_null(getDuplicatedTimestamps(system.file("extdata", "leroy.csv.gz", package = "move")))
# 26 individuals, 26 distinct timestamps -> no duplicates.
expect_null(getDuplicatedTimestamps(df<-data.frame(individual.local.identifier=letters, timestamps=as.POSIXct(1:26, origin="1970-1-1", tz="UTC"), sensor.type=rep("gps",26))))
# Repeating row 2 creates one duplicate for individual "b" at rows 2 and 4.
expect_equal(getDuplicatedTimestamps(df[c(1:3,2,4),]), structure(list("b|1970-01-01 00:00:02.0000|gps" = c(2L, 4L)), .Names = "b|1970-01-01 00:00:02.0000|gps"))
# Same timestamp across DIFFERENT individuals is not a duplicate.
expect_null(getDuplicatedTimestamps(df<-data.frame(individual.local.identifier=letters, timestamps=as.POSIXct(rep(1,26), origin="1970-1-1", tz="EST"), sensor.type=rep("gps",26))))
# ...but reassigning row 26 to individual "a" makes rows 1 and 26 duplicates
# (note the key renders the EST-local time).
df$individual.local.identifier[26]<-"a"
expect_equal(getDuplicatedTimestamps(df), structure(list("a|1969-12-31 19:00:01.0000|gps" = c(1L, 26L)), .Names = "a|1969-12-31 19:00:01.0000|gps"))
# Character vs factor identifiers must give identical results, whether passed
# as separate vectors or as a data frame.
df2<-df
df2$individual.local.identifier<-as.character( df$individual.local.identifier)
expect_equal(
getDuplicatedTimestamps(df2$individual.local.identifier, df2$timestamps, df2$sensor.type),
getDuplicatedTimestamps(df))
})

## File/connection interface: the onlyVisible flag controls whether records
## flagged visible=false participate in duplicate detection.
test_that("basic functionality", {
# Tag 8 has a duplicated timestamp, but one of the two records is
# visible=false, so it only shows up when onlyVisible=F.
v<-c('event.id,visible,timestamp,location.long,location.lat,comments,modelled,study.specific.measurement,tag.tech.spec,sensor.type,individual.taxon.canonical.name,tag.local.identifier,individual.local.identifier,study.name',
'53434,true,2011-12-15 00:21:57.000,1,2,sdfg,true,111,1,gps,T,7GPS,7,"a"',
'2345234,true,2011-12-15 01:22:57.000,3,4,dfg,true,111,1,gps,T,7GPS,7,"a"',
'12345320,true,2011-12-15 02:52:57.000,0,5,dfg,true,111,1,gps,T,7GPS,7,"a"'
,
'1234320,true,2011-12-15 01:52:57.000,0,5,dfg,true,111,1,gps,T,8GPS,8,"a"',
'1235320,false,2011-12-15 02:52:57.000,0,5,dfg,true,111,1,gps,T,8GPS,8,"a"',
'145320,true,2011-12-15 02:52:57.000,1,5,dfg,true,111,1,gps,T,8GPS,8,"a"'
)
# textConnection objects must be downgraded to plain 'connection' class so the
# function dispatches on the connection method.
ff <- textConnection(v)
class(ff) <- 'connection'
expect_equal(getDuplicatedTimestamps(ff, onlyVisible=F), structure(list("8|2011-12-15 02:52:57.0000|gps" = 5:6), .Names = "8|2011-12-15 02:52:57.0000|gps"))
ff <- textConnection(v)
class(ff) <- 'connection'
expect_null(getDuplicatedTimestamps(ff))
ff <- textConnection(v)
class(ff) <- 'connection'
expect_null(getDuplicatedTimestamps(ff, onlyVisible=T))
# Second fixture: tag 7 now has a fully visible duplicate pair, so it is
# reported regardless of onlyVisible; tag 8's pair still needs onlyVisible=F.
v<-c('event.id,visible,timestamp,location.long,location.lat,comments,modelled,study.specific.measurement,tag.tech.spec,sensor.type,individual.taxon.canonical.name,tag.local.identifier,individual.local.identifier,study.name',
'53434,true,2011-12-15 00:21:57.000,1,2,sdfg,true,111,1,gps,T,7GPS,7,"a"',
'2345234,true,2011-12-15 00:21:57.000,3,4,dfg,true,111,1,gps,T,7GPS,7,"a"',
'12345320,true,2011-12-15 02:52:57.000,0,5,dfg,true,111,1,gps,T,7GPS,7,"a"'
,
'1234320,true,2011-12-15 01:52:57.000,0,5,dfg,true,111,1,gps,T,8GPS,8,"a"',
'1235320,false,2011-12-15 02:52:57.000,0,5,dfg,true,111,1,gps,T,8GPS,8,"a"',
'145320,true,2011-12-15 02:52:57.000,1,5,dfg,true,111,1,gps,T,8GPS,8,"a"'
)
ff <- textConnection(v)
class(ff) <- 'connection'
expect_equal(getDuplicatedTimestamps(ff, onlyVisible=F), structure(list("7|2011-12-15 00:21:57.0000|gps" = 1:2,"8|2011-12-15 02:52:57.0000|gps" = 5:6), .Names = c("7|2011-12-15 00:21:57.0000|gps","8|2011-12-15 02:52:57.0000|gps")))
ff <- textConnection(v)
class(ff) <- 'connection'
expect_equal(getDuplicatedTimestamps(ff, onlyVisible=T), structure(list("7|2011-12-15 00:21:57.0000|gps" = 1:2), .Names = c("7|2011-12-15 00:21:57.0000|gps")))
})
|
6b54793409d0ab3d965fb8e0f9b5e734a00b7b2d
|
b8756cf7e224eed7291700aa98c4a4afe05381b3
|
/man/allWorld.Rd
|
e3ce0d15feba8e8856f18f82f830391c1b944695
|
[] |
no_license
|
jonasbhend/NRMgraphics
|
879e8cff100d1a570453cc294c504db2d4dc7a7c
|
503f11fe95d562d3ce5c157b0d377ded1a8c499c
|
refs/heads/master
| 2016-08-04T20:58:24.326426
| 2014-11-05T10:20:02
| 2014-11-05T10:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,111
|
rd
|
allWorld.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{allWorld}
\alias{allWorld}
\alias{allWorldKey}
\title{Giorgi regions overview plots}
\usage{
allWorld(fun, regnames = NULL, inset.bg = "#FFFFFFCC", ...)
allWorldKey(fun, ...)
}
\arguments{
\item{fun}{function to be executed at the respective spots for insets}
\item{regnames}{which regions should be plotted (see details)}
\item{inset.bg}{background colour for insets (with transparency)}
\item{...}{additional arguments passed to \code{fun}}
}
\description{
This function provides the backbone for the global overview plots.
In essence, it provides the map and the locations where to add insets.
Also \code{allWorldKey} offers the additional functionality of
placing a legend in the bottom-left corner.
}
\details{
The list for background colouring has to contain at least the object
\code{data}, a vector with values to use in colouring named with the region
abbreviations. In addition, \code{col} and \code{lev} can be added to control
the colouring. \code{title} will add a title and \code{units}
will add units to the legend.
}
\keyword{plot}
|
db5b79a06d28ea77490a8ed1cbbd327493b3b9d3
|
cf6cae1a30e3b1f600b2c0b7990207a25cadb619
|
/creating-git-ignore/creating-git-ignore.R
|
523759d4a66545fe984a8a894f55b64980ab5a99
|
[] |
no_license
|
ARPeters/Utilities
|
778f983d5ada00c643ba7ddc285d9232bf630cb9
|
675ce52bc9661fe90ff4c8b257a3cf794c0030ac
|
refs/heads/master
| 2023-02-10T19:09:37.978078
| 2023-02-02T17:53:19
| 2023-02-02T17:53:19
| 30,270,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
creating-git-ignore.R
|
# Creates a project .gitignore from the ropensci/gitignore R template, then
# documents/tests the extra "protected information" rules added by hand.
# Following instructions for creating a gitignore file here:
# https://docs.ropensci.org/gitignore/
# One-time setup (the package is GitHub-only):
# library(devtools)
# devtools::install_github("ropensci/gitignore")
library(gitignore)

# Exploratory: list the available templates (printed to the console only).
head(gi_available_templates(), 25)
length(gi_available_templates())

# Fetch the R template and write it to the project-root .gitignore.
# (The duplicate bare gi_fetch_templates("R") call has been removed; it only
# printed the template a second time.)
git_ignore_text <- gi_fetch_templates("R")
gi_write_gitignore(git_ignore_text, gitignore_file = here::here(".gitignore"))

# Adding this line of text to gitigore file to identify data-unshared folder (without the quotes)
# Also wipes out any file types in the following list.
# "# ---- Protected Information -----------------------------------------------
# "/data-unshared/*"
# *.rdata
# *.Rdata
# *.RData
# *.RDATA
# *.RDS
# *.rds
# *.csv
# *.csv#
# *.mdb
# *.accdb
# *.sav
# *.sas7bdat
# *.xls
# *.xlsx"

# ---- testing: these files should end up ignored by git.
ds_test <- tibble::as_tibble(head(mtcars))
path_out_csv <- "./data-unshared/test_1.csv"
path_out_rds <- "./data-unshared/test_1.rds"
# BUG FIX: readr renamed `path` to `file` (readr >= 1.4; `path` now errors in
# current readr). Passing the path positionally works on every readr version.
readr::write_csv(ds_test, path_out_csv)
readr::write_rds(ds_test, path_out_rds)
|
7db43b583ec2d6387f77853a152981bf21a50d92
|
58e919cebf3a3b23aae8b40ba48c3d68c0dfac0c
|
/code/data_analysis/R/qpcr_analysis.R
|
7ac8829b1a078feea2b512f9f96f9a951a346b03
|
[
"MIT"
] |
permissive
|
bdwilliamson/paramedic_supplementary
|
3a511aa6124a867dfefa284a7cc58f951be62ff6
|
52fcf88e4045596ad3675c246561af14d77496e4
|
refs/heads/master
| 2021-06-14T02:51:43.002324
| 2021-05-06T00:02:02
| 2021-05-06T00:02:02
| 192,587,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,762
|
r
|
qpcr_analysis.R
|
######################################################################################
##
## FILE: qpcr_analysis.R
##
## CREATED: 15 October 2018 by Brian Williamson
##
## PURPOSE: data analysis of the qPCR + br16s data from Hutch collaborators
## to showcase qPCR estimation method
##
## INPUTS: ../../data/p2_brngs_qPCR_merge_20180314.csv
##
## OUTPUTS:
######################################################################################
## -----------------------------------------------------------------------------------
## load required functions and libraries
## -----------------------------------------------------------------------------------
code_dir <- "code/R/"
data_dir <- "data/"
stan_dir <- "stan/"
library("methods")
library("StanHeaders")
library("rstan")
library("argparse")
library("Cairo")
remotes::install_github("statdivlab/paramedic", ref = "v0.0.3")
library("paramedic")
source(paste0(code_dir, "analyze_data/qpcr_analysis_helper_functions.R"))
source(paste0(code_dir, "naive_qpcr_estimator.R"))
source(paste0(code_dir, "analyze_data/get_most_abundant_taxa.R"))
## Command-line interface: estimator choice, MCMC settings, data-subsetting
## options, and prior hyperparameters. Integer-valued flags (use-precompiled,
## adjust, do-parallel, save-stan-model) are coerced to logical below.
parser <- ArgumentParser()
parser$add_argument("--estimator", default = "no_ve", help = "the estimator to calculate")
parser$add_argument("--use-precompiled", type = "integer", default = 1, help = "use precompiled stan code?")
parser$add_argument("--adjust", type = "integer", default = 0, help = "adjust for case/control status?")
parser$add_argument("--do-parallel", type = "integer", default = 1, help = "parallelize?")
parser$add_argument("--fold-num", type = "double", default = 1, help = "data fold to run on")
parser$add_argument("--num-folds", type = "double", default = 1, help = "number of data folds")
parser$add_argument("--n-chains", type = "double", default = 6, help = "number of chains")
parser$add_argument("--n-iter", type = "double", default = 10500, help = "number of iterations")
parser$add_argument("--n-burnin", type = "double", default = 10000, help = "number of burn-in")
parser$add_argument("--q", type = "double", default = 127, help = "number of taxa")
parser$add_argument("--sample-num", type = "double", default = 110, help = "sample sample-num women or not")
parser$add_argument("--q-obs", type = "double", default = 13, help = "number of observed qPCR taxa")
## NOTE(review): the help text says "99 means do not do leave-one-out" but the
## code below compares against 999 -- confirm which sentinel is intended.
parser$add_argument("--leave-one-out", type = "double", default = 999, help = "index to leave out (99 means do not do leave-one-out)")
parser$add_argument("--div-num", type = "double", default = 1000, help = "number to rescale qPCR by")
parser$add_argument("--save-stan-model", type = "double", default = 0, help = "save stan model (1) or not (0)")
parser$add_argument("--max-treedepth", type = "double", default = 15, help = "max treedepth")
parser$add_argument("--adapt-delta", type = "double", default = 0.85, help = "adapt delta")
parser$add_argument("--alpha-sigma", type = "double", default = 2, help = "alpha_sigma: shape parameter for inverse gamma prior on e variance")
parser$add_argument("--kappa-sigma", type = "double", default = 1, help = "kappa_sigma: scale parameter for inverse gamma prior on e variance")
args <- parser$parse_args()
## argparse returns 0/1 for these; convert to logical once, up front.
args$do_parallel <- as.logical(args$do_parallel)
args$save_stan_model <- as.logical(args$save_stan_model)
args$adjust <- as.logical(args$adjust)
args$use_precompiled <- as.logical(args$use_precompiled)
print(args)
## -----------------------------------------------------------------------------------
## load in the data, clean it up
## -----------------------------------------------------------------------------------
data("example_qPCR_data")
data("example_br16S_data")
## Drop the leading id column, keeping only the numeric measurement columns.
br16_mat <- as.matrix(example_br16S_data[, 2:dim(example_br16S_data)[2]])
## BUG FIX: this matrix was originally assigned to `qPCR_mat`, but every later
## use in the script refers to `qpcr_mat` (R is case-sensitive), which would
## fail with "object 'qpcr_mat' not found". Define it under the name used.
qpcr_mat <- as.matrix(example_qPCR_data[, 2:dim(example_qPCR_data)[2]])
## calculate the read numbers (total 16S reads per sample/row)
m <- rowSums(br16_mat)
## -----------------------------------------------------------------------------------
## run the estimators
## -----------------------------------------------------------------------------------
## set q (total taxa modeled), q_obs (taxa with observed qPCR)
q <- args$q
q_obs <- args$q_obs
observed_taxa <- 1:args$q_obs
## if leave_one_out < 999, hold one observed taxon out of the qPCR set
if (args$leave_one_out < 999) {
  q_obs <- args$q_obs - 1
  observed_taxa <- (1:args$q_obs)[-args$leave_one_out]
}
cat("\n Observed taxa: \n")
print(observed_taxa)
## if q is smaller than the total number, select the most abundant taxa
if (q < dim(br16_mat)[2]) {
  ## get the abundance ordering of all taxa
  ordered_by_abundance <- get_most_abundant_taxa(br16_mat, m)
  ## remove the ones corresponding to observed qPCR
  taxa_to_estimate_init <- ordered_by_abundance[!(ordered_by_abundance %in% observed_taxa)]
  ## if leave-one-out, the taxon to estimate is the left-out one
  if (args$leave_one_out < 999) {
    taxa_to_estimate <- args$leave_one_out
  } else {
    taxa_to_estimate <- taxa_to_estimate_init
  }
  ## select the most abundant taxa (always select the first 7, corresponding to qPCR)
  ## if q == q_obs, then only do observed taxa
  if (q == q_obs) {
    most_abundant_16S <- br16_mat[, observed_taxa]
  } else {
    most_abundant_16S <- br16_mat[, c(observed_taxa, taxa_to_estimate[1:(q - q_obs)])]
  }
  ## replace br16_mat with the reduced matrix and recompute read totals
  br16_mat <- most_abundant_16S
  m <- rowSums(br16_mat)
}
## NOTE(review): `taxa_to_estimate` is only created inside the branch above;
## this print errors when q equals the total number of taxa -- confirm.
cat("\n Taxa to estimate: \n")
print(taxa_to_estimate)
set.seed(4747)
## break up the sample into even chunks (currently disabled)
# folds_init <- rep(seq_len(args$num_folds), length = dim(br16_mat)[1])
# folds <- sample(folds_init)
## sample args$sample_num women (rows) for this run
samp <- sample(1:dim(br16_mat)[1], args$sample_num)
## set stan args
stan_seeds <- c(1, 2)
adapt_delta <- args$adapt_delta
max_treedepth <- args$max_treedepth
## set up the data list for stan; V needs to be all (integer) counts
## NOTE(review): confirm this spelling matches the definition site -- the
## data-loading section used qPCR_mat vs qpcr_mat inconsistently.
stan_v <- qpcr_mat[, observed_taxa]
mode(stan_v) <- "integer"
## NOTE(review): `case_control` is not defined anywhere in this file -- it
## presumably came from a covariate merge that was removed; verify.
if (args$use_precompiled) {
  ## paramedic's precompiled interface wants data frames with a subj_id column
  n <- nrow(br16_mat[, , drop = FALSE])
  W <- cbind.data.frame(subj_id = (1:n)[samp], br16_mat[samp, , drop = FALSE])
  V <- cbind.data.frame(subj_id = (1:n)[samp], stan_v[samp, , drop = FALSE])
  names(W)[1:(length(observed_taxa) + 1)] <- names(V)[1:(length(observed_taxa) + 1)]
  X <- cbind.data.frame(subj_id = (1:n)[samp], case_control = case_control[samp])
  p <- 1
} else {
  ## raw-stan interface wants plain matrices
  W <- br16_mat[samp, , drop = FALSE]
  V <- stan_v[samp, , drop = FALSE]
  X <- matrix(case_control, ncol = 1)[samp, , drop = FALSE]
  p <- 1
}
## Assemble the data list passed to Stan; the naive estimator needs only N.
if (args$estimator != "naive") {
  stan_data_lst <- list(W = W, V = V,
                        N = length(samp), q = q, q_obs = q_obs,
                        sigma_beta = 1.62, sigma_Sigma = sqrt(50),
                        alpha_sigma = args$alpha_sigma, kappa_sigma = args$kappa_sigma)
} else {
  stan_data_lst <- list(N = length(samp))
}
## Optionally adjust for case/control status; otherwise X is a placeholder.
if (args$adjust) {
  tmp <- c(stan_data_lst, list(X = X, p = 1))
  stan_data_lst <- tmp
} else {
  tmp <- c(stan_data_lst, list(X = V[, 1, drop = FALSE]))
  stan_data_lst <- tmp
}
## set up parallel MCMC chains
if (args$do_parallel) {
  options(mc.cores = parallel::detectCores())
}
## Run the naive estimator to generate MCMC initial values: observed qPCR
## plus read-ratio-scaled estimates for the unobserved taxa.
if (q == q_obs) {
  if (args$leave_one_out < 999) {
    naive <- cbind(stan_v[samp, ], apply(matrix(1:q)[-observed_taxa], 1, naive_estimator, br16_mat[samp, ], stan_v[samp, ], observed_taxa))
  } else {
    naive <- stan_v[samp, ]
  }
} else {
  naive <- cbind(stan_v[samp, ], apply(matrix((q_obs+1):q), 1, naive_estimator, br16_mat[samp, ], stan_v[samp, ], observed_taxa))
}
## log scale; zero counts (log = -Inf) are mapped to 0
log_naive <- ifelse(is.infinite(log(naive)), 0, log(naive))
## Jitter the naive values slightly so chains do not start identically.
set.seed(4747)
eps <- 1e-1
error_mu_tilde <- t(replicate(args$sample_num, rnorm(args$q, 0, 10*eps)))
error_beta <- rnorm(args$q, 0, eps)
error_sigma <- rnorm(args$q, 0, eps)
naive_beta <- colMeans(log_naive, na.rm = TRUE) + error_beta
naive_sigma <- diag(var(log_naive, na.rm = TRUE)) + error_sigma
## variances must be strictly positive for Stan initialization
naive_sigma <- ifelse(naive_sigma <= 0, 1e-2, naive_sigma)
## standardized latent means: (log_naive - beta) / sigma, plus jitter
## numerator: sweep(log_naive, 2, naive_beta, FUN = "-")
log_mu_tilde <- sweep(sweep(log_naive, 2, naive_beta, FUN = "-"), 2, naive_sigma, FUN = "/") + error_mu_tilde
# set up inits list: first three chains get the naive-based inits (one
# parameter each), any remaining chains start from random values
if (args$n_chains > 1) {
  if (args$n_chains == 4) {
    inits_list <- c(list(list(log_mu_tilde = log_mu_tilde),
                         list(beta = naive_beta),
                         list(Sigma = naive_sigma),
                         list(init = "random")))
  } else {
    inits_list <- c(list(list(log_mu_tilde = log_mu_tilde),
                         list(beta = naive_beta),
                         list(Sigma = naive_sigma)),
                    replicate(args$n_chains - 3, list(init = "random"), simplify = FALSE))
  }
} else {
  inits_list <- list(list(log_mu_tilde = log_mu_tilde, beta = naive_beta, Sigma = naive_sigma))
}
cat("\n Running estimator", args$estimator, "fold", args$fold_num, "\n")
## Fit the requested estimator: "naive" (closed form), "no_ve" (hierarchical
## model without varying efficiency), or "ve" (with varying efficiency).
## NOTE(review): `br16_mat_nms` is not defined anywhere in this file --
## presumably the column names of the 16S matrix; verify before running the
## naive branch.
if (args$estimator == "naive") {
  if (q == q_obs) {
    system.time(mod <- cbind(qpcr_mat[samp, observed_taxa], apply(matrix(1:q), 1, naive_estimator, br16_mat[samp, ], qpcr_mat[samp, ], observed_taxa)))
    colnames(mod) <- br16_mat_nms[observed_taxa]
  } else {
    system.time(mod <- cbind(qpcr_mat[samp, observed_taxa], apply(matrix((q_obs+1):q), 1, naive_estimator, br16_mat[samp, ], qpcr_mat[samp, ], observed_taxa)))
    colnames(mod) <- c(br16_mat_nms[observed_taxa], br16_mat_nms[taxa_to_estimate][1:(q - q_obs)])
  }
  samps <- NA
  mod_summ <- mod
} else if (args$estimator == "no_ve") {
  ## hierarchical model WITHOUT varying efficiency
  if (args$use_precompiled) {
    system.time(mod <- paramedic::no_efficiency(W = stan_data_lst$W, V = stan_data_lst$V,
                                                X = stan_data_lst$X, n_iter = args$n_iter,
                                                n_burnin = args$n_burnin, n_chains = args$n_chains,
                                                stan_seed = stan_seeds[1],
                                                inits_lst = inits_list,
                                                sigma_beta = stan_data_lst$sigma_beta, sigma_Sigma = stan_data_lst$sigma_Sigma,
                                                control = list(adapt_delta = adapt_delta, max_treedepth = max_treedepth),
                                                open_progress = FALSE, verbose = FALSE))
  } else {
    system.time(mod <- stan(file = paste0(stan_dir, "predict_qpcr_noncentered.stan"),
                            data = stan_data_lst,
                            iter = args$n_iter, warmup = args$n_burnin, chains = args$n_chains, seed = stan_seeds[1],
                            control = list(adapt_delta = adapt_delta, max_treedepth = max_treedepth),
                            verbose = FALSE, open_progress = FALSE,
                            pars = c("mu", "beta", "Sigma"),
                            init = inits_list))
  }
  mod_summ <- summary(mod, probs = c(0.025, 0.975))$summary
  samps <- rstan::extract(mod)
} else if (args$estimator == "ve") {
  ## hierarchical model WITH varying efficiency (extra e/sigma parameters)
  if (args$use_precompiled) {
    system.time(mod <- paramedic::run_paramedic(W = stan_data_lst$W, V = stan_data_lst$V,
                                                X = stan_data_lst$X, n_iter = args$n_iter,
                                                n_burnin = args$n_burnin, n_chains = args$n_chains,
                                                stan_seed = stan_seeds[2],
                                                inits_lst = inits_list,
                                                sigma_beta = stan_data_lst$sigma_beta, sigma_Sigma = stan_data_lst$sigma_Sigma,
                                                alpha_sigma = stan_data_lst$alpha_sigma, kappa_sigma = stan_data_lst$kappa_sigma,
                                                control = list(adapt_delta = adapt_delta, max_treedepth = max_treedepth),
                                                open_progress = FALSE, verbose = FALSE))
  } else {
    system.time(mod <- stan(file = paste0(stan_dir, "predict_qpcr_with_varying_efficiency_noncentered.stan"),
                            data = stan_data_lst,
                            iter = args$n_iter, warmup = args$n_burnin, chains = args$n_chains, seed = stan_seeds[2],
                            control = list(adapt_delta = adapt_delta, max_treedepth = max_treedepth),
                            verbose = FALSE, open_progress = FALSE,
                            pars = c("mu", "beta", "Sigma", "e", "sigma"),
                            init = inits_list))
  }
  mod_summ <- summary(mod, probs = c(0.025, 0.975))$summary
  samps <- rstan::extract(mod)
} else {
  stop("the estimator requested isn't currently implemented")
}
## save off stan model objects, naive estimators, data
## NOTE(review): `data_lst` is not defined anywhere in this file -- verify
## whether `stan_data_lst` (already saved) was meant.
if (args$save_stan_model) {
  save_lst <- list(data = data_lst, mod = mod_summ, stan_data_lst = stan_data_lst, samps = samps, stan_out = mod)
} else {
  save_lst <- list(data = data_lst, mod = mod_summ, stan_data_lst = stan_data_lst, samps = samps, stan_out = NA)
}
saveRDS(save_lst, paste0("qpcr_data_analysis_est_", args$estimator, "_q_", args$q, "_q_obs_", q_obs, "_sample_", args$sample_num, "_loo_", args$leave_one_out, ".rds"))
## Trace plots for each monitored parameter group (plus efficiency for "ve").
trace_plot_nms <- c("mu", "beta", "Sigma")
if (args$estimator == "ve") {
  trace_plot_nms <- c(trace_plot_nms, "e")
}
fig_width <- fig_height <- 2590
cex <- 1.5
if (args$estimator == "naive") {
  ## no MCMC output -> nothing to plot
} else {
  trace_plots_dir <- "trace_plots"
  if (!dir.exists(trace_plots_dir)) {
    dir.create(trace_plots_dir, recursive = TRUE)
  }
  ## NOTE(review): `args$sample` below relies on R's partial matching of list
  ## names to hit `args$sample_num`; spell it out to be safe. Also, `names(mod)`
  ## on a stanfit depends on rstan's names() method -- confirm the grepl-based
  ## parameter selection picks exactly the intended taxon indices (e.g. taxon
  ## "1" vs "10").
  for (n in 1:length(trace_plot_nms)) {
    for (i in 1:args$q) {
      logi <- grepl(trace_plot_nms[n], names(mod)) & !grepl("log", names(mod)) & grepl(i, names(mod))
      if (trace_plot_nms[n] == "mu") {
        logi <- grepl(trace_plot_nms[n], names(mod)) & !grepl("log", names(mod)) & grepl(paste0(",", i, "]"), names(mod), fixed = TRUE)
      }
      if (trace_plot_nms[n] == "e") {
        logi <- grepl(trace_plot_nms[n], names(mod)) & !grepl("log", names(mod)) & !grepl("beta", names(mod)) & grepl(paste0("[", i, "]"), names(mod), fixed = TRUE)
      }
      ## Plot at most 10 traces per PNG, slicing larger parameter groups.
      if (sum(logi) > 10) {
        for (j in 1:ceiling(sum(logi)/10)) {
          CairoPNG(paste0(trace_plots_dir, "/", args$estimator, "_par_", trace_plot_nms[n], "_taxon_", i, "_slice_", j, "_q_", args$q, "_q_obs_", q_obs, "_sample_", as.numeric(args$sample), "_loo_", args$leave_one_out, ".png"), width = fig_width, height = fig_height, res = 300, units = "px")
          plot(traceplot(mod, pars = names(mod)[logi][1:10 + (j-1)*10]), cex = cex)
          dev.off()
        }
      } else {
        CairoPNG(paste0(trace_plots_dir, "/", args$estimator, "_par_", trace_plot_nms[n], "_taxon_", i, "_q_", args$q, "_q_obs_", q_obs, "_sample_", as.numeric(args$sample), "_loo_", args$leave_one_out, ".png"), width = fig_width, height = fig_height, res = 300, units = "px")
        plot(traceplot(mod, pars = names(mod)[logi]), cex = cex)
        dev.off()
      }
    }
  }
}
|
87a188e4b0ccb163a6f75407b93b573cdc62c0a7
|
b761234cdc3b07e81dbc05da5ec1f726650ee7bd
|
/R/officer-utils.R
|
f7aa23278067509a040457ea0dbb359b1965aba3
|
[
"MIT"
] |
permissive
|
elipousson/officerExtras
|
1d76ee389f2d649cf397199d00fb6894fd42eaa0
|
f491277b69e659bb65f65f258878516b2c997e78
|
refs/heads/main
| 2023-08-27T01:32:07.879195
| 2023-08-26T16:51:15
| 2023-08-26T16:51:15
| 606,570,447
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,305
|
r
|
officer-utils.R
|
#' Map an officer object's class to the matching file extension
#'
#' e.g. class "rdocx" becomes "docx"; `prefix` is prepended verbatim
#' (pass "." to get ".docx").
#'
#' @keywords internal
#' @noRd
officer_fileext <- function(x, prefix = "") {
  ext <- sub("^r", "", class(x))
  paste0(prefix, ext)
}
#' Subset an officer object summary by content_type
#'
#' Returns the rows of `x` whose `content_type` is among `type`; returns
#' `NULL` (invisibly) when `x` has no `content_type` column.
#'
#' @keywords internal
#' @noRd
#' @importFrom rlang has_name
subset_type <- function(x, type) {
  if (!has_name(x, "content_type")) {
    return(invisible(NULL))
  }
  keep <- x[["content_type"]] %in% type
  x[keep, ]
}
#' Subset officer object summary by style_name
#'
#' Returns the rows of `x` whose `style_name` matches `style`; when `style`
#' is `NA`, returns the rows with a missing `style_name`. Returns `NULL`
#' (invisibly) when `x` has no `style_name` column.
#'
#' @keywords internal
#' @noRd
#' @importFrom rlang has_name
subset_style <- function(x, style) {
  if (has_name(x, "style_name")) {
    if (!is.na(style)) {
      # BUG FIX: use elementwise `&` (not scalar `&&`) to build the row mask;
      # `&&` errors on length > 1 conditions in R >= 4.3 and silently used
      # only the first row's value before that.
      x[!is.na(x[["style_name"]]) & x[["style_name"]] %in% style, ]
    } else {
      x[is.na(x[["style_name"]]), ]
    }
  }
}
#' Subset an officer object summary by doc_index or id
#'
#' Filters on `doc_index` when present, otherwise on `id`; returns `NULL`
#' (invisibly) when neither column exists.
#'
#' @keywords internal
#' @noRd
#' @importFrom rlang has_name
subset_index <- function(x, index) {
  if (has_name(x, "doc_index")) {
    return(x[x[["doc_index"]] %in% index, ])
  }
  if (has_name(x, "id")) {
    return(x[x[["id"]] %in% index, ])
  }
  invisible(NULL)
}
#' Subset rows by the logical is_header column
#'
#' `header = TRUE` keeps header rows, `FALSE` keeps non-header rows, and any
#' other value (e.g. `NA` or `NULL`) returns `x` unchanged.
#'
#' @keywords internal
#' @noRd
#' @importFrom rlang is_true is_false
subset_header <- function(x, header = TRUE) {
  if (!is_true(header) && !is_false(header)) {
    return(x)
  }
  keep <- x[["is_header"]]
  if (is_false(header)) {
    keep <- !keep
  }
  x[keep, ]
}
|
3d630ce3c1128326ecd421d0006c697c91c2e3d4
|
f96af69ed2cd74a7fcf70f0f63c40f7725fe5090
|
/MonteShaffer/humanVerseWSU/compiling/_stuff_/functions-latlong.R
|
e5cc63d4f206c975156c95bbf988882400213a26
|
[
"MIT"
] |
permissive
|
sronchet/WSU_STATS419_2021
|
80aa40978698305123af917ed68b90f0ed5fff18
|
e1def6982879596a93b2a88f8ddd319357aeee3e
|
refs/heads/main
| 2023-03-25T09:20:26.697560
| 2021-03-15T17:28:06
| 2021-03-15T17:28:06
| 333,239,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,685
|
r
|
functions-latlong.R
|
#' parseDMSfromFormat
#'
#' Parse a degrees/minutes/seconds string into its numeric components.
#'
#' @family LatLong
#'
#' @param str a string form of lat/long, e.g. "3 8 29.73" or "3 8 29.73 N"
#' @param format input format key; only "measurements"-style space-separated
#'        "deg min sec [direction]" strings are implemented ("UTF8" is a
#'        placeholder for future work)
#'
#' @return a list of $degrees , $minutes , $seconds , $direction
#'         ($direction is NULL when no compass letter is present)
#' @export
#'
#' @examples
#' parseDMSfromFormat();
#' parseDMSfromFormat("3 8 29.7335529232441", "MeAsUr");
#' parseDMSfromFormat("-3 8 29.7335529232441");
#' parseDMSfromFormat("-3 8 29.7335529232441 S");
parseDMSfromFormat = function(str="3 8 29.7335529232441", format="measurements")
{
  # only the first four letters of the format key matter (case-insensitive)
  fmt.key <- substr(toupper(format), 1, 4);
  str <- trimMe(str);
  if(fmt.key == "MEAS")
  {
    pieces <- explodeMe(" ", str);
    # an optional 4th token is a compass direction letter (N/S/E/W)
    dir <- if(is.na(pieces[4])) NULL else pieces[4];
    dms <- as.numeric(pieces[1:3]);
    return( list("degrees" = dms[1], "minutes" = dms[2],
                 "seconds" = dms[3], "direction" = dir) );
  }
  if(fmt.key == "UTF8")
  {
    # TODO later ...
  }
}
#' convertDECtoDMS
#'
#'
#' @family LatLong
#'
#'
#' @param decimal lat/long in decimal form
#' @param which is it "latitude" or "longitude" ... matters for c("N","S","E","W")
#' @param return one of :: c("list","measurements","UTF8")
#' @param include.direction if TRUE, string returns will have direction appended to end
#' @param decimal.seconds if TRUE (default), seconds keep their fractional
#'        part; if FALSE they are truncated with floor()
#'
#' @return either a list or a string based on `return` option; default is "list"
#' @export
#'
#' @examples
#' convertDECtoDMS(3.1415926535897932384626, "latitude");
#' convertDECtoDMS(-3.1415926535897932384626, "la");
#'
#'
#' convertDECtoDMS(3.1415926535897932384626, "latitude", "measurements");
#' measurements::conv_unit("3 8 29.7335529232441", "deg_min_sec", "dec_deg");
#' measurements::conv_unit(3.1415926535897932384626, "dec_deg", "deg_min_sec");
#'
#' convertDECtoDMS(-3.1415926535897932384626, "longitude");
#' convertDECtoDMS(-3.1415926535897932384626, "longitude", "UTF8");
#' convertDECtoDMS(3.1415926535897932384626, "lo");
#'
#' convertDECtoDMS(-31.0125,"longitude","UTF8");
#'
#' convertDECtoDMS( convertDMStoDEC(30,60,45,"N"), "latitude", return = "MEAS", include.direction=FALSE);
#' convertDECtoDMS( convertDMStoDEC(30,60,45,"N"), "latitude", return = "MEAS", include.direction=TRUE);
#' convertDECtoDMS( convertDMStoDEC(-30,60,45,"N"), "latitude", return = "MEAS", include.direction=FALSE);
#' convertDECtoDMS( convertDMStoDEC(-30,60,45,"N"), "latitude", return = "MEAS", include.direction=TRUE);
convertDECtoDMS = function(decimal = 105.38, which="latitude", return="list", include.direction=TRUE, decimal.seconds=TRUE)
{
  # "lo*" means longitude (E/W); anything else is treated as latitude (N/S)
  which.2 = substr(tolower(which),1,2);
  direction = if(which.2=="lo") "E" else "N"; # defaults to latitude
  isNegative = (decimal < 0);
  my.sign = "";
  if(isNegative)
  {
    my.sign = "-";
    direction = if(which.2=="lo") "W" else "S";
  }
  # work with the magnitude; the sign is captured by my.sign / direction above
  degrees.dec = abs(decimal);
  degrees = floor(degrees.dec);
  remainder = degrees.dec - degrees;
  minutes.dec = 60*remainder;
  minutes = floor(minutes.dec);
  remainder = minutes.dec - minutes;
  seconds.dec = 60*remainder;
  seconds = floor(seconds.dec);
  my.seconds = seconds;
  if(decimal.seconds) { my.seconds = seconds.dec; }
  return.4 = substr(toupper(return),1,4);
  if(return.4 == "LIST") # "list"
  {
    return ( list("degrees" = degrees, "minutes" = minutes, "seconds" = my.seconds, "direction" = direction) );
  }
  # BUG FIX: the "conv_unit" alias previously compared the FULL `return`
  # string to "CONV", so e.g. return="conv_unit" never matched; compare the
  # normalized 4-character key, consistent with the "MEAS" check.
  if(return.4 == "MEAS" || return.4 == "CONV") # measurements::conv_units
  {
    stem = paste0(degrees," ",minutes," ", my.seconds);
    if(include.direction) { stem = paste0(stem, " ", direction); } else { stem = paste0(my.sign, stem); }
    return ( stem );
  }
  if(return.4 == "UTF8") # UTF8
  {
    stem = paste0(degrees,"°",minutes,"′", my.seconds);
    if(include.direction) { stem = paste0(stem, "″", direction); } else { stem = paste0(my.sign, stem); }
    return( stem );
  }
}
#' convertDMStoDEC
#'
#' Convert degrees/minutes/seconds to a signed decimal degree value.
#'
#' @family LatLong
#'
#' @param degrees numeric, or a character string (plain number, or a DMS
#'        string when `format` is given), or a list as produced by
#'        parseDMSfromFormat()
#' @param minutes numeric (integer likely)
#' @param seconds numeric (integer ??)
#' @param direction one of : c("N","S","E","W"); S/W flip the sign, and a
#'        negative `degrees` flips it again (only degrees carries the sign)
#' @param format when non-NULL, `degrees` is parsed via parseDMSfromFormat()
#'
#' @return updated form as numeric (decimal)
#' @export
#'
#' @examples
#' convertDMStoDEC(30,60,45,"S");
#' convertDMStoDEC(-30,60,45,"S"); # negative is only working correctly on degrees
#'
#' convertDMStoDEC(30,60,45,"N");
#' convertDMStoDEC(-30,60,45,"N");
#'
#' convertDMStoDEC("105",direction="E");
#' convertDMStoDEC("105",direction="W");
#'
#' convertDMStoDEC("105 22 48",direction="E",format="measurements");
#' convertDMStoDEC("105 22 48 W", format="MeAsUr");
#'
#' convertDMStoDEC( convertDECtoDMS(3.1415926535897932384626, "lat", "meas"), format="meas");
convertDMStoDEC = function(degrees, minutes=0, seconds=0, direction="N", format=NULL)
{
  # character input: either a plain numeric string, or a formatted DMS string
  if(is.character(degrees))
  {
    degrees = if(is.null(format)) as.numeric(degrees) else parseDMSfromFormat(degrees, format=format);
  }
  # list input (e.g. from parseDMSfromFormat): unpack the components
  if(is.list(degrees))
  {
    parsed = degrees; # copy before overwriting
    degrees = parsed$degrees;
    minutes = parsed$minutes;
    seconds = parsed$seconds;
    if(!is.null(parsed$direction)) { direction = parsed$direction; }
  }
  # a negative degrees value and an S/W direction each flip the sign
  sign.flip = if(degrees < 0) -1 else 1;
  if(toupper(direction) %in% c("S", "W")) { sign.flip = -sign.flip; }
  # https://stackoverflow.com/questions/1140189/
  sign.flip * (abs(degrees) + minutes/60 + seconds/3600);
}
#' buildBoundingBoxFromRadiusAndGivenLatitudeLongitude
#'
#' A store locator or other "element" locator can utilize a
#' bounding box to generate an initial subset; from which
#' final circle radius calculations can be performed.
#'
#' @family LatLong
#'
#' @param my.radius numeric search radius, in \code{my.units}
#' @param my.latitude numeric decimal latitude of the box center
#' @param my.longitude numeric decimal longitude of the box center
#' @param my.units a valid "length" measure from measurements::conv_unit;
#'        anything unrecognized silently falls back to miles
#'
#' @return a numeric vector of length 4: c(latitude.lower, latitude.upper, longitude.lower, longitude.upper);
#' @export
#'
#' @examples
#' buildBoundingBoxFromRadiusAndGivenLatitudeLongitude(10, 46.76551, -117.1919, "mi");
#' buildBoundingBoxFromRadiusAndGivenLatitudeLongitude(10, 46.76551, -117.1919, "km");
buildBoundingBoxFromRadiusAndGivenLatitudeLongitude = function(my.radius, my.latitude, my.longitude, my.units="mi")
	{
	# Units accepted by measurements::conv_unit for lengths; default is miles.
	option = c("angstrom", "nm", "um", "mm", "cm", "dm", "m", "km", "inch", "ft",
				"yd", "fathom", "mi", "naut_mi", "au", "light_yr", "parsec", "point");
	if(!is.element(my.units,option)) { my.units = "mi"; } # miles
	# Miles spanned by one degree of latitude / one equatorial degree of
	# longitude; converted to the requested units when necessary.
	factor.lat = 68.703; if(my.units != "mi") { factor.lat = measurements::conv_unit(68.703, "mi", my.units); }
	factor.long = 69.172; if(my.units != "mi") { factor.long = measurements::conv_unit(69.172, "mi", my.units); }
	delta.latitude = my.radius / factor.lat ;
	# BUG FIX: the longitude span of a fixed ground distance depends on the
	# LATITUDE (meridians converge toward the poles), not on the longitude.
	# The original used cos(deg2rad(my.longitude)), which for e.g. -117.19
	# yields a negative cosine and an inverted (nonsensical) box.
	delta.longitude = my.radius / (factor.long * cos(deg2rad(my.latitude)));
	latitude.lower = my.latitude - delta.latitude;
	latitude.upper = my.latitude + delta.latitude;
	longitude.lower = my.longitude - delta.longitude;
	longitude.upper = my.longitude + delta.longitude;
	c(latitude.lower, latitude.upper, longitude.lower, longitude.upper);
	}
|
924d2e88621ba44b859e83fdb100aab1a9d7a4fa
|
d01ffddc33db49131b8fe75e7257df524f8871db
|
/server.R
|
f43d9cf33a391abcc68d7d5c9e5a913495d95b28
|
[] |
no_license
|
juandratto/ShinyAppAndReprodPitch
|
96d8c655e076e4f111829e6ce29af406ae544479
|
fa513043951dc233a3c68eddf8c406d2b5373a71
|
refs/heads/master
| 2021-02-13T05:04:16.023549
| 2020-03-03T21:09:59
| 2020-03-03T21:09:59
| 244,664,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(caret)
library(rpart)
library(rpart.plot)
# Define server logic required to draw a histogram
# NOTE(review): despite the template comment above, this server actually fits
# an rpart classification tree on the Titanic training data and serves
# (a) a survival-probability prediction and (b) a plot of the fitted tree.
shinyServer(function(input, output) {
set.seed(1234)  # fixed seed so the fitted tree is reproducible across sessions
df_titanic_training <- read.csv("titanic_train.csv")
# Drop the first column before modelling (presumably a row/passenger id --
# TODO confirm against titanic_train.csv).
df_titanic_training <- df_titanic_training[,-c(1)]
# Classification tree: Survived ~ all remaining columns.
mod_rf <- rpart(Survived ~ ., data = df_titanic_training, method = "class")
# Reactive: recompute the prediction whenever any of the input widgets change.
model1Pred <- reactive({
pclassInput <- as.numeric(input$Sel03_Class)
SexInput <- input$Sel01_Sex
AgeInput <- input$numIn01_Age
EmbarkInput <- input$Sel02_Embark
newdata <- data.frame(Pclass = pclassInput, Sex = SexInput, Age = AgeInput, Embarked = EmbarkInput)
pred <- predict(mod_rf, newdata = newdata)
# Column 2 of the prediction matrix is the probability of class "1"
# (survived); formatted as a percentage string.
paste0(format(as.numeric(pred[1,2])*100, digits = 2), "%")
})
# Render the fitted tree.
output$plot1 <- renderPlot({
rpart.plot(mod_rf)
})
# Render the survival-probability text.
output$pred1 <- renderText({
model1Pred()
})
})
|
22ef14ba64c0c65ff25983e6f41ed29329482d10
|
62bda029e5d083b1c4c58568914bfc2251c1e5e2
|
/bin/core_function.R
|
20811622ae11238e490b73481a9431b6f7895cc2
|
[] |
no_license
|
franzx5/Core_Microb_Krona
|
35529de55e363d0257a42cf72263355d5bb677a2
|
be295647d00be6113b171785ce4ada877cf4a89b
|
refs/heads/master
| 2020-04-26T17:37:31.929540
| 2019-03-08T09:38:03
| 2019-03-08T09:38:03
| 173,719,551
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,314
|
r
|
core_function.R
|
#!/usr/bin/env Rscript
#Author: AKE Franz-Arnold
#EDF Lab r&D
#Date: 27/02/2019
#function
#libraries
#get core database based on specified conditions
#by default take all samples
#return core microbiome database
# get_core_dbase: interactively build a "core microbiome" table.
#
# Args:
#   otu_db_input      - OTU count table; first column is presumably the OTU id
#                       ("OTU"), remaining columns are sample counts (TODO confirm)
#   otu_targets_input - sample metadata; must contain a "Samples" column whose
#                       values match otu_db_input column names
#   otu_annot_input   - taxonomic annotation, one row per OTU row of otu_db_input
#
# Behavior (as visible below): prompts on the console (readline) to let the
# user restrict the sample set by metadata values, draws a Venn diagram of the
# per-condition cores when conditions were chosen, then keeps only OTUs with
# no zero count in any retained sample and returns a frequency table of the
# taxonomic strings for those core OTUs.
#
# NOTE(review): interactive (readline/cat/print) and side-effecting (plots via
# vennCounts/vennDiagram, presumably from limma -- confirm the import site).
get_core_dbase = function(otu_db_input, otu_targets_input, otu_annot_input){
#'''
#get core database based on specified conditions
#by default take all samples
#return core microbiome database
#'''
parts = vector()
conditions = c()
otu_targets_temp = otu_targets_input
flag_1 = F;
# Outer loop: repeat the whole question sequence until the user answers "n".
while(!isTRUE(flag_1)){
flag_1 = F;
choice_1 = readline(prompt = "Do you want to fix some variables ? : ")
# Inner loop: one iteration per metadata variable the user wants to fix.
while(choice_1 == "y"){
choice_2 = NULL; choice_3 = NULL; choice_4 = NULL; flag_2 = F; flag_3 = F
#check valid input - condition number
while(!isTRUE(flag_2)){
print(colnames(otu_targets_temp))
choice_2 = as.numeric(readline(prompt = "Which condition Marie ? (Enter choice number) : "))
if(!isTRUE(choice_2 %in% 1:length(otu_targets_temp))){cat("Bad entry\n")}
else{flag_2 = T}
}
levels_values = levels(as.factor(unlist(otu_targets_temp[,choice_2])))
print(levels_values)
#check valid input - condition names
while(!isTRUE(flag_3)){
choice_3 = readline(prompt = "which values ? (Enter name of the value) : ")
choice_3 = unlist(strsplit(choice_3,split = " "))
#if one condition selected
if(identical(choice_3, character(0))){cat("Empty entry ! all_values taken by default\n"); break}
else if(length(choice_3) > 1){ #if several conditions selected
interest_rows_ind = c()
for (i in 1:length(choice_3)){
if(choice_3[i] %in% levels_values){
interest_rows_ind = c(interest_rows_ind, which(otu_targets_temp[,choice_2] == choice_3[i]))
}else{cat("Bad value name entry !"); break}
}
otu_targets_temp = otu_targets_temp[interest_rows_ind,]
flag_3 = T
}else if(length(choice_3) == 1 & choice_3 %in% levels_values){
otu_targets_temp = otu_targets_temp[which(otu_targets_temp[,choice_2] == choice_3),]
flag_3 = T
}else{cat("Bad entry !\n")}
}
cat("condition specified ...\n")
print(otu_targets_temp)
choice_1 = readline(prompt = "Do you want to fix others variables ?")
if(choice_1 == "n"){flag_1 = T}
# Remember the chosen value(s); used later for the per-condition Venn step.
conditions = c(choice_3, conditions)
}
if(choice_1 == "n" & !isTRUE(flag_1)){cat("All OTU dataset count taken by default ...\n"); flag_1 = T;}
else if(choice_1 == "n" & isTRUE(flag_1)){cat("Choices validated\n")}
else{cat("Bad entry ! try again"); flag_1 = F}
}
# NOTE(review): the large block below is dead (commented-out) exploratory
# code for Venn counting; kept verbatim, consider deleting it.
# print(conditions)
# mat_all = c()
# for(i in 1:length(conditions)){
# col_cond = which(otu_targets_input == conditions[i], arr.ind = T)[,'col'][1]
# cond_temp = (otu_targets_input[,col_cond] == conditions[i])
# colnames(cond_temp) <- conditions[i]
# mat_all = cbind(mat_all, cond_temp)
# }
# counts_cond = vennCounts(mat_all)
# print(str(counts_cond))
#
# # print(counts_cond)
# vennDiagram(counts_cond, circle.col = rainbow(20), names = conditions)
# stop()
# #construct conditions vector
# for(i in 1:length(conditions)){
# tab = as.data.frame(combn(conditions,i))
# for(j in 1:length(tab)){
# cond = paste(as.character(tab[,j]), collapse = "&")
# parts = c(parts, cond)
# }
# }
# #find occurence number for each conditions
# liste_nb_occur = c()
# mat_nb_occur = c()
# for(i in 1:length(parts)){
# temp_targets = otu_targets_input
# value_vec = unlist(strsplit(parts[i], split = "&"))
# if(length(value_vec)== 1){
# nb_occur = length(which(otu_targets_input == value_vec))
# col_number_associated = which(otu_targets_input == value_vec, arr.ind = T)[,'col'][1]
# cond_temp = (otu_targets_input[,col_number_associated] == value_vec)
# colnames(cond_temp) = value_vec
# }else{
# for(j in 1:length(value_vec)){
# if(dim(temp_targets)[1]==0){break}
# temp_targets = temp_targets[which(temp_targets == value_vec[j], arr.ind = T)[,"row"],]
# }
# nb_occur = dim(temp_targets)[1]
# # cond_temp = as.data.frame(x = otu_targets_input$Samples %in% temp_targets$Samples)
# # colnames(cond_temp) = parts[i]
# }
# liste_nb_occur = c(liste_nb_occur, nb_occur)
# mat_nb_occur = cbind(mat_nb_occur, cond_temp)
# }
#
# #clean mat_nb_occur
# # print(str(mat_nb_occur))
# print(mat_nb_occur)
# neg_ind = c()
# for (i in 1:length(mat_nb_occur)){
# if(identical(mat_nb_occur[,i], rep(FALSE,dim(mat_nb_occur)[1]))){
# neg_ind = c(neg_ind, -i)
# }
# }
# print(neg_ind)
# mat_nb_occur = mat_nb_occur[,neg_ind]
# print(mat_nb_occur)
# print(parts)
# # print(mat_nb_occur)
# stop()
#dataset conditioned
# Keep only the count columns whose sample name survived the interactive
# filtering (the +1 skips the leading OTU-id column).
condt_otu_db = otu_db_input[,which(colnames(otu_db_input)[2:length(otu_db_input)] %in% otu_targets_temp$Samples)+1]
cat("dataset conditioned obtained ...\n")
print(conditions)
#get for each conditions the samples associated...
if(!(is.null(conditions))){
collection_cond = c()
for(i in 1:length(conditions)){
col_cond = which(otu_targets_input == conditions[i], arr.ind = T)[,'col'][1]
samples_cond = otu_targets_input$Samples[which(otu_targets_input[,col_cond] == conditions[i])]
#get otu_tab samples for each condition
otu_cond = as.data.frame(otu_db_input[,samples_cond])
#get core logical tab
# TRUE for an OTU row when no sample of this condition has a zero count.
otu_core_cond = matrix(apply(otu_cond, 1, function(x) !(0 %in% x)))
colnames(otu_core_cond) = conditions[i]
#adding
collection_cond = cbind(collection_cond, otu_core_cond)
#printing
core_res = otu_cond[which(otu_core_cond == TRUE),]
rownames(core_res) = otu_db_input$OTU[as.numeric(rownames(core_res))]
print(paste('CORE to Condition: ', conditions[i]))
print(paste('On a ', rownames(core_res)))
}
#Plotting Venn Diagrams
res = vennCounts(collection_cond)
vennDiagram(res, cex = 0.7, circle.col = c("red","blue","green","yellow","orange","gray"))
title('Core microbiome for defined conditions')
}
#get_core_microb for All
cat("Getting core microbiome for all conditions")
interest_rows_ind = c()
# Core = rows with no zero in ANY retained sample.
for (i in 1:dim(condt_otu_db)[1]){if(0 %in% condt_otu_db[i,] == F){interest_rows_ind = c(interest_rows_ind,i)}}
core_condt_otu_db = condt_otu_db[interest_rows_ind,]
core_otu_annot = otu_annot_input[interest_rows_ind,]
cat("\n\nconditioned dataset core microbiome obtained ...\n")
print(core_condt_otu_db)
cat("\n\ntaxonomics assignation associated ...\n")
print(core_otu_annot)
#identify freq of identicals taxons
# Collapse each annotation row (minus its first column) to one comma-joined
# string, count duplicates, then split the strings back into taxon columns.
core_krona_db_annot = as.data.frame(table(do.call(paste,c(core_otu_annot[-1], sep = ","))))[,c(2,1)]
taxons = t(as.data.frame(strsplit(as.character(core_krona_db_annot$Var1), split = ",")))
rownames(taxons) = 1:dim(taxons)[1]
output_tab = as.data.frame(cbind(core_krona_db_annot$Freq,taxons))
colnames(output_tab) = c("Frequence", colnames(core_otu_annot)[2:length(core_otu_annot)])
output_tab$Frequence = as.numeric(as.character(output_tab$Frequence))
cat("\n\nfreq of identified taxons obtained ...\n")
print(output_tab)
return(output_tab)
}
|
fa7687f93bad8626e88f721247c04728dae889ad
|
bb8885fa4eeea616240e82af05a8827bf4855991
|
/man/percentilePlot.Rd
|
b815fc9e88cb5aa5f7a45fa951e8196881627a7b
|
[] |
no_license
|
amd112/rseiAnalysis
|
95f9ef10fb43cfd349b391a08bc1414866a105e3
|
54b8c2899ca248dc1a6c12d003aefdf1fa3b65b2
|
refs/heads/master
| 2021-09-16T04:21:31.073052
| 2018-06-16T10:05:02
| 2018-06-16T10:05:02
| 105,723,664
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,221
|
rd
|
percentilePlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/percentilePlot.R
\name{percentilePlot}
\alias{percentilePlot}
\title{Create a plot of the cdf's of the log toxicity of various groups.}
\usage{
percentilePlot(data, value, log = "l", title = NA)
}
\arguments{
\item{data}{is a list of dataframes. The list must be at least 1 long. Each of the dataframes must contain a column "concentration", and
will be the data for various groups at different times.}
\item{value}{is a list of the columns that you would like plotted. All dataframes must contain all values included here. For example,
if you want to plot the hispanic distributions in 1990 and 2000 -> percentilePlot(c(t1990, t2000), c("hispanic")). Comparitively, if
you want to plot hispanic in 1990, and asian in 2000, percentilePlot(c(t1990, t2000), c("hispanic", "asian")) will plot both groups in
both years, and throw an error if any of those columns don't exist.}
\item{log}{if log == "l", then it will plot the logged toxicity, if log == "", it will plot normal toxicity}
}
\description{
Create a plot of the cdf's of the log toxicity of various groups.
}
\examples{
percentilePlot(list(toxic_1990), c("hispanic", "white"))
}
|
cdd52c24700ca25441bc89d8a4e85f7fd2a3fb96
|
fa9c3976dc1a0f2cc66f2008af3331de7885ada9
|
/R/RcppExports.R
|
f018a83fd2a23baa363b83c35eff5a73f2d55be9
|
[] |
no_license
|
adolgert/mashproto
|
4cdde15337e2e9d457dfb651a7d9ad61f2c08a42
|
567f933c1da83dab886a5456bf91bf0efec90b07
|
refs/heads/master
| 2020-08-24T22:04:29.780216
| 2019-10-31T23:22:37
| 2019-10-31T23:22:37
| 216,915,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 667
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated R wrappers; each forwards its arguments to a
# compiled C++ entry point via .Call(). Regenerate with
# Rcpp::compileAttributes() rather than editing by hand.
# Rcpp skeleton demo entry point.
rcpp_hello_world <- function() {
.Call(`_mashproto_rcpp_hello_world`)
}
# Construct the movement module from a parameter list (semantics live in C++).
movement_init <- function(parameters) {
.Call(`_mashproto_movement_init`, parameters)
}
# Step the movement module; presumably advances it by time_step -- see C++ side.
movement_step <- function(module, time_step) {
.Call(`_mashproto_movement_step`, module, time_step)
}
# Called for its side effect; the .Call result is returned invisibly.
convert_to_r_movement <- function(movement_list, human) {
invisible(.Call(`_mashproto_convert_to_r_movement`, movement_list, human))
}
# Fetch the movement records associated with one human.
movements_of_human <- function(movement_list, human) {
.Call(`_mashproto_movements_of_human`, movement_list, human)
}
|
5f7c730ca0a90084072e20396bc734809c763f6f
|
88d110a9b898a15c7d27f7bc4308549525212253
|
/R/plotOverlapMinimal.R
|
4663dbd712364aeeed8b8e705f7bde7caf8e8daa
|
[] |
no_license
|
frhl/pRoteomics
|
3bbabf7c6b083e37461b8ff26e4f9db6d5b25ab2
|
fc41a617b79032e3353f26c7b860bb5a7aafa791
|
refs/heads/master
| 2020-09-17T01:43:22.452221
| 2020-02-15T18:48:52
| 2020-02-15T18:48:52
| 223,950,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,360
|
r
|
plotOverlapMinimal.R
|
#' @title Draw Overlap Plot
#' @description Gene-list overlap enrichment test + volcano plot. Points are
#'   coloured by significance, overlap with the reference gene list, and bait
#'   status; the subtitle reports the Fisher enrichment p-value.
#' @param df data.frame with at least columns gene, logFC, pvalue, significant
#' @param bait bait gene name(s), highlighted in bait.cols
#' @param reference reference gene list data.frame with columns gene,
#'   significant, dataset, color, and optionally draw (label toggle)
#' @author April Kim
#' @family genoppi
#' @export
### --------------------------------------------------
### gene list overlap enrichment test + volcano plot
plotOverlapMinimal <- function(df, bait, reference, title = '', subtitle = NULL, drawLabel = T,
col.genelist.sig = 'yellow', inweb_enrichment_calculate = F,
size_point = 3, size_text=3, color_alpha=0.8,
plot.legend = T, drawLabelOnlySignificant = T,
mod.arrowhead = 0.5, mod.arrowlength = 3,
bait.cols = c('red', 'orange')){
# NOTE(review): require() inside a function silently continues on failure;
# consider library() at package level or ggplot2::/ggrepel:: prefixes.
require(ggplot2)
require(ggrepel)
# If a df with only genes are inputted assume
# that the user would like to just overlay genes
#if (ncol(reference) == 1){
# reference <- data.frame(gene=reference$gene, significant=TRUE)
# warn('[plotOverlap] no "significant"-column, assuming al#l genes significant.')
#}
# generate statistics for enrichement
# (enrichment_inweb/enrichment are project helpers; both presumably return a
# list with sigGenes, overlap and fisherP -- confirmed by the uses below)
if (inweb_enrichment_calculate){
statistics <- enrichment_inweb(df, bait, reference)
} else{
statistics <- enrichment(df, bait, reference)
}
# %nin% is a project-defined "not in" operator -- TODO confirm import.
if ('draw' %nin% colnames(reference)) reference$draw <- TRUE
# Detected genes split by significance, joined with the reference list.
subset1 <- merge(subset(df, gene %in% statistics$sigGenes & significant), reference)
subset2 <- merge(subset(df, gene %in% statistics$sigGenes & !significant), reference)
# start volcano plot
p <- ggplot(df, aes(x=logFC, y=-log10(pvalue))) +
geom_hline(yintercept=0, color="black") + geom_vline(xintercept=0, color="black") +
xlab(bquote(log[2]*"[fold change]")) + ylab(bquote(-log[10]*"["*italic(.("P"))*"-value]")) +
# plot all proteins (green = significant, blue = not significant)
geom_point(alpha=0.95, size=size_point+0.3, color=ifelse(df$significant, "black", "grey"), shape=ifelse(df$significant, 1, 1)) +
geom_point(alpha=color_alpha, size=size_point, color=ifelse(df$significant, "springgreen3", "grey")) +
#geom_point(alpha=color_alpha, size=size_point, color=ifelse(df$significant, "springgreen3", "grey")) +
#geom_point(subset(df, gene %in% bait), mapping=aes(x=logFC, y=-log10(pvalue)), size=size_point, color="black", shape=1) +
# label sig genes in gene list (yellow = significant, white = not significant)
geom_point(subset1, mapping=aes(x=logFC, y=-log10(pvalue), colour = dataset), size=size_point) +
#geom_point(subset1, mapping=aes(x=logFC, y=-log10(pvalue)), size=size_point, colour = col.genelist.sig) +
#scale_colour_manual(values=setNames(c('orange','blue'), author)) +
#rscale_colour_identity("author", breaks=c('orange','blue'), guide="legend") +
geom_point(subset2, mapping=aes(x=logFC, y=-log10(pvalue)), size=size_point, color="white") +
geom_point(subset(df, gene %in% bait | gene %in% statistics$sigGenes), mapping=aes(x=logFC, y=-log10(pvalue)),
size=size_point, color="black", shape=1) +
# label bait (red = signficant, orange = not significant)
geom_point(subset(df, gene %in% bait & significant), mapping=aes(x=logFC, y=-log10(pvalue)), size=size_point, color=bait.cols[1]) +
geom_point(subset(df, gene %in% bait & !significant), mapping=aes(x=logFC, y=-log10(pvalue)), size=size_point, color=bait.cols[2]) +
# title (with statistics$fisherP) and theme
labs(title = title,
subtitle = ifelse(is.null(subtitle),
paste(length(statistics$sigGenes)," detected. ",length(statistics$overlap),
" significant. p-value = ", format(statistics$fisherP,digits=3),sep=""),
subtitle))+
#ggtitle( +
#theme_minimal()
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), legend.position=c(.15,.15),
legend.key = element_rect(colour = "transparent", fill = "white"))
### only draw text labels for gene list genes if drawLabel==TRUE
if (drawLabel==TRUE) {
p <- p + geom_point(subset(df, (gene %in% bait | gene %in% statistics$sigGenes) & (gene %in% reference[reference$draw, ]$gene) | gene %in% bait), mapping=aes(x=logFC, y=-log10(pvalue)),
size=size_point, color="black", shape=1)
p <- p + geom_text_repel(subset(df, (gene==bait | gene %in% statistics$sigGenes) & (gene %in% reference[reference$draw, ]$gene) | gene %in% bait), mapping=aes(label=gene),
arrow=arrow(length=unit(0.015*mod.arrowhead, 'npc')), box.padding=unit(0.15*mod.arrowlength, "lines"),
point.padding=unit(0.2, "lines"), color="black", size=size_text)
} else {
# Only the bait is labelled when drawLabel is FALSE.
p <- p + geom_text_repel(subset(df, gene==bait), mapping=aes(label=gene),
arrow=arrow(length=unit(0.015, 'npc')), box.padding=unit(0.15, "lines"),
point.padding=unit(0.2, "lines"), color="black", size=size_text)
}
# Map each reference dataset to its declared colour for the legend.
# NOTE(review): if (TRUE) is a leftover debugging toggle.
if (TRUE){
f <- data.frame(table(reference$dataset, reference$color))
dataset_authors <- as.vector(f$Var1[as.logical(f$Freq)])
dataset_colors <- as.vector(f$Var2[as.logical(f$Freq)])
p <- p + scale_color_manual(values = dataset_colors)
}
# Draws the plot as a side effect; the function has no useful return value.
print(p)
}
|
523d7345cdc62952a9fdfbff5568d6dafab3c496
|
5d17b85808e9dac3dcad3d0caa223cf6c3ae1221
|
/man/cloPredTraj.Rd
|
55e8ded1a7c7bf450f16255cf2f0e1ddc6559495
|
[] |
no_license
|
ebmtnprof/qid
|
3c818f3ece373d61562bd0b4c428fa04643a9181
|
648fa6eeaf3d7ebb9e7fb94f41507d63b84ff685
|
refs/heads/master
| 2020-04-16T09:02:31.772340
| 2019-01-14T00:13:14
| 2019-01-14T00:13:14
| 151,756,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,707
|
rd
|
cloPredTraj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clo.R
\name{cloPredTraj}
\alias{cloPredTraj}
\title{Plots the state variable's clo model predicted temporal trajectory}
\usage{
cloPredTraj(origdata, paramData, paramM1, paramM2, dist0name, dist1name,
obsName, m1Name, m2Name)
}
\arguments{
\item{origdata}{A dataframe produced by the "dataPrep" function.}
\item{paramData}{A dataframe produced by the "indivClo" function.}
\item{paramM1}{A vector of centering values for the first comparison model.}
\item{paramM2}{A vector of centering values for the second comparison model.}
\item{dist0name}{A name for the level-0 of the distinguishing variable (e.g., "Women").}
\item{dist1name}{A name for the level-1 of the distinguishing variable (e.g., "Men").}
\item{obsName}{A name for the state variable (e.g., "Emotional Experience").}
\item{m1Name}{A name for the first user-specified model.}
\item{m2Name}{A name for the second user-specified model.}
}
\description{
Produces plots of the state variable's predicted temporal trajectory based on 3 versions of the clo model. The first is the "average" model, with all parameters centered on the sample averages. The other two are specified by the user by providing centering values for any desired parameters. For example, if prior analyses showed that the system variable was predicted by the damping parameter for the level-0 of the distinguishing variable, then a logical pair of models to compare would be one with that parameter centered at a low value, such as 1SD below the mean, and one with it centered at a high value, such as 1SD above the mean. All other parameters would be centered at the sample averages.
}
|
388a8d13e3d218e474a4c7e33ce39516d38f8c54
|
dafdbdfa5ea119afb77aef9030488e9a5ceb9ba9
|
/R/Day6/Programs/cor.R
|
13081faa031de4c6e8a264eefb079eef4bc28d19
|
[] |
no_license
|
shibinmak/advance-diploma-ai-nielit
|
2f33eb5c125353032c1d23e01db3174bfce744ea
|
d2458f571c68f81ebf5211665dfa368fb3210c82
|
refs/heads/master
| 2020-04-03T10:08:17.820106
| 2018-12-17T09:44:00
| 2018-12-17T09:44:00
| 155,184,686
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
cor.R
|
# Pearson correlation (the default method) of two small example vectors.
X<-c(7,6,8,5,6,9)
Y<-c(12,8,12,10,11,13)
cor(X,Y)
# Rank-based alternatives: Spearman's rho and Kendall's tau.
cor(X, Y , method = "spearman")
cor(X, Y , method = "kendall")
# Open the help page for cor().
?cor
# A second example pair (reusing the same variable names).
X<-c(10,8,2,1,5,6)
Y<-c(2,3,9,7,6,5)
cor(X,Y)
|
1b0bef6586d63aaceb4684c22463051e4aa4f970
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/flying/R/lookup_table2.R
|
a544106d0cfbc6556c433013732ae498c16d496a
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,971
|
r
|
lookup_table2.R
|
# Pennycuick's Table II (earlier version of the flight-model text).
# Rows are indexed by x1 + x2, the sum of the metabolic power ratio and the
# profile power ratio, in steps of 0.25 from 0 to 5. Columns:
#   B - ratio of maximum range speed to minimum power speed
#   C - factor for power required at maximum range speed
#   D - factor used when finding the lift:drag ratio
# @name .gen.table2
# @author Brian Masinde
# @return table2 (data.frame with columns x1Plusx2, B, C, D)
.gen.table2 <- function() {
  data.frame(
    x1Plusx2 = seq(0, 5, by = 0.25),
    B = c(1.360, 1.386, 1.452, 1.515, 1.574, 1.631, 1.684, 1.735, 1.784,
          1.830, 1.875, 1.918, 1.959, 1.999, 2.038, 2.075, 2.111, 2.146,
          2.180, 2.213, 2.246),
    C = c(1.140, 1.458, 1.783, 2.115, 2.453, 2.795, 3.141, 3.490, 3.841,
          4.195, 4.550, 4.907, 5.266, 5.625, 5.986, 6.348, 6.711, 7.074,
          7.438, 7.803, 8.168),
    D = c(1.000, 0.824, 0.706, 0.621, 0.556, 0.506, 0.465, 0.431, 0.402,
          0.378, 0.357, 0.339, 0.322, 0.308, 0.295, 0.283, 0.273, 0.263,
          0.254, 0.246, 0.238)
  )
}
#### Look up D values in table 2 (nearest-row match) ####
#' @author Brian Masinde
#' @name .interpolate
#' @param x1plusx2 sum of metabolic power ratio and profile power ratio
#' @param table2 lookup table II (must have columns x1Plusx2 and D)
#' @return D factor from the row whose x1Plusx2 is nearest to the requested
#'         value; on a distance tie the lower row wins
.interpolate <- function(x1plusx2, table2) {
  exact_rows <- which(table2$x1Plusx2 == x1plusx2)
  if (length(exact_rows) > 0) {
    # Exact table hit: return its D value directly.
    return(table2$D[exact_rows])
  }
  # Bracket the requested value between its two table neighbours.
  row_above <- which(table2$x1Plusx2 > x1plusx2)[1]
  row_below <- tail(which(table2$x1Plusx2 < x1plusx2), 1)
  gap_above <- abs(x1plusx2 - table2$x1Plusx2[row_above])
  gap_below <- abs(x1plusx2 - table2$x1Plusx2[row_below])
  # Snap to the closer neighbour; ties go to the lower row.
  if (gap_above >= gap_below) table2$D[row_below] else table2$D[row_above]
}
|
807662c571ea1446b0f51de4ba0ba68b96a4b7c0
|
4798cb29678fb3e54a317ef28ff1ddaec260cb89
|
/HD_RGB_Flight_Height_Tool/old_scripts/TestCode.R
|
8886dadafcf2373ddac5c71e8fed4b1aad73b930
|
[] |
no_license
|
HiDef-Aerial-Surveying/RBG_Flight_Height_Analysis
|
5dd481b3542edb662d75b67a020e24b06f1b97e8
|
167076025cc73526ae586e794bfcc4b7516fff78
|
refs/heads/master
| 2023-06-22T09:36:37.840795
| 2021-07-23T15:11:06
| 2021-07-23T15:11:06
| 320,638,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 776
|
r
|
TestCode.R
|
## Bootstrap test for skewed data to get 95% or 2SD values
require(readxl)
X <- readxl::read_xlsx("./reflected_data/HiDef GANNET reflection height data ALL projects - CLEANED.xlsx")
# Flatten columns 3:10 of every row into one long vector of height readings.
# NOTE(review): foreach/%do% runs sequentially here and is used only for its
# .combine='c' convenience; library(foreach) is not loaded in this view --
# presumably attached elsewhere, TODO confirm.
out <- foreach(k=1:nrow(X),.combine='c') %do% {
y <- unlist(X[k,3:10])
return(y)
}
values <- as.numeric(out)
# Keep only the non-missing readings.
values <- values[!is.na(values)]
nboot <- 1000
bootstrapper <- function(values, nboot) {
  # Draw nboot resamples with replacement, each half the size (rounded up)
  # of the input, and summarise each resample by its median and mean.
  draw_size <- ceiling(length(values) * 0.5)
  resamples <- lapply(1:nboot, function(i) sample(values, size = draw_size, replace = T))
  list(
    data    = resamples,
    medians = sapply(resamples, median),
    means   = sapply(resamples, mean)
  )
}
output <- bootstrapper(values,nboot)
# Raw mean vs. bootstrap mean-of-means (should be close).
mean(values)
mean(output$means)
# Empirical 95% interval of the raw data vs. of the bootstrap means.
quantile(values,c(0.025,0.975))
quantile(output$means,c(0.025,0.975))
|
792bc73a7bced2b46b51db9875085eb4eb9c84f7
|
a5ab0f9e3a46a5f2298fa49b28058978eb310ecf
|
/inst/preprocessing/GSE29172/03.writeWholeGenomeData.R
|
49fe269b637623f71a116dfd5d72bc16d54ce525
|
[] |
no_license
|
mpierrejean/acnr
|
64e960c559867723bf79de85eefa5c65875fb5de
|
d65dbfa70315c6f8a2caa6f7077c4c19ce24190a
|
refs/heads/master
| 2021-12-14T23:19:09.016536
| 2021-11-18T07:46:06
| 2021-11-18T07:46:06
| 59,839,608
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,464
|
r
|
03.writeWholeGenomeData.R
|
## Adapted from http://aroma-project.org/vignettes/PairedPSCBS-lowlevel
## Extracts paired tumor/normal (total CN, allele-B fraction) tracks from two
## aroma.affymetrix data sets and saves one .rds per mixture sample.
dataSet <- "GSE29172"
dataSetN <- "GSE26302"
chipType <- "GenomeWideSNP_6"
tags <- "ACC,ra,-XY,BPN,-XY,AVG,FLN,-XY"
# Tumor (dsT) and matched-normal (dsN) binary CN data sets.
dsT <- AromaUnitTotalCnBinarySet$byName(dataSet, tags=tags, chipType = chipType)
length(dsT)
dsN <- AromaUnitTotalCnBinarySet$byName(dataSetN, tags=tags, chipType = chipType)
length(dsN)
# Full-name translator: reduce each sample name to its mixture percentage
# (the digits following "mix").
fnt <- function(names, ...) {
pct <- gsub(".*mix([0-9]+).*", "\\1", names)
}
setFullNamesTranslator(dsT, fnt)
fullNames <- getFullNames(dsT)
## Naming
tumorName <- "H1395"
normalName <- "BL1395"
## Extract (total,beta) estimates for the tumor-normal pair
dataT <- extractPSCNArray(dsT);
str(dataT);
dataN <- extractPSCNArray(dsN);
str(dataN);
## Get (chromosome, position) annotation data
ugp <- getAromaUgpFile(dsT);
chromosome <- ugp[,1,drop=TRUE];
x <- ugp[,2,drop=TRUE];
## Total intensities and Allele B fractions for the normal
thetaN <- dataN[,"total", 1];
betaN <- dataN[,"fracB", ];
datPath <- "wholeGenomeData";
## A symbolic link to "/home/share/Data/wholeGenomeData"
datPath <- Arguments$getWritablePath(datPath);
chipType <- getChipType(dsT, full=FALSE)
dsName <- getName(dsT)
dataSet <- sprintf("%s,ASCRMAv2", dataSet)
path <- file.path(datPath, dataSet, chipType);
path <- Arguments$getWritablePath(path);
# One iteration per sample (third dimension of the PSCN array).
idxs <- seq(length=dim(dataT)[3])
for (ss in idxs) {
pct <- dimnames(dataT)[[3]][ss]
print(pct)
pairName <- sprintf("%svs%s,%s", tumorName, normalName, pct)
## Total CNs for the tumor relative to the matched normal
CT <- 2 * dataT[,"total", ss] / thetaN;
## Allele B fractions for the tumor
betaT <- dataT[,"fracB", ss];
## Setup data structure
df <- data.frame(chromosome=chromosome, x=x, CT=CT, betaT=betaT, betaN=betaN);
# Drop loci with missing chromosome or position annotation.
dfC <- subset(df, !is.na(df$chromosome) & !is.na(df$x))
str(dfC)
## save
fileName <- sprintf("%s.rds", pairName)
pathname <- file.path(path, fileName)
saveRDS(dfC, file=pathname)
# NOTE(review): disabled per-chromosome export; if ever re-enabled, note it
# first saveRDS()s the WHOLE dfC to the per-chromosome path and only then
# save()s the subset datCC over it -- presumably a leftover bug.
if (FALSE) {
## chromosome by chromosome
for (cc in 1:24) {
print(cc)
fileName <- sprintf("%s,chr=%02d.rds", pairName, cc)
pathname <- file.path(path, fileName)
saveRDS(dfC, file=pathname)
datCC <- subset(dfC, chromosome==cc)
o <- order(datCC$x)
datCC <- datCC[o, ]
str(datCC)
save(datCC, file=pathname)
}
}
}
|
aa33f6a8e031f70f8098dc387ce8b708eebd7aaa
|
cfd39938d7912462f83b5e4bfe6e976ada293495
|
/hw/Learning_Word.R
|
e59b25b0743d09a36eef156828f792f4c72f5d86
|
[] |
no_license
|
jonferrin/Econometrics---Group
|
dfa3ed84151dd78a61f7026e98a2f276b138f5a6
|
4ea229b46d425890911515a189ce57794b9accb5
|
refs/heads/master
| 2022-11-19T04:10:06.851971
| 2020-07-09T15:03:04
| 2020-07-09T15:03:04
| 259,477,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,305
|
r
|
Learning_Word.R
|
# Demo script for the officer package: builds two Word documents,
#   assets/docx/first_example.docx   - image + paragraph + table
#   assets/docx/toc_and_captions.docx - headings, tables, ggplots, captions, TOCs
library(magrittr)
library(officer)
library(ggplot2)

# Two example ggplots used later in the captions document.
gg1 <- ggplot(data = iris, aes(Sepal.Length, Petal.Length)) +
  geom_point()
gg2 <- ggplot(data = iris, aes(Sepal.Length, Petal.Length, color = Species)) +
  geom_point()

my_doc <- read_docx()
styles_info(my_doc)  # list the paragraph/table styles the template provides

# Render a base-graphics barplot to a temporary PNG so it can be embedded.
src <- tempfile(fileext = ".png")
png(filename = src, width = 5, height = 6, units = 'in', res = 300)
barplot(1:10, col = 1:10)
dev.off()

my_doc <- my_doc %>%
  body_add_img(src = src, width = 5, height = 6, style = "centered") %>%
  body_add_par("Hello world!", style = "Normal") %>%
  body_add_par("", style = "Normal") %>% # blank paragraph
  body_add_table(iris, style = "table_template")

print(my_doc, target = "assets/docx/first_example.docx")

# Minimal round-trip: write an empty document to a temp file.
# BUG FIX: the original line was missing its two closing parentheses
# ("... fileext = \".docx\"" with no '))'), which made the remainder of the
# script get swallowed into this call and the whole file unparsable.
read_docx() %>% print(target = tempfile(fileext = ".docx"))

doc <- read_docx() %>%
  body_add_par(value = "Table of content", style = "heading 1") %>%
  body_add_toc(level = 2) %>%
  body_add_par(value = "Tables", style = "heading 1") %>%
  body_add_par(value = "dataset mtcars", style = "heading 2") %>%
  body_add_table(value = head(mtcars)[, 1:4], style = "table_template" ) %>%
  body_add_par(value = "data mtcars", style = "table title") %>%
  shortcuts$slip_in_tableref(depth = 2) %>%
  body_add_par(value = "dataset iris", style = "heading 2") %>%
  body_add_table(value = head(iris), style = "table_template" ) %>%
  body_add_par(value = "data iris", style = "table title") %>%
  shortcuts$slip_in_tableref(depth = 2) %>%
  body_end_section_portrait() %>%
  body_add_par(value = "plot examples", style = "heading 1") %>%
  body_add_gg(value = gg1, style = "centered" ) %>%
  body_add_par(value = "graph example 1", style = "graphic title") %>%
  shortcuts$slip_in_plotref(depth = 1) %>%
  body_add_par(value = "plot 2", style = "heading 2") %>%
  body_add_gg(value = gg2, style = "centered" ) %>%
  body_add_par(value = "graph example 2", style = "graphic title") %>%
  shortcuts$slip_in_plotref(depth = 2) %>%
  body_end_section_landscape() %>%
  body_add_par(value = "Table of tables", style = "heading 2") %>%
  body_add_toc(style = "table title") %>%
  body_add_par(value = "Table of graphics", style = "heading 2") %>%
  body_add_toc(style = "graphic title")

print(doc, target = "assets/docx/toc_and_captions.docx")
|
267f642b78a935653bb34eb2e970dbff2fc4c3e2
|
9ad4cbe64051e36224fe652a2cca4347c478b37b
|
/7_GBM_build_final.r
|
63de70c284fcbf4c0c04d99baf2cb78e5f1a70ef
|
[] |
no_license
|
shikharsgit/SolarEnergy-Pred
|
b5ecac9deea4888a9a992bf93f7fa4806ef5da32
|
c581543860bc53cd8d831a10a965758bee6eb28f
|
refs/heads/master
| 2021-09-24T16:26:08.370744
| 2018-10-11T22:13:12
| 2018-10-11T22:13:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,913
|
r
|
7_GBM_build_final.r
|
##### Building final GBM model
rmse <- function(error) {
  # Root-mean-square error: square root of the mean squared residual.
  squared_errors <- error^2
  sqrt(mean(squared_errors))
}
##Load libraries
library(doSNOW)
library(foreach)
library(gbm)
start.time <- Sys.time()
# NOTE(review): hard-coded absolute paths and setwd() make this runnable only
# on the original machine; consider parameterising.
home=paste0("/home/shikhar/")
setwd(home)
grnd_data = read.csv("ground_data_correctelev_eu.csv",as.is=T)
country_parent_dir = "Europe"
setwd(country_parent_dir)
# Date boundaries encoded as yyyymmdd integers (the trailing 0000 compares
# below any real date in that year): train 2005-2011, validation 2012-2013,
# test 2014.
train_begindate = 20050000
train_enddate = 20120000
val_enddate = 20140000
test_enddate = 20150000
# One worker per station model trained below.
cl <- makeCluster(16) #change to your number of CPU cores
registerDoSNOW(cl)
start.time <- Sys.time()  # NOTE(review): overwrites the start.time set above
### Building all models in parallel
# For each of the 16 ground stations (one per worker), fit a GBM on the
# pooled regional training data, then every 100 trees (up to 4000) score
# both the regional predictions and this station's own rows, appending the
# scores to per-station CSV files.
foreach (stn = 1:16 , .packages=c("gbm")) %dopar% {
station_name = grnd_data[stn,"stid"]
home=paste0("/home/shikhar/Europe/",station_name,"/")
setwd(home)
dataset = read.csv("datasets/dataset_all_ml.csv",as.is=T)
stids = unique(dataset$stid)
# Restrict to the modelling window (after 2005, before 2015)
dataset_todo = dataset[dataset$date>train_begindate,]
dataset_todo = dataset_todo[dataset_todo$date<test_enddate,] ##2014-2015 is test period
dataset_todo = dataset_todo[order(dataset_todo$stid,dataset_todo$date),]
dataset_todo$month <- NULL
rm(dataset)
dir.create("gbm")
dir.create("gbm/RMSE/")
dir.create("gbm/scores/")
setwd("gbm/")
####Creates dataframes for score values
rmse_all = data.frame( model_treenumber = numeric(),time_taken = numeric(), train_score = numeric(), val_score= numeric(),train_relscore = numeric(), val_relscore= numeric(),stringsAsFactors = FALSE)
rmse_test = data.frame( model_treenumber = numeric(),time_taken = numeric(), val_score = numeric(), test_score= numeric(),val_relscore= numeric(),test_relscore= numeric(),stringsAsFactors = FALSE)
# Dummy first row so the CSVs are never empty when re-read inside the loop
to_add<-c(1,2,2,3,4,5)
rmse_all[1,]<-to_add
rmse_test[1,]<-to_add
write.csv(rmse_all,"RMSE/train_val.csv",row.names=F)
write.csv(rmse_all,"RMSE/train_val_regional.csv",row.names=F)
write.csv(rmse_test,"RMSE/val_test.csv",row.names=F)
write.csv(rmse_test,"RMSE/val_test_regional.csv",row.names=F)
starttime2 <- Sys.time()
# Time-based splits: train < 2012, val = 2012-13, test = 2014.
# The *_tocheck frames hold only this station's rows for station-level scores.
train = dataset_todo[dataset_todo$date<train_enddate,]
train_tocheck = train[train$stid==station_name,]
val_test = dataset_todo[dataset_todo$date>train_enddate,]
val = val_test[val_test$date<val_enddate,] ##2012-13 is val set
val_tocheck = val[val$stid==station_name,]
test = val_test[val_test$date>val_enddate,] ##2014 is test set
test_tocheck = test[test$stid==station_name,]
train$stid <- NULL
train_tocheck$stid <- NULL
val$stid <- NULL
val_tocheck$stid <- NULL
test$stid <- NULL
test_tocheck$stid <- NULL
rm(dataset_todo)
# All columns except date/energy are predictors in the formula
vars <- setdiff(colnames(train),c("date","energy"))
all_vars <- paste(vars,collapse="+")
# GBM hyperparameters ("laplace" = absolute-error loss in gbm)
shrinkage = 0.1
trees = 1
minobsinnode = 11
distribution = "laplace"
int_depth = 11
model_treenumber = trees
# Seed the model with a single tree; grown incrementally with gbm.more below
GBM_model = gbm( as.formula(paste("energy~", all_vars, "")), data = train , shrinkage=shrinkage, n.trees=trees, n.minobsinnode=minobsinnode ,distribution=distribution, interaction.depth=int_depth)
for (add_trees in 1:40){ ### for each 100th tree scores will be calculated
starttime2 <- Sys.time()
if (add_trees==1){
model_treenumber = 100
GBM_model = gbm.more(GBM_model,n.new.trees = 99)
}else{
model_treenumber = add_trees*100
GBM_model = gbm.more(GBM_model,n.new.trees = 100)
}
options(scipen=999)
# Regional predictions: rows from all stations
gbm_train_regional = predict.gbm(GBM_model,train[,-grep("date|energy",colnames(train))], n.trees=GBM_model$n.trees)
gbm_val_regional = predict.gbm(GBM_model,val[,-grep("date|energy",colnames(val))], n.trees=GBM_model$n.trees)
gbm_test_regional = predict.gbm(GBM_model,test[,-grep("date|energy",colnames(test))], n.trees=GBM_model$n.trees)
options(scipen=999)
rmse_train_regional = rmse(train[,"energy"]-gbm_train_regional)
rmse_val_regional = rmse(val[,"energy"]-gbm_val_regional)
rmse_test_regional = rmse(test[,"energy"]-gbm_test_regional)
# Relative RMSE expressed as a percentage of the mean observed energy
rel_rmse_train_regional = rmse(train[,"energy"]-gbm_train_regional)/mean(train[,"energy"])*100
rel_rmse_val_regional = rmse(val[,"energy"]-gbm_val_regional)/mean(val[,"energy"])*100
rel_rmse_test_regional = rmse(test[,"energy"]-gbm_test_regional)/mean(test[,"energy"])*100
options(scipen=999)
# Station-level predictions: this station's rows only
gbm_train = predict.gbm(GBM_model,train_tocheck[,-grep("date|energy",colnames(train_tocheck))], n.trees=GBM_model$n.trees)
gbm_val = predict.gbm(GBM_model,val_tocheck[,-grep("date|energy",colnames(val_tocheck))], n.trees=GBM_model$n.trees)
gbm_test = predict.gbm(GBM_model,test_tocheck[,-grep("date|energy",colnames(test_tocheck))], n.trees=GBM_model$n.trees)
##calculating error scores
options(scipen=999)
rmse_train = rmse(train_tocheck[,"energy"]-gbm_train)
rmse_val = rmse(val_tocheck[,"energy"]-gbm_val)
# NOTE(review): this overwrites the rmse_test data.frame created above with a
# scalar; harmless only because the CSV is re-read into rmse_test_save below
rmse_test = rmse(test_tocheck[,"energy"]-gbm_test)
rel_rmse_train = rmse(train_tocheck[,"energy"]-gbm_train)/mean(train_tocheck[,"energy"])*100
rel_rmse_val = rmse(val_tocheck[,"energy"]-gbm_val)/mean(val_tocheck[,"energy"])*100
rel_rmse_test = rmse(test_tocheck[,"energy"]-gbm_test)/mean(test_tocheck[,"energy"])*100
##### saving station prediction scores: append one row per checkpoint
rmse_all = read.csv("RMSE/train_val.csv",as.is=T)
rmse_test_save = read.csv("RMSE/val_test.csv",as.is=T)
endtime2 <- Sys.time()
rmse_all = rbind(rmse_all,c(model_treenumber,as.numeric(round(endtime2 - starttime2,3)),round(rmse_train,4),round(rmse_val,4),round(rel_rmse_train,4),round(rel_rmse_val,4)))
rmse_test_save = rbind(rmse_test_save,c(model_treenumber,as.numeric(round(endtime2 - starttime2,3)),round(rmse_val,4),round(rmse_test,4),round(rel_rmse_val,4),round(rel_rmse_test,4)))
write.csv(rmse_all,"RMSE/train_val.csv",row.names=F)
write.csv(rmse_test_save,"RMSE/val_test.csv",row.names=F)
# Per-checkpoint raw predictions for the test and validation periods
score_file = data.frame(date=test_tocheck$date,score=gbm_test)
write.csv(score_file,paste0("scores/tree_",model_treenumber,"_scores.csv"),row.names=F)
val_score_file = data.frame(date=val_tocheck$date,score=gbm_val)
write.csv(val_score_file,paste0("scores/tree_",model_treenumber,"_scores_val.csv"),row.names=F)
####saving regional prediction scores
rmse_all_regional = read.csv("RMSE/train_val_regional.csv",as.is=T)
rmse_test_regional_save = read.csv("RMSE/val_test_regional.csv",as.is=T)
rmse_all_regional = rbind(rmse_all_regional,c(model_treenumber,as.numeric(round(endtime2 - starttime2,3)),round(rmse_train_regional,4),round(rmse_val_regional,4),round(rel_rmse_train_regional,4),round(rel_rmse_val_regional,4)))
rmse_test_regional_save = rbind(rmse_test_regional_save,c(model_treenumber,as.numeric(round(endtime2 - starttime2,3)),round(rmse_val_regional,4),round(rmse_test_regional,4),round(rel_rmse_val_regional,4),round(rel_rmse_test_regional,4)))
write.csv(rmse_all_regional,"RMSE/train_val_regional.csv",row.names=F)
write.csv(rmse_test_regional_save,"RMSE/val_test_regional.csv",row.names=F)
}
}
# Shut down the parallel workers and report total wall-clock time
stopCluster(cl)
end.time <- Sys.time()
# NOTE(review): difftime picks its own units (secs/mins/hours); the "hours"
# label below is only correct when the run actually exceeds about an hour
time.taken <- round(end.time - start.time,3)
print("GBM completed")
print(paste0("Total time taken = ",time.taken," hours"))
|
c16526de73cf9395bb45024385d79a93ca269ce1
|
d061d08c38167fd944b27f2ded545947050c0ae5
|
/Movielens.R
|
5046830a39a2c93d0a6cc6fb3bf89be1a95102c8
|
[] |
no_license
|
livcaten/Movielens
|
385444941b7ff9e319ae811ca9e66626944f0241
|
db8bc06feb0f978cbff1fbb338283192e941bcb6
|
refs/heads/master
| 2020-06-05T10:59:36.077038
| 2019-06-17T20:55:57
| 2019-06-17T20:55:57
| 192,416,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,430
|
r
|
Movielens.R
|
###################################
# 1. Create edx set and validation set
###################################
# Note: this process could take a couple of minutes (downloads ~63 MB)
library(kableExtra)
# NOTE(review): if(!require(...)) install.packages(...) is install-on-demand;
# plain library() is preferable once dependencies are installed
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat is "::"-delimited; convert the separators to tabs before parsing
ratings <- read.table(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                      col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# NOTE(review): as.numeric(levels(movieId))[movieId] assumes movieId is a
# factor; under R >= 4.0 as.data.frame no longer creates factors by default,
# so this line may need as.numeric(as.character(movieId)) -- confirm R version
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1) # if using R 3.6.0: set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
###################################
# 2.1 Exploratory data analysis
###################################
#Number of rows and columns
nrow(edx)
ncol(edx)
#Number of different users and movies
edx %>% summarize(n_users = n_distinct(userId),n_movies = n_distinct(movieId))
#Movie Rating
# Histogram (log10 x-axis) of how many ratings each movie received
edx %>%
  count(movieId) %>%
  ggplot(aes(n)) +
  geom_histogram(bins = 30, color = "black") +
  scale_x_log10() +
  xlab("Number of ratings") +
  ylab("Movies Count") +
  ggtitle("Number of ratings per movie") + theme(plot.title = element_text(hjust = 0.5))
###################################
# 2.2.1 Basic model
###################################
# Loss function: root mean squared error between observed and predicted
# ratings. Pairs with NA are dropped via na.rm = TRUE.
RMSE <- function(true_ratings, predicted_ratings){
  residuals <- true_ratings - predicted_ratings
  sqrt(mean(residuals^2, na.rm = TRUE))
}
# Create the index and test/training set (80/20 split of edx)
edx <- edx %>% select(userId, movieId, rating)
test_index <- createDataPartition(edx$rating, times = 1, p = .2, list = F)
# Create the index
train <- edx[-test_index, ]
test <- edx[test_index, ]
# Keep only test rows whose movie and user also appear in train
test <- test %>%
  semi_join(train, by = "movieId") %>%
  semi_join(train, by = "userId")
test_dimension<-c(dim(train),dim(test))
# Mean across all ratings in the training set
mu_hat <- mean(train$rating)
# Baseline RMSE: predict the global training mean for every rating.
# NOTE(review): this is evaluated on `validation`, not the internal test
# split, despite the original "RMSE of test set" comment -- confirm intent.
RMSE_base <- RMSE(validation$rating, mu_hat)
RMSE_base
rmse_table_val <- tibble(Method = "Base", RMSE = RMSE_base)
rmse_table_val %>% knitr::kable(caption = "RMSEs")
###################################
# 2.2.2 User and Movie effect Model
###################################
# Predicted rating = mu + m_i (movie effect) + u_i (user effect).
# BUG FIX: user effects were previously estimated from the `test` split
# (data leakage into model fitting); they are now estimated from `train`.
# The duplicated recomputation of mu/movie_avgs/user_avgs/predicted_ratings
# that followed (identical statements repeated) has been removed.
mu <- mean(train$rating)
# Movie effect: average residual per movie after removing the global mean
movie_avgs <- train %>%
  group_by(movieId) %>%
  summarize(m_i = mean(rating - mu))
# User effect: average residual per user after removing mu and the movie
# effect, computed on training data only
user_avgs <- train %>%
  left_join(movie_avgs, by = "movieId") %>%
  group_by(userId) %>%
  summarize(u_i = mean(rating - mu - m_i))
predicted_ratings <- test %>%
  left_join(movie_avgs, by = "movieId") %>%
  left_join(user_avgs, by = "userId") %>%
  mutate(pred = mu + m_i + u_i) %>% .$pred
model_RMSE <- RMSE(test$rating,predicted_ratings )
model_RMSE
#User and Movie effect Model on validation set
validation <- validation %>% select(userId, movieId, rating)
predicted_val <- validation %>%
  left_join(movie_avgs, by = "movieId") %>%
  left_join(user_avgs, by = "userId") %>%
  mutate(pred = mu + m_i + u_i) %>% .$pred
val_RMSE2 <- RMSE( validation$rating,predicted_val)
val_RMSE2
rmse_table_val <- bind_rows(rmse_table_val,
                            tibble(Method="User and Movie Effect ",
                                   RMSE =val_RMSE2 ))
##############################################
# 2.2.3 Regularized user and Movie effect Model
##############################################
# Candidate penalty values for the regularization term
lambdas <- seq(0, 10, 0.25)
# Sequence of lambdas to use
# NOTE(review): lambda is tuned by minimizing RMSE on the validation set
# itself, so the final reported RMSE is optimistically biased -- confirm
# this matches the intended evaluation protocol.
rmses <- sapply(lambdas, function(l){
  mu <- mean(edx$rating)
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu)/(n()+l))
  b_u <- edx %>%
    left_join(b_i, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu)/(n()+l))
  predicted_ratings <- validation %>%
    left_join(b_i, by = "movieId") %>%
    left_join(b_u, by = "userId") %>%
    mutate(pred = mu + b_i + b_u) %>%
    .$pred
  return(RMSE(validation$rating,predicted_ratings))
})
## For each lambda, we find the b_i and the b_u, then make our prediction and test and plot.
qplot(lambdas, rmses)
lambda <- lambdas[which.min(rmses)]
lambda
# NOTE(review): `mu` below is the global left over from section 2.2.2
# (mean(train$rating)), not the mean(edx$rating) used inside sapply above --
# confirm which mean is intended for the final fit.
movie_avgs_reg <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n()+lambda), n_i = n())
## Using lambda, find the movie effects
user_avgs_reg <- edx %>%
  left_join(movie_avgs_reg, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = sum(rating - mu - b_i)/(n()+lambda), n_u = n())
## Using lambda, find the user effects
predicted_ratings_reg <- validation %>%
  left_join(movie_avgs_reg, by='movieId') %>%
  left_join(user_avgs_reg, by='userId') %>%
  mutate(pred = mu + b_i + b_u) %>%
  .$pred
## Make our predicted ratings
model_3_rmse <- RMSE(validation$rating,predicted_ratings_reg)
model_3_rmse
rmse_table_val <- bind_rows(rmse_table_val,
                            tibble(Method="Regularized Movie and User Effect Model",
                                   RMSE = model_3_rmse ))
|
0f4268261b0b668978486ffb56b21ec77c3c218e
|
d8677cdff6f9f1bc36d7d3f47460db4417b721e4
|
/Counterbalancing.R
|
6187f769ba662045cf5de54fd1a828853a07b6ea
|
[] |
no_license
|
cphaneuf/bc_data_science_presentation
|
c59c8952ac6aaf73847bc9d8ab5b4142398d9c1c
|
49d61a8db036564c300aea9c6565c0813e786e8e
|
refs/heads/master
| 2020-03-23T06:15:01.435079
| 2018-07-23T18:52:10
| 2018-07-23T18:52:10
| 141,198,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,539
|
r
|
Counterbalancing.R
|
# Counterbalancing for sample data set
# Before running this script, delete 'Remaining_Master.csv' from your bc_data_science_presentation directory
# Set working directory
# HOW TO: open terminal > change to your local datascienceworkflow directory > type 'pwd' > copy and paste output setwd("<here>")
setwd("/Users/camillephaneuf/Desktop/repos/bc_data_science_presentation")
# Install and load necessary packages
# HOW TO: uncomment the line below if you do not have dplyr installed (you will get an error if you try running this code without it); dplyr is useful for working with data frames
# install.packages("dplyr")
library(dplyr)
# Load data
data <- read.csv("data.csv")
# Clean up data
# Remove excluded individuals (Rejected == 1 or Rejected == NA).
# BUG FIX: `data[data$Rejected == 0, ]` keeps NA-valued rows as all-NA rows
# instead of dropping them (NA logical indices select NA rows); wrapping the
# condition in which() drops NA matches, as the original comment intends.
data <- data[which(data$Rejected == 0), ]
# Remove individuals outside of the [36, 84) age range; which() also drops
# rows with NA ages (assumes Age is in months, given the /12 below -- confirm)
data <- data[which(data$Age < 84 & data$Age >= 36), ]
# Reformat data: bucket age into whole-year categories
data$Age_Category <- (data$Age)/12
data$Age_Category <- floor(data$Age_Category)
# ---CREATE REMAINING_MASTER---
# Calculate frequency values: counts per (condition list, age category) cell
Age_Condition_Table_Master <- table(data$List, data$Age_Category)
# Inspect table
Age_Condition_Table_Master
Remaining_Master <- as.data.frame(Age_Condition_Table_Master)
# Rename columns of Remaining
colnames(Remaining_Master) <- c("Condition_Num","Age","Frequency")
# Target is 10 participants per cell; compute how many remain to be tested
Remaining_Master$Remaining_to_Test <- 10 - Remaining_Master$Frequency
# Write to new CSV file (in other words, create a permanent file for the Remaining data frame)
write.csv(Remaining_Master, file = "Remaining_Master.csv", row.names = FALSE)
|
b29c64efe636a860000034c1f36b70c1e88c8835
|
3162c50b248d9dbb3c210db68d209a916cdc5a56
|
/scripts/analyses/gyrb_visualize_all.R
|
4c5d622ce22cba7fd4dc79d67c37b563219f317c
|
[
"MIT"
] |
permissive
|
ahnishida/captive_ape_microbiome
|
b68f818c20df396180e960b1a23e6b0c3daef0ab
|
afa2310dfd53aa94526aedf3d3bc00f9852e7921
|
refs/heads/master
| 2023-04-17T17:20:47.683032
| 2021-06-08T21:21:50
| 2021-06-08T21:21:50
| 287,132,087
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,686
|
r
|
gyrb_visualize_all.R
|
library(ape)
library(ggtree)
library(tidyverse)
library(phytools)
library(cowplot)
library(ggplot2)
# Fixed seed so later random sampling (sample_n in clade labelling) is reproducible
set.seed(3113)
#summarize 16S and gyrB ASV distributions
HR_16S <- read.table('results/16s/analyses/tables/16S_ASVs_summary.txt',sep='\t',header=T)
HR_16S <- HR_16S %>% filter(Order == 'Bacteroidales')
# Tally 16S ASVs per host-restriction type, overall and captive-ape-only
All_16S <- HR_16S %>% group_by(HR_type) %>% tally()
All_16S$cat <- '16S Bacteroidales ASVs all samples'
CP_16S <- HR_16S %>% filter(CP_pres=='True') %>% group_by(HR_type) %>% tally()
CP_16S$cat <- '16S Bacteroidales ASVs captive apes'
# Same tallies for gyrB ASVs
HR_gyrb <- read.table('results/gyrb/analyses/tables/gyrb_asv_hr_table.txt',sep='\t',header=T)
All_gyrb <- HR_gyrb %>% group_by(HR_type) %>% tally()
All_gyrb$cat <- 'gyrb ASVs all samples'
CP_gyrb <- HR_gyrb %>% filter(CP_pres=='True') %>% group_by(HR_type) %>% tally()
CP_gyrb$cat <- 'gyrb ASVs captive apes'
HR_16S_gyrb <- bind_rows(All_16S,CP_16S,All_gyrb,CP_gyrb)
print('summary of 16S and gyrb ASVs observed')
HR_16S_gyrb %>% group_by(cat) %>% summarise(sum(n))
# Color palette keyed by host-restriction category
HRpalette <- as.vector(recode(unique(HR_16S_gyrb$HR_type), HR_wild_gorilla = "darkgreen",
HR_wild_chimp = "darkorange2",
HR_wild_bonobo = "red2",
HR_human = "dodgerblue",
MX_human_wild_apes = "maroon",
MX_wild_apes = "goldenrod",
Unique_CP='purple'))
# Stacked proportion barplot of ASV counts per category (Figure S2)
plot_HR_16S_gyrb <- ggplot(HR_16S_gyrb, aes(fill=HR_type, y=n, x=cat)) +
geom_bar(position="fill", stat="identity") +
scale_fill_manual(values=HRpalette) +
theme_cowplot() +
theme(axis.text.x = element_text(angle = 90,hjust=1))+
theme(axis.title.x=element_blank())
ggsave(plot_HR_16S_gyrb,file='results/gyrb/analyses/figures/FigureS2_HR_16S_gyrb.pdf')
#FIGURE 1
tree_file <- file.path('results/gyrb/analyses/intermediate_outputs/HRclades_wholetree.tre')
clades_table <- file.path('results/gyrb/analyses/intermediate_outputs/HRclades_wholetree_table.txt')
#read in clades data
clades <- data.frame(read.table(clades_table,sep='\t',header=TRUE))
print('Summary of host restricted gyrB clades')
print('gyrB ASVs distributed among host-restricted, mixed host, and unique to captive clades')
(clades %>% group_by(HR_cat) %>% summarise(sum(ASVsNum)))
# Clade tallies at the three heatmap levels ('Blank' = not in that category)
print('clades across all samples')
(c1_summary <- clades %>% filter(heatmap_col1 !='Blank') %>% group_by(heatmap_col1) %>% tally())
print(c(sum(c1_summary$n),'total clades'))
print('clades present in 25% of individuals of any host species in captivity or wild')
(c2_summary <- clades %>% filter(heatmap_col2 !='Blank') %>% group_by(heatmap_col2) %>% tally())
print(c(sum(c2_summary$n),'total clades'))
print('clades present in 25% of individuals of any host species in captivity')
(c3_summary <- clades %>% filter(heatmap_col3 !='Blank') %>% group_by(heatmap_col3) %>% tally())
print(c(sum(c3_summary$n),'total clades'))
#subset to clades that are observed in threshold cutoff of the individuals with one captive ape species
# NOTE(review): `threshold` is assigned but never used here; the 25% cutoff
# is presumably baked into heatmap_col3 upstream -- confirm.
threshold <- .25
captive_clades <- clades %>%
filter(heatmap_col3 !='Blank')
#reformat to long: one row per (clade, host category) with percent of samples
captive_clades_long <- captive_clades %>%
select(cladeName,wild_gorilla,wild_chimp,wild_bonobo,industrialized_human,non_industrialized_human,captive_chimp,captive_gorilla,captive_orangutan,captive_bonobo) %>%
gather(key = "host_cat", value="percent_samples",-cladeName)
#reorder sample description
new_order = c("non_industrialized_human","industrialized_human","wild_bonobo", "wild_chimp","wild_gorilla",
"captive_bonobo","captive_chimp","captive_gorilla","captive_orangutan")
captive_clades_long$host_cat <- factor(captive_clades_long$host_cat, levels = new_order)
captive_clades_long <- captive_clades_long %>% filter(percent_samples>0)
#read in phylogeny
full_tree <- ape::read.tree(tree_file)
#add moeller clades into the tree
# Locate the MRCA node of each Bt lineage so it can be labelled on the plot
Bt1_lineage_clades = clades %>% filter(lineage == 'Bt1')
Bt1_lineage_clades$cladeName
Bt1_node = findMRCA(full_tree, as.vector(Bt1_lineage_clades$cladeName))
Bt2_lineage_clades = clades %>% filter(lineage == 'Bt2')
Bt2_lineage_clades$cladeName
Bt2_node = findMRCA(full_tree, as.vector(Bt2_lineage_clades$cladeName))
Bt3_lineage_clades = clades %>% filter(lineage == 'Bt3')
# Bt3 only: keep clade names actually present as tips in the tree
Bt3_lineage_clades = intersect(Bt3_lineage_clades$cladeName,full_tree$tip.label)
Bt3_node = findMRCA(full_tree, as.vector(Bt3_lineage_clades))
#tree plot with the three lineage labels (%<+% attaches clade metadata)
tree_plot <- ggtree(full_tree,ladderize = F) %<+% clades +
geom_cladelabel(node=Bt1_node,label='Bt1',color='magenta') +
geom_cladelabel(node=Bt2_node,label='Bt2',color='magenta') +
geom_cladelabel(node=Bt3_node,label='Bt3',color='magenta') +
geom_treescale()
#add heatmap: the three clade-category columns drawn alongside the tree
clades_heatmap <- clades %>%
column_to_rownames("cladeName") %>%
select(heatmap_col1,heatmap_col2,heatmap_col3)
(tree_heatmap <-gheatmap(tree_plot, clades_heatmap, offset = .01, width=0.5) +
scale_fill_manual(values=c("white","dodgerblue3","red2","darkorange2","darkgreen","chocolate4","purple")))
ggsave(tree_heatmap,filename = 'results/gyrb/analyses/figures/Figure1_tree_plot.pdf',width=5,height=10)
#Dotplots
#generate color scales keyed to host categories and HR clade types
dotplot_color_scale <- levels(droplevels(factor(captive_clades_long$host_cat)))
(dotplot_color_scale <- as.vector(recode(dotplot_color_scale, wild_gorilla = "darkgreen",
wild_chimp = "darkorange2",
wild_bonobo = "red2",
industrialized_human = "blue",
non_industrialized_human="skyblue2",
captive_gorilla = "darkolivegreen3",
captive_chimp = "tan1",
captive_bonobo = "indianred2",
captive_orangutan="plum3")))
HRclade_color_scale <- levels(droplevels(factor(captive_clades$HR_type)))
(HRclade_color_scale <- as.vector(recode(HRclade_color_scale, HR_wild_gorilla = "darkgreen",
HR_wild_chimp = "darkorange2",
HR_wild_bonobo = "red2",
HR_human = "dodgerblue3",
MX_human_wild_apes = "chocolate4",
MX_wild_apes = "chocolate4",
Unique_CP = "purple")))
#change order of taxa in dotplot so rows line up with the tree's tip order
ordered_tips = full_tree$tip.label[full_tree$tip.label %in% as.character(captive_clades$cladeName)]
# Sanity checks: both setdiffs should print character(0)
setdiff(ordered_tips,as.character(unique(captive_clades_long$cladeName)))
setdiff(as.character(unique(captive_clades_long$cladeName)),ordered_tips)
captive_clades_long$cladeName = factor(captive_clades_long$cladeName,levels = ordered_tips)
captive_clades$cladeName = factor(captive_clades$cladeName,levels = ordered_tips)
#generate dot plot: dot size = percent of samples per host category
dotplot <- ggplot(captive_clades_long) +
geom_point(aes(x=host_cat,y=cladeName,color = host_cat,size=percent_samples))+
theme_bw()+
theme(
panel.grid = element_blank(),
axis.text.x = element_blank(),
axis.title = element_blank()) +
scale_color_manual(values=dotplot_color_scale)
dotplot
#taxonomy_dotplot: one column of points colored by clade taxonomy
(taxonomy_dotplot <- ggplot(captive_clades) +
geom_point(aes(x=0,y=cladeName,color = cladeTax))+
theme_bw()+
theme(
panel.grid = element_blank(),
axis.text.x = element_blank(),
axis.title = element_blank()))
#add HR_type: one column of points colored by host-restriction type
(HR_type_dotplot <- ggplot(captive_clades) +
geom_point(aes(x=0,y=cladeName,color = HR_type))+
theme_bw()+
theme(
panel.grid = element_blank(),
axis.text.x = element_blank(),
axis.title = element_blank()) +
scale_color_manual(values=HRclade_color_scale))
#put all the dotplots together side by side for Figure 1
all_dotplots <- plot_grid(HR_type_dotplot,taxonomy_dotplot,dotplot,nrow = 1)
ggsave(all_dotplots,filename = 'results/gyrb/analyses/figures/Figure1_all_dotplots.pdf',height=10,width=18)
#SUBTREE FIGURE 1, S1 - sub
# Inputs for the lineage subtree figures: the full Bacteroidales ASV tree,
# Moeller co-diversification reference trees, and the ASV/clade metadata tables
full_tree <- read.tree(file.path('results/gyrb/inputs/physeq_Bacteroidales_ASVs_ref.tree'))
Bt2_inset_tree <- read.tree(file.path('results/gyrb/inputs/moeller_codiv_lin_Bt2.tree'))
Bt3_inset_tree <- read.tree(file.path('results/gyrb/inputs/moeller_codiv_lin_Bt3.tree'))
inset_table <- read.table(file.path('results/gyrb/inputs/moeller_codiv_HRclades.txt'),sep='\t',header=TRUE)
ASV_table <- read.table(file.path('results/gyrb/analyses/intermediate_outputs/HRclades_subtrees_table.txt'),sep='\t',header=TRUE)
# NOTE(review): this print is not followed by the summary it announces
print('distribution of ASVs in clades')
# Extract the subtree spanning all ASVs assigned to one Bacteroidales
# lineage ('Bt1', 'Bt2', 'Bt3').
#
# tree:   ape "phylo" object whose tips include the lineage's ASVs.
# Bt_lin: lineage label matched against table$lineage.
# table:  ASV metadata with `lineage` and `ASV` columns; defaults to the
#         global ASV_table so existing two-argument calls are unchanged
#         (previously this global dependency was implicit).
# Returns the clade rooted at the MRCA of the lineage's ASVs.
extract_subtree <- function(tree, Bt_lin, table = ASV_table) {
  # Metadata rows for the requested lineage
  lin_table <- table %>% filter(lineage == Bt_lin)
  lin_ASVs <- as.vector(lin_table$ASV)
  # The MRCA of all lineage ASVs defines the subtree root
  MRCA <- findMRCA(tree, lin_ASVs)
  subtree <- extract.clade(tree, MRCA)
  return(subtree)
}
# Bt1 subtree; quick look with node-support dots where support label > 0.50
Bt1_tree <- extract_subtree(full_tree,'Bt1')
ggtree(Bt1_tree) + geom_nodepoint(aes(subset = label > .50))
# Group the tips of a lineage subtree by their host-restricted (HR) clade
# and build a matching color vector for plotting with ggtree.
#
# lineage_tree: ape "phylo" subtree for one lineage.
# table:        ASV metadata with `ASV`, `cladeName`, `HR_type` columns.
# Returns a list: [[1]] the tree with one OTU group per HR clade (via
# groupOTU), [[2]] a color vector with "black" prepended (presumably for
# groupOTU's ungrouped tips -- confirm against the plotting calls).
#
# BUG FIX: previously returned c(tree_cls, color_vec); c() on a phylo object
# plus a character vector relies on undocumented c.phylo dispatch to yield a
# list-like result. list() makes the intended container explicit while
# keeping the existing [[1]]/[[2]] accesses working.
add_HRclades_to_tree <- function(lineage_tree, table) {
  #label HR clades in subtree
  taxa <- lineage_tree$tip.label
  # One representative row per clade present in this subtree; sample_n(1)
  # is reproducible because the script calls set.seed at the top
  lineage_table <- table %>%
    dplyr::filter(ASV %in% taxa & cladeName != "") %>%
    dplyr::group_by(cladeName) %>%
    dplyr::sample_n(1)
  cladeNames <- as.vector(lineage_table$cladeName)
  cladeHostSp <- as.vector(lineage_table$HR_type)
  # Collect each clade's member ASVs; groupOTU takes a list of tip sets
  cls <- list()
  for (clade in cladeNames) {
    clade_table <- table %>% filter(cladeName == clade)
    clade_ASVs <- as.character(clade_table$ASV)
    cls[[length(cls) + 1]] <- clade_ASVs
  }
  tree_cls <- groupOTU(lineage_tree, cls)
  # Map each clade's host-restriction category to a plotting color
  color_vec <- as.vector(recode(cladeHostSp, HR_wild_gorilla = "darkgreen",
                                HR_wild_chimp = "darkorange2",
                                HR_wild_bonobo = "red2",
                                HR_human = "dodgerblue3",
                                MX_wild_apes = 'magenta',
                                MX_human_wild_apes = 'magenta'))
  color_vec <- c("black", color_vec)
  list(tree_cls, color_vec)
}
# Color the Bt1 subtree by HR clade ([[1]] = grouped tree, [[2]] = colors)
Bt1_res <- add_HRclades_to_tree(Bt1_tree,ASV_table)
Bt1_tree_cls <- Bt1_res[[1]]
Bt1_color_vec <- Bt1_res[[2]]
ggtree(Bt1_tree_cls, aes(color=group)) +
scale_color_manual(values=Bt1_color_vec)
#moeller clade labels
unique(ASV_table$codiv_clade)
# Find the MRCA node in `tree` spanning all ASVs assigned to one Moeller
# co-diversification clade (membership looked up in the global ASV_table).
get_codiv_table <- function(tree, clade) {
  members <- ASV_table %>%
    filter(codiv_clade == clade) %>%
    pull(ASV)
  findMRCA(tree, as.vector(members))
}
# MRCAs of the Bt1 co-diversification clades, for clade labels on the plot
Bt1_gorilla_node <- get_codiv_table(Bt1_tree,"Bt1_clade1_gorilla")
Bt1_chimp_node <- get_codiv_table(Bt1_tree,"Bt1_clade1_chimp")
Bt1_bonobo_node <- get_codiv_table(Bt1_tree,"Bt1_clade1_bonobo")
ggtree(Bt1_tree_cls, aes(color=group)) +
scale_color_manual(values=Bt1_color_vec) +
geom_nodepoint(aes(subset = label > .50)) +
geom_cladelabel(node=Bt1_gorilla_node,label='Bt1_gorilla',color='green4') +
geom_cladelabel(node=Bt1_chimp_node,label='Bt1_chimp',color='orange') +
geom_cladelabel(node=Bt1_bonobo_node,label='Bt1_bonobo',color='red')
#no captive ape ASVs in Bt1 lineage (expect zero rows)
ASV_table %>%
filter(ASV %in% Bt1_tree$tip.label) %>%
filter(captive_all>0)
#Subset tree and codivASVs to Bt2 lineage
Bt2_tree <- extract_subtree(full_tree,'Bt2')
Bt2_res <- add_HRclades_to_tree(Bt2_tree,ASV_table)
Bt2_tree_cls <- Bt2_res[[1]]
Bt2_color_vec <- Bt2_res[[2]]
Bt2_clade1_gorilla_node <- get_codiv_table(Bt2_tree,"Bt2_clade1_gorilla")
Bt2_clade1_chimp_node <- get_codiv_table(Bt2_tree,"Bt2_clade1_chimp")
Bt2_clade1_bonobo_node <- get_codiv_table(Bt2_tree,"Bt2_clade1_bonobo")
Bt2_clade2_chimp_node <- get_codiv_table(Bt2_tree,"Bt2_clade2_chimp")
Bt2_clade2_bonobo_node <- get_codiv_table(Bt2_tree,"Bt2_clade2_bonobo")
#Bt2 ASVs in captive apes (long format: one row per ASV x captive category)
ASV_table %>%
filter(ASV %in% Bt2_tree$tip.label) %>%
select(ASV,captive_chimp_HOUZ:captive_chimp_PC) %>%
gather(key = "host_cat", value="sample_count",-ASV) %>%
filter(sample_count>0)
#Bt2 ASVs by HR_type (clade-level, then per-ASV classification)
ASV_table %>%
filter(ASV %in% Bt2_tree$tip.label) %>%
group_by(HR_type) %>%
tally()
ASV_table %>%
filter(ASV %in% Bt2_tree$tip.label) %>%
group_by(ASV_HR_type) %>%
tally()
# Figure 2: Bt2 subtree colored by HR clade, with codiv clade labels,
# captive-ape tip markers at fixed x positions, and per-ASV HR-type squares
(Bt2_tree_fig <- ggtree(Bt2_tree_cls, aes(color=group)) + xlim(NA, .5) +
scale_color_manual(values=Bt2_color_vec) + #HR clades
geom_treescale(x=0,y=0) +
geom_nodepoint(aes(subset = label > .50),size=.75) + #bootstrap
#moeller codiv clades
geom_cladelabel(node=Bt2_clade1_gorilla_node,label='Bt2_clade1_gorilla',color='darkgreen',offset=.01) +
geom_cladelabel(node=Bt2_clade1_chimp_node,label='Bt2_clade1_chimp',color='darkorange2',offset=.02) +
geom_cladelabel(node=Bt2_clade1_bonobo_node,label='Bt2_clade1_bonobo',color='red2',offset=.01) +
geom_cladelabel(node=Bt2_clade2_chimp_node,label='Bt2_clade2_chimp',color='darkorange2',offset=.01) +
geom_cladelabel(node=Bt2_clade2_bonobo_node,label='Bt2_clade2_bonobo',color='red2',offset=.01) +
#captive ape ASVs
geom_tiplab(aes(subset=label %in% ASV_table$ASV[ASV_table$captive_all>0],
label=label),align = TRUE, color='black') +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_gorilla_COLZ>0],
label=label), size=1.5,x=.25,color='darkolivegreen3',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_chimp_PC>0],
label=label), size=1.5,x=.28,color='tan1',shape=17) +
#HR_ASVs labels
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_wild_chimp'],
label=label), size=1,color='darkorange2',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_wild_bonobo'],
label=label), size=1,color='red2',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_wild_gorilla'],
label=label), size=1,color='darkgreen',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_human'],
label=label), size=1,color='dodgerblue3',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[str_detect(ASV_table$ASV_HR_type,'MX')],
label=label), size=1,color='magenta',shape=15) +
theme(legend.position = "none"))
ggsave(Bt2_tree_fig,filename = file.path('results/gyrb/analyses/figures/Figure2_Bt2_tree.pdf'),width=5)
#Subset tree and codivASVs to Bt3 lineage (same workflow as Bt2 above)
Bt3_tree <- extract_subtree(full_tree,'Bt3')
Bt3_res <- add_HRclades_to_tree(Bt3_tree,ASV_table)
Bt3_tree_cls <- Bt3_res[[1]]
Bt3_color_vec <- Bt3_res[[2]]
Bt3_clade1_human_node <- get_codiv_table(Bt3_tree,"Bt3_clade1_human")
Bt3_clade1_chimp_node <- get_codiv_table(Bt3_tree,"Bt3_clade1_chimp")
Bt3_clade1_bonobo_node <- get_codiv_table(Bt3_tree,"Bt3_clade1_bonobo")
#Bt3 ASVs in captive apes, tallied per captive host category
ASV_table %>%
filter(ASV %in% Bt3_tree$tip.label) %>%
select(ASV,captive_chimp_HOUZ:captive_chimp_PC) %>%
gather(key = "host_cat", value="sample_count",-ASV) %>%
filter(sample_count>0) %>%
group_by(host_cat) %>%
tally()
#Bt3 ASVs by HR_type (clade-level, then per-ASV classification)
ASV_table %>%
filter(ASV %in% Bt3_tree$tip.label) %>%
group_by(HR_type) %>%
tally()
ASV_table %>%
filter(ASV %in% Bt3_tree$tip.label) %>%
group_by(ASV_HR_type) %>%
tally()
# Figure S1: Bt3 subtree; per-zoo captive tip markers are laid out at
# staggered x positions (COLZ circles, HOUZ squares, PC triangles)
(Bt3_tree_fig <- ggtree(Bt3_tree_cls, aes(color=group)) + xlim(NA, .8) +
scale_color_manual(values=Bt3_color_vec) +
geom_nodepoint(aes(subset = label > .50),size=.75) +
geom_treescale(x=0,y=0) +
geom_cladelabel(node=Bt3_clade1_human_node,label='Bt3_clade1_human',color='dodgerblue3',offset=.01) +
geom_cladelabel(node=Bt3_clade1_chimp_node,label='Bt3_clade1_chimp',color='darkorange2',offset=.01) +
geom_cladelabel(node=Bt3_clade1_bonobo_node,label='Bt3_clade1_bonobo',color='red2',offset=.01) +
geom_tiplab(aes(subset=label %in% ASV_table$ASV[ASV_table$captive_all>0],
label=label),align = TRUE, color='black',offset = .05) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_chimp_PC>0],
label=label), size=3,color='tan1',x=.45,shape=17) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_bonobo_COLZ>0],
label=label), size=3,x=.345,color='indianred2') +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_gorilla_COLZ>0],
label=label), size=3,x=.37,color='darkolivegreen3') +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_orangutan_COLZ>0],
label=label), size=3,x=.395,color='plum3') +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_chimp_HOUZ>0],
label=label), size=3,x=.42,color='tan1',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_gorilla_HOUZ>0],
label=label), size=3,x=.47,color='darkolivegreen3',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$captive_orangutan_HOUZ>0],
label=label), size=3,x=.495,color='plum3',shape=15) +
#HR_ASVs labels
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_wild_chimp'],
label=label), size=1,color='darkorange2',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_wild_bonobo'],
label=label), size=1,color='red2',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_wild_gorilla'],
label=label), size=1,color='darkgreen',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[ASV_table$ASV_HR_type=='HR_human'],
label=label), size=1,color='dodgerblue3',shape=15) +
geom_tippoint(aes(
subset=label %in% ASV_table$ASV[str_detect(ASV_table$ASV_HR_type,'MX')],
label=label), size=1,color='magenta',shape=15) +
theme(legend.position = "none"))
ggsave(Bt3_tree_fig,filename = file.path('results/gyrb/analyses/figures/FigureS1_Bt3_tree.pdf'),width=7)
##Co-diversification test on Bt2 lineage
##Host distance matrix
# NOTE(review): the values look like pairwise divergence estimates between
# hosts -- confirm units/source before citing.
source('scripts/analyses/co_diversification_functions.R')
human = c(0,6.4,6.4,8.60)
chimp = c(6.4,0,2.396,8.60)
bonobo = c(6.4,2.396,0,8.60)
gorilla = c(8.60,8.60,8.60,0)
host.D = rbind(human,chimp,bonobo,gorilla)
colnames(host.D) = c('human','chimp','bonobo','gorilla')
# Negative control: same distances but host labels scrambled
Random.host.D <- host.D
#sample(c('human','chimp','bonobo','gorilla'))
colnames(Random.host.D) = c("gorilla","human","chimp","bonobo")
rownames(Random.host.D) = c("gorilla","human","chimp","bonobo")
#bacterial distance matrix: patristic distances between Bt2 ASVs
Bt2.D = cophenetic(Bt2_tree)
dim(Bt2.D)
#host to ASVs: binary host-by-ASV association matrix
# NOTE(review): the column is spelled "bonbo" (typo for "bonobo"); this is
# only safe if the association tests match hosts by position rather than by
# name -- confirm against co_diversification_functions.R.
Host_Bt2 = HR_gyrb %>% select(ASV,HR_sampleTypes) %>%
mutate(human = ifelse(str_detect(HR_sampleTypes,'human'),1,0),
chimp = ifelse(str_detect(HR_sampleTypes,'chimp'),1,0),
bonbo = ifelse(str_detect(HR_sampleTypes,'bonobo'),1,0),
gorilla = ifelse(str_detect(HR_sampleTypes,'gorilla'),1,0)) %>%
select(-HR_sampleTypes) %>%
filter(ASV %in% colnames(Bt2.D)) %>%
column_to_rownames(var="ASV") %>%
t()
# NOTE(review): only 9 permutations -- presumably a quick-run setting;
# increase substantially for meaningful p-values.
N.perm = 9
############ HOMMOLA ET AL. ###############
(P.hommola <- suppressWarnings(sim_cosp(host.D, Bt2.D, t(Host_Bt2),N.perm)))
#random tree
(Random.P.hommola <- suppressWarnings(sim_cosp(Random.host.D, Bt2.D, t(Host_Bt2),N.perm)))
############ PARAFIT ######################
(P.parafit <- parafit(host.D, Bt2.D, Host_Bt2, nperm = N.perm, test.links = FALSE,
seed = NULL, correction = "cailliez", silent = TRUE)$p.global)
#random tree
(Random.P.parafit <- parafit(Random.host.D, Bt2.D, Host_Bt2, nperm = N.perm, test.links = FALSE,
seed = NULL, correction = "cailliez", silent = TRUE)$p.global)
############# PACo ############
(P.PACo <- PACo_N.perm(host.D, Bt2.D, Host_Bt2, N.perm))
#random tree
(Random.P.PACo = PACo_N.perm(Random.host.D, Bt2.D, Host_Bt2, N.perm))
#write table: collect all p-values (real and scrambled-host control) into one file
co_diversification_res = c(P.hommola,Random.P.hommola,P.parafit,Random.P.parafit,P.PACo,Random.P.PACo)
names(co_diversification_res) = c('P.hommola','P.hommola_RandomHostTree','P.parafit','P.parafit_RandomHostTree','P.PACo','P.PACo_RandomHostTree')
co_diversification_res = as.data.frame(co_diversification_res) %>% rownames_to_column(var = 'co_diversification_test')
write_tsv(co_diversification_res,'results/gyrb/analyses/tables/co_diversification_res.txt')
|
61434040914746374b9f97c07f1000cf06f0b108
|
682d21975c4e622fd59c55107aa3b6105e28c8c4
|
/discriminant_analysis_r/DA.R
|
4d8ad54fa6632e547649221456f57dd99f9b3dbe
|
[] |
no_license
|
stacymiller/dataanalysis
|
d4ef3def0d04e7150467af027c5a4316cb80f0a1
|
a6cfe261d521c0f9bd310ef67af0eda785b4dd16
|
refs/heads/master
| 2021-01-01T20:05:20.565571
| 2017-10-31T19:53:24
| 2017-10-31T19:53:24
| 33,039,431
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,120
|
r
|
DA.R
|
# Discriminant analysis of the "wuschiz" SPSS data set (MMPI-style scales
# vs. schizophrenia diagnosis): group-difference tests, LDA with canonical
# plots, greedy Wilks variable selection, then QDA with cross-validation.
library(foreign)
library(MASS)
library(candisc)
library(car)
library(psych)
library(nortest)
library(stats)
# library(qualityTools)
# library(ppcor)
# library(mixlm)
# library(QuantPsyc)
library(klaR)
# library(biotools)
# Load the SPSS file as a data frame; column 1 appears to be an ID,
# column 2 the Schizo grouping variable (inferred from the df[,-1] /
# df[,-c(1,2)] subsetting below -- confirm against the .sav file).
df <- read.spss("wuschiz.sav",to.data.frame=TRUE)
# NOTE(review): the next line is a no-op -- it prints a logical vector and
# discards it; probably leftover from interactive exploration.
df$Schizo==1
# Drop the Schizo == 1 group.
# NOTE(review): several tests below still compare Schizo==0 vs Schizo==1;
# after this subset the ==1 side may be empty -- verify the Schizo coding.
df <- subset(df, Schizo!=1)
#df <- read.csv("C://?????/5_????/Australian/australian.dat.txt",dec=".",sep=" ")  # alternative data set (path garbled by encoding loss)
# boxM(df[,-c(1,2)],df$Schizo)  # Box's M test of equal covariance matrices (needs biotools)
# Omnibus test of group differences across all scales.
man <- aov(Schizo~.,df[,-1])
summary(man)
# Univariate look at the Pa scale: distribution by group, normality,
# equality of variances, and a two-sample t-test.
boxplot(df$Pa~df$Schizo)
shapiro.test(df$Pa[df$Schizo==0])
var.test(df$Pa[df$Schizo==0],df$Pa[df$Schizo==1])
t.test(df$Pa[df$Schizo==0],df$Pa[df$Schizo==1])
# Linear discriminant analysis on all scales.
l <- lda(Schizo~.,df[,-1])
# NOTE(review): this candisc call looks malformed -- lm() is given
# cbind(L,F) with no formula (and F is FALSE unless a column named F is
# found), and candisc's second argument should be a model term, not a data
# frame.  Likely intended: candisc(lm(cbind(L, F, ...) ~ Schizo, data=df)).
can <- candisc(lm(cbind(L,F)),df[,-1])
l
plot(l)
# Pairwise classification-region plots for the LDA.
partimat(Schizo~.,df[,-1],method="lda")
# Discriminant scores, histograms per group, and a labelled scatter of the
# first two discriminant axes.
pre <- predict(l)
ldahist(pre$x[,1],g=df$Schizo)
ldahist(pre$x[,2],g=df$Schizo)
plot(pre$x[,1],pre$x[,2], col=df$Schizo)
text(pre$x[,1],pre$x[,2],df$Schizo,cex=0.7,pos=4,col="red")
# Greedy forward variable selection by Wilks' lambda (alpha = 0.05).
gree1 <- greedy.wilks(Schizo~.,df[,-1],niveau = 0.05)
gree1
# Split into test/train samples (original Russian comment lost to encoding):
# every 20th row goes to the test set, the rest to training.
df.test <- df[seq(1,nrow(df),20),]
df.train <-df[-seq(1,nrow(df),20),]
df<- na.omit(df)
# Quadratic DA with leave-one-out cross-validation and equal priors.
# NOTE(review): with CV = TRUE, qda() returns a list of class/posterior,
# not a "qda" object, so the predict() call below will likely fail --
# refit without CV if predictions on df are wanted.
l <- qda(df[,-c(1,2)],df[,2], CV = TRUE, prior=c(0.5,0.5))
l
# Cross-validated confusion matrix.
table(l$class,df[,2])
pred <- predict(l,df[,-c(1,2)])$class
table(pred, df[,2])
# MANOVA of the scales against the predicted classes (Wilks' test).
man <- manova(as.matrix(df[,-c(1,2)])~pred)
summary(man, test="Wilks")
# --- Commented-out alternative analysis on the Ozone data set ---
# names<-read.csv("C://?????/RR/4/Ozone2/eighthr.names.txt",header =FALSE ,
#                 na.strings = "?",skip=3)
# names <- as.character(names$V1)
# names <- gsub(names, pattern = ': continuous.', replacement = '')
# names[72] <- 'Precp'
# names <- c('Date', names, 'Day')
#
# df <- read.csv("C://?????/RR/4/Ozone2/eighthr.data.txt",header = FALSE,
#                na.strings = "?")
# df <- as.data.frame(df, ncoll = 74)
# names(df) <- names
#
# df.ex.NA <- na.omit(df)
# df.ex.NA$Day <- as.factor(df.ex.NA$Day)
# l1 <- lda(Day~.,df.ex.NA[,-1])
# l1
# pre <- predict(l1)
# ldahist(pre$x[,1],g=df.ex.NA$Day)
# ldahist(pre$x[,2],g=df.ex.NA$Day)
# plot(pre$x[,1],pre$x[,2])
# plot(l1)
# partimat(Day~.,data=df.ex.NA[1:100,-c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)],method="lda")
|
57f3d8c82b40817013903abc363bcdd28c2f7845
|
d104eaae49776e9f1bf929b2a4bc8d54f305e212
|
/tests/testthat/test-list_output.R
|
05e6781a262b9cd7aee281047085862c2e64ab49
|
[] |
no_license
|
forestgeo/fgeo.misc
|
96d1a65360511bba7ef147bca74e507218c5c654
|
3a9386cebc7512df0d4923c364585f9c7a113c2b
|
refs/heads/master
| 2020-04-10T19:14:48.565370
| 2019-06-21T22:41:11
| 2019-06-21T22:41:11
| 161,228,174
| 2
| 2
| null | 2019-02-04T22:50:11
| 2018-12-10T19:48:01
|
R
|
UTF-8
|
R
| false
| false
| 1,036
|
r
|
test-list_output.R
|
# Tests for list_csv(): write each data frame in a named list to a CSV file.
context("list_csv")

output <- tempdir()

test_that("errs with wrong input", {
  # Neither a bare number nor a list of non-data-frames is acceptable input.
  expect_error(list_csv(1, output))
  expect_error(list_csv(list(1), output))
})

test_that("works as expected", {
  dfs <- list(df1 = data.frame(x = 1), df2 = data.frame(x = 2))
  out_dir <- tempdir()
  list_csv(dfs, out_dir, prefix = "myfile-")
  # At least one prefixed .csv file must have been written to out_dir.
  written <- dir(out_dir, pattern = "myfile")
  expect_true(any(grepl("^myfile.*csv$", written)))
})

# Tests for list_df(): join a (subset of a) named list of data frames.
context("list_df")

dfs <- list(
  a = data.frame(x = 1),
  b = data.frame(x = 2, y = 2),
  c = data.frame(x = 1, z = 3)
)

test_that("errs with wrong input", {
  # A bare number or a single data frame (not wrapped in a list) is rejected.
  expect_error(list_df(1))
  expect_error(list_df(data.frame(1)))
})

test_that("works as expected", {
  # Selecting a subset by name joins only those data frames.
  joined_ac <- list_df(dfs, df_names = c("a", "c"))
  expect_equal(names(joined_ac), c("x", "z"))

  joined_bc <- list_df(dfs, df_names = c("b", "c"))
  expect_equal(names(joined_bc), c("x", "y", "z"))

  # A single-element list and an explicit `by` mapping both work silently.
  expect_silent(list_df(list(data.frame(1))))
  expect_silent(
    list_df(
      list(data.frame(x = 1), data.frame(z = 2)),
      by = c("x" = "z")
    )
  )
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.