blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1cfacc1ce137648982d8108bc0108cd044c72dfe
|
ab0e26eeb1729125151a68dc9b4908eb5727223b
|
/man/genius.Rd
|
1dd9a764986945734b35a884c43dc4d0210e428a
|
[] |
no_license
|
eliph/genefu
|
9dda0e14924f7038babd0bfe6a5ef129f6894931
|
040c6463eae2c57923f685206ac365ae4f90448a
|
refs/heads/master
| 2021-01-04T12:38:49.347906
| 2020-02-14T16:52:56
| 2020-02-14T16:52:56
| 240,553,931
| 0
| 0
| null | 2020-02-14T16:38:27
| 2020-02-14T16:38:27
| null |
UTF-8
|
R
| false
| false
| 2,853
|
rd
|
genius.Rd
|
\name{genius}
\alias{genius}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to compute the Gene Expression progNostic Index Using Subtypes (GENIUS) as published by Haibe-Kains et al. 2010
}
\description{
This function computes the Gene Expression progNostic Index Using Subtypes (GENIUS) as published by Haibe-Kains et al. 2010. Subtype-specific risk scores are computed for each subtype signature separately and an overall risk score is computed by combining these scores with the posterior probability to belong to each of the breast cancer molecular subtypes.
}
\usage{
genius(data, annot, do.mapping = FALSE, mapping, do.scale = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Matrix of gene expressions with samples in rows and probes in columns, dimnames being properly defined.
}
\item{annot}{
Matrix of annotations with at least one column named "EntrezGene.ID", dimnames being properly defined.
}
\item{do.mapping}{
\code{TRUE} if the mapping through Entrez Gene ids must be performed (in case of ambiguities, the most variant probe is kept for each gene), \code{FALSE} otherwise.
}
\item{mapping}{
Matrix with columns "EntrezGene.ID" and "probe" used to force the mapping such that the probes are not selected based on their variance.
}
\item{do.scale}{
\code{TRUE} if the ESR1, ERBB2 and AURKA (module) scores must be rescaled (see \code{\link[genefu]{rescale}}), \code{FALSE} otherwise.
}
}
%%\details{
%% ~~ If necessary, more details than the description above ~~
%%}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
\item{GENIUSM1 }{Risk score from the ER-/HER2- subtype signature in GENIUS model.}
\item{GENIUSM2 }{Risk score from the HER2+ subtype signature in GENIUS model.}
\item{GENIUSM3 }{Risk score from the ER+/HER2- subtype signature in GENIUS model.}
\item{score }{Overall risk prediction as computed by the GENIUS model.}
}
\references{
Haibe-Kains B, Desmedt C, Rothe F, Sotiriou C and Bontempi G (2010) "A fuzzy gene expression-based computational approach improves breast cancer prognostication", \emph{Genome Biology}, \bold{11}(2):R18
}
\author{
Benjamin Haibe-Kains
}
%%\note{
%% ~~further notes~~
%%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link[genefu]{subtype.cluster.predict}},\code{\link[genefu]{sig.score}}
}
\examples{
## load NKI dataset
data(nkis)
## compute GENIUS risk scores based on GENIUS model fitted on VDX dataset
genius.nkis <- genius(data=data.nkis, annot=annot.nkis, do.mapping=TRUE)
str(genius.nkis)
## the performance of the GENIUS overall risk score predictions is not optimal
## since only part of the NKI dataset was used
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ prognosis }
|
c6e4d2a1f24c126ad96260a8f1c6f379db9f81d2
|
a0b79ac6a4461a00d762c55d84b931587e14f453
|
/Plot3.R
|
86f7f824f932c2e7f7327ddd018ca6edde67a59a
|
[] |
no_license
|
JLH2593/ExData_Plotting1
|
73c79987caa8931835c7feb6c949759e9400d9ad
|
ff13590e4e35c9435d523ed00f77ef2d20cac30b
|
refs/heads/master
| 2022-11-24T12:13:10.819758
| 2020-08-01T18:17:10
| 2020-08-01T18:17:10
| 284,081,494
| 0
| 0
| null | 2020-07-31T16:30:16
| 2020-07-31T16:30:15
| null |
UTF-8
|
R
| false
| false
| 755
|
r
|
Plot3.R
|
library(dplyr)
library(lubridate)

# Household power consumption: sub-metering time series for 2007-02-01/02.
# "?" marks missing values in the raw file.
data <- read.table("household_power_consumption.txt", sep = ";",
                   header = TRUE, na.strings = "?")

# Combine Date + Time into a single timestamp.
# BUG FIX: strptime() returns POSIXlt, a list-based class that tibble/dplyr
# reject as a column type; convert to POSIXct for data-frame use.
data$Date_Time <- as.POSIXct(strptime(paste(data$Date, data$Time),
                                      "%d/%m/%Y %H:%M:%S"))

# Keep only the two target days (dates are d/m/Y strings in the raw file).
# BUG FIX: the original used tibble(data), which packs the whole data frame
# into a single df-column so filter() cannot see Date; as_tibble() converts
# the data frame column-by-column as intended.
data <- as_tibble(data) %>%
  filter(Date == "1/2/2007" | Date == "2/2/2007")

# Day-of-week label (e.g. Thu/Fri) for the retained observations.
data$weekday <- wday(data$Date_Time, label = TRUE)

# Draw the three sub-metering series on one set of axes and save to PNG.
png(file = "plot3.png", width = 480, height = 480)
par(mar = c(2, 4, 4, 2))
plot(data$Date_Time, data$Sub_metering_1, type = "n",
     ylab = "Energy sub metering")
lines(data$Date_Time, data$Sub_metering_1)
lines(data$Date_Time, data$Sub_metering_2, col = "red")
lines(data$Date_Time, data$Sub_metering_3, col = "blue")
legend("topright", lwd = c(1, 1, 1), cex = 0.7,
       col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
4b59c09731ec3a4d07d0975a924784110e2f2bd8
|
e847ab0ae68d4e0328d0755151bcf8a51168cd6e
|
/server.R
|
9df3a8b40b37b18bafc843b197dd0512bd01e0c1
|
[] |
no_license
|
EmielGeeraerts/DevDataProd_Final
|
038047052fd79a9b55779d857f514b3f50fdedd9
|
b1df4af4eac082da7b3ab9bcf367d272483f6350
|
refs/heads/master
| 2020-06-23T11:39:44.909690
| 2019-07-24T10:25:48
| 2019-07-24T10:25:48
| 198,612,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,829
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(caret) #for machine learning
library("ggplot2") #for diamonds dataset
library("mgcv")
library(Metrics)
# Define server logic: fit a KNN price model on the diamonds data once per
# session, then serve a reactive price prediction for the user's inputs.
shinyServer(function(input, output) {
  ## ---- One-time model setup ----
  # Load diamonds dataset (shipped with ggplot2).
  data("diamonds")
  # Simplify dataset by keeping a single clarity grade.
  diamonds <- diamonds[diamonds$clarity == "VS2", ]
  # Keep only the columns used by the model.
  diamonds <- diamonds[, c("price", "carat", "cut", "color")]
  # Strip the ordering from the ordered factors so dummyVars() produces plain
  # one-hot indicator columns rather than polynomial contrasts.
  diamonds$cut <- factor(diamonds$cut, ordered = F)
  diamonds$color <- factor(diamonds$color, ordered = F)
  # Encode categorical predictors as dummy variables.
  dmy <- dummyVars("~ .", data = data.frame(diamonds), fullRank = F)
  diamondsD <- data.frame(predict(dmy, newdata = diamonds))
  # Train-test split (75/25), seeded for reproducibility.
  set.seed(123)
  index <- createDataPartition(diamondsD$price, p = 0.75, list = FALSE)
  trainSet <- diamondsD[index, ]
  testSet <- diamondsD[-index, ]
  # 5-fold cross-validation for model tuning.
  trctrl <- trainControl(method = "cv", number = 5)
  # Train the k-nearest-neighbours regression model.
  # BUG FIX: the original call ended with "trControl=trctrl," followed only by
  # a commented-out argument, which leaves a trailing EMPTY argument in the
  # call ("argument is empty" error at run time). The stray comma is removed.
  knn_fit <- train(price ~ ., data = trainSet,
                   method = "knn",
                   trControl = trctrl
                   # preProcess = c("center", "scale")
  )
  # NOTE(review): the original also computed train/test predictions and built
  # a diagnostic ggplot here, but none of it was wired to an output slot, so
  # it was evaluated and discarded on every session start; removed as dead code.
  ## ---- Reactive prediction for user input ----
  # Template row matching the training columns; filled in by the reactive.
  my_input <- as.data.frame(rbind(c(NaN, NaN, NaN, NaN)), stringsAsFactors = T)
  colnames(my_input) <- c("price", "carat", "cut", "color")
  my_inputD_pred <- reactive({
    # Collect inputs from the UI.
    my_input[1, "carat"] <- input$Carat
    my_input[1, "cut"] <- input$Cut
    my_input[1, "color"] <- input$Color
    # Ensure inputs have the same types/levels as the training data.
    my_input$carat <- as.numeric(my_input$carat)
    my_input$cut <- factor(my_input$cut, levels = levels(diamonds$cut))
    my_input$color <- factor(my_input$color, levels = levels(diamonds$color))
    # Apply the SAME dummy encoder that was fit on the training data.
    my_inputD <- data.frame(predict(dmy, newdata = my_input))
    # Predicted price of the input diamond, rounded to whole units.
    round(predict(knn_fit, newdata = my_inputD), digits = 0)
  })
  # Return the predicted price to the UI.
  output$text1 <- renderText({
    my_inputD_pred()
  })
})
|
ce0af82bd89fc4e7ee2fb1c2c02fc4a6195bc3e8
|
70fd8ee5ed93f1de409322ffe62bb0318d1a55ea
|
/Hari_Panjwani_Section_1_Assignment_2/1. Plotly/San Francisco Crime Analysis/sfo.R
|
c58ed159560bb209e0d22663dbb5650c8c8e1ad4
|
[] |
no_license
|
Haricharanpanjwani/Engineering-Big-Data
|
7f7b500a21d533fe4e02d50f0924ff688c1b4779
|
1cc405570aa5accd84a4526eb99a59a9501bbd63
|
refs/heads/master
| 2021-01-20T08:02:55.244408
| 2017-01-04T02:23:41
| 2017-01-04T02:23:41
| 68,549,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,695
|
r
|
sfo.R
|
# NOTE(review): hard-coded, user-specific working directory; prefer an R
# project / relative paths so the script runs on other machines.
setwd("C:/Users/saksh/R Workspace/")
# Set the plotly environment variables.
# NOTE(review): plotly credentials are committed in plain text here -- they
# should be revoked and read from the environment (e.g. ~/.Renviron) instead.
Sys.setenv("plotly_username"="panjwani.h")
Sys.setenv("plotly_api_key"="8w5f8e9g0f")
# Import the required libraries.
library(plotly)
library(ggplot2)
library(plyr)
library(gridExtra)
library(RColorBrewer)
# Load the datasets.
rawCrime <- read.csv("sfoCrime.csv")
crime <- read.csv("crime.csv")
# Plot a histogram showing the frequency of each crime category.
# Total incident counts per category. (The cbind() wrapper around aggregate()
# in the original was a no-op and has been dropped.)
crimeCount <- aggregate(crime[, "IncidntNum"], by = list(crime$Category), sum)
colnames(crimeCount) <- c("Category", "Frequency")
# Bar chart of categories ordered by frequency; x labels rotated to fit.
h <- ggplot(crimeCount, aes(reorder(Category, Frequency), Frequency))
# BUG FIX: size/hjust/vjust were inside aes(), which MAPS constants as data
# (spurious legend, intended values ignored) instead of SETTING them; they
# are now passed outside aes().
crimeFrequency <- h + geom_bar(stat = "identity") +
  ggtitle("Frequency of crime in the SF Bay area") +
  ylab("Number of reports") + xlab("Crime category") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  geom_text(aes(label = round(Frequency)), size = 2, hjust = 0.5, vjust = -1)
# Publish to plotly.
plotly_POST(crimeFrequency, filename = "crime-frequency-sf-bay-area")
### What days and times are especially dangerous?
# Subset only the high-frequency crime categories.
high_cats <- c("LARCENY/THEFT", "ASSAULT", "VANDALISM", "VEHICLE THEFT",
               "BURGLARY", "DRUG/NARCOTIC", "ROBBERY")
crimeHigh <- subset(crime, Category %in% high_cats)
crimeHigh <- droplevels(crimeHigh)
# Reorder factor levels by decreasing frequency so stacked bars sort largest-first.
crimeHigh <- within(crimeHigh, Category <- factor(Category, levels = names(sort(table(Category), decreasing = T))))
crimePd <- within(crime, PdDistrict <- factor(PdDistrict, levels = names(sort(table(PdDistrict), decreasing = T))))
# Crime patterns by category and district according to DayOfWeek.
# BUG FIX: labs() sat on its own line with no preceding "+", so the axis
# labels and legend titles were built and then silently discarded; they are
# now chained onto the plots with "+".
catDow <- ggplot(data = crimeHigh, aes(x = DayOfWeek, fill = Category)) +
  geom_bar(width = 0.9) +
  ggtitle("Frequency of Crime by District and Category") +
  labs(x = "Day of Week", y = "Number of reports",
       fill = guide_legend(title = "Crime category"))
pdDow <- ggplot(data = crimePd, aes(x = DayOfWeek, fill = PdDistrict)) +
  geom_bar(width = 0.9) +
  ggtitle("Frequency of Crime by District and Category") +
  labs(x = "Day of Week", y = "Number of reports",
       fill = guide_legend(title = "District"))
#cpDow <- grid.arrange(pdDow, catDow, nrow=2)
# Stack the two interactive versions vertically and publish.
cpDow <- subplot(ggplotly(catDow),
                 ggplotly(pdDow),
                 nrows = 2, margin = 0.05)
plotly_POST(cpDow, filename = "crime-frequency-Crime-category-and-by-District")
# To prepare for geom_smooth plots: total incidents per time-of-day for each
# high-frequency crime category.
# Helper replacing seven copy-pasted subset/droplevels/ddply stanzas: subset
# one category, drop unused levels, and sum incident counts by Category/Time.
category_by_time <- function(dat, category) {
  onecat <- droplevels(subset(dat, Category == category))
  ddply(onecat, c("Category", "Time"), summarise,
        totalCat = sum(IncidntNum, na.rm = T))
}
# The resulting *Time data frames are identical to the originals; the
# intermediate larc/asst/... subsets were never used downstream and are gone.
larcTime <- category_by_time(crime, "LARCENY/THEFT")
asstTime <- category_by_time(crime, "ASSAULT")
vandTime <- category_by_time(crime, "VANDALISM")
vehcTime <- category_by_time(crime, "VEHICLE THEFT")
burgTime <- category_by_time(crime, "BURGLARY")
narcTime <- category_by_time(crime, "DRUG/NARCOTIC")
robbTime <- category_by_time(crime, "ROBBERY")
# Helper replacing seven near-identical plot stanzas: scatter of report
# totals vs. time of day with a loess smooth; output plots are unchanged.
plot_category_time <- function(dat, point_colour, title) {
  ggplot(dat, aes(x = Time, y = totalCat, group = 1)) +
    geom_point(colour = point_colour, size = 2) +
    geom_smooth(method = "loess") +
    labs(x = "Time (24-hour interval)", y = "Number of reports") +
    ggtitle(title)
}
larcPlot <- plot_category_time(larcTime, "red", "Larceny/Theft vs Time")
asstPlot <- plot_category_time(asstTime, "blue", "Assault vs Time")
vandPlot <- plot_category_time(vandTime, "darkgreen", "Vandalism vs Time")
vehcPlot <- plot_category_time(vehcTime, "purple", "Vehicle Theft vs Time")
burgPlot <- plot_category_time(burgTime, "orange", "Burglary vs Time")
narcPlot <- plot_category_time(narcTime, "black", "Drug/Narcotic vs Time")
robbPlot <- plot_category_time(robbTime, "brown", "Robbery vs Time")
# Static 3-column grid of all seven plots.
grid.arrange(larcPlot, asstPlot, vandPlot, vehcPlot, burgPlot, narcPlot,
             robbPlot, ncol = 3)
# Combine all seven crime/time scatter plots into one interactive figure
# (3 rows, shared axes) and publish it to plotly.
time_plots <- list(larcPlot, asstPlot, vandPlot, vehcPlot,
                   burgPlot, narcPlot, robbPlot)
crime_time_fig <- layout(
  do.call(subplot,
          c(lapply(time_plots, ggplotly),
            list(margin = 0.05, shareX = TRUE, shareY = TRUE, nrows = 3))),
  title = "Crime vs Time"
)
plotly_POST(crime_time_fig, filename = 'Crime-name-vs-time')
# Heatmap of District/Category
# Total incidents for every district/category pair.
heat_counts <- ddply(rawCrime, c("PdDistrict", "Category"), summarise,
                     totalCrime = sum(IncidntNum, na.rm = TRUE))
# Logarithmic bins 1, 10, ..., 1e6, labelled 1-6 for the fill scale.
log_breaks <- c(1, 10^(1:6))
heat_counts$bin <- cut(heat_counts$totalCrime, breaks = log_breaks,
                       labels = 1:6, include.lowest = TRUE)
majorArea <- ggplot(heat_counts, aes(y = Category, x = PdDistrict)) +
  geom_tile(aes(fill = bin)) +
  scale_fill_manual(name = "Crime Incidents", labels = log_breaks,
                    values = rev(brewer.pal(6, "Spectral"))) +
  xlab("") + ylab("") + ggtitle("Heatmap of crime by District/Category")
plotly_POST(majorArea, filename = 'Heatmap of District/Category')
|
ab0a0d576b66a973dc1f32037eca150f646d8b7d
|
ab5d317c5a8d426c9d7f1523a759fbfd4fb60ed1
|
/R/verbose_message.R
|
04f81b045ae126397631bce6377c4e169361902c
|
[
"MIT"
] |
permissive
|
acelt/aim.analysis
|
5883f5bbb8c2b79825c7cb5326b79db1ffdea9dd
|
1c8d2f2df49a336bb8cfeeb81bea4e80f6aa3daa
|
refs/heads/master
| 2021-01-02T10:28:27.236985
| 2020-02-27T21:09:59
| 2020-02-27T21:09:59
| 239,578,777
| 0
| 0
|
MIT
| 2020-02-27T21:06:45
| 2020-02-10T18:07:27
| null |
UTF-8
|
R
| false
| false
| 2,337
|
r
|
verbose_message.R
|
#' Display the message if criteria are met
#' @description Sometimes you want to display a message not only when certain criteria are met but also only if you've asked a function to be verbose
#' @param string Character string. The text to display. Non-character input is coerced with \code{as.character()}.
#' @param criteria Optional logical vector. One or more logical values or statements, e.g. \code{c(length(vector) > 1, is.character(vector))}. These are compared/combined according to \code{criteria_relationship}. Defaults to \code{NULL}, which is treated as \code{TRUE}.
#' @param criteria_relationship Character string. The approach to comparing/combining the values in \code{criteria}. Valid options are \code{"all"} (resolves \code{criteria} to \code{TRUE} if all values in \code{criteria} are \code{TRUE}), \code{"any"} (\code{TRUE} if any value is \code{TRUE}), \code{"xor"} (\code{TRUE} if exactly one value is \code{TRUE}), and \code{"none"} (\code{TRUE} if all values are \code{FALSE}). Defaults to \code{"all"}.
#' @param type Character string. Which function to use to relay \code{string}, \code{message()} or \code{warning()}. Valid values are \code{"message"} and \code{"warning"}. Defaults to \code{"message"}.
#' @param verbose Logical value. Intended to take the logical value from the parent function signalling whether or not it should be verbose; \code{string} is only relayed when this is \code{TRUE}. Defaults to \code{TRUE}.
vmessage <- function(string,
                     criteria = NULL,
                     criteria_relationship = "all",
                     type = "message",
                     verbose = TRUE) {
  # Coerce so message()/warning() always receive character input.
  if (!is.character(string)) {
    string <- as.character(string)
  }
  # NULL criteria means "no conditions": always considered met.
  if (is.null(criteria)) {
    criteria <- TRUE
  }
  if (!is.logical(criteria)) {
    stop(paste0("criteria is class ", class(criteria),
                " but it must be logical"))
  }
  # Validate the enumerated arguments up front. The original silently fell
  # through on a typo (switch() returned NULL, then the scalar if() failed
  # with a cryptic "argument is of length zero" error).
  criteria_relationship <- match.arg(criteria_relationship,
                                     c("all", "any", "xor", "none"))
  type <- match.arg(type, c("message", "warning"))
  # Collapse the logical vector to a single TRUE/FALSE.
  criteria_met <- switch(criteria_relationship,
                         "all" = all(criteria),
                         "any" = any(criteria),
                         "xor" = sum(criteria) == 1,
                         "none" = sum(criteria) == 0)
  # Scalar condition: use short-circuiting && rather than element-wise &.
  if (criteria_met && verbose) {
    switch(type,
           "message" = message(string),
           "warning" = warning(string))
  }
}
|
85828026202ff2073c425ee337b9a344ea287eb6
|
653b8ba356ed50f74a442455e409f62976b4464d
|
/modelAnalyzeR/man/western_electric_TS_counts.Rd
|
66c9a73de6fc73c6b681c3226544ac35f7e93ce4
|
[
"MIT"
] |
permissive
|
kiran1984/SCOPE-Anomaly-Detection-Case-Study
|
e5bcfaf981b78695f7ebebdfb8b40ed7871244c5
|
21a0bb9e16a200ba1fcf29354c544524cec9a154
|
refs/heads/master
| 2020-06-22T11:09:53.603581
| 2018-06-30T21:53:38
| 2018-06-30T21:53:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 751
|
rd
|
western_electric_TS_counts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/case_study_results.R
\name{western_electric_TS_counts}
\alias{western_electric_TS_counts}
\title{Western Electric Rules TS Alert Counts}
\usage{
western_electric_TS_counts(beginning, middle, end, rule)
}
\arguments{
\item{beginning}{Dataframe of TS results for beginning of the month}
\item{middle}{Dataframe of TS results for middle of the month}
\item{end}{Dataframe of TS results for end of the month}
\item{rule}{Rule to filter for}
}
\value{
A list containing dataframe of TS alert counts, a graph of the results, and a boxplot of the distribution
}
\description{
Calculate TS client and campaign alert counts for Western Electric Rules
}
\author{
Stefanie Molin
}
|
2fc334e57d577c53deb62a7360f4a27f33fc7b2f
|
b4af76cfa8a2fccdcd44af5fd403b84e43446aff
|
/man/af_get_aggregate_data.Rd
|
22258ba4690df094fd6542c12e2a8429549bcdf6
|
[] |
no_license
|
DrSterling/r-appsflyer
|
499f46c4bf333731ea71dbe0e06ef4fcc60796c0
|
b21ec42e1b564d1358186b6d35b1c1788d7ff573
|
refs/heads/main
| 2023-03-27T02:15:18.979756
| 2021-03-24T08:55:04
| 2021-03-24T08:55:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,857
|
rd
|
af_get_aggregate_data.Rd
|
\name{af_get_aggregate_data}
\alias{af_get_aggregate_data}
\title{
Get AppsFlyer aggregate data
}
\description{
Get AppsFlyer Aggregate (user acquisition and retargeting) data
}
\usage{
af_get_aggregate_data(
date_from = Sys.Date() - 8,
date_to = Sys.Date() - 1,
report_type = c("daily_report",
"partners_report",
"partners_by_date_report",
"geo_report",
"geo_by_date_report"),
additional_fields = c("keyword_id",
"store_reinstall",
"deeplink_url",
"oaid",
"install_app_store",
"contributor1_match_type",
"contributor2_match_type",
"contributor3_match_type",
"match_type"),
media_source = NULL,
attribution_touch_type = NULL,
currency = NULL,
timezone = "Europe/Moscow",
retargeting = NULL,
app_id = getOption("apps_flyer_app_id"),
api_token = getOption("apps_flyer_api_key"))
}
\arguments{
\item{date_from}{
Reporting start date.
}
\item{date_to}{
Reporting finish date.
}
\item{report_type}{
Report type. One of: daily_report, partners_report, partners_by_date_report, geo_report, geo_by_date_report. For more details go to Integration > API access in AppsFlyer Web UI.
}
\item{additional_fields}{
Character vector of report's additional fields.
}
\item{media_source}{
Use to limit (filter) the report to a specific media source. For example, if you only need Facebook data in your report, use media_source="facebook".
}
\item{attribution_touch_type}{
Set this parameter as shown in the example to get \href{https://support.appsflyer.com/hc/en-us/articles/207034346#viewthrough-attribution-vta-kpis}{view-through attribution} (VTA) KPIs. For example attribution_touch_type="impression".
}
\item{currency}{
Currency of revenue and cost. Aggregate Pull API reports always use the app-specific currency.
}
\item{timezone}{
Your timezone, for example Europe/Moscow.
}
\item{retargeting}{
If TRUE you get retargeting data.
}
\item{app_id}{
Your app id from apps flyer.
}
\item{api_token}{
Your AppsFlyer API token V1.0. For more details, follow this \href{https://support.appsflyer.com/hc/en-us/articles/360004562377}{link}.
}
}
\author{
Alexey Seleznev
}
\seealso{
\href{https://support.appsflyer.com/hc/en-us/articles/207034346-Pull-APIs-Pulling-AppsFlyer-Reports-by-APIs}{AppsFlyer Pull API documentation}
}
\examples{
\dontrun{
af_set_api_token("Your API token")
geo_data <- af_get_aggregate_data(
date_from = "2021-03-01",
date_to = "2021-03-15",
report_type = "geo_by_date_report",
app_id = "id0001111"
)
}
}
|
99e256f3f037a9db29aca3352e9628d22744b83b
|
7da718dc45c69be0dbf0409fe423f32f28151dff
|
/inst/shiny/server_4_evalMetrics/server_4_evalMetrics_funcs.R
|
dc3cbc23fec2d6ae9f7a7d5c51597305042df8d2
|
[] |
no_license
|
cran/eSDM
|
ac865dd1a35268c31a17be3e20d964b1882bb850
|
35c58df0a1d89e5c501ecd55cb3608c5ebda5101
|
refs/heads/master
| 2021-06-16T10:59:20.678147
| 2021-05-04T03:50:08
| 2021-05-04T03:50:08
| 199,010,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,686
|
r
|
server_4_evalMetrics_funcs.R
|
### Non-reactive functions for Evaluation Metrics tab
###############################################################################
# Process evaluation metric validation data
### Process data frame (x) with long, lat, and data column;
### processing method depends on data type (y):
###   y == 1 -> count data (3rd column is a numeric count)
###   y == 2 -> presence/absence data (3rd column holds p.codes/a.codes)
### p.codes / a.codes: user-selected presence and absence codes (y == 2 only).
### Returns the result of check_dateline() applied to the processed sf points
### (NOTE(review): check_dateline() is defined elsewhere; assumed to
### validate and return the point object -- confirm).
eval_proc_df <- function(x, y, p.codes, a.codes) {
#----------------------------------------------------------------------------
# Basic input checks: 3-column data frame and a recognized data-type code.
stopifnot(
is.data.frame(x),
ncol(x) == 3,
y %in% c(1, 2)
)
if (y == 1) {
#--------------------------------------------
# Count data
# Shiny-style validation: the count column must be numeric.
validate(
need(is.numeric(x[, 3]) | is.integer(x[, 3]),
paste("Error: Selected validation data column is not numeric.",
"Consider importing data as 'Presence/absence' data"))
)
# sight = 1 if any individuals were counted, 0 otherwise; reorder
# columns (by position) to lon, lat, sight, count.
x <- x %>%
dplyr::rename(lon = 1, lat = 2, count = 3) %>%
dplyr::mutate(sight = as.numeric(count > 0)) %>%
dplyr::select(1, 2, 4, 3)
} else {
#--------------------------------------------
# Presence/absence data
# No counts are available for presence/absence data.
x <- x %>%
dplyr::rename(lon = 1, lat = 2, sight.temp = 3) %>%
dplyr::mutate(count = NA)
# Codes must be selected, disjoint, and cover every value in the column.
validate(
need(!(is.null(p.codes) & is.null(a.codes)),
paste("Error: Please select one or more",
"presence codes and absence codes")),
need(all(!(p.codes %in% a.codes)),
paste("Error: Please ensure that no presence and",
"absence codes are the same")),
need(all(unique(x$sight.temp) %in% c(p.codes, a.codes)),
paste("Error: Please ensure that all codes are classified",
"as either presence or absence codes"))
)
# sight = 1 for presence codes, 0 for absence codes; reorder columns
# (by position) to lon, lat, sight, count.
x <- x %>%
dplyr::mutate(sight = ifelse(sight.temp %in% p.codes, 1, 0)) %>%
dplyr::select(1, 2, 5, 4)
}
#----------------------------------------------------------------------------
# Both branches must yield exactly these four columns in this order.
stopifnot(
ncol(x) == 4,
names(x) == c("lon", "lat", "sight", "count")
)
# If the minimum longitude exceeds 180, the data use a 0-360 convention;
# shift to -180..180.
if (min(x$lon, na.rm = TRUE) > 180) x$lon <- x$lon - 360
# Sort by lat (primary) then long for bottom up sort and then create sf obj
# (crs.ll is a lon-lat CRS object defined elsewhere in the app).
pts <- x %>%
dplyr::arrange(lat, lon) %>%
st_as_sf(coords = c("lon", "lat"), crs = crs.ll, agr = "constant")
# Perform checks
validate(
need(inherits(st_geometry(pts), "sfc_POINT"),
"Error processing validation data")
)
# Don't need check_valid() for pts
# check_dateline()'s value is the function's return value.
check_dateline(pts)
}
###############################################################################
# Generate message detailing the number of validation pts on polygon boundaries
# models.toeval: list of sf (polygon) prediction objects being evaluated.
# eval.data: sf point object of validation data.
# Returns a single character string describing, for each model, how many
# validation points fell on a boundary shared by two or more polygons.
eval_overlap_message <- function(models.toeval, eval.data) {
# For each model, count validation points that intersect MORE than one
# polygon (i.e. sit on a shared boundary). eval.data is reprojected to
# each model's CRS before intersecting.
pt.over.len <- sapply(
lapply(models.toeval, function(m) {
eval.data <- st_transform(eval.data, st_crs(m))
which(sapply(suppressMessages(st_intersects(eval.data, m)), length) > 1)
}),
length
)
# Make text pretty
#--------------------------------------------------------
# Case 1: no model has any boundary points.
if (all(pt.over.len == 0)) {
paste(
"The predictions being evaluated had 0 validation points",
"that fell on the boundary between two or more prediction polygons"
)
#------------------------------------------------------
# Case 2: exactly one model being evaluated.
} else if (length(pt.over.len) == 1) {
paste(
"The predictions being evaluated had", pt.over.len, "validation points",
"that fell on the boundary between two or more prediction polygons;" ,
"the predictions from these polygons were averaged for the evaluation.",
"See Appendix 2 of the manual for more details."
)
#------------------------------------------------------
# Case 3: several models -- phrase the counts as an English list.
} else {
# zero_range() is a helper defined elsewhere; presumably TRUE when all
# counts are identical, letting one number describe every model.
if (zero_range(pt.over.len)) {
temp <- paste(
"The predictions being evaluated each had", unique(pt.over.len),
"validation points"
)
# Exactly two differing counts: "x and y".
} else if (length(pt.over.len) == 2) {
temp <- paste(
"The predictions being evaluated had",
paste(pt.over.len, collapse = " and "),
"validation points, respectively,"
)
# Three or more counts: "x, y, and z" (Oxford comma).
} else {
temp <- paste(
"The predictions being evaluated had",
paste0(paste(head(pt.over.len, -1), collapse = ", "), ","),
"and", tail(pt.over.len, 1), "validation points, respectively,"
)
}
# Shared tail of the multi-model message.
paste(
temp,
"that fell on the boundary between two or more prediction polygons;",
"the predictions from these polygons were averaged for the evaluation.",
"See Appendix 2 of the manual for more details."
)
}
}
###############################################################################
|
4c967b424e90eab45da8fd25ffe8f88e140fd69b
|
3a90f2fd600e946e8ecc2dd11258cc13298e225e
|
/LOHSE/script_lohse_data_viz.R
|
f3f3f9257e6b8b1a9faad2ee0828936cd0d3ffc0
|
[] |
no_license
|
npnl/ASNR_2019
|
6da596b276e9e5e0ae27c01856f78a11a3ce5a1a
|
4b3192e62f3d00ade4f94ce45be8281de0c8dc00
|
refs/heads/master
| 2020-08-08T12:45:38.169033
| 2020-06-25T19:09:32
| 2020-06-25T19:09:32
| 213,834,393
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,220
|
r
|
script_lohse_data_viz.R
|
library(ggplot2); library(tidyverse);
# By treating this workshop as an R project, we can use relative file paths that
# allow you to open the data anywhere on any computer, provided you have downloaded
# the whole workshop folder.
getwd()
## 1.0 Plotting Discrete Data --------------------------------------------------
# Anscombe's Quartet and the Importance of Checking Assumptions
DAT1 <- read.csv("./data_ANSCOMBE.csv", header = TRUE, sep = ",")
head(DAT1)

## Regression Coefficients ----
# Per-group fit statistics for the four Anscombe groups.
# BUG FIX: the original passed data = DAT1 to lm() inside summarise(), so
# every "per-group" regression was actually fit on the POOLED data set.
# Omitting the data argument lets lm() resolve yVal/xVal from summarise()'s
# data mask, i.e. from the current group's rows only.
COEFS <- DAT1 %>%
  group_by(group) %>%
  summarise(Intercept = lm(yVal ~ xVal)$coefficients[1],
            Slope     = lm(yVal ~ xVal)$coefficients[2],
            MeanY     = mean(yVal),
            SDY       = sd(yVal),
            MeanX     = mean(xVal),
            SDX       = sd(xVal))
COEFS

# Visualizing All the Data: one scatter + regression line per group -- the
# point of the quartet (near-identical statistics, very different data).
ggplot(DAT1, aes(x = xVal, y = yVal)) +
  geom_point(aes(fill = as.factor(group)), pch = 21, color = "black", size = 2) +
  stat_smooth(aes(col = as.factor(group)), method = "lm", se = FALSE, lwd = 1) +
  facet_wrap(~group, ncol = 2) +
  scale_x_continuous(name = "X Values") +
  scale_y_continuous(name = "Y Values") +
  theme(axis.text = element_text(size = 16, color = "black"),
        axis.title = element_text(size = 16, face = "bold"),
        plot.title = element_text(size = 16, face = "bold", hjust = 0.5),
        panel.grid.minor = element_blank(),
        #axis.text.y=element_blank(),
        #axis.title.y=element_blank(),
        #axis.ticks.y=element_blank(),
        legend.position = "none")
## Discrete Categorical Data
# Columns used below: SUBJ, Elevation, Speed, Effort (perceived-effort ratings).
DAT2 <- read.csv("./data_FINAL_RATINGS.csv", header = TRUE, sep = ",")
head(DAT2)
# Cell mean, sample size, and SD for every Elevation x Speed combination.
MEANS<-DAT2 %>%
group_by(Elevation, Speed) %>%
summarise(ave_Effort=mean(Effort),
N = length(Effort),
SD = sd(Effort))
MEANS
# Just the means
# Bare bar chart of the cell means -- deliberately hides the spread of the
# underlying ratings (contrast with the later plots).
ggplot(MEANS, aes(x = Elevation, y = ave_Effort)) +
geom_bar(aes(fill=Elevation), stat="identity", width = 0.5)+
facet_wrap(~Speed) +
scale_y_continuous(name = "Effort (%)", limits = c(0,100)) +
#scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
theme(axis.text=element_text(size=16, color="black"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "none")
# Means with Standard errors
# Same bar chart with +/- 1 SEM error bars (SD/sqrt(N)).
ggplot(MEANS, aes(x = Elevation, y = ave_Effort)) +
geom_bar(aes(fill=Elevation, col=Elevation),
stat="identity", width = 0.5)+
geom_errorbar(aes(ymin = ave_Effort-SD/sqrt(N), ymax=ave_Effort+SD/sqrt(N)),
width = 0.2)+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
facet_wrap(~Speed) +
scale_y_continuous(name = "Effort (%)", limits = c(0,100)) +
theme(axis.text=element_text(size=16, color="black"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "none")
# All the data
# Every individual rating, horizontally jittered to reduce overplotting
# (h=0 keeps the Effort values exact).
ggplot(DAT2, aes(x = Elevation, y = Effort)) +
geom_point(aes(fill=Elevation), pch=21, size=2,
position=position_jitter(w=0.2, h=0))+
facet_wrap(~Speed) +
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_y_continuous(name = "Effort (%)", limits = c(0,100)) +
theme(axis.text=element_text(size=16, color="black"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "none")
# Boxplots
# Jittered raw points with a semi-transparent boxplot overlaid.
# NOTE(review): outlier.shape = "na" is presumably meant to be NA (suppress
# outlier points, since the raw data are already drawn) -- verify it behaves
# as intended on your ggplot2 version.
ggplot(DAT2, aes(x = Elevation, y = Effort)) +
geom_point(aes(fill=Elevation), pch=21, size=2,
position=position_jitter(w=0.2, h=0))+
geom_boxplot(fill="white", col="black", outlier.shape = "na",
alpha=0.4, width=0.5)+
facet_wrap(~Speed) +
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_y_continuous(name = "Effort (%)", limits = c(0,100)) +
theme(axis.text=element_text(size=16, color="black"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "none")
# Connect the dots
head(DAT2)
# Cell means; reusing the column name Effort lets DAT3 plot on the same aes.
DAT3 <- DAT2 %>%
group_by(Elevation, Speed) %>%
summarise(Effort=mean(Effort))
head(DAT3)
# Raw points connected within subject (grey lines), group trend from
# stat_smooth, and the cell means from DAT3 as large squares (shape 22).
ggplot(DAT2, aes(x = Elevation, y = Effort)) +
geom_point(aes(fill=Elevation), pch=21, size=2)+
geom_line(aes(group=SUBJ, lty=Speed), col="grey40")+
facet_wrap(~Speed) +
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_y_continuous(name = "Effort (%)", limits = c(0,100)) +
theme(axis.text=element_text(size=16, color="black"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "none") +
stat_smooth(aes(group=Speed, lty=Speed), col="black", lwd=2, se=FALSE)+
geom_point(data=DAT3, aes(fill=Elevation), shape=22, size=5)
# 2.0 Visualizing Continuous Data ----------------------------------------------
# Acquisition Data -------------------------------------------------------------
list.files()
# Timing-task errors; several blank/NaN spellings are coerced to NA on read.
ACQ<-read.csv("./data_CI_ERRORS.csv", header = TRUE, sep=",",
na.strings=c("NA","NaN"," ",""))
head(ACQ)
ACQ$subID<-factor(ACQ$subID)
# Nominal copy of the numeric target so it can drive discrete color/fill scales.
ACQ$target_nom<-factor(ACQ$target)
# Removing Outliers
# Trials more than 1000 ms off target are discarded.
ACQ <- subset(ACQ, absolute_error < 1000)
head(ACQ)
# Density of produced times (target + constant error) per target, by group.
ggplot(data=ACQ, aes(x=target+constant_error))+
geom_density(aes(col=target_nom, fill=target_nom), alpha=0.4)+
facet_wrap(~group)+
scale_fill_manual(values=c("#000000","#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#000000", "#E69F00", "#56B4E9"))+
scale_x_continuous(name="Time Produced (ms)")+
scale_y_continuous(name = "Density", limits = c(0,0.003)) +
labs(fill = "Target (ms)", col="Target (ms)")+
theme(axis.text=element_text(size=12, color="black"),
legend.text=element_text(size=16, color="black"),
legend.title=element_text(size=16, face="bold"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"))
# Post-Test Data ---------------------------------------------------------------
POST <- read.csv("./Post Data_Long Form.csv", header = TRUE)
head(POST)
POST$Participant <- factor(POST$Participant)
# Nominal copy of the numeric target for discrete color/fill scales.
POST$target_nom <- factor(POST$Target.Time)
# Same outlier rule as acquisition: drop trials > 1000 ms off target.
POST <- subset(POST, Absolute.Error < 1000)
# Subsetting into retention and transfer
# Retention = the three trained targets. %in% replaces the original chained
# `== ... | == ...` comparisons (idiomatic and easier to extend).
RET <- subset(POST, Target.Time %in% c(1500, 1700, 1900))
# Retention Data
# Dodged jittered points with boxplots per target, split by training group.
ggplot(data=RET, aes(x=target_nom, y=Absolute.Error))+
scale_fill_manual(values=c("#000000","#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#000000", "#E69F00", "#56B4E9"))+
geom_jitter(aes(group=Group, fill=target_nom), pch=21, position=position_jitterdodge(dodge.width=0.8))+
geom_boxplot(aes(lty=Group, col=target_nom), fill="white",
alpha=0.4, outlier.shape = NA)
# Densities of produced times at retention, one panel per group
# (same axes/limits as the acquisition densities for comparability).
ggplot(data=RET, aes(x=Target.Time+Constant.Error))+
geom_density(aes(col=target_nom, fill=target_nom), alpha=0.4)+
facet_wrap(~Group) +
scale_fill_manual(values=c("#000000","#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#000000", "#E69F00", "#56B4E9"))+
scale_x_continuous(name="Time Produced (ms)")+
scale_y_continuous(name = "Density", limits = c(0,0.003)) +
labs(fill = "Target (ms)", col="Target (ms)")+
theme(axis.text=element_text(size=12, color="black"),
legend.text=element_text(size=16, color="black"),
legend.title=element_text(size=16, face="bold"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"))
# Longitudinal Plots of Practice Data ------------------------------------------
# Aggregate Practice Data into Blocks
head(ACQ)
# Subject-level means per block (constant error and absolute error).
ACQ_AVE<-ACQ %>%
group_by(subID, group, block) %>%
summarise(CE_AVE = mean(constant_error, na.rm=TRUE),
AE_AVE = mean(absolute_error, na.rm=TRUE))
head(ACQ_AVE)
sd(ACQ_AVE$CE_AVE)
# Group-level means/SDs per block, built from the subject-level means
# (so each subject contributes equally regardless of trial count).
ACQ_GROUP_AVE<-ACQ_AVE %>%
group_by(group, block) %>%
summarise(CE = mean(CE_AVE, na.rm=TRUE),
CE_sd = sd(CE_AVE, na.rm=TRUE),
AE = mean(AE_AVE, na.rm=TRUE),
AE_sd = sd(AE_AVE, na.rm=TRUE),
N=length(AE_AVE))
head(ACQ_GROUP_AVE)
# More traditional plot averaging across trials ----
# Group mean AE per block with +/- 1 SEM error bars.
ggplot(ACQ_GROUP_AVE, aes(x = block, y = AE)) +
geom_line(aes(col=group), lwd=1)+
geom_errorbar(aes(ymin = AE-AE_sd/sqrt(N), ymax=AE+AE_sd/sqrt(N)),
width = 0.1)+
geom_point(aes(fill=group), shape=21, size=2)+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_x_continuous(name = "Block", breaks = c(1,2,3)) +
scale_y_continuous(name = "Absolute Error (ms)", limits = c(0,250)) +
labs(fill = "Group", col="Group")+
theme(axis.text=element_text(size=16, color="black"),
legend.text=element_text(size=16, color="black"),
legend.title=element_text(size=16, face="bold"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "top")
head(ACQ)
# Every subject's trial-by-trial AE, one line per subject, one panel per target.
ggplot(ACQ, aes(x = trial_total, y = absolute_error , group=subID)) +
geom_line(aes(col=group), lwd=1)+
geom_point(aes(fill=group), shape=21, size=2)+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_x_continuous(name = "Trial") +
scale_y_continuous(name = "Absolute Error (ms)") +
facet_wrap(~target, ncol=1)+
labs(fill = "Group", col="Group")+
theme(axis.text=element_text(size=16, color="black"),
legend.text=element_text(size=16, color="black"),
legend.title=element_text(size=16, face="bold"),
axis.title=element_text(size=16, face="bold"),
plot.title=element_text(size=16, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=16, face="bold"),
legend.position = "top")
# Trial-level points with a loess trend per group, panels by group x target.
ggplot(ACQ, aes(x = trial_total, y = absolute_error)) +
geom_point(aes(fill=group), shape=21, size=1, alpha=0.3)+
stat_smooth(aes(group=group, lty=group), col="black", fill="white",
method="loess", lwd=1, se=TRUE)+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_x_continuous(name = "Trial") +
scale_y_continuous(name = "Absolute Error (ms)") +
facet_grid(~group+target)+
labs(fill = "Group", lty="Group")+
theme(axis.text.x = element_text(size=10, color="black"),
axis.text.y=element_text(size=14, color="black"),
legend.text=element_text(size=14, color="black"),
legend.title=element_text(size=14, face="bold"),
axis.title=element_text(size=14, face="bold"),
plot.title=element_text(size=14, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=14, face="bold"),
legend.position = "none")
head(ACQ_AVE)
# Subject-level block means split by target (finer than ACQ_AVE above).
ACQ_BLOCK_AVE<-ACQ %>%
group_by(subID, group, block, target) %>%
summarise(CE = mean(constant_error, na.rm=TRUE),
AE = mean(absolute_error, na.rm=TRUE))
head(ACQ_BLOCK_AVE)
head(ACQ_GROUP_AVE)
# Small points = individual subjects; large points/lines = group means
# (ACQ_GROUP_AVE). NOTE(review): the group means are not split by target,
# so the same overlay repeats in every target panel -- confirm intended.
ggplot(ACQ_BLOCK_AVE, aes(x = block, y = AE)) +
geom_point(aes(fill=group), col="black",
shape=21, size=2, alpha=0.5)+
#geom_line(aes(group=subID), size=1, alpha=0.5)+
geom_line(data=ACQ_GROUP_AVE, aes(col=group), lwd=1)+
geom_point(data=ACQ_GROUP_AVE, aes(fill=group),
col="black", shape=21, size=5, alpha=0.5)+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_x_continuous(name = "Block", breaks=c(1,2,3)) +
scale_y_continuous(name = "Absolute Error (ms)") +
facet_grid(~target)+
labs(fill = "Group", col="Group")+
theme(axis.text.x = element_text(size=14, color="black"),
axis.text.y=element_text(size=14, color="black"),
legend.text=element_text(size=14, color="black"),
legend.title=element_text(size=14, face="bold"),
axis.title=element_text(size=14, face="bold"),
plot.title=element_text(size=14, face="bold", hjust=0.5),
panel.grid.minor = element_blank(),
strip.text = element_text(size=14, face="bold"),
legend.position = "top")
|
524652f6bf815d195ef7303b02c078bdbd9a09c0
|
9b3ce1ce05ab74a40d1f72e903a9848c7d6dee1c
|
/Covid19HcqSccs/R/SelfControlledCaseSeries.R
|
75ec101e039c5aa7f3ef678e5a777017bd2cb2da
|
[
"Apache-2.0"
] |
permissive
|
A1exanderAlexeyuk/Covid19EstimationHydroxychloroquine
|
aeaa6e5a9c6fa6166516f3949ea2c65b2eac5ca6
|
d5ab703f48bec9aede5542f530b34a4260b67abf
|
refs/heads/master
| 2023-08-23T21:11:07.681521
| 2020-10-28T18:40:56
| 2020-10-28T18:40:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,497
|
r
|
SelfControlledCaseSeries.R
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Covid19HcqSccs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Self-Controlled Case Series analyses
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param outcomeDatabaseSchema Schema name where the outcome cohorts are stored. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param outcomeTable The name of the table in the outcome database schema that holds the outcome cohorts,
#' @param exposureDatabaseSchema Schema name where the exposure cohorts are stored. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param exposureTable The name of the table in the exposure database schema that holds the exposure cohorts,
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param maxCores How many parallel cores should be used? If more cores are made available
#' this can speed up the analyses.
#' @export
runSelfControlledCaseSeries <- function(connectionDetails,
cdmDatabaseSchema,
oracleTempSchema = NULL,
outcomeDatabaseSchema = cdmDatabaseSchema,
outcomeTable = "cohort",
exposureDatabaseSchema = cdmDatabaseSchema,
exposureTable = "drug_era",
outputFolder,
maxCores) {
start <- Sys.time()
sccsFolder <- file.path(outputFolder, "sccsOutput")
if (!file.exists(sccsFolder))
dir.create(sccsFolder)
# The summary CSV doubles as a completion marker: when it already exists the
# expensive SCCS analyses are skipped entirely (delete it to force a re-run).
sccsSummaryFile <- file.path(outputFolder, "sccsSummary.csv")
if (!file.exists(sccsSummaryFile)) {
# Exposure-outcome pairs (outcomes of interest + negative controls).
eoList <- createTos(outputFolder = outputFolder)
# Analysis specifications shipped with the study package.
sccsAnalysisListFile <- system.file("settings", "sccsAnalysisList.json", package = "Covid19HcqSccs")
sccsAnalysisList <- SelfControlledCaseSeries::loadSccsAnalysisList(sccsAnalysisListFile)
# Thread counts are budgeted from maxCores; DB extraction stays single-threaded.
sccsResult <- SelfControlledCaseSeries::runSccsAnalyses(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
oracleTempSchema = oracleTempSchema,
exposureDatabaseSchema = exposureDatabaseSchema,
exposureTable = exposureTable,
outcomeDatabaseSchema = outcomeDatabaseSchema,
outcomeTable = outcomeTable,
sccsAnalysisList = sccsAnalysisList,
exposureOutcomeList = eoList,
outputFolder = sccsFolder,
combineDataFetchAcrossOutcomes = TRUE,
compressSccsEraDataFiles = TRUE,
getDbSccsDataThreads = 1,
createSccsEraDataThreads = min(5, maxCores),
fitSccsModelThreads = max(1, floor(maxCores/4)),
cvThreads = min(4, maxCores))
sccsSummary <- SelfControlledCaseSeries::summarizeSccsAnalyses(sccsResult, sccsFolder)
readr::write_csv(sccsSummary, sccsSummaryFile)
}
delta <- Sys.time() - start
ParallelLogger::logInfo(paste("Completed SCCS analyses in", signif(delta, 3), attr(delta, "units")))
}
# Build the list of exposure-outcome pairs to analyze: the outcomes of interest
# plus the packaged negative controls, de-duplicated.
#
# @param outputFolder Unused here; kept for call-site compatibility.
# @return A list of SelfControlledCaseSeries exposureOutcome objects.
createTos <- function(outputFolder) {
  pathToCsv <- system.file("settings", "TosOfInterest.csv", package = "Covid19HcqSccs")
  tosOfInterest <- read.csv(pathToCsv, stringsAsFactors = FALSE)
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "Covid19HcqSccs")
  ncs <- read.csv(pathToCsv, stringsAsFactors = FALSE)
  allControls <- ncs
  tos <- unique(rbind(tosOfInterest[, c("exposureId", "outcomeId")],
                      allControls[, c("exposureId", "outcomeId")]))
  createTo <- function(i) {
    exposureOutcome <- SelfControlledCaseSeries::createExposureOutcome(exposureId = tos$exposureId[i],
                                                                       outcomeId = tos$outcomeId[i])
    return(exposureOutcome)
  }
  # seq_len() is safe when tos has zero rows (1:nrow(tos) would yield c(1, 0)).
  tosList <- lapply(seq_len(nrow(tos)), createTo)
  return(tosList)
}
# Plot negative-control calibration diagnostics, one PNG per
# exposure x analysis combination, into <outputFolder>/sccsDiagnostics.
# Requires sccsSummary.csv produced by runSelfControlledCaseSeries().
runSccsDiagnostics <- function(outputFolder, databaseId) {
diagnosticsFolder <- file.path(outputFolder, "sccsDiagnostics")
if (!file.exists(diagnosticsFolder)) {
dir.create(diagnosticsFolder)
}
sccsSummaryFile <- file.path(outputFolder, "sccsSummary.csv")
sccsSummary <- readr::read_csv(sccsSummaryFile, col_types = readr::cols())
pathToCsv <- system.file("settings", "NegativeControls.csv", package = "Covid19HcqSccs")
ncs <- read.csv(pathToCsv, stringsAsFactors = FALSE)
# Keep only summary rows that correspond to negative controls.
ncs <- merge(ncs, sccsSummary)
evaluateSystematicError <- function(subset) {
# Estimates with missing standard errors cannot be plotted.
subset <- subset[!is.na(subset$`seLogRr(Exposure of interest)`), ]
if (nrow(subset) != 0) {
fileName <- file.path(diagnosticsFolder, sprintf("NegativeControls_e%s_a%s_%s.png", subset$exposureId[1], subset$analysisId[1], databaseId))
# Analysis 1 is the primary SCCS model; analysis 2 adjusts for
# event-dependent observation time.
if (subset$analysisId[1] == 1) {
title <- "Primary analysis"
} else {
title <- "Adjusting for event-dependent observation"
}
EmpiricalCalibration::plotCalibrationEffect(logRrNegatives = subset$`logRr(Exposure of interest)`,
seLogRrNegatives = subset$`seLogRr(Exposure of interest)`,
xLabel = "Incidence Rate Ratio",
title = title,
showCis = TRUE,
fileName = fileName)
}
}
# One diagnostic plot per exposure x analysis stratum.
lapply(split(ncs, paste(ncs$exposureId, ncs$analysisId)), evaluateSystematicError)
}
# Produce the study's main results table: per-exposure empirical calibration of
# p-values and confidence intervals using that exposure's negative controls,
# written to sccsDiagnostics/allResults_<databaseId>.csv.
# Relies on addCohortNames(), defined elsewhere in this package.
generateBasicOutputTable <- function(outputFolder, databaseId) {
diagnosticsFolder <- file.path(outputFolder, "sccsDiagnostics")
sccsSummaryFile <- file.path(outputFolder, "sccsSummary.csv")
sccsSummary <- readr::read_csv(sccsSummaryFile, col_types = readr::cols())
pathToCsv <- system.file("settings", "NegativeControls.csv", package = "Covid19HcqSccs")
negativeControls <- read.csv(pathToCsv, stringsAsFactors = FALSE)
# Calibrate one exposure's estimates against its own negative controls.
calibrate <- function(subset) {
ncs <- merge(subset, negativeControls)
# Drop negative controls without usable standard errors.
ncs <- ncs[!is.na(ncs$`seLogRr(Exposure of interest)`) & !is.infinite(ncs$`seLogRr(Exposure of interest)`), ]
if (nrow(ncs) != 0) {
# Empirical null from the negative-control estimates.
null <- EmpiricalCalibration::fitMcmcNull(logRr = ncs$`logRr(Exposure of interest)`,
seLogRr = ncs$`seLogRr(Exposure of interest)`)
calP <- EmpiricalCalibration::calibrateP(null, logRr = subset$`logRr(Exposure of interest)`,
seLogRr = subset$`seLogRr(Exposure of interest)`)
subset$calP <- calP$p
subset$calPLb <- calP$lb95ci
subset$calPUb <- calP$ub95ci
model <- EmpiricalCalibration::convertNullToErrorModel(null)
calCi <- EmpiricalCalibration::calibrateConfidenceInterval(logRr = subset$`logRr(Exposure of interest)`,
seLogRr = subset$`seLogRr(Exposure of interest)`,
model = model)
# Back-transform calibrated log estimates to the ratio scale.
subset$calRr <- exp(calCi$logRr)
subset$calLb95Rr <- exp(calCi$logLb95Rr)
subset$calUb95Rr <- exp(calCi$logUb95Rr)
subset$calLogRr <- calCi$logRr
subset$calSeLogRr <- calCi$seLogRr
}
return(subset)
}
results <- lapply(split(sccsSummary, sccsSummary$exposureId), calibrate)
results <- dplyr::bind_rows(results)
results <- addCohortNames(data = results, IdColumnName = "exposureId", nameColumnName = "exposureName")
results <- addCohortNames(data = results, IdColumnName = "outcomeId", nameColumnName = "outcomeName")
results$negativeControl <- results$outcomeId %in% negativeControls$outcomeId
results$description <- "Primary analysis"
results$description[results$analysisId == 2] <- "Adjusting for event-dependent observation"
# Negative controls are used for calibration only; exclude them from the output.
results <- results[!results$negativeControl, ]
results <- results[, c("exposureName", "outcomeName", "description", "caseCount", "rr(Exposure of interest)", "ci95lb(Exposure of interest)", "ci95ub(Exposure of interest)", "calP", "calRr", "calLb95Rr", "calUb95Rr")]
colnames(results) <- c("Exposure", "Outcome", "Analysis", "Cases", "IRR", "CI95LB", "CI95UB", "Calibrated P", "Calibrated IRR", "Calibrated CI95LB", "Calibrated CI95UB")
results <- results[order(results$Exposure, results$Outcome, results$Analysis), ]
readr::write_csv(results, file.path(diagnosticsFolder, sprintf("allResults_%s.csv", databaseId)))
}
# Load the control definitions for this study.
#
# If AllControls.csv exists in outputFolder, positive controls have been
# synthesized and that file (positive + negative controls) is returned as-is.
# Otherwise the packaged negative controls are loaded and padded with the
# columns the synthesized file would have (oldOutcomeId, targetEffectSize = 1).
getAllControls <- function(outputFolder) {
  synthesizedFile <- file.path(outputFolder, "AllControls.csv")
  if (file.exists(synthesizedFile)) {
    # Positive controls must have been synthesized; file holds both kinds.
    return(read.csv(synthesizedFile))
  }
  # Fall back to the negative controls shipped with the package.
  negControlsPath <- system.file("settings", "NegativeControls.csv", package = "Covid19HcqSccs")
  controls <- readr::read_csv(negControlsPath, col_types = readr::cols())
  controls$oldOutcomeId <- controls$outcomeId
  controls$targetEffectSize <- rep(1, nrow(controls))
  return(controls)
}
|
b021a74383839ecf00bb63a033e0273b890d299c
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/gdatascience|tidytuesday|moores_law.R
|
cdf574a25240304f175fae5d27e6e266d7402f23
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
gdatascience|tidytuesday|moores_law.R
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----ttsetup, echo=FALSE, warning=FALSE, message=FALSE-------------------
# Load libraries, set the default theme & caption, and grab the data
library(tidyverse)
theme_set(theme_light())
default_caption <- "Source: Wikipedia | Designer: Tony Galvan @gdatascience1"
# #TidyTuesday 2019-09-03 transistor-count data sets.
# NOTE(review): gpu and ram are loaded but not used in the chunk below.
cpu <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-09-03/cpu.csv")
gpu <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-09-03/gpu.csv")
ram <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-09-03/ram.csv")
## ------------------------------------------------------------------------
# Transistor counts over time on a log10 y-axis: Moore's law appears as a
# roughly straight trend. Intel parts highlighted vs all other designers.
cpu %>%
ggplot(aes(date_of_introduction, transistor_count, color = if_else(designer == "Intel", "Intel", "Other"))) +
geom_point(alpha = 0.5) +
scale_y_log10(labels = scales::comma_format()) +
scale_color_manual(values = c("darkblue", "gray50")) +
labs(x = "",
y = "# of transistors",
color = "CPU Designer",
title = "Is Moore's Law true?",
subtitle = "The number of transistors in a dense integrated circuit\ndoubles approximately every two years",
caption = default_caption)
##ggsave("moores_law.png", width = 6, height = 4)
|
25fa34626862c4370bb3f0268c8cd99c64361d86
|
43f0025d32c0415e40439ac2557b60fd6be81f28
|
/index.R
|
4a8deb44a8a91dadd9b16880d14ced8b3fa9bcea
|
[] |
no_license
|
young-do/2019-Research-Papers-for-Graduation
|
95ea86643d9568401f22584a03268d0f7d3416ef
|
e5c1bc24b60b7236dd38f84a249cda2d8f63537e
|
refs/heads/master
| 2020-08-30T20:27:36.125368
| 2019-10-30T08:50:11
| 2019-10-30T08:50:11
| 218,480,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,653
|
r
|
index.R
|
# Title: Improving Random Forest in Image Classification Using t-SNE and K-means Clustering
# Young-do Cho, 2019
# 0. Configuration
## Computing Environment
# - CPU: Intel i5 4690
# - RAM: 24GB
# - OS: Ubuntu Desktop 18.04.3
# - R version: 3.6.1
## Install packages
# NOTE(review): install.packages() re-installs on every run of the script;
# comment these three lines out after the first install.
install.packages("tidyverse") # Metapackage with lots of helpful functions
install.packages("ranger") # Fast Implementation of Random Forests
install.packages("Rtsne") # T-Distributed Stochastic Neighbor Embedding (t-SNE)
## Set packages
library(tidyverse) # ver. 1.2.1
library(ranger) # ver. 0.11.2
library(Rtsne) # ver. 0.15
# 1. Load data
# Both data sets are assumed to have a `label` column (digit / garment class)
# followed by one column per pixel.
## 1. MNIST
mnist_train <- read.csv("mnistTrainSet.csv")
mnist_test <- read.csv("mnistTestSet.csv")
# Classification targets must be factors for ranger to fit a classifier.
mnist_train$label <- as.factor(mnist_train$label)
mnist_test$label <- as.factor(mnist_test$label)
str(mnist_train)
str(mnist_test)
## 2. fashion-MNIST
fashion_mnist_train <- read.csv("fashion-mnist_train.csv")
fashion_mnist_test <- read.csv("fashion-mnist_test.csv")
fashion_mnist_train$label <- as.factor(fashion_mnist_train$label)
fashion_mnist_test$label <- as.factor(fashion_mnist_test$label)
str(fashion_mnist_train)
str(fashion_mnist_test)
# 2. t-SNE
## 1. MNIST
# Embed train + test together so both live in the same 2-D space, then split
# the embedding back by the original row order (60k train, 10k test).
mnist_all <- rbind(mnist_train, mnist_test)
set.seed(2019) # to always get the same result
# [, -1] drops the label column: only pixel values are embedded.
mnist_tsne <- Rtsne(mnist_all[, -1], dims = 2, perplexity=30, check_duplicates = FALSE, verbose=TRUE, max_iter = 500)
tsne_2d_mnist_all <- as.data.frame(mnist_tsne$Y)
tsne_2d_mnist_train <- tsne_2d_mnist_all[1:60000, ]
tsne_2d_mnist_test <- tsne_2d_mnist_all[60001:70000, ]
### graph
# Plot a 6000-point sample of the embedding, labels as colored digits.
set.seed(2019)
select <- sample(1:nrow(mnist_train), 6000)
selected_mnist_train <- mnist_train[select,]
selected_tsne_mnist_train <- tsne_2d_mnist_train[select, ]
colors <- rainbow(10)
names(colors) <- unique(selected_mnist_train$label)
par(mgp = c(2.5, 1,0))
plot(selected_tsne_mnist_train, t="n", main="tSNE (MNIST)", xlab="tSNE dimension 1", ylab="tSNE dimension 2")
text(selected_tsne_mnist_train, labels=selected_mnist_train$label, col=colors[selected_mnist_train$label])
## 2. fashion-MNIST
# Same procedure as the MNIST section: embed train + test together, split back
# by row order, then plot a 6000-point labelled sample.
fashion_mnist_all <- rbind(fashion_mnist_train, fashion_mnist_test)
set.seed(2019) # to always get the same result
# [, -1] drops the label column: only pixel values are embedded.
fashion_mnist_tsne <- Rtsne(fashion_mnist_all[, -1], dims = 2, perplexity=30, check_duplicates = FALSE, verbose=TRUE, max_iter = 500)
tsne_2d_fashion_mnist_all <- as.data.frame(fashion_mnist_tsne$Y)
tsne_2d_fashion_mnist_train <- tsne_2d_fashion_mnist_all[1:60000, ]
tsne_2d_fashion_mnist_test <- tsne_2d_fashion_mnist_all[60001:70000, ]
### graph
set.seed(2019)
select <- sample(1:nrow(fashion_mnist_train), 6000)
selected_fashion_mnist_train <- fashion_mnist_train[select,]
selected_tsne_fashion_mnist_train <- tsne_2d_fashion_mnist_train[select, ]
colors <- rainbow(10)
names(colors) <- unique(selected_fashion_mnist_train$label)
par(mgp = c(2.5, 1,0))
# FIX: the title previously said "tSNE (MNIST)" -- copy-paste from the MNIST
# section; this plot shows the fashion-MNIST embedding.
plot(selected_tsne_fashion_mnist_train, t="n", main="tSNE (fashion-MNIST)", xlab="tSNE dimension 1", ylab="tSNE dimension 2")
text(selected_tsne_fashion_mnist_train, labels=selected_fashion_mnist_train$label, col=colors[selected_fashion_mnist_train$label])
# 3. Experiment
## Expt.1) Random Forest
# Baseline: random forest on raw pixels only. The printed prediction.error is
# ranger's out-of-bag classification error.
### 1. MNIST
set.seed(2019)
model_m_expt1 <- ranger(label ~ ., data = mnist_train, importance = "impurity")
print(model_m_expt1)
print(model_m_expt1$prediction.error)
### 2. fashion-MNIST
set.seed(2019)
model_fm_expt1 <- ranger(label ~ ., data = fashion_mnist_train, importance = "impurity")
print(model_fm_expt1)
print(model_fm_expt1$prediction.error)
## Expt.2) K-means -> RF
# For each k in 2..50: cluster the raw pixels ([, -1] drops the label column),
# append the cluster id as an extra factor feature, and refit the forest.
# The out-of-bag error is printed for each k.
### 1. MNIST
for(i in 2:50) {
temp_m_train <- mnist_train
set.seed(2019)
kmeans_result <- kmeans(temp_m_train[, -1], centers = i, iter.max = 10000, nstart = 20)
temp_m_train$cluster <- as.factor(kmeans_result$cluster)
print(i)
print("Expt 2. fit one model (just kmeans)")
set.seed(2019)
model <- ranger(label ~ ., data = temp_m_train, importance = "impurity")
print(model)
print(model$prediction.error)
}
### 2. fashion-MNIST
for(i in 2:50) {
temp_fm_train <- fashion_mnist_train
set.seed(2019)
kmeans_result <- kmeans(temp_fm_train[, -1], centers = i, iter.max = 10000, nstart = 20)
temp_fm_train$cluster <- as.factor(kmeans_result$cluster)
print(i)
print("Expt 2. fit one model (just kmeans)")
set.seed(2019)
model <- ranger(label ~ ., data = temp_fm_train, importance = "impurity")
print(model)
print(model$prediction.error)
}
## Expt.3) t-SNE + K-means -> RF
# For each k in 2..50: cluster the 2-D t-SNE embedding, append the cluster id
# as an extra factor feature to the raw training data, and refit the forest.
# FIX: the original clustered on tsne_2d_*_train[, -1] -- copy-pasted from
# Expt.2 where column 1 was the label. The t-SNE frames only hold V1/V2, so
# [, -1] silently dropped the first embedding dimension; cluster on both.
### 1. MNIST
for(i in 2:50) {
  temp_m_train <- mnist_train
  set.seed(2019)
  kmeans_result <- kmeans(tsne_2d_mnist_train, centers = i, iter.max = 10000, nstart = 20)
  temp_m_train$cluster <- as.factor(kmeans_result$cluster)
  print(i)
  print("Expt 3. fit one model (tsne + kmeans)")  # FIX: progress label said "Expt 2"
  set.seed(2019)
  model <- ranger(label ~ ., data = temp_m_train, importance = "impurity")
  print(model)
  print(model$prediction.error)
}
### 2. fashion-MNIST
for(i in 2:50) {
  temp_fm_train <- fashion_mnist_train
  set.seed(2019)
  kmeans_result <- kmeans(tsne_2d_fashion_mnist_train, centers = i, iter.max = 10000, nstart = 20)
  temp_fm_train$cluster <- as.factor(kmeans_result$cluster)
  print(i)
  print("Expt 3. fit one model (tsne + kmeans)")  # FIX: progress label said "Expt 2"
  set.seed(2019)
  model <- ranger(label ~ ., data = temp_fm_train, importance = "impurity")
  print(model)
  print(model$prediction.error)
}
## Expt.4) t-SNE + RF
# Append the two t-SNE coordinates as extra numeric features alongside the raw
# pixels, then fit the forest.
### 1. MNIST
temp_m_train <- mnist_train
temp_m_train$tsne_X <- tsne_2d_mnist_train$V1
temp_m_train$tsne_Y <- tsne_2d_mnist_train$V2
set.seed(2019)
model_m_expt4 <- ranger(label ~ ., data = temp_m_train, importance = "impurity")
print(model_m_expt4)
print(model_m_expt4$prediction.error) # = 0.02301667
### 2. fashion-MNIST
# FIX: the original started from the 2-column t-SNE frame
# (`temp_fm_train <- tsne_2d_fashion_mnist_train`), which has no `label`
# column, so ranger(label ~ .) could not be fit as intended. Start from the
# full training set, mirroring the MNIST branch above.
temp_fm_train <- fashion_mnist_train
temp_fm_train$tsne_X <- tsne_2d_fashion_mnist_train$V1
temp_fm_train$tsne_Y <- tsne_2d_fashion_mnist_train$V2
set.seed(2019)
model_fm_expt4 <- ranger(label ~ ., data = temp_fm_train, importance = "impurity")
print(model_fm_expt4)
print(model_fm_expt4$prediction.error) # = 0.1143333
# 4. Test
## Choose Expt.4 model for both data
# Held-out evaluation: the test sets get the same tsne_X/tsne_Y features
# (from the joint embedding computed in section 2) before prediction.
# NOTE(review): confusionMatrix() comes from the caret package, which is never
# loaded in this script -- add library(caret) (or use caret::confusionMatrix)
# or these print() calls will error.
### 1. MNIST
temp_m_test <- mnist_test
temp_m_test$tsne_X <- tsne_2d_mnist_test$V1
temp_m_test$tsne_Y <- tsne_2d_mnist_test$V2
pred <- predict(model_m_expt4, temp_m_test)$predictions
real <- temp_m_test$label
print(confusionMatrix(pred, real))
### 2. fashion-MNIST
temp_fm_test <- fashion_mnist_test
temp_fm_test$tsne_X <- tsne_2d_fashion_mnist_test$V1
temp_fm_test$tsne_Y <- tsne_2d_fashion_mnist_test$V2
pred <- predict(model_fm_expt4, temp_fm_test)$predictions
real <- temp_fm_test$label
print(confusionMatrix(pred, real))
|
b7d44ab3d10d5e06f961c49431f5cff00e6b5512
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CircNNTSR/examples/nntsplot.Rd.R
|
4d291968cf8c6f0784db3c77914239ef57b81e51
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
nntsplot.Rd.R
|
library(CircNNTSR)
### Name: nntsplot
### Title: Plots the NNTS density
### Aliases: nntsplot
### ** Examples
# Circular (radian) turtle-orientation data shipped with CircNNTSR.
data(Turtles_radians)
#Empirical analysis of data
Turtles_hist<-hist(Turtles_radians,breaks=10,freq=FALSE)
#Estimation of the NNTS density with 3 components for the data
est<-nntsmanifoldnewtonestimation(Turtles_radians,3)
est
#plot the estimated density (column 2 of cestimates holds the estimates)
nntsplot(est$cestimates[,2],3)
#add the histogram to the estimated density plot
plot(Turtles_hist, freq=FALSE, add=TRUE)
|
bd1eca397b0d3b0f4a338e2244aeeedae47711b5
|
1f55eec948770d1a34bd41e6a9ff4b691944dfdf
|
/sir_lamp_function.R
|
3542c5557507f5ea958f3fd253fff0df2fcc889d
|
[] |
no_license
|
andyhoegh/LAMP_screen
|
4f28068e0d4b39fae7d523dfc91ec68e65572a4a
|
b754a18b80b2e86e1ced0b0d588119b802e128fc
|
refs/heads/main
| 2023-02-04T17:37:05.219101
| 2020-12-31T04:03:15
| 2020-12-31T04:03:15
| 325,709,468
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,710
|
r
|
sir_lamp_function.R
|
#### SIR Step Function for Model objective 1 - identify role of LAMP specificity
# We assume a student model with on and off campus students with slightly different
# contact structures, but no real difference in transmission. We don't do much here
# with delays. Instead, we are focusing on how the testing sensitivity affects
# some important parameters. Here, we assume that PCR could take as much at 2 days
# to run, but LAMP is functionally instantaneous.
# adapted from Aaron King's code
#sims - number of stochastic simulations
#S vector of susceptibles, length=sims
#E vector of exposed, length=sims
#I1 vector of pre symptomatic infecteds, length=sims
#I2 vector of possibly symptomatic infecteds, length=sims
#R vector of recovereds, length=sims
#N vector of population size, length=sims
#newSympt1 counter for new possibly symptomatic individuals, to allow 2 day health seeking delay
#newSympt2 counter for new possibly symptomatic individuals, to allow 2 day health seeking delay
#beta transmission rate
#theta rate from exposed to infected
#gamma_I1I2 rate from pre-symptomatic infecteds to possibly symptomatic
#gamma_I2R rate from possibly symptomatic to recovered
#delta.t time step length default is 1 day timestep
#tests number of asymptomatic tests per day
#ppn_sympt proportion of possibly symptomatic individuals who are symptomatic
#contacts average number of contacts per individual
#compliance is a proportion of confirmed asymptomatic tests or their contacts who either fail to
# show for testing or dont comply with isolation
#care.seeking is the proportion of students who seek care once symptomatic
# One time-step of a stochastic SEI1I2R campus epidemic model with LAMP/PCR
# surveillance testing, symptomatic reporting, isolation delay queues, and
# capped contact tracing.  All *.on / *.off arguments are vectors of length
# `sims` (parallel stochastic replicates) for the on- and off-campus
# sub-populations.  Returns a sims-row matrix: updated SEI1I2R states plus
# bookkeeping columns (tests, reported cases, contacts, isolation queues,
# PCR usage, cases caught).  Depends on rmultinomial() and the helper
# sir_simple_step(), both defined elsewhere in this project.
sir_lamp <- function (sims, S.on, E.on, I1.on, I2.on, R.on, N.on, newSympt1.on, newSympt2.on, beta_vec.on = beta_vec.on,
S.off,E.off,I1.off,I2.off,R.off,N.off,newSympt1.off, newSympt2.off, beta_vec.off = beta_vec.off,
theta, gamma_I1I2, gamma_I2R, delta.t=1, tests, ppn_sympt=ppn_sympt,
contacts.on = 5, contacts.off = 5, compliance = 1, care.seeking = 1,
atest.wait.3,atest.wait.2,atest.wait.1,contact.wait.3,contact.wait.2,contact.wait.1,
test.scenario = c("2 Days","1 Day","No Delay"), sens.pcr = .99, spec.pcr = .99,
sens.lamp = .8, spec.lamp = .99, lamp.diagnostic = F, contact.tracing.limit = 100,
intro.on, intro.off, pooling, pooling.multi) {
# NOTE(review): no-op assignment -- `ts` is simply copied from the calling
# (or global) environment and never used below; looks like leftover scaffolding.
ts <- ts
# --- Chain-binomial transition draws, on-campus ---
# S->E hazard uses the combined on+off infectious pool over total population.
dN_SE.on <- rbinom(n=sims,size=S.on,
prob=1-exp(-beta_vec.on*(I1.on+I2.on+I1.off+I2.off)/(N.on+N.off)*delta.t)) + intro.on # add random introductions
dN_EI1.on <- rbinom(n=sims,size=E.on,
prob=1-exp(-theta*delta.t))
dN_I1I2.on <- rbinom(n=sims,size=I1.on,
prob=1-exp(-gamma_I1I2*delta.t))
dN_I2R.on <- rbinom(n=sims,size=I2.on,
prob=1-exp(-gamma_I2R*delta.t))
# --- Same transition draws, off-campus ---
dN_SE.off <- rbinom(n=sims,size=S.off,
prob=1-exp(-beta_vec.off*(I1.on+I2.on+I1.off+I2.off)/(N.on+N.off)*delta.t)) + intro.off # add random introductions
dN_EI1.off <- rbinom(n=sims,size=E.off,
prob=1-exp(-theta*delta.t))
dN_I1I2.off <- rbinom(n=sims,size=I1.off,
prob=1-exp(-gamma_I1I2*delta.t))
dN_I2R.off <- rbinom(n=sims,size=I2.off,
prob=1-exp(-gamma_I2R*delta.t))
# update classes
S.on. <- S.on - dN_SE.on
E.on. <- E.on + dN_SE.on - dN_EI1.on
I1.on. <- I1.on + dN_EI1.on - dN_I1I2.on
I2.on. <- I2.on + dN_I1I2.on - dN_I2R.on
# two-day symptom-reporting pipeline: today's new I2 entrants report in 2 steps
newSympt2.on <- newSympt1.on
newSympt1.on <- dN_I1I2.on
newSymptReportedTrue.on <- rbinom(sims, newSympt2.on, ppn_sympt) # randomly draw symptomatic individuals
# reported = truly symptomatic * care seeking * PCR sensitivity (floored to integers)
newSymptReported.on <- floor(newSymptReportedTrue.on*care.seeking*(1-(1-sens.pcr)))
R.on. <- R.on + dN_I2R.on
S.off. <- S.off - dN_SE.off
E.off. <- E.off + dN_SE.off - dN_EI1.off
I1.off. <- I1.off + dN_EI1.off - dN_I1I2.off
I2.off. <- I2.off + dN_I1I2.off - dN_I2R.off
newSympt2.off <- newSympt1.off
newSympt1.off <- dN_I1I2.off
newSymptReportedTrue.off <- rbinom(sims, newSympt2.off, ppn_sympt) # randomly draw symptomatic individuals
newSymptReported.off <- floor(newSymptReportedTrue.off*care.seeking*(1-(1-sens.pcr)))
R.off. <- R.off + dN_I2R.off
# columns 1-5 = on-campus S,E,I1,I2,R; 6 = new on I2; 7-11 = off-campus; 12 = new off I2
out <- cbind( S.on., E.on., I1.on., I2.on., R.on., dN_I1I2.on,
S.off., E.off., I1.off., I2.off., R.off., dN_I1I2.off ) # assume that I1->I2 is when cases become detectable
sympt.pcr <- newSymptReported.on + newSymptReported.off
# --- Asymptomatic surveillance testing (LAMP, possibly pooled) ---
avail.tests <- tests * pooling
# allocate tests across the 10 state columns proportionally to class sizes
atests <- rmultinomial(sims,avail.tests,out[,c(1:5,7:11)])
tested <- atests
for (i in 1:sims){
for (j in 1:10){
# columns 3,4,8,9 are the infected (I1/I2) classes: true positives via
# LAMP sensitivity, degraded by pooling and compliance
if (j %in% c(3,4,8,9)){
tested[i,j] <- rbinom(1, atests[i,j], (((pooling-1)*-pooling.multi)+sens.lamp)*compliance)
}
# uninfected classes: false positives at (1 - specificity)
if (j %in% c(1,2,5,6,7,10)){
tested[i,j] <- rbinom(1, atests[i,j], (1-((pooling-1)*-pooling.multi+spec.lamp))*compliance)
}
}
}
asymp.pcr <- rep(0, sims)
tested. <- tested
# If LAMP is not diagnostic, LAMP-positives are confirmed with PCR
# (note: `F` should ideally be spelled FALSE).
if (lamp.diagnostic == F) {
for (i in 1:sims){
for (j in 1:10){
if (j %in% c(3,4,8,9)){
tested.[i,j] <- rbinom(1, tested[i,j], (1-((1-sens.pcr))))
}
if (j %in% c(1,2,5,6,7,10)){
tested.[i,j] <- rbinom(1, tested[i,j], (1-(spec.pcr)))
}
}
}
# confirmatory PCR volume = all LAMP positives forwarded to PCR
asymp.pcr <- apply(tested., 1, sum)
}
cases.caught <- apply(tested.[,c(3,4,8,9)], 1, sum) + newSymptReported.on + newSymptReported.off
sympt.isolate <- matrix(0,nr=sims,nc=10) # storage for symptomatic cases to isolate
# reported symptomatic cases sit in the I2 columns (4 = on, 9 = off)
sympt.isolate[,c(4)] <- newSymptReported.on
sympt.isolate[,c(9)] <- newSympt0Reported.off
atests.isolate <- tested. # holder for which tests will be positive that need to be isolated
# atests.isolate[,c(1,2,5,6,7,10)] <- 0 # set non-infected classes to 0
# atests.isolate <- floor(atests.isolate)
# Age the asymptomatic-test isolation queue by one day; queued individuals
# keep progressing through disease states via sir_simple_step().
atest.wait.3 <- sir_simple_step(atest.wait.2,sims,
I1.on, I2.on, I1.off, I2.off, N.on, N.off,
theta, gamma_I1I2, gamma_I2R,
beta_vec.on, beta_vec.off)
atest.wait.2 <- sir_simple_step(atest.wait.1,sims,
I1.on, I2.on, I1.off, I2.off, N.on, N.off,
theta, gamma_I1I2, gamma_I2R,
beta_vec.on, beta_vec.off)
atest.wait.1 <- atests.isolate
# --- Contact tracing: contacts drawn around cases isolated today; the
# delay scenario selects which cohort of positives triggers tracing ---
if(test.scenario == "2 Days") {
# randomly draw the contacts from the different classes
tot.contacts.on <- rmultinomial(sims,
rep(rpois(sims,contacts.on)*(newSymptReported.on + apply(atest.wait.3[,c(1:5)], 1, sum)),sims),
matrix(c(1,1,1,1,1,1,1,1,1,1),nr=sims,nc=10,byrow=T)*out[,c(1:5, 7:11)])
# contacts.on <- floor(tot.contacts.on*compliance)
tot.contacts.off <- rmultinomial(sims,
rep(rpois(sims,contacts.off)*(newSymptReported.off + apply(atest.wait.3[,c(6:10)], 1, sum)),sims),
matrix(c(1,1,1,1,1,1,1,1,1,1),nr=sims,nc=10,byrow=T)*out[,c(1:5, 7:11)])
}
if(test.scenario == "1 Day") {
# randomly draw the contacts from the different classes
tot.contacts.on <- rmultinomial(sims,
rep(rpois(sims,contacts.on)*(newSymptReported.on + apply(atest.wait.2[,c(1:5)], 1, sum)),sims),
matrix(c(1,1,1,1,1,1,1,1,1,1),nr=sims,nc=10,byrow=T)*out[,c(1:5, 7:11)])
# contacts.on <- floor(tot.contacts.on*compliance)
tot.contacts.off <- rmultinomial(sims,
rep(rpois(sims,contacts.off)*(newSymptReported.off + apply(atest.wait.2[,c(6:10)], 1, sum)),sims),
matrix(c(1,1,1,1,1,1,1,1,1,1),nr=sims,nc=10,byrow=T)*out[,c(1:5, 7:11)])
}
if(test.scenario == "No Delay") {
# randomly draw the contacts from the different classes
tot.contacts.on <- rmultinomial(sims,
rep(rpois(sims,contacts.on)*(newSymptReported.on + apply(atest.wait.1[,c(1:5)], 1, sum)),sims),
matrix(c(1,1,1,1,1,1,1,1,1,1),nr=sims,nc=10,byrow=T)*out[,c(1:5, 7:11)])
# contacts.on <- floor(tot.contacts.on*compliance)
tot.contacts.off <- rmultinomial(sims,
rep(rpois(sims,contacts.off)*(newSymptReported.off + apply(atest.wait.1[,c(6:10)], 1, sum)),sims),
matrix(c(1,1,1,1,1,1,1,1,1,1),nr=sims,nc=10,byrow=T)*out[,c(1:5, 7:11)])
}
if(!test.scenario %in% c("2 Days","1 Day","No Delay")) {
out <- 0
print("Need correct delay interval")
}
tot.contacts <- tot.contacts.on + tot.contacts.off
contacts <- tot.contacts
# cap traced contacts per replicate at contact.tracing.limit
for (i in 1:nrow(tot.contacts)) {
if (sum(tot.contacts[i,]) == 0 ) next
contacts[i,] <- rmultinomial(1,contact.tracing.limit,tot.contacts[i,])
}
# Age the contact-tracing isolation queue by one day (same mechanism as
# the asymptomatic-test queue above).
contact.wait.3 <- sir_simple_step(contact.wait.2,sims,
I1.on, I2.on, I1.off, I2.off, N.on, N.off,
theta, gamma_I1I2, gamma_I2R,
beta_vec.on, beta_vec.off)
contact.wait.2 <- sir_simple_step(contact.wait.1,sims,
I1.on, I2.on, I1.off, I2.off, N.on, N.off,
theta, gamma_I1I2, gamma_I2R,
beta_vec.on, beta_vec.off)
contact.wait.1 <- contacts
# Remove isolated individuals from circulation; pmax() guards against
# negative class sizes.
if(test.scenario == "2 Days") {
out[,c(1:5,7:11)] <- pmax(out[,c(1:5,7:11)] - sympt.isolate - (contact.wait.3) - (atest.wait.3),0)
}
if(test.scenario == "1 Day") {
out[,c(1:5,7:11)] <- pmax(out[,c(1:5,7:11)] - sympt.isolate - (contact.wait.2) - (atest.wait.2),0)
}
if(test.scenario == "No Delay") {
out[,c(1:5,7:11)] <- pmax(out[,c(1:5,7:11)] - sympt.isolate - (contact.wait.1) - (atest.wait.1),0)
}
if(!test.scenario %in% c("2 Days","1 Day","No Delay")) {
out <- 0
print("Need correct delay interval")
}
out <- cbind(out, atests, newSympt1.on, newSympt1.off, newSympt2.on, newSympt2.off, newSymptReported.on, newSymptReported.off,
contacts, tot.contacts, avail.tests, atests.isolate,
sympt.isolate, newSymptReportedTrue.on, newSymptReportedTrue.off,
atest.wait.3,atest.wait.2,atest.wait.1,
contact.wait.3,contact.wait.2,contact.wait.1, sympt.pcr, asymp.pcr, cases.caught
)
# store all states -- SIR states plus tested, reported, contacts
}
|
c88867ad125ae8a15061fd4f809469dc57ec06ca
|
51703d55be207df29decc17441c323be93b8adaf
|
/HW2/Solutions/1.R
|
0ed97026afe07a6774667621c2025d660d5cf483
|
[] |
no_license
|
Mahbodmajid/DataAnalysis
|
b4ee16f39e91dff0bbeea058e92162f91f28d84c
|
127e59a101d4847171fcb7728db38f4405a10e38
|
refs/heads/master
| 2021-06-10T02:04:52.153255
| 2019-11-25T00:58:18
| 2019-11-25T00:58:18
| 141,756,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 339
|
r
|
1.R
|
# Bar chart of the 20 companies with the most rows in `mobile`
# (assumes each row of `mobile` is one phone model -- confirm upstream).
companies <- mobile %>%
group_by(company) %>%
summarise(n = n())
# keep the 20 largest counts (top_n keeps ties and does not reorder rows)
top_companies <- companies %>%
top_n(n = 20, wt = n)
# re-level the factor by count so bars plot in ascending order
top_companies$company <-
factor(top_companies$company,
levels = top_companies$company[order(top_companies$n)])
ggplot(top_companies, aes(x = company, y = n, fill = n)) +
geom_bar(stat = "identity")
|
f1358527fa3c4a640ae7470a1f96147ec8d53447
|
fab90001189c6512ea178a46539bd6558f035944
|
/code/code_1_load_data.R
|
b1b4f76aa967a08327ae262561f7db6297b0c28f
|
[] |
no_license
|
Juannadie/lwa_europe
|
89428f68bf7c4a6ab6f2d9c5cf39c165b188a747
|
bae066ef5bd3f5bd38957743c9db20ff4a83ba44
|
refs/heads/master
| 2022-12-20T05:34:37.451665
| 2020-10-02T10:06:42
| 2020-10-02T10:06:42
| 300,573,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,546
|
r
|
code_1_load_data.R
|
#load all packages that we might use throughout the code
library(tidyverse)
library(readstata13)
library(reshape2)
library(dineq)
library(ggrepel)
library(matrixStats)
library(leaflet)
library(leaflet.providers)
library(ggplot2)
library(maptools)
library(rgeos)
library(Cairo)
library(ggmap)
library(scales)
library(RColorBrewer)
library(mapproj)
library(sf)
library(cowplot)
library(scales)
library(RColorBrewer)
library(mapproj)
library(zoo)
library(plotly)
library(gapminder)
library(ggrepel)
library(ggpubr)
library(reldist)
options ("scipen"=10000, "digits"= 6)
set.seed(8000)
#setwd("WORKING DIRECTORY") #Run to set directory where files are stored
# Load every EU-SILC personal file (*P.dta) from the working directory.
temp = list.files(pattern="*P.dta") #creates a list with all the personal files in the eu-silc directory directory
countries <- lapply(temp, read.dta13) #reads the list (note, alphabetical order)
#we create the country_id variable in each country
countries <- lapply(countries, function(X) {
X[["country"]] <- X[["pb020"]]
X
})
#we switch Greece code to GR in EU-SILC
countries <- lapply(countries, function(X) {
X[["country"]][X[["country"]] == 'EL'] <- 'GR'
X
})
# One random country code per file; presumably all rows of a file share the
# same pb020 code, so any sampled value identifies the country -- verify.
namescountries <- sapply(countries, function(x) {sample(x[["country"]], size = 1)})
names(countries) <- namescountries #We get the names of the countries from the variables
countries <- countries[order(names(countries))] #order the list alphabetically
allcountries <- countries
countries <- countries[c(1:20,22:26,28:31)] #We remove MT (element 21 in our list), since we could not obtain TW for occupations in that country and element 27 RS (Serbia) since we do not have TW information either.
#Note this assumes you have originally loaded the full list of countries available in EU-SILC 2018
names(countries) #renaming countries
#LOAD TELEWORKING INDICES
twisco2d <- read_csv("tw_isco_2d_countries.csv") #2digit isco
twisco1d <- read_csv("tw_isco_1d_countries.csv") #1digit isco
#2 digit isco: wide (one column per country) -> long for merging
twisco2dlong <- melt(twisco2d, id.vars = "isco2d")
names(twisco2dlong) <- c("isco2d", "country", "teleworking")
#1 digit isco
twisco1dlong <- melt(twisco1d, id.vars = "isco1d")
names(twisco1dlong) <- c("isco1d", "country", "teleworking")
#MERGE TELEWORKING, ESSENTIAL AND CLOSE INDICES WITH MAIN DATA
countries <- lapply(countries, function(X) {
X <- X[!is.na(X[["pl051"]]),]
X <- X[X[["pl051"]] != 0,]
X
}) #we keep only non-missing occupation observations and non-armed forces occupations
#create variables with compatible names for merging
#occupation
countries <- lapply(countries, function(X) {
X[["isco2d"]] <- X[["pl051"]]
X
})
countries <- lapply(countries, function(X) {
X[["isco1d"]] <- ifelse(X[["pl051"]] >= 10, trunc(X[["pl051"]]/10), X[["pl051"]])
X
}) #We create ISCO 1d (from ISCO 2d)
countries <- lapply(countries, function(X) {
X[["isco1d"]] <- ifelse(X[["pl051"]] < 10 & X[["pb020"]] !=("DE") & X[["pb020"]] !=("SI"), trunc(X[["pl051"]]/10), X[["isco1d"]])
X
}) #We also convert to zeros in isco 1d obs <10 in countries that have isco2d.
countries <- lapply(countries, function(X) {
X <- X[X[["isco1d"]] > 0,]
X
}) #we also remove observations = 0 in 1 in isco 1d (we exclude armed forces occupations)
#industry
countries <- lapply(countries, function(X) {
X[["nace"]] <- X[["pl111"]]
X
})
#we merge the main data with the teleworking index
#for countries with 2d information about teleworking
names(countries)
#We filter for the ones that have 2digit ISCO in both our TW and EU-SILC
countries2dtw <- countries[c(1:2,4:6,8:22,24:26,28:29)] # All countries except BG, DE, PL, SI
names(countries2dtw)
#We filter for the ones that have 1digit ISCO in both our TW and EU-SILC
countries1dtw <- countries[c(3, 7, 23, 27)] #BG, DE, PL, SI
names(countries1dtw)
#function to merge
# Inner-join two data frames on 2-digit ISCO occupation code and country.
merge2d <- function(df1, df2) {
  merge(df1, df2, by = c("isco2d", "country"))
}
countries2dtw <- lapply(countries2dtw, function (X) (merge2d (df1 = X, df2 = twisco2dlong))) #attach the 2-digit teleworking index to each of these countries
#function to merge
# Inner-join two data frames on 1-digit ISCO occupation code and country.
merge1d <- function(df1, df2) {
  merge(df1, df2, by = c("isco1d", "country"))
}
countries1dtw <- lapply(countries1dtw, function (X) (merge1d (df1 = X, df2 = twisco1dlong))) #we merge each of this countries
#we reaggregate all countries in the list
countries <- c(countries2dtw, countries1dtw)
countries <- countries[order(names(countries))] #order back the list alphabetically
names(countries)
#LOAD ESSENTIAL AND CLOSED INDICES
#Directory where the indices are stored
essentialindex21 <- read_csv("essential_index21.csv") #2digit isco - 1 digit nace
closedindex21 <- read_csv("closed_index21.csv")
essentialindex11 <- read_csv("essential_index11.csv") #1digit isco - 1 digit nace
closedindex11 <- read_csv("closed_index11.csv")
essentialindex2 <- read_csv("essential_index2.csv") #2digit isco only
closedindex2 <- read_csv("closed_index2.csv")
#now convert to long format; we need that to merge by two variables
essential21long <- melt(essentialindex21, id.vars = "isco2d")
names(essential21long) <- c("isco2d", "nace", "essential_index")
closed21long <- melt(closedindex21, id.vars = "isco2d")
names(closed21long) <- c("isco2d", "nace", "closed_index")
#And merge essential and closed index in one "long format file"
ei_ci_index_long_2d_isco_1d_nace <- merge(essential21long, closed21long, by = c("isco2d", "nace"))
#now the same for 1 digit ISCO - 1 digit NACE coding
essential11long <- melt(essentialindex11, id.vars = "isco1d")
names(essential11long) <- c("isco1d", "nace", "essential_index")
closed11long <- melt(closedindex11, id.vars = "isco1d")
names(closed11long) <- c("isco1d", "nace", "closed_index")
#and merge essential and closed indices in one long file
ei_ci_index_long_1d_isco_1d_nace <- merge(essential11long, closed11long, by = c("isco1d", "nace"))
#now for only isco 2 digit (used for Denmark, which lacks NACE information)
#no need to reshape, only one variable already
names(essentialindex2) <- c("isco2d", "essential_index")
names(closedindex2) <- c("isco2d","closed_index")
#And merge ((no industries))
essential_closed_index_long_only_isco <- merge(essentialindex2, closedindex2, by = c("isco2d"))
#now adding essential and closed index
#We filter for the countries that have 2digit ISCO and 1 digit NACE
countries1 <- countries[c(1:2,4:6,9:22,24:26,28:29)] #all except BG, DE, PL, SI and DK
names(countries1)
#function to merge
# Inner-join two data frames on 2-digit ISCO code and 1-digit NACE industry.
merge1 <- function(df1, df2) {
  merge(df1, df2, by = c("isco2d", "nace"))
}
countries1 <- lapply(countries1, function (X) (merge1 (df1 = X, df2 = ei_ci_index_long_2d_isco_1d_nace))) #attach the essential/closed indices to each of these countries
#Now for the countries for which we only have 1-digit tw
#We filter for the ones we merge BG, DE, PL, SI (that only have 1digit ISCO and 1 digit NACE)
countries2 <- countries[c(3, 7, 23, 27)]
names(countries2)
# Inner-join two data frames on 1-digit ISCO code and 1-digit NACE industry.
merge2 <- function(df1, df2) {
  merge(df1, df2, by = c("isco1d", "nace"))
}
countries2 <- lapply(countries2, function (X) (merge2 (df1 = X, df2 = ei_ci_index_long_1d_isco_1d_nace))) #attach the 1-digit indices to BG, DE, PL, SI
#Now for Denmark (DK does not have NACE info in EU-SILC (most are missing))
countries3 <- countries[c(8)]
names3 <- namescountries[c(8)]
# Inner-join two data frames on 2-digit ISCO code only (no industry key);
# used for Denmark, whose EU-SILC file lacks usable NACE information.
merge3 <- function(df1, df2) {
  merge(df1, df2, by = c("isco2d"))
}
countries3 <- lapply(countries3, function(x) {merge3(x, essential_closed_index_long_only_isco)}) #Denmark: merge on ISCO only
# Reassemble the full country list and persist it for the next script.
countries <- c(countries1, countries2, countries3)
countries <- countries[order(names(countries))] #order back the list alphabetically
names(countries)
saveRDS(countries, file = "countries_1.rds") #saves the data in working directory
|
094c78e07cb8683900d2089150395ab2513f76a3
|
14abfaec6c704d8ea1799fa8f2dfe834d66953b1
|
/selection_score_audit/selection_score_audit.R
|
a4c6965461acb1ba12471fbbdc7bc870d4927fae
|
[] |
no_license
|
jrliebster/getting-and-cleaning-data
|
d7d119125750006f74c52ff81f396fb91dd8ade5
|
b30aa5f7a1287bd5dece503885cd0f74ff126aa0
|
refs/heads/master
| 2021-01-22T18:06:47.183111
| 2017-08-23T20:47:59
| 2017-08-23T20:47:59
| 100,740,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
selection_score_audit.R
|
# Selection-score audit: recode letter ratings to numbers and verify the
# recorded "Critical Thinking" overall score against a recalculation from
# its component scores.
if (!require("pacman")) install.packages("pacman"); library(pacman)
p_load(tidyr, readxl, readr, dplyr, janitor, stringr, zoo, magrittr)  # duplicate `tidyr` removed
dat <- read_excel("NYCTF File Review Week 1 for Robert.xlsx")
# Map letter scores onto a 1-5 scale.  The columns come in as character,
# so the replacement numbers are stored as their character forms ("5", ...).
dat[dat == "E"] <- 5
dat[dat == "FA+"] <- 4
dat[dat == "FA"] <- 3
dat[dat == "FA-"] <- 2
dat[dat == "NFA"] <- 1
# Overall critical thinking: 1 if any component scored NFA (== 1), else 2.
# Fix: the original second clause was the one-sided formula `~ 2`, which
# case_when() rejects at runtime ("formulas must be two-sided").  The
# intended catch-all is `TRUE ~ 2`; the unreachable `TRUE ~ 1000` sentinel
# that followed has been dropped.
dat$critical_thinking_recalc <- case_when(
  (dat$TeachingSamplePlanningFormCT == 1 | dat$GroupActivityCT == 1 | dat$TeachingSampleReteachCT == 1) ~ 1,
  TRUE ~ 2
)
# Number of rows where the recalculation disagrees with the recorded
# overall score (0 means the file is internally consistent).
sum(dat$critical_thinking_recalc != dat$CriticalThinkingOverall)
|
9328fb6e4dc7aeb22ef784c0e3c7c9bdecff99fe
|
78fb338b3a288c75f2f00e0e67ceaecdc5aeecdd
|
/R/standings.R
|
0972b89c9ade4713c4e8d4275adf61023439cc33
|
[
"MIT"
] |
permissive
|
pbulsink/nhlRapi
|
f5b9c8f80b5030b9fc5ce92c811937ccb7c7f2f6
|
88c65b93dbf0e8b787ffbf02ef915f84330b50f9
|
refs/heads/master
| 2020-04-22T06:01:11.780541
| 2019-06-26T13:27:45
| 2019-06-26T13:27:45
| 170,176,071
| 0
| 1
|
NOASSERTION
| 2019-06-26T13:27:46
| 2019-02-11T18:03:09
|
R
|
UTF-8
|
R
| false
| false
| 1,974
|
r
|
standings.R
|
#' Get Standing
#'
#' @description Get the NHL standings, including historical standings. Or, get one single standing
#' @param standingsType Optional, get specific standing type. See \code{\link{getStandingsTypes}()} for allowed values
#' @param season Get standings for a specific season. If null, current season returned. Season should be in format 20172018. Overrides date specification.
#' @param expand Whether to return detailed information from each team.
#'
#' @return The API output of standings
#' @export
#'
#' @examples
#' # get standings
#' standings <- getStandings()
#'
#' #Get standings from 20082009
#' standings <- getStandings(season = 20082009)
getStandings <- function(standingsType = NULL, season = NULL, expand = FALSE) {
  # Fetch NHL standings (optionally of a specific type / season, optionally
  # expanded with per-team records) from the stats API.
  if (!is.null(standingsType)) {
    # Validate before hitting the API: exactly one, known, standings type.
    stopifnot(length(standingsType) == 1)
    stopifnot(standingsType %in% c("regularSeason",
      "wildCard", "divisionLeaders", "wildCardWithLeaders",
      "preseason", "postseason", "byDivision",
      "byConference", "byLeague"))
    query <- querybuilder("standings", standingsType)
  } else {
    query <- "standings"
  }
  # Accumulate optional query-string modifiers; NULL means "none".
  modifier <- NULL
  if (expand) {
    modifier <- c(modifier, "expand=standings.record")
  }
  if (!is.null(season)) {
    stopifnot(length(season) == 1)
    stopifnot(validSeason(season))
    modifier <- c(modifier, paste0("season=", season))
  }
  # Fix: the original ran `modifier <- modifier[!is.null(modifier)]` here,
  # which is a no-op -- is.null() on a non-NULL vector is a scalar FALSE,
  # so the indexing kept every element.  The dead code has been removed.
  return(getStatAPI(query = query, modifiers = modifier))
}
#' Get Standings Types
#'
#' @description Only certain standings display types are accepted. This returns the full valid list.
#'
#' @return a list of standings types to call with \code{\link{getStandings}()}
#' @export
#'
#' @examples
#' #Show the accepted standings types
#' standingTypes <- getStandingsTypes()
getStandingsTypes <- function() {
  # Flatten the API response into a plain, unnamed vector of type names.
  resp <- getStatAPI("standingsTypes")
  unname(unlist(resp))
}
|
bd40bc067ac213f1a5b3105e695290db7bbf98cd
|
8decd5c9ba82c372762b0e11486725ad1f8596e0
|
/profile.R
|
f41f5918c759e2fa794d83bd594fd5587ca2c948
|
[] |
no_license
|
LiangZZZ123/dmpa
|
22168d839d61af6370cc5d92cee90a06dcef9afb
|
7e89a7c168235d544cf6c91acc8a1a48d8da1896
|
refs/heads/master
| 2020-04-24T06:40:29.435500
| 2017-02-27T02:55:24
| 2017-02-27T02:55:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
profile.R
|
# Practice: locate the site-wide startup profile (Rprofile.site) inside the
# R installation and open it for editing.
# (Fix: use `<-` for assignment instead of `=`, per R style convention.)
site_path <- R.home(component = "home")  # R installation directory
site_path
fname <- file.path(site_path, "etc", "Rprofile.site")
file.exists(fname)  # TRUE when a site profile already exists
file.edit(fname)    # opens the file in the current editor (interactive only)
|
eef8b1cc02dc06b5ac8405bf4168a1bad568c2f4
|
7e5e5139f817c4f4729c019b9270eb95978feb39
|
/Introduction to Tidyverse/Chapter 1-Data Wrangling/5.R
|
2b2fd61a05891d24a93f014ac80bf3e955daa3d1
|
[] |
no_license
|
Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track-
|
a45594a8a9078076fe90076f675ec509ae694761
|
a50740cb3545c3d03f19fc79930cb895b33af7c4
|
refs/heads/main
| 2023-05-08T19:45:46.830676
| 2021-05-31T03:30:08
| 2021-05-31T03:30:08
| 366,929,815
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
5.R
|
# Filtering and arranging
# You'll often need the pipe operator (%>%) to chain several dplyr verbs.
# Here a filter() and an arrange() are combined to find the highest
# population countries in a particular year.
#
# Task (100 XP): extract the observations from 1957 and sort them in
# descending order of population (pop).
library(gapminder)
library(dplyr)
# Sort by population (largest first), then keep only the year 1957 -- the
# result is identical to filtering first and sorting afterwards.
gapminder %>%
  arrange(desc(pop)) %>%
  filter(year == 1957)
|
1c0592332eee374fa65cfa8f792d5f24956c8770
|
c1d01cff0c002cdb20008e8776de1632ecca031b
|
/cachematrix.R
|
bbca2864843c5e067e8bf36ad32047292ffc99af
|
[] |
no_license
|
hgunicamp/ProgrammingAssignment2
|
604175d4c3ae4be7bc5c85f9a2716d62345cb1dd
|
ad37abcbd8463a1a46bec262b38b081d26cf297f
|
refs/heads/master
| 2021-01-24T00:54:47.705670
| 2015-02-16T16:40:12
| 2015-02-16T16:40:12
| 30,705,692
| 0
| 0
| null | 2015-02-12T14:19:31
| 2015-02-12T14:19:30
| null |
UTF-8
|
R
| false
| false
| 2,088
|
r
|
cachematrix.R
|
## makeCacheMatrix(x): function
## - Arg:
## x: matrix()
## - Use:
## aMatrix <- makeCacheMatri(x)
## - Description:
## Returns a list of functions to manipulate a square matrix internally
## stored, and its inverse, created using the "x" argument.
## The functions are:
## - Get(): Returns the internally stored square matrix.
## - Use:
## y <- aMatrix$Get()
## - Set(x): Stores internally the matrix passed through
## argument "x". It invalidates the value of the
## inverse previously stored.
## - Args:
## x: matrix()
## - Use:
## aMatrix$Set(x)
## - GetInverse(): Returns the value of the inverse matrix stored
## internally. If it was not calculated yet, resturns NULL.
## - Use:
## y <- aMatrix$GetInverse()
## - SetInverse(x): Stores internally the matrix passed through
## argument "x" as the inverse of the previous stored
## matrix.
## - Args:
## x: A square matrix
## - Use:
## aMatrix$SetInverse(x)
# Build a "cached matrix": a list of accessor closures around a square
# matrix `x` and its (lazily computed) inverse.  Replacing the matrix via
# Set() invalidates any stored inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  list(
    "Get" = function() x,
    "Set" = function(y) {
      x <<- y
      inverse <<- NULL  # stored inverse no longer matches the new matrix
    },
    "GetInverse" = function() inverse,
    "SetInverse" = function(inv) inverse <<- inv
  )
}
## cacheSolve(x,...): function
## - Arg:
## x: makeCacheMatrix()
## - Use:
## y <- cacheSolve(x,...)
## - Description:
## Returns an internally stored inverse matrix stored into a list created
## through makeCacheMatrix function. If the inverse was not calculated yet,
## calculates its inverse, updates the list and returns the inverse
## calculed.
# Return the inverse of the special matrix object created by
# makeCacheMatrix(): use the cached value when present, otherwise compute
# it with solve(), store it in the cache, and return it.
cacheSolve <- function(x, ...) {
  cached <- x$GetInverse()
  if (is.null(cached)) {
    cached <- solve(x$Get(), ...)
    x$SetInverse(cached)
  }
  cached
}
|
f5396515d3116359014f9b7022e6ba721c294750
|
3af91945083aa604efc778ea52a17ad60766948b
|
/allele_betabinomial_plot.R
|
481b3a7edcd209ef65c6bd5f9e7745d9dfb30b86
|
[] |
no_license
|
cjieming/R_codes
|
fa08dd1f25b22e4d3dec91f4fb4e598827d7492f
|
5b2bcf78dc217bc606c22f341e1978b5a1246e0c
|
refs/heads/master
| 2020-04-06T03:53:50.030658
| 2019-06-30T07:31:35
| 2019-06-30T07:31:35
| 56,031,249
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 828
|
r
|
allele_betabinomial_plot.R
|
# Overlay beta-binomial pmfs (n = 100, mu = 0.5) for a grid of dispersion
# values rho in {0, 0.1, ..., 1}, one curve per colour, on an X11 device.
library(VGAM)
colors <- c("red","blue","green","orange","cyan","pink","purple",
"brown","black","slategray1","violetred","tan","deeppink","darkgreen",
"orchid","darksalmon","antiquewhite3","magenta","darkblue","peru","slateblue",
"thistle","tomato","rosybrown1","royalblue","olivedrab") ##Set of 26 colours to use for all plots
# dispersion (rho) values to sweep over
k = seq(0,1,by=0.1)
n = 100
# one column of pmf values per rho: P(X = 0..n) under dbetabinom
a = apply(as.data.frame(k),1,function(x) dbetabinom(seq(0,n),n,0.5,x))
leg = matrix(0,length(k))
x11()
for(i in 1:length(k))
{
if(i==1)
{
plot(a[,i],ylim=c(0,0.25),type='b',col=colors[i])
leg[i] = paste("b=",k[i])
}
else
{
# par(new=TRUE) overlays subsequent curves on the same axes
par(new=TRUE)
plot(a[,i], col=colors[i],ylim=c(0.,0.25), type='b')
leg[i] = paste("b=",k[i])
}
}
legend(2,0.25,leg,colors[1:length(k)])
|
c5f002395877e706942e444ea8799983dce12f11
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/MultiRR/R/sd2.R
|
77fdbded34312dd9c4e72f8398856a5e1f22b365
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36
|
r
|
sd2.R
|
# Column-wise standard deviations of a matrix (or anything apply() accepts),
# returned as a numeric vector named after the columns.
sd2 <- function(x) {
  apply(x, MARGIN = 2, FUN = sd)
}
|
f721a3b73e2fee85f59aa7177d608d5bf7b2ec38
|
72e01915a0207d4971d7982dc02f7638162929cd
|
/R scripts/Metabolome DEA.R
|
b0a8ca61c39bc59f2903d515c39cad9db658fe32
|
[] |
no_license
|
sabdeolive/Sabrina_BMS_Bachelor-Internship
|
1233a0b3bd4a1aa26ed7414d8707c225d7332188
|
81d7428e182ed917a7d8b47f6e0e4275fbe50a0d
|
refs/heads/master
| 2021-06-13T16:01:36.242275
| 2020-07-02T13:52:51
| 2020-07-02T13:52:51
| 254,438,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,440
|
r
|
Metabolome DEA.R
|
# Differential-abundance analysis of metabolome data (T2DM iHMP cohort)
# using limma, contrasting IR vs IS subjects.
#Install Bioconductor
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install(version = "3.10")
#Install Biobase
BiocManager::install("Biobase")
#Assigning metabolome data to metabolome and creating matrix
# (assumes `metabolome_abundance.11` was loaded into the session beforehand)
metabolome <- metabolome_abundance.11
class(metabolome)
dim(metabolome)
metabolomeMat <- data.matrix(metabolome)
dim(metabolomeMat)
class(metabolomeMat)
#Assigning subject data to subject
subject <-Subject.data.T2DM.iHMP.5
class(subject)
dim(subject)
#Load package
library(Biobase)
#Creating ExpressionSet object: assay matrix + subject-level phenotype data
eset <- ExpressionSet(assayData = metabolomeMat,
phenoData = AnnotatedDataFrame(subject))
dim(eset)
#Install limma
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("limma")
#Translate statistical model to R code (creation of design matrix)
# intercept + indicator for the IS group
design<- model.matrix(~IR_IS_classification, data = pData(eset))
head(design,2)
colSums(design)
#Load limma
library(limma)
#Fit the model with lmFit
fit<- lmFit(eset,design)
#Calculate the (moderated) t-statistics
fit<- eBayes(fit)
#Summarize results
results<- decideTests(fit[,"IR_IS_classificationIS"])
summary(results)
fit
#Extracting top ranked metabolites from fit
# coef=2 is the IR_IS_classificationIS coefficient (column 2 of the design)
TableMet <- topTable(fit,coef=2)
#Export TableMet
write.table(TableMet,
file = "c:/Users/sabde/Documents/TopRankedMetabolitesFromR.txt",
sep="\t")
|
da1f7e841764a84a309831c2fc5ea8c0968d5c8b
|
6977e192700edbc39847f6069dba22d16ee6ae46
|
/man/tse_codes.Rd
|
d271c4f340cdca9a204b46ddd3fc07e1334c9c67
|
[
"MIT"
] |
permissive
|
RobertMyles/partycodesbr
|
0969525a206e289fc4f53d5161a65a492d013e49
|
93b75fb639e0b9988ccea44ba951b1fa0439588e
|
refs/heads/master
| 2020-04-14T03:43:00.433753
| 2018-12-30T20:17:54
| 2018-12-30T20:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 369
|
rd
|
tse_codes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tse_codes.R
\name{tse_codes}
\alias{tse_codes}
\title{Return Numerical codes for Brazilian political parties from the
Tribunal Superior Eleitoral (TSE).}
\usage{
tse_codes()
}
\description{
Return Numerical codes for Brazilian political parties from the
Tribunal Superior Eleitoral (TSE).
}
|
9d98ef561c2c7b272c11d3ea2549d056c6ec9de1
|
a5b757b279966b9e86cef3e70092fd5b791b11f9
|
/man/summarize_home_meds.Rd
|
1e9e59fa83cfe5971cc63b268980b2933a6501e5
|
[] |
no_license
|
bgulbis/mbohelpr
|
62c15326680e91a2d394614287770e101b13daf6
|
e362dd59db37edb2c870a46d3389e7f95d9a745b
|
refs/heads/master
| 2023-01-24T11:42:03.100949
| 2023-01-11T20:30:08
| 2023-01-11T20:30:08
| 197,245,899
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 837
|
rd
|
summarize_home_meds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize_data.R
\name{summarize_home_meds}
\alias{summarize_home_meds}
\title{Summarize home meds}
\usage{
summarize_home_meds(x, ..., ref, pts = NULL, home = TRUE)
}
\arguments{
\item{x}{data frame}
\item{...}{columns}
\item{ref}{a reference}
\item{pts}{patients}
\item{home}{home meds vs discharge prescriptions}
}
\description{
Summarize home meds
}
\details{
The data frame passed to \code{ref} should contain three character
columns: name, type, and group. The name column should contain either
generic medication names or medication classes. The type column should
specify whether the value in name is a "class" or "med". The group column
should specify whether the medication is a continous ("cont") or scheduled
("sched") medication.
}
|
f6e1fa08bcb949f3f0b78ebe88366c1be7b954ef
|
c9ebc6872dfcd3dc6618c23d1c52c86c9bb298dd
|
/cachematrix.R
|
c0d897756479a82b135d83fef1c04a3fa43c7c12
|
[] |
no_license
|
Butterworks/ProgrammingAssignment2
|
785a9f71655eeeb27c75efbc53ce1ff775e86efd
|
883465aedb9da1c838e5f5a2f99b2de52514f411
|
refs/heads/master
| 2021-01-22T05:32:39.830604
| 2015-12-24T00:02:04
| 2015-12-24T00:02:04
| 48,306,993
| 0
| 0
| null | 2015-12-20T04:19:13
| 2015-12-20T04:19:12
| null |
UTF-8
|
R
| false
| false
| 2,282
|
r
|
cachematrix.R
|
## R Programming Assignment 2
## Butterworks, Rudolf J
## From the discussion board; thank you to Semhar for the great post to explain the example functions.
## 4 functions:
## makeVector, from assignment 2 example but added commentry to help me understand what whats being achieved.
## cacheMean, from assignment 2 example
## makeCacheMatrix, function that creates special matrix cache to be solved by solveCache,
## solveCache, takes the special matrix cache and solves the inverse.
## Copied from makeVector, this function sets out 4 functions within it self, set, get, setInv and getInv.
## makeCacheMatrix does not calculate anything, it stores the given matrix or matrix via $set
# Create a special "matrix" object that can cache its inverse.  The cache
# (i) is cleared whenever set() replaces the stored matrix.
# NOTE(review): the list element is named "setInverse" while its getter is
# "getInv"; cacheSolve() below calls x$setInv(), which only works through
# `$` partial matching.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  list(
    set = function(y) {
      x <<- y
      i <<- NULL  # a new matrix invalidates any cached inverse
    },
    get = function() x,
    setInverse = function(inv) i <<- inv,
    getInv = function() i
  )
}
## copied from cacheMean, this function uses the function solve to return the inverse of x
## Return the inverse of the special matrix object made by makeCacheMatrix,
## computing it with solve() and caching it on first use.
cacheSolve <- function(x, ...) {
  i <- x$getInv()
  if(!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data,...)
  # Fix: call the list element by its full name.  The original used
  # x$setInv(i), which has no exact match in the makeCacheMatrix list and
  # only resolved via `$` partial matching against "setInverse" -- fragile
  # (it silently returns NULL if another "setInv*" element is ever added).
  x$setInverse(i)
  i
}
#Example Functions (from Assignment two outline)
# Example from the assignment outline: a vector wrapper that can cache its
# mean.  set() replaces the stored vector and clears the cached mean;
# setmean()/getmean() only store and retrieve -- no computation happens here.
makeVector <- function(x = numeric()) {
  m <- NULL
  list(
    set = function(y) {
      x <<- y
      m <<- NULL  # cached mean is stale for the new vector
    },
    get = function() x,
    setmean = function(mean) m <<- mean,
    getmean = function() m
  )
}
## Return the mean of the vector wrapped by `x` (a makeVector object),
## computing it only on the first call and reusing the cached value after.
## Extra arguments in `...` are forwarded to mean().
cachemean <- function(x, ...) {
  stored <- x$getmean()
  if (is.null(stored)) {
    ## Nothing cached yet: compute the mean and stash it on the object.
    stored <- mean(x$get(), ...)
    x$setmean(stored)
  } else {
    message("getting cached data")
  }
  stored
}
|
d4e03698ffd43bad6638a6c2b2666ffdd28384ac
|
f92b0a4bef9fa53e62fec46840d9ae20f48af32a
|
/03 Get data on BitCoin.r
|
107479927ec9451eb56d6bd38e3b68d92ff96572
|
[] |
no_license
|
valecaio/Bitcoin
|
3240e6925521d0dfd628f292bfe4354804ff7061
|
d6a5bb051564db41060ed689fcb4df1819c4e654
|
refs/heads/master
| 2020-07-31T17:43:10.180928
| 2018-06-24T01:31:29
| 2018-06-24T01:31:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,545
|
r
|
03 Get data on BitCoin.r
|
#Gets Bitcoin Data from the web
#By Camilo Mora
#2018
#this code is for use in R. 3.5.0 and it requires the library RJSONIO to be installed.
#the code goes to blocktrail.com and takes data on the blocks needed. In this case all the information for the blocks in 2017
#Data are collected in groups of 1000 blocks, starting on the first block defined in the code.
setwd("C:/Users/Camilo Mora/Documents/Bitcoins/") ##define where you want the blocks data to be saved
library(RJSONIO) # load the necessarylibrary for web-connection
Start= 446097 #first block in 2017 446097 #454102
Last= 502027 #last block of 2017 502027
## Replace NULL elements of a list with NA so that unlist() keeps a slot
## for every field (NULLs would otherwise be dropped silently).
##
## Fix: the original used sapply(), which returns list() for an empty input,
## making `x[list()] <- NA` an error; vapply() always returns logical(0)/
## logical(n) and also guards against non-logical results.
nullToNA <- function(x) {
  x[vapply(x, is.null, logical(1))] <- NA
  x
}
# NOTE(review): a live API key is hard-coded here; consider reading it from an
# environment variable instead of committing it to source control.
MY_APIKEY <- "6debaf0ebd4c9081795fe38716df550c46ab06fb"
# API url: block number and the api_key query string get appended per request.
block_url <- "https://api.blocktrail.com/v1/btc/block/"
APIkey <- paste0("?api_key=", MY_APIKEY)
# NOTE(review): the trailing comment says 50 loops, but `1:1` runs only one
# batch of 1000 blocks -- confirm whether the bound should be 1:50.
for (i in 1:1) #do 50 loops of 1000 blocks each, that will conver the 47925 blocks of 2017
{
  # Accumulator for this batch; one row per block.
  Data=data.frame()
  for (x in 0:999) {
    # x1 <- runif(1, 0.0, 1.1) #sleep for a random time so it does not get blocked
    # Sys.sleep (x1)
    # Absolute block height for this request (batch offset + position + Start).
    BlockNum=(i*1000+x)+Start
    # Fetch the block JSON; nullToNA keeps NULL fields as NA so unlist()
    # produces a consistent set of columns.
    block_data_list_0 <- nullToNA(fromJSON(paste0(block_url, BlockNum, APIkey)))
    # Flatten the nested list into a one-row data frame.
    datax=data.frame(t(unlist(block_data_list_0)) )
    # NOTE(review): rbind inside a loop copies Data on every iteration
    # (quadratic); collecting rows in a list and do.call(rbind, ...) once
    # would be faster for 1000 blocks.
    Data=rbind(Data,datax)
  }
  # Write one CSV per 1000-block batch, named "<i>.csv" in the working dir.
  write.csv(Data,paste(i,".csv",sep=""))
}
|
fbbf22d0f0a9da3c4ebea20c0ab9b6c7a8a9e17a
|
74694fc3e2a8e5dbedf5297bc6f841dbe5000ad9
|
/man/adjust_for_splits.Rd
|
a844d0b51be2d91183b8452073c44add81b877a3
|
[
"MIT"
] |
permissive
|
aljrico/rfinance
|
2c5c1dce1b742f4fb9c07d1cd31888c8d78b693a
|
473caf34dcaa7c349d037c74d71a2046f5795056
|
refs/heads/master
| 2021-12-27T08:32:31.002366
| 2021-11-11T11:13:34
| 2021-11-11T11:13:34
| 231,904,432
| 0
| 1
|
NOASSERTION
| 2020-02-23T12:15:42
| 2020-01-05T11:03:09
|
R
|
UTF-8
|
R
| false
| true
| 425
|
rd
|
adjust_for_splits.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adjust_for_splits.R
\name{adjust_for_splits}
\alias{adjust_for_splits}
\title{Adjust For Splits}
\usage{
adjust_for_splits(df, symbol)
}
\arguments{
\item{df}{data.frame containing the prices/dividends to be adjusted}
\item{symbol}{Ticker of the subject company}
}
\description{
This function adjusts prices or dividends by past company splits
}
|
b2503c9e28dec83a096103c47de6783581626345
|
8a3a1a824f820e179c9bb8bd8b08bd6d3d9670d6
|
/Cross Validation - Auto.R
|
0c463c85dfe708152f3a057ba306af05d5a4ee98
|
[] |
no_license
|
hesham230/Data-Science-with-R
|
bee3f451706e0aab2f7666988ffb9e269430d8ec
|
ef7967150eba83d3c15c8cdbb0f8fbc179226f79
|
refs/heads/main
| 2023-06-27T06:53:55.000921
| 2021-07-26T06:31:57
| 2021-07-26T06:31:57
| 389,527,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,893
|
r
|
Cross Validation - Auto.R
|
library(ISLR)
# Work on a copy of the ISLR Auto data set (mpg vs horsepower throughout).
mpg = Auto
str(mpg)
## Validation-set approach ---------------------------------------------------
## With no held-out test set, split the data in half at random and estimate
## test MSE on the held-out half. Two different random splits (two seeds)
## give two similar -- but not identical -- estimates.
# First Model
set.seed(1)
train_index = sample(length(mpg$mpg),0.5*length(mpg$mpg))
lm1 = lm(mpg ~ horsepower, data = mpg, subset = train_index)
trainSet = mpg[train_index,]
validSet = mpg[-train_index,]
summary(lm1)
# Estimated test MSE on the validation half for split 1.
EMSE1 = mean((validSet$mpg - predict(lm1, newdata = validSet))^2)
# Model 2: same procedure, different random split.
set.seed(2)
train_index = sample(length(mpg$mpg),0.5*length(mpg$mpg))
lm2 = lm(mpg ~ horsepower, data = mpg, subset = train_index)
trainSet = mpg[train_index,]
validSet = mpg[-train_index,]
summary(lm2)
EMSE2 = mean((validSet$mpg - predict(lm2, newdata = validSet))^2)
## cv.glm() splits the data into K groups, refits the model leaving each group
## out in turn, and averages the prediction error over the omitted groups.
## K equal to the number of observations gives leave-one-out CV (LOOCV);
## smaller K gives K-fold CV.
## Leave-one-out cross-validation --------------------------------------------
fit.glm = glm(mpg ~ horsepower, data = mpg) #without family = , we get linear regression
coef(fit.glm)
library(boot)
# NOTE(review): `data = Auto` here while the rest of the script uses `mpg`;
# they hold the same data, but using `mpg` throughout would be consistent.
cv.err = cv.glm(data = Auto, glmfit = fit.glm, K = nrow(mpg))
cv.err$delta
cv.err$K # when k = n R will do a leave one out cross validation
## K-fold CV ------------------------------------------------------------------
# delta[2] is the bias-adjusted cross-validation estimate of prediction error.
cv.error.10 = cv.glm(data = mpg, glmfit = fit.glm, K = 10)$delta[2]
cv.error.5 = cv.glm(data = mpg, glmfit = fit.glm, K = 5)$delta[2]
# Cost function for binary outcome
# cost <- function(r, pi = 0) mean(abs(r-pi) > 0.5)
|
efeffe285cb6cc70d455336e6b113dfd89b650e4
|
4d8cc64d5d7756b7824f0ecd4d985b797fecd57c
|
/R/is_err.R
|
9bc13807d7114668ec47f5c865f746d8c59110ad
|
[] |
no_license
|
cran/TeachNet
|
226b84b4a53689745d562d706e541d7176b8635a
|
45202a95b35c8ed7446a4b2487828ba929ce281a
|
refs/heads/master
| 2020-04-14T03:35:59.188990
| 2018-11-27T15:30:11
| 2018-11-27T15:30:11
| 17,693,932
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 268
|
r
|
is_err.R
|
## Validate a user-supplied error-function identifier (err.fct).
## Accepts exactly one of "sse" (sum of squared errors) or "ce"
## (cross-entropy); stops with a descriptive error otherwise.
## Returns NULL invisibly when the input is valid.
##
## Fix: the original tested vector-ness via `!is.na(x[2])`, which let
## character(0) fall through to a cryptic "argument is of length zero"
## error in the final `if`; an explicit length check covers both the
## empty and the multi-element case.
is.err <- function(x) {
  if (!is.character(x)) {
    stop("The err.fct you entered is not of type character!")
  }
  if (length(x) != 1) {
    stop("The err.fct you entered is a vector!")
  }
  if (!(x %in% c("sse", "ce"))) {
    stop("The err.fct you entered is not valid!")
  }
}
|
b37341bbfabe6527dd16e9843180a07929fbeaa4
|
e2147faf5003816d6d26d430e3ca49c761642fa7
|
/old_NC_DEM_scripts/NC_read_img_raster_v2.R
|
d7172b5461115833e0b0014ee02a91e7c0eba45c
|
[] |
no_license
|
mschwaller/R_code
|
053ba60710168f920cb4c83ddd2795eafabc15d3
|
f5b989600f8bf4faaedb5ebe246d41cb80146f20
|
refs/heads/master
| 2020-04-22T06:42:38.339522
| 2019-05-14T20:30:54
| 2019-05-14T20:30:54
| 170,199,824
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
NC_read_img_raster_v2.R
|
# Extract NLCD 2006-to-2011 land-cover change values at field-site locations.
# NOTE(review): no library(raster)/library(sp)/library(rgdal) calls are
# visible in this chunk -- presumably loaded earlier; verify before running.
source_dir <- "/Users/mschwall/Desktop/impervious/nlcd_2006_to_2011_landcover_fromto_change_index_2011_edition_2014_10_10/"
source_file_img <- paste(source_dir, "nlcd_2006_to_2011_landcover_fromto_change_index_2011_edition_2014_10_10.img", sep="")
# load in map and locality data
NLCD<-raster (source_file_img)
# Site localities; 'sites.csv' is read from the current working directory.
sites<-read.csv('sites.csv', header=T)
#crop site data to just latitude and longitude
# NOTE(review): assumes longitude/latitude live in columns 4:5 -- confirm
# against the CSV layout.
sites<-sites[,4:5]
#convert lat/lon to appropirate projection
str (sites)
coordinates(sites) <- c("Longitude", "Latitude")
# Input coordinates are WGS84 lon/lat.
proj4string(sites) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84")
# Reproject to the NLCD Albers Equal Area CRS so points align with the raster.
sites_transformed<-spTransform(sites, CRS("+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))
#plot the map
plot (NLCD)
#add the converted x y points
points (sites_transformed, pch=16, col="red", cex=.75)
#extract values to poionts (cell values within a 1 km buffer of each site)
Landcover<-extract (NLCD, sites_transformed, buffer=1000)
|
f6171ca9914bda16ef5a8afc1040f8bee82469d7
|
3aa14a1f63b66cd23741b8ddbe91ee979e9d75b9
|
/ui.R
|
fc34f217d1f3ef5c5a4da5ae3f1ad4a19cf32f6f
|
[] |
no_license
|
kaloatanasov/JHU-Developing-Data-Products-Course-Project-Shiny-App
|
1608b61512be9aaefd463ca9aa351c263088b511
|
b4739fa06187113916176c8c6e81abd2f6e2b32a
|
refs/heads/master
| 2020-03-23T08:20:31.046774
| 2018-07-17T18:28:43
| 2018-07-17T18:28:43
| 141,321,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 753
|
r
|
ui.R
|
library(shiny)
library(ggplot2)
# Shiny UI: a single slider for the diamond price budget (USD) on the left,
# and a tabset on the right with the prediction plot plus a documentation tab.
# Output ids ("plot", "pred", "text") are rendered by the matching server.R.
shinyUI(fluidPage(
  titlePanel("Predict Diamond Carat from Price"),
  sidebarLayout(
    sidebarPanel(
      # Price input: range 300-20000 USD, default 5051.
      sliderInput("sliderPrice", "How much can you spend in USD?",
                  300, 20000, value = 5051)
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Plot",
                 plotOutput("plot"),
                 h3("Predicted Diamond Carat"),
                 textOutput("pred")),
        tabPanel("Documentation", verbatimTextOutput("text")))
    )
  )
))
|
c43192226802a6a9a2cc6875fd1417b996435c5d
|
6594403b535237e0bc2137b3e929427df3a4b51f
|
/2016/RJ-2016-031.R
|
473e6d979cea7ac64c0e4301f06ebd6e79285190
|
[] |
no_license
|
MrDomani/WB2020
|
b42b7992493721fcfccef83ab29703b429e1a9b3
|
2612a9a3b5bfb0a09033aa56e3a008012b8df310
|
refs/heads/master
| 2022-07-13T03:32:29.863322
| 2020-05-18T17:25:43
| 2020-05-18T17:25:43
| 264,994,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,097
|
r
|
RJ-2016-031.R
|
var.eqn.x <- "(alpha * x) * (1 - (x / beta)) - ((delta * (x^2) * y) / (kappa + (x^2)))"
var.eqn.y <- "((gamma * (x^2) * y) / (kappa + (x^2))) - mu * (y^2)"
model.parms <- c(alpha = 1.54, beta = 10.14, delta = 1, gamma = 0.476,
kappa = 1, mu = 0.112509)
parms.eqn.x <- Model2String(var.eqn.x, parms = model.parms)
## Do not print to screen.
parms.eqn.y <- Model2String(var.eqn.y, parms = model.parms, supress.print = TRUE)
model.state <- c(x = 1, y = 2)
model.sigma <- 0.05
model.time <- 1000 # we used 12500 in the figures
model.deltat <- 0.025
ts.ex1 <- TSTraj(y0 = model.state, time = model.time, deltat = model.deltat,
x.rhs = parms.eqn.x, y.rhs = parms.eqn.y, sigma = model.sigma)
## Could also use TSTraj to combine equation strings and parameter values.
## ts.ex1 <- TSTraj(y0 = model.state, time = model.time, deltat = model.deltat,
## x.rhs = var.eqn.x, y.rhs = var.eqn.y, parms = model.parms, sigma = model.sigma)
TSPlot(ts.ex1, deltat = model.deltat) # Figure 2
TSPlot(ts.ex1, deltat = model.deltat, dim = 2) # Figure 3a
TSDensity(ts.ex1, dim = 1) # like Figure 2 histogram
TSDensity(ts.ex1, dim = 2) # Figure 3b
Φ
## If not done in a previous step.
parms.eqn.x <- Model2String(var.eqn.x, parms = model.parms)
## Do not print to screen.
parms.eqn.y <- Model2String(var.eqn.y, parms = model.parms, supress.print = TRUE)
## Could also input the values by hand and use this version.
## parms.eqn.x <- "1.54 * x * (1.0 - (x / 10.14)) - (y * (x^2)) / (1.0 + (x^2))"
## parms.eqn.y <- "((0.476 * (x^2) * y) / (1 + (x^2))) - 0.112509 * (y^2)"
eq1.x <- 1.40491
eq1.y <- 2.80808
eq2.x <- 4.9040
eq2.y <- 4.06187
[
]
[
]
bounds.x <- c(-0.5, 20.0)
bounds.y <- c(-0.5, 20.0)
=
=
step.number.x <- 1000
step.number.y <- 1000 # we used 4100 in the figures
eq1.local <- QPotential(x.rhs = parms.eqn.x, x.start = eq1.x, x.bound = bounds.x,
x.num.steps = step.number.x, y.rhs = parms.eqn.y, y.start = eq1.y,
y.bound = bounds.y, y.num.steps = step.number.y)
eq2.local <- QPotential(x.rhs = parms.eqn.x, x.start = eq2.x, x.bound = bounds.x,
x.num.steps = step.number.x, y.rhs = parms.eqn.y, y.start = eq2.y,
y.bound = bounds.y, y.num.steps = step.number.y)
Φ
(
)
= Φ
ex1.global <- QPGlobal(local.surfaces = list(eq1.local, eq2.local),
unstable.eq.x = c(0, 4.2008), unstable.eq.y = c(0, 4.0039),
x.bound = bounds.x, y.bound = bounds.y)
QPContour(surface = ex1.global, dens = c(1000, 1000), x.bound = bounds.x,
y.bound = bounds.y, c.parm = 5) # right side of Figure 4
∂Φ
∂Φ
+
## Calculate all three vector fields.
VDAll <- VecDecomAll(surface = ex1.global, x.rhs = parms.eqn.x, y.rhs = parms.eqn.y,
x.bound = bounds.x, y.bound = bounds.y)
## Plot the deterministic skeleton vector field.
VecDecomPlot(x.field = VDAll[, , 1], y.field = VDAll[, , 2], dens = c(25, 25),
x.bound = bounds.x, y.bound = bounds.y, xlim = c(0, 11), ylim = c(0, 6),
arrow.type = "equal", tail.length = 0.25, head.length = 0.025)
## Plot the gradient vector field.
VecDecomPlot(x.field = VDAll[, , 3], y.field = VDAll[, , 4], dens = c(25, 25),
x.bound = bounds.x, y.bound = bounds.y, arrow.type = "proportional",
tail.length = 0.25, head.length = 0.025)
## Plot the remainder vector field.
VecDecomPlot(x.field = VDAll[, , 5], y.field = VDAll[, , 6], dens = c(25, 25),
x.bound = bounds.x, y.bound = bounds.y, arrow.type = "proportional",
tail.length = 0.35, head.length = 0.025)
var.eqn.x <- "- (y - beta) + mu * (x - alpha) * (1 - (x - alpha)^2 - (y - beta)^2)"
var.eqn.y <- "(x - alpha) + mu * (y - beta) * (1 - (x - alpha)^2 - (y - beta)^2)"
model.state <- c(x = 3, y = 3)
model.parms <- c(alpha = 4, beta = 5, mu = 0.2)
model.sigma <- 0.1
model.time <- 1000 # we used 2500 in the figures
model.deltat <- 0.005
ts.ex2 <- TSTraj(y0 = model.state, time = model.time, deltat = model.deltat,
x.rhs = var.eqn.x, y.rhs = var.eqn.y, parms = model.parms, sigma = model.sigma)
TSPlot(ts.ex2, deltat = model.deltat) # Figure 8
TSPlot(ts.ex2, deltat = model.deltat, dim = 2, line.alpha = 25) # Figure 9a
TSDensity(ts.ex2, dim = 1) # Histogram
TSDensity(ts.ex2, dim = 2) # Figure 9b
eqn.x <- Model2String(var.eqn.x, parms = model.parms)
eqn.y <- Model2String(var.eqn.y, parms = model.parms)
eq1.qp <- QPotential(x.rhs = eqn.x, x.start = 4.15611, x.bound = c(-0.5, 7.5),
x.num.steps = 4000, y.rhs = eqn.y, y.start = 5.98774, y.bound = c(-0.5, 7.5),
y.num.steps = 4000)
Φ
QPContour(eq1.qp, dens = c(1000, 1000), x.bound = c(-0.5, 7.5),
y.bound = c(-0.5, 7.5), c.parm = 10)
(
)
(
)
var.eqn.x <- "x * ((1 + alpha1) - (x^2) - x * y - (y^2))"
var.eqn.y <- "y * ((1 + alpha2) - (x^2) - x * y - (y^2))"
model.state <- c(x = 0.5, y = 0.5)
model.parms <- c(alpha1 = 1.25, alpha2 = 2)
model.sigma <- 0.8
model.time <- 5000
model.deltat <- 0.01
ts.ex3 <- TSTraj(y0 = model.state, time = model.time, deltat = model.deltat,
x.rhs = var.eqn.x, y.rhs = var.eqn.y, parms = model.parms, sigma = model.sigma)
TSPlot(ts.ex3, deltat = model.deltat) # Figure 12
TSPlot(ts.ex3, deltat = model.deltat, dim = 2 , line.alpha = 25) # Figure 13a
TSDensity(ts.ex3, dim = 1) # Histogram of time series
TSDensity(ts.ex3, dim = 2 , contour.levels = 20 , contour.lwd = 0.1) # Figure 13b
equation.x <- Model2String(var.eqn.x, parms = model.parms)
equation.y <- Model2String(var.eqn.y, parms = model.parms)
bounds.x <- c(-3, 3); bounds.y <- c(-3, 3)
step.number.x <- 6000; step.number.y <- 6000
eq1.x <- 0; eq1.y <- -1.73205
eq2.x <- 0; eq2.y <- 1.73205
eq1.local <- QPotential(x.rhs = equation.x, x.start = eq1.x, x.bound = bounds.x,
x.num.steps = step.number.x, y.rhs = equation.y, y.start = eq1.y,
y.bound = bounds.y, y.num.steps = step.number.y)
eq2.local <- QPotential(x.rhs = equation.x, x.start = eq2.x, x.bound = bounds.x,
x.num.steps = step.number.x, y.rhs = equation.y, y.start = eq2.y,
y.bound = bounds.y, y.num.steps = step.number.y)
(
)
(
)
Φ
(
Φ
)
(
Φ
)
= Φ
= Φ
ex3.global <- QPGlobal(local.surfaces = list(eq1.local, eq2.local),
unstable.eq.x = c(0, -1.5, 1.5), unstable.eq.y = c(0, 0, 0), x.bound = bounds.x,
y.bound = bounds.y)
QPContour(ex3.global, dens = c(1000, 1000), x.bound = bounds.x, y.bound = bounds.y,
c.parm = 5)
(
)
(
)
(
)
(
)
(
)
(
)
|
545460b8c36cb5a7ddb743d8308cb4879c136e81
|
96b20aa9bc050c778a2814de604a0c6478f83030
|
/R/crii.R
|
cfd3359d7fbe82392bd84700cf8dae3029d0ea8a
|
[] |
no_license
|
IvanRicarte/Evidasp
|
83723b64fcada4dc78e08130224c0a982038947c
|
2b5fbd6864d5379aed92d1649bf2219b7cea1ece
|
refs/heads/master
| 2020-06-04T11:57:44.416797
| 2015-04-13T19:22:03
| 2015-04-13T19:22:03
| 30,421,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 742
|
r
|
crii.R
|
## Compute the CRII score per abstract ("Resumo").
##
## `relev` is a data frame with a Resumo column (abstract id) and a
## Relevancia column coded 2 = totally relevant, 1 = partially relevant,
## 0 = not relevant. For each abstract the score is
##   CRII = 2 * tot * (tot + par) / (N * (2 * tot + par))
## and 0 when there are no (partially) relevant ratings.
## Returns one row per abstract with columns resumo, N, tot, par, not, CRII.
crii <- function(relev) {
  summaries <- levels(as.factor(relev$Resumo))
  out <- data.frame(resumo = summaries, N = 0, tot = 0, par = 0, not = 0, CRII = 0)
  for (idx in seq_along(summaries)) {
    ## Relevance ratings for this abstract only.
    ratings <- relev$Relevancia[which(relev$Resumo == summaries[idx])]
    fully <- sum(ratings == 2)
    partly <- sum(ratings == 1)
    none <- sum(ratings == 0)
    n <- fully + partly + none
    score <- 0
    if (fully + partly > 0) {
      score <- (2 * fully * (fully + partly)) / (n * (2 * fully + partly))
    }
    out[idx, 2:6] <- c(n, fully, partly, none, score)
  }
  out
}
|
6864f6824fd97b8ef38bf2eec7f29ab623d91bcc
|
bf7836dca436a6cb1a5c4e3df5781914dff087ad
|
/scATAC/Sankey_Diagram.R
|
5b1e75ba9a8a04704e2d2dbbbd6e0beada05c78a
|
[] |
no_license
|
zang-lab/Single-cell-chromatin-profiling-of-the-primitive-gut-tube
|
690e910c96e630d34cd2c580179d7caa176e052d
|
fea12666396c0f3adaaf2b7c42974bb97f8df843
|
refs/heads/main
| 2023-03-31T01:52:44.850155
| 2021-04-01T17:30:27
| 2021-04-01T17:30:27
| 366,777,354
| 3
| 0
| null | 2021-05-12T16:15:37
| 2021-05-12T16:15:36
| null |
UTF-8
|
R
| false
| false
| 2,474
|
r
|
Sankey_Diagram.R
|
library(ArchR)
library(tibble)
library(dplyr)
library(tidyr)
library(networkD3)
library(htmlwidgets)
set.seed(1)
# Load the saved ArchR project and its per-cell metadata.
proj <- loadArchRProject(path = "HemeTutorial")
Cdata <- getCellColData(ArchRProj = proj, select = NULL, drop = FALSE)
# label1: manual super-cluster assignment; label2: predicted E9.5 group.
label1 <- Cdata$superClusters
label2 <- as.character(Cdata$predictedGroup_un_E95)
# Merge colon and unidentified cells into one predicted category, and tag the
# predicted pharynx label with "*" to keep it distinct from the super-cluster.
label2[which(Cdata$superClusters=="colon" | Cdata$superClusters=="unidentified")] <- "colon&unidentified"
label2[which(label2=="pharynx")] <- "pharynx*"
names(label1) <- rownames(Cdata)
names(label2) <- rownames(Cdata)
#exclude colon and unidentified
celltypes1 <- c("pharynx","esophagus","lung","stomach","liver","pancreas","intestine")
celltypes2 <- c("pharynx*","esophagus-2","esophagus-1","respiratory-lung","respiratory-trachea","anaterior stomach","posterior stomach","liver-hepatocyte","hepatoblast","dorsal pancreas","duodenum")
# Build the link table: one row per (super-cluster, predicted-group) pair with
# a positive number of shared cells. The dummy first row is dropped below.
# NOTE(review): growing a data frame with rbind in a double loop works here
# (7 x 11 pairs) but would not scale; a preallocated matrix would be cleaner.
links <- data.frame(source=0,target=0,value=0)
for(i in celltypes1){
  for(j in celltypes2){
    strength <- length(intersect(which(label1==i),which(label2==j)))
    if(strength==0){
      next
    }else{
      links <- rbind(links,c(i,j,strength))
    }
  }
}
links <- links[-1,]
# Node table: sources (celltypes1) first, then targets; `target` marks which
# side each node sits on so labels can be flipped in the onRender hook below.
nodes <- data.frame(name=c(celltypes1,celltypes2))
nodes$target=c(rep(TRUE,7),rep(FALSE,11))
# sankeyNetwork wants zero-based node indices.
links$IDsource <- match(links$source, nodes$name)-1
links$IDtarget <- match(links$target, nodes$name)-1
pal=c("#E6C2DC","#F47D2B","#89C75F","#C06CAB","#90D5E4","#FEE500","#D51F26","#E6C2DC","#F47D2B","#D8A767","#89C75F","#208A42","#89288F","#C06CAB","#90D5E4","#8A9FD1","#FEE500","#D51F26")
# d3 ordinal scale matching `pal`, passed straight through to networkD3.
my_color = 'd3.scaleOrdinal().domain([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]).range(["#E6C2DC","#F47D2B","#89C75F","#C06CAB","#90D5E4","#FEE500","#D51F26","#E6C2DC","#F47D2B","#D8A767","#89C75F","#208A42","#89288F","#C06CAB","#90D5E4","#8A9FD1","#FEE500","#D51F26"])'
# Make the Network
library(networkD3)
p <- sankeyNetwork(Links = links, Nodes = nodes,
                   Source = "IDsource", Target = "IDtarget",
                   Value = "value", NodeID = "name", iterations = 0,
                   sinksRight=FALSE, colourScale = my_color, fontSize=14, nodeWidth = 15, height = 800, width = 800)
p$x$nodes$target <- nodes$target
# Move labels of source-side nodes to the left of the node rectangle.
p <- onRender(p,
  '
  function(el) {
    d3.select(el)
      .selectAll(".node text")
      .filter(d => d.target)
      .attr("x", -6)
      .attr("text-anchor", "end");
  } '
)
saveRDS(p,"Labels_sankey_diagram.rds")
|
755f1d3dcf1e2710b1a8b0e2b60a318f4122f4f6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RHPCBenchmark/examples/SparseQrMicrobenchmark.Rd.R
|
2f26b5b0f0bc99edb367db810791b0ab5d50879c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 607
|
r
|
SparseQrMicrobenchmark.Rd.R
|
library(RHPCBenchmark)
### Name: SparseQrMicrobenchmark
### Title: Conducts a single performance trial with the QR factorization
### sparse matrix kernel
### Aliases: SparseQrMicrobenchmark
### ** Examples
## Not run:
##D # Allocate input to the QR factorization microbenchmark for the
##D # Maragal_6 matrix
##D microbenchmarks <- GetSparseQrDefaultMicrobenchmarks()
##D kernelParameters <- SparseQrAllocator(microbenchmarks[["qr_Maragal_6"]], 1)
##D # Execute the microbenchmark
##D timings <- SparseQrMicrobenchmark(
##D microbenchmarks[["qr_Maragal_6"]], kernelParameters)
## End(Not run)
|
025a99dfd37fe9aff38996faaae9a26cc86a38f2
|
88ec4702de41df1fbdcd0c38cf06f8fb6461589c
|
/graphics.R
|
29422416791ce32195037f10fcfe82fcbcb5a2e7
|
[] |
no_license
|
gwb/parallelMCMC
|
f14310ff93c8bd1f7d6e3ec8b54aa0fcd1c496b6
|
211f8583af40cf7886fa821283f1f5fb9f6bc721
|
refs/heads/master
| 2021-03-16T06:53:38.382346
| 2015-05-23T20:46:28
| 2015-05-23T20:46:28
| 20,383,560
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,158
|
r
|
graphics.R
|
require(ggplot2)
# # # # # # #
# FUNCTIONS #
# # # # # # #
## Parse interval strings of the form "(a,b)" into an n x 2 numeric matrix,
## one row per interval (column 1 = lower bound, column 2 = upper bound).
## Returns NULL for an empty input, matching what the original rbind-loop
## produced.
##
## Improvement: the original grew the matrix with rbind() inside a loop
## (quadratic copying); vapply() builds the result in one pass and also
## enforces that each string parses to exactly two numbers.
get.numerical.intervals <- function(intervals) {
  if (length(intervals) == 0) {
    return(NULL)
  }
  parse_one <- function(iv) {
    ## Strip the surrounding parentheses, then split on the comma.
    as.numeric(strsplit(substr(iv, 2, nchar(iv) - 1), ",")[[1]])
  }
  ## vapply gives a 2 x n matrix (one column per interval); transpose to rows.
  t(vapply(intervals, parse_one, numeric(2), USE.NAMES = FALSE))
}
## Shade the current plot with one colored vertical band per cluster interval.
## `clust.ints` is a numeric matrix whose rows are (x-start, x-end, cluster id);
## `my.colors` maps cluster id -> fill color (the translucent palette above).
## Must be called with a plot device already open, since it reads par('usr').
add.rects <- function(clust.ints, my.colors){
  # Current plot limits; bands extend 1 unit past the top/bottom.
  lim <- par('usr')
  # NOTE(review): seq(nrow(...)) yields c(1, 0) when there are zero rows --
  # assumes clust.ints always has at least one row.
  for(i in seq(nrow(clust.ints))){
    rect(clust.ints[i,1], lim[3]-1, clust.ints[i,2], lim[4]+1, col=my.colors[clust.ints[i,3]], border=NA)
  }
}
# color scheme with transparency
my.cols <- c(rgb(1,0,0,alpha=0.2), rgb(0,1,0, alpha=0.2), rgb(0,0,1, alpha=0.2))
# plot partition (for 2D data)
## Evaluate a clustering function on the full (xs x ys) grid.
## Returns a data frame with three unnamed columns: the x coordinate, the
## y coordinate, and the cluster label `cluster.fn` assigns to that point.
get.partition.colors <- function(xs, ys, cluster.fn) {
  grid <- expand.grid(xs, ys)
  ## One cluster label per grid row; cluster.fn receives the (x, y) pair.
  labels <- apply(grid, 1, cluster.fn)
  grid <- cbind(grid, labels)
  names(grid) <- NULL
  grid
}
# plots the contours of a bivariate density
# plots the contours of a bivariate density
## `fn` is evaluated point-wise on a bins.per.dim x bins.per.dim grid covering
## x1range x x2range -- it only needs to accept a single c(x1, x2) pair, so it
## does not have to be vectorized. Draws a fresh contour plot (add = F).
plot.bivariate.density <- function(fn, x1range=c(-10,10), x2range=c(-10,10), bins.per.dim=100){
  x1 <- seq(x1range[1], x1range[2], length.out = bins.per.dim)
  x2 <- seq(x2range[1], x2range[2], length.out = bins.per.dim)
  # Grid of density values; z2[i, j] = fn evaluated at (x1[i], x2[j]).
  z2 <- matrix(0, nrow=bins.per.dim, ncol=bins.per.dim)
  for(i in seq(1,bins.per.dim)){
    for(j in seq(1,bins.per.dim)){
      z2[i,j] <- fn(c(x1[i], x2[j]))
    }
  }
  contour(list(x=x1,y=x2,z=z2), col='red',add=F)
}
## Overlay the partition induced by `cluster.fn` (colored tiles on integer
## grid points) with 2D density contours of draws from `rtarget`.
## `rtarget(n)` must return an n x 2 sample from the target distribution.
## Returns the ggplot object rather than printing it.
## NOTE(review): tiles are only evaluated at integer grid points within the
## ranges, so the partition picture is coarse by design.
plot.cluster.and.target <- function(cluster.fn, rtarget, x1range=c(-10,10), x2range=c(-10,10), target.draws=10000){
  pcolors <- get.partition.colors(seq(x1range[1],x1range[2]), seq(x2range[1], x2range[2]), cluster.fn)
  dt <- data.frame(pcolors)
  names(dt) <- c('x','y','z')
  # Sample from the target for the density contours.
  dt2 <- data.frame(rtarget(target.draws))
  names(dt2) <- c('x','y')
  g <- ggplot(data=dt2, aes(x,y)) + stat_density2d() + geom_tile(data=dt, aes(x,y,fill=factor(z), alpha=0.2))
  return(g)
}
## Project high-dimensional points through each 2D projection matrix and
## return tidy data frames for plotting.
##
## dt         -- n x d numeric matrix of points (rows = observations)
## cluster.fn -- function mapping one row of dt to a (numeric) cluster label
## projmat.ls -- list of at least `nproj` 2 x d projection matrices
## centers    -- optional matrix of cluster centers to project as well
## nproj      -- number of projections to use
##
## Returns list(projdt, projmat.ls) -- or list(projdt, projmat.ls, projcenters)
## when centers are supplied -- where projdt has columns x, y, clust, proj
## (proj = index of the projection the row came from).
##
## Fix: `centers` previously had no default, so callers that omitted it
## (e.g. plot.multiple.vhd.clusters passing only nproj) hit an
## "argument missing" error at `!is.null(centers)`; it now defaults to NULL.
.plot.vhd.clusters <- function(dt, cluster.fn, projmat.ls, centers = NULL, nproj = 5) {
  clusters <- apply(dt, 1, cluster.fn)

  ## Project the data through each of the first `nproj` matrices.
  projdt.ls <- vector('list', nproj)
  for (i in seq_len(nproj)) {
    projdt.ls[[i]] <- t(projmat.ls[[i]] %*% t(dt))
  }

  if (!is.null(centers)) {
    proj.centers.ls <- vector('list', nproj)
    for (i in seq_len(nproj)) {
      proj.centers.ls[[i]] <- t(projmat.ls[[i]] %*% t(centers))
    }
  }

  ## Stack all projections and tag each row with its cluster and projection id.
  projdt <- do.call('rbind', projdt.ls)
  projdt <- data.frame("x" = projdt[, 1],
                       "y" = projdt[, 2],
                       "clust" = rep(clusters, nproj),
                       "proj" = rep(seq_len(nproj), each = nrow(dt)))

  if (is.null(centers)) {
    return(list(projdt, projmat.ls))
  }

  centers.clusters <- apply(centers, 1, cluster.fn)
  projcenters <- do.call('rbind', proj.centers.ls)
  projcenters <- data.frame("x" = projcenters[, 1],
                            "y" = projcenters[, 2],
                            "clust" = rep(centers.clusters, nproj),
                            "proj" = rep(seq_len(nproj), each = nrow(centers)))
  list(projdt, projmat.ls, projcenters)
}
## Project the same data through a shared set of random 2D projections for
## several clustering functions, so the clusterings can be compared on
## identical views.
##
## dt            -- n x d numeric matrix of points
## cluster.fn.ls -- list of clustering functions (row of dt -> label)
## centers.ls    -- optional list of center matrices, one per clustering
## nproj         -- number of random orthonormal 2 x d projections to draw
##
## Returns list(projdt, centersdt); each data frame gains an `ind` column
## identifying which clustering function produced the rows. `centersdt`
## is NULL when centers.ls is NULL.
##
## Fixes: (1) with centers.ls = NULL the original never created
## `centersdt.ls`, so `do.call('rbind', centersdt.ls)` failed on an
## undefined object; (2) an explicit NULL is now passed for `centers`
## so the helper call works whether or not the helper has a default.
plot.multiple.vhd.clusters <- function(dt, cluster.fn.ls, centers.ls = NULL, nproj = 5) {
  dt.dim <- ncol(dt)

  ## One shared set of random orthonormal 2 x d projection matrices.
  projmat.ls <- vector('list', nproj)
  for (i in seq_len(nproj)) {
    projmat.ls[[i]] <- t(qr.Q(qr(matrix(rnorm(2 * dt.dim), nrow = dt.dim, ncol = 2))))
  }

  n.fns <- length(cluster.fn.ls)
  projdt.ls <- vector('list', n.fns)
  centersdt.ls <- if (is.null(centers.ls)) NULL else vector('list', n.fns)

  for (i in seq_len(n.fns)) {
    if (is.null(centers.ls)) {
      res.dt <- .plot.vhd.clusters(dt, cluster.fn.ls[[i]], projmat.ls, NULL, nproj)
    } else {
      res.dt <- .plot.vhd.clusters(dt, cluster.fn.ls[[i]], projmat.ls, centers.ls[[i]], nproj)
      centersdt.ls[[i]] <- res.dt[[3]]
      centersdt.ls[[i]]$ind <- i
    }
    projdt.ls[[i]] <- res.dt[[1]]
    projdt.ls[[i]]$ind <- i
  }

  projdt <- do.call('rbind', projdt.ls)
  centersdt <- if (is.null(centersdt.ls)) NULL else do.call('rbind', centersdt.ls)
  list(projdt, centersdt)
}
# projects the data on many different 2D subspaces
# in the hope to show different features
# projects the data on many different 2D subspaces
# in the hope to show different features
## Public variant of .plot.vhd.clusters that also draws its own projection
## matrices. Cluster labels come either from `cluster.fn` (applied to each
## row of dt) or, if given, directly from `clust`.
## Returns list(projdt, projmat.ls) or, with centers, an additional
## projcenters data frame; projdt/projcenters have columns x, y, clust, proj.
## NOTE(review): nproj is fixed at 5 here, unlike the sibling helpers that
## take it as an argument.
plot.vhd.clusters <- function(dt, cluster.fn = NULL, clust=NULL, centers=NULL){
  if(is.null(clust)){
    clusters <- apply(dt, 1, cluster.fn)
  } else {
    clusters <- clust
  }
  nproj <- 5
  dt.dim <- ncol(dt)
  # Random orthonormal 2 x d projection matrices (Q factor of a random matrix).
  projmat.ls <- vector('list',nproj)
  for( i in seq(nproj)){
    projmat.ls[[i]] <- t(qr.Q(qr(matrix(rnorm(2 * dt.dim), nrow=dt.dim, ncol=2))))
  }
  # Project the data through each matrix.
  projdt.ls <- vector('list',nproj)
  for( i in seq(nproj)){
    projdt.ls[[i]] <- t(projmat.ls[[i]] %*% t(dt))
  }
  if(!is.null(centers)){
    proj.centers.ls <- NULL
    for(i in seq(nproj)){
      proj.centers.ls[[i]] <- t(projmat.ls[[i]] %*% t(centers))
    }
  }
  N <- nrow(dt)
  # Stack all projections; append cluster labels and the projection index.
  projdt <- do.call('rbind', projdt.ls)
  projdt <- cbind(projdt, rep(clusters, nproj))
  projdt <- cbind(projdt, rep(seq(nproj),each=nrow(dt)))
  projdt <- data.frame("x"=projdt[,1],
                       "y"=projdt[,2],
                       "clust"=projdt[,3],
                       "proj"=projdt[,4])
  if(!is.null(centers)){
    # Same stacking for the projected cluster centers; the centers are
    # re-labelled through cluster.fn (assumes cluster.fn was supplied).
    centers.clusters <- apply(centers, 1, cluster.fn)
    projcenters <- do.call('rbind', proj.centers.ls)
    projcenters <- cbind(projcenters, rep(centers.clusters,nproj))
    projcenters <- cbind(projcenters, rep(seq(nproj), each=nrow(centers)))
    projcenters <- data.frame("x"=projcenters[,1],
                              "y"=projcenters[,2],
                              "clust"=projcenters[,3],
                              "proj"=projcenters[,4])
  }
  #ggplot(data=projdt, aes(x,y)) + geom_point(aes(color=factor(clust))) + facet_wrap(.~proj)
  if(is.null(centers)){
    return(list(projdt,projmat.ls))
  } else {
    return(list(projdt, projmat.ls, projcenters))
  }
}
## Project high-dimensional data onto `nproj` random 2D subspaces, without
## any cluster labels. Optionally projects a matrix of `centers` through the
## same subspaces.
## Returns list(projdt, projmat.ls) or list(projdt, projmat.ls, projcenters);
## the data frames have columns x, y, proj (proj = projection index).
plot.vhd.no.clusters <- function(dt, centers = NULL, nproj = 5) {
  d <- ncol(dt)

  ## Random orthonormal 2 x d projections (Q factor of a random d x 2 matrix).
  projmat.ls <- lapply(seq_len(nproj), function(k) {
    t(qr.Q(qr(matrix(rnorm(2 * d), nrow = d, ncol = 2))))
  })

  ## Project the data (and the centers, if any) through every matrix.
  projdt.ls <- lapply(projmat.ls, function(pm) t(pm %*% t(dt)))
  have.centers <- !is.null(centers)
  if (have.centers) {
    proj.centers.ls <- lapply(projmat.ls, function(pm) t(pm %*% t(centers)))
  }

  stacked <- do.call('rbind', projdt.ls)
  projdt <- data.frame("x" = stacked[, 1],
                       "y" = stacked[, 2],
                       "proj" = rep(seq_len(nproj), each = nrow(dt)))

  if (!have.centers) {
    return(list(projdt, projmat.ls))
  }

  stacked.c <- do.call('rbind', proj.centers.ls)
  projcenters <- data.frame("x" = stacked.c[, 1],
                            "y" = stacked.c[, 2],
                            "proj" = rep(seq_len(nproj), each = nrow(centers)))
  list(projdt, projmat.ls, projcenters)
}
## Project data onto `nproj` random 2D subspaces, attaching caller-supplied
## cluster labels `clust` (one per row of dt).
## Returns list(projdt, projdt.ls, projmat.ls): the stacked data frame with
## columns x, y, clust, proj; the per-projection matrices of projected
## points; and the projection matrices themselves.
plot.vhd.simple <- function(dt, clust, nproj = 5) {
  d <- ncol(dt)

  ## Random orthonormal 2 x d projections.
  projmat.ls <- lapply(seq_len(nproj), function(k) {
    t(qr.Q(qr(matrix(rnorm(2 * d), nrow = d, ncol = 2))))
  })

  projdt.ls <- lapply(projmat.ls, function(pm) t(pm %*% t(dt)))

  ## Stack all projections; labels repeat per projection, and `proj` records
  ## which projection each row came from.
  stacked <- do.call('rbind', projdt.ls)
  projdt <- data.frame("x" = stacked[, 1],
                       "y" = stacked[, 2],
                       "clust" = rep(clust, nproj),
                       "proj" = rep(seq_len(nproj), each = nrow(dt)))

  list(projdt, projdt.ls, projmat.ls)
}
## Project two data sets (e.g. samples and a "bridge" between modes) through
## the same `nproj` random 2D subspaces so they can be overlaid.
## dt1 and dt2 must have the same number of columns.
## Returns a single data frame with columns x, y, proj, dt, where dt = 1
## marks rows from dt1 and dt = 2 rows from dt2.
plot.dt.and.bridge <- function(dt1, dt2, nproj = 5) {
  d <- ncol(dt1)

  ## Shared random orthonormal 2 x d projections, used for both data sets.
  projmat.ls <- lapply(seq_len(nproj), function(k) {
    t(qr.Q(qr(matrix(rnorm(2 * d), nrow = d, ncol = 2))))
  })

  ## Project one data set through every matrix, stack the results, and tag
  ## each row with its projection index and the data-set id `tag`.
  project <- function(dat, tag) {
    pieces <- lapply(projmat.ls, function(pm) t(pm %*% t(dat)))
    stacked <- do.call('rbind', pieces)
    cbind(stacked,
          rep(seq_len(nproj), each = nrow(dat)),
          rep(tag, nrow(dat) * nproj))
  }

  combined <- rbind(project(dt1, 1), project(dt2, 2))
  data.frame("x" = combined[, 1],
             "y" = combined[, 2],
             "proj" = combined[, 3],
             "dt" = combined[, 4])
}
# # # # # # #
# EXAMPLES #
# # # # # # #
# The examples below show how to plot some quantities of interest
# -> the target
# curve(dtarget, from=min.val, to = max.val, n=5000)
# -> kernel density estimation, from the draws
# plot(density(all.res[,1], n=2000, bw="bcv"))
# ->superimposing the target and the histogram
# curve(dtarget, from=min.val, to = max.val, n=5000, add=FALSE, col="blue")
# hist(all.res[,1], breaks=1000, col="red", freq=FALSE, add=TRUE)
# curve(dtarget, from=min.val, to = max.val, n=5000, add=TRUE, col="blue", lwd=2)
# -> progressively adding histogram (that's pretty cool)
# (proof of concept for live monitoring of mcmc)
#
# rand.res = sample(all.res[,1], length(all.draws))
# for(i in seq(1, 50000, by=100)){
# curve(dtarget, from=0, to = 10, n=5000, add=FALSE, col="blue")
# hist(c(rand.res[seq(1, i)], rep(10,50000-i)), breaks=1000, col="red", freq=FALSE, add=TRUE)
# S ys.sleep(0.5)
# }
#
# cfn <- proj.clustering(X, 1, 2)
# bli <- get.partition.colors(seq(-1, 5), seq(-1,5), cfn)
# dt <- data.frame(bli)
# names(dt) <- c('x','y','z')
# qplot(x,y,fill=factor(z), data=dt, geom="tile")
#
#
#
#
#
#
|
5c68e0b0cf412ff850842ea79d79529c29d4df0f
|
a4329f3a3ef994cc900b0a0037a7f736aa441707
|
/R_code/states.R
|
5d30d60f4c34d25fedf7bf10bfb7f90a5b02147f
|
[] |
no_license
|
aakashc/R-Project-Crime
|
f55d588ab7d18d5621dd9d3cba62113c88327121
|
4c03c4f7556822c129d0664294763d4d60323974
|
refs/heads/master
| 2021-01-25T10:14:18.796292
| 2014-02-27T17:31:29
| 2014-02-27T17:31:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,053
|
r
|
states.R
|
rm(list=ls()) # to remove all objects and clean the workspace
# Read Data
raw <- read.table('data/crimeAgainstWomen.csv', sep=',', check.names=F,header=T,fill=TRUE)
names(raw) <- c('state', 'crimeType', seq(2001,2012,1))
# Remove columns
clean <- raw[,c(1,2,8:14)]
# Remove Rows
clean <- subset(clean,!clean$state=='A & N ISLANDS')
clean <- subset(clean,!clean$state=='D & N HAVELI')
clean <- subset(clean,!clean$state=='PUDUCHERRY')
clean <- subset(clean,!clean$state=='LAKSHADWEEP')
clean <- subset(clean,!clean$state=='DAMAN & DIU')
clean <- subset(clean,!clean$state=='CHANDIGARH')
clean <- subset(clean,!clean$crimeType=='COMMISSION OF SATI PREVENTION ACT')
clean <- subset(clean,!clean$crimeType=='INDECENT REPRESENTATION OF WOMEN (P) ACT')
clean <- subset(clean,!clean$crimeType=='IMPORTATION OF GIRLS FROM FOREIGN COUNTRY')
row.names(clean) <- NULL
# convert to wide format
library(reshape2)
long <- melt(clean, id.vars = c("state", "crimeType"))
wide <- dcast(long, variable+state ~ crimeType)
wide <- wide[,c(2,10:3,1)]
names(wide)[2:10] <- c('rape','kidnapping','insult','immoral','dowry','dowry_deaths','cruelty','assult','year')
names(wide)
str(wide)
wide$rape <- as.numeric(wide$rape)
wide$kidnapping <- as.numeric(wide$kidnapping)
wide$insult <- as.numeric(wide$insult)
wide$immoral <- as.numeric(wide$immoral)
wide$dowry <- as.numeric(wide$dowry)
wide$dowry_deaths <- as.numeric(wide$dowry_deaths)
wide$cruelty <- as.numeric(wide$cruelty)
wide$assult <- as.numeric(wide$assult)
wide$year <- as.numeric(as.character(wide$year))
wide$state <- gsub('ODISHA', 'Orissa', wide$state)
# Capitalise the first letter of every word in a single string
# (used after tolower() to title-case state names, e.g. "west bengal" -> "West Bengal").
simpleCap <- function(x) {
  words <- strsplit(x, " ")[[1]]
  paste(toupper(substring(words, 1, 1)), substring(words, 2),
        sep = "", collapse = " ")
}
library(plyr)
# Title-case the state names ("ANDHRA PRADESH" -> "Andhra Pradesh")
wide$state <- tolower(wide$state)
wide$state <- sapply(wide$state, simpleCap)
# Save RDA file for 2012 - to be used for Shiny App (year column dropped)
dfs <- subset(wide, year==2012)
row.names(dfs) <- NULL
dfs <- dfs[,-10]
save(dfs, file='shiny/dfs.rda', compress=F, precheck=F)
# Calculate Total Crimes per state-year row (sum over the 8 crime columns)
TotalCrimes <- rowSums((wide[,2:9]))
wide$TotalCrimes <- TotalCrimes
wide <- wide[,c(1:9,11,10)]
# Calculate Total Crimes for each year (column sums across all states)
TOTAL <- ddply(wide, .(year), function(x) colSums(x[c(2:10)]))
# NOTE(review): `merge` shadows base::merge() from here on.
merge <- rbind.fill(wide,TOTAL)
# The per-year totals have no state name; label them "TOTAL"
merge$state <- replace(merge$state, is.na(merge$state), "TOTAL")
longNew <- melt(merge, id.vars = c("year", "state"))
names(longNew)[3:4] <- c('crimeType', 'cases')
###########################33
# Calculate %change of crimes for each state and each year
good<-function(x){((x - x[1])/x[1])*100}
# Year-over-year % change in TotalCrimes for every state (and the TOTAL row).
# Each pcXX_YY frame holds the two years' rows with `perchange` computed
# relative to the earlier year (so the earlier year's perchange is 0).
pc06_07 <- ddply(subset(merge, year==2006 | year==2007,c(1,10:11)), .(state), transform, perchange= round(good(TotalCrimes),2))
pc07_08 <- ddply(subset(merge, year==2007 | year==2008,c(1,10:11)), .(state), transform, perchange= round(good(TotalCrimes),2))
pc08_09 <- ddply(subset(merge, year==2008 | year==2009,c(1,10:11)), .(state), transform, perchange= round(good(TotalCrimes),2))
pc09_10 <- ddply(subset(merge, year==2009 | year==2010,c(1,10:11)), .(state), transform, perchange= round(good(TotalCrimes),2))
pc10_11 <- ddply(subset(merge, year==2010 | year==2011,c(1,10:11)), .(state), transform, perchange= round(good(TotalCrimes),2))
pc11_12 <- ddply(subset(merge, year==2011 | year==2012,c(1,10:11)), .(state), transform, perchange= round(good(TotalCrimes),2))
# Bind the six two-year frames side by side, keep one year column plus the
# six perchange columns, then keep only the later-year (2012) rows, which
# carry the 2011->2012 change, 2010->2011 change, etc.
x <- cbind(pc06_07,pc07_08,pc08_09,pc09_10,pc10_11,pc11_12)
y <- x[,c(1,3,4,7,8,11,12,15,16,19,20,23,24)]
z <- subset(y, year.5=='2012')
row.names(z) <- NULL
zz <- z[,c(1,3,5,7,9,11,13)]
names(zz)[2:7] <- c(2007:2012)
# Chart for % change in crimes for each state; shorten long state names so
# the x-axis labels fit.
m <- melt(zz, id.vars='state')
m$state <- gsub('Pradesh', '', m$state)
m$state <- gsub('Jammu & Kashmir', 'J&K', m$state)
m$state <- gsub('West Bengal', 'WB', m$state)
m$state <- gsub('Tamil Nadu', 'TN', m$state)
m$state <- gsub('Chhattisgarh', 'Chgarh', m$state)
library(rCharts)
p <- nPlot(value ~ state, group = "variable", data =m, type = "multiBarChart", height=500,width=1200)
p$chart(stacked=TRUE)
p$chart(reduceXTicks = FALSE)
p$xAxis(staggerLabels = TRUE)
#p$xAxis(rotateLabels=-90)
p$save('graph/crimePct.html', cdn = TRUE)
# Per-year extremes: which state had the largest increase / largest decrease.
zzzz<- ddply(melt(zz, id.vars='state'), .(variable), summarise, maxChange = max(value),state=state[which.max(value)])
zzzzz <- ddply(melt(zz, id.vars='state'), .(variable), summarise, minChange = min(value),state=state[which.min(value)])
# States with Maximum Crime Increase in percentage
library(rCharts)
sPlot1 <- nPlot(x = "state", y = "maxChange", group = "variable", data = zzzz, type = "multiBarChart")
sPlot1$save("graph/sPlot1.html",cdn=T)
# States with Maximum Crime Decrease in percentage
sPlot2 <- nPlot(x = "state", y = "minChange", group = "variable", data = zzzzz, type = "multiBarChart")
sPlot2$save("graph/sPlot2.html",cdn=T)
#########################
# Convert to Percentage for all the crimes:
# each crime count expressed as a percentage of the row's TotalCrimes.
rapePct <- round((merge$rape/merge$TotalCrimes)*100,1)
kidnappingPct <- round((merge$kidnapping/merge$TotalCrimes)*100,1)
insultPct <- round((merge$insult/merge$TotalCrimes)*100,1)
immoralPct <- round((merge$immoral/merge$TotalCrimes)*100,1)
dowryPct <- round((merge$dowry/merge$TotalCrimes)*100,1)
dowry_deathsPct <- round((merge$dowry_deaths/merge$TotalCrimes)*100,1)
crueltyPct <- round((merge$cruelty/merge$TotalCrimes)*100,1)
assultPct <- round((merge$assult/merge$TotalCrimes)*100,1)
# Always 100 by construction; kept so the percent table lines up with counts.
TotalCrimesPct <- round((merge$TotalCrimes/merge$TotalCrimes)*100,1)
dfPct <- data.frame(rapePct,kidnappingPct,insultPct,immoralPct,dowryPct,dowry_deathsPct,crueltyPct,assultPct,TotalCrimesPct)
# Merging Datasets: counts and percentages side by side
mergedData <- cbind(merge, dfPct)
dfOnlyPct <- mergedData[,c(1,11,12:20)]
dfOnlyCnt <- mergedData[,c(1:11)]
meltPct <- melt(dfOnlyPct, id.vars = c("state", "year"))
meltCnt <- melt(dfOnlyCnt, id.vars = c("state", "year"))
# NOTE(review): cbind relies on meltPct and meltCnt rows being in the same
# order -- holds here since both are melted from the same mergedData.
meltCom <- cbind(meltCnt, meltPct$value)
names(meltCom) <- c('state', 'year', 'crimeType', 'count', 'percent')
meltCom$state <- as.factor(meltCom$state)
##############################################
# Area plot for each crime from year 2006 to year 2012 (national TOTAL rows)
AreaTotPlot <- nPlot(count~year, group='crimeType', data=subset(meltCom, meltCom$state=="TOTAL"), type="stackedAreaChart")
AreaTotPlot$yAxis(tickFormat = "#! function(y) { return (y*100).toFixed(0)} !#")
AreaTotPlot$save("graph/AreaTotPlot.html",cdn=T)
# State Map - Crime count for each state (googleVis choropleth of India, 2012)
library(googleVis)
stateMap <- gvisGeoChart(data=subset(meltCom, meltCom$crimeType=="TotalCrimes" & meltCom$year==2012 & meltCom$state!='TOTAL'),locationvar="state", colorvar="count",options=list(title="XXX",region='IN',displayMode="region",resolution="provinces",colorAxis="{colors:['grey','yellow','green','blue','orange','red']}",width=600,height=600))
google <- print(stateMap, file="graph/google.html")
##################################################################################
# Find the Top Crime per Year: for each crime type and year, the state with
# the maximum count.
library(ggplot2)
topCrimeByYr <- ddply(subset(meltCom, meltCom$state!='TOTAL' & meltCom$crimeType!='TotalCrimes'), .(crimeType, year), summarise, maxCrime = max(count),state=state[which.max(count)])
sPlot3 <- ggplot(topCrimeByYr, aes(x = year, y = maxCrime, fill = state)) + geom_bar(stat='identity', position="dodge",width=0.4) + theme_bw() + facet_grid(.~crimeType) + theme(axis.text=element_text(size=7,face="bold"),plot.title = element_text(lineheight=.8, face="bold")) + ggtitle('Top Crime by State and Year')
sPlot3 <- sPlot3 + theme(plot.background=element_rect(fill="#FFF9AB"),panel.background=element_rect(fill='#4C2E11'),panel.grid.minor = element_blank(),panel.grid.major=element_blank())
####################################################################################
# Data Frame Containing data for the year 2012 (counts only, per state)
df2012 <- subset(meltCom, year==2012 & crimeType!='TotalCrimes' & state!='TOTAL')
row.names(df2012) <- NULL
df2012 <- df2012[,c(-2,-4)]
wide2012 <- dcast(df2012, state ~ crimeType )
######################
# Each state's share of total 2012 crimes
df2012CrimeP <- subset(meltCom, year==2012 & crimeType=='TotalCrimes')
row.names(df2012CrimeP) <- NULL
df2012CrimeP <- df2012CrimeP[,c(1,4)]
# NOTE(review): 243690 is hard-coded -- presumably the all-India TotalCrimes
# for 2012; safer to recompute it from the data. TODO confirm.
p <- sapply(df2012CrimeP[,2],function(x) ((x/243690)*100))
p <- round(p,1)
df2012CrimeP$perCrime <- p
# Drop row 30 (the TOTAL row) and keep state + percentage
df2012CrimeP <- df2012CrimeP[-30,c(1,3)]
save(df2012CrimeP, file='data/df2012CrimeP.rda')
#################################################################
# Correlation Plot of the 2012 crime-count columns
library(scales)
cormat <- cor(dfs[c(-1)])
cm <- melt(cormat)
names(cm)=c("Var1","Var2","CorrelationCoefficient")
cplot = rPlot(Var2 ~ Var1, color = 'CorrelationCoefficient', data = cm, type = 'tile', height = 600)
cplot$addParams(height = 450, width=1000)
cplot$guides("{color: {scale: {type: gradient2, lower: 'red', middle: 'white', upper: 'blue',midpoint: 0}}}")
cplot$guides(y = list(numticks = length(unique(cm$Var1))))
cplot$guides(x = list(numticks = 8))
cplot$addParams(staggerLabels=TRUE)
cplot$save("graph/corrmatplot.html",cdn=T)
# heatmap of variables and State UTs (counts rescaled to [0, 1] per crime type)
hmMelt <- ddply(melt(dfs),.(variable),transform,rescale=rescale(value))
names(hmMelt)=c("state","type","count","rescale")
hmap <- rPlot(state ~ type, color = 'rescale', data = hmMelt, type = 'tile')
hmap$addParams(height = 600, width=800)
hmap$guides(reduceXTicks = FALSE)
hmap$guides("{color: {scale: {type: gradient, lower: 'white', upper: 'red'}}}")
hmap$guides(y = list(numticks = length(unique(hmMelt$state))))
hmap$guides(x = list(numticks = 10))
hmap$save("graph/hmap.html",cdn=T)
# Clustering: k-means with 4 clusters on the numeric columns of dfs
set.seed(200)
kmeansdata <- kmeans(dfs[c(-1)],4)
meanvars <- aggregate(dfs[c(-1)],by=list(kmeansdata$cluster),FUN=mean)
clust <- data.frame(dfs, kmeansdata$cluster)
dPlots <- dPlot(x="state", y="kmeansdata.cluster",groups="kmeansdata.cluster",data=clust,type="bar",width=500,height=800,bounds = list(x=50, y=10, width=400, height=400))
dPlots$yAxis(type="addCategoryAxis")
dPlots$xAxis(type="addCategoryAxis",orderRule="kmeansdata.cluster")
dPlots$save("graph/dPlots.html",cdn=T)
##########################################
|
39a1769b9f4016cb5c6b673453b0379f1a8aa9de
|
f1a7ab41ba3ce33c01a30e7283339c6432f9745f
|
/man/permtest.Rd
|
3834b76173184665af05493108785c04c2238bec
|
[] |
no_license
|
vishalbelsare/xnet
|
3ccba84442ebbf411fd96dc5c02dfedfc794adbf
|
4093905ae81281b6cf81c6a3425bdaf884e78fb4
|
refs/heads/main
| 2023-05-30T09:50:59.545097
| 2021-06-03T13:04:23
| 2021-06-03T13:04:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,133
|
rd
|
permtest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_generics.R, R/Class_permtest.R,
% R/permtest.R
\name{permtest}
\alias{permtest}
\alias{print.permtest}
\alias{permtest,tskrrHeterogeneous-method}
\alias{permtest,tskrrHomogeneous-method}
\alias{permtest,tskrrTune-method}
\title{Calculate the relative importance of the edges}
\usage{
permtest(x, ...)
\method{print}{permtest}(x, digits = max(3L, getOption("digits") - 3), ...)
\S4method{permtest}{tskrrHeterogeneous}(
x,
n = 100,
permutation = c("both", "row", "column"),
exclusion = c("interaction", "row", "column", "both"),
replaceby0 = FALSE,
fun = loss_mse,
exact = FALSE
)
\S4method{permtest}{tskrrHomogeneous}(
x,
n = 100,
permutation = c("both"),
exclusion = c("interaction", "both"),
replaceby0 = FALSE,
fun = loss_mse,
exact = FALSE
)
\S4method{permtest}{tskrrTune}(x, permutation = c("both", "row", "column"), n = 100)
}
\arguments{
\item{x}{either a \code{\link{tskrr-class}} or a
\code{\link{tskrrTune-class}} object}
\item{...}{arguments passed to other methods}
\item{digits}{the number of digits shown in the output}
\item{n}{the number of permutations for every kernel matrix}
\item{permutation}{a character string that defines whether the row,
column or both kernel matrices should be permuted. Ignored in case of
a homogeneous network}
\item{exclusion}{the exclusion to be used in the \code{\link{loo}} function. See also \code{\link{get_loo_fun}}}
\item{replaceby0}{a logical value indicating whether \code{\link{loo}}
removes a value in the leave-one-out procedure or replaces it by zero.
See also \code{\link{get_loo_fun}}.}
\item{fun}{a function (or a character string with the name of a
function) that calculates the loss. See also \code{\link{tune}} and
\code{\link{loss_functions}}}
\item{exact}{a logical value that indicates whether or not an
exact p-value should be calculated, or be approximated based on
a normal distribution.}
}
\value{
An object of the class permtest.
}
\description{
This function does a permutation-based evaluation of the impact of
different edges on the final result. It does so by permuting the kernel
matrices, refitting the model and calculating a loss function.
}
\details{
The test involved uses a normal approximation. It assumes that under the
null hypothesis, the loss values are approximately normally distributed.
The cumulative probability of a loss as small or smaller than
the one found in the original model, is calculated based on a normal
distribution from which the mean and sd are calculated from the permutations.
}
\section{Warning}{
It should be noted that this normal approximation is an ad-hoc approach.
There's no guarantee that the actual distribution of the loss under the
null hypothesis is normal. Depending on the loss function, a significant
deviation from the theoretical distribution can exist. Hence this function should only
be used as a rough guidance in model evaluation.
}
\examples{
# Heterogeneous network
data(drugtarget)
mod <- tskrr(drugTargetInteraction, targetSim, drugSim)
permtest(mod, fun = loss_auc)
}
|
c7c4cc5948a4e18510cb4f65f6838d5a3870388c
|
886af47cad8dcd3920b556c7a0fe11265fc70648
|
/cachematrix.R
|
ab76c4aaf9d2d0a1607ad2a197b2ee3f3f0c976c
|
[] |
no_license
|
aflindst/ProgrammingAssignment2
|
48e0d1f781594c48e0e3c3645127610b08bb7402
|
b7bdbb9ca9e3f09d65ba7b81ca329e51211ae3b4
|
refs/heads/master
| 2021-01-16T20:59:16.357164
| 2014-07-27T21:51:01
| 2014-07-27T21:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
cachematrix.R
|
## Programming assignment 2
## Adam Lindstrom
## These function construct and modify a variant of the matrix object which is able to cache its own matrix inverse.
## Use them for large matrices with long solve() times.
## makeCacheMatrix() takes a matrix as its only argument and returns a CacheMatrix object
## the CachMatrix object has two locally defined variables:
## x, the matrix
## i, the cached inverse of x
## the CacheMatrix object is a list of 4 functions (methods):
## get() returns the value of x
## set(y) assigns the new value y to x, then reinitializes i
## getinv() returns the value of i
## set(inv) assigns a new value inv to i
## Example:
## test<-makeCacheMatrix(matrix(1:4, c(2,2)))
## test$get()
## Construct a "CacheMatrix": a list of four closures sharing a matrix `x`
## and its cached inverse. The cache starts empty and is invalidated by set().
##   set(y)      replace the matrix, clearing any cached inverse
##   get()       return the current matrix
##   setinv(inv) store a computed inverse in the cache
##   getinv()    return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    set = function(y) {
      # superassignment updates the enclosing environment shared by all
      # four closures, so every accessor sees the new matrix
      x <<- y
      inv_cache <<- NULL
    },
    get = function() x,
    setinv = function(inv) inv_cache <<- inv,
    getinv = function() inv_cache
  )
}
## cacheSolve(CacheMatrix) returns the inverse of a CacheMatrix.
## cacheSolve() uses the getinv() and setinv() methods of a CacheMatrix to:
## 1. check if the CachMatrix has a valid value from getinv(). If so, return the cached value.
## 2. If not, solve(), use setinv to cache the result of solve(), then return it.
## Example:
## test<-makeCacheMatrix(matrix(1:4, c(2,2)))
## cacheSolve(test)
## cacheSolve(test)
## Return the inverse of a CacheMatrix `x`, computing it with solve() and
## caching it on the first call, then reusing the cached value afterwards.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    message("computing inverse for the first time")
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
4b47d60f6023368aa31770682b244151c35ad18a
|
c864a03e8a5a9b86ab74bdc97912b53febbac09c
|
/man/plotContrastPHists.Rd
|
47bde582e2e687682e87124ba028c2157faabfd0
|
[] |
no_license
|
ComputationalProteomics/NormalyzerDE
|
95bcbc18e77b07c855355a06b9bf062c6783e5f4
|
6bc9f977820cca02e02e77c502581c48854e7c9f
|
refs/heads/master
| 2022-05-11T23:49:16.855983
| 2022-04-22T09:34:47
| 2022-04-22T09:34:47
| 127,118,099
| 14
| 5
| null | 2023-09-11T09:38:38
| 2018-03-28T09:38:27
|
R
|
UTF-8
|
R
| false
| true
| 684
|
rd
|
plotContrastPHists.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculateStatistics.R
\name{plotContrastPHists}
\alias{plotContrastPHists}
\title{Takes a NormalyzerStatistics instance and generates and prints a p-value
histogram for each contrast onto the viewport}
\usage{
plotContrastPHists(nst, jobName, currentLayout, pageno)
}
\arguments{
\item{nst}{NormalyzerDE statistics object.}
\item{jobName}{Name of processing run.}
\item{currentLayout}{Layout used for document.}
\item{pageno}{Current page number.}
}
\value{
None
}
\description{
Takes a NormalyzerStatistics instance and generates and prints a p-value
histogram for each contrast onto the viewport
}
\keyword{internal}
|
a7d51bacfa41206f4a47d94de5cf3cc41ed10e33
|
bfc4dde4eac32663f768e75edde091d20676308f
|
/man/summary.mitml.Rd
|
a1cb711e91c0ba10793ebab65400a29a23d126b6
|
[] |
no_license
|
simongrund1/mitml
|
f33b4fda8e929a8652146ca7bcd8011d34f9ebc6
|
4f1e20daccf45da1ee157b3e2e78d7b250fd8203
|
refs/heads/master
| 2023-03-21T09:20:56.636023
| 2023-03-10T15:42:30
| 2023-03-10T15:42:30
| 68,100,636
| 28
| 9
| null | 2021-10-05T09:25:07
| 2016-09-13T11:01:02
|
R
|
UTF-8
|
R
| false
| false
| 2,956
|
rd
|
summary.mitml.Rd
|
\name{summary.mitml}
\alias{summary.mitml}
\title{Summary measures for imputation models}
\description{
Provides summary statistics and additional information on imputations in objects of class \code{mitml}.
}
\usage{
\method{summary}{mitml}(object, n.Rhat = 3, goodness.of.appr = FALSE, autocorrelation = FALSE, ...)
}
\arguments{
\item{object}{An object of class \code{mitml} as produced by \code{panImpute} or \code{jomoImpute}.}
\item{n.Rhat}{(optional) An integer denoting the number of segments used for calculating the potential scale reduction factor. Default is \code{3}.}
\item{goodness.of.appr}{(optional) A logical flag indicating if the goodness of approximation should be printed. Default is \code{FALSE} (see 'Details').}
\item{autocorrelation}{(optional) A logical flag indicating if the autocorrelation should be printed. Default is \code{FALSE} (see 'Details').}
\item{\dots}{Not used.}
}
\details{
The \code{summary} method calculates summary statistics for objects of class \code{mitml} as produced by \code{\link{panImpute}} or \code{\link{jomoImpute}}.
The output includes the potential scale reduction factor (PSRF, or \eqn{\hat{R}}) and (optionally) the goodness of approximation and autocorrelation.
The PSRF is calculated for each parameter of the imputation model and can be used as a convergence diagnostic (Gelman and Rubin, 1992).
Calculation of the PSRFs can be suppressed by setting \code{n.Rhat = NULL}.
The PSRFs are not computed from different chains but by dividing each chain from the imputation phase into a number of segments as denoted by \code{n.Rhat}.
This is slightly different from the original method proposed by Gelman and Rubin.
The goodness of approximation measure indicates what proportion of the posterior standard deviation is due to simulation error.
This is useful for assessing the accuracy of the posterior summaries (e.g., the EAP).
The autocorrelation includes estimates of the autocorrelation in the chains at lag 1 (i.e., for consecutive draws) and for lags \eqn{k} and \eqn{2k}, where \eqn{k} is the number of iterations between imputations.
For lag \eqn{k} and \eqn{2k}, the autocorrelation is slightly smoothed to reduce the influence of noise on the estimates (see \code{\link{plot.mitml}}).
}
\value{
An object of class \code{summary.mitml}.
A print method is used for more readable output.
}
\references{
Gelman, A., and Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. \emph{Statistical Science, 7}, 457-472.
Hoff, P. D. (2009). \emph{A first course in Bayesian statistical methods}. New York, NY: Springer.
}
\author{Simon Grund}
\seealso{\code{\link{panImpute}}, \code{\link{jomoImpute}}, \code{\link{plot.mitml}}}
\examples{
data(studentratings)
fml <- ReadDis + SES ~ ReadAchiev + (1|ID)
imp <- panImpute(studentratings, formula = fml, n.burn = 1000, n.iter = 100, m = 5)
# print summary
summary(imp)
}
\keyword{methods}
|
4463a962ff9fa13b87cf29ff44343712a96fc5f7
|
ba649a390fed05e64ad956dee1ccea869a9f3ae5
|
/01.expr/03_test_b_all_samples.R
|
eebdae257c3968a628e05c0ffcc5ad474e1daadf
|
[
"MIT"
] |
permissive
|
zhangyupisa/autophagy-in-cancer
|
8d1ae581f5b5e17ad92b96812b4df6436e69de03
|
7510611e742ba451c9741ff3ca767c84bfce66f3
|
refs/heads/master
| 2020-03-28T17:09:28.092615
| 2018-09-13T10:29:40
| 2018-09-13T10:29:40
| 148,762,727
| 0
| 1
|
MIT
| 2018-09-14T08:55:44
| 2018-09-14T08:55:43
| null |
UTF-8
|
R
| false
| false
| 9,671
|
r
|
03_test_b_all_samples.R
|
# Differential-expression screen of autophagy (ATG) genes, tumor vs normal,
# across TCGA pan-cancer expression data ("test B": all samples, not paired).
library(magrittr)
library(ggplot2)
# processed path: pan-cancer expression object with a cancer_types column and
# a nested per-cancer expression tibble (see its use below)
tcga_path = "/home/cliu18/liucj/projects/6.autophagy/TCGA"
expr <- readr::read_rds(file.path(tcga_path, "pancan_expr_20160513.rds.gz"))
#output path
expr_path <- "/home/cliu18/liucj/projects/6.autophagy/02_autophagy_expr/"
# Read gene list (stored as rds); columns used below: symbol, type, color
gene_list <- readr::read_rds(file.path(expr_path, "rds_03_atg_gene_list.rds.gz"))
# Restrict one expression table to the ATG gene list; the left join keeps the
# gene-list order and yields NA rows for genes missing from the table.
filter_gene_list <- function(.x, gene_list) {
  gene_list %>%
    dplyr::select(symbol) %>%
    dplyr::left_join(.x, by = "symbol")
}
# Subset every cancer type's expression matrix to the ATG genes.
# The helper must be defined BEFORE this pipeline runs: purrr::map() looks
# up `filter_gene_list` immediately, so the original definition-after-use
# order fails with "object 'filter_gene_list' not found" in a fresh session.
expr %>%
  dplyr::mutate(filter_expr = purrr::map(expr, filter_gene_list, gene_list = gene_list)) %>%
  dplyr::select(-expr) -> gene_list_expr
# For one cancer type (.x = cancer name, .y = gene-by-sample expression
# tibble), run a per-gene tumor-vs-normal t-test and compute the tumor/normal
# fold change over ALL samples of each group (not matched pairs).
# Returns a tibble with cancer_types, symbol, entrez_id, Normal, Tumor, fc,
# p.value, fdr, n_normal, n_tumor -- or NULL when either group has fewer than
# 10 samples (or one group is missing entirely).
calculate_fc_pvalue_all_samples <- function(.x, .y) {
  # progress marker when mapping over many cancer types
  print(.x)
  .y %>% tibble::add_column(cancer_types = .x, .before = 1) -> df
  # Parse the sample barcodes (columns after cancer_types/symbol/entrez_id):
  # characters 1-12 identify the sample, and the first two digits of the
  # 4th dash-separated field give the sample-type code
  # ("01" -> Tumor, "11" -> Normal; everything else is discarded).
  samples <-
    tibble::tibble(barcode = colnames(df)[-c(1:3)]) %>%
    dplyr::mutate(
      sample = stringr::str_sub(
        string = barcode,
        start = 1,
        end = 12
      ),
      type = stringr::str_split(barcode, pattern = "-", simplify = T)[, 4] %>% stringr::str_sub(1, 2)
    ) %>%
    dplyr::filter(type %in% c("01", "11")) %>%
    dplyr::mutate(type = plyr::revalue(
      x = type,
      replace = c("01" = "Tumor", "11" = "Normal"),
      warn_missing = F
    )) %>%
    dplyr::group_by(sample) %>%
    # pairing filter intentionally disabled for the all-samples analysis
    # dplyr::filter(n() >= 2, length(unique(type)) == 2) %>%
    dplyr::ungroup()
  # table() orders names alphabetically, so index 1 = Normal, 2 = Tumor
  sample_type_summary <- table(samples$type) %>% as.numeric()
  # Require both groups present with at least 10 samples each
  if (gtools::invalid(sample_type_summary) || length(sample_type_summary) != 2 ||
      any(sample_type_summary < c(10, 10))) {
    return(NULL)
  }
  # Long format: one row per gene x sample, annotated with Tumor/Normal
  df_f <-
    df %>%
    dplyr::select(c(1, 2, 3), samples$barcode) %>%
    tidyr::gather(key = barcode, value = expr, -c(1, 2, 3)) %>%
    dplyr::left_join(samples, by = "barcode")
  # Per-gene Welch t-test (Tumor vs Normal) with FDR correction across genes
  df_f %>%
    dplyr::group_by(cancer_types, symbol, entrez_id) %>%
    tidyr::drop_na(expr) %>%
    dplyr::do(broom::tidy(t.test(expr ~ type, data = .))) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(fdr = p.adjust(p.value, method = "fdr")) %>%
    dplyr::select(cancer_types, symbol, entrez_id, p.value, fdr) -> df_pvalue
  # Group means and the tumor/normal fold-change ratio
  # (0.1 pseudocount guards against division by zero; log2 is taken later)
  df_f %>%
    dplyr::group_by(cancer_types, symbol, entrez_id, type) %>%
    tidyr::drop_na(expr) %>%
    dplyr::summarise(mean = mean(expr)) %>%
    tidyr::spread(key = type, mean) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(fc = (Tumor + 0.1) / (Normal + 0.1)) -> df_fc
  # Combine fold change with test statistics and record group sizes
  df_fc %>%
    dplyr::inner_join(df_pvalue, by = c("cancer_types", "symbol", "entrez_id")) %>%
    dplyr::mutate(n_normal = sample_type_summary[1], n_tumor = sample_type_summary[2]) -> res
  return(res)
}
# Run the fold-change / p-value calculation for every cancer type
# (entries return NULL when a cancer lacks enough tumor/normal samples).
purrr::map2(.x = gene_list_expr$cancer_types,
            .y = gene_list_expr$filter_expr,
            .f = calculate_fc_pvalue_all_samples) -> gene_list_fc_pvalue
names(gene_list_fc_pvalue) <- gene_list_expr$cancer_types
# bind_rows() drops the NULL entries and stacks the rest into one tibble
gene_list_fc_pvalue %>% dplyr::bind_rows() -> gene_list_fc_pvalue_simplified
# Persist the combined results (rds + tsv) under a dedicated subdirectory
expr_path <- file.path(expr_path, "03_test_b_all_samples")
readr::write_rds(
  x = gene_list_fc_pvalue_simplified,
  path = file.path(expr_path, "rds_03_a_at_gene_list_fc_pvalue_simplified.rds.gz"),
  compress = "gz"
)
readr::write_tsv(
  x = gene_list_fc_pvalue_simplified,
  path = file.path(expr_path, "rds_03_a_at_gene_list_fc_pvalue_simplified.tsv")
)
# Annotate with the gene-list metadata (type, color, ...) and keep only the
# core autophagy genes for the plots below.
gene_list_fc_pvalue_simplified %>%
  dplyr::left_join(gene_list, by = "symbol") -> gene_fc_pvalue
gene_fc_pvalue_autophagy <-
  gene_fc_pvalue %>%
  dplyr::filter(type == "Autophagy")
# Keep genes with |log2 fold change| >= log2(1.5) and FDR <= 0.05, then clamp
# values for plotting: -log10(p) is capped at 15 and the fold change is
# restricted to the [1/8, 8] range.
filter_fc_pval <- function(.x) {
  .x %>%
    dplyr::filter(abs(log2(fc)) >= log2(1.5), fdr <= 0.05) %>%
    dplyr::mutate(
      p.value = pmin(-log10(p.value), 15),
      fc = pmin(pmax(fc, 1 / 8), 8)
    )
}
# Classify one gene/cancer pair: +1 = significantly up-regulated
# (fc > 1.5, p < 0.05), -1 = significantly down-regulated (fc < 2/3,
# p < 0.05), 0 = otherwise. Scalar inputs; mapped over rows by get_pattern().
filter_pattern <- function(fc, p.value) {
  if (p.value >= 0.05) {
    return(0)
  }
  if (fc > 1.5) {
    return(1)
  }
  if (fc < 2 / 3) {
    return(-1)
  }
  0
}
# Build the wide up/down pattern matrix: one row per gene, one column per
# cancer type, cells in {-1, 0, +1} as classified by filter_pattern().
get_pattern <- function(.x) {
  classified <- dplyr::mutate(
    .x,
    expr_pattern = purrr::map2_dbl(fc, p.value, filter_pattern)
  )
  classified %>%
    dplyr::select(cancer_types, symbol, expr_pattern) %>%
    tidyr::spread(key = cancer_types, value = expr_pattern)
}
# gene rank by up and down: per-gene summary across cancer types, computed
# row-wise on the wide pattern matrix from get_pattern():
#   rank = sum of the +1/-1/0 pattern values (net regulation direction),
#   up   = number of cancers where the gene is up-regulated (+1),
#   down = number of cancers where it is down-regulated (-1).
# Result is sorted by rank ascending (most down-regulated genes first).
get_gene_rank <- function(pattern){
  pattern %>%
    dplyr::rowwise() %>%
    dplyr::do(
      symbol = .$symbol,
      # .[-1] drops the symbol column before aggregating the pattern values
      rank = unlist(.[-1], use.names = F) %>% sum(),
      up = (unlist(.[-1], use.names = F) == 1) %>% sum(),
      down = (unlist(.[-1], use.names = F) == -1) %>% sum()
    ) %>%
    dplyr::ungroup() %>%
    tidyr::unnest() %>%
    dplyr::arrange(rank)
}
# cancer types rank by gene: per-cancer total number of dysregulated genes
# (sum of |pattern| over all genes), sorted with the most affected first.
# NOTE(review): dplyr::funs() is deprecated in dplyr >= 0.8 (use across() /
# list(~ ...) in newer code); kept here unchanged.
get_cancer_types_rank <- function(pattern){
  pattern %>%
    dplyr::summarise_if(.predicate = is.numeric, dplyr::funs(sum(abs(.)))) %>%
    tidyr::gather(key = cancer_types, value = rank) %>%
    dplyr::arrange(-rank)
}
# rect plot: gene-by-cancer tile heatmap of log2 fold change.
#   .x_filter         filtered tibble from filter_fc_pval() (cancer_types,
#                     symbol, fc, ...)
#   gene_rank         row order for the y axis (from get_gene_rank())
#   cancer_types_rank column order for the x axis (from get_cancer_types_rank())
# Prints the plot and returns the ggplot object.
plot_rect_pattern <- function(.x_filter, gene_rank, cancer_types_rank){
  ggplot(.x_filter, aes(x = cancer_types, y = symbol, fill = log2(fc))) +
    geom_tile(color = "black") +
    # diverging blue/white/red scale centered at log2(fc) = 0
    scale_fill_gradient2(
      low = "blue",
      mid = "white",
      high = "red",
      midpoint = 0,
      na.value = "white",
      breaks = seq(-3, 3, length.out = 5),
      labels = c("<= -3", "-1.5", "0", "1.5", ">= 3"),
      name = "log2 FC"
    ) +
    scale_y_discrete(limit = gene_rank$symbol) +
    scale_x_discrete(limit = cancer_types_rank$cancer_types, expand = c(0, 0)) +
    theme(
      panel.background = element_rect(colour = "black", fill = "white"),
      panel.grid = element_blank(),
      axis.title = element_blank(),
      axis.ticks = element_blank(),
      legend.text = element_text(size = 12),
      legend.title = element_text(size = 14),
      legend.key = element_rect(fill = "white", colour = "black")
    ) -> p
  print(p)
  return(p)
}
# point plot: gene-by-cancer bubble chart -- point size encodes -log10(p)
# (already transformed/capped by filter_fc_pval()), color encodes log2 FC.
# Same argument conventions as plot_rect_pattern(); prints and returns the
# ggplot object.
plot_fc_pval_pattern <- function(.x_filter, gene_rank, cancer_types_rank){
  ggplot(.x_filter, aes(x = cancer_types, y = symbol)) +
    geom_point(aes(size = p.value, col = log2(fc))) +
    scale_color_gradient2(
      low = "blue",
      mid = "white",
      high = "red",
      midpoint = 0,
      na.value = "white",
      breaks = seq(-3, 3, length.out = 5),
      labels = c("<= -3", "-1.5", "0", "1.5", ">= 3"),
      name = "log2 FC"
    ) +
    # size legend ranges from p = 0.05 down to the p = 1e-15 cap
    scale_size_continuous(
      limit = c(-log10(0.05), 15),
      range = c(1, 6),
      breaks = c(-log10(0.05), 5, 10, 15),
      labels = c("0.05", latex2exp::TeX("$10^{-5}$"), latex2exp::TeX("$10^{-10}$"), latex2exp::TeX("$< 10^{-15}$"))
    ) +
    scale_y_discrete(limit = gene_rank$symbol) +
    scale_x_discrete(limit = cancer_types_rank$cancer_types) +
    theme(
      panel.background = element_rect(colour = "black", fill = "white"),
      panel.grid = element_line(colour = "grey", linetype = "dashed"),
      panel.grid.major = element_line(
        colour = "grey",
        linetype = "dashed",
        size = 0.2
      ),
      axis.title = element_blank(),
      axis.ticks = element_line(color = "black"),
      legend.text = element_text(size = 12),
      legend.title = element_text(size = 14),
      legend.key = element_rect(fill = "white", colour = "black")
    ) -> p
  print(p)
  return(p)
}
# cancer count: horizontal stacked bars -- for each gene, the number of
# cancer types where it is up- (red) or down- (blue) regulated.
# NOTE(review): cancer_types_rank is accepted but never used in this body.
plot_cancer_count <- function(.x_filter, gene_rank, cancer_types_rank){
  ggplot(
    dplyr::mutate(
      .x_filter,
      alt = ifelse(log2(fc) > 0, "up", "down")
    ),
    aes(x = symbol, fill = factor(alt))
  ) +
    geom_bar(color = NA, width = 0.5) +
    scale_fill_manual(
      limit = c("down", "up"),
      values = c("blue", "red"),
      guide = FALSE
    ) +
    scale_y_continuous(
      limit = c(-0.1, 12.5),
      expand = c(0, 0),
      breaks = seq(0, 12, length.out = 5)
    ) +
    scale_x_discrete(limit = gene_rank$symbol, expand = c(0.01, 0.01)) +
    theme(
      panel.background = element_rect(
        colour = "black",
        fill = "white",
        size = 1
      ),
      panel.grid.major = element_line(linetype = "dashed", color = "lightgray"),
      axis.title = element_blank(),
      axis.ticks.x = element_blank(),
      legend.text = element_text(size = 12),
      legend.title = element_text(size = 14),
      legend.key = element_rect(fill = "white", colour = "black")
    ) +
    # flip so genes run down the y axis
    coord_flip() -> p
  print(p)
  return(p)
}
# Apply the pipeline to the core autophagy genes and save the two figures.
at_filter <-
  gene_fc_pvalue_autophagy %>%
  filter_fc_pval()
at_cancer_rank <-
  gene_fc_pvalue_autophagy %>%
  get_pattern() %>%
  get_cancer_types_rank()
  # dplyr::filter(!cancer_types %in% c("KICH", "LUSC"))
# Gene ordering: keep genes dysregulated in at least 2 cancer types; the
# joined gene_list supplies the per-gene axis-label color used below.
at_gene_rank <-
  gene_fc_pvalue_autophagy %>%
  get_pattern() %>%
  get_gene_rank() %>%
  dplyr::left_join(gene_list, by = "symbol") %>%
  dplyr::filter(up + down >= 2) %>%
  dplyr::arrange(rank)
# Heatmap of log2 FC, saved as pdf plus the ggplot object as rds
p <- plot_rect_pattern(at_filter, at_gene_rank, at_cancer_rank) +
  theme(axis.text.y = element_text(color = at_gene_rank$color))
ggsave(
  filename = "fig_04_at_core_expr_rect.pdf",
  plot = p,
  device = "pdf",
  width = 10,
  height = 20,
  path = expr_path
)
readr::write_rds(
  p,
  path = file.path(expr_path, "fig_04_at_core_expr_rect.pdf.rds.gz"),
  compress = "gz"
)
# Bubble chart of fold change vs p-value, saved the same way
p <- plot_fc_pval_pattern(at_filter, at_gene_rank, at_cancer_rank) +
  theme(axis.text.y = element_text(color = at_gene_rank$color))
ggsave(
  filename = "fig_05_at_expr_fc_pval.pdf",
  plot = p,
  device = "pdf",
  width = 10,
  height = 20,
  path = expr_path
)
readr::write_rds(
  p,
  path = file.path(expr_path, "fig_05_at_expr_fc_pval.pdf.rds.gz"),
  compress = "gz"
)
|
6bbafeecdf7036ea37409d4eb41d5ec743bbc6ad
|
13870924decf189e710c1883e91c72bd8b08f609
|
/2018/Week_23/fast_food_calories.R
|
e4714ca3753a4aca31f0fe7fd946b95e559165f2
|
[] |
no_license
|
dallinwebb/tidy_tuesday
|
7a5499433a89c3af9f207a79f52b2c047762bb83
|
f7b5681744a4de921bd283148e05834fbebd4a78
|
refs/heads/master
| 2021-07-06T17:06:43.547791
| 2019-03-25T21:52:00
| 2019-03-25T21:52:00
| 133,426,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,439
|
r
|
fast_food_calories.R
|
# Tidy Tuesday: percentage of each fast-food chain's menu items over 500 kcal.
library(tidyverse)
# .png of burger, rasterised for annotation_custom() below
g <- png::readPNG(source = "burger.png") %>% grid::rasterGrob(interpolate = T)
# One pipeline from data to chart.
# NOTE(review): despite the name, `df` ends up holding a ggplot object --
# the pipe flows straight into ggplot().
df <- read_rds("fastfood_calories.rds") %>%
  as_tibble() %>%
  select(restaurant, item, calories) %>%
  # flag items over 500 calories (T/F shorthand; TRUE/FALSE preferred)
  mutate(is_over = ifelse(calories > 500, T, F)) %>%
  # per-restaurant share of items in each flag group
  group_by(restaurant, is_over) %>%
  summarise(n = n()) %>%
  mutate(freq = n / sum(n)) %>%
  # keep only the over-500 share per restaurant
  filter(!is_over == F) %>%
  # bars ordered by share, descending
  ggplot(aes(fct_reorder(restaurant, freq, max, .desc = T),
             weight = freq,
             fill = restaurant)) +
  geom_bar(show.legend = F) +
  geom_text(aes(y = freq + .02,
                label = scales::percent(freq))) +
  # decorative burger image placed in the upper-right region
  annotation_custom(g,
                    ymin = .4,
                    ymax = .8,
                    xmin = 7,
                    xmax = 8) +
  scale_fill_manual(values = c("Mcdonalds" = "#36454a", "Arbys" = "#638c94",
                               "Burger King" = "#79ada0", "Sonic" = "#adb563",
                               "Dairy Queen" = "#e7a521", "Subway" = "#e78418",
                               "Taco Bell" = "#ee5928","Chick Fil-A" = "#e73a3f")) +
  labs(x = NULL,
       y = NULL,
       title = "Percentage of Menu Items Over 500 Calories\n\n") +
  theme_minimal() +
  theme(panel.grid = element_blank(),
        axis.text.y = element_blank(),
        title = element_text(size = 16),
        axis.text.x = element_text(size = 12))
|
792baf9988bd33a799e2b61d78ae10e0d50286f9
|
005b697b8a291d2dae196e253930bda208d3eaad
|
/Fixed-7/DistCor_Same_Preg.R
|
c9824a4d345e37beb2ad76c539550e833d6dc9a4
|
[] |
no_license
|
abhijeetsingh1704/MOAB_private
|
1bdeb5bc1c0411dfb3b7d55bbb742c927abf5d84
|
a4b58a59439909e160c9b379ffb7dd071e50e9b9
|
refs/heads/main
| 2023-03-21T10:50:48.231536
| 2020-11-11T15:40:20
| 2020-11-11T15:40:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,935
|
r
|
DistCor_Same_Preg.R
|
# Distance/correlation evaluation of low-dimensional embeddings against the
# raw multi-omics data (same-modality comparison) for the de-duplicated
# "Fixed-7" sample set. Most embedding sections are commented out; only the
# uncommented ones (Diffusion maps, Lamp, PCA) run in this pass.
source('~/Clean_Multiomics/General/SampleEmbeddingCompact.R')
#only use this version when you remove duplicate data
Raw=readRDS('~/Clean_Multiomics/Fixed-7/FIxed_Sajjad_Multiomics/AllX_NoDup')
PointLab=readRDS('~/Clean_Multiomics/Fixed-7/FIxed_Sajjad_Multiomics/nodelabs_NoDup')
# provides Cor_Dist_SameModality(Raw, Embedding, PointLab)
source('~/Clean_Multiomics/Compact_Functions/Cor_Dist_SameModality.R')
#Raw=readRDS('~/Clean_Multiomics/Sajjad/AllX_Sajjad')
#PointLab=readRDS('~/Clean_Multiomics/Sajjad/PointLab')
#########
#tSNE
########
#read in data
#tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/tSNE_noDup')
#tSNE=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
#######################
#UMAP
#######################
#tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/UMAP_noDup')
#umap=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
####################
#Large Vis###########
####################
#tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/LargeVis_noDup')
# LargeVis embedding is stored transposed, hence the t() before use
#tEmbed=t(tEmbed)
#LV=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
#######################################################################
############
##TriMap####
###########
#tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/TriMap')
#Tri=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
##########
#LaplacianEigen
#tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/Laplacian_Eigenmap')
#LLE=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
################
##Diffusion maps (active)
tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/DiffusionMaps')
Diffusion=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
###################
#Lamp (active)
tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/Lamp')
Lamp=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
#############
####Pesk
#tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/Pesk')
#Pesk=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
##########
##PCA (active)
tEmbed=readRDS('~/Clean_Multiomics/Fixed-7/Embeddings/PrComp')
PCA=Cor_Dist_SameModality(Raw,tEmbed,PointLab)
|
d23bddbb3efda134f84e420b2eea1783b6df78c2
|
e812a0825dd42bf6b6a98131ce1f406ca9f828f4
|
/package/WaveCrest/man/WaveCrestENI.Rd
|
c9b756d25a660ff17b67329615fb90ffe75505fb
|
[
"Apache-2.0"
] |
permissive
|
lengning/WaveCrest
|
9dc6e1baef72ec58baf0b0e73e1a008eb3b0cd54
|
ac819ba9803d06b387190914f4ae4b705094380d
|
refs/heads/master
| 2020-05-22T01:27:36.518823
| 2017-10-17T16:46:46
| 2017-10-17T16:46:46
| 48,067,238
| 2
| 4
| null | 2017-10-17T16:46:50
| 2015-12-15T20:29:09
|
R
|
UTF-8
|
R
| false
| false
| 2,229
|
rd
|
WaveCrestENI.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/WaveCrestENI.R
\name{WaveCrestENI}
\alias{WaveCrestENI}
\title{Search for the optimal sample order for experiments with ordered conditions}
\usage{
WaveCrestENI(GeneList, Data, Conditions, Ndg=3,
N=20000, NCThre=1000)
}
\arguments{
\item{GeneList}{A vector that contains genes to use for reordering.}
\item{Data}{gene-by-sample matrix or isoform-by-sample matrix. It should be rescaled to gene specific z scores.}
\item{Conditions}{A factor indicates the condition (time/spatial point) which
each sample belongs to. The levels of the factor should be sorted
by the time-course / spatial order.}
\item{Ndg}{degree of polynomial.}
\item{N,NCThre}{The 2-opt algorithm will stop if N iterations have been performed or if the optimal order
remains unchanged for over NCThre iterations.}
}
\value{
This function performs the extended nearest insertion (ENI) and 2-opt algorithm.
The ENI algorithm will be applied to
search for the optimal sample order which minimizes the MSE of
polynomial regression (PR).
This function will call PipeRCDF() function, which fits
PR to expression of each gene/isoform within the gene list.
The aggregated MSE of a fit is defined as the
summation of the MSEs of all genes/isoforms considered here.
The output of PipeRCDF() returns the optimal order which provides the smallest PR MSE.
The 2-opt algorithm will then be applied to improve the optimal order searching of the ENI.
In each iteration, the 2-opt algorithm will randomly choose two points (samples), then flip the samples
between these two points. The new order will be adapted if it provides smaller PR MSE.
The output returns the optimal order.
Note that the reordering happens within condition (time point). Cells from
different conditions won't be mixed unless the cell is placed in the
boundary of two conditions.
}
\description{
Search for the optimal sample order for experiments with ordered conditions
}
\examples{
aa <- sin(seq(0,1,.1))
bb <- sin(seq(0.5,1.5,.1))
cc <- sin(seq(0.9,1.9,.1))
dd <- sin(seq(1.2,2.2,.1))
res <- WaveCrestENI(c("aa","bb"), rbind(aa,bb,cc,dd), N=50,
Conditions = factor(c(rep(1,5),rep(2,6))))
}
\author{
Ning Leng
}
|
bc248e4704de6bbc84773003061f2217ba6f1753
|
9978cd1445ca2c95301169faadc6c1337649132a
|
/R/escapeContent.R
|
9c94cbc5413ad1c8e04bb8c0657db941f47300ef
|
[] |
no_license
|
cran/wyz.code.rdoc
|
f9e607ab38b89c62340388ad353d07fa553fb4b9
|
983f4cc651fe63f909a03caaa1089ca7254a49a0
|
refs/heads/master
| 2021-10-08T20:22:59.419778
| 2021-10-06T06:00:02
| 2021-10-06T06:00:02
| 213,606,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 996
|
r
|
escapeContent.R
|
# Escape characters that are special in Rd/roxygen documentation output:
# '@' is doubled, '%' is backslash-escaped, and (optionally) '{' and '}'
# are backslash-escaped. Each split pattern lists the already-escaped form
# first ("@@", "\\%", ...) so previously escaped occurrences are normalised
# rather than escaped twice.
#
# content_s_1      - single character string to escape
# escapeBraces_b_1 - when TRUE, also escape '{' and '}'
escapeContent <- function(content_s_1, escapeBraces_b_1 = FALSE) {
  # Double every '@'; existing "@@" collapse to one split token first.
  # Note `sep` is inert here (single vector argument); `collapse` joins.
  patchArobas <- function(x_s) {
    if (stringr::str_count(x_s, '@') == 0L) return(x_s)
    paste(strsplit(x_s, "@@|@")[[1]], collapse = '@@', sep = '@@')
  }
  # Backslash-escape '%' (the Rd comment character)
  patchPercent <- function(x_s) {
    if (stringr::str_count(x_s, '%') == 0L) return(x_s)
    paste(strsplit(x_s, "\\\\%|%")[[1]], collapse = '\\%', sep = '\\%')
  }
  # Backslash-escape '{'
  patchOB <- function(x_s) {
    if (stringr::str_count(x_s, '\\{') == 0L) return(x_s)
    paste(strsplit(x_s, "\\\\\\{|\\{")[[1]], collapse = '\\{', sep = '\\{')
  }
  # Backslash-escape '}'
  patchCB <- function(x_s) {
    if (stringr::str_count(x_s, '\\}') == 0L) return(x_s)
    paste(strsplit(x_s, "\\\\\\}|\\}")[[1]], collapse = '\\}', sep = '\\}')
  }
  # The '\t' sentinel keeps strsplit from silently dropping a trailing
  # separator; it is stripped again by the substring() calls below.
  s <- paste0(content_s_1, '\t') # to circumvent strsplit final character issue
  s <- patchArobas(s)
  s <- patchPercent(s)
  if (!escapeBraces_b_1) return(substring(s, 1L, nchar(s) - 1L))
  s <- patchOB(s)
  s <- patchCB(s)
  substring(s, 1L, nchar(s) - 1L)
}
|
58ce8b2c3903aa32c865f457462bcb6134a796e9
|
f1584cd634fe8a0ac556a22dd316839f38e67974
|
/plot4.R
|
baddb575104937f54796c5f49b88ed6055040daa
|
[] |
no_license
|
Sanches4733/ExData_Plotting1
|
c38ef4e5d26e6dff8f662dffe019ab1f24cbe227
|
57af947023a0b5e7778e92a24dd9cdd59fafb51d
|
refs/heads/master
| 2020-12-25T16:47:23.737886
| 2015-09-13T23:28:29
| 2015-09-13T23:28:29
| 42,416,297
| 0
| 0
| null | 2015-09-13T23:05:50
| 2015-09-13T23:05:50
| null |
UTF-8
|
R
| false
| false
| 1,478
|
r
|
plot4.R
|
# Please notice that in Russian weekdays are abbreviated just as they are
# on this plot. Thanks for understanding. :-)
# Considering that unzipped file is in your working directory.
# Reading data only about two required days.
# Creating one datetime variable.
# grep pre-filters the rows for 1-2 Feb 2007 before R parses anything, so
# no header is read and columns keep the default names V1..V9.
mydata <- read.table(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'),
                     sep = ";", na.strings = "?", stringsAsFactors = FALSE)
# V1 = date, V2 = time; combine them into one POSIXct timestamp column
mydata <- transform(mydata, timestamp=as.POSIXct(paste(V1, V2),
                                                 format="%d/%m/%Y %H:%M:%S"))
# Opening device.
png("plot4.png")
# Changing par() so that we can plot 4 graphs at once.
par(mfcol = c(2,2))
# plot2.R
# Top-left panel: global active power over time (column V3)
plot(mydata$timestamp, mydata$V3, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
# plot3.R (only legend is changed: there is no border anymore)
# Bottom-left panel: the three sub-metering series (V7, V8, V9)
plot(mydata$timestamp, mydata$V7, xlab = "", ylab = "Energy sub metering",
     type = "n")
lines(mydata$timestamp, y = mydata$V7)
lines(mydata$timestamp, y = mydata$V8, col = "red")
lines(mydata$timestamp, y = mydata$V9, col = "blue")
legend("topright",legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=1 , col = c("black", "red", "blue"), bty = "n")
# Creating two needed plots.
# Right column: voltage (V5, top) and global reactive power (V4, bottom)
plot(mydata$timestamp, mydata$V5, type = "l", xlab = "datetime",
     ylab = "Voltage")
plot(mydata$timestamp, mydata$V4, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
# Closing device.
dev.off()
|
e5d7d11d13d4677226eac72e27dba7893066f0b4
|
647ab9c839daa7c11bdbd4fac785c96cac563be6
|
/simulation_degroot.R
|
47a18ed78c67a719a5c939d1efcccec5ff41a00b
|
[] |
no_license
|
hector-garrido/scripts-info-propagation
|
8dcbf8d49eea422c2783814d53605ea2f31dddec
|
8fc8a120e324b92aa9b08c6e5d570bd0e98a8b88
|
refs/heads/main
| 2023-02-25T17:14:38.573574
| 2021-02-07T02:35:29
| 2021-02-07T02:35:29
| 318,891,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,291
|
r
|
simulation_degroot.R
|
# DeGroot-style information diffusion on a k-regular random graph: each node
# repeatedly replaces its state with the average of its own state and its
# neighbours' states; the state after each of 5 rounds is drawn as node colour.
# install.packages('igraph')
#
# adjm <- matrix(sample(0:1, 100, replace=TRUE, prob=c(0.9,0.1)), nc=10)
# g1 <- graph_from_adjacency_matrix( adjm ,mode='undirected')
# adjm <- matrix(sample(0:5, 100, replace=TRUE,
# prob=c(0.9,0.02,0.02,0.02,0.02,0.02)), nc=10)
library(igraph)
################################################################################
total_nodos <- 300  # number of nodes
vecinos <- 4        # degree of the k-regular graph (neighbours per node)
porc_sat <- 0.2     # initial "saturated" fraction (used only by the commented-out init)
beta <- 0.1         # NOTE(review): unused in the visible script — confirm intent
#g1 <- graph_from_adjacency_matrix( adjm ,mode='undirected')
g_reg <- sample_k_regular(total_nodos, vecinos, directed = FALSE, multiple = FALSE)
# NOTE(review): `%>%` must be in scope here (re-exported by recent igraph
# versions; otherwise load magrittr/dplyr) — confirm.
g_reg_mat <- as_adjacency_matrix(g_reg) %>% matrix(ncol=total_nodos)
# Row-stochastic DeGroot update matrix: (A + I)/(k+1), i.e. equal weight on
# self and each of the k neighbours
g_reg_mat <-t(g_reg_mat + diag(total_nodos))/(vecinos+1)
#infectados_0 <- numeric(total_nodos)
#infectados_0[1:(total_nodos*porc_sat)] <- 1
# Initial states drawn uniformly in [0, 1]; each matrix product is one
# averaging round, so states stay in [0, 1] (convex combinations)
infectados_0 <- runif(total_nodos)
infectados_1 <- g_reg_mat%*%infectados_0
infectados_2 <- g_reg_mat%*%infectados_1
infectados_3 <- g_reg_mat%*%infectados_2
infectados_4 <- g_reg_mat%*%infectados_3
infectados_5 <- g_reg_mat%*%infectados_4
################################################################################
# Greyscale rendering: darker node = higher state; one panel per round 0..5
igraph_options(labels = NA)
par(mfrow=c(3,2))
par(mar = c(.2, .2, .2, .2))
plot(g_reg,
     vertex.color=paste0('grey',100-round(100*infectados_0)),vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=paste0('grey',100-round(100*infectados_1)),vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=paste0('grey',100-round(100*infectados_2)),vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=paste0('grey',100-round(100*infectados_3)),vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=paste0('grey',100-round(100*infectados_4)),vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=paste0('grey',100-round(100*infectados_5)),vertex.label=NA,vertex.size=6)
################################################################################
# Five-colour rendering (red = low, green = high): 1+round(4*x) maps a state
# in [0, 1] onto bins 1..5
colores <- c('red4','red2','white','green2','green4')
igraph_options(labels = NA)
par(mfrow=c(3,2))
par(mar = c(.2, .2, .2, .2))
plot(g_reg,
     vertex.color=colores[1+round(4*infectados_0)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(4*infectados_1)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(4*infectados_2)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(4*infectados_3)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(4*infectados_4)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(4*infectados_5)],vertex.label=NA,vertex.size=6)
# Four-colour rendering (no neutral middle bin): 1+round(3*x) maps onto 1..4
colores <- c('red4','red2','green2','green4')
igraph_options(labels = NA)
par(mfrow=c(3,2))
par(mar = c(.2, .2, .2, .2))
plot(g_reg,
     vertex.color=colores[1+round(3*infectados_0)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(3*infectados_1)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(3*infectados_2)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(3*infectados_3)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(3*infectados_4)],vertex.label=NA,vertex.size=6)
plot(g_reg,
     vertex.color=colores[1+round(3*infectados_5)],vertex.label=NA,vertex.size=6)
|
1622e73d85687abbfe8e2c678d024c055418dd7e
|
facfce4925642abc520a19e66eca956296250f9c
|
/code for RNA-seq/03.merge.R
|
f54db700caff9edf0d01c141fab5a79d5cea52b4
|
[] |
no_license
|
ChengLiLab/T_cell_tolerance
|
8cda1245b0ad7f2c3477b6211a18ed8f58918a90
|
4b09cb8a7c45669545f19844510e96d76c919744
|
refs/heads/main
| 2023-02-27T08:16:30.118981
| 2021-01-30T12:16:21
| 2021-01-30T12:16:21
| 334,325,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 918
|
r
|
03.merge.R
|
# Merge cufflinks per-sample gene FPKM tables into one gene-by-sample matrix
# written to gene_expression1.txt. Assumes the working directory contains one
# Tophat/cufflinks output folder per sample, each holding
# cufflinks/genes.fpkm_tracking.
path = dir()
# Reference table used only to define the gene universe (column 4 = gene id)
gene_bio = read.table("/lustre/user/liclab/lirf/Project/sunyj/RNA.and.CHIP.seq.data/2015.6.15/data/Tophat.out/Sample_PR10_SYJ_GFP-1.1.clean.fq.111/cufflinks/genes.fpkm_tracking",header = T)
gene_exp = read.table(paste0("./",path[1],"/cufflinks/genes.fpkm_tracking",collapse = ""),header = T)
# Keep only genes present in both the reference table and the first sample
gene = intersect(as.character(unique(gene_bio[,4])),as.character(gene_exp[,5]))
# NOTE(review): columns 7 and 10 are presumably locus and FPKM in the
# cufflinks genes.fpkm_tracking format — confirm against cufflinks docs.
gene_exp = gene_exp[match(gene,as.character(gene_exp[,5])),c(7,10)]
# Append the FPKM column of every remaining sample, matched by gene id
for(i in 2:length(path)){
  gene_exp1 = read.table(paste0("./",path[i],"/cufflinks/genes.fpkm_tracking",collapse = ""),header = T,fill = T)
  gene_exp = cbind(gene_exp,gene_exp1[match(gene,as.character(gene_exp1[,5])),10])
  print(i)  # progress indicator
}
# Column names are derived from the sample folder names
colnames(gene_exp) = c("loci", gsub("_R1.fq.","",gsub("Sample_PR10_SYJ_","",path[1:length(path)])))
rownames(gene_exp) = gene
#colnames(gene_exp ) = c("loci","65N","65T")
write.table(gene_exp,file = "gene_expression1.txt")
|
3b1a6f27ca24f6b4fdaeab9341f51d53636db7be
|
01a33c3170bf018372ee3fc7e77ee8dd52d028e5
|
/compare_epp_spec_counts.R
|
ad530af31dbdb2ce63237437903b2353e4a7f3a2
|
[] |
no_license
|
deepajag/gbdeppaiml
|
0adcc098c0e9436e39232a70f1ed0eca7400c568
|
3a21fd940d8a0a03847f59dd57de5a07750c2533
|
refs/heads/master
| 2021-09-09T22:06:25.669158
| 2021-09-03T17:17:49
| 2021-09-03T17:17:49
| 212,451,317
| 0
| 1
| null | 2019-10-02T22:15:53
| 2019-10-02T22:15:53
| null |
UTF-8
|
R
| false
| false
| 1,364
|
r
|
compare_epp_spec_counts.R
|
# Compare 2019 adult (ages 15-49) PLHIV counts between EPP-ASM compiled output
# and aggregated Spectrum output for every EPP location, then rank locations
# by the Spectrum/EPP ratio to surface discrepancies.
# NOTE(review): uses data.table syntax (fread, :=, by=.()) and
# parallel::mclapply; both are presumably loaded by the caller — confirm.
new_list = mclapply(loc.table[epp == 1 & !grepl("IND",ihme_loc_id) & most_detailed,ihme_loc_id],function(loc){
  print(loc)
  # EPP compiled draws: sum the four HIV+ compartments to PLHIV per draw,
  # then take the mean across draws
  compiled = fread(paste0("/share/hiv/epp_output/gbd20/200713_yuka/compiled/",loc,".csv"))
  compiled = compiled[year == 2019 & age %in% 15:49]
  compiled[,plhhiv := pop_art + pop_gt350 + pop_200to350 + pop_lt200]
  compiled = compiled[,list(plhiv = sum(plhhiv)), by = .(year,run_num)]
  compiled = compiled[,list(plhiv = mean(plhiv)), by = .(year)]
  store1 = compiled
  store1[,file := "epp"]
  # Spectrum aggregates: age_group_ids 8:14 (presumably the 15-49 groups,
  # matching the EPP filter above — confirm GBD age-group mapping)
  compiled = fread(paste0("/share/hiv/spectrum_prepped/aggregates/200713_yuka/",loc,".csv"))
  compiled = compiled[year_id == 2019 & age_group_id %in% c(8:14)]
  compiled = compiled[,list(plhiv = sum(pop_hiv)), by = .(year_id,run_num)]
  compiled = compiled[,list(plhiv = mean(plhiv)), by = .(year_id)]
  compiled[,file := "spec_agg"]
  setnames(compiled,"year_id","year")
  out = rbind(store1,compiled)
  out[,iso := loc]
  return(out)
},mc.cores = 5)
new_list = rbindlist(new_list)
# Wide format: one row per year/location with an epp and a spec_agg column
new_list1 = dcast(new_list, year + iso ~ file, value.var = "plhiv")
new_list1[ ,diff := spec_agg/epp]
# Locations with the largest disagreement sort to the extremes
new_list1[order(diff)]
####
#reckoning needs to be rerun for group one
#pull out the summary files to make sure that it matches the email
#prevalence discrepancy: see what happened by looking at what would be attributed to the populatoin
|
3067948da75be2ac1d999a35fdb46353c397e62e
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609889532-test.R
|
7acf31e376eb3c6ad62736cf8cf1ba03d59ab02d
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
1609889532-test.R
|
# Fuzzer-generated regression input (libFuzzer/valgrind case) replayed against
# esreg::G1_fun; the z value is a denormalized double near the smallest
# representable magnitude. str() prints the result for inspection.
testlist <- list(type = 1710618L, z = 8.28914734191549e-317)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
309494a5c47274eb8202762edd2491e68d21d240
|
4950cf89023c981749c2fb1ceec95bd6e584cb32
|
/scripts/util/__Util_QuantDOLFunctions.R
|
804d7d25962eb5922df3fb42d8b9a0fe2cf8c23f
|
[] |
no_license
|
christokita/socially-modulated-threshold-model
|
372368a518dd55b2bb61ecdaeeac8e4005f5396e
|
3112e4de8c794f079a6459fd5918c04db226ca75
|
refs/heads/master
| 2023-06-01T01:17:53.518760
| 2020-08-31T14:40:40
| 2020-08-31T14:40:40
| 108,687,450
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,241
|
r
|
__Util_QuantDOLFunctions.R
|
##################################################
#
# Qunatifying Division of Labor Functions
#
##################################################
####################
# Mutual Entropy DOL Measure
####################
# From Gorelick, Bertram, Killeen, & Fewell (2004)
# Quantifies division of labor from an individual-by-task count matrix via
# normalized mutual entropy. Returns a 1 x 3 matrix with columns:
#   Dsym  - symmetric DOL index, I(X,Y) / sqrt(H(X) * H(Y))
#   Dtask - task-normalized index, I(X,Y) / H(X)
#   Dind  - individual-normalized index, I(X,Y) / H(Y)
mutualEntropy <- function(TotalStateMat) {
  # Joint distribution p(x, y): normalize counts by the grand total
  joint <- TotalStateMat / sum(TotalStateMat)
  n_ind <- nrow(joint)
  n_task <- ncol(joint)
  grand <- sum(joint)
  # Entropy contribution of one marginal probability; a zero probability
  # contributes 0 instead of the NaN produced by 0 * log2(0)
  entropy_term <- function(p) {
    term <- p * log2(p)
    if (is.na(term)) 0 else term
  }
  # Shannon entropy terms for individuals, H(X)
  h_ind <- vapply(seq_len(n_ind), function(i) {
    entropy_term(sum(joint[i, ]) / grand)
  }, numeric(1))
  # Shannon entropy terms for tasks, H(Y)
  h_task <- vapply(seq_len(n_task), function(j) {
    entropy_term(sum(joint[, j]) / grand)
  }, numeric(1))
  # Mutual entropy I(X, Y) = sum over cells of
  # p(x,y) * log2(p(x,y) / (p(x) * p(y))), accumulated per individual;
  # zero-probability cells contribute 0
  i_xy_parts <- vapply(seq_len(n_ind), function(i) {
    p_i <- sum(joint[i, ]) / grand
    cell_vals <- numeric(n_task)
    for (j in seq_len(n_task)) {
      p_ij <- joint[i, j] / grand
      p_j <- sum(joint[, j]) / grand
      val <- p_ij * log2(p_ij / (p_i * p_j))
      cell_vals[j] <- if (is.na(val)) 0 else val
    }
    sum(cell_vals)
  }, numeric(1))
  H_x <- -sum(h_ind)
  H_y <- -sum(h_task)
  I_xy <- sum(i_xy_parts)
  # Normalized division-of-labor indices, returned as a matrix
  D <- data.frame(Dsym = I_xy / sqrt(H_x * H_y),
                  Dtask = I_xy / H_x,
                  Dind = I_xy / H_y)
  as.matrix(D)
}
####################
# Calculate task rank
####################
# Replace each column of TaskStepMat with the dense rank of its values
# (ties share a rank and ranks have no gaps). Relies on dplyr::dense_rank()
# being available in the calling environment.
calculateTaskRank <- function(TaskStepMat) {
  # seq_len() (rather than 1:ncol) is safe for a zero-column input,
  # where 1:0 would wrongly iterate over c(1, 0)
  for (column in seq_len(ncol(TaskStepMat))) {
    TaskStepMat[ , column] <- dense_rank(TaskStepMat[ , column])
  }
  # Return the rank-transformed matrix
  return(TaskStepMat)
}
|
8b305e9da28dfe24b54fa0f591c0ba3f4591eeb9
|
2e54809a2c3af44c5d460cb2eb2c8cebb2791af4
|
/R/arg-homogenise.R
|
c51006105f4bbedf0199190cc3902b1b55bd3bee
|
[] |
no_license
|
dpastoor/rlang
|
289f58ee3379ecbf666776c5b9cd462c0a64af80
|
2376dd6b2cba1689710b16fe181879735a4a251d
|
refs/heads/master
| 2020-05-24T23:16:13.239502
| 2017-03-13T21:07:15
| 2017-03-13T21:07:15
| 84,889,867
| 0
| 0
| null | 2017-03-14T01:04:17
| 2017-03-14T01:04:17
| null |
UTF-8
|
R
| false
| false
| 6,936
|
r
|
arg-homogenise.R
|
#' Standardise a call against formal arguments.
#'
#' This is a slower but more thorough version of
#' [lang_standardise()]. It is useful for tracing arguments across a
#' call stack (see [arg_inspect()] for an example).
#'
#' Compared to [match.call()], `lang_homogenise()`:
#'
#' * Always report dotted arguments as such: `call(..1, ..2)`. In
#' comparison, `match.call()` inlines literals: `call("foo", 10)`.
#'
#' * Provides an argument `enum_dots` to impose enumerated names on
#' dotted arguments. This produces `call(..1 = x, ..2 = ..3)`
#' instead of `call(foo = x, ..3)`.
#'
#' * Does not sort arguments according to the order of appearance in
#' the function definition.
#'
#' * Standardises missing arguments as well if you specify
#' `add_missings`: `call(x = , y = , )`.
#'
#' @param call Can be a call, a formula quoting a call in the
#' right-hand side, or a frame object from which to extract the call
#' expression. If not supplied, the calling frame is used. Note that
#' [lang_homogenise()] needs access to the actual function
#' corresponding to the call. It will retrieve it from the tidy
#' quote or frame environment, or in the calling context.
#' @param dots_env Calling frame in which to look up call the contents
#' of `...`. If not supplied and `call` is a frame object, it is
#' retrieved from `call`.
#' @param enum_dots Whether to standardise the names of dotted
#' arguments. If `TRUE`, this produces calls such as `f(..1 = ..2)`
#' instead of `f(..2)`. The first form is mostly intended for code
#' analysis tools as it makes it easier to climb arguments through
#' nested calls. The latter form (the default) is more faithful to
#' the actual call and is ready to be evaluated.
#' @param add_missings Whether to standardise missing arguments.
#' @export
lang_homogenise <- function(call = caller_frame(),
                            dots_env = NULL,
                            enum_dots = FALSE,
                            add_missings = FALSE) {
  if (is_frame(call)) {
    # Check for global frame
    if (call$pos == 0) {
      return(NULL)
    }
    # A frame records its caller, which is where the contents of `...`
    # must be looked up; the frame also carries the called function
    dots_env <- sys_frame(call$caller_pos)
    fn <- call$fn
  } else {
    fn <- NULL
  }
  # Normalise the input to a quosure, then recover the called function
  # (from the frame above if available, otherwise from the call head)
  call <- as_quosure(call, caller_env())
  fn <- fn %||% lang_fn(call)
  # Delegate the actual standardisation to the worker
  lang_homogenise_(f_rhs(call), fn, dots_env, enum_dots, add_missings)
}
# Worker for lang_homogenise(): expand `...`, resolve partially-matched
# argument names, then fully match arguments against the function's formals.
lang_homogenise_ <- function(call, fn, dots_env, enum_dots, add_missings) {
  # Work on a copy so the caller's expression is not mutated in place
  call <- duplicate(call)
  call <- call_inline_dots(call, dots_env, enum_dots)
  call <- call_match_partial(call, fn)
  call <- call_match(call, fn, enum_dots, add_missings)
  call
}
# Is `arg` a leading substring (partial match) of the formal name `formal`?
arg_match_partial <- function(arg, formal) {
  # startsWith() is documented as equivalent to comparing
  # substring(formal, 1, nchar(arg)) with arg
  startsWith(formal, arg)
}
# Resolve partially-matched (abbreviated) argument names in `call` against
# the formals of `fn`, erroring on ambiguous or duplicate matches — mirroring
# R's own argument-matching rules. Only renames arguments; never reorders.
call_match_partial <- function(call, fn) {
  actuals_nms <- lang_args_names(call)
  formals_nms <- fn_fmls_names(fn)
  # Supplying the same exact formal name twice is an error
  is_empty <- actuals_nms == ""
  is_dup <- duplicated(actuals_nms) & !is_empty
  is_dup <- is_dup & actuals_nms %in% formals_nms
  if (any(is_dup)) {
    dups_nms <- actuals_nms[which(is_dup)]
    abort(paste0(
      "formal arguments matched by multiple actual arguments: ",
      paste0(dups_nms, collapse = ", ")
    ))
  }
  dots_pos <- match("...", formals_nms)
  # No partial-matching of args after dots
  if (!is.na(dots_pos)) {
    # seq_len(dots_pos) - 1 is 0:(dots_pos - 1); the 0 index is dropped,
    # so this keeps exactly the formals that precede `...`
    formals_nms <- formals_nms[seq_len(dots_pos) - 1]
  }
  # Candidate formals: those not already exactly matched. In actuals_pat,
  # NA marks slots ineligible for partial matching (exactly matched or unnamed)
  formals_pool <- setdiff(formals_nms, actuals_nms)
  is_matched <- actuals_nms %in% formals_nms
  actuals_pat <- actuals_nms
  actuals_pat[is_matched | is_empty] <- NA
  matched <- list()
  for (formal in formals_pool) {
    matched_pos <- which(map_lgl(actuals_pat, arg_match_partial, formal))
    # One formal matched by several abbreviations is ambiguous
    if (length(matched_pos) > 1) {
      abort(paste0(
        "formal argument `", formal,
        "` matched by multiple actual arguments"
      ))
    }
    # One abbreviation matching several formals is also ambiguous
    if (length(matched_pos) && matched_pos %in% matched) {
      abort(paste0(
        "actual argument `", actuals_pat[matched_pos],
        "` matches multiple formal arguments"
      ))
    }
    matched <- append(matched, matched_pos)
    # Expand the abbreviated actual name to the full formal name
    actuals_nms[matched_pos] <- formal
  }
  # Without `...`, leftover named or surplus arguments are "unused" errors
  if (is.na(dots_pos)) {
    is_unused <- !actuals_nms %in% c(formals_nms, "")
    is_unused <- is_unused & !map_lgl(actuals_nms, is_dot_nm)
    if (any(is_unused)) {
      abort(paste0(
        "unused arguments: ",
        paste0(actuals_nms[is_unused], collapse = ", ")
      ))
    }
    if (length(actuals_nms) > length(formals_nms)) {
      abort("unused arguments")
    }
  }
  # First element of a call is the function, which carries no name
  names(call) <- c("", actuals_nms)
  call
}
# Replace a literal `...` inside `call` with the actual dotted arguments
# captured (unevaluated) from `dots_env`, splicing any arguments that
# followed `...` back on afterwards. Returns the call unchanged when it
# contains no `...`.
call_inline_dots <- function(call, dots_env, enum_dots) {
  # Find the pairlist node whose CADR is the `...` symbol
  d <- node_walk_nonnull(call, function(arg) {
    if (identical(node_cadr(arg), quote(...))) arg
  })
  if (is_null(d)) {
    return(call)
  }
  if (is_null(dots_env)) {
    abort("`dots_env` must be supplied to match dots")
  }
  # Capture the unevaluated contents of `...` from the supplied frame;
  # an empty `...` expands to an empty pairlist
  if (env_has(dots_env, "...")) {
    dots <- node_cdr(substitute(alist(...), dots_env))
  } else {
    dots <- pairlist()
  }
  dots <- dots_enumerate_args(dots)
  # Attach remaining args to expanded dots
  remaining_args <- node_cddr(d)
  node_walk_nonnull(dots, function(arg) {
    if (is_null(node_cdr(arg))) set_node_cdr(arg, remaining_args)
  })
  # Replace dots symbol with actual dots and remaining args
  set_node_cdr(d, dots)
  call
}
# Assign full formal names to the arguments of `call` and, when
# `add_missings` is TRUE, append explicitly-missing entries for formals
# that were not supplied (producing calls like f(x = , y = )).
call_match <- function(call, fn, enum_dots, add_missings) {
  # call[-1] drops the function position, leaving only arguments
  args <- call[-1]
  args_nms <- names2(args)
  fmls_nms <- names2(fn_fmls(fn))
  # Dispatch on whether the signature contains `...`
  dots_i <- which(fmls_nms == "...")
  if (length(dots_i)) {
    args_nms <- call_match_dotted(args_nms, fmls_nms, dots_i, enum_dots)
  } else {
    args_nms <- call_match_args(args_nms, fmls_nms)
  }
  names(call) <- c("", args_nms)
  if (add_missings) {
    # Standardise unsupplied formals as named missing arguments
    missing_nms <- setdiff(fmls_nms, c(args_nms, "..."))
    missing_args <- rep(list(arg_missing()), length(missing_nms))
    missing_args <- as.pairlist(set_names(missing_args, missing_nms))
    call <- node_append(call, missing_args)
  }
  call
}
# Name unnamed arguments positionally against the formals preceding `...`;
# when `enum_dots` is TRUE, rename every argument that is not a formal to
# an enumerated dot name (`..1`, `..2`, ...).
call_match_dotted <- function(args_nms, fmls_nms, dots_i, enum_dots) {
  # First match formals on the left of dots
  is_unmatched <- map_lgl(args_nms, `==`, "")
  candidates <- fmls_nms[seq_len(dots_i - 1)]
  candidates <- setdiff(candidates, args_nms[!is_unmatched])
  args_nms[is_unmatched] <- call_match_args(args_nms[is_unmatched], candidates)
  if (enum_dots) {
    # Everything not matching a formal is a dotted argument: enumerate them
    is_matched <- map_lgl(args_nms, `%in%`, fmls_nms)
    n_dots <- sum(!is_matched)
    args_nms[!is_matched] <- paste0("..", seq_len(n_dots))
  }
  args_nms
}
# Fill in names for unnamed arguments by position, consuming the formals not
# already taken by named arguments; arguments beyond the number of formals
# are left unnamed.
call_match_args <- function(args_nms, fmls_nms) {
  is_unmatched <- map_lgl(args_nms, `==`, "")
  # Only match up to the number of formals
  n_fmls <- length(setdiff(fmls_nms, "..."))
  n_args <- length(args_nms)
  if (n_args > n_fmls) {
    # Positions past the formal count are ignored (left unnamed)
    is_ignored <- rep(TRUE, n_args)
    is_ignored[seq_len(n_fmls)] <- FALSE
    is_unmatched <- is_unmatched & !is_ignored
  }
  # Remaining formals are consumed in order by the unnamed actuals
  candidates <- setdiff(fmls_nms, args_nms[!is_unmatched])
  args_nms[is_unmatched] <- candidates[seq_len(sum(is_unmatched))]
  args_nms
}
|
f849f5592e44c37fe869fe7d9260eef560deae8e
|
2c170474a479f0582d1685a8df22ca98dd157798
|
/tests/testthat.R
|
c9a5ad67f149091d91dee8ab16f234620e120379
|
[] |
no_license
|
wcmbishop/gogoplot
|
80574a1161a44222265f9478d891ac6d4a696033
|
1857b750305c15a9bb90dfdb12b96230c14a0ff8
|
refs/heads/master
| 2021-03-27T17:13:16.813628
| 2018-03-30T18:08:07
| 2018-03-30T18:08:07
| 106,642,044
| 3
| 2
| null | 2018-03-30T18:08:08
| 2017-10-12T03:53:19
|
R
|
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Test-suite entry point: run every testthat test for the gogoplot package.
library(testthat)
library(gogoplot)
test_check("gogoplot")
|
bdbc69acad247941d613b8de2612e9f50af18017
|
f5b72d9c57e50c7e95a56fd53fed4c18e6fe16b1
|
/movie.R
|
67de9497be58759db8419086a24cc816ba095eec
|
[] |
no_license
|
Bio3SS/Compensation_models
|
024151087ea3ee89a3edeeedadb0e03844e82a91
|
e91d68cbbfbd94fd3a9ca72cb2cb5fdbb48b7567
|
refs/heads/master
| 2022-04-29T22:58:24.445887
| 2022-04-11T20:11:02
| 2022-04-11T20:11:02
| 49,981,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 97
|
r
|
movie.R
|
# Sweep the growth parameter R and draw one simulation frame per value
# (frames intended to be assembled into a movie).
# NOTE(review): simPlot() is defined elsewhere in this project — confirm the
# semantics of f0/fDD/p0/pDD before changing this sweep.
Nc <- 40
for(R in seq(1.1, 4.0, by=0.02)){
  simPlot(N=Nc, f0=exp(R), fDD=2*Nc, p0=0, pDD=NULL)
}
|
2f0e91fd374a018ca6e0565c80a1e836a1c12d1b
|
a15b798ee17686b617b1849db93086cfe0be9b47
|
/plot2.R
|
0c7191b7412b8c7256c26647d5ffed3d670a997c
|
[] |
no_license
|
vtpanda/ExData_Plotting1
|
07fb737623fc131e2b8c7bdd55945441e3655220
|
8fbbcb32c63d463f825c60a74ebd075b313afd5d
|
refs/heads/master
| 2021-01-21T08:51:22.088359
| 2016-01-09T03:38:07
| 2016-01-09T03:38:07
| 49,175,637
| 0
| 0
| null | 2016-01-07T02:28:39
| 2016-01-07T02:28:38
| null |
UTF-8
|
R
| false
| false
| 403
|
r
|
plot2.R
|
# Plot 2: global active power over time for 1-2 Feb 2007, saved to plot2.png.
mydata<-read.table("household_power_consumption.txt", sep=";", na.strings="?", header=TRUE, as.is=TRUE)
# Keep only the two target days (Date strings are day/month/year)
mybetterdata<-subset(mydata, Date=="1/2/2007" | Date=="2/2/2007")
png("plot2.png", width=480, height=480)
# Combine Date and Time into POSIXct so the x-axis is a continuous time scale
plot(as.POSIXct(paste(mybetterdata$Date, mybetterdata$Time), format="%d/%m/%Y %H:%M:%S"), mybetterdata$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
787d54e596425b32a37d5cde25f3a913a857ab2a
|
7584c4b6119cf7985b1ea152f03de0a2619fe13b
|
/man/read.newick.Rd
|
e6e930119c16655a367f08ae74c994b82dab6aea
|
[] |
no_license
|
blueraleigh/macroevolution
|
2e380d14d91c7312d6ce1298d808f32b4b1becbd
|
bfa0644f4941940d7812106914add06fd5540656
|
refs/heads/master
| 2021-12-09T20:12:57.472284
| 2021-11-10T22:43:14
| 2021-11-10T22:43:14
| 213,418,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 537
|
rd
|
read.newick.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treeio.R
\name{read.newick}
\alias{read.newick}
\title{Phylogenetic tree input}
\usage{
read.newick(file, text = NULL)
}
\arguments{
\item{file}{A filename pointing to a file containing a Newick character string.}
\item{text}{A Newick character string. If not \code{NULL} any \code{file}
argument is ignored.}
}
\value{
An object of class \code{tree}
}
\description{
Parse a phylogenetic tree in Newick string format
}
\seealso{
\code{\link{write.newick}}
}
|
448710c9f3fc7685f7eadb191bf50d3098ec527f
|
9bdbe6236e0a9c59edd565079d383924061ee782
|
/man/strogatz.Rd
|
a55c20624fd701fba56abe5eead7f9d25256526d
|
[
"MIT"
] |
permissive
|
PabRod/sleepR
|
d2470acb6ea14f9d789991c2edc0bddedf39c348
|
0e8e92f0db62fb8723f55dc62d11e0254c7be5c5
|
refs/heads/master
| 2021-06-22T09:14:47.882072
| 2020-12-17T15:23:09
| 2020-12-17T15:23:09
| 153,459,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,131
|
rd
|
strogatz.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strogatz.R
\name{strogatz}
\alias{strogatz}
\title{Solve Strogatz's model}
\usage{
strogatz(ts, y0, parms = strogatz_default_parms(), ...)
}
\arguments{
\item{ts}{Vector of times (in h)}
\item{y0}{Initial condition}
\item{parms}{Model parameters (optional, see \code{\link{strogatz_default_parms}})}
\item{...}{Additional arguments passed to the \code{\link{ode}} integrator}
}
\value{
Results of the simulation, including times, states and asleep/awake status
}
\description{
Solves the Strogatz's model for the given times, initial condition and parameters
}
\examples{
y0 <- c(th1 = 0.1, th2 = 0.05)
nDays <- 30
ts <- seq(0, nDays*24, length.out=nDays*24*10)
ys <- strogatz(ts, y0)
}
\references{
Strogatz, S. H. (1987).
Human sleep and circadian rhythms: a simple model based on two coupled oscillators.
Journal of Mathematical Biology, 25(3), 327–347. \url{http://doi.org/10.1007/BF00276440}
}
\seealso{
\code{\link{dStrogatz}, \link{strogatz_default_parms}, \link{ode}}
}
\author{
Pablo Rodríguez-Sánchez (\url{https://pabrod.github.io})
}
|
428ee287b72a7c09c4daacc6d4c65928ec8f7a71
|
c5eebcd0f28922376621fe6f72cabafa6f58c59d
|
/man/eom.Rd
|
7e337fef7d480bb92f154aba8fded33d48e26fc7
|
[] |
no_license
|
Hans213/ralibr
|
ef99afd9b6eefaea2d1ef6fa7f707572512177f1
|
8b5f636d84873465de502a92fb3429f374cf4fb3
|
refs/heads/master
| 2020-03-07T09:13:44.787956
| 2018-03-30T13:01:34
| 2018-03-30T13:01:34
| 127,402,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 414
|
rd
|
eom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dates.R
\name{eom}
\alias{eom}
\title{The end of month date}
\usage{
eom(dates)
}
\arguments{
\item{dates}{a vector of dates.}
}
\value{
a date vector with the same class as \code{dates}
}
\description{
The \code{dates} are rounded to the end of their respective months.
}
\examples{
library("lubridate")
eom(ymd(20120203, 20140203))
}
|
72d59a25027f8025dfea75b1dc9958ee736fc632
|
e6d4db5226182c63755e1fe8fb8677104218082b
|
/length副本.R
|
261c27b0eb5aacfb872bed2de87ef2209e68e986
|
[] |
no_license
|
chihungfei/xuanti
|
0a5f41091c73c1a7144710f2b61386a82859a113
|
e1ac0a569d6279c42bccae43e35c47be8e236de4
|
refs/heads/master
| 2021-03-12T22:52:29.657741
| 2014-06-22T23:45:26
| 2014-06-22T23:45:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
length副本.R
|
# Length counts for two classical Chinese texts — the Xunzi and a Dai Zhen
# text — used to compare their length distributions. (Filename "length副本"
# means "length copy".)
# NOTE(review): the unit (characters per sentence/phrase) is not stated
# here — confirm against the upstream extraction script.
xunzilength <- c(5,4,4,4,8,9,12,6,9,5,4,8,11,10,10,12,7,7,10,7,11,7,5,5,8,7,6,5,5,7,10,4,4,5,11,13,4,4,4,12,11,7,4,4,9,4,3,3,3,6,6,4,4,5,7,5,6,6,6,5,6,6,4,4,8,8,7,5,5,5,3,3,9,6,6,8,3,6,8,8,11,7,11,11,13,12,12,4,10,7,18,7,16,8,6,4,4,6,7,9,6,8,10,4,10,5,7,4,3,4,4,4,7,7,5,4,4,4,4,7,4,4,7,8,4,5,8,4,5,4,8,7,3,7,3,4,10,3,5,4,10,7,5,5)
daizhenlength <- c(5,7,10,6,5,4,7,9,15,4,7,5,7,4,5,5,6,4,9,7,4,4,5,4,4,6,8,7,6,6,3,4,4,7,6,9,10,11,6,7,4,6,5,10,10,7,5,5,4,5,5,4,4,12,5,4,8,9,6,13,13,6,11,10,9,9,4,5,5,5,9,7,9,6,5,7,6,5,4,5,8,4,8,5,10,8,7,9,8,6,5,7,7,5,6,6,4,3,3,3,4,6,4,7,10,4,4,4,4,9,8,7,10,15,7,7,4,7,4,4,4,4,6,4)
|
de5d98dd45beb91794d41ef2c2d6d0eed89c19a7
|
3ba9af0c684713f3ff43b833e7631aaec1fb434c
|
/plot.R
|
76c60c785f8f26f1fec8413e77b83cd17ea707d3
|
[] |
no_license
|
mllofriu/taxi
|
4ff1b4f0329296ba583618e1a26b7b3f3588d9da
|
4c952080f07a5cb291dfb97e8b2867136999a02b
|
refs/heads/master
| 2021-03-12T19:21:33.710098
| 2017-04-07T22:10:48
| 2017-04-07T22:10:48
| 24,699,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
plot.R
|
# Plot per-episode completion-time distributions by method from saved
# run-time data (`rte` loaded from rte-1.Rdata) and write runtimes.pdf.
load(file = 'rte-1.Rdata')
require(ggplot2)  # NOTE(review): library() preferred; require() only warns on failure
data <- rte
# Notched boxplots of steps per episode, filled by method
p <- ggplot(data = data, aes(x=factor(episode), y = steps, fill = method))
p <- p+ geom_boxplot(notch = TRUE)
p <- p + ylab("Completion time (s)") + xlab("Episode")
p <- p + theme_bw() #+ ylim(c(0,750))
# Thin out x-axis tick labels and rename the fill legend
p <- p + scale_x_discrete(breaks = c(1,5,10,15,20,25,30)) + scale_fill_discrete(name = "Method")
# Enlarge text and legend keys; remove the legend background box
p <- p + theme(legend.text = element_text(size=15), legend.title = element_text(size=15), text = element_text(size=20))
p <- p + theme(legend.key.height = unit(3,"line"), legend.key.width = unit(3,"line"), legend.position = "right", legend.justification = c(1, 1), legend.background = element_rect(colour = NA, fill = NA))
p
# ggsave() writes the last displayed plot by default
ggsave(file = "runtimes.pdf")
|
55e5d645e9a9871411c380c58d25b9673d206cfb
|
b45876b9933df3f5e21f348e13fd66a8f990950a
|
/0.1_Codes_Invivo/1_4_zscore-div-sqrtP_heatmap_barchart.R
|
2af1233c50424d01b49c1c67d1cd61a98e17ee07
|
[] |
no_license
|
ScrippsPipkinLab/CRF_Screen
|
91988991806fd3f526e013bb6c9e224160410860
|
80300511482fd3d2003cc4ec88ef1ee555b259ef
|
refs/heads/master
| 2023-06-12T08:46:39.736104
| 2021-06-24T19:43:51
| 2021-06-24T19:43:51
| 164,017,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,781
|
r
|
1_4_zscore-div-sqrtP_heatmap_barchart.R
|
########## Converted Data analysis ##########
# Author: Huitian (Yolanda) Diao
# May 15th, 2019
# For t-test with percentiles
# Reference: https://bioconductor.statistik.tu-dortmund.de/packages/3.1/bioc/vignettes/ComplexHeatmap/inst/doc/ComplexHeatmap.html
########## Libraries ##########
library(dplyr)
library(tidyverse)
library(BSDA)
library(magrittr)
library(pheatmap)
library(RColorBrewer)
library(ggrepel)
library(ComplexHeatmap)
library(circlize)
library(colorspace)
library(GetoptLong)
###BiocManager::install("org.Mm.eg.db")
#library("org.Mm.eg.db")
#library(clusterProfiler)
#library(ggplot2)
########## Self-defined functions ##########
save_pheatmap_pdf <- function(x, filename, width=7, height=7) {
  # Write a pheatmap-style object to a PDF file.
  #
  # Args:
  #   x:        an object carrying a `gtable` component (e.g. a pheatmap result).
  #   filename: output PDF path.
  #   width, height: device size in inches.
  stopifnot(!missing(x))
  stopifnot(!missing(filename))
  pdf(filename, width=width, height=height)
  # Close the device even if drawing fails; the original leaked an open
  # graphics device when grid.draw() errored.
  on.exit(dev.off(), add = TRUE)
  grid::grid.newpage()
  grid::grid.draw(x$gtable)
  invisible(filename)
}
simpleCap <- function(x) {
  # Capitalize the first letter of every space-separated word in a single
  # string (e.g. "hello world" -> "Hello World"). Assumes `x` is length 1.
  words <- strsplit(x, " ")[[1]]
  capitalized <- paste0(toupper(substring(words, 1, 1)), substring(words, 2))
  paste(capitalized, collapse = " ")
}
in_vec <- function(refvec, vecx){
  # Membership indicator: 1 where an element of `vecx` occurs in `refvec`,
  # 0 otherwise.
  #
  # Vectorised with %in%; this also fixes the original c(1:length(vecx))
  # loop, which indexed out of range when `vecx` was empty.
  #
  # Args:
  #   refvec: reference vector of values to match against.
  #   vecx:   vector whose elements are tested for membership.
  # Returns: numeric vector of 0/1 flags, same length as `vecx`.
  as.numeric(vecx %in% refvec)
}
in_vec_name <- function(refvec, vecx){
  # Keep the elements of `vecx` that occur in `refvec`; replace all others
  # with "" (used to label only selected genes on plots).
  #
  # Vectorised with %in%; this also fixes the original c(1:length(vecx))
  # loop, which indexed out of range when `vecx` was empty.
  #
  # Args:
  #   refvec: reference vector of values to keep.
  #   vecx:   vector to filter.
  # Returns: character vector, same length as `vecx`, with non-members blanked.
  out_vec <- as.character(vecx)
  out_vec[!(vecx %in% refvec)] <- ""
  out_vec
}
# Save dendrogram k-means clusters from ComplexHeatmap
# Reference: https://github.com/jokergoo/ComplexHeatmap/issues/136
# Writes a two-column CSV (GeneID, Cluster) assigning every row of `c_tb`
# to the row cluster it received in the drawn ComplexHeatmap `c_heatmap`.
#
# Args:
#   c_heatmap: a ComplexHeatmap object for which row_order() is available.
#   c_tb:      the data frame the heatmap was built from (row names = gene IDs).
#   out_name:  path of the CSV file to write.
# NOTE(review): row_order(c_heatmap) is re-evaluated on every loop iteration
# even though `rcl.list` already holds the result, and `r.dend` is computed
# but never used -- candidates for cleanup, left as-is here.
k_means_save <- function(c_heatmap, c_tb, out_name) {
  # Leftover values from interactive debugging, intentionally commented out.
  #c_heatmap <- target.heatmap
  #c_tb <- target.tb
  #out_name <- "target_cluster.csv"
  r.dend <- row_dend(c_heatmap)
  rcl.list <- row_order(c_heatmap)
  #lapply(rcl.list, function(x) length(x))
  for (i in 1:length(row_order(c_heatmap))){
    if (i == 1) {
      # t(t(...)) turns the row-name vector into a one-column matrix.
      clu <- t(t(row.names(c_tb[row_order(c_heatmap)[[i]],])))
      out <- cbind(clu, paste("cluster", i, sep=""))
      colnames(out) <- c("GeneID", "Cluster")
    } else {
      clu <- t(t(row.names(c_tb[row_order(c_heatmap)[[i]],])))
      clu <- cbind(clu, paste("cluster", i, sep=""))
      out <- rbind(out, clu)
    }
  }
  write.csv(out, file=out_name, row.names=FALSE)
}
########## Main ##########
###----- Calculate z-score divided by p-value
# Disabled one-off step: reads per-gene z-scores and p-values and writes
# z / sqrt(p + 0.01) for each contrast. The +0.01 offset guards against
# division by zero when p == 0.
if (FALSE){
  wk.dir <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/5_zscore_div_sqrt_pval"
  setwd(wk.dir)
  z.p.file <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/4_t-test_by_gene/all_z-score_p.csv"
  z.p.tb <- read_csv(z.p.file)
  colnames(z.p.tb)  # interactive sanity check of the input columns
  z.p.tb <- z.p.tb %>%
    mutate(InputMinusAvg_p_sqrt = sqrt(InputMinusAvg_p + 0.01)) %>%
    mutate(Q3minusOther_p_sqrt = sqrt(Q3minusOther_p + 0.01)) %>%
    mutate(Q4minusQ1_p_sqrt = sqrt(Q4minusQ1_p + 0.01)) %>%
    mutate(InputMinusAvg_z_p = InputMinusAvg_z/InputMinusAvg_p_sqrt) %>%
    mutate(Q3minusOther_z_p = Q3minusOther_z / Q3minusOther_p_sqrt) %>%
    mutate(Q4minusQ1_z_p = Q4minusQ1_z / Q4minusQ1_p_sqrt) %>%
    select(gene_name, InputMinusAvg_z_p, Q3minusOther_z_p, Q4minusQ1_z_p)
  colnames(z.p.tb)  # interactive sanity check of the output columns
  write_csv(z.p.tb, "all_z-score_div_sqrt-p.csv")
}
###----- Heatmap
# Disabled one-off step: applies a sign-preserving square-root transform
# (sign(x) * sqrt(|x|)) to compress the adjusted scores, saves the result,
# and draws a pheatmap of all genes (rows) by the three contrasts (columns).
if (FALSE) {
  wk.dir <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/5_zscore_div_sqrt_pval"
  setwd(wk.dir)
  z.p.file <- "all_z-score_div_sqrt-p.csv"
  z.p.tb <- read_csv(z.p.file)
  # Convert adjusted z-score by sqrt
  # abs(x)/x extracts the sign (NaN when x == 0 -- assumed not to occur here).
  z.p.tb <- z.p.tb %>%
    mutate(Q4minusQ1 = abs(Q4minusQ1_z_p) / Q4minusQ1_z_p * sqrt(abs(Q4minusQ1_z_p))) %>%
    mutate(Q3minusOther = abs(Q3minusOther_z_p) / Q3minusOther_z_p * sqrt(abs(Q3minusOther_z_p))) %>%
    mutate(InputMinusAvg = abs(InputMinusAvg_z_p) / InputMinusAvg_z_p * sqrt(abs(InputMinusAvg_z_p))) %>%
    select(gene_name, Q4minusQ1, Q3minusOther, InputMinusAvg)
  write_csv(z.p.tb, "all_z-score_div_sqrt-p_sqrt.csv")
  # Plot heatmap
  z.p.tb <- z.p.tb %>% column_to_rownames("gene_name")
  # Reversed RdBu palette: blue = low, red = high.
  col.pal <- colorRampPalette(brewer.pal(n=11, name="RdBu"))
  col.use <- rev(col.pal(50))
  z.p.heatmap <- pheatmap(z.p.tb , color=col.use, cluster_cols = FALSE, show_rownames = FALSE)
  save_pheatmap_pdf(z.p.heatmap, "zscore_div_sqrt-pval.pdf", 5, 45)
}
##########-------------------- Bar plot
# Active step: for each of the three contrasts, draws a ranked horizontal bar
# plot of the transformed scores, colours the top/bottom quarter of genes,
# and labels the genes in `anno.vec` (here BAF-complex members).
if (TRUE) {
  wk.dir <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/5_zscore_div_sqrt_pval/Baf"
  setwd(wk.dir)
  z.p.file <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/5_zscore_div_sqrt_pval/all_z-score_div_sqrt-p_sqrt.csv"
  z.p.tb <- read_csv(z.p.file)
  #z.p.tb <- z.p.tb %>% column_to_rownames("gene_name")
  #####---------- Genes to annotate
  # Earlier annotation sets, kept for reference:
  #anno.vec <- c("Tbx21", "Prdm1", "Id2", "Runx3", "Ncor1", "Tet2", "Mbd2",
  #              "Ezh2", "Suv39h1", "Dnmt3a", "Kdm2b", "Rpa3", "Runx3",
  #              "Ing2", "Ing3", "Ing4", "Ing5", "Bop1",
  #              "Cd4", "Cd14")
  #anno.vec <- c("Myst3", "Myst4", "Brpf1", "Ing5", "Ing4", "Ing3",
  #              "Mll1", "Wdr5", "Rbbp5", "Ash2l", "Dpy30",
  #              "Cd4", "Runx3", "Tbx21",
  #              "Cxxc1", "Paf1")
  #anno.vec <- c("ACTL6A", "ARID1A", "ARID1B", "ARID2", "BRD7", "BRD9", "PBRM1", "PHF10",
  #              "SMARCA2", "SMARCA4", "SMARCB1", "SMARCC1", "SMARCC2", "SMARCD1", "SMARCD2",
  #              "SMARCD3", "SMARCE1", "Actl6a", "Actl6b", "Arid1a", "Arid1b", "Brd9", "Smarca2",
  #              "Smarca4", "Smarcb1", "Smarcc1", "Smarcc2", "Smarcd1", "Smarcd2", "Smarcd3", "Smarce1",
  #              "CHD3", "CHD4", "CHD5", "HDAC1", "HDAC2", "KDM1A",
  #
  #              "MBD2", "MBD3", "MTA1", "MTA2", "MTA3", "RBBP4", "RBBP7", "SIN3A", "SIN3B",
  #              "Chd3", "Chd4", "Chd5", "Hdac1", "Hdac2", "Mbd3", "Mta1", "Mta2",
  #              "Mta3", "Rbbp4", "Rbbp7",
  #
  #              "Cd4", "Runx3", "Tbx21", "Chd7")
  #anno.vec <- tolower(anno.vec)
  #anno.vec <- as.character(sapply(anno.vec, simpleCap))
  # Current annotation set: BAF (SWI/SNF) complex genes.
  anno.vec <- c("Smarca4", "Smarcb1", "Smarcc1", "Smarcc2", "Smarcd1", "Smarcd2", "Smarcd3", "Smarce1",
                "Actl6a", "Actl6b", "Arid1a", "Arid1b", "Brd9", "Smarca2")
  name.root <- "Baf"
  #####---------- Q4 minus Q1
  out.name <- paste(name.root, "Q4minusQ1.bar.pdf", sep="_")
  # Rank order
  z.p.tb <- z.p.tb %>% arrange(Q4minusQ1)
  # NOTE(review): this within() call assigns to `z.p.tb$gene_name` inside the
  # within() environment -- verify it actually relevels the gene_name column
  # rather than adding an oddly named column; repeated below for each contrast.
  z.p.tb <- within(z.p.tb, z.p.tb$gene_name <- factor(z.p.tb$gene_name, levels=z.p.tb$gene_name))
  # Set color for top and bottom quarter
  col_panel <- c( "deepskyblue", "snow2", "tomato")
  qt <- as.integer(floor(nrow(z.p.tb)/4))
  col.vec <- rep(col_panel[1], qt)                          # bottom quarter
  col.vec <- c(col.vec, rep(col_panel[2], nrow(z.p.tb)-2*qt)) # middle half
  col.vec <- c(col.vec, rep(col_panel[3], qt))              # top quarter
  z.p.tb$color_use <- col.vec
  # Select annotations
  # pointsize: 1 for annotated genes (dot visible), 0 otherwise.
  z.p.tb <- z.p.tb %>%
    mutate(pointsize = in_vec(anno.vec, gene_name)) %>%
    mutate(annoname = in_vec_name(anno.vec, gene_name))
  # Plot
  bar.plot <- ggplot(z.p.tb, aes(z.p.tb$gene_name, z.p.tb$Q4minusQ1, fill=col.vec)) +
    geom_col(alpha=0.7) +
    geom_point(size=z.p.tb$pointsize, stroke = 0) +
    scale_fill_manual(values=col_panel) +
    geom_text_repel(aes(label=annoname), force=5, min.segment.length=0) +
    coord_flip() +
    scale_y_continuous(position = "right", limits=c(-5.2, 5.2)) +
    geom_hline(yintercept=0, size=0.25) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_rect(fill = "white",colour = "white", size = 0.5, linetype = "solid"),
          axis.line.x = element_line(colour="black", size=0.5), axis.title.x = element_blank(),
          axis.ticks.y = element_blank(), axis.text.y=element_blank(), axis.title.y = element_blank(),
          legend.position = "none")
  bar.plot
  ggsave(out.name, width=6, height=9, units="cm")
  #####---------- Q3 minus other
  # Same plot for the Q3-vs-other contrast.
  out.name <- paste(name.root, "Q3minusOther.bar.pdf", sep="_")
  # Rank order
  z.p.tb <- z.p.tb %>% arrange(Q3minusOther)
  z.p.tb <- within(z.p.tb, z.p.tb$gene_name <- factor(z.p.tb$gene_name, levels=z.p.tb$gene_name))
  # Set color for top and bottom quarter
  col_panel <- c("deepskyblue", "snow2", "tomato")
  qt <- as.integer(floor(nrow(z.p.tb)/4))
  col.vec <- rep(col_panel[1], qt)
  col.vec <- c(col.vec, rep(col_panel[2], nrow(z.p.tb)-2*qt))
  col.vec <- c(col.vec, rep(col_panel[3], qt))
  z.p.tb$color_use <- col.vec
  # Select annotations
  z.p.tb <- z.p.tb %>%
    mutate(pointsize = in_vec(anno.vec, gene_name)) %>%
    mutate(annoname = in_vec_name(anno.vec, gene_name))
  # Plot
  bar.plot <- ggplot(z.p.tb, aes(z.p.tb$gene_name, z.p.tb$Q3minusOther, fill=col.vec)) +
    geom_col(alpha=0.7) +
    geom_point(size=z.p.tb$pointsize, stroke = 0) +
    scale_fill_manual(values=col_panel) +
    geom_text_repel(aes(label=annoname), force=5, min.segment.length=0) +
    coord_flip() +
    scale_y_continuous(position = "right", limits=c(-5.2, 5.2)) +
    geom_hline(yintercept=0, size=0.25) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_rect(fill = "white",colour = "white", size = 0.5, linetype = "solid"),
          axis.line.x = element_line(colour="black", size=0.5), axis.title.x = element_blank(),
          axis.ticks.y = element_blank(), axis.text.y=element_blank(), axis.title.y = element_blank(),
          legend.position = "none")
  bar.plot
  ggsave(out.name, width=6, height=9, units="cm")
  #####---------- Input v.s. output
  # Same plot for the input-vs-average-output contrast.
  out.name <- paste(name.root, "InputMinusAvg.bar.pdf", sep="_")
  # Rank order
  z.p.tb <- z.p.tb %>% arrange(z.p.tb$InputMinusAvg)
  z.p.tb <- within(z.p.tb, z.p.tb$gene_name <- factor(z.p.tb$gene_name, levels=z.p.tb$gene_name))
  # Set color for top and bottom quarter
  col_panel <- c("deepskyblue", "snow2", "tomato")
  qt <- as.integer(floor(nrow(z.p.tb)/4))
  col.vec <- rep(col_panel[1], qt)
  col.vec <- c(col.vec, rep(col_panel[2], nrow(z.p.tb)-2*qt))
  col.vec <- c(col.vec, rep(col_panel[3], qt))
  z.p.tb$color_use <- col.vec
  # Select annotations
  z.p.tb <- z.p.tb %>%
    mutate(pointsize = in_vec(anno.vec, gene_name)) %>%
    mutate(annoname = in_vec_name(anno.vec, gene_name))
  # Plot
  bar.plot <- ggplot(z.p.tb, aes(z.p.tb$gene_name, z.p.tb$InputMinusAvg, fill=col.vec)) +
    geom_col(alpha=0.7) +
    geom_point(size=z.p.tb$pointsize, stroke = 0) +
    scale_fill_manual(values=col_panel) +
    geom_text_repel(aes(label=annoname), force=5, min.segment.length=0) +
    coord_flip() +
    scale_y_continuous(position = "right", limits=c(-5.2, 5.2)) +
    geom_hline(yintercept=0, size=0.25) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_rect(fill = "white",colour = "white", size = 0.5, linetype = "solid"),
          axis.line.x = element_line(colour="black", size=0.5), axis.title.x = element_blank(),
          axis.ticks.y = element_blank(), axis.text.y=element_blank(), axis.title.y = element_blank(),
          legend.position = "none")
  bar.plot
  ggsave(out.name, width=6, height=9, units="cm")
}
##########-------------------- Separated heatmap
# Disabled step: collects genes in the top/bottom eighth of any contrast,
# draws a k-means (km=6) ComplexHeatmap of just those genes, and saves the
# resulting row-cluster assignments to CSV.
if (FALSE) {
  wk.dir <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/5_zscore_div_sqrt_pval"
  setwd(wk.dir)
  z.p.file <- "all_z-score_div_sqrt-p_sqrt.csv"
  z.p.tb <- read_csv(z.p.file)
  #z.p.tb <- z.p.tb %>% column_to_rownames("gene_name")
  colnames(z.p.tb)
  ###----- Find genes that are on top / bottom of list in different cases
  #--- Q4 v.s. Q1
  z.p.tb <- z.p.tb %>% arrange(Q4minusQ1)
  qt <- as.integer(floor(nrow(z.p.tb)/8))  # one eighth of the genes
  q4_q1_dn <- z.p.tb$gene_name[1: qt]
  q4_q1_up <- z.p.tb$gene_name[(nrow(z.p.tb)-qt+1): nrow(z.p.tb)]
  #--- Q3 v.s. Other
  z.p.tb <- z.p.tb %>% arrange(Q3minusOther)
  qt <- as.integer(floor(nrow(z.p.tb)/8))
  q3_o_dn <- z.p.tb$gene_name[1: qt]
  q3_o_up <- z.p.tb$gene_name[(nrow(z.p.tb)-qt+1): nrow(z.p.tb)]
  #--- Input v.s. Avg
  z.p.tb <- z.p.tb %>% arrange(InputMinusAvg)
  qt <- as.integer(floor(nrow(z.p.tb)/8))
  in_a_dn <- z.p.tb$gene_name[1: qt]
  in_a_up <- z.p.tb$gene_name[(nrow(z.p.tb)-qt+1): nrow(z.p.tb)]
  ###----- All genes that are on top / bottom in any case
  all.tgt <- c(q4_q1_dn, q4_q1_up, q3_o_dn, q3_o_up, in_a_dn, in_a_up)
  all.tgt <- unique(all.tgt)
  length(all.tgt)  # interactive check of target count
  #####---------- Heatmaps
  #col.pal <- colorRampPalette(brewer.pal(n=11, name="RdBu"))
  #col.use <- rev(col.pal(50))
  ###----- Heatmap of all targets
  target.tb <- z.p.tb %>% filter(gene_name %in% all.tgt) %>% column_to_rownames("gene_name")
  #target.heatmap <- pheatmap(target.tb , color=col.use, cluster_cols = FALSE,
  #                           clustering_distance_rows="canberra")
  set.seed(123)  # k-means cluster assignment is stochastic; fix the seed
  pdf("target_cluster_2.pdf", width=4, height=24)
  target.heatmap <- Heatmap(target.tb, name="foo", km=6, cluster_columns=FALSE,
                            clustering_distance_rows = "pearson")
  target.heatmap
  dev.off()
  ###----- Save dendrogram clusters
  k_means_save(target.heatmap, target.tb, "target_cluster.csv")
  # NOTE(review): target.tb had gene_name moved to row names above; write_csv
  # drops row names, so this CSV loses the gene identifiers -- verify intent.
  write_csv(target.tb, "target.tb.csv")
}
##########-------------------- Pathway
# Disabled step: GO-term enrichment (BP/CC/MF via clusterProfiler::enrichGO)
# for the top/bottom quarter gene lists of each contrast; writes one CSV and
# one dotplot PDF per ontology per list.
if (FALSE){
  wk.dir <- "/Volumes/Yolanda/CRF_Screen/InVivo/1_1_Norm/20190516/5_zscore_div_sqrt_pval"
  setwd(wk.dir)
  # Inner disabled branch: (re)build topQuarter.csv from the score table.
  if (FALSE){
    z.p.file <- "all_z-score_div_sqrt-p_sqrt.csv"
    z.p.tb <- read_csv(z.p.file)
    qt <- as.integer(floor(nrow(z.p.tb)/4))  # one quarter of the genes
    #####---------- Q3 minus other
    z.p.tb <- z.p.tb %>% arrange(Q3minusOther)
    z.p.tb <- within(z.p.tb, z.p.tb$gene_name <- factor(z.p.tb$gene_name, levels=z.p.tb$gene_name))
    q3_o_dn <- z.p.tb$gene_name[1:qt]
    q3_o_up <- z.p.tb$gene_name[(nrow(z.p.tb)-qt+1): nrow(z.p.tb)]
    #####---------- Q4 minus Q1
    z.p.tb <- z.p.tb %>% arrange(Q4minusQ1)
    z.p.tb <- within(z.p.tb, z.p.tb$gene_name <- factor(z.p.tb$gene_name, levels=z.p.tb$gene_name))
    q4_q1_dn <- z.p.tb$gene_name[1:qt]
    q4_q1_up <- z.p.tb$gene_name[(nrow(z.p.tb)-qt+1): nrow(z.p.tb)]
    #####---------- Input v.s. output
    z.p.tb <- z.p.tb %>% arrange(z.p.tb$InputMinusAvg)
    z.p.tb <- within(z.p.tb, z.p.tb$gene_name <- factor(z.p.tb$gene_name, levels=z.p.tb$gene_name))
    in_a_dn <- z.p.tb$gene_name[1:qt]
    in_a_up <- z.p.tb$gene_name[(nrow(z.p.tb)-qt+1): nrow(z.p.tb)]
    out.tb <- tibble(q3_o_dn=q3_o_dn, q3_o_up=q3_o_up,
                     q4_q1_dn=q4_q1_dn, q4_q1_up=q4_q1_up,
                     in_a_dn=in_a_dn, in_a_up=in_a_up)
    write_csv(out.tb, "topQuarter.csv")
  }
  in.df <- read.csv("topQuarter.csv")
  gn_list_names <- colnames(in.df)
  q3_o_dn <- in.df$q3_o_dn
  q3_o_up <- in.df$q3_o_up
  q4_q1_dn <- in.df$q4_q1_dn
  q4_q1_up <- in.df$q4_q1_up
  in_a_dn <- in.df$in_a_dn
  in_a_up <- in.df$in_a_up
  gn_list <- list(q3_o_dn, q3_o_up, q4_q1_dn, q4_q1_up, in_a_dn, in_a_up)
  # One enrichment pass per gene list (hard-coded to the 6 lists above).
  for (x in c(1:6)){
    #x <- 1
    i <- paste(gn_list_names[x], sep="")  # output file-name prefix
    genes.i <- as.character(unlist(gn_list[x]))
    # Map gene symbols (ALIAS) to ENTREZ IDs.
    # NOTE(review): with dplyr attached, bare select() is dplyr::select, not
    # AnnotationDbi::select -- this likely needs AnnotationDbi::select(...).
    genes.i.id <- select(org.Mm.eg.db, genes.i, c("ENTREZID"), "ALIAS")
    #genes.i.id$ENTREZID
    egoBP <- enrichGO(gene = genes.i.id$ENTREZID, keyType = 'ENTREZID', OrgDb = org.Mm.eg.db, ont = "BP", pAdjustMethod = "none", pvalueCutoff = 0.05, readable = TRUE)
    egoCC <- enrichGO(gene = genes.i.id$ENTREZID, keyType = 'ENTREZID', OrgDb = org.Mm.eg.db, ont = "CC", pAdjustMethod = "none", pvalueCutoff = 0.05, readable = TRUE)
    egoMF <- enrichGO(gene = genes.i.id$ENTREZID, keyType = 'ENTREZID', OrgDb = org.Mm.eg.db, ont = "MF", pAdjustMethod = "none", pvalueCutoff = 0.05, readable = TRUE)
    # Dotplot visualization
    if (!is.null(egoBP)){
      pdf.name <- paste(i,"_BP_dotplot.pdf",sep="")
      csv.name <- paste(i,"_BP_dotplot.csv",sep="")
      write.csv(egoBP@result, file=csv.name, row.names=FALSE)
      egoBP.dotplot <- dotplot(egoBP, x="count", showCategory=25)
      ggsave(pdf.name, egoBP.dotplot, device = "pdf", width = 30, height = 20, units = "cm")
    }
    if(!is.null(egoCC)){
      csv.name <- paste(i,"_CC_dotplot.csv",sep="")
      pdf.name <- paste(i,"_CC_dotplot.pdf",sep="")
      write.csv(egoCC@result, file=csv.name, row.names=FALSE)
      egoCC.dotplot <- dotplot(egoCC, x="count", showCategory=25)
      ggsave(pdf.name, egoCC.dotplot, device = "pdf", width = 30, height = 20, units = "cm")
    }
    if(!is.null(egoMF)){
      csv.name <- paste(i,"_MF_dotplot.csv",sep="")
      pdf.name <- paste(i,"_MF_dotplot.pdf",sep="")
      write.csv(egoMF@result, file=csv.name, row.names=FALSE)
      egoMF.dotplot <- dotplot(egoMF, x="count", showCategory=25)
      # NOTE(review): rebuilds the file name inline instead of using pdf.name
      # computed two lines up (same value; pdf.name would be clearer).
      ggsave(paste(i,"_MF_dotplot.pdf",sep=""), egoMF.dotplot, device = "pdf", width = 30, height = 20, units = "cm")
    }
  }
}
|
5c7ab0775d06dfbb91f66cc213342b7a66284b3b
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/IceCast/man/reg_info.Rd
|
876a9a7f1ab41970befc7413eb3e25f2ba560358
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,515
|
rd
|
reg_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regionInformation.R
\docType{data}
\name{reg_info}
\alias{reg_info}
\title{List of information about each region}
\format{An object of class \code{list} of length 10.}
\usage{
reg_info
}
\description{
A region information list, \code{reg_info}, is a list of ten items
\code{regions},\code{start_lines}, \code{start_lines_coords},
\code{start_coords}, \code{end_coords}, \code{out}, \code{lines}, \code{dist},
\code{loop}, and \code{angs}. The package contains a \code{reg_info} object
which is typically what is used for all analyses. However, it would be
possible to redefine the regions if desired by making a new \code{reg_info}
object.
}
\details{
\code{regions}: list of \code{SpatialPolygons} objects corresponding
to each region.
\code{start_lines}: list of \code{SpatialLines} object giving the line from
which each mapping or contour generation will start. For the central Arctic
region, a single \code{SpatialPoint} is used instead. List ordered the same
as \code{reg_info$regions}
\code{start_lines_coords}: list of matrices giving the coordinates that
approximately match \code{reg_info$start_lines}, except that they extend to
touch the end point of the first and last fixed line. For the central Arctic
region, the coordinate of the \code{start_line} is just repeated. List ordered
the same as \code{reg_info$regions}
\code{start_coords}: list of matrices giving the coordinates from which the
lines start. List ordered the same as \code{reg_info$regions}
\code{end_coords}: list of matrices giving the coordinates between the end
points of the first and last fixed line. List ordered the same as
\code{reg_info$regions}
\code{out}: list of \code{SpatialPolygons} objects that border
\code{reg_info$start_lines}, but are outside the region. These are used when
building new polygons to determine if points are outside the region of
interest. List ordered the same as \code{reg_info$regions}
\code{lines}: list giving the \code{SpatialLines} objects that correspond to
the line on which contours are mapped and built.
\code{dist}: list for each region with one item for each line
\code{reg_info$lines} giving the lengths at which restrictions on the line
lengths occur. The first element for all entries is 0 and the last element
is the length of the line. Elements in between refer to the starting and
ending lengths on which points cannot be placed. The first list index is
ordered the same as \code{reg_info$regions} and the second list index is
ordered as the corresponding lines in \code{reg_info$lines}
\code{loop}: vector gives a Boolean for each region. The value \code{TRUE}
indicates that the \code{lines} are mapped in a circle around a fixed point.
The value \code{FALSE} indicates that the lines are mapped along a line on
land. The first element, corresponding to the central Arctic region is
\code{TRUE}. All others are \code{FALSE}. Elements ordered the same as
\code{reg_info$regions}
\code{angs}: list of vectors giving the angles of the corresponding
\code{reg_info$lines}. Elements ordered the same as \code{reg_info$regions}
}
\examples{
data(reg_info)
names(reg_info)
}
\references{
The regions in this object have been substantially modified from
the following region mask:
National Snow and Ice Data Center, 2017: Region mask for the
northern hemisphere
\url{http://nsidc.org/data/polar-stereo/tools_masks.html}.
}
\keyword{datasets}
|
159d689a2792593abb150eee6039d99fc2248cdc
|
d9dd343f826dbc5b9fc50657c99795a3dd434b5a
|
/app.R
|
d8602667b5777a485f10b308eaa0e0df613a5246
|
[
"MIT"
] |
permissive
|
b-tomhave/ShinyCensus
|
84e3be075a57dbec70a1748a92db029895f53cee
|
ccc28fc8577edfeb2700448fbd869f28bdf9f5eb
|
refs/heads/main
| 2023-06-22T23:37:45.865067
| 2021-07-23T17:09:49
| 2021-07-23T17:09:49
| 386,975,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,689
|
r
|
app.R
|
# R Shiny App For ACS Census Data Viewing
##############################################################################
# Libraries
library(shiny)
library(shinyjs) #For javascript collapse box button
library(shinydashboard)
library(shinydashboardPlus)
library(shinythemes)
library(plotly)
library(tidytransit) # For read_gtfs
library(shinyWidgets) # For pickerInput
library(data.table)
library(leaflet)
library(sf)
library(gtools) # For mixed sort
library(DT)
library(stringr) # For string formatting
library(tidycensus) # For downloading Census data
library(tmap) # For creating tmap
library(tmaptools) # For reading and processing spatial data related to tmap
library(tigris) # Get zip code list
library(scales)
library(gtfsFunctions) #devtools::install_github("b-tomhave/gtfsFunctions", force = TRUE)
library(plyr) # For round_any
library(readr)
library(htmlTable) # For popup table
library(leafpop)
# Install these to use the aggregate_map function that was previously in tmaptools
# library(devtools)
# install_github("mtennekes/oldtmaptools")
library(oldtmaptools)
# Get API Key For Get_ACS From R Script Hidden From Github and load
source("CensusAPIKey.R")  # defines CENSUS_API_KEY (kept out of version control)
#census_api_key(CENSUS_API_KEY, install = T, Overwirte = T)
# Allow input zip file to be up to 200mb in size
options(shiny.maxRequestSize = 200*1024^2)
# Load tigris as sf object
options(tigris_class = "sf")
# Cache acs data for quicker loading
options(tigris_use_cache = TRUE)
Sys.getenv("CENSUS_KEY")  # NOTE(review): return value is discarded -- confirm this call is needed
# Set Inputs (top-level assignment uses <- for consistency with the rest of the file)
acsYear <- 2016            # ACS 5-year survey end year used throughout the app
palletBinNumber <- 5       # number of colour bins for choropleth palettes
# Quintile Percent Bins
percentBins <- c(0, .05, 0.1, 0.25, 0.5, 0.75, 1)
labels_PctBins <- c("< 5%", "5% - 10%", "10% - 25%", "25% - 50%", "50% - 75%", "> 75%")
# Format numeric values as comma-separated dollar strings for map labels,
# e.g. 1234 -> "$1,234". `digits` sets the number of decimal places shown.
make_dollar <- function(x, digits = 0) {
  formatted <- formatC(x, digits = digits, format = "f", big.mark = ",")
  paste0("$", formatted)
}
# Reference data used by both tabs: state FIPS codes, MSA polygons, and the
# full list of ACS variables available for the selected year.
fips_codes <- tidycensus::fips_codes
# Get MSAs
msas <- tigris::core_based_statistical_areas(cb = TRUE)
# Create Table of Potential Variables/ACS Tables to Search
possibleTablesStatic <- load_variables(year = acsYear, dataset = "acs5")%>%
select(tableName ="name", concept, label)
# Tab 1 ACS Filter Map Variables
# Each entry is list(<ACS variable ID>, <display label>).
# NOTE(review): several labels look wrong and should be verified against the
# ACS codebook -- the female age entries repeat the male bracket codes
# (B01001_011..015), "Hispaic" is a typo, "PAll population" looks garbled,
# and C17002_008's label shows [C17002_002]. These are runtime strings and
# are left unchanged here.
filterMapVars <- list(list("B01003_001", "[B01003_001] Total Population"),
                      list("B01002_001", "[B01002_001] Median Age"),
                      list("B01001_011", "[B01001_011] Male 25-29"),
                      list("B01001_012", "[B01001_012] Male 30-34"),
                      list("B01001_013", "[B01001_013] Male 35-39"),
                      list("B01001_014", "[B01001_014] Male 40-44"),
                      list("B01001_015", "[B01001_015] Male 45-49"),
                      list("B01001_035", "[B01001_011] Female 25-29"),
                      list("B01001_036", "[B01001_012] Female 30-34"),
                      list("B01001_037", "[B01001_013] Female 35-39"),
                      list("B01001_038", "[B01001_014] Female 40-44"),
                      list("B01001_039", "[B01001_015] Female 45-49"),
                      list("B02001_002", "[B02001_002] Population (White Alone)"),
                      list("B02001_003", "[B02001_003] Population (Black Alone)"),
                      list("B02001_004", "[B02001_004] Population (American Indian/Alaska Native Alone)"),
                      list("B02001_005", "[B02001_005] Population (Asian Alone)"),
                      list("B02001_006", "[B02001_006] Population (Hawaiian/Pacific Islander Alone)"),
                      list("B02001_007", "[B02001_007] Population (Other Race Alone)"),
                      list("B02001_008", "[B02001_008] Population (2+ Race)"),
                      list("B03001_003", "[B03001_003] Population (Hispaic/Latino)"),
                      list("B08101_025", "[B08101_025] Population Taking Transit To Work"),
                      list("B08101_033", "[B08101_033] Population Walking To Work"),
                      list("B17001_002", "[B17001_002] Population with below poverty-level income"),
                      list("B17001_031", "[B17001_031] Population with at or above poverty-level income"),
                      list("B19013_001", "[B19013_001] Median Household income in past 12 months (in selected year $)"),
                      list("B25064_001", "[B25064_001] Median Gross Rent"),
                      list("B08201_002", "[B08201_002] Zero-Vehicle Households"),
                      list("C17002_001", "[C17002_001] PAll population for poverty threshold"),
                      list("C17002_008", "[C17002_002] Population living above 200% the poverty threshold"))
# Named vector: names = display labels, values = ACS variable IDs.
filterMapVarName <- unlist(lapply(filterMapVars, `[[`, 1))
filterMapVarDescription <- unlist(lapply(filterMapVars, `[[`, 2))
names(filterMapVarName) <- filterMapVarDescription
# Tab 2 Key Vars
keyVarList <- list(list("B01003_001", "[B01003_001] Total Population"),
                   list("B01001_002", "[B01001_002] Total Male Population (All Race)"),
                   list("B01001_026", "[B01001_026] Total Female Population (All Race)"),
                   list("B01002_001", "[B01002_001] Median Age"),
                   list("B01002_002", "[B01002_002] Median Age (Male-All Races)"),
                   list("B01002_003", "[B01002_003] Median Age (Female-All Races)"),
                   list("B02001_002", "[B02001_002] Population (White Alone)"),
                   list("B02001_003", "[B02001_003] Population (Black Alone)"),
                   list("B02001_004", "[B02001_004] Population (American Indian/Alaska Native Alone)"),
                   list("B02001_005", "[B02001_005] Population (Asian Alone)"),
                   list("B02001_006", "[B02001_006] Population (Hawaiian/Pacific Islander Alone)"),
                   list("B02001_007", "[B02001_007] Population (Other Race Alone)"),
                   list("B02001_008", "[B02001_008] Population (2+ Race)"),
                   list("B03001_003", "[B03001_003] Population (Hispaic/Latino)"),
                   list("B08101_009", "[B08101_009] Population Driving Alone To Work"),
                   list("B08101_025", "[B08101_025] Population Taking Transit To Work"),
                   list("B08101_033", "[B08101_033] Population Walking To Work"),
                   list("B08201_002", "[B08201_002] Zero-Vehicle Households"),
                   list("B09001_001", "[B09001_001] Under-18 population"),
                   list("B17001_002", "[B17001_002] Population with below poverty-level income"),
                   list("B17001_031", "[B17001_031] Population with at or above poverty-level income"),
                   list("B19013_001", "[B19013_001] Median Household income in past 12 months (in selected year $)"),
                   list("B25064_001", "[B25064_001] Median Gross Rent"))
# Named vector for the "Key Variables Only" dropdown on Tab 2.
keyVarName <- unlist(lapply(keyVarList, `[[`, 1))
keyVarDescription <- unlist(lapply(keyVarList, `[[`, 2))
names(keyVarName) <- keyVarDescription
##############################################################################
# UI Side of App
##############################################################################
# Shiny UI: three-tab navbar app.
#   Tab 1 -- leaflet map of ACS tracts with demographic slider filters.
#   Tab 2 -- tmap explorer for any single ACS variable.
#   Tab 3 -- searchable lookup table of ACS variable codes.
ui <- navbarPage("Shiny Census", id="nav",
  # Map Page
  # Tab 1: Filtered ACS Mapping ----------------------------------
  tabPanel("Filtered ACS Mapping",
    div(class="outer",
      tags$head(
        # Include custom CSS
        includeCSS("www/styles.css")
      ),
      leafletOutput("filteredAcsMap", width="100%", height="100%"),
      # Input Selections for Tab 2
      # Floating, draggable control panel over the map.
      absolutePanel(id = "controls2", class = "panel panel-default", fixed = TRUE,
        draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
        width = 330, height = "auto",
        radioButtons("inputGeomMethod", label = h4("Select ACS Geometry Method"),
          choices = list("State Abbreviations" = 1,
                         "GTFS Service Area Estimation" = 2),
          selected = 1),
        # If State Abbreviations Input Selected Show That Option
        conditionalPanel(
          "input.inputGeomMethod == 1",
          selectInput(
            "acsStateSelect2",
            label = h4("Select State(s) To View Census Tracts"),
            choices = unique(fips_codes$state),
            selected = NULL,
            multiple = TRUE,
            selectize = F
          )
        ),
        # If GTFS Input Selection Show That Option
        conditionalPanel(
          "input.inputGeomMethod == 2",
          fileInput("selectInputFile", h5("Select GTFS Zip File:"),
            multiple = FALSE,
            accept = ".zip")
        ),
        actionButton("loadDataButton2", "Map Data"),
        br(),br(),
        # Map Filters
        # Sliders are rendered server-side (uiOutput) because their ranges
        # depend on the loaded ACS data.
        h4('Adjust Sliders to Filter Tracts'),
        uiOutput("formattedPctAge25_50Slider"),
        uiOutput("formattedPctBipocSlider"),
        uiOutput("formattedHHIncomeSlider"),
        uiOutput("formattedPctBelowPovertySlider"),
        textOutput("tractCount"),
        br(),
        uiOutput("acsTableSelect2UI"),
        paste("Data From:", acsYear, "ACS")
      )
    )),
  # Tab 2: Explore ACS Variables ----------------------------------
  tabPanel("Explore ACS Variables",
    div(class="outer",
      tags$head(
        # Include custom CSS
        includeCSS("www/styles.css")
      ),
      # If not using custom CSS, set height of leafletOutput to a number instead of percent
      tmapOutput("acsMap", width="100%", height="100%"),
      # Shiny versions prior to 0.11 should use class = "modal" instead.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
        draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
        width = 330, height = "auto",
        h2("ACS Viewer"),
        helpText("Initial load time of 5-10 seconds."),
        selectInput(
          "acsStateSelect",
          label = h4("Select State(s) To View Census Tracts"),
          choices = unique(fips_codes$state),
          selected = NULL,
          multiple = TRUE,
          selectize = F
        ),
        selectInput(
          "keyVarSwitch",
          label = h4("ACS Table/Variable Filter"),
          choices = c("Key Variables Only", "All Variables"),
          selected = "Key Variables Only",
          multiple = F,
          selectize = F
        ),
        # Choices populated server-side from keyVarName / possibleTablesStatic.
        selectizeInput('acsTableSelect',
          label = 'Select Table/Variable Name',
          choices = NULL),
        prettySwitch(
          inputId = "pctTotalPopSwitch",
          label = "Scale Variable by Total Tract Population",
          fill = TRUE,
          status = "primary",
          value = F),
        actionButton("loadDataButton", "Map Data"),
        br(),br(),
        paste("Data From:", acsYear, "ACS")
      )
    )
  ),
  # Tab 3: ACS Lookup Table of Variables ---------------------------
  tabPanel("ACS Lookup Table",
    h2("Possible ACS Tables for Mapping"),
    br(),
    DT::dataTableOutput("possibleTables")
  )
)
##############################################################################
# Server Side of App
##############################################################################
server <- function(input, output, session, ...) {
# Tab 1: ACS Map Filtering ---------------------------------
# Load Default Map
output$filteredAcsMap <- renderLeaflet({
leaflet()%>%
#Add Basemap Options
addProviderTiles(providers$CartoDB.Positron,
group = "Sketch (Default)")%>%
addProviderTiles(providers$Stamen.Toner,
group = "Black & White")%>%
addProviderTiles(providers$OpenStreetMap,
group = "OpenStreetMap")%>%
addProviderTiles(providers$Esri.WorldImagery,
group = "Aerial")%>%
setView(lat = 39.809253334942575,
lng = -98.55663889876627,
zoom = 5)%>%
#Reset Zoom Button
addEasyButton(easyButton(
icon = 'fa-home',
title = 'Reset view',
onClick = JS("function(btn, map) {
var groupLayer = map.layerManager.getLayerGroup('filteredTracts');
map.fitBounds(groupLayer.getBounds());
}"
)))%>%
# Layers control
addLayersControl(
baseGroups = c("Sketch (Default)", "Black & White", "OpenStreetMap","Aerial"),
options = layersControlOptions(collapsed = T),
position = c("topleft"))%>%
addMeasure(position = c("topleft")) #Add distance (and area measure)
})
# Update acs data selection based on file selection and get all variables specified in filterMapVars
observeEvent(input$loadDataButton2,{
# Load Data Differently Depending on if by state abbreviation of GTFS
# If by state abbreviation...
if (input$inputGeomMethod == 1){
req(input$acsStateSelect2)
# Can only run 25 variables in one go so for more this executes twice
filteredACSTracts <- get_acs(
key = CENSUS_API_KEY,
geography = "tract",
variables = as.character(filterMapVarName),
state = input$acsStateSelect2,
year = acsYear,
survey = "acs5",
geometry = TRUE,
output = "wide" # get data in wide format for easier mapping
)%>%st_transform(crs=4326)
}
else if(input$inputGeomMethod == 2){
filteredACSTracts <- gtfsFunctions::getServiceAreaACS(gtfsFunctions::formatGTFSObject(input$selectInputFile$datapath),
variables = as.character(filterMapVarName),
geography = "tract",
year = acsYear,
survey = "acs5",
tidyCensusAPIKey = CENSUS_API_KEY)%>%st_transform(crs=4326)
}
# Set Map View to Data Extent:
bbox <- st_bbox(filteredACSTracts)%>%as.vector()
leafletProxy("filteredAcsMap") %>%
fitBounds(bbox[1], bbox[2], bbox[3], bbox[4])
# Create Custom ACS Columns
filteredACSTracts$TotalPop <- filteredACSTracts$B01003_001E
# Remove Tracts with No Pop
filteredACSTracts <- filteredACSTracts[!(is.na(filteredACSTracts$TotalPop) | filteredACSTracts$TotalPop ==0),]
# Race Vars as Pct of Total Tract Pop
filteredACSTracts$PctWhite <- round(100*(filteredACSTracts$B02001_002E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctBIPOC <- 100 - filteredACSTracts$PctWhite
filteredACSTracts$PctBlack <- round(100*(filteredACSTracts$B02001_003E/filteredACSTracts$TotalPop),1)
# Derive display variables on the tract table. Race/ethnicity shares are each
# ACS estimate column divided by the tract's total population, expressed as a
# percentage rounded to one decimal. (B02001_* / B03001_* presumably come from
# the ACS race and Hispanic-origin tables -- TODO confirm against the variable
# pull earlier in this file.)
filteredACSTracts$PctNative <- round(100*(filteredACSTracts$B02001_004E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctAsian <- round(100*(filteredACSTracts$B02001_005E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctPacIsland <- round(100*(filteredACSTracts$B02001_006E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctOtherRace <- round(100*(filteredACSTracts$B02001_007E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctTwoPlusRace <- round(100*(filteredACSTracts$B02001_008E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctHispLatino <- round(100*(filteredACSTracts$B03001_003E/filteredACSTracts$TotalPop),1)
# 25-50 Age Pct of Total Tract Pop: sum the five-year male/female age-band
# estimates (B01001_011E..015E and _035E..039E), then express as % of TotalPop.
filteredACSTracts$Male_Age25_50 <- filteredACSTracts$B01001_011E + filteredACSTracts$B01001_012E + filteredACSTracts$B01001_013E + filteredACSTracts$B01001_014E +filteredACSTracts$B01001_015E
filteredACSTracts$Female_Age25_50 <- filteredACSTracts$B01001_035E + filteredACSTracts$B01001_036E + filteredACSTracts$B01001_037E + filteredACSTracts$B01001_038E +filteredACSTracts$B01001_039E
filteredACSTracts$Total_Age25_50 <- filteredACSTracts$Male_Age25_50 + filteredACSTracts$Female_Age25_50
filteredACSTracts$Pct_Male_Age25_50 <- round(100*(filteredACSTracts$Male_Age25_50 / filteredACSTracts$TotalPop),1)
filteredACSTracts$Pct_Female_Age25_50 <- round(100*(filteredACSTracts$Female_Age25_50 / filteredACSTracts$TotalPop),1)
filteredACSTracts$Pct_Total_Age25_50 <- round(100*(filteredACSTracts$Total_Age25_50 / filteredACSTracts$TotalPop),1)
# Other Vars as Pct of Total Tract Pop
filteredACSTracts$PctTransit2Wrk <- round(100*(filteredACSTracts$B08101_025E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctWalk2Wrk <- round(100*(filteredACSTracts$B08101_033E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctBelowPoverty <- round(100*(filteredACSTracts$B17001_002E/filteredACSTracts$TotalPop),1)
filteredACSTracts$PctAtOrAbvPoverty <- round(100*(filteredACSTracts$B17001_031E/filteredACSTracts$TotalPop),1)
# NOTE(review): B08201_002E counts zero-vehicle HOUSEHOLDS but is divided by
# total POPULATION here, so "% 0-Car HH" is not households/households --
# confirm whether the intended denominator is B08201_001E (total households).
filteredACSTracts$PctZeroCarHH <- round(100*(filteredACSTracts$B08201_002E/filteredACSTracts$TotalPop),1)
#Poverty: population below 200% of the poverty level = everyone in the
# poverty-ratio universe (C17002_001E) minus those at 2.00x and above (C17002_008E).
filteredACSTracts$PopBelow200PctPovertyLevel<- filteredACSTracts$C17002_001E - filteredACSTracts$C17002_008E
filteredACSTracts$PctBelow2TimesPovLevel <- round(100*(filteredACSTracts$PopBelow200PctPovertyLevel/filteredACSTracts$C17002_001E),1)
# Other Non Pct Vars: rename the raw median income / gross rent estimates.
filteredACSTracts$MedianHHInc <- filteredACSTracts$B19013_001E
filteredACSTracts$MedianGrossRent <- filteredACSTracts$B25064_001E
# Lookup of variable code -> human-readable label, used both to populate the
# fill-variable dropdown and to title the map legend. Each entry is
# list(code, label); the pairs are unpacked into two parallel character
# vectors and combined into a named vector (names = labels, values = codes).
finalFilteredVarsList <- list(#list("GEOID", "GEOID"),
                              list("TotalPop", "Total Population"),
                              list("Pct_Total_Age25_50", "% Population Ages 25-50"),
                              list("PctBIPOC","% Population (BIPOC)"),
                              list("MedianHHInc","Median HH Income (2016$)"),
                              list("PctBelowPoverty","% Below Poverty Line"),
                              list("PctAtOrAbvPoverty","% At or Above Poverty Line"),
                              list("PctBelow2TimesPovLevel", "% Population Below 2x Poverty Line"),
                              list("PctZeroCarHH","% 0-Car HH"),
                              list("PctWhite","% Population (White)"),
                              list("PctBlack","% Population (Black)"),
                              list("PctAsian","% Population (Asian)"),
                              list("PctHispLatino","% Population (Hispanic/Latino)"),
                              list("PctTransit2Wrk","% Transit to Work"),
                              list("PctWalk2Wrk","% Walk to Work"),
                              list("MedianGrossRent","Median Gross Rent (2016$)"))
# First element of each pair = column name, second = display label.
varCodes <- vapply(finalFilteredVarsList, function(pair) pair[[1]], character(1))
formattedDescription <- vapply(finalFilteredVarsList, function(pair) pair[[2]], character(1))
formatted_finalFilteredVarsList <- setNames(varCodes, formattedDescription)
# Filter Filtered Df By Column: keep only the display columns defined above.
# NOTE(review): this presumes filteredACSTracts is an sf object (later code
# calls st_is_empty on it), so the geometry column survives `[` subsetting.
filteredACSTracts <- filteredACSTracts[as.character(formatted_finalFilteredVarsList)]
# Set Min & Max Values for Each Slider. round_any() (from plyr) snaps the
# observed data range outward to a tidy step: income to the nearest $10k,
# percentages to the nearest 5%. NAs are dropped before taking min/max.
minHHIncome = round_any(min(na.omit(filteredACSTracts$MedianHHInc)),
                        10000, f = floor)
maxHHIncome = round_any(max(na.omit(filteredACSTracts$MedianHHInc)),
                        10000, f = ceiling)
# Percentage sliders always start at 0 so the full lower range is selectable.
minPctBipoc = 0
maxPctBipoc = round_any(max(na.omit(filteredACSTracts$PctBIPOC)),
                        5, f = ceiling)
minPctAge25_50 = 0
maxPctAge25_50 = round_any(max(na.omit(filteredACSTracts$Pct_Total_Age25_50)),
                           5, f = ceiling)
minPctBelowPov = 0
maxPctBelowPov = round_any(max(na.omit(filteredACSTracts$PctBelowPoverty)),
                           5, f = ceiling)
# Formatted Slider Inputs -----------------------------------------------------
# All four filter sliders share the same snap-to-step noUiSlider configuration
# and start with the full range selected; they differ only in id, label,
# bounds, margin, step, and number format, so a local helper builds each one.
makeRangeSlider <- function(id, label, lo, hi, margin, step, fmt) {
  shinyWidgets::noUiSliderInput(
    inputId = id,
    label = label,
    min = lo, max = hi,
    margin = margin,
    value = c(lo, hi),
    step = step,
    behaviour = c("snap"),
    format = fmt
  )
}
# Shared number format for the three percentage sliders.
pctSliderFormat <- wNumbFormat(decimals = 0,
                               thousand = ",",
                               suffix = "%")
output$formattedPctAge25_50Slider <- renderUI({
  makeRangeSlider("formattedPctAge25_50Slider_raw", "Tract % Aged 25-50",
                  minPctAge25_50, maxPctAge25_50,
                  margin = 10, step = 5, fmt = pctSliderFormat)
})
output$formattedPctBipocSlider <- renderUI({
  makeRangeSlider("formattedPctBipocSlider_raw", "Tract % BIPOC",
                  minPctBipoc, maxPctBipoc,
                  margin = 10, step = 5, fmt = pctSliderFormat)
})
output$formattedHHIncomeSlider <- renderUI({
  # Income slider uses a "$" prefix and $5k steps instead of the "%" format.
  makeRangeSlider("formattedHHIncomeSlider_raw",
                  paste0("Median Annual HH Income (", acsYear, " dollars)"),
                  minHHIncome, maxHHIncome,
                  margin = 20000, step = 5000,
                  fmt = wNumbFormat(decimals = 0,
                                    thousand = ",",
                                    prefix = "$"))
})
output$formattedPctBelowPovertySlider <- renderUI({
  makeRangeSlider("formattedPctBelowPovertySlider_raw",
                  "Tract % Below Poverty Line",
                  minPctBelowPov, maxPctBelowPov,
                  margin = 2, step = 1, fmt = pctSliderFormat)
})
# Dropdown that chooses which variable fills (colors) the tract choropleth.
# Choices are the named code vector built above, so users see labels while the
# server receives column names. "TotalPop" is the initial fill variable.
# FIX: corrected the user-facing label typo "Cloropleth" -> "Choropleth".
output$acsTableSelect2UI <- renderUI({
  selectizeInput('acsTableSelect2',
                 label = h5('Select Choropleth Fill Color'),
                 choices = formatted_finalFilteredVarsList,
                 selected = "TotalPop")
})
# Variables To Listen For Change to Update Map: bundle the fill-variable
# dropdown and all four slider inputs into one reactive so a single
# observeEvent below fires when any of them changes.
mapVarChange <- reactive({
  list(input$acsTableSelect2,
       input$formattedPctAge25_50Slider_raw,
       input$formattedPctBipocSlider_raw,
       input$formattedHHIncomeSlider_raw,
       input$formattedPctBelowPovertySlider_raw)
})
# UPDATE MAP BASED ON INPUT VARIABLES --------------------
# Redraw the tract polygons and legend whenever the fill variable or any
# slider range changes (all bundled in mapVarChange()).
observeEvent(mapVarChange(), {
  # Ensure both inputs have values -- req() silently aborts this observer
  # until the renderUI-created sliders exist on the client.
  req(input$formattedHHIncomeSlider_raw)
  req(input$acsTableSelect2)
  # Create Copy of Initial Table For Mapping
  mappingTable <- filteredACSTracts
  # Set Color Column Values Before Filtering: copy the user-selected column
  # into a fixed name so one palette/popup/legend code path serves every variable.
  mappingTable$colorCol = mappingTable[[input$acsTableSelect2]]
  # Subset Table By Filters: a tract must fall inside ALL four slider ranges.
  mappingTable <- mappingTable%>%filter(MedianHHInc >= input$formattedHHIncomeSlider_raw[1] & MedianHHInc <= input$formattedHHIncomeSlider_raw[2] & PctBIPOC >= input$formattedPctBipocSlider_raw[1] & PctBIPOC <= input$formattedPctBipocSlider_raw[2] & Pct_Total_Age25_50 >= input$formattedPctAge25_50Slider_raw[1] & Pct_Total_Age25_50 <= input$formattedPctAge25_50Slider_raw[2] & PctBelowPoverty >= input$formattedPctBelowPovertySlider_raw[1] & PctBelowPoverty <= input$formattedPctBelowPovertySlider_raw[2])
  #Remove Empty Rows From Table (features with empty geometry cannot be drawn)
  nonEmptyMappingTable <- mappingTable[!st_is_empty(mappingTable), ]
  # Remove records with empty geometry/units and plot if any records remain
  if (nrow(nonEmptyMappingTable) !=0){
    # Load Palette: binned green scale over the selected variable's values.
    filteredLeafletPal <- colorBin("Greens", nonEmptyMappingTable$colorCol,
                                   palletBinNumber, pretty = FALSE)
    # Create Columns for Popup -- human-readable, comma/percent formatted
    # copies of the stats shown in the click popup.
    nonEmptyMappingTable$`Total Tract Population` <- prettyNum(nonEmptyMappingTable$TotalPop, big.mark = ",")
    nonEmptyMappingTable$`BIPOC` <- paste(prettyNum(nonEmptyMappingTable$PctBIPOC, big.mark = ","), "%")
    nonEmptyMappingTable$`Aged 25-50` <- paste(nonEmptyMappingTable$Pct_Total_Age25_50, "%")
    nonEmptyMappingTable$`Taking Transit to Work` <- paste(nonEmptyMappingTable$PctTransit2Wrk, "%")
    nonEmptyMappingTable$`Below Poverty Line` <- paste(nonEmptyMappingTable$PctBelowPoverty, "%")
    nonEmptyMappingTable$`Median HH Income` <- prettyNum(nonEmptyMappingTable$MedianHHInc, big.mark = ",")
    nonEmptyMappingTable$`Selected Variable` <- prettyNum(nonEmptyMappingTable$colorCol, big.mark = ",")
    # Update Map in place via leafletProxy: clear the previous tract layer and
    # legend, then add the newly filtered polygons (reprojected to WGS84).
    leafletProxy("filteredAcsMap") %>%
      clearGroup(group = "filteredTracts")%>%
      clearControls()%>%
      addPolygons(data = st_transform(nonEmptyMappingTable, crs = 4326),
                  #layerId = ~GEOID,
                  group = "filteredTracts",
                  fillColor = ~filteredLeafletPal(colorCol),
                  color = "grey",
                  fillOpacity = 0.4,
                  weight = 1,
                  smoothFactor = 0.2,
                  popup = popupTable(st_drop_geometry(nonEmptyMappingTable[, c("Total Tract Population","BIPOC",
                                                                              "Aged 25-50", "Taking Transit to Work",
                                                                              "Below Poverty Line", "Median HH Income",
                                                                              "Selected Variable")]),
                                     row.numbers = FALSE,
                                     feature.id = FALSE),
                  highlightOptions = highlightOptions(color = "Purple", weight = 4,
                                                      bringToFront = T))%>%
      # Legend title = the human-readable label for the selected code.
      # NOTE(review): st_transform() is called here without a target crs --
      # confirm this is intentional (the values do not depend on projection).
      addLegend("bottomleft", pal = filteredLeafletPal, values = st_transform(nonEmptyMappingTable)$colorCol,
                title = names(formatted_finalFilteredVarsList)[formatted_finalFilteredVarsList == input$acsTableSelect2],
                labFormat = labelFormat(big.mark = ",", digits = 0),
                opacity = 1
      )
    # Report how many tracts passed the filters.
    output$tractCount <- renderText({paste(nrow(nonEmptyMappingTable), "tracts meet criteria.")})
  }else{
    # Nothing matched: leave the cleared map and tell the user why.
    output$tractCount <- renderText({"NO TRACTS for selected filter(s). Change filters to update map"})
  }
})
})
# c(leaflet::providers$OpenStreetMap, leaflet::providers$Stamen.Toner,
# leaflet::providers$Stamen.Terrain, leaflet::providers$Esri.WorldImagery,
# leaflet::providers$Esri.NatGeoWorldMap, leaflet::providers$CartoDB.Positron)
# Info/Code for Tab 2: General ACS Overview ---------------------------------
# When the key-variable switch flips, repopulate the Tab 2 variable dropdown
# with either the curated key variables or every available ACS table name,
# then push the choices to the client (server-side selectize).
observeEvent(input$keyVarSwitch,{
  choiceSet <- if (input$keyVarSwitch == "Key Variables Only") {
    keyVarName
  } else {
    unique(possibleTablesStatic$tableName)
  }
  updateSelectizeInput(session, 'acsTableSelect', choices = choiceSet, server = TRUE)
})
# Update acs data selection based on file selection for Tab 2.
# On button press: pull tract-level geometry + the selected variable (plus
# total population B01003_001) for the chosen state, then render a tmap
# choropleth, optionally scaled to percent of tract population.
observeEvent(input$loadDataButton,{
  req(input$acsStateSelect)
  # get_acs (presumably tidycensus -- loaded elsewhere in this file) fetches
  # the ACS 5-year estimates with geometry for the selected state.
  currentSelectedStateTracts <- get_acs(
    key = CENSUS_API_KEY,
    geography = "tract",
    variables = c(input$acsTableSelect, "B01003_001"),
    state = input$acsStateSelect,
    year = acsYear,
    survey = "acs5",
    geometry = TRUE,
    output = "wide" # get data in wide format for easier mapping
  )
  # Alter Values and Labels if Scaled to Pct Total Pop
  # Set Style for Pct of Total Pop: wide output suffixes estimates with "E",
  # hence the paste0(<code>, "E") column lookups below.
  if (input$pctTotalPopSwitch == TRUE){
    currentSelectedStateTracts$colorCol = round(currentSelectedStateTracts[[paste0(input$acsTableSelect,"E")]] / currentSelectedStateTracts[[paste0("B01003_001","E")]],2)
    # Pre-formatted percent string for the popup (label_percent presumably
    # from scales -- confirm it is attached).
    currentSelectedStateTracts$pctColorCol = label_percent(0.1)(currentSelectedStateTracts[[paste0(input$acsTableSelect,"E")]] / currentSelectedStateTracts[[paste0("B01003_001","E")]])
    popupText = c("Selected Variable: " = paste0(input$acsTableSelect,"E"),
                  "% of Total Tract Pop: " = "pctColorCol",
                  "Total Tract Population: " = "B01003_001E")
    # Set Cloropleth Formatting: fixed breaks at the predefined percent bins.
    cloroplethStyle = "fixed"
    breakVals = percentBins# quantile(currentSelectedStateTracts$colorCol, percentBins, na.rm = T)
    fillLabels = labels_PctBins
    colorPal = "Greens" #"-RdBu"
    legendTitle = paste(names(keyVarName)[which(keyVarName == input$acsTableSelect)], "% of Total Tract Pop.")
  }else{
    # Raw counts: let tmap pick breaks with the "fisher" style.
    currentSelectedStateTracts$colorCol = currentSelectedStateTracts[[paste0(input$acsTableSelect,"E")]]
    popupText = c("Selected Variable: " = "colorCol",
                  "Total Tract Population: " = "B01003_001E")
    # Set Cloropleth Formatting
    cloroplethStyle = "fisher"
    colorPal = "Greens"
    breakVals = NULL
    fillLabels = NULL
    legendTitle = names(keyVarName)[which(keyVarName == input$acsTableSelect)]
  }
  #percentBins
  # Remove records with empty geometry/units and plot
  updatedMap <- tm_shape(currentSelectedStateTracts[!st_is_empty(currentSelectedStateTracts), ], unit = "mi") +
    tmap_options(max.categories = palletBinNumber) + # Set Max Number of levels
    tm_fill(
      group = "ACS Data Layer",
      col = "colorCol",
      n = palletBinNumber, # 5 colors
      labels = fillLabels,
      palette = colorPal,
      style = cloroplethStyle,
      breaks = breakVals,
      contrast = c(0.3, 1),
      title = legendTitle,
      textNA = "Not Available",
      colorNA = "gray",
      id = "NAME",
      popup.vars = popupText
    ) +
    # tm_layout(basemaps = leaflet::providers$OpenStreetMap)+
    tm_borders(col = "darkgray") +
    tm_view(
      alpha = 0.5,
      #basemaps = "Stamen.TonerLite",
      view.legend.position = c("right", "bottom")
    )
  # Update Map
  output$acsMap <- renderTmap({updatedMap})
  # highlightOptions = highlightOptions(color = "white",
  #                                     weight = 5, bringToFront = F, opacity = 1)
})
# Tab 3 (ACS Data Table Possibilities) ----------
# Render the static catalog of available ACS tables as a searchable DT table:
# per-column filters on top, 25 rows per page, column sorting disabled.
# NOTE(review): `ordering=F` uses the F shorthand -- prefer FALSE, since F can
# be reassigned.
output$possibleTables <- DT::renderDataTable(DT::datatable(possibleTablesStatic,
                                                           filter = 'top',
                                                           options = list(pageLength = 25, ordering=F),
                                                           rownames= FALSE,
                                                           escape = FALSE,
                                                           selection = 'none')) # Set Strings as Factor so that filter is a dropdown not typing
} # End of Server
##############################################################################
# Launch the application with the UI and server defined above.
shinyApp(ui, server)
##############################################################################
|
e8d48d382ee33328762770c45259628309f1c5da
|
9e6dbca191e7ae538a467ca6941b8bb85ff0da5f
|
/inst/functions/utils.R
|
026c1a02d246915d1058c8504ae8beaa34b1d92c
|
[
"MIT"
] |
permissive
|
seabbs/covid19Vaccination
|
7ca2f5ce8d3942bf4146bbd8d678d9e5aa676f15
|
bb14013f76adea0d2886c2a812b65c25503100a4
|
refs/heads/master
| 2023-06-07T12:01:40.436239
| 2021-06-28T09:15:24
| 2021-06-28T09:15:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
utils.R
|
# Download the OWID US state-level COVID-19 vaccination CSV and cache it as a
# local snapshot.
#
# url:  source CSV; defaults to the OWID GitHub raw file (the original
#       hard-coded value), now a parameter so alternate mirrors/dates work.
# dest: path of the snapshot to write; defaults to the original hard-coded
#       location inside this package's inst/data directory.
download_vaccine_data_state = function(url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv',
                                       dest = '~/git/covid19Vaccination/inst/data/us_state_vaccinations_210611.csv')
{
  data = read.csv(file = url)
  # row.names = FALSE spelled out (F is reassignable) and keeps the CSV free
  # of a spurious row-name column.
  write.csv(data, dest, row.names = FALSE)
}
|
34282dc44224621a6c4b641e6aa6b9d686f18989
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rtip/examples/arpt.Rd.R
|
f180742cf6d063f49a50c7b2010c672e5acf2ead
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
arpt.Rd.R
|
# Extracted example for rtip::arpt (at-risk-of-poverty threshold).
library(rtip)
### Name: arpt
### Title: At-risk-of-poverty threshold
### Aliases: arpt
### ** Examples
# Load the bundled EU-SILC sample, restrict it to Austria, and compute the
# at-risk-of-poverty threshold for that subset.
data(eusilc2)
ATdataset <- setupDataset(eusilc2, country = "AT")
arpt(ATdataset)
|
d11f2a3187a0c945dfee048359c4615fc66ecfdf
|
19aae92539022ec2989da3c86d47f52d9f3df297
|
/packages/ATOdig/R/ATOdig-package.R
|
e4a45198cdf5948fb4e201d5078dd206f78a3541
|
[] |
no_license
|
jmp75/govhack2013
|
f4c70b286141a1a9bf5821d3fd880071a755d856
|
1fef7253f2d7a6c6daf099a9b3d424890d7df44f
|
refs/heads/master
| 2021-01-19T07:55:05.730513
| 2013-06-02T06:57:01
| 2013-06-02T06:57:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
ATOdig-package.R
|
#' A package to access the ATO Excel based data from R
#'
#' \tabular{ll}{
#' Package: \tab ATOdig\cr
#' Type: \tab Package\cr
#' Version: \tab 0.1-3\cr
#' Date: \tab 2013-06-03\cr
#' License: \tab LGPL 2.1\cr
#' }
#'
#'
#'
#'
#' FIX: roxygen2 has no `@imports` tag -- the correct tag is `@import`, one
#' package per tag; `@imports` would be ignored and the NAMESPACE would lack
#' the import directives.
#' @import rClr
#' @import stringr
#' @name ATOdig-package
#' @aliases ATO.dig
#' @docType package
#' @title Access the ATO Excel based data from R
#' @author Jean-Michel Perraud \email{jean-michel.perraud_at_csiro.au}
#' @keywords package rClr .NET Mono ATO
NULL
|
250423fc1b867ce3aaf788f0328d548f0523cf49
|
ae8a72dd35911a3a9d6b472b152e22a382d67d3b
|
/varTest/results/assembleDiagnostics.R
|
e2c34aa7ac63e328b05d3bd5bd00d845031164d3
|
[] |
no_license
|
inspktrgadget/atlantis
|
883a1555c3c930007ebc475dc3dd5fca14e2d717
|
3a324ea7194f2a93ad54f6f7ce0f4e55dc2419e6
|
refs/heads/master
| 2021-09-10T23:03:37.983689
| 2018-04-03T20:45:54
| 2018-04-03T20:45:54
| 116,151,163
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,128
|
r
|
assembleDiagnostics.R
|
# code to assemble diagnostic data for varModels
# NOTE(review): plyr is attached before tidyverse on purpose? dplyr masks
# plyr::summarize/mutate -- the ddply(..., summarize, ...) call below will
# receive dplyr::summarize. Confirm this is the intended behavior.
library(plyr)
library(tidyverse)
library(grid)
library(Rgadget)
varmod_dir <- "~/gadget/models/atlantis/varTest"
# NOTE(review): files are listed from .../varModel/varModels but loaded from
# .../varModels/<x>/... -- confirm which directory layout is correct.
modfiles <- dir(sprintf("%s/varModel/varModels", varmod_dir))
# Each WGTS.Rdata is expected to define an object named `out`, which is
# collected into a list with one entry per variance model.
out_data <-
lapply(modfiles, function(x) {
load(sprintf("%s/varModels/%s/WGTS/WGTS.Rdata", varmod_dir, x))
return(out)
})
# ## fit statistics - not a great way to do this for all varModels
# resTable <- fit$resTable[tail(head(names(fit$resTable),-2),-1)]
#
# summary_plot <-
# ggplot(filter(fit$likelihoodsummary, year != "all"),
# aes(as.numeric(year), likelihood.value)) +
# geom_point() + facet_wrap(~component, scales="free_y") +theme_bw()+
# xlab("Year") + ylab("Score")
# put together survey indices for all varModels.
# For each model: tag indices as autumn ("aut") vs spring ("igfs") by name
# prefix, append per-year biomass rows (length-weight conversion
# w = 0.008249352 * L^3.026918 applied to each quantity), then pool across
# models and take the median and a 95% interval of the predictions.
# NOTE: `.` inside rbind.fill(ddply(., ...)) refers to the piped (mutated) data.
si_dat <-
lapply(1:length(out_data), function(x) {
tmp <-
out_data[[x]]$sidat %>%
mutate(survey = ifelse(substr(name, 1, 3) == "aut",
"aut", "igfs")) %>%
rbind.fill(ddply(., ~year+survey, summarize,
observed = sum(observed*0.008249352*lower^3.026918),
predict = sum(predict*0.008249352*lower^3.026918),
upper = sum(upper*0.008249352*lower^3.026918),
lower = sum(lower*0.008249352*lower^3.026918),
length = "Biomass")) %>%
mutate(model_ind = x) %>%
ungroup()
}) %>%
do.call("rbind", .) %>%
group_by(year, survey, length) %>%
# NOTE(review): fractional indices like length(predict)*0.025 are truncated
# toward zero when subsetting (an index of 0 yields an empty result for
# small n) -- quantile() would be the robust alternative; left as-is here.
summarize(obs_median = median(observed),
pred_median = median(predict),
pred_lower = sort(predict)[length(predict)*0.025],
pred_upper = sort(predict)[length(predict)*0.975])
# Length distributions: pool the per-model catch length distributions and
# summarize the median and 95% interval of predictions per length bin.
ldist_data <-
lapply(1:length(out_data), function(x) {
out_data[[x]]$catchdist.fleets
}) %>%
do.call("rbind", .) %>%
group_by(name, year, step, lower) %>%
summarize(observed = median(observed),
pred_median = median(predicted),
pred_lower = sort(predicted)[length(predicted)*0.025],
pred_upper = sort(predicted)[length(predicted)*0.975])
# Age-length distributions: collapse lengths within each age (age labels like
# "age3" are converted to numeric), then summarize across models as above.
aldist_data <-
lapply(1:length(out_data), function(x) {
out_data[[x]]$catchdist.fleets %>%
filter(name %in% c("aldist_spr", "aldist_aut", "aldist_comm")) %>%
group_by(name, year, step, age) %>%
summarize(observed = sum(observed, na.rm=T),
predicted = sum(predicted, na.rm=T)) %>%
mutate(age = as.numeric(gsub("age", "", age)))
}) %>%
do.call("rbind", .) %>%
group_by(name, year, step, age) %>%
summarize(observed = median(observed),
pred_median = median(predicted),
pred_lower = sort(predicted)[length(predicted)*0.025],
pred_upper = sort(predicted)[length(predicted)*0.975])
# Fleet suitability (selection) curves: pool the positive suitability values
# from every variance model and summarize the median and a 95% interval per
# year/stock/fleet/length.
selection_data <-
  lapply(seq_along(out_data), function(x) {
    out_data[[x]]$suitability
  }) %>%
  do.call("rbind", .) %>%
  filter(suit > 0) %>%
  group_by(year, stock, fleet, length) %>%
  summarize(suit_median = median(suit),
            suit_lower = sort(suit)[ceiling(length(suit)*0.025)],
            # BUG FIX: the upper bound previously used 0.0975 (~10th
            # percentile). The 95% interval's upper bound is the 97.5th
            # percentile (0.975), matching every other *_upper in this script.
            suit_upper = sort(suit)[ceiling(length(suit)*0.975)])
# Growth: median and 95% interval of length-at-age across the variance models.
grw_data <-
lapply(1:length(out_data), function(x) {
out_data[[x]]$stock.growth
}) %>%
do.call("rbind", .) %>%
group_by(age) %>%
summarize(length_median = median(length),
length_lower = sort(length)[length(length)*0.025],
length_upper = sort(length)[length(length)*0.975])
# Fishing mortality for cod: median and 95% interval per year.
# NOTE(review): the column is literally named F (the shorthand for FALSE) --
# it works because the data-frame column masks it inside summarize, but it is
# easy to misread.
f_data <-
lapply(1:length(out_data), function(x) {
out_data[[x]]$res.by.year
}) %>%
do.call("rbind", .) %>%
filter(stock == "cod") %>%
group_by(year) %>%
summarize(f_median = median(F),
f_lower = sort(F)[length(F)*0.025],
f_upper = sort(F)[length(F)*0.975])
# Persist all assembled diagnostics for the plotting scripts.
save(si_dat, ldist_data, aldist_data, grw_data, selection_data, f_data,
file = sprintf("%s/results/varMod/diagnostic_data.RData", varmod_dir))
|
8cb4bfc4072d24bc0326c77774160526dc30b7b9
|
334145f4753d39c1024d6e4f256d30ee50fe657e
|
/man/KMedoids.Rd
|
e5810d39e7ed8389bc72cbac7310d5587468c7cd
|
[] |
no_license
|
cran/TSdist
|
6aaefaefd78c37fbfb07efe164cdb44c19fc2f53
|
d28f6f0c3aa4c5004caf33724b5c7fc064846553
|
refs/heads/master
| 2022-09-15T21:13:30.275246
| 2022-08-31T08:40:02
| 2022-08-31T08:40:02
| 19,747,325
| 5
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,527
|
rd
|
KMedoids.Rd
|
\name{KMedoids}
\alias{KMedoids}
\title{
K medoids clustering for a time series database using the selected distance measure.
}
\description{
Given a specific distance measure and a time series database, this function provides the K-medoids clustering result. Furthermore, if the ground truth clustering is provided, the associated F-value is also computed.}
\usage{
KMedoids(data, k, ground.truth, distance, ...)
}
\arguments{
\item{data}{
Time series database saved in a numeric matrix, a list, an \code{mts} object, a \code{zoo} object or \code{xts} object.}
\item{k}{
Integer value which represents the number of clusters.}
\item{ground.truth}{
Numerical vector which indicates the ground truth clustering of the database.}
\item{distance}{
Distance measure to be used. It must be one of: \code{"euclidean"}, \code{"manhattan"}, \code{"minkowski"}, \code{"infnorm"}, \code{"ccor"}, \code{"sts"}, \code{"dtw"}, \code{"keogh_lb"}, \code{"edr"}, \code{"erp"}, \code{"lcss"}, \code{"fourier"}, \code{"tquest"}, \code{"dissimfull"}, \code{"dissimapprox"}, \code{"acf"}, \code{"pacf"}, \code{"ar.lpc.ceps"}, \code{"ar.mah"}, \code{"ar.mah.statistic"}, \code{"ar.mah.pvalue"}, \code{"ar.pic"}, \code{"cdm"}, \code{"cid"}, \code{"cor"}, \code{"cort"}, \code{"wav"}, \code{"int.per"}, \code{"per"}, \code{"mindist.sax"}, \code{"ncd"}, \code{"pred"}, \code{"spec.glk"}, \code{"spec.isd"},
\code{"spec.llr"}, \code{"pdc"}, \code{"frechet"})
}
\item{...}{
Additional parameters required by the chosen distance measure.
}
}
\details{
This function is useful to evaluate the performance of different distance measures in the task of clustering time series.
}
\value{
\item{clustering}{
Numerical vector providing the clustering result for the database.
}
\item{F}{
F-value corresponding to the clustering result.
}
}
\author{
Usue Mori, Alexander Mendiburu, Jose A. Lozano.
}
\seealso{
To calculate the distance matrices of time series databases the \code{\link{TSDatabaseDistances}} is used.
}
\examples{
# The example.database3 synthetic database is loaded
data(example.database3)
tsdata <- example.database3[[1]]
groundt <- example.database3[[2]]
# Apply K-medoids clustering for different distance measures
KMedoids(data=tsdata, ground.truth=groundt, k=5, "euclidean")
KMedoids(data=tsdata, ground.truth=groundt, k=5, "cid")
KMedoids(data=tsdata, ground.truth=groundt, k=5, "pdc")
}
|
0981f1d5ec9f1fc34f1a18d06ccea5c0e836a9f6
|
6e7eed058c0dd6d0d777b54a6738d3458b8064be
|
/plot1.R
|
844e65062c2f8e8ba064d3d09367906c06a097d5
|
[] |
no_license
|
bolognarossoblu/ExData_Plotting1
|
37ba57e040731db7fa1bad27402e2948657126c5
|
d6627a4b1715e54f2f0c32ff6c48579da32017e9
|
refs/heads/master
| 2021-01-18T14:55:00.558238
| 2015-02-09T00:13:11
| 2015-02-09T00:13:11
| 30,487,737
| 0
| 0
| null | 2015-02-08T10:26:38
| 2015-02-08T10:26:38
| null |
UTF-8
|
R
| false
| false
| 618
|
r
|
plot1.R
|
# Plot 1. Course Project 1. Exploratory Data Analysis.
# Reads the household power-consumption file, keeps only the two target days
# (filtered in SQL while reading, so the full file never enters memory as a
# data frame), and writes a histogram of Global_active_power to plot1.png.
# Load package
# To install package enter: install.packages("sqldf")
require(sqldf)
# Load data and get subset: read.csv.sql applies the WHERE clause during the
# read. Dates are the raw d/m/yyyy strings from the file.
file <- c("household_power_consumption.txt")
data_subset <- read.csv.sql(file, header = T, sep=";", sql = "select * from file where (Date == '1/2/2007' OR Date == '2/2/2007')" )
# Open png graphic device, draw histogram, and close graphic device
png(filename = "plot1.png", width = 480, height = 480, bg = "transparent")
hist(data_subset$Global_active_power, xlab="Global Active Power (kilowatts)", main="Global Active Power", col = "red")
dev.off()
|
57ff757a52789e6da77f73e8ae93978577e8a390
|
2aba8aef55e7cbf1a70565bddd386f680f1d3396
|
/2019-10-17-fuel-poverty/script.R
|
77d74d9920951a975bb17eef9d589cb872265a16
|
[] |
no_license
|
traffordDataLab/charticles
|
c45755fb1b2c404e9072f2bb19d45f42f01097ac
|
e6f19ac4d6a6e2acd2e6edcc54711af679160fed
|
refs/heads/master
| 2023-04-30T21:58:49.526318
| 2023-04-20T14:38:20
| 2023-04-20T14:38:20
| 156,720,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,514
|
r
|
script.R
|
## Fuel poverty ##
# Source: Department for Business, Energy & Industrial Strategy
# URL: https://www.gov.uk/government/statistics/sub-regional-fuel-poverty-data-2019
# Licence: Open Government Licence 3.0
# Downloads LSOA-level fuel-poverty figures, joins them to Trafford LSOA
# boundaries, and renders/saves a choropleth of the fuel-poor household share.
library(tidyverse) ; library(httr) ; library(readxl) ; library(sf) ; library(viridis)
source("https://github.com/traffordDataLab/assets/raw/master/theme/ggplot2/theme_lab.R")
# load data ---------------------------
# Download the BEIS workbook to a temp file (read_xlsx cannot read from a URL).
tmp <- tempfile(fileext = ".xlsx")
GET(url = "https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/808294/Fuel_poverty_sub-regional_tables_2019.xlsx",
    write_disk(tmp))
# Sheet 6 holds the LSOA table; keep Trafford rows and reshape into the lab's
# standard indicator schema.
df <- read_xlsx(tmp, sheet = 6, range = "A3:H32847") %>%
  filter(`LA Name` == "Trafford") %>%
  mutate(indicator = "Proportion of households in fuel poverty",
         period = "2017",
         measure = "Proportion",
         unit = "Households") %>%
  select(lsoa11cd = `LSOA Code`, lsoa11nm = `LSOA Name`,
         area_code = `LA Code`, area_name = `LA Name`,
         indicator, period, measure, unit,
         value = `Proportion of households fuel poor (%)`)
# Fetch Trafford LSOA boundaries from the ONS API and attach the indicator.
sf <- left_join(st_read(paste0("https://ons-inspire.esriuk.com/arcgis/rest/services/Census_Boundaries/Lower_Super_Output_Areas_December_2011_Boundaries/MapServer/2/query?where=UPPER(lsoa11nm)%20like%20'%25", URLencode(toupper("Trafford"), reserved = TRUE), "%25'&outFields=lsoa11cd&outSR=4326&f=geojson")),
               df, by = "lsoa11cd")
# plot data ---------------------------
ggplot(sf) +
  geom_sf(aes(fill = value), color = "#FFFFFF", size = 0.5, alpha = 0.8) +
  scale_fill_viridis(option = "E", discrete = F,
                     label = function(x) paste0(x, "%"),
                     direction = -1,
                     guide = guide_colourbar(
                       direction = "vertical",
                       barwidth = unit(3, units = "mm"),
                       title.position = 'top',
                       title.vjust = 1)) +
  labs(x = NULL, y = NULL,
       title = "Proportion of households in fuel poverty",
       subtitle = "Trafford, 2017",
       caption = "Source: BEIS | @traffordDataLab\n Contains Ordnance Survey data © Crown copyright and database right 2019",
       fill = NULL) +
  coord_sf(datum = NA) +
  theme_lab() +
  theme(legend.position = "right",
        legend.text = element_text(size = 10))
# write data ---------------------------
# ggsave() with no plot argument saves the last plot displayed.
write_csv(df, "data.csv")
ggsave("plot.svg", dpi = 300, scale = 1)
ggsave("plot.png", dpi = 300, scale = 1)
|
61ba974182783cfd28b1637a7d652de0bce6c68f
|
c2cc9e7bde76f30f0c63f7068bdde39cf91aa0f8
|
/Unit 5 - Text Analytics/Assignment 5/automating_reviews_in_medicine.R
|
82daeefb8f0b5b6a4e19a818d61c1f796ec481ae
|
[] |
no_license
|
arubino322/The_Analytics_Edge
|
5319e3d538c682ace9c8c077792935581841cdfb
|
a6137fe80a8023eaab63a77700fb274f0785d4b6
|
refs/heads/master
| 2016-09-06T10:55:51.039815
| 2015-08-26T22:06:50
| 2015-08-26T22:06:50
| 41,329,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,847
|
r
|
automating_reviews_in_medicine.R
|
# Automating reviews in medicine: classify paper abstracts as clinical trials.
# Load the labelled corpus (titles + abstracts + trial indicator).
trials = read.csv("clinical_trial.csv", stringsAsFactors=FALSE)
summary(trials)
str(trials)
# Longest abstract and its character count.
which.max(nchar(trials$abstract))
nchar(trials$abstract[664])
#3708
#OR
max(nchar(trials$abstract))
# How many observations have an empty abstract.
sum(nchar(trials$abstract)==0)
#OR
table(nchar(trials$abstract)==0)
# Shortest title.
which.min(nchar(trials$title))
trials$title[1258]
#let's create some corpera for title and abstract and do all the preprocessing
#/that we do
library(tm)
library(SnowballC)
corpusTitle = Corpus(VectorSource(trials$title))
corpusAbstract = Corpus(VectorSource(trials$abstract))
#make them lowercase
corpusTitle = tm_map(corpusTitle, tolower)
corpusAbstract = tm_map(corpusAbstract, tolower)
corpusTitle = tm_map(corpusTitle, PlainTextDocument)
corpusAbstract = tm_map(corpusAbstract, PlainTextDocument)
#remove punctuations
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removePunctuation)
#remove english stop words
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusAbstract = tm_map(corpusAbstract, removeWords,stopwords("english"))
#stem the words
library(SnowballC)
corpusTitle = tm_map(corpusTitle, stemDocument, language="english")
corpusAbstract = tm_map(corpusAbstract, stemDocument, language="english")
#build a document term matrix
dtmTitle = DocumentTermMatrix(corpusTitle)
dtmAbstract = DocumentTermMatrix(corpusAbstract)
#sparsness: keep only terms appearing in at least 5% of documents
dtmTitle = removeSparseTerms(dtmTitle, 0.95)
dtmAbstract = removeSparseTerms(dtmAbstract, 0.95)
#convert to data frames
dtmTitle = as.data.frame(as.matrix(dtmTitle))
dtmAbstract = as.data.frame(as.matrix(dtmAbstract))
length(stopwords("english"))
str(dtmTitle)
str(dtmAbstract)
#most frequent word stem across all abstracts
sort(colSums(dtmAbstract))
#combine dtmTitle and dtmAbstract into single dataframe, prefixing column
#names with T/A so title and abstract terms stay distinguishable
colnames(dtmTitle) = paste0("T", colnames(dtmTitle))
colnames(dtmAbstract) = paste0("A", colnames(dtmAbstract))
dtm = cbind(dtmTitle, dtmAbstract)
#add outcome variable
dtm$trial = trials$trial
# 70/30 train/test split, stratified on the outcome.
library(caTools)
set.seed(144)
spl = sample.split(dtm$trial, 0.7)
train = subset(dtm, spl==TRUE)
test = subset(dtm, spl==FALSE)
table(train$trial)
#CART model on all term frequencies
library(rpart)
library(rpart.plot)
trialCART = rpart(trial ~ ., data=train, method="class")
prp(trialCART)
# Training-set class probabilities; column 2 = P(trial = 1).
predTrain = predict(trialCART)
predTrain[1:10,]
pred.prob = predTrain[,2]
max(pred.prob)
#or
summary(pred.prob)
# Confusion matrix on the training set at a 0.5 probability cutoff.
# Rows = actual trial (0/1), columns = predicted (FALSE/TRUE).
confTrain = table(train$trial, pred.prob>=0.5)
confTrain
# Sensitivity = TP / (TP + FN); specificity = TN / (TN + FP).
# FIX: computed from the confusion matrix instead of the hard-coded counts
# 441/131/631/99, so the values stay correct if the data or seed changes.
sensitivity = confTrain["1", "TRUE"] / sum(confTrain["1", ])
specificity = confTrain["0", "FALSE"] / sum(confTrain["0", ])
# Test-set probabilities and confusion matrix at the 0.5 cutoff.
predTest = predict(trialCART, newdata=test)
pred.prob.test = predTest[,2]
table(test$trial, pred.prob.test >= 0.5)
#plot ROC curve and report AUC on the test set
library(ROCR)
predROCR = prediction(pred.prob.test, test$trial)
performance(predROCR, "auc")@y.values
perfROCR = performance(predROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE)
|
8f36c91433404d7d7a31266ddfb826d45b9dcca9
|
4c69d500aebcfd631f339aede54d0a2e54556274
|
/Lab_Exercise_1.R
|
616ae4d1e1c8cf62e146ae3320c3b823581b00a7
|
[] |
no_license
|
mvanger/data_viz_code
|
c20f94a9b072830b8befd0d46cea7194df402435
|
a4a5e1a11f1e343f7f182533aef401978a083031
|
refs/heads/master
| 2021-01-15T21:10:03.523557
| 2014-06-05T18:10:26
| 2014-06-05T18:10:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,565
|
r
|
Lab_Exercise_1.R
|
# Problem 1
# Set x and y values for each function: (a) sin(x) on [-4,4], (b) x^2 on
# [-2,2], (c) a step function (1 on [-3,0], 0 on [0,3]).
x1 = seq(-4,4,.1)
y1 = (sin(x1))
x2 = seq(-2,2,.1)
y2 = (x2^2)
x3.1 = c(-3:0)
y3.1 = c(rep(1,4))
x3.2 = c(0:3)
y3.2 = c(rep(0,4))
# Plot function a. type="n" sets up axes without drawing; lines() then draws
# each series. par(new=T) overlays subsequent plots on the same axes.
# NOTE(review): T is the shorthand for TRUE (reassignable) -- prefer TRUE.
plot(x1,y1,ylim=range(c(y1,y2)),xlim=range(c(-6,6)),main="Question 1", xlab="X Value", ylab="Y Value",type="n")
lines(x1,y1,type="l",col="red")
# Plot function b
par(new=T)
plot(x2,y2,ylim=range(c(y1,y2)),xlim=range(c(-6,6)),main="Question 1", xlab="X Value", ylab="Y Value",type="n")
lines(x2,y2,type="p",col="green")
# Plot function c from -3 to 0
par(new=T)
plot(x3.1,y3.1,ylim=range(c(y1,y2)),xlim=range(c(-6,6)),main="Question 1", xlab="X Value", ylab="Y Value",type="n")
lines(x3.1,y3.1,type="o",col="blue")
# Plot function c from 0 to 3
par(new=T)
plot(x3.2,y3.2,ylim=range(c(y1,y2)),xlim=range(c(-6,6)),main="Question 1", xlab="X Value", ylab="Y Value",type="n")
lines(x3.2,y3.2,type="o",col="blue")
# Add a legend
legend(x=3,y=3,legend=c("function(a)","function(b)","function(c)"),col=c("red","green","blue"),lty=c(1,0,3),pch=c(26,1,1),cex=1)
# Problem 2
# Part A: density of Toyota engine sizes.
carsdata<-read.csv("Downloads/04cars data.csv",header=TRUE)
attach(carsdata)
toyotas = grep("Toyota",Vehicle.Name)
d = density(Engine.Size..l.[toyotas])
plot(d,xlab="Engine Size",main="Engine Size of Toyota Vehicles Density Plot")
# Part B: dot chart of Toyota engine sizes, grouped/colored by sports-car flag.
sample.Toyota = carsdata[toyotas,]
x = sample.Toyota[order(sample.Toyota$Engine.Size..l.),]
x$Sports.Car = factor(x$Sports.Car)
x$color[x$Sports.Car==0] = "red"
x$color[x$Sports.Car==1] = "blue"
dotchart(as.numeric(x$Engine.Size..l.),labels=x$Vehicle.Name,cex=.8,groups=x$Sports.Car,color=x$color, main="Toyota Engine Size Vs Vehicle Type", xlab="Engine Size")
# Part C: stacked bar chart of vehicle-type counts for Toyota vs Ford.
# Vehicles that are neither sedans nor sports cars are bucketed as "Other".
fords = grep("Ford",Vehicle.Name)
sample.Ford = carsdata[fords,]
sample.Toyota$make = "Toyota"
sample.Ford$make = "Ford"
sample.Toyota$type[sample.Toyota$Small.Sporty..Compact.Large.Sedan==1] = "Sedan"
sample.Toyota$type[sample.Toyota$Sports.Car==1] = "Sports Car"
sample.Toyota$type[is.na(sample.Toyota$type)] = "Other"
sample.Ford$type[sample.Ford$Small.Sporty..Compact.Large.Sedan==1] = "Sedan"
sample.Ford$type[sample.Ford$Sports.Car==1] = "Sports Car"
sample.Ford$type[is.na(sample.Ford$type)] = "Other"
table.Toyota = table(sample.Toyota$make,sample.Toyota$type)
table.Ford = table(sample.Ford$make,sample.Ford$type)
stacked.Bar = rbind(table.Toyota,table.Ford)
barplot(t(stacked.Bar), main="Vehicle Type Distribution among Toyota and Ford",col=c("blue","darkblue","red"),xlab="Vehicle Make",legend=colnames(stacked.Bar),ylab="Number of Vehicles", ylim=c(0,30))
|
149ace49431a9de83a98a1720b6601123ee1e631
|
3737b9311c734efcad14b24f07914dc7188a1381
|
/libr/switch_to_prop.R
|
66fc5e9fb74dcb7d44ff919ee2b3a5a98069f33c
|
[] |
no_license
|
jcccf/tlang
|
e38f64b45116c709ff528fbc7db49296499811b3
|
6fe67c1c550b23d19731bc41ebbaa20c953e6f48
|
refs/heads/master
| 2021-01-01T17:47:21.137247
| 2012-08-29T07:22:28
| 2012-08-29T07:22:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
switch_to_prop.R
|
# Fit a quadratic regression y ~ x + x^2 to the switch-to-prop data and
# overlay the fitted curve on a scatter plot of the observations.
w <- read.csv("switch_to_prop.csv")
names(w) <- c('x', 'y')
# FIX: removed attach(w) -- it left the data frame permanently on the search
# path (a well-known footgun). Columns are referenced explicitly instead;
# lm() and plot() already take the data via their data arguments.
reg <- lm(y~x+I(x^2), w)
summary(reg)
# Evaluate the fitted polynomial on a fine grid for a smooth curve.
xx <- seq(min(w$x), max(w$x), len=200)
yy <- reg$coef %*% rbind(1,xx,xx^2)
plot(y~x, w)
lines(xx,yy,lwd=2,col=2)
|
ef135e706e8725f6a3a3646b5ce65db3f0aa4268
|
87f01abc80a1da97ea9e3143981fcabb4192c370
|
/Codigo Grafico Animado.R
|
55891488d7c626a992db7554b27f2c8c985ebeb0
|
[] |
no_license
|
joakoestri/Laboratorio8
|
eef41cbd75724547a8b70461d2aa7d835eaa416f
|
522422b77cc02b627f2477bc48c794982f6da9d8
|
refs/heads/master
| 2020-09-22T15:15:40.457183
| 2019-12-02T01:14:05
| 2019-12-02T01:14:05
| 225,255,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
Codigo Grafico Animado.R
|
# Read the two input datasets (per-capita plastic waste vs GDP)
mismanaged_vs_gdp <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-05-21/per-capita-mismanaged-plastic-waste-vs-gdp-per-capita.csv")
waste_vs_gdp <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-05-21/per-capita-plastic-waste-vs-gdp-per-capita.csv")
# Join the mismanaged_vs_gdp and waste_vs_gdp tables on their shared keys
inner_join(mismanaged_vs_gdp,
           waste_vs_gdp,
           by=c("Entity","Code","Year","Total population (Gapminder)")) %>%
  # Keep only year 2010 and the countries Mexico and Spain
  filter(Year == "2010", Entity %in% c("Mexico","Spain")) %>%
  # Select and rename (Spanish display names) the columns of interest
  select(Entity,
         "Residuo Plastico per capita (kg/dia)"=`Per capita plastic waste (kilograms per person per day)`,
         "Residuo Platico con mal manejo per capita (Kg/dia)"=`Per capita mismanaged plastic waste (kilograms per person per day)`) %>%
  # Reshape from wide to long: one row per country x waste type
  pivot_longer(-Entity,
               names_to = "Tipo",
               values_to = "Cantidad") %>%
  # Build the animated bar chart: one frame per waste type (Tipo).
  # NOTE(review): transition_states() comes from gganimate, which is not
  # loaded in this snippet -- presumably attached elsewhere; confirm.
  ggplot(aes(x = Entity,
             y = Cantidad,
             fill = Entity))+
  geom_bar(stat='identity') +
  labs(title = "Medición: {closest_state}",
       x = "País",
       y = "Residuos Plasticos per capita (kg/día)",
       caption = "Fuente: Our World in Data") +
  transition_states(Tipo) +
  theme_grey() +
  theme(legend.position = "none",
        title = element_text(size = 16,
                             colour = "red"),
        axis.title.x = element_text(color = "black"),
        axis.title.y = element_text(color = "black"))
|
4de3063dc6a6214810c5ba227a5ce7029091fe64
|
31953bf4daaf2e63a37963c823e314283eb27483
|
/a71_election.R
|
e845c0b73b37d162b2ec8d582c99b400648e1705
|
[] |
no_license
|
mofas/r-note
|
b2c0692ab5693df341df82ea46c732ea1d17a1fd
|
175019f5ec40ce37fa5dba73e9240c99c46a27f4
|
refs/heads/master
| 2020-04-05T13:05:05.294066
| 2017-08-17T20:40:50
| 2017-08-17T20:40:50
| 95,072,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
r
|
a71_election.R
|
# MITx "Analytics Edge" assignment: map 2012 US presidential election
# predictions by state.
# FIX: the original called map_data("state") before library(ggplot2)/
# library(maps) were loaded, which errors in a fresh session; the library
# calls now come first. `=` assignments replaced with `<-`.
library(ggplot2)
library(maps)
library(ggmap)

# 1.1 -- state polygons for plotting
statesMap <- map_data("state")
str(statesMap)
length(table(statesMap$group))
# 1.2 -- polling data: train on pre-2012 years, test on 2012
polling <- read.csv("PollingImputed.csv")
Train <- subset(polling, Year < 2012)
Test <- subset(polling, Year == 2012)
# 2.1 -- logistic regression for a Republican win; predictions on 2012
mod2 <- glm(Republican ~ SurveyUSA + DiffCount, data = Train, family = "binomial")
TestPrediction <- predict(mod2, newdata = Test, type = "response")
TestPredictionBinary <- as.numeric(TestPrediction > 0.5)
predictionDataFrame <- data.frame(TestPrediction, TestPredictionBinary, Test$State)
table(TestPredictionBinary)
mean(TestPrediction)
# 2.2 -- merge predictions onto the map polygons (region = lowercase state)
predictionDataFrame$region <- tolower(predictionDataFrame$Test.State)
predictionMap <- merge(statesMap, predictionDataFrame, by = "region")
predictionMap <- predictionMap[order(predictionMap$order), ]
str(predictionMap)
str(statesMap)
# 2.3
# 2.4 -- binary prediction map
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary)) +
  geom_polygon(color = "black")
# 2.5 -- same map with a discrete Democrat/Republican legend
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPredictionBinary)) +
  geom_polygon(color = "black") +
  scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks = c(0, 1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
# 3.1
# 3.2
# 4.1 -- probability maps with different polygon line types / sizes
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPrediction)) + geom_polygon(color = "black", linetype = 3) + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks = c(0, 1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
ggplot(predictionMap, aes(x = long, y = lat, group = group, fill = TestPrediction)) + geom_polygon(color = "black", size = 3) + scale_fill_gradient(low = "blue", high = "red", guide = "legend", breaks = c(0, 1), labels = c("Democrat", "Republican"), name = "Prediction 2012")
# 4.2
|
5245c5ad2887e3692bb69191bd2a2208d2fd0c71
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/gluedatabrew_update_project.Rd
|
c04d351d9eaacf196866dba99aa987e36bcb214c
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 696
|
rd
|
gluedatabrew_update_project.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gluedatabrew_operations.R
\name{gluedatabrew_update_project}
\alias{gluedatabrew_update_project}
\title{Modifies the definition of an existing DataBrew project}
\usage{
gluedatabrew_update_project(Sample = NULL, RoleArn, Name)
}
\arguments{
\item{Sample}{}
\item{RoleArn}{[required] The Amazon Resource Name (ARN) of the IAM role to be assumed for this
request.}
\item{Name}{[required] The name of the project to be updated.}
}
\description{
Modifies the definition of an existing DataBrew project.
See \url{https://www.paws-r-sdk.com/docs/gluedatabrew_update_project/} for full documentation.
}
\keyword{internal}
|
6a335092e382446905a5fb0bd61af34940034b4b
|
3b3bb9d7be4125a7790ec0282bc3501e494498ec
|
/podstawy_programowania/2017_2018/01_kalkulator.R
|
444d33531fa1c2d796b40aa2ce44a91b1dc66b0b
|
[] |
no_license
|
lwawrowski/cdv_bigdata
|
ee4c198c739ff6d8fdda966376ccf3be96987ce1
|
e09183d300d56e0448ba853bb6165f9ed3c68126
|
refs/heads/master
| 2023-05-24T11:50:41.290672
| 2023-05-14T10:19:39
| 2023-05-14T10:19:39
| 183,685,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
01_kalkulator.R
|
# R-as-a-calculator warm-up exercises (intro programming course).

# check the working directory
getwd() # comment

# two ways of opening the help page for a function
?getwd
help("getwd")

# basic arithmetic and math functions
2+2
4^(1/2)
sqrt(4)
factorial(3)
sign(-5)
sign(5)
exp(1)
exp(2)
pi

# assignment: store values in variables
liczba_pi <- pi
liczba_5 <- 5

# logarithms: natural log by default, or with an explicit base
log(10)
log(10, base = 2)
log(10, 2)
abs(-5)

# exercise: evaluate the following expressions
2*sqrt(pi)+
  log(8,2)
((2^3)*(6^2))/(((1/2)^2)*((4/5)^3)) # 1
((6-3.5)/(2^11))^(1/3) # 2
pi + sqrt(exp(4)) # 3
factorial(5)-log(100,10) # 4
abs(1-exp(1)) # 5

# save a variable to disk and load it back
save(liczba_5, file = "dane.RData")
load("dane.RData")
|
7b407c6057fae8cf6b9812227f534ca5657177be
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sensitivitymv/examples/amplify.Rd.R
|
a2dc821282fee635194a129a5c622182fd12c245
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 246
|
r
|
amplify.Rd.R
|
# Auto-extracted example code for the `amplify` help page of the
# sensitivitymv package (sensitivity analysis in observational studies).
library(sensitivitymv)
### Name: amplify
### Title: Amplification of sensitivity analysis in observational studies.
### Aliases: amplify
### ** Examples

data(erpcp)
# Sensitivity analysis of the erpcp data at gamma = 3.5 with trimming
senmv(erpcp,gamma=3.5,trim=1)
# Amplify gamma = 3.5 against one, then several, comparison values
amplify(3.5,6)
amplify(3.5,c(4,6,8,11))
|
976190bce074b7f3e8443c6b8bc67adb7ed0921a
|
06c91835ef96c077e8549e7948c36dc6775fdd52
|
/man/get_observedprob.Rd
|
41fa1fc736fad19a64ef6776990d9434f6681d3a
|
[] |
no_license
|
sscogges/controlhet
|
3293a7f0ad949108f90bea6278d6d61e231013dc
|
0150cbb9fd2247baecf7513c7ea00db232442dae
|
refs/heads/master
| 2021-01-20T04:53:51.475552
| 2017-08-25T10:32:35
| 2017-08-25T10:32:35
| 101,393,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 493
|
rd
|
get_observedprob.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multipleversions_code_110316.R
\name{get_observedprob}
\alias{get_observedprob}
\title{Observed data quantities}
\usage{
get_observedprob(Y0, D0, Y1, D1)
}
\arguments{
\item{Y0}{observed outcomes in Z = 0 arm}
\item{D0}{observed treatment types in Z = 0 arm}
\item{Y1}{observed outcomes in Z = 1 arm}
\item{D1}{observed treatment types in Z = 1 arm}
}
\description{
Calculates q_dyz = P(Y = y, D = d | Z = z)
}
|
0e5d9ad1a81ef4d3a714b7b5fe544444e49a04bb
|
c77069c2dc6dbf3f9449a44e06d70b540a1912b5
|
/R/RI.R
|
988087e4e032868c47cc203e410becb418058964
|
[] |
no_license
|
cran/phenology
|
62b323a9231c3701568de58c57a804e043abe6a2
|
991d2c35dcbcf1fcff23cbcc0c2f82b74a868dfb
|
refs/heads/master
| 2023-04-15T03:37:51.464388
| 2023-04-01T09:10:02
| 2023-04-01T09:10:02
| 17,698,504
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,547
|
r
|
RI.R
|
#' RI returns an expected remigration interval
#' @title Return an expected remigration interval.
#' @author Marc Girondot
#' @return Return a remigration interval.\cr
#' @param s Time-conditional probability of survival
#' @param t Time-conditional probability of tag retention
#' @param r Time-conditional probability of return
#' @param c Probability of return
#' @param p Annual probability of observation
#' @description Model of remigration interval\cr
#' Note that r, s and t are conditional probabilities. If c is null, then return probabilities are
#' estimated from r. r can be a named vector. For example:\cr
#' r <- c(r1=0.5, r2=0.60, r3=1) is equivalent to c <- c(c1=0.5, c2=0.3, c3=0.2)\cr
#' The vector of r describes the probability that a female returned after
#' 1, 2, 3 years among those who have not nested before.
#' The vector of c is the same but defining the unconditional return probability.\cr
#' @family Model of Remigration Interval
#' @examples
#' \dontrun{
#' library(phenology)
#' # Example
#' s <- c(s1=1, s2=1, s3=1, s4=1, s5=1)
#' t <- c(t1=0.95, t2=1, t3=1, t4=1, t5=1)
#' r <- c(r1=0.1, r2=0.8, r3=0.7, r4=0.7, r5=1)
#' p <- c(p1=0.6, p2=0.6, p3=0.6, p4=0.6, p5=0.6)
#'
#' # r is equivalent to
#' c <- c(c1=0.1, c2=0.72, c3=0.126, c4=0.0378, c5=0.0162)
#' # Then the true remigration interval is:
#' ri_true <- sum(1:5*c[1:5])
#'
#' s_ri <- NULL
#' for (sx in seq(from=0.01, to=1, by=0.01)) {
#'   s[] <- sx
#'   ri1 <- RI(s=s, t=t, r=r, p=p)
#'   s_ri <- c(s_ri,sum(1:5*ri1)/sum(ri1))
#' }
#'
#' par(mar=c(4, 4, 1, 1)+0.4)
#'
#' plot(x=seq(from=0.01, to=1, by=0.01), y=s_ri, type="l",
#'      las=1, bty="n", ylim=c(0, 4),
#'      xlab="Annuual survival probabilities", ylab="Naive Remigration Interval",
#'      main="")
#' segments(x0=0.01, x1=1, y0=ri_true, y1=ri_true, lty=2, col="red")
#' legend("topright", legend="True remigration interval", lty=2, col="red")
#'
#' }
#' @export
RI <- function(s, t, r = NULL, c = NULL, p) {

  # Order a named vector such as c(s1=..., s2=...) by the numeric suffix of
  # its names. Skipped when the first name is the bare prefix (e.g. "s").
  # BUG FIX: the original extracted the suffix with
  # substr(names(x), 2, nchar(x)) -- nchar of the *values* instead of
  # nchar(names(x)) -- which silently mis-ordered some inputs.
  sort_by_suffix <- function(x, prefix) {
    if (names(x)[1] != prefix) {
      x <- x[order(as.numeric(substring(names(x), 2)))]
    }
    x
  }

  s <- sort_by_suffix(s, "s")
  t <- sort_by_suffix(t, "t")
  if (!is.null(r)) r <- sort_by_suffix(r, "r")
  if (!is.null(c)) c <- sort_by_suffix(c, "c")
  p <- sort_by_suffix(p, "p")

  if (is.null(c)) {
    # Convert conditional return probabilities r into unconditional ones:
    # c[i] = r[i] * prod(1 - r[1:(i-1)])
    rp <- numeric(length(r))
    rp[1] <- r[1]
    if (length(r) > 1) {
      for (i in 2:length(r)) {
        rp[i] <- r[i] * prod(1 - r[1:(i - 1)])
      }
    }
    r <- rp
  } else {
    r <- c
  }

  # k enumerates the possible return histories: one row per history, one
  # column per year; 1 = a (possibly unobserved) return, 0 = no return.
  k <- matrix(data = 1, nrow = 1)
  N <- 1
  n.return.vue <- numeric(length(s))

  # Note that r, s and t are conditional probabilities.
  for (yr in seq_along(s)) {
    # Females still alive, and still tagged, after `yr` years.
    n.survival <- N * prod(s[1:yr])
    n.tagretention <- n.survival * prod(t[1:yr])

    n.return <- 0
    for (h in seq_len(nrow(k))) {
      ts <- k[h, , drop = TRUE]
      j <- 1
      # Years elapsed since the last return. (The original reused `l` here,
      # shadowing the outer loop index -- a latent hazard, now renamed.)
      run <- 1
      if (length(ts) != 1) {
        for (n in 1:(length(ts) - 1)) {
          if (ts[n] == 1) {
            # Returned after `run` years but was not observed that year.
            j <- j * r[run] * (1 - p[n])
            run <- 1
          } else {
            run <- run + 1
          }
        }
      }
      # Final return, observed this time.
      # NOTE(review): p[length(s)] always uses the *last* observation
      # probability; confirm this should not be p[length(ts)].
      j <- j * r[run] * p[length(s)]
      n.return <- n.return + j * n.tagretention
    }
    n.return.vue[yr] <- n.return

    # Histories for the next year: each current history either ends with a
    # return (1) or not (0), with a new final year (always 1) appended.
    k2 <- k
    k2[, ncol(k)] <- 0
    k <- cbind(rbind(k, k2), rep(1, 2 * nrow(k)))
  }

  return(unname(n.return.vue))
}
|
5c2b1969d20586b1e3f3dfb8de4a7381715272a6
|
daa6105785845c70b06aef9760708ced491353e9
|
/public/R/src/org/broadinstitute/sting/utils/R/gsalib/man/gsalib-package.Rd
|
dc7a08287f4ba134a70f876024704b54465635bd
|
[] |
no_license
|
johandahlberg/gatk
|
2f1b1c4909326480841d73dd07bb469e22cc7552
|
b8f13082ca139e74caa887a05855e723430d1aad
|
refs/heads/master
| 2021-01-18T07:36:54.199474
| 2012-12-17T17:54:03
| 2012-12-20T03:28:53
| 4,624,254
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,909
|
rd
|
gsalib-package.Rd
|
\name{gsalib-package}
\alias{gsalib-package}
\alias{gsalib}
\docType{package}
\title{
GATK utility analysis functions
}
\description{
Utility functions for analyzing GATK-processed NGS data
}
\details{
This package contains functions for working with GATK-processed NGS data. These functions include a command-line parser that also allows a script to be used in interactive mode (good for developing scripts that will eventually be automated), a proportional Venn diagram generator, convenience methods for parsing VariantEval output, and more.
}
\author{
Genome Sequencing and Analysis Group
Medical and Population Genetics Program
Maintainer: Kiran Garimella
}
\references{
GSA wiki page: http://www.broadinstitute.org/gatk
GATK help forum: http://www.broadinstitute.org/gatk
}
\examples{
## get script arguments in interactive and non-interactive mode
cmdargs = gsa.getargs( list(
requiredArg1 = list(
value = NA,
doc = "Documentation for requiredArg1"
),
optionalArg1 = list(
value = 3e9,
doc = "Documentation for optionalArg1"
)
) );
## plot a proportional Venn diagram
gsa.plot.venn(500, 250, 0, 100);
## read a GATKReport file
report = gsa.gatk.report("/path/to/my/output.gatkreport");
## emit a message
gsa.message("This is a message");
## emit a warning message
gsa.message("This is a warning message");
## emit an error message
gsa.message("This is an error message");
## read the SQUID metrics for a given sequencing project (internal to the Broad only)
s = gsa.read.squidmetrics("C427");
## read command-line arguments
cmdargs = gsa.getargs(
list(
file = list(value="/my/test.vcf", doc="VCF file"),
verbose = list(value=0, doc="If 1, set verbose mode"),
test2 = list(value=2.3e9, doc="Another argument that does stuff")
),
doc="My test program"
);
}
\keyword{ package }
|
0481ce811eec63be81892f12de222adbbe74efde
|
666f1cc22538362c71a404c6977c05c54d09d587
|
/09_combine_crunch.R
|
82e6f492dc248925ba9ca30f6db48a65555438b2
|
[] |
no_license
|
kuriwaki/cces_cumulative
|
09c8b2548006e5b0abf6dfdecaa1816dc50be2a4
|
2449f5c76fb29b1f0ea88060189357b12c9cb53e
|
refs/heads/main
| 2023-05-25T14:38:23.523698
| 2023-05-18T13:29:08
| 2023-05-18T13:29:08
| 93,090,050
| 19
| 1
| null | 2023-05-12T17:47:20
| 2017-06-01T19:00:36
|
TeX
|
UTF-8
|
R
| false
| false
| 4,592
|
r
|
09_combine_crunch.R
|
# Maintenance script for the CCES (Cooperative Congressional Election Study)
# cumulative dataset hosted on Crunch. Three jobs, run interactively:
#   1. replace corrected variables and append new-year rows via fork + merge;
#   2. repair the 2016 vote-validation variables;
#   3. set weight variables and human-readable names on the 2016 fork.
# NOTE(review): this mutates remote Crunch datasets (fork/delete/merge);
# several guards (`writeToCrunch`, the `if (FALSE)` blocks) assume objects
# or decisions from the calling session.
library(tidyverse)
library(crunch)
library(dplyr)
library(haven)
library(googlesheets)
library(lubridate)

# Authenticate with the Crunch API.
login()

# One-time (re)upload of the cumulative SPSS file.
# NOTE(review): `writeToCrunch` is not defined in this script -- it must be
# set by the caller before sourcing.
if (writeToCrunch) {
  login()
  deleteDataset("CCES Cumulative Issues")
  newDataset("https://www.dropbox.com/s/p8cx49h82coqfcs/cumulative_2006_2018_crunch.sav?dl=0",
             "CCES Cumulative v4.0")
}

# append intent to vote party -----
# Work on a fork of the cumulative dataset; merge back when done.
ds_orig <- loadDataset("CCES Cumulative Common", project = "CCES")
forkDataset(ds_orig, "Fork of CCES Cumulative Common")
ds_fork <- loadDataset("Fork of CCES Cumulative Common")
ds_new <- loadDataset("CCES Cumulative v6.0")
ds_iss <- loadDataset("CCES Cumulative Issues")

# identifier: "<year>_<case_id>" uniquely keys rows across datasets
ds_fork$year_caseid <- paste0(as.character(as.vector(ds_fork$year)), "_", as.vector(ds_fork$case_id))
ds_new$year_caseid <- paste0(as.character(as.vector(ds_new$year)), "_", as.vector(ds_new$case_id))
ds_iss$year_caseid <- paste0(as.character(as.vector(ds_iss$year)), "_", as.vector(ds_iss$case_id))

# compare: everything except the key columns gets replaced
vars_to_replace <- setdiff(names(ds_new), c("year_caseid", "year", "case_id"))

# validated vote update (alternative, narrower variable set)
# vars_to_replace <- c(str_subset(names(ds_new), "vv"),
#                      str_subset(names(ds_new), "voted_(rep|gov|sen)"))

# A - for the new variables dataset:
# keep only variables to merge -- delete everything BUT the key and the
# vars to be replaced
deleteVariables(ds_new,
                setdiff(names(ds_new), c("year_caseid", vars_to_replace)))
saveVersion(ds_new, "dropped all but the vote validation and post-elec vote")

# B - drop the to-be overwritten variables from the fork
deleteVariables(ds_fork,
                vars_to_replace) # delete the vars to be replaced
saveVersion(ds_fork, "dropped all but year_caseid")
refresh(ds_fork)
refresh(ds_new)

# merge fork on new (row-matching on year_caseid), dropping 2018 rows
extendDataset(ds_fork, ds_new, by = "year_caseid")
refresh(ds_fork)
saveVersion(ds_fork, description = "fork merged with new 2020")

# update the new dataset so it holds only the 2018 rows, then append them
ds_new <- dropRows(ds_new, ds_new$year != 2018)
refresh(ds_new)
appendDataset(ds_fork, ds_new)

# merge the new vars into the original dataset
# NOTE(review): original comment here read "un 07 format" -- presumably
# "run script 07 to format"; confirm.
mergeFork(ds_orig, fork = ds_fork)

# Fix 2016 vote match ---------
ds <- loadDataset("CCES 2016 Common Vote Validated", project = "CCES")
crtabs(~ inputstate + CL_E2016GVM, ds, useNA = "ifany", weight = NULL) # check Northeastern states
login()
ds <- loadDataset("Fork of CCES 2016 Common Vote Validated")
crtabs(~ inputstate + CL_E2016GVM, ds, useNA = "ifany", weight = NULL) # check Northeastern states

# Disabled scratch work: appending the 2017 dataset onto 2006-2016.
if(FALSE){
  ds16 <- loadDataset("CCES Cumulative Common 2016") # old dataset, 2006 - 2016
  ds17 <- loadDataset("CCES Cumulative Common 2017") # new dataset, only 2017
  ds16_17 <- appendDataset(ds16, ds17)
  compareDatasets(ds16, ds17)
  # something wrong with variable number 6?
  ds16[[6]]
  ds17[[6]]
}

# fix 2016 cumulative --------
# upload 2016: the corrected weights and vote-validation (CL_*) columns
cc16 <- read_dta("~/Dropbox/CCES_SDA/2016/data/Common/CCES16_Common_OUTPUT_Feb2018_VV.dta") %>%
  select(V101, matches("weight"), matches("^CL")) # vars to replace + ID

# insert it once (disabled after the first run)
if (FALSE) { # don't run again
  insert <- loadDataset("CCES 2016 Jan 2018")
  old16_fork <- loadDataset("Fork of CCES 2016 Common Vote Validated") # old version (will get overwritten)
  # insert is the vars to replacement + ID
  vars_to_replace <- setdiff(names(insert), "V101")
  # delete the "wrong" variables
  deleteVariables(old16_fork, vars_to_replace)
  # immediately add back the "correct" variables in its place
  joined <- extendDataset(old16_fork, insert, by = "V101")
}

# add to weights
login()
ds <- loadDataset("Fork of CCES 2016 Common Vote Validated")

# apply weights ---
# register every *weight* variable; default to the vote-validated weight
weight_aliases <- str_subset(names(ds), "weight")
weightVariables(ds) <- weight_aliases
weight(ds) <- ds$commonweight_vv

# replace alias-based names with real names from the Google Sheet codebook
ccvar <- gs_title("CCES_crunch_variables")
v16 <- gs_read_csv(ccvar, ws = "CCES_2016_variables")
lookup <- v16 %>% select(alias = variable, name) %>% distinct()

# rename each variable whose alias appears in the lookup table
for (cv in aliases(variables(ds))) {
  if (cv %in% lookup$alias) {
    replacement <- lookup$name[which(lookup$alias == cv)]
    if (!is.na(replacement)) {
      name(ds[[which(aliases(variables(ds)) == cv)]]) <- replacement
      print(cv)
    } else next
  }
}

# merge fork back into the original 2016 dataset and re-check
ds_original <- loadDataset("CCES 2016 Common Vote Validated", project = "CCES")
ds_fork <- loadDataset("Fork of CCES 2016 Common Vote Validated")
mergeFork(dataset = ds_original, fork = ds_fork)
crtabs(~ inputstate + CL_E2016GVM, ds_original, useNA = "ifany", weight = NULL) # check Northeastern states
|
9bb925e7d7a93031319e7669af062890544ab407
|
64163746088c204483e85d54f428b31b891ce6ae
|
/R/SNR.R
|
9e05e0b8a935f10681e6d4cf4d84955f526e5d05
|
[] |
no_license
|
peterccarl/TTR
|
af83e3940077ba64f75e253a6f38de5cbee9a1a4
|
93b316959116a4ec1a70c614a10658e0c0f93afe
|
refs/heads/master
| 2021-01-15T13:07:30.222833
| 2016-04-07T14:21:43
| 2016-04-07T14:21:43
| 55,658,094
| 2
| 0
| null | 2016-04-07T02:46:39
| 2016-04-07T02:46:39
| null |
UTF-8
|
R
| false
| false
| 1,403
|
r
|
SNR.R
|
#' Signal to Noise Ratio
#'
#' The n-day SNR for a given market is the absolute price change over an
#' n-day period divided by the average n-day volatility, here measured by
#' the average true range:
#'
#' \deqn{SNR(n) = \frac{ABS(P_t - P_{t-n})}{ATR_n}}{SNR(n) = ABS(P_t - P_(t-n))/ATR_n}
#'
#' Using average true range as the volatility measure captures more of the
#' intraday and overnight volatility than a Close-to-Close price change
#' would. The interpretation is intuitive: an SNR value of five indicates
#' that the market has moved five times its volatility (average true range)
#' over the given look-back period.
#'
#' @param HLC Object that is coercible to xts or matrix and contains High-Low-Close prices
#' @param n Number of periods for moving average
#' @param ... parameters passed into \code{\link{ATR}}
#'
#' @return xts time series of signal to noise ratio
#'
#' @author Peter Carl
#' @references Skeggs, James and Hill, Alex (2015). Back in Black Part 2: The
#' Opportunity Set for Trend Following.
#' \url{http://208.75.238.16/content/dam/shared/alternativeedge-snapshots/AlternativeEdge-Snapshot-Back-Black-Trend-Following-Opportunities.pdf}
#' @export
#'
SNR <- function(HLC, n, ...) {
  prices <- try.xts(HLC, error = as.matrix)
  # Absolute n-period change of the close (column 3), divided by the
  # average true range over the same window.
  price_move <- abs(prices[, 3] - lag.xts(Cl(prices[, 3]), n))
  avg_true_range <- ATR(prices, n, ...)$atr
  reclass(price_move / avg_true_range, prices)
}
|
2eaaab0aef37c15024e118b1f6b45d73aaef7fa6
|
87d40842a98dc8d752f0babe560c51f99ebbc47b
|
/R/Updated/SVAR.R
|
ec6f53158bbadfcfaf59afbfef899b7cdfdeed30
|
[] |
no_license
|
Allisterh/SVARdoc
|
83a2f6d30edd571d692a58c5279e6ffcdd0f1a40
|
fc3eaa55bb4ebaf57f7f8c3ed8053df04a0e48bd
|
refs/heads/master
| 2023-04-04T13:41:54.833426
| 2021-04-22T15:07:53
| 2021-04-22T15:07:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,834
|
r
|
SVAR.R
|
# SVAR set-up.
# Three candidate models: model 1 is the central SVAR model, model 2 the
# orthogonal model, and model 3 the orthogonal random model.
head(da)
# head(dan)
# Var1 <- VAR(da, p = 4, type = 'both', season = NULL, exog = dum)
# Var3 <- VAR(dan[, c(3:7, 9. 11)], p = 8, type = 'both', season = NULL, exog = dum)

# Contemporaneous restriction matrix A (or B) ----------------------------
# Start from the 7x7 identity. Every NA below marks a coefficient that is
# freely estimated; all other off-diagonal entries stay restricted to zero.
# Edit the `free` table to test a different theory.
# Variable order: 1 bond, 2 equity, 3 fdi, 4 cot, 5 fx, 6 spread, 7 sentiment.
Amat <- diag(7)

# (row, column) pairs of freely estimated coefficients:
free <- rbind(
  c(1, 2),  # bond ~ equity
  c(1, 4),  # bond ~ cot
  c(1, 6),  # bond ~ spread
  c(2, 1),  # equity ~ bond
  c(2, 3),  # equity ~ fdi (estimated because of similar influences)
  c(2, 5),  # equity ~ fx
  c(2, 7),  # equity ~ sentiment
  c(3, 2),  # fdi ~ equity
  c(3, 5),  # fdi ~ fx
  c(4, 5),  # cot ~ fx
  c(4, 7),  # cot ~ sentiment
  c(5, 2),  # fx ~ equity
  c(5, 3),  # fx ~ fdi
  c(5, 4),  # fx ~ cot
  c(5, 6),  # fx ~ spread
  c(5, 7),  # fx ~ sentiment
  c(6, 1),  # spread ~ bond
  c(6, 5),  # spread ~ fx
  c(7, 2),  # sentiment ~ equity
  c(7, 4),  # sentiment ~ cot
  c(7, 5)   # sentiment ~ fx
)
Amat[free] <- NA

# Everything not listed above keeps the exclusion restriction (zero) or the
# unit diagonal, exactly as in the original element-by-element assignments.
Amat
|
93e13561d654dbff1f97704ed7d85bf1eeb9ab41
|
1020e655f91b5f3ae5056f3a3006858ddaa92d3a
|
/modelo2/R_server/clustering0.R
|
452606b26f56897c1af2e1da6f917134c9824057
|
[] |
no_license
|
ivomota/Olho-Passarinho
|
578200e3c12a4c94c399b0455ef33ed61ca7619c
|
5b531aebac3c4914f90586f4f163b42773a5b31d
|
refs/heads/master
| 2021-01-18T11:25:50.284361
| 2014-06-28T13:24:19
| 2014-06-28T13:24:19
| 15,495,323
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,906
|
r
|
clustering0.R
|
# Run DBSCAN over a family of pre-computed combined distance matrices (one
# per (a, b, c) weight triple) and write each cluster's member indices to
# ./clusters/<run name>/c<i>.txt.
# FIXES: the inner loop reassigned `len` (the outer loop bound) -- renamed;
# `for (k in 1:max(...))` iterated c(1, 0) when no clusters were found --
# now seq_len(); machine-specific lib.loc removed from library(stringr).
library('fpc')     # dbscan()
library("stringr") # kept for compatibility (paste0 now builds the names)

set.seed(665544)

# NOTE(review): setwd() in a script is fragile; kept for compatibility.
mainDir <- "~/Dropbox/FEUP/FEUP_13.14/Dissertacao/Olho-Passarinho/modelo2/matrix_combined/S0"
setwd(file.path(mainDir))

# Weight triples (a, b, c) identifying each combined-matrix file.
a <- c(1, 0.75, 0.75, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0, 0)
b <- c(0, 0.25, 0, 0.5, 0.25, 0, 0.75, 0.5, 0.25, 0, 1, 0.75, 0.5, 0.25, 0)
c <- c(0, 0, 0.25, 0, 0.25, 0.5, 0, 0.25, 0.5, 0.75, 0, 0.25, 0.5, 0.75, 1)

for (j in seq_along(a)) {
  print(j)

  # e.g. "combined_matrix_1_0_0.rds"
  m_name <- paste0("combined_matrix_", a[j], "_", b[j], "_", c[j], ".rds")
  combined_matrix <- readRDS(m_name)

  # eps scaled to 2% of the largest distance in the matrix
  eps <- max(combined_matrix) * 0.02
  ds <- dbscan(combined_matrix, eps, MinPts = 5, method = "dist", showplot = 1)

  c_name <- paste0("c_", a[j], "_", b[j], "_", c[j])
  out_dir <- paste("clusters", c_name, sep = "/")
  dir.create(out_dir)

  maxim <- max(ds$cluster)
  n_points <- length(ds$cluster)

  # Cluster 0 is DBSCAN noise; write one membership file per cluster id.
  for (i in 0:maxim) {
    members <- which(ds$cluster %in% i)
    nam <- paste0("c", i)
    assign(nam, members) # kept: original also left c<i> in the workspace
    out_file <- paste("./clusters", c_name, paste0(nam, ".txt"), sep = "/")
    write(members, file = out_file, ncolumns = n_points)
  }
  # saveRDS(ds, c_name)

  cat(sprintf("Detected %s new clusters\n", maxim))
  for (k in seq_len(maxim)) {
    cat(sprintf("Cluster %s has %s elements\n", k,
                sum(ds$cluster == k)))
  }
}

# maxim = max(ds$cluster)
# len = length(ds$cluster)
# j = 0
#
# for (i in 0:maxim) {
#   nam <- paste("C", i, sep = "")
#   assign(nam, which(ds$cluster %in% i) )
#   nam <- paste(nam, "txt", sep = ".")
#   nam <- paste("../R_server/clusters/S0", nam, sep="/")
#   write(which(ds$cluster %in% i), file=nam, ncolumns = len)
# }
|
e771d5f228bea46bced74bf442272e5d167d2b70
|
114773ac779032e674a4246309e17187c6ed69f6
|
/Project 101.R
|
97ef67fafecd7d486d999ce698702fdb030326c2
|
[] |
no_license
|
shoshin-programmer/manning_agencies_geoplot
|
b23f9548126553be13541bb1851c87807389aabc
|
bc04581abb90f7975e14e6e6e9a998458ab0df92
|
refs/heads/master
| 2022-03-12T16:44:58.218634
| 2019-05-28T05:00:16
| 2019-05-28T05:00:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
Project 101.R
|
# Manning-agency locations as clustered markers on a Leaflet map,
# centred on Manila.
library(dplyr)
library(ggmap)
library(leaflet)

ph <- read.csv("manning_agencies.csv")
ph <- ph %>% select(-X)

# Quick preview of the first rows in an interactive table.
ph %>%
  head(5) %>%
  DT::datatable(options = list(
    lengthMenu = c(5, 3, 1)
  ))

# Map centre: Manila (lon 120.98422, lat 14.599512)
Ph.geo <- data.frame(120.98422, 14.599512)
names(Ph.geo) <- c("long", "lat")

leaflet(data = ph) %>%
  addProviderTiles(providers$CartoDB.Positron) %>%
  # BUG FIX: Ph.geo's columns are "long"/"lat"; the original `Ph.geo$lon`
  # only resolved through partial name matching.
  setView(lng = Ph.geo$long, lat = Ph.geo$lat, zoom = 5) %>%
  addCircleMarkers(~lon, ~lat, popup = ~address,
                   clusterOptions = markerClusterOptions()) %>%
  addLegend("topright", colors = "blue", labels = "Address")
|
7c9d72c0d5897c422cb006724e82529ec9b59876
|
b2307568018ef3ff904e3406f927a4cd5c0d8a26
|
/R/utils.R
|
64694e285c045367969ed62e7dfde57b4d3e07e0
|
[] |
no_license
|
ropensci-review-tools/pkgcheck
|
ad75d51c0baf573ec38b4911feb2febe459be4d9
|
21e6ac3aa530dacf7b4440f63d53f2374965ca1a
|
refs/heads/main
| 2023-08-30T21:37:05.761840
| 2023-08-29T11:54:17
| 2023-08-29T11:54:17
| 264,203,707
| 15
| 4
| null | 2023-09-05T00:50:39
| 2020-05-15T13:40:55
|
R
|
UTF-8
|
R
| false
| false
| 2,853
|
r
|
utils.R
|
#' Tick symbol for markdown output
#'
#' Returns the GitHub-flavoured markdown shortcode for a check mark.
#' @noRd
symbol_tck <- function () ":heavy_check_mark:"
#' Cross symbol for markdown output
#'
#' Returns the GitHub-flavoured markdown shortcode for a multiplication sign.
#' @noRd
symbol_crs <- function () ":heavy_multiplication_x:"
# Fetch the unexported `tools:::.Rd_get_metadata()` helper (extracts metadata
# fields from parsed Rd objects) via getFromNamespace(), avoiding a direct
# `:::` call.
get_Rd_meta <- utils::getFromNamespace (".Rd_get_metadata", "tools") # nolint
#' Decompose file paths into character vectors of named directories and final
#' file names
#'
#' @param f One or more file paths with system-dependent file separators
#' @return List of equivalent character vectors from which paths can be
#' reconstructed with \link{file.path}
#' @noRd
decompose_path <- function (f) {
    # Split on separators using the regex from the 'fs' package. Two
    # alternatives:
    #   "^(?=/)(?!//)"   -- zero-width match at the start of an absolute path
    #                       (but not a leading "//"), so the root "/" becomes
    #                       its own component;
    #   "(?<!^)(?<!^/)/" -- every subsequent "/", except the one immediately
    #                       following a root "/".
    # https://github.com/r-lib/fs/blob/4cc4b56c26b9d7f177a676fbb331133bb2584b86/R/path.R # nolint
    strsplit (f, "^(?=/)(?!//)|(?<!^)(?<!^/)/", perl = TRUE)
}
#' List all checks currently implemented
#'
#' @param quiet If `TRUE`, print all checks to screen. Function invisibly
#' returns list of checks regardless.
#' @return Character vector of names of all checks (invisibly)
#' @examples
#' list_pkgchecks ()
#' @family extra
#' @export
list_pkgchecks <- function (quiet = FALSE) {

    # Every check function in this package follows the "pkgchk_<name>"
    # naming convention.
    ns_objects <- ls (envir = asNamespace ("pkgcheck"), all.names = TRUE)
    chks <- grep ("^pkgchk\\_", ns_objects, value = TRUE)

    if (!quiet) {
        msg <- paste0 (
            "The following checks are ",
            "currently implemented in pkgcheck:"
        )
        cli::cli_alert_info (msg)
        cli::cli_ol (chks)
        cli::cli_end ()
    }

    invisible (chks)
}
#' Exported function names, excluding re-exports
#'
#' Modified version of `getNamespaceExports()` that parses the NAMESPACE
#' file directly and drops functions merely re-exported from other packages
#' (typically things like "%>%").
#' @noRd
exported_fns <- function (path) {

    nspace <- readLines (fs::path (path, "NAMESPACE"))

    # "export(fn1, fn2, ...)" directives -> bare exported names
    exports <- grep ("^export\\s?\\(", nspace, value = TRUE)
    exports <- gsub ("^export\\s?\\(|\\)$", "", exports)
    exports <- unlist (strsplit (exports, ",\\s?"))
    exports <- gsub ("\\\"", "", exports)

    # "importFrom(pkg, fn)" directives -> the imported names; any exported
    # name that also appears here is a re-export and is excluded.
    imports <- grep ("^importFrom\\s?\\(", nspace, value = TRUE)
    imports <- vapply (
        imports,
        function (ln) gsub ("\\)$", "", strsplit (ln, ",") [[1]] [2]),
        character (1),
        USE.NAMES = FALSE
    )
    imports <- gsub ("\\\"", "", imports)

    exports [which (!exports %in% imports)]
}
#' Convert anything that is not an environment into one.
#'
#' Used in `collate_extra_env_checks` to convert package names into namespace
#' environments.
#' @noRd
env2namespace <- function (e) {

    # Environments pass straight through.
    if (is.environment (e)) {
        return (e)
    }

    # If the name appears on the search path as "package:<name>", strip the
    # "package:" prefix before resolving.
    hits <- grep (paste0 (e, "$"), search (), value = TRUE)
    if (length (hits) > 0L) {
        e <- gsub ("package\\:", "", hits [1]) # hard-code to 1st value
    }

    # NULL when no namespace of that name can be loaded.
    tryCatch (
        asNamespace (e),
        error = function (err) NULL
    )
}
|
71f9bc1caede371fbbebb0307ebfcf72bdbfe36a
|
ae5225814dde38b7a3e08e5aa984209cf47ff1ed
|
/R/installAlteryx.R
|
401f31e4890170cd9a8ac90d15e8447306228612
|
[
"MIT"
] |
permissive
|
KrishAK47/jeeves
|
ccbba2d94b55d8325368b9f7e6c93a12fb440042
|
c0ddecad35cdfee1763cf54c8fc61dd477a41e46
|
refs/heads/master
| 2020-06-02T17:22:25.335004
| 2018-06-28T16:21:37
| 2018-06-28T16:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,403
|
r
|
installAlteryx.R
|
#' Download Installers
#'
#' Downloads the Alteryx and R installers for a branch/build from the build
#' repository into a local directory. Windows only.
#' @param buildRepo path to the build repo.
#' @param to directory to download the installers to. defaults to working dir.
#' @param buildDir the build directory; when `NULL`, the latest build for
#'   `branch` is used.
#' @param branch string indicating the branch. defaults to Predictive_Dev.
#' @param type string indicating installer type. it should be one of 'Server',
#'   'Gallery', 'NonAdmin' or ''.
#' @param rInstaller string indicating R installer to download. it should be
#'   one of 'RInstaller' or 'RREInstaller'.
#' @export
downloadInstallers <- function(buildRepo = "\\\\DEN-IT-FILE-07\\BuildRepo",
    to = ".", buildDir = NULL, branch = "Predictive_Dev", type = 'Server',
    rInstaller = 'RInstaller'){
  runFromWindows()
  to <- normalizePath(to)

  # Default to the most recent build directory for the requested branch.
  if (is.null(buildDir)){
    message("No buildDir specified. So defaulting to latest.")
    candidates <- dir(buildRepo, pattern = branch, full.names = TRUE)
    buildDir <- tail(candidates, 1)
  }
  message("Build Directory is ", buildDir)

  # Locate an installer under buildDir/<subdir>, copy it to `to`, and
  # return its new local path.
  fetchInstaller <- function(subdir, pattern){
    installer <- list.files(
      file.path(buildDir, subdir), pattern = pattern, full.names = TRUE
    )
    message("Downloading ", installer)
    file.copy(installer, to)
    file.path(to, basename(installer))
  }

  list(
    ayxInstaller = fetchInstaller('Alteryx', type),
    rInstaller = fetchInstaller('R', rInstaller)
  )
}
#' Install Alteryx
#'
#' Install Alteryx and optionally, the Predictive Tools using the
#' command line SDK. It does a silent install skipping through all dialogs and
#' accepting all defaults.
#' @param installers list of paths to named installers.
#' @export
installAlteryx <- function(installers){
  for (inst in installers){
    # Run each installer silently ("/s") from its own directory.
    withr::with_dir(dirname(inst), {
      lapply(basename(inst), function(exe){
        message("Installing ", basename(exe))
        cmd <- paste(basename(exe), '/s')
        message('Running ', cmd)
        system(cmd)
      })
    })
  }
}
#' Copy predictive macros, samples and plugins from SVN
#'
#' @param to directory to copy files to.
#' @param svnDir path to svn branch.
#' @export
copyAlteryxRPlugin <- function(to = NULL, svnDir = getOption('alteryx.svndir')){
  # Default destination: <dev.dir>/dev/AlteryxRPlugin
  if (is.null(to)){
    to <- file.path(getOption('dev.dir'), 'dev', 'AlteryxRPlugin')
  }

  # Source: the plugin folder inside the SVN checkout.
  pluginDir <- file.path(svnDir, 'Alteryx', 'Plugins', 'AlteryxRPlugin')
  files_to_copy <- list.files(
    pluginDir,
    pattern = 'Macros|Samples|HtmlPlugins',
    full.names = TRUE
  )

  # Create the destination folder on first use.
  if (!file.exists(to)) {
    message("Creating directory ", to, "...")
    dir.create(to, recursive = TRUE)
  }

  message("Copying files to ", to, "...")
  file.copy(files_to_copy, to, recursive = TRUE)
}
# Guard helper: abort with a friendly message unless running on Windows.
runFromWindows <- function(){
  onWindows <- identical(.Platform$OS.type, "windows")
  if (!onWindows) {
    stop("Please run this function from Windows", call. = FALSE)
  }
}
#' List of all installed packages used in Alteryx's Predictive tools
#'
#' A vector of all added packages in an SVN installation of a version of R,
#' split into those from CRAN and those authored by Alteryx.
#'
#' @param svnDir Path to the local copy of a SVN branch of Alteryx.
#' @param type Should the set of packages be based on the packages used in
#'   the previous predictive installer ("last"), or on the set of packages
#'   explicitly used by the predictive tools ("min").
#' @param rVersion The version of R to use as the basis of package
#'   installation. For a completely new version of R, this will likely be
#'   the last version.
#' @export
listInstalledPackages <- function(svnDir = getOption('alteryx.svndir'),
                                  type = c("last", "min"),
                                  rVersion = NULL) {
  type <- match.arg(type)

  # Non-base (Priority == NA) packages installed in the SVN R library.
  rdirs <- getAyxSvnRDirs(svnDir = svnDir, rVersion = rVersion)
  installed <- installed.packages(lib.loc = rdirs$lib)
  addedPkgs <- installed[is.na(installed[, "Priority"]), "Package"]
  # The translations package ships with R itself; drop it.
  addedPkgs <- addedPkgs[addedPkgs != "translations"]

  # Alteryx-authored packages (plus flightdeck, which lacks the prefix).
  ayxPkgs <- c(grep("^Alteryx", addedPkgs, value = TRUE), "flightdeck")

  # "last": everything else installed; "min": the explicit list recorded
  # in 3rdParty/R/loaded_pkgs.txt.
  cran <- if (type == "last") {
    setdiff(addedPkgs, ayxPkgs)
  } else {
    loadedLoc <- paste0(svnDir, "/3rdParty/R/loaded_pkgs.txt")
    as.character(read.csv(file = loadedLoc, header = FALSE)[[1]])
  }

  list(
    cran = cran,
    alteryx = ayxPkgs
  )
}
#' Write RPluginIni
#'
#' The \code{RPluginSettings.ini} file enables Alteryx to find the R engine to
#' work with. It records the R version, the path to the R executable, and
#' whether Revolution R (xdf support) is installed. This function allows one
#' to customize the ini file and support working with R installs that are
#' not provided by the predictive installer.
#'
#' @param revo boolean indicating if xdf inputs are to be supported; when
#'   TRUE the XDF macros and samples are copied into place as well.
#' @param replace boolean indicating if the existing file should be
#'   overwritten; when FALSE the would-be ini contents are returned instead.
#' @return The ini contents as a character vector when \code{replace} is
#'   FALSE; otherwise the file is written and \code{NULL} is returned.
#' @export
writeRPluginIni <- function(revo = FALSE, replace = FALSE){
  iniPath <- file.path(getOption('alteryx.path'), 'Settings',
    'RPluginSettings.ini'
  )
  settings <- c(
    RVersion = paste(R.version$major, R.version$minor, sep = "."),
    RExePath = normalizePath(R.home()),
    RevolutionRinstalled = as.character(revo)
  )
  iniLines <- c(
    '[Settings]',
    paste(names(settings), settings, sep = "=")
  )
  if (revo){
    message('Copying XDF macros and samples ...')
    copyXDFFiles()
  }
  if (!replace){
    return(iniLines)
  }
  message('Writing new RpluginSettings.ini')
  writeLines(iniLines, con = iniPath)
}
#' Install All Needed Packages
#'
#' Installs any CRAN packages used by the predictive tools that are missing
#' from the current library, then installs the Alteryx R packages from the
#' local development checkout via devtools.
#'
#' @param dev Boolean indicating if dev versions of packages should be
#'   installed. NOTE(review): this argument is currently unused by the body.
#' @param rVersion The version of R to use as the basis of package installation.
#'   This optional argument will typically be used when moving to a new version
#'   of R, when the natural source of packages were those packages in the
#'   current version of R used by Alteryx.
#' @export
installAllPackages <- function(dev = TRUE, rVersion = NULL){
  runFromWindows()
  cranPkgs <- listInstalledPackages(rVersion = rVersion)$cran
  existing_packages <- row.names(installed.packages())
  needed_packages <- cranPkgs[!(cranPkgs %in% existing_packages)]
  # Use the second library path when more than one is configured;
  # otherwise the only one available.
  if (length(.libPaths()) == 1) {
    lib <- .libPaths()
  } else {
    lib <- .libPaths()[2]
  }
  # Install any needed R packages
  if (length(needed_packages) > 0){
    message("Installing packages ")
    message(paste(needed_packages, collapse = "\n"))
    install.packages(needed_packages)
  }
  ayxPackages <- c("AlteryxSim", "AlteryxPredictive",
    "AlteryxPrescriptive", "AlteryxRDataX", "AlteryxRviz")
  # The full paths to the binary packages to be installed. This is based on
  # installing the packages from a local directory
  ayxPackages <- file.path(getOption('dev.dir'), 'dev',
    'AlteryxRPackage', ayxPackages)
  # FIX: fail fast with a clear error if devtools is unavailable instead of
  # ignoring the result of requireNamespace() and crashing later at
  # devtools::install.
  if (!requireNamespace('devtools', quietly = TRUE)) {
    stop("Package 'devtools' is required to install the Alteryx packages.",
         call. = FALSE)
  }
  install_ <- devtools::install
  withr::with_libpaths(lib, {
    lapply(ayxPackages, install_)
  })
}
#' Install All Needed Packages V2
#'
#' Alteryx packages, with the exception of AlteryxRDataX, are installed from
#' the Alteryx drat repo on GitHub, while AlteryxRDataX is installed from
#' either the binary installer of the package of the most recent nightly
#' build of the specified branch, or from a local directory. The local
#' directory option would allow for an installation of AlteryxRDataX from
#' source.
#'
#' @param branch string indicating svn branch.
#' @param buildDir build directory; when \code{NULL}, the most recent build
#'   of \code{branch} found in \code{buildRepo} is used.
#' @param ayxRepo string indicating cran-like repo for alteryx packages
#' @param buildRepo build repo (UNC path to the network build share).
#' @export
installAllPackages2 <- function(branch = 'Predictive_Dev', buildDir = NULL,
                                ayxRepo = 'https://alteryx.github.io/drat',
                                buildRepo = "\\\\DEN-IT-FILE-07\\BuildRepo"){
  runFromWindows()
  # All required packages except AlteryxRDataX, which is handled separately
  # from a binary build below.
  requiredPkgs <- unlist(listInstalledPackages(), use.names = F)
  requiredPkgs <- requiredPkgs[requiredPkgs != 'AlteryxRDataX']
  existing_packages <- row.names(installed.packages())
  needed_packages <- requiredPkgs[!(requiredPkgs %in% existing_packages)]
  # NOTE(review): `lib` is computed here but never referenced again in this
  # function -- confirm whether install.packages() calls below were meant to
  # pass `lib = lib`.
  if (length(.libPaths()) == 1) {
    lib <- .libPaths()
  } else {
    lib <- .libPaths()[2]
  }
  message("Installing AlteryxRDataX...")
  if (is.null(buildDir)) {
    # Determine the most recent build for the desired branch
    builds <- dir(buildRepo, pattern = branch, full.names = TRUE)
    buildDir <- tail(builds, 1)
  }
  # The path to the *binary* installer from the most recent build of the branch
  RDataX <- list.files(file.path(buildDir, 'R'), pattern = 'AlteryxRDataX_',
                       full.names = TRUE)
  install.packages(RDataX, repos = NULL)
  if (length(needed_packages) > 0){
    # NOTE(review): getOption("repos") is itself typically a named vector, so
    # wrapping it as `c(CRAN = ...)` can yield names like "CRAN.CRAN" -- this
    # mutates the session-wide repos option; confirm the intended naming.
    options(repos = c(CRAN = getOption("repos"), Alteryx = ayxRepo))
    message("Installing missing packages from CRAN...")
    message(paste(needed_packages, collapse = "\n"))
    install.packages(needed_packages)
  } else {
    message("Updating R Packages")
    ayxPkgs <- grep("^Alteryx", requiredPkgs, value = TRUE)
    # The line below may not work as expected, since it does not have a
    # specified repository, and default repositories have not been specified,
    # via the use of options(), unless it is assumed the user had already
    # done this.
    install.packages(ayxPkgs)
    update.packages()
  }
}
#' Install CRAN Packages needed for Alteryx predictive tools
#'
#' The function can be used to install needed CRAN packages for the predictive
#' tools to either a user's development R installation or the R installation in
#' the user's local copy of the SVN repository of an Alteryx development
#' branch. NOTE: To use this function, the R session being used must be running
#' in administrator mode to allow for appropriate read/write permissions.
#'
#' @param currentRVersion The current version of R being used by Alteryx's
#'   predictive tools.
#' @param installation One of "dev" or "svn". In the case of "dev", the
#'   needed CRAN packages are installed into the system library of the user's
#'   development installation of R. When "svn" is selected, then the packages
#'   are installed to the system library of the R installation located in the
#'   user's local copy of the relevant SVN repository. The development R
#'   version and the one in the local copy of the SVN repository must match.
#'   The respository's path is determined by the alteryx.svndir global options
#'   setting.
#' @param type Should the set of packages be based on the packages used in
#'   the previous predictive installer ("last"), or on the set of packages
#'   explicitly used by the predictive tools ("min").
#' @param repos The CRAN repository to use for package installation. The
#'   default is https://cloud.r-project.org.
#' @return Character vector of the CRAN package names that were installed.
#' @export
install_CRAN_pkgs <- function(currentRVersion,
                              installation = c("dev", "svn"),
                              type = c("last", "min"),
                              repos = "https://cloud.r-project.org") {
  installation <- match.arg(installation)
  type <- match.arg(type)
  # Stop Mac users from harming themselves
  runFromWindows()
  # Bootstrap the process using the packages associated with the current
  # version of R being used
  curPkgs_l <- listInstalledPackages(rVersion = currentRVersion, type = type)
  print(curPkgs_l)
  # Get the set of dependencies that match the current packages used. This
  # is needed to determine any new dependencies
  allCranDeps_vc <- miniCRAN::pkgDep(curPkgs_l$cran, suggests = FALSE)
  # The set of dependencies will include recommended packages which will
  # already be installed; the lines below find those packages and remove
  # them from the set of packages to install
  pkgPriority_vc <- installed.packages()[, "Priority"]
  pkgPriority_vc[is.na(pkgPriority_vc)] <- "optional"
  recoPkgs_vc <- names(pkgPriority_vc[pkgPriority_vc == "recommended"])
  cranPkgs_vc <- allCranDeps_vc[!(allCranDeps_vc %in% recoPkgs_vc)]
  # Address the installation type
  installPlace_sc <- if (installation == "dev") {
    "development R installation.\n"
  } else {
    "copy of the SVN repository.\n"
  }
  libLoc_sc <- if (installation == "dev") {
    .libPaths()[1]
  } else {
    getAyxSvnRDirs()$lib
  }
  # Drop packages that are already present in the target library
  availPkgs_vc <- row.names(installed.packages(lib.loc = libLoc_sc))
  cranPkgs_vc <-
    cranPkgs_vc[!(cranPkgs_vc %in% availPkgs_vc)]
  # Install the packages
  msg_sc <- paste("Installing",
                  length(cranPkgs_vc),
                  "CRAN packages to the local",
                  installPlace_sc)
  cat(msg_sc)
  # FIX: the previous unconditional while loop spins forever if a package
  # repeatedly fails to install (e.g. unavailable on the repo). Retry a
  # bounded number of times, then stop with the list of missing packages.
  maxAttempts_in <- 3L
  attempt_in <- 0L
  insPkgs_vc <- row.names(installed.packages(lib.loc = libLoc_sc))
  while (!all(cranPkgs_vc %in% insPkgs_vc)) {
    missPkgs_vc <- cranPkgs_vc[!(cranPkgs_vc %in% insPkgs_vc)]
    if (attempt_in >= maxAttempts_in) {
      stop("Unable to install after ", maxAttempts_in, " attempts: ",
           paste(missPkgs_vc, collapse = ", "), call. = FALSE)
    }
    attempt_in <- attempt_in + 1L
    install.packages(missPkgs_vc, lib = libLoc_sc, repos = repos)
    insPkgs_vc <- row.names(installed.packages(lib.loc = libLoc_sc))
  }
  cranPkgs_vc
}
#' Install Alteryx R packages
#'
#' The function can be used to install Alteryx's R packages to either a
#' user's development R installation or the R installation in the user's
#' local copy of the SVN repository of an Alteryx development branch.
#' NOTE: To use this function, the R session being used must be running
#' in administrator mode to allow for appropriate read/write permissions.
#'
#' @param installation One of "dev" or "svn". In the case of "dev", the
#'   packages are installed into the system library of the user's development
#'   installation of R. When "svn" is selected, they are installed to the
#'   system library of the R installation located in the user's local copy of
#'   the relevant SVN repository (found via the alteryx.svndir global option).
#'   The two R versions must match.
#' @param dataXPath The local full path to an appropriate binary installer of
#'   the AlteryxRDataX package. If its value is NULL, then no attempt will be
#'   made to install the package.
#' @param useGitHub Install the Alteryx predictive packages other than
#'   AlteryxRDataX from Alteryx's CRAN like repository on GitHub at
#'   https://alteryx.github.io/drat. The default is FALSE.
#' @param ayxDepend A character vector of CRAN packages that Alteryx packages
#'   depend on since the last version, but are not a dependency of other CRAN
#'   packages.
#' @return Character vector of \code{ayxDepend} plus the Alteryx package
#'   names (excluding AlteryxRDataX) that were installed.
#' @export
install_Alteryx_pkgs <- function(installation = c("dev", "svn"),
                                 dataXPath = NULL,
                                 useGitHub = FALSE,
                                 ayxDepend = NULL) {
  installation <- match.arg(installation)
  # Stop Mac users from harming themselves
  runFromWindows()
  isDev <- installation == "dev"
  # Human-readable description of the target, used in progress messages.
  destDesc <- if (isDev) {
    "to the local development R installation"
  } else {
    "to the local copy of the SVN repository"
  }
  # Library the packages are written into.
  targetLib <- if (isDev) {
    .libPaths()[1]
  } else {
    getAyxSvnRDirs()$lib
  }
  # Address dependencies for Alteryx packages, but not the other CRAN packages
  if (length(ayxDepend) > 0) {
    install.packages(ayxDepend,
                     repos = "https://cloud.r-project.org",
                     lib = targetLib)
  }
  # Install AlteryxRDataX if dataXPath is not NULL
  if (!is.null(dataXPath)) {
    message("Installing AlteryxDataX ", destDesc, ".")
    install.packages(dataXPath, repos = NULL, lib = targetLib)
  }
  # Install Alteryx R packages other than AlteryxRDataX
  ayxPkgs <- c("AlteryxSim",
               "flightdeck",
               "AlteryxRviz",
               "AlteryxPredictive",
               "AlteryxPrescriptive")
  message("Installing Alteryx packages other than AlteryxRDataX ",
          destDesc, ".")
  if (useGitHub) {
    withr::with_libpaths(targetLib, {
      install.packages(ayxPkgs,
                       repos = "https://alteryx.github.io/drat",
                       lib = targetLib)})
  } else {
    # Install from the package sources held in the local SVN checkout.
    pkgPaths <- paste0(getOption("alteryx.svndir"),
                       "/Alteryx/Plugins/AlteryxRPackage/",
                       ayxPkgs)
    requireNamespace('devtools')
    installFun <- devtools::install
    withr::with_libpaths(targetLib, {
      lapply(pkgPaths, installFun)
    })
  }
  c(ayxDepend, ayxPkgs)
}
#' Install All Packages Needed for Alteryx Predictive Tools
#'
#' The function can be used to install all needed R packages (CRAN and
#' Alteryx) to either a user's development R installation or the R
#' installation in the user's local copy of the SVN repository of an Alteryx
#' development branch. NOTE: To use this function, the R session being used
#' must be running in administrator mode to allow for appropriate read/write
#' permissions.
#'
#' @param currentRVersion current version of R
#' @param installation One of "dev" or "svn". In the case of "dev", the
#'   needed CRAN packages are installed into the system library of the user's
#'   development installation of R. When "svn" is selected, then the packages
#'   are installed to the system library of the R installation located in the
#'   user's local copy of the relevant SVN repository. The development R
#'   version and the one in the local copy of the SVN repository must match.
#'   The respository's path is determined by the alteryx.svndir global options
#'   setting.
#' @param type Should the set of packages be based on the packages used in
#'   the previous predictive installer ("last"), or on the set of packages
#'   explicitly used by the predictive tools ("min").
#' @param readmeManifest A logical flag indicating whether the Readme file
#'   and the manifest file are saved after installing all the
#'   needed packages. This is only relevant for installing packages into the
#'   SVN R installation.
#' @param dataXPath The local full path to an appropriate binary installer of
#'   the AlteryxRDataX package. If its value is NULL, then no attempt will be
#'   made to install the package.
#' @param repos The CRAN repository to use for package installation. The
#'   default is https://cloud.r-project.org.
#' @param useGitHub Install the Alteryx predictive packages other than
#'   AlteryxRDataX from Alteryx's CRAN like repository on GitHub.
#'   The default is FALSE.
#' @param ayxDepend A character vector of CRAN packages that Alteryx packages
#'   depend on since the last version, but are not a dependency of other CRAN
#'   packages.
#' @export
install_all_pkgs <- function(currentRVersion,
                             installation = c("dev", "svn"),
                             type = c("last", "min"),
                             readmeManifest = TRUE,
                             dataXPath = NULL,
                             repos = "https://cloud.r-project.org",
                             useGitHub = FALSE,
                             ayxDepend = NULL) {
  installation <- match.arg(installation)
  type <- match.arg(type)
  installedCranPkgs_vc <- install_CRAN_pkgs(currentRVersion = currentRVersion,
                                            installation = installation,
                                            type = type,
                                            repos = repos)
  installedAyxPkgs_vc <- install_Alteryx_pkgs(installation = installation,
                                              dataXPath = dataXPath,
                                              useGitHub = useGitHub,
                                              ayxDepend = ayxDepend)
  # FIX: svnR_l used to be created only inside the readmeManifest branch, so
  # readmeManifest = FALSE with installation = "svn" failed at the
  # remove.packages() call below with an "object not found" error. Resolve
  # the SVN R directories whenever the SVN installation is targeted.
  if (installation == "svn") {
    svnR_l <- getAyxSvnRDirs()
  }
  if (readmeManifest && installation == "svn") {
    # The readme file: the sorted union of CRAN and Alteryx package names,
    # written alongside each of the three installer flavors.
    pkgList_l <- listInstalledPackages(type = "last")
    allPkgs_vc <- unlist(pkgList_l)
    allPkgs_vc <- allPkgs_vc[order(allPkgs_vc)]
    readmeFile = file.path(svnR_l$installer, "Readme.txt")
    writeLines(allPkgs_vc, readmeFile)
    mrsFile = file.path(svnR_l$installer, "../MRSInstaller", "Readme.txt")
    writeLines(allPkgs_vc, mrsFile)
    revoFile = file.path(svnR_l$installer, "../RevoInstaller", "Readme.txt")
    writeLines(allPkgs_vc, revoFile)
    # The manifest file: a CSV of package name/version/status per the SVN
    # library contents.
    suppressWarnings(
      man1_mc <- summary(packageStatus(lib.loc = svnR_l$lib,
        repositories = "https://cran.cnr.berkeley.edu"))
    )
    suppressWarnings(
      man2_mc <-
        man1_mc$inst[, c("Package", "Version", "Status", "Priority", "Built")]
    )
    rownames(man2_mc) <- NULL
    write.csv(man2_mc,
              file = file.path(svnR_l$installer, "../Scripts", "packages.csv"),
              row.names = F)
  }
  if (installation == "svn") {
    # The Alteryx packages are distributed separately, so they are removed
    # from the SVN library after the readme/manifest bookkeeping.
    ayxPackages_vc <- c("AlteryxSim",
                        "flightdeck",
                        "AlteryxRviz",
                        "AlteryxPredictive",
                        "AlteryxPrescriptive")
    if (!is.null(dataXPath)) {
      ayxPackages_vc <- c(ayxPackages_vc, "AlteryxRDataX")
    }
    remove.packages(ayxPackages_vc, lib = svnR_l$lib)
  }
}
#' Update R installation
#'
#' Convenience wrapper that installs any missing R packages (CRAN plus the
#' Alteryx packages, via \code{installAllPackages}) and then overwrites
#' \code{RPluginSettings.ini} so Alteryx points at the current R install.
#'
#' @export
updateRInstallation <- function(){
  message('Installing missing R packages...')
  installAllPackages()
  message("Updating RPluginSettings.ini...")
  # replace = TRUE overwrites the existing ini file rather than returning
  # the would-be contents.
  writeRPluginIni(replace = TRUE)
}
#' Copy XDF files from SVN
#'
#' Mirrors the XDF_Macros and XDF_Samples folders of the AlteryxRPlugin tree
#' in a local SVN checkout into the R plugin directory of the Alteryx
#' install, creating the destination directories when needed.
#'
#' @param svnDir svn directory to copy from.
#' @param rVersion string indicating version of R.
copyXDFFiles <- function(svnDir = getOption('alteryx.svndir'),
    rVersion = getRversion()){
  rpluginRoot <- file.path(svnDir, 'Alteryx', 'Plugins', 'AlteryxRPlugin')
  # Copy everything under `from` into `to`, creating `to` first if missing.
  mirror_dir <- function (from, to) {
    if (!file.exists(to)) {
      dir.create(to, recursive = TRUE)
    }
    message("Copying files to ", to, "...")
    file.copy(list.files(from, full.names = TRUE), to, recursive = TRUE)
  }
  destRoot <- file.path(getOption('alteryx.path'),
    paste0('R-', rVersion), 'plugin')
  mirror_dir(file.path(rpluginRoot, 'XDF_Macros'),
             file.path(destRoot, 'Macros', 'XDF_Macros'))
  mirror_dir(file.path(rpluginRoot, 'XDF_Samples'),
             file.path(destRoot, 'Samples', 'XDF_Samples'))
}
|
550f34e605a28106a05b41c6d7f745f705206168
|
4f87ae5e7bb3a536eeea7af032d7b54efd00f19d
|
/man/prep_proposal_dist.Rd
|
f01e810add31813ffbedb049e6219d88ff7cac89
|
[
"MIT"
] |
permissive
|
ml-lab/LDATS
|
5d3f176ea6bffa04b4b14fdcc81df229b6adba41
|
1422570a36777de7189b9c772eb8257985658883
|
refs/heads/master
| 2020-04-12T07:10:43.339295
| 2018-12-17T00:10:27
| 2018-12-17T00:10:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,785
|
rd
|
prep_proposal_dist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ptMCMC.R
\name{prep_proposal_dist}
\alias{prep_proposal_dist}
\title{Pre-calculate the change point proposal distribution for the ptMCMC
algorithm}
\usage{
prep_proposal_dist(nchangepoints, control = TS_controls_list())
}
\arguments{
\item{nchangepoints}{Integer corresponding to the number of
change points to include in the model. 0 is a valid input (corresponding
to no change points, so a singular time series model), and the current
implementation can reasonably include up to 6 change points. The
number of change points is used to dictate the segmentation of the data
for each continuous model and each LDA model.}
\item{control}{Class \code{TS_controls} list, holding control parameters
for the Time Series model including the parallel tempering Markov Chain
Monte Carlo (ptMCMC) controls, generated by
\code{\link{TS_controls_list}}. Currently relevant here is
\code{magnitude}, which controls the magnitude of the step size (is the
average of the geometric distribution).}
}
\value{
List of two matrices: [1] the size of the proposed step for each
iteration of each chain and [2] the identity of the change point location
to be shifted by the step for each iteration of each chain.
}
\description{
Calculate the proposal distribution in advance of actually
running the ptMCMC algorithm in order to decrease computation time.
The proposal distribution is a joint of three distributions:
[1] a multinomial distribution selecting among the change points within
the chain, [2] a binomial distribution selecting the direction of the
step of the change point (earlier or later in the time series), and
[3] a geometric distribution selecting the magnitude of the step.
}
|
814d4c522ba3f82b133a8735cfb7f2b8cc34c711
|
98098b9c75de0237c3324e4698d8231eecdaf5c3
|
/data_structure_reduced.R
|
32b6c1325035c5e0638cb9ae716189390aa64534
|
[] |
no_license
|
lnlroger/c19-behav
|
8bbac306c22aaf6162b2ef305b175822c4b767ff
|
1680867b4868beb1c1b81383fc8792597a2ff1b4
|
refs/heads/master
| 2022-12-23T21:56:16.611238
| 2020-10-06T17:49:33
| 2020-10-06T17:49:33
| 301,788,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,366
|
r
|
data_structure_reduced.R
|
# Builds the combined COVID-19 behaviour dataset. Each sourced script below
# presumably loads one data source into the global environment (mobility,
# days, lockdown, wvs, wb, elections, rol, social_prefs, countries,
# polityIV, UNpop, hf, Gf_gov, ...); those objects are merged into a single
# data frame at the end of this file.
#Files that require regular updating
## Case Death
## Google
## Oxford
library("tidyverse")
library("countrycode")
`%notin%` <- Negate(`%in%`) # Defines logical operator "not in" for use below
# Import google mobility data ----
source("Google/import_mobility.R")
# Lockdown dates ----
source("LockDown/import_lockdown.R")
# Cases and Deaths ----
source("CasesDeaths/import_cases_deaths.R")
# Weather data ---- (currently disabled)
#source("Weather/import_weather.R")
# World Value Survey ----
source("WVS/import_wvs.R")
# World Bank data (Rule of Law, Communicable diseases, hospital beds among others) ----
source("WB/import_wb.R")
# PolityIV index ----
source("Politics/import_polityIV.R")
# UN population data ----
source("UN-Population/import_unpop.R")
# Elections ----
source("Politics/import_dpi.R")
# Social preferences ----
source("Briq/import_social-prefs.R")
# Basic country co-variates (source?) ----
source("Countries/import_covariates.R")
# Collectivism - Hofstede ----
source("Collectivism/import_collectivism.R")
# Gelfand data (Government efficiency) ----
source("Government_Gelfand/import_gelfand.R")
# Previous epidemics ----
source("EM-DAT/import_epidemics.R")
# Import long run coefficients obtained from ARDL
lr.coeffs <- read_rds("compliance/LongRunCoefficients_ARDL.rds")
# Merge into single dataframes ----
# Short version (pure cross-section)
# NOTE(review): this source() remains active even though the short-version
# merge below is commented out -- presumably it creates the `Ox` object used
# in the final merge at the bottom of the file; confirm.
source("OxfordTracking/covid-policy-tracker-master/data/import_OxCGRT.R")
# NOTE(review): in the commented-out short merge below, the
# `mutate(Death_pc = ...)` line is duplicated -- harmless, but worth fixing
# if this block is ever re-enabled.
# datasets.to.merge.short <- list(mobility_short,
#                                 DaysLock_short,
#                                 wvs,
#                                 weather_short,
#                                 wb,
#                                 elections,
#                                 rol,
#                                 social_prefs,
#                                 countries,
#                                 polityIV,
#                                 UNpop,
#                                 hf,
#                                 Gf_gov,
#                                 lr.coeffs
#                                 )
#
# df_short <- Reduce(function(...) full_join(..., by='Country'), datasets.to.merge.short) %>%
#   filter(!is.na(Country)) %>%
#   mutate(Death_pc = TotalDeaths/Population) %>%
#   mutate(Death_pc = TotalDeaths/Population) %>%
#   mutate(Confirmed_pc = TotalCases/Population) %>%
#   filter(Province != "Faroe Islands") %>%
#   mutate(Log_Death_pc = ifelse(Death_pc>0,log(Death_pc),NA)) %>%
#   mutate(Google_pc = Google/Population) %>%
#   mutate(Log_Google_pc = ifelse(Google_pc>0,log(Google_pc),NA)) %>%
#   mutate(DateLockDown=as.Date(DateLockDown,format="%d/%m/%Y"))%>%
#   mutate(Date=as.Date(Date))
#
#
#
# write.csv(df_short,"df_covid_short.csv")
# Long version (daily data)
# Combine daily mobility with the lockdown-day counts, then attach
# country-level social preference measures.
days_mobility_prefs<-merge(merge(mobility,days,by=c('Country','Date'),all=T),social_prefs,by='Country',all=T)
#days_mobility_prefs_regional<-merge(merge(mobility_regional,days,by=c('Country','Date'),all=T),social_prefs_city,by=c('Country'),all=T)
## cannot allocate vector of 156Mb...
datasets.to.merge.long <- list(#days_mobility_prefs_regional,
                               days_mobility_prefs,
                               lockdown,
                               wvs,
                               wb,
                               elections,
                               rol,
                               #social_prefs,
                               #social_prefs_city,
                               countries,
                               #time_short,
                               polityIV,
                               UNpop,
                               hf,
                               Gf_gov,
                               lr.coeffs
                               )
# Sequentially full-join all sources on Country, then derive per-capita
# case/death measures.
# NOTE(review): the join key is Country only, while the first table is daily
# (Country x Date) -- country-level tables are broadcast to every date, which
# may be intended; confirm no accidental fan-out for multi-row sources.
df_long<- Reduce(function(...) full_join(..., by=c('Country')), datasets.to.merge.long) %>%
  filter(!is.na(Country)) %>%
  mutate(Death_pc = total_deaths/Population) %>%
  mutate(Confirmed_pc = total_cases/Population) %>%
  filter(Province != "Faroe Islands") %>%
  mutate(Log_Death_pc = ifelse(Death_pc>0,log(Death_pc),NA))
  #mutate(Google_pc = DeathsBeforeGoogle/Population) %>%
  #mutate(Log_Google_pc = ifelse(Google_pc>0,log(Google_pc),NA)) %>%
  #mutate(DateLockDown = DateLockDown.y)
# Attach the Oxford policy tracker (daily, by Country x Date) and persist.
df_long<-merge(df_long,Ox,by=c("Country","Date"),all=T)
write_rds(df_long,"df_covid_long.rds")
#write_rds(df_long,"df_covid_long_cities.rds")
|
6ce4d4110df33b0a534f3472c5cb8ab2b838319f
|
3328b8263d0351041caebd290d2724702167b76b
|
/man/plotCorrectedCoverage.Rd
|
53307fb05e134795db948c7691ab875c252df943
|
[] |
no_license
|
skillcoyne/BarrettsProgressionRisk
|
c006b9e62299a3e81996d30c511e4330b9548d49
|
c2c5eb68b4be695f720f5eef1d39d552e37b1041
|
refs/heads/master
| 2023-06-07T12:43:47.565697
| 2021-06-22T08:51:02
| 2021-06-22T08:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 465
|
rd
|
plotCorrectedCoverage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_utilities.R
\name{plotCorrectedCoverage}
\alias{plotCorrectedCoverage}
\title{Plot genome-wide coverage from adjusted raw data}
\usage{
plotCorrectedCoverage(brr, as = c("plot", "list"))
}
\arguments{
\item{brr}{A \code{BarrettsRiskRx} or \code{SegmentedSWGS} object.}

\item{as}{One of "plot" or "list".}
}
\value{
ggplot of raw and segmented values
}
\description{
Plot genome-wide coverage from adjusted raw data
}
\author{
skillcoyne
}
|
a8af2dd66ea2cf6a36ae34daa416b438e6ff79aa
|
8c3a282d3be3a23b45fef8069f45cffb23d3a6ee
|
/main.r
|
20671a3f09907bc58b43dca40a37b1501ce35ff8
|
[] |
no_license
|
jrmdel/Metal100
|
8a2faebac9c04eb0d24e68f76ec958006eb7a2e8
|
e6441a770767fb9517555e0e4cc6938304bbdccf
|
refs/heads/main
| 2023-01-08T04:16:18.441843
| 2020-11-14T14:50:32
| 2020-11-14T14:50:32
| 306,159,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,388
|
r
|
main.r
|
# Exploratory analysis of a metal-music dataset: distribution of songs over
# time, trends in audio features, and several factor analyses (PCA, CA, MCA).
library(FactoMineR)
library(readxl)
library(dplyr)
library(tidyr)
library(ggplot2)
# Load data
dataAlbums <- read_excel("./data/metal_dataset_albums.xlsx")
dataSongs <- read_excel("./data/metal_dataset_songs.xlsx")
songsFromCsv <- read.csv("./data/dataAcpExtract.csv")
# Drop the first column of the CSV extract (presumably a row index -- confirm)
dataSongs100 <- data.frame(songsFromCsv)[,-c(1)]
# Song distribution
years <- dataSongs$`Release Year`
# NOTE(review): `main = title(...)` is suspicious -- title() draws on the
# current device and returns NULL, so the heading appears via side effect;
# `main = "Distribution of songs per 5-year time"` was likely intended.
h <- hist(years,
          main= title("Distribution of songs per 5-year time"),
          xlab="Years", ylab="Number of songs", col="mediumpurple"
)
# Label each histogram bar with its count
text(h$mids,h$counts,labels=h$counts, adj=c(0.5, -0.5),cex = 0.6)
# Averaging the 6 main features per year
# Data is tweaked. Here BPM is lowered so that the graph would show more details
dataMean <- dataSongs[,c(6,10:17)] %>% group_by(`Release Year`) %>% summarise(
  BPM=mean(`BPM`)-50,
  Dance=mean(`Dance`),
  Energy=mean(`Energy`),
  Valence=mean(`Valence`),
  Acoustic=mean(`Acoustic`),
  Popularity=mean(`Popularity`),
)
# Gathering data into one table to display it
# (gather() is superseded by pivot_longer() in current tidyr; kept as-is)
dataGather <- dataMean %>% gather(key = `Audio Feature`, value = Score, -`Release Year`)
# One faceted panel per audio feature, with a linear trend line
dataGather %>% ggplot(
  aes(x=`Release Year`, y=Score, group=`Audio Feature`, fill=`Audio Feature`)) +
  geom_line() + geom_smooth(method = lm) +
  theme(
    legend.position = "none",
    plot.title = element_text(hjust = 0.5, size=14)
  ) +
  ggtitle("Mean audio feature per year") +
  facet_wrap(~`Audio Feature`)
# Principal Component Analysis
# Column 3 is a supplementary quantitative variable, column 1 a supplementary
# qualitative variable.
res.pca <- PCA(dataSongs100, quanti.sup=c(3), quali.sup=1)
# Correspondence Analysis on the origin x sub-genre contingency table
res.ca <- CA(
  table(dataAlbums$Origin, dataAlbums$`Sub Metal Genre`)
)
# Distribution per country (album counts, most frequent first)
albumsOrigin <- dataAlbums %>%
  group_by(`Origin`) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
# Multiple Correspondence Analysis
# First, adapting the dataset, changing values (20, 51, 97...) to categories (low, medium, high)
dataMCA <- dataSongs100 %>%
  mutate(energy = ifelse(`Energy` < 33,"low",ifelse(`Energy` < 66,"medium","high"))) %>%
  mutate(acoustic = ifelse(`Acoustic` < 33,"low",ifelse(`Acoustic` < 66,"medium","high"))) %>%
  mutate(dance = ifelse(`Dance` < 33,"low",ifelse(`Dance` < 66,"medium","high")))
# Keeping the essential (column 2 plus the three categorical recodes)
dataMCAlight <- dataMCA[,c(2,10:12)]
# Finally, plotting
res.mca <- MCA(dataMCAlight, quanti.sup = 1, graph = FALSE)
plot(res.mca, cex = 0.7, autoLab = "y", col.var = "black", col.ind = "pink1")
|
c8da8e754982cc98fc88524df630db1cecf9a53a
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4695_0/rinput.R
|
8aff45cf72e54901d9e3abfaffb7524475ffbddb
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the Newick tree for locus 4695_0 and write it back out
# (unrooted trees are the expected input for downstream codeml runs --
# TODO confirm against the pipeline that consumes *_unrooted.txt).
library(ape)
testtree <- read.tree("4695_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4695_0_unrooted.txt")
|
9f96e4d8bc9aa64142a293337f6ffb285921d4bd
|
bbb6d6f862dd460481e4c38917cfd3ff3c446480
|
/Laying date analysis.R
|
14f3a1160a4af63114f12a2a213eade989a227a5
|
[] |
no_license
|
11arc4/Long-term-trends-TRES
|
091e71751a4591e961987c742d83c21997693fcd
|
938a726e3d4936d5ba13e2e74ee67c8964d64d86
|
refs/heads/master
| 2018-12-15T06:43:23.088908
| 2018-12-04T18:45:51
| 2018-12-04T18:45:51
| 120,344,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,851
|
r
|
Laying date analysis.R
|
#First Egg date mini analysis
#Extract the date of first egg
# data <- as.data.frame(matrix(nrow= 12000, ncol=4))
# colnames(data) <- c("NestID", "Year", "LayDate", "FemaleAge")
#
# i=0
# for (nest in as.list(globalData$nests)){
# i=i+1
# data$NestID[i] <- paste(nest$siteID, nest$year, nest$renestStatus, sep="-")
# data$Year[i] <- nest$year
# data$LayDate[i] <- nest$firstEggDate
#
# #data$FemaleAge[i] <- nest$fAge
#
#
#
# }
# i
#
# data<- data[1:i, ]
# write.csv(data, col.names = T, row.names = F, file="file:///C:/Users/11arc/Documents/Masters Thesis Project/Long term trends paper/Data Files_long term trends/Lay date data.csv")
dat <- read.csv("file:///C:/Users/11arc/Documents/Masters Thesis Project/Long term trends paper/Data Files_long term trends/Lay date data.csv")
data2 <- dat %>% group_by(Year) %>% summarise(MLayDate = mean(LayDate, na.rm=T))
data2$TimePeriod <- "Growing"
data2$TimePeriod[data2$Year>1991] <- "Declining"
data2$TimePeriod[data2$Year>2013] <- "PostDecline"
data2$TimePeriod <- factor(data2$TimePeriod)
data3 <- data2 %>% filter(!is.na(MLayDate))
mod <- lm(MLayDate ~TimePeriod*Year, data=data3)
plot(mod)
hist(resid(mod))
shapiro.test(resid(mod))
plot(resid(mod)~data3$Year)
plot(resid(mod)~data3$TimePeriod)
#Looks good
options(na.action="na.fail")
dredge(mod)
anova(mod, test="F")
car::Anova(mod)
mam <- lm(MLayDate ~Year, data=data3)
summary(mam)
ggplot(data3, aes(x=Year, y=MLayDate))+
geom_smooth(method="lm", color="black")+
geom_point()+
labs(y= "Mean laying date (Julian)", x="Year")+
ggthemes::theme_few(base_size = 16, base_family = "serif")+
scale_y_continuous(breaks=c(133, 137, 141, 145))
ggsave(filename='~/Masters Thesis Project/Long term trends paper/Plots for paper/Supplementary laying date plot.jpeg', width=4, height=3, units="in", device="jpeg")
|
a98ab4e1151dd4efc94089b6f0c696808846ce25
|
c5666f152dfa294ad17eda5ecbb51b09af28faa6
|
/R/nhs_connect.R
|
8437c14d16d7c43000fc6e78d76e8aa3aec9fe2e
|
[] |
no_license
|
DilyLong/nhanesR
|
5fd29cfb08956f2b1f3af17f52690f1a6d1e4afe
|
12f712cb5c0ddddb34d19512e80f3b4770674a7d
|
refs/heads/main
| 2023-09-05T11:01:19.542345
| 2021-10-06T08:01:39
| 2021-10-06T08:01:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,405
|
r
|
nhs_connect.R
|
#' Connect PostgreSQL
#'
#' Opens a connection to a PostgreSQL server. If the default \code{nhanes}
#' database is requested but does not yet exist on the server, it is created
#' first via a temporary administrative connection.
#'
#' @param user user, default is 'postgres'
#' @param password password, default is 'pg'
#' @param dbname database name, default is 'nhanes'
#' @param host default is 'localhost'
#' @param port default is 5432
#' @param ... passed to DBI::dbConnect()
#'
#' @return connection with PostgreSQL
#' @export
#'
nhs_Connect <- function(user = 'postgres',
                        password = 'pg',
                        dbname = "nhanes",
                        host = "localhost",
                        port = 5432, ...){
  # Administrative connection (no dbname) used only to inspect/create databases.
  admin_conn <- DBI::dbConnect(RPostgreSQL::PostgreSQL(),
                               user = user,
                               password = password,
                               host = host,
                               port = port)
  # Create database 'nhanes' when it is requested but missing on the server.
  datname <- as.data.frame(dplyr::tbl(admin_conn, dbplyr::sql('SELECT datname FROM pg_database')))[,1]
  if (!'nhanes' %in% tolower(datname) && dbname == "nhanes"){
    message('\ncreate database nhanes')
    DBI::dbGetQuery(conn = admin_conn,
                    statement = "CREATE DATABASE nhanes;")
  }
  # Close the administrative connection explicitly; previously it was
  # overwritten by the second dbConnect() and leaked.
  DBI::dbDisconnect(admin_conn)

  conn <- DBI::dbConnect(RPostgreSQL::PostgreSQL(),
                         user = user,
                         password = password,
                         host = host,
                         port = port,
                         dbname = dbname, ...)
  # auto_disconnect = TRUE closes this connection when the src is garbage collected.
  dbplyr::src_dbi(con = conn, auto_disconnect = TRUE)
}
|
ff5aa7666305eda1d1f2c4bf42b2085f549ea92d
|
3d560900291b0b323d1c8f5512e47a785774141e
|
/man/scale_extrude_face_fill_manual.Rd
|
4296b250ca9074a369e470c307f0f05d6d6f4a49
|
[
"MIT"
] |
permissive
|
hjanime/ggrgl
|
a4de12f6ede8471dbd542499d730e92e420318fa
|
27ba63cc57102e1f410273f688ef7e4ea7a01d85
|
refs/heads/main
| 2023-02-01T16:17:22.454753
| 2020-12-21T20:00:05
| 2020-12-21T20:00:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,144
|
rd
|
scale_extrude_face_fill_manual.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale-extrude-manual.R
\name{scale_extrude_face_fill_manual}
\alias{scale_extrude_face_fill_manual}
\alias{scale_extrude_face_alpha_manual}
\alias{scale_extrude_edge_colour_manual}
\alias{scale_extrude_edge_alpha_manual}
\title{Create your own discrete scale}
\usage{
scale_extrude_face_fill_manual(
...,
values,
aesthetics = "extrude_face_fill",
breaks = waiver()
)
scale_extrude_face_alpha_manual(
...,
values,
aesthetics = "extrude_face_alpha",
breaks = waiver()
)
scale_extrude_edge_colour_manual(
...,
values,
aesthetics = "extrude_edge_colour",
breaks = waiver()
)
scale_extrude_edge_alpha_manual(
...,
values,
aesthetics = "extrude_edge_alpha",
breaks = waiver()
)
}
\arguments{
\item{...}{arguments passed on to \code{discrete_scale}. See \code{ggplot2::scale_fill_manual}
for more details.}
\item{values, aesthetics, breaks}{See \code{ggplot2::scale_fill_manual}
documentation for more details.}
}
\description{
These functions allow you to specify your own set of mappings from levels
in the data to aesthetic values.
}
|
774c30b389bd8ddc9ca200609431a07627777199
|
239598b4d21b6d80367ec0655593f20e6e0a1c6a
|
/man/llog.Rd
|
e7f8b30b41ec601eeb42024536598fe37a8b82e5
|
[
"Apache-2.0"
] |
permissive
|
flor14/ssdtools
|
35183e109c407424ccdb993f5c416de3f46e76b1
|
bc1d13c2d929233d8113f4b5f019a85d45932fe8
|
refs/heads/master
| 2020-04-16T09:03:23.276006
| 2018-12-11T04:13:23
| 2018-12-11T04:13:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,343
|
rd
|
llog.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/llog.R
\name{llog}
\alias{llog}
\alias{dllog}
\alias{qllog}
\alias{pllog}
\alias{rllog}
\title{Log-Logistic Distribution}
\usage{
dllog(x, shape = 1, scale = 1, log = FALSE)
qllog(p, shape = 1, scale = 1, lower.tail = TRUE, log.p = FALSE)
pllog(q, shape = 1, scale = 1, lower.tail = TRUE, log.p = FALSE)
rllog(n, shape = 1, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{shape}{shape parameter.}
\item{scale}{scale parameter.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{p}{vector of probabilities.}
\item{lower.tail}{logical; if TRUE (default), probabilities are P[X <= x], otherwise, P[X > x].}
\item{n}{number of observations.}
}
\value{
dllog gives the density, pllog gives the distribution function,
qllog gives the quantile function, and rllog generates random deviates.
}
\description{
Density, distribution function, quantile function and random generation
for the log-logistic distribution with \code{shape} and \code{scale} parameters.
}
\details{
The functions are wrappers to export the identical functions from the FAdist package.
}
\examples{
x <- rllog(1000)
hist(x,freq=FALSE,col='gray',border='white')
curve(dllog(x),add=TRUE,col='red4',lwd=2)
}
\seealso{
\code{\link[FAdist]{dllog}}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.