blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2cd71a448f98c9955d0532f6dc0aeddc4c1a2f8
|
61d83f366aaf324e4de459ec361f5232a4015966
|
/code/server/tab_real_time.R
|
70a76583e6ad8810f6b8ccc07adee2a8af74271a
|
[] |
no_license
|
vicennt/bicycles-use-analysis
|
eec23052d240797a8393a27782cd404a7a000b75
|
4291ea2e05c982ead213b5b256945e7266a3c6fd
|
refs/heads/master
| 2021-06-28T12:14:18.800242
| 2020-11-13T11:28:16
| 2020-11-13T11:28:16
| 181,752,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,061
|
r
|
tab_real_time.R
|
# ---------- Comparing stations -------------
# Real-time tab server logic: polls the JCDecaux bike-share API for the selected
# city and drives the map, the info boxes, and empty-station notifications.

# Reactive data source: re-fetches the station list roughly every 100 seconds
# (invalidateLater takes milliseconds) or whenever the selected city changes.
# Returns the data.frame parsed from the JSON payload.
api_call <- reactive({
  invalidateLater(100000)
  city <- input$selected_city
  url <- paste0("https://api.jcdecaux.com/vls/v1/stations?contract=", city, "&apiKey=", key)
  jsonlite::fromJSON(url)
})

# Notify the user when at least one station has run out of bikes.
# FIX: the original passed the reactive object itself (`api_call`) as the event
# expression, so the observer never re-fired on new data; it must be *called*
# (`api_call()`) to take a reactive dependency.
observeEvent(api_call(), {
  # Index of the first station with zero available bikes; NA when none is empty.
  aux <- match(TRUE, city_station_info$available_bikes == 0)
  if (!is.na(aux)) {
    str <- paste0("Station ", city_station_info[aux, ]$number, " is empty!!")
    # FIX: `type` must be a string; the bare symbol `danger` errored at runtime.
    showNotification(str, type = "danger", duration = 1)
  }
})

# Show the timestamp of the latest API snapshot (the API reports epoch millis).
output$last_update <- renderText({
  city_station_info <<- api_call()  # cache the snapshot for the other outputs
  t <- city_station_info$last_update[1] / 1000
  date <- as.POSIXct(t, origin = "1970-01-01")
  paste0("Last update: ", date)
})

# Leaflet map with one marker per station, coloured by bike availability.
output$real_time_map <- renderLeaflet({
  city_station_info <<- api_call()
  # Marker colour: green for >= 10 bikes, orange for 6-9, red otherwise.
  getColor <- function(city_station_info) {
    sapply(city_station_info$available_bikes, function(available_bikes) {
      if (available_bikes >= 10) {
        "green"
      } else if (available_bikes >= 6) {
        "orange"
      } else {
        "red"
      }
    })
  }
  icons <- awesomeIcons(
    icon = 'bike',
    iconColor = 'black',
    library = 'ion',
    markerColor = getColor(city_station_info)
  )
  leaflet(data = city_station_info) %>%
    addTiles() %>%
    addAwesomeMarkers(lng = city_station_info$position$lng,
                      lat = city_station_info$position$lat,
                      data = city_station_info,
                      icon = icons,
                      popup = ~as.character(paste0("Station number ", number)),
                      layerId = ~number)
})

# On marker click, re-render the three info boxes for the clicked station
# (layerId carries the station number).
observe({
  click <- input$real_time_map_marker_click
  if (is.null(click)) {
    return()
  } else {
    num_station <- click$id
    output$bike_stands <- renderInfoBox({
      infoBox(
        title = "Number of stands",
        icon = icon("star"),
        color = "red",
        value = paste0(filter(city_station_info, number == num_station)$bike_stands, " bike stands")
      )
    })
    output$available_bikes <- renderInfoBox({
      infoBox(
        title = "Available bikes",
        icon = icon("bicycle"),
        color = "green",
        value = paste0(filter(city_station_info, number == num_station)$available_bikes, " bikes")
      )
    })
    output$available_bike_stands <- renderInfoBox({
      infoBox(
        title = "Free docks",
        icon = icon("parking"),
        color = "light-blue",
        value = paste0(filter(city_station_info, number == num_station)$available_bike_stands, " bike stands")
      )
    })
  }
})

# Placeholder info boxes shown before any station has been selected; the click
# observer above re-assigns these outputs once a marker is chosen.
output$bike_stands <- renderInfoBox({
  infoBox(
    title = "Number of stands",
    icon = icon("star"),
    color = "red",
    value = "Station not selected"
  )
})
output$available_bikes <- renderInfoBox({
  infoBox(
    title = "Available bikes",
    icon = icon("bicycle"),
    color = "green",
    value = "Station not selected"
  )
})
output$available_bike_stands <- renderInfoBox({
  infoBox(
    title = "Available stands",
    icon = icon("parking"),
    color = "light-blue",
    value = "Station not selected"
  )
})
|
8aa420633b870399d077962486a87f2355585a6f
|
8fafedfbd8c3a578da82cd5ba1afc5ad9a2aab6e
|
/data-raw/DATASET.R
|
5bcb9813c846ce0643621f00a53b6cf7a4e52cb4
|
[
"MIT"
] |
permissive
|
rea-osaka/reti
|
4b22b8c938426103e29cff82ad851c4a6a014a13
|
ca59b6cb29650b7ec19c2eeb02e1623876153d6b
|
refs/heads/master
| 2021-09-28T17:30:41.450821
| 2021-09-26T10:02:24
| 2021-09-26T10:02:24
| 151,536,659
| 1
| 0
|
MIT
| 2021-09-26T10:02:25
| 2018-10-04T07:48:20
|
R
|
UTF-8
|
R
| false
| false
| 3,500
|
r
|
DATASET.R
|
## code to prepare `DATASET` dataset goes here
library(tidyverse)

# Source workbooks are downloaded from the Ministry of Internal Affairs and
# Communications page of nationwide local-government codes:
# https://www.soumu.go.jp/denshijiti/code.html
# Current prefecture and municipality codes:
currentcode_path <- "data-raw/000730858.xlsx"
# Revision history of the codes (used for abolished municipalities):
oldcode_path <- "data-raw/000562731.xls"

# Municipalities (sheet 1): keep the code column and the municipality name.
city01 <-
  readxl::read_excel(currentcode_path, sheet = 1) %>%
  select(c(1, 3)) %>%
  `names<-`(c("code", "city_name"))

# Wards of government-designated major cities (sheet 2), same two columns.
city02 <-
  readxl::read_excel(currentcode_path, sheet = 2) %>%
  select(c(1, 3)) %>%
  `names<-`(c("code", "city_name"))

# Abolished municipalities from the revision-history workbook.
# FIX: use FALSE, not F (T/F are ordinary variables and can be reassigned).
city_old <-
  readxl::read_excel(oldcode_path,
                     sheet = 1,
                     skip = 4,
                     col_names = FALSE) %>%
  dplyr::select(c(6, 7)) %>%
  `names<-`(c("code", "city_name"))

# Prefecture-code lookup table. Names are written as unicode escapes so the
# source file stays ASCII-safe; the trailing comments show the rendered kanji.
pref_db <- tibble::tribble(
  ~pref_code, ~pref_name,
  "01","\u5317\u6d77\u9053",#"北海道",
  "02","\u9752\u68ee\u770c",#"青森県",
  "03","\u5ca9\u624b\u770c",#"岩手県",
  "04","\u5bae\u5d0e\u770c",#"宮城県",
  "05","\u79cb\u7530\u770c",#"秋田県",
  "06","\u5c71\u5f62\u770c",#"山形県",
  "07","\u798f\u5cf6\u770c",#"福島県",
  "08","\u8328\u57ce\u770c",#"茨城県",
  "09","\u6803\u6728\u770c",#"栃木県",
  "10","\u7fa4\u99ac\u770c",#"群馬県",
  "11","\u57fc\u7389\u770c",#"埼玉県",
  "12","\u5343\u8449\u770c",#"千葉県",
  "13","\u6771\u4eac\u90fd",#"東京都",
  "14","\u795e\u5948\u5ddd\u770c",#"神奈川県",
  "15","\u65b0\u6f5f\u770c",#"新潟県",
  "16","\u5bcc\u5c71\u770c",#"富山県",
  "17","\u77f3\u5ddd\u770c",#"石川県",
  "18","\u798f\u4e95\u770c",#"福井県",
  "19","\u5c71\u68a8\u770c",#"山梨県",
  "20","\u9577\u91ce\u770c",#"長野県",
  "21","\u5c90\u961c\u770c",#"岐阜県",
  "22","\u9759\u5ca1\u770c",#"静岡県",
  "23","\u611b\u77e5\u770c",#"愛知県",
  "24","\u4e09\u91cd\u770c",#"三重県",
  "25","\u6ecb\u8cc0\u770c",#"滋賀県",
  "26","\u4eac\u90fd\u5e9c",#"京都府",
  "27","\u5927\u962a\u5e9c",#"大阪府",
  "28","\u5175\u5eab\u770c",#"兵庫県",
  "29","\u5948\u826f\u770c",#"奈良県",
  "30","\u548c\u6b4c\u5c71\u770c",#"和歌山県",
  "31","\u9ce5\u53d6\u770c",#"鳥取県",
  "32","\u5cf6\u6839\u770c",#"島根県",
  "33","\u5ca1\u5c71\u770c",#"岡山県",
  "34","\u5e83\u5cf6\u770c",#"広島県",
  "35","\u5c71\u53e3\u770c",#"山口県",
  "36","\u5fb3\u5cf6\u770c",#"徳島県",
  "37","\u9999\u5ddd\u770c",#"香川県",
  "38","\u611b\u5a9b\u770c",#"愛媛県",
  "39","\u9ad8\u77e5\u770c",#"高知県",
  "40","\u798f\u5ca1\u770c",#"福岡県",
  "41","\u4f50\u8cc0\u770c",#"佐賀県",
  "42","\u9577\u5d0e\u770c",#"長崎県",
  "43","\u718a\u672c\u770c",#"熊本県",
  "44","\u5927\u5206\u770c",#"大分県",
  "45","\u5bae\u5d0e\u770c",#"宮崎県",
  "46","\u9e7f\u5150\u5cf6\u770c",#"鹿児島県",
  "47","\u6c96\u7e04\u770c") #"沖縄県"

# Combine current, designated-city and abolished codes into one address DB:
# split the 5-digit address code into prefecture (1-2) and city (3-5) parts
# and attach the prefecture name.
local_address_DB <-
  dplyr::bind_rows(city01, city02, city_old) %>%
  dplyr::filter(!is.na(city_name)) %>%
  unique() %>%
  mutate(address_code = stringr::str_sub(code, 1, 5),
         pref_code = stringr::str_sub(code, 1, 2),
         city_code = stringr::str_sub(code, 3, 5)) %>%
  left_join(pref_db, by = "pref_code") %>%   # same key name on both sides
  select(address_code,
         pref_name,
         city_name)

# FIX: internal = TRUE, not T.
usethis::use_data(local_address_DB, internal = TRUE, overwrite = TRUE)
|
ea371a95362576613478ef0e831e590bddf8d3fc
|
1a9fb15106e9ce175d9e9aad1c34a4fa42c33aa9
|
/courses/41000/code/hwk3s.R
|
44dd20416d9fdb6cd6a9e9d8876a01d094e3721f
|
[] |
no_license
|
VadimSokolov/vadimsokolov.github.io
|
0c1dc7628d7abda48370fd62dbab462fb67cf181
|
2260c785381f24fe15e4fd06e6e076e1be237214
|
refs/heads/master
| 2023-08-31T23:59:03.177923
| 2023-08-21T18:10:37
| 2023-08-21T18:10:37
| 41,467,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,673
|
r
|
hwk3s.R
|
## Business Statistics: Hwk3
##---------------------------------
# set your working directory
# setwd("~/....")
#------------------------------
# Buffett vs Keynes
#------------------------------
# Returns for Buffett's portfolio vs the market index.
# NOTE(review): attach()/detach() is used for classroom brevity; explicit
# buffett$Market / buffett$Buffett would be safer in production code.
buffett = read.csv("http://vsokolov.org/courses/41000/data/buffett.csv",header=T)
attach(buffett)
# Plot the data: Buffett's return against the market return
plot(Market, Buffett, xlab="Market Return", ylab="Buffett Return",pch=20,bty='n')
legend(x="topleft",legend=c("Buffett","Market"),pch=20,col=c(2,4),bty="n")
# correlation matrix
cor(cbind(Buffett,Market))
# Fit the least-squares line and superimpose this line on the plot
model = lm(Buffett~Market)
abline(model,col="red",lwd=3)
title("Buffett vs Market")
# Extract the model coefficients
coef(model)
summary(model)
# Improvement in Fit: residual sd vs the unconditional sd of Buffett's returns
sd(model$residuals)
sd(Buffett)
# Prediction of portfolios
# Market return = 10%
newdata = data.frame(10)
colnames(newdata) = "Market"
predict(model,newdata)
# sum(coef(model)*c(1,10))  # same prediction done by hand
# Market return = -10%
newdata2 = data.frame(-10)
colnames(newdata2) = "Market"
predict(model,newdata2)
# sum(coef(model)*c(1,-10))
# To remove datapoint 10
# buffett_10 = buffett[-10,]
detach(buffett)
#-------------------------------------
# Keynes Data
#-------------------------------------
keynes = read.csv("http://vsokolov.org/courses/41000/data/keynes.csv",header=T)
attach(keynes)
# Plot the data: Keynes' return over time, then against the market
plot(Year,Keynes,pch=20,col="dark grey",type='l',bty='n')
plot(Market, Keynes, xlab="Market Return", ylab="Keynes Excess Return",col=20,pch=20,bty='n')
# correlation matrix
cor(cbind(Keynes,Market))
# Fit the least-squares line.
model = lm(Keynes~Market)
abline(model,col="red",lwd=3)
title("Keynes vs Market")
# Extract the model coefficients
coef(model)
summary(model)
# 4-in-1 residual diagnostics
layout(matrix(c(1,2,3,4),2,2))
plot(model,pch=20)
# Calculate excess return (returns net of the risk-free Rate).
# These assignments create new variables in the global environment; the
# attached data frame itself is unchanged.
Keynes = Keynes - Rate
Market = Market - Rate
# correlation matrix
cor(cbind(Keynes,Market))
modelnew = lm(Keynes~Market)
# Diagnostics
summary(modelnew)
# Prediction of portfolios
# NOTE(review): the predictions below use `model` (raw returns), not
# `modelnew` (excess returns) -- confirm this is intended.
# Market return = 10%
sum(coef(model)*c(1,10))
# Market return = -10%
sum(coef(model)*c(1,-10))
detach(keynes)
#------------------------------
# Diamond Pricing
#------------------------------
diamond = read.csv("http://vsokolov.org/courses/41000/data/diamond.csv",header=T)
colnames(diamond) = c("Weight", "Price")
diamond$Weight = as.numeric(diamond$Weight)
diamond$Price = as.numeric(diamond$Price)
# Run a regression of price on weight
fit = lm(Price~Weight,data = diamond)
summary(fit)
# Plot Price versus Weight with the fitted line superimposed
plot(Price~Weight,data = diamond,
xlab="Weight (carats)",ylab = "Price (Singapore dollars)",
main= "Bivariate Fit of Price (Singapore dollars) By Weight (carats)",
xaxs="i", yaxs="i",pch=20,bty='n')
abline(fit,col="red",lwd=2)
# Plug-in prediction: intercept + slope * weight
# Weight = 0.25
sum(coef(fit)*c(1,0.25))
# Weight = 1
sum(coef(fit)*c(1,1))
#------------------------------
# NFL Salaries
#------------------------------
salary = read.csv("http://vsokolov.org/courses/41000/data/NFLsalary.csv", header = TRUE, stringsAsFactors = T)
salary$Conf = as.factor(salary$Conf)
# attach the dataset
attach(salary)
# Plot a boxplot to compare salaries of the NFC to AFC.
boxplot(Salary~Conf, data=salary,main="Team Salary by Conference", ylab="Salary ($1,000s)")
# Dummy coding (dummy code the "Conf" variable into NFC = 1 and AFC = 0)
dConf = as.numeric(Conf) - 1
# Routine analysis: mean/sd of the dummy and its correlation with salary
mean(dConf)
sd(dConf)
cor(dConf, Salary)
# Linear regression with a dummy variable
model = lm(Salary ~ QB + dConf)
# model = lm(Salary ~ QB + Conf) #produces the same regression result.
summary(model)
detach(salary)
|
dbde45544edd113b8be4e7b65d7d9f798246d2f0
|
0feecef7fdb76ccb37191a71640aed28c2fc648b
|
/DemogObjects/FishTrend.R
|
dcd53573cc78995333a4a0d5124e2b068bbc1a82
|
[
"MIT"
] |
permissive
|
pointblue/weddell-seal-toothfish-model
|
75e9f10db75e9d298d7fe324f38e96debf31fb46
|
57668823cb440ebd55f4e432235de2f39660a42f
|
refs/heads/master
| 2021-01-10T04:36:17.101901
| 2015-06-02T21:00:37
| 2015-06-02T21:00:37
| 36,399,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,678
|
r
|
FishTrend.R
|
# FishTrend: S4 class definition for the simple WESE (Weddell seal) simulation.
#
# Author: lsalas
###############################################################################
## This file sets the FishTrend object for the simple WESE simulation
#' Abstract class for FishTrend
#'
#' Abstract class for FishTrend
#'
#' @slot TFabundance The model with the trend in Toothfish abundance over time, with metadata about start abundance and depletion rate
#' @slot CurrentWeight The current weight of the average seal, estimated from the toothfish-availability-to-weight model and toothfish availability
#' @slot CurrentTF The numeric value of current number of Toothfish consumed
#' @slot CurrentSF The numeric value of current number of Silverfish consumed
#' @slot Timestep The integer value of the current timestep
#' @slot Toothfish A list holding the trend model of toothfish consumed vs. toothfish abundance and the data used to train it, if any
#' @slot Silverfish A list holding the trend model of silverfish consumed vs. toothfish abundance and the data used to train it, if any
#' @slot TFtoWeight A model relating the seals' weight vs. toothfish abundance
#' @exportClass FishTrend
setClass(Class="FishTrend", representation(
TFabundance = "list",
CurrentWeight = "numeric",
CurrentTF = "numeric",
CurrentSF = "numeric",
Timestep = "integer",
Toothfish = "list",
Silverfish = "list",
TFtoWeight = "list"
))
############################################ SLOT METHODS #######################################
########################## Set TFabundance slot
#' Set generic to method that sets the TFabundance slot of the FishTrend object.
#'
#' @name setTFabundance
#' @param object A FishTrend object
#' @param value The model with the trend in Toothfish abundance over time, with metadata about start abundance and depletion rate
#' @noRd
setGeneric("TFabundance<-",
function(object, value) standardGeneric("TFabundance<-"))
#' Set the TFabundance slot of a FishTrend object.
#'
#' @name setTFabundance
#' @param object A FishTrend object
#' @param value The model with the trend in Toothfish abundance over time, with metadata about start abundance and depletion rate
setReplaceMethod("TFabundance",signature(object="FishTrend"),
function(object,value) {
slot(object,"TFabundance")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the TFabundance slot value of a FishTrend object.
#'
#' @name TFabundance
#' @param object A FishTrend object
#' @noRd
setGeneric("TFabundance",
function(object) standardGeneric("TFabundance"))
#' Retrieve the TFabundance slot value of a FishTrend object.
#'
#' @name TFabundance
#' @param object A FishTrend object
setMethod("TFabundance", signature(object="FishTrend"),
function(object) slot(object,"TFabundance"))
##########################
########################## Set CurrentWeight slot
#' Set generic to method that sets the CurrentWeight slot of the FishTrend object.
#'
#' @name setCurrentWeight
#' @param object A FishTrend object
#' @param value The current weight of the average seal, estimated from the toothfish-availability-to-weight model and toothfish availability
#' @noRd
setGeneric("CurrentWeight<-",
function(object, value) standardGeneric("CurrentWeight<-"))
#' Set the CurrentWeight slot of a FishTrend object.
#'
#' @name setCurrentWeight
#' @param object A FishTrend object
#' @param value The current weight of the average seal, estimated from the toothfish-availability-to-weight model and toothfish availability
setReplaceMethod("CurrentWeight",signature(object="FishTrend"),
function(object,value) {
slot(object,"CurrentWeight")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the CurrentWeight slot value of a FishTrend object.
#'
#' @name CurrentWeight
#' @param object A FishTrend object
#' @noRd
setGeneric("CurrentWeight",
function(object) standardGeneric("CurrentWeight"))
#' Retrieve the CurrentWeight slot value of a FishTrend object.
#'
#' @name CurrentWeight
#' @param object A FishTrend object
setMethod("CurrentWeight", signature(object="FishTrend"),
function(object) slot(object,"CurrentWeight"))
##########################
########################## Set CurrentTF slot
#' Set generic to method that sets the CurrentTF slot of the FishTrend object.
#'
#' @name setCurrentTF
#' @param object A FishTrend object
#' @param value The numeric value of current number of Toothfish consumed
#' @noRd
setGeneric("CurrentTF<-",
function(object, value) standardGeneric("CurrentTF<-"))
#' Set the CurrentTF slot of a FishTrend object.
#'
#' @name setCurrentTF
#' @param object A FishTrend object
#' @param value The numeric value of current number of Toothfish consumed
setReplaceMethod("CurrentTF",signature(object="FishTrend"),
function(object,value) {
slot(object,"CurrentTF")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the CurrentTF slot value of a FishTrend object.
#'
#' @name CurrentTF
#' @param object A FishTrend object
#' @noRd
setGeneric("CurrentTF",
function(object) standardGeneric("CurrentTF"))
#' Retrieve the CurrentTF slot value of a FishTrend object.
#'
#' @name CurrentTF
#' @param object A FishTrend object
setMethod("CurrentTF", signature(object="FishTrend"),
function(object) slot(object,"CurrentTF"))
##########################
########################## Set CurrentSF slot
#' Set generic to method that sets the CurrentSF slot of the FishTrend object.
#'
#' @name setCurrentSF
#' @param object A FishTrend object
#' @param value The numeric value of current number of Silverfish consumed
#' @noRd
setGeneric("CurrentSF<-",
function(object, value) standardGeneric("CurrentSF<-"))
#' Set the CurrentSF slot of a FishTrend object.
#'
#' @name setCurrentSF
#' @param object A FishTrend object
#' @param value The numeric value of current number of Silverfish consumed
setReplaceMethod("CurrentSF",signature(object="FishTrend"),
function(object,value) {
slot(object,"CurrentSF")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the CurrentSF slot value of a FishTrend object.
#'
#' @name CurrentSF
#' @param object A FishTrend object
#' @noRd
setGeneric("CurrentSF",
function(object) standardGeneric("CurrentSF"))
#' Retrieve the CurrentSF slot value of a FishTrend object.
#'
#' @name CurrentSF
#' @param object A FishTrend object
setMethod("CurrentSF", signature(object="FishTrend"),
function(object) slot(object,"CurrentSF"))
##########################
########################## Set Timestep slot
#' Set generic to method that sets the Timestep slot of the FishTrend object.
#'
#' @name setTimestep
#' @param object A FishTrend object
#' @param value The integer value of the current timestep
#' @noRd
setGeneric("Timestep<-",
function(object, value) standardGeneric("Timestep<-"))
#' Set the Timestep slot of a FishTrend object.
#'
#' @name setTimestep
#' @param object A FishTrend object
#' @param value The integer value of the current timestep
setReplaceMethod("Timestep",signature(object="FishTrend"),
function(object,value) {
slot(object,"Timestep")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the Timestep slot value of a FishTrend object.
#'
#' @name Timestep
#' @param object A FishTrend object
#' @noRd
setGeneric("Timestep",
function(object) standardGeneric("Timestep"))
#' Retrieve the Timestep slot value of a FishTrend object.
#'
#' @name Timestep
#' @param object A FishTrend object
setMethod("Timestep", signature(object="FishTrend"),
function(object) slot(object,"Timestep"))
##########################
########################## Set Toothfish slot
#' Set generic to method that sets the Toothfish slot of the FishTrend object.
#'
#' @name setToothfish
#' @param object A FishTrend object
#' @param value A list holding the trend model of toothfish consumed vs. toothfish abundance and the data used to train it, if any
#' @noRd
setGeneric("Toothfish<-",
function(object, value) standardGeneric("Toothfish<-"))
#' Set the Toothfish slot of a FishTrend object.
#'
#' @name setToothfish
#' @param object A FishTrend object
#' @param value A list holding the trend model of toothfish consumed vs. toothfish abundance and the data used to train it, if any
setReplaceMethod("Toothfish",signature(object="FishTrend"),
function(object,value) {
slot(object,"Toothfish")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the Toothfish slot value of a FishTrend object.
#'
#' @name Toothfish
#' @param object A FishTrend object
#' @noRd
setGeneric("Toothfish",
function(object) standardGeneric("Toothfish"))
#' Retrieve the Toothfish slot value of a FishTrend object.
#'
#' @name Toothfish
#' @param object A FishTrend object
setMethod("Toothfish", signature(object="FishTrend"),
function(object) slot(object,"Toothfish"))
##########################
########################## Set Silverfish slot
#' Set generic to method that sets the Silverfish slot of the FishTrend object.
#'
#' @name setSilverfish
#' @param object A FishTrend object
#' @param value A list holding the trend model of silverfish consumed vs. toothfish abundance and the data used to train it, if any
#' @noRd
setGeneric("Silverfish<-",
function(object, value) standardGeneric("Silverfish<-"))
#' Set the Silverfish slot of a FishTrend object.
#'
#' @name setSilverfish
#' @param object A FishTrend object
#' @param value A list holding the trend model of silverfish consumed vs. toothfish abundance and the data used to train it, if any
setReplaceMethod("Silverfish",signature(object="FishTrend"),
function(object,value) {
slot(object,"Silverfish")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the Silverfish slot value of a FishTrend object.
#'
#' @name Silverfish
#' @param object A FishTrend object
#' @noRd
setGeneric("Silverfish",
function(object) standardGeneric("Silverfish"))
#' Retrieve the Silverfish slot value of a FishTrend object.
#'
#' @name Silverfish
#' @param object A FishTrend object
setMethod("Silverfish", signature(object="FishTrend"),
function(object) slot(object,"Silverfish"))
##########################
########################## Set TFtoWeight slot
#' Set generic to method that sets the TFtoWeight slot of the FishTrend object.
#'
#' @name setTFtoWeight
#' @param object A FishTrend object
#' @param value A model relating the seals' weight vs. toothfish abundance
#' @noRd
setGeneric("TFtoWeight<-",
function(object, value) standardGeneric("TFtoWeight<-"))
#' Set the TFtoWeight slot of a FishTrend object.
#'
#' @name setTFtoWeight
#' @param object A FishTrend object
#' @param value A model relating the seals' weight vs. toothfish abundance
setReplaceMethod("TFtoWeight",signature(object="FishTrend"),
function(object,value) {
slot(object,"TFtoWeight")<-value
validObject(object)
object
})
#' Set generic to the method that retrieves the TFtoWeight slot value of a FishTrend object.
#'
#' @name TFtoWeight
#' @param object A FishTrend object
#' @noRd
setGeneric("TFtoWeight",
function(object) standardGeneric("TFtoWeight"))
#' Retrieve the TFtoWeight slot value of a FishTrend object.
#'
#' @name TFtoWeight
#' @param object A FishTrend object
setMethod("TFtoWeight", signature(object="FishTrend"),
function(object) slot(object,"TFtoWeight"))
##########################
############################################ INITIALIZE ####################################################
#' Instantiate a new FishTrend object
#'
#' All slots start empty (empty list / numeric(0) / integer(0)); they are
#' populated later via the replacement methods defined above.
#'
#' @name initialize
#' @noRd
#' @exportMethod initialize
setMethod("initialize",
signature(.Object = "FishTrend"),
function (.Object, ...)
{
.Object@TFabundance<-list()
.Object@CurrentWeight<-numeric()
.Object@CurrentTF<-numeric()
.Object@CurrentSF<-numeric()
.Object@Timestep<-integer()
.Object@Toothfish<-list()
.Object@Silverfish<-list()
.Object@TFtoWeight<-list()
.Object
}
)
############################################ FISHTREND METHODS #######################################
########################## Generate trend values for CurrentTF and CurrentSF from trend data and timestep
#' Set generic to method that generates current values of seal weight, and toothfish and silverfish abundance from toothfish trend model and timestep
#'
#' @name UpdateFish
#' @param object A FishTrend object
setGeneric("UpdateFish",
  function(object, ...) standardGeneric("UpdateFish"))
#' Generates current values of seal weight and of toothfish and silverfish
#' consumed from the trend models and the timestep.
#'
#' @param object A FishTrend object
#' @param timestep Integer, a value for the current timestep; must equal
#'   Timestep(object) + 1
#' @return The updated FishTrend object
setMethod("UpdateFish", signature(object = "FishTrend"),
  function(object, timestep) {
    if (is.null(object)) stop("A FishTrend object is required.")
    # `||` short-circuits so the type test is skipped when timestep is NULL;
    # is.integer() replaces the fragile class(timestep) != "integer" comparison
    # (class() can return a vector of length > 1).
    if (is.null(timestep) || !is.integer(timestep)) stop("A valid timestep value is required.")
    oldStep <- Timestep(object)
    # NOTE: The whole modeling is initialized with year 1 data, so oldStep is
    # length=1 at the beginning and has the first step value (=0)
    if (oldStep != (timestep - 1)) stop("The new timestep value is not 1 + old value.")
    # Predict toothfish abundance at this timestep from the abundance trend
    # model; the model's single predictor is named "year".
    tfabund.mdl <- TFabundance(object)[[1]]
    newdata <- data.frame(year = timestep)
    tfa <- predict(tfabund.mdl, newdata = newdata)
    # Models mapping toothfish abundance to seal weight and to fish consumed.
    sealwgt <- TFtoWeight(object)[[1]]
    tfmodel <- Toothfish(object)[[1]]
    sfmodel <- Silverfish(object)[[1]]
    tfab <- data.frame(TFabund = tfa)
    # Each prediction may fail independently; capture errors and only commit
    # the new state when all three succeed.
    newWt <- try(predict(sealwgt, newdata = tfab), silent = TRUE)
    newTF <- try(predict(tfmodel, newdata = tfab), silent = TRUE)
    newSF <- try(predict(sfmodel, newdata = tfab), silent = TRUE)
    if (!inherits(newWt, "try-error") && !inherits(newTF, "try-error") && !inherits(newSF, "try-error")) {
      CurrentWeight(object) <- newWt
      CurrentTF(object) <- newTF
      CurrentSF(object) <- newSF
      Timestep(object) <- timestep
      return(object)
    } else {
      stop("Failed to obtain new seal weight, toothfish or silverfish consumed")
    }
  }
)
##########################
|
c4a66426bcef380e266a867de7e55870409a0c04
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/asVPC/examples/asVPC.distanceW.Rd.R
|
944812e63248da27c206823eb20877e49062ad5c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
asVPC.distanceW.Rd.R
|
# Extracted example from the asVPC package documentation (asVPC.distanceW.Rd).
library(asVPC)
### Name: asVPC.distanceW
### Title: calculate percentiles of original data using distance-related
### weight percentiles of simulated data with corresponding confidence
### interval
### Aliases: asVPC.distanceW
### ** Examples
# Example datasets shipped with the package: observed and simulated data.
data(origdata)
data(simdata)
# Distance-weighted VPC: 10 time bins, 100 simulations, 3 histograms.
asVPC.distanceW(origdata,simdata,n.timebin=10, n.sim=100,n.hist=3)
|
4601cd9174c8ba92af2881da4b62f4da4ab781ed
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phylosim/examples/omegaVarM1.CodonSequence.Rd.R
|
5816528f811a71e223f752ffe762aedd00c06a4d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 620
|
r
|
omegaVarM1.CodonSequence.Rd.R
|
# Extracted example from the phylosim package documentation
# (omegaVarM1.CodonSequence.Rd).
library(phylosim)
### Name: omegaVarM1.CodonSequence
### Title: The M1 (neutral) model of variable omega ratios among sites
### Aliases: omegaVarM1.CodonSequence CodonSequence.omegaVarM1
### omegaVarM1,CodonSequence-method
### ** Examples
# create a GY94 object (codon substitution process with kappa = 2)
p<-GY94(kappa=2)
# create a CodonSequence object, attach process p
s<-CodonSequence(length=25, processes=list(list(p)))
# sample states
sampleStates(s)
# sample omegas in range 1:20 from model M1
omegaVarM1(s,p,p0=0.5,1:20)
# get omega values
getOmegas(s,p)
# get a histogram of omega values in range 1:20
omegaHist(s,p,breaks=50,1:20)
|
5e19d4089f859245043eec14ea6cd1d1bbdfe77d
|
d2d392813c3a8f34cd96ce2fa92884ee56b8c46b
|
/man/streetNumberLocator.Rd
|
0a308b87ef97955a55f96993d6c2f8514e06645d
|
[] |
no_license
|
GapData/cholera
|
108336a159e151707949741e572fca9306e3eae0
|
5bdf5c00ee6a0b744b7ec42e74e306334917cf64
|
refs/heads/master
| 2021-08-30T08:03:19.555449
| 2017-12-16T23:19:12
| 2017-12-16T23:19:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,195
|
rd
|
streetNumberLocator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streetNumberLocator.R
\name{streetNumberLocator}
\alias{streetNumberLocator}
\title{Locate road by its numerical ID.}
\usage{
streetNumberLocator(road.number, zoom = FALSE, radius = 1,
all.cases = FALSE)
}
\arguments{
\item{road.number}{Numeric or integer. A whole number between 1 and 528.}
\item{zoom}{Logical.}
\item{radius}{Numeric. Controls the degree of zoom. For values <= 5, the numeric ID of all cases or just the anchor case is plotted.}
\item{all.cases}{Logical. When zoom = TRUE and radius <= 5, all.cases = TRUE plots the numeric ID of all cases; when all.cases = FALSE only the numeric ID of the anchor case is shown.}
}
\value{
A base R graphics plot.
}
\description{
Plots John Snow's map of the 1854 London cholera outbreak and highlights the
selected road. See cholera::roads for numerical IDs and \code{vignette("road.names")} for details.
}
\examples{
streetNumberLocator(243)
streetNumberLocator(243, zoom = TRUE)
streetNumberLocator(243, zoom = TRUE, radius = 0)
}
\seealso{
\code{\link{roads}}, \code{\link{road.segments}}, \code{\link{streetNameLocator}}, \code{vignette("road.names")}
}
|
52fff9eaaef1346ad90309bd7f174d1b5022055d
|
dddf0a017c8426e173f10787d4c5b163cc233bf7
|
/R/PacfToAR.R
|
adf17595cc0c2190b9d8c43e692dce17ed988acf
|
[] |
no_license
|
cran/FGN
|
7dda2722a37182c1a1a9ab566d92b7e64e1807e4
|
a6465ebd0376f1cf7f4ee19bbe0c5506f8d52307
|
refs/heads/master
| 2016-09-11T13:47:04.711333
| 2014-05-15T00:00:00
| 2014-05-15T00:00:00
| 17,691,774
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
PacfToAR.R
|
#' Convert partial autocorrelations to AR coefficients
#'
#' Applies the Levinson-Durbin recursion to transform a vector of partial
#' autocorrelations zeta_1..zeta_p into the coefficients phi_1..phi_p of the
#' corresponding AR(p) model.
#'
#' @param zeta Numeric vector of partial autocorrelations (may be empty).
#' @return Numeric vector of AR coefficients, the same length as `zeta`.
`PacfToAR` <- function(zeta) {
  p <- length(zeta)
  if (p == 0) return(numeric(0))
  if (p == 1) return(zeta)
  phi <- zeta[1]
  # Levinson-Durbin step: extend the order-(k-1) solution to order k.
  for (k in 2:p) {
    prev <- phi
    phi <- c(prev - zeta[k] * rev(prev), zeta[k])
  }
  phi
}
|
67b779e7af7b02156cae92bf0a5d57bd315d86c3
|
5a87297f6dbcd7027fa8412018e0dee36a2b42ba
|
/man/plot_nhdplus.Rd
|
a13799a5bdd63642917fd6fd31a0e502f800a5c9
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0"
] |
permissive
|
hydroinfo-gis/nhdplusTools
|
fce3b719a52f1c00d1b3eb87c1b4522c8f841627
|
48020b1b7aca68c4e4fc641ff3391d12d032e2c2
|
refs/heads/master
| 2023-02-05T08:38:37.307951
| 2020-12-09T16:43:43
| 2020-12-09T16:43:43
| 321,866,343
| 1
| 0
|
CC0-1.0
| 2020-12-16T04:20:12
| 2020-12-16T04:20:11
| null |
UTF-8
|
R
| false
| true
| 5,183
|
rd
|
plot_nhdplus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_nhdplus.R
\name{plot_nhdplus}
\alias{plot_nhdplus}
\title{Plot NHDPlus}
\usage{
plot_nhdplus(
outlets = NULL,
bbox = NULL,
streamorder = NULL,
nhdplus_data = NULL,
gpkg = NULL,
plot_config = NULL,
add = FALSE,
actually_plot = TRUE,
overwrite = TRUE,
flowline_only = NULL,
...
)
}
\arguments{
\item{outlets}{list of nldi outlets. Other inputs are coerced into nldi outlets, see details.}
\item{bbox}{object of class bbox with a defined crs. See examples.}
\item{streamorder}{integer only streams of order greater than or equal will be returned}
\item{nhdplus_data}{geopackage containing source nhdplus data (omit to download)}
\item{gpkg}{path and file with .gpkg ending. If omitted, no file is written.}
\item{plot_config}{list containing plot configuration, see details.}
\item{add}{boolean should this plot be added to an already built map.}
\item{actually_plot}{boolean actually draw the plot? Use to get data subset only.}
\item{overwrite}{passed on the \link{subset_nhdplus}.}
\item{flowline_only}{boolean only subset and plot flowlines?}
\item{...}{parameters passed on to rosm.}
}
\value{
plot data is returned invisibly in NAD83 Lat/Lon.
}
\description{
Given a list of outlets, get their basin boundaries and network and return a plot in
EPSG:3857 Web Mercator Projection.
}
\details{
plot_nhdplus supports several input specifications. An unexported function "as_outlet"
is used to convert the outlet formats as described below.
\enumerate{
\item if outlets is omitted, the bbox input is required and all nhdplus data
in the bounding box is plotted.
\item If outlets is a list of integers, it is assumed to be NHDPlus IDs (comids)
and all upstream tributaries are plotted.
\item if outlets is an integer vector, it is assumed to be all NHDPlus IDs (comids)
that should be plotted. Allows custom filtering.
\item If outlets is a character vector, it is assumed to be NWIS site ids.
\item if outlets is a list containing only characters, it is assumed to be a list
of nldi features and all upstream tributaries are plotted.
\item if outlets is a data.frame with point geometry, a point in polygon match
is performed and upstream with tributaries from the identified catchments is plotted.
}
The \code{plot_config} parameter is a list with names "basin", "flowline" and "outlets".
The following shows the defaults that can be altered.
\enumerate{
\item basin \code{list(lwd = 1, col = NA, border = "black")}
\item flowline \code{list(lwd = 1, col = "blue")}
\item outlets \preformatted{
list(default = list(col = "black", border = NA, pch = 19, cex = 1),
nwissite = list(col = "grey40", border = NA, pch = 17, cex = 1),
huc12pp = list(col = "white", border = "black", pch = 22, cex = 1),
wqp = list(col = "red", border = NA, pch = 20, cex = 1))
}
}
If adding additional layers to the plot, data must be projected to EPSG:3857 with
`sf::st_transform(x, 3857)` prior to adding to the plot.
}
\examples{
\donttest{
options("rgdal_show_exportToProj4_warnings"="none")
rosm::set_default_cachedir(tempfile())
plot_nhdplus("05428500")
plot_nhdplus("05428500", streamorder = 2)
plot_nhdplus(list(13293970, 13293750))
sample_data <- system.file("extdata/sample_natseamless.gpkg", package = "nhdplusTools")
plot_nhdplus(list(13293970, 13293750), streamorder = 3, nhdplus_data = sample_data)
plot_nhdplus(list(list("comid", "13293970"),
list("nwissite", "USGS-05428500"),
list("huc12pp", "070900020603"),
list("huc12pp", "070900020602")),
streamorder = 2,
nhdplus_data = sample_data)
plot_nhdplus(sf::st_as_sf(data.frame(x = -89.36083,
y = 43.08944),
coords = c("x", "y"), crs = 4326),
streamorder = 2,
nhdplus_data = sample_data)
plot_nhdplus(list(list("comid", "13293970"),
list("nwissite", "USGS-05428500"),
list("huc12pp", "070900020603"),
list("huc12pp", "070900020602")),
streamorder = 2,
nhdplus_data = sample_data,
plot_config = list(basin = list(lwd = 2),
outlets = list(huc12pp = list(cex = 1.5),
comid = list(col = "green"))))
bbox <- sf::st_bbox(c(xmin = -89.43, ymin = 43, xmax = -89.28, ymax = 43.1),
crs = "+proj=longlat +datum=WGS84 +no_defs")
fline <- sf::read_sf(sample_data, "NHDFlowline_Network")
comids <- nhdplusTools::get_UT(fline, 13293970)
plot_nhdplus(comids)
#' # With Local Data
plot_nhdplus(bbox = bbox, nhdplus_data = sample_data)
# With downloaded data
plot_nhdplus(bbox = bbox, streamorder = 3)
# Can also plot on top of the previous!
plot_nhdplus(bbox = bbox, nhdplus_data = sample_data,
plot_config = list(flowline = list(lwd = 0.5)))
plot_nhdplus(comids, nhdplus_data = sample_data, streamorder = 3, add = TRUE,
plot_config = list(flowline = list(col = "darkblue")))
}
}
|
3fee786c35ad47aef45b4c10affdd1c5b8041a8c
|
41e7e34d08802616eb45cfac67e9874dc6d0599d
|
/Scripts/2020-02-11_Hotel-Bookings_Clean.R
|
667b682c7c5714df8bcb1683f9da09dd4ee04de2
|
[
"MIT"
] |
permissive
|
grvsrm/Tidy-Tuesday
|
40de74f6cd772234df9c6067b888cd62ef71d8f4
|
fb0ba80691e478510233ea905218a8120495c129
|
refs/heads/main
| 2023-02-23T21:39:09.764984
| 2021-01-27T16:48:56
| 2021-01-27T16:48:56
| 309,890,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
2020-02-11_Hotel-Bookings_Clean.R
|
# Prerequisites
library(tidyverse)
library(here)
library(janitor)
# Downloadthe data
# read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-11/hotels.csv') %>%
# write_rds(here("data", "hotel_bookings_raw.rds"))
#
# Clean the data
hotel_cleaned <- read_rds(here("data", "hotel_bookings_raw.rds")) %>%
clean_names() %>%
remove_empty(c("rows", "cols"))
# Save the data
hotel_cleaned %>%
write_rds(here("data", "hotel_bookings.rds"))
|
21a49aa022e24ab96595d150cdeda131a7825f63
|
5febc1e3f2dd766ff664f8e0ae79002072359bde
|
/man/mcatlas_annotate_mc_by_mc2mc_projection.Rd
|
437ce30e8f93cf37b42a7fdee0220366a8053a93
|
[
"MIT"
] |
permissive
|
tanaylab/metacell
|
0eff965982c9dcf27d545b4097e413c8f3ae051c
|
ff482b0827cc48e5a7ddfb9c48d6c6417f438031
|
refs/heads/master
| 2023-08-04T05:16:09.473351
| 2023-07-25T13:37:46
| 2023-07-25T13:37:46
| 196,806,305
| 89
| 30
|
NOASSERTION
| 2023-07-25T13:38:07
| 2019-07-14T07:20:34
|
R
|
UTF-8
|
R
| false
| true
| 1,547
|
rd
|
mcatlas_annotate_mc_by_mc2mc_projection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/atlas.r
\name{mcatlas_annotate_mc_by_mc2mc_projection}
\alias{mcatlas_annotate_mc_by_mc2mc_projection}
\title{Annotate query metacell with atlas by comparing query metacell gene profiles to the atlas MCs gene profiles
This will take each MC in the query MC and find its to correlated
metacell in the reference, gemeratomg some figures along the way/}
\usage{
mcatlas_annotate_mc_by_mc2mc_projection(
atlas_id,
qmc_id,
qmat_naming_type,
new_qmc_id,
q_gset_id = NULL,
T_cor_gap = 1,
nd_color = "lightgray",
fig_cmp_dir = NULL
)
}
\arguments{
\item{atlas_id}{id of atlas object in scdb}
\item{qmc_id}{id of metacell object ina scdb}
\item{qmat_naming_type}{naming scheme of query matrix/mc}
\item{new_qmc_id}{id of recolored metacell object to save in DB (NULL will supress updating db)}
\item{q_gset_id}{query gene set id object (optional) - to restrict features used in comaprison to the intersection of atlas and query gsets}
\item{T_cor_gap}{how much gap between internal atlas mc-mc correlation and query-atlas correlation one is allowing to keep the annotation.}
\item{nd_color}{- color for metacell without an annotation}
\item{fig_cmp_dir}{name of directory to put figures per MC}
}
\description{
Annotate query metacell with atlas by comparing query metacell gene profiles to the atlas MCs gene profiles
This will take each MC in the query MC and find its to correlated
metacell in the reference, gemeratomg some figures along the way/
}
|
d4318c603a8bbd16215677137da95b5ead86779c
|
61a88247e1261f03659be3ada7c70ec7944b3ae0
|
/MakingMaps/mapsShapefile.R
|
01a9c27b4af22165b118b384a217c6179b6a10e4
|
[] |
no_license
|
niehusst/R-DataSci
|
09e844de8d5290b3bf64de3c24666d69b8cc680b
|
1bf14f9624ea7459037d6d61f3dc61198eeb7721
|
refs/heads/master
| 2020-03-30T06:54:19.596360
| 2018-11-26T01:34:57
| 2018-11-26T01:34:57
| 150,898,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,121
|
r
|
mapsShapefile.R
|
#Liam Niehus-Staab
#Maps with shapefiles Lab
#10/3/18
# load required packages
library(maptools) # creates maps and work with spatial files
library(broom) # assists with tidy data
library(ggplot2) # graphics package
library(leaflet) # interactive graphics (output does not show in RMD files)
library(dplyr) # joining data frames
library(readr) # quickly reads files into R
# Reads the shapefile into the R workspace.
TerrorismData <- read_csv("~/Shared/F18MAT295/r-tutorials-master/maps-shapefiles/data/terrorismData.csv")
Worldshapes <- readShapeSpatial("~/Shared/F18MAT295/r-tutorials-master/maps-shapefiles/data/ne_50m_admin_0_countries")
Worldshapes_tidied <- tidy(Worldshapes)
str(Worldshapes, max.level = 2)
### On your own question
# Both **Worldshapes** and **TerrorismData** files have a column that defines a
# region as `Europe and Central Asia`
# (see `Worldshapes@data$region_wb` and`TerrorismData$region2`).
# Create a map of all incidents that occured in `Europe and Central Asia` during 2013.
# What countries appear to have the most incidents? Give a short (i.e. one paragraph)
# description of the graph. This description should include an identification of the
# countries with the most incidents.
#filter correct data
inc2013Eurasia = filter(TerrorismData, iyear == 2013, region2 == "Europe & Central Asia")
#graph incidents on map
eurasia <- ggplot() + geom_point(data = inc2013Eurasia,
aes(x = longitude, y = latitude, size = severity),
color = "red", alpha = .5) +
coord_cartesian(xlim = c(-10,70), ylim = c(35, 67)) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black", alpha = .3) +
labs(title = "Terrorism in Europe & Central Asia (2013)", x = "Longitude", y = "Lattitude")
eurasia #visualize
# It appears that the UK and Russia have the most incidents of the Eurasian countries
# in 2013. The highest concentration of incidents are in Northern Ireland (UK) and
# the Russia-Georgia border area in the Caucasus mountains. This makes sense as Northern
# Ireland is the main opperating turf of the the Irish Republican Army, and the
# Caucasus mountians has been the location of fighting between Russia and Islamic State
# forces.
### lab work through ###
# The `readShapeSpatial` from the `maptools` package allows us to load all component files simultaneously.
# The `str` command allows us to see that the `Worldshapes` object is of the class `SpatialPolygonsDataFrame`. This means that R is representing this shapefile as a special object consisting of 5 slots of geographic data. The first slot, (and the most relevant to us) is the data slot, which contains a data frame representing the actual data adjoined to the geometries. Similar to how we access a column in a data frame with the `$` infix, we can also access a slot in a shapefile with the `@` infix.
# The `max.level=2` limits the information that is printed from the `str` command.
Worldshapes_tidied <- tidy(Worldshapes) # have to tidy it because shape files cant be read directly
g <- ggplot() +
geom_polygon(data = Worldshapes_tidied,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black")
g #map of the world
# create a new terrorism data frame that includes only four years
Incidents2 <- filter(TerrorismData, iyear == 1975 | iyear == 1985 | iyear == 1995 |iyear == 2005)
p <- ggplot() + geom_point(data = Incidents2,
aes(x = longitude, y = latitude, size = severity),
size = 1, color = "red", alpha = .5) +
facet_wrap(~iyear) +
coord_cartesian(xlim = c(-11, 3), ylim = c(51, 59)) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black", alpha = .3)
p #map of terrorism occurences in uk and ireland
##### QUESTIONS #########
# 1) Create a graph that shows the terrorism incidents that occured in the United
#States during 2001. Have the size and color of the incident be determined by `severity`.
Incidents3 <- filter(TerrorismData, iyear == 2001)
us <- ggplot() + geom_point(data = Incidents3,
aes(x = longitude, y = latitude, size = severity),
color = "red", alpha = .5) +
facet_wrap(~iyear) +
coord_cartesian(xlim = c(-125, -60), ylim = c(25, 50)) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black", alpha = .3) +
labs(title = "Terrorism in the US (2001)", x = "Longitude", y = "Lattitude")
us
# 2) Suppose you want to look the effects of terrorism before, during, and after the
#United States invasion of Iraq in 2003. Create three maps of the area, displayed
#side-by-side. Hint: You might also want to center the map on Iraq using
#`xlim = c(35,50)` and `ylim = c(28,38)`.
Iraqbefore <- filter(TerrorismData, iyear == 2002)
Iraqduring <- filter(TerrorismData, iyear == 2003)
Iraqafter <- filter(TerrorismData, iyear == 2004)
before <- ggplot() + geom_point(data = Iraqbefore,
aes(x = longitude, y = latitude, size = severity),
color = "red", alpha = .5) +
facet_wrap(~iyear) +
coord_cartesian(xlim = c(35,50), ylim = c(28,38)) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black", alpha = .3) +
labs(title = "Terrorism in Iraq (2002)", x = "Longitude", y = "Lattitude")
during <- ggplot() + geom_point(data = Iraqduring,
aes(x = longitude, y = latitude, size = severity),
color = "red", alpha = .5) +
facet_wrap(~iyear) +
coord_cartesian(xlim = c(35,50), ylim = c(28,38)) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black", alpha = .3) +
labs(title = "Terrorism in Iraq (2003)", x = "Longitude", y = "Lattitude")
after <- ggplot() + geom_point(data = Iraqafter,
aes(x = longitude, y = latitude, size = severity),
color = "red", alpha = .5) +
facet_wrap(~iyear) +
coord_cartesian(xlim = c(35,50), ylim = c(28,38)) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
fill = "lightblue", color = "black", alpha = .3) +
labs(title = "Terrorism in Iraq (2004)", x = "Longitude", y = "Lattitude")
before
during
after
# 3) Create a world map colored by the square root of the estimated population
#`sqrt(pop_est)` from the `Worldshapes@data`. Does it appear that population is highly
#correlated with the number of incidents that occur?
pop = tidy(Worldshapes2@data$pop_est)
world = ggplot() +
geom_point(data = Incidents3, aes(x = longitude, y = latitude, size = severity),
color = "red", alpha = .5) +
facet_wrap(~iyear) +
geom_polygon(data = Worldshapes2,
aes(x = long, y = lat, group = group),
color = "black", alpha = .3) +
scale_fill_continuous()
labs(title = "Terrorism vs Square root of population", x = "Longitude", y = "Lattitude")
world #trash
#### Leaflets ####
library(leaflet)
# Subset terrorism database to only contain events in Europe in 1984
US2000.05Incidents <- filter(TerrorismData, iyear == 2000 | iyear == 2001 | iyear == 2002 | iyear == 2003 | iyear == 2004 | iyear == 2005)
#country_txt == "United States")
# addTiles() Add background map
# setView( Set where the map should originally zoom to
leaflet() %>%
addTiles() %>%
setView(lng = -125, lat = -100, zoom = 4) %>%
addCircleMarkers(data = data,
lat = ~latitude, lng = ~longitude,
radius = ~severity, popup = ~info,
color = "red", fillColor = "yellow")
|
889d703e7c21de39b220f356764f7d0825bc8483
|
a974809566b3d6d278d4b25ada7701c40a55fbba
|
/socioEcoData/script/telechargementWebInsee.R
|
c2a03a465311c16bac7f7309a20f76c95a5b72e6
|
[] |
no_license
|
yanndav/biovallee
|
e101069773a37c1e4a88a31ecbd0652f08923469
|
e1639e9e1b8e2ebb9dfe7729680a835a65e88176
|
refs/heads/main
| 2023-06-27T00:27:08.182670
| 2021-07-28T15:46:45
| 2021-07-28T15:46:45
| 383,185,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,109
|
r
|
telechargementWebInsee.R
|
#########################################
# #
# TELECHARGEMENTS DONNEES INSEE #
# . Yann DAVID . TI-Biovallée . #
# Juillet 2021 #
# #
#########################################
# Le but de ce script est de télécharger les données données pour la France
# 00. INSTALLATION PACKAGES -----------------------------------------------
# Initialisation dossiers
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
data <- sub('/script','/data',getwd())
if(!file.exists(paste0(data,"/insee"))){
dir.create(paste0(data,"/insee"))
}
data_insee = paste0(data,"/insee")
# Installation packages
## Packages pour le téléchargement du découpage géographique
tel = c('httr','utils','sf','rvest','tidyverse','readxl')
invisible(lapply(tel,function(pack){
if(!c(pack %in% installed.packages()[,'Package'])){
install.packages(pack)
}
do.call("require", list(pack))
}))
# 01. INDEXATION DES DIFFERENTS FICHIERS DISPONIBLES ----------------------------------------
# Cette partie permet de charger les liens des différents bases disponibles
# Lien au 12.07.2021
url = "https://www.insee.fr/fr/information/2880845" # Adresse de départ
racine = "https://www.insee.fr"
# Récupération des liens pour chaque année
liens_annees = read_html(url) %>%
html_elements(css = "#consulter > div > ul > li > a") %>%
html_attr("href")
# Ajout 2018 manuel
l2018 = "/fr/information/5369871"
liens_annees = c(liens_annees,l2018)
interet = c("Mobilités professionnelles des individus : déplacements commune de résidence / commune de travail",
"Migrations résidentielles : localisation à la commune de résidence et à la commune de résidence antérieure")
interet_general = c("Évolution et structure de la population - Migrations résidentielles",
"Évolution et structure de la population" ,
"Logements - Migrations résidentielles" ,
"Couples - Familles - Ménages",
"Logements",
"Diplômes - Formation - Mobilités scolaires",
"Diplôme - Formation - Mobilités scolaires",
"Population active - Emploi - Chômage",
"Caractéristiques de l'emploi - Mobilités professionnelles",
"Caractéristique de l'emploi - Mobilités professionnelles")
# 02. TELECHARGEMENT DES FICHIERS -----------------------------------------
## Fonction de téléchargement et d'import/export du fichier dans R
lien = paste0(racine,link)
telechargementBase <- function(lien,nom_element){
extension = str_extract(lien, "\\.[A-Za-z]+$")
if(!(paste0(nom_element,extension) %in% list.files(data_insee))){
httr::GET(
url = lien,
httr::write_disk(file.path(data_insee,paste0(nom_element,extension)))
)
if(extension==".zip"){
unzip(file.path(data_insee,paste0(nom_element,".zip")),
exdir = file.path(data_insee,nom_element))
files = list.files(file.path(data_insee,nom_element),full.names = T)
size = order(sapply(files, file.size),
decreasing = T)
# Sauvergarde au format R pour chargement futur plus rapide
to_open = files[size==1]
}else{
to_open = file.path(data_insee,paste0(nom_element,extension))
}
if(str_detect(str_to_lower(to_open),"\\.csv$")){
data_base = read.csv2(to_open)
}else if (str_detect(str_to_lower(to_open),"\\.xls$")){
print(to_open)
temp = read_xls(to_open,
sheet = 1)
# Détection de la ligne de début
start = which(sapply(temp[,1],function(vect) str_detect(vect,"^[A-Z]+$")))[1]
data_base = temp[start+1:nrow(temp),]
colnames(data_base) = temp[start,]
}else if (str_detect(str_to_lower(to_open),"\\.txt$")){
print(to_open)
data_base = read_delim(to_open, delim=";")
}else{
print("problem extension file")
}
saveRDS(data_base,file = file.path(data_insee,paste0(nom_element,'.RDS')))
}
}
# Téléchargement automatisé des différents fichiers:
# Accès page de données :
for(an in 1:length(liens_annees)){
print(liens_annees[an])
page_annee = read_html(paste0(racine,liens_annees[an]))
annee = str_extract(page_annee %>%
html_elements(css = " title") %>%
html_text(),"\\d+")
# Téléchargement des données détaillées (flux)
sections = page_annee %>%
html_elements(css="#consulter > div > div")
lien_detailles = sections[which(sections %>% html_text2() =="Pour accéder à l'ensemble des fichiers détail, cliquez ici.")] %>%
html_children() %>%
html_element("a") %>%
html_attr("href")
page_detailles = read_html(paste0(racine,lien_detailles))
fichiers_dispos = str_replace_all(str_replace_all(page_detailles %>%
html_elements("#consulter-sommaire > nav > ul > li > ul > li > a") %>%
html_text(),"\n","")," +"," ")
select = which(fichiers_dispos %in% interet)
for(page in select){
lien = page_detailles %>%
html_elements("#consulter-sommaire > nav > ul > li > ul > li > a") %>%
.[[page]] %>%
html_attr("href")
page_temp = read_html(paste0(racine,lien))
categ = str_replace_all(str_replace_all(page_temp %>%
html_elements("#consulter > div > div:nth-child(4) > div:nth-child(1) > div > span:nth-child(2)") %>%
html_text(),"\n|:|/","")," +"," ")
print(categ)
liens = page_temp %>%
html_elements("#consulter > div > div:nth-child(4) > a")
link = liens[which(str_detect(liens %>% html_text2(),"csv"))] %>%
html_attr("href")
if(identical(link,character(0))){
link = liens[which(str_detect(liens %>% html_text2(),"txt"))] %>%
html_attr("href")
}
nom_dossier = str_replace_all(paste(annee,str_to_lower(categ),sep = ".")," ","_")
telechargementBase(paste0(racine,link),nom_dossier)
}
# Téléchargement des données communales, principaux indicateurs
tableau = page_annee %>%
html_element(css="#produit-tableau-Feuil3 tbody")
# Sélection des variables d'intérêt
selection = which(tableau %>% html_elements("th") %>% html_text() %in% interet_general)
for(k in selection){
nom_categ = tableau %>%
html_children() %>%
.[[k]] %>%
html_elements(css="th") %>%
html_text()
print(nom_categ)
lien_categ = tableau %>%
html_children() %>%
.[[k]] %>%
html_elements(css="td") %>%
.[[1]] %>%
html_element(css = "a") %>%
html_attr("href")
## Accès données catégorie
page_categ = read_html(paste0(racine,lien_categ))
lien_fichier = page_categ %>%
html_elements("#consulter > div > div:nth-child(4) > a")%>%
html_attr("href")
links = page_categ %>%
html_elements("#consulter > div > div:nth-child(4) > a ")
link = links[which(str_detect(links %>% html_text2(),"csv"))] %>%
html_attr("href")
if(identical(link,character(0))){
link = links[which(str_detect(links %>% html_text2(),"xls"))] %>%
html_attr("href")
}
if(identical(link,character(0))){
link = links[which(str_detect(links %>% html_text2(),"zip"))] %>%
html_attr("href")
}
nom_dossier = str_replace_all(paste(annee,str_to_lower(nom_categ),sep = ".")," ","_")
telechargementBase(paste0(racine,link),nom_dossier)
}
}
# Probleme de conversion avec read_csv2 -> donc patch
# fichiers_source = list.files(data_insee)
# fichiers_source = fichiers_source[!(fichiers_source %>% str_detect(.,".RDS|.zip"))]
#
#
# fichiers_csv = sapply(fichiers_source, function(dossier){
# tempo = list.files(file.path(data_insee,dossier),
# full.names = T)
# if(TRUE %in% (tempo %>% str_detect(.,".CSV|.csv"))){
# return(tempo[which(tempo %>% str_detect(.,".CSV|.csv"))])
# }
# })
#
# for (dossier in names(fichiers_csv)) {
# if(!is.null(fichiers_csv[[dossier]])){
# files = fichiers_csv[[dossier]]
#
# size = order(sapply(files, file.size),
# decreasing = T)
#
#
# # Sauvergarde au format R pour chargement futur plus rapide
# to_open = files[size==1]
#
# # Ouverture
#
# data_base = read.csv2(to_open)
#
# saveRDS(data_base,file = file.path(data_insee,paste0(dossier,'.RDS')))
#
#
# }
#
#
# }
#
#
#
#
#
|
a6f98afd7c4aecc077c2972d467f08c714595116
|
2e89c08f28217e544f81b46a4408fb016fd867c7
|
/PEA/ui.R
|
4e10fc78097e7b781baf132b9eb16f846c74305a
|
[] |
no_license
|
harvardinformatics/quantproteomics
|
b9ff8e13469c279cbe409b3eb8e2e2f622b74997
|
d5ba1ebd0eca8e1b59f55ebbee654214e256830d
|
refs/heads/master
| 2021-07-02T22:38:28.263577
| 2021-06-23T19:31:33
| 2021-06-23T19:31:33
| 239,841,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,588
|
r
|
ui.R
|
library(shiny)
library("shinythemes")
shinyUI(fluidPage(theme=shinytheme("superhero"),
textInput('psmfilename', 'PSM Filename'),
textInput('replicatenum1', 'Replicate Number of Sample 1'),
textInput('replicatenum2', 'Replicate Number of Sample 2'),
textInput('abundancecolumn', 'Abundance Column'),
actionButton('runimputation1', 'Impute my Missing Values with missForest!'),
actionButton('runimputation2', 'Impute my Missing Values with KNN!'),
actionButton('runimputation3', 'Impute my Missing Values with RegImpute!'),
textInput('PSMfile', 'PSM Imputed File'),
textInput('Protfile', 'PD Protein File'),
actionButton('runPDfilter', 'Use PD filters'),
fileInput('csvfile', 'Input File'),
fileInput('uniprotout', 'Uniprot File'),
fileInput('unitogene', 'Uniprot and Gene Name File'),
textInput('protnorm', 'Protein to Normalize to', value = 'NA'),
textInput('lessperc', 'Coisolation Interference Threshold (default 70%)', value = 70.0),
textInput('plottitle', 'Plot Title', value = 'Differential Expressed Proteins for Treatment/Control at P Value <= 0.05'),
textInput('xaxis', 'Plot X-axis', value = 'log2(Treatment/Control)'),
textInput('yaxis', 'Plot Y-axis', value = '-log10(nominalpval)'),
textInput('pcacontrol', 'PCA Control', value = 'Control'),
textInput('pcatreatment', 'PCA Treatment', value = 'Treatment'),
textInput('protint', 'Protein of Interest', value = 'NA'),
radioButtons('channel126', 'Channel 126', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel127N', 'Channel 127N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel127C', 'Channel 127C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel128N', 'Channel 128N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel128C', 'Channel 128C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel129N', 'Channel 129N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel129C', 'Channel 129C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel130N', 'Channel 130N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel130C', 'Channel 130C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel131N', 'Channel 131N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel131C', 'Channel 131C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel132N', 'Channel 132N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel132C', 'Channel 132C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel133N', 'Channel 133N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel133C', 'Channel 133C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel134N', 'Channel 134N', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
radioButtons('channel134C', 'Channel 134C', inline = TRUE, choices = NULL, selected = NULL, choiceNames = list(
"Control",
"Treatment",
"NA"
),
choiceValues = list(
'1','0','2'
)),
actionButton('buttonId', 'run script'),
titlePanel("Volcano Plot"),
plotOutput('volcanoPlot',click='plot_click'),
sliderInput('fcCut', label="log(FC) cutoff",min=-2,max=2,value=c(-2,-2), step=0.1, width="600px"),
actionButton('downloadPlot', 'Download Plot'),
#here the table for the clicked points:
tableOutput('clickedPoints')
)
)
|
bb0d6fad905326d8aaf6467149405f7f83dc65e0
|
dc4e4365c7c5d6a94ea0616c12d1a59ccffa12e9
|
/R/mapreduce.R
|
820a0b0abdd867391c62539a07744c2f8c2dc6e7
|
[
"BSD-3-Clause"
] |
permissive
|
hafen/datadr
|
582321d9a5628fb2facd1623fb4048a2ac1575bf
|
b4f7e7a1d9a09ac51b0c456f35177d61626d73c3
|
refs/heads/master
| 2020-12-29T03:06:38.926328
| 2018-08-20T23:59:20
| 2018-08-20T23:59:20
| 20,773,966
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,447
|
r
|
mapreduce.R
|
### general map/reduce methods
# the fundamental way to deal with divided data objects is mapreduce
# we generalize that so that if there is a new backend for divided
# data objects, we can simply implement map, reduce, and exec methods
# and then all datadr computations should work on the new backend
# map takes the input data and an expression
# which expects to have "map.keys" and "map.values" defined
# it also has a "collect" function
# reduce takes input data and an expression of pre, reduce, post
# it expects to have a "collect" function
#' Execute a MapReduce Job
#'
#' Execute a MapReduce job
#'
#' @param data a ddo/ddf object, or list of ddo/ddf objects
#' @param setup an expression of R code (created using the R command \code{expression}) to be run before map and reduce
#' @param map an R expression that is evaluated during the map stage. For each task, this expression is executed multiple times (see details).
#' @param reduce a vector of R expressions with names pre, reduce, and post that is evaluated during the reduce stage. For example \code{reduce = expression(pre = {...}, reduce = {...}, post = {...})}. reduce is optional, and if not specified the map output key-value pairs will be the result. If it is not specified, then a default identity reduce is performed. Setting it to 0 will skip the reduce altogether.
#' @param output a "kvConnection" object indicating where the output data should reside (see \code{\link{localDiskConn}}, \code{\link{hdfsConn}}). If \code{NULL} (default), output will be an in-memory "ddo" object. If a character string, it will be treated as a path to be passed to the same type of connection as \code{data} - relative paths will be relative to the working directory of that back end.
#' @param overwrite logical; should existing output location be overwritten? (also can specify \code{overwrite = "backup"} to move the existing output to _bak)
#' @param control parameters specifying how the backend should handle things (most-likely parameters to \code{rhwatch} in RHIPE) - see \code{\link{rhipeControl}} and \code{\link{localDiskControl}}
#' @param params a named list of objects external to the input data that are needed in the map or reduce phases
#' @param packages a vector of R package names that contain functions used in \code{fn} (most should be taken care of automatically such that this is rarely necessary to specify)
#' @param verbose logical - print messages about what is being done
#'
#' @return "ddo" object - to keep it simple. It is up to the user to update or cast as "ddf" if that is the desired result.
#'
#' @author Ryan Hafen
#' @examples
#' # compute min and max Sepal Length by species for iris data
#' # using a random partitioning of it as input
#' d <- divide(iris, by = rrDiv(20))
#'
#' mapExp <- expression({
#' lapply(map.values, function(r) {
#' by(r, r$Species, function(x) {
#' collect(
#' as.character(x$Species[1]),
#' range(x$Sepal.Length, na.rm = TRUE)
#' )
#' })
#' })
#' })
#'
#' reduceExp <- expression(
#' pre = {
#' rng <- c(Inf, -Inf)
#' }, reduce = {
#' rx <- unlist(reduce.values)
#' rng <- c(min(rng[1], rx, na.rm = TRUE), max(rng[2], rx, na.rm = TRUE))
#' }, post = {
#' collect(reduce.key, rng)
#' })
#'
#' res <- mrExec(d, map = mapExp, reduce = reduceExp)
#' as.list(res)
#'
#' @export
mrExec <- function(data, setup = NULL, map = NULL, reduce = NULL, output = NULL, overwrite = FALSE, control = NULL, params = NULL, packages = NULL, verbose = TRUE) {
# NOTE(review): `verbose` is accepted but not referenced anywhere in this
# body - presumably reserved for backends or future use; confirm.
# handle list of ddo/ddf - if not a list, make it one
if(!inherits(data, "ddo")) {
# multiple-input case: `data` must already be a list of ddo/ddf objects
if(!all(sapply(data, function(x) inherits(x, "ddo"))))
stop("data must be a 'ddo' or 'ddf' object or a list of these")
# make sure all have the same storage class
# (the "kv*" class element identifies the key-value storage backend)
storageClasses <- sapply(data, function(x) {
tmp <- class(x)
tmp[grepl("^kv", tmp)][1]
})
uStorageClasses <- unique(storageClasses)
if(length(uStorageClasses) != 1)
stop("all data inputs must be of the same class - the input has data of classes ", paste(uStorageClasses, collapse = ", "))
} else {
data <- list(data)
}
# tag the list with a backend-specific class (e.g. "kvMemoryList") so that
# mrExecInternal() can dispatch on it
class(data) <- c(paste(utils::tail(class(data[[1]]), 1), "List", sep = ""), "list")
# assign names to each data source if missing
nms <- names(data)
if(is.null(nms)) {
nms <- paste("dataSource", seq_along(data), sep = "")
} else {
ind <- which(nms == "")
if(length(ind) > 0)
nms[ind] <- paste("unnamedDataSource", seq_along(ind), sep = "")
}
if(any(duplicated(nms)))
stop("data sources must all have unique names")
names(data) <- nms
# a character `output` is a path relative to the input's backend; convert
# it to a proper connection object
if(is.character(output)) {
class(output) <- c("character", paste0(utils::tail(class(data[[1]]), 1), "Char"))
output <- charToOutput(output)
}
# TODO: make sure all data sources have same kv storage type
mrCheckOutput(data[[1]], output)
output <- mrCheckOutputLoc(output, as.character(overwrite))
if(is.null(control))
control <- list()
# fill in missing required control fields with default
dc <- defaultControl(data[[1]])
controlMissingNames <- setdiff(names(dc), names(control))
for(nm in controlMissingNames)
control[[nm]] <- dc[[nm]]
# if map is NULL, replace with identity
if(is.null(map))
map <- expression({
for(i in seq_along(map.keys))
collect(map.keys[[i]], map.values[[i]])
})
# if reduce is NULL, don't do reduce
# but if it's a number, n, do an identity reduce with n reduce tasks
if(is.numeric(reduce)) {
if(reduce > 0) {
reduce <- expression({
reduce = {
collect(reduce.key, reduce.values)
}
})
}
}
# prepend an expression to the map stage that applies any addTransform()
# transformations registered on the data source being read
mapApplyTransform <- expression({
curTrans <- transFns[[.dataSourceName]]
if(!is.null(curTrans)) {
setupTransformEnv(curTrans, environment())
for(i in seq_along(map.keys)) {
tmp <- applyTransform(curTrans, list(map.keys[[i]], map.values[[i]]), env = environment())
names(tmp) <- c("key", "value")
map.keys[[i]] <- tmp[[1]]
map.values[[i]] <- tmp[[2]]
}
}
})
map <- appendExpression(mapApplyTransform, map)
setup <- appendExpression(control$setup, setup)
# prepend a setup expression that loads required packages on the workers
loadPackagesSetup <- expression({
if(length(mr___packages) > 0) {
for(pkg in mr___packages)
suppressMessages(require(pkg, character.only = TRUE))
}
})
setup <- appendExpression(loadPackagesSetup, setup)
setup <- nullAttributes(setup)
map <- nullAttributes(map)
reduce <- nullAttributes(reduce)
# get transformations that have been added through addTransform
transFns <- lapply(data, function(a) attr(a, "transforms")$transFns)
params <- c(params, list(transFns = transFns))
# collect the packages each transform declared so workers can load them
transPackages <- unique(do.call(c, lapply(transFns, function(a) {
do.call(c, lapply(a, function(b) {
b$packages
}))
})))
packages <- unique(c(packages, transPackages))
# add required packages to the list of parameters
params <- c(params, list(mr___packages = packages))
# hand off to the backend-specific implementation (dispatched on the
# "kv*List" class assigned above)
res <- mrExecInternal(data, setup = setup, map = map, reduce = reduce, output = output, control = control, params = params)
obj <- ddo(res$data, update = FALSE, verbose = FALSE) # if update==TRUE, can get recursive
# if two consecutive values are data frames with same names, chances are it's a ddf
tmp <- try(suppressWarnings(suppressMessages(obj[1:2])), silent = TRUE)
if(inherits(tmp, "try-error") || length(tmp) == 1) {
# fewer than two key-value pairs: inspect the single value instead
tmp <- try(suppressMessages(obj[[1]]), silent = TRUE)
if(!inherits(tmp, "try-error")) {
if(is.data.frame(obj[[1]][[2]]))
obj <- ddf(obj, update = FALSE, verbose = FALSE)
}
} else {
if(all(sapply(tmp, function(x) inherits(x[[2]], "data.frame")))) {
nms <- lapply(tmp, function(x) names(x[[2]]))
if(identical(nms[[1]], nms[[2]]))
obj <- ddf(obj, update = FALSE, verbose = FALSE)
}
}
# extractableKV can change after any mr job
obj <- setAttributes(obj, list(extractableKV = hasExtractableKV(obj), counters = res$counters))
convert(obj, output)
}
# S3 generic: backend-specific execution of the prepared MapReduce job.
# Concrete methods are dispatched on the "kv*List" class that mrExec()
# assigns to the list of data sources.
mrExecInternal <- function(data, ...) {
UseMethod("mrExecInternal", data)
}
# S3 generic: default control parameters for a given backend; mrExec()
# uses the result to fill in any fields missing from the user's `control`.
defaultControl <- function(x) {
UseMethod("defaultControl", x)
}
# check output
# Validate that the requested output connection type is one the input's
# backend can be converted to; convertImplemented() (defined elsewhere in
# the package) enumerates the supported conversions for an input.
mrCheckOutput <- function(input, output) {
if(!class(output)[1] %in% convertImplemented(input))
stop("Cannot convert to requested output type")
}
# S3 generic: backend-specific check/preparation of the output location,
# honouring the overwrite setting (passed as a character by mrExec()).
mrCheckOutputLoc <- function(x, ...)
UseMethod("mrCheckOutputLoc", x)
|
1bf4f712e2b3f8dce54ab9770cd2ed442bb6ad03
|
c50f7394bd48e626a82149389a185da48fe560a7
|
/R/SummariseGTcheck.R
|
8b2dbd357225310eee5241f5fbca2bbace6bca5c
|
[] |
no_license
|
hobrien/GENEX-FB2
|
ef494c3dd874191a0caccd32b0c21258f89a6c23
|
09557f4eeccb0ac274b77afd37a86483befce378
|
refs/heads/master
| 2020-04-05T14:10:31.004016
| 2019-07-25T21:12:31
| 2019-07-25T21:12:31
| 94,789,823
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,980
|
r
|
SummariseGTcheck.R
|
library(readr)
library(dplyr)
library(tools)
library(ggplot2)
library(optparse)
# Summarise bcftools gtcheck output across reference samples: read each
# per-sample discordance table passed as a positional argument, stack them
# into one table, write it out, and plot total discordance per reference
# (self-comparisons highlighted in red).
# Test for eGene enrichment (ratio of sig to non-sig GTEx eGenes that are sig in query sample vs. ratio in eGenes that are non-sig in query)
# I'm also determining the number of query topSNPs that are also sig in GTEx samples and the number that are sig for the same eGene
# It's not entirely clear (to me) what the appropriate background is to test for enrichment in these cases
option_list <- list(
make_option(c("-o", "--outfile"), type="character", default="test.tsv",
help="output file"),
make_option(c("-p", "--plot"), type="character", default="test.png",
help="plot of Discordance values")
)
opt_parser <- OptionParser(option_list=option_list)
# positional_arguments = TRUE: remaining args are the input gtcheck files
opt <- parse_args(opt_parser, positional_arguments=TRUE)
# preallocate one list slot per input file (avoids growing in the loop)
Discordance <- vector("list", length(opt$args))
for ( i in seq_along(opt$args) ) {
GTcheckFile <- opt$args[[i]]
# reference sample name is taken from the file name (extension stripped)
sample<-file_path_sans_ext(basename(GTcheckFile))
GTcheck <- read_tsv(GTcheckFile,
col_names = c('CN', 'Discordance_total', 'Discordance_avg',
'Num_sites', 'SampleID', 'Sample_num'),
col_types = cols(
CN = col_character(),
Discordance_total = col_double(),
Discordance_avg = col_double(),
Num_sites = col_integer(),
SampleID = col_character(),
Sample_num = col_integer()
),
comment = "#", trim_ws = TRUE)
# tag rows with the reference they were checked against; drop bookkeeping cols
GTcheck <- GTcheck %>% mutate(refID=sample) %>% select(-CN, -Sample_num)
Discordance[[i]] <- GTcheck
}
Discordance <- bind_rows(Discordance)
write_tsv(Discordance, opt$options$outfile)
# red points mark each sample compared against its own reference
ggplot(Discordance, aes(x=refID, y=Discordance_total)) +
geom_point() +
geom_point(data=filter(Discordance, SampleID==refID), colour='red')
ggsave(opt$options$plot)
|
79d16c3909ac98027803c280d8c54a8abb8d5222
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nandb/examples/cc_brightness_folder.Rd.R
|
2e7026dfe8e93099040529c1d172d63da376089c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
r
|
cc_brightness_folder.Rd.R
|
library(nandb)
### Name: cc_brightness_folder
### Title: Cross-correlated brightness calculations for every image in a
### folder.
### Aliases: cc_brightness_folder
### ** Examples
## Not run:
##D setwd(tempdir())
##D ijtiff::write_tif(img, 'a.tif')
##D ijtiff::write_tif(img, 'ab.tif')
##D cc_brightness_folder()
##D list.files()
## End(Not run)
|
bceb44471524b77d8d187d96c47c22b362f94700
|
31548d781675560bf6752cd554149e6bf1edcae5
|
/R/get_descriptive_stats.R
|
2bd9cd7a484958eb3a8ae3fcce5eefdef31fb41b
|
[] |
no_license
|
jesswalker/sw_fire
|
cb9a5b7e0e24036eec37ce7060aa54e3d57e85d3
|
7d5e4049b623b7b9385ae919632a91b20968376c
|
refs/heads/master
| 2020-03-18T04:34:49.157591
| 2018-05-21T17:32:16
| 2018-05-21T17:32:16
| 134,294,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,531
|
r
|
get_descriptive_stats.R
|
########################################################################### #
#
# get_descriptive_stats.R
#
# Objective:
# This script generates a set of descriptive stats
# (n, mean, median, sd, se) for different metrics
# baseval, peakval, amp, peakt.doy) per fire.
#
# Input: Edited output of Timesat seasonal phenology metrics
# (i.e., "control_16day_TS_seasonal_output_edit.csv")
#
# Working directory is typically
# D:/projects/sw_fire/output/timesat/seasonal
#
#
# Output: "descriptive_stats.R"
#
# in
# D:/projects/sw_fire/data
#
# August 2017 JWalker
#
#
########################################################################### #
# 17 Aug 2017 JWalker
# Remove all
rm(list=ls())
# Load libraries
library(plyr) # to convert list to df; join (merge) command
library(dplyr)
library(greenbrown) # trend/breakpoint analysis
library(zoo) # moving window
library(reshape2) # melt
library(lubridate) # year
# Get functions
source("D:/projects/sw_fire/R/sw_fire_functions.R")
# Set directories
setwd("D:/projects/sw_fire/output/timesat/seasonal")
path.in <- "D:/projects/sw_fire/output/timesat/seasonal"
path.tables <- "D:/projects/sw_fire/output/tables"
path.plot <- "D:/projects/sw_fire/output/plots/apr2018"
path.r <- "D:/projects/sw_fire/data"
rdata <- "descriptive_stats.RData"
# ---------- function: get descriptive stats for each fire, for each metric ----------
# ---------- function: get descriptive stats for each fire, for each metric ----------
stats_by_group <- function(df) {
  # Compute descriptive stats (via getStats, defined in sw_fire_functions.R)
  # for each phenology metric on the (possibly grouped) data frame, stack
  # the results, and tag each row with its metric name.
  #
  # Building the pieces in a list and binding once avoids the quadratic
  # copy cost of bind_rows() inside a growing loop.
  metrics <- c("baseval", "peakval", "amp", "peakt.doy")
  pieces <- lapply(metrics, function(metric) {
    stats.temp <- getStats(df, metric)
    stats.temp$metric <- metric
    stats.temp
  })
  df.out <- bind_rows(pieces)
  df.out$metric <- as.factor(df.out$metric)
  df.out
}
# -------------- function create time series
create_ts <- function(df, value) {
ts.x <- ts(df[[value]], start = c(year(x$date[1])),
end = c(year(x$date[nrow(x)])),
frequency = 1)
}
# -------------- function get breakpoints ----
# Compute per-fire, per-point breakpoint stats (via get_breaks, defined in
# sw_fire_functions.R) and stack the results with one copy per metric name.
stats_by_phase <- function(df){ #}, group.var) {
df.out <- data.frame() # set up holding df
for (metric in c("baseval", "peakval", "amp", 'peakt.doy')) {
# NOTE(review): the loop variable `metric` is never passed to get_breaks(),
# so every iteration appears to compute identical stats that only differ in
# the `metric` label - confirm whether get_breaks() should receive `metric`
# instead of the hard-coded 'mean'.
# NOTE(review): groups on a literal "name" column, but the data prepared in
# this script uses `fire_name` - verify against get_breaks()'s expectations.
stats.temp <- df %>% group_by_("name", 'ptid') %>% get_breaks('mean')
stats.temp$metric <- metric
df.out <- bind_rows(df.out, stats.temp)
}
return(df.out)
}
# Can't figure out an efficient way to run the moving average on distinct subgroups
# Do it manually for now
# ---------- function get moving average ----------
# 3-point centered moving average of the `mean` column within each
# `regrowth` group, returned in long format with a numeric `years_post`
# column. Ends of each series are NA-padded by rollmean.
# NOTE(review): assumes rows within each regrowth group are already ordered
# by years_post and that the years_post sequence aligns positionally across
# groups (the wide/melt round-trip labels columns by position) - confirm.
get_mvavg <- function(df) {
# get moving average
# zoo::rollmean over each regrowth group's mean values
l.mvg <- by(df$mean, df$regrowth, (function(x) rollmean(x, 3, na.pad = T, align = 'center')))
# plyr::ldply widens the per-group results into one row per group
df.mvg <- ldply(l.mvg, rbind)
colnames(df.mvg)[1] <- "regrowth"
# melt back to long
df.long <- melt(df.mvg, .(regrowth), variable.name = 'years_post', value.name = 'mean')
# convert time to a number rather than a factor
df.long$years_post <- as.numeric(levels(df.long$years_post))[df.long$years_post]
return(df.long)
}
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# removed Rodeo (start: 1986, fire.year: 2002)
# combine all fire data into one large happy file
files <- list.files(path.in, pattern = "*_edit.csv$", full.names = FALSE, ignore.case = FALSE)
x <- do.call(rbind, lapply(files, read.csv, header=T))
# standardize column names
colnames(x) <- tolower(colnames(x))
# get rid of extraneous columns
x <- x[c("baseval", "peakval", "amp", "ptid", "n", "peakt.doy", "year", "phase", "years_post", "fire_name", "t")]
# > head(x)
# baseval peakval amp ptid n peakt.doy year phase years_post name t
# 1 0.6477563 0.7146601 0.06690384 pt01 1 276 1985 pre 0 bell 16
# 2 0.5372279 0.7125424 0.17531456 pt01 2 218 1986 pre 0 bell 16
# 3 0.5054810 0.7429433 0.23746228 pt01 3 198 1987 pre 0 bell 16
# 4 0.5803843 0.7164091 0.13602488 pt01 4 226 1988 pre 0 bell 16
# 5 0.5605682 0.6806482 0.12008002 pt01 5 281 1989 pre 0 bell 16
# 6 0.5206859 0.7154045 0.19471860 pt01 6 247 1990 pre 0 bell 16
# QA/QC take out weird data
# Backtracked on this b/c hard to define "weird"
#x$peakt.doy[x$peakt.doy < 45 | x$peakt.doy > 320] <- NA
x <- na.omit(x)
# make sure t (time period: 8 or 16) is a factor
x$t <- as.factor(x$t)
# drop the fire phase
x <- subset(x, phase != "fire")
x$phase <- droplevels(x$phase)
x$phase <- factor(x$phase, levels = c("pre", "post"))
# set up the regrowth type
x$regrowth <- "Forest"
# Bell
x$regrowth[x$name == 'bell' & !(x$ptid %in% c('pt02', 'pt03'))] <- "Herbaceous" #Grass/shrub"
# Blackhawk
x$regrowth[x$name == 'blackhawk'] <- "Shrub"
# Dude
x$regrowth[x$name == 'dude'] <- "Herbaceous"
x$regrowth[x$name == 'dude' & (x$ptid %in% c('pt05', 'pt06'))] <- "Shrub" #"Grass/shrub"
# La Mesa
x$regrowth[x$name == 'lamesa' & (x$ptid %in% c('pt01', 'pt06'))] <- "Shrub"
x$regrowth[x$name == 'lamesa' & (x$ptid == 'pt07')] <- "Herbaceous"
# Las Conchas
x$regrowth[x$name == 'lasconchas' & (x$ptid == 'pt01')] <- "Herbaceous" #"Grass/shrub"
x$regrowth[x$name == 'lasconchas' & (x$ptid == 'pt02')] <- "Herbaceous"
# Pot (NM)
x$regrowth[x$name == 'pot'] <- "Deciduous"
x$regrowth[x$name == 'pot' & (x$ptid == 'pt01')] <- "Herbaceous"
# Pot (AZ)
x$regrowth[x$name == 'potaz'] <- "Herbaceous"
# Rattlesnake
x$regrowth[x$name == 'rattlesnake' & (x$ptid %in% c('pt02', 'pt03'))] <- "Herbaceous" #Shrub
x$regrowth[x$name == 'rattlesnake' & (x$ptid == 'pt01')] <- "Herbaceous"
# Rincon
x$regrowth[x$name == 'rincon'] <- "Herbaceous"
# Slim
x$regrowth[x$name == 'slim'] <- "Shrub"
# South
x$regrowth[x$name == 'south'] <- "Shrub"
# Stone
x$regrowth[x$name == 'stone'] <- "Shrub"
# fire characteristics
# took out shelly, bonner
#fireNames <- c("bell", 'blackhawk', 'control', 'dude', 'lamesa', 'lasconchas', 'pot', 'potaz', 'rattlesnake', 'rincon',
# 'slim', 'south')
fireLabels <- c("Bell", "Blackhawk", "Control", "Dude", "La Mesa", "Las Conchas", "Pot (NM)", "Pot (AZ)", "Rattlesnake",
"Rincon", "Slim", "South")
get_fire_year <- function(y) {
  # Return the earliest `year` among rows observed one year post-fire,
  # or NA when no such row exists (min() on an empty vector yields Inf,
  # with a warning, which we translate to NA).
  post_rows <- which(y$years_post == 1)
  candidate <- min(y$year[post_rows])
  if (is.infinite(candidate)) {
    return(NA)
  }
  candidate
}
#dataStart <- c(1985, 1985, 1984, 1987, 1984, 1984, 1986, 1984, 1984, 1984, 1986, 1985)
#fireYears <- c(1993, 1993, 2017, 1990, 1977, 2011, 1994, 1996, 1994, 1994, 1987, 1995)
#fire.desc <- data.frame(year = fireYears, name = fireNames, start = dataStart, labels = fireLabels)
# Get data start year
temp.start <- ddply(x, .(fire_name), summarize,
start_year = min(year))
# Get fire year
# Look for the first "post-fire" year, subtract 1
temp.fire <- ddply(x, .(fire_name), function(y) {
suppressWarnings(
temp <- min(y[which(y$years_post == 1), ]$year))
if (!is.infinite(temp)) {
data.frame(fire_year = temp - 1)
} else {
data.frame(fire_year = NA)
}
})
fire.desc <- merge(temp.start, temp.fire, by = "fire_name")
fire.desc$labels <- fireLabels
# ---------------------------------------------------------------- #
# stats by phase (pre/post) and name (points are aggregated) ----
# ---------------------------------------------------------------- #
x.stats.phase.8.all <- x[x$t==8, ] %>% group_by(fire_name, phase) %>% stats_by_group()
x.stats.phase.16.all <- x[x$t==16, ] %>% group_by(fire_name, phase) %>% stats_by_group()
#> head(x.stats.phase.16.all)
# name phase n mean median sd se metric
#1 bell pre 85 0.5122012 0.5183115 0.06108287 0.006625368 baseval
#2 bell post 140 0.2137434 0.2053897 0.04674217 0.003950435 baseval
#3 blackhawk pre 6 0.5989001 0.5815791 0.04199062 0.017142599 baseval
#4 blackhawk post 22 0.2091737 0.2093612 0.03936628 0.008392919 baseval
#5 control pre 52 0.4563193 0.4588355 0.05484602 0.007605775 baseval
#6 dude pre 17 0.5748819 0.5712164 0.06791413 0.016471596 baseval
write.csv(x.stats.phase.8.all, file = file.path(path.tables, "descriptive_stats", "stats_by_phase_fire_all_8.csv"), row.names = F)
write.csv(x.stats.phase.16.all, file = file.path(path.tables, "descriptive_stats", "stats_by_phase_fire_all_16.csv"), row.names = F)
# -------------------------------------------------------------- #
# --- stats by phase (pre/post), name, and point ----
# -------------------------------------------------------------- #
x.stats.phase.8.pt <- x[x$t==8, ] %>% group_by(fire_name, phase, ptid) %>% stats_by_group()
x.stats.phase.16.pt <- x[x$t==16, ] %>% group_by(fire_name, phase, ptid) %>% stats_by_group()
#> head(x.stats.phase.16.pt)
# name phase ptid n mean median sd se metric
#1 bell pre pt01 7 0.5556748 0.5376199 0.04747527 0.017943965 baseval
#2 bell pre pt02 17 0.5179602 0.5249631 0.07389809 0.017922919 baseval
#3 bell pre pt03 19 0.4997694 0.5235786 0.07836931 0.017979153 baseval
#4 bell pre pt04 16 0.4778225 0.4895456 0.04517894 0.011294736 baseval
#5 bell pre pt05 7 0.5409580 0.5385751 0.03469521 0.013113557 baseval
#6 bell pre pt06 6 0.5343155 0.5401456 0.02425080 0.009900348 baseval
write.csv(x.stats.phase.8.pt, file = file.path(path.tables, "descriptive_stats", "stats_by_phase_fire_pt_8.csv"), row.names = F)
write.csv(x.stats.phase.16.pt, file = file.path(path.tables, "descriptive_stats", "stats_by_phase_fire_pt_16.csv"), row.names = F)
# ---------------------------------------------------------------- #
# stats by time since fire and name (points are aggregated) ----
# ---------------------------------------------------------------- #
x.stats.years_post.8.all <- x[x$t==8, ] %>% group_by(fire_name, years_post) %>% stats_by_group()
x.stats.years_post.16.all <- x[x$t==16, ] %>% group_by(fire_name, years_post) %>% stats_by_group()
#> head(x.stats.tpost.16.all)
# name years_post n mean median sd se metric
#1 bell 0 85 0.5122012 0.5183115 0.06108287 0.006625368 baseval
#2 bell 1 8 0.1792775 0.1792386 0.02266586 0.008013590 baseval
#3 bell 2 8 0.1879514 0.1864579 0.02557598 0.009042474 baseval
#4 bell 3 8 0.1912053 0.1935639 0.01607038 0.005681739 baseval
#5 bell 4 8 0.2041848 0.2045284 0.02478045 0.008761214 baseval
#6 bell 5 8 0.2004619 0.2027343 0.02581317 0.009126333 baseval
write.csv(x.stats.years_post.8.all, file = file.path(path.tables, "descriptive_stats", "stats_by_fire_years_post_all_8.csv"), row.names = F)
write.csv(x.stats.years_post.16.all, file = file.path(path.tables, "descriptive_stats", "stats_by_fire_years_post_all_16.csv"), row.names = F)
# -------------------------------------------------------------- #
# stats by time since fire, name, and point ----
# -------------------------------------------------------------- #
x.stats.years_post.8.pt <- x[x$t==8 & x$years_post != 0, ] %>% group_by(fire_name, years_post, ptid) %>% stats_by_group()
x.stats.years_post.8.pt$t <- 8
x.stats.years_post.16.pt <- x[x$t==16 & x$years_post != 0, ] %>% group_by(fire_name, years_post, ptid) %>% stats_by_group()
x.stats.years_post.16.pt$t <- 16
#> head(x.stats.tpost.16.pt)
# name years_post ptid n mean median sd se metric
#1 bell 1 pt01 1 0.2044205 0.2044205 NaN NaN baseval
#2 bell 1 pt02 1 0.1825926 0.1825926 NaN NaN baseval
#3 bell 1 pt03 1 0.1758847 0.1758847 NaN NaN baseval
#4 bell 1 pt04 1 0.1854673 0.1854673 NaN NaN baseval
#5 bell 1 pt05 1 0.1371447 0.1371447 NaN NaN baseval
#6 bell 1 pt06 1 0.2098297 0.2098297 NaN NaN baseval
write.csv(x.stats.years_post.8.pt, file = file.path(path.tables, "descriptive_stats", "stats_by_fire_years_post_pt_8.csv"), row.names = F)
write.csv(x.stats.years_post.16.pt, file = file.path(path.tables, "descriptive_stats", "stats_by_fire_years_post_pt_16.csv"), row.names = F)
# -------------------------------------------------------------- #
# stats by pre/post and regrowth type (points aggregated) ----
# -------------------------------------------------------------- #
x.stats.phase.reg.8.all <- x[x$t==8, ] %>% group_by(regrowth, phase) %>% stats_by_group()
x.stats.phase.reg.16.all <- x[x$t==16, ] %>% group_by(regrowth, phase) %>% stats_by_group()
#> head(x.stats.phase.16.reg)
# regrowth phase n mean median sd se metric
#1 Deciduous pre 31 0.4838723 0.4929430 0.06293468 0.011303403 baseval
#2 Deciduous post 84 0.2092328 0.2140476 0.03568828 0.003893910 baseval
#3 Forest pre 121 0.4678394 0.4720970 0.06384715 0.005804287 baseval
#4 Forest post 165 0.2970766 0.2694065 0.10848467 0.008445520 baseval
#5 Grass pre 48 0.5089609 0.5243272 0.06213518 0.008968441 baseval
#6 Grass post 90 0.1766146 0.1747460 0.02510187 0.002645969 baseval
write.csv(x.stats.phase.reg.8.all, file = file.path(path.tables, "descriptive_stats", "stats_by_phase_regrowth_all_8.csv"), row.names = F)
write.csv(x.stats.phase.reg.16.all, file = file.path(path.tables, "descriptive_stats", "stats_by_phase_regrowth_all_16.csv"), row.names = F)
# ------------------------------------------------------------------- #
# stats by time since fire and regrowth type (points aggregated) ----
# ------------------------------------------------------------------- #
x.stats.years_post.reg.8.all <- x[x$t==8 & x$years_post != 0, ] %>% group_by(regrowth, years_post) %>% stats_by_group()
x.stats.years_post.reg.16.all <- x[x$t==16 & x$years_post != 0, ] %>% group_by(regrowth, years_post) %>% stats_by_group()
#> head(x.stats.tpost.reg.16.all)
# regrowth years_post n mean median sd se metric
#1 Deciduous 1 4 0.3079408 0.2614214 0.14748212 0.07374106 baseval
#2 Deciduous 2 4 0.3005765 0.2375595 0.15544918 0.07772459 baseval
#3 Deciduous 3 4 0.2804252 0.2190004 0.14016511 0.07008255 baseval
#4 Deciduous 4 4 0.2291705 0.2064023 0.06646931 0.03323466 baseval
#5 Deciduous 5 3 0.2021335 0.1868690 0.04410893 0.02546630 baseval
#6 Deciduous 6 4 0.1678633 0.1356292 0.07244381 0.03622190 baseval
write.csv(x.stats.years_post.reg.8.all, file = file.path(path.tables, "descriptive_stats", "stats_by_years_post_regrowth_all_8.csv"), row.names = F)
write.csv(x.stats.years_post.reg.16.all, file = file.path(path.tables, "descriptive_stats", "stats_by_years_post_regrowth_all_16.csv"), row.names = F)
# get moving averages ----
t_doy_8 <- get_mvavg(x.stats.years_post.reg.8.all[x.stats.years_post.reg.8.all$metric == 'peakt.doy', ])
t_peak_8 <- get_mvavg(x.stats.years_post.reg.8.all[x.stats.years_post.reg.8.all$metric == 'peakval', ])
t_amp_8 <- get_mvavg(x.stats.years_post.reg.8.all[x.stats.years_post.reg.8.all$metric == 'amp', ])
t_base_8 <- get_mvavg(x.stats.years_post.reg.8.all[x.stats.years_post.reg.8.all$metric == 'baseval', ])
t_doy_16 <- get_mvavg(x.stats.years_post.reg.16.all[x.stats.years_post.reg.16.all$metric == 'peakt.doy', ])
t_peak_16 <- get_mvavg(x.stats.years_post.reg.16.all[x.stats.years_post.reg.16.all$metric == 'peakval', ])
t_amp_16 <- get_mvavg(x.stats.years_post.reg.16.all[x.stats.years_post.reg.16.all$metric == 'amp', ])
t_base_16 <- get_mvavg(x.stats.years_post.reg.16.all[x.stats.years_post.reg.16.all$metric == 'baseval', ])
# Save data and environment settings
print(paste0("R data file saved to ", file.path(path.r, rdata)))
save.image(file = file.path(path.r, rdata))
|
69ef4087881330bc801e23fd50084c78e1d0597a
|
864315937f7e975a5e911288ccb2eabf0fea5a8a
|
/man/calculate_funnel_points.Rd
|
d20b52de6e4903b82974c1500f509620f4c9d649
|
[] |
no_license
|
cran/PHEindicatormethods
|
67178fa46449b70d9606b57872bb47ac72f31ffb
|
bdfdb8189053667e961e18460407d7804f6efd6b
|
refs/heads/master
| 2023-05-11T18:05:44.767385
| 2023-05-05T16:50:02
| 2023-05-05T16:50:02
| 145,906,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,456
|
rd
|
calculate_funnel_points.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Funnels.R
\name{calculate_funnel_points}
\alias{calculate_funnel_points}
\title{For rate-based funnels: Derive rate and annual population values for
charting. The process removes rates where the rate type is dsr and the number
of observed events is below 10.}
\usage{
calculate_funnel_points(
data,
numerator,
denominator,
rate,
rate_type = NULL,
years_of_data = NULL,
multiplier = NULL
)
}
\arguments{
\item{data}{a data.frame containing the data to calculate control limits for;
unquoted string; no default}
\item{numerator}{field name from data containing the observed numbers of
cases in the sample meeting the required condition (the numerator or
observed counts for the control limits); unquoted string; no default}
\item{denominator}{field name from data containing the population(s) in the
sample (the denominator or expected counts for the control limits);
unquoted string; no default}
\item{rate}{field name from data containing the rate data when creating
funnels for a Crude or Directly Standardised Rate; unquoted string; no
default}
\item{rate_type}{if statistic is "rate", specify either "dsr" or "crude";
string; no default}
\item{years_of_data}{number of years the data represents; this is required
for statistic = "rate"; numeric; no default}
\item{multiplier}{the multiplier used to express the final values (eg 100 =
percentage); numeric; no default}
}
\value{
returns the same table as provided with two additional fields. First
will have the same name as the rate field, with the suffix "_chart", the
second will be called denominator_derived
}
\description{
For rate-based funnels: Derive rate and annual population values for charting
based. Process removes rates where the rate type is dsr and the number of
observed events are below 10.
}
\seealso{
Other PHEindicatormethods package functions:
\code{\link{assign_funnel_significance}()},
\code{\link{calculate_ISRate}()},
\code{\link{calculate_ISRatio}()},
\code{\link{calculate_funnel_limits}()},
\code{\link{phe_dsr}()},
\code{\link{phe_life_expectancy}()},
\code{\link{phe_mean}()},
\code{\link{phe_proportion}()},
\code{\link{phe_quantile}()},
\code{\link{phe_rate}()},
\code{\link{phe_sii}()}
}
\author{
Sebastian Fox, \email{sebastian.fox@phe.gov.uk}
}
\concept{PHEindicatormethods package functions}
|
232f26fba26c6f0242f06ffe7a38993c2b70e454
|
21ac0d174b6d35ab6cc0704e4aacecc3e0954cb8
|
/R/utils-font.R
|
110504ab07ef97f634eb3c142daa13096262142b
|
[
"MIT",
"CC-BY-4.0",
"CC-BY-3.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
cran/piecepackr
|
207671f786ebace9215cc23830e059a50fe4711c
|
88f3abf9334c112aaadf8f45f41b3978ff99a871
|
refs/heads/master
| 2023-09-01T08:00:31.360526
| 2023-08-25T10:00:11
| 2023-08-25T11:30:57
| 218,309,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,873
|
r
|
utils-font.R
|
#' Font utility functions
#'
#' `get_embedded_font()` returns which font is actually embedded
#' by `cairo_pdf()` for a given character.
#' `has_font()` tries to determine if a given font is available on the OS.
#' @name font_utils
#' @rdname font_utils
#' @param font A character vector of font(s).
#' @param char A character vector of character(s) to be embedded by `grid::grid.text()`
#' @return `get_embedded_font()` returns character vector of fonts that were actually embedded by `cairo_pdf()`.
#' \code{NA}'s means no embedded font detected: this either means that no font
#' was found or that a color emoji font was found and instead of a font an image was embedded.
#' @details `get_embedded_font()` depends on the suggested `pdftools` package being installed
#' and R being compiled with Cairo support.
#' `has_font()` depends on either the suggested `systemfonts` (preferred) or `pdftools`
#' packages being installed.
#' @examples
#' if (requireNamespace("pdftools", quietly = TRUE) && capabilities("cairo")) {
#' chars <- c("a", "\u2666")
#' fonts <- c("sans", "Sans Noto", "Noto Sans", "Noto Sans Symbols2")
#' get_embedded_font(fonts, chars)
#' }
#'
#' if (requireNamespace("systemfonts", quietly = TRUE) ||
#' (requireNamespace("pdftools", quietly = TRUE) && capabilities("cairo"))) {
#' has_font("Dejavu Sans")
#' }
#' @export
get_embedded_font <- function(font, char) {
  # For every (char, font) combination, render the character with
  # cairo_pdf() and report which font was actually embedded in the PDF
  # (NA when none was, e.g. for color emoji rendered as images).
  # Requires R compiled with cairo support and either the pdftools package
  # or (deprecated) the system `pdffonts` tool.
  if (!capabilities("cairo")) {
    abort("'get_embedded_font()' requires that R has been compiled with 'cairo' support. ")
  }
  if (!requireNamespace("pdftools", quietly = TRUE)) {
    if (Sys.which("pdffonts") == "") {
      assert_suggested("pdftools")
    } else {
      .Deprecated(msg = paste("Using the system command `pdffonts` is deprecated.",
                              "Please install the suggested R package `{pdftools}`."))
    }
  }
  # One row per (char, font) combination, chars varying fastest.
  df <- expand.grid(char, font, stringsAsFactors = FALSE)
  names(df) <- c("char", "requested_font")
  df$embedded_font <- NA
  # Bug fix: seq_len() instead of seq(nrow(df)) - seq(0) yields c(1, 0),
  # so an empty font/char input would have indexed rows 1 and 0 and failed
  # instead of returning the empty grid.
  for (ii in seq_len(nrow(df))) {
    df[ii, 3] <- get_embedded_font_helper(df[ii, 2], df[ii, 1])
  }
  df
}
# Render a single character in the requested font to a temporary PDF via
# cairo_pdf(), then inspect the PDF's font table to find the embedded font
# name. Returns NA when no font entry is found (presumably a color emoji
# font rendered as an image). The gsub strips the "ABCDEF+" subset prefix
# that PDF embedding adds to font names.
get_embedded_font_helper <- function(font, char) {
file <- tempfile(fileext=".pdf")
# clean up the temp PDF even on error
on.exit(unlink(file))
grDevices::cairo_pdf(file)
grid::grid.text(char, gp=grid::gpar(fontsize=72, fontfamily=font))
invisible(grDevices::dev.off())
if (requireNamespace("pdftools", quietly = TRUE)) {
df <- pdftools::pdf_fonts(file)
if(nrow(df) == 0L)
embedded_font <- NA # probably some color emoji font used
else
embedded_font <- gsub(".*\\+(.*)", "\\1", df$name)
} else {
# fallback: parse the output of the system `pdffonts` tool; the first
# two lines are its table header, data starts on line 3
pf_output <- system2("pdffonts", file, stdout=TRUE)
if (length(pf_output) == 2)
embedded_font <- NA # probably some color emoji font used
else
embedded_font <- gsub(".*\\+(.*)", "\\1", strsplit(pf_output[3], " +")[[1]][1])
}
embedded_font
}
#' @rdname font_utils
#' @export
# Best-effort check that a single font family is available on the OS:
# prefer matching the font file via systemfonts, otherwise fall back to
# actually embedding a character and comparing names; conservatively
# returns FALSE (with a warning) when neither mechanism is available.
# Comparison is fuzzy: names are lower-cased and stripped of spaces.
has_font <- function(font) {
stopifnot(length(font) == 1)
if (requireNamespace("systemfonts", quietly = TRUE)) {
font_file <- basename(systemfonts::match_font(family = font)$path)
grepl(simplify_font(font), simplify_font(font_file))
} else if (Sys.which("pdffonts") != "" && capabilities("cairo")) {
# slower path: render a character and see which font got embedded
embedded_font <- get_embedded_font(font, "A")$embedded_font
grepl(simplify_font(font), simplify_font(embedded_font))
} else {
warn(paste("has_font() needs either the suggested 'systemfonts' package installed",
"or R compiled with 'cairo' support plus the system tool 'pdffonts' installed.",
"Conservatively returning `FALSE`."))
FALSE
}
}
simplify_font <- function(font) {
  # Normalise font name(s) for fuzzy comparison: lower-case, spaces removed.
  gsub(" ", "", tolower(font), fixed = TRUE)
}
|
c043a8871b63092903804097c0d1aa87e9fc3fda
|
11be214f5aaf0740788b27ccb0b6a4194b783574
|
/R/6_kyu/House_of_cards.R
|
0fada19beda9253553a4e19cb414657c6e7cac19
|
[] |
no_license
|
y0wel/Codewars-Kata
|
b054e53233543c2766569fba6855f6e5c7be6f28
|
36a1a20ff8807c929e92d17b50183272483dcd01
|
refs/heads/master
| 2020-04-14T00:39:50.054664
| 2019-06-13T16:33:52
| 2019-06-13T16:33:52
| 163,538,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
House_of_cards.R
|
house_of_cards <- function(floors) {
  # Total cards needed for a house of cards with the given number of floors.
  # A non-positive floor count is invalid.
  if (floors <= 0) {
    stop("error")
  }
  # Each of the floors + 1 levels of leaning cards uses pairs: 2, 4, 6, ...
  leaning_cards <- sum(as.numeric(1:(floors + 1) * 2))
  # Horizontal separator cards between levels: 1, 2, ..., floors.
  horizontal_cards <- sum(as.numeric(1:floors))
  leaning_cards + horizontal_cards
}
|
1a928d07dd5261c914c81b71183fdbf4ec03171f
|
a46ba6eedc84d92eb0ed28d2c533c625f7db546f
|
/week_07/case_study_07.R
|
126bf5cb3e8d8bf28e6dc2a9cc27881504e83b99
|
[] |
no_license
|
geo511-2020/geo511-2020-tasks-btkunz
|
f82f406368555632b691cb9d09131bc3d7ed2e73
|
9f4a99d51fb2cc4a96c6149811f1fa7dc33d8e85
|
refs/heads/master
| 2023-01-31T07:07:38.419276
| 2020-12-18T06:39:48
| 2020-12-18T06:39:48
| 296,491,311
| 0
| 0
| null | 2020-09-18T02:20:21
| 2020-09-18T02:20:13
|
R
|
UTF-8
|
R
| false
| false
| 384
|
r
|
case_study_07.R
|
library(ggplot2)
library(tidyverse)
library(reprex)
library(spData)
library(sf)

data(world)

# Density of GDP per capita, one overlaid translucent curve per continent.
# helped by Hadarou on line below
# Use FALSE (not the reassignable alias F) to suppress the density outline.
gdp_plot <- ggplot(world, aes(x = gdpPercap, fill = continent)) +
  geom_density(alpha = 0.5, color = FALSE) +
  labs(x = "GDP per Capita",
       y = "Density",
       fill = "Continent") +
  theme(legend.position = "bottom")

plot(gdp_plot)
#reprex::reprex()
|
651ba24d1a5c896c0b7be9b1fcc3833326be8d3c
|
99e9855e8d64c55880a42f0b7d901e40c9b475f9
|
/Scraping.R
|
f2de6be1af198a7aa622e9953877e606dd8fe8e9
|
[] |
no_license
|
supriyd/Scraping-Lion-Air-Reviews-on-TripAdvisor-with-R
|
a11af29f2210950e61b4970679e5ad25a24f74ff
|
020821f2156c5329be74dba45b2769fe34653a10
|
refs/heads/master
| 2018-11-10T03:15:12.153492
| 2018-08-21T11:40:50
| 2018-08-21T11:40:50
| 107,696,819
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,609
|
r
|
Scraping.R
|
# Scrape Lion Air reviews from TripAdvisor with rvest and save them as CSV.
library(rvest)

landing <- read_html("https://www.tripadvisor.com/Airline_Review-d8729111-Reviews-Lion-Air")

# Find the last page number of the review listing.
npages <- landing %>%
  html_nodes(" .pageNum") %>%
  html_attr(name = "data-page-number") %>%
  tail(., 1) %>%
  as.numeric()
npages

# Page offsets used in the paginated URLs (0, 10, 20, ...), vectorised
# instead of the original element-by-element loop.
res <- (seq_len(npages) - 1) * 10

# Collect one data frame per page in a preallocated list and bind once at
# the end (avoids the O(n^2) rbind-in-a-loop growth of the original).
pages <- vector("list", length(res))
for (k in seq_along(res)) {
  i <- res[k]
  cat(".")
  # Change URL address here depending on attraction for review
  url <- paste("https://www.tripadvisor.com/Airline_Review-d8729111-Reviews-or", i, "-Lion-Air#REVIEWS", sep = "")
  # read_html() replaces the defunct html() alias from old rvest versions.
  reviews <- url %>%
    read_html() %>%
    html_nodes("#REVIEWS .innerBubble")
  id <- reviews %>%
    html_node(".quote a") %>%
    html_attr("id")
  quote <- reviews %>%
    html_node(".quote span") %>%
    html_text()
  # Rating is encoded in a CSS class like "ui_bubble_rating bubble_40" -> 4.
  rating <- reviews %>%
    html_node(".rating .ui_bubble_rating") %>%
    html_attrs() %>%
    gsub("ui_bubble_rating bubble_", "", .) %>%
    as.integer() / 10
  date <- reviews %>%
    html_node(".innerBubble, .ratingDate") %>%
    html_text()
  review <- reviews %>%
    html_node(".entry .partial_entry") %>%
    html_text()
  # get rid of \n in reviews as this stands for 'enter' and is confusing dataframe layout
  reviewnospace <- gsub("\n", "", review)
  pages[[k]] <- data.frame(id, quote, rating, date, reviewnospace)
}
tableout <- do.call(rbind, pages)

# Save the collected reviews to a CSV file.
write.csv(tableout, "F:/DOC/lionGithub/datalion.csv")
|
6150fe0c33a0dc71aab0aa5d4b35a80288cbb1ea
|
5ac5920bc54c456669b9c1c1d21ce5d6221e27eb
|
/facebook/delphiFacebook/man/code_beliefs.Rd
|
5f01164dfe00c1ca11ed175c549f8f32b4e66fa1
|
[
"MIT"
] |
permissive
|
alexcoda/covidcast-indicators
|
50e646efba61fbfe14fd2e78c6cf4ffb1b9f1cf0
|
0c0ca18f38892c850565edf8bed9d2acaf234354
|
refs/heads/main
| 2023-08-13T04:26:36.413280
| 2021-09-16T18:16:08
| 2021-09-16T18:16:08
| 401,882,787
| 0
| 0
|
MIT
| 2021-09-01T00:41:47
| 2021-09-01T00:41:46
| null |
UTF-8
|
R
| false
| true
| 360
|
rd
|
code_beliefs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variables.R
\name{code_beliefs}
\alias{code_beliefs}
\title{Beliefs}
\usage{
code_beliefs(input_data, wave)
}
\arguments{
\item{input_data}{input data frame of raw survey data}
\item{wave}{integer indicating survey version}
}
\value{
augmented data frame
}
\description{
Beliefs
}
|
12a8f264103dee5bbe4780be598f33a0223e89f3
|
c7e8ff375d6e8a625ede3071a3685de7c452265e
|
/man/summary-Counts-method.Rd
|
cbe547bc239757597aed289303ff64ef01bf85c8
|
[] |
no_license
|
FedericoComoglio/dupiR
|
443bdacd9e969038657259876c525013c6f1a783
|
21c0e39a6377cb2fbf6233328f851bb87b707e58
|
refs/heads/master
| 2021-07-08T12:00:47.979574
| 2021-05-17T16:13:15
| 2021-05-17T16:13:15
| 12,947,693
| 1
| 0
| null | 2021-05-17T16:13:15
| 2013-09-19T12:10:57
|
R
|
UTF-8
|
R
| false
| true
| 424
|
rd
|
summary-Counts-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{summary,Counts-method}
\alias{summary,Counts-method}
\title{Summary method for \code{Counts} class}
\usage{
\S4method{summary}{Counts}(object, ...)
}
\arguments{
\item{object}{object of class \code{Counts}}
\item{...}{additional parameters affecting the summary produced}
}
\description{
Summary method for \code{Counts} class
}
|
6b82be92462be60a334d8528ec26df3fd072835c
|
c91969db6e1d7f08a315d824a26a9a8954ed3810
|
/tests/testthat/test-json.R
|
147f4b04343670e5411de0e9d1f07981ca30c9d3
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
smartinsightsfromdata/qtlcharts
|
7a888a6d83f9b89c2a3a48845aed161459cc4a27
|
67d95b33eb9deafc48b74c84201a2f80b2fd0ab7
|
refs/heads/master
| 2021-01-15T20:52:47.308601
| 2015-01-28T12:51:19
| 2015-01-28T12:51:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,357
|
r
|
test-json.R
|
context("json i/o")

test_that("test simple conversions to JSON", {
  # Serialise and flatten to a plain character scalar for comparison.
  json_of <- function(...) as.character(jsonlite::toJSON(...))

  # Each case pairs an input with its expected JSON string.
  # Named lists: NA always serialises to null, whatever its neighbours are.
  # Atomic vectors: NA becomes null in character vectors but "NA" once the
  # vector has been coerced to numeric.
  cases <- list(
    list(input = list(x = "a", y = NA), expected = "{\"x\":[\"a\"],\"y\":[null]}"),
    list(input = list(x = NA, y = "a"), expected = "{\"x\":[null],\"y\":[\"a\"]}"),
    list(input = list(x = NA, y = 1),   expected = "{\"x\":[null],\"y\":[1]}"),
    list(input = list(x = 1, y = NA),   expected = "{\"x\":[1],\"y\":[null]}"),
    list(input = c(x = "a", y = NA),    expected = "[\"a\",null]"),
    list(input = c(x = NA, y = "a"),    expected = "[null,\"a\"]"),
    list(input = c(x = 1, y = NA),      expected = "[1,\"NA\"]"),
    list(input = c(x = NA, y = 1),      expected = "[\"NA\",1]")
  )
  for (case in cases) {
    expect_equal(json_of(case$input), case$expected)
  }

  # It's a bit of a surprise that NA -> "NA" if numeric and NA -> null if character
  expect_equal(json_of(lapply(c(a = 1, b = NA), jsonlite::unbox)), "{\"a\":1,\"b\":\"NA\"}")
  expect_equal(json_of(lapply(c(a = "1", b = NA), jsonlite::unbox)), "{\"a\":\"1\",\"b\":null}")
})
|
350ba6371eb6b0f27261769c9f9925b060cb80d0
|
8d4dfa8b6c11e319fb44e578f756f0fa6aef4051
|
/man/getAAProteinCoordinates.Rd
|
2772272d4418f978d8cd29317507a578624ef87b
|
[] |
no_license
|
eahrne/SafeQuant
|
ce2ace309936b5fc2b076b3daf5d17b3168227db
|
01d8e2912864f73606feeea15d01ffe1a4a9812e
|
refs/heads/master
| 2021-06-13T02:10:58.866232
| 2020-04-14T10:01:43
| 2020-04-14T10:01:43
| 4,616,125
| 4
| 4
| null | 2015-11-03T20:12:03
| 2012-06-10T15:35:25
|
R
|
UTF-8
|
R
| false
| true
| 624
|
rd
|
getAAProteinCoordinates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IdentificationAnalysis.R
\name{getAAProteinCoordinates}
\alias{getAAProteinCoordinates}
\title{Get amino acid coordinates on protein}
\usage{
getAAProteinCoordinates(peptideSeq, proteinSeq, aaRegExpr = "[STY]")
}
\arguments{
\item{peptideSeq}{peptide sequence}
\item{proteinSeq}{protein sequence}
\item{aaRegExpr}{target AA reg exp}
}
\value{
vector of protein coordinates (modification residue number)
}
\description{
Get amino acid coordinates on protein
}
\details{
NA
}
\note{
No note
}
\examples{
print("No examples")
}
\references{
NA
}
|
ea904d2329741d6d46ba67bd48ceff3bd9edf6b5
|
792ee880bc80a08af80eb87d8d193fd586a1f6de
|
/R/anova_out.R
|
da21e3a3d56cc3d28df7395e81e347ab5580a8ef
|
[] |
no_license
|
cran/schoRsch
|
9852bf631fa28b90e14a5d939973264ceef8e6a1
|
52957e3499cd0afc2e7a8ffb20602bc4d4bb9467
|
refs/heads/master
| 2022-11-10T11:54:14.422863
| 2022-11-01T20:14:58
| 2022-11-01T20:14:58
| 17,699,487
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,750
|
r
|
anova_out.R
|
# -----------------------------------------------
# Function: Assemble summary table for ezANOVA.
# Requires an ezANOVA output object as argument (run ezANOVA with
# detailed=TRUE so that sums of squares SSn/SSd are available).
#
# Arguments:
#   ezout    - output object of ez::ezANOVA
#   print    - if TRUE, print the assembled tables; otherwise return them
#   sph.cor  - sphericity correction applied when Mauchly's test is
#              significant: "GG" (Greenhouse-Geisser), "HF" (Huynh-Feldt),
#              or "no" for uncorrected p-values
#   mau.p    - alpha level of Mauchly's test that triggers the correction
#   etasq    - effect size to report: "partial" or "generalized"
#   dfsep    - separator between df1 and df2 in the formatted F strings
#   corr.df  - if TRUE, report epsilon-corrected degrees of freedom
#   show.eps - where to report the epsilon estimate: 0 = omit,
#              1 = inside the df parentheses, 2 = right after the dfs,
#              3 = at the end of the formatted string
#
# Value: a list with the ANOVA table, the sphericity tests, the formatted
# result strings, and (when applicable) a NOTE describing the adjustments.
# -----------------------------------------------
anova_out <- function(ezout,
                      print = TRUE,
                      sph.cor = "GG",
                      mau.p = 0.05,
                      etasq = "partial",
                      dfsep = ", ",
                      corr.df = FALSE,
                      show.eps = 0) {

  # ---------------------------------------------
  # (1) Check input arguments
  # ---------------------------------------------
  x <- ezout;

  # Check for unknown sphericity correction method; fall back to "no"
  if (toupper(sph.cor)!="GG" &
      toupper(sph.cor)!="HF" &
      toupper(sph.cor)!="NO" ) {
    sph.cor="no"
    print(paste("Warning: Unknown correction method specified!",
                " Reporting uncorrected p-values instead.",sep=""),quote=FALSE)
  }

  # Check for unknown effect size measure; fall back to partial eta squared
  if (etasq!="partial" &
      etasq!="generalized" ) {
    etasq="partial"
    print(paste("Warning: Unknown effect size specified!",
                " Reporting partial eta squared instead.",sep=""),quote=FALSE)
  }

  # Check for unknown epsilon reporting option; fall back to omitting it
  if (show.eps!=0 &
      show.eps!=1 &
      show.eps!=2 &
      show.eps!=3) {
    show.eps=0
    print(paste("Warning: Unknown reporting method for epsilon specified!",
                " Omitting epsilon statistics for violations of sphericity.",sep=""),quote=FALSE)
  }

  # ---------------------------------------------
  # (2) Assemble ANOVA table
  # ---------------------------------------------
  # Construct table; doeswork flags whether the main table could be built
  doeswork <- 1;
  if ("ANOVA" %in% names(x)) {
    # Check whether SSn and SSd are present in input data
    if ("SSn" %in% colnames(x$ANOVA) && "SSd" %in% colnames(x$ANOVA)) {
      outtable <- data.frame(
        Effect=x$ANOVA$Effect,
        MSE=x$ANOVA$SSd/x$ANOVA$DFd,
        df1=x$ANOVA$DFn,
        df2=x$ANOVA$DFd,
        F=format(round(x$ANOVA$F,2),nsmall=2),
        p=format(round(x$ANOVA$p,3),nsmall=3),
        petasq=format(round(x$ANOVA$SSn/
                              (x$ANOVA$SSn+x$ANOVA$SSd),2),nsmall=2),
        getasq=format(round(x$ANOVA$ges,2),nsmall=2)
      );
    } else {
      outtable <- "Couldn't find Sum of Squares in ezANOVA output. Please use the 'detailed=TRUE' option!";
      doeswork <- 0;
    }
  } else {
    outtable <- "N/A ... possibly wrong input?";
    doeswork <- 0;
  }

  # Do remaining operations only if main output could be created
  if (doeswork < 1) {
    print(outtable);
  } else {
    # ---------------------------------------------
    # (3) Sphericity tests
    # ---------------------------------------------
    if ("Mauchly's Test for Sphericity" %in% names(x)) {
      outspher <- data.frame(
        Effect=x$"Mauchly's Test for Sphericity"$Effect,
        p_Mauchly=format(round(x$"Mauchly's Test for Sphericity"$p,3),nsmall=3),
        GGEpsilon=format(round(x$"Sphericity Corrections"$GGe,3),nsmall=3),
        p_GG=format(round(x$"Sphericity Corrections"$"p[GG]",3),nsmall=3),
        HFEpsilon=format(round(x$"Sphericity Corrections"$HFe,3),nsmall=3),
        p_HF=format(round(x$"Sphericity Corrections"$"p[HF]",3),nsmall=3)
      );
    } else {
      # No within-subject factors with >2 levels: no sphericity tests exist
      outspher <- "N/A"
      sph.cor = "no"
    }

    # ---------------------------------------------
    # (4) Prepare formatted output
    # ---------------------------------------------
    if ("ANOVA" %in% names(x)) {
      # Adjust p values when sphericity is violated
      txttable <- outtable;
      txttable$epsilon <- ""
      ajdffcts <- list();
      # Check all effects listed in "Sphericity Corrections"
      if (toupper(sph.cor)!="NO") {
        for (isph in 1:length(x$"Sphericity Corrections"$Effect)) {
          # Get effects of interest and check corresponding p_Mauchly
          if (x$"Mauchly's Test for Sphericity"$p[isph] <= mau.p) {
            eoi <- x$"Sphericity Corrections"$Effect[isph]
            # Cycle through ANOVA table and check for effects
            for (iaov in 1:length(x$ANOVA$Effect)) {
              # Adjust p-value
              if (x$ANOVA[iaov,1]==eoi) {
                if (toupper(sph.cor)=="GG") {
                  pmaucorr <- format(round(x$"Sphericity Corrections"$"p[GG]"[isph],3),nsmall=3);
                  # Before R 4.0, data.frame strings are factors: the new
                  # p-value string must first be added as a factor level
                  if (R.version$major < 4) {
                    levels(txttable$p) <- c(levels(txttable$p), pmaucorr)
                  }
                  txttable[iaov,6] <- pmaucorr;
                  # Correct dfs and get epsilon estimates
                  if (corr.df == TRUE) {
                    corr.df1 <- format(round(x$"Sphericity Corrections"$GGe[isph]*as.numeric(txttable[iaov,3]),2),nsmall=2);
                    corr.df2 <- format(round(x$"Sphericity Corrections"$GGe[isph]*as.numeric(txttable[iaov,4]),2),nsmall=2);
                    txttable[iaov,3]=corr.df1;
                    # Append epsilon estimate to df2 (if it is to be reported in the df parentheses)
                    if (show.eps == 1) {
                      txttable[iaov,4] <- paste(corr.df2, dfsep, "e = ", format(round(x$"Sphericity Corrections"$GGe[isph],2),nsmall=2), sep="");
                    } else {
                      txttable[iaov,4] <- corr.df2;
                    }
                  } else if (show.eps == 1) {
                    txttable[iaov,4] <- paste(txttable[iaov,4], dfsep, "e = ", format(round(x$"Sphericity Corrections"$GGe[isph],2),nsmall=2), sep="");
                  } else if (show.eps == 2) {
                    txttable$epsilon[iaov] <- paste(" (e = ", format(round(x$"Sphericity Corrections"$GGe[isph],2),nsmall=2), ")", sep="");
                  }
                  # Print epsilon estimate in additional column (if it is to be reported after the dfs)
                  if (show.eps >= 2) {
                    txttable$epsilon[iaov] <- paste(" (e = ", format(round(x$"Sphericity Corrections"$GGe[isph],2),nsmall=2), ")", sep="");
                  }
                } else if (toupper(sph.cor)=="HF") {
                  pmaucorr <- format(round(x$"Sphericity Corrections"$"p[HF]"[isph],3),nsmall=3);
                  # Before R 4.0, data.frame strings are factors: the new
                  # p-value string must first be added as a factor level
                  if (R.version$major < 4) {
                    levels(txttable$p) <- c(levels(txttable$p), pmaucorr)
                  }
                  txttable[iaov,6]=pmaucorr
                  # Correct dfs and get epsilon estimates
                  if (corr.df == TRUE) {
                    corr.df1 <- format(round(x$"Sphericity Corrections"$HFe[isph]*as.numeric(txttable[iaov,3]),2),nsmall=2);
                    corr.df2 <- format(round(x$"Sphericity Corrections"$HFe[isph]*as.numeric(txttable[iaov,4]),2),nsmall=2);
                    txttable[iaov,3]=corr.df1;
                    # Append epsilon estimate to df2 (if it is to be reported in the df parentheses)
                    if (show.eps == 1) {
                      txttable[iaov,4] <- paste(corr.df2, dfsep, "e = ", format(round(x$"Sphericity Corrections"$HFe[isph],2),nsmall=2), sep="");
                    } else {
                      txttable[iaov,4] <- corr.df2;
                    }
                  } else if (show.eps == 1) {
                    txttable[iaov,4] <- paste(txttable[iaov,4], dfsep, "e = ", format(round(x$"Sphericity Corrections"$HFe[isph],2),nsmall=2), sep="");
                  }else if (show.eps >= 2) {
                    txttable$epsilon[iaov] <- paste(" (e = ", format(round(x$"Sphericity Corrections"$HFe[isph],2),nsmall=2), ")", sep="");
                  }
                  # Print epsilon estimate in additional column (if it is to be reported after the dfs)
                  if (show.eps >= 2) {
                    txttable$epsilon[iaov] <- paste(" (e = ", format(round(x$"Sphericity Corrections"$HFe[isph],2),nsmall=2), ")", sep="");
                  }
                }
                ajdffcts <- c(ajdffcts,eoi);
              }
            }
          } # End: if p_Mauchly < p_crit
        }
        # Construct note describing which (if any) effects were adjusted
        if (length(ajdffcts) == 0) {
          note <- paste("No adjustments necessary (all p_Mauchly > ", mau.p,
                        ").",sep="")
        } else {
          if (corr.df == TRUE) {
            notedf <- " Reporting corrected degrees of freedom."
          } else {
            notedf <- " Reporting uncorrected degrees of freedom."
          }
          note <- paste("p-values for the following effects were ", sph.cor,
                        "-adjusted (p_Mauchly <= ", mau.p, "): ",
                        paste(paste(ajdffcts,collapse="; ",sep=""), ".", notedf,sep=""),sep="");
        }
      # Else/End: Check if sph.cor != "NO"
      } else {
        if (toupper(sph.cor) == "NO") {
          note <- "Reporting unadjusted p-values."
        } else if (outspher!="N/A") {
          note <- "Reporting unadjusted p-values."
        }
      }
      # Format p-values for the result strings (">.999" / "<.001" bounds,
      # and strip the leading zero per APA style)
      pcorr <- paste(", p = ", txttable$p, sep="")
      pcorr <- gsub("p = 1.000","p > .999", pcorr, fixed=TRUE)
      pcorr <- gsub("p = 0.000","p < .001", pcorr, fixed=TRUE)
      pcorr <- gsub("p = 0","p = ", pcorr, fixed=TRUE)
      # Get effect size (same bounds/zero-stripping as for p-values)
      if (etasq == "partial") {
        petasqcorr <- paste(", np2 = ", txttable$petasq, sep="")
        petasqcorr <- gsub("np2 = 1.00","np2 > .99", petasqcorr, fixed=TRUE)
        petasqcorr <- gsub("np2 = 0.00","np2 < .01", petasqcorr, fixed=TRUE)
        petasqcorr <- gsub("np2 = 0","np2 = ", petasqcorr, fixed=TRUE)
        #outtext <- data.frame(
        #  Effect=x$ANOVA$Effect,
        #  Text=paste("F(", txttable$df1, "," ,txttable$df2, ") = ", txttable$F,
        #             pcorr, petasqcorr,sep=""));
        if (show.eps == 3) {
          outtext <- data.frame(
            Effect=x$ANOVA$Effect,
            Text=paste("F(", txttable$df1, dfsep, txttable$df2, ") = ", txttable$F,
                       pcorr, petasqcorr, txttable$epsilon, sep=""));
        } else if (show.eps == 2) {
          outtext <- data.frame(
            Effect=x$ANOVA$Effect,
            Text=paste("F(", txttable$df1, dfsep, txttable$df2, ") = ", txttable$F, txttable$epsilon,
                       pcorr, petasqcorr,sep=""));
        } else {
          outtext <- data.frame(
            Effect=x$ANOVA$Effect,
            Text=paste("F(", txttable$df1, dfsep, txttable$df2, ") = ", txttable$F,
                       pcorr, petasqcorr,sep=""));
        }
      } else {
        getasqcorr <- paste(", ng2 = ", txttable$getasq, sep="")
        getasqcorr <- gsub("ng2 = 1.00","ng2 > .99", getasqcorr, fixed=TRUE)
        getasqcorr <- gsub("ng2 = 0.00","ng2 < .01", getasqcorr, fixed=TRUE)
        getasqcorr <- gsub("ng2 = 0","ng2 = ", getasqcorr, fixed=TRUE)
        if (show.eps == 3) {
          outtext <- data.frame(
            Effect=x$ANOVA$Effect,
            Text=paste("F(", txttable$df1, dfsep, txttable$df2, ") = ", txttable$F,
                       pcorr, getasqcorr, txttable$epsilon,sep=""));
        } else if (show.eps == 2) {
          outtext <- data.frame(
            Effect=x$ANOVA$Effect,
            Text=paste("F(", txttable$df1, dfsep, txttable$df2, ") = ", txttable$F, txttable$epsilon,
                       pcorr, getasqcorr,sep=""));
        } else {
          outtext <- data.frame(
            Effect=x$ANOVA$Effect,
            Text=paste("F(", txttable$df1, dfsep, txttable$df2, ") = ", txttable$F,
                       pcorr, getasqcorr,sep=""));
        }
      }
    } else {
      outtext <- "N/A"
    }

    # ---------------------------------------------
    # (5) Combine and display ANOVA results
    # ---------------------------------------------
    x <- list("--- ANOVA RESULTS ------------------------------------" = outtable,
              "--- SPHERICITY TESTS ------------------------------------" = outspher,
              "--- FORMATTED RESULTS ------------------------------------" = outtext);
    if (exists("note")) {
      x = c(x,"NOTE:"=note);
    }
    if (print==TRUE) {
      print(x);
    } else {
      x;
    }
  } # Do only if doeswork > 0
}
|
33d0f8fd9acacd3419b423c89af7cbcd2931a27d
|
b908acbcad164e17c3959eb565841fc519354918
|
/man/filterMEData.Rd
|
927511ed83f1bacdd8ca8fb4b840f8d93359e34d
|
[
"MIT"
] |
permissive
|
saracg-forks/microbiomeExplorer
|
1ae25b6f2481302b82f56d23d6e659989a09cbed
|
4c8e3c42eca65d70c636f07df5ceaf01c32e4055
|
refs/heads/master
| 2023-08-22T00:37:06.369878
| 2021-10-26T23:11:45
| 2021-10-26T23:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 762
|
rd
|
filterMEData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{filterMEData}
\alias{filterMEData}
\title{Function to filter the MRexperiment data by numerical parameters}
\usage{
filterMEData(MRobj, minpresence = 1, minfeats = 2, minreads = 2)
}
\arguments{
\item{MRobj}{MRExperiment object to filter}
\item{minpresence}{minimum sample presence per feature}
\item{minfeats}{minimum number of features per sample}
\item{minreads}{minimum number of reads per sample}
}
\value{
the filtered MRobj
}
\description{
Function to filter the MRexperiment data by numerical parameters
}
\examples{
data("mouseData", package = "metagenomeSeq")
filterMEData(MRobj = mouseData, minpresence = 4, minfeats = 300)
}
\author{
Janina Reeder
}
|
265651d05777e423c03e01b4a9d326173bb1c3d0
|
e1206339c2caae271aab9df3767545b0a9da135d
|
/scrape.R
|
c86718c643a426d76ce1722d00633fbf9e6a86f5
|
[] |
no_license
|
schochastics/eurovision
|
6877c823c6d15f7c681a3f612af5b6569c952cb0
|
d9686478bcb6f9fd3acab3e298c26f1631fd9792
|
refs/heads/master
| 2020-12-30T15:55:39.924730
| 2017-05-13T21:31:36
| 2017-05-13T21:31:36
| 91,190,631
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,040
|
r
|
scrape.R
|
# Scrape Eurovision voting grids (1958-2016) with RSelenium and XML,
# writing one tidy CSV per year.
library(RSelenium)
library(XML)
# FIX: gather() and the %>% pipe were used below without being loaded;
# tidyr provides both (it re-exports magrittr's pipe).
library(tidyr)

# helper function ------
# For <td> cells wrapping an <img>, return the image's alt text (used by
# readHTMLTable via elFun); otherwise return the cell's text value.
getAlt <- function(node, ...){
  if(xmlName(node) == "td" && !is.null(node[["img"]]))
    xmlGetAttr(node[["img"]], "alt")
  else
    xmlValue(node)
}

# (make sure docker is running: https://cran.r-project.org/web/packages/RSelenium/vignettes/RSelenium-docker.html)
# scrape data-----
base.url <- "http://eurovisionworld.com/?eurovision="
remDr <- remoteDriver(port = 4445L)
remDr$open()

for(y in 1958:2016){
  print(y)
  url <- paste0(base.url,y)
  remDr$navigate(url)
  # Grab the voting grid element and parse its HTML into a data frame.
  webElem <- remDr$findElement(using = 'xpath', value = '//*[(@id = "voting_grid")]')
  webElemtxt <- webElem$getElementAttribute("outerHTML")[[1]]
  table <- readHTMLTable(webElemtxt,elFun = getAlt)$`NULL`
  # Shift the header row right by three cells, then drop helper columns.
  table[1,] <- c(NA,NA,NA,table[1,1:(ncol(table)-3)]) %>% unlist
  table <- table[,-c(1,2,4)]
  names(table) <- table[1,]
  names(table)[1] <- "country"
  table <- table[-1,]
  # Wide grid -> long format: one row per (country, voter) pair.
  voting.df <- table %>% gather(voter,points,-country)
  write.table(voting.df,paste0(y,".csv"),row.names = FALSE)
}
remDr$close()
|
ea948e22796d910b8128640213de1c4fc85f0c6f
|
2fba561f4692bd37c0ead5538d1111e4d03e2ac9
|
/sandbox/network.R
|
64e51369edf9cd417f191253a9de4f441593927b
|
[] |
no_license
|
AFortyTwo/media42
|
7770b6d96fa8dc73612459ec08804c1598d2484e
|
03a985edd35c9fa1d2715ae3438834d5013552eb
|
refs/heads/master
| 2021-04-09T16:17:56.015777
| 2018-06-29T05:19:42
| 2018-06-29T05:19:42
| 125,764,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,619
|
r
|
network.R
|
library(ggraph)
library(tidygraph)
library(tidyverse)
library(plotly)
library(igraph)

# Network: exploratory analysis of commenters and the articles they
# comment on (user-user correlation network, then a bipartite graph).
df <- readRDS("export/other/merged.rds")

# Distribution of comments per user.
df %>% count(user) %>% arrange(desc(n)) %>%
  ggplot(aes(x = n)) + geom_histogram()

# Keep only users with more than 20 comments.
df <- df %>% group_by(user) %>% mutate(n = n()) %>%
  filter(n>20) # %>% distinct(user)

r_count <- df %>% count(id_user) %>% na.omit()

# Pairwise correlation of users across articles, restricted to articles
# with more than 50 distinct commenters.
# NOTE(review): semi_join() with no `by` joins on ALL shared columns
# (here id_user AND n) — verify the intended key is id_user only.
r_corr <- df %>%
  # filter(!is.na(cat)) %>%
  semi_join(r_count) %>%
  group_by(id_article) %>%
  mutate(n = n_distinct(id_user)) %>% ungroup() %>%
  filter(n > 50) %>%
  widyr::pairwise_cor(id_user, id_article)

hist(r_corr$correlation)

# First indication of multiple usernames:
r_corr %>% arrange(desc(correlation))
# Todo relative frequency same article
# Dashboard Find similar

# Network
set.seed(2018)
# we set an arbitrary threshold of connectivity
r_corr %>%
  filter(correlation > .1) %>%
  graph_from_data_frame(vertices = r_count) %>%
  ggraph(layout = "fr") +
  geom_edge_link(aes(edge_alpha = correlation)) +
  geom_node_point(aes(size = n), color = "lightblue") +
  theme_void() +
  geom_node_text(aes(label = name), repel = TRUE) +
  theme(legend.position = "none")

# Restrict to prolific users and build a user-by-article incidence matrix.
df <- df %>%
  group_by(id_user) %>% mutate(d = n_distinct(id_article)) %>%
  filter(d > 50) %>%
  select(id_article, id_user) # %>%
# filter(row_number() < 5000)

m <- df %>% count(id_article, id_user) %>% spread(id_article,n, fill = 0)

# g <- igraph::graph_from_data_frame(df %>% count(id_article, id_user))
g <- as_tbl_graph(m)
gd <- igraph::simplify(igraph::graph.data.frame(df, directed=FALSE))

igraph::vcount(g)
igraph::ecount(g)

# FIX: the original final line `edges <- df %>% select(source = )` was
# incomplete and made the whole script fail to parse. Commented out until
# the intended source/target columns are confirmed (presumably
# id_user -> id_article):
# edges <- df %>% select(source = id_user, target = id_article)
|
8aa3e18c00dc8c2b31208b4fbccb8a28a0c2d89c
|
c99f4211cac0c4e23899ea86b978a6385d94da6a
|
/man/fitDiscrete.Rd
|
f849ec5b001923bcb2c65a1714af41c173ba51a0
|
[] |
no_license
|
mwpennell/geiger-v2
|
a0e0047a666a5fe76d2216d688cdc3e29bee7179
|
a6b589fc07112449effb926ed8b9e1e03e28f49d
|
refs/heads/master
| 2023-03-11T17:33:28.182595
| 2023-02-22T16:54:14
| 2023-02-22T16:54:14
| 17,117,854
| 14
| 18
| null | 2022-12-06T01:05:47
| 2014-02-23T21:43:24
|
R
|
UTF-8
|
R
| false
| false
| 11,125
|
rd
|
fitDiscrete.Rd
|
\name{fitDiscrete}
\alias{fitDiscrete}
\alias{as.Qmatrix.gfit}
\title{ Model fitting for discrete comparative data }
\description{
fitting macroevolutionary models to phylogenetic trees
}
\usage{
fitDiscrete(phy, dat,
model = c("ER","SYM","ARD","meristic"),
transform = c("none", "EB", "lambda", "kappa", "delta", "white"),
bounds = list(), control = list(method = c("subplex", "L-BFGS-B"),
niter = 100, FAIL = 1e+200, hessian = FALSE, CI = 0.95), ncores=NULL,
...)
\method{as.Qmatrix}{gfit}(x, ...)
}
\arguments{
\item{phy}{ a phylogenetic tree of class phylo}
\item{dat}{ data vector for a single trait, with names matching tips in \code{phy}}
\item{model}{ an Mkn model to fit to comparative data (see \bold{Details}) }
\item{transform}{ an evolutionary model used to transform the tree (see \bold{Details}) }
\item{bounds}{ range to constrain parameter estimates (see \bold{Details}) }
\item{control}{ settings used for optimization of the model likelihood}
\item{ncores}{ Number of cores. If \code{NULL} then number of cores is detected}
\item{x}{ Object of class \code{"gfit"} for S3 method \code{as.Qmatrix} }
\item{...}{if \code{model="meristic"}, \code{...} can dictate whether the matrix is asymmetric (\code{symmetric=FALSE})}
}
\details{
This function fits various likelihood models for discrete character evolution. The function returns parameter estimates and the likelihood for univariate datasets. All of the models are continuous-time Markov models of trait evolution (see Yang 2006 for a good general discussion of this type of model).
The model likelihood is maximized using methods available in \code{\link[stats]{optim}} as well as \code{\link[subplex]{subplex}}. Optimization methods to be used within \code{optim} can be specified through the \code{control} object.
A number of random starting points are used in optimization and are given through the \code{niter} element within the \code{control} object (e.g., \code{control$niter}). Finding the maximum likelihood fit is sometimes tricky, especially as the number of parameters in the model increases. Even in the example below, a slightly suboptimal fit is occasionally returned with the default settings fitting the general (\code{ARD}) model. There is no rule of thumb for the number of iterations that will be appropriate for a given dataset and model, but one use the variance in fitted likelihoods across iterations as an indication of the difficulty of the likelihood space (see details of the \code{res} object in \bold{Value}). Twenty optimization iterations per parameter seems to be a decent \emph{starting} point for fitting these models.
The \code{FAIL} value within the \code{control} object should be a large value that will be considerably far from -lnL of the maximum model likelihood. In most cases, the default setting for \code{control$FAIL} will be appropriate. The Hessian may be used to compute confidence intervals (\code{CI}) for the parameter estimates if the \code{hessian} element in \code{control} is TRUE.
The function can handle traits with any number of character states, under a range of models. The character model is specified by the \code{model} argument:
\itemize{
\item{\bold{ER} }{is an \code{equal-rates} model of where a single parameter governs all transition rates}
\item{\bold{SYM} }{is a \code{symmetric} model where forward and reverse transitions share the same parameter}
\item{\bold{ARD} }{is an \code{all-rates-different} model where each rate is a unique parameter}
\item{\bold{meristic} }{is a model wherein transitions occur in a stepwise fashion (e.g., 1 to 2 to 3 to 2) without skipping intermediate steps; this requires a sensible coding of the character
states as consecutive integers are assumed to be neighboring states}
\item{\bold{matrix} }{is a user supplied model (given as a dummy matrix representing transition classes between states); elements that are zero signify rates that are also zero (see \bold{Examples})}
}
The \code{transform} argument allows one to test models where rates vary across the tree. Bounds for the relevant parameters of the tree \code{transform}
may be given through the \code{bounds} argument. Several bounds can be given at a time. Default bounds under the different models are given below.
Options for \code{transform} are as follows:
\itemize{
\item{\bold{none} }{is a model of rate constancy through time}
\item{\bold{EB} }{is the Early-burst model (Harmon et al. 2010) and also called the \code{ACDC} model (accelerating-decelerating; Blomberg et al. 2003). Set by the \code{a} rate parameter, \code{EB}
fits a model where the rate of evolution increases or decreases exponentially through time, under the model r[t] = r[0] * exp(a * t), where \code{r[0]} is the
initial rate, \code{a} is the rate change parameter, and \code{t} is time. Default bounds are \code{a = c(min = -10, max = 10)}}
\item{\bold{lambda} }{is one of the Pagel (1999) models that fits the extent to which the phylogeny predicts covariance among trait values for species. The model effectively transforms the tree:
values of \code{lambda} near 0 cause the phylogeny to become more star-like, and a \code{lambda} value of 1 recovers the \code{none} model. Default
bounds are \code{lambda = c(min = 0, max = 1)}}
\item{\bold{kappa} }{is a punctuational (speciational) model of trait evolution (Pagel 1999), where character divergence is related to the number of speciation events between two species. Note that if
there are speciation events in the given phylogeny (due to extinction or incomplete sampling), interpretation under the \code{kappa} model may be difficult. Considered as a tree
transformation, the model raises all branch lengths to an estimated power (\code{kappa}). Default bounds are \code{kappa = c(min = 0, max = 1)}}
\item{\bold{delta} }{is a time-dependent model of trait evolution (Pagel 1999). The \code{delta} model is similar to \code{ACDC} insofar as the \code{delta} model fits the relative contributions of
early versus late evolution in the tree to the covariance of species trait values. Where \code{delta} is greater than 1, recent evolution has been relatively fast; if \code{delta} is less
than 1, recent evolution has been comparatively slow. Intrepreted as a tree transformation, the model raises all node depths to an estimated power (\code{delta}). Default bounds are \code{delta = c(min = 0, max = 3)}}
\item{\bold{white} }{is a \code{white}-noise (non-phylogenetic) model, which converts the tree into a star phylogeny}
}
}
\value{
\code{fitDiscrete} returns a list with the following four elements:
\item{\bold{lik} }{is the function used to compute the model likelihood. The returned function (\code{lik}) takes arguments that are necessary for the given model.
For instance, if estimating an untransformed \code{ER} model, there would be a single argument (the transition rate) necessary for the \code{lik} function. The tree and data are stored internally within the \code{lik} function, which permits those elements to be efficiently reused when computing the likelihood under different parameter values. By default, the function evaluates the likelihood of the model by weighting root states in accordance with their conditional probability given the data (this is the \code{"obs"} option; see FitzJohn et al. 2009). This default behavior can be changed in the call to \code{lik} with \code{lik(pars, root="flat")}, for instance, which would weight each state equally at the root. The other useful option is \code{"given"}, where the user must also supply a vector (\code{root.p}) of probabilities for each possible state. To make likelihoods roughly comparable between \pkg{geiger} and \pkg{ape}, one should use the option \code{lik(pars, root="given", root.p=rep(1,k))}, where \code{k} is the number of character states. See \bold{Examples} for a demonstration
}
\item{\bold{bnd} }{is a matrix of the used bounds for the relevant parameters estimated in the model. Warnings will be issued if any parameter estimates occur at the supplied (or default) parameter bounds
}
\item{\bold{res} }{is a matrix of results from optimization. Rownames of the \code{res} matrix are the optimization methods
(see \code{\link[stats]{optim}} and \code{\link[subplex]{subplex}}). The columns in the \code{res} matrix are the estimated
parameter values, the estimated model likelihood, and an indication of optimization convergence. Values of convergence not
equal to zero are not to be trusted
}
\item{\bold{opt} }{is a list of the primary results: estimates of the parameters, the maximum-likelihood estimate (\code{lnL}) of the model, the
optimization method used to compute the MLE, the number of model parameters (\code{k}, including one parameter for the root state), the AIC (\code{aic}),
sample-size corrected AIC (\code{aicc}). The number of observations for AIC computation is taken to be the number of trait values observed.
If the Hessian is used, confidence intervals on the parameter estimates (\code{CI}) and the Hessian matrix (\code{hessian}) are also returned
}
}
\note{
To speed the likelihood search, one may set an environment variable to make use of parallel processing, used by \code{\link[parallel]{mclapply}}. To set the environment variable, use \code{options(mc.cores=INTEGER)}, where \code{INTEGER} is the number of available cores. Alternatively, the \code{mc.cores} variable may be preset upon the initiation of an R session (see \code{\link[base]{Startup}} for details).
}
\references{
Yang Z. 2006. \emph{Computational Molecular Evolution}. Oxford University Press: Oxford.
FitzJohn RG, WP Maddison, and SP Otto. 2009. Estimating trait-dependent speciation and extinction rates from incompletely resolved molecular phylogenies. \emph{Systematic Biology} 58:595-611.
}
\author{ LJ Harmon, RE Glor, RG FitzJohn, and JM Eastman }
\examples{
\dontrun{
## match data and tree
tmp=get(data(geospiza))
td=treedata(tmp$phy, tmp$dat)
geo=list(phy=td$phy, dat=td$data)
gb=round(geo$dat[,5]) ## create discrete data
names(gb)=rownames(geo$dat)
tmp=fitDiscrete(geo$phy, gb, model="ER", control=list(niter=5), ncores=2) #-7.119792
## using the returned likelihood function
lik=tmp$lik
lik(0.3336772, root="obs") #-7.119792
lik(0.3336772, root="flat") #-8.125354
lik(0.3336772, root="given", root.p=rep(1/3,3)) #-8.125354
lik(0.3336772, root="given", root.p=c(0, 1, 0)) #-7.074039
lik(c(0.3640363), root="given", root.p=rep(1,3)) #-7.020569 & comparable to ape:::ace solution
}
\donttest{
# general model (ARD)
## match data and tree
tmp=get(data(geospiza))
td=treedata(tmp$phy, tmp$dat)
geo=list(phy=td$phy, dat=td$data)
gb=round(geo$dat[,5]) ## create discrete data
names(gb)=rownames(geo$dat)
fitDiscrete(geo$phy, gb, model="ARD", ncores=1) #-6.064573
# user-specified rate classes
mm=rbind(c(NA, 0, 0), c(1, NA, 2), c(0, 2, NA))
fitDiscrete(geo$phy, gb, model=mm, ncores=1) #-7.037944
# symmetric-rates model
fitDiscrete(geo$phy, gb, model="SYM", ncores=1)#-6.822943}
}
\keyword{ arith }
\keyword{models}
|
be69f237633be92f8860b0edc77cc2e8111e1dde
|
e780054e167a67261fa2e120c437b46a5a05eaa1
|
/Source Code/Step 2 - Essentiality Analysis.R
|
8b4d81da58ceefec71d448ffec8de15fb6f23cb9
|
[] |
no_license
|
nbashkeel/EV
|
ccfd7dcf4fc9a3f1c76a0fd8d70913eb1c0f48d6
|
f98bbb74ce26270ccda901b06744697dc56d5a6a
|
refs/heads/master
| 2020-05-25T06:03:45.894531
| 2019-06-03T17:28:35
| 2019-06-03T17:28:35
| 187,660,749
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,595
|
r
|
Step 2 - Essentiality Analysis.R
|
## Step 2: essentiality analysis setup.
## Prompts the user for the project's "Source Code" folder and makes it the
## working directory; all paths below are relative to it.
setwd(choose.dir(default = "", caption = "Select folder labeled: 'Source Code'"))
library(illuminaio)  # readBGX() for the Illumina array manifest
library(rowr)        # cbind.fill() used later in this script
## Variability calls from the previous step; non-significant genes are
## re-labelled "NV" (non-variable).
df <- read.table("Data/Outputs/Final All 3.txt")
df$Class[df$Sig == "NS"] <- "NV"
## Split by tissue.
breast <- subset(df, df$Tissue == "Breast")
cerebellum <- subset(df, df$Tissue == "Cerebellum")
frontal <- subset(df, df$Tissue == "Frontal Cortex")
## Probe annotation from the Illumina HumanHT-12 v3 manifest file.
genelist <- readBGX("Data/HumanHT-12_V3_0_R3_11283641_A.bgx")
genelist <- data.frame(genelist$probes)
genelist <- data.frame(Gene.Symbol=genelist$ILMN_Gene,
                       Probes=genelist$Probe_Id,
                       Refseq=genelist$RefSeq_ID,
                       Unigene=genelist$Unigene_ID,
                       Chromosome=genelist$Chromosome,
                       Coordinates=genelist$Probe_Coordinates)
## Drop NR_/XR_ RefSeq accessions (non-coding / predicted transcripts).
genelist <- genelist[-grep("NR",genelist$Refseq),] # 654 genes
genelist <- genelist[-grep("XR",genelist$Refseq),] # 501 genes
## Remove probes without genomic coordinates.
genelist$Coordinates[genelist$Coordinates==""] <- NA
genelist <- genelist[complete.cases(genelist),] # 42084 probes w/ coordinates
rownames(genelist) <- genelist$Probes
genelist <- unique(genelist[,c(1,2,5,6)]) # 91 probes removed (41993 probes)
## Keep only probes annotated to a single standard chromosome (a name longer
## than 3 characters indicates a multi-chromosome annotation), and drop the
## "XY"/"YX" entries.
genelist$Chromosome <- as.character(genelist$Chromosome)
genelist <- subset(genelist, nchar(genelist$Chromosome) < 4) # 107 probes removed (41886 probes)
genelist <- subset(genelist, genelist$Chromosome != "XY" &
                     genelist$Chromosome != "YX") # 9 Probes removed
Essentiality <- function(dat) {
  ## Chi-square test of association between gene essentiality and
  ## variability class.
  ##
  ## dat: data frame with columns `Gene.Symbol` and `Class`. Genes are flagged
  ##   essential when listed in Data/Essential Genes.csv with Essential == "Y"
  ##   and Non.essential == "N".
  ## Returns a 3x4 data frame (rows Hyper/Hypo/NV) with class sizes,
  ##   essential-gene counts, standardized residuals, and the overall p-value.
  essentialgenes <- read.csv("Data/Essential Genes.csv")
  essentialgenes <- subset(essentialgenes,
                           essentialgenes$Essential == "Y" & essentialgenes$Non.essential == "N")
  essentialgenes <- essentialgenes[, c(1, 6)]
  colnames(essentialgenes) <- c("Gene.Symbol", "Essential")
  ## Flag the genes of `dat` that appear in the essential-gene list.
  dat$Essential <- "N"
  dat[na.omit(match(essentialgenes$Gene.Symbol, dat$Gene.Symbol)), "Essential"] <- "Y"
  dat$Essential <- as.factor(dat$Essential)
  counts <- table(dat$Essential, dat$Class)
  ## Run the test once (the original called chisq.test() twice).
  test <- chisq.test(counts)
  stdres <- test$stdres
  pval <- test$p.value
  counts <- rbind(counts, colSums(counts))
  ## Row 3 holds the column totals (N), row 2 the "Y" (essential) counts.
  summary.table <- as.data.frame(matrix(
    c(counts[3, 1], counts[2, 1], round(stdres[2, 1], 2),
      counts[3, 2], counts[2, 2], round(stdres[2, 2], 2),
      counts[3, 3], counts[2, 3], round(stdres[2, 3], 2)),
    ncol = 3, byrow = TRUE))
  colnames(summary.table) <- c("N", "Essential Gene Counts", "Standard Residuals")
  rownames(summary.table) <- c("Hyper", "Hypo", "NV")
  summary.table$`P Value` <- pval
  summary.table
}
## Per-tissue enrichment tables of essential genes by variability class.
breast.essential <- Essentiality(breast)
cere.essential <- Essentiality(cerebellum)
frontal.essential <- Essentiality(frontal)
# Extract list of essential genes
Essentiality_genelist <- function(dat, class) {
  ## Return the gene symbols in `dat` that are both essential (per
  ## Data/Essential Genes.csv) and assigned to the variability class `class`.
  essentialgenes <- read.csv("Data/Essential Genes.csv")
  essentialgenes <- subset(essentialgenes,
                           essentialgenes$Essential == "Y" & essentialgenes$Non.essential == "N")
  essentialgenes <- essentialgenes[, c(1, 6)]
  colnames(essentialgenes) <- c("Gene.Symbol", "Essential")
  dat$Essential <- "N"
  dat[na.omit(match(essentialgenes$Gene.Symbol, dat$Gene.Symbol)), "Essential"] <- "Y"
  dat <- subset(dat, dat$Essential == "Y" & dat$Class == class)
  dat$Gene.Symbol
}
## Essential hypervariable genes per tissue, padded to equal length with NA.
e_hyper <- cbind.fill(Essentiality_genelist(breast, "Hypervariable"),
                      Essentiality_genelist(cerebellum,"Hypervariable"),
                      Essentiality_genelist(frontal,"Hypervariable"),
                      fill=NA)
colnames(e_hyper) <- c("Breast", "Cerebellum", "Frontal")
## Genes shared by all three tissues.
common_hyper <- intersect(intersect(e_hyper$Breast,e_hyper$Cerebellum),e_hyper$Frontal)
## Interactive sanity checks: per-tissue counts and size of the overlap.
apply(e_hyper,2,function(x) {length(na.omit(x))})
length(common_hyper)
write.csv(e_hyper, "Data/Outputs/GO/Essentiality/Essential Hypervariable Genes.csv", quote = F, row.names = F)
write.csv(common_hyper, "Data/Outputs/GO/Essentiality/Common Essential Hypervariable.csv", quote=F, row.names = F)
## Same for hypovariable genes.
e_Hypo <- cbind.fill(Essentiality_genelist(breast, "Hypovariable"),
                     Essentiality_genelist(cerebellum,"Hypovariable"),
                     Essentiality_genelist(frontal,"Hypovariable"),
                     fill=NA)
colnames(e_Hypo) <- c("Breast", "Cerebellum", "Frontal")
common_Hypo <- intersect(intersect(e_Hypo$Breast,e_Hypo$Cerebellum),e_Hypo$Frontal)
write.csv(e_Hypo, "Data/Outputs/GO/Essentiality/Essential Hypovariable Genes.csv", quote = F, row.names = F)
write.csv(common_Hypo, "Data/Outputs/GO/Essentiality/Common Essential Hypovariable.csv", quote=F, row.names = F)
|
938670d876354ce370f261aa1b4fc6a138c95842
|
0fe115082fa671f6969ad2ed8d636e7afc7d1a57
|
/R-packages/evalcast/R/plot_calibration.R
|
7c9adf3dba606b0b73054b5aeb03501770ac88ab
|
[] |
no_license
|
eujing/covidcast
|
40e78e8505877febd2c00f55c69357a4411abbb6
|
3a00af226a6d5cea70f489f2732b6a253e5a6590
|
refs/heads/main
| 2023-04-15T10:36:00.145842
| 2021-03-22T02:22:47
| 2021-03-22T02:22:47
| 360,325,972
| 0
| 0
| null | 2021-04-21T22:45:20
| 2021-04-21T22:45:20
| null |
UTF-8
|
R
| false
| false
| 5,170
|
r
|
plot_calibration.R
|
#' Plot calibration curves
#'
#' Visualizes forecast calibration for a single score card, either as a
#' "wedgeplot" (proportion of observations above/below each nominal quantile)
#' or as a traditional calibration curve.
#'
#' @param scorecard Single score card.
#' @param type One of "wedgeplot" or "traditional".
#' @param alpha Deprecated parameter to be removed soon (currently unused).
#' @param legend_position Legend position, the default being "bottom".
#'
#' @importFrom rlang .data
#' @importFrom ggplot2 ggplot aes geom_point geom_line geom_abline geom_vline geom_hline labs scale_colour_discrete scale_alpha_continuous scale_size_continuous guides facet_wrap xlim ylim theme_bw theme
#' @importFrom dplyr %>% filter mutate recode
#' @importFrom tidyr pivot_longer
#' @export
plot_calibration <- function(scorecard,
                             type = c("wedgeplot", "traditional"),
                             alpha = 0.2,
                             legend_position = "bottom") {
  name <- attr(scorecard, "name_of_forecaster")
  ahead <- attr(scorecard, "ahead")
  type <- match.arg(type)
  if (type == "wedgeplot") {
    ## Long format with one row per (quantile level, above/below); the side
    ## that is expected to dominate at a given quantile is de-emphasized
    ## (emph = 0.5) via alpha and size.
    g <- compute_calibration(scorecard) %>%
      pivot_longer(contains("prop"),
                   names_to = "coverage_type",
                   values_to = "proportion") %>%
      filter(.data$coverage_type != "prop_covered") %>%
      mutate(emph = ifelse((.data$coverage_type == "prop_above" & .data$nominal_quantile < 0.5) |
                             (.data$coverage_type == "prop_below" & .data$nominal_quantile >= 0.5), 0.5, 1)) %>%
      mutate(coverage_type = recode(.data$coverage_type,
                                    prop_above = "Proportion above",
                                    prop_below = "Proportion below")) %>%
      ggplot(aes(x = .data$nominal_quantile,
                 y = .data$proportion,
                 colour = .data$coverage_type)) +
      geom_line(aes(alpha = .data$emph, size = .data$emph)) +
      geom_point(aes(alpha = .data$emph, size = .data$emph)) +
      geom_abline(intercept = 0, slope = 1) +
      geom_abline(intercept = 1, slope = -1) +
      labs(x = "Nominal quantile level",
           y = "Proportion",
           title = sprintf("%s (ahead = %s): Proportion above/below", name, ahead)) +
      scale_colour_discrete(name = "") +
      scale_alpha_continuous(range = c(0.5, 1)) +
      scale_size_continuous(range = c(0.5, 1)) +
      guides(alpha = FALSE, size = FALSE)
  } else if (type == "traditional") {
    calib <- compute_actual_vs_nominal_prob(scorecard)
    g <- calib %>%
      ggplot(aes(x = .data$nominal_prob, y = .data$prop_below)) +
      geom_line(color = "red") +
      geom_point(color = "red") +
      geom_abline(slope = 1, intercept = 0) +
      labs(x = "Quantile level",
           y = "Proportion",
           title = sprintf("%s (ahead %s): Calibration", name, ahead))
  }
  ## Shared facetting/theme for both plot types.
  g +
    facet_wrap(~ forecast_date) +
    xlim(0, 1) +
    ylim(0, 1) +
    theme_bw() + theme(legend.position = legend_position)
}
#' Plot interval coverage
#'
#' @param scorecards List of different score cards, all on the same forecasting
#'   task (i.e., same ahead, etc.).
#' @param type One of "all" or "one", indicating whether to show coverage
#'   across all nominal levels (in which case averaging is performed across
#'   forecast dates and locations) or whether to show it for one specific alpha
#'   value.
#' @param alpha If `type = "one"`, then 1-alpha is the nominal interval coverage
#'   shown.
#' @param legend_position Legend position, the default being "bottom".
#'
#' @importFrom rlang .data set_names
#' @importFrom purrr map_dfr
#' @importFrom ggplot2 ggplot aes geom_point geom_line geom_abline geom_vline geom_hline labs facet_wrap xlim ylim theme_bw theme
#' @importFrom dplyr %>% filter group_by summarize
#' @export
plot_coverage <- function(scorecards, type = c("all", "one"), alpha = 0.2,
                          legend_position = "bottom") {
  type <- match.arg(type)
  # make sure scorecards are comparable:
  unique_attr(scorecards, "ahead")
  unique_attr(scorecards, "as_of")
  unique_attr(scorecards, "geo_type")
  unique_attr(scorecards, "incidence_period")
  unique_attr(scorecards, "backfill_buffer")
  unique_attr(scorecards, "response")
  scorecards <- intersect_locations(scorecards)
  ## One row per forecaster/level, labelled by forecaster name.
  cover <- scorecards %>%
    set_names(all_attr(scorecards, "name_of_forecaster")) %>%
    map_dfr(compute_coverage, .id = "forecaster")
  if (type == "all") {
    ## Empirical vs nominal coverage across all levels, faceted by date.
    cover %>%
      ggplot(aes(x = .data$nominal_coverage_prob,
                 y = .data$prop_covered,
                 color = .data$forecaster)) +
      geom_line() +
      geom_abline(slope = 1, intercept = 0) +
      facet_wrap(~ .data$forecast_date) +
      xlim(0, 1) +
      ylim(0, 1) +
      labs(x = "Nominal coverage", y = "Empirical coverage") +
      theme_bw() + theme(legend.position = legend_position)
  } else {
    ## Coverage of the single 1-alpha interval over time, averaged over
    ## locations; the dashed line marks the nominal level.
    cover %>%
      filter(.data$nominal_coverage_prob == 1 - alpha) %>%
      group_by(.data$forecast_date, .data$forecaster) %>%
      summarize(prop_covered = mean(.data$prop_covered, na.rm = TRUE)) %>%
      ggplot(aes(x = .data$forecast_date,
                 y = .data$prop_covered,
                 color = .data$forecaster)) +
      geom_point() + geom_line() +
      geom_hline(yintercept = 1 - alpha, lty = 2) +
      labs(x = "Forecast date", y = "Empirical coverage") +
      theme_bw() + theme(legend.position = legend_position)
  }
}
|
88cf495ab184d572f56da858d673e19f4aced4c3
|
5d1b6b5c553019d54750c4404ab9cb638f6db8b7
|
/tokyo.R
|
2b74f08500e60794f365b4d60033af921f1011a0
|
[] |
no_license
|
tadakazu1972/tokyo_hogo
|
b1b9e285da49abc01fd2edb90a091588b5685e61
|
908b2bd21b0fa980b09db10cc83b8a74301c136d
|
refs/heads/master
| 2021-05-06T02:21:38.942873
| 2017-12-18T12:34:01
| 2017-12-18T12:34:01
| 114,522,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,133
|
r
|
tokyo.R
|
## Choropleth maps of public-assistance statistics for Tokyo's 23 wards,
## September 2017 (Heisei 29). Japanese strings below are data/labels and are
## intentionally left unchanged.
# Load libraries
library(dplyr)
library(sf)
library(readr)
library(RColorBrewer)
library(classInt)
# Set the working directory
setwd("~/Desktop/tokyo")
# Read the required files: DID shapefile and welfare statistics CSV
shape <- st_read("h27_did_13.shp")
hogo <- read_csv("xxxx.csv")
# Join the tables, keeping only the 23 wards present in both
data <- inner_join(shape, hogo, by="CITYNAME")
# Map 1: households receiving public assistance (fixed class breaks,
# white-orange-red ramp); ward name above centroid, value below it
par(family="HiraKakuProN-W3")
col_km <- data$生活保護世帯 %>% classIntervals(., 10, style="fixed", fixedBreaks=c(min(.),2000,4000,6000,8000,10000,12000,14000,16000,18000,max(.))) %>% findColours(., c("white","orange","red"))
plot(shape[1:23,4], col=col_km, main="東京都23区 生活保護世帯数 平成29年9月")
text(st_coordinates(data %>% st_centroid)[,1], st_coordinates(data %>% st_centroid)[,2]+0.005, labels=data$CITYNAME, cex=1)
text(st_coordinates(data %>% st_centroid)[,1], st_coordinates(data %>% st_centroid)[,2]-0.005, labels=data$生活保護世帯, cex=1)
#################################
# Map 2: number of people receiving public assistance (white-green-blue ramp)
par(family="HiraKakuProN-W3")
col_km <- data$生活保護人員 %>% classIntervals(., 10, style="fixed", fixedBreaks=c(min(.),2000,4000,6000,8000,10000,12000,14000,16000,18000,max(.))) %>% findColours(., c("white","green","blue"))
plot(shape[1:23,4], col=col_km, main="東京都23区 生活保護人員数 平成29年9月")
text(st_coordinates(data %>% st_centroid)[,1], st_coordinates(data %>% st_centroid)[,2]+0.005, labels=data$CITYNAME, cex=1)
text(st_coordinates(data %>% st_centroid)[,1], st_coordinates(data %>% st_centroid)[,2]-0.005, labels=data$生活保護人員, cex=1)
#################################
# Map 3: assistance rate, per mille (yellow-orange-red ramp)
par(family="HiraKakuProN-W3")
col_km <- data$保護率 %>% classIntervals(., 10, style="fixed", fixedBreaks=c(min(.),10,20,30,40,50,60,70,80,90,max(.))) %>% findColours(., c("yellow","orange","red"))
plot(shape[1:23,4], col=col_km, main="東京都23区 保護率(千分率) 平成29年9月")
text(st_coordinates(data %>% st_centroid)[,1], st_coordinates(data %>% st_centroid)[,2]+0.005, labels=data$CITYNAME, cex=1)
text(st_coordinates(data %>% st_centroid)[,1], st_coordinates(data %>% st_centroid)[,2]-0.005, labels=data$保護率, cex=1)
|
c55acdc2ccd4905e1a838494c8b3d5269a0fe0b7
|
f721a41844a75448e1e81c7f2306b770b295eb77
|
/man/Multipledata.rd
|
07777d1413ecb2390e1655c99d32ab81cecbb184
|
[] |
no_license
|
cran/bear
|
a2446a1ab7c21d1f5f73fa702d55d6c1a2dad32e
|
e28412d9eb8182620ee1d119d31de4aeafaa0602
|
refs/heads/master
| 2021-01-01T06:54:50.681758
| 2014-12-10T00:00:00
| 2014-12-10T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
rd
|
Multipledata.rd
|
\encoding{UTF-8}
\name{Multipledata}
\docType{data}
\alias{Multipledata}
\title{Data for NCA analysis of multiple-dose studies}
\description{
  The dataset contains subject, drug, sequence, period, time, and concentration values.
}
\keyword{misc}
|
136bf121cbeb323d0ca206d3d32a45a96aff5a38
|
33b7262af06cab5cd28c4821ead49b3a0c24bb9d
|
/pkg/caret/R/classDist.R
|
98ae1563e38ed1ec58f71064580b370b2404c4f3
|
[] |
no_license
|
topepo/caret
|
d54ea1125ad41396fd86808c609aee58cbcf287d
|
5f4bd2069bf486ae92240979f9d65b5c138ca8d4
|
refs/heads/master
| 2023-06-01T09:12:56.022839
| 2023-03-21T18:00:51
| 2023-03-21T18:00:51
| 19,862,061
| 1,642
| 858
| null | 2023-03-30T20:55:19
| 2014-05-16T15:50:16
|
R
|
UTF-8
|
R
| false
| false
| 6,225
|
r
|
classDist.R
|
#' Compute and predict the distances to class centroids
#'
#' @aliases classDist.default classDist predict.classDist
#' @description This function computes the class centroids and covariance matrix for a training set for determining Mahalanobis distances of samples to each class centroid.
#'
#'
#' @param x a matrix or data frame of predictor variables
#' @param y a numeric or factor vector of class labels
#' @param groups an integer for the number of bins for splitting a numeric outcome
#' @param pca a logical: should principal components analysis be applied to the dataset prior to splitting the data by class?
#' @param keep an integer for the number of PCA components that should be used to predict new samples (\code{NULL} uses all within a tolerance of \code{sqrt(.Machine$double.eps)})
#' @param object an object of class \code{classDist}
#' @param newdata a matrix or data frame. If \code{vars} was previously specified, these columns should be in \code{newdata}
#' @param trans an optional function that can be applied to each class distance. \code{trans = NULL} will not apply a function
#' @param \dots optional arguments to pass (not currently used)
#'
#' @details
#' For factor outcomes, the data are split into groups for each class
#' and the mean and covariance matrix are calculated. These are then
#' used to compute Mahalanobis distances to the class centers (using
#' \code{predict.classDist} The function will check for non-singular matrices.
#'
#' For numeric outcomes, the data are split into roughly equal sized
#' bins based on \code{groups}. Percentiles are used to split the data.
#'
#' @return
#' for \code{classDist}, an object of class \code{classDist} with
#' elements:
#' \item{values }{a list with elements for each class. Each element
#' contains a mean vector for the class centroid and the
#' inverse of the class covariance matrix}
#' \item{classes}{a character vector of class labels}
#' \item{pca}{the results of \code{\link[stats]{prcomp}} when
#' \code{pca = TRUE}}
#' \item{call}{the function call}
#' \item{p}{the number of variables}
#' \item{n}{a vector of samples sizes per class}
#'
#' For \code{predict.classDist}, a matrix with columns for each class.
#' The columns names are the names of the class with the prefix
#' \code{dist.}. In the case of numeric \code{y}, the class labels are
#' the percentiles. For example, of \code{groups = 9}, the variable names
#' would be \code{dist.11.11}, \code{dist.22.22}, etc.
#'
#' @author Max Kuhn
#'
#' @references Forina et al. CAIMAN brothers: A family of powerful classification and class modeling techniques. Chemometrics and Intelligent Laboratory Systems (2009) vol. 96 (2) pp. 239-245
#'
#' @seealso \code{\link[stats]{mahalanobis}}
#'
#' @examples
#' trainSet <- sample(1:150, 100)
#'
#' distData <- classDist(iris[trainSet, 1:4],
#' iris$Species[trainSet])
#'
#' newDist <- predict(distData,
#' iris[-trainSet, 1:4])
#'
#' splom(newDist, groups = iris$Species[-trainSet])
#'
#' @keywords manip
#' @export
classDist <- function (x, ...) UseMethod("classDist")
#' @rdname classDist
#' @method classDist default
#' @importFrom stats cov predict quantile prcomp
#' @export
classDist.default <- function(x, y, groups = 5,
                              pca = FALSE,
                              keep = NULL,
                              ...)
{
  ## A numeric outcome is binned into `groups` classes by quantiles; a factor
  ## outcome is used as-is.
  if (is.numeric(y)) {
    y <- cut(y,
             unique(quantile(y, probs = seq(0, 1, length = groups + 1))),
             include.lowest = TRUE)
    ## Percentile labels, e.g. "20 40 60 80 100" for groups = 5.
    classLabels <- paste(round((1:groups)/groups*100, 2))
    y <- factor(y)
    cuts <- levels(y)
  } else {
    classLabels <- levels(y)
    cuts <- NULL
  }
  p <- ncol(x)
  if (pca) {
    pca <- prcomp(x, center = TRUE, scale. = TRUE,
                  tol = sqrt(.Machine$double.eps))
    ## Truncate the rotation only when `keep` was supplied. The original code
    ## did keep <- min(NULL, n) first, so its is.null() check was always TRUE
    ## and the truncation was a no-op for keep = NULL anyway.
    if (!is.null(keep)) {
      keep <- min(keep, ncol(pca$rotation))
      pca$rotation <- pca$rotation[, 1:keep, drop = FALSE]
    }
    x <- as.data.frame(predict(pca, newdata = x), stringsAsFactors = FALSE)
  } else {
    pca <- NULL
    ## split() dispatches to split.default for matrices, which splits
    ## elementwise; coerce to a data frame so the split below is row-wise,
    ## matching the documented "matrix or data frame" input.
    x <- as.data.frame(x, stringsAsFactors = FALSE)
  }
  x <- split(x, y)
  ## Per-class centroid and inverted covariance matrix (for mahalanobis()).
  getStats <- function(u) {
    if (nrow(u) < ncol(u))
      stop("there must be more rows than columns for this class")
    A <- try(cov(u), silent = TRUE)
    if (inherits(A, "try-error"))
      stop("Cannot compute the covariance matrix")
    A <- try(solve(A), silent = TRUE)
    if (inherits(A, "try-error"))
      stop("Cannot invert the covariance matrix")
    list(means = colMeans(u, na.rm = TRUE),
         A = A)
  }
  structure(
    list(values = lapply(x, getStats),
         classes = classLabels,
         cuts = cuts,
         pca = pca,
         call = match.call(),
         p = p,
         n = unlist(lapply(x, nrow))),
    class = "classDist")
}
#' @export
print.classDist <- function(x, ...)
{
  ## Pretty-printer for classDist objects: call, binning info (numeric
  ## outcomes only), PCA summary, and per-class sample sizes.
  printCall(x$call)
  if (!is.null(x$cuts)) {
    cat("Classes based on", length(x$cuts) - 1,
        "cuts of the data\n")
    ## The original built this label line with paste() but discarded the
    ## result; actually display the cut labels.
    cat(paste(x$cuts, collapse = " "), "\n")
  }
  if (!is.null(x$pca)) cat("PCA applied,",
                           ncol(x$pca$rotation),
                           "components retained\n\n")
  cat("# predictor variables:", x$p, "\n")  # fixed typo ("predictors variables")
  ## Class sizes; factor outcomes get the class name in parentheses.
  cat("# samples:",
      paste(
        paste(x$n,
              ifelse(is.null(x$cuts), " (", " "),
              names(x$n),
              ifelse(is.null(x$cuts), ")", ""),
              sep = ""),
        collapse = ", "),
      "\n")
  invisible(x)
}
#' @rdname classDist
#' @method predict classDist
#' @importFrom stats mahalanobis predict
#' @export
predict.classDist <- function(object, newdata, trans = log, ...)
{
  ## Project onto the stored PCA rotation when the model was fit with PCA.
  if (!is.null(object$pca)) {
    newdata <- predict(object$pca, newdata = newdata)
  }
  ## One column of squared Mahalanobis distances per class centroid; the
  ## per-class covariance matrices were inverted at fit time.
  dist_to <- function(class_stats) {
    mahalanobis(newdata, center = class_stats$means,
                cov = class_stats$A, inverted = TRUE)
  }
  out <- do.call("cbind", lapply(object$values, dist_to))
  colnames(out) <- paste0("dist.", object$classes)
  ## Optionally transform each distance column (default: natural log).
  if (!is.null(trans)) out <- apply(out, 2, trans)
  out
}
|
78511e6b9fe95bed0be4473f664bd507243f242c
|
8037c9e7047e73c6fbefc5614130aed272bbecb8
|
/Quantile_by_gene_expression.r
|
e3a68d4a1842fce6d73838bba9e869de033f7dce
|
[] |
no_license
|
pengweixing/FACT
|
5df5b16034db37bc51ad1a51a67aa5196e8f059c
|
19e6dd1be860d61424fa441b67c5f4c3b2d8dc64
|
refs/heads/master
| 2023-07-05T22:31:45.723150
| 2021-08-01T17:26:05
| 2021-08-01T17:26:05
| 318,553,287
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,499
|
r
|
Quantile_by_gene_expression.r
|
#################################################
# File Name:process.r
# Author: xingpengwei
# Mail: xingwei421@qq.com
# Created Time: Fri 25 Jun 2021 03:30:09 PM UTC
#################################################
library(org.Hs.eg.db)
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
library(Organism.dplyr)
data = read.table("gene_expression_matrix",header=T)
###gene RJ051C RJ053C RJ043C
# OR4F5 1 1 2
# OR4F29 0 1 2
data2 = data
data3 = data2[,2:ncol(data2)]
sample = colnames(data3)
gene = data2[,1]
src <- src_ucsc("Homo sapiens")
tx2=as.data.frame(transcripts(src,columns = c("symbol")))
for(i in 1:ncol(data3)){
temp = data3[,i]
temp_q = quantile(temp)
temp_q2=as.data.frame(t(temp_q))
quan1_index = which(temp<temp_q[2])
quan2_index = which(temp>=temp_q[2] & temp<temp_q[3])
quan3_index = which(temp>=temp_q[3] & temp<temp_q[4])
quan4_index = which(temp>=temp_q[4])
quan1_gene = gene[quan1_index]
quan2_gene = gene[quan2_index]
quan3_gene = gene[quan3_index]
quan4_gene = gene[quan4_index]
index1 = which(!is.na(match(tx2$symbol,quan1_gene)))
index2 = which(!is.na(match(tx2$symbol,quan2_gene)))
index3 = which(!is.na(match(tx2$symbol,quan3_gene)))
index4 = which(!is.na(match(tx2$symbol,quan4_gene)))
quantile1_tx = tx2[index1,]
quantile2_tx = tx2[index2,]
quantile3_tx = tx2[index3,]
quantile4_tx = tx2[index4,]
quantile1_tx2 = quantile1_tx[,c(1,2,3,7,8,5)]
quantile2_tx2 = quantile2_tx[,c(1,2,3,7,8,5)]
quantile3_tx2 = quantile3_tx[,c(1,2,3,7,8,5)]
quantile4_tx2 = quantile4_tx[,c(1,2,3,7,8,5)]
if(!dir.exists(sample[i])){
dir.create(sample[i])
}
write.table(quantile1_tx2,file=paste0(sample[i],"/quantile1_tx2.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quantile2_tx2,file=paste0(sample[i],"/quantile2_tx2.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quantile3_tx2,file=paste0(sample[i],"/quantile3_tx2.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quantile4_tx2,file=paste0(sample[i],"/quantile4_tx2.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quan1_gene,file=paste0(sample[i],"/quantile1_gene.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quan2_gene,file=paste0(sample[i],"/quantile2_gene.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quan3_gene,file=paste0(sample[i],"/quantile3_gene.bed"),sep="\t",row.names=F,col.names=F,quote=F)
write.table(quan4_gene,file=paste0(sample[i],"/quantile4_gene.bed"),sep="\t",row.names=F,col.names=F,quote=F)
}
|
457346e0487deddc74869e89d99f78440a8db3d7
|
292faf92e0db785f6a65354e88b67a1124b089c5
|
/31_enero_2017_conceptos_basicos.R
|
9ea1108d5506d4df2ab75a34c561dd09a0ab36dc
|
[] |
no_license
|
jose-eduardo/humanidades-digitales-unl
|
e55a9bcaf7ed40111520ce2dc2932ef58d22b471
|
dc2c4592b3af0a849e5139d87e6ae87dd8237521
|
refs/heads/master
| 2021-01-23T07:10:18.934281
| 2017-09-22T16:49:38
| 2017-09-22T16:49:38
| 80,492,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,305
|
r
|
31_enero_2017_conceptos_basicos.R
|
##NOTA: No estoy colocando tildes por si alguien tiene problemas para verlos en su sistema
##REPASO de conceptos básicos aprendidos en el curso de "lectura distante"
#ASIGNACION DE VALOR
#Se le puede asignar un valor a una variable con el signo (<) y el guión (-):
numero <- 300
#Los valores pueden ser numéricos ("numeric")
miembros.de.familia_1 <- 5
#letras ("character")
apellido_paterno <- "Gonzalez"
#o lógico ("logical")
miembro_vive_en_eeuu <- FALSE
#Puedo sumar /dividir / multiplicar
miembros.de.familia_1 <- (miembros.de.familia_1 + 4) / 2
#Si quiero guardar el nuevo valor tengo que pedir que el nuevo valor sea asignado de manera
#permanente con la siguiente expresión
miembros.de.familia_1 <- miembros.de.familia_1 * 4
#VECTORES
#En lugar de tener información similar en diferentes variables, podemos asignarlas a un vector
miembros.de.familia <- c(3, 5, 7)
#La letra "c" indica concadenar/concatenar todos los valores
#Si queremos ver sólo el valor de la familia 1, escribimos:
miembros.de.familia[1]
#[1] 3
#Lo mismo con las otras familias:
miembros.de.familia[2]
#[1] 5
miembros.de.familia[3]
#[1] 7
#Pregunta: Siguiendo el modelo anterior, ¿cómo se puede hacer un vector que
#contenga los apellidos paternos de tres diferentes familias?
#SUCESIÓN O SECUENCIA ('sequences')
#Estas se crean con el símbolo (:)
#Así que podemos asignarlas a una variable:
A <- 1:5
B <- 5:10
#O utilizarlas para leer parte de los elementos de un vector
objetos<-c("mesa", "libro", "lapiz", "plato", "servilleta", "zapato", "pelota")
#Puedo ver los elementos del 3 al 6 usando una secuencia. Los números nos dan los índices para localizar elementos.
objetos[3:6]
#Por supuesto que para nuestros propósitos, cada linea de un texto sera un elemento de un vector.
#en el siguiente fragmento de un poema, he colocado cada verso en un vector
poema<-c("Son los Centauros. Cubren la llanura. Les siente", "La montana. De lejos, forman son de torrente", "Que cae; su galope al aire que reposa", "Despierta, y estremece la hoja de laurel-rosa.", "Son los Centauros. Unos enormes, rudos; otros", "Alegres y saltantes como jovenes potros;", "Unos con largas barbas como los padres-rios", "Otros imberbes, agiles y de piafantes brios", "Y de robustos musculos, brazos y lomos aptos", "Para portar las ninfas rosadas en los raptos.")
#si queremos saber cuantas líneas tiene nuestro texto (o cuál es el tamaño de un vector)
length(poema)
#Podemos ver sólo los versos 4 al 7 usando el índice de los versos que queremos
poema[4:7]
#LISTAS
#Una lista es diferente de un vector porque podemos colocar diferentes tipos de elementos en ella
numeros_y_palabras.l<-list(23, 34, "agua", "casa")
#si usamos el comando "str" y vemos la estructura de la lista que acabamos de crear
#y así podemos ver la diferencia con el vector
str(numeros_y_palabras.l)
#si sólo queremos ver un componente de la lista
numeros_y_palabras.l[[2]]
#notese el uso de dos pares de corchetes para accesar el contenido a diferencia de los vectores
#si usamos solo un par de corchetes, podriamos ver el contenido pero no manipularlo directamente
#Otra característica de la lista es que puede tener muchos elementos dentro de cada componente de la lista
#Por ejemplo, si creamos dos vectores
n = c(2, 3, 5)
s = c("aa", "bb", "cc", "dd", "ee")
# y los guardamos en una lista
una_lista.l = list(n, s)
#cada vector es un componente de la lista x
#el primer elemento de la lista
una_lista.l[[1]]
#contiene 3 elementos
#y el segundo
una_lista.l[[2]]
#contiene cinco "palabras"
#o podemos ver el cuarto elemento
una_lista.l[[2]][4]
#pero usar número para nombrar los vectores que componen la lista puede ser
#confuso, así que puedo usar nombres
lista_2 = list(numeros=10:20, palabras=c("agua", "casa", "camino", "perro"))
#ahora puedo ver los elementos en "números" de la siguiente manera
lista_2[["numeros"]]
#o así
lista_2$numeros
#Si quisiera cambiar uno de los elementos en la lista puedo asignarle un valor nuevo
#Por ejemplo, el cuarto elemento en "palabras" es "perro" y quiero cambiarlo
# a "ventana"
lista_2[["palabras"]][4]<-"ventana"
#Ahora el resultado es diferente
lista_2[["palabras"]][4]
###LOOPS
#En un "loop" se prueba la validez lógica de una condición y se ejecuta
#una expresión si la condición es verdadera
primera_variable<-6
segunda_variable<-5
if (primera_variable > segunda_variable) {
print("la condicion es cierta")
}
#FOR nos ayuda a repetir una tarea varias veces
#Por ejemplo, digamos que tenemos un grupo de nombres
grupo_de_personas<-c("Pedro", "Manuel", "Isabel")
#Una manera de hacerlo es contar cuántos nombres hay e ir cambiando el elemento del vector que se quiere imprimir
for (numero in 1:length(grupo_de_personas)){
print(paste("Hola", grupo_de_personas[numero]))
}
#UNA MEJOR MANERA es dejar que el FOR ya busque cuantos elementos hay en el vector
for (nombre in grupo_de_personas){
print(paste("Hola", nombre))
}
#FUNCIONES
#Podemos crear nuestras propias funciones con una serie de comandos que queremos que se ejecuten
#o apliquen a los datos que le damos.
# Print `x` exactly `y` times (side effect only; the for loop's value,
# invisible NULL, is returned).
# Fix: the original iterated over 1:y, which is c(1, 0) when y == 0 and so
# printed twice instead of zero times; seq_len(y) handles y == 0 correctly.
# The unused intermediate `resultado` was also dropped.
repetir <- function(x, y){
  for (i in seq_len(y)) {
    print(x)
  }
}
repetir("hola", 5)
|
b0f8b8d04b7e55344d1fe2b42cfaac7a3b8743da
|
e0e538679b6e29837839fdbc3d68b4550e256bb9
|
/docs/spring/code/slide02.R
|
0384fd554116c1c7a58bee832b3a3545d165ebff
|
[] |
no_license
|
noboru-murata/sda
|
69e3076da2f6c24faf754071702a5edfe317ced4
|
4f535c3749f6e60f641d6600e99a0e269d1fa4ea
|
refs/heads/master
| 2020-09-24T20:23:36.224958
| 2020-09-22T07:17:54
| 2020-09-22T07:17:54
| 225,833,335
| 0
| 0
| null | 2019-12-05T08:20:51
| 2019-12-04T09:51:18
| null |
UTF-8
|
R
| false
| false
| 5,464
|
r
|
slide02.R
|
### 第2回 演習問題解答例
### 例題1
### Hamilton-Cayleyの定理の確認
## 行列を作成 (好きに設定してよい)
(A <- matrix(1:4,2,2)-diag(rep(3,2)))
## 左辺を計算
A%*%A - sum(diag(A)) * A + det(A) * diag(rep(1,2))
### 練習1.1
### 1から10までの2乗値からなるベクトル
1:10 # 1から10までのベクトル
1:10 * 1:10 # 2乗値のベクトル
### 練習1.2
### 1から10までの和
1:10 %*% rep(1,10) # (1,2,...,10)と(1,1,...,1)の内積
### 練習1.3
### 九九の表
matrix(rep(1:9,9),9,9) # 行ごとに1から9を並べる
matrix(rep(1:9,9),9,9,byrow=TRUE) # 列ごとに1から9を並べる
matrix(rep(1:9,9),9,9) * matrix(rep(1:9,9),9,9,byrow=TRUE)
### 練習1.4
### 30度の回転行列の2乗は60度の回転行列
theta <- pi/6 # 30度のラジアン値
R30 <- matrix(c(cos(theta),sin(theta),
-sin(theta),cos(theta)),2,2)
R60 <- matrix(c(cos(2*theta),sin(2*theta),
-sin(2*theta),cos(2*theta)),2,2)
R30 # 30度の回転行列
R30 %*% R30 # 30度の回転行列の2乗
R60 # 60度の回転行列
### 例題2
### 3元連立1次方程式の解法
## 行列とベクトルを作成 (好きに設定してよい)
## rnorm(9) は正規乱数を9つ作成する(第5回で詳しく説明)
(A <- matrix(rnorm(9),3,3)+diag(rep(1,3)))
(b <- 1:3)
## 解を計算
(x <- solve(A,b))
A%*%x # 結果の確認(b になるはず)
### 練習2.1
### 1から10までの2乗値からなるベクトル
(1:10)^2 # ^2も関数として成分ごとに計算される
### 練習2.2
### 回転してもベクトルの長さが変わらないことを確認
## 回転行列とベクトルを作成 (好きに設定してよい)
theta <- 2*pi/3 # 120度のラジアン値
(R <- matrix(c(cos(theta),sin(theta),
-sin(theta),cos(theta)),2,2))
(x <- 1:2)
(y <- R %*% x) # xを回転してyを作成
## 長さの2乗はベクトルの内積で計算できる
x %*% x # xの長さの2乗
as.vector(y) %*% as.vector(y) # yの長さの2乗
### 練習2.3
### エラーになる理由を考察
A %*% b # 列ベクトル (3x1型行列)
b %*% A # 行ベクトル (1x3型行列)
A %*% b + b %*% A # 大きさが異なるので計算できない
### 例題3
### if文の例
if(20200724 %% 19 == 0) {# %% は余りを計算
print("割り切れます")
print(20200724 %/% 19) # 商を表示
} else { # {}で囲まれたブロックが1つのプログラム
print("割り切れません")
print(20200724 %% 19) # 余りを表示
}
### 例題4
### for文の例
print(LETTERS) # LETTERS ベクトルの内容を表示
for(i in c(20,15,11,25,15)) {
print(LETTERS[i]) # 順番に表示
}
### 例題5
### while文の例
n <- 20200809 # 分解の対象
p <- 2 # 最初に調べる数
while(n != 1){ # 商が1になるまで計算する
for(i in p:n){ # pからnまで順に調べる
if(n%%i == 0) { # 余りが0か確認
print(i) # 割り切った数を表示
n <- n/i # 商を計算して分解の対象を更新
p <- i # 最初に調べる数を更新
break # for文を途中で終了
}
}
}
### 例題6
### 三角形の面積を計算する関数
## Area of a triangle from its three side lengths (Heron's formula).
## Returns NaN (with a warning from sqrt) when the sides cannot form a
## triangle, because the radicand is then negative.
area <- function(x,y,z){
  s <- (x + y + z) / 2                   # semi-perimeter
  sqrt(s * (s - x) * (s - y) * (s - z))  # sqrt() instead of ^(1/2), as the
                                         # original's own comment suggested
}
area(3,4,5) # 直角三角形で検算
area(12,13,5)
### 練習3.1
### 階乗を計算する関数
## Factorial via a for loop.
## NOTE: intentionally wrong for n == 0 (teaching example): `1:0` iterates
## over c(1, 0), so fact1(0) returns 0 instead of 1; fact2 below fixes this
## with an explicit base case.
fact1 <- function(n){
  val <- 1
  for(i in 1:n){
    val <- val*i
  }
  return(val)
}
fact1(0) # 間違い
fact1(1)
fact1(2)
fact1(3)
fact1(4)
## Corrected factorial: handles n == 0 with an explicit base case before
## looping (fixes the fact1 bug where 1:0 iterates over c(1, 0)).
fact2 <- function(n){
  if (n == 0) {
    return(1)  # 0! = 1
  }
  acc <- 1
  for (k in 1:n) {
    acc <- acc * k
  }
  acc
}
fact2(0) # 正しい
fact2(1)
fact2(2)
fact2(3)
fact2(4)
## Factorial via a countdown loop; for n <= 0 the loop body never runs and
## the result stays 1, so the n == 0 base case is handled implicitly.
fact3 <- function(n){
  acc <- 1
  m <- n
  repeat {
    if (m <= 0) break
    acc <- acc * m
    m <- m - 1
  }
  acc
}
fact3(0)
fact3(1)
fact3(2)
fact3(3)
fact3(4)
### 練習3.2
### Fibonacci数を返す関数
## Iterative Fibonacci: returns the n-th Fibonacci number with f(0) = 0 and
## f(1) = 1. Negative n prints an error message and returns NA.
fibo <- function(n){
  if (n < 0) {
    print("計算できません")  # message text kept byte-identical to the original
    return(NA)
  }
  if (n == 0) {
    return(0)
  }
  if (n == 1) {
    return(1)
  }
  prev <- 0  # f(k-2)
  curr <- 1  # f(k-1)
  for (k in 2:n) {
    nxt <- prev + curr  # f(k) = f(k-1) + f(k-2)
    prev <- curr
    curr <- nxt
  }
  curr
}
### 練習3.3
### 行列の列の平均を計算する関数
colave <- function(X) {
  # Mean of each column of matrix X, one column at a time.
  means <- numeric(ncol(X))
  for (j in seq_len(ncol(X))) {
    means[j] <- mean(X[, j])
  }
  means
}
(A <- matrix(1:12,3,4,byrow=TRUE))
colave(A)
### 練習3.4
### ベクトルと行列を扱えるように修正
colave <- function(X) {
  # Column means; a plain vector is treated as one single column.
  if (is.vector(X)) {
    return(mean(X))
  }
  vapply(seq_len(ncol(X)), function(j) mean(X[, j]), numeric(1))
}
(A <- matrix(1:12,3,4,byrow=TRUE))
colave(A)
(x <- 1:12)
colave(x)
|
ec0f89bdd913e3c103690ff605ce323f92a9f633
|
47ff58b06afa42fea7b7102fc180ba5a8312fd20
|
/geom_voronoi.R
|
93ddb41e3d419f8b10f40c1df1d3a428cf32b211
|
[] |
no_license
|
garretrc/voronoi_dev
|
ae94b1dfd0476056248831cd7ab6dabd20fcc4f2
|
1095ee0e935bf1a355e921d188a091f34cb89531
|
refs/heads/master
| 2020-03-14T23:45:32.968600
| 2018-07-24T13:33:02
| 2018-07-24T13:33:02
| 131,852,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 419
|
r
|
geom_voronoi.R
|
# ggplot2 layer constructor for Voronoi tessellation polygons.
#
# Thin wrapper around ggplot2's layer() that pairs the StatVoronoi stat
# (defined elsewhere in this package) with the stock GeomPolygon geom.
#
# mapping, data, stat, position, show.legend, inherit.aes:
#   standard ggplot2 layer arguments, forwarded unchanged.
# na.rm:   forwarded into the layer's params list.
# outline: forwarded into the layer's params list; presumably a boundary
#          used by StatVoronoi to clip the tessellation -- TODO confirm
#          against the StatVoronoi implementation.
# ...:     additional stat/geom parameters, forwarded via params.
geom_voronoi = function (mapping = NULL, data = NULL, stat = StatVoronoi, position = "identity",
...,na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, outline = NULL)
{
layer(data = data, mapping = mapping, stat = stat, geom = GeomPolygon,
position = position, show.legend = show.legend, inherit.aes = inherit.aes,
params = list(na.rm = na.rm, outline = outline,...))
}
|
1a2b1f1d65f143b38a8eda16ee036105e0e09ca3
|
6287eae42147975e4032aeac9261a33bbebe3124
|
/Functions.R
|
246e4b8bcfc29e45c351dabfb4a1a6ffc3cd2d29
|
[] |
no_license
|
vaisvila/R-bioeco
|
ec3d8389c5348febd2c840473ebd230e061bed8a
|
76b810828aecb29dbf6696234b164eaaade3d09f
|
refs/heads/master
| 2022-12-11T02:20:09.122531
| 2020-09-15T19:58:26
| 2020-09-15T19:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,205
|
r
|
Functions.R
|
library(tidyverse)
setwd("~/Dropbox/R Biology")
# Load the main data file
Main <- read.csv("Creosote_seed_weights.csv", header=T)
names(Main)
Main$seed[is.na(Main$seed)] <- 0 # Fill NAs with zeros
Main$shrub <- as.character(Main$shrub) # Make shrub # a character variable
str(Main)
Main <- Main %>%
mutate(binary = if_else(seed == 0, 0, 1)) %>% # Create binary variable for seed production
glimpse()
# Make a proportional value of seed weight to mericarp weight
Main$prop.seed <- Main$seed / Main$mericarp
#### Let's assume we need to calculate a percentage value
# and want to add the "%" sign for a table to our output
#### The long way--do this over and over for any variable of interest
# changing the variable name in the code
percent <- round(Main$prop.seed * 100, digits = 1)
result <- paste(percent, "%", sep = " ")
print(result)
#### The easily repeated way--Make a function and use it forever
# by calling the function for the varible name
calc_percent <- function(x) {
  # Format a proportion (0-1 scale) as a percentage string with one
  # decimal place and a trailing " %", e.g. 0.256 -> "25.6 %".
  pct <- round(100 * x, digits = 1)
  paste(pct, "%", sep = " ")
}
# Example use of your function
calc_percent(Main$prop.seed)
#### Another example: perhaps you commonly use a specific transformation
# or index that would require another package be loaded but the package
# creates conflicts with other packages that are in use
hist(Main$prop.seed) # Are the proportions of mericarp weight made up by seeds normal?
#### A common transformation for variables on a 0 to 1.0 scale is the "logit"
# Logit is available in several packages but not base R, so let's make our own function
# We can make a transformation to logit for our proportional weight
logit_trans <- function(x) {
  # Logit (log-odds) transform for values on the (0, 1) scale:
  # log(x) - log(1 - x).
  log(x) - log1p(-x)
}
# Apply the transformation to the proportional weights and recheck the distribution
Main$logit_seed <- logit_trans(Main$prop.seed)
hist(Main$logit_seed) # Now it's better
#### How many mericarps have no seeds? Let's find the zeros
# Count zero-valued entries in each column of a data frame or matrix.
#
# x: data frame or matrix.
# Returns a named vector of zero counts, one per column.
#
# FIX: the original colSums() call had no na.rm, so any NA in a column made
# that column's count NA. NAs are now skipped, which also matches the
# combined ColumnInfo() helper in this file (it uses na.rm = TRUE).
ZerosPerCol <- function(x) {
  is_zero <- (x == 0)          # NA entries stay NA here ...
  colSums(is_zero, na.rm = TRUE)  # ... and are ignored in the count
}
ZerosPerCol(Main)
#### I ran into a frustration yesterday while working with a student on data
# We knew there were NAs in the data set and could find them in a list view
# but wanted a summary by each column--so I wrote this function for later
# A function to sum for NAs by column
NAsPerCol <- function(x) {
  # Count missing (NA) entries in each column of a data frame or matrix.
  colSums(is.na(x))
}
NAsPerCol(Main) # Apply the function to the data object
#### How do we combine these functions into one?
ColumnInfo <- function(X1, Choice1) {
  # Per-column count of either zeros ("Zeros") or missing values ("NAs"),
  # selected by Choice1. NAs never poison the counts (na.rm = TRUE).
  if (Choice1 == "Zeros") D <- (X1 == 0)
  if (Choice1 == "NAs") D <- is.na(X1)
  colSums(D, na.rm = TRUE)
}
ColumnInfo(Main, "Zeros") # Here we need to specify which choice we want
#### Let's add a default choice to the function
ColumnInfo <- function(X1, Choice1 = "Zeros") {
  # Same as above but with "Zeros" as the default choice, so a bare
  # ColumnInfo(df) counts zeros per column.
  if (Choice1 == "Zeros") D <- (X1 == 0)
  if (Choice1 == "NAs") D <- is.na(X1)
  colSums(D, na.rm = TRUE)
}
ColumnInfo(Main) # defaults to zeros
ColumnInfo(Main, "NAs") # specified as NAs
#### Existing R functions have error messages to help you debug your code
# Make an intentional mistake and mis-spell "NAs" as "nas"
ColumnInfo(Main, "nas") # You get gibberish
#### Fix this in your function
ColumnInfo <- function(X1, Choice1 = "Zeros") {
  # Count zeros or NAs per column; a Choice1 other than "Zeros"/"NAs"
  # prints an error message instead of failing with a cryptic error.
  if (Choice1 == "Zeros") D <- (X1 == 0)
  if (Choice1 == "NAs") D <- is.na(X1)
  if (!(Choice1 %in% c("Zeros", "NAs"))) {
    print("Error in specified choice text")
  } else {
    colSums(D, na.rm = TRUE)
  }
}
# Make the same mistake again
ColumnInfo(Main, "nas")
#### YOUR TURN: WRITE A FUNCTION THAT CONVERTS mg TO ounces AND RUN IT ON THE SEED WEIGHTS
# mg * 0.000035274 = oz
# verify by plotting the seed weight in mg against seed weight in oz
mg_to_oz <- function(x) {
  # Convert milligrams to ounces (1 mg = 0.000035274 oz),
  # rounded to 5 decimal places.
  round(x * 0.000035274, digits = 5)
}
Main$seed_wt_oz <- mg_to_oz(Main$seed)
library(ggplot2)
ggplot(data = Main, aes(x = seed, y = seed_wt_oz)) +
geom_point()
#### YOUR TURN: WRITE A FUNCTION THAT CONVERTS FAHRENHEIT INTO CELSIUS
# (F - 32) * (5/9)
# verify that it works by checking the freezing and boiling points
F_to_C <- function(x) {
  # Convert degrees Fahrenheit to Celsius, rounded to 2 decimal places.
  celsius <- (x - 32) * (5/9)
  round(celsius, digits = 2)
}
F_to_C(32)
F_to_C(212)
|
9dd34a749629c81eb85248c77cc9a4c16ca4d67d
|
f516a90d0210956806165f7561e7bbd692e81e57
|
/cachematrix.R
|
f4af6d975ec2ee88d474766b30c0fe4d9d726418
|
[] |
no_license
|
Rouzhman/ProgrammingAssignment2
|
99327237f78904392841166e27c7007b4a4f1ee4
|
0bad3ea65c623064be150cab0c10a488486fdc82
|
refs/heads/master
| 2021-01-24T01:40:41.871075
| 2016-01-25T04:52:25
| 2016-01-25T04:52:25
| 50,325,278
| 0
| 0
| null | 2016-01-25T04:08:00
| 2016-01-25T04:08:00
| null |
UTF-8
|
R
| false
| false
| 1,439
|
r
|
cachematrix.R
|
# This function creates a special "matrix" object,
# which is a list containing a function to
# set the value of the matrix
# get the value of the matrix
# set the value of the inverse of the matrix
# get the value of the inverse of the matrix
# Build a cache-aware "matrix" object: a list of four closures that share
# the matrix `x` and a cached inverse. Setting a new matrix invalidates
# the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL   # new matrix -> stale cache
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## The following function calculates the inverse
# of the special "matrix" created with the above function.
# Then, it first checks to see if the inverse has already been calculated.
# If so, it gets the matrix inverse from the cache and skips the computation.
# Otherwise, it calculates the matrix inverse of the data
# and sets the value of the inverse in the cache via the setInverse function.
# Return the inverse of the special "matrix" x (made by makeCacheMatrix),
# computing it with solve() only on the first call and reusing the cached
# value (with a message) afterwards. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)   # remember for next time
    return(inverse)
  }
  message("getting cached data")
  cached
}
# example
f <- matrix(c(1, -2, 5, 3), 2, 2)
k <- makeCacheMatrix(f)
cacheSolve(k)
|
5fa3539bd57b74eb75062d9d14ae19495c1b390f
|
b9db037ee7bc2ebf9c228ad1f66fecabccfa70be
|
/man/Portfolio-class.Rd
|
f34d51d92a02b67ea991a8e04632e7f6493e3e1d
|
[] |
no_license
|
IsaakBM/prioritizr
|
924a6d8dcc7c8ff68cd7f5a2077de2fa1f300fe7
|
1488f8062d03e8736de74c9e7803ade57d6fcc29
|
refs/heads/master
| 2020-12-10T06:23:19.437647
| 2019-12-22T00:04:20
| 2019-12-22T00:04:20
| 233,524,401
| 1
| 0
| null | 2020-01-13T06:13:19
| 2020-01-13T06:13:18
| null |
UTF-8
|
R
| false
| true
| 1,319
|
rd
|
Portfolio-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Portfolio-proto.R
\name{Portfolio-class}
\alias{Portfolio-class}
\alias{Portfolio}
\title{Portfolio prototype}
\description{
This prototype is used to represent methods for generating portfolios of
optimization problems. \strong{This class represents a recipe to
create portfolio generating method and is only recommended for use by expert
users. To customize the method used to generate portfolios, please see the
help page on \code{\link{portfolios}}}.
}
\section{Fields}{
\describe{
\item{$name}{\code{character} name of portfolio method.}
\item{$parameters}{\code{Parameters} object with parameters used to customize
the portfolio.}
\item{$run}{\code{function} used to generate a portfolio.}
}
}
\section{Usage}{
\code{x$print()}
\code{x$show()}
\code{x$repr()}
\code{x$run(op, sol)}
}
\section{Arguments}{
\describe{
\item{x}{\code{\link{Solver-class}} object.}
\item{op}{\code{\link{OptimizationProblem-class}} object.}
}
}
\section{Details}{
\describe{
\item{print}{print the object.}
\item{show}{show the object.}
\item{repr}{\code{character} representation of object.}
\item{run}{solve an \code{\link{OptimizationProblem-class}} object using this
object and a \code{\link{Solver-class}} object.}
}
}
|
669a5217421e4de622d957e45b9fbeabf95655a1
|
ca1d77c04fb3c44b5456e20f604c69fd0626a53f
|
/R/frequency_ld.r
|
f9425dd3d5447349c9f3c3594b9372fa2373c5a3
|
[] |
no_license
|
nreid/mscr
|
1d4643f37d85ade3d21d5344dd0b58b75c3a0cab
|
280e805154cdf162ca242efcd8bb7b7f66d2fdf1
|
refs/heads/master
| 2021-01-17T09:28:53.309441
| 2016-05-20T23:32:01
| 2016-05-20T23:32:01
| 42,897,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,021
|
r
|
frequency_ld.r
|
###see file Frequency_LD_scripts.txt for some tests
###functions to estimate allele frequencies given precalculated genotype likelihoods from Lynch 2008
#Lpgl gives the likelihood of an allele frequency, p, given a vector of genotype likelihoods, gl
#x is a vector of 3 genotype likelihoods
#p is an allele frequency
#assume p is alt allele, and gls are ordered (1-p)^2,2*(1-p)*p,p^2
#gls are from freebayes, log10 scaled.
# Natural-log likelihood of allele frequency p for one individual, given
# its genotype likelihoods.
#
# gl: numeric vector of 3 genotype likelihoods (log10-scaled, e.g. from
#     freebayes), ordered ref/ref, ref/alt, alt/alt (p is the alt allele).
# p:  alternate-allele frequency in [0, 1].
#
# Returns log( (1-p)^2 * L(rr) + 2p(1-p) * L(ra) + p^2 * L(aa) ), i.e. the
# Hardy-Weinberg-weighted mixture of the genotype likelihoods. Individuals
# with any missing likelihood contribute log(1) = 0 (no information), so
# they drop out of a per-site sum.
Lpgl_ind <- function(gl, p) {
  if (anyNA(gl)) {           # was sum(is.na(gl)) used as truthiness
    return(log(1))
  }
  lik10 <- 10^gl             # back-transform from log10 scale
  lik <- (1 - p)^2 * lik10[1] + 2 * p * (1 - p) * lik10[2] + p^2 * lik10[3]
  log(lik)
}
#function to apply function Lpgl across a table of genotype likelihoods for multiple individuals at a single site
#columns are gls, ordered (1-p)^2,2*(1-p)*p,p^2, rows are individuals
# Log-likelihood of allele frequency pt at one site: the sum of
# per-individual log-likelihoods from Lpgl_ind().
#
# pt:  candidate alternate-allele frequency.
# glt: matrix of genotype likelihoods, one row per individual, columns
#      ordered ref/ref, ref/alt, alt/alt (log10-scaled).
Lpgl_site <- function(pt, glt) {
  per_ind <- apply(glt, MARGIN = 1, FUN = Lpgl_ind, p = pt)
  sum(per_ind)
}
#function to calculate frequencies
#vcf is vcf file read in using read.table()
#sub is a vector of individual numbers (not column numbers) to calculate the frequency from
# Maximum-likelihood alternate-allele frequency at every site of a VCF table.
#
# vcf: VCF body as read by read.table(); genotype columns start at column 10
#      and end with a comma-separated, log10-scaled genotype-likelihood field
#      (the text after the last ':' of each genotype entry).
# sub: vector of individual numbers (not column numbers) to use;
#      NULL = all individuals.
#
# Returns a numeric vector with one frequency per site, each obtained by
# maximising Lpgl_site() over p in [0, 1] via optimize().
calcfreqs<-function(vcf,sub=NULL){
  nsites<-length(vcf[,1])
  if(is.null(sub)){
    # default to every genotype column (a VCF has 9 fixed columns)
    sub<-1:(length(vcf[1,])-9)
  }
  # keep only the requested genotype columns, then strip everything up to
  # the last ':' so only the GL field ("a,b,c") remains
  vcf<-as.matrix(vcf[,sub+9])
  vcf<-gsub(".*:","",vcf)
  freqs<-rep(NA,nsites)
  for(i in 1:nsites){
    # rows = individuals, cols = the 3 genotype likelihoods; "." marks
    # missing data and becomes NA before the numeric conversion
    gltab<-do.call(rbind,strsplit(x=vcf[i,],split=","))
    gltab[gltab=="."]<-NA
    class(gltab)<-"numeric"
    freqs[i]<-optimize(f=Lpgl_site, interval=c(0,1),glt=gltab,maximum=TRUE)$maximum
    if(i%%1000==0){
      cat(i, " iterations are done\n")  # progress report
    }
  }
  return(freqs)
}
###this function takes genotype likelihoods from two loci gl1,gl2
###a value of D, and allele frequencies at each locus, p,q
###and returns the natural log scaled likelihood of D.
# Natural-log likelihood of linkage disequilibrium d between two loci for
# one individual, given genotype likelihoods and allele frequencies.
#
# gl1, gl2: vectors of 3 log10-scaled genotype likelihoods at locus 1 and
#           locus 2, ordered ref/ref, ref/alt, alt/alt.
# d:        linkage disequilibrium coefficient D.
# p, q:     alternate-allele frequencies at locus 1 and locus 2.
#
# Haplotype frequencies under LD:
#   alt-alt = pq + D          alt-ref = p(1-q) - D
#   ref-alt = (1-p)q - D      ref-ref = (1-p)(1-q) + D
# The 10 terms below enumerate every unordered pair of haplotypes (every
# two-locus genotype), weighting each by its genotype likelihoods;
# heterozygous combinations carry a factor of 2. Individuals with any
# missing likelihood contribute log(1) = 0.
Ld_glpq_ind <- function(gl1, gl2, d, p, q) {
  if (any(is.na(c(gl1, gl2)))) {
    return(log(1))
  }
  altalt <- log(p * q + d, base = 10)
  altref <- log(p * (1 - q) - d, base = 10)
  refalt <- log((1 - p) * q - d, base = 10)
  # BUG FIX: was (1-p)*(1-p)+d; the ref-ref haplotype frequency is
  # (1-p)*(1-q)+d, so the four frequencies sum to 1.
  refref <- log((1 - p) * (1 - q) + d, base = 10)
  lscores <- c(
    gl1[3] + gl2[3] + altalt + altalt,
    gl1[3] + gl2[1] + altref + altref,
    gl1[1] + gl2[3] + refalt + refalt,
    gl1[1] + gl2[1] + refref + refref,
    gl1[3] + gl2[2] + altalt + altref + log(2, base = 10),
    gl1[1] + gl2[2] + refalt + refref + log(2, base = 10),
    gl1[2] + gl2[3] + altalt + refalt + log(2, base = 10),
    gl1[2] + gl2[1] + altref + refref + log(2, base = 10),
    gl1[2] + gl2[2] + altalt + refref + log(2, base = 10),
    gl1[2] + gl2[2] + altref + refalt + log(2, base = 10)
  )
  log(sum(10^lscores))
}
##calculate likelihood of D given gls and frequencies for two sites for all individuals
# Log-likelihood of D = ds for one pair of sites, summed over all
# individuals. gl1s/gl2s hold per-individual genotype likelihoods (3 cols
# each); ps/qs are the two sites' alternate-allele frequencies.
Ld_glpq_sitepair <- function(ds, gl1s, gl2s, ps, qs) {
  both <- cbind(gl1s, gl2s)
  per_ind <- apply(both, MARGIN = 1, FUN = function(row) {
    Ld_glpq_ind(row[1:3], row[4:6], d = ds, p = ps, q = qs)
  })
  sum(per_ind)
}
# Maximum-likelihood pairwise linkage disequilibrium (D) from genotype
# likelihoods in a VCF table.
#
# vcf:     VCF body as read by read.table(); genotype columns start at
#          column 10 and end with a log10-scaled "a,b,c" GL field.
# sub:     individual numbers (not column numbers) to use; NULL = all.
# pvec:    precomputed allele frequencies; computed with calcfreqs() if NULL.
# maxdist: maximum separation of site pairs to evaluate, measured in number
#          of retained sites (bypos = FALSE) or genomic position
#          (bypos = TRUE); NULL = evaluate all pairs.
#
# Returns a matrix with one row per site pair:
#   pos1, pos2, freq1, freq2, ML estimate of D.
calcD <- function(vcf, sub = NULL, pvec = NULL, maxdist = NULL, bypos = FALSE) {
  if (is.null(pvec)) {
    print("calculating allele frequencies")
    pvec <- calcfreqs(vcf = vcf, sub = sub)
  }
  # keep only reasonably polymorphic sites (0.2 < p < 0.8)
  subsites <- pvec > 0.2 & pvec < 0.8
  pvec <- pvec[subsites]
  vcf <- vcf[subsites, ]
  pos <- vcf[, 2]
  nsites <- dim(vcf)[1]
  if (is.null(sub)) {
    sub <- 1:(length(vcf[1, ]) - 9)
  }
  vcf <- as.matrix(vcf[, sub + 9])
  vcf <- gsub(".*:", "", vcf)  # keep the GL field (text after last ':')
  allpairs <- t(combn(1:nsites, 2))
  # BUG FIX: the original tests were !is.null(maxdist & !bypos) and
  # !is.null(maxdist & bypos). `NULL & TRUE` is logical(0) -- not NULL --
  # so both tests were TRUE even with maxdist = NULL, and comparing
  # against a NULL maxdist then silently dropped every site pair.
  if (!is.null(maxdist) && !bypos) {
    allpairs <- allpairs[allpairs[, 2] - allpairs[, 1] < maxdist, ]
  }
  if (!is.null(maxdist) && bypos) {
    pairpos <- t(combn(pos, 2))
    subpos <- pairpos[, 2] - pairpos[, 1] < maxdist
    allpairs <- allpairs[subpos, ]
  }
  npairs <- dim(allpairs)[1]
  out <- matrix(nrow = npairs, ncol = 5)
  cat("calculating D values\n")
  for (i in 1:npairs) {
    GL1 <- do.call(rbind, strsplit(split = ",", vcf[allpairs[i, 1], ]))
    GL2 <- do.call(rbind, strsplit(split = ",", vcf[allpairs[i, 2], ]))
    class(GL1) <- "numeric"
    class(GL2) <- "numeric"
    P1 <- pvec[allpairs[i, 1]]
    P2 <- pvec[allpairs[i, 2]]
    # feasible range of D given the two allele frequencies
    dmin <- (-min(P1 * P2, (1 - P1) * (1 - P2)))
    dmax <- min(P1 * (1 - P2), (1 - P1) * (P2))
    out[i, 1] <- pos[allpairs[i, 1]]
    out[i, 2] <- pos[allpairs[i, 2]]
    out[i, 3] <- P1
    out[i, 4] <- P2
    out[i, 5] <- optimize(f = Ld_glpq_sitepair, interval = c(dmin, dmax),
                          gl1s = GL1, gl2s = GL2, ps = P1, qs = P2,
                          maximum = TRUE)$maximum
    if (i %% 1000 == 0) {
      cat(i, " iterations are done\n")
    }
  }
  return(out)
}
|
a953301d6992ce36d0769046602cca4c7dd20e4f
|
3d560900291b0b323d1c8f5512e47a785774141e
|
/R/utils.R
|
6f2cd3f08ff1f104df339ba499b97c9ebcbbfddc
|
[
"MIT"
] |
permissive
|
hjanime/ggrgl
|
a4de12f6ede8471dbd542499d730e92e420318fa
|
27ba63cc57102e1f410273f688ef7e4ea7a01d85
|
refs/heads/main
| 2023-02-01T16:17:22.454753
| 2020-12-21T20:00:05
| 2020-12-21T20:00:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,001
|
r
|
utils.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Darken a hex colour by the given amount
#'
#' Scales each RGB channel towards zero by `amount`.
#' Adapted from \url{https://benjaminlmoore.wordpress.com/2014/03/18/guardian-data-blog-uk-elections/}
#'
#' @param hex_colour strings e.g. "#345678"
#' @param amount fraction to darken by. default 0.15
#'
#' @return darkened hex colours
#'
#' @importFrom grDevices col2rgb rgb
#' @export
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
darken_colour <- function(hex_colour, amount = 0.15) {
  # col2rgb() gives a 3 x n channel matrix; shrink every channel, then
  # transpose so rgb() sees one colour per row.
  channels <- col2rgb(hex_colour) * (1 - amount)
  rgb(t(channels), maxColorValue = 255)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' @rdname darken_colour
#' @export
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
lighten_colour <- function(hex_colour, amount = 0.15) {
  # Scale every channel up by `amount`, capping at 255 so the result
  # remains a valid colour, then convert back to a hex string.
  channels <- col2rgb(hex_colour) * (1 + amount)
  capped <- pmin(channels, 255)
  rgb(t(capped), maxColorValue = 255)
}
|
348c010f871a60436ae187d42f3a9b9c99b392af
|
b7d1239a7708bcd011f712c4119cf907816fc6df
|
/cloud-seeding-analysis-2-19-04.R
|
2247dd2114f99f21318c5c8250b325c8adbeb42d
|
[] |
no_license
|
wstuetzle/STAT180
|
45513e05576833ca9dd62ca732c9ffd33c6a8ee6
|
5a9ab7ab5cb63ed502181229a4fa58499c396f89
|
refs/heads/master
| 2021-05-13T12:47:00.509865
| 2019-02-20T20:11:20
| 2019-02-20T20:11:20
| 116,683,798
| 1
| 2
| null | 2019-05-29T22:40:01
| 2018-01-08T14:01:23
|
HTML
|
UTF-8
|
R
| false
| false
| 5,404
|
r
|
cloud-seeding-analysis-2-19-04.R
|
## Experiment with cloud seeding data(2-10-04)
## Clouds are first selected as suitable or unsuitable for seeding
## Then a coin is tossed to decide whether or not they are actually
## seeded
pathname <- "g:\\Stat390\\Winter04\\Bootstrap\\cloud-seeding-2-19-04.dat"
source(pathname)
length(seeded)
length(unseeded)
n <- length(seeded) ## 26 seeded, 26 unseeded
par(mfrow = c(2,1))
hist(seeded, col = "green", nclass = 10, ylim = c(0,20))
hist(unseeded, col = "green", nclass = 10, ylim = c(0, 20))
par(mfrow = c(1,1))
boxplot(seeded, unseeded, col = "green", names = c("seeded", "unseeded"))
## Look at difference between means
diff.means <- mean(seeded) - mean(unseeded) ## 277
se.diff.means <- sqrt((var(seeded) + var(unseeded))/n) ## 139
ci.lower.sd <- diff.means - 1.96 * se.diff.means
ci.upper.sd <- diff.means + 1.96 * se.diff.means
c(ci.lower.sd, ci.upper.sd) ## (5, 549)
## 95% CLT based confidence interval for difference in means
## is [5, 549]
## Find Bootstrap distribution for difference between means
nboot <- 1000
diff.means.boot <- rep(0, nboot)
for (i in 1:nboot) {
seeded.boot <- sample(seeded, n, replace = T)
unseeded.boot <- sample(unseeded, n, replace = T)
diff.means.boot[i] <- mean(seeded.boot) - mean(unseeded.boot)
}
hist(diff.means.boot, col = "green", nclass = 20, main = "")
title("Cloud seeding: Bootstrap distribution of difference between means",
cex.main = 1.0)
## Looks pretty Gaussian
## Compute Bootstrap percentile interval for difference between means
ci.lower.perc <- sort(diff.means.boot)[0.05 * nboot / 2 + 1]
ci.upper.perc <- sort(diff.means.boot)[nboot -0.05 * nboot / 2]
c(ci.lower.perc, ci.upper.perc)
## 95% Bootstrap percentile confidence interval for difference in means
## is [25, 569]
##-----------------------------------------------------------------
## Now analyze difference between medians
diff.medians <- median(seeded) - median(unseeded) ## 177
nboot <- 1000
diff.medians.boot <- rep(0, nboot)
for (i in 1:nboot) {
seeded.boot <- sample(seeded, n, replace = T)
unseeded.boot <- sample(unseeded, n, replace = T)
diff.medians.boot[i] <- median(seeded.boot) - median(unseeded.boot)
}
hist(diff.medians.boot, col = "green", nclass = 20, main = "")
title("Cloud seeding: Bootstrap distribution of difference between medians",
cex.main = 1.0)
## Compute 95% CLT based Bootstrap confidence interval for difference
## between medians
ci.lower.sd <- diff.medians - 1.96 * sd(diff.medians.boot)
ci.upper.sd <- diff.medians + 1.96 * sd(diff.medians.boot)
c(ci.lower.sd, ci.upper.sd)
## 95% CLT based bootstrap confidence interval for difference between
## medians is [53, 301]
## Compute 95% Boostrap percentile confidence intervals for difference
## between medians
ci.lower.perc <- sort(diff.medians.boot)[0.05 * nboot / 2 + 1]
ci.upper.perc <- sort(diff.medians.boot)[nboot -0.05 * nboot / 2]
c(ci.lower.perc, ci.upper.perc)
## 95% Bootstrap percentile confidence interval for difference
## between medians is [40, 262]
## Summary of analysis so far: There is a consistent picture - cloud
## seeding appears to increase the amount of rainfall.
##=================================================================
##=================================================================
## Permutation tests: An alternative way of looking at the question
rainfalls <- c(seeded, unseeded)
labels <- c(rep("seeded", n), rep("unseeded", n))
## Compute difference between between medians of "seeded" and
## "unseeded" rainfalls
diff.orig.medians <- median(rainfalls[labels == "seeded"]) -
median(rainfalls[labels == "unseeded"])
diff.orig.medians
## If there was no difference, then switching labels should not matter
nperm <- 1000
diff.permuted.medians <- rep(0, nperm)
for (i in 1:nperm) {
permuted.labels <- sample(labels)
diff.permuted.medians[i] <- median(rainfalls[permuted.labels == "seeded"]) -
median(rainfalls[permuted.labels == "unseeded"])
}
hist(diff.permuted.medians, nclass = 20, col = "green", main = "")
abline(v = diff.orig.medians, col = "red", lwd = 3)
title("Cloud seeding: Permutation distribution of difference between medians",
cex.main = 1.0)
sum(diff.permuted.medians > diff.orig.medians) ## 7
## If seeding had no effect, only 0.7% of the (52 choose 26) possible
## assignements of labels to rainfalls would give a difference bigger
## than the one we observed. Therefore it is very unlikely that the
## difference can be explained by a fortuitous choice of clouds for
## seeding.
## Repeat analysis for means instead of medians
diff.orig.means <- mean(rainfalls[labels == "seeded"]) -
mean(rainfalls[labels == "unseeded"])
diff.orig.means
nperm <- 1000
diff.permuted.means <- rep(0, nperm)
for (i in 1:nperm) {
permuted.labels <- sample(labels)
diff.permuted.means[i] <- mean(rainfalls[permuted.labels == "seeded"]) -
mean(rainfalls[permuted.labels == "unseeded"])
}
hist(diff.permuted.means, nclass = 20, col = "green", main = "")
abline(v = diff.orig.means, col = "red", lwd = 3)
title("Cloud seeding: Permutation distribution of difference between means",
cex.main = 1.0)
sum(diff.permuted.means > diff.orig.means) ## 26
|
355c49ae38fd847397eb5ff42ba2b26e18f4127a
|
863aa7e71911423a9096c82a03ef755d1cf34654
|
/man/recursive_feature_elimination.Rd
|
89ecc1d0b0f4c307156e80fcf2d9fdb4bd7a778c
|
[] |
no_license
|
BioSystemsUM/specmine
|
8bd2d2b0ee1b1db9133251b80724966a5ee71040
|
13b5cbb73989e1f84e726dab90ff4ff34fed68df
|
refs/heads/master
| 2023-08-18T05:51:53.650469
| 2021-09-21T13:35:11
| 2021-09-21T13:35:11
| 313,974,923
| 1
| 1
| null | 2021-09-21T13:35:12
| 2020-11-18T15:22:49
|
R
|
UTF-8
|
R
| false
| false
| 1,729
|
rd
|
recursive_feature_elimination.Rd
|
\name{recursive_feature_elimination}
\alias{recursive_feature_elimination}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Perform recursive feature elimination
}
\description{
Perform recursive feature elimination on the dataset using caret's package.
}
\usage{
recursive_feature_elimination(datamat, samples.class,
functions = caret::rfFuncs, method = "cv", repeats = 5,
number = 10, subsets = 2^(2:4))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{datamat}{
data matrix from dataset.
}
\item{samples.class}{
string or index indicating what metadata to use.
}
\item{functions}{
a list of functions for model fitting, prediction and variable importance.
}
\item{method}{
the external resampling method: boot, cv, LOOCV or LGOCV (for repeated training/test splits).
}
\item{repeats}{
for repeated k-fold cross-validation only: the number of complete sets of folds to compute.
}
\item{number}{
either the number of folds or number of resampling iterations.
}
\item{subsets}{
a numeric vector of integers corresponding to the number of features that should be retained.
}
}
\value{
A caret's rfe object with the result of recursive feature selection.
}
\examples{
\donttest{
## Example of recursive feature elimination
library(specmine.datasets)
data(cachexia)
library(caret)
rfe.result = recursive_feature_elimination(cachexia$data,
cachexia$metadata$Muscle.loss, functions = caret::rfFuncs,
method = "cv", number = 3, subsets = 2^(1:6))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ rfe }
\keyword{ wrappers }% __ONLY ONE__ keyword per line
|
5fd56dc628cda00f89bc598a8ac3ef19cbd36ebf
|
1bdfacbfb304b3056afe40a259374c19f7b80f50
|
/Morpho_penalty.R
|
428e611a4c46c4ef2196393ff97c3e460d96954a
|
[] |
no_license
|
M-Atsuhiko/Gausian
|
ed71ee76ae183fa0283ec52b4b0bf62c1aaa2421
|
e84e9895eb8c93b39d5837a8aa20a2caab91a1e4
|
refs/heads/master
| 2020-05-18T13:50:49.084679
| 2015-02-10T09:53:40
| 2015-02-10T09:53:40
| 28,582,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
r
|
Morpho_penalty.R
|
Morpho_penalty <- function(TREE){
  # Penalty term (presumably for dendrite morphology -- the names suggest
  # upper/lower dendrites and synaptic zones). TREE is a list of two branch
  # lists; each branch carries a "coordi" matrix whose 2nd column holds y
  # coordinates. Relies on the globals UPPER_SYNAPTIC_ZONE_Y,
  # LOWER_SYNAPTIC_ZONE_Y and MORPHO_PENALTY_MIEW being defined elsewhere.
  upper_branches <- TREE[[1]]
  lower_branches <- TREE[[2]]
  branch_ys <- function(branch) branch[["coordi"]][, 2]
  top_y <- max(unlist(lapply(upper_branches, branch_ys)))
  bottom_y <- min(unlist(lapply(lower_branches, branch_ys)))
  penalty_up <- -max(UPPER_SYNAPTIC_ZONE_Y - top_y, 0)
  penalty_down <- min(LOWER_SYNAPTIC_ZONE_Y - bottom_y, 0)
  penalty_up + penalty_down + MORPHO_PENALTY_MIEW
}
|
cc815b4548229b6f411d6adf5798d6cdbe9c2af5
|
4a7bd0a7d52a70b43f24b0a33d09045b92c75f4d
|
/Measure_spacing.R
|
c93fe7df1337d9837452a5951c346f890951b735
|
[] |
no_license
|
padmer/FracVAL_cda_helpers
|
5820b13b4de5458f57eea3d0d5779ea7c8152b60
|
8aa5038a7e0f9f2e17ee711a9257f7999dbf1bc2
|
refs/heads/main
| 2023-08-06T20:28:37.641409
| 2021-10-11T08:32:34
| 2021-10-11T08:32:34
| 323,617,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 489
|
r
|
Measure_spacing.R
|
# Script: compute interparticle spacing statistics for FracVAL output.
# NOTE(review): the absolute Windows paths below tie this script to one
# machine; Common_functions.R must define get_mean_std_interpart().
source("C:/Users/padmer/OneDrive - KI.SE/R_CDA/FracVAL/PROGRAM_source_codes/test/Common_functions.R")
#Select Directory to run in
wd_fold="C:/Users/padmer/OneDrive - KI.SE/R_CDA/FracVAL/PROGRAM_source_codes/test"
#Move to that directory (setwd changes global state for the whole session)
setwd(wd_fold)
#Define the range of "Multiplication factors" to measure the spacing of
spac=seq(0.8,1.45,0.05)
#Calculate the mean and standard deviation of the interparticle spacings
#for the Df = 1.8 results folder/file prefix "Dfvals_ 1.8"
Inter_1p8=get_mean_std_interpart("Dfvals_ 1.8",spac)
|
8e14a8b6a047d429375e97776e6a4228f8a3adbb
|
ddf0c1ddf1e2df05f2fb752ea2c6c3702972edeb
|
/R/rename.r
|
a20053894da65cffbbb63b69d2803c637be16275
|
[] |
no_license
|
talgalili/plyr
|
f61d5ca395f69345b04d7db98558518f28028ddf
|
dc98253e4ec68951c3c53426f13c31a93e47d13c
|
refs/heads/master
| 2020-04-07T21:29:41.218492
| 2012-10-29T15:04:38
| 2012-10-29T15:04:38
| 8,526,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 812
|
r
|
rename.r
|
#' Modify names by name, not position.
#'
#' @param x named object to modify
#' @param replace named character vector, with new names as values, and
#'   old names as names.
#' @param warn_missing print a message if any of the old names are
#'   not actually present in \code{x}.
#' Note: x is not altered: To save the result, you need to copy the returned
#' data into a variable.
#' @export
#' @importFrom stats setNames
#' @examples
#' x <- c("a" = 1, "b" = 2, d = 3, 4)
#' # Rename column d to "c", updating the variable "x" with the result
#' x <- rename(x, replace=c("d" = "c"))
#' x
#' # Rename column "disp" to "displacement"
#' rename(mtcars, c("disp" = "displacement"))
rename <- function(x, replace, warn_missing = TRUE) {
  # Map the existing names through the replacement table, then attach the
  # updated names to a copy of the input; the original is left untouched.
  updated <- revalue(names(x), replace, warn_missing = warn_missing)
  setNames(x, updated)
}
|
4b8607e8290db64d92855968500b40e3244eaf3b
|
d2a1402ec7225f160436fa9997c22dfbc98b2c2b
|
/loadSBCvijversignature.R
|
4bdc3bceb710132370504ae8fe16eb82808bc09b
|
[] |
no_license
|
ashar799/SBC
|
d9fe9e6a02ab6b70a3b3d0532b45b76ac1846cd9
|
731d73821ad27944f0767957ff5205554702ad4b
|
refs/heads/master
| 2021-01-20T20:32:35.588709
| 2019-04-11T11:42:16
| 2019-04-11T11:42:16
| 61,547,525
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,020
|
r
|
loadSBCvijversignature.R
|
### This file loads SBC gene signature ####
# Derive a 50-probe "SBC" gene signature from training expression data by
# combining survival association (univariate Cox model) with class
# separation (one-way ANOVA against the censoring/metastasis labels).
#
# Y.pre.train: expression matrix, samples in rows, genes/probes in columns.
# time:        survival times for the training samples.
# censoring:   event indicator, also reused as the class label for the
#              ANOVA (per the original comments: metastasis occurred or not).
#
# Returns a list with one element, 'signature.sbc': the 50 probe names with
# the smallest sum of the two FDR-adjusted p-values, restricted to probes
# passing a loose 0.05 cut-off on both tests.
# Requires the 'survival' package (Surv, coxph) to be attached.
loadSBCvijversignature = function(Y.pre.train, time, censoring){
  # train.index <- pheno.vijver$Label_Traing_and_Validation == 'Training'
  # test.index <- pheno.vijver$Label_Traing_and_Validation == 'Validation'
  ######## Prefiltering of the Genes ############################### ###########################
  ######### Using Univariate Cox's Model for Ranking of the Survival Power of the Genes ########
  ######## Using T-test (or K way Anova) for Ranking of Clustering Power of the Genes ###########
  ######## The Class Labels ARE WHETHER METASTATIS OCCURED OR NOT ###############################
  surv.obj <- Surv(time,censoring)
  coeff.sig <- c(0)
  pvalue.sig <- c(0)
  pvalue.anova <- c(0)
  # Per-gene Cox regression: p-value of the model's likelihood-ratio test.
  calcCox = function(x){
    q1 <- unlist(summary(coxph(surv.obj ~ ., data = as.data.frame(x))))
    return(q1$logtest.pvalue)
  }
  # Per-gene one-way ANOVA of expression against the class label;
  # element 9 of the flattened summary holds the p-value.
  calcANOVA = function(x){
    q2 <- unlist(summary(aov(as.numeric(censoring) ~ ., data=as.data.frame(x)) ))
    return(as.numeric(q2[9]))
  }
  pvalue.sig <- apply(Y.pre.train,2,calcCox)
  pvalue.anova <- apply(Y.pre.train,2,calcANOVA)
  ###### Adjusting p-values for Multiple Test Correction
  pvalue.sig.adj <- p.adjust(pvalue.sig, method = "fdr")
  pvalue.anova.adj <- p.adjust(pvalue.anova, method = "fdr")
  #### As the number of features are quite variable choose first a very loose cut-off
  signature.loose <- colnames(Y.pre.train)[(pvalue.anova.adj < 0.05) & (pvalue.sig.adj < 0.05)]
  ### Combine the adjusted p-values into a single ranking score
  pvalue.combined <- (pvalue.sig.adj + pvalue.anova.adj)
  names(pvalue.combined) <- colnames(Y.pre.train)
  ## Sort it (ascending: best-ranked probes first)
  pvalue.combined.sort <- sort(pvalue.combined)
  ## Only keep those genes which passed the loose double cut-off
  pvalue.combined.adj <- pvalue.combined.sort[names(pvalue.combined.sort) %in% signature.loose]
  ### Take the top 50 genes ####
  # NOTE(review): if fewer than 50 probes survive, [1:50] pads with NA names.
  probes.signature <- names(pvalue.combined.adj[1:50])
  # Last expression: the assignment's value (the result list) is returned.
  relev <- list('signature.sbc'= probes.signature)
}
|
1ae02c71db6c2db963e158bbb62fd58eaeef0cdb
|
5460106f94ddc1af3c052eefac25680debf5d367
|
/scripts/illustration_script.R
|
97688f568d4a25cb3c689e855b098ca3f508a12e
|
[] |
no_license
|
svdataman/gin
|
5f867370ea7c69514e729c3564435938acf2e705
|
799df93ff1266ab141600dabf9081fbc2eea928e
|
refs/heads/master
| 2021-07-19T20:24:45.690609
| 2021-03-05T17:31:49
| 2021-03-05T17:31:49
| 63,324,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,093
|
r
|
illustration_script.R
|
# define an ACV function
# Autocovariance model A * exp(-tau / l^2), with amplitude A = |theta[1]|
# and length-scale parameter l = |theta[2]|. tau may be a vector of lags.
acv <- function(theta, tau) {
  amplitude <- abs(theta[1])
  length_scale <- abs(theta[2])
  amplitude * exp(-(tau / length_scale^2))
}
# --------------------------------------
theta <- c(0.0, 1.0, 1.0, 3)
m <- 500
t <- seq(-0.5, 100.5, length = m)
y <- gp_sim(theta, acv.model = acv, t.star = t)
y <- as.vector(y)
dat <- data.frame(cbind(t, y))
# plot the 'true' curve
xlim <- range(dat$t)
ylim <- range(dat$y)
par(mar=c(6, 6, 2, 2))
mask <- seq(1, 500, by = 1)
plot(dat$t[mask], dat$y[mask], col="pink3", type = "l", pch=1, bty = "n",
axes=FALSE, lwd = 3, cex = 2,
xlab="time", ylab="y(t)", xlim = xlim, ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
mask <- c(100, 250, 300, 350, 400)
#points(dat$t[mask], dat$y[mask], cex = 2, pch = 1, lwd = 3)
y <- gin::gp_sim(theta, acv.model = acv, t.star = t)
y <- as.vector(y)
dat <- data.frame(t = t, y = y)
lines(dat$t, dat$y, lwd = 3, col = "red3")
#points(dat$t[mask], dat$y[mask], cex = 2, pch = 1, lwd = 3)
y <- gp_sim(theta, acv.model = acv, t.star = t)
y <- as.vector(y)
dat <- data.frame(t = t, y = y)
lines(dat$t, dat$y, lwd = 3, col = "red4")
#points(dat$t[mask], dat$y[mask], cex = 2, pch = 1, lwd = 3)
#segments(dat$t[mask], ylim[1], dat$t[mask], ylim[1]+0.2, lwd=3)
# --------------------------------------
# define parameters of ACV
# theta[1] = mu (mean)
# theta[2] = nu (error scale factor)
# theta[3:p] = parameters of ACV
theta <- c(0.0, 1.0, 1.0, 3)
# define vector of times for reconstruction
m <- 1000
t <- seq(-0.5, 100.5, length = m)
# produce Gaussian vector (simulation)
y <- gin::gp_sim(theta, acv.model = acv, t.star = t)
y <- as.vector(y)
dat <- data.frame(t = t, y = y)
# plot the 'true' curve
xlim <- range(dat$t)
ylim <- range(dat$y)
par(mar=c(6,6,2,2))
plot(dat$t, dat$y, col="pink3", type = "l", bty = "n", axes=FALSE, lwd = 3,
xlab="time", ylab="y(t)", xlim = xlim, ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
# ------------------------------------------------
# -------------------------------------------------
# now observe f(t) only at n random times
# Sample n observation times from the middle half of the dense grid
# (the first and last quarter of the m grid points act as "bookends").
# NOTE(review): relies on `dat`, `m`, `xlim`, `ylim`, `theta`, `acv`
# defined earlier in this script (outside this fragment) — verify.
n <- 25
bookend <- round(m/4)
midrange <- (bookend):(m-bookend)
indx <- sample(midrange, size = n)
indx <- sort(indx)
t <- dat$t[indx]
y <- dat$y[indx]
# now add measurement errors
# Heteroscedastic errors: base sd 0.1, scaled by 1.0 / 0.5 / 3 with
# probabilities 0.8 / 0.1 / 0.1, so ~10% of points are much noisier.
dy <- rep(0.1, n) * sample(c(1.0,0.5,3), size = n, replace = TRUE,
                           prob = c(0.8,0.10,0.1))
epsilon <- rnorm(n, mean = 0, sd = dy)
y <- y + epsilon

# Panel: the true curve with the noisy observations overlaid.
plot(0, 0, type = "n", bty = "n", axes=FALSE, lwd = 2,
     xlab="time", ylab="y(t)", xlim = xlim, ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
lines(dat$t, dat$y, col = "pink3", lwd = 3)
# plot the observations
points(t, y, pch = 16, cex = 2)
# plot error bars
segments(t, y-dy, t, y+dy, lwd=3)
# Pack the observations into the 3-column (t, y, dy) matrix consumed below.
obs <- data.matrix(data.frame(t=t, y=y, dy=dy))
# ------------------------------------------------

# --------------------------------------------
# plot snake
# Panel: GP conditional ("posterior") mean +/- 1 and 2 sd band given obs.
plot(0, 0, type = "n", bty = "n", axes=FALSE, lwd = 2,
     xlab="time", ylab="y(t)", xlim = xlim, ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
# reconstruct process: compute 'conditional' mean and covariance
gp <- gin::gp_conditional(theta, acv.model = acv, dat = obs, t.star = dat$t)
# plot a 'snake' showing mean +/- std.dev
plot_snake(gp, add = TRUE, col.line = 3, sigma = c(1, 2))
# plot the observations
points(t, y, pch = 16, cex = 2)
# plot error bars
segments(t, y-dy, t, y+dy, lwd=3)
# ------------------------------------------------

# --------------------------------------------
# plot snake with true data
# Panel: the same snake with the true curve overlaid for comparison.
plot(0, 0, type = "n", bty = "n", axes=FALSE, lwd = 2,
     xlab="time", ylab="y(t)", xlim = xlim, ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
# plot a 'snake' showing mean +/- std.dev
plot_snake(gp, add = TRUE, col.line = 3, sigma = c(1, 2))
# add some constrained realisations
y.sim <- gp_sim(theta, dat = obs, acv.model = acv, t.star = dat$t,
                N.sim = 5, plot = FALSE)
#for (i in 1:5) lines(dat$t, y.sim[, i], col = i)
lines(dat$t, dat$y, col = "pink3", lwd = 3)
# plot the observations
points(t, y, pch = 16, cex = 2)
# plot error bars
segments(t, y-dy, t, y+dy, lwd=3)
# ------------------------------------------------

# --------------------------------------------
# zoom - plot snake with true data
# Panel: zoomed view of t in [60, 80].
plot(0, 0, type = "n", bty = "n", axes=FALSE, lwd = 2,
     xlab="time", ylab="y(t)", xlim = c(60, 80), ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
# plot a 'snake' showing mean +/- std.dev
plot_snake(gp, add = TRUE, col.line = 3, sigma = c(1, 2))
# add some constrained realisations
y.sim <- gp_sim(theta, dat = obs, acv.model = acv, t.star = dat$t,
                N.sim = 5, plot = FALSE)
#for (i in 1:5) lines(dat$t, y.sim[, i], col = i)
lines(dat$t, dat$y, col = "pink3", lwd =3)
# plot the observations
points(t, y, pch = 16, cex = 2)
# plot error bars
segments(t, y-dy, t, y+dy, lwd=3)
# ------------------------------------------------

# --------------------------------------------
# Panel: snake plus semi-transparent constrained realisations, on a grid
# extending slightly beyond the data range (only 4 of the 5 sims drawn).
plot(0, 0, type = "n", bty = "n", axes=FALSE, lwd = 2,
     xlab="time", ylab="y(t)", xlim = xlim, ylim = ylim, cex.lab = 2)
grid()
axis(1, lwd=0, cex.axis=1.5)
# plot a 'snake' showing mean +/- std.dev
plot_snake(gp, add = TRUE, col.line = 3, sigma = c(1, 2))
# add some constrained realisations
t.star <- seq(-0.5, 100.5, length = 500)
y.sim <- gp_sim(theta, dat = obs, acv.model = acv, t.star = t.star,
                N.sim = 5, plot = FALSE)
# Convert the colour names to ~12% opacity (alpha = 30 of 255).
col <- c("red", "blue", "black", "brown")
col <- rgb(t(col2rgb(col)), alpha = 30,
           maxColorValue = 255)
for (i in 1:4) lines(t.star, y.sim[, i], col = col[i], lwd=2)
# plot the observations
points(t, y, pch = 16, cex = 2)
# plot error bars
segments(t, y-dy, t, y+dy, lwd=3)
# ------------------------------------------------
# ------------------------------------------------
# ------------------------------------------------
# ------------------------------------------------
# ------------------------------------------------
# ------------------------------------------------
|
1bac4a00d0edb004b500c82cdbdf606e18ff7121
|
c27b3240d4d2525b52a71e419e9623f2d1152533
|
/plot2.R
|
1771ae209a9c682945822929ebc07de434ec94b8
|
[] |
no_license
|
samramsey/ExData_Plotting1
|
4dcd2ea832cccbce16baf27bf2f1642dec3d19f9
|
d1422e3e7aa074e2f278e9df3b8137ed578a4d40
|
refs/heads/master
| 2022-06-07T15:09:08.749944
| 2020-05-05T15:42:58
| 2020-05-05T15:42:58
| 257,377,848
| 0
| 0
| null | 2020-04-20T19:03:58
| 2020-04-20T19:03:57
| null |
UTF-8
|
R
| false
| false
| 463
|
r
|
plot2.R
|
## Plot 2: Global Active Power for 2007-02-01 .. 2007-02-02, saved as plot2.png.
library(data.table)

# Read the full power-consumption file and keep only the two target days.
data <- fread("household_power_consumption.txt")
data <- data[(Date == "1/2/2007") | (Date == "2/2/2007"), ]

# Combine date and time into one timestamp column.
# `:=` updates `data` by reference, so no reassignment is needed.
# BUG FIX: the original assigned the strptime step to a stray, unused
# variable `date` (typo for `data`); the by-reference update made it
# harmless but misleading.
data[, Date := paste(Date, Time)]
data[, Date := strptime(Date, format = "%d/%m/%Y %H:%M:%S")]

# Render the line chart to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
plot(data$Date, as.numeric(data$Global_active_power),
     type = "l",
     xlab = "",
     main = "Global Active Power",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
ab53d1463a3dbc624d6475a24ba6aa32a727d71e
|
2efd08b36b4d7a60f94617dee81668235d589651
|
/r_programming/test2.R
|
e804ecf34f93844d391ff8497091cac016ebbe5b
|
[] |
no_license
|
agawronski/datasciencecoursera
|
cebacf9abf4c73ecb69099f686d449da23de08a3
|
c151a7dcd2e494ba21b8235b2f5919eb670c614a
|
refs/heads/master
| 2021-01-18T16:35:36.933648
| 2015-08-23T23:23:29
| 2015-08-23T23:23:29
| 22,187,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 836
|
r
|
test2.R
|
# NOTE(review): clearing the workspace with rm(list=ls()) and hard-coding
# setwd() in a script are discouraged — they destroy the caller's session
# state and break on other machines. Kept byte-identical here.
rm(list=ls())
getwd()
setwd("C:/Users/NC/data_science")
## Count the number of completely-observed rows (nobs) per monitor file.
##
## directory: folder containing the monitor CSV files.
##            (BUG FIX: the original ignored this argument and hard-coded
##            "specdata".)
## id:        integer vector of file indices to process (default 1:332).
##
## Prints a num/nobs data frame plus its class, names and dim, matching the
## original's observable output; the last print's value is returned invisibly.
complete <- function(directory, id = 1:332){
  # All monitor files in directory order; `id` indexes into this vector.
  files <- list.files(directory, full.names = TRUE)

  # Read the requested files and stack them once (no rbind-in-a-loop growth).
  all_data <- do.call(rbind, lapply(id, function(i) read.csv(files[i])))

  # Keep only fully-observed rows, then pull out their monitor IDs.
  cc <- all_data[complete.cases(all_data), ]
  cc_vec <- cc[, "ID"]

  # BUG FIX: the original loop did done<-rbind(data.frame(...)) without the
  # `done` accumulator, so the result held only the *last* id's row. Build
  # the full table directly instead.
  done <- data.frame(num = id,
                     nobs = vapply(id,
                                   function(i) length(cc_vec[cc_vec == i]),
                                   integer(1)))
  print(done)
  print(class(done))
  print(names(done))
  print(dim(done))
}
|
03c82917117476d1274d21e44a0eac001e3f2f5c
|
351bb59ba08b55b3493b7df1f5aa3ac3036be986
|
/R/eval.R
|
cbd8b3c6da26f2a03df69d5cc6b1a6a1de71e5af
|
[] |
no_license
|
ramnathv/rapport
|
2565efb6744da6e1de089d32cabf0133fc319cbd
|
55d957b844883f2ad6ae3a40a43f25a45f9453d2
|
refs/heads/master
| 2020-12-25T10:59:51.120953
| 2012-01-09T22:41:36
| 2012-01-09T22:41:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,520
|
r
|
eval.R
|
##' Evals chunk(s) of R code
##'
##' This function takes either a list of integer indices which point to position of R code in body character vector, or a list of strings with actual R code, then evaluates each list element, and returns a list with two elements: a character value with R code and generated output. The output can be NULL (eg. \code{x <- runif(100)}), a table (eg. \code{table(mtcars$am, mtcars$cyl)} or any other R object. If a graph is plotted in the given text, the returned object is a string specifying the path to the saved png in temporary directory (see: \code{tmpfile()}). The function takes care of warnings and errors too, please check the the returned value below.
##' @param txt a list with character values containing R code
##' @param ind a list with numeric indices pointing to R code in \code{body}
##' @param body a character vector that contains template body
##' @param classes a vector or list of classes which should be returned. If set to NULL (by default) all R objects will be returned.
##' @param hooks list of hooks to bo run for given classes in the form of \code{list(class=fn)}. If you would also specify some parameters of the function, a list should be provided in the form of \code{list(fn, param1, param2=NULL)} etc. So the hooks would become \code{list(class1=list(fn, param1, param2=NULL), ...)}. See example below. A default hook can be specified too by setting the class to \code{'default'}. This can be handy if you do not want to define separate methods/functions to each possible class, but automatically apply the default hook to all classes not mentioned in the list. You may also specify only one element in the list like: \code{hooks=list('default'=ascii)}.
##' @param length R object exceeding the specified length will not be returned. The default value (\code{Inf}) does not have any restrictions.
##' @param output a character vector of required returned values. See below.
##' @param env environment where evaluation takes place. If not set (by default), a new temporary environment is created.
##' @param ... optional parameters passed to \code{png(...)}
##' @return a list of parsed elements each containg: src (the command run), output (what the command returns, NULL if nothing returned), type (class of returned object if any) and messages: warnings (if any returned by the command run, otherwise set to NULL) and errors (if any returned by the command run, otherwise set to NULL)
##' @author Gergely Daróczi
##' @examples \dontrun{
##' # parsing line-by-line
##' txt <- readLines(textConnection('x <- rnorm(100)
##' runif(10)
##' warning("You should check out rapport package!")
##' plot(1:10)
##' qplot(rating, data=movies, geom="histogram")
##' y <- round(runif(100))
##' cor.test(x, y)
##' crl <- cor.test(runif(10), runif(10))
##' table(mtcars$am, mtcars$cyl)
##' ggplot(mtcars) + geom_point(aes(x = hp, y = mpg))'))
##' evals(txt)
##'
##' ## parsing a list of commnads
##' txt <- list('df <- mtcars',
##' c('plot(mtcars$hp, pch = 19)','text(mtcars$hp, label = rownames(mtcars), pos = 4)'),
##' 'ggplot(mtcars) + geom_point(aes(x = hp, y = mpg))')
##' evals(txt)
##'
##' ## returning only a few classes
##' txt <- readLines(textConnection('rnorm(100)
##' list(x = 10:1, y = "Godzilla!")
##' c(1,2,3)
## matrix(0,3,5)'))
##' evals(txt, classes='numeric')
##' evals(txt, classes=c('numeric', 'list'))
##'
##' ## handling warnings
##' evals('chisq.test(mtcars$gear, mtcars$hp)')
##'
##' ## handling errors
##' evals('runiff(20)')
##' evals('Old MacDonald had a farm\\dots')
##' evals('## Some comment')
##'
##' ## hooks
##' hooks <- list('numeric'=round, 'matrix'=ascii)
##' evals(txt, hooks=hooks)
##' evals('22/7', hooks=list('numeric'=rp.round))
##' evals('matrix(runif(25), 5, 5)', hooks=list('matrix'=rp.round))
##'
##' ## using rapport's default hook
##' evals('22/7', hooks=TRUE)
##'
##' ## setting default hook
##' evals(c('runif(10)', 'matrix(runif(9), 3, 3)'), hooks=list('default'=round))
##' ## round all values except for matrices
##' evals(c('runif(10)', 'matrix(runif(9), 3, 3)'), hooks=list(matrix='print', 'default'=round))
##'
##' # advanced hooks
##' fun <- function(x, asciiformat) paste(capture.output(print(ascii(x), asciiformat)), collapse='\n')
##' hooks <- list('numeric'=list(round, 2), 'matrix'=list(fun, "rest"))
##' evals(txt, hooks=hooks)
##'
##' # return only returned values
##' evals(txt, output='output')
##'
##' # return only messages (for checking syntax errors etc.)
##' evals(txt, output='msg')
##'
##' # check the length of returned values
##' evals('runif(10)', length=5)
##'
##' # note the following will not be filtered!
##' evals('matrix(1,1,1)', length=1)
##'
##' # if you do not want to let such things be evaled in the middle of a string use it with other filters :)
##' evals('matrix(1,1,1)', length=1, classes='numeric')
##'
##' # hooks & filtering
##' evals('matrix(5,5,5)', hooks=list('matrix'=ascii), output='output')
##'
##' # evaling chunks in given environment
##' myenv <- new.env()
##' evals('x <- c(0,10)', env=myenv)
##' evals('mean(x)', env=myenv)
##' rm(myenv)
##' # note: if you had not specified 'myenv', the second 'evals' would have failed
##' evals('x <- c(0,10)')
##' evals('mean(x)')
##' }
##' @export
evals <- function(txt = NULL, ind = NULL, body = NULL, classes = NULL, hooks = NULL, length = Inf, output = c('all', 'src', 'output', 'type', 'msg'), env = NULL, ...){
    # Exactly one of `txt` (list of code chunks) or `ind` (indices into `body`)
    # may be supplied.
    if (!xor(missing(txt), missing(ind)))
        stop('either a list of text or a list of indices should be provided')
    if (!is.null(ind)){
        if (is.null(body))
            stop('you must provide body vector')
        # Resolve each index (or index vector) to the corresponding body lines.
        txt <- lapply(ind, function(x) body[x])
    }
    if (!all(output %in% c('all', 'src', 'output', 'type', 'msg')))
        stop('Wrong output option!')
    # 'all' expands to every field of the per-chunk result list.
    if (sum(grepl('all', output)) > 0)
        output <- c('src', 'output', 'type', 'msg')
    if (!any(is.list(hooks), is.null(hooks))) stop('Wrong list of hooks provided!')
    # Default: evaluate in a fresh, throw-away environment.
    if (is.null(env)) env <- new.env()
    if (!is.environment(env)) stop('Wrong env paramater (not an environment) provided!')
    # NOTE: the parameter `length` shadows base::length only as a *variable*;
    # calls like length(x) below still resolve to the base function because R
    # skips non-function bindings when looking up a function call.
    lapply(txt, function(src) {
        # Close every open graphics device so any plot drawn by the chunk
        # lands in our temporary png file.
        clear.devs <- function() while (!is.null(dev.list())) dev.off(as.numeric(dev.list()))
        clear.devs()
        file <- tempfile(fileext = '.png', ...)
        png(file)
        # Run the chunk via evaluate(); the local `eval` shadows base::eval
        # (again only as a variable — see the note on `length` above).
        eval <- suppressWarnings(try(evaluate(src, envir = env), silent=TRUE))
        ## error handling
        error <- grep('error', lapply(eval, function(x) class(x)))
        error <- c(error, grep('error', class(eval)))
        if (length(error) != 0) {
            # Evaluation (or parsing) failed: return an 'error' record with a
            # cleaned-up message. NOTE(review): the gsub pattern below is an
            # unescaped literal used as a regex and looks fragile — verify.
            res <- list(src = src,
                output = NULL,
                type = 'error',
                msg = list(
                    messages = NULL,
                    warnings = NULL,
                    errors = sprintf('**Error** in "%s": "%s"', paste(src, collapse=' ; '), ifelse(error==1, gsub('Error in parse.(text) = string, src = src) : <text>:[[:digit:]]:[[:digit:]]: |\n.*', '', as.character(eval[error])), paste(eval[[error]]$message, collapse=' ; '))))
            )
            return(res[output])
        }
        ## warnings
        warnings <- grep('warning', lapply(eval, function(x) class(x)))
        if (length(warnings) == 0) {
            warnings <- NULL
        } else {
            ##warnings <- sprintf('**Warning** in "%s": "%s"', paste(src, collapse=' ; '), names(warnings()[1]))
            # Collapse all warning messages raised by the chunk into one string.
            warnings <- sprintf('**Warning** in "%s": "%s"', paste(src, collapse=' ; '), paste(sapply(eval[warnings], function(x) x$message), collapse = " + "))
        }
        ## good code survived!
        # If the png file has a size, the chunk drew a plot; `graph` is then the
        # file path, otherwise FALSE.
        graph <- ifelse(is.na(file.info(file)$size), FALSE, file)
        returns <- length(eval) > 1
        if (returns) {
            # NOTE(review): when the chunk returned a value but drew no plot,
            # the source is evaluated a *second* time here, so any side effects
            # in the chunk run twice — verify this is intended.
            if (is.logical(graph)) returns <- suppressWarnings(eval(parse(text = src), envir = env))
        } else {
            returns <- NULL
        }
        if (is.character(graph)) {
            # A plot was produced: return the image path tagged with class 'image'.
            returns <- graph
            class(returns) <- "image"
        }
        clear.devs()
        ## check length
        # Drop results longer than the user-specified `length` cap.
        if (length(returns) > length) returns <- NULL
        ## check classes
        if (!is.null(classes))
            if (!(class(returns) %in% classes))
                returns <- NULL
        ## run hooks if specified
        if (!is.null(hooks))
            ## Q: why not inherits(returns, names(hooks)) ?
            if (class(returns) %in% names(hooks)) {
                fn <- hooks[[class(returns)]]; params <- list(returns)
                # A list-valued hook is (function, extra-arg).
                # NOTE(review): fn[[-1]] only works for exactly one extra
                # argument; longer hook lists would error — verify.
                if (is.list(fn)) {
                    params <- list(returns, fn[[-1]])
                    fn <- fn[[1]]
                }
                returns <- do.call(fn, params)
            } else {
                # Fall back to the 'default' hook when no class-specific one matches.
                if ('default' %in% names(hooks)) {
                    fn <- hooks[['default']]; params <- list(returns)
                    if (is.list(fn)) {
                        params <- list(returns, fn[[-1]])
                        fn <- fn[[1]]
                    }
                    returns <- do.call(fn, params)
                }
            }
        ## return list at last
        res <- list(src = src,
            output = returns,
            type = class(returns),
            msg = list(
                messages = NULL,
                warnings = warnings,
                errors = NULL)
        )
        return(res[output])
    })
}
|
e882b4bf09db59ff18ac8269f0c2e05b1166dd48
|
bb154d2aec34cf0a6b44fb123714c127a66c90ce
|
/R_exploratory_plots/plot3.R
|
4a32c7110acb8183920ac1ace479762a87e0ca73
|
[] |
no_license
|
jaydog7070/ExData_Plotting1
|
220e2ae725d60c0ae093cc9940b6dd8038b6c3e7
|
442a5891896c1a0ab9e6f70256d31645ac6791a0
|
refs/heads/master
| 2020-12-14T09:49:54.107887
| 2015-01-11T22:35:18
| 2015-01-11T22:35:18
| 29,104,353
| 0
| 0
| null | 2015-01-11T20:12:49
| 2015-01-11T20:12:48
| null |
UTF-8
|
R
| false
| false
| 2,064
|
r
|
plot3.R
|
## Jason Stedl plot3 project 1 Jan 2014 (reviewed)
## Plots the three sub-metering series for 2007-02-01 .. 2007-02-02 into
## plot3.png, staging the raw text file through an SQLite database.

# Remove stale database objects from working memory if they exist.
if (exists("powerConsumption"))
  {rm(powerConsumption)}
if (exists("con"))
  {rm(con)}

## Load the R SQLite library.
library(RSQLite)

## Stage the raw file in a SQLite table and pull the two target days.
con <- dbConnect(SQLite(), dbname = "powerconsumption")
dbWriteTable(con, name = "powerconsumption", value = "household_power_consumption.txt",
             row.names = FALSE, header = TRUE, sep = ";", overwrite = TRUE)
powerConsumption <- dbGetQuery(con, "SELECT * FROM powerconsumption WHERE Date='1/2/2007' OR Date='2/2/2007'")
dbDisconnect(con)

## Combine the date and time columns into a single timestamp (column 1).
powerConsumption[[1]] <- paste(powerConsumption[[1]], powerConsumption[[2]])
powerConsumption[[1]] <- strptime(powerConsumption[[1]], "%d/%m/%Y %H:%M:%S")

## Open a connection to the output .png file.
png(filename = "plot3.png")

## Common y-range across all three series.
## BUG FIX: the original referenced powerConsumption$Sub_meterin_3 (missing
## "g"), which is NULL, silently excluding series 3 from the range.
y_range <- range(c(powerConsumption$Sub_metering_1,
                   powerConsumption$Sub_metering_2,
                   powerConsumption$Sub_metering_3))

## Draw the three series on shared axes (par(new = TRUE) overlays plots).
plot(powerConsumption$Date, powerConsumption$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering", ylim = y_range)
par(new = TRUE)
plot(powerConsumption$Date, powerConsumption$Sub_metering_2, type = "l",
     col = "red2", ylim = y_range, xlab = "", ylab = "")
par(new = TRUE)
## (ylab left empty here: the first plot already drew the axis label, so
## re-drawing it caused a doubled, smudged label in the original.)
plot(powerConsumption$Date, powerConsumption$Sub_metering_3, type = "l",
     col = "blue", ylim = y_range, xlab = "", ylab = "")

## Attach the legend to the graph.
legend("topright", lty = c(1, 1, 1), col = c("black", "red2", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## Close the connection to the .png file.
dev.off()
|
c4ac0ba2ce6adc828e51e87a8b3d2cbf69a07dd7
|
4ce228293965fc3fc96fccad5d7d409ce4c05208
|
/src/inferences/inferences.R
|
e4ca608dbf095f506362605464aafd0371fe73cc
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
sirine-chahma/DSCI_522_group_401
|
4ba9095d1d9fd095837960f8144daa7f8270fdef
|
51d5072b05906ae65d5f4cc71ab2e34771b832c3
|
refs/heads/master
| 2020-12-14T05:55:49.886835
| 2020-05-03T00:14:24
| 2020-05-03T00:14:24
| 234,663,741
| 0
| 0
|
MIT
| 2020-05-03T00:14:25
| 2020-01-18T01:13:30
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,929
|
r
|
inferences.R
|
# author: Karanpal Singh, Sreejith Munthikodu, Sirine Chahma
# date: 2020-01-23
#
" This script will perform hypothesis test using t-test
It will read data from user specified location and save outputs to
user specified locations in csv format.
Usage: inferences.R --input_file=<input_file> --output_dir=<out_dir>
Options:
--input_file=<input_file> Path (including filename) to raw data (csv file)
--output_dir=<output_dir> Path to directory where the inferences output should be written
" -> doc
library(tidyverse)
library(testthat)
library(docopt)
library(broom)
# Fix the RNG seed for reproducibility.
set.seed(123)
# NOTE(review): these are placeholder values; test_split() below is invoked
# on them *before* main() runs, so the not-NULL checks are vacuous — verify.
test_smokers <- 0
test_sex <- 0
# Parse command-line arguments according to the docopt string `doc` above.
opt <- docopt(doc)
# Run both hypothesis tests on the insurance data and write tidy summaries
# of the results to `output_dir` as CSV files.
#
# input_file: path to the raw insurance CSV (must contain columns
#             `charges`, `smoker`, `sex`).
# output_dir: directory receiving 1.hypothesis_smokers.csv and
#             2.hypothesis_sex.csv.
main <- function(input_file, output_dir){
  # Load the raw data.
  insurance <- read_csv(input_file)

  # One-sided t-test: are smokers' mean charges greater than the observed
  # mean charge of non-smokers?
  smoker_charges <- insurance %>% filter(smoker == 'yes') %>% select(charges)
  nonsmoker_charges <- insurance %>% filter(smoker == 'no') %>% select(charges)
  test_smokers <- t.test(smoker_charges$charges,
                         mu = mean(nonsmoker_charges$charges),
                         alternative = 'greater', conf.level = 0.95)

  # Two-sided Welch t-test: do mean charges differ between the sexes?
  test_sex <- t.test(insurance$charges ~ insurance$sex, mu = 0,
                     alternative = 'two.sided', conf.level = 0.95,
                     var.equal = FALSE, paired = FALSE)

  # Persist tidy one-row summaries of both tests.
  write_csv(broom::tidy(test_smokers), paste0(output_dir, "/1.hypothesis_smokers.csv"))
  write_csv(broom::tidy(test_sex), paste0(output_dir, "/2.hypothesis_sex.csv"))
}
#test that the if hypothesis didn't return null
# Sanity-check that both hypothesis-test objects are non-NULL.
test_split <- function(test_smokers,test_sex){
  test_that("hypothesis results of smokers cannot be null, please check you input", {
    expect_equal(!is.null(test_smokers), TRUE)
  })
  test_that("hypothesis results of sex cannot be null, please check you input", {
    expect_equal(!is.null(test_sex), TRUE)
  })
}
# NOTE(review): test_split() is called here with the placeholder values
# (0, 0) assigned above, *before* main() computes the real tests, so this
# check always passes and verifies nothing — verify intent.
test_split(test_smokers,test_sex)
main(opt[["--input_file"]], opt[["--output_dir"]])
|
04b684fa553cec0a80f17c36882b17625bba3730
|
270c4fbe4bdb58d0cc79d999f2fde9758340c871
|
/man/parse_user_create_body.Rd
|
0c6bf6c060a14a29b87a2014406f06d5c2dc1a15
|
[] |
no_license
|
amoeba/rt
|
cc21218316b5a8d985618bc34be82d92ba5967d2
|
55221f8f9303ff1db600727c17cfc640dc5f6ba6
|
refs/heads/master
| 2020-03-25T22:46:29.609025
| 2020-03-01T09:58:00
| 2020-03-01T09:58:00
| 83,843,396
| 0
| 1
| null | 2018-04-09T20:49:33
| 2017-03-03T21:34:05
|
R
|
UTF-8
|
R
| false
| true
| 426
|
rd
|
parse_user_create_body.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rt_user_create.R
\name{parse_user_create_body}
\alias{parse_user_create_body}
\title{Parse the response body from a call to \code{\link{rt_user_create}}}
\usage{
parse_user_create_body(body)
}
\arguments{
\item{body}{(character)}
}
\value{
(numeric) The user ID
}
\description{
Parse the response body from a call to \code{\link{rt_user_create}}
}
|
cea38c21de48ef7786e7808e35c7852cb5ca3b7e
|
4a0dd6fd1d8018e617657070d07d6dc7fee82d76
|
/R/get_data.R
|
90f7b393a6289bd9f99c5128783ef6c6eb8db51b
|
[
"CC0-1.0"
] |
permissive
|
trebor/metropolis
|
412f3c955f8a96172ebb39bdfaacb0c78688a4c8
|
711574023caa2c0bef23548cba7eeea2955316a9
|
refs/heads/master
| 2020-04-16T16:23:34.914045
| 2015-04-07T15:11:12
| 2015-04-07T15:11:12
| 31,475,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
get_data.R
|
## Query the LocalData sensor API for hourly mean air-quality readings
## (Feb 2015) for a fixed set of cities and write the combined table to
## small_data.txt.
library(RCurl)
library(jsonlite)
library(AnomalyDetection)
library(dplyr)
library(magrittr)

cities <- c('San+Francisco', 'Bangalore', 'Boston', 'Rio+De+Janeiro',
            'Geneva', 'Singapore', 'Shanghai')
query_prefix <- "http://sensor-api.localdata.com/api/v1/aggregations?op=mean&over.city="
query_suffix <- "&from=2015-02-01T00:00:00-0800&before=2015-02-28T00:00:00-0800&resolution=1h&fields=airquality_raw"

## Fetch and parse the JSON payload for one city; returns its $data table.
## (Parameter renamed from `c`, which shadowed base::c.)
query_city <- function(city){
  url <- paste(query_prefix, city, query_suffix, sep = '')
  # BUG FIX: the original tested url.exists() but then used an undefined
  # `curled` when the test failed; fail loudly instead.
  if (!url.exists(url)) {
    stop("URL not reachable: ", url)
  }
  curled <- getURL(url)
  df <- fromJSON(curled)
  df$data
}

## BUG FIX: the original had a dangling `final_data =` that merged into the
## query_city definition, then seeded the accumulator with cities[1] AND
## looped over all cities — duplicating the first city's rows. Build the
## combined table in a single pass instead.
final_data <- do.call(rbind, lapply(cities, query_city))

write.table(final_data, file = 'small_data.txt', sep = ",",
            row.names = FALSE, col.names = TRUE)
#outlier check
#one city
# BUG FIX: the original line ended with a stray `%>%`, which piped one_city
# into the following assignment instead of terminating the statement.
one_city <- filter(final_data, city == "San Francisco")
res <- AnomalyDetectionVec(one_city[, 1], max_anoms = 0.02, direction = 'both',
                           plot = TRUE, period = 168)
anoms <- res$anoms

# Add a logical `anom` column to df, TRUE for the positions in anom_list.
# NOTE(review): assumes anom_list holds row indices of df — confirm against
# the structure AnomalyDetectionVec returns in res$anoms.
label_anoms <- function(df, anom_list){
  df$anom <- FALSE
  for (i in anom_list) {
    df[i, 'anom'] <- TRUE
  }
  df
}
labeled <- label_anoms(one_city, res$anoms)
|
3e5c1f6331906d6ff6e88dbcde1e2b3269958b27
|
9c7d151e52ca5b83b4e97634ba796d2a86da4b88
|
/Data.Science/2.R.Programming/Week.2/cachematrix.R
|
7ee89a04c70c8e0221bffd787f65d2f93f1a349c
|
[] |
no_license
|
mickgraham/Coursera
|
a5592e5632886e9f0d6e234c36b6e50b448d7229
|
4ca066557c3e60a4e55efec5bba6ba503bf053c7
|
refs/heads/master
| 2021-01-10T06:32:44.941519
| 2016-01-31T01:03:16
| 2016-01-31T01:03:16
| 48,262,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,824
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix.
## Matrix inversion is usually a costly computation and their may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## This function creates a special "matrix" object that can cache its inverse.
## Construct a cache-aware matrix wrapper: a list of four closures sharing
## the matrix `x` and its lazily-computed inverse. The cached inverse is
## invalidated whenever the stored matrix changes.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setinverse() is called

  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Accessors for the matrix and its cached inverse.
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv

  # Expose the four closures as a named list.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix,
## computing it at most once: a previously cached inverse is reused
## (announced via message()), otherwise solve() is applied to the stored
## matrix and the result is written back into the wrapper's cache.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()

  # Cache hit: announce and return immediately.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  # Cache miss: invert the stored matrix and memoise the result.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
e9dbb0276d0443414a1db48cebab4dc7c6eceb2c
|
3f1046972d8ed8f5aaf622d1c6be383201386cfd
|
/R/null_model_helpers.R
|
4304e16fad711d9f627ded97e8f1cd698bbc5111
|
[
"MIT"
] |
permissive
|
W529/spatialwarnings
|
0e1e81f6bc32edc73330ca9c7fe94d05961613b4
|
defa6bf462d0f6906b32298c123bd7f531a9650c
|
refs/heads/master
| 2020-05-22T05:13:06.449867
| 2019-01-20T16:21:04
| 2019-01-20T16:21:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,370
|
r
|
null_model_helpers.R
|
#
# These functions factorize some code between numerical indicators. Most
# notably it handles creating the null model testing, etc.
#
# Compute the indicator function `indicf` on `input` and, when
# nreplicates > 2, build a null distribution by recomputing the indicator
# on shuffled copies of `input` via shuffle_and_compute() (defined
# elsewhere in the package).
#
# Returns a list with `value` (the observed indicator) plus, when a null
# distribution was computed: null_mean, null_sd, null_95, null_05, z_score
# and pval — one entry per indicator component.
compute_indicator_with_null <- function(input,
                                        nreplicates,
                                        indicf) {

  # Compute the observed value
  value <- indicf(input)
  result <- list(value = value)

  if (nreplicates > 2) {

    if ( length(unique(input)) == 1 ) {
      # Compute the index on the same matrix as randomization will do nothing
      # (a constant matrix is invariant under shuffling).
      # NOTE(review): nrow = 1 assumes indicf returns a scalar on this
      # branch — confirm for multi-component indicators.
      nulldistr <- matrix(rep(indicf(input), nreplicates),
                          nrow = 1, ncol = nreplicates)
    } else {
      # Compute the index on a randomized matrix
      # shuffle_and_compute will convert all matrices to numeric matrices
      # internally. We need to explicitely convert back to logical after
      # shuffling before computing the indicator
      if ( is.logical(input) ) {
        nulldistr <- shuffle_and_compute(input, function(x) indicf(x>0),
                                         nreplicates)
      } else {
        nulldistr <- shuffle_and_compute(input, indicf, nreplicates)
      }
      # Stack the per-replicate results and transpose so rows are indicator
      # components and columns are replicates.
      nulldistr <- t( do.call(rbind, nulldistr) )
    }

    # Note that here nulldistr always has one or more rows and nreplicates
    # columns -> it is always a matrix
    # Summarise the null distribution row-wise; the z-score and p-value
    # compare the observed value (first column of the cbind) against the
    # replicates in the same row.
    nullstats <- list(null_mean = apply(nulldistr, 1, mean),
                      null_sd = apply(nulldistr, 1, sd),
                      null_95 = apply(nulldistr, 1, safe_quantile, .95),
                      null_05 = apply(nulldistr, 1, safe_quantile, .05),
                      z_score = apply(cbind(value, nulldistr), 1,
                                      function(X) (X[1] - mean(X[-1])) / sd(X[-1])),
                      pval = apply(cbind(value, nulldistr), 1,
                                   function(X) 1 - rank(X)[1] / length(X)))
    result <- append(result, nullstats)
  }

  return(result)
}
# Quantile wrapper that tolerates NAs in the null distribution: it warns
# (reporting how many values were NA) and then computes the quantile with
# na.rm = TRUE.
safe_quantile <- function(nulldistr, p) {
  n_missing <- sum(is.na(nulldistr))
  if (n_missing > 0) {
    warning(paste0('Computation of null values produced NAs (',
                   n_missing, " out of ", length(nulldistr), "). "))
  }
  quantile(nulldistr, p, na.rm = TRUE)
}
|
4d6b8e9fbc62848653934ae84d7f11893c4ebeb9
|
692fc86ef250638274e2d0d68e6c19be186d0c60
|
/man/textmodel_wordshoal.Rd
|
6fae5d1934f4cf17b9244561ba8b2ef67824d587
|
[] |
no_license
|
kbenoit/wordshoal
|
5bb83e87cd6a5ef10e7f9c29cfd392c347104026
|
ced5f71f9e618c20c343900adb57c378240ec637
|
refs/heads/master
| 2022-04-21T21:22:15.950595
| 2020-04-20T10:50:29
| 2020-04-20T10:50:29
| 115,543,902
| 13
| 4
| null | 2018-02-07T23:18:04
| 2017-12-27T17:38:51
|
R
|
UTF-8
|
R
| false
| true
| 3,082
|
rd
|
textmodel_wordshoal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textmodel_wordshoal.R
\name{textmodel_wordshoal}
\alias{textmodel_wordshoal}
\title{Wordshoal text model}
\usage{
textmodel_wordshoal(x, groups, authors, dir = c(1, 2), tol = 0.001)
}
\arguments{
\item{x}{the \pkg{quanteda} \link{dfm} from which the model will be fit}
\item{groups}{the name of a variable in the document variables for data
giving the document group for each document}
\item{authors}{the name of a variable in the document variables for data
giving the author of each document}
\item{dir}{set global identification by specifying the indexes for a pair of
authors such that \eqn{\hat{\theta}_{dir[1]} < \hat{\theta}_{dir[2]}}}
\item{tol}{a convergence threshold for the
log-posterior of the model}
}
\value{
An object of class textmodel_wordshoal. This is a list
containing: \item{tol}{log-posterior tolerance used in fitting}
\item{dir}{global identification of the dimension} \item{theta}{estimated
document positions} \item{beta}{debate marginal effects}
\item{alpha}{estimated document fixed effects} \item{psi}{estimated
document debate-level positions} \item{groups}{document groups}
\item{authors}{document authors} \item{ll}{log likelihood at convergence}
\item{se.theta}{standard errors for theta-hats} \item{data}{corpus to which
the model was fit}
}
\description{
Estimate Lauderdale and Herzog's (2016) model for one-dimensional document
author (e.g. speakers) positions based on multiple groups of texts (e.g.
debates). Each group of texts is scaled using Slapin and Proksch's (2008)
"wordfish" Poisson scaling model of one-dimensional document positions, and
then the positions from a particular author are scaled across groups using a
second-level linear factor model, using conditional maximum likelihood.
}
\details{
Returns estimates of relative author positions across the full
corpus of texts.
}
\examples{
library("quanteda")
iedfm <- dfm(data_corpus_irish30, remove_punct = TRUE)
wordshoalfit <-
textmodel_wordshoal(iedfm, dir = c(7,1),
groups = docvars(data_corpus_irish30, "debateID"),
authors = docvars(data_corpus_irish30, "member.name"))
summary(wordshoalfit)
author_positions <- summary(wordshoalfit)$estimated.author.positions
author_positions$row_names <- rownames(author_positions)
fitdf <- merge(author_positions,
docvars(data_corpus_irish30),
by.x = "row_names", by.y = "member.name")
fitdf <- subset(fitdf, !duplicated(memberID))
aggregate(theta ~ party.name, data = fitdf, mean)
}
\references{
Benjamin E. Lauderdale and Alexander Herzog. 2016.
"\href{https://www.cambridge.org/core/journals/political-analysis/article/measuring-political-positions-from-legislative-speech/35D8B53C4B7367185325C25BBE5F42B4}{Measuring
Political Positions from Legislative Speech}." \emph{Political Analysis}
24 (3, July): 374-394.
}
\author{
Benjamin Lauderdale and Kenneth Benoit
}
\keyword{experimental}
\keyword{textmodel}
|
2be27cc5f58f8f83f4af19fe370ca94378f29885
|
ac655728cfed40aacb3686b9a3fd2c26f8facdc0
|
/scripts/dhs/dhs_counts_explore.lung.R
|
aa5bf49e355b6796ea648a068cd2fe47fae1b0e3
|
[] |
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
8ba092245e934eff8c5dd6eab3d265a35ccfca06
|
f1a6550aa3d703b4bb494066be1b647dfedcb51c
|
refs/heads/master
| 2020-09-20T12:29:01.164008
| 2020-08-07T07:49:46
| 2020-08-07T07:49:46
| 224,476,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,905
|
r
|
dhs_counts_explore.lung.R
|
# Look at DHS counts data
# Date: 2015-03-17
# NOTE(review): rm(list=ls()) and a hard-coded setwd() in a script are
# discouraged — they clobber the user's session and are machine-specific.
rm(list=ls())
setwd("~/projects/tissue-specificity")
library(ggplot2)
# Functions ---------------------------------------------------------------
# Presumably these provide FindCutoff() and FilterReadcounts() used below —
# verify against the sourced files.
source("scripts/functions/MixtureModelFunctions.R")
source("scripts/functions/DhsFunctions.R")
# Given the column names of a DHS count table, return the replicate
# columns: everything that is not one of the bookkeeping columns
# "chr", "start", "end", "zscore" or "total".
# get.index = TRUE returns integer positions; FALSE returns the names.
GetReplicates <- function(cnames, get.index = TRUE) {
  reserved <- c("chr", "start", "end", "zscore", "total")
  keep <- !(cnames %in% reserved)
  if (get.index) {
    which(keep)
  } else {
    cnames[which(keep)]
  }
}
# Exponential model y = exp(a * x + b), vectorized over x.
fit <- function(x, a, b) {
  exp(a * x + b)
}
# Main --------------------------------------------------------------------
# Get data ----------------------------------------------------------------
# Load the combined DHS peak/count table for lung (tab-separated, header row).
dhs.path <- "/home/yeung/projects/tissue-specificity/data/beds/ENCODE:Seq:Lung/combined.bed"
dhs.dat <- read.table(dhs.path, header = TRUE, sep = "\t") # gigantic file ~ 1.2 gigs in memory
# save(dhs.dat, file = "~/projects/tissue-specificity/docs/2015-03-19-felix/dhs_dat.Robj")
jtissue <- "Lung"

# subset constants --------------------------------------------------------
# Take a reproducible 1% random subset of rows for the exploratory plots.
set.seed(0)
n.sub <- 0.01 * nrow(dhs.dat)
# NOTE(review): seq(1:n) is redundant (seq_len(n) was likely intended).
rows.sub <- sample(seq(1:nrow(dhs.dat)), n.sub)
i.reps <- GetReplicates(colnames(dhs.dat))
n.samps <- length(i.reps)

# Normalize by total counts -----------------------------------------------
# Counts-per-million normalisation of each replicate column.
sum.samps <- colSums(dhs.dat[, i.reps])
dhs.reps <- sweep(dhs.dat[, i.reps], 2, sum.samps, "/") * 10^6
barplot(sum.samps, las = 2)
dhs.reps.sub <- dhs.dat[rows.sub, i.reps] # for plotting
# NOTE(review): dhs.reps.sub is taken from the *raw* counts (dhs.dat), not
# the normalised dhs.reps — confirm this is intended for the plots below.
pairs(log2(dhs.reps.sub))
# Per-replicate log2 density plots.
for (i in 1:ncol(dhs.reps.sub)){
  print(colnames(dhs.reps.sub)[i])
  plot(density(log2(unlist(dhs.reps.sub[, i]))), main = paste(jtissue, colnames(dhs.reps.sub)[i]))
}

# All samples look good, find cutoff with mixmdl -----------------------------
# Record the retained sample names, one per line.
good.samples <- colnames(dhs.reps.sub)
sink("data/beds/filtered_beds/encode_lung_good_samples.txt")
for (s in good.samples){
  cat(s)
  cat("\n")
}
sink()

# Pool the normalised counts of the subset, drop zeros, and inspect the
# log2 density to choose a detection cutoff.
counts <- unlist(dhs.reps[rows.sub, good.samples])
counts <- log2(counts[which(counts > 0)])
plot(density(counts))
# (cutoff.log2 <- FindCutoff(x = counts, lambdas = c(0.6, 0.4), mus = c(-5, -1), k = 2))
# cutoff <- 2^cutoff.log2$maximum
# Hard-coded cutoff; the mixture-model estimate above is commented out.
cutoff.log2 <- -3.4
abline(v = cutoff.log2)
cutoff <- 2^cutoff.log2

# Filter read counts ------------------------------------------------------
# FilterReadcounts() comes from the sourced project scripts (not visible here).
dhs.clean.filtered <- FilterReadcounts(dhs.dat, dhs.reps, good.samples, cutoff)

# Write to output ---------------------------------------------------------
write.table(dhs.clean.filtered, file = "data/beds/filtered_beds/encode_peaks.lung.bed",
            quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
d96142ee6e04779f7b44a45d3c064d290ab9ca96
|
cd5e884de7a263aafd9ca943069aa37006218b97
|
/U8/analysis.R
|
bacbd27d6a768074a59fce7dceeb1fea73a3453b
|
[] |
no_license
|
FRosner/InfoVis
|
d289669b1db48ebb78826cdd9763c83e862d5b30
|
51b27b07af3d043484a0099f2a1ca30e06fd7d35
|
refs/heads/master
| 2020-12-29T01:11:34.900450
| 2013-12-18T10:03:14
| 2013-12-18T10:03:14
| 13,618,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,078
|
r
|
analysis.R
|
# Calendar heat-map of the DJIA closing value, 1989-01-03 .. 1994-12-30.
# Each "year" column is subdivided into quarters / months / weeks / days;
# each day cell is coloured by the close relative to the first day (=100).
# NOTE(review): rm(list=ls()) in a script is discouraged; kept from original.
rm(list = ls())
library(lattice)

dji <- read.csv("djia-100.txt", colClasses = c("character", "numeric"))
dji <- dji[which(dji$YYMMDD == "890103"):which(dji$YYMMDD == "941230"), ]
# BUG FIX: the original read `dji$YYMMD` (missing the final D) and only
# worked through `$` partial matching; spell the column out.
dji$YYMMDD <- as.Date(dji$YYMMDD, "%y%m%d")

pdf("cm_dji.pdf", width = 10, height = 5)
plot.new()

# Closing values as a percentage of the first day's close.
dji.relative <- sapply(dji$closing.value, FUN = function(x) {
  return(x / dji$closing.value[1])
}) * 100
dji.relative.range <- max(dji.relative) - min(dji.relative)
dji.relative.min <- min(dji.relative)
colors <- cm.colors(ceiling(dji.relative.range), alpha = 1)
border <- par("fg")
#border = NA

currentDay <- 1
# NOTE(review): only 4 year columns (4*4*3*4*7 = 1344 cells) are drawn even
# though the selected data spans 1989-1994 — verify against the original intent.
for (year in 1:4) {
  year.xleft <- (year - 1) / 4
  year.xright <- (year) / 4
  year.ytop <- 0
  year.ybottom <- 1
  for (quart in 1:4) {
    quart.xleft <- year.xleft + (quart - 1) / 4 * (year.xright - year.xleft)
    quart.xright <- year.xleft + (quart) / 4 * (year.xright - year.xleft)
    quart.ytop <- year.ytop
    quart.ybottom <- year.ybottom
    rect(xleft = quart.xleft, xright = quart.xright, ytop = quart.ytop, ybottom = quart.ybottom, border = border)
    for (month in 1:3) {
      month.xleft <- quart.xleft
      month.xright <- quart.xright
      month.ytop <- (month - 1) / 3
      month.ybottom <- (month) / 3
      rect(xleft = month.xleft, xright = month.xright, ytop = month.ytop, ybottom = month.ybottom, border = border)
      for (week in 1:4) {
        week.xleft <- month.xleft
        week.xright <- month.xright
        week.ytop <- month.ytop + (week - 1) / 4 * (month.ybottom - month.ytop)
        week.ybottom <- month.ytop + (week) / 4 * (month.ybottom - month.ytop)
        for (day in 1:7) {
          day.xleft <- week.xleft + (day - 1) / 7 * (week.xright - week.xleft)
          day.xright <- week.xleft + (day) / 7 * (week.xright - week.xleft)
          day.ytop <- week.ytop
          day.ybottom <- week.ybottom
          # BUG FIX: when the value equals the minimum, ceiling(0) == 0 so
          # colors[0] (an empty vector) produced an invisible cell; clamp
          # the colour index to at least 1.
          rect(xleft = day.xleft, xright = day.xright, ytop = day.ytop, ybottom = day.ybottom,
               col = colors[max(1, ceiling(dji.relative[currentDay] - dji.relative.min))], border = NA)
          currentDay <- currentDay + 1
        }
        rect(xleft = week.xleft, xright = week.xright, ytop = week.ytop, ybottom = week.ybottom, border = border)
      }
    }
  }
}
dev.off()
|
f50025735bcd0dd0a37b89756d85193a8ed0533c
|
8888df0b621a89b51fd4571a8ce1fbfbe6991e23
|
/2003.R
|
42c129f89bc99f74ca4e45e4821380f7fd1f15a1
|
[] |
no_license
|
qin-yu/R-Markov-STAT0007
|
a7a8839a898beb357d726feade4b275e833fe0c6
|
c66210a0d40a4d740d4a4020e2ef6a66b9edc213
|
refs/heads/master
| 2021-09-06T07:24:15.940095
| 2018-02-03T19:02:58
| 2018-02-03T19:02:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,594
|
r
|
2003.R
|
# R code for STAT2003/STAT3102 course (2017/18)
# Created - Jan 2018, last update - 24 Jan 2018
# by Qin Yu, qin.yu.15@ucl.ac.uk
Sys.setenv(LANG = "en")
library("markovchain")
library("MASS") # Now purely for converting decimal to fraction
print("----------------------------------------------------")
printFraction <- function(mc){
  # Display the transition matrix of `mc` (a markovchain object) with its
  # entries rendered as fractions, e.g. printFraction(HIVmc^10).
  # CAUTION: very small numbers will be displayed as 0.
  tm <- attributes(mc)$transitionMatrix
  print(as.fractions(tm))
}
# The HIV example is important and will be mentioned repeatedly:
# Three-state chain of HIV viral-load progression (high/medium/low).
HIVstates <- c("high", "medium", "low")
HIVmatrix <- matrix(c(0.9, 0.05, 0.05,
0.6, 0.25, 0.15,
0.25, 0.25, 0.5),
byrow = TRUE,
nrow = 3,
dimnames = list(HIVstates, HIVstates)) # To perform matrix multiplication on HIVmatrix, use %*%
HIVmc = new("markovchain",
states = HIVstates,
byrow = TRUE,
transitionMatrix = HIVmatrix,
name = "HIV progression") # To perform matrix multiplication on HIVmc, use *
print("The setting of HIV markov chain:")
print(HIVmc)
# Initial distribution over (high, medium, low).
initialState <- c(0.7, 0.2, 0.1)
print("The initial state has been set as: ")
print(initialState)
print("----------------------------------------------------")
# Exercise Sheet 2: chains for coursework questions 1-3.
cw2q1matrix = matrix(c(1/2, 1/2, 0, 0,
1, 0, 0, 0,
0, 1/2, 1/3, 1/6,
0, 0, 0, 1), byrow = TRUE, nrow = 4)
cw2q1 = new("markovchain",
byrow = TRUE,
transitionMatrix = cw2q1matrix,
name = "CW2 Q1")
print("The setting of CW2 Q1")
printFraction(cw2q1)
# Upper-triangular chain (die-maximum process); state 6 is absorbing.
cw2q2matrix = matrix(c(1/6, 1/6, 1/6, 1/6, 1/6, 1/6,
0, 1/3, 1/6, 1/6, 1/6, 1/6,
0, 0, 1/2, 1/6, 1/6, 1/6,
0, 0, 0, 2/3, 1/6, 1/6,
0, 0, 0, 0, 5/6, 1/6,
0, 0, 0, 0, 0, 1), byrow = TRUE, nrow = 6)
cw2q2 = new("markovchain",
byrow = TRUE,
transitionMatrix = cw2q2matrix,
name = "CW2 Q2")
print("The setting of CW2 Q2")
printFraction(cw2q2)
cw2q3matrix = matrix(c(1/4, 0, 1/2, 1/4,
0, 1/5, 0, 4/5,
0, 1, 0, 0,
1/3, 1/3, 0, 1/3), byrow = TRUE, nrow = 4)
cw2q3 = new("markovchain",
byrow = TRUE,
transitionMatrix = cw2q3matrix,
name = "CW2 Q3")
print("The setting of CW2 Q3")
printFraction(cw2q3)
print("----------------------------------------------------")
# Exercise Sheet 4: Q2 reuses the same transition matrix as CW2 Q3.
cw4q2matrix = matrix(c(1/4, 0, 1/2, 1/4,
0, 1/5, 0, 4/5,
0, 1, 0, 0,
1/3, 1/3, 0, 1/3), byrow = TRUE, nrow = 4)
cw4q2 = new("markovchain",
byrow = TRUE,
transitionMatrix = cw4q2matrix,
name = "CW4 Q2")
print("The setting of CW4 Q2")
printFraction(cw4q2)
print("----------------------------------------------------")
plotHIV <- function(t0, n){
# Simulate `n` steps of the global HIV markov chain (HIVmc) starting from
# state `t0` ("low"/"medium"/"high") and plot the trajectory.
# Notice that the initial state itself is not plotted.
# e.g. call plotHIV("low", 20)
simulated_data <- rmarkovchain(n = n, object = HIVmc, t0 = t0)
print(simulated_data)
# Encode the state labels numerically for plotting: low=1, medium=2, high=3.
numeric_simulated_data <- (as.numeric(simulated_data == "low")
+ as.numeric(simulated_data == "medium") * 2
+ as.numeric(simulated_data == "high") * 3)
t <- 1:n
plot(t, numeric_simulated_data,
yaxt = "n", type = "o", ylab = "State", xlab = "Time",
main = "Simulation of the HIV Markov chain", col = "blue")
# Replace the numeric y axis with the state names.
axis(side = 2, at = c(1,2,3), labels = c("low", "medium", "high"))
}
valOfState <- function(object, s){
  # Return the (1-based) index of state `s` in the state space of the
  # markovchain `object`; returns NULL invisibly when `s` is not a state,
  # matching the original fall-through behaviour.
  hits <- which(states(object) == s)
  if (length(hits) > 0)
    return(hits[[1]])
}
numericStates <- function(object){
  # Integer sequence 1..k, where k is the number of states of `object`.
  # seq_along() replaces the 1:length() idiom (a markovchain always has
  # at least one state, so the two agree here).
  seq_along(states(object))
}
plotMarkov <- function(object, t0, n){
  # Simulate `n` steps of the markov chain `object` starting from state `t0`
  # and plot the trajectory with state names on the y axis.
  # e.g. call plotMarkov(cw2q2, 1, 50)
  simulated_data <- rmarkovchain(n = n, object = object, t0 = t0)
  # Map each simulated state label to its numeric index.  vapply() replaces
  # the original loop that grew the vector with c() one element at a time
  # (quadratic copying).
  numeric_simulated_data <- vapply(simulated_data,
                                   function(s) valOfState(object, s),
                                   integer(1), USE.NAMES = FALSE)
  t <- 1:n
  numeric_states <- numericStates(object)
  plot(t, numeric_simulated_data,
       yaxt = "n", type = "o", ylab = "State", xlab = "Time",
       main = "Simulation of Markov chain", col = "blue" )
  axis(side = 2, at = numeric_states, labels = states(object))
}
|
901bee0a274de1d027dc9d3d22b71ac086e468d8
|
31a36ae16efd9dd1c9a81790ed11ad66a4bb3edd
|
/Ex02_03.R
|
62443350f73c55b258359973e1a78958ec739aa5
|
[] |
no_license
|
poudelkhem/RProgramming
|
9707ccdac228cd98aa436feac6e783640fa5cc96
|
6d37930be4bc5fed4d728fde1771178135e50b80
|
refs/heads/master
| 2020-05-24T06:45:20.590451
| 2019-05-17T04:23:14
| 2019-05-17T04:23:14
| 187,145,337
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
Ex02_03.R
|
# Up and Running with R
# Ex02_03: basic arithmetic, printing, and vectorised operations.
2 + 2 # Basic math; Mac: cmd-enter; PC: ctrl-r
1:100 # Prints numbers 1 to 100 across several lines
print("Hello World!") # Prints "Hello World" in console
# Variables
x <- 1:5 # Put the numbers 1-5 in the variable x
x # Displays the values in x
y <- c(6, 7, 8, 9, 10) # Puts the numbers 6-10 in y
y # Displays y
x + y # Adds corresponding elements in x and y
x * 2 # Multiplies each element in x by 2
|
9510280e397f7c74ce318d5eeeb4c23b24ae0fd9
|
12f1c62b907dd68d778bf780bd887b9ff76d0788
|
/man/ripser.Rd
|
1e0e19405ac419e98ff4fcda9a260516ffff65de
|
[] |
no_license
|
PiyushaB/ripserr
|
d0612f551b7bb32b83a2b723f4b852e5fd04ad35
|
a7ca110ffd67bea1fca047ae00a9fc3a7febf2c1
|
refs/heads/master
| 2021-09-07T05:15:43.727154
| 2018-02-17T23:02:20
| 2018-02-17T23:02:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 858
|
rd
|
ripser.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ripser_func.R
\name{ripser}
\alias{ripser}
\title{Calculate Persistent Homology of a Point Cloud}
\usage{
ripser(mat)
}
\arguments{
\item{mat}{numeric matrix containing point cloud}
}
\value{
3-column matrix, with each row representing a TDA feature
}
\description{
The `mat` parameter should be a numeric matrix with each row corresponding
to a single point, and each column corresponding to a single dimension. Thus,
if `mat` has 50 rows and 5 columns, it represents a point cloud with 50 points
in 5 dimensions.
}
\examples{
# create a 2-d point cloud of a circle (100 points)
num.pts <- 100
rand.angle <- runif(num.pts, 0, 2*pi)
pt.cloud <- cbind(cos(rand.angle), sin(rand.angle))
# calculate persistent homology (num.pts by 3 numeric matrix)
pers.hom <- ripser(pt.cloud)
}
|
ab19788840c1853957dba55e25d2d73787bd273c
|
57144c2d9a8c77faa7766d3003efe553bea2a9eb
|
/man/olive.Rd
|
77109a6f0414b52da38a87feb545e0234dd98e35
|
[] |
no_license
|
Bhanditz/loon
|
63565f059d3c0ccb0756ede8c6536690649e44df
|
540eecd6dc5efa9c3369a69cae78b94ee5c54857
|
refs/heads/master
| 2020-04-19T08:46:48.989059
| 2018-12-10T08:10:03
| 2018-12-10T08:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,166
|
rd
|
olive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{olive}
\alias{olive}
\title{Fatty Acid Composition of Italian Olive Oils}
\format{A data frame containing 572 cases and 10 variates.}
\usage{
olive
}
\description{
This data set records the percentage composition of 8 fatty
acids (palmitic, palmitoleic, stearic, oleic, linoleic, linolenic,
arachidic, eicosenoic) found in the lipid fraction of 572 Italian olive
oils. The oils are samples taken from three Italian regions varying number
of areas within each region. The regions and their areas are recorded as
shown in the following table:
\tabular{ll}{ \bold{Region} \tab \bold{Area}\cr South \tab North-Apulia,
South-Apulia, Calabria, Sicily \cr North \tab East-Liguria, West-Liguria,
Umbria \cr Sardinia \tab Coastal-Sardinia, Inland-Sardinia }
}
\references{
Forina, M., Armanino, C., Lanteri, S., and Tiscornia, E. (1983)
"Classification of Olive Oils from their Fatty Acid Composition", in Food
Research and Data Analysis (Martens, H., Russwurm, H., eds.), p. 189,
Applied Science Publ., Barking.
}
\keyword{datasets}
|
8c805c1445da41858f36a90d4d16baae9d88f99c
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055858-test.R
|
fc9b30d35acd0bd46852ea003a979aa9bef934d4
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
1610055858-test.R
|
testlist <- list(a = 1966080L, b = 2L, x = c(-54017L, 184549123L, 1987475199L, 469762047L, 1375708324L, -1532713820L, -1532713820L, -1532713820L, -1532713820L, 822804277L, -1532713820L, -1532713820L, -1532713820L, -1532713820L, -1532713820L, -1532713820L, -1532713820L, -1532713820L, 822804277L, -1532713820L, -1532713820L, -1532713820L, -1532845534L ))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
322a9b2d0d1b0007ae9bc6266be1ea4750db2317
|
413db59d938326e8687e3c3fad93e65594d40bb4
|
/Naive Bayes.R
|
5c0ab0faf5bcddbef7037cc642b14ec5124642b3
|
[] |
no_license
|
sedale33/TF
|
076e55cf7dc6d5e9303042a306c0bf7651907c61
|
7242f66a59819763c2921941a7fa7ed0582300c8
|
refs/heads/master
| 2020-07-11T17:10:37.934262
| 2019-09-16T11:10:05
| 2019-09-16T11:10:05
| 204,601,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 840
|
r
|
Naive Bayes.R
|
require(ramify)
require(R6)
# Gaussian Naive Bayes classifier as an R6 class.
# Public fields: K = class labels seen in training, mu = per-class feature
# means, cov = per-class (smoothed) feature variances, y = training labels.
GaussNB <- R6Class("GaussNB", list( K = 0, mu = 0, cov = 0, y = 0,
# Fit: estimate per-class mean and variance of each feature column of X.
# X: numeric feature matrix; y: class labels (apparently zero-based integer
# codes, since perdict() returns argmax(P_hat) - 1 -- TODO confirm);
# epsilon: smoothing added to the variances to avoid zeros.
fit = function(X, y, epsilon= 1e-2){
self$y <- y
self$K <- unique(y)
data <- cbind(X, y)
nfeat <- length(data[1,]) - 1
# Column 1 of each aggregate() result is the grouping label; perdict()
# drops it with [i, -1].
self$mu <- aggregate(data[,1:nfeat], list(data[,nfeat+1]), mean)
# NOTE(review): `+ epsilon` is applied to the whole aggregate data frame,
# so it also shifts the grouping column -- verify this is intended.
self$cov <- aggregate(data[,1:nfeat], list(data[,nfeat+1]), function(x) mean((x-mean(x))^2)) + epsilon
# NOTE(review): `prior` is computed but never stored or used; the class
# prior is effectively ignored at prediction time.
prior <- summary(as.factor(y))/length(y)
invisible(self)
},
# Predict class codes for each row of X (method name is a typo of
# "predict", kept as-is since it is the public interface).
perdict = function(X){
P_hat <- mat.or.vec(length(X[,1]), length(self$K))
for(i in seq_along(self$K)){
for(r in seq_along(X[,1])){
# Per-class Gaussian log-likelihood summed over features.
# NOTE(review): sum((X[r,]-mu)^2) collapses across ALL features before
# being divided by each per-feature variance, and the denominator uses
# cov^2 rather than cov -- verify against the intended density formula.
P_hat[r, i] <- sum(-0.5*log(2*pi) - 0.5*log((self$cov[i,-1])) - sum((X[r,]-self$mu[i,-1])^2)/(2*(self$cov[i,-1]^2)))
}
}
argmax(P_hat) - 1
}
))
|
e12a9dd8e43599bbd2aa56017f1cf52c76f24b36
|
5faaacb1d3e3b35cecb54a4f02fa8b761644d09a
|
/cachematrix.R
|
1f1a3ae2f671b750899e1aa28caeb62e70f48224
|
[] |
no_license
|
cataclysmic/ProgrammingAssignment2
|
d5de877d572c7e5ba71800b03675ef8fb6b3e280
|
3f95fa3f5324116957185bc41058c35f298c9a9f
|
refs/heads/master
| 2021-01-20T23:36:28.878505
| 2014-07-25T14:30:22
| 2014-07-25T14:30:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
cachematrix.R
|
## This set of two functions stores the matrix and its inverse to the
## environment provided by makeCacheMatrix for further computation.
## makeCacheMatrix stores a matrix and its inverse for further computation
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.  Returns a
  # list of accessor closures that all share this function's environment,
  # so the cached inverse survives between calls.
  inv <- NULL  # cached inverse; NULL until setinverse() is called

  set <- function(y) {
    # Replace the stored matrix and invalidate any stale cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() x                               # stored matrix
  setinverse <- function(inverse) inv <<- inverse   # cache a computed inverse
  getinverse <- function() inv                      # cached inverse or NULL

  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve calculates the inverse of a matrix stored in the makeCacheMatrix environment
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse() # check if inverse is cached
if(!is.null(i)) { # if inverse is cached, show msg + return cache
message("Retrieving cached inverted matrix")
return(i)
}
origMat <- x$get() # retrieve original matrix
i <- solve(origMat, ...) # calculate inverse of origMat
x$setinverse(i) # store origMat in makeCacheMatrix environment
i # output inverse matrix
}
# e.g.
#foo <- matrix(c(1,0,5,2,1,6,3,5,0),ncol=3,nrow=3)
#
#bar <- makeCacheMatrix()
#bar$set(foo)
#cacheSolve(bar)
|
791101ba571052c31d1a53f0096935b1f5b788e5
|
13f706cb7734d513921e1435cc58b9acba22e7ed
|
/Medical_Places_California.R
|
e7045a9b51df82c62aee0485cbeb17a2213207f8
|
[] |
no_license
|
chirantghosh/California_Medical_Locations
|
552ed66b4679612e5e0305c6640bf93201b9be21
|
fd63412b3063e6656cc70988933fc1c772d0ddaf
|
refs/heads/master
| 2022-09-13T05:10:55.728449
| 2016-12-18T19:29:27
| 2016-12-18T19:29:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 899
|
r
|
Medical_Places_California.R
|
library(readr)
library(stringr)
library(leaflet)
# Load the California healthcare facility location data.
Health <- read_csv("C:\\Users\\Chirantan\\Desktop\\T\\Healthcare_Facility_Locations.csv")
# Keep columns 3 (facility type code), 25 and 26 (coordinates).
ct <- Health[,c(3,25,26)]
summary(ct$LATITUDE)
summary(ct$LONGITUDE)
# Impute missing coordinates with a fixed point in central California.
ct$LATITUDE[which(is.na(ct$LATITUDE))] <- 35.40
ct$LONGITUDE[which(is.na(ct$LONGITUDE))] <- -119.3
m <- leaflet(ct) #%>% #addTiles('http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png',
m <- addTiles(m) #attribution='Map tiles by <a href="http://stamen.com">Stamen Design</a>, <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a> — Map data © <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>')
# NOTE(review): this centres the view on (-72.69, 41.65), which is in
# Connecticut, not California -- confirm the intended map centre.
m %>% setView(-72.690940, 41.651426, zoom = 8)
# Plot each facility as a small red circle, popup showing its type code.
m %>% addCircles(lng=ct$LONGITUDE, lat=ct$LATITUDE, popup=ct$FAC_TYPE_CODE, weight = 3, radius=40,
color="red", stroke = TRUE, fillOpacity = 0.15)
|
d1831cc539c1b947a21d83b1cd0797397e9c140d
|
d713cc25ad9631ce9644997d0a8da173a26cd29a
|
/docs/R/seqOutBias_evalmask.R
|
0b9ec8c11c102019e8d94d9309b538084f645713
|
[
"BSD-3-Clause"
] |
permissive
|
guertinlab/seqOutBias
|
de263fbdf194153ec8b3ed4aa921e1e1a4c4b757
|
1446ebc35d1cf5ede03fe87fab7650a5e8c0516d
|
refs/heads/master
| 2023-02-21T00:26:55.058616
| 2023-02-21T00:05:27
| 2023-02-21T00:05:27
| 79,238,409
| 5
| 2
|
BSD-3-Clause
| 2021-04-02T18:46:24
| 2017-01-17T14:59:40
|
Rust
|
UTF-8
|
R
| false
| false
| 2,303
|
r
|
seqOutBias_evalmask.R
|
#
# Code to compute a fitness metric for a given
#
require(bigWig)
run.cutmask <- function(cutmask, seqoutbias.args, prefix = "run_", bw.split = FALSE, cleanup = TRUE, sqcmd = "seqOutBias") {
# Run the external seqOutBias binary with a given k-mer cut mask and
# return the path(s) of the bigWig output (plus/minus pair when bw.split).
# NOTE(review): `cutmask` and `seqoutbias.args` are interpolated directly
# into a shell command via system(); callers must pass trusted values.
if (!grepl("C", cutmask)) {
# add C to middle of cutmask (cut site marker); mask length must be even.
n = nchar(cutmask)
stopifnot(n %% 2 == 0)
left = substr(cutmask, 1, n / 2)
right = substr(cutmask, n / 2 + 1, n)
cutmask = paste(left, right, sep='C')
print(cutmask)
}
# build system command
#
# seqOutBias <fasta-file> <bam-file>... [options]
#
# --kmer-mask=<cutmask>
# --skip-bed
# if bw.split => --stranded
# --out=<... filename 1 ...>
# --bw=<... filename 2 ...>
#
# Output files are named after the prefix and the (possibly modified) mask.
outfilename = paste(prefix, cutmask, ".tbl", sep='')
out_arg = paste("--out=", outfilename, sep='')
bwfilename = paste(prefix, cutmask, ".bw", sep='')
bw_arg = paste("--bw=", bwfilename, sep='')
mask_arg = paste("--kmer-mask=", cutmask, sep='')
cmd = paste(sqcmd, seqoutbias.args, mask_arg, "--skip-bed", out_arg, bw_arg)
# execute
print(cmd)
system(cmd)
# clean up - remove files specific to this execution (the .tbl table only;
# the bigWig outputs are the return value and are kept).
if (cleanup) {
cat("deleting stuff ....\n")
cat("rm", outfilename,"\n")
unlink(outfilename)
}
# return output file names (duplicated when unstranded so callers can
# always treat the result as a plus/minus pair)
if (bw.split) {
c(paste(prefix, cutmask, "_plus.bw", sep=''), paste(prefix, cutmask, "_minus.bw", sep=''))
} else {
c(bwfilename, bwfilename)
}
}
#' Evaluate cutmask metric
#'
#' @param motif.sets List of data.frames containing BED6 format coordinates for motif locations; one data.frame per reference transcription factor
#' @param bw.plus BigWig file containing the cutmask scaled data to use for plus strand motif sites
#' @param bw.minus BigWig file containing the cutmask scaled data to use for minus strand motif sites
#'
#' @return metric value
eval.cutmask <- function(motif.sets, bw.plus, bw.minus) {
  # Metric = sum over motif sets of the standard deviation of the
  # per-position aggregate signal across all motif instances.
  metric <- 0
  for (bed6 in motif.sets) {
    # Per-position column sums over all motif sites (strand-aware query).
    x <- colSums(bed6.step.bpQuery.bigWig(bw.plus, bw.minus, bed6, step = 1,
                                          with.attributes = FALSE,
                                          as.matrix = TRUE,
                                          follow.strand = TRUE))
    # sd(x) computes exactly sqrt(sum((x - mean(x))^2) / (length(x) - 1)),
    # replacing the hand-rolled formula from the original.
    metric <- metric + sd(x)
  }
  return(metric)
}
|
43af92a9aec74f182ca8b982909efa26c49e9248
|
bec3168f8d6d63a6acf7bfa98dd5ee2c637b0ae6
|
/plot4.R
|
9fe5d3aad567da5a836efc46089cd5ea6ab34bae
|
[] |
no_license
|
y44k0v/ExData_Plotting1
|
3a200a747a0a268e4ccb861b79eb04f7d0b42579
|
dcfed13b025161afc9446352f02c7b8e52d64e9a
|
refs/heads/master
| 2022-06-02T01:34:54.805484
| 2017-04-05T09:46:15
| 2017-04-05T09:46:15
| 87,271,661
| 0
| 0
| null | 2022-05-10T16:44:15
| 2017-04-05T05:57:13
|
R
|
UTF-8
|
R
| false
| false
| 764
|
r
|
plot4.R
|
#Run first 14 lines of plot1.R to create the data set
# Plot 4: 2x2 panel of household power-consumption plots written to PNG.
# par(mfcol) fills column-first: left column top/bottom, then right column.
png(file = "plot4.png", bg = "white", width = 480, height = 480, units = "px")
par(mfcol = c(2,2))
# Top-left: global active power over time.
plot(Time,EPC$Global_active_power, type="l", xlab="",
ylab="Global Active Power (kilowatts)")
# Bottom-left: the three sub-metering series overlaid.
plot(Time, EPC$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(Time,EPC$Sub_metering_2, col="red")
lines(Time,EPC$Sub_metering_3, col="blue")
legend("topright", legend = names(EPC)[6:8],col = c("black","red","blue"), lty=1, bty="n")
# Top-right: voltage over time.
plot(Time,EPC$Voltage, type="l", xlab="datetime",
ylab="Voltage")
# Bottom-right: global reactive power over time.
plot(Time,EPC$Global_reactive_power, type="l", xlab="datetime",
ylab="Global_reactive_power")
dev.off()
|
e27cfa5c476412f672eacb8f8da26c5b5f7f15a3
|
419b6031e4a75171bb9bb0caa1aef9e027ea0628
|
/man/subset.corpus.Rd
|
a1c8a9011ba06c7367cef3e88550226062e966d4
|
[] |
no_license
|
HaiyanLW/quanteda
|
ce51f51939535b7bf5e9e7f4e560a8c3e98a8d3f
|
9f9e9acb795bf5e82580ef40b148c1b8c2f9e330
|
refs/heads/master
| 2021-01-11T06:36:07.089239
| 2016-10-25T08:35:22
| 2016-10-25T08:35:22
| 71,881,808
| 2
| 0
| null | 2016-10-25T09:37:00
| 2016-10-25T09:36:58
| null |
UTF-8
|
R
| false
| true
| 895
|
rd
|
subset.corpus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corpus.R
\name{subset.corpus}
\alias{subset.corpus}
\title{extract a subset of a corpus}
\usage{
\method{subset}{corpus}(x, subset, select, ...)
}
\arguments{
\item{x}{corpus object to be subsetted.}
\item{subset}{logical expression indicating elements or rows to keep: missing
values are taken as false.}
\item{select}{expression, indicating the attributes to select from the corpus}
\item{...}{not used}
}
\value{
corpus object
}
\description{
Returns subsets of a corpus that meet certain conditions, including direct
logical operations on docvars (document-level variables). Works just like
the normal subset command but for corpus objects.
}
\examples{
summary(subset(inaugCorpus, Year>1980))
summary(subset(inaugCorpus, Year>1930 & President=="Roosevelt", select=Year))
}
\seealso{
\code{\link{select}}
}
|
0bf27234641cc40444f3f0371e99527419c02f8e
|
98f15f69dabae8a614acb0e5a95324eef1730a85
|
/R/extract_features.R
|
18786f98fef343e599b77340c81ad4e19991058a
|
[
"Apache-2.0"
] |
permissive
|
IanniMuliterno/RBERT
|
d83430747f7a51afb473fc9475a6fe9fc1242e5a
|
f4359747e11ead8ed092b59c713f371d35c33668
|
refs/heads/master
| 2020-08-31T09:59:48.657821
| 2019-10-30T13:12:57
| 2019-10-30T13:12:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,513
|
r
|
extract_features.R
|
# Copyright 2019 Bedford Freeman & Worth Pub Grp LLC DBA Macmillan Learning.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# class InputExample_EF ---------------------------------------------------
#' Construct objects of class \code{InputExample_EF}
#'
#' An InputExample_EF is a single test example for feature extraction. Note that
#' this class is similiar to the InputExample class used for simple sequence
#' classification, but doesn't have a label property. The name of the id
#' property is also annoyingly different; should eventually standardize better
#' than the Python folks did. (RBERT issue #28.)
#'
#' @param unique_id Integer or character; a unique id for this example.
#' @param text_a Character; the untokenized text of the first sequence.
#' @param text_b (Optional) Character; the untokenized text of the second
#' sequence.
#'
#' @return An object of class \code{InputExample_EF}.
#' @export
#'
#' @examples
#' input_ex <- InputExample_EF(unique_id = 1,
#' text_a = "I work at the bank.")
InputExample_EF <- function(unique_id,
                            text_a,
                            text_b = NULL) {
  # Bundle the fields into a classed list; `text_b` remains NULL for
  # single-sequence examples.
  structure(
    list("unique_id" = unique_id,
         "text_a" = text_a,
         "text_b" = text_b),
    class = "InputExample_EF"
  )
}
# class InputFeatures_EF --------------------------------------------------
#' Construct objects of class \code{InputFeatures_FE}
#'
#' An InputFeatures object is a single set of (input) features of data used for
#' (output) feature extraction. Note that this class is similiar to the
#' InputFeatures class used for simple sequence classification, with annoying
#' differences. Will eventually standardize; till then, check parameter names.
#' (RBERT issue #28.)
#'
#' @param unique_id Integer or character; a unique id for this example.
#' @param tokens Character vector; the actual tokens in this example.
#' @param input_ids Integer vector; the sequence of token ids in this example.
#' @param input_mask Integer vector; sequence of 1s (for "real" tokens) and 0s
#' (for padding tokens).
#' @param input_type_ids Integer vector; aka token_type_ids. Indicators for
#' which sentence (or sequence) each token belongs to. Classical BERT supports
#' only 0s and 1s (for first and second sentence, respectively).
#'
#' @return An object of class \code{InputFeatures_FE}.
#' @keywords internal
.InputFeatures_EF <- function(unique_id,
                              tokens,
                              input_ids,
                              input_mask,
                              input_type_ids) {
  # Bundle the feature fields into a classed list.  Note the class attribute
  # is "InputFeatures" (not "InputFeatures_EF"), matching the original
  # constructor and its downstream consumers.
  structure(
    list("unique_id" = unique_id,
         "tokens" = tokens,
         "input_ids" = input_ids,
         "input_mask" = input_mask,
         "input_type_ids" = input_type_ids),
    class = "InputFeatures"
  )
}
# input_fn_builder_EF -----------------------------------------------------
#' Create an \code{input_fn} closure to be passed to TPUEstimator
#'
#' Creates an \code{input_fn} closure to be passed to TPUEstimator. The output
#' of this closure is the (modified) output of
#' \code{tensorflow::tf$data$Dataset$from_tensor_slices} (an object of class
#' "tensorflow.python.data.ops.dataset_ops.BatchDataset"). This function is
#' similar to \code{input_fn_builder} from run_classifier.R. (RBERT issue #28.)
#'
#' @param features A list of features (objects of class
#' \code{InputFeatures_EF}).
#' @param seq_length Integer; the maximum length (number of tokens) of each
#' example. (Examples should already be padded to this length by this point.)
#'
#' @return An \code{input_fn} closure to be passed to TPUEstimator.
#' @keywords internal
input_fn_builder_EF <- function(features,
seq_length) {
# Pull each field out of the InputFeatures_EF objects up front; the
# resulting lists are captured by the `input_fn` closure below.
all_unique_ids <- purrr::map(features,
function(f) { as.integer(f$unique_id) })
all_input_ids <- purrr::map(features,
function(f) { as.integer(f$input_ids) })
all_input_mask <- purrr::map(features,
function(f) { as.integer(f$input_mask) })
all_input_type_ids <- purrr::map(features,
function(f) { as.integer(f$input_type_ids) })
# Closure consumed by TPUEstimator; `params` supplies the batch size.
input_fn <- function(params) {
batch_size <- params$batch_size
num_examples <- length(features)
# "This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader."
d <- tensorflow::tf$data$Dataset$from_tensor_slices(
# "A nested structure of tensors, each having the same size in the 0th
# dimension." Try as a list. -JDB
# unique_ids is a 1-D tensor; the other fields are (num_examples,
# seq_length) int32 tensors.
list(
"unique_ids" = tensorflow::tf$constant(
all_unique_ids,
shape = tensorflow::shape(num_examples),
dtype = tensorflow::tf$int32
),
"input_ids" = tensorflow::tf$constant(
all_input_ids,
shape = tensorflow::shape(num_examples, seq_length),
dtype = tensorflow::tf$int32
),
"input_mask" = tensorflow::tf$constant(
all_input_mask,
shape = tensorflow::shape(num_examples, seq_length),
dtype = tensorflow::tf$int32
),
"input_type_ids" = tensorflow::tf$constant(
all_input_type_ids,
shape = tensorflow::shape(num_examples, seq_length),
dtype = tensorflow::tf$int32
)
)
)
# drop_remainder = FALSE keeps the final partial batch (prediction only).
d <- d$batch(batch_size = batch_size,
drop_remainder = FALSE)
return(d) # return from `input_fn`
}
return(input_fn)
}
# .model_fn_builder_EF -----------------------------------------------------
#' Define \code{model_fn} closure for \code{TPUEstimator}
#'
#' Returns \code{model_fn} closure, which is an input to \code{TPUEstimator}.
#' This function is similar to \code{model_fn_builder} from run_classifier.R.
#' (RBERT issue #28.)
#'
#' The \code{model_fn} function takes four parameters: \describe{
#' \item{features}{A list (or similar structure) that contains objects such as
#' \code{input_ids}, \code{input_mask}, \code{tokens}, and
#' \code{input_type_ids}. These objects will be inputs to the
#' \code{create_model} function.} \item{labels}{Not used in this function, but
#' presumably we need to keep this slot here.} \item{mode}{Character; value such
#' as "train", "infer", or "eval".} \item{params}{Not used in this function, but
#' presumably we need to keep this slot here.} }
#'
#' The output of \code{model_fn} is the result of a
#' \code{tf$contrib$tpu$TPUEstimatorSpec} call.
#'
#' @param bert_config \code{BertConfig} instance.
#' @param init_checkpoint Character; path to the checkpoint directory, plus
#' checkpoint name stub (e.g. "bert_model.ckpt"). Path must be absolute and
#' explicit, starting with "/".
#' @param layer_indexes Integer list; indexes (positive, or negative counting
#' back from the end) indicating which layers to extract as "output features".
#' (It needs to be specified here because we get them back as the model
#' "predictions".)
#' @param use_tpu Logical; whether to use TPU.
#' @param use_one_hot_embeddings Logical; whether to use one-hot word embeddings
#' or tf.embedding_lookup() for the word embeddings.
#'
#' @return \code{model_fn} closure for \code{TPUEstimator}.
#' @keywords internal
.model_fn_builder_EF <- function(bert_config,
init_checkpoint,
layer_indexes,
use_tpu,
use_one_hot_embeddings) {
# The `model_fn` for TPUEstimator.  Builds a BERT model for the batch in
# `features`, restores weights from `init_checkpoint`, and returns the
# requested encoder layer outputs (and attention data) as "predictions".
model_fn <- function(features, labels, mode, params) {
unique_ids <- features$unique_ids
input_ids <- features$input_ids
input_mask <- features$input_mask
input_type_ids <- features$input_type_ids
input_shape <- get_shape_list(input_ids, expected_rank = 2L)
# Feature extraction only: build the model in inference mode (no dropout).
model <- BertModel(config = bert_config,
is_training = FALSE,
input_ids = input_ids,
input_mask = input_mask,
token_type_ids = input_type_ids,
use_one_hot_embeddings = use_one_hot_embeddings)
if (mode != tensorflow::tf$estimator$ModeKeys$PREDICT) {
stop("Only PREDICT modes are supported.") # nocov
}
tvars <- tensorflow::tf$trainable_variables()
initialized_variable_names <- list()
scaffold_fn <- NULL
# Map checkpoint variables onto the freshly created graph variables.
gamap <- get_assignment_map_from_checkpoint(tvars, init_checkpoint)
assignment_map <- gamap$assignment_map
initialized_variable_names <- gamap$initialized_variable_names
if (use_tpu) { # nocov start
# On TPU, checkpoint restoration must happen inside a Scaffold.
tpu_scaffold <- function() {
tensorflow::tf$train$init_from_checkpoint(init_checkpoint,
assignment_map)
return(tensorflow::tf$train$Scaffold())
}
scaffold_fn <- tpu_scaffold # nocov end
} else {
tensorflow::tf$train$init_from_checkpoint(init_checkpoint,
assignment_map)
}
all_layers <- model$all_encoder_layers
# ATTN: modified below to get attention_data from model
attention_data <- model$attention_data
# ATTN: modified above to get attention_data from model
predictions <- list()
predictions[["unique_id"]] <- unique_ids
# Always include raw embeddings as the zeroth layer "output". We'll filter
# them back out if we don't want them.
predictions[["layer_output_0"]] <- model$embedding_output
for (i in seq_along(layer_indexes)) {
layer_index <- layer_indexes[[i]]
# Accomodate both positive and negative indices.
# Note that `all_layers` is 1-indexed!
actual_index <- .get_actual_index(layer_index, length(all_layers))
# For clarity, always use actual index to label outputs.
key_str <- paste0("layer_output_", actual_index)
predictions[[key_str]] <- all_layers[[actual_index]]
# ATTN: modified below to include attention_data in "predictions"
attn_key_str <- paste0("layer_attention_", actual_index)
predictions[[attn_key_str]] <- attention_data[[actual_index]]
# ATTN: modified above to include attention_data in "predictions"
}
output_spec <- tensorflow::tf$contrib$tpu$TPUEstimatorSpec(
mode = mode,
predictions = predictions,
scaffold_fn = scaffold_fn
)
return(output_spec)
} # end of `model_fn` definition
return(model_fn)
}
# .convert_single_example_EF -----------------------------------------------
#' Convert a single \code{InputExample_EF} into a single \code{InputFeatures_EF}
#'
#' Converts a single \code{InputExample_EF} into a single
#' \code{InputFeatures_EF}. Very similar to \code{convert_single_example} from
#' run_classifier.R. (RBERT issue #28.)
#'
#' @param ex_index Integer; the index of this example. This is used to determine
#'   whether or not to print out some log info (for debugging or runtime
#'   confirmation). It is assumed this starts with 1 (in R). (Currently unused
#'   in the body; kept for interface parity with convert_single_example.)
#' @param example The \code{InputExample_EF} to convert.
#' @param seq_length Integer; the maximum number of tokens that will be
#'   considered together.
#' @param tokenizer A tokenizer object to use (e.g. object of class
#'   FullTokenizer).
#'
#' @return An object of class \code{InputFeatures_EF}.
#' @keywords internal
.convert_single_example_EF <- function(ex_index,
                                       example,
                                       seq_length,
                                       tokenizer) {
  # note use of S3 classes for dispatch, not methods.
  tokens_a <- tokenize(tokenizer, example$text_a)
  tokens_b <- NULL
  if (!is.null(example$text_b)) {
    tokens_b <- tokenize(tokenizer, example$text_b)
  }
  if (!is.null(tokens_b)) {
    # Modifies `tokens_a` and `tokens_b` so that the total length is less than
    # the specified length. Account for [CLS], [SEP], [SEP] with "- 3".
    truncated_seq <- truncate_seq_pair(tokens_a, tokens_b,
                                       seq_length - 3)
    tokens_a <- truncated_seq$trunc_a
    tokens_b <- truncated_seq$trunc_b
  } else {
    # Account for [CLS] and [SEP] with "- 2". Use a clamped seq_len() rather
    # than 1:(seq_length - 2): the colon form produces the index vector
    # c(1, 0) when seq_length == 2 (and worse below that), which would
    # silently keep the first token instead of truncating to empty.
    max_tokens_a <- max(seq_length - 2L, 0L)
    if (length(tokens_a) > max_tokens_a) {
      tokens_a <- tokens_a[seq_len(max_tokens_a)]
    }
  }
  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  # Insert the "[CLS]" and "[SEP]" tokens in the appropriate places, and
  # build the parallel segment-type vector (0 = first segment, 1 = second).
  cls_token <- "[CLS]"
  sep_token <- "[SEP]"
  tokens <- unlist(list(cls_token, tokens_a, sep_token))
  input_type_ids <- rep(0, length(tokens))
  if (!is.null(tokens_b)) {
    tokens2 <- unlist(list(tokens_b, sep_token))
    input_type_ids2 <- rep(1, length(tokens2))
    tokens <- c(tokens, tokens2)
    input_type_ids <- c(input_type_ids, input_type_ids2)
  }
  input_ids <- convert_tokens_to_ids(tokenizer$vocab, tokens)
  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask <- rep(1, length(input_ids))
  # Zero-pad up to the sequence length.
  pad_length <- seq_length - length(input_ids)
  padding <- rep(0, pad_length)
  input_ids <- c(input_ids, padding)
  input_mask <- c(input_mask, padding)
  input_type_ids <- c(input_type_ids, padding)
  # Stop now if the lengths aren't right somehow.
  if (length(input_ids) != seq_length |
      length(input_mask) != seq_length |
      length(input_type_ids) != seq_length) {
    stop("input_ids, input_mask, or input_type_ids are wrong length.") # nocov
  }
  feature <- .InputFeatures_EF(unique_id = example$unique_id,
                               tokens = tokens,
                               input_ids = input_ids,
                               input_mask = input_mask,
                               input_type_ids = input_type_ids)
  return(feature)
}
# .convert_examples_to_features_EF -----------------------------------------
#' Convert \code{InputExample_EF}s to \code{InputFeatures_EF}
#'
#' Converts a set of \code{InputExample_EF}s to a list of
#' \code{InputFeatures_EF}. Very similar to \code{convert_examples_to_features}
#' from run_classifier.R. (RBERT issue #28.)
#'
#' @param examples List of \code{InputExample_EF}s to convert.
#' @param seq_length Integer; the maximum number of tokens that will be
#'   considered together.
#' @param tokenizer A tokenizer object to use (e.g. object of class
#'   FullTokenizer).
#'
#' @return A list of \code{InputFeatures}.
#' @keywords internal
.convert_examples_to_features_EF <- function(examples,
                                             seq_length,
                                             tokenizer) {
  # Convert each example paired with its (1-based) index. `seq_length` and
  # `tokenizer` are captured from the enclosing scope instead of being
  # threaded through map2's `...`, which keeps the call site readable.
  # (The previous version also computed an unused `num_examples` local.)
  purrr::map2(
    seq_along(examples),
    examples,
    function(ex_index, example) {
      .convert_single_example_EF(ex_index = ex_index,
                                 example = example,
                                 seq_length = seq_length,
                                 tokenizer = tokenizer)
    }
  )
}
# extract_features --------------------------------------------------------
#' Extract output features from BERT
#'
#' Given example sentences (as a list of \code{InputExample_EF}s), apply an
#' existing BERT model and capture certain output layers. (These could
#' potentially be used as features in downstream tasks.)
#'
#' @param examples List of \code{InputExample_EF}s to convert.
#' @param vocab_file path to vocabulary file. File is assumed to be a text file,
#'   with one token per line, with the line number corresponding to the index of
#'   that token in the vocabulary.
#' @param bert_config_file Character; the path to a json config file.
#' @param init_checkpoint Character; path to the checkpoint directory, plus
#'   checkpoint name stub (e.g. "bert_model.ckpt"). Path must be absolute and
#'   explicit, starting with "/".
#' @param output_file (optional) Character; file path (stub) for writing output
#'   to.
#' @param max_seq_length Integer; the maximum number of tokens that will be
#'   considered together.
#' @param layer_indexes Integer vector; indexes (positive, or negative counting
#'   back from the end) indicating which layers to extract as "output features".
#'   The "zeroth" layer embeddings are the input embeddings vectors to the first
#'   layer.
#' @param use_one_hot_embeddings Logical; whether to use one-hot word embeddings
#'   or tf.embedding_lookup() for the word embeddings.
#' @param batch_size Integer; how many examples to process per batch.
#' @param features Character; whether to return "output" (layer outputs, the
#'   default), "attention" (attention probabilities), "attention_arrays", or a
#'   combination thereof.
#'
#' @return A list with elements "output" (the layer outputs as a tibble),
#'   "attention" (the attention weights as a tibble), and/or "attention_arrays".
#' @export
#'
#' @examples
#' \dontrun{
#' BERT_PRETRAINED_DIR <- download_BERT_checkpoint("bert_base_uncased")
#' vocab_file <- file.path(BERT_PRETRAINED_DIR, 'vocab.txt')
#' init_checkpoint <- file.path(BERT_PRETRAINED_DIR, 'bert_model.ckpt')
#' bert_config_file <- file.path(BERT_PRETRAINED_DIR, 'bert_config.json')
#' examples <- list(InputExample_EF(unique_id = 1,
#'                                  text_a = "I saw the branch on the bank."),
#'                  InputExample_EF(unique_id = 2,
#'                                  text_a = "I saw the branch of the bank."))
#' feats <- extract_features(examples = examples,
#'                           vocab_file = vocab_file,
#'                           bert_config_file = bert_config_file,
#'                           init_checkpoint = init_checkpoint,
#'                           batch_size = 2L)
#' }
extract_features <- function(examples,
                             vocab_file,
                             bert_config_file,
                             init_checkpoint,
                             output_file = NULL,
                             max_seq_length = 128L,
                             layer_indexes = -4:-1,
                             use_one_hot_embeddings = FALSE,
                             batch_size = 2L,
                             features = c("output",
                                          "attention",
                                          "attention_arrays")) {
  # With several.ok = TRUE, match.arg would return ALL choices when `features`
  # is missing; explicitly default to just "output" in that case.
  if (missing(features)) {
    features <- "output"
  }
  features <- match.arg(features, several.ok = TRUE)
  # The "zeroth" layer (the fixed input embeddings) is always captured in the
  # loop below; remember whether the caller asked for it so it can be filtered
  # out of the final output otherwise. Index 0 is removed here because
  # .get_actual_index rejects zero.
  include_zeroth <- FALSE
  if (0 %in% layer_indexes) {
    include_zeroth <- TRUE
    layer_indexes <- layer_indexes[layer_indexes != 0]
  }
  layer_indexes <- as.list(layer_indexes)
  bert_config <- bert_config_from_json_file(bert_config_file)
  n_layers <- bert_config$num_hidden_layers
  tokenizer <- FullTokenizer(vocab_file = vocab_file,
                             do_lower_case = TRUE)
  is_per_host <- tensorflow::tf$contrib$tpu$InputPipelineConfig$PER_HOST_V2
  run_config <- tensorflow::tf$contrib$tpu$RunConfig(
    master = NULL, # assume for now *not* for TPU
    tpu_config = tensorflow::tf$contrib$tpu$TPUConfig(
      num_shards = 8L,
      per_host_input_for_training = is_per_host)
  )
  raw_features <- .convert_examples_to_features_EF(examples = examples,
                                                   seq_length = max_seq_length,
                                                   tokenizer = tokenizer)
  # NOTE(review): this indexes the list directly by the integer unique_id, so
  # it implicitly assumes unique_ids are small positive integers -- confirm
  # for callers that assign their own ids.
  unique_id_to_feature <- list()
  for (feature in raw_features) {
    unique_id_to_feature[[feature$unique_id]] <- feature
  }
  model_fn <- .model_fn_builder_EF(
    bert_config = bert_config,
    init_checkpoint = init_checkpoint,
    layer_indexes = unlist(layer_indexes),
    use_tpu = FALSE,
    use_one_hot_embeddings = use_one_hot_embeddings
  )
  estimator <- tensorflow::tf$contrib$tpu$TPUEstimator(
    use_tpu = FALSE, # no tpu support for now
    model_fn = reticulate::py_func(model_fn),
    config = run_config,
    predict_batch_size = batch_size
  )
  input_fn <- input_fn_builder_EF(features = raw_features,
                                  seq_length = max_seq_length)
  result_iterator <- estimator$predict(reticulate::py_func(input_fn),
                                       yield_single_examples = TRUE)
  # Set up the needed lists. They'll be filled in the while below.
  big_output <- NULL
  attention_arrays <- NULL
  attention_tibble <- NULL
  wants_output <- "output" %in% features
  wants_attention <- "attention" %in% features
  wants_attention_arrays <- "attention_arrays" %in% features
  if (wants_output) {
    big_output <- list()
  }
  if (wants_attention | wants_attention_arrays) {
    big_attention <- list()
  }
  # "...it is normal to keep running the iterator’s `next` operation till
  # Tensorflow’s tf.errors.OutOfRangeError exception is occurred."
  # (The method name differs between reticulate/Python versions, hence the
  # `next` vs `__next__` check.)
  while (TRUE) {
    result <- tryCatch({
      if ("next" %in% names(result_iterator)) {
        result_iterator$`next`() # nocov
      } else {
        result_iterator$`__next__`() # nocov
      }
    }, error = function(e) {
      FALSE
      # If we get error, `result` will be assigned this FALSE.
      # The only way to tell we've reached the end is to get an error. :-/
    })
    if (identical(result, FALSE)) {
      break
    }
    unique_id <- as.integer(result$unique_id)
    feature <- unique_id_to_feature[[unique_id]]
    num_tokens <- length(feature$tokens)
    output_str <- paste0("example_", unique_id)
    if (wants_output) {
      # Build a nested list: example -> token -> layer -> embedding values.
      output_list <- list()
      output_list$linex_index <- unique_id
      all_features <- list()
      for (i in seq_len(num_tokens)) {
        token <- feature$tokens[[i]]
        all_layers <- list()
        # Always include "zeroth" layer (fixed embeddings) for now
        zeroth_layer <- list("index" = 0,
                             "values" = result[["layer_output_0"]][i, ])
        all_layers[["layer_output_0"]] <- zeroth_layer
        for (j in seq_along(layer_indexes)) {
          layer_index <- layer_indexes[[j]]
          # Accomodate both positive and negative indices.
          # Note that `all_layers` is 1-indexed!
          actual_index <- .get_actual_index(layer_index, n_layers)
          # For clarity, always use actual index to label outputs.
          key_str <- paste0("layer_output_", actual_index)
          layer_output <- result[[key_str]]
          layers <- list()
          layers$index <- actual_index
          layers$values <- layer_output[i, ]
          all_layers[[key_str]] <- layers
        }
        # NOTE(review): this reuses the name `raw_features` for a per-token
        # record, clobbering the converted-examples list created above the
        # loop. Harmless (the original list is no longer needed here), but
        # confusing to read.
        raw_features <- list()
        raw_features$token <- token
        raw_features$layers <- all_layers
        feat_str <- paste0("token_", i)
        all_features[[feat_str]] <- raw_features
      }
      output_list$features <- all_features
      if (!is.null(output_file)) {
        # Optionally persist each example's output as `<stub><id>.rds`.
        out_filename <- paste0(output_file, unique_id, ".rds") # nocov start
        saveRDS(output_list, out_filename) # nocov end
      }
      big_output[[output_str]] <- output_list
    }
    if (wants_attention | wants_attention_arrays) {
      # ATTN: modified below to extract attention data
      this_seq_attn <- list()
      for (j in seq_along(layer_indexes)) {
        layer_index <- layer_indexes[[j]]
        # Accomodate both positive and negative indices.
        # Note that `all_layers` is 1-indexed!
        actual_index <- .get_actual_index(layer_index, n_layers)
        # For clarity, always use actual index to label outputs.
        key_str <- paste0("layer_attention_", actual_index)
        layer_attention <- result[[key_str]]
        # Save space by keeping only the relevant parts of each matrix.
        # NOTE(review): no drop = FALSE here, so a single-token sequence
        # would collapse dimensions -- presumably never happens because
        # [CLS]/[SEP] are always present; verify if that assumption changes.
        layer_attention <- layer_attention[ ,
                                            seq_len(num_tokens),
                                            seq_len(num_tokens)]
        # Just return matrix as-is for now.
        this_seq_attn[[key_str]] <- layer_attention
      }
      this_seq_attn[["sequence"]] <- feature$tokens
      big_attention[[output_str]] <- this_seq_attn
      # ATTN: modified above to extract attention data
    }
  }
  # Tidy everything
  if (wants_output) {
    big_output <- .extract_output_df(big_output)
    if (!include_zeroth) {
      # The zeroth layer was collected unconditionally above; drop it unless
      # the caller asked for it via 0 %in% layer_indexes.
      big_output <- dplyr::filter(big_output, layer_index != 0)
    }
  }
  if (wants_attention) {
    attention_tibble <- .extract_attention_df(big_attention)
  }
  if (wants_attention_arrays) {
    attention_arrays <- big_attention
  }
  # I do it this way so, if they're NULL, that value won't appear in the list,
  # rather than appearing there as "NULL" like it would if I set this up in one
  # step.
  to_return <- list()
  to_return$output <- big_output
  to_return$attention <- attention_tibble
  to_return$attention_arrays <- attention_arrays
  return(to_return)
}
# .get_actual_index ---------------------------------------------------
#' Standardize Indices
#'
#' Converts a possibly-negative index into its positive equivalent, using the
#' convention that \code{vec[[-1L]]} means the last element of \code{vec},
#' \code{vec[[-2L]]} the second-to-last, and so on. Indexing is 1-based.
#' Zero and out-of-range values are rejected with an error.
#'
#' @param index Integer; the index to normalize.
#' @param length Integer; the length of the vector or list we are indexing.
#'
#' @return The "actual" integer index, between 1 and \code{length}, inclusive.
#' @keywords internal
.get_actual_index <- function(index,
                              length) {
  index <- as.integer(index)
  # Reject indices whose magnitude exceeds the container length.
  if (abs(index) > length) {
    stop(paste("Index out of range.",
               "Absolute value of index must be within specified length."))
  }
  # Zero identifies no position in either counting convention.
  if (index == 0) {
    stop(paste("Ambiguous index.",
               "Only strictly positive or negative indices accepted."))
  }
  # Positive indices pass through unchanged; negative ones wrap from the end.
  if (index > 0) {
    return(index)
  }
  as.integer((length + index) %% length + 1)
}
# make_examples_simple ----------------------------------------------------
#' Easily make examples for BERT
#'
#' A convenience wrapper that converts a character vector or list of text into
#' a list of examples suitable for use with RBERT. Flat character elements
#' become single-segment examples (second segment NULL); length-2 elements
#' become two-segment sequences, e.g. for sentence-pair classification.
#'
#' @param seq_list Character vector or list; text to turn into examples.
#'
#' @return A list of \code{InputExample_EF} objects.
#' @export
#'
#' @examples
#' input_ex <- make_examples_simple(c("Here are some words.",
#'                                    "Here are some more words."))
#' input_ex2 <- make_examples_simple(list(c("First sequence, first segment.",
#'                                          "First sequence, second segment."),
#'                                        c("Second sequence, first segment.",
#'                                          "Second sequence, second segment.")))
make_examples_simple <- function(seq_list) {
  segment_counts <- purrr::map_int(seq_list, length)
  if (any(segment_counts > 2)) {
    warning("Examples must contain at most two distinct segments. ",
            "Segments beyond the second will be ignored.")
  }
  purrr::map(seq_along(seq_list), function(i) {
    segments <- seq_list[[i]]
    # Anything past the second segment is dropped (warned about above).
    second_segment <- NULL
    if (length(segments) > 1) {
      second_segment <- segments[[2]]
    }
    InputExample_EF(unique_id = i,
                    text_a = segments[[1]],
                    text_b = second_segment)
  })
}
# tidy features -----------------------------------------------------------
#' Extract Embeddings
#'
#' Pulls the numeric embedding-vector components out of the output of
#' \code{\link{extract_features}}. The columns identifying example sequence,
#' segment, token, and row are built separately, by
#' \code{\link{.extract_output_labels}}.
#'
#' @param layer_outputs The \code{layer_outputs} component.
#'
#' @return The embedding vector components as a tbl_df, for all tokens and all
#'   layers.
#' @keywords internal
.extract_output_values <- function(layer_outputs) {
  # Every embedding vector has the same length; read it off the first token
  # of the first example.
  n_dims <- length(
    layer_outputs$example_1$features$token_1$layers[[1]]$values
  )
  # One matrix per example; each token contributes one row per layer.
  example_mats <- purrr::map(
    layer_outputs,
    function(seq_data) {
      token_mats <- purrr::map(
        seq_along(seq_data$features),
        function(tok_index) {
          layer_list <- seq_data$features[[tok_index]]$layers
          t(vapply(
            layer_list,
            function(layer_data) {layer_data$values},
            FUN.VALUE = numeric(n_dims)
          ))
        }
      )
      do.call(rbind, token_mats)
    }
  )
  all_rows <- do.call(rbind, example_mats)
  colnames(all_rows) <- paste0("V", seq_len(n_dims))
  return(tibble::as_tibble(all_rows))
}
#' Extract Labels for Embeddings
#'
#' Builds the identifier columns (sequence, segment, token, layer) that
#' accompany the embedding vector values produced by
#' \code{\link{extract_features}}.
#'
#' @param layer_outputs The \code{layer_outputs} component.
#'
#' @return The label columns as a tbl_df, one row per token per layer.
#' @keywords internal
.extract_output_labels <- function(layer_outputs) {
  # One row per (example, token, layer). Iterate by position rather than with
  # imap: the feature elements are named, and we want the index, not the name.
  labels <- purrr::map_dfr(
    layer_outputs,
    function(ex_data) {
      purrr::map_dfr(
        seq_along(ex_data$features),
        function(tok_index) {
          tok_data <- ex_data$features[[tok_index]]
          purrr::map_dfr(
            tok_data$layers,
            function(layer_data) {
              dplyr::tibble(sequence_index = ex_data$linex_index,
                            token_index = tok_index,
                            token = tok_data$token,
                            layer_index = layer_data$index)
            })
        })
    })
  # Add a column indexing which segment (1 or 2) each token belongs to. At
  # this point the only way to identify second-segment tokens is the rule
  # that every token after the first [SEP] is in the second segment, so count
  # [SEP] tokens cumulatively within each (sequence, layer) group.
  labels <- dplyr::mutate(labels, is_sep = token == "[SEP]")
  labels <- dplyr::group_by(labels, sequence_index, layer_index)
  labels <- dplyr::mutate(labels, segment_index = cumsum(is_sep) - is_sep + 1)
  labels <- dplyr::ungroup(labels)
  dplyr::select(labels,
                sequence_index,
                segment_index,
                token_index,
                token,
                layer_index)
}
#' Extract Embedding Vectors
#'
#' Combines the label columns and the numeric embedding components from the
#' output of \code{\link{extract_features}} into a single tbl_df. The result
#' typically has a large number of columns (> 768), so it is slow to
#' \code{\link{View}}; \code{\link[dplyr]{glimpse}} is a quicker way to peek
#' at the values.
#'
#' @param layer_outputs The \code{layer_outputs} component.
#'
#' @return The embedding vector components as a tbl_df, for all tokens and all
#'   layers.
#' @keywords internal
.extract_output_df <- function(layer_outputs) {
  dplyr::bind_cols(
    .extract_output_labels(layer_outputs),
    .extract_output_values(layer_outputs)
  )
}
#' Tidy Attention Probabilities
#'
#' Reshapes raw attention probabilities into one long tibble, one row per
#' (sequence, token, attended-token, layer, head) combination.
#'
#' @param attention_probs Raw attention probabilities.
#'
#' @return A tibble of attention weights.
#' @keywords internal
.extract_attention_df <- function(attention_probs) {
  # Target columns: sequence_index, token_index, segment_index, token,
  # layer_index, head_index, attention_token_index, attention_segment_index,
  # attention_token, attention_weight. The first four match the
  # layer_outputs tibble.
  token_labels <- .extract_attention_labels(attention_probs)
  weights <- .extract_attention_weights(attention_probs)
  layer_map <- .extract_attention_layer_names(attention_probs)
  # Attach labels for the attending token...
  combined <- dplyr::left_join(
    weights,
    token_labels,
    by = c("sequence_index", "token_index")
  )
  # ...then for the attended-to token (the suffix marks its columns)...
  combined <- dplyr::left_join(
    combined,
    token_labels,
    by = c("sequence_index", "attention_token_index" = "token_index"),
    suffix = c("", "_attention")
  )
  # ...then translate each list position into the true layer number.
  combined <- dplyr::left_join(combined, layer_map, by = "fake_layer_index")
  tibble::as_tibble(
    dplyr::select(
      combined,
      sequence_index,
      token_index,
      segment_index,
      token,
      layer_index,
      head_index,
      attention_token_index,
      attention_segment_index = segment_index_attention,
      attention_token = token_attention,
      attention_weight
    )
  )
}
#' Tidy Attention Weights
#'
#' @inheritParams .extract_attention_df
#'
#' @return A tibble of attention weights, one row per (sequence, layer, head,
#'   token, attended-token).
#' @keywords internal
.extract_attention_weights <- function(attention_probs) {
  # Flatten the nested example -> layer -> head -> token structure into one
  # long data frame; each map_dfr level records its position via `.id`.
  weights <- purrr::map_dfr(
    unname(attention_probs),
    function(seq_data) {
      # Drop the token list; only the per-layer attention arrays remain.
      seq_data$sequence <- NULL
      purrr::map_dfr(
        unname(seq_data),
        function(layer_array) {
          purrr::map_dfr(
            purrr::array_tree(layer_array),
            function(head_slice) {
              purrr::map_dfr(
                head_slice,
                function(token_row) {
                  data.frame(
                    attention_token_index = seq_along(token_row),
                    attention_weight = unlist(token_row)
                  )
                },
                .id = "token_index"
              )
            },
            .id = "head_index"
          )
        },
        .id = "fake_layer_index"
      )
    },
    .id = "sequence_index"
  )
  # The `.id` columns arrive as character; convert them to integer indexes.
  dplyr::mutate_at(
    weights,
    c("sequence_index", "fake_layer_index", "head_index", "token_index"),
    as.integer
  )
}
#' Tidy Attention Layer Names
#'
#' @inheritParams .extract_attention_df
#'
#' @return A tibble mapping each layer's positional ("fake") index within the
#'   attention list to its true layer number, parsed from the list names.
#' @keywords internal
.extract_attention_layer_names <- function(attention_probs) {
  layer_keys <- names(attention_probs[[1]])
  layer_keys <- layer_keys[layer_keys != "sequence"]
  # The trailing digits of e.g. "layer_attention_12" are the real layer index.
  true_indexes <- as.integer(
    stringr::str_extract(
      layer_keys,
      "\\d+$"
    )
  )
  data.frame(
    fake_layer_index = seq_along(layer_keys),
    layer_index = true_indexes
  )
}
#' Tidy Token Labels, Etc
#'
#' @inheritParams .extract_attention_df
#'
#' @return A tibble with token_index, token, sequence_index, and segment_index.
#' @keywords internal
.extract_attention_labels <- function(attention_probs) {
  # Pull just the token lists out of each example's attention record and
  # unnest them into one row per token.
  tokens_by_sequence <- purrr::map(unname(attention_probs), "sequence")
  labels <- tibble::enframe(tokens_by_sequence, name = "sequence_index")
  labels <- tidyr::unnest_longer(
    labels,
    value,
    indices_to = "token_index",
    values_to = "token"
  )
  # Every token after the first [SEP] (within a sequence) is in segment 2;
  # count [SEP]s cumulatively to recover the segment index.
  labels <- dplyr::mutate(labels, is_sep = token == "[SEP]")
  labels <- dplyr::group_by(labels, sequence_index)
  labels <- dplyr::mutate(labels, segment_index = cumsum(is_sep) - is_sep + 1L)
  labels <- dplyr::ungroup(labels)
  dplyr::select(labels, -is_sep)
}
|
b8640d4e4d16006b8cfda9f60a320b60d95aa936
|
ae600e461b37998bd5610f13b5551f4a1119d090
|
/Miscellaneous/mostCommonInteger.R
|
977b39e351d18e8c2688ddc667fe90457edbfe89
|
[] |
no_license
|
RickyBensics/R-Programming
|
68c4b5261ca95f6778d2decb4c4751e322569cde
|
698474db7f7e366ca5adcdcfc3637b98e0f7232e
|
refs/heads/master
| 2021-01-11T14:55:21.848252
| 2017-05-16T16:22:42
| 2017-05-16T16:22:42
| 80,251,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
mostCommonInteger.R
|
# Find the most frequent integer in an array.
# NOTE: `array` shadows base::array(); the name is kept so the script's
# printed output matches the original.
array <- sample(1:100, 100, replace = TRUE)
array <- sort(array)

# Tabulate counts of each distinct value. which.max() returns the FIRST
# maximum; because table() orders numeric values ascending, ties break toward
# the smallest value -- exactly the original loop's behavior of scanning the
# sorted vector with a strict ">" comparison. This also removes the original
# inner while loop, which read array[j + 1] (NA) before checking that j + 1
# was in bounds.
counts <- table(array)
mostCommonInteger <- as.integer(names(counts)[which.max(counts)])
mostCommonIntegerCount <- as.integer(max(counts))

array
paste("The most common integer is ", mostCommonInteger, " counted ",
      mostCommonIntegerCount, " times")
|
9c9824607e1bd9b06036879a3bc449e678ecf6ec
|
da38efaad6bc1a58b80a01d3416c45d876265f43
|
/CREMP_Reef_code/17_Distance.R
|
1491a6b6e2a1490509171f84affcdc8974ea2035
|
[] |
no_license
|
cestes-19/rvc18
|
0d533604d11290e3523741c947c91150e826ed5e
|
c66118ac184b20cc218344c218ef1824f38242b9
|
refs/heads/master
| 2021-07-25T19:54:05.149061
| 2020-08-18T16:52:06
| 2020-08-18T16:52:06
| 208,288,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,626
|
r
|
17_Distance.R
|
## this takes a long time (FYI)... read in CREMP_distances_all.csv for analyses
library(sf)
library(geosphere)
library(osmdata)
library(rio)
library(ggplot2)
library(geosphere)
library(purrr)
library(progress)
library(magrittr)
library(binr)
library(tidyverse)
library(dygraphs)
library(htmlwidgets)
# spDistPoint2Line <- function (p, line, distfun)
# {
# ## rewrite of internal function from geosphere
# test <- !sp::is.projected(line)
# if (!isTRUE(test)) {
# if (is.na(test)) {
# warning("Coordinate reference system of SpatialPolygons object is not set. Assuming it is degrees (longitude/latitude)!")
# }
# else {
# stop("Points are projected. They should be in degrees (longitude/latitude)")
# }
# }
#
# x <- line@lines
# n <- length(x)
# res <- matrix(nrow = nrow(p), ncol = 3)
# colnames(res) <- c("distance", "lon", "lat")
#
# line_coords <- map(x, ~(map(.@Lines, ~(.@coords)))) #basically an unlist
# pb <- progress_bar$new(
# total = length(line_coords),
# format = "(:spin) :current of :total, :percent, eta: :eta"
# )
# res[] <- Inf
# result <- reduce(
# .x = line_coords,
# .init = res,
# .f = function(res, crd){
# pb$tick()
# crd <- crd[[1]]
# r <- dist2Line(p, crd, distfun) # have to live without ID
# k <- r[, 1] < res[, 1]
# res[k, ] <- r[k, ]
# return(res)
# }
# )
# return(result)
# }
#
# dist2Line <- function (p, line, distfun = distGeo)
# {
# p <- geosphere:::.pointsToMatrix(p)
# if (inherits(line, "SpatialPolygons")) {
# line <- methods::as(line, "SpatialLines")
# }
# if (inherits(line, "SpatialLines")) {
# return(spDistPoint2Line(p, line, distfun))
# }
#
# line <- geosphere:::.pointsToMatrix(line)
# line1 <- line[-nrow(line), , drop = FALSE]
# line2 <- line[-1, , drop = FALSE]
# seglength <- distfun(line1, line2)
#
# res <-
# p %>%
# array_branch(1) %>%
# map(
# function(xy){
# crossdist <- abs(dist2gc(line1, line2, xy))
# trackdist1 <- alongTrackDistance(line1, line2, xy)
# trackdist2 <- alongTrackDistance(line2, line1, xy)
# mintrackdist <- pmin(trackdist1, trackdist2)
# maxtrackdist <- pmax(trackdist1, trackdist2)
# crossdist[maxtrackdist >= seglength] <- NA
# nodedist <- distfun(xy, line)
# warnopt = getOption("warn")
# options(warn = -1)
# distmin1 <- min(nodedist, na.rm = TRUE)
# distmin2 <- min(crossdist, na.rm = TRUE)
# options(warn = warnopt)
# if (distmin1 <= distmin2) {
# j <- which.min(nodedist)
# return(c(distmin1, line[j, ]))
# }
# else {
# j <- which.min(crossdist)
# if (trackdist1[j] < trackdist2[j]) {
# bear <- bearing(line1[j, ], line2[j, ])
# pt <- destPoint(line1[j, ], bear, mintrackdist[j])
# return(c(crossdist[j], pt))
# }
# else {
# bear <- bearing(line2[j, ], line1[j, ])
# pt <- destPoint(line2[j, ], bear, mintrackdist[j])
# return(c(crossdist[j], pt))
# }
# }
# }
# ) %>%
# simplify %>%
# matrix(ncol = 3, byrow = TRUE)
#
# colnames(res) <- c("distance", "lon", "lat")
# return(res)
# }
#
#
# setwd('C:/Users/cara.estes/Documents')
#
#
# CREMP <- import("Summer_2020/ArcGIS_Analyses/CREMP_Locations.xls")%>%
# select(sitename,latDD,lonDD)
#
# CREMP_locations <- CREMP %>%
# st_as_sf(coords = c('lonDD','latDD')) %>%
# st_set_crs(4326)
#
# osm_box <- getbb (place_name = "Florida") %>%
# opq() %>%
# add_osm_feature("natural","coastline") %>%
# osmdata_sf()
#
# # use dist2Line from geosphere - only works for WGS84
# #data
# dist <- geosphere::dist2Line(p = st_coordinates(CREMP_locations),
# line = st_coordinates(osm_box$osm_lines)[,1:2])
#
# CREMP_distances <- cbind( CREMP %>%
# rename(y=latDD,x=lonDD),dist) %>%
# mutate(miles=distance/1609)
#
# # export(CREMP_distances,"Summer_2020/CREMP_distance/CREMP_distances_all.csv")
#
# ## plot
#
# ggplot() +
# geom_sf(data=osm_box$osm_lines) +
# geom_sf(data=CREMP_locations) +
# coord_sf(xlim = c(-80,-83),ylim = c(27.5,24))+
# geom_segment(data=CREMP_distances,aes(x=x,y=y,xend=lon,yend=lat))
#ggsave("Summer_2020/CREMP_distance/CREMP_sites_from_land.png")
### Compare distances farther from those closer... first get the range values (CREMP_distances_all.csv)
CREMP_distances <- read_csv("Summer_2020/CREMP_distance/CREMP_distances_all.csv")
## add back subregionID
CREMP_ID <- import("Summer_2020/ArcGIS_Analyses/CREMP_Locations.xls") %>%
  select(sitename,subRegionI)
# NOTE(review): left_join without `by` joins on all shared columns (here,
# presumably just sitename -- dplyr prints the join message at run time).
CREMP_distances_w_subregion <- left_join(CREMP_distances,CREMP_ID)
# unique() already returns a data frame; the data.frame() wrapper just strips
# the tibble class.
CREMP_distances_w_subregion <- data.frame(unique(CREMP_distances_w_subregion))
## filter out DT,LK,MK,UK
keys <- c("DT","LK","MK","UK")
## arrange() puts the distances in ascending order
CREMP_Keys <- CREMP_distances_w_subregion %>%
  filter(subRegionI %in% keys) %>%
  select(sitename,distance,subRegionI,miles) %>%
  arrange(distance)
## Average the distances for reefs with multiple station locations
# The duplicated() | duplicated(fromLast = TRUE) trick keeps EVERY row whose
# sitename appears more than once (plain duplicated() would drop the first).
Average_distance <- CREMP_Keys[duplicated(CREMP_Keys$sitename)|duplicated(CREMP_Keys$sitename, fromLast = TRUE),]
Average_distance_merge <- Average_distance %>%
  group_by(sitename) %>%
  mutate(KMAverage = mean(distance)) %>%
  mutate(MilesAverage = mean(miles)) %>%
  ungroup() %>%
  select(sitename,KMAverage,subRegionI,MilesAverage) %>%
  unique()
# Rename the averaged columns back to the originals so the rbind below lines up.
colnames(Average_distance_merge) <- c("sitename","distance","subRegionI","miles")
# Average_distance_merge <- ddply(Average_distance, 'sitename', summarize, distance =mean(distance),
#                    subRegionI=head(subRegionI,1), miles=head(miles,1))
# Single-station reefs (everything NOT in the averaged set) plus the averaged
# multi-station reefs give one distance row per reef.
test <- anti_join(CREMP_Keys,Average_distance_merge, by = "sitename")
CREMP_Keys_average <- rbind(test,Average_distance_merge)
## Combine with CHI and RHI data from 14_CHI_RHI_MPAs
All_CHI <- read_csv("Summer_2020/Dygraphs_CHI_RHI/CHI_combined.csv")
CHI_distance <- left_join(All_CHI,CREMP_Keys_average, by = "sitename")
All_RHI <- read_csv("Summer_2020/Dygraphs_CHI_RHI/RHI_combined.csv")
RHI_distance <- left_join(All_RHI,CREMP_Keys_average, by = "sitename")
## Pull out only the reefs and miles to get bins and distance code for CHI and RHI.
## Break points (miles from shore) separating shallow / mid / offshore reefs.
## These are the cut values originally reported by binr::bins() for the CHI
## reef set; the identical cuts are deliberately applied to the RHI reefs (see
## note below), so they are hoisted into named constants instead of being
## repeated six times as long literals.
shallow_break <- 4.80572166136828
mid_break <- 6.31235987622819
## Label each reef with a DistanceCode: "1" = shallow (<= shallow_break),
## "2" = mid (<= mid_break), "3" = offshore (finite distance > mid_break).
label_distance_code <- function(reefs) {
  shallow <- subset(reefs, miles <= shallow_break)
  mid <- subset(reefs, miles > shallow_break & miles <= mid_break)
  offshore <- subset(reefs, miles > mid_break & miles < Inf)
  shallow$DistanceCode <- "1"
  mid$DistanceCode <- "2"
  offshore$DistanceCode <- "3"
  rbind(shallow, mid, offshore)
}
## Unique reef/distance pairs for CHI
CHI_reefs <- CHI_distance %>%
  select(sitename, miles) %>%
  unique()
## Find the bins just to break up the data into shallow, mid, and offshore for CHI
CHI_bins <- bins(CHI_reefs$miles, target.bins = 3, max.breaks = NA,
                 exact.groups = TRUE, minpts = 1)
view(CHI_bins$binct)
CHI_distance_code <- label_distance_code(CHI_reefs)
## Unique reef/distance pairs for RHI
RHI_reefs <- RHI_distance %>%
  select(sitename, miles) %>%
  unique()
## Find the bins just to break up the data into shallow, mid, and offshore for RHI
RHI_bins <- bins(RHI_reefs$miles, target.bins = 3, max.breaks = NA,
                 exact.groups = TRUE, minpts = 1)
view(RHI_bins$binct)
## NOTE: as in the original code, the CHI-derived break points are applied to
## RHI as well (RHI_bins is computed only for inspection); the downstream
## comment asserts the resulting codes agree for every reef.
RHI_distance_code <- label_distance_code(RHI_reefs)
## these are the same codes for each reef.... now merge the DistanceCode with CHI and RHI averages
CHI_distance_code <- left_join(All_CHI,CHI_distance_code)
RHI_distance_code <- left_join(All_RHI,RHI_distance_code)
## CHI plot
## One data frame per distance band ("1" shallow, "2" mid, "3" offshore) with
## the mean CHI per year, so the three can be merged on Year below.
CHI_1_graph <- CHI_distance_code %>%
  filter(DistanceCode == "1") %>%
  group_by(Year) %>%
  mutate(CHIShallow = mean(CHI_Average)) %>%
  ungroup() %>%
  select(Year,CHIShallow) %>%
  unique()
CHI_2_graph <- CHI_distance_code %>%
  filter(DistanceCode == "2") %>%
  group_by(Year) %>%
  mutate(CHIMid = mean(CHI_Average)) %>%
  ungroup() %>%
  select(Year,CHIMid) %>%
  unique()
CHI_3_graph <- CHI_distance_code %>%
  filter(DistanceCode == "3") %>%
  group_by(Year) %>%
  mutate(CHIOffshore = mean(CHI_Average)) %>%
  ungroup() %>%
  select(Year,CHIOffshore) %>%
  unique()
## merge
CHI_distance_graph <- Reduce(merge,list(CHI_1_graph,CHI_2_graph,CHI_3_graph))
# NOTE(review): setwd() changes global state for the rest of the session; all
# later relative paths (including the RHI widget below) depend on it.
setwd("C:/Users/cara.estes/Documents/Summer_2020/Dygraphs_CHI_RHI/plots")
# Interactive CHI time-series by distance band, saved as a standalone page.
dygraph(CHI_distance_graph, main = 'CHI Trends <br> Distance from Shore') %>%
  dyAxis("y", label = "CHI %",valueRange = c(0,100)) %>%
  dyAxis("x", label = "Year") %>%
  dyOptions(stackedGraph = F, fillGraph = T, fillAlpha = .01)%>%
  saveWidget(file=paste0( getwd(), "/CHI_distance_from_shore.html"))
## RHI plot: same structure as the CHI section above -- mean RHI per year
## within each distance band ("1" shallow, "2" mid, "3" offshore).
RHI_1_graph <- RHI_distance_code %>%
  filter(DistanceCode == "1") %>%
  group_by(Year) %>%
  mutate(RHIShallow = mean(RHI_Average)) %>%
  ungroup() %>%
  select(Year,RHIShallow) %>%
  unique()
RHI_2_graph <- RHI_distance_code %>%
  filter(DistanceCode == "2") %>%
  group_by(Year) %>%
  mutate(RHIMid = mean(RHI_Average)) %>%
  ungroup() %>%
  select(Year,RHIMid) %>%
  unique()
RHI_3_graph <- RHI_distance_code %>%
  filter(DistanceCode == "3") %>%
  group_by(Year) %>%
  mutate(RHIOffshore = mean(RHI_Average)) %>%
  ungroup() %>%
  select(Year,RHIOffshore) %>%
  unique()
## merge
RHI_distance_graph <- Reduce(merge,list(RHI_1_graph,RHI_2_graph,RHI_3_graph))
# Interactive RHI time-series by distance band; written relative to the
# working directory set in the CHI section above.
dygraph(RHI_distance_graph, main = 'RHI Trends <br> Distance from Shore') %>%
  dyAxis("y", label = "RHI",valueRange = c(0,5)) %>%
  dyAxis("x", label = "Year") %>%
  dyOptions(stackedGraph = F, fillGraph = T, fillAlpha = .01)%>%
  saveWidget(file=paste0( getwd(), "/RHI_distance_from_shore.html"))
|
50bbe18ffb283bca627fa23f5c03280659d4fcb1
|
d48a3c9455fabdbe3257d81437006e224740e4ee
|
/man/algo.Rd
|
d30e770497d5cdae42fd73d79589ea0294d36d50
|
[
"MIT"
] |
permissive
|
feddelegrand7/algo
|
ef2a12166751446d4cce8f6903a71c80c0d874ac
|
d7caa63d1472f190eedddaf131f7deea263e627e
|
refs/heads/master
| 2023-01-20T02:35:07.375735
| 2020-07-19T20:44:48
| 2020-07-19T20:44:48
| 272,821,663
| 11
| 0
|
NOASSERTION
| 2020-11-24T15:14:52
| 2020-06-16T22:07:14
|
R
|
UTF-8
|
R
| false
| true
| 1,226
|
rd
|
algo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algo.R
\name{algo}
\alias{algo}
\title{Implement the Algolia Places address search-autocompletion menu on shiny text inputs}
\usage{
algo(element, type = "address", language = "en_US", countries = NULL)
}
\arguments{
\item{element}{the shiny text element that will be used for the Algolia Places autocompletion menu}
\item{type}{Restrict the search results to a specific type. The user can choose from "city", "country", "address", "busStop", "trainStation", "townhall" and "airport". Defaults to "address".}
\item{language}{Get the results in a specific language. The user can pass two-letter language codes (ISO 639-1). Defaults to "en_US"}
\item{countries}{Change the countries to search in. The user can pass a vector of two-letter country codes (ISO 3166-1). Defaults to the whole world.}
}
\value{
An address search-autocompletion menu on shiny text inputs
}
\description{
In order to use this function, the user must get an application ID and an API key from the Algolia website
and store them within their environment (please refer to the package's vignette). They must also put the use_algolia() function at the beginning of their shiny ui.
}
|
6dd2bcdddbd7771ae573138225936b9b3501e8dc
|
dcaf51e33e8e6e5b3c88e772994298a0058c7d1f
|
/Plot2.R
|
d6778d6409f8115343208ede6ce94590ed87f69b
|
[] |
no_license
|
kgoffe/EDA_assignment1
|
9dfc398d5a548c49dc5a538988830c2041687675
|
80e4a80115eea81b97d9a2e2a0c146b8392c890d
|
refs/heads/master
| 2020-04-02T13:53:55.745487
| 2018-10-24T13:00:02
| 2018-10-24T13:00:02
| 154,501,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
Plot2.R
|
# Plot 2: Global Active Power (kW) over 2007-02-01 and 2007-02-02.
# library() (not require()) so a missing package fails loudly.
library(dplyr)
library(lubridate)
#setwd("~/Coursera/EDA_assignment1/data")

# Read the raw power-consumption extract. This dataset marks missing readings
# with "?", so map those to NA at read time instead of letting as.numeric()
# warn about them later.
elec <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   na.strings = "?")
# Convert Date using the lubridate library
elec$Date <- dmy(as.character(elec$Date))
# Keep only the two target days (Date < 2007-02-03 covers Feb 1 and Feb 2)
elec_f <- elec %>% filter(Date >= "2007-02-01" & Date < "2007-02-03")
# Columns may arrive as factors (R < 4.0) or character (R >= 4.0);
# as.numeric(as.character(.)) is correct in both cases, unlike the fragile
# as.numeric(levels(.))[.] idiom, which only works on factors.
elec_f$Global_active_power <- as.numeric(as.character(elec_f$Global_active_power))
elec_f$Sub_metering_1 <- as.numeric(as.character(elec_f$Sub_metering_1))
elec_f$Sub_metering_2 <- as.numeric(as.character(elec_f$Sub_metering_2))
# Build a full POSIXct timestamp from Date + Time for the x axis
elec_f$Time <- as.POSIXct(paste(elec_f$Date, as.character(elec_f$Time)))
# Plot 2 - Global Active Power against time (day shown on x-axis)
png(filename = "Plot2.png", width = 480, height = 480)
with(elec_f, plot(Time, Global_active_power, type = "l",
                  ylab = "Global Active Power (kilowatts)", xlab = ""))
dev.off()
|
7346460a852efe2709b43d3ebf05ff85d76dc558
|
8342f5b555e96b0d8499f6007290097538754be2
|
/archive/XX_calculate_FQR.R
|
2dc4125406405a07dfa4d8e0e41ff12a2bd7db32
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_model_prep_code
|
220e64b2b967765c04e0662ccfee7dbe00d3491b
|
15d0176290d0a11703880ba1ea24825cd5ca021d
|
refs/heads/master
| 2023-06-06T12:15:44.917221
| 2021-07-01T19:45:25
| 2021-07-01T19:45:25
| 251,318,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,051
|
r
|
XX_calculate_FQR.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Create rows for the fluoroquinolone insensitivity #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#create a rows for ciprofloxacin by current breakpoints or nalidixic acid if cipro is unavailable
#if cipro is only available for combined serotypes then included seperate NAR ones
cipro <- mydata[mydata$antimicrobial=="ciprofloxacin",]
cipro <- cipro[!is.na(cipro$number_ri),]
cipro1 <- cipro[cipro$species!="Typhi & Paratyphi",]
cipro2 <- cipro[cipro$species=="Typhi & Paratyphi", ]
nal <- mydata[mydata$antimicrobial=="nalidixic acid",]
nalid <- unique(nal$source_id[nal$species!="Typhi & Paratyphi"])
#drop ciprofloxacin records for combined serotypes if nalidixic acid are available for combined
cipro2 <- cipro2[which(!(cipro2$source_id%in%nalid)),]
cipro <- rbind(cipro1, cipro2)
rm(cipro1, cipro2)
#replace the resistance number and percentage with the adjusted ones
cipro$number_resistant <- cipro$number_ri
cipro$number_susceptible <- cipro$number_susceptible_adj
#identify nalidixic acid records for studies without ciprofloxacin records
cipro.id <- unique(cipro$source_id)
#remove 2796 from cipro.id and assign to antimicrobial cipro+nal as had 2 time periods - 1 with cipro results and one with nal
cipro.id <- cipro.id[cipro.id!=2796]
nal <- nal[which(!(nal$source_id%in%cipro.id)),]
nal <-nal[nal$row_id != 2228,]
cipro$antimicrobial <- "FQR - ciprofloxacin"
cipro$resistance_breakpoints <- "R>=0.125ug/ml"
nal$antimicrobial <- "FQR - nalidixic acid"
cipro.nal <- rbind(cipro, nal)
#clean up options for 2796
cipro.nal$antimicrobial[cipro.nal$source_id == 2796] <- "FQR - cipro & nalidixic acid"
cipro.nal$resistance_breakpoints[cipro.nal$source_id == 2796] <- "Not specified"
#join back onto the main dataset
new.rows <- unique(cipro.nal$row_id)
# new.data <- mydata[which(!(mydata$row_id%in%new.rows)),] # keep all rows for the moment
new.data <- mydata
mydata<- rbind(new.data, cipro.nal)
rm(cipro.nal, new.data, new.rows, nalid, cipro.id, nal, cipro)
|
0cf448f7ac15c5cab8676b657eb773d8a3fb475c
|
bae12d4a373f25031f95415b6cf53fbe3fa7fb34
|
/R/nba.R
|
bc0e3ffe41919fb76a30f651b056b4a23ef78da5
|
[] |
no_license
|
ck2136/nba
|
d91ed2ee4f330f70fe6fda0f3bc51c1fa27a68df
|
52a09391866079413a098b8c15d7c64a1cc681c4
|
refs/heads/master
| 2021-04-27T14:29:04.953062
| 2018-02-23T00:30:10
| 2018-02-23T00:30:10
| 122,454,284
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,201
|
r
|
nba.R
|
#' Net Benefit Analysis of Prediction Models and Predictive Markers
#'
#' \code{nba} implements Vicker's et al's net benefit calculation
#' (based on weighing the true and false positive cases) for prediction models
#' and predictive markers. It can be used to compare the clinical utility
#' of prediction models.
#'
#' @param formula A formula containing the outcome and predictor variables.
#' @param data A data frame containing the variables in the model.
#' @param xstart The starting probability threshold value.
#' @param xstop The ending probability threshold value.
#' @param xby The interval between \code{xstart} and \code{xstop}.
#' @param ymin The minimum value of net benefit (for displaying/graphing).
#' @param harm A vector of length equal to the vector of predictors. Specify the value of harm into model assessment.
#' @param graph If set to \code{TRUE}, net benefit graphic of the prediction model will be outputted on the device graphics output.
#' @param intervention If set to \code{TRUE}, net reduction in interventions per 100 patients are plotted.
#' @param pred.model Prediction models to use. This should be a vector of models such as \code{"glm"} or \code{"svm"}.
#' @param interventionper Number used to calculate the number of interventions avoided by. Default is \code{100}.
#' @param smooth If set to \code{TRUE}, a smoothed net benefit values are calculated.
#' @param loess.span The value of \code{span} for the loess smoothing.
#' @return If the nba() function is stored in an object, the resulting output will contain an object of type list
#' containing values of the net benefit of the specified prediction models in addition to the reduction in
#' interventions.
#' @author Chong Kim \email{chong.kim@ucdenver.edu} and Andrew Vickers \email{vickersa@mskcc.org} based on the \href{http://journals.sagepub.com/doi/abs/10.1177/0272989x06295361}{2006 Article}
#' @references Vickers, A. (2008), Decision Curve Analysis: A Novel Method for Evaluating Prediction Models Vol 26,
#' Issue 6, 2006.
#' @seealso \code{\link[DecisionCurve]{DecisionCurve}}
#' @examples
#' ## standard net benefit analysis
#' nba(cancer ~ famhistory + age + marker, data.set, pred.mode= c("glm","rpart","svm","rf"))
#'
#' ## loess smooth
#' modified_dca(cancer ~ famhistory + age + marker, data.set, pred.mode= c("glm","rpart","svm","rf"), smooth = TRUE, loess.span = 0.5)
#'
#' ## plot intervention reduced
#' modified_dca(cancer ~ famhistory + age + marker, data.set, pred.mode= c("glm","rpart","svm","rf"), smooth = TRUE, loess.span = 0.5, intervention = TRUE)
nba <- function(formula, data, xstart=0.01, xstop=0.99, xby=0.01,
ymin=-0.05, harm=NULL,graph=TRUE, intervention=FALSE, pred.model,
interventionper=100, smooth=FALSE,loess.span=0.10) {
# LOADING REQUIRED LIBRARIES
require(stats)
require(xgboost)
require(e1071)
require(rpart)
require(randomForest)
require(dplyr)
# data MUST BE A DATA FRAME
if (class(data)!="data.frame") {
stop("Input data must be class data.frame")
}
# formula MUST BE A FORMULA
if (class(formula) != "formula") {
stop("Formula is not a valid formula class!")
}
#ONLY KEEPING COMPLETE CASES
data=data[complete.cases(data[, all.vars(formula)]), all.vars(formula)]
# outcome MUST BE CODED AS 0 AND 1
if (max(data[[all.vars(formula)[1]]])>1 | min(data[[all.vars(formula)[1]]])<0) {
stop("outcome cannot be less than 0 or greater than 1")
}
# xstart IS BETWEEN 0 AND 1
if (xstart<0 | xstart>1) {
stop("xstart must lie between 0 and 1")
}
# xstop IS BETWEEN 0 AND 1
if (xstop<0 | xstop>1) {
stop("xstop must lie between 0 and 1")
}
# xby IS BETWEEN 0 AND 1
if (xby<=0 | xby>=1) {
stop("xby must lie between 0 and 1")
}
# xstart IS BEFORE xstop
if (xstart>=xstop) {
stop("xstop must be larger than xstart")
}
# STORING THE NUMBER OF PREDICTORS SPECIFIED.. here it is based on the number of algorithms
pred.n=length(pred.model)
# Based on the algorithm selected attach the predicted probabilities onto the original dataset
if("glm" %in% pred.model) {
fit <- glm(formula, data, family=binomial(link = "logit"))
data$pred_glm <- fit$fitted.values
}
if("rpart" %in% pred.model) {
fit <- rpart(formula, data)
data$pred_rpart <- predict(fit, data)
}
if("rf" %in% pred.model) {
fit <- randomForest(formula, data)
data$pred_rf <- predict(fit, data)
}
if("svm" %in% pred.model) {
fit <- svm(formula, data)
data$pred_svm <- predict(fit, data)
}
if("gbm" %in% pred.model) {
fit <- xgboost(formula, data)
data$pred_gbm <- predict(fit, data)
}
#IF harm SPECIFIED ENSURING THAT EACH PREDICTOR HAS A SPECIFIED HARM
if (length(harm)>0 & pred.n!=length(harm)) {
stop("Number of harms specified must be the same as the number of predictors being checked.")
}
#INITIALIZING DEFAULT VALUES FOR PROBABILITES AND HARMS IF NOT SPECIFIED
if (length(harm)==0) {
harm=rep(0,pred.n)
}
######### CALCULATING NET BENEFIT #########
N=dim(data)[1]
event.rate=colMeans(data[all.vars(formula)[1]])
# CREATING DATAFRAME THAT IS ONE LINE PER THRESHOLD PER all AND none STRATEGY
nb=data.frame(seq(from=xstart, to=xstop, by=xby))
names(nb)="threshold"
interv=nb
nb["all"]=event.rate - (1-event.rate)*nb$threshold/(1-nb$threshold)
nb["none"]=0
# CYCLING THROUGH EACH PREDICTION MODEL AND CALCULATING NET BENEFIT
for(m in 1:pred.n){
for(t in 1:length(nb$threshold)){
# COUNTING TRUE POSITIVES AT EACH THRESHOLD
tp=mean(data[data[[paste0("pred_",pred.model[m])]]>=nb$threshold[t],all.vars(formula)[1]])*sum(data[[paste0("pred_",pred.model[m])]]>=nb$threshold[t])
# COUNTING FALSE POSITIVES AT EACH THRESHOLD
fp=(1-mean(data[data[[paste0("pred_",pred.model[m])]]>=nb$threshold[t],all.vars(formula)[1]]))*sum(data[[paste0("pred_",pred.model[m])]]>=nb$threshold[t])
#setting TP and FP to 0 if no observations meet threshold prob.
if (sum(data[[paste0("pred_",pred.model[m])]]>=nb$threshold[t])==0) {
tp=0
fp=0
}
# CALCULATING NET BENEFIT
nb[t,paste0("pred_",pred.model[m])]=tp/N - fp/N*(nb$threshold[t]/(1-nb$threshold[t])) - harm[m]
}
interv[paste0("pred_",pred.model[m])]=(nb[paste0("pred_",pred.model[m])] - nb["all"])*interventionper/(interv$threshold/(1-interv$threshold))
}
# CYCLING THROUGH EACH PREDICTOR AND SMOOTH NET BENEFIT AND INTERVENTIONS AVOIDED
for(m in 1:pred.n) {
if (smooth==TRUE){
lws=loess(data.matrix(nb[!is.na(nb[[paste0("pred_",pred.model[m])]]),paste0("pred_",pred.model[m])]) ~ data.matrix(nb[!is.na(nb[[paste0("pred_",pred.model[m])]]),"threshold"]),span=loess.span)
nb[!is.na(nb[[paste0("pred_",pred.model[m])]]),paste0("pred_",pred.model[m],"_sm")]=lws$fitted
lws=loess(data.matrix(interv[!is.na(nb[[paste0("pred_",pred.model[m])]]),paste0("pred_",pred.model[m])]) ~ data.matrix(interv[!is.na(nb[[paste0("pred_",pred.model[m])]]),"threshold"]),span=loess.span)
interv[!is.na(nb[[paste0("pred_",pred.model[m])]]),paste0("pred_",pred.model[m],"_sm")]=lws$fitted
}
}
# PLOTTING GRAPH IF REQUESTED
if (graph==TRUE) {
require(graphics)
# PLOTTING INTERVENTIONS AVOIDED IF REQUESTED
if(intervention==TRUE) {
# initialize the legend label, color, and width using the standard specs of the none and all lines
legendlabel <- NULL
legendcolor <- NULL
legendwidth <- NULL
legendpattern <- NULL
#getting maximum number of avoided interventions
ymax=max(interv[paste0("pred_",pred.model)],na.rm = TRUE)
#INITIALIZING EMPTY PLOT WITH LABELS
plot(x=nb$threshold, y=nb$all, type="n" ,xlim=c(xstart, xstop), ylim=c(ymin, ymax), xlab="Threshold probability", ylab=paste("Net reduction in interventions per",interventionper,"patients"))
#PLOTTING INTERVENTIONS AVOIDED FOR EACH PREDICTOR
for(m in 1:pred.n) {
if (smooth==TRUE){
lines(interv$threshold,data.matrix(interv[paste0("pred_",pred.model[m],"_sm")]),col=m,lty=2)
} else {
lines(interv$threshold,data.matrix(interv[paste0("pred_",pred.model[m])]),col=m,lty=2)
}
# adding each model to the legend
legendlabel <- c(legendlabel, paste0("pred_",pred.model[m]))
legendcolor <- c(legendcolor, m)
legendwidth <- c(legendwidth, 1)
legendpattern <- c(legendpattern, 2)
}
} else {
# PLOTTING NET BENEFIT IF REQUESTED
# initialize the legend label, color, and width using the standard specs of the none and all lines
legendlabel <- c("None", "All")
legendcolor <- c(17, 8)
legendwidth <- c(2, 2)
legendpattern <- c(1, 1)
#getting maximum net benefit
ymax=max(nb[names(nb)!="threshold"],na.rm = TRUE)
# inializing new benfit plot with treat all option
plot(x=nb$threshold, y=nb$all, type="l", col=8, lwd=2 ,xlim=c(xstart, xstop), ylim=c(ymin, ymax), xlab="Threshold probability", ylab="Net benefit")
# adding treat none option
lines(x=nb$threshold, y=nb$none,lwd=2)
#PLOTTING net benefit FOR EACH PREDICTOR
for(m in 1:pred.n) {
if (smooth==TRUE){
lines(nb$threshold,data.matrix(nb[paste0("pred_",pred.model[m],"_sm")]),col=m,lty=2)
} else {
lines(nb$threshold,data.matrix(nb[paste0("pred_",pred.model[m])]),col=m,lty=2)
}
# adding each model to the legend
legendlabel <- c(legendlabel, paste0("pred_",pred.model[m]))
legendcolor <- c(legendcolor, m)
legendwidth <- c(legendwidth, 1)
legendpattern <- c(legendpattern, 2)
}
}
# then add the legend
legend("topright", legendlabel, cex=0.8, col=legendcolor, lwd=legendwidth, lty=legendpattern)
}
#RETURNING RESULTS
results=list()
results$N=N
results$pred.model=data.frame(cbind(pred.model,harm,probability))
names(results$pred.model)=c("models","harm.applied","probability")
results$interventions.avoided.per=interventionper
results$net.benefit=nb
results$interventions.avoided=interv
return(results)
}
|
ad2d5b180e18b7cff00833bf1e94fc67e000aa3d
|
e9146ed051fadd6ed7241237c87f7bd71c95062f
|
/man/get_header.Rd
|
540802b6f5523fdedf89cf1596e23082610c9b7f
|
[
"MIT"
] |
permissive
|
mvpsc30/crosslink
|
0470bd5ca992c8172ae68069e58305eddf96edf0
|
1ee507aaa268abdbb2c7d11af078831660de3a85
|
refs/heads/master
| 2023-04-06T13:19:06.880188
| 2021-04-16T05:41:21
| 2021-04-16T05:41:21
| 383,643,936
| 1
| 0
|
MIT
| 2021-07-07T01:48:36
| 2021-07-07T01:48:35
| null |
UTF-8
|
R
| false
| true
| 399
|
rd
|
get_header.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crosslink.R
\name{get_header}
\alias{get_header}
\title{Get header data for plotting}
\usage{
get_header(object, layout = NULL)
}
\arguments{
\item{object}{a CrossLink object}
\item{layout}{name of the layout to be retrived or set}
}
\value{
data.frame
}
\description{
Retrieve 2D coordinates of headers
}
\examples{
}
|
87bed5461951b07bd7e7db9e063fd0a278c138f9
|
f36b2ad1dc17ec05278f13c7fa72a1fd8343ee19
|
/tests/testthat/test-check-names.R
|
d29c0b2764d48aea828188bec209839fadc3f5c2
|
[
"MIT"
] |
permissive
|
poissonconsulting/chk
|
45f5d81df8a967aad6e148f0bff9a9f5b89a51ac
|
c2545f04b23e918444d4758e4362d20dfaa8350b
|
refs/heads/main
| 2023-06-14T19:32:17.452025
| 2023-05-27T23:53:25
| 2023-05-27T23:53:25
| 199,894,184
| 43
| 3
|
NOASSERTION
| 2023-01-05T18:50:23
| 2019-07-31T16:42:59
|
R
|
UTF-8
|
R
| false
| false
| 1,457
|
r
|
test-check-names.R
|
test_that("check_names", {
expect_identical(check_names(c(x = 1)), c(x = 1))
expect_invisible(check_names(c(x = 1)))
expect_identical(check_names(c(x = 1), "x"), check_names(c(x = 1), "x"))
expect_identical(
check_names(c(x = 1, y = 2), "x"),
check_names(c(x = 1, y = 2), "x")
)
expect_identical(
check_names(c(x = 1, y = 2), c("y", "x")),
check_names(c(x = 1, y = 2), c("y", "x"))
)
})
test_that("check_names fails", {
expect_chk_error(
check_names(character(0)),
"^`character[(]0[)]` must be named[.]$"
)
x <- structure(list(), .Names = character(0))
x <- structure(list(), .Names = character(0))
expect_chk_error(
check_names(c(x = 1), exclusive = TRUE),
"^`c[(]x = 1[)]` must not have any elements[.]$"
)
expect_chk_error(
check_names(x, "x"),
"^`names[(]x[)]` must include 'x'[.]$"
)
expect_chk_error(
check_names(c(x = 1), c("x", "y")),
"`names[(]c[(]x = 1[)][)]` must include 'y'[.]$"
)
expect_chk_error(
check_names(c(x = 1, z = 2), "x", exclusive = TRUE),
"^`names[(]c[(]x = 1, z = 2[)][)]` must not include 'z'[.]$"
)
expect_chk_error(
check_names(c(x = 1, y = 2), c("y", "x"), order = TRUE),
"`names[(]c[(]x = 1, y = 2[)][)]` must include 'y' and 'x' in that order[.]$"
)
expect_chk_error(
check_names(c(x = 1, y = 2), c("y", "x"), order = TRUE, x_name = "b"),
"`names[(]b[)]` must include 'y' and 'x' in that order[.]$"
)
})
|
1decd6fc124cddfef23cd24dfae3c1eaf9d8cdb6
|
d60a4a66919a8c54d29a4677574b418107b4131d
|
/man/tsmat.Rd
|
b4e7e84a57a0f0e4f71cddf08c65b4ede2703d8a
|
[] |
no_license
|
cran/tsapp
|
65203e21a255e832f0ad9471f9ee308793eb7983
|
f2679a3d5ee0e3956a4ba013b7879324f77cf95f
|
refs/heads/master
| 2021-11-12T21:18:18.835475
| 2021-10-30T10:30:02
| 2021-10-30T10:30:02
| 248,760,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 670
|
rd
|
tsmat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxilary.r
\name{tsmat}
\alias{tsmat}
\title{\code{tsmat} constructs a (n-p+1,p) matrix from a time series
where the first column is the shortened series y[p],...,y[n], the second is y[p-1],...,y[n-1], etc.}
\usage{
tsmat(y, p)
}
\arguments{
\item{y}{the series, a vector or a time series of length n}
\item{p}{desired number of columns}
}
\value{
mat (n-p+1,p) matrix
}
\description{
\code{tsmat} constructs a (n-p+1,p) matrix from a time series
where the first column is the shortened series y[p],...,y[n], the second is y[p-1],...,y[n-1], etc.
}
\examples{
out <- tsmat(c(1:20),4)
}
|
46c43d3b4af955aa1c90719d212fc74261058b53
|
27f53c5a9aa2d0962b5cd74efd373d5e9d9e0a99
|
/R/convertX.R
|
73705f7b29143c6efcfc27e55b4ba80f74cb864c
|
[] |
no_license
|
dickoa/mlr
|
aaa2c27e20ae9fd95a0b63fc5215ee373fa88420
|
4e3db7eb3f60c15ce2dfa43098abc0ed84767b2d
|
refs/heads/master
| 2020-12-24T13:44:59.269011
| 2015-04-18T19:57:42
| 2015-04-18T19:57:42
| 31,710,800
| 2
| 0
| null | 2015-04-18T19:57:43
| 2015-03-05T11:29:18
|
R
|
UTF-8
|
R
| false
| false
| 1,229
|
r
|
convertX.R
|
# start is a named list, flatten it to an unnamed num vec, of correct order as in par.set
convertStartToNumeric = function(start, par.set) {
ids = getParamIds(par.set, repeated = FALSE)
start = start[ids]
as.numeric(unlist(start))
}
convertXNumeric = function(x, par.set) {
ids = getParamIds(par.set, repeated = TRUE, with.nr = FALSE)
# factor usually does sort(unique(...)) for levels which changes order!
x = split(x, factor(ids, levels = unique(ids)))
names(x) = getParamIds(par.set, repeated = FALSE)
roundIntegers(x, par.set = par.set)
}
convertXMatrixCols = function(xs, par.set) {
rownames(xs) = colnames(xs) = NULL
xs = lapply(seq_col(xs), function(i) {
convertXNumeric(xs[, i], par.set)
})
}
# convert logical param values from chars to true logicals, eg irace produces strings in tuning
convertXLogicalsNotAsStrings = function(x, par.set) {
types = getParamTypes(par.set, use.names = TRUE)
j = types %in% c("logical", "logicalvector")
if (any(j))
x[j] = lapply(x[j], as.logical)
return(x)
}
roundIntegers = function(x, par.set) {
Map(function(par, v) {
if (par$type %in% c("integer", "integervector"))
as.integer(round(v))
else
v
}, par.set$pars, x)
}
|
3c0fa6ef609fa9ad13e49cb75e91c94ab613c56b
|
2ef87066df69ba12d8fc613ed95e8118391d1589
|
/pttstability/man/sampler_fun0.Rd
|
0fd3f7ecb9477d3d899814c9f7fb913a286b083c
|
[] |
no_license
|
adamtclark/pts_r_package
|
9203e49e1f30e7f048c58afd0f003d2401bd2aa1
|
8a5dab701012b9273f989989d3bf9b1c6aa932a9
|
refs/heads/master
| 2022-10-23T16:33:39.833688
| 2022-08-24T17:33:48
| 2022-08-24T17:33:48
| 177,118,587
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 597
|
rd
|
sampler_fun0.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesfun.R
\name{sampler_fun0}
\alias{sampler_fun0}
\title{Default sampler function for prior}
\usage{
sampler_fun0(n = 1, minv, maxv)
}
\arguments{
\item{n}{number of random draws to take from the priors}
\item{minv}{Vector of minimum values to return for each parameter}
\item{maxv}{Vector of maximum values to return for each parameter}
}
\value{
returns random draws from the priors
}
\description{
Draws samples from a flat prior
}
\keyword{MCMC}
\keyword{optimization}
\keyword{stability}
\keyword{time-series}
|
741d9928096f59470f756134faa9ddeb157125c4
|
560a56d957ffb97a824b2ccc3c3daedc47651f42
|
/man/is_pubmed_id.Rd
|
8ca4f7fed6677ed3243fe4e6e649e952c558e37b
|
[
"MIT"
] |
permissive
|
ramiromagno/gwasrapidd
|
fe3b789b0b8ef4a7eb6fd9a151bfb631f63fd264
|
f0b02769a5cbf553b9642a2a107bab8c19a80613
|
refs/heads/master
| 2023-06-09T09:04:05.353540
| 2023-06-04T18:46:38
| 2023-06-04T18:46:38
| 155,549,651
| 70
| 14
|
NOASSERTION
| 2022-12-22T22:55:53
| 2018-10-31T11:55:20
|
R
|
UTF-8
|
R
| false
| true
| 690
|
rd
|
is_pubmed_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse-utils.R
\name{is_pubmed_id}
\alias{is_pubmed_id}
\title{Is a string a PubMed ID?}
\usage{
is_pubmed_id(str, convert_NA_to_FALSE = TRUE)
}
\arguments{
\item{str}{A character vector of strings.}
\item{convert_NA_to_FALSE}{Whether to treat \code{NA} as \code{NA}
(\code{convert_NA_to_FALSE = FALSE}) or whether to return \code{FALSE} when
an \code{NA} is found (\code{convert_NA_to_FALSE = TRUE}).}
}
\value{
A logical vector.
}
\description{
Find which strings are valid PubMed IDs (returns \code{TRUE}). PubMed IDs are
tested against the following regular expression: \code{^\\\\d+$}.
}
\keyword{internal}
|
50d8788c5034713a6eaa1ae072604e0a3ca3f523
|
1136ef7f3ab73dd36dbcc98b75808b8dc99e4d36
|
/ejemplos.R
|
0f42c0c98f2fd8867e65b6262a63e294811c1ea1
|
[] |
no_license
|
libreim/data-mining-classification
|
c6a61cde623b5ce8a05adecc9d3be1dc112473ac
|
c81dc8ba0649a24b78a575b835c5374a8f06464c
|
refs/heads/master
| 2020-12-25T22:06:57.521386
| 2015-02-28T15:13:42
| 2015-02-28T15:13:42
| 24,795,937
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
ejemplos.R
|
# Ejemplos asociados al seminario de algoritmos de
# clasificación en Minería de datos
# install.packages("RWeka")
# install.packages('e1071',dependencies=TRUE)
# install.packages("partykit")
library(RWeka)
library(e1071)
library(partykit)
#----- Gráfico multiclase -----
data(iris)
plot(iris$Petal.Length ~ iris$Petal.Width, col=iris$Species, pch=20)
#----- kNN -----
# Cargamos dataset
iris <- read.arff(system.file("arff", "iris.arff", package = "RWeka"))
# Generamos clasificador kNN con k = 10
classifier <- IBk(class ~., data = iris, control = Weka_control(K = 10))
# Evaluamos con validación cruzada
evaluate_Weka_classifier(classifier, numFolds = 5)
#----- Árbol de decisión -----
# Cargamos dataset
iris <- read.arff(system.file("arff", "iris.arff", package = "RWeka"))
# Generamos clasificador C4.5 con parámetros por defecto
classifier <- J48(class ~., data = iris)
# Mostramos árbol de decisión
plot(classifier)
#----- SVM -----
# Cargamos dataset integrado en R
data(iris)
# Nos quedamos con 2 características
iris2 <- data.frame(Petal.Width = iris$Petal.Width,
Petal.Length = iris$Petal.Length,
Species = iris$Species)
# Eliminamos una de las clases
iris2$Species[iris2$Species == "virginica"] <- "versicolor"
# Generamos un modelo de máquina de vectores de soporte
model <- svm(Species~., data = iris2)
# Mostramos un gráfico del modelo
plot(model, iris2, grid = 200)
|
e8b3781758ef984cec9bbc3367fcf2ba1639746f
|
1a63510ce53f071bd6a948af435b5fded26c9801
|
/man/lmtp-package.Rd
|
4ec695c560f2e2a832dbee8d66cc35aaeda99851
|
[
"MIT"
] |
permissive
|
rfherrerac/lmtp
|
2d51e1101eaeb7ac7057aea21fe39aa5c00da981
|
f7252b32c17878eae3b86a375b4d714b55eae926
|
refs/heads/master
| 2022-10-06T17:37:46.385501
| 2020-06-08T18:27:30
| 2020-06-08T18:27:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,278
|
rd
|
lmtp-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lmtp-package.R
\docType{package}
\name{lmtp-package}
\alias{lmtp}
\alias{lmtp-package}
\title{lmtp: Non-parametric Causal Effects of Feasible Interventions Based on Modified Treatment Policies}
\description{
Non-parametric estimators for casual effects based on longitudinal modified treatment
policies as described in Diaz, Williams, and Hoffman (2020) <arXiv:https://arxiv.org/abs/2006.01366>, traditional point treatment,
and traditional longitudinal effects. Continuous, binary, and categorical treatments are allowed as well are
censored outcomes. The treatment mechanism is estimated via a density ratio classification procedure
irrespective of treatment variable type. Estimation is performed using the Super Learner from `sl3`.
For both continuous and binary outcomes, additive treatment effects can be calculated and relative
risks and odds ratios may be calculated for binary outcomes.
}
\author{
\strong{Maintainer}: Nicholas Williams \email{niw4001@med.cornell.edu} (\href{https://orcid.org/0000-0002-1378-4831}{ORCID})
Authors:
\itemize{
\item Iván Díaz \email{ild2005@med.cornell.edu} (\href{https://orcid.org/0000-0001-9056-2047}{ORCID})
}
}
\keyword{internal}
|
9e3ecc94ede33f67f829db40a5f4214e05701c60
|
414cd802b37149e6a963087c83cfbfa65f4b6f67
|
/man/merge_oecd_data.Rd
|
caaa651a7708f6d14ddfd32f1c9d5b7afd2f4a09
|
[] |
no_license
|
graebnerc/competitivenessData
|
b132e834d98230e7d28c3cfe9f996f0fe770442e
|
bad18af93ee844670fcb0e6567a3504fc93b625f
|
refs/heads/master
| 2023-05-09T10:28:55.838914
| 2021-06-02T09:26:24
| 2021-06-02T09:26:24
| 297,374,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 294
|
rd
|
merge_oecd_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_oecd_functions.R
\name{merge_oecd_data}
\alias{merge_oecd_data}
\title{Merges OECD data}
\usage{
merge_oecd_data(oecd_data_list)
}
\description{
Merges the OECD data provided in a list and tests for duplicates
}
|
b42073cebf6e762e07afff0ffe7df541171b9a35
|
f0d026c47399c70e0b6b35d428eb301463782246
|
/Clases/Clase 2 26-2-18.R
|
57f73bc0b0f3cc7027ef05c01e7d4f4dbc0cfb52
|
[] |
no_license
|
FernandoLuna1997/Software-Acturarial-III
|
e80f8ee541521803d8bb27abd7cff864276cf928
|
1a3f3810642acb228b9f16cf63bf7d111874ebf2
|
refs/heads/master
| 2021-05-09T09:21:23.426806
| 2018-05-22T04:50:37
| 2018-05-22T04:50:37
| 119,440,733
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,765
|
r
|
Clase 2 26-2-18.R
|
########################### LISTAS ########################################
#Lista son un tipo de vector que contiene ELEMENTOS DE DIFERENTES TIPOS
# Para crear un lista se hace con función "list"
x<- list(1,"a",TRUE,3+2i,5L, 1:50)
x
# Una lista es un vector de vectores
#Todos los elemntos de la lista mantienen su clase original
#"[[1]]" indican posición
#"[1]" Indica la posicion del elemento
############################# MATRICES #####################################
#Son vectores con un atributo llamado dimensión, este atributo es un vector en sí
#mismos, son compuestos de dos elementos: #ncol #nrow
#para crear se usa matrix
m<- matrix(nrow= 2, ncol= 3)
m
dim(m)
attributes(m)
#COMO LLENAR UNA MATRIZ
m<- matrix(data= 1:6, nrow=2, ncol=3)
#ES LO MISMO VERLO COMO:
m<-matrix(1:6,2,3)
m
#SE LLENÓ COLUMNA POR COLUMNA
#AHORA FILA POR FILA CON "byrow"
m<- matrix(data= 1:6, nrow= 2,ncol= 3,byrow=TRUE)
m<- matrix(1:6,2,3,T)
m
#Una manera alternativa de crear una matriz, es desde un vector y
#modificar sus dimensiones
m<- 1:10
m
dim (m)<- c(2,5)
m
#Otra forma de crear matrices es: unir vectores diferentes
x<- 1:3
y<- 10:12
#Cbind, unir columnas
cbind(x,y)
#rbind, unir renglones
rbind(x,y)
################################ FACTORES ####################################
#Se utilizan para representar datos categoricos
x <- factor(c("Si", "Si", "No", "No", "Si"))
x
#Factores con orden definido
x<- factor(c("Azul", "Verde", "Verde", "Azul", "Rojo"),
levels=c("Rojo", "Amarillo", "Verde", "Naranja"))
x
table(x)
#Table te dice cuantas veces aparece cada categoria en el vector x
unclass(x)
#Valores faltantes
x <- c(1,2,NA,10,3)
is.na(x) #Valor faltante detectado
is.nan(x)#Valor no numerico que no es faltante
y <- c(1,2,NaN,10,3)
is.na(y) #Valor faltante detectado
is.nan(y)#Valor no numerico que no es faltante
########################## DATA FRAMES #########################
#Se utilizan para almacenar datos tabulares
#Es una lista en la que cada elemento debe tener la mis ma longitud
#Cada elemento puede pensarse como una columna de una matriz y la longitud
#de estos elementos el numero de filas
#A diferencia de las matrices, los data frames pueden almacenar diferentes
#clases de objetos (como listas con vectores)
#Tienen un atributo que es "row.names"
#Se crean a partir de un llamado estilo "readtable()" o "readcsv()"
#Puede convertirde en una matriz usando "data.matrix()"
x<- data.frame(Col1=1:4, Col2= c(T,T,F,F))
x
nrow(x)
ncol(x)
################## NOMBRRAR ELEMENTOS #########################################
x<- 1:3
names(x)
names(x)<- c("foo","bar","norf")
names(x)
#Las listas tambien pueden tener nombres
#Las matrices tambien
m<- matrix(1:4,2,2)
dimnames(m)<- list(c("a","b"), c("c","d"))
m
|
f52b02e67aea951601ddda9245b0081a992e71b4
|
e43a7ab4cfff179caab5fc11937172b55ef93838
|
/R/timeSeries_generate.R
|
6ed0d4784b83aff8909da985d2ecd8c8e00dec9c
|
[] |
no_license
|
muschitiello/PrescRiptions
|
b12d97e6f708e838fc058e5f5672c07eb8dca551
|
e4c0bd86734938107c2c2f4347cfc345f9d31e60
|
refs/heads/master
| 2021-02-08T06:52:21.321111
| 2020-06-06T17:24:27
| 2020-06-06T17:24:27
| 244,121,159
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,018
|
r
|
timeSeries_generate.R
|
##' timeSeries_generate
##'
##' generate Time series for regional and bnf data
##'
##' @param from start date in the format "mm-yyyy"
##' @param to end date in the format "mm-yyyy"
##' @param settings analysis settings generated with \emph{setConfig()}
##' @param on one of \emph{"REGION"}, \emph{"BNF"} and \emph{"BOTH"} indicating which ts to produce,
##' default = "BOTH"
##' @param save logic, if TRUE, an external file will be saved in rootdir. Default = TRUE
##' @details The function summaries plpd or bnf data on ITEMS, NIC, ACT.COST, QUANTITY and REGION
##' and generates TS over the period of time indicated trought the \emph{from} and \emph{to} arguments
##' @return The function returns a data.table with time series data for the selected period, as generated, in the time range selected,
##' via \emph{generateSummaries()}
##' @export
timeSeries_generate = function(from = "mm/yyyy", to = "mm/yyyy", settings = NULL, on = c("BOTH"), save = TRUE){
  # Resolve working/output directory paths (incl. timeSeriesDir) from the settings.
  dirs = dirsGen(settings)
  # 'save' must be a single TRUE or FALSE.
  # Fix: the original '%in% c(TRUE, FALSE)' test silently accepted logical vectors.
  if(!isTRUE(save) && !isFALSE(save)){
    stop("'save' must be logic: TRUE or FALSE")
  }
  onAll = c("REGION","BNF","BOTH")
  if(any(!on %in% onAll)){
    stop(paste0("Error in 'on' Admitted values are: ", paste0(onAll, collapse = ", "), " quoted"))
  }
  # Scalar flags for which series to produce.
  # Fix: the original compared a possibly multi-element 'on' with '==' inside
  # if(), which errors (R >= 4.2) for on = c("REGION","BNF"); %in% handles any
  # admitted combination and makes c("REGION","BNF") equivalent to "BOTH".
  REGION_TF = any(c("REGION", "BOTH") %in% on)
  BNF_TF = any(c("BNF", "BOTH") %in% on)
  # Dates arrive as "mm/yyyy" strings.
  # Fix: use is.character() instead of comparing class() with '=='.
  if(!is.character(from) || !is.character(to)){
    stop("from and to must be character vectors")
  }
  # A '/' within the first two characters means the month was given as a single digit.
  if(grepl("/", substr(from, 1, 2)) | grepl("/", substr(to, 1, 2))){
    # Fix: error message translated from Italian to English.
    stop("Error: the month must be given in 'mm' format")
  }
  fmm = as.numeric(substr(from, 1, 2))
  fyy = as.numeric(substr(from, 4, 7))
  tmm = as.numeric(substr(to, 1, 2))
  tyy = as.numeric(substr(to, 4, 7))
  # starting checks
  if(!all(c(fmm, tmm) %in% 1:12)){
    stop("Error in month format")
  }
  if(!all(c(fyy, tyy) %in% 2018:2019)){
    stop("Error in year: only years 2018 and 2019 admitted")
  }
  # yyyymm integers allow simple chronological comparison and filename building.
  fromDate = as.numeric(paste0(fyy, stringr::str_pad(fmm, 2, "left", 0)))
  toDate = as.numeric(paste0(tyy, stringr::str_pad(tmm, 2, "left", 0)))
  if(fromDate > toDate){
    stop("'from' date must be prior to 'to' date")
  }
  # Check if time series files already exist, to avoid recomputing them.
  if(REGION_TF && BNF_TF){
    existsTF = all(file.exists(paste0(dirs$timeSeriesDir, "regionTS_", fromDate, "_", toDate, ".csv")),
                   file.exists(paste0(dirs$timeSeriesDir, "bnfTS_", fromDate, "_", toDate, ".csv")))
  }
  if(REGION_TF && !BNF_TF){
    existsTF = file.exists(paste0(dirs$timeSeriesDir, "regionTS_", fromDate, "_", toDate, ".csv"))
  }
  if(BNF_TF && !REGION_TF){
    existsTF = file.exists(paste0(dirs$timeSeriesDir, "bnfTS_", fromDate, "_", toDate, ".csv"))
  }
  if(existsTF){
    return(message(paste("TS file/s already exist/s in", dirs$timeSeriesDir, sep = '\n')))
  }else{
    # create vector of yyyymm times covering the admitted years, then keep
    # only those within [fromDate, toDate]
    times = data.table::data.table(expand.grid(2018:2019, stringr::str_pad(1:12, 2, "left", 0)))[
      order(Var1)][, times := paste0(Var1, Var2)][, mget(c("Var2", "Var1", "times"))]
    data.table::setnames(times, colnames(times), c("mm", "yyyy", "times"))
    times2extract = times[data.table::between(times, fromDate, toDate)]
    #######################################
    ## DOWNLOAD DATA
    tempSet = settings
    message("DOWNLOAD DATA")
    # Download BNF Data: one file per year in range (BNF files are keyed on January)
    year2extract = times2extract[, unique(yyyy)]
    times2extractBnf = times[yyyy %in% year2extract & mm == "01"]
    for (t in times2extractBnf[, times]){
      tempSet$year = times2extractBnf[times == t, yyyy]
      tempSet$month = times2extractBnf[times == t, mm]
      monthlyData_download(tempSet, whichData = "bnf")
    }
    # Download PLPD Data: one file per month in range
    for (t in times2extract[, times]){
      print(paste0(times2extract[times == t, yyyy], " - ", times2extract[times == t, mm]))
      tempSet$year = times2extract[times == t, yyyy]
      tempSet$month = times2extract[times == t, mm]
      monthlyData_download(tempSet, whichData = "plpd")
    }
    #######################################################
    ### IMPORT DATA IN WS, GENERATE & AGGREGATE SUMMARIES
    message("GENERATE TS")
    for (t in times2extract[, times]){
      print(paste0(times2extract[times == t, yyyy], " - ", times2extract[times == t, mm]))
      ### IMPORT
      # BNF Data (yearly file, keyed on January of the month's year)
      year2extract = times2extract[times == t, unique(yyyy)]
      times2extractBnf = times[yyyy %in% year2extract & mm == "01"]
      tempSet$year = times2extractBnf[, unique(yyyy)]
      tempSet$month = as.character(times2extractBnf[, mm])
      bnfDat = monthlyData_import(tempSet, whichData = "bnf")[[1]]
      # PLPD Data (monthly file)
      tempSet$year = times2extract[times == t, yyyy]
      tempSet$month = as.character(times2extract[times == t, mm])
      plpdDat = suppressMessages(monthlyData_import(tempSet, whichData = "plpd"))[[1]]
      ### AGGREGATE
      # Append each month's summary to the accumulated TS (first iteration creates it).
      if(REGION_TF){
        tempSet$year = times2extract[times == t, yyyy]
        tempSet$month = as.character(times2extract[times == t, mm])
        if(exists("regionPresc_ts")){
          regionPresc_ts = rbind(regionPresc_ts, generateSummaries(plpdDat, bnfDat, on = "REGION", settings = tempSet))
        }else{
          regionPresc_ts = generateSummaries(plpdDat, bnfDat, on = "REGION", settings = tempSet)
        }
      }
      if(BNF_TF){
        tempSet$year = times2extract[times == t, yyyy]
        tempSet$month = as.character(times2extract[times == t, mm])
        if(exists("bnfPresc_ts")){
          bnfPresc_ts = unique(rbind(bnfPresc_ts, suppressWarnings(generateSummaries(plpdDat, bnfDat, on = "BNF", settings = tempSet))))
        }else{
          bnfPresc_ts = suppressWarnings(generateSummaries(plpdDat, bnfDat, on = "BNF", settings = tempSet))
        }
      }
      ### REMOVE monthly data before the next iteration
      rm(plpdDat)
      rm(bnfDat)
    }
    # Assemble the return list according to the requested series.
    if(BNF_TF && REGION_TF){
      out = list(regionTS = regionPresc_ts,
                 bnfTS = bnfPresc_ts)
    }
    if(BNF_TF && !REGION_TF){
      out = list(bnfTS = bnfPresc_ts)
    }
    if(REGION_TF && !BNF_TF){
      out = list(regionTS = regionPresc_ts)
    }
    #######################################################
    ### SAVE OUTPUT
    if(save){
      if(!dir.exists(dirs$timeSeriesDir)){
        # Fix: recursive = TRUE (was T)
        dir.create(dirs$timeSeriesDir, recursive = TRUE)
      }
      if("regionTS" %in% names(out)){
        # Fix: row.names = FALSE (was F)
        write.csv2(out$regionTS, paste0(dirs$timeSeriesDir, "regionTS_", fromDate, "_", toDate, ".csv"), row.names = FALSE)
      }
      if("bnfTS" %in% names(out)){
        write.csv2(out$bnfTS, paste0(dirs$timeSeriesDir, "bnfTS_", fromDate, "_", toDate, ".csv"), row.names = FALSE)
      }
      # Fix: report the actual file names (".csv", correct case) and the actual
      # directory written to (dirs$timeSeriesDir), instead of the originals
      # "regionTs_..csv"/"...csv" and settings$rootdir.
      message(paste0("Time series: ",
                     "regionTS_", fromDate, "_", toDate, ".csv \n ",
                     "bnfTS_", fromDate, "_", toDate, ".csv \n",
                     "saved in ", dirs$timeSeriesDir))
    }
    return(out)
  }
}
|
cfbf298b88eac96c0182beb02d5aca08a93010ca
|
a9eb0f16decb9acc9920f31bcb0876ee549c3c6d
|
/man/print.pelora.Rd
|
4f9c7929f574d1ad7d6723d2ff74873cd907e818
|
[] |
no_license
|
mmaechler/supclust
|
9f9d8f6a4d2c8793772bc2d57bed4e2ac4597a9b
|
4341e7e68ab472049251cc8f050b1f04a8b7c765
|
refs/heads/master
| 2023-08-21T18:43:56.790652
| 2021-09-25T17:10:32
| 2021-09-25T17:10:32
| 304,247,366
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 966
|
rd
|
print.pelora.Rd
|
\name{print.pelora}
\alias{print.pelora}
\title{Print Method for Pelora Objects}
\description{Yields an overview about the type, size and final criterion
value of the predictor variables that were selected by \code{pelora}.}
\usage{
\S3method{print}{pelora}(x, digits = getOption("digits"), details = FALSE, ...)
}
\arguments{
\item{x}{an \R object of \code{\link{class}} \code{"pelora"},
typically the result of \code{\link{pelora}()}.}
\item{digits}{the number of digits that should be printed.}
\item{details}{logical, defaults to \code{FALSE}. If set to
\code{TRUE}, the output corresponds to
\code{\link{summary.pelora}}.}
\item{\dots}{Further arguments passed to and from methods.}
}
\author{Marcel Dettling, \email{dettling@stat.math.ethz.ch}}
\seealso{\code{\link{pelora}}, also for references.
}
\examples{
## Running the examples of Pelora's help page
example(pelora, echo = FALSE)
print(fit)
}
\keyword{classif}
\keyword{cluster}
|
3635bb851866750fdb475735b2127b5c9ef1a788
|
ce79fd85cec435844db55554f2e6703ba712a45b
|
/R/get_eig.R
|
c931e7d2063c902bd05db9a01491ab38169767b6
|
[
"MIT"
] |
permissive
|
aswansyahputra/sensolution
|
67f84e578e24e357b5dbe31e0725cbadf43127de
|
5e489cf9281ad8583700f2125d19dc366e7b9095
|
refs/heads/master
| 2020-04-16T14:05:04.627914
| 2019-01-17T09:49:11
| 2019-01-17T09:49:11
| 140,168,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
get_eig.R
|
#' Get eigenvalues
#'
#' Function to augment eigenvalues from a multivariate analysis
#' (an object of class "PCA", "CA", "MFA" or "MCA") into a tidy data frame.
#'
#' @param .res an object of class \code{"PCA"}, \code{"CA"}, \code{"MFA"}
#'   or \code{"MCA"}.
#' @import dplyr magrittr
#' @return dataframe with one row per dimension: \code{dim}, \code{eigenvalue},
#'   \code{var_percent} and \code{var_cummulative}
#' @export
get_eig <- function(.res) {
  if (missing(.res)) {
    stop("Data is not supplied", call. = FALSE)
  } else if (!inherits(.res, c("PCA", "CA", "MFA", "MCA"))) {
    # inherits() with a vector is TRUE when any class matches
    # (replaces the less idiomatic any(class(x) %in% ...) comparison).
    stop("Data is not one of PCA, CA, MFA, MCA class", call. = FALSE)
  }
  .res %>%
    extract2("eig") %>%                 # pull the eigenvalue matrix
    as_tibble(rownames = "dim") %>%     # keep dimension labels as a column
    rename(
      "var_percent" = `percentage of variance`,
      "var_cummulative" = `cumulative percentage of variance`
    ) %>%
    # Fix: str_replace_all belongs to stringr, which was neither imported nor
    # namespaced — the original errored at runtime with "could not find function".
    mutate(dim = stringr::str_replace_all(dim, "comp", "Dim"))
}
|
2e5463084aa489d82b111dd75514b0e66df48d12
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/taRifx/examples/xtablelm.Rd.R
|
01cf25c759fd23d4de24488384f1e40f6839053a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
xtablelm.Rd.R
|
library(taRifx)
### Name: xtablelm
### Title: Produces the output of an lm object as it appears in the R
### console when you type summary(lmobject)
### Aliases: xtablelm
### ** Examples
##
|
81f12d672f50dca222a75a62db11e926176e795a
|
02ec749326b02b9452af36852003b17e3200d815
|
/man/chr_uzemi.Rd
|
3f41a8b1c047d7d8b9b35c7f6cb096b99cfdaeba
|
[
"MIT"
] |
permissive
|
jlacko/RCzechia
|
1b3a9f763e27fc7ef76bd64e1067c135a08ce1b1
|
2c60fe5a68f40db943b25b42c0cd95a9f9d64a0c
|
refs/heads/master
| 2023-09-03T20:33:28.475093
| 2023-05-05T10:33:33
| 2023-05-05T10:33:33
| 97,862,932
| 24
| 6
|
NOASSERTION
| 2023-05-05T10:33:34
| 2017-07-20T17:46:17
|
R
|
UTF-8
|
R
| false
| true
| 870
|
rd
|
chr_uzemi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chr_uzemi.R
\name{chr_uzemi}
\alias{chr_uzemi}
\title{Protected Natural Areas}
\source{
© AOPK ČR, 2020 \url{https://data.nature.cz/}
}
\usage{
chr_uzemi()
}
\value{
\code{sf} data frame with 2677 rows of 3 variables + geometry
\describe{
\item{TYP}{Type of protected area}
\item{NAZEV}{Name, with Czech accents}
\item{PLOCHA}{type of protected area: large or small}
}
}
\description{
Function returning data frame of protected natural areas (Chráněná území) of the Czech Republic as \code{sf} polygons. It has no obligatory parameters.
}
\details{
Due to package size constraints the data are stored externally (and a working internet connection is required to use the package).
The data is current to September 2020. Downloaded size is 7 MB (so use with caution, and patience).
}
|
27972bdceb3362a3369c8a3426f5b204715b4937
|
290dcf2dcab08672cd8e7e6910bfd36cc024824d
|
/inst/ACORN-app/www/R/output/overview_evolution_blood_culture.R
|
bf408a85d5a5227841cefd0f5985aac7eec59d01
|
[] |
no_license
|
ocelhay/ACORN
|
ceda5a0edbd455507261c30043ff84f94c88e86f
|
755384ba96364fb223a1928a814417c5b8b17079
|
refs/heads/master
| 2020-12-07T07:54:54.420477
| 2020-12-03T22:25:06
| 2020-12-03T22:25:06
| 232,677,929
| 0
| 1
| null | 2020-09-11T21:46:16
| 2020-01-08T23:07:29
|
R
|
UTF-8
|
R
| false
| false
| 1,397
|
r
|
overview_evolution_blood_culture.R
|
# Monthly time series: percentage of enrolled episodes that have a blood-culture
# specimen. Re-renders whenever the reactive filters change.
output$evolution_blood_culture <- renderHighchart({
  # Require filtered microbiology data with at least one row before plotting.
  req(microbio_filter())
  req(microbio_filter() %>% nrow() > 0)
  # Add date of enrollment to microbio_filter
  microbio_filter_mod <- left_join(microbio_filter(),
                                   patient_filter() %>% select(date_enrollment, episode_id),
                                   by = 'episode_id')
  # Join monthly totals: 'all' = distinct episodes enrolled that month,
  # 'blood' = distinct episodes with a blood specimen that month
  # (episodes without blood specimens yield NA in 'blood').
  dta <- left_join(
    patient_filter() %>%
      group_by(month = floor_date(date_enrollment, "month")) %>%
      summarise(all = n_distinct(episode_id), .groups = "drop"), # Number of episodes per month of enrollment
    microbio_filter_mod %>%
      fun_filter_blood_only() %>%
      group_by(month = floor_date(date_enrollment, "month")) %>%
      summarise(blood = n_distinct(episode_id), .groups = "drop"), # Number of blood specimen per month of enrollment
    by = "month") %>%
    mutate(month = substr(as.character(month), 1, 7),  # keep "yyyy-mm" label
           percent = round(100 * blood/all, 1),        # % of episodes with a blood specimen
           color = "#e31a1c")                          # fixed bar colour
  # Column chart of monthly percentages, y axis capped at 100%, with a
  # tooltip showing the underlying counts and an export context menu.
  hchart(dta, type = "column", hcaes(x = "month", y = "percent", color = "color")) %>%
    hc_yAxis(title = list(text = "%", rotation = 0), max = 100) %>% hc_xAxis(title = "Month of Enrollment") %>%
    hc_tooltip(pointFormat = "<b>Blood specimens {point.percent}%</b><br> ({point.blood} of {point.all} enrollments)") %>%
    hc_exporting(enabled = TRUE, buttons = list(contextButton = list(menuItems = hc_export_kind)))
})
|
356ea9ac40e6003402473f474f60b6e6c0cab701
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/neptune_list_tags_for_resource.Rd
|
20cf4fa18023fbe5f417ccb496b84c486b6ad599
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 857
|
rd
|
neptune_list_tags_for_resource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neptune_operations.R
\name{neptune_list_tags_for_resource}
\alias{neptune_list_tags_for_resource}
\title{Lists all tags on an Amazon Neptune resource}
\usage{
neptune_list_tags_for_resource(ResourceName, Filters = NULL)
}
\arguments{
\item{ResourceName}{[required] The Amazon Neptune resource with tags to be listed. This value is an
Amazon Resource Name (ARN). For information about creating an ARN, see
\href{https://docs.aws.amazon.com/neptune/latest/userguide/#tagging.ARN.Constructing}{Constructing an Amazon Resource Name (ARN)}.}
\item{Filters}{This parameter is not currently supported.}
}
\description{
Lists all tags on an Amazon Neptune resource.
See \url{https://www.paws-r-sdk.com/docs/neptune_list_tags_for_resource/} for full documentation.
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.